text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python3
# Weather-station display: reads light and rain sensors through an ADS1115
# ADC over I2C and shows time, date and both readings (as percentages) on a
# 20x4 character LCD driven via Raspberry Pi GPIO.
import board
import busio
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
from RPLCD import CharLCD
import time
from RPi import GPIO
i2c = busio.I2C(board.SCL, board.SDA)
ads = ADS.ADS1115(i2c)
lightchan = AnalogIn(ads, ADS.P1)  # light sensor on ADC channel 1
rainchan = AnalogIn(ads, ADS.P0)   # rain sensor on ADC channel 0
# Clear any GPIO state left by a previous run before the LCD claims its pins.
GPIO.cleanup()
#lcd = CharLCD(numbering_mode=GPIO.BOARD, cols=20, rows=4, pin_rs=37, pin_e=35, pins_data=[33, 31, 29, 23])
lcd = CharLCD(numbering_mode=GPIO.BCM, cols=20, rows=4, pin_rs=26, pin_e=19, pins_data=[13, 6, 5, 11])
lcd.clear()
while True:
    lcd.cursor_pos = (0, 0)
    lcd.write_string("Time: %s" %time.strftime("%H:%M:%S"))
    lcd.cursor_pos = (1, 0)
    lcd.write_string("Date: %s" %time.strftime("%d/%m/%Y"))
    lcd.cursor_pos = (2, 0)
    # Scale the voltage to a percentage; assumes the sensor swings over the
    # full 0-3.3 V range — TODO confirm against the ADS1115 gain setting.
    lightValue = int(lightchan.voltage/3.3 * 100)
    lcd.write_string("Light: %d%%\n" %lightValue)
    # Inverted reading — presumably the rain sensor's voltage drops when wet,
    # so this yields a "wetness" percentage; confirm with the sensor datasheet.
    rainValue = int(100 - rainchan.voltage/3.3 * 100)
    #print(chan.value, chan.voltage)
    lcd.cursor_pos = (3, 0)
    lcd.write_string("Rain: %d%%\n" %rainValue)
    time.sleep(.25)
|
from django.shortcuts import render
import requests
import datetime
from datetime import date
def index(request):
    """Render the countdown/weather dashboard.

    Computes the days/weeks remaining until a fixed target date and fetches
    current conditions for three locations from the apixu API.

    Fixes over the previous version: the context dict listed
    ``kw_humidity``/``bc_humidity``/``az_humidity`` twice (duplicate keys are
    silently collapsed), the per-city fetch logic was copy-pasted three
    times, and today's date was rebuilt via non-portable ``%-m`` strftime
    codes instead of ``date.today()``.
    """
    future_date = date(2019, 6, 4)  # countdown target date
    today_date = date.today()
    number_of_days = (future_date - today_date).days
    weeks = int((number_of_days % 365) / 7)
    total_days_completed = 365 - number_of_days
    percent = '{:.1%}'.format(total_days_completed / 365)
    ## api with zip code for city locations
    # NOTE(review): the API key is hard-coded; move it to settings/env config.
    api_keywest = 'https://api.apixu.com/v1/current.json?key=21ef922400764f56a2c150914182906&q=33040'
    api_baycity = 'https://api.apixu.com/v1/current.json?key=21ef922400764f56a2c150914182906&q=48708'
    api_scottsdale = 'https://api.apixu.com/v1/current.json?key=21ef922400764f56a2c150914182906&q=85257'

    def _weather(api_url, prefix):
        """Fetch one location; return (template-context dict, raw payload)."""
        payload = requests.get(api_url).json()
        cur = payload['current']
        loc = payload['location']
        ctx = {
            prefix + '_temp_f': cur['temp_f'],
            prefix + '_conditions': cur['condition']['text'],
            prefix + '_feelslike_f': cur['feelslike_f'],
            prefix + '_humidity': cur['humidity'],
            prefix + '_city': loc['name'],
            prefix + '_state': loc['region'],
            # apixu icon URLs are protocol-relative; prepend the scheme.
            prefix + '_icon': 'http:' + cur['condition']['icon'],
        }
        return ctx, payload

    kw_ctx, keywest = _weather(api_keywest, 'kw')
    bc_ctx, _ = _weather(api_baycity, 'bc')
    az_ctx, _ = _weather(api_scottsdale, 'az')
    context = {}
    context.update(kw_ctx)
    context.update(bc_ctx)
    context.update(az_ctx)
    # Only Key West shows its last-updated stamp in the template.
    context['kw_update'] = keywest['current']['last_updated']
    context['days'] = number_of_days
    context['weeks'] = weeks
    context['percent'] = percent
    return render(request, 'keywest/index.html', context)
"""Core tests."""
import csv
import logging
import unittest
from typing import Iterator, List, Tuple
import yaml
from oaklib import get_adapter
from oaklib.datamodels.vocabulary import IS_A, PART_OF
from oaklib.interfaces.obograph_interface import OboGraphInterface
from ontogpt.io.yaml_wrapper import dump_minimal_yaml
from ontogpt.ontex import extractor
from ontogpt.ontex.extractor import OntologyExtractor, Task, TaskCollection
from tests import (
CELLULAR_ANATOMICAL_ENTITY,
ENVELOPE,
FUNGI,
IMBO,
INPUT_DIR,
INTRACELLULAR_ORGANELLE,
MEMBRANE_BOUNDED_ORGANELLE,
NUCLEAR_ENVELOPE,
NUCLEAR_MEMBRANE,
NUCLEUS,
ORGANELLE,
OUTPUT_DIR,
VACUOLE,
)
# Test ontology databases (SQLite, OAK-readable).
TEST_ONTOLOGY_OAK = INPUT_DIR / "go-nucleus.db"
TEST_ONTOLOGY_ABOX = INPUT_DIR / "fhkb.db"

logger = logging.getLogger(extractor.__name__)
logger.setLevel(level=logging.INFO)

# Predicate CURIEs used by the family-history (fhkb) ABox test ontology.
# FIX: HAS_PARENT was previously assigned twice with the same value.
IS_ANCESTOR_OF = "fhkb:isAncestorOf"
HAS_ANCESTOR = "fhkb:hasAncestor"
HAS_PARENT = "fhkb:hasParent"
IS_PARENT_OF = "fhkb:isParentOf"
HAS_FATHER = "fhkb:hasFather"
HAS_MOTHER = "fhkb:hasMother"
HAS_SIBLING = "fhkb:hasSibling"
HAS_BROTHER = "fhkb:hasBrother"
HAS_SISTER = "fhkb:hasSister"
class TestOntologyExtractor(unittest.TestCase):
    """Test ability to convert from OAK to native HALO form."""

    def setUp(self) -> None:
        """Set up."""
        # TBox adapter over the go-nucleus test ontology.
        self.adapter = get_adapter(str(TEST_ONTOLOGY_OAK))
        # ABox (individuals) adapter over the family-history test ontology.
        self.abox_adapter = get_adapter(str(TEST_ONTOLOGY_ABOX))
        if not isinstance(self.adapter, OboGraphInterface):
            raise ValueError("Not an OboGraphInterface")
        self.extractor = OntologyExtractor(adapter=self.adapter)
        self.abox_extractor = OntologyExtractor(adapter=self.abox_adapter)
        # Restrict ABox task generation to these two relationship predicates.
        self.abox_extractor.query_predicates = [HAS_ANCESTOR, HAS_SIBLING]

    def cases(self) -> Iterator[Tuple[Task, List[str]]]:
        # Each yielded pair is (task, expected answer CURIEs); an expected
        # value of None means "do not check the answers".
        extractor = self.extractor
        # yield extractor.extract_indirect_superclasses_task(select_random=True), None
        yield extractor.extract_transitive_superclasses_task(
            subclass=NUCLEUS, siblings=[VACUOLE], roots=[ORGANELLE]
        ), [ORGANELLE, IMBO, INTRACELLULAR_ORGANELLE, MEMBRANE_BOUNDED_ORGANELLE]
        yield extractor.extract_indirect_superclasses_task(
            subclass=NUCLEUS, siblings=[VACUOLE], roots=[ORGANELLE]
        ), [ORGANELLE, INTRACELLULAR_ORGANELLE, MEMBRANE_BOUNDED_ORGANELLE]
        yield extractor.extract_most_recent_common_subsumers_task(
            subclass1=NUCLEUS, subclass2=VACUOLE, siblings=[NUCLEAR_MEMBRANE], roots=[]
        ), [IMBO]
        # Spiking NUCLEUS under NUCLEAR_MEMBRANE together with a disjointness
        # axiom should make NUCLEUS (and only NUCLEUS) incoherent.
        yield extractor.extract_incoherent_ontology_task(
            incoherents=[NUCLEUS],
            siblings=[VACUOLE],
            disjoints=[(ORGANELLE, ENVELOPE)],
            spiked_relationships=[(NUCLEUS, IS_A, NUCLEAR_MEMBRANE)],
            roots=[CELLULAR_ANATOMICAL_ENTITY],
        ), [NUCLEUS]
        yield extractor.extract_subclass_of_expression_task(
            superclass=NUCLEUS,
            predicate=PART_OF,
            siblings=[VACUOLE],
        ), [NUCLEAR_MEMBRANE, NUCLEAR_ENVELOPE]
        yield extractor.extract_subclass_of_expression_task(
            superclass=IMBO,
            predicate=PART_OF,
            siblings=[FUNGI],
        ), [NUCLEAR_MEMBRANE, NUCLEAR_ENVELOPE]

    def test_extract(self):
        """Test extract tasks."""
        extractor = self.extractor
        for task, expected in self.cases():
            # Every generated task must carry an ontology payload.
            if not task.ontology.axioms:
                raise ValueError(f"Task {task} has no axioms")
            print(yaml.dump(task.dict(), sort_keys=False))
            answer_texts = [a.text for a in task.answers]
            if expected is not None:
                # Order-independent comparison against the expected labels.
                self.assertCountEqual(answer_texts, [extractor._name(x) for x in expected])

    @unittest.skip("Non-deterministic")
    def test_random_taxon_constraints(self):
        """Test extract random tasks."""
        extractor = self.extractor
        task = extractor.extract_taxon_constraint_task(select_random=True, never_in=True)
        print(dump_minimal_yaml(task))

    @unittest.skip("Non-deterministic")
    def test_random(self):
        """Test extract random tasks."""
        extractor = self.extractor
        abox_extractor = self.abox_extractor
        abox_tc = abox_extractor.create_random_tasks(20, abox=True)
        tc = extractor.create_random_tasks(20, abox=False)
        tc.tasks.extend(abox_tc.tasks)
        for task in tc.tasks:
            if not task.answers:
                print(f"Task {task} has no answers")
                # raise ValueError(f"Task {task} has no answers")
            if not task.ontology.axioms:
                raise ValueError(f"Task {task} has no axioms")
                # raise ValueError(f"Task {task} has no axioms")
        # Round-trip the task collection through YAML on disk.
        path = OUTPUT_DIR / "random-reasoner-tasks.yaml"
        with open(path, "w") as f:
            f.write(dump_minimal_yaml(tc))
        tc = TaskCollection.load(path)
        task_types = {type(obj) for obj in tc.tasks}
        print(len(tc.tasks))
        print(task_types)
        # increase this every time you add a new task type
        self.assertEqual(len(task_types), 7)

    @unittest.skip("Non-deterministic")
    def test_random_obfuscated(self):
        """Test random task generation with obfuscated (masked) CURIEs."""
        extractor = self.extractor
        extractor.obfuscate = True
        abox_extractor = self.abox_extractor
        abox_extractor.obfuscate = True
        abox_tc = abox_extractor.create_random_tasks(20, abox=True)
        tc = extractor.create_random_tasks(20, abox=False)
        # merge
        tc.tasks.extend(abox_tc.tasks)
        for task in tc.tasks:
            if not task.answers:
                print(f"Task {task} has no answers")
                # raise ValueError(f"Task {task} has no answers")
            if not task.ontology.axioms:
                raise ValueError(f"Task {task} has no axioms")
                # raise ValueError(f"Task {task} has no axioms")
        path = OUTPUT_DIR / "obfuscated-reasoner-tasks.yaml"
        with open(path, "w") as f:
            f.write(dump_minimal_yaml(tc))
        tc = TaskCollection.load(path)
        task_types = {type(obj) for obj in tc.tasks}
        print(len(tc.tasks))
        print(task_types)
        # increase this every time you add a new task type
        self.assertEqual(len(task_types), 6)
        # Obfuscation must have mapped a substantial number of CURIEs.
        self.assertGreater(len(tc.obfuscated_curie_map.keys()), 20)

    def test_introspect(self):
        """Test introspection."""
        root_class = Task
        # Dump a TSV catalog of all Task subclasses with their codes/docs.
        with open(OUTPUT_DIR / "task-classes.tsv", "w") as f:
            writer = csv.DictWriter(f, fieldnames=["code", "task", "description"], delimiter="\t")
            writer.writeheader()
            for subclass in root_class.__subclasses__():
                row = {
                    "code": subclass._code,
                    "task": subclass.__name__,
                    "description": subclass.__doc__.replace("\n", " ").strip(),
                }
                writer.writerow(row)
|
#TAGS dp, kadane
# variant of kadane
class Solution:
    def maxProduct(self, nums: List[int]) -> int:
        """Return the largest product over all contiguous subarrays of nums.

        Kadane-style scan tracking both the running maximum and minimum
        product ending at each element (the minimum can flip sign and
        become the maximum when a negative value arrives).
        """
        best = float('-inf')
        hi = lo = 1
        for x in nums:
            candidates = (x, x * hi, x * lo)
            hi, lo = max(candidates), min(candidates)
            best = max(best, hi)
        return best
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import unittest
import torch
from .context import BasicBundleTestSuite, BasicDetectionBundleTestSuite, BasicEndpointV2TestSuite
class EndPointInfer(BasicEndpointV2TestSuite):
    """Smoke-tests for the /infer endpoint using classic (non-bundle) models."""

    def test_segmentation(self):
        # Inference requires CUDA; skip silently on CPU-only machines.
        if not torch.cuda.is_available():
            return
        model = "segmentation_spleen"
        image = "spleen_3"
        response = self.client.post(f"/infer/{model}?image={image}")
        assert response.status_code == 200
        time.sleep(1)  # let the server settle before the next request

    def test_deepgrow_pipeline(self):
        if not torch.cuda.is_available():
            return
        model = "deepgrow_pipeline"
        image = "spleen_3"
        # Deepgrow needs user clicks: one foreground point, no background.
        params = {"foreground": [[140, 210, 28]], "background": []}
        response = self.client.post(f"/infer/{model}?image={image}", data={"params": json.dumps(params)})
        assert response.status_code == 200
        time.sleep(1)


class TestBundleInferTask(BasicBundleTestSuite):
    """Smoke-test for inference through a MONAI bundle model."""

    def test_spleen_bundle_infer(self):
        if not torch.cuda.is_available():
            return
        model = "spleen_ct_segmentation"
        image = "spleen_8"
        response = self.client.post(f"/infer/{model}?image={image}")
        assert response.status_code == 200
        time.sleep(1)


class TestDetectionBundleInferTask(BasicDetectionBundleTestSuite):
    """Smoke-test for inference through a detection bundle model."""

    def test_lung_nodule_detector_infer(self):
        if not torch.cuda.is_available():
            return
        model = "lung_nodule_ct_detection"
        # The test image is referenced by its DICOM series instance UID.
        image = "1.3.6.1.4.1.14519.5.2.1.6279.6001.188385286346390202873004762827"
        response = self.client.post(f"/infer/{model}?image={image}")
        assert response.status_code == 200
        time.sleep(1)


if __name__ == "__main__":
    unittest.main()
|
import arviz as az
import pymc3 as pm
import random
import numpy as np
import pandas as pd
import logging
# removes pymc3 sampling output apart from errors
logger = logging.getLogger('pymc3')
logger.setLevel(logging.ERROR)
def convert_full_model(model, num_samples, InferenceData_dims, InferenceData_coords):
    """Sample a PyMC3 model and package prior/posterior/PPC draws.

    Runs MCMC sampling on the model provided, draws from the prior
    predictive and posterior predictive distributions, and returns
    everything bundled as an ArviZ InferenceData object.

    :param model: pymc3 model, used as a context manager
    :param num_samples: number of posterior draws
    :param InferenceData_dims: dims mapping for az.from_pymc3 ({} to skip)
    :param InferenceData_coords: coords mapping for az.from_pymc3 ({} to skip)
    """
    with model:
        trace = pm.sample(progressbar=False, draws=num_samples)
        prior = pm.sample_prior_predictive()
        posterior = pm.sample_posterior_predictive(trace, progressbar=False)
        # Force the sampled outputs to float64. This creates continuity on
        # the values and allows plotting multiple variables/outputs in one
        # plot without dtype errors.
        for key, val in prior.items():
            prior[key] = val.astype('float64')
        for key, val in posterior.items():
            posterior[key] = val.astype('float64')
        if InferenceData_dims != {} and InferenceData_coords != {}:
            try:
                data = az.from_pymc3(
                    trace=trace,
                    prior=prior,
                    posterior_predictive=posterior,
                    dims=InferenceData_dims,
                    coords=InferenceData_coords,
                )
            except ValueError:
                # FIX: was a bare `except:` that swallowed everything
                # (including KeyboardInterrupt). Catch only the dims/coords
                # mismatch, matching convert_posterior_model, and fall back
                # to an unlabeled conversion.
                data = az.from_pymc3(
                    trace=trace,
                    prior=prior,
                    posterior_predictive=posterior,
                )
        else:
            data = az.from_pymc3(
                trace=trace,
                prior=prior,
                posterior_predictive=posterior,
            )
    return data
def convert_posterior_model(model, num_samples, InferenceData_dims, InferenceData_coords):
    """Sample a PyMC3 model and package posterior + PPC draws.

    Like convert_full_model but without prior-predictive sampling (the
    prior is not required here). Returns an ArviZ InferenceData object.
    """
    with model:
        trace = pm.sample(progressbar=False, draws=num_samples)
        ppc = pm.sample_posterior_predictive(trace, progressbar=False)
        # Cast every sampled output to float64 so multiple variables can be
        # plotted together without dtype mismatches.
        for name in ppc:
            ppc[name] = ppc[name].astype('float64')
        labelled = InferenceData_dims != {} and InferenceData_coords != {}
        if labelled:
            try:
                return az.from_pymc3(
                    trace=trace,
                    posterior_predictive=ppc,
                    dims=InferenceData_dims,
                    coords=InferenceData_coords,
                )
            except ValueError:
                # dims/coords did not match the model; fall through to the
                # unlabeled conversion below.
                pass
        return az.from_pymc3(
            trace=trace,
            posterior_predictive=ppc,
        )
def data_reduce_cache(func):
    """Memoize a data-reduction function, keyed ONLY on its ``fraction`` kwarg.

    Every call with the same ``fraction`` returns the same reduced dataset,
    so all models see an identical subsample. The cache deliberately ignores
    the ``data`` argument (the program always passes the same frame); this
    was a late fix to avoid restructuring the rest of the program.

    The wrapped function must be called with keyword arguments, including
    ``fraction``.
    """
    from functools import wraps  # local import keeps the decorator self-contained

    cache = {}

    @wraps(func)  # FIX: preserve the wrapped function's name and docstring
    def wrapped(**kwargs):
        key = kwargs['fraction']
        try:
            # EAFP: one dict lookup instead of `in` followed by indexing.
            return cache[key]
        except KeyError:
            result = func(**kwargs)
            cache[key] = result
            return result

    return wrapped
@data_reduce_cache
def reduce_data_remove(data, fraction):
    """Return a copy of ``data`` keeping roughly ``fraction`` of its rows.

    Randomly selected rows are removed entirely rather than replaced with
    NaN: imputing NaNs is often inappropriate and can produce a model that
    cannot be sampled from properly.

    :param data: pandas DataFrame (left unmodified; a deep copy is reduced)
    :param fraction: fraction of rows to KEEP, in [0, 1]
    """
    reduced = data.copy(deep=True)
    n_rows = len(reduced)
    to_remove = int(n_rows - n_rows * fraction)
    # FIX (idiom): random.sample of the index already yields the labels to
    # drop — the old `data.loc[sample].index` round-trip was redundant.
    return reduced.drop(random.sample(list(reduced.index), to_remove))
|
import math
import pandas as pd
from uuid import uuid4
from db import select_song, insert_db
def extract_log_file(path):
    """Read a JSON-lines event log and split it into time/user/playsong data.

    Returns a 3-tuple:
      * a Series of timestamps (ms epoch converted to datetimes),
      * user records (userId, firstName, lastName, gender, level),
      * (song_record, play_record) pairs for each song play.
    Only rows whose page is 'NextSong' (actual song plays) are kept.
    """
    frame = pd.read_json(str(path), dtype={"userId": object, "sessionId": object}, lines=True)
    # get rows which have NextSong value
    frame = frame[frame['page'] == 'NextSong']
    user_cols = ['userId', 'firstName', 'lastName', 'gender', 'level']
    song_cols = ['song', 'artist', 'length']
    play_cols = ['ts', 'userId', 'level', 'sessionId', 'location', 'userAgent']
    times = pd.to_datetime(frame['ts'], unit='ms')
    users = list(frame[user_cols].to_records(index=False))
    plays = list(zip(frame[song_cols].to_records(index=False),
                     frame[play_cols].to_records(index=False)))
    return (times, users, plays)
def transform_time_data(times_series):
    """Expand timestamps into (ts, hour, day, week, month, year, weekday) rows.

    FIX: uses ``isocalendar()[1]`` for the ISO week number instead of the
    ``Timestamp.week`` attribute, which was deprecated and then removed in
    newer pandas releases; ``isocalendar()`` is stable across versions and
    returns the same ISO week value.
    """
    return [
        (time, time.hour, time.day, time.isocalendar()[1], time.month, time.year, time.day_name())
        for time in times_series
    ]
def transform_playsong_data(playsongs, conn):
    """Build songplay rows by joining each play against the song library.

    ``playsongs`` is a list of (song_record, play_record) pairs as produced
    by extract_log_file: song_record is (song, artist, length) and
    play_record is (ts, userId, level, sessionId, location, userAgent).
    """
    result = []
    for playsong in playsongs:
        # Look up (song_id, artist_id) for this play; presumably select_song
        # returns an empty result when the song is not in the library —
        # confirm against db.select_song.
        query_result = select_song(conn, playsong[0])
        song_id, artist_id = None, None
        if query_result:
            song_id, artist_id = query_result[0]
        # Row layout: generated uuid, timestamp, userId, level, song_id,
        # artist_id, sessionId, location, userAgent.
        result.append((str(uuid4()), pd.to_datetime(playsong[1][0], unit='ms'), *list(playsong[1])[1:3], song_id, artist_id, *list(playsong[1])[3:]))
    return result
def process_log_file(conn, path):
    """ETL one log file: extract events, transform them, load into the DB."""
    # Extract
    time_data, user_data, playsong_data = extract_log_file(path)
    # Transform
    transformed_time_data = transform_time_data(time_data)
    transformed_playsong_data = transform_playsong_data(playsong_data, conn)
    # Load
    insert_db(conn, 'time', transformed_time_data)
    insert_db(conn, 'user', user_data)
    insert_db(conn, 'songplay', transformed_playsong_data)
|
from typing import Dict
from typing import List
from arg.qck.decl import QKUnit, QCKQuery, QCKCandidate
from arg.qck.instance_generator.qcknc_datagen import QCKInstanceGenerator, QCKCandidateI
from arg.qck.qck_worker import QCKWorker
from arg.robust.qc_common import load_candidate_all_passage
from cache import load_from_pickle, save_to_pickle, load_cache
from data_generator.job_runner import JobRunner
from epath import job_man_dir
def main():
    """Generate robust_qck_10_predict instances and fan them out to job workers."""
    def is_correct(query: QCKQuery, candidate: QCKCandidate):
        # Prediction-time generation: there are no gold labels, so every
        # (query, candidate) pair is labeled 0.
        return 0

    qk_candidate: List[QKUnit] = load_from_pickle("robust_on_clueweb_qk_candidate_filtered")
    # Candidate passages are expensive to build, so cache them across runs.
    candidate_dict = load_cache("candidate_for_robust_qck_10_predict")
    if candidate_dict is None:
        candidate_dict: \
            Dict[str, List[QCKCandidateI]] = load_candidate_all_passage(256)
        save_to_pickle(candidate_dict, "candidate_for_robust_qck_10_predict")
    generator = QCKInstanceGenerator(candidate_dict, is_correct)
    num_jobs = 250

    def worker_factory(out_dir):
        # Each worker writes its shard of instances into out_dir.
        worker = QCKWorker(qk_candidate, generator, out_dir)
        return worker

    ##
    job_name = "robust_qck_10_predict"
    runner = JobRunner(job_man_dir, num_jobs, job_name, worker_factory)
    runner.start()


if __name__ == "__main__":
    main()
|
# -*- coding: utf8 -*-
import string
from django.core.management.base import BaseCommand
from strongs.models import BibleBook
from progressbar import print_progress
class Command(BaseCommand):
    help = 'Initializes the database with the bible books found in the file bibleBooks_de.txt.'

    def add_arguments(self, parser):
        # No custom arguments for this command.
        return

    def handle(self, *args, **options):
        """Create or update the German BibleBook rows from bibleBooks_de.txt.

        Each line is ``name,short_name[,alt1,alt2,...]``; the 1-based line
        number is the book number.

        Fixes: ``string.join(ele[2:], ',')`` does not exist on Python 3
        (replaced with ``','.join``), and the file handle was never closed
        (now a ``with`` block).
        """
        bookNr = 0
        with open('./bibleBooks_de.txt', 'r') as f:
            for line in f:
                bookNr += 1
                ele = line.split(',')
                if len(ele) >= 2:
                    ele = [x.strip() for x in ele]
                    # Update the existing row for this book/language if
                    # present, otherwise create a new one.
                    existing = BibleBook.objects.filter(nr=bookNr, language='de')
                    book = existing[0] if existing.count() > 0 else BibleBook()
                    book.nr = bookNr
                    book.language = 'de'
                    book.name = ele[0]
                    book.short_name = ele[1]
                    if len(ele) > 2:
                        # Surround with commas so lookups can match ',name,'.
                        book.alternativeNames = ',' + ','.join(ele[2:]) + ','
                    book.save()
                # 66 books expected in total; progress tracks the line number.
                print_progress(bookNr, 66)
# Help on class TempConstr in module gurobipy:
class TempConstr():
    """Stub mirroring gurobipy's ``TempConstr(lhs, sense, rhs)``.

    Gurobi temporary constraint object. Objects of this class are created
    as intermediate results when building constraints using overloaded
    operators.

    The real gurobipy class also defines ``__ge__``, ``__le__``,
    ``__repr__`` and ``__rshift__`` plus the standard ``__dict__`` /
    ``__weakref__`` data descriptors; this stand-in reproduces only the
    constructor signature.
    """

    def __init__(self, lhs, sense, rhs):
        # Intentionally empty: the stub exists for its signature only.
        pass
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import print_function
import SimpleITK as sitk
import sys
# This test is supposed to exercise the Python interface to sitk::Image:
# every overloaded arithmetic/bitwise operator and the indexing interface.
# An unsupported operation would raise and fail the script.
image = sitk.Image( 10, 10, sitk.sitkInt32 )
# Binary arithmetic with images and scalars; results are discarded — we only
# care that the operators are defined and do not raise.
image + image
image + 1
1 + image
image - image
image - 1
1 - image
image * image
image * 1
1 * image
image / image
1.0 / image
image / 1.0
image // image
image // 1
1 // image
# Bitwise operators (valid because the pixel type is an integer type).
image & image
image | image
image ^ image
~image
# In-place variants rebind `image`; the values are not checked here.
image += image
image -= image
image *= image
image /= image
image //= image
# Zero out every pixel, then set three pixels via three access styles.
image = image * 0
image.SetPixel( 0, 0, 1 )
image[ [0,1] ] = 2
image[ 9,9 ] = 3
# Read pixels back through GetPixel and both [] forms.
image.GetPixel( 1,1 )
#image.GetPixel( [1,1] )
image[1,1]
image[ [ 1,1 ] ]
# The three pixels set above must sum to 1 + 2 + 3 = 6.
if sum(image) != 6:
    print( "image sum not 6" )
    sys.exit( 1 )
# Iterating the image yields one value per pixel: 10 x 10 = 100.
if len( image ) != 100:
    print( "len not 100!" )
    sys.exit(1)
|
# Interactive weight converter: pounds -> kilograms or kilograms -> pounds.
weight = int(input("Enter your weight: "))
choice = input("(L)bs or (K)gs ? ")
if choice in ('l', 'L'):
    converted = str(weight * 0.45)
    print("Your weight in Kilos is: " + converted + " Kgs")
elif choice in ('k', 'K'):
    converted = str(weight * 2.205)
    print("Your weight in Pounds is: " + converted + " Pounds")
else:
    print("Wrong Choice.")
|
"""
All code must be within the following functions
"""
def format_names(name):
    """
    The format names function accepts a list of names.
    It modifies each name in that list to ensure that it is in title case.
    Returns a new list of title-case names.
    """
    # FIX: the function body was missing entirely (always returned None).
    return [n.title() for n in name]
def name_swap(passage, word, swap):
    """
    The function name_swap accepts the following:
    passage - A long passage of text.
    word - a word that needs to be found and swapped
    swap - the word that needs to be swapped in for word.
    Returns the passage with every occurrence of word replaced by swap.
    """
    # FIX: the function body was missing entirely (always returned None).
    return passage.replace(word, swap)
def speed_check(speed):
    """
    The speed_check function checks the speed of drivers.
    1. If the speed is less than 60 it returns "OK".
    2. Otherwise, for each full 5 km/h above the 60 limit the driver loses
       one demerit point, e.g. speed_check(80) returns "DEMERITS: 4".
    3. If the driver gets more than 12 demerit points the function
       returns "SUSPENDED!".
    """
    # FIX: the function body was missing entirely (always returned None).
    if speed < 60:
        return "OK"
    demerits = (speed - 60) // 5
    if demerits > 12:
        return "SUSPENDED!"
    return f"DEMERITS: {demerits}"
def compound_interest(rate, periods, future_value=None, present_value = None):
    """
    This function can be used in two ways: the user submits a present value
    but not a future value, or a future value but not a present value.
    If they submit the present value, the future value is returned:
        FV = PV * (1 + rate) ** periods
    If they submit the future value, the present value is returned:
        PV = FV / (1 + rate) ** periods
    Returns None when neither value is supplied.
    """
    # FIX: the function body was missing entirely (always returned None).
    growth = (1 + rate) ** periods
    if present_value is not None:
        return present_value * growth
    if future_value is not None:
        return future_value / growth
    return None
def draw_shapes(shape, rows):
    """
    The draw_shapes function can draw the following shapes:
    Squares, LHS Right Angle Triangles, RHS Right Angle Triangles
    and Diamonds. Each shape is defined by its number of rows; the shape
    is returned as a single newline-joined string, e.g.
    draw_shapes("lhs_triangle", 3) -> "*\\n**\\n***".
    """
    # FIX: the function body was missing entirely (always returned None).
    if shape == "square":
        lines = ["*" * rows for _ in range(rows)]
    elif shape == "lhs_triangle":
        lines = ["*" * i for i in range(1, rows + 1)]
    elif shape == "rhs_triangle":
        # Right-aligned: pad each row with spaces on the left.
        lines = [" " * (rows - i) + "*" * i for i in range(1, rows + 1)]
    elif shape == "diamond":
        # NOTE(review): the spec gives no diamond example; assumed to mean a
        # centered diamond `rows` high to its widest point — confirm.
        top = [" " * (rows - i) + "*" * (2 * i - 1) for i in range(1, rows + 1)]
        lines = top + top[-2::-1]
    else:
        lines = []
    return "\n".join(lines)
|
import json
import glob
import pickle
import sys
import os
import random
import hashlib
import uuid
import datetime
from typing import Dict
import numpy as np
from PySide2.QtWidgets import QApplication
from gui.mainplate import MainPlate
import function.const as const
if __name__ == '__main__':
    # Make sure the data/preference directories exist before the GUI starts.
    if not os.path.isdir(const.DATASET_ROUTE):
        os.mkdir(const.DATASET_ROUTE)
    if not os.path.isdir(const.PREFERENCE_ROUTE):
        os.mkdir(const.PREFERENCE_ROUTE)
    if not os.path.isdir(const.WEIGHT_PREFERENCE_ROUTE):
        os.mkdir(const.WEIGHT_PREFERENCE_ROUTE)
    app = QApplication(sys.argv)
    window = MainPlate(None)  # top-level window; no parent widget
    # Run the Qt event loop; its exit code becomes the process exit code.
    sys.exit(app.exec_())
|
# -*- coding: UTF-8 -*-
from flask import Flask, render_template, request, redirect, abort, url_for, g, session, send_file
from flask.ext.mysql import MySQL
import os, sys, re, auth, files, filters
from decorators import login_required
# Encoding Hack (Python 2 only): force UTF-8 as the default encoding.
try:
    reload(sys)
    sys.setdefaultencoding('utf-8')
except Exception:
    # FIX: the original `except e:` referenced an undefined name, so any
    # failure here raised NameError instead of being ignored. On Python 3
    # (no `reload` builtin / `setdefaultencoding`) this is now a no-op.
    pass
# Configuration
DEBUG = True
MYSQL_DATABASE_USER = 'root'
# NOTE(review): database credentials are hard-coded in source; move them to
# environment configuration (CLOUDSTORE_SETTINGS below can override them).
MYSQL_DATABASE_PASSWORD = '123456'
MYSQL_DATABASE_DB = 'cloudstore'
UPLOAD_DIR = 'uploads'
app = Flask(__name__)
# NOTE(review): the session secret key is committed to source; rotate it and
# load it from external configuration.
app.secret_key = '\x9b@\t\x04x\x05b( \xb7\r\xf2\xe6\xa7\x0c\x95\x99\x9a\xe1\x89\x0c6\xb2\xbb'
app.config.from_object(__name__)  # pull the UPPER_CASE names above into app.config
app.config.from_envvar('CLOUDSTORE_SETTINGS', silent=True)  # optional overrides
# custom filters
app.jinja_env.filters['datetimeformat'] = filters.datetimeformat
mysql = MySQL(app)
@app.before_request
def before_request():
    # Attach a per-request MySQL connection to flask.g for the views below.
    g.db = mysql.get_db()


@app.route('/')
@login_required()
def index():
    # Home page; the template switches layout depending on whether a user or
    # a group account is logged in.
    return render_template('index.html', type='user' if 'user' in session else 'group')
@app.route('/login', methods=[ 'GET', 'POST' ])
def login():
    """Log in either a user or a group account."""
    if request.method == 'GET':
        login_to_group = request.args.get('login_to_group', '') == '1'
        # Already authenticated -> straight to the home page.
        if 'user' in session or 'group' in session:
            return redirect(url_for('index'))
        return render_template('login.html', login_to_group=login_to_group)
    if request.method == 'POST':
        # auth.login returns a User or Group instance on success; anything
        # else is treated as a failed login.
        user = auth.login(request.form)
        success = False
        if isinstance(user, auth.User):
            success = True
            session['user'] = user
        elif isinstance(user, auth.Group):
            success = True
            session['group'] = user
        return redirect(request.args.get('next', '/')) if success else render_template('login.html', error=u'用户(用户组)名或密码错误,请重试')
@app.route('/register', methods=[ 'GET', 'POST' ])
def register():
    """Create a user account and log it straight in on success."""
    if request.method == 'GET':
        return render_template('register.html')
    if request.method == 'POST':
        # auth.register returns a User on success; otherwise validation errors.
        user = auth.register(request.form)
        if isinstance(user, auth.User):
            session['user'] = user
            return redirect(url_for('index'))
        else:
            return render_template('register.html', errors=user)


@app.route('/logout')
def logout():
    """Clear both user and group sessions and return to the login page."""
    session.pop('user', None)
    session.pop('group', None)
    return redirect(url_for('login'))
@app.route('/admin/users', methods=[ 'GET', 'POST' ])
@login_required(only_superuser=True)
def admin_users():
    """Superuser page: list users and toggle their admin flags."""
    if request.method == 'GET':
        users = auth.get_users(exclude=[1])  # hide the superuser itself
        groups = auth.get_groups_dict()
        return render_template('users.html', users=users, groups=groups)
    if request.method == 'POST':
        auth.set_users_admin(request.form)
        return redirect(url_for('admin_users'))


@app.route('/admin/groups')
@login_required(only_superuser=True)
def admin_groups():
    """Superuser page: list all groups with their members."""
    groups = auth.get_groups_with_members()
    return render_template('groups.html', groups=groups)


@app.route('/admin/group/add', methods=[ 'GET', 'POST' ])
@login_required(only_superuser=True)
def admin_group_add():
    """Superuser page: create a group from users not yet in any group."""
    if request.method == 'GET':
        users = auth.get_users_not_in_group()
        return render_template('add_group.html', users=users)
    if request.method == 'POST':
        group = auth.add_group(request.form)
        if isinstance(group, auth.Group):
            return redirect(url_for('admin_groups'))
        # add_group returned validation errors; re-render the form with them.
        users = auth.get_users_not_in_group()
        return render_template('add_group.html', users=users, errors=group)
@app.route('/admin/group/<int:group_id>', methods=[ 'GET', 'POST' ])
@login_required()
def admin_group(group_id):
    """Manage one group's membership (superuser or this group's admin only)."""
    user = session['user']
    # Allow the superuser (id 1) or an admin member of this very group.
    if user.id != 1 and ( user.groupid != group_id or not user.is_admin ):
        abort(404)
    if request.method == 'GET':
        group = auth.get_groups_with_members(include=[group_id])[0]
        users = auth.get_users_not_in_group()
        # Hide the current user from the member list shown on the page.
        group.members = [ member for member in group.members if member.id != user.id ]
        # All selectable user ids, colon-joined for the template's JS.
        ids = [ str(member.id) for member in group.members + users if member.id != user.id ]
        return render_template('group.html', group=group, users=users, ids=':'.join(ids))
    if request.method == 'POST':
        auth.update_group_members(group_id, request.form)
        return redirect(url_for('admin_group', group_id=group_id))
@app.route('/file/own')
@login_required()
def list_file():
    """List the current user's files (all files for the superuser)."""
    user = session['user']
    users = None
    if user.id == 1:
        # The superuser sees every file, so owner names are needed for display.
        users = auth.get_users_dict()
    fs = files.get_files([user.id] if user.id != 1 else None)
    folders = files.get_folders_dict()
    return render_template('files.html', files=fs, users=users, user=user, folders=folders)


@app.route('/file/share')
@login_required()
def list_share_file():
    """List files other users have shared with the current user."""
    user = session['user']
    # Sharing is not available to the superuser account.
    if user.id == 1:
        abort(404)
    fs = files.get_user_shared_files(user.id)
    users = auth.get_users_dict()
    return render_template('files.html', files=fs, users=users, user=user, share_view=True)


@app.route('/group/files')
@login_required(only_group=True)
def list_group_files():
    """List every file owned by members of the logged-in group."""
    group = session['group']
    fs = files.get_files_by_group(group.id)
    folders = files.get_folders_dict()
    users = auth.get_users_in_the_group(group.id)
    return render_template('files.html', files=fs, users=dict([[user.id, user] for user in users]), folders=folders, group_view=True)
# Absolute base directory where uploaded files are stored on disk.
FILE_BASE = os.sep.join([app.config.root_path, UPLOAD_DIR, ''])


@app.route('/file/upload', methods=['GET', 'POST'])
@login_required()
def upload_file():
    """Upload a file, optionally straight into a folder."""
    to_folder = request.args.get('to_folder', '')
    set_public_default = request.args.get('public_default', '') == '1'
    # Only purely numeric folder ids are accepted; anything else means "none".
    if re.search('^\d+$', to_folder) is None:
        to_folder = None
    if request.method == 'GET':
        return render_template('upload.html', to_folder=to_folder, set_public_default=set_public_default)
    if request.method == 'POST':
        user = session['user']
        f = files.upload_file(FILE_BASE, request, user.id)
        if to_folder:
            files.add_file_to_folder(f.id, to_folder, user.id)
        # Return to the folder listing when a target folder was given.
        return redirect(url_for('list_file') if to_folder is None else url_for('list_folder_files', folder_id=to_folder))
@app.route('/file/<int:file_id>', methods=['GET', 'POST'])
@login_required()
def edit_file(file_id):
    """Show/update a single file's metadata."""
    if request.method == 'GET':
        f = files.get_file_by_id(file_id)
        folders = files.get_folders(f.folderid) if f.folderid else []
        return render_template('file.html', file=f, folders=folders)
    if request.method == 'POST':
        errors = files.edit_file(file_id, request.form)
        if errors:
            f = files.get_file_by_id(file_id)
            folders = files.get_folders(f.folderid) if f.folderid else []
            # NOTE(review): `errors` is not passed to the template here, so
            # validation messages are silently dropped — confirm and fix.
            return render_template('file.html', file=f, folders=folders)
        referrer = request.args.get('referrer', url_for('list_file'))
        return redirect(referrer)
@app.route('/file/<int:file_id>/download')
@login_required()
def download_file(file_id):
    """Send a file to the browser if the current user/group may access it."""
    user = None
    group = None
    if 'user' in session:
        user = session['user']
    elif 'group' in session:
        group = session['group']
    # files.download_file returns the file row only when access is permitted;
    # a falsy result means not found or not allowed.
    ff = files.download_file(file_id, user_id=user.id if user else None, group_id=group.id if group else None)
    if ff:
        return send_file('%s%s' % (FILE_BASE, ff.location), mimetype=ff.type, as_attachment=True, attachment_filename=ff.name)
    abort(404)
@app.route('/file/<int:file_id>/delete')
@login_required()
def delete_file(file_id):
    """Delete a file on behalf of the current user or group, then go back."""
    user = None
    group = None
    if 'user' in session:
        user = session['user']
    elif 'group' in session:
        group = session['group']
    try:
        result = files.delete_file(FILE_BASE, file_id, user_id=user.id if user else None, group_id=group.id if group else None)
    except Exception as e:
        # FIX: `except Exception, e:` is Python-2-only syntax; `as` works on
        # both Python 2.6+ and Python 3.
        app.logger.error(e)
        result = None
    # NOTE(review): 404-ing when result is NOT None looks inverted compared
    # with download_file (which 404s on a falsy result) — confirm the return
    # contract of files.delete_file before flipping this condition.
    if result is not None:
        abort(404)
    return redirect(request.referrer)
@app.route('/folder/add', methods=['GET', 'POST'])
@login_required()
def add_folder():
    """Create a new folder for the current user."""
    user = session['user']
    # superuser cannot create folder
    if user.id == 1:
        abort(404)
    next = request.args.get('next')
    if request.method == 'GET':
        return render_template('add_folder.html', next_page=next)
    if request.method == 'POST':
        errors = files.add_folder(request.form, user.id)
        if errors:
            return render_template('add_folder.html', errors=errors, next_page=next)
        return redirect(next)


@app.route('/file/<int:file_id>/addto/folder', defaults={ 'folder_id': 0 })
@app.route('/file/<int:file_id>/addto/folder/<int:folder_id>')
@login_required()
def add_file_to_folder(file_id, folder_id):
    """Add a file to a folder, or show the folder picker when folder_id is 0."""
    user = session['user']
    # superuser can not add file to folder
    if user.id == 1:
        abort(404)
    # add file to folder
    if folder_id:
        files.add_file_to_folder(file_id, folder_id, user.id)
        return redirect(url_for('list_file'))
    # folder_id == 0 (the defaults route): render the folder selection page.
    folders = files.get_folders(user.id if user.id != 1 else None)
    return render_template('folders.html', folders=folders, file_id=file_id, type='addto')
@app.route('/folder')
@login_required()
def list_folder():
    """Render the folder list for the current user (superuser sees all)."""
    current = session['user']
    owner_id = None if current.id == 1 else current.id
    return render_template('folders.html', folders=files.get_folders(owner_id))
@app.route('/folder/<int:folder_id>')
@login_required()
def list_folder_files(folder_id):
    # Show the files inside one folder; the superuser also gets the user map
    # so the template can display file owners.
    user = session['user']
    fs = files.get_files_by_folder(folder_id)
    folder = files.get_folder_by_id(folder_id)
    users = None
    if user.id == 1:
        users = auth.get_users_dict()
    return render_template('files.html', files=fs, users=users, user=user, folder=folder)
@app.route('/file/search')
@login_required()
def search_files():
    # Substring search over file names, in three modes: a user's own files,
    # files shared with the user, or (group session) the group's files.
    query = request.args.get('q', None)
    if query is None:
        return redirect(request.referrer)
    user = None
    group_view = False
    if 'user' in session:
        # is_own_file selects between own files and shared files.
        is_own_file = request.args.get('is_own_file', '1') == '1'
        user = session['user']
        users = None
        if user.id == 1:
            # shared files are not enabled for superuser
            if is_own_file is False:
                abort(404)
            users = auth.get_users_dict()
        folders = None
        # NOTE(review): for the superuser this re-fetch is unreachable (the
        # abort above fires first) -- the branch only matters for normal users.
        if is_own_file is False:
            users = auth.get_users_dict()
        elif user.id != 1:
            folders = files.get_folders_dict()
        fs = files.get_files([user.id] if user.id != 1 else None) if is_own_file else files.get_user_shared_files(user.id)
    # group view
    else:
        group = session['group']
        group_view = True
        fs = files.get_files_by_group(group.id)
        folders = files.get_folders_dict()
        users = auth.get_users_in_the_group(group.id)
        # NOTE(review): this comprehension reuses the name `user`; under
        # Python 2 the loop variable leaks and overwrites the `user = None`
        # passed to the template below -- confirm which is intended.
        users = dict([[user.id, user] for user in users])
    fs = [ f for f in fs if f.name.find(query) >= 0 ]
    return render_template('files.html', files=fs, users=users, user=user, folders=folders, group_view=group_view, search_view=True, query=query)
@app.route('/folder/search')
@login_required()
def search_folders():
    """Filter the current user's folders by a substring query."""
    if 'user' not in session:
        abort(404)
    query = request.args.get('q', None)
    if query is None:
        return redirect(request.referrer)
    current = session['user']
    owner_id = None if current.id == 1 else current.id
    matches = [folder for folder in files.get_folders(owner_id) if query in folder.name]
    return render_template('folders.html', folders=matches, search_view=True, query=query)
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
|
import numpy as np, scipy, math, itertools
import pandas as pd
from numpy import matlib
from scipy.stats import chi
from scipy import integrate
from pip.util import Inf
from functions import *
import numpy as np
import itertools
import math
import scipy
from scipy.stats import chi
import time
from scipy.integrate import quad
from scipy import integrate
import matplotlib
" ESTIMATORS "
# multivariate crude estimate
def Crude(M, d, mu, sigma, rho, region, regionNumber, nu, omega):
    """Crude Monte Carlo estimate of P(X in region) for a d-dimensional
    normal with mean mu and covariance sigma(d, rho); each sample is scaled
    by sqrt(nu)/omega before the region test.

    Returns the hit fraction over M samples.
    """
    samples = np.random.multivariate_normal(mu, sigma(d, rho), M)
    scale = math.sqrt(nu) / omega
    # Bug fix: the original reused the dimension parameter `d` as the hit
    # counter, clobbering it; count hits in a dedicated variable instead.
    hits = sum(1 for x in samples if region(scale * x, regionNumber))
    return hits / M
# multivariate antithetic variates estimate
# M - number of runs
def antithetic(M, d, mu, sigma, rho, region, regionNumber, nu, omega):
    """Antithetic-variates Monte Carlo estimate: each standard-normal draw z
    yields the pair mu +/- L z (L the Cholesky factor of sigma(d, rho)), and
    both points are tested against the region after scaling by sqrt(nu)/omega.

    Returns the hit fraction over the 2*M evaluated points.
    """
    mean = np.array(mu)
    chol = np.linalg.cholesky(sigma(d, rho))
    scale = math.sqrt(nu) / omega
    draws = np.random.multivariate_normal(np.zeros(d), np.identity(d), M)
    hits_pos = 0
    hits_neg = 0
    for z in draws:
        offset = np.squeeze(np.array(np.dot(chol, z)))
        if region(scale * (mean + offset), regionNumber):
            hits_pos += 1
        if region(scale * (mean - offset), regionNumber):
            hits_neg += 1
    return (hits_pos + hits_neg) / (2 * M)
# functions for pV and pVantithetic functions
# T orthogonal matrix
# d - dimension
# m - montecarlo itterations number
def orthoT(d):
    """Return a random d x d orthogonal matrix (the Q factor of the QR
    decomposition of a uniform random matrix), as a numpy matrix."""
    q = scipy.linalg.qr(np.random.rand(d, d))[0]
    return np.matrix(q)
# central symmetric subset V
# d - dimension
# n - |V|
def unitV(d):
    """Load the precomputed set of unit vectors V for dimension d from disk.

    Returns an (n, d) ndarray.
    NOTE(review): the path is machine-specific; consider making it
    configurable.
    """
    file = 'C:/Users/Adomas/Dropbox/Bakalaurinis/vektoriai/vector' + str(d) + '.csv'
    vectors = pd.read_csv(file, sep=" ", header=None)
    # Fix: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
    # supported replacement and returns the same ndarray.
    return vectors.to_numpy()
# random chi
# d - dimension
# n - |V|
def radius(d, n):
    """Draw n radii from the chi distribution with d degrees of freedom
    (loc 0, scale 1)."""
    return chi.rvs(d, loc=0, scale=1, size=n)
# pV estimate
def pV(M, d, mu, sigma, rho, region, regionNumber, nu, omega):
    """pV estimator: for each of M iterations, rotate the fixed unit-vector
    set V by a random orthogonal matrix, scale each vector by a chi radius,
    map through the Cholesky factor of sigma(d, rho), shift by mu, and count
    hits in `region` (after scaling by sqrt(nu)/omega).

    Returns the hit fraction over all M * |V| points.
    """
    k = []                                     # per-iteration hit counts
    v = unitV(d)                               # fixed point set V, shape (n, d)
    gamma = np.linalg.cholesky(sigma(d, rho))  # covariance factor
    for i in range(0, M):
        T = orthoT(d)                          # random rotation
        r = radius(d, v.shape[0])              # one chi radius per vector
        z = []
        [z.append(np.squeeze(np.array(r[j] * np.dot(T, v[j])))) for j in range(v.shape[0])]
        x = [mu + np.squeeze(np.array((np.dot(gamma, elem)))) for elem in z]
        win = 0
        for l in range(0, v.shape[0]):
            if region((math.sqrt(nu) / omega) * x[l], regionNumber):
                win += 1
        k.append(win)
    return sum(k) / (M * v.shape[0])
# pV w/ antithetic variates
def pVantithetic(M, d, mu, sigma, rho, region, regionNumber, nu, omega):
    """pV estimator combined with antithetic variates: each rotated, scaled
    point is evaluated together with its mirror image (-z) through mu.

    Returns the hit fraction over the 2 * M * |V| evaluated points.
    """
    # Fix: hoist loop invariants. The original reloaded V from disk (unitV)
    # and re-factorised sigma on every iteration; loading V before the loop
    # also keeps `v` defined for the final division instead of relying on the
    # loop variable leaking out.
    v = unitV(d)
    gamma = np.linalg.cholesky(sigma(d, rho))
    scale = math.sqrt(nu) / omega
    k = []
    for i in range(0, M):
        T = orthoT(d)
        r = radius(d, v.shape[0])
        zPositive = []
        zNegative = []
        for j in range(0, v.shape[0]):
            c = np.squeeze(np.array(r[j] * np.dot(T, v[j])))
            zPositive.append(c)
            zNegative.append(-c)
        xPositive = [mu + np.squeeze(np.array((np.dot(gamma, elem)))) for elem in zPositive]
        xNegative = [mu + np.squeeze(np.array((np.dot(gamma, elem)))) for elem in zNegative]
        winPositive = 0
        winNegative = 0
        for l in range(0, v.shape[0]):
            if region(scale * xPositive[l], regionNumber):
                winPositive += 1
            if region(scale * xNegative[l], regionNumber):
                winNegative += 1
        k.append(winPositive)
        k.append(winNegative)
    return sum(k) / (2 * M * v.shape[0])
def pVantitheticNew(M, d, mu, sigma, rho, region, regionNumber, nu, omega):
    """Antithetic pV estimator that keeps only one vector from each antipodal
    pair of V (those whose first nonzero coordinate is positive) and
    generates the mirrored point explicitly, halving the stored vector set.
    """
    k = []
    vectors = unitV(d)
    v = []
    gamma = np.linalg.cholesky(sigma(d, rho))
    # Keep a vector iff its first nonzero component is positive; its mirror
    # image is produced as -c inside the main loop below.
    for vector in vectors:
        for x in vector:
            if x > 0:
                v.append(vector)
                break
            elif x < 0:
                break
    for i in range(0, M):
        T = orthoT(d)
        r = radius(d, len(v))
        zPositive = []
        zNegative = []
        for j in range(0, len(v)):
            c = np.squeeze(np.array(r[j] * np.dot(T, v[j])))
            zPositive.append(c)
            zNegative.append(-c)
        xPositive = [mu + np.squeeze(np.array((np.dot(gamma, elem)))) for elem in zPositive]
        xNegative = [mu + np.squeeze(np.array((np.dot(gamma, elem)))) for elem in zNegative]
        winPositive = 0
        winNegative = 0
        for l in range(0, len(v)):
            if region((math.sqrt(nu) / omega) * xPositive[l], regionNumber):
                winPositive += 1
            if region((math.sqrt(nu) / omega) * xNegative[l], regionNumber):
                winNegative += 1
        k.append(winPositive)
        k.append(winNegative)
    # NOTE(review): divides by len(v) -- ZeroDivisionError if V contained no
    # vector with a positive leading component; confirm inputs preclude this.
    return sum(k) / (2 * M * len(v))
def pStar(M, d, mu, sigma, rho, region, regionNumber, nu, omega):
    """Semi-analytic pV-style estimator: for the ellipsoid region, sums exact
    chi CDF values along rays with non-negative first coordinate; otherwise
    counts rotated directions mapped by the covariance that fall entirely in
    the non-positive orthant.
    """
    gamma = sigma(d, rho)
    if region == elipsoid:
        win = []
        for i in range(M):
            T = orthoT(d)
            v = unitV(d)
            # Exact probability mass along each kept direction via the chi CDF.
            inRegion = [2*(math.sqrt(nu)/omega)*np.dot(T, vector).item(0) for vector in v if np.dot(T, vector).item(0) >= 0]
            win.append(sum([chi.cdf(value, d) for value in inRegion]))
    else:
        # NOTE(review): seeded with [0] here but [] in the ellipsoid branch --
        # confirm the extra 0 entry is intentional.
        win = [0]
        for i in range(M):
            T = orthoT(d)
            v = unitV(d)
            Tv = [np.dot(T, vector) for vector in v]
            for vector in Tv:
                if (np.array(np.dot(vector, gamma) * (math.sqrt(nu) / omega)) <= 0).all():
                    win.append(1)
    # NOTE(review): relies on `v` leaking out of the loop -- NameError when M == 0.
    return sum(win)/(M*len(v))
" COV MATRICES "
# in each, insert n & rho, receive sigma
# n - dimension, rho - value
# one factor
def oneFactor(n, rho):
    """Equicorrelation (one-factor) covariance matrix: ones on the diagonal,
    rho everywhere else. Returns an (n, n) float ndarray."""
    matrix = np.full((n, n), rho, dtype=float)
    np.fill_diagonal(matrix, 1.0)
    return matrix
# AR
def covAR(n, rho):
    """AR(1) covariance matrix: entry (i, j) equals rho ** |i - j|.
    Returns an (n, n) float ndarray."""
    idx = np.arange(n)
    exponents = np.abs(idx[:, None] - idx[None, :]).astype(float)
    return np.power(float(rho), exponents)
# identity
def identity(n, rho):
    # Identity covariance matrix; rho is ignored but kept so all covariance
    # builders share the (n, rho) signature.
    # NOTE(review): np.matlib is deprecated and returns np.matrix rather than
    # ndarray; the callers in this file only feed the result to
    # np.linalg.cholesky / np.dot, which accept either.
    return np.matlib.identity(n)
"Regions"
# in each insert x and set number of area
# Elipsoid
def elipsoid(x, regionNumber):
    """Unit-ball region test: True iff ||x - b||^2 <= 1, where the centre b
    is selected by regionNumber (1: e_1, 2: 0.5*e_1, otherwise: all ones)."""
    if regionNumber == 1:
        b = np.array([1])
        b = np.append(b, np.repeat(0, len(x) - 1))
    elif regionNumber == 2:
        b = np.array([0.5])
        b = np.append(b, np.repeat(0, len(x) - 1))
    else:
        b = np.repeat(1, len(x))
    y = x - b
    # Fix: removed a duplicated `np.dot(y, y) <= 1` statement whose result
    # was computed and discarded.
    return np.dot(y, y) <= 1
# Orthant
def orthant(x, regionNumber):
    """Orthant region test: True iff every coordinate of x is <= a threshold
    selected by regionNumber (1 -> 0, 2 -> 1, otherwise -> -1)."""
    bound = 0 if regionNumber == 1 else (1 if regionNumber == 2 else -1)
    return all(np.array(x) <= bound)
# Rectangular
def rectangular(x, regionNumber):
    """Open axis-aligned box test: True iff every coordinate of x lies
    strictly inside (lo, hi), with bounds chosen by regionNumber
    (1: (-1, 1), 2: (0, 2), otherwise: (0.5, 1.5))."""
    if regionNumber == 1:
        lo, hi = -1, 1
    elif regionNumber == 2:
        lo, hi = 0, 2
    else:
        lo, hi = 0.5, 1.5
    arr = np.array(x)
    return all(lo < arr) & all(arr < hi)
" GOING FOR A STUDENT "
# the last function to integrate for student
def approxFunction(nu, omega):
    """Unnormalised chi-density kernel: omega^(nu-1) * exp(-omega^2 / 2)."""
    return math.exp(-0.5 * omega ** 2) * omega ** (nu - 1)
# function inside integral
def innerFunction(omega, nu):
    """Same chi kernel as approxFunction, with the argument order that
    scipy.integrate.quad expects (integration variable first)."""
    power_term = omega ** (nu - 1)
    return power_term * math.exp(-(omega * omega) / 2)
# give nu and criteria how much you allow to be near zero and lowerbound will be given
def determineLowerBound(nu, criteria):
    """Scan omega over [0, 5) in steps of 0.1 and return the largest grid
    point at which the chi kernel is still below `criteria` -- used to pick
    an integration lower bound where the integrand is negligible.
    """
    testPoints = np.arange(0.0, 5.0, 0.1)
    # Fix: build the values with a real list comprehension instead of abusing
    # a comprehension for its append side effect.
    testResults = np.array([innerFunction(p, nu) for p in testPoints])
    return testPoints[np.max(np.where(testResults < criteria))]
# give nu and lower from determinelowerbound and criteria, upperbound will be given
def determineUpperBound(nu, lower, criteria):
    """Scan upward from `lower` in steps of 0.1 and return the first grid
    point whose remaining tail integral of the chi kernel (to infinity)
    drops below `criteria` -- used to pick an integration upper bound.
    """
    testPoints = np.arange(lower, 20.0, 0.1)
    # Fix: real list comprehension instead of a side-effect comprehension.
    tails = np.array([quad(innerFunction, p, np.inf, args=nu)[0] for p in testPoints])
    return testPoints[np.min(np.where(tails < criteria))]
# give evything, optimal step will be given
def determineStep(nu, lower, upper, initStep, criteria):
    """Shrink the integration step until two successive Simpson approximations
    of the chi kernel over [lower, upper] differ by less than `criteria`.

    NOTE(review): initStep decreases by 0.05 per round with no floor; if
    convergence is not reached before it becomes non-positive, np.arange
    degenerates -- confirm callers pass a sufficiently large initStep.
    """
    last = 0
    while True:
        testPoints = np.arange(lower, upper, initStep)
        testResults = []
        [testResults.append(innerFunction(testPoints[i], nu)) for i in range(len(testPoints))]
        if math.fabs(integrate.simps(testResults, testPoints) - last) < criteria:
            break
        else:
            last = integrate.simps(testResults, testPoints)
            initStep -= 0.05
    return initStep
# calculates students prob with selected estimate and region
# upperOmegaBound - upper bound until which integration is approximated
# nOmegas - number of integration approximation points
# nu - degrees of freedom
# M - number of inside runs
# estimate - function of estimate
# d - dimension of orthogonal matrix and v vectors length
# n - |V| number of unit vectors
# mu - vector of means
# sigma - cov matrix
# region - function of region
# regionNumber - region number
def studentProbOld(upperOmegaBound, nOmegas, nu, M, estimate, d, mu, sigma, rho, region, regionNumber):
    """Multivariate Student probability via a rectangle-rule integral over
    omega of (normal estimate at scale omega) * chi kernel, multiplied by
    the chi-density normalising constant.
    """
    omegaX = np.linspace(0.00001, upperOmegaBound, nOmegas) # points on X axis for integration approximation
    step = upperOmegaBound / nOmegas # increase of each step
    normalProbabilities = []
    [normalProbabilities.append(estimate(M, d, mu, sigma, rho, region, regionNumber, nu, omegaX[i])) for i in
     range(0, len(omegaX))]
    approxBlocks = []
    [approxBlocks.append(step * normalProbabilities[i] * approxFunction(nu, omegaX[i])) for i in range(0, len(omegaX))]
    approx = sum(approxBlocks)
    # Normalising constant of the chi density with nu degrees of freedom.
    multiplier = (2 ** (1 - (nu / 2))) / (math.gamma(nu / 2))
    return multiplier * approx
# new version
def studentProb(lower, upper, step, nu, M, estimate, d, mu, sigma, rho, region, regionNumber):
    """Multivariate Student probability via Simpson integration over omega of
    (normal estimate at scale omega) * chi kernel, times the chi-density
    normalising constant. Newer replacement for studentProbOld.
    """
    omega = np.arange(lower, upper, step) # points on X axis for integration approximation
    normalProbabilities = []
    [normalProbabilities.append(estimate(M, d, mu, sigma, rho, region, regionNumber, nu, omega[i])) for i in range(0, len(omega))]
    y = []
    [y.append(normalProbabilities[i]*innerFunction(omega[i], nu)) for i in range(len(omega))]
    approx = integrate.simps(y, omega)
    # Normalising constant of the chi density with nu degrees of freedom.
    multiplier = (2 ** (1 - (nu / 2))) / (math.gamma(nu / 2))
    return multiplier * approx
|
import timeit
test_hasattr = """
if hasattr(gizmo, 'gadget'):
feature = gizmo.gadget
else:
feature = None
"""
test_getattr = """
feature = getattr(gizmo, 'gadget', None)
"""
test_tryget = """
try:
feature = getattr(gizmo, 'gadget')
except AttributeError:
feature = None
"""
class Gizmo:
    """Trivial object with a single attribute to probe in the benchmarks."""
    def __init__(self):
        self.gadget = True

gizmo = Gizmo()
test_keys = 'hasattr', 'getattr', 'tryget'
def test():
    """Time each snippet with the attribute present and then absent."""
    for test_key in test_keys:
        test_name = 'test_' + test_key
        test = globals()[test_name]              # look up the snippet by name
        setup = 'from __main__ import gizmo'
        t_present = min(timeit.repeat(test, setup=setup))
        del gizmo.gadget                         # make the attribute missing
        t_absent = min(timeit.repeat(test, setup=setup))
        gizmo.gadget = True                      # restore for the next snippet
        print('{:7} {:.3f} {:.3f}'.format(test_key, t_present, t_absent))
if __name__ == '__main__':
    # Run the benchmark only when executed directly.
    test()
|
import requests
import json
import io
from PIL import Image
import base64
# Azure Face API key (filled in locally; never commit a real key).
subscription_key = ""
#subscription_key = ""
#################################################
#face-detect 1
#################################################
# Detect endpoint: POST raw image bytes, receive detected-face metadata.
face_api_url = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect"
headers = {
    'Ocp-Apim-Subscription-Key': subscription_key,
    'Content-Type': 'application/octet-stream'
}
parameters = {
    'returnFaceId':'true',
    'returnFaceLandmarks':'false'
}
image_data = open('bean.jpg','rb').read()
response = requests.post(face_api_url, headers=headers, params=parameters, data=image_data)
#print(response.json())
print(response.json()[0]['faceId'])
# faceId of the first face detected in bean.jpg
id1 = response.json()[0]['faceId']
#################################################
#face-detect 2
#################################################
# Second detection: same endpoint/headers/params, different image.
image_data = open('bean1.jpg','rb').read()
response = requests.post(face_api_url, headers=headers, params=parameters, data=image_data)
id2 = response.json()[0]['faceId']
print(id2)
#################################################
#face-verify 1&2
#################################################
# Ask the service whether the two detected faces belong to the same person.
face_api_url_verify = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/verify"
body = {
    'faceId1': id1,
    # Bug fix: the Verify API expects 'faceId2' (capital I); the original
    # sent 'faceid2', which the service rejects as a missing field.
    'faceId2': id2
}
parameters = {
}
headers = {
    'Ocp-Apim-Subscription-Key': subscription_key,
    'Content-Type': 'application/json'
}
response = requests.post(face_api_url_verify, headers=headers, params=parameters, json=body)
print(response.json())
#################################################
#PersonGroups Create
#################################################
headers = {
    'Ocp-Apim-Subscription-Key': subscription_key,
    'Content-Type': 'application/json'
}
body = {
    'name':'group1',
    # NOTE(review): the person-creation request below spells this field
    # 'userData' -- confirm the service accepts 'userdata' here.
    'userdata':'Whitelisted users',
}
face_api_person_group_url = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/persongroups/pone"
# One-time group creation; left commented out once the group 'pone' exists.
#response = requests.put(face_api_person_group_url, headers=headers, json=body)
#print(response.json())
#################################################
#PersonGroups Person Create
#################################################
# Create a person inside group 'pone'; the response carries its personId.
face_api_person_group_create_url = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/persongroups/pone/persons"
headers = {
    'Ocp-Apim-Subscription-Key': subscription_key,
    'Content-Type': 'application/json'
}
body = {
    'name':'Person1',
    'userData':'user provided data',
}
parameters = {'personGroupId':'pone'}
response = requests.post(face_api_person_group_create_url, headers=headers,params=parameters, json=body)
print(response.json())
pid = response.json()['personId']
#################################################
#PersonGroups Train
#################################################
# Kick off training for group 'pone' (the group id is part of the URL).
face_api_url_person_group_train= "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/persongroups/pone/train"
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
parameters={'personGroupId':'pone'}  # NOTE(review): unused -- the id is already in the URL
body = {}                            # NOTE(review): unused -- Train takes an empty body
response = requests.post(face_api_url_person_group_train, headers=headers)
print(response)
#################################################
#PersonGroups Person AddFace
#################################################
# Add a face image to the person created above. Per the Face API, AddFace
# takes the person id in the URL path (".../persons/{personId}/persistedFaces")
# and the raw image bytes as an octet-stream body.
# Bug fix: the original posted the literal string "{b_data_string}" (a missing
# f-prefix on a base64 workaround) to the person-list URL, which the service
# rejects; send the raw bytes to the persistedFaces endpoint instead.
headers = {
    'Ocp-Apim-Subscription-Key': subscription_key,
    'Content-Type': 'application/octet-stream'
}
image_data = open('bean.jpg','rb').read()
face_api_add_face_url = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/persongroups/pone/persons/" + pid + "/persistedFaces"
response = requests.post(face_api_add_face_url, headers=headers, data=image_data)
print(response.json())
|
import string
def punc_func(exclude):
punc = r''
for char in string.punctuation:
if char not in exclude:
punc = punc + r'\%s' % char
return punc
# Character classes used when building the inline-markup regexes.
digits = string.digits
letters = string.letters  # NOTE(review): Python 2 only; Python 3 renamed this string.ascii_letters
literal_punc = punc_func("'")      # punctuation minus the single quote
dbl_quoted_punc = punc_func("\"")  # punctuation minus the double quote
strongem_punc = punc_func('*')     # punctuation minus '*'
under_punc = punc_func('_<>')      # punctuation minus '_', '<', '>'
phrase_delimiters = r'\s\.\,\?\/\!\&\(\)'
|
import io
import logging
import os
import time
from contextlib import redirect_stderr
from unittest.mock import patch
import pytest
import ray
from ray import tune
from ray.data.preprocessor import Preprocessor
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.gbdt_trainer import GBDTTrainer
from ray.train.trainer import BaseTrainer
from ray.util.placement_group import get_current_placement_group
logger = logging.getLogger(__name__)
@pytest.fixture
def ray_start_4_cpus():
    """Start a 4-CPU local Ray cluster for one test; shut it down afterwards."""
    address_info = ray.init(num_cpus=4)
    yield address_info
    # The code after the yield will run as teardown code.
    ray.shutdown()
class DummyPreprocessor(Preprocessor):
    """Preprocessor stub: counts fit() calls and shifts every item by +1."""

    def __init__(self):
        self.fit_counter = 0  # number of times fit() has been invoked

    def fit(self, ds):
        self.fit_counter += 1

    def transform(self, ds):
        return ds.map(lambda x: x + 1)
class DummyTrainer(BaseTrainer):
    """Minimal BaseTrainer whose training loop is injected as a callable."""

    _scaling_config_allowed_keys = [
        "trainer_resources",
        "num_workers",
        "use_gpu",
        "resources_per_worker",
        "placement_strategy",
    ]

    def __init__(self, train_loop, custom_arg=None, **kwargs):
        self.custom_arg = custom_arg
        self.train_loop = train_loop
        super().__init__(**kwargs)

    def training_loop(self) -> None:
        # Delegate to the injected callable, passing the trainer instance.
        self.train_loop(self)
class DummyGBDTTrainer(GBDTTrainer):
    """GBDTTrainer stub with all framework-specific hooks nulled out."""
    _dmatrix_cls: type = None
    _ray_params_cls: type = None
    _tune_callback_cls: type = None
    _init_model_arg_name: str = None
def test_trainer_fit(ray_start_4_cpus):
    """fit() surfaces metrics reported from inside the training loop."""

    def training_loop(self):
        tune.report(my_metric=1)

    trainer = DummyTrainer(train_loop=training_loop)
    result = trainer.fit()
    assert result.metrics["my_metric"] == 1
def test_preprocess_datasets(ray_start_4_cpus):
    """Datasets are run through the preprocessor before the loop sees them."""

    def training_loop(self):
        # DummyPreprocessor.transform shifts each item by +1.
        assert self.datasets["my_dataset"].take() == [2, 3, 4]

    datasets = {"my_dataset": ray.data.from_items([1, 2, 3])}
    trainer = DummyTrainer(
        training_loop, datasets=datasets, preprocessor=DummyPreprocessor()
    )
    trainer.fit()
def test_resources(ray_start_4_cpus):
    """The trainer's reserved resources are deducted while it runs."""

    def check_cpus(self):
        # 2 of the 4 CPUs are held by the trainer during the run.
        assert ray.available_resources()["CPU"] == 2

    # All 4 CPUs are free before the trainer starts.
    assert ray.available_resources()["CPU"] == 4
    trainer = DummyTrainer(check_cpus, scaling_config={"trainer_resources": {"CPU": 2}})
    trainer.fit()
@pytest.mark.parametrize("gen_dataset", [True, False])
def test_preprocess_fit_on_train(ray_start_4_cpus, gen_dataset):
    """The preprocessor is fit exactly once (on "train") and transforms all
    datasets, whether passed directly or as factory callables."""

    def training_loop(self):
        # Fit was only called once.
        assert self.preprocessor.fit_counter == 1
        # Datasets should all be transformed.
        assert self.datasets["train"].take() == [2, 3, 4]
        assert self.datasets["my_dataset"].take() == [2, 3, 4]

    if gen_dataset:
        datasets = {
            "train": lambda: ray.data.from_items([1, 2, 3]),
            "my_dataset": lambda: ray.data.from_items([1, 2, 3]),
        }
    else:
        datasets = {
            "train": ray.data.from_items([1, 2, 3]),
            "my_dataset": ray.data.from_items([1, 2, 3]),
        }
    trainer = DummyTrainer(
        training_loop, datasets=datasets, preprocessor=DummyPreprocessor()
    )
    trainer.fit()
def test_preprocessor_already_fitted(ray_start_4_cpus):
    """A preprocessor that is already fitted must not be re-fit, but must
    still transform every dataset."""

    def training_loop(self):
        # Make sure fit is not called if preprocessor is already fit.
        assert self.preprocessor.fit_counter == 1
        # Datasets should all be transformed.
        assert self.datasets["train"].take() == [2, 3, 4]
        assert self.datasets["my_dataset"].take() == [2, 3, 4]

    datasets = {
        "train": ray.data.from_items([1, 2, 3]),
        "my_dataset": ray.data.from_items([1, 2, 3]),
    }
    preprocessor = DummyPreprocessor()
    preprocessor.fit(ray.data.from_items([1]))
    # Bug fix: pass the pre-fitted preprocessor. The original constructed a
    # fresh DummyPreprocessor() here, leaving the fitted one unused, so the
    # "already fitted" path was never actually exercised.
    trainer = DummyTrainer(
        training_loop, datasets=datasets, preprocessor=preprocessor
    )
    trainer.fit()
def test_arg_override(ray_start_4_cpus):
    """Tune config entries deep-merge into the trainer's constructor args."""

    def check_override(self):
        assert self.scaling_config["num_workers"] == 1
        # Should do deep update.
        assert not self.custom_arg["outer"]["inner"]
        assert self.custom_arg["outer"]["fixed"] == 1
        # Should merge with base config.
        assert self.preprocessor.original

        pg = get_current_placement_group()
        assert len(pg.bundle_specs) == 2  # 1 trainer, 1 worker

    preprocessor = DummyPreprocessor()
    preprocessor.original = True
    scale_config = {"num_workers": 4}
    trainer = DummyTrainer(
        check_override,
        custom_arg={"outer": {"inner": True, "fixed": 1}},
        preprocessor=preprocessor,
        scaling_config=scale_config,
    )
    # num_workers is overridden from 4 to 1 by the tune config below.
    new_config = {
        "custom_arg": {"outer": {"inner": False}},
        "scaling_config": {"num_workers": 1},
    }
    tune.run(trainer.as_trainable(), config=new_config)
def test_setup(ray_start_4_cpus):
    """Trainer.setup() runs before the training loop."""

    def check_setup(self):
        assert self._has_setup

    class DummyTrainerWithSetup(DummyTrainer):
        def setup(self):
            self._has_setup = True

    trainer = DummyTrainerWithSetup(check_setup)
    trainer.fit()
def test_fail(ray_start_4_cpus):
    """Exceptions raised in the training loop propagate out of fit()."""

    def fail(self):
        raise ValueError

    trainer = DummyTrainer(fail)
    with pytest.raises(ValueError):
        trainer.fit()
@patch.dict(os.environ, {"RAY_LOG_TO_STDERR": "1"})
def _is_trainable_name_overriden(trainer: BaseTrainer):
    """Helper: assert that the trainable produced by `trainer` logs under its
    overridden repr (checked against actor log output captured on stderr)."""
    trainable = trainer.as_trainable()
    output = io.StringIO()

    def say(self):
        logger.warning("say")

    trainable.say = say
    with redirect_stderr(output):
        remote_trainable = ray.remote(trainable)
        remote_actor = remote_trainable.remote()
        ray.get(remote_actor.say.remote())
        time.sleep(1)  # make sure logging gets caught
    output = output.getvalue()
    print(output)
    # The actor's log prefix contains the trainable's repr.
    assert trainable().__repr__() in output
def test_trainable_name_is_overriden_data_parallel_trainer(ray_start_4_cpus):
    # DataParallelTrainer should expose an overridden trainable repr.
    trainer = DataParallelTrainer(lambda x: x, scaling_config=dict(num_workers=1))
    _is_trainable_name_overriden(trainer)
def test_trainable_name_is_overriden_gbdt_trainer(ray_start_4_cpus):
    # The GBDT trainer stub should also expose an overridden trainable repr.
    trainer = DummyGBDTTrainer(
        params={},
        label_column="__values__",
        datasets={"train": ray.data.from_items([1, 2, 3])},
        scaling_config=dict(num_workers=1),
    )
    _is_trainable_name_overriden(trainer)
if __name__ == "__main__":
    # Allow running this test module directly: fail fast (-x), verbose.
    import sys

    import pytest

    sys.exit(pytest.main(["-v", "-x", __file__]))
|
import sunck
# Translation of the note below (originally in Chinese): a module is an
# executable .py file; when a module is imported by another program, code we
# do not want to run on import can be guarded with the __name__ attribute so
# only part of the module executes.
"""
__name__属性:
模块就是一个可执行的.py文件,一个模块被另一个程序引入,我不让想让模块中的某些代码执行,
可以用__name__属性来是程序仅调用模块中的一部分。
"""
sunck.say_good()
|
#!/usr/bin/python
# Start pensando agent and hostsim on all nodes
import time
import sys
import os
import argparse
import paramiko
import threading
import json
import http
import exceptions
import traceback
# Port numbers - keep in sync with venice/globals/constants.go
APIGwRESTPort = "443"
CMDClusterMgmtPort = "9002"
CMDResolverPort = "9009"
CMDAuthCertAPIPort = "9009"
# Utility function to run ssh
def ssh_exec_thread(ssh_object, command):
    # Run `command` over an existing SSH connection, echoing its output.
    # Intended as a thread target for long-running daemon commands.
    print "run: " + command
    stdin, stdout, stderr = ssh_object.exec_command(command)
    out = stdout.readlines()
    print out
    print "Program exited: " + command
    # Surface a non-zero exit status in the log.
    exitCode = stdout.channel.recv_exit_status()
    if exitCode != 0:
        print "Exit code: " + str(exitCode)
# This class represents a vagrant node
class Node:
    """A vagrant node reachable over SSH.

    Connects on construction, caches the remote hostname, and provides
    helpers to run shell commands and launch the Pensando agents/simulators
    in background threads.
    """
    def __init__(self, addr, username='vagrant', password='vagrant', gobin='/import/bin'):
        self.addr = addr
        self.username = username
        self.password = password
        self.gobin = gobin
        self.ssh = self.sshConnect(username, password)
        # Cache the remote hostname for log messages.
        out, err, ec = self.runCmd("hostname")
        self.hostname = out[0].split('\n')[0]
        print "Connected to " + self.hostname
    # Connect to vagrant node
    def sshConnect(self, username, password):
        # Returns a connected paramiko.SSHClient; unknown host keys accepted.
        ssh_object = paramiko.SSHClient()
        ssh_object.set_missing_host_key_policy( paramiko.AutoAddPolicy() )
        print "Connecting to " + self.addr + " with userid: " + username + " password: " + password
        try:
            ssh_object.connect(self.addr, username=username, password=password)
            return ssh_object
        except paramiko.ssh_exception.AuthenticationException:
            # NOTE(review): `tutils` is not imported in this file -- this line
            # would raise NameError on authentication failure; confirm.
            tutils.exit("Authentication failed")
    def isConnected(self):
        # True when the underlying SSH transport is alive.
        transport = self.ssh.get_transport() if self.ssh else None
        return transport and transport.is_active()
    # Run a command on vagrant node
    def runCmd(self, cmd, timeout=None):
        # Returns (stdout_lines, stderr_lines, exit_code); reconnects first
        # if the session dropped.
        try:
            print "run: " + cmd
            # We we disconnected for any reason, reconnect
            if not self.isConnected():
                self.ssh = self.sshConnect(self.username, self.password)
            # Execute the command
            stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=timeout)
            out = stdout.readlines()
            err = stderr.readlines()
            exitCode = stdout.channel.recv_exit_status()
            if out != [] or exitCode != 0:
                print "stdout(" + str(exitCode) + "):" + ''.join(out)
            if err != []:
                print "stderr: " + ''.join(err)
            return out, err, exitCode
        except exceptions.EOFError:
            print "Ignoring EOF errors executing command"
            return [], [], 0
    # Start K8s agent process on vagrant node
    def startK8sAgent(self, args=""):
        # Each daemon gets its own SSH session and background thread.
        ssh_object = self.sshConnect(self.username, self.password)
        command = "sudo " + self.gobin + "/k8sagent " + args + "> /tmp/pensando-k8sagent.log 2>&1"
        self.npThread = threading.Thread(target=ssh_exec_thread, args=(ssh_object, command))
        # npThread.setDaemon(True)
        self.npThread.start()
    # Start Naples netagent process on the node
    def startN4sAgent(self, npm, resolvers, hostif, uplink):
        ssh_object = self.sshConnect(self.username, self.password)
        command = "sudo " + self.gobin + "/n4sagent -npm " + npm + " -resolver-urls " + resolvers + " -hostif " + hostif + " -uplink " + uplink + " > /tmp/pensando-n4sagent.log 2>&1"
        self.npThread = threading.Thread(target=ssh_exec_thread, args=(ssh_object, command))
        # npThread.setDaemon(True)
        self.npThread.start()
    # Start NMD process on the node
    def startNMD(self, cmdreg, cmdcerts, resolvers, hostif, uplink):
        ssh_object = self.sshConnect(self.username, self.password)
        command = "sudo " + self.gobin + "/nmd -cmdregistration " + cmdreg + " -cmdcerts " + cmdcerts + \
            " -mode managed -hostif " + hostif + " > /tmp/pensando-nmd.log 2>&1"
        self.npThread = threading.Thread(target=ssh_exec_thread, args=(ssh_object, command))
        # npThread.setDaemon(True)
        self.npThread.start()
    # Start hostsim process on the node
    def startHostsim(self, simif, simbin):
        ssh_object = self.sshConnect(self.username, self.password)
        command = "sudo -E " + simbin + "/hostsim -uplink " + simif + " > /tmp/pensando-hostsim.log 2>&1"
        self.npThread = threading.Thread(target=ssh_exec_thread, args=(ssh_object, command))
        # npThread.setDaemon(True)
        self.npThread.start()
    # Start vcsim
    def startVcsim(self, hostsims, snics):
        ssh_object = self.sshConnect(self.username, self.password)
        command = self.gobin + "/vcsim -hostsim-urls " + hostsims + " -snic-list " + snics + " > /tmp/pensando-vcsim.log 2>&1"
        self.npThread = threading.Thread(target=ssh_exec_thread, args=(ssh_object, command))
        # npThread.setDaemon(True)
        self.npThread.start()
        # self.runCmd("docker run -d --net=host --name vcSim pen-vcsim -hostsim-urls " + hostsims + " -snic-list " + snics)
# Create the network by posting a message to apigw
def createNetwork(nodeAddr, name, subnet, gw, vlanId):
    # POST a Network object to the API gateway running on `nodeAddr`.
    postUrl = 'http://' + nodeAddr + ':' + APIGwRESTPort + '/configs/network/v1/tenant/default/networks'
    print "Posting to URL: " + postUrl
    # network json parameters
    jdata = json.dumps({
        "kind": "Network",
        "meta": {
            "tenant": "default",
            "name": name
        },
        "spec": {
            "ipv4-subnet": subnet,
            "ipv4-gateway": gw,
            "vlan-id": vlanId
        }
    })
    # Post the data
    # NOTE(review): the stdlib `http` module has no httpPost -- this relies
    # on a project-local `http` helper; confirm the import resolves to it.
    response = http.httpPost(postUrl, jdata)
    print "Network create response is: " + response
# Parse command line args
# Create the parser and sub parser
# CLI flags: node lists, service URLs/ports, ssh credentials, binary paths,
# mode switches (-k8s, -stop) and interface names.
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='1.0.0')
parser.add_argument("-nodes", default='', help="list of nodes(comma separated)")
parser.add_argument("-npm", default='pen-npm', help="NPM URL")
parser.add_argument("-cmdregistration", default='pen-master:' + CMDClusterMgmtPort, help="CMD Cluster Mgmt URL")
parser.add_argument("-cmdcerts", default='pen-master:' + CMDAuthCertAPIPort, help="CMD Authenticated Certificates API URL")
parser.add_argument("-resolvers", default='pen-master:' + CMDResolverPort, help="Resolver URLs")
parser.add_argument("-simnodes", default='', help="list of nodes(comma separated)")
parser.add_argument("-user", default='vagrant', help="User id for ssh")
parser.add_argument("-password", default='vagrant', help="password for ssh")
parser.add_argument("-gobin", default='/import/bin', help="$GOPATH/bin directory path")
parser.add_argument("-simbin", default='/import/bin', help="host sim & vcsim binary path")
parser.add_argument("-k8s", dest='k8s', action='store_true')
parser.add_argument("-stop", dest='stop', action='store_true')
parser.add_argument("-hostif", default='ntrunk0', help="Host facing interface")
parser.add_argument("-uplink", default='eth2', help="Naples uplink")
parser.add_argument("-simif", default='strunk0', help="Hostsim uplink")
# Parse the args
args = parser.parse_args()
addrList = args.nodes.split(",")
simAddrList = args.simnodes.split(",")
# Fall back to environment / node list when flags were not supplied.
if args.nodes == '':
    addrList = os.environ["PENS_NODES"].split(",")
if args.simnodes == '':
    simAddrList = addrList
# Basic error checking
if len(addrList) < 1:
    print "Empty address list"
    sys.exit(1)
nodes = []
simNodes = []
# Main flow: connect + clean up each node, optionally stop there (-stop),
# then start agents, host simulators and vcsim, and create a default network.
try:
    # Connect to nodes
    for addr in addrList:
        node = Node(addr, args.user, args.password, args.gobin)
        nodes.append(node)
        # cleanup any old agent instances still running
        node.runCmd("sudo pkill n4sagent")
        node.runCmd("sudo pkill k8sagent")
        node.runCmd("sudo pkill nmd")
        node.runCmd("sudo pkill hostsim")
        node.runCmd("sudo pkill vcsim")
        node.runCmd("/usr/sbin/ifconfig -a | grep -e vport | awk '{print $1}' | xargs -r -n1 -I{} sudo ip link delete {} type veth")
        node.runCmd("sudo ip link delete strunk0 type veth peer name ntrunk0")
        node.runCmd("sudo ovs-vsctl del-br SimBridge")
        # Copy conf and binary files for CNI plugin
        node.runCmd("sudo cp " + node.gobin + "../src/github.com/pensando/sw/agent/plugins/k8s/cni/pensandonet/01-pensando.conf /etc/cni/net.d/")
        node.runCmd("sudo cp " + node.gobin + "/pensandonet /opt/cni/bin/")
        # create directory for .sock files and remove any stale .sock files
        node.runCmd("sudo mkdir -p /run/pensando/")
        node.runCmd("sudo rm /run/pensando/pensando-cni.sock")
    # Connect to sim nodes
    for addr in simAddrList:
        snode = Node(addr, args.user, args.password, args.gobin)
        simNodes.append(snode)
        # cleanup any old agent instances still running
        snode.runCmd("sudo pkill n4sagent")
        snode.runCmd("sudo pkill k8sagent")
        snode.runCmd("sudo pkill nmd")
        snode.runCmd("sudo pkill hostsim")
        snode.runCmd("sudo docker ps -a | grep alpine | awk '{print $1}' | xargs -r -n1 -I{} echo sudo docker rm -f {}")
        snode.runCmd("sudo ip link delete strunk0 type veth peer name ntrunk0")
        snode.runCmd("sudo ovs-vsctl del-br SimBridge")
    # Stop vcsim
    try:
        vcsim = Node("pen-master", args.user, args.password, args.simbin)
        vcsim.runCmd("sudo pkill vcsim")
    except:
        pass
    # When -stop was passed, we are done
    if args.stop:
        os._exit(0)
    # Start pensando agent
    for idx, node in enumerate(nodes):
        # creat the veth pair for ovs
        node.runCmd("sudo ip link add strunk0 type veth peer name ntrunk0")
        node.runCmd("sudo ifconfig ntrunk0 hw ether 02:02:02:02:02:" + '{:02x}'.format(idx+1) + " promisc up")
        node.runCmd("sudo ifconfig strunk0 hw ether 02:02:02:02:02:" + '{:02x}'.format(idx+1) + " promisc up")
        node.runCmd("sudo ifconfig " + args.hostif + " promisc up")
        node.runCmd("sudo ifconfig " + args.uplink + " promisc up")
        # start the agent
        if args.k8s:
            node.startK8sAgent()
        else:
            node.startN4sAgent(args.npm, args.resolvers, args.hostif, args.uplink)
            node.startNMD(args.cmdregistration, args.cmdcerts, args.resolvers, args.hostif, args.uplink)
    print "################### Started Pensando Agents #####################"
    for idx, snode in enumerate(simNodes):
        snode.runCmd("sudo ifconfig " + args.simif + " promisc up")
        snode.runCmd("sudo docker pull alpine")
        # start hostsim
        snode.startHostsim(args.simif, args.simbin)
    # gather ism addresses
    hsims = []
    snics = []
    for idx, addr in enumerate(simAddrList):
        hsims.append("http://" + addr + ":5050")
        snics.append("02:02:02:02:02:" + '{:02x}'.format(idx+1))
    # start vcsim
    vcsim = Node("pen-master", args.user, args.password, args.simbin)
    vcsim.startVcsim(",".join(hsims), ",".join(snics))
    time.sleep(25)
    print "################### Started Simulation agents #####################"
    # Create a network
    createNetwork("localhost", "default", "10.1.0.0/16", "10.1.254.254", 2)
    print "################### Created default network #####################"
except Exception, err:
    print "ERROR while running the script: "
    print err
    traceback.print_exc()
    os._exit(1)
time.sleep(1)
os._exit(0)
|
import numpy as np
from RandomWalk import RandomWalk
import math
import matplotlib.pyplot as plt
from datetime import datetime
def run_exp3():
    """Reproduce Sutton (1988) Figure 5: TD(lambda) RMSE per lambda.

    Each lambda is paired with an empirically chosen best learning rate
    alpha; the resulting errors are written to a timestamped file under
    results/ and plotted against lambda.
    """
    # params is a list of [lambda, best alpha] pairs; the alpha per lambda
    # range was tuned beforehand (NOTE(review): values assumed from prior runs).
    params = []
    for i in range(11):
        if i < 5:
            params.append([i*0.1, 0.2])
        elif i < 8:
            params.append([i*0.1, 0.15])
        elif i < 10:
            params.append([i*0.1, 0.10])
        else:
            params.append([i*0.1, 0.05])
    rsmes = []
    training_size = 100
    sequence_size = 10
    # One shared data set so every lambda is evaluated on identical walks.
    random_walk = RandomWalk(training_size, sequence_size)
    random_walk.generate_data()
    filename = 'generate_fig5_' +\
        datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + '.txt'
    # NOTE(review): assumes a 'results/' directory already exists.
    f = open('results/' + filename, 'w', )
    f.write('lambda\tRSME\talpha\tSE\n')
    for i in params:
        weights, rsme, se = get_predictions(i[0], random_walk, training_size, sequence_size, i[1])
        rsmes.append(rsme)
        f.write('{}\t{}\t{}\t{}\n'.format(i[0], rsme, i[1], se))
        print 'at alpha = ', i[1], 'at lambda = ', i[0], ' --> rsme = ', rsme, ' | weights = ', weights
    f.close()
    # plot error as a function of lambda
    lams = [i[0] for i in params]
    plt.plot(lams, rsmes)
    plt.xlabel('Lambda')
    plt.ylabel('Error (RSME)')
    plt.title('Random Walk - Reproducing Figure 5')
    plt.grid(True)
    plt.show()
def get_predictions(lam, random_walk, training_size, sequence_size, alpha):
    """Train TD(lambda) weights on pre-generated random-walk data.

    :param lam: TD lambda (eligibility-trace decay) parameter.
    :param random_walk: data holder; only ``random_walk.observations`` is
        read - element [i] is a list of sequences of 5-dim state vectors.
    :param training_size: number of training sets to average over.
    :param sequence_size: number of sequences per training set.
    :param alpha: learning rate.
    :return: (final weights, mean RMSE over training sets, standard error).
    """
    rsme_list = []
    for i in range(training_size):
        # BUG FIX: weights was a plain Python list, so ``weights += delta_w``
        # (list += ndarray) *extended* the list to 10 elements instead of
        # adding element-wise.  An ndarray makes += a true vector update.
        weights = np.array([0.5, 0.5, 0.5, 0.5, 0.5])
        observations = random_walk.observations[i]
        for j in range(sequence_size):
            obs = observations[j]
            delta_w = 0
            for t in range(1, len(obs) + 1):
                ind = t - 1
                p_t = np.dot(weights, obs[ind])
                p_tn1 = get_p_tn1(obs, ind, weights)
                # Discounted sum of past state vectors (eligibility trace).
                discount_delta_w = 0
                for k in range(1, t + 1):
                    temp = np.multiply(lam ** (t - k), obs[k - 1])
                    discount_delta_w = np.add(temp, discount_delta_w)
                dw = np.multiply(alpha * (p_tn1 - p_t), discount_delta_w)
                delta_w += dw
            # Batch update: apply the accumulated change once per sequence.
            weights += delta_w
        err = compute_error(weights)
        rsme_list.append(err)
    stdev = np.std(rsme_list, ddof=1)
    se = stdev / math.sqrt(len(rsme_list))
    return weights, np.mean(rsme_list), se
def compute_error(weights):
    """Root-mean-square error of *weights* against the true walk values."""
    true_values = np.array([1.0/6, 1.0/3, 1.0/2, 2.0/3, 5.0/6])
    squared_diff = np.power(np.subtract(weights, true_values), 2)
    return math.sqrt(np.mean(squared_diff))
def get_p_tn1(obs, ind, weights):
    """Return P(t+1), the TD target for the observation at index *ind*.

    For a non-final observation this is the current value estimate of the
    next state; for the final observation it is the actual outcome (1 for
    the right terminal state, 0 for the left one).

    :param obs: sequence of 5-element one-hot state vectors (lists).
    :param ind: index of the current observation within *obs*.
    :param weights: current value-function weights.
    :raises ValueError: if the last observation is not a terminal state.
    """
    if ind == len(obs) - 1:
        # at last observation of sequence: the outcome is known exactly
        if obs[ind] == [0, 0, 0, 0, 1]:
            return 1
        if obs[ind] == [1, 0, 0, 0, 0]:
            return 0
        # BUG FIX: previously fell through and silently returned None,
        # which then poisoned the arithmetic in the caller.  Fail loudly.
        raise ValueError("last observation is not a terminal state: %s" % (obs[ind],))
    # not last observation of sequence: bootstrap from the next state
    return np.dot(weights, obs[ind + 1])
# Entry point: run the Figure-5 experiment when executed as a script.
if __name__ == '__main__':
    run_exp3()
# Benchmark driver: compares uncoded vs coded MPI PageRank over increasing
# graph sizes, appending timing rows (CSV) per configuration.
import os
N=[120,300,600,1200,2400,4800,9600,19200]  # graph sizes (node counts)
R=[2,3]                                    # coding/replication factors
K=4                                        # worker count (matches the hard-coded "4" in the commands)
# Start with fresh CSVs containing only a header row.
os.system("rm result/*.csv")
for r in R:
    os.system("echo N,K,R,tmap,tshuffle,treduce,texecution > result/"+str(r)+"_coded.csv")
os.system("echo N,K,R,tmap,tshuffle,treduce,texecution > result/uncoded.csv")
for n in N:
    os.system("rm *.txt")
    print("node",n)
    print("Generating Graph")
    # Uncoded baseline: partition, distribute (cp.sh), run, append timings.
    os.system("python3 GraphPartitionCreator.py 4 " + str(n))
    os.system("./cp.sh")
    os.system("mpirun -np 5 -machinefile machinefile -mca btl_tcp_if_include tun0 -mca btl_base_warn_component_unused 0 python3 uncodedPageRank.py 4 " + str(n) + " >> result/uncoded.csv")
    # Coded runs, one per replication factor r.
    for r in R:
        os.system("echo python3 GraphPartitionCreatorCoded.py 4 " + str(n) + " " + str(r))
        os.system("python3 GraphPartitionCreatorCoded.py 4 " + str(n) + " " + str(r))
        os.system("echo ./cp.sh")
        os.system("./cp.sh")
        os.system("clear")
        os.system("echo mpirun -np 5 -machinefile machinefile -mca btl_tcp_if_include tun0 -mca btl_base_warn_component_unused 0 python3 codedPageRank.py 4 " + str(n) + " " + str(r))
        os.system("mpirun -np 5 -machinefile machinefile -mca btl_tcp_if_include tun0 -mca btl_base_warn_component_unused 0 python3 codedPageRank.py 4 " + str(n) + " " + str(r) + " >> result/" +str(r) + "_" + "coded.csv")
        os.system("cat result/" + str(r) + "_" + "coded.csv")
|
# Advent of Code 2018, day 5: polymer reduction.
import os
import numpy as np
# Read the puzzle input as one long polymer string (strip trailing newline).
fpath = os.path.join('Resources', 'aoc201805_data.txt')
with open(fpath, 'r') as f:
    data = f.read().strip()
def react(data):
    """Collapse a polymer: adjacent same-letter, opposite-case units cancel.

    Returns the surviving units as a list of characters.
    """
    remaining = []
    for unit in data:
        if remaining and unit != remaining[-1] and unit.lower() == remaining[-1].lower():
            remaining.pop()
        else:
            remaining.append(unit)
    return remaining
print(f'The answer to part 1 is {len(react(data))}')
# Part 2: strip every variant of one unit type, react, keep the best length.
polymer_lengths = []
for unit in map(chr, range(ord('a'), ord('z') + 1)):
    stripped = data.replace(unit, '').replace(unit.upper(), '')
    polymer_lengths.append(len(react(stripped)))
print(f'The answer to part 2 is {min(polymer_lengths)}')
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import pickle
def vel_track(x, u):
    """Locate the half-maximum extent of the wave-front in field *u*.

    Returns [left position, right position, hit_boundary], positions taken
    from the coordinate array *x*.
    NOTE(review): relies on the module-level grid resolution ``N``.
    """
    above = np.where(u > 0.50)
    if len(above[0]) == 0:
        # Field has not reached half max anywhere yet: report the centre.
        centre = int(x.shape[0]/2)
        return [x[centre], x[centre], False]
    lo, hi = above[0][0], above[0][-1]
    hit_edge = lo < 5 or hi > N - 5
    if hit_edge:
        print("Hit boundary...")
    return [x[lo], x[hi], hit_edge]
def save_name(int_):
    """Return the zero-padded frame file name for *int_* (e.g. 7 -> 'img-00007').

    Replaces the original if/elif padding ladder; output is identical for
    the 0-9999 frame range the animation uses, and pads consistently
    (standard zfill) beyond it.
    """
    return 'img-' + str(int_).zfill(5)
# ---- Load simulation output and run constants -----------------------------
name = input('Enter name of folder to animate: ')
path = os.getcwd() + '/' + name + '/'
# Pickled dict of run parameters written by the simulation.
with open(path + '_const.pickle', 'rb') as handle:
    constants = pickle.load(handle)
# NOTE(review): [1:] drops the FIRST sorted entry (presumably the
# _const.pickle just read), not a trailing parameters.txt as stated below.
files = sorted(os.listdir(path))[1:] # remove last parameters.txt file
diff_map_name, num_map_name, sea_map_name = files[0], files[1], files[2]
L = constants['L'] # box size
d = constants['d'] # spacial descretization
N = constants['N'] # resolution
dx = constants['dx']
dt = constants['dt'] # time step size
max_ = constants['max'] # max value of field
gamma = constants['gamma'] # growth constant
save_freq = constants['s_freq'] # save frame frequency
track_value = constants["track"] # trace value to find velocity
print('Growth {}'.format(gamma))
print('Max D {}'.format(d.max()))
x = np.linspace(0, L, N) # space between [0, L]
# Static maps saved once per run.
diffusion_map = np.load(os.path.join(path, diff_map_name))
num_map = np.load(os.path.join(path, num_map_name))
sea_map = np.load(os.path.join(path, sea_map_name))
ulim = num_map.max()
# Remaining entries are the per-frame field snapshots.
files = files[3:]
fig_, ax_ = plt.subplots()
times = []
minutes = 0
tex_plots = True
# Which curves to draw: diffusion-only, logistic-growth-only, full FKPP.
diff, lg, fkpp = [False, False, True]
num = len(files) - 1
latex_plt_labels = [num - 10*i for i in range(4)] # Plot end 4 time steps
latex_plt_labels = latex_plt_labels[::-1]
# GENERATE each individual frame in the animation
start = 0
for i, file in enumerate(files):
    print('step : {} | {}'.format(i * save_freq, i))
    # Each snapshot stores the three fields side by side.
    u, u_lg, u_diff = np.load(path + file)
    fig, ax = plt.subplots()
    if diff:
        ax.scatter(x, u_diff, color='blue', s=5, alpha=0.50)
        ax.plot(x, u_diff, color='blue', linewidth=1, label='diffusion eq', alpha=0.50)
    if lg:
        ax.plot(x, u_lg, color='red', linestyle='--', label='logistic growth', alpha=0.50)
    if fkpp:
        ax.plot(x, u, color='black', linestyle='--', label='fkpp')
    track = np.where(u > track_value)
    if len(track[0]) > 0: # Plot the velocity of wave-front
        x_l, x_h = track[0][0], track[0][-1]
        ax.scatter([x[x_l], x[x_h]], [track_value, track_value], c='red')
        if start == 0: # Triggered once: remember the initial front position
            start_pos = [x[x_l], x[x_h]]
            start += 1
    # Convert the frame index to simulated seconds for the title.
    seconds = round(i * save_freq * dt)
    ax.set_ylabel('u(x)')
    ax.set_xlabel('x')
    time_ = 'Time: {} (s)'.format(seconds)
    ax.set_title(time_)
    ax.set_ylim([0, 1.5])
    if i in latex_plt_labels:
        times.append(time_)
    plt.text(0, 1.3*max_, s='dt = {}, CFL ={}'.format(constants["dt"], constants["CFL"]))
    plt.legend()
    # NOTE(review): assumes a 'frames_2_anim/' directory exists in the CWD.
    plt.savefig(os.getcwd() + '/frames_2_anim/' + save_name(i))
    plt.close()
if tex_plots:
    # GENERATE Latex plots: overlay the last 4 sampled time steps.
    fig, ax = plt.subplots(figsize=(7.5, 5.5))
    for i, ind in enumerate(latex_plt_labels):
        print('i = ', i)
        file = files[ind]
        u, u_lg, u_diff = np.load(path + file)
        if fkpp:
            ax.scatter(x, u, s=1)
            ax.plot(x, u, alpha=0.65, label='t = {} '.format(times[i]))
            label_ = "FTCD FKPP"
        if lg:
            ax.scatter(x, u_lg, color='black', alpha=0.90, s=5)
            lg_label = 'Analytic Logistic Growth'
        if diff:
            ax.scatter(x, u_diff, color='black', alpha=0.90, s=5)
            ax.plot(x, u_diff, color='black', alpha=0.50, linewidth=1.0)
            diff_label = 'Analytic Diffusion'
        if i == 3: # On last plot set labels
            # ax.plot(x, u, label=label_, color='r', alpha=0.65)
            if lg:
                ax.scatter(x, u_lg, color='black', label=lg_label, alpha=0.90, s=5)
            if diff:
                # NOTE(review): plots u_lg but labels it as diffusion - confirm.
                ax.scatter(x, u_lg, color='black', label=diff_label, alpha=0.90, s=5)
    plt.text(0, 1.1 * max_, s=' gamma = {}, dt ={}, '.format(constants["gamma"], constants["dt"]))
    ax.set_xlabel('x')
    ax.set_ylabel('u(x, t)')
    ax.set_ylim(0, max_+0.25)
    ax.set_title('Non-uniform (Quadratic) Directed Diffusion')
    plt.legend()
    print('saved')
    plt.savefig(os.getcwd() + '/_tex')
    plt.close()
sys.exit('Done...')
|
import torch
import numpy as np
# YOLO-v1 style geometry constants.
IMG_WIDTH = 448
IMG_HEIGHT = 448
S = 7 # number of grid cells per side (grid is S*S)
B = 2 # number of bboxes per grid cell
C = 20 # number of classes
def read_labels(label_file):
    """Parse a whitespace-separated label file into rows of floats."""
    with open(label_file, 'r') as f:
        return [[float(token) for token in line.split()] for line in f.readlines()]
def labels2tensor(labels):
    """
    Build the groundtruth tensor S*S*5.
    :param labels: list of [class, x, y, w, h] rows with x, y, w, h
        normalised to [0, 1] relative to the whole image.
    :return: T: groundtruth tensor of shape (S, S, 5) where cell (row, col)
        holds [x, y, w, h, class] for the object centred in it; all-zero
        rows mean "no object".
    """
    T = torch.zeros(S, S, 5)  # init
    gcell_size = 1. / S
    for label in labels:  # mark labels
        cls, x, y, w, h = label[0], label[1], label[2], label[3], label[4]
        # Row index comes from y, column from x (image x-axis is dim 1).
        # Clamp so x == 1.0 or y == 1.0 cannot index out of bounds.
        row = min(int(y / gcell_size), S - 1)
        col = min(int(x / gcell_size), S - 1)
        # The original wrote the five elements one by one and then
        # overwrote them with this same assignment; once is enough.
        T[row, col] = torch.tensor([x, y, w, h, cls])
    return T
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import jupyxplorer.parser as parser
import pytest
from . import CONFIG_EXAMPLE
def test_load_yaml(mocker):
    """load_yaml returns the parsed document when yaml.load succeeds."""
    # Given: yaml.load is stubbed to return the canned valid config.
    data = CONFIG_EXAMPLE
    file_path = "/path/tests"
    expected_result = data
    mocker.patch.object(parser.yaml, "load", return_value=expected_result)
    file_open_mock = mocker.patch("builtins.open")
    file_open_mock.read_data = data
    # When
    result = parser.load_yaml(file_path)
    file_open_mock.assert_called_once_with(file_path, 'r')
    # Then
    assert result == expected_result
def test_invalid_doc_error(mocker):
    """A non-mapping YAML document must raise a 'document' syntax error."""
    data = "data"
    file_path = "/path/tests"
    mocker.patch.object(parser.yaml, "load", return_value=data)
    file_open_mock = mocker.patch("builtins.open")
    file_open_mock.read_data = data
    with pytest.raises(Exception, match=r'YAML SyntaxError:.*document.*'):
        parser.load_yaml(file_path)
    file_open_mock.assert_called_once_with(file_path, 'r')
def test_syntax_error(mocker):
    """A YAMLError with a problem mark is reported with 1-based line/column."""
    data = dict(
        manolo='esetioeh!'
    )
    file_path = "/path/tests"
    expected_line = 1
    expected_column = 10
    # Build a YAMLError carrying a fake problem mark (0-based positions).
    expected_exception = parser.yaml.YAMLError()
    expected_exception.problem_mark = mocker.MagicMock()
    expected_exception.problem_mark.line = expected_line
    expected_exception.problem_mark.column = expected_column
    mocker.patch.object(parser.yaml, "load", side_effect=expected_exception)
    file_open_mock = mocker.patch("builtins.open")
    file_open_mock.read_data = data
    # The error message is expected to show mark positions + 1 (1-based).
    with pytest.raises(Exception,
                       match=r'YAML SyntaxError.*[{}].*[{}].*'.format(expected_line+1, expected_column+1)):
        parser.load_yaml(file_path)
    file_open_mock.assert_called_once_with(file_path, 'r')
def test_schema_error(mocker):
    """An empty mapping parses but must fail schema validation."""
    data = {}
    file_path = "/path/tests"
    mocker.patch.object(parser.yaml, "load", return_value=data)
    file_open_mock = mocker.patch("builtins.open")
    file_open_mock.read_data = data
    with pytest.raises(Exception, match=r'YAML SchemaError:.*'):
        parser.load_yaml(file_path)
    file_open_mock.assert_called_once_with(file_path, 'r')
|
from django.conf.urls import url
from . import views
# CRUD routes for this app's class-based views (legacy url() regex style).
urlpatterns=[
    # List view at the app root.
    url(r'^$',
        views.Lista.as_view(),
        name='list'),
    # Detail view keyed by primary key.
    url(r'^(?P<pk>\d+)$',
        views.Detalle.as_view(),
        name='detail'),
    # Create form.
    url(r'^nuevo/$',
        views.Crear.as_view(),
        name='new'),
    # Edit form keyed by primary key.
    url(r'^editar/(?P<pk>\d+)$',
        views.Actualiza.as_view(),
        name='edit'),
    # Delete confirmation keyed by primary key.
    url(r'^borrar/(?P<pk>\d+)$',
        views.Borrale.as_view(),
        name='delete'),
]
from typing import List
# Codon -> protein translation table.  The three STOP codons terminate
# translation rather than naming a protein.
PMAP = {
    "AUG": "Methionine",
    "UUU": "Phenylalanine",
    "UUC": "Phenylalanine",
    "UUA": "Leucine",
    "UUG": "Leucine",
    "UCU": "Serine",
    "UCC": "Serine",
    "UCA": "Serine",
    "UCG": "Serine",
    "UAU": "Tyrosine",
    "UAC": "Tyrosine",
    "UGU": "Cysteine",
    "UGC": "Cysteine",
    "UGG": "Tryptophan",
    "UAA": "STOP",
    "UAG": "STOP",
    "UGA": "STOP",
}


def proteins(strand: str) -> List[str]:
    """ Given a strand of RNA: "AUGUUUUCUUAAAUG" generate Protein names based on
    three character codons:
    RNA: "AUGUUUUCUUAAAUG" =>
    Codons: "AUG", "UUU", "UCU", "UAA", "AUG" =>
    Protein: "Methionine", "Phenylalanine", "Serine"
    """
    # separate the strand into 3-char codon chunks
    codon_chunks = [strand[i:i + 3] for i in range(0, len(strand), 3)]
    protein_names = []  # Holds final protein name results
    # translate each codon until we hit a stop codon
    for codon in codon_chunks:
        protein = PMAP.get(codon)
        # BUG FIX: the original used ``is not "STOP"`` - an identity test
        # against a literal, which is implementation-dependent and raises
        # SyntaxWarning on Python >= 3.8.  Compare with equality instead.
        if protein == "STOP":
            break
        protein_names.append(protein)
    return protein_names
|
import argparse
import os,sys
import subprocess as sp
import shlex
import yaml
def run_bash_cmd(cmd_str):
    """
    Echo *cmd_str* and execute it through a bash shell.
    """
    print(cmd_str)
    sp.call(cmd_str, shell=True, executable="/bin/bash")
#This allows for the command to output to console while running but with certain commands it causes issues (awk)
def run_shell_cmd(cmd_str):
    """Run *cmd_str* (no shell), streaming stdout to the console.

    :param cmd_str: command line, split with shlex.
    :return: (captured stdout, captured stderr) as strings.
    Uses the module-global ``cur_env`` as the child environment.
    """
    cmd_lst = shlex.split(cmd_str)
    process = sp.Popen(cmd_lst, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, env=cur_env)
    cmd_stdout = ""
    # BUG FIX: readline() on a binary pipe yields b"" at EOF, so the iter()
    # sentinel must be bytes.  The original used "" (str), which never
    # matched, leaving termination to a racy poll() check that could drop
    # trailing output.
    for line in iter(process.stdout.readline, b""):
        cmd_stdout += line.decode("utf-8")
        sys.stdout.buffer.write(line)
    _, cmd_stderr = process.communicate()
    cmd_stderr = cmd_stderr.decode("utf-8")
    return cmd_stdout, cmd_stderr
### CUSTOM FLOW TESTS ###
def run_custom_flow(in_config_fpath, mode="quick"):
    """Run the COFFE custom (transistor-sizing) flow for one input config.

    Returns the path of the log file the run output was tee'd into.
    """
    ## init run options ##
    iters = 1 if mode == "quick" else 4
    ## run custom flow ##
    config_name = os.path.splitext(os.path.split(in_config_fpath)[-1])[0]
    log_out = os.path.join(unit_test_home, f"{config_name}.log")
    print(f"Running custom flow with input config: {in_config_fpath} ...")
    run_bash_cmd(f"python3 coffe.py -i {iters} {in_config_fpath} | tee {log_out}")
    return log_out
### STDCELL FLOW TESTS ###
def run_stdcell_flow(in_config_fpath):
    """Run the COFFE standard-cell flow for one input config.

    Returns the path of the log file the run output was tee'd into.
    """
    ## init run options ##
    run_opts = {}
    ## run stdcell flow ##
    config_name = os.path.splitext(os.path.split(in_config_fpath)[-1])[0]
    log_out = os.path.join(unit_test_home, f"{config_name}.log")
    print(f"Running stdcell flow with input config: {in_config_fpath} ...")
    # Default mode is to run in parallel
    run_bash_cmd(f"python3 coffe.py {in_config_fpath} -ho -p | tee {log_out}")
    return log_out
def main():
    """Discover unit-test configs and run the selected COFFE flow(s)."""
    global cur_env
    global coffe_home
    global unit_test_home
    cur_env = os.environ.copy()
    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-c",
                        "--coffe_top_level",
                        action='store',
                        default='~/COFFE',
                        help='Top level of your COFFE installation')
    parser.add_argument('-o', '--test_opts', type=str, choices=["custom", "stdcell", "all"], default="custom", help="choose test type")
    args = parser.parse_args()
    # Get paths needed for script (runs from the COFFE install root)
    coffe_home = os.path.expanduser((args.coffe_top_level))
    os.chdir(coffe_home)
    unit_test_home = os.path.join(coffe_home,"unit_tests")
    input_test_dir = "input_files"
    custom_flow_dir = "custom_flow"
    stdcell_flow_dir = "stdcell_flow"
    custom_flow_inputs_path = os.path.join(unit_test_home,input_test_dir,custom_flow_dir)
    stdcell_flow_inputs_path = os.path.join(unit_test_home,input_test_dir,stdcell_flow_dir)
    # List of Custom Flow tests
    # Custom flow is automatic transistor sizing of FPGA circuits
    custom_flow_input_fpaths = [os.path.join(custom_flow_inputs_path,f) for f in os.listdir(custom_flow_inputs_path) if f.endswith(".yaml")]
    # List of Standard Cell flow tests
    stdcell_flow_input_fpaths = [os.path.join(stdcell_flow_inputs_path,f) for f in os.listdir(stdcell_flow_inputs_path) if f.endswith(".yaml")]
    if args.test_opts == "custom":
        for custom_flow_input_fpath in custom_flow_input_fpaths:
            coffe_log = run_custom_flow(custom_flow_input_fpath)
            # NOTE(review): this break runs only the FIRST custom config in
            # "custom" mode, while "all" mode runs every config - confirm
            # whether that asymmetry is intentional.
            break
    elif args.test_opts == "stdcell":
        for stdcell_flow_input_fpath in stdcell_flow_input_fpaths:
            coffe_log = run_stdcell_flow(stdcell_flow_input_fpath)
    elif args.test_opts == "all":
        for custom_flow_input_fpath in custom_flow_input_fpaths:
            coffe_log = run_custom_flow(custom_flow_input_fpath)
        for stdcell_flow_input_fpath in stdcell_flow_input_fpaths:
            coffe_log = run_stdcell_flow(stdcell_flow_input_fpath)
# TODO compare with archived results
# Script entry point.
if __name__ == "__main__":
    main()
__all__ = ['IpoptConfig', 'IpoptSolver', 'SnoptConfig', 'SnoptSolver', 'OptProblem', 'OptResult',
           'OptSolver', 'OptConfig']
from .pyoptsolvercpp import __with_snopt__, __with_ipopt__, __version__
# BUG FIX: the Ipopt import was guarded by __with_snopt__, so a build with
# Ipopt but without Snopt could never import the Ipopt wrapper, and a
# Snopt-only build crashed trying to import it.
if __with_ipopt__:
    from .pyoptsolvercpp import IpoptConfig, IpoptSolver
else:
    print("Cannot import IpoptWrapper")
if __with_snopt__:
    from .pyoptsolvercpp import SnoptConfig, SnoptSolver
else:
    print("Cannot import SnoptWrapper")
from .pyoptsolvercpp import OptProblem, OptResult
from .pyoptsolver import OptSolver, OptConfig
|
#!/usr/bin/python
# coding: utf-8
import logging
import os
import re
import sys
import util
from datetime import datetime, timedelta
from future.utils import string_types
from telegram import MessageEntity
from telegram.error import BadRequest, TelegramError
from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, ConversationHandler
from telegram.ext.filters import Filters
def get_your_id(bot, update):
    """Reply to /id with "<telegram name>:<chat id>"."""
    print("get_your_id")
    message = update.message
    message.reply_text("%s:%s" % (message.from_user.name, message.chat_id))
def register_telegram(bot, update):
    """Acknowledge a registration message with "<telegram name>:<chat id>"."""
    print("register_telegram")
    message = update.message
    telegram_id = message.from_user.id  # fetched but currently unused
    message.reply_text("%s:%s" % (message.from_user.name, message.chat_id))
@util.just_one_instance()
def main(*args, **kargs):
    """Start the Telegram bot and poll for updates until interrupted."""
    # SECURITY(review): the bot token is hard-coded here (and repeated in a
    # comment at the bottom of the file); move it to configuration and
    # revoke/rotate the exposed token.
    updater = Updater('663483212:AAGcITtCSKLQ5ZOYHQP3tMZloOxAOttUoak')
    updater.dispatcher.add_handler(CommandHandler("id", get_your_id))
    # 6-char short codes and 0x-prefixed 40-hex addresses both register.
    updater.dispatcher.add_handler(RegexHandler('^[0-9a-zA-Z]{6}$', register_telegram))
    updater.dispatcher.add_handler(RegexHandler('^0x[0-9a-zA-Z]{40}$', register_telegram))
    updater.dispatcher.add_handler(RegexHandler('^/0x[0-9a-zA-Z]{40}$', register_telegram))
    updater.start_polling()
    logging.info('start')
    updater.idle()
# Script entry point; the port kwarg is consumed by just_one_instance.
if __name__ == '__main__':
    main(port=60033);
# Manual smoke-test URL (contains the live bot token - see SECURITY note):
#https://api.telegram.org/bot663483212:AAGcITtCSKLQ5ZOYHQP3tMZloOxAOttUoak/sendMessage?chat_id=539823814&text=welcome
|
'''PROJECT EULER PROBLEMS
AUTHOR: VAL MCCULLOCH
PROBLEM 2:
Each new term in the Fibonacci sequence is generated by
adding the previous two terms. By starting with 1 and 2,
the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do
not exceed four million, find the sum of the even-valued terms.
SOLUTION: 4613732
'''
def main():
    """Sum the even Fibonacci terms below four million and print the result."""
    previous, current = 1, 2
    total = 2  # the first even term (2) is counted up front
    while True:
        previous, current = current, previous + current
        if current >= 4000000:  # stop once terms exceed four million
            break
        if current % 2 == 0:
            total += current
    print(total)
main()
|
from typing import List
class Solution:
    """LeetCode 77: return all k-element combinations of 1..n."""

    def combine(self, n: int, k: int) -> List[List[int]]:
        result = []
        chosen = []

        def backtrack(start):
            # A full combination has been built; record a copy of it.
            if len(chosen) == k:
                result.append(chosen[:])
                return
            for value in range(start, n + 1):
                chosen.append(value)
                backtrack(value + 1)
                chosen.pop()

        backtrack(1)
        return result
# Quick manual check: print all 3-combinations of 1..5.
sol = Solution()
n = 5
k = 3
print(sol.combine(n, k))
|
from dataclasses import dataclass
from typing import Optional
from labby.server import ExperimentSequenceStatus, Server, ServerResponse, ServerRequest
@dataclass(frozen=True)
class ExperimentStatusResponse(ServerResponse):
    """Server reply carrying the experiment-sequence status.

    NOTE(review): sequence_status is Optional - presumably None when no
    sequence is active; confirm against the server implementation.
    """
    sequence_status: Optional[ExperimentSequenceStatus]
@dataclass(frozen=True)
class ExperimentStatusRequest(ServerRequest[ExperimentStatusResponse]):
    """Request asking the server for its experiment-sequence status."""

    def handle(self, server: Server) -> ExperimentStatusResponse:
        """Answer with the server's current sequence status."""
        return ExperimentStatusResponse(
            sequence_status=server.get_experiment_sequence_status()
        )
|
"""GRID CLASS."""
import pygame
class grid():
    """Square-cell grid over a pygame screen, with pathfinding helpers.

    position() and neighbors() work with (column, row) cell indices derived
    from pixel coordinates; show() draws the grid outline.
    """
    def __init__(self, screen, height, width, blockSize):
        self.height = height        # screen height in pixels
        self.width = width          # screen width in pixels
        self.blockSize = blockSize  # cell edge length in pixels
        self.row = height // blockSize  # number of cell rows
        self.col = width // blockSize   # number of cell columns
        self.screen = screen
        self.posArray = []
        self.posList = []
        self.weights = {}           # optional per-cell movement costs

    def cost(self, from_node, to_node):
        """Movement cost of entering *to_node* (defaults to 1)."""
        return self.weights.get(to_node, 1)

    def show(self):
        """Draw the grid cell outlines onto the screen."""
        for i in range(self.row):
            for j in range(self.col):
                pygame.draw.rect(self.screen, [
                    51, 51, 51], (
                    i * self.blockSize,
                    j * self.blockSize,
                    self.blockSize,
                    self.blockSize),
                    1)

    def position(self, test):
        """Convert a pixel coordinate pair to (column, row) cell indices."""
        # NOTE(review): posArray is re-appended on every call; looks like
        # leftover debugging code, kept for compatibility.
        for i in range(self.row):
            rowArray = []
            for j in range(self.col):
                rowArray.append([i, j])
            self.posArray.append(rowArray)
        # return self.posArray
        return (test[0] // self.blockSize, test[1] // self.blockSize)

    def list(self):
        """Return (and cache in posList) all [i, j] index pairs."""
        for i in range(self.row):
            for j in range(self.col):
                self.posList.append([i, j])
        return self.posList

    def neighbors(self, test):
        """4-connected neighbours of cell *test* that lie inside the grid.

        BUG FIX: bounds were checked against the *pixel* dimensions
        (x <= self.width / y <= self.height), so off-grid cells were
        returned; cell indices must be checked against the cell counts.
        """
        offsets = [[0, 1], [0, -1], [1, 0], [-1, 0]]
        nList = []
        for dx, dy in offsets:
            x = test[0] + dx
            y = test[1] + dy
            # x is a column index in [0, col); y is a row index in [0, row).
            if 0 <= x < self.col and 0 <= y < self.row:
                nList.append((x, y))
        return nList
|
from datetime import datetime
from unittest import TestCase
from apel.parsers import SlurmParser
class ParserSlurmTest(TestCase):
    '''
    Test case for SLURM parser
    '''
    def setUp(self):
        self.parser = SlurmParser('testSite', 'testHost', True)

    def test_parse_line(self):
        '''Parse one sacct accounting line and verify every extracted field.'''
        line1 = ('667|sleep|root|root|2013-03-11T12:47:37|2013-03-11T12:47:40|00:00:03|12|debug|4|2|cloud-vm-[03-04]|560K|100904K ')
        line1_values = {"JobName": "667",
                        "LocalUserID":"root",
                        "LocalUserGroup": "root",
                        "WallDuration":3,
                        "CpuDuration": 12,
                        "StartTime": datetime(2013, 3, 11, 12, 47, 37),
                        "StopTime": datetime(2013, 3, 11, 12, 47, 40),
                        "MemoryReal": 560,
                        "MemoryVirtual": 100904,
                        "NodeCount": 2,
                        "Processors": 4
                        }
        cases = {line1:line1_values}
        for line in cases.keys():
            record = self.parser.parse(line)
            cont = record._record_content
            self.assertTrue(cont.has_key("Site"))
            self.assertTrue(cont.has_key("JobName"))
            self.assertTrue(cont.has_key("LocalUserID"))
            self.assertTrue(cont.has_key("LocalUserGroup"))
            self.assertTrue(cont.has_key("WallDuration"))
            self.assertTrue(cont.has_key("CpuDuration"))
            self.assertTrue(cont.has_key("StartTime"))
            self.assertTrue(cont.has_key("StopTime"))
            self.assertTrue(cont.has_key("MemoryReal"))
            # BUG FIX: this assertion previously re-checked "MemoryReal",
            # so "MemoryVirtual" was never verified.
            self.assertTrue(cont.has_key("MemoryVirtual"))
            for key in cases[line].keys():
                self.assertEqual(cont[key], cases[line][key], "%s != %s for key %s" % (cont[key], cases[line][key], key))
|
# Please edit this list and import only required elements
import webnotes
from webnotes.utils import add_days, add_months, add_years, cint, cstr, date_diff, default_fields, flt, fmt_money, formatdate, generate_hash, getTraceback, get_defaults, get_first_day, get_last_day, getdate, has_common, month_name, now, nowdate, replace_newlines, sendmail, set_default, str_esc_quote, user_format, validate_email_add
from webnotes.model import db_exists
from webnotes.model.doc import Document, addchild, removechild, getchildren, make_autoname, SuperDocType
from webnotes.model.doclist import getlist, copy_doclist
from webnotes.model.code import get_obj, get_server_obj, run_server_obj, updatedb, check_syntax
from webnotes import session, form, is_testing, msgprint, errprint
# Convenience aliases onto the active database connection.
# NOTE(review): ``set`` shadows the Python builtin of the same name.
set = webnotes.conn.set
sql = webnotes.conn.sql
get_value = webnotes.conn.get_value
in_transaction = webnotes.conn.in_transaction
convert_to_lists = webnotes.conn.convert_to_lists
# -----------------------------------------------------------------------------------------
class DocType:
    """Address controller: builds the display address and contact lookups."""
    def __init__(self, doc, doclist=[]):
        # NOTE(review): mutable default argument for doclist; left as-is to
        # preserve the framework-facing signature.
        self.doc = doc
        self.doclist = doclist
        # Table-name prefix differs between test and production databases.
        self.prefix = is_testing and 'test' or 'tab'
    def validate(self):
        """Concatenate filled address fields into doc.address.

        Returns an error string when no address component is filled.
        """
        import string
        # NOTE(review): ``string`` is imported here but never used.
        if not (self.doc.address_line1) and not (self.doc.address_line2) and not (self.doc.city) and not (self.doc.state) and not (self.doc.country) and not (self.doc.pincode):
            return "Please enter address"
        else:
            address =["address_line1", "address_line2", "city", "state", "country", "pincode"]
            comp_address=''
            for d in address:
                if self.doc.fields[d]:
                    comp_address += self.doc.fields[d] + "\n"
            self.doc.address = comp_address
    def check_state(self):
        """Return a newline-separated list of states for doc.country.

        SECURITY(review): country is interpolated directly into the SQL
        string - switch to a parameterized query.
        """
        return "\n" + "\n".join([i[0] for i in sql("select state_name from `tabState` where `tabState`.country='%s' " % self.doc.country)])
    def get_contacts(self,nm):
        """Return contact rows for sales partner *nm*, or '' when nm is falsy.

        SECURITY(review): nm is interpolated directly into the SQL string -
        switch to a parameterized query.
        """
        if nm:
            contact_details =convert_to_lists(sql("select name, CONCAT(IFNULL(first_name,''),' ',IFNULL(last_name,'')),contact_no,email_id from `tabContact` where sales_partner = '%s'"%nm))
            return contact_details
        else:
            return ''
return '' |
"""
재무제표 누락데이터 채우기
누락데이터 기준 : total_assets 가 누락된 행
누락데이터 보유 파일 :
"""
import os
import re
# import pandas as pd
import db_oper
import financial_reports as fr
def get_rdate_list():
    """Return all distinct report dates from the reports table, ascending."""
    df = db_oper.select_by_query('select distinct rdate from reports order by rdate')
    rdates = list(df[['rdate']].rdate)
    rdates.sort()
    return rdates
def do_main_proc_for_financial_reports_fill_missing_data():
    """Fill rows in the reports table whose total_assets is missing.

    For each missing (rdate, code) row, search the report Excel files of
    the following four quarters for a complete record and write it back.
    """
    # pd.set_option('display.width', None)
    # pd.set_option('display.max_rows', None)
    # pd.set_option('display.max_columns', None)
    # pd.set_option('display.max_colwidth', -1)
    # Read the report file list up front.
    reports_path = os.path.join('data', 'financial_reports')
    files = fr.get_file_list(reports_path)
    rdate_list = get_rdate_list()
    # 1. Find rows whose total_assets is missing (null or zero).
    missing_df = db_oper.select_table('reports', 'total_assets is null or total_assets = 0')
    print('missing data count : ', len(missing_df))
    # print(missing_df.head())
    # Iterate over each missing row.
    for idx, row in missing_df.iterrows():
        miss_rdate = str(row['rdate'])
        miss_yyyy = miss_rdate[:4]
        miss_mm = miss_rdate[4:6]
        miss_code = row['code']
        print('\n\n## missing data : ', miss_rdate, miss_code)
        # Take the 4 quarter dates starting at the missing quarter.
        idx = rdate_list.index(row['rdate'])
        rdate_4q = rdate_list[idx:min(idx+4, len(rdate_list))]
        # Search each candidate quarter in order.
        for rd in rdate_4q:
            rd = str(rd)
            yyyy = rd[:4]
            mm = rd[4:6]
            # print(rd, yyyy, mm)
            # fq4 = [f for f in files if re.search('reports/%s.*%s.*/' % (yyyy, mm), f)]
            # Match report directories whose name contains year and month.
            fq4 = [f for f in files if re.search('%s.*%s.*' % (yyyy, mm), os.path.split(os.path.dirname(f))[1])]
            # Iterate over the matching report Excel files.
            for f_report in fq4:
                dfs = fr.read_reports(f_report)
                # Search each sheet (tab) for the missing record.
                for tab in dfs.keys():
                    df = dfs[tab]
                    df = fr.remove_useless_companies(df)
                    report = fr.get_reports(df, miss_yyyy, miss_mm)
                    report = fr.replace_field_name(report)
                    # print(report.head())
                    # print(report.keys())
                    report = report[report['code'] == miss_code]
                    if len(report) == 0:
                        continue
                    # print('total_assets : ', report.total_assets)
                    # Skip candidates that still lack a usable total_assets.
                    if report.total_assets.any() <= 0:
                        continue
                    # Drop NaN columns before the update.
                    report = report.dropna(axis=1)
                    print(f_report)
                    print('tab : ', tab)
                    print(report)
                    db_oper.update_table('reports', report, ['code', 'rdate'])
                    # break
            # break
    return
# Script entry point.
if __name__ == "__main__":
    do_main_proc_for_financial_reports_fill_missing_data()
|
import pandas as pd
from tqdm import tqdm
# Pass 1: count how many times each object id (column 2) appears in the
# full request trace.
req_map = {}
with open("data/requests.csv", 'r') as trace:
    for i, row in tqdm(enumerate(trace), desc="Running trace", total=417882879):
        row = row.split(',')
        id_ = int(row[2])
        if id_ in req_map:
            req_map[id_] += 1
        else:
            req_map[id_] = 1
# Keep the 300000 most-requested ids as a set for O(1) membership tests.
L = list(req_map.items())
L = list(sorted(L, reverse=True, key=lambda x: x[1]))[:300000]
L = set([x[0] for x in L])
# print(len(df))
# df = df[df.iloc[:,2].isin(L)]
# print(len(df))
# Pass 2: copy only the trace rows whose id survived the cut.
with open("data/requests_300000.csv", "w") as f:
    with open("data/requests.csv", 'r') as trace:
        for i, row in tqdm(enumerate(trace), desc="Running trace", total=417882879):
            id_ = int(row.split(',')[2])
            if id_ in L:
                f.write(row)
|
# WSGI entry point: resolve create_app whether we run from the container
# root (top-level module) or as part of the package (relative import).
try:
    from app import create_app # noqa - running at container root
except ImportError:
    from .app import create_app
application = create_app()
if __name__ == "__main__":
    application.run()
|
import json
import re
from utils import *
class Vers:
    """A single verse: one character speaking one line of content."""

    def __init__(self, character, content):
        self.character = character
        self.content = content

    def __str__(self):
        return f"{self.character} : {self.content}"
class Piece:
    """A play: a title plus its list of actes."""

    def __init__(self, title):
        self.title = title
        self.actes = []

    @property
    def verses(self):
        """All verses of the play, flattened in acte order."""
        collected = []
        for acte in self.actes:
            collected.extend(acte.verses)
        return collected
class Acte:
    """An acte: a numbered group of scenes."""

    def __init__(self, number):
        self.number = number
        self.scenes = []

    @property
    def verses(self):
        """All verses of the acte, flattened in scene order."""
        collected = []
        for scene in self.scenes:
            collected.extend(scene.verses)
        return collected
class Scene:
    """A scene: a numbered list of verses."""

    def __init__(self, number):
        self.number = number
        self.verses = []
def find_character(line, persos):
    """Return the sanitized character name from *line* if it is a known perso.

    Raises ValueError when the line is not a character heading.
    """
    candidate = sanitize(line.replace('.', ''))
    if candidate not in persos:
        raise ValueError("Not a character : %s" % candidate)
    return candidate
def find_scene(string):
    """Extract the scene number/word from a 'SCÈNE X' heading.

    Raises ValueError when no scene heading is found.
    """
    sanitized = sanitize(string.replace('.', ''))
    match = re.search(r"S(CÈNE|cène) ([\w,È,\d]*)", sanitized)
    if match is None:
        raise ValueError("Not a scene: %s" % string)
    return match.group(2)
def find_acte(string):
    """Extract the acte number/word from an 'ACTE X' heading.

    Raises ValueError when no acte heading is found.
    """
    sanitized = sanitize(string.replace('.', ''))
    match = re.search(r"ACTE ([\w]*)", sanitized)
    if match is None:
        raise ValueError("Not an acte: %s" % string)
    return match.group(1)
from django.db import models
# Create your models here.
class passage(models.Model):
    """A reading passage with source metadata and engagement counters."""
    source = models.CharField(max_length=120)
    reference = models.CharField(max_length=120)
    genres = models.CharField(max_length=20)
    authors = models.CharField(max_length=50)
    time_period = models.DateField()
    location = models.CharField(max_length=50)
    link = models.CharField(max_length=200)
    content = models.TextField(null = True)
    quote = models.CharField(max_length=400)
    que = models.CharField(max_length=200)
    # NOTE(review): presumably like/dislike counters - confirm with UI code.
    l_count = models.BigIntegerField(null = True)
    d_count = models.BigIntegerField(null = True)
    def __str__(self):
        # BUG FIX: was ``_str_`` (single underscores, never called by
        # Django) and returned self.title, a field that does not exist on
        # this model.  Use the source field as the readable name.
        return self.source
class question(models.Model):
    """A discussion question attached to a passage, with response tallies."""
    passage = models.ForeignKey(passage, on_delete=models.CASCADE)
    comment = models.CharField(max_length=500)
    # NOTE(review): presumably positive/negative response counts - confirm.
    p_resp = models.BigIntegerField(null = True)
    n_resp = models.BigIntegerField(null = True)
import pytest
import numpy as np
import sys
import cv2
sys.path.append(".")
import ex0
import re
epsilon = .0001

def all_similar(t1, t2):
    """Test the maximum square error is lesser than epsilon."""
    squared_error = (t1 - t2) ** 2
    exceeds = squared_error > epsilon
    return exceeds.reshape(-1).mean() == 0
def test_authors():
    """ex0.__authors__ must map one or two valid FAU ids to authors."""
    author_dict = ex0.__authors__
    assert 0 < len(author_dict) < 3
    # FAU id format: two letters, two digits, four letters.
    fauid_re = re.compile('^[a-z]{2}[0-9]{2}[a-z]{4}$')
    assert all([len(fauid_re.findall(fid))==1 for fid in author_dict.keys()])
def test_save_images(tmp_path):
    """save_images must write color and grayscale images losslessly."""
    color_img = (np.random.rand(100, 110, 3) * 255).astype(np.uint8)
    gray_img = (np.random.rand(100, 100) * 255).astype(np.uint8)
    d = tmp_path / "sub"
    d.mkdir()
    color_path = d / "color.png"
    gray_path = d / "gray.png"
    ex0.save_images([color_img, gray_img], [str(color_path), str(gray_path)])
    # Round-trip through disk: PNG is lossless, so pixels must match.
    loaded_color = cv2.imread(str(color_path))
    loaded_gray = cv2.imread(str(gray_path), cv2.IMREAD_GRAYSCALE)
    assert all_similar(color_img, loaded_color)
    assert all_similar(gray_img, loaded_gray)
def test_scale(tmp_path):
    """scale_down must halve each spatial dimension of the image."""
    # 100x101 input with a bright top-left quadrant and a mid-gray seam.
    large_img = np.zeros([100, 101, 3], dtype=np.uint8)
    large_img[:50, :50, :] = 255
    large_img[:50, 50, :] = 127
    # Expected 50x50 result: the quadrant shrinks, the seam averages away.
    small_img = np.zeros([50, 50, 3], dtype=np.uint8)
    small_img[:25, :25, :] = 255
    computed_small_img = ex0.scale_down(large_img)
    assert all_similar(computed_small_img, small_img)
def test_color(tmp_path):
    """Summing the separated B/G/R channel images must reproduce the original."""
    img = (np.random.rand(100, 110, 3) * 255).astype(np.uint8)
    channels = ex0.separate_channels(img)
    blue, green, red = channels
    recombined = blue + green + red
    assert all_similar(img, recombined)
|
from sklearn.model_selection import GroupShuffleSplit
def find_best_group_split(dataframe, target_feature, group_by_feature, balance_focus="train"):
    """Pick the most class-balanced of 10 group-aware train/test splits.

    :param dataframe: pandas.DataFrame
        dataframe to split for max balance
    :param target_feature: string
        name of the (binary 0/1) target feature of the dataset
    :param group_by_feature: string
        name of the feature on which to group sets, preventing data leakage
        between train and test when rows of one group must stay together
    :param balance_focus: string
        {'train', 'test'} — which side's class balance to optimize; any other
        value keeps the first generated split
    :return: pandas.DataFrame, pandas.DataFrame, float, float
        best train set, best test set, and the |p(class 0) - p(class 1)| gap
        of the chosen split on the train and test sides

    Fix: the original read ``dict(value_counts()).get(0)`` / ``.get(1)``
    without a default, which returns None and raises TypeError whenever a
    split happens to contain only one class; counts now default to 0.  The
    three identical update bodies are also collapsed into one condition.
    """

    def _balance_gap(subset):
        # |freq(class 0) - freq(class 1)| as a fraction of the subset size;
        # a missing class simply counts as zero occurrences.
        counts = subset[target_feature].value_counts()
        n = counts.get(0, 0) + counts.get(1, 0)
        return abs(counts.get(0, 0) - counts.get(1, 0)) / n

    min_train_diff = 1
    min_test_diff = 1
    min_train_indices = list()
    min_test_indices = list()
    # using GroupShuffleSplit to generate 10 splits (the golden rule) and find the best split for our goal
    splitter = GroupShuffleSplit(test_size=0.20, n_splits=10, random_state=42)
    for train_indices, test_indices in splitter.split(dataframe.drop(target_feature, axis=1),
                                                      dataframe[target_feature],
                                                      groups=dataframe[group_by_feature]):
        train_diff = _balance_gap(dataframe.iloc[train_indices])
        test_diff = _balance_gap(dataframe.iloc[test_indices])
        # Always keep the first split; afterwards only replace it when the
        # focused side's balance strictly improves.
        first_split = len(min_train_indices) == 0 and len(min_test_indices) == 0
        if (first_split
                or (balance_focus == 'train' and train_diff < min_train_diff)
                or (balance_focus == 'test' and test_diff < min_test_diff)):
            min_train_diff = train_diff
            min_test_diff = test_diff
            min_train_indices = train_indices
            min_test_indices = test_indices
    train_best = dataframe.iloc[min_train_indices].copy()
    test_best = dataframe.iloc[min_test_indices].copy()
    return train_best, test_best, min_train_diff, min_test_diff
import random
from abc import ABC, abstractmethod
from collections import OrderedDict
import numpy as np
from mesa import Agent
from sklearn.metrics.pairwise import cosine_similarity
import copy
import sys
import re
class SnetAgent(Agent, ABC):
def __init__(self, unique_id, model, message, parameters):
    """Bind the agent to the simulation model and cache convenience aliases.

    unique_id:  for agents with an initial message in the configuration file,
                this is their order in that file; agents without one (the
                "random_agents") are created afterwards.
    model:      the SnetSim model (supplies parameters, blackboard, ontology).
    message:    the agent's initial blackboard message; may be blank, in which
                case the subclass must submit an initial message itself
                (float_vec_to_trade_plan converts a float vector to a
                trade-plan message, e.g. from a random vector).
    parameters: the subclass-specific agent_parameters from the config file.

    Subclasses that optimize need an asynchronous (ask-and-tell) interface,
    because SnetSim owns program control.
    """
    super().__init__(unique_id, model)
    self.message = message
    self.p = self.model.parameters  # convenience alias: it's shorter
    self.b = self.model.blackboard  # convenience alias
    self.o = self.model.ontology  # convenience alias
    self.parameters = parameters
    self.wealth = 0
    # Matches emergent-function names of the form f<digits>.<digits>_<n>_<n>_<n>.
    # NOTE(review): obtain_trade_list mints ids as "f<time>_<buyer>_<seller>_<trade>"
    # with no dot — confirm the dotted first field here matches some other minting site.
    self.emergent_pattern = re.compile(r'^f\d+\.\d+_\d+_\d+_\d+')
    self.item_type_pattern = re.compile(r'^([a-zA-Z0-9]+)_?')  # first underscore-delimited token
    self.test_item_type_pattern = re.compile(r'^test_([a-zA-Z0-9]+)_?')  # token after a 'test_' prefix
@abstractmethod
def step(self):
    """Submit (or decline to submit) a new message into this agent's slot
    on the blackboard for the current simulation tick.

    Human agents do not submit new messages during a run — they operate on a
    different time scale.  Machine-learning agents that submit a message here
    see its detailed results reflected on the blackboard the next time this
    is called: which messages other agents bought from self, self's test
    scores, and the money given to self.  The change in net AGI tokens since
    the previous call is the result of the previous message and can serve as
    a reward for reinforcement/machine learning (see the
    payment_notification convenience method for tracking the net).  On the
    final step the agent may observe the last results and submit None.
    """
    pass
@abstractmethod
def payment_notification(self, agi_tokens, tradenum):
    """Notify the agent that its wealth changed by *agi_tokens* (possibly
    negative) on trade *tradenum*.

    The blackboard may be examined for more information on which parts of
    the trade plans resulted in what payments, test scores, etc.  Called
    after each transfer.
    """
    pass
@abstractmethod
def seller_score_notification(self, score, tradenum):
    """Notify the agent of a test *score* recorded on trade *tradenum* in
    its role as seller.

    NOTE(review): the original comment here was copy-pasted from
    payment_notification and described an agi_tokens wealth change; the
    signature indicates a seller-side test score — confirm intent.  The
    blackboard may be examined for details on payments and test scores.
    """
    pass
@abstractmethod
def buyer_score_notification(self, score, tradenum):
    """Notify the agent of a test *score* recorded on trade *tradenum* in
    its role as buyer.

    NOTE(review): the original comment here was copy-pasted from
    payment_notification and described an agi_tokens wealth change; the
    signature indicates a buyer-side test score — confirm intent.  The
    blackboard may be examined for details on payments and test scores.
    """
    pass
def price_overlap(self, buy, sell):
    """Return True when the buyer's midpoint price meets or exceeds the
    seller's midpoint, i.e. the two offers can transact.

    An earlier range-based overlap rule (midpoint +/- range on both sides)
    was abandoned in favor of this simple midpoint comparison.
    """
    return buy['midpoint'] >= sell['midpoint']
def price(self, buy, sell):
    """Return the agreed transaction price: the mean of the two midpoints.

    An earlier rule took the midpoint of the overlap of the two
    midpoint +/- range intervals; the current rule simply averages the
    two midpoints.
    """
    midpoint_sum = buy['midpoint'] + sell['midpoint']
    return midpoint_sum / 2.0
def set_message(self, message):
    """Store *message* on the agent and publish it to the agent's
    blackboard slot (keyed by unique_id)."""
    self.message = message
    self.model.blackboard[self.unique_id] = message
def gather_offers(self):
    """For every buy trade in this agent's plan, collect matching sell offers
    from other agents and convert their similarities to probabilities.

    A sell matches a buy when the sell item starts with the buy item's
    category (text before '_stop') and price_overlap holds.  Each matching
    agent yields one offer dict ('agent', 'cosine_sim', 'trades'); the
    similarity is a weighted sum (parameter 'buyers_weight') of the cosine
    similarity between the buyer's sought sign and the seller's displayed
    sign, and vice versa.  In a second pass, the min-shifted similarities of
    each buy's offers are normalized into 'probability' values — the lowest
    similarity therefore never wins, so learned signs keep an edge over
    random ones (a random cosine similarity can still be around .60).
    Trades after a 'stop' codon on either side are ignored.
    """
    print("In gather_offers," + self.b[self.unique_id]['label'])
    buyer_stop_codon_reached = False
    for buy in self.message['trades']:
        if (not buyer_stop_codon_reached) and buy['type'] == 'buy':
            offers = []
            for i, message in enumerate(self.b):
                if i != self.unique_id:  # never trade with oneself
                    offer = None
                    seller_stop_codon_reached = False
                    for j, sell in enumerate(message['trades']):
                        if (not seller_stop_codon_reached) and sell['type'] == 'sell':
                            stop_cut_off = buy['item'].split('_stop')
                            if sell['item'].startswith(stop_cut_off[0]) and self.price_overlap(buy, sell):
                                if not offer:
                                    # First the distance between the buyer's sought and the seller's displayed sign
                                    sought_sign = np.array(buy['sign']).reshape(-1, len(buy['sign']))
                                    displayed_sign = np.array(self.b[i]['sign']).reshape(-1, len(self.b[i]['sign']))
                                    buyers_sim = cosine_similarity(sought_sign, displayed_sign)
                                    if buyers_sim:  # 1x1 array: truthy unless empty/zero
                                        buyers_sim = buyers_sim.flatten()[0]
                                    # Next the distance between the seller's sought and the buyer's displayed sign
                                    sought_sign = np.array(sell['sign']).reshape(-1, len(sell['sign']))
                                    displayed_sign = np.array(self.message['sign']).reshape(-1,
                                                                                            len(self.message['sign']))
                                    sellers_sim = cosine_similarity(sought_sign, displayed_sign)
                                    if sellers_sim:
                                        sellers_sim = sellers_sim.flatten()[0]
                                    # weighted sum of the buyer's and seller's similarities
                                    sim = (self.p['buyers_weight'] * buyers_sim) + ((1 - self.p['buyers_weight']) * sellers_sim)
                                    offer = OrderedDict([('agent', i), ('cosine_sim', sim), ('trades', [])])
                                    offers.append(offer)
                                # every further matching sell from the same agent joins the same offer
                                offer['trades'].append(j)
                        elif sell['type'] == 'stop':
                            seller_stop_codon_reached = True
            buy['offers'] = offers
        elif buy['type'] == 'stop':
            buyer_stop_codon_reached = True
    # convert cosine distances into probabilities
    buyer_stop_codon_reached = False
    for buy in self.message['trades']:
        if (not buyer_stop_codon_reached) and buy['type'] == 'buy':
            if len(buy['offers']) > 1:
                minimum = 1.0
                for offer in buy['offers']:
                    if offer['cosine_sim'] < minimum:
                        minimum = offer['cosine_sim']
                simsum = 0
                for offer in buy['offers']:
                    simsum += (offer['cosine_sim'] - minimum)
                # NOTE(review): if all offers share the same similarity,
                # simsum is 0 and this divides by zero — confirm upstream
                # guarantees distinct similarities.
                for offer in buy['offers']:
                    offer['probability'] = (offer['cosine_sim'] - minimum) / simsum
            elif len(buy['offers']) == 1:
                buy['offers'][0]['probability'] = 1.0
        elif buy['type'] == 'stop':
            buyer_stop_codon_reached = True
def retrieve_ontology_item(self, cumulative_category):
    """Walk the ontology along an underscore-separated path such as
    'moregeneral_lessgeneral_evenmorespecific' and return the node reached.

    The path must start at the ontology root; the literal component
    'ontology' is skipped (it names the root itself).  Returns an empty
    OrderedDict when the path is empty or a component is missing.
    """
    if not cumulative_category:
        return OrderedDict()
    node = self.o
    for part in cumulative_category.split('_'):
        if part in node:
            node = node[part]
        elif part != 'ontology':
            return OrderedDict()
    return node
def descendants(self, cumulative_category):
    """Return the most specific ontology paths under *cumulative_category*.

    A category with no sub-dictionaries (ignoring '_'-prefixed metadata
    keys) is a leaf and is returned as itself — e.g. a test callable with a
    single tested program, or a category that directly yields data.  The
    special root name 'ontology' is not prepended to child paths.
    """
    node = self.retrieve_ontology_item(cumulative_category)
    child_names = [name for name, sub in node.items()
                   if not name.startswith('_') and isinstance(sub, dict)]
    if not child_names:
        return [cumulative_category]
    leaves = []
    for name in child_names:
        if cumulative_category == 'ontology':
            leaves.extend(self.descendants(name))
        else:
            leaves.extend(self.descendants(cumulative_category + '_' + name))
    return leaves
def perform_test(self, function_list):
    """Assemble *function_list* into a call tree and execute it via the model.

    Returns a tuple (gepResult, score, pickle_name) where gepResult is the
    OrderedDict call tree from modular_gep, and score / pickle_name come
    from the model's call_emergent_function result tuple (score defaults to
    0 and pickle_name to "" when absent).  Nothing is executed when the
    call tree has no bound arguments at all.
    """
    score = 0
    pickle_name = ""
    gepResult = self.modular_gep(function_list)  # ordered dict a decorated function can access globally
    if any(gepResult.values()):
        # first key of the ordered dict is the root of the call tree
        root = next(iter(gepResult.items()))[0]
        score_tuple = self.model.call_emergent_function(gepResult, root)
        if score_tuple and len(score_tuple) and score_tuple[0]:
            pickle_name = score_tuple[0]
        if score_tuple and len(score_tuple) > 1 and score_tuple[1]:
            score = score_tuple[1]
    result = (gepResult, score, pickle_name)
    return result
def clean_item_name(self, name):
    """Return *name* with the model's call-number prefix stripped first,
    then its suffix stripped."""
    without_prefix = self.model.remove_prefix(name)
    return self.model.remove_suffix(without_prefix)
def original_arity(self, func):
    """Return the arity of *func* as declared by the '_args' list of its
    ontology entry, or None when the ontology has no such declaration."""
    description = self.retrieve_ontology_item(func)
    if description and "_args" in description:
        return len(description["_args"])
    return None
def arity(self, func):
    """Arity of *func* after cleaning its name: the model's emergent-function
    arity table takes precedence, falling back to the ontology declaration."""
    key = self.clean_item_name(func)
    if key in self.model.emergent_functions_arity:
        return self.model.emergent_functions_arity[key]
    return self.original_arity(key)
def function_list_arity(self, function_list_dont_modify):
    """Compute the arity of a whole Karva-ordered function list: the number
    of unbound argument slots left over after laying the list out level by
    level (see gep for the layout rules).

    The input list is copied, not modified.  At each level the combined
    arity of the level's functions determines how many of the following
    list entries form the next level; the running result is the shortfall
    between slots demanded and entries supplied at the last level filled.
    """
    function_list = []
    function_list.extend(function_list_dont_modify)
    arity = 0
    if function_list:
        levels = OrderedDict([(1, [function_list.pop(0)])])
        current_level = 1
        more = True
        while more:
            length_next_level = 0
            for func in levels[current_level]:
                arity = self.arity(func)
                length_next_level += arity
            current_level += 1
            levels[current_level] = function_list[0:length_next_level]
            function_list = function_list[length_next_level:]
            # unfilled slots at this level = demanded minus actually supplied
            arity = length_next_level - len(levels[current_level])
            if not length_next_level:
                break
            more = function_list or levels[current_level]
    return arity
def next_call_number_prefix(self):
    """Return a fresh 'f<N>_' call-number prefix and advance the
    model-wide counter."""
    call_number = self.model.emergent_functions_call_number
    self.model.emergent_functions_call_number = call_number + 1
    return 'f' + str(call_number) + '_'
def prefix_call_numbers(self, function_list):
    """Return *function_list* with each entry given an 'f<N>_' call-number
    prefix, reusing the prefixed name recorded in functions_once for
    repeated entries.

    NOTE(review): the membership check compares the raw name against the
    list of already-*prefixed* names, so it is effectively always true and
    every occurrence (including duplicates) receives a fresh prefix,
    overwriting functions_once[function].  If the intent was one prefix per
    distinct function, the check should be ``function not in
    functions_once`` — confirm intended semantics before changing.
    """
    prefix_call_numbers = []
    functions_once = OrderedDict()
    for function in function_list:
        if function not in prefix_call_numbers:
            prefix = self.next_call_number_prefix()
            functions_once[function] = prefix + function
        prefix_call_numbers.append(functions_once[function])
    return prefix_call_numbers
def gep(self, function_list_dont_modify):
    """Lay a flat function list out as a call tree using Karva notation and
    return an OrderedDict mapping each (call-number-prefixed) function to
    the list of functions feeding its inputs.

    The arity of the items divides the list into levels, then assignments
    are made across adjacent levels.  Example — for the program list and
    arities below, the Karva result is:
      list  = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s']
      arity = {'a':2,'b':3,'c':2,'d':2,'e':1,'f':1,'g':2,'h':1,'i':1,'j':1,
               'k':0,'l':0,'m':1,'n':1,'o':0,'p':0,'q':0,'r':0,'s':0}
      result= {'a':['b','c'],'b':['d','e','f'],'c':['g','h'],'d':['i','j'],
               'e':['k'],'f':['l'],'g':['m','n'],'h':['o'],'i':['p'],
               'j':['q'],'m':['r'],'n':['s']}
    The input list is not modified; entries are first given unique call
    prefixes via prefix_call_numbers.
    """
    gep_result = OrderedDict()
    function_list = self.prefix_call_numbers(function_list_dont_modify)
    if function_list:
        # divide into levels
        levels = OrderedDict([(1, [function_list.pop(0)])])
        current_level = 1
        while function_list:
            length_next_level = 0
            for func in levels[current_level]:
                noprefix = self.model.remove_prefix(func)
                arity = self.arity(noprefix)
                length_next_level += arity
            current_level += 1
            levels[current_level] = function_list[0:length_next_level]
            function_list = function_list[length_next_level:]
            if not length_next_level:
                break
        # make assignments
        for level, function_list in levels.items():
            next_level = level + 1
            cursor = 0
            for func in function_list:
                noprefix = self.model.remove_prefix(func)
                arity = self.arity(noprefix)
                next_cursor = cursor + arity
                if next_level in levels:
                    gep_result[func] = levels[next_level][cursor:next_cursor]
                else:
                    gep_result[func] = []
                cursor = next_cursor
    return gep_result
def get_all_emergent_subroutines(self, function_list):
    """Return, as a list, every emergent function reachable from
    *function_list*: each emergent entry contributes itself plus its own
    emergent subroutines (recursively)."""
    reachable = set()
    for name in function_list:
        if name in self.model.emergent_functions:
            reachable |= self.get_all_emergents_set(name)
    return list(reachable)
def get_all_emergents_set(self, function_name, call_depth=0):
    """Recursively collect *function_name* and every emergent function it
    calls, as a set.  Recursion stops once call_depth reaches the
    'recursive_trade_depth_limit' parameter; unknown names yield an
    empty set."""
    collected = set()
    if call_depth >= self.p["recursive_trade_depth_limit"]:
        return collected
    if function_name in self.model.emergent_functions:
        for child in self.model.emergent_functions[function_name]:
            collected |= self.get_all_emergents_set(child, call_depth=call_depth + 1)
        collected.add(function_name)
    return collected
def gep_clean(self, gepResult):
    """Return True when no name in *gepResult* (keys or nested argument
    values, prefix-stripped by geptuple) matches the emergent-function
    name pattern."""
    for name in self.geptuple(gepResult):
        if name and self.emergent_pattern.match(name):
            return False
    return True
def modular_gep(self, function_list):
    """Build a complete call tree for *function_list*, inlining emergent
    subroutines so the result contains only registry functions.

    Walks the list and, for every emergent function it references
    (recursively), builds that function's own gep call tree; the root list's
    tree plus all subroutine trees are then merged by combine_modules into a
    single tree of registry functions.  If the combined result still
    contains an emergent name — i.e. a recursive reference survived — it is
    rejected and an empty OrderedDict is returned.
    """
    emergent_functions = self.get_all_emergent_subroutines(function_list)
    gep_ordered_dict = OrderedDict()
    gep_ordered_dict['root'] = self.gep(function_list)
    gep_dict = OrderedDict([(f, self.gep(self.model.emergent_functions[f])) for f in emergent_functions])
    gep_ordered_dict.update(gep_dict)
    gep_result = self.combine_modules(gep_ordered_dict)
    if not self.gep_clean(gep_result):
        print('recursive function not allowed :')
        print(gep_result)
        gep_result = OrderedDict()
    return gep_result
def make_equivalent_gep(self, gep_result):
    """Return a structurally identical copy of *gep_result* whose call-number
    prefixes are all replaced by freshly minted ones, consistently: every
    occurrence of an old prefix (on keys and on argument names) maps to the
    same new prefix, so the call graph's shape is preserved while its names
    become globally unique.
    """
    prefix_map = OrderedDict()
    equivalent_gep = OrderedDict()
    # first pass: mint one new prefix per distinct old prefix
    for func_name, arglist in gep_result.items():
        prefix = self.model.get_call_prefix(func_name)
        if prefix not in prefix_map:
            prefix_map[prefix] = self.next_call_number_prefix()
        for arg in arglist:
            prefix = self.model.get_call_prefix(arg)
            if prefix not in prefix_map:
                prefix_map[prefix] = self.next_call_number_prefix()
    # second pass: rewrite every name through the prefix map
    for func_name, arglist in gep_result.items():
        new_arglist = []
        for arg in arglist:
            prefix = self.model.get_call_prefix(arg)
            if prefix and (prefix in prefix_map):
                new_arglist.append(prefix_map[prefix] + self.model.remove_prefix(arg))
            else:
                # NOTE(review): an argument without a recognizable prefix is
                # silently dropped from the new arglist — confirm intended.
                print('null prefix')
        prefix = self.model.get_call_prefix(func_name)
        new_func_name = prefix_map[prefix] + self.model.remove_prefix(func_name)
        equivalent_gep[new_func_name] = new_arglist
    return equivalent_gep
def find_unbounds(self, gep_result):
    """Return the names in *gep_result* (keys or argument entries) that
    declare a positive arity but have no argument list bound to them —
    i.e. functions whose inputs still need to be supplied.

    Names whose arity cannot be determined (arity() returns None) are
    ignored.  The result is an unordered list (built from a set).
    """
    unbounds = set()
    for func, arglist in gep_result.items():
        regular_function_arity = self.arity(func)
        if regular_function_arity is not None:
            if regular_function_arity > 0 and len(gep_result[func]) == 0:
                unbounds.add(func)
        for arg in arglist:
            regular_function_arity = self.arity(arg)
            if regular_function_arity is not None:
                if regular_function_arity > 0 and (arg not in gep_result or len(gep_result[arg]) == 0):
                    unbounds.add(arg)
    return list(unbounds)
def flattern(self, A):
    """Recursively flatten arbitrarily nested lists in *A* into one flat
    list, preserving left-to-right order.

    (The name 'flattern' is kept as-is for compatibility with existing
    callers.)
    """
    return [leaf
            for element in A
            for leaf in (self.flattern(element) if isinstance(element, list) else [element])]
def geptuple(self, gepresult):
    """Return a tuple of all distinct, prefix-stripped names appearing in
    *gepresult* — both its keys and every name nested in its argument
    lists.  An empty gep result yields an empty tuple; ordering is not
    guaranteed (the names pass through a set)."""
    if not len(gepresult):
        return tuple()
    names = self.flattern(gepresult.values())
    names.extend(gepresult.keys())
    stripped = {self.model.remove_prefix(name) for name in names}
    return tuple(stripped)
def combine_modules(self, emergent_function_dict_dont_modify):
    """Merge a dict of gep call trees (keyed 'root' plus one entry per
    emergent subroutine) into a single call tree containing only registry
    functions, and return the merged 'root' tree.

    Algorithm, repeated until no tree references an emergent function (or no
    progress is made):
      - partition trees into those that still use emergent functions and
        those that don't;
      - for every user tree and every non-user tree it references: find each
        usage key, clone the non-user tree with fresh call numbers
        (make_equivalent_gep), rename the clone's root to carry the usage's
        call prefix, repoint argument lists at the new root, bind the
        usage's arguments to the clone's unbound inputs, splice the clone
        into the user tree, and drop the usage key (moving the new root to
        the front when the usage was the root);
      - finally remove the inlined emergent function from the working dict.
    The input dict is deep-copied and never modified.

    NOTE(review): the post-loop cleanup uses `name` (the last usage key seen
    in the inner loops) — if a user tree has no usages of a given non-user
    tree, `name` carries over from a previous iteration; confirm this can
    never mis-remove an entry.
    """
    emergent_function_dict = copy.deepcopy(emergent_function_dict_dont_modify)
    # trees that still reference at least one emergent function
    use_emergent_functions = OrderedDict(
        [(fname, gep_result) for fname, gep_result in emergent_function_dict.items() if
         not self.gep_clean(gep_result)])
    previous_length = sys.maxsize
    # loop while there are emergent users AND we shrank the set last pass
    while (not len(use_emergent_functions) == 0) and len(use_emergent_functions) < previous_length:
        previous_length = len(use_emergent_functions)
        use_only_non_emergent_functions = OrderedDict(
            [(fname, gep_result) for fname, gep_result in emergent_function_dict.items()
             if self.gep_clean(gep_result)])
        # we are depending on emergent_function_dict contents to be passed by value
        for user_name, user_gep_result in use_emergent_functions.items():
            for non_user_name, non_user_gep_result in use_only_non_emergent_functions.items():
                # the usage keys of this emergent function inside the user tree
                emergent_funct_usages = OrderedDict([(name, gep_results) for name, gep_results in
                                                     user_gep_result.items() if non_user_name in name])
                # one freshly-renumbered clone of the emergent tree per usage
                combined_functions = [self.make_equivalent_gep(non_user_gep_result) for _ in emergent_funct_usages]
                for i, (name, arg_list) in enumerate(emergent_funct_usages.items()):
                    prefix = self.model.get_call_prefix(name)
                    gep_result = combined_functions[i]
                    if (len(gep_result)):
                        # rename the clone's root to carry the usage's call prefix
                        root = next(iter(gep_result.items()))[0]
                        new_name = prefix + self.model.remove_prefix(root)
                        gep_result[new_name] = gep_result.pop(root)
                        gep_result.move_to_end(new_name, last=False)
                        # repoint every argument list that referenced the usage
                        for fname, arglist in user_gep_result.items():
                            if name in arglist:
                                arglist[arglist.index(name)] = new_name
                        # bind the usage's arguments to the clone's unbound inputs
                        input_assignments = OrderedDict()
                        unbound_list = self.find_unbounds(gep_result)
                        cursor = 0
                        for unbound in unbound_list:
                            arity = self.arity(unbound)
                            next_cursor = cursor + arity
                            input_assignments[unbound] = arg_list[cursor:next_cursor]
                            cursor = next_cursor
                        # splice the clone back into the user tree
                        user_gep_result.update(gep_result)
                        if list(user_gep_result.keys()).index(name) == 0:  # this is the root
                            user_gep_result.move_to_end(new_name, last=False)
                        user_gep_result.update(input_assignments)
                        user_gep_result.pop(name)
                    else:  # empty function: drop the usage and blank its references
                        user_gep_result.pop(name)
                        for fname, arglist in user_gep_result.items():
                            if name in arglist:
                                arglist[arglist.index(name)] = None
                # remove the inlined emergent function from the working dict
                ename = self.model.remove_prefix(name)
                if ename in emergent_function_dict:
                    emergent_function_dict.pop(ename)
        use_emergent_functions = OrderedDict(
            [(fname, gep_result) for fname, gep_result in emergent_function_dict.items()
             if not self.gep_clean(gep_result)])
    result = emergent_function_dict['root']
    return (result)
def transfer_funds(self, buyer, buynum, seller, sellnum, price):
    """Move *price* tokens from *buyer* to *seller* and notify each party of
    its wealth change on its respective trade number."""
    seller.wealth += price
    buyer.wealth -= price
    buyer.payment_notification(-price, buynum)
    seller.payment_notification(price, sellnum)
def distribute_funds(self, buy, buynum):
    """Pay the seller chosen for *buy* and recursively distribute payment
    down through any settled buys inside the purchased trade span.

    The purchased span runs from the chosen sell trade up to (not including)
    the first stop marker.  If the span is non-empty, the agreed price is
    transferred to the selling agent, then every settled buy inside the span
    triggers that seller's own distribute_funds for its sub-purchase.

    NOTE(review): when no stop marker is found, stopval stays -1 and
    ``sells[0:-1]`` drops the last trade of the span — confirm this is the
    intended "implicit stop at end" behavior.
    NOTE(review): the buylist filter uses truthiness of sell['chosen'],
    which skips a chosen offer index of 0; obtain_trade_list uses
    ``is not None`` for the same test — confirm which is intended.
    """
    offer = buy['offers'][buy['chosen']]
    sellnum = offer['trades'][0]
    sells = self.b[offer['agent']]['trades'][offer['trades'][0]:]
    found = False
    stopval = -1
    for i, trade in enumerate(sells):
        if (not found) and (trade['item'].startswith('stop') or trade['type'] == 'stop'):
            found = True
            stopval = i
    sells = sells[0:stopval]
    # now find the buys and recurse into settled sub-purchases (or ignore)
    if sells:
        self.transfer_funds(self, buynum, self.model.schedule.agents[offer['agent']], sellnum, buy['price'])
        buylist = [i for i, sell in enumerate(sells) if sell['type'] == 'buy' and 'chosen' in sell and sell['chosen']]
        for i in buylist:
            sellnum = offer['trades'][0] + i
            seller = self.model.schedule.agents[offer['agent']]
            seller.distribute_funds(sells[i], sellnum)
def obtain_trade_list(self, offer, call_depth=0):
    """Materialize the software referenced by *offer* as an emergent
    function and return its unique id (or None past the recursion limit).

    Software being sold is the run of trades from the first sold item to the
    end of the trades or a stop codon.  If one of those trades is a settled
    buy (it has a chosen offer), that sub-software is obtained recursively
    and substituted in place; recursion depth is bounded by the
    'recursive_trade_depth_limit' parameter.  The id is built from the
    schedule time, this agent, the selling agent, and the first trade index,
    so the same span in the same tick is registered only once; the resulting
    function list (filtered to names present in the registry or the
    emergent-function table) and its arity are stored on the model.

    todo: implement more than one item sold by agent; for now just take the
    first one.
    NOTE(review): as in distribute_funds, a missing stop marker leaves
    stopval at -1 and drops the span's last trade — confirm intended.
    """
    if call_depth > self.p["recursive_trade_depth_limit"]:
        unique_id = None
    else:
        unique_id = "f" + str(self.model.schedule.time) + "_" + str(self.unique_id) \
                    + "_" + str(offer['agent']) + "_" + str(offer['trades'][0])
        if unique_id not in self.model.emergent_functions:
            sells = self.b[offer['agent']]['trades'][offer['trades'][0]:]
            found = False
            stopval = -1
            for i, trade in enumerate(sells):
                if (not found) and (trade['item'].startswith('stop') or trade['type'] == 'stop'):
                    found = True
                    stopval = i
            sells = sells[0:stopval]
            # indexes of settled buys inside the span (chosen offer present)
            buylist = [i for i, sell in enumerate(sells) if sell['type'] == 'buy'
                       and 'chosen' in sell and sell['chosen'] is not None]
            # strip '_stop...' suffixes from item names
            clean_funcs = []
            for sell in sells:
                func = sell['item']
                clean_func = func.split('_stop')
                clean_funcs.append(clean_func[0])
            # substitute recursively-obtained sub-software for settled buys
            for i in buylist:
                sell = sells[i]
                depth = call_depth + 1
                clean_funcs[i] = self.obtain_trade_list(sell['offers'][sell['chosen']], call_depth=depth)
            # keep only programs known to the registry or already emergent
            only_existing = [program for program in clean_funcs
                             if program and ((self.model.remove_suffix(program) in self.model.registry or
                                              program in self.model.emergent_functions))]
            self.model.emergent_functions[unique_id] = only_existing
            self.model.emergent_functions_arity[unique_id] = self.function_list_arity(only_existing)
    return unique_id
def pass_all_tests(self, buy, offernum):
    """Run the buyer's required tests against the software behind an offer.

    The item under test is everything from the offered sell statement up to
    the next stop codon (settled buys inside it are resolved to the bought
    software). Every test listed before the first stop codon must be passed;
    a '_stop' midway through a test or data name marks a *general* category,
    so passing any test/data within that category over the threshold counts.
    Scores are recorded on the test dict as feedback regardless of outcome.

    Returns (pass_all_tests, gepResult, final_score, pickle_name).
    """
    cumulative_score = 0
    pickle_name = ""
    gepResult = None
    pass_all_tests = True
    itemlist = None
    numtests = 0
    if offernum is not None:
        # Resolve the offer into the (emergent) function it sells.
        func_name = self.obtain_trade_list(buy['offers'][offernum])
        itemlist = self.model.emergent_functions[func_name]
        if itemlist:
            stop_codon_reached = False
            for test_dict in buy['tests']:
                if test_dict['stophere']:
                    stop_codon_reached = True
                if not stop_codon_reached:
                    # Strip the stop codon: the part before it is the real
                    # test / data category; empty means stop for all tests.
                    stop_cut_off = test_dict['test'].split('_stop')
                    clean_test = stop_cut_off[0]
                    stop_cut_off = test_dict['data'].split('_stop')
                    clean_data = stop_cut_off[0]
                    if not clean_test or not clean_data:
                        stop_codon_reached = True
                    else:
                        # The categories may be general, so expand them to
                        # all concrete tests / data they contain.
                        testlist = self.descendants(clean_test)
                        if not testlist:
                            testlist.append(clean_test)
                        datalist = self.descendants(clean_data)
                        if not datalist:
                            datalist.append(clean_data)
                        anypass = False
                        for test in testlist:
                            for data in datalist:
                                # Only run tests whose item type matches the
                                # software's item type.
                                item_type = self.item_type(itemlist[0])
                                test_item_type = self.test_item_type(test)
                                if item_type and (item_type == test_item_type) and len(self.retrieve_ontology_item(test)):
                                    program_list = [test]
                                    program_list.extend(itemlist)
                                    program_list.append(data)
                                    # Non-coding segments: drop functions that
                                    # are neither registered nor emergent.
                                    program_list = [program for program in program_list
                                                    if (self.model.remove_suffix(program) in self.model.registry or
                                                        program in self.model.emergent_functions)]
                                    gepResult, score, pickle_name = self.perform_test(program_list)
                                    # Record the score no matter what, as feedback.
                                    if 'results' not in test_dict:
                                        test_dict['results'] = []
                                    result = OrderedDict([('offer', offernum), ('score', score),
                                                          ('time', self.model.schedule.time),
                                                          ('test', test), ('data', data)])
                                    seller = self.model.schedule.agents[buy['offers'][offernum]["agent"]]
                                    tradenum = buy['offers'][offernum]["trades"][0]
                                    seller.seller_score_notification(score, tradenum)
                                    numtests += 1
                                    cumulative_score += score
                                    test_dict['results'].append(result)
                                    if score is not None and score > test_dict['threshold']:
                                        # Any pass within the general group is a pass.
                                        anypass = True
                                    else:
                                        pass_all_tests = False
                        if not anypass:
                            pass_all_tests = False
                elif gepResult is None and itemlist:
                    # Tests stopped before producing code: still assemble the
                    # item's code so a passing offer can deliver it.
                    gepResult = self.modular_gep(itemlist)
        else:
            # The seller does not actually have the item.
            pass_all_tests = False
    else:
        pass_all_tests = False
    final_score = cumulative_score / numtests if numtests else 0
    results = (pass_all_tests, gepResult, final_score, pickle_name)
    return results
def choose_partners(self):
    """For every open buy trade, pick the offer that fills its slot.

    Offers are tried in descending probability order; the first one whose
    software passes all required tests wins. A winning choice records the
    chosen offer, the negotiated price, the bought code and its pickle,
    notifies the buyer's score, and — when this message distributes — pays
    out along the chain of contracted prices. Once accepted, a buy is not
    redone (no 'chosen' key means it is still open).
    """
    for buynum, buy in enumerate(self.message['trades']):
        if buy['type'] == 'buy' and 'offers' in buy and buy['offers'] and 'chosen' not in buy:
            weighted_choices = OrderedDict([(offernum, offer['probability'])
                                            for offernum, offer in enumerate(buy['offers'])])
            # Highest-probability offers are tried first.
            sorted_choices = sorted(weighted_choices.items(), key=lambda x: x[1], reverse=True)
            found = False
            count = 0
            while not found and count < len(sorted_choices):
                winning_num = sorted_choices[count][0]
                count += 1
                pass_all_tests, gepResult, max_score, pickle_name = self.pass_all_tests(buy, winning_num)
                if pass_all_tests:
                    found = True
                    buy['chosen'] = winning_num
                    # The seller's sell trade that this offer points at.
                    sell = self.b[buy['offers'][winning_num]['agent']]['trades'][buy['offers'][winning_num]['trades'][0]]
                    buy['price'] = self.price(buy, sell)
                    buy['code'] = gepResult
                    buy['pickle'] = self.model.pickles[pickle_name]
                    self.buyer_score_notification(max_score, buynum)
                    if 'distributes' in self.message and self.message['distributes']:
                        self.distribute_funds(buy, buynum)
# functions that translate the float vec to a trade plan that can go on the blackboard
def vector_size(self):
    """Total length of the float genome: the agent sign plus all trade plans."""
    sign_len = self.p['sign_size']
    plans_len = self.p['num_trade_plans'] * self.trade_size()
    return sign_len + plans_len
def trade_size(self):
    """Float slots one trade occupies: type, sign, item, midpoint/range, tests."""
    p = self.p
    fixed_part = 1 + p['sign_size'] + p['item_size'] + 2
    return fixed_part + p['num_tests'] * self.test_size()
def test_size(self):
return 2 * self.p['item_size'] + 3
def trade_type(self, afloat):
    """Decode afloat into 'buy'/'construct'/'sell', or 'stop' when the roll
    lands in the reserved stop-codon space."""
    # OrderedDict keeps the choice order stable on pre-3.7 interpreters.
    options = OrderedDict([('buy', 1), ('construct', 1), ('sell', 1)])
    picked = self.choose_weighted(
        options, afloat, previously_taken_space=self.p['chance_of_stop_codon'])
    return picked if picked else 'stop'
def float_for_trade_type(self, trade_type):
    """Inverse of trade_type(): encode a trade type as a float.

    Types without an encoding (e.g. 'stop') map to a random float drawn
    outside the reserved stop-codon space.
    """
    stop_space = self.p['chance_of_stop_codon']
    # Draw the fallback up front (it is only returned when no encoding exists).
    fallback = random.uniform(stop_space, 1.0)
    options = OrderedDict([('buy', 1), ('construct', 1), ('sell', 1)])
    encoded = self.float_for_weighted_choice(
        options, trade_type, previously_taken_space=stop_space)
    return fallback if encoded is None else encoded
def hidden(self, afloat):
    """Decode afloat into a boolean hidden flag (True/False equally weighted)."""
    coin = OrderedDict([(True, 1), (False, 1)])
    return self.choose_weighted(coin, afloat)
def stop(self, afloat):
    """Decode afloat into a boolean stop flag (True/False equally weighted)."""
    coin = OrderedDict([(True, 1), (False, 1)])
    return self.choose_weighted(coin, afloat)
def float_for_stop(self, isStop):
    """Inverse of stop(): encode the boolean flag as a float."""
    coin = OrderedDict([(True, 1), (False, 1)])
    return self.float_for_weighted_choice(coin, isStop)
def float_for_hidden(self, isHidden):
    """Inverse of hidden(): encode the boolean flag as a float."""
    coin = OrderedDict([(True, 1), (False, 1)])
    return self.float_for_weighted_choice(coin, isHidden)
def weights_for_level(self, cumulative_category):
    """Map each visible child of the ontology category to its '_weight'.

    Children whose name starts with '_', that are not dicts, or that carry
    no '_weight' key are skipped. 'stop' and any '..._stop' category have
    no children, so they yield an empty OrderedDict.
    """
    if cumulative_category == 'stop' or cumulative_category.endswith('_stop'):
        return OrderedDict()
    children = self.retrieve_ontology_item(cumulative_category)
    return OrderedDict(
        (name, sub['_weight'])
        for name, sub in children.items()
        if not name.startswith('_') and isinstance(sub, dict) and '_weight' in sub)
def parameters_set(self, cumulative_category):
    """True when the category is at its last ontology level.

    The trailing stop codon (if any) is stripped first; the bare 'stop'
    codon never counts, and a category with weighted children left to
    choose is not at the last level yet.
    """
    category = cumulative_category
    if category.endswith('_stop'):
        category = category[:-len('_stop')]
    if category == 'stop':
        return False
    return not self.weights_for_level(category)
def is_stochastic(self, cumulative_category):
    """Return True when the ontology item is explicitly marked non-deterministic.

    An item with no '_deterministic' key is treated as deterministic, so
    the default for the .get() lookup is True.
    """
    description = self.retrieve_ontology_item(cumulative_category)
    return not description.get('_deterministic', True)
def stochastic_roll(self):
    """Pick one of the configured stochastic copies uniformly at random."""
    copies = self.p['stochastic_copies']
    uniform_weights = OrderedDict((index, 1) for index in range(copies))
    return self.choose_weighted(uniform_weights, random.uniform(0, 1))
def ontology_item(self, float_vec, category='ontology', include_stop=True):
    """Decode a float vector into a cumulative ontology category name.

    Starting at `category`, each float selects one weighted child and the
    choice is appended as 'parent_child_...'. Decoding ends early when a
    level has no weighted children; if that leaf has all parameters set and
    is stochastic, a '_stochasticN' suffix (random copy index) is appended.
    With include_stop, part of each roll's space is reserved for the 'stop'
    codon.
    """
    cumulative_category = category
    for afloat in float_vec:
        weighted_choices = self.weights_for_level(cumulative_category)
        if not any(weighted_choices.values()):
            # End of what is determined; possibly tag a stochastic leaf.
            if self.parameters_set(cumulative_category) and self.is_stochastic(cumulative_category):
                cumulative_category = cumulative_category + "_stochastic" + str(self.stochastic_roll())
            break
        roll = afloat
        if include_stop:
            # Default to 'stop': choose_weighted returns None when the roll
            # lands in the reserved stop-codon space.
            choice = 'stop'
            previously_taken_space = self.p['chance_of_stop_codon']
        else:
            previously_taken_space = 0
        guess = self.choose_weighted(weighted_choices, roll, previously_taken_space=previously_taken_space)
        if guess:
            choice = guess
        if cumulative_category == 'ontology':
            cumulative_category = choice
        else:
            cumulative_category = cumulative_category + "_" + choice
    return cumulative_category
def floats_for_ontology_item(self, ontology_item, include_stop=True, skip=0):
    """Inverse of ontology_item(): encode a category name as item_size floats.

    One float is generated per '_'-separated component; the rest of the
    vector is padded with uniform noise. `skip` drops that many leading
    floats (for names whose first components are implied by context, e.g.
    test and data categories) and appends random floats to keep the length
    at item_size.
    """
    float_list = []
    category_list = ontology_item.split("_")
    cumulative_category = "ontology"
    for choice in category_list:
        weighted_choices = self.weights_for_level(cumulative_category)
        if include_stop:
            previously_taken_space = self.p['chance_of_stop_codon']
        else:
            previously_taken_space = 0
        float_guess = self.float_for_weighted_choice(weighted_choices, choice, previously_taken_space)
        if float_guess == None:
            # Component has no encoding at this level: fall back to any
            # float outside the reserved stop-codon space.
            float_guess = random.uniform(self.p['chance_of_stop_codon'], 1.0)
        float_list.append(float_guess)
        if cumulative_category == 'ontology':
            cumulative_category = choice
        else:
            cumulative_category = cumulative_category + "_" + choice
    # Pad with uniform noise so the result is always item_size long.
    floats_left = self.p['item_size'] - len(category_list)
    floatVec = np.random.uniform(low=0.0, high=1.0, size=(floats_left,))
    float_list.extend(list(floatVec))
    for i in range(skip):
        float_list.pop(0)
        float_list.append(np.random.uniform(low=0.0, high=1.0))
    return float_list
def float_for_agi_token(self, tokens):
    """Normalize a token price into [0, 1] over the configured price range.

    Prices outside [min_token_price, max_token_price] are clamped to the
    ends of the unit interval.
    """
    lo = self.p["min_token_price"]
    hi = self.p["max_token_price"]
    # min/max clamp replaces the original if/elif chain.
    return min(1.0, max(0.0, (tokens - lo) / (hi - lo)))
def agi_token(self, afloat):
    """Map a unit-interval float back onto the configured token price range."""
    lo = self.p["min_token_price"]
    span = self.p["max_token_price"] - lo
    return lo + span * afloat
def float_for_weighted_choice(self, weighted_choices, atype, previously_taken_space=0):
    """Inverse of choose_weighted(): draw a float that decodes back to atype.

    The float is sampled uniformly inside atype's cumulative band, scaled
    into the space left after previously_taken_space. When atype is not
    among the choices, the draw falls past the final band — a region that
    choose_weighted() maps back to None.
    """
    cumulative = self.normalized_cumulative(weighted_choices)
    space = 1 - previously_taken_space
    choice_float = None
    last_weight = 0.0
    for choice, weight in cumulative.items():
        if choice == atype:
            # Band for this choice: (previous cumulative, cumulative], scaled.
            choice_float = random.uniform(last_weight * space, weight * space)
        last_weight = weight
    if choice_float == None:
        # atype not found: encode "no choice" beyond the last band.
        choice_float = random.uniform(last_weight * space, 1.0)
    return choice_float
def normalized_cumulative(self, weighted_choices):
    """Transform choice weights into a cumulative distribution.

    The returned OrderedDict maps each choice to its running normalized
    weight, ending at 1.0 (up to float rounding).
    """
    # sum() replaces the original manual accumulation loop; a single pass
    # replaces the intermediate 'normalized' dict.
    total = sum(weighted_choices.values())
    cumulative = OrderedDict()
    running = 0.0
    for choice, weight in weighted_choices.items():
        running += weight / total
        cumulative[choice] = running
    return cumulative
def choose_weighted(self, weighted_choices, roll, previously_taken_space=0):
    """Return the choice whose cumulative band contains roll.

    Returns None when roll lands in the previously taken (reserved) space
    at the top of the interval.
    """
    cumulative = self.normalized_cumulative(weighted_choices)
    available = 1 - previously_taken_space
    for choice, weight in cumulative.items():
        if roll < weight * available:
            return choice
    return None
def convert_to_cumulative_category(self, function_name, call_depth=0):
    """Resolve a name down to an ontology category.

    If the name is an emergent function, recursively follow its first
    component until a plain category is reached, bounded by the recursive
    trade depth limit.
    """
    emergent = self.model.emergent_functions
    if function_name in emergent and call_depth < self.p["recursive_trade_depth_limit"]:
        root = emergent[function_name][0]
        return self.convert_to_cumulative_category(root, call_depth=call_depth + 1)
    return function_name
def item_type(self, general_function):
    """Extract the item-type component from a function's ontology category.

    The name is first resolved to a cumulative category; returns None when
    the item-type pattern does not match it.
    """
    category = self.convert_to_cumulative_category(general_function)
    match = self.item_type_pattern.search(category)
    return match.group(1) if match else None
def test_item_type(self, cumulative_category):
item_type = self.test_item_type_pattern.search(cumulative_category)
if (item_type):
item_type = item_type.group(1)
return(item_type)
def float_vec_to_trade_plan(self, float_vec_dont_change, mask=None):
    """Decode a float genome into a trade-plan message (OrderedDict).

    Layout: [sign | trade 0 | trade 1 | ...] where each trade is
    [type | sign | item | midpoint | range | tests...] (see trade_size()
    and test_size()). An optional mask forces selected fields to fixed
    values; each forced value is also re-encoded back into the float
    vector so plan and genome stay consistent.

    Returns (trade_plan, possibly-updated copy of the float vector).
    """
    # Work on a copy: the caller's vector must not be mutated.
    float_vec = copy.deepcopy(float_vec_dont_change)
    first_level = ["distributes", "initial_message", "final_message", "message_period"]
    cursor = 0
    trade_plan = OrderedDict([('type', self.__class__.__name__)])
    trade_plan['label'] = trade_plan['type'] + " Agent " + str(self.unique_id)
    if mask and "label" in mask:
        trade_plan['label'] = mask["label"] + ", " + trade_plan['label']
    # Top-level flags exist only when supplied through the mask.
    for name in first_level:
        if mask and name in mask:
            trade_plan[name] = mask[name]
    # The agent sign is kept as raw floats.
    next_cursor = self.p['sign_size']
    trade_plan['sign'] = list(float_vec[cursor:next_cursor])
    if mask and "sign" in mask:
        for i in range(min(len(mask["sign"]), self.p['sign_size'])):
            trade_plan['sign'][i] = mask["sign"][i]
            float_vec[cursor + i] = mask["sign"][i]
    cursor = next_cursor
    trade_plan['trades'] = []
    cursor_before_trade_plans = cursor
    for i in range(self.p['num_trade_plans']):
        trade_plan['trades'].append(dict())
        cursor = cursor_before_trade_plans + i * self.trade_size()
        # Trade type: buy / construct / sell / stop.
        if mask and "trades" in mask and i < len(mask['trades']) and 'type' in mask['trades'][i]:
            trade_plan['trades'][i]['type'] = mask['trades'][i]['type']
            float_vec[cursor] = self.float_for_trade_type(trade_plan['trades'][i]['type'])
        else:
            trade_plan['trades'][i]['type'] = self.trade_type(float_vec[cursor])
        # Trade sign: raw floats.
        cursor += 1
        next_cursor = cursor + self.p['sign_size']
        trade_plan['trades'][i]['sign'] = list(float_vec[cursor:next_cursor])
        if mask and "trades" in mask and i < len(mask['trades']) and 'sign' in mask['trades'][i]:
            for j in range(min(len(mask['trades'][i]['sign']), self.p['sign_size'])):
                trade_plan['trades'][i]['sign'][j] = mask['trades'][i]['sign'][j]
                float_vec[cursor + j] = mask['trades'][i]['sign'][j]
        cursor = next_cursor
        # The traded item: an ontology category decoded from the floats.
        next_cursor = cursor + self.p['item_size']
        if mask and "trades" in mask and i < len(mask['trades']) and 'item' in mask['trades'][i]:
            floats_for_item = self.floats_for_ontology_item(mask['trades'][i]['item'])
            ontlist = mask['trades'][i]['item'].split("_")
            for k in range(len(ontlist)):
                float_vec[cursor + k] = floats_for_item[k]
        trade_plan['trades'][i]['item'] = self.ontology_item(float_vec[cursor:next_cursor])
        item_type = self.item_type(trade_plan['trades'][i]['item'])
        cursor = next_cursor
        # Price midpoint (in tokens).
        if mask and "trades" in mask and i < len(mask['trades']) and 'midpoint' in mask['trades'][i]:
            trade_plan['trades'][i]['midpoint'] = mask['trades'][i]['midpoint']
            float_vec[cursor] = self.float_for_agi_token(trade_plan['trades'][i]['midpoint'])
        else:
            trade_plan['trades'][i]['midpoint'] = self.agi_token(float_vec[cursor])
        cursor += 1
        # Price range around the midpoint (in tokens).
        if mask and "trades" in mask and i < len(mask['trades']) and 'range' in mask['trades'][i]:
            trade_plan['trades'][i]['range'] = mask['trades'][i]['range']
            float_vec[cursor] = self.float_for_agi_token(trade_plan['trades'][i]['range'])
        else:
            trade_plan['trades'][i]['range'] = self.agi_token(float_vec[cursor])
        cursor += 1
        cursor_before_tests = cursor
        # Tests the buyer requires to pass before accepting the product.
        trade_plan['trades'][i]['tests'] = []
        for j in range(self.p['num_tests']):
            trade_plan['trades'][i]['tests'].append(dict())
            cursor = cursor_before_tests + j * self.test_size()
            # stophere: when True, this and all following tests are ignored.
            if (mask and "trades" in mask and i < len(mask['trades']) and 'tests' in mask['trades'][i]
                    and j < len(mask['trades'][i]['tests']) and 'stophere' in mask['trades'][i]['tests'][j]):
                trade_plan['trades'][i]['tests'][j]['stophere'] = mask['trades'][i]['tests'][j]['stophere']
                float_vec[cursor] = self.float_for_stop(trade_plan['trades'][i]['tests'][j]['stophere'])
            else:
                trade_plan['trades'][i]['tests'][j]['stophere'] = self.stop(float_vec[cursor])
            cursor += 1
            # The test, decoded within the category matching the item's type.
            next_cursor = cursor + self.p['item_size']
            category = 'test_' + item_type
            if (mask and "trades" in mask and i < len(mask['trades']) and 'tests' in mask['trades'][i]
                    and j < len(mask['trades'][i]['tests']) and 'test' in mask['trades'][i]['tests'][j]):
                floats_for_item = self.floats_for_ontology_item(mask['trades'][i]['tests'][j]['test'], skip=2)
                ontlist = mask['trades'][i]['tests'][j]['test'].split("_")
                for k in range(len(ontlist) - 1):
                    float_vec[cursor + k] = floats_for_item[k]
            trade_plan['trades'][i]['tests'][j]['test'] = self.ontology_item(float_vec[cursor:next_cursor],
                                                                            category=category)
            cursor = next_cursor
            # The data the test runs on.
            next_cursor = cursor + self.p['item_size']
            # NOTE(review): this guard checks the mask for 'test' but applies
            # mask[...]['data'] — it probably should check 'data'; confirm.
            if (mask and "trades" in mask and i < len(mask['trades']) and 'tests' in mask['trades'][i]
                    and j < len(mask['trades'][i]['tests']) and 'test' in mask['trades'][i]['tests'][j]):
                floats_for_item = self.floats_for_ontology_item(mask['trades'][i]['tests'][j]['data'], skip=1)
                ontlist = mask['trades'][i]['tests'][j]['data'].split("_")
                for k in range(len(ontlist) - 1):
                    float_vec[cursor + k] = floats_for_item[k]
            trade_plan['trades'][i]['tests'][j]['data'] = self.ontology_item(float_vec[cursor:next_cursor],
                                                                            category='data')
            cursor = next_cursor
            # Pass threshold for the test score (kept as a raw float).
            if (mask and "trades" in mask and i < len(mask['trades']) and 'tests' in mask['trades'][i]
                    and j < len(mask['trades'][i]['tests']) and 'threshold' in mask['trades'][i]['tests'][j]):
                trade_plan['trades'][i]['tests'][j]['threshold'] = mask['trades'][i]['tests'][j]['threshold']
                float_vec[cursor] = mask['trades'][i]['tests'][j]['threshold']
            else:
                trade_plan['trades'][i]['tests'][j]['threshold'] = float_vec[cursor]
            cursor += 1
            # Whether the test result is hidden.
            if (mask and "trades" in mask and i < len(mask['trades']) and 'tests' in mask['trades'][i]
                    and j < len(mask['trades'][i]['tests']) and 'hidden' in mask['trades'][i]['tests'][j]):
                trade_plan['trades'][i]['tests'][j]['hidden'] = mask['trades'][i]['tests'][j]['hidden']
                float_vec[cursor] = self.float_for_hidden(trade_plan['trades'][i]['tests'][j]['hidden'])
            else:
                trade_plan['trades'][i]['tests'][j]['hidden'] = self.hidden(float_vec[cursor])
            cursor += 1
    return (trade_plan, float_vec)
def trade_plan_to_float_vec(self, trade_plan):
    """Encode a trade-plan message back into its float-genome form.

    Inverse of float_vec_to_trade_plan(): missing trades or tests (fewer
    than the configured counts) are padded with uniform random floats so
    the result always has vector_size() entries. Returns a numpy array.
    """
    float_list = []
    # The agent sign is already a raw float list.
    float_list.extend(trade_plan['sign'])
    for i in range(self.p['num_trade_plans']):
        if i >= len(trade_plan['trades']):
            # No such trade in the plan: pad an entire trade with noise.
            floats_left = self.trade_size()
            floatVec = np.random.uniform(low=0.0, high=1.0, size=(floats_left,))
            float_list.extend(list(floatVec))
        else:
            # Trade type.
            float_list.append(self.float_for_trade_type(trade_plan['trades'][i]['type']))
            # Trade sign (raw floats).
            float_list.extend(trade_plan['trades'][i]['sign'])
            # The traded item.
            float_list.extend(self.floats_for_ontology_item(trade_plan['trades'][i]['item']))
            # Price midpoint.
            low = self.float_for_agi_token(trade_plan['trades'][i]['midpoint'])
            float_list.append(low)
            # Price range.
            high = self.float_for_agi_token(trade_plan['trades'][i]['range'])
            float_list.append(high)
            # Each required test.
            for j in range(self.p['num_tests']):
                if j >= len(trade_plan['trades'][i]['tests']):
                    # No such test: pad one test's worth of noise.
                    floats_left = self.test_size()
                    floatVec = np.random.uniform(low=0.0, high=1.0, size=(floats_left,))
                    float_list.extend(list(floatVec))
                else:
                    # stophere flag.
                    float_for_stop = self.float_for_stop(trade_plan['trades'][i]['tests'][j]['stophere'])
                    float_list.append(float_for_stop)
                    # The test (skip=2: 'test_<itemtype>' prefix is implied).
                    floats_for_ontology_item = self.floats_for_ontology_item(trade_plan['trades'][i]['tests'][j]['test'], skip=2)
                    float_list.extend(floats_for_ontology_item)
                    # The data (skip=1: 'data' prefix is implied).
                    floats_for_ontology_item = self.floats_for_ontology_item(trade_plan['trades'][i]['tests'][j]['data'], skip=1)
                    float_list.extend(floats_for_ontology_item)
                    # Threshold is stored as a raw float.
                    float_list.append(trade_plan['trades'][i]['tests'][j]['threshold'])
                    # hidden flag.
                    float_for_hidden = self.float_for_hidden(trade_plan['trades'][i]['tests'][j]['hidden'])
                    float_list.append(float_for_hidden)
    return np.asarray(float_list)
def blank_message(self):
    """Build a minimal empty trade-plan message.

    The message carries this agent's class name and id, a zeroed sign, and
    a single 'stop' trade (so no trading happens).
    """
    # Removed the unused 'cursor = 0' local from the original.
    trade_plan = OrderedDict([('type', self.__class__.__name__)])
    trade_plan['label'] = trade_plan['type'] + " Agent " + str(self.unique_id)
    trade_plan['sign'] = [0.0] * self.p['sign_size']
    trade_plan['trades'] = [{'type': 'stop'}]
    return trade_plan
def get_bought_items(self):
    """Collect pickle -> code for every trade holding settled bought software.

    Only trades that have both a truthy 'code' and a truthy 'pickle' are
    included; later trades with the same pickle overwrite earlier ones.
    """
    return OrderedDict(
        (trade['pickle'], trade['code'])
        for trade in self.message['trades']
        if trade.get('code') and trade.get('pickle'))
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseNotFound
from .models import *
import requests, json
import xml.etree.ElementTree as ET
import kookoo
from random import random, randint
from django.shortcuts import render, render_to_response
def call_ivr(request):
    # IVR webhook for the KooKoo telephony service (Python 2 view). On a
    # DTMF event, look up schools for the entered pincode and read them out;
    # otherwise prompt the caller to key in a pincode.
    if request.method == 'GET':
        print request.GET
        event = request.GET.get('event', None)
        print event
        if event == "GotDTMF":
            pincode = int(request.GET['data'])
            # NOTE(review): pincode is parsed but never used in the query,
            # and only the first 2 schools are fetched — confirm intent.
            top_10_schools = SchoolNames.Query.all().limit(2)
            s = "The schools near you are "
            for school in top_10_schools:
                s += school.SCHOOL_NAME
                # NOTE(review): this leaves a trailing " and" after the
                # last school name in the spoken text.
                s += " and"
            r = kookoo.Response()
            r.addPlayText(s)
            r.addHangup()
            return HttpResponse(r)
        else:
            # First callback: ask the caller for a pincode (up to 6 digits).
            r = kookoo.Response()
            pincode = r.append(kookoo.CollectDtmf(maxDigits=6))
            pincode.append(kookoo.PlayText("Please enter the pincode"))
            return HttpResponse(r)
    # Non-GET requests fall through and return None (an error in Django).
def get_coordinates(request):
    """Geocode a postal code via the Google Maps API; return lat/lng as JSON."""
    postal_code = request.GET.get('postal_code')
    base = "https://maps.googleapis.com/maps/api/geocode/json?components=postal_code:"
    payload = requests.get(base + postal_code).json()
    coordinates = payload["results"][0]["geometry"]["location"]
    return HttpResponse(json.dumps(coordinates))
def home(request):
    """Render the landing page."""
    template_name = "base.html"
    return render(request, template_name)
def search(request):
    # TODO: not implemented yet — currently returns None, which Django
    # treats as an invalid response; implement the school search here.
    pass
from random import randint

# Number-guessing loop: keep prompting until the user enters the secret.
numero_informado = -1          # sentinel; can never equal the secret (0..8)
numero_secreto = randint(0, 8)
while numero_informado != numero_secreto:
    numero_informado = int(input('Informe a senha: '))
# Loop exits only on a correct guess.
print('Numero encontrado')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
UW, CSEP 573, Win19
"""
from pomdp import POMDP
from onlineSolver import OnlineSolver
import numpy as np
class AEMS2(OnlineSolver):
    """AEMS2 online POMDP solver.

    Grows an AND-OR tree from the current belief, repeatedly expanding the
    fringe node with the largest discounted, reachability-weighted bound
    gap. lb_solver / ub_solver supply offline lower / upper value bounds
    for new belief nodes.
    """

    def __init__(self, pomdp, lb_solver, ub_solver, precision=.001, action_selection_time=.1):
        super(AEMS2, self).__init__(pomdp, precision, action_selection_time)
        self.lb_solver = lb_solver
        self.ub_solver = ub_solver
        # Precompute the expected immediate reward table R[a, s] =
        # sum over s', o of T(s,a,s') * O(a,s',o) * R(a,s,s',o).
        self.rewards = np.zeros([len(self.pomdp.actions), len(self.pomdp.states)])
        for s_index in range(len(self.pomdp.states)):
            for a_index in range(len(self.pomdp.actions)):
                for o_index in range(len(self.pomdp.observations)):
                    self.rewards[a_index, s_index] += np.dot(self.pomdp.T[a_index, s_index, :] * self.pomdp.O[a_index, :, o_index], self.pomdp.R[a_index, s_index, :, o_index])
        # Root OR-node holds the prior belief as a 1 x |S| row vector.
        cur_belief = np.array(self.pomdp.prior).reshape(1, len(self.pomdp.prior))
        self.root = OrNode(cur_belief, self.lb_solver, self.ub_solver, 1, [], None)

    def expandOneNode(self, forceExpand=False):
        """Expand the fringe OR-node with the highest weighted error.

        Returns False (stop expanding) once the best error drops below the
        precision threshold, unless forceExpand is set.
        """
        leaves = [(leaf, self.__computeError(leaf, depth)) for (leaf, depth) in self.__getAllLeaves(self.root)]
        highestErrorLeaf = max(leaves, key=lambda n: n[1])[0]
        if not forceExpand:
            highestError = max(leaves, key=lambda n: n[1])[1]
            if highestError < self.precision:
                return False
        # One AND-node per action, each with an OR-node per observation.
        andNodes = []
        for a_index in range(len(self.pomdp.actions)):
            # P(o | b, a) for every observation o.
            probabilities = (highestErrorLeaf.belief @ self.pomdp.T[a_index, :, :] @ self.pomdp.O[a_index, :, :])[0]
            reward = sum(highestErrorLeaf.belief @ self.rewards[a_index])
            andNode = AndNode(a_index, self.pomdp.discount, reward, [], probabilities, highestErrorLeaf)
            andNode.children = [OrNode(self.__updateBelief(highestErrorLeaf.belief, a_index, o_index), self.lb_solver, self.ub_solver, probabilities[o_index], [], andNode) for o_index, observation in enumerate(self.pomdp.observations)]
            andNode.backtrack()
            andNodes.append(andNode)
        highestErrorLeaf.children = andNodes
        highestErrorLeaf.backtrack()
        # Propagate the tightened bounds up to the root.
        parent_node = highestErrorLeaf.parent
        while parent_node is not None:
            parent_node.backtrack()
            parent_node = parent_node.parent

    def chooseAction(self):
        """Pick the root action with the best upper bound, expanding once
        first if the root has no children yet."""
        if len(self.root.children) == 0:
            self.expandOneNode(forceExpand=True)
        return max([(action_index, andNode.upperBound) for action_index, andNode in enumerate(self.root.children)], key=lambda n: n[1])[0]

    def updateRoot(self, action, observation):
        """Descend to the subtree matching the executed action/observation."""
        self.root = self.root.children[action].children[observation]

    def __computeError(self, node, depth):
        """Weighted error of a fringe node.

        The bound gap is discounted by depth and multiplied by the
        observation probabilities along the path; it is zero whenever an
        ancestor action is not the current best (upper-bound) action.
        """
        error = node.getError()
        if depth == 0:
            return error
        error = (self.pomdp.discount ** depth) * error
        orNode = node
        for _ in range(depth):
            andNode = orNode.parent
            parentOrNode = andNode.parent
            bestAction = max([(child.action, child.upperBound) for child in parentOrNode.children], key=lambda n: n[1])[0]
            if andNode.action != bestAction:
                return 0
            error = error * orNode.probability
            orNode = parentOrNode
        return error

    def __updateBelief(self, current_belief, action, observation):
        """Bayes update of the belief for (action, observation); returns the
        unnormalized belief when its mass is zero to avoid dividing by 0."""
        current_belief = np.matmul(current_belief, self.pomdp.T[action, :, :])
        current_belief = current_belief * self.pomdp.O[action, :, observation]
        return current_belief / np.sum(current_belief) if np.sum(current_belief) > 0 else current_belief

    def __getAllLeaves(self, orNode, depth=0):
        """Collect (leaf, depth) pairs reached by always following the
        AND-node with the best upper bound at each OR-node."""
        if len(orNode.children) == 0:
            return [(orNode, depth)]
        andNode = max([(andNode, andNode.upperBound) for andNode in orNode.children], key=lambda n: n[1])[0]
        leaves = []
        for subOrNode in andNode.children:
            leaves += self.__getAllLeaves(subOrNode, depth + 1)
        return leaves
"""
****Your code
add any data structure, code, etc you want
We recommend to have a super class of Node and two subclasses of AndNode and OrNode
"""
class Node(object):
    """Base node of the AEMS2 AND-OR search tree, holding value bounds."""

    def __init__(self, children=None, parent=None):
        # The original used mutable defaults (children=[]), so every
        # default-constructed node shared one list; use None instead.
        self.lowerBound = float('-inf')
        self.upperBound = float('inf')
        self.children = [] if children is None else children
        self.parent = parent

    def getError(self):
        """Gap between the upper and lower bound at this node."""
        return self.upperBound - self.lowerBound


class AndNode(Node):
    """Action node: immediate reward plus discounted, observation-probability
    weighted combination of its child OR-nodes."""

    def __init__(self, action, discount, reward, children=None, probabilities=None, parent=None):
        super(AndNode, self).__init__(children, parent)
        self.action = action
        self.discount = discount
        self.reward = reward
        if probabilities is None:
            probabilities = []
        # Renormalize when the observation probabilities lost mass to
        # numerical error; skip for an empty vector (nothing to normalize).
        if len(probabilities) and sum(probabilities) < 1:
            probabilities = probabilities / np.sum(probabilities)
        self.probabilities = probabilities

    def backtrack(self):
        """Recompute bounds from children: reward + discount * sum P(o) * bound."""
        self.lowerBound = self.reward + self.discount * sum(
            self.probabilities[index] * child.lowerBound
            for index, child in enumerate(self.children))
        self.upperBound = self.reward + self.discount * sum(
            self.probabilities[index] * child.upperBound
            for index, child in enumerate(self.children))


class OrNode(Node):
    """Belief node: bounds initialized from the offline solvers; backtrack
    takes the max over child action (AND) nodes."""

    def __init__(self, belief, lb_solver, ub_solver, probability, children=None, parent=None):
        super(OrNode, self).__init__(children, parent)
        self.belief = belief
        self.lowerBound = lb_solver.getValue(self.belief)
        self.upperBound = ub_solver.getValue(self.belief)
        # Probability of the observation that leads to this belief.
        self.probability = probability

    def backtrack(self):
        self.lowerBound = max(child.lowerBound for child in self.children)
        self.upperBound = max(child.upperBound for child in self.children)
|
import logging
from cookiecutter.main import cookiecutter
def run_task(name: str, description: str, parameters: dict, service_metadata: dict):
    """Generate a project directory from a cookiecutter template.

    parameters: task configuration — names the metadata fields to read
        ('template_url_field_name', 'parameters_field_name') and the
        'output_dir' to generate into.
    service_metadata: carries the template URL and the template's extra
        context under the configured field names.
    """
    logging.info("Generating project directory using cookiecutter")
    template_url_field_name = parameters['template_url_field_name']
    parameters_field_name = parameters['parameters_field_name']
    output_dir = parameters['output_dir']
    template_url = service_metadata[template_url_field_name]
    # Distinct name: the original rebound the 'parameters' argument here.
    extra_context = service_metadata[parameters_field_name]
    cookiecutter(
        template=template_url,
        no_input=True,
        extra_context=extra_context,
        output_dir=output_dir,
    )
    # Lazy %-style args instead of an f-string, per logging best practice.
    logging.info("Generated project directory using cookiecutter from template %s", template_url)
|
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param root, a tree node
    # @return a boolean
    def isSymmetric(self, root):
        """Return True when the binary tree is a mirror image of itself."""
        # 'is None' instead of '== None': identity check, per PEP 8.
        if root is None:
            return True
        return self.isSymmetric2(root.left, root.right)

    def isSymmetric2(self, a, b):
        """Return True when subtrees a and b are mirror reflections."""
        if a is None and b is None:
            return True
        if a is None or b is None:
            # Exactly one side missing -> not symmetric.
            return False
        return (a.val == b.val
                and self.isSymmetric2(a.left, b.right)
                and self.isSymmetric2(a.right, b.left))
"""
Pytorch Implementation of our GFFnet models based on Deeplabv3+ with WiderResNet-38 as backbone.
Author: Xiangtai Li (lxtpku@pku.edu.cn)
"""
import logging
import torch
from torch import nn
from network.wider_resnet import wider_resnet38_a2
from network.deepv3 import _AtrousSpatialPyramidPoolingModule
from network.nn.mynn import initialize_weights, Norm2d, Upsample
from network.nn.operators import conv_bn_relu, conv_sigmoid, DenseBlock
class DeepWV3PlusGFFNet(nn.Module):
    """DeepLabV3+-style segmentation network with a WiderResNet-38 trunk,
    an ASPP module and a GFF decoder head."""

    def __init__(self, num_classes, trunk='WideResnet38', criterion=None):
        # criterion: training loss callable; required when forward() is
        # called in training mode (the loss is returned then).
        super(DeepWV3PlusGFFNet, self).__init__()
        self.criterion = criterion
        logging.info("Trunk: %s", trunk)
        # Build the backbone, then unwrap the DataParallel wrapper so the
        # stage modules (mod1..mod7, pools) can be reused directly.
        wide_resnet = wider_resnet38_a2(classes=1000, dilation=True)
        wide_resnet = torch.nn.DataParallel(wide_resnet)
        wide_resnet = wide_resnet.module
        self.mod1 = wide_resnet.mod1
        self.mod2 = wide_resnet.mod2
        self.mod3 = wide_resnet.mod3
        self.mod4 = wide_resnet.mod4
        self.mod5 = wide_resnet.mod5
        self.mod6 = wide_resnet.mod6
        self.mod7 = wide_resnet.mod7
        self.pool2 = wide_resnet.pool2
        self.pool3 = wide_resnet.pool3
        # Only the copied stage references are kept.
        del wide_resnet
        self.aspp = _AtrousSpatialPyramidPoolingModule(4096, 256,
                                                       output_stride=8)
        # Reduce the concatenated ASPP output (1280 ch) to 256 channels.
        self.bot_aspp = nn.Conv2d(1280, 256, kernel_size=3, padding=1, bias=False)
        self.gff_head = WiderResNetGFFDFPHead(num_classes, norm_layer=Norm2d)
        initialize_weights(self.gff_head)

    def forward(self, inp, gts=None):
        # In training mode returns criterion(logits, gts); otherwise
        # returns logits upsampled to the input spatial size.
        x_size = inp.size()
        x = self.mod1(inp)
        m2 = self.mod2(self.pool2(x))   # low-level features fed to the head
        x = self.mod3(self.pool3(m2))
        x = self.mod4(x)
        m5 = self.mod5(x)               # mid-level features fed to the head
        x = self.mod6(m5)
        x = self.mod7(x)
        x_aspp = self.aspp(x)
        aspp = self.bot_aspp(x_aspp)
        dec1 = self.gff_head([m2, m5, aspp])
        out = Upsample(dec1, x_size[2:])
        if self.training:
            return self.criterion(out, gts)
        return out
class WiderResNetGFFDFPHead(nn.Module):
    """GFF (gated fully fused) + DFP decoding head.

    Takes [m2, m5, aspp] feature maps, gates and cross-fuses them into a
    common 128-channel space, runs a cascade of dense blocks, then classifies.
    """

    def __init__(self, num_classes, norm_layer=nn.BatchNorm2d):
        super(WiderResNetGFFDFPHead, self).__init__()
        # 1x1 projections of each input stream to 128 channels.
        self.d_in1 = conv_bn_relu(128, 128, 1, norm_layer=norm_layer)
        self.d_in2 = conv_bn_relu(1024, 128, 1, norm_layer=norm_layer)
        self.d_in3 = conv_bn_relu(256, 128, 1, norm_layer=norm_layer)
        # Per-stream sigmoid gates, computed from the unprojected inputs.
        self.gate1 = conv_sigmoid(128, 128)
        self.gate2 = conv_sigmoid(1024, 128)
        self.gate3 = conv_sigmoid(256, 128)
        in_channel = 128
        # Cascade of dense blocks; input width grows by 128 per stage, the
        # fourth argument is 3/6/9 (rate parameter — see DenseBlock).
        self.dense_3 = DenseBlock(in_channel, 128, 128, 3, drop_out=0, norm_layer=norm_layer)
        self.dense_6 = DenseBlock(in_channel + 128, 128, 128, 6, drop_out=0, norm_layer=norm_layer)
        self.dense_9 = DenseBlock(in_channel + 128 * 2, 128, 128, 9, drop_out=0, norm_layer=norm_layer)
        # Classifier over the 4 concatenated 128-channel streams (512 ch).
        self.cls = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1, bias=False),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, num_classes, kernel_size=1, bias=False))

    def forward(self, x):
        # x is the list [m2, m5, aspp] produced by DeepWV3PlusGFFNet.forward.
        m2, m5, aspp = x
        m2_size = m2.size()[2:]
        m5_size = m5.size()[2:]
        aspp_size = aspp.size()[2:]
        # Gates are computed on the raw (unprojected) inputs.
        g_m2 = self.gate1(m2)
        g_m5 = self.gate2(m5)
        g_aspp = self.gate3(aspp)
        m2 = self.d_in1(m2)
        m5 = self.d_in2(m5)
        aspp = self.d_in3(aspp)
        # GFF fusion: each stream keeps its own gated signal and absorbs the
        # other streams' gated signals where its own gate is low.
        # NOTE(review): m2 is overwritten here and the UPDATED m2 is then used
        # in the m5 fusion (and both updated maps in the aspp fusion) — the
        # fusion is sequential, not symmetric. Confirm this is intentional.
        m2 = m2 + g_m2 * m2 + (1 - g_m2) * ( Upsample(g_m5 * m5, size=m2_size) + Upsample(g_aspp * aspp, size=m2_size) )
        m5 = m5 + g_m5 * m5 + (1 - g_m5) * (
                Upsample(g_m2 * m2, size=m5_size) + Upsample(g_aspp *aspp, size=m5_size))
        aspp_f = aspp + aspp * g_aspp + (1 - g_aspp) * (
                Upsample(g_m5 *m5, size=aspp_size) + Upsample(g_m2 * m2, size=aspp_size))
        # Bring every stream to the m2 spatial size before the dense cascade.
        aspp_f = Upsample(aspp_f, size=m2_size)
        aspp = Upsample(aspp, size=m2_size)
        m5 = Upsample(m5, size=m2_size)
        # DFP fusion: each dense block consumes the previous blocks' outputs.
        out = aspp_f
        aspp_f = self.dense_3(out)
        out = torch.cat([aspp_f,m5], dim=1)
        m5 = self.dense_6(out)
        out = torch.cat([aspp_f,m5,m2],dim=1)
        m2 = self.dense_9(out)
        f = torch.cat([aspp_f, aspp, m5, m2], dim=1)
        out = self.cls(f)
        return out
#!/usr/bin/env python
"""Convert an observed X-ray flux and a distance into a luminosity."""
import sys


def compute_luminosity(flux, d_kpc):
    """Return luminosity in erg/s for `flux` (erg/s/cm2) at `d_kpc` (kpc).

    The 1.2e32 factor matches 4*pi*d^2 for d = 1 kpc applied to a reference
    flux of 1e-12 erg/s/cm2 (4*pi*(3.086e21 cm)^2 * 1e-12 ~= 1.2e32).
    """
    return 1.2e+32 * (flux / 1e-12) * d_kpc**2


def main(argv):
    """Parse [flux, d_kpc] from argv and print a small report."""
    if len(argv) != 3:
        # Fix: write usage and exit with a non-zero status instead of bare exit().
        sys.stderr.write('%s flux d_kpc\n' % argv[0])
        return 1
    flux = float(argv[1])
    d_kpc = float(argv[2])
    luminosity = compute_luminosity(flux, d_kpc)
    dump = "flux: %.3e (erg/s/cm2)\n" % flux
    dump += "distance : %.3e (kpc)\n" % d_kpc
    dump += "luminosity: %.3e (erg/s)" % luminosity
    print(dump)
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from .models import Clothes
import json
from django.views import View
from django.shortcuts import get_object_or_404
from django.core.serializers import serialize
class IndexView(View):
    """REST-style endpoint for the Clothes collection."""

    def get(self, request):
        """Return every Clothes record, newest first, as JSON."""
        queryset = Clothes.objects.all().order_by('-id')
        serialized = serialize('json', queryset)
        return JsonResponse({'clothes' : json.loads(serialized)})

    def post(self, request):
        # Placeholder — creation is not implemented yet.
        return HttpResponse('Post')

    def put(self, request):
        # Placeholder — update is not implemented yet.
        return HttpResponse('put')

    def delete(self, request):
        # Placeholder — deletion is not implemented yet.
        return HttpResponse('delete')
def Clothes_detail(request, number):
    """Return the Clothes records in category `number` as JSON.

    Bug fix: the previous code used .get(), which returns a single model
    instance, but django's serialize() requires an iterable/queryset — the
    call raised a TypeError at runtime. filter() also avoids an unhandled
    DoesNotExist for unknown categories (an empty list is returned instead).
    """
    clothes = Clothes.objects.filter(categories=number)
    data = json.loads(serialize('json', clothes))
    return JsonResponse({'clothes' : data})
def Clothes_kind(request, kind):
    """Return Clothes records as JSON.

    Bug fix: the previous code passed a Python list straight to HttpResponse,
    which stringifies and concatenates the elements into garbage output.
    Respond with JsonResponse, matching Clothes_detail.

    NOTE(review): `kind` is accepted but never used — filter() has no
    arguments, so every record is returned. The Clothes field to filter on is
    not visible here; wire `kind` into filter(...) once the field is confirmed.
    """
    clothes = Clothes.objects.filter()
    data = json.loads(serialize('json', clothes))
    return JsonResponse({'clothes' : data})
from os import getenv as env
class Config:
    """Application configuration assembled from environment variables, with
    development-friendly defaults."""
    app_address = env('APP_ADDR', '0.0.0.0')
    app_port = env('APP_PORT', '8080')
    app_name = env('APP_NAME', 'mortimer')
    llevel = env('LLEVEL', 'DEBUG')
    # NOTE(review): default salt baked into source — override in production.
    app_salt = env('APP_SALT', 'MXhZifmhG7Zzegk2')
    # cache settings
    # Bug fix: bool() on any non-empty string is True, so the previous
    # bool(env('REDIS_ENABLED', False)) yielded True even for "false"/"0".
    # Parse the usual truthy spellings explicitly instead.
    redis_enabled = str(env('REDIS_ENABLED', 'false')).strip().lower() in ('1', 'true', 'yes', 'on')
    # persistent backend settings
    db_host = env('DB_HOST', '0.0.0.0')
    db_port = env('DB_PORT', '5432')
    db_name = env('DB_NAME', 'test')
    db_user = env('DB_USER', 'test')
    db_pass = env('DB_PASS', 'test')
import numpy as np
import pandas as pd
from backtesting.Indicator.Meanvariance import MeanVariance
from backtesting.example.measure.annualize_return import getReturn
"""
expand_dims的应用
预估收益率和协方差矩阵
"""
def black_litterman(returns, tau, P, Q):
    """Blend historical (prior) asset returns with investor views.

    Parameters
    ----------
    returns : pandas.DataFrame of asset returns (observations x assets).
    tau : float scaling the prior covariance (confidence in the prior mean).
    P : (k, n_assets) array — one row of portfolio weights per view.
    Q : (k, 1) array — the return asserted by each view.

    Returns
    -------
    [posterior_return, posterior_cov] : column vector of blended expected
    returns and the corresponding covariance estimate.
    """
    prior_mean = returns.mean()      # prior expected returns
    prior_cov = returns.cov()        # sample covariance matrix
    scaled_cov = tau * prior_cov     # covariance of the prior-mean estimate

    # View uncertainty: keep only the diagonal of P * (tau*Sigma) * P', i.e.
    # each view's own variance, treating the views as independent.
    view_cov = np.dot(np.dot(P, scaled_cov), P.T)
    Omega = view_cov * np.eye(Q.shape[0])

    scaled_cov_inv = np.linalg.inv(scaled_cov)
    Omega_inv = np.linalg.inv(Omega)

    # (An equivalent closed form using inv(view_cov + Omega) existed here as
    # dead code; it produced the same result and was removed for clarity.)

    # Posterior mean: precision-weighted average of the prior and the views;
    # the weights are the inverses of the two covariance estimates.
    posterior_weight = np.linalg.inv(scaled_cov_inv + np.dot(np.dot(P.T, Omega_inv), P))
    posterior_return = np.dot(
        posterior_weight,
        np.expand_dims(np.dot(scaled_cov_inv, prior_mean.T), axis=1)
        + np.dot(np.dot(P.T, Omega_inv), Q),
    )
    posterior_cov = prior_cov + posterior_weight
    return [posterior_return, posterior_cov]
# Demo: apply Black-Litterman to five A-share stocks, then feed the posterior
# estimates into mean-variance optimisation.
code_list = [
    '002192.SZ',  # Rongjie (lithium)
    '300618.SZ',  # Hanrui (cobalt)
    '300433.SZ',  # Lens Technology
    '002299.SZ',  # Sunner Development
    '300251.SZ',  # Enlight Media
    # '600276.SH', # Hengrui Medicine
    # '600196.SH', # Fosun Pharma
    # '300760.SZ', # Mindray
    # '000001.SH', # SSE Composite index
    # '399001.SZ'  # SZSE Component index
]
returns = getReturn(code_list,'20190101','20200413')
mean = returns.mean()
# View 1: the combined position in stocks 1, 3, 4 and 5 earns 0.003*4.
pick1 = np.array([1,0,1,1,1])
q1 = np.array([0.003*4])
# View 2: half Rongjie + half Hanrui outperforms Enlight by 0.001.
pick2 = np.array([0.5,0.5,0,0,-1])
q2 = np.array([0.001])
P = np.array([pick1,pick2])
Q = np.array([q1,q2])
# print(np.dot(P,mean.T))
# print(Q)
res = black_litterman(returns,0.1,P,Q)
p_mean = pd.DataFrame(res[0],index= returns.columns,columns=['posterior_mean'])
print(mean)
print(p_mean)
p_cov = res[1]*252  # x252: annualised (trading days per year, presumably)
mv = MeanVariance(returns,res[0]*252,p_cov)
qv = mv.quadraticVar(0.003*252)  # weights from the quadratic-variance solver
m = mv.minVar(0.003*252)         # minimum-variance weights at the same target
print(qv.sum())
print(qv)
print(mv.meanRet(qv))
print(np.sqrt(mv.varianceRet(qv)))
print(m.sum())
print(m)
print(mv.meanRet(m))
print(np.sqrt(mv.varianceRet(m)))
'''
mean = returns.mean()
print(mean)
print(mean.shape)
new = np.expand_dims(mean,axis=0)
print(new)
print(new.shape)
a = np.expand_dims(np.dot(P,mean.T),axis=1)
# print(P.shape)
# print(mean.T.shape)
print(a.shape)
print(a)
print(a+Q)
'''
#
# a = np.array([[1,2],[2,3]])
# b = np.array([[3,4],[5,6]])
# print(np.dot(a,b))
# print(a.dot(b))
# Public submodules re-exported by this package.
__all__ = [
    "szt_sys",
    "szt_ceph_type",
    "szt_load_conf",
    "szt_log",
    "szt_sshconnection",
    "szt_net_tool",
    "szt_remote_base",
]
# -*- coding: utf-8 -*-
import requests
from meya import Component
class ChuckNorrisJoke(Component):
    """Bot component that replies with a random Chuck Norris joke."""

    def start(self):
        # The ICNDb API wraps its payload under 'value' -> 'joke'.
        response = requests.get("http://api.icndb.com/jokes/random")
        joke = response.json()['value']['joke']
        reply = self.create_message(text=joke)
        return self.respond(message=reply, action="next")
|
# Minimum Skew
import sys


def min_skew_positions(text):
    """Return the 1-based positions where the G-C skew of `text` is minimal.

    The skew at position i is (#G - #C) counted over the prefix text[:i].
    Raises ValueError for an empty `text` (min of an empty sequence), like
    the original implementation.
    """
    gc = 0
    skew = []
    for base in text:
        if base == 'C':
            gc -= 1
        elif base == 'G':
            gc += 1
        skew.append(gc)
    lowest = min(skew)
    return [i + 1 for i, value in enumerate(skew) if value == lowest]


if __name__ == "__main__":
    # Fixes: close the input file (with-block) and use Python 3 print.
    with open(sys.argv[1].strip(), 'r') as handle:
        genome = handle.readline().strip()
    # Space-separated output, matching the original Python 2 `print pos,`.
    print(' '.join(str(pos) for pos in min_skew_positions(genome)))
|
from rummy_cubes_game import RummyCubesGame
def main(num_players, num_games):
    """Play `num_games` rounds of Rummy Cubes with `num_players` players."""
    for _ in range(num_games):
        RummyCubesGame(num_players).run()
if __name__ == "__main__":
    # Demo configuration: a single two-player game.
    number_of_players = 2
    number_of_games = 1
    main(number_of_players, number_of_games)
|
def adder(x, y):
    """Add two numbers together."""
    print("INSIDE ADDER!")
    return x + y


# Sanity checks run at import time; a failing assertion aborts the module.
# Bug fix: the second assertion claimed 2 + 7 == 10, which is false, so the
# script always died with AssertionError before reaching the final print.
assert adder(2, 5) == 7
assert adder(2, 7) == 9, "expected 2+7 to be 9"
assert adder(2, 3) == 5
print("HELLO WORLD!")
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class fc(nn.Module):
    """MLP classifier head: flatten per-sample features, then stacked Linear
    layers with ReLU between hidden layers (final layer outputs raw logits).

    Args:
        cfg: config object providing CLASSIFIER.FC.hidden_layers (iterable of
            hidden widths) and CLASSIFIER.FC.bias (bool).
        feature_shape: full B*C*H*W shape of the incoming feature map.
        num_classes: output dimension.
    """

    def __init__(self, cfg, feature_shape, num_classes):
        super(fc, self).__init__()
        assert len(feature_shape) == 4, "Expect B*C*H*W"
        # Bug fix: forward() flattens from dim 1, producing C*H*W features per
        # sample, so the first Linear layer must NOT include the batch size.
        # The old np.prod(feature_shape) crashed at runtime for any batch
        # size > 1; behavior for B == 1 is unchanged.
        feature_size = int(np.prod(feature_shape[1:]))
        layer_dims = [feature_size] + list(cfg.CLASSIFIER.FC.hidden_layers) + [num_classes]
        layers = []
        for i in range(len(layer_dims) - 1):
            layers.append(nn.Linear(layer_dims[i], layer_dims[i + 1], bias=cfg.CLASSIFIER.FC.bias))
            # ReLU between hidden layers only, not after the output layer.
            if i != len(layer_dims) - 2:
                layers.append(nn.ReLU())
        # (Removed the leftover debug print of the layer list.)
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # Flatten everything except the batch dimension.
        x = torch.flatten(x, start_dim=1)
        return self.net(x)
# -*- coding: GBK -*-
# ---------------------------------------------------------------------------
# PointsClusterDefault.py
# Created on: 2015-07-02 11:08:02.00000
# (generated by ArcGIS/ModelBuilder)
# Usage: PointsClusterDefault <InputExcel> <ClusterDistance> <OutPolygon> <OutPointsExcel> <OutPolygonsExecl>
# Description:
# ---------------------------------------------------------------------------
# Set the necessary product code
# import arcinfo
# Import arcpy module
import arcpy
from arcpy import env
import cx_Oracle
import re
import logging
import os
import os.path
import time,datetime
import httplib, urllib, json
import getpass
# Output coordinate system: EPSG 4326 (WGS 84).
spatialReference = arcpy.SpatialReference(4326)
ArcCatalogPath = "C:\\Users\\Administrator\\AppData\\Roaming\\ESRI\\Desktop10.3\\ArcCatalog"
OracleGISDBPath = "PDB_PMSDB.sde"
LOG_FILE_NAME = "E:\\GisPython\\logs\\deleteLUCETable.log"
# NOTE(review): logging.basicConfig is a no-op after the first call, so the
# second line (ERROR level) has no effect.
logging.basicConfig(filename=LOG_FILE_NAME,level=logging.INFO)
logging.basicConfig(filename=LOG_FILE_NAME,level=logging.ERROR)
# ArcGIS Server admin endpoint and credentials.
# NOTE(review): hard-coded credentials in source — move to a secured store.
username = "arcadmin"
password = "Passw0rd"
serverName = "10.48.186.82"
serverPort = 6080
serviceFolder = "/arcgis/admin/services/WangYou/"
def StopOrStartService(serviceFolder,serviceName,stopOrStart):
    # Stop or start a published ArcGIS map service through the Server admin
    # REST API (Python 2 / httplib).
    #   serviceFolder: admin URL fragment, e.g. "/arcgis/admin/services/WangYou/"
    #   serviceName:   service name without the ".MapServer" suffix
    #   stopOrStart:   the literal operation, "stop" or "start"
    token = getToken(username, password, serverName, serverPort)
    if token == "":
        print "Could not generate a token with the username and password provided."
        return
    # This request only needs the token and the response formatting parameter
    params = urllib.urlencode({'token': token, 'f': 'json'})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    # Connect to URL and post parameters
    httpConn = httplib.HTTPConnection(serverName, serverPort)
    # Build the full service operation URL.
    fullSvcName = serviceName + "." + "MapServer"
    stopOrStartURL = serviceFolder+fullSvcName+"/" + stopOrStart
    httpConn.request("POST", stopOrStartURL, params, headers)
    # Read stop or start response
    stopStartResponse = httpConn.getresponse()
    if (stopStartResponse.status != 200):
        httpConn.close()
        print "Error while executing stop or start. Please check the URL and try again."
        return
    else:
        stopStartData = stopStartResponse.read()
        # Check that data returned is not an error object
        if not assertJsonSuccess(stopStartData):
            if str.upper(stopOrStart) == "START":
                print "Error returned when starting service " + fullSvcName + "."
            else:
                print "Error returned when stopping service " + fullSvcName + "."
            print str(stopStartData)
        else:
            print "Service " + fullSvcName + " processed successfully."
        httpConn.close()
    return
# A function to generate a token given username, password and the adminURL.
def getToken(username, password, serverName, serverPort):
    # Returns the admin token string, or None if anything fails.
    # Token URL is typically http://server[:port]/arcgis/admin/generateToken
    tokenURL = "/arcgis/admin/generateToken"
    # 'client': 'requestip' ties the generated token to this machine's IP.
    params = urllib.urlencode({'username': username, 'password': password, 'client': 'requestip', 'f': 'json'})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    # Connect to URL and post parameters
    httpConn = httplib.HTTPConnection(serverName, serverPort)
    httpConn.request("POST", tokenURL, params, headers)
    # Read response
    response = httpConn.getresponse()
    if (response.status != 200):
        httpConn.close()
        print "Error while fetching tokens from admin URL. Please check the URL and try again."
        return
    else:
        data = response.read()
        httpConn.close()
        # Check that data returned is not an error object
        if not assertJsonSuccess(data):
            return
        # Extract the token from it
        token = json.loads(data)
        return token['token']
# A function that checks that the input JSON object
# is not an error object.
def assertJsonSuccess(data):
    # Returns False (after printing the payload) when the admin API reported
    # {"status": "error"}; True otherwise.
    obj = json.loads(data)
    if 'status' in obj and obj['status'] == "error":
        print "Error: JSON object returns an error. " + str(obj)
        return False
    else:
        return True
if __name__ == '__main__':
    # Cleanup job: stop the LUCE map services, delete the feature classes
    # referenced by GIS_ZDLUCELOG rows with FLAG = 2, purge those log rows
    # from Oracle, then restart the services.
    GIS_ZDLUCELOG = ArcCatalogPath+"\\"+OracleGISDBPath+"\\SDE.GIS_ZDLUCELOG"
    try:
        print time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
        logging.info("开始时间:"+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
        logging.info("STOP SERVICE ----------------------")
        # Services must be stopped before their underlying tables are deleted.
        StopOrStartService(serviceFolder,"LUCERSRP","stop")
        StopOrStartService(serviceFolder,"LUCERSRQ","stop")
        StopOrStartService(serviceFolder,"LUCESINR","stop")
        StopOrStartService(serviceFolder,"LUCEZCLD","stop")
        logging.info("DELETE TABLE ----------------------")
        # Walk every log row flagged for deletion; GUANLIANTABLE holds the
        # name of the feature class to drop.
        taskCursor = arcpy.SearchCursor(GIS_ZDLUCELOG,"FLAG = 2")
        taskNextRow = taskCursor.next()
        while taskNextRow:
            deleName = taskNextRow.GUANLIANTABLE
            deleTable = ArcCatalogPath+"\\"+OracleGISDBPath+"\\SDE."+deleName
            if(arcpy.Exists(deleTable)):
                print "delete exists "+deleName
                logging.info("delete exists table :"+deleName)
                arcpy.Delete_management(deleTable, "FeatureClass")
            taskNextRow = taskCursor.next()
        # Remove the processed log rows directly in Oracle.
        # NOTE(review): connection string embeds credentials — move to config.
        dbConnOra=cx_Oracle.connect("sde/sde_nsn2015@10.48.186.102:1521/pdb_pmsdb")
        cursorOra = dbConnOra.cursor()
        updateTaskState = "delete from GIS_ZDLUCELOG where FLAG=2"
        cursorOra.execute(updateTaskState)
        dbConnOra.commit()
        dbConnOra.close()
        logging.info("START SERVICE ----------------------")
        StopOrStartService(serviceFolder,"LUCERSRP","start")
        StopOrStartService(serviceFolder,"LUCERSRQ","start")
        StopOrStartService(serviceFolder,"LUCESINR","start")
        StopOrStartService(serviceFolder,"LUCEZCLD","start")
        logging.info("结束时间:"+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
        print "结束时间:"+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
    except Exception,e:
        logging.error(e)
        print e.message
|
"""
计数排序(count sort)
1、找出数组中的最大、最小值
2、设置一个大小 = 最大值-最小值+1 的额外数组counts
3、利用下标统计arr中每个数出现的位置,arr中元素对应counts中下标为 counts[arr]+1
4、释放统计结果
适用于:
待排序列表中元素差值较小,重复元素较多
不足:当元素较多时,需要额外的空间较多
属性:
不稳定
O(n)时间复杂度
O(K)空间复杂度
测试用例:
arr = [10,1,3,4,2,8,11,4,67,3]
print(count_sort(arr))
"""
def count_sort(arr):
    """Counting sort: sort the integer list `arr` in place and return it.

    Best when the value range (max - min) is small relative to len(arr).
    Time O(n + k), extra space O(k) with k = max - min + 1.
    """
    # Robustness fix: the original indexed arr[0] and crashed on [].
    if not arr:
        return arr
    # Find the minimum and maximum values in a single pass.
    lo = hi = arr[0]
    for value in arr:
        if value < lo:
            lo = value
        elif value > hi:
            hi = value
    # counts[v - lo] = number of occurrences of value v.
    counts = [0] * (hi - lo + 1)
    for value in arr:
        counts[value - lo] += 1
    # Rewrite arr in sorted order from the histogram.
    pos = 0
    for offset, count in enumerate(counts):
        for _ in range(count):
            arr[pos] = offset + lo
            pos += 1
    return arr
class Solution():
    """Counting sort wrapped in a class; sorts self.list in place."""

    def __init__(self, list):
        # NOTE: the parameter name `list` shadows the builtin; kept unchanged
        # for interface compatibility with existing callers.
        self.list = list

    def count_sort(self):
        """Sort self.list (a list of integers) in place and return it."""
        # Robustness fix: the original indexed self.list[0] and crashed on [].
        if not self.list:
            return self.list
        lo = hi = self.list[0]
        for value in self.list:
            if value < lo:
                lo = value
            elif value > hi:
                hi = value
        # counts[v - lo] = number of occurrences of value v.
        counts = [0] * (hi - lo + 1)
        for value in self.list:
            counts[value - lo] += 1
        # Rewrite the list in sorted order from the histogram.
        pos = 0
        for offset, count in enumerate(counts):
            for _ in range(count):
                self.list[pos] = offset + lo
                pos += 1
        return self.list
if __name__ == '__main__':
    # Quick self-check: duplicates plus a large value gap (67).
    arr = [10, 1, 3, 4, 2, 8, 11, 4, 67, 3]
    res = Solution(arr).count_sort()
    print(res)
|
import os,sys
from .ioc import Ioc as Ioc
class IocFile:
    """Parse a text file of indicators of compromise (IOCs), one per line,
    bucketing each line into domains, IPs, hashes or email addresses.

    Lines that match none of the Ioc classifiers are silently skipped.
    """

    def __init__(self, file):
        self.file = file
        self.domains = set()
        self.ips = set()
        self.hashes = set()
        self.emails = set()
        self._parse_indicator_file(self.file)

    def all_indicators(self):
        """Return every parsed indicator, of any kind, as a single set."""
        return self.domains.union(
            self.ips,
            self.hashes,
            self.emails
        )

    # These methods are only meant to be called from inside this class.
    def _parse_indicator_file(self, file):
        try:
            with open(file, 'r') as f:
                # Iterate the file directly instead of readlines() — avoids
                # materialising the whole file in memory.
                for line in f:
                    line = line.strip('\n')
                    if Ioc.is_domain(line):
                        self.domains.add(Ioc.clean_indicator(line))
                    elif Ioc.is_ip(line):
                        self.ips.add(Ioc.clean_indicator(line))
                    elif Ioc.is_hash(line):
                        self.hashes.add(Ioc.clean_indicator(line))
                    elif Ioc.is_email(line):
                        self.emails.add(Ioc.clean_indicator(line))
        except Exception as e:
            # Bug fix: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt and discarded the original error entirely.
            # Keep the message callers rely on, but chain the cause.
            raise Exception('Could not parse indicator file') from e
|
#!/usr/bin/Python
# -*- coding: utf-8 -*-
# __author__ = "haibo"
import xlrd
def test():
    # Demo (Python 2): dump the structure and contents of demo.xlsx via xlrd.
    data = xlrd.open_workbook('demo.xlsx')  # open the workbook
    sheet_names = data.sheet_names()  # names of every sheet in the file
    for name in sheet_names:
        print name
    table = data.sheet_by_name(sheet_names[0])  # first sheet
    print table.nrows
    print table.ncols
    # Header row (row 0) values.
    for row_value in table.row_values(0):
        print row_value
    # Every row, printed as a list of cell values.
    for rownum in range(table.nrows):
        print table.row_values(rownum)
if __name__ == "__main__":
    test()
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import numpy
import pytest
import subprocess
import tempfile
from tests.integ import lock as lock
from sagemaker.mxnet.estimator import MXNet
from sagemaker.pytorch.estimator import PyTorch
from sagemaker.sklearn.estimator import SKLearn
from sagemaker.sklearn.model import SKLearnModel
from tests.integ import DATA_DIR
# Public repo/commit pinned for the happy-path GitHub test.
GIT_REPO = "https://github.com/aws/sagemaker-python-sdk.git"
BRANCH = "test-branch-git-config"
COMMIT = "ae15c9d7d5b97ea95ea451e4662ee43da3401d73"
# Private repositories (credentials intentionally blank — see skipped tests).
PRIVATE_GIT_REPO = "https://github.com/git-support-test/test-git.git"
PRIVATE_BRANCH = "master"
PRIVATE_COMMIT = "a46d6f9add3532ca3e4e231e4108b6bad15b7373"
PRIVATE_GIT_REPO_2FA = "https://github.com/git-support-test-2fa/test-git.git"
PRIVATE_GIT_REPO_2FA_SSH = "git@github.com:git-support-test-2fa/test-git.git"
PRIVATE_BRANCH_2FA = "master"
PRIVATE_COMMIT_2FA = "52381dee030eb332a7e42d9992878d7261eb21d4"
CODECOMMIT_REPO = (
    "https://git-codecommit.us-west-2.amazonaws.com/v1/repos/sagemaker-python-sdk-git-testing-repo/"
)
CODECOMMIT_BRANCH = "master"
# endpoint tests all use the same port, so we use this lock to prevent concurrent execution
LOCK_PATH = os.path.join(tempfile.gettempdir(), "sagemaker_test_git_lock")
@pytest.mark.local_mode
def test_github(
    sagemaker_local_session, pytorch_inference_latest_version, pytorch_inference_latest_py_version
):
    """Train and deploy a PyTorch estimator whose source comes from a pinned
    public GitHub repo/branch/commit, all in local mode."""
    script_path = "mnist.py"
    git_config = {"repo": GIT_REPO, "branch": BRANCH, "commit": COMMIT}
    pytorch = PyTorch(
        entry_point=script_path,
        role="SageMakerRole",
        source_dir="pytorch",
        framework_version=pytorch_inference_latest_version,
        py_version=pytorch_inference_latest_py_version,
        instance_count=1,
        instance_type="local",
        sagemaker_session=sagemaker_local_session,
        git_config=git_config,
    )
    data_path = os.path.join(DATA_DIR, "pytorch_mnist")
    pytorch.fit({"training": "file://" + os.path.join(data_path, "training")})
    # Serialize endpoint usage across tests — they all share one local port.
    with lock.lock(LOCK_PATH):
        try:
            predictor = pytorch.deploy(initial_instance_count=1, instance_type="local")
            data = numpy.zeros(shape=(1, 1, 28, 28)).astype(numpy.float32)
            result = predictor.predict(data)
            assert 10 == len(result[0])  # check that there is a probability for each label
        finally:
            # NOTE(review): if deploy() raises, `predictor` is unbound and
            # this finally raises NameError, masking the original error.
            predictor.delete_endpoint()
@pytest.mark.local_mode
@pytest.mark.skip("needs a secure authentication approach")
def test_private_github(
    sagemaker_local_session, mxnet_training_latest_version, mxnet_training_latest_py_version
):
    """Train and deploy an MXNet estimator cloned from a private GitHub repo
    using username/password auth (skipped until credentials are secured)."""
    script_path = "mnist.py"
    data_path = os.path.join(DATA_DIR, "mxnet_mnist")
    git_config = {
        "repo": PRIVATE_GIT_REPO,
        "branch": PRIVATE_BRANCH,
        "commit": PRIVATE_COMMIT,
        "2FA_enabled": False,
        "username": "git-support-test",
        "password": "",  # TODO: find a secure approach
    }
    source_dir = "mxnet"
    dependencies = ["foo/bar.py"]
    mx = MXNet(
        entry_point=script_path,
        role="SageMakerRole",
        source_dir=source_dir,
        dependencies=dependencies,
        framework_version=mxnet_training_latest_version,
        py_version=mxnet_training_latest_py_version,
        instance_count=1,
        instance_type="local",
        sagemaker_session=sagemaker_local_session,
        git_config=git_config,
    )
    mx.fit(
        {
            "train": "file://" + os.path.join(data_path, "train"),
            "test": "file://" + os.path.join(data_path, "test"),
        }
    )
    # The checked-out working copy must contain the expected files/deps.
    files = [file for file in os.listdir(mx.source_dir)]
    assert "some_file" in files
    assert "mnist.py" in files
    assert os.path.exists(mx.dependencies[0])
    with lock.lock(LOCK_PATH):
        try:
            serving_script_path = "mnist_hosting_with_custom_handlers.py"
            predictor = mx.deploy(1, "local", entry_point=serving_script_path)
            data = numpy.zeros(shape=(1, 1, 28, 28))
            result = predictor.predict(data)
            assert result is not None
        finally:
            # NOTE(review): if deploy() raises, `predictor` is unbound and
            # this finally raises NameError, masking the original error.
            predictor.delete_endpoint()
@pytest.mark.local_mode
@pytest.mark.skip("needs a secure authentication approach")
def test_private_github_with_2fa(
    sagemaker_local_session, sklearn_latest_version, sklearn_latest_py_version
):
    """Train an SKLearn estimator from a private repo using a 2FA token, then
    rebuild an SKLearnModel from the same git config and deploy it
    (skipped until a secure token source exists)."""
    script_path = "mnist.py"
    data_path = os.path.join(DATA_DIR, "sklearn_mnist")
    git_config = {
        "repo": PRIVATE_GIT_REPO_2FA,
        "branch": PRIVATE_BRANCH_2FA,
        "commit": PRIVATE_COMMIT_2FA,
        "2FA_enabled": True,
        "token": "",  # TODO: find a secure approach
    }
    source_dir = "sklearn"
    sklearn = SKLearn(
        entry_point=script_path,
        role="SageMakerRole",
        source_dir=source_dir,
        py_version=sklearn_latest_py_version,
        instance_count=1,
        instance_type="local",
        sagemaker_session=sagemaker_local_session,
        framework_version=sklearn_latest_version,
        hyperparameters={"epochs": 1},
        git_config=git_config,
    )
    train_input = "file://" + os.path.join(data_path, "train")
    test_input = "file://" + os.path.join(data_path, "test")
    sklearn.fit({"train": train_input, "test": test_input})
    assert os.path.isdir(sklearn.source_dir)
    with lock.lock(LOCK_PATH):
        try:
            # Look up the trained model artifacts and host them locally.
            client = sagemaker_local_session.sagemaker_client
            desc = client.describe_training_job(TrainingJobName=sklearn.latest_training_job.name)
            model_data = desc["ModelArtifacts"]["S3ModelArtifacts"]
            model = SKLearnModel(
                model_data,
                "SageMakerRole",
                entry_point=script_path,
                framework_version=sklearn_latest_version,
                source_dir=source_dir,
                sagemaker_session=sagemaker_local_session,
                git_config=git_config,
            )
            predictor = model.deploy(1, "local")
            data = numpy.zeros((100, 784), dtype="float32")
            result = predictor.predict(data)
            assert result is not None
        finally:
            # NOTE(review): if anything above raises before deploy() returns,
            # `predictor` is unbound and this finally raises NameError.
            predictor.delete_endpoint()
@pytest.mark.local_mode
def test_github_with_ssh_passphrase_not_configured(
    sagemaker_local_session, sklearn_latest_version, sklearn_latest_py_version
):
    """Cloning over SSH without a configured passphrase must make fit() fail
    with git's CalledProcessError."""
    script_path = "mnist.py"
    data_path = os.path.join(DATA_DIR, "sklearn_mnist")
    git_config = {
        "repo": PRIVATE_GIT_REPO_2FA_SSH,
        "branch": PRIVATE_BRANCH_2FA,
        "commit": PRIVATE_COMMIT_2FA,
    }
    source_dir = "sklearn"
    sklearn = SKLearn(
        entry_point=script_path,
        role="SageMakerRole",
        source_dir=source_dir,
        instance_count=1,
        instance_type="local",
        sagemaker_session=sagemaker_local_session,
        framework_version=sklearn_latest_version,
        py_version=sklearn_latest_py_version,
        hyperparameters={"epochs": 1},
        git_config=git_config,
    )
    train_input = "file://" + os.path.join(data_path, "train")
    test_input = "file://" + os.path.join(data_path, "test")
    # The git clone should fail during fit() — no SSH passphrase available.
    with pytest.raises(subprocess.CalledProcessError) as error:
        sklearn.fit({"train": train_input, "test": test_input})
    assert "returned non-zero exit status" in str(error.value)
@pytest.mark.local_mode
@pytest.mark.skip("needs a secure authentication approach")
def test_codecommit(
    sagemaker_local_session, mxnet_training_latest_version, mxnet_training_latest_py_version
):
    """Train and deploy an MXNet estimator cloned from an AWS CodeCommit repo
    (skipped until temporary credentials are wired in)."""
    script_path = "mnist.py"
    data_path = os.path.join(DATA_DIR, "mxnet_mnist")
    git_config = {
        "repo": CODECOMMIT_REPO,
        "branch": CODECOMMIT_BRANCH,
        "username": "GitTest-at-142577830533",
        "password": "",  # TODO: assume a role to get temporary credentials
    }
    source_dir = "mxnet"
    dependencies = ["foo/bar.py"]
    mx = MXNet(
        entry_point=script_path,
        role="SageMakerRole",
        source_dir=source_dir,
        dependencies=dependencies,
        framework_version=mxnet_training_latest_version,
        py_version=mxnet_training_latest_py_version,
        instance_count=1,
        instance_type="local",
        sagemaker_session=sagemaker_local_session,
        git_config=git_config,
    )
    mx.fit(
        {
            "train": "file://" + os.path.join(data_path, "train"),
            "test": "file://" + os.path.join(data_path, "test"),
        }
    )
    # The checked-out working copy must contain the expected files/deps.
    files = [file for file in os.listdir(mx.source_dir)]
    assert "some_file" in files
    assert "mnist.py" in files
    assert os.path.exists(mx.dependencies[0])
    with lock.lock(LOCK_PATH):
        try:
            predictor = mx.deploy(1, "local")
            data = numpy.zeros(shape=(1, 1, 28, 28))
            result = predictor.predict(data)
            assert result is not None
        finally:
            # NOTE(review): if deploy() raises, `predictor` is unbound and
            # this finally raises NameError, masking the original error.
            predictor.delete_endpoint()
|
from django.db import models
# Create your models here.
from ckeditor_uploader.fields import RichTextUploadingField
class feed(models.Model):
    # A single feed entry/article.
    # NOTE(review): an explicit IntegerField primary key means ids must be
    # assigned manually (no auto-increment) — confirm that is intentional.
    id=models.IntegerField(primary_key=True)
    author=models.CharField(max_length=100)
    title=models.CharField(max_length=100)
    # Optional rich-text content that may embed uploaded media (CKEditor).
    description=RichTextUploadingField(blank=True,null=True)
    body=models.TextField()
|
import numpy
import math
import cv2
import random
import matplotlib
# Bug fix: the backend must be selected BEFORE pyplot is imported —
# matplotlib.use("TkAgg") after `import matplotlib.pyplot` has no effect,
# because pyplot locks in a backend at import time.
matplotlib.use("TkAgg")
from scipy.special import gamma
import matplotlib.pyplot as plt
from functions import *
def inverse_gamma(data, alpha=0.1, beta=0.1):
    """
    Inverse-gamma probability density evaluated at `data`.

    pdf(x; a, b) = b**a / Gamma(a) * x**(-a-1) * exp(-b/x),  x > 0
    :param data: point x at which to evaluate the density (must be > 0)
    :param alpha: shape parameter a
    :param beta: scale parameter b
    :return: density value
    """
    # Bug fix: the original computed pow(alpha, data - 1); the inverse-gamma
    # density requires the x**(-alpha - 1) factor instead.
    return (pow(beta, alpha) / math.gamma(alpha)) *\
        pow(data, -alpha - 1) * math.exp(-beta/data)
def shape_scale():
    """Sample 16 GGD shape and 16 scale parameters: uniform(0, 3) draws
    mapped through the inverse-gamma density."""
    shape = [inverse_gamma(u) for u in numpy.random.uniform(0, 3, 16)]
    scale = [inverse_gamma(u) for u in numpy.random.uniform(0, 3, 16)]
    return shape, scale
def noise_variance():
    """Draw a noise level via the inverse-gamma density and return a 512x512
    field of zero-mean Gaussian noise at that level.

    NOTE(review): numpy.random.normal's second argument is the standard
    deviation, yet the sampled value is treated as a variance — confirm.
    NOTE(review): the candidate grid starts at 0.0, and inverse_gamma(0)
    divides by zero — confirm whether 0 should be excluded.
    """
    candidates = numpy.arange(0, 3.2, 0.2)
    level = inverse_gamma(random.choice(candidates))
    return numpy.random.normal(0, level, (512, 512))
def GGD(shape, scale, path):
    """
    Generalised gaussian distribution of input image
    :param shape: Shape param of GGD
    :param scale: Scale param of GGD
    :param path: Path to image
    :return: tuple (512x512 grayscale image, per-pixel GGD density values)
    """
    def ggd(x):
        # Density: 1 / (2 * scale^(1/shape) * Gamma(1 + 1/shape))
        #          * exp(-|x|^shape / scale)
        p1 = 2 * pow(scale, 1 / shape) * gamma(1 + 1 / shape)
        p2 = math.exp(-pow(abs(x), shape) / scale)
        return (1 / p1) * p2
    mat = cv2.imread(path, 0)  # flag 0: read as grayscale
    mat = cv2.resize(mat, (512, 512))
    mata = numpy.zeros(mat.shape)
    # Evaluate the density at every pixel intensity.
    # NOTE(review): both loops use len(mat) — correct only because the image
    # was just resized to a square 512x512.
    for i in range(len(mat)):
        for j in range(len(mat)):
            mata[i][j] = ggd(mat[i][j])
    return mat, mata
def potts_label_map_trf(image):
    """
    Find Label Map for the ground truth image
    :param image: Path to Image
    :return: whatever functions.energy_calc_1D returns for the resized
        grayscale image (used by the caller as (segmentation, labels))
    """
    img = cv2.imread(image, 0)  # flag 0: read as grayscale
    img = cv2.resize(img, (512, 512))
    im = img.copy()
    # Disabled draft of a neighbourhood-smoothing label pass, kept as a
    # no-op string for reference.
    '''labels = numpy.zeros((512, 512))
    for x in range(1, 511):
        for y in range(1, 511):
            if (im[x, y - 1] - 5 < im[x, y] < im[x, y - 1] + 5) and\
                    (im[x, y + 1] - 5 < im[x, y] < im[x, y + 1] + 5):
                im[x, y] = im[x, y - 1]
                labels[x, y] = 0
            elif (im[x - 1, y] - 40 < im[x, y] < im[x - 1, y] + 40) and\
                    (im[x + 1, y] - 40 < im[x, y] < im[x + 1, y] + 40):
                im[x, y] = im[x - 1, y]
                labels[x, y] = 1'''
    return energy_calc_1D(im)
def Segment(img):
    """
    Quantise an integer-valued image into 4 POTTS-style classes.

    Intensities 0-40 -> 0, 41-99 -> 1, 150-254 -> 2 and everything else
    (including 100-149 and 255) -> 3.
    :param img: square 2-D integer array (both loops use len(img))
    :return: label array of the same shape/dtype as img
    """
    out = img.copy()
    side = len(img)
    for row in range(side):
        for col in range(side):
            value = img[row, col]
            if value in range(0, 41):
                out[row, col] = 0
            elif value in range(41, 100):
                out[row, col] = 1
            elif value in range(150, 255):
                out[row, col] = 2
            else:
                out[row, col] = 3
    return out
def plot(path):
    """Run the full pipeline on one image and show a 2x2 figure: original,
    GGD-approximated, TRF-segmented, and final 4-class segmentation."""
    orig, approx = GGD(0.1, 0.1, path)
    approx_seg, labels = potts_label_map_trf(path)
    final_segment = Segment(approx_seg)
    plt.figure(figsize=(20, 10))
    # NOTE(review): legend() before any labelled artist warns and draws nothing.
    plt.legend()
    plt.subplot(221)
    plt.imshow(orig, cmap='gray'), plt.title("Original Image")
    plt.subplot(222)
    plt.imshow(approx, cmap='gray'), plt.title("GGD approximated Image")
    plt.subplot(223)
    plt.imshow(approx_seg, cmap='gray'), plt.title("TRF segmented Image")
    plt.subplot(224)
    plt.imshow(final_segment, cmap='jet'), plt.title("Final Segmented Image")
    plt.show()
if __name__ == "__main__":
    # To run the code just change the path and input path of the image.
    # For windows last line of code will be like - plot(path=r".\us_images\18-19-54.jpg")
    # For linux last line of code will be like - plot(path="./us_images/18-19-54.jpg")
    plot(path="./us_images/18-19-54.jpg")
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from django.conf.urls.static import static # used for static files
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls, name='admin'),
    # Built-in auth views with project-specific templates.
    path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
    # Application routes.
    path('', include('homepage.urls')),
    path('inventory/', include('inventory.urls')),
    path('transactions/', include('transactions.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# For every letter of `word`, list the lowercase letters whose ASCII code's
# low nibble indexes `tab` at that letter.
tab = ["i", "s", "r", "v", "e", "a", "w", "h", "o", "b", "p", "n", "u", "t", "f", "g"]
other_tab = ["a","b", "c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
word = ["g", "i", "a", "n", "t", "s"]
for target in word:
    # Candidates are the letters whose low 4 bits select `target` in tab.
    sol = {candidate for candidate in other_tab if tab[ord(candidate) & 0xf] == target}
    print("{} <-".format(target), sol)
|
import pathlib
import yaml
from typing import List
class MissingConfigurationError(Exception):
    """Raised when required configuration keys are absent.

    `missing` holds the dotted paths of the absent keys.
    """

    def __init__(self, missing: List[str]):
        self.missing = missing
class SettingsConfig:
    # Service-level settings: public base URL, tag filter, and cache policy.
    def __init__(self, base_url: str, tags: frozenset, cache_duration: int,
                 cache_file_duration: int, cache_directory: pathlib.Path):
        # Strip the trailing slash so later path joins do not double it.
        self.base_url = base_url.rstrip('/')
        self.tags = tags
        # Cache lifetimes (parser defaults are 3600 for both).
        self.cache_duration = cache_duration
        self.cache_file_duration = cache_file_duration
        self.cache_directory = cache_directory
class LibraryConfig:
    # Describes the Zotero library being proxied (parser default type: 'group').
    def __init__(self, library_type: str, library_id: str, name: str,
                 owner: str, description: str):
        self.type = library_type
        self.id = library_id
        self.name = name
        self.owner = owner
        self.description = description
class ZoteroConfig:
    # Credentials for the Zotero API.
    def __init__(self, api_key: str):
        self.api_key = api_key
class ZoteroxyConfig:
    # Aggregate of the three configuration sections.
    def __init__(self, zotero: ZoteroConfig, library: LibraryConfig, settings: SettingsConfig):
        self.zotero = zotero
        self.library = library
        self.settings = settings
class ZoteroxyConfigParser:
    """Loads the zoteroxy YAML configuration, applies built-in defaults and
    validates that all required keys are present."""

    DEFAULTS = {
        'zotero': {},
        'library': {
            'type': 'group',
            'owner': '',
            'description': '',
        },
        'settings': {
            'tags': frozenset(),
            'cache': {
                'duration': 3600,
                'file': {
                    'duration': 3600,
                    'directory': 'cache',
                },
            },
        },
    }

    REQUIRED = [
        ['zotero', 'api_key'],
        ['library', 'id'],
        ['library', 'name'],
        ['settings', 'base_url'],
    ]

    def __init__(self):
        self.cfg = dict()

    def has(self, *path):
        """Return True when `path` points at an existing key in the config."""
        node = self.cfg
        for step in path:
            if not hasattr(node, 'keys') or step not in node.keys():
                return False
            node = node[step]
        return True

    def _get_default(self, *path):
        """Walk DEFAULTS along `path` (KeyError for unknown paths)."""
        node = self.DEFAULTS
        for step in path:
            node = node[step]
        return node

    def get_or_default(self, *path):
        """Return the configured value at `path`, or its built-in default."""
        node = self.cfg
        for step in path:
            if not hasattr(node, 'keys') or step not in node.keys():
                return self._get_default(*path)
            node = node[step]
        return node

    def validate(self):
        """Raise MissingConfigurationError listing every absent REQUIRED key."""
        missing = ['.'.join(path) for path in self.REQUIRED if not self.has(*path)]
        if len(missing) > 0:
            raise MissingConfigurationError(missing)

    @property
    def library(self):
        """Materialise the LibraryConfig section."""
        return LibraryConfig(
            library_type=self.get_or_default('library', 'type'),
            library_id=self.get_or_default('library', 'id'),
            name=self.get_or_default('library', 'name'),
            owner=self.get_or_default('library', 'owner'),
            description=self.get_or_default('library', 'description'),
        )

    @property
    def settings(self):
        """Materialise the SettingsConfig section."""
        return SettingsConfig(
            base_url=self.get_or_default('settings', 'base_url'),
            tags=frozenset(self.get_or_default('settings', 'tags')),
            cache_duration=self.get_or_default('settings', 'cache', 'duration'),
            cache_file_duration=self.get_or_default('settings', 'cache', 'file', 'duration'),
            cache_directory=pathlib.Path(self.get_or_default('settings', 'cache', 'file', 'directory')),
        )

    @property
    def zotero(self):
        """Materialise the ZoteroConfig section."""
        return ZoteroConfig(
            api_key=self.get_or_default('zotero', 'api_key'),
        )

    def parse_file(self, fp):
        """Load YAML from `fp`, validate it and return a ZoteroxyConfig."""
        self.cfg = yaml.full_load(fp)
        self.validate()
        return ZoteroxyConfig(
            zotero=self.zotero,
            library=self.library,
            settings=self.settings,
        )
from math import ceil, floor
# Python 2 script (print statement, raw_input, xrange) — competitive-programming
# style. Reads n, then 2*n floats; minimizes |sum - rounded_sum| when choosing
# how many values to round up vs down.
# NOTE(review): `ceil` is imported but never used.
n = input()
a = map(float, raw_input().split())
x = 0      # count of values that are already integers
isum = 0   # floor-sum of the integer-valued entries
fsum = sum(a)
for i in xrange(2*n):
    if a[i] == int(a[i]):
        x += 1
        isum += floor(a[i])
ans = 10**10
# At least n-x and at most n entries can be rounded up; try every count.
for i in xrange(n-x, n+1):
    ans = min(ans, abs(fsum - isum - i))
print "%.3f" % ans
|
from django.core.management.base import BaseCommand, CommandError
from app.models import BusinessLicense
class Command(BaseCommand):
    """Management-command stub: writes a styled success message to stdout."""
    help = 'Lorem ipsum'

    def handle(self, *args, **options):
        message = self.style.SUCCESS('Lorem ipsum!')
        self.stdout.write(message)
|
import jax.numpy as jnp
from jax import vmap, grad, jit
from utils import *
from jax.scipy.stats import laplace, norm #, logistic
from itertools import chain
def inner_layerwise(act_prime, y):
    """Apply `act_prime` elementwise to the 2-D array `y` (via nested vmap)
    and return the elementwise natural log of the result."""
    elementwise = vmap(vmap(act_prime))
    return jnp.log(elementwise(y))
def loss_layerwise(nonlinearity, ys):
    """Sum of log|sigma'(y)| contributions over all layers and feature dims.

    `ys` is a sequence of per-layer arrays (stacked along a new leading axis);
    assumes each entry is (batch, dim) — TODO confirm against g_layerwise.
    Returns one value per batch element.
    """
    sigma_prime = grad(nonlinearity)
    # Map inner_layerwise over the stacked layer axis; the derivative function
    # itself is shared (in_axes=None).
    batched = vmap(inner_layerwise, in_axes=(None, 0))
    # summing individual layers contributions
    # Note: this works fine even with `len(ys)` == 2
    full_pass = jnp.sum(batched(sigma_prime, jnp.stack(ys)), axis=0)
    # summing over dimension
    return jnp.sum(full_pass, axis=1)
# Function to compute the term L^2_J of the loglikelihood
def log_abs_det(params):
    """Sum of log|det W| over the layer weight matrices in `params`
    (the L^2_J term of the log-likelihood); biases are ignored."""
    weights = [weight for weight, _bias in params]
    _sign, logabsdet = jnp.linalg.slogdet(jnp.stack(weights))
    return jnp.sum(logabsdet)
@jit
@vmap
def log_pdf_normal(s):
    """ Log-pdf for a Gaussian distribution w. mean 0 std 1"""
    # vmap maps over the leading (batch) axis, so `s` here is one sample;
    # the sum collapses its remaining dimensions to a scalar log-density.
    return jnp.sum(norm.logpdf(s))
# Note that here we compute the 3 terms of the loglikelihood;
# during training we directly optimize only the term `l1 + l2`
# and we include the gradient of the term `l3` explicitly
# (i.e. the `loss` function we derive includes only `l1 + l2`
# and the `l3` term is introduced with `add_det_grad`)
def get_piecewise_loss(g_layerwise, loss_pdf, nonlinearity):
    """Build a jit-compiled function returning the three log-likelihood terms
    (l1, l2, l3) separately.

    NOTE(review): the `loss_pdf` argument is unused — `log_pdf_normal` is
    hard-wired below; confirm whether that is intentional.
    """
    @jit
    def piecewise_loss(params, x):
        # z: final outputs, ys: per-layer values (per g_layerwise's contract —
        # TODO confirm).
        z, ys = g_layerwise(params, x)
        l1 = jnp.mean(log_pdf_normal(z))                  # base-density term
        l2 = jnp.mean(loss_layerwise(nonlinearity, ys))   # activation-derivative term
        l3 = log_abs_det(params)                          # weight log|det| term
        return l1, l2, l3
    return piecewise_loss
|
# -*- coding: utf-8 -*-
#
# Testing Xor pre-trained model.
# @auth whuang022ai
#
import mlp
if __name__ == "__main__":
    # Use a distinct name for the instance so the `mlp` module itself is not
    # shadowed (the original rebound the name `mlp` to the instance).
    # MLP(0, 0, 0, 0): sizes presumably come from the saved 'xor' model — verify.
    model = mlp.MLP(0, 0, 0, 0)
    model.load_model('xor')
    model.test_forward()
|
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow,QFileDialog,QMessageBox,QWidget,QAction
from PyQt5.QtCore import pyqtSlot,QFile,QDir,Qt
from myEEGLAB import QEEGLAB
from ui_MainWindow import Ui_MainWindow
##from PyQt5.QtWidgets import
##from PyQt5.QtGui import
##from PyQt5.QtSql import
##from PyQt5.QtMultimedia import
##from PyQt5.QtMultimediaWidgets import
class QmyMainWindow(QMainWindow):
    """Main application window: hosts the menu action that opens an EDF file
    and, for EDF files, launches a QEEGLAB child window."""

    def __init__(self, parent=None):
        super().__init__(parent)    # call the base constructor to create the window
        self.ui = Ui_MainWindow()   # create the generated UI object
        self.ui.setupUi(self)       # build the UI onto this window

    # ======== handler for the menu-bar "open" action ========
    @pyqtSlot()
    def on_actFile_Open_triggered(self):
        """Show a file-open dialog; for an EDF selection, open a QEEGLAB window."""
        print("user clicked “open” ")
        curPath = QDir.currentPath()    # current working directory
        title = "Open a file"           # dialog title
        # File filter, e.g. program files(*.h *.cpp *.py);;text(*.txt);;all(*.*)
        filt = "edf file(*.edf);;All file(*.*)"
        fileName, flt = QFileDialog.getOpenFileName(self, title, curPath, filt)
        if (fileName == ""):    # nothing selected
            return
        else:
            print(fileName)
        if "edf" in flt:
            self.ui.statusBar.showMessage(fileName)
            EEGLAB = QEEGLAB(self)
            EEGLAB.setAttribute(Qt.WA_DeleteOnClose)  # free the widget when closed
            EEGLAB.setWindowFlag(Qt.Window, True)     # show as a top-level window
            EEGLAB.show()
## ============ standalone window test ============================
if __name__ == "__main__":          # run this module directly to test the window
    app = QApplication(sys.argv)    # create the GUI application
    mainWindow = QmyMainWindow()    # build the main window
    mainWindow.show()
    sys.exit(app.exec_())
|
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from django.views.decorators.cache import cache_page
from core.models import Trailer
@cache_page(timeout=10)
def index(request):
    """Render the landing page with randomized trailer selections (cached 10 s)."""
    trailers = Trailer.objects
    context = {
        'random_list': trailers.all().order_by("?")[:30],
        'carousel': trailers.filter()[0:7],
        'main_vid': trailers.first(),
        'featured': trailers.all().order_by("?")[:4],
    }
    return render(request, template_name='index.html', context=context)
@cache_page(timeout=10)
def detail(request, id):
    """Render the detail page for one trailer (cached 10 s).

    Uses get_object_or_404 so an unknown id returns a 404 page instead of an
    uncaught Trailer.DoesNotExist (HTTP 500).
    """
    from django.shortcuts import get_object_or_404  # local: keeps this edit self-contained
    trailer = get_object_or_404(Trailer, id=id)
    video_url = trailer.video.url
    context = {
        'random_list': Trailer.objects.all().order_by("?")[:30],
        'carousel': Trailer.objects.filter()[0:7],
        'main_vid': Trailer.objects.first(),
        'featured': Trailer.objects.all().order_by("?")[:4],
        'id': id,
        'trailer': trailer,
        'video_url': video_url
    }
    return render(request, template_name='detail.html', context=context)
def vmap(request):
    """Serve the static VMAP ad-schedule XML template."""
    return render(request, context={}, template_name='vmap.xml')
def vast_playlist(request):
    """Serve the static VAST waterfall XML template."""
    return render(request, context={}, template_name='vast_waterfall.xml')
|
# card.py
# by Johnathon Beaumier
# for use in my program for playing the Guillotine card game
from guillotine import events
# structure:
# method definition for helper function
# class definition for card types
# lists for card object creation and grouping
### Method Definitions #######################################################
# Currently unused helper.
def possible_other_hands(hand, size, discard):
    """Return all `size`-card combinations that could form another player's hand.

    Starts from the full action deck and removes one occurrence of each card
    already visible in `hand` + `discard` (duplicates exist, so membership
    tests alone are not enough).
    """
    from itertools import combinations  # local import keeps this fix self-contained
    # make list of cards not accounted for
    # some are duplicates, so we can't use 'not in'
    possible_cards = list(action_cards)
    for card in hand + discard:
        possible_cards.remove(card)
    # BUG FIX: the original computed possible_cards but never returned the
    # combinations its comment promised.
    return list(combinations(possible_cards, size))
### Class Definitions ########################################################
# TODO: Change card events to *events
class Card(object):
    """Base class for all Guillotine cards.

    `events` is the ordered list/dict of (event, kwargs) steps executed when
    the card is played; a fresh empty dict is used when none is given so no
    mutable default is shared between cards.
    """

    def __init__(self, name, description, value, event_order):
        self.name = name
        self.description = description
        self.value = value
        self.events = event_order if event_order is not None else {}

    def __repr__(self):
        has_desc = self.description is not None
        has_value = self.value != 0
        if has_desc and has_value:
            return '<{} ({})> - {}'.format(self.name, self.value,
                                           self.description)
        if has_desc:
            return '<{}> - {}'.format(self.name, self.description)
        if has_value:
            return '<{} ({})>'.format(self.name, self.value)
        return '<{}>'.format(self.name)
class NobleCard(Card):
    """A noble in the line, worth `value` points and belonging to a category."""

    # category -> display color
    categories = {'Church': 'blue', 'Military': 'red', 'Royal': 'purple',
                  'Negative': 'gray', 'Civic': 'green'}
    # class-level registry: noble name -> point value (filled on construction)
    names = {}

    def __init__(self, name, value, category, description=None,
                 trigger='collection', event_order=None):
        super(NobleCard, self).__init__(name, description, value, event_order)
        self.category = category
        NobleCard.names[name] = value

    def color(self):
        """Return the display color for this noble's category."""
        # BUG FIX: the original referenced the undefined name `Noble`
        # (NameError at call time); the class is NobleCard.
        return NobleCard.categories[self.category]
class ActionCard(Card):
    """A playable action card (point value defaults to 0)."""

    # Class-level registry of every action-card name ever constructed.
    names = []

    def __init__(self, name, description, event_order, value=0):
        super(ActionCard, self).__init__(name, description, value, event_order)
        ActionCard.names.append(name)
### List Definitions #########################################################
# TODO: Card IDs to differentiate between same cards
# Full noble deck.
# NOTE(review): the `2 *`, `3 *` and `20 *` multipliers repeat *references* to
# the same NobleCard instances, not independent copies — fine for immutable
# reads, but mutating one "Sheriff" would affect all of them.
noble_cards = [
    NobleCard('Mayor', 3, 'Civic'),
    NobleCard('Regent', 4, 'Royal'),
    NobleCard('Baron', 3, 'Royal'),
    NobleCard('Heretic', 2, 'Church'),
    NobleCard('Councilman', 3, 'Civic'),
    NobleCard('Bishop', 2, 'Church'),
    NobleCard('Piss Boy', 1, 'Royal'),
    NobleCard('Governor', 4, 'Civic'),
    NobleCard('Hero of the People', -3, 'Negative'),
    NobleCard('Tax Collector', 2, 'Civic'),
    NobleCard('Coiffeur', 1, 'Royal'),
    NobleCard('Duke', 3, 'Royal'),
    NobleCard('Colonel', 3, 'Military'),
    NobleCard('Archbishop', 4, 'Church'),
    NobleCard('King Louis XVI', 5, 'Royal'),
    NobleCard('Marie Antoinette', 5, 'Royal'),
    NobleCard('Bad Nun', 3, 'Church'),
    NobleCard('Royal Cartographer', 1, 'Royal'),
    NobleCard('Land Lord', 2, 'Civic')
] + 2 * [
    NobleCard('Wealthy Priest', 1, 'Church'),
    NobleCard('Sheriff', 1, 'Civic'),
    NobleCard('Lieutenant', 2, 'Military')
] + 3 * [
    NobleCard('Martyr', -1, 'Negative')
] + 20 * [
    NobleCard('Filler Noble', 0, 'Negative')
]
# Full action deck: each card carries its ordered (event, kwargs) steps.
# NOTE(review): list multiplication repeats *references* to the same
# ActionCard instances (and their shared event lists), not copies.
action_cards = 4 * [
    ActionCard('Fainting Spell',
        'Move a noble backward up to 3 places in line.', [
            (events.choose_from_line, {'from_back': 1}),
            (events.choose_movement, {'distance': -3}),
            (events.move, {})
        ]), ActionCard('Was That My Name?',
        'Move a noble forward up to 3 places in line.', [
            (events.choose_from_line, {'from_front': 1}),
            (events.choose_movement, {'distance': 3}),
            (events.move, {})
        ])
] + 8 * [
    ActionCard('Stumble',
        'Move a noble forward exactly 1 place in line.', [
            (events.choose_from_line, {'from_front': 1}),
            (events.move, {'distance': 1})
        ]), ActionCard('Pushed',
        'Move a noble forward exactly 2 places in line.', [
            (events.choose_from_line, {'from_front': 2}),
            (events.move, {'distance': 2})
        ]), ActionCard('Friend of the Queen',
        'Move a noble backward up to 2 places in line.', [
            (events.choose_from_line, {'from_back': 1}),
            (events.choose_movement, {'distance': -2}),
            (events.move, {})
        ]), ActionCard('Ignoble Noble',
        'Move a noble forward exactly 4 places in line.', [
            (events.choose_from_line, {'from_front': 4}),
            (events.move, {'distance': 4})
        ]), ActionCard("L'Idiot",
        'Move a noble forward up to 2 places in line.', [
            (events.choose_from_line, {'from_front': 1}),
            (events.choose_movement, {'distance': 2}),
            (events.move, {})
        ])
] + 12 * [
    ActionCard('Filler Action',
        'Placeholder card to fill the deck. No effect.', [])
]
import collections
import operator
import sys
def getDistance(startX, endX, startY, endY):
    """Map every grid cell in the inclusive rectangle to the index of its
    closest coordinate by Manhattan distance, or '.' when the minimum is tied.

    Reads the module-level `coords` dict of index -> (x, y); the returned
    index is the position of the winning coordinate in insertion order.
    """
    cell_dists = {}
    for gx in range(startX, endX + 1):
        for gy in range(startY, endY + 1):
            cell_dists[(gx, gy)] = [abs(cx - gx) + abs(cy - gy)
                                    for cx, cy in coords.values()]
    closest = {}
    for cell, dists in cell_dists.items():
        best = min(dists)
        # More than one coordinate at the minimum distance -> tied cell.
        closest[cell] = '.' if dists.count(best) > 1 else dists.index(best)
    return closest
#Part 1
t1 = []
t2 = []
coords = {}
# Get coordinates in right format, as well as the range
#with open('dump') as f:
with open('6.txt') as f:
    count = 0
    for row in f:
        l1 = row.rstrip().replace(' ','').split(',')
        t1.append(l1[0])
        t2.append(l1[1])
        coords[count] = (int(l1[0]), int(l1[1]))
        count += 1
startX=0
startY=0
# NOTE(review): t1/t2 hold *strings*, so max() compares lexicographically
# ('9' > '100'); convert entries to int before max() for correct bounds.
endX=int(max(t1))
endY=int(max(t2))
print("MaxX:", endX, "MaxY", endY)
print('-' * 10)
d2 = getDistance(startX, endX, startY, endY)
print(d2.keys())
# Execution deliberately(?) stops here — everything below appears to be an
# unfinished grid-expansion attempt and never runs. (`sys` is imported at
# the top of the file.)
sys.exit(0)
#Start with the column immediately to the right of endX. Once you're done calculating compare it with the values you already have for (endX, y)
match = 1
while match == 1:
    newstartX = endX + 1
    newendX = newstartX
    newstartY = 0
    newendY = endY
    # NOTE(review): this rebinds t2, which previously held the y strings.
    t2 = getDistance(newstartX, newendX, newstartY, newendY)
    newrc = False
    for y in range(0, endY+1):
        if d2[(endX, y)] == t2[(newstartX, y)]:
            #print("Match: ", (endX, y), d2[(endX,y)], t2[(newstartX, y)])
            match = 0
        else:
            #print("No match:", d2[(endX,y)], t2[(newstartX, y)])
            #endX = newendX
            match = 1
            newrc = True
            #Update dictionary by adding new column
            d2[(newstartX, y)] = t2[(newstartX, y)]
    if newrc:
        endX = newendX
#Next go to the left most column where x becomes -ve. Once you're done here, compare it with what you have for (startX, y)
match = 1
while match == 1:
    newstartX = startX - 1
    newendX = newstartX
    newstartY = 0
    newendY = endY
    t2 = getDistance(newstartX, newendX, newstartY, newendY)
    newrc = False
    for y in range(0, endY+1):
        if d2[(startX, y)] == t2[(newstartX, y)]:
            #print("Match: ", d2[(startX,y)], t2[(newstartX, y)])
            match = 0
        else:
            #endX = newendX
            match = 1
            newrc = True
            #Update dictionary by adding new column
            d2[(newstartX, y)] = t2[(newstartX, y)]
    if newrc:
        # NOTE(review): this grows the grid to the *left*, so it looks like
        # startX (not endX) should be updated here — confirm.
        endX = newendX
#Now go to the bottom most row where y becomes +ve. Once you're done here, compare it with what you have for (x, endY)
match = 1
while match == 1:
    newstartX = 0
    newendX = endX
    newstartY = endY + 1
    newendY = newstartY
    t2 = getDistance(newstartX, newendX, newstartY, newendY)
    newrc = False
    for x in range(0, endX+1):
        if d2[(x, endY)] == t2[(x, newendY)]:
            #print("Match: ", d2[(endX,y)], t2[(newstartX, y)])
            match = 0
        else:
            #endY = newendY
            match = 1
            newrc = True
            #Update dictionary by adding new column
            d2[(x, newstartY)] = t2[(x, newstartY)]
    if newrc:
        endY = newendY
#Now go to the top most row where y becomes -ve. Once you're done here, compare it with what you have for (x, startY)
match = 1
while match == 1:
    newstartX = 0
    newendX = endX
    newstartY = startY - 1
    newendY = newstartY
    newrc = False
    t2 = getDistance(newstartX, newendX, newstartY, newendY)
    for x in range(0, endX+1):
        if d2[(x, startY)] == t2[(x, newstartY)]:
            #print("Match: ", d2[(endX,y)], t2[(newstartX, y)])
            match = 0
        else:
            match = 1
            newrc = True
            #Update dictionary by adding new column
            d2[(x, newstartY)] = t2[(x, newstartY)]
    if newrc:
        endY = newendY
# Track infinites by looking at the border coordinates
final = {}
infinite = []
for k,v in d2.items():
    print(k,v)
    if v != '.':
        if k[0] == 0 or k[0] == endX or k[1] == 0 or k[1] == endY:
            if v not in infinite:
                infinite.append(v)
        if v not in final.keys():
            final[v] = 1
        else:
            final[v] += 1
#print('-' * 10)
#print("Infinite", infinite)
#print('-' * 10)
#for k,v in final.items():
#    print(k, v)
#print('-' * 10)
# Largest non-infinite area would be the part-1 answer.
count= sorted(final.items(), reverse=True, key=operator.itemgetter(1))
for entry in count:
    #print(entry)
    if entry[0] not in infinite:
        print(entry[1])
        break
    else:
        continue
|
#!/usr/bin/python3
# all arguments to this script are considered as json files
# and attempted to be formatted alphabetically
import json
import os
from sys import argv
files = argv[1:]
# Expand directory arguments. BUG FIX: the original only expanded one level,
# so JSON files inside nested sub-directories were silently skipped; walk
# recursively instead.
for file in files[:]:
    if os.path.isdir(file):
        files.remove(file)
        for root, _dirs, names in os.walk(file):
            files.extend(os.path.join(root, name) for name in names)
# Rewrite each .json file pretty-printed with alphabetically sorted keys.
for file in files:
    if not file.endswith('.json'):
        continue
    print("formatting file {}".format(file))
    with open(file) as f:
        j = json.load(f)
    with open(file, 'w') as f:
        f.write(json.dumps(j, indent=4, sort_keys=True, separators=(',', ': ')))
|
# Upload wav file using ampy or deploy tar
#
# TAR:
# import utils.setup
# utils.setup.deploy("http[s]://path/to/file.tar")
import os
import struct
from machine import Timer
from machine import DAC, Pin
from time import sleep
#from utils.pinout import set_pinout
#pinout = set_pinout()
#out = pinout.PIEZZO_PIN
print("init")
# Short delay before configuring peripherals — presumably to let the board
# settle after boot; confirm whether the full 3 s is required.
sleep(3)
# Play rutine
def play(t):
    """Timer callback: emit one 8-bit PCM sample per tick; stop at end of file."""
    global wav_pos
    if wav_pos != wav_size:
        dw(ord(wr(1)))  # read one byte from the wav, write it to the DAC
        wav_pos += 1
        return
    # End of PCM data: stop this timer and rewind past the 44-byte WAV header
    # so the file is ready to play again.
    t.deinit()
    wav_pos = wav.seek(44)
    print("Stop play")
def play_loop(t):
    """Timer callback like `play`, but rewinds at end-of-file and keeps going."""
    global wav_pos
    if wav_pos != wav_size:
        dw(ord(wr(1)))  # one sample per tick
        wav_pos += 1
        return
    print("Rewind to begin")
    wav_pos = wav.seek(44)  # back to the start of the PCM data
def play_loop_all(t):
    """Timer callback: at end of the current wav, stop this timer and advance
    the playlist (play_playlist re-arms the timer for the next file)."""
    global wav_pos
    if wav_pos != wav_size:
        dw(ord(wr(1)))  # one sample per tick
        wav_pos += 1
        return
    t.deinit()
    play_playlist()
# Play rutine
def play_mem(t):
    """Timer callback that streams samples from an in-memory buffer `mem`."""
    global mem_pos
    if mem_pos != len(mem):
        dac.write(mem[mem_pos])
        mem_pos += 1
        return
    t.deinit()
    print("Stop play")
def actualSampleRate(t):
    """1 Hz diagnostics callback: print how many samples were consumed since
    the previous tick plus total playback time, then remember the position."""
    global wav_pos_prev
    consumed = wav_pos - wav_pos_prev
    print("Samples: {0} Time: {1}".format(consumed, wav_pos / wav_sample_rate))
    wav_pos_prev = wav_pos
def loadWav(wav_file):
    """Open `wav_file`, parse its header, seek to the PCM data and return the
    file's bound `read` method (used by the timer callbacks as `wr`).

    Side effects: sets the module globals `wav`, `wav_size`,
    `wav_sample_rate` and `wav_pos`.
    """
    global wav_sample_rate
    global wav_size
    global wav_pos
    global wav
    wav = open(wav_file, "rb")
    # Read WAV header
    wav.seek(20)
    # 24 bytes starting at offset 20: format tag, channel count, sample rate,
    # byte rate, block align, bits/sample, 'data' chunk id, data chunk size.
    wav_format, \
    wav_channels, \
    wav_sample_rate, \
    wav_byterate, \
    wav_blockalign, \
    wav_bits_per_sample, \
    wav_datasignature, \
    wav_data_size = struct.unpack('HHIIHH4sI', wav.read(24))
    # Absolute offset just past the PCM data (44-byte header + data size).
    wav_size = wav_data_size + 44
    # Seek to PCM data begin
    wav_pos = wav.seek(44)
    print("WAV File: {0}\n\
WAV Format: {1}\n\
WAV Channels: {2}\n\
WAV Sample rate: {3}\n\
WAV Bits per sample: {4}".format(wav_file,
                                 wav_format,
                                 wav_channels,
                                 wav_sample_rate,
                                 wav_bits_per_sample))
    return wav.read
playlist_pos = 0  # index of the next playlist entry to play

def play_playlist():
    """Load the current playlist entry (wrapping at the end), start the sample
    timer with play_loop_all so the playlist keeps advancing, then step on."""
    global wr, playlist_pos
    if playlist_pos >= len(wav_files):
        playlist_pos = 0
    wr = loadWav(wav_files[playlist_pos])
    t1.init(freq=wav_sample_rate, mode=Timer.PERIODIC, callback=play_loop_all)
    playlist_pos += 1
    sleep(1)
# Init DAC
dac = DAC(Pin(26))
# Middle value for speaker
dac.write(0x7F)
dw = dac.write  # cache the bound method for fast access in timer callbacks
wav_pos = 0        # current read offset into the open wav file
wav_pos_prev = 0   # offset at the previous diagnostics tick
wav_files = list()  # playlist, played in order by play_playlist()
wav_files.append("assets/wav/s1.wav")
wav_files.append("assets/wav/s2.wav")
wav_files.append("assets/wav/s3.wav")
wav_files.append("assets/wav/oeproj.wav")
wav_files.append("assets/wav/s5.wav")
wav_files.append("assets/wav/s6.wav")
wav_files.append("assets/wav/s7.wav")
wav_files.append("assets/wav/upycool.wav")
t1 = Timer(0)  # sample-output timer (armed by play_playlist)
t2 = Timer(1)  # 1 Hz diagnostics timer
t2.init(freq=1, mode=Timer.PERIODIC, callback=actualSampleRate)
play_playlist()
#!python
from typing import List
def is_sorted(items: List[int]) -> bool:
    """Return a boolean indicating whether given items are in sorted order.
    Running time: O(n) — every adjacent pair is checked at most once.
    Memory usage: O(1) beyond the zipped iterator."""
    return all(left <= right for left, right in zip(items, items[1:]))
def bubble_sort(items: List[int], print_sorting=False) -> List[int]:
    """Sort `items` in place by repeatedly swapping out-of-order neighbours.

    Each outer pass bubbles the largest unsorted value to the end; a pass with
    no swaps means the list is sorted, so we return early.
    Running time: avg O(n^2); memory: O(1) (in-place)."""
    n = len(items)
    for done in range(n):
        any_swaps = False
        # The last `done` positions are already in their final place.
        for idx in range(n - 1 - done):
            if items[idx] > items[idx + 1]:
                items[idx], items[idx + 1] = items[idx + 1], items[idx]
                any_swaps = True
        if not any_swaps:
            return items
        if print_sorting:
            visualize_sort(items)
    return items
def selection_sort(items: List[int], print_sorting=False) -> List[int]:
    """Sort `items` in place: repeatedly swap the minimum of the unsorted tail
    into the first unsorted position.
    Running time: O(n^2); memory: O(1) (in-place)."""
    for pos in range(len(items)):
        # Index of the smallest remaining value (first one wins on ties,
        # matching a strict `<` scan).
        smallest = min(range(pos, len(items)), key=items.__getitem__)
        items[pos], items[smallest] = items[smallest], items[pos]
        if print_sorting:
            visualize_sort(items)
    return items
def insertion_sort(items: List[int], print_sorting=False) -> List[int]:
    """Sort `items` in place: take each value and slide it left into its
    sorted position among the already-processed prefix.
    Running time: O(n^2); memory: O(1) (in-place)."""
    for right in range(1, len(items)):
        current = items[right]
        slot = right - 1
        # Shift larger prefix values one step right to open a slot.
        while slot >= 0 and items[slot] > current:
            items[slot + 1] = items[slot]
            slot -= 1
        items[slot + 1] = current
        if print_sorting:
            visualize_sort(items)
    return items
def visualize_sort(items: List[int]) -> None:
    '''Prints the sorting algorithm in action if enabled'''
    # Called once per pass by the sorts above when print_sorting=True.
    print(items)
if __name__ == '__main__':
    # Demo: is_sorted works on any comparable items (tuples compare
    # lexicographically).
    test = [(12, 'W'), (5, 'I')]
    print(is_sorted(test))
    T1 = [2, 1, 0, 4, 5]
    print('STARTING VALUES:', T1, '\n')
    print('Sorted?', is_sorted(T1))
    # bubble_sort(T1, True)
    # selection_sort(T1, True)
    insertion_sort(T1, True)  # sorts in place, printing after each pass
|
import pymysql
import pandas as pd
class DBModel:
    """Data-access layer for the `history` meal-log table in `db_diet`."""

    # Whitelist of meal columns insert() may target. Identifiers cannot be
    # bound as SQL parameters, so they must be validated explicitly.
    _MEAL_COLUMNS = ('breakfast', 'lunch', 'dinner')

    def __init__(self):
        self.db_diet = pymysql.connect(
            user='root',
            passwd='111111',
            host='127.0.0.1',
            db='db_diet',
            charset='utf8'
        )
        self.cursor = self.db_diet.cursor(pymysql.cursors.DictCursor)

    def selectAll(self):
        """Print every row of `history` as a DataFrame (debug helper)."""
        sql = "SELECT * FROM `history`;"
        self.cursor.execute(sql)
        result = pd.DataFrame(self.cursor.fetchall())
        print(result)

    def selectByNameDate(self, username, userdate):
        """Return [breakfast, lunch, dinner] for one user and date.

        Raises a lookup error when no matching row exists (unchanged from the
        original behavior).
        """
        # SECURITY FIX: parameterized query instead of string interpolation,
        # which allowed SQL injection through username/userdate.
        sql = "SELECT * FROM `history` WHERE username=%s AND userdate=%s;"
        self.cursor.execute(sql, (username, userdate))
        result = pd.DataFrame(self.cursor.fetchall())
        return [result['breakfast'][0], result['lunch'][0], result['dinner'][0]]

    def insert(self, username, userdate, eatTime, foodname):
        """Upsert one meal entry: update the row if (username, userdate)
        already exists, otherwise insert a new row."""
        if eatTime not in self._MEAL_COLUMNS:
            # Column names cannot be parameterized; reject anything unexpected.
            raise ValueError('invalid eatTime: {!r}'.format(eatTime))
        # Check whether a row already exists for this user/date.
        sql = "SELECT * FROM `history` WHERE username=%s AND userdate=%s;"
        self.cursor.execute(sql, (username, userdate))
        result = self.cursor.fetchall()
        if result:
            print(username + '데이터 존재함')
            # update (values bound as parameters; column name whitelisted above)
            sql = "UPDATE history SET {}=%s WHERE username=%s AND userdate=%s;".format(eatTime)
            self.cursor.execute(sql, (foodname, username, userdate))
            self.db_diet.commit()
            print('update complete')
        else:
            print(username + '데이터 추가함')
            # insert
            sql = "INSERT INTO `history` (username, userdate, {}) VALUES (%s, %s, %s);".format(eatTime)
            self.cursor.execute(sql, (username, userdate, foodname))
            self.db_diet.commit()
            print('insert complete')
if __name__ == '__main__':
    # Manual smoke test — requires a local MySQL server with the db_diet schema.
    print('DataBaseModel run')
    dbm = DBModel()
    ## select test
    # foodList = dbm.selectByNameDate('테스트', '2021-05-20')
    # print(foodList)
    ## insert test
    # dbm.insert('테스트4', '2021-05-20', 'dinner', '햄버거')
|
import FWCore.ParameterSet.Config as cms
from RecoBTag.Skimming.btagElecInJet_EventContent_cff import *
# CMSSW output module: writes events passing the electron-in-jet b-tag skim
# (selection and AODSIM content PSets are imported above) to a ROOT file.
btagElecInJetOutputModuleAODSIM = cms.OutputModule("PoolOutputModule",
    btagElecInJetEventSelection,
    AODSIMbtagElecInJetEventContent,
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string('btagElecInJetAODSIM'),
        dataTier = cms.untracked.string('USER')
    ),
    fileName = cms.untracked.string('btagElecInJetAODSIM.root')
)
|
# -*- coding: utf-8 -*-
# @Author: fengmingshan
# @Date: 2019-09-02 15:15:02
# @Last Modified by: Administrator
# @Last Modified time: 2019-09-06 17:12:31
import pandas as pd
import numpy as np
import os
import math
data_path = 'd:/2019年工作/2019年9月校园超忙小区分析/'
file = '能源学校_曲靖KPI指标_08-29_09.02.csv'

# The export carries 5 report-header rows before the real column names.
df_content = pd.read_csv(data_path + file, engine='python', skiprows=5)
df_content = df_content[['开始时间',
                         '结束时间',
                         '网元',
                         '网元名称',
                         '小区',
                         '小区名称',
                         '空口上行用户面流量(MByte)_1',
                         '空口下行用户面流量(MByte)_1477070755617-11',
                         '分QCI用户体验下行平均速率(Mbps)_1',
                         '下行PRB平均占用率_1',
                         'PDCCH信道CCE占用率_1',
                         '最大RRC连接用户数_1',
                         '平均RRC连接用户数_1',
                         '下行平均激活用户数_1',
                         '最大激活用户数_1',
                         'CQI优良比(>=7比例)']]
# Round user counts up to whole users.
df_content['平均RRC连接用户数_1'] = df_content['平均RRC连接用户数_1'].map(
    lambda x: math.ceil(x))
df_content['下行平均激活用户数_1'] = df_content['下行平均激活用户数_1'].map(
    lambda x: math.ceil(x))
# Strip thousands separators and convert MByte -> GByte.
df_content['空口下行用户面流量(MByte)_1477070755617-11'] = df_content['空口下行用户面流量(MByte)_1477070755617-11'].map(
    lambda x: float(x.replace(',', '')) / 1024)
df_content['空口上行用户面流量(MByte)_1'] = df_content['空口上行用户面流量(MByte)_1'].map(
    lambda x: float(x.replace(',', '')) / 1024)
# Mbps -> MBps (divide by 8), one decimal place.
df_content['分QCI用户体验下行平均速率(Mbps)_1'] = round(
    df_content['分QCI用户体验下行平均速率(Mbps)_1'] / 8, 1)
df_content['总流量(GB)'] = df_content['空口上行用户面流量(MByte)_1'] + \
    df_content['空口下行用户面流量(MByte)_1477070755617-11']
df_content.rename(columns={'空口上行用户面流量(MByte)_1': '上行流量(GB)',
                           '空口下行用户面流量(MByte)_1477070755617-11': '下行流量(GB)',
                           '分QCI用户体验下行平均速率(Mbps)_1': '用户体验速率(MBps)',
                           }, inplace=True)
df_content['单用户平均流量(MByte)'] = df_content['下行流量(GB)'] / \
    df_content['下行平均激活用户数_1']
# Split the start timestamp into a date and an hour-of-day column.
df_content['日期'] = df_content['开始时间'].apply(lambda x: x.split(' ')[0])
df_content['小时'] = df_content['开始时间'].apply(
    lambda x: x.split(' ')[1].split(':')[0])
# BUG FIX: the original contained a dangling statement
# `df_content['下行PRB平均占用率_1'] =` with no right-hand side, which is a
# SyntaxError. The column is consumed as-is below; restore the intended
# normalization here if one was meant.
# =============================================================================
# Overall hourly breakdown per cell
# =============================================================================
df_all = pd.pivot_table(
    df_content,
    index=[
        '网元',
        '网元名称',
        '小区',
        '小区名称',
        '日期',
        '小时'],
    values=[
        '最大RRC连接用户数_1',
        '平均RRC连接用户数_1',
        '下行平均激活用户数_1',
        '最大激活用户数_1',
        '总流量(GB)',
        '下行PRB平均占用率_1'],
    aggfunc={
        '最大RRC连接用户数_1': np.sum,
        '平均RRC连接用户数_1': np.sum,
        '下行平均激活用户数_1': np.sum,
        '最大激活用户数_1': np.sum,
        '总流量(GB)': np.sum,
        '下行PRB平均占用率_1': np.mean})
# =============================================================================
# Top cells by user count
# =============================================================================
df_max_users = pd.pivot_table(
    df_content,
    index=[
        '网元',
        '网元名称',
        '小区',
        '小区名称'],
    values=[
        '最大RRC连接用户数_1',
        '平均RRC连接用户数_1',
        '下行平均激活用户数_1',
        '最大激活用户数_1'],
    aggfunc={
        '最大RRC连接用户数_1': np.max,
        '平均RRC连接用户数_1': np.max,
        '下行平均激活用户数_1': np.max,
        '最大激活用户数_1': np.max})
df_max_users.reset_index(inplace=True)
df_max_users.sort_values(by=['最大激活用户数_1'], ascending=False, inplace=True)
df_max_users = df_max_users[['网元',
                             '网元名称',
                             '小区',
                             '小区名称',
                             '最大激活用户数_1',
                             '下行平均激活用户数_1',
                             '最大RRC连接用户数_1',
                             '平均RRC连接用户数_1']]
# =============================================================================
# Top cells by traffic
# =============================================================================
df_max_throughput = pd.pivot_table(
    df_content,
    index=['网元',
           '网元名称',
           '小区',
           '小区名称'],
    # BUG FIX: the column was renamed to '下行流量(GB)' above; the original
    # still referenced the old '下行流量(MByte)' name here (KeyError).
    values=['下行流量(GB)'],
    aggfunc={
        '下行流量(GB)': np.max})
df_max_throughput.reset_index(inplace=True)
df_max_throughput.sort_values(
    by=['下行流量(GB)'],
    ascending=False,
    inplace=True)
# =============================================================================
# Worst cells by user-experience rate
# =============================================================================
df_min_speed = pd.pivot_table(
    df_content,
    index=['网元',
           '网元名称',
           '小区',
           '小区名称'],
    values=['用户体验速率(MBps)'],
    aggfunc={
        '用户体验速率(MBps)': np.min})
# BUG FIX: the original called these on `df_max_speed`, a name that was never
# defined (NameError); the frame is df_min_speed.
df_min_speed.reset_index(inplace=True)
df_min_speed.sort_values(
    by=['用户体验速率(MBps)'],
    ascending=True,
    inplace=True)
|
from django.db import models
# Create your models here.
class User(models.Model):
    """Simple person record with a first and last name."""

    name = models.CharField(max_length=100)
    surname = models.CharField(max_length=100)

    def __str__(self):
        # Shown wherever the object is displayed; note there is no separator
        # between the two parts (matches the original behavior).
        return '{}{}'.format(self.name, self.surname)
|
# -*- coding: utf-8 -*-
# from vaex import dataset
import vaex.dataset
from optparse import OptionParser
from mab.utils.progressbar import ProgressBar
import sys
import h5py
import numpy as np
def merge(output_filename, datasets_list, datasets_centering=None, sort_property=None, order_column_name=None, ascending=True):
    """Merge several vaex datasets into one HDF5 file with 2-D "rank1" columns
    of shape (n_files, max_rows).

    datasets_list: one list of per-particle-type datasets per input file.
    datasets_centering: optional per-file dataset used to estimate x/y/z and
        vx/vy/vz centers, which are subtracted from those columns.
    sort_property: scalar variable name used to order the input files.
    order_column_name: when given, rows are scattered to positions given by
        that column (offset by its minimum) instead of packed sequentially.
    """
    # datasets = list(datasets)
    if sort_property:
        # Order the files (and their centering datasets) by a scalar variable.
        datasets_list.sort(key=lambda datasets: datasets[0].variables[sort_property], reverse=not ascending)
        datasets_centering.sort(key=lambda dataset: dataset.variables[sort_property], reverse=not ascending)
    h5output = h5py.File(output_filename, "w")
    example_dataset = datasets_list[0][0]
    # Widest row count over all files (sum of the per-type dataset lengths).
    max_length = max([sum(len(dataset) for dataset in datasets) for datasets in datasets_list])
    # counts =
    # for datasets in datasets:
    # max_length = sum( counts )
    shape = (len(datasets_list), max_length)
    print(("shape of new arrays will be", shape, max_length))
    if 0:  # disabled sanity check: all datasets equally long
        for dataset1 in datasets:
            for dataset2 in datasets:
                if dataset1 != dataset2:
                    if len(dataset1) != len(dataset2):
                        print((dataset1.name, "is of length", len(dataset1), "but", dataset2.name, "is of length", len(dataset2)))
                        sys.exit(1)
    # Pre-create one 2-D output dataset per column of the example input.
    for column_name in example_dataset.column_names:
        d = h5output.require_dataset("/columns/" + column_name, shape=shape, dtype=example_dataset.columns[column_name].dtype.type, exact=True)
        d[0, 0] = example_dataset.columns[column_name][0]  # ensure the array exists
    # each float propery will be a new axis in the merged file (TODO: int and other types?)
    for property_name in list(example_dataset.variables.keys()):
        property = example_dataset.variables[property_name]
        if isinstance(property, float):
            d = h5output.require_dataset("/axes/" + property_name, shape=(len(datasets_list),), dtype=np.float64, exact=True)
            d[0] = 0.  # make sure it exists
    # close file and open it again with our interface
    h5output.close()
    dataset_output = vaex.dataset.Hdf5MemoryMapped(output_filename, write=True)
    progressBar = ProgressBar(0, len(datasets_list) - 1)
    if 0:  # disabled: collect every particle ID across all files
        idmap = {}
        for index, dataset in enumerate(datasets_list):
            ids = dataset.columns["ParticleIDs"]
            for id in ids:
                idmap[id] = None
        used_ids = list(idmap.keys())
        print((sorted(used_ids)))
    particle_type_count = len(datasets_list[0])
    for index, datasets in enumerate(datasets_list):
        centers = {}
        if datasets_centering is not None:
            cols = datasets_centering[index].columns
            # first rought estimate
            for name in "x y z vx vy vz".split():
                indices = np.argsort(cols["Potential"])[::1]
                indices = indices[:10]
                # centers[name] = cols[name][np.argmin(cols["Potential"])] #.mean()
                # NOTE(review): `indices` is computed but unused — the center
                # is the plain column mean, not the mean of the 10 lowest-
                # potential particles. Confirm which was intended.
                centers[name] = cols[name].mean()
                print(("center", centers[name]))
            if 0:  # disabled iterative refinement of the center
                # NOTE(review): Python-2-era float slices below
                # (len(indices) / 2); dead under `if 0`.
                # if column_name in "x y z".split():
                # now sort by r
                r = np.sqrt(np.sum([(cols[name] - centers[name])**2 for name in "x y z".split()], axis=0))
                indices = np.argsort(r)
                indices = indices[:len(indices) / 2]  # take 50%
                for name in "x y z".split():
                    centers[name] = cols[name][indices].mean()
                # sort by v
                v = np.sqrt(np.sum([(cols[name] - centers[name])**2 for name in "vx vy vz".split()]))
                indices = np.argsort(r)
                indices = indices[:len(indices) / 2]  # take 50%
                for name in "vx vy vz".split():
                    centers[name] = cols[name][indices].mean()
        for column_name in datasets[0].column_names:
            column_output = dataset_output.rank1s[column_name]
            column_output[index, :] = np.nan  # first fill with nan's since the length of the output column may be larger than that of individual input datasets
            for property_name in list(datasets[0].variables.keys()):
                property = datasets[0].variables[property_name]
                if isinstance(property, float):
                    # print "propery ignored: %r" % property
                    # print "propery set: %s %r" % (property_name, property)
                    dataset_output.axes[property_name][index] = property
                else:
                    # print "propery ignored: %s %r" % (property_name, property)
                    pass
            center = 0  # NOTE(review): unused
            output_offset = 0
            # merge the multiple datasets into the one column
            for particle_type_index in range(particle_type_count):
                dataset = datasets[particle_type_index]
                # print len(dataset), output_offset
                column_input = dataset.columns[column_name]
                if order_column_name:
                    order_column = dataset.columns[order_column_name]
                else:
                    order_column = None
                # print dataset.name, order_column, order_column-order_column.min()
                i1, i2 = output_offset, output_offset + len(dataset)
                if order_column is not None:
                    # Scatter rows to their ID-ordered positions.
                    column_output[index, order_column - order_column.min()] = column_input[:] - centers.get(column_name, 0)
                else:
                    column_output[index, i1:i1 + len(dataset)] = column_input[:] - centers.get(column_name, 0)
                output_offset += len(dataset)
        # print "one file"
        progressBar.update(index)
if __name__ == "__main__":
    usage = "use the source luke!"
    parser = OptionParser(usage=usage)
    # parser.add_option("-n", "--name",
    #                   help="dataset name [default=%default]", default="data", type=str)
    parser.add_option("-o", "--order", default=None, help="rows in the input file are ordered by this column (For gadget: ParticleID)")
    parser.add_option("-t", "--type", default=None, help="file type")
    parser.add_option("-p", "--particle-types", default=None, help="gadget particle type")
    parser.add_option("-c", "--center_type", default=None, help="gadget centering type")
    # parser.add_option("-i", "--ignore", default=None, help="ignore errors while loading files")
    parser.add_option("-r", "--reverse", action="store_true", default=False, help="reverse sorting")
    parser.add_option("-s", "--sort",
                      help="sort datasets by propery [by default it will be the file order]", default=None, type=str)
    (options, args) = parser.parse_args()
    # Last positional argument is the output file; the rest are inputs.
    inputs = args[:-1]
    output = args[-1]
    print(("merging:", "\n\t".join(inputs)))
    print(("to:", output))
    if options.type is None:
        print("specify file type --type")
        parser.print_help()
        sys.exit(1)
    # Map the --type name to a vaex dataset class.
    class_ = vaex.dataset.dataset_type_map[options.type]
    # @datasets = [class_(filename, *options) for filename in inputs]
    # Build one sub-dataset per particle type per input file ("file#type").
    datasets_list = []
    for filename in inputs:
        print(("loading file", filename, options.particle_types))
        datasets = []
        for type in options.particle_types.split(","):
            datasets.append(class_(filename + "#" + type))
        datasets_list.append(datasets)
    datasets_centering = None
    if options.center_type:
        # Optional extra per-file dataset used only to estimate the center.
        datasets_centering = []
        for filename in inputs:
            datasets_centering.append(class_(filename + "#" + options.center_type))
    merge(output, datasets_list, datasets_centering, options.sort, ascending=not options.reverse, order_column_name=options.order)
|
from colorama import Cursor, init, Fore
abc = "abcdefghijklmnopqrstuvwxyz 1234567890 %#@![]{}()?¿+-*/"
class UtilCrypto:
    """Toy Caesar-style cipher over the module-level alphabet string `abc`."""

    def __init__(self,key):
        self.key = key
        # If the key exceeds the alphabet length, the shift in cryptoKey can
        # index past the end of `abc` (IndexError) — TODO confirm intended limit.
        self.limitKey = len(abc)

    def containerblock(self):
        # Ask the user for the output file name (consumed by WriteInFileName).
        self.FILENAMES = input("[!] Write your FILENAME ")
        #self.BODY_FILE = input("[*] >>> ")  # (disabled) would read the message body here

    def cryptoKey(self,key,text):
        """Encrypt `text` by shifting each character `key` positions within `abc`.

        Prints each enciphered character and the running (space-joined) result,
        accumulating it in self.cryptoChild. Returns "symbol not exists" as soon
        as a character outside `abc` is found; otherwise returns None.
        NOTE(review): the instance attribute self.key is ignored — the shift
        comes only from this `key` parameter.
        """
        n_text = len(text)  # e.g. with key 3 every character moves three positions
        self.cryptoChildArray = []
        for new_text in text:  # walk the message one character at a time
            if new_text in abc:  # only characters present in the alphabet can be shifted
                location = list(abc)
                longIV = len(abc)
                for k in range(len(abc)):
                    # k is the candidate position in the alphabet
                    y = k,abc[k]
                    #print(y)
                    if new_text == abc[k]:  # found the character's position
                        # Shift by `key`: e.g. position 0 ('a') with key 3 -> abc[3].
                        # NOTE(review): k+key can exceed len(abc)-1 (no wrap-around),
                        # which raises IndexError for characters near the end.
                        self.cryptoChild = abc[k+key]
                        print(Fore.GREEN + "Cipher successfully!: "+ self.cryptoChild)
                        # Append the enciphered character and rebuild the joined result.
                        self.cryptoChildArray.append(self.cryptoChild)
                        self.cryptoChild = ' '.join(map(str,self.cryptoChildArray))
                        print(self.cryptoChild)
                    else:
                        pass
            else:
                return "symbol not exists"
        #return n_text

    def DecryptMsg(self,key,text):
        # Not implemented yet.
        pass

    def WriteInFileName(self):
        """Write the last cipher result to <FILENAMES>.txt (set by containerblock)."""
        if len(self.FILENAMES)>0:
            with open(self.FILENAMES+'.txt',"wb") as ManagerFile:
                ManagerFile.write(self.cryptoChild.encode())
            return Fore.YELLOW + "[*] FILE CREATED SUCCESSFULLY"
        else:
            return "PUT ANYTHING IN FILENAME"
# Demo run. Note the constructor key (True) is never used by cryptoKey,
# which takes its own key argument (5 here).
UtilC = UtilCrypto(True)
print(UtilC.cryptoKey(5,'atacar el lunes en mexico'))
UtilC.containerblock()
print(UtilC.WriteInFileName())
#print(Cipher())
# TODO: also wanted — a method to write the result to a file
# TODO: and a method to send it by e-mail
|
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from torchvision.datasets import CIFAR10
torch.manual_seed(1)
def data_tf(x):
    """Convert an HWC image to a normalized CHW float32 tensor in [-1, 1]."""
    arr = np.array(x, dtype='float32') / 255  # scale raw pixels to [0, 1]
    arr = (arr - 0.5) / 0.5                   # recenter to [-1, 1]
    arr = arr.transpose((2, 0, 1))            # HWC -> CHW for PyTorch convs
    return torch.from_numpy(arr)
# CIFAR-10 train/test loaders. Assumes the dataset already exists under
# ./data — pass download=True to CIFAR10 if it may be missing (TODO confirm).
train_set = CIFAR10('./data', train=True, transform=data_tf)
train_data = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
test_set = CIFAR10('./data', train=False, transform=data_tf)
test_data = torch.utils.data.DataLoader(test_set, batch_size=128, shuffle=False)
class VGG(nn.Module):
    """VGG-style CNN for 32x32 CIFAR-10 input: 5 conv stages + 2-layer classifier.

    Each stage ends in a 2x2 max-pool, so a 32x32 image reaches the classifier
    as a 1x1x512 feature map.
    """

    def __init__(self):
        super(VGG, self).__init__()
        self.conv1 = self._stage(3, 64, double=False)
        self.conv2 = self._stage(64, 128, double=False)
        self.conv3 = self._stage(128, 256, double=True)
        self.conv4 = self._stage(256, 512, double=True)
        self.conv5 = self._stage(512, 512, double=True)
        self.fc = nn.Sequential(
            nn.Linear(512, 100),
            nn.ReLU(),
            nn.Linear(100, 10),
        )

    @staticmethod
    def _stage(c_in, c_out, double):
        """One conv stage: 3x3 conv + ReLU (optionally twice), then 2x2 max-pool."""
        layers = [nn.Conv2d(c_in, c_out, 3, 1, 1), nn.ReLU()]
        if double:
            layers += [nn.Conv2d(c_out, c_out, 3, 1, 1), nn.ReLU()]
        layers.append(nn.MaxPool2d(2))
        return nn.Sequential(*layers)

    def forward(self, x):
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            x = stage(x)
        # Flatten the 1x1x512 feature map before the classifier head.
        return self.fc(x.view(x.shape[0], -1))
net = VGG()
# Plain SGD with a fixed, fairly aggressive learning rate; no momentum/schedule.
optimizer = torch.optim.SGD(net.parameters(), lr=1e-1)
criterion = nn.CrossEntropyLoss()
def train(net, train_data, valid_data, num_epochs, optimizer, criterion):
for epoch in range(num_epochs):
train_loss = 0
train_acc = 0
for im, label in train_data:
im = Variable(im)
label = Variable(label)
output = net(im)
loss = criterion(output, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.data[0]
pred = torch.max(output,1)[1]
num_correct = (pred == label).sum().data[0]
acc = num_correct/im.shape[0]
train_acc += acc
# train_acc += get_acc(output, label)
print('EPOCH:',epoch,',train loss:',train_loss/len(train_data),',train acc',train_acc/len(train_data))
train(net,train_data,test_data,10,optimizer,criterion)
|
from flask import Flask, render_template, url_for, flash, redirect
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
from forms import LoginForm, RegistrationForm
app = Flask(__name__)
# NOTE(review): secret key is hard-coded; load from an environment variable in production.
app.config['SECRET_KEY'] = 'oowewbcdeoproeporpebcoiffoekcjefdcdcndcoidfcbdc'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
class User(db.Model):
    """Application user; one-to-many with Post via the `author` backref."""
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.String(20), unique = True, nullable = False)
    # Widened from String(20): real e-mail addresses routinely exceed 20 chars.
    email = db.Column(db.String(120), unique = True, nullable = False)
    image_file = db.Column(db.String(20), nullable = False, default="default.jpg")
    # Widened from String(20): password *hashes* (e.g. bcrypt) are 60 chars.
    password = db.Column(db.String(60), nullable = False)
    posts = db.relationship('Post', backref='author', lazy=True)

    def __repr__(self):
        return self.username
class Post(db.Model):
    """Blog post; belongs to a User through user_id."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(100), nullable = False)
    # BUG FIX: dropped `unique=True` — a uniqueness constraint on a defaulted
    # timestamp rejects any two posts created in the same clock tick.
    date_posted = db.Column(db.DateTime, nullable = False, default=datetime.utcnow)
    content = db.Column(db.Text, nullable = False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
# Dummy post data rendered on the home page until the database is wired in.
# NOTE(review): the key is spelled 'tittle' (sic) — the templates presumably
# reference the same misspelling; rename both together if fixing.
posts = [
    {
        'author': 'Rohit',
        'tittle': 'First Blog post',
        'content': 'First blog post content. This is the dummy content',
        'date_posted': '29 June 2020',
        'color': 'bg-primary'
    },
    {
        'author': 'Jon Doe',
        'tittle': 'Second Blog post',
        'content': 'Second blog post content. This is the second content',
        'date_posted': '2 June 2020',
        'color': 'bg-success'
    },
    {
        'author': 'Jon Wick',
        'tittle': 'Thord Blog post',
        'content': 'Third blog post content. This is the second content',
        'date_posted': '21 June 2020',
        'color': 'bg-warning'
    }
]
# Bootstrap背景 classes available to templates.
colors =['bg-primary', 'bg-success', 'bg-warning', 'bg-dark']
@app.route('/home')
@app.route('/')
def home():
    """Render the landing page with every blog post."""
    context = {'posts': posts}
    return render_template('home.html', **context)
@app.route('/about')
def about_author():
    """Render the static about page."""
    template_name = 'about.html'
    return render_template(template_name)
@app.route('/register', methods=['GET','POST'])
def register():
    """Show the sign-up form; on a valid submit, flash success and redirect home."""
    form = RegistrationForm()
    # Guard clause: anything but a valid POST just re-renders the form.
    if not form.validate_on_submit():
        return render_template('register.html', form=form)
    flash(f'Account created for {form.username.data}!', 'success')
    return redirect(url_for('home'))
@app.route('/login', methods=['GET','POST'])
def login():
    """Show the login form; on submit, check the hard-coded demo credentials."""
    form = LoginForm()
    if form.validate_on_submit():
        credentials_ok = (form.email.data == 'user@gmail.com'
                          and form.password.data == 'password')
        if credentials_ok:
            flash('You have been logged in!', 'success')
            return redirect(url_for('home'))
        flash('Login unsuccessful', 'warning')
    return render_template('login.html', form=form)
if __name__ == '__main__':
app.run(host='127.0.0.1',port=4455,debug=True) |
from __future__ import print_function
from stylelens_product.bigquery.amazon_search_keywords import AmazonSearchKeywords
from pprint import pprint
# create an instance of the API class
# create an instance of the API class
api_instance = AmazonSearchKeywords()

# Build a single keyword record for upload. The field values here look like
# placeholders ('sss2', 'HC8000', ...) — presumably replaced with real Amazon
# search parameters in production; TODO confirm the expected field formats.
keywords = []
keyword = {}
keyword['keywords'] = 'sss2'
keyword['search_index'] = 'HC8000'
keyword['response_groups'] = 'HG8000'
keyword['browse_node'] = 'amazon'
keyword['sort'] = 'amazon'
keywords.append(keyword)

# Best-effort upload: failures are reported but do not abort the script.
try:
    api_response = api_instance.add_keywords(keywords)
except Exception as e:
    print("Exception when calling add_keywords: %s\n" % e)
|
import matplotlib.pyplot as plt
import numpy as np
def generate_toy_data():
    """Generate a shuffled 2-class, 2-D Gaussian toy dataset, split 80/20.

    Two isotropic Gaussian clouds (500 points each) centered at (0.2, 0.2)
    with label +1 and (0.8, 0.8) with label -1.

    Returns:
        (train_x, train_y, test_x, test_y): x arrays of shape (2, n) and
        y arrays of shape (1, n) with labels +1 / -1.
    """
    mean_a = [0.2, 0.2]
    cov_a = [[0.01, 0], [0, 0.01]]  # diagonal covariance
    mean_b = [0.8, 0.8]
    cov_b = [[0.01, 0], [0, 0.01]]  # diagonal covariance
    class_a_x = np.random.multivariate_normal(mean_a, cov_a, 500).T
    class_b_x = np.random.multivariate_normal(mean_b, cov_b, 500).T
    class_a_y = np.ones(np.size(class_a_x[0]))
    class_b_y = -1*np.ones(np.size(class_b_x[0]))
    data_x = np.concatenate((class_a_x, class_b_x), axis=1)
    data_y = np.concatenate((class_a_y, class_b_y), axis=0)
    # Shuffle samples with a shared permutation so x and y stay aligned.
    num = data_y.size
    randomize = np.arange(num)
    np.random.shuffle(randomize)
    data_x = data_x[:,randomize]
    data_y = data_y[randomize]
    inter = int(0.8*num)
    data_y = data_y.reshape(1,num)
    # BUG FIX: the test slices previously used `inter:-1`, silently dropping
    # the last sample; slice to the end of the arrays instead.
    return data_x[:,0:inter], data_y[:,0:inter], data_x[:,inter:], data_y[:,inter:]
#dd = generate_toy_data()
#print("ok")
|
class Song(object):
    """Container for a song name and the path of its audio file."""

    def __init__(self, song=None, file_path=None):
        # BUG FIX: the original referenced undefined globals `song`/`file_path`
        # (NameError on construction); accept them as optional parameters —
        # backward compatible, since Song() still works with no arguments.
        self.song = song
        self.file_path = file_path

    def get_song_path(self):
        """Return the stored path of the song's audio file."""
        return self.file_path

    def set_song_path(self, file_path):
        """Store the path of the song's audio file.

        BUG FIX: the original assignment was reversed
        (`file_path = self.file_path`), making the setter a no-op.
        """
        self.file_path = file_path
|
#!/usr/bin/env python
from __future__ import print_function
import socket
import sys
# Target server: host/port from argv, or a hard-coded default.
if len(sys.argv) >= 3:
    srv = (str(sys.argv[1]), int(sys.argv[2]))
else:
    srv = ("90.176.152.68", 28015)

# A2S_INFO query. BUG FIX: must be a bytes literal — on Python 3,
# socket.sendto() rejects str payloads.
query = b"\xFF\xFF\xFF\xFF\x54Source Engine Query\x00"

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    sock.sendto(query, srv)
    rep = sock.recvfrom(4096)
finally:
    # Close on both success and failure (previously leaked on success).
    sock.close()

# strip first \xff's and the header byte
rep = rep[0][6:]
if '--dump' in sys.argv:
    # BUG FIX: the split pattern was the typo '\0x00' (a literal NUL + "x00");
    # split on actual NUL bytes.
    print(str(rep.split(b'\x00')))
# split first text fields up to last numbers list
rep = rep.split(b'\x00', 5)
# get list with numbers
nums = rep[5]
# hostname - players/maxplayers. BUG FIX: indexing a bytes object already
# yields ints on Python 3, so ord() is neither needed nor valid here.
print('%s - %d/%d' % (
    rep[0].decode('utf-8', 'replace'),
    nums[1],
    nums[2]))
|
import geocoder
def main():
    """Geocode a fixed list of travel destinations and print their coordinates."""
    destinations = ['Space Needle', 'Crater Lake', 'Golden Gate Bridge', 'Yosemite National Park', 'Las Vegas, Nevada', 'Grand Canyon National Park', 'Aspen, Colorado', 'Mount Rushmore', 'Yellowstone National Park', 'Sandpoint, Idaho', 'Banff National Park', 'Capilano Suspension Bridge']
    for place in destinations:
        # Look up the place via the ArcGIS geocoding backend.
        result = geocoder.arcgis(place)
        lat, lng = result.latlng
        print(f'{place} is located at ({lat:.4f}, {lng:.4f})')
main()
|
import modi
"""
Example script for the usage of ir module
Make sure you connect 1 ir module to your network module
"""
if __name__ == "__main__":
    bundle = modi.MODI()   # connect to the MODI network module
    ir = bundle.irs[0]     # first (and assumed only) IR module on the network
    while True:
        # Overwrite the same console line with the current proximity reading.
        print("{0:<10}".format(ir.proximity), end='\r')
|
from PIL import Image
from pandas import read_csv
import os
# Read predicted orientations: each row is (filename, predicted_label).
labels = read_csv('Large Files/test.rotfaces/test.preds.csv').values

for item in labels:
    colorImage = Image.open(os.path.join('Large Files/test.rotfaces/test/', item[0]))
    # BUG FIX: the original used three independent `if`s with no default, so a
    # label matching none of them saved the *previous* iteration's image (or
    # raised NameError on the first row). Use elif/else with an identity default.
    # NOTE(review): PIL rotates counter-clockwise for positive angles — the
    # right/left angle choice here mirrors the original; confirm it matches
    # the label semantics.
    if item[1] == 'rotated_right':
        rotated = colorImage.rotate(90)
    elif item[1] == 'rotated_left':
        rotated = colorImage.rotate(-90)
    elif item[1] == 'upside_down':
        rotated = colorImage.rotate(180)
    else:
        rotated = colorImage
    rotated.save(os.path.join('rotated_test_images/', item[0]+'.png'))
print('Rotation complete')
# -*- coding: utf-8 -*-
from models import Item_types as IT, Items, Prices, Places
from sqlalchemy import func
from auxiliary_functions import prepare_options
DEFAULT_CAT = "предмет роскоши"
DEFAULT_ITYPE = "предмет"
class Search:
    """Query helpers over Prices/Places/Items, returning plain dicts for the API layer."""

    @staticmethod
    def item_result_to_dict(result):
        """Flatten joined Prices rows into JSON-ready dicts (item, place, source)."""
        result = [
            {
                "id": item.items.id_item,
                "name": item.items.iname,
                "itype": item.items.item_types.itype,
                "price_cur": item.price_cur,
                # NOTE(review): "scope" duplicates price_cur — it likely should
                # read a distinct attribute (item.scope?); confirm against the
                # Prices model before changing.
                "scope": item.price_cur,
                # NOTE(review): 'mnane_*' spellings mirror the Measures model
                # attribute names as written — verify before "fixing" the typo.
                "mname_hist": item.measures.mnane_hist,
                "mname_old": item.measures.mnane_old,
                "place": {
                    "id_place": item.places.id_place,
                    "name": item.places.pname,
                    "region": item.places.region,
                    "ptype": item.places.ptype,
                    "city": item.places.city,
                    "coord": {
                        "lat": item.places.coord_lat,
                        "lon": item.places.coord_lon,
                    }
                },
                "source": {
                    "name": item.sources.sname,
                    "page": item.page,
                    "year": item.year,
                    "month": item.month,
                },
            }
            for item in result
        ]
        return result

    @staticmethod
    def gather_place_descr(item):
        """Return the place sub-dict (name/type/city/region/coords) for one row."""
        place_descr = {
            "name": item.places.pname,
            "ptype": item.places.ptype,
            "city": item.places.city,
            "region": item.places.region,
            "coord": {
                "lat": item.places.coord_lat,
                "lon": item.places.coord_lon,
            }
        }
        return place_descr

    @staticmethod
    def gather_source_descr(item):
        """Return the source sub-dict (name/page/year/month) for one row."""
        source_descr = {
            "name": item.sources.sname,
            "page": item.page,
            "year": item.year,
            "month": item.month,
        }
        return source_descr

    @staticmethod
    def places_result_to_dict(results):
        """Map id_place -> {column: value, ..., 'freq': count} for aggregate rows."""
        places = {}
        for item in results:
            # Drop the leading id column and label the trailing count "freq".
            keys = list(item.keys())[1:] + ["freq"]
            places[item[0]] = dict(zip(keys, item[1:]))
        return places

    def get_all_prices(self):
        """Return every price row, converted to plain dicts."""
        result = Prices.query.all()
        return self.item_result_to_dict(result)

    def get_all_places(self):
        """Return every place with its price-record count."""
        result = Prices.query.join(Places, Prices.id_place==Places.id_place)\
            .with_entities(
                Prices.id_place, Places.pname,
                Places.ptype, Places.city,
                Places.coord_lat, Places.coord_lon,
                Places.region, func.count())\
            .group_by(Prices.id_place).all()
        return self.places_result_to_dict(result)

    def search_items_by_place(self, id_place):
        """Return all price rows recorded at the given place."""
        result = Prices.query.filter(
            Prices.id_place == id_place
        ).all()
        return self.item_result_to_dict(result)

    @staticmethod
    def get_item_types():
        """Return the distinct item types, formatted as select-box options."""
        result = IT.query.\
            with_entities(IT.itype).\
            distinct(IT.itype).all()
        return prepare_options(result)

    @staticmethod
    def get_regions():
        """Return the distinct regions, formatted as select-box options."""
        result = Places.query.\
            with_entities(Places.region).\
            distinct(Places.region).all()
        return prepare_options(result)

    @staticmethod
    def prepare_regions_types(regs, itypes):
        """Aggregate places filtered by both regions and item types."""
        result = Prices.query \
            .join(Places, Prices.id_place == Places.id_place) \
            .join(Items, Prices.id_item == Items.id_item) \
            .join(IT, Items.id_itype == IT.id_itype) \
            .filter(Places.region.in_(regs)) \
            .filter(IT.itype.in_(itypes)) \
            .with_entities(
                Prices.id_place, Places.pname,
                Places.ptype, Places.city,
                Places.coord_lat, Places.coord_lon,
                Places.region, func.count()) \
            .group_by(Prices.id_place).all()
        return result

    @staticmethod
    def prepare_regions(regs):
        """Aggregate places filtered by regions only."""
        result = Prices.query \
            .join(Places, Prices.id_place == Places.id_place) \
            .filter(Places.region.in_(regs)) \
            .with_entities(
                Prices.id_place, Places.pname,
                Places.ptype, Places.city,
                Places.coord_lat, Places.coord_lon,
                Places.region, func.count()) \
            .group_by(Prices.id_place).all()
        return result

    @staticmethod
    def prepare_types(itypes):
        """Aggregate places filtered by item types only."""
        result = Prices.query \
            .join(Places, Prices.id_place == Places.id_place) \
            .join(Items, Prices.id_item == Items.id_item) \
            .join(IT, Items.id_itype == IT.id_itype) \
            .filter(IT.itype.in_(itypes)) \
            .with_entities(
                Prices.id_place, Places.pname,
                Places.ptype, Places.city,
                Places.coord_lat, Places.coord_lon,
                Places.region, func.count()) \
            .group_by(Prices.id_place).all()
        return result

    def search_places(self, request):
        """Filter places by 'region'/'item_type' query args; unfiltered -> all places.

        BUG FIX: previously a non-empty query string containing *neither*
        filter left `result` unbound and raised NameError; that case now
        falls back to get_all_places().
        """
        form_values = request.args.to_dict(flat=False)
        if form_values:
            regs = form_values.get('region')
            itypes = form_values.get('item_type')
            if regs and itypes:
                result = self.prepare_regions_types(regs, itypes)
            elif regs:
                result = self.prepare_regions(regs)
            elif itypes:
                result = self.prepare_types(itypes)
            else:
                return self.get_all_places()
            return self.places_result_to_dict(result)
        return self.get_all_places()

    @staticmethod
    def get_place_map(id_place):
        """Return (iframe html, area_image, pname, coord_lat, coord_lon) for a place.

        The iframe embeds the place's map URL stored in Places.description.
        """
        result = Places.query.filter(
            Places.id_place == id_place
        ).with_entities(
            Places.description, Places.area_image,
            Places.pname, Places.coord_lat, Places.coord_lon
        ).first()
        iframe = '''
            <iframe
                class ="embed-responsive-item" src="{}"
                name="etomesto-map" width="100%"
                height="90%" frameborder="0"
                vspace="0" hspace="0"
                marginwidth="0" marginheight="0"
                scrolling="no">
            </iframe>'''.format(result[0])
        area_image, pname, coord_lat, coord_lon = result[1:]
        return iframe, area_image, pname, coord_lat, coord_lon
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.