text stringlengths 38 1.54M |
|---|
# -*- coding=utf-8 -*-
import numpy as np
import csv  # both numpy and csv are required


def read_csv_as_ndarray(path):
    """Read a CSV file and return its rows as a 2-D numpy array of strings.

    path: path of the CSV file to open.
    Returns an ndarray with one row per CSV record.
    """
    with open(path) as csv_file:  # context manager closes the file
        rows = [line for line in csv.reader(csv_file)]
    return np.array(rows)


if __name__ == "__main__":
    data = read_csv_as_ndarray('D:\\新建文件夹\\2018-12-26\\input_2018-12-26 11-07-41_zmt_sitechuang_tang.csv')
    print(data)
    # dtype is an attribute, not a method: the original `.dtype()` raised TypeError
    print(data.dtype)
from __future__ import division
import sys
k = int(sys.argv[1]) # # of homozygous dominant
m = int(sys.argv[2]) # # of heterozygous
n = int(sys.argv[3]) # # of homozygous recessive
t = k + m + n # total # of organisms
# Likelihood of a dominant allele given the first organism is:
# k = homozygous dominant
k_chance = (k/t) * ((k-1) + m + n)/(t-1)
# m = heterozygous
m_chance = (m/t) * (k + (m-1)*(3/4) + n*(2/4))/(t-1)
# n = homozygous recessive
n_chance = (n/t) * (k + m*(2/4) + (n-1)*(0))/(t-1)
# total chance of dominant allele in end organism
t_chance = k_chance + m_chance + n_chance
print round(t_chance, 5) |
import logging
import shutil
import requests
from import_export import resources
from import_export.fields import Field
from tablib import Dataset
from .models import Item, Category, CustomImage
logger = logging.getLogger(__name__)
class ObjectResource(resources.ModelResource):
    """django-import-export resource mapping spreadsheet columns to Item fields."""

    class Meta:
        model = Item
        # Fields in the order they appear in the import/export dataset.
        fields = (
            "id",
            "name",
            "description",
            "ref",
            "price",
            "image_file",
            "category_text",
        )

    # Map spreadsheet column headers onto Item model attributes.
    name = Field(attribute="name", column_name="Short Description")
    description = Field(attribute="description", column_name="Full Description")
    ref = Field(attribute="ref", column_name="Product reference")
    price = Field(attribute="price", column_name="Price")
    image_file = Field(attribute="image_file", column_name="Detail URL")
    category_text = Field(attribute="category_text", column_name="Section Text")
def import_objects(excel_file):
    """Replace the whole catalogue with the contents of the uploaded file.

    Wipes Items, Categories and CustomImages, dry-runs the import to
    validate the data, then imports for real.  Returns True on success,
    False when the dry run reported errors (status carries the first error).
    """
    delete_all(Item)
    delete_all(Category)
    delete_all(CustomImage)
    set_status("Reading Excel file")
    object_resource = ObjectResource()
    dataset = Dataset()
    dataset.load(excel_file.read())
    set_status("Checking file", max=dataset.height)
    result = object_resource.import_data(dataset, dry_run=True)
    if result.has_errors():
        try:
            error = result.rows[0].errors[0].error
        except (IndexError, AttributeError):  # was a bare except
            error = "Not known"
        set_status(f"Error: {error}", done=True)
        return False
    else:
        set_status("Loading database", max=dataset.height)
        object_resource.import_data(dataset, dry_run=False)  # Actually import now
        set_status("Objects loaded", max=dataset.height, done=True)
        return True
def set_status(text, max=0, count=0, empty=0, categories=0, image_count=0, done=False):
    """Record import progress (persistence is currently disabled below)."""
    percent = int(count / max * 100) if max > 0 else 0
    # set_key_value(
    #     "PROGRESS",
    #     {
    #         "text": text,
    #         "percent": percent,
    #         "max": max,
    #         "count": count,
    #         "empty": empty,
    #         "categories": categories,
    #         "image_count": image_count,
    #         "done": done,
    #     },
    # )
def process_images(user):
    """Copy or download the image for every Item, reporting progress.

    Clears all CustomImages first, then calls load_image() per item,
    batching status updates every `threshold` items.  Returns True on
    success, False when any item raised.
    """
    # delete all using workaround for sqlite limit
    set_status("Clearing images")
    delete_all(CustomImage)  # deletes original_images too!
    items = Item.objects.all()
    count = 0
    total = len(items)  # renamed from `max`, which shadowed the builtin
    image_count = 0
    not_found = 0
    threshold = 10  # report status every this many items
    since_update = 0
    set_status("Processing images", total)
    try:
        for item in items:
            loaded = load_image(item, user)
            count += 1
            if loaded:
                image_count += 1
            else:
                not_found += 1
            since_update += 1
            if since_update >= threshold:
                set_status(
                    "Processing images",
                    total,
                    count,
                    empty=not_found,
                    image_count=image_count,
                )
                since_update = 0
        set_status(
            "Done", total, count, empty=not_found, image_count=image_count, done=True
        )
        return True
    except Exception as e:
        # `item` is unbound if the failure happened before the loop started.
        failed = item.name if "item" in locals() else "unknown"
        logger.error(f"Import images error: {str(e)} object = {failed}")
        return False
def load_image(item, user, collection=None):
    """
    Try to load the image for an object.

    Checks if the file is already in /images first; if so it is copied to
    original_images, otherwise it is downloaded from the item's URL.  On
    success a CustomImage cross-linked to the object is created and True
    is returned; otherwise the item's image is cleared and False returned.
    """
    collection_id = collection.id if collection else 1
    if not item.image_file:
        item.image = None
        item.save()
        return False
    if "http" not in item.image_file:
        # Path came from the excel file, e.g. "folder\name.gif"
        base_url = "https://chinese-porcelain-art.com/acatalog/"
        name = item.image_file.split("\\")
        url = base_url + name[1]
        file_name = name[1].split(".")[0] + ".jpg"
    else:
        # Full URL, set by the scrape process
        parts = item.image_file.split("/")
        file_name = parts[-1]  # last path component
        url = item.image_file
    images_path = "images/" + file_name
    media_path = "media/original_images/" + file_name
    loaded = False
    try:
        # Prefer a local copy when one exists.
        shutil.copy(images_path, media_path)
        loaded = True
    except FileNotFoundError:
        # Fall back to downloading it.
        response = requests.get(url, stream=True)
        if response.status_code == 200:
            with open(media_path, "wb") as out_file:
                shutil.copyfileobj(response.raw, out_file)
            loaded = True
    if loaded:
        new_image = CustomImage.objects.create(
            file="original_images/" + file_name,
            title=item.name,
            collection_id=collection_id,
            uploaded_by_user=user,
            item=item,
        )
        item.image = new_image
        item.save()
        return True
    item.image = None
    item.save()
    return False
def set_image_status(max=0, count=0, not_found=0, done=False):
    """Record image-processing progress (persistence is currently disabled)."""
    percent = int(count / max * 100) if max > 0 else 0
    # set_key_value(
    #     "IMAGES",
    #     {
    #         "percent": percent,
    #         "max": max,
    #         "count": count,
    #         "not_found": not_found,
    #         "done": done,
    #     },
    # )
def delete_all(cls):
    """Delete every row of model *cls* in batches of 500.

    Works around sqlite's bound-parameter limit ("cannot handle 1000
    parameters") by deleting via small IN clauses.
    """
    while cls.objects.count():
        batch_ids = cls.objects.values_list("pk", flat=True)[:500]
        cls.objects.filter(pk__in=batch_ids).delete()
def clear_item_images():
    # Bulk-detach images from every item without touching the image rows.
    Item.objects.all().update(image_id=None)
|
# Simple interactive USD <-> GBP converter.
# Exchange table: [label, rate, label, rate].
c = ['GBP-USD', '1.28', 'USD-GBP', '0.883']


def USDGBP(amount=None):
    """Convert US dollars to pounds; prompts when *amount* is not given.

    Returns the converted value in GBP.
    """
    if amount is None:
        # float() so fractional amounts work; int() rejected e.g. "10.50"
        amount = float(input('input $ amount'))
    rate = float(c[3])
    z = amount * rate
    print('a rate 1$ to ', c[3], '£ your money would equal ', z, 'GBP')
    return z


def GBPUSD(amount=None):
    """Convert pounds to US dollars; prompts when *amount* is not given.

    Returns the converted value in USD.
    """
    if amount is None:
        # original prompt wrongly asked for a "$" amount here
        amount = float(input('input £ amount'))
    rate = float(c[1])
    z = amount * rate
    print('a rate 1£ to ', c[1], '$ your money would equal ', z, 'USD')
    return z


if __name__ == '__main__':
    # Menu moved under the main guard so importing this module has no
    # side effects; behaviour when run as a script is unchanged.
    print('1.USD to GBP\n2.GBP to USD')
    x = int(input())
    if x == 1:
        USDGBP()
    if x == 2:
        GBPUSD()
|
#!/usr/bin/python3
import numpy as np
import pyccl as ccl
import sacc
from tjpcov.covariance_clusters import CovarianceClusters
from tjpcov.covariance_cluster_counts_gaussian import ClusterCountsGaussian
from tjpcov.covariance_cluster_counts_ssc import ClusterCountsSSC
from tjpcov.clusters_helpers import FFTHelper
import pyccl.halos.hmfunc as hmf
import pytest
INPUT_YML = "./tests/data/conf_covariance_clusters.yaml"
@pytest.fixture
def mock_cosmo():
    """Small fixed LCDM cosmology shared by the covariance tests."""
    # h = 0.67, so H0 = 100*h will be in km/s/Mpc
    params = {
        "Omega_c": 0.26,
        "Omega_b": 0.04,
        "h": 0.67,
        "A_s": 2.1e-9,
        "n_s": 0.96,
        "w0": -1.0,
        "wa": 0.0,
    }
    return ccl.Cosmology(**params)
@pytest.fixture
def mock_sacc():
    """Minimal sacc object carrying the cluster-count metadata the tests need."""
    # Using values from
    # https://github.com/nrussofer/Cosmological-Covariance-matrices
    # /blob/master/Full%20covariance%20N_N%20part%20vfinal.ipynb
    # As reference.
    s = sacc.Sacc()
    s.metadata["nbins_cluster_redshift"] = 18
    s.metadata["nbins_cluster_richness"] = 3
    s.metadata["min_mass"] = 1e13
    # This isnt how tracers actually look, but sort of
    # hacks the class to work without building
    # an entire sacc file for this test.
    s.add_tracer(
        "Misc",
        "clusters_0_0",
        metadata={
            "Mproxy_name": "richness",
            "Mproxy_min": 10,
            "Mproxy_max": 100,
            "z_name": "redshift",
            "z_min": 0.3,
            "z_max": 1.2,
        },
    )
    return s
@pytest.fixture
def mock_covariance_gauss(mock_sacc, mock_cosmo):
    """ClusterCountsGaussian wired up with the mock sacc and cosmology."""
    cc_cov = ClusterCountsGaussian(INPUT_YML)
    cc_cov.load_from_sacc(mock_sacc)
    cc_cov.load_from_cosmology(mock_cosmo)
    cc_cov.fft_helper = FFTHelper(
        mock_cosmo, cc_cov.z_lower_limit, cc_cov.z_upper_limit
    )
    cc_cov.mass_func = hmf.MassFuncTinker10(mock_cosmo)
    cc_cov.h0 = 0.67
    return cc_cov
@pytest.fixture
def mock_covariance_ssc(mock_sacc, mock_cosmo):
    """ClusterCountsSSC wired up with the mock sacc and cosmology."""
    cc_cov = ClusterCountsSSC(INPUT_YML)
    cc_cov.load_from_sacc(mock_sacc)
    cc_cov.load_from_cosmology(mock_cosmo)
    cc_cov.fft_helper = FFTHelper(
        mock_cosmo, cc_cov.z_lower_limit, cc_cov.z_upper_limit
    )
    cc_cov.mass_func = hmf.MassFuncTinker10(mock_cosmo)
    cc_cov.h0 = 0.67
    return cc_cov
# Tests start
def test_is_not_null():
    """Both covariance classes can be constructed from the config file."""
    for cov_cls in (ClusterCountsSSC, ClusterCountsGaussian):
        assert cov_cls(INPUT_YML) is not None
def test_load_from_sacc(mock_covariance_gauss: CovarianceClusters):
    """Sacc metadata is mapped onto the covariance object's attributes."""
    assert mock_covariance_gauss.min_mass == np.log(1e13)
    assert mock_covariance_gauss.num_richness_bins == 3
    assert mock_covariance_gauss.num_z_bins == 18
    assert mock_covariance_gauss.min_richness == 10
    assert mock_covariance_gauss.max_richness == 100
    assert mock_covariance_gauss.z_min == 0.3
    assert mock_covariance_gauss.z_max == 1.2
def test_load_from_cosmology(mock_covariance_gauss: CovarianceClusters):
    """A newly loaded cosmology object is stored on the covariance object."""
    cosmo = ccl.CosmologyVanillaLCDM()
    mock_covariance_gauss.load_from_cosmology(cosmo)
    assert mock_covariance_gauss.cosmo == cosmo
@pytest.mark.parametrize(
    "z, ref_val",
    [
        (0.3, 1.463291259900985e-05),
        (0.35, 1.4251538328691035e-05),
    ],
)
def test_integral_mass_no_bias(
    mock_covariance_gauss: CovarianceClusters, z, ref_val
):
    """mass_richness_integral with remove_bias=True matches reference values."""
    test = mock_covariance_gauss.mass_richness_integral(z, 0, remove_bias=True)
    assert test == pytest.approx(ref_val, rel=1e-4)
def test_double_bessel_integral(mock_covariance_gauss: CovarianceClusters):
    """double_bessel_integral at equal redshifts matches the reference value."""
    ref = 8.427201745032292e-05
    test = mock_covariance_gauss.double_bessel_integral(0.3, 0.3)
    assert test == pytest.approx(ref, rel=1e-4)
def test_shot_noise(mock_covariance_gauss: ClusterCountsGaussian):
    """Shot noise of the first z/richness bin matches the reference value."""
    ref = 63973.635143644424
    test = mock_covariance_gauss.shot_noise(0, 0)
    # second positional argument of pytest.approx is the relative tolerance
    assert test == pytest.approx(ref, 1e-4)
@pytest.mark.parametrize(
    "z, reference_val",
    [
        (0.5, 2.596895139062984e-05),
        (0.55, 2.5910691906342223e-05),
    ],
)
def test_integral_mass(
    mock_covariance_gauss: CovarianceClusters, z, reference_val
):
    """mass_richness_integral (with bias) matches reference values."""
    test = mock_covariance_gauss.mass_richness_integral(z, 0)
    assert test == pytest.approx(reference_val, rel=1e-4)
@pytest.mark.parametrize(
    "z, reference_val",
    [
        (0.5, 3.8e-05),  # a proper value must be added here
    ],
)
def test_integral_mass_no_mproxy(
    mock_covariance_gauss: CovarianceClusters, z, reference_val
):
    """Without a mass proxy the integral uses the mass bins directly.

    NOTE: the reference value is a placeholder, hence the loose rel=1e-1.
    """
    mock_covariance_gauss.richness_bins = np.linspace(13.5, 14, 4)
    mock_covariance_gauss.has_mproxy = False
    test = mock_covariance_gauss.mass_richness_integral(z, 0)
    assert test == pytest.approx(reference_val, rel=1e-1)
def test_mass_richness(mock_covariance_gauss: CovarianceClusters):
    """Sum of mass_richness over all richness bins at min_mass matches reference."""
    reference_min = 0.0009528852621284171
    test_min = [
        mock_covariance_gauss.mass_richness(mock_covariance_gauss.min_mass, i)
        for i in range(mock_covariance_gauss.num_richness_bins)
    ]
    assert np.sum(test_min) == pytest.approx(reference_min)
@pytest.mark.parametrize(
    "z_i, reference_val",
    [
        (0, 6613.739621696188),
        (4, 55940746.72160228),
        (8, 3781771343.1278453),
        (14, 252063237.8394578),
        (17, 1113852.72571463),
    ],
)
def test_calc_dv(
    mock_covariance_gauss: CovarianceClusters, z_i, reference_val
):
    """Comoving volume element per redshift bin matches reference values.

    Both sides are scaled by 1e4 so pytest.approx's default relative
    tolerance applies at a comparable magnitude.
    """
    z_true = 0.8
    test = mock_covariance_gauss.comoving_volume_element(z_true, z_i) / 1e4
    assert test == pytest.approx(reference_val / 1e4)
def test_cov_nxn(
    mock_covariance_gauss: ClusterCountsGaussian,
    mock_covariance_ssc: ClusterCountsSSC,
):
    """Gaussian + SSC covariance blocks for the same tracer sum to reference."""
    ref_sum = 130462.91921818888
    cov_00_gauss = mock_covariance_gauss.get_covariance_block_for_sacc(
        ("clusters_0_0",), ("clusters_0_0",)
    )
    cov_00_ssc = mock_covariance_ssc.get_covariance_block_for_sacc(
        ("clusters_0_0",), ("clusters_0_0",)
    )
    assert cov_00_gauss + cov_00_ssc == pytest.approx(ref_sum, rel=1e-4)
|
import cv2
from path_names import PathNamesSegmentation as pns
import numpy as np
import matplotlib.pyplot as plt
from skimage import morphology
from scipy import ndimage as ndi
# Output roots for the binary masks and the masked colour images.
OUTPUT_FOLDER_MASK = pns.SEGMENTED + "otsu\\mask\\image4"
OUTPUT_FOLDER_SEGMENTED = pns.SEGMENTED + "otsu\\segmented\\image4"


def do_Otsu(file_path, output_path):
    """Otsu-threshold the image at *file_path*; save the mask and overlay.

    The binary mask is cleaned with small-object removal and hole filling,
    then applied to the default colour image; both results are written
    below the OUTPUT_FOLDER_* roots at *output_path*.
    """
    # NOTE(review): the colour base is always pns.DEFAULT_IMAGE, not
    # file_path — presumably intentional (segment the noisy copy, overlay
    # the clean original), but worth confirming.
    image_color = cv2.imread(pns.DEFAULT_IMAGE)
    image_color = cv2.cvtColor(image_color, cv2.COLOR_BGR2RGB)
    # Threshold on a single channel only.
    image = cv2.imread(file_path)[:, :, 0]
    _, segmented = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Clean the binary mask: drop specks smaller than 55 px, fill holes.
    segmented = morphology.remove_small_objects(segmented.astype(bool), 55)
    segmented = ndi.binary_fill_holes(segmented, np.ones((3, 3)))
    segmented = segmented.astype(np.uint8)
    mask = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)
    mask[segmented == 0] = [255, 255, 255]
    mask = cv2.bitwise_not(mask)
    segmented = cv2.bitwise_and(image_color, mask)
    plt.imsave(OUTPUT_FOLDER_SEGMENTED + output_path, segmented)
    plt.imsave(OUTPUT_FOLDER_MASK + output_path, mask)
# Run Otsu segmentation on the default image and on every noisy variant.
# NOTE(review): "saltr_low.png" / "saltr_high.png" look like typos for
# "salt_..." — confirm with downstream consumers before renaming the files.
do_Otsu(pns.DEFAULT_IMAGE,"\\default_image4.png")
do_Otsu(pns.GAUSSIAN_LOW, "\\gaussian\\gaussian_low.png")
do_Otsu(pns.GAUSSIAN_MODERATE, "\\gaussian\\gaussian_moderate.png")
do_Otsu(pns.GAUSSIAN_HIGH, "\\gaussian\\gaussian_high.png")
do_Otsu(pns.LAPLACIAN_LOW, "\\laplacian\\laplacian_low.png")
do_Otsu(pns.LAPLACIAN_MODERATE, "\\laplacian\\laplacian_moderate.png")
do_Otsu(pns.LAPLACIAN_HIGH, "\\laplacian\\laplacian_high.png")
do_Otsu(pns.POISSON_LOW, "\\poisson\\poisson_low.png")
do_Otsu(pns.POISSON_MODERATE, "\\poisson\\poisson_moderate.png")
do_Otsu(pns.POISSON_HIGH, "\\poisson\\poisson_high.png")
do_Otsu(pns.SPECKLE_LOW, "\\speckle\\speckle_low.png")
do_Otsu(pns.SPECKLE_MODERATE, "\\speckle\\speckle_moderate.png")
do_Otsu(pns.SPECKLE_HIGH, "\\speckle\\speckle_high.png")
do_Otsu(pns.UNIFORM_LOW, "\\uniform\\uniform_low.png")
do_Otsu(pns.UNIFORM_MODERATE, "\\uniform\\uniform_moderate.png")
do_Otsu(pns.UNIFORM_HIGH, "\\uniform\\uniform_high.png")
do_Otsu(pns.PEPPER_LOW, "\\pepper\\pepper_low.png")
do_Otsu(pns.PEPPER_MODERATE, "\\pepper\\pepper_moderate.png")
do_Otsu(pns.PEPPER_HIGH, "\\pepper\\pepper_high.png")
do_Otsu(pns.SALT_LOW, "\\salt\\saltr_low.png")
do_Otsu(pns.SALT_MODERATE, "\\salt\\salt_moderate.png")
do_Otsu(pns.SALT_HIGH, "\\salt\\saltr_high.png")
do_Otsu(pns.SP_LOW, "\\salt&pepper\\salt&pepper_low.png")
do_Otsu(pns.SP_MODERATE, "\\salt&pepper\\salt&pepper_moderate.png")
do_Otsu(pns.SP_HIGH, "\\salt&pepper\\salt&pepper_high.png")
|
import sys
import os
import argparse
from flask import Flask, jsonify
from flask import request
# fix app root
app_root = os.getcwd()
sys.path.append(app_root)
from app.config import app_ip, app_version
from app.logger import get_logger
log = get_logger(__name__)
flask_app = Flask(__name__)
log.info('app root dir: {}'.format(app_root))
@flask_app.errorhandler(404)
def not_found(error):
    # Return a uniform JSON error body instead of Flask's default HTML 404.
    return json_error(Exception('endpoint not found'), http_code=404)
@flask_app.route('/api/v1/version', methods=['GET'])
def get_version():
    # Report the application version from app.config.
    return jsonify({'version': app_version})
@flask_app.route('/api/v1/test', methods=['GET'])
def get_models():
    """Echo the request's query-string parameters back as JSON."""
    params = request.args.to_dict()
    try:
        return jsonify({'request_attr': params})
    except Exception as ex:
        return json_error(ex, http_code=500)
def json_error(msg, http_code=400):
    """Log *msg* (exception or message) and return a JSON error response.

    Falls back to a generic 500 body when the error itself cannot be
    serialised.
    """
    log.exception(msg)
    try:
        err = {'error': str(msg), 'exception': type(msg).__name__}
        return jsonify(err), http_code
    except Exception:  # was a bare except; intentionally broad fallback
        return jsonify({'exception': 'internal error'}), 500
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--port', default=None, type=int, help='rest port')
    args = arg_parser.parse_args()
    if args.port is not None:
        log.info('http port was provided as argument: {}'.format(args.port))
        # NOTE(review): debug=True enables the interactive debugger — do not
        # run this branch in production.
        flask_app.run(host=app_ip, port=args.port, debug=True)
    else:
        # do not provide port when run with gunicorn
        flask_app.run()
|
from flask import Flask, render_template, session, redirect, url_for
from model import Formulario, procuraAgenciasBB
app = Flask(__name__)
app.config.from_mapping(SECRET_KEY='JAJAJKKKAHERJJCCAASS')
@app.route('/', methods=['GET', 'POST'])
def index():
    # POST/redirect/GET: store the submitted agency in the session and
    # redirect so a page refresh does not resubmit the form.
    form = Formulario()
    if form.validate_on_submit():
        session['agencia'] = form.agencia.data
        return redirect(url_for('index'))
    return render_template('index.html', form=form, agencia=procuraAgenciasBB(session.get('agencia')))
if __name__ == '__main__':
    # NOTE(review): debug=True is for development only.
    app.run(debug=True)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import importlib
import importlib.util
import os
class LibsLoader():
    """Loads the per-language Nationalities class with a generic fallback."""

    def load(self, lang):
        """Return a Nationalities instance for *lang*.

        Tries the language-specific module "<lang>.nationalities" first
        and falls back to the top-level "nationalities" module when the
        language-specific one cannot be found.
        """
        module = "{}.nationalities".format(lang)
        # The original used os.path.exists(module), which tests for a file
        # literally named "<lang>.nationalities" and therefore (almost)
        # never matched; find_spec checks actual module importability.
        try:
            spec = importlib.util.find_spec(module)
        except ModuleNotFoundError:
            spec = None
        if spec is None:
            module = "nationalities"
        lang_module = importlib.import_module(module)
        LangClass = getattr(lang_module, "Nationalities")
        return LangClass()
|
# Generated by Django 3.1.2 on 2021-06-23 00:36
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the usuario model's database table to the literal "USUARIO".

    dependencies = [
        ('GestionUsuario', '0003_auto_20210621_2358'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='usuario',
            table='USUARIO',
        ),
    ]
|
"""CoolServer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from . import views
# Route table: the admin site plus one view per news section.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.index , name='index'),
    path('education/',views.edu,name='edu'),
    path('auto/',views.auto,name='auto'),
    path('business/',views.business,name='business'),
    path('india/',views.india,name='india'),
    path('lifestyle/',views.life,name='life'),
    path('entertainment/',views.ent,name='ent'),
    path('sport/',views.sport,name='sport'),
    path('technology/',views.tech,name='tech'),
    path('trending/',views.trending,name='trending'),
    path('world/',views.world,name='world'),
]
|
import logging
import configparser
from projectTS.lib.postData import postData
import projectTS.vals as vals
logger = logging.getLogger('projectTS.imagesProcessing.updateTrafficDensity')

# Read server/endpoint settings from config.ini's [DEFAULT] section.
config = configparser.ConfigParser()
config.read('config.ini')
default = config['DEFAULT']
server = default['server']
postDataNsp = default['postDataNsp']
interID = default['intersection_id']
token = default['access_token']
# Shared client used to push density updates for this intersection.
intersection = postData(server + postDataNsp, interID, token)
def updateTrafficDensity():
    """Push the busier road's traffic density to the server.

    Posts the larger of the north-south / west-south rates (with its
    state) to the intersection endpoint.  Does nothing while either road
    state is still unset.
    """
    # Only post once both road states have been observed.  The original
    # referenced `result` even when this guard failed, raising NameError.
    if vals.stateWS == '' or vals.stateNS == '':
        return
    if vals.rateNS >= vals.rateWS:
        payload = {'rate': vals.rateNS, 'state': vals.stateNS}
    else:
        payload = {'rate': vals.rateWS, 'state': vals.stateWS}
    result = intersection.postData(payload)
    if result:
        logger.info('Update data success')
    else:
        logger.error('Update data failed')
# Numbers and Maths (converted from Python 2 print statements; // keeps
# the original Python 2 integer-division results)
print("I will now count my chickens:")
print("Hens", 25 + 30 // 6)
print("Roosters", 100 - 25 * 3 % 4)
print("Now I will count the eggs:")
print(3 + 2 + 1 - 5 + 4 % 2 - 1 // 4 + 6)
print("Is it true that 3+2<5-7?")
print(3 + 2 < 5 - 7)
print("What is 3+2?", 3 + 2)
print("What is 5-7?", 5 - 7)
print("Oh, that's why its false")
print("How about more?")
print("Is it greater?", 5 > -2)
print("Is it greater or equal?", 5 >= -2)
print("Is it less or equal?", 5 <= -2)
class ItemValue:
    """Item for the fractional knapsack: weight, value and original index."""

    def __init__(self, wt, val, ind):
        self.wt = wt    # item weight
        self.val = val  # item value
        self.ind = ind  # original position in the input lists
        # Value density used for the greedy ordering.  The original used
        # floor division (val // wt), which collapses distinct ratios
        # (e.g. 7/2 and 6/2 both became 3) and could break the ordering.
        self.cost = val / wt

    def __lt__(self, other):
        return self.cost < other.cost
def fractional_knapsack_max_value(wt, val, capacity):
    """Greedy fractional knapsack.

    Returns (total_value, solution); solution lists each chosen item with
    the value taken from it and the fraction used (100 == whole item).
    """
    items = [ItemValue(w, v, idx) for idx, (w, v) in enumerate(zip(wt, val))]
    items.sort(reverse=True)  # best value density first
    solution = []
    total_value = 0
    for entry in items:
        weight = int(entry.wt)
        value = int(entry.val)
        if capacity - weight >= 0:
            # The whole item fits.
            capacity -= weight
            total_value += value
            solution.append(
                {'item': entry, 'total_value': value, 'fraction': 100})
        else:
            # Take only the fraction that still fits, then stop.
            fraction = capacity / weight
            total_value += value * fraction
            capacity = int(capacity - (weight * fraction))
            solution.append(
                {'item': entry, 'total_value': value * fraction, 'fraction': fraction})
            break
    return total_value, solution
def print_solution(solution):
    """Print each entry's contributed value and the fraction used."""
    for entry in solution:
        print('value:', entry.get('total_value'), '; fraction:', entry.get('fraction'))
if __name__ == "__main__":
wt = [10, 40, 20, 30]
val = [60, 40, 100, 120]
capacity = 50
max_value, solution = fractional_knapsack_max_value(wt, val, capacity)
print("valor mรกximo =", max_value)
print_solution(solution)
|
from adapters.examination_adapter import ExaminationAdapter
def test_should_group_examinations():
    """Records sharing the same pipe id are grouped under a single key."""
    fake_response = [{
        'node': {
            'title': '301413532',
            'record_fields': [
                {
                    'name': 'id_avaliacao_lms',
                    'value': '356366'
                },
                {
                    'name': 'id_pipe',
                    'value': '["301413532"]'
                }
            ]
        }
    }, {
        'node': {
            'title': '301413532',
            'record_fields': [
                {
                    'name': 'id_avaliacao_lms',
                    'value': '356368'
                },
                {
                    'name': 'id_pipe',
                    'value': '["301413532"]'
                }
            ]
        }
    }]
    examinations_groups = ExaminationAdapter.group_examinations_in_pipes_ids(fake_response)
    # Both records carry pipe id "301413532": one group with two entries.
    assert "301413532" in examinations_groups and len(examinations_groups["301413532"]) == 2
def test_should_extract_field():
    """The adapter pulls the id_avaliacao_lms value out of a raw record."""
    raw_record = {
        'title': '301413532',
        'record_fields': [
            {'name': 'id_avaliacao_lms', 'value': '356366'},
            {'name': 'id_pipe', 'value': '["301413532"]'},
        ],
    }
    assert ExaminationAdapter.extract_examination(raw_record) == "356366"
|
def negativeindex():
    """Demonstrate negative indexing for accessing tuple elements."""
    letters = ('p', 'e', 'r', 'm', 'i', 't')
    print(letters)
    print("index -1 : ", letters[-1])  # last element: 't'
    print("index -6 : ", letters[-6])  # first element: 'p'


negativeindex()
|
#!/usr/bin/env python
# $Id$
##
## This file is part of pyFormex 0.7.1 Release Sat May 24 13:26:21 2008
## pyFormex is a Python implementation of Formex algebra
## Website: http://pyformex.berlios.de/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
##
## This program is distributed under the GNU General Public License
## version 2 or later (see file COPYING for details)
##
"""Coordinates of points in 3D space"""
from numpy import *
import globaldata as GD
try:
    # Optional accelerated implementation; fall back when unavailable.
    from lib.misc import fuse
    have_fast_fuse = True
except ImportError:  # was a bare except
    have_fast_fuse = False
# default float and int types
Float = float32
Int = int32


def istype(a, c):
    """Return True when asarray(a)'s dtype kind equals the character *c*."""
    arr = asarray(a)
    return arr.dtype.kind == c
# Implement 'roll' for older versions of numpy
# (only defined when the star import above did not provide a 'roll' name)
if 'roll' not in dir():
    def roll(a, shift, axis=None):
        """Roll the elements in the array by 'shift' positions along
        the given axis.
        A positive shift moves elements to the 'right' in a 1D array.
        """
        a = asarray(a)
        if axis is None:
            # Flat roll: act on the flattened array and restore shape after.
            n = a.size
            reshape=1
        else:
            n = a.shape[axis]
            reshape=0
        shift %= n
        # Index permutation that realises the roll.
        indexes = concatenate((arange(n-shift,n),arange(n-shift)))
        res = a.take(indexes, axis)
        if reshape:
            return res.reshape(a.shape)
        else:
            return res
###########################################################################
##
## some math functions
##
#########################
# pi is defined in numpy
# rad is a multiplier to transform degrees to radians
rad = pi/180.

# Convenience functions: trigonometric functions with argument in degrees
# Should we keep this in ???
def sind(arg):
    """Return the sin of an angle given in degrees."""
    angle = arg * rad
    return sin(angle)


def cosd(arg):
    """Return the cos of an angle given in degrees."""
    angle = arg * rad
    return cos(angle)


def tand(arg):
    """Return the tan of an angle given in degrees."""
    angle = arg * rad
    return tan(angle)
def dotpr(A, B, axis=-1):
    """Return the dot product of vectors of A and B along *axis*.
    The default axis is the last.
    """
    products = asarray(A) * asarray(B)
    return products.sum(axis)
def length(A, axis=-1):
    """Return the length of the vectors of A along *axis* (default last)."""
    squares = asarray(A) * asarray(A)
    return sqrt(squares.sum(axis))
def normalize(A, axis=-1):
    """Normalize the vectors of A along *axis* (default last)."""
    A = asarray(A)
    new_shape = list(A.shape)
    new_shape[axis] = 1
    norms = length(A, axis).reshape(new_shape)
    return A / norms
def projection(A, B, axis=-1):
    """Return the (signed) length of the projection of vectors of A on B.
    The default axis is the last.
    """
    numerator = dotpr(A, B, axis)
    return numerator / length(B, axis)
def norm(v,n=2):
    """Return a norm of the vector v.
    Default is the quadratic norm (vector length)
    n == 1 returns the sum
    n <= 0 returns the max absolute value

    NOTE(review): a non-integer n strictly between 0 and 2 (excluding 1)
    falls through every branch and returns None.
    """
    a = asarray(v).flat
    if n == 2:
        return sqrt((a*a).sum())
    if n > 2:
        return (a**n).sum()**(1./n)
    if n == 1:
        return a.sum()
    if n <= 0:
        return abs(a).max()
    return
def inside(p, mi, ma):
    """Return true if point p is inside bbox defined by points mi and ma"""
    for axis in range(3):
        if p[axis] < mi[axis] or p[axis] > ma[axis]:
            return False
    return True
def isClose(values, target, rtol=1.e-5, atol=1.e-8):
    """Return a boolean array flagging the elements of *values* close to *target*.

    values is a float array, target is a float value; both should be
    broadcastable to the same shape.  Two values a and b are close when
    | a - b | < atol + rtol * | b |
    """
    values = asarray(values)
    target = asarray(target)
    tolerance = atol + rtol * abs(target)
    return abs(values - target) < tolerance
def origin():
    """Return a point with coordinates [0.,0.,0.]."""
    pt = zeros((3,), dtype=Float)
    return pt
def unitVector(axis):
    """Return a unit vector in the direction of a global axis (0,1,2).
    Use normalize() to get a unit vector in a general direction.
    """
    vec = origin()
    vec[axis] = 1.0
    return vec
def rotationMatrix(angle,axis=None):
    """Return a rotation matrix over angle, optionally around axis.

    The angle is specified in degrees.
    If axis is None (default), a 2x2 rotation matrix is returned.
    Else, axis specifies the rotation axis in a 3D world: either one of
    0,1,2 (a global axis) or a vector of 3 components (an axis through
    the origin).  In either case a 3x3 rotation matrix is returned.
    Note that:
      rotationMatrix(angle,[1,0,0]) == rotationMatrix(angle,0)
      rotationMatrix(angle,[0,1,0]) == rotationMatrix(angle,1)
      rotationMatrix(angle,[0,0,1]) == rotationMatrix(angle,2)
    but the latter function calls are more efficient.
    The result is returned as an array.
    """
    a = angle*rad
    c = cos(a)
    s = sin(a)
    if axis is None:  # was `axis == None`
        f = [[c,s],[-s,c]]
    elif isinstance(axis, int):  # was `type(axis) == int`
        f = [[0.0 for i in range(3)] for j in range(3)]
        # list() so slice concatenation also works when range() is lazy
        axes = list(range(3))
        i,j,k = axes[axis:]+axes[:axis]
        f[i][i] = 1.0
        f[j][j] = c
        f[j][k] = s
        f[k][j] = -s
        f[k][k] = c
    else:
        # Rodrigues-style matrix for an arbitrary axis (X,Y,Z).
        t = 1-c
        X,Y,Z = axis
        f = [ [ t*X*X + c  , t*X*Y + s*Z, t*X*Z - s*Y ],
              [ t*Y*X - s*Z, t*Y*Y + c  , t*Y*Z + s*X ],
              [ t*Z*X + s*Y, t*Z*Y - s*X, t*Z*Z + c   ] ]
    return array(f)
def bbox(objects):
    """Compute the bounding box of a list of objects.

    All the objects in the list should have a bbox() method.
    This is like the bbox() method of the Coords class, but the resulting
    box encloses all the Coords in the list.
    Objects returning a None bbox are ignored; with no usable bboxes the
    result collapses to the origin.
    """
    bboxes = [f.bbox() for f in objects]
    bboxes = [bb for bb in bboxes if bb is not None]
    if len(bboxes) == 0:
        o = origin()
        bboxes = [ [o,o] ]
    return Coords(concatenate(bboxes)).bbox()
###########################################################################
##
## class Coords
##
#########################
#
class Coords(ndarray):
"""A Coords object is a collection of 3D coordinates of points.
Coords is implemented as a Numerical Python array with a length of its
last axis equal to 3.
Each set of 3 values along the last axis represents a single point in 3D.
The datatype should be a float type; default is Float.
!! These restrictions are currently only check at creation time.
!! It is the responsibility of the user to keep consistency.
"""
# !! DO WE NEED AN EMPTY Coords OBJECT?
# I guess not, so we made the default constructor generate a single
# point [0.,0.,0.]
def __new__(cls, data=None, dtyp=None, copy=False):
"""Create a new instance of class Coords.
If no data are given, a single point (0.,0.,0.) will be created.
If specified, data should evaluate to an (...,3) shaped array of floats.
If copy==True, the data are copied.
If no dtype is given that of data are used, or float32 by default.
"""
if data is None:
data = zeros((3,),dtype=Float)
# Turn the data into an array, and copy if requested
ar = array(data, dtype=dtyp, copy=copy)
if ar.shape[-1] == 3:
pass
elif ar.shape[-1] in [1,2]:
ar = concatenate([ar,zeros(ar.shape[:-1]+(3-ar.shape[-1],))],axis=-1)
else:
raise ValueError,"Expected a length 1,2 or 3 for last array axis"
# Make sure dtype is a float type
if ar.dtype.kind != 'f':
ar = ar.astype(Float)
# Transform 'subarr' from an ndarray to our new subclass.
ar = ar.view(cls)
return ar
## def __array_finalize__(self,obj):
## #Make sure array shape is (n,3) float
## print "SHAPE = %s" % str(self.shape)
## print "DTYPE = %s" % str(self.dtype)
## if self.shape[-1] != 3:
## print 'Expected shape (n,3)'
## if self.dtype.kind != 'f':
## raise ValueError,"Expected a floating point type."
## if len(self.shape) != 2:
## print self.size
## self.shape = (self.size // 3,3)
    ###########################################################################
    #
    # Methods that return information about a Coords object or other
    # views on the object data, without changing the object itself.
    # General
    def points(self):
        """Return the data as a simple set of points.
        This reshapes the array to a 2-dimensional array, flattening
        the structure of the points.
        """
        return self.reshape((-1,3))

    def pshape(self):
        """Return shape of the points array.
        This is the shape of the Coords array with last axis removed.
        """
        return self.shape[:-1]

    def npoints(self):
        """Return the total number of points."""
        return asarray(self.shape[:-1]).prod()

    def x(self):
        """Return the x-coordinates (indexing the last axis at 0)."""
        return self[...,0]

    def y(self):
        """Return the y-coordinates (indexing the last axis at 1)."""
        return self[...,1]

    def z(self):
        """Return the z-coordinates (indexing the last axis at 2)."""
        return self[...,2]
# Size
def bbox(self):
"""Return the bounding box of a set of points.
The bounding box is the smallest rectangular volume in global
coordinates, such at no points are outside the box.
It is returned as a Coords object with shape (2,3): the first row
holds the minimal coordinates and the second row the maximal.
"""
if self.size > 0:
s = self.points()
return row_stack([ s.min(axis=0), s.max(axis=0) ])
else:
return None
def center(self):
"""Return the center of the Coords.
The center of a Coords is the center of its bbox().
The return value is a (3,) shaped Coords object.
"""
min,max = self.bbox()
return 0.5 * (max+min)
def centroid(self):
"""Return the centroid of the Coords.
The centroid of a Coords is the point whose coordinates
are the mean values of all points.
The return value is a (3,) shaped Coords object.
"""
return self.points().mean(axis=0)
def sizes(self):
"""Return the sizes of the Coords.
Return an array with the length of the bbox along the 3 axes.
"""
min,max = self.bbox()
return max-min
def dsize(self):
"""Return an estimate of the global size of the Coords.
This estimate is the length of the diagonal of the bbox()."""
min,max = self.bbox()
return length(max-min)
def bsphere(self):
"""Return the diameter of the bounding sphere of the Coords.
The bounding sphere is the smallest sphere with center in the
center() of the Coords, and such that no points of the Coords
are lying outside the sphere.
"""
return self.distanceFromPoint(self.center()).max()
# Distance
def distanceFromPlane(self,p,n):
"""Return the distance of points f from the plane (p,n).
p is a point specified by 3 coordinates.
n is the normal vector to a plane, specified by 3 components.
The return value is a [...] shaped array with the distance of
each point to the plane through p and having normal n.
Distance values are positive if the point is on the side of the
plane indicated by the positive normal.
"""
p = asarray(p).reshape((3))
n = asarray(n).reshape((3))
n /= length(n)
d = inner(self,n) - inner(p,n)
return d
def distanceFromLine(self,p,n):
"""Return the distance of points f from the line (p,n).
p is a point on the line specified by 3 coordinates.
n is a vector specifying the direction of the line through p.
The return value is a [...] shaped array with the distance of
each point to the line through p with direction n.
All distance values are positive or zero.
"""
p = asarray(p).reshape((3))
n = asarray(n).reshape((3))
t = cross(n,p-self)
d = sqrt(sum(t*t,-1)) / length(n)
return d
def distanceFromPoint(self,p):
"""Return the distance of points f from the point p.
p is a single point specified by 3 coordinates.
The return value is a [...] shaped array with the distance of
each point to point p.
All distance values are positive or zero.
"""
p = asarray(p).reshape((3))
d = self-p
return sqrt(sum(d*d,-1))
# Test position
def test(self,dir=0,min=None,max=None,atol=0.):
    """Flag points having coordinates between min and max.

    Convenient for clipping a Coords in a specified direction: returns
    a 1D integer/boolean array flagging (with 1 or True) the elements
    having coordinates in the required range. Use where(result) to get
    the passing element numbers, or clip()/cclip() directly.

    The test plane can be defined in two ways, depending on dir:
    - if dir is 0, 1 or 2, it is a global axis and min/max are scalar
      bounds for the coordinate along that axis (default: axis 0/x);
    - else dir is a (3,)-compatible normal vector and min/max are
      points ((3,)-shaped) through which the two clipping planes pass.
    One of the two clipping planes may be left unspecified.

    NOTE: min/max deliberately shadow the builtins here; they are part
    of the public keyword interface and are kept unchanged.
    """
    if min is None and max is None:
        # Python 2 raise syntax, consistent with the rest of this file.
        raise ValueError,"At least one of min or max have to be specified."
    if type(dir) == int:
        # Axis-aligned test: atol widens the accepted band on both sides.
        if not min is None:
            T1 = self[...,dir] > min - atol
        if not max is None:
            T2 = self[...,dir] < max + atol
    else:
        # General plane test: signed distance from each bounding plane.
        if not min is None:
            T1 = self.distanceFromPlane(min,dir) > - atol
        if not max is None:
            T2 = self.distanceFromPlane(max,dir) < atol
    # Combine: only the bounds that were actually given contribute.
    if min is None:
        T = T2
    elif max is None:
        T = T1
    else:
        T = T1 * T2
    return T
def fprint(self,fmt="%10.3e %10.3e %10.3e"):
    """Formatted printing of a Coords.

    The supplied format should contain 3 formatting sequences, one
    for each of the three coordinates of a point. Each point is
    printed on its own line. (Python 2 print statement.)
    """
    for p in self.points():
        print fmt % tuple(p)
##############################################################################
def set(self,f):
    """Set the coordinates from those in the given array.

    Assigns in place through the ellipsis so the object identity is
    preserved (rebinding self would only change the local name).
    """
    self[...] = f # do not be tempted to use self = f !
##############################################################################
#
# Transformations that preserve the topology (but change coordinates)
#
# A. Affine transformations
#
# Scaling
# Translation
# Central Dilatation = Scaling + Translation
# Rotation
# Shear
# Reflection
# Affine
#
# The following methods return transformed coordinates, but by default
# they do not change the original data. If the optional argument inplace
# is set True, however, the coordinates are changed inplace.
def scale(self,scale,inplace=False):
    """Return a copy scaled with scale[i] in direction i.

    The scale should be a list of 3 numbers, or a single number.
    In the latter case the scaling is homothetic.
    If inplace is True, self is scaled in place and returned instead
    of a copy.
    """
    target = self if inplace else self.copy()
    target *= scale
    return target
def translate(self,vector,distance=None,inplace=False):
    """Translate a Coords object.

    The translation vector can be specified in one of three ways:
    - an axis number (0,1,2): a unit vector along that axis is used;
    - a single translation vector;
    - an array of translation vectors, broadcastable to self's shape.
    If a distance value is given, the translation vector is multiplied
    with it before being added to the coordinates.
    Thus the following are all equivalent:
      F.translate(1)
      F.translate(1,1)
      F.translate([0,1,0])
      F.translate([0,2,0],0.5)
    """
    target = self if inplace else self.copy()
    if type(vector) is int:
        vector = unitVector(vector)
    # Wrap in a fresh Coords so scaling by distance never touches
    # the caller's data.
    shift = Coords(vector)
    if distance is not None:
        shift *= distance
    target += shift
    return target
def rotate(self,angle,axis=2,around=None,inplace=False):
    """Return a copy rotated over angle around axis.

    The angle is specified in degrees.
    The axis is either one of (0,1,2) designating the global axes, or
    a vector specifying an axis through the origin. If no axis is
    specified, rotation is around the 2(z)-axis (convenient for 2D).
    As a convenience, a 3x3 rotation matrix may be passed as angle,
    in which case rotate(mat) is equivalent to affine(mat).
    All rotations are performed around [0,0,0] unless a rotation
    origin is given in 'around'.
    """
    if inplace:
        out = self
    else:
        out = self.copy()
    # If angle is not already a matrix, build one from (angle,axis).
    if not isinstance(angle,ndarray):
        angle = rotationMatrix(angle,axis)
    if around is not None:
        around = asarray(around)
        # Shift to the rotation origin first; affine() below receives
        # around as its translation and shifts back afterwards.
        out = out.translate(-around,inplace=inplace)
    # When around is None, affine() gets vec=None and adds nothing.
    out = out.affine(angle,around,inplace=inplace)
    return out
def shear(self,dir,dir1,skew,inplace=False):
    """Return a copy skewed in direction dir of the plane (dir,dir1).

    The coordinate dir is replaced with (dir + skew * dir1).
    If inplace is True, self is modified and returned.
    """
    target = self if inplace else self.copy()
    target[...,dir] = target[...,dir] + skew * target[...,dir1]
    return target
def reflect(self,dir=2,pos=0,inplace=False):
    """Mirror the coordinates in direction dir against the plane at pos.

    Default position of the plane is through the origin.
    Default mirror direction is the z-direction.
    If inplace is True, self is modified and returned.
    """
    target = self if inplace else self.copy()
    # x -> 2*pos - x, done as negate-then-shift.
    target[...,dir] *= -1
    target[...,dir] += 2*pos
    return target
# An alias
mirror = reflect
def affine(self,mat,vec=None,inplace=False):
    """Return a general affine transform of the Coords.

    The returned Coords has coordinates given by xorig * mat + vec,
    where mat is a 3x3 matrix and vec a length-3 list.
    Note: the matrix product always creates a new array, so even with
    inplace=True the result is a new object.
    """
    target = self if inplace else self.copy()
    target = dot(target, mat)
    if vec is not None:
        target = target + vec
    return target
#
#
# B. Non-Affine transformations.
#
# These always return copies !
#
# Cylindrical, Spherical, Isoparametric
#
def cylindrical(self,dir=[0,1,2],scale=[1.,1.,1.]):
    """Converts from cylindrical to cartesian after scaling.

    dir specifies which coordinates are interpreted as respectively
    distance(r), angle(theta) and height(z). Default order [r,theta,z].
    scale scales the coordinate values prior to the transformation
    (given in order r,theta,z). The angle is interpreted in degrees.

    NOTE: the mutable default arguments are read-only here and kept
    for interface compatibility.
    """
    # We put in a optional scaling, because doing this together with the
    # transforming is cheaper than first scaling and then transforming.
    f = zeros_like(self)
    r = scale[0] * self[...,dir[0]]
    # rad is a module-level degrees-to-radians factor (presumably
    # pi/180 -- defined outside this view).
    theta = (scale[1]*rad) * self[...,dir[1]]
    f[...,0] = r*cos(theta)
    f[...,1] = r*sin(theta)
    f[...,2] = scale[2] * self[...,dir[2]]
    return f
def toCylindrical(self,dir=[0,1,2]):
    """Converts from cartesian to cylindrical coordinates.

    dir specifies which coordinate axes are parallel to respectively
    the cylindrical axes distance(r), angle(theta) and height(z).
    Default order is [x,y,z]. The returned angle is in degrees.
    """
    f = zeros_like(self)
    x,y,z = [ self[...,i] for i in dir ]
    f[...,0] = sqrt(x*x+y*y)
    # Divide by rad (module-level deg->rad factor) to report degrees.
    f[...,1] = arctan2(y,x) / rad
    f[...,2] = z
    return f
def spherical(self,dir=[0,1,2],scale=[1.,1.,1.],colat=False):
    """Converts from spherical to cartesian after scaling.

    dir specifies which coordinates are interpreted as respectively
    longitude(theta), latitude(phi) and distance(r).
    scale scales the coordinate values prior to the transformation.
    Angles are interpreted in degrees.
    Latitude (elevation angle) is measured from the equator towards
    the north pole (90); south pole is -90.
    If colat=True, the second angle is the colatitude (90-lat) instead.
    """
    f = self.reshape((-1,3))
    theta = (scale[0]*rad) * f[:,dir[0]]
    phi = (scale[1]*rad) * f[:,dir[1]]
    r = scale[2] * f[:,dir[2]]
    if colat:
        phi = 90.0*rad - phi
    # rc is the radius of the latitude circle at elevation phi.
    rc = r*cos(phi)
    f = column_stack([rc*cos(theta),rc*sin(theta),r*sin(phi)])
    return f.reshape(self.shape)
def toSpherical(self,dir=[0,1,2]):
    """Converts from cartesian to spherical coordinates.

    dir specifies which coordinate axes are parallel to respectively
    the spherical axes distance(r), longitude(theta) and latitude(phi).
    Latitude is the elevation angle from the equator towards the
    north pole (90); south pole is -90. Default order [0,1,2] puts
    the equator in the (x,y)-plane. Returned angles are in degrees.
    """
    v = self[...,dir].reshape((-1,3))
    dist = sqrt(sum(v*v,-1))
    # 'long' shadows the Python 2 builtin; kept unchanged here.
    long = arctan2(v[:,0],v[:,2]) / rad
    # Guard against zero distance to avoid dividing by 0 in arcsin.
    lat = where(dist <= 0.0,0.0,arcsin(v[:,1]/dist) / rad)
    f = column_stack([long,lat,dist])
    return f.reshape(self.shape)
def bump1(self,dir,a,func,dist):
    """Return a Coords with a one-dimensional bump.

    dir is the axis of the modified coordinates;
    a is the point that forces the bumping;
    dist is the axis along which the distance is measured;
    func computes the bump intensity from the distance.
    func(0) should be different from 0.
    """
    result = self.copy()
    offset = result[...,dist] - a[dist]
    result[...,dir] = result[...,dir] + func(offset) * a[dir] / func(0)
    return result
def bump2(self,dir,a,func):
    """Return a Coords with a two-dimensional bump.

    dir is the axis of the modified coordinates;
    a is the point that forces the bumping;
    func computes the bump intensity from the (2-D) distance to a,
    measured in the plane of the two remaining axes.
    func(0) should be different from 0.
    """
    result = self.copy()
    axes = [i for i in range(3) if i != dir]
    u = result[...,axes[0]] - a[axes[0]]
    v = result[...,axes[1]] - a[axes[1]]
    r = sqrt(u*u + v*v)
    result[...,dir] = result[...,dir] + func(r) * a[dir] / func(0)
    return result
# This is a generalization of both the bump1 and bump2 methods.
# If it proves to be useful, it might replace them one day
# An interesting modification might be to have a point for definiing
# the distance and a point for defining the intensity (3-D) of the
# modification
def bump(self,dir,a,func,dist=None):
    """Return a Coords with a bump.

    A bump is a modification of a set of coordinates by a non-matching
    point; a common use is forcing a surface to be indented by a point.

    dir is the axis of the modified coordinates;
    a is the point that forces the bumping;
    func computes the bump intensity from the distance
    (func(0) should be different from 0);
    dist is the direction in which the distance is measured: one axis
    or a list of one to three axes. One axis behaves like bump1, two
    like bump2. Default is the set of 3 axes minus dir (i.e. bump2).

    This generalizes bump1 and bump2 and may replace them one day.
    """
    f = self.copy()
    # BUG FIX: use an identity test instead of `dist == None`; the
    # equality form can trigger elementwise comparison for array input.
    if dist is None:
        dist = [0,1,2]
        dist.remove(dir)
    try:
        l = len(dist)
    except TypeError:
        # A single axis number was given: wrap it in a list.
        l = 1
        dist = [dist]
    d = f[...,dist[0]] - a[dist[0]]
    if l==1:
        # 1-D distance is just the absolute offset (no sqrt).
        d = abs(d)
    else:
        # 2-D/3-D: accumulate squared offsets, then take the root.
        d = d*d
        for i in dist[1:]:
            d1 = f[...,i] - a[i]
            d += d1*d1
        d = sqrt(d)
    f[...,dir] += func(d)*a[dir]/func(0)
    return f
# NEW implementation flattens coordinate sets to ease use of
# complicated functions
def newmap(self,func):
    """Return a Coords mapped by a 3-D function (flattened variant).

    func is a numerical function taking three array arguments and
    producing a list of three output arrays; [x,y,z] is replaced by
    func(x,y,z). The function must be applicable to arrays (numpy
    operations only). The coordinate sets are flattened before the
    call to ease the use of complicated functions.

    NOTE(review): shape[2] = 1 assumes a 3-dimensional
    (nelems,nplex,3) array, and concatenate() returns a plain
    ndarray, not a Coords -- confirm both are intended.
    """
    x,y,z = func(self[...,0].flat,self[...,1].flat,self[...,2].flat)
    shape = list(self.shape)
    shape[2] = 1
    #print shape,reshape(x,shape)
    f = concatenate([reshape(x,shape),reshape(y,shape),reshape(z,shape)],2)
    #print f.shape
    return f
def map(self,func):
    """Return a Coords mapped by a 3-D function.

    func is a numerical function taking three arguments and producing
    a list of three output values; [x,y,z] is replaced by func(x,y,z).
    The function must be applicable to arrays, so it should only use
    numerical operations and functions understood by numpy.
    See also map1 and mapd.
    Example: E.map(lambda x,y,z: [2*x,3*y,4*z])
    is equivalent with E.scale([2,3,4])
    """
    mapped = func(self[...,0], self[...,1], self[...,2])
    result = zeros_like(self)
    for axis in range(3):
        result[...,axis] = mapped[axis]
    return result
def map1(self,dir,func):
    """Return a Coords where coordinate dir is mapped by a 1-D function.

    func is a numerical function taking one argument and producing one
    result; coordinate dir is replaced by func(coord[dir]). The
    function must be applicable to arrays (numpy operations only).
    See also map and mapd.
    """
    result = self.copy()
    result[...,dir] = func(result[...,dir])
    return result
def mapd(self,dir,func,point,dist=None):
    """Map one coordinate by a function of the distance to a point.

    func is a numerical function taking one argument and producing one
    result; coordinate dir is replaced by func(d), where d is the
    distance to point. The function must be applicable to arrays
    (numpy operations only).
    By default the distance is computed in 3-D, but dist may restrict
    it to a subset of axes (2-D or 1-D distance).
    See also map and map1.
    Example: E.mapd(2,lambda d:sqrt(10**2-d**2),E.center(),[0,1])
    maps E on a sphere with radius 10.
    """
    f = self.copy()
    # BUG FIX: use an identity test instead of `dist == None`; the
    # equality form can trigger elementwise comparison for array input.
    if dist is None:
        dist = [0,1,2]
    try:
        l = len(dist)
    except TypeError:
        # A single axis number was given: wrap it in a list.
        l = 1
        dist = [dist]
    d = f[...,dist[0]] - point[dist[0]]
    if l==1:
        # 1-D distance is just the absolute offset (no sqrt).
        d = abs(d)
    else:
        # 2-D/3-D: accumulate squared offsets, then take the root.
        d = d*d
        for i in dist[1:]:
            d1 = f[...,i] - point[i]
            d += d1*d1
        d = sqrt(d)
    f[...,dir] = func(d)
    return f
def replace(self,i,j,other=None):
    """Replace the coordinates along the axes i by those along j.

    i and j are lists of axis numbers or single axis numbers.
    replace([0,1,2],[1,2,0]) rolls the axes by 1;
    replace([0,1],[1,0]) swaps axes 0 and 1.
    An optional third argument gives another Coords object (of the
    same dimensions) to take the replacement coordinates from.
    """
    source = self if other is None else other
    result = self.copy()
    result[...,i] = source[...,j]
    return result
def swapAxes(self,i,j):
    """Swap coordinate axes i and j.

    Beware! This swaps the coordinate components (last axis entries),
    which is different from numpy's swapaxes() method!
    """
    return self.replace([i,j],[j,i])
def rollAxes(self,n=1):
    """Roll the coordinate axes over the given amount.

    Default is 1: axis 0 becomes the new axis 1, 1 becomes 2 and
    2 becomes 0. n is reduced modulo 3; rolling acts on the last
    (coordinate) axis only.
    """
    return roll(self, int(n) % 3,axis=-1)
def projectOnSphere(self,radius,center=[0.,0.,0.]):
    """Project the Coords on a sphere with given radius and center.

    Each point is moved along the ray from center through the point
    until it lies at the given radius. (The mutable default for
    center is never modified here.)
    """
    factor = radius / self.distanceFromPoint(center)
    result = self - center
    for axis in range(3):
        result[...,axis] *= factor
    result += center
    return result
##############################################################################
def fuse(self,nodesperbox=1,shift=0.5,rtol=1.e-5,atol=1.e-5):
    """Find (almost) identical nodes and return a compressed set.

    Points that are very close are replaced by a single point.
    Returns a tuple of two arrays:
    - the unique points as a Coords object,
    - an integer array (shaped like pshape()) giving, for each
      original node, its index in the unique coordinates array.

    The procedure divides 3D space into equally sized boxes with a
    mean population of nodesperbox, numbers the boxes to get a
    sortable scalar per node, and then compares only nodes within
    the same box using numpy's allclose() with tolerances rtol/atol.
    atol defaults larger than numpy's because pyformex typically
    runs in single precision.

    Caveat: two close nodes in adjacent boxes may not be fused;
    the probability is small and testing neighbour boxes is costly.
    """
    x = self.points()
    nnod = x.shape[0]
    # Calculate box size
    lo = array([ x[:,i].min() for i in range(3) ])
    hi = array([ x[:,i].max() for i in range(3) ])
    sz = hi-lo
    esz = sz[sz > 0.0]  # only keep the nonzero dimensions
    vol = esz.prod()
    # NOTE: '/' is integer division under Python 2 when both operands
    # are ints, which is presumably intended for a box count.
    nboxes = nnod / nodesperbox # ideal total number of boxes
    boxsz = (vol/nboxes) ** (1./esz.shape[0])
    nx = (sz/boxsz).astype(int32)
    # avoid error message on the global sz/nx calculation
    errh = seterr(all='ignore')
    dx = where(nx>0,sz/nx,boxsz)
    seterr(**errh)
    #
    nx = array(nx) + 1
    ox = lo - dx*shift # origin : 0 < shift < 1
    # Create box coordinates for all nodes
    ind = floor((x-ox)/dx).astype(int32)
    # Create unique box numbers in smallest direction first
    o = argsort(nx)
    # NOTE(review): nx[o[1]] appears twice in this mixed-radix box
    # number; a strict encoding would use nx[o[1]] then nx[o[0]].
    # Verify whether this is intentional (uniqueness may still hold).
    val = ( ind[:,o[2]] * nx[o[2]] + ind[:,o[1]] ) * nx[o[1]] + ind[:,o[0]]
    # sort according to box number
    srt = argsort(val)
    # rearrange the data according to the sort order
    val = val[srt]
    x = x[srt]
    # now compact
    # make sure we use int32 (for the fast fuse function)
    # Using int32 limits this procedure to 10**9 points, which is more
    # than enough for all practical purposes
    val = val.astype(int32)
    flag = ones((nnod,),dtype=int32) # 1 = new, 0 = existing node
    sel = arange(nnod).astype(int32) # replacement unique node nr
    tol = max(abs(rtol*self.sizes()).max(),atol)
    # have_fast_fuse / GD / the module-level fuse() accelerator are
    # defined outside this view; the Python loop below is the fallback.
    if have_fast_fuse and GD.options.fastfuse:
        fuse(x,val,flag,sel,tol)
    else:
        for i in range(nnod):
            j = i-1
            # Only compare against earlier nodes in the same box.
            while j>=0 and val[i]==val[j]:
                if allclose(x[i],x[j],rtol=rtol,atol=atol):
                    # node i is same as node j
                    flag[i] = 0
                    sel[i] = sel[j]
                    # All later replacement indices shift down by one.
                    sel[i+1:nnod] -= 1
                    break
                j = j-1
    x = x[flag>0] # extract unique nodes
    s = sel[argsort(srt)] # and indices for old nodes
    return (x,s.reshape(self.shape[:-1]))
@classmethod
def concatenate(cls,L):
    """Concatenate a list of Coords objects.

    All Coords objects in the list L should have the same shape
    except for the length of the first axis.
    Equivalent to numpy's concatenate (which the inner call resolves
    to at module level), but makes sure the result is a Coords object.
    """
    return Coords(concatenate(L))
@classmethod
def fromfile(cls,*args):
    """Read a Coords from file.

    Convenience classmethod that passes all arguments through to
    numpy's fromfile() and wraps the result in a Coords.
    The coordinates must be stored in order (X,Y,Z) for subsequent
    points, and the total number of values read must be a multiple
    of 3.
    """
    # BUG FIX: the original signature `def fromfile(*args)` packed the
    # implicit class argument into *args, which was then forwarded to
    # numpy's fromfile and broke every call. The explicit cls absorbs it.
    # (The inner fromfile resolves to the module-level numpy function.)
    return Coords(fromfile(*args).reshape((-1,3)))
@classmethod
def interpolate(clas,F,G,div):
    """Create interpolations between two Coords.

    F and G are two Coords with the same shape.
    div is a list of floating point values; the result is the
    concatenation of the interpolations of F and G at all values in
    div. An interpolation at value v is a Coords H with
    Hijk = Fijk + v * (Gijk-Fijk). Thus interpolate(F,G,[0.,0.5,1.0])
    contains all points of F and G plus the midpoints.
    As a convenience, an integer div is taken as a number of divisions
    of [0..1]: interpolate(F,G,n) equals
    interpolate(F,G,arange(0,n+1)/float(n)).
    The result has an extra first axis: shape (len(div),) + F.shape.
    """
    if F.shape != G.shape:
        # Python 2 raise syntax, consistent with the rest of this file.
        raise RuntimeError,"Expected Coords objects with equal shape!"
    if type(div) == int:
        div = arange(div+1) / float(div)
    else:
        div = array(div).ravel()
    # outer() gives shape (ndiv, F.size); reshape restores F's shape
    # per division.
    return F + outer(div,G-F).reshape((-1,)+F.shape)
# Convenient shorter notations
rot = rotate
trl = translate

# Deprecated functions
# (the deprecated decorator redirects diagonal() to dsize())
from utils import deprecated

@deprecated(dsize)
def diagonal(self):
    pass
##############################################################################
#
# Testing
#
# Some of the docstrings above hold test examples. They should be careflly
# crafted to test the functionality of the Formex class.
#
# Ad hoc test examples during development can be added to the test() function
# below.
#
# python formex.py
# will execute the docstring examples silently.
# python formex.py -v
# will execute the docstring examples verbosely.
# In both cases, the ad hoc tests are only run if the docstring tests
# are passed.
#
if __name__ == "__main__":
    def testX(X):
        """Run some tests on Coords X."""
        def prt(s,v):
            """Print a statement 's = v' and return v"""
            # Arrays go on their own line; scalars stay inline.
            if isinstance(v,ndarray):
                sep = '\n'
            else:
                sep = ' '
            print "%s =%s%s" % (s,sep,v)
            return v
        prt("###################################\nTests for Coords X",X)
        # Info
        prt("points",X.points())
        prt("pshape",X.pshape())
        prt("npoints",X.npoints())
        prt("y",X.y())
        prt("bbox",X.bbox())
        prt("center",X.center())
        prt("centroid",X.centroid())
        prt("sizes",X.sizes())
        prt("dsize",X.dsize())
        prt("bsphere",X.bsphere())
        prt("distanceFromPlane",X.distanceFromPlane([0.,0.,1.],[0.,0.,1.]))
        prt("distanceFromLine",X.distanceFromLine([0.,0.,1.],[0.,0.,1.]))
        prt("distanceFromPoint",X.distanceFromPoint([0.,0.,1.]))
        prt("test",X.test(dir=1,min=0.5,max=1.5))
        prt("test2",X.test(dir=[1.,1.,0.],min=[0.,0.5,0.],max=[0.,1.5,0.]))
        # Transforms
        prt("X_scl",X.scale(2,False))
        prt("X",X)
        prt("X_scl",X.scale(2,True))
        prt("X",X)
        prt("X_scl2",X.scale([0.5,1.,0.]))
        prt("X_trl",X.copy().translate(0,6))
        prt("X_trl2",X.translate([10.,100.,1000.]))
        prt("X_rot",X.rotate(90.))
        prt("X_rot2",X.rotate(90.,0))
        Y=prt("X_ref",X.reflect(1,2))
        print X.bbox()
        print Y.bbox()
        print bbox([X,Y])
        return
        # NOTE(review): everything below this return is dead code, and
        # X1/X2 in the concatenate call are undefined names. Kept as-is.
        X3 = X.copy().reflect(1,1.5).translate(1,2)
        print "X =",X
        print "X3 =",X3
        G = Coords.concatenate([X1,X3,X2,X3])
        print "X1+X3+X2+X3 =",G
        print "unique:",G.unique()
        Y = Coords([[[1,0,0],[0,1,0],[0,0,1]],[[2,0,0],[0,2,0],[0,0,2]]])
        print Y
        Y.translate([0.,100.,0.])
        print Y
        Y = Coords([1.0,0.0,0.0])
        print Y
        Y.translate([0.,100.,0.])
        print Y
        return
    def test():
        """Run some additional examples.

        This is intended for ad hoc tests during development and can
        be changed at will. Exercises 2-D, 3-D, single-point and
        empty Coords.
        """
        testX(Coords([[1,0,0],[0,1,0]]))
        testX(Coords([[[0,0,0],[1,0,0]],[[0,1,0],[1,1,0]]]))
        testX(Coords([1,0,0]))
        testX(Coords())
        return
    def testweave():
        # C snippet, presumably intended for scipy.weave experiments
        # (never compiled here). NOTE(review): the A2(i,*) lines lack
        # semicolons, so this C would not compile as written.
        code = r"""
float xm,ym,zm;
xm = ym = zm = 0.;
int i;
for(i=0;i<Na[0];i++) {
xm += A2(i,0)
ym += A2(i,1)
zm += A2(i,2)
}
xm /= Na[0];
ym /= Na[0];
zm /= Na[0];
return val = xm;
"""
    # f counts doctest failures; the doctest run is disabled, so the
    # ad hoc tests below always execute.
    f = 0
    #import doctest, formex
    #f,t = doctest.testmod(formex)
    if f == 0:
        test()
    ### End
### End
|
"""
ๅฐ only_id:comp_full_name ๆพๅ
ฅ id_name_all
"""
import os
import sys
f = os.path.abspath(os.path.dirname(__file__))
ff = os.path.dirname(f)
fff = os.path.dirname(ff)
sys.path.extend([f, ff, fff])
import pymysql
import traceback
from dim.utility.tools import get_redis_db, in_redis_hash, in_redis_string
from dim.utility.info import a024, a027, etl_config, xin_config, online_config
a027_db = get_redis_db(a027)
etl = pymysql.connect(**etl_config)
etl.select_db('dimension_result')
etl_cur = etl.cursor()
def in_redis_all():
    """Write the full set of company ids into the redis master keys.

    (Original docstring was mojibake-encoded Chinese; roughly: "put
    the full set of ids into the redis master key (full load)".)
    Iterates the 10 sharded tables comp_base_result0..9 in pages of
    500000 rows, writing both id->name and name->id hashes.
    :return: None
    """
    all_ids = 0
    for n in range(10):
        sta = 0
        while True:
            # Paged scan; sta is the growing LIMIT offset.
            all_sql = """select only_id, comp_full_name from comp_base_result{num} limit {sta}, 500000""".format(num=n,
                                                                                                                 sta=sta)
            etl_cur.execute(all_sql)
            results = etl_cur.fetchall()
            if not results:
                break
            for result in results:
                # NOTE(review): result['only_id'] requires a dict-style
                # cursor (e.g. pymysql DictCursor); etl.cursor() above
                # uses the default -- confirm etl_config sets cursorclass.
                comp_id = result['only_id']
                comp_full_name = result['comp_full_name']
                in_redis_hash(a027_db, 'id_name_all', comp_id, comp_full_name)
                in_redis_hash(a027_db, 'name_id_all', comp_full_name, comp_id)
            sta += len(results)
            print(sta)
        all_ids += sta
        print('~~~~~~~~' + str(n) + '~~~~~~~~' + str(all_ids) + '~~~~~~~~')
if __name__ == '__main__':
    try:
        in_redis_all()
    # BUG FIX: narrowed from a bare `except:`, which would also swallow
    # SystemExit and KeyboardInterrupt.
    except Exception:
        traceback.print_exc()
    finally:
        # Always release the MySQL connection opened at module level.
        etl.close()
|
# coding=utf-8
def scramblies(str1, str2):
    """Return True if the characters of str2 can all be taken from str1,
    respecting multiplicities (the "scramblies" problem).

    BUG FIX: the original compared set(str2) <= set(str1), which
    ignores how many times each character occurs -- e.g. it wrongly
    accepted str1='abc', str2='aab'.
    """
    from collections import Counter
    have = Counter(str1)
    need = Counter(str2)
    return all(have[ch] >= cnt for ch, cnt in need.items())
|
import cv2
import numpy as np
# Old PIL-style import (Python 2 era); only used by the commented-out
# PNG-to-JPG conversion below -- presumably safe to drop once confirmed.
import Image
#img = Image.open('image032.png')
#img.save('image032.jpg')

# Load the fundus image in color and resize for display.
res = cv2.imread('image002.jpg',1)
#img = cv2.resize(res,None,fx=0.25, fy=0.25, interpolation = cv2.INTER_CUBIC)
img = cv2.resize(res,(640,491))
cv2.imshow('initial',img)
#cv2.imshow('image',img)
# Convert BGR to HSV
#hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#green = img[0,:,0]
#cv2.imshow('gray',green)
#h,s,v=cv2.split(hsv)
#median = cv2.medianBlur(v,5)
#equ = cv2.equalizeHist(median)

# Structuring elements for the morphological operations.
kernel1 = np.ones((7,7),np.uint8)
kernel2 = np.ones((3,3),np.uint8)
kernel3 = np.ones((4,4),np.uint8)

# Blood vessels have the best contrast in the green channel.
b,g,r=cv2.split(img)
cv2.imshow('green',g)
closing1 = cv2.morphologyEx(g, cv2.MORPH_CLOSE, kernel1)
closing2 = cv2.morphologyEx(g, cv2.MORPH_CLOSE, kernel2)
cv2.imshow('closing1',closing1)
cv2.imshow('closing2',closing2)
###############33
# Bottom-hat style step: closing minus original highlights dark,
# thin structures (the vessels).
greensubclosing=cv2.subtract(closing1,g)
cv2.imshow('green-closing1',greensubclosing)
###############3
# plot all the images and their histograms
#gradient_image=np.array(closing1)-np.array(closing2)
gradient_image=greensubclosing
cv2.imshow('gradient_image',gradient_image)
#gradient_image_th = cv2.adaptiveThreshold(gradient_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 0)
# Fixed low threshold turns the vessel response into a binary mask.
ret,gradient_image_th = cv2.threshold(gradient_image,2,255,cv2.THRESH_BINARY)
#blur = cv2.GaussianBlur(gradient_image,(5,5),0)
#ret3,gradient_image_th = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow('image',gradient_image_th)
#gradient_image_th_eroded=cv2.erode(gradient_image_th,kernel3,iterations = 1)
#res = cv2.bitwise_and(gradient_image_th,gradient_image_th, mask= gradient_image)
#cv2.imshow('final',res);
#gradient_image_th_eroded_erode=cv2.erode(gradient_image_th_eroded,kernel3,iterations = 1)
#cv2.imshow('gradient_image_th_eroded',gradient_image_th_eroded)
#cv2.imshow('gradient_image_th_eroded_erode',gradient_image_th_eroded_erode)
#opening = cv2.morphologyEx(gradient_image_th_eroded, cv2.MORPH_OPEN, kernel1)
#closing = cv2.morphologyEx(gradient_image_th_eroded, cv2.MORPH_CLOSE, kernel1)
#cv2.imshow('Opening',opening)
#cv2.imshow('Closing',closing)
cv2.imshow('final',cv2.subtract(g,gradient_image_th))
#cv2.imwrite('image002_bv.jpg',cv2.subtract(g,gradient_image_th))
cv2.imwrite('bv018.jpg',gradient_image_th)
#detecting exudates
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# Processing.py sketch: size(), background(), and the setup()/draw()
# loop are provided by the Processing environment, not plain Python.
from Drop import Drop

# NOTE(review): a module-level `global` statement has no effect in
# Python; the globals inside setup()/draw() are what matter.
global drop

def setup():
    # x and y are declared global but never assigned here -- presumably
    # leftovers; only drop is actually used.
    global x,y, drop
    size(400,400)
    background(0)
    drop = Drop()

def draw():
    global x,y,drop
    # Clear the frame, then advance and render the drop.
    background(0)
    drop.move() #move down
    drop.display()
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 14:04:13 2021
@author: chere
"""
import random
from rpi_ws281x import Color
class Train():
    """A moving light 'train' with a fading tail on an LED strip.

    Positions are float strip indices; the tail is a list of
    (pixel_index, intensity) pairs with intensity in 0..100.
    """
    def __init__(self,master,position,speed,minSpeed,acc,goingUp, bounds, tailFactor,color=(255,255,255)):
        self.master = master          # owning Rails; queried for acceleration
        self.position = position      # current head position (float)
        self.speed = speed            # signed pixels per step
        self.minSpeed = minSpeed      # magnitude below which accel stops applying
        self.acc = acc                # current acceleration
        self.goingUp = goingUp        # travel direction flag
        self.bounds = bounds          # (min,max) pixel range of the rail
        self.tailFactor = tailFactor  # intensity lost per step by tail pixels
        self.color = color            # base (r,g,b)
        self.tail = [(self.position,100)]
    def update(self):
        """Advance one step; return True when the train has fully faded."""
        self.change_acc()
        self.position += self.speed
        # Only keep accelerating while the resulting speed magnitude
        # stays at or above minSpeed (or no minimum is set).
        if abs(self.speed+self.acc) >= abs(self.minSpeed) or self.minSpeed == 0:
            self.speed+=self.acc
        return self.updateTail()
    def change_acc(self):
        # Acceleration depends on the rail profile at the current pixel.
        self.acc = self.master.getAcc(int(self.position))
    def get_acc(self):
        return self.acc
    def get_speed(self):
        return self.speed
    def set_speed(self,new_speed):
        self.speed = new_speed
    def set_pos(self,new_position):
        self.position = new_position
    def getColor(self, intensity):
        """Return the base color scaled by intensity (0..100) as a Color."""
        newColor = [0,0,0]
        for k,v in enumerate(self.color):
            newColor[k] = int(v*intensity/100)
        return Color(newColor[0], newColor[1], newColor[2])
    def getPositions(self):
        """Pixel indices currently occupied by the train and its tail."""
        return [v[0] for v in self.tail]
    def getPosition(self):
        return self.position
    def getColors(self):
        """Colors matching getPositions(), dimmed by tail intensity."""
        return [self.getColor(v[1]) for v in self.tail]
    def updateTail(self):
        """Fade existing tail pixels and extend the tail to the new head.

        Intensities are linearly interpolated between the last tail
        pixel and full brightness (100) at the new head position.
        Returns True when the tail is empty (train can be removed).
        """
        newTail = []
        position = int(self.position)
        for v in self.tail:
            #Updating previous pixels
            if v[1] > 0:
                newTail.append((v[0],max(0,v[1]-self.tailFactor)))
        if len(newTail) > 0:
            if position != newTail[-1][0]:
                # Fill the gap between the old head and the new head,
                # clamped to the rail bounds; direction picks the range.
                if ( self.speed>0 ):
                    newTail += [(k,newTail[-1][1]+((100-newTail[-1][1])/(position-newTail[-1][0]))*(k-newTail[-1][0])) for k in range(newTail[-1][0]+1,min(position,max(self.bounds))+1)]
                else:
                    newTail += [(k,newTail[-1][1]+((100-newTail[-1][1])/abs(position-newTail[-1][0]))*abs(k-newTail[-1][0])) for k in range(newTail[-1][0]-1,max(position-1,min(self.bounds)-1),-1)]
            else:
                # Head did not move to a new pixel: just refresh it.
                newTail[-1] = (position,100)
        self.tail = newTail
        return len(self.tail) == 0
class Rails():
    """Manages a set of Train objects on one LED strip segment.

    Handles spawning, gravity-like acceleration on rising/falling
    sections, elastic-style collisions, and rendering to the strip.
    """
    def __init__(self, strip, bounds, boundsFalling, boundsRising, color = (255,255,255), initialSpeed = 0, randomSpeed = 3.5,minSpeed = 1, g = 0.1, goingUp = False, tailFactor = 30, newTrainDelay = 30, randomColor = True, randomNewTrainDelay = 1,maxTrainNb = 6):
        self.strip = strip                        # LED strip to draw on
        self.bounds = bounds                      # full pixel range of the rail
        self.boundsFalling = boundsFalling        # (start,end) ranges sloping down
        self.boundsRising = boundsRising          # (start,end) ranges sloping up
        self.color = color                        # default train color
        self.speed = initialSpeed                 # base spawn speed
        self.randomSpeed = randomSpeed            # random extra spawn speed
        self.minSpeed = minSpeed                  # passed through to trains
        self.g = g                                # gravity magnitude
        self.goingUp = goingUp                    # spawn direction
        self.tailFactor = tailFactor              # tail fade rate
        self.newTrainDelay = newTrainDelay        # steps between spawns
        self.randomColor = randomColor            # randomize each train's color
        self.randomNewTrainDelay = randomNewTrainDelay  # spawn-delay jitter
        self.maxTrainNb = maxTrainNb              # cap on concurrent trains
        self.trains = []
        self.count = 0
        self.makeNewTrain()
    def new_step(self):
        """Advance the simulation one frame and render it."""
        # Create new trains if necessary (translated from French:
        # "creation de nouveaux trains si necessaire").
        if (self.count == self.newTrainDelay):
            if len(self.trains)<self.maxTrainNb:
                # Restart the countdown with some jitter.
                self.count = random.randrange(-self.randomNewTrainDelay,self.randomNewTrainDelay)
                self.makeNewTrain()
        else:
            self.count += 1
        # Pairwise collision detection: trains whose ordering would
        # swap during this step are considered colliding; they trade
        # speeds and are placed on adjacent pixels.
        collision = [False for t in self.trains]
        for k,i in enumerate(self.trains):
            i_pos = i.getPosition()
            i_speed = i.get_speed()
            for l,j in enumerate(self.trains[k+1:]):
                j_pos = j.getPosition()
                j_speed = j.get_speed()
                if (i_pos-j_pos)*(i_pos+i_speed-j_pos-j_speed)<=0:
                    collision[k] = True
                    collision[k+l+1]= True
                    new_pos = int((i_pos+i_speed+j_pos+j_speed)/2)
                    if i_pos < j_pos:
                        i.set_pos(new_pos)
                        j.set_pos(new_pos+1)
                    else :
                        i.set_pos(new_pos+1)
                        j.set_pos(new_pos)
                    i.updateTail()
                    i.set_speed(j_speed)
                    j.updateTail()
                    j.set_speed(i_speed)
        # Update the trains (translated from French: "update des trains").
        # NOTE(review): self.trains.remove(train) inside this loop
        # mutates the list being iterated, which can skip the next
        # train -- confirm whether that is acceptable here.
        for p,train in enumerate(self.trains):
            if not collision[p]:
                if train.update():
                    self.trains.remove(train)
            col = train.getColors()
            for k,v in enumerate(train.getPositions()):
                self.strip.setPixelColor(v,col[k])
    def getAcc(self, position):
        """Return the signed acceleration at a pixel position.

        Falling sections accelerate in the travel direction, rising
        sections decelerate; flat sections return 0.
        """
        for i in self.boundsFalling:
            if i[0] <= position <= i[1]:
                if self.goingUp:
                    return self.g
                else:
                    return - self.g
        for i in self.boundsRising:
            if i[0] <= position <= i[1]:
                if self.goingUp:
                    return - self.g
                else:
                    return self.g
        return 0
    def makeNewTrain(self):
        """Spawn a new Train at the appropriate end of the rail."""
        if self.goingUp:
            position = min(self.bounds)
            speed = self.speed+random.random()*self.randomSpeed
        else:
            position = max(self.bounds)
            speed = -self.speed-random.random()*self.randomSpeed
        if self.randomColor:
            # Pick one of 16 evenly spaced hues on the color wheel.
            newTrainColor = self.wheel(random.randint(0,15)*51)
        else:
            newTrainColor = self.color
        self.trains.append(Train(self, position, speed, self.minSpeed, self.getAcc(position) , self.goingUp, self.bounds , self.tailFactor, newTrainColor))
    def wheel(self,value):
        """Map a value in 0..765 onto an (r,g,b) color-wheel triple."""
        value = value % (3*255)
        if value <= 255:
            r = 255 - value
            g = value
            b = 0
        elif value <= 2*255:
            r = 0
            g = 2*255-value
            b = value-255
        else:
            r = value-2*255
            g = 0
            b = 3*255-value
        return (r,g,b)
import math
class BasePlayer:
    """Shared scoring constants and position heuristic for Connect-Four players.

    board.board is column-major: board.board[i] is the list of pieces already
    dropped in column i (piece 0 belongs to player 1, piece 1 to player 2).
    Columns may be shorter than board.HEIGHT; probing past the stored pieces
    raises IndexError, which the heuristic uses to truncate a window scan.
    """

    def __init__(self, maxDepth):
        # Maximum search depth used by the derived search players.
        self.maxDepth = maxDepth

    # Terminal-state scores: P2_WIN_SCORE < TIE_SCORE < P1_WIN_SCORE.
    P1_WIN_SCORE = 7800
    P2_WIN_SCORE = -7800
    TIE_SCORE = 0

    def myHeuristic(self, board):
        """Score the position: positive favours player 0, negative player 1.

        Counts adjacent pairs (+/-10) and three-piece patterns inside a
        four-cell window (+/-100) along rows, columns and both diagonals.
        For every board, P2_WIN_SCORE < heuristic(board) < P1_WIN_SCORE
        holds in practice.

        Fixed relative to the original:
        - the player-1 vertical three-piece checks compared the far cells
          to 0 (the opponent) instead of 1;
        - both vertical scans duplicated the (j+1, j+3) window and omitted
          the (j+2, j+3) window present in every other direction.
        """
        h_score = 0
        for i in range(board.WIDTH):
            for j in range(board.HEIGHT):
                # --- horizontal windows, player 0 ---
                try:
                    if board.board[i][j] == 0 and board.board[i+1][j] == 0:
                        h_score += 10
                    if board.board[i][j] == 0 and board.board[i+1][j] == 0 and board.board[i+2][j] == 0:
                        h_score += 100
                    if board.board[i][j] == 0 and board.board[i+2][j] == 0 and board.board[i+3][j] == 0:
                        h_score += 100
                    if board.board[i][j] == 0 and board.board[i+1][j] == 0 and board.board[i+3][j] == 0:
                        h_score += 100
                except IndexError:
                    pass
                # --- horizontal windows, player 1 ---
                try:
                    if board.board[i][j] == 1 and board.board[i+1][j] == 1:
                        h_score -= 10
                    if board.board[i][j] == 1 and board.board[i+1][j] == 1 and board.board[i+2][j] == 1:
                        h_score -= 100
                    if board.board[i][j] == 1 and board.board[i+2][j] == 1 and board.board[i+3][j] == 1:
                        h_score -= 100
                    if board.board[i][j] == 1 and board.board[i+1][j] == 1 and board.board[i+3][j] == 1:
                        h_score -= 100
                except IndexError:
                    pass
                # --- vertical windows, player 0 ---
                try:
                    if board.board[i][j] == 0 and board.board[i][j+1] == 0:
                        h_score += 10
                    if board.board[i][j] == 0 and board.board[i][j+1] == 0 and board.board[i][j+2] == 0:
                        h_score += 100
                    if board.board[i][j] == 0 and board.board[i][j+2] == 0 and board.board[i][j+3] == 0:
                        h_score += 100
                    if board.board[i][j] == 0 and board.board[i][j+1] == 0 and board.board[i][j+3] == 0:
                        h_score += 100
                except IndexError:
                    pass
                # --- vertical windows, player 1 ---
                try:
                    if board.board[i][j] == 1 and board.board[i][j+1] == 1:
                        h_score -= 10
                    if board.board[i][j] == 1 and board.board[i][j+1] == 1 and board.board[i][j+2] == 1:
                        h_score -= 100
                    if board.board[i][j] == 1 and board.board[i][j+2] == 1 and board.board[i][j+3] == 1:
                        h_score -= 100
                    if board.board[i][j] == 1 and board.board[i][j+1] == 1 and board.board[i][j+3] == 1:
                        h_score -= 100
                except IndexError:
                    pass
                # --- rising-diagonal windows, player 0 ---
                try:
                    if (j + 3) <= board.HEIGHT:
                        if board.board[i][j] == 0 and board.board[i+1][j+1] == 0:
                            h_score += 10
                        if board.board[i][j] == 0 and board.board[i+1][j+1] == 0 and board.board[i+2][j+2] == 0:
                            h_score += 100
                        if board.board[i][j] == 0 and board.board[i+2][j+2] == 0 and board.board[i+3][j+3] == 0:
                            h_score += 100
                        if board.board[i][j] == 0 and board.board[i+1][j+1] == 0 and board.board[i+3][j+3] == 0:
                            h_score += 100
                except IndexError:
                    pass
                # --- rising-diagonal windows, player 1 ---
                try:
                    if (j + 3) <= board.HEIGHT:
                        if board.board[i][j] == 1 and board.board[i+1][j+1] == 1:
                            h_score -= 10
                        if board.board[i][j] == 1 and board.board[i+1][j+1] == 1 and board.board[i+2][j+2] == 1:
                            h_score -= 100
                        if board.board[i][j] == 1 and board.board[i+2][j+2] == 1 and board.board[i+3][j+3] == 1:
                            h_score -= 100
                        if board.board[i][j] == 1 and board.board[i+1][j+1] == 1 and board.board[i+3][j+3] == 1:
                            h_score -= 100
                except IndexError:
                    pass
                # --- falling-diagonal windows, player 0 ---
                try:
                    if (j - 3) >= 0:
                        if board.board[i][j] == 0 and board.board[i+1][j-1] == 0:
                            h_score += 10
                        if board.board[i][j] == 0 and board.board[i+1][j-1] == 0 and board.board[i+2][j-2] == 0:
                            h_score += 100
                        if board.board[i][j] == 0 and board.board[i+2][j-2] == 0 and board.board[i+3][j-3] == 0:
                            h_score += 100
                        if board.board[i][j] == 0 and board.board[i+1][j-1] == 0 and board.board[i+3][j-3] == 0:
                            h_score += 100
                except IndexError:
                    pass
                # --- falling-diagonal windows, player 1 ---
                try:
                    if (j - 3) >= 0:
                        if board.board[i][j] == 1 and board.board[i+1][j-1] == 1:
                            h_score -= 10
                        if board.board[i][j] == 1 and board.board[i+1][j-1] == 1 and board.board[i+2][j-2] == 1:
                            h_score -= 100
                        if board.board[i][j] == 1 and board.board[i+2][j-2] == 1 and board.board[i+3][j-3] == 1:
                            h_score -= 100
                        if board.board[i][j] == 1 and board.board[i+1][j-1] == 1 and board.board[i+3][j-3] == 1:
                            h_score -= 100
                except IndexError:
                    pass
        return h_score
class ManualPlayer(BasePlayer):
    """Human player that reads column choices from standard input."""

    def __init__(self, maxDepth=None):
        # Depth is irrelevant for a human player; accepted for interface parity.
        BasePlayer.__init__(self, maxDepth)

    def findMove(self, board):
        """Prompt until the user enters a playable (non-full) column."""
        header = " " + "".join(
            " " + (str(c + 1) if len(board.board[c]) < board.HEIGHT else ' ') + " "
            for c in range(board.WIDTH)
        )
        print(header)
        piece = 'O' if board.turn == 0 else 'X'
        while True:
            raw = input("Place a " + piece + " in column: ")
            try:
                col = int(raw) - 1
            except ValueError:
                continue
            if 0 <= col < board.WIDTH and len(board.board[col]) < board.HEIGHT:
                return col
class PlayerMM(BasePlayer):
    """Computer player that chooses moves by plain minimax search."""

    def minimax(self, board, depth):
        """Search `board` to `depth` plies and return (best_move, best_score).

        Player 0 maximises the score, player 1 minimises it.  Terminal
        positions return the class win/tie scores; exhausted depth falls
        back to the heuristic.  Terminal and heuristic results carry a
        None move.
        """
        outcome = board.isEnd()
        if outcome == 0:
            return None, self.P1_WIN_SCORE
        if outcome == 1:
            return None, self.P2_WIN_SCORE
        if outcome == -1:
            return None, self.TIE_SCORE
        if not outcome and depth == 0:
            return None, self.myHeuristic(board)
        best_move, best_score = None, None
        if board.turn == 0:
            best_score = -math.inf
            for move in board.getAllValidMoves():
                _, score = self.minimax(board.getChild(move), depth - 1)
                if score is not None and score > best_score:
                    best_move, best_score = move, score
            return best_move, best_score
        if board.turn == 1:
            best_score = math.inf
            for move in board.getAllValidMoves():
                _, score = self.minimax(board.getChild(move), depth - 1)
                if score is not None and score < best_score:
                    best_move, best_score = move, score
            return best_move, best_score

    def findMove(self, board):
        """Return the minimax-optimal column for the current player."""
        move, _ = self.minimax(board, self.maxDepth)
        return move
class PlayerAB(BasePlayer):
    """Minimax player with alpha-beta pruning.

    alpha is the score max's current strategy guarantees, beta the score
    min's current strategy guarantees; once alpha >= beta the remaining
    siblings cannot influence the result, and the branch is cut off by
    returning the score that caused the cutoff.
    """

    def alphaBeta(self, alpha, beta, board, depth):
        """Return (best_move, best_score) for `board` searched to `depth`."""
        outcome = board.isEnd()
        if outcome == 0:
            return None, self.P1_WIN_SCORE
        if outcome == 1:
            return None, self.P2_WIN_SCORE
        if outcome == -1:
            return None, self.TIE_SCORE
        if not outcome and depth == 0:
            return None, self.myHeuristic(board)
        best_move, best_score = None, None
        if board.turn == 0:  # maximising player
            best_score = -math.inf
            for move in board.getAllValidMoves():
                _, score = self.alphaBeta(alpha, beta, board.getChild(move), depth - 1)
                if score is not None:
                    if score > best_score:
                        best_move, best_score = move, score
                    alpha = max(alpha, score)
                    if alpha >= beta:  # min would never allow this line
                        return None, score
        if board.turn == 1:  # minimising player
            best_score = math.inf
            for move in board.getAllValidMoves():
                _, score = self.alphaBeta(alpha, beta, board.getChild(move), depth - 1)
                if score is not None:
                    if score < best_score:
                        best_move, best_score = move, score
                    beta = min(beta, score)
                    if alpha >= beta:  # max would never allow this line
                        return None, score
        return best_move, best_score

    def findMove(self, board):
        """Return the best column found by alpha-beta search."""
        move, _ = self.alphaBeta(-math.inf, math.inf, board, self.maxDepth)
        return move
class PlayerDP(PlayerAB):
    """PlayerAB variant that memoizes heuristic values per board state,
    improving performance when the same position is reached repeatedly."""

    def __init__(self, maxDepth):
        PlayerAB.__init__(self, maxDepth)
        # Maps board.state -> cached heuristic score.
        self.resolved = {}

    def myHeuristic(self, board):
        """Return the cached heuristic for board.state, computing it at most once."""
        key = board.state
        if key not in self.resolved:
            self.resolved[key] = super().myHeuristic(board)
        return self.resolved[key]
#######################################################
###########Example Subclass for Testing
#######################################################
# This will inherit your findMove from above, but will override the heuristic function with
# a new one; you can swap out the type of player by changing X in "class TestPlayer(X):"
class TestPlayer(BasePlayer):
    """Example subclass for experimenting with an alternative heuristic.

    Fixed: the original method was named `myHeurisitic` (typo) and lacked
    the `board` parameter, so it never overrode BasePlayer.myHeuristic and
    the subclass behaved exactly like its parent.
    """
    # define your new heuristic here
    def myHeuristic(self, board):
        # Placeholder: fall back to the inherited heuristic until replaced.
        return super().myHeuristic(board)
|
import django_filters
from trips import models
# Filter set for Trip querysets: exposes an exact-match filter on `destination`.
class TripFilter(django_filters.FilterSet):
    class Meta:
        model = models.Trip
        fields = ['destination']
|
from flask import Blueprint, flash
# Blueprint that serves the pre-built frontend bundle from ./static.
coaction = Blueprint("coaction", __name__, static_folder="./static")
@coaction.route("/")
def index():
    # Serve the single-page-app entry point.
    return coaction.send_static_file("index.html")
## Add your API views here
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 22 13:38:44 2019
@author: Fabiana
"""
#####################################
# CHAPTER 17 - DEPLOYING YOUR CODE
#####################################
# Go to this link to check the application online: https://stark-shore-59149.herokuapp.com/ |
# We will create a query "hello" that returns string "world"
import graphene
import json
# Create the GraphQL root query class by subclassing graphene.ObjectType
class Query(graphene.ObjectType):
    # 'hello' field of GraphQL type String
    hello = graphene.String()
    # 'isAdmin' field of GraphQL type Boolean: is the new user an admin?
    is_admin = graphene.Boolean()
    # Resolvers supply field values; they are named resolve_<field> in snake_case.
    def resolve_hello(self, info):
        return 'world'
    def resolve_is_admin(self, info):
        return True
# Create the schema based on the query type
schema = graphene.Schema(query = Query)
# Execute the query
result = schema.execute(
# Pass the graphql object as parameter
'''
{
hello
}
'''
)
# Print the results
print(result.data.items())
# odict_items([('hello', 'world')])
print(result.data['hello'])
# world -> returns just the ouput
# Print result in json format.
# First store a dictionary
dictResult = dict(result.data.items())
print(json.dumps(dictResult, indent = 2))
# indent = 2 makes more readable the output
# Query admin values
result = schema.execute(
# Pass the graphql object as parameter
# IMPORTANTE: executes must be passed in camelCase notation
'''
{
isAdmin
}
'''
)
print(result.data.items())
# Disable camel case
schema = graphene.Schema(query = Query, auto_camelcase = False)
result = schema.execute(
'''
{
is_admin
}
'''
)
print('Same as before: ', result.data.items())
|
def mul2(x):
    """Return x doubled."""
    return 2 * x
def map_yield(func, arr):
    """Lazily apply `func` to every element of `arr`, yielding the results."""
    for item in arr:
        yield func(item)
def test_map_rek():
    """Check map_yield against two sample transforms.

    Fixed: the original compared the generator object itself to a list,
    which is always unequal — the generator must be materialised with
    list() first.  The print is written in the single-argument form that
    works under both Python 2 and 3.
    """
    assert list(map_yield(mul2, (1, 2, 3))) == [2, 4, 6]
    assert list(map_yield(str, (1, 2, 3))) == ["1", "2", "3"]
    print("Test map_rek passed OK!")
def is_positive(num):
    """Return True when `num` is strictly greater than zero."""
    return 0 < num
def my_filter(func, arr):
    """Lazily yield the elements of `arr` for which `func` is truthy."""
    for item in arr:
        if func(item):
            yield item
def test_filter_yield():
    """Print the positive elements of a sample list.

    Fixed: uses the single-argument print() form, valid under both
    Python 2 and Python 3 (the original `print i` is Python-2-only).
    """
    gen = my_filter(is_positive, [-1, 5, 6, 0, -3, 4])
    for value in gen:
        print(value)
def mul(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def my_reduse(func, arr):
    """Lazily reduce `arr` with `func`, yielding only the final accumulated value.

    An empty iterable yields nothing.

    Fixed: `it.next()` is Python-2-only; now uses the builtin next() (works
    on 2.6+ and 3.x).  The StopIteration from an empty iterable is caught
    explicitly — under Python 3 (PEP 479) a leaked StopIteration inside a
    generator becomes a RuntimeError.
    """
    it = iter(arr)
    try:
        acc = next(it)
    except StopIteration:
        return
    for item in it:
        acc = func(acc, item)
    yield acc
def test_reduse_yield():
    """Multiply the positive elements of a sample list and print the result.

    Fixed: uses the single-argument print() form, valid under both
    Python 2 and Python 3.
    """
    gen = my_reduse(mul, my_filter(is_positive, [-1, 5, 6, 0, -3, 4]))
    for value in gen:
        print(value)
def get_line(fd):
    """Yield the newline-terminated lines of `fd`, without the newline.

    Reads one character at a time; any trailing characters after the last
    newline are discarded (no partial final line is yielded).
    """
    buf = ""
    while True:
        ch = fd.read(1)
        if ch == "":
            break
        if ch == "\n":
            yield buf
            buf = ""
        else:
            buf += ch
def test_get_line():
    """Write a small command file, then read it back line by line.

    Fixed: uses the single-argument print() form, valid under both
    Python 2 and Python 3.
    """
    with open("test.txt", "w") as input_file:
        input_file.write("put 1\nput 3\nadd\nprint\n")
    with open("test.txt", "r") as input_file:
        for line in get_line(input_file):
            print(line)
def strip_spaces(strings):
    """Lazily yield each string with leading/trailing spaces removed.

    Only the space character ' ' is stripped; tabs and other whitespace
    are preserved.
    """
    for text in strings:
        yield text.strip(' ')
def test_strip_spaces():
    """Strip surrounding spaces from each line of a sample file.

    Fixed: uses the single-argument print() form, valid under both
    Python 2 and Python 3.
    """
    with open("test.txt", "w") as input_file:
        input_file.write(" put 1 \nput 3 \nadd\n print\n")
    with open("test.txt", "r") as input_file:
        for line in strip_spaces(get_line(input_file)):
            print(line)
def drop_empty(strings):
    """Lazily yield only the non-empty strings from the input iterable."""
    for text in strings:
        if text != "":
            yield text
def test_drop_empty():
    """Drop blank lines while echoing a sample file.

    Fixed: uses the single-argument print() form, valid under both
    Python 2 and Python 3.
    """
    with open("test.txt", "w") as input_file:
        input_file.write(" put 1 \nput 3 \n\n print\n")
    with open("test.txt", "r") as input_file:
        for line in drop_empty(get_line(input_file)):
            print(line)
def split_items(strings):
    """Lazily yield the whitespace-separated tokens of every input string."""
    for text in strings:
        for token in text.split():
            yield token
def test_split_items():
    """Tokenise a sample file and print each token.

    Fixed: uses the single-argument print() form, valid under both
    Python 2 and Python 3.
    """
    with open("test.txt", "w") as input_file:
        input_file.write(" put 1 \nput 3 \n\n print\n1.5632\n")
    with open("test.txt", "r") as input_file:
        for token in split_items(get_line(input_file)):
            print(token)
def get_ints(strings):
    """Lazily yield each item that parses as an int, silently skipping the rest."""
    for text in strings:
        try:
            number = int(text)
        except ValueError:
            continue
        yield number
def test_get_ints():
    """Print only the integer lines of a sample file.

    Fixed: uses the single-argument print() form, valid under both
    Python 2 and Python 3.
    """
    with open("test.txt", "w") as input_file:
        input_file.write(" put 1 \nput 3 \n\n print\n10\n27\n")
    with open("test.txt", "r") as input_file:
        for value in get_ints(get_line(input_file)):
            print(value)
def my_sum(strings):
    """Lazily accumulate the elements with +, yielding only the final total.

    An empty iterable yields nothing.

    Fixed: `it.next()` is Python-2-only; now uses the builtin next() and an
    explicit StopIteration guard — under Python 3 (PEP 479) a leaked
    StopIteration inside a generator becomes a RuntimeError.
    """
    it = iter(strings)
    try:
        total = next(it)
    except StopIteration:
        return
    for item in it:
        total += item
    yield total
def test_my_sum():
    """Sum the integer lines of a sample file and print the total.

    Fixed: uses the single-argument print() form, valid under both
    Python 2 and Python 3.
    """
    with open("test.txt", "w") as input_file:
        input_file.write(" put 1 \n3\n\n print\n10\n27\n")
    with open("test.txt", "r") as input_file:
        for total in my_sum(get_ints(get_line(input_file))):
            print(total)
def main():
    """Run every self-test routine in order."""
    checks = (test_map_rek, test_filter_yield, test_reduse_yield,
              test_get_line, test_strip_spaces, test_drop_empty,
              test_split_items, test_get_ints, test_my_sum)
    for check in checks:
        check()
if __name__ == "__main__":
    exit(main())  # main() returns None, so the process exits with status 0
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy import constants as const
from scipy import optimize
import math
# In[2]:
############################Constants##########################
G = 4.299E-9 #Gravitational constant, Mpc Msol**-1 (km/s)**2
H0 = 100 #Today's Hubble constant, km/s/Mpc (multiply by h for the physical value)
Myear = 1E6  # years per megayear
a_year_in_seconds = math.pi * 1E7 #units of seconds (common approximation: 1 yr ~ pi x 10^7 s)
speed_of_light = 3E10 #cm/s
Msolar = 1.99E33 #one solar mass in grams
# In[3]:
#############Bolshoi-Planck Cosmological parameters##############
Om_mat_cero = 0.307  # matter density parameter at z=0
Om_lambda_cero = 0.693  # dark-energy density parameter at z=0
Om_baryons = 0.048  # baryon density parameter at z=0
sigma8 = 0.829  # amplitude of matter fluctuations on 8 Mpc/h scales
h = 0.678  # dimensionless Hubble parameter (H0 = 100 h km/s/Mpc)
# In[64]:
# In[65]:
#UNITS OF DZ ARE MYRS
#AGE OF UNIVERSE ARE GYEARS
# In[66]:
#DtDz(Om_mat_cero,Om_lambda_cero,h,1)
# In[67]:
def SHMR(z, Mvirz):
    """Stellar-to-halo mass relation of Rodriguez-Puebla et al. 2017 (sec. 3.2).

    Parameters: z — redshift; Mvirz — halo virial mass [Msun].
    Returns the stellar mass [Msun] for the given halo mass.
    """
    def evolve(x, y, zz):
        # Redshift-scaling term P(x, y, z) applied to each fit parameter.
        return y*zz - x*zz/(1+zz)

    def damp(zz):
        # Q(z): exponential damping of the evolution terms at high z.
        return np.exp(-4/(1.+zz)**2)

    def shape(x, a, c, d):
        # g(x): power law at low mass, sub-exponential growth at high mass.
        return (-np.log10(10**(-a*x)+1.) +
                d*(np.log10(1.+np.exp(x)))**c/(1.+np.exp(10**(-x))))

    # Best-fit parameter tuples (Section 5, RP17; constrained with
    # Madau & Dickinson 2014).
    al = (1.975, 0.714, 0.042)
    de = (3.390, -0.472, -0.931)
    ga = (0.498, -0.157)
    ep = (-1.758, 0.110, -0.061, -0.023)
    M0 = (11.548, -1.297, -0.026)
    q = damp(z)
    alpha = al[0] + evolve(al[1], al[2], z) * q
    delta = de[0] + evolve(de[1], de[2], z) * q
    gamma = ga[0] + evolve(ga[1], 0, z) * q
    eps = 10**(ep[0] + evolve(ep[1], ep[2], z)*q + evolve(ep[3], 0, z))
    M1 = 10**(M0[0] + evolve(M0[1], M0[2], z)*q)
    x = np.log10(Mvirz/M1)
    return 10**(np.log10(eps*M1) + shape(x, alpha, gamma, delta) - shape(0, alpha, gamma, delta))
# In[68]:
######Codigo Aldo tiene dos funciones separadas para la funcion que yo tengo arriba
# In[69]:
#def halo_mass_assembly(Mvir0, z0, redshift):
#Halo mass growth at any z fo a progenitor mass Mvir0 at z0
#Apendix B2 Rodriguez-Puebla 2017
#taken from Rodriguez-Puebla 2016a, Behroozi 2013b
#Halo-mass assembly graph
#inputs: Initial virial mass, redshift(0) initial time, redshift array
#units: solar mass
#output: halo mass growth
#units: solar mass
#z = redshift
#def M13(z):
#return (10**13.6) * (1+z)**2.755 * ((1+(z/2))**-6.351) * np.exp(-0.413*z)
#def aexp0(Mvir0):
# return 0.592 - np.log10(((10**15.7)/Mvir0)**0.113 + 1)
#def g(Mvir0, aexp):
# return 1. + np.exp(-3.676*(aexp-aexp0(Mvir0)))
#def f(Mvir0, z):
# aexp = 1. / (1+z)
# return np.log10((Mvir0)/M13(0.)) * (g(Mvir0, 1.)/g(Mvir0, aexp))
#def Mvir(Mvir0, z):
# return M13(z) * 10**(f(Mvir0, z))
#Mvir_z = Mvir(Mvir0, z-z0)
#return Mvir_z
# In[366]:
def halo_mass_assembly(Mvir0, z0, redshift):
    """Halo mass growth at any z for a progenitor of mass Mvir0 at z0.

    Improved fit to eqs. 18-22 of the RP16 Bolshoi-Planck paper.
    Inputs: initial virial mass [Msun], starting redshift z0, redshift array.
    Output: halo mass growth history (log10 mass, per M13/fM0z below).

    NOTE(review): this function mixes natural log and log10 — `np.log(Mvir0)`
    and `np.log(h)` below look like they should be `np.log10` to be consistent
    with M13/fM0z, and fM0z ignores its Mvir argument in favour of the
    closed-over Mvir0 (linear mass) when calling gM0/a0M0, which expect a
    log-like value.  Confirm against the published fit before relying on
    absolute values; left untouched here because the intended convention
    cannot be established from this file alone.
    """
    z= redshift
    def a0M0(Mvir0):
        # Characteristic scale factor of the fit for this progenitor mass.
        X = 26.6272 - Mvir0
        return 1.37132 - np.log10( 10**( 0.077364 * X) + 1.)
    def gM0(Mvir0, a_scale):
        # Shape function controlling the transition around a0M0.
        return 1. + np.exp( -3.79656 * ( a_scale - a0M0(Mvir0) ) )
    def M13(z):
        # log10 mass track of a 10^13 Msun (z=0) halo.
        log10Mh013 = 13.
        alpha = 2.77292
        beta = -5.66267
        gamma = -0.469065
        return log10Mh013 + alpha * np.log10( 1. + z ) + beta * np.log10( 1. + 0.5 * z ) + gamma * z * np.log10( np.exp(1.) )
    def fM0z(Mvir, z):
        # Offset of this progenitor's track from the 10^13 Msun track.
        a_scale = 1. / (1. +z)
        return (Mvir0 - M13(0.)) * gM0(Mvir0, 1.) / gM0(Mvir0, a_scale)
    Mvir_z = np.log(Mvir0) + np.log(h)
    return M13(z-z0) + fM0z(Mvir_z, z - z0) - np.log(h)
# In[367]:
###NUEVA
def f_int(Mvir, z):
    """Instantaneous fraction of stellar-mass growth not supplied by mergers
    (all merger types; eqs. 34-36 of Rodriguez-Puebla et al. 2017).

    Parameters: Mvir — halo virial mass [Msun] (linear, not log10); z — redshift.
    Returns a fraction in (0, 1).

    Fixed: the original referenced an undefined name `log10Mvir` and raised
    NameError on every call; it is now computed from the Mvir argument.
    """
    a_scale = 1. / ( 1. + z )
    beta_merger = 0.760 + np.log10( 1. + z )
    logM_merge = 12.728 - 2.790 * ( a_scale - 1. )
    # NOTE(review): assumes Mvir is a linear mass — confirm against callers,
    # some of which carry log10 masses.
    log10Mvir = np.log10(Mvir)
    frac_merge = 10**( beta_merger * ( log10Mvir - logM_merge ) )
    frac_merge = 1. / ( frac_merge + 1. )
    return frac_merge
# In[368]:
# The rate at which dark matter haloes grow will determine the rate at which the cosmological baryonic
# inflow material reaches the ISM of a galaxy. Eventually, when necessary conditions are satisfied, some of
# this cosmological baryonic material will be transformed into stars.
# As described in Section 2.2, we use the growth of dark matter haloes to predict the SFHs of galaxies
# without modelling how the cold gas in the galaxy is converted into stars.
# In[369]:
def galaxy_mass_assembly(Mvir0, z0, z):
    """Stellar mass growth at any z for a progenitor halo of mass Mvir0 at z0.

    Combines the halo assembly history with the stellar-to-halo mass
    relation.  Inputs/outputs in solar masses (per the conventions of
    halo_mass_assembly and SHMR).
    """
    return SHMR(z, halo_mass_assembly(Mvir0, z0, z))
# In[370]:
#print(AgeUniverse(Om_mat_cero, Om_lambda_cero, h, 2))
##########Cosmology#######
def AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z):
    """Age of a flat LCDM universe at redshift z, in Gyr.

    The Hubble constant is H0 * h (H0 is the module-level 100 km/s/Mpc).
    """
    T_Hubble = 1.02E-12  # conversion factor so the result comes out in years before the 1E9 rescale
    opz = 1. + z
    lam_term = Om_lambda_cero * opz**(-3)
    prefac = 2. / np.sqrt(Om_lambda_cero) / 3. / H0 / T_Hubble / h
    age = prefac * np.log((np.sqrt(lam_term) + np.sqrt(lam_term + Om_mat_cero))
                          / np.sqrt(Om_mat_cero))
    return age / 1E9
def DtDz(Om_mat_cero, Om_lambda_cero, h, z):
    """Finite-difference derivative dT_age/dz at redshift z (units of
    AgeUniverse output, i.e. Gyr, per unit redshift).

    Fixed: the second parameter was misspelled `Om_lambda_cer0` and the body
    silently fell back to the module-level globals; the parameters are now
    actually used.
    """
    dz = 0.01
    dt = (AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z)
          - AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z + dz))
    return dt / dz
def DZ(z, DT):
    """Redshift z+dz reached a time interval DT after redshift z (DT in Myr).

    Solves for the root of the age-gap mismatch by bisection on [0, 1000].
    NOTE(review): AgeUniverse is documented in Gyr, so the /1E6 conversion
    below looks suspect — confirm the intended units before changing it.
    """
    def age_gap(z_dz, DT, z):
        elapsed = (AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z)
                   - AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z_dz))
        return DT - elapsed / 1E6
    return optimize.bisect(age_gap, 0, 1000, args=(DT, z))
# In[371]:
def dMsdz(Mvir0, z0, redshift):
    """Stellar mass formation rate: derivative of the galaxy mass assembly
    with respect to the age of the universe.

    Input: initial virial mass [Msun].  Output: [Msun/yr].
    If you use a different cosmology, update the module-level constants.
    """
    z_hi = np.array(redshift)
    z_lo = z_hi - 0.01  # small backward step in redshift
    dmass = (galaxy_mass_assembly(Mvir0, z0, z_hi)
             - galaxy_mass_assembly(Mvir0, z0, z_lo))
    dtime = (AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z_hi)
             - AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z_lo))
    # dtime is in Gyr; divide by 1E9 to express the rate per year.
    return dmass / dtime / 1E9
# In[372]:
def R_stell_frac(Time):
    """Fraction of formed stellar mass returned to the ISM as gas by `Time` (Gyr).

    Dimensionless fraction.  NOTE(review): the timescale `lam` is in years
    but its source is unconfirmed.
    """
    C0 = 0.05
    lam = 1.46E5
    t_years = Time * 1E9
    return C0 * np.log(t_years / lam + 1.)
# In[373]:
def SFR(Mvir0, z0, redshift):
    """Star-formation rate corrected for the stellar mass returned to the ISM.

    Input: initial virial mass [Msun].  Output: [Msun/yr].
    Update the cosmology constants if you change the cosmology.
    """
    age = AgeUniverse(Om_mat_cero, Om_lambda_cero, h, redshift)
    return dMsdz(Mvir0, z0, redshift) / (1. - R_stell_frac(age))
# In[374]:
#########NUEVA
#Star formation histories Ec 14 from RP17
def SFR_hist(SHMR, Mvir0, z0, z):
    """Star-formation history averaged over a 100-Myr burst window
    (eq. 14 of Rodriguez-Puebla et al. 2017).

    Fixed: the original referenced several undefined names (`redshift`,
    `Ms`, `Ms_t100`, `Mvir`) and raised NameError on any call; they are now
    derived from the function's own arguments.
    """
    time_burst = 100  # averaging window in Myr
    z_dz = DZ(z, time_burst)  # redshift one burst-time before z
    log10Mvir = halo_mass_assembly(Mvir0, z0, z)
    log10Ms = galaxy_mass_assembly(Mvir0, z0, z)
    log10Ms_t100 = galaxy_mass_assembly(Mvir0, z0, z_dz)
    Dt = time_burst * Myear  # window in years
    # NOTE(review): galaxy_mass_assembly returns the SHMR output, which
    # already looks linear — confirm whether this 10** double-exponentiates.
    DMs = pow(10, log10Ms) - pow(10, log10Ms_t100)
    sfr = 0 if DMs <= 0 else DMs / Dt
    # NOTE(review): halo_mass_assembly returns a log10-like mass while f_int
    # expects a linear mass; the 10** conversion assumes that convention.
    return sfr * f_int(10**log10Mvir, z) / (1. - R_stell_frac(AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z)))
# In[375]:
def Vmax(Mvir, z):
    """Maximum of the dark-matter-halo rotation curve
    (Rodriguez-Puebla et al. 2016, eqs. 4-7).

    Input: virial mass [Msun]; output: [km/s].
    Uses the module-level Bolshoi-Planck density parameters — update them
    if the cosmology changes.

    Fixed: the inner helper overwrote its z argument with the module-level
    `redshift` array (`z = np.array(redshift)`), so the function ignored the
    redshift actually passed in.
    """
    def alpha(zz):
        aexp = 1. / (1 + zz)
        return 0.346 - 0.059 * aexp + 0.025 * aexp**2
    def beta(zz):
        aexp = 1. / (1 + zz)
        return 10**(2.209 + 0.060 * aexp - 0.021 * aexp**2)
    def E(zz):
        # Dimensionless Hubble rate for flat LCDM.
        return np.sqrt(Om_lambda_cero + Om_mat_cero * (1 + zz)**3)
    M12 = Mvir / 1E12
    return beta(z) * (M12 * E(z))**alpha(z)
# In[376]:
def Vmax_assembly(Mvir0, z0, z):
    """Peak halo rotation velocity along the mass-assembly history.

    Input: initial virial mass [Msun]; output: [km/s].
    """
    return Vmax(halo_mass_assembly(Mvir0, z0, z), z)
# In[377]:
def SNE_feedback(Mvir0, z0, z):
    """Ratio of supernova kinetic-energy injection to the halo's specific
    kinetic energy (dimensionless).

    Input: initial virial mass [Msun].

    Fixed: removed a stray `z = np.array(redshift)` statement that read the
    module-level `redshift` array and shadowed the z argument; its value was
    never used afterwards.
    """
    EK = 0.5 * Vmax_assembly(Mvir0, z0, z)**2  # specific kinetic energy of the halo [km^2 s^-2]
    ESN = 10**7.701147  # SN explosion energy [Msun km^2 s^-2]
    epsilon_SN = 0.05  # fraction of SN energy converted to kinetic energy (Mo et al., p. 403)
    N_SN = 8.0E-3  # supernovae per Msun of stars formed (one per 125 Msun)
    # NOTE(review): an earlier variant scaled by the star-formation rate:
    #   E_SFR = SFR(Mvir0, z0, z) * ESN * epsilon_SN * N_SN
    E_SFR = ESN * epsilon_SN * N_SN  # [km^2 s^-2]
    return E_SFR / EK
# In[378]:
def v_disp(Mvir, z):
    """Velocity dispersion of the dark-matter halo, Vmax/sqrt(3).

    First step towards introducing the SMBH-potential relation.
    Input: virial mass [Msun]; output: [km/s].
    """
    return Vmax(Mvir, z) / 3**0.5
# In[379]:
def v_disp_assembly(Mvir0, z0, z):
    """Halo velocity dispersion along the mass-assembly history.

    Input: initial virial mass [Msun]; output: [km/s].

    Fixed: removed a dead `z = np.array(redshift)` statement that read the
    module-level `redshift` array after the result was already computed.
    """
    return v_disp(halo_mass_assembly(Mvir0, z0, z), z)
# In[380]:
def M_BH(Mvir0, z0, z):
    """Black-hole mass from the M-sigma relation of Woo et al. (2013).

    Input and output in solar masses.
    """
    sigma = v_disp_assembly(Mvir0, z0, z)  # velocity dispersion [km/s]
    alpha = 8.37
    beta = 5.31
    return 10**(alpha + beta * np.log10(sigma / 200))
# In[381]:
def dM_BHdz(Mvir0, z0, redshift):
    """Black-hole mass growth rate from the M-sigma relation.

    Input: initial virial mass [Msun]; output: [Msun/yr].
    """
    z_hi = np.array(redshift)
    z_lo = z_hi - 0.01  # small backward step in redshift
    dmass = M_BH(Mvir0, z0, z_hi) - M_BH(Mvir0, z0, z_lo)
    dtime = (AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z_hi)
             - AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z_lo))
    # dtime is in Gyr; divide by 1E9 to express the rate per year.
    return dmass / dtime / 1E9
# In[382]:
def M_bh_from_MS(Mvir0, z0, z, fudge):
    """Black-hole mass as a fixed fraction 1/fudge of the stellar mass
    (Kormendy & Ho 2013 approximation; fudge is typically 1e2 or 1e3).

    Input and output in solar masses.
    """
    return galaxy_mass_assembly(Mvir0, z0, z) / fudge
# In[383]:
def dM_bh_from_MS_dt(Mvir0, z0, z, fudge):
    """Black-hole growth rate from the stellar-mass scaling.

    fudge is typically 1e2 or 1e3; output in [Msun/yr].

    Fixed: the z argument was overwritten with the module-level `redshift`
    array (`z = np.array(redshift)`), so the function ignored the redshift
    passed in.
    """
    z_hi = np.array(z)
    z_lo = z_hi - 0.01  # small backward step in redshift
    dmass = (M_bh_from_MS(Mvir0, z0, z_hi, fudge)
             - M_bh_from_MS(Mvir0, z0, z_lo, fudge))
    dtime = (AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z_hi)
             - AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z_lo))
    # dtime is in Gyr; divide by 1E9 to express the rate per year.
    return dmass / dtime / 1E9
# In[384]:
####################I added these routines ##########################################
def fQ(Ms, z):
    """Fraction of quenched galaxies at stellar mass Ms
    (eq. 44 of Rodriguez-Puebla et al. 2017).

    Input: stellar mass [Msun]; output: dimensionless fraction in (0, 1).
    """
    logratio = np.log10(Ms) - (10.2 + 0.6 * z)
    return 1. / (1. + 10**(-1.3 * logratio))
# In[385]:
def fSF(Ms, z):
    """Fraction of star-forming galaxies at stellar mass Ms.

    Complement of the quenched fraction: fQ + fSF = 1.
    """
    return 1. - fQ(Ms, z)
# In[386]:
def Mbh_Ms_SF(Ms, z):
    """Black-hole-mass/stellar-mass relation for star-forming galaxies
    (Reines & Volonteri 2015).

    Input and output in solar masses.  The z argument is unused; it is kept
    for interface symmetry with Mbh_Ms_Q.
    """
    return 10**(7.45 + 1.05 * (np.log10(Ms) - 11.))
###why there's input for z if we're not using it
# In[387]:
def Mbh_Ms_Q(Ms, z):
    """Black-hole-mass/stellar-mass relation for quenched galaxies
    (Reines & Volonteri 2015).

    Input and output in solar masses.  The z argument is unused; it is kept
    for interface symmetry with Mbh_Ms_SF.
    """
    return 10**(8.95 + 1.40 * (np.log10(Ms) - 11.))
#find equation number and look for z
# In[388]:
def M_bh_from_Ms_using_fraction_of_SF_and_quenched(Mvir0, z0, z):
    """Average black-hole mass mixing the star-forming and quenched
    relations, weighted by their population fractions in log space
    (assumes lognormal distributions for both populations).

    Input and output in solar masses.
    """
    Ms = galaxy_mass_assembly(Mvir0, z0, z)
    logMbh = (fQ(Ms, z) * np.log10(Mbh_Ms_Q(Ms, z))
              + fSF(Ms, z) * np.log10(Mbh_Ms_SF(Ms, z)))
    return 10**logMbh
# In[389]:
def dM_bh_dt_from_Ms_using_fraction_of_SF_and_quenched(Mvir0, z0, redshift):
    """Black-hole growth rate from the average BH-mass/stellar-mass relation.

    Output in [Msun/yr].
    """
    z_hi = np.array(redshift)
    z_lo = z_hi - 0.01  # small backward step in redshift
    dmass = (M_bh_from_Ms_using_fraction_of_SF_and_quenched(Mvir0, z0, z_hi)
             - M_bh_from_Ms_using_fraction_of_SF_and_quenched(Mvir0, z0, z_lo))
    dtime = (AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z_hi)
             - AgeUniverse(Om_mat_cero, Om_lambda_cero, h, z_lo))
    # dtime is in Gyr; divide by 1E9 to express the rate per year.
    return dmass / dtime / 1E9
# In[390]:
def Lum_quasar_using_fraction_of_SF_and_quenched(Mvir0, z0, z):
    """Bolometric quasar luminosity from the mean SMBH accretion rate.

    The accretion rate is converted from Msun/yr to g/s; the returned
    luminosity is in erg/s (erg = g cm^2 / s^2).
    """
    accretion = (dM_bh_dt_from_Ms_using_fraction_of_SF_and_quenched(Mvir0, z0, z)
                 * Msolar / a_year_in_seconds)  # g/s
    eps_acc = 0.1  # radiative efficiency
    return ((eps_acc * speed_of_light**2) / (1 - eps_acc)) * accretion
###################################################################################
# In[391]:
def Lum_quasar(Mvir0, z0, z, fudge):
    """Bolometric quasar luminosity from the stellar-mass-scaled accretion
    rate (fudge is typically 1e2 or 1e3).

    The accretion rate is converted from Msun/yr to g/s; the returned
    luminosity is in erg/s (erg = g cm^2 / s^2).
    """
    accretion = dM_bh_from_MS_dt(Mvir0, z0, z, fudge) * Msolar / a_year_in_seconds  # g/s
    eps_acc = 0.1  # radiative efficiency
    return ((eps_acc * speed_of_light**2) / (1 - eps_acc)) * accretion
# In[392]:
def Lum_eddigton(Mvir0, z0, z, fudge):
    """Eddington luminosity of the black hole, in erg/s.

    L_Edd = (4 pi G m_p c / sigma_T) * M_bh, which evaluates to
    ~1.26e38 erg/s per solar mass — the pre-evaluated coefficient used below.

    Fixed: removed the dead astropy-constants computation (num/den) whose
    result was never used; the function already returned the simplified
    coefficient, as the original comments noted.
    """
    Mbh = M_bh_from_MS(Mvir0, z0, z, fudge)  # black-hole mass [Msun]
    return 1.26E38 * Mbh
# In[393]:
def Lum_quasar_sigma(Mvir0, z0, z):
    """Bolometric quasar luminosity using the M-sigma black-hole growth rate.

    Input: initial virial mass [Msun]; output: [erg/s].

    Fixed: the accretion rate was evaluated at the module-level `redshift`
    array instead of the z argument passed in.
    """
    dMdt = dM_BHdz(Mvir0, z0, z) * Msolar / a_year_in_seconds  # g/s
    eps_acc = 0.1  # radiative efficiency
    Lqso = ((eps_acc * speed_of_light**2) / (1 - eps_acc)) * dMdt
    # Luminosity in erg/s (erg = g cm^2 / s^2).
    return Lqso
# In[394]:
def AGN_feedback(Mvir0, z0, z, fudge):
    """Ratio of AGN energy injection to the halo's specific kinetic energy.

    NOTE(review): Lum_quasar is in erg/s while the kinetic term is
    km^2 s^-2 per unit mass — the units do not obviously match and the
    kinetic term lacks a mass factor; confirm before using absolute values.
    """
    eta = 0.008  # quasar-mode coupling efficiency (Croton et al.)
    kinetic = 0.5 * Vmax_assembly(Mvir0, z0, z)**2
    return eta * Lum_quasar(Mvir0, z0, z, fudge) / kinetic
# In[405]:
# --- Script section: global inputs and the z=1 stellar-to-halo relation plot ---
z0 = 0
redshift = np.linspace (z0, 10, 100)  # redshift grid for the plots (also read as a global by some functions above)
Mvir0 = (1e11, 1e12, 1e13, 1e14, 1e15)  # progenitor halo masses [Msun]
Mstar = np.linspace(1e10, 1e12)
Mhalo = np.logspace(10, 15, 100)  # halo-mass grid for the SHMR plot [Msun]
Ms = SHMR(1, Mhalo)
plt.title('Star-to-halo relation')
plt.plot(np.log10(Mhalo), np.log10(Ms), '-k')
plt.xlabel('log Mhalo ($M_\odot$)')
plt.ylabel('log Mstar ($M_\odot$)')
plt.show()
# In[407]:
# --- Plot: halo mass assembly histories for the five progenitor masses ---
Mvirz = halo_mass_assembly(Mvir0[0], z0, redshift)
Mvirz2 = halo_mass_assembly(Mvir0[1], z0, redshift)
Mvirz3 = halo_mass_assembly(Mvir0[2], z0, redshift)
Mvirz4 = halo_mass_assembly(Mvir0[3], z0, redshift)
Mvirz5 = halo_mass_assembly(Mvir0[4], z0, redshift)
plt.title('Halo mass assembly of Rodriguez-Puebla et al. 2017')
plt.plot(np.log10(1+redshift), np.log10(Mvirz), color='black', label='mvir 1e11')
plt.plot(np.log10(1+redshift), np.log10(Mvirz2), color='blue', label='mvir 1e12')
plt.plot(np.log10(1+redshift), np.log10(Mvirz3), color='green', label='mvir 1e13')
plt.plot(np.log10(1+redshift), np.log10(Mvirz4), color='yellow', label='mvir 1e14')
plt.plot(np.log10(1+redshift), np.log10(Mvirz5), color='red', label='mvir 1e15')
plt.xlabel('log 1+z')
plt.ylabel('log Mvir ($M_\odot$)')
plt.legend(loc=9, bbox_to_anchor=(0.1, -0.1), ncol=1)
plt.show()
# In[398]:
# --- Plot: stellar mass assembly histories for the five progenitor masses ---
Ms_z = galaxy_mass_assembly(Mvir0[0], z0, redshift)
Ms_z2 = galaxy_mass_assembly(Mvir0[1], z0, redshift)
Ms_z3 = galaxy_mass_assembly(Mvir0[2], z0, redshift)
Ms_z4 = galaxy_mass_assembly(Mvir0[3], z0, redshift)
Ms_z5 = galaxy_mass_assembly(Mvir0[4], z0, redshift)
plt.title('Galaxy mass assembly')
plt.plot(np.log10(1+redshift), np.log10(Ms_z), color='black', label='mvir 1e11')
plt.plot(np.log10(1+redshift), np.log10(Ms_z2), color='blue', label='mvir 1e12')
plt.plot(np.log10(1+redshift), np.log10(Ms_z3), color='green', label='mvir 1e13')
plt.plot(np.log10(1+redshift), np.log10(Ms_z4), color='yellow', label='mvir 1e14')
plt.plot(np.log10(1+redshift), np.log10(Ms_z5), color='red', label='mvir 1e15')
plt.xlabel('log 1+z')
plt.ylabel('log Mstar ($M_\odot$)')
plt.legend(loc=9, bbox_to_anchor=(0.1, -0.1), ncol=1)
plt.show()
# In[365]:
# --- Plot: star-formation rate vs. redshift for the five progenitor masses ---
dmdz1 = dMsdz(Mvir0[0], z0, redshift)
dmdz2 = dMsdz(Mvir0[1], z0, redshift)
dmdz3 = dMsdz(Mvir0[2], z0, redshift)
dmdz4 = dMsdz(Mvir0[3], z0, redshift)
dmdz5 = dMsdz(Mvir0[4], z0, redshift)
plt.title('Star formation rate redshift')
plt.plot(np.log10(1+redshift), np.log10(dmdz1), '-k', color='black', label='mvir 1e11')
plt.plot(np.log10(1+redshift), np.log10(dmdz2), '-k', color='blue', label='mvir 1e12')
plt.plot(np.log10(1+redshift), np.log10(dmdz3), '-k', color='green', label='mvir 1e13')
plt.plot(np.log10(1+redshift), np.log10(dmdz4), '-k', color='yellow', label='mvir 1e14')
plt.plot(np.log10(1+redshift), np.log10(dmdz5), '-k', color='red', label='mvir 1e15')
plt.xlabel('log 1+z')
plt.ylabel('log SFR [($M_\odot$)/yr]')
plt.legend(loc=9, bbox_to_anchor=(0.1, -0.1), ncol=1)
plt.show()
# In[185]:
# --- Plot: star-formation rate vs. age of the universe (0.3/0.7/0.7 cosmology) ---
plt.title('Star formation rate universe')
plt.plot(AgeUniverse(.3, .7, .7, redshift), np.log10(dmdz1), '-k', color='black', label='mvir 1e11')
plt.plot(AgeUniverse(.3, .7, .7, redshift), np.log10(dmdz2), '-k', color='blue', label='mvir 1e12')
plt.plot(AgeUniverse(.3, .7, .7, redshift), np.log10(dmdz3), '-k', color='green', label='mvir 1e13')
plt.plot(AgeUniverse(.3, .7, .7, redshift), np.log10(dmdz4), '-k', color='yellow', label='mvir 1e14')
plt.plot(AgeUniverse(.3, .7, .7, redshift), np.log10(dmdz5), '-k', color='red', label='mvir 1e15')
plt.xlabel('Age of universe [gyears]')
plt.ylabel('log SFR [($M_\odot$)/yr]')
plt.legend(loc=9, bbox_to_anchor=(0.1, -0.1), ncol=1)
plt.show()
# In[186]:
#SFHz = []
#SFHz1 = []
#SFHz2 = []
#SFHz3 = []
#SFHz4 = []
#for i in range(0, 100):
# redshift = i
# SFHz.append(SFR_hist(SHMR,Mvir0[0],z0,redshift))
# SFHz1.append(SFR_hist(SHMR,Mvir0[1],z0,redshift))
# SFHz2.append(SFR_hist(SHMR,Mvir0[2],z0,redshift))
# SFHz3.append(SFR_hist(SHMR,Mvir0[3],z0,redshift))
# SFHz4.append(SFR_hist(SHMR,Mvir0[4],z0,redshift))
#plt.title('Star Formation Histories Rodriguez-Puebla et al. 2017')
#plt.plot(np.log10(1.+z), np.log10(SFHz), color='k',ls='-', label='$M_{vir}(z=0)=10^{15}M_{\odot}$')
#plt.plot(np.log10(1.+z), np.log10(SFHz1), color='b',ls='-', label='$M_{vir}(z=0)=10^{14}M_{\odot}$')
#plt.plot(np.log10(1.+z), np.log10(SFHz2), color='g',ls='-', label='$M_{vir}(z=0)=10^{13}M_{\odot}$')
#plt.plot(np.log10(1.+z), np.log10(SFHz3), color='r',ls='-', label='$M_{vir}(z=0)=10^{12}M_{\odot}$')
#plt.plot(np.log10(1.+z), np.log10(SFHz4), color='y',ls='-', label='$M_{vir}(z=0)=10^{11}M_{\odot}$')
#plt.axis([0, 1.1, -3, 2.2])
#plt.ylabel('$log SFR$')
#plt.xlabel('$log ( 1 + z)$')
#plt.legend(loc='upper right',fontsize=10)
#plt.show()
# In[187]:
# In[187]-[196]: halo/BH growth diagnostics for the five z=0 halo masses.
# Each cell evaluates one quantity for Mvir0[0..4] and overlays the five
# tracks against log10(1+z) with a shared color/label scheme.
_TRACK_COLORS = ('black', 'blue', 'green', 'yellow', 'red')
_TRACK_LABELS = ('mvir 1e11', 'mvir 1e12', 'mvir 1e13', 'mvir 1e14', 'mvir 1e15')

def _overlay(series, style, first_label=None):
    """Plot log10 of each track in *series* against log10(1+z)."""
    labels = list(_TRACK_LABELS)
    if first_label is not None:
        labels[0] = first_label  # first curve carries the legend-group name
    for track, color, label in zip(series, _TRACK_COLORS, labels):
        plt.plot(np.log10(1 + redshift), np.log10(track), style,
                 color=color, label=label)

def _finish(ylabel):
    """Common axis labels / legend / show used by every cell below."""
    plt.xlabel('log 1+z')
    plt.ylabel(ylabel)
    plt.legend(loc=9, bbox_to_anchor=(0.1, -0.1), ncol=1)
    plt.show()

# In[187]: Vmax growth histories.
Vmax1, Vmax2, Vmax3, Vmax4, Vmax5 = [
    Vmax_assembly(Mvir0[i], z0, redshift) for i in range(5)]
plt.title('Vmax growth')
_overlay((Vmax1, Vmax2, Vmax3, Vmax4, Vmax5), '-k')
_finish('log Vmax [km/s]')

# In[188]: supernova feedback.
SNE_f1, SNE_f2, SNE_f3, SNE_f4, SNE_f5 = [
    SNE_feedback(Mvir0[i], z0, redshift) for i in range(5)]
plt.title('SN feedback')
_overlay((SNE_f1, SNE_f2, SNE_f3, SNE_f4, SNE_f5), '-k')
_finish('log feedback')

# In[189]: velocity dispersion growth.
Vdisp1, Vdisp2, Vdisp3, Vdisp4, Vdisp5 = [
    v_disp_assembly(Mvir0[i], z0, redshift) for i in range(5)]
plt.title('Vel disp')
_overlay((Vdisp1, Vdisp2, Vdisp3, Vdisp4, Vdisp5), '-k')
_finish('vel disp')

# In[190]: BH mass inferred from the velocity dispersion.
M_bh1, M_bh2, M_bh3, M_bh4, M_bh5 = [
    M_BH(Mvir0[i], z0, redshift) for i in range(5)]
plt.title('BH mass from volevity disp')
_overlay((M_bh1, M_bh2, M_bh3, M_bh4, M_bh5), '-k')
_finish('mass ($M_\odot$)')

# In[191]: growth rate of the sigma-based BH mass.
dM_bhdz1, dM_bhdz2, dM_bhdz3, dM_bhdz4, dM_bhdz5 = [
    dM_BHdz(Mvir0[i], z0, redshift) for i in range(5)]
plt.title('BH mass from velocity dipersion growth rate')
_overlay((dM_bhdz1, dM_bhdz2, dM_bhdz3, dM_bhdz4, dM_bhdz5), '-k')
_finish('Log dBH_mass/dt ($M_\odot yr^{-1}$)')

# In[192]: BH mass from the stellar mass, fudge factor 1e2.
fudge2 = 1e2
M_bh_ms1, M_bh_ms2, M_bh_ms3, M_bh_ms4, M_bh_ms5 = [
    M_bh_from_MS(Mvir0[i], z0, redshift, fudge2) for i in range(5)]
plt.title('BH mass from stellar mass')
_overlay((M_bh_ms1, M_bh_ms2, M_bh_ms3, M_bh_ms4, M_bh_ms5), '-k')
_finish('mass ($M_\odot$)')

# In[193]: growth rate of the stellar-mass-based BH mass.
dM_bh_ms_dz1, dM_bh_ms_dz2, dM_bh_ms_dz3, dM_bh_ms_dz4, dM_bh_ms_dz5 = [
    dM_bh_from_MS_dt(Mvir0[i], z0, redshift, fudge2) for i in range(5)]
plt.title('BH mass from stellar mass growth rate')
_overlay((dM_bh_ms_dz1, dM_bh_ms_dz2, dM_bh_ms_dz3, dM_bh_ms_dz4,
          dM_bh_ms_dz5), '-k')
_finish('Log dBH_mass/dt ($M_\odot yr^{-1}$)')

# In[194]: same mapping with fudge factor 1e3, compared to the sigma-based
# BH mass (solid = stellar-mass based, dashed = velocity-dispersion based).
fudge = 1e3
M_bh_mse31, M_bh_mse32, M_bh_mse33, M_bh_mse34, M_bh_mse35 = [
    M_bh_from_MS(Mvir0[i], z0, redshift, fudge) for i in range(5)]
plt.title('BH mass from vel disp vs from stellar mass factor 100')
_overlay((M_bh_ms1, M_bh_ms2, M_bh_ms3, M_bh_ms4, M_bh_ms5), '-',
         'BHm stellar mvir 1e11')
_overlay((M_bh1, M_bh2, M_bh3, M_bh4, M_bh5), '--',
         'BHm vel disp mvir 1e11')
_finish('mass ($M_\odot$)')

# In[195]: growth-rate comparison of the two BH-mass mappings.
plt.title('BH mass from sigma vs from stellar mass factor 100: bh growth rate')
_overlay((dM_bh_ms_dz1, dM_bh_ms_dz2, dM_bh_ms_dz3, dM_bh_ms_dz4,
          dM_bh_ms_dz5), '-', 'BHm stellar mvir 1e11')
_overlay((dM_bhdz1, dM_bhdz2, dM_bhdz3, dM_bhdz4, dM_bhdz5), '--',
         'BHm vel disp mvir 1e11')
_finish('Log dBH_mass/dt ($M_\odot yr^{-1}$)')

# In[196]: mass comparison with the 1e3 fudge factor.
plt.title('BH mass from velocity dispersion/from stellar mass, factor of 1000')
_overlay((M_bh_mse31, M_bh_mse32, M_bh_mse33, M_bh_mse34, M_bh_mse35), '-',
         'BHm stellar mvir 1e11')
_overlay((M_bh1, M_bh2, M_bh3, M_bh4, M_bh5), '--',
         'BHm vel disp mvir 1e11')
_finish('mass ($M_\odot$)')
# In[197]:
# Quenched fraction vs. stellar mass at three representative redshifts.
plt.title('Fraction of quenched galaxies as a function of stellar mass')
for _zq, _zcolor, _zlabel in ((z0, 'black', 'z=0'),
                              (2, 'blue', 'z=2'),
                              (5, 'green', 'z=5')):
    plt.semilogx(Mstar, fQ(Mstar, _zq), '-', color=_zcolor, label=_zlabel)
plt.xlabel('Stellar Mass (Msun)')
plt.ylabel('Quenched Fraction')
plt.legend(loc=9, bbox_to_anchor=(0.1, -0.1), ncol=1)
plt.show()
# In[198]:
# In[198]-[201]: quasar/Eddington luminosities and AGN feedback for the
# five halo masses; helpers are local to this section.
_lum_colors = ('black', 'blue', 'green', 'yellow', 'red')
_lum_labels = ('mvir 1e11', 'mvir 1e12', 'mvir 1e13', 'mvir 1e14', 'mvir 1e15')

def _lum_overlay(series, style, first_label=None):
    """Overlay log10 of each series against log10(1+z)."""
    labels = list(_lum_labels)
    if first_label is not None:
        labels[0] = first_label  # first curve names the legend group
    for track, color, label in zip(series, _lum_colors, labels):
        plt.plot(np.log10(1 + redshift), np.log10(track), style,
                 color=color, label=label)

def _lum_finish(ylabel):
    """Shared axis labels / legend / show for the cells below."""
    plt.xlabel('log 1+z')
    plt.ylabel(ylabel)
    plt.legend(loc=9, bbox_to_anchor=(0.1, -0.1), ncol=1)
    plt.show()

# In[198]: quasar luminosity from the stellar mass, two fudge factors.
Lum_q1, Lum_q2, Lum_q3, Lum_q4, Lum_q5 = [
    Lum_quasar(Mvir0[i], z0, redshift, fudge2) for i in range(5)]
Lum_q31, Lum_q32, Lum_q33, Lum_q34, Lum_q35 = [
    Lum_quasar(Mvir0[i], z0, redshift, fudge) for i in range(5)]
plt.title('Quasar luminosity from stellar mass: factors 1e2 and 1e3')
_lum_overlay((Lum_q1, Lum_q2, Lum_q3, Lum_q4, Lum_q5), '-',
             '1e2 factor mvir 1e11')
_lum_overlay((Lum_q31, Lum_q32, Lum_q33, Lum_q34, Lum_q35), '--',
             '1e3 factor mvir 1e11')
_lum_finish('log Luminosity (erg/sec)')

# In[199]: sigma-based quasar luminosity vs. the 1e3 stellar-mass version.
Lum_q_s1, Lum_q_s2, Lum_q_s3, Lum_q_s4, Lum_q_s5 = [
    Lum_quasar_sigma(Mvir0[i], z0, redshift) for i in range(5)]
plt.title('Quasar luminosity from velocity dispersion VS from stellar mass')
_lum_overlay((Lum_q_s1, Lum_q_s2, Lum_q_s3, Lum_q_s4, Lum_q_s5), '-',
             'velocity dipersion mvir 1e11')
_lum_overlay((Lum_q31, Lum_q32, Lum_q33, Lum_q34, Lum_q35), '--',
             '1e3 factor stellar mvir 1e11')
_lum_finish('log Luminosity (erg/sec)')

# In[200]: Eddington luminosity for both fudge factors.
Lum_ed1, Lum_ed2, Lum_ed3, Lum_ed4, Lum_ed5 = [
    Lum_eddigton(Mvir0[i], z0, redshift, fudge2) for i in range(5)]
Lum_ed31, Lum_ed32, Lum_ed33, Lum_ed34, Lum_ed35 = [
    Lum_eddigton(Mvir0[i], z0, redshift, fudge) for i in range(5)]
plt.title('Eddington luminosity from stellar mass: factors of 1e2 and 1e3')
_lum_overlay((Lum_ed1, Lum_ed2, Lum_ed3, Lum_ed4, Lum_ed5), '-',
             '1e2 factor mvir 1e11')
_lum_overlay((Lum_ed31, Lum_ed32, Lum_ed33, Lum_ed34, Lum_ed35), '--',
             '1e3 factor mvir 1e11')
_lum_finish('log Luminosity (erg/sec)')

# In[201]: AGN feedback with the 1e3 fudge factor.
agn_fb1, agn_fb2, agn_fb3, agn_fb4, agn_fb5 = [
    AGN_feedback(Mvir0[i], z0, redshift, fudge) for i in range(5)]
plt.title('AGN feedback')
_lum_overlay((agn_fb1, agn_fb2, agn_fb3, agn_fb4, agn_fb5), '-',
             '1e3 factor mvir 1e11')
_lum_finish('feedback')
# In[202]:
# In[202]-[204]: repeat the BH-mass / quasar-luminosity diagnostics using
# the SF/quenched-fraction based mapping; helpers are local to this section.
_fq_colors = ('black', 'blue', 'green', 'yellow', 'red')
_fq_labels = ('mvir 1e11', 'mvir 1e12', 'mvir 1e13', 'mvir 1e14', 'mvir 1e15')

def _fq_overlay(series, style, first_label=None):
    """Overlay log10 of each series against log10(1+z)."""
    labels = list(_fq_labels)
    if first_label is not None:
        labels[0] = first_label
    for track, color, label in zip(series, _fq_colors, labels):
        plt.plot(np.log10(1 + redshift), np.log10(track), style,
                 color=color, label=label)

def _fq_finish(ylabel):
    """Shared axis labels / legend / show for the cells below."""
    plt.xlabel('log 1+z')
    plt.ylabel(ylabel)
    plt.legend(loc=9, bbox_to_anchor=(0.1, -0.1), ncol=1)
    plt.show()

# In[202]: fraction-based BH mass vs. the sigma-based BH mass.
M_bh_mse31, M_bh_mse32, M_bh_mse33, M_bh_mse34, M_bh_mse35 = [
    M_bh_from_Ms_using_fraction_of_SF_and_quenched(Mvir0[i], z0, redshift)
    for i in range(5)]
plt.title('BH mass from vel disp vs from stellar mass using the fraction of SFing and Quenched')
_fq_overlay((M_bh_mse31, M_bh_mse32, M_bh_mse33, M_bh_mse34, M_bh_mse35),
            '-', 'BHm stellar mvir 1e11')
_fq_overlay((M_bh1, M_bh2, M_bh3, M_bh4, M_bh5), '--',
            'BHm vel disp mvir 1e11')
_fq_finish('mass ($M_\odot$)')

# In[203]: growth rate of the fraction-based BH mass.
dM_bh_ms_dz1, dM_bh_ms_dz2, dM_bh_ms_dz3, dM_bh_ms_dz4, dM_bh_ms_dz5 = [
    dM_bh_dt_from_Ms_using_fraction_of_SF_and_quenched(Mvir0[i], z0, redshift)
    for i in range(5)]
plt.title('BH mass from stellar mass growth rate')
_fq_overlay((dM_bh_ms_dz1, dM_bh_ms_dz2, dM_bh_ms_dz3, dM_bh_ms_dz4,
             dM_bh_ms_dz5), '-k')
_fq_finish('Log dBH_mass/dt ($M_\odot yr^{-1}$)')

# In[204]: quasar luminosity from the fraction-based mapping.
Lum_q_s1, Lum_q_s2, Lum_q_s3, Lum_q_s4, Lum_q_s5 = [
    Lum_quasar_using_fraction_of_SF_and_quenched(Mvir0[i], z0, redshift)
    for i in range(5)]
plt.title('Quasar luminosity from velocity dispersion VS from stellar mass')
_fq_overlay((Lum_q_s1, Lum_q_s2, Lum_q_s3, Lum_q_s4, Lum_q_s5), '-',
            'velocity dipersion mvir 1e11')
_fq_overlay((Lum_q31, Lum_q32, Lum_q33, Lum_q34, Lum_q35), '--',
            '1e3 factor stellar mvir 1e11')
_fq_finish('log Luminosity (erg/sec)')
# In[ ]:
# In[ ]:
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 15:48:18 2018
@author: bartonjo
Get the SRIM data out of the txt files and convert the data into
useable units.
"""
import sys
s = '/Users/bartonjo/PyFiles/LP/'
if s not in sys.path:
    sys.path.insert(0, s)
from cookbook import savitzky_golay as smooth
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Path where all of the data produced by SRIM is located.
path = '/Users/bartonjo/PyFiles/SRIM/'
folder = path + 'Si-on-W-12MeV-5000ions-kp-40dsp/'

# Experimental ion fluence, ions/m^2.
fluence = 5e18

# Replacement-collision data (NOVAC.txt) is optional SRIM output; when it
# is absent, VACANCY.txt has a slightly different header length.
try:
    # FIX: the delimiter is a regex, so use a raw string — the original
    # '\s+' relies on an invalid escape sequence (warns on modern Python).
    nv = pd.read_table(folder + 'NOVAC.txt', skiprows=22,
                       delimiter=r'\s+')
    nv.columns = ['depth_A', 'repl/A/ion']
    varows = 27
except Exception:  # FIX: narrowed from a bare `except:`; means "no NOVAC data"
    nv = {'repl/A/ion': 0}
    varows = 26

va = pd.read_table(folder + 'VACANCY.txt', engine='python',
                   skiprows=varows, skipfooter=2, delimiter=r'\s+')
va.columns = ['depth_A', 'pko/A/ion', 'vac/A/ion']

# Get the atomic density from the VACANCY file header.
with open(folder + 'VACANCY.txt', 'r') as fin:
    for line in fin.readlines():
        row = line.strip().split()
        if ('Density' in row):
            atden = float(row[5]) * 1e6  # convert to m-3

# Convert data and put it in one dataframe of depth vs dpa.
x = va.depth_A * 1e-10 * 1e6  # Angstrom -> m -> microns
vac = va['pko/A/ion'] + va['vac/A/ion'] - nv['repl/A/ion']
vac = vac * 1e10  # convert to number/m/ion
dpa = vac * fluence / atden * 2  # NOTE(review): factor 2 from original — confirm
sdpa = smooth(np.array(dpa), 15, 3)  # Savitzky-Golay smoothing (window 15, order 3)
dam = pd.DataFrame({'Depth_um': x, 'dpa': dpa, 'smooth_dpa': sdpa})
dam.to_csv(folder + 'dpa_data.csv', index=False)

# Roughly plot the data.
plt.figure()
plt.plot(x, dpa, 's', label='TRIM data')
plt.plot(x, sdpa, '--k')
plt.xlabel(r'Depth [$\mu$m]', fontsize=18)
plt.ylabel('dpa', fontsize=18)
plt.tick_params(labelsize=18)
plt.title(folder[len(path):-1])
plt.legend(loc='best', fontsize=18)
plt.subplots_adjust(bottom=.12, left=.14)
# BUG FIX: save before show() — with interactive backends the figure is
# torn down when the show() window closes, so the original saved a blank file.
plt.savefig(folder + 'dpa_img.svg', format='svg')
plt.show()
|
"""
Return a list of the numerical headers
"""
def get_num_headers(values):
num_headers = []
non_num_headers = []
for elem in values:
try:
(float(values[elem][1]) or int(values[elem][1]))
num_headers.append(elem)
except:
non_num_headers.append(elem)
num_headers.pop(0)
return (num_headers) |
import requests
from bs4 import BeautifulSoup
import pandas as pd
from datetime import datetime, date
# Scrape daily horoscopes from sagittarius.com's archive pages (1..113).
main_url = "https://sagittarius.com/archive/{}"
df = pd.DataFrame(columns=['start_date', 'end_date', 'sign', 'horoscope'])
i = 0  # running row count; also used to skip the first (non-horoscope) link
page = 1
while page <= 113:
    try:
        main_req = requests.get(main_url.format(page))
        main_soup = BeautifulSoup(main_req.text, 'lxml')
        h5s = main_soup.find('div', class_="bg-main-gradient").find_all('h5')
        for h5 in h5s:
            print(i)
            if i == 0:
                # The very first h5 is not a horoscope link — skip it once.
                i += 1
                continue
            try:
                url = h5.find('a').get('href')
                req = requests.get(url)
                soup = BeautifulSoup(req.text, 'lxml')
                horoscopes = soup.find('ul', class_="horoscope-list").find_all('li')
                sign = [h.find('a').get('name') for h in horoscopes]
                day = soup.find('div', class_="bg-main-gradient").\
                    find('h3').text.replace("Free Horoscope for ", '')
                day = datetime.strptime(day, "%B %d, %Y")
                days = [day for x in range(len(sign))]
                daily = [str(h.find('div', class_='text').find('p').text.replace(
                    "Ask 1 free question to a Psychic", '').
                    encode('ascii', 'ignore')) for h in horoscopes]
                # FIX: DataFrame.append was removed in pandas 2.0;
                # pd.concat with default indexing is the direct equivalent.
                df = pd.concat(
                    [df, pd.DataFrame({"start_date": days, "end_date": days,
                                       "sign": sign, "horoscope": daily},
                                      columns=df.columns)])
                i += len(sign)
            except Exception:
                # Skip malformed day pages but keep scraping (narrowed
                # from a bare `except:`).
                continue
        df.to_csv('daily2.csv')  # checkpoint after each archive page
    except Exception:
        # Network/parse failure on the archive page itself: move on.
        pass
    # BUG FIX: the original advanced `page` only in the except branch, so a
    # *successful* page was re-fetched forever. Advance unconditionally.
    page += 1
df.to_csv('daily2.csv')
|
import pandas as pd
import numpy as np
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Dropout, Flatten, Activation, Reshape
from keras.layers.convolutional import Conv2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.optimizers import SGD, Adam, Adadelta
from keras.utils.np_utils import to_categorical
import keras.utils.vis_utils
import csv
from keras.preprocessing.image import ImageDataGenerator
if __name__ == '__main__':
    # BUG FIX: `sys` is used below (sys.argv) but was never imported
    # anywhere in this file, so the script crashed with a NameError.
    import sys

    # Training CSV path comes from the command line; the test set is fixed.
    file1 = pd.read_csv(sys.argv[1], sep=',', header=0)
    file2 = pd.read_csv('test.csv', sep=',', header=0)
    file1 = np.array(file1)
    file2 = np.array(file2)

    train_y = []
    train_data = []
    test_data = []
    # Read training rows: column 0 is the class label, column 1 is a
    # space-separated 48x48 grayscale pixel string.
    for line in file1:
        train_y.append(line[0])
        train_data.append(
            np.fromstring(line[1], dtype=float, sep=' ').reshape(48, 48, 1))
    train_y = np.array(train_y)
    train_data = np.array(train_data)
    train_data /= 255  # scale pixel intensities into [0, 1]

    # Read test rows (same pixel format; any label column is ignored).
    for line in file2:
        test_data.append(
            np.fromstring(line[1], dtype=float, sep=' ').reshape(48, 48, 1))
    test_data = np.array(test_data)
    test_data /= 255

    # One-hot encode the labels (7 emotion classes inferred from the data).
    train_y = to_categorical(train_y)

    # Hyperparameters.
    batch_size = 128
    epochs = 25
    n = len(train_data)
    # FIX: derive from batch_size instead of repeating the literal 128.
    steps_per_epoch = int(n / batch_size) + 1
    shape = (48, 48, 1)

    # Build the CNN (architecture unchanged from the original).
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(5, 5), padding='valid',
                     input_shape=shape, activation='relu'))
    model.add(ZeroPadding2D(padding=(2, 2), data_format='channels_last'))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))
    model.add(ZeroPadding2D(padding=(1, 1), data_format='channels_last'))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D(padding=(1, 1), data_format='channels_last'))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D(padding=(1, 1), data_format='channels_last'))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D(padding=(1, 1), data_format='channels_last'))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D(padding=(1, 1), data_format='channels_last'))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(7, activation='softmax'))

    # opt = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    opt = Adam(lr=1e-3)
    # opt = Adadelta(lr=0.1, rho=0.95, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])

    # Train with light data augmentation.
    gen = ImageDataGenerator(rotation_range=2, shear_range=0.2,
                             zoom_range=0.2, horizontal_flip=True)
    gen.fit(train_data)
    model.fit_generator(gen.flow(train_data, train_y, batch_size=batch_size),
                        steps_per_epoch=steps_per_epoch, epochs=epochs,
                        verbose=1)

    # NOTE(review): predict_classes is only available on the old keras
    # Sequential API (removed in TF2) — matches this file's imports.
    pre = model.predict_classes(test_data)
    model.save('model1.h5')

    # FIX: write predictions via a context manager so the file is closed;
    # newline='' is the documented csv.writer requirement.
    with open('output.csv', 'w', newline='') as output:
        write1 = csv.writer(output)
        write1.writerow(['id', 'label'])
        for idx in range(len(pre)):
            write1.writerow([idx, pre[idx]])
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import datetime as dt
from urllib.request import urlretrieve, urlopen, URLError
import os
# Directory containing this script (templates and output live here).
here = os.path.dirname(os.path.realpath(__file__))
HOME = os.getenv('HOME')  # NOTE(review): unused below — confirm before removing
## Get screen resolution
# Parses xdpyinfo's "dimensions: WxH pixels ..." line; `res` is e.g. "1920x1080".
com = "xdpyinfo | grep dimensions"
res = os.popen(com).read().split()[1]
# Resolution-specific SVG template, scratch PNG, and final wallpaper path.
model_svg = here + '/background_%s.svg'%(res)
tmp_png = '/tmp/test.png'
desk_back = here + '/background.png'
now = dt.datetime.now()
#### Images
## Europe
# now
url1 = 'http://www.meteo.be/meteo/view/en/113200-ajaxcontroller.html/6723383/image.jpg?position=19&extraid='
# 1h ago
url2 = 'http://www.meteo.be/meteo/view/en/113200-ajaxcontroller.html/6723383/image.jpg?position=18&extraid='
## Frentes from aemet — today's date is interpolated into the URL.
url3 = 'http://www.aemet.es/imagenes_d/eltiempo/prediccion/modelos_num/hirlam/%s00+012_ww_isx0w012.gif'%(now.strftime('%Y%m%d'))
def download_image(url, name):
    """Download *url* into /tmp and move it into `here` if it is a JPEG.

    Prints the temp path and response headers; on a non-JPEG response the
    file is left in /tmp and a warning is printed.
    """
    tmp = '/tmp/%s' % (name)
    # BUG FIX: the original fetched the module-level `url1` regardless of
    # the `url` argument, so every call downloaded the same image.
    a, b = urlretrieve(url, tmp)
    print(a)
    print(b)
    # BUG FIX: split(';') returns a *list*, which never equals the string
    # 'image/jpeg'; compare the media type (first component) instead.
    if b['Content-Type'].split(';')[0] == 'image/jpeg':
        os.system('mv %s %s' % (tmp, here + '/%s' % (name)))
    else:
        print('wrong downloaded file')
# Best-effort downloads: a failed fetch must not stop the wallpaper build.
# NOTE(review): bare excepts swallow everything, including typos — consider
# narrowing to (URLError, OSError).
try:
    download_image(url1,'europe-0.jpg')
    #a,b = urlretrieve(url1,'/tmp/europe-0.jpg')
    #if b['Content-Type'].split(';') == 'image/jpeg':
    #    os.system('mv /tmp/europe-0.jpg %s'%(here+'/europe-0.jpg'))
    #else: print('wrong downloaded file')
except: pass
#try: a,b = urlretrieve(url2, here+'/europe-1.jpg')
#except: pass
try: #a,b = urlretrieve(url3, here+'/frentes.gif')
    download_image(url3,'frentes.gif')
except: pass
# Render the SVG template to PNG with inkscape, then install it as the
# desktop background image (second command runs only if the first succeeds).
com = 'inkscape --export-png=%s %s &&'%(tmp_png,model_svg)
com += ' mv %s %s'%(tmp_png,desk_back)
os.system(com)
|
import sys
# def hello():
# print("Hello")
#
#
#
# def max(a, b):
# if a > b:
# return a
# else:
# return b
#
#
# print(max(5,4))
#
#
# def area(w, h):
# return w * h
#
# def welcome(name):
# print("welcome", name)
#
#
# w = 4
# h = 5
#
# print(area(w, h))
#
# welcome('hello')
#
# a=[1, 2,3]
#
# welcome(a)
#
# a="Runoob"
#
# print(a)
#
# def change(a):
# print(id(a))
# a = 10
# print(id(a))
#
#
# a = 1
# print(id(a))
# change(a)
#
#
# def changeme( mylist ):
# mylist.append([1,2,3,4])
# print('---', mylist)
# return
#
#
# mylist = [10,20,30]
# changeme(mylist)
# print(mylist)
#
#
# def functionname(*ss):
# print(ss)
#
#
# functionname('1',2, [1,2,3])
#
#
# def sum(arg1, arg2):
# # ่ฟๅ2ไธชๅๆฐ็ๅ."
# total = arg1 + arg2
# print("ๅฝๆฐๅ
: ", total)
# print(id(total))
# return total
#
#
# # ่ฐ็จsumๅฝๆฐ
# total = sum(10, 20)
# print(id(total))
# print("ๅฝๆฐๅค : ", total)
#
# knights = {'gallahad': 'the pure', 'robin': 'the brave'}
#
# for k, v in knights.items():
# print(k,v)
#
# questions = ['name', 'quest', 'favorite color']
# answers = ['lancelot', 'the holy grail', 'blue']
#
# for q, a in zip(questions,answers):
# print('{0} ? -- {1}.' . format(q, a))
#
#
# basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
# for f in reversed(basket):
# print(f)
#
# print(basket)
# print('ๅฝไปค่กๅๆฐๅฆไธ:')
# for i in sys.argv:
# print(i)
#print('\n\nPython ่ทฏๅพไธบ๏ผ', sys.modules, '\n')
|
import sys, pygame, glob, random
from pygame.locals import *
# Initialize all pygame modules before any display/clock use.
pygame.init()
# Game Screen Dimension (pixels); `size` is the (width, height) tuple.
size = width,height = 640,480
screen = pygame.display.set_mode(size)
pygame.display.set_caption('BUBBLE TROUBLE')
#pygame.mouse.set_visible(True)
# Color Definition — RGB tuples used for drawing.
black = 0,0,0
white = 255,255,255
blue = 0,0,255
green = 0,255,0
red = 255,0,0
# Frame per second — cap enforced via fpsClock.tick(FPS) in the main loop.
FPS = 60
fpsClock = pygame.time.Clock()
# Background Music Play (currently disabled).
# pygame.mixer.music.load('abc.mp3')
# pygame.mixer.music.play(-1, 0.0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
from setuptools import find_packages, setup
# Package metadata used by the setup() call below.
NAME = 'azion-python'
DESCRIPTION = "Python client to interact with Azion's ReST API"
URL = 'https://github.com/mauricioabreu/azion-python'
EMAIL = 'mauricio.abreua@gmail.com'
AUTHOR = 'Maurรญcio Antunes'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.0.1'
# Runtime dependencies installed alongside the package.
REQUIRED = [
    'pendulum', 'requests'
]
# Use the README (reST) as the long description shown on PyPI.
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = '\n' + f.read()
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/x-rst',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    # Ship every package except the test suite.
    packages=find_packages(exclude=('tests',)),
    install_requires=REQUIRED,
    include_package_data=True,
    license='MIT',
    classifiers=[
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
)
|
class DoubleNode:
    """A node of a doubly linked list holding a single value."""

    def __init__(self, val):
        self.val = val
        self.next = None  # link toward the tail
        self.prev = None  # link toward the head

    def traverseForward(self):
        """Print every value from this node through to the tail."""
        cursor = self
        while cursor is not None:
            print(cursor.val)
            cursor = cursor.next

    def traverseBackward(self):
        """Print every value from this node back to the head."""
        cursor = self
        while cursor is not None:
            print(cursor.val)
            cursor = cursor.prev
# Demo: build a three-node doubly linked list 67 <-> 68 <-> 69 and walk it.
node1 = DoubleNode(67)
node2 = DoubleNode(68)
node3 = DoubleNode(69)
node1.next = node2 # 67 -> 68
node2.next = node3 # 68 -> 69
node2.prev = node1 # 67 <- 68
node3.prev = node2 # 68 <- 69
node1.traverseForward()   # prints 67, 68, 69
node3.traverseBackward()  # prints 69, 68, 67
|
import csv
import json
class CSVFile:
    """Thin wrapper that writes rows of data to a CSV file.

    NOTE(review): the ``num_rows`` argument is accepted but never used —
    confirm with the call sites whether it can be dropped.
    """

    def __init__(self, path="data/alldata.csv", num_rows=1000):
        self._path = path

    def create_file(self, data):
        """Overwrite the target file with *data* (an iterable of rows)."""
        with open(self._path, "w", newline="") as handle:
            csv.writer(handle, delimiter=",").writerows(data)
class JSONFile:
    """Loads a JSON configuration file into memory."""

    def __init__(self, path="data/data_config.json"):
        self._path = path
        self._data_stores = None  # populated by load_file()

    def load_file(self):
        """Read and parse the JSON file, then echo the parsed content."""
        with open(self._path, "r", encoding="utf-8") as handle:
            self._data_stores = json.load(handle)
        print("Data Loaded!", self._data_stores, sep="\n")
from itertools import combinations  # NOTE(review): imported but unused in this script
# Determine start time (wall clock; reported at the end of the script)
import time
start = time.time()
# Data Set 1: (name, weight, value) triples for the knapsack solver below
items = (
("Item 1", 1, 1), ("Item 2", 1, 2), ("Item 3", 1, 3), ("Item 4", 1, 4), ("Item 5", 1, 5)
)
cap = 4  # knapsack weight capacity
#
# # Data Set 2:
# items = (
# ("Item 1", 2, 5), ("Item 2", 2, 5), ("Item 3", 2, 5), ("Item 4", 2, 5), ("Item 5", 2, 5), ("Item 6", 2, 5), ("Item 7", 2, 5), ("Item 8", 2, 5), ("Item 9", 3, 6),
# )
# cap = 17
#
# # Data Set 3:
# items = (
# ("Item 1", 23, 92), ("Item 2", 31, 57), ("Item 3", 29, 49), ("Item 4", 44, 68), ("Item 5", 53, 60), ("Item 6", 38, 43), ("Item 7", 63, 67), ("Item 8", 85, 84), ("Item 9", 89, 87), ("Item 10", 82, 72),
# )
# cap = 165
#
# # Data Set 4:
# items = (
# ("Item 1", 12, 24), ("Item 2", 7, 13), ("Item 3", 11, 23), ("Item 4", 8, 15), ("Item 5", 9, 16),
# )
# cap = 26
#
# # Data Set 5:
# items = (
# ("Item 1", 56, 50), ("Item 2", 59, 50), ("Item 3", 80, 64), ("Item 4", 64, 46), ("Item 5", 75, 50), ("Item 6", 17, 5),
# )
# cap = 190
#
# # Data Set 6:
# items = (
# ("Item 1", 31, 70), ("Item 2", 10, 20), ("Item 3", 20, 39), ("Item 4", 19, 37), ("Item 5", 4, 7), ("Item 6", 3, 5), ("Item 7", 6, 10),
# )
# cap = 50
#
# # Data Set 7:
# items = (
# ("Item 1", 25, 350), ("Item 2", 35, 400), ("Item 3", 45, 450), ("Item 4", 5, 20), ("Item 5", 25, 70), ("Item 6", 3, 8), ("Item 7", 2, 5), ("Item 8", 2, 5),
# )
# cap = 104
#
# # Data Set 8:
# items = (
# ("Item 1", 41, 442), ("Item 2", 50, 525), ("Item 3", 49, 511), ("Item 4", 59, 593), ("Item 5", 55, 546), ("Item 6", 57, 564), ("Item 7", 60, 617),
# )
# cap = 170
# Data Set 9:
# items = (
# ("Item 1", 70, 135), ("Item 2", 73, 139), ("Item 3", 77, 149), ("Item 4", 80, 150), ("Item 5", 82, 156), ("Item 6", 87, 163), ("Item 7", 90, 173), ("Item 8", 94, 184), ("Item 9", 98, 192), ("Item 10", 106, 201), ("Item 11", 110, 210), ("Item 12", 113, 214), ("Item 13", 115, 221), ("Item 14", 118, 229), ("Item 15", 120, 240),
# )
# cap = 750
# Data Set 10:
# items = (
# ("Item 1", 382745, 825594), ("Item 2", 799601, 1677009), ("Item 3", 909247, 1676628), ("Item 4", 729069, 1523970), ("Item 5", 467902, 943972), ("Item 6", 44328, 97426), ("Item 7", 34610, 69666), ("Item 8", 698150, 1296457), ("Item 9", 823460, 1679693), ("Item 10", 903959, 1902996), ("Item 11", 853665, 1844992), ("Item 12", 551830, 1049289), ("Item 13", 610856, 1252836), ("Item 14", 670702, 1319836), ("Item 15", 488960, 953277), ("Item 16", 951111, 2067538), ("Item 17", 323046, 675367), ("Item 18", 446298, 853655), ("Item 19", 931161, 1826027), ("Item 20", 31385, 65731), ("Item 21", 496951, 901489), ("Item 22", 264724, 577243), ("Item 23", 224916, 466257), ("Item 24", 169684, 369261),
# )
# cap = 6404180
from operator import itemgetter
def knapsack(items, cap):
    """Solve the 0/1 knapsack problem by dynamic programming.

    items -- iterable of (name, weight, value) triples
    cap   -- knapsack weight capacity (non-negative int)

    Returns the maximum total value achievable within the capacity.
    (The original computed the optimum but returned None and only
    printed it; it also carried dead aliased lists and debug prints.)
    """
    items = tuple(items)
    if not items or cap <= 0:   # nothing to pack or no room: optimum is 0
        return 0
    # Split the triples into parallel weight/value lists (names unused here).
    _, wt, val = [list(col) for col in zip(*items)]
    n = len(val)
    # K[i][w] = best value using the first i items with capacity w.
    K = [[0] * (cap + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, cap + 1):
            if wt[i - 1] <= w:
                # Either take item i-1 (gain its value, pay its weight) or skip it.
                K[i][w] = max(val[i - 1] + K[i - 1][w - wt[i - 1]], K[i - 1][w])
            else:
                K[i][w] = K[i - 1][w]
    return K[n][cap]
# Run the solver on the data set selected above and report elapsed time.
print(knapsack(items,cap))
# for x in range(cap+1):
# print(x)
#
# for x in range(len(items)+1):
# print(x)
# from operator import itemgetter
# items = sorted(items, key=itemgetter(2))
# it, wt, val = [list(item) for item in zip(*items)]
# print(val)
# Determine ending time
end = time.time()
# Print total time.
print("For a total time in seconds of ")
print(end - start)
#!/usr/bin/env python
# distutils packaging script for the "Cheap Drives" scraper script.
from distutils.core import setup
setup(name='Cheap Drives',
      version='0.2',
      description='Finds cheap hard drives',
      author='John O\'Connor',
      author_email='tehjcon@gmail.com',
      scripts=['cheapdrives']
      )
|
# Classify a single typed-in lowercase letter as vowel or consonant.
letter = input()
if letter in ("a", "e", "i", "o", "u"):
    print("Vowel")
else:
    print("Consonant")
|
import torch
from torchvision import transforms
from torch.autograd.variable import Variable
from torchvision.utils import make_grid
def noise(batch_size, n_features, device='cuda'):
    """Return a (batch_size, n_features) tensor of standard-normal noise.

    The deprecated ``Variable`` wrapper was removed: on the PyTorch API
    this file already uses (``.to(device)``, 0.4+) plain tensors behave
    identically.
    """
    return torch.randn(batch_size, n_features).to(device)
def make_ones(batch_size, device='cuda'):
    """Return a (batch_size, 1) tensor of ones: 'real' labels for the discriminator.

    Deprecated ``Variable`` wrapper removed (no-op on modern PyTorch).
    """
    return torch.ones(batch_size, 1).to(device)
def make_zeros(batch_size, device='cuda'):
    """Return a (batch_size, 1) tensor of zeros: 'fake' labels for the discriminator.

    Deprecated ``Variable`` wrapper removed (no-op on modern PyTorch).
    """
    return torch.zeros(batch_size, 1).to(device)
# Preprocessing pipeline: PIL image/ndarray -> float tensor in [0, 1],
# then normalized to [-1, 1] to match a tanh-activated generator output.
tanh_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,),(0.5,))
])
def train_discriminator(discriminator, d_opt, loss, real_data, fake_data):
    """One discriminator step: push real scores toward 1, fake scores toward 0.

    Returns the summed real+fake loss (useful for logging).
    """
    d_opt.zero_grad()
    # Real branch: the discriminator should label genuine samples 1.
    real_loss = loss(discriminator(real_data), make_ones(len(real_data)))
    real_loss.backward()
    # Fake branch: the discriminator should label generated samples 0.
    fake_loss = loss(discriminator(fake_data), make_zeros(len(fake_data)))
    fake_loss.backward()
    d_opt.step()
    return fake_loss + real_loss
def train_generator(discrimnator, fake_data, g_opt, loss):
    """One generator step: reward fakes that the discriminator scores as real."""
    g_opt.zero_grad()
    g_loss = loss(discrimnator(fake_data), make_ones(len(fake_data)))
    g_loss.backward()
    g_opt.step()
    return g_loss
def train(dataloader,
          generator,
          discrimator,
          opt_g,
          opt_d,
          loss,
          epochs,
          k,
          device,
          noise_dim=100):
    """Adversarial (GAN) training loop.

    dataloader -- yields (images, labels) batches; labels are ignored
    k          -- discriminator updates per generator update
    noise_dim  -- size of the generator's latent input; the original called
                  noise(batch_size) without the required n_features argument
                  (a TypeError). Default 100 — TODO confirm it matches the
                  generator's input layer.

    Prints per-epoch average losses and shows a preview grid every 50 epochs.
    """
    g_loss_list = []
    d_loss_list = []
    # Fixed noise so progress images are comparable across epochs.
    test_noise = noise(64, noise_dim, device)
    generator.train()
    discrimator.train()
    for epoch in range(epochs):
        g_loss_sum = 0.0
        d_loss_sum = 0.0
        num_batches = 0
        for data in dataloader:
            images, _ = data
            batch_size = len(images)
            num_batches += 1
            # k discriminator steps per generator step; detach() so the
            # generator receives no gradient from the discriminator pass.
            for _ in range(k):
                fake_data = generator(noise(batch_size, noise_dim, device)).detach()
                real_data = images.to(device)
                d_loss_sum += train_discriminator(discrimator, opt_d, loss, real_data, fake_data)
            fake_data = generator(noise(batch_size, noise_dim, device))
            g_loss_sum += train_generator(discrimator, fake_data, opt_g, loss)
        images = generator(test_noise).cpu().detach()
        images = make_grid(images)
        if epoch % 50 == 0:
            # BUG FIX: ToPILImage is a transform class — it must be
            # instantiated first, then applied to the image tensor.
            transforms.ToPILImage()(images).show()
        # BUG FIX: average over the number of batches, not the last loop
        # index (which was off by one and zero for single-batch loaders).
        g_loss_list.append(g_loss_sum / num_batches)
        d_loss_list.append(d_loss_sum / num_batches)
        print(f'Epoch {epoch}, G Loss {g_loss_sum / num_batches} D Loss {d_loss_sum / num_batches}')
|
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
import time
import json
from selenium import webdriver
"""
In this case, playstation new and upcoming link is a dynamic page so I used Selenium. Scrapy is fast, so all crawled links will be used by it.
"""
# Crawl the PlayStation store listing pages with a headless browser and
# dump every product link to a JSON file.
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0'
options = Options()
options.add_argument("--no-sandbox")
# BUG FIX: the user-agent was passed as the literal text '{user_agent}';
# an f-string is needed to interpolate the value.
options.add_argument(f'user-agent={user_agent}')
options.add_argument("--headless")
path = '/usr/bin/chromedriver'
urls = ['https://store.playstation.com/en-us/category/a00d4d61-f6bc-4a00-bb68-ff0bb43fcc33/1', 'https://store.playstation.com/en-us/category/a00d4d61-f6bc-4a00-bb68-ff0bb43fcc33/2']
driver = webdriver.Chrome(path, chrome_options=options)
game_links = []
for url in urls:
    driver.get(url)
    time.sleep(10)  # crude wait for the dynamically rendered tiles
    links = driver.find_elements_by_xpath('//a[@class="ems-sdk-product-tile-link"]')
    for link in links:
        game_link = link.get_attribute('href')
        print(game_link)
        # BUG FIX: append to the game_links list; the original called
        # .append on the string itself (AttributeError), so the JSON
        # output was always empty.
        game_links.append(game_link)
with open('ps_new_games.json', 'w+') as new:
    json.dump(game_links, new, indent=4)
driver.quit()
|
#!/usr/bin/python3
'''
Created on 2 Sep 2013
@author: dyc
'''
import sys
import os
import subprocess
from gi.repository import Gtk, Pango
print("python path: %s" % (sys.path))
import mesg_pb2
class PosTransfer:
    """Maps byte addresses of the inspected binary to character offsets
    inside the xxd hex-dump text held by a GtkTextBuffer."""
    def __init__(self, textbuffer):
        self.textbuffer = textbuffer
        # for hexdump: leading address and two SPACE
        # for xxd: leading address and a comma
        self.ADDRLEN = 9
        # for hexdump: length of tailing |... BINARY CONTENTS ...|\n
        # 2 + 16 + 1 = 19
        # for xxd: length of tailing BINARY CONTENTS \n
        # 16 + 1 = 17
        self.CONTLEN = 17
        self.BYTES_PER_LINE = 16
        # Characters per rendered dump line; NOTE(review): the two "+ 1"
        # terms appear to be the mid-line gap added by decorateText() and
        # the trailing newline — confirm against the actual xxd output.
        self.LINELEN = (self.ADDRLEN + self.CONTLEN + self.BYTES_PER_LINE * 3 + 1 + 1)
        self.HALF = 8
    def in_first_half(self, mod):
        # NOTE(review): despite the name this returns 1 for bytes in the
        # SECOND 8-byte group (mod >= 8), compensating for the extra space
        # decorateText() inserts in the middle of each line.
        return 1 if mod >= self.HALF else 0
    def addr_to_off(self, addr):
        # Convert a byte address into its character offset in the dump text.
        line_number = addr//self.BYTES_PER_LINE
        line_offset = self.ADDRLEN + addr%self.BYTES_PER_LINE*3 + self.in_first_half(addr%self.BYTES_PER_LINE)
        print("add[%d] at line[%d] offset[%d] ---> [%d]" % (addr, line_number, addr%self.BYTES_PER_LINE, line_number * self.LINELEN + line_offset))
        return (line_number * self.LINELEN + line_offset)
    def lower_bound (self, addr):
        # First byte address rendered on the dump line containing addr.
        # print("addr %d get lower bound %d" % (addr, (addr//self.BYTES_PER_LINE) * self.BYTES_PER_LINE) )
        return (addr//self.BYTES_PER_LINE) * self.BYTES_PER_LINE
    def upper_bound (self, addr):
        # Last byte address rendered on the dump line containing addr.
        # print("addr %d get upper bound %d" % (addr, self.lower_bound(addr) + self.BYTES_PER_LINE - 1))
        return self.lower_bound(addr) + self.BYTES_PER_LINE - 1
    def in_same_line (self, addr_start, addr_end):
        # True when both addresses render on the same dump line.
        return True if (addr_end//self.BYTES_PER_LINE == addr_start//self.BYTES_PER_LINE) else False
    # the parameter is a [) address range
    # return : a list of [addr, addr]
    def split_range(self, range_start, range_end):
        # Split the half-open [start, end) range into inclusive (start, end)
        # pairs such that each pair lies entirely on one dump line.
        range_end -= 1
        addr_range = []
        while range_start <= range_end and self.in_same_line(range_start, range_end) == False :
            # print("split line (%d %d)" %(range_start, range_end) )
            addr_range.append( (range_start, self.upper_bound(range_start)) )
            range_start = self.upper_bound(range_start) + 1
        addr_range.append( (range_start, range_end) )
        # for a in addr_range:
        # print("addr: %s" %(a.__str__()) )
        return addr_range
    # the parameter is a [) address range
    def trans(self, range_start, range_end):
        # Return a list of (start_iter, end_iter) GtkTextIter pairs covering
        # the given byte range; iter2 is advanced by 2 to include the last
        # byte's two hex digits.
        addr_range = self.split_range(range_start, range_end)
        ret = []
        for r in addr_range :
            iter1 = self.textbuffer.get_start_iter().copy()
            iter2 = self.textbuffer.get_start_iter().copy()
            iter1.forward_chars( self.addr_to_off(r[0]))
            iter2.forward_chars( self.addr_to_off(r[1]) + 2)
            ret.append( (iter1, iter2) )
        return ret
class InspectorMainWin(Gtk.Window):
    """Main window: shows an xxd hex dump of an ELF binary on the left and
    the readelf header summary on the right, with ELF/section/program
    header byte ranges highlighted via text tags."""
    def loadmesg(self):
        # Load the protobuf header description produced by an external tool
        # into self.mesg; assumes "mid.out" exists in the working directory.
        self.mesg = mesg_pb2.HeaderMesg()
        f = open("mid.out", "rb")
        self.mesg.ParseFromString(f.read())
        f.close()
    def filltext(self, text):
        # Pad hexdump's '*' repeat-marker lines to the full line length so
        # address-to-offset math stays valid.
        return text.replace("*\n", '*' + ' ' * (self.transfer.LINELEN - 2) + '\n')
    # add a whitespace in the middle of 16 bytes
    def decorateText(self, text):
        # NOTE(review): split("\n") yields a final empty element, so this
        # appends one extra " \n" line at the end — confirm it is harmless.
        new_text=''
        for line in text.split("\n"):
            new_text += line[0:33] + ' ' + line[33:] + '\n'
        return new_text
    def getStrTab(self):
        # Locate the string-table section (type SHT_STRTAB == 3); currently
        # only logs the find — the assignment is left unfinished.
        for sh in self.mesg.secHeaders:
            print("strtab %x\n" % sh.type)
            if sh.type == 3:
                print("found strtab")
                # self.strtab =
    def deliminateSections(self):
        # Mark the byte immediately before each section's start offset so
        # section boundaries stand out in the dump.
        for secheader in self.mesg.secHeaders:
            print("deliminate header offset: %s \n"% hex(secheader.offset))
            pos = self.transfer.addr_to_off(secheader.offset)
            textiter = self.textbuffer.get_start_iter().copy()
            textiter.forward_chars(pos-1)
            textiter2 = textiter.copy()
            textiter2.forward_char()
            self.textbuffer.apply_tag(self.secDelimitTag, textiter, textiter2)
    def memberInit(self):
        # Build the two text views and the highlight tags used later.
        self.textview = Gtk.TextView()
        self.textbuffer = self.textview.get_buffer()
        self.textview2 = Gtk.TextView()
        self.textview2.set_wrap_mode(Gtk.WrapMode.CHAR)
        self.textbuffer2 = self.textview2.get_buffer()
        self.elfhead_tag = self.textbuffer.create_tag("elf head", foreground = "red")
        self.sechead_tag = self.textbuffer.create_tag("section head", foreground = "blue")
        self.sechead_tag_ = self.textbuffer.create_tag("sec head head", foreground = "blue", background = "yellow")
        self.exec_tag = self.textbuffer.create_tag("exec", background = "blue")
        self.proghead_tag = self.textbuffer.create_tag("program head", foreground = "green")
        self.proghead_tag_ = self.textbuffer.create_tag("prog head head", foreground = "green", background = "yellow")
        self.secDelimitTag = self.textbuffer.create_tag("section delimiter", background = "blue")
        self.transfer = PosTransfer(self.textbuffer)
    def deployWidget(self):
        # Lay out: hex dump | spacer label | readelf summary, side by side.
        self.set_default_geometry(400, 400);
        self.set_default_size(1000, 400)
        self.set_position(Gtk.WindowPosition.CENTER)
        self.grid = Gtk.Grid()
        self.add(self.grid)
        scrolledwindow = Gtk.ScrolledWindow()
        scrolledwindow.set_hexpand(True)
        scrolledwindow.set_vexpand(True)
        self.grid.attach(scrolledwindow, left=0, top=0, width=1, height=1)
        scrolledwindow.add(self.textview)
        label = Gtk.Label()
        label.set_text("  ")
        self.grid.attach_next_to(label, scrolledwindow, Gtk.PositionType.RIGHT, width=1, height=1)
        scrolledwindow2 = Gtk.ScrolledWindow()
        scrolledwindow2.set_hexpand(True)
        self.grid.attach_next_to(scrolledwindow2, label, Gtk.PositionType.RIGHT, width=1, height=1)
        scrolledwindow2.add(self.textview2)
        button = Gtk.Button(label="wrap mode")
        # self.grid.attach_next_to(button, scrolledwindow2, Gtk.PositionType.BOTTOM, width=1, height=1)
    def __init__(self, filename):
        # Populate both panes from external tools (xxd and readelf) and
        # apply the header highlights.
        Gtk.Window.__init__(self, title="Inspector")
        self.loadmesg()
        self.getStrTab();
        self.memberInit();
        # cmd = "hexdump -C "+ filename + " | cut -c 11- | cut -d' ' -f1-18"
        # cmd = "hexdump -C "+ filename
        cmd = "xxd -g1 "+ filename
        fileContent = subprocess.check_output(cmd, shell=True)
        self.textbuffer.set_text(self.decorateText(fileContent.decode("ascii")))
        cmd = "readelf -e "+ filename
        fileContent = subprocess.check_output(cmd, shell=True)
        self.textbuffer2.set_text(fileContent.decode("ascii"))
        self.deployWidget()
        # Highlight the ELF file header (first ehsize bytes) in red.
        iterlist = self.transfer.trans(0, self.mesg.elfHeader.ehsize)
        for it in iterlist:
            # print("apply tag %s" %(it.__str__()))
            self.textbuffer.apply_tag(self.elfhead_tag, it[0], it[1])
        self.deliminateSections()
        # self.showExec(self.mesg.progHeaders, self.exec_tag)
        self.apply_headers(self.sechead_tag, self.mesg.secHeaders, self.sechead_tag_)
        self.apply_headers(self.proghead_tag, self.mesg.progHeaders, self.proghead_tag_)
    def showExec (self, headers, tag):
        # Highlight segments whose flags include the executable bit (PF_X).
        for header in headers:
            if (header.flags & 1):
                begin = header.offset
                end = begin + header.filesz
                iterlist = self.transfer.trans(begin, end)
                for it in iterlist:
                    self.textbuffer.apply_tag(tag, it[0], it[1])
    def apply_headers(self, firsttag, headers, tag_):
        # Tag each header's byte range; the first two hex digits of the
        # range get the stronger "head head" tag as a visual anchor.
        for header in headers:
            # print("deal header: %s in apply" % header.__str__())
            iterlist = self.transfer.trans(header.begin, header.end)
            first = True
            for it in iterlist:
                if first == True:
                    it_head = self.textbuffer.get_start_iter().copy()
                    it_head.forward_chars(it[0].get_offset())
                    it_head2 = it_head.copy()
                    it_head2.forward_chars(2)
                    self.textbuffer.apply_tag(tag_, it_head, it_head2)
                    it[0].forward_chars(3)
                    first = False
                self.textbuffer.apply_tag(firsttag, it[0], it[1])
            # break
if __name__ == '__main__':
    # Inspect the binary named on the command line, defaulting to ./a.out.
    if (len(sys.argv)<2):
        filename = "a.out"
    else:
        filename = sys.argv[1]
    win = InspectorMainWin(filename)
    win.connect("delete-event", Gtk.main_quit)
    win.show_all()
    Gtk.main()
|
def compute_last_word(input_chars):
    """Build the lexicographically last word obtainable by adding each
    character of *input_chars*, in order, to the front or back of the word.

    Greedy rule: a character >= the current first character goes in front
    (making the word larger); anything smaller is appended at the end.
    """
    word = ""
    for ch in input_chars:
        if word and ch < word[0]:
            word = word + ch
        else:
            word = ch + word
    return word
if __name__ == "__main__":
N = input()
result = []
for i in xrange(N):
input_chars = raw_input()
result.append(compute_last_word(input_chars))
for i in xrange(N):
print "Case #"+str(i+1)+": "+result[i]
|
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
    """Build the ROS 2 launch description for the crabe localization stack.

    Starts the localizer and calibration nodes, both running on simulated
    time (they read /clock instead of the wall clock).
    """
    ld = LaunchDescription()
    # Localization node.
    loc_node = Node(
        package='crabe_localization',
        executable='localizer',
        parameters=[{'use_sim_time': True}]
    )
    # Sensor calibration node.
    cal_node = Node(
        package='crabe_localization',
        executable='calibration',
        parameters=[{'use_sim_time': True}]
    )
    ld.add_action(loc_node)
    ld.add_action(cal_node)
    # Return the LaunchDescription object containing every node to launch.
    return ld
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import os
# In[2]:
def load_train(train_path):
    """Load a space-separated "word state" training file into a DataFrame.

    Blank lines (sentence separators) are skipped. Returns a DataFrame
    with columns ['word', 'state'].
    """
    # Context manager guarantees the handle is closed (the original leaked it).
    with open(train_path, encoding="utf8") as f:
        lines = [line.strip('\n').split(' ') for line in f if line != '\n']
    return pd.DataFrame(lines, columns=['word', 'state'])
# NOTE(review): module-level side effect — reads SG/train at import time;
# 'ans' does not appear to be used afterwards.
ans= load_train('SG/train')
# In[3]:
def load_test(test_path):
    """Load a one-word-per-line test file into a single-column DataFrame.

    Blank lines (sentence separators) are skipped. Returns a DataFrame
    with the single column ['word'].
    """
    # Context manager guarantees the handle is closed (the original leaked it).
    with open(test_path, encoding='utf8') as f:
        lines = [line.strip('\n') for line in f if line != '\n']
    return pd.DataFrame(lines, columns=['word'])
# In[4]:
def gen_emission_param_table(file_path):
    """Build the MLE emission table e(x|y) = Count(y->x) / Count(y) from a
    training file of "word state" lines.

    Returns (emission_param_table, emission_count_table, y_count_dic).
    NOTE(review): iterating the whole corpus twice with iterrows() and
    chained indexing (table[word][state]) is very slow on large files —
    consider vectorising with groupby, and .loc for assignment.
    """
    #input_data = load_train('EN/train')
    input_data = load_train(file_path)
    input_data.columns = ['word', 'state']
    unique_word_list = input_data.word.unique()
    unique_state_list = input_data.state.unique()
    data = {word:(np.zeros(len(unique_state_list))) for word in unique_word_list} # each word as a column, each column length is the number of unique y state, all entry are 0
    # https://chrisalbon.com/python/data_wrangling/pandas_list_unique_values_in_column/
    emission_count_table = pd.DataFrame(data, index = unique_state_list) # transform the dictionary into colums with index as name of each state(y),
    # columns as each word of x, all entries are 0
    y_count_dic = {state:0 for state in unique_state_list} # y_count_dic stores Count(y) in a dictionary
    emission_param_table = pd.DataFrame(data, index = unique_state_list) # emission_count_table stores all Count(y -> x) in a dataframe
    # emission_param_table stores all the emission parameters in a dataframe
    # First pass: accumulate Count(y -> x) and Count(y).
    print("updating emission_count_table and y_count_dic")
    for index, row in input_data.iterrows():
        word = row['word']
        state = row['state']
        #print(index, word, state)
        #print(index)
        emission_count_table[word][state] += 1
        y_count_dic[state]+=1
    # Second pass: normalise counts into probabilities.
    print("updating emission_param_table")
    for index, row in input_data.iterrows():
        word = row['word']
        state = row['state']
        emission_param_table[word][state] = emission_count_table[word][state] / y_count_dic[state]
    return emission_param_table, emission_count_table, y_count_dic
# In[5]:
def get_emission_parameter(emission_param_table, x, y):
    """Return e(x|y) from the table, or None when word *x* was never seen."""
    if x not in emission_param_table.columns:
        # Unknown word: the table has no column for it.
        return None
    return emission_param_table[x][y]
# ### Introducing #UNK# to the function
# In[8]:
def gen_emission_param_table_UNK(training_data_path, test_data_path):
    """Build a smoothed emission table with an extra 'UNK' pseudo-word.

    Words that appear in the test data but not in the training data are
    collected in *unk_list*; each state's emission row is smoothed by
    k = 0.5, and the mass k/(Count(y)+k) is assigned to the UNK column.

    Returns (emission_param_table, unk_list).
    """
    training_data = load_train(training_data_path)
    unique_word_list_training = training_data.word.unique()
    unique_state_list_training = training_data.state.unique()
    test_data = load_test(test_data_path)
    unique_word_list_test = test_data.word.unique()
    unk_list = np.setdiff1d(unique_word_list_test, unique_word_list_training) # return the list of words in test data but not in training data
    #non_unk_list_test = np.setdiff1d(unique_word_list_test, unk_list) # return the list of non UNK words in test data
    data = {word:(np.zeros(len(unique_state_list_training))) for word in unique_word_list_training}
    data["UNK"] = np.zeros(len(unique_state_list_training)) # add a UNK column to the table
    emission_count_table = pd.DataFrame(data, index = unique_state_list_training) # transform the dictionary into colums with index as name of each state(y),
    # columns as each word of x, all entries are 0
    y_count_dic = {state:0 for state in unique_state_list_training} # y_count_dic stores Count(y) in a dictionary
    emission_param_table = pd.DataFrame(data, index = unique_state_list_training) # emission_count_table stores all Count(y -> x) in a dataframe
    # emission_param_table stores all the emission parameters in a dataframe
    # First pass: accumulate Count(y) and, for known words, Count(y -> x).
    print("updating emission_count_table and y_count_dic")
    for index, row in training_data.iterrows():
        word = row['word']
        state = row['state']
        #print(index, word, state)
        #print(index)
        y_count_dic[state]+=1
        if word not in unk_list:
            emission_count_table[word][state] += 1
    # Second pass: normalise with add-k smoothing in the denominator.
    print("updating emission_param_table")
    k = 0.5
    for index, row in training_data.iterrows():
        word = row['word']
        state = row['state']
        #print(index)
        if word not in unk_list:
            emission_param_table[word][state] = emission_count_table[word][state] / (y_count_dic[state] + k)
    for state in unique_state_list_training:
        emission_param_table['UNK'][state] = k/(y_count_dic[state] + k) # compute the UNK value for each state y
    #print("unl_list is: ",unk_list)
    #print("y_count_dic is: ", y_count_dic)
    return emission_param_table, unk_list
# In[9]:
def get_emission_parameter_UNK(emission_param_table, unk_list, x, y):
    """Return e(x|y), mapping words in *unk_list* onto the shared UNK column.

    Returns None for words absent from both the table and the UNK list.
    """
    if x in unk_list:
        return emission_param_table['UNK'][y]
    if x not in emission_param_table.columns:
        # Word was never seen during training and is not flagged as UNK.
        return None
    return emission_param_table[x][y]
# In[10]:
def get_argmax_y(emission_param_table, unk_list, x):
    """Return the state y maximising e(x|y); unknown words use the UNK column."""
    column = 'UNK' if x in unk_list else x
    return emission_param_table[column].idxmax()
# In[11]:
def gen_state(input_path, output_path, emission_param_table, unk_list):
    """Tag every word of *input_path* with its most likely emission state.

    Writes "word state" lines to *output_path*; blank lines (sentence
    boundaries) are copied through unchanged.
    """
    with open(input_path, "r", encoding="utf8") as src, open(output_path, 'w', encoding="utf8") as dst:
        for raw in src.readlines():
            if raw == '\n':
                # Sentence boundary: pass the blank line straight through.
                dst.write(raw)
                continue
            token = raw.strip()
            best_state = get_argmax_y(emission_param_table, unk_list, token)
            dst.write(f'{token} {best_state}\n')
print("Generating output for EN....")
emission_param_table_UNK_test , unk_list = gen_emission_param_table_UNK('EN/train', 'EN/dev.in')
gen_state('EN/dev.in','EN/dev.p2.out', emission_param_table_UNK_test, unk_list)
print("Generating output for CN.....")
emission_param_table_UNK_test_CN , unk_list_CN = gen_emission_param_table_UNK('CN/train', 'CN/dev.in')
gen_state('CN/dev.in','CN/dev.p2.out', emission_param_table_UNK_test_CN, unk_list_CN)
print("Generating output for SG.....")
emission_param_table_UNK_test_SG , unk_list_SG = gen_emission_param_table_UNK('SG/train', 'SG/dev.in')
gen_state('SG/dev.in','SG/dev.p2.out', emission_param_table_UNK_test_SG, unk_list_SG)
print("Evaluation result for EN is: ")
os.system('python EvalScript/evalResult.py EN/dev.out EN/dev.p2.out')
print("Evaluation result for CN is: ")
os.system("python EvalScript/evalResult.py SG/dev.out SG/dev.p2.out")
print("Evaluation result for SG is: ")
os.system("python EvalScript/evalResult.py SG/dev.out SG/dev.p2.out") |
# -*- coding: utf-8 -*-
# Re-export the pool deployment helpers under pool_-prefixed names.
from .deploy import deploy as pool_deploy
from .deploy import redeploy as pool_redeploy
from .deploy import undeploy as pool_undeploy
__all__ = ('pool_deploy', 'pool_redeploy', 'pool_undeploy')
import tornado.web
from customer import Customer
import json
class GetHandler(tornado.web.RequestHandler):
    """Tornado handler that serves the customer collection as JSON."""
    def initialize(self, customers):
        # Customer collection injected via the application's routing table.
        self.customers = customers
    def get(self):
        # Respond with the JSON representation of every customer.
        self.write(self.customers.json_list())
#!/usr/bin/env python
import numpy as np
from astropy.table import table
from mpdaf.obj import Cube
import argparse
parser = argparse.ArgumentParser(description='Create a muse whitelight image')
parser.add_argument('-f', metavar='MUSE datacube filename', type=str, help='name of the MUSE to catalog', required=True)
args = parser.parse_args()
print('Calculating median...')
# Collapse the spectral axis (axis 0) with a median to build the white-light image.
cube = Cube(args.f)
whitelight = cube.median(axis=0)
print('Writing...')
# Derive the output name from the input: *.fits -> *_WHITE.fits
savename = args.f
savename = savename.replace('.fits', '_WHITE.fits')
whitelight.write(savename)
print('Done')
#cube = Cube('J0333-4102_COMBINED_CUBE_MED_FINAL_vac_subtracted.fits')
#whitelight = cube.median(axis=0)
#print(whitelight.shape)
#whitelight.write('J0333-4102_COMBINED_SUBTRACTED_WHITE.fits') |
#============================================================================
#Name : test_configuration.py
#Part of : Helium
#Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
#All rights reserved.
#This component and the accompanying materials are made available
#under the terms of the License "Eclipse Public License v1.0"
#which accompanies this distribution, and is available
#at the URL "http://www.eclipse.org/legal/epl-v10.html".
#
#Initial Contributors:
#Nokia Corporation - initial contribution.
#
#Contributors:
#
#Description:
#===============================================================================
""" test configuration """
# pylint: disable=R0201
import logging
import StringIO
import unittest
import os
import tempfile
import sys
import configuration
_logger = logging.getLogger('test.configuration')
logging.basicConfig(level=logging.INFO)
class NestedConfigurationBuilderTest(unittest.TestCase):
""" Acceptance tests for NestedConfigurationBuilder of configuration.py """
def setUp(self):
""" Setup. """
config_file = open(os.path.join(os.environ['TEST_DATA'], 'data/config_test.cfg.xml'), 'r')
self._builder = configuration.NestedConfigurationBuilder(config_file)
def test_config_parsing(self):
"""A basic configuration can be parsed."""
config_set = self._builder.getConfiguration()
configs = config_set.getConfigurations()
assert len(configs) == 10
for config in configs:
print
for k in config.keys():
print k + ': ' + str(config[k])
assert configs[0]['A'] == 'foo'
assert configs[0]['B'] == 'child -> foo'
assert configs[0]['C'] == 'missing value test ${does_not_exist}'
assert configs[0]['node.content'].strip() == 'This is the value!'
assert configs[1]['A'] == 'foo'
assert configs[1]['B'] == 'parent: foo'
assert configs[1]['C'] == ['one', 'two']
assert 'C' in configs[1]
assert 'Z' not in configs[1]
configs = config_set.getConfigurations('spec.with.type')
assert len(configs) == 1
assert configs[0].type == 'test.type', "config.type must match 'test.type'."
configs = config_set.getConfigurations(type_='test.type')
assert len(configs) == 2
assert configs[0].type == 'test.type', "config.type must match 'test.type'."
configs = config_set.getConfigurations(name='test_spec', type_='test.type')
assert len(configs) == 2
assert configs[0].type == 'test.type', "config.type must match 'test.type'."
def test_append(self):
"""A child value can be appended to a parent value."""
configs = self._builder.getConfigurations()
config = configs[4]
assert config['A'] == ['foo', 'bar']
def test_parent_interpolated_by_child(self):
""" A child value can be interpolated into a parent template. """
configs = self._builder.getConfigurations()
parent_config = configs[5]
child_config = configs[6]
assert parent_config['template'] == 'value -> from parent'
assert child_config['template'] == 'value -> from child'
def test_property_escaping(self):
""" Property values can be escaped in the values of other properties. """
config_text = """
<build>
<config name="test_spec">
<set name="A" value="foo"/>
<set name="B" value="A = ${A}"/>
</config>
</build>"""
builder = configuration.NestedConfigurationBuilder(StringIO.StringIO(config_text))
config = builder.getConfiguration().getConfigurations()[0]
print config['B']
#assert configs[1]['C'] == ['one', 'two']
def test_any_root_element(self):
""" Any root element name can be used. """
config_text = """
<someConfigData>
<config name="test_spec">
<set name="A" value="foo"/>
</config>
</someConfigData>"""
builder = configuration.NestedConfigurationBuilder(StringIO.StringIO(config_text))
config = builder.getConfiguration().getConfigurations()[0]
assert config['A'] == 'foo'
def test_list_templating(self):
""" Testing list templating. """
configs = self._builder.getConfigurations('test_list_config1')
# should return only one config.
assert len(configs) == 1
_logger.debug(configs[0].get_list('include', []))
result = configs[0].get_list('include', [])
result.sort()
print result
assert len(result) == 3
assert result == [u'bar1_config1', u'bar2_config1', u'foo_config1']
def test_list_templating2(self):
""" Testing list templating 2. """
configs = self._builder.getConfigurations('test_list_config2')
# should return only one config.
assert len(configs) == 1
_logger.debug(configs[0].get_list('include', []))
result = configs[0].get_list('include', [])
result.sort()
print result
assert len(result) == 3
assert result == [u'bar1_config2', u'bar2_config2', u'foo_config2']
def test_append_list(self):
""" Testing if append handles the list correctly..."""
config_text = """
<build>
<config name="prebuild_zip" abstract="true">
<set name="exclude" value="**/_ccmwaid.inf" />
<set name="exclude" value="build/**" />
<set name="exclude" value="config/**" />
<set name="exclude" value="ncp_sw/**" />
<set name="exclude" value="ppd_sw/**" />
<set name="exclude" value="psw/**" />
<set name="exclude" value="tools/**" />
<set name="include" value="foo/**" />
<config>
<set name="root.dir" value="X:/rootdir" />
<set name="name" value="PF5250_200832_internal_code" />
<set name="include" value="**/internal/**" />
<set name="release.filters" value="tsrc" />
<set name="release.default" value="false" />
</config>
<config>
<set name="root.dir" value="X:/rootdir" />
<set name="name" value="PF5250_200832_doc" />
<append name="include" value="**/doc/**" />
<set name="include" value="**/docs/**" />
<append name="exclude" value="**/internal/**" /> <!-- set changed to append -->
<set name="release.filters" value="tsrc" />
<set name="release.default" value="false" />
</config>
</config>
</build>
"""
builder = configuration.NestedConfigurationBuilder(StringIO.StringIO(config_text))
configs = builder.getConfigurations()
config = configs[1]
print config['exclude']
print config['include']
exclude_match = [u'**/_ccmwaid.inf', u'build/**', u'config/**',
u'ncp_sw/**', u'ppd_sw/**', u'psw/**', u'tools/**',
u'**/internal/**']
exclude_result = config['exclude']
exclude_match.sort()
exclude_result.sort()
assert len(config['include']) == 3
assert exclude_result == exclude_match
config = configs[0]
assert config['include'] == '**/internal/**'
assert len(config['exclude']) == 7
def test_writeToXML(self):
""" To write the configurations into XML file. """
config_text = """
<build>
<config name="test_spec">
<set name="A" value="foo"/>
<set name="B" value="A = ${A}"/>
<config name="test_spec_1">
<set name="A" value="foo"/>
<set name="B" value="A = ${A}"/>
</config>
<config name="test_spec_2">
<set name="A" value="foo"/>
<set name="B" value="A = ${A}"/>
<config name="test_spec_3">
<set name="A" value="foo"/>
<set name="B" value="A = ${A}"/>
</config>
</config>
</config>
</build>"""
builder = configuration.NestedConfigurationBuilder(StringIO.StringIO(config_text))
configSet = builder.getConfiguration()
configs = configSet.getConfigurations('test_spec_1')
(_, outputFile) = tempfile.mkstemp('.tmp', 'zip_test')
builder.writeToXML(outputFile, configs, 'test_spec_1')
builder = configuration.NestedConfigurationBuilder(open(outputFile), 'r')
configSet = builder.getConfiguration()
configs = configSet.getConfigurations('test_spec_1')
config = configs[0]
assert config['A'] == 'foo'
builder = configuration.NestedConfigurationBuilder(StringIO.StringIO(config_text))
configSet = builder.getConfiguration()
configs = configSet.getConfigurations('test_spec')
(_, outputFile) = tempfile.mkstemp('.tmp', 'zip_test')
builder.writeToXML(outputFile, configs )
builder = configuration.NestedConfigurationBuilder(open(outputFile), 'r')
configSet = builder.getConfiguration()
configs = configSet.getConfigurations('test_spec')
config = configs[0]
assert config['B'] == 'A = foo'
class PropertiesConfigurationTest(unittest.TestCase):
    """ Test plain text configuration files. """
    def test_text_config(self):
        """ Basic text properties can be read. """
        config = configuration.PropertiesConfiguration(open(os.path.join(os.environ['TEST_DATA'], 'data/ant_config_test.txt'), 'r'))
        assert config['text.a'] == 'text.value.A'
        assert config['text.b'] == 'text.value.B'
    def test_text_config_store(self):
        """ Stored properties can be written out and read back. """
        config = configuration.PropertiesConfiguration(open(os.path.join(os.environ['TEST_DATA'], 'data/ant_config_test.txt'), 'r'))
        config['foo'] = "bar"
        (f_d, filename) = tempfile.mkstemp()
        f_file = os.fdopen(f_d, 'w')
        config.store(f_file)
        # BUG FIX: close (and thereby flush) the temp file before re-reading
        # it; otherwise buffered content may not be on disk yet.
        f_file.close()
        config = configuration.PropertiesConfiguration(open(filename))
        assert config['text.a'] == 'text.value.A'
        assert config['text.b'] == 'text.value.B'
        assert config['foo'] == 'bar'
|
from flask import Flask, render_template, request, jsonify, redirect, url_for, session
import jwt
from datetime import datetime, timedelta
import hashlib
import json
import re
from functools import wraps
from flask_socketio import SocketIO, emit, send
app = Flask(__name__)
# NOTE(review): HttpOnly disabled lets client-side JS read the session cookie
# (XSS exposure) — presumably intentional for this exercise; confirm.
app.config['SESSION_COOKIE_HTTPONLY'] = False
# NOTE(review): hard-coded secrets; load from environment for real deployments.
app.secret_key = b'SPARTA'
JWT_SECRET_KEY = 'SPARTA'
app.config['SECRET_KEY'] = JWT_SECRET_KEY
socketio = SocketIO(app)
#########################################################
# Decorators
#########################################################
def login_required(func):
    """Route decorator: only let requests with a valid 'mytoken' JWT cookie through.

    On a missing/expired/invalid token the user is redirected to the home page.
    Note: the try block also encloses the wrapped call, so JWT errors raised
    inside the view itself would be redirected too.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        token = request.cookies.get("mytoken")
        try:
            payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=['HS256'])
            if "userId" in payload:
                return func(*args, **kwargs)
            else:
                # Valid token but no user id claim: bounce with a message.
                return redirect(url_for("home", msg="๋ก๊ทธ์ธ ๋จผ์ ํด์ฃผ์ธ์!"))
        except (jwt.ExpiredSignatureError, jwt.exceptions.DecodeError):
            return redirect(url_for("home"))
    return wrapper
@app.route('/')
def home():
    """Landing page: send already-authenticated users straight to /goods.

    BUG FIX: the original called jwt.decode() unguarded, so an expired or
    malformed 'mytoken' cookie raised and produced a 500 on the landing page
    (login_required handles the same errors; home() did not).  A bad token is
    now treated the same as no token.
    """
    token = request.cookies.get("mytoken")
    if token is not None:
        try:
            payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=['HS256'])
        except (jwt.ExpiredSignatureError, jwt.exceptions.DecodeError):
            payload = {}
        if "userId" in payload:
            return redirect(url_for("goods"))
    return render_template("index.html")
@app.route('/register')
def register():
    """Sign-up page (no auth required)."""
    return render_template("register.html")
@app.route('/goods')
@login_required
def goods():
    """Product list page (auth required)."""
    return render_template("goods.html")
@app.route('/detail')
@login_required
def detail():
    """Product detail page (auth required)."""
    return render_template("detail.html")
@app.route('/cart')
@login_required
def cart():
    """Shopping-cart page (auth required)."""
    return render_template("cart.html")
@app.route('/order')
@login_required
def order():
    """Order page (auth required)."""
    return render_template("order.html")
### API ###
def find_one(array, fil):
    """Return the first mapping in *array* whose entries match every
    key/value pair in *fil*; return None when nothing matches."""
    matches = (
        candidate for candidate in array
        if all(candidate.get(key) == value for key, value in fil.items())
    )
    return next(matches, None)
def find_all(array, fil):
    """Return every mapping in *array* whose entries match all key/value
    pairs in *fil* (empty filter matches everything)."""
    return [
        candidate for candidate in array
        if all(candidate.get(key) == value for key, value in fil.items())
    ]
@app.route('/api/goods')
@login_required
def get_goods():
    """List goods from static/goods.json, optionally filtered by ?category=."""
    category = request.args.get("category")
    goods = json.load(open("./static/goods.json", encoding="utf-8"))
    if category is not None:
        goods = [r for r in goods if r["category"] == category]
    return jsonify({"result": "success", "goods": goods})
@app.route('/api/goods/<int:goods_id>')
@login_required
def get_detail(goods_id):
    """Return one item from static/goods.json by its goodsId, or 404."""
    print(goods_id)
    result = find_one(json.load(open("./static/goods.json", encoding="utf-8")), {"goodsId":goods_id})
    if result is not None:
        return jsonify({"result": "success", "detail": result})
    else:
        return "item not found", 404
@app.route('/api/auth', methods=["POST"])
def sign_in():
    """Authenticate against static/users.json and hand back a 24h JWT.

    Passwords are compared as unsalted SHA-256 hexdigests (matches how
    sign_up stores them).
    """
    email = request.form["email"]
    password = request.form["password"]
    password_hash = hashlib.sha256(password.encode('utf-8')).hexdigest()
    print(email, password_hash)
    users = json.load(open("./static/users.json", encoding="utf-8"))
    user = find_one(users, {"email": email, "password": password_hash})
    if user is not None:
        print("signing in...")
        payload = {
            'userId': user["userId"],
            'exp': datetime.utcnow() + timedelta(seconds=60 * 60 * 24) # ๋ก๊ทธ์ธ 24์๊ฐ ์ ์ง
        }
        token = jwt.encode(payload, JWT_SECRET_KEY, algorithm='HS256')
        return jsonify({'result': 'success', 'token': token, 'nickname': user["nickname"]})
    else:
        return jsonify({'result': 'fail', 'msg': '์์ด๋/๋น๋ฐ๋ฒํธ๊ฐ ์ผ์นํ์ง ์์ต๋๋ค.'})
@app.route('/api/users', methods=["POST"])
def sign_up():
    """Validate a registration form (email format, uniqueness, password match).

    The built user record is only printed, not persisted — the insert_one()
    comment marks where a DB write would go.
    """
    nickname = request.form["nickname"]
    email = request.form["email"]
    password = request.form["password"]
    confirm_password = request.form["confirmPassword"]
    users = json.load(open("./static/users.json", encoding="utf-8"))
    if not re.search(r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$', email):
        return jsonify({"result": "fail", "msg": "์ด๋ฉ์ผ ํ์์ ํ์ธํด์ฃผ์ธ์."})
    if find_one(users, {"email": email}):
        return jsonify({"result": "fail", "msg": "์ด๋ฏธ ์กด์ฌํ๋ ์ด๋ฉ์ผ์
๋๋ค."})
    if password != confirm_password:
        return jsonify({"result": "fail", "msg": "๋น๋ฐ๋ฒํธ๊ฐ ์ผ์นํ์ง ์์ต๋๋ค."})
    # Stored hash matches the comparison done in sign_in (unsalted SHA-256).
    password_hash = hashlib.sha256(password.encode('utf-8')).hexdigest()
    doc = {
        "userId": len(users)+1,
        "email": email,
        "nickname": nickname,
        "password": password_hash
    }
    print(doc) # insert_one()
    return jsonify({"result": "success", "msg": "ํ์๊ฐ์
์ฑ๊ณต!"})
@app.route('/api/cart', methods=["GET"])
@login_required
def get_cart():
    """Return the current user's cart rows, enriched with goods details."""
    token = request.cookies.get("mytoken")
    payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=['HS256'])
    user_id = payload["userId"]
    print(user_id)
    carts = json.load(open("./static/carts.json", encoding="utf-8"))
    my_cart = find_all(carts, {"userId": user_id})
    goods = json.load(open("./static/goods.json", encoding="utf-8"))
    # Merge each cart row with the matching goods record in place.
    for item in my_cart:
        good = find_one(goods, {"goodsId": item["goodsId"]})
        item.update(good)
    return jsonify({"result": "success", "cart": my_cart})
@app.route('/api/goods/<int:goods_id>/cart', methods=["POST", "PATCH", "DELETE"])
@login_required
def change_cart(goods_id):
    """Add (POST), update (PATCH) or remove (DELETE) a cart entry.

    DELETE is handled (and returns) before the quantity form field is read,
    since DELETE requests carry no quantity.  Writes are only printed; the
    delete_one()/update_one() comments mark where DB calls would go.
    """
    token = request.cookies.get("mytoken")
    payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=['HS256'])
    user_id = payload["userId"]
    if request.method == 'DELETE':
        print(user_id, goods_id) # delete_one()
        carts = json.load(open("./static/carts.json", encoding="utf-8"))
        if find_one(carts, {"userId": user_id, "goodsId": goods_id}):
            return jsonify({"result": "success", "msg": "์ฅ๋ฐ๊ตฌ๋๋ฅผ ์์ ํ์ต๋๋ค."})
        else:
            return "์ฅ๋ฐ๊ตฌ๋์ ์กด์ฌํ์ง ์๋ ์ํ์
๋๋ค.", 400
    quantity = int(request.form["quantity"])
    if request.method == 'POST':
        print(user_id, goods_id, quantity) # update_one()
        return jsonify({"result": "success", "msg": "์ฅ๋ฐ๊ตฌ๋์ ๋ด์์ต๋๋ค."})
    # The else below pairs with this PATCH check; POST already returned above.
    if request.method == 'PATCH':
        print(user_id, goods_id, quantity) # update_one()
        return jsonify({"result": "success", "msg": "์ฅ๋ฐ๊ตฌ๋๋ฅผ ์์ ํ์ต๋๋ค."})
    else:
        return jsonify({"result": "fail", "msg": "์๋ชป๋ ๋ฉ์๋์
๋๋ค."})
@socketio.on('newOrder')
def new_order(data):
    """Socket.IO handler: broadcast one 'orderSomething' event per cart item.

    *data* is a JSON string of cart rows; the orderer is identified via the
    same 'mytoken' JWT cookie the HTTP routes use.
    """
    print('received json: ' + str(data))
    cart = json.loads(data)
    print(data)
    token = request.cookies.get("mytoken")
    payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=['HS256'])
    user_id = payload["userId"]
    print(user_id)
    users = json.load(open("./static/users.json", encoding="utf-8"))
    user = find_one(users, {"userId":user_id})
    for item in cart:
        print("sending...")
        # Broadcast to every connected client (namespace left disabled).
        emit("orderSomething",
             {"userName": user["nickname"], "goodsName": item["goodsName"]},
             # namespace="/goods",
             broadcast=True)
if __name__ == '__main__':
    # Run through Socket.IO's server (required for websocket support),
    # not Flask's built-in app.run().
    # app.run('0.0.0.0', port=5000, debug=True)
    socketio.run(app, host="0.0.0.0", port=5000, debug=True)
|
# Cracking the Code interview
# Interview question 1.7
# zero row,col of MxN matrix
def find_zero_row(matrix):
    """Return the indices of rows that contain at least one zero
    (any falsy entry counts)."""
    zero_rows = []
    for index, row in enumerate(matrix):
        if not all(row):
            zero_rows.append(index)
    return zero_rows
def find_zero_column(matrix):
    """Return the indices of columns containing a zero.

    Transposes the matrix with zip(*matrix) and reuses the row scan.
    """
    transposed = zip(*matrix)
    return find_zero_row(transposed)
def zero(matrix):
    """Zero out, in place, every row and column of *matrix* that contains a zero.

    BUG FIX: the original did ``rows[row] = [0] * len(matrix[0])``, writing the
    replacement row into the *list of row indices* instead of into the matrix,
    so rows were never actually zeroed.  The row/column detection is inlined
    here (same logic as find_zero_row/find_zero_column) so the fix is
    self-contained.

    Returns the mutated matrix for convenience.
    """
    # Record zero locations first, before any mutation.
    rows = [i for i, row in enumerate(matrix) if not all(row)]
    cols = [j for j, col in enumerate(zip(*matrix)) if not all(col)]
    # Zero out whole rows.
    for r in rows:
        matrix[r] = [0] * len(matrix[0])
    # Zero out whole columns.
    for c in cols:
        for row in matrix:
            row[c] = 0
    return matrix
|
import tensorflow as tf
from utils.dataset import Dataset
from utils.model import NNModel
# Training hyper-parameters and dataset locations, collected in one dict.
params = {
    "epochs" : 100,
    "batch_size" : 2,
    "image_shape" : (224, 224, 3),
    # Class-name -> label-id lists; some ids appear under several names
    # (e.g. 4 and 5 under both "eye" and "non-makeup") — TODO confirm
    # that overlap is intentional.
    "classes" : {"face" : [1, 6, 11, 12, 13], "brow" : [2, 3], "eye" : [2, 3, 4, 5], "lip" : [7, 9], "non-makeup" : [0, 4, 5, 8, 10],
                 "hair" : [10]},
    "logs_path" : 'logs\\01\\',
    "pretrained_model_path" : None,#'logs\\pretrained\\0100.ckpt',
    "train_dataset_path" : {'source' : r'.\\data\\source\\Train', 'reference' : r'.\\data\\reference\\Train'},
    "train_dataset_size" : [3450, 2447],
    "test_dataset_path" : [r'.\\data\\source\\Test', r'.\\data\\reference\\Test'],
}
if __name__ == "__main__":
    # Build the training dataset and model from params, then train.
    train_dataset = Dataset(params['train_dataset_path'], image_shape = params['image_shape'], classes = params['classes'], batch_size = params['batch_size'], dataset_size = params['train_dataset_size'], isTraining = True)
    model = NNModel(input_shape = params['image_shape'], logs_path = params['logs_path'], batch_size = params['batch_size'], classes = params['classes'])
    model.train(train_dataset, params["epochs"], pretrained_model_path = params['pretrained_model_path'])
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from graphene_django.views import GraphQLView
from .schema import schema
# Project URL configuration: admin, a GraphQL endpoint (with the GraphiQL
# browser UI enabled), and the online_school app's routes at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('graphql/', GraphQLView.as_view(graphiql=True, schema=schema)),
    path('', include('online_school.urls')),
]
if settings.DEBUG:
    # Debug-only additions: django-debug-toolbar and static file serving.
    import debug_toolbar
    urlpatterns = [
        path('__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
import sys
import torch
import torch.optim as optim
import aikit.utils
class State:
    """Gradient-accumulation bookkeeping.

    Attributes:
        steps: number of micro-steps to accumulate before a real optimizer step.
        step:  position within the current accumulation cycle (starts at 0).
    """

    def __init__(self, steps):
        self.steps = steps
        self.step = 0
# This is an inspired design from Horovod's `DistributedOptimizer`
def Optimizer(optimizer, steps):
    '''
    Wraps any valid PyTorch optimizer with Gradient Accumulation.

    A new class is created at runtime that inherits from the wrapped
    optimizer's class; `step()` only applies gradients every `steps` calls
    and `zero_grad()` only clears them at the start of each cycle, so
    gradients accumulate in between.  Accumulation state lives in
    `self.state['aikit']` (a `State`), which survives because
    `optim.Optimizer.state` is part of the base optimizer.
    '''
    class _GradientAccumulationOptimizer(optim.Optimizer):
        def __init__(self, steps, params):
            # Delegate to the wrapped optimizer class's __init__.
            self.__class__.__optimizer__.__init__(self, params) # TODO: Defaults -> ? (Add Defaults...!)
            self.state['aikit'] = State(steps)
            aikit.utils.log.debug('Wrapping \'%s\' PyTorch Optimizer with Gradient Accumulation of %d steps', optimizer.__class__.__name__, steps)
        def step(self, closure=None):
            # Only the last micro-step of each cycle applies the gradients.
            loss = None
            if self.aikit.step % self.aikit.steps == self.aikit.steps - 1:
                loss = self.__class__.__optimizer__.step(self, closure)
            else:
                if closure is not None:
                    # Still evaluate the closure so the caller gets a loss.
                    with torch.enable_grad():
                        loss = closure() # TODO: Make it better ....!
            self.aikit.step += 1
            return loss
        def zero_grad(self):
            # Only clear gradients at the start of an accumulation cycle.
            if self.aikit.step % self.aikit.steps == 0:
                self.__class__.__optimizer__.zero_grad(self)
        @property
        def aikit(self):
            # Accumulation State stored in the optimizer's state dict.
            return self.state['aikit']
    # the main idea is to dynamically create a class that has all the functionality of the passed optimizer
    # (this is done by inheriting it) while overriding `step()` and `zero_grad()` to accumulate the gradients
    # and actually assign and zero them once in a few steps
    d = dict(_GradientAccumulationOptimizer.__dict__)
    d['__optimizer__'] = optimizer.__class__
    cls = type(
        optimizer.__class__.__name__,
        (optimizer.__class__,),
        d
    )
    # Re-create the optimizer state from the wrapped instance's param groups.
    return cls(steps, optimizer.param_groups)
# declare a GA version of builtin optimizers
def _optimizer(optimizer):
    """Register a module-level factory named *optimizer* (e.g. 'Adam') that
    builds the torch.optim optimizer and wraps it with Optimizer().

    The factory signature is ``name(steps, *args, **kwargs)``.
    """
    setattr(
        sys.modules[__name__],
        optimizer,
        lambda steps, *args, **kwargs: Optimizer(getattr(torch.optim, optimizer)(*args, **kwargs), steps)
    )

# Plain loop instead of the original side-effect-only list comprehension,
# which built and discarded a list of Nones (same registrations, clearer intent).
for _name in ['Adadelta', 'Adagrad', 'Adam', 'AdamW', 'Adamax', 'ASGD', 'SGD', 'Rprop', 'RMSprop']:
    _optimizer(_name)
# https://learn.adafruit.com/circuitpython-made-easy-on-circuit-playground-express/play-tone
# https://learn.adafruit.com/circuitpython-made-easy-on-circuit-playground-express/buttons
from adafruit_circuitplayground.express import cpx
# Main loop: poll the Circuit Playground Express push-buttons forever.
# play_tone(frequency_hz, duration_s) plays a note for the given duration.
while True:
    if cpx.button_a:
        # Button A: E4 (329.63 Hz) then C4 (261.63 Hz), one second each.
        cpx.play_tone(329.63, 1)
        cpx.play_tone(261.63, 1)
    elif cpx.button_b:
        # Button B: A4 (440.00 Hz) then G#4 (415.30 Hz), one second each.
        cpx.play_tone(440.00, 1)
        cpx.play_tone(415.30, 1)
|
import csv
import pprint
def get_bar_party_data():
    """Read bar_locations.csv and return its rows as a list of dicts.

    Each dict maps the seven fixed field names below to that row's columns.
    Note: the CSV header row (if present) is converted like any other row;
    downstream code filters it out by checking for the literal 'City' value.
    """
    field_names = ('location_type', 'zip_code', 'city', 'borough',
                   'latitude', 'longitude', 'num_calls')
    records = []
    with open('bar_locations.csv', newline='') as csvfile:
        for row in csv.reader(csvfile, delimiter=',', quotechar='|'):
            records.append({name: row[i] for i, name in enumerate(field_names)})
    return records
def print_data(data):
    """Dump every record to stdout for inspection.

    NOTE(review): each entry is printed twice — once with print() and once
    with pprint() — presumably a debugging leftover; confirm before relying
    on the output format.
    """
    for entry in data:
        print(entry)
        pprint.pprint(entry)
def get_most_noisy_city_and_borough(data):
    """Return the city and borough with the most complaint calls.

    Returns a dict with keys 'city', 'borough', 'num_city_calls',
    'num_borough_calls'.  With empty/header-only data the result is
    ``{'city': '', 'borough': '', 'num_city_calls': 0, 'num_borough_calls': 0}``
    (matching the original behavior).  Ties keep the first maximum seen.

    Cleanup vs. the original: the loop variable no longer shadows the builtin
    ``list``, the redundant ``.keys()`` membership test is gone, and the
    duplicated add-or-init pattern is collapsed with dict.get().
    """
    city_calls = {}
    borough_calls = {}
    for entry in data:
        # Skip the CSV header row, which upstream parses like a data row.
        if entry['city'] == 'City':
            continue
        calls = int(entry['num_calls'])
        city_calls[entry['city']] = city_calls.get(entry['city'], 0) + calls
        borough_calls[entry['borough']] = borough_calls.get(entry['borough'], 0) + calls
    # Strict '>' preserves the original tie-breaking (first max wins) and the
    # original empty-data result ('' with 0 calls).
    best_city, best_city_calls = '', 0
    for name, total in city_calls.items():
        if total > best_city_calls:
            best_city, best_city_calls = name, total
    best_borough, best_borough_calls = '', 0
    for name, total in borough_calls.items():
        if total > best_borough_calls:
            best_borough, best_borough_calls = name, total
    return {'city': best_city, 'borough': best_borough,
            'num_city_calls': best_city_calls,
            'num_borough_calls': best_borough_calls}
def get_quietest_city_and_borough(data):
    """Return the city and borough with the fewest complaint calls.

    Returns a dict with keys 'city', 'borough', 'num_city_calls',
    'num_borough_calls'.  With empty/header-only data the result keeps
    ``''`` names and ``float('inf')`` counts, matching the original.
    Ties keep the first minimum seen.

    Same cleanups as get_most_noisy_city_and_borough: no shadowing of the
    builtin ``list``, no ``.keys()`` membership test, and the add-or-init
    pattern collapsed with dict.get().
    """
    city_calls = {}
    borough_calls = {}
    for entry in data:
        # Skip the CSV header row, which upstream parses like a data row.
        if entry['city'] == 'City':
            continue
        calls = int(entry['num_calls'])
        city_calls[entry['city']] = city_calls.get(entry['city'], 0) + calls
        borough_calls[entry['borough']] = borough_calls.get(entry['borough'], 0) + calls
    # Strict '<' preserves the original tie-breaking (first min wins) and the
    # original empty-data result ('' with inf calls).
    best_city, best_city_calls = '', float('inf')
    for name, total in city_calls.items():
        if total < best_city_calls:
            best_city, best_city_calls = name, total
    best_borough, best_borough_calls = '', float('inf')
    for name, total in borough_calls.items():
        if total < best_borough_calls:
            best_borough, best_borough_calls = name, total
    return {'city': best_city, 'borough': best_borough,
            'num_city_calls': best_city_calls,
            'num_borough_calls': best_borough_calls}
if __name__ == '__main__':
    # Load the CSV once, then compute both extremes from the same data.
    bar_data = get_bar_party_data()
    # uncomment the line below to see what the data looks like
    # print_data(bar_data)
    noisy_metrics = get_most_noisy_city_and_borough(bar_data)
    quiet_metrics = get_quietest_city_and_borough(bar_data)
    print('Noisy Metrics: {}'.format(noisy_metrics))
    print('Quiet Metrics: {}'.format(quiet_metrics))
|
"""
Collection of decorators to make our life a little easier
Simple Decorator is based on a recipe from here:
https://wiki.python.org/moin/PythonDecoratorLibrary
"""
### INCLUDES ###
import time
### CONSTANTS ###
## Multiple Attempt Settings ##
ATTEMPT_NUMBER = 10     # max retries performed by @multiple_attempts
ATTEMPT_TIMEOUT = 10    # seconds to sleep between retries
### FUNCTIONS ###
def simple_decorator(decorator):
    """
    Turn a simple decorator into a well-behaved one.

    Works for decorators that take a plain function and return a plain
    function (no descriptors) without touching attributes or docstrings.
    The returned decorator preserves the wrapped function's __name__,
    __doc__ and __dict__, and simple_decorator preserves the same for
    the decorator itself.
    """
    def _copy_metadata(source, target):
        # Carry identity metadata over so introspection sees the original.
        target.__name__ = source.__name__
        target.__doc__ = source.__doc__
        target.__dict__.update(source.__dict__)
        return target

    def new_decorator(f):
        return _copy_metadata(f, decorator(f))

    # Make simple_decorator itself well-behaved as a decorator, too.
    return _copy_metadata(decorator, new_decorator)
@simple_decorator
def multiple_attempts(func):
    """ Decorator to perform multiple attempts.

    The wrapped function receives and must return its kwargs dict, using
    the keys 'total_attempts', 'success' and 'output' as its protocol
    (see the test unit below for an example).  Retries up to
    ATTEMPT_NUMBER times, sleeping ATTEMPT_TIMEOUT seconds between
    attempts; only the final 'output' value is returned to the caller.
    """
    def _multiple_attempts(*args, **kwargs):
        kwargs['total_attempts'] = 0
        kwargs['success'] = False
        kwargs['output'] = None
        while not kwargs['success'] and kwargs['total_attempts'] < ATTEMPT_NUMBER:
            kwargs['total_attempts'] += 1
            kwargs = func(*args, **kwargs)
            # This inner check is not redundant with the while condition:
            # it prevents sleeping ATTEMPT_TIMEOUT after the final attempt.
            if kwargs['success'] or kwargs['total_attempts'] >= ATTEMPT_NUMBER:
                break
            else:
                time.sleep(ATTEMPT_TIMEOUT)
        return kwargs['output']
    return _multiple_attempts
@simple_decorator
def time_it(func):
    """ Decorator: return (result, elapsed_seconds) for each call. """
    def _time_it(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        return result, elapsed
    return _time_it
if __name__ == '__main__':
    """ Test Unit """
    import random
    @multiple_attempts
    def test_function(**kwargs):
        """
        Little function that tests above decorator
        Also, gives an example how to use above decorator
        """
        # Each attempt succeeds with probability ~0.25.
        kwargs['success'] = False
        print('Attempt # {}'.format(kwargs['total_attempts']))
        random_number = random.random()
        success_margin = 0.75
        if random_number >= success_margin:
            kwargs['success'] = True
        print('Random number: {}'.format(random_number))
        print('Success Margin: {}'.format(success_margin))
        kwargs['output'] = random_number
        return kwargs
    test_results = test_function()
    print('Test Results: {}'.format(test_results))
|
#!/usr/bin/python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
import time
from time import sleep
from time import ctime
import random
import re
from selenium.webdriver.support.select import Select
from STxlsdata import get_data
# Target environments: SalesConnect staging and the GPP partner portal.
sc_url = 'https://svt6lb01a.rtp.raleigh.ibm.com/sales/salesconnect/#'
gpp_url = 'https://www-sso.toronto.ca.ibm.com/partnerworld/gpp/atlanta/bwg3/us/en'
driver = webdriver.Firefox()
timeout = 20
driver.set_page_load_timeout(180)
def switchWin(windowTitle):
    # Switch the driver to the first open window whose title matches
    # windowTitle exactly; leaves focus on the last window if none match.
    windows = driver.window_handles
    for window in windows:
        driver.switch_to_window(window)
        if driver.title == windowTitle:
            break
def is_element_exist_by_css(element):
    # True if a CSS selector matches an element on the current page;
    # logs and returns False on any lookup failure.
    try:
        driver.find_element_by_css_selector(element)
        return True
    except:
        print 'element '+element+' is not found'
        return False
def is_element_exist_by_xpath(element):
    # XPath twin of is_element_exist_by_css.
    try:
        driver.find_element_by_xpath(element)
        return True
    except:
        print 'element '+element+' is not found'
        return False
def is_element_visible(element):
    # True if the (By, locator) tuple resolves to a visible element now.
    try:
        the_element = EC.visibility_of_element_located(element)
        assert the_element(driver)
        return True
    except:
        return False
def is_alert_exist():
    # Probe for a native browser alert by trying to switch to it.
    try:
        driver.switch_to_alert()
        print 'alert is existing'
        return True
    except:
        print 'alert is not existing'
        return False
def select_option(item, selection):
    # Open the dropdown whose data-fieldname is `item` and click the
    # option at index `selection` in the popup list.
    eles = driver.find_elements_by_css_selector('div>span[data-fieldname]')
    op_page_items = []
    for ele in eles:  # get all valid items
        op_page_items.append(ele.get_attribute('data-fieldname'))
    if item in op_page_items:
        css_selector_item = 'span[data-fieldname="'+item+'"]'
        # Scroll the control into view before clicking it.
        driver.find_element_by_css_selector(
            css_selector_item).location_once_scrolled_into_view
        driver.find_element_by_css_selector(css_selector_item).click()
        options = driver.find_elements_by_css_selector('li>div[role="option"]')
        sleep(3)
        options[selection].click()
    else:
        print 'Selected item is not on the page'
def wait_element(element, time):
    # Poll once per second (up to `time` seconds) until the XPath appears.
    # NOTE(review): the parameter name `time` shadows the imported module.
    i = 0
    while not is_element_exist_by_xpath(element):
        sleep(1)
        i = i+1
        if i > time:
            print 'Wait time out'
            break
# ---- End-to-end flow: create an Opportunity in SalesConnect, reassign it
# ---- to a business partner, then verify it appears in GPP.
# Login...
# NOTE(review): hard-coded test credentials in source.
driver.get(sc_url)
driver.implicitly_wait(20)
driver.find_element_by_id('desktop').send_keys('seller_stg_006@cn.ibm.com')
driver.find_element_by_name('password').send_keys('zuo123nico')
driver.find_element_by_name('password').send_keys(Keys.ENTER)
# Indicate if this is a new user
sleep(5)
driver.find_element_by_id('arrow').click()  # Close the bottom banner
# First-time users get a locale setup wizard; click through it.
if is_element_exist_by_css('h3'):
    if driver.find_element_by_css_selector("h3").text == "User Locale Settings":
        driver.find_element_by_class_name('btn-primary').click()
        sleep(2)
        if driver.find_element_by_css_selector('h3').text == "Setup Complete":
            driver.find_element_by_css_selector(
                'a[class="btn btn-primary"]').click()
sleep(10)
# Create a new Opportunity.
driver.find_element_by_xpath('//ul/li[@id="createList"]').click()
sleep(1)
driver.find_element_by_xpath('//span/a[@data-module="Opportunities"]').click()
sleep(1)
driver.find_element_by_css_selector('textarea[name="description"]').send_keys(
    'Test Case '+ctime())  # input Opportunity Description
# active Sales Stage dropdown list
driver.find_element_by_xpath('//div/span/div/a/span').click()
# Select 04 in Sales Stage
driver.find_element_by_xpath("//ul/li[4]/div[@role='option']").click()
driver.find_element_by_xpath(
    "//div[2][@class='row-fluid panel_left']/div/span/span/div/a/span[1]").click()  # Click on Contact
driver.find_element_by_xpath("//ul[2]/li/div").click()  # Click to search more
sleep(5)
driver.find_element_by_css_selector(
    "a[name='create']").click()  # click to create new contact
sleep(1)
# Swith to Contact setup page
switchWin(u'Create Contact \xbb SalesConnect')
# Setup new Contactor
driver.find_element_by_id("first_name").send_keys("Test001")
driver.find_element_by_id("last_name").send_keys(
    "Test002")  # Setup new Contactor
driver.find_element_by_id("phone_work").send_keys("999888777")
driver.find_element_by_id("phone_mobile").send_keys("999")
# Random email makes the contact uniquely identifiable later.
emailaddr = str(random.randint(100, 999))+'@test.com'
driver.find_element_by_id("Contacts0emailAddress0").send_keys(emailaddr)
driver.find_element_by_id("btn_account_name").click()  # Open client selectr
# Switch to client selection page
switchWin(u'SalesConnect')
# Setup a related client with CMR number
driver.find_element_by_id("name_advanced").send_keys(
    '739061')  # Search for a client
driver.find_element_by_id('field_name_advanced').click()
driver.find_element_by_id('field_name_advanced').send_keys(Keys.DOWN)
driver.find_element_by_id('field_name_advanced').send_keys(Keys.DOWN)
#--driver.find_element_by_css_selector('option[label="CMR number"]').click()
checkBox_MyClients = driver.find_element_by_id(
    'current_user_only_advanced')  # Set searching range
if checkBox_MyClients.is_selected():
    checkBox_MyClients.click()
sleep(2)
driver.find_element_by_id("search_form_submit").click()
sleep(4)
driver.find_element_by_css_selector("a[href='javascript:void(0);']").click()
sleep(2)
if is_alert_exist():
    driver.switch_to_alert().accept()
# Switch back to customer setup page
switchWin(u'Create Contact \xbb SalesConnect')
# Contact setup complete
driver.find_element_by_id("SAVE_WITH_SERVER_VALIDATION").click()
sleep(5)
# Indicate if there was existing similar contactor
# this indication checking is not working fine in CI some time if the
# function is not running
if is_element_exist_by_css('table[id="contentTable"]'):
    # ignore the samilar contactor and create new
    driver.find_element_by_css_selector(
        'input[value="Confirm create"]').click()
# Switch back to OP setup page
switchWin(u'Home \xbb SalesConnect')
# Poll up to ~100s for the contact list to render.
i = 0
while not is_element_exist_by_xpath("//tbody/tr[1]/td[2][@data-type='fullname']/span/div"):
    sleep(10)
    i = i+1
    if i > 10:
        print 'Wait Contacts TimeOut'
        break
print 'Contact displayed'
# Select the newly setup contactor by filtering the email addr
emailList = driver.find_elements_by_css_selector(
    "div[class='ellipsis_inline'][rel='tooltip']")  # All emails
emailListText = []
for i in emailList:
    x = i.text
    emailListText.append(x)
radioBtnList = driver.find_elements_by_css_selector(
    "input[type='radio'][class='selection'][name='Contacts_select']")  # All radio button
# Pair each email with its radio button, then pick ours by email.
contactList = dict(zip(emailListText, radioBtnList))
contactList.get(emailaddr).click()
sleep(3)
# Client Name (Verification - should be auto selected)
autoClientName = driver.find_element_by_xpath(
    "//div/span[@data-fieldname='account_name']/span/div/a/span[1]/div").text
i = 0
# Wait until the client is auto selected and will timeout after 10s.
while autoClientName == u'Required':
    sleep(1)
    i = i + 1
    if i > 10:
        print 'Wait Client Name TimeOut'
        break
print 'Client name is auto selected'
# Source
driver.find_element_by_xpath(
    "//div/span[@data-fieldname='lead_source']/span/div/a/span[1]").click()
driver.find_element_by_xpath(
    '/html/body/div[8]/div/input').send_keys('Business Partner')
driver.find_element_by_xpath(
    '/html/body/div[8]/div/input').send_keys(Keys.ENTER)
# OP code
driver.find_element_by_xpath(
    "//div/span[@data-fieldname='solution_codes_c']/span/div/ul/li/input").click()
driver.find_element_by_xpath(
    "//div/span[@data-fieldname='solution_codes_c']/span/div/ul/li/input").send_keys('10')
driver.find_element_by_xpath(
    "//div/span[@data-fieldname='solution_codes_c']/span/div/ul/li/input").send_keys(Keys.ENTER)
# Line Item1
# Offering
driver.find_element_by_xpath(
    "//div/span[@data-fieldname='level_search']/span/div/a/span[1]").click()
driver.find_element_by_xpath("/html/body/div[11]/div/input").send_keys('BFB60')
sleep(3)
driver.find_element_by_xpath(
    "/html/body/div[11]/div/input").send_keys(Keys.ENTER)
sleep(3)
# Competitor
driver.find_element_by_xpath(
    "//div/span[@data-fieldname='competitor']/span/div/ul/li/input").send_keys('Accept 360')
driver.find_element_by_xpath(
    "//div/span[@data-fieldname='competitor']/span/div/ul/li/input").send_keys(Keys.ENTER)
# Amount
driver.find_element_by_css_selector(
    "input[aria-label='Amount'][name ='revenue_amount']").send_keys('32100.98')
select_option('roadmap_status', 1)  # Select 'Stretch' in Roadmap Status
sleep(1)
select_option('probability', 4)  # Select '75%' in Probability
sleep(1)
# select_option('stg_fulfill_type',2) #Select 'Web' in Fullfillment Type
# Submit and Save the Opportunity
driver.find_element_by_css_selector('a[name="save_button"]').click()
print 'save successfully'
wait_element("//div/div[@class='alert alert-success alert-block']/a", 10)
opty_id = driver.find_element_by_xpath(
    "//div/div[@class='alert alert-success alert-block']/a").text
print opty_id  # Opporunity ID
# Re-open the new Opportunity and reassign it to a business partner.
opty_url = sc_url+'Opportunities/'+opty_id
driver.get(opty_url)
driver.find_element_by_xpath(
    "//span/a[@name='edit_button']").click()  # click on Eidt button
driver.find_element_by_xpath(
    "//div/span[1][@data-fieldname='assigned_user_name']/span/div/a/span[1]/div").click()  # Click on OO
search_more_buttons = driver.find_elements_by_xpath(
    "//ul[2]/li/div[@class='select2-result-label']")  # Click on Search more
# Best-effort clicks: some of these matches are stale/hidden, so ignore failures.
for each in search_more_buttons:
    try:
        each.click()
    except:
        pass
module_filters = driver.find_elements_by_xpath(
    "//div/div[1]/span[1][@class='table-cell']/div[1]/a/span[1]/span")
for each in module_filters:
    try:
        each.click()
    except:
        pass
driver.find_element_by_xpath(
    "//ul[@role='listbox']/li[2]/div/div").click()  # click on bp
search_boxes = driver.find_elements_by_xpath(
    "//div[3]/div[1]/div[1]/div/div[1]/div/input[@class='search-name']")
for each in search_boxes:
    try:
        each.send_keys('PRM2005')
    except:
        pass
driver.find_element_by_xpath(
    "//table/tbody/tr/td[1]/span/input[@class='selection'][@type='radio']").click()  # click on radio button
if is_alert_exist():
    driver.find_element_by_xpath(
        "//div[2]/a[2][@data-action='confirm']").click()
switchWin(opty_id+u' \xbb Opportunities \xbb SugarCRM')
driver.find_element_by_xpath("//a[@name='save_button']").click()
wait_element("//div/ul/li/a[@data-tabid='tab_opportunity_overview']", 5)
sleep(10)
#Login GPP sys-------
# NOTE(review): hard-coded test credentials in source.
driver.get(gpp_url)
driver.find_element_by_id('username').send_keys('BCCSME40@us.ibm.com')
driver.find_element_by_id('password').send_keys('pass1word')
driver.find_element_by_id('password').send_keys(Keys.ENTER)
sleep(5)
# Refresh up to 10 times while the portal body is still present.
i = 0
while i < 10:
    if is_element_exist_by_xpath("/html/body"):
        driver.refresh()
        i = i+1
    else:
        break
sleep(5)
# GPP is frame-based: drill into nested frames to reach each widget.
driver.switch_to.frame(0)
driver.switch_to.frame(1)  # 2nd navigate bar
# Click Opportunity
driver.find_element_by_xpath(
    "//div[3]/table/tbody/tr/td[3]/a").click()
driver.switch_to.default_content()
driver.switch_to.frame(0)
driver.switch_to.frame(2)
show_options = driver.find_element_by_xpath(
    '/html/body/table[1]/tbody/tr/td[2]/form/table/tbody/tr/td[1]/select')  # select all opportunity
Select(show_options).select_by_index(1)
driver.switch_to.default_content()
driver.switch_to.frame(0)
driver.switch_to.frame(3)
driver.switch_to.frame(0)
driver.find_element_by_xpath(
    "/html/body/div/form/span/div/table[1]/tbody/tr/td[7]/span/nobr/a").click()
driver.switch_to.default_content()
driver.switch_to.frame(0)
driver.switch_to.frame(3)
driver.switch_to.frame(0)
driver.find_element_by_xpath(
    '/html/body/div/form/div/table/tbody/tr/td/span/div/div/table/tbody/tr[3]/td[3]/div/nobr/input').send_keys('KW-TMSU3C7')  # input box
driver.find_element_by_xpath(
    '/html/body/div/form/div/table/tbody/tr/td/span/table[2]/tbody/tr/td[5]/span/nobr/a').click()  # Click Go
driver.switch_to.default_content()
driver.switch_to.frame(0)
driver.switch_to.frame(3)
driver.switch_to.frame(0)
gpp_opty = driver.find_element_by_xpath(
    '/html/body/div/form/span/div/table[2]/tbody/tr/td/table/tbody/tr[2]/td[2]/a').text
#Assert the flow from SC to GPP
if gpp_opty == opty_id:
    print opty_id+'flows to GPP successfully!'
else:
    print opty_id+'is failed to flow to GPP!'
#END
|
import os
import re
import subprocess
import time
from datetime import datetime

import gspread
import paramiko
import xlrd
import xmltodict
import yaml
from oauth2client.service_account import ServiceAccountCredentials

from .constants import GROUPS, CISCO_USERNAME, CISCO_PASSWORD, Q_ROUTER, NOT_PHY_INTS, STANDARD_PORT_NAMES
def not_pingable(ip):
    """Return 0 (falsy) when *ip* answers a single ICMP ping, non-zero otherwise.

    SECURITY FIX: the original used ``os.system("ping -c 1 " + ip)``, which
    passes *ip* through a shell and is command-injectable if the address is
    ever attacker-influenced.  subprocess.call with an argument list never
    invokes a shell.  The return value is the ping exit status (0 == success),
    preserving the truthy-when-unreachable contract.
    """
    return subprocess.call(["ping", "-c", "1", ip])
def get_standard_port(port):
    """Canonicalize an interface name via STANDARD_PORT_NAMES.

    Splits *port* at its first digit into a prefix and a number, then swaps
    the prefix for its standard form.  Names with no digits, or prefixes
    missing from the mapping, are returned unchanged.
    """
    match = re.search(r"\d", port)
    if not match:
        return port
    prefix = port[:match.start()]
    number = port[match.start():]
    canonical = STANDARD_PORT_NAMES.get(prefix)
    return canonical + number if canonical else port
def down_interfaces(interfaces):
    """Return interfaces that are down, undescribed, and physical.

    An interface qualifies when its 'descrip' is empty, the first two
    characters of its 'port' are not in NOT_PHY_INTS, and its 'status'
    contains 'down'.
    """
    return [
        intf for intf in interfaces
        if not intf['descrip']
        and intf['port'][:2] not in NOT_PHY_INTS
        and 'down' in intf['status']
    ]
def port_slot_card_pos(platform, port):
    """Return the [slot, card] position parsed from *port* for *platform*.

    FIX: the original returned ``map(int, ...)`` — a lazy, single-use map
    object — on the 'ios' and default branches but a plain list on the
    'iosxr' branch (a Python-2 leftover).  All branches now consistently
    return a list of ints, which remains iterable for existing callers and
    is additionally indexable/re-iterable.
    """
    if platform == 'iosxr':
        # e.g. 'Gi0/1/2/3' -> slot '1', card '2'; slots are 1-based here.
        slot, card = port.split('/')[1:3]
        return [int(slot) + 1, int(card)]
    if platform == 'ios':
        # e.g. 'Gi1/0/24' -> slot '1'; IOS has no card notion, so card is 0.
        num = port[2:].split('/')[0]
        return [int(num), 0]
    # Default (e.g. NX-OS 'Eth1/2'): strip the 3-char prefix, split on '/'.
    return [int(part) for part in port[3:].split('/')]
def get_group(ip):
    """Map the second octet of a dotted-quad *ip* to its group name.

    Falls back to 'unknown' when the octet is not a key in GROUPS.
    """
    second_octet = ip.split('.')[1]
    return GROUPS.get(second_octet, 'unknown')
def add_to_inventory(ip):
    """Add *ip* to the YAML hosts inventory (read-modify-write).

    Delegates platform detection and entry construction to create_host;
    the ip doubles as its own market/router address here.
    """
    with open('geomap/inventory/hosts.yaml') as f:
        inventory = yaml.safe_load(f)
    create_host(inventory, ip, ip)
    with open('geomap/inventory/hosts.yaml', "w") as f:
        yaml.dump(inventory, f)
def init_host(ip, groups=None):
    """Build a fresh inventory entry for *ip*.

    BUG FIX: the original signature was ``groups=[]`` — a shared mutable
    default argument, so every default-call would have received (and could
    mutate) the same list object.  The default is now None and a new list is
    stored each call; passing an explicit list still works identically.
    """
    return {
        'data': {
            'host_name': '',
            'ospf': {
                'neighbor': [],
                'stub': False,
                'stub_neighbor': []},
        },
        # Copy to guarantee each entry owns an independent groups list.
        'groups': list(groups) if groups is not None else [],
        'hostname': ip,
    }
def create_host(inv, ip, market_ip):
    """Detect a device's platform over SSH and add an entry to *inv*.

    Platform is inferred from 'sh ver' output via regex markers; devices
    rejecting the auth type (open_ssh_session -> 2) are assumed to be sg350
    switches without TACACS; unreachable devices become group 'unknown'.
    When *ip* differs from *market_ip*, the entry is marked as a switch
    hanging off that market router.
    """
    xe = re.compile("IOS-XE")
    ios = re.compile("Cisco IOS Software")
    xr = re.compile("Cisco IOS XR Software")
    nxos = re.compile("NXOS: version")
    ssh = open_ssh_session(ip, CISCO_USERNAME, CISCO_PASSWORD, 22)
    if ssh and ssh != 2:
        chan = ssh.invoke_shell()
        output = send_command(chan, 'sh ver')
        # Re-bind the compiled patterns to their match results.
        xe = xe.search(output)
        ios = ios.search(output)
        xr = xr.search(output)
        nxos = nxos.search(output)
        # Order matters: IOS-XE banners also contain 'Cisco IOS Software',
        # so the XE check must run first.
        if xe:
            inv[ip] = init_host(ip, groups=['cisco-xe', get_group(market_ip)])
        elif ios:
            inv[ip] = init_host(ip, groups=['cisco-ios', get_group(market_ip)])
        elif xr:
            inv[ip] = init_host(ip, groups=['cisco-xr', get_group(market_ip)])
        elif nxos:
            inv[ip] = init_host(ip, groups=['nxos', get_group(market_ip)])
        else:
            inv[ip] = init_host(ip, groups=['sg350', get_group(market_ip)])
        inv[ip]['data']['tacacs'] = True
        ssh.close()
    elif ssh == 2:
        # Auth-type rejection: treated as an sg350 without TACACS.
        inv[ip] = init_host(ip, groups=['sg350', get_group(market_ip)])
        inv[ip]['data']['tacacs'] = False
    else:
        inv[ip] = init_host(ip, groups=['unknown', get_group(market_ip)])
        inv[ip]['data']['tacacs'] = False
    if inv.get(ip) and ip != market_ip:
        inv[ip]['data']['router'] = market_ip
        inv[ip]['groups'].append('switch')
def get_oui_for(vendor):
    """Collect the MAC OUI prefixes registered to *vendor*.

    Parses geomap/vendorMacs.xml (Cisco's vendor-MAC mapping format) and
    returns prefixes reformatted as lowercase dotted pairs, e.g.
    '00:1A:2B' -> '001a.2b' (Cisco-style 'xxxx.xxxx' notation).
    """
    ouis = []
    with open('geomap/vendorMacs.xml', encoding='latin1') as fd:
        doc = xmltodict.parse(fd.read())
        for key, data in doc.items():
            for key in data:
                if key == 'VendorMapping':
                    for item in data[key]:
                        if item['@vendor_name'] == vendor:
                            oui = item['@mac_prefix'].replace(':', '').lower()
                            oui = oui[:4] + '.' + oui[4:]
                            ouis.append(oui)
    return ouis
def open_ssh_session(hostname, username, password, port=22, channel=None):
    """Open a paramiko SSH session to *hostname*.

    Args:
        hostname: device address.
        username/password: credentials (agent and key lookup disabled).
        port: SSH port, default 22.
        channel: optional socket-like object to tunnel through.

    Returns:
        A connected SSHClient on success, 2 when the server rejects
        password authentication (BadAuthenticationType), or False on any
        other connection failure.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(hostname, username=username, password=password, port=port, sock=channel, allow_agent=False, look_for_keys=False)
    except paramiko.ssh_exception.BadAuthenticationType:
        return 2
    except Exception:
        # Bug fix: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.  Any genuine failure still maps
        # to the legacy False return value callers test for.
        return False
    return ssh
def get_output(channel):
    """Drain pending bytes from an interactive channel and return them as text.

    Sleeps briefly so the remote side has time to produce output, then
    keeps reading while data is ready.
    """
    pieces = []
    time.sleep(2)
    while channel.recv_ready():
        pieces.append(channel.recv(9999).decode('ISO-8859-1'))
        time.sleep(1)
    return ''.join(pieces)
def send_command(chan, command):
    """Run *command* on an interactive channel and return its output.

    Flushes any pending banner first and disables terminal pagination so
    long output is not truncated by '--More--' prompts.
    """
    get_output(chan)
    chan.send('terminal length 0\n')
    get_output(chan)
    chan.send(command + '\n')
    return get_output(chan)
def get_loopbacks(ip=Q_ROUTER, username=CISCO_USERNAME, password=CISCO_PASSWORD):
    """Return the /32 loopback addresses visible in *ip*'s routing table.

    Logs in over SSH, lists host routes, and extracts the
    10.<market-octet>.255.x/32 loopbacks.  Returns [] when SSH fails.
    """
    # NOTE(review): open_ssh_session can also return 2 (bad auth type),
    # which is truthy and would crash on invoke_shell — confirm intent.
    ssh = open_ssh_session(ip, username, password, 22)
    if ssh:
        chan = ssh.invoke_shell()
        output = send_command(chan, 'sh ip route | i /32')
        # Bug fix: raw string — '\.' and '\d' are invalid escape
        # sequences in a plain literal (DeprecationWarning on modern Python).
        p = re.compile(r"10\.(63|68|84|86|88|92|93|94|98|110|199)\.255\.\d{1,3}\/32")
        # Drop the trailing '/32' from each match.
        return [pe.group()[:-3] for pe in p.finditer(output)]
    return []
def get_radar():
    """Return the first worksheet of the 'My Radar- Engineering Division'
    Google spreadsheet.

    Authorizes gspread with a service-account key file expected in the
    current working directory.
    """
    scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name('Radar-8d27863f5496.json', scope)
    print(os.getcwd())  # debug aid: shows where the key file is looked up
    client = gspread.authorize(creds)
    return client.open('My Radar- Engineering Division').sheet1
def user_directory_path(instance, filename):
    """Upload destination: MEDIA_ROOT/<property slug>/<filename>."""
    return f"{instance.property.slug}/{filename}"
def excel_number_to_date(number):
    """Convert an Excel serial date (a non-zero float) to a datetime.

    Returns None for anything that is not a truthy float — note that
    ints are deliberately rejected, matching the original contract.
    """
    if isinstance(number, float) and number:
        return datetime(*xlrd.xldate_as_tuple(number, 0))
    return None
def get_subnet(units):
    """Smallest prefix length whose subnet can hold *units* hosts.

    A /N block of 2**(32-N) addresses loses three usable addresses
    (hence the ``- 3``).  Up to one unit gets the default /29.
    """
    if units <= 1:
        return 29
    for host_bits in range(1, 33):
        usable = 2 ** host_bits - 3
        if usable > units:
            return 32 - host_bits
    return 29
def get_subnets(units):
    """Not implemented.

    Placeholder — presumably meant to return multiple subnets covering
    *units* hosts (cf. get_subnet); confirm intent before implementing.
    """
    pass
|
# Compound-interest calculator: p = c * (1 + r/n)**(t*n), rounded to 2 dp.

# Collect the four inputs the formula needs.
c_init = int(input("Enter initial amount of investment:"))
r_rate = float(input("Enter the yearly rate of interest:"))
t_yrs = int(input("Enter number of years till maturation:"))
n_times = int(input("Enter number of times the interest is compounded:"))

# Evaluate the formula and round the final value to two decimal places.
rounded_value = round(c_init * (1 + r_rate / n_times) ** (t_yrs * n_times), 2)
print(rounded_value)
|
"""
BSP 28 - Monteur
Dickbauer Yanick 1030489, Moser Patrick 1114954, Perner Manuel 0633155
WS 2016
"""
from lib import random_exp, user_input
NR_MACHINES = 4
FREQUENCY = 10  # simulation steps per hour
SIM_DURATION = 1000  # hours of simulated time
# Display labels for mechanic and machine states.
STATE_MECHANIC_IDLE = 'drinking coffee'
STATE_MECHANIC_REPAIRING = 'repairing'
STATE_MACHINE_WORKING = 'working'
STATE_MACHINE_WAITING = 'waiting for repair'
STATE_MACHINE_IN_REPAIR = 'in repair'
PRINT_EVERY_STEP = False  # set True to trace every simulation step
SHOW_PLOT = True  # plot cumulative utilization at the end
def create_failure_free_time():
    """Draw how many simulation steps a machine runs before failing.

    Exponentially distributed with mean 6 hours (lambda = 1/6),
    converted to steps and clamped to at least one step.
    """
    mean_hours = 6  # lambda is 1/6 --> mean is 6
    steps = int(random_exp(mean_hours) * FREQUENCY)
    # A zero-step draw would break the bookkeeping; clamp to one step
    # (increase FREQUENCY for finer resolution).
    return max(steps, 1)
def create_repair_time():
    """Draw a repair duration in simulation steps (exponential, mean 1 hour).

    Re-draws until the discretized duration is non-zero.
    """
    mean_hours = 1
    while True:
        steps = int(random_exp(mean_hours) * FREQUENCY)
        if steps:
            return steps
def working_machines(machines):
    """Yield (index, machine) pairs for machines currently working."""
    for idx, machine in enumerate(machines):
        if machine['state'] == STATE_MACHINE_WORKING:
            yield idx, machine
    # Fix: the original ended with ``return []`` — inside a generator that
    # value only rides on StopIteration and is discarded, so it is dropped.
def evaluate_iteration(t, machines, mechanics, queue):
    """Log the state of step *t* and return per-step statistics.

    Returns a dict with counts of working, in-repair and waiting machines.
    """
    nr_machines_in_repair = 0
    _print_step('Iteration: {}'.format(t))
    for m, machine in enumerate(machines):
        _print_step(' Machine {}: state: {}, rem_time: {}'.format(
            m+1, machine['state'], machine['rem_time']))
    _print_step()
    for m, mechanic in enumerate(mechanics):
        if mechanic['state'] == STATE_MECHANIC_REPAIRING:
            # Which machine this mechanic is working on (1-based for display).
            machine_nr = machines.index(mechanic['machine'])
            work_str = ', works on machine: {}'.format(machine_nr+1)
            nr_machines_in_repair += 1
        else:
            work_str = ''
        _print_step(' Mechanic {}: state: {}{}'.format(
            m+1, mechanic['state'], work_str))
    _print_step()
    _print_step(' Machines waiting for repair: {}'.format(
        [machines.index(m)+1 for m in queue]))
    _print_step('\n')
    return {
        'nr_machines_working' : len(list(working_machines(machines))),
        'nr_machines_in_repair' : nr_machines_in_repair,
        'nr_machines_waiting' : len(queue)
    }
def _print_step(msg=''):
    """Print *msg* only when per-step tracing is enabled.

    The parameter was renamed from ``str`` — it shadowed the builtin.
    All callers pass it positionally, so the rename is safe.
    """
    if PRINT_EVERY_STEP:
        print(msg)
def simulate(nr_mechanics):
    """Run the repair-shop simulation with *nr_mechanics* mechanics.

    Machines fail after an exponential working time, queue for a free
    mechanic, and return to work after an exponential repair time.

    Returns:
        list of per-step statistics dicts (see evaluate_iteration).
    """
    machines = [
        {'state' : STATE_MACHINE_WORKING, 'rem_time' : create_failure_free_time(),
         'id' : i}
        for i in range(NR_MACHINES)]
    mechanics = [{'state': STATE_MECHANIC_IDLE, 'machine' : None}
                 for i in range(nr_mechanics)]
    queue = []  # machines waiting for a free mechanic, FIFO
    it_data = []
    for t in range(FREQUENCY * SIM_DURATION):
        # the machine part: expired machines join the repair queue
        for m, machine in working_machines(machines):
            if machine['rem_time'] == 0:
                machine['state'] = STATE_MACHINE_WAITING
                queue.append(machine) # maybe a bug: state_waiting?
        # mechanics part:
        for m, mechanic in enumerate(mechanics):
            if mechanic['state'] == STATE_MECHANIC_REPAIRING:
                machine = mechanic['machine']
                # maybe she fixed the machine:
                if machine['rem_time'] == 0:
                    machine['rem_time'] = create_failure_free_time()
                    machine['state'] = STATE_MACHINE_WORKING
                    mechanic['state'] = STATE_MECHANIC_IDLE
            if mechanic['state'] == STATE_MECHANIC_IDLE:
                # check if machine is in queue
                if len(queue) > 0:
                    machine = queue.pop(0)
                    machine['state'] = STATE_MACHINE_IN_REPAIR
                    machine['rem_time'] = create_repair_time()
                    mechanic['state'] = STATE_MECHANIC_REPAIRING
                    mechanic['machine'] = machine
        # print actual state:
        it_data.append(evaluate_iteration(t, machines, mechanics, queue))
        # advance every machine's remaining time by one step
        for m, machine in enumerate(machines):
            machine['rem_time'] -= 1
    # evaluation
    n = len(it_data)
    #utilization_mechanics = sum([it['nr_machines_in_repair']/nr_mechanics for it in it_data])
    #avg_utilization_mecha
    #print(utilization_mechanics)
    return it_data
def costs(it_data, nr_mechanics, cost_downtime, labor_costs):
    """Print labor, downtime and total costs for one simulation run.

    Args:
        it_data: per-step statistics dicts from simulate().
        nr_mechanics: number of mechanics employed in the run.
        cost_downtime: cost per machine-hour of downtime.
        labor_costs: cost per mechanic-hour.
    """
    # Fix: output string said 'mechnics' (typo) — now 'mechanics'.
    labor = SIM_DURATION * nr_mechanics * labor_costs
    print('Sum of labor costs with {} mechanics:'.format(nr_mechanics), labor)
    # Machine-steps spent not working, converted back to hours.
    downtime = sum(NR_MACHINES - it['nr_machines_working'] for it in it_data) / FREQUENCY
    print('Sum of downtime costs with {} mechanics:'.format(nr_mechanics), downtime * cost_downtime)
    total_costs = labor + (downtime * cost_downtime)
    print('Total costs with {} mechanics:'.format(nr_mechanics), total_costs)
def plot(sim_data):
    """Plot cumulative machine and mechanic utilization for each run.

    Args:
        sim_data: list of {'nr_mechanics': int, 'data': it_data} dicts.
    """
    import matplotlib.pyplot as plt
    import seaborn  # imported for its side effect on matplotlib styling
    n = len(sim_data[0]['data'])
    x_vals = range(n)
    for it_nr, sim in enumerate(sim_data):
        nr_mechanics = sim['nr_mechanics']
        data = sim['data']
        # Running average of the fraction of machines working up to step i.
        workings = [it['nr_machines_working'] for it in data]
        cum_utilization_w = [sum(workings[0:i]) / (i * NR_MACHINES) for i in range(1,n)]
        plt.plot(x_vals[1:], cum_utilization_w, label='Avg. Machine Utilization sim {}'.format(nr_mechanics))
        # Running average of the fraction of mechanics busy up to step i.
        repairs = [it['nr_machines_in_repair'] for it in data]
        cum_utilization_r = [sum(repairs[0:i]) / (i * nr_mechanics) for i in range(1,n)]
        plt.plot(x_vals[1:], cum_utilization_r, label='Avg. Mechanics Utilization sim {}'.format(nr_mechanics))
    plt.legend()
    plt.show()
def main():
    """Run the simulation with one and with two mechanics and compare costs."""
    cost_downtime, labor_costs = user_input([('Define the downtime costs per hour', int, 1000), ('Define the labor costs per hour of the mechanics', int, 50)])
    runs = [(1, simulate(1)), (2, simulate(2))]
    for nr, data in runs:
        costs(data, nr, cost_downtime, labor_costs)
    if SHOW_PLOT:
        plot([{'nr_mechanics': nr, 'data': data} for nr, data in runs])
main() |
"""
Backwards is no different than forwards but my brain is backwards so it makes
sense.
"""
def longest_subsequence(l, diff):
    """Length of the longest subsequence of *l* whose consecutive
    elements increase by exactly *diff*.

    Scans right-to-left: best[v] holds the longest valid subsequence
    that starts at value v among the elements seen so far.
    """
    best = {}
    longest = 0
    for value in reversed(l):
        best[value] = best.get(value + diff, 0) + 1
        longest = max(longest, best[value])
    return longest
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'QtSocket_main.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Generated chat-window UI: a read-only chat log above an input box
    with Send and Settings buttons.

    Auto-generated by pyuic5 from QtSocket_main.ui — do not hand-edit
    widget geometry here; regenerate from the .ui file instead.
    """
    def setupUi(self, MainWindow):
        """Build and lay out all widgets on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(688, 664)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Read-only chat history box.
        self.chatBox = QtWidgets.QTextEdit(self.centralwidget)
        self.chatBox.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.chatBox.sizePolicy().hasHeightForWidth())
        self.chatBox.setSizePolicy(sizePolicy)
        self.chatBox.setMinimumSize(QtCore.QSize(640, 440))
        font = QtGui.QFont()
        font.setFamily("细线")
        font.setPointSize(12)
        font.setStyleStrategy(QtGui.QFont.PreferAntialias)
        self.chatBox.setFont(font)
        self.chatBox.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        self.chatBox.setLineWrapMode(QtWidgets.QTextEdit.WidgetWidth)
        self.chatBox.setReadOnly(True)
        self.chatBox.setObjectName("chatBox")
        self.verticalLayout_2.addWidget(self.chatBox)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Message composition box under the chat history.
        self.sendBox = QtWidgets.QPlainTextEdit(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.sendBox.sizePolicy().hasHeightForWidth())
        self.sendBox.setSizePolicy(sizePolicy)
        self.sendBox.setMaximumSize(QtCore.QSize(16777215, 108))
        font = QtGui.QFont()
        font.setFamily("细线")
        font.setPointSize(12)
        font.setStyleStrategy(QtGui.QFont.PreferAntialias)
        self.sendBox.setFont(font)
        self.sendBox.setObjectName("sendBox")
        self.horizontalLayout_2.addWidget(self.sendBox)
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.verticalLayout_3.setSpacing(3)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        # Send button (large) stacked above the Settings button (small).
        self.sendButton = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.sendButton.sizePolicy().hasHeightForWidth())
        self.sendButton.setSizePolicy(sizePolicy)
        self.sendButton.setMaximumSize(QtCore.QSize(16777215, 69))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setPointSize(16)
        font.setBold(False)
        font.setWeight(50)
        font.setKerning(True)
        font.setStyleStrategy(QtGui.QFont.PreferAntialias)
        self.sendButton.setFont(font)
        self.sendButton.setFlat(False)
        self.sendButton.setObjectName("sendButton")
        self.verticalLayout_3.addWidget(self.sendButton)
        self.configButton = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.configButton.sizePolicy().hasHeightForWidth())
        self.configButton.setSizePolicy(sizePolicy)
        self.configButton.setMaximumSize(QtCore.QSize(16777215, 34))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setStyleStrategy(QtGui.QFont.PreferAntialias)
        self.configButton.setFont(font)
        self.configButton.setObjectName("configButton")
        self.verticalLayout_3.addWidget(self.configButton)
        # Stretch factors keep the chat log dominant and the input row thin.
        self.verticalLayout_3.setStretch(0, 2)
        self.verticalLayout_3.setStretch(1, 1)
        self.horizontalLayout_2.addLayout(self.verticalLayout_3)
        self.horizontalLayout_2.setStretch(0, 100)
        self.horizontalLayout_2.setStretch(1, 1)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        self.verticalLayout_2.setStretch(0, 100)
        self.verticalLayout_2.setStretch(1, 1)
        self.verticalLayout.addLayout(self.verticalLayout_2)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Apply translatable text to the window title and buttons."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Chatroom"))
        self.sendButton.setText(_translate("MainWindow", "Send"))
        self.configButton.setText(_translate("MainWindow", "Settings"))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 23.10.2009
@author: The Zero
'''
import sys
# Usage: script.py <File> [norun]
# Compiles a brainfuck source file and (optionally) runs the result.
if len(sys.argv)<2:
    print( 'Usage: %s <File> ' % sys.argv[0] )
    sys.exit(1)
try:
    stream = open(sys.argv[1], encoding = 'utf-8')
except Exception:
    print( 'Unable to open source file.')
    sys.exit(2)
# A second argument of 'norun' compiles without executing.
try:
    run = False if sys.argv[2] == 'norun' else True
except Exception:
    run = True
# Read the source, stripping surrounding whitespace from every line.
lines = []
for line in stream:
    lines.append(line.strip(' \r\n\t'))
stream.close()
#'''
# Compile the joined source and write it next to the input as <name>.bfc.
import compiler
cmp = compiler.Compiler()  # NOTE(review): name shadows the Python 2 builtin ``cmp``
source = cmp.compile("".join(lines))
f = open(sys.argv[1] + '.bfc', 'w')
f.write(source)
f.close()
if run:
    import pybf
    compiled = pybf.Pybf()
    compiled.run()
'''
import brainfuck
intr = brainfuck.Brainfuck("".join(lines), bits=16)
intr.chr()
'''
|
"""
Function:pre-process data
Author:Will
Date:2019-1-15
Version:1.0
"""
import pandas as pd
from sklearn.preprocessing import Imputer, LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
# Read the raw data: all-but-last column as features, column 3 as target.
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, : -1].values
Y = dataset.iloc[:, 3].values
# Fill missing numeric values (columns 1-2) with the column mean.
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
# Encode the categorical column 0 as integers...
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
# ...then expand it into one-hot dummy variables.
onehotencoder = OneHotEncoder(categorical_features=[0])
X = onehotencoder.fit_transform(X).toarray()
labelencoder_Y = LabelEncoder()
Y = labelencoder_Y.fit_transform(Y)
# 80/20 train/test split, fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# Standardize features: fit the scaler on the training set ONLY, then apply
# the same transform to the test set.  (Bug fix: the original called
# fit_transform on X_test, leaking test-set statistics into the scaler.)
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
|
,buyers,prices
0,Carson Busses,$29.95
1,Earl E. Byrd,$8.37
2,Patty Cakes,$15.26
3,Derri Anne Connecticut,$19.25
4,Moe Dess,$19.25
5,Leda Doggslife,$13.99
6,Dan Druff,$31.57
7,Al Fresco,$8.49
8,Ido Hoe,$14.47
9,Howie Kisses,$15.86
10,Len Lease,$11.11
11,Phil Meup,$15.98
12,Ira Pent,$16.27
13,Ben D. Rules,$7.50
14,Ave Sectomy,$50.85
15,Gary Shattire,$14.26
16,Bobbi Soks,$5.68
17,Sheila Takya,$15.00
18,Rose Tattoo,$114.07
19,Moe Tell,$10.09
|
from surprise import (
KNNBaseline, Reader, Dataset, dump
)
# First, train the algorithm to compute the similarities between items.
data = Dataset.load_from_file('ratings.csv', reader=Reader(sep=',', rating_scale=(1, 10)))
trainset = data.build_full_trainset()
# Item-based KNN with Pearson-baseline similarity.
sim_options = {'name': 'pearson_baseline', 'user_based': False}
algo = KNNBaseline(sim_options=sim_options)
# NOTE(review): .train() is the legacy Surprise API (renamed .fit()) — confirm library version.
algo.train(trainset)
'''
>>> algo.predict('425', '0338564')
Prediction(uid='425', iid='0338564', r_ui=None, est=8.8268148604314725, details={u'actual_k': 40, u'was_impossible': False})
>>> algo.predict('732', '1219827')
Prediction(uid='732', iid='1219827', r_ui=None, est=1.3944813261280586, details={u'actual_k': 8, u'was_impossible': False})
'''
dump.dump('knn.algo', algo=algo) |
import os
import cv2
import numpy as np
from PIL import Image
# LBPH face recognizer (legacy OpenCV 2.x API; newer builds use cv2.face.LBPHFaceRecognizer_create).
recognizer = cv2.createLBPHFaceRecognizer()
path = 'dataset'  # directory holding the training images
strvar = 'dataset\\'  # Windows-style path prefix of each image
def getimgid():
    """Load every face image under *path* and pair it with its numeric id.

    Filenames are expected to look like '<id>.<sample>.jpg'.

    Returns:
        (ids, faceSamples): numpy array of ids and the list of uint8 images.
    """
    global path
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        pilImage = Image.open(imagePath)
        imageNp = np.array(pilImage, 'uint8')
        # Bug fix: ``strip(strvar)`` removes a *character set*, not the
        # directory prefix — ids containing 'd','a','t','s','e' were
        # mangled and POSIX '/' separators broke int() entirely.
        # Parse the basename instead.
        fname = os.path.splitext(os.path.basename(imagePath))[0]
        face_id = int(fname.split('.')[0])
        faceSamples.append(imageNp)
        ids.append(face_id)
        cv2.imshow('training', imageNp)
        cv2.waitKey(10)
    return np.array(ids), faceSamples
# Train on the collected samples and persist the model to disk.
ids, faces = getimgid()
recognizer.train(faces, ids)
recognizer.save('trainingDataset.yml')
cv2.destroyAllWindows()
|
# https://atcoder.jp/contests/abc098/tasks/abc098_c
N = int(input())
S = input()
# prefix_w[i] = number of 'W' in S[:i]
prefix_w = [0]
for ch in S:
    prefix_w.append(prefix_w[-1] + (ch == 'W'))
# suffix_e[i] = number of 'E' in S[i:]
suffix_e = [0] * (N + 1)
for i in range(N - 1, -1, -1):
    suffix_e[i] = suffix_e[i + 1] + (S[i] == 'E')
# Choosing the leader at position i forces every 'W' before i and every
# 'E' after i to turn around; minimize that count over all positions.
best = 3 * 10**5
for i in range(N + 1):
    best = min(best, prefix_w[i] + suffix_e[i])
print(best)
|
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt()  # password hashing helper
db = SQLAlchemy()  # shared SQLAlchemy instance, bound in connect_db()
# Fallback avatar used when a user supplies no image.
DEFAULT_IMAGE_URL = "https://www.edmundsgovtech.com/wp-content/uploads/2020/01/default-picture_0_0.png"
def connect_db(app):
    """Bind the shared SQLAlchemy instance to the given Flask app."""
    db.app = app
    db.init_app(app)
class Follows(db.Model):
    """Join table of user-to-user follow edges.

    Composite primary key (followed, follower) makes each pair unique;
    rows are removed when either user is deleted (ondelete cascade).
    """
    __tablename__ = 'follows'
    # The user who is being followed.
    user_being_followed = db.Column(db.Integer, db.ForeignKey(
        'users.id', ondelete="cascade"), primary_key=True)
    # The user doing the following.
    user_following = db.Column(db.Integer, db.ForeignKey(
        'users.id', ondelete="cascade"), primary_key=True)
    def __repr__(self):
        return f"<Follow Instance | Being Followed: {self.user_being_followed} | Follower: {self.user_following}>"
class User(db.Model):
    """Account record with follower/following relationships and lists."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), nullable=False, unique=True)
    image_url = db.Column(db.Text, default=DEFAULT_IMAGE_URL)
    bio = db.Column(db.Text, nullable=True)
    password = db.Column(db.Text, nullable=False)
    favorite_character = db.Column(db.Text, nullable=True)
    # Users who follow this user (reverse edge through the follows table).
    followers = db.relationship("User",
                                secondary="follows",
                                primaryjoin=(
                                    Follows.user_being_followed == id),
                                secondaryjoin=(Follows.user_following == id)
                                )
    # Users this user follows (forward edge through the follows table).
    following = db.relationship("User",
                                secondary="follows",
                                primaryjoin=(Follows.user_following == id),
                                secondaryjoin=(
                                    Follows.user_being_followed == id)
                                )
    lists = db.relationship('List', cascade="all, delete", backref="user")
    def __repr__(self):
        """Debug-friendly representation of the user."""
        return f"<User Instance | ID: {self.id} | Username: {self.username}>"
    def following_ids(self):
        """Return the ids of every user this user is following."""
        return [followed.id for followed in self.following]
    def public_lists(self):
        """Return all of this user's lists that are not private."""
        return List.query.filter(List.user_id == self.id).filter(
            List.is_private == False).all()
    @classmethod
    def signup(cls, username, password, image_url=DEFAULT_IMAGE_URL):
        """Add a user with a bcrypt-hashed password to the session.

        The caller is responsible for committing the session.
        """
        hashed_pwd = bcrypt.generate_password_hash(password).decode('UTF-8')
        new_user = User(
            username=username,
            password=hashed_pwd,
            image_url=image_url
        )
        db.session.add(new_user)
        return new_user
    @classmethod
    def authenticate(cls, username, password):
        """Look up *username* and verify *password* against the stored hash.

        Returns the User on success, False otherwise.
        """
        match = cls.query.filter_by(username=username).first()
        if not match:
            return False
        if not bcrypt.check_password_hash(match.password, password):
            return False
        return match
class List(db.Model):
    """A user-created character list, optionally ranked and/or private."""
    __tablename__ = "lists"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    # Owning user; list rows are deleted with the user.
    user_id = db.Column(db.Integer, db.ForeignKey(
        'users.id', ondelete="CASCADE"), nullable=False)
    is_ranked = db.Column(db.Boolean, nullable=False)
    is_private = db.Column(db.Boolean, nullable=False)
    # Characters on this list, via the lists_characters join table.
    characters = db.relationship('Character', secondary="lists_characters")
    def __repr__(self):
        """Representation of instance"""
        return f"<List Instance | List ID: {self.id} | Name: {self.title} | User ID: {self.user_id}>"
class Character(db.Model):
    """A game character that can be added to users' lists."""
    __tablename__ = "characters"
    id = db.Column(db.Integer, primary_key=True)
    # External identifier — presumably from the characters API; confirm source.
    guid = db.Column(db.Text, nullable=True)
    name = db.Column(db.Text, nullable=False)
    game = db.Column(db.Text, nullable=False)
    image_url = db.Column(db.Text, nullable=True)
    def __repr__(self):
        """Representation of instance"""
        return f"<Character Instance | ID: {self.id} | Name: {self.name} | Game: {self.game}>"
class ListCharacter(db.Model):
    """Join row placing a Character on a List, with an optional rank."""
    __tablename__ = "lists_characters"
    id = db.Column(db.Integer, primary_key=True)
    character_id = db.Column(db.Integer, db.ForeignKey(
        'characters.id', ondelete="CASCADE"), nullable=False)
    list_id = db.Column(db.Integer, db.ForeignKey(
        'lists.id', ondelete="CASCADE"), nullable=False)
    # Position within a ranked list; NULL on unranked lists.
    rank = db.Column(db.Integer, nullable=True)
    characters = db.relationship('Character', backref="lists_characters")
    def __repr__(self):
        """Represention of instance"""
        return f"<ListCharacter Instance | ID: {self.id} | Character: {self.character_id} ({self.characters.name}) | List: {self.list_id} | Rank in list: {self.rank}>"
|
import cv2
import numpy as np
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
    """Binary mask of pixels whose |Sobel| gradient along *orient* lies
    within [thresh_min, thresh_max].

    Args:
        img: RGB image.
        orient: 'x' or 'y', the derivative direction.
        thresh_min, thresh_max: inclusive threshold bounds (0-255).

    Returns:
        uint8 mask with 255 where the scaled gradient is in range.
    """
    # Syntax fix: a multi-line Chinese comment in the original had lost
    # the '#' on its continuation line, breaking the file; the garbled
    # commented-out alternative implementations were removed.
    gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Absolute Sobel derivative in the requested direction.
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray_img, cv2.CV_64F, 1, 0))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray_img, cv2.CV_64F, 0, 1))
    # Scale to 8-bit so the thresholds are comparable across images.
    sobel_img = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    # Threshold into a binary mask (255 inside the band, 0 outside).
    binary_img = np.zeros_like(sobel_img)
    binary_img[(sobel_img >= thresh_min) & (sobel_img <= thresh_max)] = 255
    return binary_img
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose gradient magnitude lies in *mag_thresh*.

    Args:
        img: RGB image.
        sobel_kernel: Sobel kernel size.
        mag_thresh: inclusive (low, high) bounds after scaling to 0-255.

    Returns:
        uint8 mask with 255 where the scaled magnitude is in range.
    """
    # Syntax fix: two comment lines in the original had lost their '#'
    # on wrapped continuations, breaking the file.
    gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Sobel derivatives in x and y.
    sobelx_img = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely_img = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Gradient magnitude via the built-in (== sqrt(x**2 + y**2)).
    grad_mag = cv2.magnitude(sobelx_img, sobely_img)
    # Normalize to 0-255 and convert to 8-bit.
    scale_factor = np.max(grad_mag) / 255
    grad_mag = (grad_mag / scale_factor).astype(np.uint8)
    binary_mag = np.zeros_like(grad_mag)
    binary_mag[(grad_mag >= mag_thresh[0]) & (grad_mag <= mag_thresh[1])] = 255
    return binary_mag
def dir_thresh(img, sobel_kernel=3, dir_thresh=(0, np.pi / 2)):
    """Binary mask of pixels whose gradient *direction* lies in *dir_thresh*.

    Absolute values fold the direction into [0, pi/2]: only the edge's
    orientation matters, not which way the gradient points.

    Args:
        img: RGB image.
        sobel_kernel: Sobel kernel size.
        dir_thresh: inclusive (low, high) angle bounds in radians.

    Returns:
        mask array with 255 where the direction is in range.
    """
    gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobelx_img = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely_img = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Bug fix: the original passed |sobelx| twice, so every non-zero pixel
    # got arctan2(a, a) = pi/4 and sobely_img was never used.  Direction
    # is arctan2(|dy|, |dx|).
    grad_dir = np.arctan2(np.absolute(sobely_img), np.absolute(sobelx_img))
    binary_dir = np.zeros_like(grad_dir)
    binary_dir[(grad_dir >= dir_thresh[0]) & (grad_dir <= dir_thresh[1])] = 255
    return binary_dir
def hls_select(img, channel='s', hls_thresh=(0, 255)):
    """Threshold one channel of the HLS color space.

    Args:
        img: RGB image.
        channel: 'h', 'l', or anything else for 's'.
        hls_thresh: inclusive (low, high) bounds.

    Returns:
        uint8 mask with 255 where the chosen channel is in range.
    """
    hls_img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    plane_index = {'h': 0, 'l': 1}.get(channel, 2)
    plane = hls_img[:, :, plane_index]
    binary_channel = np.zeros_like(plane)
    binary_channel[(plane >= hls_thresh[0]) & (plane <= hls_thresh[1])] = 255
    return binary_channel
def lab_select(img, lab_thresh=(0, 255)):
    """Threshold the b channel of the CIE Lab color space.

    Args:
        img: RGB image.
        lab_thresh: (low, high) bounds — low is exclusive, high inclusive.

    Returns:
        mask array with 255 where the b channel is in range.
    """
    lab_img = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
    plane = lab_img[:, :, 2]
    binary_lab = np.zeros_like(plane)
    binary_lab[(plane > lab_thresh[0]) & (plane <= lab_thresh[1])] = 255
    return binary_lab
def luv_select(img, luv_thresh=(0, 255)):
    """Threshold the L channel of the CIE LUV color space.

    Args:
        img: RGB image.
        luv_thresh: (low, high) bounds — low is exclusive, high inclusive.

    Returns:
        mask array with 255 where the L channel is in range.
    """
    luv_img = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
    plane = luv_img[:, :, 0]
    binary_luv = np.zeros_like(plane)
    binary_luv[(plane > luv_thresh[0]) & (plane <= luv_thresh[1])] = 255
    return binary_luv
def thresholding(img):
    """Combine gradient- and color-based masks into one binary image.

    Args:
        img: RGB image.

    Returns:
        uint8 mask with 255 wherever any configured detector fires.
    """
    x_threshold=abs_sobel_thresh(img,orient='x',thresh_min=90,thresh_max=255)
    mag_threshold=mag_thresh(img,sobel_kernel=3,mag_thresh=(30,170))
    dir_threshold=dir_thresh(img,sobel_kernel=3,dir_thresh=(0.7,1.3))
    hls_threshold=hls_select(img,hls_thresh=(160,255))  # channel order is h, l, s
    lab_threshold=lab_select(img,lab_thresh=(155,210))
    luv_threshold=luv_select(img,luv_thresh=(225,255))
    # Combine: (x-gradient AND magnitude) OR (direction AND HLS) OR Lab OR LUV.
    thresh_img=np.zeros_like(x_threshold)
    thresh_img[((x_threshold == 255) & (mag_threshold == 255)) | ((dir_threshold == 255) & (hls_threshold == 255)) | (lab_threshold == 255) | (luv_threshold == 255)]=255
return thresh_img |
# Example 14.1 Testing for Unit Roots
# Augmented Dickey-Fuller Test for Unit Roots
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller
# Annual US macro series, 66 rows — Y and C columns are used below;
# presumably income and consumption, confirm against the course data notes.
data = pd.read_csv("http://web.pdx.edu/~crkl/ceR/data/usyc87.txt",index_col='YEAR',sep='\s+',nrows=66)
y = data['Y']
c = data['C']
# select one variable to test
x = c
# first difference of x
# x = c.diff()[1:]
def adf_output(res, reg_result=False):
    """Pretty-print an adfuller(..., store=True) result.

    Args:
        res: tuple (statistic, p-value, critical-values dict, ResultsStore).
        reg_result: when True, also print the underlying OLS summary.
    """
    store = res[-1]
    summary = pd.Series(
        [res[0], res[1], store.usedlag, store.nobs],
        index=['Test Statistic', 'p-value', 'Lags Used',
               'Number of Observations Used'])
    for name, value in res[2].items():
        summary['Critical Value (%s)' % name] = value
    if reg_result:
        print(store.resols.summary())
    print(summary, '\n')
# dickey-fuller unit-root test
# test for the unit roots of the variable (null hypothesis)
# (1) which model? III, II, I (ct, c, nc)
# (2) how many lags augmented? (chosen automatically by AIC)
adf_test3 = adfuller(x,regression='ct',autolag='AIC',store=True) # trend and drift
adf_output(adf_test3,reg_result=True)
adf_test2 = adfuller(x,regression='c',autolag='AIC',store=True) # drift
adf_output(adf_test2,reg_result=True)
adf_test1 = adfuller(x,regression='nc',autolag='AIC',store=True) # none
adf_output(adf_test1,reg_result=True)
# in addition, we can test the quadratic trend model (ctt)
adf_test4 = adfuller(x,regression='ctt',autolag='AIC',store=True) # quadratic trend and drift
adf_output(adf_test4,reg_result=True)
|
from obiektowosc.Human import Human
class Woman(Human):
    """Human subclass; passes "woman" and 55 to the Human constructor
    (presumably kind and age — confirm in Human) and introduces herself."""
    # Overrides the base-class constructor.
    def __init__(self):
        # ``super()`` reaches the base-class method (here: the constructor).
        super().__init__("woman", 55)
        self.przedstawSie()
    def makeUp(self, duty=False):
        """Report readiness; with duty=True an extra hour is needed."""
        if duty:
            print ("Wait an hour more, I need do my makeUp")
        else:
            print("Ok, I'm ready to go")
    def przedstawSie(self):
        """Introduce herself (Polish: 'introduce yourself')."""
        print("I'm woman and my age shouldn't interest you")
    def przedstawSieMilo(self):
        """Introduce herself nicely: her own intro, then the base-class one."""
        self.przedstawSie()
        super().przedstawSie()
|
from flask import Flask
from model import Question, connect_to_db, db
import sys
import random
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"  # NOTE(review): hard-coded secret; acceptable only for an exercise
def distribute_questions():
    """Print a random, evenly-split sample of questions (Python 2 script).

    sys.argv[1] gives the total question count; when it is even, half the
    questions are drawn from strand 1 and half from strand 2.  Odd counts
    print nothing (the modulo guard skips them).
    """
    try:
        if sys.argv[1]:
            num_questions = int(sys.argv[1])
            strand_1 = Question.query.filter_by(strand_id=1).all()
            strand_2 = Question.query.filter_by(strand_id=2).all()
            if num_questions % 2 == 0:
                # Python 2: integer division yields whole sample sizes here.
                final_questions = random.sample(strand_1, num_questions/2) + random.sample(strand_2, num_questions/2)
                print ', '.join([str(i) for i in final_questions])
    except IndexError:
        print 'Please enter a number to receive questions'
if __name__ == "__main__":
    # We have to set debug=True here, since it has to be True at the
    # point that we invoke the DebugToolbarExtension
    app.debug = True
    # make sure templates, etc. are not cached in debug mode
    app.jinja_env.auto_reload = app.debug
    connect_to_db(app)
    distribute_questions()
    # no web server needed: the script only prints questions and exits
    # app.run(port=5000, host='0.0.0.0')
|
#!/usr/bin/env python2.7
"""Installer for NuTermiNuX."""
import argparse
import datetime
import errno
import distutils.version
import glob
import logging
import os
import platform
import shutil
import subprocess
import sys
# Tool names and minimum platform version.
BREW = "brew"
BREWDIR = "/home/linuxbrew/.linuxbrew/bin"
CENTOS = "CentOS"
MIN_VERSION = "6.9"
NUTERMINUX = "NuTermiNuX"
PIP = "pip"
# Dotfiles and shell profiles this installer backs up / symlinks.
HOME = os.environ["HOME"]
PYLINTRC = os.path.join(HOME, ".pylintrc")
VIMRC = os.path.join(HOME, ".vimrc")
VIMDIR = os.path.join(HOME, ".vim")
BASHRC = os.path.join(HOME, ".bashrc")
ZSHRC = os.path.join(HOME, ".zshrc")
PROFILE = os.path.join(HOME, ".profile")
BPROFILE = os.path.join(HOME, ".bash_profile")
ZPROFILE = os.path.join(HOME, ".zprofile")
logging.basicConfig(
    format="%(asctime)s %(name)s %(lineno)d %(levelname)-8s %(message)s",
    level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
def query_yes_no(question, default="no"):
    """Ask a yes/no question via raw_input() and return their answer.
    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).
    The "answer" return value is True for "yes" or False for "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # Loop until the user supplies a recognizable answer (or just Enter
    # when a default exists).  Python 2: raw_input.
    while True:
        sys.stdout.write("%s%s" % (question, prompt))
        choice = raw_input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def run_cmd(cmd, err=None):
    """Run *cmd* in a shell.

    Returns None on success, or *err* (defaulting to "'<cmd>' failed")
    when the command exits non-zero.
    """
    LOGGER.debug("Running %s", cmd)
    failure_message = err if err is not None else "'%s' failed" % cmd
    ret = subprocess.call(cmd, shell=True)
    if not ret:
        return None
    LOGGER.error("%s exited with %d code", cmd, ret)
    return failure_message
def restore(rpath):
    """Remove the symlink at *rpath* and move the newest .bkp back in place."""
    LOGGER.debug("Restoring %s", rpath)
    if os.path.islink(rpath):
        os.unlink(rpath)
    LOGGER.debug("Restoring the backup of %s", rpath)
    backups = sorted(glob.glob("%s*.bkp" % rpath))
    if not backups:
        return
    LOGGER.debug("Backups: %s", backups)
    # Timestamped names sort chronologically, so the last one is newest.
    shutil.move(os.path.join(backups[-1]), rpath)
def uninstall():
    """Restore old configuration.

    Uninstalls linuxbrew, restores the newest backups of the managed
    dotfiles, strips linuxbrew lines from the shell profiles, and removes
    leftover .linuxbrew directories.
    """
    LOGGER.info("Uninstalling %s", NUTERMINUX)
    LOGGER.info("Uninstalling linux%s", BREW)
    uninstall_brew = ('ruby -e "$(curl -fsSL https://raw.githubusercontent.com/'
                      'Homebrew/install/master/uninstall)"')
    _ = run_cmd(uninstall_brew, "Uninstalling brew failed")
    restore(VIMRC)
    restore(VIMDIR)
    restore(BASHRC)
    restore(ZSHRC)
    restore(PYLINTRC)
    # Remove any linuxbrew PATH lines the installer added to the profiles.
    if os.path.isfile(PROFILE):
        remove_profile = "sed -i '/linuxbrew/d' %s" % PROFILE
        _ = run_cmd(remove_profile)
    if os.path.isfile(BPROFILE):
        remove_bprofile = "sed -i '/linuxbrew/d' %s" % BPROFILE
        _ = run_cmd(remove_bprofile)
    if os.path.isfile(ZPROFILE):
        remove_zprofile = "sed -i '/linuxbrew/d' %s" % ZPROFILE
        _ = run_cmd(remove_zprofile)
    # linuxbrew may live under either the user home or /home/linuxbrew.
    for lb_dir in [HOME, "/home/linuxbrew"]:
        lb_path = os.path.join(lb_dir, ".linuxbrew")
        if os.path.isdir(lb_path):
            shutil.rmtree(lb_path)
def force_symlink(dest_link, to_be_linked):
    """Symlink *to_be_linked* -> *dest_link*, replacing any existing target
    (equivalent to ``ln -sf``)."""
    LOGGER.debug("Symlinking %s to %s", to_be_linked, dest_link)
    try:
        os.symlink(dest_link, to_be_linked)
    except OSError as exc:  # to_be_linked exists.
        if exc.errno == errno.EEXIST:
            try:
                os.remove(to_be_linked)
            except OSError as exc:
                # NOTE(review): EPERM is assumed to mean "target is a
                # directory"; some platforms raise EISDIR instead — confirm.
                if exc.errno == errno.EPERM:
                    shutil.rmtree(to_be_linked)
            os.symlink(dest_link, to_be_linked)
def setup_nuterminux_config():
    """Point the user's vim and pylint config files at the bundled dotfiles."""
    LOGGER.info("Setting up %s configuration", NUTERMINUX)
    here = os.path.dirname(os.path.realpath(__file__))
    dotfiles_dir = os.path.join(os.path.dirname(here), "dotfiles")
    vimrc_src = os.path.join(dotfiles_dir, "vim", "%s_vimrc" % NUTERMINUX.lower())
    force_symlink(vimrc_src, VIMRC)
    force_symlink(os.path.join(dotfiles_dir, "vim", "dotvim"), VIMDIR)
    # bash/zsh dotfiles are intentionally not linked here.
    force_symlink(os.path.join(dotfiles_dir, "py", "pylintrc"), PYLINTRC)
def plug_install():
    """Run vim-plug's PlugInstall non-interactively to fetch all vim plugins."""
    LOGGER.info("Installing vim plugins")
    failure = run_cmd("vim +'PlugInstall --sync' +qa",
                      "vim plugins installation failed")
    if failure is not None:
        LOGGER.warning(failure)
def backup(bpath):
    """Move *bpath* aside to a timestamped .bkp file, if it exists."""
    if not os.path.exists(bpath):
        return
    LOGGER.info("Backing up %s", bpath)
    stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    shutil.move(bpath, "%s.%s.bkp" % (bpath, stamp))
def backup_current_config():
    """Back up the vim and pylint config files the install will replace."""
    LOGGER.debug("Backing up current config")
    # BASHRC/ZSHRC backups are intentionally disabled upstream.
    for cfg in (VIMRC, VIMDIR, PYLINTRC):
        backup(cfg)
def which(program):
    """Locate *program* like the unix `which`: return its path or None."""
    def is_exe(fpath):
        """True when fpath is a regular file with the execute bit set."""
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    head, _ = os.path.split(program)
    if head:
        # An explicit path was given: accept it only if it is executable.
        return program if is_exe(program) else None
    # Bare command name: scan every PATH entry in order.
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if is_exe(candidate):
            return candidate
    return None
def install_vim8():
    """Install vim 8 via brew plus its helpers (neovim client, python2, ctags, pylint).

    A failed vim install aborts the program; helper failures only warn.
    """
    LOGGER.info("Installing vim 8")
    failure = run_cmd("%s install vim" % BREW)
    if failure is not None:
        LOGGER.critical(failure)
        sys.exit(1)
    msg = "Your vim plugins might not work as expected."
    nvm = "neovim"
    nvp = "%s python client" % nvm
    LOGGER.debug("%s3 installing %s", PIP, nvp)
    failure = run_cmd("%s3 install --upgrade %s" % (PIP, nvm))
    if failure is not None:
        LOGGER.warning("%s. %s", failure, msg)
    py2 = "python@2"
    failure = run_cmd("%s install %s" % (BREW, py2))
    if failure is not None:
        LOGGER.warning("%s. %s", failure, msg)
    # ctags and pylint are only installed when not already on PATH.
    ctags = "ctags"
    if which(ctags) is None:
        failure = run_cmd("%s install %s" % (BREW, ctags))
        if failure is not None:
            LOGGER.warning("%s. %s", failure, msg)
    pylint = "pylint"
    if which(pylint) is None:
        failure = run_cmd("%s install %s" % (PIP, pylint))
        if failure is not None:
            LOGGER.warning("%s. %s", failure, msg)
def install_linuxbrew():
    """Install the linuxbrew package manager and put it on PATH.

    Exits the process when the upstream install script fails.
    """
    LOGGER.debug("Checking if %s is already installed.", BREW)
    if which("%s" % BREW) is not None:
        LOGGER.info("%s is already installed", BREW)
        return
    LOGGER.info("Installing linux%s", BREW)
    install = ('/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/'
               'Homebrew/install/HEAD/install.sh)"')
    err = run_cmd(install, "Linux%s install failed" % BREW)
    if err is not None:
        LOGGER.critical(err)
        sys.exit(1)
    LOGGER.info("Setting up PATH. Your PATH is %s", os.environ.get("PATH"))
    # Prepend the brew bin/sbin directories to PATH for this process.
    for lb_dir in [HOME, "/home/linuxbrew"]:
        lb_path = os.path.join(lb_dir, ".linuxbrew")
        if os.path.isdir(lb_path):
            os.environ["PATH"] = "%s%s%s%s%s" % (os.path.join(lb_path, "bin"),
                                                 os.pathsep,
                                                 os.path.join(lb_path, "sbin"),
                                                 os.pathsep, os.environ.get("PATH"))
    # Persist the PATH change for future shells (.bash_profile, .profile, .zprofile).
    bash = ('test -r ~/.bash_profile && echo "export PATH=\'$(brew --prefix)/bin'
            ':$(brew --prefix)/sbin\'":\'"$PATH"\' >> ~/.bash_profile')
    _ = run_cmd(bash)
    prof = ('echo "export PATH=\'$(brew --prefix)/bin:$(brew --prefix)/sbin\'":'
            '\'"$PATH"\' >> ~/.profile')
    _ = run_cmd(prof)
    zprof = ('echo "export PATH=\'$(brew --prefix)/bin:$(brew --prefix)/sbin\'":'
             '\'"$PATH"\' >> ~/.zprofile')
    _ = run_cmd(zprof)
    #hello = "brew install hello"
    #_ = run_cmd(hello)
def centos_version():
    """Verify the OS is CentOS and recent enough; optionally run `yum update`.

    NOTE(review): platform.linux_distribution() was removed in Python 3.8 and
    distutils.version is deprecated — this only works on older interpreters;
    confirm the target environment.
    """
    LOGGER.debug("Checking prerequisite %s version", CENTOS)
    linux_dist = platform.linux_distribution()
    if not linux_dist[0].lower().startswith(CENTOS.lower()):
        LOGGER.critical("%s is supported only on %s", NUTERMINUX, CENTOS)
        sys.exit(1)
    if (distutils.version.StrictVersion(linux_dist[1]) <
            distutils.version.StrictVersion(MIN_VERSION)):
        LOGGER.warning("%s requires at least %s %s", NUTERMINUX, CENTOS,
                       MIN_VERSION)
        # Offer an update; declining ends the run so the user can retry later.
        if query_yes_no("Do you want to 'yum update' (requires sudo)?"):
            err = run_cmd("sudo yum -y update", "yum update failed")
            if err is not None:
                LOGGER.critical(err)
                sys.exit(1)
        else:
            LOGGER.info("yum update is harmless and will not affect your build. "
                        "Consider running it again.")
            sys.exit(0)
def main():
    """Parse CLI flags and run either the uninstall or the full install flow."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--uninstall', action='store_true',
                        help="Uninstall NuTermiNuX")
    if parser.parse_args().uninstall:
        uninstall()
        return
    # Install.
    centos_version()
    install_linuxbrew()
    install_vim8()
    backup_current_config()
    setup_nuterminux_config()
    plug_install()
    LOGGER.info("Install successfull. Welcome to %s", NUTERMINUX)
# Script entry point.
if __name__ == "__main__":
    main()
|
from c1 import hex_to_raw
import string
# Relative frequency (percent) of each letter in typical English text, used
# as the reference distribution when scoring candidate plaintexts.
character_frequencies = {
    'e': 12.02,
    't': 9.10,
    'a': 8.12,
    'o': 7.68,
    'i': 7.31,
    'n': 6.95,
    's': 6.28,
    'r': 6.02,
    'h': 5.92,
    'd': 4.32,
    'l': 3.98,
    'u': 2.88,
    'c': 2.71,
    'm': 2.61,
    'f': 2.30,
    'y': 2.11,
    'w': 2.09,
    'g': 2.03,
    'p': 1.82,
    'b': 1.49,
    'v': 1.11,
    'k': 0.69,
    'x': 0.17,
    'q': 0.11,
    'j': 0.10,
    'z': 0.07
}
def get_frequencies(text):
    """Return a dict mapping each character of *text* to its frequency in percent.

    Every ascii lowercase letter is always present (possibly at 0); any other
    character that occurs in *text* (punctuation, digits, ...) gets its own
    entry as well.
    """
    counts = dict.fromkeys(string.ascii_lowercase, 0)
    for ch in text:
        counts[ch] = counts.get(ch, 0) + 1
    total = len(text)
    if total != 0:
        # Convert raw counts to percentages.
        for ch in counts:
            counts[ch] = counts[ch] / total * 100
    return counts
def score_english(text):
    """Score how English-like *text* is; lower is better.

    Sums absolute deviations from the reference letter frequencies; spaces
    are free, light punctuation and digits cost 15, anything else costs 100.
    """
    score = 0
    for ch, freq in get_frequencies(text).items():
        if ch in character_frequencies:
            score += abs(freq - character_frequencies[ch])
        elif ch == ' ':
            continue
        elif ch in "'\"!.?" or ch in string.digits:
            score += 15
        else:
            score += 100
    return score
def bytes_to_string(byte_string):
    """Decode *byte_string* to str by mapping each byte to chr(byte).

    Equivalent to latin-1 decoding; joins once instead of building the
    string with quadratic `+=` concatenation.
    """
    return "".join(map(chr, byte_string))
def xor(byte_string, key):
    """XOR every byte of *byte_string* with the first byte of *key*.

    Builds the result in one pass rather than with quadratic bytes
    concatenation.
    """
    key_val = key[0]  # single-byte key taken as an int
    return bytes(b ^ key_val for b in byte_string)
def decrypt_single_xor(hex_string):
    """Brute-force a single-byte XOR cipher over *hex_string*.

    Returns (plaintext, key_char) for the candidate whose character
    frequencies look most English (lowest score).

    Bug fix: the original returned the loop variable `c` after the loop
    finished — i.e. always the LAST character of string.printable — instead
    of the key that produced the best-scoring plaintext (compare
    decrypt_block_xor, which tracks the winner correctly).
    """
    byte_string = hex_to_raw(hex_string)
    lowest_score = 100000
    lowest_string = ""
    lowest_c = ""
    for c in string.printable:
        xor_result = xor(byte_string, bytes([ord(c)]))
        xor_string = bytes_to_string(xor_result)
        score = score_english(xor_string)
        if score < lowest_score:
            lowest_score = score
            lowest_string = xor_string
            lowest_c = c
    return lowest_string, lowest_c
def decrypt_block_xor(byte_string):
    """Return the most likely single-byte XOR key for *byte_string*, as bytes."""
    best_score = 100000
    best_key = ""
    for candidate in string.printable:
        plaintext = bytes_to_string(xor(byte_string, bytes([ord(candidate)])))
        candidate_score = score_english(plaintext)
        if candidate_score < best_score:
            best_score = candidate_score
            best_key = candidate
    return bytes([ord(best_key)])
if __name__ == '__main__':
    # Sanity check: single-byte XOR via xor() matches the direct operator.
    b = b'\x3C'
    print(xor(b, (b'\x08'))[0])
    print(b[0] ^ (b'\x08')[0])
    # Cryptopals set 1 challenge 3 ciphertext.
    hex_string = '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'
    #print(hex_to_raw(hex_string))
    print(decrypt_single_xor(hex_string))
|
from all_anagrams import *
def pair(d):
    # Print the first pair of anagrams (word1 alphabetically before word2)
    # that differ in exactly two positions, then stop.
    # *d* maps some key to a list of mutually anagrammatic words.
    for anagrams in d.values():
        for word1 in anagrams:
            for word2 in anagrams:
                if word1<word2 and word_distance(word1,word2)==2:
                    print word1,word2
                    return
def word_distance(x,y):
    """Count the positions at which *x* and *y* differ.

    Comparison stops at the shorter of the two strings (zip truncation),
    matching the original behaviour. The explicit list() copies and manual
    counter were unnecessary — a generator over zip does the same work.
    """
    return sum(1 for a, b in zip(x, y) if a != b)
def main():
    """Build the anagram dictionary and report the first qualifying pair."""
    pair(make_anagram_list())


if __name__=='__main__':
    main()
|
import nltk
from nltk import FreqDist
from nltk.collocations import*
# For each Harry Potter book: tokenize, remove stopwords, then print word,
# bigram and trigram frequencies plus collocation scores.
for num in range(1, 8):
    print('HP'+str(num)+'.txt')
    filename = 'HP'+str(num)+'.txt'
    # Bug fix: the original never closed the file, leaking one handle per book.
    with open(filename) as myText:
        myTexttext = myText.read()
    hp1 = myTexttext
    len(hp1)          # (REPL leftover: result discarded)
    hp1tokens = nltk.word_tokenize(hp1)
    len(hp1tokens)    # (REPL leftover: result discarded)
    # Keep alphabetic tokens only, lowercased.
    hp1words = [word.lower() for word in hp1tokens if word.isalpha()]
    nltkstopwords = nltk.corpus.stopwords.words('english')
    morestopwords = ["'re", "'ve", "'ll", "'d", "'s", "'t", ".", ",", "'", "''", "``", "?", "!", "...", ";", "'m", "--"]
    evenmorestopwords = ["chapter", "'s","\"", "1"]
    stopwords = nltkstopwords + morestopwords + evenmorestopwords
    stoppedhp1words = [w for w in hp1words if not w in stopwords]
    hp1fdist = FreqDist(stoppedhp1words)
    hp1fdistkeys = list(hp1fdist.keys())
    hp1fdistkeys[:50]
    hp1topkeys = hp1fdist.most_common(150)
    print('FREQUENCY')
    for pair in hp1topkeys:
        print(pair)
    # Normalize counts by the stopword-filtered token total.
    numwords = len(stoppedhp1words)
    hp1topkeysnormalized = [(word, freq/numwords) for (word, freq) in hp1topkeys]
    for pair in hp1topkeysnormalized:
        print(pair)
    hp1bigrams = list(nltk.bigrams(stoppedhp1words))
    hp1trigrams = list(nltk.trigrams(stoppedhp1words))
    # hp1bigrams = list(nltk.bigrams(hp1words))
    print('BIGRAMS')
    for bigram in hp1bigrams[:50]:
        print(bigram)
    print('BIGRAM FREQUENCY')
    bigramFreq = FreqDist(hp1bigrams)
    for bigram in bigramFreq.most_common(50):
        print(bigram)
    print('TRIGRAMS')
    for trigram in hp1trigrams[:50]:
        print(trigram)
    print('TRIGRAM FREQUENCY')
    trigramFreq = FreqDist(hp1trigrams)
    for trigram in trigramFreq.most_common(50):
        print(trigram)
    # Collocations on the unfiltered words, filtering stopwords in the finder.
    bigram_measures = nltk.collocations.BigramAssocMeasures()
    finder = BigramCollocationFinder.from_words(hp1words)
    finder.apply_word_filter(lambda w: w in stopwords)
    scored = finder.score_ngrams(bigram_measures.raw_freq)
    for bscore in scored[:50]:
        print(bscore)
    # NOTE(review): this repeats the raw_freq listing above verbatim —
    # possibly intentional duplicate output; kept as-is.
    scored = finder.score_ngrams(bigram_measures.raw_freq)
    for bscore in scored[:50]:
        print(bscore)
    scored = finder.score_ngrams(bigram_measures.pmi)
    print('PMI')
    for bscore in scored[:50]:
        print(bscore)
    # PMI again, but requiring a minimum bigram frequency of 5.
    finder3 = BigramCollocationFinder.from_words(hp1words)
    finder3.apply_freq_filter(5)
    scored = finder3.score_ngrams(bigram_measures.pmi)
    for bscore in scored[:50]:
        print(bscore)
|
import re
from collections import Counter
# Count word occurrences in text read from the console or, if nothing was
# entered, loaded from text.txt.
text = input("Введите текст для подсчета слов: ")
if text == "":
    print("К сожалению, вы не ввели текст в консоли, поэтому текст будет загружен из файла!")
    # Bug fix: close the file handle instead of leaking it.
    with open('text.txt', 'r') as document_text:
        text = document_text.read()
    print(text)
else:
    print("Текст успешно считан с консоли, результат:")
array_text = re.split(r'\W+', text.lower())
# Bug fix: the original `array_text.remove('')` raised ValueError whenever the
# split produced no empty token, and removed at most one of them; filter out
# all empty tokens (leading/trailing non-word characters produce them) instead.
array_text = [word for word in array_text if word]
print("Слов в тексте - {0} \nКажое слово входит:".format(len(array_text)))
text_counts = Counter(array_text)
for words in text_counts:
    print("{0} - {1} ".format(words, text_counts[words]))
|
from django.conf import settings
from api.models import CronJob, CronJobStatus
def ifrc_go(request):
    """Template context processor: environment flags plus the latest ingest error.

    :param request: the current HTTP request (unused, required by the API)
    :return: dict merged into every template context
    """
    # Most recent erroneous cron job, or None when there is none.
    cron_error = CronJob.objects.filter(status=CronJobStatus.ERRONEOUS).order_by('-id').first()
    return {
        # Provide a variable to define current environment
        'GO_ENVIRONMENT': settings.GO_ENVIRONMENT,
        # For maintenance mode:
        'DJANGO_READ_ONLY': settings.DJANGO_READ_ONLY,
        # For header /_!_\ error symbol in base_site.html
        # (idiom fix: `True if x else False` replaced with a direct boolean test)
        'HAVING_INGEST_ISSUE': cron_error is not None,
        'INGEST_ISSUE_ID': cron_error.id if cron_error else None,
    }
|
import sys
# Number of test cases, read from the first line of stdin.
num = int(sys.stdin.readline())
def findprintorder(numbers, findidx, numofnumber):
    """Print the 1-based position that numbers[findidx] ends up at when
    *numbers* is sorted into descending order.

    The sort works by rotating any mismatched element to the end of the
    list; idxlst undergoes the same rotations, so idxlst[i] records which
    original index landed at sorted position i (the rotations also determine
    how ties between equal values are broken).
    """
    idxlst = [i for i in range(numofnumber)]
    sortednumbers = sorted(numbers, reverse = True)
    for i in range(numofnumber):
        # Rotate until the correct descending-order value sits at slot i.
        while(numbers[i] != sortednumbers[i]):
            numbers = numbers[:i] + numbers[i+1:] + [numbers[i]]
            idxlst = idxlst[:i] + idxlst[i+1:] + [idxlst[i]]
    idx = [i for i in range(numofnumber)]
    # Map original index -> final sorted position.
    dic = { name:value for name, value in zip(idxlst, idx) }
    print(dic[findidx]+1)
# Process each test case: read (count, target index), then the numbers, and
# print the target element's position in the descending order.
for i in range(num):
    numofnumber, findidx = map(int, sys.stdin.readline().split())
    # Bug fix: the original had `21int` (a syntax error) instead of `int`.
    numbers = list(map(int, sys.stdin.readline().split()))
    findprintorder(numbers, findidx, numofnumber)
|
# Read n bar lengths and print the largest rectangle area that can be framed
# from two pairs of equal-length bars (four equal bars make a square);
# print 0 when no two pairs exist.
n = int(input())
bars = list(map(int, input().split()))
bars.sort(reverse=True)
count = 0        # length of the current run of equal bars, minus one
ans = 1          # product of the pair lengths chosen so far
multi_count = 2  # how many pairs are still needed
for i in range(0, len(bars) - 1):
    if bars[i] == bars[i + 1]:
        count += 1
    else:
        count = 0
    if count == 3:
        # Four equal bars in a row: answer is a square of that side.
        print(bars[i] ** 2)
        exit()
    elif count == 1:
        # A new pair; bars are sorted descending, so take it greedily.
        ans *= bars[i]
        multi_count -= 1
        if multi_count == 0:
            print(ans)
            exit()
print(0)
from itertools import islice,count
from math import sqrt
import sys
def isPrime(n):
    """Trial-division primality test.

    Bug fix: the original only tried odd divisors from 3 up, so every even
    number >= 4 (e.g. isPrime(4)) was reported prime; an explicit parity
    check closes that gap while keeping the odd-divisor loop unchanged.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # Odd candidate divisors 3, 5, ... up to sqrt(n).
    for i in islice(count(3, 2), int(sqrt(n) - 1) // 2):
        if n % i == 0:
            return False
    return True
# Project Euler 27-style search: among quadratics n*n + a*n + b with b an odd
# prime below t and a in {-b, -b+2, ..., b-2}, find the (a, b) whose values
# stay prime for the longest run of consecutive n starting at 1.
inputs = sys.stdin
t = int(next(inputs))
nmax = 0
best_a = 0
best_b = 0
for b in xrange(3,t,2):
    if isPrime(b):
        for a in xrange(-b,b,2):
            # n ends up as the first value for which the quadratic is composite,
            # so larger n means a longer prime run.
            n = 1
            while isPrime(n*n + a*n + b): n += 1
            if n>nmax:
                nmax = n
                best_a = a
                best_b = b
print best_a,best_b
from pylab import *
from scipy.optimize import curve_fit
# Scatter-plot two measured frequency columns against duty cycle, with a
# horizontal reference line at the first freq2 value.
dcc, freq1, freq2 = loadtxt("freqvsdcc2.txt", usecols=(0,1,2), skiprows= 0, unpack =True)
plot(dcc,freq1, 'o')
plot(dcc,freq2, 'o')
hlines(freq2[0],dcc[0],dcc[-1],color='green')
show()
from audiolazy import *
import matplotlib.pyplot as plt
import numpy as np
from functools import partial
import scipy.io.wavfile as wv
rate = 44100
# audiolazy sHz: `s` converts seconds to samples, `Hz` hertz to rad/sample
# (per the audiolazy API — confirm against its docs).
s, Hz = sHz(rate)
ms = 1e-3 * s
# Equal-temperament note frequencies in Hz, C0 through B8 (A4 = 440).
notes = {'C0':16.35,'C#0':17.32,'D0':18.35,'D#0':19.45,'E0':20.60,'F0':21.83,'F#0':23.12,'G0':24.50,
         'G#0':25.96,'A0':27.50,'A#0':29.14,'B0':30.87,'C1':32.70,'C#1':34.65,'D1':36.71,'D#1':38.89,
         'E1':41.20,'F1':43.65,'F#1':46.25,'G1':49.00,'G#1':51.91,'A1':55.00,'A#1':58.27,'B1':61.74,
         'C2':65.41,'C#2':69.30,'D2':73.42,'D#2':77.78,'E2':82.41,'F2':87.31,'F#2':92.50,'G2':98.00,
         'G#2':103.83,'A2':110.00,'A#2':116.54,'B2':123.47,'C3':130.81,'C#3':138.59,'D3':146.83,'D#3':155.56,
         'E3':164.81,'F3':174.61,'F#3':185.00,'G3':196.00,'G#3':207.65,'A3':220.00,'A#3':233.08,'B3':246.94,
         'C4':261.63,'C#4':277.18,'D4':293.66,'D#4':311.13,'E4':329.63,'F4':349.23,'F#4':369.99,'G4':392.00,
         'G#4':415.30,'A4':440.0,'A#4':466.16,'B4':493.88,'C5':523.25,'C#5':554.37,'D5': 587.33,'D#5': 622.25,
         'E5':659.26,'F5':698.46,'F#5':739.99,'G5':783.99,'G#5': 830.61,'A5': 880.0,'A#5':932.33,'B5':987.77,
         'C6':1046.50,'C#6':1108.73,'D6':1174.66,'D#6':1244.51,'E6':1318.51,'F6':1396.91,'F#6':1479.98,'G6':1567.98,
         'G#6':1661.22,'A6':1760.00,'A#6':1864.66,'B6':1975.53,'C7':2093.00,'C#7':2217.46,'D7':2349.32,'D#7':2489.02,
         'E7':2637.02,'F7':2793.83,'F#7':2959.96,'G7':3135.96,'G#7':3322.44,'A7':3520.00,'A#7':3729.31,'B7':3951.07,
         'C8':4186.01,'C#8':4434.92,'D8':4698.63,'D#8':4978.03,'E8':5274.04,'F8':5587.65,'F#8':5919.91,'G8':6271.93,
         'G#8':6644.88,'A8':7040.00,'A#8':7458.62,'B8':7902.13
         }
# Seeded generator so the detune randomness is reproducible across runs.
rng = np.random.default_rng(seed=1234)
# note_f = 110
# note_a = 0.3
# sig1 = note_a*floor(sinusoid(note_f * Hz)) + note_a*ceil(sinusoid(note_f * Hz))
# sig2 = white_noise()
# mix_wt = 0.99
# sig = mix_wt * sig1 + (1-mix_wt) * sig2
#
# env = ones(25*s).append(fadeout(2*s))
# # print(sig.take(100))
# dur_bw_f = 20 * s # Some few seconds of audio
# dur_f2 = 0.2 * s # Some few seconds of audio
# # freq = line(dur, 800, 200).append(200*ones())
# # lfo_f = 4
# lfo_f = line(dur_bw_f,4,110).append(110*ones())
# freq1 = 200 + 400*(1+sinusoid(lfo_f * Hz))
# freq2 = line(dur_f2,8000,0).append(zeros())
# bw = line(dur_bw_f, 1000, 100).append(100*ones())
# print(freq.take(500))
# freq = line(dur, 8000, 50).append(50*fadeout(5*s)) # A lazy iterable range
# bw = line(dur, 240, 100).append(100*fadeout(5*s))
# filt = resonator((freq1+freq2) * Hz, bw * Hz) # A simple bandpass filter
# # filt = lowpass(freq * Hz)
class oscillator:
    """
    Generates a lazy oscillation whose base range is [-1,1], scaled by *amp*
    and offset by *level*; the stream is exposed as ``self.sig``.
    """
    def __init__(self, type, f, amp=1.0, level=0.0):
        """
        :param type: can be one of ['sin','square','saw','reverse_saw','triangle']
        :param f: frequency of oscillator in Hertz (a number or a Stream)
        :param amp: peak amplitude multiplier
        :param level: constant (DC) offset added to the signal
        :raises ValueError: for an unknown *type*
        """
        if type not in ['sin', 'square', 'saw', 'reverse_saw', 'triangle']:
            raise ValueError("type must be one of ['sin','square','saw','reverse_saw','triangle']")
        if type == 'sin':
            self.sig = level + amp * sinusoid(f * Hz)
        elif type == 'square':
            # floor + ceil of a sine collapses it to -1/+1 steps.
            self.sig = level + amp * (floor(sinusoid(f * Hz)) + ceil(sinusoid(f * Hz)))
        elif type == 'saw':
            # One rising 0->1 ramp per period, repeated forever.
            cycle_samples = ceil(s / f)
            one_wave = line(cycle_samples, 0, 1, finish=True)
            self.sig = level + amp * cycle(one_wave)
        elif type == 'reverse_saw':
            # Same ramp, falling 1->0.
            cycle_samples = ceil(s / f)
            one_wave = line(cycle_samples, 1, 0, finish=True)
            self.sig = level + amp * cycle(one_wave)
        elif type == 'triangle':
            # Integrate a square wave; z/(z-1) is a running-sum filter.
            int_filt = z / (z - 1)
            if isinstance(f,Stream):
                # Time-varying frequency: normalise by the integrated frequency.
                scale_factor = (1 / s) * (int_filt(f)/int_filt(ones()))
            else:
                scale_factor = f / s
            self.sig = level - scale_factor/2 + scale_factor * amp * int_filt(floor(sinusoid(f * Hz)) + ceil(sinusoid(f * Hz)))
class track:
    """Mixer: collects (signal, weight) pairs and renders their weighted sum
    shaped by the ``track_vol`` envelope."""
    def __init__(self):
        # Seed with a silent zero-weight entry so the weight sum is always defined.
        self.track_sig = [(zeros(), 0)]
        self.track_vol = ones()
    def add_to_track(self, sig, wt):
        """
        :param sig: signal to add to track (~ instrument)
        :param wt: weight in the mix (between (0,1])
        :return:
        """
        if wt <= 0 or wt > 1:
            raise ValueError("Value of wt must be (0,1]")
        self.track_sig.append((sig, wt))
    def generate_op_signal(self, dur):
        """
        :param dur: duration of track in seconds. This will be overriden by duration of volume envelope.
        :return: mixed track

        NOTE(review): *dur* is never used — output length is governed by
        track_vol and the added signals; confirm this is intended.
        """
        # Normalise so the registered weights sum to 1.
        wt_scale = 1 / (sum([sig[1] for sig in self.track_sig]))
        op_sig = Streamix()
        for sig in self.track_sig:
            op_sig.add(0,wt_scale * sig[1] * sig[0].copy())
        op_sig = op_sig.copy() * self.track_vol.copy()
        return op_sig
class beatmaker:
    """Programs percussion: places samples on beats of a fixed-BPM cycle and
    renders the repeated, weighted mix."""
    def __init__(self, bpm, cycle_length_beats):
        self.bpm = bpm
        # Length of one beat in samples.
        self.beat_length = (60 / bpm) * s
        self.cycle_length_beats = cycle_length_beats
        self.cycle_length = cycle_length_beats*self.beat_length
        # Seed with a silent zero-weight entry so the weight sum is always defined.
        self.track_sig = [(zeros(), 0)]
    def create_cycle(self, sample, wt, beat_program):
        """
        :param sample: signal of sample to add to track (~ instrument)
        :param wt: weight in the mix (between (0,1])
        :param beat_program: 0-based beat indices within the cycle at which
            *sample* is triggered
        :return:
        """
        if wt <= 0 or wt > 1:
            raise ValueError("Value of wt must be (0,1]")
        if max(beat_program) >= self.cycle_length_beats:
            raise ValueError("Invalid beat programming. Beat value cannot be greater than cycle length")
        # Drop duplicate beats while preserving order.
        beat_program = list(dict.fromkeys(beat_program))
        cycle_sig = Streamix()
        for i,v in enumerate(beat_program):
            # cycle_sig = cycle_sig.tee() + zeros(i*self.beat_length).append(_beat.tee()).append(zeros((self.cycle_length_beats-1-i)*self.beat_length))
            # Convert absolute beat positions into gaps relative to the
            # previous event before handing them to the mixer.
            if i==0:
                cycle_sig.add(v*self.beat_length,sample.copy())
            else:
                cycle_sig.add((v - beat_program[i-1]) * self.beat_length, sample.copy())
        self.track_sig.append((cycle_sig, wt))
    def generate_op_signal(self, type, dur):
        """
        :param type: 'time' (dur is seconds) or 'num_cycles' (dur is a cycle count)
        :param dur: duration of track; interpretation depends on *type*
        :return: mixed track

        NOTE(review): any other *type* leaves num_cycles unbound and raises
        NameError below — confirm whether an explicit ValueError is wanted.
        """
        wt_scale = 1 / (sum([sig[1] for sig in self.track_sig]))
        op_sig_cycle = Streamix()
        for sig in self.track_sig:
            op_sig_cycle.add(0, wt_scale * sig[1] * sig[0].copy())
        if type == 'time':
            num_cycles = ceil(dur*s/self.cycle_length)
        elif type == 'num_cycles':
            num_cycles = dur
        # First cycle at t=0, then one copy per cycle length.
        op_sig = Streamix()
        op_sig.add(0, op_sig_cycle.copy())
        for i in range(num_cycles-1):
            op_sig.add(self.cycle_length,op_sig_cycle.copy())
        return op_sig
# ----------basic track test-----------#
# Basic square-wave track with a volume envelope, then swept bandpass filtering.
track_dur = 30 * s
filter_dur = 20 * s
fadeout_dur = 3 * s
# basic oscillator track with vol envelope
test_track = track()
test_track.track_vol = 0.5 * ones(track_dur).append(fadeout(fadeout_dur))
test_osc = oscillator(type='square', f=110)
test_track.add_to_track(test_osc.sig, 1)
test_op = test_track.generate_op_signal(track_dur + fadeout_dur)
# filtering using bandpass filter
lfo_f = line(filter_dur, 4, 110).append(
    110 * ones())  # increasing linearly from 4 to 110 over track_dur and then constant
lfo_1 = oscillator(type='sin', f=lfo_f, amp=400, level=200)
freq1 = lfo_1.sig  # oscillating as per lfo_1
freq2 = line(0.2 * s, 8000, 0).append(zeros())  # decreasing linearly from 8000 to 0 in 0.2 s and then constant at 0
bw = line(filter_dur, 1000, 100).append(100 * ones())  # band width
filt = resonator((freq1 + freq2) * Hz, bw * Hz)  # bandpass filter
test_op_filt = filt(test_op)
# -----------drum test---------------------#
#---------hihat----------#
# White-noise source shared by the open and closed hi-hat sounds.
hihat_test1 = white_noise()
# hihat_test = oscillator(type='square',f=15000).sig + white_noise()
# hihat_test_filter_f = line(0.02 * s, 11000, 11000).append(11000 * ones())
hihat_test_filter_f = 16000
# hihat_test_filter = resonator(hihat_test_filter_f * Hz, 5000 * Hz)
hihat_test_filter = highpass(hihat_test_filter_f * Hz)
# Two-segment decay envelopes: short for closed, longer for open.
closed_hihat_vol_env = line(0.1 * s, 1,0.2).append(line(0.1 * s, 0.2,0))
open_hihat_vol_env = line(0.2 * s, 1,0.4).append(line(0.2 * s, 0.4,0))
# hihat_sound_op = hihat_test1.copy() * fadeout(0.1 * s)
hihat_closed_sound_op = (hihat_test_filter(0.2*hihat_test1.copy())) * closed_hihat_vol_env.copy()
hihat_open_sound_op = (hihat_test_filter(0.2*hihat_test1.copy())) * open_hihat_vol_env.copy()
# hihat_sound_op = Streamix()
# hihat_sound_op.add(0,fadein(0.04 * s))
# hihat_sound_op.add(0,hihat_test2.peek(0.1*s))
# hihat_sound_op.add(0.12*s,fadeout(0.04 * s))
def hihat_1(hp_f,env_t1,env_l1,env_t2,env_l2,env_t3):
    """Hi-hat: white noise through a highpass at hp_f Hz, shaped by a
    three-segment decay envelope (1 -> env_l1 -> env_l2 -> 0 over
    env_t1/env_t2/env_t3 seconds)."""
    source = white_noise()
    envelope = line(env_t1 * s, 1, env_l1).append(line(env_t2 * s, env_l1, env_l2)).append(line(env_t3 * s, env_l2, 0))
    filtered = highpass(hp_f * Hz)(0.2 * source.copy())
    return filtered * envelope.copy()
# hihat_closed_sound_1 = hihat_1(16000,0.1,0.2,0.01,0.2,0.1)
# hihat_open_sound_1 = hihat_1(16000,0.2,0.4,0.01,0.4,0.2)
# Tuned hi-hat variants: closed is tight, open has a longer tail.
hihat_closed_sound_2 = hihat_1(16000,0.01,0.9,0.05,0.2,0.01)
hihat_open_sound_2 = hihat_1(15000,0.02,0.6,0.13,0.5,0.01)
#--------bass drum------------#
bass_test1 = oscillator('sin',110).sig.copy()
# bass_test2 = oscillator('saw',110).sig.copy()
bass_test2 = white_noise()
# Pitch drop 200 -> 55 Hz over 20 ms gives the kick its "thump".
bass_test_3_f = line(0.02 * s, 200, 55 ).append(55 * ones())
bass_test3 = oscillator('sin',bass_test_3_f).sig.copy()
# bass_test_filter_f = line(0.01 * s, 500 * Hz, 50 * Hz).append(50 * Hz * ones())
# bass_test_filter = resonator(bass_test_filter_f , 100 * Hz)
bass_test_filter = lowpass(bass_test_3_f * Hz)
# Layer a steady sine body, filtered noise and the pitch-dropping sine.
bass_sound_op = Streamix()
bass_sound_op.add(0,0.2*(fadein(0.01 * s).append(ones()))*(bass_test1.copy()) * fadeout(0.25 * s))
bass_sound_op.add(0,0.1*(fadein(0.01 * s).append(ones()))*(bass_test_filter(bass_test2.copy())) * fadeout(0.12 * s))
bass_sound_op.add(0,0.5*(bass_test3.copy()) * fadeout (0.3 * s))
def bass_drum_1(sin_1_f,sin_2_f1,sin_2_f2,sin_2_f1f2_t,
                sin_1_l,sin_1_at,sin_1_d,
                sin_2_l,sin_2_at,sin_2_d,
                wn_l,wn_at,wn_d):
    """Kick drum from three layers: a steady sine body (sin_1), a
    pitch-dropping sine (sin_2: f1 -> f2 over sin_2_f1f2_t seconds) and
    lowpassed white noise; each layer has its own level (_l), attack (_at)
    and decay (_d) in seconds."""
    bass_sin1 = oscillator('sin', sin_1_f).sig.copy()
    bass_wn = white_noise()
    bass_sin2_f = line(sin_2_f1f2_t * s, sin_2_f1, sin_2_f2).append(sin_2_f2 * ones())
    bass_sin2 = oscillator('sin', bass_sin2_f).sig.copy()
    # The noise lowpass cutoff tracks the dropping sine frequency.
    bass_lp_filter = lowpass(bass_sin2_f * Hz)
    bass_drum_op = Streamix()
    bass_drum_op.add(0, sin_1_l * (fadein(sin_1_at * s).append(ones())) * (bass_sin1.copy()) * fadeout(sin_1_d * s))
    bass_drum_op.add(0, wn_l * (fadein(wn_at * s).append(ones())) * (bass_lp_filter(bass_wn.copy())) * fadeout(wn_d * s))
    bass_drum_op.add(0, sin_2_l * (fadein(sin_2_at * s).append(ones())) * (bass_sin2.copy()) * fadeout(sin_2_d * s))
    return bass_drum_op
# Kick tuned around A0 (27.5 Hz) with a fast 400->55 Hz pitch drop.
bass_drum_sound_2 = bass_drum_1(27.5,400,55,0.01,
                                0.4,0.01,0.15,
                                0.3 ,0.001,0.15,
                                0.03,0.01,0.05)
# bass_drum_sound_1 = bass_drum_1(110,200,55,0.02,
# 0.2,0.01,0.25,
# 0.5,0.001,0.3,
# 0.1,0.01,0.12)
#--------snare drum----------#
# snare_test1_f = line(0.01 * s,4000,220).append(220 * ones())
# # snare_test1_f = 220
# snare_test1 = oscillator('triangle',snare_test1_f).sig.copy()
#
# # print(max(list(snare_test1.peek(2*s))))
# snare_test2 = white_noise()
# snare_test2_filter_f = 10000
# snare_test2_filter = highpass(snare_test2_filter_f * Hz)
# snare_test_2_vol_env = line(0.03 * s, 1,0.9).append(line(0.08 * s, 0.9,0.4)).append(line(0.03 * s, 0.4,0))
# snare_sound_op = Streamix()
# snare_sound_op.add(0,0.3 * (snare_test1.copy()) * fadeout(0.02* s))
# snare_sound_op.add(0,0.3 * snare_test2_filter(snare_test2.copy()) * snare_test_2_vol_env.copy())
def snare_1(tr_f_t,tr_f_h,tr_f_l,tr_d,
            wn_hp_f,
            wn_env_1_t,wn_env_1_l,
            wn_env_2_t,wn_env_2_l,
            wn_env_3_t,wn_env_3_l,
            tr_mix
            ):
    """Snare hit: a pitch-dropping triangle 'body' mixed with highpassed
    white noise shaped by a three-segment envelope; *tr_mix* balances
    triangle against noise."""
    # Triangle pitch falls from tr_f_h to tr_f_l over tr_f_t seconds.
    tr_f = line(tr_f_t * s, tr_f_h, tr_f_l).append(tr_f_l * ones())
    tr = oscillator('triangle', tr_f).sig.copy()
    # print(max(list(snare_test1.peek(2*s))))
    wn = white_noise()
    wn_hp = highpass(wn_hp_f * Hz)
    wn_env = line(wn_env_1_t * s, wn_env_1_l, wn_env_2_l).append(line(wn_env_2_t * s, wn_env_2_l, wn_env_3_l)).append(line(wn_env_3_t * s, wn_env_3_l, 0))
    snare_ = Streamix()
    snare_.add(0, 0.3 * tr_mix * (tr.copy()) * fadeout(tr_d * s))
    snare_.add(0, 0.3 * (1-tr_mix) * wn_hp(wn.copy()) * wn_env.copy())
    return snare_
# Two snare tunings built on the same generator.
snare_sound_1 = snare_1(tr_f_t=0.01,tr_f_h=4000,tr_f_l=220,tr_d=0.02,
                        wn_hp_f=10000,
                        wn_env_1_t=0.03,wn_env_1_l=1,
                        wn_env_2_t=0.08,wn_env_2_l=0.9,
                        wn_env_3_t=0.03,wn_env_3_l=0.4,
                        tr_mix=0.5
                        )
snare_sound_2 = snare_1(tr_f_t=0.01,tr_f_h=8000,tr_f_l=330,tr_d=0.015,
                        wn_hp_f=10000,
                        wn_env_1_t=0.08,wn_env_1_l=0.7,
                        wn_env_2_t=0.02,wn_env_2_l=1,
                        wn_env_3_t=0.05,wn_env_3_l=0.4,
                        tr_mix=0.6
                        )
# -------- test beat -----------#
# test_beat = beatmaker(480,16)
# test_beat.create_cycle(hihat_closed_sound_1,0.6,[0,4,6,8,9,12,14])
# test_beat.create_cycle(hihat_open_sound_1,0.6,[2,10])
# test_beat.create_cycle(bass_drum_sound_1,1,[0,4,8,12,15])
# test_beat.create_cycle(snare_sound_op,0.6,[4,12])
# test_beat_track = test_beat.generate_op_signal('num_cycles',8)
# 320 BPM, 16-beat cycles: program hats/kick/snare and render a few cycles each.
test_beat_2 = beatmaker(320,16)
# test_beat_2.create_cycle(hihat_closed_sound_1,0.6,[0,1,2,4,5,6,7,8,9,10,11,12,13,14])
test_beat_2.create_cycle(hihat_closed_sound_2,0.4,[0,2,4,6,8,10,12,14])
# test_beat_2.create_cycle(hihat_open_sound_1,0.6,[3,15])
test_beat_2.create_cycle(bass_drum_sound_2,1,[0,3,6,10,14])
test_beat_2.create_cycle(snare_sound_1,0.8,[4,12])
test_beat_2_track = test_beat_2.generate_op_signal('num_cycles',4)
test_beat_3 = beatmaker(320,16)
test_beat_3.create_cycle(hihat_closed_sound_2,0.4,[0,1,2,4,5,6,7,8,9,10,11,12,13,14])
test_beat_3.create_cycle(hihat_open_sound_2,0.4,[3,15])
test_beat_3.create_cycle(bass_drum_sound_2,1,[0,3,6,9,10,14])
test_beat_3.create_cycle(snare_sound_1,0.8,[4,12])
test_beat_3_track = test_beat_3.generate_op_signal('num_cycles',4)
test_beat_4 = beatmaker(320,16)
test_beat_4.create_cycle(hihat_closed_sound_2,0.4,[0,1,2,4,5,6,7,8,9,10,11,12,13,14])
test_beat_4.create_cycle(hihat_open_sound_2,0.4,[3,15])
test_beat_4.create_cycle(bass_drum_sound_2,1,[0,3,6,9,10,14])
test_beat_4.create_cycle(snare_sound_1,0.8,[4,12])
test_beat_4_track = test_beat_4.generate_op_signal('num_cycles',3)
# Single-cycle variation with an extra snare accent on beats 14-15.
test_beat_4_r = beatmaker(320,16)
test_beat_4_r.create_cycle(hihat_closed_sound_2,0.4,[0,1,2,4,5,6,7,8,9,10,11,12,13,14])
test_beat_4_r.create_cycle(hihat_open_sound_2,0.4,[3,15])
test_beat_4_r.create_cycle(bass_drum_sound_2,1,[0,3,6,9,10,11,13])
test_beat_4_r.create_cycle(snare_sound_1,0.8,[4,12])
test_beat_4_r.create_cycle(snare_sound_2,0.9,[14,15])
test_beat_4_r_track = test_beat_4_r.generate_op_signal('num_cycles',1)
#---adding to track----#
# test_final_track = Streamix()
# test_final_track.add(0,0.12*test_op_filt.copy())
# test_final_track.add(22*s,test_beat_track.copy())
# test = ones(10)
# test_filt = z/(z-1)
# (z/(z-1)).plot().show()
# print(list(test.copy()))
# print(list(test_filt(test).copy()))
# test_osc = oscillator('triangle',440).sig.copy()
# plt.plot(test_hihat_cycle.copy().take(2*s))
# plt.show()
# ------------- synth patches ----------------------#
# test_synth_A_f_intr = (440 + 20*(fadein(0.01 * s).append(fadeout(0.02 * s))))
# test_synth_A_f = test_synth_A_f_intr.copy().append(oscillator('square',200,30,440).sig.copy())
# test_synth_A_f_m = oscillator('sin',10,30,440).sig.copy()
# test_synth_A_f = oscillator('square',200,30,test_synth_A_f_m).sig.copy()
# test_synth_f_f = (50 + 100*(fadein(0.8 * s).append(fadeout(0.8 * s)).append(zeros())))
# test_synth_f_f = 220
# test_synth_f_f = oscillator('sin', 300, 110, 220).sig.copy() #
# test_synth_f_a = 30 # good till 50-60
def patch_1_notes(f_l,f_a,
                  f_f_f,f_f_a,f_f_l,
                  env_at,env_h,env_d1,env_d1_l,env_d2,env_f,env_a
                  ):
    """FM-style note: a sine whose frequency is itself a sine oscillation
    around f_l, shaped by an attack/hold/two-stage-decay envelope with a
    slow tremolo.

    :param f_l, f_a: carrier centre frequency and modulation depth
    :param f_f_f, f_f_a, f_f_l: modulator frequency / amplitude / offset
    :param env_at, env_h, env_d1, env_d1_l, env_d2: attack, hold, first decay
        (to level env_d1_l), then final fade-out, all in seconds
    :param env_f, env_a: tremolo frequency and depth applied to the envelope
    """
    f_f = oscillator('sin', f_f_f, f_f_a, f_f_l).sig.copy()  # modulator stream
    f = oscillator('sin', f_f.copy(), f_a, f_l).sig.copy()   # carrier frequency stream
    # test_synth_env = fadein(0.01 * s).append(ones(0.1 * s)).append(line(0.2 * s, 1, 0.6)).append(0.6 * fadeout(3 * s) * oscillator('sin', 1.5, 0.1, 1).sig.copy())
    env = (fadein(env_at * s).append(ones(env_h * s)).append(line(env_d1 * s, 1, env_d1_l)).append(env_d1_l * fadeout(env_d2 * s))
           ) * oscillator('sin', env_f, env_a, 1).sig.copy()
    return env.copy()*oscillator('sin', f.copy()).sig.copy()
def patch_2_notes(f_l,f_a_r,f_f,
                  lp_f,
                  env_h,
                  env_at_1, env_d1_1, env_d1_l_1, env_d2_1,
                  env_at_2, env_d1_2, env_d1_l_2, env_d2_2,
                  sm):
    """Two-voice patch: a lowpassed square (voice 1) plus a
    frequency-modulated sine (voice 2), each with its own
    attack/hold/decay envelope, mixed by *sm* (sine share).

    Each voice is zero-padded (adj_1 / adj_2, in seconds) so both envelopes
    span the same total length before mixing.

    Bug fix: adj_2 was computed as ``0.01 * s + ...`` and then multiplied by
    s AGAIN in ``zeros(adj_2*s)``, unlike adj_1 which is kept in seconds —
    the double scaling produced an enormous silent tail; adj_2 is now in
    seconds, consistent with adj_1.
    """
    adj_1 = 0.01 + max(env_at_1+env_d1_1+env_d2_1,env_at_2+env_d1_2+env_d2_2) - (env_at_1+env_d1_1+env_d2_1)
    synth_1 = oscillator('square',f_l).sig.copy()
    lp_1 = lowpass(lp_f * Hz)
    env_1 = (fadein(env_at_1 * s).append(ones(env_h * s)).append(line(env_d1_1 * s, 1, env_d1_l_1)).append(
        env_d1_l_1 * fadeout(env_d2_1 * s))).append(zeros(adj_1*s))
    adj_2 = 0.01 + max(env_at_1 + env_d1_1 + env_d2_1, env_at_2 + env_d1_2 + env_d2_2) - (env_at_2 + env_d1_2 + env_d2_2)
    synth_2_f = oscillator('sin',f_f,f_a_r*f_l,f_l).sig.copy()
    synth_2 = oscillator('sin',synth_2_f).sig.copy()
    env_2 = (fadein(env_at_2 * s).append(ones(env_h * s)).append(line(env_d1_2 * s, 1, env_d1_l_2)).append(
        env_d1_l_2 * fadeout(env_d2_2 * s))).append(zeros(adj_2*s))
    return (sm*env_2*synth_2.copy() + (1-sm)*env_1*lp_1(synth_1.copy()))
# Bass preset: sm=0.55 mixes the sine voice (55%) against the lowpassed square (45%).
bass_patch_like = partial(patch_2_notes,
                          f_a_r=0,f_f=100,
                          lp_f=50,
                          env_at_1=0.005,env_d1_1=0.1,env_d1_l_1=0.4,env_d2_1=0.3,
                          env_at_2=0.05,env_d1_2=0.4,env_d1_l_2=0.6,env_d2_2=0.3,
                          sm=0.55)
# Bassline: three hits per bar over D2 / B1 / A1 / A1 (delays are gaps
# between successive events).
bass_patch_op = Streamix()
bass_patch_op.add(0*s,0.2*bass_patch_like(f_l=notes['D2'],env_h=0.2).copy())
bass_patch_op.add(1.125*s,0.2*bass_patch_like(f_l=notes['D2'],env_h=0.1).copy())
bass_patch_op.add(0.75*s,0.2*bass_patch_like(f_l=notes['D2'],env_h=0.1).copy())
bass_patch_op.add(1.125*s,0.2*bass_patch_like(f_l=notes['B1'],env_h=0.2).copy())
bass_patch_op.add(1.125*s,0.2*bass_patch_like(f_l=notes['B1'],env_h=0.1).copy())
bass_patch_op.add(0.75*s,0.2*bass_patch_like(f_l=notes['B1'],env_h=0.1).copy())
bass_patch_op.add(1.125*s,0.2*bass_patch_like(f_l=notes['A1'],env_h=0.2).copy())
bass_patch_op.add(1.125*s,0.2*bass_patch_like(f_l=notes['A1'],env_h=0.1).copy())
bass_patch_op.add(0.75*s,0.2*bass_patch_like(f_l=notes['A1'],env_h=0.1).copy())
bass_patch_op.add(1.125*s,0.2*bass_patch_like(f_l=notes['A1'],env_h=0.2).copy())
bass_patch_op.add(1.125*s,0.2*bass_patch_like(f_l=notes['A1'],env_h=0.1).copy())
bass_patch_op.add(0.75*s,0.2*bass_patch_like(f_l=notes['A1'],env_h=0.1).copy())
# test_synth_A_f_m = 440
# test_synth_A_f = oscillator('sin',test_synth_f_f.copy(),test_synth_f_a,test_synth_A_f_m).sig.copy()
# test_synth_A = oscillator('sin',test_synth_A_f.copy()).sig.copy()
#
# test_synth_C_f_m = 523.25
# test_synth_C_f = oscillator('sin',test_synth_f_f.copy(),test_synth_f_a,test_synth_C_f_m).sig.copy()
# test_synth_C = oscillator('sin',test_synth_C_f.copy()).sig.copy()
#
# test_synth_E_f_m = 659.26
# test_synth_E_f = oscillator('sin',test_synth_f_f.copy(),test_synth_f_a,test_synth_E_f_m).sig.copy()
# test_synth_E = oscillator('sin',test_synth_E_f.copy()).sig.copy()
# test_synth_4_f_m = 554.37
# test_synth_4_f = oscillator('sin',test_synth_f_f.copy(),test_synth_f_a,test_synth_4_f_m).sig.copy()
# test_synth_4 = oscillator('sin',test_synth_4_f.copy()).sig.copy()
# notes_init = 'A4,A#4,B4,C5,C#5,D5,D#5,E5,F5,F#5,G5,G#5,A5'.split(',')
# freqs = np.round(440. * 2**(np.arange(0, len(notes_init)) / 12.),2)
# notes = dict(zip(notes_init, freqs))
# print(notes)
# print(notes['C5'])
# Electric-piano-like preset built on the FM patch; only f_l (pitch) remains free.
rhodes_like = partial(patch_1_notes,f_a=30,f_f_f=300,f_f_a=110,f_f_l=220,
                      env_at=0.01,env_h=0.1,env_d1=0.2,env_d1_l=0.6,env_d2=3,env_f=1.5,env_a=0.1)
# rhodes_like = partial(patch_1_notes,f_a=0,f_f_f=30,f_f_a=2,f_f_l=30,
# env_at=0.01,env_h=0.1,env_d1=0.2,env_d1_l=0.6,env_d2=3,env_f=1.5,env_a=0.1)
# test_synth_C_f = test_synth_A_f_intr.copy().append(440*ones())
# test_synth_C = oscillator('sin',test_synth_C_f).sig.copy()
# test_synth_env = fadein(0.05 * s).append(ones(2 * s)).append(fadeout(1 * s))
# test_synth_env = fadein(0.01 * s).append(ones(0.1*s)).append(line(0.5 * s,1,0.4)).append(0.4*fadeout(2 * s))
# test_synth_env = fadein(0.01 * s).append(ones(0.1*s)).append(line(0.2*s,1,0.6)).append(0.6*fadeout(3*s)*oscillator('sin',1.5,0.1,1).sig.copy())
# test_synth_filter_f = 600 + 100*(fadein(0.05 * s).append(line(0.5 * s,1,0.3)).append(0.3*fadeout(1 * s)).append(zeros()))
# test_synth_filter_f =400
# test_synth_filter_bw = 50
# test_synth_filter = resonator(test_synth_filter_f,test_synth_filter_bw)
# Chord figures (D-F-A-C, then B-D-F, then A-C-E); the small delays stagger
# the note onsets like a strum.
test_synth_op = Streamix()
test_synth_op.add(0*s,0.2*rhodes_like(f_l=notes['D4']).copy())
test_synth_op.add(0.08*s,0.2*rhodes_like(f_l=notes['F4']).copy())
test_synth_op.add(0.12*s,0.2*rhodes_like(f_l=notes['A4']).copy())
test_synth_op.add(0.15*s,0.2*rhodes_like(f_l=notes['C5']).copy())
test_synth_op.add((3-0.08-0.12-0.15)*s,0.2*rhodes_like(f_l=notes['B4']).copy())
test_synth_op.add(0.07*s,0.2*rhodes_like(f_l=notes['D5']).copy())
test_synth_op.add(0.13*s,0.2*rhodes_like(f_l=notes['F5']).copy())
test_synth_op.add((3-0.07-0.13)*s,0.2*rhodes_like(f_l=notes['A4']).copy())
test_synth_op.add(0.09*s,0.2*rhodes_like(f_l=notes['C5']).copy())
test_synth_op.add(0.12*s,0.2*rhodes_like(f_l=notes['E5']).copy())
#----------- strings ------------------#
def string_attempt_1(f_l,hrm,hrm_a,
                     env_at_1,env_at_1_l,env_at_2,
                     env_h,
                     env_d_1,env_d_1_l,env_d_2,
                     voices,voices_l,voices_det,voices_rng,
                     lp_f
                     ):
    """Additive 'string/organ' voice: octave-shifted saws plus detuned sine voices.

    Parameters (durations in seconds, scaled by the global sample rate ``s``):
        f_l: fundamental frequency in Hz.
        hrm: octave offsets; each h adds a partial at f_l * 2**h (0 is prepended).
        hrm_a: amplitude falloff base per octave (amp = hrm_a ** -|h|).
        env_at_1, env_at_1_l, env_at_2: two-stage attack (time to mid level, mid
            level, time from mid level to full).
        env_h: hold time at full level.
        env_d_1, env_d_1_l, env_d_2: two-stage decay (time, mid level, time to 0).
        voices, voices_l, voices_det, voices_rng: per-partial detuned sine voices —
            count, relative level, max detune in Hz, and the RNG drawing detunes.
        lp_f: low-pass cutoff in Hz.
    Returns the enveloped, low-passed mix, scaled by 0.8 for headroom.
    """
    # Piecewise-linear envelope: attack1 -> attack2 -> hold -> decay1 -> decay2.
    env = line(env_at_1 * s, 0, env_at_1_l).append(line(env_at_2 * s, env_at_1_l,1)).append(ones(env_h * s)).append(
        line(env_d_1 * s, 1, env_d_1_l)).append(line(env_d_2*s,env_d_1_l,0))
    str_op = Streamix()
    # Always include the fundamental (octave offset 0) ahead of the requested offsets.
    hrm = [0] + hrm
    amps = [hrm_a ** (-1 * abs(h)) for h in hrm]
    for i,h in enumerate(hrm):
        # Normalize so all saws plus their detuned voices sum to at most unit amplitude.
        a = amps[i]/(sum(amps)*(1+voices*voices_l))
        m = 2 ** h
        str_op.add(0, a * oscillator('saw', m * f_l).sig.copy())
        for v in range(voices):
            det_delta = voices_rng.uniform(-voices_det,voices_det)
            str_op.add(0,voices_l * a * oscillator('sin', m*(f_l) + det_delta).sig.copy())
    lp = lowpass(lp_f * Hz)
    return 0.8*env *lp(str_op.copy())
# One-off render at A440 to sanity-check the patch's peak level.
organ_sound_1 = string_attempt_1(440,[-1,1,2,3],2,
                                 1,0.4,0.6,
                                 3,
                                 0.5,0.3,0.8,
                                 1,0.3,2,rng,
                                 400
                                 )
# Rough clipping guard: print the max sample over the first 5 seconds.
print(max(list(organ_sound_1.peek(5*s))))
# Organ-ish patches: note frequency (f_l), hold time (env_h) and low-pass cutoff
# (lp_f) remain open; organ_like_2 adds sub-octave partials and a longer decay.
organ_like = partial(string_attempt_1,hrm=[1,2,3],hrm_a=2,
                     env_at_1=0.7,env_at_1_l=0.4,env_at_2=0.6,
                     env_d_1=0.5,env_d_1_l=0.3,env_d_2=0.8,
                     voices=1,voices_l=0.3,voices_det=2,voices_rng=rng)
organ_like_2 = partial(string_attempt_1,hrm=[-2,-1,1,2,3],hrm_a=2,
                       env_at_1=0.7,env_at_1_l=0.4,env_at_2=0.6,
                       env_d_1=0.8,env_d_1_l=0.3,env_d_2=1,
                       voices=1,voices_l=0.3,voices_det=2,voices_rng=rng)
# Sustained chord pad: the low-pass cutoff tracks each note's fundamental.
organ_sound_op = Streamix()
organ_sound_op.add(0,organ_like(f_l=notes['D4'],env_h=2,lp_f=notes['D4']).copy())
organ_sound_op.add(0,organ_like(f_l=notes['F4'],env_h=2,lp_f=notes['F4']).copy())
organ_sound_op.add(0,organ_like(f_l=notes['A4'],env_h=2,lp_f=notes['A4']).copy())
organ_sound_op.add(0,organ_like(f_l=notes['C5'],env_h=2,lp_f=notes['C5']).copy())
organ_sound_op.add(2.7*s,organ_like(f_l=notes['B4'],env_h=2,lp_f=notes['B4']).copy())
organ_sound_op.add(0,organ_like(f_l=notes['D5'],env_h=2,lp_f=notes['D5']).copy())
organ_sound_op.add(0,organ_like(f_l=notes['F5'],env_h=2,lp_f=notes['F5']).copy())
organ_sound_op.add(2.7*s,organ_like(f_l=notes['A4'],env_h=4,lp_f=notes['A4']).copy())
organ_sound_op.add(0*s,organ_like(f_l=notes['C5'],env_h=4,lp_f=notes['C5']).copy())
organ_sound_op.add(0*s,organ_like(f_l=notes['E5'],env_h=4,lp_f=notes['E5']).copy())
# Same progression with the richer organ_like_2 patch (used for the outro).
organ_sound_op_2 = Streamix()
organ_sound_op_2.add(0,organ_like_2(f_l=notes['D4'],env_h=2,lp_f=notes['D4']).copy())
organ_sound_op_2.add(0,organ_like_2(f_l=notes['F4'],env_h=2,lp_f=notes['F4']).copy())
organ_sound_op_2.add(0,organ_like_2(f_l=notes['A4'],env_h=2,lp_f=notes['A4']).copy())
organ_sound_op_2.add(0,organ_like_2(f_l=notes['C5'],env_h=2,lp_f=notes['C5']).copy())
organ_sound_op_2.add(2.7*s,organ_like_2(f_l=notes['B4'],env_h=2,lp_f=notes['B4']).copy())
organ_sound_op_2.add(0,organ_like_2(f_l=notes['D5'],env_h=2,lp_f=notes['D5']).copy())
organ_sound_op_2.add(0,organ_like_2(f_l=notes['F5'],env_h=2,lp_f=notes['F5']).copy())
organ_sound_op_2.add(2.7*s,organ_like_2(f_l=notes['A4'],env_h=4,lp_f=notes['A4']).copy())
organ_sound_op_2.add(0*s,organ_like_2(f_l=notes['C5'],env_h=4,lp_f=notes['C5']).copy())
organ_sound_op_2.add(0*s,organ_like_2(f_l=notes['E5'],env_h=4,lp_f=notes['E5']).copy())
# Final arrangement: 12 s bars of drums (test_beat_*), bass, synth chords and organ pad.
test_final_track_2 = Streamix()
# bar 1
test_final_track_2.add(0.5*s,0.2*test_synth_op.copy())
test_final_track_2.add(0,0.9*test_beat_2_track.copy())
test_final_track_2.add(0,bass_patch_op.copy())
# bar 2
test_final_track_2.add(12*s,0.9*test_beat_2_track.copy())
test_final_track_2.add(0,0.2*test_synth_op.copy())
test_final_track_2.add(0,bass_patch_op.copy())
# bar 3
test_final_track_2.add(12*s,0.9*test_beat_3_track.copy())
test_final_track_2.add(0,0.2*test_synth_op.copy())
test_final_track_2.add(0,bass_patch_op.copy())
# bar 4
test_final_track_2.add(12*s,0.9*test_beat_4_track.copy())
test_final_track_2.add(0,0.2*test_synth_op.copy())
test_final_track_2.add(0,bass_patch_op.copy())
test_final_track_2.add(9*s,0.9*test_beat_4_r_track.copy())
# bar 5 — the organ pad enters, level rising slightly each bar (0.12 -> 0.14).
test_final_track_2.add(3*s,0.9*test_beat_3_track.copy())
test_final_track_2.add(0,0.2*test_synth_op.copy())
test_final_track_2.add(0,bass_patch_op.copy())
test_final_track_2.add(0,0.12*organ_sound_op.copy())
# bar 6
test_final_track_2.add(12*s,0.9*test_beat_3_track.copy())
test_final_track_2.add(0,0.2*test_synth_op.copy())
test_final_track_2.add(0,bass_patch_op.copy())
test_final_track_2.add(0,0.13*organ_sound_op.copy())
# bar 7
test_final_track_2.add(12*s,0.9*test_beat_3_track.copy())
test_final_track_2.add(0,0.2*test_synth_op.copy())
test_final_track_2.add(0,bass_patch_op.copy())
test_final_track_2.add(0,0.14*organ_sound_op.copy())
# bar 8 — outro: chords and the richer organ pad only.
test_final_track_2.add(12*s,0.22*test_synth_op.copy())
test_final_track_2.add(0,0.18*organ_sound_op_2.copy())
# test_synth_op = Streamix()
# test_synth_op.add(0,0.2*test_synth_A.copy())
# test_synth_op.add(0.09*s,0.2*test_synth_C.copy())
# test_synth_op.add(0.13*s,0.2*test_synth_E.copy())
# test_synth_op = test_synth_filter(test_synth_C.copy()).peek(4*s)
# test_synth_op = test_synth_C.copy().peek(4*s)
# print(max(list(test_synth_op.copy())))
def write_to_file(str_mix,t,path):
    """Render the first ``t`` seconds of ``str_mix`` to a normalized float32 WAV.

    Side effects: mutates ``str_mix`` (appends silence so peek() never underruns)
    and writes ``path`` via the module-level ``wv`` writer at the global ``rate``.
    Returns the normalized sample array.
    """
    print('Writing file')
    # Pad with silence so peek(t*s) can always deliver a full t seconds.
    str_mix.add(0,zeros())
    str_mix_array = np.array(list(str_mix.peek(t*s)),dtype='float32')
    # NOTE(review): normalizes by the positive peak only — a larger negative peak
    # would still clip; consider 0.99/max(abs(...)). Confirm before changing.
    str_mix_array = (0.99/max(str_mix_array))*str_mix_array
    print(max(str_mix_array))
    wv.write(filename=path,rate=rate,data=str_mix_array)
    return str_mix_array
# organ_sound_op_array = write_to_file(organ_sound_op,15,'../output/organ_chords.wav')
# Render 100 s of the final arrangement to disk, then play the normalized buffer.
test_final_track_2_array = write_to_file(test_final_track_2,100,'../output/lowfi_hh_2_mod.wav')
print("Done!")
with AudioIO(True) as player:
    player.play(test_final_track_2_array, rate=rate)
|
#!/usr/bin/python
# Track and print the running maximum of a list, then report the final maximum.
# Fixed: the original used Python 2 `print` statements, which are syntax errors
# on Python 3; `print(x, end=' ')` reproduces the trailing-comma behavior.
my_rand_list = [5, 6, 4, 1, 7, 3, 2, 0, 8, 9]
largest = -1
for item in my_rand_list:
    if item > largest:
        largest = item
    print(largest, end=' ')  # running maximum after each element
print()
print('largest number ', largest)
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import operator
import re
import sys
from pants.backend.core.tasks.console_task import ConsoleTask
from pants.base.address import SyntheticAddress
from pants.base.exceptions import TaskError
from pants.base.target import Target
_identity = lambda x: x
def _extract_modifier(value):
if value.startswith('+'):
return _identity, value[1:]
elif value.startswith('-'):
return operator.not_, value[1:]
else:
return _identity, value
def _create_filters(list_option, predicate):
for value in list_option:
modifier, value = _extract_modifier(value)
predicates = map(predicate, value.split(','))
def filter(target):
return modifier(any(map(lambda predicate: predicate(target), predicates)))
yield filter
def _get_target(spec, build_graph):
  """Resolve a target address *spec* against *build_graph*.

  Raises TaskError when the spec cannot be parsed or resolves to no target.
  """
  try:
    address = SyntheticAddress.parse(spec)
  except IOError as e:
    # BUG FIX: `address` is unbound when parse() itself fails, so the original
    # raised a NameError that masked the real error; report the input spec.
    raise TaskError('Failed to parse address: %s: %s' % (spec, e))
  match = build_graph.get_target(address)
  if not match:
    raise TaskError('Invalid target address: %s' % address)
  return match
class Filter(ConsoleTask):
  """Filters targets based on various criteria.

  Each --filter-* flag value takes an optional '+' (include, the default) or
  '-' (exclude) prefix and a comma-separated pattern list; a target is printed
  only if every configured filter accepts it.
  """

  @classmethod
  def setup_parser(cls, option_group, args, mkflag):
    # Register the four filter flags; all are repeatable and accumulate.
    super(Filter, cls).setup_parser(option_group, args, mkflag)
    option_group.add_option(mkflag('type'), dest='filter_type', action='append', default=[],
                            help="Identifies target types to include (optional '+' prefix) or "
                                 "exclude ('-' prefix). Multiple type inclusions or exclusions "
                                 "can be specified at once in a comma separated list or else by "
                                 "using multiple instances of this flag.")
    option_group.add_option(mkflag('target'), dest='filter_target', action='append', default=[],
                            help="Identifies specific targets to include (optional '+' prefix) or "
                                 "exclude ('-' prefix). Multiple target inclusions or exclusions "
                                 "can be specified at once in a comma separated list or else by "
                                 "using multiple instances of this flag.")
    option_group.add_option(mkflag('ancestor'), dest='filter_ancestor', action='append', default=[],
                            help="Identifies ancestor targets (containing targets) that make a "
                                 "select child (contained) targets to include "
                                 "(optional '+' prefix) or exclude ('-' prefix). Multiple "
                                 "ancestor inclusions or exclusions can be specified at once in "
                                 "a comma separated list or else by using multiple instances of "
                                 "this flag.")
    option_group.add_option(mkflag('regex'), dest='filter_regex', action='append', default=[],
                            help="Identifies regexes of target addresses to include "
                                 "(optional '+' prefix) or exclude ('-' prefix). Multiple target "
                                 "inclusions or exclusions can be specified at once in a comma "
                                 "separated list or else by using multiple instances of this flag.")

  def __init__(self, context, workdir, outstream=sys.stdout):
    # Build the list of filter predicates from the parsed options, in a fixed
    # order: target address, type, ancestor, regex.
    super(Filter, self).__init__(context, workdir, outstream)
    self._filters = []

    # Match a single, exact target address.
    def filter_for_address(spec):
      match = _get_target(spec, self.context.build_graph)
      return lambda target: target == match
    self._filters.extend(_create_filters(context.options.filter_target, filter_for_address))

    # Match by target type: either a fully-qualified class path or a registered alias.
    def filter_for_type(name):
      # FIXME(pl): This should be a standard function provided by the plugin/BuildFileParser
      # machinery
      try:
        # Try to do a fully qualified import 1st for filtering on custom types.
        from_list, module, type_name = name.rsplit('.', 2)
        module = __import__('%s.%s' % (from_list, module), fromlist=[from_list])
        target_type = getattr(module, type_name)
      except (ImportError, ValueError):
        # Fall back on pants provided target types.
        if name not in self.context.build_file_parser.report_target_aliases():
          raise TaskError('Invalid type name: %s' % name)
        target_type = self.context.build_file_parser.report_target_aliases()[name]
      if not issubclass(target_type, Target):
        raise TaskError('Not a Target type: %s' % name)
      return lambda target: isinstance(target, target_type)
    self._filters.extend(_create_filters(context.options.filter_type, filter_for_type))

    # Match targets reachable by walking the dependency graph from an ancestor.
    def filter_for_ancestor(spec):
      ancestor = _get_target(spec, self.context.build_graph)
      children = set()
      ancestor.walk(children.add)
      return lambda target: target in children
    self._filters.extend(_create_filters(context.options.filter_ancestor, filter_for_ancestor))

    # Match by regex search over the target's build-file spec string.
    def filter_for_regex(regex):
      parser = re.compile(regex)
      return lambda target: parser.search(str(target.address.build_file_spec))
    self._filters.extend(_create_filters(context.options.filter_regex, filter_for_regex))

  def console_output(self, _):
    # De-duplicate the root targets; emit a spec only when every filter accepts it.
    filtered = set()
    for target in self.context.target_roots:
      if target not in filtered:
        filtered.add(target)
        for filter in self._filters:
          if not filter(target):
            break
        else:
          yield target.address.build_file_spec
|
import sys
from _wagyu import Box
from hypothesis import given
from . import strategies
@given(strategies.boxes)
def test_basic(box: Box) -> None:
    """The repr of a Box names its module and qualified class name."""
    text = repr(box)
    assert text.startswith(Box.__module__)
    assert Box.__qualname__ in text
@given(strategies.boxes)
def test_round_trip(box: Box) -> None:
    """Evaluating the repr reconstructs an equal Box."""
    # sys.modules serves as the eval globals so the module-qualified repr resolves.
    assert eval(repr(box), sys.modules) == box
|
# Generated by Django 3.0 on 2020-10-26 08:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make brecord.idate and brecord.rdate nullable (DateField null=True)."""

    dependencies = [
        ('lib', '0012_auto_20201026_1219'),
    ]

    operations = [
        migrations.AlterField(
            model_name='brecord',
            name='idate',
            field=models.DateField(null=True),
        ),
        migrations.AlterField(
            model_name='brecord',
            name='rdate',
            field=models.DateField(null=True),
        ),
    ]
|
from django.shortcuts import render
from django.views import View
from django.http import JsonResponse
import json #json.loads๋ jsonํํ์ ๋ฐ์ดํฐ๋ฅผ ๋์
๋๋ฆฌ๋ก ๋ฐ๊ฟ
from django.contrib.auth.hashers import check_password
from .models import User
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from account.serializer import UserShortcutSerializer
from django.forms.models import model_to_dict
import re
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib import auth
def user_validation(data):
    """Validate a registration payload.

    Returns "OK" or one of the error codes: "USER_EXIST" (duplicate user_id),
    "EMAIL_INVALID" (regex mismatch), "EMAIL_EXIST" (duplicate email).
    """
    user_check = User.objects.filter(user_id=data['user_id'])  # duplicate user-id check
    email_check1 = re.compile(
        r'[0-9a-zA-Z]+@[0-9a-zA-Z]+\.[0-9a-zA-Z]{2,}'
    ).search(data['email'])  # email format check (regex)
    email_check2 = User.objects.filter(email=data['email'])  # duplicate email check
    if user_check.exists():
        return "USER_EXIST"
    elif email_check1 is None:
        return "EMAIL_INVALID"
    elif email_check2.exists():
        return "EMAIL_EXIST"
    else:
        return "OK"
@api_view(['POST'])
def register(request):
    """Create a new user account from a JSON body.

    Responses: 400 on missing fields or invalid email, 409 on duplicate
    user_id/email, 201 with the created user's fields on success.
    (User-facing messages are Korean; left byte-identical below.)
    """
    data = json.loads(request.body)
    required_fields = ('user_id','username','nickname','email','classnum','password','university','faculty','major')
    # Check that every required JSON field is present.
    if not all(i in data for i in required_fields):
        return Response(
            {"message":"ํ์ ์์์ ์๋ ฅํด์ฃผ์ธ์."},
            status=status.HTTP_400_BAD_REQUEST
        )
    validation = user_validation(data)
    if validation == "USER_EXIST":
        return Response(
            {"message":"์ด๋ฏธ ์กด์ฌํ๋ ์์ด๋์๋๋ค."},
            status=status.HTTP_409_CONFLICT
        )
    elif validation == "EMAIL_INVALID":
        return Response(
            {"message":"์ ํํ ์ด๋ฉ์ผ ํ์์ ์๋ ฅํด์ฃผ์ธ์."},
            status=status.HTTP_400_BAD_REQUEST
        )
    elif validation == "EMAIL_EXIST":
        return Response(
            {"message":"์ด๋ฏธ ์กด์ฌํ๋ ์ด๋ฉ์ผ์๋๋ค."},
            status = status.HTTP_409_CONFLICT
        )
    else:
        user = User.objects.create_user(**data)
        return Response(
            model_to_dict(user),
            status=status.HTTP_201_CREATED
        )
# NOTE(review): the two original trailing comments here were mojibake (mis-encoded
# Korean). From context they discussed the student-number (classnum) input format
# and duplicate validation; the exact wording is unrecoverable — confirm with the author.
@api_view(['GET', 'PUT'])
@permission_classes((IsAuthenticated,))  # single-element tuple needs the trailing comma
def info(request):
    """GET: return the authenticated user's profile; PUT: update editable fields.

    PUT rejects malformed emails (400) and duplicate email/nickname owned by
    another user (409); protected fields (password, user_id, flags) are stripped
    before assignment. Returns 202 with the updated profile on success.
    """
    user = request.user      # the authenticated user object
    data = request.data      # incoming payload (PUT)
    email = request.user.email
    nickname= request.user.nickname
    if request.method == "GET":  # return user info
        result = UserShortcutSerializer(user)
        return Response(result.data, status=status.HTTP_200_OK)
    else:
        """
        required_field = ('password')
        #if not all(i in data for i in required_field):
        if not required_field:
            return Response(
                {"message":"ํ์ ์์์ ์๋ ฅํด์ฃผ์ธ์"},
                status=status.HTTP_400_BAD_REQUEST
            )
        """
        email_check1 = re.compile(
            r'[0-9a-zA-Z]+@[0-9a-zA-Z]+\.[0-9a-zA-Z]{2,}'  # email format regex
        ).search(data['email'])
        email_check2 = User.objects.filter(email=data['email'])  # duplicate email check
        nickname_check = User.objects.filter(nickname=data['nickname'])  # duplicate nickname check
        """
        if not check_password(data['password'],user.password):
            return Response(
                {"message":"๋น๋ฐ๋ฒํธ๋ฅผ ํ์ธํด์ฃผ์ธ์"},
                status=status.HTTP_403_FORBIDDEN
            )
        """
        if email_check1 is None:
            return Response(
                {"Message":"์ ํํ ์ด๋ฉ์ผ ํ์์ ์๋ ฅํด์ฃผ์ธ์"},
                status=status.HTTP_400_BAD_REQUEST
            )
        elif email_check2.exists() and email != data['email']:
            return Response(
                {"message":"์ด๋ฏธ ์กด์ฌํ๋ ์ด๋ฉ์ผ์๋๋ค."},
                status=status.HTTP_409_CONFLICT
            )
        elif nickname_check.exists() and nickname != data['nickname']:
            return Response(
                {"message":"์ด๋ฏธ ์กด์ฌํ๋ ๋ณ๋ช์๋๋ค."},
                status=status.HTTP_409_CONFLICT
            )
        else:
            # Fields that must never be changed through this endpoint.
            not_allowed = [
                'password', 'last_login', 'user_id',
                'is_active', 'is_admin'
            ]
            # Strip protected fields from the payload before assignment.
            for n in not_allowed:
                if n in data:
                    del data[n]
            # Assign the remaining fields onto the user object.
            for attr, value in data.items():
                setattr(user, attr, value)
            user.save()
            return Response(
                UserShortcutSerializer(user).data,
                status=status.HTTP_202_ACCEPTED
            )
@api_view(['POST'])
def user_logout(request):
    """End the current session and confirm with a 200 response."""
    auth.logout(request)
    return Response(
        {"message":"๋ก๊ทธ์์์ด ์๋ฃ๋์์ต๋๋ค."},
        status=status.HTTP_200_OK
    )
@api_view(['PUT'])
@permission_classes((IsAuthenticated,))
def change_password(request):
    """Change the authenticated user's password.

    Expects current_password, new_password1, new_password2. Returns 400 when the
    current password is wrong or the new passwords differ, 202 on success.
    """
    user = request.user
    data = request.data
    # NOTE(review): declared but never enforced — key presence is not actually checked,
    # so a missing key raises KeyError (500) instead of a clean 400.
    required_field = ('current_password', 'new_password1', 'new_password2')
    if not check_password(data['current_password'], user.password):  # verify current password
        return Response(
            {"message":"ํ์ฌ ๋น๋ฐ๋ฒํธ๊ฐ ์ผ์นํ์ง ์์ต๋๋ค."},
            status=status.HTTP_400_BAD_REQUEST
        )
    elif data['new_password1'] != data['new_password2']:
        return Response(
            {"message":"์ ๋น๋ฐ๋ฒํธ๊ฐ ์ผ์นํ์ง ์์ต๋๋ค."},
            status=status.HTTP_400_BAD_REQUEST
        )
    else:  # apply the change (set_password hashes the new value)
        user.set_password(data['new_password1'])
        user.save()
        return Response(
            {"message":"๋น๋ฐ๋ฒํธ๊ฐ ์ฑ๊ณต์ ์ผ๋ก ๋ณ๊ฒฝ๋์์ต๋๋ค."},
            status=status.HTTP_202_ACCEPTED
        )
"""
@api_view(['GET'])
def header_info(request):
is_authenticated = request.user.is_authenticated() #๋ก๊ทธ์ธ ํ์ธ
user = request.user
if not is_authenticated:
return Response(
{"message":"Not Logged in"},
status = status.HTTP_204_NO_CONTENT
)
data = {
"user_id" : user.user_id,
"username" : user.username,
"nickname" : user.nickname,
"email" : user.email,
"university":user.university,
"faculty":user.faculty,
"major":user.major,
"classnum" : user.classnum,
}
return Response(data, status=status.HTTP_200_OK)
"""
"""
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def user_profile(request, user_id): #๋ค๋ฅธ์ฌ๋ ์ ์ ์ ๋ณด ํ์ธ
user = request.user
try:
user = User.objects.get(user_id=user_id)
data = {
"classnum":user.classnum,
"user_id":user.user_id,
"username":user.username,
"nickname":user.nickname,
"email":user.email,
"university":user.university,
"faculty":user.faculty,
"major":user.major
}
return Response(data, status=status.HTTP_200_OK)
except User.DoesNotExist:
return Response(
{"message":"User not exist"},
status=status.HTTP_404_NOT_FOUND
)
"""
@api_view(['DELETE'])
@permission_classes((IsAuthenticated,))
def user_delete(request):
    """Delete the authenticated user's account.

    Requires the payload to echo the account's user_id and password; returns
    400 on wrong password, 409 on unknown user_id, 200 after deletion.
    """
    data = request.data
    user = request.user
    user_check = User.objects.filter(user_id=data['user_id'])
    if not check_password(data['password'], user.password):  # verify password first
        return Response(
            {"message":"๋น๋ฐ๋ฒํธ๊ฐ ์ผ์นํ์ง ์์ต๋๋ค."},
            status=status.HTTP_400_BAD_REQUEST
        )
    elif not user_check.exists():
        return Response(
            {"message":"์์ด๋๊ฐ ์ผ์นํ์ง ์์ต๋๋ค."},
            status=status.HTTP_409_CONFLICT
        )
    else:
        user = request.user
        user.delete()
        return Response(
            {"message":"ํ์ํํด๊ฐ ์๋ฃ๋์์ต๋๋ค."},
            status=status.HTTP_200_OK
        )
class Proxy:
    """Parse a ``host:port:username:password`` proxy string into its parts.

    The accessor names ``geUsername``/``gePassword``/``gePort`` contain typos but
    are kept for backward compatibility; correctly-spelled aliases are provided.
    Fixed: removed the pointless bare ``return`` in ``__init__`` and made
    ``__str__`` delegate to ``getProxy`` instead of duplicating the format string.
    """

    def __init__(self, proxy):
        # Positional fields: host, port, username, password (extra ':' parts ignored).
        parts = proxy.split(":")
        self.host = parts[0]
        self.port = parts[1]
        self.username = parts[2]
        self.password = parts[3]

    def geUsername(self):  # typo kept: existing callers use this name
        return self.username

    def gePassword(self):  # typo kept: existing callers use this name
        return self.password

    def getHost(self):
        return self.host

    def gePort(self):  # typo kept: existing callers use this name
        return self.port

    # Correctly-spelled, backward-compatible aliases.
    getUsername = geUsername
    getPassword = gePassword
    getPort = gePort

    def getProxy(self):
        """Return the proxy in ``user:pass@host:port`` form."""
        return f"{self.username}:{self.password}@{self.host}:{self.port}"

    def __str__(self):
        return self.getProxy()
# Smoke test for the Node API: attribute assignment, get/set, parent/child
# attachment symmetry, root lookup, and function registration via @node.func.
# (Node is provided by the enclosing test harness — not defined in this file.)
node = Node( "TestNode" )
node.i = 15
assert node.i == 15
node.set( 1 )
assert node.get() == 1
child = Node( "TestNode" )
# Attach/detach driven from the child side.
assert not node.hasChild( child )
assert not child.parent()
child.attachToParent( node )
assert node.hasChild( child )
assert node == child.parent()
child.detachFromParent()
assert not child.parent()
assert not node.hasChild( child )
assert not child.parent()
# Same symmetry driven from the parent side.
node.attachChild( child )
assert node.hasChild( child )
assert node == child.parent()
node.detachChild( child )
assert not node.hasChild( child )
assert not child.parent()
# root() should walk back up from a grandchild to the top node.
node.attachChild( child )
child.attachChild( Node( "core/Node" ) )
assert node == child.children()[0].root()
# Functions registered via the decorator become callable node methods.
@node.func
def foo( i ):
    return i * i
assert node.foo( 2 ) == 4
|
import pyxel
class App:
    """Minimal pyxel demo: a small square scrolls across a 160x120 window."""

    def __init__(self):
        pyxel.init(160, 120)
        self.x = 0  # current horizontal position of the square
        pyxel.run(self.update, self.draw)

    def update(self):
        # Advance x by 1 each frame, wrapping at the window width (mod pyxel.width).
        self.x = (self.x + 1) % pyxel.width

    def draw(self):
        pyxel.cls(0)
        # NOTE(review): pyxel >= 1.3 defines rect(x, y, w, h, col); this call uses
        # the legacy (x1, y1, x2, y2, col) form — confirm the pinned pyxel version.
        pyxel.rect(self.x, 0, self.x + 7, 7, 9)
App()
import os
import numpy as np
from edflow.data.util import *
def test_plot_datum():
    """plot_datum should render an example dict to a PNG on disk (then clean up)."""
    test_image = np.ones((128, 128, 3), dtype=int)
    test_heatmap = np.zeros((128, 128, 25), dtype=int)
    test_keypoints = np.random.randint(0, 128, (25, 2))
    test_example = {
        "image": test_image,
        "heatmap": test_heatmap,
        "keypoints": test_keypoints,
    }
    plot_datum(test_example, "test_plot.png")
    # The only asserted contract is that the output file was created.
    assert os.path.exists("test_plot.png")
    os.remove("test_plot.png")
def test_cart2polar():
    """cart2polar maps (1, 0) to radius 1 and angle pi/2.

    NOTE(review): pi/2 for (x=1, y=0) implies the angle is measured as
    arctan2(x, y) (from the y-axis), not the usual arctan2(y, x) — confirm
    against edflow's cart2polar implementation.
    """
    x = np.array([1, 0])
    y = np.array([0, 0])
    r, phi = cart2polar(x, y)
    assert r[0] == 1
    assert round(phi[0], 2) == round(np.pi / 2, 2)
|
# ---------------------------------------------------------------------------------------
# Call train contour Data set training script with different Fix Initializations of
# J_xy and J_yx
# ---------------------------------------------------------------------------------------
import numpy as np
import torch
from train_contour_data_set import main
import models.new_piech_models as new_piech_models
from train_utils import inverse_sigmoid
if __name__ == '__main__':
    # Fix all RNG seeds so each sweep run differs only in the j_xy init value.
    random_seed = 7
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)

    # Sweep post-sigmoid J_xy/J_yx values; inverse_sigmoid maps each back to the
    # raw parameter value the layer stores internally.
    sigma_jxy_jyx_range = np.array([0.001, 0.2, 0.4, 0.6, 0.8, 0.999])
    jxy_jyx_range = inverse_sigmoid(sigma_jxy_jyx_range)

    # ----------------------------------------------------------------------
    data_set_parameters = {
        'data_set_dir': "./data/channel_wise_optimal_full14_frag7",
        'train_subset_size': 20000,
        'test_subset_size': 2000
    }

    train_parameters = {
        'random_seed': random_seed,
        'train_batch_size': 32,
        'test_batch_size': 32,
        'learning_rate': 1e-4,
        'num_epochs': 100,
        'lateral_w_reg_weight': 0.0001,
        'lateral_w_reg_gaussian_sigma': 10,
        'clip_negative_lateral_weights': True,
        'lr_sched_step_size': 80,
        'lr_sched_gamma': 0.5
    }

    # One full training run per fixed j_xy initialization, results stored per value.
    for jxy_jyx in jxy_jyx_range:
        # NOTE(review): the second format() argument ('*' * 40) is unused — the
        # format string has only one placeholder.
        print("Processing jxy_jyx = {}".format(jxy_jyx, '*' * 40))

        cont_int_layer = new_piech_models.CurrentSubtractInhibitLayer(
            lateral_e_size=15, lateral_i_size=15, n_iters=5, j_xy=jxy_jyx, use_recurrent_batch_norm=True)
        net = new_piech_models.ContourIntegrationResnet50(cont_int_layer)

        main(net, train_params=train_parameters, data_set_params=data_set_parameters,
             base_results_store_dir='./results/explore_fixed_jxy/jxy_{:0.3}'.format(jxy_jyx))
# -----------------------------------------------------------------------------------
# End
# -----------------------------------------------------------------------------------
# Removed leftover debugger breakpoint (`import pdb; pdb.set_trace()`): it ran at
# module level and would halt any process that imported or executed this script.
|
from unifier.apps.drf.v1.serializers.favorite import FavoriteSerializer
from unifier.apps.drf.v1.serializers.manga import (
MangaChapterCreateSerializer,
MangaChapterDetailSerializer,
MangaChapterSerializer,
MangaCreateSerializer,
MangaSerializer,
MangaSerializerDetail,
)
from unifier.apps.drf.v1.serializers.novel import (
NovelChapterDetailSerializer,
NovelChapterSerializer,
NovelSerializer,
NovelSerializerDetail,
)
from unifier.apps.drf.v1.serializers.platform import PlatformSerializer, PlatformSerializerDetail
from unifier.apps.drf.v1.serializers.user import UserSerializer
|
from team import Team
from colortext import *
import argparse
import os
import random
import shutil
import ansiwrap
from time import sleep
from match_events import *
from utils import *
from visualization import *
from PyQt5 import QtCore, QtGui, QtWidgets
import constants
#import pyautogui
def calc_score(towers, stacks):
    """Return the match score: each cube color contributes stacks * (towers + 1)."""
    return sum(stacks[color] * (towers[color] + 1) for color in range(3))
def get_valid_match(towers, red_stacks, blue_stacks):  # Checks if match score is valid
    """Reject failed score generations (-1 sentinel) and over-full cube colors (>22)."""
    if red_stacks[0] == -1 or blue_stacks[0] == -1:  # score generation failed
        return False
    # No color may exceed the 22 cubes physically available.
    return all(towers[c] + red_stacks[c] + blue_stacks[c] <= 22 for c in range(3))
def gen_towers(focus1, focus2):
    """Generate per-color tower counts biased toward the two focus colors.

    Base counts are 0-1 per color; the focus color(s) get boosted (2-4 when both
    alliances share a focus, 1-3 each otherwise). Retries until the total is <= 7.
    """
    # NOTE: random.randint is called in the same order as the original so seeded
    # runs are reproducible.
    towers = [random.randint(0, 1) for _ in range(3)]
    if focus1 == focus2:
        towers[focus1] = random.randint(2, 4)
    else:
        towers[focus1] = random.randint(1, 3)
        towers[focus2] = random.randint(1, 3)
    if sum(towers) > 7:
        return gen_towers(focus1, focus2)
    return towers
def gen_stacks(towers, focus_cube, score):
    """Randomly distribute stacked cubes so calc_score(towers, stacks) == score.

    The focus cube is seeded with 1/2 to 3/4 of its maximum contribution, then
    single cubes are added to the non-focus colors until the score matches.
    Returns the stacks list, or a sentinel with stacks[0] == -1 after too many
    failed restarts (callers check for -1).

    Fixes two bugs in the original:
    - the restart branch used ``stacks[i] == 0`` (a no-op comparison) instead of
      resetting the non-focus counts to zero;
    - the overshoot fallback incremented absolute indices ``stacks[1]``/``stacks[0]``
      rather than the *other non-focus* cube, which re-added to the same cube
      whenever focus_cube == 0.
    """
    stacks = [0, 0, 0]
    max_of_focus = int(score / (towers[focus_cube] + 1))
    # Seed the focus cube with half to three quarters of its maximum.
    stacks[focus_cube] = random.randint(int(max_of_focus / 2), int(max_of_focus * 3 / 4))
    not_focus = [i for i in range(3) if i != focus_cube]
    attempts = 0
    while calc_score(towers, stacks) != score:
        index = random.randint(0, 1)
        add_cube = not_focus[index]
        stacks[add_cube] += 1
        if calc_score(towers, stacks) > score:
            # Overshot: undo and try the other non-focus cube instead.
            stacks[add_cube] -= 1
            stacks[not_focus[1 - index]] += 1  # BUG FIX: was stacks[1]/stacks[0]
            if calc_score(towers, stacks) > score:
                # Both choices overshoot: restart with a fresh focus seed.
                attempts += 1
                for i in not_focus:
                    stacks[i] = 0  # BUG FIX: was `stacks[i] == 0`, a no-op
                stacks[focus_cube] = random.randint(int(max_of_focus / 2), max_of_focus)
                if attempts > 10:
                    stacks[0] = -1  # stack generation failed (sentinel)
                    return stacks
    return stacks
def update_cubes(curr, delta):
    """Add each entry of the 3x3 ``delta`` grid into ``curr`` in place."""
    for row_idx in range(3):
        row, add = curr[row_idx], delta[row_idx]
        for col_idx in range(3):
            row[col_idx] += add[col_idx]
def pick_acting_team(strengths):
    """Pick team 0 or 1 with probability proportional to its strength.

    Side effect: the chosen team's strength is halved in place, making it less
    likely to act again soon.
    """
    draw = random.uniform(0, sum(strengths))
    team = 0 if draw <= strengths[0] else 1
    strengths[team] /= 2
    return team
def get_all_more(more, less):
    """True when every entry of ``more`` is >= the matching entry of ``less``."""
    return all(more[i] >= less[i] for i in range(len(more)))
def run_match(red_alliance, blue_alliance, speed, wait, visual, extras):
all_teams = red_alliance + blue_alliance
prematch_event_strings = []
postmatch_event_strings = []
if extras:
for team in all_teams:
if team.robot_health == 1:
event_type = globals()[constants.give_prematch_event()]
prematch_event_strings.append(str(event_type(team)))
else:
prematch_event_strings.append(str(Repair(team)))
red_strengths = [team.give_score() for team in red_alliance]
blue_strengths = [team.give_score() for team in blue_alliance]
red_score_gen = int(sum(red_strengths) / 2)
blue_score_gen = int(sum(blue_strengths) / 2)
#0 = orange, 1 = green, 2 = purple
red_focus_cube = random.randint(0, 2)
blue_focus_cube = random.randint(0, 2)
towers_pred = [0, 0, 0]
red_stacks_pred = [-1, 0, 0]
blue_stacks_pred = [-1, 0, 0]
attempts = 0
while not get_valid_match(towers_pred, red_stacks_pred, blue_stacks_pred):
if attempts == 10: #Score may be impossible?
blue_score_gen -= 1
if attempts == 20:
red_score_gen -= 1
attempts = 0
towers_pred = gen_towers(red_focus_cube, blue_focus_cube)
red_stacks_pred = gen_stacks(towers_pred, red_focus_cube, red_score_gen)
blue_stacks_pred = gen_stacks(towers_pred, blue_focus_cube, blue_score_gen)
#Calculate auton bonus
red_auton_odds = red_alliance[0].auton_rate + red_alliance[1].auton_rate
blue_auton_odds = blue_alliance[0].auton_rate + blue_alliance[1].auton_rate
tie_auton_odds = 0.6 - abs(red_auton_odds - blue_auton_odds) / 2
#print(red_auton_odds + blue_auton_odds + tie_auton_odds)
auton_result_num = random.uniform(0, red_auton_odds + blue_auton_odds + tie_auton_odds)
#print(auton_result_num)
auton_winner = -1
if auton_result_num < red_auton_odds:
auton_winner = 0
elif auton_result_num < red_auton_odds + blue_auton_odds:
auton_winner = 1
else:
auton_winner = 2
events = []
match_totals = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
red_auton_result = [0, 0, 0]
blue_auton_result = [0, 0, 0]
red_p_auton = None
red_u_auton = None
blue_p_auton = None
blue_u_auton = None
#Auton generation
p_autons = [[0, 0, 0], [0], [0, 2, 0, 0]] #Autons common for protected zone (red)
u_autons = [[0, 2, 1, 2, 1, 1, 0], [0, 2, 1, 1, 0], [0, 2, 1, 1, 0, 2], [0, 2, 1, 1, 0, 0], [0]] #Autons common for unprotected zone (red)
successful_auton_gen = False
while not successful_auton_gen:
red_auton_result = [0, 0, 0]
blue_auton_result = [0, 0, 0]
red_p = list.copy(random.choice(p_autons))
red_u = list.copy(random.choice(u_autons))
blue_p = list.copy(random.choice(p_autons))
blue_u = list.copy(random.choice(u_autons))
for i in range(len(blue_p)):
if blue_p[i] == 0:
blue_p[i] = 1
elif blue_p[i] == 1:
blue_p[i] = 0
for i in range(len(blue_u)):
if blue_u[i] == 0:
blue_u[i] = 1
elif blue_u[i] == 1:
blue_u[i] = 0
fails = [False, False, False, False]
for i in range(4):
if random.random() > 0.8:
fails[i] = True
red_autons = [red_p, red_u]
blue_autons = [blue_p, blue_u]
for i in range(2):
if not fails[i]:
for cube in red_autons[i]:
red_auton_result[cube] += 1
if not fails[i + 2]:
for cube in blue_autons[i]:
blue_auton_result[cube] += 1
red_total = sum(red_auton_result)
blue_total = sum(blue_auton_result)
if (red_total > blue_total and auton_winner == 0) or (red_total < blue_total and auton_winner == 1) or (red_total == blue_total and auton_winner == 2): #We randomly picked right
red_p_auton = Stack(1, random.randint(10, 14), red_alliance[0], 0, cube_order=red_p, autofail=fails[0])
red_u_auton = Stack(0, random.randint(10, 14), red_alliance[1], 0, cube_order=red_u, autofail=fails[1])
blue_p_auton = Stack(1, random.randint(10, 14), blue_alliance[0], 1, cube_order=blue_p, autofail=fails[2])
blue_u_auton = Stack(0, random.randint(10, 14), blue_alliance[1], 1, cube_order=blue_u, autofail=fails[3])
events = [red_p_auton, red_u_auton, blue_p_auton, blue_u_auton]
successful_auton_gen = True
#Stack event generation
#Remove auton stacks as necessary (or cuz we feel like it)
red_available_spots = 3
blue_available_spots = 3
destacks = []
red_spot_list = [0, 1, 2]
blue_spot_list = [0, 1, 2]
red_still_needed = [red_stacks_pred[i] - red_auton_result[i] for i in range(3)]
blue_still_needed = [blue_stacks_pred[i] - blue_auton_result[i] for i in range(3)]
stack_size_reasonable = False
while not stack_size_reasonable:
red_available_spots = 3
blue_available_spots = 3
destacks = []
red_spot_list = [0, 1, 2]
blue_spot_list = [0, 1, 2]
red_still_needed = [red_stacks_pred[i] - red_auton_result[i] for i in range(3)]
blue_still_needed = [blue_stacks_pred[i] - blue_auton_result[i] for i in range(3)]
for event in events:
if type(event) == Stack and not event.autofail:
if event.color == 0:
if not get_all_more(red_stacks_pred, red_auton_result) or random.random() < 0.5:
destacks.append(Destack(event, random.randint(16, 25)))
for i in range(3):
red_auton_result[i] -= event.cube_totals[i]
else:
red_spot_list.remove(event.location)
red_available_spots -= 1
if event.color == 1:
if not get_all_more(blue_stacks_pred, blue_auton_result) or random.random() < 0.5:
destacks.append(Destack(event, random.randint(16, 25)))
for i in range(3):
blue_auton_result[i] -= event.cube_totals[i]
else:
blue_spot_list.remove(event.location)
blue_available_spots -= 1
if sum(red_still_needed) <= red_available_spots * 12 and sum(blue_still_needed) <= blue_available_spots * 12:
stack_size_reasonable = True
for event in destacks:
events.append(event)
red_still_needed = [red_stacks_pred[i] - red_auton_result[i] for i in range(3)]
blue_still_needed = [blue_stacks_pred[i] - blue_auton_result[i] for i in range(3)]
#Generate stacks to add
try:
red_stacks = [[0, 0, 0] for i in range(random.randint(int(sum(red_still_needed) / 12) + 1, red_available_spots))]
except ValueError:
red_stacks = [[0, 0, 0] for i in range(red_available_spots)]
try:
blue_stacks = [[0, 0, 0] for i in range(random.randint(int(sum(blue_still_needed) / 12) + 1, blue_available_spots))]
except ValueError:
blue_stacks = [[0, 0, 0] for i in range(blue_available_spots)]
for i in range(3):
for j in range(red_still_needed[i]):
red_stacks[random.randint(0, len(red_stacks) - 1)][i] += 1
for j in range(blue_still_needed[i]):
blue_stacks[random.randint(0, len(blue_stacks) - 1)][i] += 1
for stack in red_stacks:
location = random.choice(red_spot_list)
if location == 2 and (1 in red_spot_list):
location = 1
events.append(Stack(location, random.randint(30, 120), red_alliance[pick_acting_team(red_strengths)], 0, cube_totals=stack))
red_spot_list.remove(location)
for stack in blue_stacks:
location = random.choice(blue_spot_list)
if location == 2 and (1 in blue_spot_list):
location = 1
events.append(Stack(location, random.randint(30, 120), blue_alliance[pick_acting_team(blue_strengths)], 1, cube_totals=stack))
blue_spot_list.remove(location)
#Generate tower event times
# One timestamp (45-120s) per predicted tower placement, processed in order.
tower_times = []
for i in range(sum(towers_pred)):
tower_times.append(random.randint(45, 120))
tower_times.sort()
#Quietly sim to determine what alliance should put each tower
# Dry-run the stack/destack timeline (no visualization, no scoring) so that
# at each tower time we know both alliances' current stacked-cube counts per
# color, and can award the tower to whichever side it would benefit.
red_sim_results = [0, 0, 0]
blue_sim_results = [0, 0, 0]
events.sort(key=lambda x: x.time)
tower_events = []
curr_event = 0
# Tower positions 0-6; 7 below is an out-of-range sentinel value.
free_towers = [0, 1, 2, 3, 4, 5, 6]
# Expand towers_pred (count per color) into a flat list of needed colors.
tower_colors_needed = []
for i in range(3):
for j in range(towers_pred[i]):
tower_colors_needed.append(i)
for time in range(0, 120):
# Apply every stack/destack event scheduled for this second to the
# simulated cube tallies.
if curr_event < len(events) and events[curr_event].time == time:
while curr_event < len(events) and events[curr_event].time == time:
event = events[curr_event]
if type(event) == Stack:
if event.color == 0:
for i in range(3):
red_sim_results[i] += event.cube_totals[i]
else:
for i in range(3):
blue_sim_results[i] += event.cube_totals[i]
elif type(event) == Destack:
if event.color == 0:
for i in range(3):
red_sim_results[i] -= event.cube_totals[i]
else:
for i in range(3):
blue_sim_results[i] -= event.cube_totals[i]
curr_event += 1
if len(tower_times) == 0:
break
# NOTE(review): only one tower is handled per second here; if two
# entries in tower_times collide on the same value, the second is
# never placed -- confirm whether that is intended.
if tower_times[0] == time:
# Pick a random still-needed color; the alliance currently holding more
# stacked cubes of that color places the tower (tie -> coin flip).
tower_color = tower_colors_needed.pop(random.randint(0, len(tower_colors_needed) - 1))
if red_sim_results[tower_color] > blue_sim_results[tower_color]:
tower_team = 0
elif red_sim_results[tower_color] < blue_sim_results[tower_color]:
tower_team = 1
else:
tower_team = random.randint(0, 1)
# Re-draw until the location is legal for the acting alliance: tower 6
# appears off-limits to red and tower 0 to blue (presumably the
# alliance-specific towers -- confirm against field rules).
valid_tower = False
tower_loc = 7
while not valid_tower:
tower_loc = random.choice(free_towers)
if not ((tower_team == 0 and tower_loc == 6) or (tower_team == 1 and tower_loc == 0)):
valid_tower = True
free_towers.remove(tower_loc)
if tower_team == 0:
tower_events.append(Tower(tower_loc, tower_color, time, red_alliance[pick_acting_team(red_strengths)], 0))
else:
tower_events.append(Tower(tower_loc, tower_color, time, blue_alliance[pick_acting_team(blue_strengths)], 1))
del tower_times[0]
# Merge the decided tower placements into the main event timeline.
for event in tower_events:
events.append(event)
#Add special events:
#Defense:
# Up to three independent chances for a defense event; each slot i that
# fires may also spawn follow-up defense (slots j > i) by the same pair.
for i in range(3):
if random.random() < constants.DEFENSE_ODDS:
acting_alliance = random.randint(0, 1)
try:
# Weighted draws: a uniform pick in 1..(s0+s1) lands below s0 with
# probability ~ s0/(s0+s1), so the defender tends to be the partner
# of the higher-scoring teammate, and the recipient tends to be the
# opposing alliance's higher-scoring team -- TODO confirm intent.
if acting_alliance == 0:
if random.randint(1, sum(red_alliance[0].scores) + sum(red_alliance[1].scores)) < sum(red_alliance[0].scores):
defender = red_alliance[1]
else:
defender = red_alliance[0]
if random.randint(1, sum(blue_alliance[0].scores) + sum(blue_alliance[1].scores)) < sum(blue_alliance[0].scores):
recipient = blue_alliance[0]
else:
recipient = blue_alliance[1]
else:
if random.randint(1, sum(blue_alliance[0].scores) + sum(blue_alliance[1].scores)) < sum(blue_alliance[0].scores):
defender = blue_alliance[1]
else:
defender = blue_alliance[0]
if random.randint(1, sum(red_alliance[0].scores) + sum(red_alliance[1].scores)) < sum(red_alliance[0].scores):
recipient = red_alliance[0]
else:
recipient = red_alliance[1]
except ValueError:
# randint(1, 0) raises when an alliance's combined score sum is
# zero (no history); fall back to uniform picks.
if acting_alliance == 0:
defender = random.choice(red_alliance)
recipient = random.choice(blue_alliance)
else:
defender = random.choice(blue_alliance)
recipient = random.choice(red_alliance)
# A "questionable" defense may escalate to a DQ that carries through
# to the post-match report.
questionable = False
if random.random() < constants.DEFENSE_DQ_ODDS:
questionable = True
if random.random() < constants.DQ_CARRY_THROUGH_ODDS:
postmatch_event_strings.append(defender.name + " has been issued a DQ!")
events.append(Defense(15 + (i + 1) * random.randint(1, 35), defender, acting_alliance, recipient, events, questionable))
for j in range(i + 1, 3):
if random.random() < constants.DEFENSE_CONTINUE_ODDS:
questionable = False
if random.random() < constants.DEFENSE_DQ_ODDS:
questionable = True
if random.random() < constants.DQ_CARRY_THROUGH_ODDS:
postmatch_event_strings.append(defender.name + " has been issued a DQ for defense on " + recipient.name + "!")
# NOTE(review): the follow-up time reuses (i + 1), not (j + 1),
# so repeats land in the same time window as the original event
# -- confirm whether that is intended.
events.append(Defense(15 + (i + 1) * random.randint(1, 35), defender, acting_alliance, recipient, events, questionable))
#Damage during match:
# damaged_teams collects every team mutated by damage so they can be
# persisted after the match (see the exportJSON loop below).
damaged_teams = []
# Single chance per match for an in-match damage event.
if random.random() < constants.DAMAGE_ODDS:
damaged_alliance = random.randint(0, 1)
if damaged_alliance == 0:
damaged_team = random.choice(red_alliance)
else:
damaged_team = random.choice(blue_alliance)
damager = None
# Sometimes the damage is attributed to an opponent, who may be DQ'd.
if random.random() < constants.DAMAGE_INTENTION_ODDS:
if damaged_alliance == 0:
damager = random.choice(blue_alliance)
else:
damager = random.choice(red_alliance)
if random.random() < constants.DQ_CARRY_THROUGH_ODDS:
postmatch_event_strings.append(damager.name + " has been issued a DQ for damaging " + damaged_team.name + "!")
# Severity (extent) and repair length come from weighted tables in
# constants; repair_time adds 0-25 jitter on top of the category base.
extent = random.choices(list(constants.DAMAGE_TYPES.keys()), weights=constants.DAMAGE_TYPE_ODDS)[0]
repair_category = random.choices(list(constants.DAMAGE_LENGTHS.keys()), weights=[0.5, 0.3, 0.2])[0]
repair_time = random.randint(0, 25) + constants.DAMAGE_LENGTHS[repair_category]
postmatch_event_strings.append(damaged_team.name + " appears to have sustained " + extent + " damage.")
postmatch_event_strings.append(repair_category)
events.append(Damage(random.randint(15, 120), damaged_team, damaged_alliance, extent, repair_time, constants.DAMAGE_TYPES[extent], damager))
# Finalize the timeline before playback.
events.sort(key=lambda x: x.time)
#Run match and log events
curr_event = 0
auton_winner = -1
if visual:
# Qt visualization: one window with a Bot widget per robot at fixed
# pixel coordinates. (QtWidgets/QtCore/Ui_MainWindow/Bot are defined or
# imported outside this chunk -- presumably PyQt/PySide.)
app = QtWidgets.QApplication(sys.argv)
ui = Ui_MainWindow()
bot1 = Bot(ui.centralwidget, 0, red_alliance[0].name, 420, 350)
bot2 = Bot(ui.centralwidget, 0, red_alliance[1].name, 420, 950)
bot3 = Bot(ui.centralwidget, 1, blue_alliance[0].name, 1670, 350)
bot4 = Bot(ui.centralwidget, 1, blue_alliance[1].name, 1670, 950)
ui.align()
for event in events:
event.init_visualization(ui.centralwidget)
ui.setWindowFlags(QtCore.Qt.FramelessWindowHint)
ui.show()
# NOTE(review): 'clear' assumes a POSIX terminal; no-op/error on Windows.
os.system('clear')
if extras:
for event in prematch_event_strings:
print(event)
input("Press enter to begin")
# Main clock: one iteration per match second, 0-120 inclusive.
for time in range(121):
if visual:
QtWidgets.QApplication.processEvents()
if wait:
sleep(speed)
# Fire every event scheduled for this second. act() returns cube deltas
# folded into match_totals; events are re-sorted after each act() --
# presumably act() can reschedule entries; confirm against Event classes.
if curr_event < len(events) and events[curr_event].time == time:
while curr_event < len(events) and events[curr_event].time == time:
update_cubes(match_totals, events[curr_event].act())
events.sort(key=lambda x: x.time)
if visual:
events[curr_event].visualize(ui)
curr_event += 1
# End of autonomous at t=14: compare stacked-cube counts (index 1 =
# red stacks, index 2 = blue stacks, per the printouts below).
if time == 14:
if sum(match_totals[1]) > sum(match_totals[2]):
auton_winner = 0
print(redtext("Red wins autonomous!"))
elif sum(match_totals[1]) < sum(match_totals[2]):
auton_winner = 1
print(bluetext("Blue wins autonomous!"))
else:
print("Autonomous tie!")
auton_winner = 2
# NOTE(review): this prompt blocks even with --no-wait/--no-extras.
input("Press enter to begin driver control")
# Final scoring: calc_score combines tower totals (index 0) with each
# alliance's stacks; auton bonus is 6 to the winner or 3 each on a tie.
red_final_score = calc_score(match_totals[0], match_totals[1])
blue_final_score = calc_score(match_totals[0], match_totals[2])
if auton_winner == 0:
red_final_score += 6
elif auton_winner == 1:
blue_final_score += 6
else:
red_final_score += 3
blue_final_score += 3
print("Time up!")
print("Towers: " + cube_totals_to_string(match_totals[0]))
print("Red stacks: " + cube_totals_to_string(match_totals[1]))
print("Blue stacks: " + cube_totals_to_string(match_totals[2]))
print("Final score: " + redtext(red_final_score) + '-' + bluetext(blue_final_score))
#Damage after match:
# Post-match damage mutates the Team object directly (robot_health and
# repair_time) and is persisted below via exportJSON.
if random.random() < constants.POST_DAMAGE_ODDS:
damaged_team = random.choice(all_teams)
extent = random.choices(list(constants.DAMAGE_TYPES.keys()), weights=constants.DAMAGE_TYPE_ODDS)[0]
damaged_team.robot_health = constants.DAMAGE_TYPES[extent]
repair_category = random.choices(list(constants.DAMAGE_LENGTHS.keys()), weights=[0.5, 0.3, 0.2])[0]
damaged_team.repair_time = random.randint(0, 25) + constants.DAMAGE_LENGTHS[repair_category]
postmatch_event_strings.append(damaged_team.name + " appears to have sustained " + extent + " damage.")
postmatch_event_strings.append(repair_category)
damaged_teams.append(damaged_team)
# Persist every damaged team's new state to its JSON profile.
for team in damaged_teams:
team.exportJSON()
for event in postmatch_event_strings:
print(event)
input("Press enter to quit")
'''
for event in events:
update_cubes(match_totals, event.act())
#print(match_totals)
'''
# ---------------------------------------------------------------------------
# Command-line entry point: load four team profiles from team_data/*.json and
# run one head-to-head match (red1+red2 vs blue1+blue2) via run_match.
# Flags: --no-visual disables the Qt view, --speed sets seconds per tick,
# --no-wait skips per-tick sleeps, --no-extras skips pre/post-match events.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Simulate a competition match')
    parser.add_argument('red1', help='Red alliance team 1')
    parser.add_argument('red2', help='Red alliance team 2')
    parser.add_argument('blue1', help='Blue alliance team 1')
    parser.add_argument('blue2', help='Blue alliance team 2')
    parser.add_argument('--no-visual', dest='visual', action='store_false', default=True, help='Don\'t run the visualization')
    parser.add_argument('--speed', type=float, default=constants.DEFAULT_SPEED, help='Speed to run the simulation')
    parser.add_argument('--no-wait', dest='wait', action='store_false', default=True, help='Use to run full match immediately without waiting')
    parser.add_argument('--no-extras', dest='extras', action='store_false', default=True, help='Use to bypass prematch and postmatch events')
    args = parser.parse_args()
    try:
        # Team profiles are stored as team_data/<name>.json; a missing file
        # aborts the run with a readable message instead of a traceback.
        red1 = Team.fromJSON('team_data/' + args.red1 + '.json')
        red2 = Team.fromJSON('team_data/' + args.red2 + '.json')
        blue1 = Team.fromJSON('team_data/' + args.blue1 + '.json')
        blue2 = Team.fromJSON('team_data/' + args.blue2 + '.json')
    except FileNotFoundError as err:
        print("Team file not found for team " + os.path.splitext(os.path.basename(err.filename))[0])
        # BUG FIX: quit() is a site-module convenience intended for the
        # interactive interpreter (absent under `python -S`); sys.exit(1)
        # terminates reliably and reports failure to the shell.
        sys.exit(1)
    reds = [red1, red2]
    blues = [blue1, blue2]
    ui = None  # placeholder; run_match builds its own UI when visual is enabled
    run_match(reds, blues, args.speed, args.wait, args.visual, args.extras)
    #sys.exit(app.exec_())
# NOTE(review): the three lines below are dataset-viewer page residue from the
# extraction ("Subsets and Splits" panel text), not part of the simulator
# source; commented out so they no longer break parsing.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.