index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
999,000 | c3f22c9cd8f7ef0325ed7d1e9f13f403ccc38d7a | '''
Created by auto_sdk on 2014-09-08 16:48:02
'''
from top.api.base import RestApi
class AitaobaoItemsBuyConvertRequest(RestApi):
    # Auto-generated (auto_sdk, 2014-09-08) request wrapper for the Taobao
    # TOP API method 'taobao.aitaobao.items.buy.convert'. Attributes left
    # as None are omitted from the request.

    def __init__(self, domain='gw.api.taobao.com', port=80):
        """Initialize the request against the TOP gateway host/port."""
        RestApi.__init__(self, domain, port)
        # Request parameters -- exact semantics defined by the TOP API;
        # see the Taobao open-platform docs for each field.
        self.buy_now = None
        self._from = None   # leading underscore avoids the Python keyword 'from'
        self.open_iid = None
        self.pid = None
        self.quantity = None
        self.sku_id = None

    def getapiname(self):
        """Return the remote API method name this request maps to."""
        return 'taobao.aitaobao.items.buy.convert'

    def getTranslateParas(self):
        """Map the Python-safe attribute '_from' to the wire parameter 'from'."""
        return {'_from': 'from'}
|
999,001 | c27536eaeffeaf5050ef556269aac47f4c7ef6a2 | # Copyright (c) 2016, Frederik Hermans, Liam McNamara
#
# This file is part of FOCUS and is licensed under the 3-clause BSD license.
# The full license can be found in the file COPYING.
import inspect
import click
import focus
import focus.simpletxrx
def _nop():
pass
def _get_type(default):
if type(default) == tuple:
return tuple(_get_type(e) for e in default)
else:
return type(default)
def build_command(name, func):
    """Build a click command named *name* from *func*'s signature.

    Every parameter of *func* must have a default value; each one becomes
    a ``--<arg>`` option whose type is inferred from its default.
    Functions taking *args or **kwargs are rejected.

    Raises:
        RuntimeError: if *func* uses varargs/kwargs or has a parameter
            without a default.
    """
    # getargspec() was removed in Python 3.11; prefer getfullargspec and
    # fall back for Python 2 interpreters.
    try:
        spec = inspect.getfullargspec(func)
        args, varargs, keywords, defaults = (spec.args, spec.varargs,
                                             spec.varkw, spec.defaults)
    except AttributeError:  # Python 2
        args, varargs, keywords, defaults = inspect.getargspec(func)
    args = args if args else list()
    defaults = defaults if defaults else list()
    if varargs is not None or keywords is not None:
        raise RuntimeError('Cannot build CLI for function with kwargs or '
                           'varargs.')
    if len(args) != len(defaults):
        raise RuntimeError('Cannot build CLI for function with argument '
                           'without default values.')
    # list() is required: on Python 3, zip() returns a one-shot iterator
    # which reversed() cannot consume (the original raised TypeError).
    for arg, default in reversed(list(zip(args, defaults))):
        func = click.option('--' + arg, type=_get_type(default),
                            default=default)(func)
    return click.command(name)(func)
def build_group(name, *commands):
    """Create a click group called *name* and register *commands* on it."""
    grp = click.group(name)(_nop)
    for command in commands:
        grp.add_command(command)
    return grp
def main():
    """Assemble the FOCUS command-line interface and invoke it.

    NOTE(review): this references focus.fft, focus.receiver, focus.video,
    focus.multiprocreceiver and focus.tests as attributes of the 'focus'
    package although only 'focus' and 'focus.simpletxrx' are imported at
    the top of this file -- presumably focus/__init__.py imports its
    submodules; confirm before refactoring the imports.
    """
    # Benchmark subgroup: performance commands generated from plain
    # functions via build_command.
    benchmark = build_group('benchmark',
                            build_command('fft', focus.fft.benchmark),
                            build_command('multiprocreceiver',
                                          focus.multiprocreceiver.benchmark),
                            build_command('receiver', focus.receiver.benchmark))
    # Top-level group mixes generated commands with pre-built click
    # commands; the trailing () dispatches to the CLI immediately.
    build_group('main',
                benchmark,
                build_command('test', focus.tests.run_tests),
                focus.receiver.main,
                focus.simpletxrx.tx,
                focus.simpletxrx.rx,
                focus.video.rx,
                focus.video.tx,
                focus.video.multirate,
                build_command('fft_init', focus.fft.wisdom))()


if __name__ == '__main__':
    main()
|
999,002 | f44f6b8b71205883755f2eab3b1a236c49d94bc3 | import torch
import json
import sys
from src.models.optim.autoencoder_trainer import AutoEncoderTrainer
from src.models.optim.DeepSAD_trainer import DeepSADTrainer
class DeepSAD:
    """
    Define a DeepSAD instance (inspired from the work of Lukas Ruff et al. (2019))
    and utility methods to pretrain, train, validate, test, save and load it.
    """

    def __init__(self, net, ae_net=None, eta=1.0):
        """
        Build the DeepSAD instance.
        ----------
        INPUT
        |---- net (nn.Module) the Encoder network to use.
        |---- ae_net (nn.Module) the Autoencoder network to use for pretraining.
        |---- eta (float) the DeepSAD parameter weighting unsupervised vs
        |           supervised samples in the loss: 1.0 weighs known and
        |           unknown samples equally, <1.0 favors unknown samples,
        |           >1.0 favors known samples.
        OUTPUT
        |---- None
        """
        self.eta = eta  # balance of importance of labeled vs unlabeled samples
        self.c = None   # hypersphere center
        self.net = net
        self.trainer = None
        self.ae_net = ae_net
        self.ae_trainer = None
        # Results for both representations: autoencoder 'reconstruction'
        # scores and DeepSAD 'embedding' distances. Built by a helper to
        # avoid duplicating the nested placeholder dict.
        self.results = {
            'reconstruction': self._empty_results(),
            'embedding': self._empty_results(),
        }

    @staticmethod
    def _empty_results():
        """Return one fresh results sub-dict with train/valid/test placeholders."""
        return {
            'train': {'time': None, 'loss': None},
            'scores_threshold': None,
            'valid': {'time': None, 'auc': None, 'f1': None, 'scores': None},
            'test': {'time': None, 'auc': None, 'f1': None, 'scores': None},
        }

    def train(self, dataset, lr=0.0001, n_epoch=150, lr_milestone=(), batch_size=64,
              weight_decay=1e-6, device='cuda', n_jobs_dataloader=0, print_batch_progress=False):
        """
        Train the DeepSAD model on the provided dataset with the provided parameters.
        ----------
        INPUT
        |---- dataset (pytorch Dataset) the dataset on which to train the DeepSAD.
        |           Must return (input, label, mask, semi_label, idx).
        |---- lr (float) the learning rate.
        |---- n_epoch (int) the number of epochs.
        |---- lr_milestone (tuple) the lr update steps.
        |---- batch_size (int) the batch size to use.
        |---- weight_decay (float) the weight decay for the Adam optimizer.
        |---- device (str) the device to work on ('cpu' or 'cuda').
        |---- n_jobs_dataloader (int) number of workers for the dataloader.
        |---- print_batch_progress (bool) whether to display a progress bar.
        OUTPUT
        |---- None
        """
        self.trainer = DeepSADTrainer(self.c, self.eta, lr=lr, n_epoch=n_epoch, lr_milestone=lr_milestone,
                                      batch_size=batch_size, weight_decay=weight_decay, device=device,
                                      n_jobs_dataloader=n_jobs_dataloader, print_batch_progress=print_batch_progress)
        # train DeepSAD
        self.net = self.trainer.train(dataset, self.net)
        # recover results and the learned hypersphere center
        self.results['embedding']['train']['time'] = self.trainer.train_time
        self.results['embedding']['train']['loss'] = self.trainer.train_loss
        self.c = self.trainer.c.cpu().data.numpy().tolist()

    def validate(self, dataset, device='cuda', n_jobs_dataloader=0, print_batch_progress=False):
        """
        Validate the DeepSAD model on the provided dataset and find the score
        threshold that maximizes the F1-score.
        ----------
        INPUT
        |---- dataset (pytorch Dataset) the dataset on which to validate the DeepSAD.
        |           Must return (input, label, mask, semi_label, idx).
        |---- device (str) the device to work on ('cpu' or 'cuda').
        |---- n_jobs_dataloader (int) number of workers for the dataloader.
        |---- print_batch_progress (bool) whether to display a progress bar.
        OUTPUT
        |---- None
        """
        if self.trainer is None:
            self.trainer = DeepSADTrainer(self.c, self.eta, device=device,
                                          n_jobs_dataloader=n_jobs_dataloader,
                                          print_batch_progress=print_batch_progress)
        self.trainer.validate(dataset, self.net)
        # recover results ('scores_threhold' typo is the trainer's attribute name)
        self.results['embedding']['scores_threshold'] = self.trainer.scores_threhold
        self.results['embedding']['valid']['time'] = self.trainer.valid_time
        self.results['embedding']['valid']['auc'] = self.trainer.valid_auc
        self.results['embedding']['valid']['f1'] = self.trainer.valid_f1
        self.results['embedding']['valid']['scores'] = self.trainer.valid_scores

    def test(self, dataset, device='cuda', n_jobs_dataloader=0, print_batch_progress=False):
        """
        Test the DeepSAD model on the provided dataset with the provided parameters.
        ----------
        INPUT
        |---- dataset (pytorch Dataset) the dataset on which to test the DeepSAD.
        |           Must return (input, label, mask, semi_label, idx).
        |---- device (str) the device to work on ('cpu' or 'cuda').
        |---- n_jobs_dataloader (int) number of workers for the dataloader.
        |---- print_batch_progress (bool) whether to display a progress bar.
        OUTPUT
        |---- None
        """
        if self.trainer is None:
            self.trainer = DeepSADTrainer(self.c, self.eta, device=device,
                                          n_jobs_dataloader=n_jobs_dataloader,
                                          print_batch_progress=print_batch_progress)
        self.trainer.test(dataset, self.net)
        # recover results
        self.results['embedding']['test']['time'] = self.trainer.test_time
        self.results['embedding']['test']['auc'] = self.trainer.test_auc
        self.results['embedding']['test']['f1'] = self.trainer.test_f1
        self.results['embedding']['test']['scores'] = self.trainer.test_scores

    def pretrain(self, train_dataset, valid_dataset, test_dataset, lr=0.0001, n_epoch=150, lr_milestone=(),
                 batch_size=64, weight_decay=1e-6, device='cuda', n_jobs_dataloader=0, print_batch_progress=False):
        """
        Pretrain the DeepSAD model by training an Autoencoder on the provided
        datasets, then initialize the encoder from the AE's weights.
        ----------
        INPUT
        |---- train_dataset (pytorch Dataset) the dataset on which to train the
        |           Autoencoder. Must return (input, label, mask, semi_label, idx).
        |---- valid_dataset (pytorch Dataset) the dataset on which to validate
        |           the Autoencoder. Same item format.
        |---- test_dataset (pytorch Dataset) the dataset on which to test the
        |           Autoencoder. Same item format.
        |---- lr (float) the learning rate.
        |---- n_epoch (int) the number of epochs.
        |---- lr_milestone (tuple) the lr update steps.
        |---- batch_size (int) the batch size to use.
        |---- weight_decay (float) the weight decay for the Adam optimizer.
        |---- device (str) the device to work on ('cpu' or 'cuda').
        |---- n_jobs_dataloader (int) number of workers for the dataloader.
        |---- print_batch_progress (bool) whether to display a progress bar.
        OUTPUT
        |---- None
        """
        self.ae_trainer = AutoEncoderTrainer(lr=lr, n_epoch=n_epoch, lr_milestone=lr_milestone,
                                             batch_size=batch_size, weight_decay=weight_decay, device=device,
                                             n_jobs_dataloader=n_jobs_dataloader, print_batch_progress=print_batch_progress)
        # Train AE
        self.ae_net = self.ae_trainer.train(train_dataset, self.ae_net)
        self.results['reconstruction']['train']['time'] = self.ae_trainer.train_time
        self.results['reconstruction']['train']['loss'] = self.ae_trainer.train_loss
        # Validate AE (also yields the best threshold for max F1-score)
        self.ae_trainer.validate(valid_dataset, self.ae_net)
        self.results['reconstruction']['scores_threshold'] = self.ae_trainer.scores_threhold
        self.results['reconstruction']['valid']['time'] = self.ae_trainer.valid_time
        self.results['reconstruction']['valid']['auc'] = self.ae_trainer.valid_auc
        self.results['reconstruction']['valid']['f1'] = self.ae_trainer.valid_f1
        self.results['reconstruction']['valid']['scores'] = self.ae_trainer.valid_scores
        # Test AE
        self.ae_trainer.test(test_dataset, self.ae_net)
        self.results['reconstruction']['test']['time'] = self.ae_trainer.test_time
        self.results['reconstruction']['test']['auc'] = self.ae_trainer.test_auc
        self.results['reconstruction']['test']['f1'] = self.ae_trainer.test_f1
        self.results['reconstruction']['test']['scores'] = self.ae_trainer.test_scores
        # Initialize DeepSAD model with the Encoder's weights
        self.init_network_weights_from_pretrain()

    def init_network_weights_from_pretrain(self):
        """
        Initialize the DeepSAD encoder weights with the autoencoder's ones.
        ----------
        INPUT
        |---- None
        OUTPUT
        |---- None
        """
        net_dict = self.net.state_dict()
        ae_net_dict = self.ae_net.state_dict()
        # keep only AE entries whose names match DeepSAD's state dict
        ae_net_dict = {k: v for k, v in ae_net_dict.items() if k in net_dict}
        # Update the DeepSAD state dict
        net_dict.update(ae_net_dict)
        self.net.load_state_dict(net_dict)

    def save_model(self, export_path, save_ae=True):
        """
        Save the model (hypersphere center, DeepSAD state dict, (Autoencoder
        state dict)) on disk.
        ----------
        INPUT
        |---- export_path (str) the filename where to export the model.
        |---- save_ae (bool) whether to save the Autoencoder state dict.
        OUTPUT
        |---- None
        """
        net_dict = self.net.state_dict()
        ae_net_dict = self.ae_net.state_dict() if save_ae else None
        torch.save({'c': self.c,
                    'net_dict': net_dict,
                    'ae_net_dict': ae_net_dict}, export_path)

    def load_model(self, model_path, load_ae=True, map_location='cpu'):
        """
        Load the model (hypersphere center, DeepSAD state dict, (Autoencoder
        state dict)) from the provided path.
        --------
        INPUT
        |---- model_path (str) filename of the model to load.
        |---- load_ae (bool) whether to load the Autoencoder state dict.
        |---- map_location (str) device on which to load.
        OUTPUT
        |---- None
        """
        # BUG FIX: the original 'assert load_ae and (self.ae_net is not None)'
        # failed whenever load_ae was False; an AE is only required when one
        # is actually being loaded.
        assert (not load_ae) or (self.ae_net is not None), 'The trainer has not been initialized with an Autoencoder. It can thus not be loaded.'
        model_dict = torch.load(model_path, map_location=map_location)
        self.c = model_dict['c']
        self.net.load_state_dict(model_dict['net_dict'])
        if load_ae and (self.ae_net is not None):
            self.ae_net.load_state_dict(model_dict['ae_net_dict'])

    def save_results(self, export_json_path):
        """
        Save the DeepSAD results (train time, test time, test AUC, test scores
        (loss and label for each sample)) as json.
        ----------
        INPUT
        |---- export_json_path (str) the json filename where to save.
        OUTPUT
        |---- None
        """
        with open(export_json_path, 'w') as f:
            json.dump(self.results, f)
|
# Goods sold by weight.
weighItems = [
    'Cheese', 'Apple', 'Orange', 'Beef', 'Cocaine',
    'Potato', 'Tomato', 'Pork', 'Carrot', 'Ham',
]
# Goods sold per piece/unit. (Spellings kept as-is; callers may match on them.)
pieceItems = [
    'Bread', 'Roll', 'Crisps', 'Coke', 'Vinegar',
    'Vodka', 'Ciggarettes', 'Beer', 'Pen', 'Pinapple',
]
# Demonstrates that slicing creates a (shallow) copy of a list, and that a
# filtering operation builds a new list without mutating its source.
a = [[1, 1], [2, 3]]
print(a)
a = a[:]  # shallow copy: new outer list, same inner lists
print(a)
b = ['sunny', 'overcast']
b1 = list(filter(lambda n: n != 'sunny', b))  # b itself is untouched
print(b)
print(b1)
999,005 | 6e0fa5c7f1362c8bb0eb51217b9067901c875fc7 | import urllib.request as ul
from selenium import webdriver
from PIL import Image
import pytesseract as pt
from bs4 import BeautifulSoup
# Log in to the BIT Mesra ERP portal with Selenium, solve the login captcha
# with Tesseract OCR, then scrape the examination and result tables into a
# nested dict. NOTE(review): uses the pre-Selenium-4 find_element(s)_by_*
# API -- confirm the installed selenium version still provides it.
browser = webdriver.Chrome(
    '/home/aryandosaj/Desktop/Flask_Web_Hook/chromedriver')
browser.get('https://erp.bitmesra.ac.in')
# Collect every <img> src; list_of_images[1] is assumed to be the captcha
# image -- TODO confirm the page layout still matches.
image = browser.find_elements_by_tag_name('img')
list_of_images = []
for i in image:
    list_of_images.append(i.get_attribute('src'))
browser.get(list_of_images[1])
browser.save_screenshot('captha.png')
browser.back()
# Credentials are intentionally blank here; fill in before running.
username = browser.find_element_by_id('txt_username')
username.send_keys('')
password = browser.find_element_by_id('txt_password')
password.send_keys('')
# OCR the captcha screenshot and submit the login form.
captha_txt = (pt.image_to_string(Image.open('captha.png'), lang='eng'))
print(captha_txt)
captha = browser.find_element_by_id('txtcaptcha')
captha.send_keys(captha_txt)
login = browser.find_element_by_id('btnSubmit')
login.click()
# Comprehensive student report page (attendance / marks / results).
browser.get(
    'https://erp.bitmesra.ac.in/Academic/Comprehensive_Stud_Report1.aspx?pageno=76')
page = browser.page_source
soup = BeautifulSoup(page, 'html.parser')
attendance = soup.find("div", {"id": "divAttendance"})
# Attendance-table parsing is currently disabled (kept for reference).
# table_head = attendance.find('thead')
# table_head = table_head.find_all('th')
# attendance_heading = []
# for element in table_head:
#     attendance_heading.append((element.contents[0]).strip())
# attandance_body = attendance.find('tbody')
# attandance_body = attandance_body.find_all('tr')
# attendance_body_content = []
# for entry in attandance_body:
#     temp = entry.find_all('td')
#     t = []
#     for d in temp:
#         t.append(d.contents[0].strip())
#     attendance_body_content.append(t)
# Examination marks: first row of the tbody is the header, the rest are data.
examination = soup.find("div", {"id": "divTestMark"})
examination = examination.find_all('tbody')
examination = examination[0].find_all('tr')
examination_heading = []
examination_body = []
for element in examination[0].find_all('th'):
    examination_heading.append(element.contents[0].strip())
for element in examination[1:]:
    # print(element)
    t = []
    for d in element.find_all('td'):
        t.append(d.contents[0].strip())
    examination_body.append(t)
# result = soup.find("div", {"id": "div40"})
# result = result.find('div')
# Results table: first two <td>s per row plus the <span> cells.
result = soup.find("div", {"id": "divResult"})
result = result.find('tbody')
result = result.find('tbody')  # nested tbody -- matches the page's markup
result = result.find_all('tr')
result_heading = []
result_body = []
for element in result[0].find_all('th'):
    result_heading.append(element.contents[0].strip())
for element in result[1:]:
    t = []
    for d in element.find_all('td')[:2]:
        t.append(d.contents[0].strip())
    ele = element.find_all('span')
    for d in ele:
        t.append(d.contents[0].strip())
    result_body.append(t)
# Final payload: heading/body pairs per section.
content = {
    # 'Attendance':
    # {
    #     'heading': attendance_heading,
    #     'body': attendance_body_content
    # },
    'Examination': {
        'heading': examination_heading,
        'body': examination_body
    },
    'Result':
    {
        'heading': result_heading,
        'body': result_body
    }
}
print(content)
|
# URI/beecrowd problem 1006 - Average 2
# Read three grades A, B, C (one line, space-separated floats) and print
# their weighted average with weights 2, 3 and 5.
a, b, c = map(float, input().split())
# Apply the weights in place.
a *= 2
b *= 3
c *= 5
# Weighted average: total weight is 2 + 3 + 5 = 10.
media = (a + b + c) / 10
# Output with one decimal place, exactly as the judge requires.
print('MEDIA = %.1f' % media)
|
999,007 | e05d5121cf19cfdd6199d9e31ae8a9215c972843 | from rest_framework.serializers import ModelSerializer
from ..Models.attendance import Attendance
from ..Serializers.employee_serializer import EmployeeSerializerForOtherModels
from ..Serializers.production_attendence_type_serializer import ProductionAttendanceTypeListSerializer
from ..Serializers.unit_serializer import UnitDetailSerializer
class AttendanceListSerializer(ModelSerializer):
    """List serializer for Attendance: expands employee, unit and the
    production attendance type into nested read representations.
    (Cleaned up: stray semicolons removed, PEP 8 spacing applied.)
    """
    employee = EmployeeSerializerForOtherModels()
    unit = UnitDetailSerializer()
    production_attendance_type = ProductionAttendanceTypeListSerializer()

    class Meta:
        model = Attendance
        fields = ['id', 'employee', 'production_attendance_type', 'value', 'unit']
class AttendanceDetailSerializer(ModelSerializer):
    """Detail serializer for Attendance: flat fields including the date.
    (Cleaned up: stray semicolon removed, PEP 8 spacing applied.)
    """

    class Meta:
        model = Attendance
        fields = ['id', 'employee', 'production_attendance_type', 'value', 'unit', 'date']
class AttendanceCreateSerializer(ModelSerializer):
    """Create serializer for Attendance: same fields as detail, without the
    read-only id. (Cleaned up: stray semicolon removed, PEP 8 spacing.)
    """

    class Meta:
        model = Attendance
        fields = ['employee', 'production_attendance_type', 'value', 'unit', 'date']
class AttendanceSerializerForSalaryCalculation(ModelSerializer):
    """Minimal serializer exposing only the attendance value, used by the
    salary-calculation flow. (PEP 8 spacing applied.)
    """

    class Meta:
        model = Attendance
        fields = ['value']
|
999,008 | 8d676877fa3f126f04b9e3057cbf6c10f0d9e04e | import asyncio
from pyppeteer import launch
import time
from twilio.rest import Client
from aip import AipOcr
import os
def screen_size():
    """Return (width, height) of the primary screen, probed via tkinter."""
    import tkinter
    root = tkinter.Tk()
    size = (root.winfo_screenwidth(), root.winfo_screenheight())
    root.quit()
    return size
# Chromium launch options for pyppeteer: headful maximized window, spoofed
# desktop Chrome user agent, and a persistent profile directory so the
# login session survives restarts.
launch_kwargs = {
    "headless": False,
    "args": [
        "--start-maximized",
        "--no-sandbox",
        "--disable-infobars",
        "--log-level=3",
        "--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
    ],
    "userDataDir": "F:\\manutus\\userDataDir"
}
# Twilio SMS credentials and phone numbers (redacted).
account_sid = '**************'
auth_token = '**************'
myNumber = '**************'
twilioNumber = '**************'
# Baidu AipOcr credentials (redacted).
APP_ID = '**************'
API_KEY = '**************'
SECRET_KEY = '**************'
async def textmyself(message):
    """Send *message* to myNumber as an SMS via Twilio.

    NOTE(review): the Twilio client call is blocking even though this is a
    coroutine; also the rebinding of 'message' to the created Message
    object is never used.
    """
    client = Client(account_sid, auth_token)
    message = client.messages.create(to=myNumber, from_=twilioNumber, body=message)
async def main():
    """Poll the Elsevier editorial system for a manuscript status change.

    Every iteration: log in (captcha solved via Baidu OCR from a
    screenshot), open the pending-submissions page, OCR the whole page,
    and check whether the status still reads "With Editor". On a change,
    text myself via Twilio and stop. Screenshots are cleaned up afterwards.

    NOTE(review): time.sleep() calls block the event loop; harmless here
    since this is the only task, but await asyncio.sleep() would be the
    non-blocking equivalent.
    """
    while True:
        time.sleep(10)
        browser = await launch(launch_kwargs)
        page = await browser.newPage()
        # Hide the webdriver flag so the site does not detect automation.
        await page.evaluateOnNewDocument("""
            var _navigator = {};
            for (name in window.navigator) {
                if (name != "webdriver") {
                    _navigator[name] = window.navigator[name]
                }
            }
            Object.defineProperty(window, 'navigator', {
                get: ()=> _navigator,
            })
        """)
        width, height = screen_size()
        await page.setViewport({"width": width, "height": height})
        await page.goto('https://ees.elsevier.com/neucom/default.asp?pg=login.asp')
        await page.waitFor('#mainFrameset > frameset:nth-child(1) > frame')
        frame1 = page.frames
        await asyncio.sleep(1)
        # Login form lives in frame index 5 (credentials redacted).
        username = await frame1[5].querySelector('#rightCol > form > div > fieldset > input:nth-child(3)')
        await username.type('**************')
        password = await frame1[5].querySelector('#rightCol > form > div > fieldset > input:nth-child(6)')
        await password.type('**************')
        coauther_role = await frame1[5].querySelector(
            '#rightCol > form > div > fieldset > div.buttonAlign > input:nth-child(2)')
        await coauther_role.click()
        time.sleep(2)
        # await asyncio.sleep(1)
        await page.goto('https://ees.elsevier.com/neucom/coauthor/coauth_pendSubmissions.asp?currentpage=1')
        currentTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        await page.screenshot({'path': 'F:\manutus\ocr.png'})
        # OCR the submissions page and concatenate every recognized word.
        client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
        i = open(r'F:\manutus\ocr.png', 'rb')
        img = i.read()
        message = client.basicAccurate(img)
        totalMessage = ''
        for i in message.get('words_result'):
            totalMessage += i.get('words')
        # BUG FIX: the original "if 'With' and 'Editor' in totalMessage" only
        # tested 'Editor' ('With' was a bare truthy constant); test both words.
        if 'With' in totalMessage and 'Editor' in totalMessage:
            print(f"With Editor")
            print(currentTime)
        else:
            print(f"Please check status...")
            await textmyself('Manuscript status has changed, please check your Elsevier account ASAP.')
            print(currentTime)
            break
        await asyncio.sleep(1)
        await browser.close()
        # Remove leftover screenshots from the working directory.
        path = "F:\\manutus\\"
        for root, dirs, files in os.walk(path):
            for name in files:
                if name.endswith(".png"):
                    os.remove(os.path.join(root, name))
                    print("Delete File: " + os.path.join(root, name))
            break
# Run the watcher until a status change triggers the SMS and the loop breaks.
# NOTE(review): asyncio.get_event_loop() at module level is deprecated on
# Python 3.10+; asyncio.run(main()) is the modern equivalent -- confirm the
# target interpreter before changing.
asyncio.get_event_loop().run_until_complete(main())
|
999,009 | 48c8ab0cb7e241626ac770b456108dcaf248dd12 | #coding=utf-8
# Ask for the user's gender and print a grooming suggestion.
# The prompt and replies are Chinese runtime strings and must stay as-is.
sex = input('请问你的性别是?')        # "What is your gender?"
if sex == '男':                        # "male"
    print('你可以留胡子')              # "you can grow a beard"
elif sex == '女':                      # "female"
    print('你可以留长头发')            # "you can grow long hair"
else:
    print('你想怎样都可以')            # "do as you like"
|
999,010 | 58c38bd5b4038e5603d16f954805a3e8fad6f240 | import string
class Stuff:
    """Advent of Code 2018 day 5: polymer reduction.

    A polymer collapses wherever a lowercase unit is adjacent to its own
    uppercase form (aA, Aa, bB, ...). Part one fully reacts the input
    polymer; part two first removes one unit type (both cases) and finds
    the type whose removal yields the shortest fully-reacted polymer.

    Improvements over the original: the 52-line remove_pairs is a loop over
    string.ascii_lowercase (same pass order, so identical results); the
    duplicated react/part-two loops are factored into private helpers; the
    file handle uses a context manager instead of shadowing builtin input().
    """

    def __init__(self):
        self.clear()

    def clear(self):
        """Reset the polymer to the empty string."""
        self.polymer = ''

    def read_instructions(self, file_name):
        """Load the polymer: the last stripped line of *file_name*."""
        with open(file_name, 'r') as fh:
            for instruction in fh:
                self.polymer = str(instruction.strip('\n'))

    def remove_pairs(self):
        """One reduction pass: delete every xX/Xx pair, for x in a..z.

        Same replacement order as the original hand-written version
        (lower+upper first, then upper+lower, letter by letter).
        """
        for letter in string.ascii_lowercase:
            self.polymer = self.polymer.replace(letter + letter.upper(), '')
            self.polymer = self.polymer.replace(letter.upper() + letter, '')

    def _fully_react(self):
        """Apply remove_pairs until the polymer stops shrinking."""
        start_length = len(self.polymer)
        new_length = 0
        while new_length < start_length:
            start_length = len(self.polymer)
            self.remove_pairs()
            new_length = len(self.polymer)

    def _shortest_after_removal(self, file_name):
        """Part two: for each unit type removed up front, fully react and
        return (best_letter, resulting_length)."""
        lengths = {}
        for letter in string.ascii_lowercase:
            self.clear()
            self.read_instructions(file_name)
            self.polymer = self.polymer.replace(letter, '')
            self.polymer = self.polymer.replace(letter.upper(), '')
            self._fully_react()
            lengths[letter] = len(self.polymer)
        minimum = min(lengths, key=lengths.get)
        return minimum, lengths[minimum]

    def testStuffOne(self):
        """Part one on the sample input."""
        self.clear()
        print ('Testing')
        self.read_instructions('input_5_t.txt')
        self._fully_react()
        print(len(self.polymer))

    def testStuffTwo(self):
        """Part two on the sample input."""
        print ('Testing')
        minimum, length = self._shortest_after_removal('input_5_t.txt')
        print(minimum, ' ', length)

    def doStuff(self):
        """Part one on the real puzzle input."""
        self.clear()
        print ('Real stuff')
        self.read_instructions('input_5.txt')
        self._fully_react()
        print(len(self.polymer))

    def doStuffTwo(self):
        """Part two on the real puzzle input."""
        print ('Real stuff')
        minimum, length = self._shortest_after_removal('input_5.txt')
        print(minimum, ' ', length)
# Entry point: run parts one and two on both the sample input
# ('input_5_t.txt') and the real puzzle input ('input_5.txt'); the files
# must sit alongside this script.
if __name__ == "__main__":
    x = Stuff()
    x.testStuffOne()
    x.doStuff()
    x.testStuffTwo()
    x.doStuffTwo()
999,011 | a7bc89682849150e72e7961173da1514c6d93dbe | import imageio
import numpy as np
import matplotlib.pyplot as plt
import os.path
import sys
# color-based constants
WHITE = 255        # max 8-bit intensity, used for overlay lines in the summary video
HAND_CUTOFF = 30   # background-subtracted intensity above which a pixel counts as "hand"
# video segment lengths, in frames
MIN_SEGMENT_LENGTH = 25
MAX_SEGMENT_LENGTH = 150
# image width/height, bounding box placement (a 256x256 crop of the frame)
X0 = 220
X1 = X0 + 256
Y0 = 120
Y1 = Y0 + 256
def generate_segments(full_filename, summary=False):
    """ Given the filename of a video which contains numerous consecutive finger
    swipes, and a bounding box specified by the global values X0, X1, Y0, Y1,
    this function will crop the video to only contain the bounding box, perform
    background subtraction, and then perform temporal segmentation, resulting in
    numerous video segments which each contain a single swipe (which wil be
    stored in ./segments/). A summary video is also generated, which shows
    bounding box placement, whether the current frame is being stored in one
    of the video segments, and is played at twice the original speed.

    NOTE(review): reader.get_meta_data()['nframes'] is deprecated/removed in
    newer imageio versions -- confirm the pinned imageio release."""
    # split filename and create folder to store segments
    filename, filetype = os.path.splitext(full_filename)
    folder, filename = os.path.split(filename)
    if len(folder) > 0:
        folder = folder+"/"
    if not os.path.isfile(folder+filename+filetype):
        print("ERROR: File", folder+filename+filetype, "does not exist.")
        sys.exit()
    if not os.path.exists(folder+"segments"):
        os.makedirs(folder+"segments")
    # set flags
    last_new_segment = 0    # frame at which segment began
    was_low = False         # if hand left image since segment began
    low_count = 0           # consecutive frames for which hand was gone
    segment = 0             # segment id number
    # initialize summary/segment writers, set background as first frame
    reader = imageio.get_reader(folder+filename+filetype, 'ffmpeg')
    fps = reader.get_meta_data()['fps']
    nframes = reader.get_meta_data()['nframes']
    # Segment 0 is created closed so the "not closed" checks below start false.
    segment_writer = imageio.get_writer(
        folder+"segments/"+filename+str(segment)+filetype,
        'ffmpeg', fps=fps, macro_block_size=None)
    segment_writer.close()
    if summary:
        summary_writer = imageio.get_writer(folder+filename+"_summary"+filetype,
                                            'ffmpeg', fps=fps*2)
    # First frame (red channel only) is the static background reference.
    background = np.array(reader.get_data(0)).astype(int)[:,:,0]
    # process video and segment
    for i, image in enumerate(reader):
        # background subtract, threshold at zero
        image = np.array(image).astype(int)[:,:,0]
        image = np.maximum(image - background, np.zeros(image.shape))
        # check if at least 3 edge pixels belong to a hand
        if (np.sum(image[Y1:Y1+1, X0:X1] > HAND_CUTOFF) > 3 or
                np.sum(image[Y0:Y0+1, X0:X1] > HAND_CUTOFF) > 3 or
                np.sum(image[Y0:Y1, X0:X0+1] > HAND_CUTOFF) > 3 or
                np.sum(image[Y0:Y1, X1:X1+1] > HAND_CUTOFF) > 3):
            # if hand just entered image and segment was long enough, start new segment
            if(i - last_new_segment > MIN_SEGMENT_LENGTH and was_low):
                if not segment_writer.closed:
                    segment_writer.close()
                segment += 1
                segment_writer = imageio.get_writer(
                    folder+"segments/"+filename+str(segment)+filetype,
                    'ffmpeg', fps=fps, macro_block_size=None)
                last_new_segment = i
                was_low = False
                low_count = 0
        else:  # hand isn't in image, after 1/10 second decide it has left
            low_count += 1
            if low_count >= 3:
                was_low = True
        # segment has reached maximum length, end it
        if i - last_new_segment > MAX_SEGMENT_LENGTH:
            if not segment_writer.closed:
                segment_writer.close()
        # add border for summary video around bounding area which is captured
        image[Y0-1,X0:X1] = WHITE*np.ones(X1-X0)
        image[Y1,X0:X1] = WHITE*np.ones(X1-X0)
        image[Y0:Y1,X0-1] = WHITE*np.ones(Y1-Y0)
        image[Y0:Y1,X1] = WHITE*np.ones(Y1-Y0)
        # record with segment/summary writers
        if not segment_writer.closed:
            segment_writer.append_data(image[Y0:Y1,X0:X1].astype('uint8'))
            if summary:
                summary_writer.append_data(image.astype('uint8'))
        else:  # add lines to indicate not recording, add to summary writer
            for x in range(X0, X1, 10):
                image[Y0:Y1,x] = WHITE*np.ones(Y1-Y0)
            if summary:
                summary_writer.append_data(image.astype('uint8'))
        # display progress processing video
        if i % 100 == 0:
            percent = (i / nframes)
            bars = percent*40
            sys.stdout.write("\rSegmenting {0}: [{1}{2}] {3}% ".format(
                full_filename, "|"*int(bars), " "*int(40-bars), int(percent*100)))
            sys.stdout.flush()
    # close writers
    print("")
    segment_writer.close()
    if summary:
        summary_writer.close()
if __name__ == "__main__":
    # CLI entry point: validate arguments, then segment the given video
    # with the summary video enabled.
    if len(sys.argv) != 2:
        print("Usage: python segment.py <filename>")
        sys.exit()
    # (stray trailing semicolon removed)
    generate_segments(sys.argv[1], True)
|
999,012 | e9976883a7ac28fc3ef5d8afadad3cc2ade27037 | # -*- coding:utf-8 -*-
import os
import sqlite3
from functools import wraps
import copy
"""
待优化:
1、字符串拼接时保留引号
劣法:参数填充时字符串值使用单双引号两层包裹
最优:
values = [str(tuple(item)) for item in values]
values = ",".join(values)
较优:对需要保留引号的字符串检出并更改为"'xxx'"形式,怎么实现呢?
def str_convert(s):
return "'" + s + "'"
# for i in range(len(values)):
# for j in range(len(values[i])):
# if isinstance(values[i][j],str):
# values[i][j] = '"' + values[i][j] + '"'
次优:替换法,在字符串值的前后增加特殊字符,待拼接完成后再替换为引号
2、
"""
"""
SQL:结构化查询语言
1、DDL语言(数据定义语言)
用来定义数据库、数据表、视图、索引、触发器
create alter drop
2、DML语言(数据操纵语言)
用于插入、更新、删除数据
insert update delete truncate
3、DQL语言(数据查询语言)
查询数据库中的数据
select
4、DCL语言(数据控制语言)
用来控制用户的访问权限
grant revoke
MySQL数据类型:
数值:TINYINT SMALLINT MEDIUMINT INT BIGINT FLOAT DOUBLE DECIMAL
字符串: CHAR VARCHAR TINYTEXT TEXT
日期、时间: DATE TIME DATETIME TIMESTAMP YEAR
NULL
注:
int(4),显示长度4位,zerofill填充0,99 --> 0099。 int(4) zerofill
float(5,2),总长度为5,小数2位
sqlite数据类型
ALTER TABLE XXX AUTO_INCREMENT=10;
"""
# Motor database file (sqlite), located in rotor_db/ next to this module.
# NOTE: the hard-coded "\\" separator makes this path Windows-only.
motor_db = os.path.split(os.path.realpath(__file__))[0] + "\\rotor_db\\motors.db"
def connect_db(db):
    """Decorator factory: execute the wrapped function's returned SQL string
    against the sqlite database *db* and return the fetched rows.

    The wrapped function only BUILDS the SQL statement; this wrapper opens
    the connection, executes, commits, fetches, and always closes. On any
    error it prints the exception and returns None (best-effort by design).
    This module is Python 2 (note the print statement below).
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            conn = sqlite3.connect(db)
            cur = conn.cursor()
            try:
                # Build the SQL statement from the decorated function.
                sql = func(*args, **kwargs)
                # Execute it.
                cur.execute(sql)
                # Autocommit is not enabled; commit manually here.
                conn.commit()
                # Fetch the result set (meaningful for SELECT statements).
                ret = cur.fetchall()
            except Exception as e:
                # Swallow and report -- callers receive None on failure.
                print str(e)
                return None
            finally:
                # Always release cursor and connection.
                cur.close()
                conn.close()
            # Return the query result (meaningful for SELECT).
            return ret
        return wrapper
    return decorator
"""
DDL
"""
@connect_db(motor_db)
def drop_db(params):
    """Build 'DROP DATABASE IF EXISTS <db>;'.

    params = {"database": database}
    NOTE(review): sqlite has no DROP DATABASE statement, so executing this
    will fail -- confirm intent. The identifier is interpolated unescaped.
    """
    return "DROP DATABASE IF EXISTS {};".format(params["database"])
@connect_db(motor_db)
def drop_table(params):
    """Build 'DROP TABLE IF EXISTS <table>;'.

    params = {"table": tablename}
    The table name is interpolated unescaped -- do not feed untrusted input.
    """
    return "DROP TABLE IF EXISTS {};".format(params["table"])
@connect_db(motor_db)
def create_table(params):
    """Build 'CREATE TABLE IF NOT EXISTS <table>(<fields>);'.

    params = {"table": tablename,
              "fields": ["ID int AUTO_INCREMENT PRIMARY KEY NOT NULL COMMENT XXX",
                         "name text DEFAULT XXX", ...]}
    Field definitions are joined verbatim -- no quoting/escaping applied.
    """
    table = params["table"]
    fields = ",".join(params["fields"])
    return "CREATE TABLE IF NOT EXISTS {}({});".format(table,fields)
@connect_db(motor_db)
def alter_table(params):
    """Build 'ALTER TABLE <table> <action> <fields>;'.

    params = {"table": tablename,
              "action": one of "CHANGE", "MODIFY", "RENAME AS", "ADD", "DROP" (str),
              "fields": e.g. "AUTO_INCREMENT=10" or "new tablename" (str)}
    Usage (translated from the original Chinese notes):
        reset counter:  AUTO_INCREMENT=10
        rename table:   RENAME AS <new name>
        add column:     ADD <name> <type>
        modify column:  MODIFY <name> <type>
                        CHANGE <old name> <new name> <type>
        drop column:    DROP <name>
    NOTE(review): several of these forms are MySQL syntax; sqlite's ALTER
    TABLE support is much narrower -- confirm against the target engine.
    """
    table = params["table"]
    action = params.get("action","")
    fields = params["fields"]
    return "ALTER TABLE {} {} {};".format(table,action,fields)
"""
DML
"""
@connect_db(motor_db)
def insert_items(params):
    """Build an INSERT statement.

    params = {"table": tablename, "fields": ["ID", "name", ...],
              "values": [[...], [...], ...]}
    Without field names the row covers every column:
        INSERT INTO t VALUES (...),(...),...
    Multi-row form:
        INSERT INTO t (c1, c2, ...) VALUES (...),(...),...

    String values are wrapped in double quotes so they survive plain
    interpolation. NOTE(review): values are interpolated rather than bound
    as parameters -- vulnerable to SQL injection for untrusted input;
    cur.executemany with '?' placeholders would be safer but requires
    changing connect_db. The isinstance(..., str) check also misses
    Python 2 unicode values -- confirm callers only pass bytes strings.
    """
    table = params["table"]
    fields = params.get("fields", "")
    # deepcopy: quoting mutates the rows, and the caller's list must stay intact
    values = copy.deepcopy(params["values"])
    if len(fields) == 1:
        # Single-column insert.
        if len(values) == 1:
            if isinstance(values[0], str):
                values[0] = '"' + values[0] + '"'
            values = "({})".format(values[0])
        else:
            # Flatten [[v], [v], ...] -> [v, v, ...]
            values = [value for item in values for value in item]
            for i in range(len(values)):
                if isinstance(values[i], str):
                    # BUG FIX: quote the i-th value (the original wrote to
                    # values[0], repeatedly re-quoting only the first one).
                    values[i] = '"' + values[i] + '"'
                values[i] = "({})".format(values[i])
            values = ",".join(values)
    else:
        # Multi-column rows: tuple repr provides the quoting for free.
        values = [str(tuple(item)) for item in values]
        values = ",".join(values)
    if fields:
        fields = "(" + ",".join(fields) + ")"
    return "INSERT INTO {} {} VALUES{};".format(table, fields, values)
@connect_db(motor_db)
def update_table(params):
    """Update rows in a table.

    params = {"table": tablename,
              "fields": {"col1": value, "col2": value, ...},
              "condition": "..."}   # optional; rendered as WHERE <condition>

    Generates:
        UPDATE tablename SET col1=value[, col2=value, ...] [WHERE condition];
    """
    table = params["table"]
    fields = params["fields"]
    condition = params.get("condition","")
    temp = []
    for key,value in fields.items():
        # String values become double-quoted SQL literals.
        if isinstance(value,str):
            value = '"' + value + '"'
        temp.append("{}={}".format(key,value))
    values = ",".join(temp)
    if condition:
        condition = "WHERE " + condition
    return "UPDATE {} SET {} {};".format(table,values,condition)
@connect_db(motor_db)
def delete_items(params):
    """Delete rows matching an optional condition.

    params = {"table": tablename, "condition": xxx}
    Generates: DELETE FROM tablename [WHERE condition];
    """
    where_clause = params.get("condition", "")
    if where_clause:
        where_clause = "WHERE " + where_clause
    return "DELETE FROM {} {};".format(params["table"], where_clause)
@connect_db(motor_db)
def truncate_table(params):
    """Remove every row from a table, keeping its schema.

    params = {"table": tablename}

    BUG FIX: the original emitted MySQL's ``TRUNCATE {table};``, but this
    module executes everything through sqlite3 (see connect_db) and SQLite
    has no TRUNCATE statement, so the call always failed.  SQLite's
    equivalent is an unqualified DELETE (internally optimised as a
    truncate).  NOTE(review): unlike MySQL TRUNCATE this does not reset an
    AUTOINCREMENT counter — that is stored in sqlite_sequence.
    """
    return "DELETE FROM {};".format(params["table"])
"""
DQL
"""
@connect_db(motor_db)
def show_tables():
    # List all table names in the SQLite database, alphabetically.
    return "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;"
@connect_db(motor_db)
def table_query(params):
    """Build a SELECT statement.

    params = {"table": tablename,
              "fields": ["ID", "name", ...],   # optional, defaults to *
              "condition": xxx}                # optional WHERE clause
    (The docstring previously documented the key as "conditions", but the
    code reads "condition".)
    """
    table = params["table"]
    fields = params.get("fields","")
    condition = params.get("condition","")
    if not fields:
        fields = "*"
    fields = ",".join(fields)
    if condition:
        condition = "WHERE " + condition
    return "SELECT {} FROM {} {};".format(fields, table, condition)
@connect_db(motor_db)
def head_query(params):
    """Return a table's column metadata via SQLite's PRAGMA table_info.

    params = {"table": tablename}
    The commented line below is the MySQL INFORMATION_SCHEMA equivalent,
    kept for reference.
    """
    # sql = "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = {};".format(params["table"])
    return "PRAGMA table_info({});".format(params["table"])
"""
视图
示例:创建master_view
CREATE VIEW master_view
AS
SELECT id,name FROM student;
SELECT * FROM master_view;
DESC master_view;
SHOW CREATE VIEW master_view;
ALTER VIEW master_view
AS
SELECT id,name,email FROM student;
UPDATE master_view SET xx=11 WHERE xx=xxx;
DROP VIEW master_view;
"""
"""
事务
将一组语句SQL放在同一批次内去执行,如果一个SQL语句出错,则该批次内的所有SQL都将被取消执行
如银行转帐,中途出现错误,全部回滚
MySQL事务处理只支持InnoDB和BDB数据表类型
ACID:
原子性(atomic)
一致性(consist)
隔离性(isolated)
持久性(durable)
关闭自动提交
SELECT @@autocommit;
SET autocommit=0;
MySQL事务控制
START TRANSACTION;
语句(组)
COMMIT;
ROLLBACK;
SET autocommit=1;
sqlite3事务控制
使用下面的命令来控制事务:
BEGIN TRANSACTION:开始事务处理。
COMMIT:保存更改,或者可以使用 END TRANSACTION 命令。
ROLLBACK:回滚所做的更改。
事务控制命令只与 DML 命令 INSERT、UPDATE 和 DELETE 一起使用。他们不能在创建表或删除表时使用,因为这些操作在数据库中是自动提交的。
"""
"""
触发器
四要素:
1、监视地点table
2、监视事件insert/update/delete
3、触发时间after/before
4、触发事件insert/update/delete
CREATE TRIGGER triggerName
{BEFORE | AFTER}
{INSERT | UPDATE | DELETE}
ON tablename
FOR EACH ROW
BEGIN
触发器SQL语句;
END;
DROP TRIGGER triggerName;
"""
def create_table_motorList():
    # Table listing the known motor names (single primary-key column).
    create_table({
        "table": "motorList",
        "fields": ["Motor varchar(50) PRIMARY KEY NOT NULL"],
    })
def drop_table_motorList():
    drop_table({"table": "motorList"})
def create_table_motorData():
    # Per-throttle measurement rows for each motor/propeller combination.
    create_table({
        "table": "motorData",
        "fields": ["Motor VARCHAR(50)",
                   "Voltage FLOAT(5,2)",
                   "Propeller INT(6)",
                   "Throttle VARCHAR(4)",
                   "Amps FLOAT(5,2)",
                   "Watts INT(6)",
                   "Thrust FLOAT(8,2)",
                   "RPM INT(5)",
                   "Moment FLOAT(5,2)",
                   "Efficiency FLOAT(5,2)"],
    })
def drop_table_motorData():
    drop_table({"table": "motorData"})
def create_table_motorInfo():
    # Static specification sheet for each motor.
    create_table({
        "table": "motorInfo",
        "fields": ["Motor VARCHAR(50) PRIMARY KEY NOT NULL",
                   "Producer VARCHAR(50)",
                   "Type VARCHAR(50)",
                   "KV VARCHAR(10)",
                   "Voltage FLOAT(5,2)",
                   "Amps FLOAT(5,2)",
                   "Watts INT(6)",
                   "Resistor FLOAT(4,2)",
                   "AmpNoLoad FLOAT(4,2)"],
    })
def drop_table_motorInfo():
    drop_table({"table": "motorInfo"})
def create_table_propellerInfo():
    # Propeller thrust (cT) and torque (cM) coefficients per producer/size.
    create_table({
        "table": "propellerInfo",
        "fields": ["Producer VARCHAR(50)",
                   "Propeller INT(6)",
                   "Type VARCHAR(50)",
                   "cT FLOAT(6,2)",
                   "cM FLOAT(6,2)"],
    })
def drop_table_propellerInfo():
    drop_table({"table": "propellerInfo"})
|
999,013 | 183df834b838b855a043cd8042b0ba434b0ffeb9 | # Cannibals and Missionary Problem
# Describes state which includes the cannibals, missionaries, if the transition is valid.
# One search state: how many cannibals/missionaries stand on each bank and
# which side the boat is on (-1 = left bank, 1 = right bank).
class State():
    def __init__(self, cannibalLeft, missionaryLeft, side, cannibalRight, missionaryRight):
        self.cannibalLeft = cannibalLeft
        self.missionaryLeft = missionaryLeft
        self.side = side  # -1: boat on the left bank, 1: on the right bank
        self.cannibalRight = cannibalRight
        self.missionaryRight = missionaryRight
        self.parent = None  # predecessor, used to reconstruct the path
    def isGoal(self):
        # Everyone has crossed: nothing remains on the left bank.
        return self.cannibalLeft == 0 and self.missionaryLeft == 0
    def isValid(self):
        # BUG FIX: this was `and`, which would accept a negative missionary
        # count on one bank whenever the other bank's count was >= 0.
        if self.missionaryLeft < 0 or self.missionaryRight < 0:
            return False
        if self.cannibalLeft < 0 or self.cannibalRight < 0:
            return False
        # Missionaries may never be outnumbered on a bank they occupy.
        if self.missionaryLeft < self.cannibalLeft and self.missionaryLeft != 0:
            return False
        if self.missionaryRight < self.cannibalRight and self.missionaryRight != 0:
            return False
        return True
# The boat carries one or two people: the five possible (cannibals,
# missionaries) loads.
_BOAT_LOADS = [(2, 0), (0, 2), (1, 1), (1, 0), (0, 1)]
def newStates(state):
    """Return every valid successor of *state*.

    BUG FIX: the original hand-rolled move generation validated the
    two-missionary move from the left bank twice, never validated the last
    state it constructed (losing the single-missionary move from the left),
    and omitted the two-cannibal move from the right bank.  All five boat
    loads are now generated symmetrically for both banks.
    """
    children = []
    for cannibals, missionaries in _BOAT_LOADS:
        # state.side is -1 (left) or 1 (right): the boat's bank loses the
        # passengers and the opposite bank gains them.
        child = State(state.cannibalLeft + state.side * cannibals,
                      state.missionaryLeft + state.side * missionaries,
                      -state.side,
                      state.cannibalRight - state.side * cannibals,
                      state.missionaryRight - state.side * missionaries)
        if child.isValid():
            child.parent = state
            children.append(child)
    return children
def bfs():
    """Breadth-first search from the initial state (3 cannibals and 3
    missionaries with the boat on the left bank); prints the solution path
    and returns the goal state, or None if no solution exists.

    BUG FIX: the original frontier test `childState not in seen or
    childState not in river` was always true for freshly built State
    objects (State defines no __eq__, so `in` compares identity), which
    re-enqueued every state and ballooned the frontier.  Visited states are
    now tracked by their value tuple.
    """
    initial = State(3, 3, -1, 0, 0)
    if initial.isGoal():
        return initial
    def key(s):
        # Hashable fingerprint of a state's observable value.
        return (s.cannibalLeft, s.missionaryLeft, s.side,
                s.cannibalRight, s.missionaryRight)
    frontier = [initial]
    visited = {key(initial)}
    while frontier:
        state = frontier.pop(0)
        if state.isGoal():
            printStates(state)
            return state
        for child in newStates(state):
            if key(child) not in visited:
                visited.add(key(child))
                frontier.append(child)
def printStates(solution):
    """Print the chain of states from the initial state to *solution*.

    Columns: cannibals-left, missionaries-left, boat side, cannibals-right,
    missionaries-right.
    """
    # Walk the parent links back to the root; this collects the path in
    # reverse (solution first).
    path = []
    node = solution
    while node:
        path.append(node)
        node = node.parent
    print("CL ML BOAT CR MR")
    print("----------------")
    # Emit oldest state first, so iterate the collected path backwards.
    for state in reversed(path):
        boat = '|----' if state.side == -1 else '----|'
        print("{0}, {1} {2} {3},{4} ".format(state.cannibalLeft, state.missionaryLeft, boat, state.cannibalRight, state.missionaryRight))
def main():
    # Run the search; bfs prints the solution path via printStates.
    winner = bfs()
    return winner
if __name__ == "__main__":
    # Guarded so that importing this module no longer triggers the search.
    main()
|
999,014 | 573fec0d1cb7b7ab2141997cea70ae8363682e37 | import random
import string
def generate_id(size=9):
    """Return a random identifier of *size* uppercase letters and digits.

    Uses random.SystemRandom (OS entropy), so IDs are unpredictable.
    """
    alphabet = string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(size))
999,015 | 2084117da1b424ae479ad7c2523b95fc701af3d4 | import numpy as np
import sys
# Bins atoms from an MD trajectory (path in argv[1]) into nz z-slabs and
# writes per-atom-type number densities to density.dat, plus each frame's
# box height to Zs.dat.
# NOTE(review): the trajectory format is inferred from the parser below --
# per frame: an atom-count line, a box-dimensions line (values apparently
# in nm, hence the *10 to Angstrom), then one "<type> x y z" line per
# atom.  Confirm against the producer of these files.
print("filename: %s"%( sys.argv[1]))
fp = open(sys.argv[1],'r')
fp_z=open('Zs.dat','w')
nz=61  # number of z bins
start=2  # frames to skip before accumulating statistics
if(len(sys.argv)>2):
    start = int(sys.argv[2])
if(len(sys.argv)>3):
    nz= int(sys.argv[3])
if(len(sys.argv)>4):
    # NOTE(review): bool() of any non-empty string is True, so every 4th
    # argument sets center=True; `center` is also never used below.
    center= bool(sys.argv[4])
names=['N','P','G','C','D','W','T']  # bead-type labels; water 'W' is used below via index -1? see factor calc
N=np.zeros((nz,len(names)))  # per-bin, per-type atom counts
l=fp.readline()
r=0  # frame counter
end=10000000  # hard cap on frames read
while (not (l=="")):
    l= l.split()
    numa=int(l[0])
    l=fp.readline()
    l=l.split()
    LX=float(l[1])*10
    LY=float(l[2])*10
    LZ=float(l[3])*10
    if(r>end):
        break
    r=r+1
    for i in range(numa):
        l=fp.readline()
        l=l.split()
        # periodic wrap of the z coordinate into bin range [0, nz)
        iz=np.mod(int(nz*float(l[3])/LZ),nz)
        index=names.index(l[0])
        if(r>start):
            N[iz,index]+=1
    l=fp.readline()
    fp_z.write('%f\n'%(LZ))
fp_z.close()
fp.close()
#N=np.divide(N,np.sum(N,axis=1)[:,None])
# counts -> number density: divide by accumulated frames and bin volume
N=N/((r-start)*LZ/nz*LX*LY*0.1**3)#np.mean(np.sum(N,axis=1))
# normalisation factor from the last-column density near the box edges
factor=np.mean(0.5*(N[:4,-1]+N[-4:,-1]))/8.33
z=N[:,-1]>7.8
if(sum(z)>0):
    print("water density:",np.mean(N[z,-1]))
print('correct water:',int(14000/factor),1./factor)
print("total density:",np.mean(np.sum(N,axis=1)))
print("factor:",8.33/np.mean(np.sum(N,axis=1)))
#N[iz,index]=N[iz,index]*1./((r-start)*1E-3*LX*LY*LZ/nz)
#N[iz,index]=N[iz,index]*1./((r-start)*1E-3*LX*LY*LZ/nz)
# Write the density profile: fractional z of each bin centre, then one
# tab-separated column per atom type.
fp=open("density.dat",'w')
fp.write('#z')
for n in names:
    fp.write('\t\t%s'%(n))
fp.write('\n')
for i in range(nz):
    fp.write("%f" %((i+0.5)/nz))
    for j in range(len(names)):
        fp.write("\t%f"%(N[i,j]))
    fp.write('\n')
#Z=np.zeros((nz,len(N[0][:]+1)))
#Z[:,1:] = N
#Z[:,0] = np.linspace(0.5*LZ/nz,LZ-0.5*LZ/nz,nz)
#fp=open('density_tot.dat','w')
#np.savetxt('density_tot.dat',N)
|
999,016 | f36d0de89a3312d9a99c71e5f6bc2169e14f7766 | # Generated by Django 2.0 on 2019-01-01 08:58
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations): drops the Like model and
    # its 'post' foreign key, reversing migration blog/0004_like.
    # Do not hand-edit a migration that has already been applied.
    dependencies = [
        ('blog', '0004_like'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='like',
            name='post',
        ),
        migrations.DeleteModel(
            name='Like',
        ),
    ]
|
999,017 | d8169abbd8e7614e70d40b17718da99349743580 | import itertools
import math
import bisect
import re
from collections import Counter, defaultdict
from functools import lru_cache
from typing import List, Union, Dict, Tuple, Collection, Callable
import numpy as np
from graphviz import Digraph, Graph
from scipy.special import comb
from aug.data.fasta import fasta_file_iter
from aug.seq import alignments
# Watson-Crick base-pairing for DNA.
complement_map = {"A": "T", "C": "G", "G": "C", "T": "A"}
# Base-pairing for RNA (U pairs with A).
rna_complement_map = {"A": "U", "C": "G", "G": "C", "U": "A"}
# Translation start codon (methionine).
START_CODON = "AUG"
STOP_CODON = object() # dummy object for stop codon
rna_codon_table = { "UUU": "F", "CUU": "L", "AUU": "I", "GUU": "V",
"UUC": "F", "CUC": "L", "AUC": "I", "GUC": "V",
"UUA": "L", "CUA": "L", "AUA": "I", "GUA": "V",
"UUG": "L", "CUG": "L", "AUG": "M", "GUG": "V",
"UCU": "S", "CCU": "P", "ACU": "T", "GCU": "A",
"UCC": "S", "CCC": "P", "ACC": "T", "GCC": "A",
"UCA": "S", "CCA": "P", "ACA": "T", "GCA": "A",
"UCG": "S", "CCG": "P", "ACG": "T", "GCG": "A",
"UAU": "Y", "CAU": "H", "AAU": "N", "GAU": "D",
"UAC": "Y", "CAC": "H", "AAC": "N", "GAC": "D",
"UAA": STOP_CODON, "CAA": "Q", "AAA": "K", "GAA": "E",
"UAG": STOP_CODON, "CAG": "Q", "AAG": "K", "GAG": "E",
"UGU": "C", "CGU": "R", "AGU": "S", "GGU": "G",
"UGC": "C", "CGC": "R", "AGC": "S", "GGC": "G",
"UGA": STOP_CODON, "CGA": "R", "AGA": "R", "GGA": "G",
"UGG": "W", "CGG": "R", "AGG": "R", "GGG": "G",
}
protein_n_codons_table = Counter(rna_codon_table.values())
dna_codon_table = { "TTT": "F", "CTT": "L", "ATT": "I", "GTT": "V",
"TTC": "F", "CTC": "L", "ATC": "I", "GTC": "V",
"TTA": "L", "CTA": "L", "ATA": "I", "GTA": "V",
"TTG": "L", "CTG": "L", "ATG": "M", "GTG": "V",
"TCT": "S", "CCT": "P", "ACT": "T", "GCT": "A",
"TCC": "S", "CCC": "P", "ACC": "T", "GCC": "A",
"TCA": "S", "CCA": "P", "ACA": "T", "GCA": "A",
"TCG": "S", "CCG": "P", "ACG": "T", "GCG": "A",
"TAT": "Y", "CAT": "H", "AAT": "N", "GAT": "D",
"TAC": "Y", "CAC": "H", "AAC": "N", "GAC": "D",
"TAA": STOP_CODON, "CAA": "Q", "AAA": "K", "GAA": "E",
"TAG": STOP_CODON, "CAG": "Q", "AAG": "K", "GAG": "E",
"TGT": "C", "CGT": "R", "AGT": "S", "GGT": "G",
"TGC": "C", "CGC": "R", "AGC": "S", "GGC": "G",
"TGA": STOP_CODON, "CGA": "R", "AGA": "R", "GGA": "G",
"TGG": "W", "CGG": "R", "AGG": "R", "GGG": "G",
}
protein_dna_codon_table = defaultdict(list)
for codon, protein in dna_codon_table.items():
protein_dna_codon_table[protein].append(codon)
monoisotopic_mass_table = {
"A": 71.03711,
"C": 103.00919,
"D": 115.02694,
"E": 129.04259,
"F": 147.06841,
"G": 57.02146,
"H": 137.05891,
"I": 113.08406,
"K": 128.09496,
"L": 113.08406,
"M": 131.04049,
"N": 114.04293,
"P": 97.05276,
"Q": 128.05858,
"R": 156.10111,
"S": 87.03203,
"T": 101.04768,
"V": 99.06841,
"W": 186.07931,
"Y": 163.06333,
}
inverted_monoisotopic_mass = sorted(((value, key) for key, value in monoisotopic_mass_table.items()))
# https://web.archive.org/web/19991011081928/http://www.embl-heidelberg.de/%7Evogt/matrices/blosum62.cmp
# BLOSUM62 amino-acid substitution matrix (NCBI values).
# BUG FIX: the W/W entry was the string '11' instead of the integer 11,
# which broke any arithmetic on tryptophan-tryptophan alignment scores.
# The matrix is now built from a compact row layout so every score is an
# int and row/column symmetry is guaranteed by transcription of one table.
_BLOSUM62_KEYS = "ARNDCQEGHILKMFPSTWYVBZX"
_BLOSUM62_ROWS = [
    # A   R   N   D   C   Q   E   G   H   I   L   K   M   F   P   S   T   W   Y   V   B   Z   X
    [ 4, -1, -2, -2,  0, -1, -1,  0, -2, -1, -1, -1, -1, -2, -1,  1,  0, -3, -2,  0, -2, -1,  0],  # A
    [-1,  5,  0, -2, -3,  1,  0, -2,  0, -3, -2,  2, -1, -3, -2, -1, -1, -3, -2, -3, -1,  0, -1],  # R
    [-2,  0,  6,  1, -3,  0,  0,  0,  1, -3, -3,  0, -2, -3, -2,  1,  0, -4, -2, -3,  3,  0, -1],  # N
    [-2, -2,  1,  6, -3,  0,  2, -1, -1, -3, -4, -1, -3, -3, -1,  0, -1, -4, -3, -3,  4,  1, -1],  # D
    [ 0, -3, -3, -3,  9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2],  # C
    [-1,  1,  0,  0, -3,  5,  2, -2,  0, -3, -2,  1,  0, -3, -1,  0, -1, -2, -1, -2,  0,  3, -1],  # Q
    [-1,  0,  0,  2, -4,  2,  5, -2,  0, -3, -3,  1, -2, -3, -1,  0, -1, -3, -2, -2,  1,  4, -1],  # E
    [ 0, -2,  0, -1, -3, -2, -2,  6, -2, -4, -4, -2, -3, -3, -2,  0, -2, -2, -3, -3, -1, -2, -1],  # G
    [-2,  0,  1, -1, -3,  0,  0, -2,  8, -3, -3, -1, -2, -1, -2, -1, -2, -2,  2, -3,  0,  0, -1],  # H
    [-1, -3, -3, -3, -1, -3, -3, -4, -3,  4,  2, -3,  1,  0, -3, -2, -1, -3, -1,  3, -3, -3, -1],  # I
    [-1, -2, -3, -4, -1, -2, -3, -4, -3,  2,  4, -2,  2,  0, -3, -2, -1, -2, -1,  1, -4, -3, -1],  # L
    [-1,  2,  0, -1, -3,  1,  1, -2, -1, -3, -2,  5, -1, -3, -1,  0, -1, -3, -2, -2,  0,  1, -1],  # K
    [-1, -1, -2, -3, -1,  0, -2, -3, -2,  1,  2, -1,  5,  0, -2, -1, -1, -1, -1,  1, -3, -1, -1],  # M
    [-2, -3, -3, -3, -2, -3, -3, -3, -1,  0,  0, -3,  0,  6, -4, -2, -2,  1,  3, -1, -3, -3, -1],  # F
    [-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4,  7, -1, -1, -4, -3, -2, -2, -1, -2],  # P
    [ 1, -1,  1,  0, -1,  0,  0,  0, -1, -2, -2,  0, -1, -2, -1,  4,  1, -3, -2, -2,  0,  0,  0],  # S
    [ 0, -1,  0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1,  1,  5, -2, -2,  0, -1, -1,  0],  # T
    [-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1,  1, -4, -3, -2, 11,  2, -3, -4, -3, -2],  # W
    [-2, -2, -2, -3, -2, -1, -2, -3,  2, -1, -1, -2, -1,  3, -3, -2, -2,  2,  7, -1, -3, -2, -1],  # Y
    [ 0, -3, -3, -3, -1, -2, -2, -3, -3,  3,  1, -2,  1, -1, -2, -2,  0, -3, -1,  4, -3, -2, -1],  # V
    [-2, -1,  3,  4, -3,  0,  1, -1,  0, -3, -4,  0, -3, -3, -2,  0, -1, -4, -3, -3,  4,  1, -1],  # B
    [-1,  0,  0,  1, -3,  3,  4, -2,  0, -3, -3,  1, -1, -3, -1,  0, -1, -3, -2, -2,  1,  4, -1],  # Z
    [ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2,  0,  0, -2, -1, -1, -1, -1, -1],  # X
]
blosum62 = {
    row_key: dict(zip(_BLOSUM62_KEYS, row))
    for row_key, row in zip(_BLOSUM62_KEYS, _BLOSUM62_ROWS)
}
def dna_to_rna(dna: str, start=0):
    """Transcribe a DNA coding strand to RNA: every 'T' becomes 'U'.

    >>> dna_to_rna("GCAT")
    'GCAU'

    NOTE(review): *start* is accepted but ignored; dna_to_protein relies on
    this by forwarding the same offset again to rna_to_protein, so do not
    make this function slice without also changing that caller.
    """
    return dna.translate(str.maketrans("T", "U"))
def start_with_start_codon(rna: str):
    """ helper method to start protein sequence generation from start codon.
    :param rna: rna string
    :return: first position of start codon in rna, or -1 if not present
    """
    return rna.find(START_CODON)
def start_with_the_beggining(rna: str):
    """ helper method for processing whole rna sequence starting with 0 index, not START_CODON
    :param rna: rna string
    :return: 0

    NOTE(review): the name misspells "beginning", but it is public API (the
    default `start` of rna_to_protein), so it is left unchanged.
    """
    return 0
def rna_to_protein(rna: str, to_str=True, start: Union[int, Callable[[str], int]]=start_with_the_beggining,
                   end=False):
    """ The RNA codon table dictates the details regarding the encoding of specific codons into the amino acid alphabet.
    Given: An RNA string s corresponding to a strand of mRNA (of length at most 10 kbp).
    Return: The protein string encoded by s.
    :param rna: rna string
    :param to_str: if true result will be returned as string, if false it will be returned as a list
    :param start: start position, can be 0-based int or callable, two variants is already defined:
        * start_with_the_beggining
        * start_with_start_codon
    :param end: if True, than sequence should ended on stop codon, if stop codon wasn't found return empty sequence
    :return: protein sequence
    >>> seq = "UUUAUGCUUUAA"
    >>> rna_to_protein(seq)
    'FML'
    >>> rna_to_protein(seq, to_str=False)
    ['F', 'M', 'L']
    >>> rna_to_protein(seq, start=start_with_start_codon)
    'ML'
    >>> rna_to_protein(seq, start=6)
    'L'
    """
    # Resolve the start offset; a callable (e.g. start_with_start_codon)
    # may return -1 to signal "no start codon found".
    pos = start(rna) if callable(start) else start
    if pos < 0:
        return None if not to_str else ""
    else:
        rna = rna[pos:]
    # Pre-size one slot per full codon; a trailing partial codon is ignored.
    result = [""] * (len(rna) // 3)
    for i in range(len(result)):
        elem = rna_codon_table[rna[3 * i: 3 * i + 3]]
        if elem != STOP_CODON:
            result[i] = elem
        else:
            # Stop codon: truncate the sequence here.
            result = result[:i]
            break
    else:
        # if no break occurs, then stop codon wasn't found, so if end argument is True empty sequence should be returned
        if end:
            result = []
    if to_str:
        result = "".join(result)
    return result
def dna_to_protein(dna: str, start: int=0):
    """ Return protein string based on dna string.
    Just a conveyor rna_to_protein(dna_to_rna(dna)), defined for simplifying syntax.
    :param start: position to start with (skip letters at positions range 0..start)
    :param dna: dna string
    :return: protein string

    NOTE(review): `start` is forwarded to BOTH dna_to_rna and
    rna_to_protein; this only works because dna_to_rna currently ignores
    its `start` argument, so the skip is applied exactly once (in
    rna_to_protein).  Fragile coupling — confirm before changing either.
    """
    return rna_to_protein(dna_to_rna(dna, start), start=start)
def gene_to_protein(gene: str, intrones: Union[str, Collection[str]]) -> str:
    """Translate a gene to a protein after splicing out its introns.

    :param gene: dna string
    :param intrones: one intron (str) or a collection of introns; every
        occurrence of each is removed before translation
    :return: protein string for the transcribed and translated exons
    """
    if isinstance(intrones, str):
        intrones = (intrones,)
    for intron in intrones:
        gene = gene.replace(intron, "")
    return dna_to_protein(gene)
def reverse_complement(dna: str):
    """Return the reverse complement of a DNA string.

    The input is stripped of surrounding whitespace, reversed, and each
    base replaced by its Watson-Crick complement (e.g. "GTCA" -> "TGAC").
    """
    pairs = {"A": "T", "C": "G", "G": "C", "T": "A"}
    # KeyError on any non-ACGT character, matching the original lookup.
    return "".join(pairs[base] for base in reversed(dna.strip()))
def all_possible_gene_transcription(dna: str):
    """Return every distinct protein that can be translated from *dna*.

    Both the strand itself and its reverse complement are scanned; a
    candidate open reading frame begins at each AUG and must terminate at
    a stop codon (end=True) to count.
    :param dna: dna sequence
    :return: set of protein strings
    """
    proteins = set()
    for strand in (dna, reverse_complement(dna)):
        rna = dna_to_rna(strand)
        for pos in find_motif(rna, START_CODON):
            protein = rna_to_protein(rna, start=pos, end=True)
            if protein:
                proteins.add(protein)
    return proteins
def gc_rate(dna: str, percent=False):
    """Return the fraction of G and C bases in *dna*.

    :param dna: dna as a string (must be non-empty)
    :param percent: True for a [0, 100] percentage, False for a [0, 1] ratio
    """
    gc = sum(1 for base in dna if base in "GC")
    ratio = gc / len(dna)
    return ratio * 100 if percent else ratio
def hamming_distance(p, q):
    """ Compute the Hamming distance between two strings.
    Positions beyond the shorter string each count as one mismatch.
    :return: The Hamming distance between these strings.
    >>> hamming_distance("GGGCCGTTGGT", "GGACCGTTGAC")
    3
    """
    mismatches = sum(1 for a, b in zip(p, q) if a != b)
    return mismatches + abs(len(p) - len(q))
def find_motif(dna: str, motif: str, zero_based=True):
    """Return the indexes of all (possibly overlapping) occurrences of
    *motif* in *dna*.

    :param dna: the string to search in
    :param motif: the substring to search for
    :param zero_based: if False, returned indexes start at 1 instead of 0
    :return: list of occurrence indexes
    """
    hits = []
    pos = dna.find(motif)
    while pos >= 0:
        hits.append(pos)
        # Advance by one so overlapping matches are found too.
        pos = dna.find(motif, pos + 1)
    return hits if zero_based else [i + 1 for i in hits]
def one_based_helper(indexes: List):
    """Shift a list of 0-based indexes to 1-based."""
    return [index + 1 for index in indexes]
def _helper_for_non_zero_based(indexes: List, zero_based: bool):
""" Transform indexes based on zero_based.
:param indexes: list to transform
:param zero_based: if False indexes will be increased by 1
:return: list of indexes transformed accordingly to zero_based
"""
if not zero_based:
return one_based_helper(indexes)
else:
return indexes
@lru_cache(None)
def rabbits_recurrence(n, k=1):
    """Rosalind FIB: pairs present after month *n* when every mature pair
    produces *k* new pairs each month (months 1 and 2 start with one pair).
    """
    if n < 3:
        return 1
    return rabbits_recurrence(n - 1, k) + k * rabbits_recurrence(n - 2, k)
@lru_cache(None)
def dying_rabbits(n, months_of_life):
    """
    :return: The total number of pairs of rabbits that will remain after the n-th month if all rabbits live
    for m months.
    """
    newborn = 1
    # Age buckets for mature pairs; the last bucket dies off each month.
    adults = [0] * (months_of_life - 1)
    for _ in range(n - 1):
        # Every mature pair produces one newborn pair this month.
        offspring = sum(adults)
        # Age each bucket by one month, dropping the oldest cohort.
        adults[1:] = adults[:-1]
        adults[0] = newborn
        newborn = offspring
    return sum(adults) + newborn
def calculate_protein_mass(protein: str):
    """Return the total monoisotopic mass of a protein string, summing the
    standard monoisotopic mass of each amino-acid residue.

    :param protein: A protein string
    :return: The total weight
    """
    return sum(monoisotopic_mass_table[residue] for residue in protein)
def dominant_probability(homozygous_dominant: int, heterozygous: int, homozygous_recessive: int):
    """Probability that two randomly mated organisms produce offspring
    displaying the dominant phenotype (Rosalind IPRB).

    :param homozygous_dominant: count of AA individuals
    :param heterozygous: count of Aa individuals
    :param homozygous_recessive: count of aa individuals
    :return: probability of a dominant-allele offspring, assuming any two
        organisms can mate
    """
    d, h, r = homozygous_dominant, heterozygous, homozygous_recessive
    total = d + h + r
    # Closed form over all ordered parent pairs.
    numerator = d * (d + 2 * h + 2 * r - 1) + h * (0.75 * h + r - 0.75)
    return numerator / (total * (total - 1))
def profile(dna: Union[list, tuple, str], update: Union[list, None]=None) -> dict:
    """Return the profile matrix of a set of equal-length DNA strings.

    :param dna: a list/tuple of strings (or a single string) of one genome part
    :param update: an existing profile dict to accumulate into (for
        incremental calculation), or None to start a fresh one
    :return: dict mapping 'A', 'C', 'G', 'T' to per-position counts

    >>> profile(("AACGTA","CCCGTT","CACCTT","GGATTA","TTCCGG"))
    {'A': [1, 2, 1, 0, 0, 2], 'C': [2, 1, 4, 2, 0, 0], 'G': [1, 1, 0, 2, 1, 1], 'T': [1, 1, 0, 1, 4, 2]}
    """
    sequences = dna if isinstance(dna, (list, tuple)) else (dna,)
    width = len(sequences[0])
    counts = update if update else {base: [0] * width for base in "ACGT"}
    for position in range(width):
        for sequence in sequences:
            counts[sequence[position]][position] += 1
    return counts
def consensus(dnas: Union[list, None]=None, precalculated_profile: Union[Dict[str, list], None]=None) -> str:
    """Build the consensus string: the most frequent nucleotide in each
    column of the motif matrix, ties broken by 'ACGT' order.

    :param dnas: a set of equal-length kmers (ignored if a profile is given)
    :param precalculated_profile: an already computed profile matrix
    :return: consensus string

    >>> consensus(("AACGTA","CCCGTT","CACCTT","GGATTA","TTCCGG"))
    'CACCTA'
    """
    width = len(dnas[0]) if dnas else len(precalculated_profile["A"])
    counts = precalculated_profile if precalculated_profile else profile(dnas)
    letters = []
    for position in range(width):
        best_count = 0
        best_symbol = ""
        # Strict '>' keeps the first maximal symbol in ACGT order.
        for symbol in "ACGT":
            if counts[symbol][position] > best_count:
                best_count = counts[symbol][position]
                best_symbol = symbol
        letters.append(best_symbol)
    return "".join(letters)
def n_reverse_translation(protein: str, modulo: Union[int, None]=None):
    """Return the number of distinct RNA strings that translate to
    *protein* (followed by a stop codon), optionally modulo *modulo*.

    :param protein: protein string
    :param modulo: when given, the result is reduced modulo this value

    BUG FIX: the multiplication by the number of stop codons happened after
    the final modulo reduction, so results could exceed `modulo` (Rosalind
    MRNA expects the fully reduced value).  The product is now reduced once
    more after including the stop codons.
    """
    result = 1
    for p in protein:
        result *= protein_n_codons_table[p]
        if modulo:
            result %= modulo
    result *= protein_n_codons_table[STOP_CODON]
    if modulo:
        result %= modulo
    return result
def find_reverse_palindromes(dna: str, min_len: int=4, max_len: int=12, zero_based: bool=True):
    """Locate every reverse palindrome (substring equal to its own reverse
    complement, e.g. GCATGC) with length in [min_len, max_len].

    :param dna: A DNA string
    :param min_len: minimal palindrome length to report
    :param max_len: maximal palindrome length to report
    :param zero_based: if False, reported positions start at 1
    :return: list of (position, length) pairs
    """
    total = len(dna)
    found = []
    for begin in range(total):
        for size in range(min(min_len, total - begin), min(max_len + 1, total - begin + 1)):
            # Near the end of the string the range can start below min_len;
            # skip those too-short candidates.
            if size < min_len or size > max_len:
                continue
            candidate = dna[begin: begin + size]
            if candidate == reverse_complement(candidate):
                found.append((begin, size))
    if not zero_based:
        return [(pos + 1, size) for pos, size in found]
    return found
def bernul(n, k, p):
    """Probability of exactly *k* successes in *n* Bernoulli trials with
    per-trial success probability *p*.
    https://en.wikipedia.org/wiki/Bernoulli_trial

    :param n: number of trials
    :param k: number of successes
    :param p: probability of each success
    """
    failure = 1 - p
    return comb(n, k) * (p ** k) * (failure ** (n - k))
def independent_alleles(heterozygous_number: int, generation_number: int) -> float:
    """ http://rosalind.info/problems/lia/
    Tom (generation 0) has genotype Aa Bb; each organism has two children
    per generation and always mates with an Aa Bb partner.

    :param heterozygous_number: minimum number of Aa Bb organisms required
    :param generation_number: generation index k
    :return: probability that at least heterozygous_number Aa Bb organisms
        belong to the k-th generation (mates excluded), assuming Mendel's
        second law.

    >>> result = independent_alleles(1, 2)
    >>> round(result, 3)
    0.684
    """
    population = 2 ** generation_number
    # P(X >= h) = 1 - P(X < h) where X ~ Binomial(population, 1/4):
    # any cross with Aa Bb yields Aa Bb offspring with probability 1/4.
    result = 1
    for successes in range(heterozygous_number):
        result -= bernul(population, successes, p=1/4)
    return result
def signed_permutation(n: int):
    """Yield every signed permutation of {1, ..., n}.

    A signed permutation of length n is an ordering of 1..n where each integer
    additionally carries a sign (positive signs are implicit); e.g.
    (5, -3, -2, 1, 4) is a signed permutation of length 5.

    :param n: positive integer
    :return: generator over all n! * 2**n signed permutations
    >>> p = signed_permutation(2)
    >>> list(p)
    [[-1, -2], [-1, 2], [1, -2], [1, 2], [-2, -1], [-2, 1], [2, -1], [2, 1]]
    """
    base = list(range(1, n + 1))
    for ordering in itertools.permutations(base):
        for signs in itertools.product([-1, 1], repeat=n):
            yield [value * sign for value, sign in zip(ordering, signs)]
def adjacency_list(fasta_file_path: str, k: int = 3, prefixes: Union[dict, None] = None,
                   suffixes: Union[dict, None] = None) -> List[Tuple[str, str]]:
    """ Return the overlap-graph adjacency list for the DNA records in a FASTA file.
    For a collection of strings and a positive integer k, return the overlap graph in which each string is represented
    by a node, and string s is connected to string t with a directed edge when there is a length k suffix of s
    that matches a length k prefix of t, as long as s != t;
    we demand s != t to prevent directed loops in the overlap graph (although directed cycles may be present).
    :param fasta_file_path: path to file with dna in fasta format
    :param k: length of prefixes and suffixes
    :param prefixes: precalculated mapping prefix -> set of record ids, or None
        (the original annotation said ``str``; the value is actually a dict of sets)
    :param suffixes: precalculated mapping suffix -> set of record ids, or None
    :return: overlap graph in the form of an adjacency list of (source_id, target_id)
    """
    prefixes = prefixes or defaultdict(set)
    suffixes = suffixes or defaultdict(set)
    # `seq_id` instead of the original `id`, which shadowed the builtin.
    for seq_id, sequence in fasta_file_iter(fasta_file_path):
        prefixes[sequence[:k]].add(seq_id)
        suffixes[sequence[-k:]].add(seq_id)
    result = []
    for suffix, seq_ids in suffixes.items():
        # An edge start -> finish exists when start's suffix equals finish's prefix.
        result.extend((start, finish)
                      for start in seq_ids
                      for finish in prefixes[suffix]
                      if start != finish)
    return result
def dna_probability(dna: str, gc: float, return_log=False) -> float:
    """ For a given DNA string and GC-content, return the probability of that
    string, or its base-10 logarithm when return_log is True.
    :param dna: dna string
    :param gc: probability of g or c nucleotide (gc-rate)
    :param return_log: set true if you want to get log base 10 of probability
    :return: probability of giving dna string or log base 10 of probability
    >>> result = dna_probability("ACGATACAA", 0.287)
    >>> round(result, 9)
    6.066e-06
    >>> result = dna_probability("ACGATACAA", 0.287, return_log=True)
    >>> round(result, 3)
    -5.217
    """
    # Per-nucleotide probabilities: A/T share (1 - gc), C/G share gc.
    p_at = (1 - gc) / 2.0
    p_gc = gc / 2.0
    probability = 1
    for nucleotide in dna:
        if nucleotide in "AT":
            probability *= p_at
        elif nucleotide in "CG":
            probability *= p_gc
        else:
            raise ValueError("You should use dna string.")
    return math.log(probability, 10) if return_log else probability
def find_spliced_motif(dna: str, motif: str, zero_based=True) -> Union[List[int], int]:
    """ Return the positions at which the symbols of `motif` appear, in order,
    as a subsequence of `dna`.
    E.g. the indices of ACG in TATGCTAAGATC can be represented by (2, 5, 9).
    :param dna: dna string
    :param motif: subsequence to search
    :param zero_based: if false will return indexes starting with 1 instead of 0.
    :return: list of indices, or -1 when motif is not a subsequence of dna
    """
    positions = []
    next_char = 0
    for index, letter in enumerate(dna):
        if letter != motif[next_char]:
            continue
        positions.append(index)
        next_char += 1
        if next_char == len(motif):
            # Whole motif matched: convert indexing base and return.
            return _helper_for_non_zero_based(positions, zero_based)
    return -1
def align(seq1, seq2, reconstruct_answer=True, method=None, swap_case_on_mismatch=True):
    """ Align two sequences using the given alignment method.
    :param seq1: first sequence
    :param seq2: second sequence
    :return: (aligned pair, score) when reconstruct_answer, otherwise just the score
    >>> method = alignments.NeedlemanWunsch(match_score=1, mismatch_score=-1, gap_score=-1, gap_start=-10)
    >>> align("AXC", "AABCC", reconstruct_answer=True, method=method)
    (('A--XC', 'AabcC'), -11)
    >>> method = alignments.NeedlemanWunsch(match_score=1, mismatch_score=-1, gap_score=-1, gap_start=1)
    >>> align("AXC", "AABCC", reconstruct_answer=True, method=method)
    (('-A-X-C', 'aAb-cC'), 2)
    """
    if method is None:
        method = alignments.NeedlemanWunsch(match_score=1, mismatch_score=-1, gap_score=-1, gap_start=1)
    distances = method.init_distance_matrix(seq1, seq2)
    # Fill the DP table row by row (rows follow seq2, columns follow seq1).
    for row in range(1, len(seq2) + 1):
        for col in range(1, len(seq1) + 1):
            method.calculate_distance(seq1, seq2, distances, row, col)
    score = method.score(distances)
    if not reconstruct_answer:
        return score
    return method.reconstruct_answer(seq1, seq2, distances, swap_case_on_mismatch), score
def edit_distance(str1, str2, reconstruct_answer=False, method=None,
                  swap_case_on_mismatch=True):
    """ Calculate the edit (Levenshtein) distance between two strings.

    :param method: alignment method; defaults to a fresh alignments.Levinshtein().
        The original default was ``alignments.Levinshtein()`` in the signature,
        which Python evaluates once at definition time -- every call shared one
        instance and the ``if method is None`` check was dead code.  Using None
        as the sentinel creates a new instance per call.
    >>> edit_distance("editing", "distance")
    5
    """
    method = alignments.Levinshtein() if method is None else method
    return align(str1, str2, reconstruct_answer, method, swap_case_on_mismatch)
def enumerate_kmers(alphabet: Union[str, List[str]], length: int):
    """ Lazily yield every word of the given length (k-mer) over `alphabet`,
    in the lexicographic order induced by the alphabet's own order.
    :param alphabet: symbols the words are built from
    :param length: or k in k-mers, length of created words
    :return: all possible words one by one
    >>> result = enumerate_kmers("ab", 3)
    >>> list(result)
    ['aaa', 'aab', 'aba', 'abb', 'baa', 'bab', 'bba', 'bbb']
    >>> result = enumerate_kmers("ABCG", 2)
    >>> list(result)
    ['AA', 'AB', 'AC', 'AG', 'BA', 'BB', 'BC', 'BG', 'CA', 'CB', 'CC', 'CG', 'GA', 'GB', 'GC', 'GG']
    """
    yield from map("".join, itertools.product(alphabet, repeat=length))
def string_to_kmers(s: str, k: int) -> List[str]:
    """ Split `s` into consecutive, non-overlapping chunks of length k.
    The final chunk is shorter when len(s) is not a multiple of k.
    :param s: string to split
    :param k: length of k-mers
    :return: generator which returns k-mers of split string
    >>> result = string_to_kmers("aaabaa", 2)
    >>> list(result)
    ['aa', 'ab', 'aa']
    >>> result = string_to_kmers("ACGT", 3)
    >>> list(result)
    ['ACG', 'T']
    """
    yield from (s[start:start + k] for start in range(0, len(s), k))
def kmers_composition(dna: str, k: int, alphabet: str = "ACGT"):
    """ Yield the k-mer composition of `dna`: for every k-mer over `alphabet`
    in lexicographic order, the number of times it occurs among the
    consecutive non-overlapping k-chunks of dna (as split by string_to_kmers).
    :param dna: dna string to represent in k-mer composition
    :param k: length of k-mer
    :param alphabet: alphabet of string
    :return: generator of per-k-mer counts
    >>> result = kmers_composition("aaabaa", k=2, alphabet="ab")
    >>> list(result)
    [2, 1, 0, 0]
    """
    chunk_counts = Counter(string_to_kmers(dna, k))
    for kmer in enumerate_kmers(alphabet, k):
        yield chunk_counts[kmer]
def count_kmers(dna: str, k: int, alphabet: str = "ACGT"):
    """ Count occurrences of every k-mer over `alphabet`, in lexicographic
    order, among all overlapping length-k windows of dna.
    :param dna: dna string to count
    :param k: length of k-mer
    :param alphabet: alphabet of string
    :return: list of counts, one per lexicographically ordered k-mer
    """
    window_counts = Counter(dna[start:start + k] for start in range(len(dna) - k + 1))
    return [window_counts[kmer] for kmer in enumerate_kmers(alphabet, k)]
def distance_matrix(dnas: Collection[str], metric=hamming_distance, relative=True, as_ndarray=False):
    """ Compute the pairwise distance matrix for the strings in `dnas`.
    :param dnas: collection of strings
    :param metric: function to calculate distance between two strings
    :param relative: if true every distance is divided by the length of the
        longer of the two strings, giving values in the 0.0..1.0 interval
    :param as_ndarray: if true the result is returned as numpy.ndarray
    :return: n x n matrix (n = len(dnas)) with result[i][j] = metric(dnas[i], dnas[j])
    >>> dnas = ["ATTA", "ATTC", "ATTA"]
    >>> distance_matrix(dnas, relative=False)
    [[0, 1, 0], [1, 0, 1], [0, 1, 0]]
    """
    size = len(dnas)
    matrix = [[0] * size for _ in range(size)]
    indexed = list(enumerate(dnas))
    # Only the upper triangle is computed; the matrix is symmetric.
    for (i, first), (j, second) in itertools.combinations(indexed, 2):
        value = metric(first, second)
        if relative:
            value = value / max(len(first), len(second))
        matrix[i][j] = value
        matrix[j][i] = value
    return np.asarray(matrix) if as_ndarray else matrix
def failure_array(dna: str) -> List[int]:
    """ The failure array of a string is an array P of length n for which P[k] is the length
    of the longest substring s[j:k] that is equal to some prefix s[0:k-j], where j cannot equal 1
    (otherwise, P[k] would always equal k). By convention, P[0]=0.

    Implemented as the standard KMP prefix-function recurrence: instead of
    re-comparing string slices for every candidate length (O(n^2) worst case in
    the original), follow the failure links, which is O(n) overall.

    :param dna: string to compute failure array from
    :return: computed failure array
    >>> failure_array("CAGCATGGTATCACAGCAGAG")
    [0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 1, 2, 1, 2, 3, 4, 5, 3, 0, 0]
    >>> failure_array("AAAAA")
    [0, 1, 2, 3, 4]
    """
    result = [0] * len(dna)
    matched = 0  # length of the current longest proper prefix == suffix
    for i in range(1, len(dna)):
        # Fall back along failure links until the next character extends a prefix.
        while matched > 0 and dna[i] != dna[matched]:
            matched = result[matched - 1]
        if dna[i] == dna[matched]:
            matched += 1
        result[i] = matched
    return result
def prefix_spectrum(spectrum: List[float]):
    """ The prefix spectrum of a weighted string is the collection of all its prefix weights.
    :param spectrum: prefix spectrum
    :return: protein sequence, with the same prefix sequence.
    result[i] = aminoacid with the mass equals to spectrum[len(spectrum) - 1] - spectrum[len(spectrum) - 2]
    >>> prefix_spectrum([3524.8542, 3710.9335, 3841.974, 3970.0326, 4057.0646])
    'WMQS'
    """
    result = []
    # Advancing `it` once makes zip(spectrum, it) yield consecutive pairs
    # (spectrum[i], spectrum[i + 1]).
    it = iter(spectrum)
    next(it)
    # NOTE(review): assumes inverted_monoisotopic_mass (defined elsewhere in
    # this file) is a list of (mass, amino_acid) tuples sorted by mass -- confirm.
    m = inverted_monoisotopic_mass
    for pair in zip(spectrum, it):
        # The difference of consecutive prefix weights is the mass of one residue.
        mass = pair[1] - pair[0]
        # Binary search for the closest tabulated mass; "dummy" makes the
        # probe tuple comparable against (mass, amino_acid) entries.
        index = bisect.bisect_left(m, (mass, "dummy"))
        # Prefer the left neighbour when the probe fell off the right end or the
        # left neighbour is strictly closer.
        # NOTE(review): `index - 1 > 0` never picks the left neighbour when
        # index == 1; looks like it was meant to be `index - 1 >= 0` -- confirm.
        index = index - 1 if index == len(m) or\
            index - 1 > 0 and abs(mass - m[index - 1][0]) < abs(mass - m[index][0]) else index
        result.append(m[index][1])
    return "".join(result)
def find_protein_motif_by_shorthand(protein: str, shorthand: str):
    """Return the start positions of every (possibly overlapping) occurrence
    of the protein-motif `shorthand` in `protein`.

    Restarting the search one character past each hit finds overlapping
    matches, which finditer alone would skip.
    """
    pattern = re.compile(_convert_protein_shorthand_into_regex(shorthand))
    positions = []
    match = pattern.search(protein)
    while match is not None:
        positions.append(match.start())
        match = pattern.search(protein, match.start() + 1)
    return positions
def _convert_protein_shorthand_into_regex(shorthand: str):
"""
:param shorthand:
:return:
>>> _convert_protein_shorthand_into_regex("ABC")
'ABC'
>>> _convert_protein_shorthand_into_regex("N{P}[ST]{P}")
'N[^P][ST][^P]'
"""
return shorthand.replace("{", "[^").replace("}", "]")
def transition_transversion(dna1: str, dna2: str):
    """ Count point mutations between two aligned DNA strings.
    A transition substitutes one purine for another (A<->G) or one pyrimidine
    for another (C<->T); every other substitution is a transversion.
    see http://rosalind.info/media/problems/tran/transitions-transversions.png
    :return: tuple (transition, transversion)
    >>> transition_transversion("ACGT", "AAGC")
    (1, 1)
    """
    transitions = {("A", "G"), ("G", "A"), ("C", "T"), ("T", "C")}
    n_transition = 0
    n_transversion = 0
    for pair in zip(dna1, dna2):
        if pair[0] == pair[1]:
            continue
        if pair in transitions:
            n_transition += 1
        else:
            n_transversion += 1
    return n_transition, n_transversion
def transition_transversion_ratio(dna1: str, dna2: str):
    """ Return the transition/transversion ratio between two DNA strings.
    :return: transition count divided by transversion count
    >>> transition_transversion_ratio("ACGT", "AAGC")
    1.0
    """
    counts = transition_transversion(dna1, dna2)
    return counts[0] / counts[1]
def rna_structure_prediction(rna: str, min_size=3):
    """Predict a maximum base-pairing secondary structure of an RNA string.

    Nussinov-style interval dynamic programming: matrix[i][j] stores a tuple
    whose first element is the best pairing score for rna[i..j] and whose
    remaining elements are back-pointers used for reconstruction.
    https://en.wikipedia.org/wiki/Nucleic_acid_structure_prediction
    :param rna: RNA string over the alphabet A/C/G/U
    :param min_size: minimal number of bases that must separate paired positions
    :return: (list of (i, j) paired positions, matrix[0][-1][1])
    >>> rna_structure_prediction("ACCCU")
    ([(0, 4)], 1)
    >>> rna_structure_prediction("CCCAAAGGGAAAGGGAAACCC")
    ([(0, 8), (1, 7), (2, 6), (12, 20), (13, 19), (14, 18)], 0)
    """
    matrix = [[(0, 0, 0)] * len(rna) for _ in rna]
    rna_to_int_map = dict(A=1, C=2, G=3, U=4)  # sum == 5 if complementary
    rna_int = [rna_to_int_map[letter] for letter in rna]  # use sum instead of dictionary lookup
    min_size += 1
    # Fill the table diagonal by diagonal; n is extra distance beyond min_size.
    for n in range(len(rna_int) - min_size):
        for i in range(len(rna_int) - min_size - n):
            j = i + min_size + n
            # Option 1: leave rna[i] or rna[j] unpaired (3-tuple back-pointer).
            m1 = max((matrix[i + 1][j][0], i + 1, j),
                     (matrix[i][j - 1][0], i, j - 1))
            # Option 2: pair rna[i] with rna[j] when they are complementary.
            complement_score = matrix[i + 1][j - 1][0] + 1 if rna_int[i] + rna_int[j] == 5 else 0
            m1 = m1 if m1[0] >= complement_score else (complement_score, i + 1, j - 1)
            # Option 3: split the interval at k (5-tuple back-pointer).
            m2 = max((matrix[i][k][0] + matrix[k][j][0], i, k, k, j) for k in range(i + 1, j + 1))
            matrix[i][j] = m1 if m1[0] >= m2[0] else m2
    # NOTE(review): matrix[0][-1][1] is a back-pointer index, not a score --
    # confirm this second return value is what callers expect.
    return _rna_structure_reconstruct_answer(matrix, i=0, j=len(rna) - 1), matrix[0][-1][1]
def _rna_structure_reconstruct_answer(matrix, i, j):
    """Walk the back-pointers in the DP matrix and collect the paired positions."""
    score = matrix[i][j]
    structure = []
    while score[0]:
        if len(score) == 3:
            # 3-tuple: single back-pointer (skip an unpaired end or take a pair).
            _, next_i, next_j = score
            if next_i - 1 == i and next_j + 1 == j:
                # Both ends moved inward simultaneously => (i, j) are paired.
                structure.append((i, j))
            i = next_i
            j = next_j
            score = matrix[next_i][next_j]
        else:
            # 5-tuple: the interval was split; recurse into both halves.
            _, i1, j1, i2, j2 = score
            return structure + _rna_structure_reconstruct_answer(matrix, i1, j1)\
                + _rna_structure_reconstruct_answer(matrix, i2, j2)
    return structure
def rna_structure_to_graphviz(rna, structure):
    """ Produce graphviz graph for rna structure visualization
    :Note: Use "neato" engine for better look
    """
    dot = Graph()
    # One node per nucleotide, labelled with the base letter.
    for index, nucleotide in enumerate(rna):
        dot.node(str(index), nucleotide)
    # Solid edges form the backbone; dashed edges mark predicted base pairs.
    backbone = ((str(index - 1), str(index)) for index in range(1, len(rna)))
    dot.edges(backbone)
    for start, end in structure:
        dot.edge(str(start), str(end), style="dashed")
    return dot
def codon_iter(seq):
    """ returns an iterator with all codons (length-3 chunks) in the sequence
    Parameters
    ----------
    seq: str
        sequence; its length must be a multiple of 3
    Returns
    -------
    iterator: iterator[string]
    Raises
    ------
    ValueError
        if the sequence length is not divisible by 3
    """
    if len(seq) % 3:
        # Message fixed: original read "the sequence length are not devided by 3".
        raise ValueError("the sequence length is not divisible by 3")
    return string_to_kmers(seq, 3)
def longest_common_substring(strings):
    """Return a longest substring common to all strings in `strings`.

    Iteratively shrinks a candidate (initially strings[0]) to its longest
    common substring with each subsequent string, using the classic
    dynamic-programming table of common-suffix lengths.

    Bug fixed relative to the original: the maximum was only tracked for
    table cells with i, j >= 1, so a match confined to row 0 or column 0
    (e.g. ["ab", "b"]) was missed and "" was wrongly returned.

    :param strings: non-empty list of strings
    :return: a longest substring shared by every input string ("" if none)
    >>> longest_common_substring(["abcde", "cdefg", "bcd"])
    'cd'
    """
    first = strings[0]
    for current in range(1, len(strings)):
        second = strings[current]
        max_len = 0  # length of the best common substring found so far
        max_end = 0  # index in `first` of its last character
        # prev_row[j] = length of the common suffix of first[:i] and second[:j+1]
        prev_row = [0] * len(second)
        for i in range(len(first)):
            row = [0] * len(second)
            for j in range(len(second)):
                if first[i] == second[j]:
                    row[j] = prev_row[j - 1] + 1 if i > 0 and j > 0 else 1
                    if row[j] > max_len:
                        max_len = row[j]
                        max_end = i
            prev_row = row
        first = first[max_end - max_len + 1:max_end + 1]
    return first
|
999,018 | 61ad9eaaba6c55fc5a263eef1d0d8ae890de6286 | """Consolidate all environment setup functionality (sys.path changes,
monkeypatches, etc.) in here."""
import os, sys
# Add our external dependencies to sys.path
# Each entry lives under ext/<name>; inserting at position 0 makes the
# bundled copy win over any system-installed version.
extlibs = ('jinja2', 'tweepy', 'python-simplejson', 'appengine-search')
for lib in extlibs:
    sys.path.insert(0, os.path.join('ext', lib))
|
999,019 | 22693c44ffa287006ea20d866dc07768311befc3 | import networkx as nx
import pandas as pd
df=pd.read_csv("AAAI.csv")
graph=nx.from_pandas_edgelist(df, source='Topics', target='High-Level Keyword(s)', edge_attr=True)
a=nx.edge_betweenness_centrality(graph)
d={}
# Collect the edges carrying the (global) maximum betweenness into d, then
# treat C -- the edges not yet collected -- as the remaining clusters.
# NOTE(review): `a` is never reduced, so b = max(a.values()) is identical on
# every pass and d stops growing after the first iteration; if len(C) != 9
# after that pass this loop never terminates -- confirm intent.
while(1):
    b=max(a.values())
    for key, value in a.items():
        if(value==b):
            c={key : value}
            d.update(c)
    C = {k:v for k,v in a.items() if k not in d}
    if(len(C)==9):
        break
# NOTE(review): C is a dict keyed by edge tuples, so C[i] for i in 0..8 will
# raise KeyError unless integer keys happen to exist -- confirm intent.
for i in range(9):
    print("Clusters", i+1, "=", C[i])
    print("No of elements in cluster", i+1, "=" , len(C[i]))
    print("\n")
999,020 | 1f4554949a0a1d6431963069e7f95daaaecb1605 | import os
def package_dir():
    """Return the directory containing this module, with a trailing separator."""
    directory = os.path.dirname(__file__)
    return directory + os.sep
|
999,021 | c32e77ab7be76f10200e2e3552823c49c5dfd6d5 | # -*- coding: utf-8 -*-
"""
Accelerometer Plugin
Copyright (C) 2015 Olaf Lüke <olaf@tinkerforge.com>
Copyright (C) 2015 Matthias Bolte <matthias@tinkerforge.com>
accelerometer.py: Accelerometer Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
import math
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QVBoxLayout, QLabel, QHBoxLayout, QComboBox, QCheckBox, QFont
from brickv.plugin_system.plugin_base import PluginBase
from brickv.bindings.bricklet_accelerometer import BrickletAccelerometer
from brickv.plot_widget import PlotWidget
from brickv.async_call import async_call
from brickv.callback_emulator import CallbackEmulator
class MonoSpaceLabel(QLabel):
    """QLabel variant using a monospace font so numeric readouts stay aligned."""
    def __init__(self):
        super(MonoSpaceLabel, self).__init__()
        font = QFont('monospace')
        # Fall back to any typewriter-style font if 'monospace' is unavailable.
        font.setStyleHint(QFont.TypeWriter)
        self.setFont(font)
class PitchRollLabel(MonoSpaceLabel):
    """Label showing pitch and roll angles derived from an acceleration vector."""
    def setText(self, x, y, z):
        try:
            text = u'Pitch: {0:+03d}°'.format(int(round(math.atan(x/(math.sqrt(y*y + z*z)))*180/math.pi, 0)))
            text += u', Roll: {0:+03d}°'.format(int(round(math.atan(y/math.sqrt(x*x+z*z))*180/math.pi, 0)))
            # Render '+ 5'/'- 5' instead of '+05'/'-05'.
            text = text.replace('-0', '- ')
            text = text.replace('+0', '+ ')
            super(PitchRollLabel, self).setText(text)
        except ZeroDivisionError:
            # All-zero acceleration makes the divisor 0; keep the previous text.
            # (Original used a bare `except:`, which also swallowed programming
            # errors and KeyboardInterrupt; only division by zero can occur here.)
            pass
class TemperatureLabel(MonoSpaceLabel):
    """Label showing the Bricklet temperature in degrees Celsius."""
    def setText(self, t):
        super(TemperatureLabel, self).setText(u'Temperature: {0}°C'.format(t))
class AccelerationLabel(MonoSpaceLabel):
    """Label showing the acceleration vector in g (inputs are in mg)."""
    def setText(self, x, y, z):
        parts = [u'Acceleration X: {0:+.3f}g'.format(round(x/1000.0, 3)),
                 u', Y: {0:+.3f}g'.format(round(y/1000.0, 3)),
                 u', Z: {0:+.3f}g'.format(round(z/1000.0, 3))]
        super(AccelerationLabel, self).setText(u''.join(parts))
class Accelerometer(PluginBase):
    """Brick Viewer plugin for the Accelerometer Bricklet.

    Polls acceleration and temperature via callback emulators, plots the
    three axes, and exposes full-scale / data-rate / filter-bandwidth
    configuration plus the on-board LED toggle.
    """
    def __init__(self, *args):
        PluginBase.__init__(self, BrickletAccelerometer, *args)
        self.accelerometer = self.device
        # Poll the getters periodically instead of relying on device callbacks.
        self.cbe_acceleration = CallbackEmulator(self.accelerometer.get_acceleration,
                                                 self.cb_acceleration,
                                                 self.increase_error_count)
        self.cbe_temperature = CallbackEmulator(self.accelerometer.get_temperature,
                                                self.cb_temperature,
                                                self.increase_error_count)
        self.acceleration_label = AccelerationLabel()
        # Latest acceleration in g per axis; None until the first callback.
        self.current_acceleration = [None, None, None]
        plot_list = [['X', Qt.red, self.get_current_x],
                     ['Y', Qt.darkGreen, self.get_current_y],
                     ['Z', Qt.blue, self.get_current_z]]
        self.plot_widget = PlotWidget('Acceleration [g]', plot_list)
        self.temperature_label = TemperatureLabel()
        layout_ht = QHBoxLayout()
        layout_ht.addStretch()
        layout_ht.addWidget(self.temperature_label)
        layout_ht.addStretch()
        self.pitch_roll_label = PitchRollLabel()
        layout_hpr = QHBoxLayout()
        layout_hpr.addStretch()
        layout_hpr.addWidget(self.pitch_roll_label)
        layout_hpr.addStretch()
        self.enable_led = QCheckBox("LED On")
        self.enable_led.stateChanged.connect(self.enable_led_changed)
        self.fs_label = QLabel('Full Scale:')
        self.fs_combo = QComboBox()
        self.fs_combo.addItem("2 g")
        self.fs_combo.addItem("4 g")
        self.fs_combo.addItem("6 g")
        self.fs_combo.addItem("8 g")
        self.fs_combo.addItem("16 g")
        self.fs_combo.currentIndexChanged.connect(self.new_config)
        self.dr_label = QLabel('Data Rate:')
        self.dr_combo = QComboBox()
        self.dr_combo.addItem("Off")
        self.dr_combo.addItem("3.125 Hz")
        self.dr_combo.addItem("6.25 Hz")
        self.dr_combo.addItem("12.5 Hz")
        self.dr_combo.addItem("25 Hz")
        self.dr_combo.addItem("50 Hz")
        self.dr_combo.addItem("100 Hz")
        self.dr_combo.addItem("400 Hz")
        self.dr_combo.addItem("800 Hz")
        self.dr_combo.addItem("1600 Hz")
        self.dr_combo.currentIndexChanged.connect(self.new_config)
        self.fb_label = QLabel('Filter Bandwidth:')
        self.fb_combo = QComboBox()
        self.fb_combo.addItem("800 Hz")
        self.fb_combo.addItem("400 Hz")
        self.fb_combo.addItem("200 Hz")
        self.fb_combo.addItem("50 Hz")
        self.fb_combo.currentIndexChanged.connect(self.new_config)
        layout_hc = QHBoxLayout()
        layout_hc.addStretch()
        layout_hc.addWidget(self.fs_label)
        layout_hc.addWidget(self.fs_combo)
        layout_hc.addStretch()
        layout_hc.addWidget(self.dr_label)
        layout_hc.addWidget(self.dr_combo)
        layout_hc.addStretch()
        layout_hc.addWidget(self.fb_label)
        layout_hc.addWidget(self.fb_combo)
        layout_hc.addStretch()
        layout_hc.addWidget(self.enable_led)
        layout_hc.addStretch()
        layout_h = QHBoxLayout()
        layout_h.addStretch()
        layout_h.addWidget(self.acceleration_label)
        layout_h.addStretch()
        layout = QVBoxLayout(self)
        layout.addLayout(layout_ht)
        layout.addLayout(layout_hpr)
        layout.addLayout(layout_h)
        layout.addWidget(self.plot_widget)
        layout.addLayout(layout_hc)
    def enable_led_changed(self, state):
        # Mirror the checkbox state onto the Bricklet's LED.
        if state == Qt.Checked:
            self.accelerometer.led_on()
        else:
            self.accelerometer.led_off()
    def is_led_on_async(self, value):
        # Async getter result: sync the checkbox with the actual LED state.
        if value:
            self.enable_led.setChecked(True)
        else:
            self.enable_led.setChecked(False)
    def new_config(self):
        # Combo indices map directly onto the bindings' configuration constants.
        dr = self.dr_combo.currentIndex()
        fs = self.fs_combo.currentIndex()
        fb = self.fb_combo.currentIndex()
        self.accelerometer.set_configuration(dr, fs, fb)
    def cb_acceleration(self, data):
        # data is an (x, y, z) tuple; labels take the raw values, the plot
        # takes them scaled by 1/1000 (g).
        x, y, z = data
        self.acceleration_label.setText(x, y, z)
        self.pitch_roll_label.setText(x, y, z)
        self.current_acceleration = [x/1000.0, y/1000.0, z/1000.0]
    def cb_configuration(self, conf):
        # Reflect the device configuration in the combo boxes.
        self.fs_combo.setCurrentIndex(conf.full_scale)
        self.fb_combo.setCurrentIndex(conf.filter_bandwidth)
        self.dr_combo.setCurrentIndex(conf.data_rate)
    def cb_temperature(self, temp):
        self.temperature_label.setText(temp)
    def get_current_x(self):
        return self.current_acceleration[0]
    def get_current_y(self):
        return self.current_acceleration[1]
    def get_current_z(self):
        return self.current_acceleration[2]
    def start(self):
        # Fetch the current state once, then poll periodically.
        async_call(self.accelerometer.is_led_on, None, self.is_led_on_async, self.increase_error_count)
        async_call(self.accelerometer.get_configuration, None, self.cb_configuration, self.increase_error_count)
        async_call(self.accelerometer.get_acceleration, None, self.cb_acceleration, self.increase_error_count)
        async_call(self.accelerometer.get_temperature, None, self.cb_temperature, self.increase_error_count)
        self.cbe_acceleration.set_period(50)
        self.cbe_temperature.set_period(1000)
        self.plot_widget.stop = False
    def stop(self):
        # Period 0 disables the callback emulators.
        self.cbe_acceleration.set_period(0)
        self.cbe_temperature.set_period(0)
        self.plot_widget.stop = True
    def destroy(self):
        pass
    def get_url_part(self):
        return 'accelerometer'
    @staticmethod
    def has_device_identifier(device_identifier):
        return device_identifier == BrickletAccelerometer.DEVICE_IDENTIFIER
|
999,022 | b13f0ea9a687f3d8b319de1475be7fbe925de60f | # Generated by Django 3.0.8 on 2020-10-01 02:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many `species` field linking shelters to pet tags."""
    dependencies = [
        ('projects', '0002_auto_20200924_0604'),
    ]
    operations = [
        migrations.AddField(
            model_name='shelter',
            name='species',
            field=models.ManyToManyField(related_name='shelters', related_query_name='shelter', to='projects.PetTag'),
        ),
    ]
|
999,023 | 6344262b18e4c83b54a54f8a6e708d60581d66bb | # coding:utf-8
import unittest
import os
import report.HTMLTestRunner
# python2.7要是报编码问题,就加这三行,python3不用加
# import sys
# reload(sys)
# sys.setdefaultencoding('utf8')
# 用例路径
case_path = os.path.join(os.getcwd(), "testcase")
# 报告存放路径
report_path = os.path.join(os.getcwd(), "report")
def all_case():
    """Discover every test module named test*.py under case_path and return the suite."""
    loader = unittest.defaultTestLoader
    # pattern: glob for test modules; top_level_dir None = use case_path itself.
    suite = loader.discover(case_path, pattern="test*.py", top_level_dir=None)
    print(suite)
    return suite
if __name__ == "__main__":
    # Absolute path of the HTML report file.
    report_abspath = os.path.join(report_path, "result.html")
    # The with-statement guarantees the report file is closed even if the
    # test run raises (the original leaked the handle on failure).
    with open(report_abspath, "wb") as fp:
        runner = report.HTMLTestRunner.HTMLTestRunner(stream=fp,
                                                      title=u'自动化测试报告,测试结果如下:',
                                                      description=u'用例执行情况:')
        # Run the suite discovered by all_case().
        runner.run(all_case())
999,024 | 141c77680a7ff2015e53f8e12a6243b8739c657b | # Generated by Django 3.0.4 on 2020-03-23 21:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add Order.payment_option (credit card / cash) and Order.shipping_address."""
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='payment_option',
            field=models.CharField(blank=True, choices=[('credit_card', 'Credit Card'), ('cash', 'Cash')], max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='order',
            name='shipping_address',
            # SET_NULL so deleting an address does not delete the order.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Address'),
        ),
    ]
999,025 | 97a3198bd9de766b39fb5e2e2fefe526478d8f64 | from unittest import TestCase
from yahtzee import calculate_small_straight_score
class TestCalculateSmallStraightScore(TestCase):
    """Unit tests for calculate_small_straight_score ("10 - Small Straight" choice)."""
    def test_calculate_small_straight_score_small_straight(self):
        # 1-2-3-4 is a run of four dice, so the fixed 30 points are awarded.
        score_choice = "10 - Small Straight"
        held_dice = [1, 2, 3, 4, 6]
        expected = 30
        actual = calculate_small_straight_score(score_choice, held_dice)
        self.assertEqual(expected, actual)
    def test_calculate_small_straight_score_choose_10_with_no_small_straight(self):
        # No run of four (1-2-3 and 5-6), so the choice scores 0.
        score_choice = "10 - Small Straight"
        held_dice = [1, 2, 3, 5, 6]
        expected = 0
        actual = calculate_small_straight_score(score_choice, held_dice)
        self.assertEqual(expected, actual)
|
999,026 | 4743710e9b7ad872a44e9f38c53f272b2f2bfa55 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 18:14:38 2018
@author: girish
"""
"""Face Detection"""
#%%Training
import numpy as np
import cv2
from matplotlib import pyplot as plt
def disp_img(im,r,c,d):
    """Reshape a flattened image vector to (r, c, d), show it scaled to
    560x460, and block until a key is pressed."""
    img=np.reshape(im,(r,c,d)).astype(np.uint8)
    cv2.namedWindow('image',cv2.WINDOW_NORMAL)
    cv2.imshow('image',cv2.resize(img,(560,460)))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Extra waitKey lets HighGUI process the window-destroy event on some platforms.
    cv2.waitKey(1)
    return
"""Reading all training images"""
images=[]
for i in range(35):
for j in range (10):
images.append(cv2.imread('att_faces/s'+str(i+1)+'/'+str(j+1)+'.pgm'))
#images=np.array([cv2.imread(file) for file in glob.glob('/Users/girish/Desktop/Training/*.pgm')])
sz=np.shape(images)
nstr=20 #No of subjects
ni=sz[0] #No of Images
r=sz[1]
c=sz[2]
d=sz[3]
"""Converting image matrix into vector and creating the data matrix"""
data=np.zeros((r*c*d,ni)).astype(np.uint8)
for j in range(ni):
data[:,j]=np.matrix.flatten(images[j]).T
"""Calculating the mean image"""
exp=np.zeros((r*c*d,1))
exp=np.mean(data,axis=1)
"""Calculating the difference image(Actual-Mean)"""
A=np.zeros((r*c*d,ni))
for k in range(ni):
A[:,k]=data[:,k]-exp
"""Covariance Matrix, EigenVectors and Eigenvalues"""
cov=np.matmul(A.T,A)
cov=np.divide(cov,ni)
eigval,eigvec=np.linalg.eig(cov)
ef=np.matmul(A,eigvec)
"""Sorting the eigenvectors based on eigenvalues"""
indsort=eigval.argsort()
ev=eigval[indsort[::-1]]
ef=ef[:,indsort[::-1]]
k=ni
"""Normalization of Eigenfaces"""
for i in range(int(k)):
ef[:,i]=np.divide(ef[:,i],np.linalg.norm(ef[:,i]))
efp=ef+abs(ef.min())
efd=np.divide(efp,efp.max())*255
"""Finding the weights of each image"""
w=np.zeros((ni,r*c*d))
w=np.matmul(ef.T,A)
"""Uncomment the follwoing to display image.
Inplace of img input the image to be displayed in vector or matrix form"""
disp_img(exp,r,c,d)
#%%Testing
"""Reading the test images"""
test=[]
ns=5 #No of subjects
nt=10 #No of test images per subject
for i in range(ns):
for j in range(nt):
test.append(cv2.imread('att_faces/s'+str(i+36)+'/'+str(j+1)+'.pgm'))
#No of test images per subject
"""Creating the data matrix for test images"""
dt=np.zeros((r*c*d,ns*nt))
for i in range(ns*nt):
dt[:,i]=np.matrix.flatten(test[i]).T-exp
"""Calculating the weights of each test image"""
wt=np.matmul(ef.T,dt)
"""Finding the error as minimum of norm of difference between weights of training and testing images"""
dif=np.zeros((int(k),ns*nt))
for i in range(int(k)):
for j in range(ns*nt):
dif[i,j]=np.linalg.norm(w[:,i]-wt[:,j])
er=dif.min(axis=0)
t=np.zeros((7501,1))
for i in range(0,7501):
t[i]=i
nthr=len(t)
acc=np.zeros((nthr,1))
"""Calculating accuracies of detection for a range of threshold values"""
for i in range(nthr):
pred=np.ones((ns*nt,1),dtype=bool)
pred=er<t[i]
count=0
for j in range(len(pred)):
if pred[j]:
count+=1
acc[i,0]=count*100/(ns*nt)
"""Plotting Thershold vs Accuracy"""
plt.title("Accuracy vs Threshold")
plt.xlabel("Threshold")
plt.ylabel("Accuracy(%)")
plt.plot(t,acc)
plt.plot(t,100-acc)
plt.show()
"""Considering a threshold value of T=6000 accuracy is printed"""
T=6000
pred=np.ones((ns*nt,1),dtype=bool)
pred=er<T
count=0
for j in range(len(pred)):
if pred[j]:
count+=1
accT=count*100/(ns*nt)
pred=pred.reshape(ns,nt)
print('Accuracy for threshold of 6000 =',accT,'%') |
999,027 | d5ec578829503bfcabe867f7ef2690dfac8bee1b | from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponse
from portfolio.settings import EMAIL_HOST_USER
from django.core.mail import send_mail
from .forms import MessageForm
def index(request):
    """Render the landing page."""
    return render(request, 'base/index.html')
def aboutme(request):
    """Render the about-me page."""
    return render(request, 'base/aboutme.html')
def get_name(request):
    """Handle the contact form: on a valid POST, email the message and redirect to /about."""
    # if this is a POST request we need to process the form data
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = MessageForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            # process the data in form.cleaned_data as required
            name = form.cleaned_data['name']
            email = form.cleaned_data['email']
            message = form.cleaned_data['message']
            subject = email + " send you an email"
            recipients = ['icecakeinc@gmail.com']
            # NOTE(review): debug prints left in; consider logging instead.
            print(name)
            print(email)
            print(message)
            send_mail(subject, message, EMAIL_HOST_USER, recipients)
            return redirect('/about')
    # NOTE(review): GET requests and invalid forms fall through and return
    # None, which Django rejects as a response -- confirm intended handling.
|
999,028 | 75a282fe7ad33ad22df1cb690c40774b037cd937 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
import os
from LoginHelper import LoginHelper
class MainHandler(webapp.RequestHandler):
    """Serves index.html to requests carrying the shared secret and refreshes the secret cookie."""
    def get(self):
        # Reject requests that do not present the secret.
        if not LoginHelper.requestHasSecret(self.request): return self.response.set_status(403)
        LoginHelper.responseAddSecretCookie(self.response)
        path = os.path.join(os.path.dirname(__file__), 'index.html')
        # with-statement closes the file even if the response write raises
        # (the original left the handle open on failure).
        with open(path) as f:
            self.response.out.write(f.read())
application = webapp.WSGIApplication([('.*', MainHandler)],
debug=True)
|
999,029 | f2f434fd23998b4b423d2f7baed9a13175ba9c7c | """empty message
Revision ID: 467eb3f93699
Revises: a0c296fb5401
Create Date: 2021-05-08 13:50:35.935521
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '467eb3f93699'
down_revision = 'a0c296fb5401'
branch_labels = None
depends_on = None
def upgrade():
    """Create the food_package lookup table (integer PK, package name up to 70 chars)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('food_package',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('package', sa.String(length=70), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the food_package table, reverting this migration."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('food_package')
    # ### end Alembic commands ###
|
999,030 | 8fc2ff6c6952c32bed927431caceb655006cabe5 | #!/usr/bin/env python
### BEGIN CONFIGURATION SECTION ###
# which RESOLUTION?
RESOLUTION = (1024,768)
# which SIZE of the visible area?
SIZE = (600,600)
# how many TRIALS per round (MINimum)
MINTRIALS = 20
# LENGTH of STimulus presentation in seconds
ST_LENGTH = 0.5
# LENGTH of Break between Trials in seconds
TB_LENGTH = 2.5
# which N-back initially?
N = 1
# UPPER ThresHold, above which N is increased
UPPERTH = 90
# LOWER ThresHold, below which N is decreased
LOWERTH = 75
# SIZE of FONT
FONTSIZE = 20
KEYLEFT = "a"
KEYRIGHT = "o"
SPACE = " "
IFDIR = "data/images"
SFDIR = "data/sounds"
IFS = ['1.png','2.png','3.png','4.png','5.png','6.png','7.png','8.png']
SFS = ['1.ogg','2.ogg','3.ogg','4.ogg','5.ogg','6.ogg','7.ogg','8.ogg']
BASE = IFDIR+"/"+'base.png'
### END CONFIGURATION SECTION ###
TOPLEFT =((RESOLUTION[0]-SIZE[0])/2,(RESOLUTION[1]-SIZE[1])/2)
for i in range(0,len(IFS)):
IFS[i]=IFDIR+"/"+IFS[i]
SFS[i]=SFDIR+"/"+SFS[i]
import time, sys, re
from random import randint
from pygame import display, image, key, Surface, mixer, event, mouse, font
from pygame import FULLSCREEN, KEYDOWN
from pygame.transform import scale
def selftests():
    # Sanity-check all stimulus files before any pygame window is opened.
    # NOTE: Python 2 code (print statement, "except IOError, e" syntax).
    print "Running some selftests"
    die = None
    # do some preloading to minimize lag blow
    for f in IFS+SFS+[BASE]:
        try:
            open(f,"rb")
        except IOError, e:
            # remember the failure but keep looping so every missing file is reported
            die = e
            print >> sys.stderr, "FATAL:",die
    if die:
        raise die
    if not len(IFS) == len(SFS):
        print >> sys.stderr, "FATAL: amount of stimuli for different modalities do not match!"
        sys.exit(1)
    print "All data present, great!"
    print
class Trial:
    """One dual N-back trial: one image plus one simultaneous sound.

    ``result`` is ``[visual_correct, acoustic_correct]``. It is initialized
    to the correct answer for "no key pressed" (True exactly when the trial
    is NOT a target) and overwritten when the subject responds.
    """
    def __init__(self,imagefile,soundfile,trgtimg,trgtsnd):
        self.image = scale(image.load(imagefile), SIZE).convert()
        self.fill = scale(image.load(BASE),SIZE).convert()  # blank shown between trials
        self.sound = mixer.Sound(soundfile)
        self.trgtimg = trgtimg  # True if the image is an N-back repeat
        self.trgtsnd = trgtsnd  # True if the sound is an N-back repeat
        self.result = [not(self.trgtimg),not(self.trgtsnd)]
    def runtrial(self):
        # Show the stimulus for ST_LENGTH seconds, then the blank for
        # TB_LENGTH seconds, then score the keys pressed during the trial.
        # Returns None when the subject hit SPACE (abort), True otherwise.
        surface = display.get_surface()
        surface.fill((255,255,255))
        surface.blit(self.image,TOPLEFT)
        display.flip()
        self.sound.play()
        time.sleep(ST_LENGTH)
        surface.blit(self.fill,TOPLEFT)
        display.flip()
        time.sleep(TB_LENGTH)
        keypresses = []
        for e in event.get(KEYDOWN):
            keypresses += [e.dict['unicode']]
        if SPACE in keypresses:
            return None
        if unicode(KEYLEFT) in keypresses:
            if self.trgtimg:
                #print "user hit key \""+ KEYLEFT +"\" correctly"
                self.result[0] = True
            else:
                #print "user hit key \""+ KEYLEFT +"\" incorrectly"
                self.result[0] = False
        if unicode(KEYRIGHT) in keypresses:
            if self.trgtsnd:
                #print "user hit key \""+ KEYRIGHT +"\" correctly"
                self.result[1] = True
            else:
                #print "user hit key \""+ KEYRIGHT +"\" incorrectly"
                self.result[1] = False
        return True
def myrandom(l):
    """Build a stimulus-index sequence of length N + MINTRIALS for an N-back run.

    The first N entries are drawn uniformly from l. Each of the following
    MINTRIALS entries is, with probability 1/2, an exact N-back repeat (a
    target); otherwise it is drawn uniformly from the *remaining* values so
    an accidental match cannot occur ("strict about probabilities").

    @param l: list of candidate stimulus indices (callers pass a range list)
    Uses module globals N and MINTRIALS.
    """
    result = []
    for i in range(0, N):
        result.append(l[randint(0, len(l) - 1)])
    for i in range(0, MINTRIALS):
        if randint(0, 1):
            # force an N-back target
            result.append(result[-N])
        else:
            # be strict about probabilities
            # BUGFIX: this used myl.pop(result[-N]), i.e. pop by *index*. That
            # only removed the right element because l happened to be an
            # identity list (l[i] == i). remove() drops the value itself and
            # is correct for any candidate list.
            myl = l[:]
            myl.remove(result[-N])
            result.append(myl[randint(0, len(myl) - 1)])
    return result
def gentrials():
    # Build the Trial list for one round: the first N trials can never be
    # targets; each later trial is flagged as a visual/acoustic target when
    # its stimulus index equals the one N steps back.
    # NOTE(review): range(0,len(IFS)-1) omits the LAST stimulus file from the
    # draw (range already excludes its end point). Presumably all len(IFS)
    # stimuli were intended -- confirm before changing.
    trials = []
    iis = myrandom(range(0,len(IFS)-1))
    sis = myrandom(range(0,len(SFS)-1))
    for i,j,k in zip(iis,sis,range(0,len(iis))):
        if k < N:
            trials.append(Trial(IFS[i],SFS[j],False,False))
        else:
            nb = k - N
            trials.append(Trial(IFS[i],SFS[j],iis[k]==iis[nb],sis[k]==sis[nb]))
    return trials
def ask():
    # Yes/No prompt on stdin (Python 2 raw_input). Returns True for "y"/"yes",
    # False for "n"/"no" (case-insensitive) and -- implicitly -- None for any
    # other input, which callers treat as falsy, i.e. as "No".
    spam = raw_input(" [Yes/No]? ")
    if re.match("y(es)?", spam, re.I):
        return True
    elif re.match("n(o)?", spam, re.I):
        return False
def main():
    # Interactive training loop (Python 2): greet, self-test, then repeat
    # rounds forever -- prompt for N, run MINTRIALS+N trials fullscreen,
    # score both modalities, and adapt N via UPPERTH/LOWERTH.
    print "#"*31
    print "### Welcome to MindMixer ###"
    print "####### Version 0.1beta #######"
    print """Have a look at the sourcecode!
Change stuff to suit your needs!
The program will hopefully be
self explaining. Hafe fun!"""
    print "#"*31
    selftests()
    global N
    while 1:
        print "(Hint: while training, you can hit SPACE to abort)"
        print "Hit '"+KEYLEFT+"' if the",str(N)+". previous image is identical to the one shown"
        print "Hit '"+KEYRIGHT+"' if the",str(N)+". previous sound is identical to the one heard"
        # confirmation loop: lets the user pick a different N or quit
        while 1:
            print "Ready to train with N=%i?" %(N),
            if ask():
                break
            else:
                print "Do you wish to train with N set to a different value? Choosing 'No' exits the program.",
                if ask():
                    n = int(raw_input("Ok, enter the desired value here: "))
                    while n < 1:
                        print "N must be 1 or higher!"
                        n = int(raw_input("Enter a value higher than 1: "))
                    N = n
                else:
                    print "bye"
                    sys.exit(1)
        # run one round fullscreen with grabbed input and hidden cursor
        display.init()
        display.set_mode(RESOLUTION, FULLSCREEN)
        font.init()
        mixer.init(44100)
        event.set_grab(True)
        mouse.set_visible(False)
        trials = gentrials()
        for trial in trials:
            if not trial.runtrial():
                break  # runtrial() returned None: user aborted with SPACE
        display.quit()
        # score: fraction of correct responses per modality (in percent)
        vis = 0.0
        acu = 0.0
        for trial in trials:
            if trial.result[0]:
                vis+=1
            if trial.result[1]:
                acu+=1
        vp = (vis/(MINTRIALS+N))*100
        ap = (acu/(MINTRIALS+N))*100
        message = "percentage in visual modality:%i\npercentage in acoustic modality:%i\n" %(int(vp),int(ap))
        print message
        # staircase: raise N when both modalities beat UPPERTH, lower it
        # (never below 1) when either drops under LOWERTH
        if vp >= UPPERTH and ap >= UPPERTH:
            N+=1
        elif (vp < LOWERTH or ap < LOWERTH) and N > 1:
            N-=1
if __name__ == "__main__":
    main()
|
999,031 | 94a47ec8dbb5cb93c6275e6896a88deb298ebe4c | from typing import Dict, List
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.runners.base_runner import BaseRunner
from neuralmonkey.decoders.ctc_decoder import CTCDecoder
from neuralmonkey.decorators import tensor
class CTCDebugRunner(BaseRunner[CTCDecoder]):
    """A runner that print out raw CTC output including the blank symbols."""

    # pylint: disable=too-few-public-methods
    # Pylint issue here: https://github.com/PyCQA/pylint/issues/2607
    class Executable(BaseRunner.Executable["CTCDebugRunner"]):

        def collect_results(self, results: List[Dict]) -> None:
            """Convert per-frame logits into rows of vocabulary symbols,
            keeping repeats and rendering blanks as ``<BLANK>``."""
            if len(results) != 1:
                raise RuntimeError("CTCDebugRunners do not support ensembles.")
            vocabulary = self.executor.decoder.vocabulary
            blank_index = len(vocabulary)

            # logits come in (time, batch, vocab+1); transpose the argmax
            # so each row of best_ids is one batch instance over time.
            best_ids = np.argmax(results[0]["logits"], axis=2).T
            decoded_batch = [
                ["<BLANK>" if index == blank_index
                 else vocabulary.index_to_word[index]
                 for index in row]
                for row in best_ids
            ]

            self.set_runner_result(outputs=decoded_batch, losses=[])
    # pylint: enable=too-few-public-methods

    def __init__(self,
                 output_series: str,
                 decoder: CTCDecoder) -> None:
        """Validate argument types and register the output series."""
        check_argument_types()
        super().__init__(output_series, decoder)

    @tensor
    def fetches(self) -> Dict[str, tf.Tensor]:
        # Only the raw logits are needed for this debug output.
        return {"logits": self.decoder.logits}

    @property
    def loss_names(self) -> List[str]:
        # This runner computes no losses.
        return []
|
999,032 | 31a636513075f4470dc7b92cced09bda9a6ec2a7 | from abc import ABC, abstractmethod
import os
import importlib
from PIL import Image
import numpy as np
import torch
from .catalog import PathManager, LABEL_MAP_CATALOG
from ..elements import *
__all__ = ["Detectron2LayoutModel"]
class BaseLayoutModel(ABC):
    """Abstract base for layout-detection models.

    Subclasses declare third-party requirements through DEPENDENCIES and
    MODULES; the listed modules are imported lazily when the first instance
    is created (see ``__new__``).
    """

    @abstractmethod
    def detect(self):
        pass

    # Add lazy loading mechanisms for layout models, refer to
    # layoutparser.ocr.BaseOCRAgent
    # TODO: Build a metaclass for lazy module loader
    @property
    @abstractmethod
    def DEPENDENCIES(self):
        """DEPENDENCIES lists all necessary dependencies for the class."""
        pass

    @property
    @abstractmethod
    def MODULES(self):
        """MODULES instructs how to import these necessary libraries."""
        pass

    @classmethod
    def _import_module(cls):
        """Import each entry of cls.MODULES and bind it onto the class
        under its declared import_name; raise with an install hint when a
        module is unavailable."""
        for spec in cls.MODULES:
            module_path = spec["module_path"]
            if not importlib.util.find_spec(module_path):
                raise ModuleNotFoundError(
                    f"\n "
                    f"\nPlease install the following libraries to support the class {cls.__name__}:"
                    f"\n pip install {' '.join(cls.DEPENDENCIES)}"
                    f"\n "
                )
            setattr(cls, spec["import_name"], importlib.import_module(module_path))

    def __new__(cls, *args, **kwargs):
        # Resolve the third-party imports before constructing the instance.
        cls._import_module()
        return super().__new__(cls)
class Detectron2LayoutModel(BaseLayoutModel):
    """Create a Detectron2-based Layout Detection Model

    Args:
        config_path (:obj:`str`):
            The path to the configuration file.
        model_path (:obj:`str`, None):
            The path to the saved weights of the model.
            If set, overwrite the weights in the configuration file.
            Defaults to `None`.
        label_map (:obj:`dict`, optional):
            The map from the model prediction (ids) to real
            word labels (strings). If the config is from one of the supported
            datasets, Layout Parser will automatically initialize the label_map.
            Defaults to `None`.
        enforce_cpu(:obj:`bool`, optional):
            When set to `True`, it will enforce using cpu even if it is on a CUDA
            available device.
        extra_config (:obj:`list`, optional):
            Extra configuration passed to the Detectron2 model
            configuration. The argument will be used in the `merge_from_list
            <https://detectron2.readthedocs.io/modules/config.html
            #detectron2.config.CfgNode.merge_from_list>`_ function.
            Defaults to `None`, meaning no extra configuration.
    Examples::
        >>> import layoutparser as lp
        >>> model = lp.models.Detectron2LayoutModel('lp://HJDataset/faster_rcnn_R_50_FPN_3x/config')
        >>> model.detect(image)
    """
    DEPENDENCIES = ["detectron2"]
    MODULES = [
        {
            "import_name": "_engine",
            "module_path": "detectron2.engine",
        },
        {"import_name": "_config", "module_path": "detectron2.config"},
    ]

    def __init__(
        self,
        config_path,
        model_path=None,
        label_map=None,
        extra_config=None,
        enforce_cpu=False,
    ):
        # BUGFIX: the previous default extra_config=[] was a mutable default
        # shared across every call and mutated below via extend(); copy the
        # caller's list (or start fresh) instead.
        extra_config = list(extra_config) if extra_config is not None else []

        if config_path.startswith("lp://") and label_map is None:
            # BUGFIX: this used config_path.lstrip("lp://"), which strips a
            # *character set* (l, p, :, /), not the prefix, corrupting dataset
            # names that start with any of those characters.
            dataset_name = config_path[len("lp://"):].split("/")[0]
            label_map = LABEL_MAP_CATALOG[dataset_name]

        if enforce_cpu:
            extra_config.extend(["MODEL.DEVICE", "cpu"])

        # Build the detectron2 config: file first, then the extra overrides.
        cfg = self._config.get_cfg()
        config_path = PathManager.get_local_path(config_path)
        cfg.merge_from_file(config_path)
        cfg.merge_from_list(extra_config)

        if model_path is not None:
            cfg.MODEL.WEIGHTS = model_path
        cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
        self.cfg = cfg

        self.label_map = label_map
        self._create_model()

    def gather_output(self, outputs):
        """Convert detectron2 prediction instances into a ``Layout`` of
        scored, labelled ``TextBlock`` rectangles."""
        instance_pred = outputs["instances"].to("cpu")

        layout = Layout()
        scores = instance_pred.scores.tolist()
        boxes = instance_pred.pred_boxes.tensor.tolist()
        labels = instance_pred.pred_classes.tolist()

        for score, box, label in zip(scores, boxes, labels):
            x_1, y_1, x_2, y_2 = box

            # Translate the numeric class id to a human-readable label if a
            # label_map is configured; unknown ids fall through unchanged.
            if self.label_map is not None:
                label = self.label_map.get(label, label)

            cur_block = TextBlock(
                Rectangle(x_1, y_1, x_2, y_2), type=label, score=score
            )
            layout.append(cur_block)

        return layout

    def _create_model(self):
        # DefaultPredictor loads the weights and handles pre/post-processing.
        self.model = self._engine.DefaultPredictor(self.cfg)

    def detect(self, image):
        """Detect the layout of a given image.
        Args:
            image (:obj:`np.ndarray` or `PIL.Image`): The input image to detect.
        Returns:
            :obj:`~layoutparser.Layout`: The detected layout of the input image
        """
        # Convert PIL Image Input
        if isinstance(image, Image.Image):
            if image.mode != "RGB":
                image = image.convert("RGB")
            image = np.array(image)

        outputs = self.model(image)
        layout = self.gather_output(outputs)
        return layout
|
999,033 | c8e05ce3828bea3e91a3019556ce5762b8ddac96 | #-*- coding:utf-8 -*-
from openerp import models, fields, api
class setting_moveprocess_receivemailid(models.Model):
    # Settings model: boolean toggles for receipt-number related options.
    # Field labels are user-facing (Traditional Chinese) and must stay as-is;
    # English glosses are given in the comments.
    _name = 'setting.moveprocess.receivemailid'
    billrecivnum = fields.Boolean('結算收件編號')  # settlement receipt number
    temporpaynum = fields.Boolean('暫繳收件編號')  # provisional-payment receipt number
    billtempor = fields.Boolean('結算及暫繳收件編號')  # settlement & provisional receipt number
    earndistratio = fields.Boolean('盈餘分配比率')  # earnings distribution ratio
    notefinote = fields.Boolean('申報書附件註記')  # declaration attachment note
999,034 | 5b04404f08f962983914ac43a6f5a6220e67f0e6 | #! /usr/bin/env python
#encoding=utf-8
'''
Created on 2010-10-21
@author: jiugao
'''
from client.Authentication import Authentication
from common.Config import Config
from common.exception.ConnectionException import ConnectionException
from common.exception.SessionClosedException import SessionClosedException
from common.exception.URLException import URLException
from destination.DestQueryInfo import DestQueryInfo
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket, TTransport
from thrift.transport.TTransport import TException
from time import sleep
import json, os
import logging
import traceback
from generated.routerservice import RouterService, constants
import socket
from generated.routerservice.ttypes import RouterException
from common.util import SleepUtils
logger = logging.getLogger("tt2-client.Destination")
MAX_FAILURE_RETRIES = 3
class Destination(object):
    """Looks up broker destinations through a thrift RouterService.

    NOTE: Python 2 code ("except TException, e" syntax below).
    """
    def __init__(self, conf):
        self.conf = conf  # Config supplying candidate router URLs
        self.close = False  # set once destroy() has been called
        self.transport = None  # lazily opened thrift transport
    def connect(self):
        # Open a framed thrift connection to a randomly chosen router,
        # retrying up to MAX_FAILURE_RETRIES times before giving up.
        SleepUtils.sleepRandom(3)
        self.url = self.conf.getRandomUrl()
        logger.info("connecting to router " + str(self.url))
        self.close = False
        for i in range(MAX_FAILURE_RETRIES):
            try:
                self.transport = TTransport.TBufferedTransport(TSocket.TSocket(self.url.ip, self.url.port))
                self.transport = TTransport.TFramedTransport(self.transport)
                protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
                self.client = RouterService.Client(protocol)
                self.transport.open()
                logger.info("router connection has been established")
                return
            except TException:
                # clear the half-built transport so the next attempt starts clean
                logger.info("can not connect to router: " + repr(self.url) + " and try " + str(i) + " times " + str(traceback.format_exc()))
                self.transport = None
                sleep(3)
                continue
        logger.error("can not connect to router: " + repr(self.url))
        raise ConnectionException("can not connect to router: " + str(self.url))
    def getDest(self, queryInfo):
        # Ask the router for broker destinations matching queryInfo. Connects
        # lazily; on any thrift/router error the connection is torn down so
        # the next call reconnects.
        logger.info("get Dest for " + repr(queryInfo))
        if self.close is True:
            raise SessionClosedException("session has been closed")
        if self.transport is None:
            self.connect()
        urls = None
        try:
            prop = {}
            prop[constants.LOCAL_HOST] = str(os.getpid()) + "@" + socket.gethostname()
            prop[constants.RECVWINSIZE] = "0"
            prop[constants.TIMEOUT] = str(queryInfo.timeout)
            prop[constants.TYPE] = "PUB"
            urls = self.client.getBroker(queryInfo.user, queryInfo.pwd, queryInfo.topic, queryInfo.only, prop)
            # urls = "{\"sessionId\":\"8045f5cb0521c82598584f3151b1a1d5\",\"brokerserver\":[\"{\\\"master\\\":\\\"localhost:8888\\\",\\\"slave\\\":[]}\"]}"
            logger.debug("Urls from router: " + urls)
            return UrlDecoder(urls)
        except TException, e:
            self.__cleanup()
            raise URLException("get url via thrift failed. " + repr(e) + str(traceback.format_exc()))
        except RouterException, e:
            self.__cleanup()
            raise URLException("get url via thrift failed. " + repr(e) + str(traceback.format_exc()))
        except Exception:
            # getBroker succeeded but the returned JSON could not be decoded
            self.__cleanup()
            raise Exception("url: " + urls + " decoder failed. " + str(traceback.format_exc()))
    def __cleanup(self):
        # Close and forget the transport; safe to call repeatedly.
        if self.transport is not None:
            try:
                self.transport.close()
            finally:
                self.transport = None
    def destroy(self):
        # Tear down the connection and mark the session closed (idempotent).
        logger.info("destroy router connection")
        if self.close is True:
            return
        if self.transport is None:
            self.close = True
            return
        self.__cleanup()
        self.close = True
class Dest(object):
    """Decoded destination: a session id plus its broker server list."""
    def __init__(self, sid, bs):
        self.sessionId = sid
        self.brokerserver = bs
    def __repr__(self):
        # NOTE: dict.iteritems() -- Python 2 only.
        L = ["%s=%r" % (k, v) for k, v in self.__dict__.iteritems()]
        return "%s(%s)" % (self.__class__.__name__, ",".join(L))
def UrlDecoderHook(s):
    """json object_hook: turn each decoded JSON object into a Dest."""
    return Dest(s.get("sessionId"), s.get("brokerserver"))
def UrlDecoder(s):
    # Parse the router's JSON reply into a Dest via UrlDecoderHook.
    return json.loads(s, object_hook=UrlDecoderHook)
if __name__ == '__main__':
    # Manual smoke test (Python 2 prints): case0 requires a live router
    # reachable via ../../conf/client.conf; case1 only exercises UrlDecoder.
    print "case0##############"
    conf = Config('../../conf/client.conf')
    qi = DestQueryInfo(Authentication("tt", "2"), "test1", "1", "20000")
    print qi
    dest = Destination(conf);
    url = dest.getDest(qi)
    print url
    dest.destroy()
    print "case1##############"
    testUrl = "{\"sessionId\":\"8045f5cb0521c82598584f3151b1a1d5\",\"brokerserver\":[\"dwbasis130001.sqa.cm4:39903\", \"er2\"]}"
    dest = UrlDecoder(testUrl)
    print dest.sessionId
    print dest.brokerserver
    print dest.brokerserver[0]
    print dest.brokerserver[1]
999,035 | 24bcabf8e8d54fee37b69c45f04a25b1661d9d0d | from tuyaha.devices.climate import TuyaClimate
from tuyaha.devices.cover import TuyaCover
from tuyaha.devices.fan import TuyaFanDevice
from tuyaha.devices.light import TuyaLight
from tuyaha.devices.lock import TuyaLock
from tuyaha.devices.scene import TuyaScene
from tuyaha.devices.switch import TuyaSwitch
def get_tuya_device(data, api):
    """Instantiate the Tuya wrapper class matching ``data["dev_type"]``.

    Args:
        data: raw device description dict from the Tuya API; its "dev_type"
            field selects the wrapper class.
        api: the Tuya API session handed to the device wrapper.

    Returns:
        A list with the single wrapped device, or an empty list when the
        dev_type is missing or unsupported (same contract as before).
    """
    # Dispatch table instead of a long if/elif chain.
    dev_type_to_class = {
        "light": TuyaLight,
        "climate": TuyaClimate,
        "scene": TuyaScene,
        "fan": TuyaFanDevice,
        "cover": TuyaCover,
        "lock": TuyaLock,
        "switch": TuyaSwitch,
    }
    device_class = dev_type_to_class.get(data.get("dev_type"))
    return [device_class(data, api)] if device_class is not None else []
|
999,036 | c436c9fd8dc9acf9cd45dd8a65de4e8e8b564c4e | # import time
# from selenium import webdriver
# from selenium.common.exceptions import NoSuchElementException
#
# browser = webdriver.Chrome()
# url = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
# browser.get(url)
# browser.switch_to.frame('iframeResult')
# try:
# logo = browser.find_element_by_class_name('logo')
# except NoSuchElementException:
# print('NO LOGO')
# browser.switch_to.parent_frame()
# logo = browser.find_element_by_class_name('logo')
# print(logo)
# print(logo.text)
# from selenium import webdriver
#
# browser = webdriver.Chrome()
# browser.implicitly_wait(10)
# browser.get('https://www.zhihu.com/explore')
# input = browser.find_element_by_class_name('zu-top-add-question')
# print(input)
# from selenium import webdriver
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
#
# browser = webdriver.Chrome()
# browser.get('https://www.taobao.com/')
# wait = WebDriverWait(browser, 10)
# input = wait.until(EC.presence_of_element_located((By.ID, 'q')))
# button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.btn-search')))
# print(input, button)
import time
from selenium import webdriver
# Scratch demo: navigate the same Chrome session to two pages in turn.
# NOTE(review): `time` is imported above but unused, and the driver is never
# quit -- the Chrome process stays alive after the script exits.
browser = webdriver.Chrome()
browser.get('https://www.baidu.com/')
browser.get('https://www.taobao.com/')
999,037 | 74d0ac76ec6411c6efec86b2cd9d9eea9f6cb4cd | from bs4 import BeautifulSoup
import re, sys, urllib
import glob
def googleUPC(upc):
    """Normalise a UPC string to 14 digits.

    Takes the first digit run beginning with a non-zero digit and left-pads
    it with zeros to 14 characters (runs longer than 14 pass through as-is).
    """
    significant = re.findall("[1-9][0-9]+", upc)[0]
    return significant.zfill(14)
def mkGoogleSUUrls(urls):
    """Rewrite /shopping/product/ links into google seller ("online") URLs,
    keeping the product path and the q= UPC from each original link."""
    template = 'http://www.google.com/%s/online?q=%s'
    path_pattern = '/(shopping/product/[0-9]+)?'
    upc_pattern = 'q=([0-9]+)'
    def fixUrl(link):
        product_path = re.findall(path_pattern, link)[0]
        upc = re.findall(upc_pattern, link)[0]
        return template % (product_path, upc)
    return map(fixUrl, urls)
def splitUrls(urls):
    """Split urls into (product-detail urls, all other urls), preserving order."""
    marker = '/shopping/product/'
    products = [u for u in urls if marker in u]
    others = [u for u in urls if marker not in u]
    return (products, others)
def mkGoogleSrchUrls(upcs):
    """Build a google shopping search URL for each UPC (normalised first)."""
    template = 'https://www.google.com/search?output=search&tbm=shop&q=%s'
    return [template % googleUPC(upc) for upc in upcs]
class GoogleSU():
    """/shopping/product/ parser"""
    # NOTE: Python 2 module (urllib.urlopen below).
    def __init__( self, data ):
        # data: raw HTML of a google shopping product page
        self.soup = BeautifulSoup( data, 'lxml' )
    def parse( self ):
        # Return absolute seller URLs from the "primary seller name" spans.
        self.prods = self.soup.findAll( "span", "os-seller-name-primary" )
        urls = [ prod.findAll('a')[0]['href'] for prod in self.prods ]
        return map( lambda x : 'http://www.google.com' + x, urls )
    def fetchUrl( self, url ):
        # Fetch a page body (blocking, no error handling).
        return urllib.urlopen( url ).read()
class GoogleShopping():
    """Shopping Search Results Parser"""

    def __init__(self, data):
        # data: raw HTML of a google shopping results page
        self.soup = BeautifulSoup(data, 'lxml')

    def parse(self):
        """Return product links: shopping anchors ("_po") plus the links
        inside ordinary result headings ("h3.r")."""
        shpu = self.soup.findAll("a", "_po")
        durl = self.soup.findAll("h3", "r")
        links = [x['href'] for x in shpu] +\
                [x.findAll('a')[0]['href'] for x in durl]
        return links

    def numOfProds(self):
        """Return the total product count reported by the pager text.

        BUGFIX: the computed count was previously dropped (no return
        statement), so callers such as genUrls() always received None.
        """
        prods = self.soup.findAll('div', 'pag-n-to-n-txt')
        rg = 'of ([0-9]+)'
        num = int(re.findall(rg, prods[0].text)[0])
        return num

    def getNextPage(self):
        """Return the href of the pager's 'next page' button."""
        pgBottom = self.soup.findAll('div', "goog-inline-block jfk-button jfk-button-standard jfk-button-narrow jfk-button-collapse-left")
        return pgBottom[0]['href']

    def genUrls(self):
        """Return follow-up result-page URLs (one per additional page of 25
        products), or [] when everything fits on the first page."""
        nprods = self.numOfProds()
        if nprods > 25:
            npage = self.getNextPage()
            npages = int(nprods/25)
            startPages = ['start:' + str(25*(i+1)) for i in range(npages)]
            return ['http://www.google.com' + re.sub('start:25', x, npage) for x in startPages]
        else:
            return []
class GoogleSearch():
    """Plain google web-search results parser."""
    def __init__( self, data ):
        # data: raw HTML of a google search results page
        self.soup = BeautifulSoup( data, 'lxml' )
    def parse( self ):
        # Each organic result is an <li class="g">; return its first link.
        srchResults = self.soup.findAll( 'li', 'g' )
        return [ x.findAll('a')[0]['href'] for x in srchResults ]
|
999,038 | ca4f9507519b0b44bcbe861685578d33ea5d1bf0 | import pandas as pd
from sklearn.model_selection import train_test_split
# Kaggle "Housing Prices" missing-values exercise: compare dropping columns
# against mean/median imputation on the numeric features only.
# Read the data
X_full = pd.read_csv('home-data-for-ml-course/train.csv')
X_test_full = pd.read_csv('home-data-for-ml-course/test.csv')
# Remove rows with missing SalePrice target
X_full.dropna(axis=0, subset=['SalePrice'], inplace=True)
# separate target from predictors
y = X_full.SalePrice
X_full.drop(['SalePrice'], axis=1, inplace=True)
# using only numerical predictors
X = X_full.select_dtypes(exclude=['object'])
X_test = X_test_full.select_dtypes(exclude=['object'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
# report which training columns contain missing values
missing_val_count_by_column = X_train.isnull().sum()
print(missing_val_count_by_column[missing_val_count_by_column>0])
# Shape X_train = (1168, 37)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
# Function for comparing different approaches
def score_dataset(X_train, X_valid, y_train, y_valid):
    # Fixed-seed random forest so the three preprocessing strategies below
    # are directly comparable by validation MAE.
    model = RandomForestRegressor(n_estimators=100, random_state=0)
    model.fit(X_train, y_train)
    return mean_absolute_error(model.predict(X_valid), y_valid)
############################
# Get names of columns with missing values
missing_columns = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(missing_columns, axis=1)
reduced_X_valid = X_valid.drop(missing_columns, axis=1)
# Approach 1 - Drop columns with missing values
print("MAE (Drop columns with missing values):")
print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid))
# 17952.5914
############################
# Approach 2 - Imputation using Mean
# (imputer is fit on the training split only, then applied to validation)
from sklearn.impute import SimpleImputer
myimputer = SimpleImputer()
imputed_X_train = pd.DataFrame(myimputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(myimputer.transform(X_valid))
# Imputation removed column names; put them back
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
print("MAE (Imputation):")
print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid))
# 18250.6080
############################
# Approach 2 - Imputation using Median
final_imputer = SimpleImputer(strategy='median')
final_X_train = pd.DataFrame(final_imputer.fit_transform(X_train))
final_X_valid = pd.DataFrame(final_imputer.transform(X_valid))
final_X_train.columns = X_train.columns
final_X_valid.columns = X_valid.columns
print("MAE (Imputation using Median):")
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(final_X_train, y_train)
print(mean_absolute_error(model.predict(final_X_valid), y_valid))
# 18090.3841
############################
# Pre process test Data
final_X_test = pd.DataFrame(final_imputer.transform(X_test))
preds_test = model.predict(final_X_test)
# Output
output = pd.DataFrame({'Id':X_test.index, 'SalePrice':preds_test})
output.to_csv('submission_intermediateML_missing_values.csv', index=False)
|
999,039 | 5ad6097331075544d3381681699b33a7daca6c5b | from django.core.management.base import BaseCommand
from django.db.models import Q
from ...models import Harbor
class Command(BaseCommand):
    """Management command: point every harbor that has a Servicemap ID at
    its static image under /img/helsinki_harbors/<servicemap_id>.jpg."""
    def handle(self, **options):
        updated_harbors = 0
        # NOTE(review): missing_images is reported below but never populated --
        # no check that the image file actually exists is performed. Presumably
        # such a check was intended after building image_filename; confirm.
        missing_images = []
        for harbor in Harbor.objects.exclude(
            Q(servicemap_id=None) | Q(servicemap_id="")
        ):
            image_filename = "{}.jpg".format(harbor.servicemap_id)
            image_file = "/img/helsinki_harbors/{}".format(image_filename)
            harbor.image_file = image_file
            harbor.save()
            updated_harbors += 1
        self.stdout.write("Successfully updated {} harbors".format(updated_harbors))
        if missing_images:
            self.stderr.write(
                "Could not find images for harbors with following Servicemap IDs:"
            )
            for id in missing_images:
                self.stderr.write(id)
|
999,040 | ce7337866e548119bf9d38e9c4752df5de7425c0 | # -*- coding: utf-8 -*-
'''
Slightly resembles an install script, but actually there is not much
to install.
'''
import os
import sys
from distutils.core import setup
# Make the in-tree package importable so metadata can be read from it.
sys.path.append("src")
from pynal.models import Config
# All distribution metadata lives in pynal.models.Config so it is defined
# in exactly one place.
setup(name=Config.appname.lower(), version=Config.version,
      url=Config.homepage, license=Config.license,
      package_dir={"pynal": "src/pynal"},
      packages=["pynal", "pynal.models", "pynal.view", "pynal.control"],
      scripts=["pynal"],)
|
999,041 | 9486c699aa276fae050863203eeec9639c2561f5 | #coding:utf8
'''
Created on 2011-4-1
@author: sean_lan
'''
from app.share.dbopear import dbMail
from app.game.component.Component import Component
from app.game.component.mail.Mail import Mail
import math
class CharacterMailListComponent(Component):
    """Character mail-list component.

    Wraps the owning character's mailbox operations (counting, listing,
    reading, deleting, saving and sending mail), all backed by dbMail.
    """
    def __init__(self, owner, mailList=None):
        '''
        @param mailList: [] initial mail list
        '''
        Component.__init__(self, owner)
        # BUGFIX: the previous default (mailList = []) was a mutable default
        # argument shared by every instance constructed without an explicit
        # list; use None as the sentinel and create a fresh list per instance.
        self._mailList = mailList if mailList is not None else []
    def getMailCnd(self, mtype):
        '''Return the number of mails of the given type.
        @param mtype: int mail type: 0 all, 1 system, 2 player, 3 saved
        '''
        cnd = dbMail.getPlayerMailCnd(self._owner.baseInfo.id, mtype)
        return cnd
    def checkMyMail(self, mailID):
        '''Return truthy when the mail belongs to the owning character.'''
        result = dbMail.checkMail(mailID, self._owner.baseInfo.id)
        return result
    def getPageCnd(self, responseMailType, limit=4):
        # Number of pages needed at `limit` mails per page (always >= 1).
        cnd = self.getMailCnd(responseMailType)
        pageCnd = math.ceil(float(cnd)/limit)
        if pageCnd == 0:
            pageCnd = 1
        return int(pageCnd)
    def getMailList(self):
        '''Return the character's mail list.
        '''
        data = {}
        mailList = dbMail.getPlayerMailList(self._owner.baseInfo.id)
        data['maillist'] = mailList
        return data
    def readMail(self, mailID):
        '''Read a mail (marks it as read) and return its formatted content.
        @param mailID: int mail ID
        '''
        result = self.checkMyMail(mailID)
        if not result:
            # not this character's mail
            return {'result':False,'message':u""}
        m = Mail(id = mailID)
        m.updateMainInfo({'isReaded':1})
        data = m.formatMailInfo()
        return {'result':True,'data':data}
    def deleteMail(self, mailID):
        '''Delete a mail after verifying ownership.'''
        result = self.checkMyMail(mailID)
        if not result:
            return {'result':False,'message':u""}
        m = Mail(id = mailID)
        result = m.destroyMail()
        return {'result':result,'message':u""}
    def saveMail(self, mailID):
        '''Mark a mail as saved after verifying ownership.'''
        result = self.checkMyMail(mailID)
        if not result:
            return {'result':False,'message':u""}
        m = Mail(id = mailID)
        result = m.updateMainInfo({'isSaved':1})
        if not result:
            return {'result':False,'message':u""}
        return {'result':True,'message':u""}
    def BatchDelete(self, mailIDList):
        '''Delete several mails; fails up-front if any is not owned.'''
        # first pass: verify ownership of every mail before deleting any
        for mailId in mailIDList:
            result = self.checkMyMail(mailId)
            if not result:
                return {'result':False,'message':u""}
        for mailId in mailIDList:
            m = Mail(id = mailId)
            result = m.destroyMail()
        return {'result':True,'message':u""}
    def sendMail(self, receiverId, title, content):
        '''Send a mail from the owning character.
        @param receiverId: int recipient character ID
        @param title: str mail title
        @param content: str mail body
        '''
        m = Mail( title=title, senderId =self._owner.baseInfo.id, receiverId=receiverId,\
                  sender = self._owner.baseInfo.getNickName(),content=content)
        result = m.mailIntoDB()
        return result
|
999,042 | cc2bc69bd29e8e78a5366203bd866a25af0ab5f8 | from django.contrib import admin
class PermissionAdmin(admin.ModelAdmin):
    """Push this to intercept .queryset() calls from OwnableAdmin
    """
    def queryset(self, request):
        """Return items based on request

        Superusers and members of any group whose name contains "editors"
        see every object; all other users see an empty queryset.
        """
        qs = self.model._default_manager.get_query_set()
        ordering = self.get_ordering(request)
        if ordering:
            qs = qs.order_by(*ordering)
        if request.user.is_superuser:
            return qs
        elif request.user.groups.filter(name__icontains='editors').exists():
            return qs
        return qs.none()
from mezzanine.blog import admin as blog_admin
# Monkey-patch: splice PermissionAdmin into BlogPostAdmin's base classes just
# before OwnableAdmin, so PermissionAdmin.queryset() wins in the MRO.
bpa_bases = list(blog_admin.BlogPostAdmin.__bases__)
bpa_bases_names = [b.__name__ for b in bpa_bases]
pa_idx = bpa_bases_names.index('OwnableAdmin')
bpa_bases.insert(pa_idx, PermissionAdmin)
blog_admin.BlogPostAdmin.__bases__ = tuple(bpa_bases)
# EOF
|
999,043 | fb9e055381143a667e36d0194e98451c1cf7d587 | import csv
import keras
import Controller
from keras.models import Sequential, Model
from keras.layers import Dense
from keras.layers import Input
from keras.layers import concatenate
from keras.optimizers import Adam
import numpy as np
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json, load_model
#from keras.layers import Dense, Flatten, Input, merge, Lambda
#from keras.initializations import normal, identity
from pathlib import Path
PI = 3.1415926
def readData():
    # Load training samples from fulldata2.csv: a 7-value feature vector per
    # row (columns 2,3, derived track positions, min lidar range, columns
    # 0,1) and a 3-value target (columns 17-19), returned as a shuffled
    # 200k-row subset of numpy arrays.
    # NOTE(review): the exact meaning of each CSV column is inferred from the
    # inline comments only -- confirm against the data-collection code.
    X = []
    Y = []
    trackPoslast=0.0
    with open('fulldata2.csv') as File:
        reader=csv.reader(File)
        for row in reader:
            rowFlatten = []
            #steering control
            rowFlatten.append(float(row[2]))
            rowFlatten.append(float(row[3]))
            # lidar fields are stored as "[a,b,...]": strip the brackets
            lidar2 = row[6].strip().split(',')
            lidar2[0] = lidar2[0][1:]
            lidar2[len(lidar2)-1]=lidar2[len(lidar2)-1][:-1]
            lidar2 = [float(i) for i in lidar2]
            trackPos = Controller.trackPosCalc(lidar2)
            rowFlatten.append(float(trackPos))
            rowFlatten.append(float(trackPoslast))
            trackPoslast = trackPos
            #Speed control
            lidar1 = row[5].strip().split(',')
            lidar1[0] = lidar1[0][1:]
            lidar1[len(lidar1)-1]=lidar1[len(lidar1)-1][:-1]
            lidar1 = [float(i) for i in lidar1]
            distance=min(lidar1)
            rowFlatten.append(float(distance))
            rowFlatten.append(float(row[0]))
            rowFlatten.append(float(row[1]))
            #Appending inputs
            X.append(rowFlatten)
            #Appending outputs
            Y.append([float(row[17]), float(row[18]),float(row[19])])
    #Converting to numpy array
    X = np.array(X)
    Y = np.array(Y)
    index = np.arange(len(X))
    index = np.random.permutation(index)#Shuffle the array
    #selecting training dataset
    X = X[index[:200000]]
    Y = Y[index[:200000]]
    print(X.shape)
    return X,Y
def normalizeData(X, Y):
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
X = (X - X_mean)/X_std
Y_mean = np.mean(Y, axis=0)
Y_std = np.std(Y, axis=0)
#print(X_mean, X_std,':', Y_mean, Y_std)
np.savez('./normalizeParameters', X_mean=X_mean, X_std=X_std, Y_mean=Y_mean, Y_std=Y_std)
return X, Y
def createModel():
    '''
    model = Sequential()
    model.add(Dense(40, input_dim=7, kernel_initializer='glorot_normal', bias_initializer='zeros', activation='relu'))
    model.add(Dense(40, kernel_initializer='glorot_normal', bias_initializer='zeros', activation='relu'))
    model.add(Dense(20, kernel_initializer='glorot_normal', bias_initializer='zeros', activation='relu'))
    #model.add(Dense(20, kernel_initializer='glorot_normal', bias_initializer='zeros', activation='relu'))
    model.add(Dense(3, kernel_initializer='glorot_normal', bias_initializer='zeros', activation='tanh'))
    '''
    # Functional model: a shared 20-40-40-20 relu trunk over the 7 input
    # features feeding three single-unit heads (one tanh in [-1,1], two
    # sigmoid in [0,1]) concatenated into one 3-value output.
    # NOTE(review): which head drives which control is not visible here --
    # confirm against the training targets in readData().
    inp=Input(shape=(7,))
    layer1=Dense(20,activation='relu', kernel_initializer='glorot_normal')(inp)
    layer2=Dense(40,activation='relu', kernel_initializer='glorot_normal')(layer1)
    layer3=Dense(40,activation='relu', kernel_initializer='glorot_normal')(layer2)
    layer4=Dense(20,activation='relu', kernel_initializer='glorot_normal')(layer3)
    layer4a=Dense(1,activation='tanh', kernel_initializer='glorot_normal')(layer4)
    layer4b=Dense(1,activation='sigmoid', kernel_initializer='glorot_normal')(layer4)
    layer4c=Dense(1,activation='sigmoid', kernel_initializer='glorot_normal')(layer4)
    out=concatenate([layer4a, layer4b, layer4c])
    model=Model(inputs=inp, outputs=out)
    return model
def trainModel(model, X, Y):
    # Compile with Adam/MSE and fit for 50 epochs, checkpointing the weights
    # with the lowest *training* loss to weights.best.hdf5.
    #RMSprop=keras.optimizers.RMSprop(lr=0.01, rho=0.9, epsilon=None, decay=0.0)
    #sgd = keras.optimizers.SGD(lr=0.01, momentum = 0.9)
    adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss='mse', optimizer=adam)
    # checkpoint
    filePath = "weights.best.hdf5"
    checkpoint = ModelCheckpoint(filePath, monitor='loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    model.fit(X, Y, epochs=50, batch_size=32, callbacks=callbacks_list, verbose=1)
def saveModel(model):
    """Serialize the network: architecture to model.json, weights to model.h5."""
    with open("model.json", "w") as json_file:
        json_file.write(model.to_json())
    model.save_weights("model.h5")
    print("Saved model to disk")
if __name__ == "__main__":
    X, Y = readData()
    X, Y = normalizeData(X, Y)
    # Resume from the best checkpoint when both the architecture and the
    # weights exist on disk; otherwise start from a fresh model.
    if (Path("model.json").is_file() and Path("weights.best.hdf5").is_file()):
        with open('model.json', 'r') as jfile:
            model = model_from_json(jfile.read())
        model.load_weights("weights.best.hdf5")
        print("load from the existing model...")
    else:
        model = createModel()
        print("create a new model")
    trainModel(model, X, Y)
    saveModel(model)
|
999,044 | 257de79de6e45c9f9ce9f1f634ed152ac5d3f091 | import numpy as np
from mytree import DecisionTreeClassifier as mytreeclf
class ADT():
    """Scorer built on mytree's DecisionTreeClassifier.

    predict() does not return class labels: each sample is scored by the
    smallest distance to any split threshold met on its decision path,
    signed by the reached leaf's majority class, then min-max scaled
    into [0, 1].
    """
    def __init__(self, criterion, max_depth, random_state=None):
        self.criterion = criterion
        self.max_depth = max_depth
        self.random_state = random_state
    def fit(self, X, y):
        # Train the underlying tree with the configured hyper-parameters.
        self.clf = mytreeclf(max_depth=self.max_depth,
                             criterion=self.criterion,
                             random_state=self.random_state)
        self.clf.fit(X, y)
    def predict(self, X):
        # 1e10 acts as "+infinity": any real threshold distance is smaller.
        preds = np.ones(X.shape[0])*1e10
        # Flattened tree arrays -- NOTE(review): assumes mytree mirrors
        # sklearn's tree_ attribute layout; confirm.
        features = self.clf.tree_.feature
        threshold = self.clf.tree_.threshold
        children_left = self.clf.tree_.children_left
        children_right = self.clf.tree_.children_right
        # Majority class per node mapped to a sign: +1 / -1, 0 on a tie
        # (assumes binary classification: two counts in v[0]).
        value = []
        for v in self.clf.tree_.value:
            if v[0][0] < v[0][1]:
                value.append(1)
            elif v[0][0] > v[0][1]:
                value.append(-1)
            else:
                value.append(0)
        for i in range(X.shape[0]):
            node = 0
            # Walk root-to-leaf (internal nodes have children_left > 0),
            # tracking the closest split threshold seen along the path.
            while children_left[node] > 0:
                dist = abs(X[i, features[node]]-threshold[node])
                if dist < preds[i]:
                    preds[i] = dist
                if X[i, features[node]] <= threshold[node]:
                    node = children_left[node]
                else:
                    node = children_right[node]
            preds[i] *= value[node]
        # Min-max rescale all scores into [0, 1].
        preds -= np.min(preds)
        maxi = np.max(preds)
        if maxi != 0:
            preds /= maxi
        return preds
|
999,045 | 4cfc0123137e37198a150b4a536e1d7d2963f94e | from models import conv2d_model, dense_model
import numpy as np
from geometries import square_geometry
from hamiltonians import AFH
# Experiment hyper-parameters consumed by the training code elsewhere.
model = dense_model          # network builder used for the ansatz
epochs = 100000              # total training epochs
epoch_size = 1000            # samples per epoch
n_minibatches = 5            # minibatches per epoch
num_nm_rhs = 1000 # required for <H> estimation
num_n_samples = 100000 # required for -\log <psi>^2 estimation
random_seed = 42
input_shape = (3, 3)         # presumably the lattice dimensions -- confirm against geometry
hamiltonian = AFH            # antiferromagnetic Heisenberg Hamiltonian
geometry = square_geometry
len_thermalization = 1000    # warm-up sweeps before sampling
lr = 3e-2                    # learning rate
n_parallel = 1
n_drop = 5
n_epoch_passes = 1
patience = 30                # early-stopping patience
|
999,046 | 6497eb5e5b26c12a8110b2f9f89ea39180af30c9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
描述:三数之和 (难度:中等)
给定一个包含 n 个整数的数组 nums,判断 nums 中是否存在三个元素 a,b,c ,使得 a + b + c = 0 ?找出所有满足条件且不重复的三元组。
注意:答案中不可以包含重复的三元组。
示例:
给定数组 nums = [-1, 0, 1, 2, -1, -4],
满足要求的三元组集合为:
[
[-1, 0, 1],
[-1, -1, 2]
]
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/3sum
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution:
    def threeSum(self, nums: list[int]) -> list[list[int]]:
        """Return all unique triplets [a, b, c] from ``nums`` with a+b+c == 0.

        Sorts once, then for each anchor ``nums[i]`` scans the remainder with
        two pointers, skipping duplicate values so no triplet repeats.  O(n^2)
        time; ``nums`` is sorted in place.

        Fix: the original annotated with ``List[int]`` without importing
        ``typing.List``, which raises NameError the moment the class is
        defined; builtin generics need no import.
        """
        if len(nums) < 3:
            return []
        nums.sort()
        length = len(nums)
        result = []
        for i in range(length - 2):
            if nums[i] > 0:
                break  # everything from here on is positive; no zero sum possible
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # skip duplicate anchors
            left = i + 1
            right = length - 1
            while left < right:
                s = nums[i] + nums[left] + nums[right]
                if s < 0:
                    left += 1
                    while left < right and nums[left] == nums[left - 1]:
                        left += 1
                elif s > 0:
                    right -= 1
                    while left < right and nums[right] == nums[right + 1]:
                        right -= 1
                else:
                    result.append([nums[i], nums[left], nums[right]])
                    left += 1
                    right -= 1
                    # Skip duplicates on both sides after recording a hit.
                    while left < right and nums[left] == nums[left - 1]:
                        left += 1
                    while left < right and nums[right] == nums[right + 1]:
                        right -= 1
        return result
999,047 | ea72eb9fdd09bfac59bc91c99ce3628de34d3a80 | from django.conf.urls import url, include
from rest_framework import routers
from todos import views
from rest_auth.registration.views import VerifyEmailView
app_name = 'todos'  # URL namespace for this app

# REST routes for the todo API (SimpleRouter: no browsable API root view).
router = routers.SimpleRouter()
router.register(r'todolists', views.ToDoListViewSet)
router.register(r'task', views.TaskViewSet)
router.register(r'update_state', views.UpdateDoneViewSet)

urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^users/', include('users.urls')),
    url(r'^rest-auth/', include('rest_auth.urls')),
    url(r'^rest-auth/registration/', include('rest_auth.registration.urls'))
]
|
999,048 | e1934e46dadd3b9734ad8477681bc05cc748b412 | import re
from pandas import read_csv, DataFrame, concat, Series
from functools import singledispatch, reduce
from tqdm import tqdm
from transformers import PreTrainedTokenizerBase
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
import torch
#======================================================================
# Dataset preprocessing functions
#======================================================================
def generate_dataset(path : str, configs : list) -> Dataset:
    """
    Run ``path`` through the pipeline steps in ``configs`` and return the
    resulting Dataset.

    Each step is dispatched (via ``_generate_dataset_helper``'s singledispatch)
    on the type of the running value: str -> parse the csv into a DataFrame,
    DataFrame -> tokenize into an encodings dict, dict -> wrap in a Dataset.
    """
    # ``reduce`` can take the dispatcher directly; the original wrapped it in
    # the equivalent ``lambda x, y: _generate_dataset_helper(x, y)``.
    return reduce(_generate_dataset_helper, configs, path)
@singledispatch
def _generate_dataset_helper(path : str, args : list) -> DataFrame:
    # Base case (pipeline value is still a str path): parse the csv.
    return nlp_tc_df_parser(path, *args)
@_generate_dataset_helper.register
def _tokenize(df : DataFrame, kwargs : dict) -> dict:
    # DataFrame stage: tokenize the 'posts' column and attach 'type' as labels.
    # ``kwargs`` must contain the tokenizer plus its keyword arguments.
    tokenizer = kwargs.pop('tokenizer')  # NOTE: mutates the caller's dict
    encodings = dict(tokenizer(list(df['posts'].values), **kwargs))
    encodings['labels'] = list(df['type'].values)
    return encodings
class EncodedDataset(Dataset):
    """Torch dataset over tokenizer encodings plus a parallel ``labels`` list."""

    def __init__(self, encodings):
        # Keep the labels separately.  Unlike the original ``dict.pop`` this
        # does not mutate the caller's dictionary.
        self._labels = encodings['labels']
        self.encodings = {k: v for k, v in encodings.items() if k != 'labels'}

    def __getitem__(self, idx):
        """Return one example as a dict of tensors, including ``labels``."""
        x = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
        x['labels'] = torch.tensor(self._labels[idx])
        return x

    def __len__(self):
        return len(self._labels)
@_generate_dataset_helper.register
def _gen_tf_dataset(encodings : dict, kwargs : set) -> Dataset:
    # Final stage: wrap the encodings dict in a torch Dataset.
    return EncodedDataset(encodings)
#======================================================================
# Functions to preprocess a pandas.DataFrame from a csv
#======================================================================
def nlp_tc_df_parser(path : str, *args) -> DataFrame:
    """
    Read an nlp text-classification dataset (.csv) and pipe it through the
    cleaning steps in ``args``, each dispatched on its type by ``_parser``.

    Parameters
    ----------
    path : str
        path to dataset
    *args : list
        cleaning steps; each is fed to ``_parser`` with the running frame

    Returns
    -------
    df : DataFrame
        parsed (and optionally cleaned) DataFrame
    """
    # ``reduce`` returns the initial frame untouched when ``args`` is empty,
    # so the original ``if len(args) > 0`` branch was redundant.
    return reduce(lambda frame, step: _parser(step, frame), args, read_csv(path))
#======================================
# Helper Functions for nlp_tc_df_parser:
#======================================
#TODO str labels to num
# Remove hyperlinks that end with .com
@singledispatch
def _parser(strategy, df) -> DataFrame:
str_labels = pd.unique(df.type.values.tolist())
labels_dict = dict(zip(str_labels, list(range(len(str_labels)))))
df['type'] = df['type'].apply(lambda x: labels_dict[x])
return df
@_parser.register
def _hyper_link_cleaner(strategy : set, df) -> DataFrame:
    """Replace http/ftp/https URLs in ``df.posts`` with a single space."""
    # ``regex=True`` is required for the pattern to be treated as a regex:
    # pandas >= 2.0 defaults ``str.replace`` to literal matching.
    df.posts = df.posts.str.replace(
        r'(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?',
        " ",
        regex=True)
    return df
@_parser.register
def _remove_below_word_limit(strategy : int, df) -> DataFrame:
    """Drop rows whose ``posts`` text has <= ``strategy`` space-separated words."""
    # Filter without mutating the caller's frame: the original added a
    # temporary ``total_words`` column to ``df`` in place and then dropped it
    # from the (copied) result, leaving the extra column on the input frame.
    word_counts = df.posts.str.split(" ").map(len)
    return df[word_counts > strategy]
# Splits input rows based on a given delimiter
@_parser.register
def _explode(strategy : str, df) -> DataFrame:
    # Expand each row into several rows: the text column is split on
    # ``strategy`` and every piece keeps the original label.
    generic_col_names = ["labels", "x"]
    df_col_names = df.columns.values.tolist()
    # Temporarily rename the first two columns so they can be addressed
    # generically (assumes column 0 = label, column 1 = text -- TODO confirm).
    df = df.rename(columns={df_col_names[i]: generic_col_names[i] for i in range(2)})
    df = DataFrame(concat([Series(row['labels'], row['x'].split(strategy)) for _, row in tqdm(df.iterrows())])).reset_index()#_splitter(row['x'], strategy, 128)
    # Restore the original column names (reversed: after reset_index the
    # split text is the index column, the label the value column).
    df_col_names.reverse()
    df = df.rename(columns={k: df_col_names[i] for i,k in enumerate(df.columns.values.tolist())})
    df.to_csv("check.csv")  # NOTE(review): debug dump left in; writes to cwd
    return df
@_parser.register
def _add_separate_cols(strategy: bool, df) -> DataFrame:
    # Turn a 4-letter type string (presumably an MBTI code such as "INTJ")
    # into four binary labels.  ``.str.split('')`` yields
    # ['', 'I', 'N', 'T', 'J', ''], hence the [1:-1] slice; letters in
    # "ESFP" map to 0, the others to 1.
    df['type'] = df['type'].str.split('')
    df['type'] = df['type'].apply(lambda x: list(map(lambda attr_type: 0 if attr_type in "ESFP" else 1, x[1:-1])))
    return df
def _splitter(string : str, delimiter : str, num_words : int) -> list:
string.replace(f"{delimiter}", ' ')
strings = string.split(' ')
return [' '.join(strings[j-num_words-1:j]) for j in range(num_words-1, len(strings), num_words)]
"""
Retaining domain name: doesn't transform multiple links in a single post
@parser.register
def domain_retain(strategy : list, df) -> DataFrame:
def transform_url(post):
url = re.search(r'\bhttp.*[a-zA-Z0-9]\s',post)
if url:
regex = re.findall(r'^.*\.(.*)\.', post)
post = post.replace(url.group(0),regex[0]+" ")
return post
df['posts'] = df['posts'].apply(lambda x: transform_url(x))
return df
"""
|
999,049 | e1004fbcd7451ecbdc9dbce65a1afad7478a7dff | # 提取http://lab.scrapyd.cn中的五条名言
# 相同作者的名言保存在一个文件中,采用追加的方式写入
import scrapy
# Spider class inheriting from scrapy's Spider base class.
class ListSpider(scrapy.Spider):
    """Scrape quotes from http://lab.scrapyd.cn and append each author's
    quotes to that author's own text file."""
    # spider name
    name = 'ListSpider'
    start_urls = ['http://lab.scrapyd.cn']

    def parse(self, response):
        # Extract every quote block on the page.
        mingyanPage1 = response.css('div.quote')
        for mingyan in mingyanPage1:
            # Quote body text.
            text = mingyan.css('.text::text').extract_first()
            # Author name.
            author = mingyan.css('.author::text').extract_first()
            # Tags, joined into one comma-separated string.
            tags = mingyan.css('.tags .tag::text').extract()
            tags = ', '.join(tags)
            # File name: "<author>-语录.txt"; appending means quotes by the
            # same author land in the same file.
            filename = '%s-语录.txt' %(author)
            with open(filename, "a+") as f:
                f.write(text)
                f.write('\n')
                f.write('标签: ' + tags)
                f.write('\n---------------\n')
                f.close()  # NOTE(review): redundant -- the with-block already closes f
|
999,050 | 28d6e9f9b9783907daae7afefd8b6b531f2a0e07 | # test github copilot
# พิมพ์ตรงนี้
# function to encrypt text with a random key
# โผล่ตรงนั้น ------------------------------------------>
def encrypt(text, key):
    """Shift every character of ``text`` up by ``key`` code points."""
    return "".join(chr(ord(character) + key) for character in text)
def encrypt2(text, key):
    """Caesar-shift lowercase ``text`` by ``key`` positions within a-z.

    Generalised from the original, which only handled 0 <= key <= 26 (it
    corrected an overflowed index by subtracting 26 exactly once); the modulo
    keeps any integer key correct, including negatives.

    Raises ValueError if ``text`` contains anything outside a-z.
    """
    # 97..122 are the code points of 'a'..'z'.
    alphabet = [chr(code) for code in range(97, 123)]
    encrypted_text = []
    for letter in text:
        index = (alphabet.index(letter) + key) % len(alphabet)
        encrypted_text.append(alphabet[index])
    return "".join(encrypted_text)
999,051 | dfbf885083467e19c1ca4b5cbd9830273a8f0787 | import requests
import config
import json
import time
import tele_config
from boltiot import Bolt,Sms
thresh_per=70  # alarm threshold; compared against the raw analog reading below
mybolt=Bolt(config.API_KEY,config.DEVICE_ID)  # Bolt cloud device handle
sms=Sms(config.SID,config.AUTH_TOKEN,config.To_num,config.From_num)  # Twilio SMS client
def sensor_value(pin):
    """Read the analog gas-sensor value on ``pin`` via the Bolt cloud API.

    Returns the integer reading, or -999 on any failure (unsuccessful
    response or exception).  Side effect: drives digital pin 0 LOW on every
    successful API round-trip (resets the alarm output).
    """
    try:
        response=mybolt.analogRead(pin)
        data=json.loads(response)
        print(data)
        mybolt.digitalWrite(0,'LOW')  # reset the alarm output on each poll
        if data['success']!=1:
            print("Request failed!")
            print("Here is the response",data)
            return -999
        val=int(data['value'])
        return val
    except Exception as e:
        print("There is some error")
        print(e)
        return -999
def snd_tele_msg(msg):
    """Send ``msg`` to the configured Telegram chat via the Bot API.

    Returns the API's "ok" flag on success; implicitly returns None when the
    HTTP request or JSON parsing raises.
    """
    url="https://api.telegram.org/" + tele_config.tele_bot_id + "/sendMessage"
    data={"chat_id":tele_config.tele_chat_id,"text":msg}
    try:
        response=requests.request("POST",url,params=data)
        print("This is the telegram response")
        print(response.text)
        tele_data=json.loads(response.text)
        return tele_data["ok"]
    except Exception as e:
        print("Error occured while sending msg via telegram!")
        print(e)
# Poll the sensor forever; on a threshold breach raise the alarm pin and
# notify via SMS and Telegram.
while True:
    print("Getting Sensor Value...")
    res = sensor_value("A0")
    # Reading scaled to a percentage (assumes a 10-bit ADC, full scale 1024).
    poll_per = (res/1024)*100
    print("The Pollution percentage in the surrounding area is: "+str(poll_per)+ "%")
    if res == -999:
        print("Oops!Error occured while getting the sensor value")
    elif res>=thresh_per:
        mybolt.digitalWrite(0,'HIGH')  # drive the alarm output
        response=sms.send_sms("ALERT!The Current gas sensor Value is:"+str(res)+". The Pollution Percentage is:"+str(poll_per)+". Has Exceeded the threshold level!")
        print("Response received from Twilio -->",str(response))
        print("Check the status of the SMS -->",str(response.status))
        message="ALERT!The Current gas sensor Value is:"+str(res)+". The Pollution Percentage is:"+str(poll_per)+". Has Exceeded the threshold level!"
        tele_status=snd_tele_msg(message)
        print("The Status of Telegram message:",tele_status)
    time.sleep(10)  # poll every 10 seconds
999,052 | 068c477c3b2bf1f464d6a4951588bbae7d1e6057 | from constants import teams, var_view_map
from nba_py.team import TeamGameLogs
import numpy as np
from datetime import datetime
from sqlalchemy import *
# Every tracked statistic name (the values of the view map).
all_stats = list(var_view_map.values())
def _color(value):
if value['WL'] == 'W':
return 'orange'
else:
return 'grey'
def _alpha(value):
if value['WL'] == 'W':
return .9
else:
return .25
def insert_league_average(engine, teams_dict, stat):
    """Compute the league-wide average of each team's describe() summary for
    ``stat`` and insert one row into the ``description`` table.

    ``teams_dict`` maps team name -> team id usable by ``TeamGameLogs``.

    Fixes: the original iterated the module-level ``teams`` dict, silently
    ignoring this parameter, and hard-coded the accumulator length to 30.
    """
    meta = MetaData()
    with engine.connect() as conn:
        table = Table('description', meta, autoload=True,
                      autoload_with=conn)
        n_teams = len(teams_dict)  # was hard-coded to 30
        gp = np.zeros((n_teams,))
        mean = np.zeros((n_teams,))
        std = np.zeros((n_teams,))
        _min = np.zeros((n_teams,))
        _25th = np.zeros((n_teams,))
        median = np.zeros((n_teams,))
        _75th = np.zeros((n_teams,))
        _max = np.zeros((n_teams,))
        for i, (key, value) in enumerate(teams_dict.items()):
            print(key)
            df = TeamGameLogs(value).info()
            # pandas describe(): count/mean/std/min/25%/50%/75%/max.
            descr = df[stat].describe()
            gp[i] = descr[0]
            mean[i] = descr[1]
            std[i] = descr[2]
            _min[i] = descr[3]
            _25th[i] = descr[4]
            median[i] = descr[5]
            _75th[i] = descr[6]
            _max[i] = descr[7]
        stat_desc = dict(statistic=stat, gp=round(np.mean(gp), 2),
                         mean=round(np.mean(mean), 2),
                         std=round(np.mean(std), 2),
                         min=round(np.mean(_min), 2),
                         u25th=round(np.mean(_25th), 2),
                         median=round(np.mean(median), 2),
                         u75th=round(np.mean(_75th), 2),
                         umax=round(np.mean(_max), 2),
                         date_added=datetime.now())
        ins = table.insert()
        conn.execute(ins, stat_desc)
def get_latest(engine, stat):
    """Return the most recently added summary row for ``stat`` from the
    ``description`` table (or None if absent)."""
    meta = MetaData()
    with engine.connect() as conn:
        table = Table('description', meta, autoload=True,
                      autoload_with=conn)
        # Newest row wins: order by insertion timestamp, keep one.
        sel = select([table.c.gp, table.c.mean, table.c.std, table.c.min,
                      table.c.u25th, table.c.median, table.c.u75th,
                      table.c.umax]).\
            where(table.c.statistic == stat).\
            order_by(table.c.date_added.desc()).\
            limit(1) # noqa
        return conn.execute(sel).fetchone()
if __name__ == '__main__':
    engine = create_engine('sqlite:///nba_viz.db')
    # Refresh the league-average row for every tracked statistic.
    for i in all_stats:
        tp = insert_league_average(engine, teams, i)
    # d = get_latest(engine, 'AST')
|
999,053 | fffd4c683d484654ee0288a991235534521b3ac6 | import os
from .base import *
# Test-only settings overlay on top of base: throwaway secret key and an
# Elasticsearch search backend pointed at a dedicated test index.
WAGTAILADMIN_BASE_URL = "http://testserver"
SECRET_KEY = "TEST_KEY"  # test-only value; never use in production

WAGTAILSEARCH_BACKENDS = {
    'default': {
        'BACKEND': 'search.backend',
        'URLS': [os.getenv('ELASTIC_SEARCH_URL')],
        'INDEX': 'test',
        'TIMEOUT': 1500,
        'INDEX_SETTINGS': {
            'settings': {
                'index': {
                    'number_of_shards': 1,  # one shard is enough for tests
                },
            },
        }
    }
}

TEST_ELASTICSEARCH = True  # flag read by the test suite
|
999,054 | 8a2574db275f443b35bed8a29dcdc62c50a74a24 | from time import sleep
# "Ask the fish": score two words against each other with whimsical rules
# and print which one the fish picks.
print('\n')
print('\033[33m—'*32)
print('\033[36m PERGUNTE AO PEIXE v0.2Alpha')
print('\033[33m—'*32, '\033[m\n')
x = str(input('Digite uma palavra: ')).strip().upper()
y = str(input('Digite outra palavra: ')).strip().upper()
print('\n\033[32mPensando...\033[m\n')
sleep(2)
points = 0  # positive favours X, negative favours Y
if len(x) != len(y):  # the fish likes long words
    if len(x) > len(y):
        points = points + 1
    else:
        points = points - 1
if ('PEIXE' in x) == True:  # the fish likes fish ("peixe")
    points = points + 2
if ('PEIXE' in y) == True:
    points = points - 2
if x.count(' ') < y.count(' '):  # the fish dislikes spaces
    points = points + 1
elif x.count(' ') > y.count(' '):
    points = points - 1
if ('POLVO' in x) == True and ('POLVO' in y) == False:  # the fish HATES octopuses ("polvo")
    points = points - 1000
if ('POLVO' in y) == True and ('POLVO' in x) == False:
    points = points + 1000
if ('POLVO' in x) == True and ('POLVO' in y) == True:
    points = 5000  # both words mention octopus: special refusal case
print('\033[33m—'*32)
if points != 5000:
    if points != 0:
        if points > 0:
            print(f'O peixe escolheu:\033[36m{x.capitalize()}')
        else:
            print(f'O peixe escolheu:\033[36m{y.capitalize()}')
    else:
        print('\033[34mO peixe não sabe :(')  # tie
else:
    print('\033[31mO PEIXE DETESTA POLVOS!!!')
print('\033[33m—'*32, '\033[m\n')
print(points)  # NOTE(review): debug output left in
999,055 | 7100b81f5dd349f12e987dcfadb2220c875e1e50 | import statistics
from fastapi import FastAPI
import requests
from bs4 import BeautifulSoup
from typing import List
from lib_viagens import entrada_dados_viagem,dados_viagem,otimizar,buscarmenor,dic_datas,i_produtos,peso_tempo,lista_cidades,peso_atraso,TravellingSalesmanProblem
from collections import defaultdict
import unidecode
from simanneal import Annealer
import abc
import copy
import datetime
import math
import pickle
import random
import signal
import sys
import time
app= FastAPI()  # ASGI application instance the route decorators attach to
@app.post("/get_normalization/")
async def list_normalization(list:list):
    """Return the z-score normalisation of the posted list of numbers.

    (The parameter keeps its ``list`` name because it is part of the API's
    request schema, even though it shadows the builtin.)
    """
    mean = statistics.mean(list)
    std = statistics.stdev(list)
    return [(value - mean) / std for value in list]
@app.post("/viagem/")
async def time_travel(atual:str,destino:str,p:int):
    # Thin wrapper over lib_viagens.dados_viagem (travel data between cities).
    return dados_viagem(atual,destino,p)
@app.post("/otimizar_simples/")
async def travel(lista_t:list):
    # Thin wrapper over lib_viagens.otimizar (simple route optimisation).
    return otimizar(lista_t)
@app.post("/otimizar_completo/")
async def travel_otimizada (lista_t:list):
    """Solve a weighted travelling-salesman tour over the cities in
    ``lista_t`` via simulated annealing, returned rotated to start at
    'sao paulo sp'."""
    lista = lista_cidades(lista_t)
    init_state = lista
    random.shuffle(init_state)  # random initial tour
    distance_matrix = defaultdict(dict)
    importancia_produtos = i_produtos(lista_t)
    # Edge weight = travel distance, discounted by product importance at the
    # destination and scaled by a lateness penalty.
    for va in init_state:
        for vb in init_state:
            distancia = dados_viagem(va, vb, 2)
            peso_preco = importancia_produtos[vb]
            distance_matrix[va][vb] = distancia *(1- peso_preco) * peso_atraso(va, vb, lista_t)
    tsp = TravellingSalesmanProblem(init_state, distance_matrix)
    tsp.set_schedule(tsp.auto(minutes=0.2))  # auto-tune the annealing schedule (~12s)
    # since our state is just a list, slice is the fastest way to copy
    tsp.copy_strategy = "slice"
    state, e = tsp.anneal()
    # Rotate the cyclic tour so it always starts at the depot city.
    while state[0] != 'sao paulo sp':
        state = state[1:] + state[:1]
    return(state)
999,056 | 3b1ae977cb2610f08f46bf404f70391d700954de | import pandas as pd
import numpy as np
def create_submission(pred_sub, name_of_the_file='submission'):
    """
    Write predictions to "<name_of_the_file>.csv" in the submission format.

    Labels equal to 0 are rewritten as -1 and ids run from 1 to
    len(pred_sub).

    Fix: the original hard-coded a 10000-row index
    (``np.arange(1, 10001)``), which raised for any other prediction count.

    INPUT:
        pred_sub - The list of predictions
        name_of_the_file - (optional): the path of the file (without ".csv")
    """
    df_sub = pd.DataFrame(pred_sub, columns=['Prediction'])
    df_sub.index.name = 'Id'
    df_sub.index = np.arange(1, len(df_sub) + 1)  # ids are 1-based
    df_sub[df_sub['Prediction'] == 0] = -1        # map class 0 -> -1
    df_sub.to_csv(name_of_the_file + '.csv',index_label='Id')
    print('submission file created as "'+ name_of_the_file+'.csv"')
|
999,057 | 4512c7ec4ac5b7a919345caffa1768697d3ff92c | import socket
import threading
import time
import random
from load_common import *
import sys
# Python 2 UDP load generator: one sender thread per target node plus a
# single receiver thread that collects the replies.

# Pools of worker-node addresses to load-test against.
node_set_localhost = ["127.0.0.1"]
node_set_8 = ["192.168.0.200",
              "192.168.0.201",
              "192.168.0.202",
              "192.168.0.203",
              "192.168.0.204",
              "192.168.0.205",
              "192.168.0.206",
              "192.168.0.207"]
node_set_7 = ["192.168.0.200",
              "192.168.0.201",
              "192.168.0.202",
              "192.168.0.203",
              "192.168.0.204",
              "192.168.0.205",
              "192.168.0.206"]
sending_thread_list = []
# --- CONFIG --- #
messages_to_send = 8             # queries sent per node
sleep_time_between_sends = 0.00005 # MAC
#sleep_time_between_sends = 0.01 # PI
my_ip = "127.0.0.1" # ip that the receiver thread will bind to
recv_port = 32005
# 32003 for direct worker, 32002 for load_dir
# ONLY USE THESE TWO VALUES, VERY IMPORTANT
target_port = 32003
nodes = node_set_localhost       # which node pool to use for this run
dictionary_path = "target/dictionary.txt"
# --- END OF CONFIG --- #
print "usage: {} [queries_file]".format(sys.argv[0])
# Queries come either from a premade file (argv[1]) or are generated from
# the dictionary.
query_object = None
query_infile = None
if len(sys.argv) > 1:
    query_infile = sys.argv[1]
if query_infile:
    query_object = QueryPremade(queries_infile=query_infile)
else:
    query_object = Query(dict_filepath=dictionary_path)
#setup socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
mysock.bind((my_ip, recv_port))
mysock.settimeout(15)  # give up on replies after 15s of silence
print "Listening on:", (my_ip, recv_port)
# One receiver for all expected replies (messages * node count).
receiving_thread = ReceiverThread(threadID=0, name="Receiver", my_ip=my_ip,
                                  nmessages=messages_to_send*len(nodes), port=recv_port, sock=mysock)
receiving_thread.start()
# One sender thread per target node, all sharing the same socket.
for i in xrange(len(nodes)):
    name = "Sender" + nodes[i]
    sending_thread_list.append(ConnectionThread(threadID=i, name=name, my_ip=my_ip, target_ip=nodes[i],
                                                port=recv_port, target_port=target_port, sleeptime=sleep_time_between_sends,
                                                nmessages=messages_to_send, sock = mysock,
                                                query_object=query_object))
for thread in sending_thread_list:
    thread.start()
for thread in sending_thread_list:
    if thread.isAlive():
        thread.join()
receiving_thread.join()
print "Exiting Main Thread"
|
999,058 | f1c5970742510d393ef9a789f9f7d4f6252838f6 | #Metric-Oriented Sequence Typer (MOST) software is a modified version of SRST version 1 script (http://sourceforge.net/projects/srst/files/?source=navbar),
#modification made by Anthony.Underwood@phe.gov.uk and Rediat.Tewolde@phe.gov.uk.
import os
import os.path
import sys
import pickle
import re
from collections import Counter
import log_writer
from utility_functions import *
def get_scores(tmp_dir, profile_file_directory, stderr_log_output):
    """
    Parse the pileup file and capture per-locus quality metric values.

    Arguments:
    tmp_dir[str]: path to the tmp directory (must contain ranges.pkl and all.pileup)
    profile_file_directory[str]: directory holding reference.seq, profiles.txt
        and the locus variant sequences (*.fas)
    stderr_log_output: log target passed through to try_and_except

    Returns:
    results[dict]: key = locus name, value = quality metric values
    database[dict]: space-joined allele-number string -> ST number
    locusList[list]: list of locus names
    list_of_all_allele_numbers_tuple[list]: allele numbers of each ST as int tuples
    """
    # Locus coordinate ranges were pickled earlier in the pipeline.
    ranges = pickle.load(open(os.path.join(tmp_dir, "ranges.pkl")))
    (database, locusList,list_of_all_allele_numbers_tuple) = try_and_except(stderr_log_output,
                                                                            get_profiles,
                                                                            profile_file_directory)
    results = try_and_except(stderr_log_output,
                             score,
                             tmp_dir,
                             locusList,
                             ranges)
    return results, database, locusList, list_of_all_allele_numbers_tuple
def get_profiles(profile_file_directory):
    """
    Build the sequence-type (ST) lookup from profiles.txt.

    Arguments:
    profile_file_directory[str]: directory containing profiles.txt

    Returns:
    database[dict]: space-joined allele-number string -> ST number
    locusList[list]: locus names taken from the header row
    list_of_all_allele_numbers_tuple[list]: allele numbers of each ST as an int tuple
    """
    profile_file_path = profile_file_directory+ "/profiles.txt"
    list_of_all_allele_numbers_tuple = []
    database = None
    locusList = []
    for l in open(profile_file_path):
        if database is None:
            # Header row: first column is the ST id, the rest are locus names.
            database = {}
            locusList = l.split()[1:]
            continue
        t = l.split()
        st = t[0]
        v = ' '.join([s for s in t[1:]])
        if v in database:
            # Duplicate allele combination: warn, then overwrite with the later ST.
            print >> sys.stderr, 'sequence type ' + str(st) + ' is a duplicate of ' + str(database[v])
        database[v] = st
        covert_string_to_tuple_list_of_allele_numbers = tuple(int(x) for x in re.findall("[0-9]+", v))
        list_of_all_allele_numbers_tuple.append(covert_string_to_tuple_list_of_allele_numbers)
    return (database, locusList, list_of_all_allele_numbers_tuple)
def score(tmp_dir, locusList, ranges):
    """
    Walk the combined pileup file locus by locus and accumulate quality
    metrics (coverage, SNPs, indels, gaps) for each locus variant.

    Arguments:
    tmp_dir[str]: path to the tmp directory (must contain all.pileup)
    locusList[list]: list of locus names
    ranges[dict]: locus variant name -> (start, end) sequence coordinates

    Returns:
    results[dict]: key = locus base name, value = quality metric tuples
    """
    # Per-locus accumulators; reset whenever a new locus starts in the file.
    loc = ''
    pos = 1
    count_indel = 0
    holes = 0
    snps = 0
    covMax=combined_covMax=covSum=covSum2= 0
    covMin = combined_covMin =99999
    percentage_coverages =[]
    snpList = []
    indelList = []
    results = {}
    pileup_file = os.path.join(tmp_dir, 'all.pileup')
    for l in open(pileup_file):
        t = l.split()
        if loc == '':
            # First line of the file: initialise to the first locus.
            loc = t[0]
            pos = ranges[loc][0] + 1
        if t[0] != loc:
            # Locus changed: flush the metrics of the finished locus.
            results =GenerateResult(ranges,
                                    holes, locusList,
                                    loc,snps,count_indel,
                                    snpList, indelList,
                                    percentage_coverages,combined_covMin,
                                    combined_covMax, covMin, covMax,covSum, results)
            # reset locus vars
            loc = t[0]
            pos = ranges[loc][0] + 1
            count_indel = 0
            holes =snps=covMax=combined_covMax=covSum=covSum2= 0
            covMin =combined_covMin= 99999
            snpList = []
            indelList = []
            percentage_coverages =[]
        here = int(t[1])
        # Ignore positions outside this variant's coordinate range.
        if here - 1 < ranges[loc][0]:
            continue
        elif here - 1 >= ranges[loc][1]:
            continue
        # Positions skipped in the pileup are uncovered bases ("holes").
        while pos < here:
            holes += 1
            pos += 1
        v, indel, array_of_all_indels,most_common_indel = pile(t[2], t[4])
        # Sort base counts descending; the reference base wins ties.
        x = v.items()
        x.sort(lambda a,b: compGreater(t[2], a, b))
        if x[0][0] != t[2].lower():
            # Consensus differs from the reference base: record a SNP.
            snps += 1
            snpList.append((pos,t[2],v));
        c = x[0][1]
        # Fraction of reads supporting the most frequent indel here.
        cov= int(most_common_indel)/float(t[3])
        if cov > 0.5:
            count_indel += 1
            indel_type = Counter(array_of_all_indels)
            indel_type = indel_type.items()
            indelList.append((int(pos),t[2], indel_type))
        covSum += c
        covSum2 += c * c
        if c > covMax:
            covMax = c
        if c < covMin:
            covMin = c
        # Coverage over all four bases combined.
        combined_c = x[0][1] + x[1][1] + x[2][1] + x[3][1]
        if combined_c > combined_covMax:
            combined_covMax = c  # NOTE(review): assigns c, not combined_c -- looks like a bug; confirm intent
        if combined_c < combined_covMin:
            combined_covMin = c  # NOTE(review): same as above
        n = int(t[3])
        # Percentage of reads NOT supporting the consensus base.
        js = []
        for (_,j) in x[1:]:
            js.append(j)
        percentage_coverage = sum(js)/float(n)*100
        percentage_coverages.append(round(float(percentage_coverage),2))
        pos = here + 1
    # Flush the final locus.
    results =GenerateResult(ranges,
                            holes,
                            locusList,loc,
                            snps,count_indel,
                            snpList,indelList,
                            percentage_coverages,combined_covMin,
                            combined_covMax, covMin, covMax,
                            covSum, results)
    return results
def GenerateResult(ranges, holes, locusList, loc, snps, count_indel, snpList, indelList, percentage_coverages, combined_covMin, combined_covMax, covMin, covMax, covSum, results):
    """
    Append the quality metrics for one finished locus variant to ``results``.

    A result is recorded only when the variant had at least one covered
    position and no gaps (holes == 0).  ``loc`` is a name like "gki-2":
    locus "gki", variant "2".
    """
    nameSep ="-"
    if len(percentage_coverages) > 0 and holes == 0:
        locus_length = int(ranges[loc][1]) - int(ranges[loc][0])
        # Split "<locus><sep><variant>" into its two parts.
        m = re.search('([^'+nameSep+']+)'+nameSep+'?([0-9]+)', loc) # e.g loc= AROC-4, nameSep : "-"
        locus = m.group(1)
        if locus not in locusList:
            print "Locus " + locus + " from sequence file not recognised in ST file."
        var = m.group(2)
        max_percentage_of_non_consensus_bases = max(percentage_coverages)
        # Number of covered positions actually scored for this variant.
        number_of_time_percentage_coverage_value_calculated =len(percentage_coverages)
        # Covered positions as a percentage of the variant's length.
        percentage_coverage_number_of_time_percentage_coverage_value_calculated = int(number_of_time_percentage_coverage_value_calculated/float(locus_length)*100)
        covStats = (max_percentage_of_non_consensus_bases, combined_covMin, combined_covMax, covMin, covMax, int(100 * covSum / number_of_time_percentage_coverage_value_calculated) / 100.0) #int(100 * covSum / n) / 100.0 is the average
        res = (locus, var, snps, loc, snpList, covStats, indelList, count_indel,number_of_time_percentage_coverage_value_calculated,locus_length,percentage_coverage_number_of_time_percentage_coverage_value_calculated)
        if locus not in results:
            results[locus] = []
        results[locus].append(res)
    return results
def compGreater(r, a, b):
    # Python 2 cmp-style comparator: sort (base, count) pairs by count
    # descending, but on a tied count rank the reference base ``r`` first.
    if a[1] == b[1]:
        if a[0] == r:
            return -1
        elif b[0] == r:
            return 1
    return cmp(b[1], a[1])
def pile(r, s):
    """
    Count observed bases in one pileup column.

    Arguments:
    r[str]: reference base at this position
    s[str]: the pileup read-bases string (samtools column 5)

    Returns:
    v[dict]: counts of a/c/g/t observed at this position
    indel[int]: total number of indel events in the column
    array_of_all_indels[list]: raw indel tokens (e.g. "+2at")
    most_common_indel[int]: count of the single most frequent indel (0 if none)
    """
    r = r.lower()
    v = {'a':0, 'c':0, 'g':0, 't':0}
    s = s.lower()
    most_common_indel = 0
    # Indel tokens look like "+2at" / "-1c": sign, length, then bases.
    # NOTE(review): the regex matches a single trailing letter; multi-base
    # indel bases are removed below using the parsed length instead.
    array_of_all_indels = re.findall(r'[\+\-][0-9][0-9]*[A-Za-z]', s)
    if len(array_of_all_indels) > 0:
        most_common_indel = Counter(array_of_all_indels).most_common(1)[0][1]
    indel = len(array_of_all_indels)
    # Strip inserted/deleted bases out of the string so they are not counted
    # as regular observations below.
    for list_indel in array_of_all_indels:
        number_of_indelsBases = int(list_indel[1:-1])
        position_of_indel = s.find(list_indel)
        s = s[0:position_of_indel] + s[position_of_indel+number_of_indelsBases+len(list_indel)-1:]
    skip = False
    for c in s:
        if skip == True:
            # Previous char was '^': this char is a mapping quality, not a base.
            skip = False
        elif c == '.' or c == ',':
            # Match to the reference on either strand.
            v[r] += 1
        elif c == '^':
            # Start-of-read marker; the next char is a quality byte.
            skip = True
        elif c == '$':
            # End-of-read marker.
            pass
        elif c == '+' or c == '-':
            # Leftover indel sign (its bases were already stripped).
            pass
        elif c.lower() in v:
            # Explicit base call.
            v[c.lower()] += 1
    return v, indel, array_of_all_indels, most_common_indel
|
999,059 | ad817f60883057ad287af959a81299b92f6f89e3 | import os
import os.path
import torch
import sys
from torchvision import models
from collections import namedtuple
class saveData():
    """Manages an experiment's save directory: model checkpoints under
    .../model/ and a flushed-per-line log.txt."""
    def __init__(self, args):
        self.args = args
        # <args.save_dir>/<args.load>/ holds the log; a 'model' subdir holds
        # checkpoints.  Both are created on demand.
        self.save_dir = os.path.join(args.save_dir, args.load)
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        self.save_dir_model = os.path.join(self.save_dir, 'model')
        if not os.path.exists(self.save_dir_model):
            os.makedirs(self.save_dir_model)
        # Append to an existing log, otherwise start a new one.
        # NOTE(review): this handle is never closed explicitly.
        if os.path.exists(self.save_dir + '/log.txt'):
            self.logFile = open(self.save_dir + '/log.txt', 'a')
        else:
            self.logFile = open(self.save_dir + '/log.txt', 'w')
    def save_model(self, model, epoch):
        """Write the latest and epoch-tagged state dicts, the full model
        object, and the last finished epoch number."""
        torch.save(
            model.state_dict(),
            self.save_dir_model + '/model_lastest.pt')
        torch.save(
            model.state_dict(),
            self.save_dir_model + '/model_' + str(epoch) + '.pt')
        torch.save(
            model,
            self.save_dir_model + '/model_obj.pt')
        torch.save(
            epoch,
            self.save_dir_model + '/last_epoch.pt')
    def save_log(self, log):
        """Append one line to log.txt and flush immediately."""
        sys.stdout.flush()
        self.logFile.write(log + '\n')
        self.logFile.flush()
    def load_model(self, model):
        """Load the most recent checkpoint into ``model``.

        Returns (model, last_epoch)."""
        model.load_state_dict(torch.load(self.save_dir_model + '/model_lastest.pt'))
        last_epoch = torch.load(self.save_dir_model + '/last_epoch.pt')
        print("load mode_status from {}/model_lastest.pt, epoch: {}".format(self.save_dir_model, last_epoch))
        return model, last_epoch
class Vgg16(torch.nn.Module):
    """Pretrained VGG-16 feature extractor exposing the activations after
    relu1_2, relu2_2, relu3_3 and relu4_3 (typical perceptual-loss taps)."""
    def __init__(self, requires_grad=False):
        super(Vgg16, self).__init__()
        # Downloads ImageNet weights on first use.
        vgg_pretrained_features = models.vgg16(pretrained=True).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        # Layer-index boundaries 4/9/16/23 fall just after the relu
        # activations named above in torchvision's vgg16.features.
        for x in range(4):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(4, 9):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(9, 16):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(16, 23):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            # Freeze all weights by default (pure feature extraction).
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, X):
        """Return a namedtuple of the four intermediate activations for X."""
        h = self.slice1(X)
        h_relu1_2 = h
        h = self.slice2(h)
        h_relu2_2 = h
        h = self.slice3(h)
        h_relu3_3 = h
        h = self.slice4(h)
        h_relu4_3 = h
        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])
        out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
        return out
def mkdirs(paths):
    """Create every directory in ``paths`` (a str, or a list/tuple of strs).

    The original guard ``isinstance(paths, list) and not isinstance(paths, str)``
    was redundant (a list can never be a str); tuples are now accepted too.
    """
    if isinstance(paths, (list, tuple)):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)
def mkdir(path):
    """Create ``path`` (including parents) if it does not already exist.

    ``exist_ok=True`` removes the check-then-create race of the original
    ``os.path.exists`` guard (two processes could both pass the check and
    one would then crash in makedirs).
    """
    os.makedirs(path, exist_ok=True)
def gradient(x):
    """Return absolute horizontal and vertical finite differences of a
    4-D tensor laid out as (N, C, H, W)."""
    diff_h = (x[:, :, :, :-1] - x[:, :, :, 1:]).abs()
    diff_v = (x[:, :, :-1, :] - x[:, :, 1:, :]).abs()
    return diff_h, diff_v
class ToTensor(object):
    """Reorder each ndarray in a list from H x W x C to C x H x W.

    Note: despite the name, this only transposes axes; the arrays are not
    converted to torch tensors here.
    """

    def __call__(self, image_list):
        return [image.transpose((2, 0, 1)) for image in image_list]
def tensor_to_image(tensor):
    """Convert a 4-D torch tensor (N, C, H, W) to an H x W x C ndarray.

    Only the first element of the batch is converted.  Values are clipped to
    [0, 255] and single-channel images are stacked to three channels.

    Fixes: numpy is never imported at module top, so bind it locally; the
    original bare ``except`` referenced the (also unimported) PIL ``Image``
    name and would have raised a NameError of its own.
    """
    import numpy as np  # module top never imports numpy

    if type(tensor) in [torch.autograd.Variable]:
        img = tensor.data[0].cpu().detach().numpy()
    else:
        img = tensor[0].cpu().detach().numpy()
    img = img.transpose((1, 2, 0))  # CHW -> HWC
    try:
        img = np.clip(img, 0, 255)
        if img.shape[-1] == 1:
            img = np.dstack((img, img, img))
    except Exception as exc:
        # Best-effort conversion: report the problem instead of crashing
        # (the original tried to dump a debug image via PIL here).
        print("invalid value catch:", exc)
    return img
def to_tensor(x, gpuid=None):
    """Convert a list/tuple of HxWxC ndarrays, or a single ndarray, to a
    float tensor shaped (N, C, H, W); optionally move it to GPU ``gpuid``.

    Returns None for unsupported input types.
    NOTE(review): ``np`` is used here but numpy is never imported at module
    top -- the ndarray branch raises NameError as the file stands.
    """
    if type(x) in [list, tuple]:
        image_num = len(x)
        if image_num >0:
            # Assumes every image in the list shares one shape -- TODO confirm.
            (h,w,c) = x[0].shape
        else:
            print("No image!")
        t = torch.FloatTensor(image_num, c, h, w)
        for i in range(image_num):
            image = x[i].transpose((2, 0, 1))  # HWC -> CHW
            t[i,:,:,:] = torch.from_numpy(image)
        if gpuid:
            t = t.cuda(gpuid)
        return t
    elif isinstance(x, np.ndarray):
        if len(x.shape) == 3:
            x = np.expand_dims(x, axis=0)  # add a batch dimension
        elif len(x.shape) == 2:
            x = np.dstack((x,x,x))         # grayscale -> 3 identical channels
            x = np.expand_dims(x, axis=0)
        bs, h, w, c = x.shape
        t = torch.FloatTensor(bs,c,h,w)    # NOTE(review): immediately overwritten below
        x = x.transpose((0,3,1,2))         # NHWC -> NCHW
        t = torch.from_numpy(x)
        if gpuid:
            t = t.cuda(gpuid)
        return t
    else:
        print("data type not accepted!")
        return None
def to_variable(x, gpuid=3):
    """Convert array-like or float tensor input to a torch Variable.

    NOTE(review): the default ``gpuid=3`` forces a transfer to GPU 3; pass a
    falsy gpuid for CPU.  ``torch.autograd.Variable`` is a legacy API
    (tensors carry autograd state directly since torch 0.4).
    Returns None for unrecognized input types.
    """
    v = None
    if type(x) in [list, tuple, np.ndarray]:
        x = to_tensor(x)
    if type(x) in [torch.DoubleTensor, torch.FloatTensor]:
        if gpuid:
            x = x.cuda(gpuid)
        v = torch.autograd.Variable(x)
    else:
        print("Unrecognized data type!")
    return v
def generate_new_seq(filename):
    """Return the sorted list of file paths matching the glob pattern
    ``filename``.

    Fix: ``glob`` was used without ever being imported in this module.
    """
    import glob  # not imported at module top; bind it locally
    return sorted(glob.glob(filename))
# def augment(input_list, scale_limit=300, crop_size=224):
# input_list = RandomHorizontalFlip(input_list)
# input_list = RandomColorWarp(input_list)
# # input_list = RandomScale(rain, streak, clean, size_limit=scale_limit)
# input_list = RandomCrop(input_list, size=crop_size)
# return input_list
def compute_psnr(est, gt):
    """Average PSNR (dB) over a batch of estimated vs ground-truth tensors.

    Inputs are (N, C, H, W) tensors; each item is converted to HWC and
    multiplied by 255 before scoring, so values are presumably in [0, 1] --
    TODO confirm against the callers.
    """
    batch_size = est.size()[0]
    sum_acc = 0
    for i in range(batch_size):
        est_image = est.cpu().data[i].detach().numpy()
        gt_image = gt.cpu().data[i].detach().numpy()
        est_image = est_image.transpose((1,2,0))  # CHW -> HWC
        gt_image = gt_image.transpose((1,2,0))
        sum_acc += psnr(est_image*255, gt_image*255)
    avg_acc = sum_acc / batch_size
    return avg_acc
def rgb2ycbcr(im):
    """Convert an H x W x 3 RGB image array to YCbCr (uint8, BT.601 weights).

    Fix: numpy is never imported at module top, so bind it locally.
    """
    import numpy as np  # not imported at module top
    xform = np.array([[.299, .587, .114], [-.1687, -.3313, .5], [.5, -.4187, -.0813]])
    ycbcr = im.dot(xform.T)
    ycbcr[:,:,[1,2]] += 128  # chroma channels are offset by 128
    return np.uint8(ycbcr)
def ycbcr2rgb(im):
    """Convert an H x W x 3 YCbCr image array back to RGB (float64,
    BT.601 weights).  The input array is not modified (a float copy is made).

    Fixes: numpy is never imported at module top; ``np.float`` was removed
    in NumPy 1.24 -- the builtin ``float`` is the documented replacement and
    also yields a float64 copy.
    """
    import numpy as np  # not imported at module top
    xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
    rgb = im.astype(float)
    rgb[:,:,[1,2]] -= 128  # undo the chroma offset
    return np.float64(rgb.dot(xform.T))
def psnr(es, gt):
    """Peak signal-to-noise ratio (dB) between two images.

    3-channel inputs are compared on the Y (luma) channel only; anything
    else is compared directly.  Identical inputs divide by a zero RMSE and
    yield ``inf`` (with a numpy warning), as in the original.

    Fix: numpy is never imported at module top, so bind it locally.
    """
    import numpy as np  # not imported at module top
    if len(es.shape) ==3 and es.shape[2] == 3:
        es_channel = rgb2ycbcr(es)[:,:,0]
        gt_channel = rgb2ycbcr(gt)[:,:,0]
    else:
        es_channel = es
        gt_channel = gt
    imdiff = np.float64(es_channel) - np.float64(gt_channel)
    rmse = np.sqrt(np.mean(np.square(imdiff.flatten())))
    psnr_value = 20*np.log10(255/rmse)
    return psnr_value
def load_checkpoint(self, best=False):
    """
    Load the best copy of a model. This is useful for 2 cases:
    - Resuming training with the most recent model checkpoint.
    - Loading the best validation model to evaluate on the test data.
    Params
    ------
    - best: if set to True, loads the best model. Use this if you want
      to evaluate your model on the test data. Else, set to False in
      which case the most recent version of the checkpoint is used.

    Reads self.ckpt_dir and self.model_name; mutates self.start_epoch,
    self.best_valid_acc, self.lr and self.model in place.
    """
    print("[*] Loading model from {}".format(self.ckpt_dir))
    filename = self.model_name + '_ckpt.pth.tar'
    if best:
        filename = self.model_name + '_model_best.pth.tar'
    ckpt_path = os.path.join(self.ckpt_dir, filename)
    # NOTE(review): torch.load unpickles arbitrary objects -- only load
    # checkpoints from trusted sources.
    ckpt = torch.load(ckpt_path)
    # load variables from checkpoint
    self.start_epoch = ckpt['epoch']
    self.best_valid_acc = ckpt['best_valid_acc']
    self.lr = ckpt['lr']
    self.model.load_state_dict(ckpt['state_dict'])
    if best:
        print(
            "[*] Loaded {} checkpoint @ epoch {} "
            "with best valid acc of {:.3f}".format(
                filename, ckpt['epoch']+1, ckpt['best_valid_acc'])
        )
    else:
        print(
            "[*] Loaded {} checkpoint @ epoch {}".format(
                filename, ckpt['epoch']+1)
        )
|
999,060 | d4ecd8805a01403cf5678d43294fa22aa3fa6692 |
from __future__ import with_statement
__all__ = ["register_cookbook_path", "load_cookbook"]
import imp
import os
import sys
import yaml
import kokki
from kokki.environment import env as global_env
class CookbookTemplate(object):
    """Lazy accessor for one cookbook directory (metadata, attributes, recipes)."""

    def __init__(self, name, path):
        self.name = name
        self.path = path

    def get_metadata(self):
        # Parse metadata.yaml once and cache the result on the instance.
        if not hasattr(self, '_metadata'):
            path = os.path.join(self.path, "metadata.yaml")
            with open(path, "rb") as fp:
                # NOTE(review): yaml.load without an explicit Loader can
                # construct arbitrary objects on untrusted input; prefer
                # yaml.safe_load if the metadata source is not trusted.
                self._metadata = yaml.load(fp.read())
        return self._metadata

    def get_default_attributes(self):
        """Return {attribute_name: default} from the metadata, or {}."""
        meta = self.get_metadata()
        if meta.get('attributes'):
            return dict((k, v['default']) for k, v in meta['attributes'].items())
        else:
            # Normalize missing attribute sections in the cached metadata.
            meta['attributes'] = {}
            return {}

    def get_recipe(self, name):
        """Return the recipe's source text, or None if the file is absent."""
        path = os.path.join(self.path, "recipes", name + ".py")
        if not os.path.exists(path):
            return None
        with open(path, "rb") as fp:
            recipe = fp.read()
        return recipe

    def setup(self):
        # Hook for subclasses; intentionally a no-op here.
        pass
# Virtual package under which cookbooks are importable.
COOKBOOKS_NAMESPACE = "kokki.cookbooks"

class CookbookImporter(object):
    """PEP 302 meta-path finder/loader for the virtual kokki.cookbooks package.

    Python 2 only: uses the `exec ... in` statement, execfile() and the
    deprecated imp module.
    """

    def __init__(self):
        # Create the namespace package whose __path__ is the set of
        # registered cookbook directories.
        mod = self.cookbooks_module = imp.new_module("cookbooks")
        mod.__path__ = list(cookbook_paths)
        mod.__file__ = "<%s>" % self.__class__.__name__
        sys.modules[COOKBOOKS_NAMESPACE] = mod
        kokki.cookbooks = mod

    def find_module(self, fullname, path=None):
        # Only claim imports below the kokki.cookbooks namespace.
        if not fullname.startswith(COOKBOOKS_NAMESPACE):
            return None
        if self._find_module(fullname):
            return self

    def _find_module(self, fullname, path=None):
        # Walk the dotted path one segment at a time, reusing already
        # imported parents' __path__ when available.
        mod_path = fullname[len(COOKBOOKS_NAMESPACE)+1:]
        current_name = "%s" % COOKBOOKS_NAMESPACE
        paths = path or list(cookbook_paths)
        for name in mod_path.split('.'):
            current_name += "."+name
            if current_name in sys.modules:
                paths = sys.modules[current_name].__path__
                continue
            # Returns (fp, pathname, description) or raises ImportError.
            return imp.find_module(name, paths)

    def _load_module(self, fullname, fp, pathname, description):
        # description[2] == 5 is imp.PKG_DIRECTORY, i.e. a package dir.
        if description[2] == 5:
            filename = os.path.join(pathname, "__init__.py")
            ispkg = True
        else:
            filename = pathname
            ispkg = False
        mod = imp.new_module(fullname)
        mod.__file__ = filename
        if ispkg:
            mod.__path__ = [pathname]
        # Register before executing so circular imports can resolve.
        sys.modules[fullname] = mod
        try:
            if fp:
                exec compile(fp.read(), filename, "exec") in mod.__dict__
            else:
                execfile(filename, mod.__dict__)
        except:
            # Roll back the registration on any failure, then re-raise.
            del sys.modules[fullname]
            raise
        mod.__loader__ = self
        mod.__package__ = COOKBOOKS_NAMESPACE
        return mod

    def load_module(self, fullname):
        if fullname in sys.modules:
            return sys.modules[fullname]
        if fullname == COOKBOOKS_NAMESPACE:
            # Refresh the namespace package's search path on (re)load.
            mod = self.cookbooks_module
            mod.__path__ = list(cookbook_paths)
            mod.__file__ = "<%s>" % self.__class__.__name__
            sys.modules[fullname] = mod
        else:
            fp, pathname, description = self._find_module(fullname)
            mod = self._load_module(fullname, fp, pathname, description)
            name = fullname[len(COOKBOOKS_NAMESPACE)+1:]
            # Expose top-level cookbooks as attributes of the namespace.
            if "." not in name:
                setattr(self.cookbooks_module, name, mod)
        return mod
def load_cookbook(name, path=None, env=None):
    """Import cookbook *name*, decorate it with CookbookTemplate helpers,
    register it in the environment, and return the module.

    Returns None implicitly when no search path contains the cookbook.
    """
    # NOTE(review): these local imports shadow the module-level imp/sys
    # imports and look redundant.
    import imp
    import sys
    env = env or global_env
    try:
        # Already loaded for this environment.
        return env.cookbooks[name]
    except KeyError:
        paths = [path] if path else sys.path
        for path in paths:
            cb_path = os.path.join(path, name)
            # A directory is a cookbook iff it has a metadata.yaml.
            if os.path.exists(os.path.join(cb_path, 'metadata.yaml')):
                parent_mod = __import__(COOKBOOKS_NAMESPACE, {}, {}, [name])
                mod = getattr(parent_mod, name)
                # Copy template helpers onto the module without clobbering
                # anything the cookbook defines itself.
                template = CookbookTemplate(name, cb_path)
                for k in dir(template):
                    if not hasattr(mod, k):
                        setattr(mod, k, getattr(template, k))
                env.cookbooks[name] = mod
                env.set_attributes(mod.get_default_attributes())
                # NOTE(review): injecting into this module's globals() is a
                # side effect callers may not expect -- confirm it is needed.
                globals()[name] = mod
                return mod
# Registered cookbook search directories; consulted by CookbookImporter
# on every lookup (it re-reads this set, so later additions take effect).
cookbook_paths = set()
# Created once at module import time; installed on sys.meta_path below.
importer = CookbookImporter()

def register_cookbook_path(path):
    """Make *path* searchable by the kokki.cookbooks meta-path importer."""
    cookbook_paths.add(path)

# sys.path.append(path)
# @sys.path_hooks.append
# def cookbook_path_hook(path):
#     if path in cookbook_paths:
#         return importer
#     raise ImportError()
sys.meta_path.append(importer)
|
999,061 | f71738cf19d4ca29a62000db45b8151ec5e6f860 | from flask import Flask, request
from datetime import datetime, date
from app.models import db, Candidate
from flask_marshmallow import Marshmallow
ma = Marshmallow()
from app.api import api
from app.api.namespaces.candidates.schemas import CandidateEmploymentHistorySchema
def create_app():
    """Application factory: configure Flask, the API, the DB, and routes."""
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///hire.db'
    api.init_app(app)
    db.init_app(app)
    with app.app_context():
        db.create_all()
        # NOTE(review): this seeds the same Candidate on EVERY startup,
        # inserting a duplicate row each run -- probably dev-only code.
        cand = Candidate(firstname='Harry', middlename='Ian', lastname='Laurenceau',
                         city='Chicago', state='MD')
        db.session.add(cand)
        db.session.commit()
    ma.init_app(app)

    @app.route('/health')
    def healthcheck():
        # Simple liveness probe.
        return {'status' : 'UP'}

    @app.route('/history/<int:id>', methods=['POST'])
    def add_history(id):
        # Deserialize the posted JSON into an employment-history object.
        # NOTE(review): the `id` path parameter is never used -- confirm
        # whether the history should be attached to candidate `id`.
        ceh = CandidateEmploymentHistorySchema()
        employment_history = request.json
        #employment_history['startdate'] = datetime.strptime(employment_history['startdate'], '%d-%m-%Y')
        #employment_history['enddate'] = datetime.strptime(employment_history['enddate'], '%d-%m-%Y')
        c = ceh.load(employment_history)
        print(c, type(c.startdate))
        return 'success'

    return app
999,062 | 08228b28c265efd1d2a6aa2755559f50619f3e27 | #!/usr/bin/env python
from __future__ import print_function
from github import Github
from os.path import expanduser
from argparse import ArgumentParser
# CLI: create a GitHub pull request (and optionally comment on it).
parser = ArgumentParser()
parser.add_argument("-r", "--repository", dest="repo", help="Github Repositoy name e.g cms-sw/cms-bot",type=str)
parser.add_argument("-b", "--base_branch", dest="base_branch",help="Repository branch againt which new Pull request should be created",type=str)
parser.add_argument("-f", "--feature_branch", dest="feature_branch",help="New feature branch to be merged",type=str)
parser.add_argument("-t", "--title", dest="title", help="Pull request title",type=str)
parser.add_argument("-d", "--body", dest="body", help="Pull request body text, optional",type=str, default='')
parser.add_argument("-c", "--comment", dest="comment", help="Extra comment after creating Pull requests e.g. please tests",type=str, default='')
args = parser.parse_args()
# All four of these are effectively required; argparse defaults them to None.
if not args.repo: parser.error("Missing Repo")
if not args.base_branch: parser.error("Missing base branch name.")
if not args.feature_branch: parser.error("Missing feature branch name.")
if not args.title: parser.error("Missing PR title")
print("Authenticating to Github and connecting to repo")
# Reads a personal access token from ~/.github-token.
gh = Github(login_or_token = open(expanduser("~/.github-token")).read().strip())
print("Authentication succeeeded")
gh_repo = gh.get_repo(args.repo)
print("Creating pull request")
# '@N@' in the body is a caller-side placeholder for newlines.
pr = gh_repo.create_pull(title = args.title, body = args.body.replace('@N@','\n'), base = args.base_branch, head = args.feature_branch)
if args.comment:
    pr.create_issue_comment(body=args.comment)
|
999,063 | 84f22687df107437e7e78f6055f9a113b47a4a99 | max_realizations = 1000000000
|
999,064 | 6e0a7d3f509b04f46626b8350e8407dca3fec9c8 | """
Utilty functions that interact with ArcGIS Feature Servers
NOTE: Intents that query FeatureServers may fail because AWS will
kill any computation that takes longer than 3 secs.
"""
from arcgis.gis import *
from arcgis.features import FeatureLayer
from arcgis.geocoding import geocode
import logging
logger = logging.getLogger(__name__)
dev_gis = GIS() # this is needed to use geocoding
def get_features_from_feature_server(url, query):
    """Query a City of Boston Feature Server and return matching features.

    :param url: url for Feature Server
    :param query: where-clause selecting features (example: "Spaces > 0")
    :return: list of the features returned by the query, each as a dict
    """
    logger.debug('url received: ' + url + ', query received: ' + query)
    layer = FeatureLayer(url=url)
    return [feature.as_dict for feature in layer.query(where=query)]
def _get_dest_addresses_from_features(feature_address_index, features):
    """Build destination address strings from a list of features.

    :param feature_address_index: index/key of the address field in a feature
    :param features: list of features retrieved from a FeatureServer
    :return: list of addresses, each suffixed with " Boston, MA"
    """
    logger.debug(
        'feature_address_index received; ' + str(feature_address_index) +
        ', features received (printing first five): ' + str(features[:5]) +
        ', count(features): ' + str(len(features))
    )
    # Features with an empty/missing address field are skipped;
    # trailing whitespace (e.g. \r\n) is stripped before the suffix.
    return [feature[feature_address_index].rstrip() + " Boston, MA"
            for feature in features
            if feature[feature_address_index]]
def geocode_address(m_address):
    """Geocode a Boston street address.

    :param m_address: address of interest in street form
    :return: the first match's location values as a list (X and Y coordinates)
    """
    full_address = m_address + ", City: Boston, State: MA"
    best_match = geocode(address=full_address)[0]
    return list(best_match['location'].values())
999,065 | d7efff7a1687f1744dadd6d3e0de33dc8cda0e60 | '''
QUICK FIND : Tells whether two nodes of a graph are connected
UF ADT:
UF(n): initialize n nodes (with list of integers 0 to n-1)
union(p,q): connects nodes p and q (makes nodes entries same)
find(p): component identifier for p(0 to n-1)
connected(p,q): returns True if p and q are in same component i.e. connected
'''
# p and q are connected if and only if id[p] is equal to id[q]
class QuickFind(object):
    """Quick-find union-find structure.

    Two nodes are connected iff their component identifiers are equal,
    so connected() is O(1) while union() relabels in O(n).
    """

    def __init__(self, n):
        # _nodes[i] is the component identifier of node i;
        # initially every node is its own component.
        self._nodes = [i for i in range(n)]

    def union(self, p, q):
        """Merge the components containing p and q (O(n) relabel)."""
        pid = self._nodes[p]
        qid = self._nodes[q]
        if pid == qid:
            return  # already connected; skip the relabelling pass
        for i in range(len(self._nodes)):
            if self._nodes[i] == pid:
                self._nodes[i] = qid

    def find(self, p):
        """Return the component identifier of node p.

        Completes the UF ADT described in the module docstring, which
        promised find(p) but the class never implemented it.
        """
        return self._nodes[p]

    def connected(self, p, q):
        """Return True if p and q are in the same component."""
        return self._nodes[p] == self._nodes[q]
if __name__ == '__main__':
    # Small demo of the quick-find structure.
    # Python 2 only: uses print statements.
    qf = QuickFind(9)
    qf.union(2,3)
    qf.union(6,8)
    qf.union(0,5)
    qf.union(5,8)
    print qf.connected(0,8)
    print qf.connected(2,6)
    print qf._nodes
|
999,066 | cf5acd41b8d9bea4db3d296b08d3cc153dd8ee53 | import pymongo
import pandas as pd
import pprint
import random
# NOTE(review): credentials are hard-coded in the connection URI --
# move them to environment variables / a secrets store.
myclient = pymongo.MongoClient("mongodb+srv://admin:admin@cluster1.ajaye.mongodb.net/")
# print(myclient.list_database_names())
# Collection of completed rides used by kAnonymity() below.
mydb = myclient["public"]
mycol = mydb["completeride"]
# print(mydb.list_collection_names())
def isKAnonymized(df, k):
    """Return True iff every row of *df* is identical to at least k rows.

    Each row is turned into a query over all columns; if any row's
    equivalence class has fewer than k members, the frame is not
    k-anonymous. (Column values are interpolated unquoted, so this
    works for numeric columns.)
    """
    for _, row in df.iterrows():
        predicate = ' & '.join(f'{col} == {row[col]}' for col in df.columns)
        if df.query(predicate).shape[0] < k:
            return False
    return True
def generalize(df, depths):
    """Generalize numeric columns by zeroing the last depths[col] digits.

    Each value is floored (truncated toward zero) to a multiple of
    10**depths[column_name], e.g. depth 1 maps 123 -> 120.
    """
    def _bucket(series):
        factor = 10 ** depths[series.name]
        return series.apply(lambda value: int(int(value / factor) * factor))
    return df.apply(_bucket)
def kAnonymity():
    """Shuffle ride distances within generalization buckets and print a sample.

    Loads all documents from the `completeride` Mongo collection, buckets
    total_distance by zeroing the last digit (depth 1), randomly permutes
    the exact distances WITHIN each bucket, then restores the original row
    order and prints the head of the anonymized frame.
    """
    df = pd.DataFrame(list(mycol.find()))
    df1 = df[['_id','total_distance']].copy()
    df3 = df1.drop(['_id'],axis=1)
    depths = { 'total_distance': 1 }
    df4 = generalize(df3, depths)
    # Keep both the bucketed and the original distance side by side.
    df4['generalized_distance'] = df4['total_distance']
    df4['total_distance'] = df1['total_distance']
    df4['_id'] = df1['_id']
    # Remember each row's original position so order can be restored later.
    list_index=[]
    length=len(df4)
    for index in range (length):
        list_index.append(index)
    s1 = pd.Series(list_index, name='index')
    df4 = pd.concat([s1,df4], axis=1)
    # One row per bucket: lists of distances, original indices and ids.
    df5=df4.groupby(['generalized_distance'])['total_distance'].apply(list).reset_index(name='values')
    df7=df4.groupby(['generalized_distance'])['index'].apply(list).reset_index(name='index_values')
    df5['index_values'] = df7['index_values']
    df6=df4.groupby(['generalized_distance'])['_id'].apply(list).reset_index(name='id_values')
    df5['id_values'] = df6['id_values']
    # Permute the exact distances inside each bucket (in place).
    shuffle = df5['values'].values
    for i in range(0,len(shuffle)):
        random.shuffle(shuffle[i])
    check=df5["index_values"]
    group_by_values=df5["generalized_distance"]
    travel_distance=df5["values"]
    ids_by_values=df5["id_values"]
    list_index_values=[]
    list_group_by_values=[]
    list_group_ids = []
    travel_distance_values=[]
    # The four loops below flatten the grouped lists back into parallel
    # per-row columns (same iteration order, so the columns stay aligned).
    ### for index_values
    for i in range(len(df5)):
        for j in range(len(check[i])):
            list_index_values.append(check[i][j])
    ### for group by values
    for x in range(len(df5)):
        for y in range(len(check[x])):
            list_group_by_values.append(group_by_values[x])
    ### for group by id values
    for x in range(len(df5)):
        for y in range(len(check[x])):
            list_group_ids.append(ids_by_values[x][y])
    ### for travel distance
    for a in range(len(df5)):
        for b in range(len(check[a])):
            travel_distance_values.append(travel_distance[a][b])
    shuffled_dataframe = pd.DataFrame({'id': list_group_ids,
                                       'total_distance': travel_distance_values,
                                       'generalized_distance': list_group_by_values,
                                       'index':list_index_values
                                       })
    # Restore the original row order, then drop the helper columns.
    final = shuffled_dataframe.sort_values(by='index', ascending=True)
    final = final.drop(['index'],axis=1)
    final = final.drop(['generalized_distance'],axis=1)
    print(final.head())
kAnonymity()
999,067 | 2650a7be313a1a2a6e59dd53c35a77ae221407d6 | """PytSite Facebook Plugin Event Handlers.
"""
__author__ = 'Alexander Shepetko'
__email__ = 'a@shepetko.com'
__license__ = 'MIT'
from pytsite import metatag as _metatag, lang as _lang, router as _router
from plugins import auth as _auth
from . import _api, _error
def router_dispatch():
    """'pytsite.router.dispatch' event handler.

    Sets the 'fb:app_id' meta tag on every dispatched page. If the
    Facebook app ID/secret are not configured yet, developers get a
    session warning instead (regular users see nothing).
    """
    try:
        _metatag.t_set('fb:app_id', _api.get_app_id())
    except (_error.AppIdNotSet, _error.AppSecretNotSet):
        # Only surface the misconfiguration to developer accounts.
        if _auth.get_current_user().is_dev:
            _router.session().add_warning_message(_lang.t('facebook@plugin_setup_required_warning'))
999,068 | 054ad813d68f387364d325486636a649e57f5dca | import numpy as np
import cPickle as pk
import gzip as gz
from IPython import embed
import glob
from pylab import *
from astropy.visualization import hist
from scipy import stats
from cosmojo.universe import Cosmo
def GetDls(spectra_folder):
    """Stack the 'dltt' spectra of every patch pickle in *spectra_folder*.

    Python 2 only (cPickle, xrange). Empty spectra are skipped.
    NOTE(review): np.vstack raises if no file yields a non-empty
    spectrum -- confirm the folders are never empty.
    """
    patches_files = sorted(glob.glob('%s/*.pkl.gz' %(spectra_folder)))
    dls = []
    for f in patches_files:
        dls_tmp = np.asarray(pk.load(gz.open(f,'rb'))['dltt'])
        if len(dls_tmp) == 0:
            pass
        else:
            dls.append( dls_tmp )
    # Stack into an (n_patches, n_ell) array.
    dls = np.vstack([dls[i] for i in xrange(len(dls))])
    return dls
def GetTau(tt, tt_ref, lmin=650, lmax=2000, method='RK'):
    """Estimate tau from the ratio of a spectrum to a reference spectrum
    over the multipole range [lmin, lmax], with the 'RK' or 'CR' estimator.

    NOTE(review): delta_ell = lmin - lmax is NEGATIVE, which flips the
    sign of the 'RK' estimate; the bin width is usually lmax - lmin --
    confirm against the intended estimator definition.
    NOTE(review): any other *method* value falls through and returns None.
    """
    assert (tt.size == tt_ref.size)
    delta_ell = lmin - lmax
    if method == 'RK' :
        return np.sum(-0.5 * np.log(tt[lmin:lmax+1]/tt_ref[lmin:lmax+1])) / delta_ell
    elif method == 'CR':
        return -0.5 * np.log(np.sum(tt[lmin:lmax+1])/np.sum(tt_ref[lmin:lmax+1]))# / delta_ell
def GetManyTaus(dls, dl_ref, lmin=650, lmax=2000, method='RK'):
    """Apply GetTau row-wise: one tau estimate per patch spectrum in *dls*.

    dls has shape (n_patches, n_ell); dl_ref has shape (n_ell,).
    Python 2 only (xrange).
    """
    assert (dls.shape[1] == dl_ref.shape[0])
    taus = np.zeros(dls.shape[0])
    for i in xrange(dls.shape[0]):
        taus[i] = GetTau(dls[i,:], dl_ref, lmin=lmin, lmax=lmax, method=method)
    return taus
def err_skew(taus):
    """Standard error of the sample skewness for a sample of len(taus)."""
    n = len(taus)
    numerator = 6. * n * (n - 1)
    denominator = (n - 2) * (n + 1) * (n + 3)
    return np.sqrt(numerator / denominator)
# Params
# lmin = 500
lmax = 1900
#Files
spectra_folder_5deg = 'spectra_patches_radius5deg_gal080'
spectra_folder_10deg = 'spectra_patches_radius10deg_gal080'
spectra_folder_15deg = 'spectra_patches_radius15deg_gal080'
spectra_ref_file = 'Dl_smica_halfmission1_cross_halfmission2_filt_200_2000_gal080.dat'
# Reference spectrum: column 0 = ell, column 1 = Dl_TT.
l, dltt_ref = np.loadtxt(spectra_ref_file, unpack=1)
# embed()
# Per-patch spectra for the three patch radii.
dls_5deg = GetDls(spectra_folder_5deg)
dls_10deg = GetDls(spectra_folder_10deg)
dls_15deg = GetDls(spectra_folder_15deg)
fig, ax = subplots(2,3, figsize=(20,12))
# Different lmax plot ~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Rows of the figure: lmax = 1200 and 1900; columns: 5/10/15 deg radii.
for iLM, LM in enumerate([1200,1900]):
    ax[iLM,0].set_title(r'Radius 5 deg - $\ell_{\rm max}=%d - N=%d$'%(LM,dls_5deg.shape[0]), size=15)
    ax[iLM,1].set_title(r'Radius 10 deg - $\ell_{\rm max}=%d - N=%d$'%(LM,dls_10deg.shape[0]), size=15)
    ax[iLM,2].set_title(r'Radius 15 deg - $\ell_{\rm max}=%d - N=%d$'%(LM,dls_15deg.shape[0]), size=15)
    # One histogram per lmin choice, annotated with the sample skewness.
    for lm in [300, 650, 1000]:
        taus_15_CR = GetManyTaus(dls_15deg, dltt_ref, lm, LM, method='CR')
        taus_10_CR = GetManyTaus(dls_10deg, dltt_ref, lm, LM, method='CR')
        taus_5_CR = GetManyTaus(dls_5deg, dltt_ref, lm, LM, method='CR')
        hist(taus_5_CR, 'knuth', histtype='step', ax=ax[iLM,0], label=r'$\ell_{\rm min} = %d\, \gamma_1=%.2f\pm%.2f$'%(lm,stats.skew(taus_5_CR),err_skew(taus_5_CR)))
        hist(taus_10_CR, 'knuth', histtype='step', ax=ax[iLM,1], label=r'$\ell_{\rm min} = %d\, \gamma_1=%.2f\pm%.2f$'%(lm,stats.skew(taus_10_CR),err_skew(taus_10_CR)))
        hist(taus_15_CR, 'knuth', histtype='step', ax=ax[iLM,2], label=r'$\ell_{\rm min} = %d\, \gamma_1=%.2f\pm%.2f$'%(lm,stats.skew(taus_15_CR),err_skew(taus_15_CR)))
        # hist(taus_5_CR, 'knuth', histtype='step', ax=ax[iLM,0], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,LM,stats.skew(taus_5_CR),err_skew(taus_5_CR)))
        # hist(taus_10_CR, 'knuth', histtype='step', ax=ax[iLM,1], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,LM,stats.skew(taus_10_CR),err_skew(taus_10_CR)))
        # hist(taus_15_CR, 'knuth', histtype='step', ax=ax[iLM,2], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,LM,stats.skew(taus_15_CR),err_skew(taus_15_CR)))
# Cosmetics shared by both rows of panels (Python 2: xrange).
for i in xrange(3):
    ax[0,i].legend(loc='best')
    ax[1,i].legend(loc='best')
    ax[1,i].set_xlabel(r'$\hat{\tau}$', size=15)
    ax[0,i].set_xlim(-0.1,0.1)
    ax[1,i].set_xlim(-0.1,0.1)
    ax[0,i].axvline(0, ls='--', color='grey')
    ax[1,i].axvline(0, ls='--', color='grey')
savefig('plots/taus_smica_halfmission1_cross_halfmission2_filt_200_2000_lmax1200_vs_lmax1900_methodCR.pdf', bboxes_inches='tight')
show()
# fig, ax = subplots(2,3, figsize=(15,5))
# ax[0,0].set_title(r'Radius 5 deg - RK - $N=%d$'%dls_5deg.shape[0], size=15)
# ax[0,1].set_title(r'Radius 10 deg - RK - $N=%d$'%dls_10deg.shape[0], size=15)
# ax[0,2].set_title(r'Radius 15 deg - RK - $N=%d$'%dls_15deg.shape[0], size=15)
# ax[1,0].set_title(r'Radius 5 deg - CR - $N=%d$'%dls_5deg.shape[0], size=15)
# ax[1,1].set_title(r'Radius 10 deg - CR - $N=%d$'%dls_10deg.shape[0], size=15)
# ax[1,2].set_title(r'Radius 15 deg - CR - $N=%d$'%dls_15deg.shape[0], size=15)
# for lm in [300, 650, 1000]:
# taus_15 = GetManyTaus(dls_15deg, dltt_ref, lm, lmax)
# taus_10 = GetManyTaus(dls_10deg, dltt_ref, lm, lmax)
# taus_5 = GetManyTaus(dls_5deg, dltt_ref, lm, lmax)
# taus_15_CR = GetManyTaus(dls_15deg, dltt_ref, lm, lmax, method='CR')
# taus_10_CR = GetManyTaus(dls_10deg, dltt_ref, lm, lmax, method='CR')
# taus_5_CR = GetManyTaus(dls_5deg, dltt_ref, lm, lmax, method='CR')
# taus_5 = taus_5[np.where(taus_5 != np.nan)[0]]
# taus_10 = taus_10[np.where(taus_10 != np.nan)[0]]
# taus_15 = taus_15[np.where(taus_15 != np.nan)[0]]
# embed()
# hist(taus_5, 'knuth', histtype='step', ax=ax[0,0], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,lmax,stats.skew(taus_5),err_skew(taus_5)))
# hist(taus_10, 'knuth', histtype='step', ax=ax[0,1], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,lmax,stats.skew(taus_10),err_skew(taus_10)))
# hist(taus_15, 'knuth', histtype='step', ax=ax[0,2], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,lmax,stats.skew(taus_15),err_skew(taus_15)))
# hist(taus_5_CR, 'knuth', histtype='step', ax=ax[1,0], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,lmax,stats.skew(taus_5_CR),err_skew(taus_5_CR)))
# hist(taus_10_CR, 'knuth', histtype='step', ax=ax[1,1], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,lmax,stats.skew(taus_10_CR),err_skew(taus_10_CR)))
# hist(taus_15_CR, 'knuth', histtype='step', ax=ax[1,2], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,lmax,stats.skew(taus_15_CR),err_skew(taus_15_CR)))
# for i in xrange(3):
# ax[0,i].legend(loc='best')
# ax[1,i].legend(loc='best')
# ax[1,i].set_xlabel(r'$\hat{\tau}$', size=15)
# ax[0,i].set_xlim(-0.1,0.1)
# ax[1,i].set_xlim(-0.1,0.1)
# show()
# fig, ax = subplots(1,3, figsize=(20,6))
# ax[0].set_title(r'Radius 5 deg - CR - $N=%d$'%dls_5deg.shape[0], size=15)
# ax[1].set_title(r'Radius 10 deg - CR - $N=%d$'%dls_10deg.shape[0], size=15)
# ax[2].set_title(r'Radius 15 deg - CR - $N=%d$'%dls_15deg.shape[0], size=15)
# for lm in [300, 650, 1000]:
# taus_15 = GetManyTaus(dls_15deg, dltt_ref, lm, lmax, method='CR')
# taus_10 = GetManyTaus(dls_10deg, dltt_ref, lm, lmax, method='CR')
# taus_5 = GetManyTaus(dls_5deg, dltt_ref, lm, lmax, method='CR')
# hist(taus_5, 'knuth', histtype='step', ax=ax[0], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,lmax,stats.skew(taus_5),err_skew(taus_5)))
# hist(taus_10, 'knuth', histtype='step', ax=ax[1], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,lmax,stats.skew(taus_10),err_skew(taus_10)))
# hist(taus_15, 'knuth', histtype='step', ax=ax[2], label=r'$(\ell_{\rm min},\ell_{\rm max}) = (%d,%d)\, \gamma_1=%.2f\pm%.2f$'%(lm,lmax,stats.skew(taus_15),err_skew(taus_15)))
# for i in xrange(3):
# ax[i].legend(loc='best')
# ax[i].set_xlabel(r'$\hat{\tau}$', size=15)
# ax[i].set_xlim(-0.1,0.1)
# ax[i].axvline(0, ls='--', color='grey')
# # show()
# savefig('plots/taus_smica_halfmission1_cross_halfmission2_filt_200_2000_lmax'+str(lmax)+'_methodCR.pdf', bboxes_inches='tight')
# close()
# mycosmo = Cosmo({'tau':0.06, 'As':2.1e-9})
# tt = mycosmo.cmb_spectra(2000,dl=1)[:,0]/1e12
# fig, ax = subplots(1,3, figsize=(20,6))
# ax[0].set_title(r'Radius 5 deg - CR - $N=%d$'%dls_5deg.shape[0], size=15)
# ax[1].set_title(r'Radius 10 deg - CR - $N=%d$'%dls_10deg.shape[0], size=15)
# ax[2].set_title(r'Radius 15 deg - CR - $N=%d$'%dls_15deg.shape[0], size=15)
# for i in xrange(dls_5deg.shape[0]):
# ax[0].plot(dls_5deg[i,:],color='grey', alpha=0.05)
# ax[0].plot(dltt_ref,'k')
# ax[0].plot(tt,'r')
# for i in xrange(dls_10deg.shape[0]):
# ax[1].plot(dls_10deg[i,:],color='grey', alpha=0.05)
# ax[1].plot(dltt_ref,'k')
# ax[1].plot(tt,'r')
# for i in xrange(dls_15deg.shape[0]):
# ax[2].plot(dls_15deg[i,:],color='grey', alpha=0.05)
# ax[2].plot(dltt_ref,'k')
# ax[2].plot(tt,'r')
# for i in xrange(3):
# # ax[i].legend(loc='best')
# ax[i].set_xlabel(r'$\ell$', size=15)
# ax[i].set_xlim(2,2000)
# ax[i].axhline(0, ls='--', color='grey')
# show()
# embed()
|
999,069 | 051b94f0951aaae2d7acb06ecf4c604bb56fa570 |
"""
need to check out
https://github.com/VRGhost/vbox
"""
import re
import os
from subprocess import CalledProcessError
from .shellcommand import ShellCommand
from . import utils
# Pieces of the regex that parses `VBoxManage showvminfo` disk lines:
# "<controller> (<device>, <port>): <file.vmdk> (UUID: <uuid>)".
re_sctl = r'(?P<sctl>.{0,}?)\s*\((?P<device>\d+),\s*(?P<port>\d+)\)\s*:'
re_vmdk = r'\s*(?P<vmdkfile>.{0,}?)\s*'
re_uuid = r'\(UUID:\s*(?P<uuid>[\w\-]+)\)\s*'
re_vm = re.compile(r'^{0}{1}{2}$'.format(re_sctl, re_vmdk, re_uuid))

class VMInfo(object):
    """Plain attribute container for one VM's disk info (filled in get_vm_info)."""
    pass
def get_vm_info(vmname):
    """Parse `VBoxManage showvminfo` output into a VMInfo.

    Returns a VMInfo with hddfile/basedir/hdduuid/hddsctl/sctldev/sctlport
    attributes, or None if the vmdk line cannot be parsed.
    """
    showvm = 'VBoxManage showvminfo {vmname}'
    # Keep only the line mentioning both the VM name and 'vmdk'.
    command = ShellCommand(showvm, vmname=vmname).grep(vmname).grep('vmdk')
    utils.debug(command.prompt)
    utils.debug(command)
    vminfomatch = re_vm.match(command.out)
    if vminfomatch is None:
        utils.error('ERROR: cannot extract VM data for {0}'.format(vmname))
        return None
    vminfo = vminfomatch.groupdict()
    vm = VMInfo()
    vm.hddfile = vminfo['vmdkfile']
    vm.basedir = os.path.dirname(vm.hddfile)
    vm.hdduuid = vminfo['uuid']
    vm.hddsctl = vminfo['sctl']
    vm.sctldev = vminfo['device']
    vm.sctlport = vminfo['port']
    return vm
def remove_vm_hdd(vmuuid, vminfo, hdduuid):
    """Detach the HDD described by *vminfo* from VM *vmuuid*.

    Uses `VBoxManage storageattach ... --medium none` to detach.
    NOTE(review): the *hdduuid* parameter is never used -- confirm
    whether it should select the medium.
    """
    cmd = ' '.join([
        'VBoxManage',
        'storageattach {vm}',
        '--storagectl {sctl}',
        '--medium {med}',
        '--device {dev}',
        '--port {port}',
        '--type hdd',
    ])
    command = ShellCommand(
        cmd,
        vm=vmuuid,
        sctl=vminfo.hddsctl,
        med='none',  # 'none' detaches the medium
        dev=vminfo.sctldev,
        port=vminfo.sctlport
    )
    utils.debug(command.prompt)
    result = command.out
    if result:
        utils.debug(result)
    return result
def set_vm_hdd(vm, sctl=None, img=None, dev=0, port=0):
    """Attach a vmdk image to VM *vm* via `VBoxManage storageattach`.

    Defaults: controller 'SATAController' and the conventional
    '~/VirtualBox VMs/<vm>/box-disk1.vmdk' image path.
    Returns the command's output (empty on success).
    """
    default = '~/VirtualBox VMs/{0}/box-disk1.vmdk'
    sctl = sctl if sctl else 'SATAController'
    img = img if img else os.path.expanduser(default.format(vm))
    cmd = ' '.join([
        'VBoxManage',
        'storageattach {vm}',
        '--storagectl {sctl}',
        '--medium "{vmdk}"',
        '--device {dev}',
        '--port {port}',
        '--type hdd',
    ])
    command = ShellCommand(cmd, vm=vm, sctl=sctl, vmdk=img, dev=dev, port=port)
    utils.debug(command.prompt)
    result = command.out
    if result:
        utils.debug(result)
    return result
|
999,070 | bacbd6fc47b37b9ec03044c242073a4b4c8100f1 | # from __future__ import absolute_import
import os
import sys
import time
import datetime
import shutil
from stat import S_ISREG, S_ISDIR, ST_CTIME, ST_MODE
import flask
import pickle
import werkzeug
import celery
import celery.exceptions
import logging
# import batches
import flask_restful
# import tasks
import config
import cv2
import numpy as np
from flask_apscheduler import APScheduler
from flask_sqlalchemy import SQLAlchemy
# Each constant has a built-in default that config.py may override.
# NOTE(review): default free-space threshold is 1000 GiB (~1 TB), which
# looks unusually high -- confirm the intended unit.
MIN_AVAIL_DISK_SIZE = 1000 * 1024 * 1024 * 1024
if hasattr(config, 'MIN_AVAIL_DISK_SIZE'):
    MIN_AVAIL_DISK_SIZE = config.MIN_AVAIL_DISK_SIZE
# How long uploaded image directories are kept before daily pruning.
MAX_IMAGE_STORE_DAYS = 7
if hasattr(config, 'MAX_IMAGE_STORE_DAYS'):
    MAX_IMAGE_STORE_DAYS = config.MAX_IMAGE_STORE_DAYS
# How long AccessLog rows are kept before daily pruning.
MAX_ACCESS_LOGS_STORE_DAYS = 14
if hasattr(config, 'MAX_ACCESS_LOGS_STORE_DAYS'):
    MAX_ACCESS_LOGS_STORE_DAYS = config.MAX_ACCESS_LOGS_STORE_DAYS
# Ensure the upload directory exists at import time.
if not os.path.exists(config.UPLOAD_FOLDER):
    os.makedirs(config.UPLOAD_FOLDER)
def remove_history_images_uploaded():
    """Prune the upload folder: delete stray files and expired hourly dirs.

    Python 2 only (print statements, `except X, e` syntax). Directories
    are named %Y%m%d%H by object_detection(); anything older than
    MAX_IMAGE_STORE_DAYS is removed, and newer dirs are removed too when
    free disk space drops below MIN_AVAIL_DISK_SIZE.
    """
    dirpath = config.UPLOAD_FOLDER
    # get all entries in the directory w/ stats
    entries = (os.path.join(dirpath, fn) for fn in os.listdir(dirpath))
    entries = ((os.stat(path), path) for path in entries)
    # delete all image files
    entries = (path for stat, path in entries if S_ISREG(stat[ST_MODE]))
    for path in entries:
        print 'remove tmp file: ', path
        os.remove(path)
    # handle dirs
    entries = (os.path.join(dirpath, fn) for fn in os.listdir(dirpath))
    entries = ((os.stat(path), path) for path in entries)
    # leave only dirs, insert creation date
    entries = ((stat[ST_CTIME], path)
               for stat, path in entries if S_ISDIR(stat[ST_MODE]))
    #NOTE: on Windows `ST_CTIME` is a creation date
    # but on Unix it could be something else
    #NOTE: use `ST_MTIME` to sort by a modification date
    oldest_datetime = datetime.datetime.now(
    ) - datetime.timedelta(days=MAX_IMAGE_STORE_DAYS)
    # Oldest directories first, so space is reclaimed from them first.
    for cdate, path in sorted(entries):
        try:
            print path
            # The directory name itself encodes its creation hour.
            created_datetime = datetime.datetime.strptime(
                os.path.basename(path), '%Y%m%d%H')
            if created_datetime < oldest_datetime:
                print 'rm too old image dir: ', path
                shutil.rmtree(path)
            else:
                #if we are short of disk space, delete the folder still
                stat = os.statvfs(config.UPLOAD_FOLDER)
                avail_size = stat.f_bsize * stat.f_bavail
                if avail_size < MIN_AVAIL_DISK_SIZE:
                    print 'rm tmp image dir due to lack of disk space: ', path
                    shutil.rmtree(path)
        except ValueError, verror:
            # Name didn't parse as %Y%m%d%H, so the app didn't create it.
            print verror
            # delete dirs not created by app
            shutil.rmtree(path)
        except Exception, ex:
            print ex
def remove_history_access_logs():
    """Delete AccessLog rows older than MAX_ACCESS_LOGS_STORE_DAYS.

    Python 2 only. On any failure the session is rolled back and the
    tables are dropped and recreated -- NOTE(review): that wipes ALL
    access logs; confirm this drastic recovery is intended.
    """
    try:
        datetime_ndays_ago = datetime.datetime.now(
        ) - datetime.timedelta(days=MAX_ACCESS_LOGS_STORE_DAYS)
        access_logs = AccessLog.query.filter(
            AccessLog.created_datetime < datetime_ndays_ago)
        access_logs.delete(synchronize_session='fetch')
        db.session.commit()
        # access_logs = AccessLog.query.all()
        # print 'after remove access logs'
        # for al in access_logs:
        #     print al.return_code, al.error_message, al.created_datetime
    except Exception, ex:
        print ex
        db.session.rollback()
        db.drop_all()
        db.create_all()
def remove_history_data():
    """Daily cron entry point (see FlaskConfig.JOBS): prune images and logs."""
    print 'remove history images and access logs'
    remove_history_images_uploaded()
    remove_history_access_logs()
app = flask.Flask(__name__)

class FlaskConfig(object):
    """APScheduler configuration: run remove_history_data daily at 00:00."""
    JOBS = [
        {
            'id': 'remove_history_data',
            'func': 'app:remove_history_data',
            # 'args': (1, 2),
            'trigger': 'cron',
            'hour': 0
        }
    ]
    SCHEDULER_API_ENABLED = True

app.config.from_object(FlaskConfig())
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
class AccessLog(db.Model):
    """One row per detection API call; counted by /stat and pruned daily."""
    id = db.Column(db.Integer, primary_key=True)
    created_datetime = db.Column(
        db.DateTime, default=datetime.datetime.now, index=True)
    # -1 means success; 0 = task timed out; 1 = other failure
    # (see object_detection()).
    return_code = db.Column(db.Integer, default=-1, index=True)
    error_message = db.Column(db.String(120), default='')
    # def __init__(self, username, email):
    #     self.username = username
    #     self.email = email
    def __repr__(self):
        # NOTE(review): repr says '<User ...>' although this is an
        # AccessLog -- probably a copy/paste leftover.
        return '<User %r>' % self.error_message
# Create the tables at import time (idempotent for existing tables).
db.create_all()
# def test_init_database():
#     al = AccessLog()
#     al.return_code = 1
#     al.error_message = 'yes'
#     db.session.add(al)
#     al = AccessLog()
#     db.session.add(al)
#     al = AccessLog()
#     al.created_datetime = datetime.datetime.now() - datetime.timedelta(days=20)
#     db.session.add(al)
#     db.session.commit()
#     access_logs = AccessLog.query.all()
#     print 'all access logs'
#     for al in access_logs:
#         print al.return_code, al.error_message, al.created_datetime
# test_init_database()
# Start the background scheduler that runs the FlaskConfig.JOBS cron job.
scheduler = APScheduler()
# it is also possible to enable the API directly
# scheduler.api_enabled = True
scheduler.init_app(app)
scheduler.start()
# NOTE(review): mid-file import; the celery broker settings come from
# the local settings module.
import settings
the_celery = celery.Celery('tasks')
the_celery.config_from_object(settings)
@the_celery.task(name="tasks.object_detection_task", queue="important")
def object_detection_task(imgstream, secure_filename):
    # Client-side stub: registers the task signature so .apply_async can
    # route calls to the "important" queue; presumably the real body runs
    # in the worker's tasks module -- confirm.
    pass
# curl -X POST -F image=@hy0.jpg http://localhost:8000/person_detection
def object_detection():
    """Handle one uploaded image: dispatch the celery detection task,
    archive the image under an hourly directory, and log the outcome.

    Python 2 only. Returns {'targets': ...} on success or {'error': ...}
    on timeout/failure; every call appends an AccessLog row whose
    return_code is -1 (success), 0 (task revoked/timeout) or 1 (error).
    """
    print flask.request.files
    detection_result = {}
    return_code = -1
    try:
        # print len(flask.request.files)
        imagefile = flask.request.files['image']
        imagestream = imagefile.read()
        # Timestamp prefix keeps archived filenames unique.
        filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
            werkzeug.secure_filename(imagefile.filename)
        # expires=5: drop the task if no worker picks it up within 5s.
        res = object_detection_task.apply_async(
            args=[imagestream, filename_], expires=5)
        # create dir each hour to store images
        subdir = datetime.datetime.now().strftime('%Y%m%d%H')
        storedir = os.path.join(config.UPLOAD_FOLDER, subdir)
        if not os.path.exists(storedir):
            os.makedirs(storedir)
        filename = os.path.join(storedir, filename_)
        with open(filename, "wb") as f:
            f.write(imagestream)
        # Block until the worker returns the detection result.
        result = res.get()
        # result = 1
        # if len(result)>0:
        #     filename = os.path.join(config.UPLOAD_FOLDER_DETECTED, filename_)
        #     with open(filename, 'w') as f:
        #         pickle.dump(result, f)
        #     if draw_result:
        #         draw_filename = os.path.join(config.UPLOAD_FOLDER_DRAW, filename_)
        #         img_data = cv2.imdecode(np.asarray(bytearray(imagestream), dtype=np.uint8), -1)
        #         for t in result:
        #             cv2.rectangle(img_data, (t['x'],t['y']), (t['x']+t['w'],t['y']+t['h']),
        #                           (255,0,0),3)
        #         cv2.imwrite(draw_filename, img_data)
        detection_result = {'targets': result}
    except celery.exceptions.TaskRevokedError:
        # Task expired before a worker ran it.
        print('time is out')
        return_code = 0
        detection_result = {'error': 'time is out'}
    except Exception, ex:
        print(ex)
        return_code = 1
        detection_result = {'error': str(ex)}
    # Record the call outcome; on DB failure, recreate the tables
    # (NOTE(review): drop_all wipes existing logs -- confirm intended).
    try:
        access_log = AccessLog()
        access_log.return_code = return_code
        db.session.add(access_log)
        db.session.commit()
    except Exception, ex:
        print ex
        db.session.rollback()
        db.session.commit()
        db.drop_all()
        db.create_all()
    return detection_result
class ObjectDetection(flask_restful.Resource):
    """POST /object_detection -- delegates to object_detection()."""
    def post(self):
        return object_detection()

class PersonDetection(flask_restful.Resource):
    """POST /person_detection -- same handler as /object_detection."""
    def post(self):
        return object_detection()
class Stat(flask_restful.Resource):
    """POST /stat -- count total/successful API calls in a datetime window."""
    def post(self):
        try:
            print flask.request.args
            # datetime string format: 20170210213021
            # xxxx(year)xx(month)xx(day)xx(24hour)xx(minute)xx(second)
            start_datetime = datetime.datetime.strptime(
                flask.request.args.get('start_datetime'), "%Y%m%d%H%M%S")
            end_datetime = datetime.datetime.strptime(
                flask.request.args.get('end_datetime'), "%Y%m%d%H%M%S")
            access_logs = AccessLog.query.filter(
                AccessLog.created_datetime.between(start_datetime, end_datetime))
            # success_access_logs = access_logs.filter(AccessLog.return_code == -1)
            total_logs_num = 0
            success_logs_num = 0
            # filter in memory, instead of querying database again
            # (return_code == -1 marks success; see object_detection()).
            for access_log in access_logs:
                total_logs_num += 1
                if access_log.return_code == -1:
                    success_logs_num += 1
            # NOTE(review): 'sucess_calls' is misspelled but is part of
            # the public API response -- renaming would break clients.
            return{"total_calls": total_logs_num, 'sucess_calls': success_logs_num}
        except Exception, ex:
            print ex
            db.session.rollback()
            db.session.commit()
            # recreate tables
            db.drop_all()
            db.create_all()
            # NOTE(review): {'error', str(ex)} is a SET literal, not a
            # dict -- almost certainly meant {'error': str(ex)}.
            return {'error', str(ex)}
# REST endpoints: two detection routes share one handler, plus /stat.
api = flask_restful.Api(app)
api.add_resource(PersonDetection, '/person_detection')
api.add_resource(ObjectDetection, '/object_detection')
api.add_resource(Stat, '/stat')
|
999,071 | a9db5a82ea63c5d18440a8c36a875bd5f3236371 | from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from usuarios.models import User, NombreField
# Create your models here.
############################################################################################################################################################################
class Puesto_trabajo(models.Model):
    """A job opening posted by a company (User)."""
    empresa = models.ForeignKey(User, null=True, on_delete=models.CASCADE)  # company owning the posting
    titulo = models.CharField(max_length=100, null=True)  # job title
    descripcion = models.TextField(null=True)  # job description
    def __str__(self):
        return self.titulo
############################################################################################################################################################################
class CVfield(models.Model):
    """Wrapper model holding a single uploaded CV file."""
    cv = models.FileField(upload_to='cv/', blank=True, null=True)
############################################################################################################################################################################
class Entrevista(models.Model):
    """An interview: applicant data plus scheduling and evaluation status."""
    # Workflow states: pending -> interviewed -> evaluated
    STATUS = (
        ('Pendiente', 'Pendiente'),
        ('Entrevistado/a', 'Entrevistado/a'),
        ('Evaluado/a', 'Evaluado/a')
    )
    # Applicant data:
    nombre_postulante = NombreField(
        _('Nombre del postulante'), blank=True, null=True)
    email = models.EmailField(_('Correo electrónico'), max_length=254)
    fecha_ingresado = models.DateTimeField(auto_now_add=True)
    # File-upload setup reference: https://simpleisbetterthancomplex.com/tutorial/2016/08/01/how-to-upload-files-with-django.html
    cv = models.OneToOneField(
        CVfield, on_delete=models.CASCADE, blank=True, null=True)
    # Interview data
    empresa = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
    fecha_entrevista = models.DateTimeField(blank=True, null=True)
    puesto_trabajo = models.ForeignKey(
        Puesto_trabajo, on_delete=models.CASCADE)
    status = models.CharField(
        max_length=200, null=True, choices=STATUS, default='Pendiente')
    def __str__(self):
        return 'Entrevista con {} por el puesto {}'.format(self.nombre_postulante, self.puesto_trabajo)
# Open question: should the applicant be a full User account?
# class Postulante(models.Model):
#     user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
|
999,072 | 3a50834eb84e6b4235b5f0537495f29877d036e2 | import logging
from .formatter import CustomFormatter
import os
def get_logger(name="LPBv2", log_to_file=False):
    """Create a DEBUG-level logger with a colored console handler.

    Parameters
    ----------
    name : str
        Logger name, also used as the log-file basename.
    log_to_file : bool
        When True, additionally write plain-formatted records to
        ``logs/<name>.log``.

    Returns
    -------
    logging.Logger
        The configured logger (propagation disabled).
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # Removing duplicate handlers when instantiating the logger in multiple files
    if logger.hasHandlers():
        logger.handlers.clear()
    logger.propagate = False
    if log_to_file:
        # FIX: exist_ok avoids the race between the previous
        # os.path.exists() check and os.makedirs().
        logfolder = "logs"
        os.makedirs(logfolder, exist_ok=True)
        logfile = f"{logfolder}/{name}.log"
        # Logging to a file
        fh = logging.FileHandler(logfile)
        fh.setLevel(logging.DEBUG)
        # FIX: the format string contains no interpolated variables, so the
        # misleading f-prefix was dropped.
        simpleFormatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
        )
        fh.setFormatter(simpleFormatter)
        logger.addHandler(fh)
    # Logging to console with color
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(CustomFormatter())
    logger.addHandler(ch)
    return logger
|
999,073 | a4042cfb24813e2fd7ed1d961dc244808c0e76b0 | import numpy as np
import os
import math
import pandas as pd
from statsmodels.stats.proportion import proportion_confint
from sklearn.tree import DecisionTreeClassifier
from sklearn.base import ClassifierMixin
def safe_division(dividend, divisor, epsilon):
    """Divide `dividend` by `divisor`, substituting `epsilon` when the divisor is zero."""
    denominator = epsilon if divisor == 0 else divisor
    return dividend / denominator
def confidence_interval(X, hyp, y, alpha=.95):
    """Wilson confidence interval for the accuracy of `hyp` on (X, y).

    Returns the (lower, upper) bounds at confidence level `alpha`.
    """
    predictions = hyp.predict(X)
    successes = np.count_nonzero(predictions == y)
    trials = X.shape[0]
    lower, upper = proportion_confint(successes, trials, alpha=1 - alpha, method="wilson")
    return lower, upper
def choice_with_proportion(predictions, class_predicted, proportion, extra=0):
    # Select indices of the most confident predictions while keeping roughly
    # the requested per-class proportion of the n samples (plus `extra` each).
    n = len(predictions)
    # Target number of picks per class, derived from the proportion dict.
    for_each_class = {c: int(n * j) for c, j in proportion.items()}
    indices = np.zeros(0)
    for c in proportion:
        instances = class_predicted == c
        # NOTE(review): the boolean mask is applied to the *positions* of the
        # argsort output, not to the class labels of the sorted indices; this
        # mixes rank positions with sample positions -- confirm intended.
        to_add = np.argsort(predictions, kind="mergesort")[instances][::-1][0:for_each_class[c] + extra]
        indices = np.concatenate((indices, to_add))
    # Concatenation produced floats; cast back to usable integer indices.
    return indices.astype(int)
def calculate_prior_probability(y):
    """Calculate the priori probability of each label

    Parameters
    ----------
    y : array-like of shape (n_samples,)
        array of labels

    Returns
    -------
    class_probability: dict
        dictionary with priori probability (value) of each label (key)
    """
    labels, counts = np.unique(y, return_counts=True)
    total = len(y)
    return {label: float(count / total) for label, count in zip(labels, counts)}
def is_int(x):
    """Check if x is of integer type, but not boolean"""
    # From sktime: BSD 3-Clause
    # bool is a subclass of int in Python, so it must be excluded explicitly
    if isinstance(x, bool):
        return False
    return isinstance(x, (int, np.integer))
def mode(y):
    """Calculate the mode of a list of values

    Parameters
    ----------
    y : array-like of shape (n_samples, n_estimators)
        array of values

    Returns
    -------
    mode: array-like of shape (n_samples,)
        array of mode of each label
    count: array-like of shape (n_samples,)
        array of count of the mode of each label
    """
    frame = pd.DataFrame(np.array(y))
    # Per-column mode; .loc[0, :] keeps one value per column when ties occur.
    column_modes = frame.mode(axis=0).loc[0, :]
    # Highest occurrence count per column.
    column_counts = frame.apply(lambda column: column.value_counts().max())
    return column_modes.values, column_counts.values
def check_n_jobs(n_jobs):
    """Check `n_jobs` parameter according to the scikit-learn convention.
    From sktime: BSD 3-Clause

    Parameters
    ----------
    n_jobs : int, positive or -1
        The number of jobs for parallelization.

    Returns
    -------
    n_jobs : int
        Checked number of jobs.
    """
    # scikit-learn convention
    # https://scikit-learn.org/stable/glossary.html#term-n-jobs
    if n_jobs is None:
        return 1
    # Inlined integer check: bool is a subclass of int, so exclude it.
    integer_like = isinstance(n_jobs, (int, np.integer)) and not isinstance(n_jobs, bool)
    if not integer_like:
        raise ValueError(f"`n_jobs` must be None or an integer, but found: {n_jobs}")
    if n_jobs < 0:
        return os.cpu_count()
    return n_jobs
def calc_number_per_class(y_label):
    """Return, per class, ceil(prior * 1/min_prior) -- i.e. how many samples
    of each class to take so the rarest class contributes one."""
    labels, counts = np.unique(y_label, return_counts=True)
    total = len(y_label)
    # Inlined prior computation (label -> relative frequency).
    priors = {label: float(count / total) for label, count in zip(labels, counts)}
    scale = 1 / min(priors.values())
    return {label: math.ceil(priors[label] * scale) for label in labels}
def check_classifier(base_classifier, can_be_list=True, collection_size=None):
    """Validate a base classifier (or collection of them).

    Returns a DecisionTreeClassifier when None is given, a list of validated
    classifiers for list/tuple input, or the classifier itself otherwise.
    Raises AttributeError on wrong collection length or non-classifier input.
    """
    if base_classifier is None:
        return DecisionTreeClassifier()
    if can_be_list and type(base_classifier) in (list, tuple):
        if collection_size is not None and len(base_classifier) != collection_size:
            raise AttributeError(f"base_classifier is a list of classifiers, but its length ({len(base_classifier)}) is different from expected ({collection_size})")
        # Validate each member in place (nested lists are not allowed).
        for position, member in enumerate(base_classifier):
            base_classifier[position] = check_classifier(member, False)
        return list(base_classifier)  # Transform to list
    if not isinstance(base_classifier, ClassifierMixin):
        raise AttributeError(f"base_classifier must be a ClassifierMixin, but found {type(base_classifier)}")
    return base_classifier
|
999,074 | 22b3f1cc3e81ecbbf5309c54b3f772019e1af398 | #!/usr/bin/env python3
import sys
import mechanicalsoup
# connect to Google (earlier comment said duckduckgo, but the code opens google.com)
browser = mechanicalsoup.StatefulBrowser()
browser.open("https://google.com/")
# fill in search form with the first CLI argument as the query
browser.select_form('form[action="/search"]')
browser["q"] = sys.argv[1]
# btnI is the "I'm Feeling Lucky" button: jumps straight to the first result
browser.submit_selected(btnName="btnI")
# display the URL we landed on
print(browser.get_url())
|
999,075 | 5c77d17247e98f5d709576c97b18cda8b4269a4b | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from accounts.admin import admin_site
from publication.models import Publication
# Register your models here.
admin_site.register(Publication)  # expose Publication on the custom admin site
|
999,076 | cec67716adb32cbaf1b43d32631e5aa540bc9593 | ii = [('CoolWHM2.py', 1), ('GodwWSL2.py', 2)] |
999,077 | 414399f72a04ffcc948afc110eb040b0245530e5 | #!/usr/bin/python
import os
import sys
import csv
import datetime
import time
import tweepy
def speedtest():
#Demarage de speedtest-cli
print 'Demarage du test'
a = os.popen("python /usr/local/bin/speedtest --simple").read()
print 'ran'
#Separation du resultat en 3 lignes (ping,down,up)
lines = a.split('\n')
print a
ts = time.time()
date =datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
#Si le speedtest ne peut se conecter mise a 0 des valeurs du debit
if "Cannot" in a:
p = 1000
d = 0
u = 0
#Recuperation des valeurs du ping down et up
else:
p = lines[0][6:11]
d = lines[1][10:14]
u = lines[2][8:12]
print date,p, d, u
#Conection a twitter
consumer_secret = "xxxxxxxx"
consumer_key = "xxxxxxx"
access_token = "xxxxxx"
access_token_secret = "xxxxxx"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#Tweet si le debit down est inferieur a ce qui est configure
if eval(d)<5:
print "Ecriture du tweet"
try:
#Mise en forme du Tweet
tweet="Hey @VOTRE_FAI pourquoi mon debit internet est de " + str(int(eval(d))) + "Mb/s en down et " + str(int(eval(u))) + "Mb/s en up avec un ping de " + str(int(eval(p))) + "ms quand je paie pour XXXMb/s en down ?"
api.update_status(status=tweet)
except Exception,e:
print str(e)
pass
return
if __name__ == '__main__':
    # Run one measurement/tweet cycle when executed as a script.
    speedtest()
    print 'Terminer avec succes'
|
999,078 | dd8082dec8dc1e9e3990be9b06e20c465e7b8d56 | from ai import AI
# Base player class
class Player(object):
    """A player identified by a disc color; moves are delegated to the board."""
    def __init__(self, color):
        # color of this player's discs (e.g. 'B' or 'W')
        self.color = color
    # choose an action -- overridden by subclasses
    def think(self, board):
        pass
    # place discs: apply `action` on the board, returning the flipped positions
    def move(self, board, action):
        flipped_pos = board._move(action, self.color)
        return flipped_pos
    # undo a move using the flipped positions saved by move()
    def unmove(self, board, action, flipped_pos):
        board._unmove(action, flipped_pos, self.color)
# Human player: reads moves from standard input
class HumanPlayer(Player):
    def __init__(self, color):
        super().__init__(color)

    def think(self, board):
        """Prompt until the user enters a legal coordinate; return (row, col)."""
        rows, cols = '12345678', 'ABCDEFGH'
        while True:
            action = input("Turn to '{}'. \nPlease think carefully and input a point.(such as 'A1'): ".format(self.color))  # A1~H8
            row_char = action[1]
            col_char = action[0].upper()
            if row_char not in rows or col_char not in cols:  # format check
                continue
            x, y = rows.index(row_char), cols.index(col_char)
            if (x, y) in board.get_legal_actions(self.color):  # legality check
                return x, y
# AI player (multiple inheritance: Player supplies the color, AI the brain)
class AIPlayer(Player, AI):
    def __init__(self, color, level_ix=0):
        super().__init__(color)  # init Player
        super(Player, self).__init__(level_ix)  # init AI

    def think(self, board):
        """Run the AI search against a hypothesized opponent and return the action.

        FIX: the original duplicated the 'W'/'B' branches and left `action`
        unbound (NameError) for any other color; derive the opponent color
        instead, which also removes the duplication.
        """
        print("Turn to '{}'. \nPlease wait a moment. AI is thinking...".format(self.color))
        enemy_color = 'B' if self.color == 'W' else 'W'
        enemy = AIPlayer(enemy_color)  # hypothesized enemy
        action = self.brain(board, enemy, 2)  # change the last number to alter search depth
        return action
999,079 | 61fb41476eee5d21cd2d4324d6fb5da2204c937a | #!/usr/bin/python3
from BPNN import *
from random import choice, randint, sample
from time import sleep
from collections import Counter
from multiprocessing import process
from copy import deepcopy
# Build 100 in-sample examples: input is the 12-bit binary encoding of a random
# value, target is the encoding of value + 1 (the nets learn "increment").
examples=[]
for i in range(100):
    value = randint(1,1000)
    ip = [float(s) for s in '{:012b}'.format(value)]
    op = [float(s) for s in '{:012b}'.format(value + 1)]
    examples.append( Example( ip, op ))
# Ensemble of 5 independent 12-12-12 networks.
nets = []
for i in range(5):
    nn = Neural_Net( alpha = 0.10, ip = 12, h = 12, op = 12)
    nn.initialize_weights( )
    nn.backward_connect_nodes( )
    nets.append(nn)
# Train each net until it misses at most 3 examples; collect the examples it
# still misclassifies at that point into `hardest`.
hardest = set()
for nn in nets:
    for i in range( 1000 ) :
        correct = 0
        incorrect = []
        # One epoch = len(examples) draws with replacement.
        for example in [choice( examples) for x in range( len( examples ) )]:
            nn.assign_input( example.ip)
            nn.calculate( )
            nn.train( example)
            actual = example.op
            # Binarize the output layer by rounding each node value.
            hypothesis = [abs(round(node.value,0)) for node in nn.opl.nodes]
            if hypothesis == actual:
                correct += 1
            else: incorrect.append(example)
        # Decay the learning rate every 10 epochs.
        if i % 10 == 0:
            nn.alpha -= 0.001
        # print('avg error: ', nn.avgerror)
        print('%2f%% correct. ' % (correct / len(examples) * 100), correct, ' correct out of ', len(examples), ' alpha ', nn.alpha)
        if correct >= len(examples) - 3:
            #for ex in [(x.ip, x.op) for x in incorrect]:
            #    print(ex[0])
            #    print(ex[1])
            for item in incorrect: hardest.add( item )
            break
#Train a new neural net to recognize the hardest to classify examples...
non_hardest_example_set = set(examples) - hardest
hardest_org = deepcopy(hardest)
# Relabel in place: -1.0 = easy, 1.0 = hard. NOTE(review): this mutates the
# shared Example objects, so `examples` is relabeled too.
for ex in non_hardest_example_set: ex.op = [-1.0]
for ex in hardest: ex.op = [1.0]
trainset = non_hardest_example_set.union(hardest)
hard_nn = Neural_Net(alpha = 0.1, ip=12, h=12, op=1)
hard_nn.initialize_weights( )
hard_nn.backward_connect_nodes( )
#print([(ex.op,ex.ip) for ex in trainset])
# NOTE(review): random.sample() on a set is removed in Python 3.11 -- convert
# trainset/hardest_org to lists before sampling on newer interpreters.
for i in range(1000):
    for ex in [sample(trainset,1)[0] for x in range(len(trainset))]:
        hard_nn.assign_input(ex.ip)
        hard_nn.calculate()
        hard_nn.train(ex)
    if i % 10 == 0:
        hard_nn.alpha -= 0.001
#Train a new neural net to solve the hardest to solve examples...
hard_solve_nn = Neural_Net(alpha = 0.1, ip =12, h =12, op=12)
hard_solve_nn.initialize_weights( )
hard_solve_nn.backward_connect_nodes( )
for i in range(1000):
    for ex in [sample(hardest_org,1)[0] for x in range(len(hardest_org))]:
        hard_solve_nn.assign_input(ex.ip)
        hard_solve_nn.calculate()
        hard_solve_nn.train(ex)
    if i % 10 == 0:
        hard_solve_nn.alpha -= 0.001
#Out of Sample 1000 EXAMPLES
examples=[]
for i in range(1000):
    value = i
    ip = [float(s) for s in '{:012b}'.format(value)]
    op = [float(s) for s in '{:012b}'.format(value + 1)]
    examples.append( Example( ip, op ))
# Evaluate each net out of sample, skipping examples the gatekeeper (hard_nn)
# flags as "hard".
for nn in nets:
    correct = 0
    for i, example in enumerate(examples):
        ###TEST
        hard_nn.assign_input(example.ip)
        hard_nn.calculate()
        if hard_nn.opl.nodes[0].value > -0.0:
            #print('Hard...')
            # NOTE(review): deleting from `examples` while enumerating it skips
            # the next element -- confirm this is acceptable.
            del examples[i]
            continue
        #hard_solve_nn.assign_input(example.ip)
        #hard_solve_nn.calculate()
        #print([abs(round(node.value,0)) for node in nn.opl.nodes])
        #else: print('Easy...')
        ###TEST
        nn.assign_input( example.ip)
        nn.calculate( )
        actual = example.op
        hypothesis = [abs(round(node.value,0)) for node in nn.opl.nodes]
        if hypothesis == actual:
            correct += 1
        #else:
            #pass
            #print(actual, 'Actual')
            #print(hypothesis, 'Hypothesis \n\n')
    print('%2f%% correct. ' % (correct / len(examples) * 100), correct, ' correct out of ', len(examples))
    sleep(3)
# Demo: make the ensemble count upward by feeding its own majority vote back
# in as the next input.
for nn in nets:
    print('Watch me count to one thousand now...')
    nn.assign_input( [float(s) for s in '{:012b}'.format(0)] )
for i in range(100):
    hypothesi = []
    for nn in nets:
        nn.calculate()
        hypothesis_in_list = [abs(round(node.value,0)) for node in nn.opl.nodes]
        hypothesis_in_decimal = int(''.join([str(int(f)) for f in hypothesis_in_list]),2)
        hypothesi.append(hypothesis_in_decimal)
    # Majority vote across the ensemble.
    winning_hypothesis = Counter(hypothesi).most_common()[0][0]
    print(winning_hypothesis)
    winning_hypothesis_in_list = [float(s) for s in '{:012b}'.format(winning_hypothesis)]
    for nn in nets:
        nn.assign_input(winning_hypothesis_in_list)
|
999,080 | 7db5df504d33f9f1478d5e884d2345730c13944b | # Generated by Django 2.2.5 on 2019-12-31 09:22
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames OprFinance.opr_bc_acc to opr_bc_acct."""
    dependencies = [
        ('CN171_operation', '0007_auto_20191227_1153'),
    ]
    operations = [
        migrations.RenameField(
            model_name='oprfinance',
            old_name='opr_bc_acc',
            new_name='opr_bc_acct',
        ),
    ]
|
999,081 | 4f06cbc6c77758178bbe4dfe8700f9edb869962e | from .meteo_station import MeteoStation
|
999,082 | e855d8a10f960acb22b13c56e7545aec65af4ae5 | #passing two arguments and printing
def add_sub(x, y):
    """Return the pair (x + y, x - y)."""
    return x + y, x - y
# Demo: unpack the (sum, difference) pair returned by add_sub and print it.
result1,result2 = add_sub(1,2)
print(result1,result2)
999,083 | a9341a299d0fdc21444a22784ccf1f9ed5b7abaa | import os
import subprocess
import shutil
# This script does the build, replacement and copying of the antlr generated files
# Invokes the ANTLR jar to generate a JavaScript visitor for Thyme.g4 into ./generated
buildCommand = 'java -jar "antlr-4.7.2-complete.jar" -Dlanguage=JavaScript "Thyme.g4" -o "./generated" -visitor';
def normalizePath (path):
    """Convert Windows-style backslash separators to the native os separator."""
    return os.sep.join(path.split("\\"))
def executeComand (command):
    """Run `command` in a shell and report the outcome.

    Returns "OK" on a zero exit status, or a "Process failed..." string when
    the command exits non-zero (check=True makes subprocess raise then).
    The function name's spelling is kept for backward compatibility.
    """
    try:
        # Create a new process
        processResult = subprocess.run (command, shell=True, check=True, universal_newlines=True)
        print (processResult.args)
        if processResult.returncode == 0:
            return "OK"
        # Unreachable in practice: with check=True a non-zero exit raises
        # CalledProcessError before reaching this branch; kept for safety.
        elif processResult.returncode == 1:
            return "Process failed\n" + processResult.stderr
    except subprocess.CalledProcessError as e:
        # FIX: compare against None with identity (`is not None`), not `!=`.
        if e.output is not None:
            return "Process failed\n" + e.output
        else:
            return "Process failed"
def replaceAntlrPath (src):
    """Rewrite the antlr4 import path inside generated .js files in `src`
    and copy each processed file one directory up."""
    replaceSource = "antlr4/index";
    replaceTarget = "lib/antlr4/index";
    for item in os.listdir (src):
        path = os.path.join (src, item);
        # Check if is File
        if not os.path.isdir (path):
            # Extract file extension
            fileExtension = path.split (".")[-1];
            # Only do refactor in javascript files
            if fileExtension == "js":
                print ("Processing file '" + item + "'");
                # Open the file for in-place read/write
                file = open (path, "r+");
                # Read file's content and refactor packages
                fileText = file.read ();
                file.seek (0);
                fileText = fileText.replace (replaceSource, replaceTarget);
                # Write new text to file
                file.write (fileText);
                # Truncate any leftover tail (new text may be shorter) and close
                file.truncate ()
                file.close();
                print ("Copying file '" + item + "'...");
                shutil.copy2 (path, normalizePath ("../"));
# Build files with ANTLR, then post-process the generated output on success.
print ("Building antlr files...");
result = executeComand (buildCommand);
if result == "OK":
    print ("\nCopying generated files...");
    # Rewrite import paths in ./generated and copy the files up one level
    replaceAntlrPath (os.path.join (os.getcwd (), "generated"));
    print ("Done");
else:
    print ("Some error ocurred!");
    print (result);
|
999,084 | 2754c8cea19a5214f8b69f763cf6cb9559e3f5e7 | import sys
import csv
import string
from collections import defaultdict
if __name__ == '__main__':
    # Initialize core variables from the CLI, falling back to defaults.
    # NOTE(review): transFile/emmisionsFile/laplaceFile are never used below --
    # the output filenames are hard-coded in the open() calls.
    if len(sys.argv) == 5:
        trainingFile = str(sys.argv[1])
        transFile = str(sys.argv[2])
        emmisionsFile = str(sys.argv[3])
        laplaceFile = str(sys.argv[4])
    else:
        print("<Using Default Parameters>")
        trainingFile = 'train.txt'
        transFile = 'transitions.txt'
        emmisionsFile = 'emissions.txt'
        laplaceFile = 'laplace-tag-unigrams.txt'
    tag_bigrams = {} # (POS,POS) = occurences
    word_unigrams = {} # (x) = occurrences
    tag_unigrams = {} # (POS) = occurrences
    pos_unigrams = {} # (x,POS) = occurrences
    words = []
    tokenCount = 0
    # Each line is a sentence:
    # ['<s>', 'I/NN', 'am/VBD', 'happy/ADJ', '</s>']
    sentences = []
    f = open(trainingFile, 'r')
    for line in f:
        line = line.rstrip('\n')
        sentence = ['<s>/<s>']
        sentence += line.split(" ")
        sentence += ["</s>"]
        # NOTE(review): += extends, so `sentences` ends up a FLAT token list,
        # not a list of sentences; the loops below rely on that flat shape.
        sentences += sentence
    # Capture words on their own
    for word in sentences:
        tokenCount += 1
        if word == "</s>":
            x = "</s>"
            x_POS = "</s>"
        else:
            splitWord = word.split('/')
            if len(splitWord) < 3:
                x = splitWord[0]
                x_POS = splitWord[1]
            else:
                # The token itself contained a '/', e.g. 'a/b/POS'
                x = splitWord[1]
                x_POS = splitWord[2]
        # get (x,x_POS)
        if (x,x_POS) in pos_unigrams.keys(): pos_unigrams[(x,x_POS)] += 1
        else: pos_unigrams[(x,x_POS)] = 1
        # get (POS)
        if x_POS in tag_unigrams.keys(): tag_unigrams[x_POS] += 1
        else: tag_unigrams[x_POS] = 1
        # get (x)
        if x in word_unigrams.keys(): word_unigrams[x] += 1
        else: word_unigrams[x] = 1
    # Capture words in pairs (adjacent-token tag bigrams)
    for i in range(0, len(sentences)-1):
        word1 = sentences[i].split('/')
        word2 = sentences[i+1].split('/')
        if sentences[i] == "</s>": word1 = ["</s>", "</s>"]
        if sentences[i+1] == "</s>": word2 = ["</s>", "</s>"]
        if len(word1) > 2:
            word1[0],word1[1] = word1[1], word1[2]
        if len(word2) > 2:
            word2[0],word2[1] = word2[1], word2[2]
        # fill up dictionaries
        if (word1[1],word2[1]) in tag_bigrams.keys():
            tag_bigrams[(word1[1],word2[1])] += 1
        else:
            tag_bigrams[(word1[1],word2[1])] = 1
    # write transitions.txt (tag transition MLE probabilities)
    with open('transitions.txt', 'w') as csv_file:
        writer = csv.writer(csv_file, delimiter='\t', lineterminator='\n')
        writer.writerow(['POS(x)', 'POS(y)', 'P_MLE(POS(y)|POS(x))'])
        for (tag1,tag2), count in tag_bigrams.items():
            # P_MLE = (x,y occurences) / (x occurences)
            MLE = count / tag_unigrams[tag1]
            writer.writerow([tag1, tag2, MLE])
    # write emissions.txt (word|tag and tag|word probabilities, MLE + Laplace)
    with open('emissions.txt', 'w') as csv_file:
        writer = csv.writer(csv_file, delimiter='\t', lineterminator='\n')
        writer.writerow(['POS(x)', 'x', 'P_MLE(x|POS(x))', 'P_Laplace(x|POS(x))',
                         'P_MLE(POS(x)|x)', 'P_Laplace(x|POS(x))'])
        for (x,x_POS), count in pos_unigrams.items():
            # P_MLE = (x has tag t) / (tag t occurs)
            MLE = (count) / (tag_unigrams[x_POS])
            MLE_r = (count) / (word_unigrams[x])
            # P_Laplace = P(y|x) = #(x,xPOS)+1 / (#(x) + vocab size + 1)
            Laplace = (count + 1) / (tag_unigrams[x_POS] + len(tag_unigrams) + 1)
            Laplace_r = (count + 1) / (word_unigrams[x] + len(word_unigrams) + 1)
            writer.writerow([x_POS, x, MLE, Laplace, MLE_r, Laplace_r])
        for x in tag_unigrams.keys():
            # Smoothed mass reserved for unseen ('unk') words under each tag
            Laplace = 1 / (tag_unigrams[x] + len(word_unigrams) + 1)
            writer.writerow([x, 'unk', 0.0, Laplace, 0.0, 0.0])
    # write laplace-tag-unigrams.txt (smoothed tag unigram probabilities)
    with open('laplace-tag-unigrams.txt', 'w') as csv_file:
        writer = csv.writer(csv_file, delimiter='\t', lineterminator='\n')
        writer.writerow(['POS(x)', 'P_Laplace(POS(x))'])
        for x_POS, count in tag_unigrams.items():
            # P_Laplace for unigrams
            Laplace = (tag_unigrams[x_POS] + 1) / (tokenCount + len(word_unigrams) + 1)
            writer.writerow([x_POS, Laplace])
|
999,085 | b35cc5f39e0c02293b46cb4c78c4589e56ce73b1 | from std_msgs.msg import Int64
import rospy
time = 0  # module-level placeholder; never updated (NOTE(review): unused, and would shadow a `time` import)
def clockCallback(clock):
    """Debug subscriber callback: dump the attributes of the received message."""
    attributes = dir(clock)
    print(attributes)
if __name__ == '__main__':
    # Subscribe to /clock and print each message's attributes for inspection.
    rospy.init_node('clock_tester')
    clock_topic = "/clock"
    # NOTE(review): /clock normally carries rosgraph_msgs/Clock, not
    # std_msgs/Int64 -- confirm the message type matches the publisher.
    clock = rospy.Subscriber(clock_topic, Int64, clockCallback)
    # print(rospy.time)
    rospy.Rate(5)  # created but unused; Rate alone does not throttle anything
    rospy.spin()
999,086 | ddabe588d36491f8a387136ea71ff93fe574627a | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 10 21:57:37 2019
@author: Nataly
"""
#from readimg import read_img
import cv2
import numpy as np
import glob
from yolovoc import yolo2voc
from readboxes import read_boxes
from matplotlib import pyplot as plt
from rOI import ROI
from skimage.feature import greycomatrix, greycoprops
import skimage.feature
from scipy.stats import kurtosis
import statistics as stats
import pywt
import pywt.data
#tamañoA = []
#tamañoB = []
def Fourier(inA):
    """Return the centered log-magnitude spectrum of `inA`, cast to uint8."""
    centered_spectrum = np.fft.fftshift(np.fft.fft2(inA))
    log_magnitude = 20 * np.log(np.abs(centered_spectrum))
    return log_magnitude.astype(np.uint8)
def GLCM(imA):
    """Compute a 1-pixel horizontal gray-level co-occurrence matrix and stats.

    Returns (glcm, contrast, energy, homogeneity, correlation, dissimilarity,
    ASM, entropy). Dissimilarity and ASM are returned as arrays; the other
    properties as scalars; entropy is the Shannon entropy of the GLCM itself.
    """
    levels = int(np.max(imA)) + 1
    glcm = skimage.feature.greycomatrix(imA, [1], [0], levels=levels, symmetric=False, normed=True)
    contrast = skimage.feature.greycoprops(glcm, 'contrast')[0][0]
    energy = skimage.feature.greycoprops(glcm, 'energy')[0][0]
    homogeneity = skimage.feature.greycoprops(glcm, 'homogeneity')[0][0]
    correlation = skimage.feature.greycoprops(glcm, 'correlation')[0][0]
    dissimilarity = greycoprops(glcm, 'dissimilarity')
    asm = greycoprops(glcm, 'ASM')
    entropy = skimage.measure.shannon_entropy(glcm)
    return glcm, contrast, energy, homogeneity, correlation, dissimilarity, asm, entropy
# plt.imshow(cropped)
def tama(a, b):
    """Pick the square tile size: 200 when either dimension is below 600, else 600."""
    side = 200 if a < 600 or b < 600 else 600
    return side, side
energiaDM_LH=[]
homogeneidadDM_LH=[]
correlacionDM_LH=[]
disimiDM_LH=[]
ASMDM_LH=[]
entropiaDM_LH=[]
contrasteDM_LH=[]
mediaglcmDM_LH=[]
entropianoglcmDM_LH=[]
mediaDM_LH=[]
modaDM_LH=[]
desviacionDM_LH=[]
curtosisDM_LH=[]
nergianoglcmDM_LH=[]
##RE
energiaRE_LH=[]
homogeneidadRE_LH=[]
correlacionRE_LH=[]
disimiRE_LH=[]
ASMRE_LH=[]
entropiaRE_LH=[]
contrasteRE_LH=[]
mediaglcmRE_LH=[]
entropianoglcmRE_LH=[]
mediaRE_LH=[]
modaRE_LH=[]
desviacionRE_LH=[]
curtosisRE_LH=[]
nergianoglcmRE_LH=[]
##NO
energiaNO_LH=[]
homogeneidadNO_LH=[]
correlacionNO_LH=[]
disimiNO_LH=[]
ASMNO_LH=[]
entropiaNO_LH=[]
contrasteNO_LH=[]
mediaglcmNO_LH=[]
entropianoglcmNO_LH=[]
mediaNO_LH=[]
modaNO_LH=[]
desviacionNO_LH=[]
curtosisNO_LH=[]
nergianoglcmNO_LH=[]
##GLCMsinfourierLL
energiaDM_SF_LH=[]
homogeneidadDM_SF_LH=[]
correlacionDM_SF_LH=[]
disimiDM_SF_LH=[]
ASMDM_SF_LH=[]
entropiaDM_SF_LH=[]
contrasteDM_SF_LH=[]
mediaglcmDM_SF_LH=[]
entropianoglcmDM_SF_LH=[]
mediaDM_SF_LH=[]
modaDM_SF_LH=[]
desviacionDM_SF_LH=[]
curtosisDM_SF_LH=[]
nergianoglcmDM_SF_LH=[]
#RE
energiaRE_SF_LH=[]
homogeneidadRE_SF_LH=[]
correlacionRE_SF_LH=[]
disimiRE_SF_LH=[]
ASMRE_SF_LH=[]
entropiaRE_SF_LH=[]
contrasteRE_SF_LH=[]
mediaglcmRE_SF_LH=[]
entropianoglcmRE_SF_LH=[]
mediaRE_SF_LH=[]
modaRE_SF_LH=[]
desviacionRE_SF_LH=[]
curtosisRE_SF_LH=[]
nergianoglcmRE_SF_LH=[]
#NO
energiaNO_SF_LH=[]
homogeneidadNO_SF_LH=[]
correlacionNO_SF_LH=[]
disimiNO_SF_LH=[]
ASMNO_SF_LH=[]
entropiaNO_SF_LH=[]
contrasteNO_SF_LH=[]
mediaglcmNO_SF_LH=[]
entropianoglcmNO_SF_LH=[]
mediaNO_SF_LH=[]
modaNO_SF_LH=[]
desviacionNO_SF_LH=[]
curtosisNO_SF_LH=[]
nergianoglcmNO_SF_LH=[]
##hl
energiaDM_HL=[]
homogeneidadDM_HL=[]
correlacionDM_HL=[]
disimiDM_HL=[]
ASMDM_HL=[]
entropiaDM_HL=[]
contrasteDM_HL=[]
mediaglcmDM_HL=[]
entropianoglcmDM_HL=[]
mediaDM_HL=[]
modaDM_HL=[]
desviacionDM_HL=[]
curtosisDM_HL=[]
nergianoglcmDM_HL=[]
#RE
energiaRE_HL=[]
homogeneidadRE_HL=[]
correlacionRE_HL=[]
disimiRE_HL=[]
ASMRE_HL=[]
entropiaRE_HL=[]
contrasteRE_HL=[]
mediaglcmRE_HL=[]
entropianoglcmRE_HL=[]
mediaRE_HL=[]
modaRE_HL=[]
desviacionRE_HL=[]
curtosisRE_HL=[]
nergianoglcmRE_HL=[]
#NO
energiaNO_HL=[]
homogeneidadNO_HL=[]
correlacionNO_HL=[]
disimiNO_HL=[]
ASMNO_HL=[]
entropiaNO_HL=[]
contrasteNO_HL=[]
mediaglcmNO_HL=[]
entropianoglcmNO_HL=[]
mediaNO_HL=[]
modaNO_HL=[]
desviacionNO_HL=[]
curtosisNO_HL=[]
nergianoglcmNO_HL=[]
##GLCMsinfourierLL
energiaDM_SF_HL=[]
homogeneidadDM_SF_HL=[]
correlacionDM_SF_HL=[]
disimiDM_SF_HL=[]
ASMDM_SF_HL=[]
entropiaDM_SF_HL=[]
contrasteDM_SF_HL=[]
mediaglcmDM_SF_HL=[]
entropianoglcmDM_SF_HL=[]
mediaDM_SF_HL=[]
modaDM_SF_HL=[]
desviacionDM_SF_HL=[]
curtosisDM_SF_HL=[]
nergianoglcmDM_SF_HL=[]
#RE
energiaRE_SF_HL=[]
homogeneidadRE_SF_HL=[]
correlacionRE_SF_HL=[]
disimiRE_SF_HL=[]
ASMRE_SF_HL=[]
entropiaRE_SF_HL=[]
contrasteRE_SF_HL=[]
mediaglcmRE_SF_HL=[]
entropianoglcmRE_SF_HL=[]
mediaRE_SF_HL=[]
modaRE_SF_HL=[]
desviacionRE_SF_HL=[]
curtosisRE_SF_HL=[]
nergianoglcmRE_SF_HL=[]
#NO
energiaNO_SF_HL=[]
homogeneidadNO_SF_HL=[]
correlacionNO_SF_HL=[]
disimiNO_SF_HL=[]
ASMNO_SF_HL=[]
entropiaNO_SF_HL=[]
contrasteNO_SF_HL=[]
mediaglcmNO_SF_HL=[]
entropianoglcmNO_SF_HL=[]
mediaNO_SF_HL=[]
modaNO_SF_HL=[]
desviacionNO_SF_HL=[]
curtosisNO_SF_HL=[]
nergianoglcmNO_SF_HL=[]
## HLrojoo
energiaDM_HLROJO=[]
homogeneidadDM_HLROJO=[]
correlacionDM_HLROJO=[]
disimiDM_HLROJO=[]
ASMDM_HLROJO=[]
entropiaDM_HLROJO=[]
contrasteDM_HLROJO=[]
mediaglcmDM_HLROJO=[]
entropianoglcmDM_HLROJO=[]
mediaDM_HLROJO=[]
modaDM_HLROJO=[]
desviacionDM_HLROJO=[]
curtosisDM_HLROJO=[]
nergianoglcmDM_HLROJO=[]
#RE
energiaRE_HLROJO=[]
homogeneidadRE_HLROJO=[]
correlacionRE_HLROJO=[]
disimiRE_HLROJO=[]
ASMRE_HLROJO=[]
entropiaRE_HLROJO=[]
contrasteRE_HLROJO=[]
mediaglcmRE_HLROJO=[]
entropianoglcmRE_HLROJO=[]
mediaRE_HLROJO=[]
modaRE_HLROJO=[]
desviacionRE_HLROJO=[]
curtosisRE_HLROJO=[]
nergianoglcmRE_HLROJO=[]
#NO
energiaNO_HLROJO=[]
homogeneidadNO_HLROJO=[]
correlacionNO_HLROJO=[]
disimiNO_HLROJO=[]
ASMNO_HLROJO=[]
entropiaNO_HLROJO=[]
contrasteNO_HLROJO=[]
mediaglcmNO_HLROJO=[]
entropianoglcmNO_HLROJO=[]
mediaNO_HLROJO=[]
modaNO_HLROJO=[]
desviacionNO_HLROJO=[]
curtosisNO_HLROJO=[]
nergianoglcmNO_HLROJO=[]
##GLCMsinfourierLL
energiaDM_SF_HLROJO=[]
homogeneidadDM_SF_HLROJO=[]
correlacionDM_SF_HLROJO=[]
disimiDM_SF_HLROJO=[]
ASMDM_SF_HLROJO=[]
entropiaDM_SF_HLROJO=[]
contrasteDM_SF_HLROJO=[]
mediaglcmDM_SF_HLROJO=[]
entropianoglcmDM_SF_HLROJO=[]
mediaDM_SF_HLROJO=[]
modaDM_SF_HLROJO=[]
desviacionDM_SF_HLROJO=[]
curtosisDM_SF_HLROJO=[]
nergianoglcmDM_SF_HLROJO=[]
#RE
energiaRE_SF_HLROJO=[]
homogeneidadRE_SF_HLROJO=[]
correlacionRE_SF_HLROJO=[]
disimiRE_SF_HLROJO=[]
ASMRE_SF_HLROJO=[]
entropiaRE_SF_HLROJO=[]
contrasteRE_SF_HLROJO=[]
mediaglcmRE_SF_HLROJO=[]
entropianoglcmRE_SF_HLROJO=[]
mediaRE_SF_HLROJO=[]
modaRE_SF_HLROJO=[]
desviacionRE_SF_HLROJO=[]
curtosisRE_SF_HLROJO=[]
nergianoglcmRE_SF_HLROJO=[]
#NO
energiaNO_SF_HLROJO=[]
homogeneidadNO_SF_HLROJO=[]
correlacionNO_SF_HLROJO=[]
disimiNO_SF_HLROJO=[]
ASMNO_SF_HLROJO=[]
entropiaNO_SF_HLROJO=[]
contrasteNO_SF_HLROJO=[]
mediaglcmNO_SF_HLROJO=[]
entropianoglcmNO_SF_HLROJO=[]
mediaNO_SF_HLROJO=[]
modaNO_SF_HLROJO=[]
desviacionNO_SF_HLROJO=[]
curtosisNO_SF_HLROJO=[]
nergianoglcmNO_SF_HLROJO=[]
for image in glob.glob('*.jpg'):
# image = '00002.jpg'
im = cv2.imread(image)
im=cv2.normalize(im, None, 0, 255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3)
aa,bb,c = im.shape
imaROI=ROI(im)
imaROI=cv2.normalize(imaROI, None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3)
#cv2.imshow('Grays',imaROI)
#cv2.destroyAllWindows()
HSV=cv2.cvtColor(im,cv2.COLOR_RGB2HSV)
H,S,V=cv2.split(HSV)
V=V*imaROI
for z in range(c):
im[:,:,z]=im[:,:,z]*imaROI
_,contours,_= cv2.findContours(imaROI,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt=contours[max_index]
x3,y3,w3,h3 = cv2.boundingRect(cnt)
#cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
#"""
# cv2.imshow("Show",im[y:y+h,x:x+w])
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# imf=im.copy()
# cv2.rectangle(imf,(x,y),(x+w,y+h),(0,255,0),2)
# cv2.imshow("Show",imf)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
#"""
#plt.imshow(im)
#plt.show()
#imagenROI=im*imaROI
filetxt=image[0:len(image)-3]+'txt'
bboxfile=filetxt
boxes = read_boxes(bboxfile)
boxes_abs = yolo2voc(boxes, im.shape)
re=0
dm=0
imunda=0
imSinBBOX=im.copy()
for b in boxes_abs:
cls, x1, y1, x2, y2 = b
if cls == 3:
print('DM')
dm=dm+1
#print(image,dm)
a,b= V[int(y1):int(y2),int(x1):int(x2)].shape
tamañoA,tamañoB=tama(a,b)
V1= V[int(y1):int(y2),int(x1):int(x2)]
vecesA = int(a/tamañoA)
vecesB = int(b/tamañoB)
for f in range(0,a-tamañoA,tamañoA):
for c in range(0,b-tamañoB,tamañoB):
#print(f,c)
cropped = V1[f:f+tamañoA,c:c+tamañoB]
croppedrgb = im[f:f+tamañoA,c:c+tamañoB]
#test2[f:f+tamañoA,c:c+tamañoB]=test[f:f+tamañoA,c:c+tamañoB]
if c==tamañoB*vecesB-tamañoB:
cropped = V1[f:f+tamañoA,c:]
croppedrgb = im[f:f+tamañoA,c:]
#test2[f:f+tamañoA,c:]=test[f:f+tamañoA,c:]
if f==tamañoA*vecesA-tamañoA:
#print('ola')
if c==tamañoB*vecesB-tamañoB:
cropped = V1[f:,c:]
croppedrgb = im[f:,c:]
#test2[f:,c:]=test[f:,c:]
else:
cropped = V1[f:,c:c+tamañoB]
croppedrgb = im[f:,c:c+tamañoB]
#test2[f:,c:c+tamañoB]=test[f:,c:c+tamañoB]
#print('dani')
#cropFou=cropped
cropped_1=cropped.copy()
croppedrgb_1=croppedrgb.copy()
""" #Con Fourier No rojo"""
coeffs2 = pywt.dwt2(cropped, 'bior1.3')
LL, (LH, HL, HH) = coeffs2
LH=LH.astype(np.uint8)
cropFou=Fourier(LH)
ch= cropFou.shape
if len(ch)>2:
cropFou=cropFou[:,:,0]
g,contraste,energia,homogeneidad, correlacion, disimi, ASM,entropia=GLCM(cropFou)
contrasteDM_LH.append(contraste)
energiaDM_LH.append(energia)
homogeneidadDM_LH.append(homogeneidad)
correlacionDM_LH.append(correlacion)
disimiDM_LH.append(disimi)
ASMDM_LH.append(ASM)
entropiaDM_LH.append(entropia)
mediaglcmDM_LH.append(np.mean(g))
entropianoglcmDM_LH.append(skimage.measure.shannon_entropy(cropFou))
mediaDM_LH.append(np.mean(cropFou))
#modaDM_LH.append(np.mean(stats.mode(cropFou)))
desviacionDM_LH.append(np.var(cropFou))
curtosisDM_LH.append(kurtosis(cropFou))
nergianoglcmDM_LH.append(np.median(cropFou))
""" #Sin Fourier No rojo"""
coeffs2_1 = pywt.dwt2(cropped_1, 'bior1.3')
LL_1, (LH_1, HL_1, HH_1) = coeffs2_1
LH_1=LH_1.astype(np.uint8)
cropFou_1=LH_1
ch= cropFou_1.shape
if len(ch)>2:
cropFou_1=cropFou_1[:,:,0]
g_1,contraste_1,energia_1,homogeneidad_1, correlacion_1, disimi_1, ASM_1,entropia_1=GLCM(cropFou_1)
contrasteDM_SF_LH.append(contraste_1)
energiaDM_SF_LH.append(energia_1)
homogeneidadDM_SF_LH.append(homogeneidad_1)
correlacionDM_SF_LH.append(correlacion_1)
disimiDM_SF_LH.append(disimi_1)
ASMDM_SF_LH.append(ASM_1)
entropiaDM_SF_LH.append(entropia_1)
mediaglcmDM_SF_LH.append(np.mean(g_1))
entropianoglcmDM_SF_LH.append(skimage.measure.shannon_entropy(cropFou_1))
mediaDM_SF_LH.append(np.mean(cropFou_1))
#modaDM_SF_LH.append(np.mean(stats.mode(cropFou_1)))
desviacionDM_SF_LH.append(np.var(cropFou_1))
curtosisDM_SF_LH.append(kurtosis(cropFou_1))
nergianoglcmDM_SF_LH.append(np.median(cropFou_1))
###ROJO
""" #Con Fourier rojo"""
coeffs2rgb = pywt.dwt2(croppedrgb, 'bior1.3')
LLrgb, (LHrgb, HLrgb, HHrgb) = coeffs2rgb
HLrgb=HLrgb.astype(np.uint8)
cropFourgb=Fourier(HLrgb)
ch= cropFourgb.shape
if len(ch)>2:
cropFourgb=cropFourgb[:,:,0]
grgb,contrastergb,energiargb,homogeneidadrgb, correlacionrgb, disimirgb, ASMrgb,entropiargb=GLCM(cropFourgb)
contrasteDM_HLROJO.append(contrastergb)
energiaDM_HLROJO.append(energiargb)
homogeneidadDM_HLROJO.append(homogeneidadrgb)
correlacionDM_HLROJO.append(correlacionrgb)
disimiDM_HLROJO.append(disimirgb)
ASMDM_HLROJO.append(ASMrgb)
entropiaDM_HLROJO.append(entropiargb)
mediaglcmDM_HLROJO.append(np.mean(grgb))
entropianoglcmDM_HLROJO.append(skimage.measure.shannon_entropy(cropFourgb))
mediaDM_HLROJO.append(np.mean(cropFourgb))
#modaDM_HLROJO.append(np.mean(stats.mode(cropFourgb)))
desviacionDM_HLROJO.append(np.var(cropFourgb))
curtosisDM_HLROJO.append(kurtosis(cropFourgb))
nergianoglcmDM_HLROJO.append(np.median(cropFourgb))
""" #Sin Fourier rojo"""
coeffs2rgb_1 = pywt.dwt2(croppedrgb_1, 'bior1.3')
LLrgb_1, (LHrgb_1, HLrgb_1, HHrgb_1) = coeffs2rgb_1
HLrgb_1=HLrgb_1.astype(np.uint8)
cropFourgb_1=HLrgb_1
ch=cropFourgb_1.shape
if len(ch)>2:
cropFourgb_1=cropFourgb_1[:,:,0]
grgb_1,contrastergb_1,energiargb_1,homogeneidadrgb_1, correlacionrgb_1, disimirgb_1, ASMrgb_1,entropiargb_1=GLCM(cropFourgb_1)
contrasteDM_SF_HLROJO.append(contrastergb_1)
energiaDM_SF_HLROJO.append(energiargb_1)
homogeneidadDM_SF_HLROJO.append(homogeneidadrgb_1)
correlacionDM_SF_HLROJO.append(correlacionrgb_1)
disimiDM_SF_HLROJO.append(disimirgb_1)
ASMDM_SF_HLROJO.append(ASMrgb_1)
entropiaDM_SF_HLROJO.append(entropiargb_1)
mediaglcmDM_SF_HLROJO.append(np.mean(grgb_1))
entropianoglcmDM_SF_HLROJO.append(skimage.measure.shannon_entropy(cropFourgb_1))
mediaDM_SF_HLROJO.append(np.mean(cropFourgb_1))
#modaDM_SF_HLROJO.append(np.mean(stats.mode(cropFourgb_1)))
desviacionDM_SF_HLROJO.append(np.var(cropFourgb_1))
curtosisDM_SF_HLROJO.append(kurtosis(cropFourgb_1))
nergianoglcmDM_SF_HLROJO.append(np.median(cropFourgb_1))
if cls==0:
re=re+1
print(re)
if cls==2:
imunda=imunda+1
# imSinBBOX[int(y1):int(y2),int(x1):int(x2)]=0
# print('cls', cls)
# if cls!=0 and cls!=1 and cls!=2 and cls!=3 and cls!=4 and cls!=5 and cls!=6:
# plt.imshow(im)
# plt.show()
# re=re+1
if re > 0 and dm==0 and imunda==0:
inta=V[y3:y3+h3,x3:x3+w3]
aa,bb=inta.shape
tamañoA,tamañoB=tama(aa,bb)
vecesA = int(aa/tamañoA)
vecesB = int(bb/tamañoB)
for f in range(0,aa-tamañoA,tamañoA):
for c in range(0,bb-tamañoB,tamañoB):
cropped2 = inta[f:f+tamañoA,c:c+tamañoB]
croppedrgb2 = im[f:f+tamañoA,c:c+tamañoB]
if c==tamañoB*vecesB-tamañoB:
cropped2 = inta[f:f+tamañoA,c:]
croppedrgb2 = im[f:f+tamañoA,c:]
if f==tamañoA*vecesA-tamañoA:
if c==tamañoB*vecesB-tamañoB:
cropped2 = inta[f:,c:]
croppedrgb2 = im[f:,c:]
else:
cropped2 = inta[f:,c:c+tamañoB]
croppedrgb2 = im[f:,c:c+tamañoB]
cropped2_1=cropped2.copy()
croppedrgb2_1=croppedrgb2.copy()
""" #Con Fourier No rojo"""
coeffs22 = pywt.dwt2(cropped2, 'bior1.3')
LL2, (LH2, HL2, HH2) = coeffs22
LH2=LH2.astype(np.uint8)
cropFou2=Fourier(LH2)
ch= cropFou2.shape
if len(ch)>2:
cropFou2=cropFou2[:,:,0]
g2,contraste2,energia2,homogeneidad2, correlacion2, disimi2, ASM2,entropia2=GLCM(cropFou2)
contrasteRE_LH.append(contraste2)
energiaRE_LH.append(energia2)
homogeneidadRE_LH.append(homogeneidad2)
correlacionRE_LH.append(correlacion2)
disimiRE_LH.append(disimi2)
ASMRE_LH.append(ASM2)
entropiaRE_LH.append(entropia2)
mediaglcmRE_LH.append(np.mean(g2))
entropianoglcmRE_LH.append(skimage.measure.shannon_entropy(cropFou2))
mediaRE_LH.append(np.mean(cropFou2))
#modaRE_LH.append(np.mean(stats.mode(list(cropFou2))))
desviacionRE_LH.append(np.var(cropFou2))
curtosisRE_LH.append(kurtosis(cropFou2))
nergianoglcmRE_LH.append(np.median(cropFou2))
""" #Sin Fourier No rojo"""
coeffs22_1 = pywt.dwt2(cropped2_1, 'bior1.3')
LL2_1, (LH2_1, HL2_1, HH2_1) = coeffs22_1
LH2_1=LH2_1.astype(np.uint8)
cropFou2_1=LH2_1
ch= cropFou2_1.shape
if len(ch)>2:
cropFou2_1=cropFou2_1[:,:,0]
g2_1,contraste2_1,energia2_1,homogeneidad2_1, correlacion2_1, disimi2_1, ASM2_1,entropia2_1=GLCM(cropFou2_1)
contrasteRE_SF_LH.append(contraste2_1)
energiaRE_SF_LH.append(energia2_1)
homogeneidadRE_SF_LH.append(homogeneidad2_1)
correlacionRE_SF_LH.append(correlacion2_1)
disimiRE_SF_LH.append(disimi2_1)
ASMRE_SF_LH.append(ASM2_1)
entropiaRE_SF_LH.append(entropia2_1)
mediaglcmRE_SF_LH.append(np.mean(g2_1))
entropianoglcmRE_SF_LH.append(skimage.measure.shannon_entropy(cropFou2_1))
mediaRE_SF_LH.append(np.mean(cropFou2_1))
#modaRE_SF_LH.append(np.mean(stats.mode(cropFou2_1)))
desviacionRE_SF_LH.append(np.var(cropFou2_1))
curtosisRE_SF_LH.append(kurtosis(cropFou2_1))
nergianoglcmRE_SF_LH.append(np.median(cropFou2_1))
###ROJO
""" #Con Fourier rojo"""
coeffs22rgb = pywt.dwt2(croppedrgb2, 'bior1.3')
LLrgb2, (LHrgb2, HLrgb2, HHrgb2) = coeffs22rgb
HLrgb2=HLrgb2.astype(np.uint8)
cropFourgb2=Fourier(HLrgb2)
ch= cropFourgb2.shape
if len(ch)>2:
cropFourgb2=cropFourgb2[:,:,0]
grgb2,contrastergb2,energiargb2,homogeneidadrgb2, correlacionrgb2, disimirgb2, ASMrgb2,entropiargb2=GLCM(cropFourgb2)
# plt.imshow(cropFourgb2[:,:,0])
# plt.show()
contrasteRE_HLROJO.append(contrastergb2)
energiaRE_HLROJO.append(energiargb2)
homogeneidadRE_HLROJO.append(homogeneidadrgb2)
correlacionRE_HLROJO.append(correlacionrgb2)
disimiRE_HLROJO.append(disimirgb2)
ASMRE_HLROJO.append(ASMrgb2)
entropiaRE_HLROJO.append(entropiargb2)
mediaglcmRE_HLROJO.append(np.mean(grgb2))
entropianoglcmRE_HLROJO.append(skimage.measure.shannon_entropy(cropFourgb2))
mediaRE_HLROJO.append(np.mean(cropFourgb2))
#modaRE_HLROJO.append(np.mean(stats.mode(cropFourgb2)))
desviacionRE_HLROJO.append(np.var(cropFourgb2))
curtosisRE_HLROJO.append(kurtosis(cropFourgb2))
nergianoglcmRE_HLROJO.append(np.median(cropFourgb2))
""" #Sin Fourier rojo"""
coeffs2rgb2_1 = pywt.dwt2(croppedrgb2_1, 'bior1.3')
LLrgb2_1, (LHrgb2_1, HLrgb2_1, HHrgb2_1) = coeffs2rgb2_1
HLrgb2_1=HLrgb2_1.astype(np.uint8)
cropFourgb2_1=HLrgb2_1
ch= cropFourgb2_1.shape
if len(ch)>2:
cropFourgb2_1=cropFourgb2_1[:,:,0]
grgb2_1,contrastergb2_1,energiargb2_1,homogeneidadrgb2_1, correlacionrgb2_1, disimirgb2_1, ASMrgb2_1,entropiargb2_1=GLCM(cropFourgb2_1)
contrasteRE_SF_HLROJO.append(contrastergb2_1)
energiaRE_SF_HLROJO.append(energiargb2_1)
homogeneidadRE_SF_HLROJO.append(homogeneidadrgb2_1)
correlacionRE_SF_HLROJO.append(correlacionrgb2_1)
disimiRE_SF_HLROJO.append(disimirgb2_1)
ASMRE_SF_HLROJO.append(ASMrgb2_1)
entropiaRE_SF_HLROJO.append(entropiargb2_1)
mediaglcmRE_SF_HLROJO.append(np.mean(grgb2_1))
entropianoglcmRE_SF_HLROJO.append(skimage.measure.shannon_entropy(cropFourgb2_1))
mediaRE_SF_HLROJO.append(np.mean(cropFourgb2_1))
#modaRE_SF_HLROJO.append(np.mean(stats.mode(cropFourgb2_1)))
desviacionRE_SF_HLROJO.append(np.var(cropFourgb2_1))
curtosisRE_SF_HLROJO.append(kurtosis(cropFourgb2_1))
nergianoglcmRE_SF_HLROJO.append(np.median(cropFourgb2_1))
if re==0 and dm==0 and imunda==0:
inta3=V[y3:y3+h3,x3:x3+w3]
aaa,bbb=inta3.shape
tamañoA,tamañoB=tama(aaa,bbb)
vecesA = int(aaa/tamañoA)
vecesB = int(bbb/tamañoB)
for f in range(0,aaa-tamañoA,tamañoA):
for c in range(0,bbb-tamañoB,tamañoB):
cropped3 = inta3[f:f+tamañoA,c:c+tamañoB]
croppedrgb3 = im[f:f+tamañoA,c:c+tamañoB]
if c==tamañoB*vecesB-tamañoB:
cropped3 = inta3[f:f+tamañoA,c:]
croppedrgb3 = im[f:f+tamañoA,c:]
if f==tamañoA*vecesA-tamañoA:
if c==tamañoB*vecesB-tamañoB:
cropped3 = inta3[f:,c:]
croppedrgb3 = im[f:,c:]
else:
cropped3 = inta3[f:,c:c+tamañoB]
croppedrgb3 = im[f:,c:c+tamañoB]
cropped3_1=cropped3.copy()
croppedrgb3_1=croppedrgb3.copy()
""" #Con Fourier No rojo"""
coeffs23 = pywt.dwt2(cropped3, 'bior1.3')
LL3, (LH3, HL3, HH3) = coeffs23
LH3=LH3.astype(np.uint8)
cropFou3=Fourier(LH3)
ch= cropFou3.shape
if len(ch)>2:
cropFou3=cropFou3[:,:,0]
g3,contraste3,energia3,homogeneidad3, correlacion3, disimi3, ASM3,entropia3=GLCM(cropFou3)
contrasteNO_LH.append(contraste3)
energiaNO_LH.append(energia3)
homogeneidadNO_LH.append(homogeneidad3)
correlacionNO_LH.append(correlacion3)
disimiNO_LH.append(disimi3)
ASMNO_LH.append(ASM3)
entropiaNO_LH.append(entropia3)
mediaglcmNO_LH.append(np.mean(g3))
entropianoglcmNO_LH.append(skimage.measure.shannon_entropy(cropFou3))
mediaNO_LH.append(np.mean(cropFou3))
#modaNO_LH.append(np.mean(stats.mode(cropFou3)))
desviacionNO_LH.append(np.var(cropFou3))
curtosisNO_LH.append(kurtosis(cropFou3))
nergianoglcmNO_LH.append(np.median(cropFou3))
""" #Sin Fourier No rojo"""
coeffs23_1 = pywt.dwt2(cropped3_1, 'bior1.3')
LL2_1, (LH3_1, HL2_1, HH2_1) = coeffs23_1
LH3_1=LH3_1.astype(np.uint8)
cropFou3_1=LH3_1
ch= cropFou3_1.shape
if len(ch)>2:
cropFou3_1=cropFou3_1[:,:,0]
g3_1,contraste3_1,energia3_1,homogeneidad3_1, correlacion3_1, disimi3_1, ASM3_1,entropia3_1=GLCM(cropFou3_1)
contrasteNO_SF_LH.append(contraste3_1)
energiaNO_SF_LH.append(energia3_1)
homogeneidadNO_SF_LH.append(homogeneidad3_1)
correlacionNO_SF_LH.append(correlacion3_1)
disimiNO_SF_LH.append(disimi3_1)
ASMNO_SF_LH.append(ASM3_1)
entropiaNO_SF_LH.append(entropia3_1)
mediaglcmNO_SF_LH.append(np.mean(g3_1))
entropianoglcmNO_SF_LH.append(skimage.measure.shannon_entropy(cropFou3_1))
mediaNO_SF_LH.append(np.mean(cropFou3_1))
#modaNO_SF_LH.append(np.mean(stats.mode(cropFou3_1)))
desviacionNO_SF_LH.append(np.var(cropFou3_1))
curtosisNO_SF_LH.append(kurtosis(cropFou3_1))
nergianoglcmNO_SF_LH.append(np.median(cropFou3_1))
###ROJO
""" #Con Fourier rojo"""
coeffs23rgb = pywt.dwt2(croppedrgb3, 'bior1.3')
LLrgb2, (LHrgb2, HLrgb3, HHrgb2) = coeffs23rgb
HLrgb3=HLrgb3.astype(np.uint8)
cropFourgb3=Fourier(HLrgb3)
ch= cropFourgb3.shape
if len(ch)>2:
cropFourgb3=cropFourgb3[:,:,0]
grgb3,contrastergb3,energiargb3,homogeneidadrgb3, correlacionrgb3, disimirgb3, ASMrgb3,entropiargb3=GLCM(cropFourgb3)
contrasteNO_HLROJO.append(contrastergb3)
energiaNO_HLROJO.append(energiargb3)
homogeneidadNO_HLROJO.append(homogeneidadrgb3)
correlacionNO_HLROJO.append(correlacionrgb3)
disimiNO_HLROJO.append(disimirgb3)
ASMNO_HLROJO.append(ASMrgb3)
entropiaNO_HLROJO.append(entropiargb3)
mediaglcmNO_HLROJO.append(np.mean(grgb3))
entropianoglcmNO_HLROJO.append(skimage.measure.shannon_entropy(cropFourgb3))
mediaNO_HLROJO.append(np.mean(cropFourgb3))
#modaNO_HLROJO.append(np.mean(stats.mode(cropFourgb3)))
desviacionNO_HLROJO.append(np.var(cropFourgb3))
# Fixed copy-paste bug: this NO_HLROJO feature group summarizes cropFourgb3
# (computed just above for the "NO" branch); cropFourgb2 belongs to the
# earlier RE branch. Every sibling append in this group uses cropFourgb3.
curtosisNO_HLROJO.append(kurtosis(cropFourgb3))
nergianoglcmNO_HLROJO.append(np.median(cropFourgb3))
""" #Sin Fourier rojo"""
coeffs2rgb3_1 = pywt.dwt2(croppedrgb3_1, 'bior1.3')
# Fixed copy-paste bug: unpack the transform just computed (coeffs2rgb3_1),
# not coeffs2rgb2_1 left over from the previous "RE" branch — otherwise the
# whole "Sin Fourier rojo / NO" feature set is computed on the wrong patch.
LLrgb2_1, (LHrgb2_1, HLrgb3_1, HHrgb2_1) = coeffs2rgb3_1
HLrgb3_1=HLrgb3_1.astype(np.uint8)
cropFourgb3_1=HLrgb3_1
ch= cropFourgb3_1.shape
if len(ch)>2:
cropFourgb3_1=cropFourgb3_1[:,:,0]
grgb3_1,contrastergb3_1,energiargb3_1,homogeneidadrgb3_1, correlacionrgb3_1, disimirgb3_1, ASMrgb3_1,entropiargb3_1=GLCM(cropFourgb3_1)
contrasteNO_SF_HLROJO.append(contrastergb3_1)
energiaNO_SF_HLROJO.append(energiargb3_1)
homogeneidadNO_SF_HLROJO.append(homogeneidadrgb3_1)
correlacionNO_SF_HLROJO.append(correlacionrgb3_1)
disimiNO_SF_HLROJO.append(disimirgb3_1)
ASMNO_SF_HLROJO.append(ASMrgb3_1)
entropiaNO_SF_HLROJO.append(entropiargb3_1)
mediaglcmNO_SF_HLROJO.append(np.mean(grgb3_1))
entropianoglcmNO_SF_HLROJO.append(skimage.measure.shannon_entropy(cropFourgb3_1))
mediaNO_SF_HLROJO.append(np.mean(cropFourgb3_1))
#modaNO_SF_HLROJO.append(np.mean(stats.mode(cropFourgb3_1)))
desviacionNO_SF_HLROJO.append(np.var(cropFourgb3_1))
curtosisNO_SF_HLROJO.append(kurtosis(cropFourgb3_1))
nergianoglcmNO_SF_HLROJO.append(np.median(cropFourgb3_1))
import pandas as pd
datos = {'EnergiaDM_LH':energiaDM_LH,
'HomogeneidadDM_LH':homogeneidadDM_LH,
'CorrelaciónDM_LH':correlacionDM_LH,
'DisimilitudDM_LH':disimiDM_LH,
'ASM_DM_LH':ASMDM_LH,
'EntropíaDM_LH':entropiaDM_LH,
'ContrasteDM_LH':contrasteDM_LH,
'Media(glcm)DM_LH':mediaglcmDM_LH,
'Entropia(no glcm)DM_LH':entropianoglcmDM_LH,
'MediaDM_LH':mediaDM_LH,
'ModaDM_LH':modaDM_LH,
'DesviacionDM_LH':desviacionDM_LH,
'CurtosisDM_LH':curtosisDM_LH,
'Energia(no glcm)DM_LH':nergianoglcmDM_LH,
##RE
'EnergiaRE_LH':energiaRE_LH,
'HomogeneidadRE_LH':homogeneidadRE_LH,
'CorrelaciónRE_LH':correlacionRE_LH,
'DisimilitudRE_LH':disimiRE_LH,
'ASM_RE_LH':ASMRE_LH,
'EntropíaRE_LH':entropiaRE_LH,
'ContrasteRE_LH':contrasteRE_LH,
'Media(glcm)RE_LH':mediaglcmRE_LH,
'Entropia(no glcm)RE_LH':entropianoglcmRE_LH,
'MediaRE_LH':mediaRE_LH,
'ModaRE_LH':modaRE_LH,
'DesviacionRE_LH':desviacionRE_LH,
'CurtosisRE_LH':curtosisRE_LH,
'Energia(no glcm)RE_LH':nergianoglcmRE_LH,
##NO
'EnergiaNO_LH':energiaNO_LH,
'HomogeneidadNO_LH':homogeneidadNO_LH,
'CorrelaciónNO_LH':correlacionNO_LH,
'DisimilitudNO_LH':disimiNO_LH,
'ASM_NO_LH':ASMNO_LH,
'EntropíaNO_LH':entropiaNO_LH,
'ContrasteNO_LH':contrasteNO_LH,
'Media(glcm)NO_LH':mediaglcmNO_LH,
'Entropia(no glcm)NO_LH':entropianoglcmNO_LH,
'MediaNO_LH':mediaNO_LH,
'ModaNO_LH':modaNO_LH,
'DesviacionNO_LH':desviacionNO_LH,
'CurtosisNO_LH':curtosisNO_LH,
'Energia(no glcm)NO_LH':nergianoglcmNO_LH,
##GLCMsinfourierLL
'EnergiaDM(sinF)_LH':energiaDM_SF_LH,
'HomogeneidadDM(sinF)_LH':homogeneidadDM_SF_LH,
'CorrelaciónDM(sinF)_LH':correlacionDM_SF_LH,
'DisimilitudDM(sinF)_LH':disimiDM_SF_LH,
'ASMDM(sinF)_LH':ASMDM_SF_LH,
'EntropíaDM(sinF)_LH':entropiaDM_SF_LH,
'ContrasteDM(sinF)_LH':contrasteDM_SF_LH,
'Media(glcm)DM(sinF)_LH':mediaglcmDM_SF_LH,
'Entropia(no glcm)DM(sinF)_LH':entropianoglcmDM_SF_LH,
'MediaDM(sinF)_LH':mediaDM_SF_LH,
'ModaDM(sinF)_LH':modaDM_SF_LH,
'DesviacionDM(sinF)_LH':desviacionDM_SF_LH,
'CurtosisDM(sinF)_LH':curtosisDM_SF_LH,
'Energia(no glcm)DM(sinF)_LH':nergianoglcmDM_SF_LH,
#RE
'EnergiaRE(sinF)_LH':energiaRE_SF_LH,
'HomogeneidadRE(sinF)_LH':homogeneidadRE_SF_LH,
'CorrelaciónRE(sinF)_LH':correlacionRE_SF_LH,
'DisimilitudRE(sinF)_LH':disimiRE_SF_LH,
'ASMRE(sinF)_LH':ASMRE_SF_LH,
'EntropíaRE(sinF)_LH':entropiaRE_SF_LH,
'ContrasteRE(sinF)_LH':contrasteRE_SF_LH,
'Media(glcm)RE(sinF)_LH':mediaglcmRE_SF_LH,
'Entropia(no glcm)RE(sinF)_LH':entropianoglcmRE_SF_LH,
'MediaRE(sinF)_LH':mediaRE_SF_LH,
'ModaRE(sinF)_LH':modaRE_SF_LH,
'DesviacionRE(sinF)_LH':desviacionRE_SF_LH,
'CurtosisRE(sinF)_LH':curtosisRE_SF_LH,
'Energia(no glcm)RE(sinF)_LH':nergianoglcmRE_SF_LH,
#NO
'EnergiaNO(sinF)_LH':energiaNO_SF_LH,
'HomogeneidadNO(sinF)_LH':homogeneidadNO_SF_LH,
'CorrelaciónNO(sinF)_LH':correlacionNO_SF_LH,
'DisimilitudNO(sinF)_LH':disimiNO_SF_LH,
'ASMNO(sinF)_LH':ASMNO_SF_LH,
'EntropíaNO(sinF)_LH':entropiaNO_SF_LH,
'ContrasteNO(sinF)_LH':contrasteNO_SF_LH,
'Media(glcm)NO(sinF)_LH':mediaglcmNO_SF_LH,
'Entropia(no glcm)NO(sinF)_LH':entropianoglcmNO_SF_LH,
'MediaNO(sinF)_LH':mediaNO_SF_LH,
'ModaNO(sinF)_LH':modaNO_SF_LH,
'DesviacionNO(sinF)_LH':desviacionNO_SF_LH,
'CurtosisNO(sinF)_LH':curtosisNO_SF_LH,
'Energia(no glcm)NO(sinF)_LH':nergianoglcmNO_SF_LH,
##hl
'EnergiaDM_HL':energiaDM_HL,
'HomogeneidadDM_HL':homogeneidadDM_HL,
'CorrelaciónDM_HL':correlacionDM_HL,
'DisimilitudDM_HL':disimiDM_HL,
'ASM_DM_HL':ASMDM_HL,
'EntropíaDM_HL':entropiaDM_HL,
'ContrasteDM_HL':contrasteDM_HL,
'Media(glcm)DM_HL':mediaglcmDM_HL,
'Entropia(no glcm)DM_HL':entropianoglcmDM_HL,
'MediaDM_HL':mediaDM_HL,
'ModaDM_HL':modaDM_HL,
'DesviacionDM_HL':desviacionDM_HL,
'CurtosisDM_HL':curtosisDM_HL,
'Energia(no glcm)DM_HL':nergianoglcmDM_HL,
#RE
'EnergiaRE_HL':energiaRE_HL,
'HomogeneidadRE_HL':homogeneidadRE_HL,
'CorrelaciónRE_HL':correlacionRE_HL,
'DisimilitudRE_HL':disimiRE_HL,
'ASM_RE_HL':ASMRE_HL,
'EntropíaRE_HL':entropiaRE_HL,
'ContrasteRE_HL':contrasteRE_HL,
'Media(glcm)RE_HL':mediaglcmRE_HL,
'Entropia(no glcm)RE_HL':entropianoglcmRE_HL,
'MediaRE_HL':mediaRE_HL,
'ModaRE_HL':modaRE_HL,
'DesviacionRE_HL':desviacionRE_HL,
'CurtosisRE_HL':curtosisRE_HL,
'Energia(no glcm)RE_HL':nergianoglcmRE_HL,
#NO
'EnergiaNO_HL':energiaNO_HL,
'HomogeneidadNO_HL':homogeneidadNO_HL,
'CorrelaciónNO_HL':correlacionNO_HL,
'DisimilitudNO_HL':disimiNO_HL,
'ASM_NO_HL':ASMNO_HL,
'EntropíaNO_HL':entropiaNO_HL,
'ContrasteNO_HL':contrasteNO_HL,
'Media(glcm)NO_HL':mediaglcmNO_HL,
'Entropia(no glcm)NO_HL':entropianoglcmNO_HL,
'MediaNO_HL':mediaNO_HL,
'ModaNO_HL':modaNO_HL,
'DesviacionNO_HL':desviacionNO_HL,
'CurtosisNO_HL':curtosisNO_HL,
'Energia(no glcm)NO_HL':nergianoglcmNO_HL,
##GLCMsinfourierLL
'EnergiaDM(sinF)_HL':energiaDM_SF_HL,
'HomogeneidadDM(sinF)_HL':homogeneidadDM_SF_HL,
'CorrelaciónDM(sinF)_HL':correlacionDM_SF_HL,
'DisimilitudDM(sinF)_HL':disimiDM_SF_HL,
'ASMDM(sinF)_HL':ASMDM_SF_HL,
'EntropíaDM(sinF)_HL':entropiaDM_SF_HL,
'ContrasteDM(sinF)_HL':contrasteDM_SF_HL,
'Media(glcm)DM(sinF)_HL':mediaglcmDM_SF_HL,
'Entropia(no glcm)DM(sinF)_HL':entropianoglcmDM_SF_HL,
'MediaDM(sinF)_HL':mediaDM_SF_HL,
'ModaDM(sinF)_HL':modaDM_SF_HL,
'DesviacionDM(sinF)_HL':desviacionDM_SF_HL,
'CurtosisDM(sinF)_HL':curtosisDM_SF_HL,
'Energia(no glcm)DM(sinF)_HL':nergianoglcmDM_SF_HL,
#RE
'EnergiaRE(sinF)_HL':energiaRE_SF_HL,
'HomogeneidadRE(sinF)_HL':homogeneidadRE_SF_HL,
'CorrelaciónRE(sinF)_HL':correlacionRE_SF_HL,
'DisimilitudRE(sinF)_HL':disimiRE_SF_HL,
'ASMRE(sinF)_HL':ASMRE_SF_HL,
'EntropíaRE(sinF)_HL':entropiaRE_SF_HL,
'ContrasteRE(sinF)_HL':contrasteRE_SF_HL,
'Media(glcm)RE(sinF)_HL':mediaglcmRE_SF_HL,
'Entropia(no glcm)RE(sinF)_HL':entropianoglcmRE_SF_HL,
'MediaRE(sinF)_HL':mediaRE_SF_HL,
'ModaRE(sinF)_HL':modaRE_SF_HL,
'DesviacionRE(sinF)_HL':desviacionRE_SF_HL,
'CurtosisRE(sinF)_HL':curtosisRE_SF_HL,
'Energia(no glcm)RE(sinF)_HL':nergianoglcmRE_SF_HL,
#NO
'EnergiaNO(sinF)_HL':energiaNO_SF_HL,
'HomogeneidadNO(sinF)_HL':homogeneidadNO_SF_HL,
'CorrelaciónNO(sinF)_HL':correlacionNO_SF_HL,
'DisimilitudNO(sinF)_HL':disimiNO_SF_HL,
'ASMNO(sinF)_HL':ASMNO_SF_HL,
'EntropíaNO(sinF)_HL':entropiaNO_SF_HL,
'ContrasteNO(sinF)_HL':contrasteNO_SF_HL,
'Media(glcm)NO(sinF)_HL':mediaglcmNO_SF_HL,
'Entropia(no glcm)NO(sinF)_HL':entropianoglcmNO_SF_HL,
'MediaNO(sinF)_HL':mediaNO_SF_HL,
'ModaNO(sinF)_HL':modaNO_SF_HL,
'DesviacionNO(sinF)_HL':desviacionNO_SF_HL,
'CurtosisNO(sinF)_HL':curtosisNO_SF_HL,
'Energia(no glcm)NO(sinF)_HL':nergianoglcmNO_SF_HL,
## HLrojoo
'EnergiaDM_HLROJO':energiaDM_HLROJO,
'HomogeneidadDM_HLROJO':homogeneidadDM_HLROJO,
'CorrelaciónDM_HLROJO':correlacionDM_HLROJO,
'DisimilitudDM_HLROJO':disimiDM_HLROJO,
'ASM_DM_HLROJO':ASMDM_HLROJO,
'EntropíaDM_HLROJO':entropiaDM_HLROJO,
'ContrasteDM_HLROJO':contrasteDM_HLROJO,
'Media(glcm)DM_HLROJO':mediaglcmDM_HLROJO,
'Entropia(no glcm)DM_HLROJO':entropianoglcmDM_HLROJO,
'MediaDM_HLROJO':mediaDM_HLROJO,
'ModaDM_HLROJO':modaDM_HLROJO,
'DesviacionDM_HLROJO':desviacionDM_HLROJO,
'CurtosisDM_HLROJO':curtosisDM_HLROJO,
'Energia(no glcm)DM_HLROJO':nergianoglcmDM_HLROJO,
#RE
'EnergiaRE_HLROJO':energiaRE_HLROJO,
'HomogeneidadRE_HLROJO':homogeneidadRE_HLROJO,
'CorrelaciónRE_HLROJO':correlacionRE_HLROJO,
'DisimilitudRE_HLROJO':disimiRE_HLROJO,
'ASM_RE_HLROJO':ASMRE_HLROJO,
'EntropíaRE_HLROJO':entropiaRE_HLROJO,
'ContrasteRE_HLROJO':contrasteRE_HLROJO,
'Media(glcm)RE_HLROJO':mediaglcmRE_HLROJO,
'Entropia(no glcm)RE_HLROJO':entropianoglcmRE_HLROJO,
'MediaRE_HLROJO':mediaRE_HLROJO,
'ModaRE_HLROJO':modaRE_HLROJO,
'DesviacionRE_HLROJO':desviacionRE_HLROJO,
'CurtosisRE_HLROJO':curtosisRE_HLROJO,
'Energia(no glcm)RE_HLROJO':nergianoglcmRE_HLROJO,
#NO
'EnergiaNO_HLROJO':energiaNO_HLROJO,
'HomogeneidadNO_HLROJO':homogeneidadNO_HLROJO,
'CorrelaciónNO_HLROJO':correlacionNO_HLROJO,
'DisimilitudNO_HLROJO':disimiNO_HLROJO,
'ASM_NO_HLROJO':ASMNO_HLROJO,
'EntropíaNO_HLROJO':entropiaNO_HLROJO,
'ContrasteNO_HLROJO':contrasteNO_HLROJO,
'Media(glcm)NO_HLROJO':mediaglcmNO_HLROJO,
'Entropia(no glcm)NO_HLROJO':entropianoglcmNO_HLROJO,
'MediaNO_HLROJO':mediaNO_HLROJO,
'ModaNO_HLROJO':modaNO_HLROJO,
'DesviacionNO_HLROJO':desviacionNO_HLROJO,
'CurtosisNO_HLROJO':curtosisNO_HLROJO,
'Energia(no glcm)NO_HLROJO':nergianoglcmNO_HLROJO,
##GLCMsinfourierLL
'EnergiaDM(sinF)_HLROJO':energiaDM_SF_HLROJO,
'HomogeneidadDM(sinF)_HLROJO':homogeneidadDM_SF_HLROJO,
'CorrelaciónDM(sinF)_HLROJO':correlacionDM_SF_HLROJO,
'DisimilitudDM(sinF)_HLROJO':disimiDM_SF_HLROJO,
'ASMDM(sinF)_HLROJO':ASMDM_SF_HLROJO,
'EntropíaDM(sinF)_HLROJO':entropiaDM_SF_HLROJO,
'ContrasteDM(sinF)_HLROJO':contrasteDM_SF_HLROJO,
'Media(glcm)DM(sinF)_HLROJO':mediaglcmDM_SF_HLROJO,
'Entropia(no glcm)DM(sinF)_HLROJO':entropianoglcmDM_SF_HLROJO,
'MediaDM(sinF)_HLROJO':mediaDM_SF_HLROJO,
'ModaDM(sinF)_HLROJO':modaDM_SF_HLROJO,
'DesviacionDM(sinF)_HLROJO':desviacionDM_SF_HLROJO,
'CurtosisDM(sinF)_HLROJO':curtosisDM_SF_HLROJO,
'Energia(no glcm)DM(sinF)_HLROJO':nergianoglcmDM_SF_HLROJO,
#RE
'EnergiaRE(sinF)_HLROJO':energiaRE_SF_HLROJO,
'HomogeneidadRE(sinF)_HLROJO':homogeneidadRE_SF_HLROJO,
'CorrelaciónRE(sinF)_HLROJO':correlacionRE_SF_HLROJO,
'DisimilitudRE(sinF)_HLROJO':disimiRE_SF_HLROJO,
'ASMRE(sinF)_HLROJO':ASMRE_SF_HLROJO,
'EntropíaRE(sinF)_HLROJO':entropiaRE_SF_HLROJO,
'ContrasteRE(sinF)_HLROJO':contrasteRE_SF_HLROJO,
'Media(glcm)RE(sinF)_HLROJO':mediaglcmRE_SF_HLROJO,
'Entropia(no glcm)RE(sinF)_HLROJO':entropianoglcmRE_SF_HLROJO,
'MediaRE(sinF)_HLROJO':mediaRE_SF_HLROJO,
'ModaRE(sinF)_HLROJO':modaRE_SF_HLROJO,
'DesviacionRE(sinF)_HLROJO':desviacionRE_SF_HLROJO,
'CurtosisRE(sinF)_HLROJO':curtosisRE_SF_HLROJO,
'Energia(no glcm)RE(sinF)_HLROJO':nergianoglcmRE_SF_HLROJO,
#NO
'EnergiaNO(sinF)_HLROJO':energiaNO_SF_HLROJO,
'HomogeneidadNO(sinF)_HLROJO':homogeneidadNO_SF_HLROJO,
'CorrelaciónNO(sinF)_HLROJO':correlacionNO_SF_HLROJO,
'DisimilitudNO(sinF)_HLROJO':disimiNO_SF_HLROJO,
'ASMNO(sinF)_HLROJO':ASMNO_SF_HLROJO,
'EntropíaNO(sinF)_HLROJO':entropiaNO_SF_HLROJO,
'ContrasteNO(sinF)_HLROJO':contrasteNO_SF_HLROJO,
'Media(glcm)NO(sinF)_HLROJO':mediaglcmNO_SF_HLROJO,
'Entropia(no glcm)NO(sinF)_HLROJO':entropianoglcmNO_SF_HLROJO,
'MediaNO(sinF)_HLROJO':mediaNO_SF_HLROJO,
'ModaNO(sinF)_HLROJO':modaNO_SF_HLROJO,
'DesviacionNO(sinF)_HLROJO':desviacionNO_SF_HLROJO,
'CurtosisNO(sinF)_HLROJO':curtosisNO_SF_HLROJO,
'Energia(no glcm)NO(sinF)_HLROJO':nergianoglcmNO_SF_HLROJO,
}
datos = pd.DataFrame(datos)
datos.to_excel('DiferentesCaracteristicas.xlsx')
|
999,087 | de966c61ffea5d644b97c02570e7d0eaeb673751 | from auditlog.models import LogEntry
from rest_framework import viewsets
from restapi.models import FavoriteThing, Category, Metadata, Enum
from restapi.serializers import FavoriteThingSerializer, CategorySerializer, MetadataSerializer, EnumSerializer, LogEntrySerializer
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for Category records.

    `ModelViewSet` supplies the standard `list`, `create`, `retrieve`,
    `update` and `destroy` actions; only the queryset and serializer
    need to be declared here.
    """

    serializer_class = CategorySerializer
    queryset = Category.objects.all()
class FavoriteThingViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for FavoriteThing records.

    `ModelViewSet` supplies the standard `list`, `create`, `retrieve`,
    `update` and `destroy` actions; only the queryset and serializer
    need to be declared here.
    """

    serializer_class = FavoriteThingSerializer
    queryset = FavoriteThing.objects.all()
class MetadataViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for Metadata records.

    `ModelViewSet` supplies the standard `list`, `create`, `retrieve`,
    `update` and `destroy` actions; only the queryset and serializer
    need to be declared here.
    """

    serializer_class = MetadataSerializer
    queryset = Metadata.objects.all()
class EnumViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for Enum records.

    `ModelViewSet` supplies the standard `list`, `create`, `retrieve`,
    `update` and `destroy` actions; only the queryset and serializer
    need to be declared here.
    """

    serializer_class = EnumSerializer
    queryset = Enum.objects.all()
class LogEntryViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint exposing the audit log.

    `ReadOnlyModelViewSet` provides just the `list` and `retrieve`
    (detail) actions — log entries cannot be created or modified
    through the API.
    """

    serializer_class = LogEntrySerializer
    queryset = LogEntry.objects.all()
|
# Read an integer and print its double, triple and square root.
num2 = int(input('Digite um valor'))

# Compute each result up front so the format call stays readable.
dobro = 2 * num2
triplo = 3 * num2
raiz = num2 ** (1 / 2)

print('O resultado das operações com o número {}:'.format(num2))
print('dobro = {}\nTriplo = {}\nRaiz quadrada = {:.2f}:'.format(dobro, triplo, raiz))
def magic_sum(mylist):
    """Sum each element halved (floor division) if even, doubled if odd.

    Args:
        mylist: iterable of integers.

    Returns:
        int: the sum of the transformed values; 0 for an empty iterable.
    """
    # Generator expression instead of sum([...]): avoids materializing a
    # throwaway list and works for any iterable, not just lists.
    return sum(x // 2 if x % 2 == 0 else x * 2 for x in mylist)
|
999,090 | 53cba0138ce937268af51873590d0a61d5d14375 |
# coding: utf-8
# In[1]:
import numpy as np
# In[10]:
a= np.array([1, 2, 3, 4, 5])
b= np.array([10, 20 , 30, 40, 50])
# In[11]:
a+b
# In[12]:
a+1
# In[13]:
a*b
# In[15]:
c=np.arange(11.)
c
# In[19]:
y=np.sin(c)
y
# In[20]:
import matplotlib.pyplot as plt
# In[21]:
plt.plot(c, y)
# In[23]:
plt.show()
# In[24]:
c.dtype
# In[25]:
type(c)
# In[26]:
c.itemsize#每个元素的大小
# In[27]:
c.size#有多少个元素
# In[28]:
c.shape #一维向量5个元素
# In[29]:
#a.fill(4)#把a里面都填上4 or a[:]=4 前者更快
# In[30]:
#如果往整型的list里填浮点数 会把小数点后面都去掉
#numpy的loadtxt() load表比较高效简单
# In[31]:
#arr=np.loadtxt('path\\abc.txt') 不需要更改格式 分隔符啥的
#arr=np.loadtxt('path\\abc_withheader.txt',skiprows=1) 有head可以skiprows掉 =几就跳过第几行
#arr=np.loadtxt('path\\abc_complex.txt', skiprows = 1, dilimiter = ',', comments = '%', usecols=(1,2,3,5), dtype= int)
# In[33]:
x= np.linspace(0, 2*np.pi, 101)#从0开始到2个pi分割101分的list
y= np.sin(x)
s= 0.5*(x[:-1]+x[1:])
t= (y[1:]-y[:-1])/(x[1:]-x[:-1])
plt.plot(s,t)
plt.show()
# In[34]:
#二维 a[0, 3:5] 第0行 的第4个和第5个(记住0开头)
# a[4:, 4:] 第5行往下,第5列往右全要[[5r5c,5r6c,5r7c,5rnc],[6r5c,6r6c,6r7c,6rnc],[nr5c, nr6c,nr7c,nrnc]] r=rows c=columns
# a[:, 2] 第3列全要[r0c3,r1c3,r2c3,rnc3]
# a[2::2,::2] 跳跳更健康 [第3行往n行跳,一次跳2步,从0列开始跳,每次2步]
# a=[(0,1,2,3,4),(1,2,3,4,5)]斜向的一个数列[r0c1,r1c2,r2c3,r3c4,r4c5]
# mask=arrar([1,0,1,0,0,1], dtype = bool) a[mask,2] means [[0,2,5], 2]
# In[35]:
#b=a[1,3]
#b[0]=15
#a[1]will be changed to 15 because b reference from a
#b=a[1,3].copy() b就实体化了 change of b will not infect a
# In[36]:
#where
#a=array([[0,12,5,20],[1,2,11,15]])#2rows and 4 co;umns
#loc=where (a > 10)-->返回的是个元组,里面是索引值,第一个list和第二个list配合起来是个坐标
#loc --> (array[0,0,1,1],[1,3,2,3])
#a[loc] --> array([12,20,11,15])
# In[1]:
#一个练习 dow
# Column indices for the Dow data file.
OPEN = 0
HIGH = 1
LOW = 2
CLOSE = 3
VOLUME = 4      # fixed NameError: was defined as VOLIME but used as VOLUME below
ADJ_CLOSE = 5   # fixed NameError: was defined as AD_CLOSE but used as ADJ_CLOSE below
dow = np.loadtxt('path\\dow.txt', delimiter=',')
# Boolean mask marking high-volume trading days (> 5.5e9 shares).
HV_mask = dow[:, VOLUME] > 5.5e9
days = HV_mask.sum()
# Row indices of the high-volume days.
HV_index = np.where(HV_mask)[0]
# Fixed: the 'b-' format string is an argument to plt.plot, not part of the index.
plt.plot(dow[:, ADJ_CLOSE], 'b-')
plt.plot(HV_index, dow[HV_index, ADJ_CLOSE], 'ro')
plt.show()
# In[3]:
#NumPy(Numerical Python的简称)
#pandas这个名字本身源自于panel data
#JSON(即JavaScript Object Notation,这是一种常用的Web数据格式)
#%timeit这个魔术命令检测任意Python语句(如矩阵乘法)的执行时间
#在IPython中,以感叹号(!)开头的命令行表示其后的所有内容需要在系统shell中执行
# In[14]:
import numpy as np
data = {i: np.random.randn() for i in range(7)}
# In[17]:
data
# In[32]:
import this
# In[29]:
a
# In[34]:
data =np.array ([[1,2,3],[10,20,30]])
# In[35]:
data
# In[40]:
data*10
# In[41]:
data + data
# In[42]:
data.shape
# In[43]:
data[1]
# In[44]:
data[0][0]
# In[45]:
data.dtype
# In[47]:
s= np.zeros(10)#创建一个全0的10位数组 类似 np.ones. np.zeros_like 以另一数组为参数 创建结构一致但全0的数组
# In[48]:
s
# In[50]:
s=np.zeros((3, 6))#创建一个3行6列队全0数组
# In[51]:
s
# In[52]:
s= np.empty((2,3,2)) #创建一个2行3列2面对数组,因为empty 所以内部值全为垃圾
# In[53]:
s
# In[55]:
s= np.arange(10)
# In[56]:
s
# In[57]:
#asarray 将输入转换为ndarray
# In[62]:
np.eye(3,3,3)
# In[63]:
arr1 = np.array([1,2,3], dtype=np.float64) #可以在后面直接定义数据类型 这样数组的站位就变成8个字节了
# In[64]:
arr1.dtype
# In[73]:
arr2 = arr1.astype(np.int32) #用astype改变原始参数类型,注意 如果浮点数改为整数 则小数点后面的都消失
arr2.dtype
# In[74]:
arr3 = arr2.astype(arr1.dtype) #改变为另一个数组的类型
arr3.dtype
# In[79]:
arr4 = np.array([[1,2,3],[4,5,6],[7,8,9]])
arr4[0][1] #2种方法进行索引 都可以 这种相成先找出第0行 再找这行里的第一个元素
# In[80]:
arr4[0,1]#2维定位 0行1列
# In[81]:
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
names == 'Bob'#返回一个布尔型的数组
# In[89]:
# Fixed NameError: bare `randn` was never imported; use the numpy namespace.
data = np.random.randn(7, 4)
data
# In[90]:
data[names == 'Bob']#以布尔型数组为index 显示出True的行
# In[91]:
data[names != 'Bob']#还可以用不等于喲
# In[93]:
data[names == 'Bob', 2:]#与slice合用
# In[94]:
#布尔运算符 &(and) |(or)
#将数列中小于0的都改为0
data[data<0]=0
# In[95]:
data
# In[98]:
data[names=='Bob'] = 123
# In[99]:
data
# In[102]:
data[1,2] #第1行第2列
# In[103]:
data[[1,2]] #第1行和第2行
# In[104]:
data[[2,1]]#还可以换顺序
# In[116]:
arr = np.arange(32).reshape(4,2,4)#3维度第一个数为面
arr
# In[117]:
arr. transpose(1,0,2)#3维度转换 面变成行,行变成面
# In[108]:
arr.T#行列互换
# In[111]:
arr=np.random.randn(6,3)
np.dot(arr.T,arr)
# In[122]:
arr = np.empty((8,4), dtype = int)
for i in range(8):
arr[i]= i
arr
# In[124]:
arr = np.arange(10)
np.sqrt(arr)#sqrt 平方根
# In[125]:
np.exp(arr)
# In[127]:
# Fixed NameError: bare `randn` was never imported; use the numpy namespace.
x = np.random.randn(8)
y = np.random.randn(8)
np.maximum(x, y)  # element-wise maximum of the two arrays
# In[ ]:
# Fixed NameError: bare `randn` was never imported; use the numpy namespace.
arr = np.random.randn(6) * 5
np.modf(arr)  # split each value into fractional and integral parts
# In[138]:
arr = np.array([1, 'a', 3])
arr
np.isnan(y)
# In[139]:
'''
abs 绝对值
sqrt 平方根 相当于arr ** 0.5
square 平方 arr ** 2
exp 计算个元素的指数e
log 自然对数
sign 计算各元素的正负号 1正 00 -1负
ceil 大于等于该数的整数
floor 小于等于该数的整数
rint 4舍5入取整数 保留dtype
modf 分成2个数组 小数和整数
isnan 返回一个表示“哪些值是NaN(这不是一个数字)"的布尔型数组 数字返回False
isfinite, isinf 判断有穷无穷 返回T/F
cos, cosh, sin, sinh, tan, tanh 双曲型三角函数
arccos, arccosh, arcsin, arcsinh, arctan, arctanh, 反三角函数
logical-not 计算个元素not x的真值 相当于-arr
二元ufucnc
add, substract, multiply, dicide, floor_divide 加减乘除,向下圆整除
power x的y次幂
maximum, fmax fmax忽略NaN
minimum, fmin
mod 求模(除法余数)
cospysign 将第二个数组中个元素的符号复制给第一个数组
'''
# In[145]:
x=3^10
x
# In[146]:
# Fixed NameError: bare `randn` was never imported; use the numpy namespace.
arr = np.random.randn(4, 4)
arr
# In[147]:
np.where(arr>0,2,arr)#如果大于0替换其值为2,否则保留原值
# In[149]:
np.where(arr>0,2,-1) #大于0替换成2否则替换成-1
# In[ ]:
|
999,091 | e37d44f03969ee7ab034b6553710d5920b4b4e99 | '''
A dynadag-ish graph layout calculator...
'''
import visgraph.layouts as vg_layout
import visgraph.drawing.bezier as vg_bezier
zero_zero = (0,0)
def revenumerate(l):
return list(zip(range(len(l)-1, -1, -1), reversed(l)))
SCOOCH_LEFT = 0
SCOOCH_RIGHT = 1
class DynadagLayout(vg_layout.GraphLayout):
def __init__(self, graph, barry=10):
vg_layout.GraphLayout.__init__(self, graph)
self._addGhostNodes()
self._barry_count = barry
self.width_pad = 20
self.height_pad = 40
def getLayoutSize(self):
'''
Return the width,height of this layout.
'''
height = 0
width = 0
for layer in self.layers:
lheight = 0
lwidth = 0
for nid,ninfo in layer:
xsize, ysize = ninfo.get('size', zero_zero)
lheight = max(lheight, ysize + self.height_pad)
lwidth += xsize + self.width_pad
height += lheight
width = max(lwidth, width)
return width, height
def _baryCenter(self, nid, ninfo):
tot = 0
cnt = 0
for eid, n1, n2, einfo in self.graph.getRefsFromByNid(nid):
node2 = self.graph.getNode(n2)
tot += node2[1].get('layerpos')
cnt += 1
for eid, n1, n2, einfo in self.graph.getRefsToByNid(nid):
node1 = self.graph.getNode(n1)
tot += node1[1].get('layerpos')
cnt += 1
barry = 0
if cnt:
barry = tot / float(cnt)
ninfo['barycenter'] = barry
# Try out "barycenter" averaging and re-ordering.
def _orderNodesByBary(self):
# Go through the layers and do barycenter calcs first.
# FIXME how do we tell when we're done?
for i in range(self._barry_count):
for layer in self.layers:
for nid, ninfo in layer:
self._baryCenter(nid, ninfo)
for layer in self.layers:
layer.sort(key=lambda k: k[1].get('barycenter'))
for i, (nid, ninfo) in enumerate(layer):
ninfo['layerpos'] = i
def _getNodeRelPos(self, nid, ninfo):
weight = ninfo['weight']
abovepos = []
for eid, n1, n2, einfo in self.graph.getRefsToByNid(nid):
fprops = self.graph.getNodeProps(n1)
if fprops['weight'] != weight-1:
continue
abovepos.append(fprops['layerpos'])
abovepos.sort()
belowpos = []
for eid, n1, n2, einfo in self.graph.getRefsFromByNid(nid):
tprops = self.graph.getNodeProps(n2)
if tprops['weight'] != weight+1:
continue
belowpos.append(tprops['layerpos'])
belowpos.sort()
return abovepos, belowpos
def _getLayerCross(self, layernum):
ccount = 0
layer = self.layers[layernum]
for i in range(1, len(layer)):
myabove, mybelow = self._getNodeRelPos(*layer[i])
hisabove, hisbelow = self._getNodeRelPos(*layer[i-1])
# If I have any nodes above with position lower
# than any of his, those are cross overs...
for mya in myabove:
for hisa in hisabove:
if mya < hisa:
ccount += 1
# If I have any nodes below with position lower
# than any of his, those acre cross overs...
for myb in mybelow:
for hisb in hisbelow:
if myb < hisb:
ccount += 1
return ccount
def _bubSortNodes(self):
# Go through nodes and see if we can re-order children to
# reduce crossovers...
for i in range(len(self.layers)):
layer = self.layers[i]
reduced = True
while reduced:
reduced = False
# Get the current crossover count for this layer
score = self._getLayerCross(i)
# TODO should we do this multipliciative rather than
# neighbors only?
for j in range(len(layer)-1):
n1 = layer[j]
n2 = layer[j+1]
layer[j] = n2
layer[j+1] = n1
newscore = self._getLayerCross(i)
# If this was optimal, keep it and continue
if newscore < score:
reduced = True
n1[1]['layerpos'] = j+1
n2[1]['layerpos'] = j
break
# Nope, put it back...
layer[j] = n1
layer[j+1] = n2
    def _addGhostNodes(self):
        '''
        Translate the hierarchical graph we are given into dynadag
        friendly graph with ghost nodes....

        Two passes: first rewrite "reverse" loop edges into looptop/loopbot
        edges through ghost nodes, then insert a chain of ghost nodes for
        any edge that spans more than one weight layer.

        NOTE(review): both passes add/delete edges while iterating
        getEdges() -- presumably getEdges() returns a snapshot list, so
        this is safe; confirm against the graph implementation.
        '''
        weights = self.graph.getHierNodeWeights()
        # First lets take care of any loop edges
        # (These will be nodes in the graph which are marked "reverse=True"
        # but have been added with src/dst swapped to make graphing easier)
        for eid, n1, n2, einfo in self.graph.getEdges():
            if not einfo.get('reverse'):
                continue
            topweight = weights.get(n1)
            botweight = weights.get(n2)
            # In the case of a single block loop, add one ghost and
            # connect them all
            #if n1 == n2:
            if topweight == botweight:
                bridgenode = self.graph.addNode(ghost=True, weight=topweight)
                weights[bridgenode[0]] = topweight
                self.graph.delEdgeByEid(eid)
                self.graph.addEdgeByNids(n1, bridgenode[0], looptop=True)
                self.graph.addEdgeByNids(bridgenode[0], n2, loopbot=True)
                continue
            # For a "reverse" edge, add a node in the weight for each
            # and connect them.
            topnode = self.graph.addNode(ghost=True, weight=topweight)
            weights[topnode[0]] = topweight
            botnode = self.graph.addNode(ghost=True, weight=botweight)
            weights[botnode[0]] = botweight
            self.graph.addEdge(topnode, botnode) # For rendering, these will be normal!
            # Now, remove the "reverse" edge, and add a 'looptop' and 'loopbot' edge
            self.graph.delEdgeByEid(eid)
            self.graph.addEdgeByNids(n1, topnode[0], looptop=True)
            self.graph.addEdgeByNids(botnode[0], n2, loopbot=True)
        # Create ghost nodes for edges which pass through a weight layer
        for eid, n1, n2, einfo in self.graph.getEdges():
            xweight = weights.get(n1, 0)
            yweight = weights.get(n2, 0)
            if xweight + 1 < yweight:
                # Replace the long edge with a chain of one ghost per
                # intermediate layer, reconnecting n1 -> ... -> n2
                self.graph.delEdgeByEid(eid)
                while xweight + 1 < yweight:
                    xweight += 1
                    ghostid = self.graph.addNode(ghost=True, weight=xweight)[0]
                    self.graph.addEdgeByNids(n1, ghostid)
                    n1 = ghostid
                self.graph.addEdgeByNids(n1, n2)
def layoutGraph(self):
self.maxweight = 0
for nid, ninfo in self.graph.getNodes():
self.maxweight = max(ninfo.get('weight', 0), self.maxweight)
self.layers = [ [] for i in range(self.maxweight + 1) ]
done = set()
def doit(node):
'''
Roll through all the nodes and assign them positions in their layer (based on weight)
'''
if node[0] in done:
return
done.add(node[0])
efrom = self.graph.getRefsFrom(node)
for eid, n1, n2, einfo in efrom:
tonode = self.graph.getNode(n2)
doit(tonode)
w = node[1].get('weight', 0)
layer = self.layers[w]
self.graph.setNodeProp(node, 'layerpos', len(layer))
layer.append(node)
# FIXME support more than one root!
for rootnode in self.graph.getHierRootNodes():
doit(rootnode)
# Now lets use positional averaging to order nodes in the layer
self._orderNodesByBary()
self._bubSortNodes()
self.maxwidth = 0
# Calculate the width / height of each layer...
lwidths = [] # The width of this total layer
self.lheights = [] # The tallest node in this layer
for layer in self.layers:
x = self.width_pad
y = 0
heightmax = 0
for nid, ninfo in layer:
size = ninfo.get('size', zero_zero)
xx, yy = size
heightmax = max(heightmax, yy)
x += xx
y += yy
lwidths.append(x)
self.lheights.append(heightmax)
self.maxwidth = max(self.maxwidth, x)
# Now that we have them sorted, lets set their individual positions...
vpad = 0
for i,layer in enumerate(self.layers):
hpad = (self.maxwidth - lwidths[i]) / 2
hpad += self.width_pad
for nid,ninfo in layer:
xpos = hpad
ypos = vpad
xsize, ysize = ninfo.get('size', zero_zero)
ninfo['position'] = (xpos,ypos)
ninfo['vert_pad'] = self.lheights[i] - ysize
hpad += xsize
hpad += self.width_pad
vpad += self.lheights[i]
vpad += self.height_pad
# Optimize the positions of nodes by moving them outward to align
# First from top to bottom
for i, layer in enumerate(self.layers):
layermid = len(layer) / 2
# From the left side, scooch kids out...
for j, (nid,ninfo) in enumerate(layer):
if not ninfo.get('ghost'):
break
self._scoochXKids(nid, ninfo, SCOOCH_LEFT)
# From the right side, scooch kids out...
for j, (nid, ninfo) in revenumerate(layer):
if not ninfo.get('ghost'):
break
self._scoochXKids(nid, ninfo, SCOOCH_RIGHT)
# From the bottom to the top!
for i, layer in revenumerate(self.layers):
layermid = len(layer) / 2
# From the left side, scooch kids out...
for j, (nid,ninfo) in enumerate(layer):
if not ninfo.get('ghost'):
break
self._scoochXParents(nid, ninfo, SCOOCH_LEFT)
# From the right side, scooch kids out...
for j, (nid, ninfo) in revenumerate(layer):
if not ninfo.get('ghost'):
break
self._scoochXParents(nid, ninfo, SCOOCH_RIGHT)
# Finally, we calculate the drawing for the edge lines
self._calcEdgeLines()
def _scoochXParents(self, nid, ninfo, lr=None):
weight = ninfo['weight']
for eid, n1, n2, einfo in self.graph.getRefsToByNid(nid):
pinfo = self.graph.getNodeProps(n1)
# Only do ghost nodes (for now...)
if not pinfo.get('ghost'):
continue
# Only do this to parents in the layer above us...
if pinfo['weight'] != weight-1:
continue
self._scoochXAlign(ninfo, pinfo, lr=lr)
def _scoochXKids(self, nid, ninfo, lr=None):
weight = ninfo['weight']
for eid, n1, n2, einfo in self.graph.getRefsFromByNid(nid):
kinfo = self.graph.getNodeProps(n2)
# Only do ghost nodes (for now...)
if not kinfo.get('ghost'):
continue
# Only do this to kids in the layer beneath us...
if kinfo['weight'] != weight+1:
continue
self._scoochXAlign(ninfo, kinfo, lr=lr)
def _scoochXAlign(self, ninfo, kinfo, lr=None):
'''
If possible, move the "kidinfo" node toward ninfo
along the X axis... If "lr" is specified, only move
the "kidnode" (which may be "above" you...) if it is
moving either SCOOCH_LEFT or SCOOCH_RIGHT as specified.
'''
xpos, ypos = ninfo['position']
xsize, ysize = ninfo.get('size', zero_zero)
xmid = xpos + ( xsize / 2 )
kxpos, kypos = kinfo['position']
kxsize, kysize = kinfo.get('size', zero_zero)
kxmid = kxpos + ( kxsize / 2 )
xdelta = xmid - kxmid
# If they only want us to go left, and the delta
# is right, bail...
if lr == SCOOCH_LEFT and xdelta >= 0:
return
# If they only want us to go right, and the delta
# is left, bail...
if lr == SCOOCH_RIGHT and xdelta <= 0:
return
self._scoochX(kinfo, xdelta)
    def _scoochX(self, ninfo, xdelta):
        '''
        Move ninfo horizontally by up to xdelta, clamped so it never
        crosses into its neighboring sibling in the layer (keeping
        width_pad of spacing). Nodes at the layer ends move freely
        outward.
        '''
        layerpos = ninfo.get('layerpos')
        x, y = ninfo['position']
        xsize, ysize = ninfo.get('size', zero_zero)
        layer = self.layers[ninfo['weight']]
        layermax = len(layer) - 1
        # There's always room on the left if we're the first...
        if layerpos == 0 and xdelta < 0:
            ninfo['position'] = (x+xdelta, y)
            return
        # Always room on the right if we're last!
        if layerpos == layermax and xdelta > 0:
            ninfo['position'] = (x+xdelta, y)
            return
        # Sigh... now we have to get fancy...
        # If they're asking us to go left, find out about our
        # left sibling
        if xdelta < 0:
            snid, sinfo = layer[layerpos - 1]
            sx, sy = sinfo['position']
            sxsize, sysize = sinfo.get('size', zero_zero)
            # Left sibling's right edge plus the mandatory gap
            sright = (sx + sxsize) + self.width_pad
            #leftroom = sright - x
            # "greater" is less movement here...
            xdelta = max(xdelta, sright - x)
            ninfo['position'] = (x+xdelta, y)
            return
        # If they're asking us to go right, find out about our
        # right sibling
        if xdelta > 0:
            snid, sinfo = layer[layerpos + 1]
            sx, sy = sinfo['position']
            sxsize, sysize = sinfo.get('size', zero_zero)
            # Our right edge plus the mandatory gap
            myright = x + xsize + self.width_pad
            xdelta = min(xdelta, sx-myright)
            ninfo['position'] = (x+xdelta, y)
            return
def _calcEdgeLines(self):
h_hpad = self.width_pad / 2
h_vpad = self.height_pad / 2
for eid, n1, n2, einfo in self.graph.getEdges():
pre_lines = []
post_lines = []
pinfo = self.graph.getNodeProps(n1)
kinfo = self.graph.getNodeProps(n2)
pwidth, pheight = pinfo.get('size', (0,0))
pweight = pinfo.get('weight')
lheight = self.lheights[pweight]
voffset = lheight - pheight
if einfo.get('looptop'):
x1, y1 = vg_layout.entry_pos(pinfo)
x2, y2 = vg_layout.entry_pos(kinfo)
xhalf = (x1 - x2) / 2
b = [ (x1, y1),
(x1, y1 - h_vpad),
(x2, y2 - h_vpad),
(x2, y2),
]
elif einfo.get('loopbot'):
x1, y1 = vg_layout.exit_pos(pinfo)
x2, y2 = vg_layout.exit_pos(kinfo)
kwidth, kheight = kinfo.get('size', (0,0))
kweight = kinfo.get('weight')
klheight = self.lheights[kweight]
kvoffset = klheight - kheight
pre_lines = [(x1, y1), (x1, y1 + voffset)]
post_lines = [(x2, y2), (x2, y2 + kvoffset)]
b = [ (x1, y1 + voffset),
(x1, y1 + voffset + h_vpad),
(x2, y2 + kvoffset + h_vpad),
(x2, y2 + kvoffset),
]
else:
x1, y1 = vg_layout.exit_pos(pinfo)
x2, y2 = vg_layout.entry_pos(kinfo)
pre_lines = [(x1,y1), (x1, y1 + voffset)]
b = [ (x1, y1 + voffset),
(x1, y1 + voffset + h_vpad),
(x2, y2 - h_vpad),
(x2, y2),
]
bez_lines = vg_bezier.calculate_bezier(b, 20)
einfo['edge_points'] = pre_lines + bez_lines + post_lines
#einfo['edge_points'] = bez_lines
|
999,092 | 2926dc639542e2118d9b76000e62b3e57b3e2b43 | # Автор: А.Н. Носкин
# Count the lines of 24-s1.txt in which "J" occurs more often than "E".
# (Comments translated from Russian; the manual while/readline loop is
# replaced by direct, idiomatic iteration over the file object.)
with open("24-s1.txt") as f:
    count = sum(1 for line in f if line.count("J") > line.count("E"))
print(count)
|
999,093 | 96c75e0f00d0248426c4fd8b9af9250bd6935a3e | from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Project, ImageAlbum
@receiver(post_save, sender=Project)
def create_album(sender, instance: Project, created, **kwargs):
    """
    post_save hook: when a Project is first created without an album,
    attach a freshly created default ImageAlbum named after its title
    and persist the change.
    """
    # `is None` (identity test), not `== None`: avoids invoking any
    # custom __eq__ and is the idiomatic None check.
    if created and instance.album is None:
        default_name = f"Album: {instance.title}"
        instance.album = ImageAlbum.objects.create(name=default_name)
        # Second save is required: the signal fires after the initial save
        instance.save()
|
999,094 | 54225b1a22373b8a86212e063ac393e5946b953b | import sys
# Print every line of the file named on the command line that starts
# with the "1;94;" prefix, without its trailing newline.
path = sys.argv[1]  # renamed from `file`, which shadows a builtin name
with open(path, 'r', encoding='iso-8859-1') as infile:
    for line in infile:
        if line.startswith('1;94;'):
            # rstrip the newline rather than line[:-1]: slicing would chop
            # a real character off a final line with no trailing newline.
            print(line.rstrip('\n'))
|
999,095 | 1ba68cc2d15fc62b6c54629fdd9a712030621a9d | #from tkinter import *
#root = Tk()
#frame = Frame(root)
#frame.pack()
#bottomframe = Frame(root)
#bottomframe.pack( side = BOTTOM )
#redbutton = Button(frame, text = 'Red', fg ='red')
#redbutton.pack( side = LEFT)
#greenbutton = Button(frame, text = 'Brown', fg='brown')
#greenbutton.pack( side = LEFT )
#bluebutton = Button(frame, text ='Blue', fg ='blue')
#bluebutton.pack( side = LEFT )
#blackbutton = Button(bottomframe, text ='Black', fg ='black')
#blackbutton.pack( side = BOTTOM)
#root.mainloop()
#from tkinter import *
#master = Tk()
#Label(master, text='First Name').grid(row=0)
#Label(master, text='Last Name').grid(row=1)
#e1 = Entry(master)
#e2 = Entry(master)
#e1.grid(row=0, column=1)
#e2.grid(row=1, column=1)
#mainloop()
#from tkinter import *
#master = Tk()
#var1 = IntVar()
#Checkbutton(master, text='male', variable=var1).grid(row=0, sticky=W)
#var2 = IntVar()
#Checkbutton(master, text='female', variable=var2).grid(row=1, sticky=W)
#mainloop()
#from tkinter import *
#top = Tk()
#Lb = Listbox(top)
#Lb.insert(1, 'Python')
#Lb.insert(2, 'Java')
#Lb.insert(3, 'C++')
#Lb.insert(4, 'Any other')
#Lb.pack()
#top.mainloop()
#from tkinter import *
#root = Tk()
#root.title('GfG')
#top = Toplevel()
#top.title('Python')
#top.mainloop()
#from tkinter import *
#m1 = PanedWindow()
#m1.pack(fill = BOTH, expand = 1)
#left = Entry(m1, bd = 5)
#m1.add(left)
#m2 = PanedWindow(m1, orient = VERTICAL)
#m1.add(m2)
#top = Scale( m2, orient = HORIZONTAL)
#m2.add(top)
#mainloop()
#from tkinter import *
#master = Tk()
#w = Spinbox(master, from_ = 0, to = 1000000000000000000)
#w.pack()
#mainloop()
#from tkinter import *
#root = Tk()
#T = Text(root, height=2, width=30)
#T.pack()
#T.insert(END, 'GeeksforGeeks\nBEST WEBSITE\n')
#mainloop()
|
999,096 | 5a19386d4df7e669fb20026689c5d4ea6b87c795 | from flask import (render_template, url_for, flash,
redirect, request, abort, Blueprint)
from flask_login import current_user, login_required
from rabadiom import db, login_manager, bcrypt
from rabadiom.models import Post, User, Ehr, Keys, Blockchain, SignedEhr
from rabadiom.posts.forms import PostForm
from functools import wraps
from rabadiom.blockchain.utils import Patient, Doctor, Block, EHR, Transaction
from rabadiom.blockchain import blockchain
def login_is_required(role="ANY"):
    '''
    Decorator factory: the wrapped view requires an authenticated user,
    and (unless role is "ANY") a user whose role matches `role`.
    Unauthorized requests are handed to login_manager.unauthorized().
    '''
    def wrapper(fn):
        @wraps(fn)
        def decorated_view(*args, **kwargs):
            # Anonymous users are always bounced
            if not current_user.is_authenticated:
                return login_manager.unauthorized()
            # Role mismatch only blocks when a specific role was requested
            if current_user.role != role and role != "ANY":
                return login_manager.unauthorized()
            return fn(*args, **kwargs)
        return decorated_view
    return wrapper
# Blueprint holding all EHR "post" views registered below
posts = Blueprint('posts', __name__)
@posts.route("/post/new", methods=['GET', 'POST'])
@login_is_required("Doctor")
def new_post():
    """
    Doctor-only view: create a new EHR record for a patient, mine a
    blockchain block for the transaction, sign every field with the
    doctor's key, encrypt the signatures with the patient's key, and
    persist all three rows (Blockchain, Ehr, SignedEhr) in one commit.
    """
    form = PostForm()
    if form.validate_on_submit():
        if blockchain.isChainValid(blockchain.chain):
            # Patient the record is being written for
            user = User.query.filter_by(username=form.userid.data).first()
            # Plaintext EHR row (stored alongside the signed/encrypted copy)
            post = Ehr(doctor_id=current_user.id, user_id=int(user.id), diseases1 = form.diseases[0].data, diseases2 = form.diseases[1].data, diseases3 = form.diseases[2].data,
                test_or_med1 = form.test_or_med[0].data, test_or_med2 = form.test_or_med[1].data, test_or_med3 = form.test_or_med[2].data,
                test_or_med4 = form.test_or_med[3].data, test_or_med5 = form.test_or_med[4].data, test_or_med6 = form.test_or_med[5].data,
                test_or_med7 = form.test_or_med[6].data,
                causes1 = form.causes[0].data, causes2 = form.causes[1].data, causes3 = form.causes[2].data,
                causes4 = form.causes[3].data, causes5 = form.causes[4].data, causes6 = form.causes[5].data,
                causes7 = form.causes[6].data)
            ehr = post
            # Load both parties' private keys to build the transaction
            doc_keys = Keys.query.filter_by(user_id=current_user.id).first()
            doc_keys = doc_keys.private_key
            pat_keys = Keys.query.filter_by(user_id=user.id).first()
            pat_keys = pat_keys.private_key
            patient = Patient(name=user.name, private_key=pat_keys)
            doctor = Doctor(name=current_user.name, private_key=doc_keys)
            # Concatenate every EHR field into one payload string for hashing
            data = ""
            data += ehr.diseases1 + ehr.diseases2 + ehr.diseases3 + ehr.test_or_med1 + ehr.test_or_med2 + ehr.test_or_med3 + ehr.test_or_med4 + ehr.test_or_med5 + ehr.test_or_med6 + ehr.test_or_med7 + ehr.causes1 + ehr.causes2 + ehr.causes3 + ehr.causes4 + ehr.causes5 + ehr.causes6 + ehr.causes7
            ehr = EHR(patient=patient, doctor=doctor, data=data)
            transaction = Transaction(patient=patient, doctor=doctor, ehr=ehr.ToHash())
            block = Block(transaction=transaction)
            block = blockchain.newBlock(block)
            try:
                # NOTE(review): chain_user and chain_doc are read here
                # before ever being assigned, so this branch raises
                # NameError whenever len(chain) == 2 and the whole block
                # is silently swallowed by the bare except below --
                # confirm what re-linking behavior was intended.
                chain_block = Blockchain.query.order_by(Blockchain.node.desc()).first()
                if len(blockchain.chain) == 2:
                    chain_user = User.query.filter_by(id=chain_user.user_id).first()
                    chain_doc = User.query.filter_by(id=chain_doc.user_id).first()
                    doc_keys = Keys.query.filter_by(user_id=chain_doc.id).first()
                    doc_keys = doc_keys.private_key
                    pat_keys = Keys.query.filter_by(user_id=chain_user.id).first()
                    pat_keys = pat_keys.private_key
                    patient = Patient(name=chain_user.name, private_key=pat_keys)
                    doctor = Doctor(name=chain_doc.name, private_key=doc_keys)
                    transaction = Transaction(patient=patient, doctor=doctor, ehr=chain_block.ehr)
                    block = Block(transaction=transaction)
                    blockchain.chain[0] = block
                    block.prev_hash = chain_block.hash
            except:
                # Bare except: any failure above is ignored on purpose(?)
                pass
            # Database mirror of the freshly mined block
            db_block = Blockchain(user_id=user.id, doctor_id=current_user.id, ehr=ehr.ToHash(), hash=block.hash,
                prev_hash=block.prev_hash, nonce=block.nonce, tstamp=block.tstamp)
            # Sign every individual field with the doctor's private key...
            diseases1 = doctor.Sign(form.diseases[0].data)
            diseases2 = doctor.Sign(form.diseases[1].data)
            diseases3 = doctor.Sign(form.diseases[2].data)
            test_or_med1 = doctor.Sign(form.test_or_med[0].data)
            causes1 = doctor.Sign(form.causes[0].data)
            test_or_med2 = doctor.Sign(form.test_or_med[1].data)
            causes2 = doctor.Sign(form.causes[1].data)
            test_or_med3 = doctor.Sign(form.test_or_med[2].data)
            causes3 = doctor.Sign(form.causes[2].data)
            test_or_med4 = doctor.Sign(form.test_or_med[3].data)
            causes4 = doctor.Sign(form.causes[3].data)
            test_or_med5 = doctor.Sign(form.test_or_med[4].data)
            causes5 = doctor.Sign(form.causes[4].data)
            test_or_med6 = doctor.Sign(form.test_or_med[5].data)
            causes6 = doctor.Sign(form.causes[5].data)
            test_or_med7 = doctor.Sign(form.test_or_med[6].data)
            causes7 = doctor.Sign(form.causes[6].data)
            # ...then encrypt each signature with the patient's key
            diseases1 = patient.Encrypt(diseases1)
            diseases2 = patient.Encrypt(diseases2)
            diseases3 = patient.Encrypt(diseases3)
            test_or_med1 = patient.Encrypt(test_or_med1)
            causes1 = patient.Encrypt(causes1)
            test_or_med2 = patient.Encrypt(test_or_med2)
            causes2 = patient.Encrypt(causes2)
            test_or_med3 = patient.Encrypt(test_or_med3)
            causes3 = patient.Encrypt(causes3)
            test_or_med4 = patient.Encrypt(test_or_med4)
            causes4 = patient.Encrypt(causes4)
            test_or_med5 = patient.Encrypt(test_or_med5)
            causes5 = patient.Encrypt(causes5)
            test_or_med6 = patient.Encrypt(test_or_med6)
            causes6 = patient.Encrypt(causes6)
            test_or_med7 = patient.Encrypt(test_or_med7)
            causes7 = patient.Encrypt(causes7)
            encrypted = SignedEhr(diseases1 = diseases1, diseases2 = diseases2, diseases3 = diseases3,
                test_or_med1 = test_or_med1, test_or_med2 = test_or_med2, test_or_med3 = test_or_med3,
                test_or_med4 = test_or_med4, test_or_med5 = test_or_med5, test_or_med6 = test_or_med6,
                test_or_med7 = test_or_med7,
                causes1 = causes1, causes2 = causes2, causes3 = causes3,
                causes4 = causes4, causes5 = causes5, causes6 = causes6,
                causes7 = causes7)
            # Persist block, plaintext EHR, and encrypted EHR atomically
            db.session.add(db_block)
            db.session.add(post)
            db.session.add(encrypted)
            db.session.commit()
            flash('Your post has been created!', 'success')
            return redirect(url_for('doc_main.home'))
        else:
            flash(str(blockchain.chain[-1].ToDict()))
            flash("BlockChain is not Valid", "danger")
    return render_template('create_post.html', title='New Post',
                           form=form, legend='New Post')
@posts.route("/post/<int:post_id>")
@login_required
def post(post_id):
    """
    Patient view of a single EHR record: decrypt each stored signature
    with the patient's key, verify it with the doctor's key against the
    plaintext EHR fields, and flash whether the record is intact.

    Fix: the decorators were ordered @login_required above @posts.route.
    Flask registers whatever function the route decorator receives, so
    with that ordering the *unwrapped* view was registered and the login
    check was silently skipped. Route registration must be outermost.
    """
    post = Ehr.query.get_or_404(post_id)
    doctor = User.query.filter_by(id=post.doctor_id).first()
    encrypted = SignedEhr.query.filter_by(id=post.id).first()
    # Load both parties' private keys for decrypt + verify
    doc_keys = Keys.query.filter_by(user_id=doctor.id).first()
    doc_keys = doc_keys.private_key
    pat_keys = Keys.query.filter_by(user_id=current_user.id).first()
    pat_keys = pat_keys.private_key
    patient = Patient(name=current_user.name, private_key=pat_keys)
    doc = Doctor(name=doctor.name, private_key=doc_keys)
    # Decrypt every signed field with the patient's private key
    diseases1 = patient.Decrypt(encrypted.diseases1)
    diseases2 = patient.Decrypt(encrypted.diseases2)
    diseases3 = patient.Decrypt(encrypted.diseases3)
    test_or_med1 = patient.Decrypt(encrypted.test_or_med1)
    causes1 = patient.Decrypt(encrypted.causes1)
    test_or_med2 = patient.Decrypt(encrypted.test_or_med2)
    causes2 = patient.Decrypt(encrypted.causes2)
    test_or_med3 = patient.Decrypt(encrypted.test_or_med3)
    causes3 = patient.Decrypt(encrypted.causes3)
    test_or_med4 = patient.Decrypt(encrypted.test_or_med4)
    causes4 = patient.Decrypt(encrypted.causes4)
    test_or_med5 = patient.Decrypt(encrypted.test_or_med5)
    causes5 = patient.Decrypt(encrypted.causes5)
    test_or_med6 = patient.Decrypt(encrypted.test_or_med6)
    causes6 = patient.Decrypt(encrypted.causes6)
    test_or_med7 = patient.Decrypt(encrypted.test_or_med7)
    causes7 = patient.Decrypt(encrypted.causes7)
    # Verify each decrypted signature against the plaintext EHR fields
    diseases1 = doc.Verify(post.diseases1, diseases1)
    diseases2 = doc.Verify(post.diseases2, diseases2)
    diseases3 = doc.Verify(post.diseases3, diseases3)
    test_or_med1 = doc.Verify(post.test_or_med1, test_or_med1)
    causes1 = doc.Verify(post.causes1, causes1)
    test_or_med2 = doc.Verify(post.test_or_med2, test_or_med2)
    causes2 = doc.Verify(post.causes2, causes2)
    test_or_med3 = doc.Verify(post.test_or_med3, test_or_med3)
    causes3 = doc.Verify(post.causes3, causes3)
    test_or_med4 = doc.Verify(post.test_or_med4, test_or_med4)
    causes4 = doc.Verify(post.causes4, causes4)
    test_or_med5 = doc.Verify(post.test_or_med5, test_or_med5)
    causes5 = doc.Verify(post.causes5, causes5)
    test_or_med6 = doc.Verify(post.test_or_med6, test_or_med6)
    causes6 = doc.Verify(post.causes6, causes6)
    test_or_med7 = doc.Verify(post.test_or_med7, test_or_med7)
    causes7 = doc.Verify(post.causes7, causes7)
    # Every single field must verify for the record to be trusted
    if diseases3 and diseases2 and diseases1 and test_or_med1 and test_or_med2 and test_or_med3 and test_or_med4 and test_or_med5 and test_or_med6 and test_or_med7 \
            and causes7 and causes6 and causes5 and causes4 and causes3 and causes2 and causes1:
        flash("OKAY ALL GOOD, DATA HAS NOT BEEN TAMPERED WITH", "success")
    else:
        flash("DANGER CONTENT IS INVALID")
    return render_template('post.html', title=post.id, post=post, doctor=doctor)
@posts.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
    '''
    EHR records are immutable: any update attempt is refused and the
    403 error page is rendered.
    '''
    message = "POST CAN NOT BE UPDATED ONCE ENTERED"
    flash(message, "danger")
    return render_template('errors/403.html')
@posts.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
    '''
    EHR records are immutable: any delete attempt is refused and the
    403 error page is rendered.
    '''
    message = "POST CAN NOT BE DELETED ONCE ENTERED"
    flash(message, "danger")
    return render_template('errors/403.html')
|
999,097 | 44658e2a602362326e7369dbe5ce016cb5b5f0b4 | #!/usr/bin/env python
import random, math
SVG_HEADER = """<svg height="38" width="38">"""
SVG_FOOTER = """</svg>"""
CIRCLE_PATTERN="""\
<circle r="%(r)d" stroke="#000" cx="%(cx)f" cy="%(cy)f" stroke-width="%(sw)f" fill="%(fill_color)s"/> \
"""

def circle(f, r, cx, cy, stroke_width):
    '''
    Write one SVG <circle> element to file object f, filled with a
    random short-form hex color (e.g. "#a3f").
    '''
    # Build the color from three random hex nibbles
    value = 0
    for _ in range(3):
        value = value * 16 + random.randrange(16)
    fill = '#' + hex(value)[2:].zfill(3)
    fields = {
        'r': r,
        'cx': cx,
        'cy': cy,
        'sw': stroke_width,
        'fill_color': fill,
    }
    print(CIRCLE_PATTERN % fields, file=f)
def generate_random_icon(f):
    '''
    Emit a complete 38x38 SVG icon to f: one large outlined circle plus
    six smaller randomly-colored circles whose centers lie on rings
    inside it.
    '''
    print(SVG_HEADER, file=f)
    # Large outer circle centered in the canvas
    circle(f, 17, 19, 19, 2)
    for _ in range(6):
        radius = random.randrange(3, 10)
        ring = 17 - radius
        # Pick an x offset on the ring, then solve for +/- y so the small
        # circle stays tangent inside the outer one
        x_off = random.uniform(-ring, ring)
        y_off = random.choice((-1, 1)) * math.sqrt((ring * ring) - (x_off * x_off))
        circle(f, radius, x_off + 19, y_off + 19, 0.5)
    print(SVG_FOOTER, file=f)
|
999,098 | 7f5dc973e13137eaa9d478b72f73ce192ba63115 | from django.conf.urls import patterns, include, url
# Legacy (pre-Django 1.10) string-based URLconf resolving against
# project.views.
urlpatterns = patterns('project.views',
    # e.g. /42/ -> project_detail(request, project_id="42")
    url(r'^(?P<project_id>\d+)/$', 'project_detail', name='project-detail'),
    url(r'^$', 'index'),
)
|
999,099 | 09ccc973fae55f999aae14e58c38115d2395b870 | import json
import itertools
import logging
import os
import random
import string
import sys
import traceback
import typing as ty
from azure.identity import DefaultAzureCredential
from azure.identity import ManagedIdentityCredential
from azure.mgmt.keyvault import KeyVaultManagementClient
from azure.mgmt.keyvault.models import VaultCreateOrUpdateParameters
from azure.mgmt.keyvault.models import VaultProperties
from azure.mgmt.keyvault.models import Vault
from azure.mgmt.keyvault.models import Sku
from azure.mgmt.keyvault.models import Permissions
from azure.mgmt.keyvault.models import AccessPolicyEntry
import azure.functions as func
# Alias for plain numeric values carried in log/response payloads
Numeric = ty.Union[int, float]
# NOTE(review): configuring the root logger at import time affects the
# whole process -- presumably intentional for Azure Functions log capture
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class ConcertAzureFuncError(Exception):
    """Used to map errors onto log messages and HTTP results

    - ``message`` - explanation of the error
    - ``status_code`` - HTTP status code this should map onto
    - ``data`` -- other fields to be mapped into a JSON log message as well as an HTTP response. Let's try to keep it one level, so a simple dict
    - ``debug_data`` -- fields not shown to the user but logged. Keys should not conflict with data
    """
    def __init__(
        self,
        *,
        message: str,
        status_code: int,
        data: ty.Optional[ty.Dict[str, ty.Union[str, Numeric]]] = None,
        # Annotation corrected: debug_data is used as a dict (``or dict()``
        # below and ``**e.debug_data`` at the catch site), not a scalar.
        debug_data: ty.Optional[ty.Dict[str, ty.Union[str, Numeric]]] = None,
    ):
        self.message = message
        self.status_code = status_code
        # Fall back to fresh empty dicts when None (or falsy) was passed
        self.data = data or dict()
        self.debug_data = debug_data or dict()
def create_keyvault(
    *,
    credential: ty.Union[ManagedIdentityCredential, DefaultAzureCredential],
    subscription_id: str,
    tenant_id: str,
    resource_group_name: str,
    location: str,
    keyvault_name: str,
    owning_group_object_id: str,
) -> Vault:
    """
    Validate ``keyvault_name``, then provision an Azure Key Vault in the
    given resource group, granting the AAD group ``owning_group_object_id``
    full key and secret permissions.

    Raises ConcertAzureFuncError with status 422 for an invalid name and
    409 when the name is unavailable. Blocks until provisioning finishes.
    """
    keyvault_client = KeyVaultManagementClient(
        credential,
        subscription_id,
        logging_enable=False,
    )
    # https://github.com/MicrosoftDocs/python-sdk-docs-examples/blob/main/key_vault/provision_key_vault.py
    # KV name restrictions: https://docs.microsoft.com/en-us/azure/key-vault/secrets/quick-create-cli#create-a-key-vault
    # Allowed characters: lowercase letters, digits, hyphen
    invalid_chars = tuple(
        c
        for c in keyvault_name
        if c not in itertools.chain(string.digits, string.ascii_lowercase, ("-",))
    )
    if len(invalid_chars) != 0:
        raise ConcertAzureFuncError(
            message="keyvault_name contains invalid chars",
            status_code=422,  # HTTP Unprocessable Entity : https://stackoverflow.com/a/9132152/2958070
            data={"keyvault_name": keyvault_name, "invalid_chars": repr(invalid_chars)},
        )
    if len(keyvault_name) < 3 or len(keyvault_name) > 24:
        raise ConcertAzureFuncError(
            message="keyvault_name should be between 3 and 24 characters",
            status_code=422,
            data={"keyvault_name": keyvault_name, "length": len(keyvault_name)},
        )
    # Vault names are globally unique; a deleted vault can still hold the name
    availability_result = keyvault_client.vaults.check_name_availability({"name": keyvault_name})
    if not availability_result.name_available:
        raise ConcertAzureFuncError(
            message="keyvault name not available (maybe previously claimed?)",
            status_code=409,
            data={"keyvault_name": keyvault_name},
            debug_data=dict(),
        )
    # https://docs.microsoft.com/en-us/python/api/azure-mgmt-keyvault/azure.mgmt.keyvault.v2021_04_01_preview.operations.vaultsoperations?view=azure-python#begin-create-or-update-resource-group-name--vault-name--parameters----kwargs-
    poller = keyvault_client.vaults.begin_create_or_update(
        resource_group_name,
        keyvault_name,
        VaultCreateOrUpdateParameters(
            location=location,
            properties=VaultProperties(
                tenant_id=tenant_id,
                sku=Sku(
                    name="standard",
                    family="A",
                ),
                access_policies=[
                    # let's leave this until we're actually adding users
                    AccessPolicyEntry(
                        tenant_id=tenant_id,
                        # https://portal.azure.com/#blade/Microsoft_AAD_IAM/GroupDetailsMenuBlade/Overview/groupId/5779e176-8600-472f-b067-620c2ab92d15
                        # concert-user01-sgp
                        # object_id="5779e176-8600-472f-b067-620c2ab92d15",
                        object_id=owning_group_object_id,
                        permissions=Permissions(
                            keys=["all"],
                            secrets=["all"],
                        ),
                    ),
                ],
            ),
        ),
    )
    # https://docs.microsoft.com/en-us/python/api/azure-core/azure.core.polling.lropoller?view=azure-python#result-timeout-none-
    # Block until the long-running provisioning operation completes
    keyvault = poller.result()
    return keyvault
def main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:
    """
    Azure Function entry point: read ``owning_group_object_id`` and
    ``keyvault_name`` from the query string, provision a Key Vault via
    create_keyvault(), and return a JSON response. Errors are returned
    as JSON with the invocation id so they can be found in the logs.

    Fix: ``invocation_id`` is now assigned *before* the try block --
    both except handlers reference it, so a failure before the original
    in-try assignment would have raised NameError inside the handler.
    """
    # https://stackoverflow.com/a/64523180/2958070
    invocation_id = context.invocation_id
    try:
        owning_group_object_id = req.params.get("owning_group_object_id", None)
        if owning_group_object_id is None:
            raise ConcertAzureFuncError(
                message="missing required URL parameter",
                status_code=422,
                data={"missing_parameter": "owning_group_object_id"},
            )
        keyvault_name = req.params.get("keyvault_name", None)
        if keyvault_name is None:
            raise ConcertAzureFuncError(
                message="missing required URL parameter",
                status_code=422,
                data={"missing_parameter": "keyvault_name"},
            )
        # https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python
        # I really just wanna log in via managed service or, failing that, the CLI
        credential = DefaultAzureCredential(
            exclude_shared_token_cache_credential=True,
            exclude_visual_studio_code_credential=True,
        )
        # https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.managedidentitycredential?view=azure-python
        # credential = ManagedIdentityCredential(logging_enable=True)
        logger.info(f"credential = {credential!r}")
        # https://docs.microsoft.com/en-us/azure/azure-functions/functions-how-to-use-azure-function-app-settings?tabs=portal
        # Where do we want to create these key vaults (app settings; raises
        # KeyError -> 500 path below when unset)
        KV_CREATION_AZURE_SUBSCRIPTION_ID = os.environ["KV_CREATION_AZURE_SUBSCRIPTION_ID"]
        KV_CREATION_AZURE_TENANT_ID = os.environ["KV_CREATION_AZURE_TENANT_ID"]
        KV_CREATION_RESOURCE_GROUP_NAME = os.environ["KV_CREATION_RESOURCE_GROUP_NAME"]
        KV_CREATION_LOCATION = os.environ["KV_CREATION_LOCATION"]
        # keyvault_name = f"cncrt-{random.randint(0,10000):05}-kv"
        keyvault = create_keyvault(
            credential=credential,
            subscription_id=KV_CREATION_AZURE_SUBSCRIPTION_ID,
            tenant_id=KV_CREATION_AZURE_TENANT_ID,
            resource_group_name=KV_CREATION_RESOURCE_GROUP_NAME,
            location=KV_CREATION_LOCATION,
            keyvault_name=keyvault_name,
            owning_group_object_id=owning_group_object_id,
        )
        debug_data = {
            "message": "keyvault created",
            "keyvault_name": keyvault_name,
            "keyvault": repr(keyvault),
        }
        logger.info(json.dumps(debug_data))
        ret_data = {
            "invocation_id": invocation_id,
            "message": "keyvault created",
            "keyvault_name": keyvault_name,
            "keyvault": keyvault.as_dict(),
        }
        return func.HttpResponse(
            body=json.dumps(ret_data),
            status_code=201,
            mimetype="application/json",
        )
    except ConcertAzureFuncError as e:
        # Expected, structured failure: log everything, return public fields
        log_data = {
            "message": e.message,
            "status_code": e.status_code,
            **e.data,
            **e.debug_data,
        }
        logger.error(json.dumps(log_data))
        ret_data = {"message": e.message, "invocation_id": invocation_id, **e.data}
        return func.HttpResponse(
            body=json.dumps(ret_data),
            status_code=e.status_code,
            mimetype="application/json",
        )
    except Exception:
        # I'm in a bit of quandery because I need all this information
        # It's nicely formatted to the logs if this exception isn't handled
        # but I also need to `return` something to the user and give them
        # the invocation_id so I can find this again
        # So here we go manually formatting exceptions...
        exc_type, exc_value, exc_traceback = sys.exc_info()
        debug_data = {
            "exc_type": repr(exc_type),
            "exc_value": repr(exc_value),
            "exc_traceback": "".join(
                traceback.format_exception(exc_type, exc_value, exc_traceback)
            ),
        }
        logger.error(json.dumps(debug_data))
        ret_data = {"message": "unexpected error", "invocation_id": invocation_id}
        return func.HttpResponse(
            body=json.dumps(ret_data),
            status_code=500,
            mimetype="application/json",
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.