index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
24,300 | 38c82368b2205a5b9cd06123feeb04f8ad211c11 | import os
import time
import random
import numpy as np
import torch.autograd
from skimage import io
from torch import optim
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
working_path = os.path.dirname(os.path.abspath(__file__))
###############################################
from datasets import RS_PD_random as RS
from models.LANet import LANet as Net
NET_NAME = 'LANet'
DATA_NAME = 'PD'
###############################################
from utils.loss import CrossEntropyLoss2d
from utils.utils import accuracy, intersectionAndUnion, AverageMeter
args = {
'train_batch_size': 8,
'val_batch_size': 8,
'lr': 0.1,
'epochs': 50,
'gpu': True,
'lr_decay_power': 1.5,
'train_crop_size': 512,
'crop_nums': 200,
'val_crop_size': 512,
'weight_decay': 5e-4,
'momentum': 0.9,
'print_freq': 100,
'predict_step': 5,
'pred_dir': os.path.join(working_path, 'results', DATA_NAME, NET_NAME+'.png'),
'chkpt_path': os.path.join(working_path, 'checkpoints', DATA_NAME, NET_NAME),
'log_dir': os.path.join(working_path, 'logs', DATA_NAME, NET_NAME),
'load_path': os.path.join(working_path, 'checkpoints', DATA_NAME, 'xxx.pth')
}
if not os.path.exists(args['log_dir']): os.makedirs(args['log_dir'])
writer = SummaryWriter(args['log_dir'])
def main():
    """Build the LANet model, Potsdam dataloaders, loss and optimizer, then train.

    Side effects: writes TensorBoard logs via the module-level `writer` and
    saves checkpoints from within train().
    """
    # 5 input channels (multispectral + DSM? -- TODO confirm); +1 class for the void label.
    net = Net(5, num_classes=RS.num_classes+1).cuda()
    #net.load_state_dict(torch.load(args['load_path']), strict=False)
    train_set = RS.RS('train', random_crop=True, crop_nums=args['crop_nums'], random_flip=True, crop_size=args['train_crop_size'], padding=True)
    train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=4, shuffle=True)
    val_set = RS.RS('val', sliding_crop=True, crop_size=args['val_crop_size'])
    val_loader = DataLoader(val_set, batch_size=args['val_batch_size'], num_workers=4, shuffle=False)
    # Label 0 is excluded from the loss (void/background).
    criterion = CrossEntropyLoss2d(ignore_index=0).cuda()
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args['lr'], weight_decay=args['weight_decay'], momentum=args['momentum'], nesterov=True)
    # NOTE(review): this scheduler is never stepped (scheduler.step() in train()
    # is commented out); the LR is actually driven by adjust_lr_MP per iteration.
    scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.95, last_epoch=-1)
    train(train_loader, net, criterion, optimizer, scheduler, args, val_loader)
    writer.close()
    print('Training finished.')
def train(train_loader, net, criterion, optimizer, scheduler, train_args, val_loader):
    """Full training loop: per-iteration LR decay, main + auxiliary losses,
    per-epoch validation, and checkpointing when validation accuracy improves.

    NOTE(review): mixes the module-level `args` with the `train_args` parameter;
    main() passes the same dict for both today.
    """
    bestaccT=0
    bestaccV=0.5  # checkpoints are only written once val accuracy exceeds 0.5
    bestloss=1
    begin_time = time.time()
    all_iters = float(len(train_loader)*args['epochs'])
    curr_epoch=0
    while True:
        torch.cuda.empty_cache()
        net.train()
        start = time.time()
        acc_meter = AverageMeter()
        train_main_loss = AverageMeter()
        curr_iter = curr_epoch*len(train_loader)
        for i, data in enumerate(train_loader):
            running_iter = curr_iter+i+1
            # Two-phase polynomial LR schedule (see adjust_lr_MP below).
            adjust_lr_MP(optimizer, running_iter, all_iters)
            imgs, labels = data
            if args['gpu']:
                imgs = imgs.cuda().float()
                labels = labels.cuda().long()
            optimizer.zero_grad()
            outputs, aux1, aux2 = net(imgs)#
            assert outputs.shape[1] == RS.num_classes+1
            main_loss = criterion(outputs, labels)
            aux_loss1 = criterion(aux1, labels)
            aux_loss2 = criterion(aux2, labels)
            # Deep supervision: the two auxiliary heads are down-weighted by 0.3.
            loss = main_loss + aux_loss1 *0.3 + aux_loss2 *0.3
            loss.backward()
            optimizer.step()
            labels = labels.cpu().detach().numpy()
            outputs = outputs.cpu().detach()
            _, preds = torch.max(outputs, dim=1)
            preds = preds.numpy()
            # batch_valid_sum = 0
            # Average per-sample accuracy within the batch, then feed the batch
            # mean into the epoch meter.
            acc_curr_meter = AverageMeter()
            for (pred, label) in zip(preds, labels):
                acc, valid_sum = accuracy(pred, label)
                # print(valid_sum)
                acc_curr_meter.update(acc)
            acc_meter.update(acc_curr_meter.avg)
            train_main_loss.update(loss.cpu().detach().numpy())
            # train_aux_loss.update(aux_loss, batch_pixel_sum)
            curr_time = time.time() - start
            if (i + 1) % train_args['print_freq'] == 0:
                print('[epoch %d] [iter %d / %d %.1fs] [lr %f] [train loss %.4f acc %.2f]' % (
                    curr_epoch, i + 1, len(train_loader), curr_time, optimizer.param_groups[0]['lr'],
                    train_main_loss.val, acc_meter.val*100))
                writer.add_scalar('train loss', train_main_loss.val, running_iter)
                loss_rec = train_main_loss.val
                writer.add_scalar('train accuracy', acc_meter.val, running_iter)
                # writer.add_scalar('train_aux_loss', train_aux_loss.avg, running_iter)
                writer.add_scalar('lr', optimizer.param_groups[0]['lr'], running_iter)
        # Validate once per epoch; save a checkpoint on a new best val accuracy.
        acc_v, loss_v = validate(val_loader, net, criterion, curr_epoch, train_args)
        if acc_meter.avg>bestaccT: bestaccT=acc_meter.avg
        if acc_v>bestaccV:
            bestaccV=acc_v
            bestloss=loss_v
            torch.save(net.state_dict(), args['chkpt_path']+'_%de_OA%.2f.pth'%(curr_epoch, acc_v*100))
        print('Total time: %.1fs Best rec: Train %.2f, Val %.2f, Val_loss %.4f' %(time.time()-begin_time, bestaccT*100, bestaccV*100, bestloss))
        curr_epoch += 1
        #scheduler.step()
        if curr_epoch >= train_args['epochs']:
            return
def validate(val_loader, net, criterion, curr_epoch, train_args):
    """Evaluate on the validation set; return (mean accuracy, mean loss).

    Every args['predict_step'] epochs, the first prediction of the first batch
    is colorized and written to args['pred_dir'].
    """
    # the following code is written assuming that batch size is 1
    net.eval()
    torch.cuda.empty_cache()
    start = time.time()
    val_loss = AverageMeter()
    acc_meter = AverageMeter()
    for vi, data in enumerate(val_loader):
        imgs, labels = data
        if train_args['gpu']:
            imgs = imgs.cuda().float()
            labels = labels.cuda().long()
        with torch.no_grad():
            outputs, _, _ = net(imgs)
            loss = criterion(outputs, labels)
        val_loss.update(loss.cpu().detach().numpy())
        outputs = outputs.cpu().detach()
        labels = labels.cpu().detach().numpy()
        _, preds = torch.max(outputs, dim=1)
        preds = preds.numpy()
        for (pred, label) in zip(preds, labels):
            acc, valid_sum = accuracy(pred, label)
            acc_meter.update(acc)
        if curr_epoch%args['predict_step']==0 and vi==0:
            pred_color = RS.Index2Color(preds[0])
            io.imsave(args['pred_dir'], pred_color)
            print('Prediction saved!')
    curr_time = time.time() - start
    # NOTE(review): .average() here vs .avg on the return line -- assumes
    # utils.utils.AverageMeter provides both; confirm.
    print('%.1fs Val loss: %.2f Accuracy: %.2f'%(curr_time, val_loss.average(), acc_meter.average()*100))
    writer.add_scalar('val_loss', val_loss.average(), curr_epoch)
    writer.add_scalar('val_Accuracy', acc_meter.average(), curr_epoch)
    return acc_meter.avg, val_loss.avg
def adjust_lr(optimizer, curr_iter, all_iter, init_lr=args['lr']):
    """Polynomial LR decay: lr = init_lr * (1 - progress) ** args['lr_decay_power']."""
    remaining = 1. - float(curr_iter) / all_iter
    new_lr = init_lr * (remaining ** args['lr_decay_power'])
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def adjust_lr_increase(optimizer, curr_iter, all_iter, init_lr=args['lr'], power=1.5):
    """Polynomial warm-up: lr grows as init_lr * (curr_iter / all_iter) ** power."""
    progress = float(curr_iter) / all_iter
    new_lr = init_lr * progress ** power
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def adjust_lr_MP(optimizer, curr_iter, all_iter, init_lr=args['lr'], mid_lr=args['lr']/3, init_power=args['lr_decay_power'], mid_power=1.5):
    """Two-phase polynomial LR decay.

    Phase 1 follows the init_power polynomial until the LR would reach mid_lr;
    phase 2 restarts a (1 - t) ** mid_power decay from mid_lr down to zero over
    the remaining iterations.
    """
    # Iteration at which the phase-1 polynomial init_lr*(1-t)**p hits mid_lr:
    # t = 1 - (mid_lr/init_lr)**(1/p).
    mid_iter = (1 - pow(mid_lr/init_lr, 1/init_power)) * all_iter
    if curr_iter<mid_iter:
        running_lr = init_lr * ((1. - float(curr_iter) / all_iter) ** init_power)
    else:
        running_lr = mid_lr * ((1. - float(curr_iter-mid_iter) / (all_iter-mid_iter)) ** mid_power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = running_lr
if __name__ == '__main__':
main()
|
24,301 | 7bc588b801d79d401d0bf44de29239bc68b79917 | from flask import Blueprint
from flask_restful import Api
from resources.usuarios import Hello, Usuario, UsuarioList
api_bp = Blueprint('api', __name__)
api = Api(api_bp)
# Route
api.add_resource(Hello, '/hello')
api.add_resource(UsuarioList, '/usuarios')
api.add_resource(Usuario, '/usuarios/<usuario_id>') |
24,302 | 37926465ff24940cba035d36ffb591c2dd6a22e9 | import io
from PIL import Image
import logging
import kestrel as ks
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
logger = logging.getLogger('global')
def pil_loader(img_bytes, filepath):
    """Decode raw image bytes into an RGB PIL image.

    Returns None when decoding fails (the failure is logged). Fix: the
    original left `img` unbound when Image.open raised, so the `return img`
    line crashed with UnboundLocalError instead of reporting the bad file.
    """
    buff = io.BytesIO(img_bytes)
    img = None
    try:
        with Image.open(buff) as img:
            img = img.convert('RGB')
    except IOError:
        logger.info('Failed in loading {}'.format(filepath))
    return img
def kestrel_loader(img_bytes, filepath):
    """Decode image bytes into a Kestrel frame, converted to RGB and uploaded
    to device memory when the Kestrel device is GPU-backed.

    Returns a single-element list wrapping the frame (possibly still empty if
    loading failed; the failure is only logged).
    """
    input_frame = ks.Frame()
    try:
        # assumes img_bytes exposes .tobytes() (memoryview/ndarray-like) -- TODO confirm
        image_data = img_bytes.tobytes()
        input_frame.create_from_mem(image_data, len(image_data))
        if input_frame.frame_type != ks.KESTREL_VIDEO_RGB:
            input_frame = input_frame.cvt_color(ks.KESTREL_VIDEO_RGB)
        if ks.Device().mem_type() == ks.KESTREL_MEM_DEVICE:
            input_frame = input_frame.upload()
    except IOError:
        logger.info('Failed in loading {}'.format(filepath))
    return [input_frame]
def build_image_reader(reader_type):
    """Return the loader function for `reader_type` ('pil' or 'kestrel').

    Raises:
        NotImplementedError: for unknown types, naming the offending value
        (the original raised a bare NotImplementedError with no context).
    """
    if reader_type == 'pil':
        return pil_loader
    elif reader_type == 'kestrel':
        return kestrel_loader
    else:
        raise NotImplementedError('unsupported image reader type: {}'.format(reader_type))
|
24,303 | 4c0cb26f3217e7a6a0ba7e7c5de2ab88f68229f4 | # Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pepdata import iedb, reduced_alphabet
def test_tcell_hla_restrict_a24():
    """
    IEDB T-cell: Test that HLA restriction actually decreases
    number of results and that regular expression patterns
    are being used correctly
    """
    df_all = iedb.load_tcell(nrows=1000)
    # Two spellings of the same serotype; hla= is a regex pattern, so the '*'
    # in the second form must be escaped.
    df_a24_1 = iedb.load_tcell(hla='HLA-A24', nrows=1000)
    df_a24_2 = iedb.load_tcell(hla='HLA-A\*24', nrows=1000)
    df_a24_combined = \
        iedb.load_tcell(hla = 'HLA-A24|HLA-A\*24', nrows=1000)
    # Restriction should filter rows...
    assert len(df_a24_1) < len(df_all)
    assert len(df_a24_2) < len(df_all)
    # ...and the union cannot exceed the sum of its parts (overlap allowed).
    assert len(df_a24_combined) <= \
        len(df_a24_1) + len(df_a24_2), \
        "Expected %d <= %d + %d" % \
        (len(df_a24_combined), len(df_a24_1), len(df_a24_2))
def test_tcell_hla_exclude_a0201():
    """
    Test that excluding HLA allele A*02:01
    actually returns a DataFrame not containing
    that allele
    """
    df_all = iedb.load_tcell(nrows=1000)
    df_exclude = iedb.load_tcell(nrows=1000, exclude_hla="HLA-A*02:01")
    # Fix: Series.str.contains treats its pattern as a regex by default, so the
    # unescaped '*' raised re.error ("nothing to repeat"). Match the allele
    # name literally instead.
    assert df_all['MHC Allele Name'].str.contains("HLA-A*02:01", regex=False).any()
    n_A0201_entries = df_exclude['MHC Allele Name'].str.contains("HLA-A*02:01", regex=False).sum()
    assert n_A0201_entries == 0, \
        "Not supposed to contain HLA-A*02:01, but found %d rows of that allele" % \
        n_A0201_entries
def test_tcell_reduced_alphabet():
    """
    IEBD T-cell: Changing to a binary amino acid alphabet should reduce
    the number of samples since some distinct 20-letter strings collide
    as 2-letter strings
    """
    imm, non = iedb.load_tcell_classes(nrows = 100)
    # hp2 collapses the 20 amino acids onto a 2-letter hydrophobic/polar alphabet.
    imm2, non2 = \
        iedb.load_tcell_classes(
            nrows = 100,
            reduced_alphabet = reduced_alphabet.hp2)
    # Collisions must strictly shrink the combined class sizes.
    assert len(imm) + len(non) > len(imm2) + len(non2)
def test_mhc_hla_a2():
    """
    IEDB MHC: Test that HLA restriction actually decreases number of results and
    that regular expression patterns are being used correctly
    """
    # NOTE(review): unlike the other loads in this file, no nrows cap here --
    # this loads the full dataset; confirm that's intended.
    df_all = iedb.load_mhc()
    df_a2_1 = iedb.load_mhc(hla='HLA-A2', nrows=1000)
    df_a2_2 = iedb.load_mhc(hla='HLA-A\*02', nrows=1000)
    df_a2_combined = iedb.load_mhc(hla = 'HLA-A2|HLA-A\*02', nrows=1000)
    assert len(df_a2_1) < len(df_all)
    assert len(df_a2_2) < len(df_all)
    # The union of both patterns may overlap, so <= sum of the parts.
    assert len(df_a2_combined) <= len(df_a2_1) + len(df_a2_2), \
        "Expected %d <= %d + %d" % \
        (len(df_a2_combined), len(df_a2_1), len(df_a2_2))
def test_mhc_reduced_alphabet():
    """IEDB MHC: collapsing to the 2-letter HP alphabet must shrink the dataset,
    since distinct 20-letter peptides collide after reduction."""
    full_pos, full_neg = iedb.load_mhc_classes(nrows=100)
    hp_pos, hp_neg = iedb.load_mhc_classes(
        nrows=100,
        reduced_alphabet=reduced_alphabet.hp2)
    assert len(full_pos) + len(full_neg) > len(hp_pos) + len(hp_neg)
|
def classifica_idade(i):
    """Classify an age in years: 'crianca' (<=11), 'adolescente' (12-17),
    'adulto' (>=18)."""
    if i <= 11:
        return "crianca"
    if i >= 18:
        return "adulto"
    if 12 <= i <= 17:
        return "adolescente"
24,305 | 230e1fee95a4c3ec9df0b11719313b0c4ac516fe | import praw
from praw.helpers import comment_stream
r = praw.Reddit("luisbravo1 test")
r.login()
target_text = "Hello"
response_text = "Welcome to our subreddit!"
processed = []
# Poll the subreddit's comment stream forever, replying at most once per comment.
while True:
    for c in comment_stream(r, 'BotTestBravo'):
        # Reply only when the trigger text appears and the comment id was not
        # handled before (NOTE(review): `processed` grows without bound).
        if target_text in c.body and c.id not in processed:
            c.reply(response_text)
            processed.append(c.id)
|
24,306 | fc579195cafe0233f83b9d49e7bb331f5c60040c | api_key = "c49b3bdc38dbb3c5803f8e5d47313233" |
24,307 | 4a18a20bbcde800f7a5168e06cc5bab246fa1cf2 | from hiero.core import *
from hiero.ui import *
from PySide.QtGui import *
from PySide.QtCore import *
class ReinstateAudioFromSource(QAction):
    """Timeline/spreadsheet context-menu action that re-adds the audio tracks of
    a selected video item's source clip and links them to the video item."""

    def __init__(self):
        QAction.__init__(self, "Reinstate Audio", None)
        self.triggered.connect(self.doit)
        # Appear in both the timeline and the spreadsheet context menus.
        hiero.core.events.registerInterest("kShowContextMenu/kTimeline", self.eventHandler)
        hiero.core.events.registerInterest("kShowContextMenu/kSpreadsheet", self.eventHandler)

    def trackExists(self, sequence, trackName):
        """Return the track named trackName in sequence, or None."""
        for track in sequence:
            if track.name() == trackName:
                return track
        return None

    def reAddAudioFromSource(self, selection):
        """For each selected video item whose source has audio, rebuild audio
        track-items from the source clip and link them to the video item."""
        for item in selection:
            track = item.parent()
            sequence = track.parent()
            bin = sequence.project().clipsBin()
            if item.source().mediaSource().hasAudio() and isinstance(item.parent(), hiero.core.VideoTrack):
                inTime = item.timelineIn()
                outTime = item.timelineOut()
                sourceIn = item.sourceIn()
                sourceOut = item.sourceOut()
                # Fresh clip over the same media, trimmed to the item's source range.
                newclip = Clip(MediaSource(item.source().mediaSource()))
                bin.addItem(BinItem(newclip))
                newclip.setInTime(0)
                newclip.setOutTime(newclip.duration())
                newclip.setInTime(sourceIn)
                newclip.setOutTime(sourceOut)
                videoClip = track.addTrackItem(newclip, inTime)
                for i in range(item.source().numAudioTracks()):
                    newName = "Audio " + str( i+1 )
                    mediaOnTrack = False
                    # Fix: look the track up once instead of calling trackExists twice.
                    audiotrack = self.trackExists(sequence, newName)
                    if audiotrack is None:
                        audiotrack = sequence.addTrack(hiero.core.AudioTrack(newName))
                    if len(audiotrack.items()) > 0:
                        # Fix: the original inner loop reused the name 'item',
                        # clobbering the selected item for later audio tracks
                        # and later selections.
                        for existingItem in audiotrack.items():
                            if existingItem.timelineIn() in range(inTime, outTime) or existingItem.timelineOut() in range(inTime, outTime):
                                mediaOnTrack = True
                                break
                    if mediaOnTrack:
                        # Destination range is occupied: place audio on a new track.
                        newaudiotrack = sequence.addTrack(hiero.core.AudioTrack("New Track " + str( i + 1)))
                        audioClip = newaudiotrack.addTrackItem(newclip, i, inTime)
                    else:
                        audioClip = audiotrack.addTrackItem(newclip, i, inTime)
                    audioClip.link(videoClip)

    def doit(self):
        selection = hiero.ui.activeView().selection()
        self.reAddAudioFromSource(selection)

    def eventHandler(self, event):
        """Enable and attach the action whenever the sending view has a selection."""
        if not hasattr(event.sender, 'selection'):
            # Not a view that can carry a selection.
            return
        s = event.sender.selection()
        if s is None:
            s = ()
        title = "Reinstate Audio"
        self.setText(title)
        self.setEnabled( len(s) > 0 )
        event.menu.addAction(self)
action = ReinstateAudioFromSource()
|
24,308 | 6cf19d49a58914410237fe1b193af34dc19355bb | ##name = input("enter your name :")
##print("your name is %s "%(name))
# Build 0..20 (idiomatic: list(range(...)) instead of an append loop)
# and print the even numbers via slicing.
a = list(range(21))
print(a[0:21:2])
# Join words with single spaces.
a = ["cool", "smart", "daddy"]
c = " ".join(a)
print(c)
|
24,309 | d595aa11a6f06c973de6df5c06b498187de71b73 | # Copyright (c) 2021. Kenneth A. Grady
# See BSD-2-Clause-Patent license in LICENSE.txt
# Additional licenses are in the license folder.
# From standard libraries
import logging
from collections import deque
# From local application
import flush_variables
import font_table
import find_group_end
log = logging.getLogger(__name__)
def processor(main_dict: dict) -> dict:
    """Locate the RTF font table ("fonttbl") in the working string, parse each
    font definition into main_dict, and flush working variables.

    Returns the (mutated) main_dict.
    """
    try:
        table_search = main_dict["wif_string"].find("fonttbl")
        if table_search != -1:
            # Step back over the "{\" that precedes the keyword.
            main_dict["group_start"] = table_search - 2
            main_dict["index"] = main_dict["group_start"]
            deck = deque()
            main_dict["group_contents"] = ""
            main_dict = find_group_end.processor(main_dict=main_dict, deck=deck)
    except Exception as error:
        # Fix: logging takes the message first with lazy %-style args after;
        # the original log.debug(error, msg) used the exception object as the
        # format string and dropped the message. (IndexError was also listed
        # redundantly alongside Exception.)
        log.debug("A problem occurred searching for fonttbl: %s", error)
    # Process the code settings for each font number and store the
    # settings in a dictionary.
    main_dict = font_table.trim_fonttbl(main_dict=main_dict)
    main_dict, code_strings_list = font_table.split_code_strings(
        main_dict=main_dict)
    code_strings_list = font_table.remove_code_strings(
        code_strings_list=code_strings_list)
    font_table.parse_code_strings(
        code_strings_list=code_strings_list, main_dict=main_dict)
    flush_variables.processor(main_dict=main_dict)
    return main_dict
|
24,310 | 70afb253246845f6dc623caf8fd00e3f8075084e | from rest_framework import serializers
from .models import Selecciona
class SeleccionaSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a Selecciona row as {'pk', 'categoria'}."""
    class Meta:
        model = Selecciona
        fields = ('pk','categoria')
|
24,311 | 32ee8f4b9cce5e359644fc9c48ac05a8733ff336 |
__author__ = 'Danyang'
class Solution(object):
    """'k Sum II': find all k-element subsets of A that sum to target."""

    def kSumII(self, A, k, target):
        """Return every combination of exactly k numbers from A whose sum is
        target, found by include/exclude DFS."""
        ret = []
        self.dfs(A, 0, k, [], target, ret)
        return ret

    def dfs(self, A, i, k, cur, remain, ret):
        """Index-based DFS: at element i, either skip it or take it."""
        if len(cur) == k and remain == 0:
            ret.append(list(cur))
            return
        # Prune: out of elements, too many picked, or not enough left to reach k.
        if i >= len(A) or len(cur) > k or len(A)-i+len(cur) < k:
            return
        self.dfs(A, i+1, k, cur, remain, ret)
        cur.append(A[i])
        self.dfs(A, i+1, k, cur, remain-A[i], ret)
        cur.pop()

    def dfs_array(self, A, k, cur, remain, ret):
        """Variant that consumes A destructively from the front, restoring it."""
        if len(cur) == k and remain == 0:
            ret.append(list(cur))
        if not A or len(cur) >= k or len(A)+len(cur) < k:
            return
        num = A.pop(0)
        self.dfs_array(A, k, cur, remain, ret)
        cur.append(num)
        self.dfs_array(A, k, cur, remain-num, ret)
        cur.pop()
        # Fix: Python lists have no .push(); A.push(0, num) raised
        # AttributeError. Restore the element with insert.
        A.insert(0, num)

    def dfs_stk(self, A, k, cur, remain, ret):
        """Variant that consumes A destructively from the back, restoring it."""
        if len(cur) == k and remain == 0:
            ret.append(list(cur))
        if not A or len(cur) >= k or len(A)+len(cur) < k:
            return
        num = A.pop()
        # Fix: the original recursed into self.dfs with the wrong arity
        # (dfs takes an index argument); recurse into dfs_stk itself.
        self.dfs_stk(A, k, cur, remain, ret)
        cur.append(num)
        self.dfs_stk(A, k, cur, remain-num, ret)
        cur.pop()
        A.append(num)
if __name__ == "__main__":
print Solution().kSumII([1, 2, 3, 4], 2, 5)
assert Solution().kSumII([1, 2, 3, 4], 2, 5) == [[3, 2], [1, 4]] |
24,312 | 407da5c2fb2a5a06fc677b30d07838b7ce283fe7 | from checkov.kubernetes.checks import * # noqa
|
24,313 | 1c31fecbdde4c28816ece8a60be867708fbdae47 | # https://www.youtube.com/watch?v=BJ-VvGyQxho
# instance variables are unique. eg. our name, email addres, etc
# class variables are the same for each instance, so data are shared among all instances of a class
class Emplyoee:
    """Toy employee record demonstrating class vs. instance variables.

    Class variables (shared by every instance): RAISE_AMT, NUM_EMP.
    Instance variables (unique per object): first, last, pay, email.
    """

    RAISE_AMT = 1.04  # raise multiplier; instances/subclasses may override
    NUM_EMP = 0       # running count of constructed instances

    def __init__(self, first, last, pay):
        """Store the name and pay, derive the company email address."""
        self.first = first
        self.last = last
        self.pay = pay
        self.email = first + '.' + last + '@company.com'
        # Increment on the class (not self) so the tally is shared.
        Emplyoee.NUM_EMP += 1

    def fullname(self):
        """Return 'First Last'."""
        return self.first + ' ' + self.last

    def app_raise(self):
        """Apply the raise multiplier (instance override wins over the class)."""
        self.pay = int(self.pay * self.RAISE_AMT)
# -------------------------
print(Emplyoee.NUM_EMP)
# instance variables are unique. eg. our name, email addres, etc
emp1 = Emplyoee('Andrea', 'Huang', 8000)
emp2 = Emplyoee('P', 'A', 7000)
print(Emplyoee.NUM_EMP)
# following 2 lines function the same
print(emp1.fullname())
print(Emplyoee.fullname(emp1))
print(emp2.email)
# override the class variable for emp1
# allow any subclass to override the constant class variable
emp1.RAISE_AMT = 1.05
# print out a list of all the attributes and variables of an instance
print(emp1.__dict__)
print(Emplyoee.RAISE_AMT)
print(emp1.RAISE_AMT)
print(emp2.RAISE_AMT)
emp2.app_raise()
print(emp2.pay)
|
24,314 | 971079683a60ca2f57aa93650dc34c12db876a68 | # import the necessary packages
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2 |
24,315 | a0125753c16203862a9e68e14efe5a3698ec81b4 | import os
print "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
|
24,316 | 5d5bc5e01537c9296ffd046c1c70df2eeccba856 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Profiler error code and messages."""
from enum import unique, Enum
_GENERAL_MASK = 0b00001 << 7
_PARSER_MASK = 0b00010 << 7
_ANALYSER_MASK = 0b00011 << 7
class ProfilerMgrErrors(Enum):
    """Shared base for profiler error enums (defines no members itself, which
    keeps subclassing legal for Enum)."""
@unique
class ProfilerErrors(ProfilerMgrErrors):
    """Profiler error codes.

    Each value packs a sub-code (low bits) with a category mask (bit 7 and up):
    general, parser, or analyser.
    """
    # general error code
    PARAM_VALUE_ERROR = 0 | _GENERAL_MASK
    PATH_ERROR = 1 | _GENERAL_MASK
    PARAM_TYPE_ERROR = 2 | _GENERAL_MASK
    DIR_NOT_FOUND_ERROR = 3 | _GENERAL_MASK
    FILE_NOT_FOUND_ERROR = 4 | _GENERAL_MASK
    IO_ERROR = 5 | _GENERAL_MASK

    # parser error code
    DEVICE_ID_MISMATCH_ERROR = 0 | _PARSER_MASK
    RAW_FILE_ERROR = 1 | _PARSER_MASK
    STEP_NUM_NOT_SUPPORTED_ERROR = 2 | _PARSER_MASK
    JOB_ID_MISMATCH_ERROR = 3 | _PARSER_MASK

    # analyser error code
    COLUMN_NOT_EXIST_ERROR = 0 | _ANALYSER_MASK
    ANALYSER_NOT_EXIST_ERROR = 1 | _ANALYSER_MASK
    DEVICE_ID_ERROR = 2 | _ANALYSER_MASK
    OP_TYPE_ERROR = 3 | _ANALYSER_MASK
    GROUP_CONDITION_ERROR = 4 | _ANALYSER_MASK
    SORT_CONDITION_ERROR = 5 | _ANALYSER_MASK
    FILTER_CONDITION_ERROR = 6 | _ANALYSER_MASK
    COLUMN_NOT_SUPPORT_SORT_ERROR = 7 | _ANALYSER_MASK
    PIPELINE_OP_NOT_EXIST_ERROR = 8 | _ANALYSER_MASK
@unique
class ProfilerErrorMsg(Enum):
    """Profiler error messages (str.format-style '{}' placeholders)."""
    # general error msg
    PARAM_VALUE_ERROR = 'Param value error. {}'
    PATH_ERROR = 'Path error. {}'
    PARAM_TYPE_ERROR = 'Param type error. {}'
    DIR_NOT_FOUND_ERROR = 'The dir <{}> not found.'
    FILE_NOT_FOUND_ERROR = 'The file <{}> not found.'
    IO_ERROR = 'Read or write file fail.'

    # parser error msg
    DEVICE_ID_MISMATCH_ERROR = 'The device ID mismatch.'
    RAW_FILE_ERROR = 'Raw file error. {}'
    STEP_NUM_NOT_SUPPORTED_ERROR = 'The step num must be in {}'
    JOB_ID_MISMATCH_ERROR = 'The job id in the parameter is not the same as ' \
                            'in the training trace file. '

    # analyser error msg
    COLUMN_NOT_EXIST_ERROR = 'The column {} does not exist.'
    ANALYSER_NOT_EXIST_ERROR = 'The analyser {} does not exist.'
    # NOTE(review): 'DEIVICE' is misspelled and does not mirror
    # ProfilerErrors.DEVICE_ID_ERROR -- name-based lookups between the two
    # enums will miss this member; confirm before renaming (callers may
    # already reference the typo).
    DEIVICE_ID_ERROR = 'The device_id in search_condition error, {}'
    FILTER_CONDITION_ERROR = 'The filter_condition in search_condition error, {}'
    OP_TYPE_ERROR = 'The op_type in search_condition error, {}'
    GROUP_CONDITION_ERROR = 'The group_condition in search_condition error, {}'
    SORT_CONDITION_ERROR = 'The sort_condition in search_condition error, {}'
    COLUMN_NOT_SUPPORT_SORT_ERROR = 'The column {} does not support to sort.'
    PIPELINE_OP_NOT_EXIST_ERROR = 'The minddata pipeline operator {} does not exist.'
|
24,317 | 90117c7ce352c776dc6eb435894adc93aae9259e | def main(num):
for i in range(1, num):
divisorSum1 = divisorCalc(i)
if divisorSum1 != 0:
divisorSum2 = divisorCalc(divisorSum1)
if i == divisorSum2:
if divisorSum1 != divisorSum2:
print(divisorSum1)
print(divisorSum2)
def divisorCalc(num):
    """Return the sum of the proper divisors of num (0 when there are none).

    Idiom fix: replaces the temporary list + conditional sum with a single
    generator expression; behavior is identical (empty sum is 0).
    """
    return sum(d for d in range(1, num) if num % d == 0)
if __name__ == "__main__":
main(10000)
|
24,318 | a3f8c92b6631e02233243b0c56138b332d323860 | '''
7/11/2018: Class created. The mainGUI class will generate a window which asks the user what step of the data
reduction process they would like to go to. Kind of the command central of the whole program.
'''
import Tkinter as tk
#import w_file_select as wfs
#import r_file_select as rfs
#import s_file_select as sfs
#import wavecal
#import contrect
import s_type
import spexredux
import sys
class mainGUI:
    """Command-central window of the data-reduction program: menu entries launch
    wavelength calibration, continuum rectification and spectral typing.
    """
    def __init__(self):
        # Unused legacy geometry values (the hard-coded string below wins).
        winHeight = 600
        winWidth = 1000
        self.root = tk.Tk()
        self.root.title("Giraffe Butts")
        #self.root.geometry(str(winWidth) + "x" + str(winHeight))
        self.root.geometry("2150x1240")
        # Build the menu bar: File / Wavelength / Continuum / Spectral Typing.
        menubar = tk.Menu (self.root)
        fileMenu = tk.Menu(menubar, tearoff = False)
        fileMenu.add_command(label = "Save and Quit", command = self.saveexit_button)
        fileMenu.add_command(label = "Quit", command = self.exit_button)
        waveMenu = tk.Menu(menubar, tearoff = False)
        waveMenu.add_command(label = "Start", command = self.wave_button)
        conMenu = tk.Menu(menubar, tearoff = False)
        conMenu.add_command(label = "Start", command = self.rect_button)
        specMenu = tk.Menu(menubar, tearoff = False)
        specMenu.add_command(label = "Start", command = self.spec_button)
        menubar.add_cascade(label = "File", menu = fileMenu)
        menubar.add_cascade(label = "Wavelength Calibration", menu = waveMenu)
        menubar.add_cascade(label = "Continuum Rectification", menu = conMenu)
        menubar.add_cascade(label = "Spectral Typing", menu = specMenu)
        self.root.config(menu=menubar)
        #self.wav = wavecal.wavecal(self)
        #self.wav.grid(column = 0, row = 0)
        #self.wav.grid_remove()
        #self.con = contrect.rect(self)
        #self.con.grid(column = 0, row = 0)
        #self.con.grid_remove()
        self.ty = s_type.classification(self)
        self.ty.grid(column = 0, row = 0)
        #self.ty.grid_remove()
        # Blocks here until the window is closed.
        self.root.mainloop()
    def wave_button(self):
        # NOTE(review): the wfs import is commented out at module top -- this
        # handler raises NameError if triggered.
        w = wfs.wfiles(self)
    def rect_button(self):
        # NOTE(review): rfs import is commented out at module top.
        r = rfs.rfiles(self)
    def spec_button(self):
        # NOTE(review): sfs import is commented out at module top.
        s = sfs.sfiles(self)
    def saveexit_button(self):
        # Placeholder: saving is not implemented yet.
        print("SAVE AND EXIT")
    def exit_button(self):
        sys.exit()
    '''
    data contains (in order): Names path file, Image path file, Flat path file, Dark path file, bias path file, lamp path file, [Waves], Save path
    '''
    def setDataWave(self, data):
        self.spex = spexredux.extract(data[0], data[1], data[2], data[3], data[4], wvfiles = data[5])
        # NOTE(review): self.wav is only created by the commented-out wavecal
        # block above -- this method would raise AttributeError as-is.
        self.wav.fill(self.spex, data[6])
        self.wav.grid()
        self.root.update()
    def setDataRect(self, data):
        # Placeholder: continuum-rectification data path not implemented yet.
        print("Something")
    '''
    data conatins (in order): spectra path files, save path
    '''
    def setSpectra(self, data, source):
        self.spex = spexredux.genSpec(data[0])
        if type(source) is rfs.rfiles:
            print("CONTINUUM")
        else:
            print("SPECTRAL")
24,319 | 39abbe3d77d36bef7f6e1d5a8ca567f8412337f6 | # geometryFunctions.py
# A program to run geometry functions
# kevin kredit
import math
import string
def main():
    """Interactive driver: ask whether to compute a sphere's surface area or
    volume, read the radius, and print the result.

    Python 2 code (print statements, raw_input); note input() evaluates the
    typed expression.
    """
    print "Would you like to find the SA or V of a sphere?"
    print
    which = raw_input("If SA, type 'SA', if V, type 'V'. ")
    which = string.lower(which)
    print
    if which == "sa":
        r = input("What is the radius? ")
        m = sa(r)
        print "The SA of your sphere is approxamatly:", m, "units squred."
    if which == "v":
        r = input("What is the radius? ")
        n = v(r)
        print "The V of your sphere is approxamatly:", n, "units squared."
def sa(r):
    """Surface area of a sphere of radius r (4 * pi * r^2, pi ~= 3.14159)."""
    return 3.14159 * (4 * r ** 2)
def v(r):
    """Volume of a sphere of radius r: (4/3) * pi * r^3, pi ~= 3.14159.

    Fix: this file is Python 2 (print statements), where 4/3 is integer
    division and evaluated to 1, underestimating every volume by 25%.
    A float literal is correct on both Python 2 and 3.
    """
    return (4.0 / 3.0) * 3.14159 * r ** 3
main()
|
24,320 | ee8715b2331996b11815f2acaea7d737912e524a | li = [1, 2, 1, 3, 2, 4, 2, 5, 4, 6, 5, 6, 6, 7, 3, 7]
arr = [[] for _ in range(len(set(li))+1)]
for i in range(0, len(li), 2):
arr[li[i]].append(li[i+1])
visited = [0] * (len(set(li))+1)
def dfs_Recursive(v):
    """Recursive DFS from vertex v over the module-level adjacency list `arr`,
    printing vertices in visit order and marking them in `visited`."""
    visited[v] = 1
    print(v, end=' ')
    for i in arr[v]:
        if visited[i] == 0:
            dfs_Recursive(i)
dfs_Recursive(1)
def dfs(v, arr):
    """Iterative (stack-based) DFS from v over adjacency list `arr`, printing
    visit order; marks vertices in the module-level `visited` list."""
    stack = [v]
    while stack:
        node = stack.pop()
        if visited[node]:
            continue
        visited[node] = 1
        print(node, end=' ')
        for neighbor in arr[node]:
            if not visited[neighbor]:
                stack.append(neighbor)
dfs(1, arr)
tin = [1, 2, 1, 3, 2, 4, 2, 5, 4, 6, 5, 6, 6, 7, 3, 7]
edges = [[] for i in range(8)]
while tin: # 쌍방향 정점 만들기
x = tin.pop()
y = tin.pop()
edges[x].append(y)
edges[y].append(x)
visited = [0] * 8
def dfsr_teachers(v):
    """Recursive DFS over the bidirectional module-level `edges` list, printing
    visit order and marking vertices in the module-level `visited` list."""
    visited[v] = 1
    print(v, end=' ')
    for w in edges[v]:
        if not visited[w]:
            dfsr_teachers(w)
def dfs_teachers(v):
    """Iterative stack-based DFS over the module-level `edges` list; visits the
    same vertices as dfsr_teachers (visit order may differ)."""
    s = []
    s.append(v)
    while s:
        v = s.pop(-1)
        if not visited[v]:
            visited[v] = 1
            print(v, end=' ')
            for w in edges[v]:
                if not visited[w]:
                    s.append(w)
24,321 | 8156969c6366c26895edc4d6f967f49fb844d3bb | from pymongo import MongoClient
import sys
sys.path.insert(0,"../")
import config
def get_top_n_entities(collection, n, types=None):
    """Return the n entities with the most article references.

    When `types` is given, returns {type: [top-n entities of that type]};
    otherwise {"all": [overall top n]}. Each entry carries name, coverage
    (article count), aliases and articleIds.
    """
    # Project each entity together with the size of its articleIds array.
    pipeline = [{"$project":{"stdName":1,"type":1,"aliases":1,"articleIds":1,"num":{"$size":"$articleIds"}}}]
    cursor = list(collection.aggregate(pipeline))
    top_n_entities = {}
    if types:
        # Bucket entities by requested type, then rank each bucket.
        entities = {type:[] for type in types}
        for ent in cursor:
            if(ent['type'] in types):
                entities[ent['type']].append(ent)
        for type in entities.keys():
            # Most-referenced first.
            entities[type].sort(key=lambda x: x['num'], reverse=True)
            top_n_entities[type] = [{"name":obj['stdName'],"coverage":obj['num'],"aliases":obj['aliases'],"articleIds":obj['articleIds']} for obj in entities[type][:n]]
    else:
        cursor.sort(key=lambda x: x['num'], reverse=True)
        top_n_entities["all"] = [{"name":obj['stdName'],"coverage":obj['num'],"aliases":obj['aliases'],"articleIds":obj['articleIds']} for obj in cursor[:n]]
    return top_n_entities
def main():
    """Load the resolved-entity collection and print the top-N entities per type.

    Fix: the original nested the __main__ guard *inside* main() and never
    called main(), so running the script did nothing. The guard now lives at
    module level and invokes main().
    """
    # number of top entities needed per type
    N = 30
    # types = ['Person', 'Company', 'Organization', 'Country', 'City', 'Continent', 'ProvinceOrState']
    types = ['Person']
    client = MongoClient(config.mongoConfigs['host'], config.mongoConfigs['port'])
    db = client[config.mongoConfigs['db']]
    collection = db['farmers_opinion_resolved']
    entities = get_top_n_entities(collection, N, types)
    for ent_type in entities.keys():
        print(ent_type + ":")
        for ent in entities[ent_type]:
            print(ent['name'] + " - " + str(ent['coverage']))
        print('')


if __name__ == '__main__':
    main()
24,322 | b78b7f7e57e9400a8b40e1cece99d3df9a4d778f | #!/usr/bin/env python
from setuptools import setup
setup(name='marshmallow-har',
version='0.3',
description='Simple set of marshmallow schemas to load/dump the HTTP Archive (HAR) format.',
author='Delve Labs inc.',
author_email='info@delvelabs.ca',
url='https://github.com/delvelabs/marshmallow-har',
packages=['marshmallow_har'],
install_requires=[
'marshmallow',
])
|
24,323 | 6e89e9c10ee21dd841c3ea428212e883187746cb | # May help avoid undefined symbol errors https://pytorch.org/cppdocs/notes/faq.html#undefined-symbol-errors-from-pytorch-aten
import torch
import warnings
from . import * |
24,324 | 26490ab18b44db7f975e52c8d6a4c0c752558438 |
""" print(type("1"))
print("1")
print(type("hola mundo"))
print("hola mundo")
print(type(1))
print(1)
carro = 5
numero = "carro"
texto = "1"
numero_direccion = 524
piso_casa=1
print(type(numero))
print(numero)
print(type (texto))
print(texto)
print("la suma es :", numero+numero)
print("El reporte del formulario es :", numero_direccion+piso_casa)
"""
factura={"pan","huevos", 100, 1234, "mimundo", "ggg", 1223, "5425525"}
print(factura)
factura.add("hola")
print(factura)
#["pan", "huevos", 100, 1234, "mimundo", "ggg", 1223, "5425525", "revisando", 12324]
factura2=factura.copy()
factura2.add("object")
print(factura2)
print(factura)
hola = b"hola"
print(type(hola))
print(hola) |
24,325 | bb2c4163bfa0a3989e596287b05b0fc42194afd8 | from __future__ import print_function
import pymysql
import os
import os.path
cModule = 0
cUnit = 0
cSeries = 0
cTopic = 0
# 插入module
def persistModuleFromFile(file, subject_id, stage_id, conn):
    """Read tab-separated knowledge-tree rows from `file` (UTF-16) and insert
    one t_module row per distinct value in column 0 (skipping names already
    present for this subject).

    Uses the module-level cursor `cur` and counter `cModule`; the `conn`
    parameter is accepted but unused here.
    """
    global cModule
    cm = cModule
    f = open(file,"r",encoding="utf-16")
    preModule = ""
    line = f.readline()
    while line:
        tri = line.split("\t")
        # Flag module names containing spaces (likely malformed rows).
        if tri[0].find(" ") >= 0:
            print(tri[0]+"---------------------")
        # Only act when the module name changes (file is grouped by module).
        if preModule != tri[0]:
            cm += 1
            # print(tri[0]+file[file.rfind("/"):len(file)])
            # NOTE(review): SQL built by string concatenation -- breaks on
            # quotes and is injection-prone; prefer parameterized queries
            # (cur.execute(sql, params)).
            cur.execute("SELECT * FROM t_module WHERE name='"+tri[0]+"' AND subject_id="+str(subject_id))
            print(cur.rowcount)
            if cur.rowcount == 0:
                cur.execute("INSERT INTO t_module VALUES ("+str(cm)+",'','"+tri[0]+"','"+stage_id+"','"+subject_id+"',0,0)")
            preModule = tri[0]
        # print(+":"+tri[1]+":"+tri[2])
        line = f.readline()
    f.close()
    cModule = cm
# 插入unit
def persistUnitFromFile(file, subject_id, stage_id, conn):
    """Insert one t_unit row per distinct column-1 value from the UTF-16
    tab-separated file, linked to its module via the module-level `module`
    dict keyed by module-name + subject_id.

    Uses the module-level cursor `cur` and counter `cUnit`; `conn` is unused.
    """
    global module
    global cUnit
    cu = cUnit
    f = open(file,"r",encoding="utf-16")
    preUnit = ""
    line = f.readline()
    while line:
        tri = line.split("\t")
        # Only act when the unit name changes (file is grouped by unit).
        if preUnit != tri[1]:
            cu += 1
            # print(tri[1]+file[file.rfind("/"):len(file)])
            # NOTE(review): string-concatenated SQL; prefer parameterized queries.
            cur.execute("INSERT INTO t_unit VALUES ("+str(cu)+",'','"+str(module[str(tri[0])+str(subject_id)])+"','"+tri[1]+"','"+stage_id+"','"+subject_id+"')")
            preUnit = tri[1]
        # print(+":"+tri[1]+":"+tri[2])
        line = f.readline()
    f.close()
    cUnit = cu
# 插入series
def persistSeriesFromFile(file, subject_id, stage_id, conn):
    """Insert each new series name from a tab-separated file into t_series,
    linked to its parent module via the module-level ``module`` map."""
    global module
    global cSeries
    cs = cSeries
    pre_series = ""
    with open(file, "r", encoding="utf-16") as f:
        for line in f:
            tri = line.split("\t")
            if pre_series != tri[1]:
                cs += 1
                # parameterized: original built this statement by concatenation
                cur.execute(
                    "INSERT INTO t_series VALUES (%s,'','',%s,%s,%s,%s)",
                    (cs, module[str(tri[0]) + str(subject_id)], tri[1],
                     stage_id, subject_id))
                pre_series = tri[1]
    cSeries = cs
# Insert topics
def persistTopicFromFile(file, subject_id, stage_id, conn):
    """Insert each new topic name from a tab-separated file into t_topic,
    linked to its parent unit via the module-level ``unit`` map."""
    global cTopic
    ct = cTopic
    pre_topic = ""
    with open(file, "r", encoding="utf-16") as f:
        for line in f:
            tri = line.split("\t")
            if pre_topic != tri[2]:
                ct += 1
                # parameterized: original built this statement by concatenation
                cur.execute(
                    "INSERT INTO t_topic VALUES (%s,'','',%s,0,%s,%s,%s)",
                    (ct, tri[2], stage_id, subject_id, unit[str(tri[1])]))
                pre_topic = tri[2]
    cTopic = ct
# Top-level ETL driver: wipe the module/unit/series/topic tables and rebuild
# them from the tab-separated files under rootDir.
# NOTE(review): subject/stage are derived from filename slices filename[0:2]
# and filename[2:4] — assumes a fixed 4-char naming scheme; verify with the
# actual input files.  os.walk recurses into subdirectories but paths are
# joined against rootDir only — confirm the directory is flat.
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='noriental',charset='utf8')
cur = conn.cursor()
rootDir = "/Users/lianghongyun/Documents/work/2.27知识图谱汇总/plain"
subject = {}
subjectId = {}
stage = {}
stageId = {}
module = {}
moduleId = {}
unit = {}
unitId = {}
# Load school stages (name -> id and id -> name)
cur.execute("SELECT t.id, t.name FROM t_stage t")
for row in cur:
    stage[row[1]] = row[0]
    stageId[row[0]] = row[1]
# Load subjects
cur.execute("SELECT t.id, t.name, t.stage_id FROM t_subject t")
for row in cur:
    subjectId[str(row[0])] = str(row[1])
    subject[str(row[1])] = row[0]
# Clear the module table
cur.execute("DELETE FROM t_module")
# Load modules from the input files
for parent,dirnames,filenames in os.walk(rootDir):
    for filename in filenames:
        persistModuleFromFile(rootDir+"/"+filename,subject[filename[2:4]+filename[0:2]],stage[filename[2:4]],conn)
# Re-load modules from the database (keyed by name + subject id)
cur.execute("SELECT t.id, t.name, t.subject_id FROM t_module t")
for row in cur:
    print(row[1]+str(row[2]))
    module[str(row[1])+str(row[2])] = row[0]
    moduleId[row[0]] = row[1]
# conn.commit()
# Clear the unit table
cur.execute("DELETE FROM t_unit")
# Load units from the files (only files flagged as "主题"/theme files)
for parent,dirnames,filenames in os.walk(rootDir):
    for filename in filenames:
        if filename[4:6] == "主题":
            persistUnitFromFile(rootDir+"/"+filename,subject[filename[2:4]+filename[0:2]],stage[filename[2:4]],conn)
# Clear the series table
cur.execute("DELETE FROM t_series")
# Load series from the files (only files flagged as "专题"/special-topic files)
for parent,dirnames,filenames in os.walk(rootDir):
    for filename in filenames:
        if filename[4:6] == "专题":
            persistSeriesFromFile(rootDir+"/"+filename,subject[filename[2:4]+filename[0:2]],stage[filename[2:4]],conn)
# Back-fill per-module unit/series counts
for pk in moduleId.keys():
    cur.execute("SELECT t.id FROM t_unit t WHERE t.module_id="+str(pk))
    countUnit = cur.rowcount
    cur.execute("UPDATE t_module t SET t.count_unit="+str(countUnit)+" WHERE t.id="+str(pk))
    cur.execute("SELECT t.id FROM t_series t WHERE t.module_id="+str(pk))
    countSeries = cur.rowcount
    cur.execute("UPDATE t_module t SET t.count_series="+str(countSeries)+" WHERE t.id="+str(pk))
# Re-load units from the database
cur.execute("SELECT t.id, t.name FROM t_unit t")
for row in cur:
    unit[row[1]] = row[0]
    unitId[row[0]] = row[1]
# Clear the topic table
cur.execute("DELETE FROM t_topic")
# Load topics from the files
for parent,dirnames,filenames in os.walk(rootDir):
    for filename in filenames:
        if filename[4:6] == "主题":
            persistTopicFromFile(rootDir+"/"+filename,subject[filename[2:4]+filename[0:2]],stage[filename[2:4]],conn)
# Single commit for the whole rebuild, then tidy up.
conn.commit()
cur.close()
conn.close()
|
24,326 | e46c2ec91d088bc81d259cc148aa4f9eab5354bb | import csv
from dataclasses import dataclass
@dataclass
class Party:
    """One political party as listed on the ballot."""
    party_name: str  # full party name, exactly as it appears in the votes CSV
    leader: str      # party leader's name
    bio: str         # short descriptive blurb
def get_party_bio():
    """Return the set of registered parties with their leaders and bios."""
    roster = [
        ('Pineapple Pizza Party', 'John Doe'),
        ('Pronounced Jiff Union', 'Steve Joe'),
        ('Socks and Crocs Reform League', 'Lohn Loe'),
    ]
    return {Party(name, leader, 'This is a bio.') for name, leader in roster}
def create_dict_of_votes(file):
    """
    Build a {voter full name: party} dictionary from a CSV file of votes.
    ------------------------------------------------------
    Inputs:
        file -> str
    Outputs:
        dict
    """
    votes = dict()
    with open(file) as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the header row
        for row in reader:
            voter = " ".join(row[:2])  # first + last name
            choice = row[2]            # party the person voted for
            # auth_vote records the vote only when it is valid
            auth_vote(voter, choice, votes)
    return votes
def auth_vote(name, party, votes):
    """
    Record `name`'s vote for `party` in `votes` when the vote is valid.

    A vote is valid only when the voter has not voted before AND the party
    is one of the three registered parties; invalid votes are ignored.
    """
    registered = {'Pineapple Pizza Party', 'Pronounced Jiff Union', 'Socks and Crocs Reform League'}
    is_first_vote = name not in votes
    if is_first_vote and party in registered:
        votes[name] = party
def count_party_votes(votes):
    """
    Tally the votes per party.

    Inputs:
        votes: dict mapping voter name -> party name
    Outputs:
        dict mapping each registered party -> number of votes (0 included)
    """
    parties = ('Pineapple Pizza Party', 'Pronounced Jiff Union', 'Socks and Crocs Reform League')
    tally = dict.fromkeys(parties, 0)
    for choice in votes.values():
        tally[choice] += 1
    return tally
def run_Program(sourcePath):
    """Read the votes CSV at `sourcePath` and return per-party vote counts."""
    ballots = create_dict_of_votes(sourcePath)
    return count_party_votes(ballots)
24,327 | d6c04e42645c5e0d677234356578a005c2e37c7d | import requests
import pandas as pd
from tabulate import tabulate
import datetime
import sys
class DataCollect:
    """Thin client for the Pushshift Reddit comment-search API."""
    def data_from_api(self, query, size):
        """Fetch up to `size` newest Reddit comments matching `query`.

        Performs a GET against api.pushshift.io and returns the parsed
        'data' list from the JSON response (raises KeyError if missing).
        NOTE(review): the header set below was captured from a Postman
        session — the tokens/cookies are stale and most headers are
        presumably unnecessary for this public API; confirm and trim.
        """
        url = "https://api.pushshift.io/reddit/comment/search"
        querystring = {"sort": "desc", "q": query, "size": size}
        headers = {
            'User-Agent': "PostmanRuntime/7.13.0",
            'Accept': "*/*",
            'Cache-Control': "no-cache",
            'Postman-Token': "d5979873-a5b6-4e5f-b40f-32458e7082c9,4fcaadea-15a7-49d8-8b05-891336fa493b",
            'Host': "api.pushshift.io",
            'cookie': "__cfduid=df6ffa9e102960abbcbe896ecfd332d071581425174",
            'accept-encoding': "gzip, deflate",
            'Connection': "keep-alive",
            'cache-control': "no-cache"
        }
        response = requests.request("GET", url, headers=headers, params=querystring)
        data = response.json()['data']
        # diagnostic output: raw payload size and number of results
        print('total bytesize is {}'.format(str(sys.getsizeof(response.text))))
        print('total number of results is {}'.format(len(data)))
        return data
|
24,328 | 7fcbb6bb4379adbc3955e645c59c45a3eba262bb | import gym
import torch
from algorithms.base_trainer import BaseTrainer
from algorithms.sarsa.sarsa_agent import SarsaAgent
class SarsaTrainer(BaseTrainer):
    """
    Helper class for training an agent using the SARSA algorithm. Implements
    the main training loop for SARSA.
    """
    def __init__(self, *, gamma=0.9):
        self.gamma = gamma
    def train_agent(
        self,
        *,
        env,
        test_env,
        save_name,
        train_every=32,
        eval_every=1000,
        max_steps=100000,
        start_epsilon=0.9,
        end_epsilon=0.001,
        epsilon_decay_steps=1000,
        render=True,
    ):
        """
        Trains an agent on the given environment following the SARSA algorithm.
        Creates an agent and then loops as follows:
        1) Gather a number of episodes, storing the experience
        2) If time, train the agent with these experiences
        3) If time, evaluate the agent, using a low epsilon
        :param env: gym.env to train an agent on
        :param test_env: gym.env to test an agent on
        :param save_name: str, name to save the agent under
        :param train_every: int, specifies to train after x steps
        :param eval_every: int, evaluates every x steps
        :param max_steps: int, maximum number of steps to gather/train
        :param start_epsilon: float, epsilon to start with
        :param end_epsilon: float, epsilon to end with
        :param epsilon_decay_steps: int, number of steps over which to decay
        :param render: bool, if True, renders the environment during training
        :returns: trained agent of type BaseAgent
        """
        agent = self.create_agent(env)
        curr_epsilon = start_epsilon
        epsilon_decay = self.get_decay_value(
            start_epsilon, end_epsilon, epsilon_decay_steps
        )
        obs = env.reset()
        action = agent.act(obs, epsilon=curr_epsilon)
        for step in range(1, max_steps + 1):
            next_obs, reward, done, _ = env.step(action)
            next_action = agent.act(next_obs, epsilon=curr_epsilon)
            agent.store_step(obs, action, reward, next_obs, next_action, done)
            obs = next_obs
            # Bug fix: SARSA is on-policy — the action stored as `next_action`
            # must actually be executed on the next step.  The original never
            # updated `action`, so env.step() kept replaying the stale action
            # and the stored (s, a, r, s', a') tuples were inconsistent.
            action = next_action
            if render:
                env.render()
            if self.time_to(train_every, step):
                agent.perform_training(gamma=self.gamma)
                curr_epsilon = max(end_epsilon, curr_epsilon - epsilon_decay)
            if self.time_to(eval_every, step):
                self.evaluate_agent(agent, test_env, end_epsilon)
                torch.save(agent, f"saved_agents/{save_name}")
            if done:
                obs = env.reset()
                action = agent.act(obs, epsilon=curr_epsilon)
            print("At step {}".format(step), end="\r")
        print("\nDone!")
        return agent
    def create_agent(self, env):
        """
        Given a specific environment, creates an SARSA agent specific for this
        environment. Can only handle discrete environments.
        :param env: gym.env to create an agent for
        :returns: SarsaAgent
        """
        if isinstance(env.action_space, gym.spaces.Discrete):
            return SarsaAgent(
                obs_dim=env.observation_space.shape[0],
                act_dim=env.action_space.n,
                hidden_sizes=[64],
            )
        raise ValueError("SARSA can only be used for discrete action spaces.")
|
24,329 | 0f359b4b87f4168f36aa3b462fd852c56605f751 | #!/usr/bin/python
import matplotlib.pyplot as plt
from data_processing import process_data
import sys
# Python 2 script (uses xrange): plot, per living-neighbor ratio, the fraction
# of cells that survived ('outcome' == '1') vs died in the training data.
sample_size = 15000
# initialize empty collection
data = process_data('../resources/train.csv', sample_size, True)
# read data from csv
alive = {}   # ratio -> count of surviving cells
dead = {}    # ratio -> count of dying cells
aliveX = []
aliveY = []
deadX = []
deadY = []
deltas = []  # NOTE(review): never used below
pos = 1      # NOTE(review): never used below
# NOTE(review): iteration starts at 1, so sample 0 is skipped — confirm intended.
for i in xrange(1,sample_size):
    for cell in xrange(1, 401):
        # fraction of a cell's neighbors that are alive
        ratio = data[i]['cells'][cell]['living_neighbors'] / float(data[i]['cells'][cell]['neighbors'])
        if data[i]['cells'][cell]['outcome'] == '1':
            if ratio in alive.keys():
                alive[ratio] += 1
            else:
                alive[ratio] = 1
        else:
            if ratio in dead.keys():
                dead[ratio] += 1
            else:
                dead[ratio] = 1
# NOTE(review): the two totals below are computed but never used.
total_alive = sum(alive.values())
total_dead = sum(dead.values())
# survival probability per ratio (1 when no cell with that ratio died)
for key in alive.keys():
    aliveX.append(key)
    if key in dead.keys():
        aliveY.append(float(alive[key])/(alive[key]+dead[key]))
    else:
        aliveY.append(1)
# death probability per ratio (1 when no cell with that ratio survived)
for key in dead.keys():
    deadX.append(key)
    if key in alive.keys():
        deadY.append(float(dead[key])/(alive[key]+dead[key]))
    else:
        deadY.append(1)
plt.plot(deadX, deadY, 'rx')
plt.plot(aliveX, aliveY, 'bv')
plt.show()
|
24,330 | e9d76510f3c63ecda02161adfc8065ec76aa2038 | from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse
from django.views import View
import requests
import urllib2
import json
import re
# Create your views here.
from .forms import SubmitQueryForm
class queryIndexView(View):
    """Search page: GET renders the query form, POST runs an OMDb lookup."""
    def get(self, request):
        """Render the empty search form."""
        the_form = SubmitQueryForm()
        context = {
            'title': 'Search your favourite movies and shows',
            'subTitle': 'Proudly powered by OMDb API',
            'form': the_form,
            'loadResults': 'False'
        }
        return render(request, "queryOMBd/index.html", context)
    def post(self, request):
        """Validate the submitted query and render it with OMDb results.

        Bug fix: the original built `context` only inside the is_valid()
        branch, so an invalid form raised UnboundLocalError when rendering.
        Now an invalid form re-renders the page without results.
        """
        form = SubmitQueryForm(request.POST)
        context = {
            'title': 'Search your favourite movies and shows',
            'subTitle': 'Proudly powered by OMDb API',
            'form': form,
            'loadResults': 'False'
        }
        if form.is_valid():
            query_response = omdbapi_search(form.cleaned_data['url'])
            context['query'] = form.cleaned_data['url']
            context['loadResults'] = 'True'
            context['response'] = query_response
        return render(request, "queryOMBd/index.html", context)
def omdbapi_search(query):
    """Query the OMDb API.

    A query shaped like an IMDb id (tt<digits>) is looked up by id;
    anything else is run as a title search.  Returns the parsed JSON with
    an added 'Display' key ('Id' or 'Search') telling the template which
    response shape it got.
    """
    if re.match(r'tt\d+', query):
        url = 'http://www.omdbapi.com/?i=' + query
        display = 'Id'
    else:
        url = 'http://www.omdbapi.com/?s=' + query.replace(' ', '+') + '&plot=full'
        display = 'Search'
    data = json.load(urllib2.urlopen(url))
    data['Display'] = display
    return data
'''
def index(request):
if request.method == "POST":
print(request.POST)
print(request.POST['query'])
print(request.POST.get('query'))
form = SubmitQueryForm(request.POST)
#if form.is_valid()
return render(request, "queryOMBd/index.html", {})
def test(request):
return HttpResponse('My second view!')
def profile(request):
req = requests.get('http://www.omdbapi.com/?t=game+of+thrones')
content = req.text
return HttpResponse(content)
'''
|
24,331 | 7be2ec4603a981ccae0a2d69b2bbf601ff98667a | N, M, H = map(int, input().split())
# Ladder-game solver (BOJ-style): add at most 3 rungs so that every column
# maps back to itself; ans is the minimum number of added rungs, or -1.
arr = [[0] * N for _ in range(H)] # ladder grid: arr[row][col]==1 means a rung between col and col+1
ans = -1
for _ in range(M):
    n, h = map(int, input().split())
    arr[n - 1][h - 1] = 1 # place an existing rung
def dfs(cnt, y, x): # try adding rungs, scanning left-to-right / top-down
    # NOTE(review): dfs only reads the global `ans`; check() is what sets it.
    if ans != -1: # an answer was already found — prune
        return
    j = x
    for i in range(y, H):
        while j < N - 1:
            if arr[i][j]: # a rung is here, so the next slot must skip a column
                j += 2
            else: # empty slot: place a rung, test, recurse, then backtrack
                arr[i][j] = 1
                check(cnt + 1)
                if cnt + 1 != 3: # at most 3 rungs may be added
                    dfs(cnt + 1, i, j + 2)
                arr[i][j] = 0
                j += 1
        j = 0
def check(cnt): # does every column return to itself?
    global ans
    for i in range(N):
        y, x = 0, i
        while y < H:
            if arr[y][x]: # rung to the right
                x += 1 # move right, then down
                y += 1
            else: # no rung to the right
                if x - 1 >= 0 and arr[y][x - 1]: # rung to the left
                    x -= 1 # move left, then down
                    y += 1
                else:
                    y += 1
        if x != i:
            break
    else:
        # every column mapped to itself: record the rung count
        ans = cnt
check(0)
if ans == -1:
    dfs(0, 0, 0)
print(ans)
|
def div_1(d):
    """Length of the recurring cycle in the decimal expansion of 1/d.

    Tracks each intermediate value of the long division until one repeats
    (cycle found) or reaches 0 (terminating expansion, returns 0).
    """
    seen = {}  # intermediate value -> 1-based step at which it first appeared
    step = 0
    value = 1
    while value:
        if value in seen:
            return step - seen[value] + 1
        step += 1
        seen[value] = step
        value = (value % d) * 10
    return 0
# Python 2 driver (xrange + print statement): find d < 1000 whose 1/d has the
# longest recurring decimal cycle (Project Euler 26 style).
# NOTE(review): `max` shadows the builtin for the rest of the module.
max, max_d = 0, 0
for d in xrange(7, 1000):
    cnt = div_1(d)
    if cnt > max:
        max = cnt
        max_d = d
print max_d
|
24,333 | 4f5d5ca0463041773e29eca7b3820b65f2268fa6 | import FWCore.ParameterSet.Config as cms
process = cms.Process("Test")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1
process.MessageLogger.categories.append('ParticleListDrawer')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1),
skipEvents = cms.untracked.uint32(0)
)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('Test.root')
)
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = cms.string('GR_R_42_V19::All')
# Choose input file
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:Summer11.root'
)
)
## Load modules to create objects and filter events on reco level
process.load("SUSYAnalysis.SUSYFilter.sequences.Preselection_cff")
## Load modules to create objects and filter events on reco level
process.load("SUSYAnalysis.SUSYFilter.sequences.BjetsSelection_cff")
process.load("SUSYAnalysis.SUSYFilter.sequences.MuonID_cff")
## Load modules for analysis on generator and reco-level
process.load("SUSYAnalysis.SUSYAnalyzer.sequences.SUSYBjetsAnalysis_Data_cff")
#--------------------------
# muon selection paths
#--------------------------
## no btag
process.Selection1m = cms.Path(process.makeObjects *
process.analyzeSUSYBjets1m_noCuts *
process.preselectionMuHTData2 *
process.MuHadSelection *
process.analyzeSUSYBjets1m_preselection *
process.RA4MuonCollections *
process.RA4MuonSelection *
process.muonSelection*
process.analyzeSUSYBjets1m_leptonSelection *
process.jetSelection*
process.analyzeSUSYBjets1m_jetSelection *
process.HTSelection *
process.analyzeSUSYBjets1m_HTSelection *
process.metSelection *
process.analyzeSUSYBjets1m_metSelection *
process.mTSelection *
process.analyzeSUSYBjets1m_mTSelection
)
## exactly 1 btag
process.Selection1b1m_2 = cms.Path(process.makeObjects *
process.preselectionMuHTData2 *
process.MuHadSelection *
process.muonSelection*
process.jetSelection *
process.exactlyOneMediumTrackHighEffBjet *
process.analyzeSUSYBjets1b1m_4 *
process.HTSelection *
process.analyzeSUSYBjets1b1m_5 *
process.metSelection *
process.analyzeSUSYBjets1b1m_6 *
process.mTSelection *
process.analyzeSUSYBjets1b1m_1
)
## exactly 2 btags
process.Selection2b1m_2 = cms.Path(process.makeObjects *
process.preselectionMuHTData2 *
process.MuHadSelection *
process.muonSelection*
process.jetSelection *
process.exactlyTwoMediumTrackHighEffBjets *
process.analyzeSUSYBjets2b1m_4 *
process.HTSelection *
process.analyzeSUSYBjets2b1m_5 *
process.metSelection *
process.analyzeSUSYBjets2b1m_6 *
process.mTSelection *
process.analyzeSUSYBjets3b1m_1
)
## at least 3 btags
process.Selection3b1m_1 = cms.Path(process.makeObjects *
process.preselectionMuHTData2 *
process.MuHadSelection *
process.muonSelection*
process.jetSelection *
process.threeMediumTrackHighEffBjets *
process.analyzeSUSYBjets3b1m_4 *
process.HTSelection *
process.analyzeSUSYBjets3b1m_5 *
process.metSelection *
process.analyzeSUSYBjets3b1m_6 *
process.mTSelection *
process.analyzeSUSYBjets1b1m_1
)
#--------------------------
# electron selection paths
#--------------------------
## no btag
process.Selection1e = cms.Path(process.makeObjects *
process.analyzeSUSYBjets1e_noCuts *
process.preselectionElHTData2 *
process.ElHadSelection *
process.analyzeSUSYBjets1e_preselection *
process.electronSelection*
process.analyzeSUSYBjets1e_leptonSelection *
process.jetSelection*
process.analyzeSUSYBjets1e_jetSelection *
process.HTSelection *
process.analyzeSUSYBjets1e_HTSelection *
process.metSelection *
process.analyzeSUSYBjets1e_metSelection *
process.mTSelection *
process.analyzeSUSYBjets1e_mTSelection
)
## exactly 1 btag
process.Selection1b1e_2 = cms.Path(process.makeObjects *
process.preselectionElHTData2 *
process.ElHadSelection *
process.electronSelection*
process.jetSelection *
process.exactlyOneMediumTrackHighEffBjet *
process.analyzeSUSYBjets1b1e_4 *
process.HTSelection *
process.analyzeSUSYBjets1b1e_5 *
process.metSelection *
process.analyzeSUSYBjets1b1e_6 *
process.mTSelection *
process.analyzeSUSYBjets1b1e_1
)
## exactly 2 btags
process.Selection2b1e_2 = cms.Path(process.makeObjects *
process.preselectionElHTData2 *
process.ElHadSelection *
process.electronSelection*
process.jetSelection *
process.exactlyTwoMediumTrackHighEffBjets *
process.analyzeSUSYBjets2b1e_4 *
process.HTSelection *
process.analyzeSUSYBjets2b1e_5 *
process.metSelection *
process.analyzeSUSYBjets2b1e_6 *
process.mTSelection *
process.analyzeSUSYBjets2b1e_1
)
## at least 3 btags
process.Selection3b1e_1 = cms.Path(process.makeObjects *
process.preselectionElHTData2 *
process.ElHadSelection *
process.electronSelection *
process.jetSelection *
process.threeMediumTrackHighEffBjets *
process.analyzeSUSYBjets3b1e_4 *
process.HTSelection *
process.analyzeSUSYBjets3b1e_5 *
process.metSelection *
process.analyzeSUSYBjets3b1e_6 *
process.mTSelection *
process.analyzeSUSYBjets3b1e_1
)
|
24,334 | ba02b793b6633d973e5ab6a15b0e405df9321a1b | # Generated by Django 3.1.7 on 2021-02-21 22:19
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=50)),
('description', models.CharField(blank=True, max_length=500)),
('tags', jsonfield.fields.JSONField(null=True)),
('date', models.DateField(blank=True, verbose_name='Date')),
('link', models.URLField(blank=True, max_length=500)),
('technologies', jsonfield.fields.JSONField(null=True)),
],
),
migrations.CreateModel(
name='ProjectTechnologies',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tech', models.CharField(max_length=50)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='technology_project_name', to='portfolio.project')),
],
),
migrations.CreateModel(
name='ProjectImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image_project_name', to='portfolio.project')),
],
),
]
|
24,335 | 9d2a0562d4cb56fc8c4e4f27e35432e3de8e7e19 | import serial
import time
import threading
class RoboteqSteering(object):
    """Minimal serial driver for a Roboteq motor controller.

    Opens the serial port on construction and spawns a daemon thread that
    continuously drains the controller's replies (currently discarded).
    NOTE(review): failures call exit(1), killing the host process — consider
    raising exceptions instead for library use.
    """
    def __init__(self, device = '/dev/roboteq0', baudrate = 115200, logerr = lambda x: print(x)):
        self.logerr = logerr
        self.ser = None
        try:
            self.ser = serial.Serial(device, baudrate = baudrate, timeout = 1)
        except serial.serialutil.SerialException:
            self.logerr("unable to open serial port")
            exit(1)
        # background reader; daemon=True so it never blocks interpreter exit
        self.thread_read_loop = threading.Thread(target = self._read_loop, daemon = True)
        self.thread_read_loop.start()
    def __del__(self):
        # Best-effort close; __del__ timing is interpreter-dependent.
        if self.ser and self.ser.is_open:
            self.ser.close()
    def _write(self, command_string):
        """Send one CR-terminated command string; exits the process on failure."""
        if not self.ser.is_open:
            self.logerr("exiting: serial port closed")
            exit(1)
        try:
            self.ser.write((command_string + '\r').encode())
        except serial.serialutil.SerialException:
            self.logerr("serial port error")
            exit(1)
        return True
    def command(self, power):
        """Set motor power on channel 1, clamped to the [-1000, 1000] range."""
        if power > 1000:
            power = 1000
        if power < -1000:
            power = -1000
        self._write("!G 1 %d" % power)
    def query_ff(self):
        """Request the fault-flags register (?FF); the reply lands in _read_loop."""
        self._write("?FF")
    def _read_loop(self):
        # Daemon thread: drain incoming lines forever (replies currently unused).
        while True:
            line = self.ser.readline()
|
24,336 | 7ede59161a892b7f7c1298dd73e7d6962320bcc8 | import sqlite3
# Module-level database connection and cursor shared by every helper below.
conn = sqlite3.connect('MovieDB.db')
c = conn.cursor()
def creat_db():
    """Create the MovieTB table if it does not already exist.

    (The 'creat_db' spelling is kept for caller compatibility.)
    """
    c.execute("""CREATE TABLE IF NOT EXISTS MovieTB (
            ID INTEGER,
            TITLE TEXT,
            GENRE TEXT,
            DESCRIPTION TEXT,
            POSTER TEXT,
            RELEASE_DATE TEXT,
            STATUS TEXT,
            IMDB_LINK TEXT)""")
    conn.commit()
def drop_M_TB():
    """Drop the MovieTB table (errors if it does not exist; no IF EXISTS guard)."""
    c.execute("DROP TABLE MovieTB")
def select(title):
    """Print the (ID, TITLE) rows whose title matches `title`.

    Bug fix: insert() stores titles lower-cased, but the original looked
    up the raw title (SQLite `=` is case-sensitive for ASCII), so any
    mixed-case query silently found nothing.  The lookup is normalised
    the same way, and the rows are also returned for programmatic use.
    """
    c.execute("SELECT ID, TITLE FROM MovieTB WHERE TITLE = :TITLE", {'TITLE': title.lower()})
    rows = c.fetchall()
    print(rows)
    return rows
def insert(m_id, title, genre, desc, poster, r_date, status, imdb_link):
    """Insert one movie row into MovieTB and commit.

    `genre` is an iterable of genre strings, stored as one comma-joined
    string; the title is stored lower-cased (matching select()).
    """
    # replaces the original leading-comma loop with the join idiom
    genre = ",".join(genre)
    c.execute(
        "INSERT INTO MovieTB VALUES (:ID, :TITLE, :GENRE, :DESCRIPTION, :POSTER, :RELEASE_DATE, :STATUS, :IMDB_LINK)",
        {'ID': m_id, 'TITLE': title.lower(), 'GENRE': genre, 'DESCRIPTION': desc, 'POSTER': poster, 'RELEASE_DATE': r_date
         , 'STATUS': status, 'IMDB_LINK': imdb_link})
    conn.commit()
def closing_the_connection():
    """Close the module-level database connection (no further queries after this)."""
    conn.close()
|
24,337 | 20ffe7c5577e313898c75f4bf8af5638a4aaa48f | from time import sleep
from selenium.common.exceptions import NoSuchElementException
from datetime import datetime, timedelta
import pickle
# Check whether the page loaded correctly (True) or not (False)
def check_good_page(browser):
    """Return True when the Instagram profile header (followers counter)
    is present on the current page, i.e. the page loaded correctly."""
    try:
        elm = "/html/body/div[1]/section/main/div/header/section/ul/li[1]/span/span"
        smart_sleep(browser=browser, xpath=elm)  # poll until the element appears (or times out)
        browser.find_element_by_xpath(elm)
        return True
    except NoSuchElementException:
        return False
def login_inst(browser, username="kirill.glushakov03@mail.ru", password="instapython"):
    """Log in to Instagram, reusing cookies from cookies.pkl when available;
    otherwise log in with the form and save fresh cookies.

    NOTE(review): credentials are hard-coded defaults — move them to
    configuration/secret storage.  The cookie files are opened without
    being closed; consider `with open(...)`.
    """
    # Open the Instagram login page
    def open_inst():
        while True:  # keep reloading until the login form is actually present
            try:
                browser.find_element_by_xpath(
                    "/html/body/div[1]/section/main/div/div/div[1]/div/form/div[1]/div[1]/div/label/input")
                break
            except NoSuchElementException:
                browser.get("https://www.instagram.com/accounts/edit/")
                sleep(1)
    open_inst()
    xpath_try_login = '//*[@id="react-root"]/section/nav/div[2]/div/div/div[3]/div'
    try:
        cookies = pickle.load(open("cookies.pkl", "rb"))  # load saved cookies
        for cookie in cookies:
            browser.add_cookie(cookie)
        # Open the default profile page
        browser.get("https://www.instagram.com/slutskgorod/")
        smart_sleep(browser=browser, xpath=xpath_try_login)
    except FileNotFoundError:
        # No saved cookies: log in with username/password
        xpath_user_name = "/html/body/div[1]/section/main/div/div/div[1]/div/form/div[1]/div[1]/div/label/input"
        xpath_password = "/html/body/div[1]/section/main/div/div/div[1]/div/form/div[1]/div[2]/div/label/input"
        xpath_button = "/html/body/div[1]/section/main/div/div/div[1]/div/form/div[1]/div[3]"
        # username
        browser.find_element_by_xpath(xpath_user_name).send_keys(username)
        # password
        browser.find_element_by_xpath(xpath_password).send_keys(password)
        browser.find_element_by_xpath(xpath_button).click()
        smart_sleep(browser=browser, xpath=xpath_try_login)
        # Open the default profile page
        browser.get("https://www.instagram.com/slutskgorod/")
        smart_sleep(browser=browser, xpath=xpath_try_login)
        pickle.dump(browser.get_cookies(), open("cookies.pkl", "wb"))
# Handle page-request errors: True when the page loaded fine, False otherwise
def exception(browser):
    """Return True when the page request succeeded (the profile header's
    followers counter is present), False on a request/page error."""
    # (disabled) error-560 detection, kept for reference
    # try:
    #     elm = "/html/body/div[1]/div[1]/div[2]/div[1]/div/div/div/div[2]" # error 560
    #     s = browser.find_element_by_xpath(elm).text
    #     print(s)
    #     return False
    # except:
    #     pass
    # (disabled) "Sorry, this page isn't available." detection
    # try:
    #     elm = '//*[@id="react-root"]/section/main/div/h2'
    #     s = browser.find_element_by_xpath(elm).text
    #     print(s)
    #     return False
    # except:
    #     pass
    # Generic check: look for the followers counter in the profile header
    try:
        elm = "/html/body/div[1]/section/main/div/header/section/ul/li[1]/span/span"
        browser.find_element_by_xpath(elm)
        return True
    except NoSuchElementException:
        return False
# Polls for an element (up to ~8 s despite the original "60 seconds" wording) — True if found, False otherwise
def smart_sleep(browser, xpath=None, strict_pause=None):
    """Wait for the page (and optionally `xpath`) to be ready.

    With `strict_pause` set, just sleeps that long and returns True.
    Otherwise polls for up to 8 seconds (despite the original comment
    claiming 60) for the Instagram logo and, if given, `xpath`; returns
    True once both are found, False on timeout.
    """
    if strict_pause is not None:
        sleep(strict_pause)
        return True
    else:
        capture_inst_xpath = '//*[@id="react-root"]/section/nav/div[2]/div/div/div[1]/a/div/div/img'
        start_time = datetime.now()
        while (datetime.now() - start_time) < timedelta(seconds=8):
            try:
                browser.find_element_by_xpath(xpath=capture_inst_xpath)
                if xpath is not None:
                    browser.find_element_by_xpath(xpath=xpath)
                print(" smart_sleep {}".format(datetime.now() - start_time))
                return True
            except NoSuchElementException:
                sleep(0.0001)  # near-busy-wait; a longer poll interval would be kinder to the CPU
        return False
# Open a file and return all of its lines as a list
def open_file_to_list(path):
    """Return all lines of the file at `path` as a list (newlines preserved).

    Fix: the original opened the file without a context manager, leaking
    the handle if iteration raised.
    """
    with open(path, 'r') as f:
        return list(f)
def text_to_list(lst, count):
    """Concatenate the first `count` lines of `lst` into one string.

    Bug fix: the original immediately overwrote `count` with 0 and used a
    hard-coded limit of 10 lines, silently ignoring the caller's argument.
    Behaviour is unchanged for the presumably-intended count=10 case.
    """
    return "".join(lst[:count])
# Check whether we already follow the user
def check_users(b):
    """Return a human-readable status string saying whether we already follow
    the user whose profile is currently open (Russian strings are part of the
    program's output contract and are kept as-is)."""
    xpath = '//button[@class="sqdOP L3NKy y3zKF "]' # the blue "Follow" button
    try:
        result = "Мы не подписаны"
        smart_sleep(browser=b, xpath=xpath)
        b.find_element_by_xpath(xpath)
        return result
    except NoSuchElementException:
        # no Follow button means we are already following
        result = "Мы подписаны"
        return result
# Find an element by "xpath" or "name" (prints "text" in the error message when given)
def find_element(b, text=None, xpath=None, name=None, delay=True):
    """Find an element by `xpath` or `name`.

    With delay=True, polls roughly every 0.1 s for up to 4 s before giving
    up (printing `text` in the error line when provided); with delay=False,
    tries exactly once.  Returns the element, or None when not found.
    """
    def recursion():
        sleep(0.1)
        try:
            if datetime.now() - time > timedelta(seconds=4):
                return None
            else:
                if name is not None:
                    answer = b.find_element_by_name(name)
                    return answer
                if xpath is not None:
                    answer = b.find_element_by_xpath(xpath)
                    return answer
        except NoSuchElementException:
            return recursion()
    # Search with retries (poll until the 4-second budget is spent)
    if delay:
        time = datetime.now()  # note: local `time`, unrelated to the time module
        result = recursion()
        if result is None:
            if text is not None:
                print("ERROR", "NoSuchElement", "({}) | Delay = {}".format(text, 4))
            return result
        else:
            return result
    # Search once, immediately
    else:
        if name is not None:
            try:
                return b.find_element_by_name(name)
            except NoSuchElementException:
                return None
        if xpath is not None:
            try:
                return b.find_element_by_xpath(xpath)
            except NoSuchElementException:
                return None
# Scroll the element "elm_scroll" until at least "count" user entries are loaded
def scroll(b, count, elm_scroll):
    """Scroll `elm_scroll` until at least `count` user <li> entries are loaded
    (capped at our own following count); returns the list of elements."""
    xpath_count_my_sub = '//*[@id="react-root"]/section/main/div/header/section/ul/li[3]/a/span'
    xpath_count_parse_users_li = '/html/body/div[6]/div/div/div[3]/ul/div/li'
    count_my_sub = b.find_element_by_xpath(xpath_count_my_sub).text
    count_my_sub = count_my_sub.replace(" ", "")
    # expand the Russian 'тыс.' (thousand) suffix — assumes exact multiples; TODO confirm
    count_my_sub = count_my_sub.replace("тыс.", "000")
    count_my_sub = int(count_my_sub)
    print("Число наших подписок (из цифры):", count_my_sub)
    # Cap how many users we try to load at our own following count
    if count > count_my_sub:
        temp_count = count_my_sub
    else:
        temp_count = count
    while True:
        sleep(0.5)
        b.execute_script("""arguments[0].scrollTo(0, arguments[0].scrollHeight);
         return arguments[0].scrollHeight; """, elm_scroll)
        elms_users = b.find_elements_by_xpath(xpath_count_parse_users_li)
        if len(elms_users) >= temp_count:
            return elms_users
|
24,338 | 2a9c14d45e5cb1412827aaa5a7da7498250d76fe | N,K = map(int, input().split())
# Count how many of the N positions are never reported by any of the K inputs.
ans = 0
nusuke = [0] * N  # observation count per position 1..N
for i in range(K):
    d = int(input())  # declared number of entries on the next line (read but not otherwise used)
    data = list(map(int, input().split()))
    for j in data:
        nusuke[j-1] += 1
for k in nusuke:
    if k == 0:
        ans += 1
print(ans)
24,339 | ff2eba785ea3f288700c21e2679b21b69c71a437 | import json
import time

from m365py import m365py
from m365py import m365message

from paho.mqtt import client as mqtt_client

# MQTT: publish the scooter's cached state to the local broker
client = mqtt_client.Client('Raspi')
client.connect('192.168.xxx.xxx')

# M365 scooter connection (placeholders must be replaced with real values)
scooter_mac_address = 'XX:XX:XX:XX:XX:XX'
scooter = m365py.M365(scooter_mac_address, auto_reconnect=False)

try:
    scooter.connect()
    while True:
        # Request all currently supported 'attributes'
        scooter.request(m365message.battery_voltage)
        scooter.request(m365message.battery_ampere)
        scooter.request(m365message.battery_percentage)
        scooter.request(m365message.battery_cell_voltages)
        scooter.request(m365message.battery_info)
        scooter.request(m365message.general_info)
        scooter.request(m365message.motor_info)
        scooter.request(m365message.trip_info)
        scooter.request(m365message.trip_distance)
        scooter.request(m365message.distance_left)
        scooter.request(m365message.speed)
        scooter.request(m365message.tail_light_status)
        scooter.request(m365message.cruise_status)
        scooter.request(m365message.supplementary)
        # m365py also stores a cached state of received values
        client.publish("ScooterM365", json.dumps(scooter.cached_state, indent=4, sort_keys=True), retain=True)
        # Delay between polling rounds
        time.sleep(10)
except Exception as e:
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit
    # (so Ctrl+C looked like a scooter failure) and hid the actual error.
    print('Scooter not found or disconnected: {}'.format(e))
|
24,340 | 5bf25f93ffb2945d641ae3d5733610ffa6c0c91b | from typing import Dict, List
import numpy as np
import cvxpy as cp
from libs.solver import atomic_solve
class Atom(object):
    def __init__(self, atom_id: int, node_data_package: dict):
        """One PAC atom (per-bus subproblem) in the distributed solver.

        Every key of `node_data_package` is mirrored onto the instance with
        a leading underscore (e.g. 'rho' -> self._rho).  The methods below
        rely on _rho, _gamma, _Gj, _Aj, _Qmj, _Bj, _bj, _y, _bus_type,
        _neighbors, _parent_node, _global_num_nodes, _beta_* and _PL/_PG/
        _QL/_QG being present — TODO confirm the package schema with the
        caller that builds it.
        """
        self.atom_id = atom_id
        self.node_data_package = node_data_package
        self.first_time: bool = True
        self.previous_problem = None
        # set the attributes from the node data package
        for node_variable in self.node_data_package:
            setattr(self, '_'+node_variable, self.node_data_package[node_variable])
        # dual variables; populated by init_dual_vars()
        self.mu = None
        self.mu_bar = None
        self.nu = None
        self.nu_bar = None
        self.adaptive_learning: bool = False
        self._gamma_mu = 0 # self._gamma
        self._gamma_nu = 0 # self._gamma
        self.round: int = 0 # round/iteration
        self.epsilon = 1e-10
        self.Gy_trajectory: list = []
        self.Ay_trajectory: list = []
        self._gamma_mu_trajectory = []
        # Later implement so don't have to broadcast to everyone
        self.global_atom_y: Dict[int, np.array] = {self.atom_id: self.get_y()}
        self.global_atom_nu_bar: Dict[int, np.array] = {}
        # pick this atom's column out of every node's Qmj block
        qmj_tuple = ()
        for m in range(self._global_num_nodes):
            qmj_tuple += (self._Qmj[m][0][self.atom_id - 1],) # because indexing of atom starts at 1
        self._Qmj = np.vstack(qmj_tuple)
        # cache the upstream (parent) line's physical parameters
        parent_node: int = int(self._parent_node)
        self.upstream_line_resistance: float = self._neighbors[parent_node]['resistance']
        self.upstream_line_thermal_limit: float = self._neighbors[parent_node]['thermal_limit']
# broadcast and receive to initialize things on the network - order
'''
(1) self.broadcast(msg_type='broadcast_y', msg=self._y)
(2) self.receive(msg_type='receive_y')
(3) self.init_dual_vars()
(4) self.broadcast(msg_type='broadcast_nu_bar', msg=self.nu_bar)
(5) self.receive(msg_type='receive_nu_bar')
LOOP
(6) Update y and mu, mu_bar
(7) broadcast y
(8) receive y
(9) update nu, nu_bar
(10) broadcast nu_bar
'''
# MARK: Getters
def get_global_y(self):
y_tuple: List[np.array] = [self.global_atom_y[key] for key in sorted(self.global_atom_y)]
return np.vstack(y_tuple)
    def get_y(self) -> np.array:
        """Return this atom's local decision vector y."""
        return self._y
    def get_nu_bar(self) -> np.array:
        """Return this atom's current nu_bar dual estimate."""
        return self.nu_bar
    def get_nu(self) -> np.array:
        """Return this atom's current nu dual variable."""
        return self.nu
def get_global_nu_bar(self):
nu_bar_tuple: List[np.array] = [self.global_atom_nu_bar[key] for key in sorted(self.global_atom_nu_bar)]
return np.vstack(nu_bar_tuple)
def init_dual_vars(self):
self.mu: np.array = np.zeros_like(self._rho * self._gamma * self._Gj @ self.get_y())
self.mu_bar: np.array = self.mu + self._rho * self._gamma * self._Gj @ self.get_y()
global_y_mat: np.array = self._Aj @ self.get_global_y()
self.nu: np.array = np.zeros_like(self._rho * self._gamma * global_y_mat)
self.nu_bar: np.array = self.nu + self._rho * self._gamma * global_y_mat
# set nu_bar
self.global_atom_nu_bar[self.atom_id] = self.nu_bar
# MARK: PAC
    def cost_function(self, var):
        """Local (atomic) OPF cost for the decision vector `var`.

        Feeder buses pay a linear generation cost; all other buses pay
        quadratic deviations of generation/load from their setpoints plus a
        resistive-loss term on the upstream line.  Returns a scalar — a
        cvxpy expression when `var` is a cvxpy variable (assumed by the
        cp.square calls below — TODO confirm caller always passes one).
        """
        xi = 1.0 # FIXME: Integrate with Network Grid topo
        gen_cost = 0.0
        load_util = 0.0
        loss = 0.0
        if self._bus_type == 'feeder':
            # yj = [PLj, PGj, QLj, QGj, vj, {Pjh, Qjh}]
            beta_pg = 1
            beta_qg = 1
            PG: float = var[1]
            QG: float = var[3]
            # feeder generation priced linearly with unit weights
            gen_cost: float = beta_pg*PG + beta_qg*QG
        else:
            # yj = [Pij, Qij, lij, PLj, PGj, QLj, QGj, vj, {vi}] (end node) OR
            # yj = [Pij, Qij, Lij, PLj, PGj, QLj, QGj, vj, {vi, Pjk, Qjk, . . .}] ('middle' node)
            PG: float = var[4]
            QG: float = var[6]
            PL: float = var[3]
            QL: float = var[5]
            Lij: float = var[2] # the current flow on the upstream line
            # quadratic penalties around the reference setpoints
            gen_cost: float = self._beta_pg*cp.square(PG - self._PG[0]) + self._beta_qg*cp.square(QG - self._QG[0])
            load_util: float = self._beta_pl*cp.square(PL - self._PL[1]) + self._beta_ql*cp.square(QL - self._QL[1])
            # presumably I^2 * R loss on the line to the parent node — confirm units
            loss: float = xi*self.upstream_line_resistance*Lij
        return gen_cost + load_util + loss
def solve_atomic_objective_function(self) -> np.array:
params = {'global_nu_bar': (self.get_global_nu_bar().shape, self.get_global_nu_bar()), 'mu_bar': (self.mu_bar.shape, self.mu_bar), 'prev_y': (self.get_y().shape, self.get_y())}
if self.first_time:
var, self.previous_problem = atomic_solve(self.cost_function, self._y.shape, Gj=self._Gj, rho=self._rho, Qmj=self._Qmj, Bj=self._Bj, bj=self._bj, bus_type=self._bus_type, thermal_limit=self.upstream_line_thermal_limit, prev_params=params)
else:
var, _ = atomic_solve(self.cost_function, self._y.shape, Gj=self._Gj, rho=self._rho, Qmj=self._Qmj, Bj=self._Bj, bj=self._bj, bus_type=self._bus_type, thermal_limit=self.upstream_line_thermal_limit, previous_problem=self.previous_problem, prev_params=params)
return var
    def update_y_and_mu(self):
        """Solve for a new local y, then take one dual ascent step on mu.

        Side effects: updates self._y, self.mu, self.mu_bar, appends to
        Gy_trajectory, and refreshes this atom's entry in global_atom_y.
        """
        try:
            self._y: np.array = self.solve_atomic_objective_function()
            self.first_time = False # we've successfully done our first time!
        except ValueError as e:
            print('Could not solve for y')
            raise e
        # update mu
        mat_product: np.array = self._Gj @ self.get_y()
        PRODUCT = self._rho * self._gamma * mat_product
        self.mu = self.mu + PRODUCT
        # store the residual with rho divided back out, for the adaptive step size
        self.Gy_trajectory.append(mat_product/self._rho)
        if self.adaptive_learning:
            H = sum([g.T@g for g in self.Gy_trajectory])
            # n = H.shape[0]
            # diagonalized_H = np.diag(np.diag(H))
            # epsilon_identity = self.epsilon*np.identity(n)
            # total = np.diag(1/np.sqrt(np.diag(epsilon_identity + diagonalized_H)))
            # NOTE(review): this divides gamma elementwise by sqrt(eps + H);
            # if H is a matrix this is an AdaGrad-style elementwise scaling —
            # confirm the intended shape.
            self._gamma_mu = self._gamma/np.sqrt(self.epsilon + H)
            PRODUCT = self._gamma_mu * mat_product
        self.mu_bar = self.mu + PRODUCT
        # update my y that exists in the global dict
        self.global_atom_y[self.atom_id] = self._y
    def update_nu(self):
        """Take one dual ascent step on nu after receiving the global y broadcast.

        Side effects: updates self.nu, self.nu_bar, appends to Ay_trajectory,
        and refreshes this atom's entry in global_atom_nu_bar.
        """
        # update nu
        mat_product: np.array = self._Aj @ self.get_global_y()
        PRODUCT = self._rho * self._gamma * mat_product
        self.nu = self.nu + PRODUCT
        # keep the unscaled residual in case adaptive scaling is re-enabled below
        self.Ay_trajectory.append(mat_product/self._rho)
        # if self.adaptive_learning:
        # H_round = sum([g@g.T for g in self.Ay_trajectory])
        # n = H_round.shape[0]
        # diagonalized_H_round = np.diag(np.diag(H_round))
        # epsilon_identity = self.epsilon*np.identity(n)
        # total = np.diag(1/np.sqrt(np.diag(epsilon_identity + diagonalized_H_round)))
        #
        # PRODUCT = self._gamma * total @ mat_product
        self.nu_bar = self.nu + PRODUCT
        # update my belief of nu_bar
        self.global_atom_nu_bar[self.atom_id] = self.nu_bar
def __str__(self):
return f'I am atom-{self.atom_id}, with example: {self.get_y()}'
|
24,341 | 2cc134ec6f8e28a39065cc5ee7efbe783ca98f12 | import SceneDataStructs
entity_path = 'entity_folder\\'
# a class for an entity object
class Entity:
    """A named entity in a scene, optionally tagged with types and a role.

    Equality is by ``name`` only (and also compares against plain strings),
    while hashing uses ``name`` + ``role``.  NOTE(review): entities that
    compare equal but carry different roles hash differently — confirm this
    asymmetry is intended before using Entities as dict/set keys.
    """
    def __init__(self, entity_name, types=None, role=None):
        self.name = entity_name   # display name
        self.types = types        # optional type labels
        self.role = role          # role within the scene, if assigned

    def asDict(self):
        """Return this entity's attributes as a plain dict."""
        return self.__dict__

    def __hash__(self):
        return hash(str(self.name) + str(self.role))

    def __eq__(self, other):
        # Bug fix: the original called hasattr(other, name) with an undefined
        # variable `name`, raising NameError on every Entity==Entity compare;
        # the attribute must be the string 'name'.
        if hasattr(other, 'name'):
            return self.name == other.name
        # fall back to comparing against a raw value (e.g. a plain string)
        return self.name == other

    def __str__(self):
        return str(self.name) + '_' + str(self.role)

    def __repr__(self):
        return str(self.name) + '_' + str(self.role)
def generateEntities(scene_lib):
    """Write one entity-per-line text file per scene under entity_path.

    Scenes keyed by None are skipped.  Fix: the original never closed the
    file handle; a `with` block now guarantees it is flushed and closed.
    """
    print('writing entities:')
    for sc_name, scene in scene_lib.items():
        if sc_name is None:
            continue
        print(sc_name)
        with open(entity_path + 'scene' + sc_name + '_entities.txt', 'w') as scene_entity_file:
            for entity in scene.entities:
                scene_entity_file.write(entity)
                scene_entity_file.write('\n')
def readEntityRoles(scene_file):
    """Parse an entity-role file into {first_token: [...]}.

    Before the '_' sentinel token is seen, each line maps its first word to a
    single-element list holding an Entity (with the last word as role when the
    line has more than one token).  After the sentinel, each line maps its
    first word to the lowercased words from index 2 onward.
    """
    role_dict = dict()
    in_substitutions = False
    for raw_line in scene_file:
        tokens = raw_line.split()
        if not tokens:
            continue
        if in_substitutions:
            role_dict[tokens[0]] = [word.lower() for word in tokens[2:]]
            continue
        if len(tokens) > 1:
            role_dict[tokens[0]] = [Entity(tokens[0], role=tokens[-1])]
        else:
            role_dict[tokens[0]] = [Entity(tokens[0])]
        if tokens[-1] == '_':
            # sentinel: everything after this line is a substitution entry
            in_substitutions = True
    return role_dict
def assignRoles(scene_lib):
    """Substitute coded entity roles into every non-excluded scene.

    Reads 'scene<NAME>_entities_coded.txt' for each scene and delegates the
    substitution to scene.substituteEntities.
    """
    print('assigning entities to roles')
    for sc_name, scene in scene_lib.items():
        if sc_name in SceneDataStructs.EXCLUDE_SCENES:
            continue
        # print(sc_name)
        scene_entity_file = open(entity_path + 'scene' + sc_name + '_entities_coded.txt')
        rd = readEntityRoles(scene_entity_file)
        scene.substituteEntities(rd)
        scene_entity_file.close()
    # debug dump of the updated library
    print(scene_lib)
if __name__ == '__main__':
    # Names must be importable before unpickling scenes that reference them.
    from SceneDataStructs import Scene, SceneLib, Shot, Action, ActionType
    print('loading scene library')
    scene_lib = SceneDataStructs.load()
    # print(scene_lib)
    assignRoles(scene_lib)
    # persist the role-substituted library back to disk
    SceneDataStructs.save_scenes(scene_lib)
24,342 | 52e695e621548186ec9e1d96ba6f242aa5605840 | // A.1 RosCopter
#!/ usr/ bin / env python
import roslib ; roslib . load_manifest ('roscopter ')
import rospy
from std_msgs . msg import String , Header
from std_srvs . srv import *
from sensor_msgs . msg import NavSatFix , NavSatStatus , Imu
import roscopter . msg
import sys ,struct ,time ,os
sys . path . insert (0, os. path . join (os. path . dirname (os. path . realpath ( __file__ )), '../ mavlink/ pymavlink '))
from optparse import OptionParser
# Command-line options for the MAVLink<->ROS bridge.
# Reconstructed from OCR-damaged source: the original --enable-control help
# string was split across lines (a syntax error) and the device None-check
# ran AFTER the serial connection had already been opened.
parser = OptionParser("roscopter.py [options]")
parser.add_option("--baudrate", dest="baudrate", type='int',
                  help="master port baud rate", default=57600)
parser.add_option("--device", dest="device", default="/dev/ttyUSB0", help="serial device")
parser.add_option("--rate", dest="rate", default=10, type='int', help="requested stream rate")
parser.add_option("--source-system", dest='SOURCE_SYSTEM', type='int',
                  default=255, help='MAVLink source system for this GCS')
parser.add_option("--enable-control", dest="enable_control", default=False,
                  help="Enable listening to control messages")
(opts, args) = parser.parse_args()

import mavutil

# validate options before touching the serial port
if opts.device is None:
    print("You must specify a serial device")
    sys.exit(1)
# create a mavlink serial instance
master = mavutil.mavlink_connection(opts.device, baud=opts.baudrate)
def wait_heartbeat(m):
    """Block until a heartbeat arrives so the target system/component IDs are known."""
    print("Waiting for APM heartbeat")
    m.wait_heartbeat()
    # Bug fix: the original printed target_system for both %u fields even
    # though the message labels the second one "component".
    print("Heartbeat from APM (system %u component %u)" % (m.target_system, m.target_component))
# This does not work yet because APM does not have it implemented
# def mav_control ( data ):
# '''
# Set roll , pitch and yaw.
# roll : Desired roll angle in radians ( float )
# pitch : Desired pitch angle in radians ( float )
# yaw : Desired yaw angle in radians ( float )
# thrust : Collective thrust , normalized to 0 .. 1 ( float )
# '''
# master . mav. set_roll_pitch_yaw_thrust_send ( master . target_system , master .target_component ,
# data .roll , data .pitch ,
data .yaw , data . thrust )
#
# print (" sending control : %s"% data )
def send_rc(data):
    """ROS callback: forward an 8-channel RC override message to the autopilot."""
    master.mav.rc_channels_override_send(
        master.target_system, master.target_component,
        data.channel[0], data.channel[1], data.channel[2], data.channel[3],
        data.channel[4], data.channel[5], data.channel[6], data.channel[7])
    print("sending rc: %s" % data)
# service callbacks
# def set_mode ( mav_mode ):
# master . set_mode_auto ()
def set_arm(req):
    """ROS service callback: arm the copter."""
    master.arducopter_arm()
    return True
def set_disarm(req):
    """ROS service callback: disarm the copter."""
    master.arducopter_disarm()
    return True
# ROS topic publishers for the telemetry republished from MAVLink.
pub_gps = rospy.Publisher('gps', NavSatFix)
# pub_imu = rospy.Publisher('imu', Imu)
pub_rc = rospy.Publisher('rc', roscopter.msg.RC)
pub_state = rospy.Publisher('state', roscopter.msg.State)
pub_vfr_hud = rospy.Publisher('vfr_hud', roscopter.msg.VFR_HUD)
pub_attitude = rospy.Publisher('attitude', roscopter.msg.Attitude)
pub_raw_imu = rospy.Publisher('raw_imu', roscopter.msg.Mavlink_RAW_IMU)
if opts.enable_control:
    # rospy.Subscriber("control", roscopter.msg.Control, mav_control)
    rospy.Subscriber("send_rc", roscopter.msg.RC, send_rc)
# define service callbacks
arm_service = rospy.Service('arm', Empty, set_arm)
disarm_service = rospy.Service('disarm', Empty, set_disarm)
# state
gps_msg = NavSatFix()
def mainloop():
    """Poll MAVLink messages and republish each known type on its ROS topic.

    Reconstructed from OCR-damaged source; the LOCAL_POSITION_NED print was a
    Python 2 print statement and is normalized to a function call.
    """
    rospy.init_node('roscopter')
    while not rospy.is_shutdown():
        rospy.sleep(0.001)
        msg = master.recv_match(blocking=False)
        if not msg:
            continue
        if msg.get_type() == "BAD_DATA":
            # surface printable serial garbage so link noise is visible
            if mavutil.all_printable(msg.data):
                sys.stdout.write(msg.data)
                sys.stdout.flush()
        else:
            msg_type = msg.get_type()
            if msg_type == "RC_CHANNELS_RAW":
                pub_rc.publish([msg.chan1_raw, msg.chan2_raw, msg.chan3_raw,
                                msg.chan4_raw, msg.chan5_raw, msg.chan6_raw,
                                msg.chan7_raw, msg.chan8_raw])
            if msg_type == "HEARTBEAT":
                pub_state.publish(msg.base_mode & mavutil.mavlink.MAV_MODE_FLAG_SAFETY_ARMED,
                                  msg.base_mode & mavutil.mavlink.MAV_MODE_FLAG_GUIDED_ENABLED,
                                  mavutil.mode_string_v10(msg))
            if msg_type == "VFR_HUD":
                pub_vfr_hud.publish(msg.airspeed, msg.groundspeed, msg.heading,
                                    msg.throttle, msg.alt, msg.climb)
            if msg_type == "GPS_RAW_INT":
                fix = NavSatStatus.STATUS_NO_FIX
                if msg.fix_type >= 3:
                    fix = NavSatStatus.STATUS_FIX
                # lat/lon are degE7, altitude is millimeters in GPS_RAW_INT
                pub_gps.publish(NavSatFix(latitude=msg.lat / 1e07,
                                          longitude=msg.lon / 1e07,
                                          altitude=msg.alt / 1e03,
                                          status=NavSatStatus(status=fix,
                                                              service=NavSatStatus.SERVICE_GPS)))
            if msg_type == "ATTITUDE":
                pub_attitude.publish(msg.roll, msg.pitch, msg.yaw,
                                     msg.rollspeed, msg.pitchspeed, msg.yawspeed)
            if msg_type == "LOCAL_POSITION_NED":
                print("Local Pos: (%f %f %f), (%f %f %f)" %
                      (msg.x, msg.y, msg.z, msg.vx, msg.vy, msg.vz))
            if msg_type == "RAW_IMU":
                pub_raw_imu.publish(Header(), msg.time_usec,
                                    msg.xacc, msg.yacc, msg.zacc,
                                    msg.xgyro, msg.ygyro, msg.zgyro,
                                    msg.xmag, msg.ymag, msg.zmag)
# wait for the heartbeat msg to find the system ID
wait_heartbeat(master)
# give the autopilot time to become ready before requesting streams
print("Sleeping for 10 seconds to allow system to be ready")
rospy.sleep(10)
print("Sending all stream request for rate %u" % opts.rate)
master.mav.request_data_stream_send(master.target_system, master.target_component,
                                    mavutil.mavlink.MAV_DATA_STREAM_ALL, opts.rate, 1)

if __name__ == '__main__':
    try:
        mainloop()
    except rospy.ROSInterruptException:
        pass
24,343 | 6e6e9666fba4a9ed25870af83c64f4c6686333bd |
from keras.models import Sequential
from keras.layers import Activation, Dropout, UpSampling2D, ZeroPadding2D
from keras.layers import Conv2DTranspose, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import regularizers
def CreateModel(input_shape):
    """Build the fully-convolutional encoder/decoder Keras model.

    Same architecture as before: batch-normalized input, seven 3x3 conv
    layers with three max-pools, then a mirrored upsampling/transposed-conv
    decoder ending in a single-channel output.
    """
    POOL = (2, 2)
    model = Sequential()
    # Normalizes incoming inputs; the first layer carries the input shape.
    model.add(BatchNormalization(input_shape=input_shape))

    def conv(name, filters, dropout=False):
        # 3x3 valid-padded ReLU convolution, optionally followed by Dropout(0.2)
        model.add(Conv2D(filters, (3, 3), padding='valid', strides=(1, 1),
                         activation='relu', name=name))
        if dropout:
            model.add(Dropout(0.2))

    def deconv(name, filters, dropout=False):
        # 3x3 valid-padded ReLU transposed convolution, optional Dropout(0.2)
        model.add(Conv2DTranspose(filters, (3, 3), padding='valid', strides=(1, 1),
                                  activation='relu', name=name))
        if dropout:
            model.add(Dropout(0.2))

    # --- encoder ---
    conv('Conv1', 32)
    conv('Conv2', 64)
    model.add(MaxPooling2D(pool_size=POOL))
    conv('Conv3', 64, dropout=True)
    conv('Conv4', 128, dropout=True)
    conv('Conv5', 128, dropout=True)
    model.add(MaxPooling2D(pool_size=POOL))
    conv('Conv6', 256, dropout=True)
    conv('Conv7', 256, dropout=True)
    model.add(MaxPooling2D(pool_size=POOL))
    # --- decoder ---
    model.add(UpSampling2D(size=POOL))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 0))))
    deconv('Deconv1', 256, dropout=True)
    deconv('Deconv2', 256, dropout=True)
    model.add(UpSampling2D(size=POOL))
    deconv('Deconv3', 128, dropout=True)
    deconv('Deconv4', 128, dropout=True)
    deconv('Deconv5', 64, dropout=True)
    model.add(UpSampling2D(size=POOL))
    model.add(ZeroPadding2D(padding=((2, 0), (0, 0))))
    deconv('Deconv6', 64)
    # Final layer - only one output channel
    deconv('Final', 1)
    return model
24,344 | 3f8be2914ac7c657f840be4e6835e6e08b047bfb | #------------------------------------------------------------
# Conner Addison 8984874
# Physics 129L
#------------------------------------------------------------
# Homework 1, Exercise 5
import subprocess

# List /etc.  Security fix: use an argument list with the default
# shell=False instead of passing a command string through the shell —
# same output, no shell parsing or injection surface.
subprocess.call(['/bin/ls', '/etc'])
24,345 | 7bf4ce81caffb434e5198ee875f66a3afa4fe23f | '''
Descripttion:
version:
Author: zpliu
Date: 2021-02-15 15:40:24
LastEditors: zpliu
LastEditTime: 2021-02-16 08:52:21
@param:
'''
import sys
def getConstitutiveIntronCoordinate(location):
    """Parse 'chrom:gene:start-end:strand' (or 'chrom:start-end') coordinates.

    Returns (start, end, last_field) as (int, int, str).  Note: when the
    location has fewer than three ':'-fields, the returned "strand" is the
    final field itself (the 'start-end' string), matching the original
    behavior.
    """
    fields = location.split(":")
    span = fields[2] if len(fields) > 2 else fields[-1]
    left, right = span.split("-")
    return int(left), int(right), fields[-1]
if __name__ == "__main__":
    # argv[1]: AS k-mer table; argv[2]: conserved intron locations; argv[3]: output
    ASkmerFile = sys.argv[1]
    conservedLocation = sys.argv[2]
    kmer = {}
    out = []
    with open(ASkmerFile, 'r') as File:
        for line in File:
            line = line.strip("\n").split("\t")
            if line[2] == '0':
                pass
            else:
                # shrink the interval by one base on each side
                start1 = str(int(line[1].split(":")[-1].split("-")[0])+1)
                end1 = str(int(line[1].split(":")[-1].split("-")[1])-1)
                kmer[line[0]] = line[1].split(":")[0]+":"+start1+"-"+end1
    with open(conservedLocation, 'r') as File:
        for line in File:
            line = line.strip("\n").split("\t")
            start1, end1, strand1 = getConstitutiveIntronCoordinate(line[1])
            # no sense with starnd2
            start2, end2, strand2 = getConstitutiveIntronCoordinate(
                kmer[line[0]])
            tmp = sorted([start1, start2, end1, end2])
            # keep the original location when overlap/union exceeds 95%
            if (tmp[2]-tmp[1])/(tmp[3]-tmp[0]) > 0.95:
                out.append("\t".join(line)+"\n")
            else:
                # otherwise rewrite the coordinates from the k-mer interval
                out.append(
                    line[0]+"\t"+line[1].split(":")[0]+":"+line[1].split(
                        ":")[1]+":"+str(start2)+"-"+str(end2)+":"+strand1+"\n"
                )
    with open(sys.argv[3], 'w') as File:
        for line in out:
            File.write(line)
24,346 | 96abf58fcea8224390d0c0a5d66fdc7529ddd191 | # calculator.py
def big(a, b):
    """Return the larger of a and b; returns b on ties (original behavior)."""
    # conditional expression instead of a four-line if/else
    return a if a > b else b
def small(a, b):
    """Return the smaller of a and b; returns a on ties (original behavior)."""
    # conditional expression instead of a four-line if/else
    return b if a > b else a
24,347 | 543a49e0f83e8cda7877aeff8982c7090ac97312 | #2.Find all such numbers divisible by 7, but not a multiple of 5, between 2000 and 3200 (inclusive). The numbers obtained should be printed on a single line in a comma-separated sequence.
"""
ipadress = input()
nodes = ipadress.split(".")
nodes.pop(0)
print(nodes)
"""
"""
l = []
for i in range(2000, 3201):
if (i%7==0) and (i%5!=0):
l.append(str(i))
print(','.join(l))
"""
"""
values=input()
l=values.split(",")
t=tuple(l)
print(l)
print(t)
"""
"""
y = input("Enter the words> ")
y = y.split(",")
y = sorted(y)
#map(lambda x:x.lower(),y)
print(",".join(y))\
"""
"""
words = [x for x in input().split(',')]
words.sort()
print(','.join(words))
"""
"""
s = input()
words = [word for word in s.split(" ")]
words = map(lambda x:x.lower(),words)
print(" ".join(sorted(list(set(words)))))
"""
"""
words = [x for x in input().split(',')]
#words = map(lambda x:x.lower(),words)
words = list(map(lambda x:x.lower(),words))
words.sort()
print(','.join(words))
"""
"""
words = input("Enter sequence of words separated by whitespace: ").split(' ')
words_set = set(words)
print(' '.join(sorted(words_set)))
#print(words_set)
"""
|
24,348 | 6c1b1df69f3d13b081c1f3c16f735537b0274f08 | from django.contrib import admin
from .models import Word, CategoryWord, RelationWord

# Register the vocabulary models so they are editable in the Django admin site.
admin.site.register(CategoryWord)
admin.site.register(Word)
admin.site.register(RelationWord)
24,349 | 998b4d975dfe48a3f02ccc0566a0eeaee3b62114 | #!/usr/bin/env python
# coding: utf-8
# # Take last 3 values of roll number If the value startswith "01" ---> "Cse dept" elif the value startswith "11" ---> "It dept" elif the value startswith "21" ---> "Ece dept" else not a student of Srm University
#
# In[ ]:
# In[ ]:
# In[12]:
# Classify an SRM roll number by its department code: the last three
# characters starting with "01" -> CSE, "11" -> IT, "21" -> ECE.
a=input("EnterRoll No ")
if a[-3:].startswith("01"):
    print("You belong to CSE Dept.")
elif a[-3:].startswith("11"):
    print("You belong to IT Dept.")
elif a[-3:].startswith("21"):
    print("You belong to ECE Dept.")
else:
    print("You are not an SRM student.")
# # Find all leap years between 1800 to 2020
# In[17]:
for i in range(1800,2020):
if ((i%4==0 or i%400==0) and (i%100!=0)):
print(i,end=" ")
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
24,350 | 0fb20195d61a25b0a19320fa7bcdd0e95467ef1e | from FloppyToolZ.Funci import *
import time
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.externals import joblib
from sklearn import preprocessing
from sklearn import metrics
# #### funcitons needed
# grid search
def Model(yValues, predictors, CVout, nCores):
    """Exhaustive 5-fold grid search over GradientBoostingRegressor hyper-parameters.

    Fits on (predictors, yValues), persists the fitted GridSearchCV object to
    `CVout` via joblib, and returns it.  `nCores` is passed to n_jobs.
    """
    param_grid = {'learning_rate': [0.1, 0.05, 0.02, 0.01, 0.005, 0.002, 0.001],
                  'max_depth': [4, 6],
                  'min_samples_leaf': [3, 5, 9, 17],
                  'max_features': [1.0, 0.5, 0.3, 0.1]}
    est = GradientBoostingRegressor(n_estimators=7500)
    gs_cv = GridSearchCV(est, param_grid, cv=5, refit=True, n_jobs=nCores).fit(predictors, yValues)
    # Write outputs to disk and return elements from function
    joblib.dump(gs_cv, CVout)
    return (gs_cv)
# model performance
def ModPerfor(cv_results, yData, xData):
    """Score a fitted GridSearchCV on held-out data and record the results.

    NOTE(review): appends metrics and best hyper-parameters into the
    module-level `res` dict instead of returning them — callers must have
    initialised `res` with the expected keys first.
    """
    # Calculate Predictions of the true values
    y_true, y_pred = yData, cv_results.predict(xData)
    res['r2'].append(metrics.r2_score(y_true, y_pred))
    res['mse'].append(metrics.mean_squared_error(y_true, y_pred))
    res['rmse'].append((metrics.mean_squared_error(y_true, y_pred))**0.5)
    res['y_true'].append(list(y_true))
    res['y_pred'].append(list(y_pred))
    # Get parameters from the best estimator
    res['max_depth'].append(cv_results.best_params_['max_depth'])
    res['learning_rate'].append(cv_results.best_params_['learning_rate'])
    res['min_samples_leaf'].append(cv_results.best_params_['min_samples_leaf'])
    res['max_features'].append(cv_results.best_params_['max_features'])
    #return res
# #### create master file list for pred/resp sets
# p1 = '/home/florus/'
p1 = 'Y:/_students_data_exchange/FP_FP/Seafile/myLibrary/MSc/Modelling/GAP_FILLED/ALL_VIs/data_junks_from_R/' # Geoserv2
path_ns = p1 + 'not_smooth/subsets'
path_sm = p1 + 'smooth/subsets'
path_sm5 = p1 + 'smooth5/subsets'
paths = [path_ns, path_sm, path_sm5]
fil = [getFilelist(path, '.csv') for path in paths]
filp = [f for fi in fil for f in fi]
# #### read in column names for different pred-sets (seasPAr, seasFit, seasStat)
c_fil = getFilelist('Y:/_students_data_exchange/FP_FP/Seafile/myLibrary/MSc\Modelling/All_VIs/colnames', '.csv')
c_fil.sort()
c_seasPar = pd.read_csv(c_fil[1])
c_seasFit = pd.read_csv(c_fil[0])
c_seasStat = pd.read_csv(c_fil[2])
c_seasPar = c_seasPar[c_seasPar.columns.values[0]].values.tolist()
c_seasFit = c_seasFit[c_seasFit.columns.values[0]].values.tolist()
c_seasStat = c_seasStat[c_seasStat.columns.values[0]].values.tolist()
c_seasParStat = c_seasPar + c_seasStat
c_seasPar.append('Mean_AGB')
c_seasFit.append('Mean_AGB')
c_seasStat.append('Mean_AGB')
c_seasParStat.append('Mean_AGB')
# exlcude GreenUP & Maturity due to too many NaNs
# kill = ['NDVI_GreenUp', 'EVI_GreenUp','NBR_GreenUp',
# 'NDVI_Maturity', 'EVI_Maturity', 'NBR_Maturity']
#
# for ki in kill:
# c_seasPar.remove(ki)
# c_seasParStat.remove(ki)
# #### read in the data-blocks and seperate into train & test
# build result container
keys = ['ParVers', 'ParSet', 'r2', 'mse', 'rmse', 'y_true', 'y_pred', 'max_depth', 'learning_rate', 'min_samples_leaf', 'max_features']
vals = [list() for i in range(len(keys))]
res = dict(zip(keys, vals))
# par_sets = [c_seasPar, c_seasFit, c_seasStat, c_seasParStat]
# par_names = ['SeasPar', 'SeasFIT', 'SeasStats', 'SeasParStats']
par_sets = [c_seasPar, c_seasStat, c_seasParStat]
par_names = ['SeasPar', 'SeasStats', 'SeasParStats']
# dummy model for save parallel
def ModelRun():
    """Run a GBR grid search for every data file × predictor set combination.

    Relies on the module-level `filp`, `par_sets`, `par_names` lists and
    fills the global `res` container via ModPerfor; fitted searches are
    persisted to the runs_greenMat directory.
    """
    # iterate over different parameter versions
    for n, pV in enumerate(filp):
        dat = pd.read_csv(pV)
        # iterate over different parameter-sets
        for i, par in enumerate(par_sets):
            # subset data per predictor set
            block = dat[par].dropna()
            # split into train & test
            x_Train, x_Test, y_Train, y_Test = train_test_split(block.iloc[:, np.where((block.columns.values=='Mean_AGB') == False)[0]],
                                block['Mean_AGB'], random_state= 42, test_size = 0.3)
            # scale training and test predictors
            # scaler = preprocessing.StandardScaler().fit(x_Train)
            # x_Train = scaler.transform(x_Train)
            # x_Test = scaler.transform(x_Test)
            # insert Modelversion into results-container
            res['ParVers'].append(pV.split('/')[-1].split('.')[0])
            res['ParSet'].append(par_names[i])
            stor = 'Y:/_students_data_exchange/FP_FP/Seafile/myLibrary/MSc/Modelling/GAP_FILLED/ALL_VIs/runs_greenMat/' + pV.split('/')[-1].split('.')[0] + par_names[i] + '.sav'
            ModPerfor(Model(y_Train, x_Train, stor, 40),
                      y_Test, x_Test)
        print(n)
# ##### run gbr once
if __name__ == '__main__':
    starttime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
    print("--------------------------------------------------------")
    print("Starting process, time: " + starttime)
    print("")
    # run model and store performances in results-container
    ModelRun()
    print("")
    endtime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
    print("--------------------------------------------------------")
    print("--------------------------------------------------------")
    print("start: " + starttime)
    print("end: " + endtime)
    print("")
    # persist the accumulated metrics for all runs
    df = pd.DataFrame(data = res)
    df.to_csv('Y:/_students_data_exchange/FP_FP/Seafile/myLibrary/MSc/Modelling/GAP_FILLED/ALL_VIs/runs_greenMat/AllRuns.csv', sep=',', index=False)
24,351 | a47371b53464e8203f37c72fe3ffc2c50e4e0640 | #coding:utf8
########################################################################################
# Davi Frossard, 2016 #
# VGG16 implementation in TensorFlow #
# Details: #
# http://www.cs.toronto.edu/~frossard/post/vgg16/ #
# #
# Model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md #
# Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow
# update: 2017-7-30 delphifan
########################################################################################
import tensorflow as tf
import numpy as np
import getdata as gd
from scipy.misc import imread, imresize
from imagenet_classes import class_names
from tensorflow.examples.tutorials.mnist import input_data
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
init_para = 0.1;
mnist = input_data.read_data_sets('data', one_hot=True)
class vgg16:
    def __init__(self, imgs, weights=None, sess=None):
        """Build the network graph on `imgs` and optionally load pre-trained weights.

        `imgs` is reshaped to NHWC [-1, 28, 28, 1] — MNIST-sized single-channel
        input despite the VGG16 class name.
        """
        self.imgs = tf.reshape(imgs,shape=[-1,28,28,1])#imgs
        self.convlayers()
        self.fc_layers()
        self.probs = tf.nn.softmax(self.fc3l) # compute the softmax layer output
        self.myout = tf.argmax(tf.nn.softmax(self.fc3l),1)
        if weights is not None and sess is not None: # load pre-trained weights
            self.load_weights(weights, sess)
def convlayers(self):
self.parameters = []
# zero-mean input
# 去RGB均值操作(这里RGB均值为原数据集的均值)
with tf.name_scope('preprocess') as scope:
mean = tf.constant([0,0,0],#123.68, 116.779, 103.939],
dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
images = self.imgs-mean
# conv1_1
with tf.name_scope('conv1_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv1_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv1_2
with tf.name_scope('conv1_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv1_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool1
self.pool1 = tf.nn.max_pool(self.conv1_2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool1')
# conv2_1
with tf.name_scope('conv2_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv2_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv2_2
with tf.name_scope('conv2_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv2_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv2_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool2
self.pool2 = tf.nn.max_pool(self.conv2_2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool2')
# conv3_1
with tf.name_scope('conv3_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv3_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv3_2
with tf.name_scope('conv3_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv3_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv3_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv3_3
with tf.name_scope('conv3_3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv3_2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv3_3 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool3
self.pool3 = tf.nn.max_pool(self.conv3_3,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool3')
# conv4_1
with tf.name_scope('conv4_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool3, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv4_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv4_2
with tf.name_scope('conv4_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv4_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv4_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv4_3
with tf.name_scope('conv4_3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv4_2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv4_3 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool4
self.pool4 = tf.nn.max_pool(self.conv4_3,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool4')
# conv5_1
with tf.name_scope('conv5_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool4, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv5_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv5_2
with tf.name_scope('conv5_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv5_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv5_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv5_3
with tf.name_scope('conv5_3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv5_2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv5_3 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool5
self.pool5 = tf.nn.max_pool(self.conv5_3,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool4')
    def fc_layers(self):
        """Build the classifier head: fc1 -> fc2 -> fc3 (raw scores in self.fc3l).

        Each layer's weight and bias Variables are appended to self.parameters
        in creation order; load_weights() relies on exactly this ordering.
        """
        # fc1
        with tf.name_scope('fc1') as scope:
            # get_shape()[1:] drops the batch dimension, e.g. x=[1,2,3] -> x[1:]=[2,3]
            # np.prod multiplies the remaining dims: np.prod([2,3]) = 2*3 = 6
            # Equivalent to: shape = self.pool5.get_shape();
            #                shape = shape[1].value * shape[2].value * shape[3].value
            shape = int(np.prod(self.pool5.get_shape()[1:]))
            fc_size = 128
            fc1w = tf.Variable(tf.truncated_normal([shape, fc_size],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[fc_size], dtype=tf.float32),
                                 trainable=True, name='biases')
            # flatten pool5 to [batch, shape] before the matmul
            pool5_flat = tf.reshape(self.pool5, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
            self.fc1 = tf.nn.relu(fc1l)
            self.parameters += [fc1w, fc1b]
        # fc2 (reuses fc_size defined above)
        with tf.name_scope('fc2') as scope:
            fc2w = tf.Variable(tf.truncated_normal([fc_size, fc_size],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[fc_size], dtype=tf.float32),
                                 trainable=True, name='biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            self.fc2 = tf.nn.relu(fc2l)
            self.parameters += [fc2w, fc2b]
        # fc3: final 10-way linear layer -- no ReLU, so self.fc3l holds raw scores
        with tf.name_scope('fc3') as scope:
            fc3w = tf.Variable(tf.truncated_normal([fc_size,10 ],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc3b = tf.Variable(tf.constant(1.0, shape=[10], dtype=tf.float32),
                                 trainable=True, name='biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            self.parameters += [fc3w, fc3b]
    def load_weights(self, weight_file, sess):
        """Load pretrained weights from a .npz archive into the graph.

        Assumes the archive's sorted key order lines up one-to-one with the
        order in which Variables were appended to self.parameters.
        """
        weights = np.load(weight_file)
        keys = sorted(weights.keys())
        for i, k in enumerate(keys):
            print i, k, np.shape(weights[k])  # Python 2 print statement
            sess.run(self.parameters[i].assign(weights[k]))
# Width of the two hidden fully-connected layers (wd1/wd2/bd1/bd2 below).
fca_size = 256
# Convolution / fully-connected weight Variables for the dict-style VGG().
# NOTE(review): 'wc1'..'wc4' use stddev=init_para (defined elsewhere in this
# file), while 'wc5'..'wc16' fall back to tf.random_normal's default stddev of
# 1.0 -- probably unintended; confirm before training.
weights ={
    'wc1':tf.Variable(tf.random_normal([3, 3, 1, 64], dtype=tf.float32, stddev=init_para), name='w1'),
    'wc2':tf.Variable(tf.random_normal([3, 3, 64, 64], dtype=tf.float32, stddev=init_para), name='w2'),
    'wc3':tf.Variable(tf.random_normal([3, 3, 64, 128], dtype=tf.float32, stddev=init_para), name='w3'),
    'wc4':tf.Variable(tf.random_normal([3, 3, 128, 128], dtype=tf.float32, stddev=init_para), name='w4'),
    'wc5':tf.Variable(tf.random_normal([3,3,128,256])),
    'wc6':tf.Variable(tf.random_normal([3,3,256,256])),
    'wc7':tf.Variable(tf.random_normal([3,3,256,256])),
    'wc8':tf.Variable(tf.random_normal([3,3,256,256])),
    'wc9':tf.Variable(tf.random_normal([3,3,256,512])),
    'wc10':tf.Variable(tf.random_normal([3,3,512,512])),
    'wc11':tf.Variable(tf.random_normal([3,3,512,512])),
    'wc12':tf.Variable(tf.random_normal([3,3,512,512])),
    'wc13':tf.Variable(tf.random_normal([3,3,512,512])),
    'wc14':tf.Variable(tf.random_normal([3,3,512,512])),
    'wc15':tf.Variable(tf.random_normal([3,3,512,512])),
    'wc16':tf.Variable(tf.random_normal([3,3,512,256])),
    # 200704 // 2 == 100352: '//' keeps this an int under Python 3 as well
    # (plain '/' would produce a float and break the shape argument).
    'wd1':tf.Variable(tf.truncated_normal([200704 // 2,fca_size], dtype=tf.float32, stddev=1e-2), name='fc1'),
    'wd2':tf.Variable(tf.truncated_normal([fca_size,fca_size], dtype=tf.float32, stddev=1e-2), name='fc2'),
    'out':tf.Variable(tf.truncated_normal([fca_size, 10], dtype=tf.float32, stddev=1e-2), name='fc3'),
}
# Bias Variables matching the weights above, keyed by the same suffixes.
biases ={
    'bc1':tf.Variable(tf.random_normal([64])),
    'bc2':tf.Variable(tf.random_normal([64])),
    'bc3':tf.Variable(tf.random_normal([128])),
    'bc4':tf.Variable(tf.random_normal([128])),
    'bc5':tf.Variable(tf.random_normal([256])),
    'bc6':tf.Variable(tf.random_normal([256])),
    'bc7':tf.Variable(tf.random_normal([256])),
    'bc8':tf.Variable(tf.random_normal([256])),
    'bc9':tf.Variable(tf.random_normal([512])),
    'bc10':tf.Variable(tf.random_normal([512])),
    'bc11':tf.Variable(tf.random_normal([512])),
    'bc12':tf.Variable(tf.random_normal([512])),
    'bc13':tf.Variable(tf.random_normal([512])),
    'bc14':tf.Variable(tf.random_normal([512])),
    'bc15':tf.Variable(tf.random_normal([512])),
    'bc16':tf.Variable(tf.random_normal([256])),
    'bd1':tf.Variable(tf.random_normal([fca_size])),
    'bd2':tf.Variable(tf.random_normal([fca_size])),
    'out':tf.Variable(tf.random_normal([10])),
}
def conv2D(name, x, w, b):
    """Stride-1 SAME convolution, bias add, then ReLU named `name`."""
    pre_activation = tf.nn.bias_add(
        tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME'), b)
    return tf.nn.relu(pre_activation, name=name)
def maxPool2D(name, x, k):
    """k x k max-pooling with stride k and SAME padding."""
    window = [1, k, k, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME', name=name)
def fc(name, x, w, b):
    """Fully-connected layer: x @ w + b followed by ReLU named `name`."""
    linear = tf.matmul(x, w) + b
    return tf.nn.relu(linear, name=name)
def norm(name, x, lsize=4):
    """Local response normalization with AlexNet-style hyper-parameters."""
    return tf.nn.lrn(x, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
def convLevel(i, input, type):
    """Apply conv layer number `i` (weights['wc<i>'] / biases['bc<i>']).

    When type == 'p', also run LRN and a (degenerate, k=1) max-pool.
    NOTE: parameter names `input` and `type` shadow builtins but are kept
    for interface compatibility.
    """
    layer_id = str(i)
    out = conv2D('conv' + layer_id, input, weights['wc' + layer_id], biases['bc' + layer_id])
    if type == 'p':
        out = norm('norm' + layer_id, out, lsize=4)
        out = maxPool2D('pool' + layer_id, out, k=1)
    return out
def lrn(_x):
    '''
    Apply local response normalization (depth radius 4, AlexNet defaults).
    :param _x: input tensor
    :return: normalized tensor
    '''
    return tf.nn.lrn(_x, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
def max_pool(_x, f):
    '''
    f x f max-pooling. The stride is fixed at [1, 1, 1, 1] because the input
    images are small, so this pooling does not downsample.
    :param _x: input tensor
    :param f: pooling window size
    :return: pooled tensor
    '''
    window = [1, f, f, 1]
    no_stride = [1, 1, 1, 1]
    return tf.nn.max_pool(_x, window, no_stride, padding='SAME')
def conv2d(_x, _w, _b):
    '''
    Convolution helper: stride-1 SAME convolution, bias add, then ReLU.
    (MNIST images are small, hence the fixed 1,1 stride.)
    :param _x: input tensor
    :param _w: convolution kernel
    :param _b: bias vector
    :return: activated feature map
    '''
    conv = tf.nn.conv2d(_x, _w, [1, 1, 1, 1], padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(conv, _b))
def VGG(x,weights,biases,dropout):
    """Build a small VGG-style conv net for flattened 28x28x1 (MNIST) input.

    @param x: batch of flattened images, shape [None, 784]
    @param weights: dict of Variables (uses 'wc1'..'wc4', 'wd1', 'wd2', 'out')
    @param biases: dict of matching bias Variables
    @param dropout: keep probability fed to tf.nn.dropout
    @return: softmax class probabilities, shape [None, 10]
    """
    x = tf.reshape(x,shape=[-1,28,28,1])
    input = x
    # conv blocks 1-2: conv -> LRN -> "pool" (max_pool uses stride 1, so no
    # actual downsampling happens -- the spatial size stays 28x28)
    conv1 = conv2d(input, weights['wc1'], biases['bc1'])
    lrn1 = lrn(conv1)
    pool1 = max_pool(lrn1, 2)
    conv2 = conv2d(pool1, weights['wc2'], biases['bc2'])
    lrn2 = lrn(conv2)
    pool2 = max_pool(lrn2, 2)
    # conv blocks 3-4: plain convolutions
    conv3 = conv2d(pool2, weights['wc3'], biases['bc3'])
    conv4 = conv2d(conv3, weights['wc4'], biases['bc4'])
    input = conv4
    # Flatten the last conv output (28*28*128 = 100352 -- matches wd1's shape).
    shape = input.get_shape()
    reshape = tf.reshape(input, [-1, shape[1].value*shape[2].value*shape[3].value])
    # FC1 with dropout
    fc1 = tf.nn.relu(tf.matmul(reshape, weights['wd1']) + biases['bd1'])
    fc1_drop = tf.nn.dropout(fc1, keep_prob=dropout)
    # FC2 with dropout
    fc2 = tf.nn.relu(tf.matmul(fc1_drop, weights['wd2']) + biases['bd2'])
    fc2_drop = tf.nn.dropout(fc2, keep_prob=dropout)
    # Softmax output layer.
    # NOTE(review): callers below feed this to softmax_cross_entropy_with_logits,
    # which expects *unscaled* logits -- applying softmax here double-softmaxes
    # the loss; confirm and consider returning the pre-softmax matmul instead.
    y_conv = tf.nn.softmax(tf.matmul(fc2_drop, weights['out']) + biases['out'])
    return y_conv
# ---- stand-alone training entry point for the dict-style VGG() above ----
if __name__ == '__main__':
    keep_prob = tf.placeholder(tf.float32)
    learning_rate = 0.0001
    train_iters = 100000
    batch_size = 64
    # keep probability of 1 => dropout effectively disabled during training
    dropout=1
    display_step = 10
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    pred = VGG(x, weights, biases, keep_prob)
    # NOTE(review): VGG() already applies softmax, but this loss expects raw
    # logits -- double softmax; confirm.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=pred))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
    accuracy_ = tf.reduce_mean(tf.cast(correct_pred,tf.float32))
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        step = 1
        # NOTE(review): `acc` is first assigned inside the loop; only the
        # short-circuit on the first operand prevents a NameError here.
        # `mnist` must be defined elsewhere in this file.
        while step*batch_size < train_iters or acc < 0.7:
            batch_x,batch_y = mnist.train.next_batch(batch_size)
            sess.run(optimizer,feed_dict={x:batch_x,y:batch_y,keep_prob:dropout})
            if step % display_step == 0 :
                #loss,acc = sess.run([cost,accuracy],feed_dict={x:batch_x,y:batch_y,keep_prob=1.0})
                # evaluate accuracy and loss on the current batch (keep_prob=1)
                acc = sess.run(accuracy_, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
                # compute the loss value
                loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
                print("iter: "+str(step*batch_size)+"mini batch Loss="+"{:.6f}".format(loss)+",acc="+"{:6f}".format(acc))
            step += 1
        print("training end!")
# ---- second, class-based training variant (uses the vgg16 class above) ----
learning_rate = 0.001
max_iters = 100000
batch_size = 128
images = tf.placeholder(tf.float32, [None, 784])#224, 224, 3])
classes = tf.placeholder(tf.float32, [None, 10])
# `gd` must be defined elsewhere in this file (MNIST loader).
input_data = gd.load_mnist('./data',kind = 't10k')
keep_prob=tf.placeholder(tf.float32)
dropout=0.8
vgg = vgg16(imgs = images)
# NOTE(review): vgg.probs is presumably a softmax output being fed where raw
# logits are expected -- confirm against the vgg16 class definition.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels = classes,logits = vgg.probs,name = 'entropy_with_logits'))
opt = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
correct_pred = tf.equal(tf.argmax(vgg.probs,1),tf.argmax(classes,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))
init = tf.global_variables_initializer()
# NOTE(review): this Session is created and initialized, then immediately
# shadowed by the `with` block below -- the first one leaks.
sess = tf.Session()
sess.run(init)
with tf.Session() as sess:
    sess.run(init)
    step=1
    while step*batch_size < max_iters:
        batch_xs,batch_ys = mnist.train.next_batch(batch_size)
        sess.run(opt,feed_dict = {images:batch_xs,classes:batch_ys})
        if step%10 == 0:
            # evaluate accuracy and loss on the current batch
            acc = sess.run(accuracy,feed_dict = {images:batch_xs,classes:batch_ys})
            loss = sess.run(cost,feed_dict = {images:batch_xs,classes:batch_ys})
            print "iter:"+str(step*batch_size)+"\tacc:"+"{:6f}".format(acc)+"\tloss:"+"{:6f}".format(loss)
        step+=1
# Dead inference/demo code kept for reference (a no-op string literal).
'''
print "start run\n"
#计算VGG16的softmax层输出(返回是列表,每个元素代表一个判别类型的数组)
prob = sess.run(vgg.probs, feed_dict={vgg.imgs: [img1, img2, img3]})
for pro in prob:
    # 源代码使用(np.argsort(prob)[::-1])[0:5]
    # np.argsort(x)返回的数组值从小到大的索引值
    #argsort(-x)从大到小排序返回索引值 [::-1]是使用切片将数组从大到小排序
    #preds = (np.argsort(prob)[::-1])[0:5]
    preds = (np.argsort(-pro))[0:5] #取出top5的索引
    for p in preds:
        print class_names[p], pro[p]
    print '\n'
sess = tf.Session()
imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16(imgs, 'vgg16_weights.npz', sess) # 载入预训练好的模型权重
img1 = imread('img1.jpg', mode='RGB') #载入需要判别的图片
img1 = imresize(img1, (224, 224))
img2 = imread('img2.jpg', mode='RGB')
img2 = imresize(img2, (224, 224))
img3 = imread('img3.jpg', mode='RGB')
img3 = imresize(img3, (224, 224))
print "start run\n"
#计算VGG16的softmax层输出(返回是列表,每个元素代表一个判别类型的数组)
prob = sess.run(vgg.probs, feed_dict={vgg.imgs: [img1, img2, img3]})
for pro in prob:
    # 源代码使用(np.argsort(prob)[::-1])[0:5]
    # np.argsort(x)返回的数组值从小到大的索引值
    #argsort(-x)从大到小排序返回索引值 [::-1]是使用切片将数组从大到小排序
    #preds = (np.argsort(prob)[::-1])[0:5]
    preds = (np.argsort(-pro))[0:5] #取出top5的索引
    for p in preds:
        print class_names[p], pro[p]
    print '\n'
'''
24,352 | 641308ca1979956ddc0fa26546d561e24ea399da | from time import time
import numpy as np

# Small fixed list for correctness checks, plus a large seeded random
# permutation for timing runs.
simple_list = [5,1,3,8,4,7,2,9,0,6]
np.random.seed(0)
big_list = np.random.permutation(100000)
first = simple_list[0]  # NOTE(review): never used below
def strive_sort(l):
    """Exchange sort over every index pair (ascending, in place).

    Compares l[a] against l[b] for every a, b in range(len(l)) and swaps
    whenever l[a] <= l[b]; the list ends up sorted. Returns the same list.
    """
    size = len(l)
    for a in range(size):
        for b in range(size):
            if l[a] <= l[b]:
                l[a], l[b] = l[b], l[a]
    return l
def buble_sort(l):
    """Classic bubble sort (ascending, in place); returns the same list."""
    for done in range(len(l)):
        # the last `done` positions already hold their final values
        for pos in range(len(l) - done - 1):
            if l[pos] > l[pos + 1]:
                l[pos], l[pos + 1] = l[pos + 1], l[pos]
    return l
def partition(l, low, high):
    """Lomuto partition of l[low:high+1] around pivot l[high].

    Returns the pivot's final index; elements < pivot end up to its left.
    """
    pivot = l[high]
    boundary = low - 1
    for cursor in range(low, high):
        if l[cursor] < pivot:
            boundary += 1
            l[boundary], l[cursor] = l[cursor], l[boundary]
    l[boundary + 1], l[high] = l[high], l[boundary + 1]
    return boundary + 1


def quick_sort(left, right, l):
    """Recursively sort l[left:right+1] in place using partition()."""
    if left >= right:
        return
    mid = partition(l, left, right)
    quick_sort(left, mid - 1, l)
    quick_sort(mid + 1, right, l)
# Timing harness. The O(n^2) sorts are disabled (too slow on big_list).
'''
start = time()
strive_sort(big_list)
end = time()
print("Strive took: ", end-start, "s")
start = time()
buble_sort(big_list)
end = time()
print("buble took: ", end-start, "s")
'''
# Time quicksort on the small demo list and show the sorted result.
start = time()
quick_sort(0, len(simple_list)-1,simple_list)
print(simple_list)
end = time()
print("quick took: ", end-start, "s")
|
24,353 | 2d9d911be247c8482c7b8ae58d590586d75f4d7c | import tensorflow as tf
import os
# Silence TensorFlow's C++ INFO/WARNING console output.
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

# Training data lies on y = x, so the fit should converge toward W=1, b=0.
x_train = [1,2,3]
y_train = [1,2,3]
# NOTE(review): variable name string 'wegith' is a typo for 'weight' (kept,
# since renaming would change checkpoint/graph names).
W = tf.Variable(tf.random_normal([1]),name='wegith')
b = tf.Variable(tf.random_normal([1]),name='bias')
# Linear model and mean-squared-error cost.
hypothesis = x_train*W + b
cost = tf.reduce_mean(tf.square(hypothesis-y_train))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Run 2001 gradient-descent steps, logging every 50th.
for step in range (2001):
    sess.run(train)
    if step %50 == 0:
        print(step, sess.run(cost),sess.run(W),sess.run(b))
|
24,354 | c839cbbe85c526b5be9b1b46d11de20c637bed41 | N = int(input())
arr = []
dx = [-1,1]  # column deltas for the two boundary walkers (left, right)
real_min = 9999  # best (max district sum - min district sum) found so far
# Read the N x N grid of cell values from stdin.
for i in range(N):
    arr.append(list(map(int, input().split())))
def rotate(num):
    """Toggle a binary direction index: 0 -> 1, anything else -> 0."""
    return 1 if num == 0 else 0
def check_number(five_num, visit, x, y, d1, d2):
    """Score one candidate partition of the N x N grid.

    `visit` marks the boundary district's cells with 5 and `five_num` holds
    that district's sum. The four loop blocks below sum the remaining four
    regions (skipping cells marked 5), track the smallest and largest district
    sum seen, and fold the spread (max - min) into the global `real_min`.
    Region boundaries are derived from the anchor (x, y) and arms d1/d2.
    """
    min_num = five_num
    max_num = five_num
    num = 0
    # region 1: rows [0, y), columns [0, x+d1]
    for i in range(0, y):
        for j in range(0, x+d1+1):
            if (visit[i][j] != 5):
                num+=arr[i][j]
    min_num = min(num, min_num)
    max_num = max(num, max_num)
    num = 0
    # region 2: rows [0, y-d1+d2], columns (x+d1, N)
    for i in range(0, y-d1+d2+1):
        for j in range(x+d1+1, N):
            if (visit[i][j] != 5):
                num += arr[i][j]
    min_num = min(num, min_num)
    max_num = max(num, max_num)
    num = 0
    # region 3: rows [y, N), columns [0, x+d2)
    for i in range(y, N):
        for j in range(0, x+d2):
            if (visit[i][j] != 5):
                num += arr[i][j]
    min_num = min(num, min_num)
    max_num = max(num, max_num)
    num = 0
    # region 4: rows (y-d1+d2, N), columns [x+d2, N)
    for i in range(y-d1+d2+1, N):
        for j in range(x+d2, N):
            if (visit[i][j] != 5):
                num += arr[i][j]
    min_num = min(num, min_num)
    max_num = max(num, max_num)
    # keep the best (smallest) spread found so far
    global real_min
    real_min = min(max_num-min_num, real_min)
def make_five(x, y, d1, d2):
    """Trace the diamond-shaped district anchored at (x, y) with arms d1/d2.

    Marks its cells with 5 in a fresh `visited` grid, accumulates the
    district's cell sum, then delegates scoring to check_number().
    """
    # corners: [x,y] // (x+d1, y-d1) // (x+d1+d2, y-d1+d2) // (x+d2, y+d2)
    visited = [[0]*N for _ in range (N)]
    # both walkers start at the top corner (column x+d1, row y-d1)
    leftstartx = x+d1
    starty = y-d1
    rightstartx = x + d1
    visited[starty][leftstartx] = 5
    five_num = 0
    five_num+=arr[starty][leftstartx]
    # walker directions index into dx = [-1, 1]: left edge moves -1, right +1
    dir1 = 0
    dir2 = 1
    while(True):
        # advance one row; move each walker one column in its direction
        leftstartx = leftstartx+dx[dir1]
        rightstartx = rightstartx + dx[dir2]
        starty = starty + 1
        # mark and sum every cell between the two walkers on this row
        for i in range(leftstartx, rightstartx+1):
            visited[starty][i] = 5
            five_num+=arr[starty][i]
        # walkers met at the bottom corner -- boundary complete
        if (leftstartx == rightstartx):
            break
        # reverse a walker when it reaches its turning corner
        if(leftstartx == x):
            dir1 = rotate(dir1)
        if(rightstartx == x+d1+d2):
            dir2 = rotate(dir2)
    check_number(five_num, visited, x, y, d1, d2)
if __name__ == "__main__":
for i in range(2, N):
for j in range(1, i):
d1 = j
d2 = i-j
for y in range(d1, N-d2):
for x in range(0, N-d1-d2):
make_five(x, y, d1, d2)
print(real_min)
|
24,355 | 103badaac605d06e8fc38c40cce6fe435c46e0fa | CYL_15_BOTTOM_14 = [[(0, 0), (0, 1), (0, 2), (1, 1)], [(0, 3), (1, 2), (1, 3), (1, 4)], [(0, 4), (0, 5), (0, 6), (1, 5)], [(0, 7), (1, 6), (1, 7), (1, 8)], [(0, 8), (0, 9), (0, 10), (1, 9)], [(0, 11), (0, 12), (0, 13), (1, 12)], [(1, 0), (2, 0), (2, 1), (3, 0)], [(1, 10), (2, 9), (2, 10), (3, 10)], [(1, 11), (2, 11), (2, 12), (3, 11)], [(2, 2), (3, 1), (3, 2), (4, 2)], [(2, 3), (3, 3), (3, 4), (4, 3)], [(2, 4), (2, 5), (2, 6), (3, 5)], [(2, 7), (3, 6), (3, 7), (4, 7)], [(2, 8), (3, 8), (3, 9), (4, 8)], [(2, 13), (3, 12), (3, 13), (4, 13)], [(4, 0), (5, 0), (5, 1), (6, 0)], [(4, 4), (4, 5), (4, 6), (5, 5)], [(4, 9), (4, 10), (4, 11), (5, 10)], [(4, 12), (5, 11), (5, 12), (6, 12)], [(5, 2), (5, 3), (5, 4), (6, 3)], [(5, 6), (5, 7), (5, 8), (6, 7)], [(5, 9), (6, 8), (6, 9), (6, 10)], [(6, 1), (7, 0), (7, 1), (8, 1)], [(6, 2), (7, 2), (7, 3), (8, 2)], [(6, 4), (6, 5), (6, 6), (7, 5)], [(6, 11), (7, 10), (7, 11), (8, 11)], [(7, 4), (8, 3), (8, 4), (8, 5)], [(7, 6), (7, 7), (7, 8), (8, 7)], [(7, 9), (8, 8), (8, 9), (8, 10)], [(8, 0), (9, 0), (9, 1), (10, 0)], [(8, 6), (9, 6), (9, 7), (10, 6)], [(9, 2), (10, 2), (10, 3), (11, 2)], [(9, 3), (9, 4), (9, 5), (10, 4)], [(9, 8), (10, 7), (10, 8), (10, 9)], [(9, 9), (9, 10), (9, 11), (10, 10)], [(9, 12), (10, 11), (10, 12), (10, 13)], [(10, 1), (11, 0), (11, 1), (12, 1)], [(11, 3), (12, 2), (12, 3), (13, 3)], [(11, 4), (12, 4), (12, 5), (13, 4)], [(11, 5), (11, 6), (11, 7), (12, 6)], [(11, 8), (12, 8), (12, 9), (13, 8)], [(11, 9), (11, 10), (11, 11), (12, 10)], [(11, 12), (12, 12), (12, 13), (13, 12)], [(12, 0), (13, 0), (13, 1), (14, 0)], [(12, 7), (13, 6), (13, 7), (14, 7)], [(12, 11), (13, 10), (13, 11), (14, 11)], [(13, 2), (14, 1), (14, 2), (14, 3)], [(13, 5), (14, 4), (14, 5), (14, 6)], [(13, 9), (14, 8), (14, 9), (14, 10)]]
CYL_15_BOTTOM_19 = [[(0, 0), (0, 1), (0, 2), (1, 1)], [(0, 3), (1, 2), (1, 3), (1, 4)], [(0, 4), (0, 5), (0, 6), (1, 5)], [(0, 7), (1, 6), (1, 7), (1, 8)], [(0, 8), (0, 9), (0, 10), (1, 9)], [(0, 11), (1, 10), (1, 11), (1, 12)], [(0, 12), (0, 13), (0, 14), (1, 13)], [(0, 15), (1, 14), (1, 15), (1, 16)], [(0, 16), (0, 17), (0, 18), (1, 17)], [(1, 0), (2, 0), (2, 1), (3, 0)], [(2, 2), (3, 1), (3, 2), (3, 3)], [(2, 3), (2, 4), (2, 5), (3, 4)], [(2, 6), (2, 7), (2, 8), (3, 7)], [(2, 9), (3, 8), (3, 9), (4, 9)], [(2, 10), (3, 10), (3, 11), (4, 10)], [(2, 11), (2, 12), (2, 13), (3, 12)], [(2, 14), (3, 13), (3, 14), (3, 15)], [(2, 15), (2, 16), (2, 17), (3, 16)], [(2, 18), (3, 17), (3, 18), (4, 18)], [(3, 5), (4, 4), (4, 5), (5, 5)], [(3, 6), (4, 6), (4, 7), (5, 6)], [(4, 0), (5, 0), (5, 1), (6, 0)], [(4, 1), (4, 2), (4, 3), (5, 2)], [(4, 8), (5, 7), (5, 8), (6, 8)], [(4, 11), (4, 12), (4, 13), (5, 12)], [(4, 14), (4, 15), (4, 16), (5, 15)], [(4, 17), (5, 16), (5, 17), (6, 17)], [(5, 4), (6, 3), (6, 4), (6, 5)], [(5, 9), (5, 10), (5, 11), (6, 10)], [(5, 13), (6, 12), (6, 13), (7, 13)], [(5, 14), (6, 14), (6, 15), (7, 14)], [(6, 1), (7, 0), (7, 1), (8, 1)], [(6, 2), (7, 2), (7, 3), (8, 2)], [(6, 6), (7, 5), (7, 6), (8, 6)], [(6, 7), (7, 7), (7, 8), (8, 7)], [(6, 9), (7, 9), (7, 10), (8, 9)], [(6, 11), (7, 11), (7, 12), (8, 11)], [(6, 16), (7, 15), (7, 16), (8, 16)], [(7, 4), (8, 4), (8, 5), (9, 4)], [(8, 0), (9, 0), (9, 1), (10, 0)], [(8, 3), (9, 2), (9, 3), (10, 3)], [(8, 8), (9, 7), (9, 8), (10, 8)], [(8, 10), (9, 9), (9, 10), (9, 11)], [(8, 12), (8, 13), (8, 14), (9, 13)], [(8, 15), (9, 14), (9, 15), (9, 16)], [(9, 5), (10, 4), (10, 5), (11, 5)], [(9, 6), (10, 6), (10, 7), (11, 6)], [(9, 12), (10, 11), (10, 12), (10, 13)], [(9, 17), (10, 16), (10, 17), (10, 18)], [(10, 1), (11, 0), (11, 1), (12, 1)], [(10, 2), (11, 2), (11, 3), (12, 2)], [(10, 9), (11, 8), (11, 9), (12, 9)], [(10, 10), (11, 10), (11, 11), (12, 10)], [(10, 14), (11, 13), (11, 14), (12, 14)], [(10, 15), 
(11, 15), (11, 16), (12, 15)], [(11, 4), (12, 3), (12, 4), (12, 5)], [(11, 7), (12, 6), (12, 7), (12, 8)], [(11, 12), (12, 11), (12, 12), (12, 13)], [(11, 17), (12, 17), (12, 18), (13, 17)], [(12, 0), (13, 0), (13, 1), (14, 0)], [(12, 16), (13, 15), (13, 16), (14, 16)], [(13, 2), (14, 1), (14, 2), (14, 3)], [(13, 3), (13, 4), (13, 5), (14, 4)], [(13, 6), (14, 5), (14, 6), (14, 7)], [(13, 7), (13, 8), (13, 9), (14, 8)], [(13, 10), (14, 9), (14, 10), (14, 11)], [(13, 11), (13, 12), (13, 13), (14, 12)], [(13, 14), (14, 13), (14, 14), (14, 15)]]
CYL_15_BOTTOM_23 = [[(0, 0), (0, 1), (0, 2), (1, 1)], [(0, 3), (0, 4), (0, 5), (1, 4)], [(0, 6), (1, 5), (1, 6), (1, 7)], [(0, 7), (0, 8), (0, 9), (1, 8)], [(0, 10), (1, 9), (1, 10), (1, 11)], [(0, 11), (0, 12), (0, 13), (1, 12)], [(0, 14), (1, 13), (1, 14), (1, 15)], [(0, 15), (0, 16), (0, 17), (1, 16)], [(0, 18), (1, 17), (1, 18), (2, 18)], [(0, 19), (1, 19), (1, 20), (2, 19)], [(0, 20), (0, 21), (0, 22), (1, 21)], [(1, 2), (2, 1), (2, 2), (3, 2)], [(1, 3), (2, 3), (2, 4), (3, 3)], [(2, 0), (3, 0), (3, 1), (4, 0)], [(2, 5), (2, 6), (2, 7), (3, 6)], [(2, 8), (3, 7), (3, 8), (4, 8)], [(2, 9), (3, 9), (3, 10), (4, 9)], [(2, 10), (2, 11), (2, 12), (3, 11)], [(2, 13), (3, 12), (3, 13), (3, 14)], [(2, 14), (2, 15), (2, 16), (3, 15)], [(2, 17), (3, 16), (3, 17), (4, 17)], [(2, 20), (2, 21), (2, 22), (3, 21)], [(3, 4), (4, 3), (4, 4), (5, 4)], [(3, 5), (4, 5), (4, 6), (5, 5)], [(3, 18), (3, 19), (3, 20), (4, 19)], [(4, 1), (5, 0), (5, 1), (6, 1)], [(4, 2), (5, 2), (5, 3), (6, 2)], [(4, 7), (5, 6), (5, 7), (6, 7)], [(4, 10), (4, 11), (4, 12), (5, 11)], [(4, 13), (5, 13), (5, 14), (6, 13)], [(4, 14), (4, 15), (4, 16), (5, 15)], [(4, 18), (5, 17), (5, 18), (5, 19)], [(4, 20), (4, 21), (4, 22), (5, 21)], [(5, 8), (5, 9), (5, 10), (6, 9)], [(5, 12), (6, 11), (6, 12), (7, 12)], [(5, 16), (6, 15), (6, 16), (6, 17)], [(6, 0), (7, 0), (7, 1), (8, 0)], [(6, 3), (6, 4), (6, 5), (7, 4)], [(6, 6), (7, 5), (7, 6), (7, 7)], [(6, 8), (7, 8), (7, 9), (8, 8)], [(6, 10), (7, 10), (7, 11), (8, 10)], [(6, 14), (7, 13), (7, 14), (7, 15)], [(6, 18), (7, 17), (7, 18), (7, 19)], [(6, 19), (6, 20), (6, 21), (7, 20)], [(7, 2), (8, 1), (8, 2), (9, 2)], [(7, 3), (8, 3), (8, 4), (9, 3)], [(7, 16), (8, 15), (8, 16), (9, 16)], [(8, 5), (8, 6), (8, 7), (9, 6)], [(8, 9), (9, 8), (9, 9), (9, 10)], [(8, 11), (8, 12), (8, 13), (9, 12)], [(8, 14), (9, 13), (9, 14), (9, 15)], [(8, 17), (8, 18), (8, 19), (9, 18)], [(8, 20), (9, 19), (9, 20), (9, 21)], [(9, 1), (10, 0), (10, 1), (10, 2)], [(9, 4), (10, 3), (10, 
4), (11, 4)], [(9, 5), (10, 5), (10, 6), (11, 5)], [(9, 7), (10, 7), (10, 8), (11, 7)], [(9, 11), (10, 10), (10, 11), (10, 12)], [(9, 17), (10, 17), (10, 18), (11, 17)], [(10, 9), (11, 8), (11, 9), (11, 10)], [(10, 13), (11, 12), (11, 13), (11, 14)], [(10, 14), (10, 15), (10, 16), (11, 15)], [(10, 19), (11, 18), (11, 19), (11, 20)], [(10, 20), (10, 21), (10, 22), (11, 21)], [(11, 0), (11, 1), (11, 2), (12, 1)], [(11, 3), (12, 2), (12, 3), (12, 4)], [(11, 6), (12, 5), (12, 6), (12, 7)], [(11, 11), (12, 10), (12, 11), (12, 12)], [(11, 16), (12, 15), (12, 16), (12, 17)], [(12, 0), (13, 0), (13, 1), (14, 0)], [(12, 8), (13, 7), (13, 8), (14, 8)], [(12, 9), (13, 9), (13, 10), (14, 9)], [(12, 13), (13, 12), (13, 13), (14, 13)], [(12, 14), (13, 14), (13, 15), (14, 14)], [(12, 18), (13, 17), (13, 18), (14, 18)], [(12, 19), (13, 19), (13, 20), (14, 19)], [(12, 20), (12, 21), (12, 22), (13, 21)], [(13, 2), (14, 1), (14, 2), (14, 3)], [(13, 3), (13, 4), (13, 5), (14, 4)], [(13, 6), (14, 5), (14, 6), (14, 7)], [(13, 11), (14, 10), (14, 11), (14, 12)], [(13, 16), (14, 15), (14, 16), (14, 17)]]
CYL_15_BOTTOM_27 = [[(0, 0), (0, 1), (0, 2), (1, 1)], [(0, 3), (1, 2), (1, 3), (1, 4)], [(0, 4), (0, 5), (0, 6), (1, 5)], [(0, 7), (1, 6), (1, 7), (1, 8)], [(0, 8), (0, 9), (0, 10), (1, 9)], [(0, 11), (1, 10), (1, 11), (1, 12)], [(0, 12), (0, 13), (0, 14), (1, 13)], [(0, 15), (1, 14), (1, 15), (1, 16)], [(0, 16), (0, 17), (0, 18), (1, 17)], [(0, 19), (0, 20), (0, 21), (1, 20)], [(0, 22), (1, 21), (1, 22), (2, 22)], [(0, 23), (1, 23), (1, 24), (2, 23)], [(0, 24), (0, 25), (0, 26), (1, 25)], [(1, 0), (2, 0), (2, 1), (3, 0)], [(1, 18), (2, 17), (2, 18), (3, 18)], [(1, 19), (2, 19), (2, 20), (3, 19)], [(2, 2), (3, 1), (3, 2), (3, 3)], [(2, 3), (2, 4), (2, 5), (3, 4)], [(2, 6), (3, 5), (3, 6), (4, 6)], [(2, 7), (3, 7), (3, 8), (4, 7)], [(2, 8), (2, 9), (2, 10), (3, 9)], [(2, 11), (3, 10), (3, 11), (3, 12)], [(2, 12), (2, 13), (2, 14), (3, 13)], [(2, 15), (3, 14), (3, 15), (4, 15)], [(2, 16), (3, 16), (3, 17), (4, 16)], [(2, 21), (3, 20), (3, 21), (4, 21)], [(2, 24), (2, 25), (2, 26), (3, 25)], [(3, 22), (3, 23), (3, 24), (4, 23)], [(4, 0), (5, 0), (5, 1), (6, 0)], [(4, 2), (4, 3), (4, 4), (5, 3)], [(4, 5), (5, 4), (5, 5), (5, 6)], [(4, 8), (5, 7), (5, 8), (6, 8)], [(4, 9), (5, 9), (5, 10), (6, 9)], [(4, 10), (4, 11), (4, 12), (5, 11)], [(4, 13), (5, 12), (5, 13), (6, 13)], [(4, 14), (5, 14), (5, 15), (6, 14)], [(4, 17), (5, 16), (5, 17), (5, 18)], [(4, 18), (4, 19), (4, 20), (5, 19)], [(4, 22), (5, 21), (5, 22), (5, 23)], [(4, 24), (4, 25), (4, 26), (5, 25)], [(5, 2), (6, 1), (6, 2), (6, 3)], [(5, 20), (6, 19), (6, 20), (6, 21)], [(6, 4), (7, 4), (7, 5), (8, 4)], [(6, 5), (6, 6), (6, 7), (7, 6)], [(6, 10), (6, 11), (6, 12), (7, 11)], [(6, 15), (7, 14), (7, 15), (7, 16)], [(6, 16), (6, 17), (6, 18), (7, 17)], [(6, 22), (7, 21), (7, 22), (7, 23)], [(6, 23), (6, 24), (6, 25), (7, 24)], [(7, 0), (8, 0), (8, 1), (9, 0)], [(7, 1), (7, 2), (7, 3), (8, 2)], [(7, 7), (7, 8), (7, 9), (8, 8)], [(7, 10), (8, 9), (8, 10), (9, 10)], [(7, 12), (8, 11), (8, 12), (9, 12)], [(7, 13), (8, 
13), (8, 14), (9, 13)], [(7, 18), (7, 19), (7, 20), (8, 19)], [(8, 3), (9, 2), (9, 3), (9, 4)], [(8, 5), (8, 6), (8, 7), (9, 6)], [(8, 15), (9, 14), (9, 15), (9, 16)], [(8, 16), (8, 17), (8, 18), (9, 17)], [(8, 20), (9, 19), (9, 20), (9, 21)], [(8, 21), (8, 22), (8, 23), (9, 22)], [(8, 24), (9, 23), (9, 24), (9, 25)], [(9, 1), (10, 0), (10, 1), (10, 2)], [(9, 5), (10, 4), (10, 5), (10, 6)], [(9, 7), (9, 8), (9, 9), (10, 8)], [(9, 11), (10, 10), (10, 11), (10, 12)], [(9, 18), (10, 17), (10, 18), (10, 19)], [(10, 3), (11, 3), (11, 4), (12, 3)], [(10, 7), (11, 6), (11, 7), (12, 7)], [(10, 9), (11, 8), (11, 9), (11, 10)], [(10, 13), (11, 12), (11, 13), (11, 14)], [(10, 14), (10, 15), (10, 16), (11, 15)], [(10, 20), (10, 21), (10, 22), (11, 21)], [(10, 23), (11, 22), (11, 23), (11, 24)], [(10, 24), (10, 25), (10, 26), (11, 25)], [(11, 0), (11, 1), (11, 2), (12, 1)], [(11, 5), (12, 5), (12, 6), (13, 5)], [(11, 11), (12, 10), (12, 11), (12, 12)], [(11, 16), (12, 15), (12, 16), (12, 17)], [(11, 17), (11, 18), (11, 19), (12, 18)], [(11, 20), (12, 19), (12, 20), (12, 21)], [(12, 0), (13, 0), (13, 1), (14, 0)], [(12, 4), (13, 3), (13, 4), (14, 4)], [(12, 8), (13, 7), (13, 8), (14, 8)], [(12, 9), (13, 9), (13, 10), (14, 9)], [(12, 13), (13, 12), (13, 13), (14, 13)], [(12, 14), (13, 14), (13, 15), (14, 14)], [(12, 22), (13, 21), (13, 22), (14, 22)], [(12, 23), (13, 23), (13, 24), (14, 23)], [(12, 24), (12, 25), (12, 26), (13, 25)], [(13, 2), (14, 1), (14, 2), (14, 3)], [(13, 6), (14, 5), (14, 6), (14, 7)], [(13, 11), (14, 10), (14, 11), (14, 12)], [(13, 16), (14, 15), (14, 16), (14, 17)], [(13, 17), (13, 18), (13, 19), (14, 18)], [(13, 20), (14, 19), (14, 20), (14, 21)]]
CYL_15_BOTTOM_13 = [[(0, 0), (1, 0), (1, 1), (2, 0)], [(0, 1), (0, 2), (0, 3), (1, 2)], [(0, 4), (1, 3), (1, 4), (1, 5)], [(0, 5), (0, 6), (0, 7), (1, 6)], [(0, 8), (1, 7), (1, 8), (2, 8)], [(0, 9), (1, 9), (1, 10), (2, 9)], [(0, 10), (0, 11), (0, 12), (1, 11)], [(2, 2), (3, 1), (3, 2), (4, 2)], [(2, 3), (3, 3), (3, 4), (4, 3)], [(2, 4), (2, 5), (2, 6), (3, 5)], [(2, 7), (3, 6), (3, 7), (4, 7)], [(2, 10), (2, 11), (2, 12), (3, 11)], [(3, 0), (4, 0), (4, 1), (5, 0)], [(3, 8), (3, 9), (3, 10), (4, 9)], [(4, 4), (4, 5), (4, 6), (5, 5)], [(4, 8), (5, 7), (5, 8), (5, 9)], [(4, 10), (4, 11), (4, 12), (5, 11)], [(5, 1), (6, 0), (6, 1), (6, 2)], [(5, 2), (5, 3), (5, 4), (6, 3)], [(5, 6), (6, 5), (6, 6), (7, 6)], [(5, 10), (6, 10), (6, 11), (7, 10)], [(6, 4), (7, 4), (7, 5), (8, 4)], [(6, 7), (6, 8), (6, 9), (7, 8)], [(7, 0), (8, 0), (8, 1), (9, 0)], [(7, 1), (7, 2), (7, 3), (8, 2)], [(7, 7), (8, 6), (8, 7), (9, 7)], [(7, 9), (8, 8), (8, 9), (8, 10)], [(8, 3), (9, 2), (9, 3), (10, 3)], [(8, 5), (9, 4), (9, 5), (9, 6)], [(9, 1), (10, 0), (10, 1), (10, 2)], [(9, 8), (9, 9), (9, 10), (10, 9)], [(9, 11), (10, 10), (10, 11), (10, 12)], [(10, 4), (10, 5), (10, 6), (11, 5)], [(10, 7), (11, 6), (11, 7), (12, 7)], [(10, 8), (11, 8), (11, 9), (12, 8)], [(11, 0), (12, 0), (12, 1), (13, 0)], [(11, 1), (11, 2), (11, 3), (12, 2)], [(11, 4), (12, 4), (12, 5), (13, 4)], [(11, 10), (12, 9), (12, 10), (13, 10)], [(11, 11), (12, 11), (12, 12), (13, 11)], [(12, 3), (13, 2), (13, 3), (14, 3)], [(13, 1), (14, 0), (14, 1), (14, 2)], [(13, 5), (14, 4), (14, 5), (14, 6)], [(13, 6), (13, 7), (13, 8), (14, 7)], [(13, 9), (14, 8), (14, 9), (14, 10)]]
CYL_15_TOP_3 = [[(0, 0), (1, 0), (1, 1), (2, 0)], [(4, 0), (5, 0), (5, 1), (6, 0)], [(6, 1), (7, 0), (7, 1), (7, 2)], [(8, 0), (8, 1), (8, 2), (9, 1)], [(10, 0), (11, 0), (11, 1), (12, 0)], [(13, 1), (14, 0), (14, 1), (14, 2)]]
CYL_15_TOP_10 = [[(0, 0), (0, 1), (0, 2), (1, 1)], [(0, 3), (1, 2), (1, 3), (1, 4)], [(0, 4), (0, 5), (0, 6), (1, 5)], [(1, 0), (2, 0), (2, 1), (3, 0)], [(1, 6), (1, 7), (1, 8), (2, 7)], [(2, 2), (3, 2), (3, 3), (4, 2)], [(2, 3), (2, 4), (2, 5), (3, 4)], [(2, 6), (3, 6), (3, 7), (4, 6)], [(3, 1), (4, 0), (4, 1), (5, 1)], [(3, 5), (4, 4), (4, 5), (5, 5)], [(4, 3), (5, 2), (5, 3), (5, 4)], [(4, 7), (5, 6), (5, 7), (5, 8)], [(5, 0), (6, 0), (6, 1), (7, 0)], [(6, 2), (7, 1), (7, 2), (8, 2)], [(6, 3), (7, 3), (7, 4), (8, 3)], [(6, 4), (6, 5), (6, 6), (7, 5)], [(6, 7), (7, 6), (7, 7), (8, 7)], [(6, 8), (7, 8), (7, 9), (8, 8)], [(8, 0), (9, 0), (9, 1), (10, 0)], [(8, 4), (8, 5), (8, 6), (9, 5)], [(9, 2), (9, 3), (9, 4), (10, 3)], [(9, 6), (9, 7), (9, 8), (10, 7)], [(10, 1), (11, 0), (11, 1), (12, 1)], [(10, 2), (11, 2), (11, 3), (12, 2)], [(10, 4), (10, 5), (10, 6), (11, 5)], [(11, 4), (12, 3), (12, 4), (12, 5)], [(11, 6), (11, 7), (11, 8), (12, 7)], [(12, 0), (13, 0), (13, 1), (14, 0)], [(12, 6), (13, 6), (13, 7), (14, 6)], [(13, 2), (14, 1), (14, 2), (14, 3)], [(13, 3), (13, 4), (13, 5), (14, 4)], [(13, 8), (14, 7), (14, 8), (14, 9)]]
CYL_15_TOP_7 = [[(0, 0), (0, 1), (0, 2), (1, 1)], [(0, 3), (1, 2), (1, 3), (2, 3)], [(0, 4), (1, 4), (1, 5), (2, 4)], [(1, 0), (2, 0), (2, 1), (3, 0)], [(3, 1), (4, 0), (4, 1), (4, 2)], [(3, 2), (3, 3), (3, 4), (4, 3)], [(4, 4), (5, 4), (5, 5), (6, 4)], [(5, 0), (6, 0), (6, 1), (7, 0)], [(5, 1), (5, 2), (5, 3), (6, 2)], [(6, 3), (7, 2), (7, 3), (8, 3)], [(6, 5), (7, 4), (7, 5), (7, 6)], [(7, 1), (8, 0), (8, 1), (8, 2)], [(8, 4), (8, 5), (8, 6), (9, 5)], [(9, 0), (9, 1), (9, 2), (10, 1)], [(9, 3), (10, 2), (10, 3), (10, 4)], [(10, 0), (11, 0), (11, 1), (12, 0)], [(11, 2), (12, 1), (12, 2), (12, 3)], [(11, 3), (11, 4), (11, 5), (12, 4)], [(13, 1), (14, 0), (14, 1), (14, 2)], [(13, 2), (13, 3), (13, 4), (14, 3)], [(13, 5), (14, 4), (14, 5), (14, 6)]]
CYL_15_TOP_12 = [[(0, 0), (1, 0), (1, 1), (2, 0)], [(0, 1), (0, 2), (0, 3), (1, 2)], [(0, 4), (1, 3), (1, 4), (1, 5)], [(0, 5), (0, 6), (0, 7), (1, 6)], [(0, 8), (1, 7), (1, 8), (2, 8)], [(0, 9), (1, 9), (1, 10), (2, 9)], [(2, 1), (2, 2), (2, 3), (3, 2)], [(2, 4), (3, 3), (3, 4), (3, 5)], [(2, 5), (2, 6), (2, 7), (3, 6)], [(3, 1), (4, 0), (4, 1), (4, 2)], [(3, 7), (3, 8), (3, 9), (4, 8)], [(4, 3), (5, 3), (5, 4), (6, 3)], [(4, 4), (4, 5), (4, 6), (5, 5)], [(4, 7), (5, 7), (5, 8), (6, 7)], [(4, 9), (5, 9), (5, 10), (6, 9)], [(5, 0), (5, 1), (5, 2), (6, 1)], [(5, 6), (6, 5), (6, 6), (7, 6)], [(6, 0), (7, 0), (7, 1), (8, 0)], [(6, 2), (7, 2), (7, 3), (8, 2)], [(6, 4), (7, 4), (7, 5), (8, 4)], [(6, 8), (7, 7), (7, 8), (8, 8)], [(6, 10), (7, 9), (7, 10), (7, 11)], [(8, 1), (9, 0), (9, 1), (9, 2)], [(8, 3), (9, 3), (9, 4), (10, 3)], [(8, 5), (8, 6), (8, 7), (9, 6)], [(8, 9), (8, 10), (8, 11), (9, 10)], [(9, 5), (10, 4), (10, 5), (10, 6)], [(9, 7), (9, 8), (9, 9), (10, 8)], [(10, 0), (10, 1), (10, 2), (11, 1)], [(10, 7), (11, 6), (11, 7), (11, 8)], [(10, 9), (11, 9), (11, 10), (12, 9)], [(11, 0), (12, 0), (12, 1), (13, 0)], [(11, 2), (11, 3), (11, 4), (12, 3)], [(11, 5), (12, 4), (12, 5), (12, 6)], [(12, 7), (13, 6), (13, 7), (14, 7)], [(12, 8), (13, 8), (13, 9), (14, 8)], [(13, 1), (14, 0), (14, 1), (14, 2)], [(13, 2), (13, 3), (13, 4), (14, 3)], [(13, 5), (14, 4), (14, 5), (14, 6)], [(13, 10), (14, 9), (14, 10), (14, 11)]]
CYL_15_TOP_25 = [[(0, 0), (0, 1), (0, 2), (1, 1)], [(0, 3), (1, 2), (1, 3), (1, 4)], [(0, 4), (0, 5), (0, 6), (1, 5)], [(0, 7), (1, 6), (1, 7), (2, 7)], [(0, 8), (1, 8), (1, 9), (2, 8)], [(0, 9), (0, 10), (0, 11), (1, 10)], [(0, 12), (1, 11), (1, 12), (2, 12)], [(0, 13), (1, 13), (1, 14), (2, 13)], [(0, 14), (0, 15), (0, 16), (1, 15)], [(0, 17), (1, 16), (1, 17), (2, 17)], [(0, 18), (1, 18), (1, 19), (2, 18)], [(0, 19), (0, 20), (0, 21), (1, 20)], [(0, 22), (1, 21), (1, 22), (1, 23)], [(1, 0), (2, 0), (2, 1), (3, 0)], [(2, 2), (2, 3), (2, 4), (3, 3)], [(2, 5), (3, 4), (3, 5), (4, 5)], [(2, 6), (3, 6), (3, 7), (4, 6)], [(2, 9), (2, 10), (2, 11), (3, 10)], [(2, 14), (2, 15), (2, 16), (3, 15)], [(2, 19), (2, 20), (2, 21), (3, 20)], [(2, 22), (3, 21), (3, 22), (4, 22)], [(3, 1), (4, 0), (4, 1), (5, 1)], [(3, 2), (4, 2), (4, 3), (5, 2)], [(3, 8), (4, 7), (4, 8), (5, 8)], [(3, 9), (4, 9), (4, 10), (5, 9)], [(3, 11), (3, 12), (3, 13), (4, 12)], [(3, 14), (4, 13), (4, 14), (5, 14)], [(3, 16), (4, 15), (4, 16), (4, 17)], [(3, 17), (3, 18), (3, 19), (4, 18)], [(4, 4), (5, 3), (5, 4), (6, 4)], [(4, 11), (5, 10), (5, 11), (5, 12)], [(4, 19), (4, 20), (4, 21), (5, 20)], [(5, 0), (6, 0), (6, 1), (7, 0)], [(5, 5), (5, 6), (5, 7), (6, 6)], [(5, 13), (6, 12), (6, 13), (6, 14)], [(5, 15), (5, 16), (5, 17), (6, 16)], [(5, 18), (6, 17), (6, 18), (7, 18)], [(5, 19), (6, 19), (6, 20), (7, 19)], [(5, 21), (5, 22), (5, 23), (6, 22)], [(6, 2), (7, 1), (7, 2), (8, 2)], [(6, 3), (7, 3), (7, 4), (8, 3)], [(6, 5), (7, 5), (7, 6), (8, 5)], [(6, 7), (7, 7), (7, 8), (8, 7)], [(6, 8), (6, 9), (6, 10), (7, 9)], [(6, 11), (7, 10), (7, 11), (7, 12)], [(6, 15), (7, 14), (7, 15), (7, 16)], [(6, 21), (7, 20), (7, 21), (8, 21)], [(6, 23), (7, 22), (7, 23), (7, 24)], [(7, 13), (8, 12), (8, 13), (8, 14)], [(7, 17), (8, 16), (8, 17), (9, 17)], [(8, 0), (9, 0), (9, 1), (10, 0)], [(8, 4), (9, 3), (9, 4), (10, 4)], [(8, 6), (9, 5), (9, 6), (9, 7)], [(8, 8), (8, 9), (8, 10), (9, 9)], [(8, 11), (9, 10), (9, 11), 
(9, 12)], [(8, 15), (9, 14), (9, 15), (9, 16)], [(8, 18), (8, 19), (8, 20), (9, 19)], [(8, 22), (8, 23), (8, 24), (9, 23)], [(9, 2), (10, 2), (10, 3), (11, 2)], [(9, 8), (10, 7), (10, 8), (10, 9)], [(9, 13), (10, 12), (10, 13), (10, 14)], [(9, 18), (10, 17), (10, 18), (10, 19)], [(9, 20), (9, 21), (9, 22), (10, 21)], [(10, 1), (11, 0), (11, 1), (12, 1)], [(10, 5), (11, 4), (11, 5), (12, 5)], [(10, 6), (11, 6), (11, 7), (12, 6)], [(10, 10), (11, 9), (11, 10), (12, 10)], [(10, 11), (11, 11), (11, 12), (12, 11)], [(10, 15), (11, 14), (11, 15), (12, 15)], [(10, 16), (11, 16), (11, 17), (12, 16)], [(10, 20), (11, 19), (11, 20), (11, 21)], [(10, 22), (11, 22), (11, 23), (12, 22)], [(11, 3), (12, 2), (12, 3), (12, 4)], [(11, 8), (12, 7), (12, 8), (12, 9)], [(11, 13), (12, 13), (12, 14), (13, 13)], [(11, 18), (12, 17), (12, 18), (12, 19)], [(12, 0), (13, 0), (13, 1), (14, 0)], [(12, 12), (13, 11), (13, 12), (14, 12)], [(12, 20), (13, 19), (13, 20), (14, 20)], [(12, 21), (13, 21), (13, 22), (14, 21)], [(13, 2), (14, 1), (14, 2), (14, 3)], [(13, 3), (13, 4), (13, 5), (14, 4)], [(13, 6), (14, 5), (14, 6), (14, 7)], [(13, 7), (13, 8), (13, 9), (14, 8)], [(13, 10), (14, 9), (14, 10), (14, 11)], [(13, 14), (14, 13), (14, 14), (14, 15)], [(13, 15), (13, 16), (13, 17), (14, 16)], [(13, 18), (14, 17), (14, 18), (14, 19)], [(13, 23), (14, 22), (14, 23), (14, 24)]]
|
24,356 | b8f302cd44e64c9616d9b5e4919e05551febc0df | from django.db import models
from django.contrib.auth.models import User
class SignUp(models.Model):
    """A member signup/profile record linked to a Django auth ``User``."""
    #user = models.ForeignKey(SignUp)
    email = models.EmailField()
    # NOTE(review): ForeignKey without on_delete= is only valid on Django < 2.0 — confirm target version.
    user = models.ForeignKey(User)
    full_name = models.CharField(max_length=40)
    # Set once when the row is created.
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    # Refreshed on every save().
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    #might need text widget
    skills = models.TextField()
    #Qualifications = models.TextField()
    Experience = models.TextField()
    CurrentDegree = models.CharField(max_length=40, blank = True, null = True)
    Currentprojects = models.TextField()
    location = models.CharField(max_length=40)
    active = models.BooleanField(default = True)
    def __unicode__(self):
        # Python 2-style display string (shown in the Django admin).
        return self.email
# Create your models here.
#class SearchProfiles(models.Model):
class UserPicture(models.Model):
    """A profile image uploaded by a ``User``.

    ``thumbnail`` flags whether this image serves as the profile thumbnail;
    ``active`` soft-deletes without removing the row.
    """
    # NOTE(review): ForeignKey without on_delete= is only valid on Django < 2.0 — confirm target version.
    user = models.ForeignKey(User)
    image = models.ImageField(upload_to='profiles/')
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    active = models.BooleanField(default = True)
    thumbnail = models.BooleanField(default = False)

    def __unicode__(self):
        # Fixed: the original line ended with a stray '|' character, which was a syntax error.
        return str(self.image)
24,357 | ddc721f7da163b8320901861a257f141ad86937d | #!/usr/local/bin/python
import sys
usage = """ Find square root of a give number
Usage: findSquareRoot.py <number>
Example: findSquareRoot.py 16"""
def main(argv):
    """
    Demonstrate mergeArrays() on fixed sample data.

    @param argv: Command-line arguments (currently unused)
    @type argv: array of strings
    """
    a = [1, 5, 10, 13]
    b = [4, 11, 12, 17, 19]
    merged = mergeArrays(a, b)
    # Python 3 print function — the original used the Python 2 print
    # statement, which is a SyntaxError on modern interpreters.
    print('Output: ', merged)
def mergeArrays(a, b):
    """
    Merge two sorted lists into one sorted list.

    Elements that are equal at the merge point are collapsed: when the
    heads of both lists compare equal, only one copy is emitted and both
    cursors advance (this matches the original behaviour exactly).

    @param a: sorted list of ints
    @param b: sorted list of ints
    """
    merged = []
    ia, ib = 0, 0
    len_a, len_b = len(a), len(b)

    # Walk both lists in lockstep, always emitting the smaller head.
    while ia < len_a and ib < len_b:
        left, right = a[ia], b[ib]
        if left < right:
            merged.append(left)
            ia += 1
        elif right < left:
            merged.append(right)
            ib += 1
        else:
            # Equal heads: emit once, advance both (dedup across lists).
            merged.append(left)
            ia += 1
            ib += 1

    # One of the lists is exhausted; the other's tail is already sorted.
    merged.extend(a[ia:])
    merged.extend(b[ib:])
    return merged
if __name__ == "__main__":
main(sys.argv[1:]) |
24,358 | 5d4cef2598a4e213a0478556f73fd8871b406e40 | import os
import tempfile
import shutil
import os
import zipfile
import tarfile
from contextlib import contextmanager
import requests
ARTIFACTORY_BASE_URL = "https://104.196.181.115/artifactory/libs-snapshot-local/com/globalpayments/businessview"
directories = ["datasets","tables","views"]
accepted_extensions = [".yaml", ".yml"]
@contextmanager
def mktmpdir():
    """Yield a freshly created temporary directory, removing it (and all
    of its contents) when the ``with`` block exits — even on error."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
# headers = {'X-Jfrog-Art-Api': os.environ['ARTIFACTORY_KEY']}
# with mktmpdir() as tmpdir:
# # r = requests.get(
# # ARTIFACTORY_BASE_URL, headers=headers, verify=False, stream=True)
# # local_path = os.path.join(tmpdir,"tar")
# # # Writes artifactory results to temp directory
# # with open(local_path, "wb") as f:
# # for chunk in r.iter_content(chunk_size=512):
# # if chunk:
# # f.write(chunk)
# service_path = os.path.join(tmpdir, "yaml")
# # print(service_path)
# local_path = os.path.join(os.getcwd(), "BQTableYamlFile_94.tar")
# # print(local_path)
# tar = tarfile.open(local_path)
# tar.extractall(service_path)
# # print(os.listdir(service_path))
# # service_archive = zipfile.ZipFile(local_path, mode="r")
# # service_archive.extractall(path=service_path)
# # service_archive.close()
# takes dir_name returns only yaml files in directory
def get_files(dir_name):
    """Return the YAML filenames (per `accepted_extensions`) directly inside
    ./<dir_name>.

    Only whitelisted directory names (see module-level `directories`) are
    scanned. Unknown names now return an empty list instead of the original
    implicit ``None``, so callers always get a list back.
    """
    if dir_name not in directories:
        return []
    folder = os.path.join(os.getcwd(), dir_name)
    suffixes = tuple(accepted_extensions)
    return [name for name in os.listdir(folder) if name.endswith(suffixes)]
24,359 | 50b89650285dcfaf8239fe65a05f38686bf32844 | import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Re-prompts until the input is one of the accepted values. (The original
    conditions of the form ``while x == 'a' or 'b'`` were always truthy —
    'b' is a non-empty string — so invalid input was silently accepted.)

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print("Hello! Let's explore some US bikeshare data!")

    cities = ('chicago', 'new york city', 'washington')
    while True:
        city = input("Would you like to analyze Chicago, New York City or Washington? ").lower()
        if city in cities:
            print("You have selected: ", city)
            break
        print("Invalid input. Start Over!")

    months = ('january', 'february', 'march', 'april', 'may', 'june', 'july',
              'august', 'september', 'october', 'november', 'december', 'all')
    while True:
        month = input('\nWhich month do you want to analyze? ').lower()
        if month in months:
            print("You would like to analyze: ", month)
            break
        print("Invalid input. Try again.")

    days = ('sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday',
            'saturday', 'all')
    while True:
        day = input('\nWhich day of the week are you interested in? ').lower()
        if day in days:
            print("You are interested in: ", day)
            break
        print("Invalid input. Try again.")

    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze (a key of CITY_DATA)
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day,
             with derived 'month', 'day_of_week' and 'hour' columns added
    """
    df = pd.read_csv(CITY_DATA[city])
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['month'] = df['Start Time'].dt.month
    # .dt.weekday_name was removed from pandas (deprecated 0.23, gone in 1.0);
    # .dt.day_name() returns the same capitalized weekday strings.
    df['day_of_week'] = df['Start Time'].dt.day_name()
    df['hour'] = df['Start Time'].dt.hour

    if month != 'all':
        months = ['january', 'february', 'march', 'april', 'may', 'june', 'july',
                  'august', 'september', 'october', 'november', 'december']
        # Convert month name to its 1-based calendar number.
        month = months.index(month) + 1
        df = df[df['month'] == month]

    if day != 'all':
        df = df[df['day_of_week'] == day.title()]

    return df
def time_stats(df):
    """Displays statistics on the most frequent times of travel.

    Expects derived 'month', 'day_of_week' and 'hour' columns on df.
    """
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()  # retained from original; elapsed time is never reported

    # Report the mode of each pre-computed time column.
    for column, label in (('month', 'month'), ('day_of_week', 'day'), ('hour', 'start time')):
        most_common = df[column].mode()[0]
        print("\nThe most popular {} is: ".format(label), most_common)
def station_stats(df):
    """Displays statistics on the most popular stations and trip.

    Note: mutates df by adding a combined 'Start and End St' column
    (side effect preserved from the original implementation).
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()  # retained from original; elapsed time is never reported

    busiest_start = df['Start Station'].mode()[0]
    print('\nThe most popular start station is: ', busiest_start)

    busiest_end = df['End Station'].mode()[0]
    print('\nThe most popular end station is: ', busiest_end)

    # Combined start+end key identifies the most frequent trip.
    df['Start and End St'] = df['Start Station'].map(str) + df['End Station']
    busiest_pair = df['Start and End St'].mode()[0]
    print('\nThe most popular combination of stations is', busiest_pair)
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration."""
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()  # retained from original; elapsed time is never reported

    durations = df['Trip Duration']
    print("\nTotal travel time in seconds for this time period is ", durations.sum())
    # Mean is truncated to an int, exactly as the original did.
    print("\nThe mean travel time for this time period is ", int(durations.mean()))
def user_stats(df):
    """Displays statistics on bikeshare users.

    Fixes from the original: ``break`` statements outside any loop
    (SyntaxError), references to undefined names (``city``,
    ``current_line``, ``display_data``), and ``np.nanmin(...)[0]``
    (nanmin returns a scalar, which is not subscriptable).

    Handles datasets that lack 'Gender'/'Birth Year' columns (e.g. the
    Washington file), then offers to page through raw data 5 rows at a time.
    """
    print('\nCalculating User Stats...\n')
    start_time = time.time()  # retained from original; elapsed time is never reported

    # Counts of user types ('User Type' exists in every city file).
    user_types = df['User Type'].value_counts()
    print("\nUser types are: ", user_types)

    # Gender breakdown — column is absent from the Washington dataset.
    if 'Gender' in df.columns:
        print("\nThe breakdown of gender is: ", df['Gender'].value_counts())
    else:
        print("That info isn't available")

    # Birth-year stats — min() is the oldest rider, max() the youngest;
    # pandas min/max skip NaN just like np.nanmin/np.nanmax did.
    if 'Birth Year' in df.columns:
        print('\nOldest birth year is', int(df['Birth Year'].min()))
        print('\nYoungest birth year is', int(df['Birth Year'].max()))
        print('\nMost common birth year is', int(df['Birth Year'].mode()[0]))
    else:
        print("That info isn't available.")

    # Page through the raw data on request, 5 rows per page.
    current_line = 0
    while current_line < len(df):
        display = input('\nWould you like to view the raw data 5 rows at a time? ').lower()
        if display != 'yes':
            break
        print(df.iloc[current_line:current_line + 5])
        current_line += 5
def main():
    """Interactive loop: gather filters, load data, report all stats, offer restart."""
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)
        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)
        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower() != 'yes':
            break


if __name__ == "__main__":
    # Guarded so importing this module (e.g. from tests) doesn't start the
    # interactive prompt loop — the original called main() unconditionally.
    main()
|
24,360 | c73f48adcb9b95c783a85c68425a6fc57f2d6c4e | from django.shortcuts import render
# Create your views here.
from rest_framework.generics import GenericAPIView
from rest_framework.views import APIView
class SMSCodeView(APIView):
    """Stub endpoint for requesting an SMS verification code."""
    def get(self,request,mobile):
        # TODO: generate and send the verification code for `mobile`.
        pass
class SMSCodeView(GenericAPIView):
    # NOTE(review): this redefinition shadows the APIView-based SMSCodeView
    # defined just above — only this empty version is visible to importers.
    # One of the two should likely be renamed or removed.
    pass
24,361 | 8e9997e287707166c820a3470e0be6be22fc9674 | # Generated by Django 3.0 on 2021-03-11 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: replaces Informe.fk_patologia (a FK) with a free-text
    'patologias' CharField."""

    dependencies = [
        ('polls', '0003_informe_sintomaspaciente'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='informe',
            name='fk_patologia',
        ),
        migrations.AddField(
            model_name='informe',
            name='patologias',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
|
24,362 | 27903500c37fdaceacdb9dc51d5488f6aa8e9296 | def orderJunctionBoxes(numberOfBoxes, boxList):
if not boxList:
return []
log_old = []
log_new = []
for box in boxList:
idx = box.index(" ")
key = box[:idx]
value = box[idx + 1:]
if value[0].isdigit():
log_new.append(box)
else:
log_old.append([key,value])
sorted_log = sorted(log_old, key=lambda x: (x[1], x[0]))
final = [" ".join(each) for each in sorted_log]
return final+log_new
# Sample input: ids followed by either word contents (sorted by contents,
# then id) or numeric contents (kept in input order, appended at the end).
boxList = [
    "ykc 82 01",
    "ykc 83 01",
    "eo first qpx",
    "09z cat hamster",
    "09z dog hamster",
    "06f 12 25 6",
    "az0 first qpx",
    "236 cat dog rabbit snake"
]
print(orderJunctionBoxes(len(boxList), boxList))
|
24,363 | 55ed0b0f08f6b81dacbe7fe572719dc329e7ff50 | #
# At the moment, this one is just for a quick test
# Turns ascii into Tiles
#
from core import Renderer
import libtcodpy as libtcod
from core.Tile import Tile
testmap = """
........;;;;;;;;;;;...........................;;;;;;;;;;;........;;;;;;;;;;;........;;;;;;;;;;;
..........;;;;;;;;;.............................;;;;;;;;;..........;;;;;;;;;..........;;;;;;;;;
..............;;;;;.................................;;;;;..............;;;;;..............;;;;;
...............;;;;..................................;;;;...............;;;;...............;;;;
........#......;;;;...........................#......;;;;........#......;;;;........#......;;;;
........#........;;.......###.................#........;;........#........;;........#........;;
....#####...............T....#............#####..............#####..............#####..........
..T..........................###........T..................T..................T................
......................T........................................................................
..T........................T............T..................T..................T................
........T............T........................T..................T..................T..........
...T.....................................T..................T..................T...............
..........###........T..........................###................###................###......
.....T....#............#####...............T....#.............T....#.............T....#........
.......###.................#........;;.......###................###................###.........
...........................#......;;;;.........................................................
..................................;;;;.........................................................
.................................;;;;;.........................................................
.............................;;;;;;;;;.........................................................
...........................;;;;;;;;;;;.........................................................
...........................;;;;;;;;;;;........;;;;;;;;;;;........;;;;;;;;;;;........;;;;;;;;;;;
.............................;;;;;;;;;..........;;;;;;;;;..........;;;;;;;;;..........;;;;;;;;;
.................................;;;;;..............;;;;;..............;;;;;..............;;;;;
..................................;;;;...............;;;;...............;;;;...............;;;;
...........................#......;;;;........#......;;;;........#......;;;;........#......;;;;
.......###.................#........;;........#........;;........#........;;........#........;;
.....T....#............#####..............#####..............#####..............#####..........
..........###........T..................T..................T..................T................
...T...........................................................................................
........T............T..................T..................T..................T................
..T........................T..................T..................T..................T..........
......................T..................T..................T..................T...............
..T..........................###................###................###................###......
....#####...............T....#.............T....#.............T....#.............T....#........
........#........;;.......###................###................###................###.........
........#......;;;;............................................................................
...............;;;;............................................................................
..............;;;;;............................................................................
..........;;;;;;;;;............................................................................
........;;;;;;;;;;;............................................................................
...............;;;;............................................................................
..............;;;;;............................................................................
..........;;;;;;;;;............................................................................
........;;;;;;;;;;;............................................................................
""".strip().split()
def createTiles():
    """Build the test map from the module-level `testmap` ASCII art.

    Returns (tiles, w, h): a flat, row-major list of Tile objects plus the
    map width (taken from the first row — assumes all rows are equally
    wide; TODO confirm) and height.
    """
    Renderer.Clear()
    # Renamed from `map`, which shadowed the builtin of the same name.
    tiles = []
    w, h = len(testmap[0]), len(testmap)
    x, y = 0, 0
    for row in testmap:
        for char in row:
            tiles.append(makeTile(char, x, y))
            x += 1
        y += 1
        x = 0
    return tiles, w, h
def makeTile(char, x, y):
    """Create a Tile for *char* at (x, y) with terrain-appropriate colors.

    Unknown characters fall back to white-on-black.
    """
    palette = {
        '.': (libtcod.chartreuse, libtcod.desaturated_chartreuse),       # grass
        ';': (libtcod.dark_chartreuse, libtcod.darker_chartreuse),       # undergrowth
        'T': (libtcod.dark_orange, libtcod.desaturated_chartreuse),      # tree
        '#': (libtcod.dark_grey, libtcod.desaturated_chartreuse),        # wall
    }
    col, bgcol = palette.get(char, (libtcod.white, libtcod.black))
    return Tile(char, col, bgcol, x, y)
|
24,364 | 319456d008fde4c084b646837126df3b63078c7c |
import os
import pathlib
from datetime import datetime
import shutil
import fmpy
from fmpy import *
import yaml
import re
from typing import Any, Dict, List, Union
def SIM_CONFIG_NAME_f(model_fp):
    """Map an FMU filepath to its sim-config YAML sibling:
    ``"model.fmu" -> "model_conf.yaml"``.

    (Was a lambda bound to a name — PEP 8 E731; a def keeps the same
    callable interface and gains a useful traceback name.)
    """
    return model_fp.replace(".fmu", "_conf.yaml")


# [TODO] dynamically read FMI version from modelDescription.xml
# ("1.0", "2.0", "3.0")
FMI_VERSION = "2.0"

# Default simulation horizon and communication step size (sim time units).
START_TIME = 0.0
STOP_TIME = 20.0
STEP_SIZE = 0.1
class FMUSimValidation:
def __init__(
self,
model_filepath: str,
user_validation: bool = True,
):
"""Template for validating FMU models for Bonsai integration.
Parameters
----------
model_filepath: str
Filepath to FMU model.
user_validation: bool
If True, model inputs/outputs need to be accepted by user for each run.
If False, YAML config file is used (if exists and valid). Otherwise, FMI
file is read. If FMI model description is also invalid, error is raised.
"""
# ensure model filepath is balid, and save as att if it is
assert model_filepath.endswith(".fmu"), "Provided filepath is not an FMU file: '{}'".format(model_filepath)
self.model_filepath = model_filepath
# config file with config_params, inputs, outputs
self.sim_config_filepath = SIM_CONFIG_NAME_f(self.model_filepath)
# read the model description
self.model_description = read_model_description(model_filepath)
error_log = "Provided model ({}) doesn't have modelVariables in XLS description file".format(model_filepath)
assert len(self.model_description.modelVariables) > 0, error_log
# correct non-alphanumeric tags.
# note, it doesn't suppose any problem, since interaction with sim uses indices, not names.
self._clean_non_alphanumeric_chars()
# collect the value references (indices)
# collect the value types (Real, Integer or Enumeration)
# collect the variables to be initialized and the value to do so at
self.vars_to_idx = {}
self.vars_to_type_f = {}
self.vars_to_ini_vals = {}
for variable in self.model_description.modelVariables:
# extract key attributes per variable
var_idx = variable.valueReference #, variable.causality
var_name = variable.name
var_type = variable.type
var_start = variable.start
# collect type reference
if var_type == "Real":
self.vars_to_type_f[var_name] = float
elif var_type == "Integer":
self.vars_to_type_f[var_name] = int
else:
# [TODO] Integrate variables of type "Enumeration". How do we cast? Define a function for "self.vars_to_type_f".
# [TODO] Integrate variables of type string (need to find correct var_type tag first).
# [TODO] Integrate variables of type boolean (need to find correct var_type tag first).
print(f"Variable '{var_name}' will be skipped. FMU connector cannot currently handle vars of type '{var_type}'.")
continue
# collect the value references (indices)
self.vars_to_idx[var_name] = var_idx
# collect the variables to be initialized and the value to do so at
if var_start is not None:
# cast variable prior to storing
self.vars_to_ini_vals[var_name] = self.vars_to_type_f[var_name](var_start)
# initialize sim config
self.is_model_config_valid = False # Currently unused, since error is raised if model invalid
self.sim_config_params = []
self.sim_inputs = []
self.sim_outputs = []
self.sim_other_vars = []
# ---------------------------------------------------------------------
# YAML CONFIG --> check for existing config using SIM_CONFIG_NAME_f --> e.g: "{model_name}_conf.yaml"
valid_config = self._validate_sim_config()
# exit if model is valid, unless validation has been activated
if valid_config:
# print model config for user reference: config_params, inputs, outputs
print(self._get_sim_config_str())
if user_validation:
# prompt user to manually validate model if selected
validation_asserted = input("Is this configuration correct (y|n)? ")
if validation_asserted == "y":
self.is_model_config_valid = True
return
# reset config if invalid
self.sim_config_params = []
self.sim_inputs = []
self.sim_outputs = []
self.sim_other_vars = []
else:
# when no validation is selected, we assume the sim config is valid
self.is_model_config_valid = True
return
# ---------------------------------------------------------------------
# FMI CONFIG --> if model is invalid we look for attributes within the .fmi model definition
valid_config = self._extract_sim_config_from_fmi_std()
if valid_config:
# print model config for user reference: config_params, inputs, outputs
print(self._get_sim_config_str())
if user_validation:
# prompt user to manually validate model if selected
validation_asserted = input("Is this configuration correct (y|n)? ")
if validation_asserted == "y":
self.is_model_config_valid = True
# dump YMAL file to reuse next time the model is loaded
self._dump_config_to_yaml_file()
return
else:
# when no validation is selected, we assume the sim config is valid
self.is_model_config_valid = True
# dump YMAL file to reuse next time the model is loaded
self._dump_config_to_yaml_file()
return
# Dump auxiliary YAML config file if user doesn't assert the provided set
# of config_params/inputs/outputs
self._dump_config_to_yaml_file(is_aux_yaml = True)
# If neither YAML nor FMI model is sufficient raise error
error_log = "MODEL DOES NOT HAVE THE CORRECT CONFIG DEFINED NEITHER ON YAML CONFIG FILE "
error_log += "NOR FMI MODEL DESCRIPTION. A YAML FILE HAS BEEN CREATED FOR YOU TO MODIFY. "
error_log += "THE SIM HAS BEEN FORCED TO EXIT, BUT FEEL FREE TO RERUN ONCE SET-UP IS COMPLETED."
raise Exception(error_log)
    def _validate_sim_config(self):
        """Load config_params/inputs/outputs/other_vars from the YAML config file.

        The file path follows SIM_CONFIG_NAME_f, e.g. "cartpole.fmu" ->
        "cartpole_conf.yaml". On success the four lists are stored on self and
        True is returned; returns False when the file is missing, lacks a
        'simulation' key, or declares no inputs/outputs.
        """
        print("\n[FMU Validator] ---- Looking to see if YAML config file exists ----")
        # use convention to search for config file
        config_file = self.sim_config_filepath
        if not os.path.isfile(config_file):
            print("[FMU Validator] Configuration file for selected example was NOT found: {}".format(config_file))
            return False
        print("[FMU Validator] Sim config file for selected example was found: {}\n".format(config_file))
        # Open and extract sim config from YAML file
        with open(config_file, 'r') as file:
            #data = yaml.dump(config_file, Loader=yaml.FullLoader)
            simulation_config = yaml.load(file, Loader=yaml.FullLoader)
        # Everything must live under a top-level 'simulation' mapping.
        if 'simulation' not in simulation_config.keys():
            print("[FMU Validator] Configuration file for selected example does not have a 'simulation' tag, thus it is omited.")
            return False
        # Extract sim configuration from dict
        sim_config_params = simulation_config['simulation']['config_params']
        sim_inputs = simulation_config['simulation']['inputs']
        sim_outputs = simulation_config['simulation']['outputs']
        sim_other_vars = simulation_config['simulation']['other_vars']
        # A usable config needs at least one input and one output.
        if len(sim_inputs) == 0:
            print("[FMU Validator] Sim config file has no sim-input states, and thus cannot be used\n")
        elif len(sim_outputs) == 0:
            print("[FMU Validator] Sim config file has no sim-output states, and thus cannot be used\n")
        else:
            # Store data extracted as attributes
            self.sim_config_params = sim_config_params
            self.sim_inputs = sim_inputs
            self.sim_outputs = sim_outputs
            self.sim_other_vars = sim_other_vars
            return True
        return False
    def _extract_sim_config_from_fmi_std(self):
        """Derive the sim config from each variable's FMI 'causality' attribute.

        Mapping: causality == "parameter" -> sim config_params,
        "input" -> sim inputs, "output" -> sim outputs, anything else ->
        other_vars. On success the lists are stored on self and True is
        returned; otherwise an auxiliary "_EDIT" YAML is dumped for the user
        to fill in and False is returned.
        """
        print("\n---- Looking to see if FMU model description contains required 'causality' type definitions ----")
        sim_config_params = []
        sim_inputs = []
        sim_outputs = []
        sim_other_vars = []
        for variable in self.model_description.modelVariables:
            # extract causality and append the variable name to the matching bucket
            causality = variable.causality
            if causality == "parameter":
                sim_config_params.append(variable.name)
            elif causality == "input":
                sim_inputs.append(variable.name)
            elif causality == "output":
                sim_outputs.append(variable.name)
            else:
                sim_other_vars.append(variable.name)
        # A usable config needs at least one input and one output.
        if len(sim_inputs) == 0:
            print("\n[FMU Validator] Sim FMU description file has no sim-input states, and thus cannot be used.")
        elif len(sim_outputs) == 0:
            print("\n[FMU Validator] Sim FMU description file has no sim-output states, and thus cannot be used.")
        else:
            # Store data extracted as attributes
            self.sim_config_params = sim_config_params
            self.sim_inputs = sim_inputs
            self.sim_outputs = sim_outputs
            self.sim_other_vars = sim_other_vars
            return True
        # Dump auxiliary YAML file for user to review/edit
        self._dump_config_to_yaml_file(sim_config_params,
                                       sim_inputs,
                                       sim_outputs,
                                       sim_other_vars,
                                       is_aux_yaml = True)
        return False
    def _dump_config_to_yaml_file(self,
                                  sim_config_params = None,
                                  sim_inputs = None,
                                  sim_outputs = None,
                                  sim_other_vars = None,
                                  is_aux_yaml = False):
        """Write the sim's config_params/inputs/outputs/other_vars to a YAML file.

        Any argument left as None defaults to the corresponding attribute on
        self. When is_aux_yaml is False the main config file (self.
        sim_config_filepath) is overwritten; when True a sibling "_EDIT" file
        is written for the user to review and rename.
        """
        if sim_config_params is None:
            sim_config_params = self.sim_config_params
        if sim_inputs is None:
            sim_inputs = self.sim_inputs
        if sim_outputs is None:
            sim_outputs = self.sim_outputs
        if sim_other_vars is None:
            sim_other_vars = self.sim_other_vars
        if not is_aux_yaml:
            config_file = self.sim_config_filepath
        else:
            config_file = self.sim_config_filepath.replace(".yaml", "_EDIT.yaml")
        # Shape matches what _validate_sim_config expects: a top-level
        # 'simulation' mapping with the four lists underneath.
        full_sim_config = {"config_params": sim_config_params,
                           "inputs": sim_inputs,
                           "outputs": sim_outputs,
                           "other_vars": sim_other_vars}
        full_sim_data = {"simulation": full_sim_config}
        # Dump configuration to YAML file for later reuse (or user editing if "is_aux_yaml==True")
        with open(config_file, 'w') as file:
            dump = yaml.dump(full_sim_data, sort_keys = False, default_flow_style=False)
            file.write( dump )
        # Tell the user where the file went (and how to activate an _EDIT copy).
        log = "\n[FMU Validator] A YAML file with bonsai required fields, as well as available "
        log += "sim variables, has been created at: \n --> '{}'\n".format(config_file)
        if is_aux_yaml:
            log += "[FMU Validator] Edit the YAML file, and remove the '_EDIT' nametag to use this model.\n"
        print(log)
        return
def _get_sim_config_str(self):
"""Get string with the sim's config_params, inputs, and outputs for the model
"""
log = "[FMU Validator] The set of configuration_parameters, inputs, and outputs defined is the following:\n"
log += "\n{}: {}".format("Sim Config Params -- Brain Config ", self.sim_config_params)
log += "\n{}: {}".format("Sim Inputs -- Brain Actions ", self.sim_inputs)
log += "\n{}: {}".format("Sim Outputs -- Brain States ", self.sim_outputs)
log += "\n{}: {}".format("Sim Other Vars -- Other Sim States ", self.sim_other_vars)
return log
    def _clean_non_alphanumeric_chars(self):
        """Strip every character outside [a-zA-Z0-9_] from model variable names.

        Renaming is safe for simulation because all interaction with the sim
        uses value references (indices), not names; the rename only affects
        how variables are exposed to Bonsai.
        """
        for i,variable in enumerate(self.model_description.modelVariables):
            clean_name = re.sub(r'[^a-zA-Z0-9_]', '', variable.name)
            if clean_name != variable.name:
                # Inform the user so they can map the cleaned name back to the FMU's.
                log = "Sim variable '{}' has been renamed to '{}' ".format(variable.name, clean_name)
                log += "to comply with Bonsai naming requirements."
                print(log)
                self.model_description.modelVariables[i].name = clean_name
        return
class FMUConnector:
def __init__(
self,
model_filepath: str,
fmi_version: str = FMI_VERSION,
start_time = START_TIME,
stop_time = STOP_TIME,
step_size = STEP_SIZE,
user_validation: bool = False,
use_unzipped_model: bool = False,
):
"""Template for simulating FMU models for Bonsai integration.
Note, it calls FMUSimValidation to validate the model when first instanced.
Parameters
----------
model_filepath: str
Full filepath to FMU model.
fmi_version: str
FMI version (1.0, 2.0, 3.0).
fmi_version from model_description to use in case fmi_version cannot
be read from model.
start_time: float
Timestep to start the simulation from (in time units).
stop_time: float
Timestep to stop simulation (in time units).
step_size: float
Time to leave the simulation running in between steps (in time units).
user_validation: bool
If True, model inputs/outputs need to be accepted by user for each run.
If False, YAML config file is used (if exists and valid). Otherwise, FMI
file is read. If FMI model description is also invalid, error is raised.
use_unzipped_model: bool
If True, model unzipping is not performed and unzipped version of the model
is used. Useful to test changes to unzipped FMI model.
Note, unzipping is performed if unzipped version is not found.
"""
# validate simulation: config_vars (optional), inputs, and outputs
validated_sim = FMUSimValidation(model_filepath, user_validation)
# extract validated sim configuration
self.model_filepath = validated_sim.model_filepath
self.sim_config_filepath = validated_sim.sim_config_filepath
self.model_description = validated_sim.model_description
# model variable names structured per type (config, inputs/brain actions, outputs/brain states)
self.sim_config_params = validated_sim.sim_config_params
self.sim_inputs = validated_sim.sim_inputs
self.sim_outputs = validated_sim.sim_outputs
self.sim_other_vars = validated_sim.sim_other_vars
# model variable dictionaries with
self.vars_to_idx = validated_sim.vars_to_idx
self.vars_to_type_f = validated_sim.vars_to_type_f
self.vars_to_ini_vals = validated_sim.vars_to_ini_vals
# get parent directory and model name (without .fmu)
aux_head_and_tail_tup = os.path.split(self.model_filepath)
self.model_dir = aux_head_and_tail_tup[0]
self.model_name = aux_head_and_tail_tup[1].replace(".fmu", "")
# placeholder to prevent accessing methods if initialization hasn't been called first
# also prevents calling self.fmu.terminate() if initialization hasn't occurred or termination has already been applied
self._is_initialized = False
# get FMI version
read_fmi_version = self.model_description.fmiVersion
if read_fmi_version in ["1.0", "2.0", "3.0"]:
# Use fmi version from model_description
print(f"[FMU Connector] FMU model indicates to be follow fmi version '{read_fmi_version}'.")
self.fmi_version = read_fmi_version
else:
assert fmi_version in ["1.0", "2.0", "3.0"], f"fmi version provided ({fmi_version}) is invalid."
# Use fmi version provided by user if the one on model_description is invalid
print(f"[FMU Connector] Using fmi version provided by user: v'{fmi_version}'. Model indicates v'{read_fmi_version}' instead.")
self.fmi_version = fmi_version
# save time-related data
error_log = "Stop time provided ({}) is lower than start time provided ({})".format(stop_time, start_time)
assert stop_time > start_time, error_log
error_log = "Step size time ({}) is greater than the difference between ".format(step_size)
error_log += "stop and start times, ({}) and ({}), respectively".format(stop_time, start_time)
assert step_size < stop_time-start_time, error_log
self.start_time = float(start_time)
self.stop_time = float(stop_time)
self.step_size = float(step_size)
self.sim_time = float(self.start_time)
# retrieve FMU model type, as well as model identifier
self.model_type = "None"
self.model_identifier = self.model_name
coSimulation = self.model_description.coSimulation
if coSimulation is not None:
self.model_identifier = coSimulation.modelIdentifier
self.model_type = "coSimulation"
else:
scheduledExecution = self.model_description.scheduledExecution
if scheduledExecution is not None:
self.model_identifier = scheduledExecution.modelIdentifier
self.model_type = "scheduledExecution"
else:
modelExchange = self.model_description.modelExchange
if modelExchange is not None:
self.model_identifier = modelExchange.modelIdentifier
self.model_type = "modelExchange"
else:
raise Exception("Model is not of any known type: coSimulation, scheduledExecution, nor modelExchange")
# extract the FMU
extract_path = os.path.join(self.model_dir, self.model_name + "_unzipped")
if not use_unzipped_model:
# extract model to subfolder by default
self.unzipdir = extract(self.model_filepath, unzipdir=extract_path)
else:
# use previouslly unzipped model
self.unzipdir = extract_path
# get unique identifier using timestamp for instance_name (possible conflict with batch)
self.instance_name = self._get_unique_id()
# ---------------------------------------------------------------
# instance model depending on 'fmi version' and 'fmu model type'
self.fmu = None
print(f"[FMU Connector] Model has been determined to be of type '{self.model_type}' with fmi version == '{self.fmi_version}'.")
if self.model_type == "modelExchange":
## [TODO] test integrations
print(f"[FMU Connector] Simulator hasn't been tested for '{self.model_type}' models with fmi version == '{self.fmi_version}'.")
if self.fmi_version == "1.0":
self.fmu = fmi1.FMU1Model(guid=self.model_description.guid,
unzipDirectory=self.unzipdir,
modelIdentifier=self.model_identifier,
instanceName=self.instance_name)
elif self.fmi_version == "2.0":
self.fmu = fmi2.FMU2Model(guid=self.model_description.guid,
unzipDirectory=self.unzipdir,
modelIdentifier=self.model_identifier,
instanceName=self.instance_name)
elif self.fmi_version == "3.0":
self.fmu = fmi3.FMU3Model(guid=self.model_description.guid,
unzipDirectory=self.unzipdir,
modelIdentifier=self.model_identifier,
instanceName=self.instance_name)
elif self.model_type == "coSimulation":
if self.fmi_version == "1.0":
## [TODO] test integrations
print(f"[FMU Connector] Simulator hasn't been tested for '{self.model_type}' models with fmi version == '{self.fmi_version}'.")
self.fmu = fmi1.FMU1Slave(guid=self.model_description.guid,
unzipDirectory=self.unzipdir,
modelIdentifier=self.model_identifier,
instanceName=self.instance_name)
elif self.fmi_version == "2.0":
self.fmu = fmi2.FMU2Slave(guid=self.model_description.guid,
unzipDirectory=self.unzipdir,
modelIdentifier=self.model_identifier,
instanceName=self.instance_name)
elif self.fmi_version == "3.0":
## [TODO] test integrations
print(f"[FMU Connector] Simulator hasn't been tested for '{self.model_type}' models with fmi version == '{self.fmi_version}'.")
self.fmu = fmi3.FMU3Slave(guid=self.model_description.guid,
unzipDirectory=self.unzipdir,
modelIdentifier=self.model_identifier,
instanceName=self.instance_name)
elif self.model_type == "scheduledExecution":
if self.fmi_version == "1.0" or self.fmi_version == "2.0":
raise Exception("scheduledExecution type only exists in fmi v'3.0', but fmi version '{}' was provided.".format(self.fmi_version))
print(f"[FMU Connector] Simulator hasn't been tested for '{self.model_type}' models with fmi version == '{self.fmi_version}'.")
## [TODO] test integrations
#elif self.fmi_version_int == 3:
self.fmu = fmi3.FMU3ScheduledExecution(guid=self.model_description.guid,
unzipDirectory=self.unzipdir,
modelIdentifier=self.model_identifier,
instanceName=self.instance_name)
# ---------------------------------------------------------------
return
def initialize_model(self, config_param_vals = None):
    """Initialize the FMU in the sequential manner required by FMI.

    Sequence: instantiate -> reset -> setupExperiment -> (optional config)
    -> enter/exitInitializationMode.

    config_param_vals: dict, optional
        (param_name, value) pairs forwarded to _apply_config before
        entering initialization mode.
    """
    # NOTE: the flag is deliberately set *before* the FMI calls:
    # _apply_config -> _set_variables -> _model_has_been_initialized would
    # raise otherwise. Do not move this to the end of the method.
    self._is_initialized = True
    self.fmu.instantiate()
    self.fmu.reset()
    self.fmu.setupExperiment(startTime=self.start_time)
    if config_param_vals is not None:
        self._apply_config(config_param_vals)
    self.fmu.enterInitializationMode()
    self.fmu.exitInitializationMode()
    return
def run_step(self):
    """Advance the simulation by one communication step.

    Steady-state FMUs (those without a 'doStep' method) are left
    untouched; a message is printed and no time advance happens.
    Raises if 'initialize_model' has not been called yet.
    """
    # Ensure model has been initialized at least once
    self._model_has_been_initialized("run_step")
    # hasattr is the idiomatic (and cheaper) replacement for the previous
    # `"doStep" not in dir(self.fmu)` scan.
    if not hasattr(self.fmu, "doStep"):
        error_log = "[run_step] FMU model cannot be run one step-forward, since it is a steady-state sim. "
        error_log += "No step advance will be applied."
        print(error_log)
        return
    self.fmu.doStep(currentCommunicationPoint=self.sim_time, communicationStepSize=self.step_size)
    self.sim_time += self.step_size
    return
def reset(self, config_param_vals: Dict[str, Any] = None):
    """Re-initialize the model, optionally applying a new configuration.

    config_param_vals: dict, optional
        Forwarded to initialize_model / _apply_config.
    """
    # Refuse to reset a model that was never initialized.
    self._model_has_been_initialized("reset")
    # Tear down the current instance, then run the full init sequence again.
    self._terminate_model()
    self.initialize_model(config_param_vals)
    # Rewind the simulation clock to the configured start time.
    self.sim_time = float(self.start_time)
    return
def close_model(self):
    """Terminate the FMU, free its instance and delete the unzipped model.

    Raises if 'initialize_model' has never been called.
    """
    self._model_has_been_initialized("close_model")
    # Guarded terminate: avoids an FMU error if termination already happened.
    self._terminate_model()
    # Release the FMU instance handle.
    self.fmu.freeInstance()
    # Best-effort removal of the temporary extraction folder.
    # [TODO] enforce clean up even when exceptions are thrown, or after keyboard interruption
    shutil.rmtree(self.unzipdir, ignore_errors=True)
    return
def get_states(self, sim_outputs: List = None):
    """Return a dict of (state_name, value) pairs for the requested outputs.

    sim_outputs: list, optional
        Variable names to read. Falls back to self.sim_outputs when None
        or empty. Invalid names are skipped; an empty dict is returned
        when nothing valid was requested.
    """
    # Ensure model has been initialized at least once
    self._model_has_been_initialized("get_states")
    # One truthiness check covers both the None and the empty-list cases
    # the original handled separately.
    if not sim_outputs:
        sim_outputs = self.sim_outputs
    states_dict = self._get_variables(sim_outputs)
    if not states_dict:
        print("[get_states] No valid state names have been provided. No states are returned.")
        return {}
    return states_dict
def apply_actions(self, b_action_vals: Dict[str, Any] = None):
    """Apply brain actions to simulation inputs.

    b_action_vals: dict, optional
        Dictionary of brain (action_name, action_value) pairs.
        (Default changed from a mutable `{}` to None — backward
        compatible, avoids the shared-mutable-default pitfall.)

    Returns True when at least one valid action was applied.
    """
    # Ensure model has been initialized at least once
    self._model_has_been_initialized("apply_actions")
    # Covers both None and an empty dict.
    if not b_action_vals:
        print("[apply_actions] Provided action dict is empty. No action changes will be applied.")
        return False
    # Forward the action values to the FMU.
    applied_actions_bool = self._set_variables(b_action_vals)
    if not applied_actions_bool:
        print("[apply_actions] No valid action parameters were found. No actions applied.")
    return applied_actions_bool
def get_all_vars(self):
    """Return a {var_name: var_val} dict for every variable in the sim."""
    # Refuse to read from a model that was never initialized.
    self._model_has_been_initialized("get_all_vars")
    # Delegate: get_states already knows how to resolve a name list.
    return self.get_states(self.get_all_var_names())
def get_all_var_names(self):
    """Return the list of all variable names defined for the sim.

    Duplicates are removed while keeping first-seen order. The list is
    computed once and cached on the instance (self.all_var_names), so
    later calls return the cached value.
    """
    if hasattr(self, "all_var_names"):
        return self.all_var_names
    # All variables declared for the model (config, inputs, outputs, other).
    combined = [
        *self.sim_config_params,
        *self.sim_inputs,
        *self.sim_outputs,
        *self.sim_other_vars,
    ]
    # dict.fromkeys dedups in O(n) while preserving insertion order
    # (the original list-scan dedup was O(n^2)).
    self.all_var_names = list(dict.fromkeys(combined))
    return self.all_var_names
def _apply_config(self, config_param_vals: Dict[str, Any] = {}):
"""Apply configuration paramaters.
"""
# Ensure array is not empty
if not len(config_param_vals.items()) > 0:
print("[_apply_config] Config params was provided empty. No changes applied.")
return False
# We forward the configuration values provided
applied_config_bool = self._set_variables(config_param_vals)
# Report config application to user
if not applied_config_bool:
print("[_apply_config] No valid config parameters were found. No changes applied.")
# Take of any other variables that require initialization
print("[_apply_config] Apply additional required initializations.")
non_initialized_vars = [var_tuple for var_tuple in self.vars_to_ini_vals.items() \
if var_tuple[0] not in config_param_vals.keys()]
vars_to_initialize_d = dict(non_initialized_vars)
applied_init_bool = self._set_variables(vars_to_initialize_d)
# Report additional initializations to user
if applied_init_bool:
log = "[_apply_config] Initialized the following required values to "
log += "FMU defaults: ({}).".format(vars_to_initialize_d)
print(log)
return applied_config_bool
def _get_variables(self, sim_outputs: List = None):
"""Get var indices for each (valid) var name provided in list.
"""
# Ensure model has been initialized at least once
self._model_has_been_initialized("_get_variables")
# Ensure array is not empty
if sim_outputs is None:
return {}
elif not len(sim_outputs) > 0:
#print("[_get_variables] No var names were provided. No vars are returned.")
return {}
sim_output_indices, sim_output_names = self._var_names_to_indices(sim_outputs)
# Check if more than one index has been found
if not len(sim_output_indices) > 0:
#print("[_get_variables] No valid var names have been provided. No vars are returned.")
return {}
outputs_dict = dict(zip(sim_output_names, self.fmu.getReal(sim_output_indices)))
return outputs_dict
def _set_variables(self, b_input_vals: Dict[str, Any] = {}):
"""Apply given input values to simulation.
b_input_vals: dict
Dictionary of brain (input_name, input_value) pairs.
"""
# Ensure model has been initialized at least once
self._model_has_been_initialized("_set_variables")
# Ensure dict is not empty
if not len(b_input_vals.items()) > 0:
#print("[_set_variables] Provided input dict is empty. No input changes will be applied.")
return False
# Get input names, and extract indices
sim_inputs = list(b_input_vals.keys())
sim_input_indices,sim_input_names = self._var_names_to_indices(sim_inputs)
# Check if more than one index has been found
if not len(sim_input_indices) > 0:
#print("[_set_variables] No valid input names have been provided. No input changes will be applied.")
return False
# Extract values for valid inputs (found on model variables)
sim_input_vals = []
for sim_input_name in sim_input_names:
# Cast to correct var type prior to appending
sim_input_casted = self.vars_to_type_f[sim_input_name](b_input_vals[sim_input_name])
sim_input_vals.append(sim_input_casted)
# Update inputs to the brain
self.fmu.setReal(sim_input_indices, sim_input_vals)
return True
def _var_names_to_indices(self, var_names: List):
"""Get var indices for each var name provided in list.
"""
if type(var_names) is not type([]):
# Return empty array if input is not 'list' type
print("[_var_names_to_indices] Provided input is not of type list.")
return []
indices_array = []
names_array = []
for name in var_names:
if name not in self.vars_to_idx.keys():
print("[_var_names_to_indices] Invalid variable name '{}' has been skipped.".format(name))
continue
indices_array.append(self.vars_to_idx[name])
names_array.append(name)
if not len(var_names) > 0:
print("[_var_names_to_indices] No (valid) states have been provided.")
return indices_array, names_array
def _get_unique_id(self):
"""Get unique id for instance name (identifier).
"""
now = datetime.now()
u_id = now.second + 60*(now.minute + 60*(now.hour + 24*(now.day + 31*(now.month + 366*(now.year)))))
return "instance" + str(u_id)
def _model_has_been_initialized(self, method_name: str = ""):
"""Ensure model has been initialized at least once.
"""
if not self._is_initialized:
error_log = "Please, initialize the model using 'initialize_model' method, prior "
error_log += "to calling '{}' method.".format(method_name)
raise Exception(error_log)
def _terminate_model(self):
"""Ensure model has been initialized at least once.
"""
# Ensure model has been initialized at least once
self._model_has_been_initialized("_terminate_model")
if not self._is_initialized:
print("[_terminate_model] Model hasn't been initialized or has already been terminated. Skipping termination.")
return
# Terminate instance
self.fmu.terminate()
self._is_initialized = False
return
# [TODO] Uncomment function once we figure out what is the correct value for required arg "kind".
# [TODO] Then, use method to define halt condition when an unexpected state is reached (in halt clause).
#def getStatus(self):
# """Check current FMU status.
# """
# return self.fmu.getState(kind=)
|
24,365 | 8bf3b69e64d79a90250aaa59895d1bfd6e4c5c85 | from flask import *
from datetime import *
from flaskext.sqlalchemy import SQLAlchemy
from flaskext.markdown import Markdown
# Flask application and extension setup for the ducttape site.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ducttape-site.db'
app.config['SQLALCHEMY_ECHO'] = False
# NOTE(review)/SECURITY: hard-coded fallback secret key — overridden by the
# config file below when present, but should not ship as-is.
app.config['SECRET_KEY'] = 'lolverysecret'
# Optional deployment overrides (silent=True: a missing file is not an error).
app.config.from_pyfile('../ducttape-site.cfg', silent=True)
db = SQLAlchemy(app)
# safe_mode="escape": HTML in user-supplied Markdown is escaped, not rendered.
Markdown(app, safe_mode="escape")
# Imported for side effects (filter/view/model registration); must come after
# 'app' and 'db' exist — a deliberate late-import pattern, do not move up.
import ducttape.filters
import ducttape.views
import ducttape.models
@app.context_processor
def inject_time():
    """Expose the current UTC time to every template as 'current_datetime'."""
    return {"current_datetime": datetime.utcnow()}
|
24,366 | 9a5fcd5c7548b589dc4360c7bbb4dbb735e665e5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/10/23 16:05
# @Author : litianshuang
# @Email : litianshuang@jingdata.com
# @File : 61.py
# @Desc :
class ListNode:
    """A singly linked list node holding a value and a successor pointer."""

    def __init__(self, val, next):
        # 'next' intentionally mirrors the LeetCode signature even though it
        # shadows the builtin.
        self.val = val
        self.next = next
class Solution(object):
    def rotateRight(self, head, k):
        """Rotate the list to the right by k places.

        Single-splice O(n) implementation: the original rotated one node
        at a time (O(n*k_eff)) and handled k<=total and k>total with two
        separate code paths; `k % total` unifies both, with identical
        results (k == 0 or a multiple of the length leaves the list as-is).

        :type head: ListNode
        :type k: int
        :rtype: ListNode
        """
        if head is None:
            return None
        # One pass: count the nodes and locate the tail.
        total = 1
        tail = head
        while tail.next is not None:
            tail = tail.next
            total += 1
        k_eff = k % total
        if k_eff == 0:
            return head
        # The new tail sits (total - k_eff - 1) hops from the head.
        new_tail = head
        for _ in range(total - k_eff - 1):
            new_tail = new_tail.next
        new_head = new_tail.next
        # Splice: cut after the new tail and re-attach the old tail to the
        # old head.
        new_tail.next = None
        tail.next = head
        return new_head
def make_list(vals=None):
    """Build a singly linked list from *vals* and return its head.

    vals: iterable, optional
        Values for the nodes, in order. Defaults to [1, 2, 3, 4, 5],
        matching the original hard-coded behavior (backward compatible).
    """
    # None sentinel instead of a mutable default argument.
    if vals is None:
        vals = [1, 2, 3, 4, 5]
    head = None
    tail = None
    for val in vals:
        node = ListNode(val, None)
        if head is None:
            head = node
            tail = node
        else:
            tail.next = node
            tail = node
    return head
# Ad-hoc smoke test: rotate [1..5] right by 8 (== 3 effective) and print the
# resulting values. NOTE: Python 2 print-statement syntax.
if __name__ == "__main__":
    s = Solution()
    head = make_list()
    ret = s.rotateRight(head, 8)
    while ret is not None:
        print ret.val
        ret = ret.next
|
24,367 | 181a4e6b0a4faab160f2f5491b3f01ddcb27edf9 | """
thumbsup - a website sreenshot service based on PhantomJS and Tornado
"""
import os
import socket
import subprocess
import logging
from functools import partial
from urlparse import urlparse, urlunparse
import tornado.ioloop
import tornado.web
# If using a new python, import the lru_cache from stdlib otherwise
# use the backported functools module
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
from thumbsup import urlnorm, calls, paths
@lru_cache(maxsize=100000)
def domain_exists(domain):
    """Resolve *domain* and return its IP address, or None if it does not
    resolve. Results (including failures) are memoized via lru_cache, so
    each domain hits DNS at most once per cache lifetime.
    """
    try:
        logging.debug("Checking for existance of non-cached domain")
        return socket.gethostbyname(domain)
    except socket.gaierror:
        # Lazy %-style logging args (idiomatic; avoids formatting when the
        # ERROR level is disabled). Output text is unchanged.
        logging.error("Domain not found - %s", domain)
        return None
class TaskChain(object):
    """
    Defines a chain of external calls to be executed in order.

    Commands (argv lists) and their result callbacks are attached via
    attach(); calling the instance starts the chain on the Tornado IOLoop.
    Each subprocess is spawned with Popen, and _execute is re-registered
    as the IOLoop handler for the process's stdout so the next link runs
    when the current process's output is ready. On any failed link the
    chain stops and errback() fires; when all links succeed, callback()
    fires.
    """
    def __init__(self, callback, errback):
        # Parallel FIFO queues: commands[i] is processed by callbacks[i].
        self.commands = []
        self.callbacks = []
        self.callback = callback   # invoked once the whole chain succeeds
        self.errback = errback     # invoked when any link reports failure
        # Shared Popen options; stdout/stderr merged into one pipe.
        self.callopts = {
            "stdin": subprocess.PIPE,
            "stdout": subprocess.PIPE,
            "stderr": subprocess.STDOUT,
            "close_fds": True,
        }
        self.ioloop = tornado.ioloop.IOLoop.instance()
    def __call__(self):
        """Start the chain (callback/errback must have been provided)."""
        assert self.callback
        assert self.errback
        # Bootstrap: no previous process, no result handler yet.
        self._execute(None, None, None)
    def attach(self, command, callback):
        """Queue one external command and the handler for its result."""
        self.commands.append(command)
        self.callbacks.append(callback)
    def _execute(self, fd, events, to_call):
        """Process the previous link's result (if any) and launch the next.

        fd/events come from the IOLoop handler invocation; to_call is the
        result-processing callback bound when the previous link started.
        """
        success = True
        if to_call is not None:
            assert self.pipe
            # to_call returns truthy on success for the finished process.
            success = to_call(self.pipe)
            logging.debug("Removing handler %s" % fd)
            self.ioloop.remove_handler(fd)
        # Bail if something in the chain breaks
        # or we run out of commands
        if not success:
            self.errback()
            return
        if not self.commands:
            self.callback()
            return
        callargs = self.commands.pop(0)
        nextcall = self.callbacks.pop(0)
        logging.debug("Calling popen")
        self.pipe = subprocess.Popen(callargs, **self.callopts)
        # The handler is the most important bit here. We add the same
        # method as a handler, with the callback for processing the
        # result already passed as the to_call arg.
        logging.debug("Attaching handler to %s " % self.pipe.stdout.fileno())
        self.ioloop.add_handler(self.pipe.stdout.fileno(),
                                partial(self._execute, to_call=nextcall),
                                self.ioloop.ERROR)
class ThumbnailHandler(tornado.web.RequestHandler):
    """Serve (and lazily render) website thumbnails.

    GET normalizes the requested host, verifies the domain resolves, and
    either redirects to an already-rendered thumbnail under /static or
    kicks off a PhantomJS-render + ImageMagick-resize TaskChain.
    """
    # NOTE(review): mutable class attribute; it is shadowed by the instance
    # assignment in __init__, so instances never mutate this shared dict.
    settings = {}
    def __init__(self, *args, **kwargs):
        # App-level settings are injected through the handler kwargs.
        self.settings = kwargs.pop('settings')
        # Optional custom digest function for building cache filenames.
        if "digest" in kwargs:
            self.filename_digest = kwargs.pop("digest")
        else:
            self.filename_digest = paths._simple_digest
        super(ThumbnailHandler, self).__init__(*args, **kwargs)
    @property
    def redirect_location(self):
        # Thumbnails are served straight from the static handler.
        return "/static/%s" % (self.filename)
    def _make_external_calls(self, host, destination,
                             view_size, thumb_size, ip):
        """Build and start the render->resize chain for *host*.

        On success the client is redirected to the finished thumbnail;
        on failure a 504 is sent.
        """
        # Define the actions for success and failure
        success = partial(self.redirect, self.redirect_location)
        failure = partial(self.send_error, 504)
        fetch_and_resize = TaskChain(success, failure)
        # Phantomjs
        callargs = calls.call_phantom(self.settings["phantomjs_path"],
                                      self.settings["render_script"],
                                      host, destination, view_size,
                                      self.settings["ua_string"], ip)
        logging.debug(callargs)
        fetch_and_resize.attach(callargs, calls.on_phantom)
        # Thumbnail the image
        callargs = calls.call_imagic_resize(destination, thumb_size)
        logging.debug(callargs)
        fetch_and_resize.attach(callargs, calls.on_magic)
        #Start execution
        logging.debug("Handler complete, relaying to async chain")
        fetch_and_resize()
    @tornado.web.asynchronous
    def get(self):
        """Validate the ?host= argument and serve or render its thumbnail."""
        try:
            host = self.get_argument("host")
            # If we don't have a default scheme, default to http
            # We can't support relative paths anyway.
            components = urlparse(host)
            if not components.scheme:
                components = urlparse("http://" + host)
            components = list(components)
            # Encode the domain according to idna
            domain = components[1].encode("idna")
            components[1] = domain
            # Bail out early (504) for domains that do not resolve.
            if not domain_exists(domain):
                self.send_error(504)
                return
            norm_host = urlunparse(urlnorm.norm(components))
        except (UnicodeError, AttributeError) as e:
            logging.error("Invalid address provided - %s" % host)
            logging.error(e)
            self.send_error(504)
            return
        # Per-request overrides, defaulting to the app settings.
        view_size = self.get_argument("view_size",
                                      self.settings["view_size"]).lower()
        thumb_size = self.get_argument("thumb_size",
                                       self.settings["thumb_size"]).lower()
        image_format = self.get_argument("image_format",
                                         self.settings["image_format"]).lower()
        # Cache key: digest of domain + normalized URL + sizes.
        img_hash = self.filename_digest(domain, norm_host,
                                        view_size, thumb_size)
        self.filename = "%s.%s" % (img_hash, image_format)
        destination = os.path.join(self.settings["static_path"], self.filename)
        if os.path.isfile(destination):
            logging.info("%s exists already, redirecting" % norm_host)
            self.redirect(self.redirect_location)
        else:
            logging.info("%s not found, starting render" % norm_host)
            self._make_external_calls(norm_host, destination,
                                      view_size, thumb_size,
                                      self.request.remote_ip)
|
24,368 | 8fa32bf895b4e9cd8967c6355237a115b4932edb | from threading import Thread
from Tkinter import *
import random
import time
import Queue
# Shared state: worker threads only ever q.put(); all widget work happens on
# the Tk main thread via root.after() polling (see update()).
q = Queue.Queue()
root = Tk()
canvas = Canvas(root, width=500, height=500)
canvas.pack()
def spin(i):
    # Worker body: simulate a 3-10s job, then report back through the queue.
    # (Python 2 print-statement syntax.)
    time.sleep(random.randint(3,10))
    print "hello?"
    q.put("hello world %d" % i)
def update():
    """Poll the worker queue from the Tk thread, redraw, and reschedule.

    Runs every 100 ms via root.after; ret stays None when no worker
    message is pending, so the text item is drawn empty.
    """
    ret = None
    try:
        ret = q.get_nowait()
    except Queue.Empty:
        # Narrowed from a bare `except:` — only "queue empty" is expected
        # here; anything else should surface instead of being swallowed.
        pass
    canvas.create_rectangle(0,0,random.randint(10, 50),50)
    canvas.create_text(50, random.randint(100, 300), text=ret)
    root.after(100, update)
def main():
    # Start ten background workers, then drive the UI from the Tk event
    # loop; update() re-schedules itself every 100 ms.
    for i in range(10):
        Thread(target=spin, args=(i,)).start()
    root.after(100, update)
    root.mainloop()
main()
|
24,369 | c681b1bdda2176794a576f8e30cac0a3e9a2a785 | #%%
from scipy.special import comb
from utils import *
#%%
# Notebook-style demo of the local `utils` module (binary_iid / ensamble /
# bern are defined there — their exact semantics are not visible here).
# Presumably: a length-10 i.i.d. binary source over symbols [1, 0] with
# Bernoulli(0.2) — TODO confirm against utils.
bid = binary_iid([1, 0], bern(0.2), 10)
print(bid.num_strings({0: 2}))
print(bid.prob_of_string({0: 2}))
print(bid.set_prob({0: 2}))
#%%
# Single-symbol ensemble with Bernoulli(0.1); draw 20 samples and report
# information content / entropy / probability of symbol 1.
bc= ensamble([1,0],bern(0.1))
print(sum([bc.generate_symbol() for _ in range(20)]))
print(bc.info_content(1), bc.entropy(1), bc.get_prob([1]))
# Count strings containing exactly k ones, for k = 0..N.
for k in range(bid.N + 1):
    print(k, bid.num_strings({1: k}))
|
24,370 | 968baeec53b688e338ada8d2c4ec785b7e091559 | ## for syntax
|
24,371 | af24371f664c27e9a73c4020d345729ad09c6edc | import c
import unittest
class TestContest540(unittest.TestCase):
    """Sample tests for problem C of contest 540."""

    def test_problem_c(self):
        # (rows, cols, grid, start, finish, expected) — the four samples.
        cases = [
            (4, 6,
             ['X...XX', '...XX.', '.X..X.', '......'],
             (1, 6), (2, 2), 'YES'),
            (5, 4,
             ['.X..', '...X', 'X.X.', '....', '.XX.'],
             (5, 3), (1, 1), 'NO'),
            (4, 7,
             ['..X.XX.', '.XX..X.', 'X...X..', 'X......'],
             (2, 2), (1, 6), 'YES'),
            (2, 2,
             ['..', 'XX'],
             (2, 1), (1, 1), 'YES'),
        ]
        for rows, cols, grid, start, finish, expected in cases:
            self.assertEqual(c.solve(rows, cols, grid, start, finish), expected)
# Allow running this test module directly (python test_file.py).
if __name__ == '__main__':
    unittest.main()
|
24,372 | ca9d3d2d8f149beb74a67192d74c437aad92714c | print("Anas Ahmed")
print("(18B-116-CS),Sec-A")
print("Practice Problem 3.3(b)")
# SECURITY NOTE(review): eval() on raw user input executes arbitrary code.
# Prefer int(input(...)) (or another validated parse) for ticket numbers.
list_ticket = eval(input("Enter Your Ticket NO#:"))
list_lottery = eval(input("Enter your Lottery NO#:"))
# Win iff the ticket exactly matches the drawn number.
if list_ticket==list_lottery:
    print("YOU WON! :)")
else:
    print("Sorry Try AGAIN")
|
24,373 | 6e998cb02ff867ddd0ab7e0e2e32989f64281889 | from . route_branch_manager_tests import *
|
24,374 | cbc1ddba45335eb6cde89f493b28051a2277d2ce | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from matplotlib.colors import ListedColormap
# Gaussian Naive Bayes on two features of PumpingData.csv, then a confusion
# matrix and accuracy report.
# NOTE(review): hard-coded absolute Windows path — works only on the original
# machine.
dataset=pd.read_csv(r"C:\Users\student\Desktop\PumpingData.csv")
# Features = columns 3 and 4; target = column 5 (CSV schema not visible here
# — confirm against the data file).
X=dataset.iloc[:,[3,4]].values
y=dataset.iloc[:,5].values
# NOTE(review): duplicate import — train_test_split is already imported above.
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=0)
# Gaussian NB is what is actually fitted (the KNeighborsClassifier import at
# the top is unused).
from sklearn.naive_bayes import GaussianNB
classif=GaussianNB()
classif.fit(X_train,y_train)
y_pred=classif.predict(X_test)
print(y_pred)
# NOTE(review): duplicate import — confusion_matrix is already imported above.
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
print(cm)
arr=np.array(cm)
# NOTE(review): this TP/TN/FP/FN labelling assumes the first sorted class
# label is the "positive" one — confirm. Accuracy/misclassification below are
# label-order independent (diagonal vs off-diagonal sums), so they are
# correct either way.
TP=int(arr[0, 0])
TN=int(arr[1, 1])
FP=int(arr[1, 0])
FN=int(arr[0, 1])
total=TP+TN+FP+FN
accuracy=(TP+TN)/total
Misclassificationrate=(FP+FN)/total
print(accuracy)
print(Misclassificationrate)
|
24,375 | 27d5b10fa738d5f6c851e794aa8e714756950bc3 | import numpy as np
import pytest
import stk
from .case_data import CaseData
# Building blocks shared by the fixture cases below; the canonical variants
# are what with_canonical_atom_ordering() is expected to produce for the
# ConstructedMolecule case.
bb1 = stk.BuildingBlock("[C+2][N+]Br", [stk.BromoFactory()])
canonical_bb1 = bb1.with_canonical_atom_ordering()
bb2 = stk.BuildingBlock("IS[O+]", [stk.IodoFactory()])
canonical_bb2 = bb2.with_canonical_atom_ordering()
@pytest.fixture(
    params=(
        # Case 1: a plain BuildingBlock and its expected canonical ordering,
        # including the remapped functional group and placer ids.
        lambda: CaseData(
            molecule=stk.BuildingBlock(
                smiles="Br[C+2][N+]Cl",
                functional_groups=[stk.BromoFactory()],
                placer_ids=(0,),
            ),
            result=stk.BuildingBlock.init(
                atoms=(
                    stk.Cl(0),
                    stk.Br(1),
                    stk.C(2, 2),
                    stk.N(3, 1),
                ),
                bonds=(
                    stk.Bond(stk.Cl(0), stk.N(3, 1), 1),
                    stk.Bond(stk.Br(1), stk.C(2, 2), 1),
                    stk.Bond(stk.C(2, 2), stk.N(3, 1), 1),
                ),
                position_matrix=np.array([]),
                functional_groups=(
                    stk.Bromo(
                        bromine=stk.Br(1),
                        atom=stk.C(2, 2),
                        bonders=(stk.C(2, 2),),
                        deleters=(stk.Br(1),),
                    ),
                ),
                placer_ids=(1,),
            ),
        ),
        # Case 2: an AB linear polymer of bb1/bb2 and its expected canonical
        # form, with per-atom and per-bond provenance (the bond formed during
        # construction carries building_block=None).
        lambda: CaseData(
            molecule=stk.ConstructedMolecule(
                topology_graph=stk.polymer.Linear(
                    building_blocks=(bb1, bb2),
                    repeating_unit="AB",
                    num_repeating_units=1,
                ),
            ),
            result=stk.ConstructedMolecule.init(
                atoms=(
                    stk.C(0, 2),
                    stk.O(1, 1),
                    stk.N(2, 1),
                    stk.S(3),
                ),
                bonds=(
                    stk.Bond(stk.C(0, 2), stk.N(2, 1), 1),
                    stk.Bond(stk.O(1, 1), stk.S(3), 1),
                    stk.Bond(stk.N(2, 1), stk.S(3), 1),
                ),
                position_matrix=np.array([]),
                atom_infos=(
                    stk.AtomInfo(
                        atom=stk.C(0, 2),
                        building_block_atom=stk.C(0, 2),
                        building_block=canonical_bb1,
                        building_block_id=0,
                    ),
                    stk.AtomInfo(
                        atom=stk.O(1, 1),
                        building_block_atom=stk.O(0, 1),
                        building_block=canonical_bb2,
                        building_block_id=1,
                    ),
                    stk.AtomInfo(
                        atom=stk.N(2, 1),
                        building_block_atom=stk.N(2, 1),
                        building_block=canonical_bb1,
                        building_block_id=0,
                    ),
                    stk.AtomInfo(
                        atom=stk.S(3),
                        building_block_atom=stk.S(2),
                        building_block=canonical_bb2,
                        building_block_id=1,
                    ),
                ),
                bond_infos=(
                    stk.BondInfo(
                        bond=stk.Bond(stk.C(0, 2), stk.N(2, 1), 1),
                        building_block=canonical_bb1,
                        building_block_id=0,
                    ),
                    stk.BondInfo(
                        bond=stk.Bond(stk.O(1, 1), stk.S(3), 1),
                        building_block=canonical_bb2,
                        building_block_id=1,
                    ),
                    stk.BondInfo(
                        bond=stk.Bond(stk.N(2, 1), stk.S(3), 1),
                        building_block=None,
                        building_block_id=None,
                    ),
                ),
                num_building_blocks={
                    canonical_bb1: 1,
                    canonical_bb2: 1,
                },
            ),
        ),
    ),
)
def case_data(request):
    """Return the CaseData for the current parametrization.

    Params are lambdas so each test gets a freshly built molecule.
    """
    return request.param()
|
24,376 | 23dbb952eb756cfc398642bc13fc9b52b0bf3b27 | from .base import *
import django_heroku
import dj_database_url
from google.oauth2 import service_account
# Production settings: extends base.py; targets Heroku (database) plus
# Google Cloud Storage (static/media files).
DEBUG = False
DATABASES = {}
# Database from the DATABASE_URL env var (Heroku), TLS enforced.
DATABASES["default"] = dj_database_url.config(conn_max_age=600, ssl_require=True)
CORS_ORIGIN_WHITELIST = [
    "https://jumga-1.netlify.app",
    "http://jumga-1.netlify.app",
    "https://jumgaapi.herokuapp.com",
]
# NOTE(review): ALLOWED_HOSTS entries are matched as host names by Django;
# the scheme-qualified ("https://...") entries here are inert — only the
# bare "jumgaapi.herokuapp.com" entry takes effect. Confirm and prune.
ALLOWED_HOSTS += [
    "https://jumga-1.netlify.app",
    "http://jumga-1.netlify.app",
    "https://jumgaapi.herokuapp.com",
    "http://jumgaapi.herokuapp.com",
    "jumgaapi.herokuapp.com",
]
CSRF_TRUSTED_ORIGINS = [
    "localhost:3000",
    "https://jumga-1.netlify.app",
    "http://jumga-1.netlify.app",
    "https://jumgaapi.herokuapp.com",
    "http://jumgaapi.herokuapp.com",
]
# Payment keys come from the environment, never from the repo.
FLUTTERWAVE_PUBLIC_KEY = os.getenv("FLUTTERWAVE_PUBLIC_KEY")
FLUTTERWAVE_SECRET_KEY = os.getenv("FLUTTERWAVE_SECRET_KEY")
# Static and media files served out of Google Cloud Storage.
DEFAULT_FILE_STORAGE = "jumga.settings.gcloud.GoogleCloudMediaFileStorage"
STATICFILES_STORAGE = "jumga.settings.gcloud.GoogleCloudStaticFileStorage"
GS_PROJECT_ID = "remakeu-5d060"
GS_STATIC_BUCKET_NAME = "remakeu-5d060.appspot.com"
GS_MEDIA_BUCKET_NAME = "remakeu-5d060.appspot.com" # same as STATIC BUCKET if using single bucket both for static and media
# Service-account credentials file is expected next to the project root.
GS_CREDENTIALS = service_account.Credentials.from_service_account_file(
    os.path.join(BASE_DIR, "remakeU-36d3620cc3ec.json")
)
STATIC_URL = "https://storage.googleapis.com/{}/static/".format(GS_STATIC_BUCKET_NAME)
STATIC_ROOT = "static/"
MEDIA_URL = "https://storage.googleapis.com/{}/media/".format(GS_MEDIA_BUCKET_NAME)
MEDIA_ROOT = "media/"
24,377 | 4aefc6b41ff42bc2daafa84c3809dbde3a846eb9 | from mpi4py import MPI
import numpy as np
# Rank 0 scatters one random array to every worker rank and sums the partial
# sums they send back. Generalized from the original hard-coded 3-worker
# layout: works for any communicator size >= 2, and produces output identical
# to the original when run with 4 processes.
comm = MPI.COMM_WORLD
number_of_processors=comm.Get_size()
rank = comm.Get_rank()
if rank==0:
    result=0
    # Generate and send one 1x1e6 array per worker (same gen/send order as
    # the original).
    for dest in range(1, number_of_processors):
        array=np.random.randn(1,1000000)
        comm.send(array,dest=dest,tag=1)
    print("Data has been sent. \n")
    # Collect the partial sums in rank order.
    for i in range(1,number_of_processors):
        result+= comm.recv(source=i,tag=2)
    # Wording kept byte-identical to the original output.
    print("Sum of all the three arrays is:{}\n".format(result))
else:
    # Worker: receive an array, sum it, report the partial sum back.
    data=comm.recv(source=0,tag=1)
    partial_sum=np.sum(data)
    # Same text the original printed per worker ("array_<rank>").
    print("Sum of array_{} is :{}\n".format(rank, partial_sum))
    comm.send(partial_sum,dest=0,tag=2)
|
24,378 | cb7b3eac85c095cac42e988982cd25fc979bd54f | import gzip
# Snakemake rule script: read per-site rows from the raw (gzipped) table,
# splice in one extra count column per filtered input file, and write the
# adjusted table. The `snakemake` object is injected by Snakemake at runtime.
sites = []          # site ids, preserving the raw file's order
reads = {}          # site id -> output row (list of column strings)
header_lines = []   # '#' header lines, copied through verbatim
with gzip.open(snakemake.input.raw_table, "rt") as infile:
    for line in infile:
        if line.startswith("#"):
            header_lines.append(line)
            continue
        F = line.rstrip().split("\t")
        # Site id = first three columns joined (presumably chrom:start:end
        # — confirm against the table format).
        site_id = ":".join(F[0:3])
        sites.append(site_id)
        # Keep the three id columns plus the last two columns of the row.
        reads[site_id] = [F[0], F[1], F[2], F[-2], F[-1]]
for in_f in snakemake.input.filtered:
    with open(in_f, "r") as ifile:
        for line in ifile:
            line_list = line.rstrip().split("\t")
            curr_id = ":".join(line_list[0:3])
            # Insert each filtered count just before the trailing two columns,
            # so counts appear in the order the filtered files are listed.
            reads[curr_id].insert(-2, line_list[3])
with gzip.open(snakemake.output.table_adjusted, "wt") as out_file:
    for h in header_lines:
        out_file.write("%s" % h)
    for s in sites:
        out_file.write("%s\n" % "\t".join( reads[s] ) )
|
24,379 | 4b80a551bf48521d0bb7b105058f4b469c12c73e | from zhinst.toolkit import Session
from zhinst.toolkit.driver.nodes.awg import AWG
import enum
from typing import Optional
from dataclasses import dataclass
import numpy as np
class LogType(enum.Enum):
    """Category of a decoded RT Logger entry (see print_rtlogger_data)."""
    # Members use auto(): do not reorder, the numeric values would change.
    Error = enum.auto()
    Trigger = enum.auto()
    ZSync_Feedback = enum.auto()
    Internal_Feedback = enum.auto()
    ZSync_AUX = enum.auto()
    DIO = enum.auto()
class TriggerSource(enum.Flag):
    """Bit flags identifying which trigger(s) fired; combinable via `|`."""
    DigTrigger1 = enum.auto()
    DigTrigger2 = enum.auto()
    ZSyncTrigger = enum.auto()
@dataclass
class LogEntry:
    """A single decoded RT Logger record.

    Which optional fields are populated depends on log_type (see
    print_rtlogger_data, which constructs these).
    """

    time_clk: int    # timestamp in clock cycles
    time_us: float   # timestamp in microseconds
    log_type: LogType
    trigger_source: Optional[TriggerSource] = None  # Trigger entries only
    raw: Optional[int] = None         # raw logged word
    processed1: Optional[int] = None  # register / processed value
    processed2: Optional[int] = None  # decoder value (ZSync feedback)
    addr: Optional[int] = None        # ZSync AUX address
    data: Optional[int] = None        # ZSync AUX data

    def __str__(self):
        entry_str = f"{self.time_clk:<7d}\t{self.time_us:.3f}\t{self.log_type.name:s} "
        if self.log_type == LogType.Error:
            entry_str += "Collision error"
        # BUG FIX: this branch was a separate `if`, so an Error entry fell
        # through the elif chain below and hit the final `raise`.
        elif self.log_type == LogType.Trigger:
            entry_str += f"source({self.trigger_source.name:s})"
        elif (
            self.log_type == LogType.ZSync_Feedback
            and self.processed1 is not None
            and self.processed2 is not None
        ):
            entry_str += f"raw(0x{self.raw:04X}) register(0x{self.processed1:04X}) decoder(0x{self.processed2:04X})"
        elif self.log_type == LogType.ZSync_Feedback:
            entry_str += f"raw(0x{self.raw:04X})"
        elif self.log_type == LogType.Internal_Feedback:
            entry_str += f"raw(0x{self.raw:04X}) processed(0x{self.processed1:04X})"
        elif self.log_type == LogType.ZSync_AUX:
            entry_str += f"addr(0x{self.addr:04X}) data(0x{self.data:08X})"
        elif self.log_type == LogType.DIO:
            entry_str += f"raw(0x{self.raw:08X})"
        else:
            raise RuntimeError("Unknown log type!")
        return entry_str
def reset_and_enable_rtlogger(
    awg: AWG, input: str = "zsync", start_timestamp: Optional[int] = None
) -> None:
    """Reset and start a given RT Logger.

    Args:
        awg: AWG node of the RTLogger.
        input: The source of data that it should log. Either "dio" or
            "zsync". (default: "zsync")
            NOTE(review): parameter shadows the `input` builtin; kept for
            interface compatibility.
        start_timestamp: optional start timestamp; if provided, timestamp
            mode is used.
    """
    awg.rtlogger.clear(True)  # Clear the logs
    if start_timestamp is None:
        # Starts with the AWG and overwrites old values as soon as the memory limit
        # is reached.
        awg.rtlogger.mode("normal")
    else:
        # Starts with the AWG, waits for the first valid trigger, and only starts
        # recording data after the time specified by the start_timestamp.
        # Recording stops as soon as the memory limit is reached.
        awg.rtlogger.mode("timestamp")
        awg.rtlogger.starttimestamp(start_timestamp)
    # Set the input of the rtlogger
    # This is necessary only on the SHF family,
    # on the HDAWG such node is absent, since the input
    # is selected by the node dios/0/mode
    if awg.rtlogger.input:
        awg.rtlogger.input(input)
    # Start the rtlogger (deep=True: synchronous write to the device node).
    awg.rtlogger.enable(True, deep=True)
def _get_trigdelay(session: Session, awg: AWG) -> int:
    """Read the ZSync trigger delay for *awg*, in clock cycles.

    Note: this relies on a raw node; raw nodes are for internal purposes
    only, undocumented, and not guaranteed in future releases — shown here
    for illustration only.

    Args:
        session: Toolkit session to a data server.
        awg: AWG node.

    Returns:
        The delay of the ZSync trigger, in clock cycles.
    """
    # Build ".../raw/<rest-of-awg-path>/zsync/trigdelay" from the node path.
    parts = str(awg).split("/")
    parts.insert(2, "raw")
    parts.extend(["zsync", "trigdelay"])
    trigdelay_node = "/".join(parts)
    trig_delay = session.daq_server.getInt(trigdelay_node)
    # SG and QA channels report the delay in 500 MHz units: halve, round up.
    awg_path = str(awg)
    if "sgchannels" in awg_path or "qachannels" in awg_path:
        trig_delay = int(np.ceil(trig_delay / 2))
    return trig_delay
def print_rtlogger_data(
    session: Session,
    awg: AWG,
    compensate_start_trigger: bool = True,
    max_lines: Optional[int] = None,
    silent: bool = False,
) -> Optional[list[LogEntry]]:
    """Print (or return) the data collected by the RT Logger.

    Args:
        session: Toolkit session to a data server.
        awg: AWG node.
        compensate_start_trigger: True if the start trigger delay should
            be compensated for (timestamps restart at each trigger).
        max_lines: Maximum number of log entries to decode/print.
        silent: if True - don't print anything, return the list of events.
            (Default: False)

    Returns:
        list[LogEntry]: list of logged events, if silent is True,
        otherwise None.
    """
    rtlogger = awg.rtlogger
    # Fetch the output of the rtlogger and decode.
    # Each record is 4 int64 words; the dtype assignment reinterprets the
    # array as named records without copying.
    rtdata = rtlogger.data().reshape((-1, 4))
    rtdata.dtype = np.dtype(
        [
            ("timestamp", np.int64),
            ("value", np.int64),
            ("source", np.int64),
            ("error", np.int64),
        ]
    )
    # Get various parameters: timebase plus the shift/mask/offset the device
    # applies to ZSync register, ZSync decoder and internal feedback data.
    timebase = rtlogger.timebase()
    reg_node = awg.zsync.register
    if reg_node:
        reg_shift, reg_mask, reg_offset = (
            reg_node.shift(),
            reg_node.mask(),
            reg_node.offset(),
        )
    else:
        reg_shift, reg_mask, reg_offset = (0, 0, 0)
    dec_node = awg.zsync.decoder
    if dec_node:
        dec_shift, dec_mask, dec_offset = (
            dec_node.shift(),
            dec_node.mask(),
            dec_node.offset(),
        )
    else:
        dec_shift, dec_mask, dec_offset = (0, 0, 0)
    intfeedback_node = awg.intfeedback.direct
    if intfeedback_node:
        int_shift, int_mask, int_offset = (
            intfeedback_node.shift(),
            intfeedback_node.mask(),
            intfeedback_node.offset(),
        )
    else:
        int_shift, int_mask, int_offset = (0, 0, 0)
    if compensate_start_trigger:
        trig_delay = _get_trigdelay(session, awg)
    else:
        trig_delay = 0
    base_ts = 0
    # Process the raw data.
    max_lines = max_lines or len(rtdata)
    entries = []
    for i in range(max_lines):
        line = rtdata[i]
        raw_value = int(line["value"])
        # the +2 is due to the difference between the
        # rtlogger and the sequencer behavior
        ts = int(line["timestamp"]) - base_ts + 2
        ts_s = ts * timebase * 1e6
        # Check collision error (takes precedence over any decoding).
        if line["error"]:
            entry = LogEntry(ts, ts_s, LogType.Error)
            entries.append(entry)
            continue
        # - Trigger processing (source == 2 covers triggers and AUX words;
        #   the bit patterns below distinguish them).
        trigger_source = TriggerSource(0)
        if line["source"] == 2:
            # Dig trigger 1
            if raw_value & 0x40000000:
                trigger_source |= TriggerSource.DigTrigger1
            # Dig trigger 2
            if raw_value & 0x80000000:
                trigger_source |= TriggerSource.DigTrigger2
            # ZSync trigger
            if (raw_value & 0xC0000000) == 0 and (raw_value & 0xFF) == 0x08:
                trigger_source = TriggerSource.ZSyncTrigger
        if bool(trigger_source):
            # Reset the time counter if requested, so later entries are
            # reported relative to (and corrected for) this trigger.
            if compensate_start_trigger:
                base_ts = int(line["timestamp"]) + trig_delay
                ts = 0
                ts_s = 0.0
            else:
                ts = int(line["timestamp"])
                ts_s = ts * timebase * 1e6
            # We got a trigger, save it
            entry = LogEntry(ts, ts_s, LogType.Trigger, trigger_source=trigger_source)
            entries.append(entry)
            continue
        # - ZSync feedback processing (register/decoder views only when the
        #   corresponding nodes exist on this device).
        if line["source"] == 1:
            if reg_node and dec_node:
                register_data = ((raw_value >> reg_shift) & reg_mask) + reg_offset
                decoder_data = ((raw_value >> dec_shift) & dec_mask) + dec_offset
                entry = LogEntry(
                    ts,
                    ts_s,
                    LogType.ZSync_Feedback,
                    raw=raw_value,
                    processed1=register_data,
                    processed2=decoder_data,
                )
            else:
                entry = LogEntry(ts, ts_s, LogType.ZSync_Feedback, raw=raw_value)
            entries.append(entry)
            continue
        # - Internal feedback processing
        if line["source"] == 3:
            processed_data = ((raw_value >> int_shift) & int_mask) + int_offset
            entry = LogEntry(
                ts,
                ts_s,
                LogType.Internal_Feedback,
                raw=raw_value,
                processed1=processed_data,
            )
            entries.append(entry)
            continue
        # - DIO processing
        if line["source"] == 0:
            entry = LogEntry(ts, ts_s, LogType.DIO, raw=raw_value)
            entries.append(entry)
            continue
        # - ZSync AUX processing (source 2 word that is not a trigger).
        if (
            line["source"] == 2
            and (raw_value & 0xC0000000) == 0
            and (raw_value & 0xFF) == 0x01
        ):
            addr = (raw_value >> 8) & 0xFFFF
            data = (raw_value >> 16) & 0x3FFF
            entry = LogEntry(ts, ts_s, LogType.ZSync_AUX, addr=addr, data=data)
            entries.append(entry)
            continue
    if silent:
        return entries
    else:
        # Print the RTLogger logs
        print("t[clk]\tt[us]\tData")
        for entry in entries:
            print(entry)
        return None  # Avoid dump to console in interactive session
|
24,380 | 384c951dd5d3b931aa4a7dd5023efa395d4cec50 | from twisted.web.client import getPage
from plugin_lib import command
import xml.etree.ElementTree
def parse_body(body, bot, url, channel):
    """Parse a woot salerss.aspx payload and announce the sale on *channel*."""
    WOOT_NS = '{http://www.woot.com/}'
    root = xml.etree.ElementTree.fromstring(body)
    price = root.findtext('channel/item/' + WOOT_NS + 'price')
    condition = root.findtext('channel/item/' + WOOT_NS + 'condition')
    product = root.findtext('channel/item/title')
    # Prefix non-new items with their condition (e.g. "Refurbished Foo").
    if condition.lower() != 'new':
        product = condition + ' ' + product
    soldout = root.findtext('channel/item/' + WOOT_NS + 'soldout')
    wootoff_flag = root.findtext('channel/item/' + WOOT_NS + 'wootoff')
    # IRC colour codes: bold orange "WootOff!" badge when a woot-off is running.
    if wootoff_flag.lower() == 'false':
        wootoff = ''
    else:
        wootoff = '\x02\x0301,08WootOff!\x0F '
    if soldout.lower() != 'false':
        percent = '\x0300,05SOLD OUT\x0F'
    else:
        ratio = float(root.findtext('channel/item/' + WOOT_NS + 'soldoutpercentage'))
        percent = "%.2f%% sold" % (100 * ratio)
    bot.say(channel, "%s \x02%s\x0F %s: %shttp://%s.com/" % (price, product, percent, wootoff, url))
@command('woot')
def cmd_woot(bot, user, channel, args):
    """!woot [<sub>] # Returns the current woot sale for the current sub woot site"""
    # Default to the main woot.com site; "!woot shirt" targets shirt.woot.com.
    base_url = 'woot'
    if len(args) != 0:
        base_url = args[0] + '.' + base_url
    url = "http://%s.com/salerss.aspx" % (base_url)
    # Fetch the sale RSS asynchronously (Twisted deferred); parse_body
    # announces the result and fetch errors are logged, not raised.
    d = getPage(url, timeout=10)
    d.addCallback(parse_body, bot, base_url, channel)
    d.addErrback(bot.log.err)
|
24,381 | 63659614f27d761a90915423da12b8f5d0434605 | class AttrDisplay:
"""
Provides an inheritable print voerload method that displays
instances with their class names and a name=value pair for
each attribute stored on the instance itself (but not attrs
inherited from its calsses). Can be mixed into any class,
and will work in any instance.
"""
def getAllAttrs(self):
lists = []
for key in sorted(self.__dict__):
lists.append('%s=%s' % (key, getattr(self, key)))
return ', '.join(lists)
def __str__(self):
return '[%s: %s]' % (self.__class__.__name__, self.getAllAttrs())
# Self-test: mix AttrDisplay into two classes and print the instances.
if __name__ == '__main__':
    class TopTest(AttrDisplay):
        count = 0
        def __init__(self):
            # Each instance gets two distinct values from the class counter.
            self.attr1 = TopTest.count
            self.attr2 = TopTest.count+1
            TopTest.count += 2
    class SubTest(AttrDisplay):
        pass
    X,Y = TopTest(), SubTest()
    print(X, Y, sep='\n') # Show all instance attrs, show lowest class name
|
24,382 | c75c846d022b3f4b9b19f5d54247358ad7abaae2 | import torch
import torch.nn.functional as F
import numpy as np
import random
import cv2
from torch.utils.data import Dataset
class BaseDataset(Dataset):
def __init__(self,
ignore_label=-1,
base_size=2048,
crop_size=(512, 1024),
downsample_rate=1,
scale_factor=16,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
):
self.base_size = base_size
self.crop_size = crop_size
self.ignore_label = ignore_label
self.mean = mean
self.std = std
self.scale_factor = scale_factor
self.downsample_rate = 1. / downsample_rate
self.files = []
def __len__(self):
return len(self.files)
def input_transform(self, image):
image = image.astype(np.float32)[:, :, ::-1]
image = image / 255.0
image -= self.mean
image /= self.std
return image
def label_transform(self, label):
return np.array(label).astype('int32')
def pad_image(self, image, h, w, size, padvalue):
pad_image = image.copy()
pad_h = max(size[0] - h, 0)
pad_w = max(size[1] - w, 0)
if pad_h > 0 or pad_w > 0:
pad_image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=padvalue)
return pad_image
def rand_crop(self, image, label):
h, w = image.shape[:-1]
image = self.pad_image(image, h, w, self.crop_size, (0.0, 0.0, 0.0))
label = self.pad_image(label, h, w, self.crop_size, (self.ignore_label,))
new_h, new_w = label.shape
x = random.randint(0, new_w - self.crop_size[1])
y = random.randint(0, new_h - self.crop_size[0])
image = image[y:y + self.crop_size[0], x:x + self.crop_size[1]]
label = label[y:y + self.crop_size[0], x:x + self.crop_size[1]]
return image, label
def multi_scale_aug(self, image, label=None, rand_scale=1., rand_crop=True):
long_size = np.int(self.base_size * rand_scale + 0.5)
h, w = image.shape[:2]
if h > w:
new_h = long_size
new_w = np.int(w * long_size / h + 0.5)
else:
new_w = long_size
new_h = np.int(h * long_size / w + 0.5)
image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
if label is not None:
label = cv2.resize(label, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
else:
return image
if rand_crop:
image, label = self.rand_crop(image, label)
return image, label
def resize_short_length(self, image, label=None, short_length=None, fit_stride=None, return_padding=False):
h, w = image.shape[:2]
if h < w:
new_h = short_length
new_w = np.int(w * short_length / h + 0.5)
else:
new_w = short_length
new_h = np.int(h * short_length / w + 0.5)
image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
pad_w, pad_h = 0, 0
if fit_stride is not None:
pad_w = 0 if (new_w % fit_stride == 0) else fit_stride - (new_w % fit_stride)
pad_h = 0 if (new_h % fit_stride == 0) else fit_stride - (new_h % fit_stride)
image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT,
value=tuple(x * 255 for x in self.mean[::-1])
)
if label is not None:
label = cv2.resize(label, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
if pad_h > 0 or pad_w > 0:
label = cv2.copyMakeBorder(
label, 0, pad_h, 0, pad_w,
cv2.BORDER_CONSTANT, value=self.ignore_label
)
if return_padding:
return image, label, (pad_h, pad_w)
else:
return image, label
else:
if return_padding:
return image, (pad_h, pad_w)
else:
return image
def random_brightness(self, img, shift_value=10, brightness=False):
if not brightness:
return img
if random.random() < 0.5:
return img
self.shift_value = shift_value
img = img.astype(np.float32)
shift = random.randint(-self.shift_value, self.shift_value)
img[:, :, :] += shift
img = np.around(img)
img = np.clip(img, 0, 255).astype(np.uint8)
return img
def gen_sample(self, image, label, multi_scale=True, is_flip=True, brightness=True):
if multi_scale:
rand_scale = 0.5 + random.randint(0, self.scale_factor) / 10.0
image, label = self.multi_scale_aug(image, label, rand_scale=rand_scale)
image = self.random_brightness(img=image, brightness=brightness)
image = self.input_transform(image)
label = self.label_transform(label)
image = image.transpose((2, 0, 1))
if is_flip:
flip = np.random.choice(2) * 2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
if self.downsample_rate != 1:
label = cv2.resize(
label,
None,
fx=self.downsample_rate,
fy=self.downsample_rate,
interpolation=cv2.INTER_NEAREST
)
return image, label
def reduce_zero_label(self, labelmap):
labelmap = np.array(labelmap)
encoded_labelmap = labelmap - 1
return encoded_labelmap
def multi_scale_inference(self, model, image, scale=[1], flip=False):
batch, c, h, w = image.size()
assert batch == 1, "only supporting batchsize 1."
|
24,383 | 6c7b6fadd078f4516e7c59853e8f41ed34b27c0c | # Generated by Django 2.2.8 on 2020-09-07 23:54
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the unused fifth option column from the Question model."""

    dependencies = [
        ('Test', '0008_remove_question_img'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='question',
            name='opt5',
        ),
    ]
|
24,384 | 535720ae9f52256bbfc59fbb8dd028269f8f7880 | from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, render
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
def home(request):
    """Render the site's landing page (no template context needed)."""
    return render(request, 'home.html', {})
|
24,385 | 007a2e1158b46d47f44464078146ab6336658d9c | # Generated by Django 3.0.6 on 2021-01-27 08:52
from django.db import migrations
class Migration(migrations.Migration):
    """Renames Product.sub_cat to the clearer Product.subcategory."""

    dependencies = [
        ('home', '0006_product_available'),
    ]

    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='sub_cat',
            new_name='subcategory',
        ),
    ]
|
24,386 | 54d73678d6e6f6b395aafcf59eba83aa59ff21ee | """
Token class
Tokens each have a type, a lexeme and line.
The type tells us what type this token is. This is a TokenType object.
The lexeme is the set of characters that make up this token. This will typically be a string or character.
The line corresponds to the line number that we scanned this token on. This is useful for error handling.
"""
class Token:
    """A scanned token: its type, the lexeme text, and the source line number."""

    def __init__(self, lexeme, tokType, line):
        # Name-mangled attributes keep the fields private, as before.
        self.__lexeme = lexeme
        self.__type = tokType
        self.__line = line

    def getLexeme(self):
        """Return the raw characters that make up this token."""
        return self.__lexeme

    def getType(self):
        """Return this token's TokenType."""
        return self.__type

    def getLine(self):
        """Return the line the token was scanned on (useful for error messages)."""
        return self.__line

    def __str__(self):
        # e.g. <'+', PLUS> -- handy when debugging the scanner.
        return "<'{}', {}>".format(self.__lexeme, self.__type)
24,387 | 20c137c79cdccd0afd202e8a2e5e1ad42fc71763 | import unittest
from unittest.mock import patch
from unittest_really_advanced_example import *
class AnotherMoreComplexServiceTest(unittest.TestCase):
    """Demonstrates patching two collaborators of AnotherMoreComplexService."""

    @patch("unittest_really_advanced_example.SendMailClass")
    @patch("unittest_really_advanced_example.OneService")
    def test_make_magic_method(self, first_service_mock, mail_service_mock):
        # BUG FIX: stacked @patch decorators are applied bottom-up, so the
        # bottom decorator (OneService) is injected as the FIRST mock
        # argument. The original parameter names were swapped, so the
        # return value was stubbed on the wrong mock.
        first_service_mock.make_things_and_get_result.return_value = 6
        complex_service = AnotherMoreComplexService(mail_service_mock, first_service_mock)
        complex_service.make_complex_bussiness()
        self.assertEqual(1, mail_service_mock.send_mail.call_count)
        self.assertIsNotNone(complex_service.get_magical_number())
        self.assertEqual(4, complex_service.get_magical_number())
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
24,388 | 113596eb5f256000a469fc146f58dac005033084 | """Robust apply mechanism.
Provides a function 'call', which can sort out what arguments a given
callable object can take, and subset the given arguments to match only
those which are acceptable.
"""
IM_FUNC = "__func__"
FUNC_CODE = "__code__"
def function(receiver):
"""Get function-like callable object for given receiver.
returns (function_or_method, codeObject, fromMethod)
If fromMethod is true, then the callable already has its first
argument bound.
"""
if hasattr(receiver, IM_FUNC):
# Instance method.
im_func = getattr(receiver, IM_FUNC)
func_code = getattr(im_func, FUNC_CODE)
return receiver, func_code, True
elif hasattr(receiver, FUNC_CODE):
func_code = getattr(receiver, FUNC_CODE)
return receiver, func_code, False
elif hasattr(receiver, "__call__"):
return function(receiver.__call__)
else:
raise ValueError(f"unknown reciever type {receiver} {type(receiver)}")
def robust_apply(receiver, signature, *arguments, **named):
    """Call *receiver* with *arguments* plus the subset of *named* it accepts.

    ``signature`` is the callable whose signature is inspected, in case
    ``receiver`` is a callable wrapper around the actual receiver.
    """
    signature, code, start = function(signature)
    n_positional = len(arguments)
    # Parameter names still open to keyword binding.
    acceptable = code.co_varnames[start + n_positional : code.co_argcount]
    # Reject keywords that clash with positionally-bound parameters.
    for name in code.co_varnames[start : start + n_positional]:
        if name in named:
            raise TypeError(
                f"Argument {name!r} specified both positionally "
                f"and as a keyword for calling {signature!r}"
            )
    has_kwargs = bool(code.co_flags & 8)  # CO_VARKEYWORDS flag
    if not has_kwargs:
        # No **kwargs catch-all: silently drop unacceptable keywords.
        # Iterate a snapshot since the dict is mutated while filtering.
        for key in list(named):
            if key not in acceptable:
                del named[key]
    return receiver(*arguments, **named)
|
24,389 | d1c3e4df6c50b348d3afce22fd189ff0fecf4884 | altura = float(input('Qual é a sua altura em cm'))
peso = float(input('Qual é o seu peso em kg:'))
IMC = peso / (altura/100)**2
print (IMC)
# BUG FIX: the original 'sobrepeso' branch tested `IMC >= 25 and IMC < 24.9`,
# which can never be true, and values in [24.9, 25) / [39.9, 40) fell through
# to the wrong message. Standard BMI bands: <18.5 underweight, [18.5, 25)
# normal, [25, 30) overweight, [30, 40) obese, >=40 severely obese.
if IMC < 18.5:
    print(f'Seu IMC é de {IMC}, e é classificado como magreza')
elif IMC < 25:
    print(f'Seu IMC é de {IMC}, e é considerado normal')
elif IMC < 30:
    print(f'Seu IMC é de {IMC}, e é classificado como sobrepeso. Pouco, mas fica o alerta!')
elif IMC < 40:
    print(f'Seu IMC é de {IMC}, e é classificado como obesidade, fique atento e mude seus habitos!')
else:
    print ( "Comece a se alimentar melhor e se exercitar, obesidade grave!")
24,390 | 0c31b90b33402707867ae8c330030bee07488461 | import numpy as np
import ngrams.train as train
# Where the trained character n-gram model dumps are written, per language.
unigram_dump_files = ['../output/unigramEN.txt','../output/unigramFR.txt','../output/unigramIT.txt']
bigram_dump_files = ['../output/bigramEN.txt','../output/bigramFR.txt','../output/bigramIT.txt']
# Characters replaced by '#' before scoring (never scored themselves).
excluding_chars = ' .,"\n\'[]()-;0123456789?*_!&$:<>\t«»'
unigram_models = train.train_ngram_2(n_grams=1,delta=0.5,excluding_chars=excluding_chars)
bigram_models = train.train_ngram_2(n_grams=2,delta=0.5,excluding_chars=excluding_chars)
for model,out_file in zip(unigram_models,unigram_dump_files):
    train.dump(model=model,out_file=out_file)
for model,out_file in zip(bigram_models,bigram_dump_files):
    train.dump(model=model,out_file=out_file)
#test input file
input_file='../datasets/first10sentences.txt'
result_file='../datasets/first10sentences_result.txt'
# Gold language labels for each sentence, used for the accuracy counts.
lang=train.load_input(result_file)
u_correct_count=0
b_correct_count=0
for sentence_ind,sentence in enumerate(train.load_input(input_file)):
    languages=['EN','FR','IT']
    output_file='../output/out%d.txt' % (sentence_ind)
    sentence = sentence.lower()
    output_string = sentence
    # Mask excluded characters so they are skipped during scoring.
    for c in excluding_chars:
        sentence = sentence.replace(c,'#')
    print(sentence)
    n_grams=2
    log_probs=[0,0,0]
    sentence_log_probs=[0,0,0]
    output_string += '\nUNIGRAM MODEL:'
    for token_ind in range (len(sentence)):
        c = sentence[token_ind:token_ind + 1]
        if c != '#':
            output_string += '\n\nUNIGRAM %s:' % (c)
            for model_ind,ngram in enumerate(unigram_models):
                # Unseen characters fall back to the '<unk>' score.
                log_probs[model_ind]=ngram.get(c,ngram.get('<unk>'))
                sentence_log_probs[model_ind] += log_probs[model_ind]
                output_string += '\n%s: P(%s) = %s ==> log prob of sentence so far: %s' \
                    % (languages[model_ind],c,log_probs[model_ind],sentence_log_probs[model_ind])
    #conclusion
    # NOTE(review): argmin implies the stored scores are negated log
    # probabilities (costs) -- confirm against ngrams.train.
    most_probable_lang = languages[np.argmin(sentence_log_probs)]
    output_string += '\nAccording to the UNIGRAM model, the sentence is in %s' % most_probable_lang
    print('UNIGRAM:',most_probable_lang)
    if most_probable_lang == lang[sentence_ind]:
        u_correct_count += 1
    log_probs=[0,0,0]
    sentence_log_probs=[0,0,0]
    output_string += '\nBIGRAM MODEL:'
    for token_ind in range (len(sentence)-1):
        c = sentence[token_ind:token_ind + 2]
        output_string += '\n\nBIGRAM %s:' % (c)
        for model_ind,ngram in enumerate(bigram_models):
            log_probs[model_ind]=ngram.get(c,ngram.get('<unk>'))
            sentence_log_probs[model_ind] += log_probs[model_ind]
            output_string += '\n%s: P(%s|%s) = %s ==> log prob of sentence so far: %s' \
                % (languages[model_ind],c[-1:],c[:-1],log_probs[model_ind],sentence_log_probs[model_ind])
    #conclusion
    most_probable_lang = languages[np.argmin(sentence_log_probs)]
    output_string += '\nAccording to the BIGRAM model, the sentence is in %s' % most_probable_lang
    print('BIGRAM:',most_probable_lang)
    if most_probable_lang == lang[sentence_ind]:
        b_correct_count += 1
    # Per-sentence trace written to its own output file.
    open(output_file, 'w').write(output_string)
print('Unigram accuracy:%d'%(u_correct_count))
print('Bigram accuracy:%d' %(b_correct_count))
24,391 | 2c9b24f7fcb04abad95fe0a6d33b9a778eeb6c2f | # 이스케이프 문자열
print("\"")
print("\'")
print("\\")
# 다음 줄
print("첫 번째 줄 \n두 번째 줄")
#탭(스페이스 4개)
print("안녕\t하세요")
# back space = 앞에 있는 것 하나 지움
print("첫 번째 줄\b두 번째 줄")
# 두 print 한 줄에 출력하기
print("첫 줄", end =' 당연히 이것도 가능 ')
print("두 번째 줄") |
24,392 | fa47e401ceda56515a441e2af2f46cc4c2015acc | from aiogram.types import InlineKeyboardButton,InlineKeyboardMarkup
# Inline keyboard for the "ona tili" (mother tongue) course menu:
# lesson buttons plus a share/inline-query button and a channel link.
onatili_buttons = InlineKeyboardMarkup(
    inline_keyboard=[
        [
            InlineKeyboardButton(text='😁',callback_data='onatili_darslari'),
            InlineKeyboardButton(text="2-dars",callback_data= '2-dars'),
        ],
        [
            InlineKeyboardButton(text=""" 🤔 """,switch_inline_query=" Zo'r bot ekan "),
            InlineKeyboardButton(text="Kanalga azo bo'lish",url='https://t.me/UstozShogird'),
        ],
    ],
    # NOTE(review): resize_keyboard is a ReplyKeyboardMarkup option; inline
    # keyboards ignore it -- confirm whether it can be dropped.
    resize_keyboard = True
)
# Lesson-selection keyboard (lesson 1 / lesson 2).
onatili_dars1_buttons = InlineKeyboardMarkup(
    inline_keyboard=[
        [
            InlineKeyboardButton(text='1-dars',callback_data='dars1'),
            InlineKeyboardButton(text="2-dars",callback_data= 'dars2'),
        ],
    ],
    resize_keyboard = True
)
24,393 | ed33157adac037d270de2c59f088cbc36019f6e3 | import numpy as np
import cv2 as cv
img = cv.imread("photo.jpg", -1)
events = [i for i in dir(cv) if "EVENT" in i]
# print(events)
# set mouse callback
def circle_draw(event, x, y, flags, params):
    # On left double-click, draw a filled blue (BGR) circle on the shared image.
    if event == cv.EVENT_LBUTTONDBLCLK:
        cv.circle(img, (x,y), 100, (255,0,0),-1)
cv.namedWindow("image")
cv.setMouseCallback("image", circle_draw)
while True:
cv.imshow("image", img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows |
24,394 | bf8f29d8e0c2643657f8ba34077a8f9bf15aac99 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 15/02/2015
@author: jorgesaw
'''
from __future__ import absolute_import, print_function, unicode_literals
from imperial.core.factoria.mostrarVentanaSearchGui import \
MostrarVentanaSearchGui
class FactoriaVentanasSearch(object):
    u"""Factory that creates the instances of each search window of the application."""

    @staticmethod
    def crearVentanaGui(tipo, parent=None, mapParam=None):
        """Return the search window for *tipo*, or None when it has no generic search."""
        # BUG FIX: the default was a mutable {} shared across calls; use
        # None and create a fresh dict per call instead.
        if mapParam is None:
            mapParam = {}
        # Imported here to avoid a circular import with the vista package.
        from imperial.vista import factoria
        ventana = None
        if tipo in factoria.LST_GENERIC_SEARCH:
            ventana = MostrarVentanaSearchGui(tipo, parent, mapParam)
        return ventana

    @staticmethod
    def __setearParametros(tipo, mapParam):
        # Not implemented yet; the intended wiring is kept below for reference.
        pass
        #mapParam['clase_modelo'] = dto.getClaseModelo(tipo)
        #mapParam['dao'] = dao.getClaseModelo(tipo)
        #mapParam['modelo'] = Model
        #mapParam['modelo_tabla'] = ModeloTabla
        #mapParam['modelo_datos_tabla'] = dao.getClaseModelo(tipo)
        #mapParam['ventana'] = dlg.getClaseModelo(tipo)
24,395 | 2b6a77dc5968602e24f8b5e05d47f48d373b788e | from django.shortcuts import render
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request,'landpage.html')
def contact(request):
    """Render the contact-us page."""
    return render(request,'contactus.html')
|
24,396 | ade213bfaf711a058c80cbe06471db6b73ea3ffa | from django.db import migrations, models
class Migration(migrations.Migration):
    """Makes Language.iso639_1 nullable (2-letter ISO 639-1 code)."""

    dependencies = [
        ('core', '0002_remove_country_neighbouring'),
    ]

    operations = [
        migrations.AlterField(
            model_name='language',
            name='iso639_1',
            field=models.CharField(max_length=2, null=True),
        ),
    ]
24,397 | abbc4b75b27009efdde3b04e9a509cb9b35eb069 | import serial
import cv2
import time
import os
import shutil
import numpy as np
import matlab.engine
from termcolor import colored
# define the countdown func.
def countdown(t):
    """Print a MM:SS countdown, one refresh per second, for *t* seconds."""
    while t:
        minutes, seconds = divmod(t, 60)
        # Carriage return keeps the countdown on a single console line.
        print('{:02d}:{:02d}'.format(minutes, seconds), end="\r")
        time.sleep(1)
        t -= 1
    # Mirrors the original: the final counter value (0) is printed last.
    print(t)
def empty_a_folder(folder):
    """Delete every file, symlink and subdirectory inside *folder* (best effort)."""
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        try:
            if os.path.isfile(path) or os.path.islink(path):
                os.unlink(path)
            elif os.path.isdir(path):
                shutil.rmtree(path)
        except Exception as e:
            # Keep going: report the failure and continue with the rest.
            print('Failed to delete %s. Reason: %s' % (path, e))
# rotation matrix and translation matrix from CoordinateCalibration.py
# (camera frame -> robot/world frame transform: world = R @ cam + d)
R = np.asarray([[ 0.06396712, 0.65312681, -0.75454197],
                [ 0.99776758, -0.02732128, 0.0609377 ],
                [ 0.019185, -0.75675552, -0.65341642]])
d = np.asarray([404.55821559, 69.31332313, 250.10474557])
print("Pre-loading")
# Serial link to the Arduino that aims the extinguisher.
arduinoData = serial.Serial('COM3', 9600)
time.sleep(2) # Let Arduino some time to reset
from imageai.Detection.Custom import CustomObjectDetection
execution_path = os.getcwd()
# Clear out frames left over from the previous run.
empty_a_folder(execution_path + r"\monitoring")
# MATLAB engine performs lens undistortion and stereo triangulation.
eng = matlab.engine.start_matlab()
matlab_code_path = os.getcwd() + r'\MatlabCode'
eng.cd(matlab_code_path)
# pre-loading fire_net (custom YOLOv3 fire detector)
detector = CustomObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(detection_model_path=os.path.join(execution_path, "detection_model-ex-33--loss-4.97.h5"))
detector.setJsonPath(configuration_json=os.path.join(execution_path, "detection_config.json"))
detector.loadModel()
print("Pre-loading Complete\n")
# start monitoring with the stereo camera pair
print("Monitoring room...")
cam_1 = cv2.VideoCapture(1)
cam_2 = cv2.VideoCapture(0)
time_elapsed = 0
prev = time.time()
img_counter = 0
print_waiting_intro = False
fire_pixels_1 = []
fire_pixels_2 = []
minimum_percentage_probability = 20
extinguishing_state = False
print_extinguishing_intro = False
# Seconds between detection passes (raised to 20 while extinguishing).
wait_time = 5
while True:
    time_elapsed = time.time() - prev
    ret_1, frame_1 = cam_1.read()
    frame_1 = cv2.flip(frame_1, -1)  # camera 1 is mounted rotated 180 degrees
    ret_2, frame_2 = cam_2.read()
    if not ret_1:
        print("failed to grab frame")
        break
    cv2.imshow("cam_1", frame_1)
    if not ret_2:
        print("failed to grab frame")
        break
    cv2.imshow("cam_2", frame_2)
    cv2.waitKey(1)
    if time_elapsed > wait_time: # for every 5 seconds, get a frame from the cam_1
        print("\nProcessing Images...")
        print_waiting_intro = False
        prev = time.time()
        # Save the raw stereo pair, undistort via MATLAB, then run detection.
        cv2.imwrite(execution_path + r"\monitoring\cam_1.{}.jpg".format(img_counter), frame_1)
        cv2.imwrite(execution_path + r"\monitoring\cam_2.{}.jpg".format(img_counter), frame_2)
        img_path = os.getcwd() + r'\monitoring'
        eng.undistortImgs(img_path, "cam_1.{}.jpg".format(img_counter), "cam_2.{}.jpg".format(img_counter))
        drawn_image_1, output_objects_array_1 = detector.detectObjectsFromImage(
            input_image=img_path + r"\undistorted_cam_1.{}.jpg".format(img_counter),
            input_type="file",
            output_type="array",
            minimum_percentage_probability=minimum_percentage_probability)
        drawn_image_2, output_objects_array_2 = detector.detectObjectsFromImage(
            input_image=img_path + r"\undistorted_cam_2.{}.jpg".format(img_counter),
            input_type="file",
            output_type="array",
            minimum_percentage_probability=minimum_percentage_probability)
        cv2.imwrite(execution_path + r"\monitoring\drawn_image_1.{}.jpg".format(img_counter), drawn_image_1)
        cv2.imwrite(execution_path + r"\monitoring\drawn_image_2.{}.jpg".format(img_counter), drawn_image_2)
        print("\n---------------------")
        print("Result: ", end='')
        if len(output_objects_array_1) == 0 or len(output_objects_array_2) == 0:
            # Fire must be visible in BOTH cameras to triangulate.
            print(colored('Negative', 'green'))
            extinguishing_state = False
            wait_time = 5
            print("---------------------\n")
        elif output_objects_array_2[0] and output_objects_array_1[0]:
            percentage = str(int(output_objects_array_1[0]["percentage_probability"]))
            print(colored(percentage + "% Positive", 'red'))
            print("---------------------\n")
            # Use the bottom-centre of each bounding box as the fire's pixel
            # location in each view.
            fire_box_points_1 = output_objects_array_1[0]["box_points"]
            fire_box_points_2 = output_objects_array_2[0]["box_points"]
            fire_pixels_1 = [(fire_box_points_1[0] + fire_box_points_1[2])/2, fire_box_points_1[3]]
            fire_pixels_2 = [(fire_box_points_2[0] + fire_box_points_2[2])/2, fire_box_points_2[3]]
            fire_pixels_1_x = float(fire_pixels_1[0])
            fire_pixels_1_y = float(fire_pixels_1[1])
            fire_pixels_2_x = float(fire_pixels_2[0])
            fire_pixels_2_y = float(fire_pixels_2[1])
            # get world coordinate of cam1 from Matlab script
            cam_world_coordinates = eng.myTriangulate(fire_pixels_1_x, fire_pixels_1_y, fire_pixels_2_x, fire_pixels_2_y)
            my_world_coordinates = np.matmul(R, cam_world_coordinates[0]) + d
            round_my_world_coor = [int(round(my_world_coordinates[0])), int(round(my_world_coordinates[1])), int(round(my_world_coordinates[2]))]
            print("Fire Location: ", colored(round_my_world_coor, 'red'), "\n")
            # decoding coordinates to send to Arduino: each axis clamped to
            # >= 0 and zero-padded to exactly three digits (9 chars total).
            command = ""
            for i in round_my_world_coor:
                if i < 0:
                    i = 0
                if i / 10 < 1:
                    command = command + "00" + str(i)
                elif i / 10 < 10:
                    command = command + "0" + str(i)
                else:
                    command = command + str(i)
            arduinoData.write(command.encode())
            extinguishing_state = True
            wait_time = 20
        # NOTE(review): counter advances once per processed frame pair;
        # indentation reconstructed -- confirm against the original layout.
        img_counter = img_counter + 1
    elif extinguishing_state == True and print_extinguishing_intro == False:
        print("Extinguishing...\n")
        print_extinguishing_intro = True
    elif extinguishing_state == False and print_waiting_intro == False:
        print("Waiting for 5 seconds\n", end='', flush=True)
        print_waiting_intro = True
    elif extinguishing_state == False and print_waiting_intro == True:
        print("#", end='', flush=True)
cam_1.release()
cam_2.release()
cv2.destroyAllWindows()
24,398 | 3b20426b81e0a4da283d8b498f07083a3fc8d0aa | from playwright.sync_api import Page
import time
class CAccountClass:
    """Mutable record of passenger details consumed by fill_data.

    Every field starts as an empty string; callers assign real values
    (the date fields presumably receive date-like objects) before use.
    """

    # Field names in the order the form expects them.
    _FIELDS = ('sexual', 'name', 'sex', 'phone', 'mail', 'passport',
               'start', 'end', 'birth', 'effective')

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, field, '')
def fill_data(page:Page,data:CAccountClass):
    """Fill the passenger form on *page* with the values held in *data*.

    NOTE(review): the two "请输入名" placeholders differ only by a trailing
    dot; confirm which box is the surname vs. given-name field.
    """
    page.get_by_placeholder("请输入名", exact=True).click()
    page.get_by_placeholder("请输入名", exact=True).fill(data.sexual)
    page.get_by_placeholder("请输入名.").click()
    page.get_by_placeholder("请输入名.").fill(data.name)
    page.get_by_placeholder("输入护照号").click()
    page.get_by_placeholder("输入护照号").fill(data.passport)
    #page.locator(".col-12 > app-input-control > div > .mat-form-field > .mat-form-field-wrapper > .mat-form-field-flex > .mat-form-field-infix").first.click()
    # Country calling code is hard-coded to +86 (China).
    page.get_by_placeholder("44").fill("86")
    page.get_by_placeholder("012345648382").click()
    page.get_by_placeholder("012345648382").fill(data.phone)
    page.get_by_placeholder("输入邮箱地址").click()
    page.get_by_placeholder("输入邮箱地址").click()
    page.get_by_placeholder("输入邮箱地址").fill(data.mail)
    # Disabled below: nationality / sex selects and the two ngb date pickers,
    # kept for reference as a (no-op) string literal.
    """
    page.locator("#mat-select-value-9").click()
    page.get_by_text("中国").click()
    page.locator("#mat-select-value-7").click()
    if data.sex == 1:
        page.get_by_text("男性").click()
    else:
        page.get_by_text("女性").click()
    page.locator("app-ngb-datepicker").filter(has_text="出生日期*").locator("div").nth(3).click()
    time.sleep(0.5)
    page.get_by_role("combobox", name="Select month").select_option(str(data.birth.month))
    page.get_by_role("combobox", name="Select year").select_option(str(data.birth.year))
    page.get_by_role("gridcell", name=str(data.birth.strftime("%A, %B %d, %Y"))).get_by_text(str(data.birth.day)).click()
    page.locator("app-ngb-datepicker").filter(has_text="护照有效期*").locator("div").nth(3).click()
    time.sleep(0.5)
    page.get_by_role("combobox", name="Select month").select_option(str(data.effective.month))
    page.get_by_role("combobox", name="Select year").select_option(str(data.effective.year))
    #page.get_by_text(str(data.effective.day), exact=True).click()
    page.get_by_role("gridcell", name=str(data.effective.strftime("%A, %B %d, %Y"))).get_by_text(str(data.effective.day)).click()
    """
|
24,399 | b32f5e0740813c9e7f7e5dd5d7b7f6553cee9da9 | from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import boto
import six
import sure # noqa
from boto.exception import EC2ResponseError
from moto import mock_ec2_deprecated
@mock_ec2_deprecated
def test_key_pairs_empty():
    """A fresh mocked account has no key pairs."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    assert len(conn.get_all_key_pairs()) == 0
@mock_ec2_deprecated
def test_key_pairs_invalid_id():
    """Fetching an unknown key pair raises InvalidKeyPair.NotFound."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    with assert_raises(EC2ResponseError) as cm:
        conn.get_all_key_pairs('foo')
    cm.exception.code.should.equal('InvalidKeyPair.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pairs_create():
    """create_key_pair honours dry_run and returns RSA PEM material."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    # Dry-run must fail with DryRunOperation without creating anything.
    with assert_raises(EC2ResponseError) as ex:
        kp = conn.create_key_pair('foo', dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set')
    kp = conn.create_key_pair('foo')
    assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
    kps = conn.get_all_key_pairs()
    assert len(kps) == 1
    assert kps[0].name == 'foo'
@mock_ec2_deprecated
def test_key_pairs_create_two():
    """Two key pairs can coexist and be fetched individually by name."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    kp = conn.create_key_pair('foo')
    kp = conn.create_key_pair('bar')
    assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
    kps = conn.get_all_key_pairs()
    kps.should.have.length_of(2)
    [i.name for i in kps].should.contain('foo')
    [i.name for i in kps].should.contain('bar')
    # Filtering by name returns only the matching pair.
    kps = conn.get_all_key_pairs('foo')
    kps.should.have.length_of(1)
    kps[0].name.should.equal('foo')
@mock_ec2_deprecated
def test_key_pairs_create_exist():
    """Creating a duplicate key pair name raises InvalidKeyPair.Duplicate."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    kp = conn.create_key_pair('foo')
    assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----')
    assert len(conn.get_all_key_pairs()) == 1
    with assert_raises(EC2ResponseError) as cm:
        conn.create_key_pair('foo')
    cm.exception.code.should.equal('InvalidKeyPair.Duplicate')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pairs_delete_no_exist():
    """Deleting a missing key pair still succeeds (EC2 delete is idempotent)."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    assert len(conn.get_all_key_pairs()) == 0
    r = conn.delete_key_pair('foo')
    r.should.be.ok
@mock_ec2_deprecated
def test_key_pairs_delete_exist():
    """delete_key_pair honours dry_run, then actually removes the pair."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    conn.create_key_pair('foo')
    with assert_raises(EC2ResponseError) as ex:
        r = conn.delete_key_pair('foo', dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set')
    r = conn.delete_key_pair('foo')
    r.should.be.ok
    assert len(conn.get_all_key_pairs()) == 0
@mock_ec2_deprecated
def test_key_pairs_import():
    """import_key_pair honours dry_run and stores the imported key."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    with assert_raises(EC2ResponseError) as ex:
        kp = conn.import_key_pair('foo', b'content', dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set')
    kp = conn.import_key_pair('foo', b'content')
    assert kp.name == 'foo'
    kps = conn.get_all_key_pairs()
    assert len(kps) == 1
    assert kps[0].name == 'foo'
@mock_ec2_deprecated
def test_key_pairs_import_exist():
    """create_key_pair over an already-imported name raises Duplicate."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    kp = conn.import_key_pair('foo', b'content')
    assert kp.name == 'foo'
    assert len(conn.get_all_key_pairs()) == 1
    with assert_raises(EC2ResponseError) as cm:
        conn.create_key_pair('foo')
    cm.exception.code.should.equal('InvalidKeyPair.Duplicate')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_key_pair_filters():
    """get_all_key_pairs supports the key-name and fingerprint filters."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    _ = conn.create_key_pair('kpfltr1')
    kp2 = conn.create_key_pair('kpfltr2')
    kp3 = conn.create_key_pair('kpfltr3')
    kp_by_name = conn.get_all_key_pairs(
        filters={'key-name': 'kpfltr2'})
    set([kp.name for kp in kp_by_name]
        ).should.equal(set([kp2.name]))
    kp_by_name = conn.get_all_key_pairs(
        filters={'fingerprint': kp3.fingerprint})
    set([kp.name for kp in kp_by_name]
        ).should.equal(set([kp3.name]))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.