index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
993,200 | 44edafe7eca10d1d065886053de89aff09e3bb26 | import openpyxl
wb = openpyxl.load_workbook('./sample.xlsx')
sheet = wb['Sheet1']
cells = sheet['A2':'C3']
for row in cells:
for cell in row:
print(cell.value)
print('-'*10)
wb.close() |
993,201 | a214891b9593b201593e98c780b45e2be9c56cdb | import random
class Hand:
cards = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'K', 'Q', 'A']
suits = ['H', 'S', 'D', 'C']
def __init__(self):
self.hand = []
self.total = 0
self.aces = 0
self.first = 0
self.terminated = False
self.bust = False
self.doubleDown = False
return
def sum(self, card):
if card < 8:
self.total += int(card) + 2
elif card == 12:
self.total += 11
else:
self.total += 10
while self.total > 21 and self.aces > 0:
self.total -= 10
self.aces -= 1
if self.total > 21:
self.bust = True
self.terminated = True
def add(self, number_of_cards, suit=None, card=None):
for i in range(0, number_of_cards):
if suit is None or card is None:
suit = random.randint(0, len(self.suits) - 1)
card = random.randint(0, len(self.cards) - 1)
if self.cards[card] == 'A':
self.aces += 1
self.hand.append((self.suits[suit] + self.cards[card]))
self.sum(card)
suit = card = None
if len(self.hand) == 1:
self.first = self.get_total()
def terminate(self):
self.terminated = True
def set_double_down(self):
self.doubleDown = True
def get_total(self):
return self.total
def is_bust(self):
return self.bust
def is_twenty_one(self):
return self.total == 21
def is_terminated(self):
return self.terminated
def is_double_down(self):
return self.doubleDown
|
993,202 | 674b3f438ac498f6b050f32944d0a25be1929d8e | import matplotlib.pyplot as plt
import numpy as np
def show_single_model_loss(losses, model_name, mean=True):
losses = np.array(losses, dtype=np.float)
if mean is True:
losses = np.mean(losses, axis=1)
else:
losses = losses.flatten()
plt.plot(losses)
plt.ylabel('mse')
plt.xlabel('step')
plt.xticks(list(range(0, losses.shape[0], 10)))
plt.title(f'{model_name} losses')
plt.show()
|
993,203 | 06726235a183e6206fa9c3d8b16b4ba06a69fd7f | #!/usr/bin/python3
import os
import json
datasetPath = "dataset/"
datasetFilePath = "dataset.json"
datasetFile = open(datasetFilePath, mode="w")
datasetFile.truncate()
result = [os.path.join(dp, f).replace("\\","/") for dp, dn, filenames in os.walk(datasetPath) for f in filenames if os.path.splitext(f)[1] == '.png']
json.dump(result, datasetFile)
|
993,204 | 36bafbe3f66506975c5275619a93ad19d41b8456 | def largest_possible_num(arr):
for i in range(len(arr)-1):
for j in range(i+1, len(arr)):
if arr[i]+arr[j] < arr[j]+arr[i]:
arr[i], arr[j] = arr[j], arr[i] #Swaping elements
return ''.join(arr)
a = ["61","59", "62"]
print largest_possible_num(a)
#One Line Solution
# def arrange(arr):
# return ''.join(sorted(arr, cmp=lambda x, y: cmp(y + x, x + y)))
|
993,205 | 4c2cde7977cef3036105e5e52c437e7f76a6680c | import re
import os
f = open('VM_clean_word')
VM_word = f.read()
VM_letter_con = re.compile(r'\n').sub('', VM_word)
# print(VM_letter_con)
if os.path.exists('VM_letter_count'):
os.remove(os.getcwd() + '\VM_letter_count')
print('Previous file removed!')
else:
print('Does not exist the "VM_letter_count" file.')
f1 = open('VM_letter_count', 'x')
# res = {}
# for i in VM_letter_con:
# res[i] = VM_letter_con.count(i)
# print(res)
for i in range(26):
f1.write(chr(i + ord('a')) + '\t' + str(VM_letter_con.count(chr(i + ord('a')))) + '\n')
f1.write('?' + '\t' + str(VM_letter_con.count('?')) + '\n')
|
993,206 | 68c6457a1c9dd524985d61d22d825f24b175a0a6 | #!/usr/bin/python3
# ops.py by Bill Weinman [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC
def main():
list =[];
list = [0,1,2,3,4,5,6,7,8,9,10];
print(list[0:5]);
#shorthand to add 0-100 in a list
list[:] = range(100);
print(list);
#slice from index 27 to 42
print(list[27:42]);
#slice from index 27 to 42 and every 3 steps
print(list[27:42:3]);
list[27:42:3] = (99,99,00,00,99);
print(list);
if __name__ == "__main__": main()
|
993,207 | f433acf77a57818c6ceaf37fb2fae362a21afacb | #Write a Python program to sort a list of tuples using Lambda.
t=(30,4,91,55,1,66,5)
f = lambda t:sorted(t)
print(f(t))
|
993,208 | f2928426bebe17dc9ae993783b822ba41fb7a643 | import unittest
from formation.formats import JSONFormat
class EqualityTestCase(unittest.TestCase):
def test_child_equality(self):
json1 = """
{
"type":"tag1",
"children": [
{"type": "tag2"},
{"type": "tag3"}
]
}
"""
json2 = """
{
"type":"tag1",
"children": [
{"type": "tag3"},
{"type": "tag2"}
]
}
"""
node1 = JSONFormat(data=json1).load()
node2 = JSONFormat(data=json2).load()
self.assertFalse(node1 == node2)
self.assertTrue(node1 != node2)
def test_tag_equality(self):
json1 = """
{
"type":"tag1-ext",
"attrib": {
"name": "tag1"
},
"children": [
{
"type": "tag2",
"attrib": {
"width": "50"
}
},
{"type": "tag3"}
]
}
"""
json2 = """
{
"type":"tag1",
"attrib": {
"name": "tag1"
},
"children": [
{
"type": "tag2",
"attrib": {
"width": "50"
}
},
{"type": "tag3"}
]
}
"""
node1 = JSONFormat(data=json1).load()
node2 = JSONFormat(data=json2).load()
self.assertFalse(node1 == node2)
self.assertTrue(node1 != node2)
def test_attrib_equality(self):
json1 = """
{
"type":"tag1",
"attrib": {
"name": "tag1"
},
"children": [
{
"type": "tag2",
"attrib": {
"attr": {
"width": "40",
"height": "50"
}
}
},
{"type": "tag3"}
]
}
"""
json2 = """
{
"type":"tag1",
"attrib": {
"name": "tag1"
},
"children": [
{
"type": "tag2",
"attrib": {
"attr": {
"height": "50",
"width": "40"
}
}
},
{"type": "tag3"}
]
}
"""
json3 = """
{
"type":"tag1",
"attrib": {
"name": "tag1"
},
"children": [
{
"type": "tag2",
"attrib": {
"attr": {
"height": "50"
}
}
},
{
"type": "tag3",
"attrib": {
"attr": {
"width": "40"
}
}
}
]
}
"""
node1 = JSONFormat(data=json1).load()
node2 = JSONFormat(data=json2).load()
node3 = JSONFormat(data=json3).load()
# test correct operator overloading
self.assertFalse(node1 != node2)
self.assertTrue(node1 == node2)
self.assertTrue(node1 != node3)
self.assertFalse(node1 == node3)
class AttributeHandlingTestCase(unittest.TestCase):
def setUp(self) -> None:
self.node = JSONFormat(
data="""
{
"type": "tag1",
"attrib": {
"name": "tag1",
"attr": {
"background": "#ffffff",
"font": "Arial"
},
"layout": {
"width": "20",
"height": "40"
}
}
}
"""
).load()
def test_get_attr(self):
self.assertEqual(self.node["layout"]["height"], "40")
self.assertEqual(self.node["name"], "tag1")
def test_remove_attr(self):
self.node.remove_attrib("width", "layout")
self.assertNotIn("width", self.node.attrib["layout"])
# should not cause an exception removing something already removed
self.node.remove_attrib("width", "layout")
def test_load_attr(self):
self.node["layout"]["anchor"] = "left"
self.assertIn("anchor", self.node.attrib["layout"])
self.assertEqual(self.node["layout"]["anchor"], "left")
self.node["id"] = "200"
self.assertIn("id", self.node.attrib)
self.assertEqual(self.node["id"], "200")
def test_attrib_grouping(self):
grouped = self.node.attrib
self.assertDictEqual(grouped.get("layout"), {"width": "20", "height": "40"})
self.assertDictEqual(grouped.get("attr"), {"background": "#ffffff", "font": "Arial"})
if __name__ == '__main__':
unittest.main()
|
993,209 | 711651ef8595687840a8d0f6dcf84bbcaaf912cd | getDatacenters = [{
'id': 0,
'name': 'dal05'
}]
|
993,210 | e77a5cf1d15f7c119567f7c5fa72c80db1d43994 | from fastapi.requests import Request
from car_market.infrastructure.version_provider import VersionProvider
def get_version_provider(request: Request) -> VersionProvider:
"""
See: https://fastapi.tiangolo.com/tutorial/dependencies/
:param request:
:return:
"""
return request.app.state.version_provider
|
993,211 | 68ebaa2d8b9b4d02194e33444dc5625fe4032d50 | from flask import request, Response, abort, current_app
from functools import wraps
import jwt
from .models import ReaderDao
def get_auth_data():
session = current_app.session
token = request.headers.get('Authorization')
reader_id = jwt.decode(token, verify=False)['id']
secret = session.query(ReaderDao.secret).filter(ReaderDao.id == reader_id).first()['secret']
return token, secret
def requires_auth(f):
""" Manages authorization on routes. """
@wraps(f)
def decorated(*args, **kwargs):
try:
token, secret = get_auth_data()
jwt.decode(token, secret)
return f(*args, **kwargs)
except:
abort(401)
return decorated
|
993,212 | 5c42da06e699a7a1807c6093c0ea8eb07fc2030c | import requests
#json_data = requests.get('http://www.floatrates.com/daily/idr.json')
json_data = {"usd":{"code":"USD","alphaCode":"USD","numericCode":"840","name":"U.S. Dollar","rate":6.8354476434761e-5,"date":"Sat, 31 Oct 2020 11:00:01 GMT","inverseRate":14629.619772663},
"eur":{"code":"EUR","alphaCode":"EUR","numericCode":"978","name":"Euro","rate":5.8505225223501e-5,"date":"Sat, 31 Oct 2020 11:00:01 GMT","inverseRate":17092.490391752}}
#print(json_data.json())
print(json_data)
for data in json_data.values():
print(data['code'])
print(data['name'])
print(data['date'])
print(data['inverseRate'])
|
993,213 | 335f621b40923058f92291657cf0d096f4724586 | import os
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from pajki.db import db, URL
from pajki.base import BaseSpider
from settings import DATA_PATH
class LevicaSpider(BaseSpider):
name= "levica"
allowed_domains = ["levica.si"]
start_urls = ["http://www.levica.si/novice/"]
rules = (
Rule(LinkExtractor(allow=(),
restrict_xpaths=('//div[@class="main-pagination"]/a')),
follow=True),
Rule(LinkExtractor(allow=(),
restrict_xpaths=('//article/a')), callback='parse_novica'),
)
def parse_novica(self, response):
body = response.xpath('normalize-space(string(//div[@class="post-content-right"]))').extract_first("")
if body:
URL.create(content=body, url=response.url)
#print(body)
|
993,214 | 942418f5e6511bbd8738c98b9c4560e5ca8c6c7d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 20:15:07 2020
@author: victoria
"""
import torch
import numpy as np
from SAE import StackedAutoEncoder
import pandas as pd
from sklearn import preprocessing
from torch.autograd import Variable
#min-max normalisation
def NormalizeData(data):
#two separate scaler options, minmax and z-score (aka standard scaler)
# scaler = preprocessing.MinMaxScaler()
scaler = preprocessing.StandardScaler()
data = scaler.fit_transform(data)
return data, scaler
#root mean squared log error loss
class RMSLELoss(torch.nn.Module):
def __init__(self):
super().__init__()
self.mse = torch.nn.MSELoss()
def forward(self, pred, actual):
return torch.sqrt(self.mse(torch.log(pred + 1), torch.log(actual + 1)))
def RunModel(learning_rate, squeeze, x_train):
#number of epochs for full-stack training stage
epochs = 10000
#create stacked autoencoder, define optimizer + criterion for combined training phase
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = StackedAutoEncoder(input_shape=x_train.shape[1],encode_shape = squeeze).to(device)
#choice between two optimizers, SGD and Adam
# optimizer = torch.optim.SGD(model.parameters(),lr=learning_rate)
optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)
#mse loss criterion
criterion = torch.nn.MSELoss()
#normalise train data + convert to tensor
train_dataset = pd.DataFrame.to_numpy(x_train)
train_dataset, scaler = NormalizeData(train_dataset)
x_train_tensor = torch.tensor(train_dataset.astype(float))
print("__________________________________________________________")
print("FITTING AUTOENCODER")
#independent sub-autoencoder training with high learning rate
model(x_train_tensor.float()).clone().detach()
print("Training Full-Stack")
for epoch in range(epochs+1):
#train full stacked autoencoder combined
optimizer.zero_grad()
#precentage of guassian noise to be added during full stack training
percentage = 0.05
noise = np.random.normal(0, x_train_tensor.std(), x_train_tensor.shape) * percentage
noised_features = ( x_train_tensor+noise).float()
#model training + optimiser steps
encoded = model.encode( noised_features.float())
outputs = model.reconstruct(encoded)
train_loss = criterion(outputs.float(), Variable(x_train_tensor.data, requires_grad=True).float())
train_loss.backward()
optimizer.step()
loss = train_loss.item()
if epoch % 1000 == 0:
print("epoch : {}/{}, loss = {:.11f}".format(epoch, epochs, loss))
return model, x_train_tensor, scaler
return 0
|
993,215 | 73664ba7f811b7af6a27cf52e5d5b7e352afa94c | import requests
from hashlib import md5
import base64
import random
import string
import time
with open("../test.jpg", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
app_key = '<your appKey>'
app_secret = '<your appSecret>'
nonstr = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
timestamp = str(time.time()*1000)
m = md5()
m.update('%s:%s:%s:%s' % (app_key, app_secret, nonstr, timestamp))
app_signature = m.hexdigest()
data = {
'type': 'express',
'data': encoded_string
}
headers = {
'app-timestamp': timestamp,
'app-key': app_key,
'app-nonstr': nonstr,
'app-signature': app_signature
}
result = requests.post('http://api.manhattan.hexindeu.com/task/detect', data=data, headers=headers).json()
print result
|
993,216 | 9f710d07df6007a06f05757858e33c61b5978fa7 | # Generated by Django 3.0.5 on 2020-05-05 06:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('emp', '0038_auto_20200505_1127'),
]
operations = [
migrations.AddField(
model_name='student',
name='github',
field=models.CharField(blank=True, default=' ', max_length=20, null=True),
),
migrations.AddField(
model_name='student',
name='phno',
field=models.IntegerField(blank=True, default=0, max_length=10, null=True),
),
migrations.AlterField(
model_name='student',
name='experience',
field=models.TextField(default=' ', max_length=1000),
),
]
|
993,217 | 1470a7ad0f53425636fe6025f2a3a16b1217fc26 | import requests
url = "https://www.baidu.com"
r =requests.request("GET",url=url)
print(r.text())
print(r.state_code())
|
993,218 | 5ce88b78bfcc43e722e8dd83e96613b760ff104a | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
AUTHOR = 'James Dey'
SITENAME = 'Deytalytics Blog'
SITEURL = 'http://deyblog.github.io/'
PATH = 'content'
TIMEZONE = 'Europe/London'
DEFAULT_LANG = 'English'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Deytalytics.com','http://www.deytalytcis.com'),)
# Social widget
SOCIAL = (('Linkedin', 'https://www.linkedin.com/company/deytalyticsltd'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
THEME='simple'
SEARCH_BOX = True |
993,219 | 8704580cce0b5343cde72b56632102601f223930 | import sys
import os
import shutil
if __name__ == '__main__':
project_name = sys.argv[1]
input_file = sys.argv[2]
input_string = ""
if sys.argv.__len__() == 4:
input_string = sys.argv[3]
print(input_string)
input_path = "fileInput/%s/" % input_file
output_path = "fileOutput/%s/" % project_name
class_path = "class/%s_classes" % project_name
java_file_path = "src/%s.java" % project_name
jar_file_path = "jar/%s.jar" % project_name
cmd2 = "javac -encoding utf-8 -classpath C:/Java/jdk/lib/hadoop-core-1.2.1.jar -d %s %s" % (class_path, java_file_path)
cmd3 = "jar -cvf %s -C %s/ ." % (jar_file_path, class_path)
cmd4 = "hadoop jar %s %s %s %s" % (jar_file_path, project_name, input_path, output_path)
if input_string != "":
cmd4 = cmd4 + " " + input_string
if os.path.exists(output_path):
shutil.rmtree(output_path)
if os.path.exists(class_path):
pass
else:
os.makedirs(class_path)
for cmd in [cmd2, cmd3, cmd4]:
print(cmd)
os.system(cmd)
|
993,220 | 7d398fe5751c8ff95ace3fb52c8049d5a2836d62 | # Generated by Django 2.0.3 on 2018-04-19 06:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
('app1', '0006_auto_20180411_0259'),
]
operations = [
migrations.CreateModel(
name='TraitValueDetails',
fields=[
('sid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('overall_value', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
('overall_value_health', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
('late_shipment_rate', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
('on_time_delivery', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
('hit_to_success_ratio', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
('late_shipment_recommendations', models.TextField(default='ABC')),
('positive_feedbacks', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
('negative_feedbacks', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
('gst_value', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
('kyc_value', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
('credit_card_value', models.DecimalField(decimal_places=2, default=0, max_digits=4)),
],
options={
'managed': True,
'db_table': 'traits_value_details',
},
),
migrations.AlterField(
model_name='sellerbusinessdetails',
name='gst_flag',
field=models.CharField(choices=[('empty', 'empty'), ('inprocess', 'inprocess'), ('accepted', 'accepted'), ('rejected', 'rejected')], default='empty', max_length=15),
),
migrations.AlterField(
model_name='sellerdetails',
name='sKYC_flag',
field=models.CharField(choices=[('empty', 'empty'), ('inprocess', 'inprocess'), ('accepted', 'accepted'), ('rejected', 'rejected')], default='empty', max_length=15),
),
]
|
993,221 | 3d6bdcdd4a4e28eab6b7c8ae5f6595c58846e205 | ss = 'None'
if ss:
print('not None')
else:
print('None') |
993,222 | 320e8f37432dbf6684305be22630cda4f38c7471 | # created at 2018-01-29
# updated at 2018-09-28
# Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/ML_handwritten_number
# 利用保存到本地的训练好的模型,来检测单张 image 的标记
from sklearn.externals import joblib
from PIL import Image
img = Image.open("../test/test_5.png")
# Get features
from generate_datebase import get_features
features_test_png = get_features.get_features_single(img)
path_saved_models = "../data/data_models/"
# LR
LR = joblib.load(path_saved_models + "model_LR.m")
predict_LR = LR.predict([features_test_png])
print("LR predict:", predict_LR[0])
# LSVC
LSVC = joblib.load(path_saved_models + "model_LSVC.m")
predict_LSVC = LSVC.predict([features_test_png])
print("LSVC predict:", predict_LSVC[0])
# MLPC
MLPC = joblib.load(path_saved_models + "model_MLPC.m")
predict_MLPC = MLPC.predict([features_test_png])
print("MLPC predict:", predict_MLPC[0])
# SGDC
SGDC = joblib.load(path_saved_models + "model_SGDC.m")
predict_SGDC = SGDC.predict([features_test_png])
print("SGDC predict:", predict_SGDC[0]) |
993,223 | 5c04b2e990f0c66411e7876cbfe8b689f908637b | import os
import time
import speech_recognition as sr
import azure.cognitiveservices.speech as speechsdk
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, send_from_directory, session, url_for
from flask_session import Session
from flask_login import current_user
from tempfile import mkdtemp
from werkzeug.utils import secure_filename
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import allowed_file, login_required
app = Flask(__name__)
# set max file upload size to 16mb
app.config['MAX_CONTENT_LENGTH'] = 64 * 1024 * 1024
# configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# configure Microsoft Azure Speech
speech_key, service_region = "API KEY", "REGION"
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///speechtotext.db")
# set base file path
base_path = 'static/files/'
files_path = 'files/'
@app.route('/')
def index():
# display page
if not session.get("user_id"):
return render_template('index.html')
# get current user's audio files
print('You are now Logged In')
files = db.execute("SELECT filename, transcript FROM files JOIN users ON users.id = files.user_id WHERE user_id = :user_id",
user_id=session["user_id"])
return render_template('index.html', files=files, files_path=files_path)
@app.route('/upload', methods=['GET', 'POST'])
@login_required
def upload():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# check if user has not selected a filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
# save file to static/files and store metadata in database
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(base_path, filename))
db.execute("INSERT INTO files (filename, user_id) VALUES (:filename, :user_id)",
filename=filename, user_id=session["user_id"])
flash("File Uploaded")
return redirect('files')
else:
return render_template('upload.html')
@app.route('/files', methods=["GET", "POST"])
@login_required
def files():
if request.method == 'POST':
# Ensure file is selected
file = request.form.get("file")
if not file:
return "choose a file to transcribe", 403
# get the full audio file path
path = base_path + file
# recognize speech using Microsoft Azure Speech
audio_input = speechsdk.audio.AudioConfig(filename=path)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_input)
# perform continuous speech recognition with input from an audio file
done = False
def stop_cb(evt):
"""callback that signals to stop continuous recognition upon receiving an event `evt`"""
print('CLOSING on {}'.format(evt))
nonlocal done
done = True
all_results = []
def handle_final_result(evt):
all_results.append(evt.result.text)
# connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognizing.connect(lambda evt: print('RECOGNIZING: {}'.format(evt)))
speech_recognizer.recognized.connect(handle_final_result)
speech_recognizer.recognized.connect(lambda evt: print('RECOGNIZED: {}'.format(evt)))
speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))
speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# start continuous speech recognition
speech_recognizer.start_continuous_recognition()
while not done:
time.sleep(.5)
speech_recognizer.stop_continuous_recognition()
# function to convert list to string
def listToString(s):
str1 = ""
return (str1.join(s))
# store transcription result
text = listToString(all_results)
db.execute("UPDATE files SET transcript = :text WHERE filename = :filename",
text=text, filename=file)
# redirect to files
return render_template('files.html', text=text)
else:
# get list of uploaded files
files = db.execute("SELECT filename FROM files JOIN users ON users.id = files.user_id WHERE user_id = :user_id",
user_id=session["user_id"])
return render_template('files.html', files=files, files_path=files_path)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return "must provide username", 403
# Ensure password was submitted
elif not request.form.get("password"):
return "must provide password", 403
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return "invalid username and/or password", 403
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
# User reached route via GET
if request.method == "GET":
return render_template("register.html")
# User reached route via POST
else:
# Ensure username was submitted
if not request.form.get("username"):
return "must provide username", 403
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username does not already exist
if len(rows) != 0:
return "Username already exists", 403
# Ensure password was submitted
elif not request.form.get("password"):
return "must provide password", 403
# Ensure password confirmation was submitted
elif not request.form.get("confirmation"):
return "must provide password confirmation", 403
# Ensure the passwords match
elif request.form.get("password") != request.form.get("confirmation"):
return "passwords do not match", 403
# Store username and hash the password
username = request.form.get("username")
hash = generate_password_hash(request.form.get("password"))
# Insert new user into users in the database
db.execute("INSERT INTO users (username, hash) VALUES (:username, :hash)",
username=username, hash=hash)
# Redirect user to home page
return redirect("/")
@app.route("/change password", methods=["GET", "POST"])
@login_required
def change_password():
"""Change user password"""
# User reached route via POST
if request.method == "POST":
# Get user login details
rows = db.execute("SELECT * FROM users WHERE id = :user_id", user_id=session["user_id"])
# Ensure current password was submitted
if not request.form.get("password"):
return "must provide current password", 403
# Ensure current password is correct
if not check_password_hash(rows[0]["hash"], request.form.get("password")):
return "password incorrect", 403
# Ensure new password was submitted
elif not request.form.get("new_password"):
return "must provide new password", 403
# Ensure password confirmation was submitted
elif not request.form.get("confirmation"):
return "must provide password confirmation", 403
# Ensure the passwords match
elif request.form.get("new_password") != request.form.get("confirmation"):
return "passwords do not match", 403
# Hash new password
hash = generate_password_hash(request.form.get("new_password"))
# Update user password in the database
db.execute("UPDATE users SET hash = :hash WHERE id = :user_id", user_id=session["user_id"], hash=hash)
# Flash password change success message
flash("Password Changed Successfully")
# Redirect user to home page
return redirect("/")
# User reached route via GET
else:
return render_template("change_password.html")
@app.errorhandler(413)
def too_large(e):
return "File is too large", 413
# run the application
if __name__ == '__main__':
app.run(debug = True)
|
993,224 | 1b18baac1b5ea5a05495ef052585a955b8868c6c | from libs.config import alias
from libs.myapp import send, color
def get_php():
return """function udpGet($sendMsg,$ip,$port){
$handle=stream_socket_client("udp://{$ip}:{$port}", $errno, $errstr);
if(!$handle){
echo("[-][UDP] {$errno} - {$errstr}\\n");
return;
}
$sendMsg=hex2bin($sendMsg);
@fwrite($handle, $sendMsg);
$result = fread($handle,1024);
@fclose($handle);
return $result;
}
function checkUDP($dns_server){
if(stristr(urlencode(udpGet('02ed010000010000000000000862696c6962696c6903636f6d0000010001',$dns_server,'53')), 'bilibili%03com')) {
echo "[+][UDP] {$dns_server}:53\\n";
}else{
echo "[-][UDP] {$dns_server}:53\\n";
}
}
function checkDNS($domain){
$dnsres = dns_get_record($domain,DNS_A);
if(sizeof($dnsres)>0){
echo("[+][DNS] ${domain}\\n");
} else {
echo("[-][DNS] ${domain}\\n");
}
}
function checkTCP(){
$ips = ['bilibili.com', 'jetbrains.com', 'microsoft.com'];
foreach($ips as $ip) {
$context = stream_context_create(array('http' => array('follow_location' => false, 'timeout' => 5)));
$httpres = file_get_contents("http://${ip}", false, $context);
if($httpres===false){
echo("[-][TCP] ${ip}\\n");
continue;
};
echo("[+][TCP] ${ip}\\n");
break;
}
}
checkTCP();
checkUDP("8.8.8.8");
checkUDP("223.5.5.5");
checkUDP("119.29.29.29");
checkDNS("bilibili.com");
"""
@alias(True, _type="DETECT")
def run():
"""
outnetwork
Quickly detect the situation of the target out of the Internet.
- HTTP(TCP)
- DNS
- UDP
Origin: https://github.com/AntSword-Store/AS_Out-of-Network
"""
res = send(get_php())
for line in res.r_text.split("\n"):
if line.startswith("[+]"):
print(color.green(line))
elif line.startswith("[-]"):
print(color.red(line))
else:
print(line)
|
993,225 | 0a322d03a493200ef4f32473b790707bf83c9fe8 | import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
import time
batch_size = 64
nbr_entrainement = 5
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = (x_train.reshape(-1, 28, 28, 1)/255).astype(np.float32)
x_test = (x_test.reshape(-1, 28, 28, 1)/255).astype(np.float32)
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
model = models.Sequential([
layers.Conv2D(64, 3, strides = 2, activation = 'relu'),
layers.BatchNormalization(),
layers.Conv2D(128, 3, strides = 2, activation = 'relu'),
layers.BatchNormalization(),
layers.Flatten(),
layers.Dense(512, activation = 'relu'),
layers.BatchNormalization(),
layers.Dense(10, activation = 'softmax')
])
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
train_loss = tf.keras.metrics.Mean()
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
test_loss = tf.keras.metrics.Mean()
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
def train(train_ds, nbr_entrainement):
for entrainement in range(nbr_entrainement):
start = time.time()
for images, labels in train_ds:
train_step(images, labels)
message = 'Entrainement {:04d}, loss: {:6.4£}, accuracy: {:7.4£}%, temps: {:7.4£}'
print(message.format(entrainement+1,
train_loss.result(),
train_accuracy.result()*100,
time.time()-start))
train_loss.reset_states()
train_accuracy.reset_states()
def test(test_ds):
start = time.time()
for test_images, test_labels in test_ds:
predictions = model(test_images)
t_loss = loss_object(test_labels, predictions)
test_loss(t_loss)
test_accuracy(test_labels, predictions)
message = 'Loss: {:6.4£}, accuracy: {:7.4£}%, temps: {:7.4£}'
print(message.format(test_loss.result(),
test_accuracy.result()*100,
time.time()-start))
print("Entrainement")
train(train_ds, nbr_entrainement)
print("Jeu de test")
test(test_ds)
|
993,226 | c0711b28278c7cd54fc3d530d9f26fa4dd084eab | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, GObject, Gst, GstBase, Gtk
Gst.init(None)
class GstPipeline:
    """Runs a GStreamer pipeline and feeds appsink frames to a user callback.

    Frames are handed to ``user_function(gstbuffer, src_size, box)`` on a
    dedicated inference thread; the SVG string it returns (if any) is pushed
    to whichever overlay element the pipeline contains.
    """
    def __init__(self, pipeline, user_function, src_size):
        self.user_function = user_function
        self.running = False
        self.gstsample = None
        self.sink_size = None
        self.src_size = src_size
        self.box = None
        # Guards the gstsample/running handoff between the streaming thread
        # (on_new_sample) and the inference thread (inference_loop).
        self.condition = threading.Condition()

        self.pipeline = Gst.parse_launch(pipeline)
        # Any of these overlay elements may be absent depending on platform.
        self.overlay = self.pipeline.get_by_name('overlay')
        self.gloverlay = self.pipeline.get_by_name('gloverlay')
        self.overlaysink = self.pipeline.get_by_name('overlaysink')
        appsink = self.pipeline.get_by_name('appsink')
        appsink.connect('new-preroll', self.on_new_sample, True)
        appsink.connect('new-sample', self.on_new_sample, False)

        # Set up a pipeline bus watch to catch errors.
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.on_bus_message)

        # Set up a full screen window on Coral, no-op otherwise.
        self.setup_window()

    def run(self):
        """Start the inference worker and run the pipeline until Gtk quits."""
        # Start inference worker.
        self.running = True
        worker = threading.Thread(target=self.inference_loop)
        worker.start()

        # Run pipeline.
        self.pipeline.set_state(Gst.State.PLAYING)
        try:
            Gtk.main()
        except:
            pass

        # Clean up.
        self.pipeline.set_state(Gst.State.NULL)
        while GLib.MainContext.default().iteration(False):
            pass
        with self.condition:
            self.running = False
            self.condition.notify_all()
        worker.join()

    def on_bus_message(self, bus, message):
        """Bus watch: quit on EOS/error, log warnings."""
        t = message.type
        if t == Gst.MessageType.EOS:
            Gtk.main_quit()
        elif t == Gst.MessageType.WARNING:
            err, debug = message.parse_warning()
            sys.stderr.write('Warning: %s: %s\n' % (err, debug))
        elif t == Gst.MessageType.ERROR:
            err, debug = message.parse_error()
            sys.stderr.write('Error: %s: %s\n' % (err, debug))
            Gtk.main_quit()
        return True

    def on_new_sample(self, sink, preroll):
        """appsink callback: stash the newest sample for the inference thread."""
        sample = sink.emit('pull-preroll' if preroll else 'pull-sample')
        if not self.sink_size:
            s = sample.get_caps().get_structure(0)
            self.sink_size = (s.get_value('width'), s.get_value('height'))
        with self.condition:
            self.gstsample = sample
            self.condition.notify_all()
        return Gst.FlowReturn.OK

    def get_box(self):
        """Return (x, y, w, h) of the inference crop, computed lazily once."""
        if not self.box:
            glbox = self.pipeline.get_by_name('glbox')
            if glbox:
                glbox = glbox.get_by_name('filter')
            box = self.pipeline.get_by_name('box')
            assert glbox or box
            assert self.sink_size
            if glbox:
                self.box = (glbox.get_property('x'), glbox.get_property('y'),
                            glbox.get_property('width'), glbox.get_property('height'))
            else:
                # videobox crops with negative margins; undo them to recover
                # the original region in source coordinates.
                self.box = (-box.get_property('left'), -box.get_property('top'),
                    self.sink_size[0] + box.get_property('left') + box.get_property('right'),
                    self.sink_size[1] + box.get_property('top') + box.get_property('bottom'))
        return self.box

    def inference_loop(self):
        """Worker thread: run user_function on each new sample, push SVG out."""
        while True:
            with self.condition:
                while not self.gstsample and self.running:
                    self.condition.wait()
                if not self.running:
                    break
                gstsample = self.gstsample
                self.gstsample = None

            # Passing Gst.Buffer as input tensor avoids 2 copies of it.
            gstbuffer = gstsample.get_buffer()
            svg = self.user_function(gstbuffer, self.src_size, self.get_box())
            if svg:
                if self.overlay:
                    self.overlay.set_property('data', svg)
                if self.gloverlay:
                    self.gloverlay.emit('set-svg', svg, gstbuffer.pts)
                if self.overlaysink:
                    self.overlaysink.set_property('svg', svg)

    def setup_window(self):
        """Create a fullscreen Gtk/Wayland window for the Coral overlay sink."""
        # Only set up our own window if we have Coral overlay sink in the pipeline.
        if not self.overlaysink:
            return

        gi.require_version('GstGL', '1.0')
        gi.require_version('GstVideo', '1.0')
        from gi.repository import GstGL, GstVideo

        # Needed to commit the wayland sub-surface.
        def on_gl_draw(sink, widget):
            widget.queue_draw()

        # Needed to account for window chrome etc.
        def on_widget_configure(widget, event, overlaysink):
            allocation = widget.get_allocation()
            overlaysink.set_render_rectangle(allocation.x, allocation.y,
                                             allocation.width, allocation.height)
            return False

        window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
        window.fullscreen()

        drawing_area = Gtk.DrawingArea()
        window.add(drawing_area)
        drawing_area.realize()

        self.overlaysink.connect('drawn', on_gl_draw, drawing_area)

        # Wayland window handle.
        wl_handle = self.overlaysink.get_wayland_window_handle(drawing_area)
        self.overlaysink.set_window_handle(wl_handle)

        # Wayland display context wrapped as a GStreamer context.
        wl_display = self.overlaysink.get_default_wayland_display_context()
        self.overlaysink.set_context(wl_display)

        drawing_area.connect('configure-event', on_widget_configure, self.overlaysink)
        window.connect('delete-event', Gtk.main_quit)
        window.show_all()

        # The appsink pipeline branch must use the same GL display as the screen
        # rendering so they get the same GL context. This isn't automatically handled
        # by GStreamer as we're the ones setting an external display handle.
        def on_bus_message_sync(bus, message, overlaysink):
            if message.type == Gst.MessageType.NEED_CONTEXT:
                _, context_type = message.parse_context_type()
                if context_type == GstGL.GL_DISPLAY_CONTEXT_TYPE:
                    sinkelement = overlaysink.get_by_interface(GstVideo.VideoOverlay)
                    gl_context = sinkelement.get_property('context')
                    if gl_context:
                        display_context = Gst.Context.new(GstGL.GL_DISPLAY_CONTEXT_TYPE, True)
                        GstGL.context_set_gl_display(display_context, gl_context.get_display())
                        message.src.set_context(display_context)
            return Gst.BusSyncReply.PASS

        bus = self.pipeline.get_bus()
        bus.set_sync_handler(on_bus_message_sync, self.overlaysink)
def get_dev_board_model():
    """Best-effort detection of the Coral Dev Board SoC.

    Returns:
        'mx8mq' or 'mt8167' when the device-tree model string matches,
        otherwise None (including on hosts without a device tree).
    """
    try:
        # FIX: use a context manager so the file handle is always closed,
        # and only swallow read/decode errors instead of a bare ``except``.
        with open('/sys/firmware/devicetree/base/model') as f:
            model = f.read().lower()
    except (OSError, UnicodeDecodeError):
        return None
    if 'mx8mq' in model:
        return 'mx8mq'
    if 'mt8167' in model:
        return 'mt8167'
    return None
def run_pipeline(user_function,
                 src_size,
                 appsink_size,
                 videosrc='/dev/video1',
                 videofmt='raw',
                 headless=False):
    """Assemble a GStreamer pipeline string for the source/platform and run it.

    Args:
        user_function: callable(gstbuffer, src_size, box) -> SVG string or None.
        src_size: (width, height) of the capture source.
        appsink_size: (width, height) the inference branch scales frames to.
        videosrc: V4L2 device path, http/rtsp URL, or a video file path.
        videofmt: 'raw', 'h264' or 'jpeg' source caps.
        headless: if True, skip every display/overlay branch.
    """
    if videofmt == 'h264':
        SRC_CAPS = 'video/x-h264,width={width},height={height},framerate=30/1'
    elif videofmt == 'jpeg':
        SRC_CAPS = 'image/jpeg,width={width},height={height},framerate=30/1'
    else:
        SRC_CAPS = 'video/x-raw,width={width},height={height},framerate=30/1'
    if videosrc.startswith('/dev/video'):
        PIPELINE = 'v4l2src device=%s ! {src_caps}'%videosrc
    elif videosrc.startswith('http'):
        PIPELINE = 'souphttpsrc location=%s'%videosrc
    elif videosrc.startswith('rtsp'):
        PIPELINE = 'rtspsrc location=%s'%videosrc
    else:
        # Treat anything else as a local video file.
        demux = 'avidemux' if videosrc.endswith('avi') else 'qtdemux'
        PIPELINE = """filesrc location=%s ! %s name=demux demux.video_0
            ! queue ! decodebin ! videorate
            ! videoconvert n-threads=4 ! videoscale n-threads=4
            ! {src_caps} ! {leaky_q} """ % (videosrc, demux)

    coral = get_dev_board_model()
    if headless:
        scale = min(appsink_size[0] / src_size[0], appsink_size[1] / src_size[1])
        scale = tuple(int(x * scale) for x in src_size)
        scale_caps = 'video/x-raw,width={width},height={height}'.format(width=scale[0], height=scale[1])
        PIPELINE += """ ! decodebin ! queue ! videoconvert ! videoscale
            ! {scale_caps} ! videobox name=box autocrop=true ! {sink_caps} ! {sink_element}
            """
    elif coral:
        if 'mt8167' in coral:
            # MT8167 board: GL path with glsvgoverlay + glimagesink display.
            PIPELINE += """ ! decodebin ! queue ! v4l2convert ! {scale_caps} !
                glupload ! glcolorconvert ! video/x-raw(memory:GLMemory),format=RGBA !
                tee name=t
                t. ! queue ! glfilterbin filter=glbox name=glbox ! queue ! {sink_caps} ! {sink_element}
                t. ! queue ! glsvgoverlay name=gloverlay sync=false ! glimagesink fullscreen=true
                qos=false sync=false
                """
            scale_caps = 'video/x-raw,format=BGRA,width={w},height={h}'.format(w=src_size[0], h=src_size[1])
        else:
            # i.MX8MQ board: Coral glsvgoverlaysink handles display + overlay.
            PIPELINE += """ ! decodebin ! glupload ! tee name=t
                t. ! queue ! glfilterbin filter=glbox name=glbox ! {sink_caps} ! {sink_element}
                t. ! queue ! glsvgoverlaysink name=overlaysink
                """
            scale_caps = None
    else:
        # Generic desktop path: rsvgoverlay + ximagesink for display.
        scale = min(appsink_size[0] / src_size[0], appsink_size[1] / src_size[1])
        scale = tuple(int(x * scale) for x in src_size)
        scale_caps = 'video/x-raw,width={width},height={height}'.format(width=scale[0], height=scale[1])
        PIPELINE += """ ! tee name=t
            t. ! {leaky_q} ! videoconvert ! videoscale ! {scale_caps} ! videobox name=box autocrop=true
            ! {sink_caps} ! {sink_element}
            t. ! {leaky_q} ! videoconvert
            ! rsvgoverlay name=overlay ! videoconvert ! ximagesink sync=false
            """

    SINK_ELEMENT = 'appsink name=appsink emit-signals=true max-buffers=1 drop=true'
    SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
    LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'

    src_caps = SRC_CAPS.format(width=src_size[0], height=src_size[1])
    sink_caps = SINK_CAPS.format(width=appsink_size[0], height=appsink_size[1])
    pipeline = PIPELINE.format(leaky_q=LEAKY_Q,
        src_caps=src_caps, sink_caps=sink_caps,
        sink_element=SINK_ELEMENT, scale_caps=scale_caps)

    print('Gstreamer pipeline:\n', pipeline)

    pipeline = GstPipeline(pipeline, user_function, src_size)
    pipeline.run()
|
993,227 | 8e9548dbf8990b5d2a538bb77c9db65d704c3ea6 | from common.logger import Logger
# Load a previously saved training log from disk and plot it.
logger = Logger()
# NOTE(review): machine-specific absolute Windows path — consider making this
# a command-line argument.
logger.set_dir('C:\\Users\\Zhiming Zhou\\OneDrive\\workspace\\AdaShift-LGANs-MaxGP-refactored\\result\\syn_toy\\toy_case1_Adam1e-04_beta0.0_0.999_decay400000_mlp128x64_relu_lsgan_1.00_decay\\')
logger.load()
logger.plot()
993,228 | 8e2ebd7d6fa1fc198a95a7963aa9a310d364348c | # -*- coding:utf-8 -*-
"""
2016.1.10
self is a alias of the real object
"""
class Book:
    """A book that counts how many times it has been read (Python 2)."""
    # Class-level defaults; instance attributes shadow them once assigned.
    x = 0
    name = ""
    def __init__(self,name): # common
        self.name = name
    def readtimes(self):
        # Reading x falls back to the class attribute the first time; the
        # assignment then creates a per-instance counter.
        self.x = self.x + 1
        print "%s was read %s times." %(self.name, self.x)
    #def __del__(self): # seldom used
    #    print "I'm deleted...", self.x
class PyBook(Book): #inheritance
    """Book subclass that additionally tracks how often it was borrowed."""
    # Borrow counter (class default; per-instance after first borrow).
    times = 0
    def borrowtimes(self, borrower):
        # Borrowing also counts as a read.
        self.readtimes()
        self.times = self.times + 1
        print "%s was borrowed %s times by %s." %(self.name, self.times, borrower)
# Demo: exercise Book and PyBook.
a = Book("complexity")
a.name  # NOTE(review): bare attribute access has no effect outside a REPL
a.readtimes()
b = PyBook("Core of Python")
b.name
b.borrowtimes("Sally")
b.borrowtimes("Sabie")
#print dir(complexity) # show the operations that the object can perform
#y = "hello world"
#print dir(y)
#print y.split()
|
993,229 | d48455b00e8a1bbfd6e69105c8a206c36e3e9534 | import requests
import json
import threading
import time
# Ad-hoc client script: POSTs JSON commands to services on the local network.
headers = {'PRIVATE-TOKEN': '<your_access_token>', 'Content-Type':'application/json'}

# Send the 'bak' registration to the conquer node
# (original comment: "Envio al conquer el bak").
data_file = {
    'ip':'192.168.0.14',
    'puerto':5701
}
url = '192.168.0.14:5700/bak'
req = requests.post('http://'+url,data=json.dumps(data_file), headers=headers)

# Kick off preprocessing with cluster counts K and a dataset name.
datas = {
    'K':[3,4],
    # 'data_balance':'D',
    # 'type_balance':'RR',
    # 'name':'DataPreproces',
    # 'type_cluster':'Kmeans',
    'name':'Diferencial_EMAS-MERRA_'
    # 'workers':int(arg[2])
}
url = '192.168.0.14:5603/preprocer'
req = requests.post('http://'+url,data=json.dumps(datas), headers=headers)

# Commented-out experiments kept for reference:
# datas = {
#     'ip':'192.168.0.14',
#     'puerto':4000
# }
# url = '192.168.0.14:5701/save_workers'
# req = requests.post('http://'+url,data=json.dumps(datas), headers=headers)
# json_r = req.json()
# print(json_r['response'])
# url = '192.168.0.12:4011/RECIBIR_BALAANCE'
# for x in range(25):
#     print(x)
#     req = requests.post('http://'+url,data=json.dumps(datas), headers=headers)
#     json_r = req.json()
#     print(json_r['response'])
#     time.sleep(3)
|
993,230 | 62cd0e82dea95d57c7790834389b192e59c798d3 | import json
from app.models import db, Loot_Item
path = '/home/conrad/Documents/bl3Rework/bl3-companion-updated/app/seeds/'\
'loot_data/loot.txt'
def seed_loot_items():
    """Seed the loot_items table from the loot.txt JSON fixture.

    Parses the file at module-level ``path`` (a JSON list of loot dicts)
    and bulk-inserts one Loot_Item row per entry.
    """
    # This will need to be changed when we deploy.
    # FIX: open with a context manager so the handle is always closed
    # (the original left the file open), and let json parse the stream.
    with open(path, "r") as f:
        loot_list = json.load(f)
    loot_seeds = [Loot_Item(item_name=loot['name'],
                            type=loot['type'],
                            elemental_types=loot['possibleElements'],
                            manufacturers=loot['possibleManufacturers'])
                  for loot in loot_list]
    db.session.add_all(loot_seeds)
    db.session.commit()
# Uses a raw SQL query to TRUNCATE the loot_items table.
# SQLAlchemy doesn't have a built in function to do this
# TRUNCATE Removes all the data from the table, and resets
# the auto incrementing primary key
def undo_loot_items():
    """Empty the loot_items table and reset its primary-key sequence."""
    db.session.execute('TRUNCATE loot_items;')
    db.session.commit()
|
993,231 | 1d722591ea7d5867674b523320364140c2d26cc5 | # -*- coding: utf-8 -*-
import logging
from chatbot import MessageGenerator
class RegularTweet(object):
    """Callable that generates one message and posts it via a tweepy-style API."""

    def __init__(self, config):
        # Build the generator from the [chatbot] section of the config parser.
        self.generator = MessageGenerator(**dict(config.items('chatbot')))

    def __call__(self, api):
        try:
            text = self.generator.generate()
            api.update_status(text)
            logging.debug('sent tweet. text=%s' % text)
        except Exception:
            # BUG FIX: ``logging.exeption`` was a typo that raised
            # AttributeError inside the handler; also narrow the bare
            # ``except`` so SystemExit/KeyboardInterrupt still propagate.
            logging.exception('failed to send tweet.')
|
993,232 | db4adc12de4c1674d1b368e8184f47b70e7577e4 | import sys
# Report the interpreter version: the full version_info tuple, then the
# major component on its own.
version = sys.version_info
print(version)
print(version.major)
|
993,233 | 2b6f42fa0c97dbfe527b8d071996c8d008d9ccdd | from clove.network.bitcoin import Bitcoin
class Pesobit(Bitcoin):
    """
    Class with all the necessary Pesobit network information based on
    https://github.com/pesobitph/pesobit-source/blob/master/src/net.cpp
    (date of access: 02/17/2018)
    """
    name = 'pesobit'
    symbols = ('PSB', )
    # Known seed node(s) for peer discovery.
    nodes = ("212.24.104.88", )
    port = 7867
    # Magic bytes prefixing every P2P message on this network.
    message_start = b'\xea\xaf\xe3\xc7'
    # Version bytes for base58 address/key encodings.
    base58_prefixes = {
        'PUBKEY_ADDR': 55,
        'SCRIPT_ADDR': 85,
        'SECRET_KEY': 183
    }
    # no testnet
|
993,234 | 5bc9a474dd655f07df945d2059211918d3b204bb | #!/usr/bin/python
# encoding:utf-8
# Convert a plain-text note file with 'Q:'/'A:' markers into a two-column
# CSV that Anki can import (Python 2 script).
Question = []
Answer = []
# Set the path to your own note address and note filename
noteFile ="/Users/ad/Downloads/Anki_ICU_Book.txt"
file = open(noteFile)
line =' '
while line :
    line = file.readline()
    # get Question by token 'Q:'
    if 'Q:' in line and len(line) > 4:
        # just support single line question
        line = line.replace('\n','')
        Question.append(line)
    # get answer by token 'A:', with Markdown '-', support muti-lines
    if 'A:' in line:
        tmp = line
        # Accumulate lines until a blank line or EOF ends the answer.
        while (not line.startswith('\n') ) and line:
            line = file.readline()
            # apply to the markdown
            while line.startswith('-'):
                line = line.replace('-',' ',1)
            tmp = tmp + line
        if len(tmp.strip()) > 3 :
            Answer.append(tmp)
file.close()
# temp file
file = open(noteFile + '_tmp.csv','w')
i = 0
print 'Q&A writing into csv file, casds like this:'
while i < len(Question):
    print Question[i]
    print Answer[i]
    print '...........'
    # Quote both fields; assumes no embedded double quotes in the notes.
    file.write('"' + Question[i]+'",')
    file.write('"' + Answer[i] + '"\n')
    i = i + 1
file.close()
print 'To import into Anki, the tmp file path & name is: ', noteFile + '_tmp.csv'
|
993,235 | 98e0388678d3bf39a792a034e614cbef29b8bd03 | import numpy as np
import mcts
import UNIverse as univ
from graph_tool.all import *
def draw_graph(magent):
    """Render the agent's search graph with graphviz, using the graph's
    own label/color vertex properties."""
    tree = magent.graph
    props = {
        'label': tree.vp.label,
        'fillcolor': tree.vp.color,
        'overlap': 'prism1000',
        'overlap_scaling': '30',
    }
    graphviz_draw(tree, vprops=props)
# Build a universe and run a few MCTS rollouts from its initial state.
universe = univ.universe()
magent = mcts.mcts(universe,1,'test',visualize=False)
for i in range(4):
    magent.rollout(universe.state)
# magent.visualize_props()
# draw_graph(magent)
|
993,236 | a67226dc10b39fddb3cc264cebc56ef20607ba7b | class Solution:
# @param {string} s
# @param {string} p
# @return {boolean}
def isMatch(self, s, p):
if not p: #If p is "", return false, unless s is "" too.
return not s
m=len(s)
n=len(p)
i=0 #Record the number of consecutive matching characters starting from the beginning in s.
j=0 #Record the number of consecutive matching characters starting from the beginning in p.
lastx = 0 #Record the last index where '*' appears in s.
lasty = -1 #Record the last index where '*' appears in p.
while i<m:
if j<n and (p[j]=='?' or p[j]==s[i]): #If p[j] equals s[i] or p[j] is '?', go on.
i+=1
j+=1
elif j<n and p[j]=='*': #If p[j] is '*', save current indexes.
lastx=i
lasty=j
j+=1
elif lasty>=0: #If there is a '*', if any part is the right of s matches the part behind '*' in p, then s matches p.
i=lastx+1
lastx+=1
j=lasty
else:
return False
if i<m: #If i<m, all the characters in p are matched but there are still remaining characters in s, so s unmatches p.
return False
while j<n and p[j]=='*': #To deal with If tail '*' in p, j+=1.
j+=1
return j==n #If j equals n, s matches p; else, s unmatches p.
|
993,237 | 3ea375d046655499396504f089410abb1c56fbfe | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A dodai hypervisor.
"""
import os
import os.path
import tempfile
import httplib, urllib
from nova import exception
from nova import log as logging
from nova import utils
from nova.compute import power_state
from nova.compute import instance_types
from nova.virt import driver
from nova import db
from nova.virt import images
from nova import flags
from nova.virt.dodai import ofc_utils
from nova.compute import vm_states
from nova.db.sqlalchemy.session import get_session_dodai
from eventlet import greenthread
LOG = logging.getLogger('nova.virt.dodai')
FLAGS = flags.FLAGS
def get_connection(_):
    """Return the singleton Dodai driver instance."""
    # The read_only parameter is ignored.
    return DodaiConnection.instance()
class DodaiConnection(driver.ComputeDriver):
"""Dodai hypervisor driver"""
def __init__(self):
self.host_status = {
'host_name-description': 'Dodai Compute',
'host_hostname': 'dodai-compute',
'host_memory_total': 8000000000,
'host_memory_overhead': 10000000,
'host_memory_free': 7900000000,
'host_memory_free_computed': 7900000000,
'host_other_config': {},
'host_ip_address': '192.168.1.109',
'host_cpu_info': {},
'disk_available': 500000000000,
'disk_total': 600000000000,
'disk_used': 100000000000,
'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
'host_name_label': 'dodai-compute'}
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
including catching up with currently running VM's on the given host."""
LOG.debug("init_host")
def get_host_stats(self, refresh=False):
"""Return Host Status of ram, disk, network."""
return self.host_status
def get_info(self, instance_name):
"""Get the current status of an instance, by name (not ID!)
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
LOG.debug("get_info")
instance_id = self._instance_name_to_id(instance_name)
bmm = db.bmm_get_by_instance_id(None, instance_id)
status = PowerManager(bmm["ipmi_ip"]).status()
if status == "on":
inst_power_state = power_state.RUNNING
else:
inst_power_state = power_state.SHUTOFF
return {'state': inst_power_state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
layer, as a list.
"""
LOG.debug("list_instances")
instance_ids = []
bmms = db.bmm_get_all(None)
for bmm in bmms:
if not bmm["instance_id"]:
continue
instance_ids.append(self._instance_id_to_name(bmm["instance_id"]))
return instance_ids
def list_instances_detail(self, context):
"""Return a list of InstanceInfo for all registered VMs"""
LOG.debug("list_instances_detail")
info_list = []
bmms = db.bmm_get_all_by_instance_id_not_null(context)
for bmm in bmms:
instance = db.instance_get(context, bmm["instance_id"])
status = PowerManager(bmm["ipmi_ip"]).status()
if status == "off":
inst_power_state = power_state.SHUTOFF
if instance["vm_state"] == vm_states.ACTIVE:
db.instance_update(context, instance["id"], {"vm_state": vm_states.STOPPED})
else:
inst_power_state = power_state.RUNNING
if instance["vm_state"] == vm_states.STOPPED:
db.instance_update(context, instance["id"], {"vm_state": vm_states.ACTIVE})
info_list.append(driver.InstanceInfo(self._instance_id_to_name(bmm["instance_id"]),
inst_power_state))
return info_list
def _instance_id_to_name(self, instance_id):
return FLAGS.instance_name_template % instance_id
def _instance_name_to_id(self, instance_name):
return int(instance_name.split("-")[1], 16)
def spawn(self, context, instance,
network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info:
"""
LOG.debug("spawn")
instance_zone, cluster_name, vlan_id, create_cluster = self._parse_zone(instance["availability_zone"])
# update instances table
bmm, reuse = self._select_machine(context, instance)
instance["display_name"] = bmm["name"]
instance["availability_zone"] = instance_zone
db.instance_update(context,
instance["id"],
{"display_name": bmm["name"],
"availability_zone": instance_zone})
if vlan_id:
db.bmm_update(context, bmm["id"], {"availability_zone": cluster_name,
"vlan_id": vlan_id,
"service_ip": None})
if instance_zone == "resource_pool":
self._install_machine(context, instance, bmm, cluster_name, vlan_id)
else:
self._update_ofc(bmm, cluster_name)
if bmm["instance_id"]:
db.instance_destroy(context, bmm["instance_id"])
if reuse:
db.bmm_update(context, bmm["id"], {"status": "used",
"instance_id": instance["id"]})
else:
self._install_machine(context, instance, bmm, cluster_name, vlan_id)
if instance["key_data"]:
self._inject_key(bmm["pxe_ip"], str(instance["key_data"]))
def _inject_key(self, pxe_ip, key_data):
conn = httplib.HTTPConnection(pxe_ip, "4567")
params = urllib.urlencode({"key_data": key_data.strip()})
headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
conn.request("PUT", "/services/dodai-instance/key.json", params, headers)
response = conn.getresponse()
data = response.read()
LOG.debug(response.status)
LOG.debug(response.reason)
LOG.debug(data)
def _parse_zone(self, zone):
create_cluster = False
vlan_id = None
cluster_name = "resource_pool"
instance_zone = zone
parts = zone.split(",")
if len(parts) >= 2:
if parts[0] == "C":
parts.pop(0)
create_cluster = True
cluster_name, vlan_id = parts
vlan_id = int(vlan_id)
instance_zone = ",".join(parts)
return instance_zone, cluster_name, vlan_id, create_cluster
def _install_machine(self, context, instance, bmm, cluster_name, vlan_id, update_instance=False):
db.bmm_update(context, bmm["id"], {"instance_id": instance["id"]})
mac = self._get_pxe_mac(bmm)
# fetch image
image_base_path = self._get_cobbler_image_path()
if not os.path.exists(image_base_path):
utils.execute('mkdir', '-p', image_base_path)
image_path = self._get_cobbler_image_path(instance)
if not os.path.exists(image_path):
image_meta = images.fetch(context,
instance["image_ref"],
image_path,
instance["user_id"],
instance["project_id"])
else:
image_meta = images.show(context, instance["image_ref"])
image_type = "server"
image_name = image_meta["name"] or image_meta["properties"]["image_location"]
if image_name.find("dodai-deploy") == -1:
image_type = "node"
# begin to install os
pxe_ip = bmm["pxe_ip"] or "None"
pxe_mac = bmm["pxe_mac"] or "None"
storage_ip = bmm["storage_ip"] or "None"
storage_mac = bmm["storage_mac"] or "None"
service_mac1 = bmm["service_mac1"] or "None"
service_mac2 = bmm["service_mac2"] or "None"
instance_path = self._get_cobbler_instance_path(instance)
if not os.path.exists(instance_path):
utils.execute('mkdir', '-p', instance_path)
self._cp_template("create.sh",
self._get_cobbler_instance_path(instance, "create.sh"),
{"INSTANCE_ID": instance["id"],
"IMAGE_ID": instance["image_ref"],
"COBBLER": FLAGS.cobbler,
"HOST_NAME": bmm["name"],
"STORAGE_IP": storage_ip,
"STORAGE_MAC": storage_mac,
"PXE_IP": pxe_ip,
"PXE_MAC": pxe_mac,
"SERVICE_MAC1": bmm["service_mac1"],
"SERVICE_MAC2": bmm["service_mac2"],
"IMAGE_TYPE": image_type,
"MONITOR_PORT": FLAGS.dodai_monitor_port,
"ROOT_SIZE": FLAGS.dodai_partition_root_gb,
"SWAP_SIZE": FLAGS.dodai_partition_swap_gb,
"EPHEMERAL_SIZE": FLAGS.dodai_partition_ephemeral_gb,
"KDUMP_SIZE": FLAGS.dodai_partition_kdump_gb})
self._cp_template("pxeboot_action",
self._get_pxe_boot_file(mac),
{"INSTANCE_ID": instance["id"],
"COBBLER": FLAGS.cobbler,
"PXE_MAC": pxe_mac,
"ACTION": "create"})
LOG.debug("Reboot or power on.")
self._reboot_or_power_on(bmm["ipmi_ip"])
# wait until starting to install os
while self._get_state(context, instance) != "install":
greenthread.sleep(20)
LOG.debug("Wait until begin to install instance %s." % instance["id"])
self._cp_template("pxeboot_start", self._get_pxe_boot_file(mac), {})
# wait until starting to reboot
while self._get_state(context, instance) != "install_reboot":
greenthread.sleep(20)
LOG.debug("Wait until begin to reboot instance %s after os has been installed." % instance["id"])
power_manager = PowerManager(bmm["ipmi_ip"])
power_manager.soft_off()
while power_manager.status() == "on":
greenthread.sleep(20)
LOG.debug("Wait unit the instance %s shuts down." % instance["id"])
power_manager.on()
# wait until installation of os finished
while self._get_state(context, instance) != "installed":
greenthread.sleep(20)
LOG.debug("Wait until instance %s installation finished." % instance["id"])
if cluster_name == "resource_pool":
status = "active"
else:
status = "used"
db.bmm_update(context, bmm["id"], {"status": status})
if update_instance:
db.instance_update(context, instance["id"], {"vm_state": vm_states.ACTIVE})
def _update_ofc(self, bmm, cluster_name):
try:
ofc_utils.update_for_run_instance(FLAGS.ofc_service_url,
cluster_name,
bmm["server_port1"],
bmm["server_port2"],
bmm["dpid1"],
bmm["dpid2"])
except Exception as ex:
LOG.exception(_("OFC exception %s"), unicode(ex))
def _get_state(self, context, instance):
# check if instance exists
instance_ref = db.instance_get(context, instance["id"])
if instance_ref["deleted"]:
raise exception.InstanceNotFound(instance_id=instance["id"])
path = self._get_cobbler_instance_path(instance, "state")
if not os.path.exists(path):
return ""
if not os.path.isfile(path):
return ""
f = open(path)
state = f.read().strip()
f.close()
LOG.debug("State of instance %d: %s" % (instance["id"], state))
return state
def _get_pxe_mac(self, bmm):
return "01-%s" % bmm["pxe_mac"].replace(":", "-").lower()
def _select_machine(self, context, instance):
inst_type = instance_types.get_instance_type(instance['instance_type_id'])
bmm_found = None
reuse = False
# create a non autocommit session
session = get_session_dodai(False)
session.begin()
try:
bmms = db.bmm_get_all_by_instance_type(context, inst_type["name"], session)
if instance["availability_zone"] == "resource_pool": #Add a machine to resource pool.
for bmm in bmms:
if bmm["availability_zone"] != "resource_pool":
continue
if bmm["status"] != "inactive":
continue
bmm_found = bmm
break
else:
for bmm in bmms:
if bmm["availability_zone"] != "resource_pool":
continue
if bmm["status"] != "active":
continue
instance_ref = db.instance_get(context, bmm["instance_id"])
if instance_ref["image_ref"] != instance["image_ref"]:
continue
bmm_found = bmm
reuse = True
break
if not bmm_found:
for bmm in bmms:
if bmm["status"] == "used" or bmm["status"] == "processing":
continue
bmm_found = bmm
reuse = False
break
if bmm_found:
db.bmm_update(context, bmm_found["id"], {"status": "processing"}, session)
except Exception as ex:
LOG.exception(ex)
session.rollback()
raise exception.BareMetalMachineUnavailable()
session.commit()
if bmm_found:
return bmm_found, reuse
raise exception.BareMetalMachineUnavailable()
def _get_cobbler_instance_path(self, instance, file_name = ""):
return os.path.join(FLAGS.cobbler_path,
"instances",
str(instance["id"]),
file_name)
def _get_cobbler_image_path(self, instance = None):
if instance:
return os.path.join(FLAGS.cobbler_path,
"images",
str(instance["image_ref"]))
else:
return os.path.join(FLAGS.cobbler_path,
"images")
def _get_pxe_boot_file(self, mac):
return os.path.join(FLAGS.pxe_boot_path, mac)
def _get_disk_size_mb(self, instance):
inst_type_id = instance['instance_type_id']
inst_type = instance_types.get_instance_type(inst_type_id)
if inst_type["local_gb"] == 0:
return 10 * 1024
return inst_type["local_gb"] * 1024
def _reboot_or_power_on(self, ip):
power_manager = PowerManager(ip)
status = power_manager.status()
LOG.debug("The power is " + status)
if status == "off":
power_manager.on()
else:
power_manager.reboot()
def _cp_template(self, template_name, dest_path, params):
f = open(utils.abspath("virt/dodai/" + template_name + ".template"), "r")
content = f.read()
f.close()
path = os.path.dirname(dest_path)
if not os.path.exists(path):
os.makedirs(path)
for key, value in params.iteritems():
content = content.replace(key, str(value))
f = open(dest_path, "w")
f.write(content)
f.close
def destroy(self, context, instance, network_info, cleanup=True):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param cleanup:
"""
LOG.debug("destroy")
bmm = db.bmm_get_by_instance_id(context, instance["id"])
db.bmm_update(context, bmm["id"], {"status": "processing"})
mac = self._get_pxe_mac(bmm)
# update ofc
self._update_ofc_for_destroy(context, bmm)
db.bmm_update(context, bmm["id"], {"vlan_id": None,
"availability_zone": "resource_pool"})
# begin to delete os
self._cp_template("delete.sh",
self._get_cobbler_instance_path(instance, "delete.sh"),
{"INSTANCE_ID": instance["id"],
"COBBLER": FLAGS.cobbler,
"MONITOR_PORT": FLAGS.dodai_monitor_port})
self._cp_template("pxeboot_action",
self._get_pxe_boot_file(mac),
{"INSTANCE_ID": instance["id"],
"COBBLER": FLAGS.cobbler,
"PXE_MAC": bmm["pxe_mac"],
"ACTION": "delete"})
self._reboot_or_power_on(bmm["ipmi_ip"])
# wait until starting to delete os
while self._get_state(context, instance) != "deleted":
greenthread.sleep(20)
LOG.debug("Wait until data of instance %s was deleted." % instance["id"])
utils.execute("rm", "-rf", self._get_cobbler_instance_path(instance));
# update db
db.bmm_update(context, bmm["id"], {"instance_id": None,
"service_ip": None})
return db.bmm_get(context, bmm["id"])
def _update_ofc_for_destroy(self, context, bmm):
# update ofc
try:
LOG.debug("vlan_id: " + str(bmm["vlan_id"]))
ofc_utils.update_for_terminate_instance(FLAGS.ofc_service_url,
bmm["availability_zone"],
bmm["server_port1"],
bmm["server_port2"],
bmm["dpid1"],
bmm["dpid2"],
bmm["vlan_id"])
except Exception as ex:
LOG.exception(_("OFC exception %s"), unicode(ex))
def add_to_resource_pool(self, context, instance, bmm):
# begin to install default os
self._install_machine(context, instance, bmm, "resource_pool", None, True)
def stop(self, context, instance):
LOG.debug("stop")
bmm = db.bmm_get_by_instance_id(context, instance["id"])
PowerManager(bmm["ipmi_ip"]).off()
def start(self, context, instance):
LOG.debug("start")
bmm = db.bmm_get_by_instance_id(context, instance["id"])
PowerManager(bmm["ipmi_ip"]).on()
def reboot(self, instance, network_info):
"""Reboot the specified instance.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
"""
LOG.debug("reboot")
bmm = db.bmm_get_by_instance_id(None, instance["id"])
PowerManager(bmm["ipmi_ip"]).reboot()
    def update_available_resource(self, ctxt, host):
        """Updates compute manager resource info on ComputeNode table.

        This method is called when nova-compute launches, and
        whenever admin executes "nova-manage service update_resource".

        :param ctxt: security context
        :param host: hostname that compute manager is currently running

        Intentionally a no-op in this driver: resource reporting is not
        implemented for bare-metal machines.
        """
        LOG.debug("update_available_resource")
        return
    def reset_network(self, instance):
        """reset networking for specified instance

        Intentionally a no-op in this driver.
        """
        LOG.debug("reset_network")
        return
class PowerManager(object):
    """Chassis power control for a single host, backed by ``ipmitool``."""

    def __init__(self, ip):
        # Address of the host's IPMI/BMC interface.
        self.ip = ip

    def on(self):
        return self._execute("on")

    def off(self):
        return self._execute("off")

    def soft_off(self):
        return self._execute("soft")

    def reboot(self):
        return self._execute("reset")

    def status(self):
        # The power state is the fourth whitespace-separated token of the
        # "chassis power status" output line.
        return self._execute("status").split(" ")[3].strip()

    def _execute(self, subcommand):
        """Run ``ipmitool ... chassis power <subcommand>``; return stdout."""
        args = ("/usr/bin/ipmitool", "-I", "lan", "-H", self.ip,
                "-U", FLAGS.ipmi_username, "-P", FLAGS.ipmi_password,
                "chassis", "power", subcommand)
        stdout, _stderr = utils.execute(*args)
        return stdout
|
993,238 | 3897260f4cebd8eef1898595f8a969f0d6070ca1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 20:42:08 2020
@author: ryan
"""
from pipeline_functions import *
import numpy as np
import pandas as pd
import cv2
from tqdm import tqdm
from scipy.ndimage import zoom
from contour_utils import *
from find_features import features
from scipy import sparse
from LoopingArray import LoopingArray
import pickle
import matplotlib.pyplot as plt
from datetime import datetime
t1=datetime.now()
# Frame geometry / video parameters for this recording.
IMAGELENGTH=500
fps=40
videopath="TailBeatingExamples/Copy of IM1_IM2_2.1.1_L.mp4"
#conservative_mask
# Load DeepLabCut tracking output (HDF5) and keep frames 70000-150000.
path = "h5files/h5 2/IM1_IM2_2.1.1_LDLC_resnet50_DLC_toptrackFeb27shuffle1_170000.h5"
f = pd.HDFStore(path,'r')
df = f.get('df_with_missing')
df.columns = df.columns.droplevel()
df=df.iloc[70000:150000,:]
# Derive secondary features from the filtered keypoints.
new_features=features(starttime=0,endtime=80000)
filtered_df=new_features.filter_df(df,add_midpoint=True)
new_features.fit(filtered_df)
other_features,_,_,_=new_features.export_df()
# Clean the head trajectory: interpolate gaps, drop implausible jumps,
# then interpolate again over the removed points.
filtered_head=filtered_df.A_head.interpolate(method="nearest")
filtered_head=relative_position_check(filtered_head,max_dist=60,max_counter=30)
filtered_head=filtered_head.interpolate(method="nearest")
# Coarse per-frame fish masks, pickled as an intermediate checkpoint.
contour_array=find_conservative_mask(videopath,length=80000,start=70000,step=1)
with open("data/IM1_IM2_2.1.1_L_70000_150000_contour_raw_NOV12", "wb") as fp:
    pickle.dump(contour_array, fp)
#more accurate contour and other features
new_contour_array=find_tail(videopath,contour_array,filtered_head,start=70000,step=1,interpolate=False,size=IMAGELENGTH)
with open("data/IM1_IM2_2.1.1_L_70000_150000_contour_refined_NOV12", "wb") as fp:
    pickle.dump(new_contour_array, fp)
# Per-frame head/tail localisation on the refined contours.
curve_scores=[]
tail_indexs=[]
better_head_indexs=[]
lengths=[]
head_x=np.array(filtered_head.x)
head_y=np.array(filtered_head.y)
for i in tqdm(range(len(new_contour_array))):
    contour=new_contour_array[i]
    #since image is interpolated, and its size is 3 times before
    head_index=head_on_contour(head_x[i], head_y[i], contour)
    better_head_index,tail_index,curve_score,length=predict_tail(contour,head_index,step=[33,57],neighbor_width=[17,17],
                                                                 frame=i)
    tail_indexs.append(tail_index)
    curve_scores.append(curve_score)
    lengths.append(length)
    better_head_indexs.append(better_head_index)
# Forward-fill frames where detection failed (NaN) before casting to int.
head_indexs=pd.Series(better_head_indexs).fillna(method="ffill").astype(int)
tail_indexs=pd.Series(tail_indexs).fillna(method="ffill").astype(int)
with open("data/IM1_IM2_2.1.1_L_70000_150000_curve_scores_NOV12", "wb") as fp:
    pickle.dump(curve_scores, fp)
with open("data/IM1_IM2_2.1.1_L_70000_150000_tail_index_NOV12", "wb") as fp:
    pickle.dump(tail_indexs, fp)
with open("data/IM1_IM2_2.1.1_L_70000_150000_head_index_NOV12", "wb") as fp:
    pickle.dump(better_head_indexs, fp)
with open("data/IM1_IM2_2.1.1_L_70000_150000_fish_segment_length_NOV12", "wb") as fp:
    pickle.dump(lengths, fp)
# Compute tail angle / deviation per frame from the scored contour points.
tail_angles=[]
tail_devs=[]
for i in range(len(new_contour_array)):
    # assumes curve_scores[i] is an (n, >=2) array with x,y in the first
    # two columns -- TODO confirm against predict_tail's return value
    contour=curve_scores[i][:,:2]
    contour=contour.squeeze()
    N=len(contour)  # NOTE(review): unused in this loop
    head_index=head_indexs[i]
    tail_index=tail_indexs[i]
    tail_angle,tail_dev=compute_TailAngle_Dev(head_index,tail_index,contour)
    tail_angles.append(tail_angle)
    tail_devs.append(tail_dev)
tail_angles=np.array(tail_angles)
tail_devs=np.array(tail_devs)
other_features["Tail_Angle"]=tail_angles
other_features["Tail_Dev"]=tail_devs
other_features['X_Position']=filtered_head.x
# NOTE(review): "IM2_IM2" looks like a typo of "IM1_IM2" -- confirm the
# intended output file name before relying on it downstream.
other_features.to_csv("data/IM2_IM2_2.1.1_L_data_auto_scored.csv")
t2=datetime.now()
print("time_elapsed={}".format(t2-t1))
993,239 | 50751ee9cb61c79c7903084b9bd3ae767b7f9978 | # https://atcoder.jp/contests/abc095/tasks/arc096_b
# 嘘解法やないかい
# good noteにアイデアを書き出してる
# 重要な考察だけ
# 貪欲に次を決定する方法は取れない。∵見られない寿司が出てくる。その栄養価が高いとWA
# 最短距離を歩くにはずっと時計回りに進んでからどこかで折り返して反時計回りに進むのが最適。(逆も試してより大きな方の結果を得れば良い)
# じゃどこで折り返すのがMAX?スコアの累積のMAX(左端)でしょ
import sys
read = sys.stdin.readline
ra = range
enu = enumerate
def read_ints():
    """Parse one whitespace-separated line of integers from stdin."""
    return [int(tok) for tok in read().split()]
def read_col(H):
    """Read H rows of integers and return the columns as lists.

    Example: for two input columns A and B use ``A, B = read_col(H)``;
    for a single column use ``A, = read_col(H)``.
    """
    rows = [list(map(int, read().split())) for _ in range(H)]
    return tuple(map(list, zip(*rows)))
MOD = 10**9 + 7
INF = 2**31 # 2147483648 > 10**9
# default import
from collections import defaultdict, Counter, deque
from operator import itemgetter
from itertools import product, permutations, combinations, accumulate
from bisect import bisect_left, bisect_right # , insort_left, insort_right
# N sushi on a circle of circumference C; X positions, V nutrition values.
N, C = read_ints()
X, V = read_col(N)
def ret_ans(X, V):
    """Best score walking clockwise first, then optionally doubling back.

    Implements one orientation only; the caller mirrors the circle to cover
    the counter-clockwise-first case.  Uses the module-level N (sushi count)
    and C (circumference).
    """
    V_accum = list(accumulate(V))
    ma = 0
    idx_ma = 0
    ret_ls = [-1] * N
    # Phase 1: walk clockwise only and stop at sushi j; remember the best
    # prefix (its index is the turn-around point for phase 2).
    for j, (v, x) in enu(zip(V_accum, X)):
        ret_ls[j] = v - x
        if v - x > ma:
            ma = v - x
            idx_ma = j
    # Phase 2: turn around at idx_ma and continue counter-clockwise to
    # sushi i (i > idx_ma), paying the clockwise leg twice.
    v = V[idx_ma]
    x_idx_ma = X[idx_ma]
    for i in range(N - 1, idx_ma, -1):
        v += V[i]
        y = C - X[i]
        # BUG FIX: this previously wrote to ret_ls[j] -- a stale index left
        # over from the first loop -- so every phase-2 candidate clobbered a
        # single slot.  Store under i, keeping the better of both phases.
        ret_ls[i] = max(ret_ls[i], v - (y + 2 * x_idx_ma))
    # (debug print removed: it corrupted the judged output)
    return max(ret_ls)
ans1 = ret_ans(X, V)
# Mirror the circle (x -> C - x, reversed order) so the same one-directional
# routine evaluates the counter-clockwise-first strategy.
V_reversed = V[::-1]
X_reversed = []
for x in reversed(X):
    X_reversed.append(C - x)
print(max(ret_ans(X_reversed, V_reversed), ans1))
|
993,240 | cf937743447d13bebb4571106a719170cf4d5641 | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/rob/baxter/devel/.private/sound_play/share/sound_play/msg/SoundRequestAction.msg;/home/rob/baxter/devel/.private/sound_play/share/sound_play/msg/SoundRequestActionGoal.msg;/home/rob/baxter/devel/.private/sound_play/share/sound_play/msg/SoundRequestActionResult.msg;/home/rob/baxter/devel/.private/sound_play/share/sound_play/msg/SoundRequestActionFeedback.msg;/home/rob/baxter/devel/.private/sound_play/share/sound_play/msg/SoundRequestGoal.msg;/home/rob/baxter/devel/.private/sound_play/share/sound_play/msg/SoundRequestResult.msg;/home/rob/baxter/devel/.private/sound_play/share/sound_play/msg/SoundRequestFeedback.msg;/home/rob/baxter/src/sound_play/msg/SoundRequest.msg"
services_str = ""
pkg_name = "sound_play"
dependencies_str = "actionlib_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "sound_play;/home/rob/baxter/devel/.private/sound_play/share/sound_play/msg;sound_play;/home/rob/baxter/src/sound_play/msg;actionlib_msgs;/opt/ros/kinetic/share/actionlib_msgs/cmake/../msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = 'TRUE' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
993,241 | 637fa318da5f9cdef95eb78f1a3a765d6cbb0249 | """Template robot with Python."""
import os
import csv
import time
from Browser import Browser
from Browser.utils.data_types import SelectAttribute
from RPA.Robocloud.Secrets import Secrets
from RPA.Excel.Files import Files
from RPA.FileSystem import FileSystem
from RPA.HTTP import HTTP
from RPA.PDF import PDF
from RPA.Archive import Archive
from RPA.Dialogs import Dialogs
#initialize variables
# Module-level library singletons shared by every helper below.
browser = Browser()
lib = Archive()
d = Dialogs()
http = HTTP()
pdf = PDF()
secrets = Secrets()
def download_the_csv(url):
    """Download the orders CSV from *url* into the working directory."""
    http.download(url)
def ask_for_url():
    """Show a dialog asking the operator for the orders URL; return it."""
    d.add_heading("Please provide the URL where download the orders")
    d.add_text_input(name="url", label="URL")
    dialog_result = d.run_dialog()
    return dialog_result.url
def open_webapplication():
    """Open the RobotSpareBin order page in a new browser."""
    browser.open_browser("https://robotsparebinindustries.com/#/robot-order")
def process_the_orders():
    """Wait for the downloaded orders file, then submit each order row.

    The expected file name comes from the ``orders_parameters`` vault
    secret.  Polls once per second for up to 10 seconds before giving up.
    """
    # get the filename from the vault
    orders_filename = secrets.get_secret("orders_parameters")["filename"]
    file_exist = False
    for _attempt in range(10):
        time.sleep(1)
        if os.path.isfile(orders_filename):
            file_exist = True
            break
    if not file_exist:
        print("file does not exist")
        return
    # BUG FIX: previously opened the hard-coded 'orders.csv' instead of the
    # file name supplied by the vault secret that was polled for above.
    with open(orders_filename, newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            fill_the_form(row)
            time.sleep(1)
def remove_message():
    """Dismiss the site's modal pop-up by clicking its OK button."""
    browser.click("text=OK")
def fill_the_form(row):
    """Fill and submit the order form for one CSV *row*.

    Retries the whole form up to 3 times if the preview never appears,
    and the Order click up to 3 times if PDF generation fails.
    NOTE(review): the bare ``except``/``finally`` dance below drives the
    retries; ``break`` inside ``finally`` also swallows any in-flight
    exception or ``continue``.
    """
    preview_appeared = False
    attempt_1 = 1
    while not preview_appeared:
        try:
            # Insert the Head
            browser.select_options_by("xpath=//select[@id='head']",SelectAttribute["value"],str(row["Head"]))
            # Insert Body
            label_index = "id-body-" + str(row["Body"])
            browser.click("xpath=//input[@id='" + label_index + "']")
            # Insert Legs
            browser.type_text("xpath=//input[@type='number']", str(row["Legs"]))
            # Insert Address
            browser.type_text("xpath=//input[@id='address']", str(row["Address"]))
            # Click on Preview
            browser.click("id=preview")
            # Extract the preview image
            time.sleep(2)
            preview_filename = f"{os.getcwd()}/output/preview_"+ str(row["Order number"]) + ".png"
            browser.take_screenshot(filename=preview_filename,
                                    selector="xpath=//div[@id='robot-preview-image']")
            preview_appeared = True
            # Click on Order
            order_complete = False
            attempts_2 = 1
            while not order_complete:
                browser.click("id=order")
                # Generate PDF
                order_complete = generate_pdf(row["Order number"], preview_filename)
                if order_complete:
                    insert_new_order()
                #print("Order " + str(row["Order number"]) + " completed")
                if attempts_2 == 3:
                    #print("Order " + str(row["Order number"]) + " failed generating order")
                    break
                else:
                    attempts_2 += 1
        except:
            #print("Order " + str(row["Order number"]) + " error while inserting the parameters")
            continue
        finally:
            # NOTE(review): order_complete is only defined once the preview
            # succeeded; the short-circuit on preview_appeared keeps this
            # from raising NameError when the form itself failed.
            if (preview_appeared == True and order_complete == True) or attempt_1 == 3:
                break
            else:
                preview_appeared = False
                attempt_1 += 1
def generate_pdf(order_number, preview_filename):
    """Render the on-page receipt to PDF and stamp the preview image on it.

    Returns True on success, False on any failure (the caller retries).
    """
    try:
        pdf_filename = f"{os.getcwd()}/output/receipt_"+ order_number+".pdf"
        receipt_html = browser.get_property(
            selector="xpath=//div[@id='receipt']", property="outerHTML")
        pdf.html_to_pdf(receipt_html, pdf_filename)
        # add image: overlay the preview screenshot on the receipt page
        pdf.add_watermark_image_to_pdf(image_path=preview_filename,
                                        source_path=pdf_filename,
                                        output_path=pdf_filename)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a failed order.
        return False
    return True
def insert_new_order():
    """Start the next order: click 'Order another' and dismiss the pop-up."""
    browser.click("id=order-another")
    remove_message()
def create_zip_file():
    """Bundle every receipt PDF in ./output into pdf_receipts.zip."""
    lib.archive_folder_with_zip(folder=f"{os.getcwd()}/output/",
                                archive_name=f"{os.getcwd()}/output/pdf_receipts.zip",
                                include="*.pdf")
def close_browser():
    """Shut down the Playwright browser instance."""
    browser.playwright.close()
if __name__ == "__main__":
    # End-to-end run: ask for the orders URL, download the CSV, submit every
    # order, archive the receipts; always close the browser on the way out.
    try:
        url = ask_for_url()
        download_the_csv(url)
        open_webapplication()
        remove_message()
        process_the_orders()
        create_zip_file()
    finally:
        close_browser()
|
993,242 | f55deb27ff1c89ad93acefcf694d218d5ff86eed | #!/usr/bin/python
# Ported to Python and barely optimized by Hernan Chavez Thielemann
from lib.misc.file import write_file
from lib.misc.warn import print_dec_g
__merged_files__ = ['main.m', 'Writing_input.m', ]
def extract_lammps_data(_data_file_,_ck_buttons_, _forcefield_):
    ''' already implemented in topology analizer
        MIXER

        NOTE(review): stub -- the docstring is the entire body, so this
        function does nothing and returns None.
    '''
def write_lammps_data(_topodata_, data_name, _config_):
    '''Write a LAMMPS data file for any supported atom style.

    Returns True on success; aborts the process via exit() for an
    unsupported style.
    '''
    print_dec_g('Writing Lammps data file...')
    ####--------------- Unpacking data ----------------####
    atomstyle, _, _ = _config_
    supported = ['atomic', 'angle', 'full', 'charge', 'bond', 'molecular']
    banner = '####------- ATOM STYLE < {} > --------####'
    _flag_ = False
    if atomstyle in supported:
        print_dec_g(banner.format(atomstyle.upper()))
        _content_ = write_lammps_data_all(_topodata_, data_name, _config_)
        _flag_ = True
    else:
        _content_ = ''
        exit(('Error 037!! - Atom style {} '
              + 'not implemented yet').format(atomstyle))
    write_file(data_name, _content_)
    print_dec_g('Successful writing!!')
    return _flag_
def write_lammps_data_all( _topodata_, data_name, _config_):
    ''' Write a lammps data file

    Assembles the full text of the data file (header counts, box, masses,
    force-field coefficients, atoms, bonds, angles, dihedrals) and returns
    it as one string.  Sections beyond Atoms are emitted only when the
    atom style requires them.  Solvent atoms (when _solvated_f_ == 1) get
    their bonds/angles reconstructed heuristically from atom ordering.
    '''
    ####--------------- Unpacking data ----------------####
    _numbers_ = _topodata_['numbers']
    n_atoms, n_bonds, n_angles, n_dihedrals = _numbers_['total']
    n_atomtypes, n_bondtypes, n_angletypes, n_dihedraltypes= _numbers_['type']
    _box_= _topodata_['box']
    _mol_, _mtype_, _atype_, _xyz_ = _topodata_['atomsdata']
    atomstyle, _solvated_f_, _ = _config_
    # How many of the numbers/types lines each atom style needs.
    _asty_d_ ={ 'atomic':1, 'charge':1, 'bond':2, 'angle':3,
                'full':4, 'molecular':4}
    ####--------------- TITLE ----------------####
    _text_ = '#Lammps data file. Geometry for PEG\n\n'
    ####--------------- NUMBERS ----------------####
    _aux_txt =[' {} atoms\n'.format(n_atoms)]
    _aux_txt.append(' {} bonds\n'.format( n_bonds))
    _aux_txt.append(' {} angles\n'.format( n_angles))
    _aux_txt.append(' {} dihedrals\n'.format( n_dihedrals))
    _text_+= ''.join(_aux_txt[:_asty_d_[atomstyle]])+'\n'
    ####---------------- TYPES -----------------####
    _aux_txt =[' {} atom types\n'.format(n_atomtypes)]
    _aux_txt.append(' {} bond types\n'.format(n_bondtypes))
    _aux_txt.append(' {} angle types\n'.format(n_angletypes))
    _aux_txt.append(' {} dihedral types\n\n'.format(n_dihedraltypes))
    _text_+= ''.join(_aux_txt[:_asty_d_[atomstyle]])+'\n'
    ####---------------- BOX -----------------####
    _text_ +=(' {:.4f} {:.4f} xlo xhi\n {:.4f} {:.4f} ylo yhi\n'
              +' {:.4f} {:.4f} zlo zhi\n').format(*_box_)
    #####------ MASSES ------####
    _text_ +='\n Masses\n\n'
    atom_info = _topodata_['atomtypes']
    for i in range( n_atomtypes):
        _text_ +=' {} {}\n'.format( i+1, atom_info[i][1])
    #####------ Force field potentials ------####
    #for na in range(len(known_atoms)):
        #if known_atoms[na][4] not in charge.keys():
        #    print known_atoms[na][4], known_atoms[na][6]
        #charge[known_atoms[na][4]]= float(known_atoms[na][6])
        #conv_dict[known_atoms[na][4].lstrip(' ')] = known_atoms[na][1]
    #_topodata_['S_translation'] = conv_dict
    # dicts maps type names to the 1-based LAMMPS type ids used below.
    aux_pot_txt, dicts = write_lammps_potentials( _topodata_, atomstyle)
    _text_ += aux_pot_txt
    #a_dict={}
    #print dicts[0]
    #for key in conv_dict.keys(): # key - short
    #
    #    a_dict[key]= dicts[0][conv_dict[key]]
    #print a_dict
    ####------ATOMS------####
    known_atoms = _topodata_['atoms']
    if _solvated_f_ == 1:
        charge = _topodata_['S_charge']
        conv_dict = _topodata_['S_translation'] # key s_tag : val l_tag
    _text_ +='\n Atoms\n\n'
    if atomstyle in ['charge','full']:
        atom_shape = ' {}'*3+' {:7.4f}'*4+' # {}\n'# idx mol atype charge x y z
    elif atomstyle in ['bond','angle','molecular']:
        atom_shape = ' {0} {1} {2} {4:7.4f} {5:7.4f} {6:7.4f} # {7}\n'
    elif atomstyle =='atomic':
        atom_shape = ' {0} {2} {4:7.4f} {5:7.4f} {6:7.4f} # {7}\n'
    base_atoms_n = len( known_atoms)
    # NOTE(review): coordinates are scaled by 10 (presumably nm -> Angstrom
    # from GROMACS units) -- confirm.
    for i in range( base_atoms_n):
        aty = known_atoms[i][1]
        _text_ += atom_shape.format( i+1, _mol_[i],
                                    dicts[0][aty],
                                    float(known_atoms[i][6]),
                                    float(_xyz_[i][0])*10,
                                    float(_xyz_[i][1])*10,
                                    float(_xyz_[i][2])*10,
                                    aty
                                    )
    solv_bonds = []
    solv_angles = []
    if _solvated_f_ == 1:
        solv_at_v = range(n_atoms )[ base_atoms_n:]
        for i in solv_at_v:
            aty = conv_dict[_atype_[i]]
            _text_ += atom_shape.format(i+1, _mol_[i], dicts[0][aty],
                                        charge[aty],
                                        float(_xyz_[i][0])*10,
                                        float(_xyz_[i][1])*10,
                                        float(_xyz_[i][2])*10,
                                        aty
                                        )
            if charge[aty] <0:
                # better way to do this is trough coords --------- <WFS>
                # but anyway works perfectly
                # Negative charge marks the solvent oxygen; the next two
                # atoms in file order are taken as its hydrogens.
                aty2 = conv_dict[_atype_[i+1]]
                aty3 = conv_dict[_atype_[i+2]]
                solv_bonds.append([aty+'-'+aty2, i+1, i+2])
                solv_bonds.append([aty+'-'+aty3, i+1, i+3])
                solv_angles.append([aty2+'-'+aty+'-'+aty3, i+2, i+1, i+3])
    ####------BONDS------####
    if _asty_d_[atomstyle]>=2:
        known_bonds = _topodata_['bonds']
        base_bonds_n = len (known_bonds)
        _text_ +='\n Bonds\n\n'
        bond_shape = ' {}'*4+'\n'
        for i in range(base_bonds_n):
            at1 = int(known_bonds[i][0])
            at2 = int(known_bonds[i][1])
            _bond_ty_ = dicts[1][known_atoms[at1-1][1]+'-'
                                 +known_atoms[at2-1][1]]
            _text_ += bond_shape.format( i+1, _bond_ty_, at1, at2)
        if _solvated_f_ == 1:
            # better way to do this is trough corrds --------- <WFS>
            for i in range(n_bonds-base_bonds_n):
                _bond_ty_ = dicts[1][solv_bonds[i][0]]
                _text_ += bond_shape.format(i+1+base_bonds_n,_bond_ty_,
                                            solv_bonds[i][1],solv_bonds[i][2])
    ####------ANGLES------#########
    if _asty_d_[atomstyle]>=3:
        known_angles = _topodata_['angles']
        base_angles_n = len(known_angles)
        _text_ +='\n Angles\n\n'
        angle_shape = ' {}'*5+'\n'
        for i in range(base_angles_n):
            at1 = int(known_angles[i][0])
            at2 = int(known_angles[i][1])
            at3 = int(known_angles[i][2])
            angle_t = (known_atoms[at1-1][1]+'-'+ known_atoms[at2-1][1]
                       +'-'+known_atoms[at3-1][1])
            _angle_ty_ = dicts[2][angle_t]
            #print angle_t, _angle_ty_
            _text_ += angle_shape.format( i+1, _angle_ty_, at1, at2, at3)
        if _solvated_f_ == 1:
            for i in range(n_angles-base_angles_n):
                _angle_ty_ = dicts[2][solv_angles[i][0]]
                _text_ += angle_shape.format(i+1+base_angles_n, _angle_ty_,
                                             solv_angles[i][1],
                                             solv_angles[i][2],
                                             solv_angles[i][3]
                                             )
    ####------DIHEDRAL------####
    if _asty_d_[atomstyle]==4:
        known_dihedrals = _topodata_['dihedrals']
        base_dihedrals_n = len(known_dihedrals)
        _text_ +='\n Dihedrals\n\n'
        dihedral_shape = ' {}'*6+'\n'
        for i in range(base_dihedrals_n):
            at1 = int(known_dihedrals[i][0])
            at2 = int(known_dihedrals[i][1])
            at3 = int(known_dihedrals[i][2])
            at4 = int(known_dihedrals[i][3])
            _dihe_ty_ = dicts[3][known_atoms[at1-1][1]+'-'
                                 +known_atoms[at2-1][1]+'-'
                                 +known_atoms[at3-1][1]+'-'
                                 +known_atoms[at4-1][1]
                                 ]
            _text_+= dihedral_shape.format( i+1, _dihe_ty_, at1, at2, at3, at4)
    return _text_
def write_lammps_data_atomic(_topodata_, data_name, _config_):
    ''' Write a lammps data file
        Deprecated

        Superseded by write_lammps_data_all, which handles the 'atomic'
        style among others; kept for reference only.
    '''
    ####--------------- Unpacking data ----------------####
    _numbers_ = _topodata_['numbers']
    n_atoms, _, _, _ = _numbers_['total']
    n_atomtypes, _, _, _= _numbers_['type']
    _box_= _topodata_['box']
    _, _, _atype_, _xyz_ = _topodata_['atomsdata']
    known_atoms = _topodata_['atoms']
    atom_info = _topodata_['atomtypes']
    atomstyle, _solvated_f_, _ = _config_
    ####--------------- TITLE ----------------####
    _text_ = '#Lammps data file. Geometry for PEG\n\n'
    ####--------------- NUMBERS ----------------####
    _text_ +=' {} atoms\n'.format(n_atoms)
    ####---------------- TYPES -----------------####
    _text_ +=' {} atom types\n'.format(n_atomtypes)
    ####---------------- BOX -----------------####
    _text_ +=(' {:.4f} {:.4f} xlo xhi\n {:.4f} {:.4f} ylo yhi\n'
              +' {:.4f} {:.4f} zlo zhi\n').format(*_box_)
    #####------ MASSES ------####
    _text_ +='\n Masses\n\n'
    for i in range( n_atomtypes):
        _text_ +=' {} {}\n'.format( i+1, atom_info[i][1])
    #####------ Force field potentials ------####
    aux_pot_txt, adict = write_lammps_potentials( _topodata_, atomstyle)
    _text_ += aux_pot_txt
    ####------ATOMS------####
    if _solvated_f_ == 1:
        conv_dict = _topodata_['S_translation'] # key s_tag : val l_tag
    _text_ +='\n Atoms\n\n'
    atom_shape = ' {}'*2+' {:7.4f}'*3+'\n'# index mol atype charge x y z
    base_atoms_n = len( known_atoms)
    # NOTE(review): coordinates scaled by 10 (presumably nm -> Angstrom).
    for i in range( base_atoms_n):
        aty = known_atoms[i][1]
        _text_ += atom_shape.format( i+1,
                                    adict[aty],
                                    float(_xyz_[i][0])*10,
                                    float(_xyz_[i][1])*10,
                                    float(_xyz_[i][2])*10
                                    )
    if _solvated_f_ == 1:
        solv_at_v = range(n_atoms )[ base_atoms_n:]
        for i in solv_at_v:
            aty = conv_dict[_atype_[i]]
            _text_ += atom_shape.format(i+1, adict[aty],
                                        float(_xyz_[i][0])*10,
                                        float(_xyz_[i][1])*10,
                                        float(_xyz_[i][2])*10
                                        )
    return _text_
def write_lammps_potentials( _topodata_, atomstyle = 'full'):
    """Build the force-field coefficient sections of a LAMMPS data file.

    Returns (text, dicts): *text* holds the Pair (and, depending on
    *atomstyle*, Bond/Angle/Dihedral) Coeffs sections; *dicts* maps type
    names (and '-' joined name tuples) to their 1-based LAMMPS type ids.
    Energies are converted by /4.186 (kJ -> kcal) and lengths by *10
    (nm -> Angstrom) from the GROMACS conventions.
    """
    atom_info = _topodata_['atomtypes'] # index 1: mass ; index 4 -5 : eps-sig
    _numbers_ = _topodata_['numbers']
    n_atomtypes, n_bondtypes, n_angletypes, n_dihedraltypes= _numbers_['type']
    #n_bondstypes = len(data_container['bondtypes'])
    buckorlj = int( _topodata_[ 'defaults'][0]) # 1 -2 lj/buc
    comb_rule = int( _topodata_[ 'defaults'][1]) # 1-2-3
    sigma = []
    epsilon = []
    buck3 = []
    atom_type_d = {}
    for x in range( n_atomtypes):
        atom_type_d[atom_info[x][0]] = x+1
        _A_ = float(atom_info[x][5])
        _B_ = float(atom_info[x][4])
        if comb_rule==1:
            # Combination rule 1 stores C6/C12; recover eps/sigma from them.
            _eps_ = (_B_**2)/(4*_A_)
            _sig_ = (_A_/_B_)**(1/6.0)
        else:
            _eps_ = _A_
            _sig_ = _B_
        epsilon.append(_eps_ / 4.186)
        if buckorlj == 2:#------------------------------------------ <WFS>
            # BUG FIX: was 'loat(...)' (NameError) and the undefined 'f_C_';
            # the Buckingham branch could never run before.
            _C_ = float(atom_info[x][6])
            buck3.append(' '+ str(_C_/ 4.186 / (10** 6)))
            sigma.append( 10 / _sig_)
        else:
            buck3.append('')
            sigma.append(_sig_* 10)
    ####----------- DEFINING LJ INTERACTIONS ----------####
    #------------------------------------------------------- <WFS>
    # make function- buck
    '''potential'''
    txt_p_p ='\n Pair Coeffs\n\n'
    for i in range( n_atomtypes):
        txt_p_p +=' {} {} {}{}\n'.format( i+1, epsilon[i], sigma[i], buck3[i])
    ####----------- DEFINING BONDED INTERACTIONS ----------####
    txt_p_b ='\n Bond Coeffs\n\n' # bond_style hybrid
    bty = _topodata_['bondtypes']
    bondtypes_d = {}
    for i in range(n_bondtypes):
        # Register both orientations of the pair name.
        bondtypes_d[bty[i][0]+'-'+bty[i][1]]= i+1
        bondtypes_d[bty[i][1]+'-'+bty[i][0]]= i+1
        txt_p_b += ' {} {:.4f} {:.4f}\n'.format( i+1,
                                            float(bty[i][-1])/ 100/ 4.186/2,
                                            float(bty[i][-2])*10)
    txt_p_a ='\n Angle Coeffs\n\n'
    aty = _topodata_['angletypes']
    angletypes_d = {}
    i=0
    for i in range(n_angletypes):
        angletypes_d[aty[i][0]+'-'+aty[i][1]+'-'+aty[i][2]]= i+1
        angletypes_d[aty[i][2]+'-'+aty[i][1]+'-'+aty[i][0]]= i+1
        txt_p_a += ' {} {:.4f} {:.4f}\n'.format( i+1,
                                                float(aty[i][-1])/ 4.186/2,
                                                float(aty[i][-2]))
    txt_p_d ='\n Dihedral Coeffs\n\n'
    dty = _topodata_['dihedraltypes']
    dihedraltypes_d = {}
    i=0
    for i in range(n_dihedraltypes):
        _type_forward_ = dty[i][0]+'-'+dty[i][1]+'-'+dty[i][2]+'-'+dty[i][3]
        _type_backward_ = dty[i][3]+'-'+dty[i][2]+'-'+dty[i][1]+'-'+dty[i][0]
        dihedraltypes_d[ _type_forward_ ] = i+1
        dihedraltypes_d[ _type_backward_] = i+1
        txt_p_d += ' {} {:.4f} {} {} {}\n'.format( i+1,
                                                  float(dty[i][-2])/4.186/2,
                                                  int(float(dty[i][-1])),
                                                  int(float(dty[i][-3])),
                                                  '0.0'
                                                  )
    # Emit only the sections the chosen atom style supports.
    if atomstyle in ['full', 'molecular']:
        dicts = [ atom_type_d, bondtypes_d, angletypes_d, dihedraltypes_d]
        txt_p_ = txt_p_p+txt_p_b+txt_p_a+txt_p_d
    elif atomstyle == 'angle':
        dicts = [ atom_type_d, bondtypes_d, angletypes_d]
        txt_p_ = txt_p_p+txt_p_b+txt_p_a
    elif atomstyle == 'bond':
        dicts = [ atom_type_d, bondtypes_d]
        txt_p_ = txt_p_p+txt_p_b
    elif atomstyle == 'atomic' or atomstyle == 'charge':
        dicts = [atom_type_d]
        txt_p_ = txt_p_p
    else:
        print ('\nWeird thing, it is supposed impossible to reach this place\n')
    return txt_p_, dicts
def write_lammps_input( _simconfig_, _topodata_= None, in_name= 'in.gro2lam'):
''' _simconfig_ contains the data gathered from the gui
_topodata_ comes from the converted gromacs file
in_name is intended as name for the input'''
#===================================================
'''==== Gathering and ordering the data ====='''
#===================================================
#===================================================
####----------- SIM RAW CONFIG --------####
_simconfig_ = _simconfig_[:]
( data_file, timestep, nve_steps, nvt_steps, nvt_tss,
nvt_tdamp, npt_steps, npt_pss, npt_pdamp, npt_tss,
npt_tdamp) = _simconfig_[0]
nvt_tstart, nvt_tend = nvt_tss.split(':')
npt_pstart, npt_pend = npt_pss.split(':')
npt_tstart, npt_tend = npt_tss.split(':')
#print (data_file, timestep, nve_steps, nvt_steps, nvt_tstart, nvt_tend,
#nvt_tdamp, npt_steps, npt_pstart, npt_pend, npt_tdamp, npt_tdamp,
#npt_ystart, npt_yend)
i = 5
thermo, atommap, pairwiseint, lj_rcutoff, c_rcutoff = _simconfig_[1][:i]
neighbordistance, lrsolver, lrerror, in12_13_14 = _simconfig_[1][i:i+4]
neighbordelay, neighborupdate, npt_kind = _simconfig_[1][i+4:i+7]
f_comb_rule, _order_, T_init_vel = _simconfig_[1][i+7:]
#===================================================
####------------ RESTRAIN DATA --------####
rest_line = ''
group_lines = ''
torestrain = []
ens_af = []
if _simconfig_[2] <> []:
g_names, g_aids, k_xyz_c, runs_c, ch_init = _simconfig_[2][0][:]
if _simconfig_[2][1] <> None:
####### ------------------------------------ Super interesante!!
## este es uno de esos casos donde no es posible
## utilizar += a pesar de desligar con [:] ...
aux1,aux2,aux3,aux4,aux5 = _simconfig_[2][1][:]
g_names = g_names + aux1
g_aids = g_aids + aux2
k_xyz_c = k_xyz_c + aux3
runs_c = runs_c + aux4
ch_init = ch_init + aux5
print'\n'
for re in range(len(g_names)):
if ch_init[re]==1:
print 'Restraining group '+g_names[re]+' in '+runs_c[re]
groupinfo = [g_names[re], g_aids[re]]
group_lines += 'group {} id {}\n'.format( *groupinfo)
if runs_c[re] not in ['', 'No', 'no', '0']:
ens = [int(x)-1 for x in runs_c[re].split('-')]
torestrain.append( [g_names[re], k_xyz_c[re], ens ])
for e in ens:
if e not in ens_af:
ens_af.append(e)
if group_lines <> '':
group_lines +='\n'
mix_value = {'1':'geometric', '2':'arithmetic',
'3':'geometric', '4':'sixthpower'}
#for mt in range(len( _mtype_)):
#group_lines += 'group {} id {}:{}\n'.format(*_mtype_[mt])
_asty_d_ ={ 'atomic':1, 'charge':2, 'bond':3, 'angle':4,
'full':5, 'molecular':5}
#===================================================
####------------ TOPO DATA --------####
print '\n'+data_file + '\n'
if _topodata_<>None:
atomstyle, _solvated_, _parametric_ = _topodata_['config']
buckorlj, comb_rule, _, f_LJ, _ = _topodata_['defaults']
else:
print 'non _topodata_'
atomstyle = 'full'
comb_rule = ''
buckorlj = 0
if f_comb_rule in mix_value.values():
mix_value_s=' mix '+f_comb_rule
elif f_comb_rule=='from_gromacs' and _topodata_<>None:
mix_value_s=' mix '+mix_value[comb_rule]
else:
print 'Using default mixing rule'
mix_value_s = ''
#===================================================
'''======= Writing Lammps data file ======='''
#===================================================
_dtxt_= '\n'+'#INIT\n'+'units real\n'+'boundary p p p\n'+'atom_style '
# as I understand lammps default is 3
#_dtxt_+= '%s %d\n'.format('dimension',dimension)
_dtxt_+= atomstyle+'\n'
if atomstyle not in ['full', 'charge]']: # no charges
if 'coul' in pairwiseint:
pairwiseint = pairwiseint.split('/coul')[0]
if 'none'in pairwiseint:
lj_rcutoff = ''
c_rcutoff = ''
elif 'coul' not in pairwiseint:
c_rcutoff = ''
_dtxt_+= '\natom_modify map {}\n'.format(atommap)
#===================================================
####------------ SYSTEM CONFIGURATION --------####
_dsc_txt=['pair_style {} {}'.format( pairwiseint, lj_rcutoff)]
_dsc_txt.append(' {}\n'.format( c_rcutoff))
_dsc_txt.append( 'bond_style harmonic\n')
_dsc_txt.append( 'angle_style harmonic\n')
_dsc_txt.append( 'dihedral_style charmm\n')
_dtxt_+= ''.join(_dsc_txt[:_asty_d_[atomstyle]])+'\n'
if 'data' in data_file:
_dtxt_+= 'read_data {}\n'.format(data_file)
else:
_dtxt_+= 'read_restart {}\n'.format(data_file)
#===================================================
####-------------- NEIGHBOR LIST -----------####
_dtxt_+= '\nneighbor {} bin\n'.format( neighbordistance)
if lrsolver <> '' and atomstyle in ['full','charge']:
if '/coul/long' in pairwiseint:
_dtxt_+= 'kspace_style {} {}\n'.format( lrsolver, lrerror)
_dtxt_+= 'pair_modify shift no tail yes'+mix_value_s+'\n'
_dtxt_+= 'special_bonds lj/coul {} {} {}\n'.format( *in12_13_14.split(':'))
_aux_s_ = 'neigh_modify every {} delay {} check yes\n\n'
_dtxt_+= _aux_s_.format( neighborupdate, neighbordelay)
#===================================================
####--------------- TIMESTEP ----------------####
_dtxt_+= 'timestep {}\n\n\n'.format(timestep)
_dtxt_+= 'thermo {}\n'.format(thermo)
_dtxt_+= ('thermo_style custom step temp press vol '
+'epair emol etotal enthalpy'
+'\n\n')
#===================================================
####-------------- Init VELOCITIES ----------####
aux_vel_str = 'velocity all create {} 1234567 rot yes dist gaussian\n\n'
_dtxt_+= aux_vel_str.format(T_init_vel)
#===================================================
####---------------- GROUPS ----------------####
_dtxt_+= group_lines
#===================================================
####--------------- SIMULATION ------------####
ensembles = _order_.split('-')
curr_time = 0
timestep = float(timestep)
tounfix = [[],[]]
_dtxt_ += '\n'
for en in range(len(ensembles)):
if ens_af<>[] and en in ens_af: # RESTRAIN
for re in range(len(torestrain)):
if en in torestrain[re][2]:
if en-1 not in torestrain[re][2]:
spring_f = 'fix rest_{0}_{1} {0} spring/self {2} {3}\n'
k, xyz = torestrain[re][1].split(':')
_dtxt_ += spring_f.format( torestrain[re][0], en+1 ,
k, xyz)
unr= 0+en
while unr+1 in torestrain[re][2]:
unr+=1
name_2uf = 'rest_{0}_{1}'.format( torestrain[re][0],
en+1)
tounfix= [ tounfix[0]+ [unr], tounfix[1]+ [name_2uf]]
_dtxt_ += '\n'
if ensembles[en]=='NVE' and nve_steps <> '' and nve_steps.isdigit():
steps = int(nve_steps)
nve_frm = 'fix nve_name1 all nve\nrun {}\nunfix nve_name1\n\n'
_dtxt_ += nve_frm.format(steps)
curr_time += steps*timestep
elif ensembles[en]=='NPT' and npt_steps <> '' and npt_steps.isdigit():
steps = int(npt_steps)
npt_frm = 'fix npt_name1 all npt temp {} {} {} {} {} {} {}\n'
_dtxt_ += npt_frm.format( npt_tstart, npt_tend, npt_tdamp,
npt_kind, npt_pstart, npt_pend, npt_pdamp)
_dtxt_+= 'run {}\nunfix npt_name1\n\n'.format( steps)
curr_time += steps*timestep
elif ensembles[en]=='NVT' and nvt_steps <> '' and nvt_steps.isdigit():
steps = int(nvt_steps)
nvt_frm = 'fix nvt_name1 all nvt temp {} {} {}\n'
_dtxt_ += nvt_frm.format( nvt_tstart, nvt_tend, nvt_tdamp )
_dtxt_+= 'run {}\nunfix nvt_name1\n\n'.format( steps)
curr_time += steps*timestep
elif ensembles[en]=='R':
restart_txt = '\nwrite_restart restart.g2l_{}fs\n'
_dtxt_ += restart_txt.format(int(curr_time))
if tounfix <> [ [], []] and en in tounfix[0]: # UNFIX RESTRAIN
for unf in range( len( tounfix[0])):
if tounfix[0][unf] == en:
_dtxt_ += 'unfix ' + tounfix[1][unf] + '\n'
print ('Writing Lammps input script...')
write_file( in_name , _dtxt_)
print_dec_g ('Successful writing!!')
#-------------------- here would be optimum some further check
return True
if __name__ == '__main__':
    # Module is import-only; no standalone behaviour.
    pass
|
993,243 | 8b1e93bf828f475717708fb64465823d36b7fb66 |
from django.test import TestCase
from django.contrib.auth.models import User
from .models import *
from django.shortcuts import get_object_or_404
class TestViews(TestCase):
    """Smoke tests for the bug-tracker views, run as a logged-in user."""

    def setUp(self):
        # Every view under test requires an authenticated session.
        User.objects.create_user(username='username', password='password')
        self.client.login(username='username', password='password')

    def test_get_add_bug_page(self):
        response = self.client.get('/bugs/new/')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "bugform.html")

    def test_get_edit_bug_page(self):
        author = User.objects.get(username="username")
        bug = Bug.objects.create(
            title="test",
            description="testing",
            author_id=author.id)
        bug.save()
        response = self.client.get('/bugs/{0}/{1}/edit/'.format(bug.id, bug.slug))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "bugform.html")

    def test_get_edit_page_for_bug_that_does_not_exist(self):
        response = self.client.get("/bugs/999/nothing/edit/")
        self.assertEqual(response.status_code, 404)

    def test_get_bugs_list_page(self):
        response = self.client.get("/bugs/")
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "bugs.html")

    def test_get_bugs_detail_page(self):
        bug = Bug.objects.create(title="test", description="testing")
        bug.save()
        response = self.client.get('/bugs/{0}/{1}/'.format(bug.id, bug.slug))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "bugdetail.html")
|
993,244 | 57b7fd0e43e5b4ec4500fb8d4e1865e63bae0215 | import tkinter
# Root window with a single entry widget for the upper bound.
Prime=tkinter.Tk()
Prime.title('Odd Even List')
entry=tkinter.Entry(Prime)
entry.grid()
def enter():
    """Read N from the entry and list 2..N split into composites and primes.

    Composites go to the first listbox, primes to the second; each number
    is inserted at index 0, so the lists display in descending order.
    """
    data = int(entry.get())
    Prime.title('Prime')
    listbox = tkinter.Listbox(Prime)    # composite numbers
    listboxx = tkinter.Listbox(Prime)   # prime numbers
    listbox.grid(row=0, column=1)
    listboxx.grid(row=0, column=2)
    # (dead code removed: an unused `i = 1` and an always-true `num > 0`
    # check -- num starts at 2 here.)
    for num in range(2, data + 1):
        # Trial division; the for-else fires only when no divisor divides num.
        for divisor in range(2, num):
            if num % divisor == 0:
                listbox.insert(0, num)
                break
        else:
            listboxx.insert(0, num)
# Submit button triggers enter(); then hand control to the Tk event loop.
button=tkinter.Button(Prime,text="Submit",command=enter)
button.grid()
Prime.mainloop()
993,245 | 8c64f5fd6de3916194458f96df1aebb722319848 | #coding=utf-8
import os
# Directory layout: logs, configuration and test projects all live under
# the working directory (current directory by default).
WORK_DIRECTORY="."
LOG_DIRECTORY=os.sep.join([WORK_DIRECTORY,"log"])
CONFIG_DIRECTORY=os.sep.join([WORK_DIRECTORY,"conf"])
PROJECTS_DIRECTORY=os.sep.join([WORK_DIRECTORY,"projects"])
CONFIG_FILE=os.sep.join([CONFIG_DIRECTORY,"autotest.conf"])
# Per-project-type sub-directory names under PROJECTS_DIRECTORY.
NFC_INSTALL_TEST_DIR="nfc_install_test"
NFC_AUTO_TEST_DIR="nfc_autotest"
CENI_INSTALL_TEST_DIR="ceni_install_test"
CENI_AUTO_TEST_DIR="ceni_autotest"
SWITCH_AUTO_TEST_DIR="switch_autotest"
AUTO_TEST_PROJECT_DIR_LIST=[NFC_INSTALL_TEST_DIR,
                            NFC_AUTO_TEST_DIR,
                            CENI_INSTALL_TEST_DIR,
                            CENI_AUTO_TEST_DIR,
                            SWITCH_AUTO_TEST_DIR]
# Section names used in autotest.conf (ConfigParser style).
CONF_DEFAULT_SECTION="DEFAULT"
CONF_NFC_INSTALL_TEST_SECTION="nfc_install_test"
CONF_DEFAULT_SECTION_DEFAULT_PROJECT_TYPE="project_types"
# Human-readable (Chinese) project-type labels shown to the user; the
# values are displayed/stored as-is, so they must not be translated here.
NFC_INSTALL_TEST="NFC安装测试"           # "NFC installation test"
NFC_AUTO_TEST="NFC自动化测试"            # "NFC automated test"
CENI_INSTALL_TEST="CENI安装测试"         # "CENI installation test"
CENI_AUTO_TEST="CENI自动化测试"          # "CENI automated test"
SWITCH_AUTO_TEST="交换机自动化测试"       # "switch automated test"
CONF_DEFAULT_SECTION_DEFAULT_PROJECT_TYPE_VALUE=",".join([NFC_INSTALL_TEST,
                                                          NFC_AUTO_TEST,
                                                          CENI_INSTALL_TEST,
                                                          CENI_AUTO_TEST,
                                                          SWITCH_AUTO_TEST])
CONF_NFC_INSTALL_TEST_SECTION_TEST_TYPES="test_types"
# Deployment-topology labels for NFC installation tests.
NFC_INSTALL_TEST_CTRL_HA="HA 总控+3主控+计算+TGW测试"
NFC_INSTALL_TEST_ALL_SEP="总控+主控+计算+TGW测试"
NFC_INSTALL_TEST_GLBCTRL="总主控+计算+TGW测试"
NFC_INSTALL_TEST_CTRLCOM="总控+主控计算+TGW测试"
CONF_NFC_INSTALL_TEST_SECTION_DEFAULT_TEST_TYPE_VALUE=",".join([NFC_INSTALL_TEST_CTRL_HA,
                                                                NFC_INSTALL_TEST_ALL_SEP,
                                                                NFC_INSTALL_TEST_GLBCTRL,
                                                                NFC_INSTALL_TEST_CTRLCOM])
# Config-file keys describing one NFC installation-test project.
NFC_INSTALL_PROJECT_NAME="name"
NFC_INSTALL_PROJECT_TENANT="tenant"
NFC_INSTALL_PROJECT_PASSWORD="password"
NFC_INSTALL_PROJECT_KEYSTONE_URL="keystone_url"
NFC_INSTALL_PROJECT_REGION="region"
NFC_INSTALL_PROJECT_PUBLIC_NETWORK="public_network"
NFC_INSTALL_PROJECT_CASES="cases"
TEST_START_TIME="test_start_time"
TEST_END_TIME="test_end_time"
|
993,246 | 81320ef5941df158a75228fcbc6588af2365f4fc | #!/usr/bin/env python
# encoding: utf-8
# -*- coding: utf-8 -*-
# @contact: ybsdeyx@foxmail.com
# @software: PyCharm
# @time: 2019/3/26 19:59
# @author: Paulson●Wier
# @file: 4.py
# @desc:
def manual_iter():
    """Read a file line by line by driving the iterator manually with
    next(), catching StopIteration instead of using a for loop.
    (Deliberately non-idiomatic: this is a demo of the iterator protocol.)
    """
    with open('1_3.txt',encoding='utf-8') as f:
        try:
            while True:
                line = next(f)
                print(line, end='')
        except StopIteration:
            pass
def manual_iter1():
    '''
    Normally StopIteration marks the end of iteration.  But when driving
    next() by hand as shown above, you can instead pass a sentinel default
    (here None) and test for it to detect the end, avoiding the
    try/except.  Example below.
    :return:
    '''
    with open('1_3.txt',encoding='utf-8') as f:
        while True:
            line = next(f,None)
            if line is None:
                break
            print(line,end="")
'''
讨论
大多数情况下,我们会使用 for 循环语句用来遍历一个可迭代对象。但是,偶尔也
需要对迭代做更加精确的控制,这时候了解底层迭代机制就显得尤为重要了。
'''
# manual_iter1()
# 4.6 带有外部状态的生成器函数
'''
问题
你想定义一个生成器函数,但是它会调用某个你想暴露给用户使用的外部状态值。
解决方案
如果你想让你的生成器暴露外部状态给用户,别忘了你可以简单的将它实现为一
个类,然后把生成器函数放到 __iter__() 方法中过去。比如:
'''
from collections import deque
class linehistory:
    """Iterable wrapper around a line source that remembers the most
    recent (lineno, line) pairs in the bounded ``self.history`` deque.

    Line numbers are 1-based.  Exposing the generator through
    ``__iter__`` lets callers inspect ``history`` mid-iteration.
    """

    def __init__(self, lines, histlen=3):
        self.lines = lines
        # Bounded buffer: only the last `histlen` pairs survive.
        self.history = deque(maxlen=histlen)

    def __iter__(self):
        lineno = 0
        for line in self.lines:
            lineno += 1
            self.history.append((lineno, line))
            yield line

    def clear(self):
        """Forget all remembered lines."""
        self.history.clear()
# Demo: stream the file through linehistory; whenever a line contains
# 'python', dump the buffered context (the last 3 lines) with line numbers.
with open('1_3.txt', encoding='utf-8') as f:
    lines = linehistory(f)
    for line in lines:
        if 'python' in line:
            for lineno, hline in lines.history:
                print('{}{}'.format(lineno, hline), end='')
|
993,247 | 214251a17d1d30f4728f4959c9fce81785ad3a69 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_ephem_nav_converter')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_ephem_nav_converter')
_ephem_nav_converter = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_ephem_nav_converter', [dirname(__file__)])
except ImportError:
import _ephem_nav_converter
return _ephem_nav_converter
try:
_mod = imp.load_module('_ephem_nav_converter', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_ephem_nav_converter = swig_import_helper()
del swig_import_helper
else:
import _ephem_nav_converter
del _swig_python_version_info
# --- SWIG proxy plumbing (generated boilerplate) ---------------------------
# Alias `property` if available (pre-2.2 Pythons lacked it).
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
# Unify the builtins module name across Python 2 and 3.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # __setattr__ backend for SWIG proxies: route writes through the
    # generated C setter table; with static=0, unknown names may still be
    # added to the instance dict.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant (static=0): names without a generated setter
    # fall back to plain attribute assignment.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    # __getattr__ backend: look the name up in the generated C getter table.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    # repr shows the underlying SWIG pointer when available,
    # e.g. "<module.Type; proxy of <Swig Object ...> >".
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# _newclass flags whether new-style classes exist (always true on any
# modern Python); kept for generated-code compatibility.
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
def new_doubleArray(nelements):
return _ephem_nav_converter.new_doubleArray(nelements)
new_doubleArray = _ephem_nav_converter.new_doubleArray
def delete_doubleArray(ary):
return _ephem_nav_converter.delete_doubleArray(ary)
delete_doubleArray = _ephem_nav_converter.delete_doubleArray
def doubleArray_getitem(ary, index):
return _ephem_nav_converter.doubleArray_getitem(ary, index)
doubleArray_getitem = _ephem_nav_converter.doubleArray_getitem
def doubleArray_setitem(ary, index, value):
return _ephem_nav_converter.doubleArray_setitem(ary, index, value)
doubleArray_setitem = _ephem_nav_converter.doubleArray_setitem
def new_longArray(nelements):
return _ephem_nav_converter.new_longArray(nelements)
new_longArray = _ephem_nav_converter.new_longArray
def delete_longArray(ary):
return _ephem_nav_converter.delete_longArray(ary)
delete_longArray = _ephem_nav_converter.delete_longArray
def longArray_getitem(ary, index):
return _ephem_nav_converter.longArray_getitem(ary, index)
longArray_getitem = _ephem_nav_converter.longArray_getitem
def longArray_setitem(ary, index, value):
return _ephem_nav_converter.longArray_setitem(ary, index, value)
longArray_setitem = _ephem_nav_converter.longArray_setitem
def new_intArray(nelements):
return _ephem_nav_converter.new_intArray(nelements)
new_intArray = _ephem_nav_converter.new_intArray
def delete_intArray(ary):
return _ephem_nav_converter.delete_intArray(ary)
delete_intArray = _ephem_nav_converter.delete_intArray
def intArray_getitem(ary, index):
return _ephem_nav_converter.intArray_getitem(ary, index)
intArray_getitem = _ephem_nav_converter.intArray_getitem
def intArray_setitem(ary, index, value):
return _ephem_nav_converter.intArray_setitem(ary, index, value)
intArray_setitem = _ephem_nav_converter.intArray_setitem
def new_shortArray(nelements):
return _ephem_nav_converter.new_shortArray(nelements)
new_shortArray = _ephem_nav_converter.new_shortArray
def delete_shortArray(ary):
return _ephem_nav_converter.delete_shortArray(ary)
delete_shortArray = _ephem_nav_converter.delete_shortArray
def shortArray_getitem(ary, index):
return _ephem_nav_converter.shortArray_getitem(ary, index)
shortArray_getitem = _ephem_nav_converter.shortArray_getitem
def shortArray_setitem(ary, index, value):
return _ephem_nav_converter.shortArray_setitem(ary, index, value)
shortArray_setitem = _ephem_nav_converter.shortArray_setitem
def getStructSize(self):
    """Return the C ``sizeof`` of this SWIG-wrapped struct.

    Resolves the module-level ``sizeof_<TypeName>`` value that the SWIG
    GEN_SIZEOF macro generates, deriving ``<TypeName>`` from the proxy
    repr (e.g. "<module.TypeName; proxy of ...>").  Raises NameError
    with a hint when the macro was not run for this type.
    """
    # Compute the lookup name once; it is needed in both branches.
    typeString = 'sizeof_' + repr(self).split(';')[0].split('.')[-1]
    try:
        return eval(typeString)
    except NameError as e:
        # str(e) instead of e.message: BaseException.message was removed
        # in Python 3 (and deprecated since 2.6) - the original re-raise
        # crashed with AttributeError, masking this helpful message.
        raise NameError(str(e) + '\nYou tried to get this size macro: ' + typeString +
                        '\n It appears to be undefined. \nYou need to run the SWIG GEN_SIZEOF' +
                        ' SWIG macro against the class/struct in your SWIG file if you want to ' +
                        ' make this call.\n')
def protectSetAttr(self, name, value):
    """__setattr__ replacement that only permits assignment to attributes
    that already exist (or the special SWIG 'this' pointer); anything
    else raises ValueError so typos cannot silently create attributes."""
    is_known = hasattr(self, name) or name == 'this'
    if not is_known:
        raise ValueError('You tried to add this variable: ' + name + '\n' +
                         'To this class: ' + str(self))
    object.__setattr__(self, name, value)
def protectAllClasses(moduleType):
    # Install protectSetAttr/getStructSize on every class defined in this
    # module, locking down attribute creation on all SWIG proxy classes.
    # NOTE(review): relies on `sys` already being imported - in this file
    # it is imported at the bottom, immediately before the call; the
    # `moduleType` parameter is unused.
    import inspect
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    for member in clsmembers:
        try:
            # exec by class name; failures (e.g. non-assignable classes)
            # are deliberately ignored.
            exec(str(member[0]) + '.__setattr__ = protectSetAttr')
            exec(str(member[0]) + '.getStructSize = getStructSize')
        except (AttributeError, TypeError) as e:
            pass
Update_ephemNavConverter = _ephem_nav_converter.Update_ephemNavConverter
SelfInit_ephemNavConverter = _ephem_nav_converter.SelfInit_ephemNavConverter
CrossInit_ephemNavConverter = _ephem_nav_converter.CrossInit_ephemNavConverter
Reset_ephemNavConverter = _ephem_nav_converter.Reset_ephemNavConverter
sizeof_EphemerisIntMsg = _ephem_nav_converter.sizeof_EphemerisIntMsg
class EphemNavConverterData(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, EphemNavConverterData, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, EphemNavConverterData, name)
__repr__ = _swig_repr
__swig_setmethods__["stateOutMsgName"] = _ephem_nav_converter.EphemNavConverterData_stateOutMsgName_set
__swig_getmethods__["stateOutMsgName"] = _ephem_nav_converter.EphemNavConverterData_stateOutMsgName_get
if _newclass:
stateOutMsgName = _swig_property(_ephem_nav_converter.EphemNavConverterData_stateOutMsgName_get, _ephem_nav_converter.EphemNavConverterData_stateOutMsgName_set)
__swig_setmethods__["ephInMsgName"] = _ephem_nav_converter.EphemNavConverterData_ephInMsgName_set
__swig_getmethods__["ephInMsgName"] = _ephem_nav_converter.EphemNavConverterData_ephInMsgName_get
if _newclass:
ephInMsgName = _swig_property(_ephem_nav_converter.EphemNavConverterData_ephInMsgName_get, _ephem_nav_converter.EphemNavConverterData_ephInMsgName_set)
__swig_setmethods__["stateOutMsgID"] = _ephem_nav_converter.EphemNavConverterData_stateOutMsgID_set
__swig_getmethods__["stateOutMsgID"] = _ephem_nav_converter.EphemNavConverterData_stateOutMsgID_get
if _newclass:
stateOutMsgID = _swig_property(_ephem_nav_converter.EphemNavConverterData_stateOutMsgID_get, _ephem_nav_converter.EphemNavConverterData_stateOutMsgID_set)
__swig_setmethods__["ephInMsgID"] = _ephem_nav_converter.EphemNavConverterData_ephInMsgID_set
__swig_getmethods__["ephInMsgID"] = _ephem_nav_converter.EphemNavConverterData_ephInMsgID_get
if _newclass:
ephInMsgID = _swig_property(_ephem_nav_converter.EphemNavConverterData_ephInMsgID_get, _ephem_nav_converter.EphemNavConverterData_ephInMsgID_set)
__swig_setmethods__["outputState"] = _ephem_nav_converter.EphemNavConverterData_outputState_set
__swig_getmethods__["outputState"] = _ephem_nav_converter.EphemNavConverterData_outputState_get
if _newclass:
outputState = _swig_property(_ephem_nav_converter.EphemNavConverterData_outputState_get, _ephem_nav_converter.EphemNavConverterData_outputState_set)
def __init__(self):
this = _ephem_nav_converter.new_EphemNavConverterData()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _ephem_nav_converter.delete_EphemNavConverterData
__del__ = lambda self: None
EphemNavConverterData_swigregister = _ephem_nav_converter.EphemNavConverterData_swigregister
EphemNavConverterData_swigregister(EphemNavConverterData)
class EphemerisIntMsg(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, EphemerisIntMsg, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, EphemerisIntMsg, name)
__repr__ = _swig_repr
__swig_setmethods__["r_BdyZero_N"] = _ephem_nav_converter.EphemerisIntMsg_r_BdyZero_N_set
__swig_getmethods__["r_BdyZero_N"] = _ephem_nav_converter.EphemerisIntMsg_r_BdyZero_N_get
if _newclass:
r_BdyZero_N = _swig_property(_ephem_nav_converter.EphemerisIntMsg_r_BdyZero_N_get, _ephem_nav_converter.EphemerisIntMsg_r_BdyZero_N_set)
__swig_setmethods__["v_BdyZero_N"] = _ephem_nav_converter.EphemerisIntMsg_v_BdyZero_N_set
__swig_getmethods__["v_BdyZero_N"] = _ephem_nav_converter.EphemerisIntMsg_v_BdyZero_N_get
if _newclass:
v_BdyZero_N = _swig_property(_ephem_nav_converter.EphemerisIntMsg_v_BdyZero_N_get, _ephem_nav_converter.EphemerisIntMsg_v_BdyZero_N_set)
__swig_setmethods__["timeTag"] = _ephem_nav_converter.EphemerisIntMsg_timeTag_set
__swig_getmethods__["timeTag"] = _ephem_nav_converter.EphemerisIntMsg_timeTag_get
if _newclass:
timeTag = _swig_property(_ephem_nav_converter.EphemerisIntMsg_timeTag_get, _ephem_nav_converter.EphemerisIntMsg_timeTag_set)
def __init__(self):
this = _ephem_nav_converter.new_EphemerisIntMsg()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _ephem_nav_converter.delete_EphemerisIntMsg
__del__ = lambda self: None
EphemerisIntMsg_swigregister = _ephem_nav_converter.EphemerisIntMsg_swigregister
EphemerisIntMsg_swigregister(EphemerisIntMsg)
import sys
protectAllClasses(sys.modules[__name__])
# This file is compatible with both classic and new-style classes.
|
993,248 | b82b530452109f379efb452a62e9bb034fa07e6a | #!/usr/bin/env python
#============================================
#
# Это мой первый скрипт на python. Правда.
# Но есть опыт разработки на TCL
# Не смотрел как делают коллеги, но думаю отличительной особенностью
# этого скрипта будет использование SNMP
#
# Спасибо!
#
#============================================
#Imports
from netmiko import ConnectHandler
from netmiko import NetMikoAuthenticationException, NetMikoTimeoutException
from pysnmp.hlapi import *
import csv
import datetime
import multiprocessing as mp
import sys
import os
import re
import time
#Module 'Global' variables
DEVICE_FILE_PATH = 'devices.csv' # file should contain a list of devices in format: ip,username,password,device_type
BACKUP_DIR_PATH = 'Backups' # complete path to backup directory
COMM = 'publ'
NTP = '172.18.65.11'
def get_devices_from_file(device_file):
    """Read the device inventory from a ';'-delimited CSV file.

    Each data row becomes a dict keyed by the header columns
    (ip, username, password, device_type, ...).

    Returns a list of those dicts.
    """
    with open(device_file, 'r') as handle:
        inventory = [row for row in csv.DictReader(handle, delimiter=';')]
    print("Got the device list from inventory\n")
    return inventory
def get_current_date_and_time():
    """Return the current local time formatted as 'yyyy_mm_dd-hh_mm_ss'
    (used as the per-run suffix of backup file names)."""
    stamp = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
    print("Got a timestamp\n")
    return stamp
def connect_to_device(device):
    # Borrowed boilerplate.
    # Open an SSH session (Netmiko) to the device described by one
    # inventory row; `device` must provide the keys: ip, username,
    # password, device_type, secret (enable password).
    connection = ConnectHandler(
        host = device['ip'],
        username = device['username'],
        password=device['password'],
        device_type=device['device_type'],
        secret=device['secret']
    )
    # returns a "connection" object
    return connection
def disconnect_from_device(connection, hostname):
    # Borrowed boilerplate.  `hostname` is unused - kept for call-site
    # symmetry with the other per-device helpers (presumably for logging).
    connection.disconnect()
def get_backup_file_path(hostname,timestamp):
    """Build (and ensure the directory for) the per-device backup path.

    Returns BACKUP_DIR_PATH/<hostname>/<hostname>-<timestamp>.txt.
    """
    # makedirs with exist_ok replaces the original exists()+mkdir pair:
    # os.mkdir raised FileNotFoundError when BACKUP_DIR_PATH itself did
    # not exist yet, and the exists() check raced between the mp.Pool(4)
    # worker processes that call this concurrently.
    os.makedirs(os.path.join(BACKUP_DIR_PATH, hostname), exist_ok=True)
    # Merging a string to form a full backup file name
    backup_file_path = os.path.join(BACKUP_DIR_PATH, hostname, '{}-{}.txt'.format(hostname, timestamp))
    # returning backup file path
    return backup_file_path
def create_backup(connection, backup_file_path, hostname):
    """Pull the running configuration from a device and write it to
    backup_file_path.

    `hostname` is unused (kept for call-site symmetry).
    Returns True on success, False on any failure.
    """
    try:
        # sending a CLI command using Netmiko; enable() enters privileged
        # mode first so 'sh run' is permitted
        connection.enable()
        output = connection.send_command('sh run')
        # with-statement guarantees the backup file is closed even if the
        # write fails
        with open(backup_file_path, 'w') as backup_file:
            backup_file.write(output)
        return True
    except Exception:
        # Was `except Error:` - an undefined name, so any failure raised
        # NameError instead of reporting False as intended.
        return False
def ntp_chk(connection):
    """Ensure the device clock is NTP-synchronized.

    Sets the MSK timezone, adds the preferred NTP server if it is missing
    and reachable, then polls for synchronization for up to ~60 seconds.
    Returns True when the clock reports synchronized, False otherwise
    (including on any command failure).
    """
    try:
        # Configure the timezone first so a synced clock shows local time.
        connection.send_config_set('clock timezone MSK 3 0')
        # Check whether our NTP server is already configured.  Note the
        # space after 'i': the original fused the include pattern into the
        # keyword ('| i172...'), which the IOS pipe parser rejects.
        if not re.search(NTP, connection.send_command('sh ntp config | i ' + NTP)):
            # Server not configured yet: probe reachability with up to 5
            # single-packet pings (faster than one long ping when it is up).
            i = 0
            ok = 0
            cmd = 'ping ' + NTP + ' rep 1'
            while i < 5 and not ok:
                i += 1
                ok = re.search('Success rate is 100 percent', connection.send_command(cmd))
            if ok:
                cmd = 'ntp server ' + NTP + ' prefer'
                connection.send_config_set(cmd)
        i = 0
        # Poll synchronization status up to 6 times, 10 seconds apart.
        while i < 6:
            i += 1
            if re.search('Clock is synchronized', connection.send_command('sh ntp status | i Clock')):
                return True
            time.sleep(10)
        return False
    except Exception:
        # Was `except Error:` - an undefined name that raised NameError
        # instead of reporting failure.
        return False
def snmp_get_exact(community, ip, oid):
    """SNMP GET of a single exact OID.

    Returns (oid_str, value_str, True) on success, or
    ('Error', 'Error', False) on any SNMP failure - the same 3-tuple
    shape as snmp_get_next(), so callers can always unpack three values.
    """
    errorIndication, errorStatus, errorIndex, varBinds = next(
        getCmd(SnmpEngine(),
               CommunityData(community),
               UdpTransportTarget((ip, 161)),
               ContextData(),
               # single ObjectIdentity: the original double-wrapped the
               # OID (ObjectIdentity(ObjectIdentity(oid))) needlessly
               ObjectType(ObjectIdentity(oid)))
    )
    if errorIndication or errorStatus:
        # The original returned a 2-tuple here while every caller unpacks
        # three values (oid, value, flag), raising ValueError on error.
        return ('Error', 'Error', False)
    else:
        oid, value = varBinds[0]
        return (str(oid), value.prettyPrint(), True)
def snmp_get_next(community, ip, oid):
    # SNMP GETNEXT: read the OID that follows the given one (used to walk
    # a subtree).  Returns (oid_str, value_str, True) on success or
    # ('Error', 'Error', False) on failure.
    # NOTE(review): the double ObjectIdentity(ObjectIdentity(oid)) wrap
    # looks redundant - confirm against pysnmp docs before simplifying.
    errorIndication, errorStatus, errorIndex, varBinds = next(
        nextCmd(SnmpEngine(),
               CommunityData(community),
               UdpTransportTarget((ip, 161)),
               ContextData(),
               ObjectType(ObjectIdentity(ObjectIdentity(oid))))
    )
    if errorIndication or errorStatus:
        return ('Error', 'Error', False)
    else:
        oid, value = varBinds[0]
        return (str(oid), value.prettyPrint(), True)
def process_target(device,timestamp):
    # Worker body (runs in a mp.Pool process).  For one inventory row:
    # - connects to the device,
    # - gets a backup file name and a hostname for this device,
    # - creates a backup for this device
    # - sets up / verifies NTP time synchronization
    # - terminates connection
    # - gathers all remaining information over SNMP
    # Requires connection object and a timestamp string as an input;
    # prints a one-line '|'-separated report instead of returning it.
    connection = connect_to_device(device)
    backup_file_path = get_backup_file_path(device['hostname'], timestamp)
    backup_result = create_backup(connection, backup_file_path, device['hostname'])
    ntp_stat = 'Clock not in Sync'
    bakup = 'Config not backuped'
    if backup_result:
        # backup over ssh succeeded
        bakup = 'Config backuped'
        if ntp_chk(connection): ntp_stat = 'Clock in Sync'
    disconnect_from_device(connection, device['hostname'])
    report = device['hostname']
    # Read entPhysicalModelName
    oid, model, flag = (snmp_get_next(COMM, device['ip'], '1.3.6.1.2.1.47.1.1.1.1.13'))
    # On modular devices entry .1 may be empty, so walk forward to the
    # first non-empty OID in the subtree - a big simplification; strictly
    # this should be handled per device model.
    while model == '' and re.search('1.3.6.1.2.1.47.1.1.1.1.13', oid):
        oid, model, flag = (snmp_get_next(COMM, device['ip'], oid))
        if not flag: break
    if flag:
        # SNMP reachable
        report = report +'|'+model
        # Read sysConfigName (boot image path)
        oid, ios, flag = (snmp_get_exact(COMM, device['ip'], '1.3.6.1.4.1.9.2.1.73.0'))
        # Extract the bare IOS image name from the path
        ios = ios.split('/')[-1]
        ios = ios.split(':')[-1]
        report = report +'|'+ios
        # NPE vs PE image (payload-encryption variant)
        if re.search('npe', ios): ios_type = 'NPE'
        else: ios_type = 'PE '
        report = report +'|'+ios_type
        # Read cdpGlobalRun (is CDP enabled?)
        oid, value, flag = (snmp_get_exact(COMM, device['ip'], '1.3.6.1.4.1.9.9.23.1.3.1.0'))
        if value == '1':
            # CDP enabled
            report = report +'|CDP is ON '
            # Walk the per-interface neighbour counts and sum them
            oid = '1.3.6.1.4.1.9.9.23.1.2.1.1.3'
            neib = 0
            oid, value, flag = (snmp_get_next(COMM, device['ip'], oid))
            while re.search('23.1.2.1.1.3', oid):
                neib = neib + int(value)
                oid, value, flag = (snmp_get_next(COMM, device['ip'], oid))
                if not flag: break
            report = report+','+str(neib)+' peers'
        else:
            # CDP disabled
            report = report +'|CDP is OFF'
    else:
        # SNMP error
        report = report +'|SNMP Error'
    report = report +'|'+ntp_stat+'|'+bakup
    print(report)
def main(*args):
    """Entry point: back up and audit every device in the inventory,
    fanning the per-device work out over 4 worker processes."""
    # getting the timestamp string shared by all backups of this run
    timestamp = get_current_date_and_time()
    print (timestamp)
    # getting a device list from the file in a python format
    device_list = get_devices_from_file(DEVICE_FILE_PATH)
    # creating a empty list of async results
    processes=list()
    # Running workers to manage connections
    with mp.Pool(4) as pool:
        # Starting several processes...
        for device in device_list:
            processes.append(pool.apply_async(process_target, args=(device,timestamp)))
        # Waiting for results... (get() also re-raises worker exceptions)
        for process in processes:
            process.get()
if __name__ == '__main__':
    # checking if we run independently
    _, *script_args = sys.argv
    # the execution starts here
    main(*script_args)
|
993,249 | 55c344640aacd48b7b190f130c81bae00709aa90 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import sys,os
import numpy as np
import keras
import pandas as pd
import re
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
from keras.models import Sequential
from keras.layers import LSTM
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.metrics import classification_report,accuracy_score,f1_score
from sklearn.cross_validation import cross_val_score,cross_val_predict,KFold
np.random.seed(1)
MAX_SEQUENCE_LENGTH = 30 #max number of sentences in a message
MAX_NB_WORDS = 20000 #cap vocabulary
GLOVE_FILE = '/Users/nookiebiz4/Downloads/glove/glove.twitter.27B.50d.txt'
EMBEDDING_DIM = 50 #size of word vector
TWITTER_FILE = '/Users/nookiebiz4/583_proj2/training-Obama-Romney-tweets.xlsx'
JAR_FILE = '/Users/nookiebiz4/Downloads/stanford-postagger-2016-10-31/stanford-postagger.jar'
MODEL_FILE = '/Users/nookiebiz4/Downloads/stanford-postagger-2016-10-31/models/english-left3words-distsim.tagger'
def get_Ytrue_Ypred(model, x, y):
    """Decode one-hot / softmax rows back into sentiment labels.

    Column 0 -> label 0, column 1 -> label 1, column 2 -> label -1.
    Returns (y_true, y_pred) as float arrays of decoded labels.
    """
    label_of_column = {0: 0, 1: 1, 2: -1}
    probabilities = model.predict(x)
    n = len(y)
    y_pred = np.zeros(n)
    y_true = np.zeros(n)
    for row in range(n):
        y_pred[row] = label_of_column[np.argmax(probabilities[row])]
        y_true[row] = label_of_column[np.argmax(y[row])]
    return y_true, y_pred
# read the data
obama_data = pd.read_excel(TWITTER_FILE,names = ['date','time','text','sentiment'],parse_cols = 4,sheetname = 'Obama')
romney_data = pd.read_excel(TWITTER_FILE,names = ['date','time','text','sentiment'],parse_cols = 4,sheetname = 'Romney')
def get_data(frame):
    """Clean one raw tweet sheet.

    Drops the first (header-artifact) row, normalises the text columns to
    string dtypes, keeps only rows with sentiment < 2 (discarding 'mixed'
    class 2), and re-indexes the result from 0.
    """
    frame = frame.iloc[1:]
    # Same conversions, same order, as the original column-by-column code.
    for column, kind in (('text', 'unicode'), ('date', 'str'), ('time', 'unicode')):
        frame[column] = frame[column].values.astype(kind)
    # remove rows with mixed sentiment
    frame = frame[frame['sentiment'] < 2]
    frame.index = range(len(frame))
    return frame
obama_data = get_data(obama_data)
romney_data = get_data(romney_data)
print obama_data.head()
print romney_data.head()
emoticon_dictionary = {':)':' smileyface ','(:':' smileyface ','XD': ' happyface ',':D': ' smileyface ','>.<':' smileyface ',':-)':' smileyface ',';)':' winkface ',';D':' winkface ',':\'(':' cryingface '}
emoticons = [':\)','\(:','XD',':D','>\.<',':-\)',';\)',';D',':\'\(']
emoticon_pattern = re.compile(r'(' + '\s+|\s+'.join(emoticons) + r')')
# convert emoticons to words
def emoticon_converter(x):
    # Replace each whitespace-delimited emoticon in the string with its
    # word token (e.g. ':)' -> ' smileyface ') using the module-level
    # emoticon_pattern / emoticon_dictionary defined just above.
    x = emoticon_pattern.sub(lambda i : emoticon_dictionary[i.group().replace(' ','')],x)
    return x
obama_data['text'] = obama_data['text'].apply(emoticon_converter)
romney_data['text'] = romney_data['text'].apply(emoticon_converter)
# http://stackoverflow.com/questions/8870261/how-to-split-text-without-spaces-into-list-of-words
# convert hashtags into words
def separate_hashtag(x):
    # Expand '#SomeHashtag' into separate lowercase words in every string
    # of the sequence x (in place), returning x.
    # NOTE(review): depends on split_hashtag(), which is not defined
    # anywhere in this file - calling this raises NameError.  Both call
    # sites below are commented out, presumably for that reason.
    for i in range(0,len(x)):
        hashtags = re.findall(r"#(\w+)", x[i])
        for words in hashtags:
            x[i] = re.sub('#'+ words,split_hashtag(words.lower()),x[i])
    return x
#obama_data['text'] = separate_hashtag(obama_data['text'])
#romney_data['text'] = separate_hashtag(romney_data['text'])
# remove punctuations
punc = ['\:','\;','\?','\$','\.','\(','\)','\#',',','-']
cond_1 = re.compile('|'.join(punc))
# remove tags
tags = ['<a>','</a>','<e>','</e>']
cond_2 = re.compile("|".join(tags))
def preprocess(data):
    """ preprocess the data"""
    # Cleans a Series of tweet strings; cond_1/cond_2 are the module-level
    # compiled regexes for punctuation and <a>/<e> tags defined above.
    # remove punctuations
    data = data.apply(lambda x : re.sub(cond_1,'',x))
    # remove tags
    data = data.apply(lambda x : re.sub(cond_2,'',x))
    # remove users (@mentions, optionally with a space after '@')
    data = data.apply(lambda x : re.sub(r'\@\s?\w+','',x))
    # remove hypertext (only matches bare http:// links, not https)
    data = data.apply(lambda x : re.sub(r'http://\w+','',x))
    # remove digits
    data = data.apply(lambda x : re.sub(r'[0-9]+','',x))
    # convert to ascii
    # NOTE(review): this file targets Python 2 (print statements below);
    # under Python 3, encode('utf-8') would turn the values into bytes.
    data = data.apply(lambda x: x.encode('utf-8'))
    return data
obama_data['text'] = preprocess(obama_data['text'])
romney_data['text'] = preprocess(romney_data['text'])
def process_time(data):
    """ processes time """
    # Normalises the 'date' and 'time' columns of a tweet DataFrame:
    # extracts a 2-digit date field from three observed source formats and
    # converts clock times (with optional AM/PM markers) to 24h datetimes.
    def extract_date(pattern,string):
        # Return the captured 2-digit group, or the string unchanged when
        # the pattern does not match.
        temp = re.match(pattern,string)
        if temp:
            return temp.group(1)
        else:
            return string
    # clean date: the three date layouts seen in the spreadsheet
    date_format_1 = re.compile('\d+/(\d{2})/\d+')
    date_format_2 = re.compile('\d+\-\d+\-(\d{2})')
    date_format_3 = re.compile('(\d{2})\-[a-zA-Z]+\-\d+')
    date_format = [date_format_1] + [date_format_2] + [date_format_3]
    # remove whitespace
    data['date'] = data['date'].apply(lambda x : x.replace(' ',''))
    for i in date_format:
        data['date'] = data['date'].apply(lambda x: extract_date(i,x))
    def converter(first,second):
        # Convert to 24h when a PM marker is present.
        # NOTE(review): the guard adds 12 only when the hour already
        # exceeds 12 - this looks inverted (a PM hour <= 12 is the case
        # that needs +12); confirm against the source data.
        if first == 'AM':
            return second
        else:
            val = re.findall('(\d{1,2})',second)[0]
            if int(val) > 12:
                val = str(int(val) + 12)
            return re.sub('\d{1,2}',val,second,1)
    def extract_time(pattern,string):
        # NOTE(review): returns None implicitly when the pattern does not
        # match; pd.to_datetime below would then receive None for that row.
        temp = re.match(pattern,string)
        if temp:
            first = temp.group(1)
            second = temp.group(2)
            third = temp.group(3)
            if first is None and third is None:
                return second
            if first == 'AM' or first == 'PM':
                return converter(first,second)
            else:
                return converter(third,second)
    # clean time: optional leading or trailing AM/PM around hh:mm:ss
    time_format_1 = re.compile('(AM|PM)?\s?(\d{1,2}:\d{1,2}:\d{1,2})\s?(AM|PM)?')
    # remove whitespace
    data['time'] = data['time'].apply(lambda x : x.replace(' ',''))
    data['time'] = data['time'].apply(lambda x : extract_time(time_format_1,x))
    data['time'] = pd.to_datetime(data['time'], format='%H:%M:%S')
    return data
## IMP - Process Emoticons, better stopwords list, clean hashtags
# Tweet NLP
manual_stopwords_list = ['rt']
# stopwords list based on pos tags
from nltk.tag import StanfordPOSTagger
jar = JAR_FILE
model = MODEL_FILE
st = StanfordPOSTagger(model, jar, encoding='utf8')
remove_tags_stanfordpos = ['IN','DT','PRP','PRP$','WDT','WP','WP$','CD','PDT']
remove_tags_tweetnlp = []
def tweet_tag_filter(x):
    # Placeholder for Tweet-NLP-based POS filtering (remove_tags_tweetnlp
    # above is still empty); currently a no-op that returns None.
    pass
# obama_data['text'] = obama_data['text'].apply(tweet_tag_filter)
# romney_data['text'] = romney_data['text'].apply(tweet_tag_filter)
def pos_tag_filter(x):
    """Remove words whose Stanford POS tag is in remove_tags_stanfordpos.

    Tags the whitespace-split words of `x` with the module-level Stanford
    tagger `st`, blanks the tagged-out words, and returns the surviving
    words as a space-separated string.
    """
    words = x.split()
    for i, (_, tag) in enumerate(st.tag(words)):
        if tag in remove_tags_stanfordpos:
            words[i] = ''
    # ' '.join (skipping blanks), not ''.join: the original fused the
    # surviving words into one unreadable token ("the red car" -> "redcar").
    return ' '.join(w for w in words if w)
# obama_data['text'] = obama_data['text'].apply(pos_tag_filter)
# romney_data['text'] = romney_data['text'].apply(pos_tag_filter)
# remove stopwords
stopwords_list = stopwords.words('english') + manual_stopwords_list
# stemming
class Tokenizer(object):
    """Callable tokenizer: word-tokenizes a document with NLTK and
    WordNet-lemmatizes each token (shaped for use as an sklearn
    vectorizer's ``tokenizer=`` argument)."""
    def __init__(self):
        self.wnl = WordNetLemmatizer()
    def __call__(self, doc):
        return [self.wnl.lemmatize(t) for t in word_tokenize(doc)]
def get_X_y(frame):
    """Split a tweet DataFrame into its features and target:
    (text Series, sentiment Series cast to int)."""
    features = frame['text']
    target = frame['sentiment'].astype(int)
    return features, target
texts = obama_data['text']
labels = np.array(obama_data['sentiment'])
tokenizer = keras.preprocessing.text.Tokenizer(nb_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index #key = word, value = number
print('Found %s unique tokens.' % len(word_index))
#pad the data
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
print labels[0:4]
labels = keras.utils.np_utils.to_categorical(labels,nb_classes=3)
print labels[0:4]
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# Load the GloVe embeddings into a dict: word -> float32 vector of
# EMBEDDING_DIM components (each file line is "word v1 v2 ... vN").
embeddings_index = {}
# with-statement guarantees the handle is closed even when a malformed
# line raises; the original open()/close() pair leaked it on error.
with open(GLOVE_FILE) as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
print('Found %s word vectors.' % len(embeddings_index))
#prepare embedding matrix
#num_words = min(MAX_NB_WORDS, len(word_index))
num_words = len(word_index)+1
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
#create the embedding layer
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
# create the model
np.random.seed(1)
#k fold cross validaiton
avg_acc = []
avg_f1 = []
kf = KFold(n=len(data),n_folds=10)
for train,test in kf:
np.random.seed(1)
x_train, x_val, y_train, y_val = data[train], data[test], labels[train], labels[test]
model = Sequential()
model.add(embedding_layer)
model.add(LSTM(10))
model.add(Dense(len(labels[0]), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(x_train, y_train, nb_epoch=30, batch_size=64,verbose=False)
y_true,y_pred = get_Ytrue_Ypred(model,x_val,y_val)
avg_acc.append(accuracy_score(y_true,y_pred))
avg_f1.append(f1_score(y_true,y_pred))
print classification_report(y_true,y_pred)
#print classification_report(y_true,y_pred)
print 'Average f1-score = ', np.mean(np.array(avg_f1))
print 'Overall Accuracy = ',100.0*np.mean(np.array(avg_acc)),'%'
|
993,250 | 19fb9de547cda3f071f7a8a0661c74e89646d647 | from rest_framework import serializers
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from . import models
class TeacherSerializer(serializers.ModelSerializer):
    """Serializer that registers a Teacher together with its Django auth
    User.  `username`/`password` are accepted write-only; `token` returns
    a DRF auth token for the created user."""
    pk = serializers.UUIDField(read_only=True, source='idTeacher')
    password = serializers.CharField(write_only=True)
    username = serializers.CharField(write_only=True)
    token = serializers.SerializerMethodField(read_only=True)
    def get_token(self, obj):
        # get_or_create keeps this idempotent across serializations.
        token, created = Token.objects.get_or_create(user=obj.user)
        return token.key
    def validate_username(self, value):
        # Reject duplicate usernames ("Username já cadastrado" =
        # "Username already registered").
        if User.objects.filter(username=value).exists():
            raise serializers.ValidationError("Username já cadastrado.")
        return value
    def create(self, validated_data):
        # Create the auth User first, then the Teacher profile linked to it.
        username = validated_data.pop('username')
        user = User.objects.create(username=username, first_name=validated_data['name'], email=validated_data['email'])
        # set_password stores a hash, never the raw password.
        user.set_password(validated_data.pop('password'))
        user.save()
        teacher = models.Teacher.objects.create(**validated_data, user=user)
        return teacher
    class Meta:
        model = models.Teacher
        fields = ('pk', 'name', 'email', 'dateOfBirth', 'gender', 'username', 'password', 'token')
class StudentSerializer(serializers.ModelSerializer):
    """Serializer that registers a Student together with its Django auth
    User.  `username`/`password` are accepted write-only; `token` returns
    a DRF auth token for the created user."""
    pk = serializers.UUIDField(read_only=True, source='idStudent')
    password = serializers.CharField(write_only=True)
    username = serializers.CharField(write_only=True)
    token = serializers.SerializerMethodField(read_only=True)
    def get_token(self, obj):
        # get_or_create keeps this idempotent across serializations.
        token, created = Token.objects.get_or_create(user=obj.user)
        return token.key
    def validate_username(self, value):
        # Reject duplicate usernames ("Username já cadastrado" =
        # "Username already registered").
        if User.objects.filter(username=value).exists():
            raise serializers.ValidationError("Username já cadastrado.")
        return value
    def create(self, validated_data):
        # Create the auth User first, then the Student profile linked to it.
        username = validated_data.pop('username')
        user = User.objects.create(username=username, first_name=validated_data['name'], email=validated_data['email'])
        # set_password stores a hash, never the raw password.
        user.set_password(validated_data.pop('password'))
        user.save()
        # Local was misleadingly named `teacher` (copy-paste from
        # TeacherSerializer); it is a Student.
        student = models.Student.objects.create(**validated_data, user=user)
        return student
    class Meta:
        model = models.Student
        fields = ('pk', 'name', 'email', 'dateOfBirth', 'course', 'gender', 'username', 'password', 'token')
# class UserSerializer(serializers.ModelSerializer):
# class Meta:
# model = User
# fields = ('email', 'first_name', 'last_name', 'password', 'is_superuser')
|
993,251 | 7c71bbca6b15633c7df3fdf1c6edfa7ff7d57dc3 | # import os,time
# data = os.path.abspath('.')#获取当前文件路径
# # print(data)
# # data1 = os.listdir(data)#当前文件路径下的全部文件
# # print(data1)
# # data2 = os.getcwd() #当前文件路径
# # print(data2)
# # data3 = os.path.dirname(data)#当前路径的父路径
# # print(data3)
# # data4= os.path.basename(data)
# # print(data4)
# # data5 = os.path.isfile(data4)
# # print(data5)
# data6 = os.stat(data)
# print(data6)
# data7 = os.path.join('F:\Voice\BaseDriver\music','您好东方.mp3')
# print(data7)
# data8 = os.path.split(data7)
# print(data8)
# # data9 = os.path.isdir(data)
# # print(data9)
# # data10 = os.path.exists('2.txt')
# # print(data10)
# # #os.mkdir("2.txt")
# # data12 = os.path.getsize('path.py')
# # print(data12)
# # data13 = time.ctime(os.path.getatime(("2.txt")))
# # print(data13)
# # data15 = os.listdir(os.getcwd())
# # data14 = [x for x in data15 if os.path.splitext(x)[1] == '.py']
# # print(data14)
# # os.rmdir('2.txt')
# no = time.time()
# print(no)
# now = time.strftime(time.ctime())
# print(now)
# now1 = time.localtime()
# print(now1)
# now2 = time.strftime('"%Y-%m-%d-%X"',time.localtime(time.time()))
# print(now2)
# time.sleep(1)
# print("clock1:%s" % time.clock() )
|
993,252 | 1b00aec3361c5f1419ed37ac7cb97a516a05be56 | from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.embed.v3.views import EmbedAPIBase
class ProxiedEmbedAPIBase(EmbedAPIBase):
    """Embed API view variant served from the proxied docs domain."""
    # DRF defaults to BasicAuthentication and SessionAuthentication;
    # neither is supported on the community site, so disable authentication.
    authentication_classes = []
class ProxiedEmbedAPI(SettingsOverrideObject):
    # Indirection point: deployments may swap the implementation via settings.
    _default_class = ProxiedEmbedAPIBase
|
993,253 | c60eb1450dd4f838929ca54ab9e7aaa351aadee1 | import scrapy
from jusik.items import JusikItem
class MybotsSpider(scrapy.Spider):
    """Scrapes price figures for one stock (code 005930) from Naver Finance."""
    name = 'mybots'
    # Fix: allowed_domains must contain bare domain names, not full URLs —
    # Scrapy's offsite middleware cannot match a URL here and would warn.
    allowed_domains = ['finance.naver.com']
    start_urls = ['http://finance.naver.com/item/main.nhn?code=005930']

    def parse(self, response):
        """Extract the stock code and price fields into a JusikItem."""
        codes = response.xpath('//*[@id="middle"]/div[1]/div[1]/div/span[1]/text()').extract()
        # The page marks its numeric values with class "blind"; the fixed
        # indices below are position-dependent and break if the page layout changes.
        blinds = response.css('.blind::text').extract()
        item = JusikItem()
        item['code'] = codes[0]
        item['price'] = blinds[36]
        item['total'] = blinds[42]
        item['high'] = blinds[40]
        item['low'] = blinds[44]
        return [item]
|
993,254 | f73f13960012590b128a8b7f0d1fccaec22916a1 | '''
models.py
=========
Basic objects used for communication bodies with a servlet container.
'''
import struct
from collections import namedtuple
from .ajp_types import (AjpAttribute, AjpHeader, AjpPacketHeadersToContainer,
AjpRequestDirection)
from .utils import pack_as_string
# pylint: disable=C0103,R0902,R0913,R0914,W0102
# Used by AjpForwardRequest to avoid magic numbers.
DEFAULT_REQUEST_SERVER_PORT = 80
# AjpForwardRequest uses this namedtuple for building a list of request
# attributes.
ATTRIBUTE = namedtuple('Attribute', 'ajp_attr, value')
class AjpForwardRequest:
    '''
    Represents a request to the servlet container.
    See https://tomcat.apache.org/connectors-doc/ajp/ajpv13a.html
    :param direction: AjpRequestDirection for this request.
    :param method: AjpCommand for the method to use.
    :param protocol: (optional) protocol to set. This is only sent as
        part of the request and is metadata from what I can tell.
    :param req_uri: (optional) request uri, which is the path of the
        url sent to the servlet container.
    :param remote_addr: IP address of the host sending the request.
    :param remote_host: name of the host sending the request.
    :param server_name: name of the server to receive the request.
    :param server_port: (optional) target port on the server. This is
        only sent as part of the request and is metadata from what I
        can tell.
    :param is_ssl: (optional) boolean flag indicating that the request
        is SSL (default is False).
    :param request_headers: dictionary of HTTP request headers.
    :param attributes: list of ATTRIBUTE named tuples that are AJP
        attributes sent to the request.
    :param data_stream: (optional) File-like object containing the
        request data (e.g. json, form data, or binary data).
    '''
    # AJP's maximum buffer size for sending data.
    MAX_REQUEST_LENGTH = 8186

    def __init__(self,
                 direction=AjpRequestDirection.WEB_SERVER_TO_SERVLET_CONTAINER,
                 method=None,
                 protocol='HTTP/1.1',
                 req_uri=None,
                 remote_addr=None,
                 remote_host=None,
                 server_name=None,
                 server_port=DEFAULT_REQUEST_SERVER_PORT,
                 is_ssl=False,
                 request_headers=None,
                 attributes=None,
                 data_stream=None):
        self._direction = direction
        self._method = method
        self._protocol = protocol
        self._req_uri = req_uri
        self._remote_addr = remote_addr
        self._remote_host = remote_host
        self._server_name = server_name
        self._server_port = server_port
        self._is_ssl = is_ssl
        self._num_headers = 0
        # Fix: avoid mutable default arguments ({} / []) shared across calls.
        self._request_headers = {} if request_headers is None else request_headers
        self._attributes = [] if attributes is None else attributes
        self._data_stream = data_stream

    @property
    def method(self):
        'Returns the AJP/HTTP method'
        return self._method

    @property
    def protocol(self):
        'Returns the protocol for this AjpForwardRequest'
        return self._protocol

    @property
    def req_uri(self):
        'Returns the `req_uri` for this AjpForwardRequest'
        return self._req_uri

    @property
    def remote_addr(self):
        'Returns the `remote_addr` for this AjpForwardRequest'
        return self._remote_addr

    @property
    def remote_host(self):
        'Returns the `remote_host` for this AjpForwardRequest'
        return self._remote_host

    @property
    def server_name(self):
        'Returns the `server_name` for this AjpForwardRequest'
        return self._server_name

    @property
    def server_port(self):
        'Returns the `server_port` for this AjpForwardRequest'
        return self._server_port

    @property
    def is_ssl(self):
        'Returns the `is_ssl` for this AjpForwardRequest'
        return self._is_ssl

    @property
    def request_headers(self):
        'Returns the `request_headers` for this AjpForwardRequest'
        return self._request_headers

    @property
    def request_attributes(self):
        'Returns the `attributes` for this AjpForwardRequest'
        return self._attributes

    @property
    def data_stream(self):
        'Returns the data for this AjpForwardRequest'
        return self._data_stream

    def __repr__(self):
        return '<AjpForwardRequest: [%s], remote_host=%s, req_uri=%s, request_headers=%s>' % (
            self._method.name, self._remote_host, self._req_uri, self._request_headers)

    def serialize_to_packet(self):
        'Returns the bytes object to send to the servlet container.'
        return self._serialize_forward_request()

    def serialize_data_to_packet(self):
        '''Generator that serializes the request body into packets to
        the servlet container.'''
        if not self._data_stream:
            return
        data = self._data_stream.read(self.MAX_REQUEST_LENGTH)
        while True:
            if data:
                packet = struct.pack('>H', len(data))
                packet += data
                packet_header = struct.pack(
                    '>bbH',
                    self._direction.first_bytes[0],
                    self._direction.first_bytes[1],
                    len(packet))
                yield packet_header + packet
            else:
                # Empty chunk terminates the body per the AJP protocol.
                yield struct.pack('>bbH', self._direction.first_bytes[0],
                                  self._direction.first_bytes[1], 0x00)
                break
            data = self._data_stream.read(self.MAX_REQUEST_LENGTH)

    def _serialize_forward_request(self):
        'Serializes the forward request.'
        packet = struct.pack(
            'bb',
            AjpPacketHeadersToContainer.FORWARD_REQUEST.value,
            self._method.value)
        packet += pack_as_string(self._protocol)
        packet += pack_as_string(self._req_uri)
        packet += pack_as_string(self._remote_addr)
        packet += pack_as_string(self._remote_host)
        packet += pack_as_string(self._server_name)
        # Fix: '>H' (unsigned) — ports 32768-65535 overflowed signed '>h'.
        packet += struct.pack('>H', self._server_port)
        packet += struct.pack('?', self._is_ssl)
        packet += self._serialize_headers()
        packet += self._serialize_attributes()
        packet_header = struct.pack(
            '>bbh',
            self._direction.first_bytes[0],
            self._direction.first_bytes[1],
            len(packet))
        return packet_header + packet

    def _serialize_headers(self):
        '''
        Returns the bytes object containing the number of headers and the
        serialized headers.
        '''
        hdr_packet = struct.pack('>h', len(self._request_headers))
        for hdr_name in self._request_headers:
            if isinstance(hdr_name, AjpHeader):
                # Well-known headers are sent as a two-byte code.
                # Fix: mask with 0xFF to keep the whole low byte; the
                # original 0x0F kept only the low nibble (it happened to
                # work for codes 0xA001-0xA00E but is semantically wrong).
                hdr_packet += struct.pack('BB', hdr_name.value >>
                                          8, hdr_name.value & 0xFF)
            else:
                hdr_packet += pack_as_string(hdr_name)
            hdr_val = self._request_headers[hdr_name]
            if isinstance(hdr_val, list):
                hdr_val = ','.join(hdr_val)
            hdr_packet += pack_as_string(hdr_val)
        return hdr_packet

    def _serialize_attributes(self):
        ' Returns the bytes object containing the serialized attributes.'
        attr_packet = b''
        for attr in self._attributes:
            # Assume self._attributes contain only ATTRIBUTE types
            # whose name field is a type of AjpAttribute
            attr_packet += struct.pack('b', attr.ajp_attr.code)
            if attr.ajp_attr == AjpAttribute.REQ_ATTRIBUTE:
                nm, val = attr.value
                attr_packet += pack_as_string(nm)
                attr_packet += pack_as_string(val)
            else:
                attr_packet += pack_as_string(attr.value)
        attr_packet += struct.pack('B', AjpAttribute.ARE_DONE.code)
        return attr_packet
class AjpResponse:
    '''
    A parsed response from the servlet container (AJP v13).
    See https://tomcat.apache.org/connectors-doc/ajp/ajpv13a.html.
    '''

    def __init__(self):
        # All fields are filled in by the connection layer after parsing.
        self._status_code = None
        self._status_msg = None
        self._response_headers = {}
        self._ajp_request = None
        self._content = None

    def __repr__(self):
        decoded_msg = self._status_msg.decode('utf-8')
        return '<AjpResponse: [%s, %s]>' % (self._status_code, decoded_msg)

    @property
    def status_code(self):
        'HTTP status code of the response.'
        return self._status_code

    @property
    def status_msg(self):
        'HTTP status message (bytes) of the response.'
        return self._status_msg

    @property
    def headers(self):
        'Dictionary of response headers.'
        return self._response_headers

    @property
    def request(self):
        'The AjpForwardRequest that produced this response.'
        return self._ajp_request

    @property
    def text(self):
        'Body of the response (currently returned undecoded).'
        return self._content

    @property
    def content(self):
        'Raw body bytes of the response.'
        return self._content
|
993,255 | ec993fac6948252e988608c915e5f234c44e6b96 | def calcula_aumento(salario):
if salario > 1250.00:
salario *= 1.10
round(salario,3)
return ('{:.2f}'.format(salario))
elif salario <= 1250.00:
salario *= 1.15
round (salario,3)
return ('{:.2f}'.format(salario))
|
993,256 | 0cf0793241b95dddf7c3540c13579a23b02c6917 | # coding: utf-8
"""
@author: Evan
@time: 2019/12/30 15:44
"""
import requests
import time
import re
import os
from urllib.parse import urlencode
from requests import codes
from hashlib import md5
from multiprocessing.pool import Pool
def get_page(offset):
    # Request one search-result page via the toutiao.com ajax API and
    # return the parsed JSON, or None on connection failure.
    headers = {
        'cookie': 'tt_webid=6775370412135581191; s_v_web_id=a6e5268aafeb77b631f16d5594066da0; WEATHER_CITY=%E5%8C%97%E4%BA%AC; tt_webid=6775370412135581191; __tasessionId=6lk9rx4qk1577757865050',
        'referer': 'https://www.toutiao.com/search/?keyword=%E8%A1%97%E6%8B%8D',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest'
    }
    # Query parameters of the search API; `offset` pages through results
    # in steps of `count`.
    param = {
        'aid': 24,
        'app_name': 'web_search',
        'offset': offset,
        'format': 'json',
        'keyword': '街拍',
        'autoload': 'true',
        'count': 20,
        'en_qc': 1,
        'cur_tab': 1,
        'from': 'search_tab',
        'pd': 'synthesis',
        'timestamp': int(time.time()*1000)
    }
    base_url = 'https://www.toutiao.com/api/search/content/?'
    url = base_url + urlencode(param)
    try:
        resp = requests.get(url, headers=headers)
        if resp.status_code == codes.ok:
            return resp.json()
        # NOTE(review): non-OK statuses fall through and return None implicitly.
    except requests.ConnectionError:
        return None
def parse_images(json):
    """Yield {'image': origin_url, 'title': title} dicts from a search response.

    Entries without a title or image list are skipped, as are small
    thumbnails that have no full-resolution counterpart.
    """
    data = json.get('data')
    if not data:
        return
    for item in data:
        title = item.get('title')
        images = item.get('image_list')
        if not title or not images:
            continue
        for image in images:
            # Rewrite the thumbnail path to the full-resolution "origin" path.
            origin_image = re.sub('list.*?pgc-image', 'origin/pgc-image', image.get('url'))
            if 'list/190x124' in origin_image:
                continue
            yield {'image': origin_image, 'title': title}
def save_images(item):
    # Download one image dict ({'image': url, 'title': str}) into ./image/,
    # skipping files that were already downloaded.
    print(item)
    img_path = 'image/'
    if not os.path.exists(img_path):
        os.makedirs(img_path)
    resp = requests.get(item.get('image'))
    if resp.status_code == codes.ok:
        # Strip characters that are illegal in file names.
        title = re.sub('[/:*?"<>|\\\]', '', item['title'])
        # First 10 title chars + content-hash prefix keeps names short and unique.
        file_path = '{}xx_{}.{}'.format(title[:10], md5(resp.content).hexdigest()[:6], 'jpg')
        full_path = img_path + file_path
        if not os.path.exists(full_path):
            with open(full_path, 'wb') as fw:
                fw.write(resp.content)
            print('Downloaded image path is %s' % full_path)
        else:
            print('Already Downloaded', full_path)
def main(offset):
    """Fetch one result page at *offset* and persist every image found on it."""
    page_json = get_page(offset)
    if not page_json:
        return
    for entry in parse_images(page_json):
        save_images(entry)
GROUP_START = 0  # first page group (offset = group index * 20)
GROUP_END = 20   # exclusive upper bound of page groups

if __name__ == '__main__':
    # Fan the 20 offsets out over a process pool; each worker downloads one page.
    pool = Pool()
    groups = ([x*20 for x in range(GROUP_START, GROUP_END)])
    pool.map(main, groups)
    pool.close()
    pool.join()
|
993,257 | 58b3630d4a77f1c5d089649e28f47261f602d8f4 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import es2json.helperscripts as helperscripts
from es2json import ESGenerator
from es2json import IDFile
from es2json import IDFileConsume
def run(argv=None):
    """
    Command-line entry point: parse *argv*, translate the options into
    keyword arguments for the generator classes (ESGenerator / IDFile /
    IDFileConsume) and print every returned record as JSON on stdout.
    """
    parser = argparse.ArgumentParser(description='Query elasticsearch indices/index/documents and print them '
                                     'formatted as JSON-Objects',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-server', type=str, help="use http://host:port/index/type/id.\n"
                        "host:port - hostname or IP with port of the elasticsearch node to query\n"
                        " default: localhost:9200\n"
                        "index - index to query\n"
                        " default: None → queries across all available indices\n"
                        "type - elasticsearch doctype to use (optional)\n"
                        "id - identifier of one specific document to query (optional)",
                        default="http://127.0.0.1:9200")
    parser.add_argument('-ign-source', action="store_true",
                        help='return the Document or just the Elasticsearch-Metadata')
    parser.add_argument('-size', type=str, default=None, metavar="N[:M]",
                        help='just return the first n-Records of the search,\n'
                        'or return a python slice, e.g. 2:10 returns a list\n'
                        'from the 2nd including the 9th element of the search\n'
                        'only works with the ESGenerator\n'
                        'Note: Not all slice variants may be supported')
    parser.add_argument('-timeout', type=int, default=10,
                        help='Set the time in seconds after when a ReadTimeoutError can occur.\n'
                        'Default is 10 seconds. Raise for big/difficult querys ')
    parser.add_argument("-includes", type=str,
                        help="just include following _source field(s) in the _source object")
    parser.add_argument("-excludes", type=str,
                        help="exclude following _source field(s) from the _source object")
    parser.add_argument("-headless", action='store_true',
                        help="don't print Elasticsearch metadata")
    parser.add_argument('-body', type=helperscripts.jsonstring_or_file,
                        help='Elasticsearch Query object that can be in the form of\n'
                        '1) a JSON string (e.g. \'{"query": {"match": {"name": "foo"}}}\')\n'
                        '2) a file containing the upper query string')
    parser.add_argument('-idfile', type=str,
                        help="path to a file with \\n-delimited IDs to process")
    parser.add_argument('-idfile_consume', type=str,
                        help="path to a file with \\n-delimited IDs to process")
    parser.add_argument('-missing_behaviour', type=str, choices=['print', 'yield'], default='print',
                        help="If IDs from an idfile are missing: 'print' or 'yield'\n"
                        "and json dict containing the ID, default is 'print'")
    parser.add_argument('-pretty', action='store_true',
                        help="prettyprint the json output")
    parser.add_argument('-verbose', action='store_true',
                        help="print progress for large dumps")
    parser.add_argument('-chunksize', type=int, default=1000,
                        help="chunksize of the search window to use")
    parser.add_argument("-auth", type=str, nargs="?", const="ENV", metavar="USER",
                        help='Provide authentication, this can be done using:\n'
                        '1) set environment variables E2J_USER and E2J_PASSWD. In\n'
                        ' this case there is no further argument needed here\n'
                        '2) as a string "username". The password is then asked interactively\n'
                        '3) as "username:password" (not recommended)')
    args = parser.parse_args(argv)
    es_kwargs = {}  # dict to collect kwargs for ESgenerator
    # parsing server # http://server.de:1234/index/_doc/101
    slashsplit = args.server.split("/")  # → [http:, , server.de:1234, index, _doc, 101]
    es_kwargs["host"] = slashsplit[2].rsplit(":")[0]
    # NOTE(review): this assumes the URL always carries an explicit port;
    # a server string like "http://host/index" (no port) raises here.
    es_kwargs["port"] = int(args.server.split(":")[2].rsplit("/")[0])  # raise Error if port not castable to int
    # Optional path components: index, doctype, document id.
    if len(slashsplit) > 3:
        es_kwargs["index"] = slashsplit[3]
    if len(slashsplit) > 4:
        es_kwargs["type_"] = slashsplit[4]
    if len(slashsplit) > 5:
        es_kwargs["id_"] = slashsplit[5]
    if args.auth:
        # Authentication is stubbed out; the branches below are unreachable
        # until the raise is removed.
        raise NotImplementedError("authentication not yet implemented")
        # args.pop("auth")
        if args.auth == "ENV":
            # check and use environmental username (E2J_USER) and password (E2J_PASSWD)
            pass
        else:
            # parse authentication string: either "username" or "username:password"
            pass
    if args.size:
        """
        we build the slice() object here, if this fails because of user input,
        the stacktrace of slice() is very informative, so we don't do our own Error handling here
        for size-searches, we don't use a scroll since the user wants only a small searchwindow
        """
        # NOTE(review): args.size is declared type=str above, so the
        # isinstance(int) branch never fires when parsed via argparse.
        if isinstance(args.size, int):  # oh, we got an single number, not a string with an number or even an string describing a slice
            args.size = str(args.size)
        if ':' in args.size:
            es_kwargs["slice_"] = slice(int(args.size.split(':')[0]), int(args.size.split(':')[1]), 1)
        else:
            es_kwargs["slice_"] = slice(0, int(args.size), 1)
    if args.headless and args.ign_source:
        # Mutually exclusive: headless strips metadata, ign-source strips the body.
        helperscripts.eprint("ERROR! do not use -headless and -ign-source at the same Time!")
        exit(-1)
    else:
        es_kwargs["headless"] = args.headless
        es_kwargs["source"] = not args.ign_source
    if args.pretty:
        tabbing = 4
    else:
        tabbing = None
    # Comma-separated field lists for _source filtering.
    if args.includes and isinstance(args.includes, str):
        es_kwargs["includes"] = args.includes.split(",")
    if args.excludes and isinstance(args.excludes, str):
        es_kwargs["excludes"] = args.excludes.split(",")
    if args.chunksize:
        es_kwargs["chunksize"] = args.chunksize
    if args.body:
        es_kwargs["body"] = args.body
    if args.timeout:
        es_kwargs["timeout"] = args.timeout
    if args.verbose:
        es_kwargs["verbose"] = args.verbose
    if args.missing_behaviour and (args.idfile or args.idfile_consume):
        es_kwargs["missing_behaviour"] = args.missing_behaviour
    # Pick the generator class: IDFile / IDFileConsume when an id file is
    # given, plain ESGenerator otherwise.
    if args.idfile:
        es_kwargs["idfile"] = args.idfile
        ESGeneratorFunction = IDFile(**es_kwargs).generator()
    elif args.idfile_consume:
        es_kwargs["idfile"] = args.idfile_consume
        ESGeneratorFunction = IDFileConsume(**es_kwargs).generator()
    else:
        ESGeneratorFunction = ESGenerator(**es_kwargs).generator()
    for json_record in ESGeneratorFunction:
        print(json.dumps(json_record, indent=tabbing))


if __name__ == "__main__":
    run()
|
993,258 | ab1f60cba5d5436fcc83357646a061acfbb67035 | """
Simple pattern editor example.
"""
import numpy as np
from traits.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
from traits.api import HasTraits, Instance
from traitsui.api import Item, View
from patterns.colored_checker_board import ColoredCheckerBoardEditor
from patterns.colored_checker_board_model import (
ColoredCheckerBoardModel
)
class MainWindow(HasTraits):
    """Top-level window showing a single colored checker-board editor."""
    # The board model rendered by the custom editor in `view` below.
    board = Instance(ColoredCheckerBoardModel)

    view = View(
        Item('board', editor=ColoredCheckerBoardEditor()),
        resizable=True
    )

    def _board_default(self):
        # Traits default initializer: a 3x2 boolean checker pattern.
        data = np.array([[False, True], [True, False], [True, True]])
        return ColoredCheckerBoardModel(data=data)


if __name__ == '__main__':
    main = MainWindow()
    main.configure_traits()
|
993,259 | a85bda146131b616381337a9ed0d20b79abf68ff | from django.conf.urls import url, include
from django.urls import path
from .views import *
# URL routing: list/detail, creation and edit views for dishes,
# ingredients and drinks.
urlpatterns = [
    path('dishes/', DishView.as_view()),
    path('', StartView.as_view()),
    path('ingredients/', IngredientView.as_view()),
    path('drinks/', DrinkView.as_view()),
    path('dishes/list', DishViewList.as_view()),
    path('ingredients/list', IngredientViewList.as_view()),
    path('ingredients/new/', MakeIngredient.as_view(), name='ingredient_form'),
    path('drinks/new/', MakeDrink.as_view(), name='drinks_form'),
    path('dishes/new/', MakeDishes.as_view(), name='dishes_form'),
    path('drinks/<int:pk>/edit/', UpdateDrink.as_view(), name='update_drink'),
    path('ingredients/<int:pk>/edit/', UpdateIngredient.as_view(), name='update_ingredient'),
    path('dishes/<int:pk>/edit/', UpdateDish.as_view(), name='update_dish')
]
|
993,260 | 1b727a5cbdd1c9d5e53482151ee9942fa5b6dabd | import os
import pandas as pd
from pydub import AudioSegment
from gtts import gTTS
# pip install pyaudio
# pip install pydub
# pip install pandas
# pip install gTTS
def textToSpeech(text, filename, lang='hi'):
    """Render *text* to speech and save it as an mp3 at *filename*.

    *lang* is the gTTS language code; it defaults to 'hi' (Hindi) so
    existing two-argument callers keep their behavior.
    """
    myobj = gTTS(text=str(text), lang=lang, slow=False)
    myobj.save(filename)
def mergeAudios(audios):
    """Concatenate the given mp3 files and return one pydub AudioSegment."""
    merged = AudioSegment.empty()
    for path in audios:
        merged = merged + AudioSegment.from_mp3(path)
    return merged
def generateSkeleton(segments=None):
    """Cut the fixed announcement phrases out of railway.mp3.

    *segments* maps a phrase index to its (start_ms, end_ms) span inside
    railway.mp3.  The skeleton phrases are the odd indices 1, 3, 5, 7, 9
    and 11 ("kripya dhyan dijiye", "se chalkar", "ke raaste", ...); the
    even slots are synthesized per-train by generateAnnouncement().

    Fixes vs. the original:
    - the original had bare ``start =`` / ``finish =`` assignments (a
      SyntaxError) because the timestamps were never filled in; they are
      now a required parameter,
    - the original exported its second cut to "1_hindi.mp3" again,
      silently overwriting the first phrase.
    """
    if segments is None:
        raise ValueError(
            "generateSkeleton needs a mapping of phrase index -> "
            "(start_ms, end_ms), e.g. {1: (0, 1500), 3: (2000, 2900), ...}")
    audio = AudioSegment.from_mp3('railway.mp3')
    for index, (start, finish) in segments.items():
        audio[start:finish].export(f"{index}_hindi.mp3", format="mp3")
def generateAnnouncement(filename):
    """Build one announcement mp3 per row of the Excel sheet *filename*.

    For every train row, the variable phrases (from/via/to city, train
    number+name, platform) are synthesized into the even-numbered slot
    files, then all 11 slots are merged with the fixed skeleton phrases.

    Fix: the original called
    ``textToSpeech(item['train_no'], + " " + item['tain_name'], '8_hindi.mp3')``
    which passed three arguments to a two-argument function and applied
    unary ``+`` to a string (TypeError); the number and name are now
    concatenated into a single text argument.
    """
    df = pd.read_excel(filename)
    # The slot file names are row-invariant, so build the list once.
    audios = [f"{i}_hindi.mp3" for i in range(1, 12)]
    for index, item in df.iterrows():
        textToSpeech(item['from'], '2_hindi.mp3')
        textToSpeech(item['via'], '4_hindi.mp3')
        textToSpeech(item['to'], '6_hindi.mp3')
        # NOTE(review): the spreadsheet columns 'tain_name' and
        # 'plateform' look like typos of 'train_name'/'platform' —
        # confirm against the actual xlsx header before renaming.
        textToSpeech(str(item['train_no']) + " " + item['tain_name'], '8_hindi.mp3')
        textToSpeech(item['plateform'], '10_hindi.mp3')
        announcement = mergeAudios(audios)
        announcement.export(f"announcement_{item['train_no']}_{index+1}.mp3", format="mp3")
if __name__ == "__main__":
    # First cut the fixed phrases, then synthesize per-train announcements.
    print("Generating Skeleton...")
    generateSkeleton()
    print(" Now Generating Announcement...")
    generateAnnouncement("announce_hindi.xlsx")
|
993,261 | 6af54e1506a59a443593d53b15d85b56c0b655a4 | from django.urls import path
from . import views
# URL routing for the patient dashboard app.
urlpatterns = [
    path('dashboard/', views.homepage, name='dashboard'),
    path('result/', views.results, name='result'),
    path('overview/', views.profile, name='overview'),
    path('new/', views.addpatient, name='addPatient'),
    #path('overview/predict/', views.predict, name='predict'),
]
993,262 | ac1e1a99888abadf0c83dff918d447203ac0b7ba | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-25 05:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Message.timezone and give Message.utc_offset a default of 0."""

    dependencies = [
        ('msgs', '0002_auto_20161025_0443'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='message',
            name='timezone',
        ),
        migrations.AlterField(
            model_name='message',
            name='utc_offset',
            field=models.IntegerField(default=0),
        ),
    ]
|
993,263 | e3c5a9e450b5fa8fa39a2f2f374a7b2cc33a0e57 | import re
m = re.findall(r'0\d{3}-\d{8}', 'sefsefdfs0518-87459266sdfsdfsfe')
print(m)
'''git test'''
|
993,264 | a0b5ab24bf2602d9df12fd9a28ee91bfd2785398 | import re
from chatbot.api.domain.repositories.SiteRepository import ISiteRepository
from chatbot.models.SiteUrlSetting import URL_PATTERN_DEFALT_ID
from chatbot.models.SiteUrlSetting import SiteUrlSettingModel
class SiteService:
    """Resolves which bot / URL setting applies to a given URL of a site."""

    def __init__(self, site_repository: ISiteRepository):
        self.site_repository = site_repository

    def search_bot_id(self, site_id: int, url: str):
        """Return the bot id of the first url_pattern matching *url*.

        Falls back to the site's default setting; raises when neither a
        match nor a default exists.
        """
        site = self.site_repository.find_by_id(id=site_id)
        default_bot_id = 0  # sentinel: assumes 0 is never a real bot id
        for url_setting in site.url_settings:
            # The special pattern id marks the site's fallback setting.
            if url_setting.url_pattern == URL_PATTERN_DEFALT_ID:
                default_bot_id = url_setting.bot_id
                continue
            # re.match anchors at the start of the URL only.
            result = re.match(url_setting.url_pattern, url)
            if result:
                return url_setting.bot_id
        if default_bot_id == 0:
            raise Exception('bot_id not found')
        return default_bot_id

    def find_url_setting(self, site_id: int, url: str) -> SiteUrlSettingModel:
        """Return the first enabled matching url setting, the default, or None."""
        site = self.site_repository.find_by_id(id=site_id)
        if site is None:
            return None
        default_url_setting = None
        for url_setting in site.url_settings:
            # NOTE(review): disabled settings are skipped here but NOT in
            # search_bot_id above — confirm whether that asymmetry is intended.
            if url_setting.enable_flag is False:
                continue
            if url_setting.url_pattern == URL_PATTERN_DEFALT_ID:
                default_url_setting = url_setting
                continue
            result = re.match(url_setting.url_pattern, url)
            if result:
                return url_setting
        return default_url_setting
|
993,265 | bd70a6d206937aee783f32aebde2a606e433a16e | """
Copyright (c) 2016-2020 we-get developers (https://github.com/rachmadaniHaryono/we-get/)
See the file 'LICENSE' for copying permission
"""
from setuptools import setup, find_packages
# Single source of truth for the package version.
version = '1.1.2'

setup(
    name='we-get',
    version=version,
    description='Search torrents from the command-line',
    author='Levi Sabah',
    author_email='0xl3vi@gmail.com',
    license='MIT',
    keywords=['command line', 'torrent'],
    url='https://github.com/rachmadaniHaryono/we-get',
    packages=find_packages(),
    install_requires=[
        'colorama',
        'docopt',
        'beautifulsoup4',
        'prompt-toolkit>=3.0.5',
        'Pygments>=2.6.1',
    ],
    extras_require={
        'test': ["pytest", "pytest-flake8", 'vcrpy'], },
    include_package_data=True,
    # Bundled user-agent list shipped inside the package.
    package_data={'we_get': ['txt/useragents.txt']},
    entry_points={'console_scripts': ['we-get=we_get:main']}
)
|
993,266 | 17ea1f4b9728354089b2396b27c324afbb43f56e | # a = 1 , b = 2 , c = 3 , 以此类推,返回字符串中加起来最大的单词
# 例:man i need a taxi up to ubud 返回 'taxi'
'''
def high(x):
letter = [chr(i) for i in range(ord('a'),ord('z')+1)]
nums = list(range(1,27))
dic = {}
for i in nums:
dic[letter[i-1]] = i
x_split = x.split(' ')
answer = {}
for word in x_split:
and1 = 0
for j in word:
and1 += dic[j]
answer[and1] = word
max1 = max(answer.keys())
return answer[max1]
def high(x):
words = x.split(' ')
list = []
for i in words:
scores = [sum([ord(char) - 96 for char in i])]
list.append(scores)
return words[list.index(max(list))]
'''
def high(x):
    """Return the word of *x* with the highest letter score (a=1 ... z=26).

    Ties go to the earliest such word, matching max() semantics.
    """
    def score(word):
        return sum(ord(ch) - 96 for ch in word)

    return max(x.split(), key=score)
if __name__ == '__main__':
    # Demo: prints the highest-scoring word of the phrase.
    r = high('take me to semynak')
    print(r)
# man i need a taxi up to ubud
# what time are we climbing up the volcano
|
993,267 | 9d4fe76a8d4d2f385ae26271f9dbe61335b3f474 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 10:49:39 2020
@author: Arthur Donizeti Rodrigues Dias
"""
class Category:
    """A simple budget-category ledger supporting deposits, withdrawals
    and transfers between categories.
    """

    def __init__(self, categories):
        # Chronological list of {'amount': float, 'description': str} entries;
        # withdrawals are stored with a negative amount.
        self.ledger = []
        # Category name (also used in transfer descriptions).
        self.categories = categories
        # Flat [amount, description, ...] histories, kept for compatibility.
        self.listaDeposito = []
        self.listaRetirada = []
        # Running totals of deposits and of withdrawals.
        self.total_entrada = 0
        self.total_saida = 0

    def __str__(self):
        """Render the 30-char starred title, one line per ledger entry
        (description truncated to 23 chars, amount right-aligned in 7),
        and a final Total line."""
        out = self.categories.center(30, "*") + '\n'
        for entry in self.ledger:
            desc = str(entry['description'])
            out += desc[0:23].ljust(23) + str(format(entry['amount'], '.2f')).rjust(7) + '\n'
        out += 'Total: ' + str(format(self.get_balance(), '.2f'))
        return out

    def deposit(self, quantia, description=""):
        """Record a deposit of *quantia* with an optional description."""
        self.total_entrada += quantia
        self.listaDeposito.extend([quantia, description])
        self.ledger.append({'amount': quantia, 'description': description})

    def withdraw(self, quantia, description=""):
        """Record a withdrawal if funds allow; return True on success.

        Fix: the original compared against total deposits (total_entrada)
        with a strict '>', which both allowed overdrafts once earlier
        withdrawals existed and rejected withdrawing the exact balance.
        The check now uses the current balance via check_funds().
        """
        if not self.check_funds(quantia):
            return False
        self.total_saida += quantia
        self.listaRetirada.extend([quantia, description])
        self.ledger.append({'amount': -quantia, 'description': description})
        return True

    def get_balance(self):
        """Current balance: total deposits minus total withdrawals."""
        return self.total_entrada - self.total_saida

    def transfer(self, quantia, beneficiario):
        """Move *quantia* to another Category; return True on success."""
        if not self.check_funds(quantia):
            return False
        self.withdraw(quantia, f'Transfer to {beneficiario.categories}')
        beneficiario.deposit(quantia, f'Transfer from {self.categories}')
        return True

    def check_funds(self, quantia):
        """Return True when *quantia* does not exceed the current balance."""
        return quantia <= self.get_balance()
def create_spend_chart(teste):
    # Build a vertical bar chart of each category's share of total spending,
    # one 'o' column per category, rounded down to 10% steps, with the
    # category names written vertically underneath.
    # NOTE(review): several string literals below (' ', " ") appear to have
    # had their repeated spaces collapsed by whitespace mangling — for the
    # columns to align, the non-bar cell and the name-padding cells should be
    # three characters wide; confirm against the original file.
    soma = 0.0            # total spent across all categories
    cont = 10             # current percentage row (100% down to 0%)
    tamanho = 0           # length of the longest category name
    lista = []            # category names, in order
    lista_porcentagem = []  # each category's spending as tenths of the total
    for i in teste:
        x = i.categories
        if tamanho < len(x):
            tamanho = len(x)
        soma += i.total_saida
        lista.append(x)
    for i in teste:
        lista_porcentagem.append((i.total_saida/soma)*10)
    x = 'Percentage spent by category\n'
    # Percentage axis rows, 100 down to 0 in steps of 10.
    while cont >= 0:
        x += f'{cont*10}|'.rjust(4)
        for i in lista_porcentagem:
            if i > cont:
                x += ' o '
            else:
                x += ' '
        x = x+' \n'
        cont -= 1
    # Horizontal separator under the bars.
    x += ('--'*len(lista)).rjust(7+len(lista))+'----\n'
    aux = 0
    # Category names written vertically, one character row at a time.
    while tamanho > 0:
        x += " "*4
        for i in lista:
            if len(i) > aux:
                x += ' '+i[aux]+' '
            else:
                x += ' '
        if tamanho == 1:
            # Last row ends without a newline.
            x += " "
        else:
            x = x+' \n'
        aux += 1
        tamanho -= 1
    return(x)
|
993,268 | f9ff653f1af933b7c1559fb4053b6327f92609e8 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import champagn.validators
class Migration(migrations.Migration):
    """Tighten ProductCategory.name and relax several ProductTemplateData
    fields (blank/null image, text and layout integers)."""

    dependencies = [
        ('product', '0016_auto_20160615_1130'),
    ]

    operations = [
        migrations.AlterField(
            model_name='productcategory',
            name='name',
            field=models.CharField(max_length=30),
        ),
        migrations.AlterField(
            model_name='producttemplatedata',
            name='default_image',
            field=models.ImageField(validators=[champagn.validators.validate_product_image], upload_to=b'product_template_data/', blank=True, help_text=b'Image Size should not more than 24Mb.', null=True),
        ),
        migrations.AlterField(
            model_name='producttemplatedata',
            name='default_text',
            field=models.CharField(max_length=30, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='producttemplatedata',
            name='height',
            field=models.IntegerField(blank=True),
        ),
        migrations.AlterField(
            model_name='producttemplatedata',
            name='left',
            field=models.IntegerField(blank=True),
        ),
        migrations.AlterField(
            model_name='producttemplatedata',
            name='top',
            field=models.IntegerField(blank=True),
        ),
        migrations.AlterField(
            model_name='producttemplatedata',
            name='width',
            field=models.IntegerField(blank=True),
        ),
    ]
|
993,269 | a384c4d477dc6b607d0711f69c1380d5f74dddd9 | if __name__ == "__main__":
with open("./flanks_occ.txt", "r") as file:
file_string = file.read()
# parse string from file
dict_p = dict(entry.split(": ") for entry in file_string.split("\n"))
# transform entries to ints
final_dict = dict((int(k), int(dict_p[k])) for k in dict_p)
sum = 0
for k, v in final_dict.items():
sum += v
if sum == 101:
print("key for 10%:", k)
if sum == 206:
print("key for 20%:", k) # == 8739 |
993,270 | 8462835a078dcd5490b9868ff12e94c7074df2f0 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import TensorDataset, DataLoader
import argparse
from tqdm import tqdm
import os
from net.vae import VRAE
from net.ae import *
# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def generate_bgl(name, window_size, step=0):
    """Load fixed-size BGL log-key windows as an autoencoder dataset.

    Each line of the file holds exactly *window_size* integer log keys.
    The windows serve as both input and reconstruction target.

    Cleanups vs. the original: the file is opened once (the first open
    only computed an unused line count), the identity lambda and the
    no-op ``list(inputs)`` are gone, and the unused ``outputs`` list is
    removed.
    """
    path = 'bgl/window_' + str(window_size) + 'future_' + str(step) + 'remove_8/' + name
    inputs = []
    num_keys = set()
    with open(path, 'r') as f:
        for line in f.readlines():
            window = tuple(map(int, line.strip().split()))
            assert len(window) == window_size, (str(window), len(window))
            inputs.append(window)
            num_keys.update(window)
    print(name)
    print('Number of sessions: {}'.format(len(inputs)))
    print('number of keys:{}'.format(len(num_keys)))
    dataset = TensorDataset(torch.tensor(inputs, dtype=torch.float), torch.tensor(inputs))
    return dataset
def generate_bgl_loss(name, step, slide):
    """Load per-window loss sequences (first `slide` floats of each line).

    Args:
        name: file name inside the loss split directory.
        step: "future" offset encoded in the directory name.
        slide: number of leading float columns to keep per line.

    Returns:
        TensorDataset whose targets equal its inputs (reconstruction).
    """
    path = 'bgl/loss_' + 'future_' + str(step) + 'slide_' + str(slide) + '/' + name
    inputs = []
    with open(path, 'r') as f:
        # stream line by line; the previous version opened the file twice
        # and computed an unused line count
        for line in f:
            source = tuple(float(tok) for tok in line.strip().split()[:slide])
            inputs.append(source)
    print(name)
    print('Number of sessions: {}'.format(len(inputs)))
    dataset = TensorDataset(torch.tensor(inputs, dtype=torch.float), torch.tensor(inputs))
    return dataset
def generate_bgl_loss_full(step, slide):
    """Concatenate the train and val loss files into one reconstruction set.

    Every float on a line except the last column is kept as a feature;
    targets equal inputs.

    Args:
        step: "future" offset encoded in the directory name.
        slide: slide length encoded in the directory name.

    Returns:
        TensorDataset of (features as float tensor, same features).
    """
    base = 'bgl/loss_' + 'future_' + str(step) + 'slide_' + str(slide)

    def read_split(filename):
        # one tuple of floats per line, dropping the trailing label column
        with open(base + '/' + filename, 'r') as f:
            return [tuple(float(tok) for tok in line.strip().split()[:-1]) for line in f]

    inputs = read_split('normal_loss_train_set.txt')
    print('train_len')
    print(len(inputs))
    inputs += read_split('normal_loss_val_set.txt')
    print('Number of sessions: {}'.format(len(inputs)))
    dataset = TensorDataset(torch.tensor(inputs, dtype=torch.float), torch.tensor(inputs))
    return dataset
def generate_hdfs(window_size, split=''):
    """Slide a window over every HDFS session in data/hdfs_train<split>.

    Each line is one session of integer log keys; keys are shifted to be
    0-based. Every length-`window_size` window becomes both the input and
    the target (reconstruction).

    Returns:
        TensorDataset of (window as float tensor, window as int tensor).
    """
    inputs = []
    outputs = []
    num_sessions = 0
    with open('data/hdfs_train' + split, 'r') as f:
        for raw in f:
            num_sessions += 1
            session = tuple(int(tok) - 1 for tok in raw.strip().split())
            for start in range(len(session) - window_size):
                window = session[start:start + window_size]
                inputs.append(window)
                outputs.append(window)
    print('Number of sessions: {}'.format(num_sessions))
    return TensorDataset(torch.tensor(inputs, dtype=torch.float), torch.tensor(outputs))
if __name__ == '__main__':
    # Train an autoencoder ('ae') or variational RNN autoencoder ('vae') on
    # log-key / loss-sequence data and checkpoint the weights every epoch.
    # Hyperparameters
    batch_size = 2048
    input_size = 1
    model_dir = 'model'
    parser = argparse.ArgumentParser()
    parser.add_argument('-model', type=str, default='ae', choices=['vae', 'ae'])
    parser.add_argument('-num_layers', default=1, type=int)
    parser.add_argument('-hidden_size', default=128, type=int)
    parser.add_argument('-latent_length', default=16, type=int)
    parser.add_argument('-window_size', default=10, type=int)
    parser.add_argument('-dataset', type=str, default='bgl_loss', choices=['hd', 'bgl_loss', 'bgl_loss_full', 'bgl'])
    parser.add_argument('-epoch', default=40, type=int)
    parser.add_argument('-lr', default=0.001, type=float)
    parser.add_argument('-dropout', default=0.0, type=float)
    parser.add_argument('-step', default=0, type=int)
    parser.add_argument('-slide', default=0, type=int)
    parser.add_argument('-seed', type=int, default=1, metavar='S')
    args = parser.parse_args()
    # fix RNG seeds (CPU and GPU) for reproducibility
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    num_layers = args.num_layers
    hidden_size = args.hidden_size
    # -slide, when given, overrides -window_size as the sequence length
    window_size = args.window_size if args.slide == 0 else args.slide
    num_epochs = args.epoch
    latent_length = args.latent_length
    dropout = args.dropout
    if args.dataset == 'hd':
        seq_dataset = generate_hdfs(window_size, split='')
        num_classes = 28
        # for -1 padding during testing
        num_classes +=1
        # NOTE(review): no criterion is assigned for 'hd' below, so the 'ae'
        # training branch would raise NameError -- confirm intended datasets
    elif args.dataset == 'bgl_loss':
        seq_dataset = generate_bgl_loss('normal_loss_train_set.txt', args.step, args.slide)
        num_classes = 1
        # for -1 padding during testing
    elif args.dataset == 'bgl_loss_full':
        seq_dataset = generate_bgl_loss_full(args.step, args.slide)
        num_classes = 1
    elif args.dataset == 'bgl':
        seq_dataset = generate_bgl('normal_train.txt', window_size, args.step)
        # val_dataset = generate_bgl('abnormal_test.txt', window_size)
        num_classes = 377
    dataloader = DataLoader(seq_dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=8)
    # build a run name encoding the hyperparameters; it is used both as the
    # tensorboard log dir and as the checkpoint file name
    log = 'dataset='+ str(args.dataset)
    log = log + '_window_size=' + str(window_size) if args.slide == 0 else log + '_slide=' + str(args.slide)
    log = log + '_hidden_size=' + str(hidden_size) + \
          '_latent_length=' + str(latent_length) + \
          '_num_layer=' + str(num_layers) + \
          '_epoch=' + str(num_epochs) + \
          '_dropout=' + str(dropout) + \
          '_step=' + str(args.step)
    log = log + '_lr=' + str(args.lr) if args.lr != 0.001 else log
    log = log + '_' + args.model
    print('store model at:')
    print(log)
    writer = SummaryWriter(log_dir='log/' + log)
    if args.model == 'vae':
        model = VRAE(sequence_length=window_size,
                     number_of_features=input_size,
                     num_classes=num_classes,
                     hidden_size=hidden_size,
                     latent_length=latent_length,
                     training=True,
                     dropout_rate=dropout)
    elif args.model == 'ae':
        model = AE(input_size,
                   hidden_size,
                   latent_length,
                   num_layers,
                   num_classes,
                   window_size,
                   dropout_rate=dropout)
    model = model.to(device)
    # Loss and optimizer
    if args.dataset == 'bgl':
        criterion = nn.CrossEntropyLoss()
    elif args.dataset == 'bgl_loss' or args.dataset == 'bgl_loss_full':
        criterion = torch.nn.L1Loss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    # Train the model
    total_step = len(dataloader)
    for epoch in range(num_epochs):
        train_loss = 0
        tbar = tqdm(dataloader)
        model.train()
        for step, (seq, label) in enumerate(tbar):
            seq = seq.clone().detach().view(-1, window_size, input_size).to(device)
            # NOTE(review): torch.tensor() on an existing tensor copies and
            # warns; label.to(device) would suffice
            label = torch.tensor(label).to(device)
            if args.model =='vae':
                # the VRAE computes its own reconstruction + KL loss
                loss, rec, kl = model.compute_loss(seq)
            else:
                output = model(seq)
                if args.dataset == 'bgl_loss' or args.dataset == 'bgl_loss_full':
                    # L1 loss needs matching shapes: add a feature axis
                    label = label.unsqueeze(2)
                else:
                    # CrossEntropyLoss expects (batch, classes, seq)
                    output = output.permute(0,2,1)
                loss = criterion(output, label)
            optimizer.zero_grad()
            loss.backward()
            train_loss += loss.item()
            optimizer.step()
            tbar.set_description('Train loss: %.5f' % (train_loss / (step + 1)))
        print('Epoch [{}/{}], train_loss: {:.4f}'.format(epoch + 1, num_epochs, train_loss / total_step))
        writer.add_scalar('train_loss', train_loss / total_step, epoch + 1)
        # checkpoint after every epoch (same file, overwritten)
        torch.save(model.state_dict(), model_dir + '/' + log + '.pt')
    writer.close()
    print('Finished Training')
print('Finished Training') |
993,271 | d7413641e6205c4974c28d127c6e217ee0f5e3db | # -*- coding: utf-8 -*-
"""
api component module.
"""
from pyrin.application.decorators import component
from pyrin.api import APIPackage
from pyrin.api.manager import APIManager
from pyrin.application.structs import Component
@component(APIPackage.COMPONENT_NAME)
class APIComponent(Component, APIManager):
    """
    api component class.

    registers the api manager as an application component under
    ``APIPackage.COMPONENT_NAME``; all behavior comes from ``APIManager``.
    """
    pass
|
993,272 | dec820c30472805d8bc8321f5b4c05ce025bc3dc | import numpy as np
from func.activation import sigmoid
from ml.classifier import AbstractClassification
from ml.model import Model
from ml.regression import AbstractRegression
from optimiz.optimizer import Optimizer
class LinearRegression(AbstractRegression):
    """Linear regression placeholder -- not implemented yet."""

    def __init__(self):
        pass

    def predict(self, x):
        # TODO: implement prediction
        pass

    def fit(self, x, y, optimizer):
        # TODO: implement training
        pass
class LogicRegression(AbstractClassification):
    """Binary logistic-regression classifier trained via a pluggable optimizer.

    Labels are mapped to {0, 1} in the order produced by ``np.unique(y)``;
    the fitted Model holds a weight column vector and a bias.
    """

    def __init__(self, optimizer, model=None):
        # the optimizer drives training; a pre-fitted Model may be supplied
        if not optimizer or not isinstance(optimizer, Optimizer):
            raise TypeError("optimizer must be the subclass of optimize.optimizer.Optimizer")
        self.__optimizer = optimizer
        if model:
            self.__model = model
        else:
            self.__model = Model()
        self.__labels = None

    def __forward(self, x):
        """Hard 0/1 prediction for a single sample vector x."""
        if self.__model is None:
            raise AssertionError("you should use this model after fitted it, call fit() first.")
        w = self.__model.weights
        b = self.__model.bias
        hx = sigmoid.apply(x.dot(w) + b)
        # threshold the sigmoid activation at 0.5
        return 1 if hx[0] > 0.5 else 0

    def __backward(self, x, y):
        """Per-sample error and gradient handed to the optimizer.

        NOTE(review): uses the thresholded prediction (0/1), not the raw
        sigmoid probability, inside the gradient -- a perceptron-style
        update; confirm this is intended.
        """
        yv = self.__labels["value"][y]
        hx = self.__forward(x)
        sigma = yv - hx
        return sigma, (sigma * x).reshape(-1, 1)

    def fit(self, X, y):
        """Fit on feature matrix X (m x n) and labels y; returns self."""
        m, n = X.shape
        if m < 1 or n < 1:
            raise ValueError("invalid training set")
        # bidirectional label <-> {0, 1} index mapping
        self.__labels = {
            'class': {i: v for i, v in enumerate(np.unique(y))},
            'value': {v: i for i, v in enumerate(np.unique(y))}
        }
        # random (non-zero) parameter initialization
        self.__model.weights = np.random.rand(n, 1)
        self.__model.bias = np.random.rand(1, 1)
        self.__optimizer.optimize(self.__backward, X, y, self.__model)
        return self

    def test(self, X, y):
        """Accuracy on a labelled set; y is indexed via .iloc (pandas Series)."""
        hx = self.classify(X)
        count = 0
        for i in range(len(hx)):
            if hx[i] != y.iloc[i]:
                count += 1
        return 1 - (count / float(len(X)))

    def classify(self, X):
        """Predicted original labels for each row of X."""
        return [self.__labels['class'][self.__forward(x)] for x in X]
|
993,273 | 405383203514635d44f334c307917eee8b1d6fb0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 17:48:09 2018
@author: sarfraz
"""
import unittest
from NeuralNetwork import *
class TestNeuralNetwork(unittest.TestCase):
    """Smoke test for the NeuralNetwork class."""

    def test_NeuralNetwork(self):
        # pin the string representation as the class's public identity
        NN=NeuralNetwork()
        self.assertEqual(str(NN), "Neural Network")

if __name__ == '__main__':
    unittest.main()
993,274 | b749500a8fe0f53b358c97a5e021a5e1d60fae41 | #!/usr/bin/env python
# usage python dictionary.py
# create plain text from concatenating dataset .txt files, then from plain text create dictionary.txt at same folder
# usage : python dictionary.py <path to folder containing .txt files>
# argv[1] is the folder containing the dataset .txt files
from sys import argv
from pathlib import Path
import subprocess
from subprocess import Popen,PIPE
# concat.sh appears to concatenate every .txt file under the given folder
# into concatenated.txt next to it -- confirm against the shell script
subprocess.check_output(['./concat.sh',str(argv[1])])
# parent directory of the dataset folder -- where the output is read from
saved_txt_path=str(Path(argv[1]).parents[0])
with open(saved_txt_path + '/concatenated.txt', 'r') as myfile:
    data=myfile.read()
# whitespace-tokenize the whole corpus
words = data.split()
print(len(words))
# remove duplicate items from the list
words=list(set(words))
print(len(words))
# write one unique token per line
with open (saved_txt_path + '/words.txt', 'w') as fo:
    for word in words:
        fo.write(str(word) + '\n')
|
993,275 | ac2af60f02b2321db85eeb60ad13629e8be2de13 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: adds recommend-image URL fields to Article and Series,
    plus cover image and introduction text to Series (Chinese verbose names)."""

    dependencies = [
        ('article', '0003_article_wx_url'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='recommend_img',
            field=models.URLField(null=True, verbose_name='\u63a8\u8350\u5c01\u9762', blank=True),
        ),
        migrations.AddField(
            model_name='series',
            name='img_cover',
            field=models.URLField(default='https://dn-wtbox.qbox.me/img//logo@2x.png', verbose_name='\u5c01\u9762'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='series',
            name='introduce',
            field=models.TextField(default='ddd', verbose_name='\u7b80\u4ecb'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='series',
            name='recommend_img',
            field=models.URLField(null=True, verbose_name='\u63a8\u8350\u5c01\u9762', blank=True),
        ),
    ]
|
993,276 | f0422beae93ad367e21eb897488a29408fbdb17b | import torch.nn.functional as F
import torch
from torch.nn.modules.loss import _Loss
from collections import Counter
import torch.nn as nn
import numpy as np
import math
class BaseLoss(_Loss):
    """Base class for losses that keep a running total across batches."""

    def __init__(self,):
        super(BaseLoss, self).__init__()
        # running sum of per-batch losses; reset via clear_loss()
        self.cum_loss = 0

    def clear_loss(self):
        """Reset the accumulated loss (call between epochs)."""
        self.cum_loss = 0
class PlainLoss(BaseLoss):
    """MSE loss that also accumulates a running total for reporting."""

    def __init__(self,):
        super(PlainLoss, self).__init__()
        self.criteria = torch.nn.MSELoss()

    def forward(self, y_hat, y):
        """Return the MSE between y_hat and y and add it to cum_loss."""
        l = self.criteria(y_hat,y)
        # accumulate a detached Python scalar: adding the loss tensor itself
        # (as before) retains every batch's autograd graph and leaks memory
        self.cum_loss += l.item()
        return l

    def get_description(self, step):
        """Progress-bar text: average accumulated loss over `step` batches."""
        tok_loss = self.cum_loss
        desc = " token loss : %f" % (
            tok_loss / step)
        return desc
|
993,277 | 5525f4777bada45b5295ad281fea9cf9cf624cd4 | # -*- coding: utf-8 -*-
# &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# Odoo Connector
# QQ:35350428
# 邮件:sale@100china.cn
# 手机:13584935775
# 作者:'wangguangjian'
# 公司网址: www.odoo.pw www.100china.cn
# Copyright 昆山一百计算机有限公司 2012-2016 Amos
# 日期:2014-06-18
# &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
from odoo import fields, models, api, _
from datetime import timedelta
from makePinyin import pinyinQuan, pinyinAbbr
from makePinyin import makePinyin
class res_city(models.Model):
    """City master data with pinyin search keys and Baidu-map coordinates."""
    _name = "res.city"
    _description = u"市"
    _order = 'code'
    name = fields.Char(string=u'市', size=64, index=True, default=u'/', required=True, copy=False, )
    code = fields.Char(string=u'邮政编码', copy=False, )
    license_plate = fields.Char(string=u'车牌号码', copy=False, )
    a_z = fields.Char(string=u'首字母', copy=False, )  # first letter of pinyin name
    spelling = fields.Char(string=u'全拼', copy=False, )  # full pinyin spelling
    country_id = fields.Many2one('res.country.state', string=u"省", index=True)
    b_x = fields.Char(string=u'百度X')
    b_y = fields.Char(string=u'百度Y')
    b_active = fields.Boolean(string=u'是否已定位', default=False)

    @api.model
    def create(self, values):
        # derive pinyin search keys from the Chinese name before saving
        # NOTE(review): assumes 'name' is always present in values -- confirm
        values['a_z'] = pinyinAbbr(values['name'])[0][0]
        values['spelling'] = pinyinQuan(values['name'], sep="", zhuyin=False, dyz=False)[0]
        line = super(res_city, self).create(values)
        return line
class res_area(models.Model):
    """District master data, linked to a city, with pinyin search keys."""
    _name = "res.area"
    _description = u"区"
    _order = 'code'
    name = fields.Char(string=u'市', size=64, index=True, default=u'/', required=True, copy=False, )
    code = fields.Char(string=u'编号', copy=False, )
    a_z = fields.Char(string=u'首字母', copy=False, )  # first letter of pinyin name
    spelling = fields.Char(string=u'全拼', copy=False, )  # full pinyin spelling
    zip = fields.Char(string=u'邮编', copy=False, )
    country_id = fields.Many2one('res.city', string=u"市", index=True)
    b_x = fields.Char(string=u'百度X')
    b_y = fields.Char(string=u'百度Y')
    b_active = fields.Boolean(string=u'是否已定位', default=False)

    @api.model
    def create(self, values):
        # derive pinyin search keys from the Chinese name before saving
        values['a_z'] = pinyinAbbr(values['name'])[0][0]
        values['spelling'] = pinyinQuan(values['name'], sep="", zhuyin=False, dyz=False)[0]
        line = super(res_area, self).create(values)
        return line
class res_country(models.Model):
    """Extends res.country with pinyin search keys and Baidu coordinates."""
    _inherit = 'res.country'
    a_z = fields.Char(string=u'首字母', copy=False, )  # first letter of pinyin name
    spelling = fields.Char(string=u'全拼', copy=False, )  # full pinyin spelling
    b_x = fields.Char(string=u'百度X')
    b_y = fields.Char(string=u'百度Y')
    b_active = fields.Boolean(string=u'是否已定位', default=False)

    @api.model
    def create(self, values):
        # derive pinyin search keys from the name before saving
        values['a_z'] = pinyinAbbr(values['name'])[0][0]
        values['spelling'] = pinyinQuan(values['name'], sep="", zhuyin=False, dyz=False)[0]
        line = super(res_country, self).create(values)
        return line
class res_country_state(models.Model):
    """Extends res.country.state with pinyin search keys and Baidu coordinates."""
    _inherit = 'res.country.state'
    a_z = fields.Char(string=u'首字母', copy=False, )  # first letter of pinyin name
    spelling = fields.Char(string=u'全拼', copy=False, )  # full pinyin spelling
    b_x = fields.Char(string=u'百度X')
    b_y = fields.Char(string=u'百度Y')
    b_active = fields.Boolean(string=u'是否已定位', default=False)

    @api.model
    def create(self, values):
        # derive pinyin search keys from the name before saving
        values['a_z'] = pinyinAbbr(values['name'])[0][0]
        values['spelling'] = pinyinQuan(values['name'], sep="", zhuyin=False, dyz=False)[0]
        line = super(res_country_state, self).create(values)
        return line
|
993,278 | 0a07a7a04c6146af728c66965ec5d9c092393520 | from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np
from enum import Enum
from keras.models import load_model
from keras.applications.mobilenet import MobileNet
from keras.applications.vgg16 import VGG16
from keras.models import Model, Sequential
from keras.layers import Concatenate, Dense, Input, Conv2D, Lambda, MaxPool2D, Flatten
import keras.backend as K
import cv2
def euclidean_distance(vects):
    """Row-wise Euclidean distance between two batched feature tensors.

    `vects` is a pair (x, y) of tensors with matching shapes; the result has
    one distance per row. K.epsilon() guards the sqrt against zero/negative
    arguments from numerical error.
    """
    x, y = vects
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))
def eucl_dist_output_shape(shapes):
    """Keras output_shape helper: the distance output is (batch, 1)."""
    first_shape, _ = shapes
    return (first_shape[0], 1)
def iou(bbox1, bbox2):
    """
    Calculates the intersection-over-union of two bounding boxes.
    Args:
        bbox1 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.
        bbox2 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.
    Returns:
        int: intersection-over-onion of bbox1, bbox2
    """
    x0_a, y0_a, x1_a, y1_a = (float(v) for v in bbox1)
    x0_b, y0_b, x1_b, y1_b = (float(v) for v in bbox2)
    # overlap rectangle extents (negative or zero means no overlap)
    inter_w = min(x1_a, x1_b) - max(x0_a, x0_b)
    inter_h = min(y1_a, y1_b) - max(y0_a, y0_b)
    if inter_w <= 0 or inter_h <= 0:
        return 0
    # ratio of the intersection area to the union of both box areas
    intersection = inter_w * inter_h
    area_a = (x1_a - x0_a) * (y1_a - y0_a)
    area_b = (x1_b - x0_b) * (y1_b - y0_b)
    return intersection / (area_a + area_b - intersection)
class DetectionHistory():
    """Track record of one object: its id plus per-frame boxes, cropped
    image patches and change statuses, appended as the track evolves."""

    def __init__(self, identifier):
        self.id = identifier
        self.boxes = []          # bounding box per observed frame
        self.image_patches = []  # cropped image region per observed frame
        self.statuses = []       # ChangeStatus per observed frame
class ChangeStatus(Enum):
    """Lifecycle label of a tracked detection between consecutive frames."""
    ADD = 1
    SAME = 4
    REMOVE = 2
    MOVE = 3
class SimilarityModel():
    """Siamese wrapper around a truncated MobileNet: maps a pair of image
    patches to their embedding (Euclidean) distance."""

    def __init__(self, keras_model_path = None):
        input_before = Input(shape=(128, 128, 3))
        input_after = Input(shape=(128, 128, 3))
        # imagenet-pretrained backbone, shared between both branches
        mobile = MobileNet(input_shape=(128,128,3), weights="imagenet")
        # drop the stem layers and the classification head
        mobile = Sequential(layers=mobile.layers[2:-5])
        before_feature = mobile(input_before)
        after_feature = mobile(input_after)
        distance = Lambda(euclidean_distance,
                          output_shape=eucl_dist_output_shape)([before_feature, after_feature])
        self._model = Model([input_before, input_after], distance)
        # NOTE(review): load_weights(None) would fail despite the default
        # argument -- confirm a path is always supplied
        self._model.load_weights(keras_model_path)

    def __call__(self, first, second):
        # resize both patches to the network's 128x128 input and add a batch axis
        first = np.expand_dims(cv2.resize(first, (128, 128)), axis=0)
        second = np.expand_dims(cv2.resize(second, (128, 128)), axis=0)
        # first -= np.mean(first, axis = 0)
        # second -= np.mean(second, axis = 0)
        return self._model.predict([first, second])[0]
def crop(image, box):
    """Cut the region described by a normalized (y0, x0, y1, x1) box out of
    an HxWxC image array; coordinates are fractions of height/width."""
    height, width, _ = image.shape
    y0, x0, y1, x1 = box[0], box[1], box[2], box[3]
    return image[int(y0 * height):int(y1 * height),
                 int(x0 * width):int(x1 * width)]
def mean(numbers):
    """Arithmetic mean as a float; an empty sequence yields 0.0."""
    total = float(sum(numbers))
    return total / max(len(numbers), 1)
class ChangeTracker():
    """Associates object detections across frames and labels each detection
    with a ChangeStatus (added / unchanged / moved / removed).

    Matching is greedy: first by IoU of bounding boxes, then -- for the
    leftovers -- by visual similarity of the cropped image patches.
    """

    def __init__(self, maxDisappeared=3, iou_thresh = 0.6, similarity_tresh=0.5):
        # initialize the next unique object ID along with two ordered
        # dictionaries used to keep track of mapping a given object
        # ID to its centroid and number of consecutive frames it has
        # been marked as "disappeared", respectively
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()
        self.removedObjects = OrderedDict()
        # how many of an object's most recent patches to average over
        # in img_patch_similarity
        self.pastSimilarity = 1
        self.imageSimilarity = SimilarityModel(
            keras_model_path =
            "/home/wc-gpu/MasterThesis/models/research/object_detection/od_api_tf_my_notebooks/checkpoint_similar/keep/2019-05-01weights-epoch08-val_acc0.91-val_loss0.10_fixed_val.hdf5")
        # store the number of maximum consecutive frames a given
        # object is allowed to be marked as "disappeared" until we
        # need to deregister the object from tracking
        self.maxDisappeared = maxDisappeared
        self.iouThresh = iou_thresh
        self.distThresh = similarity_tresh
        self.detectionHistory = dict(boxes=[], classes=[])

    def register(self, box, image):
        """Start tracking a brand-new object detected at `box`."""
        # when registering an object we use the next available object
        # ID to store the centroid
        history = DetectionHistory(self.nextObjectID)
        history.boxes.append(box)
        history.image_patches.append(crop(image, box))
        history.statuses.append(ChangeStatus.ADD)
        self.objects[self.nextObjectID] = history
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1
        self.addDetection(box, ChangeStatus.ADD)

    def addDetection(self, box, status):
        """Append (box, status value) to the current frame's history."""
        self.detectionHistory["boxes"][-1].append(box)
        # BUGFIX: int(status) raises TypeError for a plain Enum member;
        # record the member's integer value instead
        self.detectionHistory["classes"][-1].append(status.value)

    def track(self, id, box, image, status=ChangeStatus.SAME):
        """Extend the track of existing object `id` with a new detection."""
        history = self.objects[id]
        history.boxes.append(box)
        history.image_patches.append(crop(image, box))
        history.statuses.append(status)
        # BUGFIX: reset the counter of the tracked object, not of
        # self.nextObjectID (which refers to a not-yet-registered id)
        self.disappeared[id] = 0
        self.addDetection(box, status)

    def remove(self, objectID):
        """Mark an object missing this frame; deregister after the grace period."""
        history = self.objects[objectID]
        history.statuses.append(ChangeStatus.REMOVE)
        self.disappeared[objectID] += 1
        self.addDetection(history.boxes[-1], ChangeStatus.REMOVE)
        if self.disappeared[objectID] > self.maxDisappeared:
            self.deregister(objectID)

    def deregister(self, objectID):
        # to deregister an object ID we delete the object ID from
        # both of our respective dictionaries
        self.removedObjects[objectID] = self.objects[objectID]
        del self.objects[objectID]
        del self.disappeared[objectID]

    def img_patch_similarity(self, box, image, id):
        """Mean embedding distance between `box`'s crop and the object's
        most recent image patches."""
        cropped = crop(image, box)
        return mean([self.imageSimilarity(cropped, patch) for patch in self.objects[id].image_patches[-self.pastSimilarity:]])

    def update(self, boxes, image=None):
        """Match this frame's `boxes` to tracked objects and update statuses.

        Returns the dict of currently tracked objects.
        """
        self.detectionHistory["boxes"].append([])
        self.detectionHistory["classes"].append([])
        # check to see if the list of input bounding box rectangles
        # is empty
        if len(boxes) == 0:
            # loop over any existing tracked objects and mark them
            # as disappeared
            # BUGFIX: iterate over a copy of the keys -- deregister()
            # mutates self.disappeared during the loop
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1
                # if we have reached a maximum number of consecutive
                # frames where a given object has been marked as
                # missing, deregister it
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            # return early as there are no centroids or tracking info
            # to update
            return self.objects
        # initialize an array of input centroids for the current frame
        boxes = boxes  # np.array(boxes, dtype="int")
        # if we are currently not tracking any objects take the input
        # centroids and register each of them
        if len(self.objects) == 0:
            for i in range(0, len(boxes)):
                self.register(boxes[i], image)
        # otherwise, are are currently tracking objects so we need to
        # try to match the input centroids to existing object
        # centroids
        else:
            # grab the set of object IDs and corresponding centroids
            objectIDs = np.array(list(self.objects.keys()))
            objectBoxes = np.array(list([a.boxes[-1] for a in self.objects.values()]))
            # compute the distance between each pair of object
            # centroids and input centroids, respectively -- our
            # goal will be to match an input centroid to an existing
            # object centroid
            D = np.empty((len(objectBoxes), len(boxes)), dtype=np.float32)
            for i in range(len(objectBoxes)):
                for j in range(len(boxes)):
                    D[i, j] = 1 - iou(objectBoxes[i], boxes[j])
            # in order to perform this matching we must (1) find the
            # smallest value in each row and then (2) sort the row
            # indexes based on their minimum values so that the row
            # with the smallest value as at the *front* of the index
            # list
            rows = D.min(axis=1).argsort()
            # next, we perform a similar process on the columns by
            # finding the smallest value in each column and then
            # sorting using the previously computed row index list
            cols = D.argmin(axis=1)[rows]
            # in order to determine if we need to update, register,
            # or deregister an object we need to keep track of which
            # of the rows and column indexes we have already examined
            usedRows = set()
            usedCols = set()
            # loop over the combination of the (row, column) index
            # tuples
            for (row, col) in zip(rows, cols):
                # if we have already examined either the row or
                # column value before, or the IoU distance is above
                # the threshold, ignore it
                if row in usedRows or col in usedCols or D[row][col] > self.iouThresh:
                    continue
                # otherwise, grab the object ID for the current row,
                # set its new centroid, and reset the disappeared
                # counter
                objectID = objectIDs[row]
                self.track(objectID, boxes[col], image)
                # indicate that we have examined each of the row and
                # column indexes, respectively
                usedRows.add(row)
                usedCols.add(col)
            # compute both the row and column index we have NOT yet
            # examined
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)
            # second pass: match the IoU leftovers by visual patch
            # similarity to catch objects that MOVED between frames
            if len(unusedRows) and len(unusedCols):
                patchRows = np.array(sorted(list(unusedRows)), dtype=np.int16)
                patchCols = np.array(sorted(list(unusedCols)), dtype=np.int16)
                objectIDs = objectIDs[patchRows]
                objectBoxes = objectBoxes[patchRows]
                boxes = boxes[patchCols]
                D = D[patchRows, :]
                D = D[:, patchCols]
                for i in range(len(objectBoxes)):
                    for j in range(len(boxes)):
                        D[i, j] = self.img_patch_similarity(boxes[j], image, objectIDs[i])
                rows = D.min(axis=1).argsort()
                cols = D.argmin(axis=1)[rows]
                usedRows = set()
                usedCols = set()
                for (row, col) in zip(rows, cols):
                    if row in usedRows or col in usedCols or D[row][col] > self.distThresh:
                        continue
                    objectID = objectIDs[row]
                    self.track(objectID, boxes[col], image, ChangeStatus.MOVE)
                    usedRows.add(row)
                    usedCols.add(col)
                unusedRows = set(range(0, D.shape[0])).difference(usedRows)
                unusedCols = set(range(0, D.shape[1])).difference(usedCols)
            for row in unusedRows:
                # grab the object ID for the corresponding row
                # index and increment the disappeared counter
                self.remove(objectIDs[row])
            # otherwise, if the number of input centroids is greater
            # than the number of existing object centroids we need to
            # register each new input centroid as a trackable object
            for col in unusedCols:
                self.register(boxes[col], image)
        # return the set of trackable objects
        return self.objects
|
993,279 | 426a4f6bffe11de33208bc8a602165e0520ffbb8 | from rest_framework import viewsets
from Contact.Address.models import Address
from Contact.Address.serializers import AddressSerializer
class AddressViewSet(viewsets.ModelViewSet):
    """CRUD API for Address records, scoped to the requesting user."""
    # NOTE(review): this is the manager, not a QuerySet -- most DRF code uses
    # Address.objects.all(); confirm super().get_queryset() handles it
    queryset = Address.objects
    serializer_class = AddressSerializer

    def get_queryset(self):
        # only expose addresses owned by the authenticated user;
        # anonymous requests get an empty queryset
        queryset = super().get_queryset()
        if self.request.user.is_authenticated:
            return queryset.filter(owner=self.request.user)
        return queryset.none()
|
993,280 | edd14c9118f697f7bd66ef9141763fabf938b015 | import logging
from environment.environment import SERVICE_ACCOUNT_JSON, FIREBASE_SCOPE
from oauth2client.service_account import ServiceAccountCredentials
def get_credentials(user_mail=''):
    """
    Args:
        user_mail: mail of the user to impersonate, if any.
    Returns:
        str: an OAuth2 access token obtained from the service-account
        credentials (note: a token string, not a Credentials object).
    """
    # NOTE(review): user_mail is accepted but never used -- no delegation
    # (create_delegated) is actually performed; confirm intent
    # Normal call to get credentials from Service Account
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        SERVICE_ACCOUNT_JSON, scopes=FIREBASE_SCOPE)
    return credentials.get_access_token().access_token
|
993,281 | b761c8481d89ee6dcc26fd75e51e75bd5bade0ae | from tensorflow import keras
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.layers import ConvLSTM2D
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras_tuner import HyperModel
# define the class for the LSTM model, containing 2 LSTM layers
class LSTMStacked(HyperModel):
def __init__(self, input_shape,n_outputs):
self.input_shape = input_shape
self.n_outputs = n_outputs
# build function that takes hyperparameters as an input
def build(self, hp):
model = keras.Sequential()
# define the number of units so that it can be optimized in the hyperparameters tuning step
model.add(LSTM(
units=hp.Int(
'Units_LSTM_1',
min_value=32,
max_value=512,
step=32,
default=128
),
input_shape=self.input_shape,
return_sequences = True
)
)
# define the dropout rate so that it can be optimized in the hyperparameters tuning step
model.add(
Dropout(rate=hp.Float(
'dropout_1',
min_value=0.0,
max_value=0.5,
default=0.25,
step=0.05,
)
)
)
# define the number of units so that it can be optimized in the hyperparameters tuning step
model.add(LSTM(
units=hp.Int(
'Units_LSTM_2',
min_value=32,
max_value=512,
step=32,
default=128
)
)
)
# define the dropout rate so that it can be optimized in the hyperparameters tuning step
model.add(
Dropout(rate=hp.Float(
'dropout_2',
min_value=0.0,
max_value=0.5,
default=0.25,
step=0.05,
)
)
)
# define the number if units so that it can be optimized in the hyperparameters tuning step
model.add(
Dense(
units=hp.Int(
'units',
min_value=32,
max_value=512,
step=32,
default=128
),
activation=hp.Choice(
'dense_activation',
values=['relu', 'tanh', 'sigmoid'],
default='relu'
)
)
)
model.add(Dense(self.n_outputs, activation='softmax'))
# define the learning rate so that it can be optimized in the hyperparameters tuning step
model.compile(
optimizer=keras.optimizers.Adam(
hp.Float(
'learning_rate',
min_value=1e-4,
max_value=1e-2,
sampling='LOG',
default=1e-3
)
),
loss='categorical_crossentropy',
metrics=['accuracy']
)
return model
# define the class for the ConvLSTM model
class ConvLSTM(HyperModel):
def __init__(self, input_shape, n_outputs):
self.input_shape = input_shape
self.n_outputs = n_outputs
# build function that takes hyperparameters as an input
def build(self, hp):
model = keras.Sequential()
# define the the number of output filters so that it can be optimized in the hyperparameters tuning step
model.add(
ConvLSTM2D(
filters=hp.Choice(
'num_filters',
values=[16, 32, 64],
default=16,
),
kernel_size=(1, 3),
activation='relu',
input_shape=self.input_shape
)
)
# define the dropout rate so that it can be optimized in the hyperparameters tuning step
model.add(
Dropout(rate=hp.Float(
'dropout_1',
min_value=0.0,
max_value=0.5,
default=0.25,
step=0.05,
))
)
# # remove all of the dimensions of the inout tensor, except for one
model.add(Flatten())
# define the number if units and the activation function
# so that it can be optimized in the hyperparameters tuning step
model.add(
Dense(
units=hp.Int(
'units',
min_value=32,
max_value=512,
step=32,
default=128
),
activation=hp.Choice(
'dense_activation',
values=['relu', 'tanh', 'sigmoid'],
default='relu'
)
)
)
# define the dropout rate so that it can be optimized in the hyperparameters tuning step
model.add(
Dropout(
rate=hp.Float(
'dropout_2',
min_value=0.0,
max_value=0.5,
default=0.25,
step=0.05
)
)
)
model.add(Dense(self.n_outputs, activation='softmax'))
# define the learning rate so that it can be optimized in the hyperparameters tuning step
model.compile(
optimizer=keras.optimizers.Adam(
hp.Float(
'learning_rate',
min_value=1e-4,
max_value=1e-2,
sampling='LOG',
default=1e-3
)
),
loss='categorical_crossentropy',
metrics=['accuracy']
)
return model
# define the class for the CNNLSTM model
class CNNLSTM(HyperModel):
def __init__(self, input_shape,n_outputs):
self.input_shape = input_shape
self.n_outputs = n_outputs
def build(self, hp):
model = keras.Sequential()
model.add(TimeDistributed(
Conv1D(
filters=hp.Choice(
'num_filters_1',
values=[16, 32, 64],
default=64,
),
kernel_size=3,
activation='relu',
input_shape=self.input_shape,
)
)
)
# define the the number of output filters so that it can be optimized in the hyperparameters tuning
# TimeDistributed layer apply the same layer to several inputs.
model.add(TimeDistributed(
Conv1D(
filters=hp.Choice(
'num_filters_2',
values=[16, 32, 64],
default=64,
),
kernel_size=3,
activation='relu'
)
)
)
# define the dropout rate so that it can be optimized in the hyperparameters tuning step
model.add(TimeDistributed(Dropout(rate=hp.Float(
'dropout_1',
min_value=0.0,
max_value=0.5,
default=0.25,
step=0.05,
)
)
)
)
# MaxPooling1D downsamples the input representation by taking
# the maximum value over a spatial window of size pool_size
model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
# remove all of the dimensions of the inout tensor, except for one
model.add(TimeDistributed(Flatten()))
# define the number if units so that it can be optimized in the hyperparameters tuning step
model.add(
LSTM(
units=hp.Int(
'units_LSTM',
min_value=32,
max_value=512,
step=32,
default=128
)
)
)
# define the dropout rate so that it can be optimized in the hyperparameters tuning step
model.add(
Dropout(
rate=hp.Float(
'dropout_2',
min_value=0.0,
max_value=0.5,
default=0.25,
step=0.05
)
)
)
# define the number if units and the activation function
# so that it can be optimized in the hyperparameters tuning step
model.add(
Dense(
units=hp.Int(
'units',
min_value=32,
max_value=512,
step=32,
default=128
),
activation=hp.Choice(
'dense_activation',
values=['relu', 'tanh', 'sigmoid'],
default='relu'
)
)
)
model.add(Dense(self.n_outputs, activation='softmax'))
# define the learning rate so that it can be optimized in the hyperparameters tuning step
model.compile(
optimizer=keras.optimizers.Adam(
hp.Float(
'learning_rate',
min_value=1e-4,
max_value=1e-2,
sampling='LOG',
default=1e-3
)
),
loss='categorical_crossentropy',
metrics=['accuracy']
)
return model
|
993,282 | 8378c44dc6de0968a5f70c734feaf11a1dbc0b42 | licenses = """
.. |CC0| replace:: `CC0, Public Domain Dedication`_
.. |CC BY 3.0| replace:: `Creative Commons Attribution 3.0`_
.. |CC BY-NC 3.0| replace:: `Creative Commons Attribution-NonCommercial 3.0`_
.. |CC BY-SA 3.0| replace:: `Creative Commons Attribution-ShareAlike 3.0`_
.. |CC BY-NC-SA 3.0| replace:: `Creative Commons Attribution-NonCommercial-ShareAlike 3.0`_
.. |CC BY 4.0| replace:: `Creative Commons Attribution 4.0`_
.. |CC BY-NC 4.0| replace:: `Creative Commons Attribution-NonCommercial 4.0`_
.. |CC BY-SA 4.0| replace:: `Creative Commons Attribution-ShareAlike 4.0`_
.. |CC BY-NC-SA 4.0| replace:: `Creative Commons Attribution-NonCommercial-ShareAlike 4.0`_
.. |MIT| replace:: `The MIT License`_
.. _CC0, Public Domain Dedication: https://creativecommons.org/publicdomain/zero/1.0/
.. _Creative Commons Attribution 3.0: https://creativecommons.org/licenses/by/3.0/
.. _Creative Commons Attribution-NonCommercial 3.0: https://creativecommons.org/licenses/by-nc/3.0/
.. _Creative Commons Attribution-ShareAlike 3.0: https://creativecommons.org/licenses/by-sa/3.0/
.. _Creative Commons Attribution-NonCommercial-ShareAlike 3.0: https://creativecommons.org/licenses/by-nc-sa/3.0/
.. _Creative Commons Attribution 4.0: https://creativecommons.org/licenses/by/4.0/
.. _Creative Commons Attribution-NonCommercial 4.0: https://creativecommons.org/licenses/by-nc/4.0/
.. _Creative Commons Attribution-ShareAlike 4.0: https://creativecommons.org/licenses/by-sa/4.0/
.. _Creative Commons Attribution-NonCommercial-ShareAlike 4.0: https://creativecommons.org/licenses/by-nc-sa/4.0/
.. _The MIT License: https://opensource.org/licenses/MIT
"""
|
993,283 | 1e5d87b180a2fbe3debc716118d66544143936a7 | from .controllers.home import HomeController
from .controllers.api.users import UserController
from .controllers.api.facebook import FacebookController
home = HomeController()
user = UserController()
facebook = FacebookController()
def urlpatterns(app):
    """Register every route of the application on *app*.

    Routes are declared as (path, method(s), handler) rows and registered
    in order, so registration order is identical to the original wiring.
    """
    route_table = (
        # root route
        ('/', 'GET', home.index),
        # Users API routes
        ('/api/users', 'GET', user.list),
        ('/api/user/', 'POST', user.create),
        ('/api/user/<pk>', 'GET', user.detail),
        ('/api/user/<pk>', ['OPTIONS', 'DELETE'], user.delete),
        ('/api/user/<pk>', ['OPTIONS', 'PUT'], user.update),
        # Facebook API
        ('/api/facebook/<pk>', 'GET', facebook.detail),
    )
    for path, methods, handler in route_table:
        app.route(path, methods, handler)
|
993,284 | 6ba2aa6bab41610e284ad3b42806f6d433ec371d | # -*- coding: utf-8 -*-
'''
Created on 2010-6-3
@author: 郑仁启
'''
import unittest
from wsjnews.newshandler import NewsParser
class Test(unittest.TestCase):
    """Smoke test for wsjnews.newshandler.NewsParser against a saved page."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testNewsParser(self):
        # Feed the locally saved HTML fixture through the parser; the test
        # only verifies that parsing completes without raising.
        parser = NewsParser()
        parser.feed(html)
        pass
# Module-level fixture: read at import time, so 'test.htm' must exist in the
# working directory before this module is imported.
html = open('test.htm').read()
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testNewsParser']
    unittest.main()
993,285 | c9dc00acd092f7559ce8174b88cff2d9d70d7855 |
import os
import json
import logging
import pickle
from redis import Redis
from cerberus import Validator
from redis.exceptions import ConnectionError
class DataValidationError(Exception):
    """Product data failed deserialization (missing key / bad payload)."""
    pass
class DatabaseConnectionError(Exception):
    """The Redis backend could not be reached."""
    pass
class BadRequestError(Exception):
    """Service-level error for a malformed request."""
    pass
class NotFoundError(Exception):
    """Service-level error for a missing resource."""
    pass
class Product(object):
    """Persistence model for a product stored in Redis.

    Each product is pickled into Redis under its integer id; the special
    key 'index' holds the auto-increment counter used by __next_index().
    """
    logger = logging.getLogger(__name__)
    # Shared Redis client; populated by connect_to_redis() / init_db().
    redis = None
    # Cerberus schema describing a valid serialized product.
    schema = {
        'id': {'type': 'integer'},
        'name': {'type': 'string', 'required': True},
        'category': {'type': 'string', 'required': True},
        'price': {'type': 'integer', 'required': True},
        'description': {'type': 'string', 'required': True},
        'color': {'type': 'string', 'required': True},
        'count': {'type': 'integer', 'required': True}
    }
    __validator = Validator(schema)
    def __init__(self, id=0, name='', category='',
                 price='', description='', color='', count=''):
        # id 0 means "not yet persisted"; save() will allocate a real id.
        # NOTE(review): the '' defaults for price/count conflict with the
        # integer schema above and with available()'s `count > 0` test —
        # confirm callers always supply integers.
        self.id = int(id)
        self.name = name
        self.category = category
        self.price = price
        self.color = color
        self.description = description
        self.count = count
    def save(self):
        """Persist this product, allocating a new id on first save."""
        if self.id == 0:
            self.id = Product.__next_index()
        Product.redis.set(self.id, pickle.dumps(self.serialize()))
    def delete(self):
        """Remove this product's key from Redis."""
        Product.redis.delete(self.id)
    def serialize(self):
        """Return this product as a plain dict (the pickled form)."""
        return {"id": self.id, "name": self.name, "category": self.category,
                "price": self.price, "description": self.description,
                "color": self.color, "count": self.count}
    def deserialize(self, data):
        """Populate every field except id from *data*; return self.

        Raises DataValidationError when a required key is missing or when
        *data* is not a mapping.
        """
        try:
            self.name = data['name']
            self.category = data['category']
            self.price = data['price']
            self.description = data['description']
            self.color = data['color']
            self.count = data['count']
        except KeyError as err:
            raise DataValidationError(
                'Invalid product: missing ' + err.args[0])
        except TypeError:
            raise DataValidationError(
                'Invalid product: body of request contained bad or no data')
        return self
    @staticmethod
    def __next_index():
        # Atomic counter backing id allocation.
        return Product.redis.incr('index')
    @staticmethod
    def all():
        """Return every stored product (skipping the 'index' counter key).

        NOTE(review): redis-py returns bytes keys on Python 3, so the
        `key != 'index'` guard assumes str keys — verify the client is
        configured with decode_responses (or that this runs on Python 2).
        """
        results = []
        for key in Product.redis.keys():
            if key != 'index':
                data = pickle.loads(Product.redis.get(key))
                product = Product(data['id']).deserialize(data)
                results.append(product)
        return results
    @staticmethod
    def available():
        """Return products with a positive stock count."""
        results = []
        for key in Product.redis.keys():
            if key != 'index':
                data = pickle.loads(Product.redis.get(key))
                product = Product(data['id']).deserialize(data)
                if product.count > 0:
                    results.append(product)
        return results
    @staticmethod
    def remove_all():
        """Wipe the entire Redis database (including the id counter)."""
        Product.redis.flushall()
    @staticmethod
    def find(product_id):
        """Return the product stored under *product_id*, or None."""
        if Product.redis.exists(product_id):
            data = pickle.loads(Product.redis.get(product_id))
            product = Product(data['id']).deserialize(data)
            return product
        return None
    @staticmethod
    def __find_by(attribute, value):
        """Case-insensitive exact-match scan over one string attribute."""
        Product.logger.info('Processing %s query for %s', attribute, value)
        search_criteria = value.lower()
        results = []
        for key in Product.redis.keys():
            if key != 'index':
                data = pickle.loads(Product.redis.get(key))
                test_value = data[attribute].lower()
                if test_value == search_criteria:
                    results.append(Product(data['id']).deserialize(data))
        return results
    @staticmethod
    def find_by_category(category):
        """Return products whose category equals *category* (case-insensitive)."""
        return Product.__find_by('category', category)
    @staticmethod
    def find_by_name(name):
        """Return products whose name equals *name* (case-insensitive)."""
        return Product.__find_by('name', name)
    @staticmethod
    def connect_to_redis(hostname, port, password):
        """Connect and ping Redis; set Product.redis (None on failure)."""
        Product.logger.info("Testing Connection to: %s:%s", hostname, port)
        Product.redis = Redis(host=hostname, port=port, password=password)
        try:
            Product.redis.ping()
            Product.logger.info("Connection established")
        except ConnectionError:
            Product.logger.info("Connection Error from: %s:%s", hostname, port)
            Product.redis = None
        return Product.redis
    @staticmethod
    def init_db(redis=None):
        """Initialize the class-level Redis connection.

        Tries, in order: an explicitly supplied client, Cloud Foundry
        VCAP_SERVICES credentials, localhost, then the 'redis' hostname.
        Raises ConnectionError when no backend can be reached.
        """
        if redis:
            Product.logger.info("Using client connection...")
            Product.redis = redis
            try:
                Product.redis.ping()
                Product.logger.info("Connection established")
            except ConnectionError:
                Product.logger.error("Client Connection Error!")
                Product.redis = None
                raise ConnectionError('Could not connect to the Redis Service')
            return
        if 'VCAP_SERVICES' in os.environ:
            Product.logger.info("Using VCAP_SERVICES...")
            vcap_services = os.environ['VCAP_SERVICES']
            services = json.loads(vcap_services)
            creds = services['rediscloud'][0]['credentials']
            Product.logger.info("Conecting to Redis on host %s port %s",
                                creds['hostname'], creds['port'])
            Product.connect_to_redis(creds['hostname'], creds[
                'port'], creds['password'])
        else:
            Product.logger.info(
                "VCAP_SERVICES not found, checking localhost for Redis")
            Product.connect_to_redis('127.0.0.1', 6379, None)
            if not Product.redis:
                Product.logger.info(
                    "No Redis on localhost, looking for redis host")
                Product.connect_to_redis('redis', 6379, None)
        if not Product.redis:
            Product.logger.fatal(
                '*** FATAL ERROR: Could not connect to the Redis Service')
            raise ConnectionError('Could not connect to the Redis Service')
|
993,286 | e4bd9cda8b536d957e2f5443e75bcbfd06a2e6a7 | from django.db import models
from django.contrib.auth.models import User
from menu.models import Menu
# Create your models here.
""" Credits to CI's Boutique Ado Tutorial """
class RecipePost(models.Model):
    """
    A recipe blog post: free-form body text tied to one Menu item and
    its authoring User. Deleting either parent cascades to the post.
    """
    menu = models.ForeignKey(Menu, null=False, blank=False,
                             on_delete=models.CASCADE)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    body = models.TextField()
    def __str__(self):
        # Human-readable admin label: "<menu> | <author>".
        return str(self.menu) + ' | ' + str(self.author)
|
993,287 | 76d33a2fea7ca81949606d23877227ae432a8163 | #!/usr/local/anaconda3/bin/python
import numpy as np
import sys, getopt, time, re
from collections import deque
from tkinter import *
# --- Command-line parsing: -i program file, -s animation speed, -v GUI mode.
try:
    program = None
    speed = 1.0
    visual = False
    opts, args = getopt.getopt(sys.argv[1:], 'hi:s:v', ['help', 'program=', 'speed=', 'visual'])
except:
    print('[PROGRAM NOT FOUND!]')
    exit(1)
for opt, arg in opts:
    if opt in ('-h', '--help'):
        print('Usage: python tmac.py -i <program> (-s <speed> --visual)')
        exit(0)
    elif opt in ('-i', '--program'):
        program = arg
    elif opt in ('-s', '--speed'):
        speed = float(arg)
    elif opt in ('-v', '--visual'):
        visual = True
if program is None:
    print('[PROGRAM NOT FOUND!]')
    exit(1)
# --- Machine definition file: six header lines ({...} sets), then one
# transition per line in the form "state, symbol -> symbol, L/R, state".
with open(program, 'r') as f:
    states = f.readline()[:-1]
    symbols = f.readline()[:-1]
    blank = f.readline()[:-1]
    input_symbols = f.readline()[:-1]
    init_state = f.readline()[:-1]
    final_state = f.readline()[:-1]
    transitions = list()
    for i, line in enumerate(f):
        line = line[:-1]
        if len(line) < 1:
            continue
        # print(i, line)
        X, Y = line.split('->')
        X_tokens = X.split(',')
        Y_tokens = Y.split(',')
        for t in range(2):
            X_tokens[t] = X_tokens[t].replace(' ' , '')
        for t in range(3):
            Y_tokens[t] = Y_tokens[t].replace(' ', '')
        # print(X_tokens, Y_tokens)
        # Stored as [[state, symbol], [new_symbol, direction, new_state]].
        transition = [X_tokens, Y_tokens]
        transitions += [transition]
    f.close()
# - - - - - - - - - - - - - - - - - - -
# Extract the comma-separated contents of each header's {...} group.
regex = '{(.*?)}'
states = re.findall(regex, states)[0].split(',')
symbols = re.findall(regex, symbols)[0].split(',')
blank = re.findall(regex, blank)[0].split(',')
input_symbols = re.findall(regex, input_symbols)[0].split(',')
init_state = re.findall(regex, init_state)[0].split(',')
final_state = re.findall(regex, final_state)[0].split(',')
# Copies used as compute()'s shared default tape/state (see compute()).
input_symbols_copy = input_symbols.copy()
init_state_copy = init_state.copy()
def compute(one_step_run=False, tape=input_symbols_copy, machine_state=init_state_copy[0], index=0):
    """Execute the Turing machine until HALT (or one step when one_step_run).

    Returns (tape, machine_state, index, recent_transition, direction).
    NOTE(review): the default tape is the shared module-level list, so
    repeated no-argument calls keep mutating the same tape — confirm this
    is intentional before reusing compute() elsewhere.
    """
    if not one_step_run:
        print(f'States: {states}\nSymbols: {symbols}\nBlank: {blank}\nInput symbols: \
{input_symbols}\nInitial state: {init_state}\nFinal state: {final_state}')
        print('\nTransitions:')
        for transition in transitions:
            print(transition[0], '->', transition[1])
        print()
    if index < -1 or index > len(tape):
        print('Error')
        exit(1)
    direction = 0
    recent_transition = None
    while machine_state != 'HALT':
        # Grow the tape with blanks when the head walks off either end.
        if index == -1:
            index = 0
            tape.insert(0, blank[0])
        elif index == len(tape):
            tape.append(blank[0])
        print(machine_state, tape[index])
        is_found = False
        for transition in transitions:
            # Transition layout: [[state, symbol], [new_symbol, L/R, new_state]].
            if machine_state == transition[0][0] and tape[index] == transition[0][1]:
                print('applying transition:', transition)
                recent_transition = transition
                is_found = True
                new_symbol = transition[1][0]
                go_to = transition[1][1]
                new_state = transition[1][2]
                tape[index] = new_symbol
                machine_state = new_state
                direction = go_to
                if go_to == 'L':
                    index -= 1
                else:
                    index += 1
                print(tape)
                break
        if not is_found:
            # No applicable transition: treat as an implicit halt.
            print('[TRANSITION NOT FOUND!] Halting...')
            machine_state = 'HALT'
            break
        if one_step_run:
            break
    return tape, machine_state, index, recent_transition, direction
# - - - - - - - - - - - - - - - - - - -
# Headless mode: run the machine to completion on stdout and exit.
if not visual:
    compute()
    exit(0)
print(f'Animation speed: {speed}')
# Mutable animation state shared (via `global`) with the handlers below.
tape = input_symbols.copy()
state = init_state.copy()[0]
index, tape_offset = 0, 1
is_paused = True
root = Tk()
root.resizable(False, False)
root.title('Turing Machine')
canvas = Canvas(root, width=30*16-8, height=80)
control_frame = Frame(root)
canvas.pack()
control_frame.pack()
transition_label = Label(control_frame, text='Transition', width=50)
transition_label.grid(row=1, column=0, columnspan=3)
restart_button = Button(control_frame, text='Restart')
restart_button['state'] = DISABLED
cells = list() # tkinter rectangle objects
cell_symbols = list() # tkinter text objects
_radius = 30
# The machine "head": an oval below the tape with its state drawn inside.
machine_state_symbol = canvas.create_text(1.5*_radius, 60, text='', font=('Purisa', 10))
machine = canvas.create_oval(_radius, 45, 2*_radius, 45+_radius)
# 16 visible tape cells, initially filled with the blank symbol.
for i in range(16):
    cell = canvas.create_rectangle(0, 10, 30+_radius*i, 10+_radius)
    cell_symbol = canvas.create_text(_radius/2+_radius*i, 25, text=blank)
    cells.append(cell)
    cell_symbols.append(cell_symbol)
# Paint the initial tape (cell i shows tape[i-1]; edge cells stay blank).
for i in range(1, len(cell_symbols)-1):
    if i - 1 >= len(tape):
        canvas.itemconfig(cell_symbols[i], text=blank[0])
    else:
        canvas.itemconfig(cell_symbols[i], text=tape[i-1])
canvas.itemconfig(machine_state_symbol, text=state)
def move_machine(direction, machine_state):
    """Shift the head oval one cell left or right and refresh its label."""
    step = _radius if direction == 'R' else -_radius
    canvas.move(machine, step, 0)
    canvas.move(machine_state_symbol, step, 0)
    canvas.itemconfigure(machine_state_symbol, text=machine_state)
def move_tape(direction, machine_state):
    """Scroll the visible tape window one cell and redraw all cell labels.

    tape_offset tracks how far the window is shifted: cell i displays
    tape[i - tape_offset].
    """
    global tape_offset
    sign = -1
    tape_offset += 1
    if direction == 'R':
        sign = 1
        tape_offset -= 2
    # Cells left of the tape's start show blanks.
    # NOTE(review): `sign` is computed but never used, and this range is
    # empty when tape_offset is negative — verify both are intentional.
    for i in range(0, tape_offset):
        canvas.itemconfig(cell_symbols[i], text=blank[0])
    for i in range(tape_offset, 16):
        if i < 0:
            continue
        if i - tape_offset >= len(tape):
            canvas.itemconfig(cell_symbols[i], text=blank[0])
        else:
            canvas.itemconfig(cell_symbols[i], text=tape[i-tape_offset])
    if machine_state is not None:
        canvas.itemconfigure(machine_state_symbol, text=machine_state)
def run(tape, state, index, transition, dir_):
    """Animate one machine step, then schedule the next via root.after."""
    global speed
    global restart_button
    if is_paused:
        # Poll every 100 ms until the user presses Resume.
        root.after(100, lambda: run(tape, state, index, transition, dir_))
        return
    restart_button['state'] = DISABLED
    coord = canvas.coords(machine)
    is_halted = False
    if state == 'HALT':
        print(' ========= HALT ==========')
        restart_button['state'] = NORMAL
        canvas.itemconfig(machine_state_symbol, text='HALT')
        is_halted = True
        # Redraw the tape one last time in the halted configuration.
        canvas.itemconfig(cell_symbols[0], text=blank[0])
        for i in range(1, len(cell_symbols)-1):
            if i - 1 >= len(tape):
                canvas.itemconfig(cell_symbols[i], text=blank[0])
            else:
                canvas.itemconfig(cell_symbols[i], text=tape[i-1])
    elif coord[0] >= 14*_radius and dir_ == 'R':
        # Head reached the window's right edge: scroll the tape instead.
        # print('shifting the tape left...')
        move_tape(direction='R', machine_state=state)
    elif coord[2] <= 2*_radius and dir_ == 'L':
        # Head reached the window's left edge: scroll the other way.
        # print('shifting the tape right...')
        move_tape(direction='L', machine_state=state)
    else:
        move_machine(direction=dir_, machine_state=state)
        # print(f'moving the machine {dir_}')
    if not is_halted:
        transition_label.config(text=f'Transition: {transition[0][0]}, {transition[0][1]} -> {transition[1][0]}, {transition[1][1]}, {transition[1][2]}')
        canvas.itemconfig(cell_symbols[0], text=blank[0])
        for i in range(1, len(cell_symbols)-1):
            if i - 1 >= len(tape):
                canvas.itemconfig(cell_symbols[i], text=blank[0])
            else:
                canvas.itemconfig(cell_symbols[i], text=tape[i-1])
    # Map speed to a delay: higher speed -> shorter pause (min 10 ms).
    time_ = 2000-int(500*np.abs(speed))
    if time_ < 0:
        time_ = 10
    tape, state, index, transition, dir_ = compute(one_step_run=True, tape=tape, machine_state=state, index=index)
    root.after(time_, lambda: run(tape, state, index, transition, dir_))
def restart():
    """Reset machine, tape and canvas to the initial configuration, then
    perform one step and schedule the animation loop."""
    # Fixed: tape_offset and is_paused were previously assigned as locals,
    # so the module-level scroll offset and pause flag were never reset —
    # after a restart the tape window used a stale offset and the animation
    # kept running even though the canvas was reset.
    global tape, state, index, tape_offset, is_paused
    global restart_button
    tape = input_symbols.copy()
    state = init_state.copy()[0]
    index, tape_offset = 0, 1
    is_paused = True
    print('\tRestarted!')
    # print(tape)
    # print(state)
    restart_button['state'] = DISABLED
    # Move the head oval (and its state label) back to the leftmost cell.
    canvas.move(machine, -canvas.coords(machine)[0]+_radius, 0)
    canvas.move(machine_state_symbol, -canvas.coords(machine_state_symbol)[0]+1.5*_radius, 0)
    canvas.itemconfig(machine_state_symbol, text=state)
    canvas.itemconfig(cell_symbols[0], text=blank[0])
    for i in range(1, 16):
        if i - 1 >= len(tape):
            canvas.itemconfig(cell_symbols[i], text=blank[0])
        else:
            canvas.itemconfig(cell_symbols[i], text=tape[i-1])
    # Execute one step immediately so there is a transition to display.
    tape, state, index, transition, dir_ = compute(one_step_run=True, tape=tape, machine_state=state, index=index)
    transition_label.config(text=f'Transition: {transition[0][0]}, {transition[0][1]} -> {transition[1][0]}, {transition[1][1]}, {transition[1][2]}')
    time_ = 2000-int(500*np.abs(speed))
    if time_ < 0:
        time_ = 10
    root.after(time_, lambda: run(tape, state, index, transition, dir_))
def change_speed(delta):
    """Adjust the global animation speed by *delta* and report it."""
    global speed
    speed += delta
    print('Animation speed:', speed)
def resume_pause():
    """Toggle the pause flag and relabel the control button to match."""
    global control_button
    global is_paused
    new_label = 'Pause' if is_paused else 'Resume'
    control_button.config(text=new_label)
    is_paused = not is_paused
# Wire up the control buttons and start the GUI: manual tape scroll arrows,
# resume/pause toggle, speed controls, and an auto-restart after 1 second.
restart_button.config(command=restart)
restart_button.grid(row=2, column=0)
Button(control_frame, text='Speed up', command=lambda: change_speed(0.1)).grid(row=2, column=1)
Button(control_frame, text='Slow down', command=lambda: change_speed(-0.1)).grid(row=2, column=2)
Button(control_frame, text=u'\u2190', command=lambda: move_tape(direction='L', machine_state=None)).grid(row=0, column=0)
Button(control_frame, text=u'\u2192', command=lambda: move_tape(direction='R', machine_state=None)).grid(row=0, column=2)
control_button = Button(control_frame, text='Resume', command=resume_pause)
control_button.grid(row=0, column=1)
root.after(1000, restart)
root.mainloop()
|
993,288 | f85af5e9a9940be5df6823a1c81639a91032aae1 | # -*- encoding: utf-8 -*-
##############################################################################
# Company: Tecvemar, c.a.
# Author: Juan V. Márquez L.
# Creation Date:
# Version: 0.0.0.0
#
# Description: Report parser for: tcv_mrp_planning
#
#
##############################################################################
from report import report_sxw
#~ from datetime import datetime
from osv import fields, osv
#~ from tools.translate import _
#~ import pooler
import decimal_precision as dp
import time
#~ import netsvc
class parser_tcv_mrp_planning(report_sxw.rml_parse):
    """RML report parser for tcv.mrp.planning; exposes get_summary()."""
    def __init__(self, cr, uid, name, context=None):
        context = context or {}
        super(parser_tcv_mrp_planning, self).__init__(
            cr, uid, name, context=context)
        # Make get_summary callable from the RML template.
        self.localcontext.update({
            'get_summary': self._get_summary,
        })
        self.context = context
    def _get_summary(self, obj_lines, *args):
        '''
        obj_lines: an obj.line_ids (lines to be totalized)
        args: [string] with csv field names to be totalized
        Use in rml:
        [[ repeatIn(get_summary(o.line_ids, ('fld_1,fld_2,...')), 't') ]]
        '''
        totals = {}
        # NOTE(review): args[0][0] assumes the first positional argument is
        # itself a sequence whose first element is the CSV string — verify
        # against how the RML template actually passes it.
        field_list = args[0][0]
        fields = field_list.split(',')
        for key in fields:
            totals[key] = 0
        for line in obj_lines:
            for key in fields:
                totals[key] += line[key]
        # Wrapped in a list so repeatIn() iterates exactly once.
        return [totals]
# Register the RML report so OpenERP can render it for tcv.mrp.planning.
report_sxw.report_sxw(
    'report.tcv.mrp.planning.report',
    'tcv.mrp.planning',
    'addons/tcv_mrp_planning/report/tcv_mrp_planning.rml',
    parser=parser_tcv_mrp_planning,
    header=False
)
##------------------------------------------------------------ tcv_mrp_planning
class tcv_mrp_planning(osv.osv_memory):
    """Transient planning snapshot for stone production.

    button_load() builds one line per configured product with block stock
    at quarry/plant, in-process area per stage (sawing/honing/resin) and
    finished/bundled stock, all evaluated at the selected date.
    """
    _name = 'tcv.mrp.planning'
    _description = ''
    ##-------------------------------------------------------------------------
    ##------------------------------------------------------- _internal methods
    def _clear_lines(self, cr, uid, ids, context):
        """Remove all existing planning lines via one2many (2, id) commands."""
        ids = isinstance(ids, (int, long)) and [ids] or ids
        unlink_ids = []
        for item in self.browse(cr, uid, ids, context={}):
            for l in item.line_ids:
                unlink_ids.append((2, l.id))
        self.write(cr, uid, ids, {'line_ids': unlink_ids}, context=context)
        return True
    def _get_blocks_stock(self, cr, uid, item, location_id,
                          product_id, context):
        """Return (total_qty, piece_count) for *product_id* at *location_id*,
        computed through the stock-by-location report at the planning date."""
        obj_loc = self.pool.get('tcv.stock.by.location.report')
        loc_data_id = obj_loc.create(
            cr, uid, {'date': item.date,
                      'location_id': location_id,
                      'product_id': product_id}, context)
        obj_loc.button_load_inventory(
            cr, uid, loc_data_id, context=context)
        loc_brw = obj_loc.browse(
            cr, uid, loc_data_id, context=context)
        stock = 0
        pcs = 0
        for line in loc_brw.line_ids:
            stock += line.product_qty
            pcs += 1
        return (stock, pcs)
    def _get_in_process_data(self, cr, uid, ids, item, context):
        """Return in-process pieces/area grouped as data[stage][product_id],
        where the stage is the first word of the workcenter template name."""
        obj_inp = self.pool.get('tcv.mrp.in.process')
        inp_data_id = obj_inp.create(
            cr, uid, {'date_from': '2015-01-01',
                      'date_to': item.date}, context)
        obj_inp.button_load_in_process(
            cr, uid, inp_data_id, context=context)
        inp_brw = obj_inp.browse(
            cr, uid, inp_data_id, context=context)
        data = {}
        for line in inp_brw.line_ids:
            group = line.template_id.name.split()[0]
            product = line.product_id.id
            if not data.get(group):
                data.update({group: {}})
            if not data[group].get(product):
                data[group].update({product: {'pcs': 0,
                                              'stock': 0}})
            data[group][product]['pcs'] += line.pieces
            data[group][product]['stock'] += line.area
        return data
    def _get_stock_in_bundle(self, cr, uid, ids, item, context):
        """Return pieces/area of available, unreserved bundles per product."""
        obj_bun = self.pool.get('tcv.bundle')
        bun_ids = obj_bun.search(cr, uid, [('state', '=', 'available'),
                                           ('reserved', '=', False)])
        data = {}
        for bundle in obj_bun.browse(cr, uid, bun_ids, context):
            for line in bundle.line_ids:
                product = line.product_id.id
                if not data.get(product):
                    data.update({product: {'pcs': 0,
                                           'stock': 0}})
                data[product]['pcs'] += 1
                data[product]['stock'] += line.lot_factor
        return data
    ##--------------------------------------------------------- function fields
    _columns = {
        'name': fields.char(
            'Name', size=64, required=False, readonly=False),
        'date': fields.date(
            'Date', required=True),
        'company_id': fields.many2one(
            'res.company', 'Company', required=True, readonly=True,
            ondelete='restrict'),
        'line_ids': fields.one2many(
            'tcv.mrp.planning.lines', 'line_id', 'String', readonly=True),
    }
    _defaults = {
        'date': lambda *a: time.strftime('%Y-%m-%d'),
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company').
        _company_default_get(cr, uid, self._name, context=c),
    }
    _sql_constraints = [
    ]
    ##-------------------------------------------------------------------------
    ##---------------------------------------------------------- public methods
    ##-------------------------------------------------------- buttons (object)
    def button_load(self, cr, uid, ids, context=None):
        """Rebuild the planning lines for every configured product.

        In-process and bundle data do not depend on the product being
        iterated, so they are loaded lazily once and reused.
        """
        ids = isinstance(ids, (int, long)) and [ids] or ids
        obj_conf = self.pool.get('tcv.mrp.planning.config')
        conf_ids = obj_conf.search(cr, uid, [])
        item = self.browse(cr, uid, ids[0], context=context)
        if item.line_ids:
            self._clear_lines(cr, uid, ids, context)
        lines = []
        in_process = {}
        stock_bundle = {}
        for product in obj_conf.browse(cr, uid, conf_ids, context=context):
            stock_quarry = self._get_blocks_stock(
                cr, uid, item, product.quarry_location_id.id,
                product.product_id1.id, context)
            stock_plant = self._get_blocks_stock(
                cr, uid, item, product.plant_location_id.id,
                product.product_id1.id, context)
            if not in_process:
                in_process = self._get_in_process_data(
                    cr, uid, ids, item, context)
            # Stage names ("Aserrado"/"Apomazado"/"Resinado") come from the
            # workcenter template names used by _get_in_process_data().
            stock_gangsaw = in_process.get(
                'Aserrado', {}).get(product.product_id2.id, {})
            stock_polish = in_process.get(
                'Apomazado', {}).get(product.product_id2.id, {})
            stock_resin = in_process.get(
                'Resinado', {}).get(product.product_id2.id, {})
            stock_available = self._get_blocks_stock(
                cr, uid, item, product.stock_location_id.id,
                product.product_id3.id, context)
            if not stock_bundle:
                stock_bundle = self._get_stock_in_bundle(
                    cr, uid, ids, item, context)
            lines.append((0, 0, {
                'name': product.name,
                'stock_quarry': stock_quarry[0],
                'pcs_quarry': stock_quarry[1],
                'stock_plant': stock_plant[0],
                'pcs_plant': stock_plant[1],
                'stock_gangsaw': stock_gangsaw.get('stock', 0),
                'pcs_gangsaw': stock_gangsaw.get('pcs', 0),
                'stock_polish': stock_polish.get('stock', 0),
                'pcs_polish': stock_polish.get('pcs', 0),
                'stock_resin': stock_resin.get('stock', 0),
                'pcs_resin': stock_resin.get('pcs', 0),
                'stock_available': stock_available[0],
                'pcs_available': stock_available[1],
                'stock_bundle': stock_bundle.get(
                    product.product_id3.id, {}).get('stock', 0),
                'pcs_bundle': stock_bundle.get(
                    product.product_id3.id, {}).get('pcs', 0),
            }))
        self.write(cr, uid, ids, {'line_ids': lines}, context=context)
        return True
    ##------------------------------------------------------------ on_change...
    ##----------------------------------------------------- create write unlink
    ##---------------------------------------------------------------- Workflow
tcv_mrp_planning()
class tcv_mrp_planning_lines(osv.osv_memory):
    """One row of the planning snapshot: per-product quantities (stock) and
    piece counts (pcs) at each production stage."""
    _name = 'tcv.mrp.planning.lines'
    _description = ''
    ##-------------------------------------------------------------------------
    ##------------------------------------------------------- _internal methods
    ##--------------------------------------------------------- function fields
    _columns = {
        'line_id': fields.many2one(
            'tcv.mrp.planning', 'String', required=True, ondelete='cascade'),
        'name': fields.char(
            'Name', size=64, required=False, readonly=False),
        'stock_quarry': fields.float(
            'stock_quarry', digits_compute=dp.get_precision('Product UoM'),
            readonly=False),
        'pcs_quarry': fields.integer(
            'pcs_quarry'),
        'stock_plant': fields.float(
            'stock_plant', digits_compute=dp.get_precision('Product UoM'),
            readonly=False),
        'pcs_plant': fields.integer(
            'pcs_plant'),
        'stock_gangsaw': fields.float(
            'stock gangsaw', digits_compute=dp.get_precision('Product UoM'),
            readonly=False),
        'pcs_gangsaw': fields.integer(
            'pcs gangsaw'),
        'stock_polish': fields.float(
            'stock pumiced', digits_compute=dp.get_precision('Product UoM'),
            readonly=False),
        'pcs_polish': fields.integer(
            'pcs polish'),
        'stock_resin': fields.float(
            'stock resin', digits_compute=dp.get_precision('Product UoM'),
            readonly=False),
        'pcs_resin': fields.integer(
            'pcs resin'),
        'stock_available': fields.float(
            'stock available', digits_compute=dp.get_precision('Product UoM'),
            readonly=False),
        'pcs_available': fields.integer(
            'pcs available'),
        'stock_bundle': fields.float(
            'stock bundle', digits_compute=dp.get_precision('Product UoM'),
            readonly=False),
        'pcs_bundle': fields.integer(
            'pcs bundle'),
    }
    _defaults = {
    }
    _sql_constraints = [
    ]
    ##-------------------------------------------------------------------------
    ##---------------------------------------------------------- public methods
    ##-------------------------------------------------------- buttons (object)
    ##------------------------------------------------------------ on_change...
    ##----------------------------------------------------- create write unlink
    ##---------------------------------------------------------------- Workflow
tcv_mrp_planning_lines()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
993,289 | ae87abc4bb5afacbcb17fdbf93f5937f53912246 | import pytest
@pytest.mark.usefixtures("setup")
class TestExample:
    """Demo of a class-level fixture: the 'setup' fixture (defined in
    conftest.py) runs for every test method in this class."""
    def test_fixtureDemo(self):
        print("i will execute steps in fixtureDemo method")
    def test_fixtureDemo1(self):
        print("i will execute steps in fixtureDemo1 method")
    def test_fixtureDemo2(self):
        print("i will execute steps in fixtureDemo2 method")
    def test_fixtureDemo3(self):
        print("i will execute steps in fixtureDemo3 method")
993,290 | 632633b3705c9522f0b03996f638ae95ace3a18e | # Generated by Django 2.1.5 on 2019-03-24 19:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the MeterReading table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='MeterReading',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(verbose_name='Date of reading')),
                # NOTE(review): 'Meter readng' is a typo, but migrations are
                # historical records — fix the label on the model and add a
                # follow-up migration instead of editing this one.
                ('reading', models.FloatField(verbose_name='Meter readng')),
            ],
        ),
    ]
|
993,291 | ffdeb8ed8bc677e6ca0c82206382399d7bec1a65 | '''
Problema:
* Escreva um programa que reconhece se uma string é um palíndromo.
* Considere letras maiúsculas e minúsculas como sendo iguais. Exemplo: Arara, ovO, reter.
'''
# Read the string to test (prompt fixed: "sring" -> "string").
fraseOriginal = input("Digite uma string: ")
# Lowercase so the comparison is case-insensitive (e.g. "Arara").
frase = fraseOriginal.lower()
# A palindrome reads the same forwards and backwards; the redundant
# full-slice copy (frase[::1]) was removed.
if frase == frase[::-1]:
    print("A string {} é palíndroma.".format(fraseOriginal))
else:
    print("A string {} não é palíndroma.".format(fraseOriginal))
|
993,292 | f5f1639a7b6e324f0bfe6513003a7b8638c38d64 | import random
from bs4 import BeautifulSoup
from flightsio.constants import ROUTE_FIELDS
def parse_destinations(destinations_html):
    """Map airport names to absolute flightmapper URLs.

    Only anchors whose href contains '/airport/' are kept.
    NOTE(review): random.shuffle only changes dict insertion order (and
    which duplicate name wins) — confirm the randomization is intentional.
    """
    soup = BeautifulSoup(destinations_html, 'html.parser')
    links = [link for link in soup.find_all('a') if '/airport/' in link.get('href')]
    random.shuffle(links)
    return {
        link.text: 'https://info.flightmapper.net' + link.get('href')
        for link in links
    }
def parse_flight_list(flights_html):
    """Extract the flights listed between two destinations.

    Only anchors whose href contains '/flight/' are flights; each is mapped
    from its display text to an absolute flightmapper URL.
    Example page: http://info.flightmapper.net/airport/PHX/OKC
    :params flights_html: HTML page listing flights between two destinations.
    """
    document = BeautifulSoup(flights_html, 'html.parser')
    flights = {}
    for anchor in document.find_all('a'):
        href = anchor.get('href')
        if '/flight/' not in href:
            continue
        flights[anchor.text] = 'https://info.flightmapper.net' + href
    return flights
def parse_flight_routes(flight, flight_url, flight_html):
    """Parse every route row (<tr> containing <td> cells) of a flight page
    into dicts keyed by ROUTE_FIELDS, tagged with the flight name and URL."""
    soup = BeautifulSoup(flight_html, 'html.parser')
    def get_flight_info(flight_rec):
        # Each row's cell texts, one per line; pad so short rows still map
        # onto every expected field.
        data = flight_rec.text.strip().split('\n')
        if len(data) < len(ROUTE_FIELDS):
            data.append('')
        info = {field: data[i] for i, field in enumerate(ROUTE_FIELDS)}
        return {'flight': flight, **info, 'url': flight_url}
    return [get_flight_info(rec) for rec in soup.find_all('tr') if len(rec.find_all('td'))]
|
993,293 | 132877613c69dacdeeedfdf249f43034bd74d495 | import os
import sys
import pygame
class Colors:
    """Named RGB color constants used for drawing."""
    black = (0, 0, 0)
    white = (255, 255, 255)
    gray = (96, 96, 96)
    red = (255, 0, 0)
    green = (0, 255, 0)
    blue = (0, 0, 255)
    purple = (255, 0, 255)
    yellow = (255, 255, 0)
    orange = (255,165,0)
    aqua = (0, 255, 255)
    brown = (165, 42, 42)
    pink = (255, 192, 203)
    # Sentinel color treated as transparent when blitting (see load_image).
    colorkey = (244, 244, 244)
class States:
    """String constants naming game-object states."""
    standing = 'standing'
    moving = 'moving'
    shooting = 'shooting'
    open = 'open'
    closed = 'closed'
class Constants:
    """String constants naming the four movement directions."""
    up = 'up'
    down = 'down'
    left = 'left'
    right = 'right'
def terminate():
    """Shut down pygame and exit the process."""
    pygame.quit()
    sys.exit()
def cc(x, y, cell_size):
    """Convert grid-cell coordinates to pixel coordinates."""
    pixel_x = x * cell_size
    pixel_y = y * cell_size
    return (pixel_x, pixel_y)
def load_image(directory, file_name, colorkey=None):
    """Load an image from *directory*, converted to display format.

    When *colorkey* is given, pixels of that color become transparent
    when the image is blitted.
    """
    image = pygame.image.load(os.path.join(directory, file_name)).convert()
    if colorkey is not None:
        image.set_colorkey(colorkey)
    return image
def load_images_from_directory(directory, extensions):
    '''Yield (base_name, image) pairs for every regular file in
    *directory* whose name ends with one of *extensions*.
    '''
    for entry in os.listdir(directory):
        if not os.path.isfile(os.path.join(directory, entry)):
            continue
        if not entry.endswith(extensions):
            continue
        base_name = os.path.splitext(entry)[0]
        yield base_name, load_image(directory, entry)
def slice_sprite_sheet(image, sub_image_size):
    """Split a sprite sheet into a 2-D grid of subsurfaces.

    Returns a list of columns: images[col][row] is the subsurface at
    pixel (col * width, row * height). Raises when the sheet does not
    divide evenly into cells of sub_image_size.
    """
    image_width, image_height = image.get_width(), image.get_height()
    if (image_width % sub_image_size[0] != 0
            or image_height % sub_image_size[1] != 0):
        # Ad-hoc exception type defined inline so the error name is
        # self-describing in tracebacks.
        class NotEvenlyDivisable(Exception): pass
        raise NotEvenlyDivisable('Image width or height is not evenly divisable by sub image width or height.')
    images = []
    for index, x in enumerate(range(0, image_width, sub_image_size[0])):
        images.append([])
        for y in range(0, image_height, sub_image_size[1]):
            # Subsurfaces share pixels with the sheet (no copy).
            images[index].append(image.subsurface(pygame.Rect((x, y), sub_image_size)))
    return images
def blit_over(surface, rect, background):
    """Repaint *rect* on *surface* from the matching region of *background*."""
    surface.blit(background.subsurface(rect), rect)
# Fixed typo: the guard previously compared against '__main_' (missing the
# trailing underscore), so the branch could never execute.
if __name__ == '__main__':
    pass
993,294 | d7185e034fd552d6c7961bc757085e7d86d6f4fa | """
TRAFFIC IMAGE CLASSIFICATION
@author: Hayk Stepanyan
"""
import cv2
import numpy as np
import os
import sys
import tensorflow as tf
from sklearn.model_selection import train_test_split
EPOCHS = 15
IMG_WIDTH, IMG_HEIGHT = 30, 30
NUM_CATEGORIES = 43
TEST_SIZE = 0.3
def load_data(data_dir):
    """
    Load the traffic-sign dataset.

    Args:
        data_dir - directory with one sub-directory per category,
                   named "0" .. str(NUM_CATEGORIES - 1)
    Return:
        tuple (images, labels), where images is the list of images and
        labels is a list of integers representing corresponding labels
    """
    images, labels = [], []
    # Consistency fix: use the module-level constants instead of repeating
    # the literals 43 and (30, 30), so the loader stays in sync with
    # NUM_CATEGORIES and the model's input size.
    for category in range(NUM_CATEGORIES):
        for image_name in os.listdir(os.path.join(data_dir, str(category))):
            image = cv2.imread(os.path.join(data_dir, str(category), image_name))
            # cv2.resize takes (width, height).
            images.append(cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT)))
            labels.append(category)
    return images, labels
def load_model():
    """
    Return:
        Compiled (untrained) CNN mapping (IMG_WIDTH, IMG_HEIGHT, 3) images
        to NUM_CATEGORIES softmax class probabilities.
    """
    model = tf.keras.models.Sequential([
        # Two convolutional feature extractors, then pooled conv layers.
        tf.keras.layers.Conv2D(
            32, (4, 4), activation="relu", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)
        ),
        tf.keras.layers.Conv2D(
            64, (4, 4), activation="relu"
        ),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(
            10, (3, 3)
        ),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(
            10, (3, 3)
        ),
        # Dense classifier head with dropout regularization.
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(256, activation="relu"),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Dense(100, activation="relu"),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Dense(NUM_CATEGORIES, activation="softmax")
    ])
    # categorical_crossentropy matches the one-hot labels produced by
    # tf.keras.utils.to_categorical in the training script.
    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=["accuracy"]
    )
    return model
# --- Script entry point ----------------------------------------------------
if len(sys.argv) not in [2, 3]:
    sys.exit("Usage: python traffic.py data_directory [model.h5]")
# Fixed: the data directory from the command line was previously ignored in
# favor of a hard-coded "gtsrb" path, contradicting the usage message above.
images, labels = load_data(sys.argv[1])
# Split dataset into training and test
labels = tf.keras.utils.to_categorical(labels)
x_train, x_test, y_train, y_test = train_test_split(
    np.array(images), np.array(labels), test_size=TEST_SIZE
)
model = load_model()
model.fit(x_train, y_train, epochs=EPOCHS)
model.evaluate(x_test, y_test, verbose=2)
# Save model to file
if len(sys.argv) == 3:
    filename = sys.argv[2]
    model.save(filename)
    print("Model saved to {}".format(filename))
|
993,295 | beed65d9d287d8d4bdeb12d8fde706ab450ac3ca | import bisect
class FlumeObjectContainer(object):
    """Singleton container keeping flume objects sorted by name.

    Entries are stored as [name, object] pairs kept in sorted order via
    bisect; object identity is tracked separately to reject duplicates.
    """
    _instance = None
    def __new__(cls, *args, **kwargs):
        # Classic singleton: every instantiation returns the same object.
        # NOTE(review): forwarding *args to object.__new__ raises TypeError
        # on Python 3 when args are supplied — verify the target interpreter.
        if not cls._instance:
            cls._instance = super(FlumeObjectContainer, cls).__new__(cls, *args)
        return cls._instance
    def __init__(self):
        # NOTE(review): __init__ runs on every FlumeObjectContainer() call
        # and wipes the shared instance's state — confirm that is intended.
        self.__objects = []
        self.__objects_ids = {}
    def __iter__(self):
        # Yields [name, object] pairs in sorted-by-name order.
        for flume_object in self.__objects:
            yield flume_object
    def __len__(self):
        return len(self.__objects)
    def clear(self):
        """Drop all stored objects and identity bookkeeping."""
        self.__objects = []
        self.__objects_ids = {}
    def add(self, flume_object):
        """Insert *flume_object* keyed by its .name; return False if the
        same object (by identity) was already added."""
        if id(flume_object) in self.__objects_ids:
            return False
        key = flume_object.name
        bisect.insort_left(self.__objects, [key, flume_object])
        self.__objects_ids[id(flume_object)] = flume_object
        return True
    def search(self, name):
        # NOTE(review): __objects holds [name, object] pairs, so a bare name
        # is never a member and this method always returns None — it appears
        # unfinished (no bisect lookup, no return value on the match path).
        if name in self.__objects:
            return
|
993,296 | b0b7bbb3741cce48962acf0af000621daac92d44 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import os
import matplotlib.pyplot as plt
from matplotlib import _png
# Directory holding the PNG fixtures that ship with these benchmarks.
dirname = os.path.join(os.path.dirname(__file__), 'data')
# Sorted basenames of the PngSuite files (basn*.png); used as benchmark params.
files = sorted(os.path.basename(f)
               for f in glob.iglob(os.path.join(dirname, 'basn*.png')))
def time_pngsuite(fname):
    """Benchmark decoding a single PngSuite image via ``plt.imread``."""
    path = os.path.join(dirname, fname)
    plt.imread(path)


# asv parameterization: run the benchmark once per PngSuite file.
time_pngsuite.params = files
def time_imread_png_uint16():
    """Benchmark reading a 16-bit PNG through matplotlib's private _png reader."""
    fname = os.path.join(os.path.dirname(__file__), 'data/uint16.png')
    _png.read_png_int(fname)
|
993,297 | c45658cc444a8ff7a30eb9aa8658fc2c61ce59da | slaretil_edocinu tropmi __erutuf__ morf
EIesaBLHN tropmi lhn. morf
:)EIesaBLHN(EIBLM ssalc
)x?('''r = LRU_DILAV_
//:?sptth
/moc.\)blm>etis<P?(*).\+]-_z-ad\[:?(
:?(
:?(
|-c*)/+]/^[:?(
:?(
|lmth.\)debme-lanretni-m|debme:?(/debme/oediv/derahs
|psj.\)xedni|yalp:?(+)/+]/^[:?(
=di_tnetnocb\?*.?\)
)
)+d\>di<P?(
)
'''
'moc.blm.tnetnoc' = NIAMOD_TNETNOC_
[ = STSET_
{
,'33989643-c/hctac-ralucatceps-syelkca/oediv/sreniram/moc.blm.www//:sptth' :'lru'
,'d2fd12d38b328dab60ceecfcad853236' :'5dm'
{ :'tcid_ofni'
,'33989643' :'di'
,'4pm' :'txe'
,"hctac ralucatceps s'yelkcA" :'eltit'
,'0fb5122affea2fad8cbc3f4be189a5f7:5dm' :'noitpircsed'
,66 :'noitarud'
,0005995041 :'pmatsemit'
,'22704102' :'etad_daolpu'
,'$gpj.\*.//:?sptth^:er'r :'lianbmuht'
,}
,}
{
,'36669443-c/ybred-rof-seraperp-notnats/oediv/moc.blm.www//:sptth' :'lru'
,'f9129bea6e53cf465a0ccac9fb9162fb' :'5dm'
{ :'tcid_ofni'
,'36669443' :'di'
,'4pm' :'txe'
,'ybreD rof seraperp notnatS' :'eltit'
,'75afdeaf4ba31c9e9609c9df5e1ec00d:5dm' :'noitpircsed'
,64 :'noitarud'
,0020215041 :'pmatsemit'
,'11704102' :'etad_daolpu'
,'$gpj.\*.//:?sptth^:er'r :'lianbmuht'
,}
,}
{
,'51187543-c/pmahc-ybred-sa-staeper-sedepsec/oediv/moc.blm.www//:sptth' :'lru'
,'8239eb8bf08809b006cda1356719bb99' :'5dm'
{ :'tcid_ofni'
,'51187543' :'di'
,'4pm' :'txe'
,'pmahc ybreD sa staeper sedepseC' :'eltit'
,'70dafaf185f90bf6fc4d562ec352fd80:5dm' :'noitpircsed'
,884 :'noitarud'
,6334145041 :'pmatsemit'
,'51704102' :'etad_daolpu'
,'$gpj.\*.//:?sptth^:er'r :'lianbmuht'
,}
,}
{
,'51977543-c/ybred-nur-emoh-no-atsituab/oediv/moc.blm.www//:sptth' :'lru'
,'ce033f6dbee1ee3667e060b21a75b8ad' :'5dm'
{ :'tcid_ofni'
,'51977543' :'di'
,'4pm' :'txe'
,'ybreD nuR emoH no atsituaB' :'eltit'
,'bf0f9388a46cddd6890d34113043b08b:5dm' :'noitpircsed'
,25 :'noitarud'
,2215045041 :'pmatsemit'
,'51704102' :'etad_daolpu'
,'$gpj.\*.//:?sptth^:er'r :'lianbmuht'
,}
,}
{
,'890055811-c/remoh-a-fo-mahkceb-mit-bor-ot-llaw-eht-pu-yedips-seog-rallip-nivek-syaj-eulb/swen/moc.blm.www//:sptth' :'lru'
,'86d429c996e9d4fbddf153255b73e90e' :'5dm'
{ :'tcid_ofni'
,'38790657' :'di'
,'4pm' :'txe'
,'hctac rof sbmilc ralliP :C tsuM' :'eltit'
,'nur emoh a fo mahkceB miT bor ot tfel ni llaw eht gnibmilc yb ecnanimod evisnefed sih seunitnoc ralliP niveK redleiftuo syaJ eulB :51/51/4' :'noitpircsed'
,0229319241 :'pmatsemit'
,'51405102' :'etad_daolpu'
}
,}
{
,'49639776=dit?3843202531-c/llewdlac-ffo-sremoh-evorgrah/oediv/moc.blm.www//:sptth' :'lru'
,eurT :'gnihctam_ylno'
,}
{
,'blm=ytreporp&422=thgieh&004=htdiw&6629746=di_cipot&58029653=di_tnetnoc?lmth.debme/debme/oediv/derahs/moc.blm.m//:ptth' :'lru'
,eurT :'gnihctam_ylno'
,}
{
,'35599563=di_tnetnoc?lmth.debme/debme/oediv/derahs/moc.blm.blm//:ptth' :'lru'
,eurT :'gnihctam_ylno'
,}
{
,'35599563=di_tnetnoc?psj.yalp/oediv/se/moc.blm.blm//:ptth' :'lru'
,eurT :'gnihctam_ylno'
,}
{
,'38757115-c/hctac-gnidils-taerg-syttocsip/oediv/slanidrac/moc.blm.www//:sptth' :'lru'
,eurT :'gnihctam_ylno'
,}
{
remoh-a-fo-mahkceb-mit-bor-ot-llaw-eht-pu-yedips-seog-rallip-nivek-syaj-eulb/890055811/elcitra/swen/moc.blm.m//:ptth morF #
,'blm=bulc&debme_elcitra/890055811_elcitra/aidemitlum/blm=noitceSetis&eslaf=edomhsah&eurt=yalpotua&blm=ytreporp&38790657=di_tnetnoc?lmth.debme-lanretni-m/debme/oediv/derahs/moc.blm.blm//:ptth' :'lru'
,eurT :'gnihctam_ylno'
,}
{
,'248219872-c/naf-sa-na-morf-sessalgnus-deworrob-zemog-solrac/4tuc/moc.blm.www//:sptth' :'lru'
,eurT :'gnihctam_ylno'
}
]
|
993,298 | bab7193f5186042247076e253b0cb74c52d9fb8e | import os
from typing import Callable, Optional, Tuple
import numpy as np
from torch.utils.data import Dataset
from pl_bolts.utils import _PIL_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
if _PIL_AVAILABLE:
from PIL import Image
else: # pragma: no cover
warn_missing_pkg("PIL", pypi_name="Pillow")
# Every label id that can appear in a raw KITTI semantic mask (-1 through 33).
KITTI_LABELS = tuple(range(-1, 34))
# Label ids considered useful for training; everything else is treated as
# "void" and mapped to the ignore index (see KittiDataset.encode_segmap).
DEFAULT_VALID_LABELS = (7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33)
class KittiDataset(Dataset):
    """KITTI Dataset for sematic segmentation.

    You need to have downloaded the Kitti semantic dataset first and provide the path to where it is saved.
    You can download the dataset here: http://www.cvlibs.net/datasets/kitti/eval_semseg.php?benchmark=semantics2015

    There are 34 classes, however not all of them are useful for training (e.g. railings on highways).
    Useful classes (the pixel values of these classes) are stored in `valid_labels`, other labels
    except useful classes are stored in `void_labels`.

    The class id and valid labels(`ignoreInEval`) can be found in here:
    https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py

    Args:
        data_dir (str): where to load the data from path, i.e. '/path/to/folder/with/data_semantics/'
        img_size (tuple): image dimensions (width, height)
        valid_labels (tuple): useful classes to include
        transform (callable, optional): A function/transform that takes in the numpy array and transforms it.
    """

    IMAGE_PATH = os.path.join("training", "image_2")
    MASK_PATH = os.path.join("training", "semantic")

    def __init__(
        self,
        data_dir: str,
        img_size: tuple = (1242, 376),
        valid_labels: Tuple[int, ...] = DEFAULT_VALID_LABELS,
        transform: Optional[Callable] = None,
    ) -> None:
        if not _PIL_AVAILABLE:  # pragma: no cover
            raise ModuleNotFoundError("You want to use `PIL` which is not installed yet.")

        self.img_size = img_size
        self.valid_labels = valid_labels
        self.void_labels = tuple(label for label in KITTI_LABELS if label not in self.valid_labels)
        self.ignore_index = 250
        # Maps each valid raw label to a contiguous class index 0..len(valid_labels)-1.
        self.class_map = dict(zip(self.valid_labels, range(len(self.valid_labels))))
        self.transform = transform

        self.data_dir = data_dir
        self.img_path = os.path.join(self.data_dir, self.IMAGE_PATH)
        self.mask_path = os.path.join(self.data_dir, self.MASK_PATH)
        self.img_list = self.get_filenames(self.img_path)
        self.mask_list = self.get_filenames(self.mask_path)

    def __len__(self) -> int:
        return len(self.img_list)

    def __getitem__(self, idx: int):
        """Return the (image, encoded mask) pair at `idx`, resized to `img_size`."""
        img = Image.open(self.img_list[idx])
        img = img.resize(self.img_size)
        img = np.array(img)

        mask = Image.open(self.mask_list[idx])
        # NOTE(review): Image.resize defaults to non-nearest resampling for
        # many modes, which can blend label values at class boundaries —
        # consider Image.NEAREST for masks; verify the mask image mode.
        mask = mask.resize(self.img_size)
        mask = np.array(mask)
        mask = self.encode_segmap(mask)

        # Only the image is passed through `transform`; the mask is returned as-is.
        if self.transform is not None:
            img = self.transform(img)

        return img, mask

    def encode_segmap(self, mask):
        """Sets all pixels of the mask with any of the `void_labels` to `ignore_index` (250 by default).

        It also sets all of the valid pixels to the appropriate value between 0 and `len(valid_labels)` (the number of
        valid classes), so it can be used properly by the loss function when comparing with the output.
        """
        for voidc in self.void_labels:
            mask[mask == voidc] = self.ignore_index
        for validc in self.valid_labels:
            mask[mask == validc] = self.class_map[validc]
        # remove extra idxs from updated dataset
        mask[mask > 33] = self.ignore_index
        return mask

    def get_filenames(self, path: str):
        """Returns a sorted list of absolute paths to images inside given `path`.

        Sorting is essential: `__getitem__` pairs images and masks by index,
        so both directory listings must come back in the same deterministic
        order. `os.listdir` order is filesystem-dependent and previously
        could pair an image with the wrong mask.
        """
        return sorted(os.path.join(path, filename) for filename in os.listdir(path))
|
993,299 | 344694583f49b373670eaad7ebdc9d70374bcd7b | import datetime
import logging
import uuid
from functools import partial
import core.case.subscription as case_subscription
from core.case.database import case_db
logging.basicConfig() # needed so apscheduler can log to console when an error occurs
class EventEntry(object):
    """A single logged event.

    Attributes:
        uuid (str): a unique identifier
        timestamp (datetime): UTC time of creation
        type (str): type of event logged
        caller (str): name/id of the object which created the event
        ancestry (list[str]): callchain which produced the event
        message (str): Event message
        data: other information attached to event
    """

    def __init__(self, sender, entry_type, entry_message, data=None, name=""):
        self.uuid = str(uuid.uuid4())
        self.timestamp = datetime.datetime.utcnow()
        self.type = entry_type
        # An explicit name wins; otherwise fall back to the sender's own
        # name attribute, and finally to its id.
        if name:
            self.caller = name
        elif hasattr(sender, "name"):
            self.caller = sender.name
        else:
            self.caller = sender.id
        self.ancestry = list(sender.ancestry)
        self.message = entry_message
        self.data = data

    def __repr__(self):
        fields = {
            "uuid": self.uuid,
            "timestamp": self.timestamp,
            "type": str(self.type),
            "caller": str(self.caller),
            "ancestry": str(self.ancestry),
            "message": str(self.message),
            "data": str(self.data),
        }
        return str(fields)
def __add_entry_to_case_db(sender, event, message_name):
    # Collect every case whose subscriptions match this sender/message pair,
    # then persist the event once for all of them.
    matching_cases = []
    for case in case_subscription.subscriptions:
        if case_subscription.is_case_subscribed(case, sender.ancestry, message_name):
            matching_cases.append(case)
    if matching_cases:
        case_db.add_event(event, matching_cases)
def __add_entry_to_case_wrapper(sender, event_type, message_name, entry_message, data):
    # Build the EventEntry here so signal senders only need to supply themselves.
    entry = EventEntry(sender, event_type, entry_message, data)
    __add_entry_to_case_db(sender, entry, message_name)
def __add_entry(message_name, event_type, entry_message, data):
    # Bind everything except the sender; blinker supplies the sender later.
    return partial(
        __add_entry_to_case_wrapper,
        message_name=message_name,
        event_type=event_type,
        entry_message=entry_message,
        data=data,
    )
def add_system_entry(entry_message, data=''):
    """Create a blinker callback that records a SYSTEM-level event.

    Args:
        entry_message (str): message to attach to the logged event
        data: optional payload stored alongside the event

    Returns:
        A partial that is applied first to a message name, then to the
        sender by the blinker signal.
    """
    return partial(__add_entry,
                   entry_message=entry_message,
                   data=data,
                   event_type='SYSTEM')
def add_workflow_entry(entry_message, data=''):
    """Create a blinker callback that records a WORKFLOW-level event.

    Args:
        entry_message (str): message to attach to the logged event
        data: optional payload stored alongside the event

    Returns:
        A partial that is applied first to a message name, then to the
        sender by the blinker signal.
    """
    return partial(__add_entry,
                   entry_message=entry_message,
                   data=data,
                   event_type='WORKFLOW')
def add_step_entry(entry_message, data=''):
    """
    Callback to use for blinker Signals which log step events

    :param entry_message(str): message to log
    :param data: optional payload stored alongside the event
    :return: Closure which can be called twice. First on a message name, then on a sender by the blinker signal
    """
    return partial(__add_entry,
                   event_type='STEP',
                   entry_message=entry_message,
                   # Bug fix: previously hard-coded to '' so the caller's
                   # `data` argument was silently discarded, unlike every
                   # sibling add_*_entry helper.
                   data=data)
def add_next_step_entry(entry_message, data=''):
    """Create a blinker callback that records a NEXT-step event.

    Args:
        entry_message (str): message to attach to the logged event
        data: optional payload stored alongside the event

    Returns:
        A partial that is applied first to a message name, then to the
        sender by the blinker signal.
    """
    return partial(__add_entry,
                   entry_message=entry_message,
                   data=data,
                   event_type='NEXT')
def add_flag_entry(entry_message, data=''):
    """Create a blinker callback that records a FLAG-level event.

    Args:
        entry_message (str): message to attach to the logged event
        data: optional payload stored alongside the event

    Returns:
        A partial that is applied first to a message name, then to the
        sender by the blinker signal.
    """
    return partial(__add_entry,
                   entry_message=entry_message,
                   data=data,
                   event_type='FLAG')
def add_filter_entry(entry_message, data=''):
    """Create a blinker callback that records a FILTER-level event.

    Args:
        entry_message (str): message to attach to the logged event
        data: optional payload stored alongside the event

    Returns:
        A partial that is applied first to a message name, then to the
        sender by the blinker signal.
    """
    return partial(__add_entry,
                   entry_message=entry_message,
                   data=data,
                   event_type='FILTER')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.