index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
19,100 | 6614ccbf6df3851c283d818409f262f2115b2ed3 | from rest_framework import serializers
from api.models import APIVideos
class APIVideosSerializer(serializers.ModelSerializer):
    """Serialize every field of the APIVideos model for the REST API."""

    class Meta:
        model = APIVideos
        # '__all__' exposes every model field; tighten if some fields are internal.
        fields = '__all__'
|
19,101 | 573ec44a8c9bc268b0913506edd24ea88cfffb76 | # coding=utf-8
"""
Lädt die Favoriten eines Users
@author Markus Tacker <m@coderbyheart.de>
"""
import vertx
import mutex
import time
from com.xhaus.jyson import JysonCodec as json
from oauth import Consumer
from core.event_bus import EventBus
from util import parse_date
from datetime import datetime, timedelta
config = vertx.config()  # verticle configuration (Twitter OAuth credentials)
# NOTE(review): curators/friends are never written in this view — presumably
# filled elsewhere in the verticle or leftovers; confirm before removing.
curators = []
friends = []
# 24 hours before startup; presumably a recency cutoff — TODO confirm usage.
aday = datetime.now() - timedelta(1)
def response_handler(resp, user_id):
    """Collect the HTTP response for user_id's favorites and publish the
    result on the event bus (empty list on failure)."""
    favs = {'user_id': user_id, 'favorites': []}
    @resp.body_handler
    def body_handler(body):
        if resp.status_code == 200:
            favs['favorites'] = json.loads(body.to_string())
        else:
            # Non-200: keep the empty favorites list, log the raw body.
            print "Failed to fetch favorites: %s" % body.to_string()
        EventBus.send('log.event', "user.favorites.list.result")
        EventBus.send('user.favorites.list.result', json.dumps(favs))
def fetch_favorites(message):
    """Handle a 'user.favorites.list' event: fetch the user's last 20 Twitter
    favorites via the OAuth consumer and reply asynchronously."""
    user = message.body
    consumer = Consumer(api_endpoint="https://api.twitter.com/", consumer_key=config['consumer_key'], consumer_secret=config['consumer_secret'], oauth_token=config['oauth_token'], oauth_token_secret=config['oauth_token_secret'])
    consumer.get("/1.1/favorites/list.json", {'user_id': user['id'], 'count': 20}, lambda resp: response_handler(resp, user['id']))
# Register the handler on the event bus (not a local-only handler).
EventBus.register_handler('user.favorites.list', False, fetch_favorites)
|
19,102 | 33269e94abbdf0e891ac10684eb92349fb5f6290 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Chalmers device (20 qubit).
"""
from qiskit.providers.models import (GateConfig, QasmBackendConfiguration)
from qiskit.test.mock.fake_backend import FakeBackend
from qiskit.transpiler import CouplingMap
class FakeChalmers(FakeBackend):
    """A fake 20 qubit backend."""

    def __init__(self):
        """
        00 ↔ 01 ↔ 02 ↔ 03 ↔ 04
        ↕    ↕    ↕    ↕    ↕
        05 ↔ 06 ↔ 07 ↔ 08 ↔ 09
        ↕    ↕    ↕    ↕    ↕
        10 ↔ 11 ↔ 12 ↔ 13 ↔ 14
        ↕    ↕    ↕    ↕    ↕
        15 ↔ 16 ↔ 17 ↔ 18 ↔ 19
        """
        # from_grid is a classmethod; no need to construct an empty
        # CouplingMap first just to call it.
        cmap = CouplingMap.from_grid(4, 5).get_edges()
        configuration = QasmBackendConfiguration(
            backend_name='fake_chalmers',
            backend_version='0.0.0',
            n_qubits=20,
            basis_gates=['rx', 'rz', 'iswap', 'cz', 'id'],
            simulator=False,
            local=True,
            conditional=False,
            open_pulse=False,
            max_shots=100000,
            memory=False,
            gates=[GateConfig(name='TODO', parameters=[], qasm_def='TODO')],
            coupling_map=cmap,
        )
        super().__init__(configuration)

    def properties(self):
        """Returns a snapshot of device properties as recorded on 03/05/21.

        No properties are recorded for this fake backend, hence None.
        """
        return None
19,103 | a783ef7a13bd529484852933b3e56c8a19bc2dd1 | from logging import Logger
from typing import List
from bson.objectid import ObjectId
from kink import inject
from surebets_finder.raw_content.aplication.clients.betclick_client import BetClickClient
from surebets_finder.raw_content.aplication.clients.client import IWebClient
from surebets_finder.raw_content.aplication.clients.efortuna_client import EFortunaClient
from surebets_finder.raw_content.aplication.clients.lvbet_client import LvBetClient
from surebets_finder.raw_content.aplication.url_factory import UrlFactory
from surebets_finder.raw_content.domain.entities import RawContent
from surebets_finder.raw_content.domain.repositories import RawContentRepository
from surebets_finder.shared.category import Category
from surebets_finder.shared.provider import Provider
@inject
class Importer:
    """Fetches raw bookmaker content for every provider/category pair and persists it."""

    def __init__(self, repository: RawContentRepository, logger: Logger) -> None:
        self._repository = repository
        self._logger = logger

    def _get_client(self, provider: Provider, urls: List[str]) -> IWebClient:
        """Return a web client for *provider*, built over *urls*.

        Clients are constructed lazily: only the selected provider's client is
        instantiated (the original built all three on every call).

        Raises:
            KeyError: if *provider* has no registered client class.
        """
        client_classes = {
            Provider.EFORTUNA: EFortunaClient,  # type: ignore
            Provider.LVBET: LvBetClient,  # type: ignore
            Provider.BETCLICK: BetClickClient,  # type: ignore
        }
        return client_classes[provider](urls)

    def import_all(self) -> None:
        """Download raw data for every (provider, category) pair and store it."""
        self._logger.info("Importer has started!")
        for provider in Provider:
            for category in Category:
                urls = UrlFactory.create(provider, category).get_urls()
                client = self._get_client(provider, urls)
                self._logger.info(
                    f"Importing data from category={category.value} and provider={provider.value} using {str(client)}"
                )
                content = client.get_raw_data()
                raw_content = RawContent(id=ObjectId(), content=content, category=category, provider=provider)
                self._repository.create(raw_content)
|
19,104 | 6956f57ce6e9e8c5f1f30b5f24b3bd2fdce01869 | import requests
import telegram
import json
from bs4 import BeautifulSoup
from apscheduler.schedulers.blocking import BlockingScheduler
# Telegram bot credentials and chat id are kept out of the source in token.json.
with open('token.json') as f:
    token_json = json.loads(f.read())
bot = telegram.Bot(token=token_json["token"])
# Target day-of-month (zero padded), movie title, and polling interval (seconds).
D_DAY = "09"
MOVIE = "조커"
TIME = 10
# CGV showtimes iframe for area 01 / theater 0150 on 2019-10-<D_DAY>.
url = 'http://www.cgv.co.kr/common/showtimes/iframeTheater.aspx?areacode=01&theatercode=0150&date=201910' + D_DAY
html = requests.get(url)
soup = BeautifulSoup(html.text, 'html.parser')
# Currently selected day tab and the movie titles — scraped once at startup.
day = soup.select_one('li.on > div > a > strong').text.strip()
titleMovie = []
title = soup.select('div.info-movie > a > strong')
def crawling_function():
    """Re-scrape the CGV showtime page; notify via Telegram and stop polling
    once MOVIE becomes bookable on D_DAY.

    The page is fetched on every run: the module-level scrape only reflects
    the state at startup, so repeatedly checking that stale snapshot (as the
    original did) could never detect the ticket opening. Using a fresh local
    list also stops the global ``titleMovie`` from growing without bound on
    every interval.
    """
    page = requests.get(url)
    page_soup = BeautifulSoup(page.text, 'html.parser')
    current_day = page_soup.select_one('li.on > div > a > strong').text.strip()
    movies = [tag.text.strip()
              for tag in page_soup.select('div.info-movie > a > strong')]
    if (MOVIE in movies and current_day == D_DAY):
        messageSuccess = D_DAY + "일 " + MOVIE + "의 예매가 열렸습니다."
        bot.sendMessage(chat_id=token_json["user_id"], text=messageSuccess)
        # Stop further polling once the notification has been sent.
        sched.pause()
# Poll every TIME seconds; start() blocks the main thread until paused/stopped.
sched = BlockingScheduler()
sched.add_job(crawling_function, 'interval', seconds=TIME)
sched.start()
|
19,105 | f4dbb201d865da459e1c07d65da90e2be4751f2e | a=10
b=20
c=30
print (a+b)
|
19,106 | 9cd1357ed5960644e5688053921a1a0a2b1ed226 | # -*- coding: utf-8 -*-
import yaml
import logging
from ruc.reducedPattern import reducedPattern
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
def Run():
    """Build options from the config file and trigger the pattern download."""
    options = makeOptions()
    reduced_pattern = reducedPattern(options)
    # Called for its side effect; the original bound the result to an
    # unused local.
    reduced_pattern.download()
def makeOptions():
    """Load config/config.yml and return the options dict for reducedPattern.

    Returns:
        dict with keys 'url', 'zip_path', 'unzip_path', 'ext'.

    Raises:
        Exception: if the config file is empty or malformed.
        KeyError: if a required section/key is missing.
    """
    # `with` guarantees the handle is closed even on error (the original
    # leaked it), and safe_load avoids constructing arbitrary Python objects
    # from the YAML file (yaml.load without a Loader is unsafe/deprecated).
    with open('config/config.yml', 'r') as file:
        config = yaml.safe_load(file)
    if (config is None):
        raise Exception('Config file not found or format error.')
    options = {}
    options['url'] = config['sunat']['url']
    options['zip_path'] = config['paths']['zip']
    options['unzip_path'] = config['paths']['unzip']
    options['ext'] = config['paths']['ext']
    return options
if __name__ == '__main__':
    try:
        Run()
    except Exception as e:
        # Log and swallow: the process exits cleanly after reporting.
        msg = 'Error: %s' % (e)
        logging.error(msg)
19,107 | 2089628471d7cbff0ad4aa2d35b482f9cfc0eeb7 | #!/usr/bin/env python
import cgi
import cgitb; cgitb.enable()
import traceback
import os
import os.path
import sys
import re
import math
import glob
import time
import subprocess
import random
import importlib
import operator
import collections
import urllib
def main():
    """Render the index page: header, per-user question list, 'Add question' link."""
    startHomePage()
    addContainer()
    addHome()
    startList()
    # Flat file listing the users whose question directories get scanned.
    users_file = '/home/unixtool/data/question/users'
    # Flush so the HTML emitted so far precedes any subprocess output.
    sys.stdout.flush()
    getUsers(users_file)
    endList()
    addQuestion()
    endHomePage()
def getUsers(users_file):
    """The user names get read from the users file here, one per line."""
    user_names = tuple(open(users_file, 'r'));
    for user_name in user_names:
        clean_user_name = user_name.rstrip("\n")
        listQuestions(clean_user_name)

def listQuestions(clean_user_name):
    """The questions for the user get read here (from ~user/.question/questions)."""
    DEVNULL = open(os.devnull, 'wb')
    # `ls -1` of the user's question directory; stderr discarded so missing
    # directories simply yield an empty listing.
    question_names_pipe = subprocess.Popen(['ls', '-1', "/home/"+clean_user_name+"/.question/questions"], stdout=subprocess.PIPE, stderr=DEVNULL)
    question_names = question_names_pipe.stdout.read().split('\n')
    # Drop the empty element produced by the trailing newline.
    question_names.pop()
    question_names.sort()
    for question_name in question_names:
        clean_question_name = question_name.rstrip("\n")
        if (clean_user_name != "" and clean_question_name != ""):
            generateQuestionList(clean_user_name, clean_question_name)
def viewQuestion(clean_user_name, clean_question_name):
    """Render the page for a single question: its answers, votes, 'Add answer' link."""
    startHomePage()
    addContainer()
    addHome()
    getQuestionAnswers(clean_user_name, clean_question_name)
    addAnswer(clean_user_name, clean_question_name)
    endHomePage()
def startHomePage():
    """Emit the CGI content-type header and opening HTML for every page."""
    print("Content-type: text/html\n\n")
    print("<html>")
    print("<Title>Nikita: Assignment4</Title>")
    print("<body style=\"background-color: #57068c\">")

def addContainer():
    # Opens the page-body container div; closed by endHomePage().
    print("<div style=\"border: 4px solid black; width: 90%; position: absolute; margin: 4% 6% 5% 4%; background-color: #99CCFF;\">")

def addHome():
    """Render the 'Home' link back to the question index."""
    print("<div style=\"font-size: 1.5em; line-height: 1.7em; margin-left: 12%;\">")
    print("<b>")
    print("<u>")
    print("<a href=\"http://cims.nyu.edu/~nn899/cgi-bin/question.cgi\"; style=\"color: black\";>")
    print("<font color=\"#57068c\">")
    print("Home")
    print("</font>")
    print("</a>")
    print("</u>")
    print("</b>")
    print("</div>")

def startList():
    # Opens the bulleted question list; closed by endList().
    print("<ul style=\"list-style-type: disc; margin-left: 4%\">")

def endList():
    print("</ul>")
def getQuestionAnswers(clean_user_name, clean_question_name):
    """Render one question plus its answers, each with score and Up/Down forms.

    The external `question view` helper emits the question and its answers
    separated by "====" lines; each section starts with a numeric score line
    followed by the text. Python 2 only: `print("fmt") % args` is the print
    STATEMENT applied to the %-formatted expression.
    NOTE(review): indentation below was reconstructed from a whitespace-mangled
    source; verify branch nesting against the deployed script.
    """
    print("<br/>")
    DEVNULL = open(os.devnull, 'wb')
    question_pipe = subprocess.Popen(['/home/nn899/bin/question', 'view', clean_user_name+"/"+clean_question_name], stdout=subprocess.PIPE, stderr=DEVNULL)
    question = question_pipe.stdout.read().split("====\n")
    print("<div style=\"font-size: 1.5em; line-height: 1.7em; margin-left: 8%; margin-right: 8%;\">")
    print("<b>")
    # Case 1: question only, no answers.
    if len(question) == 1:
        # Split the leading score line from the question text.
        question_only = question[0].rstrip("\n").split("\n", 1)
        if (len(question_only) == 1):
            question_only.append("")
        print("<div style=\"display: inline-block; width: 60%; margin-left: 5%; word-wrap: break-word;\">")
        print("<pre style=\"font-size: 1.5em; line-height:1.7em; white-space: pre-wrap; word-wrap: break-word;\">")
        print(cgi.escape(question_only[1], True))
        print("</pre>")
        print("</div>")
        print("</b>")
        print("<br/>")
        # An empty score line counts as zero.
        if not question_only[0]:
            question_only[0] = 0
        question_only[0] = int(question_only[0])
        print("<div style=\"display: inline-block; width: 60%; margin-left: 5%;\">")
        print("</div>")
        print("<div style=\"display: inline-block; width: 10%;\">")
        # Positive scores are shown with an explicit plus sign.
        if (question_only[0] > 0):
            print("%+d") % question_only[0]
        else:
            print(question_only[0])
        print("</div>")
        print("<div style=\"display: inline-block; width: 10%;\">")
        print("<form action=\"question.cgi\" method=\"GET\">")
        print("<input type=\"hidden\" name=\"user_name\" value=\"%s\">") %(clean_user_name)
        print("<input type=\"hidden\" name=\"question_name\" value=\"%s\">") %(urllib.quote_plus(clean_question_name))
        print("<button type=\"submit\" name=\"vote\" value=\"up\" onclick=\"javascript:window.location='question.cgi';\">Up</button>")
        print("</form>")
        print("</div>")
        print("<div style=\"display: inline-block; width: 10%;\">")
        print("<form action=\"question.cgi\" method=\"GET\">")
        print("<input type=\"hidden\" name=\"user_name\" value=\"%s\">") %(clean_user_name)
        print("<input type=\"hidden\" name=\"question_name\" value=\"%s\">") %(urllib.quote_plus(clean_question_name))
        print("<button type=\"submit\" name=\"vote\" value=\"down\" onclick=\"javascript:window.location='question.cgi';\">Down</button>")
        print("</form>")
        print("</div>")
        print("<br/>")
        print("<hr style=\"border: 0.1em solid black; width: 94%;\">")
    # Case 2: question followed by one or more answers.
    if len(question) >= 2:
        question_only = question[0].rstrip("\n").split("\n", 1)
        if (len(question_only) == 1):
            question_only.append("")
        print("<div style=\"display: inline-block; width: 60%; margin-left: 5%; word-wrap: break-word;\">")
        print("<pre style=\"font-size: 1.5em; line-height:1.7em; white-space: pre-wrap; word-wrap: break-word;\">")
        print(cgi.escape(question_only[1], True))
        print("</pre>")
        print("</div>")
        print("</b>")
        print("<br/>")
        if not question_only[0]:
            question_only[0] = 0
        question_only[0] = int(question_only[0])
        print("<div style=\"display: inline-block; width: 60%; margin-left: 5%\">")
        print("</div>")
        print("<div style=\"display: inline-block; width: 10%;\">")
        if (question_only[0] > 0):
            print("%+d") % question_only[0]
        else:
            print(question_only[0])
        print("</div>")
        print("<div style=\"display: inline-block; width: 10%;\">")
        print("<form action=\"question.cgi\" method=\"GET\">")
        print("<input type=\"hidden\" name=\"user_name\" value=\"%s\">") %(clean_user_name)
        print("<input type=\"hidden\" name=\"question_name\" value=\"%s\">") %(urllib.quote_plus(clean_question_name))
        print("<button type=\"submit\" name=\"vote\" value=\"up\" onclick=\"javascript:window.location='question.cgi';\">Up</button>")
        print("</form>")
        print("</div>")
        print("<div style=\"display: inline-block; width: 10%;\">")
        print("<form action=\"question.cgi\" method=\"GET\">")
        print("<input type=\"hidden\" name=\"user_name\" value=\"%s\">") %(clean_user_name)
        print("<input type=\"hidden\" name=\"question_name\" value=\"%s\">") %(urllib.quote_plus(clean_question_name))
        print("<button type=\"submit\" name=\"vote\" value=\"down\" onclick=\"javascript:window.location='question.cgi';\">Down</button>")
        print("</form>")
        print("</div>")
        print("<br/>")
        print("<hr style=\"border: 0.1em solid black; width: 94%;\">")
        # Remaining sections are answers: "<score>\n<text> <user/answer-id>".
        answer_only = question[1:len(question)]
        j = 0
        while (j < len(answer_only)):
            answer_only[j] = answer_only[j].lstrip("\n").rstrip("\n").split("\n", 1)
            # Score-only (empty) sections are skipped.
            if (len(answer_only[j]) == 1):
                #answer_only[j].append("")
                j = j + 1
                continue
            if not answer_only[j][0]:
                answer_only[j][0] = 0
            answer_only[j][0] = int(answer_only[j][0])
            j = j + 1
        # Map answer text -> score, then sort by score descending.
        answer_tuple = tuple(tuple(x) for x in answer_only)
        if len(answer_tuple[len(answer_tuple) - 1]) == 1:
            answer_tuple = answer_tuple[0:(len(answer_tuple) - 1)]
        answer_dictionary = dict((y,x) for x,y in answer_tuple)
        answers = collections.OrderedDict(sorted(answer_dictionary.items(), key = operator.itemgetter(1), reverse = True))
        print("<br/>")
        i = 0
        while (i < len(answers)):
            # Split the trailing "user/answer-id" token off the answer text.
            m = re.match("^(.*)\s+([^/\s]+/[^/]*)$", answers.items()[i][0], re.DOTALL)
            if m:
                print("<div style=\"display: inline-block; width: 60%; margin-left: 5%; word-wrap: break-word;\">")
                print("<pre style=\"font-size: 1.5em; line-height:1.7em; white-space: pre-wrap; word-wrap: break-word;\">")
                print(cgi.escape(m.group(1), True))
                print("</pre>")
                print("</div>")
            else:
                print("<div style=\"display: inline-block; width: 60%;\">")
                print("</div>")
            # NOTE(review): if m is None this raises AttributeError — presumably
            # every answer carries an id; confirm the upstream format.
            if m.group(2):
                answer_id = m.group(2)
            print("<div style=\"display: inline-block; width: 10%;\">")
            if (answers.items()[i][1] > 0):
                print("%+d") % answers.items()[i][1]
            else:
                print(answers.items()[i][1])
            print("</div>")
            print("<div style=\"display: inline-block; width: 10%;\">")
            print("<form action=\"question.cgi\" method=\"GET\">")
            print("<input type=\"hidden\" name=\"user_name\" value=\"%s\">") %(clean_user_name)
            print("<input type=\"hidden\" name=\"question_name\" value=\"%s\">") %(urllib.quote_plus(clean_question_name))
            print("<input type=\"hidden\" name=\"answer_id\" value=\"%s\">") %(urllib.quote_plus(answer_id))
            print("<button type=\"submit\" name=\"vote\" value=\"up\" onclick=\"javascript:window.location='question.cgi';\">Up</button>")
            print("</form>")
            print("</div>")
            print("<div style=\"display: inline-block; width: 10%;\">")
            print("<form action=\"question.cgi\" method=\"GET\">")
            print("<input type=\"hidden\" name=\"user_name\" value=\"%s\">") %(clean_user_name)
            print("<input type=\"hidden\" name=\"question_name\" value=\"%s\">") %(urllib.quote_plus(clean_question_name))
            print("<input type=\"hidden\" name=\"answer_id\" value=\"%s\">") %(urllib.quote_plus(answer_id))
            print("<button type=\"submit\" name=\"vote\" value=\"down\" onclick=\"javascript:window.location='question.cgi';\">Down</button>")
            print("</form>")
            print("</div>")
            print("<br/>")
            print("<hr style=\"border: 0.05em solid black; width: 94%;\">")
            i = i + 1
        print("<br/>")
    print("</div>")
def addQuestion():
    """Render the 'Add question' link."""
    print("<div style=\"font-size: 1.5em; line-height: 1.7em; margin-left: 12%;\">")
    print("<b>")
    print("<u>")
    print("<a href=\"http://cims.nyu.edu/~nn899/cgi-bin/question.cgi?add_question=true\"; style=\"color: black\";>")
    print("Add question")
    print("</a>")
    print("</u>")
    print("</b>")
    print("</div>")
    print("<br/>")

def addAnswer(clean_user_name, clean_question_name):
    """Render the 'Add answer' link for one question."""
    print("<div style=\"font-size: 1.5em; line-height: 1.7em; margin-left: 12%;\">")
    print("<b>")
    print("<u>")
    # Python 2 print statement: the %-formatting applies to the string.
    print("<a href=\"http://cims.nyu.edu/~nn899/cgi-bin/question.cgi?add_answer=true&user_name=%s&question_name=%s\"; style=\"color: black\";>") %(urllib.quote_plus(clean_user_name), urllib.quote_plus(clean_question_name))
    print("Add answer")
    print("</a>")
    print("</u>")
    print("</b>")
    print("</div>")
    print("<br/>")

def endHomePage():
    """Close the container div and the HTML document."""
    print("</div>")
    print("</body>")
    print("</html>")
def generateQuestionList(clean_user_name, clean_question_name):
    """Render one list item linking to a user's question."""
    print("<li style=\"font-size: 1.5em; line-height: 1.7em; margin-left: 6%;\">")
    print("<u>")
    print("<a href=\"http://cims.nyu.edu/~nn899/cgi-bin/question.cgi?user_name=%s&question_name=%s\"; style=\"color: black\";>") %(urllib.quote_plus(clean_user_name), urllib.quote_plus(clean_question_name))
    print(clean_user_name+"/"+cgi.escape(clean_question_name, True))
    print("</a>")
    print("</u>")
    print("<br/>")
    print("</li>")

def generateQuestionName():
    """Return the first unused question name of the form q1, q2, ..."""
    i = 1
    j = 'q'
    # Python 2 backtick repr: `i` == repr(i).
    temp_question_name = j + `i`
    existing_questions = os.listdir("/home/nn899/.question/questions")
    while (temp_question_name in existing_questions):
        i = i + 1
        temp_question_name = j + `i`
    return temp_question_name

def generateAnswerName(clean_user_name, clean_question_name):
    """Return the first unused answer name (a1, a2, ...) for the question."""
    i = 1
    j = 'a'
    temp_answer_name = j + `i`
    # No answers directory yet means 'a1' is free.
    if (os.path.isdir("/home/nn899/.question/answers/"+clean_user_name+"/"+clean_question_name)):
        existing_answers = os.listdir("/home/nn899/.question/answers/"+clean_user_name+"/"+clean_question_name)
        while (temp_answer_name in existing_answers):
            i = i + 1
            temp_answer_name = j + `i`
    return temp_answer_name
def createQuestion(unique_question_name):
    """Render the new-question form; submission posts back to question.cgi."""
    startHomePage()
    addContainer()
    addHome()
    print("<br/>")
    print("<div style=\"font-size: 1.5em; line-height: 1.7em; margin-left: 12%;\">")
    print("What is your question?")
    print("</div>")
    print("<div style=\"font-size: 1.5em; line-height: 1.7em; margin-left: 12%;\">")
    print("<form action=\"question.cgi\" method=\"POST\">")
    print("<textarea name=\"question\" style=\"width: 91%; height: 10em; font-size: 1.1em; line-height: 1em;\" required></textarea>")
    print("<br/>")
    # The pre-generated question name travels as a hidden field.
    print("<input type=\"hidden\" name=\"question_name\" value=\"%s\">") %(unique_question_name)
    print("<input type=\"submit\" value=\"Submit\">")
    print("<button type=\"reset\" value=\"Reset\">Reset</button>")
    print("<button type=\"reset\" onclick=\"javascript:window.location='question.cgi';\">Cancel</button>")
    print("</form>")
    print("</div>")
    endHomePage()

def createAnswer(clean_user_name, clean_question_name, unique_answer_name):
    """Render the new-answer form for one question."""
    startHomePage()
    addContainer()
    addHome()
    print("<br/>")
    print("<div style=\"font-size: 1.5em; line-height: 1.7em; margin-left: 12%;\">")
    print("What is your answer?")
    print("</div>")
    print("<div style=\"font-size: 1.5em; line-height: 1.7em; margin-left: 12%;\">")
    print("<form action=\"question.cgi\" method=\"POST\">")
    print("<textarea name=\"answer\" style=\"width: 91%; height: 10em; font-size: 1.1em; line-height: 1em;\" required></textarea>")
    print("<br/>")
    print("<input type=\"hidden\" name=\"user_name\" value=\"%s\">") %(clean_user_name)
    print("<input type=\"hidden\" name=\"question_name\" value=\"%s\">") %(urllib.quote_plus(clean_question_name))
    print("<input type=\"hidden\" name=\"answer_name\" value=\"%s\">") %(unique_answer_name)
    print("<input type=\"submit\" value=\"Submit\">")
    print("<button type=\"reset\" value=\"Reset\">Reset</button>")
    print("<button type=\"reset\" onclick=\"javascript:window.location='question.cgi?user_name=%s&question_name=%s';\">Cancel</button>") %(clean_user_name, urllib.quote_plus(clean_question_name))
    print("</form>")
    print("</div>")
    endHomePage()
# Top-level CGI dispatch: route on which form fields are present.
try:
    form = cgi.FieldStorage()
    # No parameters: render the index page.
    if (("user_name" not in form) and ("question_name" not in form) and ("add_question" not in form) and ("add_answer" not in form) and ("answer_name" not in form)):
        main()
    # View a single question (no vote, no answer being added).
    if (("user_name" in form) and ("question_name" in form) and ("add_answer" not in form) and ("answer_name" not in form) and ("vote" not in form)):
        clean_user_name = form["user_name"].value
        clean_question_name = urllib.unquote_plus(form["question_name"].value)
        viewQuestion(clean_user_name, clean_question_name)
    # Show the new-question form.
    if ("add_question" in form):
        unique_question_name = generateQuestionName()
        createQuestion(unique_question_name)
    # Show the new-answer form.
    if ("add_answer" in form):
        clean_user_name = form["user_name"].value
        clean_question_name = urllib.unquote_plus(form["question_name"].value)
        unique_answer_name = generateAnswerName(clean_user_name, clean_question_name)
        createAnswer(clean_user_name, clean_question_name, unique_answer_name)
    # Question form submitted: create it via the helper binary, then re-index.
    if (("question_name" in form) and ("question" in form)):
        question_name = form["question_name"].value
        clean_question = form["question"].value
        DEVNULL = open(os.devnull, 'wb')
        question_pipe = subprocess.Popen(['/home/nn899/bin/question', 'create', question_name, clean_question], stdout=subprocess.PIPE, stderr=DEVNULL)
        main()
    # Answer form submitted: record it, then re-render the question.
    if (("user_name" in form) and ("question_name" in form) and ("answer" in form) and ("answer_name" in form)):
        answer_name = form["answer_name"].value
        clean_answer = form["answer"].value
        clean_user_name = form["user_name"].value
        clean_question_name = urllib.unquote_plus(form["question_name"].value)
        DEVNULL = open(os.devnull, 'wb')
        answer_pipe = subprocess.Popen(['/home/nn899/bin/question', 'answer', clean_user_name+"/"+clean_question_name, answer_name, clean_answer], stdout=subprocess.PIPE, stderr=DEVNULL)
        viewQuestion(clean_user_name, clean_question_name)
    # Vote on a question, or (with answer_id) on an answer.
    if (("user_name" in form) and ("question_name" in form) and ("vote" in form)):
        user_name = form["user_name"].value
        question_name = urllib.unquote_plus(form["question_name"].value)
        vote = form["vote"].value
        DEVNULL = open(os.devnull, 'wb')
        if ("answer_id" in form):
            answer_id = urllib.unquote_plus(form["answer_id"].value)
            vote_pipe = subprocess.Popen(['/home/nn899/bin/question', 'vote', vote, user_name+"/"+question_name, answer_id], stdout=subprocess.PIPE, stderr=DEVNULL)
        else:
            vote_pipe = subprocess.Popen(['/home/nn899/bin/question', 'vote', vote, user_name+"/"+question_name], stdout=subprocess.PIPE, stderr=DEVNULL)
        viewQuestion(user_name, question_name)
# Bare except is deliberate for CGI: any failure is reported to the browser.
except:
    cgi.print_exception()
|
19,108 | 7e5d63c4510824bb7a2b3b90dbc7886a49be44fd | import nutkit.protocol as types
from tests.shared import get_driver_name
from tests.tls.shared import (
TestkitTlsTestCase,
TlsServer,
)
class TestTlsVersions(TestkitTlsTestCase):
    """Verify driver behavior against servers pinned to a single TLS version:
    the connection must succeed iff the driver advertises support for it."""

    def setUp(self):
        super().setUp()
        self._server = None
        self._driver = get_driver_name()

    def tearDown(self):
        if self._server:
            # If test raised an exception this will make sure that the stub
            # server is killed and it's output is dumped for analysis.
            self._server.reset()
            self._server = None
        super().tearDown()

    def _try_connect_smart(self):
        """Connect using whichever SSL configuration style the driver supports."""
        if self.driver_supports_features(types.Feature.API_SSL_SCHEMES):
            return super()._try_connect(self._server, "neo4j+s", "thehost")
        elif self.driver_supports_features(types.Feature.API_SSL_CONFIG):
            return super()._try_connect(self._server, "neo4j", "thehost",
                                        encrypted=True)
        self.skipTest("Needs support for either of %s" % ", ".join(
            map(lambda f: f.value,
                (types.Feature.API_SSL_SCHEMES, types.Feature.API_SSL_CONFIG))
        ))

    def _check_tls_version(self, tls_code, feature):
        """Start a server pinned to one TLS version (shared body of the three
        test_1_x methods, which previously duplicated it) and assert the
        driver connects iff it supports *feature*."""
        self._server = TlsServer("trustedRoot_thehost",
                                 min_tls=tls_code, max_tls=tls_code)
        if self.driver_supports_features(feature):
            self.assertTrue(self._try_connect_smart())
        else:
            self.assertFalse(self._try_connect_smart())

    def test_1_1(self):
        if self._driver in ["dotnet"]:
            self.skipTest("TLS 1.1 is not supported")
        self._check_tls_version("1", types.Feature.TLS_1_1)

    def test_1_2(self):
        self._check_tls_version("2", types.Feature.TLS_1_2)

    def test_1_3(self):
        self._check_tls_version("3", types.Feature.TLS_1_3)
|
19,109 | 731b0a979ccafbcd2d3890cfaefccbe1b01f9ace | """
author : halo2halo
date : 31, Jan, 2020
"""
import sys
# import itertools
# import numpy as np
# Fast-IO aliases (competitive-programming boilerplate).
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
# Two whitespace-separated tokens; print 'H' when equal, 'D' otherwise.
a, b = read().decode('utf8').split()
print('H' if a==b else 'D')
|
19,110 | c306ceb10f90897804f45f45726eca44a9bff676 | from . import persister
from .addcoltfm import AddColTfm
from .badindicatortfm import BadIndicatorTfm
from .categorizetfm import CategorizeTfm
from .coltag import ColTag
from .droptfm import DropTfm
from .funcreplacetfm import FuncReplaceTfm
from .globals import *
from .graphics import *
from .help import *
from .imputer import Imputer
from .lag import *
from .lepdataframe import LepDataFrame
from .medianreplacetfm import MedianReplaceTfm
from .mergeddataframe import MergedDataFrame, merged_data, merged_data_no_target
from .onehottfm import OnehotTfm
from .skewtfm import SkewTfm
from .typetfm import TypeTfm
from .votingregressor import VotingRegressor |
19,111 | 59b19e32c14bd85c6deea6c3381cc922f5a49289 | from person import Person
from typing import List
import logging
class Employees:
    """A catalog of employed Person records."""

    # All Person records currently in the catalog.
    _workers: List[Person]

    def __init__(self):
        """Initialise an empty Employee Catalog."""
        self._workers = []

    def add_person(self, person: Person):
        """Add person to the Employee Catalog."""
        # Dead commented-out logging removed; re-add via the logging module
        # if auditing is needed.
        self._workers.append(person)

    def give_workers_list(self):
        """Return all of the workers (the live internal list, not a copy)."""
        return self._workers
|
19,112 | e6aee83f73adb1e7da66f8c7fcd1c1bfcfbe80a6 | # -*- coding: utf-8 -*-
"""
@author : MG
@Time : 2017/11/18
@author : MG
@desc : 监控csv文件(每15秒)
对比当前日期与回测csv文件中最新记录是否匹配,存在最新交易请求,生成交易请求(order*.csv文件)
追踪tick级行情实时成交(仅适合小资金)
追踪tick级行情本金买入点 * N % 止损
目前仅支持做多,不支持做空
每15秒进行一次文件检查
文件格式(csv xls),每个symbol一行,不可重复,例:卖出eth 买入eos, 不考虑套利的情况(套利需要单独开发其他策略)
显示如下(非文件格式,csv文件以‘,’为分隔符,这个近为视觉好看,以表格形式显示):
currency symbol weight stop_loss_rate
eth ethusdt 0.5 0.3
eos eosusdt 0.5 0.4
"""
import re
import threading
import time
import logging
import pandas as pd
from datetime import datetime, timedelta, date
from abat.strategy import StgBase, StgHandlerBase
from abat.utils.fh_utils import str_2_date
from config import Config
from abat.common import PeriodType, RunMode, BacktestTradeMode, Direction, PositionDateType
from collections import defaultdict
import os
import json
# 下面代码是必要的引用
# md_agent md_agent 并没有“显式”的被使用,但是在被引用期间,已经将相应的 agent 类注册到了相应的列表中
import agent.md_agent
import agent.td_agent
# True reduces the default position weight and skips file backups while debugging.
DEBUG = False
# Backtest files look like "TradeBookCryptoCurrency2018-10-08.csv"; the pattern
# captures the embedded date.
PATTERN_BACKTEST_FILE_NAME = re.compile(r'(?<=^TradeBookCryptoCurrency)[\d\-]{8,10}(?=.csv$)')
# Generated order-instruction files and their feedback counterparts.
PATTERN_ORDER_FILE_NAME = re.compile(r'^order.*\.csv$')
PATTERN_FEEDBACK_FILE_NAME = re.compile(r'^feedback.*\.json$')
class TargetPosition:
    """Target holding for one symbol: direction, size, price and stop-loss state."""

    def __init__(self, direction: Direction, currency, position, symbol,
                 price=None, stop_loss_price=None, has_stop_loss=False, gap_threshold_vol=0.01):
        self.direction = direction
        self.currency = currency
        self.position = position
        self.symbol = symbol
        self.price = price
        self.stop_loss_price = stop_loss_price
        self.has_stop_loss = has_stop_loss
        # Volume gap below which no rebalancing order is issued.
        self.gap_threshold_vol = gap_threshold_vol

    def check_stop_loss(self, close):
        """
        Determine from the current price whether the stop-loss level is hit.
        Once triggered, the flag is never compared again nor reset.
        :param close: latest price for this symbol
        :return:
        """
        # Already triggered, or no stop configured: nothing to do.
        if self.stop_loss_price is None or self.has_stop_loss:
            return
        self.has_stop_loss = (self.direction == Direction.Long and close < self.stop_loss_price) or (
            self.direction == Direction.Short and close > self.stop_loss_price)
        if self.has_stop_loss:
            logging.warning('%s 处于止损状态。止损价格 %f 当前价格 %f', self.symbol, self.stop_loss_price, close)

    def get_target_position(self):
        # Tuple mirror of the constructor arguments, in the same order.
        return self.direction, self.currency, self.position, self.symbol, \
            self.price, self.stop_loss_price, self.has_stop_loss, \
            self.gap_threshold_vol

    def to_dict(self):
        # Public, non-callable attributes only.
        return {attr: getattr(self, attr) for attr in dir(self)
                if attr.find('_') != 0 and not callable(getattr(self, attr))}

    def __repr__(self):
        return f'<TargetPosition(symbol={self.symbol}, direction={int(self.direction)}, ' \
            f'position={self.position}, price={self.price}, stop_loss_price={self.stop_loss_price}, ' \
            f'has_stop_loss={self.has_stop_loss}, gap_threshold_vol={self.gap_threshold_vol})>'
class ReadFileStg(StgBase):
_folder_path = os.path.abspath(os.path.join(os.path.curdir, r'file_order'))
def __init__(self, symbol_list=None):
    """Set up file-watching state, throttling intervals and defaults.

    :param symbol_list: symbols subscribed for market data; None means
        no restriction when deciding which holdings may be touched.
    """
    super().__init__()
    self.symbol_list = symbol_list
    self._mutex = threading.Lock()
    self._last_check_datetime = datetime.now() - timedelta(minutes=1)
    self.interval_timedelta = timedelta(seconds=15)
    self.symbol_target_position_dic = {}
    # Parameter type accepted by the event handler for each period.
    self._on_period_event_dic[PeriodType.Tick].param_type = dict
    # Time of the most recent executed action per symbol.
    self.symbol_last_deal_datetime = {}
    # Time of the most recent buy/sell request per instrument.
    self.instrument_lastest_order_datetime_dic = {}
    # Trades execute asynchronously; without per-order bookkeeping, acting too
    # frequently lets position and pending requests drift out of sync and
    # causes over-ordering — hence this minimum gap between deals.
    self.timedelta_between_deal = timedelta(seconds=3)
    self.min_order_vol = 0.1
    self.symbol_latest_price_dic = defaultdict(float)
    self.weight = 1 if not DEBUG else 0.2  # default position weight
    self.stop_loss_rate = -0.03
    self.logger.info('接受订单文件目录:%s', self._folder_path)
    self.load_feedback_file()
def fetch_pos_by_file(self):
    """Read the order csv files and return (position_df, file_path_list).

    Returns ``(None, [])`` when throttled or when no valid order file is
    found, so callers can always unpack the result as a 2-tuple (the
    original returned a bare None on the throttle path, which crashed
    callers that unpack). Every file is now validated for the required
    columns (the original skipped validation for the first file read).
    """
    # Throttle: skip if the previous scan was less than interval_timedelta ago.
    if self._last_check_datetime + self.interval_timedelta > datetime.now():
        return None, []
    file_name_list = os.listdir(self._folder_path)
    if file_name_list is None:
        return None, []
    position_df = None
    file_path_list = []
    required_cols = ('currency', 'symbol', 'weight', 'stop_loss_rate')
    for file_name in file_name_list:
        # Only order*.csv files are processed.
        if PATTERN_ORDER_FILE_NAME.search(file_name) is None:
            continue
        self.logger.debug('处理文件 order 文件: %s', file_name)
        file_base_name, file_extension = os.path.splitext(file_name)
        file_path = os.path.join(self._folder_path, file_name)
        position_df_tmp = pd.read_csv(file_path)
        # Validate the schema of every file before merging it.
        missing_col = next(
            (col for col in required_cols if col not in position_df_tmp.columns), None)
        if missing_col is not None:
            self.logger.error('%s 文件格式不正确,缺少 %s 列数据', file_name, missing_col)
            continue
        file_path_list.append(file_path)
        if position_df is None:
            position_df = position_df_tmp
        else:
            position_df = position_df.append(position_df_tmp)
        # Backups are skipped while debugging so the file can be reused.
        if not DEBUG:
            # Rename the processed file so it is not re-imported next scan.
            backup_file_name = f"{file_base_name} {datetime.now().strftime('%Y-%m-%d %H_%M_%S')}" \
                               f"{file_extension}.bak"
            os.rename(file_path, os.path.join(self._folder_path, backup_file_name))
            self.logger.info('备份 order 文件 %s -> %s', file_name, backup_file_name)
    return position_df, file_path_list
def handle_backtest_file(self):
    """
    Process the backtest files (TradeBookCryptoCurrency<date>.csv) and
    generate the corresponding order instruction file (order_<date>.csv).
    :return:
    """
    with self._mutex:
        # List the watched folder.
        file_name_list = os.listdir(self._folder_path)
        if file_name_list is None:
            # self.logger.info('No file')
            return
        # Scan all candidate csv files.
        for file_name in file_name_list:
            file_base_name, file_extension = os.path.splitext(file_name)
            # Only backtest files matching the expected name pattern.
            m = PATTERN_BACKTEST_FILE_NAME.search(file_name)
            if m is None:
                continue
            file_date_str = m.group()
            file_date = str_2_date(file_date_str)
            # Stale files (not dated today) are ignored.
            if file_date != date.today():
                self.logger.warning('文件:%s 日期与当前系统日期 %s 不匹配,不予处理', file_name, date.today())
                continue
            self.logger.debug('处理文件 %s 文件日期:%s', file_name, file_date_str)
            file_path = os.path.join(self._folder_path, file_name)
            data_df = pd.read_csv(file_path)
            if data_df is None or data_df.shape[0] == 0:
                continue
            # The newest backtest row must be for the file's own date.
            if str_2_date(data_df.iloc[-1]['Date']) != file_date:
                self.logger.warning('文件:%s 回测记录中最新日期与当前文件日期 %s 不匹配,不予处理', file_name, file_date)
                continue
            # Build the order-instruction file from the latest backtest row.
            currency = data_df.iloc[-1]['InstruLong'].lower()
            order_dic = {
                'currency': [currency],
                'symbol': [f'{currency}usdt'],
                'weight': [self.weight],
                'stop_loss_rate': [self.stop_loss_rate],
            }
            order_file_name = f'order_{file_date_str}.csv'
            order_file_path = os.path.join(self._folder_path, order_file_name)
            order_df = pd.DataFrame(order_dic)
            order_df.to_csv(order_file_path)
            # Backups are skipped while debugging so the file can be reused.
            if not DEBUG:
                # Rename the processed file so it is not re-processed next scan.
                backup_file_name = f"{file_base_name} {datetime.now().strftime('%Y-%m-%d %H_%M_%S')}" \
                                   f"{file_extension}.bak"
                os.rename(file_path, os.path.join(self._folder_path, backup_file_name))
def handle_order_file(self):
    """
    Read order files to obtain the target holdings (currency, weight,
    stop-loss rate) and build the corresponding trade instructions.

    When new orders are found, all existing feedback files are backed up
    first (new feedback files will be produced for the new orders). The
    resulting instructions are stored in self.symbol_target_position_dic
    and actually executed tick-by-tick in on_tick().
    :return: None
    """
    with self._mutex:
        position_df, file_path_list = self.fetch_pos_by_file()
        if position_df is None or position_df.shape[0] == 0:
            return
        # New orders arrived: back up feedback files from the previous orders.
        self.backup_feedback_files()
        self.logger.debug('仓位调整目标:\n%s', position_df)
        target_holding_dic = position_df.set_index('currency').dropna().to_dict('index')
        if len(self.symbol_latest_price_dic) == 0:
            self.logger.warning('当前程序没有缓存到有效的最新价格数据,交易指令暂缓执行')
            return
        # {symbol: TargetPosition(direction, currency, position, symbol, price, stop_loss_price, ...)}
        symbol_target_position_dic = {}
        # Compare target holdings with current holdings and emit trades to converge.
        target_currency_set = set(list(position_df['currency']))
        holding_currency_dic = self.get_holding_currency()
        # Whether every current holding already matches the target file.
        is_all_fit_target = True
        # Liquidate any held currency that is not in the target list.
        for num, (currency, balance_dic) in enumerate(holding_currency_dic.items(), start=1):
            # Currency is part of the target holdings: nothing to liquidate here.
            if currency in target_currency_set:
                continue
            # hc is the exchange's fee token; it is never traded.
            # if currency == 'hc':
            #     continue
            # Skip liquidation when the total balance is below 0.0001.
            tot_balance = 0
            for _, dic in balance_dic.items():
                tot_balance += dic['balance']
            if tot_balance < 0.0001:
                continue
            symbol = self.get_symbol_by_currency(currency)
            # NOTE(review): `symbol_list` is a module-level global assigned in
            # the __main__ block — confirm it is defined when this strategy is
            # embedded elsewhere.
            if symbol_list is not None and symbol not in symbol_list:
                self.logger.warning('%s 持仓: %.6f 不在当前订阅列表中,也不在目标持仓中,该持仓将不会被操作',
                                    symbol, tot_balance)
                continue
            self.logger.info('计划卖出 %s', symbol)
            # TODO: the minimum order size is available in the database; use it here.
            gap_threshold_vol = 0.1
            symbol_target_position_dic[symbol] = TargetPosition(Direction.Long, currency, 0, symbol,
                                                                gap_threshold_vol=gap_threshold_vol)
            is_all_fit_target = False
        # Build buy instructions for every currency in the target holdings.
        for num, (currency, position_dic) in enumerate(target_holding_dic.items()):
            weight = position_dic['weight']
            stop_loss_rate = position_dic['stop_loss_rate']
            # stop_loss_price = position_dic['stop_loss_rate']
            symbol = self.get_symbol_by_currency(currency)
            target_vol, gap_threshold_vol, stop_loss_price = self.calc_vol_and_stop_loss_price(symbol, weight, stop_loss_rate)
            if target_vol is None:
                self.logger.warning('%s 持仓权重 %.2f %% 无法计算目标持仓量', currency, weight * 100)
                continue
            # An instruction is always emitted regardless of the current
            # holding; the actual-vs-target comparison happens at execution
            # time in on_tick().
            self.logger.info('计划买入 %s 目标仓位:%f 止损价:%f', symbol, target_vol, stop_loss_price)
            symbol_target_position_dic[symbol] = TargetPosition(Direction.Long, currency, target_vol, symbol,
                                                                None, stop_loss_price,
                                                                gap_threshold_vol=gap_threshold_vol)
        symbol_target_position_dic_len = len(symbol_target_position_dic)
        if symbol_target_position_dic_len > 0:
            self.symbol_target_position_dic = symbol_target_position_dic
            self.logger.info('发现新的目标持仓指令:')
            self.logger_symbol_target_position_dic()
            # Persist the accepted instructions to a feedback file.
            self.create_feedback_file()
        else:
            self.symbol_target_position_dic = None
            self.logger.debug('无仓位调整指令')
def logger_symbol_target_position_dic(self):
    """
    Log every entry of the current target-position dictionary.

    Fix: entries are now numbered 1..N (``enumerate(..., start=1)``),
    consistent with the other enumerations in this class; the original
    started at 0 and displayed "0/N".
    :return: None
    """
    total = len(self.symbol_target_position_dic)
    for num, (symbol, target_position) in enumerate(self.symbol_target_position_dic.items(), start=1):
        self.logger.info('%d/%d) %s, %r', num, total, symbol, target_position)
def on_timer(self):
    """
    Periodic (every 15 s) poll:
    1) refresh account balances;
    2) convert a matching backtest file (e.g.
       "TradeBookCryptoCurrency2018-10-08.csv") into an order file
       "order_2018-10-08.csv";
    3) read order files and build target-position instructions.
    :return: None
    """
    self.get_balance()
    self.handle_backtest_file()
    self.handle_order_file()
def do_order(self, md_dic, instrument_id, order_vol, price=None, direction=Direction.Long, stop_loss_price=0,
             msg=""):
    """
    Place a long-side order.

    :param md_dic: latest market-data dict; its 'close' is used when no
        explicit price is given
    :param instrument_id: trading symbol
    :param order_vol: signed volume — positive opens/adds to a long position,
        negative closes part of it, 0 is a no-op
    :param price: limit price; falls back to md_dic['close'] when None or 0
    :param direction: only Direction.Long is supported; anything else raises
    :param stop_loss_price: when > 0 and the order price is already at or
        below it, the buy is suppressed
    :param msg: human-readable reason, used only for logging
    :raises ValueError: if direction is not Direction.Long
    """
    if direction == Direction.Long:
        if order_vol == 0:
            # No-op; note the last-order timestamp below is NOT updated in
            # this case because of the early return.
            return
        elif order_vol > 0:
            if price is None or price == 0:
                price = md_dic['close']
            # TODO: later, quote at the best ask from the order book.
            if stop_loss_price is not None and stop_loss_price > 0 and price <= stop_loss_price:
                self.logger.warning('%s 当前价格 %.6f 已经触发止损价 %.6f 停止买入操作',
                                    instrument_id, price, stop_loss_price)
                return
            self.open_long(instrument_id, price, order_vol)
            self.logger.info("%s %s -> 开多 %.4f 价格:%.4f", instrument_id, msg, order_vol, price)
        elif order_vol < 0:
            if price is None or price == 0:
                price = md_dic['close']
            # TODO: later, quote at the best bid from the order book.
            order_vol_net = -order_vol
            self.close_long(instrument_id, price, order_vol_net)
            self.logger.info("%s %s -> 平多 %.4f 价格:%.4f", instrument_id, msg, order_vol_net, price)
    else:
        raise ValueError('目前不支持做空')
    self.instrument_lastest_order_datetime_dic[instrument_id] = datetime.now()
def on_tick(self, md_dic, context):
    """
    Handle one tick of market data: cache the latest price and, when a
    target position exists for the symbol, converge the actual holding
    towards it (open / add / reduce / stop-loss liquidation).

    :param md_dic: market-data dict; must contain 'symbol' and 'close'
    :param context: framework context (unused here)
    :return: None
    """
    # self.logger.debug('get tick data: %s', md_dic)
    symbol = md_dic['symbol']
    # Cache the latest close price.
    close_cur = md_dic['close']
    self.symbol_latest_price_dic[symbol] = close_cur
    # Nothing to do unless a target position exists for this symbol.
    if self.symbol_target_position_dic is None or symbol not in self.symbol_target_position_dic:
        # self.logger.debug("当前 symbol='%s' 无操作", symbol)
        return
    if self.datetime_last_update_position is None:
        self.logger.debug("尚未获取持仓数据,跳过")
        return
    target_currency = self.trade_agent.get_currency(symbol)
    # If this symbol has a recent trade confirmation, the position snapshot
    # must be newer than that confirmation; otherwise the holdings may not
    # yet reflect the fill, which could cause duplicated orders.
    if symbol in self.datetime_last_rtn_trade_dic:
        if target_currency not in self.datetime_last_update_position_dic:
            logging.debug("持仓数据中没有包含当前合约,最近一次成交回报时间:%s,跳过",
                          self.datetime_last_rtn_trade_dic[symbol])
            self.get_position(symbol, force_refresh=True)
            return
        if self.datetime_last_rtn_trade_dic[symbol] > self.datetime_last_update_position_dic[target_currency]:
            logging.debug("持仓数据尚未更新完成,最近一次成交回报时间:%s 晚于 最近一次持仓更新时间:%s",
                          self.datetime_last_rtn_trade_dic[symbol],
                          self.datetime_last_update_position_dic[target_currency])
            self.get_position(symbol, force_refresh=True)
            return
    # Throttle: executing too densely risks duplicated orders.
    if symbol in self.symbol_last_deal_datetime:
        last_deal_datetime = self.symbol_last_deal_datetime[symbol]
        if last_deal_datetime + self.timedelta_between_deal > datetime.now():
            # logging.debug("最近一次交易时间:%s,防止交易密度过大,跳过", last_deal_datetime)
            return
    with self._mutex:
        target_position = self.symbol_target_position_dic[symbol]
        target_position.check_stop_loss(close_cur)
        # self.logger.debug("当前持仓目标:%r", target_position)
        # Cancel all outstanding orders for the symbol before adjusting.
        self.cancel_order(symbol)
        # Work out the direction and volume needed to reach the target.
        position_date_pos_info_dic = self.get_position(symbol)
        if position_date_pos_info_dic is None:
            # No current holding: open the position directly, unless the
            # stop-loss has already been triggered.
            if not target_position.has_stop_loss:
                self.do_order(md_dic, symbol, target_position.position, target_position.price,
                              target_position.direction, target_position.stop_loss_price, msg='当前无持仓')
        else:
            # A holding exists; two possible actions:
            # 1) converge the holding towards the target position;
            # 2) if the stop-loss price was breached, liquidate.
            position_holding = sum(
                [pos_info_dic['balance'] for pos_info_dic in position_date_pos_info_dic.values()])
            self.logger.debug('当前 %s 持仓 %f 价格 %.6f', target_position.currency, position_holding, close_cur)
            if target_position.has_stop_loss:
                # Stop-loss triggered: keep liquidating while any balance remains.
                self.do_order(md_dic, symbol, -position_holding, None,
                              target_position.direction, msg="止损")
            else:
                # Adjust only when the gap exceeds the minimum threshold.
                position_gap = target_position.position - position_holding
                if position_gap > target_position.gap_threshold_vol:
                    if position_holding < target_position.gap_threshold_vol:
                        msg = '建仓'
                    else:
                        msg = "补充仓位"
                    # Holding below target: buy the difference.
                    self.do_order(md_dic, symbol, position_gap, target_position.price,
                                  target_position.direction, target_position.stop_loss_price, msg=msg)
                elif position_gap < - target_position.gap_threshold_vol:
                    if target_position.position == 0:
                        msg = '清仓'
                    else:
                        msg = "持仓超过目标仓位,减仓 %.4f" % position_gap
                    # Holding above target: sell the excess.
                    self.do_order(md_dic, symbol, position_gap, target_position.price,
                                  target_position.direction, target_position.stop_loss_price, msg=msg)
                else:
                    self.logger.debug('当前持仓 %f 与目标持仓%f 差距 %f 过小,忽略此调整',
                                      position_holding, target_position.position, position_gap)
        # Record the execution time for the throttling check above.
        self.symbol_last_deal_datetime[symbol] = datetime.now()
def get_symbol_by_currency(self, currency):
    """Return the trading symbol for *currency* (currently only usdt pairs are supported)."""
    return f'{currency}usdt'
def calc_vol_and_stop_loss_price(self, symbol, weight, stop_loss_rate=None, gap_threshold_precision=0.01):
    """
    From the target *weight* and the account's total market value compute the
    target volume for *symbol*, the minimum volume gap worth adjusting, and
    the stop-loss price (long positions only).

    :param symbol: trading symbol
    :param weight: target portfolio weight for the symbol
    :param stop_loss_rate: relative offset applied to the latest price.
        NOTE(review): the stop-loss price is ``latest * (1 + stop_loss_rate)``,
        which for a long position is only below market when the rate is
        negative — confirm the sign convention used in the order files.
    :param gap_threshold_precision: fraction of total account value below
        which a position difference is ignored
    :return: (target_vol, gap_threshold_vol, stop_loss_price); all three are
        None when no valid latest price is cached for *symbol*
    """
    holding_currency_dic = self.get_holding_currency(exclude_usdt=False)
    if symbol not in self.symbol_latest_price_dic or self.symbol_latest_price_dic[symbol] == 0:
        self.logger.error('%s 没有找到有效的最新价格', symbol)
        weight_vol = None
        gap_threshold_vol = None
        stop_loss_price = None
    else:
        # Total account value: usdt balances count at face value, every other
        # currency at its latest usdt price.
        tot_value = 0
        for currency, dic in holding_currency_dic.items():
            for pos_date_type, dic_sub in dic.items():
                if currency == 'usdt':
                    tot_value += dic_sub['balance']
                else:
                    tot_value += dic_sub['balance'] * self.symbol_latest_price_dic[
                        self.get_symbol_by_currency(currency)]
        price_latest = self.symbol_latest_price_dic[symbol]
        weight_vol = tot_value * weight / price_latest
        gap_threshold_vol = tot_value * gap_threshold_precision / price_latest
        stop_loss_price = price_latest * (1 + stop_loss_rate)
    return weight_vol, gap_threshold_vol, stop_loss_price
def get_target_position(self, symbol):
    """
    Unpack the target-position record for *symbol* into a tuple of
    (direction, currency, position, symbol, price, stop_loss_price,
    has_stop_loss, gap_threshold_vol).

    NOTE(review): this accesses the record with dict subscripts, but the rest
    of the class stores TargetPosition objects in symbol_target_position_dic —
    this looks like stale code that would raise TypeError if called; confirm
    and either remove it or adapt it to TargetPosition attributes.
    """
    dic = self.symbol_target_position_dic[symbol]
    return dic['direction'], dic['currency'], dic['position'], dic['symbol'], \
        dic['price'], dic['stop_loss_price'], dic.setdefault('has_stop_loss', False), \
        dic.setdefault('gap_threshold_vol', None)
def backup_feedback_files(self):
    """
    Rename every feedback file in the watched folder to a timestamped
    ``*.bak`` backup.
    :return: None
    """
    # List candidate files.
    file_name_list = os.listdir(self._folder_path)
    if file_name_list is None:
        # self.logger.info('No file')
        return
    for file_name in file_name_list:
        # Only handle feedback*.csv files.
        if PATTERN_FEEDBACK_FILE_NAME.search(file_name) is None:
            continue
        file_base_name, file_extension = os.path.splitext(file_name)
        file_path = os.path.join(self._folder_path, file_name)
        # Rename to a timestamped backup.
        backup_file_name = f"{file_base_name} {datetime.now().strftime('%Y-%m-%d %H_%M_%S')}" \
                           f"{file_extension}.bak"
        os.rename(file_path, os.path.join(self._folder_path, backup_file_name))
        self.logger.info('备份 Feedback 文件 %s -> %s', file_name, backup_file_name)
def create_feedback_file(self):
    """
    Serialize self.symbol_target_position_dic into a timestamped
    ``feedback_*.json`` file in the watched folder (used to restore the
    target positions after a restart; see load_feedback_file).
    :return: path of the file written
    """
    symbol_target_position_dic = self.symbol_target_position_dic
    data_dic = {}
    for key, val in symbol_target_position_dic.items():
        val_dic = val.to_dict()
        # The Direction enum is not JSON-serializable; store its int value.
        val_dic['direction'] = int(val_dic['direction'])
        data_dic[key] = val_dic
    file_name = f"feedback_{datetime.now().strftime('%Y-%m-%d %H_%M_%S')}.json"
    file_path = os.path.join(self._folder_path, file_name)
    with open(file_path, 'w') as file:
        json.dump(data_dic, file)
    self.logger.info('生产 feedback 文件:%s', file_name)
    return file_path
def load_feedback_file(self):
    """
    Load the first available ``feedback_*.json`` file and rebuild
    self.symbol_target_position_dic from it.
    :return: None
    """
    # List candidate files.
    file_name_list = os.listdir(self._folder_path)
    if file_name_list is None or len(file_name_list) == 0:
        # self.logger.info('No file')
        return
    for file_name in file_name_list:
        # Only feedback files are considered.
        if PATTERN_FEEDBACK_FILE_NAME.search(file_name) is None:
            continue
        self.logger.debug('处理文件 feedback文件: %s', file_name)
        file_path = os.path.join(self._folder_path, file_name)
        with open(file_path) as file:
            data_dic = json.load(file)
        # Rebuild the TargetPosition objects from the JSON dict.
        symbol_target_position_dic = {}
        for key, val in data_dic.items():
            val['direction'] = Direction(val['direction'])
            symbol_target_position_dic[key] = TargetPosition(**val)
        self.symbol_target_position_dic = symbol_target_position_dic
        self.logger.info('加载 feedback 文件:%s', file_name)
        self.logger_symbol_target_position_dic()
        break
    else:
        # for/else: runs only when no feedback file was found (no break hit).
        logging.info('没有可用的 feedback 文件可加载')
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG, format=Config.LOG_FORMAT)
    DEBUG = False
    symbol_list = ['ethusdt', 'eosusdt']
    # Strategy parameters
    strategy_params = {'symbol_list': symbol_list}
    md_agent_params_list = [
        # {
        #     'name': 'min1',
        #     'md_period': PeriodType.Min1,
        #     'instrument_id_list': ['rb1805', 'i1801'],  # ['jm1711', 'rb1712', 'pb1801', 'IF1710'],
        #     'init_md_date_to': '2017-9-1',
        #     'dict_or_df_as_param': dict
        # },
        {
            'name': 'tick',
            'md_period': PeriodType.Tick,
            'instrument_id_list': symbol_list,  #
        }]
    run_mode_realtime_params = {
        'run_mode': RunMode.Realtime,
        'enable_timer_thread': True,
        'seconds_of_timer_interval': 15,
    }
    run_mode_backtest_params = {
        'run_mode': RunMode.Backtest,
        'date_from': '2017-9-4',
        'date_to': '2017-9-27',
        'init_cash': 1000000,
        'trade_mode': BacktestTradeMode.Order_2_Deal
    }
    # run_mode = RunMode.BackTest
    # Build the strategy handler.
    stghandler = StgHandlerBase.factory(
        stg_class_obj=ReadFileStg,
        strategy_params=strategy_params,
        md_agent_params_list=md_agent_params_list,
        **run_mode_realtime_params)
    if DEBUG:
        stghandler.run()
    else:
        # Start the strategy.
        stghandler.start()
        # Shut the strategy down after 180 s. NOTE(review): the original
        # comment said "2 minutes" but the sleep is actually 3 minutes.
        time.sleep(180)
        stghandler.keep_running = False
        stghandler.join()
    logging.info("执行结束")
    # print(os.path.abspath(r'..\file_order'))
|
class Header:
    """A markdown heading with a level and text."""

    def __init__(self, level: int, text: str):
        self.level = level
        self.text = text

    def as_markdown(self):
        """Render as an ATX heading line, e.g. '## text\\n'."""
        hashes = "#" * self.level
        return "{} {}\n".format(hashes, self.text)
class CodeBlock:
    """A fenced markdown code block."""

    def __init__(self, code: str):
        self.code = code

    def as_markdown(self):
        """Render the code wrapped in triple-backtick fences."""
        return "\n".join(("```", self.code, "```", ""))
class Markup:
    """Builder that accumulates markdown elements and renders them in order."""

    def __init__(self):
        self.elements = []

    def header(self, level: int, text: str):
        """Append a heading; returns self for chaining."""
        self.elements.append(Header(level, text))
        return self

    def code_block(self, text: str):
        """Append a fenced code block; returns self for chaining."""
        self.elements.append(CodeBlock(text))
        return self

    def section(self, level: int, title: str, body: str):
        """Convenience: a heading immediately followed by a code block."""
        return self.header(level, title).code_block(body)

    def as_markdown(self):
        """Render every element, separated by blank lines."""
        rendered = [element.as_markdown() for element in self.elements]
        return '\n'.join(rendered)
|
19,114 | 27065dd5477e201c4e76be01d6fd248c4b01b99e | from lorentzian import Lorentzian
from rabi_power_flop import Rabi_power_flop
from analyzerWindow import AnalyzerWindow
from analyzerWindowSpectrum729 import analyzerWindow729, analyzerWindowPowerFlop
# Registry mapping each fitting-model class to the analyzer-window class used
# to display its results.
fitting_models = {
    Lorentzian: analyzerWindow729,
    Rabi_power_flop: analyzerWindowPowerFlop,
}
class FittingInterface(object):
    """
    Mediates between a fitting model (Lorentzian, Rabi power flop, ...) and
    its analyzer window: selects a model, feeds it data and user-supplied
    initial parameters, runs the fit and displays the result.
    """
    def __init__(self):
        # model name -> model class, built from the fitting_models registry
        self.all_fitting_models = {}
        # currently selected model instance (None until setModel is called)
        self.active_model = None
        self.data = None
        # parameter name -> (to_fit flag, initial guess)
        self.manual_parameters = {}
        for model in fitting_models:
            self.all_fitting_models[model.name] = model
        self.accepted = None
        self.auto_accept = None
    def setAccepted(self, accepted):
        """Record whether the fit was accepted by the user."""
        self.accepted = accepted
    def setAutoAccept(self, auto_accept):
        """When true, fits are accepted without user confirmation."""
        self.auto_accept = auto_accept
    def setModel(self, model):
        """Instantiate the fitting model registered under *model* (a name)."""
        try:
            self.active_model = self.all_fitting_models[model]()
        except KeyError:
            raise Exception("Wrong model")
    def setData(self, data):
        """Store the raw data to be fitted."""
        self.data = data
    def addParameter(self, parameter, initial_guess, to_fit):
        """Register a user-supplied parameter guess and whether to fit it."""
        self.manual_parameters[parameter] = (to_fit, initial_guess)
    def start_fit(self):
        """
        Run the fit with the current model/data/parameters, open the matching
        analyzer window with data and fit curves, and return the window.
        :raises Exception: if no model has been selected
        """
        if not self.active_model:
            raise Exception("No fitting model selected")
        dataX, dataY = self.active_model.setData(self.data)
        self.active_model.setUserParameters(self.manual_parameters)
        fitX, fitY = self.active_model.fit()
        fitting_parameters = self.active_model.get_parameter_info()
        # Pick the window class registered for the active model.
        model_cls = self.active_model.__class__
        window_cls = fitting_models[model_cls]
        self.gui = window_cls(fitting_parameters, self.auto_accept, self)
        self.gui.plot(dataX, dataY, '*k')
        self.gui.plotfit(fitX, fitY)
        if self.auto_accept:
            self.accepted = True
        return self.gui
    def refit(self, gui_parameters):
        """Re-run the fit with parameters edited in the GUI and refresh the plot."""
        self.manual_parameters = gui_parameters
        self.active_model.setUserParameters(self.manual_parameters)
        fitX, fitY = self.active_model.fit()
        fitting_parameters = self.active_model.get_parameter_info()
        self.gui.plotfit(fitX, fitY)
        self.gui.set_last_fit(fitting_parameters)
    def evaluate_params(self, params):
        """Evaluate the model curve at the given parameters without fitting."""
        evalX, evalY = self.active_model.evaluate_parameters(params)
        return evalX, evalY
    def get_parameter(self, parameter):
        """Return the fitted value of a single named parameter."""
        return self.active_model.get_parameter_value(parameter)
19,115 | 1e90a7fe272462e02c8cdabf75ecdaa4153e5912 | # Project Setup
from distutils.core import setup
# NOTE(review): `packages` should list this project's own Python packages;
# this list names third-party distributions (vulcan-api, discord-webhook,
# schedule) plus stdlib/self entries ("distutils", "setup"). Those belong in
# setuptools' `install_requires` or a requirements.txt — confirm the intent
# before publishing.
setup(
    name="School Notifications",
    version="1.0",
    packages=["distutils", "setup", "vulcan-api", "discord-webhook", "schedule"]
)
|
19,116 | 50eab1f089bb8b97a80637dc169041d9e6c5589a | from MessagePrinter.MessagesPrinter import MessagePrinter
# Class used as part of the game's view.
class Connect4View(MessagePrinter):
    # --------------------------------------------------------------------------
    # input   : a string, 'cls' for Windows and 'clear' for Ubuntu or others
    # function: constructor, stores the ASCII-art banners and menu strings
    def __init__(self, system):
        MessagePrinter.__init__(self, system)
        """
        Sets all the strings that can be printed later.
        """
        self.welcome_message = "".join([" __ __ _ \n",
                                        " \ \ / / | | \n",
                                        " \ \ /\ / /__| | ___ ___ _ __ ___ ___ \n",
                                        " \ \/ \/ / _ \ |/ __/ _ \| '_ ` _ \ / _ \ \n",
                                        " \ /\ / __/ | (_| (_) | | | | | | __/ \n",
                                        " \/ \/ \___|_|\___\___/|_| |_| |_|\___| \n",
                                        " _ \n",
                                        " | | \n",
                                        " | |_ ___ \n",
                                        " | __/ _ \ \n",
                                        " | || (_) | \n",
                                        " \__\___/ \n",
                                        " \n",
                                        " _ _ _ \n",
                                        " | | | || | \n",
                                        " ___ ___ _ __ _ __ ___ ___| |_ | || |_ \n",
                                        " / __/ _ \| '_ \| '_ \ / _ \/ __| __| |__ _| \n",
                                        " | (_| (_) | | | | | | | __/ (__| |_ | | \n",
                                        " \___\___/|_| |_|_| |_|\___|\___|\__| |_| \n\n"])
        self.title = "".join([" _ _ _ \n",
                              " | | | || | \n",
                              " ___ ___ _ __ _ __ ___ ___| |_ | || |_ \n",
                              " / __/ _ \| '_ \| '_ \ / _ \/ __| __| |__ _| \n",
                              " | (_| (_) | | | | | | | __/ (__| |_ | | \n",
                              " \___\___/|_| |_|_| |_|\___|\___|\__| |_| \n\n"])
        self.winner = "".join([" __ __ _ \n",
                               " \ \ / / ( ) \n",
                               " \ \_/ /__ _ _|/__ _____ __ _____ _ __ \n",
                               " \ / _ \| | | | \ \ / / _ \ \ \ /\ / / _ \| '_ \ \n",
                               " | | (_) | |_| | \ V / __/ \ V V / (_) | | | |\n",
                               " |_|\___/ \__,_| \_/ \___| \_/\_/ \___/|_| |_|\n",
                               " \n\n" ])
        self.lost = "".join([" __ __ _ _ _ \n",
                             " \ \ / / | | | | | | \n",
                             " \ \_/ /__ _ _ | |__ __ ___ _____ | | ___ ___| |_ \n",
                             " \ / _ \| | | | | '_ \ / _` \ \ / / _ \ | | / _ \/ __| __|\n",
                             " | | (_) | |_| | | | | | (_| |\ V / __/ | |___| (_) \__ \ |_ \n",
                             " |_|\___/ \__,_| |_| |_|\__,_| \_/ \___| |______\___/|___/\__|\n",
                             " \n\n"])
        self.draw = "".join([" _____ \n",
                             " | __ \ \n",
                             " | | | |_ __ __ ___ __\n",
                             " | | | | '__/ _` \ \ /\ / /\n",
                             " | |__| | | | (_| |\ V V / \n",
                             " |_____/|_| \__,_| \_/\_/ \n",
                             " \n\n"])
        self.main_menu = "".join([" ------------------- Main menu --------------------\n\n",
                                  " >>> 1. New game\n\n",
                                  " >>> 2. How to play\n\n",
                                  " >>> 3. Exit\n\n"])
        self.new_game_menu = "".join([" ----------------- New game --------------------\n\n",
                                      " >>> 1. Type of game\n\n",
                                      " >>> 2. Level game\n\n",
                                      " >>> 3. Training\n\n",
                                      " >>> 4. Start\n\n",
                                      " >>> 5. Back\n\n"])
        self.type_game_menu = "".join([" ---------------- Type of game -----------------\n\n",
                                       " >>> 1. Computer vs computer\n\n",
                                       " >>> 2. Human vs human\n\n" ,
                                       " >>> 3. Human vs Computer\n\n",
                                       " >>> 4. Back\n\n"])
        self.how_to_play = "".join([" ----------------- How to play -----------------\n\n",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n ",
                                    " xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n"])
        self.player1_wins_title = "".join(["__________.__ ____ __ __.__ \n",
                                           "\______ \ | _____ ___.__. __________/_ | / \ / \__| ____ ______\n",
                                           " | ___/ | \__ \< | |/ __ \_ __ \ | \ \/\/ / |/ \ / ___/\n",
                                           " | | | |__/ __ \\\___ \ ___/| | \/ | \ /| | | \\\___ \ \n",
                                           " |____| |____(____ / ____|\___ >__| |___| \__/\ / |__|___| /____ >\n",
                                           " \/\/ \/ \/ \/ \/ \n"])
        self.player2_wins_title = "".join(["__________.__ ________ __ __.__ \n",
                                           "\______ \ | _____ ___.__. __________\_____ \ / \ / \__| ____ ______\n",
                                           " | ___/ | \__ \< | |/ __ \_ __ \/ ____/ \ \/\/ / |/ \ / ___/\n",
                                           " | | | |__/ __ \\\___ \ ___/| | \/ \ \ /| | | \\\___ \ \n",
                                           " |____| |____(____ / ____|\___ >__| \_______ \ \__/\ / |__|___| /____ >\n",
                                           " \/\/ \/ \/ \/ \/ \/ \n"])
    # --------------------------------------------------------------------------
    # input   : none
    # function: print the title
    # output  : none
    def view_title(self):
        """
        Print the Connect4 title banner.
        output: None
        """
        self.clear_print("bright","","cyan", self.title)
#--------------------------------------------------------------------------
#input : none
#function: print the winner
#output: none
def view_winner(self):
"""
funtion: Prints You've Von title
output: None
"""
self.clear_console()
self.clear_print("bright","","yellow",self.winner)
self.input_option(self,">>> Main menu: ")
#--------------------------------------------------------------------------
#input : none
#function: print the lost
#output: none
def view_lost(self):
"""
function: Prints You've Lost title
output: None
"""
self.clear_console()
self.alert(self.lost)
self.input_option(self,">>> Main menu: ")
    # --------------------------------------------------------------------------
    # input   : none
    # function: print the draw message
    # output  : none
    def view_draw(self):
        """
        Print the "Draw" banner and prompt to return to the main menu.
        output: None
        """
        self.clear_console()
        self.alert(self.draw)
        self.input_option(">>> Main menu: ")
    # --------------------------------------------------------------------------
    # input   : a menu string and a prompt string
    # function: print a menu and read the user's choice
    # output  : the option entered by the user
    def view_menu(self, menu, space):
        """
        Print the given menu text and return the option entered at *space*.
        output: the raw input string
        """
        #self.view_title()
        self.print_message("bright","","white", menu)
        option = self.input_option(space)
        return option
    # --------------------------------------------------------------------------
    # input   : none
    # function: print the main menu
    # output  : the option chosen by the user
    def view_main_menu(self):
        """
        Print the welcome banner plus the main menu and return the choice.
        output: the raw input string
        """
        self.clear_console()
        self.print_message("bright","","cyan", self.welcome_message)
        return self.view_menu(self.main_menu," >>> ")
    # --------------------------------------------------------------------------
    # input   : none
    # function: print the new-game menu
    # output  : the option chosen by the user
    def view_new_game_menu(self):
        """
        Print the new-game menu under the title banner and return the choice.
        output: the raw input string
        """
        self.view_title()
        return self.view_menu(self.new_game_menu, " >>> ")
    # --------------------------------------------------------------------------
    # input   : none
    # function: print the game-type menu
    # output  : the option chosen by the user
    def view_type_game_menu(self):
        """
        Print the type-of-game menu under the title banner and return the choice.
        output: the raw input string
        """
        self.view_title()
        return self.view_menu(self.type_game_menu, " >>> ")
    # --------------------------------------------------------------------------
    # input   : none
    # function: print the how-to-play screen
    # output  : the option chosen by the user (Enter to go back)
    def view_how_to_play(self):
        """
        Print the how-to-play screen and wait for Enter to go back.
        output: the raw input string
        """
        self.view_title()
        return self.view_menu(self.how_to_play,">>> Back [enter]: ")
    # --------------------------------------------------------------------------
    # input   : none
    # function: ask for a column option
    # output  : the column entered by the user (raw string)
    def column_option(self):
        """
        Prompt for a column number and return the raw input.
        output: the raw input string
        """
        return self.input_option(">>>> Select a number column: ")
    # --------------------------------------------------------------------------
    # input   : none
    # function: ask for the game level
    # output  : the level as an int
    def select_level(self):
        """
        Prompt for the game level (1 - 4) and return it as an int.
        NOTE(review): the input is not validated; non-numeric input would
        raise ValueError — confirm the caller handles that.
        output: int
        """
        return int(self.input_option(">>>> Select level (1 - 4): "))
    # --------------------------------------------------------------------------
    # input   : none
    # function: print an invalid-option warning
    # output  : none
    def invalid_option(self):
        """
        Print the invalid-option warning via the alert channel (red).
        output: None
        """
        self.alert("___ Invalid option D: ____")
    def player1_wins(self, style, back, fore):
        """
        input   : style/background/foreground color names for clear_print
        function: Print the "Player1 Wins" banner
        output  : None
        """
        self.clear_print(style, back, fore,self.player1_wins_title)
    def player2_wins(self, style, back, fore):
        """
        input   : style/background/foreground color names for clear_print
        function: Print the "Player2 Wins" banner
        output  : None
        """
        self.clear_print(style, back, fore,self.player2_wins_title)
    def print_players_names(self, p1_name, p2_name):
        """
        input   : two strings with the players' names
        function: print the players' names with their colored tokens
                  (ANSI escapes: red for player 1, yellow for player 2)
        output  : None
        """
        message = "".join(['\033[0m'," Players --->"," ",p1_name,'\033[91m',
                           " ●",'\033[0m'," ",p2_name,'\033[93m'," ● ",
                           "\n\n"])
        self.print_message("","","white",message)
|
class Job:
    """A scheduled job: the raw scheduler record plus its batch and machine."""

    def __init__(self, raw_job, batch, machine):
        self.__raw_job = raw_job
        self.__batch = batch
        self.__machine = machine

    def __str__(self):
        return "Job " + str(self.__raw_job['id'])

    def get_account_id(self):
        """Account owning this job, taken from the batch record."""
        return self.__batch.get_raw('account')

    def get_fair_machine_set(self):
        """Fair-share machine set of the machine the job ran on."""
        return self.__machine.fair_machine_set

    def get_batch(self):
        return self.__batch

    def get_start_time(self):
        return self.__raw_job['real_start_time']

    def get_finish_time(self):
        """Start time plus the job's actual duration."""
        record = self.__raw_job
        return record['real_start_time'] + record['real_duration']

    def get_id(self):
        return self.__raw_job['id']
class DummyJob:
    """Placeholder job: a fixed account/machine-set pair starting at *maxt*."""

    def __init__(self, account_id, fair_machine_set, maxt):
        self.__account_id = account_id
        self.__fair_machine_set = fair_machine_set
        self.__start_time = maxt

    def __str__(self):
        return "Dummy job, account_id: %d, fair_machine_set: %s" % (
            self.__account_id, str(self.__fair_machine_set))

    def get_account_id(self):
        return self.__account_id

    def get_fair_machine_set(self):
        return self.__fair_machine_set

    def get_start_time(self):
        return self.__start_time
|
19,118 | e1a653e854acdf64d01fc4ab6216c9f4dd4e3f74 | # -*- coding: utf-8 -*-
'''
This is a file that shows how Python decorators work.
Normally, we use a Python decorator like this:
@a_callable_object
def your_method(*args):
    do_something
@a_callable_object
class your_class():
    define_your_class
The callable object can be a function or a class object.
The grammar works like this:
your_method = a_callable_object(your_method)
your_class = a_callable_object(your_class)
It rebinds the existing name to a new object.
'''
def show_name2address(v):
    '''
    Print the type name, the __name__ attribute (if any) and the memory
    address of *v*.
    '''
    print '%s %s and address 0x%x' % (type(v).__name__,
                                      getattr(v, '__name__', ''), id(v))
def test_case(test_fun):
    '''
    Decorator that wraps a zero-argument test function with start/end banners.
    '''
    def wrap():
        print ('\n======== start testing %s function ======\n' %
               test_fun.__name__)
        test_fun()
        print ('\n======== end testing %s function ========\n' %
               test_fun.__name__)
    return wrap
def no_argument_decorator():
    '''
    Function decorator factory taking no arguments.
    Usage example:
    >>> @no_argument_decorator()
    >>> def your_function(*args):
    >>>     write your own code
    It cannot be used bare as ``@no_argument_decorator``: the factory takes
    no argument, while bare usage would pass the decorated function into it.
    '''
    def wrap(fun):
        # fun is the function (or class) being decorated
        show_name2address(fun)
        def innerwrap(*args):
            print 'execute in innerwrap'
            return fun(*args)
        show_name2address(innerwrap)
        return innerwrap
    show_name2address(wrap)
    return wrap
def no_argument_withwraps_decorator():
    '''
    Same as no_argument_decorator, but the inner wrapper is decorated with
    functools.wraps so the decorated function keeps its original __name__,
    __doc__ and __dict__ — compare the attributes printed by the two test
    cases to see the difference.
    Usage example:
    >>> @no_argument_withwraps_decorator()
    >>> def your_function(*args):
    >>>     write your own code
    '''
    from functools import wraps
    def wrap(fun):
        show_name2address(fun)
        @wraps(fun)
        def innerwrap(*args):
            print 'execute in innerwrap'
            return fun(*args)
        show_name2address(innerwrap)
        return innerwrap
    show_name2address(wrap)
    return wrap
@test_case
def test_noarg_decorator_case():
    '''
    Test no_argument_decorator (with explicit empty parentheses) on a function.
    '''
    try:
        @no_argument_decorator()
        def test_no_argument_decorator(*args):
            print 'execute in test_no_argument_decorator'
            print args
        print (test_no_argument_decorator.__dict__,
               test_no_argument_decorator.__name__)
    except Exception, e:
        print e
    # The name now refers to innerwrap, so the address has changed.
    show_name2address(test_no_argument_decorator)
    # Really invokes the innerwrap function.
    test_no_argument_decorator(*[1, 'justdoit', 'programer'])
@test_case
def test_noarg_withwraps_decorator_case():
    '''
    Test no_argument_withwraps_decorator (with empty parentheses) on a
    function; thanks to functools.wraps the decorated function keeps its
    original __name__ and __dict__.
    '''
    try:
        @no_argument_withwraps_decorator()
        def test_no_argument_withwraps_decorator(*args):
            print 'execute in test_no_argument_decorator'
            print args
        print (test_no_argument_withwraps_decorator.__dict__,
               test_no_argument_withwraps_decorator.__name__)
    except Exception, e:
        print e
    # The name is rebound to the wrapper, so the address has changed.
    show_name2address(test_no_argument_withwraps_decorator)
    # Really invokes the innerwrap function.
    test_no_argument_withwraps_decorator(*[1, 'justdoit', 'programer'])
@test_case
def test_noarg_decorator_class_case():
    '''
    Test no_argument_decorator (with empty parentheses) on a class.
    '''
    try:
        @no_argument_decorator()
        class test_no_argument_decorator_class():
            def __init__(self, *args):
                print 'init in test_no_argument_decorator_class'
                print args
    except Exception, e:
        print e
    show_name2address(test_no_argument_decorator_class)
    # "Instantiating" actually calls innerwrap, which forwards to the class.
    test_no = test_no_argument_decorator_class(*(1, 2, 3))
    show_name2address(test_no)
@test_case
def test_noexplicitarg_decorator():
    '''
    Test no_argument_decorator used bare (without parentheses): the decorated
    function is passed to the zero-argument factory, which raises TypeError.
    '''
    try:
        @no_argument_decorator
        def test_no_explicit_argument_decorator(*args):
            print 'execute in test_no_explicit_argument_decorator'
            print args
    except Exception, e:
        print e
def single_argument_decorator(fun):
    '''
    Plain function decorator: the single argument *fun* is the decorated
    function itself.
    Usage example:
    >>> @single_argument_decorator
    >>> def your_function(*args):
    >>>     write your own code
    '''
    show_name2address(fun)
    def wrap(*args):
        print args
        return fun(*args)
    return wrap
@test_case
def test_singlearg_decorator_case():
    '''
    Test single_argument_decorator used bare on a function.
    '''
    try:
        @single_argument_decorator
        def test_single_argument_decorator(x):
            return x**2
    except Exception, e:
        print e
    print test_single_argument_decorator(3)
@test_case
def test_single_explicit_noarg_decorator_case():
    '''
    Test single_argument_decorator called with explicit empty parentheses;
    the decorator requires exactly one argument, so the call raises.
    '''
    try:
        @single_argument_decorator()
        def test_single_explicitno_argument_decorator(x):
            return 2*x
    except Exception, e:
        print e
def multi_arguments_decorator(fun, arg):
    '''
    Decorator factory taking two arguments. Despite the parameter name,
    *fun* here is just a plain argument, not the decorated function — the
    decorated function arrives as ``func`` in the wrap() callback.
    Usage example:
    >>> @multi_arguments_decorator(fun, arg)
    >>> def your_function(*args):
    >>>     write your own code
    '''
    show_name2address(fun)
    show_name2address(arg)
    def wrap(func):
        # func is the actual decorated function
        show_name2address(func)
        def innerwrap(*args):
            # NOTE: passes the whole args tuple as ONE positional argument
            return func(args)
        show_name2address(innerwrap)
        return innerwrap
    show_name2address(wrap)
    return wrap
@test_case
def test_multiargs_decorator_case():
'''
Test function decorator with multi arguments.
'''
try:
@multi_arguments_decorator(123, 'abc')
def test_multi_arguments_decorator(s):
print s
except Exception, e:
print e
print test_multi_arguments_decorator('def')
@test_case
def test_noexplicitargs_decorator_case():
'''
Test function decorator without explicit arguments.
'''
try:
@multi_arguments_decorator
def test_multi_arguments_decorator(s):
print s
except Exception, e:
print 'class decorator error!'
print e
class single_argument_class_decorator(object):
'''
This is a decorator class a single argument.
A example for usage:
>>> @no_argument_class_decorator
>>> def your_function(*args):
>>> write your own code
'''
def __init__(self):
print 'in the no_argument_class_decorator init'
def __new__(cls, fun):
print 'in the no_argument_class_decorator new'
show_name2address(fun)
def wrap(*args):
print args
print fun(args)
show_name2address(wrap)
return wrap
class another_single_argument_class_decorator(object):
'''
This is an another decorator class a single argument.
A example for usage:
>>> @no_argument_class_decorator
>>> def your_function(*args):
>>> write your own code
'''
def __init__(self, fun):
print 'in the no_argument_class_decorator init'
self._fun = fun
def __call__(self, *args):
print 'in the no_argument_class_decorator new'
show_name2address(self._fun)
r = self._fun(args)
show_name2address(self.__call__)
return r
class no_argument_class_decorator():
    '''
    A class decorator without any arguments.

    Used as ``@no_argument_class_decorator()``: the created instance's
    ``__call__`` receives the decorated function and returns the wrapper.
    '''
    def __call__(self, fun):
        show_name2address(fun)

        def wrap(*args):
            # Delegate to the wrapped function; its return value is discarded.
            fun(*args)
        show_name2address(wrap)
        return wrap
@test_case
def test_noarg_class_decorator_case():
'''
Test no argument class decorator.
'''
try:
@no_argument_class_decorator()
def test_noarg_class_decorator(*args):
print args
except Exception, e:
print e
show_name2address(test_noarg_class_decorator)
test_noarg_class_decorator(*range(5))
@test_case
def test_singlearg_class_decorator_case():
'''
test function with single_argument_class_decorator.
'''
try:
@single_argument_class_decorator
def test_single_argument_class_decorator(s):
return [x[:-2] for x in s]
except Exception, e:
print 'class decorator error'
print e
show_name2address(test_single_argument_class_decorator)
test_single_argument_class_decorator(*('justdoit', 'basketball player'))
@test_case
def test_another_singlearg_class_decorator_case():
'''
test function with another single_argument_class_decorator.
'''
try:
@another_single_argument_class_decorator
def test_another_single_argument_class_decorator(s):
twice = lambda x: 2*x
return [twice(x) for x in s]
except Exception, e:
print 'class decorator error'
print e
show_name2address(test_another_single_argument_class_decorator)
print test_another_single_argument_class_decorator(*range(10))
'''
def property_decorator(fun, *args):
A property decorator for class object.
Usage for example:
>>> class A():
>>> @property_decorator
>>> def class_fun(self):
>>> return 'class function'
print 'execute in property decorator'
print fun, dir(fun), fun.func_globals, args
return fun(fun.im_self)
'''
class property_decorator_class(object):
    '''
    A class property decorator for class object.
    When it inherits from object, it may be decorated as a descriptor.
    In general, a descriptor is an object attribute with "binding behavior",
    one whose attribute access has been overridden by methods in the descriptor
    protocol. Those methods are __get__(), __set__(), and __delete__(). If any
    of those methods are defined for an object, it is said to be a descriptor.
    See details at
    https://docs.python.org/2/howto/descriptor.html#descriptor-howto-guide

    Usage for example:
    >>> class A():
    >>>     @property_decorator_class
    >>>     def class_fun(self):
    >>>         return 'class function'
    '''
    def __init__(self, fun):
        # ``fun`` is the decorated method; it is invoked on attribute access.
        self._fun = fun

    def __get__(self, obj, type):
        print 'execute in get method'
        # Accessed on the class itself -> return the descriptor object.
        if obj is None:
            return self
        # Accessed on an instance -> call the wrapped method with it.
        return self._fun(obj)

    def __set__(self, obj, value, *args):
        print 'execute in set method'
        print args
        # NOTE(review): storing under the wrapped function's name in the
        # instance __dict__ does not shadow a data descriptor; __get__ still
        # wins on subsequent reads.
        obj.__dict__[self._fun.__name__] = value
class test_property_decorator():
'''
A class for testing property decorator
'''
def __init__(self, s):
self._s = s
@property_decorator_class
def svalue(self):
return self._s
@test_case
def test_property_decorator_case():
'''
Test self-implement property class
'''
tpd = test_property_decorator("mytest")
print test_property_decorator.svalue, "tpd's value is %s" % tpd.svalue
tpd._s = "new mytest"
print "tpd's new value is %s" % tpd.svalue
'''
In python, we may use the multi levels decorators for different
decoration work.And the decorator order starts at definition of
function or class object from down to up.We can this in the
following example.Like this:
@decorator_b
@decorator_a
def myfunction():
do_my_work
-----decoration order-----
@decorator_a
def myfunction():
do_my_work
After this, a new function(eg: my_new_function) will be binded with
'my_function' name.
-----decorator order------
@decorator_b
my_new_function
And this becomes the usuall condition.
'''
def firstDecorator(fun):
'''
The first decorator when call with mutlti levels decorators.
'''
print 'execute in first decorator'
def wrap(*args):
fun(*args)
return wrap
def secondDecorator(fun):
'''
The second decorator when call with mutlti levels decorators.
'''
print 'execute in second decorator'
def wrap(*args):
fun(*args)
return wrap
@test_case
def test_multi_levels_decorator_case():
@secondDecorator
@firstDecorator
def test_multi_levels_decorator(*args):
'''
Test how multi levels decorator works.
'''
print args
test_multi_levels_decorator(*range(10))
def main():
test_noarg_decorator_case()
test_noarg_withwraps_decorator_case()
test_noarg_decorator_class_case()
test_noexplicitarg_decorator()
test_singlearg_decorator_case()
test_single_explicit_noarg_decorator_case()
test_multiargs_decorator_case()
test_noexplicitargs_decorator_case()
test_noarg_class_decorator_case()
test_singlearg_class_decorator_case()
test_another_singlearg_class_decorator_case()
test_property_decorator_case()
test_multi_levels_decorator_case()
if __name__ == '__main__':
main()
|
19,119 | 4a3385391ce55eb53e849cf23400dce1637a6eb4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 15:47:21 2020
@author: ritambasu
"""
#In this code n=10000000 should be used for better convergence (but it takes very long time to compute)
#for fast check please change n=10000 (please)
import numpy as np
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, 1)
#defining the given function to be sampled
def f(x):
    """Unnormalized target density: constant 2.0 on the open interval (3, 7),
    zero everywhere else."""
    return 2.0 if 3.0 < x < 7.0 else 0.0
#defining the pdf of the function after normalization for comparison
def p_f_exact(x):
    """Normalized pdf of ``f``: uniform with density 1/4 on (3, 7)."""
    inside = 3.0 < x < 7.0
    return 0.25 if inside else 0.0
#main construction
nsteps = 100000
theta = 0.0
theta_arr=np.zeros(nsteps)#Markov chain vs steps
theta_prime_arr=np.zeros(nsteps)#for plotting all the points vs steps
pdf_arr=np.zeros(nsteps)#for plotting normalized pdf of the function
steps=np.zeros(nsteps)
x = np.linspace(0,9,nsteps)
for i in range(nsteps):
theta_prime=theta+np.random.standard_normal()
r = np.random.rand()
pdf_arr[i]=p_f_exact(x[i])
if f(theta)!=0.0:
if (f(theta_prime)/f(theta))> r:
theta = theta_prime
else:
theta=theta
else:
theta = theta_prime
theta_arr[i]=theta
steps[i]=i
theta_prime_arr[i]=theta_prime
#plotting of density histogram and comparison
ax.plot(x,pdf_arr,'r-',lw=1.5, alpha=1.0, label='actual pdf')
plt.hist(theta_arr,bins=100,density=True,fill=False,histtype='bar',label='MCMC randoms')
plt.xlim(0,9)
plt.xlabel('xi',fontsize=17)
plt.ylabel('PDF',fontsize=17)
plt.title('MCMC Density Histogram of Random-numbers',fontsize=20)
plt.suptitle('A PDF of 10,000 non-uniform deviates',x=0.5,y=-0.05)
plt.legend( loc = 'best')
plt.show()
#plottings of Markov Chain and all points
plt.scatter(steps,theta_prime_arr,marker="o",s=5,color='Green',label="other points")
plt.plot(steps,theta_arr,label="Markov chain",color='Blue')
plt.xlabel('steps',fontsize=17)
plt.ylabel(r'$\theta$ [steps]',fontsize=17)
plt.ylim(-4,10)
plt.title('Markov Chain.',fontsize=20)
plt.legend( loc = 'best')
plt.show()
|
19,120 | ca8ce4d3b0bcc5ce8d44a1b7a8dfdc61fdf55919 | def repSqAlg(g, x):
y = g
z = 1
x_bckw = bin(x)[2::][::-1]
print "g = " + str(g)
print "x = " + x_bckw
for i in range(0, len(x_bckw)):
if(x_bckw[i] == '1'):
z = z*y
y = pow(y,2)
print "y = " + str(y) + " z = " + str(z)
return z
print "5^53 = " + str(repSqAlg(5,53))
|
19,121 | f26203d2598ad1a1fd9a2f624d95d8fd0782d07e | # -*- coding: utf-8 -*-
# Problem 4 - Largest palindrome product
# A palindromic number reads the same both ways. The largest palindrome made
# from the product of two 2-digit numbers is 9009 = 91 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
from itertools import combinations
def euler_4():
    # Project Euler 4: largest palindrome that is a product of two
    # 3-digit numbers.
    def _is_palindromic(integer):
        # Palindromic iff each digit equals its mirror digit from the end.
        s = str(integer)
        return all([s[i] == s[(i * -1) - 1] for i in xrange(len(s) / 2)])
    # NOTE(review): combinations() only yields pairs of distinct factors, so
    # products x*x are never tested; the known answer 906609 = 913*993 is
    # unaffected, but the search is not fully general.
    palindromes = filter(_is_palindromic,
                         [x[0] * x[1] for x in
                          combinations(xrange(100, 1000), 2)])
    return max(palindromes)
if __name__ == '__main__':
assert euler_4() == 906609
|
19,122 | f3d0beeb22374c0152a7d00e05fd71368b3b0912 | # 4. Определить является ли строка изограммой, т.е. что все буквы в ней за
# исключением пробелов встречаются только один раз. Например, строки 'Питон', 'downstream', 'книга без слов' являются
# изограммами, а само слово 'изограмма' - нет.
st = input('Введите слово:')
print("Изограмма:", len(st) == len(set(st))) |
19,123 | cea45849edb1831500bff45ff325e70b58fb5d83 | from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='wkt_scraper',
version='1.0.3',
description='Parse word information from Wiktionary. Currently only English, Spanish and Turkish are supported.',
long_description=readme(),
long_description_content_type='text/markdown',
keywords='wiktionary scraper parser',
author='Fatih Akgul',
author_email='akguls@gmail.com',
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.11',
],
packages=['scraper', ],
include_package_data=True,
url='https://github.com/fatih-akgul/wkt_scraper',
install_requires=['beautifulsoup4', 'requests'],
)
|
19,124 | 486a912d877bc6ad1ec8a7660c983b51abaf35cd | #coding: utf-8
from django.contrib import admin
from bitfield import BitField
from bitfield.forms import BitFieldCheckboxSelectMultiple
from .models import Filter, UserAdvert
class AdminFilter(admin.ModelAdmin):
    # Admin page for Filter: render BitField columns as multi-checkbox
    # widgets instead of the default form field.
    formfield_overrides = {
        BitField: {'widget': BitFieldCheckboxSelectMultiple},
    }
    list_display = ('id', 'user', 'is_rent', 'price_min', 'price_max', 'section',)
    list_filter = ('is_rent', 'section',)
class AdminUserAdvert(admin.ModelAdmin):
    # Admin page for UserAdvert; list can be filtered by read state.
    list_display = ('id', 'filter', 'advert', 'is_read',)
    list_filter = ('is_read',)
admin.site.register(Filter, AdminFilter)
admin.site.register(UserAdvert, AdminUserAdvert)
|
19,125 | 1bd8ca3033b2d8b0b97a9518cc65984e168200c3 | from . import actions, config, env, model, process
from .actions import Actions
from .config import configure
from .env import create_runtime_env
from .model import PPO
from .process import runner, MultiprocessAgent
__all__ = [
"actions", "config", "env", "model", "process",
"Actions", "configure", "create_runtime_env",
"PPO", "runner", "MultiprocessAgent"
]
|
19,126 | d3090aa472474e15e025487c4350ce2fa74edc3b | import datetime
from django.db import models
class Category(models.Model):
    # Product category.
    # NOTE(review): is_sub_category presumably distinguishes entries used in
    # Product.sub_categories from top-level ones used in Product.category --
    # confirm against the views that query it.
    title = models.CharField(u'título', unique=True, max_length=120)
    is_sub_category = models.BooleanField(default=True)

    class Meta:
        verbose_name = 'Categoria'
        verbose_name_plural = 'Categorias'

    def __str__(self):
        return self.title
class Product(models.Model):
    """Sellable product with price, image and category links."""
    name = models.CharField(u'nome', max_length=120)
    description = models.TextField(u'descrição', max_length=254, null=True, blank=True)
    price = models.DecimalField(u'preço', max_digits=10, decimal_places=2, default=0.0)
    image = models.ImageField(u'foto', upload_to='product', max_length=254, null=True, blank=True)
    category = models.ForeignKey(Category, models.CASCADE, null=True, blank=True, related_name='all_products')
    sub_categories = models.ManyToManyField(Category)
    # Bug fix: the auto_now/auto_now_add flags were swapped.  created_at must
    # be set once on insert (auto_now_add) and updated_at refreshed on every
    # save (auto_now); the original did the opposite.
    created_at = models.DateTimeField(u'criado em', auto_now_add=True)
    updated_at = models.DateTimeField(u'atualizado em', auto_now=True)
    is_active = models.BooleanField(u'ativo', default=True)

    class Meta:
        verbose_name = 'Produto'
        verbose_name_plural = 'Produtos'

    def __str__(self):
        # Bug fix: the date format was missing the slash between month and
        # year ('%d/%m%Y' -> '%d/%m/%Y').
        return f'Produto: {self.name}, criado em: {self.created_at.strftime("%d/%m/%Y")}'
|
19,127 | 739c04af830bc284057a2e1a2d55a22b3405210f | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 19:26
from os.path import abspath, join, dirname
from setuptools import find_packages, setup
this_dir = abspath(dirname(__file__))
with open(join(this_dir, 'README.md'), encoding='utf-8') as file:
long_description = file.read()
setup(
name='hanlp_demo',
version='0.0.1',
description='HanLP: Han Language Processing',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/hankcs/HanLP',
author='hankcs',
author_email='hankcshe@gmail.com',
license='Apache License 2.0',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
"Development Status :: 3 - Alpha",
'Operating System :: OS Independent',
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
"Topic :: Text Processing :: Linguistic"
],
keywords='corpus,machine-learning,NLU,NLP',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
install_requires=[
'hanlp_common'
],
python_requires='>=3.6',
)
|
19,128 | ac51f0d5719dacd15b3108f0f2c8754f672cc6fa | #!/usr/bin/env python3
from netmiko import ConnectHandler
from getpass import getpass
my_user = input('Username: ')
my_pass = getpass('Password: ')
cisco3 = {
"device_type": "cisco_ios",
"host": "cisco3.lasthop.io",
"username": my_user,
"password": my_pass
}
cisco4 = {
"device_type": "cisco_ios",
"host": "cisco4.lasthop.io",
"username": my_user,
"password": my_pass
}
cisco_devices = [cisco3, cisco4]
for device in cisco_devices:
net_connect = ConnectHandler(**device)
output = net_connect.send_command("show ip interface brief")
# print(f'------{device}------\n')
print(output + '\n')
net_connect.disconnect()
|
19,129 | 868ad4369cd64f877f4ea35f1a85c941aa9c7409 | """
Strings are amongst the most popular types in Python.
We can create them simply by enclosing characters in quotes.
Python treats single quotes the same as double quotes.
Creating strings is as simple as assigning a value to a variable.
"""
import string
name = "shubham"
print("Data in upper case : ",name.upper()) # upper() for UPPER case strings conversion
lower = "PRAJAPATI"
print("Data in lower case : ",lower.lower()) # lower() for lower case strings conversion
takeData = input("Please enter any data : ")
print("Here is the user input data : ",takeData) # input() for taking data from user
# NOTE -> input() takes data in string if you want to do some functionality with numeric please
# convert that data in your dataType like : int float etc.
# Let's take a look of some in-built string functions
print(string.ascii_uppercase) # ascii_uppercase gives A-Z
print(string.digits) # it gives 0-9
# string formatter
"""
% c -> character
% s -> string
% i ,d-> signed integer deciaml integer
% u -> unsigned decimal integer
% x -> hex decimal integer (lowercase)
% X -> hex decimal integer (UPPERCASE)
% o -> octal decimal integers
% e -> exponantial notation (lowercase, e^3)
% E -> exponantial notation (10^3)
% f -> floating point numbers
"""
print("hexa decimal : ",string.hexdigits) # string.hexdigits gives hexadecimal number
print("only printable no : ",string.printable ) # printable characters only
print("Octa decimal no : ",string.octdigits) # Octa decimal no's
print(type(name.isalnum()),name.isalnum()) # checks alphanumeric
print(type(name.isnumeric()),name.isnumeric()) # checks numeric
print(type(name.isdigit()),name.isdigit()) # checks digit
print("Split func : ",name.split()) # Splits stings
print("Starts With ",name.startswith('s')) # Checks starting char of string return boolean
number = " my number is 97748826478"
print(number.split()) # Basically returns list
print(number.strip()) # removes unprintable charaters from both side left and right
print(number.rstrip()) # removes unprintable charaters right side only
splitn= number.split()
for onew in splitn:
if (onew.strip()).isdigit():
if len(onew.strip())== 11:
print("No", onew.strip())
str1 = "abcdxyzabc"
print(str1.replace('a','k')) # occurance of 'a' by 'k'
str2 = str1.replace('a','k')
print(str2.replace('acd','shu'))
print(str1.capitalize()) # Capitalize capital only first char of an string
# Method 1st
newName = "shubham kumar prajapati"
splitName = newName.split()
print(splitName)
print(splitName[0][0].upper() + ". "+ splitName[1][0].upper() + ". "+ splitName[2].capitalize())
wordlen = len(splitName)
print("Length of list : ",wordlen)
# Method 2nd
count = 0
newname = ""
for aw in splitName:
count +=1
if count < wordlen:
newname += aw[0].upper()+ ". "
else :
newname += aw[0].upper()+aw[1:]
print("By method 2nd : ",newname) |
19,130 | 250cf654c01fb49e51662dff8b8de1a5cb03e352 | # -*- coding: utf-8 -*-
#import MySQLdb
import pymysql
MySQLdb = pymysql
import logging
from config import *
from
logger = logging.getLogger('db_helper')
conn = MySQLdb.connect(host=MYSQL_HOST, user=MYSQL_USER, passwd=MYSQL_PASSWD, db=MYSQL_DB, charset='utf8')
def
|
19,131 | b89ea4a357f26070e850cfa546dbef2c3a45f690 | from marshmallow import fields
from sqlalchemy import (
Column, Text
)
from news.models.sqlalchemy import (
create_schedule_abc, create_schedule
)
from ..utils.ma import get_base_schema
from ..users.models import User
from ..extensions import (
db, celery, persister
)
class ScheduleABC(create_schedule_abc(User)):
def __init__(self, name='', *args, **kwargs):
self.name = name
super().__init__(*args, **kwargs)
def __str__(self):
return '{}\'s schedule {} {}'.format(
self.owner.fullname if self.owner else 'Anonymous',
self.name, self.url)
name = Column(Text, nullable=False, default='')
@property
def serialized(self):
schema = ScheduleSchema()
return schema.dump(self).data
@property
def state(self):
return self.get_state(celery)
Schedule = create_schedule(ScheduleABC, db.Model, persister=persister)
class ScheduleSchema(get_base_schema(Schedule, json_field='dict')):
state = fields.Method('get_state')
def get_state(self, schedule):
return schedule.get_state(celery)
|
19,132 | 73590ea100635c66f93304f2dfb9751c3cb16728 | import argparse
parser = argparse.ArgumentParser(description="AoC day 1")
parser.add_argument("file", help="The file that should be sourced")
parser.add_argument("-p", "--phase", help="The part of the exercise that we are at", type=int, default=1)
def main(argv):
    """Read one integer (module mass) per line from ``argv.file`` and print
    the answer for the selected phase:
    phase 1 -> simple fuel sum, phase 2 -> fuel including the fuel's own mass.
    """
    print(f"test, {argv}")
    all_items = []
    # ``with`` guarantees the file is closed even if a line fails to parse
    # (the original left it open on error).
    with open(argv.file, "r") as infile:
        for line in infile:
            # Bug fix: a blank line is truthy ('\n'), so the original guard
            # let int() raise ValueError; skip whitespace-only lines instead.
            if line.strip():
                all_items.append(int(line))
    if argv.phase == 1:
        sol = solutionPt1(all_items)
        print(f"The target mass is {sol}")
    elif argv.phase == 2:
        sol = solutionPt2(all_items)
        print(f"The fuel adjusted fuel weight is now {sol}")
# Fuel required to launch a given module is based on its mass.
# Specifically, to find the fuel required for a module, take its mass,
# divide by three, round down, and subtract 2.
def solutionPt1(items):
    """Total fuel requirement: for each module mass, floor(mass / 3) - 2.

    Returns 0 for an empty input.
    """
    total = 0  # renamed from ``sum`` to stop shadowing the builtin
    for mass in items:
        # ``//`` keeps the arithmetic in exact integers; int(mass / 3) goes
        # through a float and can lose precision for very large masses.
        total += mass // 3 - 2
    return total
def solutionPt2(items):
    """Total fuel including the fuel's own fuel: for each module, iterate
    floor(m / 3) - 2 on the fuel mass until the extra requirement drops to
    zero or below.  Returns 0 for an empty input.
    """
    total = 0  # renamed from ``sum`` to stop shadowing the builtin
    for mass in items:
        # ``//`` keeps the arithmetic in exact integers (no float round-trip).
        fuel = mass // 3 - 2
        while fuel > 0:
            total += fuel
            fuel = fuel // 3 - 2
    return total
main(parser.parse_args())
|
19,133 | be2174db4a126fff36c891d4cdd560cc508a2a42 | from distutils.core import setup
import py2exe
import requests.certs
setup(console=['Downloader.py'],
data_files = '',
options = {
'py2exe': {
'packages': ['csv', 'StringIO', 'requests']
}
}
) |
19,134 | 1c9b3eab5e21101382a836807cd2d5cff1fb2bc6 | import asyncio
import asyncssh
from pyplus.collection import qdict
from jsshd.command import run_command
class SSHServerSession(asyncssh.SSHServerSession):
    """asyncssh session that only supports ``exec`` requests.

    Each exec request is handed to :func:`run_command` in a background task;
    the result (or error text) is written back on the channel.
    """

    def __init__(self, server):
        self.__user_server = server  # owning server object

    def connection_made(self, chan):
        # Remember the channel so the exec callback can write responses.
        self._chan = chan

    def shell_requested(self):
        # Interactive shells are not offered by this server.
        return False

    def exec_requested(self, command):
        async def run_callback(command):
            try:
                env = qdict(channel=self._chan)
                result = await run_command(command, env)
                if isinstance(result, BaseException):
                    # Command failed: report its message and a failure status.
                    self._chan.write(str(result) + '\n')
                    self._chan._report_response(False)
                else:
                    self._chan.write(result + '\n')
                    self._chan._report_response(True)
                    self._chan.exit(0)
            except Exception:
                # Bug fix: was a bare ``except`` which also swallowed
                # asyncio.CancelledError (a BaseException on Python 3.8+) and
                # could silently break task cancellation.  Channel errors
                # remain best-effort and are still ignored.
                pass
        # NOTE(review): relies on the private ``_chan._report_response`` API;
        # confirm against the asyncssh version in use.
        # parse and check command
        asyncio.get_event_loop().create_task(run_callback(command))
        return None
19,135 | 20c83ce7c4beaf50044a8ffb68e0826dd6f6cc06 | #definiraj funkcijo, ki ima za vnos 2d x n array dolzine-2 polarna kota
import scipy.optimize as optimize
import numpy as np
def f(seznam):
    # Objective (intended): sum of pairwise chord distances between points on
    # the unit sphere described by polar angles.
    # NOTE(review): the body ignores the ``seznam`` parameter and reads the
    # globals ``fi`` and ``theta``, which are never defined in this file, so
    # calling f() raises NameError.  TODO: unpack fi/theta from seznam.
    # NOTE(review): the formula mixes fi[i]/theta[j] indices; verify it
    # against the spherical law of cosines before trusting results.
    vsota=0
    for i in range(len(fi)):
        for j in range(i):
            razdalja=abs(np.sqrt(2-2*(np.sin(fi[i])*np.sin(theta[j])*np.cos(fi[i]-fi[j])+ np.cos(theta[i]) * np.cos( theta[j] ) )))
            vsota=vsota + razdalja
    return vsota
test=for (int x=0; x<length; x = x+step)
print(test)
n=8
for i in range(n):
if i==0:
kot1=()
if i<0:
kot1=kot1+()
kot2=kot1
print(kot1)
#potrebno še pogoj
for i in range(n):
kot1[i]=np.pi/n*i
kot2[i]=np.pi/n*i
print(kot1,'\n',kot2)
x0=kot1
result = optimize.minimize(f,)
print(result)
# print(result)
#potem se 3d plot za razlicne n
|
19,136 | 84e3880fc8ba4cd461068ee472b6136cf547f401 | #!/home/nomanchesam/Documentos/Tec de Monterrey/9no SEMESTRE/Adm de proyectos de ing sw/W13/python/venv/bin/python3.6
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
19,137 | ed3390ade5901cb90de6e0c91290a1dfd92de60c | #!/usr/bin/env python3
import json
import click
from tqdm import tqdm
from util import util
logger = util.get_logger(__name__)
@click.command()
@click.argument('index')
@click.argument('output_file')
@click.option('-q', '--query', help='Sample selection query', type=click.File('r'))
@click.option('-f', '--output-format', help='Output format (multiple may be specified)', multiple=True,
              type=click.Choice(['json', 'text']), default=['json'])
@click.option('-n', '--total-mails', help='Total number of mails to sample', type=int, default=1000)
@click.option('-l', '--group-limit', help='Group sample limit', type=int)
@click.option('-s', '--skip', help='Skip ahead n messages', type=int, default=0)
@click.option('-x', '--scroll-size', help='Scroll size', type=int, default=2000)
def main(index, output_file, **kwargs):
    """
    Sample mails from Elasticsearch index.

    Arguments:
        index: Elasticsearch index to sample from
        output_file: output file (prefix without extension in case multiple formats are specified)
    """
    # Bug fix (decorator above): with multiple=True the default must be a
    # sequence of values; the bare string 'json' would be iterated
    # character-by-character.
    output_jsonl = None
    output_text = None
    if 'json' in kwargs['output_format']:
        # Bug fix: append the extension to output_file, not to the
        # output_format tuple (tuple + str raised TypeError whenever both
        # formats were requested).
        fname = output_file if len(kwargs['output_format']) == 1 else output_file + '.jsonl'
        output_jsonl = open(fname, 'w')
    if 'text' in kwargs['output_format']:
        fname = output_file if len(kwargs['output_format']) == 1 else output_file + '.txt'
        output_text = open(fname, 'w')

    if kwargs.get('query') is not None:
        query = json.load(kwargs.get('query'))
    else:
        # Default query: English mails from non-development Gmane groups,
        # excluding VCS and bug-tracker traffic.
        query = {
            "sort": ["warc_id"],
            "size": 200,
            "query": {
                "bool": {
                    "filter": {
                        "bool": {
                            "must_not": [
                                {
                                    "query_string": {
                                        "analyze_wildcard": True,
                                        "default_field": "*",
                                        "query": """group:(*.patches OR *.commits* OR
                                            *.dist-commits* OR *.version-control* OR *.git* OR *.cvs* OR *.svn*
                                            OR *.trunk* OR *.scm* OR *.pkg*) OR (group:(*.bugs* OR *.issues*
                                            OR *.bugzilla* OR *.codereview*) OR
                                            headers.subject.keyword:(*jira* OR *bugzilla*) OR
                                            headers.from_email.keyword:(*bugs* OR *bugzilla* OR *jira* OR *jboss*))"""
                                    }
                                }
                            ],
                            "must": {"term": {"lang": "en"}},
                            "minimum_should_match": 1,
                            "should": [
                                {"wildcard": {"group": "gmane.culture.*"}},
                                {"wildcard": {"group": "gmane.politics.*"}},
                                {"wildcard": {"group": "gmane.science.*"}},
                                {"wildcard": {"group": "gmane.education.*"}},
                                {"wildcard": {"group": "gmane.music.*"}},
                                {"wildcard": {"group": "gmane.games.*"}},
                                {"wildcard": {"group": "gmane.recreation.*"}}
                            ]
                        }
                    }
                }
            }
        }

    logger.info('Retrieving initial batch')
    es = util.get_es_client()
    results = util.es_retry(es.search, index=index, scroll='10m', size=kwargs['scroll_size'], body=query)

    skip = kwargs['skip']
    if skip > 0:
        logger.info('Skipping ahead {} messages'.format(skip))

    sampled_groups = {}
    num_samples = 0
    num_skipped = 0

    try:
        with tqdm(desc='Calculating progress', unit=' messages') as progress_bar:
            while num_samples < kwargs['total_mails'] and len(results['hits']['hits']) > 0:
                for hit in results['hits']['hits']:
                    if skip > 0 and num_skipped < skip:
                        progress_bar.set_description('Skipping messages')
                        progress_bar.total = skip
                        num_skipped += 1
                        progress_bar.update()
                        continue
                    elif (skip == 0 or num_skipped >= skip) and num_samples == 0:
                        # First sampled message: repurpose the progress bar
                        # from "skipping" to "sampling".
                        progress_bar.set_description('Sampling messages')
                        progress_bar.total = kwargs['total_mails']
                        progress_bar.n = 0
                        progress_bar.last_print_n = 0
                        progress_bar.update(0)

                    src = hit['_source']
                    text_plain = src['text_plain']

                    prev_samples = sampled_groups.get(src['group'], 0)
                    # NOTE(review): ``>`` lets each group reach group_limit + 1
                    # samples; change to ``>=`` if the limit is meant to be
                    # exact.
                    if kwargs['group_limit'] and prev_samples > kwargs['group_limit']:
                        continue
                    sampled_groups[src['group']] = prev_samples + 1

                    num_samples += 1
                    progress_bar.update()

                    if output_jsonl:
                        json.dump({'text': text_plain,
                                   'meta': {k: src[k] for k in src.keys() if k not in ['text_plain', 'text_html']},
                                   'labels': []}, output_jsonl)
                        output_jsonl.write('\n')

                    if output_text:
                        output_text.write(util.normalize_message_text(text_plain))
                        output_text.write('\n')

                    if num_samples >= kwargs['total_mails']:
                        break

                results = util.es_retry(es.scroll, scroll_id=results['_scroll_id'], scroll='10m')
    finally:
        # Always release the server-side scroll context and close outputs.
        es.clear_scroll(scroll_id=results['_scroll_id'])

        if output_jsonl:
            output_jsonl.close()
        if output_text:
            output_text.close()
if __name__ == '__main__':
main()
|
19,138 | b292d678ab7039fa9b3fc8c76b26591c462521d6 | import persistence
import connection
@connection.connection_handler
def get_card_status(status_id):
    """
    Find the first status matching the given id
    :param status_id:
    :return: str
    """
    wanted = str(status_id)
    for status in persistence.get_statuses():
        if status['id'] == wanted:
            return status['title']
    return 'Unknown'
|
19,139 | d7a12df53fde82da1a77f4f741f9601d68cc4476 | def num_to_dashes(num):
dash = []
for i in range(num):
dash.append("-")
return ''.join(dash)
print(num_to_dashes(1))
print(num_to_dashes(5))
print(num_to_dashes(3))
def num_to_dashes_2(num):
    # Same contract as num_to_dashes, written with string repetition.
    return "-" * num
print(num_to_dashes_2(1))
print(num_to_dashes_2(5))
print(num_to_dashes_2(3)) |
19,140 | 568716e1919712308b6eb58effb7ab7109d33db7 | #!/usr/bin/env python
# setp.py
# Copyright (c) 2015 Mark Tearle <mark@tearle.com>
# See LICENSE for details.
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from setuptools import setup, find_packages
classifiers = [
'Development Status :: 5 - Production/Stable',
'Framework :: Twisted',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Natural Language :: English',
'Topic :: Software Development :: Libraries :: Python Modules'
]
setup(
name='npyscreenreactor',
version='1.3',
license='MIT',
classifiers=classifiers,
author='Mark Tearle',
author_email='mark@tearle.com',
description = 'Twisted reactor for npyscreen',
long_description = 'npyscreenreactor is a Twisted reactor for the npyscreen curses library',
url='https://github.com/mtearle/npyscreenreactor',
download_url='https://github.com/mtearle/npyscreenreactor/tarball/v1.3',
packages=find_packages(),
keywords=['npyscreen', 'twisted'],
install_requires=['twisted', 'npyscreen']
)
|
19,141 | ba15d0112d99c71ab387eccb99f10cac9b875e61 | """
Code based on https://github.com/scikit-learn/scikit-learn/blob/fd237278e/sklearn/decomposition/_pca.py
"""
import numpy as np
from math import log
from scipy.special import gammaln
import pandas as pd
def select_rank_minka(shape, svals):
    """
    Selects the PCA rank using the method from (Minka, 2000).

    Parameters
    ----------
    shape: tuple (n_samples, n_features)

    svals: array-like, (n_features, )
        All singular values of the data matrix X.

    Output
    ------
    rank_est, out

    rank_est: int
        Estimated rank

    out: dict
        All output from rank selection algorithm
        ('log_liks': pd.Series indexed by rank, 'cov_evals': ndarray).
    """
    # assert shape[0] > shape[1]
    n_samples, n_features = shape
    # assert n_samples > n_features
    assert len(svals) == n_features

    # Eigenvalues of the sample covariance matrix: X has singular values
    # svals, so X^T X / n_samples has eigenvalues svals**2 / n_samples.
    cov_evals = svals ** 2 / n_samples

    ranks = np.arange(1, n_features)  # 1 to n_features - 1
    # Bug fix: np.empty_like(ranks) inherited the *integer* dtype of ranks,
    # which truncated every log-likelihood and raised on the -inf sentinel
    # returned by get_log_lik for degenerate ranks.  Use a float array.
    log_liks = np.empty(ranks.shape, dtype=float)
    for idx, rank in enumerate(ranks):
        log_liks[idx] = get_log_lik(cov_evals=cov_evals,
                                    rank=rank,
                                    shape=shape)

    log_liks = pd.Series(log_liks, index=ranks)
    log_liks.name = 'log_lik'
    log_liks.index.name = 'rank'

    # The estimated rank maximizes the approximate log-likelihood.
    rank_est = log_liks.idxmax()

    return rank_est, {'log_liks': log_liks,
                      'cov_evals': cov_evals}
def get_log_lik(cov_evals, rank, shape):
    """Compute the log-likelihood of a rank ``rank`` dataset.

    The dataset is assumed to be embedded in gaussian noise of shape(n,
    dimf) having spectrum ``spectrum``.

    Parameters
    ----------
    cov_evals : array of shape (n_features, )
        Eigen values of the sample covarinace matrix.

    rank : int
        Tested rank value. It should be strictly lower than n_features,
        otherwise the method isn't specified (division by zero in equation
        (31) from the paper).

    shape : tuple (n_samples, n_features)
        Shape of the data set

    Returns
    -------
    ll : float,
        The log-likelihood

    Notes
    -----
    This implements the method of `Thomas P. Minka:
    Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
    """
    n_samples, n_features = shape
    if not 1 <= rank <= n_features - 1:
        raise ValueError("the tested rank should be in [1, n_features - 1]")

    eps = 1e-15

    if cov_evals[rank - 1] < eps:
        # When the tested rank is associated with a small eigenvalue, there's
        # no point in computing the log-likelihood: it's going to be very
        # small and won't be the max anyway. Also, it can lead to numerical
        # issues below when computing pa, in particular in log((spectrum[i] -
        # spectrum[j]) because this will take the log of something very small.
        return -np.inf

    # pu: term coming from the prior over subspaces (cf. Minka, 2000).
    pu = -rank * log(2.)
    for i in range(1, rank + 1):
        pu += (gammaln((n_features - i + 1) / 2.) -
               log(np.pi) * (n_features - i + 1) / 2.)

    # pl: contribution of the retained eigenvalues.
    pl = np.sum(np.log(cov_evals[:rank]))
    pl = -pl * n_samples / 2.

    # v: noise variance estimate = mean of the discarded eigenvalues,
    # floored at eps to keep the log finite.
    v = max(eps, np.sum(cov_evals[rank:]) / (n_features - rank))
    pv = -np.log(v) * n_samples * (n_features - rank) / 2.

    # m: number of free parameters of the rank-``rank`` model.
    m = n_features * rank - rank * (rank + 1.) / 2.
    pp = log(2. * np.pi) * (m + rank) / 2.

    # pa: Laplace-approximation Hessian term, eq. (31) of the paper;
    # discarded part of the spectrum is replaced by the noise level v.
    pa = 0.
    spectrum_ = cov_evals.copy()
    spectrum_[rank:n_features] = v
    for i in range(rank):
        for j in range(i + 1, len(cov_evals)):
            pa += log((cov_evals[i] - cov_evals[j]) *
                      (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)

    ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.

    return ll
|
19,142 | 0ef98ecb66d39979efe1efe173e6be8477bfd675 | # Generated by Django 2.2.2 on 2019-07-04 17:10
from django.db import migrations, models
import markdownx.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='About',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('about', markdownx.models.MarkdownxField()),
],
),
]
|
19,143 | 3410653c071e3eac0059b76070c6669f4a5ce6f5 | from ._groupdeck import Format
from django.db import models
from django.conf import settings
User = settings.AUTH_USER_MODEL
class Junct_Sub_Format(models.Model):
    """Junction (through) table recording which user subscribes to which Format."""

    subbingUser_Format = models.ForeignKey(User,
                                           related_name='subbingUser_Format',
                                           on_delete=models.CASCADE)
    subbedFormat = models.ForeignKey(Format,
                                     related_name='subbedFormat',
                                     on_delete=models.CASCADE)
    # When the subscription was created.
    timestamp = models.DateTimeField(auto_now_add=True)

    class Meta:
        # A user may subscribe to a given format at most once.
        unique_together = ('subbingUser_Format', 'subbedFormat')

    def __str__(self):
        return '%s --subbed--> %s' % (self.subbingUser_Format, self.subbedFormat)
|
19,144 | d745df6f3dbb28f172b1e9a80649f0eb8baf83e2 | import moisture_reading
import sqlite3
from datetime import datetime
# Test fixture: a throwaway SQLite database whose readings table is recreated
# from scratch so every test run starts clean.
conn = sqlite3.connect('soil_moisture_test.db')
c = conn.cursor()
c.execute('DROP TABLE IF EXISTS readings')
moisture_reading.create_table(c)

# Capture one fixed timestamp at import time; MockDateTime below always
# returns it, so inserted rows have a predictable date string.
frozen_time = datetime.now()
frozen_time_string = frozen_time.strftime('%Y-%m-%d %H:%M:%S')
class MockDateTime():
    """Stand-in for the datetime class that always reports *frozen_time*.

    ``now`` is a staticmethod so it works whether it is called on the class
    (``MockDateTime.now()``, as the tests pass the class itself) or on an
    instance -- the original bare ``def now():`` would raise TypeError if
    ever invoked through an instance.
    """

    @staticmethod
    def now():
        # Module-level frozen timestamp captured at import time.
        return frozen_time
class TestDatabase():
    """Integration tests for the moisture_reading persistence helpers.

    Each test clears the shared ``readings`` table first, so the tests stay
    independent even though they share one module-level cursor ``c``.
    """

    def test_create_reading(self):
        # First row in an emptied table gets id 1 and the frozen timestamp.
        c.execute('DELETE FROM readings')
        reading = moisture_reading.create(700, MockDateTime, c)
        assert reading == (1, 700, frozen_time_string)

    def test_get_all_readings(self):
        # all() returns the rows in insertion order.
        c.execute('DELETE FROM readings')
        reading1 = moisture_reading.create(700, MockDateTime, c)
        reading2 = moisture_reading.create(800, MockDateTime, c)
        assert moisture_reading.all(c) == [reading1, reading2]

    def test_format_readings(self):
        # format_readings returns two parallel lists: [values], [timestamps].
        c.execute('DELETE FROM readings')
        moisture_reading.create(700, MockDateTime, c)
        moisture_reading.create(800, MockDateTime, c)
        assert moisture_reading.format_readings(c) == [
            [700, 800],
            [frozen_time_string, frozen_time_string]
        ]
|
19,145 | 5832d23a0a4c86c97098f5f4eb74904cf0a7e9c3 | # Returns sorted array using insertion sort
def insertion_sort(array):
    """Sort *array* in place using insertion sort and return it.

    Each pass takes the element at position ``idx`` and inserts it into the
    already-sorted prefix ``array[0:idx]``.
    """
    for idx in range(len(array)):
        key = array[idx]
        pos = idx
        # Shift every element of the sorted prefix that exceeds the key one
        # slot to the right, then drop the key into the gap.
        while pos > 0 and array[pos - 1] > key:
            array[pos] = array[pos - 1]
            pos -= 1
        array[pos] = key
    return array
|
19,146 | e0c8be8d392b2a383ad1ee92af43ad036a84281a | import pandas as pd
import models
import predict
import pickle
import metrics
import numpy as np
from sklearn.linear_model import LinearRegression
import tensorflow as tf
from sklearn.model_selection import train_test_split
# GCS bucket holding the normalized image data used by the CNN predictor.
IMAGES_GCS_PATH = 'gs://osic_fibrosis/images-norm/images-norm'
def create_ensemble_table(save_path='ensemble_table.csv', qreg_model_path='models_weights/qreg_model/model_v4.ckpt',
                          cnn_model_path='models_weights/cnn_model/model_v6.ckpt', image_size=[256, 256],
                          processor_path='models_weights/qreg_model/processor.pickle', pp_train='theta_data/pp_train.csv'):
    """
    Create a train table for ensemble training. Predict for both qreg and CNN models.

    Parameters:
    :param save_path: table save path
    :param qreg_model_path: quantile regression model path
    :param cnn_model_path: CNN model path
    :param image_size: CNN model image size
        NOTE(review): mutable default list; never mutated here, but a tuple
        would be safer.
    :param processor_path: table preprocessor pickle path
    :param pp_train: preprocessed train table path
    """
    # train set: features for the quantile-regression net are every column
    # except the target (GT_FVC) and the patient id.
    train_table = pd.read_csv(pp_train)
    nn_x = train_table.drop(['GT_FVC', 'Patient'], axis=1).values
    ensemble_table = train_table[['Patient', 'Weeks', 'GT_FVC']]
    # NOTE(review): pickle.load on a local artifact -- safe only for trusted
    # files; the file handle is never closed explicitly.
    processor = pickle.load(open(processor_path, 'rb'))
    # Undo the Weeks scaling so the ensemble table holds real week numbers.
    processor.inverse_transform(ensemble_table, 'Weeks')
    # models
    qreg_model = models.get_qreg_model(nn_x.shape[1])
    qreg_model.load_weights(qreg_model_path)
    cnn_gen = predict.exponent_generator(IMAGES_GCS_PATH + '/train',
                                         model_path=cnn_model_path,
                                         image_size=image_size)
    # preds: column 1 of the quantile model output is presumably the median
    # quantile -- TODO confirm against models.get_qreg_model.
    qreg_preds = qreg_model.predict(nn_x)[:, 1]  # qreg predict
    ensemble_table['Qreg_FVC'] = qreg_preds
    cnn_exp_dict = {id: exp_func for id, exp_func in cnn_gen}
    predict.predict_form(cnn_exp_dict, ensemble_table, submission=False)  # cnn predict
    # save
    ensemble_table.to_csv(save_path, index=False)
def fix_ensemble_table():
    """Deduplicate the ensemble table and drop rows with no CNN prediction.

    Reads 'ensemble_table.csv' and writes 'fixed_ensemble_table.csv'.
    """
    fixed_ensemble = pd.read_csv('ensemble_table.csv')
    fixed_ensemble = fixed_ensemble.drop_duplicates(subset=['Weeks', 'Patient', 'GT_FVC', 'FVC'])
    # FVC is the CNN prediction column; rows without it are unusable.
    fixed_ensemble = fixed_ensemble.loc[fixed_ensemble["FVC"].notnull()]
    fixed_ensemble.to_csv('fixed_ensemble_table.csv', index=False)
def ensemble_metric_check():
    """Print each base model's standalone Laplace log-likelihood score."""
    ensemble_table = pd.read_csv('fixed_ensemble_table.csv')
    # 200 is the (fixed) confidence used by the metric for both models.
    cnn_metric = metrics.laplace_log_likelihood(ensemble_table["GT_FVC"].values, ensemble_table["FVC"].values, 200)
    qreg_metric = metrics.laplace_log_likelihood(ensemble_table["GT_FVC"].values, ensemble_table["Qreg_FVC"].values, 200)
    print("Qreg score {}, CNN score {}".format(qreg_metric, cnn_metric))
if __name__ == '__main__':
    # dataset: the two base-model predictions are the features; ground-truth
    # FVC is the target the blender learns to reproduce.
    ensemble_table = pd.read_csv('fixed_ensemble_table.csv')
    qreg_values = ensemble_table["Qreg_FVC"].values.reshape(-1, 1)
    cnn_values = ensemble_table["FVC"].values.reshape(-1, 1)
    y = ensemble_table["GT_FVC"].values
    X = np.concatenate([qreg_values, cnn_values], axis=1)
    # fit regressor: learn linear blend weights for the two base models.
    # NOTE(review): the `normalize=` keyword was removed from LinearRegression
    # in newer scikit-learn releases -- pin the version or drop the argument.
    linear_regressor = LinearRegression(fit_intercept=True, normalize=False)
    linear_regressor.fit(X, y)
    print(linear_regressor.coef_, linear_regressor.intercept_)
    regressor_preds = linear_regressor.predict(X)
    print('Regressor score {}'.format(metrics.laplace_log_likelihood(y, regressor_preds, 200)))
    # check simple blend: fixed-weight averages of the two base predictions.
    qreg_values = qreg_values.reshape((-1, ))
    cnn_values = cnn_values.reshape((-1, ))
    pred1 = qreg_values * 0.4 + cnn_values * 0.6
    pred2 = qreg_values * 0.25 + cnn_values * 0.75
    pred3 = qreg_values * 0.6 + cnn_values * 0.4
    score1 = metrics.laplace_log_likelihood(y, pred1, 200)
    score2 = metrics.laplace_log_likelihood(y, pred2, 200)
    score3 = metrics.laplace_log_likelihood(y, pred3, 200)
    print("40qreg + 60cnn {}, 25qreg + 75cnn {}, 60qreg + 40cnn {}".format(score1, score2, score3))
    # simple nn: a small MLP blender over the two base predictions.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Input([2]),
        tf.keras.layers.Dense(100, activation='relu'),
        tf.keras.layers.Dense(100, activation='relu'),
        tf.keras.layers.Dense(1, activation='linear')
    ])
    model.compile(optimizer='adam', loss='mse')
    train_x, val_x, train_y, val_y = train_test_split(X, y, test_size=0.2)
    model.fit(x=train_x, y=train_y, validation_data=(val_x, val_y), epochs=200, batch_size=10)
    # NOTE(review): model.predict(X)[0] takes only the FIRST prediction row,
    # not the full prediction vector; model.predict(X)[:, 0] looks intended
    # -- verify before trusting the printed NN score.
    print("NN score {}".format(metrics.laplace_log_likelihood(y, model.predict(X)[0], 200)))
|
19,147 | 5ba65430fc43b7c6ad77c036d29950ac037588d5 | from fingym.fingym import make
from fingym.envs.spy_envs import SpyEnv |
19,148 | 14bd084e872f4aed806a6097e53601b8cc794641 | row = int(input("Enter number of rows : "))
col = int(input("Enter number of columns : "))

# Solid rectangle: every cell of the row x col grid is a '*'.
print('\nSolid Rectangle')
for i in range(row):
    # Building the row with '*' * col replaces the original per-character
    # append loop; output is identical.
    print('*' * col)

# Hollow rectangle: only border cells (first/last row or first/last column)
# are '*', interior cells are spaces. The original used `(a) + (b) + ...`
# on booleans to mean logical OR, which obscures the intent; plain `or`
# is equivalent and idiomatic.
print('\nHollow Rectangle')
for i in range(row):
    cells = [
        '*' if i == 0 or j == 0 or i == row - 1 or j == col - 1 else ' '
        for j in range(col)
    ]
    print(''.join(cells))
|
19,149 | fc20936a7d53100e78330fcc72e03605867740f3 | from django.apps import AppConfig
class EstatetradeConfig(AppConfig):
    """Django application configuration for the estateTrade app."""
    name = 'estateTrade'
|
19,150 | e5be951bbb479bfd87627665aafd6e1c4d357169 | ##
##
from __future__ import print_function
from awips.dataaccess import DataAccessLayer as DAL
import baseBufrMosTestCase
import unittest
#
# Test DAF support for bufrmosHPC data
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 01/19/16 4795 mapeters Initial Creation.
# 04/11/16 5548 tgurney Cleanup
# 04/18/16 5548 tgurney More cleanup
#
#
class BufrMosHpcTestCase(baseBufrMosTestCase.BufrMosTestCase):
    """Test DAF support for bufrmosHPC data"""

    # Datatype under test; the inherited test machinery reads this attribute.
    datatype = "bufrmosHPC"

    # Most tests inherited from superclass
    def testGetGeometryData(self):
        # Request two parameters for one station and run the shared
        # geometry-data checks from the base class.
        req = DAL.newDataRequest(self.datatype)
        req.setLocationNames("KOMA")
        req.setParameters("forecastHr", "maxTemp24Hour")
        self.runGeometryDataTest(req)
|
19,151 | 22c372b8a0a6f3de0e7e503a81d6a74df32daac4 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import jinja2
import webapp2
from google.appengine.ext import db
# Jinja2 environment rooted at ./templates next to this file; autoescape
# HTML-escapes rendered variables by default.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
    autoescape = True)
class Handler(webapp2.RequestHandler):
    """Base request handler with Jinja2 rendering convenience methods."""

    def write(self, *a, **kw):
        # Thin wrapper over the response output stream.
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        # Render a template from the templates/ directory to a string.
        t = jinja_env.get_template(template)
        return t.render(params)

    def render(self, template, **kw):
        # Render a template and write it straight into the response.
        self.write(self.render_str(template, **kw))
#*****************************************************************************************************
#upto this point, everything stays the same.
#the code above sets up jinja. this is to be included in all applications in the exact same way.
#for each application, create a templates folder and save all your html files in there.
class Player(db.Model):
    """Datastore entity holding one player's name and score."""
    # Free-form name; MainPage.post substitutes "Anonymous" for empty input.
    playername=db.TextProperty()
    playerscore=db.IntegerProperty()
class ListSurveys(Handler):
    """Render the player listing, highest score first."""

    def get(self):
        # GQL query over all Player entities, ordered by descending score.
        ranked_players = db.GqlQuery("SELECT * FROM Player "
                                     "ORDER BY playerscore DESC ")
        self.render("players.html", players=ranked_players)
class MainPage(Handler):
    """Landing page: serves the form and records submitted scores."""

    def get(self):
        self.render("base.html")

    def post(self):
        # An empty name field is recorded as "Anonymous".
        submitted_name = self.request.get("name")
        playername = "Anonymous" if submitted_name == "" else submitted_name
        playerscore = int(self.request.get("score"))
        # Persist the new score entry, then re-serve the form.
        entry = Player(playername=playername, playerscore=playerscore)
        entry.put()
        self.render("base.html")
class ReminderTool(Handler):
    """Serve the reminder-tool page."""

    def get(self):
        self.render("ReminderApp.html")
# URL routing table for the WSGI application.
app = webapp2.WSGIApplication([
    ('/', MainPage),('/players', ListSurveys),('/remindertool', ReminderTool),
], debug=True)
|
19,152 | 8d7298895dd8b48880ba72b8748d74cf93c41634 | """A script for updating pre-existing V2 Pipette configurations."""
import os
import json
import argparse
from pathlib import Path
from typing import List, Dict, Tuple, Any, Iterator, Type
from pydantic import BaseModel
from pydantic.main import ModelMetaclass
from enum import Enum
from opentrons_shared_data import get_shared_data_root
from ..pipette_definition import (
PipetteConfigurations,
PipetteGeometryDefinition,
PipettePhysicalPropertiesDefinition,
PipetteLiquidPropertiesDefinition,
PipetteModelVersionType,
SupportedTipsDefinition,
)
from ..types import (
PipetteModelType,
PipetteChannelType,
PipetteVersionType,
PipetteTipType,
PipetteModelMajorVersion,
PipetteModelMinorVersion,
)
from ..load_data import _geometry, _physical, _liquid
from ..pipette_load_name_conversions import convert_pipette_model
from ..dev_types import PipetteModel
"""
Instructions:
To run this script, you must be in `shared-data/python`. To invoke, use the command:
`pipenv run python -m opentrons_shared_data.pipette.scripts.update_configuration_files`
If you want to update all files, you can simply use the argument `--update_all_models`.
Make sure to run `make format-js` afterwards to ensure formatting of the json files
is good.
*Note* If you are adding a brand-new key, you MUST update the pydantic models
found in `python/pipette/pipette_definition.py` before running this script.
*Note* When you are entering in your data, please utilize the exact type. I.e. if it's a
list, you must input the list like: [1, 2, 3] or if it's a dict, like: {"data": 1}..
For now, we do not support updating pipetting functions in this script.
"""
# Root of the v2 pipette definition tree inside shared-data.
ROOT = get_shared_data_root() / "pipette" / "definitions" / "2"
# Config keys that affect nozzle geometry; editing the first also rebuilds
# the second (see load_and_update_file_from_config).
NOZZLE_LOCATION_CONFIGS = ["nozzle_offset", "nozzle_map"]
def _change_to_camel_case(c: str) -> str:
# Tiny helper function to convert to camelCase.
config_name = c.split("_")
if len(config_name) == 1:
return config_name[0]
return f"{config_name[0]}" + "".join(s.capitalize() for s in config_name[1::])
def list_configuration_keys() -> Tuple[List[str], Dict[int, str]]:
    """List out the model keys available to modify at the top level.

    Returns a pair: display strings ("<index>: <name>") for the menu, and an
    index -> field-name lookup for resolving the user's numeric choice.
    """
    field_names = list(PipetteConfigurations.__fields__)
    display = [f"{index}: {name}" for index, name in enumerate(field_names)]
    lookup = dict(enumerate(field_names))
    return display, lookup
def list_available_enum(enum_type: Type[Enum]) -> List[str]:
    """Return numbered display strings for every member of *enum_type*."""
    return [f"{index}: {member}" for index, member in enumerate(enum_type)]
def handle_subclass_model(
    top_level_configuration: List[str], base_model: BaseModel, is_basemodel: bool
) -> List[str]:
    """Handle sub-classed basemodels and update the top level model as necessary.

    Recursively walks nested pydantic models, prompting the user at each level
    for which field to modify, and appends each chosen key (and, for tip
    tables, the chosen tip size) to *top_level_configuration*. Recursion stops
    when the selected field's type is no longer a pydantic model.
    """
    if is_basemodel:
        if base_model.__fields__ == SupportedTipsDefinition.__fields__:
            # pydantic does something weird with the types in ModelFields so
            # we cannot use isinstance checks to confirm if the base model
            # is a supported tips definition
            print(f"choose {PipetteTipType.__name__}:")
            for row in list_available_enum(PipetteTipType):
                print(f"\t{row}")
            # The user picks a tip size by menu index.
            tip_type = list(PipetteTipType)[
                int(input("select the tip volume size to modify"))
            ]
            top_level_configuration.append(tip_type.name)
        # Build an index -> field-name menu for this model level.
        lookup = {i: v for (i, v) in enumerate(base_model.__fields__)}
        config_list = [f"{i}: {v}" for (i, v) in enumerate(base_model.__fields__)]
        print(f"you selected the basemodel {base_model.__name__}:")  # type: ignore[attr-defined]
        for row in config_list:
            print(f"\t{row}")
        configuration_to_update = lookup[
            int(input("select a specific configuration from above\n"))
        ]
        # Descend into the chosen field; recurse while it is itself a model.
        field_type = base_model.__fields__[configuration_to_update].type_
        is_basemodel = isinstance(field_type, ModelMetaclass)
        top_level_configuration.append(configuration_to_update)
        return handle_subclass_model(top_level_configuration, field_type, is_basemodel)
    else:
        return top_level_configuration
def check_from_version(version: str) -> str:
    """Check that the version requested is supported in the system.

    Returns the version string unchanged; raises ValueError if the major or
    minor component is unsupported.
    """
    parts = [int(piece) for piece in version.split(".")]
    major, minor = parts[0], parts[1]
    if major not in PipetteModelMajorVersion:
        raise ValueError(f"Major version {major} is not supported.")
    if minor not in PipetteModelMinorVersion:
        raise ValueError(f"Minor version {minor} is not supported.")
    return version
def save_data_to_file(
    directorypath: Path,
    file_name: str,
    data: Dict[str, Any],
) -> None:
    """Write *data* as indented JSON to ``directorypath/file_name.json``.

    Creates the directory tree on demand and overwrites any existing file.
    """
    directorypath.mkdir(parents=True, exist_ok=True)
    target = directorypath / f"{file_name}.json"
    target.write_text(json.dumps(data, indent=2))
def update(
    dict_to_update: Dict[str, Any], iter_of_configs: Iterator[str], value_to_update: Any
) -> Dict[str, Any]:
    """
    Walk *dict_to_update* along the key path in *iter_of_configs* and set the
    final non-dict leaf to *value_to_update*, keeping all sibling entries.
    Iterative equivalent of the original recursive version: descends while the
    current key maps to a dict, otherwise assigns the value and stops.
    """
    key = next(iter_of_configs, None)
    current = dict_to_update
    while key:
        if isinstance(current[key], dict):
            # Descend one level and continue with the next path component.
            current = current[key]
            key = next(iter_of_configs, None)
        else:
            current[key] = value_to_update
            break
    return dict_to_update
def build_nozzle_map(
    nozzle_offset: List[float], channels: PipetteChannelType
) -> Dict[str, List[float]]:
    """Build a nozzle-name -> [x, y, z] offset map for the given channel count.

    Rows (A, B, ...) step +9 in y and columns (1, 2, ...) step -9 in x,
    all relative to *nozzle_offset* (the A1 nozzle position).

    Fix: the original left a stray ``breakpoint()`` call at the top of this
    function, which dropped every run into the debugger; it has been removed.

    :raises ValueError: for an unsupported channel type.
    """
    Y_OFFSET = 9
    X_OFFSET = -9
    if channels == PipetteChannelType.SINGLE_CHANNEL:
        return {"A1": nozzle_offset}
    elif channels == PipetteChannelType.EIGHT_CHANNEL:
        return {
            f"{chr(ord('A') + 1*row)}1": [
                nozzle_offset[0],
                nozzle_offset[1] + Y_OFFSET * row,
                nozzle_offset[2],
            ]
            for row in range(8)
        }
    elif channels == PipetteChannelType.NINETY_SIX_CHANNEL:
        return {
            f"{chr(ord('A') + 1*row)}{1 + 1*col}": [
                nozzle_offset[0] + X_OFFSET * col,
                nozzle_offset[1] + Y_OFFSET * row,
                nozzle_offset[2],
            ]
            for row in range(8)
            for col in range(12)
        }
    raise ValueError(f"Unsupported channel type {channels}")
def load_and_update_file_from_config(
    config_to_update: List[str],
    value_to_update: Any,
    model_to_update: PipetteModelVersionType,
) -> None:
    """Update the requested config and save to disk.

    Load the requested config sub type (physical, geometry or liquid). Then
    update the current file and save to disk.

    Fix: the liquid branch previously called ``update(physical, ...)``, which
    raised NameError (``physical`` is only bound in the general branch) and
    would have updated the wrong table; it now updates ``liquid``.
    """
    # Keys on disk are camelCase while the pydantic fields are snake_case.
    camel_list_to_update = iter([_change_to_camel_case(i) for i in config_to_update])
    if config_to_update[0] in PipetteGeometryDefinition.__fields__:
        geometry = _geometry(
            model_to_update.pipette_channels,
            model_to_update.pipette_type,
            model_to_update.pipette_version,
        )
        if config_to_update[0] == "nozzle_map":
            # A null value means "rebuild the map from the existing offset".
            nozzle_to_use = (
                value_to_update if value_to_update else geometry["nozzleOffset"]
            )
            geometry["nozzleMap"] = build_nozzle_map(
                nozzle_to_use, model_to_update.pipette_channels
            )
        elif config_to_update[0] == "nozzle_offset":
            # Changing the offset invalidates the derived per-nozzle map too.
            geometry["nozzleMap"] = build_nozzle_map(
                value_to_update, model_to_update.pipette_channels
            )
            geometry["nozzleOffset"] = value_to_update
        else:
            geometry = update(geometry, camel_list_to_update, value_to_update)
        # Validate before writing so a bad edit never reaches disk.
        PipetteGeometryDefinition.parse_obj(geometry)
        filepath = (
            ROOT
            / "geometry"
            / model_to_update.pipette_channels.name.lower()
            / model_to_update.pipette_type.value
        )
        save_data_to_file(
            filepath,
            f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
            geometry,
        )
    elif config_to_update[0] in PipettePhysicalPropertiesDefinition.__fields__:
        physical = _physical(
            model_to_update.pipette_channels,
            model_to_update.pipette_type,
            model_to_update.pipette_version,
        )
        physical = update(physical, camel_list_to_update, value_to_update)
        PipettePhysicalPropertiesDefinition.parse_obj(physical)
        filepath = (
            ROOT
            / "general"
            / model_to_update.pipette_channels.name.lower()
            / model_to_update.pipette_type.value
        )
        save_data_to_file(
            filepath,
            f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
            physical,
        )
    elif config_to_update[0] in PipetteLiquidPropertiesDefinition.__fields__:
        liquid = _liquid(
            model_to_update.pipette_channels,
            model_to_update.pipette_type,
            model_to_update.pipette_version,
        )
        # BUGFIX: was `update(physical, ...)` -- NameError in this branch.
        liquid = update(liquid, camel_list_to_update, value_to_update)
        PipetteLiquidPropertiesDefinition.parse_obj(liquid)
        filepath = (
            ROOT
            / "liquid"
            / model_to_update.pipette_channels.name.lower()
            / model_to_update.pipette_type.value
        )
        save_data_to_file(
            filepath,
            f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
            liquid,
        )
    else:
        raise KeyError(
            f"{config_to_update} is not saved to a file. Check `pipette_definition.py` for more information."
        )
def _update_single_model(configuration_to_update: List[str]) -> None:
    """Helper function to update single model.

    Interactively prompts for the pipette model, channel count and version,
    then for the new value, and writes the change via
    load_and_update_file_from_config.
    """
    print(f"choose {PipetteModelType.__name__}:")
    for row in list_available_enum(PipetteModelType):
        print(f"\t{row}")
    model = list(PipetteModelType)[int(input("Please select from above\n"))]
    print(f"choose {PipetteChannelType.__name__}:")
    for row in list_available_enum(PipetteChannelType):
        print(f"\t{row}")
    channels = list(PipetteChannelType)[int(input("Please select from above\n"))]
    # Validate the version string before converting it to the typed version.
    version = PipetteVersionType.convert_from_float(
        float(check_from_version(input("Please input the version of the model\n")))
    )
    built_model: PipetteModel = PipetteModel(
        f"{model.name}_{str(channels)}_v{version.major}.{version.minor}"
    )
    if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
        print(
            "You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
        )
        print("Otherwise, please type 'null' on the next line.\n")
    # The new value is parsed as JSON so lists/dicts/null can be entered.
    value_to_update = json.loads(
        input(
            f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
        )
    )
    model_version = convert_pipette_model(built_model)
    load_and_update_file_from_config(
        configuration_to_update, value_to_update, model_version
    )
def _update_all_models(configuration_to_update: List[str]) -> None:
    """Apply the requested configuration change to every pipette model.

    Walks the liquid definition tree (one file per channel/model/version) to
    enumerate all existing models, prompting for a value per model.
    """
    paths_to_validate = ROOT / "liquid"
    # Directory names use long channel labels; model strings use short ones.
    _channel_model_str = {
        "single_channel": "single",
        "ninety_six_channel": "96",
        "eight_channel": "multi",
    }
    for channel_dir in os.listdir(paths_to_validate):
        for model_dir in os.listdir(paths_to_validate / channel_dir):
            for version_file in os.listdir(paths_to_validate / channel_dir / model_dir):
                # File names encode the version as "<major>_<minor>.json".
                version_list = version_file.split(".json")[0].split("_")
                built_model: PipetteModel = PipetteModel(
                    f"{model_dir}_{_channel_model_str[channel_dir]}_v{version_list[0]}.{version_list[1]}"
                )
                if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
                    print(
                        "You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
                    )
                    print("Otherwise, please type 'null' on the next line.\n")
                # JSON-parse the entered value (supports lists/dicts/null).
                value_to_update = json.loads(
                    input(
                        f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
                    )
                )
                model_version = convert_pipette_model(built_model)
                load_and_update_file_from_config(
                    configuration_to_update, value_to_update, model_version
                )
def determine_models_to_update(update_all_models: bool) -> None:
    """Interactive loop: pick a configuration key, then update model file(s).

    Loops until the user interrupts with Ctrl-C. When *update_all_models* is
    True the change is applied to every model, otherwise to one chosen model.
    """
    try:
        while True:
            print(f"choose {PipetteConfigurations.__name__}:")
            config_list, table_lookup = list_configuration_keys()
            for row in config_list:
                print(f"\t{row}")
            configuration_to_update = [
                table_lookup[int(input("select a configuration from above\n"))]
            ]
            if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[0]:
                print(
                    f"NOTE: updating the {configuration_to_update[0]} will automatically update the {NOZZLE_LOCATION_CONFIGS[1]}\n"
                )
            # Descend into nested pydantic models to build the full key path.
            field_type = PipetteConfigurations.__fields__[
                configuration_to_update[0]
            ].type_
            is_basemodel = isinstance(field_type, ModelMetaclass)
            configuration_to_update = handle_subclass_model(
                configuration_to_update, field_type, is_basemodel
            )
            if update_all_models:
                _update_all_models(configuration_to_update)
            else:
                _update_single_model(configuration_to_update)
    except KeyboardInterrupt:
        # Ctrl-C is the intended way to leave the loop, not an error.
        print("Finished updating! Validate that your files updated successfully.")
def main() -> None:
    """Entry point: parse CLI flags and run the interactive updater."""
    parser = argparse.ArgumentParser(
        # The previous description ("96 channel tip handling testing script")
        # was copied from an unrelated script.
        description="Update pre-existing V2 pipette configuration files."
    )
    # Fix: `type=bool` converted ANY non-empty string -- including "False" --
    # to True. A store_true flag has unambiguous semantics: present -> True,
    # absent -> False (same default as before).
    parser.add_argument(
        "--update_all_models",
        action="store_true",
        help="apply the requested configuration change to every pipette model",
    )
    args = parser.parse_args()
    determine_models_to_update(args.update_all_models)
if __name__ == "__main__":
"""
A script to automate building a pipette configuration definition.
This script can either perform migrations from a v1 -> v2 schema format
or build a brand new script from scratch.
When building a new pipette configuration model, you will either need
to provide CSVs or use command line inputs.
If you choose CSVs you will need one CSV for the general pipette configuration
data (such as pipette model or number of channels) and one for every tip
type that this pipette model can support.
"""
main()
|
19,153 | 77c36ee7c89a1a78900aeea002e8311fbaa0218a | # MIT License
# Copyright (c) 2018 Addison Lynch
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pyTD.auth import auth_check
from pyTD.accounts.base import AccountsAndTrading
from pyTD.utils.exceptions import ResourceNotFound
class Watchlists(AccountsAndTrading):
    """
    Class for retrieving and posting data via the Watchlists endpoint.

    Parameters
    ----------
    account_num: account number, optional
        Account number used for the watchlist
    api: pyTD.api.api object, optional
        A pyTD api object. If not passed, API requestor defaults to
        pyTD.api.default_api
    updated_list: list(WatchlistItem), optional
        list of watchlistitems, if adding a list
    """
    def __init__(self, **kwargs):
        # "method" (the HTTP verb) is required; it drives url() and execute().
        self.method = kwargs.pop("method")
        self.account_num = kwargs.pop("account_num", None)
        self.watchlist_id = kwargs.pop("watchlist_id", None)
        self.updated_list = kwargs.pop("updated_list", None)
        # Remaining kwargs are kept verbatim (includes the optional "api").
        self.opt = kwargs
        api = kwargs.get("api")
        # NOTE(review): super() is invoked on AccountsAndTrading (this class's
        # parent), so __init__ resolution skips AccountsAndTrading itself --
        # confirm the skip is intentional.
        super(AccountsAndTrading, self).__init__(api)

    @property
    def params(self):
        """Request payload: watchlist name plus its items, Nones stripped."""
        # if this doesn't work, try sending json instead or as data param
        p = {
            "name": self.watchlist_id,
            "watchlistItems": self.updated_list,
        }
        # clear out keys with empty values
        pars = {k: v for k, v in p.items() if v is not None}
        return pars

    def set_list_from_strings(self, new_symbols, asset_type):
        """Replace updated_list with items built from plain symbol strings.

        All symbols share one *asset_type*; items are stored in dict form.
        """
        new_list = []
        for symbol in new_symbols:
            w = WatchlistItem(instrument=(symbol, asset_type))
            new_list.append(w.get_dict())
        self.updated_list = new_list

    @property
    def resource(self):
        # REST resource segment appended to the endpoint URL.
        return "watchlists"

    @property
    def url(self):
        """Build the request URL; shape depends on verb and which ids are set."""
        if self.method == "POST":
            return "%s%s/%s/%s" % (self._BASE_URL, self.endpoint, self.account_num, self.resource)
        else:
            # Most-specific first: one watchlist, one account, or all accounts.
            if self.watchlist_id:
                return "%s%s/%s/%s/%s" % (self._BASE_URL, self.endpoint, self.account_num, self.resource, self.watchlist_id)
            elif self.account_num:
                return "%s%s/%s/%s" % (self._BASE_URL, self.endpoint, self.account_num, self.resource)
            else:
                return "%s%s/%s" % (self._BASE_URL, self.endpoint, self.resource)

    @auth_check
    def execute(self):
        """Dispatch the request by HTTP verb (POST currently disabled)."""
        # TODO: add in this URL accordingly
        if self.method == "GET":
            return self.get(url=self.url)
        # if self.method == "POST":
        #     return self.post(url=self.url.format(self.account_num))
        if self.method == "PUT":
            return self.put(url=self.url)
        if self.method == "DELETE":
            return self.delete(url=self.url)
class WatchlistItem:
    """
    Class for storing a watchlist item.

    Parameters
    ----------
    quantity: int, optional
        quantity of shares, defaults to 0
    average_price: float, optional
        average price of purchase, defaults to 0
    commission: float, optional
        price of commission, defaults to 0
    purchased_date: datetime object, optional
        date of purchase, defaults to DateParam\"
    instrument: tuple
        tuple of symbol and asset type. see below for types (symbol, asset type)
        assetType": "'EQUITY' or 'OPTION' or 'MUTUAL_FUND' or 'FIXED_INCOME' or 'INDEX'"
    """

    def __init__(self, **kwargs):
        # Plain attributes. The original defined read-only @property getters
        # with the SAME names as these attributes, so assigning self.quantity
        # here raised AttributeError (property without a setter) and each
        # getter recursed into itself. Dropping the redundant properties keeps
        # the public read access (e.g. item.quantity) working.
        self.quantity = kwargs.pop("quantity", 0)
        self.average_price = kwargs.pop("average_price", 0)
        self.commission = kwargs.pop("commission", 0)
        self.purchased_date = kwargs.pop("purchased_date", "DateParam\"")
        self.instrument = kwargs.pop("instrument")

    def get_dict(self):
        """Return the item in the JSON shape expected by the TD watchlist API."""
        return {
            "quantity": self.quantity,
            "averagePrice": self.average_price,
            "commission": self.commission,
            "purchasedDate": self.purchased_date,
            "instrument": {
                "symbol": self.instrument[0],
                "assetType": self.instrument[1]
            }
        }
|
19,154 | a3edfe115d3bbdb5769f1b3238c786cf3ab60900 | import os
# Point Django at the project settings, then import the framework so it can
# be bootstrapped (django.setup() below must run before any model imports).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookstore.settings')
import django
django.setup() |
19,155 | cb20086e0acb667058672624e82f5af78922f0ae | import numpy as np
import cv2
def find_objects(layer_list, confidence_threshold):
    """
    Select the bounding boxes whose best class score passes the confidence
    threshold and repack them into a flat detection list.

    :param layer_list: network output layers [layer1, layer2, ..., layerN]
    :param confidence_threshold: minimum confidence required to keep a box
    :return: detections as [[class, confidence, x, y, w, h], ...]
    """
    kept = []
    for layer_output in layer_list:
        for box in layer_output:
            # Per-class confidences start at index 5 of each box vector.
            class_scores = box[5:]
            best_class = np.argmax(class_scores)
            best_score = round(class_scores[best_class], 2)
            if best_score > confidence_threshold:
                kept.append([
                    best_class,
                    best_score,
                    # Clamp negative top-left coordinates to the image edge.
                    box[0] if box[0] > 0 else 0,
                    box[1] if box[1] > 0 else 0,
                    box[2],
                    box[3],
                ])
    return kept
def normalized_to_absolut_coordinates(detection_list, img_shape):
    """
    Convert normalized detections to absolute pixel coordinates.

    :param detection_list: [[class, confidence, x, y, w, h], ...] with
        center x/y and w/h normalized to [0, 1]
    :param img_shape: (height, width, channels) of the image
    :return: [[class, confidence, x, y, w, h], ...] in pixels, with x/y the
        top-left corner of the box
    """
    height, width, _channels = img_shape
    converted = []
    for det in detection_list:
        abs_w = int(det[4] * width)
        abs_h = int(det[5] * height)
        # Shift from box center to top-left corner while scaling.
        abs_x = int(det[2] * width - abs_w / 2)
        abs_y = int(det[3] * height - abs_h / 2)
        converted.append([det[0], det[1], abs_x, abs_y, abs_w, abs_h])
    return converted
def absolut_to_normalized_coordinates(detections_list, img_shape):
    """
    Convert absolute pixel detections to normalized coordinates.

    :param detections_list: [[class, confidence, x, y, w, h], ...] in pixels,
        with x/y the top-left corner of the box
    :param img_shape: (height, width, channels) of the image
    :return: [[class, confidence, x, y, w, h], ...] with center x/y and w/h
        normalized to [0, 1]
    """
    height, width, _channels = img_shape
    converted = []
    for det in detections_list:
        norm_w = det[4] / width
        norm_h = det[5] / height
        # Shift from top-left corner to box center while normalizing.
        norm_x = det[2] / width + norm_w / 2
        norm_y = det[3] / height + norm_h / 2
        converted.append([det[0], det[1], norm_x, norm_y, norm_w, norm_h])
    return converted
def draw_detections(image, detection_list, classes):
    """
    Draw bounding boxes and class names on the image and display it.

    :param image: image (numpy array)
    :param detection_list: detections in ABSOLUTE pixel values
    :param classes: class names indexed by class id
    :return: None (opens a cv2 window titled with the concatenated class
        names and blocks until a key is pressed)
    """
    placa = ''
    nome = ""
    # detection_list = sort_detections_by_x_value(detection_list)
    for detection in detection_list:
        # NOTE(review): `break` abandons ALL remaining detections as soon as
        # one has a non-positive coordinate or size -- confirm `continue`
        # was not intended here.
        if detection[2] <= 0 or detection[3] <= 0 or detection[4] <= 0 or detection[5] <= 0:
            break
        # print(detection)
        # Accumulate class names; the result doubles as the window title.
        nome += classes[detection[0]]
        x = detection[2]
        y = detection[3]
        w = detection[4]
        h = detection[5]
        # placa = detection[6]
        # Line thickness scales with the box width.
        espessura = int(w * 0.02)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), espessura)
        # cv2.rectangle(image, (x, y), (x + w, y - h), (0, 0, 0), -1)
        # cv2.putText(image, placa, (x, y - int(h * 0.3)), cv2.FONT_HERSHEY_SIMPLEX, w * 0.006,
        #             (0, 255, 0), thickness=int(w*0.02))
    cv2.imshow(nome, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def draw_detections_final(image, plate_detection_list, plate_classes, plates):
    """
    Draw the plate-network bounding boxes plus the characters recognized by
    the character network.

    :param image: image (numpy array), drawn on in place
    :param plate_detection_list: plate bounding boxes in absolute pixel values
    :param plate_classes: plate-network classes (currently unused here)
    :param plates: string with the characters of the detected plates
    :return: None
    """
    i = 0
    for detection in plate_detection_list:
        x = detection[2]
        y = detection[3]
        w = detection[4]
        h = detection[5]
        # Line thickness and font scale are proportional to the box width.
        espessura = int(w * 0.02)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), espessura)
        # Filled black banner above the box as a label background.
        cv2.rectangle(image, (x, y), (x + w, y - h), (0, 0, 0), -1)
        cv2.putText(image, plates, (x, y - int(h * 0.3)), cv2.FONT_HERSHEY_SIMPLEX, w * 0.006,
                    (0, 255, 0), thickness=int(w*0.02))
        i += 1
    # cv2.imshow('final', image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
def nms(detection_list, confidence_threshold, nms_threshold):
    """
    Apply Non-Maximum Suppression to a set of bounding boxes.

    :param detection_list: detections in ABSOLUTE values
                           [class_id, confidence, x, y, w, h]
    :param confidence_threshold: confidence cut-off
    :param nms_threshold: NMS overlap cut-off
    :return: filtered detection list after NMS
    """
    boxes = [det[2:] for det in detection_list]
    scores = [float(det[1]) for det in detection_list]
    labels = [det[0] for det in detection_list]
    keep = cv2.dnn.NMSBoxes(boxes, scores, confidence_threshold, nms_threshold)
    survivors = []
    # NMSBoxes returns one index per surviving box (wrapped in a 1-element
    # array in this OpenCV version).
    for idx in keep:
        idx = idx[0]
        x, y, w, h = boxes[idx][0], boxes[idx][1], boxes[idx][2], boxes[idx][3]
        survivors.append([labels[idx], round(scores[idx], 2), x, y, w, h])
    return survivors
def get_data_from_name(image_name):
    """
    Split the encoded image file name into its fixed-width data fields.

    :param image_name: image file name (extension is stripped first)
    :return: [year, month, day, hour, minute, second, code, certainty,
              plate, position, classification, speed, length, sequential]
    """
    stem = image_name.split(".")[0]
    # Fixed-width layout of the name; slicing replaces the original
    # list/join round-trip.
    return [
        stem[0:4],     # year
        stem[4:6],     # month
        stem[6:8],     # day
        stem[8:10],    # hour
        stem[10:12],   # minute
        stem[12:14],   # second
        stem[14:24],   # code
        stem[24:27],   # certainty
        stem[27:34],   # plate
        stem[34],      # position (single char; IndexError on short names, as before)
        stem[35:37],   # classification
        stem[37:40],   # speed
        stem[40:43],   # length
        stem[43:],     # sequential
    ]
def sort_detections_by_x_value(detection_list):
    """
    Sort detections by their x coordinate (index 2), ascending.

    Returns a new list of lists; the class id and box geometry (indices 0
    and 2-5) are cast to int, the confidence (index 1) is left untouched.

    :param detection_list: list of [class_id, confidence, x, y, w, h]
    :return: new sorted list (an empty input yields an empty list)
    """
    # Fix: the previous numpy-based version raised IndexError on an empty
    # list (array[:, 2] on a 1-D empty array) and needlessly round-tripped
    # every value through a float array. A stable stdlib sort is enough.
    ordered = []
    for det in sorted(detection_list, key=lambda d: d[2]):
        det = list(det)
        for i in (0, 2, 3, 4, 5):
            det[i] = int(det[i])
        ordered.append(det)
    return ordered
def generate_txt_label(name, detection_list, path):
    """
    Write a darknet/YOLO-format label file for one image: one line per
    detection, "<class_id> <x> <y> <w> <h>" with values rounded to 5
    decimal places.

    :param name: image file name (extension is stripped)
    :param detection_list: detections [class_id, conf, x, y, w, h]
    :param path: base directory containing the ``obj`` sub-directory
    :return:
    """
    stem = name.split(".")[0]
    label_file = path + '/obj/' + stem + '.txt'
    # Fix: open the file once instead of re-opening it in append mode for
    # every single detection; 'a' mode is kept so repeated calls still
    # accumulate lines as before.
    with open(label_file, 'a') as out:
        for detection in detection_list:
            class_id = round(detection[0])
            x = round(detection[2], 5)
            y = round(detection[3], 5)
            w = round(detection[4], 5)
            h = round(detection[5], 5)
            out.write(str(class_id) + " " + str(x) + " " + str(y) + " " + str(w) + " " + str(h) + "\n")
def generate_train_txt(name, path):
    """
    Append one image entry to the YOLO training list file.

    :param name: image file name
    :param path: directory where the txt is saved
    :return:
    """
    # Bug fix: this wrote to 'test.txt' (copy/paste from generate_test_txt),
    # so the training list clobbered/interleaved with the test list.
    with open(path + '/train.txt', 'a') as file:
        file.write('/content/YOLO_metric/data/obj/' + name + '\n')
def generate_test_txt(name, path):
    """
    Append one image entry to the YOLO test list file.

    :param name: image file name
    :param path: directory where the txt is saved
    :return:
    """
    entry = 'data/test/' + name + '\n'
    with open(path + '/test.txt', 'a') as list_file:
        list_file.write(entry)
def compara_resultados_placa(nome, detection_list, classes, plate_type):
    """
    Compare the recognised plate against the ground truth embedded in the
    image file name.

    :param nome: image file name (ground-truth plate is field 8 of the name)
    :param detection_list: detections [class, confidence, x, y, w, h]
    :param classes: class names, e.g. ["flor", "abajour"]
    :param plate_type: plate layout flag (currently unused here)
    :return: 1 on an exact match, otherwise 0
    """
    expected = list(get_data_from_name(nome)[8])
    # Read the detected characters left to right.
    recognised = [classes[det[0]]
                  for det in sort_detections_by_x_value(detection_list)]
    return 1 if recognised == expected else 0
def ajusta_char_placa(detection_list, classes_list, plate_type, adjust=True):
    """
    Map the detections to characters (left to right) and optionally fix the
    commonly confused look-alike glyphs.

    :param detection_list: detections [class, conf, x, y, w, h]
    :param classes_list: class names (single characters)
    :param plate_type: truthy for the old plate layout; falsy for the new
                       layout whose 5th character must be a letter
    :param adjust: apply the look-alike corrections when True
    :return: list of (possibly corrected) characters
    """
    chars = [classes_list[det[0]]
             for det in sort_detections_by_x_value(detection_list)]
    if adjust:
        chars = ajusta_letras(chars)
        chars = ajusta_numeros(chars)
        # New-style plates additionally constrain the 5th position.
        if not plate_type:
            chars = ajusta_np(chars)
    chars = list(chars)
    # Round-trip each character through the class list. This keeps the
    # original behaviour of raising ValueError if a corrected character is
    # not a known class.
    return [classes_list[classes_list.index(ch)] for ch in chars]
def ajusta_np(letras):
    """
    New-style plates require a letter in position 4 (the 5th character):
    replace digits that look like letters there, in place.

    :param letras: list of plate characters (mutated and returned)
    :return: the same list
    """
    digit_to_letter = {'0': 'O', '1': 'I', '5': 'S', '4': 'A', '6': 'S'}
    letras[4] = digit_to_letter.get(letras[4], letras[4])
    # Logged unconditionally, exactly as before.
    print("Uma placa nova foi encontrada e houve correção no 4º digito")
    return letras
def ajusta_letras(letras):
    """
    Replace digits that resemble letters in the first three positions,
    which must be letters on Brazilian plates.

    :param letras: list of plate characters (mutated and returned)
    :return: the same list
    """
    lookalikes = {'0': 'O', '1': 'I', '5': 'S', '4': 'A',
                  '6': 'S', '8': 'B', '3': 'C'}
    for i in range(0, 3):
        letras[i] = lookalikes.get(letras[i], letras[i])
    return letras
def ajusta_numeros(numeros):
    """
    Replace letters that resemble digits in positions 3..6, which must be
    digits on old-layout plates.

    :param numeros: list of plate characters (mutated and returned)
    :return: the same list
    """
    lookalikes = {'O': '0', 'I': '1', 'Z': '2', 'D': '0', 'B': '8',
                  'X': '8', 'Q': '0', 'A': '4', 'E': '3', 'S': '5'}
    for i in range(3, 7):
        numeros[i] = lookalikes.get(numeros[i], numeros[i])
    return numeros
def cortar(image, detections_list):
    """
    Crop every detection out of the full image.

    :param image: full image (numpy array)
    :param detections_list: detections [class, conf, x, y, w, h]
    :return: list of cropped images (views into the original array)
    """
    crops = []
    for det in detections_list:
        # Clamp negative coordinates/sizes to zero before slicing.
        x = max(det[2], 0)
        y = max(det[3], 0)
        w = max(det[4], 0)
        h = max(det[5], 0)
        crops.append(image[y:y + h, x:x + w])
    return crops
def get_plate_char(char_detections, classes, plate_type, adjust=True):
    """
    Assemble the plate string from the character detections, ordered left
    to right and optionally corrected for look-alike glyphs.

    :param char_detections: detections for single characters
    :param classes: character class names
    :param plate_type: truthy = old plate layout, falsy = new layout
    :param adjust: apply the look-alike corrections when True
    :return: the plate as a single string
    """
    # NOTE(review): a letters/digits layout validation used to live here
    # (rejecting plates whose character positions had the wrong kind); it
    # is currently disabled.
    ordered = sort_detections_by_x_value(char_detections)
    corrected = list(ajusta_char_placa(ordered, classes, plate_type, adjust=adjust))
    return ''.join(corrected)
def draw_detected_plates(image, plates):
    """
    Paint a black panel in the top-left corner of the image listing every
    recognised plate, one per visual row.

    :param image: image to annotate (mutated in place)
    :param plates: iterable of plate strings
    """
    panel_height = 30 * len(plates)
    cv2.rectangle(image, (0, 0), (150, panel_height), (0, 0, 0), -1)
    for row, plate in enumerate(plates):
        # Same vertical spacing as before: 25 + 30 * row pixels.
        offset = row * 3
        cv2.putText(image, plate, (0, 25 + (offset * 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), thickness=3)
def sizetest(img):
    """
    Return 1 when img is a numpy array with positive height and width,
    otherwise 0.

    :param img: candidate image
    :return: 1 (usable) or 0 (not a numpy array / degenerate size)
    """
    if type(img).__module__ != np.__name__:
        return 0
    # Assumes a 3-channel image: (height, width, channels).
    y, x, c = img.shape
    return 1 if y > 0 and x > 0 else 0
|
19,156 | 794a0f6436e20b1ef5cdd7b61640f9b186eed86d | import time
import hashlib
import random
import logging
import socket
import re, uuid
import base64
import os, random, struct
import subprocess
from collections import namedtuple
from Cryptodome.Cipher import AES
from Cryptodome import Random
from Cryptodome.Hash import SHA256
from optparse import *
from _thread import *
import asn1tools
import threading
import cProfile, pstats, io
from random import randint
import json
from json import JSONEncoder
# Profile the whole key-exchange section.
pr = cProfile.Profile()
pr.enable()
encrypt_start = time.perf_counter()
# NOTE(review): this fragment references names not defined at module level
# (encrypting, SK, asn1_file, self.connection); it appears to have been
# lifted out of a class method — confirm before running as-is.
print("Printing secret key...\n")
secret_key = "secret.key"
print("Printing nbit key...\n")
nbit_key = "nbit.key"
# Encrypt both key files with the shared key SK; encrypting() returns the
# output file name.
output_secret_key = encrypting(SK, secret_key)
print("This file", output_secret_key, "is encrypted secret key\n")
output_nbit_key = encrypting(SK, nbit_key)
print("This file", output_nbit_key, "is encrypted nbit key\n")
s = open(output_secret_key, "rb")
keycontent = s.read(8192)
t = open(output_nbit_key, "rb")
nbitcontent = t.read(8192)
# Encode key in BER format
priv_key_BER = asn1_file.encode('DataKey', {'key': keycontent, 'nbit': nbitcontent})
# Send the BER encoded file to the peer, 8 KiB at a time, until either
# file is exhausted.
while (keycontent and nbitcontent):
    self.connection.sendall(priv_key_BER)
    keycontent = s.read(8192)
    nbitcontent = t.read(8192)
    priv_key_BER = asn1_file.encode('DataKey', {'key': keycontent, 'nbit': nbitcontent})
s.close()
t.close()
encrypt_stop = time.perf_counter()
# writing time taken to generate shared key between keygen and client
KeyExchangeTiming = open('time.txt', 'a')
encrypt_time_total = round((encrypt_stop - encrypt_start), 3)
KeyExchangeTiming.write('\nTotal Time Taken to Encryption/Decryption of keys for' + str(self.connection) + ': ')
KeyExchangeTiming.write(str(encrypt_time_total))
KeyExchangeTiming.write(str('\n============================='))
KeyExchangeTiming.close()
# Report file sizes and checksums for a manual sanity check.
print('Original secret key file size: ', os.path.getsize(secret_key))
print ('Encrypted secret key file size: ', os.path.getsize(output_secret_key))
os.system("md5sum secret.key")
print('Original nbit key file size: ', os.path.getsize(nbit_key))
print ('Encrypted nbit key file size: ', os.path.getsize(output_nbit_key))
os.system("md5sum nbit.key")
|
19,157 | 5cc7de879a61ad32cfb7a4e9461dab906e602fa2 | from inspect import getcallargs
def fun():
    # Debug trace emitted when the subscriber is initialised (Python 2).
    print "subcriber__init__"
def authenticate(request=None, **credentials):
    """
    If the given credentials are valid, return a User object.
    """
    # NOTE(review): currently only echoes its arguments — no validation is
    # performed and nothing is returned.
    print request, credentials
# Demo values for positional/keyword argument experiments (shadowed by the
# parameters of f below).
pos = (1,2)
named = {'a': 1}
def f(a, *pos, **named):
    # Only the first positional argument is printed (Python 2).
    print a
|
19,158 | 33e26742ceca75854fbec7b4e3a72c9c4dde3e19 | '''
Created on 16/10/2014
@author: Mugul
'''
import unittest
from estacionamiento import *
class Test(unittest.TestCase):
    """Tests for the estacionamiento (parking) module: reservaPuesto,
    tiempoACobrar, desocuparPuesto and IntentarEstacionar.

    Relies on the module-level globals imported via
    ``from estacionamiento import *`` (estacionamiento, tiempo,
    placaPuesto, bloquesDeTiempo).
    """
    ##################################################################
    ######################     reservaPuesto    ######################
    ##################################################################
    def testReservaPuestoDicNoInicializado(self):
        # reservaPuesto must reject a missing plate->spot dictionary.
        self.assertEqual("El diccionario Placa-Puesto no existe",
                         reservaPuesto(estacionamiento,tiempo,"123123",None),"Diccionario no existe" )
    def testReservaPuestoTiemposIncorrectos(self):
        # Reserved time must be positive and below 24 hours.
        tiempo = (-1 , 2)
        self.assertEqual("El tiempo reservado debe ser positivo y menor que 24",
                         reservaPuesto(estacionamiento,tiempo,"123123",placaPuesto),"Tiempo incorrecto" )
    def testReservaMatrizIncorrecta(self):
        # Last row has the wrong number of time slots, so the matrix must
        # be rejected as dimensionally invalid.
        matriz = [
            [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
        ]
        self.assertEqual("Matriz de dimension invalida, debe ser Nx%d"%bloquesDeTiempo,
                         reservaPuesto(matriz,tiempo,"123123",placaPuesto),"Dimension de matriz incorrecta" )
    def testReservaPuestoPlacaNoInicializada(self):
        # The plate must be a string, not None.
        self.assertEqual("Placa debe ser un string",
                         reservaPuesto(estacionamiento,tiempo,None,placaPuesto),"Placa no esta creada" )
    def testReservaPuestoMatrizOcupada(self):
        # Every slot occupied (1) -> no free spots.
        matriz = [
            [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
            [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
            [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
            [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
        ]
        self.assertEqual("No hay puestos desocupados",
                         reservaPuesto(matriz,tiempo,"123123",placaPuesto),"Estacionamiento ocupado" )
    def testReservaPuestoMatrizReservadaDesocupado(self):
        # Every slot reserved-but-empty (2) still counts as unavailable.
        matriz = [
            [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2],
            [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2],
            [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2],
            [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2],
        ]
        self.assertEqual("No hay puestos desocupados",
                         reservaPuesto(matriz,tiempo,"123123",placaPuesto),"Estacionamiento reservado desocupado" )
    def testReservaPuestoMatrizReservadaOcupada(self):
        # Every slot reserved-and-occupied (3) is also unavailable.
        matriz = [
            [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3],
            [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3],
            [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3],
            [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3],
            [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3],
        ]
        self.assertEqual("No hay puestos desocupados",
                         reservaPuesto(matriz,tiempo,"123123",placaPuesto),"Estacionamiento reservado ocupado" )
    #######################################
    # This section tests the              #
    # tiempoACobrar function.             #
    #######################################
    # Basic behaviour test.
    # Checked on a simple matrix.
    def testBasico(self):
        for i in range(3,6):
            estacionamiento[2][i] = 1
        placaPuesto["placa"]=2
        salida = tiempoACobrar("placa",3,6,placaPuesto)
        print (salida)
    # Failed on the first attempt (return in the wrong position);
    # ran correctly on the second attempt.
    # Driver: Francisco Martinez
    # Check with a fuller matrix.
    def testBasico2(self):
        for i in range(3,7):
            estacionamiento[2][i] = 1
        placaPuesto["placa"]=2
        salida = tiempoACobrar("placa",3,6,placaPuesto)
        print (salida)
    # Ran correctly on the first attempt.
    # Driver: Francisco Martinez
    # Check with several different slot states.
    def testBasico3(self):
        for i in range(3,7):
            estacionamiento[2][i] = 1
        for i in range(7,9):
            estacionamiento[2][i] = 2
        for i in range(9,10):
            estacionamiento[2][i] = 3
        placaPuesto["placa"]=2
        salida = tiempoACobrar("placa",3,10,placaPuesto)
        print (salida)
    # Ran correctly on the first attempt.
    # Driver: Francisco Martinez
    # Test the case of a wrong (unknown) plate.
    def testPlacaErronea(self):
        for i in range(3,7):
            estacionamiento[2][i] = 1
        for i in range(7,9):
            estacionamiento[2][i] = 2
        for i in range(9,10):
            estacionamiento[2][i] = 3
        placaPuesto["placa"]=2
        salida = tiempoACobrar("placa2",3,10,placaPuesto)
        print (salida)
    # Failed on the first attempt; fixed with a try.
    # Worked on the next attempt.
    # Driver: Francisco Martinez
    # Try passing an empty dictionary.
    def testDiccionarioVacio(self):
        for i in range(3,7):
            estacionamiento[2][i] = 1
        for i in range(7,9):
            estacionamiento[2][i] = 2
        for i in range(9,10):
            estacionamiento[2][i] = 3
        placaPuesto["placa"]=2
        salida = tiempoACobrar("placa",3,10,None)
        print (salida)
    # Worked on the first attempt.
    # Driver: Francisco Martinez
    #######################################
    # This section tests the              #
    # desocuparPuesto function.           #
    #######################################
    # Pass an empty (None) dictionary.
    def testDV(self) :
        for i in range(3,7):
            estacionamiento[2][i] = 1
        for i in range(7,9):
            estacionamiento[2][i] = 2
        for i in range(9,10):
            estacionamiento[2][i] = 3
        placaPuesto["placa"]=2
        desocuparPuesto("placa",10,None)
    # Worked on the first attempt.
    # Basic case: it must free the spot in the dictionary and clear the
    # matrix.
    def testBasicoDesocupar(self) :
        for i in range(3,7):
            estacionamiento[2][i] = 1
        for i in range(10,24):
            estacionamiento[2][i] = 1
        placaPuesto["placa"]=2
        desocuparPuesto("placa",10,placaPuesto)
    # Done correctly on the first attempt.
    # Edge case: freeing from the last time slot.
    def testBasicoDesocupar2(self) :
        for i in range(3,7):
            estacionamiento[2][i] = 1
        for i in range(10,24):
            estacionamiento[2][i] = 1
        placaPuesto["placa"]=2
        desocuparPuesto("placa",24,placaPuesto)
    # Done correctly on the first attempt.
    # Edge case: freeing from the first time slot.
    def testBasicoDesocupar3(self) :
        estacionamiento[2][0]=1
        for i in range(3,7):
            estacionamiento[2][i] = 1
        for i in range(10,24):
            estacionamiento[2][i] = 1
        placaPuesto["placa"]=2
        print (estacionamiento)
        desocuparPuesto("placa",0,placaPuesto)
        print (estacionamiento)
    # Done correctly on the first attempt.
    # Smoke test chaining the four public functions.
    def testCuatroFunciones(self):
        tiempo = (1,2)
        placaPuesto["placa"] = 2
        reservaPuesto(estacionamiento,tiempo,"placa",placaPuesto)
        tiempoACobrar("placa",1,2,placaPuesto)
        desocuparPuesto("placa",2,placaPuesto)
        IntentarEstacionar("placa",1)
        print ("No hubo errores.")
if __name__ == "__main__":
    # Run the whole suite when executed directly.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
19,159 | ece5100c1706fa6f5e71fd7609a619a2f5f5b9bd | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 1 18:35:16 2018
@author: Cameron
This program builds attributes for each input into the model.
All functions are called at the bottom.
"""
import mysqlConnection as md
#import zipcodeDistance as zd
import pandas as pd
import simplejson
import urllib.request
def getSeaPortData(engine, zipcode, zipList):
    """
    Rate sea-port proximity for a zipcode and its neighbours:
    1 = no ports, 2 = one port, 3 = two or more.

    :param engine: SQLAlchemy engine/connection
    :param zipcode: target zipcode (string)
    :param zipList: neighbouring zipcodes (strings)
    :return: rating 1..3
    """
    # NOTE(review): query built by string interpolation — acceptable for
    # internally generated zipcodes, unsafe for untrusted input.
    in_clause = ",".join("'" + z + "'" for z in [zipcode] + list(zipList))
    query = "SELECT * from dddm.seaports_final where ZIPCODE in (" + in_clause + ")"
    data = pd.read_sql(query, engine)
    count = len(data.index)
    if count == 0:
        return 1
    if count < 2:
        return 2
    return 3
def getLandPricesData(engine, zipcode, zipList):
    """
    Rate land/building cost for a zipcode and its neighbours:
    3 = cheap, 2 = mid, 1 = expensive, -1 = no data.

    :param engine: SQLAlchemy engine/connection
    :param zipcode: target zipcode (string)
    :param zipList: neighbouring zipcodes (strings)
    :return: rating 1..3, or -1 when no rows matched
    """
    in_clause = ",".join("'" + z + "'" for z in [zipcode] + list(zipList))
    query = "SELECT * from dddm.land_prices_final where zip in (" + in_clause + ")"
    data = pd.read_sql(query, engine)
    # Account for missing data by returning -1.
    if len(data.index) == 0:
        return -1
    avg_cost_index = data['structure_cost_norm'].mean()
    if avg_cost_index < 0.33:
        return 3
    if avg_cost_index < 0.67:
        return 2
    return 1
def getOilReservesData(engine, zipcode, zipList):
    """
    Rate oil reserves near the zipcode: 1 = none, 2 = some, 3 = high.

    :param engine: SQLAlchemy engine/connection
    :param zipcode: target zipcode (string)
    :param zipList: neighbouring zipcodes (strings)
    :return: rating 1..3
    """
    in_clause = ",".join("'" + z + "'" for z in [zipcode] + list(zipList))
    query = "SELECT * from dddm.oil_reserve_final where zip in (" + in_clause + ")"
    data = pd.read_sql(query, engine)
    reserves = data['year16_norm'].max()
    # NOTE(review): an empty result makes max() NaN, which fails both
    # comparisons and falls through to 3 — confirm that is intended.
    if reserves == 0:
        return 1
    if reserves < .5:
        return 2
    return 3
def getExistingPlants(engine, zipcode, zipList):
    """
    Rate competition from existing plants: 3 = none nearby, 2 = a couple,
    1 = three or more.

    :param engine: SQLAlchemy engine/connection
    :param zipcode: target zipcode (string)
    :param zipList: neighbouring zipcodes (strings)
    :return: rating 1..3
    """
    in_clause = ",".join("'" + z + "'" for z in [zipcode] + list(zipList))
    query = "SELECT * from dddm.plant_locations where zip_code in (" + in_clause + ")"
    data = pd.read_sql(query, engine)
    count = len(data.index)
    if count == 0:
        return 3
    if count < 3:
        return 2
    return 1
def getDisasterData(engine, zipcode, zipList):
    """
    Rate natural-disaster exposure from fire/flood/hurricane mention
    counts: 3 = low, 2 = mid, 1 = high, -1 = no data.

    :param engine: SQLAlchemy engine/connection
    :param zipcode: target zipcode (string)
    :param zipList: neighbouring zipcodes (strings)
    :return: rating 1..3, or -1 when no rows matched
    """
    in_clause = ",".join("'" + z + "'" for z in [zipcode] + list(zipList))
    query = "SELECT * from dddm.disaster_data_final where zip in (" + in_clause + ")"
    data = pd.read_sql(query, engine)
    # Account for missing data by returning -1.
    if len(data.index) == 0:
        return -1
    # Average of the three normalised mention counts.
    overall_mean = (data['NumFireReferences_norm'].mean()
                    + data['NumFloodReferences_norm'].mean()
                    + data['NumHurricaneReferences_norm'].mean()) / 3
    if overall_mean < .2:
        return 3
    if overall_mean < .5:
        return 2
    return 1
def getRailroadData(engine, zipcode, zipList):
    """
    Rate railroad freight availability: 1 = none, 2 = some, 3 = high,
    -1 = no data.

    :param engine: SQLAlchemy engine/connection
    :param zipcode: target zipcode (string)
    :param zipList: neighbouring zipcodes (strings)
    :return: rating 1..3, or -1 when no rows matched
    """
    in_clause = ",".join("'" + z + "'" for z in [zipcode] + list(zipList))
    query = "SELECT * from dddm.railroad_data_final where zip in (" + in_clause + ")"
    data = pd.read_sql(query, engine)
    # Account for missing data by returning -1.
    if len(data.index) == 0:
        return -1
    avg_freight_tons = data['Tons_norm'].mean()
    if avg_freight_tons == 0:
        return 1
    if avg_freight_tons < .3:
        return 2
    return 3
def getPopulationDensityData(engine, zipcode, zipList):
    """
    Rate population density: 3 = sparse (but non-zero), 2 = moderate,
    1 = dense, -1 = no data.

    :param engine: SQLAlchemy engine/connection
    :param zipcode: target zipcode (string)
    :param zipList: neighbouring zipcodes (strings)
    :return: rating 1..3, or -1 when no rows matched
    """
    in_clause = ",".join("'" + z + "'" for z in [zipcode] + list(zipList))
    query = "SELECT * from dddm.population_density_final where zip in (" + in_clause + ")"
    data = pd.read_sql(query, engine)
    if len(data.index) == 0:
        return -1
    density = data['density_norm'].mean()
    # A tiny lower bound excludes exactly-zero density from the best score
    # (zero falls through to the next bucket).
    if .000000001 < density < .01:
        return 3
    if density < .03:
        return 2
    return 1
def fetch_earthquake_data(zipcode):
    """
    Return earthquake records within about +/-1 degree of the zipcode's
    coordinates.

    :param zipcode: zipcode (numeric or string)
    :return: DataFrame of matching dddm.earthquake_data rows
    """
    engine = md.connect()
    # fetch latitude and longitude for zipcode; the int() cast drops any
    # leading zeros, so the lookup also tries '0'/'00'-prefixed variants.
    zipcode = str(int(zipcode))
    #print(zipcode)
    query1 = "SELECT * FROM dddm.zip_lookup where zip = '" + zipcode + "' OR zip='0" + zipcode +"' OR zip = '00" + zipcode +"' LIMIT 0,1"
    zip_data = pd.read_sql(query1, engine)
    # Half-width (degrees) of the latitude/longitude search box.
    coord = 1.0
    # NOTE(review): int(Series) assumes exactly one row was returned and
    # truncates the coordinate to a whole degree — confirm both are OK.
    lat_range1 = str(int(zip_data['lat']) + coord)
    lat_range2 = str(int(zip_data['lat']) - coord)
    lng_range1 = str(int(zip_data['lng']) + coord)
    lng_range2 = str(int(zip_data['lng']) - coord)
    query2 = "SELECT * from dddm.earthquake_data where latitude BETWEEN '" + lat_range2 + "' and '" + lat_range1 + "' AND longitude BETWEEN '" \
        + lng_range2 + "' and '" + lng_range1 + "'"
    earthquake_data = pd.read_sql(query2, engine)
    return earthquake_data
#fetch rules
#Qualitative Data
def fetch_rules():
    """
    Return the text of the first stored rule that mentions 'General'.

    :return: rule string (first matching row)
    """
    engine = md.connect()
    rules = pd.read_sql("SELECT * FROM dddm.rules where rule like '%%General%%'", engine)
    return rules['rule'].iloc[0]
#fetch water data
# fetch water data
def fetch_water_data(zipcode):
    """
    Return water-location records within about +/-0.5 degree of the
    zipcode's coordinates.

    :param zipcode: zipcode (numeric or string)
    :return: DataFrame of matching dddm.water_locations rows
    """
    engine = md.connect()
    # The int() cast drops leading zeros, so the lookup also tries the
    # '0'/'00'-prefixed variants.
    zipcode = str(int(zipcode))
    #print(zipcode)
    query1 = "SELECT * FROM dddm.zip_lookup where zip = '" + zipcode + "' OR zip='0" + zipcode +"' OR zip = '00" + zipcode +"' LIMIT 0,1"
    zip_data = pd.read_sql(query1, engine)
    # Half-width (degrees) of the latitude/longitude search box.
    coord = 0.5
    # NOTE(review): int(Series) assumes exactly one lookup row — confirm.
    lat_range1 = str(int(zip_data['lat']) + coord)
    lat_range2 = str(int(zip_data['lat']) - coord)
    lng_range1 = str(int(zip_data['lng']) + coord)
    lng_range2 = str(int(zip_data['lng']) - coord)
    query2 = "SELECT * FROM dddm.water_locations where LatitudeMeasure BETWEEN '" \
        + str(lat_range2) + "' and '" + str(lat_range1) + "' AND LongitudeMeasure BETWEEN '" \
        + str(lng_range2) + "' and '" + str(lng_range1) + "'"
    water_data = pd.read_sql(query2, engine)
    return water_data
#fetch elevation data from google API
# fetch elevation data from google API
def fetch_elevation_data(engine, zipcode):
    """
    Rate a zipcode's elevation via the Google Elevation API:
    3 = low, 2 = mid, 1 = high, -1 = zipcode not found.

    :param engine: SQLAlchemy engine/connection for the zip lookup
    :param zipcode: zipcode (numeric or string)
    :return: rating 1..3, or -1 when the zipcode is unknown
    """
    #engine = md.connect()
    zipcode = str(int(zipcode))
    #print(zipcode)
    query = "SELECT * FROM dddm.zip_lookup where zip = '" + zipcode + "' OR zip='0" + zipcode +"' OR zip = '00" + zipcode +"'"
    zip_data = pd.read_sql(query,engine)
    if len(zip_data.index) == 0:
        return -1
    #print(zip_data)
    latitude = str(zip_data.iloc[0]['lat'])
    longitude = str(zip_data.iloc[0]['lng'])
    base_url = "https://maps.googleapis.com/maps/api/elevation/json?locations="
    # SECURITY(review): the API key is hard-coded in source. Move it to
    # configuration/environment and rotate the exposed key.
    key_url = "&key=AIzaSyAbFTeYx8kS0d7jH20xcm05QEUCDcdhL3U"
    location = latitude + "," + longitude
    api_url = base_url + location +key_url
    json_output = simplejson.load(urllib.request.urlopen(api_url))
    result = float(json_output["results"][0]["elevation"])
    # Bucket the elevation (values appear to be metres — confirm).
    if(-1 < result <= 1029.00):
        return 3
    elif(1029.00 < result <= 2058):
        return 2
    return 1
#fetch weather data
def fetch_weather_data(zipcode):
    """
    Return weather observations for the state containing the zipcode.

    :param zipcode: zipcode (numeric or string)
    :return: DataFrame of dddm.weather_observations rows for that state
    """
    engine = md.connect()
    # The int() cast drops leading zeros, so the lookup also tries the
    # '0'/'00'-prefixed variants.
    zipcode = str(int(zipcode))
    query1 = "SELECT * FROM dddm.zip_lookup where zip = '" + zipcode + "' OR zip='0" + zipcode + "' OR zip = '00" + zipcode + "'"
    zip_data = pd.read_sql(query1, engine)
    # Bug fix: str(zip_data['state_id']) stringified the entire pandas
    # Series (index and dtype included), so the generated SQL could never
    # match a state. Take the scalar value from the first row instead,
    # matching the access pattern used in fetch_elevation_data.
    state = str(zip_data.iloc[0]['state_id'])
    query2 = "SELECT * FROM dddm.weather_observations where State = '" + state + "'"
    weather_data = pd.read_sql(query2, engine)
    return weather_data
|
19,160 | 560876c95977ad8c73a5af99511b2caa29a0a3ae | import sys
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from keras.models import load_model
# Load the pre-extracted features/labels and rebuild a train/test split.
Data = np.load('./extracted_data.npz')
X, y = Data['X'], Data['y']
# NOTE(review): train_test_split without a fixed random_state produces a
# different split every run, so this split need not match training's.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y)
# Model path is taken from the first command-line argument.
Model = load_model( sys.argv[1] )
y_pred = Model.predict_classes( X_test )
y_test = y_test.reshape( (y_test.shape[0],) )
labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
cm = confusion_matrix(y_test, y_pred, labels=[0,1,2,3,4,5,6])
# Normalise each row to percentages of that true class's total count.
sums = np.sum(cm, axis=1, keepdims=True)
percentages = cm / sums.astype(float) * 100
# Annotate each cell with "percent\ncount/row-total".
annotations = np.empty_like(percentages).astype(str)
for row in range(7):
    cls_sum = sums[row]
    for col in range(7):
        perc = percentages[row, col]
        cnt = cm[row, col]
        annotations[row, col] = '%.2f%%\n%d/%d' % (perc, cnt, cls_sum)
cm = pd.DataFrame(percentages, index=labels, columns=labels)
cm.index.name = 'True'
cm.columns.name = 'Predicted'
# Render the annotated heatmap, save it, then show it.
fig, axarr = plt.subplots( figsize=(16,16) )
axarr.set(aspect='equal')
sns.heatmap(cm, annot=annotations, fmt='', cmap='YlGnBu')
plt.savefig('./Confusion_Matrix.png')
plt.show()
|
19,161 | d099dedd72c900aab0ee25870be5e3ae8281c084 | import unittest
from crawler import PttSpider
class TestCase(unittest.TestCase):
    """Unit tests for PttSpider.image_url: direct image links are kept
    (gaining a .jpg suffix when missing), album/gallery pages yield no
    images."""
    def test_01(self):
        # Direct imgur image URL is returned unchanged.
        input_data = 'https://i.imgur.com/oqxkFn0.jpg'
        result = ['https://i.imgur.com/oqxkFn0.jpg']
        self.assertEqual(PttSpider.image_url(input_data), result)
    def test_02(self):
        # Extension-less direct link gets a .jpg suffix appended.
        input_data = 'https://i.imgur.com/oqxkFn0'
        result = ['https://i.imgur.com/oqxkFn0.jpg']
        self.assertEqual(PttSpider.image_url(input_data), result)
    def test_03(self):
        # Album pages produce no image URLs.
        input_data = 'https://imgur.com/a/WKVB1Jc'
        result = []
        self.assertEqual(PttSpider.image_url(input_data), result)
    def test_04(self):
        # Plain imgur page link is treated as an image and suffixed.
        input_data = 'http://imgur.com/GEQEdqy'
        result = ['http://imgur.com/GEQEdqy.jpg']
        self.assertEqual(PttSpider.image_url(input_data), result)
    def test_05(self):
        # Gallery pages produce no image URLs.
        input_data = 'https://imgur.com/gallery/EuEjONQ'
        result = []
        self.assertEqual(PttSpider.image_url(input_data), result)
    def test_06(self):
        # Non-imgur direct image links pass through unchanged.
        input_data = 'http://img.yaplog.jp/img/18/pc/l/p/-/lp-n-rena/1/1680_large.png'
        result = ['http://img.yaplog.jp/img/18/pc/l/p/-/lp-n-rena/1/1680_large.png']
        self.assertEqual(PttSpider.image_url(input_data), result)
if __name__ == '__main__':
    # Run the suite when executed directly.
    unittest.main()
|
19,162 | f10d16ee2c1e23bc32508bb7025f0bbdc9de606a | import numpy as np
from sklearn import svm
import csv
# Feature-vector dimensionality of the homework data files.
DIM = 5000


def read_q2_file(file_name, rows):
    """
    Parse a libsvm-style file ("label idx:val idx:val ...") into a dense
    feature matrix and a label vector.

    :param file_name: path to the data file
    :param rows: number of examples (rows to allocate)
    :return: (X, Y) with X of shape (rows, DIM) and Y of shape (rows,)
    """
    X = np.zeros([rows, DIM])
    Y = np.zeros(rows)
    with open(file_name) as csvfile:
        for row_idx, row in enumerate(csv.reader(csvfile, delimiter=' ')):
            Y[row_idx] = float(row[0])
            # The final field is skipped — data lines carry a trailing
            # token that is not a feature.
            for feature in row[1:-1]:
                idx, val = feature.split(':')
                # Feature indices in the file are 1-based.
                X[row_idx, int(idx) - 1] = float(val)
    return (X, Y)
def svmTrain(dataTrain, labelTrain, cost, kernel, gamma, degree):
    """
    Fit an SVC with the given hyper-parameters.

    :return: (fitted model, total number of support vectors)
    """
    model = svm.SVC(C=cost, kernel=kernel, gamma=gamma, degree=degree, coef0=1.0)
    model.fit(dataTrain, labelTrain)
    return (model, model.support_.size)
def svmPredict(data, lable, svmModel):
    """
    Return the fraction of examples the model classifies correctly.

    :param data: feature matrix
    :param lable: ground-truth labels (numpy array)
    :param svmModel: object exposing predict(data)
    :return: accuracy in [0, 1]
    """
    preds = svmModel.predict(data)
    return np.sum(preds == lable) / lable.size
def print_result(totalSV, trainAccuracy, testAccuracy):
    """Print a one-line summary of a trained model's statistics."""
    print('Number of SV =', totalSV, ', trainAccuracy =', trainAccuracy,
          ', testAccuracy =', testAccuracy, ', E_in =', 1 - trainAccuracy,
          ', E_out =', 1 - testAccuracy)
def run_svm_and_write_output(q_number, dataTrain, labelTrain, dataTest, labelTest, cost, kernel, gamma, degree, write=True):
    """
    Train an SVM, evaluate it on both splits and optionally persist the
    results.

    :param q_number: question id; results go to 'problem-<q_number>.txt'
    :param dataTrain/labelTrain: training split
    :param dataTest/labelTest: test split
    :param cost, kernel, gamma, degree: SVC hyper-parameters
    :param write: when True, write the results file
    """
    # Train the SVM (svmTrain/svmPredict are defined above in this module).
    svmModel, totalSV = svmTrain(dataTrain, labelTrain, cost, kernel, gamma, degree)
    # Evaluate on the training data.
    trainAccuracy = svmPredict(dataTrain, labelTrain, svmModel)
    # Evaluate on the test data.
    testAccuracy = svmPredict(dataTest, labelTest, svmModel)
    if write:
        ### The output of the program is written to a file as follows:
        # for question 'i', the output goes in 'problem-i.txt' (e.g. 'problem-1a.txt').
        # Fix: the file handle was never closed; use a context manager.
        with open('problem-' + q_number + '.txt', 'w') as fo:
            fo.write("Kernel: "+ str(kernel)+"\n")
            fo.write("Cost: "+ str(cost)+ "\n")
            fo.write("Number of Support Vectors: "+ str(totalSV)+"\n")
            fo.write("Train Accuracy: "+ str(trainAccuracy)+"\n")
            fo.write("Test Accuracy: " + str(testAccuracy)+"\n")
    print_result(totalSV, trainAccuracy, testAccuracy)
# Load the training and test sets (libsvm-style text files).
q2_train_data, q2_train_lable = read_q2_file('hw2-2-train.txt', 6000)
q2_test_data, q2_test_lable = read_q2_file('hw2-2-test.txt', 1000)
# 2d: linear kernel, results persisted to problem-2d.txt.
print('Problem 2d')
print('Linear Kernel:')
run_svm_and_write_output('2d', q2_train_data, q2_train_lable, q2_test_data, q2_test_lable, 1, 'linear', 'auto', 1)
# 2e: RBF and polynomial kernels, console output only (write=False).
print('\nProblem 2e')
print('RBF Kernel:')
run_svm_and_write_output('2e1', q2_train_data, q2_train_lable, q2_test_data, q2_test_lable, 1, 'rbf', 0.001, 1, False)
print('\nPolynomial Kernel:')
run_svm_and_write_output('2e2', q2_train_data, q2_train_lable, q2_test_data, q2_test_lable, 1, 'poly', 1.0, 2, False)
|
19,163 | 2ea6e3a0a58c0f6efe29aa1d9dd9a7c9d292a6bf | #COMPILAR CON PYTHON3
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pylab as pl
import random
import numpy as np
# Measured input sizes and timings (microseconds) for the backtracking runs.
ns = range(10,151)
ms = [16,13,16,14,19,17,20,23,26,26,29,32,36,41,47,54,61,66,73,76,92,96,97,107,119,125,140,152,156,173,188,198,189,198,221,232,225,240,243,261,269,285,278,305,309,309,353,351,359,385,374,394,381,409,408,432,439,501,488,505,524,504,548,517,560,565,572,637,602,639,709,676,689,721,769,761,806,773,853,848,852,898,898,927,970,940,984,1068,1023,1052,1052,1184,1124,1139,1163,1136,1186,1316,1232,1326,1312,1327,1320,1371,1364,1401,1451,1456,1512,1523,1595,1561,1670,1661,1689,1695,1651,1701,1743,1776,1765,1768,1826,1990,1862,1942,1997,1977,2082,1974,2053,2133,2209,2249,2213,2286,2280,2314,2376,2385,2349]
tiemposBack = [213,241,418,291,469,357,414,572,553,543,727,660,826,808,945,1066,1157,1488,2120,1614,3239,2541,2099,2218,3069,3299,3169,5318,3944,5645,6286,6700,4397,6123,13198,14005,6569,11171,7651,11536,16514,27069,11287,50632,30574,13319,98706,41426,31932,120000,37958,69491,15261,42751,17316,23469,19051,428964,77721,36678,135026,26754,250442,27314,140853,59714,69960,982110,33125,94914,4036460,101702,115022,126389,1090173,181326,2178118,91785,4417749,198256,169516,664396,182221,242741,2437316,131018,219349,19368858,431457,756575,112408,80395134,1386416,357497,1511250,204294,256085,176888537,263838,52264513,6010987,570319,381175,616383,347223,3191559,14384585,2134336,25959702,6904122,103960427,3657351,457247917,116646200,137954270,31667945,393085,1341823,1959324,10893166,1756187,1091931,3391935,8899401676,2480380,274594975,557400229,67552974,2278956048,4352325,70970387,153335734,568328984,576244826,17493055,146855390,9932382,3288737,36444973,36343602,611180]
# Timings for the exhaustive backtracking run (first 24 sizes only).
tiemposBackCompleto = [184,299,601,1263,2590,5325,10570,21837,45354,93132,186772,388921,805033,1659986,3299862,7059715,14890105,30437126,61138537,128968885,274630008,543795692,1146350329,2381324873]
# Reference curve 2^n * n^2 with a little gaussian noise, scaled by 10^-3.
# NOTE(review): the plot label below says "n^-3" but the code divides by
# 10^3 — confirm which was intended.
complejidad = [random.normalvariate((2**n)*(n**2),50) / 10**3 for n in ns[0:24]]
'''
tuplas = []
for i in range(len(ms)):
    par = (ms[i], tiemposBack[i])
    tuplas.append(par)
tuplas.sort()
msOrd = []
tiemposBackOrd = []
for i in range(len(tuplas)):
    msOrd.append(tuplas[i][0])
    tiemposBackOrd.append(tuplas[i][1])
print(msOrd)
'''
# Plot the measured exhaustive times against the reference curve on a
# logarithmic y axis.
plt.clf()
df = pd.DataFrame({'Backtracking': tiemposBackCompleto, 'n': ns[0:24], 'g(n) = 2^n * n^2 * n^-3': complejidad})
df.plot(x='n', logy=True)
plt.ylabel('Tiempo (microsegundos)')
plt.show()
'''
plt.clf()
df3 = pd.DataFrame()
#df3 = pd.DataFrame({'Backtracking': tiemposBack, 'n': ns,'Complejidad (2^n)*(n^2)': complejidad })
df3['n'] = ns
df3['Backtracking'] = tiemposBack
df3['Complejidad (2^n)*(n^2)'] = complejidad
df3.plot(x='n', logy=True)
plt.ylabel('Tiempo (microsegundos)')
correlation = df3.corr()
#print(correlation)
sns.plt.show()
'''
19,164 | e1a0675ce2775ee0c4f71266f02917786d44838c |
from lexicon import scan
# Tokenise the sentence once at import time (Python 2 module).
user_input = scan("the bear run south")
def parse_skip():
    # Drop tokens the lexicon tagged as 'stop' words or scan errors,
    # mutating the module-level list in place.
    # NOTE(review): indentation reconstructed from context — the trailing
    # prints are assumed to belong to this function; confirm.
    user_input[:] = [item for item in user_input if 'stop' not in item]
    user_input[:] = [item for item in user_input if 'ERROR' not in item]
    print"Finished\n"
    print user_input
    print"+++\n"
19,165 | e891970c88d014bef19a8fd0c1159b518e10ed65 | """
Load data from the new Well Registry to NGWMN
"""
import cx_Oracle
import psycopg2
def _manipulate_values(y, is_timestamp):
"""
Make various translations to make sure
the data is Oracle friendly.
"""
# remove leading and trailing spaces
try:
z = y.strip()
except AttributeError:
z = y
# deal with datetimes
if is_timestamp:
return f"to_timestamp('{z}', 'YYYY-MM-DD\"T\"HH24:MI:SS.ff6\"Z\"')"
# deal with everything else
if z is None:
return 'NULL'
elif z is False:
return '0'
elif z is True:
return '1'
elif len(str(z)) == 0:
return "''"
else:
if isinstance(z, str):
# Escape single quotes
z = z.translate(str.maketrans({"'": "''"}))
return f"'{z}'"
TIME_COLUMNS = ['INSERT_DATE', 'UPDATE_DATE']
def _generate_upsert_sql(mon_loc):
    """
    Generate the Oracle MERGE (upsert) statement for one monitoring location.

    :param dict mon_loc: column name -> raw value for a single location
    :return str: MERGE statement keyed on (AGENCY_CD, SITE_NO)
    """
    # Render every value to a SQL literal up front; timestamp columns get
    # the to_timestamp() treatment.
    rendered = {col: _manipulate_values(raw, col in TIME_COLUMNS)
                for col, raw in mon_loc.items()}
    columns = ','.join(rendered)
    values = ','.join(rendered.values())
    # The natural key never changes, so exclude it from the UPDATE list.
    assignments = ','.join(f"{col}={lit}" for col, lit in rendered.items()
                           if col not in ('AGENCY_CD', 'SITE_NO'))
    return (
        f"MERGE INTO GW_DATA_PORTAL.WELL_REGISTRY_STG a "
        f"USING (SELECT '{mon_loc['AGENCY_CD']}' AGENCY_CD, '{mon_loc['SITE_NO']}' "
        f"SITE_NO FROM DUAL) b ON (a.AGENCY_CD = b.AGENCY_CD AND a.SITE_NO = b.SITE_NO) "
        f"WHEN MATCHED THEN UPDATE SET {assignments} "
        f"WHEN NOT MATCHED THEN INSERT ({columns}) VALUES ({values})"
    )
def _generate_upsert_pgsql(mon_loc):
    """
    Generate SQL to insert/update for PostGIS.

    Builds an INSERT ... ON CONFLICT DO UPDATE statement keyed on
    (AGENCY_CD, SITE_NO).  The audit/user columns are skipped here
    (presumably Oracle-only -- verify against the target schema), and a
    GEOM point column is derived from the decimal lat/long values.

    :param dict mon_loc: column name -> raw value for a single location
    :return str: the upsert statement
    """
    mon_loc_db = [(k, _manipulate_values(v, k in TIME_COLUMNS)) for k, v in mon_loc.items()]
    # Postgres identifiers are double-quoted; the leading '"' plus the
    # '","' join produces "COL1","COL2",... with the final quote supplied
    # by the appended ","GEOM"".
    all_columns = '"' + '","'.join(col for (col, _) in mon_loc_db if col not in ['INSERT_USER_ID', 'UPDATE_USER_ID', 'REVIEW_FLAG'])
    all_columns += '","GEOM"'
    all_values = ','.join(value for (key, value) in mon_loc_db if key not in ['INSERT_USER_ID', 'UPDATE_USER_ID', 'REVIEW_FLAG'])
    # Point in SRID 4269 (NAD83) built from the location's lat/long columns.
    geom_col = f" ST_SetSRID(ST_MakePoint({mon_loc['DEC_LONG_VA']},{mon_loc['DEC_LAT_VA']}),4269) "
    all_values += "," + geom_col
    # The conflict key columns are excluded from the UPDATE assignments.
    update_query = ','.join(f'"{k}"={v}' for (k, v) in mon_loc_db if k not in ['AGENCY_CD', 'SITE_NO', 'INSERT_USER_ID', 'UPDATE_USER_ID', 'REVIEW_FLAG'])
    update_query += ', "GEOM"=' + geom_col
    statement = (
        f'INSERT INTO "GW_DATA_PORTAL"."WELL_REGISTRY_MAIN" ({all_columns}) VALUES ({all_values}) '
        f'ON CONFLICT("AGENCY_CD", "SITE_NO") DO UPDATE SET {update_query}'
    )
    return statement
class NoDb:
    """Null-object stand-in used when no database host is configured.

    Supports the context-manager protocol so callers can treat it like a
    real connection object; every operation is a no-op.
    """

    def __enter__(self):
        """No-op enter."""
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        """No-op exit; returns None so exceptions are never suppressed."""
        return None
def make_oracle(host, port, database, user, password):
    """
    Open a connection to the Oracle database, or return a NoDb
    placeholder when no host is configured.

    :return: cx_Oracle connection, or NoDb when ``host`` is None
    """
    if host is not None:
        dsn = f'{host}:{port}/{database}'
        return cx_Oracle.connect(user, password, dsn, encoding='UTF-8')
    return NoDb()
def make_postgres(host, port, database, user, password):
    """
    Open a connection to the Postgres database, or return a NoDb
    placeholder when no host is configured.

    :return: psycopg2 connection, or NoDb when ``host`` is None
    """
    if host is not None:
        return psycopg2.connect(host=host, port=port, database=database,
                                user=user, password=password)
    return NoDb()
def load_monitoring_location(connect, mon_loc):
    """
    Upsert one monitoring location into the Oracle staging table.

    :param connect: open Oracle connection (see make_oracle)
    :param dict mon_loc: column name -> raw value for the location
    """
    cursor = connect.cursor()
    cursor.execute(_generate_upsert_sql(mon_loc))
    # Commit per location so a later failure does not roll back earlier rows.
    connect.commit()
def load_monitoring_location_pg(connect, mon_loc):
    """
    Upsert one monitoring location into the PostGIS main table.

    :param connect: open psycopg2 connection (see make_postgres)
    :param dict mon_loc: column name -> raw value for the location
    """
    cursor = connect.cursor()
    cursor.execute(_generate_upsert_pgsql(mon_loc))
    # Commit per location so a later failure does not roll back earlier rows.
    connect.commit()
def refresh_well_registry_mv(connect):
    """
    Refresh the GW_DATA_PORTAL.WELL_REGISTRY_MV materialized view in Oracle.

    :param connect: open Oracle connection
    """
    cursor = connect.cursor()
    # No explicit commit here -- presumably dbms_mview.refresh commits in
    # its own transaction; confirm against the Oracle deployment.
    cursor.execute("begin dbms_mview.refresh('GW_DATA_PORTAL.WELL_REGISTRY_MV'); end;")
def refresh_well_registry_pg(connect):
    """
    Rebuild the WELL_REGISTRY_MV table in Postgres.

    Postgres has no equivalent refresh hook here, so the "materialized
    view" is a plain table that is cleared and re-selected from the main
    WELL_REGISTRY table (DELETE_MV / INSERT_MV constants).

    :param connect: open psycopg2 connection
    """
    cursor = connect.cursor()
    cursor.execute(DELETE_MV)
    cursor.execute(INSERT_MV)
    connect.commit()
# SQL used by refresh_well_registry_pg: clear the Postgres copy of the
# materialized view, then repopulate it column-for-column from the main
# WELL_REGISTRY table.  Trailing backslashes continue one long single-quoted
# string literal.
DELETE_MV = 'delete from "GW_DATA_PORTAL"."WELL_REGISTRY_MV";'
INSERT_MV = 'insert into "GW_DATA_PORTAL"."WELL_REGISTRY_MV" ( \
"AGENCY_CD", \
"AGENCY_NM", \
"AGENCY_MED", \
"SITE_NO", \
"SITE_NAME", \
"DISPLAY_FLAG", \
"DEC_LAT_VA", \
"DEC_LONG_VA", \
"HORZ_DATUM", \
"HORZ_METHOD", \
"HORZ_ACY", \
"ALT_VA", \
"ALT_UNITS", \
"ALT_UNITS_NM", \
"ALT_DATUM_CD", \
"ALT_METHOD", \
"ALT_ACY", \
"WELL_DEPTH", \
"WELL_DEPTH_UNITS", \
"WELL_DEPTH_UNITS_NM", \
"NAT_AQUIFER_CD", \
"NAT_AQFR_DESC", \
"COUNTRY_CD", \
"COUNTRY_NM", \
"STATE_CD", \
"STATE_NM", \
"COUNTY_CD", \
"COUNTY_NM", \
"LOCAL_AQUIFER_CD", \
"LOCAL_AQUIFER_NAME", \
"SITE_TYPE", \
"AQFR_CHAR", \
"QW_SYS_NAME", \
"QW_SN_FLAG", \
"QW_SN_DESC", \
"QW_BASELINE_FLAG", \
"QW_BASELINE_DESC", \
"QW_WELL_CHARS", \
"QW_WELL_CHARS_DESC", \
"QW_WELL_TYPE", \
"QW_WELL_TYPE_DESC", \
"QW_WELL_PURPOSE", \
"QW_WELL_PURPOSE_DESC", \
"QW_WELL_PURPOSE_NOTES", \
"WL_SYS_NAME", \
"WL_SN_FLAG", \
"WL_SN_DESC", \
"WL_BASELINE_FLAG", \
"WL_BASELINE_DESC", \
"WL_WELL_CHARS", \
"WL_WELL_CHARS_DESC", \
"WL_WELL_TYPE", \
"WL_WELL_TYPE_DESC", \
"WL_WELL_PURPOSE", \
"WL_WELL_PURPOSE_DESC", \
"WL_WELL_PURPOSE_NOTES", \
"GEOM", \
"INSERT_DATE", \
"UPDATE_DATE", \
"DATA_PROVIDER", \
"WL_DATA_PROVIDER", \
"QW_DATA_PROVIDER", \
"LITH_DATA_PROVIDER", \
"CONST_DATA_PROVIDER", \
"WL_DATA_FLAG", \
"QW_DATA_FLAG", \
"LOG_DATA_FLAG", \
"LINK" \
) \
select "AGENCY_CD", \
"AGENCY_NM", \
"AGENCY_MED", \
"SITE_NO", \
"SITE_NAME", \
"DISPLAY_FLAG", \
"DEC_LAT_VA", \
"DEC_LONG_VA", \
"HORZ_DATUM", \
"HORZ_METHOD", \
"HORZ_ACY", \
"ALT_VA", \
"ALT_UNITS", \
"ALT_UNITS_NM", \
"ALT_DATUM_CD", \
"ALT_METHOD", \
"ALT_ACY", \
"WELL_DEPTH", \
"WELL_DEPTH_UNITS", \
"WELL_DEPTH_UNITS_NM", \
"NAT_AQUIFER_CD", \
"NAT_AQFR_DESC", \
"COUNTRY_CD", \
"COUNTRY_NM", \
"STATE_CD", \
"STATE_NM", \
"COUNTY_CD", \
"COUNTY_NM", \
"LOCAL_AQUIFER_CD", \
"LOCAL_AQUIFER_NAME", \
"SITE_TYPE", \
"AQFR_CHAR", \
"QW_SYS_NAME", \
"QW_SN_FLAG", \
"QW_SN_DESC", \
"QW_BASELINE_FLAG", \
"QW_BASELINE_DESC", \
"QW_WELL_CHARS", \
"QW_WELL_CHARS_DESC", \
"QW_WELL_TYPE", \
"QW_WELL_TYPE_DESC", \
"QW_WELL_PURPOSE", \
"QW_WELL_PURPOSE_DESC", \
"QW_WELL_PURPOSE_NOTES", \
"WL_SYS_NAME", \
"WL_SN_FLAG", \
"WL_SN_DESC", \
"WL_BASELINE_FLAG", \
"WL_BASELINE_DESC", \
"WL_WELL_CHARS", \
"WL_WELL_CHARS_DESC", \
"WL_WELL_TYPE", \
"WL_WELL_TYPE_DESC", \
"WL_WELL_PURPOSE", \
"WL_WELL_PURPOSE_DESC", \
"WL_WELL_PURPOSE_NOTES", \
"GEOM", \
"INSERT_DATE", \
"UPDATE_DATE", \
"DATA_PROVIDER", \
"WL_DATA_PROVIDER", \
"QW_DATA_PROVIDER", \
"LITH_DATA_PROVIDER", \
"CONST_DATA_PROVIDER", \
"WL_DATA_FLAG", \
"QW_DATA_FLAG", \
"LOG_DATA_FLAG", \
"LINK" \
from "GW_DATA_PORTAL"."WELL_REGISTRY";'
|
19,166 | fe9684ca65831497778ff913a8c3ef74fab12d72 |
import numpy as np
from problem import BinaryOptimizationProblem
import optimize
class FourPeaksProblem(BinaryOptimizationProblem):
    """
    Four peaks problem - boolean function with a maximum at T+1
    1's followed by all 0's, or all 1's preceding T+1 0's.

    Parameters
    ----------
    T : int
        Controls the number of leading/trailing 1's/0's for the
        global optimum
    n : int
        Number of entries in vector

    Methods
    -------
    fitness :
        The fitness method takes a population of instances of the
        problem domain as input and invokes the cost function on
        each element to return a sorted collection of the input
        population and corresponding fitness scores.
    cost : (abstract)
        The cost function should take an instance of the problem
        domain as an input and return a floating point value.
    neighborhood : (abstract)
        The neighborhood function should take an instance of the
        problem domain as an input and return an iterable sequence
        of neighboring vectors.

    Examples
    --------
    TODO: Add examples
    """

    def __init__(self, T, n):
        # self.n and self.domain are presumably set by the base class
        # constructor -- confirm against BinaryOptimizationProblem.
        super(FourPeaksProblem, self).__init__(n)
        self.T = T

    def cost(self, sample):
        """
        Calculate the cost of the input sample

        Parameters
        ----------
        sample : ndarray
            Input data

        Returns
        -------
        : float
            N/A
        """
        assert len(sample) == len(self.domain)
        # Trailing-zero count: trim zeros from the back and compare lengths.
        tail = self.n - len(np.trim_zeros(sample, 'b'))
        # Leading-one count: negate the sample, then trim leading zeros.
        head = self.n - len(np.trim_zeros(np.logical_not(sample), 'f'))
        # Bonus of n awarded only when both runs exceed the threshold T.
        Rxt = self.n if tail > self.T and head > self.T else 0
        # Negated -- presumably the surrounding optimizers minimize cost.
        return -float(max(head, tail) + Rxt)

    def neighborhood(self, sample):
        """
        Enumerate the neighbors of the current sample

        Parameters
        ----------
        sample : ndarray
            Input data

        Yields
        -------
        new_sample : ndarray
            N/A
        """
        assert len(sample) == len(self.domain)
        # One neighbor per position: a copy with that single bit flipped.
        for idx in range(len(sample)):
            new_sample = np.array(sample)
            new_sample[idx] = int(np.logical_not(sample[idx]))
            yield new_sample

    def fitness(self):
        # Deliberately unimplemented here; see the class docstring.
        raise NotImplementedError
if __name__ == "__main__":
    # Demo: solve a small four-peaks instance (T=3, n=30) with MIMIC.
    # NOTE(review): Python 2 print statement -- this file predates Python 3.
    problem = FourPeaksProblem(3, 30)
    solution = optimize.mimic(problem, niter=10)
    print solution, problem.cost(solution)
|
19,167 | cfd709f809bd45584bea6ee34ed53200e5a8f7e9 | from matplotlib import cbook
import matplotlib.pyplot as plt
import matplotlib.text as text
from matplotlib.widgets import Button
import argparse
import numpy as np
import h5py
import matplotlib
import sys, time
from lpd.gui.readout_config import LpdReadoutConfig
class imagePlot():
    """Interactive viewer for one LPD ASIC module image from an HDF5 file.

    Buttons step through modules (+M/-M), images (+I/-I) and trains (+T/-T);
    clicking inside the image pops an annotation with the pixel's
    column/row coordinates and value (see DataCursor).
    NOTE(review): Python 2 code (print statements) throughout.
    """

    def __init__(self):
        # Ask the user for data file, image, train..
        self.args = parseArgs()
        # One ASIC module is 32 rows x 128 columns of pixels.
        (self.numRows, self.numCols) = (32, 128)
        # Set X and Y ticks to match data size
        (xStart, xStop, xStep) = (16, 128, 16)
        (yStart, yStop, yStep) = (8, 32, 8)
        (self.xlist, self.ylist) = ([], [])
        # Generate list of xticks to label the x axis
        for i in range(xStart, xStop, xStep):
            self.xlist.append(i)
        # Generate yticks for the y-axis
        for i in range(yStart, yStop, yStep):
            self.ylist.append(i)
        (imgOffset, timeStamp, run_number, trainNumber, image_number, imageData) = self.obtainImageWithInfo()
        # Create the figure and title
        self.fig = plt.figure(1)
        self.ax = self.fig.add_subplot(111)
        self.mainTitle = plt.title("")
        # Previous, next buttons inspired by example: matplotlib.org/1.3.1/examples/widgets/buttons.html
        # Previous, Next ASIC module buttons..
        decModule = plt.axes([0.85, 0.115, 0.07, 0.045])
        incModule = plt.axes([0.92, 0.115, 0.07, 0.045])  # [left, bottom, width, height]
        incrementModule = Button(incModule, '+M')
        incrementModule.on_clicked(self.nextModule)
        decrementModule = Button(decModule, '-M')
        decrementModule.on_clicked(self.prevModule)
        # Previous, Next image buttons..
        decImage = plt.axes([0.85, 0.06, 0.07, 0.045])
        incImage = plt.axes([0.92, 0.06, 0.07, 0.045])  # [left, bottom, width, height]
        incrementImg = Button(incImage, '+I')
        incrementImg.on_clicked(self.nextImage)
        decrementImg = Button(decImage, '-I')
        decrementImg.on_clicked(self.prevImage)
        # Previous, Next train buttons..
        decTrain = plt.axes([0.85, 0.005, 0.07, 0.045])
        incTrain = plt.axes([0.92, 0.005, 0.07, 0.045])  # [left, bottom, width, height]
        incrementTrain = Button(incTrain, '+T')
        incrementTrain.on_clicked(self.nextTrain)
        decrementTrain = Button(decTrain, '-T')
        decrementTrain.on_clicked(self.prevTrain)
        # Determine row/col coordinates according to selected ASIC module
        (rowStart, colStart) = self.asicStartingRowColumn(self.args.module)
        self.img = self.ax.imshow(imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols], interpolation='nearest', vmin='0', vmax='4095')
        self.ax.set_xticks(self.xlist)
        self.ax.set_yticks(self.ylist)
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(timeStamp))
        titleText = 'Run %d Train %d Image %d Module %d : %s' % (run_number, trainNumber, image_number, self.args.module, dateStr)
        self.mainTitle.set_text(titleText)
        # Add a colour bar
        axc, kw = matplotlib.colorbar.make_axes(self.ax)
        cb = matplotlib.colorbar.Colorbar(axc, self.img)
        self.img.colorbar = cb
        self.artist = self.fig.get_children()
        # Attach the click-to-inspect cursor to the image artist.
        DataCursor(self.artist[1], imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols])
        plt.show()

    def asicStartingRowColumn(self, module):
        ''' Determining upper left corner's row/col coordinates according to selected ASIC module '''
        # (-1, -1) is returned for an unknown module number.
        (row, column) = (-1, -1)
        if module == 0: (row, column) = (0, 128)     # ASIC module #1
        elif module == 1: (row, column) = (32, 128)  # ASIC module #2
        elif module == 2: (row, column) = (64, 128)  # ASIC module #3
        elif module == 3: (row, column) = (96, 128)  # ASIC module #4
        elif module == 4: (row, column) = (128, 128) # ASIC module #5
        elif module == 5: (row, column) = (160, 128) # ASIC module #6
        elif module == 6: (row, column) = (192, 128) # ASIC module #7
        elif module == 7: (row, column) = (224, 128) # ASIC module #8
        elif module == 15:(row, column) = (0, 0)     # ASIC module #16
        elif module == 14:(row, column) = (32, 0)    # ASIC module #15
        elif module == 13:(row, column) = (64, 0)    # ASIC module #14
        elif module == 12:(row, column) = (96, 0)    # ASIC module #13
        elif module == 11:(row, column) = (128, 0)   # ASIC module #12
        elif module == 10:(row, column) = (160, 0)   # ASIC module #11
        elif module == 9: (row, column) = (192, 0)   # ASIC module #10
        elif module == 8: (row, column) = (224, 0)   # ASIC module #9
        return (row, column)

    def nextModule(self, event):
        # Advance to the next module, carrying into image/train when the
        # module counter wraps past the last of the 16 modules.
        self.args.module += 1
        # Is current module number now greater than 15? (max: 16 ASIC/supermodule)
        if self.args.module > 15:
            self.args.module = 0
            # Yes, is the current image number = maximum number of images per train?
            if self.args.image == self.maxImageNumber:
                self.args.image = 0
                # Yes. Is current train number = max train number?
                if self.args.train == self.maxTrainNumber:
                    # Train maximum reached, start over with first image in first train
                    self.args.train = 0
                else:
                    # Image number exceeded but not train number; Increment train
                    self.args.train += 1
            else:
                # No, need only increment current image
                self.args.image += 1
        (imgOffset, timeStamp, runNumber, trainNumber, imageNumber, imageData) = self.obtainImageWithInfo()
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(timeStamp))
        titleText = 'Run %d Train %d Image %d Module %d : %s' % (runNumber, trainNumber, imageNumber, self.args.module, dateStr)
        self.mainTitle.set_text(titleText)
        # Determine row/col coordinates according to selected ASIC module
        (rowStart, colStart) = self.asicStartingRowColumn(self.args.module)
        self.img.set_data(imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols])
        DataCursor(self.artist[1], imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols])
        plt.draw()

    def prevModule(self, event):
        # Mirror of nextModule: step back, borrowing from image/train when
        # the module counter wraps below zero.
        self.args.module -= 1
        # Is current module number now sub zero?
        if self.args.module < 0:
            self.args.module = 15
            # Yes, is the current image number = 0
            if self.args.image == 0:
                self.args.image = self.maxImageNumber
                # Yes. Is current train number sub zero?
                if self.args.train == 0:
                    # Train minimum reached, start over with last image in last train
                    self.args.train = self.maxTrainNumber
                else:
                    # Image number exceeded but not train number; Decrement train
                    self.args.train -= 1
            else:
                # No, need only decrement current image
                self.args.image -= 1
        (imgOffset, timeStamp, runNumber, trainNumber, imageNumber, imageData) = self.obtainImageWithInfo()
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(timeStamp))
        titleText = 'Run %d Train %d Image %d Module %d : %s' % (runNumber, trainNumber, imageNumber, self.args.module, dateStr)
        self.mainTitle.set_text(titleText)
        # Determine row/col coordinates according to selected ASIC module
        (rowStart, colStart) = self.asicStartingRowColumn(self.args.module)
        self.img.set_data(imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols])
        DataCursor(self.artist[1], imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols])
        plt.draw()

    def nextImage(self, event):
        # Advance to the next image, carrying into the train counter.
        self.args.image += 1
        # Is current image number now greater than images per train?
        if self.args.image > self.maxImageNumber:
            # Yes. Is current train number = max train number?
            if self.args.train == self.maxTrainNumber:
                # Both image and train maximum reached, start over with first image in first train
                self.args.train = 0
                self.args.image = 0
            else:
                # Image number exceeded but not train number; Increment train and reset image
                self.args.train += 1
                self.args.image = 0
        (imgOffset, timeStamp, runNumber, trainNumber, imageNumber, imageData) = self.obtainImageWithInfo()
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(timeStamp))
        titleText = 'Run %d Train %d Image %d Module %d : %s' % (runNumber, trainNumber, imageNumber, self.args.module, dateStr)
        self.mainTitle.set_text(titleText)
        # Determine row/col coordinates according to selected ASIC module
        (rowStart, colStart) = self.asicStartingRowColumn(self.args.module)
        thisImage = imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols]
        self.img.set_data(thisImage)
        DataCursor(self.artist[1], thisImage)
        plt.draw()
        # Calculate image average value
        print >> sys.stderr, "Train: %d Image: %d. image average: %f" % (trainNumber, imageNumber, np.average(thisImage))

    def prevImage(self, event):
        # Mirror of nextImage: step back, borrowing from the train counter.
        self.args.image -= 1
        # Is current image number now sub zero?
        if self.args.image < 0:
            # Yes. Is current train number sub zero?
            if self.args.train == 0:
                # Both image and train minimum reached, start over with last image in last train
                self.args.train = self.maxTrainNumber
                self.args.image = self.maxImageNumber
            else:
                # Image number exceeded but not train number; Decrement train and set image to highest in train
                self.args.train -= 1
                self.args.image = self.maxImageNumber
        (imgOffset, timeStamp, runNumber, trainNumber, imageNumber, imageData) = self.obtainImageWithInfo()
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(timeStamp))
        titleText = 'Run %d Train %d Image %d Module %d : %s' % (runNumber, trainNumber, imageNumber, self.args.module, dateStr)
        self.mainTitle.set_text(titleText)
        # Determine row/col coordinates according to selected ASIC module
        (rowStart, colStart) = self.asicStartingRowColumn(self.args.module)
        thisImage = imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols]
        self.img.set_data(thisImage)
        DataCursor(self.artist[1], thisImage)
        plt.draw()
        # Calculate image average
        print >> sys.stderr, "Train: %d Image: %d. image average: %f" % (trainNumber, imageNumber, np.average(thisImage))

    def nextTrain(self, event):
        # Advance to the next train, wrapping to the first train.
        self.args.train += 1
        # Is current train number now greater than number of trains?
        if self.args.train > self.maxTrainNumber:
            # Yes. Start with first train
            self.args.train = 0
        (imgOffset, timeStamp, runNumber, trainNumber, imageNumber, imageData) = self.obtainImageWithInfo()
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(timeStamp))
        titleText = 'Run %d Train %d Image %d Module %d : %s' % (runNumber, trainNumber, imageNumber, self.args.module, dateStr)
        self.mainTitle.set_text(titleText)
        # Determine row/col coordinates according to selected ASIC module
        (rowStart, colStart) = self.asicStartingRowColumn(self.args.module)
        self.img.set_data(imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols])
        DataCursor(self.artist[1], imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols])
        plt.draw()

    def prevTrain(self, event):
        # Mirror of nextTrain: step back, wrapping to the last train.
        self.args.train -= 1
        # Is current image number now sub zero?
        if self.args.train < 0:
            # Yes. Start over with last train
            self.args.train = self.maxTrainNumber
        (imgOffset, timeStamp, runNumber, trainNumber, imageNumber, imageData) = self.obtainImageWithInfo()
        dateStr = time.strftime('%d/%m/%y %H:%M:%S', time.localtime(timeStamp))
        titleText = 'Run %d Train %d Image %d Module %d : %s' % (runNumber, trainNumber, imageNumber, self.args.module, dateStr)
        self.mainTitle.set_text(titleText)
        # Determine row/col coordinates according to selected ASIC module
        (rowStart, colStart) = self.asicStartingRowColumn(self.args.module)
        self.img.set_data(imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols])
        DataCursor(self.artist[1], imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols])
        plt.draw()

    def obtainImageWithInfo(self):
        ''' Open file specified by parser reading specified image'''
        with h5py.File(self.args.file_name, 'r') as hdfFile:
            # Read in the train, image counter and timestamp arrays
            trainNumber = hdfFile['/lpd/data/trainNumber'][...]
            imageNumber = hdfFile['/lpd/data/imageNumber'][...]
            timeStamp = hdfFile['/lpd/data/timeStamp'][...]
            # Get max train and image number form arrays
            self.maxTrainNumber = np.amax(trainNumber)
            self.maxImageNumber = np.amax(imageNumber)
            # Read in the metadata
            meta = hdfFile['/lpd/metadata']
            # Parse the readout configuration XML blob
            readoutConfig = LpdReadoutConfig(meta['readoutParamFile'][0])
            readoutParams = {}
            for (param, val) in readoutConfig.parameters():
                readoutParams[param] = val
            # print readoutParams
            # Get number of trains from metadata and check array against argument
            numTrains = meta.attrs['numTrains']
            if numTrains != self.maxTrainNumber + 1:
                print "WARNING: mismatch in number of trains between metadata and file"
            if self.args.train > self.maxTrainNumber:
                print "ERROR: train number specified (%d) is bigger than maximum in data (%d), quitting" \
                    % (self.args.train, self.maxTrainNumber)
                sys.exit(1)
            # Check image number requested in argument is in range too
            if self.args.image > self.maxImageNumber:
                print "ERROR: images number specified (%d) is bigger than maximum in data (%d), quitting" \
                    % (self.args.image, self.maxImageNumber)
                sys.exit(1)
            # Calculate image offset into array and range check (should be OK)
            imgOffset = (self.args.train * (self.maxImageNumber + 1)) + self.args.image
            if imgOffset > imageNumber.size:
                print "ERROR: calculated image offset (%d) is larger than images stored in data (%d), quitting" \
                    % (imgOffset, imageNumber.size)
                sys.exit(1)
            # Read in the image array
            image = hdfFile['/lpd/data/image']
            imageData = image[imgOffset,:,:]  # Only read in the specified image
            # Mask off or select gain range from image data
            if self.args.gain == 0:
                imageData[:] = imageData[:] & 0xFFF
            # Invert image if specified
            if self.args.invert != None:
                imageData[:] = 4095 - imageData[:]
            return (imgOffset, timeStamp[imgOffset], meta.attrs['runNumber'], trainNumber[imgOffset], imageNumber[imgOffset], imageData)
def parseArgs():
    """Build and validate the command-line arguments for the viewer."""
    parser = argparse.ArgumentParser(description="Read and Plot one ASIC from a specific LPD image in an HDF file, into a figure where clicking inside the image will produce pixel coordinates and value.")
    parser.add_argument("file_name", help='Name of HDF5 data file to open')
    parser.add_argument("-t", "--train", type=int, default=0,
                        help="Select train number to plot")
    parser.add_argument("-i", "--image", type=int, default=0,
                        help="Select image number with train to plot")
    # NOTE(review): --invert takes a value (no action='store_true'); any
    # supplied value enables inversion -- confirm this is intended.
    parser.add_argument("--invert", help="Invert image data")
    parser.add_argument("-g", "--gain", type=int, default=0, choices=[0, 1, 10, 100],
                        help="Select gain range from image (0=mask off gain bits)")
    parser.add_argument("-m", "--module", type=int, default=0,
                        help="ASIC module to select (SuperModule: 0=top right, 16=top left)")
    args = parser.parse_args()
    # Check ASIC module number within the range
    if not (-1 < args.module < 16):
        print "ERROR: Specified ASIC module number (%d) is outside the valid range of 0-15, quitting" \
            % ( args.module)
        sys.exit(1)
    return args
# ------- Modified example from stack overflow: --------- #
class DataCursor(object):
    ''' Modified from example at: http://stackoverflow.com/a/13306887/2903608 '''
    """A simple data cursor widget that displays the x,y location of a
    matplotlib artist when it is selected."""

    def __init__(self, artists, imageData, tolerance=5, offsets=(-20, 20),
                 #template='x: %0.2f\ny: %0.2f\nPxl: %i',
                 template='Column: %i\nRow: %i\nPixel: %i', display_all=False):
        """Create the data cursor and connect it to the relevant figure.
        "artists" is the matplotlib artist or sequence of artists that will be
        selected.
        "imageData" is LPD image data for a super module.
        "tolerance" is the radius (in points) that the mouse click must be
        within to select the artist.
        "offsets" is a tuple of (x,y) offsets in points from the selected
        point to the displayed annotation box
        "template" is the format string to be used. Note: For compatibility
        with older versions of python, this uses the old-style (%)
        formatting specification.
        "display_all" controls whether more than one annotation box will
        be shown if there are multiple axes. Only one will be shown
        per-axis, regardless.
        """
        self.imageData = imageData
        self.template = template
        self.offsets = offsets
        self.display_all = display_all
        # Normalise a single artist to a one-element sequence.
        if not cbook.iterable(artists):
            artists = [artists]
        self.artists = artists
        # De-duplicated axes/figures hosting the selected artists.
        self.axes = tuple(set(art.axes for art in self.artists))
        self.figures = tuple(set(ax.figure for ax in self.axes))
        # One pre-built (hidden) annotation box per axis.
        self.annotations = {}
        for ax in self.axes:
            self.annotations[ax] = self.annotate(ax)
        for artist in self.artists:
            artist.set_picker(tolerance)
        # The instance itself is the pick-event callback (see __call__).
        for fig in self.figures:
            fig.canvas.mpl_connect('pick_event', self)

    def annotate(self, ax):
        """Draws and hides the annotation box for the given axis "ax"."""
        annotation = ax.annotate(self.template, xy=(0, 0), ha='right',
                                 xytext=self.offsets, textcoords='offset points', va='bottom',
                                 bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
                                 arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
                                 )
        annotation.set_visible(False)
        return annotation

    def __call__(self, event):
        """Intended to be called through "mpl_connect"."""
        # Rather than trying to interpolate, just display the clicked coords
        # This will only be called if it's within "tolerance", anyway.
        x, y = event.mouseevent.xdata, event.mouseevent.ydata
        annotation = self.annotations[event.artist.axes]
        if x is not None:
            if not self.display_all:
                # Hide any other annotation boxes...
                for ann in self.annotations.values():
                    ann.set_visible(False)
            # Update the annotation in the current axis..
            annotation.xy = x, y
            # Is borders enabled, update x, y were necessary
            (x_offset, y_offset, pixelValue) = (0, 0, -1)
            # Image arrays are indexed [row, column] == [y, x].
            pixelValue = self.imageData[y, x]
            annotation.set_text(self.template % (x+x_offset, y+y_offset, pixelValue))
            annotation.set_visible(True)
            event.canvas.draw()

    def borderValueSelected(self, x, y):
        """ Check whether the users clicked on one of the borders """
        # print "=========================="
        # Ensure we are using integer values
        (x, y) = int(x), int(y)
        bBorderValueSelected = False
        if x in range(128, 132):
            # print "x within the border!"
            bBorderValueSelected = True
        # else:
        #     print "Not xborder, x: ", x, " range: ", range(128, 132)
        # Note: Border adds 4 pixels between adjacent ASICs
        yBorders = [32, 33, 34, 35, 68, 69, 70, 71, 104, 105, 106, 107, 140, 141, 142, 143, 176, 177, 178, 179, 212, 213, 214, 215, 248, 249, 250, 251]
        if y in yBorders:
            # print "y within a Border!"
            bBorderValueSelected = True
        # else:
        #     print "Not yborder, y: ", y, " range: ", yBorders
        return bBorderValueSelected

    def adjustCoordByBorders(self, x, y):
        """ Subtract the width of the border(s) from x/y values where applicable """
        (x_offset, y_offset) = (0, 0)
        # Each inter-ASIC border is 4 pixels wide; accumulate the offset for
        # however many borders precede the clicked coordinate.
        if (127 < x < (255+4)):
            x_offset -= 4
        if (31 < y < (63+4*1)):
            y_offset -= 4
        elif ( (63+4*1) < y < (95+4*2) ):
            y_offset -= 4*2
        elif ( (95+4*2) < y < (127+4*3) ):
            y_offset -= 4*3
        elif ( (127+4*3) < y < (159+4*4) ):
            y_offset -= 4*4
        elif ( (159+4*4) < y < (191+4*5) ):
            y_offset -= 4*5
        elif ( (191+4*5) < y < (223+4*6) ):
            y_offset -= 4*6
        elif ( (223+4*6) < y < (255+4*7) ):
            y_offset -= 4*7
        return x_offset, y_offset
if __name__ == '__main__':
    # Launching the viewer blocks in plt.show() until the window closes.
    imagePlot()
|
19,168 | 37a24abc412f536cfbbb5daeea6dd6b6fac1817f | class Udacian:
def __init__(self, name, city, enrollment, nanodegree, status):
self.name = name
self.city = city
self.enrollment = enrollment
self.nanodegree = nanodegree
self.status = status
def getInformation(self):
print(self.name + " " + self.city + " " + self.enrollment + " " + self.nanodegree + " " + self.status)
# Smoke test: build a sample profile and print it.
test = Udacian("Mohammed Bokhari", "Jeddah", "Student", "Full-Stack", "--")
test.getInformation()
|
19,169 | d423eb573925072fdd199f2176ee62b11c016059 | n = int(input("Колличество участников? "))
members_list = []
k = int(input("Введите колличество человек в группе? "))
start = 1
print(n % k, "n / k")
while n % k != 0:
k = int(input("Введите число кратное к? "))
for i_sportsmens in range(n // k):
members_list.append(list(range(start, start + k)))
start += k
print(members_list) |
19,170 | 4fe68b95a22809dd9c80a7c4bc4c3cb106cd385a | from flask.ext.wtf import Form
from wtforms import TextField, BooleanField, SelectMultipleField
from wtforms.validators import Required
from wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField
from wtforms import widgets
from models import Author, Book
class LoginForm(Form):
    """OpenID sign-in form."""
    # The user's OpenID identity URL; required.
    openid = TextField('openid', validators=[Required()])
    # Keep the session alive across browser restarts.
    remember_me = BooleanField('remember_me', default=False)
class BookForm(Form):
    """Create/edit form for a Book, with a checkbox list of its authors."""
    title = TextField('title', validators=[Required()])
    # All authors, alphabetical by name, rendered as labelled checkboxes.
    authors = QuerySelectMultipleField('authors', query_factory=lambda:
                                       Author.query.order_by('name').all(),
                                       get_label=lambda a: a.name,
                                       widget=widgets.ListWidget(prefix_label=True),
                                       option_widget=widgets.CheckboxInput()
                                       )
class AuthorForm(Form):
    """Create/edit form for an Author, with a checkbox list of their books."""
    # Label fixed: it previously read 'title', a copy-paste from BookForm.
    name = TextField('name', validators=[Required()])
    # All books, alphabetical by title, rendered as labelled checkboxes.
    books = QuerySelectMultipleField('books', query_factory=lambda:
                                     Book.query.order_by('title').all(),
                                     get_label=lambda b: b.title,
                                     widget=widgets.ListWidget(prefix_label=True),
                                     option_widget=widgets.CheckboxInput()
                                     )
|
19,171 | dce6768959558a88353106546ded6cfcc6aeb792 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import collections
import json
import logging
import multiprocessing
import subprocess
import sys
import time
import concurrent.futures as futures
import hglib
from hgmolib import find_hg_repos
from kafka import (
KafkaConsumer,
OffsetAndMetadata,
TopicPartition,
)
from .config import Config
from .consumer import (
value_deserializer,
process_hg_sync,
handle_message_main,
MAX_BUFFER_SIZE,
)
# Root of the hg.mozilla.org repository store scanned during bootstrap.
REPOS_DIR = '/repo/hg/mozilla'

# All bootstrap log records carry UTC timestamps.
formatter = logging.Formatter('%(asctime)s %(name)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime

# Main bootstrap logger writes to a file and mirrors to stdout.
logger = logging.getLogger('vcsreplicator.bootstrap')
main_file_handler = logging.FileHandler('/var/log/vcsrbootstrap/bootstrap.log')
main_file_handler.setFormatter(formatter)
main_stdout_handler = logging.StreamHandler(sys.stdout)
main_stdout_handler.setFormatter(formatter)
logger.addHandler(main_file_handler)
logger.addHandler(main_stdout_handler)
logger.setLevel(logging.INFO)

# Send vcsreplicator consumer logs to a separate file
consumer_logger = logging.getLogger('vcsreplicator.consumer')
consumer_handler = logging.FileHandler('/var/log/vcsrbootstrap/consumer.log')
consumer_logger.addHandler(consumer_handler)

# Send kafka-python logs to a separate file
kafka_logger = logging.getLogger('kafka')
kafka_handler = logging.FileHandler('/var/log/vcsrbootstrap/kafka.log')
kafka_handler.setLevel(logging.DEBUG)
kafka_logger.addHandler(kafka_handler)
def clone_repo(config, path, requirements, hgrc, heads):
    """Wraps process_hg_sync to provide logging"""
    logger.info('syncing repo: %s' % path)
    try:
        return process_hg_sync(config, path, requirements, hgrc, heads, create=True)
    finally:
        # Logged on both the success and the failure path.
        logger.info('exiting sync for: %s' % path)
def seqmap(message_handler, events):
    '''Process events using the message handler in the order they
    arrived in the queue.

    Strictly sequential: each (config, payload) pair is handled to
    completion before the next one is examined.
    '''
    for cfg, payload in events:
        message_handler(cfg, payload)
def hgssh():
    '''hgssh component of the vcsreplicator bootstrap procedure.

    Records the Kafka offset range that spans the bootstrap window, calls
    `hg replicatesync --bootstrap` on every repository found under
    REPOS_DIR, and emits a JSON document (stdout, and optionally the
    --output file) that the hgweb half of the bootstrap consumes.
    '''
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help='Path to config file')
    parser.add_argument('hg', help='Path to hg executable for use in bootstrap process')
    parser.add_argument('--workers', help='Number of concurrent workers to use for publishing messages', type=int,
                        default=multiprocessing.cpu_count())
    parser.add_argument('--output', help='Output file path for hgssh JSON')
    args = parser.parse_args()
    config = Config(filename=args.config)
    topic = config.get('replicationproducer', 'topic')
    # Create consumer to gather partition offsets
    consumer_config = {
        # set this so offsets are committed to Zookeeper
        'api_version': (0, 8, 1),
        'bootstrap_servers': [
            host.strip()
            for host in config.get('replicationproducer', 'hosts').split(',')
        ],
        'enable_auto_commit': False, # We don't actually commit but this is just for good measure
    }
    consumer = KafkaConsumer(**consumer_config)
    # This call populates topic metadata for all topics in the cluster.
    # Needed as missing topic metadata can cause the below call to retrieve
    # partition information to fail.
    consumer.topics()
    partitions = consumer.partitions_for_topic(topic)
    if not partitions:
        logger.critical('could not get partitions for %s' % topic)
        sys.exit(1)
    # Gather the initial offsets: the current end of each partition marks
    # the start of the bootstrap window.
    topicpartitions = [
        TopicPartition(topic, partition_number)
        for partition_number in sorted(partitions)
    ]
    offsets_start = consumer.end_offsets(topicpartitions)
    logger.info('gathered initial Kafka offsets')
    # Mapping of `replicatesync` future to corresponding repo name
    replicatesync_futures = {}
    with futures.ThreadPoolExecutor(args.workers) as e:
        # Create a future which makes a `replicatesync` call
        # for each repo on hg.mo
        for repo in find_hg_repos(REPOS_DIR):
            # Create a future to call `replicatesync` for this repo
            replicatesync_args = [
                args.hg,
                '-R', repo,
                'replicatesync',
                '--bootstrap',
            ]
            replicatesync_futures.update({
                e.submit(subprocess.check_output, replicatesync_args): repo
            })
            logger.info('calling `replicatesync --bootstrap` on %s' % repo)
        # Execute the futures and raise an Exception on fail
        for future in futures.as_completed(replicatesync_futures):
            repo = replicatesync_futures[future]
            exc = future.exception()
            if exc:
                logger.error('error occurred calling `replicatesync --bootstrap` on %s: %s' % (repo, exc))
                raise Exception('error triggering replication of Mercurial repo %s: %s' %
                                (repo, exc))
            logger.info('called `replicatesync --bootstrap` on %s successfully' % repo)
    # Gather the final offsets: everything between start and end is the
    # bootstrap message range for hgweb to replay.
    offsets_end = consumer.end_offsets(topicpartitions)
    logger.info('gathered final Kafka offsets')
    # Create map of partition numbers to (start, end) offset tuples
    offsets_combined = {
        int(topicpartition.partition): (offsets_start[topicpartition], offsets_end[topicpartition])
        for topicpartition in topicpartitions
    }
    # Create JSON for processing in ansible and print to stdout
    # Convert repo paths into their wire representations
    output = {
        'offsets': offsets_combined,
        'repositories': sorted([
            config.get_replication_path_rewrite(repo)
            for repo in replicatesync_futures.values()
        ]),
    }
    print(json.dumps(output))
    logger.info('hgssh bootstrap process complete!')
    # Send output to a file if requested
    if args.output:
        logger.info('writing output to %s' % args.output)
        with open(args.output, 'w') as f:
            json.dump(output, f)
def hgweb():
    '''hgweb component of the vcsreplicator bootstrap procedure.

    Takes a vcsreplicator config path on the CLI and the path of the JSON
    document produced by the hgssh half (note: read from the `input` file
    argument, not stdin). Replays the Kafka messages inside the recorded
    offset window, cloning each repository from its `hg-repo-sync-2`
    bootstrap message and then applying any later messages for that repo.
    Returns 1 (after dumping an audit JSON) if anything failed.
    '''
    import argparse
    # Parse CLI args
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help='Path of config file to load')
    parser.add_argument('input', help='JSON data input (output from the hgssh bootstrap procedure) file path')
    parser.add_argument('--workers', help='Number of concurrent workers to use for performing clones', type=int,
                        default=multiprocessing.cpu_count())
    args = parser.parse_args()
    logger.info('reading hgssh JSON document')
    with open(args.input, 'r') as f:
        hgssh_data = json.loads(f.read())
    logger.info('JSON document read')
    # Convert the JSON keys to integers (JSON object keys are strings)
    hgssh_data['offsets'] = {
        int(k): v
        for k, v in hgssh_data['offsets'].items()
    }
    config = Config(filename=args.config)
    consumer_config = {
        # set this so offsets are committed to Zookeeper
        'api_version': (0, 8, 1),
        'bootstrap_servers': [
            host.strip()
            for host in config.get('replicationproducer', 'hosts').split(',')
        ],
        'client_id': config.get('consumer', 'client_id'),
        'enable_auto_commit': False,
        'group_id': config.get('consumer', 'group'),
        'max_partition_fetch_bytes': MAX_BUFFER_SIZE,
        'value_deserializer': value_deserializer,
    }
    topic = config.get('consumer', 'topic')
    topicpartitions = [
        TopicPartition(topic, partition)
        for partition, (start_offset, end_offset)
        in sorted(hgssh_data['offsets'].items())
        # there is no need to do an assignment if the length of the
        # bootstrap message range is 0
        if start_offset != end_offset
    ]
    consumer = KafkaConsumer(**consumer_config)
    # This call populates topic metadata for all topics in the cluster.
    consumer.topics()
    # Audit log: maps repo name -> list of error/skip reasons.
    outputdata = collections.defaultdict(list)
    # We will remove repos from this set as we replicate them
    # Once this is an empty set we are done
    repositories_to_clone = set()
    for repo in hgssh_data['repositories']:
        filterresult = config.filter(repo)
        if filterresult.passes_filter:
            repositories_to_clone.add(repo)
        else:
            outputdata[repo].append('filtered by rule %s' % filterresult.rule)
    extra_messages = collections.defaultdict(collections.deque) # maps repo names to extra processing messages
    clone_futures_repo_mapping = {} # maps cloning futures to repo names
    extra_messages_futures_repo_mapping = {} # maps extra messages futures to repo names
    # Overwrite default hglib path so handle_message_main and it's derivatives
    # use the correct virtualenv
    hglib.HGPATH = config.get('programs', 'hg')
    # Maps partitions to the list of messages within the bootstrap range
    aggregate_messages_by_topicpartition = {
        tp.partition: []
        for tp in topicpartitions
    }
    # Gather all the Kafka messages within the bootstrap range for each partition
    for topicpartition in topicpartitions:
        start_offset, end_offset = hgssh_data['offsets'][topicpartition.partition]
        # end_offsets() reported the *next* offset to be written, so the
        # last message inside the window is end_offset - 1 (inclusive).
        end_offset -= 1
        # Assign the consumer to the next partition and move to the start offset
        logger.info('assigning the consumer to partition %s' % topicpartition.partition)
        consumer.assign([topicpartition])
        logger.info('seeking the consumer to offset %s' % start_offset)
        consumer.seek(topicpartition, start_offset)
        consumer.commit(offsets={
            topicpartition: OffsetAndMetadata(start_offset, '')
        })
        logger.info('partition %s of topic %s moved to offset %s' %
                    (topicpartition.partition, topicpartition.topic, start_offset))
        # Get all the messages we need to process from kafka
        for message in consumer:
            # Check if the message we are processing is within the range of accepted messages
            # If we are in the range, add this message to the list of messages on this partition
            # If we are at the end of the range, break from the loop and move on to the next partition
            if message.offset <= end_offset:
                aggregate_messages_by_topicpartition[message.partition].append(message)
                logger.info('message on partition %s, offset %s has been collected' % (message.partition, message.offset))
            # Commit after every message so a restart resumes past it.
            consumer.commit(offsets={
                TopicPartition(topic, message.partition): OffsetAndMetadata(message.offset + 1, ''),
            })
            if message.offset >= end_offset:
                logger.info('finished retrieving messages on partition %s' % message.partition)
                break
    logger.info('finished retrieving messages from Kafka')
    # Process the previously collected messages
    with futures.ThreadPoolExecutor(args.workers) as e:
        for partition, messages in sorted(aggregate_messages_by_topicpartition.items()):
            logger.info('processing messages for partition %s' % partition)
            for message in messages:
                payload = message.value
                # Ignore heartbeat messages
                if payload['name'] == 'heartbeat-1':
                    continue
                if payload['path'] in repositories_to_clone:
                    # If we have not yet replicated the repository for this message,
                    # of the repo sync message is not tagged with the bootstrap flag,
                    # move on to the next message. The assumed upcoming hg-repo-sync-2
                    # message will clone the data represented in this message anyways.
                    if payload['name'] != 'hg-repo-sync-2' or not payload['bootstrap']:
                        continue
                    logger.info('scheduled clone for %s' % payload['path'])
                    # Schedule the repo sync
                    clone_future = e.submit(clone_repo, config, payload['path'],
                                            payload['requirements'], payload['hgrc'],
                                            payload['heads'])
                    # Here we register the future against its repo name
                    clone_futures_repo_mapping[clone_future] = payload['path']
                    # Remove the repo from the set of repos
                    # which have not been scheduled to sync
                    repositories_to_clone.remove(payload['path'])
                elif payload['path'] not in outputdata:
                    # If the repo is not in the list of repositories to clone,
                    # and the repo is not in the outputdata object (ie hasn't
                    # errored out, by being filtered or otherwise),
                    # then we have already scheduled the repo sync and we will
                    # need to process this message once the sync completes.
                    extra_messages[payload['path']].append((config, payload))
                    logger.info('extra messages found for %s: %s total' %
                        (payload['path'], len(extra_messages[payload['path']]))
                    )
        if repositories_to_clone:
            logger.error('did not receive expected sync messages for %s' % repositories_to_clone)
            # Add errors to audit output
            for repo in repositories_to_clone:
                outputdata[repo].append('did not receive sync message')
        # Process clones
        remaining_clones = len(clone_futures_repo_mapping)
        for completed_future in futures.as_completed(clone_futures_repo_mapping):
            repo = clone_futures_repo_mapping[completed_future]
            exc = completed_future.exception()
            if exc:
                message = 'error triggering replication of Mercurial repo %s: %s' % (repo, str(exc))
                logger.error(message)
                # Add error to audit output
                outputdata[repo].append(message)
            else:
                logger.info('%s successfully cloned' % repo)
            remaining_clones -= 1
            logger.info('%s repositories remaining' % remaining_clones)
            # Schedule extra message processing if necessary
            if repo in extra_messages:
                logger.info('scheduling extra processing for %s' % repo)
                future = e.submit(seqmap, handle_message_main, extra_messages[repo])
                extra_messages_futures_repo_mapping[future] = repo
        # Process extra messages
        total_message_batches = len(extra_messages_futures_repo_mapping)
        for completed_future in futures.as_completed(extra_messages_futures_repo_mapping):
            repo = extra_messages_futures_repo_mapping[completed_future]
            exc = completed_future.exception()
            if exc:
                message = 'error processing extra messages for %s: %s' % (repo, str(exc))
                logger.error(message)
                # Add error to audit output
                outputdata[repo].append(message)
            else:
                logger.info('extra processing for %s completed successfully' % repo)
            total_message_batches -= 1
            logger.info('%s batches remaining' % total_message_batches)
    logger.info('%s bootstrap process complete' % config.get('consumer', 'group'))
    # If anything broke, dump the errors and set exit code 1
    if outputdata:
        with open('/repo/hg/hgweb_bootstrap_out.json', 'w') as f:
            f.write(json.dumps(outputdata))
        return 1
|
19,172 | 2e7c99226098aa7ca761cd431f924ccfb6128fdf | # -*- coding: utf-8 -*-
import sys
import os
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# -- General configuration ---------------------------------------------------
extensions = [
    'sphinx.ext.autodoc',
]
# -- Project information -----------------------------------------------------
project = 'Geno-Pheno-Envo'
copyright = '2021, University of Arizona'
author = 'Tyson L Swetnam'
version = '0.0.1'
release = '0.0.1'
language = None
# Markdown sources are handled by recommonmark; reST remains the primary format.
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = ['.rst']
common_static_path = os.path.join(os.path.dirname(__file__), 'static')
templates_path = ['_templates']
html_static_path = ['_static', common_static_path]
exclude_patterns = ['_build']
master_doc = 'index'
pygments_style = 'sphinx'
todo_include_todos = False
# -- HTML output -------------------------------------------------------------
html_theme = 'sphinx_rtd_theme'
edit_on_github_project = 'genophenoenvo/genophenoenvo.github.io'
edit_on_github_branch = 'master'
htmlhelp_basename = 'GenoPhenoEnvo_Documentation'
# -- LaTeX / man / texinfo output --------------------------------------------
latex_elements = {}
latex_documents = [
    (master_doc, 'GenoPhenoEnvoDocumentation.tex', 'GenoPhenoEnvo Documentation',
     'GenoPhenoEnvo', 'manual'),
]
man_pages = [
    (master_doc, 'GenoPhenoEnvo Documentation', 'GenoPhenoEnvo Documentation',
     [author], 1)
]
texinfo_documents = [
    (master_doc, 'GenoPhenoEnvo Documentation', 'GenoPhenoEnvo Documentation',
     author, 'Tyson Swetnam', 'CyVerse',
     'Miscellaneous'),
]
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ['search.html']
def setup(app):
    """Sphinx extension hook: configure recommonmark and register custom
    static assets. Called automatically when Sphinx loads conf.py."""
    app.add_config_value(
        'recommonmark_config',
        {
            'enable_auto_toc_tree': True,
            'enable_eval_rst': True,
            'auto_toc_tree_section': 'Contents',
        },
        True
    )
    # AutoStructify post-processes parsed Markdown into richer doctrees.
    app.add_transform(AutoStructify)
    # NOTE(review): add_stylesheet/add_javascript were deprecated in Sphinx 1.8
    # and removed in Sphinx 4 (replaced by add_css_file/add_js_file) — confirm
    # the pinned Sphinx version before upgrading.
    app.add_stylesheet('cyverse.css')
    app.add_javascript('jquery.tablesorter.min.js')
    app.add_javascript('cyverse.js')
|
19,173 | 5bddbfb22f336b23d0487e11a69ddd5e1d50671b | """
A grammar to parse from primary FITS headers.
This grammar will return exactly one row per source.
"""
#c Copyright 2008-2016, the GAVO project
#c
#c This program is free software, covered by the GNU GPL. See the
#c COPYING file in the source distribution.
import gzip
import re
from gavo import base
from gavo.grammars.common import Grammar, RowIterator, MapKeys
from gavo.utils import fitstools
from gavo.utils import pyfits
class FITSProdIterator(RowIterator):
    """Row iterator for FITSProdGrammar: yields exactly one dict per
    source FITS file, built from the cards of one header."""
    def _iterRows(self):
        # qnd ("quick and dirty") selects a fast primary-header-only read;
        # otherwise the whole file is opened via pyfits.
        if self.grammar.qnd:
            return self._parseFast()
        else:
            return self._parseSlow()
    def _hackBotchedCard(self, card, res):
        """tries to make *anything* from a card pyfits doesn't want to parse.
        In reality, I'm just trying to cope with oversized keywords.
        """
        # Pull "KEY = value" straight off the raw card image with a
        # permissive regex (value ends at the '/' comment separator).
        mat = re.match(r"([^\s=]*)\s*=\s*([^/]+)", card.cardimage)
        if mat:
            res[mat.group(1)] = mat.group(2).strip()
        else: # Card beyond recognition, ignore
            pass
    def _buildDictFromHeader(self, header):
        """Return the grammar record for *header*: one key per card, with
        '-' replaced by '_' so keys are usable as identifiers."""
        res = {}
        for card in header.ascard:
            try:
                res[card.key.replace("-", "_")] = card.value
            except (ValueError, pyfits.VerifyError):
                # Card that pyfits refuses to parse -- salvage what we can.
                self._hackBotchedCard(card, res)
        # Preserve the full header object for downstream WCS use.
        res["header_"] = header
        if self.grammar.hdusField:
            res[self.grammar.hdusField] = fitstools.openFits(self.sourceToken)
        return self.grammar.mapKeys.doMap(res)
    def _parseFast(self):
        # Quick path: scan only the primary header, transparently handling
        # gzip-compressed files.
        fName = self.sourceToken
        if fName.endswith(".gz"):
            f = gzip.open(fName)
        else:
            f = open(fName)
        header = fitstools.readPrimaryHeaderQuick(f,
            maxHeaderBlocks=self.grammar.maxHeaderBlocks)
        f.close()
        yield self._buildDictFromHeader(header)
    def _parseSlow(self):
        # Full pyfits open; honors the grammar's hdu attribute.
        fName = self.sourceToken
        hdus = fitstools.openFits(fName)
        hduIndex = int(self.grammar.hdu)
        # handle compressed FITSes transparently in hdu=0 case
        # TODO: similar code in utils.fitstools -- is there a smart
        # refactoring for this?
        if (hduIndex==0
                and len(hdus)>1
                and isinstance(hdus[1], pyfits.CompImageHDU)):
            hduIndex = 1
        header = hdus[hduIndex].header
        hdus.close()
        yield self._buildDictFromHeader(header)
    def getLocator(self):
        # Locator used in error messages: the current source file name.
        return self.sourceToken
class FITSProdGrammar(Grammar):
    r"""A grammar that returns FITS-headers as dictionaries.
    This is the grammar you want when one FITS file corresponds to one
    row in the destination table.
    The keywords of the grammar record are the cards in the primary
    header (or some other hdu using the same-named attribute). "-" in
    keywords is replaced with an underscore for easier @-referencing.
    You can use a mapKeys element to effect further name cosmetics.
    This grammar should handle compressed FITS images transparently if
    set qnd="False". This means that you will essentially get the readers
    from the second extension for those even if you left hdu="0".
    The original header is preserved as the value of the header\_ key. This
    is mainly intended for use WCS use, as in ``pywcs.WCS(@header_)``.
    If you have more complex structures in your FITS files, you can get access
    to the pyfits HDU using the hdusField attribute. With
    ``hdusField="_H"``, you could say things like ``@_H[1].data[10][0]``
    to get the first data item in the tenth row in the second HDU.
    """
    name_ = "fitsProdGrammar"
    # Declarative attributes below define the RD (resource descriptor) XML
    # interface of this grammar element.
    _qnd = base.BooleanAttribute("qnd", default=True, description=
        "Use a hack to read the FITS header more quickly. This only"
        " works for the primary HDU", copyable=True)
    _hduIndex = base.IntAttribute("hdu", default=0,
        description="Take the header from this HDU. You must say qnd='False'"
        " for this to take effect.", copyable=True)
    _mapKeys = base.StructAttribute("mapKeys", childFactory=MapKeys,
        default=None, copyable=True, description="Prescription for how to"
        " map header keys to grammar dictionary keys")
    _hdusAttr = base.UnicodeAttribute("hdusField", default=None,
        description="If set, the complete pyfits HDU list for the FITS"
        " file is returned in this grammar field.", copyable=True)
    _maxHeaderBlocks = base.IntAttribute("maxHeaderBlocks",
        default=40, copyable=True, description="Stop looking for"
        " FITS END cards and raise an error after this many blocks."
        " You may need to raise this for people dumping obscene amounts"
        " of data or history into headers.")
    rowIterator = FITSProdIterator
    def onElementComplete(self):
        # Guarantee a MapKeys instance even when the RD declared none.
        if self.mapKeys is None:
            self.mapKeys = base.makeStruct(MapKeys)
        self._onElementCompleteNext(FITSProdGrammar)
|
19,174 | 94eae350812de646ebceb2978a644cc5263caab4 | import sys
from typing import Tuple, List
sys.argv.append("..")
from utils import constants
import numpy as np
class Baby:
    """
    The baby is the player who is controlled by the RL agent.
    The baby moves around the game board trying to eat the
    berries. The game is over once all of the berries are eaten.
    """
    def __init__(self, board_dimensions: tuple, initial_position: list = None) -> None:
        """Creates a baby giving it the board dimensions so that it may not move outside
        the board and an initial position

        Parameters
        ----------
        board_dimensions : tuple
            rows and columns of the board
        initial_position : list, optional
            [row, col] within the board; if not given a uniformly random
            valid position is chosen, by default None
        """
        assert len(board_dimensions) == 2, "board dimensions must be 2 digit array"
        assert all(
            dim >= 0 for dim in board_dimensions
        ), "dimensions must be positive"
        self.board_dimensions = board_dimensions
        if initial_position:
            # isinstance instead of an exact type comparison (idiomatic,
            # and accepts list subclasses).
            assert isinstance(initial_position, list), "Position must be length 2 list"
            assert (
                len(initial_position) == 2
            ), "Position must be a list of length 2 containing x and y coordinates where top left of the board is [0,0]"
            assert (
                0 <= initial_position[0] < self.board_dimensions[0]
            ), "Invalid initial x position"
            assert (
                0 <= initial_position[1] < self.board_dimensions[1]
            ), "invalid initial y position"
            self.position = initial_position.copy()
        else:
            # BUG FIX: np.random.randint excludes its upper bound, so the
            # original randint(0, dim - 1) could never place the baby in the
            # last row/column. Sample over the full valid range [0, dim).
            self.position = [
                np.random.randint(0, board_dimensions[0]),
                np.random.randint(0, board_dimensions[1]),
            ]
    def action(self, direction: str) -> bool:
        """Takes the given action and returns whether it was a valid executed
        move

        Parameters
        ----------
        direction : str
            one of the four compass directions (full word or initial, any
            case) or "R" for a uniformly random legal move

        Returns
        -------
        bool
            whether the move was valid and executed
        """
        direction = direction[0].upper()
        assert (
            direction in constants.BABY_MOVEMENTS
        ), f"Movement must be one of {constants.BABY_MOVEMENTS}"
        if direction == "R":
            # Collect the moves that keep the baby on the board, then pick one
            # uniformly; the chosen move is always legal by construction.
            legal_moves = []
            if self.position[0] != 0:
                legal_moves.append("N")
            if self.position[0] != self.board_dimensions[0] - 1:
                legal_moves.append("S")
            if self.position[1] != 0:
                legal_moves.append("W")
            if self.position[1] != self.board_dimensions[1] - 1:
                legal_moves.append("E")
            direction = np.random.choice(legal_moves)
        # Row 0 is the top of the board, so "N" decreases the row index.
        if direction == "N":
            if self.position[0] != 0:
                self.position[0] -= 1
                return True
            return False
        elif direction == "E":
            if self.position[1] != self.board_dimensions[1] - 1:
                self.position[1] += 1
                return True
            return False
        elif direction == "S":
            if self.position[0] != self.board_dimensions[0] - 1:
                self.position[0] += 1
                return True
            return False
        elif direction == "W":
            if self.position[1] != 0:
                self.position[1] -= 1
                return True
            return False
        return False
    def get_position(self) -> List[int]:
        """Return a copy of the [row, col] position.

        Annotation corrected: the original advertised ``Tuple[int]`` but a
        list has always been returned.
        """
        return self.position.copy()
    def get_board_dimensions(self) -> List[int]:
        """Return a shallow copy of the board dimensions.

        BUG FIX: the original called ``.copy()``, which raises
        AttributeError when the dimensions were supplied as a tuple (the
        documented type); slicing works for both lists and tuples and
        preserves the caller's container type.
        """
        return self.board_dimensions[:]
    def __str__(self) -> str:
        """Human-readable position, e.g. 'Baby at position (2, 3) (row, col)'."""
        row, col = self.get_position()
        return f"Baby at position ({row}, {col}) (row, col)"
|
19,175 | 2f588c4423e1c223f8f0ef4beb186770747a4925 | from django.views.generic import TemplateView
from rest_framework.response import Response
from rest_framework.views import APIView
from timeline.models import Point
from timeline.serializers import TimeLineSerializer
class timeline_view(TemplateView):
    """Render the static timeline page; data is fetched separately via the API view."""
    template_name = 'timeline/timeline.html'
class timeline_api_view(APIView):
    """Read-only JSON endpoint listing all published timeline points."""
    def get(self, request, format=None):
        """Serialize every Point with published=True and return it."""
        point = Point.objects.filter(published=True)
        serializer = TimeLineSerializer(point, many=True)
        return Response(serializer.data)
19,176 | 0f6f29e3253b8e06108e875212d7cb13a3656ef5 | #This is a basic chatbot
from nltk.chat.util import Chat, reflections
# Pattern/response pairs for nltk.chat.util.Chat: the first element of each
# pair is a regex matched against the user's input, the second is a list of
# candidate replies from which one is chosen at random. Typing "quit" ends
# the conversation.
pairs = [
    [
        r"hi|hey|hello",
        ["Hello! what is your name?", "Hey there! what is your name?",]
    ],
    [
        r"my name is (.*)",
        ["Hello I am your assistant How are you today ?",]
    ],
    [
        r"what is your name ?",
        ["My name is chatter and I'm a chatbot ?",]
    ],
    [
        r"how are you ?",
        ["I'm doing good\nHow about You ?",]
    ],
    [
        r"i'm(.*) doing good|same",
        ["Nice to hear that","Alright :)",]
    ],
    [
        r"(.*) age?",
        ["Age is just a number. Let's not talk about it.",]
    ],
    [
        r"(.*)color ?",
        ["I love all colours.",]
    ],
    [
        r"(.*)created|create",
        ["Vishal Naik created me",]
    ],
    [
        r"okay|ok|Hmm|Hmm",
        ["I am not interested",]
    ],
    [
        r"quit",
        ["See you soon!",]
    ],
]
def BOTB():
    """Greet the user and run the interactive chatbot conversation loop."""
    # Opening line shown before the nltk conversation loop takes over.
    print("Hi, I am chatter, I love to have conversations")
    Chat(pairs, reflections).converse()
if __name__ == "__main__":
    # Start chatting only when run as a script, not on import.
    BOTB()
|
19,177 | 905ee1daf882814193070b6be12ebcccefd7b24e | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module:
author:
- Sam Doran (@samdoran)
version_added: '2.9'
short_description:
notes: []
description:
-
options:
opt1:
description: []
"""
EXAMPLES = """
"""
RETURN = """
"""
def get_locale(module):
    """Run the ``locale`` command and return its settings as a dict
    mapping lower-cased variable names to their unquoted values."""
    _, stdout, _ = module.run_command('locale')
    return {
        name.lower(): value.strip('"')
        for name, value in (entry.split('=') for entry in stdout.splitlines())
    }
def main():
    """Ansible entry point: ensure LC_ALL/LANG/LANGUAGE match the module
    parameters, running ``update-locale`` for each mismatched setting."""
    module = AnsibleModule(
        argument_spec={
            'lc_all': {'type': 'str', 'default': 'en_US.UTF-8'},
            'lang': {'type': 'str', 'default': 'en_US.UTF-8'},
            'language': {'type': 'str', 'default': 'en_US.UTF-8'},
        },
        supports_check_mode=True,
    )
    results = {
        'changed': False,
    }
    # Current settings, keyed by lower-cased variable name (lc_all, lang, ...).
    locale = get_locale(module)
    # NOTE(review): `locale[k]` raises KeyError if the `locale` command does
    # not report one of the managed variables — confirm on target platforms.
    for k, v in sorted(module.params.items()):
        if locale[k] == module.params[k]:
            continue
        else:
            # NOTE(review): check_mode is declared as supported above but
            # update-locale is executed unconditionally — check mode would
            # still mutate the system; confirm and guard if intended.
            rc, out, err = module.run_command(['update-locale', '{k}="{v}"'.format(k=k.upper(), v=v)])
            results['changed'] = True
    module.exit_json(**results)
if __name__ == '__main__':
main()
|
19,178 | a3a9baaad253b86cc7f6b1665d2b9e6d30bce9f9 | from to_import import *
def func_find_min_s12(data,data_sigma,k):
    """Minimise func_fit_single_exponents over s12 in [0, 1.28] for fixed k.
    Performs 7 rounds of interval shrinking around the best of the three
    probe points (left / center / right) and returns the pair
    (best objective value, best s12).
    """
    #print k
    s12_left=0
    s12_right=1.28
    f_s12_left=func_fit_single_exponents(data,data_sigma,s12_left,k,False)
    f_s12_right=func_fit_single_exponents(data,data_sigma,s12_right,k,False)
    s12_center=(s12_left+s12_right)*0.5
    f_s12_center=func_fit_single_exponents(data,data_sigma,s12_center,k,False)
    for i in range(7):
        #print s12_left,s12_center,s12_right,f_s12_left,f_s12_center,f_s12_right
        # NOTE: `&` is the bitwise operator, but it is applied to bools here
        # so it behaves like a (non-short-circuiting) logical AND.
        if ((f_s12_center<f_s12_left)&(f_s12_center<f_s12_right)):
            # Center is the best probe: shrink the interval symmetrically
            # around it and re-evaluate both endpoints.
            s12_left=0.5*(s12_left+s12_center)
            s12_right=0.5*(s12_right+s12_center)
            f_s12_left=func_fit_single_exponents(data,data_sigma,s12_left,k,False)
            f_s12_right=func_fit_single_exponents(data,data_sigma,s12_right,k,False)
        elif ((f_s12_center>=f_s12_left)&(f_s12_center<f_s12_right)):
            # Left endpoint is best: bisect towards the left half.
            s12_right=s12_center
            f_s12_right=f_s12_center
            s12_center=(s12_left+s12_right)*0.5
            f_s12_center=func_fit_single_exponents(data,data_sigma,s12_center,k,False)
        elif ((f_s12_center>=f_s12_right)&(f_s12_center<f_s12_left)):
            # Right endpoint is best: bisect towards the right half.
            s12_left=s12_center
            f_s12_left=f_s12_center
            s12_center=(s12_left+s12_right)*0.5
            f_s12_center=func_fit_single_exponents(data,data_sigma,s12_center,k,False)
        else:
            # No probe strictly better than the center: stop early.
            #print "stop"
            break
    #print f_s12_center,s12_center
    return f_s12_center,s12_center
def func_find_min_k(data,data_sigma):
    """Minimise over k in [1.2, 14], where the inner objective for each k is
    itself the minimum over s12 (via func_find_min_s12). Uses the same
    7-round interval-shrinking scheme and returns
    (best objective value, best k, s12 at the best k).
    """
    k_left=1.2
    k_right=14
    [f_k_left,s12_k_left]=func_find_min_s12(data,data_sigma,k_left)
    [f_k_right,s12_k_right]=func_find_min_s12(data,data_sigma,k_right)
    k_center=(k_left+k_right)*0.5
    [f_k_center,s12_k_center]=func_find_min_s12(data,data_sigma,k_center)
    for i in range(7):
        # Progress trace (Python 2 print statement).
        print k_left,k_center,k_right,f_k_left,f_k_center,f_k_right
        if ((f_k_center<f_k_left)&(f_k_center<f_k_right)):
            # Center is the best probe: shrink symmetrically around it.
            k_left=0.5*(k_left+k_center)
            k_right=0.5*(k_right+k_center)
            [f_k_left,s12_k_left]=func_find_min_s12(data,data_sigma,k_left)
            [f_k_right,s12_k_right]=func_find_min_s12(data,data_sigma,k_right)
        elif ((f_k_center>=f_k_left)&(f_k_center<f_k_right)):
            # Left endpoint is best: bisect towards the left half.
            k_right=k_center
            f_k_right=f_k_center
            k_center=(k_left+k_right)*0.5
            [f_k_center,s12_k_center]=func_find_min_s12(data,data_sigma,k_center)
        elif ((f_k_center>=f_k_right)&(f_k_center<f_k_left)):
            # Right endpoint is best: bisect towards the right half.
            k_left=k_center
            f_k_left=f_k_center
            k_center=(k_left+k_right)*0.5
            [f_k_center,s12_k_center]=func_find_min_s12(data,data_sigma,k_center)
        else:
            # No probe strictly better than the center: stop early.
            #print k_left,k_center,k_right,f_k_left,f_k_center,f_k_right
            #print "stop"
            break
    print f_k_center,k_center,s12_k_center
    return f_k_center,k_center,s12_k_center
|
19,179 | 436a5cc1037d4757bc5dea6d2a614d39107975d1 | /home/sharad/catkin_ws/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/srv/_FileMakeDir.py |
19,180 | cc97624f1340470ba76c43ebbcb9beec5345c092 | '''
This is the fourth level in the game. In this game, the player is prompted to enter 10 words. The first letter of the second word should
match with the last letter of the first word. Points are awarded based on the number of correct words entered
'''
def wordsnake():
    """Interactive word-snake level: the player enters 10 words where each
    word must start with the last letter of the previous one. Unknown
    words are appended to wordlist.txt. Returns the score (XP).

    NOTE(review): only 9 chained words are collected (count runs 0..8), so
    `correctword` can never reach 10 and the top score branch is
    unreachable; scores for correctword in {0..4, 7} fall through and the
    function returns None; a player who keeps entering wrong words loops
    forever since `count` is only advanced on correct words — confirm the
    intended scoring bands before changing behavior.
    """
    fhand = open('wordlist.txt') # A textfile containing a list of English words
    wordlist = [] # known dictionary words
    inputwords = [] # words entered by the player, in order
    for i in fhand: # load the dictionary into memory
        i = i.strip() # drop the trailing newline
        wordlist.append(i)
    print('\nW O R D - S N A K E')
    print('Enter the word :') # prompt for the first word
    inp = input()
    # NOTE(review): both branches append inp, so the membership test only
    # controls whether the word is persisted to wordlist.txt.
    if inp in wordlist:
        inputwords.append(inp)
    else: # unknown word: remember it and add it to the dictionary file
        inputwords.append(inp)
        with open('wordlist.txt', "a") as fhandle:
            fhandle.write('%s\n' %inp)
    count = 0 # index of the word the next entry must chain from
    correctword = 0 # number of correctly chained words
    wrongword = 0 # number of rejected attempts
    while (count < 9): # collect up to 9 more words after the first
        word = inputwords[count]
        print('Enter the next word:')
        inp1 = input()
        if inp1 not in wordlist: # persist unknown words to the dictionary
            with open('wordlist.txt', "a") as fhandle:
                fhandle.write('%s\n' % inp1)
        if(word[-1] == inp1[0]): # chain rule: last letter == first letter
            inputwords.append(inp1)
            correctword = correctword + 1
        else:
            # Rejected: count the miss and re-prompt without advancing.
            print('The last letter of the first word does not match with the first letter of this word. Try again')
            wrongword = wrongword + 1
            continue
        count = count + 1
    # Scoring bands (see NOTE in the docstring about gaps/unreachable cases).
    if (correctword == 10) or (correctword > 10):
        print('Flour has been added to your inventory')
        XP = 25
        return(XP)
    if (correctword > 7) and (correctword < 10):
        print('Flour has been added to your inventory')
        XP = 20
        return(XP)
    if (correctword > 4) and (correctword < 7):
        print('Flour has been added to your inventory')
        XP = 15
        return(XP)
    if (wrongword == 10):
        print('Since you scored zero, Flour was not added to your inventory.')
        XP = 0
        return(XP)
|
19,181 | 29aea1da5be506adf8bfbb74566316c5ef6721ec | hour="4"
minute="16"
seconds="38"
am_pm="PM"
print("The time is: {0}:{1}:{2} {3}".format(hour,minute,seconds,am_pm))
|
19,182 | 014f5b332b375306011aff1a85ac959f8a284279 | """
WizNote API server.
"""
from typing import Dict
import requests
from wizclientpy.sync import api
from wizclientpy.sync.wizrequest import WizResp, json_request
from wizclientpy.sync.user_info import UserInfo, KbInfo, KbValueVersions
from wizclientpy.utils.classtools import MetaSingleton
from wizclientpy.utils.urltools import buildCommandUrl
from wizclientpy.constants import WIZKM_WEBAPI_VERSION
class WizKMApiServerBase:
    """Base class of api object: normalises and stores the server address."""
    server: str  # normalised server URL (no trailing slash, https by default)
    def __init__(self, strServer: str):
        # Strip any trailing slashes and default to https when the caller
        # gave a bare host name.
        while strServer.endswith("/"):
            strServer = strServer[:-1]
        if not strServer.startswith("http"):
            strServer = "https://" + strServer
        self.server = strServer
    def server(self):
        # NOTE(review): this method is shadowed by the `self.server` string
        # assigned in __init__, so it is unreachable on instances (and would
        # recurse on itself if it were callable) — likely intended to be a
        # property or a differently named accessor; confirm before use.
        return self.server
class WizKMAccountsServer(WizKMApiServerBase, metaclass=MetaSingleton):
    """WizKMAccountsServer is used to manage account related information.

    Singleton (via MetaSingleton) wrapping the /as/user/* HTTP endpoints:
    login/logout, token keep-alive, and key-value version retrieval.
    """
    __isLogin: bool  # True once login() succeeded or set_user_info() was called
    __autoLogout: bool
    __valueVersions: Dict  # kb GUID -> KbValueVersions
    __userInfo: UserInfo
    # NOTE(review): the default `api.newAsServerUrl()` is evaluated once at
    # class-definition time, not per call — confirm that is intended.
    def __init__(self, strServer: str = api.newAsServerUrl()):
        self.__isLogin = False
        self.__autoLogout = False
        self.__valueVersions = {}
        super().__init__(strServer)
    def login(self, user_name: str, password: str) -> bool:
        """Login to server and get access token. Returns True; a failed
        request raises from json_request instead of returning False."""
        if self.__isLogin:
            return True
        url = buildCommandUrl(self.server, "/as/user/login")
        result = json_request("POST", url, body={
            "userId": user_name,
            "password": password
        })
        # Update user information
        self.__userInfo = UserInfo(result)
        self.__isLogin = True
        return True
    def logout(self) -> bool:
        """Logout the current token."""
        url = buildCommandUrl(
            self.server, "/as/user/logout", self.__userInfo.strToken)
        json_request("GET", url, token=self.__userInfo.strToken)
        return True
    def keep_alive(self):
        """Extended expiration time of token by 15 min; returns maxAge."""
        if self.__isLogin:
            url = buildCommandUrl(
                self.server, "/as/user/keep", self.__userInfo.strToken)
            result = json_request("GET", url, token=self.__userInfo.strToken)
            return result["maxAge"]
        else:
            # NOTE(review): ServerXmlRpcError is not imported in this module's
            # visible imports — this branch would raise NameError; confirm.
            raise ServerXmlRpcError("Can not keep alive without login.")
    def fetch_token(self, user_id, password):
        """Get a token by user identity."""
        url = buildCommandUrl(self.server, "/as/user/token")
        result = json_request("POST", url, {
            "userId": user_id,
            "password": password
        })
        return result["token"]
    def fetch_user_info(self) -> UserInfo:
        """Get user information from server by token.

        NOTE(review): the annotated UserInfo is never returned — `result`
        is discarded and the method returns None; a trailing
        ``return UserInfo(result)`` appears to be missing. Confirm.
        """
        url = buildCommandUrl(
            self.server, "/as/user/keep", self.__userInfo.strToken)
        result = json_request("GET", url, token=self.__userInfo.strToken)
    def user_info(self) -> UserInfo:
        """Access to user information object."""
        return self.__userInfo
    def set_user_info(self, userInfo: UserInfo):
        """Set new user info (also marks the instance as logged in)."""
        self.__isLogin = True
        self.__userInfo = userInfo
    def fetch_value_versions(self):
        """Fetch the first page (100 entries) of kb value versions."""
        nCountPerPage = 100
        nNextVersion = 0
        url = buildCommandUrl(
            self.server, "/as/user/kv/versions", self.__userInfo.strToken)
        result = json_request("GET", url, token=self.__userInfo.strToken, body={
            'version': nNextVersion,
            'count': nCountPerPage,
            'pageSize': nCountPerPage
        })
        kbValVerCollection = []
        for obj in result:
            kbValVerCollection.append(KbValueVersions(obj))
        return kbValVerCollection
    def init_all_value_versions(self):
        """Populate the GUID -> KbValueVersions cache from the server."""
        versions = self.fetch_value_versions()
        for version in versions:
            self.__valueVersions[version.strKbGUID] = version
class WizKMDatabaseServer(WizKMApiServerBase):
"""WizKMDatabaseServer is used to manage knowledge database."""
def __init__(userInfo, kbInfo=KbInfo(), versions=KbValueVersions()):
self.__userInfo = userInfo
self.__kbInfo = kbInfo
self.__valueVersions = versions
def document_downloadDataNew(self):
pass |
19,183 | 4dd08ef8e5952dacff087c78dfb812c03d3b507b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from astropy.table import Table
from .utils import _earth_location_from_dict
__all__ = [
'TelescopeArray',
]
class TelescopeArray(Table):
"""Telescope array info.
TODO: is this available in ctapipe?
"""
@property
def summary(self):
"""Summary info string."""
s = '---> Telescope array info:\n'
s += '- number of telescopes: {}\n'.format(len(self))
return s
@property
def observatory_earth_location(self):
"""Observatory location (`~astropy.coordinates.EarthLocation`)"""
return _earth_location_from_dict(self.meta)
def plot(self, ax):
"""Plot telescope locations."""
raise NotImplementedError
|
19,184 | e97b5cd3092b839fc87055de29bbc8c4f8af4bcb | import psutil
import time
from multiprocessing import Process
import cv2
from Encoding import encrypt_cisco
import base64
import numpy as np
encoding=encrypt_cisco('amirjahani69')
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 60]
def dec_quality(frame):
ret,frame=cv2.imencode('.jpg', frame,encode_param)
return frame.tobytes()
def code_data(frame):
frame=encoding.encrypt(frame)
json_frm=base64.b64encode(frame)
return json_frm
i=0
while True:
i+=1
name="test/b{}.jpg".format(i)
with open(name,'rb') as file:
data=file.read()
file.close()
enc_frm=base64.b64decode(data)
frame=encoding.decrypt(enc_frm)
nparr=np.fromstring(frame, dtype='int8')
crp=cv2.imdecode(nparr,cv2.IMREAD_COLOR)
cv2.imshow("a",crp)
cv2.waitKey(10)
"""cap=cv2.VideoCapture(0)
i=0
while True:
i+=1
ret,frame=cap.read()
frame=cv2.resize(frame,(480,320))
frame=dec_quality(frame)
frame=code_data(frame)
with open('test/b{}.jpg'.format(i),'wb') as file:
file.write(frame)
file.close()"""
|
19,185 | ef71e825f5a2b7671dafba13b3103a102e9a7e5d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pygame import *
from pygame.transform import scale
MOVE_SPEED = 7
WIDTH = 32
HEIGHT = 52
class Player(sprite.Sprite):
def __init__(self, x, y):
sprite.Sprite.__init__(self)
self.mensaje = " "
# монеты
self.coins_count = 0
# скорость
self.xvel = 0
self.yvel = 0
# прямоугольник игрока
self.rect = Rect(x, y, WIDTH, HEIGHT) # прямоугольный объект
# загружаем анимации для всех направлений
self.images = {}
for t in ['up', 'down', 'left', 'right']:
self.images[t] = []
for i in range(3):
self.images[t].append(scale(image.load(f"/home/andreacat/pocketPyme/rpg-template-master/images/player/{t}{i}.png"), (32, 52)))
# текущая картинки
self.index = 0
self.image = self.images['down'][1]
def update(self, left, right, up, down, platforms, coins):
# если персонаж пересекается с монеткой
for coin in coins:
if sprite.collide_rect(self, coin):
coin.kill()
self.coins_count += 1
self.mensaje = (f"mision pyme {self.coins_count}")
# изменяем скорость в зависимости от нажатых клавиш и выбираем картинку
if up:
self.yvel = -MOVE_SPEED
self.image = self.images['up'][self.index]
if down:
self.yvel = MOVE_SPEED
self.image = self.images['down'][self.index]
if left:
self.xvel = -MOVE_SPEED
self.image = self.images['left'][self.index]
if right:
self.xvel = MOVE_SPEED
self.image = self.images['right'][self.index]
if not (left or right): # стоим, когда нет указаний идти
self.xvel = 0
if not (up or down): # стоим, когда нет указаний идти
self.yvel = 0
if not (up or down or left or right):
self.image = self.images['down'][1]
else:
self.index = (self.index + 1) % 3
# проверяем перечение с объектами на карте
self.collide(platforms)
# передвижение
self.rect.x += self.xvel # переносим свои положение на xvel
self.rect.y += self.yvel # переносим свои положение на xvel
def collide(self, platforms):
for p in platforms:
if sprite.collide_rect(self, p): # если есть пересечение платформы с игроком
if self.xvel < 0 and self.rect.left < p.rect.right and self.rect.right > p.rect.right:
self.xvel = 0
if self.xvel > 0 and self.rect.right > p.rect.left and self.rect.left < p.rect.left:
self.xvel = 0
if self.yvel < 0 and self.rect.top < p.rect.bottom and self.rect.bottom > p.rect.bottom:
self.yvel = 0
if self.yvel > 0 and self.rect.bottom > p.rect.top and self.rect.top < p.rect.top:
self.yvel = 0
|
19,186 | 7b575246d545d055003a7e308cd15579f3500fa8 | """ Insert Jellyfish output into the database. """
import itertools
import time
from bitarray import bitarray
from django.db import connection, transaction
from django.core.management.base import BaseCommand
from kmer.models import String, Binary
class Command(BaseCommand):
help = 'Insert random Kmer data into the database.'
_code = {
'A': bitarray('01'),
'C': bitarray('11'),
'G': bitarray('00'),
'T': bitarray('10')
}
_tables = [
'kmer_binary',
'kmer_string'
]
def add_arguments(self, parser):
parser.add_argument('--k', type=int, dest='k', default=31,
help='Length of k to test.')
parser.add_argument('--rows', type=int, dest='rows', default=10 ** 11,
help='Total number of rows to insert.')
def handle(self, *args, **opts):
self.test_results = {}
self.totals = []
# Empty Tables
self.empty_tables()
# Init variables
total = 0
self.kmers = []
self.kmer_string = []
self.kmer_binary = []
# Generate kmers
progress_time = time.time()
print 'Kmers:String:Binary:kmers/second'
for p in itertools.product(['A', 'T', 'G', 'C'], repeat=opts['k']):
if total % (opts['rows'] / 100000) == 0 and total > 0:
# Insert Kmers
self.insert_kmers('String')
self.insert_kmers('Binary')
self.kmer_string = []
self.kmer_binary = []
# Get table sizes
progress_time = float(time.time() - progress_time)
rate = int((opts['rows'] / 100000) / progress_time)
self.table_size(total, rate)
progress_time = time.time()
if total == opts['rows']:
break
kmer = ''.join(p)
self.kmer_string.append(String(string=kmer))
self.kmer_binary.append(Binary(string=self.encode(kmer)))
total += 1
def encode(self, seq):
a = bitarray()
a.encode(self._code, seq)
return a.tobytes()
@transaction.atomic
def insert_kmers(self, table):
if table == 'String':
String.objects.bulk_create(self.kmer_string, batch_size=10000)
elif table == 'Binary':
Binary.objects.bulk_create(self.kmer_binary, batch_size=10000)
def table_size(self, total, rate):
# Get KmerString table size
print '{0}:{1}:{2}:{3}'.format(
total,
self.get_table_size('kmer_string')[0],
self.get_table_size('kmer_binary')[0],
rate
)
def get_table_size(self, table):
cursor = connection.cursor()
cursor.execute("SELECT pg_total_relation_size('{0}');".format(table))
row = cursor.fetchone()
return row
@transaction.atomic
def empty_tables(self):
# Empty Tables and Reset id counters to 1
for table in self._tables:
self.empty_table(table)
def empty_table(self, table):
query = "TRUNCATE TABLE {0} RESTART IDENTITY CASCADE;".format(table)
cursor = connection.cursor()
cursor.execute(query)
|
19,187 | 4c6d44c777246c740bac5d343120b619cb011f48 | from __future__ import print_function
from .. import ureg, Q_
from io import BytesIO
import numpy as np
from .node import Node
from .number import parse_rap_expr
from .util import from_rap
from .. import ui as ui
class RapInt(Node):
@property
def w(self):
vals = from_rap(self)
w = ui.Integer(
name=vals['label'],
desc=vals['desc'],
units=vals['units'],
min=vals['min'],
max=vals['max'],
value=vals['value'],
cb=lambda x, y: RapInt.value.fset(self, w.value)
)
return w
@property
def value(self):
return int(self.get_text())
@value.setter
def value(self, val):
self.set_text(str(val))
class RapMinMax(Node):
def text_to_number(self, par):
val = self.get_text()
u = par.find('units')
if u is None or u == '' or (type(u) == str and u.startswith('/')):
return float(val)
val = parse_rap_expr(u.text, val)
if type(val) == str:
return val
return val.magnitude
@property
def value(self):
par = self.elem.find('..')
if par.tag == 'integer':
return int(self.get_text())
return float(self.text_to_number(par))
@value.setter
def value(self, val):
self.set_text(str(val))
class RapBool(Node):
@property
def value(self):
if self.get_text() in ['true', '1', 'yes', 'on', 1]:
return True
else:
return False
@value.setter
def value(self, val):
if val in [True, 1, '1', 'true', 'True', 'yes', 'Yes', 'On', 'on']:
self.set_text('true')
else:
self.set_text('false')
class XY(Node):
"""
xy can be a name/value pairs for a histogram or x,y pairs for a curve
"""
@property
def value(self):
try:
par = self.elem.find('../..')
except:
ValueError("Could not find xy parents.")
val = self.elem.text
if par.tag == 'curve':
# return a 2D numpy array
res = np.fromstring(val, sep=' \n').reshape(-1, 2)
else:
# return a list of (name, value) tuples
it = iter(shlex.split(val))
res = zip(it, it)
return res
@value.setter
def value(self, val):
if type(val) == np.ndarray:
s = BytesIO()
np.savetxt(s, val, fmt='%.6e %.6e', newline="\n")
res = s.getvalue()
elif type(val) == list or type(val) == tuple:
val = '\n'.join([' '.join(map(repr, x)) for x in zip(*val)])
# we need the strings double quoted for tcl
res = val.replace("'", '"')
self.elem.text = res
class RapLog(Node):
@property
def value(self):
return self.all_text()
@value.setter
def value(self, val):
self.set_text(str(val))
|
19,188 | 16b2f8ab347f0bf079f54cf8166692b2faaa25a0 | # Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
# @file AthenaRootComps/python/ReadAthenaxAODHybrid.py
# @purpose make the Athena framework read a set of xAOD files to emulate the
# usual TEvent event loop ... BUT READ METADATA WITH POOL!
# @author Will Buttinger and Johannes Elmsheuser
#
def _configure():
"""Install the Athena-based xAOD event selector and correctly configure
a few other services.
"""
from AthenaCommon import CfgMgr
from AthenaCommon.AppMgr import theApp
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
from AthenaCommon.Logging import logging
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
from AthenaCommon.Constants import ERROR
msg = logging.getLogger( 'ReadAthenaxAODHybrid' )
msg.debug("Configuring Athena for reading xAOD files (via TEvent, with POOL for Metadata)...")
#check if we already have a selector set up
if hasattr(svcMgr, 'EventSelector'):
err = "svcMgr already configured with another EventSelector: [%s]"%\
svcMgr.EventSelector.getFullJobOptName()
msg.error( err )
raise RuntimeError( err )
#Setup our EventSelector
svcMgr += CfgMgr.Athena__xAODEventSelector( "EventSelector" )
#for historical reasons, we now add configurables of a bunch of services
if not hasattr(svcMgr, 'THistSvc'): svcMgr += CfgMgr.THistSvc()
if not hasattr (svcMgr, 'ProxyProviderSvc'): svcMgr += CfgMgr.ProxyProviderSvc()
if not hasattr (svcMgr, 'InputMetaDataStore'): svcMgr += CfgMgr.StoreGateSvc("InputMetaDataStore")
if not hasattr (svcMgr, 'Athena::xAODCnvSvc'): svcMgr += CfgMgr.Athena__xAODCnvSvc()
if not hasattr(svcMgr, 'EventPersistencySvc'): svcMgr += CfgMgr.EvtPersistencySvc( "EventPersistencySvc" )
if not hasattr (svcMgr, 'MetaDataSvc'): svcMgr += CfgMgr.MetaDataSvc ("MetaDataSvc")
if not hasattr(svcMgr, 'PoolSvc'): svcMgr += CfgMgr.PoolSvc()
#Here we set various properties of things
theApp.ExtSvc += [ svcMgr.EventSelector.getFullName() ]
theApp.EvtSel = "EventSelector"
svcMgr.MetaDataSvc.MetaDataContainer = "MetaDataHdr" #this really should be the default for this property :-(
svcMgr.PoolSvc.OutputLevel = ERROR
svcMgr.EventSelector.ReadMetaDataWithPool=True
#default the input collections to the FilesInput from AthenaCommonFlags
#this is so that the eventselector picks up input files in grid jobs
svcMgr.EventSelector.InputCollections = athenaCommonFlags.FilesInput()
# suppress the event loop heartbeat as it is somewhat I/O hungry for
# no real gain in n-tuple reading/writing scenarii
if not hasattr(svcMgr, theApp.EventLoop): svcMgr += getattr(CfgMgr, theApp.EventLoop)()
evtloop = getattr(svcMgr, theApp.EventLoop)
try:
evtloop.EventPrintoutInterval = 10000
except Exception as err:
msg.info('disabling event loop heartbeat... [failed]')
msg.info('performances might be sub-par... sorry.')
pass
msg.debug("Configuring Athena for reading ROOT files (via TEvent, with POOL for Metadata)... [OK]")
return
# execute at module import
_configure()
# clean-up namespace
del _configure
|
19,189 | 8e971f083753ea5363132519b3b133ccb78f8d45 | from time import sleep
from django.test import TestCase
from netaddr import EUI
from customers.models import Customer
from networks.models import CustomerIpLeaseModel, NetworkIpPool, NetworkIpPoolKind
from customers.tests.get_user_credentials_by_ip import BaseServiceTestCase
class FetchSubscriberDynamicLeaseTestCase(TestCase):
def setUp(self):
# Initialize customers instances
BaseServiceTestCase.setUp(self)
self.customer.device = self.device_switch
self.customer.dev_port = self.ports[1]
self.customer.add_balance(self.admin, 10000, 'test')
self.customer.save()
self.customer.refresh_from_db()
self.customer.pick_service(self.service, self.customer)
# customer for tests
custo2 = Customer.objects.create_user(
telephone='+79782345679',
username='custo2',
password='passw',
group=self.group,
device=self.device_onu
)
custo2.refresh_from_db()
self.customer2 = custo2
self.ippool = NetworkIpPool.objects.create(
network='10.11.12.0/24',
kind=NetworkIpPoolKind.NETWORK_KIND_INTERNET,
description='test',
ip_start='10.11.12.2',
ip_end='10.11.12.254',
# vlan_if=vlan,
gateway='10.11.12.1',
is_dynamic=True
)
self.ippool.groups.add(self.group)
def test_is_ok(self):
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.2')
self.assertEqual(lease.pool, self.ippool)
self.assertEqual(lease.customer, self.customer)
self.assertEqual(lease.mac_address, EUI('1:2:3:4:5:6'))
self.assertTrue(lease.is_dynamic)
def test_multiple_fetch(self):
for n in range(4):
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:7',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.2')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
def test_different_mac(self):
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.2')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:7',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.3')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:8',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.4')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
def test_multiple_customers(self):
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.2')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:8',
device_mac='11:13:14:15:16:18',
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.3')
self.assertEqual(lease.customer, self.customer2)
self.assertTrue(lease.is_dynamic)
def test_ident_mac(self):
"""
What if two different customers have the same mac addr
"""
for n in range(4):
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.2')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='11:13:14:15:16:18',
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.3')
self.assertEqual(lease.customer, self.customer2)
self.assertTrue(lease.is_dynamic)
def test_change_subnet(self):
"""
What if group membership for ip pool is changed
:return:
"""
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.2')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
ippool2 = NetworkIpPool.objects.create(
network='10.10.11.0/24',
kind=NetworkIpPoolKind.NETWORK_KIND_INTERNET,
description='test',
ip_start='10.10.11.2',
ip_end='10.10.11.254',
gateway='10.10.11.1',
is_dynamic=True
)
self.ippool.groups.remove(self.group)
ippool2.groups.add(self.group)
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.10.11.2')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:7',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.10.11.3')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.10.11.2')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
def test_dynamic_or_static(self):
ippool_stat = NetworkIpPool.objects.create(
network='10.11.13.0/24',
kind=NetworkIpPoolKind.NETWORK_KIND_INTERNET,
description='test',
ip_start='10.11.13.2',
ip_end='10.11.13.254',
gateway='10.11.13.1',
is_dynamic=False
)
ippool_stat.groups.add(self.group)
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=False
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.13.2')
self.assertEqual(lease.customer, self.customer)
self.assertFalse(lease.is_dynamic)
lease = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease)
self.assertEqual(lease.ip_address, '10.11.12.2')
self.assertEqual(lease.customer, self.customer)
self.assertTrue(lease.is_dynamic)
def test_returns_newer_lease(self):
CustomerIpLeaseModel.objects.create(
ip_address='10.11.12.13',
pool=self.ippool,
customer=self.customer,
mac_address='1:2:3:4:5:6',
is_dynamic=True
)
lease1 = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease1)
self.assertEqual(lease1.ip_address, '10.11.12.13')
self.assertEqual(lease1.pool, self.ippool)
self.assertEqual(lease1.customer, self.customer)
self.assertTrue(lease1.is_dynamic)
sleep(0.2)
CustomerIpLeaseModel.objects.create(
ip_address='10.11.12.14',
pool=self.ippool,
customer=self.customer,
mac_address='1:2:3:4:5:6',
is_dynamic=True
)
lease2 = CustomerIpLeaseModel.fetch_subscriber_lease(
customer_mac='1:2:3:4:5:6',
device_mac='12:13:14:15:16:17',
device_port=2,
is_dynamic=True
)
self.assertIsNotNone(lease2)
self.assertEqual(lease2.ip_address, '10.11.12.14')
self.assertEqual(lease2.pool, self.ippool)
self.assertEqual(lease2.customer, self.customer)
self.assertTrue(lease2.is_dynamic)
# lease2 must be newer than lease1
self.assertGreater(lease2.lease_time, lease1.lease_time)
|
19,190 | 71dea171a4bc6d5bfc0169ed7bf9176de7c9e870 | from typing import Text
class Epic:
# TODO: Might want to encapsulate this stuff in GameState or something.
def __init__(self):
self._stanzas = []
self.hero = None
def add_stanza(self, stanza: Text):
self._stanzas.append(stanza)
@property
def story(self):
return "\n\n".join(self._stanzas)
def set_hero(self, hero):
self.hero = hero
|
19,191 | c8fb0854fd81486bc20eec4d4c12b018de2d74c6 | import threading
import time
import stepperControl as SC
import matplotlib.pyplot as plt
def loop_3():
global end
global angleTime
end = "no"
angleTime = SC.motor_control()
end = "end"
def loop_4():
while (end != "end"):
print "threading"
plt.plot(angleTime[1], angleTime[0])
plt.show()
threading.Thread(target=loop_3).start()
threading.Thread(target=loop_4).start()
|
19,192 | 0ac0167956c0667fa14f2ee237c68c4a6a9c5be5 | # This file is used to define the class of beam-column connection, which includes beam/column depth
# check, RBS dimensions, moment capacity at column face, strong-column-weak-beam check, and panel zone
# thickness (doubler plate)
##########################################################################
# Load Built-in Packages #
##########################################################################
# Please add all the imported modules in the part below
import copy
import pandas as pd
import sys
##########################################################################
# Load User Defined Class and Py Files #
##########################################################################
from help_functions import extract_depth
from help_functions import extract_weight
# #########################################################################
# Open the section database and store it as a global variable #
# #########################################################################
from global_variables import STRONG_COLUMN_WEAK_BEAM_RATIO
# #########################################################################
# Define a class of beam #
# #########################################################################
class Connection(object):
"""
This class is used to define a beam-column connection part, which has the following attributes:
(1) Check column and beam depth as well as weight per ANSI Section 5.3.1 prequalified connection.
(2) Extract RBS (reduced beam section) dimension from beam class.
(3) Compute the probable maximum moment at the center of RBS
(4) Calculate shear force at the center of RBS
(5) Compute probable moment at column face
(6) Compute plastic moment of beam based on expeced yield stress
(7) Check moment capacity at column face
(8) Check shear strength of beam
(9) Check whether strong column weak beam is satisfied
(10) Calculate doubler plate thickness
"""
def __init__(self, connection_type, steel, beam_dead_load, beam_live_load, span,
left_beam=None, right_beam=None, top_column=None, bottom_column=None):
"""
This function initializes all attributes of Connection class.
:param connection_type: a string which denotes the type of beam-column connection.
"interior": interior beam-column connection with two beams and two columns
"exterior": exterior beam-column connection with one beam and two columns
"top exterior": exterior connection at roof with one beam and one column
"top interior": interior connection at roof with two beams and one column
:param steel: a class defined in "steel_material.py" file
:param beam_dead_load: dead load on beam (unit: lb/ft)
:param beam_live_load: live load on beam (unit: lb/ft)
:param span: the length of beam (unit: ft)
:param left_beam: a class defined in "beam_component.py" file which represents the beam at
left side of the connection.
:param right_beam: a class defined in "beam_component.py" file which represents the beam at
right side of the connection.
:param top_column: a class defined in "column_component.py" file which refers the column in
upper story of the connection.
:param bottom_column: a class defined in "column_component.py" file which refers the column in
lower story of the connection.
"""
self.connection_type = connection_type
# The dictionary used to store the RBS dimensions
self.left_RBS_dimension = {}
self.right_RBS_dimension = {}
# The dictionary used to store the probable moment
self.moment = {}
# The dictionary used to store the shear force
self.shear_force = {} # keys:
# A scalar used to denote the doubler plate thickness
self.doubler_plate_thickness = 0
# A dictionary used to store the failure mode (if any)
self.is_feasible = {} # keys: 'geometry limit', 'flexural strength', 'shear strength', 'SCWB'
# Define a boolean flag which denotes the overall check results (True means OK.)
self.flag = None
# Call methods to initialize the attributes listed above
self.check_column_beam(connection_type, left_beam, right_beam, top_column, bottom_column)
self.extract_reduced_beam_section(connection_type, left_beam, right_beam)
self.compute_probable_moment_RBS(connection_type, steel, left_beam, right_beam)
self.compute_shear_force_RBS(connection_type, beam_dead_load, beam_live_load, span, bottom_column)
self.compute_probable_moment_column_face(connection_type)
self.compute_plastic_moment(connection_type, steel, left_beam, right_beam)
self.check_moment_column_face(connection_type)
self.check_shear_strength(connection_type, beam_dead_load, beam_live_load, left_beam, right_beam)
self.check_column_beam_relationships(connection_type, steel, left_beam, right_beam, top_column, bottom_column)
self.determine_doubler_plate(connection_type, steel, left_beam, right_beam, bottom_column, top_column)
def check_column_beam(self, connection_type, left_beam, right_beam, top_column, bottom_column):
"""
This method is used to check whether the column and beam depth (weight) is feasible for
prequalified connection. (step 1 in ANSI Section 5.8)
The explanations for input arguments are presented in __init__() function.
:return: a boolean result stored in is_feasible dictionary.
Actually, this method should always be true because all beam and column members are selected from a
database that non-prequalified sizes have been removed.
"""
# Extract the beam depth and weight
if connection_type == 'typical exterior':
# Connection only has one beam and two columns
left_beam_depth = extract_depth(left_beam.section['section size'])
left_beam_weight = extract_weight(left_beam.section['section size'])
top_column_depth = extract_depth(top_column.section['section size'])
bottom_column_depth = extract_depth(bottom_column.section['section size'])
if (left_beam_depth <= 36 and left_beam_weight <= 300
and top_column_depth <= 36 and bottom_column_depth <= 36):
self.is_feasible['geometry limits'] = True
else:
sys.stderr.write('Beam and column depth & weight are not acceptable!\n')
self.is_feasible['geometry limits'] = False
elif connection_type == 'top exterior':
# ****************** Debug using only *************************
# print("top exterior:")
# print("column size = ", bottom_column.section['section size'])
# print("beam size = ", left_beam.section['section size'])
# ****************** Debug ends here **************************
# Connection only has one beam and one column
left_beam_depth = extract_depth(left_beam.section['section size'])
left_beam_weight = extract_weight(left_beam.section['section size'])
bottom_column_depth = extract_depth(bottom_column.section['section size'])
if left_beam_depth <= 36 and left_beam_weight <= 300 and bottom_column_depth <= 36:
self.is_feasible['geometry limits'] = True
else:
sys.stderr.write('Beam and column depth & weight are not acceptable!\n')
self.is_feasible['geometry limits'] = False
elif connection_type == 'typical interior':
# Connection has two beams and two columns
left_beam_depth = extract_depth(left_beam.section['section size'])
left_beam_weight = extract_weight(left_beam.section['section size'])
right_beam_depth = extract_depth(right_beam.section['section size'])
right_beam_weight = extract_weight(right_beam.section['section size'])
top_column_depth = extract_depth(top_column.section['section size'])
bottom_column_depth = extract_depth(bottom_column.section['section size'])
if (left_beam_depth <= 36 and right_beam_depth <= 36
and left_beam_weight <= 300 and right_beam_weight <= 300
and top_column_depth <= 36 and bottom_column_depth <= 36):
self.is_feasible['geometry limits'] = True
else:
sys.stderr.write('Beam and beam depth & weight are not acceptable!\n')
self.is_feasible['geometry limits'] = False
elif connection_type == 'top interior':
# Connection has two beams and one column
left_beam_depth = extract_depth(left_beam.section['section size'])
left_beam_weight = extract_weight(left_beam.section['section size'])
right_beam_depth = extract_depth(right_beam.section['section size'])
right_beam_weight = extract_weight(right_beam.section['section size'])
bottom_column_depth = extract_depth(bottom_column.section['section size'])
if (left_beam_depth <= 36 and right_beam_depth <= 36
and left_beam_weight <= 300 and right_beam_weight <= 300
and bottom_column_depth <= 36):
self.is_feasible['geometry limits'] = True
else:
sys.stderr.write('Beam and beam depth & weight are not acceptable!\n')
self.is_feasible['geometry limits'] = False
else:
sys.stderr.write('Error: wrong type of connection specified!\n No such keyword for connection exists!\n')
sys.exit(2)
def extract_reduced_beam_section(self, connection_type, left_beam, right_beam):
"""
This method is used to extract the RBS dimensions into one (or two) dictionary.
The explanations for input arguments are presented in __init__() function.
:return: one (two) dictionary which contains the RBS dimensions.
"""
if connection_type == 'typical exterior' or connection_type == 'top exterior':
# The connection only has one beam in this case
self.left_RBS_dimension = copy.deepcopy(left_beam.RBS_dimension)
elif connection_type == 'typical interior' or connection_type == 'top interior':
# The connection has two beams at both sides
self.left_RBS_dimension = copy.deepcopy(left_beam.RBS_dimension)
self.right_RBS_dimension = copy.deepcopy(right_beam.RBS_dimension)
else:
sys.stderr.write('Error: wrong type of connection specified!\nNo such keyword for connection exists!\n')
sys.exit(2)
def compute_probable_moment_RBS(self, connection_type, steel, left_beam, right_beam):
"""
This method is used to compute section modulus at RBS center (step 2 and 3 in ANSI Section 5.8)
:return: a dictionary which includes the probable moment at RBS center
"""
Cpr = (steel.Fy+steel.Fu) / (2*steel.Fy)
if Cpr >= 1.2:
Cpr = 1.2
if connection_type == 'typical exterior' or connection_type == 'top exterior':
left_Z_RBS = left_beam.section['Zx'] - 2 * left_beam.RBS_dimension['c'] * left_beam.section['tf'] \
* (left_beam.section['d'] - left_beam.section['tf'])
self.moment['Mpr1'] = Cpr * steel.Ry * steel.Fy * left_Z_RBS
elif connection_type == 'typical interior' or connection_type == 'top interior':
left_Z_RBS = left_beam.section['Zx'] - 2 * left_beam.RBS_dimension['c'] * left_beam.section['tf'] \
* (left_beam.section['d'] - left_beam.section['tf'])
self.moment['Mpr1'] = Cpr * steel.Ry * steel.Fy * left_Z_RBS
right_Z_RBS = right_beam.section['Zx'] - 2 * right_beam.RBS_dimension['c'] * right_beam.section['tf'] \
* (right_beam.section['d'] - right_beam.section['tf'])
self.moment['Mpr2'] = Cpr * steel.Ry * steel.Fy * right_Z_RBS
else:
sys.stderr.write('Error: wrong type of connection specified!\nNo such keyword for connection exists!')
sys.exit(2)
def compute_shear_force_RBS(self, connection_type, beam_dead_load, beam_live_load, span, bottom_column):
    """
    Compute the shear force at the center of the RBS cut(s) (step 4 in ANSI Section 5.8).
    Results are stored in self.shear_force['VRBS1'] (and ['VRBS2'] for interior joints).
    """
    # Factored gravity load on the beam. The loads arrive in lb/ft, so divide
    # by 1000 and 12 to convert to kips/inch. (Snow term is 0.2*S with S = 0.)
    gravity_load = 1.2 * (beam_dead_load * 0.001 / 12) \
                   + 0.5 * (beam_live_load * 0.001 / 12) + 0.2 * 0
    # Distance from the column face to the RBS center
    dist_to_RBS = self.left_RBS_dimension['a'] + self.left_RBS_dimension['b'] / 2
    # Clear distance between the two RBS centers (span is in ft, hence *12.0)
    clear_span = span * 12.0 - 2 * bottom_column.section['d'] - 2 * dist_to_RBS
    exterior = connection_type in ('typical exterior', 'top exterior')
    interior = connection_type in ('typical interior', 'top interior')
    if not (exterior or interior):
        sys.stderr.write('Error: wrong type of connection specified!\nNo such keyword for connection exists!\n')
        sys.exit(2)
    # Gravity load adds to the seismic shear on one side of the joint and
    # relieves it on the other, hence the sign difference below.
    self.shear_force['VRBS1'] = 2 * self.moment['Mpr1'] / clear_span + gravity_load * clear_span / 2
    if interior:
        self.shear_force['VRBS2'] = 2 * self.moment['Mpr2'] / clear_span - gravity_load * clear_span / 2
def compute_probable_moment_column_face(self, connection_type):
    """
    Compute the probable maximum moment at the column face (step 5 in ANSI
    Section 5.8) by translating the RBS-center moment over the distance between
    the RBS center and the column face. Results are stored in
    self.moment['Mf1'] (and ['Mf2'] for interior joints).
    """
    # Distance from the column face to the center of the RBS cut
    offset = self.left_RBS_dimension['a'] + self.left_RBS_dimension['b'] / 2
    if connection_type in ('typical exterior', 'top exterior'):
        self.moment['Mf1'] = self.moment['Mpr1'] + self.shear_force['VRBS1'] * offset
    elif connection_type in ('typical interior', 'top interior'):
        self.moment['Mf1'] = self.moment['Mpr1'] + self.shear_force['VRBS1'] * offset
        self.moment['Mf2'] = self.moment['Mpr2'] + self.shear_force['VRBS2'] * offset
    else:
        sys.stderr.write('Error: wrong type of connection specified!\nNo such keyword for connection exists!\n')
        sys.exit(2)
def compute_plastic_moment(self, connection_type, steel, left_beam, right_beam):
    """
    Compute the plastic moment of the beam(s) based on the expected (Ry-scaled)
    yield stress (step 6 in ANSI Section 5.8). Results are stored in
    self.moment['Mpe1'] (and ['Mpe2'] for interior joints).
    """
    expected_Fy = steel.Ry * steel.Fy  # expected, not nominal, yield stress
    if connection_type in ('typical exterior', 'top exterior'):
        self.moment['Mpe1'] = expected_Fy * left_beam.section['Zx']
    elif connection_type in ('typical interior', 'top interior'):
        self.moment['Mpe1'] = expected_Fy * left_beam.section['Zx']
        self.moment['Mpe2'] = expected_Fy * right_beam.section['Zx']
    else:
        sys.stderr.write('Error: wrong type of connection specified!\nNo such keyword for connection exists!\n')
        sys.exit(2)
def check_moment_column_face(self, connection_type):
    """
    Check whether the plastic moment exceeds the moment demand at the column
    face (step 7 in ANSI Section 5.8). The boolean outcome is stored in
    self.is_feasible['flexural strength']; a warning is written to stderr when
    the check fails.
    """
    phi_d = 1.0  # resistance factor for the ductile limit state
    if connection_type in ('typical exterior', 'top exterior'):
        adequate = phi_d * self.moment['Mpe1'] >= self.moment['Mf1']
    elif connection_type in ('typical interior', 'top interior'):
        adequate = (phi_d * self.moment['Mpe1'] >= self.moment['Mf1']
                    and phi_d * self.moment['Mpe2'] >= self.moment['Mf2'])
    else:
        sys.stderr.write('Error: wrong type of connection specified!\nNo such keyword for connection exists!\n')
        sys.exit(2)
    if not adequate:
        sys.stderr.write('Plastic moment at column face is not sufficient!\n')
    self.is_feasible['flexural strength'] = adequate
def check_shear_strength(self, connection_type, beam_dead_load, beam_live_load, left_beam, right_beam):
    """
    Check whether the beam shear strength covers the required shear demand
    (step 8 in ANSI Section 5.8). Demands are stored in self.shear_force['Vu1']
    (and ['Vu2']); the boolean outcome is stored in
    self.is_feasible['shear strength'].
    """
    # Factored gravity load converted from lb/ft to kips/inch (no snow load).
    wu = 1.2 * (beam_dead_load * 0.001 / 12) + 0.5 * (beam_live_load * 0.001 / 12) + 0.2 * 0
    # Distance from the column face to the RBS center
    Sh = self.left_RBS_dimension['a'] + self.left_RBS_dimension['b'] / 2
    if connection_type in ('typical exterior', 'top exterior'):
        self.shear_force['Vu1'] = self.shear_force['VRBS1'] + wu * Sh
        sufficient = left_beam.strength['shear'] >= self.shear_force['Vu1']
    elif connection_type in ('typical interior', 'top interior'):
        self.shear_force['Vu1'] = self.shear_force['VRBS1'] + wu * Sh
        self.shear_force['Vu2'] = self.shear_force['VRBS2'] + wu * Sh
        sufficient = (left_beam.strength['shear'] >= self.shear_force['Vu1']
                      and right_beam.strength['shear'] >= self.shear_force['Vu2'])
    else:
        sys.stderr.write('Error: wrong type of connection specified!\nNo such keyword for connection exists!\n')
        sys.exit(2)
    if not sufficient:
        sys.stderr.write('Shear strength is not sufficient!\n')
    self.is_feasible['shear strength'] = sufficient
def check_column_beam_relationships(self, connection_type, steel, left_beam, right_beam, top_column, bottom_column):
    """
    This method examines whether the "strong-column-weak-beam" (SCWB) criterion
    is satisfied (step 11 in ANSI Section 5.8).
    :param connection_type: 'typical exterior', 'top exterior',
                            'typical interior', or 'top interior'.
    :param steel: material object providing the nominal yield stress Fy.
    :param left_beam: beam object on the left side of the joint.
    :param right_beam: beam object on the right side (interior joints only).
    :param top_column: column object above the joint (typical joints only).
    :param bottom_column: column object below the joint (always present).
    :return: None. The boolean outcome is stored in self.is_feasible['SCWB'];
             intermediate moment sums are stored in self.moment
             ('Mpc', 'Muv', 'Mpb').
    """
    if connection_type == 'top exterior':
        # For column in one-story building or top story:
        # Strong column weak beam is exempted if the column axial load ratio < 0.3 for all load combinations except
        # those using amplified seismic load.
        # If not the case, still need to check the Mpc/Mpb ratio.
        if bottom_column.demand['axial']/bottom_column.strength['axial'] < 0.3:
            self.is_feasible['SCWB'] = True
        else:
            Puc_bot = bottom_column.demand['axial']
            Ag_bot = bottom_column.section['A']
            # Bug fix: ft -> inch conversion is *12.0 (was *12.2, a typo
            # inconsistent with every other conversion in this method).
            ht_bot = bottom_column.unbraced_length['x']*12.0
            Zx_bot = bottom_column.section['Zx']
            db = left_beam.section['d']
            # Column moment sum: axial-reduced plastic moment projected to the beam centerline
            self.moment['Mpc'] = Zx_bot * (steel.Fy-Puc_bot/Ag_bot) * (ht_bot/(ht_bot-db/2))
            # Beam moment sum: probable RBS moment amplified to the column centerline
            self.moment['Muv'] = self.shear_force['VRBS1'] * (self.left_RBS_dimension['a']
                                                              + self.left_RBS_dimension['b']/2
                                                              + bottom_column.section['d']/2)
            self.moment['Mpb'] = self.moment['Mpr1'] + self.moment['Muv']
            # Perform the strong column weak beam check
            if self.moment['Mpc']/self.moment['Mpb'] >= STRONG_COLUMN_WEAK_BEAM_RATIO:
                self.is_feasible['SCWB'] = True
            else:
                sys.stderr.write('Strong column weak beam (top exterior) is not satisfied!\n')
                self.is_feasible['SCWB'] = False
    elif connection_type == 'top interior':
        # Same top-story exemption as above, but with beams on both sides.
        if bottom_column.demand['axial']/bottom_column.strength['axial'] < 0.3:
            self.is_feasible['SCWB'] = True
        else:
            Puc_bot = bottom_column.demand['axial']
            Ag_bot = bottom_column.section['A']
            h_bot = bottom_column.unbraced_length['x']*12.0  # Be cautious: convert the unit from ft to inch
            Zx_bot = bottom_column.section['Zx']
            # Generally the left and right beams have the identical beam sizes
            db = (left_beam.section['d'] + right_beam.section['d']) / 2
            # Compute the moment summation for column
            self.moment['Mpc'] = Zx_bot * (steel.Fy-Puc_bot/Ag_bot) * (h_bot/(h_bot-db/2))
            # Compute the moment summation for beam (both RBS shears contribute)
            self.moment['Muv'] = (self.shear_force['VRBS1']+self.shear_force['VRBS2']) \
                                 * (self.left_RBS_dimension['a']+self.left_RBS_dimension['b']/2
                                    +bottom_column.section['d']/2)
            self.moment['Mpb'] = self.moment['Mpr1'] + self.moment['Mpr2'] + self.moment['Muv']
            # Perform the strong column weak beam check
            if self.moment['Mpc']/self.moment['Mpb'] >= STRONG_COLUMN_WEAK_BEAM_RATIO:
                self.is_feasible['SCWB'] = True
            else:
                sys.stderr.write('Strong column weak beam (top interior) is not satisfied!\n')
                self.is_feasible['SCWB'] = False
    elif connection_type == 'typical exterior':
        # This connection has two columns and one beam
        Puc_top = top_column.demand['axial']
        Puc_bot = bottom_column.demand['axial']
        Ag_top = top_column.section['A']
        Ag_bot = bottom_column.section['A']
        ht_top = top_column.unbraced_length['x']*12.0  # Be cautious: convert the unit from ft to inch
        ht_bot = bottom_column.unbraced_length['x']*12.0  # Be cautious: convert the unit from ft to inch
        Zx_top = top_column.section['Zx']
        Zx_bot = bottom_column.section['Zx']
        db = left_beam.section['d']
        # Compute the moment summation for column (top and bottom columns both resist)
        self.moment['Mpc'] = Zx_top * (steel.Fy-Puc_top/Ag_top) * (ht_top/(ht_top-db/2)) \
                             + Zx_bot * (steel.Fy-Puc_bot/Ag_bot) * (ht_bot/(ht_bot-db/2))
        # Compute the moment summation for beam
        self.moment['Muv'] = self.shear_force['VRBS1'] * (self.left_RBS_dimension['a']
                                                          + self.left_RBS_dimension['b']/2
                                                          + bottom_column.section['d']/2)
        self.moment['Mpb'] = self.moment['Mpr1'] + self.moment['Muv']
        # Perform the strong column weak beam check
        if self.moment['Mpc']/self.moment['Mpb'] >= STRONG_COLUMN_WEAK_BEAM_RATIO:
            self.is_feasible['SCWB'] = True
        else:
            sys.stderr.write('Strong column weak beam is not satisfied!\n')
            self.is_feasible['SCWB'] = False
    elif connection_type == 'typical interior':
        # This connection has two columns and two beams
        Puc_top = top_column.demand['axial']
        Puc_bot = bottom_column.demand['axial']
        Ag_top = top_column.section['A']
        Ag_bot = bottom_column.section['A']
        h_top = top_column.unbraced_length['x']*12.0  # Be cautious: convert the unit from ft to inch
        h_bot = bottom_column.unbraced_length['x']*12.0  # Be cautious: convert the unit from ft to inch
        Zx_top = top_column.section['Zx']
        Zx_bot = bottom_column.section['Zx']
        # Generally the left and right beams have the identical beam sizes
        db = (left_beam.section['d'] + right_beam.section['d']) / 2
        # Compute the moment summation for column
        self.moment['Mpc'] = Zx_top * (steel.Fy - Puc_top / Ag_top) * (h_top / (h_top - db / 2)) \
                             + Zx_bot * (steel.Fy - Puc_bot / Ag_bot) * (h_bot / (h_bot - db / 2))
        # Compute the moment summation for beam
        self.moment['Muv'] = (self.shear_force['VRBS1']+self.shear_force['VRBS2']) \
                             * (self.left_RBS_dimension['a']+self.left_RBS_dimension['b']/2
                                + bottom_column.section['d']/2)
        self.moment['Mpb'] = self.moment['Mpr1'] + self.moment['Mpr2'] + self.moment['Muv']
        # Perform the strong column weak beam check
        if self.moment['Mpc'] / self.moment['Mpb'] >= STRONG_COLUMN_WEAK_BEAM_RATIO:
            self.is_feasible['SCWB'] = True
        else:
            sys.stderr.write('Strong column weak beam is not satisfied!\n')
            self.is_feasible['SCWB'] = False
    else:
        sys.stderr.write('Error: wrong type of connection specified!\nNo such keyword for connection exists!\n')
        sys.exit(2)
def determine_doubler_plate(self, connection_type, steel, left_beam, right_beam, bottom_column, top_column):
    """
    Determine the panel zone doubler plate thickness.
    Panel zone shear quantities are stored in self.shear_force
    ('Vc', 'Ru', 'Rn'); the resulting plate thickness is stored in
    self.doubler_plate_thickness (0 means the bare column web suffices).
    """
    # Each connection type contributes a different moment sum, story-height
    # sum, and effective beam depth/flange thickness; gather them per branch,
    # then compute Vc and Ru once. (Unbraced lengths are in ft, hence *12.0.)
    if connection_type == 'top exterior':
        # One left beam, one bottom column
        moment_sum = self.moment['Mf1'] + 0
        height_sum = bottom_column.unbraced_length['x'] * 12.0 / 2 + 0
        beam_d = left_beam.section['d']
        beam_tf = left_beam.section['tf']
    elif connection_type == 'typical exterior':
        # One left beam, columns above and below
        moment_sum = self.moment['Mf1'] + 0
        height_sum = bottom_column.unbraced_length['x'] * 12.0 / 2 \
                     + top_column.unbraced_length['x'] * 12.0 / 2
        beam_d = left_beam.section['d']
        beam_tf = left_beam.section['tf']
    elif connection_type == 'top interior':
        # Beams on both sides, one bottom column.
        # The left and right beams actually have identical sizes.
        moment_sum = self.moment['Mf1'] + self.moment['Mf2']
        height_sum = bottom_column.unbraced_length['x'] * 12.0 / 2
        beam_d = (left_beam.section['d'] + right_beam.section['d']) / 2
        beam_tf = (left_beam.section['tf'] + right_beam.section['tf']) / 2
    elif connection_type == 'typical interior':
        # Beams on both sides, columns above and below
        moment_sum = self.moment['Mf1'] + self.moment['Mf2']
        height_sum = bottom_column.unbraced_length['x'] * 12.0 / 2 \
                     + top_column.unbraced_length['x'] * 12.0 / 2
        beam_d = (left_beam.section['d'] + right_beam.section['d']) / 2
        beam_tf = (left_beam.section['tf'] + right_beam.section['tf']) / 2
    else:
        sys.stderr.write('Error: wrong type of connection specified!\nNo such keyword for connection exists!\n')
        sys.exit(2)
    self.shear_force['Vc'] = moment_sum / height_sum
    self.shear_force['Ru'] = moment_sum / (beam_d - beam_tf) - self.shear_force['Vc']
    # Panel zone shear strength, based on the bottom column web and the
    # LEFT beam depth (as in the demand formulas above for exterior joints).
    phi = 1.0
    col_d = bottom_column.section['d']
    web_t = bottom_column.section['tw']
    flange_w = bottom_column.section['bf']
    flange_t = bottom_column.section['tf']
    db = left_beam.section['d']
    self.shear_force['Rn'] = 0.60 * steel.Fy * col_d * web_t \
                             * (1 + (3 * flange_w * flange_t ** 2) / (db * col_d * web_t))
    if phi * self.shear_force['Rn'] >= self.shear_force['Ru']:
        # Web alone is adequate; no doubler plate required.
        self.doubler_plate_thickness = 0
    else:
        # Size the doubler plate in 1/4-inch increments until the demand is met.
        required_tp = (self.shear_force['Ru'] - 0.60 * steel.Fy * (3 * flange_w * flange_t ** 2) / db) \
                      / (0.60 * steel.Fy * col_d)
        tp = 0.25
        while tp < required_tp:
            tp += 0.25
        self.doubler_plate_thickness = tp
def check_flag(self):
    """
    Report whether the connection passed every stored feasibility check.
    :return: True when no entry of self.is_feasible compares equal to False,
             otherwise False. The result is also cached on self.flag.
    """
    # Mirror the original semantics exactly: only values that compare equal
    # to False (i.e. False or 0) fail the connection.
    self.flag = not any(result == False for result in self.is_feasible.values())
    return self.flag
|
19,193 | ec8d4384bd2a72de3f384f1b574a71f656c14e92 |
from xai.brain.wordbase.verbs._comply import _COMPLY
# class header
class _COMPLYING(_COMPLY, ):
    """Word entry for "COMPLYING", an inflected form of the verb "comply"."""

    def __init__(self):
        # Initialize the base-verb entry first, then set the fields that
        # describe this particular surface form.
        _COMPLY.__init__(self)
        self.specie = 'verbs'
        self.basic = "comply"
        self.name = "COMPLYING"
        self.jsondata = {}
|
19,194 | 3193b93cb891405f6ba75d21e72c4881b29eeb7d | from django import forms
from .models import Snippet
class SnippetForm(forms.ModelForm):
    """Model-backed form for creating or editing a Snippet.

    The author field is excluded from the form; presumably the view assigns
    it from the logged-in user — verify against the caller.
    """

    class Meta:
        model = Snippet
        exclude = ('author',)
|
19,195 | 92a9b36163f23b05db15db9d0ffa17f7eefa315b | from karel.stanfordkarel import *
def main():
    """Repeatedly trace two zigzag passes while the path ahead is clear."""
    while front_is_clear():
        for _ in range(2):
            zigzag()
def zigzag():
    """Trace one full zigzag: an ascending +45 leg followed by a descending -45 leg."""
    for leg in (plus_45, minus_45):
        leg()
def plus_45():
    """Walk a +45-degree diagonal, placing a beeper before each step,
    until the wall on the left ends (left_is_clear() becomes False)."""
    while left_is_clear():
        put_beeper()
        move()
        turn_left()
        move()
        turn_right()
    #put_beeper()
def minus_45():
    """Walk a -45-degree diagonal, placing a beeper before each step,
    until the wall on the right ends (right_is_clear() becomes False)."""
    while right_is_clear():
        put_beeper()
        move()
        turn_right()
        move()
        turn_left()
    #put_beeper()
def turn_right():
    """Turn 90 degrees clockwise by making three left turns."""
    turn_left()
    turn_left()
    turn_left()
19,196 | dd492971615c3b8fae9a0bbebb7378de4c43809a | from setuptools import setup, find_packages
setup(name="tobiasPackage", packages=find_packages()) |
19,197 | 3a5f3c4757fd1596d537d1472b24b9a52c17dfcf | BATCH_SIZE = 32
GAN_BATCH_SIZE = 24
EVAL_BATCH_SIZE = 64
SEQ_LENGTH = 225
|
19,198 | 2a3862207275234f7a31715b10287d08f129cf97 | from spack import *
from spack.util.environment import is_system_path
import os,re
class Stitched(CMakePackage):
    """Spack package for the CMS-SW 'Stitched' stand-alone framework.

    Builds the master branch from GitHub with CMake, staging the cmaketools
    helper repository alongside the sources as a resource.
    """

    homepage = "https://github.com/cms-sw/stitched.git"
    url = "https://github.com/cms-sw/stitched.git"

    version('master', git='https://github.com/cms-sw/Stitched.git',
            branch="master")

    resource(name='cmaketools', git='https://github.com/gartung/cmaketools.git',
             placement="cmaketools")

    depends_on('md5-cms')
    depends_on('boost')
    depends_on('python')
    depends_on('py-pybind11', type=('link', 'run', 'test'))
    depends_on('tinyxml2@6.2.0')
    depends_on('root ~opengl cxxstd=17')
    depends_on('fmt+pic cxxstd=17')
    depends_on('xrootd')
    depends_on('clhep@2.4.1.3')
    # Bug fix: cmake_args() looks this dependency up as
    # self.spec['intel-oneapi-tbb'], so the spec name must match here.
    # The previous spelling 'intel-tbb-oneapi' is not a Spack package and
    # would make the spec lookup fail.
    depends_on('intel-oneapi-tbb')
    depends_on('cppunit', type=('link', 'test'))
    depends_on('xerces-c')
    depends_on('catch2', type=('link', 'test'))
    depends_on('googletest', type=('link', 'test'))
    depends_on('benchmark@1.4.1', type=('link', 'test'))

    def cmake_args(self):
        """Assemble the -D definitions that point CMake at each dependency prefix."""
        cxxstd = self.spec['root'].variants['cxxstd'].value
        args = ['-DCMakeTools_DIR=%s/cmaketools' % self.stage.source_path]
        args.append('-DCLHEP_ROOT_DIR=%s' % self.spec['clhep'].prefix)
        args.append('-DBOOST_ROOT=%s' % self.spec['boost'].prefix)
        args.append('-DTBB_ROOT_DIR=%s' % self.spec['intel-oneapi-tbb'].prefix.tbb.latest)
        args.append('-D_TBB_COMPILER=gcc4.8')
        args.append('-DTINYXML2_ROOT_DIR=%s' % self.spec['tinyxml2'].prefix)
        args.append('-DCMSMD5ROOT=%s' % self.spec['md5-cms'].prefix)
        args.append('-DCMAKE_CXX_STANDARD=%s' % cxxstd)
        args.append('-DXROOTD_INCLUDE_DIR=%s/xrootd' % self.spec['xrootd'].prefix.include)
        args.append('-DCATCH2_INCLUDE_DIRS=%s/catch2' % self.spec['catch2'].prefix.include)
        args.append('-DBUILDTEST=BOOL:True')
        return args

    def setup_build_environment(self, env):
        # This hack is made necessary by a header name collision between
        # md5-cms and libmd md5.h
        # But dependencies without CMake defined includes need to be added back
        def prepend_include_path(dep_name):
            include_path = self.spec[dep_name].prefix.include
            if not is_system_path(include_path):
                env.prepend_path('SPACK_INCLUDE_DIRS', include_path)
        prepend_include_path('md5-cms')
|
19,199 | b4af04f8667b6992f3da0c17f130537a92e8fb51 | t = input()
for a0 in xrange(t):
u, v = map(int, raw_input().split())
t = u + v
print (t * (t + 1) / 2) + u + 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.