text stringlengths 38 1.54M |
|---|
def protein_database_parser(counter, filename="Database.txt"):
    """Read up to `counter` protein sequences from a FASTA-style file.

    Args:
        counter: maximum number of sequences to collect.
        filename: path of the FASTA file to read; defaults to
            "Database.txt", preserving the original hard-coded behavior.

    Returns:
        List of sequence strings; each entry is a record's sequence lines
        joined together with header ('>') lines removed.
    """
    database = []
    temp = ""
    read_flag = False
    # context manager guarantees the file is closed even on error
    with open(filename, "r") as file:
        for line in file:
            if counter == 0:
                break
            # BUG FIX: the old code did line[:-2], which assumed CRLF line
            # endings and chopped a real sequence character on LF-only files.
            line = line.rstrip("\r\n")
            if not line:
                continue
            if line[0] == '>':
                if read_flag:
                    # a new header terminates the previous record
                    database.append(temp)
                    temp = ""
                    counter -= 1
                else:
                    read_flag = True
                continue
            if read_flag:
                temp += line
    # BUG FIX: the final record used to be silently dropped at EOF because
    # no closing '>' header follows it.
    if read_flag and counter != 0 and temp:
        database.append(temp)
    return database
|
## Copyright [2017-2018] UMR MISTEA INRA, UMR LEPSE INRA, ##
## UMR AGAP CIRAD, EPI Virtual Plants Inria ##
## Copyright [2015-2016] UMR AGAP CIRAD, EPI Virtual Plants Inria ##
## ##
## This file is part of the AutoWIG project. More information can be ##
## found at ##
## ##
## http://autowig.rtfd.io ##
## ##
## The Apache Software Foundation (ASF) licenses this file to you under ##
## the Apache License, Version 2.0 (the "License"); you may not use this ##
## file except in compliance with the License. You should have received ##
## a copy of the Apache License, Version 2.0 along with this file; see ##
## the file LICENSE. If not, you may obtain a copy of the License at ##
## ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
## Unless required by applicable law or agreed to in writing, software ##
## distributed under the License is distributed on an "AS IS" BASIS, ##
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ##
## implied. See the License for the specific language governing ##
## permissions and limitations under the License. ##
# Build script: registers the autowig package (sources under src/py) and
# its plugin entry points with setuptools.
import six  # NOTE(review): not used in this file -- possibly a leftover; confirm before removing
import os
from setuptools import setup, find_packages
# Map the root package "" and every discovered package to the src/py layout.
packages = {"" : "src" + os.sep + "py"}
for package in find_packages("src" + os.sep + "py"):
    packages[package] = "src" + os.sep + "py"
setup(packages = packages.keys(),  # NOTE(review): a dict view, not a list; list(packages) would be more conventional
      package_dir = {"" : "src" + os.sep + "py"},
      name = 'autowig',
      version = '1.0.0',
      author = 'Pierre Fernique',
      author_email = 'pfernique@gmail',  # NOTE(review): looks truncated (no TLD) -- confirm
      description = '',
      long_description = '',
      license = 'Apache License 2.0',
      # ship compiled extension artifacts found alongside each package
      package_data = {package: [ "*.so", "*.dll"] for package in packages},
      # Plugin registry: each group below is resolved at runtime through
      # the entry-points mechanism; keys are plugin names, values the
      # "module:attribute" implementing them.
      entry_points = {
      'autowig.parser': [],
      'autowig.controller': ['default = autowig.default_controller:default_controller'],
      'autowig.generator': ['boost_python = autowig.boost_python_generator:boost_python_generator',
      'boost_python_pattern = autowig.boost_python_generator:boost_python_pattern_generator',
      'boost_python_internal = autowig.boost_python_generator:boost_python_internal_generator',
      'boost_python_closure = autowig.boost_python_generator:boost_python_closure_generator',
      'pybind11 = autowig.pybind11_generator:pybind11_generator',
      'pybind11_pattern = autowig.pybind11_generator:pybind11_pattern_generator',
      'pybind11_internal = autowig.pybind11_generator:pybind11_internal_generator',
      'pybind11_closure = autowig.pybind11_generator:pybind11_closure_generator'],
      'autowig.visitor': ['boost_python = autowig.boost_python_generator:boost_python_visitor',
      'boost_python_closure = autowig.boost_python_generator:boost_python_closure_visitor',
      'pybind11 = autowig.pybind11_generator:pybind11_visitor',
      'pybind11_closure = autowig.pybind11_generator:pybind11_closure_visitor',
      'all = autowig.asg:all_visitor',
      'free = autowig.asg:free_visitor',
      'public = autowig.asg:public_visitor',
      'protected = autowig.asg:protected_visitor',
      'private = autowig.asg:private_visitor'],
      'autowig.feedback' : ['edit = autowig.edit_feedback:edit_feedback',
      'comment = autowig.comment_feedback:comment_feedback'],
      'autowig.boost_python_call_policy': ['default = autowig.boost_python_generator:boost_python_default_call_policy'],
      'autowig.boost_python_export': ['custom = autowig.boost_python_generator:BoostPythonExportFileProxy',
      'default = autowig.boost_python_generator:BoostPythonExportDefaultFileProxy'],
      'autowig.boost_python_module': ['default = autowig.boost_python_generator:BoostPythonModuleFileProxy'],
      'autowig.boost_python_decorator': ['default = autowig.boost_python_generator:BoostPythonDecoratorDefaultFileProxy'],
      'autowig.pybind11_call_policy': ['default = autowig.pybind11_generator:pybind11_default_call_policy'],
      'autowig.pybind11_export': ['default = autowig.pybind11_generator:PyBind11ExportFileProxy'],
      'autowig.pybind11_module': ['default = autowig.pybind11_generator:PyBind11ModuleFileProxy'],
      'autowig.pybind11_decorator': ['default = autowig.pybind11_generator:PyBind11DecoratorDefaultFileProxy'],
      'autowig.node_rename': ['PEP8 = autowig._node_rename:pep8_node_rename'],
      'autowig.documenter': ['doxygen2sphinx = autowig.doxygen2sphinx:doxygen2sphinx_documenter'],
      'autowig.node_path' : ['scope = autowig._node_path:scope_node_path',
      'hash = autowig._node_path:hash_node_path'],
      'console_scripts': [],
      },
      zip_safe = False
      )
|
while True:
num = int(input("Please write your number here: "))
if num > 1:
for i in range(2,num):
if (num % i) == 0:
print(f"\n{num} is not a prime number\n")
print(f"{i} times {num//i} is {num}")
break
else:
print(f"\n{num} is a prime number")
else:
print(f"\n{num} is not a prime number\n") |
#
# Client
# Connects SUB socket to tcp://localhost:5556
# Receives the results  (translated from Indonesian: "Menerima hasil")
#
# NOTE(review): this is Python 2 code (print statements); it will not run
# under Python 3 without changes.
import zmq
import pickle
# Socket to talk to server
context = zmq.Context()
socket = context.socket(zmq.SUB)
print "ok"
socket.connect("tcp://localhost:5556")
socket.connect("tcp://localhost:5557")
# Subscribe to zipcode, default is NYC, 10001
#zip_filter = sys.argv[1] if len(sys.argv) > 1 else "10001"
# Python 2 - ascii bytes to unicode str
#if isinstance(zip_filter, bytes):
# zip_filter = zip_filter.decode('ascii')
#socket.setsockopt_string(zmq.SUBSCRIBE, zip_filter)
socket.setsockopt(zmq.SUBSCRIBE, '')  # empty prefix: receive every message
# SECURITY: pickle.loads on data received from the network executes
# arbitrary code if the peer is untrusted -- only safe on a trusted link.
terima = socket.recv()  # first publisher's payload
hasil1 = pickle.loads(terima)  # presumably a list of (key, count) pairs -- confirm against publisher
terima = socket.recv()  # second publisher's payload
hasil2 = pickle.loads(terima)
hasil2 = dict((x, y) for x, y in hasil2)  # pairs -> dict for O(1) merge lookups
# Merge hasil1's counts into hasil2, summing counts for shared keys.
for freq in hasil1:
    if freq[0] in hasil2:
        hasil2[freq[0]] = hasil2[freq[0]] + freq[1]
    else:
        hasil2[freq[0]] = freq[1]
# Sort merged entries by count, descending, and print the top 10.
hasil2 = sorted(hasil2.items(), key=lambda x: x[1], reverse=True)
i = 0
while i < 10:
    print hasil2[i]
    i = i + 1
#!python3
# Decode a string of letters that is an anagram of English digit names
# (ZERO..NINE) back into digits, emitted in ascending order.

# Elimination table, in dependency order. Each entry is
# (digit, letter index unique to that digit at that stage,
#  pairs of (letter index, multiplicity) to subtract per occurrence).
# Letter indices are 0-based from 'A' (E=4, F=5, G=6, H=7, I=8, N=13,
# O=14, R=17, S=18, T=19, U=20, V=21, W=22, X=23, Z=25).
_DIGIT_KEYS = (
    (0, 25, ((4, 1), (17, 1), (14, 1))),        # Z in ZERO; remove E, R, O
    (2, 22, ((19, 1), (14, 1))),                # W in TWO; remove T, O
    (4, 20, ((5, 1), (14, 1), (17, 1))),        # U in FOUR; remove F, O, R
    (6, 23, ((18, 1), (8, 1))),                 # X in SIX; remove S, I
    (8, 6, ((4, 1), (8, 1), (7, 1), (19, 1))),  # G in EIGHT; remove E, I, H, T
    (1, 14, ((13, 1), (4, 1))),                 # O unique to ONE now; remove N, E
    (3, 19, ((7, 1), (17, 1), (4, 2))),         # T unique to THREE now; remove H, R, 2xE
    (5, 5, ((8, 1), (21, 1), (4, 1))),          # F unique to FIVE now; remove I, V, E
    (7, 21, ((18, 1), (4, 2), (13, 1))),        # V unique to SEVEN now; remove S, 2xE, N
    (9, 4, ()),                                 # leftover E counts the NINEs
)


def decode_phone_number(phone_str):
    """Return the digits (in ascending order) whose spelled-out English
    names form an anagram of `phone_str`.

    Args:
        phone_str: uppercase letters forming some multiset of the words
            ZERO..NINE (guaranteed by the problem statement).

    Returns:
        String of digits, each repeated as many times as its name occurs.
    """
    counts = [0] * 26
    for ch in phone_str:
        counts[ord(ch) - ord('A')] += 1
    digit_counts = [0] * 10
    for digit, key, removals in _DIGIT_KEYS:
        occurrences = counts[key]
        digit_counts[digit] = occurrences
        counts[key] = 0
        for letter, multiplicity in removals:
            counts[letter] -= multiplicity * occurrences
    return "".join(str(d) * digit_counts[d] for d in range(10))


def main():
    """Read input.txt, decode each case, write answers to output.txt."""
    # BUG-PRONE before: files were opened without context managers and the
    # decoding logic was inline (untestable); behavior is otherwise unchanged.
    with open("input.txt", "r") as input_file, \
         open("output.txt", "w") as output_file:
        test_cases = int(input_file.readline())
        for test_case in range(1, test_cases + 1):
            phone_str = input_file.readline().rstrip()
            print("Case #", test_case, ": ", decode_phone_number(phone_str),
                  sep="", file=output_file)


if __name__ == "__main__":
    main()
|
# mod_wsgi entry point: the server imports this module and serves the
# callable named `application`.
import sys
# Redirect stdout to stderr -- commonly done under mod_wsgi so stray
# print() calls don't corrupt the WSGI response stream.
sys.stdout = sys.stderr
# path is in vhost file, not here like in docs at.. http://flask.pocoo.org/docs/0.10/deploying/mod_wsgi/#creating-a-wsgi-file
# sys.path.insert(0, '/var/www/html/python/flask21xx')
from pdb218 import app as application  # mod_wsgi requires the name `application`
from plone.app.contentrules.api import edit_rule_assignment
from plone.app.contentrules.testing import PLONE_APP_CONTENTRULES_FUNCTIONAL_TESTING
from plone.app.testing import applyProfile
from plone.app.testing import login
from plone.app.testing import setRoles
from plone.app.testing import TEST_USER_ID
from plone.app.testing import TEST_USER_NAME
from plone.dexterity.utils import createContentInContainer
import unittest
class TestCascadingRule(unittest.TestCase):
    """Functional test: content rules cascade -- one rule's action can
    trigger another rule on the same object."""

    layer = PLONE_APP_CONTENTRULES_FUNCTIONAL_TESTING

    def setUp(self):
        # portal/request fixtures are provided by the functional layer
        self.portal = self.layer["portal"]
        self.request = self.layer["request"]
        login(self.portal, TEST_USER_NAME)
        setRoles(self.portal, TEST_USER_ID, ["Manager"])
        self.portal.invokeFactory("Folder", "news")
        self.portal.invokeFactory("Folder", "events")
        # presumably installs the test rules (test2/test4/test5) referenced
        # below -- confirm against the testing profile
        applyProfile(self.portal, "plone.app.contentrules:testing")
        # enable rules test4/test5 and let them bubble to contained objects
        edit_rule_assignment(self.portal, "test4", bubbles=1, enabled=1)
        edit_rule_assignment(self.portal, "test5", bubbles=1, enabled=1)

    def test_cascading_rule(self):
        # check that test2 rule and test4 rule are executed
        # test2 rule publishes the event in news folder
        # test4 rule moves it in events folder when it is published
        createContentInContainer(self.portal.news, "Event", id="my-event")
        self.assertFalse("my-event" in self.portal.news)
        self.assertTrue("my-event" in self.portal.events)
        wtool = self.portal.portal_workflow
        self.assertEqual(
            wtool.getInfoFor(self.portal.events["my-event"], "review_state"),
            "published",
        )
# FUNCTIONS
'''
- *args / **kwargs
- *args collects extra positional arguments into a tuple; **kwargs collects
  extra keyword arguments into a dictionary
- positional arguments = arguments matched by their position in the call
- keyword arguments = passed as keyword=value; when an argument is given by
  keyword, its position in the call no longer matters
- if func() returns a str, you can call string methods on the result,
  e.g. func().lower()
'''
def func(required_arg, *args, **kwargs):
    """Print the required argument, then any extra positional arguments
    (as a tuple) and any extra keyword arguments (as a dict), each only
    when present."""
    print(required_arg)
    for extras in (args, kwargs):
        if extras:
            print(extras)
# define
# function name and in () the parameters
def example():
    """Print a label followed by the result of a small addition."""
    print('\nbasic function learning')
    total = 2 + 6
    print(total)

example()
# or call the function in terminal without calling it in code
# PARAMETERS for functions
def simple_addition(num1, num2):
    """Print both operands, then their sum."""
    print('\nnum1 is', num1, 'num2 is', num2)
    print(num1 + num2)

# when calling function I specify the parameters
simple_addition(2, 5)
simple_addition(num2=3, num1=6)  # explicit parameter specification
# DEFAULT FUN PARAMETERS
def simple(num1, num2):
    # NOTE(review): immediately shadowed by the redefinition below;
    # kept as the tutorial's "before" example.
    pass
# DEFAULT VALUE IN FUNC = when parameter is specified in fun define u dont have to specify it when calling, it is always that
def simple(num1, num2=5):
    # prints num1 and num2; num2 falls back to 5 when omitted
    print(num1, num2)
simple(3)
## calling functions in functions
def one_good_turn(n):
    """Return n incremented by one."""
    return 1 + n

def deserves_another(n):
    """Return n + 3, computed by delegating to one_good_turn."""
    return 2 + one_good_turn(n)
# cube a number; by_three cubes it only when it is divisible by 3
def cube(number):
    """Return the third power of number."""
    squared = number * number
    return squared * number

def by_three(number):
    """Return cube(number) when number is a multiple of 3, else False."""
    return cube(number) if number % 3 == 0 else False
# abs type
def distance_from_zero(x):
    """Return abs(x) for int/float inputs, 'Nope' for anything else.

    Uses isinstance (idiomatic) instead of type(x) == ..., but explicitly
    excludes bool -- a subclass of int -- to preserve the original
    semantics, under which True/False were rejected.
    """
    if isinstance(x, (int, float)) and not isinstance(x, bool):
        return abs(x)
    return 'Nope'
|
# Print the maximum pairwise atom distance found in an XTC trajectory.
# Usage: script.py <trajectory.xtc> <topology.pdb>
import mdtraj as md
import sys
import numpy as np
xtc = sys.argv[1]  # trajectory file path
pdb = sys.argv[2]  # topology file path
traj = md.load_xtc(xtc, top=pdb)
# presumably every unique atom-index pair in the topology -- confirm
# against mdtraj's select_pairs semantics
pairs = traj.top.select_pairs('all','all')
# distances per frame per pair; np.max reduces over both axes
d = md.compute_distances(traj,pairs)
print(np.max(d))
|
"""
Python mapping for the MetricKit framework.
This module does not contain docstrings for the wrapped code, check Apple's
documentation for details on how to use these functions and classes.
"""
import sys
import Foundation
import objc
from . import _metadata, _MetricKit
sys.modules["MetricKit"] = mod = objc.ObjCLazyModule(
"MetricKit",
"com.apple.MetricKit",
objc.pathForFramework("/System/Library/Frameworks/MetricKit.framework"),
_metadata.__dict__,
None,
{
"__doc__": __doc__,
"objc": objc,
"__path__": __path__,
"__loader__": globals().get("__loader__", None),
},
(Foundation, _MetricKit),
)
del sys.modules["MetricKit._metadata"]
|
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the digits of a 32-bit signed integer.

        Returns 0 when the reversed value falls outside the 32-bit signed
        range [-2**31, 2**31 - 1], per the problem constraints.

        Example: -123 -> -321
                 1234 -> 4321
                 120 -> 21
        """
        sign = -1 if x < 0 else 1
        reversed_digits = int(str(abs(x))[::-1])
        res = sign * reversed_digits
        # BUG FIX: the old check (`not res > 2**31`) wrongly accepted
        # 2**31 itself (max valid is 2**31 - 1) and ignored the asymmetric
        # negative bound; both ends are now checked on the signed value.
        if -2**31 <= res <= 2**31 - 1:
            return res
        return 0
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 01:35:00 2020
@author: Seo
"""
import os
from mypyqtimports import *
class FileOpenWidget(QWidget):
    """Qt widget that drives file/directory dialogs and reports the chosen
    paths back to a parent widget via its change*EditText methods."""

    def __init__(self, callingWidget):
        super().__init__()
        self.title = 'Open Database'
        # default window geometry
        self.left = 10
        self.top = 10
        self.width = 640
        self.height = 480
        # parent widget that receives the selected paths
        self.callingWidget = callingWidget
        # self.initUI()

    def initUI(self):
        """Configure the window and run all three dialog demos, then show."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.openFileNameDialog()
        self.openFileNamesDialog()
        self.saveFileDialog()
        self.show()

    def openFileNameDialog(self):
        """Ask for a single database file and forward it to the caller."""
        options = QFileDialog.Options()
        # options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self,"Open Database", "","All Files (*);;Database Files (*.db)", options=options)
        if fileName:
            print(fileName)
            self.callingWidget.changeDbPathEditText(fileName) # from the callingWidget, (should use a more generic name but whatever for now)

    def openDirectoryDialog(self):
        """Ask for a directory and forward it to the caller."""
        options = QFileDialog.Options()
        dialog = QFileDialog()
        dialog.setFileMode(QFileDialog.DirectoryOnly)
        dirname = QFileDialog.getExistingDirectory(self,"Open Directory", os.getcwd(), options=options)
        if dirname:
            print(dirname)
            self.callingWidget.changeExportDirEditText(dirname)

    # don't need the rest, but gonna leave it here from the example
    def openFileNamesDialog(self):
        """Demo: multi-file selection; only prints the chosen paths."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        files, _ = QFileDialog.getOpenFileNames(self,"QFileDialog.getOpenFileNames()", "","All Files (*);;Python Files (*.py)", options=options)
        if files:
            print(files)

    def saveFileDialog(self):
        """Demo: save-file selection; only prints the chosen path."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getSaveFileName(self,"QFileDialog.getSaveFileName()","","All Files (*);;Text Files (*.txt)", options=options)
        if fileName:
            print(fileName)
from flask_wtf import FlaskForm
from wtforms.validators import InputRequired, Length
from flask_login import UserMixin
from wtforms import StringField, PasswordField, BooleanField, SelectField, IntegerField, RadioField, DateField, TimeField, FloatField
from wtforms.widgets import TextArea
import helpers_constants
class LoginForm(FlaskForm):
    """Login credentials: username (3-15 chars) and password (8-80 chars)."""
    username = StringField('Username', validators=[InputRequired(), Length(min=3, max=15)], render_kw={'autofocus': True})
    password = PasswordField('Password', validators=[InputRequired(), Length(min=8, max=80)])
    # remember = BooleanField('remember me')
class RegisterForm(FlaskForm):
    """New-account form: access level plus username/password credentials."""
    # access = SelectField('Access Level', choices=[(0, 'guest'), (1, 'user'), (2, 'admin')], coerce=int)
    access = SelectField('Access Level', choices=[(1, 'user'), (2, 'admin')], coerce=int)
    username = StringField('First Name', validators=[InputRequired(), Length(min=3, max=15)], render_kw={'autofocus': True})
    password = PasswordField('Password', validators=[InputRequired(), Length(min=8, max=80)])
class VocabForm(FlaskForm):
    """Vocabulary session setup: practice vs quiz, prompt style, lesson."""
    practice_type = RadioField('What do you want to do?', choices=[('practice', 'Practice'), ('quiz', 'Quiz')], default='practice')
    prompt_type = RadioField('Prompt Type:', choices=[('word', 'Word'), ('def', 'Definition/Sentence')], default='word')
    lesson_num = IntegerField('Lesson Number:', validators=[InputRequired()])
class WeeklyForm(FlaskForm):
    """Weekly family planner: scripture, discussion, daily jobs, one goal
    and one book entry per child."""
    weekof = DateField('For the week beginning on', validators=[InputRequired()], id='date')
    # scripture of the week
    scripture_ref = StringField('Reference', validators=[InputRequired()], id='scripture_ref')
    scripture = StringField('Text', validators=[InputRequired()], widget=TextArea(), id='scripture')
    # discussion topic of the week
    discussion_ref = StringField('Reference', validators=[InputRequired()], id='discussion_ref')
    discussion_question = StringField('Question', validators=[InputRequired()], widget=TextArea(), id='discussion_question')
    # one job assignment per weekday
    mon_job = StringField('Monday', validators=[InputRequired()], id='mon_job')
    tue_job = StringField('Tuesday', validators=[InputRequired()], id='tue_job')
    wed_job = StringField('Wednesday', validators=[InputRequired()], id='wed_job')
    thu_job = StringField('Thursday', validators=[InputRequired()], id='thu_job')
    fri_job = StringField('Friday', validators=[InputRequired()], id='fri_job')
    sat_job = StringField('Saturday', validators=[InputRequired()], id='sat_job')
    # one goal per child (additional goal slots kept commented out)
    cal_goal1 = StringField('Calvin', validators=[InputRequired()], id='calvin_goal1')
    # cal_goal2 = StringField('Physical', validators=[InputRequired()], id='calvin_goal2')
    # cal_goal3 = StringField('Social', validators=[InputRequired()], id='calvin_goal3')
    # cal_goal4 = StringField('Intellectual', validators=[InputRequired()], id='calvin_goal4')
    sam_goal1 = StringField('Samuel', validators=[InputRequired()], id='samuel_goal1')
    # sam_goal2 = StringField('Physical', validators=[InputRequired()], id='samuel_goal2')
    # sam_goal3 = StringField('Social', validators=[InputRequired()], id='samuel_goal3')
    # sam_goal4 = StringField('Intellectual', validators=[InputRequired()], id='samuel_goal4')
    kay_goal1 = StringField('Kay', validators=[InputRequired()], id='kay_goal1')
    # kay_goal2 = StringField('Physical', validators=[InputRequired()], id='kay_goal2')
    # kay_goal3 = StringField('Social', validators=[InputRequired()], id='kay_goal3')
    # kay_goal4 = StringField('Intellectual', validators=[InputRequired()], id='kay_goal4')
    seth_goal1 = StringField('Seth', validators=[InputRequired()], id='seth_goal1')
    # seth_goal2 = StringField('Physical', validators=[InputRequired()], id='seth_goal2')
    # seth_goal3 = StringField('Social', validators=[InputRequired()], id='seth_goal3')
    # seth_goal4 = StringField('Intellectual', validators=[InputRequired()], id='seth_goal4')
    # current book per child
    cal_book = StringField('Calvin', validators=[InputRequired()], id='calvin_book')
    sam_book = StringField('Samuel', validators=[InputRequired()], id='samuel_book')
    kay_book = StringField('Kay', validators=[InputRequired()], id='kay_book')
    seth_book = StringField('Seth', validators=[InputRequired()], id='seth_book')
class MathDailyForm(FlaskForm):
    """Daily math log: which child/book, the problem range covered, and
    the start/stop times for the session."""
    choose_kid = SelectField('Name', choices=[('choose', 'Choose...'), ('calvin', 'Calvin'), ('samuel', 'Samuel'), ('kay', 'Kay')], validators=[InputRequired()], id='choose_kid')
    choose_book = SelectField('Name', choices=[('choose', 'Choose...'), ('Math_5_4', 'Math 5/4'), ('Math_6_5', 'Math 6/5'), ('Math_7_6', 'Math 7/6'), ('Math_8_7', 'Math 8/7'), ('Algebra_1_2', 'Algebra 1/2'), ('Algebra_1', 'Algebra 1'), ('Algebra_2', 'Algebra 2'), ('Advanced_math', 'Advanced Math'), ('Calculus', 'Calculus')], validators=[InputRequired()], id='choose_book')
    test = BooleanField('Test')  # checked when the session was a test
    start_chapter = IntegerField('Start Chapter', validators=[InputRequired()], id='start_chapter')
    start_problem = StringField('First Problem', validators=[InputRequired()], id='start_problem')
    end_chapter = IntegerField('End Chapter', validators=[InputRequired()], id='end_chapter')
    end_problem = StringField('Last Problem', validators=[InputRequired()], id='end_problem')
    date = DateField('Date', validators=[InputRequired()], id='date')
    start_time = TimeField('Start Time', validators=[InputRequired()], id='start_time', render_kw={"placeholder": "hh:mm"})
    end_time = TimeField('Stop Time', validators=[InputRequired()], id='end_time', render_kw={"placeholder": "hh:mm"})
class ScriptureDailyForm(FlaskForm):
    """Daily scripture-reading log: child, date, the passage range read
    (book/chapter/verse start and end), and a comment."""
    choose_kid = SelectField('Name', choices=[('choose', 'Choose...'), ('Calvin', 'Calvin'), ('Samuel', 'Samuel'), ('Kay', 'Kay')], validators=[InputRequired()], id='choose_kid', render_kw={'onchange': 'focus_to_date()'})
    date = DateField('Date', validators=[InputRequired()], id='date')
    start_book = StringField('Start Book', validators=[InputRequired()], id='start_book')
    start_chapter = IntegerField('Start Chapter', validators=[InputRequired()], id='start_chapter')
    start_verse = IntegerField('Start Verse', validators=[InputRequired()], id='start_verse')
    end_book = StringField('End Book', validators=[InputRequired()], id='end_book')
    end_chapter = IntegerField('End Chapter', validators=[InputRequired()], id='end_chapter')
    end_verse = IntegerField('End Verse', validators=[InputRequired()], id='end_verse')
    comment = StringField('Comment', validators=[InputRequired()], widget=TextArea(), id='comment')
class NumberofExercisesForm(FlaskForm):
    """Records how many lesson/mixed problems a given book chapter has."""
    choose_book = SelectField('Book', choices=[('choose', 'Choose...'), ('Math_5_4', 'Math 5/4'), ('Math_6_5', 'Math 6/5'), ('Math_7_6', 'Math 7/6'), ('Math_8_7', 'Math 8/7'), ('Algebra_1_2', 'Algebra 1/2'), ('Algebra_1', 'Algebra 1'), ('Algebra_2', 'Algebra 2'), ('Advanced_math', 'Advanced Math'), ('Calculus', 'Calculus')], validators=[InputRequired()], id='choose_book')
    chapter = IntegerField('Chapter', validators=[InputRequired()], id='chapter')
    num_lesson_probs = StringField('Number of Lesson Problems', validators=[InputRequired()], id='num_lesson_probs')
    num_mixed_probs = StringField('Number of Mixed Problems', validators=[InputRequired()], id='num_mixed_probs')
    test = BooleanField('Test')  # checked when the chapter is a test
class CreditDebit(FlaskForm):
    """Allowance ledger entry: child, deposit/withdrawal, amount, note."""
    choose_kid = SelectField('Name', choices=[('choose', 'Choose...'), ('Calvin', 'Calvin'), ('Samuel', 'Samuel'), ('Kay', 'Kay')], validators=[InputRequired()], id='choose_kid')
    credit_debit = SelectField('Transaction Type', choices=[('choose', 'Choose...'), ('deposit', 'Deposit'), ('withdrawal', 'Withdrawal')], validators=[InputRequired()], id='credit_debit')
    amount = FloatField('Amount', validators=[InputRequired()], id='amount')
    description = StringField('Description', validators=[InputRequired()], widget=TextArea(), id='description')
class DownloadFormsForm(FlaskForm):
    """Single yes/no choice: whether to email the downloaded list."""
    email = RadioField('Email List?', choices=[('yes', 'Yes'), ('no', 'No')], id='email', default='no')
class User(UserMixin):
    """Minimal flask-login user: a username plus a numeric access level."""

    def __init__(self, username, access=helpers_constants.ACCESS['user']):
        self.username = username
        self.access = access  # numeric privilege level; compared in allowed()

    # def is_authenticated(self):
    # return True
    def is_active(self):
        # Here you should write whatever the code is that checks the database if your user is active
        # return self.active
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # flask-login uses this value as the session identifier
        return self.username

    def allowed(self, access_level):
        # True when this user's level meets or exceeds the required level
        return self.access >= access_level
|
from urllib.request import urlopen
from bs4 import BeautifulSoup

# Scrape Dalhousie program pages and emit a phpMyAdmin-style XML export of
# undergraduate/graduate sub-program rows.
html = urlopen('https://www.dal.ca/academics/programs.html')
bs = BeautifulSoup(html, "html.parser")
complete_data = bs.find_all('div', { "class" : "autoSearcher section"})
hrefs = bs.find_all('a', href = True)

# BUG FIX: this flat (text, href, text, href, ...) list used to be named
# `list`, shadowing the builtin.
entries = []
for anchor in hrefs:
    entries.append(anchor.text)
    entries.append(anchor['href'])

with open("Sub_programs.xml", "w", encoding = "utf-8") as f:
    f.write('<?xml version="1.0" encoding="utf-8"?>')
    f.write('<pma_xml_export version="1.0" xmlns:pma="http://www.phpmyadmin.net/some_doc_url/">')
    f.write('<database name="dmdw">')
    # starts at index 128 to skip site-chrome links -- magic offset kept
    # from the original; TODO confirm it still matches the page layout
    for k in range(128, len(entries) - 1, 2):
        link = "https://dal.ca" + str(entries[k + 1])
        page = BeautifulSoup(urlopen(link), "html.parser")
        for item in page.find_all(['a']):
            try:
                intermediate = str(item.text)
                if not intermediate.startswith('Faculty of'):
                    continue
                if "/undergraduate" in link:
                    program = "undergraduate"
                elif "/graduate" in link:
                    program = "graduate"
                else:
                    continue
                f.write('<table name="Sub_programs"><column name="Id"></column>')
                f.write('<column name="Program_type">' + program + '</column>')
                f.write('<column name="Program_course">' + intermediate + '</column>')
                f.write('</table>')
            except Exception:
                # best-effort: skip anchors whose text can't be processed
                continue
    f.write('</database>')
    f.write('</pma_xml_export>')
# BUG FIX: the original wrote the closing tags and called close() a second
# time AFTER f.close(), which raises ValueError (write to closed file);
# the duplicate epilogue has been removed and the context manager closes f.
|
#!/usr/bin/env python
'''
a stupidly basic pipeline for testing
'''
import argparse
import os
import sys
import datetime
import subprocess
def log(sev, msg):
    """Write a timestamped, severity-tagged line to stderr."""
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    sys.stderr.write('{}: {} {}\n'.format(stamp, sev, msg))
def run_command(line, command, config):
    """Fill {KEY} placeholders in `command` from `config`, run it, and
    return True on a zero exit status, False otherwise.

    NOTE: config values are interpolated straight into a shell command and
    executed via os.system -- only safe with trusted configuration.
    """
    resolved = command
    for key in config:
        resolved = resolved.replace('{' + key + '}', config[key])
    log('INFO', 'running line {}: {} from template {}...'.format(line, resolved, command))
    status = os.system(resolved)
    if status != 0:
        log('ERROR', 'running line {}: {}: FAILED: {}'.format(line, resolved, status))
        return False
    log('INFO', 'running line {}: {}: done'.format(line, resolved))
    return True
def run_pipeline(config_fh, commands, resume=0):
    """Load KEY=VALUE settings from config_fh, then execute each command.

    Comment/blank command lines are logged and skipped; lines numbered
    below `resume` are skipped; execution stops at the first failure.
    """
    log('INFO', 'xpipe is starting')
    # --- configuration: KEY=VALUE lines, '#' comments and blanks ignored ---
    config = {}
    for raw in config_fh:
        if raw.startswith('#') or not raw.strip():
            continue
        fields = raw.strip('\n').split('=')
        config[fields[0]] = fields[1]
    log('INFO', 'Loaded {} configuration settings'.format(len(config)))
    # --- command execution ---
    ok = True
    line = 0  # keeps the final error message valid even with no commands
    for line, command in enumerate(commands):
        command = command.strip()
        if command.startswith('#') or not command:
            log('INFO', command)  # echo comments into the log
            continue
        if line + 1 < resume:
            log('INFO', 'skipping line {}: {}'.format(line + 1, command))
            continue
        if not run_command(line + 1, command, config):
            ok = False  # stop the pipeline at the first failing command
            break
    log('INFO', 'xpipe is finished')
    if not ok:
        log('ERROR', 'xpipe encountered an error on line {}'.format(line + 1))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Extremely simple pipeline')
    parser.add_argument('--config', required=True, help='configuration options')
    parser.add_argument('--resume', required=False, type=int, default=0, help='line number in file to start from')
    args = parser.parse_args()
    # now do each stage...
    # BUG FIX: the config file handle was opened inline and never closed;
    # the context manager guarantees it is released.
    with open(args.config, 'r') as config_fh:
        run_pipeline(config_fh=config_fh, commands=sys.stdin, resume=args.resume)
|
## -*- coding: UTF8 -*-
## manager.py
##
## Copyright (c) 2019 analyzeDFIR
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
#pylint: disable=R0902
from typing import Optional, Any, Callable, Union
from sqlalchemy import create_engine as sqlalchemy_create_engine, MetaData
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker, scoped_session, Session, Query
class DBManager:
"""Database connection manager. Handles database connection configuration
for both standard applications and web servers (using thread-local storage),
reading from and writing to a database (including transactions), etc. This
class essentially acts as a convenience wrapper around a SQLAlchemy Engine
and Session.
"""
def __init__(self,
conn_string: Optional[str] = None,
metadata: Optional[MetaData] = None,
session_factory: Optional[Callable[..., Session]] = None,
session: Optional[Union[session, scoped_session]] = None,
scoped: bool = False
) -> None:
self.conn_string = conn_string
self.metadata = metadata
self.session_factory = session_factory
self.session = session
self.scoped_sessions = scoped
self.engine = None
    @property
    def conn_string(self) -> Optional[str]:
        """Getter for conn_string (the database connection URL)."""
        return self.__conn_string
    @conn_string.setter
    def conn_string(self, value: Optional[str]) -> None:
        """Setter for conn_string."""
        # stored name-mangled (_DBManager__conn_string) to keep it private
        self.__conn_string = value
    @property
    def engine(self) -> Optional[Engine]:
        """Getter for engine (the persisted SQLAlchemy Engine, if any)."""
        return self.__engine
    @engine.setter
    def engine(self, value: Optional[Engine]) -> None:
        """Setter for engine."""
        self.__engine = value
    @property
    def metadata(self) -> Optional[MetaData]:
        """Getter for metadata (schema definition used by bootstrap)."""
        return self.__metadata
    @metadata.setter
    def metadata(self, value: Optional[MetaData]) -> None:
        """Setter for metadata."""
        self.__metadata = value
    @property
    def session_factory(self) -> Optional[Callable[..., Session]]:
        """Getter for session_factory (callable that produces sessions)."""
        return self.__session_factory
    @session_factory.setter
    def session_factory(self, value: Optional[Callable[..., Session]]) -> None:
        """Setter for session_factory."""
        self.__session_factory = value
    @property
    def scoped_sessions(self) -> bool:
        """Getter for scoped_sessions (True when thread-local scoped
        sessions are in use)."""
        return self.__scoped_sessions
    @scoped_sessions.setter
    # annotation corrected: property setters return None, not bool
    def scoped_sessions(self, value: bool) -> None:
        """Setter for scoped_sessions."""
        self.__scoped_sessions = value
    @property
    def session(self) -> Optional[Union[Session, scoped_session]]:
        """Getter for session (the persisted session, if any)."""
        return self.__session
    @session.setter
    def session(self, value: Optional[Union[Session, scoped_session]]) -> None:
        """Setter for session."""
        self.__session = value
def create_engine(self,
conn_string: Optional[str] = None,
persist: bool = True
) -> Optional[Engine]:
"""
Args:
conn_string => database connection string
persist => whether to persist the database engine to self.engine
Returns:
New database connection (SQLAlchemy Engine) using either provided conn_string
or self.conn_string.
NOTE:
If both conn_string and self.conn_string are None then will return None.
Preconditions:
N/A
"""
if conn_string is not None:
self.conn_string = conn_string
if self.conn_string is not None:
engine = sqlalchemy_create_engine(self.conn_string)
if persist:
self.engine = engine
return engine
return None
def create_session(self, persist: bool = True) -> Union[Session, scoped_session]:
"""
Args:
persist => whether to persist the session
Returns:
Either new session object or pre-existing session.
NOTE:
If self.session_factory is None, this will throw an AttributeError.
Preconditions:
N/A
"""
if self.scoped_sessions:
return self.session_factory
if persist:
if self.session is None:
self.session = (self.session_factory)()
return self.session
return (self.session_factory)()
def close_session(self,
session: Optional[Union[Session, scoped_session]] = None
) -> None:
"""
Args:
session => session to close if not self.session
Procedure:
Closes either the provided session or the current
session (self.session).
Preconditions:
N/A
"""
if session is not None:
session.close()
elif self.scoped_sessions and self.session_factory is not None:
self.session_factory.remove()
elif self.session is not None:
self.session.close()
self.session = None
def bootstrap(self, engine: Optional[Engine] = None) -> None:
"""
Args:
engine => the connection engine to use
Procedure:
Use a database connection (SQLAlchemy Engine) to
bootstrap a database with the necessary tables,
indexes, and (materialized) views.
Preconditions:
N/A
"""
if engine is not None:
self.engine = engine
if self.engine is not None and self.metadata is not None:
self.metadata.create_all(self.engine)
def initialize(self,
conn_string: Optional[str] = None,
metadata: Optional[MetaData] = None,
bootstrap: bool = False,
scoped: bool = False,
create_session: bool = False
) -> 'DBManager':
"""
Args:
conn_string => database connection string
metadata => database metadata object
bootstrap => whether to bootstrap database with tables, indexes,
and views
scoped => whether to use scoped session objects
create_session => whether to create a persisted database session
Procedure:
Initialize a database connection using self.conn_string and perform
various setup tasks such as boostrapping the database with the
necessary tables, indexes and views, and setting up a
(scoped) session.
NOTE:
See http://docs.sqlalchemy.org/en/latest/orm/contextual.html for
more information about scoped sessions.
Preconditions:
N/A
"""
if conn_string is not None:
self.conn_string = conn_string
self.create_engine()
if metadata is not None:
self.metadata = metadata
if self.engine is not None:
if bootstrap:
self.bootstrap()
if scoped or self.scoped_sessions:
self.session_factory = scoped_session(
sessionmaker(bind=self.engine, autoflush=False)
)
self.scoped_sessions = True
else:
self.session_factory = sessionmaker(bind=self.engine, autoflush=False)
self.scoped_sessions = False
if create_session and not self.scoped_sessions:
self.create_session()
return self
def query(self, model: Any, **kwargs: Any) -> Optional[Query]:
"""
Args:
model => model of table to query
kwargs => fields to filter on
Returns:
SQLAlchemy Query object with field filters from kwargs applied.
If applying the filters fails, will return None instead
of raising error.
Preconditions:
N/A
"""
query = self.session.query(model)
for arg in kwargs:
query = query.filter(getattr(model, arg) == kwargs[arg])
return query
def add(self,
record: Any,
session: Optional[Union[session, scoped_session]] = None,
commit: bool = False
) -> 'DBManager':
"""
Args:
record => record to add to current session
session => session to add record to
commit => whether to commit and end the transaction block
Procedure:
Add record to either provided or current session and commit if specified
(wrapper around Session.add).
Preconditions:
N/A
"""
if session is None:
session = self.session
session.add(record)
if commit:
self.commit(session)
return self
def delete(self,
record: Any,
session: Optional[Union[session, scoped_session]] = None,
commit: bool = False
) -> 'DBManager':
"""
Args:
record => record to add to current session
session => session to add record to
commit => whether to commit and end the transaction block
Procedure:
Delete record using either provided session or current session
and commit if specified (wrapper around Session.delete).
Preconditions:
N/A
"""
if session is None:
session = self.session
session.delete(record)
if commit:
self.commit(session)
return self
def commit(self,
session: Optional[Union[session, scoped_session]] = None
) -> 'DBManager':
"""
Args:
session => session to add record to
Procedure:
Commit either provided or current session (wrapper around Session.commit).
Preconditions:
N/A
"""
if session is None:
session = self.session
session.commit()
return self
def rollback(self,
session: Optional[Union[session, scoped_session]] = None
) -> 'DBManager':
"""
Args:
session => session to add record to
Procedure:
Rollback either provided or current session (wrapper around Session.rollback).
Preconditions:
N/A
"""
if session is None:
session = self.session
session.rollback()
return self
|
###########################################
# Desc: Converts Imperial measurements into metric measurements
#
# Author: Zach Slaunwhite
###########################################
#Ready for marking
def main():
    """Prompt for an imperial weight (tons/stone/pounds/ounces) and print
    the equivalent metric weight (metric tons, kilos, grams).

    BUG FIX: this function previously contained unresolved git merge
    conflict markers (<<<<<<< / ======= / >>>>>>>), which is a SyntaxError.
    Resolved in favour of the branch that uses the UNITCONVERTER constant.
    """
    # CONSTANTS: ounces per ton / stone / pound, ounces per kilogram
    TONSCONVERTED = 35840
    STONESCONVERTED = 224
    POUNDSCONVERTED = 16
    KILOSCONVERTED = 35.274
    # example of unit converter is meters to kilo meters
    UNITCONVERTER = 1000
    print("Imperial To Metric Conversion\n")
    # Input
    # prompt user for the input of imperial measurements
    inputTons = float(input("Enter the number of tons: "))
    inputStone = float(input("Enter the number of stone: "))
    inputPounds = float(input("Enter the number of pounds: "))
    inputOunces = float(input("Enter the number of ounces: "))
    # Process
    # convert all values into ounces to unify the variables
    totalOunces = TONSCONVERTED * inputTons + STONESCONVERTED * inputStone + POUNDSCONVERTED * inputPounds + inputOunces
    # divide total ounces by 35.274 for kilos
    totalKilos = totalOunces/KILOSCONVERTED
    # take the remainder of the kilos to get the kilo amount
    kiloRemainder = totalKilos%UNITCONVERTER
    # take the metric tons value
    metricTons = int(totalKilos/UNITCONVERTER)
    # take the remainder of the kilograms, multiply by 1000 to get left over grams
    gramsRemainder = (kiloRemainder%1) * UNITCONVERTER
    # cast remainder to int
    kiloRemainder = int(kiloRemainder)
    # Output receipt
    print("\nThe metric weight is {} metric tons, {} kilos, and {} grams".format(str (metricTons), str(kiloRemainder), str(round(gramsRemainder,1))))
#PROGRAM STARTS HERE. DO NOT CHANGE THIS CODE.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
import RPi.GPIO as GPIO
import json
# BCM GPIO编号
pins = [17,18,27,22,23,24,25,4]
def gpio_setup():
    """Initialise every pin as an output driven low, using BCM numbering."""
    GPIO.setmode(GPIO.BCM)
    for channel in pins:
        GPIO.setup(channel, GPIO.OUT)
        GPIO.output(channel, GPIO.LOW)
def gpio_destroy():
    """Drive every pin low and release it back to input mode."""
    for channel in pins:
        GPIO.output(channel, GPIO.LOW)
        GPIO.setup(channel, GPIO.IN)
# Connection-established callback
def on_connect(client, userdata, flags, rc):
    """Subscribe to the 'gpio' topic once the broker connection succeeds."""
    print("Connected with result code " + str(rc))
    client.subscribe("gpio")
# Message-received callback
def on_message(client, userdata, msg):
    """Parse a JSON payload of the form {"pin": <int>, "value": 0|1} and
    drive the corresponding GPIO pin low (value 0) or high (otherwise)."""
    print(msg.topic+" "+str(msg.payload))
    # BUG FIX: paho-mqtt delivers the payload as bytes on Python 3;
    # str(msg.payload) yields "b'...'" which breaks json.loads. Decode first.
    payload = msg.payload.decode("utf-8") if isinstance(msg.payload, bytes) else msg.payload
    # Extract pin and value from the payload
    gpio = json.loads(payload)
    if gpio['pin'] in pins:
        if gpio['value'] == 0:
            GPIO.output(gpio['pin'], GPIO.LOW)
        else:
            GPIO.output(gpio['pin'], GPIO.HIGH)
if __name__ == '__main__':
    # Wire the MQTT callbacks, prepare the GPIO pins, then block in the
    # network loop until interrupted (Ctrl-C cleans up pins and disconnects).
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    gpio_setup()
    try:
        # Change the MQTT broker's IP address to match your deployment
        client.connect("192.168.1.110", 1883, 60)
        client.loop_forever()
    except KeyboardInterrupt:
        client.disconnect()
        gpio_destroy()
|
from odoo import api, fields, models, _
class ResUsers(models.Model):
    """Extend res.users with the rooms and physicians a user may access."""
    _inherit = "res.users"
    # NOTE(review): Many2many signature is (comodel, relation, column1,
    # column2, string); column1 normally names *this* model's column, yet here
    # it is 'room_id'/'physician_id' on res.users — confirm the column order
    # matches the inverse field definitions on the comodels.
    # 'oprating' [sic] must match the comodel's _name; do not "fix" it here.
    room_ids = fields.Many2many('medical.hospital.oprating.room', 'user_room_rel',
        'room_id', 'user_id', "Allowed Room Columns")
    physician_ids = fields.Many2many('medical.physician', 'physician_room_rel',
        'physician_id', 'user_id', "Allowed Doctor Columns")
|
import pymysql
# Connect to the "cinema" database. Keyword arguments are used because
# positional arguments to pymysql.connect() are deprecated and error-prone.
cinema_db = pymysql.connect(host="baulne.paulf.tk", user="anton", password="medvedev", database="cinema")
try:
    cursor = cinema_db.cursor()
    sql_request = "SELECT * FROM Acteurs"
    cursor.execute(sql_request)
    response = cursor.fetchall()
    print(response)
finally:
    # BUG FIX: the connection was previously never closed.
    cinema_db.close()
import matplotlib.pyplot as plt
from nessai.samplers.importancesampler import ImportanceNestedSampler as INS
import numpy as np
import pytest
@pytest.fixture(autouse=True)
def auto_close_figures():
    """Automatically close all figures after each test"""
    # autouse: wraps every test in this module, so figures created by the
    # plotting tests never accumulate (and leak memory) across tests.
    yield
    plt.close("all")
def test_plot_state(ins, history, n_it):
    """plot_state should return a figure for a fully-populated sampler."""
    ins.history = history
    ins.iteration = n_it
    ins.checkpoint_iterations = [3, 4]
    ins.importance = {
        key: np.arange(-1, n_it)
        for key in ("total", "evidence", "posterior")
    }
    ins.stopping_criterion = ["ratio", "ess"]
    ins.tolerance = [0.0, 1000]
    assert INS.plot_state(ins) is not None
def test_plot_extra_state(ins, history, n_it):
    """plot_extra_state should return a figure."""
    ins.history = history
    ins.iteration = n_it
    ins.checkpoint_iterations = [3, 4]
    assert INS.plot_extra_state(ins) is not None
@pytest.mark.parametrize("enable_colours", [False, True])
def test_plot_trace(ins, samples, enable_colours):
ins.samples = samples
fig = INS.plot_trace(ins, enable_colours=enable_colours)
assert fig is not None
def test_plot_likelihood_levels(ins, samples):
    """plot_likelihood_levels should return a figure."""
    ins.samples = samples
    assert INS.plot_likelihood_levels(ins) is not None
def test_plot_level_cdf(ins, samples):
    """plot_level_cdf should return a figure for a weight CDF and threshold."""
    ins.live_points = samples
    weight_cdf = np.cumsum(samples["logW"])
    midpoint_logl = samples[len(samples) // 2]["logL"]
    fig = INS.plot_level_cdf(ins, weight_cdf, q=0.5, threshold=midpoint_logl)
    assert fig is not None
|
from django.contrib import admin
from projects.models import Project,Person
# from .models import Person,Project
# Register your models here.
class ProjectsAdmin(admin.ModelAdmin):
    """
    Customised admin options for the Project model.
    """
    # Fields displayed on the add/change form
    fields = ('name','leader','tester','programer','publish_app')
    # Columns displayed in the change-list view
    list_display = ['id','name','leader','tester']
admin.site.register(Project,ProjectsAdmin)
admin.site.register(Person)
import pylibimport
# Point pylibimport at the local package cache before any dynamic imports;
# the modules imported below are materialised from these directories.
pylibimport.init_finder(download_dir='./sub/import_dir/', install_dir='./sub/target_dir')
import custom_0_0_0
print(custom_0_0_0.run_custom())
import dynamicmethod_1_0_2
import dynamicmethod_1_0_3
print(dynamicmethod_1_0_2)
print(dynamicmethod_1_0_3)
# Different versions of the same package must resolve to distinct modules.
assert dynamicmethod_1_0_2 is not dynamicmethod_1_0_3
|
from django.urls import path, include
from AppTwo import views
urlpatterns = [
    # Route /users/ to AppTwo.views.user
    path('users/', views.user),
]
#! /usr/bin/env python3
# import csv
import MySQLdb
import sys
# from datetime import datetime,date
# Insert a sample row into Suppliers, then print the whole table.
con = MySQLdb.connect(host='localhost',port=3306,db='my_suppliers',\
                      user='root',passwd='root')
try:
    c = con.cursor()
    # Parameterized insert (values are escaped by the driver).
    c.execute("""insert into Suppliers values (%s,%s,%s,%s,%s);""",
              ['haha_input','fun','990','1510.0','2008/02/18'])
    con.commit()
    print('success to insert db')
    c.execute("select * from Suppliers")
    for row in c.fetchall():
        # Stringify every column for display (was an index-based loop).
        print([str(col) for col in row])
    print('success to check for mysql_db')
finally:
    # BUG FIX: the connection was previously never closed.
    con.close()
import numpy as np
from ed import *
from scipy.sparse import spdiags
from scipy.sparse.linalg import eigsh
# Model parameters for the exact-diagonalization run.
L = 4    # number of lattice sites
U = 4    # on-site interaction strength
μ = 0    # chemical potential
print("L is ", L)
print("U is ", U)
print("μ is ", μ)
# Build the Hamiltonians using helpers from the `ed` module (star-imported).
# NOTE(review): H0 (anti-periodic free Hamiltonian) is constructed but never
# used below — energies are computed from H02; confirm this is intentional.
H0 = H_free_apbc(L, μ=μ)
H02 = H_free(L, m=1)
HU = H_int(L, U=U)
E0, EU, v0 = energies(L, H02, HU)
print(
    "\nExact ED energies are: \t\t",
    E0,
    "\t",
    EU,
    "sum is ",
    E0 + EU
)
from movements import app, actions
from movements.forms import ClassForms
from flask import render_template, request, url_for, redirect
import sqlite3
from config import *
import requests
url_api_crypto = "https://pro-api.coinmarketcap.com/v1/tools/price-conversion?amount={}&symbol={}&convert={}&CMC_PRO_API_KEY={}"
def getConversion(url):
    """Fetch a price conversion from the CoinMarketCap API.

    Args:
        url: fully-formatted API URL (amount, symbols and API key included).
    Returns:
        The decoded JSON payload on HTTP 200, otherwise None.
    """
    # BUG FIX: a timeout prevents a stalled API call from hanging the Flask
    # worker indefinitely; requests.get has no timeout by default.
    response = requests.get(url, timeout=10)
    if response.status_code == 200:
        data = response.json()
        return data
    # Explicit (previously implicit) None on non-200 responses; callers wrap
    # this call in try/except and must handle a None result.
    return None
DBFILE = app.config['DBFILE']
@app.route('/purchase', methods=['GET', 'POST'])
def transaccion():
    """Render and process the crypto purchase form.

    GET: show the form, offering only currencies the user holds (plus EUR).
    POST ("calcular" button): call the CoinMarketCap conversion API and fill
    in the converted amount and exchange rate.
    POST ("submit" button): persist the movement to the database and redirect
    to the movements listing.
    """
    msg = []
    start=False
    form = ClassForms()
    fecha=actions.fecha()
    hora=actions.hora()
    # Load current balances; abort early with an error page if the DB fails.
    try:
        dicResponse=actions.totales()
    except Exception as e:
        print("¡¡ ERROR !!: Acceso a base de datos-DBFILE:{} {}". format(type(e).__name__,e))
        msg.append("Error en acceso a base de datos. Consulte con el administrador.")
        return render_template("Purchase.html", form=form, msg=msg,start=False)
    # Only currencies with a positive balance can be converted from;
    # EUR is always offered as a source currency.
    coins=[]
    for item,valor in dicResponse.items():
        if valor> 0:
            coins.append(item)
    if not 'EUR' in coins:
        coins.append('EUR')
    form.moneda_from.choices=coins
    if request.method == 'POST':
        if form.validate():
            if form.calcular.data ==True:
                # "Calculate" button: query the conversion API and show the result.
                try:
                    result= getConversion(url_api_crypto.format(form.cantidad_from.data,form.moneda_from.data,form.moneda_to.data,API_KEY))
                    moneda_from=result["data"]["symbol"]
                    cantidad_from=result["data"]["amount"]
                    moneda_to=(form.moneda_to.data)
                    cantidad_to=result["data"]["quote"][moneda_to]["price"]
                    conversion=float(cantidad_from)/float(cantidad_to)
                    form.cantidad_to.data = cantidad_to
                    form.conversion.data = conversion
                except Exception as e:
                    print("¡¡ ERROR !! de acceso al consultar la API:{} {}". format(type(e).__name__,e))
                    msg.append("Error en la consulta a la API. Consulte con el administrador.")
                    return render_template("Purchase.html", form=form, msg=msg,start=False)
                return render_template("Purchase.html", form=form,start=True)
            if form.submit.data:
                # "Submit" button: record the movement, then show the listing.
                try:
                    actions.consulta ('INSERT INTO movimientos (fecha, hora, moneda_from, moneda_to, cantidad_from, cantidad_to, conversion) VALUES (?,?, ?, ?, ?,?,?);',
                        (
                        fecha,
                        hora,
                        form.moneda_from.data,
                        form.moneda_to.data,
                        float(form.cantidad_from.data),
                        round(float(form.cantidad_to.data),8),
                        round(float(form.conversion.data),8)
                        )
                    )
                except Exception as e:
                    print("¡¡ ERROR !! de accerso al consultar la base de datos-DBFILE:{} {}". format(type(e).__name__,e))
                    msg.append("Error en el acceso a base de datos. Porfavor consulte con el administrador.")
                    return render_template("Purchase.html", form=form, msg=msg,start=False)
                return redirect(url_for('listadoMovimientos'))
        else:
            return render_template("Purchase.html", form=form)
    return render_template("Purchase.html" , form=form, start=False )
import os
import shutil
import unittest
from chariot.storage import Storage
def resolve(path):
    """Normalise *path* to an absolute path so paths can be compared."""
    return os.path.abspath(path)
class TestStorage(unittest.TestCase):
    """Tests for chariot.storage.Storage path handling and downloading."""
    def test_path(self):
        # path() should resolve a sub-directory name relative to the root.
        root = os.path.join(os.path.dirname(__file__), "../../data")
        storage = Storage(root)
        correct_path = os.path.join(root, "raw")
        self.assertEqual(resolve(storage.path("raw")),
                         resolve(correct_path))
    def test_setup_data_dir(self):
        # setup_data_dir() should create the standard data sub-directories.
        root = os.path.join(os.path.dirname(__file__), "./tmp_root")
        os.mkdir(root)
        storage = Storage.setup_data_dir(root)
        self.assertTrue(os.path.exists(storage.raw()))
        self.assertTrue(os.path.exists(storage.processed()))
        self.assertTrue(os.path.exists(storage.interim()))
        self.assertTrue(os.path.exists(storage.external()))
        shutil.rmtree(root)
    def test_download(self):
        # NOTE(review): depends on network access and an external URL staying
        # available — consider serving a local fixture instead.
        url = "https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png"
        root = os.path.join(os.path.dirname(__file__), "./data")
        storage = Storage(root)
        path = storage.download(url, "raw/image.png")
        self.assertTrue(os.path.exists(path))
        correct_path = os.path.join(root, "raw/image.png")
        self.assertEqual(resolve(path), resolve(correct_path))
        os.remove(path)
if __name__ == "__main__":
    unittest.main()
|
# for loops with range(a, b) - b is exclusive
# while loops - used differently from for
# for
for num in range(1, 10): # cf. [1, 2, 3][0:2]
    print(num)
num_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for num in num_list:
    print(num)
# while
# while True: # infinite loop - keeps printing until the condition becomes False
#     print(1)
# stop it with cmd + c
a = 1
while a < 10:
    print(a)
    a = a + 1
|
from sys import argv, exit
# Render an (width x height) Mandelbrot escape-count grid and write it to /tmp.
height = int(argv[1])
width = int(argv[2])
n = int(argv[3])  # maximum iteration count

def seek_point(x, y):
    """Return the Mandelbrot escape iteration count for pixel (x, y).

    The pixel is mapped onto the complex plane spanning [-2, 1] x [-1, 1].
    """
    p = complex(3.0 * x / width - 2.0, 2.0 * y / height - 1.0)
    z = complex(.0, .0)
    i = 0  # BUG FIX: i was unbound after the loop when n == 0
    for i in range(n):
        z = z * z + p
        if abs(z) >= 2.0:
            break
    return i

l = []
# BUG FIX: xrange() does not exist in Python 3; range() is the equivalent.
for y in range(height):
    for x in range(width):
        l.append(seek_point(x, y))
with open('/tmp/mandelbrot-image-%s' % n, 'w') as f:
    f.write(','.join(str(x) for x in l))
|
import requests
def subset(a, b, present=()):
    """Return True when *a* contains every key/value pair of *b* and every
    key listed in *present*.

    Args:
        a: dict to inspect.
        b: dict of required key/value pairs.
        present: iterable of keys that must merely exist in *a*.
    """
    # Default changed from the mutable [] to an immutable () (same behaviour,
    # avoids the shared-mutable-default hazard); locals renamed so they no
    # longer shadow the function's own name.
    pairs_match = all((k in a and a[k] == v) for k, v in b.items())
    keys_exist = all(key in a for key in present)
    return pairs_match and keys_exist
def post_subset(url, json, expected_subset, present):
    """POST *json* to *url*; assert HTTP 200 and that the response JSON
    contains *expected_subset* and the keys in *present*.

    Returns the decoded response JSON.
    """
    response = requests.post(url, json=json)
    assert response.status_code == requests.codes.ok
    body = response.json()
    assert subset(body, expected_subset, present), body
    return body
def get(url, code, expected=None, dont_crack_json=False):
    """GET *url* and assert the status code equals *code*.

    Returns the decoded JSON body (optionally asserted to equal *expected*),
    or None when *dont_crack_json* is set.
    """
    response = requests.get(url)
    assert response.status_code == code
    if dont_crack_json:
        return None
    body = response.json()
    if expected:
        assert body == expected
    return body
|
import random
from base64 import encodebytes
class CustomerProxyMiddleware(object):
    """Scrapy downloader middleware that assigns a random proxy per request."""
    def process_request(self, request, spider):
        chosen = random.choice(PROXIES)
        request.meta['proxy'] = "http://%s" % chosen['ip_port']
        print("-----------Proxy-----------" + chosen['ip_port'])

# Candidate proxy pool (ip:port strings).
PROXIES = [
    {'ip_port': '124.88.67.17:843'},
    {'ip_port': '202.171.253.72:80'},
    {'ip_port': '78.89.180.167:80'},
    {'ip_port': '211.143.45.216:3128'},
    {'ip_port': '113.31.27.228:8080'},
    {'ip_port': '119.6.136.122:843'},
    {'ip_port': '61.162.223.41:9797'},
    {'ip_port': '123.125.122.224:80'},
    {'ip_port': '124.88.67.23:843'},
    {'ip_port': '119.29.253.167:8888'},
    {'ip_port': '111.23.4.139:80'},
    {'ip_port': '124.88.67.23:81'},
    {'ip_port': '123.125.122.224:80'},
]
from random import randint
# --- Global game state ---------------------------------------------------
game_running = True
game_results = []     # winner stats of finished rounds
one_time = True       # one-shot extra heal skill still available
one_time2 = True      # one-shot extra damage skill still available
one_time_game = True  # kept for compatibility (unused)

def calculate_monster_attack(attack_min, attack_max):
    """Return a random monster attack value in [attack_min, attack_max]."""
    return randint(attack_min, attack_max)

def calculate_player_heal(heal_min, heal_max):
    """Return a random player heal amount in [heal_min, heal_max]."""
    return randint(heal_min, heal_max)

def game_ends(winner_name):
    """Announce the winner of the game."""
    print(f'{winner_name} won the game')

while game_running == True:
    counter = 0
    new_round = True
    player = {'name': 'Abel', 'attack_min': 5, 'attack_max': 30, 'heal_min': 16, 'heal_max': 30, 'health': 100}
    monster = {'name': 'Max', 'attack_min': 10, 'attack_max': 28, 'health': 200}
    trap = {'tattack_min':0, 'tattack_max':20}
    potion = {'min': 20, 'max': 50}
    starter_money =200
    def calculated_trap_attack():
        """Return random trap damage."""
        return randint(trap['tattack_min'], trap['tattack_max'])
    def potion_health_reg():
        """Return random health restored by a potion."""
        return randint(potion['min'],potion['max'])
    def calculate_player_attack():
        """Return random player attack damage."""
        return randint(player['attack_min'], player['attack_max'])
    print('---' * 7)
    print('Please enter username:')
    print(' ')
    player['name'] = input()
    print(' ')
    print('Welcome in the another world.')
    print(' ')
    print('I hope you enjoy the game')
    print('---' * 7)
    print(player['name'] + ' has ' + str(player['health']) + ' health')
    print(' ')
    print(monster['name'] + ' has ' + str(monster['health']) + ' health')
    while new_round == True:
        counter = counter + 1
        player_won = False
        monster_won = False
        # BUG FIX: the menu did not match the handlers below — option '6'
        # buys a potion (it was labelled "Show Results") and the handlers run
        # up to '9' while the old menu stopped at 8.
        print('''
        Please select action:\r\n
        1) Attack\r\n
        2) Heal\r\n
        3) Extra skill\r\n
        4) Trick skill\r\n
        5) Run\r\n
        6) Buy potion\r\n
        7) Show Results\r\n
        8) User manual\r\n
        9) Exit game
        ''')
        player_choice = input()
        if player_choice == '1':
            monster['health'] = monster['health'] - calculate_player_attack()
            if monster['health'] <= 0:
                player_won = True
            else:
                player['health'] = player['health'] - calculate_monster_attack(monster['attack_min'], monster['attack_max'])
                if player['health'] <= 0:
                    monster_won = True
        elif player_choice == '2':
            player['health'] = player['health'] + calculate_player_heal(player['heal_min'], player['heal_max'])
            player['health'] = player['health'] - calculate_monster_attack(monster['attack_min'], monster['attack_max'])
            if player['health'] <= 0:
                monster_won = True
            # Healing gives the monster a small 5 hp bonus as a trade-off
            # (was guarded by a redundant re-check of player_choice).
            monster['health'] = monster['health'] + 5
        elif player_choice == '3':
            while one_time == True or one_time2 == True:
                print('1) Heal up with 30 hp')
                print('2) Decrease monster health with 50 hp ')
                player_choice2 = input()
                if player_choice2 == '1' and one_time == True:
                    player['health'] = player['health'] + 30
                    one_time = False
                    print("You used the extra heal skill.")
                    if one_time2 == True:
                        print(' ')
                        print("If you wanna use the other skill press 2.")
                        print(' ')
                elif player_choice2 == '2'and one_time2 == True:
                    monster['health'] = monster['health'] - 50
                    one_time2 = False
                    print("You used the extra damage skill.")
                    if one_time == True:
                        print(' ')
                        print(" If you wanna use the other skill press 1.")
                        print(' ')
                else:
                    print(' ')
                    print("Sorry you used all the extra skill")
                    print(' ')
        elif player_choice == '4':
            player['health'] = player['health'] - 14
            monster['health'] = monster['health'] - calculate_player_attack()
        elif player_choice == '5':
            player['health'] = player['health'] - calculated_trap_attack()
        elif player_choice== '6':
            if starter_money >= 100:
                # BUG FIX: was "player['helath'] = player['health'] - potion_health_reg()"
                # — the typo wrote a dead 'helath' key and the minus sign would
                # have *reduced* health; a potion restores it.
                player['health'] = player['health'] + potion_health_reg()
                starter_money -= 100
            else:
                print('You need to collect more money to buy this item')
        elif player_choice == '7':
            for player_stat in game_results:
                print(' ')
                print('Last match won by:', player_stat)
                print(' ')
        elif player_choice == '8':
            # BUG FIX: the manual's numbering now matches the actual menu.
            print('If you type 1, your caracter hit the monster after that a monster hit you back.')
            print(' ')
            print('If you type 2, your caracter heal hear/himself, but the monster hit you and he get 5 more hp.')
            print(' ')
            print('If you type 3, you can choose from the extra skill list.')
            print(' ')
            print('If you type 4, you use the trick skill: you lose 14 hp and hit the monster.')
            print(' ')
            print('If you type 5, your caracter start running from the monster, but you can get hit by the trap.')
            print(' ')
            print('If you type 6, you can buy a potion for 100 money that heals you.')
            print(' ')
            print('If you type 7, you able to see a previous round stats(winner name, rest of health, and number of round.')
            print(' ')
            print('If you type 8, you can see the user manual.')
            print(' ')
            print('If you type 9, the game ends automatically.')
            print(' ')
            print('If anything not work correctly please send a message to me.')
            print(' ')
        elif player_choice == '9':
            print(' ')
            print('Thanks for your time.')
            print(' ')
            new_round = False
            game_running = False
        else:
            print('Invalid Input')
        if player_won == False and monster_won == False:
            print(player['name'] + ' has ' + str(player['health']) + ' hp left')
            print('----' * 7)
            print(monster['name'] + ' has ' + str(monster['health']) + ' hp left')
        elif player_won:
            game_ends(player['name'])
            round_result = {'name': player['name'], 'health': player['health'], 'rounds': counter}
            game_results.append(round_result)
            new_round = False
        elif monster_won:
            game_ends(monster['name'])
            # Consistency fix: use the same 'rounds' key as the player result
            # (was 'round' here, making the stats dicts inconsistent).
            round_result = {'name': monster['name'], 'health': monster['health'], 'rounds' : counter}
            game_results.append(round_result)
            new_round = False
    if player_won == True:
        print(' ')
        print('Congratulation you destroyed the MONSTER')
    if monster_won == True:
        print(' ')
        print('Next time might be able to destroy the MONSTER')
|
#!/usr/bin/python
import errno
import git
from git.exc import InvalidGitRepositoryError
import os
from pathlib import Path
import platform
import shutil
import stat
import subprocess
from sys import argv
from twrpdtgen.misc import append_license
from twrpdtgen.misc import error
from twrpdtgen.misc import get_device_arch
from twrpdtgen.misc import make_twrp_fstab
from twrpdtgen.misc import open_file_and_read
from twrpdtgen.misc import printhelp
version_major = "1"
version_minor = "0"
version_quickfix = "0"
version = version_major + "." + version_minor + "." + version_quickfix
try:
twrpdtgen_repo = git.Repo(os.getcwd())
except InvalidGitRepositoryError:
error("Please clone the script with Git instead of downloading it as a zip")
exit()
last_commit = twrpdtgen_repo.head.object.hexsha
last_commit = last_commit[:7]
print("TWRP device tree generator")
print("Python edition")
print("Version " + version)
print("")
try:
recovery_image = argv[1]
except IndexError:
error("Recovery image not provided")
printhelp()
exit()
if not os.path.isfile(recovery_image):
error("Recovery image doesn't exist")
printhelp()
exit()
device_codename = input("Enter the device codename: ")
device_full_name = input("Enter the device full name: ")
device_manufacturer = input("Enter the device manufacturer: ")
device_release_year = input("Enter the device release year: ")
device_is_ab = input("Is the device A/B? (y/N): ")
if device_codename == "":
error("Device codename can't be empty")
exit()
if device_full_name == "":
error("Device full name can't be empty")
exit()
if device_manufacturer == "":
error("Device manufacturer can't be empty")
exit()
if device_release_year == "":
error("Device release year can't be empty")
exit()
device_manufacturer = device_manufacturer.lower()
if device_is_ab == "y" or device_is_ab == "Y":
device_is_ab = True
elif device_is_ab == "" or device_is_ab == "n" or device_is_ab == "N":
device_is_ab = False
print("")
# Define paths
current_path = Path(os.getcwd())
aik_path = current_path / "extract"
working_path = current_path / "working"
aik_images_path = aik_path / "split_img"
aik_ramdisk_path = aik_path / "ramdisk"
device_tree_path = working_path / device_manufacturer / device_codename
device_tree_prebuilt_path = device_tree_path / "prebuilt"
device_tree_recovery_root_path = device_tree_path / "recovery" / "root"
device_tree_files = ["Android.mk", "AndroidProducts.mk", "BoardConfig.mk", "device.mk", "omni_" + device_codename + ".mk", "vendorsetup.sh"]
print("Cloning AIK...")
def handleRemoveReadonly(func, path, exc):
 """shutil.rmtree onerror callback: clear the read-only bit and retry.

 Needed mainly on Windows, where git object files are read-only and make
 rmtree fail. ``func`` is the os function that failed, ``path`` its
 argument; ``exc`` (the exception info) is unused.
 """
 os.chmod(path, stat.S_IWRITE)
 func(path)
# Remove any previous AIK checkout; onerror clears read-only bits (Windows).
if os.path.isdir(aik_path):
 shutil.rmtree(aik_path, ignore_errors=False, onerror=handleRemoveReadonly)
# Clone the platform-specific Android Image Kitchen mirror.
if platform.system() == "Linux":
 git.Repo.clone_from("https://github.com/SebaUbuntu/AIK-Linux-mirror", aik_path)
elif platform.system() == "Windows":
 git.Repo.clone_from("https://github.com/SebaUbuntu/AIK-Windows-mirror", aik_path)
print("Creating device tree folders...")
# Start from a clean device tree directory.
if os.path.isdir(device_tree_path):
 shutil.rmtree(device_tree_path, ignore_errors=True)
os.makedirs(device_tree_path)
os.makedirs(device_tree_prebuilt_path)
os.makedirs(device_tree_recovery_root_path)
print("Appending license headers to device tree files...")
for file in device_tree_files:
 append_license(device_tree_path / file, device_release_year, "#")
print("Extracting recovery image...")
# AIK names its outputs after the input file, so copy the image in under
# the device codename first.
new_recovery_image = aik_path / (device_codename + ".img")
shutil.copyfile(recovery_image, new_recovery_image)
# Unpack the image; results land in split_img/ and ramdisk/.
if platform.system() == "Linux":
 aik_process = subprocess.Popen([aik_path / "unpackimg.sh", "--nosudo", new_recovery_image],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
 aik_stdout, aik_stderr = aik_process.communicate()
elif platform.system() == "Windows":
 subprocess.call([aik_path / "unpackimg.bat", new_recovery_image])
print("Getting device infos...")
# Architecture is sniffed from the recovery binary inside the ramdisk.
device_arch = get_device_arch(aik_ramdisk_path / "sbin" / "recovery")
# Presence of split-out image parts determines which prebuilts to ship.
device_have_kernel = os.path.isfile(aik_images_path / (device_codename + ".img" + "-" + "zImage"))
device_have_dt_image = os.path.isfile(aik_images_path / (device_codename + ".img" + "-" + "dt"))
device_have_dtb_image = os.path.isfile(aik_images_path / (device_codename + ".img" + "-" + "dtb"))
device_have_dtbo_image = os.path.isfile(aik_images_path / (device_codename + ".img" + "-" + "dtbo"))
# AIK writes one boot-image metadata value per "<image>-<field>" file;
# read the first line of each.
device_base_address = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "base")).split('\n', 1)[0]
device_board_name = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "board")).split('\n', 1)[0]
device_cmdline = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "cmdline")).split('\n', 1)[0]
device_hash_type = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "hashtype")).split('\n', 1)[0]
device_header_version = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "header_version")).split('\n', 1)[0]
device_image_type = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "imgtype")).split('\n', 1)[0]
device_kernel_offset = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "kernel_offset")).split('\n', 1)[0]
device_recovery_size = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "origsize")).split('\n', 1)[0]
device_recovery_sp = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "os_patch_level")).split('\n', 1)[0]
device_recovery_version = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "os_version")).split('\n', 1)[0]
device_pagesize = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "pagesize")).split('\n', 1)[0]
device_ramdisk_compression = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "ramdiskcomp")).split('\n', 1)[0]
device_ramdisk_offset = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "ramdisk_offset")).split('\n', 1)[0]
device_second_offset = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "second_offset")).split('\n', 1)[0]
device_tags_offset = open_file_and_read(aik_images_path / (device_codename + ".img" + "-" + "tags_offset")).split('\n', 1)[0]
# NOTE(review): get_device_arch appears to return False for unsupported
# architectures — confirm against its definition (not visible here).
if device_arch == False:
 error("Device architecture not supported")
 exit()
device_have_64bit_arch = (device_arch == "arm64" or device_arch == "x86_64")
# Pick the conventional kernel image name for the architecture; append
# "-dtb" when no separate DT/DTB image exists (DT appended to the kernel).
if device_have_kernel:
 if device_arch == "arm":
  device_kernel_name = "zImage"
 elif device_arch == "arm64":
  device_kernel_name = "Image.gz"
 elif device_arch == "x86" or device_arch == "x86_64":
  device_kernel_name = "bzImage"
 else:
  device_kernel_name = "zImage"
 if (device_arch == "arm" or device_arch == "arm64") and (not device_have_dt_image and not device_have_dtb_image):
  device_kernel_name += "-dtb"
 shutil.copyfile(aik_images_path / (device_codename + ".img" + "-" + "zImage"), device_tree_prebuilt_path / device_kernel_name)
if device_have_dt_image:
 shutil.copyfile(aik_images_path / (device_codename + ".img" + "-" + "dt"), device_tree_prebuilt_path / "dt.img")
if device_have_dtb_image:
 shutil.copyfile(aik_images_path / (device_codename + ".img" + "-" + "dtb"), device_tree_prebuilt_path / "dtb.img")
if device_have_dtbo_image:
 shutil.copyfile(aik_images_path / (device_codename + ".img" + "-" + "dtbo"), device_tree_prebuilt_path / "dtbo.img")
# Prefer a ready-made TWRP fstab from the ramdisk; otherwise derive one
# from the stock recovery fstab.
if os.path.isfile(aik_ramdisk_path / "etc" / "twrp.fstab"):
 print("Found a TWRP fstab, copying it...")
 shutil.copyfile(aik_ramdisk_path / "etc" / "twrp.fstab", device_tree_path / "recovery.fstab")
else:
 print("Generating fstab...")
 make_twrp_fstab(aik_ramdisk_path / "etc" / "recovery.fstab", device_tree_path / "recovery.fstab")
# Copy device-specific init scripts (except init.rc); ueventd.rc is renamed
# to the per-device "ueventd.<codename>.rc" convention.
for file in os.listdir(aik_ramdisk_path):
 if file.endswith(".rc") and file != "init.rc":
  if file == "ueventd.rc":
   shutil.copyfile(aik_ramdisk_path / file, device_tree_recovery_root_path / ("ueventd." + device_codename + ".rc"))
  else:
   shutil.copyfile(aik_ramdisk_path / file, device_tree_recovery_root_path / file)
print("Creating Android.mk...")
# Generate Android.mk: gate the whole tree behind TARGET_DEVICE so it only
# builds for this device. The redundant file.close() calls inside the
# "with" blocks were removed (the context manager already closes), and the
# handle no longer shadows the "file" builtin.
with open(device_tree_path / "Android.mk", "a") as mk_file:
 mk_file.write("LOCAL_PATH := $(call my-dir)" + "\n")
 mk_file.write("\n")
 mk_file.write("ifeq ($(TARGET_DEVICE)," + device_codename + ")" + "\n")
 mk_file.write("include $(call all-subdir-makefiles,$(LOCAL_PATH))" + "\n")
 mk_file.write("endif" + "\n")
print("Creating AndroidProducts.mk...")
# AndroidProducts.mk: register the omni product makefile.
with open(device_tree_path / "AndroidProducts.mk", "a") as mk_file:
 mk_file.write("PRODUCT_MAKEFILES := \\" + "\n")
 mk_file.write("    $(LOCAL_DIR)/omni_" + device_codename + ".mk" + "\n")
print("Creating BoardConfig.mk...")
# BoardConfig.mk: architecture, partition, kernel and TWRP build settings
# derived from the unpacked recovery image.
with open(device_tree_path / "BoardConfig.mk", "a") as mk_file:
 mk_file.write("DEVICE_PATH := device" + "/" + device_manufacturer + "/" + device_codename + "\n")
 mk_file.write("\n")
 mk_file.write("# For building with minimal manifest" + "\n")
 mk_file.write("ALLOW_MISSING_DEPENDENCIES := true" + "\n")
 mk_file.write("\n")
 # Architecture section: primary (and, for 64-bit, secondary) ABI setup.
 mk_file.write("# Architecture" + "\n")
 if device_arch == "arm64":
  mk_file.write("TARGET_ARCH := arm64" + "\n")
  mk_file.write("TARGET_ARCH_VARIANT := armv8-a" + "\n")
  mk_file.write("TARGET_CPU_ABI := arm64-v8a" + "\n")
  mk_file.write("TARGET_CPU_ABI2 := " + "\n")
  mk_file.write("TARGET_CPU_VARIANT := generic" + "\n")
  mk_file.write("\n")
  mk_file.write("TARGET_2ND_ARCH := arm" + "\n")
  mk_file.write("TARGET_2ND_ARCH_VARIANT := armv7-a-neon" + "\n")
  mk_file.write("TARGET_2ND_CPU_ABI := armeabi-v7a" + "\n")
  mk_file.write("TARGET_2ND_CPU_ABI2 := armeabi" + "\n")
  mk_file.write("TARGET_2ND_CPU_VARIANT := generic" + "\n")
  mk_file.write("TARGET_BOARD_SUFFIX := _64" + "\n")
  mk_file.write("TARGET_USES_64_BIT_BINDER := true" + "\n")
 elif device_arch == "arm":
  mk_file.write("TARGET_ARCH := arm" + "\n")
  mk_file.write("TARGET_ARCH_VARIANT := armv7-a-neon" + "\n")
  mk_file.write("TARGET_CPU_ABI := armeabi-v7a" + "\n")
  mk_file.write("TARGET_CPU_ABI2 := armeabi" + "\n")
  mk_file.write("TARGET_CPU_VARIANT := generic" + "\n")
 elif device_arch == "x86":
  mk_file.write("TARGET_ARCH := x86" + "\n")
  mk_file.write("TARGET_ARCH_VARIANT := generic" + "\n")
  mk_file.write("TARGET_CPU_ABI := x86" + "\n")
  mk_file.write("TARGET_CPU_ABI2 := armeabi-v7a" + "\n")
  mk_file.write("TARGET_CPU_ABI_LIST := x86,armeabi-v7a,armeabi" + "\n")
  mk_file.write("TARGET_CPU_ABI_LIST_32_BIT := x86,armeabi-v7a,armeabi" + "\n")
  mk_file.write("TARGET_CPU_VARIANT := generic" + "\n")
 elif device_arch == "x86_64":
  mk_file.write("TARGET_ARCH := x86_64" + "\n")
  mk_file.write("TARGET_ARCH_VARIANT := x86_64" + "\n")
  mk_file.write("TARGET_CPU_ABI := x86_64" + "\n")
  mk_file.write("TARGET_CPU_ABI2 := " + "\n")
  mk_file.write("TARGET_CPU_VARIANT := generic" + "\n")
  mk_file.write("\n")
  mk_file.write("TARGET_2ND_ARCH := x86" + "\n")
  mk_file.write("TARGET_2ND_ARCH_VARIANT := x86" + "\n")
  mk_file.write("TARGET_2ND_CPU_ABI := x86" + "\n")
  mk_file.write("TARGET_2ND_CPU_VARIANT := generic" + "\n")
  mk_file.write("TARGET_BOARD_SUFFIX := _64" + "\n")
  mk_file.write("TARGET_USES_64_BIT_BINDER := true" + "\n")
 mk_file.write("\n")
 mk_file.write("# Assert" + "\n")
 mk_file.write("TARGET_OTA_ASSERT_DEVICE := " + device_codename + "\n")
 mk_file.write("\n")
 if device_board_name != "":
  mk_file.write("# Bootloader" + "\n")
  mk_file.write("TARGET_BOOTLOADER_BOARD_NAME := " + device_board_name + "\n")
  mk_file.write("\n")
 mk_file.write("# File systems" + "\n")
 mk_file.write("BOARD_HAS_LARGE_FILESYSTEM := true" + "\n")
 mk_file.write("#BOARD_RECOVERYIMAGE_PARTITION_SIZE := " + device_recovery_size + " # This is the maximum known partition size, but it can be higher, so we just omit it" + "\n")
 mk_file.write("BOARD_SYSTEMIMAGE_PARTITION_TYPE := ext4" + "\n")
 mk_file.write("BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE := ext4" + "\n")
 mk_file.write("BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE := ext4" + "\n")
 mk_file.write("TARGET_USERIMAGES_USE_EXT4 := true" + "\n")
 mk_file.write("TARGET_USERIMAGES_USE_F2FS := true" + "\n")
 mk_file.write("TARGET_COPY_OUT_VENDOR := vendor" + "\n")
 mk_file.write("\n")
 if device_is_ab:
  mk_file.write("# A/B" + "\n")
  mk_file.write("AB_OTA_UPDATER := true" + "\n")
  mk_file.write("TW_INCLUDE_REPACKTOOLS := true" + "\n")
 # Kernel section: prebuilt kernel/DT images plus the mkbootimg parameters
 # recovered from the original image.
 mk_file.write("# Kernel" + "\n")
 mk_file.write("BOARD_KERNEL_CMDLINE := " + device_cmdline + "\n")
 if device_have_kernel:
  mk_file.write("TARGET_PREBUILT_KERNEL := $(DEVICE_PATH)/prebuilt/" + device_kernel_name + "\n")
 if device_have_dt_image:
  mk_file.write("TARGET_PREBUILT_DT := $(DEVICE_PATH)/prebuilt/dt.img" + "\n")
 if device_have_dtb_image:
  mk_file.write("TARGET_PREBUILT_DTB := $(DEVICE_PATH)/prebuilt/dtb.img" + "\n")
 if device_have_dtbo_image:
  mk_file.write("BOARD_PREBUILT_DTBOIMAGE := $(DEVICE_PATH)/prebuilt/dtbo.img" + "\n")
  mk_file.write("BOARD_INCLUDE_RECOVERY_DTBO := true" + "\n")
 if device_header_version != "0":
  mk_file.write("BOARD_BOOTIMG_HEADER_VERSION := " + device_header_version + "\n")
 mk_file.write("BOARD_KERNEL_BASE := " + device_base_address + "\n")
 mk_file.write("BOARD_KERNEL_PAGESIZE := " + device_pagesize + "\n")
 mk_file.write("BOARD_RAMDISK_OFFSET := " + device_ramdisk_offset + "\n")
 mk_file.write("BOARD_KERNEL_TAGS_OFFSET := " + device_tags_offset + "\n")
 mk_file.write("BOARD_FLASH_BLOCK_SIZE := " + str(int(device_pagesize) * 64) + " # (BOARD_KERNEL_PAGESIZE * 64)" + "\n")
 mk_file.write("BOARD_MKBOOTIMG_ARGS += --ramdisk_offset $(BOARD_RAMDISK_OFFSET)" + "\n")
 mk_file.write("BOARD_MKBOOTIMG_ARGS += --tags_offset $(BOARD_KERNEL_TAGS_OFFSET)" + "\n")
 if device_have_dt_image:
  mk_file.write("BOARD_MKBOOTIMG_ARGS += --dt $(TARGET_PREBUILT_DT)" + "\n")
 if device_have_dtb_image:
  mk_file.write("BOARD_MKBOOTIMG_ARGS += --dtb $(TARGET_PREBUILT_DTB)" + "\n")
 if device_header_version != "0":
  mk_file.write("BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOTIMG_HEADER_VERSION)" + "\n")
 mk_file.write("TARGET_KERNEL_ARCH := " + device_arch + "\n")
 mk_file.write("TARGET_KERNEL_HEADER_ARCH := " + device_arch + "\n")
 mk_file.write("TARGET_KERNEL_SOURCE := kernel/" + device_manufacturer + "/" + device_codename + "\n")
 mk_file.write("TARGET_KERNEL_CONFIG := " + device_codename + "_defconfig" + "\n")
 mk_file.write("\n")
 if device_ramdisk_compression == "lzma":
  mk_file.write("# Ramdisk compression" + "\n")
  mk_file.write("LZMA_RAMDISK_TARGETS := recovery" + "\n")
  mk_file.write("\n")
 # Platform values cannot be detected from the image; left for the user.
 mk_file.write("# Platform" + "\n")
 mk_file.write("#TARGET_BOARD_PLATFORM := " + "\n")
 mk_file.write("#TARGET_BOARD_PLATFORM_GPU := " + "\n")
 mk_file.write("\n")
 mk_file.write("# Hack: prevent anti rollback" + "\n")
 mk_file.write("PLATFORM_SECURITY_PATCH := 2099-12-31" + "\n")
 mk_file.write("PLATFORM_VERSION := 16.1.0" + "\n")
 mk_file.write("\n")
 mk_file.write("# TWRP Configuration" + "\n")
 mk_file.write("TW_THEME := portrait_hdpi" + "\n")
 mk_file.write("TW_EXTRA_LANGUAGES := true" + "\n")
 mk_file.write("TW_SCREEN_BLANK_ON_BOOT := true" + "\n")
 mk_file.write('TW_INPUT_BLACKLIST := "hbtp_vm"' + "\n")
 mk_file.write("TW_USE_TOOLBOX := true" + "\n")
print("Creating device.mk...")
# device.mk: product-level packages; A/B devices additionally get the OTA
# partition lists, the boot-control HAL and the update_engine stack.
# The redundant file.close() calls inside the "with" blocks were removed
# (the context manager already closes), and the handle no longer shadows
# the "file" builtin.
with open(device_tree_path / "device.mk", "a") as mk_file:
 mk_file.write("LOCAL_PATH := device" + "/" + device_manufacturer + "/" + device_codename + "\n")
 if device_is_ab:
  mk_file.write("# A/B" + "\n")
  mk_file.write("AB_OTA_PARTITIONS += \\" + "\n")
  mk_file.write("    boot \\" + "\n")
  mk_file.write("    system \\" + "\n")
  mk_file.write("    vendor" + "\n")
  mk_file.write("\n")
  mk_file.write("AB_OTA_POSTINSTALL_CONFIG += \\" + "\n")
  mk_file.write("    RUN_POSTINSTALL_system=true \\" + "\n")
  mk_file.write("    POSTINSTALL_PATH_system=system/bin/otapreopt_script \\" + "\n")
  mk_file.write("    FILESYSTEM_TYPE_system=ext4 \\" + "\n")
  mk_file.write("    POSTINSTALL_OPTIONAL_system=true" + "\n")
  mk_file.write("\n")
  mk_file.write("# Boot control HAL" + "\n")
  mk_file.write("PRODUCT_PACKAGES += \\" + "\n")
  mk_file.write("    android.hardware.boot@1.0-impl \\" + "\n")
  mk_file.write("    android.hardware.boot@1.0-service" + "\n")
  mk_file.write("\n")
  mk_file.write("PRODUCT_PACKAGES += \\" + "\n")
  mk_file.write("    bootctrl.$(TARGET_BOARD_PLATFORM)" + "\n")
  mk_file.write("\n")
  mk_file.write("PRODUCT_STATIC_BOOT_CONTROL_HAL := \\" + "\n")
  mk_file.write("    bootctrl.$(TARGET_BOARD_PLATFORM) \\" + "\n")
  mk_file.write("    libgptutils \\" + "\n")
  mk_file.write("    libz \\" + "\n")
  mk_file.write("    libcutils" + "\n")
  mk_file.write("\n")
  mk_file.write("PRODUCT_PACKAGES += \\" + "\n")
  mk_file.write("    otapreopt_script \\" + "\n")
  mk_file.write("    cppreopts.sh \\" + "\n")
  mk_file.write("    update_engine \\" + "\n")
  mk_file.write("    update_verifier \\" + "\n")
  mk_file.write("    update_engine_sideload" + "\n")
print("Creating omni_" + device_codename + ".mk...")
# omni_<codename>.mk: product definition inheriting Omni common config.
with open(device_tree_path / ("omni_" + device_codename + ".mk"), "a") as mk_file:
 mk_file.write("# Inherit from those products. Most specific first." + "\n")
 if device_have_64bit_arch:
  mk_file.write("$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)" + "\n")
 mk_file.write("$(call inherit-product, $(SRC_TARGET_DIR)/product/embedded.mk)" + "\n")
 mk_file.write("$(call inherit-product, $(SRC_TARGET_DIR)/product/full_base_telephony.mk)" + "\n")
 mk_file.write("$(call inherit-product, $(SRC_TARGET_DIR)/product/languages_full.mk)" + "\n")
 mk_file.write("\n")
 mk_file.write("# Inherit from " + device_codename + " device" + "\n")
 mk_file.write("$(call inherit-product, device/" + device_manufacturer + "/" + device_codename + "/device.mk)" + "\n")
 mk_file.write("\n")
 mk_file.write("# Inherit some common Omni stuff." + "\n")
 mk_file.write("$(call inherit-product, vendor/omni/config/common.mk)" + "\n")
 mk_file.write("$(call inherit-product, vendor/omni/config/gsm.mk)" + "\n")
 mk_file.write("\n")
 mk_file.write("# Device identifier. This must come after all inclusions" + "\n")
 mk_file.write("PRODUCT_DEVICE := " + device_codename + "\n")
 mk_file.write("PRODUCT_NAME := omni_" + device_codename + "\n")
 mk_file.write("PRODUCT_BRAND := " + device_manufacturer + "\n")
 mk_file.write("PRODUCT_MODEL := " + device_full_name + "\n")
 mk_file.write("PRODUCT_MANUFACTURER := " + device_manufacturer + "\n")
 mk_file.write("PRODUCT_RELEASE_NAME := " + device_full_name + "\n")
print("Creating vendorsetup.sh...")
# vendorsetup.sh: expose the lunch combos for this device.
with open(device_tree_path / "vendorsetup.sh", "a") as mk_file:
 mk_file.write("add_lunch_combo omni_" + device_codename + "-userdebug" + "\n")
 mk_file.write("add_lunch_combo omni_" + device_codename + "-eng" + "\n")
# Initialize a git repository in the device tree and create the initial
# commit, attributed to the script author.
dt_repo = git.Repo.init(device_tree_path)
with dt_repo.config_writer() as git_config:
 git_config.set_value('user', 'email', 'barezzisebastiano@gmail.com')
 git_config.set_value('user', 'name', 'Sebastiano Barezzi')
dt_repo.index.add(["*"])
# The commit message records the generator version and commit for
# traceability of generated trees.
commit_message = device_codename + ": Initial TWRP device tree" + "\n"
commit_message += "Made with SebaUbuntu's TWRP device tree generator" + "\n"
commit_message += "Arch: " + device_arch + "\n"
commit_message += "Manufacturer: " + device_manufacturer + "\n"
commit_message += "Device full name: " + device_full_name + "\n"
commit_message += "Script version: " + version + "\n"
commit_message += "Last script commit: " + last_commit + "\n"
commit_message += "Signed-off-by: Sebastiano Barezzi <barezzisebastiano@gmail.com>"
dt_repo.index.commit(commit_message)
print("")
print("Done! You can find the device tree in " + str(device_tree_path))
print("Note: You should open BoardConfig.mk and fix TARGET_BOARD_PLATFORM, TARGET_BOARD_PLATFORM_GPU and BOARD_RECOVERYIMAGE_PARTITION_SIZE")
|
"""
使用 xpath 将猫眼 100 的全部电影信息全部提取出来。
目标网址:https://maoyan.com/board/4?offset=90
name(电影名)
star(主演)
releasetime(上映时间)
score(评分)
"""
|
#! /usr/bin/env python3
#
# Copyright (c) 2016, 2017, 2018 Forschungszentrum Juelich GmbH
# Author: Yann Leprince <y.leprince@fz-juelich.de>
#
# This software is made available under the MIT licence, see LICENCE.txt.
"""Downscaling is used to create a multi-resolution image pyramid.
The central component here is the :class:`Downscaler` base class. Use
:func:`get_downscaler` for instantiating a concrete downscaler object.
"""
import numpy as np
from neuroglancer_scripts.utils import ceil_div
from neuroglancer_scripts.data_types import get_chunk_dtype_transformer
# Public API of this module: the factory, the CLI helper, and the concrete
# downscaler classes.
__all__ = [
    "get_downscaler",
    "add_argparse_options",
    "Downscaler",
    "StridingDownscaler",
    "AveragingDownscaler",
    "MajorityDownscaler",
]
def get_downscaler(downscaling_method, info=None, options=None):
    """Create a downscaler object.

    :param str downscaling_method: one of ``"auto"``, ``"average"``,
        ``"majority"``, or ``"stride"``
    :param dict info: dataset info; only its ``"type"`` key is read, and
        only when ``downscaling_method`` is ``"auto"``
    :param dict options: options passed to the downscaler as kwargs
    :returns: an instance of a sub-class of :class:`Downscaler`
    :rtype: Downscaler
    :raises NotImplementedError: for an unrecognized ``downscaling_method``
    """
    # Avoid the mutable-default-argument pitfall (the previous default of
    # ``{}`` was a single dict shared across calls).
    if options is None:
        options = {}
    if downscaling_method == "auto":
        # Averaging suits grey-level images; striding keeps label values
        # intact for segmentations. Forward info/options unchanged.
        if info["type"] == "image":
            return get_downscaler("average", info=info, options=options)
        else:  # info["type"] == "segmentation":
            return get_downscaler("stride", info=info, options=options)
    elif downscaling_method == "average":
        outside_value = options.get("outside_value")
        return AveragingDownscaler(outside_value)
    elif downscaling_method == "majority":
        return MajorityDownscaler()
    elif downscaling_method == "stride":
        return StridingDownscaler()
    else:
        raise NotImplementedError("invalid downscaling method {0}"
                                  .format(downscaling_method))
def add_argparse_options(parser):
    """Register the downscaling command-line options on *parser*.

    :param argparse.ArgumentParser parser: an argument parser

    Typical usage, feeding the parsed options to :func:`get_downscaler`::

        import argparse
        parser = argparse.ArgumentParser()
        add_argparse_options(parser)
        args = parser.parse_args()
        get_downscaler(args.downscaling_method, vars(args))
    """
    opt_group = parser.add_argument_group("Options for downscaling")
    opt_group.add_argument(
        "--downscaling-method",
        default="auto",
        choices=("auto", "average", "majority", "stride"),
        help='The default is "auto", which chooses '
             '"average" or "stride" depending on the "type" '
             'attribute of the dataset (for "image" or '
             '"segmentation", respectively). "average" is '
             'recommended for grey-level images. "majority" is a '
             'high-quality, but very slow method for segmentation '
             'images. "stride" is fastest, but provides no '
             'protection against aliasing artefacts.')
    opt_group.add_argument(
        "--outside-value",
        type=float,
        default=None,
        help='padding value used by the "average" downscaling '
             "method for computing the voxels at the border. If "
             "omitted, the volume is padded with its edge values.")
class Downscaler:
    """Base class for downscaling algorithms."""

    def check_factors(self, downscaling_factors):
        """Test support for given downscaling factors.

        Subclasses must override this method if they do not support any
        combination of integer downscaling factors.

        :param downscaling_factors: sequence of integer downscaling factors
                                    (Dx, Dy, Dz)
        :type downscaling_factors: :class:`tuple` of :class:`int`
        :returns: whether the provided downscaling factors are supported
        :rtype: bool
        """
        # Exactly three positive integer factors are required.
        if len(downscaling_factors) != 3:
            return False
        return all(isinstance(factor, int) and factor >= 1
                   for factor in downscaling_factors)

    def downscale(self, chunk, downscaling_factors):
        """Downscale a chunk according to the provided factors.

        :param numpy.ndarray chunk: chunk with (C, Z, Y, X) indexing
        :param downscaling_factors: sequence of integer downscaling factors
                                    (Dx, Dy, Dz)
        :type downscaling_factors: tuple
        :returns: the downscaled chunk, with shape ``(C, ceil_div(Z, Dz),
                  ceil_div(Y, Dy), ceil_div(X, Dx))``
        :rtype: numpy.ndarray
        :raises NotImplementedError: if the downscaling factors are unsupported
        """
        raise NotImplementedError
class StridingDownscaler(Downscaler):
    """Downscale using striding.

    A fast, low-quality method: it simply keeps every Dx-th/Dy-th/Dz-th
    voxel, so it offers no protection against aliasing artefacts. Any
    integer downscaling factors are accepted.
    """

    def downscale(self, chunk, downscaling_factors):
        """Strided subsampling of *chunk* (C, Z, Y, X) by (Dx, Dy, Dz)."""
        if not self.check_factors(downscaling_factors):
            raise NotImplementedError
        dx, dy, dz = downscaling_factors
        # Channel axis is untouched; spatial axes are strided.
        return chunk[:, ::dz, ::dy, ::dx]
class AveragingDownscaler(Downscaler):
    """Downscale by a factor of two in any direction, averaging 2 voxels.

    Suitable for grey-level images. Only factors of 1 or 2 per axis are
    supported.

    .. todo::
       Use code from the neuroglancer module to support arbitrary factors.
    """

    def __init__(self, outside_value=None):
        # An explicit outside value means constant padding; otherwise the
        # volume is padded by replicating its edge values.
        if outside_value is not None:
            self.padding_mode = "constant"
            self.pad_kwargs = {"constant_values": outside_value}
        else:
            self.padding_mode = "edge"
            self.pad_kwargs = {}

    def check_factors(self, downscaling_factors):
        """Accept exactly three factors, each equal to 1 or 2."""
        if len(downscaling_factors) != 3:
            return False
        return all(factor == 1 or factor == 2
                   for factor in downscaling_factors)

    def downscale(self, chunk, downscaling_factors):
        """Average-downscale *chunk* (C, Z, Y, X) by (Dx, Dy, Dz)."""
        if not self.check_factors(downscaling_factors):
            raise NotImplementedError
        dtype = chunk.dtype
        # Use a floating-point type for the intermediate arithmetic.
        work_dtype = np.promote_types(dtype, np.float64)
        chunk = chunk.astype(work_dtype, casting="safe")
        half = work_dtype.type(0.5)
        # Array axis 1 pairs with Dz, axis 2 with Dy, axis 3 with Dx;
        # process them in that (Z, Y, X) order.
        for axis in (1, 2, 3):
            if downscaling_factors[3 - axis] != 2:
                continue
            # Pad odd-sized axes by one trailing element before pairing.
            if chunk.shape[axis] % 2 != 0:
                pad_width = [(0, 0)] * 4
                pad_width[axis] = (0, 1)
                chunk = np.pad(chunk, pad_width,
                               self.padding_mode, **self.pad_kwargs)
            even_sel = [slice(None)] * 4
            odd_sel = [slice(None)] * 4
            even_sel[axis] = slice(0, None, 2)
            odd_sel[axis] = slice(1, None, 2)
            chunk = half * (chunk[tuple(even_sel)] + chunk[tuple(odd_sel)])
        converter = get_chunk_dtype_transformer(work_dtype, dtype,
                                                warn=False)
        return converter(chunk)
class MajorityDownscaler(Downscaler):
    """Downscaler using majority voting over each output voxel's block.

    Suitable for label (segmentation) images.

    .. todo::
       The majority downscaler could be *really* optimized (clever
       iteration with nditer, Cython, countless for appropriate cases)
    """

    def downscale(self, chunk, downscaling_factors):
        """Majority-vote downscale of *chunk* (C, Z, Y, X) by (Dx, Dy, Dz)."""
        if not self.check_factors(downscaling_factors):
            raise NotImplementedError
        dx, dy, dz = downscaling_factors
        out_shape = (chunk.shape[0],
                     ceil_div(chunk.shape[1], dz),
                     ceil_div(chunk.shape[2], dy),
                     ceil_div(chunk.shape[3], dx))
        new_chunk = np.empty(out_shape, dtype=chunk.dtype)
        for out_index in np.ndindex(*out_shape):
            t, z, y, x = out_index
            # Source block covered by this output voxel (clipped at edges
            # automatically by slicing).
            block = chunk[t,
                          z * dz:z * dz + dz,
                          y * dy:y * dy + dy,
                          x * dx:x * dx + dx]
            # Most frequent label wins.
            labels, counts = np.unique(block.flat, return_counts=True)
            new_chunk[out_index] = labels[np.argmax(counts)]
        return new_chunk
|
#!/usr/bin/python
import datetime
import getopt
import logging
import math
import pytz
import sys
from crate import client
from dateutil import parser
def main(argv):
    """Parse CLI options, connect to CrateDB and run the occupancy job.

    Options: -h/--host (required), -u/--user, -p/--password,
    -t/--tenant-name, -s/--start-date, -e/--end-date,
    -d/--delta-time (hours, default 24), -r/--dry-run.
    Exits the process in all cases (error codes 2/-2, or 0 on success).
    """
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Setting up the connection to CrateDB (with command line args)
    crate_host = None
    crate_user = None
    crate_password = None
    start_date = None
    end_date = None
    delta = 24
    tenant_name = 'EKZ'
    cursor = None
    connection = None
    dry_run = False
    try:
        # BUGFIX: "delta-time" and "dry-run" were missing the trailing "="
        # even though their short forms ("d:", "r:") take an argument, so
        # the long forms never received a value.
        opts, args = getopt.getopt(argv, "h:u:p:t:s:e:d:r:",
                                   ["host=", "user=", "password=",
                                    "tenant-name=", "start-date=",
                                    "end-date=", "delta-time=", "dry-run="])
    except getopt.GetoptError:
        logger.error("wrong parameters")
        print('occupancy.py -h <cratedb_host> -u <cratedb_user>')
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--host"):
            crate_host = arg
        elif opt in ("-u", "--user"):
            crate_user = arg
        elif opt in ("-p", "--password"):
            crate_password = arg
        elif opt in ("-t", "--tenant-name"):
            # BUGFIX: the tenant name was run through dateutil's
            # parser.parse() (a copy-paste from the date options); it is a
            # plain string.
            tenant_name = arg
        elif opt in ("-s", "--start-date"):
            start_date = parser.parse(arg)
        elif opt in ("-e", "--end-date"):
            end_date = parser.parse(arg)
        elif opt in ("-d", "--delta-time"):
            delta = int(arg)
        elif opt in ("-r", "--dry-run"):
            # BUGFIX: bool(arg) is True for ANY non-empty string (including
            # "false"); interpret common truthy spellings instead.
            dry_run = arg.strip().lower() in ('1', 'true', 'yes', 'y')
    if not crate_host:
        logger.error("missing parameters")
        print('occupancy.py -h <cratedb_host> -u <cratedb_user>')
        sys.exit(-2)
    try:
        logger.info("connecting...")
        # Multi-tenant schemas are named "mt<tenant>" in lowercase.
        schema = "mt" + tenant_name.lower()
        connection = client.connect(crate_host, username=crate_user,
                                    password=crate_password)
        cursor = connection.cursor()
        computeOccupancy(cursor, schema, start_date, end_date, delta, dry_run)
    except Exception as e:
        logger.error(str(e), exc_info=True)
        sys.exit(-2)
    finally:
        if cursor:
            cursor.close()
        if connection:
            connection.close()
        sys.exit()
def computeOccupancy(cursor, schema, start_date, end_date, delta, dry_run):
    """Compute hourly parking-spot occupancy and store it in CrateDB.

    Reads status rows from "<schema>"."etparkingspot" between the derived
    start and end times, computes per-entity hourly occupancy via
    computeEntityOccupancy(), and batch-inserts the results into
    "<schema>"."etparkingoccupancy" (unless dry_run is set).

    NOTE(review): this code relies on Python 2 semantics — filter() results
    are reused/indexed as lists below and in computeEntityOccupancy().
    """
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logger.info("computing occupancy...")
    # Current time changed if end date specified
    if end_date:
        currentTime = end_date.replace(microsecond=0, second=0, minute=0,
                                       tzinfo=pytz.UTC)
    else:
        currentTime = datetime.datetime.utcnow().replace(microsecond=0, second=0,
                                                         minute=0,
                                                         tzinfo=pytz.UTC)
    # Start time changed if given a start date
    if start_date:
        previousTime = start_date.replace(microsecond=0, second=0, minute=0,
                                          tzinfo=pytz.UTC)
    else:
        previousTime = (currentTime - datetime.timedelta(hours=delta))
    # How many hours to compute data for
    hoursDiff = int((currentTime - previousTime).total_seconds() / 60 / 60)
    # Check if hours not 0 or negative
    if hoursDiff < 1:
        logger.error("Start date too close to current time")
        print("Start date too close to current time")
        sys.exit(-2)
    # Setting up cursor and pulling data since the start time
    limit = 1000
    offset = 0
    data = []
    query = 'SELECT "status","time_index", "entity_id", "entity_type", ' \
            '"fiware_servicepath", "name", "refdevice" FROM ' \
            '"{}"."etparkingspot" WHERE "time_index">=? AND "time_index"<=? ' \
            'AND status!=? AND status!=? ORDER BY time_index ASC LIMIT {} ' \
            'OFFSET {}'
    # Page through the results; the loop stops when a page adds no new rows.
    while offset >= 0:
        stmt = query.format(schema, limit, offset)
        cursor.execute(stmt,
                       (previousTime.isoformat(),
                        currentTime.isoformat(), 'None', 'unknown'))
        current_size = len(data)
        data += cursor.fetchall()
        if len(data) == current_size:
            offset = -1
        else:
            offset += limit
    logger.info("loaded {} data".format(len(data)))
    # List of Service Paths
    servicePaths = list(dict.fromkeys(map(lambda a: a[4], data)))
    # Computing occupancy data for all entities and servicepaths
    occupancyData = []
    for path in servicePaths:
        # NOTE(review): pathData is consumed twice (here and for entityData
        # below) — correct only where filter() returns a list (Python 2).
        pathData = filter(lambda a: a[4] == path, data)
        entityIds = list(dict.fromkeys(map(lambda a: a[2], pathData)))
        for entity in entityIds:
            # Getting the last known status. Assumed free if none found
            stmt = 'SELECT "status","time_index", "entity_id", ' \
                   '"entity_type", "fiware_servicepath", "name", "refdevice" ' \
                   'FROM "{}"."etparkingspot" WHERE "time_index"<? AND ' \
                   '"entity_id"=? AND "fiware_servicepath"=? AND status!=? ' \
                   'AND status!=? ORDER BY time_index DESC LIMIT 1'.format(schema)
            cursor.execute(
                stmt,
                (previousTime.strftime('%s') + '000', entity, path, 'None',
                 'unknown'))
            previousStateRow = cursor.fetchone()
            if previousStateRow:
                previousState = previousStateRow[0]
                entity_type = previousStateRow[3]
                name = previousStateRow[5]
                refdevice = previousStateRow[6]
            else:
                previousState = 'free'
                entity_type = None
                name = None
                refdevice = None
            entityData = filter(lambda a: a[2] == entity, pathData)
            occupancyData.extend(computeEntityOccupancy(entity, entity_type, name, path, refdevice, entityData, previousState, previousTime, hoursDiff))
    logger.info("occupancy computed")
    if dry_run:
        logger.info("dry run mode, no data will be stored")
    stmt = 'INSERT INTO "{}"."etparkingoccupancy" (occupancy, time_index, entity_id, entity_type, fiware_servicepath, name, refdevice) VALUES (?,?,?,?,?,?,?)'.format(schema)
    # Insert results in batches of 1000 rows.
    for i in range(0, len(occupancyData), 1000):
        chunck = occupancyData[i:i + 1000]
        if not dry_run:
            logger.info("sending batch of {} lenght".format(len(chunck)))
            cursor.executemany(stmt, chunck)
    if not dry_run:
        logger.info("occupancy stored")
def computeEntityOccupancy(entity, entity_type, name, path, refdevice, entityData, previousState, previousTime, hoursDiff):
    """Compute the hourly occupancy percentage for a single parking spot.

    For each of the hoursDiff hours starting at previousTime, sums the
    milliseconds spent in state 'occupied' (carrying previousState across
    hour boundaries) and returns a list of
    (occupancy, time_index, entity_id, entity_type, path, name, refdevice)
    tuples ready for insertion.

    NOTE(review): this is Python 2 code — it uses the long() builtin and
    calls len()/indexes on a filter() result.
    """
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    occupancyData = []
    # For each of the hours since the start time given
    for i in range(hoursDiff):
        # Hour bounds as epoch-millisecond strings ('%s' is seconds + "000").
        start_time = (previousTime + datetime.timedelta(
            hours=i)).strftime('%s') + '000'
        end_time = (previousTime + datetime.timedelta(
            hours=(i + 1))).strftime('%s') + '000'
        hourData = filter(
            lambda d: d[1] >= int(start_time) and d[1] < int(end_time),
            entityData)
        occupiedTime = 0
        if len(hourData) > 0:
            # One extra iteration (j == len) closes the interval from the
            # last status change to the end of the hour.
            for j in range((len(hourData) + 1)):
                if j != len(hourData):
                    # Keep the most recent non-empty metadata fields.
                    if hourData[j][3]:
                        entity_type = hourData[j][3]
                    if hourData[j][5]:
                        name = hourData[j][5]
                    if hourData[j][6]:
                        refdevice = hourData[j][6]
                timePassed = 0
                if j == 0:
                    timePassed = hourData[j][1] - long(start_time)
                elif j == len(hourData):
                    timePassed = long(end_time) - hourData[j - 1][1]
                else:
                    timePassed = hourData[j][1] - hourData[j - 1][1]
                # Time since the previous event is attributed to the state
                # that was active during it.
                if previousState == 'occupied':
                    occupiedTime = occupiedTime + timePassed
                if j != len(hourData) and hourData[j][0]:
                    previousState = hourData[j][0]
        # No events this hour: the whole hour keeps the carried-over state.
        if len(hourData) == 0 and previousState == 'occupied':
            occupiedTime = 3600000
        if len(hourData) == 0 and previousState == 'free':
            occupiedTime = 0
        # Occupied fraction of the hour as a (ceiled) percentage.
        occupancy = round(math.ceil((occupiedTime / 3600000.0) * 100),2)
        timezonedStartTime = datetime.datetime.fromtimestamp(
            long(start_time) / 1000.0)
        timezonedStartTime = timezonedStartTime.replace(
            tzinfo=pytz.UTC).isoformat()
        logger.debug("entity {} in path {} occupancy computed is {} "
                     "on time {}".format(entity, path, occupancy,
                                         timezonedStartTime))
        occupancyData.append((occupancy, timezonedStartTime, entity,
                              entity_type, path, name, refdevice))
    return occupancyData
# Script entry point: forward the CLI arguments (minus argv[0]) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
|
import argparse
import pickle as pkl
import taichi as ti
import math
import numpy as np
import sklearn.cluster as cluster
from engine.mpm_solver import MPMSolver
import sys, os
sys.path.append(os.path.join(os.path.dirname("__file__"), '..', '..'))
from hgns.util import cluster_scene
def parse_mpm():
    """Build and return the argument parser for MPM simulation runs."""
    parser = argparse.ArgumentParser(description='MPM sim arguments')
    # (flag, value type, help text) for the plain value-taking options.
    option_specs = [
        ('--dataset_name', str, 'Path under data/ to save the simulation.'),
        ('--num_sims', int, 'Number of groundtruth simulations to generate.'),
        ('--num_frames', int,
         'Number of groundtruth simulation frames to generate per trajectory.'),
        ('--step_size', float, 'Simulation step size'),
        ('--young', int, "Young's coefficients"),
        ('--ncubes', int, 'Number of boxes to be dropped'),
        ('--material', str, 'Material of boxes to be dropped'),
        ('--gui_particle_radius', float,
         'particle radius for GUI visualization'),
    ]
    for flag, value_type, help_text in option_specs:
        parser.add_argument(flag, type=value_type, help=help_text)
    parser.add_argument('--gui_write_to_disk', action='store_true', default=False,
                        help='particle radius for GUI visualization')
    parser.set_defaults(dataset_name='water',
                        num_sims=1000,
                        num_frames=500,
                        step_size=8e-3,
                        young=1e5,
                        ncubes=3,
                        material='water',
                        gui_particle_radius=1.5,
                        gui_write_to_disk=False)
    return parser
def cube_overlap(lower_corners, cube_sizes, new_lower_corner, new_cube_size):
    """Return True when the new axis-aligned box comes within ``eps`` of any
    already-placed box (separation is checked on the x and y axes only)."""
    eps = 0.03
    new_hi = new_lower_corner + new_cube_size
    for low, size in zip(lower_corners, cube_sizes):
        hi = low + size
        # Boxes are disjoint only if there is a gap larger than eps on some axis.
        separated_x = low[0] > new_hi[0] + eps or new_lower_corner[0] > hi[0] + eps
        separated_y = low[1] > new_hi[1] + eps or new_lower_corner[1] > hi[1] + eps
        if not (separated_x or separated_y):
            return True
    return False
def process_particle_features(particles):
    """Pack position, velocity and material id into one per-particle feature matrix."""
    # Material is a 1-D id array; give it a trailing axis so it can be
    # concatenated column-wise with the 2-D position/velocity arrays.
    material_col = particles['material'][:, None]
    return np.concatenate(
        (particles['position'], particles['velocity'], material_col), axis=1)
def simulate(mpm, gui, args):
    """Advance the MPM solver for args.num_frames steps, rendering each frame.

    Returns a (num_frames, num_particles, 7) array of per-frame particle
    features (position, velocity, material id).
    """
    particle_states = []
    for frame in range(args.num_frames):
        mpm.step(args.step_size)
        # One GUI color per material id.
        colors = np.array([0x068587, 0xED553B, 0xEEEEF0, 0xFFFF00], dtype=np.uint32)
        particles = mpm.particle_info()
        particle_states.append(process_particle_features(particles))
        gui.circles(particles['position'], radius=args.gui_particle_radius,
                    color=colors[particles['material']])
        # Optionally dump the rendered frame as a numbered PNG.
        gui.show(f'{frame:06d}.png' if args.gui_write_to_disk else None)
    particle_states = np.stack(particle_states, axis=0)
    return particle_states
def rollout(trajectory, gui, radius=1.5):
    """Replay a saved trajectory (frames x particles x features) in the GUI.

    The last feature column is the material id; the first two columns are
    the 2-D particle positions.
    """
    palette = np.array([0x068587, 0xED553B, 0xEEEEF0, 0xFFFF00], dtype=np.uint32)
    for frame in trajectory:
        material_ids = frame[:, -1].astype(int)
        gui.circles(frame[:, :2], radius=radius, color=palette[material_ids])
        gui.show(None)
def main():
    """Generate ``num_sims`` MPM trajectories and pickle them in batches of 100."""
    parser = parse_mpm()
    args = parser.parse_args()
    simulation_trajectories = []
    # Map the CLI name to the solver's material constant; fail fast on an
    # unknown name (previously it caused a NameError on 'material' later).
    materials = {
        'water': MPMSolver.material_water,
        'elastic': MPMSolver.material_elastic,
        'snow': MPMSolver.material_snow,
        'sand': MPMSolver.material_sand,
    }
    if args.material not in materials:
        raise ValueError('Unknown material: {}'.format(args.material))
    material = materials[args.material]
    for sim_i in range(args.num_sims):
        ti.init(arch=ti.cuda)  # Try to run on GPU
        gui = ti.GUI("Taichi MLS-MPM-99", res=512, background_color=0x112F41)
        mpm = MPMSolver(res=(128, 128))
        mpm.E = args.young
        # Initial condition: rejection-sample non-overlapping boxes.  Draw 10x
        # more candidates than needed and keep the first args.ncubes that fit.
        n_samples = args.ncubes * 10
        lower_corner_candidates = np.random.uniform(0.2, 0.8, size=(n_samples, 2))
        cube_size_candidates = np.random.uniform(0.06, 0.15, size=(n_samples, 2))
        lower_corners = []
        cube_sizes = []
        num_cubes = args.ncubes
        # NOTE(review): overlap is tested against ALL earlier candidates, not
        # only the accepted boxes — over-conservative but kept as-is.
        for s in range(1, n_samples):
            if not cube_overlap(lower_corner_candidates[:s], cube_size_candidates[:s],
                                lower_corner_candidates[s], cube_size_candidates[s]):
                lower_corners.append(lower_corner_candidates[s])
                cube_sizes.append(cube_size_candidates[s])
                if len(lower_corners) == num_cubes:
                    break
        for i in range(len(lower_corners)):
            mpm.add_cube(lower_corner=lower_corners[i],
                         cube_size=cube_sizes[i],
                         material=material)
        simulation_trajectories.append(simulate(mpm, gui, args))
        if (sim_i+1) % 1 == 0:
            print('Simulated {} trajectories.'.format(sim_i+1))
        data_path = os.path.join('../data/', args.dataset_name)
        if not os.path.exists(data_path):
            os.makedirs(data_path)
        # Flush to disk every 100 sims (and at the end) to bound memory use.
        if (sim_i+1) % 100 == 0 or sim_i+1 == args.num_sims:
            print(len(simulation_trajectories))
            with open('../data/{}/{}_{}.pkl'.format(args.dataset_name, args.material, sim_i // 100), 'wb') as f:
                pkl.dump(simulation_trajectories, f)
            simulation_trajectories = []
# Script entry point.
if __name__ == '__main__':
    main()
|
import fs_wrapper
from case_utility import *
from qrd_shared.case import *
from test_case_base import TestCaseBase
import logging_wrapper
############################################
#author:
# liqiang@cienet.com.cn
#function:
# use the foreground camera to take a VGA picture
#precondition:
# there is a mounted sdcard
#steps:
# open the foreground camera
# set the VGA picture size
# take a picture
############################################
class test_suit_camera_case1(TestCaseBase):
    """Take a VGA-sized picture with the foreground (front) camera.

    Precondition: an external SD card is mounted.  See the module header
    for the manual test steps this case automates.
    """
    def test_case_main(self,case_results):
        """Run the scenario; append (case_name, passed) to case_results."""
        sleep(3)
        #check the sdcard .no sdcard, no continue
        local_assert(True,is_external_storage_enable())
        if not can_continue():
            return
        #switch to foreground camera.
        camera.switch_2_foreground_camera()
        #open the second level settings
        click_imageview_by_id('second_level_indicator')
        #click the other settings imageview to set picture size
        click_view_by_container_id('second_level_indicator_bar','android.widget.ImageView','2')
        #search VGA text
        # Step through the picture-size options at most MAX_COUNT times
        # until the 'VGA' label becomes visible.
        MAX_COUNT = 5
        count = 0
        while search_text('VGA',searchFlag=TEXT_MATCHES) ==False and count < MAX_COUNT:
            count = count + 1
            click_textview_by_desc(camera.get_value('increase_picture_size'))
        if count >= MAX_COUNT:
            log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], 'can not find the VGA label')
            set_cannot_continue()
            qsst_log_case_status(logging_wrapper.STATUS_FAILED, 'can not find the VGA label', logging_wrapper.SEVERITY_HIGH)
            return
        #close the other settings,back to the first level
        click_imageview_by_id('back_to_first_level')
        #click the shutter button
        if not camera.take_picture():
            log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], 'take picture failed')
            set_cannot_continue()
            qsst_log_case_status(logging_wrapper.STATUS_FAILED, 'take picture failed', logging_wrapper.SEVERITY_HIGH)
            return
        case_results.append((self.case_config_map[fs_wrapper.CASE_NAME_ATTR], can_continue()))
        qsst_log_case_status(logging_wrapper.STATUS_SUCCESS, 'take picture success', logging_wrapper.SEVERITY_HIGH)
|
# Defines two classes, Point() and NonVerticalLine().
# An object for the second class is created by passing named arguments,
# point_1 and point_2, to its constructor.
# Such an object can be modified by changing one point or both points thanks to the
# function change_point_or_points().
# At any stage, the object maintains correct values for slope and intersect.
#
# Written by Xiaowen Huang
class Point:
    """A 2-D point; with no arguments the point defaults to the origin."""

    def __init__(self, x=None, y=None):
        """Create a point at (x, y), or at (0, 0) when both are omitted.

        Supplying only one coordinate is rejected: a message is printed and
        the attributes are left unset (behaviour kept for compatibility).
        """
        # Identity comparison with None: '== None' would misbehave for
        # objects overriding __eq__, and 'is' is the idiomatic check.
        if x is None and y is None:
            self.x, self.y = 0, 0
        elif x is None or y is None:
            print('Need two coordinates, point not created.')
        else:
            self.x, self.y = x, y
class NonVerticalLine:
    """A non-vertical line through two points.

    Slope and intercept are kept consistent whenever one or both points
    are replaced via change_point_or_points().
    """

    def __init__(self, point_1, point_2):
        """Build the line; invalid input prints a message and leaves the
        instance without attributes (behaviour kept for compatibility)."""
        if not self._check_and_initialise(point_1, point_2):
            print('Incorrect input, line not created.')
        else:
            self.p1, self.p2 = point_1, point_2
            self.slope = (point_1.y - point_2.y)/(point_1.x - point_2.x)
            self.intercept = point_1.y - self.slope * point_1.x

    def _check_and_initialise(self, point_1, point_2):
        """Return True when both points are fully defined and not vertical.

        Fixes two defects: the original tested point_2.x twice (point_2.y
        was never checked), and it used truthiness, so a legitimate
        coordinate of 0 was wrongly treated as missing.
        """
        coords = (getattr(point_1, 'x', None), getattr(point_1, 'y', None),
                  getattr(point_2, 'x', None), getattr(point_2, 'y', None))
        if any(c is None for c in coords):
            return False
        return point_1.x != point_2.x

    def check(self, point_1, point_2):
        """Return False when the proposed replacement point(s) would make
        the line vertical; True otherwise."""
        if point_1 is None and point_2 is not None:
            if point_2.x == self.p1.x:
                return False
        elif point_2 is None and point_1 is not None:
            if point_1.x == self.p2.x:
                return False
        elif point_1 is not None and point_2 is not None:
            if point_1.x == point_2.x:
                return False
        return True

    def change_point_or_points(self, point_1=None, point_2=None):
        """Replace one or both endpoints and refresh slope and intercept."""
        if self.check(point_1, point_2):
            if point_1 is not None:
                self.p1 = point_1
            if point_2 is not None:
                self.p2 = point_2
            self.slope = (self.p1.y - self.p2.y)/(self.p1.x - self.p2.x)
            self.intercept = self.p1.y - self.slope * self.p1.x
        else:
            print('Could not perform this change.')
|
import sys
# Redirect stdin so input() reads the test data file.
sys.stdin = open('6109.txt')
# Number of test cases.
t = int(input())
def _merge_line(values):
    """Collapse one row/column toward its front, 2048-style.

    ``values`` is the line read in the direction of the move.  Non-zero
    tiles are packed together, then each pair of equal neighbours merges
    once (the front tile doubles, its partner is consumed).  Returns the
    surviving tile values; the caller pads the line with zeros.
    """
    temp = [v for v in values if v != 0]
    temp.append(0)  # sentinel so the pairwise scan can look one past the end
    merged = []
    for i in range(len(temp) - 1):
        if temp[i] == temp[i + 1]:
            merged.append(temp[i] * 2)
            temp[i + 1] = 0  # consume the partner so it cannot merge again
        elif temp[i] != 0:
            merged.append(temp[i])
    return merged


def reset(dir, grid=None, size=None):
    """Slide and merge the board in direction ``dir`` in place.

    ``dir`` is 'up', 'down' or 'right'; any other value means left (as in
    the original four-branch implementation, now collapsed into one merge
    helper).  ``grid``/``size`` default to the module-level ``data``/``n``
    so the existing ``reset(s)`` call sites keep working; passing them
    explicitly makes the function usable without globals.
    """
    if grid is None:
        grid = data
    if size is None:
        size = n
    if dir in ('up', 'down'):
        for x in range(size):
            # Read each column in the direction of the move, merge, write back.
            order = range(size) if dir == 'up' else range(size - 1, -1, -1)
            line = _merge_line([grid[y][x] for y in order])
            for y in order:
                grid[y][x] = line.pop(0) if line else 0
    else:
        for y in range(size):
            order = range(size - 1, -1, -1) if dir == 'right' else range(size)
            line = _merge_line([grid[y][x] for x in order])
            for x in order:
                grid[y][x] = line.pop(0) if line else 0
# For each test case: read the board size and move direction, apply the
# move, then print the resulting board prefixed with '#<case number>'.
for tc in range(1,t+1):
    n, s = input().split()
    n = int(n)
    data = []
    for i in range(n):
        data.append(list(map(int,input().split())))
    reset(s)
    print('#%d' %tc)
    for y in range(n):
        for x in range(n):
            # Space-separate values within a row; newline after the last one.
            if x != n-1:
                print('%d' %data[y][x], end = ' ')
            else:
                print('%d' %data[y][x])
|
from ResNet import ResNet
from DenseNet import DenseNet
from SENet import SENet
from keras.datasets import mnist
from keras.utils import to_categorical
import numpy as np
import argparse
import json
# Command-line interface: output locations, optimizer hyperparameters and
# the JSON file describing the network architecture.
parser = argparse.ArgumentParser(description='Process the data and parameters')
# LOG, MODEL
parser.add_argument('--model_type', default="ResNet", help='the type of model [ResNet]')
parser.add_argument('--log_path', default="train.log", help='the path of log [train.log]')
parser.add_argument('--model_path', default="model.hdf5", help='the path of model [model.hdf5]')
parser.add_argument('--ans_path', default="answer.csv", help='the path of predict result [answer.csv]')
# TRAINING PARAMETERS
parser.add_argument('--learning_rate', type=float, default=0.1, help='the learning rate [0.1]')
parser.add_argument('--decay_rate', type=float, default=0.9, help='the decay rate of learning rate [0.9]')
parser.add_argument('--batch_size', type=int, default=32, help='the batch size [32]')
parser.add_argument('--nb_epoch', type=int, default=50, help='the number of training epochs [50]')
parser.add_argument('--momentum', type=float, default=0.9, help='the momentum rate [0.9]')
parser.add_argument('--val_split', type=float, default=0.2, help='the split of validation data [0.2]')
parser.add_argument('--model_data', default="jsons/resnet_34.json", help='the path of hyperparameters of the network [model.json]')
args = parser.parse_args()
# Load the network hyperparameters and build the requested architecture.
with open(args.model_data, 'r') as file: model_data = json.load(file)
if args.model_type == "ResNet": net = ResNet(model_data)
elif args.model_type == "DenseNet": net = DenseNet(model_data)
elif args.model_type == "SENet": net = SENet(model_data)
# Previously an unknown model_type left 'net' undefined and crashed later
# with a confusing NameError; fail fast with a clear message instead.
else: raise ValueError("Unknown model_type: {}".format(args.model_type))
# MNIST comes as (28, 28) grayscale images; add a trailing channel axis
# for the conv nets.  Labels are one-hot encoded for training only —
# test_y stays as integer classes for the accuracy comparison below.
(train_X, train_y), (test_X, test_y) = mnist.load_data()
train_X, test_X = np.expand_dims(train_X, 3), np.expand_dims(test_X, 3)
train_y = to_categorical(train_y)
net.train(
    images=train_X,
    labels=train_y,
    nb_epoch=args.nb_epoch,
    batch_size=args.batch_size,
    lr=args.learning_rate,
    decay=args.decay_rate,
    momentum=args.momentum,
    val_split=args.val_split,
    log_path=args.log_path,
    model_path=args.model_path)
# tokens are the predicted integer class labels.
prediction, tokens = net.test(
    images=test_X,
    batch_size=args.batch_size,
    model_path=args.model_path)
cnt = 0
for idx, token in enumerate(tokens):
    if token == test_y[idx]: cnt += 1
print("Accuracy: {}%".format(float(cnt) / len(tokens) * 100))
# Dump the predictions as a simple id,val CSV.
with open(args.ans_path, 'w') as file:
    file.write("id,val\n")
    for idx, token in enumerate(tokens):
        file.write("{},{}\n".format(idx, token))
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 19:38:25 2018
@author: 王磊
"""
import numpy as np
import matplotlib.pyplot as plt
#t=np.arange(0.,5.,0.2)
#plt.plot(t,t,'r--',t,t**2,'bs',t,t**3,'g^')
#plt.show()
#
#
#plt.plot([1,2,3,4])
#plt.ylabel('some numbers')
#plt.show()
#
#plt.plot([1,2,3,4],[1,4,9,16],'ro')
#plt.axis([0,6,0,20])
#plt.show()
#
#plt.plot([1,2,3,4],[1,4,9,16],'r-',linewidth=2.0)
#plt.show()
#line = plt.plot([1,2,3,4,5],[1,2,3,2,1],[1,2,3,4,5],[1,2,3,2,1],)
##line.set_antialiased(False) # turn off antialising
#plt.step(line,
# color='r',
# linewidth=4.0)
|
from behave import *
import ast
from lib.kata_generator_templater import KataGeneratorTemplater
@given("kata name underscored")
def set_up_params_for_humanize_kata_name(context):
context.templater = KataGeneratorTemplater("underscored_kata_name", [])
@when("method 'humanize_kata_name' is called")
def execute_humanize_kata_name(context):
context.humanize_kata_name_result = context.templater.humanize_kata_name()
@then("it returns kata name with separated words")
def test_separation_of_words(context):
assert(context.humanize_kata_name_result == "Underscored kata name")
@then("capitalized first letter")
def test_capitalization_of_first_letter(context):
assert(context.humanize_kata_name_result[0] == "U")
@given("params = {params}")
def set_up_params_for_format_params_string_for_given_clause(context, params):
context.templater = KataGeneratorTemplater("test", ast.literal_eval(params))
@when("method 'format_params_string_for_given_clause' called")
def execute_format_params_string_for_given_clause(context):
context.format_params_for_given_clause_result = context.templater.format_params_string_for_given_clause()
@then("method 'format_params_string_for_given_clause' returns {result}")
def test_format_params_for_given_clause_result(context, result):
assert(context.format_params_for_given_clause_result == ast.literal_eval(result))
@given("array of params = {params}")
def set_up_params_for_format_params_string_for_given_decorator(context, params):
context.templater = KataGeneratorTemplater("test", ast.literal_eval(params))
@when("method 'format_params_string_for_given_decorator' called")
def execute_format_params_string_for_given_decorator(context):
context.format_params_for_given_decorator_result = context.templater.format_params_string_for_given_decorator()
@then("method 'format_params_string_for_given_decorator' returns {result}")
def test_format_params_for_given_decorator_result(context, result):
assert(context.format_params_for_given_decorator_result == ast.literal_eval(result))
@given("params like {params}")
def set_up_params_for_format_params_string_for_given_method_arguments(context, params):
context.templater = KataGeneratorTemplater("test", ast.literal_eval(params))
@when("method 'format_params_string_for_given_method_arguments' called")
def execute_format_params_string_for_given_method_arguments(context):
context.format_params_for_given_method_arguments_result = context.templater.format_params_string_for_given_method_arguments()
@then("method 'format_params_string_for_given_method_arguments' returns {result}")
def test_format_params_for_given_method_arguments_result(context, result):
assert(context.format_params_for_given_method_arguments_result == ast.literal_eval(result))
@given("set of params = {params}")
def set_up_params_for_format_params_string_for_given_method_definition(context, params):
context.templater = KataGeneratorTemplater("test", ast.literal_eval(params))
@when("method 'format_params_string_for_given_method_definition' called")
def execute_format_params_string_for_given_method_definition(context):
context.format_params_for_given_method_definition_result = context.templater.format_params_string_for_given_method_definition()
@then("method 'format_params_string_for_given_method_definition' returns {result}")
def test_format_params_for_given_method_definition_result(context, result):
assert(context.format_params_for_given_method_definition_result == ast.literal_eval(result))
@given("list of params = {params}")
def set_up_params_for_format_params_for_method_call(context, params):
context.templater = KataGeneratorTemplater("test", ast.literal_eval(params))
@when("method 'format_params_for_method_call' called")
def execute_format_params_for_method_call(context):
context.format_params_for_method_call_result = context.templater.format_params_for_method_call()
@then("method 'format_params_for_method_call' returns {result}")
def test_format_params_for_method_call_result(context, result):
assert(context.format_params_for_method_call_result == ast.literal_eval(result))
|
def replace_spaces(s):
    """Return s with every space replaced by '%20' (URL-style encoding).

    str.replace performs the single pass the original character-list loop
    did by hand.
    """
    return s.replace(" ", "%20")


# Parenthesized print works on both Python 2 and 3 (the bare print
# statement was Python-2 only and a SyntaxError on Python 3).
print(replace_spaces("Mr John Smith "))
|
# Store another array and copy over non-zero elements to that
# array, zero element increment a counter- append that many
# zeroes to the end of the array
# O(n) space
# O(n) runtime
# [1, 10, 0, 2, 8, 3, 0, 0, 6, 4, 0, 5, 7, 0]
# [1, 10, 2, 8, 3, 6, ]
# index of last non_zero element =
# count = 3
# O(n) time, O(1) space
def moveZerosToEnd(arr):
    """Stably move every zero in arr to the end, in place; returns arr.

    Two passes: compact the non-zero values to the front (preserving their
    relative order), then zero-fill the tail.  O(n) time, O(1) extra space.
    """
    write = 0
    for value in arr:          # the write cursor never overtakes the read cursor
        if value != 0:
            arr[write] = value
            write += 1
    for i in range(write, len(arr)):
        arr[i] = 0
    return arr


# Parenthesized print works on both Python 2 and 3 (the bare print
# statement was Python-2 only and a SyntaxError on Python 3).
print(moveZerosToEnd([1, 10, 0, 2, 8, 3, 0, 0, 6, 4, 0, 5, 7, 0]))
|
import unittest
from tests.printer_test import TestPrinter
# Running this module directly discovers and runs the imported test cases
# (TestPrinter is imported above so unittest.main() can find it).
if __name__ == '__main__':
    unittest.main()
|
l1 =["Arvind","Hema","Bhavya","pooja","Nazreen"]
l2 = ["Bangalore","Tirupathi","Kolar","Bangalore","Mumbai"]
for l3 in zip(l1,l2):
print(l3) |
import numpy as np
# A 2 x 8 grid reshaped into a 4 x 4 view (same 16 elements).
arr = np.array([[1, 2, 3, 4, 7, 8, 9, 10],
                [11, 12, 13, 14, 17, 18, 19, 20]])
print(arr.reshape(4, 4))

# 12 consecutive integers arranged as a 2 x 3 x 2 block.
arr1 = np.arange(1, 13)
newarr = arr1.reshape(2, 3, 2)
print("3D Array is:")
print(newarr)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .depot import MAX_FILE_SIZE
from .model import Object, Member
from sqlalchemy import (
Column, Integer, String, ForeignKey, DateTime, Boolean, sql, LargeBinary)
import sqlalchemy.orm
class Keylist(Object):
    """An entry in the key register (one named list of keys)."""
    # Free-text subject/title of the list.
    subject = Column(String)
class Key(Object):
    """A single physical key."""
    # Owning key list; deleting the list cascades to its keys.
    keylist_id = Column(
        Integer, ForeignKey(Keylist.id, ondelete='cascade'), nullable=False)
    keylist = sqlalchemy.orm.relation(
        'Keylist', uselist=False, backref='keys', cascade='all')
    # Serial number engraved on the key.
    serial = Column(String)
    # Member the key is currently handed out to (NULL while in stock).
    member_id = Column(Integer, ForeignKey(Member.id), nullable=True)
    member = sqlalchemy.orm.relation('Member', uselist=False, backref='keys')
    # When the key was handed out; NULL when not rented.
    rent = Column(DateTime, nullable=True)
    note = Column(String)
    # Marks a key reported as lost.
    lost = Column(Boolean, default=sql.false())
class KeylistAttachment(Object):
    """A file attached to a key-register entry."""
    # Owning key list; deleting the list cascades to its attachments.
    keylist_id = Column(
        Integer, ForeignKey(Keylist.id, ondelete='cascade'), nullable=False)
    keylist = sqlalchemy.orm.relation(
        'Keylist', uselist=False, backref='attachments', cascade='all')
    # Original file name, MIME type and human-readable size of the upload.
    name = Column(String(100), default=u'')
    mimetype = Column(String(30), default=u'')
    size = Column(String(20), default=u'')
    # Raw file contents, capped at MAX_FILE_SIZE.
    data = Column(LargeBinary(MAX_FILE_SIZE))
|
import argparse
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
batch_size = 64
# MNIST train/test sets as [0, 1] tensors; downloaded on first run.
train_dataset = datasets.MNIST(root='/data/', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root='/data/', train=False, transform=transforms.ToTensor())
# Shuffle only the training data; evaluation order does not matter.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
class CNN(torch.nn.Module):
    """Two conv + max-pool blocks followed by one linear layer for MNIST.

    Input: (N, 1, 28, 28) images; output: (N, 10) log-probabilities.
    """
    def __init__(self):
        super(CNN, self).__init__()
        # Spatial sizes: 28 -> conv(5) 24 -> pool 12 -> conv(5) 8 -> pool 4.
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        # Single 2x2 max-pooling layer shared by both conv blocks.
        self.mp = torch.nn.MaxPool2d(2)
        # Fully connected layer: 20 channels * 4 * 4 = 320 features.
        self.fc = torch.nn.Linear(320, 10)
    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = F.relu(self.mp(self.conv2(x)))
        x = x.view(in_size, -1)  # flatten to (N, 320)
        x = self.fc(x)
        # dim=1 made explicit: the implicit-dim form is deprecated and only
        # produced the same result with a warning.
        return F.log_softmax(x, dim=1)
model = CNN()
# SGD with momentum to smooth gradient updates (momentum does not change
# the learning rate itself; the previous comment was misleading).
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(epoch):
    """Run one training epoch over train_loader, logging every 10 batches."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # NOTE(review): Variable() is a deprecated no-op wrapper on modern
        # PyTorch — kept as-is for compatibility with the original code.
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx %10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format
                  (epoch, batch_idx*len(data), len(train_loader.dataset),
                100 * batch_idx / len(train_loader), loss.data))
def test():
    """Evaluate the model on the test set; print average loss and accuracy."""
    model.eval()
    test_loss = 0
    correct = 0
    # Inference only: torch.no_grad() replaces the removed
    # Variable(..., volatile=True) API and avoids building autograd graphs.
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)
            # Sum (not average) each batch's loss; the division by the
            # dataset size below then yields the true mean loss.
            # reduction='sum' is the modern spelling of size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').data
            pred = output.data.max(1, keepdim=True)[1]  # index of max log-prob
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Train for two epochs, evaluating on the test set after each.
for epoch in range(1, 3):
    train(epoch)
    test()
# -*- coding:utf-8 -*-
# Author: LiuSha
import os
import croniter
import importlib.util as import_module
from uuid import uuid1
from datetime import datetime
from dateutil.tz import tzlocal, gettz
from rq import get_current_job, worker
from rq_scheduler.utils import to_unix
from rq_scheduler.scheduler import Scheduler as rScheduler
from ecs import json
from flask_rq2 import RQ
from flask_rq2.job import FlaskJob
__all__ = ["run", "get_results", "uuid", "task_list", "InvalidFnError", "InvalidConfigError", "BaseTask"]
rq = RQ()
class InvalidFnError(Exception):
    """Raised when a task function name cannot be resolved."""

    def __init__(self, message=None):
        # An empty or missing message falls back to the default text.
        super().__init__(message if message else "Invalid function name")
class InvalidConfigError(Exception):
    """Raised when a task configuration is malformed or missing."""

    def __init__(self, message=None):
        # An empty or missing message falls back to the default text.
        super().__init__(message if message else "Invalid config")
def uuid():
    """Return a time-based UUID1 as a 32-character hex string (no dashes)."""
    # .hex is exactly str(uuid1()) with the dashes removed.
    return uuid1().hex
def absolute_path():
    """Return the directory containing this module, as an absolute path."""
    # os.path.join with a single argument was a no-op in the original.
    return os.path.dirname(os.path.abspath(__file__))
def task_list():
    """Names (without the .py suffix) of all task modules next to this file."""
    filenames = os.listdir(absolute_path())
    return [name[:-3]
            for name in filenames
            if name.endswith('.py') and name != '__init__.py']
def get_module(fn):
    """Load task module ``fn`` fresh from its .py file and return it."""
    source_path = f"{absolute_path()}/{fn}.py"
    module_spec = import_module.spec_from_file_location(fn, source_path)
    module = import_module.module_from_spec(module_spec)
    module_spec.loader.exec_module(module)
    return module
def get_results(job_id, time=None):
    """Fetch an rq job and summarise its outcome.

    :param str job_id: job id
    :param datetime time: elapsed seconds (formatted); stays None while the
        task has not completed
    :return dict: result, status, time and description of the job
    """
    job = rq.get_queue().fetch_job(job_id)
    if job.created_at and job.ended_at:
        time = f"{(job.ended_at - job.created_at).total_seconds():.2f}"
    description = job.description
    # Strip the module prefix from the description.  str.lstrip() was wrong
    # here: it removes any leading characters from the SET "ecs.tasks",
    # mangling descriptions such as "ecs.tasks.cleanup" -> "leanup".
    prefix = "ecs.tasks."
    if isinstance(description, str) and description.startswith(prefix):
        description = description[len(prefix):]
    return dict(result=job.result, status=job.status, time=time, description=description)
@rq.job(result_ttl=86400)
def run(mn, fn, *args, **kwargs):
    """
    Load task module ``mn``, look up its task class and execute method ``fn``.

    :param mn: module name of task
    :param fn: function name of task
    :param args: function args of task
    :param kwargs: function kwargs of task
    :return: whatever the task class's ``execute`` returns
    """
    module = get_module(mn)
    # Convention: module "foo" defines its task class as "Foo".
    klass = getattr(module, mn.capitalize())
    return klass.execute(fn, *args, **kwargs)
class BaseTask(object):
    """Common helpers for task classes: job lookup and log bookkeeping."""

    # Terminal task states.
    failed = "failed"
    finished = "finished"
    suspend = "suspend"

    def __init__(self, job_id=None, cron=False):
        self.run = run
        self.uuid = uuid
        self.cron = cron
        self.task_log = None
        # Cron-triggered tasks have no backing rq job to attach to.
        if self.cron:
            return
        if job_id:
            self.job = rq.get_queue().fetch_job(job_id)
        else:
            self.job = get_current_job()

    @staticmethod
    def _strip_prefix(description):
        """Remove the "ecs.tasks." module prefix from a job description.

        str.lstrip() was wrong for this: it strips a character *set*, so it
        mangled descriptions such as "ecs.tasks.cleanup" -> "leanup".
        """
        prefix = "ecs.tasks."
        if isinstance(description, str) and description.startswith(prefix):
            return description[len(prefix):]
        return description

    def get_job_data(self):
        """Return a serialisable snapshot of the current job's metadata."""
        return {
            "id": self.job.id,
            "status": self.job.status,
            "meta": {"ttl": self.job.ttl, "timeout": self.job.timeout, "key": self.job.key.decode()},
            "time": None,
            "result": None,
            "description": self._strip_prefix(self.job.description),
            "created_at": self.job.created_at,
            "started_at": self.job.started_at,
            "ended_at": None
        }

    def update_task_log(self, result=None, status=None):
        """Finalise the task: persist the log entry (non-cron tasks only)
        and mark the rq job with a terminal status.  Returns the result.
        """
        # Any unknown status collapses to "finished"; an empty result
        # defaults to the status string.
        if status not in [self.finished, self.failed, self.suspend]:
            status = self.finished
        if not result:
            result = status
        if not self.cron and self.task_log:
            self.task_log.ended_at = datetime.utcnow()
            self.task_log.time = f"{(self.task_log.ended_at - self.job.created_at).total_seconds():.2f}"
            self.task_log.status = status
            # Structured results are stored as JSON text.
            if isinstance(result, (dict, list)):
                self.task_log.result = json.dumps(result)
            else:
                self.task_log.result = result
            self.task_log.save()
        self.job.set_status(status)
        return result
def cancel(job_id):
    """Cancel the scheduled rq job with the given id, if it exists."""
    for scheduled in rq.get_scheduler().get_jobs():
        if scheduled._id == job_id:
            rq.get_scheduler().cancel(scheduled)
class Scheduler(rScheduler):
    """rq_scheduler.Scheduler specialised for Flask-RQ2 jobs: uses FlaskJob,
    interprets schedule times in the local timezone, and converts cron
    strings to UTC fire times."""
    job_class = FlaskJob
    def __init__(self, *args, **kwargs):
        super(Scheduler, self).__init__(*args, **kwargs)
    @classmethod
    def get_next_scheduled_time(cls, cron_string):
        """Calculate the next scheduled time by creating a crontab object
        with a cron string"""
        # Evaluate the cron expression in local time, then convert to UTC
        # so the stored fire time is timezone-independent.
        itr = croniter.croniter(cron_string, datetime.now(tzlocal()))
        return itr.get_next(datetime).astimezone(gettz("UTC"))
    def schedule(self, scheduled_time, func, args=None, kwargs=None,
                 interval=None, repeat=None, result_ttl=None, ttl=None,
                 timeout=None, id=None, description=None, queue_name=None):
        """
        Schedule a job to be periodically executed, at a certain interval.
        """
        # Set result_ttl to -1 for periodic jobs, if result_ttl not specified
        if interval is not None and result_ttl is None:
            result_ttl = -1
        job = self._create_job(func, args=args, kwargs=kwargs, commit=False,
                               result_ttl=result_ttl, ttl=ttl, id=id or uuid(),
                               description=description, queue_name=queue_name,
                               timeout=timeout)
        # Naive datetimes are interpreted as local time.
        scheduled_time = scheduled_time.replace(tzinfo=tzlocal())
        if interval is not None:
            job.meta['interval'] = int(interval)
        if repeat is not None:
            job.meta['repeat'] = int(repeat)
        if repeat and interval is None:
            raise ValueError("Can't repeat a job without interval argument")
        job.save()
        # Scheduled jobs live in a redis sorted set keyed by unix fire time.
        self.connection._zadd(self.scheduled_jobs_key,
                              to_unix(scheduled_time),
                              job.id)
        return job
    def cron(self, cron_string, func, args=None, kwargs=None, repeat=None,
             queue_name=None, id=None, timeout=None, description=None):
        """
        Schedule a cronjob
        """
        scheduled_time = self.get_next_scheduled_time(cron_string)
        # Set result_ttl to -1, as jobs scheduled via cron are periodic ones.
        # Otherwise the job would expire after 500 sec.
        job = self._create_job(func, args=args, kwargs=kwargs, commit=False,
                               result_ttl=-1, id=id or uuid(), queue_name=queue_name,
                               description=description, timeout=timeout)
        job.meta['cron_string'] = cron_string
        if repeat is not None:
            job.meta['repeat'] = int(repeat)
        job.save()
        self.connection._zadd(self.scheduled_jobs_key,
                              to_unix(scheduled_time),
                              job.id)
        return job
    def enqueue_job(self, job):
        """
        Move a scheduled job to a queue. In addition, it also does puts the job
        back into the scheduler if needed.
        """
        self.log.debug('Pushing {0} to {1}'.format(job.id, job.origin))
        interval = job.meta.get('interval', None)
        repeat = job.meta.get('repeat', None)
        cron_string = job.meta.get('cron_string', None)
        # If job is a repeated job, decrement counter
        if repeat:
            job.meta['repeat'] = int(repeat) - 1
        queue = self.get_queue_for_job(job)
        queue.enqueue_job(job)
        # Remove from the scheduled set; re-added below if it recurs.
        self.connection.zrem(self.scheduled_jobs_key, job.id)
        if interval:
            # If this is a repeat job and counter has reached 0, don't repeat
            if repeat is not None:
                if job.meta['repeat'] == 0:
                    return
            self.connection._zadd(self.scheduled_jobs_key,
                                  to_unix(datetime.utcnow()) + int(interval),
                                  job.id)
        elif cron_string:
            # If this is a repeat job and counter has reached 0, don't repeat
            if repeat is not None:
                if job.meta['repeat'] == 0:
                    return
            self.connection._zadd(self.scheduled_jobs_key,
                                  to_unix(self.get_next_scheduled_time(cron_string)),
                                  job.id)
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
"""Function implementation"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from fn_cb_protection.util.bit9_client import CbProtectClient
from resilient_lib import validate_fields
log = logging.getLogger(__name__)
class FunctionComponent(ResilientComponent):
    """Component that implements Resilient function 'bit9_file_instance_update"""
    def __init__(self, opts):
        """constructor provides access to the configuration options"""
        super(FunctionComponent, self).__init__(opts)
        # Section [fn_cb_protection] of the app config.
        self.options = opts.get("fn_cb_protection", {})
    @handler("reload")
    def _reload(self, event, opts):
        """Configuration options have changed, save new values"""
        self.options = opts.get("fn_cb_protection", {})
    @function("bit9_file_instance_update")
    def _bit9_file_instance_update_function(self, event, *args, **kwargs):
        """Function: Update the approval state of a file instance"""
        try:
            # Both inputs are required; validate_fields raises when missing.
            validate_fields(["bit9_file_instance_id", "bit9_file_instance_localstate"], kwargs)
            # Get the function parameters:
            bit9_file_instance_id = kwargs.get("bit9_file_instance_id")  # number
            bit9_file_instance_localstate = kwargs.get("bit9_file_instance_localstate")  # number
            log.info(u"bit9_file_instance_id: %s", bit9_file_instance_id)
            log.info(u"bit9_file_instance_localstate: %s", bit9_file_instance_localstate)
            payload = {
                "localState": bit9_file_instance_localstate
            }
            bit9_client = CbProtectClient(self.options)
            results = bit9_client.update_file_instance(bit9_file_instance_id, payload)
            log.info("Done")
            log.debug(results)
            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception as err:
            # Report the failure back to the Resilient platform.
            log.error(err)
            yield FunctionError(err)
|
# howdy/views.py
from django.shortcuts import render
from django.views.generic import TemplateView
from django.http import HttpResponseRedirect
from .forms import NameForm
# Create your views here.
class HomePageView(TemplateView):
    """Landing page: renders index.html with a blank NameForm."""
    def get(self, request, **kwargs):
        form = NameForm()
        return render(request, 'index.html', {'form':form})
class AboutPageView(TemplateView):
    """Static about page."""
    template_name = "about.html"
class NamePageView(TemplateView):
    """Static page that displays the submitted name."""
    template_name = "yourname.html"
def get_name(request):
    """Handle the name form: on a valid POST, render yourname.html with the
    submitted fields; on GET or invalid input, render it with blank values."""
    # Defaults prevent the UnboundLocalError the original hit on GET
    # requests and on POSTs that failed validation.
    firstname = surname = birthdate = None
    # if this is a POST request we need to process the form data
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = NameForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            firstname = form.cleaned_data['firstname']
            surname = form.cleaned_data['surname']
            birthdate = form.cleaned_data['birthdate']
    # if a GET (or any other method) we'll create a blank form
    else:
        form = NameForm()
    return render(request, 'yourname.html', {'firstname':firstname, 'surname':surname, 'birthdate':birthdate})
|
#!/usr/bin/env python3
import argparse
import logging
import imageio
from tqdm import tqdm
import pickle
parser = argparse.ArgumentParser()
# argparse opens both files itself: the input video for binary reading
# and the output pickle for binary writing.
parser.add_argument('--video', '-v', type=argparse.FileType('rb'))
parser.add_argument('--output', '-o', type=argparse.FileType('wb'))
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s %(message)s')
logger = logging.getLogger('keyframe_every_n')
logger.setLevel(10)  # 10 == logging.DEBUG
# open the video file
video = imageio.get_reader(args.video, 'ffmpeg')


def grab_frame(number):
    """Return frame ``number`` of the video as an RGB array."""
    return video.get_data(number)


keyframes = []
# NOTE(review): newer imageio versions may return inf here — confirm the
# installed version returns a frame count.
num_frames = video.get_length()
target_keyframes = 24
# max(1, ...) keeps the stride valid for clips shorter than target_keyframes
# frames; the original step of 0 made range() raise ValueError.
step = max(1, int(num_frames / target_keyframes))
duration_s = step / 20.0  # NOTE(review): assumes a ~20 fps source — confirm
for keyframe_number in tqdm(range(0, num_frames, step)):
    keyframe = grab_frame(keyframe_number)
    keyframes.append((keyframe_number, keyframe_number, duration_s, keyframe))
# Cap the list once after the loop (the original truncated on every
# iteration past the limit; the final contents are the same).
keyframes = keyframes[:target_keyframes]
# sort keyframes by their start time
keyframes = sorted(keyframes, key=lambda k: k[0])
pickle.dump(keyframes, args.output)
|
import pyshark
import argparse
import itertools
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
fig = plt.figure()
ax = fig.add_subplot(111)
request_time = 0
response_time = 0
file_name = ""
# Accumulators for (packet number, epoch timestamp) pairs per traffic class.
timestamp_tls_phone_to_server = []
timestamp_tls_server_to_phone = []
timestamp_tcp_phone_to_server = []
timestamp_tcp_phone_to_phone = []
call_control = []
floor_control = []
access_time = []
parser = argparse.ArgumentParser(description='Process the wireshark files')
parser.add_argument("pcap_name", help="Type the pcap file name to parse the data", type=str)
args = parser.parse_args()
pcap_file_name = args.pcap_name
# The capture is expected to live in the current working directory.
current_path = Path.cwd()
file_name = str(current_path) + "/" + pcap_file_name
print(f"Path to pcap file : {file_name}")
def GenericFilter(name_file, filter):
    """Return [(packet_number, epoch_timestamp), ...] for every packet in
    name_file that matches the given Wireshark display filter.
    """
    print("TLS filter : ", filter)
    capture = pyshark.FileCapture(name_file, display_filter=filter)
    #print(len(capture)) #Check why length is not working
    packet_and_timestamp = []
    for packet in capture:
        packet_and_timestamp.append((packet.number, float(packet.frame_info.time_epoch)))
    # FIX: close the capture so the underlying tshark process is released
    # (the original leaked one subprocess/event loop per call).
    capture.close()
    return packet_and_timestamp
def FloorControl(name_file, filter, data_to_get):
    """For each (packet_number, ts) pair in data_to_get, find the first packet
    matching `filter` + that packet number and collect its (number, timestamp).

    `filter` is a display-filter prefix ending in "frame.number>= "; the packet
    number string is appended per iteration.
    """
    packet_and_timestamp = []
    print(f"Data to get : {data_to_get}")
    for pkt in data_to_get:
        pkt_number = pkt[0]
        final_filter = filter + pkt_number
        print(final_filter)
        capture = pyshark.FileCapture(name_file, display_filter=final_filter)
        packet_length = len([pkt for pkt in capture])
        if packet_length > 0:
            packet_and_timestamp.append((capture[0].number, float(capture[0].frame_info.time_epoch)))
        else:
            print("No Packets found. Packets are missing for floor control")
        capture.close()
        # NOTE(review): a bound method is always truthy, so this branch always
        # runs; presumably it was meant to check/await the async close —
        # confirm against the pyshark version in use.
        if capture.eventloop.run_until_complete:
            capture.close_async()
    print(packet_and_timestamp)
    return packet_and_timestamp
# Endpoint addresses (hard-coded; the interactive prompts are commented out).
phone_src_ip = "12.1.1.3" #input("Provide the IP of the source phone : ")
serverip_ip = "169.55.65.207" #input("Provide the IP of the server : ")
phone_dst_ip = "169.45.211.199" #input("Provide the IP of the destination phone : ")
#phone_src_ip = "12.1.1.2" #input("Provide the IP of the source phone : ")
#serverip_ip = "169.45.211.199" #input("Provide the IP of the server : ")
#phone_dst_ip = "169.55.65.207" #input("Provide the IP of the destination phone : ")
# Display filters for the four packet categories of interest.
call_control_from_phone = "ip.src=="+ phone_src_ip +" and ip.dst=="+ serverip_ip + " and tls.record.version==0x0303 and frame.len >= 1000 and frame.len<=1010"
call_control_from_server = "ip.src=="+ serverip_ip +" and ip.dst=="+ phone_src_ip + " and tls.record.version==0x0303 and frame.len >= 445 and frame.len<=500"
floor_control_to_server = "ip.src==" + phone_src_ip +" and ip.dst=="+ serverip_ip + " and tcp and not tls and frame.len >= 100"
floor_control_phone_to_phone = "ip.src==" + phone_src_ip + " and ip.dst==" + phone_dst_ip + " and frame.number>= "
timestamp_tls_phone_to_server = GenericFilter(file_name, call_control_from_phone)
print(f" Packet number and timestamp for call control from phone with tls : {timestamp_tls_phone_to_server}")
timestamp_tls_server_to_phone = GenericFilter(file_name, call_control_from_server)
print(f" Packet number and timestamp for call control from server with tls : {timestamp_tls_server_to_phone}")
timestamp_tcp_phone_to_server = GenericFilter(file_name, floor_control_to_server)
print(f" Packet number and timestamp for floor control from phone with TCP data : {timestamp_tcp_phone_to_server}")
timestamp_tcp_phone_to_phone = FloorControl(file_name, floor_control_phone_to_phone, timestamp_tcp_phone_to_server)
print(f" Packet number and timestamp for floor control from Phone to Phone with UDP data : {timestamp_tcp_phone_to_phone}")
# Pair the four categories up per call and compute latency deltas.
# NOTE(review): because of the elif chain, the floor-control loss checks only
# run when the TLS list lengths happen to be equal — confirm intended.
if(len(timestamp_tls_phone_to_server)==len(timestamp_tls_server_to_phone)==len(timestamp_tcp_phone_to_server)==len(timestamp_tcp_phone_to_phone)):
    for (a,b,c,d) in zip(timestamp_tls_phone_to_server, timestamp_tls_server_to_phone, timestamp_tcp_phone_to_server, timestamp_tcp_phone_to_phone):
        call_control.append(float(b[1])-float(a[1]))
        floor_control.append(float(d[1])-float(c[1]))
    print(f"Over all Call control values received : {call_control}")
    print(f"Over all Floor Control values received : {floor_control}")
    for (i,j) in zip(call_control,floor_control):
        access_time.append(float(i)+float(j))
    print(f"Final Access Time values received : {access_time}")
elif (len(timestamp_tls_phone_to_server) > len(timestamp_tls_server_to_phone)):
    print("Packet Loss from TLS server to Phone in Call Control")
elif (len(timestamp_tls_phone_to_server) < len(timestamp_tls_server_to_phone)):
    print("Packet Loss from Phone to TLS server in Call Control")
elif (len(timestamp_tcp_phone_to_server) > len(timestamp_tcp_phone_to_phone)):
    print("Packet Loss from Phone to Phone for Floor control")
elif (len(timestamp_tcp_phone_to_server) < len(timestamp_tcp_phone_to_phone)):
    print("Packet Loss from Phone to Server for Floor control")
# Plot the empirical CDF (in %) of the floor-control latencies.
number_of_samples = len(floor_control)
x = np.sort(floor_control)
print(f"sorted_data for floor control : {x}")
y = (np.arange(len(x)) / float(len(x)-1))*100
plt.xlabel('Floor Control Latency in Seconds')
#plt.xlim(0,0.02)
#plt.ylim(0,95)
plt.ylabel('CDF in %')
plt.title('KPI 1')
plt.plot(x, y, marker='o', color='r', markerfacecolor='blue', markersize=12, linewidth = 4, linestyle='dashed')
for i,j in zip(x,y):
    #ax.annotate('%s)' %j, xy=(i,j), xytext=(10,0), textcoords='offset points')
    ax.annotate('%s)' %i, xy=(i,j), xytext=(10,0), textcoords='offset points')
plt.plot(0.3, 95, "*")
plt.show()
'''
capture = pyshark.FileCapture('/Users/homestuck/Desktop/Freelancing/connection2.pcap', display_filter="s1ap")
for packet in capture:
    if hasattr(packet.s1ap, 'nas_eps_nas_msg_emm_type') and hasattr(packet.s1ap, 'nas_eps_nas_msg_esm_type'):
        if((int(packet.s1ap.nas_eps_nas_msg_emm_type)==int(65)) and (int(packet.s1ap.nas_eps_nas_msg_esm_type)==int(208))):
            request_time = float(packet.frame_info.time_epoch)
    if hasattr(packet.s1ap, 'successfuloutcome_element'):
        if (packet.s1ap.successfuloutcome_element == "successfulOutcome"):
            response_time = float(packet.frame_info.time_epoch)
            break
    else:
        continue
if request_time == 0:
    print("No Initial EU request found")
if response_time == 0:
    print("No setup response found")
if response_time >= 1 and request_time >= 1:
    print(f"Request Time : {request_time} seconds")
    print(f"Response Time : {response_time} seconds")
    delta = (response_time - request_time)
    print(f"Delta = {delta} seconds")
'''
#! /usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import imp
import os
import re
import sys
import textwrap
import types
# A markdown code block template: https://goo.gl/9EsyRi
_CODE_BLOCK_FORMAT = '''```{language}
{code}
```
'''
# Absolute path to the devil repository root (two directories above this file).
_DEVIL_ROOT = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..', '..'))
def md_bold(raw_text):
  """Returns markdown-formatted bold text, escaping embedded asterisks."""
  return '**' + md_escape(raw_text, characters='*') + '**'
def md_code(raw_text, language):
  """Returns a markdown-formatted code block tagged with the given language."""
  escaped = md_escape(raw_text, characters='`')
  return _CODE_BLOCK_FORMAT.format(language=language or '', code=escaped)
def md_escape(raw_text, characters='*_'):
  """Backslash-escapes each occurrence of the given characters."""
  def _backslash(match):
    return '\\' + match.group(0)
  return re.sub('[%s]' % re.escape(characters), _backslash, raw_text)
def md_heading(raw_text, level):
  """Returns a markdown-formatted heading; level is clamped to [0, 6]."""
  clamped = min(max(level, 0), 6)
  if clamped == 0:
    # Level 0 (or below) means no heading markup at all.
    return raw_text
  return '#' * clamped + ' ' + raw_text
def md_inline_code(raw_text):
  """Returns markdown-formatted inline code, escaping embedded backticks."""
  return '`' + md_escape(raw_text, characters='`') + '`'
def md_italic(raw_text):
  """Returns markdown-formatted italic text, escaping embedded asterisks."""
  return '*' + md_escape(raw_text, characters='*') + '*'
def md_link(link_text, link_target):
  """Returns a markdown-formatted link, escaping the bracket delimiters."""
  text = md_escape(link_text, characters=']')
  target = md_escape(link_target, characters=')')
  return '[{}]({})'.format(text, target)
class MarkdownHelpFormatter(argparse.HelpFormatter):
  """A really bare-bones argparse help formatter that generates valid markdown.
  This will generate something like:
  usage
  # **section heading**:
  ## **--argument-one**
  ```
  argument-one help text
  ```
  """
  #override
  def _format_usage(self, usage, actions, groups, prefix):
    # Render the standard usage string as a fenced code block.
    usage_text = super(MarkdownHelpFormatter, self)._format_usage(
        usage, actions, groups, prefix)
    return md_code(usage_text, language=None)
  #override
  def format_help(self):
    # Promote the program name to a level-1 markdown heading.
    self._root_section.heading = md_heading(self._prog, level=1)
    return super(MarkdownHelpFormatter, self).format_help()
  #override
  def start_section(self, heading):
    # Argument-group headings become level-2 markdown headings.
    super(MarkdownHelpFormatter, self).start_section(
        md_heading(heading, level=2))
  #override
  def _format_action(self, action):
    # Each argument renders as a level-3 heading plus its help as a code block.
    lines = []
    action_header = self._format_action_invocation(action)
    lines.append(md_heading(action_header, level=3))
    if action.help:
      lines.append(md_code(self._expand_help(action), language=None))
    lines.extend(['', ''])
    return '\n'.join(lines)
class MarkdownHelpAction(argparse.Action):
  """Argparse action that prints markdown-formatted help text and exits."""

  def __init__(self,
               option_strings,
               dest=argparse.SUPPRESS,
               default=argparse.SUPPRESS,
               **kwargs):
    kwargs.update(
        option_strings=option_strings,
        dest=dest,
        default=default,
        nargs=0)
    super(MarkdownHelpAction, self).__init__(**kwargs)

  def __call__(self, parser, namespace, values, option_string=None):
    # Swap in the markdown formatter before emitting help.
    parser.formatter_class = MarkdownHelpFormatter
    parser.print_help()
    parser.exit()
def add_md_help_argument(parser):
  """Attaches a --md-help flag to the given argparse.ArgumentParser.

  Running a script with --md-help prints that script's help text as valid
  markdown.

  Args:
    parser: The ArgumentParser to which --md-help should be added.
  """
  parser.add_argument('--md-help',
                      action=MarkdownHelpAction,
                      help='print Markdown-formatted help text and exit.')
def load_module_from_path(module_path):
  """Load a module given only the path name.
  Also loads package modules as necessary.
  Args:
    module_path: An absolute path to a python module.
  Returns:
    The module object for the given path.
  """
  # NOTE(review): relies on the `imp` module, which is deprecated and removed
  # in Python 3.12 — a migration to importlib would be needed there.
  # Collect the module name plus every enclosing package name by walking up
  # the directory tree while __init__.py files are present.
  module_names = [os.path.splitext(os.path.basename(module_path))[0]]
  d = os.path.dirname(module_path)
  while os.path.exists(os.path.join(d, '__init__.py')):
    module_names.append(os.path.basename(d))
    d = os.path.dirname(d)
  d = [d]
  module = None
  full_module_name = ''
  # Import the outermost package first so nested packages resolve correctly;
  # subsequent lookups search the previously loaded package's __path__.
  for package_name in reversed(module_names):
    if module:
      d = module.__path__
      full_module_name += '.'
    r = imp.find_module(package_name, d)
    full_module_name += package_name
    module = imp.load_module(full_module_name, *r)
  return module
def md_module(module_obj, module_link=None):
  """Prints markdown documentation for a module.

  Documents public classes and functions.

  Args:
    module_obj: a module object that should be documented.
    module_link: optional link target used for the module heading.
  Returns:
    0, suitable for use as an exit code.
  """
  def _documentable(name):
    member = module_obj.__dict__[name]
    return (not name.startswith('_')
            and not isinstance(member, types.ModuleType))

  members = [
      obj for name, obj in sorted(module_obj.__dict__.items())
      if _documentable(name)
  ]
  classes_to_doc = [m for m in members if isinstance(m, type)]
  functions_to_doc = [m for m in members if isinstance(m, types.FunctionType)]

  heading_text = module_obj.__name__
  if module_link:
    heading_text = md_link(heading_text, module_link)

  content = [
      md_heading(heading_text, level=1),
      '',
      md_italic('This page was autogenerated. '
                'Run `devil/bin/generate_md_docs` to update'),
      '',
  ]
  for cls in classes_to_doc:
    content += md_class(cls)
  for func in functions_to_doc:
    content += md_function(func)
  print('\n'.join(content))
  return 0
def md_class(class_obj):
  """Generates markdown documentation for a class.

  Documents public methods. Does not currently document subclasses.

  Args:
    class_obj: the class object that should be documented.
  Returns:
    A list of markdown-formatted lines.
  """
  content = [md_heading(md_escape(class_obj.__name__), level=2), '']
  if class_obj.__doc__:
    content.extend(md_docstring(class_obj.__doc__))

  def _documentable(name, obj):
    # Public methods plus dunder methods, but not single-underscore privates.
    if not isinstance(obj, types.FunctionType):
      return False
    return name.startswith('__') or not name.startswith('_')

  for name, obj in sorted(class_obj.__dict__.items()):
    if _documentable(name, obj):
      content.extend(md_function(obj, class_obj=class_obj))
  return content
def md_docstring(docstring):
  """Renders a docstring as markdown.

  The summary line is escaped; the remainder (if any) becomes a code block.

  Returns:
    A list of markdown-formatted lines.
  """
  lines = textwrap.dedent(docstring).splitlines()
  content = [md_escape(lines[0])]
  rest = lines[1:]
  # Drop blank lines between the summary and the body.
  while rest and (not rest[0] or rest[0].isspace()):
    rest = rest[1:]
  if not all(line.isspace() for line in rest):
    content.append(md_code('\n'.join(rest), language=None))
  content.append('')
  return content
def md_function(func_obj, class_obj=None):
  """Generates markdown documentation for a function or method.

  Args:
    func_obj: a types.FunctionType object for the function that should be
      documented.
    class_obj: optional owning class; when given, the heading is qualified
      as "Class.method".
  Returns:
    A list of markdown-formatted lines.
  """
  if class_obj:
    heading_text = '{}.{}'.format(class_obj.__name__, func_obj.__name__)
  else:
    heading_text = func_obj.__name__
  content = [md_heading(md_escape(heading_text), level=3), '']
  if func_obj.__doc__:
    content.extend(md_docstring(func_obj.__doc__))
  return content
def main(raw_args):
  """Writes markdown documentation for the module at the provided path.

  Args:
    raw_args: the raw command-line args. Usually sys.argv[1:].
  Returns:
    An integer exit code. 0 for success, non-zero for failure.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--module-link')
  parser.add_argument('module_path', type=os.path.realpath)
  parsed = parser.parse_args(raw_args)
  module = load_module_from_path(parsed.module_path)
  return md_module(module, module_link=parsed.module_link)
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
|
from setuptools import find_packages, setup
print(find_packages())  # NOTE(review): debug output left in; runs on every build
# Minimal packaging configuration for the `src` analysis package.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='Analysis into the disproportionate impact of Covid-19 on different groups through the prism of race. ',
    author='Quotennial',
    license='MIT',
)
|
#!/usr/bin/python
# Library imports
import sys
# ANSI terminal color escape codes
end = "\033[0m"
red = "\033[1;91m"
green = "\033[92m"
yellow = "\033[33m"
b = "\033[1m"
# Target domain for the dork URLs.
# NOTE(review): this runs before the argv-length check at the bottom of the
# file, so invoking the script without an argument raises IndexError instead
# of printing the usage message — confirm intended.
dominio = sys.argv[1]
#Detectar que comando ingresa
def command(x):
if x == "hola":
print "hola"
elif x == "1":
print "https://www.google.com.pe/search?&hl=en&q=site%3A"+ dominio + "+" + "filetype%3Asql"
elif x == "2":
print "https://www.google.com.pe/search?&hl=en&q=site%3A"+ dominio + "+" + "filetype%3APDF"
elif x == "3":
print "https://www.google.com.pe/search?&hl=en&q=site%3A"+ dominio + "+" + "filetype%3AJPG"
elif x == "3":
print "https://www.google.com.pe/search?&hl=en&q=site%3A"+ dominio + "+" + "filetype%3ATXT"
elif x == "3":
print "https://www.google.com.pe/search?&hl=en&q=site%3A"+ dominio + "+" + "filetype%3ARAR"
elif x == "3":
print "https://www.google.com.pe/search?&hl=en&q=site%3A"+ dominio + "+" + "filetype%3AZIP"
else:
print "invalid command"
# Menu options
def opciones():
    # Print the menu in green, then reset the terminal color.
    print green
    print " ===Menu==="
    print "Ingresa un Numero"
    print "1.- Buscar SQL"
    print "2.- Buscar PDF"
    print "3.- Buscar JPG"
    print "4.- Buscar TXT"
    print "5.- Buscar RAR"
    print "6.- Buscar ZIP"
    print end
# Persistent menu loop
def menu():
    # Re-display the menu until the user types "exit" or presses Ctrl-C.
    while True:
        try:
            print ""
            opciones()
            cmd = raw_input(yellow + b + "#! " + end)
            if cmd.lower() != "exit":
                command(cmd.lower())
            else:
                break
        except KeyboardInterrupt:
            break
# Entry point: require the target-domain argument, then run the menu.
if len(sys.argv) < 2:
    print "Modo de uso: dorkmaker.py dominio.com"
else:
    menu()
|
class Gift:
    """A gift with a name and a difficulty rating."""

    def __init__(self, name, difficulty):
        self.name = name
        self.difficulty = difficulty

    def __str__(self):
        # Serialized as "<name>;<difficulty>".
        return "{};{}".format(self.name, self.difficulty)
import dss
import pandas as pd
from Battery_Sizing import BatterySizing
class PVSizing:
    """Size a PV array for each configured grid-outage period.

    Loads the feeder model through OpenDSS, finds the worst-case aggregate
    demand during sunlit outage hours, and derates the required PV Pmpp by
    irradiance and panel temperature.
    """

    def __init__(self, periods):
        """periods: list of (power_out_hour, power_on_hour) tuples (0-23)."""
        BASE = '/mnt/6840331B4032F004/Users/MARTINS/Documents/Texts/Acad/OAU/Part 5/Rain Semester/EEE502 - Final Year Project II/Work/IEEE Euro LV/Master_Control.dss'
        ENG = dss.DSS
        TEXT = ENG.Text
        TEXT.Command = 'Clear'
        self.CIRCUIT = ENG.ActiveCircuit # Sets CIRCUIT to be the ActiveCircuit in DSS
        TEXT.Command = "compile '" + BASE + "'"
        ENG.AllowForms = False
        self.periods = periods
        # PV output derating factor keyed by panel temperature (deg C, as str).
        self.PT_curve = {
            '0': 1.2,
            '25': 1.0,
            '30': 0.98,
            '35': 0.96,
            '40': 0.94,
            '45': 0.92,
            '50': 0.90,
            '55': 0.88,
            '60': 0.86
        }
        # Hourly per-unit irradiance and panel temperature profiles (24 h).
        self.irrad_curve = [0, 0, 0, 0, 0, 0, 0.1, 0.2, 0.3, 0.5, 0.8, 0.9, 1.0, 1.0, 0.99, 0.9, 0.7, 0.4, 0.1, 0, 0, 0, 0, 0]
        self.temp_curve = [25, 25, 25, 25, 25, 25, 25, 25, 35, 40, 45, 50, 60, 60, 55, 40, 35, 30, 25, 25, 25, 25, 25, 25]

    def __get_total_loadshape__(self):
        """Sum the per-load minute-resolution load shapes into one 1440-step curve."""
        loadshapes = self.CIRCUIT.LoadShapes
        loadshapes_dict = {}
        # NOTE(review): the 55 shape names are hard-coded to this feeder model.
        for i in range(1,56):
            shape_name = f"Shape_{i}"
            loadshapes.Name = shape_name
            loadshapes_dict[shape_name] = loadshapes.Pmult
        total_loadshape = []
        for i in range(1440):
            total = sum((loadshapes_dict[shape][i]) for shape in loadshapes_dict)
            total_loadshape.append(total)
        return total_loadshape.copy()

    def size_PV(self):
        """Compute self.PV_sizes: period key -> required PV Pmpp (kW)."""
        total_loadshape = self.__get_total_loadshape__()
        bs = BatterySizing(self.periods)
        bs.size_battery()
        battery_sizes = bs.get_battery_sizes()
        self.PV_sizes = {}
        for period in self.periods:
            POWER_OUT_HOUR, POWER_ON_HOUR = period
            max_demand = 0
            max_demand_hour = 0
            backup_hours = []
            # Build the outage window, wrapping past midnight when needed.
            if POWER_OUT_HOUR < POWER_ON_HOUR:
                backup_hours = list(range(POWER_OUT_HOUR, POWER_ON_HOUR, 1))
            else:
                backup_hours = list(range(0, POWER_ON_HOUR, 1)) + list(range(POWER_OUT_HOUR, 24, 1))
            # Track the peak demand during hours with meaningful sun (>0.1 pu).
            for step in range(1440):
                hour = (step//60) % 24
                if (hour in backup_hours) and (self.irrad_curve[hour] > 0.1):
                    if total_loadshape[step] > max_demand:
                        max_demand = total_loadshape[step]
                        max_demand_hour = hour
            pmpp = 0
            charging_kw = battery_sizes[f"{POWER_OUT_HOUR}-{POWER_ON_HOUR}"]['kW']
            if max_demand == 0:
                # No sunlit demand in the window: size for battery charging only.
                pmpp = charging_kw * 1.25
            else:
                # 25% headroom, derated by irradiance and temperature at the peak hour.
                pmpp = (max_demand * 1.25) / (self.irrad_curve[max_demand_hour] * self.PT_curve[f"{self.temp_curve[max_demand_hour]}"])
            # Never size below what is needed to recharge the battery.
            if pmpp < charging_kw * 1.25:
                pmpp = charging_kw * 1.25
            self.PV_sizes[f"{POWER_OUT_HOUR}-{POWER_ON_HOUR}"] = pmpp

    def get_PV_sizes(self):
        """Return the dict of period key -> required PV Pmpp (kW)."""
        return self.PV_sizes

    def save_PV_sizes(self):
        """Write the PV sizes to 'Simulation results/PV_sizing.csv'."""
        PV_sizes = self.get_PV_sizes()
        # FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in
        # 2.0; collect the rows first and build the frame in a single call.
        rows = []
        for period in PV_sizes:
            start, end = period.split("-")
            rows.append({
                'Start hour': start,
                'End hour': end,
                'Required PV Pmpp (kW)': PV_sizes[period]
            })
        data = pd.DataFrame(rows, columns=['Start hour', 'End hour', 'Required PV Pmpp (kW)'])
        try:
            data.to_csv(f"Simulation results/PV_sizing.csv")
        except Exception as e:
            print(f"Could not save file: {str(e)}")
if __name__ == '__main__':
    # Size a PV array for each candidate outage window and report the results.
    periods = [(0,6), (6,12), (12,18), (18,0), (0,12), (12,0), (6,18), (18,6)]
    pvs = PVSizing(periods)
    pvs.size_PV()
    pvs.save_PV_sizes()
    PV_sizes = pvs.get_PV_sizes()
    for period in PV_sizes:
        start, end = period.split("-")
        print("+----------------------------------------")
        print(f"| {start} - {end}")
        print(f"| PV Pmpp: {PV_sizes[period]} kW")
        print("+----------------------------------------\n")
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import deque
class bnn(object):
    """Bayesian NN dynamics model predicting (mean, logvar) of the next observation.

    Weights follow fully factorized Gaussians; set_params() receives flat
    variational mean/rho vectors and reshapes them into per-layer tensors.
    """
    # * this network is utilized to generate the parameters(two parameters:mu & sigma)
    def __init__(self, observation_dim, action_dim, hidden_dim, max_logvar, min_logvar):
        self.observation_dim = observation_dim
        self.action_dim = action_dim
        self.hidden_dim = hidden_dim
        self.max_logvar = max_logvar
        self.min_logvar = min_logvar
        # Per-layer parameter placeholders (populated by set_params).
        self.w1_mu = torch.zeros(self.observation_dim + self.action_dim, self.hidden_dim)
        self.b1_mu = torch.zeros(self.hidden_dim)
        self.w2_mu = torch.zeros(self.hidden_dim, self.observation_dim * 2)
        self.b2_mu = torch.zeros(self.observation_dim * 2)
        self.w1_var = torch.zeros(self.observation_dim + self.action_dim, self.hidden_dim)
        self.b1_var = torch.zeros(self.hidden_dim)
        self.w2_var = torch.zeros(self.hidden_dim, self.observation_dim * 2)
        self.b2_var = torch.zeros(self.observation_dim * 2)
        # Flat slice sizes used to unpack the parameter vectors in set_params.
        self.w1_size = np.prod(self.w1_mu.size())
        self.b1_size = np.prod(self.b1_mu.size())
        self.w2_size = np.prod(self.w2_mu.size())
        self.b2_size = np.prod(self.b2_mu.size())
        self.net_parameter_num = self.w1_size + self.b1_size + self.w2_size + self.b2_size
    def set_params(self, param_mu, param_rho):
        """Unpack flat mu/rho vectors into layer tensors; var = softplus(rho)**2."""
        self.w1_mu = param_mu[0: self.w1_size].view(self.w1_mu.size())
        self.b1_mu = param_mu[self.w1_size: self.w1_size + self.b1_size].view(self.b1_mu.size())
        self.w2_mu = param_mu[self.w1_size + self.b1_size: self.w1_size + self.b1_size + self.w2_size].view(self.w2_mu.size())
        self.b2_mu = param_mu[self.w1_size + self.b1_size + self.w2_size: ].view(self.b2_mu.size())
        w1_rho = param_rho[0: self.w1_size].view(self.w1_var.size())
        b1_rho = param_rho[self.w1_size: self.w1_size + self.b1_size].view(self.b1_var.size())
        w2_rho = param_rho[self.w1_size + self.b1_size: self.w1_size + self.b1_size + self.w2_size].view(self.w2_var.size())
        b2_rho = param_rho[self.w1_size + self.b1_size + self.w2_size: ].view(self.b2_var.size())
        # var = log(1 + exp(rho))^2, i.e. softplus(rho) squared.
        self.w1_var = (1 + torch.exp(w1_rho)).log().pow(2)
        self.b1_var = (1 + torch.exp(b1_rho)).log().pow(2)
        self.w2_var = (1 + torch.exp(w2_rho)).log().pow(2)
        self.b2_var = (1 + torch.exp(b2_rho)).log().pow(2)
    def linear(self, w_mu, b_mu, w_var, b_var, x):
        """Stochastic linear layer sampling activations, not weights."""
        mean = x @ w_mu + b_mu
        variance = x.pow(2) @ w_var + b_var
        # * Local Reparameterization Trick
        noise = torch.distributions.Normal(torch.zeros_like(mean), torch.ones_like(variance)).sample()
        output = mean + variance.pow(0.5) * noise
        return output
    def infer(self, observation, action):
        """Return (mean, clamped logvar) of the predicted next observation."""
        input = torch.cat([observation, action], 1)
        x = F.relu(self.linear(self.w1_mu, self.b1_mu, self.w1_var, self.b1_var, input))
        # NOTE(review): relu is applied to the output layer as well, forcing
        # the predicted mean to be non-negative — confirm intended.
        x = F.relu(self.linear(self.w2_mu, self.b2_mu, self.w2_var, self.b2_var, x))
        mean, logvar = x[:, : self.observation_dim], x[:, self.observation_dim:]
        logvar = torch.clamp(logvar, self.min_logvar, self.max_logvar)
        return mean, logvar
    def calc_log_likelihood(self, next_observations, actions, observations):
        """Per-sample Gaussian log-likelihood of next_observations under the model."""
        # * calculate the log-likelihood term and the KL divergence of the loss function is ZERO
        next_mean, next_logvar = self.infer(observations, actions)
        # * it assumes that weight distribution q(theta; phi) is given by the fully factorized Gaussian distribution
        # * so the covariance matrix is diagonal and it reduces the computation
        log_likelihood = - 0.5 * ((next_observations - next_mean).pow(2) * (- next_logvar).exp() + next_logvar).sum(1) - 0.5 * self.observation_dim * np.log(2 * np.pi)
        return log_likelihood
class vime(nn.Module):
    """Variational Information Maximizing Exploration (VIME).

    Wraps a Bayesian dynamics model and turns the information gain of each
    transition into an intrinsic curiosity bonus added to the reward.
    """
    def __init__(self, observation_dim, action_dim, hidden_size, min_logvar, max_logvar, learning_rate, kl_buffer_capacity, lamda, update_iteration, batch_size, eta):
        super(vime, self).__init__()
        self.observation_dim = observation_dim
        self.action_dim = action_dim
        self.hidden_size = hidden_size
        self.min_logvar = min_logvar
        self.max_logvar = max_logvar
        self.learning_rate = learning_rate
        self.kl_buffer_capacity = kl_buffer_capacity
        self.lamda = lamda
        self.update_iteration = update_iteration
        self.batch_size = batch_size
        self.eta = eta  # curiosity bonus scale
        self.dynamics_model = bnn(self.observation_dim, self.action_dim, self.hidden_size, self.max_logvar, self.min_logvar)
        # Flat variational parameters shared with the dynamics model.
        self.param_mu = nn.Parameter(torch.zeros(self.dynamics_model.net_parameter_num))
        self.param_rho = nn.Parameter(torch.zeros(self.dynamics_model.net_parameter_num))
        self.dynamics_model.set_params(self.param_mu, self.param_rho)
        self.optimizer = torch.optim.Adam([self.param_mu, self.param_rho], lr=self.learning_rate)
        # Rolling window of recent median info gains, used for normalization.
        self.kl_buffer = deque(maxlen=self.kl_buffer_capacity)
    def calc_info_gain(self, observation, action, next_observation):
        """Approximate the KL-based information gain for a single transition."""
        self.dynamics_model.set_params(self.param_mu, self.param_rho)
        log_likelihood = self.dynamics_model.calc_log_likelihood(
            torch.FloatTensor(np.expand_dims(next_observation, 0)),
            torch.FloatTensor([action]).unsqueeze(0),
            torch.FloatTensor(np.expand_dims(observation, 0))
        )
        log_likelihood = log_likelihood.mean()
        self.optimizer.zero_grad()
        (- log_likelihood).backward()
        # Second-order approximation: KL ~ lamda^2/2 * grad^2 / Hessian diag.
        nabla = torch.cat([self.param_mu.grad.detach(), self.param_rho.grad.detach()])
        H = self.calc_hessian()
        info_gain = (self.lamda ** 2 / 2 * nabla.pow(2) * H.pow(-1)).sum().detach()
        return info_gain.item()
    def calc_hessian(self):
        """Diagonal Hessian of the KL term w.r.t. (mu, rho)."""
        # * calculate the hessian matrix of the KL term and ignore the hessian matrix of the log-likelihood term
        H_mu = (1 + torch.exp(self.param_rho)).log().pow(-2).detach()
        H_rho = (1 + torch.exp(self.param_rho)).log().pow(-2) * 2 * torch.exp(2 * self.param_rho) * (1 + torch.exp(self.param_rho)).pow(-2)
        H_rho = H_rho.detach()
        # * find KL divergence partial guide to mu and rho
        H = torch.cat([H_mu, H_rho], -1).detach()
        return H
    def calc_kl_div(self, prev_mu, prev_var):
        """Closed-form KL between current and previous factorized Gaussians."""
        # * calculate the KL divergence term
        var = (1 + torch.exp(self.param_rho)).log().pow(2)
        kl_div = 0.5 * ((var / prev_var) + prev_var.log() - var.log() + (prev_mu - self.param_mu).pow(2) / prev_var).sum() - 0.5 * len(self.param_mu)
        return kl_div
    def update(self, buffer):
        """Run update_iteration ELBO ascent steps on sampled transitions."""
        # * maximize the elbo
        prev_mu, prev_var = self.param_mu.detach(), (1 + torch.exp(self.param_rho.detach())).log().pow(2)
        for i in range(self.update_iteration):
            observations, actions, _, next_observations, _ = buffer.sample(self.batch_size)
            observations = torch.FloatTensor(observations)
            actions = torch.FloatTensor(actions).unsqueeze(1)
            next_observations = torch.FloatTensor(next_observations)
            self.dynamics_model.set_params(self.param_mu, self.param_rho)
            log_likelihood = self.dynamics_model.calc_log_likelihood(next_observations, actions, observations).mean()
            div_kl = self.calc_kl_div(prev_mu, prev_var)
            elbo = log_likelihood - div_kl
            self.optimizer.zero_grad()
            (- elbo).backward(retain_graph=True)
            self.optimizer.step()
        return elbo
    def store_kl(self, info_gains):
        """Record the batch's median info gain for future normalization."""
        self.kl_buffer.append(np.median(info_gains))
    def calc_curiosity_reward(self, rewards, info_gains):
        """Return rewards augmented with the normalized curiosity bonus."""
        if len(self.kl_buffer) == 0:
            relative_gains = info_gains
        else:
            # * prevent the mean of the previous kl to be ZERO
            relative_gains = info_gains / np.mean(self.kl_buffer) if np.mean(self.kl_buffer) != 0 else info_gains
        return rewards + self.eta * relative_gains
#!/usr/bin/env python
from __future__ import print_function
import rospy
import time
from threading import Thread, Event
import websocket
import pyaudio
try:
import thread
except ImportError:
import _thread as thread
import time
import json
import rospy
from std_msgs.msg import String
# global variable as "correct" handling of threading seems to break the management of websockets...
response = []
error = []
listen = True
vosk_thread = []
encoding = 'utf-8' # 'ascii'
def on_message(ws, message):
    """Websocket callback: decode the JSON result and queue it for the publisher loop."""
    global response
    decoded = json.loads(message)
    response.append(decoded)
def on_error(ws, err):
    """Websocket callback invoked when the connection errors out.

    FIX: the parameter used to be named ``error``, shadowing the module-level
    ``error`` list, so the append below crashed on the exception object; it
    was also passed to json.loads even though it is not JSON.
    """
    rospy.loginfo("Cannot access the VOSK DOCKER container")
    error.append(str(err))
def on_close(ws):
    # Websocket callback: note that the connection was closed.
    print("### closed ###")
def on_open(ws):
    """Websocket-open callback: start streaming microphone audio to VOSK."""
    # stick to this parameter or it will "fail" in a weird way (no error, but also no answers)
    RATE = 16000
    CHUNK = 8000
    BUFF_SIZE = 4000
    timeout = 5
    steps = RATE/CHUNK*timeout  # NOTE(review): computed but never used — confirm
    p = pyaudio.PyAudio()
    st = p.open(format=pyaudio.paInt16,channels = 1,rate = RATE,input = True, frames_per_buffer = CHUNK)
    def run(*args):
        # Forward raw PCM chunks until the module-level `listen` flag clears.
        while listen:
            data = st.read(BUFF_SIZE)
            ws.send(data, opcode=websocket.ABNF.OPCODE_BINARY)
        print("thread terminating...")
    thread.start_new_thread(run, ())
def startListening() :
    """Connect to the local VOSK websocket server and stream audio.

    Blocks in run_forever(); recognition results accumulate in the
    module-level `response` list via on_message.
    """
    global response
    global error
    websocket.enableTrace(False)
    ws = websocket.WebSocketApp("ws://localhost:2700",
                              on_message = on_message,
                              on_error = on_error,
                              on_close = on_close)
    ws.on_open = on_open
    ws.keep_running = False
    ws.run_forever()
    return response
def vosk_talker():
    """ROS node: publish VOSK speech-to-text results.

    Partial hypotheses go to vosk/partial_speech, final text to vosk/speech,
    and word-level details to vosk/confidence.
    """
    global response
    global vosk_thread
    rospy.loginfo("Starting Speech Node")
    rospy.loginfo("Note that ROS supports ascii strings, so UTF-8 characters are formatted. If you wat to remove that change encoding to ascii")
    # To parse UTF-8 do this https://answers.ros.org/question/269873/how-do-i-use-unicode-strings-in-pubishsubscribe/
    pub_partial = rospy.Publisher('vosk/partial_speech', String, queue_size=10)
    pub_speech = rospy.Publisher('vosk/speech', String, queue_size=10)
    pub_fullresults = rospy.Publisher('vosk/confidence', String, queue_size=10)
    rospy.init_node('vosk_STT', anonymous=True)
    # startListening blocks, so it runs in its own thread and communicates
    # back through the module-level `response` list.
    vosk_thread =thread.start_new_thread(startListening, ())
    rate = rospy.Rate(5) # 5hz
    while not rospy.is_shutdown():
        if response :
            popstr = response.pop()
            if 'partial' in popstr.keys() :
                pub_partial.publish(popstr["partial"].encode(encoding, errors='replace'))
                rate.sleep()
            if 'result' in popstr.keys() :
                # Final results carry both 'result' (per-word data) and 'text'.
                pub_speech.publish(str(popstr['text'].encode(encoding, errors='replace')))
                pub_fullresults.publish(str(popstr['result']).encode(encoding, errors='replace'))
            if len(response) > 10:
                # Drop backlog so publishing stays near real time.
                rospy.loginfo("Queue on VOSK response is getting bigger, filtering out older results")
                response = []
        else :
            pass
def shutdown():
    """ROS shutdown hook: stop the websocket audio-reader thread."""
    global vosk_thread
    # FIX: without this declaration the assignment below created a local
    # variable, so the module-level `listen` flag was never cleared and the
    # reader thread kept running.
    global listen
    rospy.loginfo("killing vosk listener node")
    listen = False
if __name__ == '__main__':
    try:
        vosk_talker()
        # NOTE(review): registered only after vosk_talker() returns, so the
        # hook is installed very late — confirm the intended ordering.
        rospy.on_shutdown(shutdown)
    except rospy.ROSInterruptException:
        pass
|
"""
A set of classes to help with organizing our graph data into
- Individual points
- X/Y coordinates
- Line segments
- A set of 2 points
- Rectanges
- A set of 2 lines
"""
class Point:
    """
    An individual point on a graph with X/Y coordinates
    """
    def __init__(self, x:int, y:int):
        self.X = x
        self.Y = y

    def __str__(self):
        # Rendered as "(x,y)".
        return f"({self.X},{self.Y})"
class Line:
    """
    A line segment from a graph with two points
    low = Point with lowest x/y coordinates
    high = Point with highest x/y coordinates
    """
    def __init__(self, low: Point, high: Point):
        self.low = low
        self.high = high

    def __str__(self):
        # Rendered as "LINE <low>- <high>".
        return f"LINE {self.low}- {self.high}"
class Rectangle:
    """
    Consists of two parallel lines that construct a rectangle
    """
    def __init__(self, left: Line, right: Line):
        self.left = left
        self.right = right

    def __str__(self):
        # Rendered as "RECT: left(<left>)- right(<right>)".
        return f"RECT: left({self.left})- right({self.right})"
|
# coding=utf-8
# if __name__ == "__main__":
# pass
# %%
"""
本版本用于在夜神模拟器上爬取链接,以及用于用链接爬取音频
"""
# %%
# import requests
# from bs4 import BeautifulSoup as bs4
# import urllib3
from airtest.core.api import *
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
import pyperclip
import codecs
import csv
from bs4 import BeautifulSoup as bs4
import os
import requests
from typing import List
import time
# poco = AndroidUiautomationPoco(use_airtest_input=True, screenshot_each_action=False)
# %%
class CrawlerQQkg:
"""
1. 将raw_titles中的所有非数字的歌曲进入模拟器中查询,提取出链接,批量进行,存入raw_urls,并记录log
2. 对所有url进行爬取
"""
    def __init__(self, base_dir):
        """base_dir: project root containing raw_data/ and helpers/."""
        self.base_dir = base_dir
        self.all_data_num = 72898
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
        self.titles = []  # element format: [idx, song title]
        self.singers = []  # element format: [idx, singer name]
        self.url_list: List[List] = [None] * (self.all_data_num + 1)  # NOTE: 1-based; each element is the list of uploads for that song
        self.url_log = []  # 0 = not fetched, >0 = number of links obtained; 1-based, with one trailing entry marking where the last run stopped
        self.audio_log = []
        # init methods
        self.load_titles()
        self.load_singers()
def load_titles(self):
with open(self.base_dir + "/raw_data/raw_titles.txt", 'r', encoding='utf-8') as titles:
cnt = 0
self.titles.append(None) # NOTE
for title in titles.readlines():
if title[:len(codecs.BOM_UTF8)] == codecs.BOM_UTF8:
title = title[codecs.BOM_UTF8:]
cnt += 1
self.titles.append([cnt, title.strip()]) # 从1开始
def load_singers(self):
with open(self.base_dir + "/raw_data/raw_singers.txt", 'r', encoding='utf-8') as singers:
cnt = 0
self.singers.append(None) # NOTE
for singer in singers.readlines():
if singer[:len(codecs.BOM_UTF8)] == codecs.BOM_UTF8:
singer = singer[codecs.BOM_UTF8:]
cnt += 1
self.singers.append([cnt, singer])
    def load_url_log(self):
        """Load the per-song URL fetch status; return the saved break point.

        The log stores one int per song (1-based) plus a final line with the
        position where the previous run stopped; that line is popped off and
        returned.
        """
        if os.path.getsize(self.base_dir + "/helpers/crawler_qqkg_url_log.txt"):
            with open(self.base_dir + "/helpers/crawler_qqkg_url_log.txt", 'r') as log:
                self.url_log = [0]  # NOTE: 1-based; index 0 is just a placeholder
                for status in log.readlines():
                    self.url_log.append(int(status.strip()))
        else:
            self.url_log = [0] * (72898 + 2)  # 1-based, plus one trailing break-point entry
        return self.url_log.pop()
    def write_url_log(self, break_point):
        """Persist self.url_log (skipping the index-0 placeholder), then append
        the current break point as the final line."""
        with open(self.base_dir + "/helpers/crawler_qqkg_url_log.txt", 'w') as log:
            for i in range(1, len(self.url_log)):
                log.write(str(self.url_log[i]) + '\n')
            log.write(str(break_point) + '\n')
    def load_url_list(self):
        """Reload self.url_list from disk if the file is non-empty; each line is
        a whitespace-separated list of share URLs for one song (1-based)."""
        if os.path.getsize(self.base_dir + "/raw_data/url_list.txt"):
            self.url_list = [None]
            with open(self.base_dir + "/raw_data/url_list.txt", 'r', encoding='utf-8') as f:
                for line in f.readlines():
                    self.url_list.append(line.strip().split())
    def write_url_list(self):
        """Write the non-empty entries of self.url_list, one song per line,
        URLs separated by spaces.

        NOTE(review): empty/None entries are skipped entirely, so line numbers
        on reload may not match the original song indices — confirm consumers
        account for this.
        """
        with open(self.base_dir + "/raw_data/url_list.txt", 'w', encoding='utf-8') as f:
            for i in range(1, len(self.url_list)):
                if self.url_list[i] is not None and len(self.url_list[i]) > 0:
                    for url in self.url_list[i]:
                        f.write(url + ' ')
                    f.write('\n')
    def query(self, idx):
        """
        Drive the WeSing (全民K歌) Android app via poco UI automation to
        collect share links for song #idx: type the title into the search
        box, pick the candidate whose singer matches, open the "总榜"
        (overall ranking) tab, then visit up to maxNum distinct uploaders'
        pages and copy each share URL from the clipboard.

        :param idx: 1-based song index into self.titles / self.singers
        """
        try:
            # poco(name="com.tencent.karaoke:id/gvd").click()  # tap the search box
            #   NOTE: moved outside this function to save time
            poco(name="com.tencent.karaoke:id/g02").set_text(self.titles[idx][1])  # type the song title
            poco(name="com.tencent.karaoke:id/gvk").click()  # tap "search"
            candidate_list = poco(name="com.tencent.karaoke:id/cvx")  # fetch the suggestion list
            # except:
            #     return  # guard against assorted unpredictable problems
            # try:
            singer_list = []
            for candidate in candidate_list:
                singer = candidate.offspring(name="com.tencent.karaoke:id/cw6").get_text()
                if singer[-3:] == " · ":
                    # trim the " · " suffix the app appends to singer names
                    singer = singer[:-3]
                # print(singer)
                singer_list.append(singer)
            # Pick the candidate whose singer matches ours; raises ValueError
            # (caught below) when the singer/song is absent.
            i = singer_list.index(self.singers[idx][1].strip())
            poco(name="com.tencent.karaoke:id/cvx")[i].click()
            # except:
            #     poco(name="com.tencent.karaoke:id/cvx")[0].click()
            #     return  # no such singer (no such song)
            poco(name="总榜").click()  # tap the overall-ranking tab
        except:
            return
        cnt = 0  # original note said "want 5 songs"; maxNum below is 3
        flag = 0  # swipes on the current page; give up after 3 without progress
        visited = []  # usernames whose uploads were already downloaded
        ret = []  # share URLs collected for this song
        parent = poco(name="com.tencent.karaoke:id/fr")
        maxNum = 3
        while cnt < maxNum:
            # swipe((432, 1455), (432, 1000))
            try:
                work_list = parent.poco(name="com.tencent.karaoke:id/f4")  # uploads on the current page
            except:
                break  # nobody has sung this song
            if flag >= 3:  # cnt stopped advancing for some unexpected reason; bail out
                break
            try:  # original note: this can still fail even after the earlier checks
                for work in work_list:
                    cur_usr_name = work.get_text()
                    if cur_usr_name in visited or cur_usr_name[-1] == ' ':  # already-taken username
                        continue
                    visited.append(cur_usr_name)
                    work.click()  # open this user's performance page
                    # now on the performance page
                    try:
                        poco(name="com.tencent.karaoke:id/u1").click()  # tap "share"
                    except:
                        continue
                    # the user may no longer exist
                    try:
                        # poco.swipe([500, 1000], [300, 1000])
                        poco(name="com.tencent.karaoke:id/eou").poco(name="com.tencent.karaoke:id/hh3")[-1].click()  # tap "copy link"
                        if len(ret) > 0:
                            assert pyperclip.paste() != ret[-1]  # guard against stale clipboard content
                    except:
                        keyevent("KEYCODE_BACK")
                        continue
                    ret.append(pyperclip.paste())
                    cnt += 1
                    # poco(name="返回").click()
                    keyevent("KEYCODE_BACK")
                    time.sleep(0.1)
                    if cnt >= maxNum:
                        break
            except:
                break
            if cnt < maxNum:
                # scroll down to reveal more uploads
                swipe((432, 1455), (432, 700))
                flag += 1
        if len(ret) > 0:
            self.url_list[idx] = ret
            self.url_log[idx] = len(ret)
        # navigate back
        # poco(name="返回").click()
        keyevent("KEYCODE_BACK")
        time.sleep(0.1)
    def get_url(self, batch_size, resume=True):
        """
        Crawl share URLs for all songs in batches, checkpointing the log and
        url list to disk after every batch so an interrupted run can resume.

        :param batch_size: songs per batch (checkpoint granularity)
        :param resume: start after the last recorded break point instead of 1
        """
        begin = 1
        if resume:
            begin = self.load_url_log() + batch_size
        for i in range(begin, self.all_data_num + 1, batch_size):
            self.load_url_log()
            self.load_url_list()
            try:
                poco(name="com.tencent.karaoke:id/gvd").click()  # tap the search box
            except:
                pass
            for j in range(i, min(i + batch_size, self.all_data_num + 1)):
                try:
                    # skip titles starting with a digit
                    if self.titles[j][1][0].isdigit():
                        continue
                except IndexError:
                    print("There is a index error, j = ", j)  # FIXME: original note — this is expected to go wrong
                    continue
                if self.url_log[j]:  # have downloaded
                    continue
                print(j)
                self.query(j)
            self.write_url_list()
            self.write_url_log(i)  # the break point is this batch's start index
def load_audio_log(self):
if os.path.getsize(self.base_dir + "/helpers/crawler_qqkg_audio_log.txt"):
with open(self.base_dir + "/helpers/crawler_qqkg_audio_log.txt", 'r') as log:
self.audio_log = [0] # NOTE: 从1开始,第零个只是placeholder
for status in log.readlines():
self.audio_log.append(int(status.strip()))
else:
self.audio_log = [0] * (72898 + 2)
return self.audio_log.pop()
def write_audio_log(self, breakpoint):
with open(self.base_dir + "/helpers/crawler_qqkg_audio_log.txt", 'w') as log:
for i in range(1, len(self.audio_log)):
log.write(str(self.audio_log[i]) + '\n')
log.write(str(breakpoint) + '\n')
    def download(self, url, idx, j):
        """
        Fetch the audio behind a share URL and save it as an mp3.

        Scrapes the share page's HTML for the 'playurl' field and streams
        that media URL to audios/raw_audios/{idx}({j}).mp3. Any failure
        (network, parse, disk) is logged and swallowed (best-effort).

        :param url: share URL of this performance
        :param idx: 1-based song index
        :param j: 1-based index among this song's performances
        """
        try:
            html = requests.get(url, headers=self.headers).text
            # soup = bs4(html, 'html.parser')
            # target = soup.find('script', type='text/javascript')
            # +10 presumably skips `playurl` plus the separator characters up
            # to the URL itself — TODO confirm against the page markup.
            start = html.find('playurl') + 10
            end = html.find('"', start+1)
            # src_url = target['src']
            src_url = html[start:end]
            response = requests.get(src_url, headers=self.headers, stream=True)
            with open(self.base_dir + "/audios/raw_audios/{}({}).mp3".format(idx, j), 'wb') as song_file:
                for chunk in response.iter_content(chunk_size=512):
                    song_file.write(chunk)
            self.audio_log[idx] += 1  # NOTE: 1-based
            print("{}({}) --- successfully downloaded".format(idx, j), end=' ')
            print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        except:
            # self.log[title[0]] = 0
            # deliberate best-effort: log the failure and move on
            print("{}({}) --- downloading failed".format(idx, j), end=' ')
            print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    def get_audio(self, batch_size, resume=True):
        """
        Download audio for every song whose share URLs were collected,
        batch by batch, checkpointing the audio log after each batch.

        :param batch_size: songs per batch (checkpoint granularity)
        :param resume: start after the last recorded break point instead of 1
        """
        begin = 1
        if resume:
            begin = self.load_audio_log() + batch_size
        self.load_url_list()
        for i in range(begin, self.all_data_num + 1, batch_size):
            print('loading log...')
            self.load_audio_log()
            for idx in range(i, min(i + batch_size, self.all_data_num + 1)):
                if self.audio_log[idx] > 0:  # had downloaded
                    continue
                if self.url_list[idx] is not None and len(self.url_list[idx]) > 0:
                    for j in range(len(self.url_list[idx])):  # fetch every performance of this song
                        self.download(self.url_list[idx][j], idx, j+1)  # j is 1-based in filenames
            print('saving log...')
            self.write_audio_log(i)
def test(self):
self.load_url_log()
self.write_url_log()
self.load_url_log()
# %%
if __name__ == "__main__":
    # Entry point: download audio for the dataset rooted at E:/song_spider.
    crawler = CrawlerQQkg("E:/song_spider")
    # crawler.get_url(10)
    # crawler.test()
    crawler.get_audio(40)
# %%
"""trytrywater"""
# Scratch cell: load a whitespace-separated temp file into a 1-based list.
temp = [None]
with open(r"E:/song_spider/temp.txt") as f:
    for line in f.readlines():
        temp.append(line.strip().split())
# %%
# Scratch cell: count down from 10 to 2.
for i in range(10, 1, -1):
    print(i)
|
# Basic place facts, printed one per line.
pais = "Ecuador "
ciudad = "Loja"
fecha_independencia = "18 de noviembre"
for dato in (pais, ciudad, fecha_independencia):
    print(dato)
"""
演示协程 gevent
1、gevent 并不像 greenlet 需要手动调用 switch()切换任务。
2、要自动切换任务,前提是 gevent 碰到 延时操作。
3、gevent.sleep(1) 自带的延时操作
"""
# import gevent
# import time
# from gevent import monkey # 打补丁,将time的延时操作,转化为gevent的延时操作。
#
# monkey.patch_all() #自动将time的延时操作,转化为gevent的延时操作。
# def sing():
# for i in range(1, 5):
# print("bobo老师在唱第%s首歌" % i)
# time.sleep(1)
# # gevent.sleep(1)
#
# def dance():
# for i in range(1, 5):
# print("bobo老师在跳第%s支舞" % i)
# time.sleep(1)
# # gevent.sleep(1)
#
#
# g1 = gevent.spawn(sing) # 任务做为参数传入
# g2 = gevent.spawn(dance)
#
#
# def main():
# g1.join() # 启动协程
# g2.join()
#
#
# if __name__ == '__main__':
# main()
"""最终代码优化"""
import gevent
import time
from gevent import monkey # 打补丁,将time的延时操作,转化为gevent的延时操作。
monkey.patch_all() # 自动将time的延时操作,转化为gevent的延时操作。
def sing():
    """Print the 'singing' message four times, sleeping 1s between prints
    (time.sleep is gevent-patched here, so each sleep yields control)."""
    for track in range(1, 5):
        print("bobo老师在唱第%s首歌" % track)
        time.sleep(1)
def dance():
    """Print the 'dancing' message four times, sleeping 1s between prints
    (time.sleep is gevent-patched here, so each sleep yields control)."""
    for step in range(1, 5):
        print("bobo老师在跳第%s支舞" % step)
        time.sleep(1)
def main():
    """Spawn both greenlets and block until they both finish."""
    greenlets = [gevent.spawn(sing), gevent.spawn(dance)]
    gevent.joinall(greenlets)
if __name__ == '__main__':
    main()
|
from app import app, db
from app.models import User, Post, District, Hashtag, Url
from datetime import datetime, date, timedelta
# Backfill Post.created_at_dt for rows where it was never populated, copying
# the value from created_at. Commits in batches of 200 to bound transaction
# size on large tables.
unfilled = db.session.query(Post).\
    filter(Post.created_at_dt == None).all()
indexCount = 0
for row in unfilled:
    row.created_at_dt = row.created_at
    db.session.add(row)
    indexCount += 1
    if indexCount % 200 == 0:
        db.session.commit()
        print("filled {} rows".format(indexCount))
db.session.commit()  # flush the final partial batch
|
import numpy as np
from ClassCorrelacaoPerseptron5 import CorrelacaoPerseptron5
if __name__ == '__main__':
    # Path to the trace file used for training the perceptron model.
    caminho = "/home/guilherme/Downloads/Tracos/lame/trace_lame_larger.txt"
    c2 = CorrelacaoPerseptron5(caminho)
    print("Perceptron 5 treinando")
    c2.treinaModelo()  # train the model on the trace
# standard imports
import unittest
# third party imports
# local imports
from ENDFtk.MF13 import TotalCrossSection
class Test_ENDFtk_MF13_TotalCrossSection( unittest.TestCase ) :
    """Unit test for the TotalCrossSection class."""

    # Valid 2-point tabulated section string (MAT=9228, MF=13, MT=18) used
    # for parsing and round-trip (to_string) checks.
    chunk = ( ' 0.000000+0 0.000000+0 0 0 1 2922813 18 \n'
              ' 2 5 922813 18 \n'
              ' 1.000000+0 2.000000+0 3.000000+0 4.000000+0 922813 18 \n' )

    # Section string with inconsistent interpolation data; parsing it is
    # expected to raise (see test_failures).
    invalid = ( ' 0.000000+0 0.000000+0 0 0 2 2922813 18 \n'
                ' 2 2 922813 18 \n'
                ' 1.000000-5 8.579050+0 3.000000+7 1.487778+1 922813 18 \n' )

    def test_component( self ) :

        def verify_chunk( self, chunk ) :
            """Assert every accessor of a parsed TotalCrossSection."""

            # verify content
            self.assertEqual( 2, chunk.NP )
            self.assertEqual( 1, chunk.NR )
            self.assertEqual( 1, len( chunk.interpolants ) )
            self.assertEqual( 1, len( chunk.boundaries ) )
            self.assertEqual( 5, chunk.interpolants[0] );
            self.assertEqual( 2, chunk.boundaries[0] );
            self.assertEqual( 2, len( chunk.E ) )
            self.assertEqual( 2, len( chunk.energies ) )
            self.assertEqual( 2, len( chunk.XS ) )
            self.assertEqual( 2, len( chunk.cross_sections ) )
            self.assertAlmostEqual( 1., chunk.E[0] )
            self.assertAlmostEqual( 3., chunk.E[1] )
            self.assertAlmostEqual( 1., chunk.energies[0] )
            self.assertAlmostEqual( 3., chunk.energies[1] )
            self.assertAlmostEqual( 2., chunk.XS[0] )
            self.assertAlmostEqual( 4., chunk.XS[1] )
            self.assertAlmostEqual( 2., chunk.cross_sections[0] )
            self.assertAlmostEqual( 4., chunk.cross_sections[1] )
            self.assertEqual( 3, chunk.NC )

            # verify string round trip
            self.assertEqual( self.chunk, chunk.to_string( 9228, 13, 18 ) )

        # the data is given explicitly
        chunk = TotalCrossSection( boundaries = [ 2 ], interpolants = [ 5 ],
                                   energies = [ 1., 3. ], xs = [ 2., 4. ] )
        verify_chunk( self, chunk )

        # the data is read from a string
        chunk = TotalCrossSection.from_string( self.chunk, 9228, 13, 18 )
        verify_chunk( self, chunk )

        # the data is copied
        copy = TotalCrossSection( chunk )
        verify_chunk( self, copy )

    def test_failures( self ) :

        print( '\n' )

        # wrong boundaries: interpolant count disagrees with boundary count
        with self.assertRaises( Exception ) :
            chunk = TotalCrossSection( boundaries = [ 2 ], interpolants = [ 5, 2 ],
                                       energies = [ 1., 3. ], xs = [ 2., 4. ] )

        # inconsistent section string
        with self.assertRaises( Exception ) :
            chunk = TotalCrossSection.from_string( self.invalid, 9228, 13, 18 )

if __name__ == '__main__' :
    unittest.main()
|
import numpy as np
import matplotlib.pyplot as plt
import random
def filter(X, y, k):
    """Relief-style filter feature scoring.

    For every sample and attribute, accumulates
    (nearest-miss distance)^2 - (nearest-hit distance)^2 into theta,
    then returns the indices of the k lowest-scored attributes.

    Bug fixes vs. original:
    - nearest-hit/miss updates assigned dangling locals ``nh``/``nm``
      instead of ``x_nh``/``x_nm``, so the tracked indices never refreshed;
    - ``np.argsort`` on the (n, 1) column vector sorted along the last axis
      and returned all zeros; sorting along axis 0 ranks the attributes.

    :param X: (m, n) feature matrix
    :param y: length-m label vector
    :param k: number of attribute indices to return
    :return: (k, 1) array of attribute indices, ascending by score
    """
    m, n = X.shape
    theta = np.zeros([n, 1])
    for i in range(m):
        x_i = X[i]
        y_i = y[i]
        # nearest-hit / nearest-miss trackers for sample i
        x_nh = -1
        nh_dist = 0
        x_nm = -1
        nm_dist = 0
        for a in range(n):
            for j in range(m):
                if j == i:
                    continue
                dist = calDist(x_i[a], X[j][a])
                if y[j] == y_i:
                    # same class -> candidate nearest hit
                    if x_nh == -1 or nh_dist > dist:
                        x_nh = j
                        nh_dist = dist
                else:
                    # different class -> candidate nearest miss
                    if x_nm == -1 or nm_dist > dist:
                        x_nm = j
                        nm_dist = dist
            theta[a] += - nh_dist**2 + nm_dist**2
    ans = np.argsort(theta, axis=0)
    return ans[0:k:1]
def LVW(X, model, T):
    """Las Vegas Wrapper (LVW) feature selection.

    Repeatedly samples a random feature subset, evaluates ``model`` on it,
    and keeps the subset when it strictly lowers the error, OR ties the
    error with fewer features. Stops after T consecutive non-improving
    samples.

    Bug fixes vs. original:
    - the acceptance condition used ``or`` where the algorithm requires
      ``and`` (it accepted any smaller subset regardless of its error);
    - ``A`` could be unbound if no subset was ever accepted; it now falls
      back to all features.

    :param X: (m, n) data matrix
    :param model: object exposing fit(data) and score(data) -> error
    :param T: patience — consecutive failed samples before stopping
    :return: list of selected feature indices
    """
    error = 100000000
    m, n = X.shape
    d = n
    t = 0
    A = list(range(n))  # fallback if nothing is ever accepted
    A_orignal = [i for i in range(n)]
    while t < T:
        temp_A = random.sample(A_orignal, random.randint(0, n-1))
        temp_d = len(temp_A)
        model.fit(X[temp_A])
        temp_error = model.score(X[temp_A])
        # Accept on strictly better error, or equal error with fewer features.
        if (error > temp_error) or (temp_error == error and temp_d < d):
            t = 0
            error = temp_error
            d = temp_d
            A = temp_A
        else:
            t += 1
    return A
def calDist(a, b, opt=1):
    """Per-attribute distance.

    opt == 1: 0/1 mismatch indicator (categorical attributes);
    otherwise: absolute difference |a - b| (numeric attributes).
    """
    if opt == 1:
        return 0 if a == b else 1
    return np.abs(a - b)
|
#!/usr/bin/env python
# coding: utf-8
# For python 2 compatibility
from __future__ import unicode_literals
import optparse
import gitli
if __name__ == '__main__':
    # Command-line front end for git-li; the usage string doubles as the
    # command reference shown by --help.
    # (Fix: "repositoryto" -> "repository to" in the user-visible help text.)
    parser = optparse.OptionParser(
        usage="""Usage: git-li <command> [command-options]
Commands:
init Initialize the git repository to use git-li
list <PATTERN...> List issues for this repository
new [--edit] <TITLE> Create a new issue for this repository
show <NUMBER> Show the given issue
edit <NUMBER> Edit the given issue
reopen <NUMBER> Reopen the given issue
remove <NUMBER> Remove the given issue (removes all info)
milestone Show the current milestone
milestone [--up] <MILE> Set the current milestone
close <NUMBER> Close the given issue
A few examples:
git li init
git li new 'My First Issue'
git li close 1
git li list open task 0.1
Aliases:
git li new|add|open
git li remove|delete""")
    #comment Add a comment to the given issue (todo)
    parser.add_option("-e", "--edit",
                      action="store_true",
                      dest="edit",
                      default=False,
                      help='change issue type and milestone when adding a new issue.')
    parser.add_option("-u", "--up",
                      action="store_true",
                      dest="up",
                      default=False,
                      help='Move all the open issues to the next milestone specified by'\
                           ' the milestone command.')
    (options, args) = parser.parse_args()
    gitli.main(options, args, parser)
|
#Faça um programa que leia o cateto oposto, cateto adjacente e nos devolva o valor da hipotenusa usando o modulo math
import math
#h2 = ca2 + co2
#50 = 25 + 25
# Read the two legs (catetos) and print the hypotenuse via math.hypot.
oposto = float(input("Digite o comprimento do cateto oposto: "))
adjacente = float(input("Digite o comprimento do cateto adjacente: "))
hipotenusa = math.hypot(oposto, adjacente)
# Bug fix: the original spec "{2.f}" means "attribute f of positional
# argument 2" and raised at runtime; "{:.2f}" formats the value with two
# decimal places as intended.
print("A hipotenusa vai medir {:.2f}".format(hipotenusa))
|
import numpy
import random
def fingdcircle(adjmat):
    """
    Backtracking walk over adjacency matrix ``adjmat`` that tries to extend
    a path one vertex per step; returns the 0/1 ``visited`` vector of
    vertices on the found path.

    NOTE(review): when backtracking exhausts vertex 0's candidates, k can
    reach -1 and x[-1]/visited[x[-1]] wrap around Python-style; with an
    unlucky graph the inner loop may also fail to terminate. Confirm
    correctness before reuse.
    """
    n=adjmat.shape[0]
    x=numpy.array([0 for i in range(n)])        # x[k]: vertex chosen at step k
    visited=numpy.array([0 for i in range(n)])  # 1 if the vertex is on the current path
    k=0
    x[k]=0
    visited[x[k]]=1
    while(k<n):
        while(True):
            x[k] = x[k] + 1  # try the next candidate vertex for step k
            if (x[k] >= n):
                # candidates exhausted: undo the previous step and backtrack
                visited[x[k - 1]] = 0
                x[k] = 0
                k -= 1
                continue
            if (visited[x[k]]==0 and adjmat[x[k],x[k-1]]==1):
                # unvisited neighbour of the previous vertex: take it
                visited[x[k]]=1
                break
        k+=1
    return visited
# Build a random undirected graph on n vertices out of 8 random edges
# (duplicates and self-loops possible), then run the path search.
n=6
adjmat=numpy.zeros(shape=(n,n))
xl=list(range(n))
yl=list(range(n))
connect_p=[(random.choice(xl),random.choice(yl)) for k in range(8)]
for points in connect_p:
    i=points[0]
    j=points[1]
    adjmat[i,j]=1
    adjmat[j,i]=1  # symmetric entry: the edge is undirected
visited=fingdcircle(adjmat)
print(visited)
# USAGE
# python train_network.py --dataset images --model santa_not_santa.model
# set the matplotlib backend so figures can be saved in the background
import matplotlib
matplotlib.use("Agg")
# import the necessary packages
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import layers
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras import optimizers
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import img_to_array
from keras.utils import to_categorical
from keras.applications import InceptionV3
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import random
from time import time
import cv2
import os
# construct the argument parse and parse the arguments
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
    help="path to input dataset")
ap.add_argument("-p", "--plot", type=str, default="plot.png",
    help="path to output loss/accuracy plot")
args = vars(ap.parse_args())

# initialize the number of epochs to train for, initia learning rate,
# and batch size
EPOCHS = 50
INIT_LR = 1e-3
BS = 10

# initialize the data and labels
print("[INFO] loading images...")
data = []
labels = []

# grab the image paths and randomly shuffle them
imagePaths = sorted(list(paths.list_images(args["dataset"])))
random.seed(42)  # fixed seed: shuffle (and thus split) is reproducible
random.shuffle(imagePaths)
# print(imagePaths)

# loop over the input images
for imagePath in imagePaths:
    # load the image, pre-process it, and store it in the data list
    image = cv2.imread(imagePath)
    image = cv2.resize(image, (299, 299))  # matches the InceptionV3 input_shape below
    image = img_to_array(image)
    data.append(image)

    # extract the class label from the image path and update the
    # labels list (the parent directory name encodes the class)
    label = imagePath.split(os.path.sep)[-2]
    if label == 'low':
        label = 0
    elif label == 'medium':
        label = 1
    elif label == 'severe':
        label = 2
    labels.append(label)

# scale the raw pixel intensities to the range [0, 1]
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)

# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
    labels, test_size=0.25, random_state=42)

# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=3)
testY = to_categorical(testY, num_classes=3)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
    height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
    horizontal_flip=True, fill_mode="nearest")

# initialize the model: frozen ImageNet InceptionV3 base + small softmax head
print("[INFO] compiling model...")
inceptionv3 = InceptionV3(input_shape=(299, 299, 3), include_top=False, weights='imagenet')
for layer in inceptionv3.layers:
    layer.trainable = False  # transfer learning: only the new head is trained
model = models.Sequential()
model.add(inceptionv3)
# NOTE(review): `border_mode` is the Keras 1 argument name (Keras 2 uses
# `padding`) — confirm the installed Keras version accepts it.
model.add(layers.AveragePooling2D((8, 8), border_mode='valid', name='avg_pool'))
model.add(layers.Dropout(0.5))
model.add(layers.Flatten())
model.add(layers.Dense(3, activation='softmax'))
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt,
    metrics=["accuracy"])
tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
filepath="model-{epoch:02d}.h5"
checkpoint = ModelCheckpoint(filepath, period=10)  # snapshot every 10 epochs

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
    validation_data=(testX, testY), steps_per_epoch=len(trainX) // BS,
    epochs=EPOCHS, verbose=1, callbacks=[tensorboard, checkpoint])

# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
N = EPOCHS
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy on damage/ no damage")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
print('=====')
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class MainWindow( QWidget ):
    """Main window that starts a background QThread for the app's lifetime.

    NOTE(review): workingThread is a class attribute, so the same thread
    object is shared by all MainWindow instances — confirm that is intended.
    """

    workingThread = QThread()

    def __init__(self, parent = None):
        QWidget.__init__(self, parent = None)
        self.workingThread.start()

    def closeEvent(self, event):
        # Bug fix: QThread has no stop() method — the original raised
        # AttributeError when the window closed. Ask the thread's event
        # loop to quit and wait for it to finish before closing.
        self.workingThread.quit()
        self.workingThread.wait()
        event.accept()
#!/usr/bin/env cctools_python
# CCTOOLS_PYTHON_VERSION 2.7 2.6
# Copyright (c) 2010- The University of Notre Dame.
# This software is distributed under the GNU General Public License.
# See the file COPYING for details.
# This program is a very simple example of how to use Work Queue.
# It accepts a list of files on the command line.
# Each file is compressed with gzip and returned to the user.
import os
import sys
from work_queue import *
# Main program
# Main program (Python 2: note the print statements)
if __name__ == '__main__':
    port = WORK_QUEUE_DEFAULT_PORT

    # Require at least one file to compress.
    if len(sys.argv) < 2:
        print "work_queue_example <file1> [file2] [file3] ..."
        print "Each file given on the command line will be compressed using a remote worker."
        sys.exit(1)

    # Usually, we can execute the gzip utility by simply typing its name at a
    # terminal. However, this is not enough for work queue; we have to
    # specify precisely which files need to be transmitted to the workers. We
    # record the location of gzip in 'gzip_path', which is usually found in
    # /bin/gzip or /usr/bin/gzip.
    gzip_path = "/bin/gzip"
    if not os.path.exists(gzip_path):
        gzip_path = "/usr/bin/gzip"
        if not os.path.exists(gzip_path):
            print "gzip was not found. Please modify the gzip_path variable accordingly. To determine the location of gzip, from the terminal type: which gzip (usual locations are /bin/gzip and /usr/bin/gzip)"
            sys.exit(1);

    # We create the tasks queue using the default port. If this port is already
    # been used by another program, you can try setting port = 0 to use an
    # available port.
    try:
        q = WorkQueue(port)
    except:
        print "Instantiation of Work Queue failed!"
        sys.exit(1)

    print "listening on port %d..." % q.port

    # We create and dispatch a task for each filename given in the argument list
    for i in range(1, len(sys.argv)):
        infile = "%s" % sys.argv[i]
        outfile = "%s.gz" % sys.argv[i]

        # Note that we write ./gzip here, to guarantee that the gzip version we
        # are using is the one being sent to the workers.
        command = "./gzip < %s > %s" % (infile, outfile)

        t = Task(command)

        # gzip is the same across all tasks, so we can cache it in the * workers.
        # Note that when specifying a file, we have to name its local * name
        # (e.g. gzip_path), and its remote name (e.g. "gzip"). Unlike the *
        # following line, more often than not these are the same. */
        t.specify_file(gzip_path, "gzip", WORK_QUEUE_INPUT, cache=True)

        # files to be compressed are different across all tasks, so we do not
        # cache them. This is, of course, application specific. Sometimes you may
        # want to cache an output file if is the input of a later task.
        t.specify_file(infile, infile, WORK_QUEUE_INPUT, cache=False)
        t.specify_file(outfile, outfile, WORK_QUEUE_OUTPUT, cache=False)

        # Once all files has been specified, we are ready to submit the task to the queue.
        taskid = q.submit(t)
        print "submitted task (id# %d): %s" % (taskid, t.command)

    print "waiting for tasks to complete..."
    while not q.empty():
        t = q.wait(5)
        if t:
            print "task (id# %d) complete: %s (return code %d)" % (t.id, t.command, t.return_status)
            if t.return_status != 0:
                # The task failed. Error handling (e.g., resubmit with new parameters, examine logs, etc.) here
                None
        # task object will be garbage collected by Python automatically when it goes out of scope

    print "all tasks complete!"

    # work queue object will be garbage collected by Python automatically when it goes out of scope
    sys.exit(0)
|
import abc
from typing import Dict
from typing import List
import pandas as pd
import requests
from dateutil import parser
from requests.auth import HTTPBasicAuth
# Base Class For Git Insights
# Base Class For Git Insights
class RepoInsightsClient(abc.ABC):
    """Abstract base for collecting pull-request, commit and workitem
    activity from a git hosting provider into pandas DataFrames.

    Subclasses supply the provider-specific API calls and row parsers;
    this class provides the orchestration and shared helpers.
    """

    def __init__(self, organization: str, project: str, repos: List[str], teamId: str, profileAliases: Dict[str, str] = None):
        # Normalise the default to a fresh dict per instance (avoids the
        # mutable-default-argument pitfall).
        if profileAliases is None:
            profileAliases = {}
        self.organization: str = organization
        self.project: str = project
        self.repos: List[str] = repos
        self.teamId: str = teamId
        self.profileIdentityAliases = profileAliases
        # Per-commit change-count cache, populated by subclasses.
        self.commitChangeCounts: Dict[str, dict] = {}
        super().__init__()

    @staticmethod
    def dateStrDiffInDays(fromDate: str, toDate: str) -> float:
        """Parse two date strings and return (fromDate - toDate) in days.

        NOTE(review): the result is negative when fromDate precedes toDate —
        confirm callers expect this sign convention.

        :raises ValueError: when either argument is empty/None
        """
        if not fromDate or not toDate:
            raise ValueError('From and To Date are required')
        fromDatetime = parser.parse(fromDate)
        toDatetime = parser.parse(toDate)
        return (fromDatetime - toDatetime).days

    @staticmethod
    def invokeAPICall(patToken: str, uri: str, responseValueProperty: str = 'value', method: str = "GET", postBody: Dict[str, str] = None) -> List[dict]:
        """Call the REST API with PAT basic auth and return one property of the JSON reply.

        NOTE(review): there is no raise_for_status(); HTTP errors only
        surface when the expected property is missing from the error body.

        :raises TypeError: for methods other than GET/POST
        """
        response = None
        if method == "GET":
            response = requests.get(uri, auth=HTTPBasicAuth('', patToken))
        elif method == "POST":
            response = requests.post(uri, json=postBody, auth=HTTPBasicAuth('', patToken))
        else:
            raise TypeError(f"method: {method} is unsupported.")
        return response.json()[responseValueProperty]

    # ---- provider-specific hooks: subclasses must implement all of these ----

    @abc.abstractmethod
    def ParsePullRequest(self, pullrequest: dict) -> List[dict]:
        raise NotImplementedError("Please Implement method parsePullRequest")

    @abc.abstractmethod
    def ParsePullRequestComments(self, comments: List[dict], repo: str) -> List[dict]:
        raise NotImplementedError("Please Implement method parsePullRequestComments")

    @abc.abstractmethod
    def ParsePullRequestCommits(self, commits: List[dict], patToken: str, repo: str) -> List[dict]:
        raise NotImplementedError("Please Implement method parsePullRequestCommits")

    @abc.abstractmethod
    def ParseWorkitems(self, repo: str, workitems: List[dict]) -> List[dict]:
        raise NotImplementedError("Please Implement method parseWorkitems")

    @abc.abstractmethod
    def invokeWorkitemsAPICall(self, patToken: str, teamID: str) -> List[dict]:
        raise NotImplementedError("Please Implement method invokeWorkitemsAPICall")

    @abc.abstractmethod
    def AggregationMeasures(self) -> dict:
        raise NotImplementedError("Please Implement method aggregationMeasures")

    @abc.abstractmethod
    def invokePRsByProjectAPICall(self, patToken: str, repo: str) -> List[dict]:
        raise NotImplementedError("Please Implement method invokePRsByProjectAPICall")

    @abc.abstractmethod
    def invokePRCommentThreadsAPICall(self, patToken: str, repoID: str, prID: str) -> List[dict]:
        raise NotImplementedError("Please Implement method invokePRCommentThreadsAPICall")

    @abc.abstractmethod
    def invokePRCommitsAPICall(self, patToken: str, repoID: str, prID: str) -> List[dict]:
        raise NotImplementedError("Please Implement method invokePRCommitsAPICall")

    # ---- orchestration ----

    def aggregatePullRequestActivity(self, groupByColumns: List[str], patToken: str) -> pd.DataFrame:
        """Collect all PR activity and aggregate it with the subclass's measures."""
        return self.collectPullRequestActivity(patToken) \
            .groupby(groupByColumns) \
            .agg(self.AggregationMeasures())

    def collectPullRequestActivity(self, patToken: str) -> pd.DataFrame:
        """Gather PR, commit, comment and workitem rows for every configured repo.

        :raises TypeError: when the repo list is empty or no PAT token is given
        """
        recordList = []
        if not self.repos:
            raise TypeError("Repo list is empty")
        if not patToken:
            raise TypeError("Unable to resolve the PAT token: {}".format(patToken))
        for repo in self.repos:
            rsp = self.invokePRsByProjectAPICall(patToken, repo)
            for _, pr in pd.DataFrame.from_dict(rsp).iterrows():
                commentsRsp = self.invokePRCommentThreadsAPICall(patToken, pr['repository']['id'], pr['pullRequestId'])
                commitsRsp = self.invokePRCommitsAPICall(patToken, pr['repository']['id'], pr['pullRequestId'])
                recordList += self.ParsePullRequest(pr) + self.ParsePullRequestCommits(commitsRsp, patToken, repo)
                for comments in commentsRsp:
                    recordList += self.ParsePullRequestComments(comments['comments'], repo)
        # Workitems are team-scoped, fetched once and attributed to the first repo.
        workitemResponse = self.invokeWorkitemsAPICall(patToken, self.teamId)
        recordList += self.ParseWorkitems(self.repos[0], workitemResponse)
        return pd.DataFrame(recordList)
|
import d2lzh as d2l
from DL_model import Net
from process_dataset import load_mnist
if __name__ == '__main__':
    # Try to use a GPU context, falling back to CPU.
    ctx=d2l.try_gpu()
    # Load the MNIST dataset.
    train_images,train_labels,test_images,test_labels=load_mnist()
    # Initialise the model wrapper with the data and compute context.
    model=Net(x_train=train_images,y_train=train_labels,\
        x_test=test_images,y_test=test_labels,\
        labels_num=10,ctx=ctx)
    # Build the network.
    LeNet=model.LeNet()
    # Search the training hyper-parameters (learning rate, batch size).
    best_lr,best_batch_size=model.training_arg(net=LeNet)
    # Train the model with the best hyper-parameters found.
    LeNet=model.training(net=LeNet,batch_size=best_batch_size,lr=best_lr,num_epochs=25,istraining=True)
    # Evaluate predictions on the test set.
    model.test_pred_MNIST()
import requests,sys,os
import time
import math
from autotrading.machine.base_machine import Machine
from autotrading.machine.xcoin_api_client import *
import configparser
import json
import base64
import hashlib
import hmac
import urllib
# cur_dir = os.path.abspath(os.curdir)
# sys.path.append(cur_dir)
# PROJECT_HOME = cur_dir
# print(PROJECT_HOME)
# print(sys.path)
# nameconfig = os.path.abspath('config.ini')
# print(nameconfig)
class BithumbMachine(Machine):
    """Bithumb exchange client implementing the Machine interface.

    Wraps Bithumb's public REST endpoints (ticker, transaction history)
    directly and delegates the private, signed endpoints (balance, orders,
    trade) to XCoinAPI. API keys are read from conf/config.ini.
    """

    BASE_API_URL = "https://api.bithumb.com"
    # Coin symbols this client accepts ("ALL" for aggregate queries).
    TRADE_CURRENCY_TYPE = ["BTC", "ETH", "DASH", "LTC", "ETC", "XRP", "BCH", "XMR", "ZEC", "QTUM", "BTG", "EOS",
                           "ICX", "VEN", "TRX", "ELF", "MITH", "MCO", "OMG", "KNC", "GNT", "HSR", "ZIL", "ETHOS",
                           "PAY", "WAX", "POWR", "LRC", "GTO", "STEEM", "STRAT", "ZRX", "REP", "AE", "XEM", "SNT",
                           "ADA", "ALL"]

    # 'F:\\BitCoinDev\\conf\\config.ini
    def __init__(self):
        """Load the Bithumb connect/secret keys from the config file."""
        config = configparser.ConfigParser()
        config.read('F:/BitCoinDev/BitCoinDev_Pycharm/conf/config.ini')
        self.CLIENT_ID = config['Bithumb']['connect_key']
        self.CLIENT_SECRET = config['Bithumb']['secret_key']
        # self.USER_NAME = config['Bithumb']['username']

    def exception_Currency(self,currency_type):
        """Raise if currency_type is missing or not in TRADE_CURRENCY_TYPE."""
        if currency_type is None:
            raise Exception("Need to currency_type")
        if currency_type not in self.TRADE_CURRENCY_TYPE:
            raise Exception("Not Support currency type")

    def common_function(self, endpoint, endpoint_item_array):
        """Build the url-encoded body and signed headers for a private API call."""
        uri_array = dict(endpoint_item_array)
        str_data = urllib.parse.urlencode(uri_array)
        nonce = self.get_nonce()
        # Signature payload: endpoint NUL body NUL nonce.
        data = endpoint + chr(0) + str_data + chr(0) + nonce
        utf8_data = data.encode('utf-8')
        key = self.CLIENT_SECRET
        utf8_key = key.encode('utf-8')
        headers = {'Content-Type': 'application/x-www-form-urlencoded',
                   'Api-Key': self.CLIENT_ID,
                   'Api-Sign': self.get_signature(utf8_data, bytes(utf8_key)),
                   'Api-Nonce': nonce}
        return str_data,headers

    def get_ticker(self, currency_type="BTC"):
        """Fetch the public ticker for a coin; returns a dict of string fields."""
        if currency_type is None:
            raise Exception('Need to currency type')
        if currency_type not in self.TRADE_CURRENCY_TYPE:
            raise Exception('Not support current type')
        time.sleep(1)  # crude rate limiting
        ticker_api_path = "/public/ticker/{currency}".format(currency=currency_type)
        url_path = self.BASE_API_URL + ticker_api_path
        res = requests.get(url_path)
        response_json = res.json()
        result = {}
        result["timestamp"] = str(response_json['data']["date"])
        result["last"] = str(response_json['data']["closing_price"])
        result["bid"] = str(response_json['data']["buy_price"])
        result["ask"] = str(response_json['data']["sell_price"])
        result["high"] = str(response_json['data']["max_price"])
        result["low"] = str(response_json['data']["min_price"])
        result["volume"] = str(response_json['data']["volume_1day"])
        return result

    def get_filled_orders(self, currency_type="BTC"):
        """Fetch the public recent transaction history for a coin."""
        if currency_type not in self.TRADE_CURRENCY_TYPE:
            raise Exception("Not support currency Type")
        time.sleep(1)  # crude rate limiting
        # NOTE(review): key spelled 'offfset' — confirm against the Bithumb API.
        params = {'offfset':0,'count':100}
        orders_api_path = "/public/transaction_history/{currency}".format(currency=currency_type)
        url_path = self.BASE_API_URL + orders_api_path
        res = requests.get(url_path, params)
        result = res.json()
        return result

    def microtime(self, get_as_float= False):
        """time.time() as a float, or as a 'fraction seconds' string pair."""
        if get_as_float:
            return time.time()
        else:
            return '%f %d' % math.modf(time.time())

    def usecTime(self):
        """Millisecond-resolution timestamp string used as the API nonce."""
        mt = self.microtime(False)
        mt_array = mt.split(" ")[:2]
        # seconds + first three fractional digits
        return mt_array[1] + mt_array[0][2:5]

    def get_nonce(self):
        """Return a fresh, increasing nonce for request signing."""
        return self.usecTime()

    def get_signature(self, encoded_payload, secret_key):
        """HMAC-SHA512 the payload with the secret key; base64 of the hex digest."""
        signature = hmac.new(secret_key, encoded_payload, hashlib.sha512)
        api_sign = base64.b64encode(signature.hexdigest().encode('utf-8'))
        return api_sign

    def get_wallet_status(self, currency_type="ADA"):
        """Query the private balance endpoint for the given coin via XCoinAPI."""
        if currency_type is None:
            raise Exception("Need to currency_type")
        if currency_type not in self.TRADE_CURRENCY_TYPE:
            raise Exception('Not Support currency type')
        time.sleep(1)  # crude rate limiting
        wallet_status_api_path = "/info/balance"
        endpoint = "/info/balance"
        url_path = self.BASE_API_URL + wallet_status_api_path
        # endpoint_item_array = {
        #     "endpoint" : endpoint,
        #     "currency" : currency_type
        # }
        #
        # str_data, headers = self.common_function(endpoint,endpoint_item_array)
        rgParams = {
            "order_currency": currency_type,
            "payment_currency": "KRW"
        }
        api = XCoinAPI(self.CLIENT_ID,self.CLIENT_SECRET)
        response = api.xcoinApiCall(endpoint,rgParams)
        # res = requests.post(url_path, headers=headers, data=str_data)
        # result = res.json()
        return response

    def get_my_order_status(self, currency_type=None):
        """
        Query the user's currently pending (open) orders.

        :type currency_type: object
        :param currency_type: coin symbol
        :return:
            list of in-progress orders
        """
        self.exception_Currency(currency_type)
        time.sleep(1)
        endpoint ="/info/orders"
        url_path = self.BASE_API_URL + endpoint
        # endpoint_item_array = {
        #     "endpoint" : endpoint,
        #     "currency" : currency_type
        # }
        #
        # str_data, headers = self.common_function(endpoint, endpoint_item_array)
        # res = requests.post(url_path,headers=headers, data=str_data)
        # result = res.json()
        rgParams = {
            "currency": currency_type,
        }
        api = XCoinAPI(self.CLIENT_ID,self.CLIENT_SECRET)
        response = api.xcoinApiCall(endpoint, rgParams)
        return response

    def buy_order(self, currency_type=None, price=None, qty=None, order_type="bid"):
        """
        Place a buy (bid) order.

        :param currency_type(str): coin symbol
        :param price(int): KRW price per single unit
        :param qty: order quantity
        :param order_type:
        :return: order status response
        """
        self.exception_Currency(currency_type)
        time.sleep(1)
        endpoint = "/trade/place"
        url_path = self.BASE_API_URL + endpoint
        # NOTE(review): "payment_currenct" looks misspelled ("payment_currency")
        # — confirm against the Bithumb API before changing.
        endpoint_item_array ={
            "endpoint": endpoint,
            "order_currency": currency_type,
            "payment_currenct": "KRW",
            "units": qty,
            "price": price,
            "type": "bid"
        }
        str_data, headers = self.common_function(endpoint,endpoint_item_array)
        res = requests.post(url_path,headers=headers, data=str_data)
        result = res.json()
        return result

    def sell_order(self, currency_type=None, price=None, qty=None, order_type="ask"):
        """
        Place a sell (ask) order.

        :param currency_type(str): coin symbol
        :param price(int): KRW price per single unit
        :param qty: sell order quantity
        :param order_type:
        :return: sell order status response
        """
        self.exception_Currency(currency_type)
        time.sleep(1)
        endpoint = "/trade/place"
        url_path = self.BASE_API_URL + endpoint
        # NOTE(review): "payment_currenct" looks misspelled ("payment_currency")
        # — confirm against the Bithumb API before changing.
        endpoint_item_array ={
            "endpoint": endpoint,
            "order_currency": currency_type,
            "payment_currenct": "KRW",
            "units": qty,
            "price": price,
            "type": "ask"
        }
        str_data, headers = self.common_function(endpoint,endpoint_item_array)
        res = requests.post(url_path,headers=headers, data=str_data)
        result = res.json()
        return result

    def cancel_order(self, currency_type=None, order_type=None, order_id=None):
        """
        Cancel a pending order.

        :param currency_type(str): coin symbol
        :param order_type(str): side of the order being cancelled (bid/ask)
        :param order_id: ID of the order to cancel
        :return: cancellation status response
        """
        self.exception_Currency(currency_type)
        time.sleep(1)
        endpoint = "/trade/cancel"
        url_path = self.BASE_API_URL + endpoint
        # endpoint_item_array ={
        #     "endpoint": endpoint,
        #     "order_currency": currency_type,
        #     "type": order_type,
        #     "order_id": order_id
        # }
        # str_data, headers = self.common_function(endpoint,endpoint_item_array)
        # res = requests.post(url_path,headers=headers, data=str_data)
        # result = res.json()
        rgParams = {
            "currency": currency_type,
            "type": order_type,
            "order_id": order_id
        }
        api = XCoinAPI(self.CLIENT_ID,self.CLIENT_SECRET)
        response = api.xcoinApiCall(endpoint, rgParams)
        return response
|
class Solution:
    def search(self, nums, target):
        """Search *target* in an ascending array rotated at an unknown pivot.

        :type nums: List[int]
        :type target: int
        :rtype: int -- index of target, or -1 if absent

        Single modified binary search, O(log n). The original version first
        located the pivot with the condition ``(m > 0 or m < l - 1)`` — which
        is always true, so ``nums[m-1]`` could wrap around to the last element
        at ``m == 0`` — and then ran a second binary search on slices.
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            if nums[lo] <= nums[mid]:
                # Left half [lo, mid] is sorted.
                if nums[lo] <= target < nums[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:
                # Right half [mid, hi] is sorted.
                if nums[mid] < target <= nums[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return -1
import random
from collections import deque

# Ad-hoc manual check: search for 2 in the rotated array [2, 1].
s=Solution()
nums = [2,1]
target = 2
ans = s.search(nums, target)
print(ans)

# Randomized regression harness (disabled): builds rotated ranges with
# collections.deque and cross-checks Solution.search against list.index.
# for i in range(0, 1000):
#     nms = deque(list(range(1, random.randint(10, 20))))
#     nms.rotate(random.randint(0,19))
#     # print(nms)
#     target = random.randint(1,len(nms))
#     ans = s.search(list(nms), target)
#     if ans != list(nms).index(target):
#         print(nms, target, ans)
from collections import namedtuple

# Lightweight description of a boto3 call to be mocked in tests.
# Defaults apply to the rightmost fields only, so method/response/
# expected_params stay required while generate_error defaults to False
# and error_code to None.
MockedBoto3Request = namedtuple(
    "MockedBoto3Request",
    ["method", "response", "expected_params", "generate_error", "error_code"],
    defaults=(False, None),
)
def read_text(path):
    """Return the entire text content of *path* (a ``pathlib.Path``-like object)."""
    with path.open() as handle:
        return handle.read()
|
#!/usr/bin/env python3
# This program converts .libsvm sparse format into .arff sparse format
# Development has been inspired by the following converter: https://goo.gl/hp3Cke
#
# Fixes: the shebang named no interpreter ("#!/usr/bin/env"), and the script
# used Python-2-only syntax (print statements, dict.iteritems()); ported to
# Python 3 with identical output.
import argparse
from collections import OrderedDict

argumentParser = argparse.ArgumentParser()
argumentParser.add_argument('-i', '--inputFile')
argumentParser.add_argument('-o', '--outputFile')
arguments = argumentParser.parse_args()

# First pass: collect the label set and map each libsvm feature id to a
# dense Weka attribute index in first-seen order.
labelsSet = set()
libsvmIdToWekaIndexDictionary = OrderedDict()
with open(arguments.inputFile, 'r') as inputFile:
    for line in inputFile:
        lineSplit = line.split()
        label = lineSplit[0]
        features = lineSplit[1:]
        labelsSet.add(label)
        for feature in features:
            featureId = feature.split(':')[0]
            if featureId not in libsvmIdToWekaIndexDictionary:
                libsvmIdToWekaIndexDictionary[featureId] = len(libsvmIdToWekaIndexDictionary)

print("Exporting...")
# Second pass: emit the ARFF header, then one sparse data row per input line.
with open(arguments.outputFile, 'w') as outputFile, open(arguments.inputFile, 'r') as inputFile:
    outputFile.write('@RELATION {}\n'.format(arguments.inputFile))
    for key, value in libsvmIdToWekaIndexDictionary.items():
        outputFile.write('@ATTRIBUTE libsvmId_{}_to_wekaIndex_{} REAL\n'.format(key, value))
    outputFile.write('@ATTRIBUTE class {}{}{}\n'.format('{', ','.join(labelsSet), '}'))
    outputFile.write('@DATA\n')
    for line in inputFile:
        lineSplit = line.split()
        label = lineSplit[0]
        features = lineSplit[1:]
        tempDict = {}
        outputFile.write('{')
        for feature in features:
            featureId, featureValue = feature.split(':')
            tempDict[int(libsvmIdToWekaIndexDictionary[featureId])] = float(featureValue)
        # Weka's sparse format requires attribute indices in ascending order.
        for featureId, featureValue in sorted(tempDict.items()):
            outputFile.write('{} {}, '.format(featureId, featureValue))
        # The class attribute occupies the last column.
        outputFile.write(str(len(libsvmIdToWekaIndexDictionary)))
        outputFile.write(' {}'.format(label))
        outputFile.write('}\n')
class queue:
    """FIFO queue backed by a plain list: enqueue at index 0, dequeue from the end.

    (Name kept lowercase for compatibility with existing callers.)
    """

    def __init__(self):
        self.items = []

    def clear(self):
        """Discard every queued item."""
        self.items = []

    def peek(self):
        """Return, without removing, the item that would be dequeued next."""
        return self.items[-1]

    def isEmpty(self):
        """True when nothing is queued."""
        return not self.items

    def enqueue(self, item):
        """Add *item* at the back of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the front item."""
        return self.items.pop()

    def size(self):
        """Number of queued items."""
        return len(self.items)
# Interactive parking-garage simulator ("LAUGH PARK").
# Queues: arrived = cars in the 10-slot garage, waiting = overflow queue,
# extra/waitb = scratch queues used while removing a specific car.
arrived=queue()
extra=queue()
waiting=queue()
waitb=queue()
print("********LAUGH PARK***********")
print("enter:if it is arrival,input a license nmber. if it is departure,input d license number.\n enter e for exit")
while True:
    x=str(input(">>>>"))
    x=x.lower()
    if len(x)>2:
        # Commands look like "a <plate>" or "d <plate>": split the 2-char
        # command prefix from the license number.
        command=x[:2]
        xx=x[2:]
    if x=='show':
        # Debug view of both queues.
        print(arrived.items)
        print(waiting.items)
    elif x=="e":
        print("Come Again..!")
        break
    elif x=="":
        continue
    elif len(x)<3:
        # Too short to contain "a "/"d " plus a plate; command stays unset.
        print("enter a valid command..")
        continue
    elif command=='a ':
        # Arrival: park if a slot is free, otherwise join the waiting queue.
        print("Welcome to the laugh park.Vehicle with the license number " + str(xx) + " has arrived to the park.")
        if arrived.size()<10:
            arrived.enqueue(xx)
        else:
            print("Sry,park is full.There is no room to park.So plz wait till u gain a room.")
            waiting.enqueue(xx)
    elif command=='d ':
        #print("The vehicle with the license number " + str(x) + " is going to depart from the garage.")
        if xx not in arrived.items:
            if xx not in waiting.items:
                print("The vehicle with the license number " + str(xx) + " is not in the parking")
        # Rotate the garage through `extra`, dropping the departing car.
        while not arrived.size()==0:
            j=arrived.dequeue()
            if j==xx:
                print("The vehicle with the license number " + str(xx) + " departed from the garage.")
            else:
                extra.enqueue(j)
        while not extra.size()==0:
            arrived.enqueue(extra.dequeue())
##            count=0
##            move=arrived.dequeue()
##            count=count+1
        # Same removal dance for a car that was still in the waiting queue.
        if xx in waiting.items:
            print("The vehicle " + str(xx) + " waited for a room in the garage departed from the parking.The number of times it moved within the garage=0.")
            while (not waiting.isEmpty()):
                r=waiting.dequeue()
                if r!=xx:
                    waitb.enqueue(r)
                else:
                    pass
            while (not waitb.isEmpty()):
                waiting.enqueue(waitb.dequeue())
    else:
        continue
##    if x in waiting.items:
##        print("the vehicle with the license number"+ str(x) + "is going to exit.Number of moves done by the vehicle is =0.")
##
##    if x in arrived.items:
##
##
##    while not extra.isEmpty():
##        extra.dequeue(x)
##        arrived.enqueue(x)
    # Promote a waiting car when a slot opened up.
    # NOTE(review): `waiting.size()>10` looks suspicious — it means promotion
    # only happens once more than 10 cars are waiting; `>0` may have been
    # intended. Left unchanged (TODO confirm).
    if arrived.size()<10 and waiting.size()>10:
        print("There is an empty room available in the park.")
        arrived.enqueue(waiting.dequeue())
|
import unittest
from django.test import RequestFactory, TestCase
from django_pgschemas.middleware import TenantMiddleware
from django_pgschemas.utils import get_domain_model, get_tenant_model
# Resolve the concrete tenant/domain model classes from project settings.
# TenantModel may be None when dynamic tenants are not in use (checked in
# setUpClass below).
TenantModel = get_tenant_model()
DomainModel = get_domain_model()
class TenantMiddlewareRedirectionTestCase(TestCase):
    """
    Tests TenantMiddleware redirection from secondary domains/folders to the
    tenant's primary domain or folder.
    """

    @classmethod
    def setUpClass(cls):
        if TenantModel is None:
            raise unittest.SkipTest("Dynamic tenants are not being used")
        super().setUpClass()

    @classmethod
    def setUpTestData(cls):
        cls.factory = RequestFactory()
        tenant1 = TenantModel(schema_name="tenant1")
        tenant2 = TenantModel(schema_name="tenant2")
        # These tests only exercise request routing; skip schema creation.
        tenant1.auto_create_schema = tenant2.auto_create_schema = False
        tenant1.save()
        tenant2.save()
        # tenant1: primary is a domain; redirects come from a secondary
        # domain and a secondary folder.
        DomainModel(domain="tenant1.localhost", tenant=tenant1).save()
        DomainModel(
            domain="tenant1redirect.localhost",
            tenant=tenant1,
            is_primary=False,
            redirect_to_primary=True,
        ).save()
        DomainModel(
            domain="everyone.localhost",
            folder="tenant1redirect",
            tenant=tenant1,
            is_primary=False,
            redirect_to_primary=True,
        ).save()
        # tenant2: primary is a folder on the shared domain.
        DomainModel(domain="everyone.localhost", folder="tenant2", tenant=tenant2).save()
        DomainModel(
            domain="tenant2redirect.localhost",
            tenant=tenant2,
            is_primary=False,
            redirect_to_primary=True,
        ).save()
        DomainModel(
            domain="everyone.localhost",
            folder="tenant2redirect",
            tenant=tenant2,
            is_primary=False,
            redirect_to_primary=True,
        ).save()

    def middleware(self, request):
        """Run *request* through TenantMiddleware with a pass-through get_response."""

        def fake_get_response(request):
            return request

        return TenantMiddleware(fake_get_response)(request)

    def assertPermanentRedirect(self, response, expected_url):
        """Assert *response* is a 301 pointing at *expected_url*.

        Extracted helper: the same three assertions were previously
        duplicated verbatim in every test method.
        """
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, expected_url)
        self.assertEqual(response["Location"], expected_url)

    def test_domain_redirect_to_primary_domain(self):
        request = self.factory.get("/some/random/url/", HTTP_HOST="tenant1redirect.localhost")
        response = self.middleware(request)
        self.assertPermanentRedirect(response, "//tenant1.localhost/some/random/url/")

    def test_folder_redirect_to_primary_domain(self):
        request = self.factory.get(
            "/tenant1redirect/some/random/url/", HTTP_HOST="everyone.localhost"
        )
        response = self.middleware(request)
        self.assertPermanentRedirect(response, "//tenant1.localhost/some/random/url/")

    def test_domain_redirect_to_primary_folder(self):
        request = self.factory.get("/some/random/url/", HTTP_HOST="tenant2redirect.localhost")
        response = self.middleware(request)
        self.assertPermanentRedirect(response, "//everyone.localhost/tenant2/some/random/url/")

    def test_folder_redirect_to_primary_folder(self):
        request = self.factory.get(
            "/tenant2redirect/some/random/url/", HTTP_HOST="everyone.localhost"
        )
        response = self.middleware(request)
        self.assertPermanentRedirect(response, "//everyone.localhost/tenant2/some/random/url/")
|
import glob
import numpy
import os
import subprocess

# Sentinel passed to numpy.loadtxt as a delimiter; assumed never to occur in
# the source files, so each line is kept whole and shape[0] equals the line
# count (used below to count source files).
someNonExistantString = "someNonExistantString"
def getFaultyMassIndexList(process, suffix) :
    """Return indices of mass points whose analyzed output is incomplete.

    A mass point is "faulty" when its output directory holds fewer per-source
    ``.root`` files (excluding "all" merges) than there are source files.

    Fixes: Python-2 ``print`` statements ported to ``print()`` calls, and the
    local variable ``dir`` renamed so it no longer shadows the builtin.

    :param process: sample/process name used to locate the relevant files
    :param suffix: suffix of the analyzed-output directory names
    :return: list of faulty row indices into the mass-point table
    """
    massPointFile = "output_genParam/" + process + "/massPointInfo.txt"
    massPointList = numpy.loadtxt(massPointFile, delimiter = ",")
    nMass = massPointList.shape[0]
    sourceFile = "../sourceFiles/" + process + "/" + process + "_custom.txt"
    # Non-occurring delimiter keeps each line intact, so shape[0] == line count.
    nSourceFile = numpy.loadtxt(sourceFile, delimiter = someNonExistantString, dtype = str).shape[0]
    faultyMassIndexList = []
    for iMass in range(0, nMass) :
        stop1_m = int(massPointList[iMass][0])
        neutralino1_m = int(massPointList[iMass][1])
        outDir = "output_analyzed/" + process + "_" + str(stop1_m) + "_" + str(neutralino1_m) + suffix
        isFaulty = False
        fileList = glob.glob(outDir + "/**")
        fileList = [f for f in fileList if (".root" in f and "all" not in f)]
        if len(fileList) < nSourceFile :
            print("Not all files analyzed (" + str(len(fileList)) + "/" + str(nSourceFile) + "):", massPointList[iMass])
            isFaulty = True
        if isFaulty :
            faultyMassIndexList.append(iMass)
    print("Faulty mass-point indices:", faultyMassIndexList)
    print("Number of faulty mass-points:", len(faultyMassIndexList))
    return faultyMassIndexList
def main() :
    """Check the default T8bbstausnu tau-tau analysis for missing outputs."""
    process = "SMS-T8bbstausnu_XCha0p5_XStau0p75_TuneCUETP8M1_13TeV-madgraphMLM-pythia8"
    suffix = "_tauTau_analysis"
    getFaultyMassIndexList(process, suffix)


if __name__ == "__main__" :
    # main() was previously defined but never invoked; guard added so the
    # script actually runs when executed directly (and stays import-safe).
    main()
|
def calcula_metros(empresa: str, num_talla_S: int, num_talla_M: int, num_talla_L: int) -> str:
    """Report how many metres of fabric *empresa* needs for an order.

    Each suit consumes 2 m (size S), 2.5 m (size M) or 3 m (size L).
    """
    consumo_por_talla = ((num_talla_S, 2), (num_talla_M, 2.5), (num_talla_L, 3))
    metros = sum(cantidad * metros_unitarios for cantidad, metros_unitarios in consumo_por_talla)
    return f"Para fabricar {num_talla_S} trajes talla S, {num_talla_M} trajes talla M, {num_talla_L} trajes talla L, para la empresa {empresa} se necesitan {metros} metros de tela"
# Demo runs covering several order profiles.
print(calcula_metros("Empresa1",2,3,4))
print(calcula_metros("Empresa2",25,3,4))
print(calcula_metros("Empresa3",2,67,4))
print(calcula_metros("Empresa4",2,3,34))
print(calcula_metros("Empresa5",10,20,32))
import datetime
import time

# NOTE(review): these read like unit-to-seconds conversion factors, but the
# names are misleading: Hours is seconds-per-day, Minutes is seconds-per-hour,
# Seconds is seconds-per-minute. None of them is referenced below.
Hours = 24 * 60 * 60
Minutes = 60 * 60
Seconds = 60

# Module-level schedule configuration read by GetSleepSec: run at/every
# `set_hour` hours / `set_minute` minutes / `set_second` seconds.
# Only set_second is enabled by default.
set_day = None
set_hour = None
set_minute = None
set_second = 3
# after_time = True

__all__ = ['GetSleepSec']
class GetSleepSec:
    """Compute how many seconds to sleep until the next scheduled run, based
    on the module-level set_day / set_hour / set_minute / set_second values.

    The first call aligns to the configured wall-clock value; later calls
    return one full period (second -> next minute, minute -> next hour, ...).
    """

    def __init__(self):
        self.run_count = 0  # how many times get() has been called
        # self.cal_method = True  # whether to use the short-interval loop

    def __cal_after_times(self):
        # Plain interval mode: next run is "now + configured offsets".
        # (Original comment said "run once every 100 s"; this path is not
        # called from get() — presumably legacy. TODO confirm.)
        _now = datetime.datetime.now()
        _t = _now
        if set_hour:
            _now += datetime.timedelta(hours=set_hour)
        if set_minute:
            _now += datetime.timedelta(minutes=set_minute)
        if set_second:
            _now += datetime.timedelta(seconds=set_second)
        return time.mktime(_now.timetuple()) - time.mktime(_t.timetuple())

    def __cal_first(self):
        # struct_time fields are read-only, so the math is done on timestamps.
        _now = datetime.datetime.now()
        _t = _now.timetuple()
        _c = time.mktime(_t)
        if set_minute:
            if _t.tm_min < set_minute:
                # _c1 = scheduled value - current value (units left to wait)
                _c1 = set_minute - _t.tm_min
            # NOTE(review): the two branches below compare tm_sec (not tm_min)
            # with set_minute — looks like a copy/paste slip; left unchanged.
            if _t.tm_sec == set_minute:
                _c1 = 0
            if _t.tm_sec > set_minute:
                _c1 = 60 - _t.tm_min + set_minute
            print(_c1)
            _now += datetime.timedelta(minutes=_c1)
        if set_second:
            if _t.tm_sec < set_second:
                _c1 = set_second - _t.tm_sec
            if _t.tm_sec == set_second:
                _c1 = 0
            if _t.tm_sec > set_second:
                _c1 = 60 - _t.tm_sec + set_second
            print(_c1)
            _now += datetime.timedelta(seconds=_c1)
        if set_hour:
            if _t.tm_hour < set_hour:
                _c1 = set_hour - _t.tm_hour
            if _t.tm_hour == set_hour:
                _c1 = 0
            if _t.tm_hour > set_hour:
                # Original comment flagged this line as "not converted":
                # it subtracts tm_min where tm_hour was presumably intended.
                _c1 = 24 - _t.tm_min + set_hour
            _now += datetime.timedelta(hours=_c1)
        _c2 = time.mktime(_now.timetuple()) - _c
        return _c2

    def __setup(self):
        # Common prologue for the loop calculators: current datetime + epoch.
        _now = datetime.datetime.now()
        _t = time.mktime(_now.timetuple())
        return _now, _t

    def __cal_loop_sec(self):
        # Second schedule: next run one minute from now.
        _now, _t = self.__setup()
        if set_second:
            _now += datetime.timedelta(minutes=1)
        _c = time.mktime(_now.timetuple()) - _t
        return _c

    def __cal_loop_min(self):
        # Minute schedule: next run one hour from now.
        _now, _t = self.__setup()
        if set_minute:
            _now += datetime.timedelta(hours=1)
        _c = time.mktime(_now.timetuple()) - _t
        return _c

    def __cal_loop_hours(self):
        # Hour schedule: next run one day from now.
        _now, _t = self.__setup()
        if set_hour:
            _now += datetime.timedelta(days=1)
        # elif set_day:
        #     _now += datetime.timedelta(days=set_day)
        _c = time.mktime(_now.timetuple()) - _t
        return _c

    # def __cal_loop(self):
    #     # struct_time fields are read-only, so convert to a timestamp
    #     _now = datetime.datetime.now()
    #     _t = time.mktime(_now.timetuple())
    #
    #     if set_second:
    #         _now += datetime.timedelta(minutes=1)
    #
    #     if set_minute:
    #         _now += datetime.timedelta(hours=1)
    #
    #     if set_hour:
    #         _now += datetime.timedelta(days=1)
    #
    #     _c = time.mktime(_now.timetuple()) - _t
    #     return _c

    def get(self):
        # Dispatch the configured schedule to the matching calculator:
        # first call aligns to the schedule, subsequent calls wait a period.
        m = None
        if self.run_count == 0:
            m = self.__cal_first()
            self.run_count += 1
        else:
            if set_hour or set_day:
                m = self.__cal_loop_hours()
            elif set_minute:
                m = self.__cal_loop_min()
            elif set_second:
                m = self.__cal_loop_sec()
        if m < 0:
            m = 0
        return m
if __name__ == '__main__':
    def test():
        # Smoke test: sleep according to the schedule, 20 iterations.
        i = 20
        while True:
            # NOTE(review): a fresh GetSleepSec each iteration keeps
            # run_count at 0, so the "first run" path is exercised every
            # time instead of the loop calculators — TODO confirm intended.
            get_sleep = GetSleepSec()
            m = get_sleep.get()
            time.sleep(m)
            print('test正在执行')
            i -= 1
            if i == 0:
                break
    test()
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(requests):
    """Render the site home page (parameter is Django's HttpRequest;
    conventionally named ``request``)."""
    return render(requests,'home.html');
def index(requests):
    """Render the index page (parameter is Django's HttpRequest)."""
    return render(requests,'index.html');
def add(request):
    """Read GET parameters ``n1`` and ``n2`` and render them on results.html.

    The values remain strings here; any arithmetic is presumably done in the
    template (TODO confirm). Raises MultiValueDictKeyError (a KeyError) when
    either parameter is missing, matching the original behavior.

    Fix: removed the unused local ``name = 0``.
    """
    num1 = request.GET["n1"]
    num2 = request.GET["n2"]
    return render(request, 'results.html', {'num1': num1, 'num2': num2})
|
import googlemaps

# SECURITY: a live-looking Google API key is hard-coded below (twice).
# It should be revoked and loaded from the environment or a secrets store.
google_url = 'https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins=Washington,DC&destinations=New+York+City,NY&key='
api_key = 'AIzaSyBnSI5louf-p4SxBGhonqHgL_NYpztZro4'
gmaps = googlemaps.Client(key='AIzaSyBnSI5louf-p4SxBGhonqHgL_NYpztZro4')

# Distance-matrix lookup between two fixed cities.
consulta = gmaps.distance_matrix('london', 'manchester')
# NOTE(review): this loop extracts the same first destination once per
# top-level key of the response dict — probably meant to iterate the rows.
for x in consulta:
    destination_city = consulta['destination_addresses'][0].split(",")[0]
print('')
# Octothorp (the "#" starts a comment)
print("A FEW THINGS")
# a comparison prints a boolean
print(5 < 3)
# 9 / 8 is already true division in Python 3, so float() is a no-op here
print(float(9 / 8))
# equal operands divide to exactly 1.0
print(float(9 / 9))
# precedence: the division happens before the additions/subtraction
print(6 + 9 + 4 + 20 / 6 - 9)
# true division result (use // for integer division)
print(9 / 8)
# variables for the carpool example
cars = 100
space_in_car = 4.0
drivers = 30
passengers = 90
cars_driven = drivers
cars_not_driven = cars - drivers
carpool_capacity = cars_driven * space_in_car
average_passengers_per_car = passengers/cars_driven
print("There are", cars,"cars available.")
print("There are only", drivers,"drivers available.")
print("There will be", cars_not_driven,"empty cars today.")
# NOTE(review): "trasnport" typo is in the user-visible string; left as-is.
print("We can trasnport", carpool_capacity,"people today.")
print("We have", passengers,"passengers to carpool today")
print("We need to put about", average_passengers_per_car,"people in each car")
# %-formatting demo
myName = "Mr. Black" # use %s to use words
myAge = 257 # use %d or %i to use numbers
myHeight = 70.69
myEyes = "brown"
myTeeth = "white"
myHair = "yes, some"
# NOTE(review): "Lat's" typo is in the user-visible string; left as-is.
print("\n\n\n\nLat's talk about %s." % myName)
# %i truncates the float height to an integer
print("He's %i inches tall." % myHeight)
print("He's got %s eyes and %s hair." % (myEyes, myHair))
print("His teeth \tare usually %s depending \n on the coffee." % myTeeth)
print("If I add %d and %d, I get %d." % (myAge, myHeight, myAge + myHeight))
# https://leetcode.com/problems/reverse-integer/
def reverse(x):
    """Reverse the decimal digits of *x*, preserving its sign.

    Returns 0 when the reversed value falls outside the signed 32-bit range
    [-2**31, 2**31 - 1] (LeetCode problem 7 contract).

    Cleanup: the two near-identical sign branches are merged and the leftover
    debug prints removed; the return value is unchanged for every input.
    """
    sign = -1 if x < 0 else 1
    result = sign * int(str(abs(x))[::-1])
    if -2**31 <= result <= 2**31 - 1:
        return result
    return 0
reverse(-1237578) |
from tkinter import *

# Minimal pack() layout demo: one button per window edge.
w = Tk()
# NOTE(review): pack() returns None, so Bbt/Tbt/Lbt/Rbt are all None — keep
# the Button(...) reference separately if the widgets are needed later.
Bbt = Button(w, text='아래쪽', padx=10).pack(side=BOTTOM) # pack bottom first so it claims the bottom edge
Tbt = Button(w, text='위쪽', padx=10).pack()
Lbt = Button(w, text='왼쪽', padx=10).pack(side=LEFT)
Rbt = Button(w, text='오른쪽', padx=10).pack(side=LEFT)
w.mainloop()
|
import jieba
import sys

# Usage: script <input_file> <output_file>
# Writes a whitespace-segmented (jieba word-cut) copy of the input text.
filename = sys.argv[2]          # output path
target_filename = sys.argv[1]   # input path

# Robustness fix: context managers guarantee both files are closed even if
# jieba or the filesystem raises mid-way (the originals leaked on error).
with open(target_filename, 'r', encoding='utf-8') as fn, \
        open(filename, 'w+', encoding='utf-8') as f:
    for line in fn:
        words = jieba.cut(line)
        line_seg = ' '.join(words)
        f.write(line_seg + '\n')
|
from SHIMON.api.error import error_200
from SHIMON.api.api_base import ApiBase
from typing import TYPE_CHECKING
from SHIMON import HttpResponse
if TYPE_CHECKING:
from SHIMON.shimon import Shimon
class ApiStatus(ApiBase):
    """API endpoint reporting the server's status (delegates to status())."""

    # Name this endpoint is registered/called under.
    callname = "status"
    # Status may be queried without unlocking first.
    unlock_required = False

    def __init__(self) -> None:
        super().__init__()

    def entry(_, self: "Shimon", __: None, redirect: bool) -> HttpResponse:
        # The unusual parameter order follows the ApiBase entry convention:
        # the first slot is the ApiStatus instance itself (unused), the
        # second is the Shimon application object.
        return status(self, __, redirect)
def status(self: "Shimon", _: None, redirect: bool) -> HttpResponse:
    """Build the status payload: version, unlock state, developer flag, and
    (only while unlocked) the message policy."""
    unlocked = not self.cache.is_empty()
    payload = {
        "version": self.VERSION,
        "unlocked": unlocked,
        "developer": self.developer,
        "msg policy": self.msg_policy if unlocked else None,
    }
    return error_200(payload, redirect)
|
'''
Given a non-negative integer c, your task is to decide whether there are two integers a and b such that a^2 + b^2 = c.
Example 1:
Input: 5
Output: True
Explanation: 1 * 1 + 2 * 2 = 5
Example 2:
Input: 3
Output: False
'''
import unittest
class Solution:
    def judgeSquareSum(self, c):
        """
        :type c: int
        :rtype: bool

        Decide whether c == a**2 + b**2 for some integers a, b >= 0.

        Fix: the original computed b with float ``** 0.5`` and tested
        ``int(b) == b``, which loses precision for c beyond ~2**52; this
        version uses exact integer square roots.
        """
        import math  # local import keeps the snippet self-contained

        a = math.isqrt(c)
        while a >= 0:
            remainder = c - a * a
            b = math.isqrt(remainder)
            if b * b == remainder:
                return True
            a -= 1
        return False
class TestSolution(unittest.TestCase):
    """Table-driven checks for Solution.judgeSquareSum."""

    def test_case(self):
        # (input c, expected answer)
        examples = (
            (5, True),
            (3, False),
            (0, True),
            (25, True)
        )
        for first, second in examples:
            self.assert_function(first, second)

    def assert_function(self, first, second):
        # Include the failing pair in the assertion message for debugging.
        self.assertEqual(Solution().judgeSquareSum(first), second,
                         msg="first: {}; second: {}".format(first, second))


unittest.main()
|
"""Domain-level storage interaction."""
import abc
from typing import AsyncContextManager
from jupiter.core.domain.auth.infra.auth_repository import AuthRepository
from jupiter.core.domain.big_plans.infra.big_plan_collection_repository import (
BigPlanCollectionRepository,
)
from jupiter.core.domain.big_plans.infra.big_plan_repository import BigPlanRepository
from jupiter.core.domain.chores.infra.chore_collection_repository import (
ChoreCollectionRepository,
)
from jupiter.core.domain.chores.infra.chore_repository import ChoreRepository
from jupiter.core.domain.fast_info_repository import FastInfoRepository
from jupiter.core.domain.habits.infra.habit_collection_repository import (
HabitCollectionRepository,
)
from jupiter.core.domain.habits.infra.habit_repository import HabitRepository
from jupiter.core.domain.inbox_tasks.infra.inbox_task_collection_repository import (
InboxTaskCollectionRepository,
)
from jupiter.core.domain.inbox_tasks.infra.inbox_task_repository import (
InboxTaskRepository,
)
from jupiter.core.domain.metrics.infra.metric_collection_repository import (
MetricCollectionRepository,
)
from jupiter.core.domain.metrics.infra.metric_entry_repository import (
MetricEntryRepository,
)
from jupiter.core.domain.metrics.infra.metric_repository import MetricRepository
from jupiter.core.domain.persons.infra.person_collection_repository import (
PersonCollectionRepository,
)
from jupiter.core.domain.persons.infra.person_repository import PersonRepository
from jupiter.core.domain.projects.infra.project_collection_repository import (
ProjectCollectionRepository,
)
from jupiter.core.domain.projects.infra.project_repository import ProjectRepository
from jupiter.core.domain.push_integrations.email.infra.email_task_collection_repository import (
EmailTaskCollectionRepository,
)
from jupiter.core.domain.push_integrations.email.infra.email_task_repository import (
EmailTaskRepository,
)
from jupiter.core.domain.push_integrations.group.infra.push_integration_group_repository import (
PushIntegrationGroupRepository,
)
from jupiter.core.domain.push_integrations.slack.infra.slack_task_collection_repository import (
SlackTaskCollectionRepository,
)
from jupiter.core.domain.push_integrations.slack.infra.slack_task_repository import (
SlackTaskRepository,
)
from jupiter.core.domain.search.search_repository import SearchRepository
from jupiter.core.domain.smart_lists.infra.smart_list_collection_repository import (
SmartListCollectionRepository,
)
from jupiter.core.domain.smart_lists.infra.smart_list_item_repository import (
SmartListItemRepository,
)
from jupiter.core.domain.smart_lists.infra.smart_list_repository import (
SmartListRepository,
)
from jupiter.core.domain.smart_lists.infra.smart_list_tag_repository import (
SmartListTagRepository,
)
from jupiter.core.domain.user.infra.user_repository import UserRepository
from jupiter.core.domain.user_workspace_link.infra.user_workspace_link_repository import (
UserWorkspaceLinkRepository,
)
from jupiter.core.domain.vacations.infra.vacation_collection_repository import (
VacationCollectionRepository,
)
from jupiter.core.domain.vacations.infra.vacation_repository import VacationRepository
from jupiter.core.domain.workspaces.infra.workspace_repository import (
WorkspaceRepository,
)
class DomainUnitOfWork(abc.ABC):
    """A transactional unit of work from an engine.

    Each property exposes the repository for one entity type. Implementations
    provide concrete repositories bound to the same transaction.
    """

    @property
    @abc.abstractmethod
    def user_repository(self) -> UserRepository:
        """The user repository."""

    @property
    @abc.abstractmethod
    def auth_repository(self) -> AuthRepository:
        """The auth repository."""

    @property
    @abc.abstractmethod
    def workspace_repository(self) -> WorkspaceRepository:
        """The workspace repository."""

    @property
    @abc.abstractmethod
    def user_workspace_link_repository(self) -> UserWorkspaceLinkRepository:
        """The user workspace link repository."""

    @property
    @abc.abstractmethod
    def vacation_collection_repository(self) -> VacationCollectionRepository:
        """The vacation collection repository."""

    @property
    @abc.abstractmethod
    def vacation_repository(self) -> VacationRepository:
        """The vacation repository."""

    @property
    @abc.abstractmethod
    def project_collection_repository(self) -> ProjectCollectionRepository:
        """The project collection repository."""

    @property
    @abc.abstractmethod
    def project_repository(self) -> ProjectRepository:
        """The project database repository."""

    @property
    @abc.abstractmethod
    def inbox_task_collection_repository(self) -> InboxTaskCollectionRepository:
        """The inbox task collection repository."""

    @property
    @abc.abstractmethod
    def inbox_task_repository(self) -> InboxTaskRepository:
        """The inbox task repository."""

    @property
    @abc.abstractmethod
    def habit_collection_repository(self) -> HabitCollectionRepository:
        """The habit collection repository."""

    @property
    @abc.abstractmethod
    def habit_repository(self) -> HabitRepository:
        """The habit repository."""

    @property
    @abc.abstractmethod
    def chore_collection_repository(self) -> ChoreCollectionRepository:
        """The chore collection repository."""

    @property
    @abc.abstractmethod
    def chore_repository(self) -> ChoreRepository:
        """The chore repository."""

    @property
    @abc.abstractmethod
    def big_plan_collection_repository(self) -> BigPlanCollectionRepository:
        """The big plan collection repository."""

    @property
    @abc.abstractmethod
    def big_plan_repository(self) -> BigPlanRepository:
        """The big plan repository."""

    @property
    @abc.abstractmethod
    def smart_list_collection_repository(self) -> SmartListCollectionRepository:
        """The smart list collection repository."""

    @property
    @abc.abstractmethod
    def smart_list_repository(self) -> SmartListRepository:
        """The smart list repository."""

    @property
    @abc.abstractmethod
    def smart_list_tag_repository(self) -> SmartListTagRepository:
        """The smart list tag repository."""

    @property
    @abc.abstractmethod
    def smart_list_item_repository(self) -> SmartListItemRepository:
        """The smart list item repository."""

    @property
    @abc.abstractmethod
    def metric_collection_repository(self) -> MetricCollectionRepository:
        """The metric collection repository."""

    @property
    @abc.abstractmethod
    def metric_repository(self) -> MetricRepository:
        """The metric repository."""

    @property
    @abc.abstractmethod
    def metric_entry_repository(self) -> MetricEntryRepository:
        """The metric entry repository."""

    @property
    @abc.abstractmethod
    def person_collection_repository(self) -> PersonCollectionRepository:
        """The person collection repository."""

    @property
    @abc.abstractmethod
    def person_repository(self) -> PersonRepository:
        """The person repository."""

    @property
    @abc.abstractmethod
    def push_integration_group_repository(self) -> PushIntegrationGroupRepository:
        """The push integration group repository."""

    @property
    @abc.abstractmethod
    def slack_task_collection_repository(self) -> SlackTaskCollectionRepository:
        """The Slack task collection repository."""

    @property
    @abc.abstractmethod
    def slack_task_repository(self) -> SlackTaskRepository:
        """The Slack task repository."""

    @property
    @abc.abstractmethod
    def email_task_collection_repository(self) -> EmailTaskCollectionRepository:
        """The email task collection repository."""

    @property
    @abc.abstractmethod
    def email_task_repository(self) -> EmailTaskRepository:
        """The email task repository."""

    @property
    @abc.abstractmethod
    def fast_into_repository(self) -> FastInfoRepository:
        """The fast info repository (historically misspelled name)."""

    @property
    def fast_info_repository(self) -> FastInfoRepository:
        """Correctly spelled alias for :attr:`fast_into_repository`.

        The abstract property keeps its original (misspelled) name so existing
        implementations and callers are unaffected; new code should use this
        alias.
        """
        return self.fast_into_repository
class DomainStorageEngine(abc.ABC):
    """A storage engine of some form."""

    @abc.abstractmethod
    def get_unit_of_work(self) -> AsyncContextManager[DomainUnitOfWork]:
        """Build a unit of work (an async context manager scoping one
        transactional unit of work)."""
class SearchUnitOfWork(abc.ABC):
    """A unit of work from a search engine."""

    @property
    @abc.abstractmethod
    def search_repository(self) -> SearchRepository:
        """The search repository."""
class SearchStorageEngine(abc.ABC):
    """A storage engine of some form for the search engine."""

    @abc.abstractmethod
    def get_unit_of_work(self) -> AsyncContextManager[SearchUnitOfWork]:
        """Build a unit of work (an async context manager scoping one
        search unit of work)."""
|
import numpy as np
import pytest
from pyswallow.handlers.boundary_handler import (
StandardBH, NearestBH, ReflectiveBH, RandomBH
)
class TestBoundaryHandler:
    """Checks for the four pyswallow boundary handlers over a [0, 10]^2 box
    using one in-bounds and two out-of-bounds probe positions."""

    @pytest.fixture
    def standard_bh(self):
        return StandardBH()

    @pytest.fixture
    def bounds(self):
        # Lower/upper corner of the search box.
        lb = np.array([0, 0])
        ub = np.array([10, 10])
        return lb, ub

    @pytest.mark.parametrize('pos', [[-5, -5], [5, 5], [15, 15]])
    def test_standard(self, standard_bh, pos):
        # StandardBH is a no-op: positions pass through unchanged.
        pos = np.array(pos, dtype=np.float32)
        ret_pos = standard_bh(pos)
        assert np.array_equal(pos, ret_pos)

    # TODO >> Write better tests for these boundary handlers.
    @pytest.mark.parametrize('pos', [[-5, -5], [5, 5], [15, 15]])
    def test_nearest(self, bounds, pos):
        # NearestBH clips to the closest bound.
        lb, ub = bounds
        arr_pos = np.array(pos, dtype=np.float32)
        bh = NearestBH(lb, ub)
        ret_pos = bh(arr_pos)
        assert len(pos) == len(ret_pos)
        assert np.logical_and(ret_pos >= lb, ret_pos <= ub).all()
        if pos == [-5, -5]:
            assert np.array_equal(ret_pos, lb)
        elif pos == [5, 5]:
            assert np.array_equal(ret_pos, arr_pos)
        elif pos == [15, 15]:
            assert np.array_equal(ret_pos, ub)

    @pytest.mark.parametrize('pos', [[-5, -5], [5, 5], [15, 15]])
    def test_reflective(self, bounds, pos):
        # ReflectiveBH mirrors overshoot back inside: -5 and 15 both map to 5.
        lb, ub = bounds
        arr_pos = np.array(pos, dtype=np.float32)
        bh = ReflectiveBH(lb, ub)
        ret_pos = bh(arr_pos)
        assert len(pos) == len(ret_pos)
        assert np.logical_and(ret_pos >= lb, ret_pos <= ub).all()
        if pos == [-5, -5]:
            assert np.array_equal(ret_pos, np.array([5, 5]))
        elif pos == [5, 5]:
            assert np.array_equal(ret_pos, arr_pos)
        elif pos == [15, 15]:
            assert np.array_equal(ret_pos, np.array([5, 5]))

    @pytest.mark.parametrize('pos', [[-5, -5], [5, 5], [15, 15]])
    def test_random(self, bounds, pos):
        # RandomBH resamples inside the box; only bounds can be asserted.
        lb, ub = bounds
        arr_pos = np.array(pos, dtype=np.float32)
        bh = RandomBH(lb, ub)
        ret_pos = bh(arr_pos)
        assert len(pos) == len(ret_pos)
        assert np.logical_and(ret_pos >= lb, ret_pos <= ub).all()
|
class Grid(object):
    """A puzzle grid: a flat list of cells plus houses (groups of cells).

    Bug fix: ``getCell``/``getHouses`` referenced the bare names ``cells`` and
    ``houses`` (a NameError at call time) instead of the instance attributes.
    """

    # Class-level defaults kept for backward compatibility; __init__ always
    # rebinds per-instance lists.
    cells = []
    houses = []

    def __init__(self, cellList, houseList):
        self.cells = cellList
        self.houses = houseList

    def getCell(self, hor, vert):
        """Iterate the cells whose coordinates equal (hor, vert)."""
        def __cellMatches(cell):
            return cell.horizontal == hor and cell.vertical == vert
        return filter(__cellMatches, self.cells)

    def getHouses(self, hor, vert):
        """Iterate the houses that contain a cell at (hor, vert)."""
        def __houseMatches(house):
            goodCell = False
            for i in house.cells:
                if(i.horizontal == hor and i.vertical == vert):
                    goodCell = True
            return goodCell
        return filter(__houseMatches, self.houses)

    def __str__(self):
        return 'stub'
# =============================================================================
# Created By : Giannis Kostas Georgiou
# Project : Machine Learning for Fish Recognition (Individual Project)
# =============================================================================
# Description : File to load the saved model, predict class of images
# of its test set and plot and save its roc curve
# To be used after test set is converted to .pickle files and
# model is trained and saved
# How to use : Replace variables in CAPS according to needs of the test set
# =============================================================================
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# ^ Fix: "%matplotlib inline" is an IPython magic and a SyntaxError in a
#   plain .py file; kept only as a comment for notebook use.
import seaborn as sns
import sklearn.metrics as metrics
from sklearn.metrics import confusion_matrix
import os
import cv2
import tensorflow as tf
import pickle
import sys

save_roc_loc='PATH TO WHERE THE ROC SHOULD BE SAVED'
name_roc='NAME OF THE ROC'
name_roc_zoomed='NAME OF THE ZOOMED ROC'
data_dir='PATH TO TEST SET DIRECTORY'
model_path='PATH TO SAVED MODEL'

#insert arrays of test set
#features
X=pickle.load(open(data_dir+"X_combined_test_data.pickle","rb"))
#class
y=pickle.load(open(data_dir+"y_combined_test_data.pickle","rb"))

#normalize the test set (pixel values 0-255 -> 0-1)
X=X/255.0

#loads the saved model from the path specified
model=tf.keras.models.load_model(model_path+"Binary_Filters_32,32,64,64,64-Dense_64_BEST")

#predict the classes of the images in the test set
# NOTE(review): Sequential.predict_classes was removed in TF 2.6; on newer
# TF use (model.predict(X) > 0.5).astype("int32") instead — TODO confirm
# the project's TF version.
y_pred = model.predict_classes(X)

#produce the roc curve based on y and predicted y
fpr_keras, tpr_keras, thresholds_keras = roc_curve(y, y_pred)
#calculate the auc of the graph
auc_keras = auc(fpr_keras, tpr_keras)

#plot and save the ROC curve with y limit 0-1 and x limit 0-1 (normal graph)
plt.figure(figsize=(12,12))
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_keras, tpr_keras, label='(area = {:.3f})'.format(auc_keras))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend()
plt.savefig(save_roc_loc+name_roc, dpi=400)
plt.show()

#plot and save the ROC curve with y limit 0.9-1 and x limit 0-0.1 (zoomed in graph)
plt.figure(figsize=(12,12))
plt.xlim(0, 0.1)
plt.ylim(0.9, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_keras, tpr_keras)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.savefig(save_roc_loc+name_roc_zoomed, dpi=400)
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.