text stringlengths 38 1.54M |
|---|
# Minimal logging demo: send records of several severities to a log file,
# then emit one more record after a pause so the timestamps visibly differ.
import logging
import time

# Configure the root logger once: append to keepwriting.log, keep DEBUG and up.
logging.basicConfig(
    filename='keepwriting.log',
    filemode="a",
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s',
)

logging.debug('This message should go to the log file')
logging.info('So should this')
logging.warning('And this, too')

# Wait so the final record's timestamp is clearly later than the others.
time.sleep(15)
logging.warning('I am late')
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class MyspidersItem(scrapy.Item):
    """Empty placeholder item left over from the Scrapy project template."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class CompanyItem(scrapy.Item):
    """A scraped company profile.

    Each attribute is a ``scrapy.Field`` container; actual values are filled
    in by the spider at crawl time (no types are enforced here).
    """
    id = scrapy.Field()        # company identifier on the source site
    name = scrapy.Field()
    contact = scrapy.Field()   # contact person
    info = scrapy.Field()      # free-form company description
    Tel = scrapy.Field()       # NOTE: capitalized name kept for compatibility
    address = scrapy.Field()
    nature = scrapy.Field()    # company ownership type
    industry = scrapy.Field()
    scale = scrapy.Field()     # company size
    email = scrapy.Field()
    logo = scrapy.Field()
    job = scrapy.Field()       # related job posting(s)
class JobItem(scrapy.Item):
    """A scraped job posting.

    Each attribute is a ``scrapy.Field`` container; actual values are filled
    in by the spider at crawl time (no types are enforced here).
    """
    id = scrapy.Field()          # job identifier on the source site
    name = scrapy.Field()        # job title
    company = scrapy.Field()
    time = scrapy.Field()        # posting time/date
    department = scrapy.Field()
    contact = scrapy.Field()
    Tel = scrapy.Field()         # NOTE: capitalized name kept for compatibility
    address = scrapy.Field()
    education = scrapy.Field()   # required education level
    nature = scrapy.Field()      # employment type
    experience = scrapy.Field()  # required experience
    age = scrapy.Field()
    location = scrapy.Field()
    salary = scrapy.Field()
    schedule = scrapy.Field()    # working hours
    welfare = scrapy.Field()     # benefits
    response_and_require = scrapy.Field()  # responsibilities and requirements
|
from __future__ import print_function

# Demo driver: build a Riemann surface from a plane algebraic curve and
# print its monodromy and homology data.
print("===== abelfunctions Demo Script =====")
# Star import supplies RiemannSurface (and friends) from abelfunctions.
from abelfunctions import *
from sympy.abc import x,y

# Alternative, more complicated curve kept for experimentation:
#f = y**3 + 2*x**3*y - x**7
# Curve y^2 = (x-1)(x+1)(x-2)(x+2).
f = y**2 - (x-1)*(x+1)*(x-2)*(x+2)
X = RiemannSurface(f, x, y)
print("\n\tRS")
print(X)

# Monodromy data as returned by X.monodromy(); G is not used below.
print("\n\tRS: monodromy")
base_point, base_sheets, branch_points, mon, G = X.monodromy()
print("\nbase_point:")
print(base_point)
print("\nbase_sheets:")
for s in base_sheets: print(s)
print("\nbranch points:")
for b in branch_points: print(b)
print("\nmonodromy group:")
for m in mon: print(m)

# Homology data: genus, cycles and their linear combinations.
print("\n\tRS: homology")
hom = X.homology()
print("genus:")
print(hom['genus'])
print("cycles:")
for c in hom['cycles']: print(c)
print("lincomb:")
print(hom['linearcombination'])

# Compute one c-cycle per homology cycle (result kept but not printed).
print("\n\tRS: computing cycles")
gamma = [X.c_cycle(i) for i in range(len(hom['cycles']))]

# Period-matrix demo disabled (Python 2 print syntax):
# print "\n\tRS: period matrix"
# A,B = X.period_matrix()
# print A
# print B
# print
# print "abelfunctions: tau =", B[0][0]/A[0][0]
# print "maple: tau = 0.999999 + 1.563401 I"
|
import os
import rq_dashboard
from flask import Flask, Response
# We need to use an external dependency for env management because pycharm does not currently support .env files
from flask_cors import CORS
from flask_talisman import Talisman
import recommender.utilities.json_encode_utilities
from recommender.api.utils.json_content_type import (
generate_json_response,
json_content_type,
)
from recommender.db_config import DbBase, engine
from recommender.env_config import PROD
def start_api(test_config=None):
    """Application factory: create and configure the recommender Flask app.

    :param test_config: optional mapping of settings used by tests; when
        None, the instance ``config.py`` is loaded instead (if present).
    :returns: the configured :class:`flask.Flask` application.
    """
    # create and configure the app
    app = Flask(__name__)
    # Only the configured front-end origin may call us; credentials are
    # required so auth cookies are accepted cross-origin.
    CORS(app, origins=os.environ["FE_ORIGIN"], supports_credentials=True)
    Talisman(
        app,
        force_https=PROD,
        content_security_policy={
            "default-src": "'self'",
            "img-src": "*",
            "script-src": "'self' 'unsafe-inline' 'unsafe-eval'",
            "style-src": "'self' 'unsafe-inline'",
        },
    )

    # Blueprints are imported here rather than at module top, matching the
    # file's existing style (presumably to avoid circular imports).
    from recommender.api.auth_route import auth
    from recommender.api.business_route import business
    from recommender.api.business_search_route import business_search
    from recommender.api.rcv_route import rcv

    # FIX: the error handler below references
    # recommender.api.utils.http_exception, but this module never imported
    # that submodule, so the attribute lookup on the package could raise
    # AttributeError (it only worked if another import happened to load the
    # submodule first). Import it explicitly.
    import recommender.api.utils.http_exception

    app.register_blueprint(auth, url_prefix="/auth")
    app.register_blueprint(business, url_prefix="/business")
    app.register_blueprint(business_search, url_prefix="/business-search")
    app.register_blueprint(rcv, url_prefix="/rcv")

    # queue metrics
    from recommender.api.global_services import auth_route_utils

    @rq_dashboard.blueprint.before_request
    def verify_auth():
        # The rq dashboard is only gated behind auth in production.
        if PROD:
            auth_route_utils.require_user_before_request()

    app.config["RQ_DASHBOARD_REDIS_URL"] = os.environ["REDIS_URL"]
    app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")

    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile("config.py", silent=True)
    else:
        # load the test config if passed in
        app.config.from_mapping(test_config)

    # init tables (after everything has been imported by the services)
    DbBase.metadata.create_all(engine)

    @app.errorhandler(recommender.api.utils.http_exception.HttpException)
    def handle_http_exception(
        error: recommender.api.utils.http_exception.HttpException,
    ) -> Response:
        # Serialize project HttpExceptions into the standard JSON envelope.
        return generate_json_response(
            data=None,
            additional_root_params={
                "message": error.message,
                "errorCode": None if error.error_code is None else error.error_code,
                **({} if error.additional_data is None else error.additional_data),
            },
            status=error.status_code,
        )

    @app.route("/wake", methods=["GET"])
    @json_content_type()
    def wake():
        # this route exists to spin up the backend server when the auth loads the site
        return None

    return app
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 24 14:55:44 2019
@author: zwala
"""
##Conversions of int()
num1=raw_input("Please enter x: ")
num2=raw_input("Please enter y: ")
num=num1+num2
print "The Concatenation: ",num #Concatenation
num1=int(num1)
num2=int(num2)
num=num1+num2
print "The Addition is: ", num # Does the addition now
num1=int(num1)
num2=float(num2)
num=num1+num2
print "One value is int, 2ns is float: ",num
num1=float(num1)
num2=float(num2)
num=num1+num2
print "The Float version: ",num
num1=bool(num1)
num2=bool(num2)
num=num1+num1
print "The Bool Version(Generally returns 1forT& 0forF:",num
|
def Primefactors(N):
    """For every n < N, count prime factors and divisors in O(N log N).

    Returns a pair of lists ``(omega, tau)`` where ``omega[n]`` is the number
    of prime factors of n counted with multiplicity and ``tau[n]`` is the
    number of divisors of n (entries for n < 2 keep their initial values).
    """
    residue = list(range(N))   # residue[n]: n with processed primes divided out
    prime_counts = [0] * N
    divisor_counts = [1] * N
    for p in range(2, N):
        # By the time we reach a composite p, all its prime factors have been
        # divided out and residue[p] == 1, so only primes pass this test.
        if residue[p] == 1:
            continue
        for multiple in range(p, N, p):
            # Strip every power of p from this multiple, counting them.
            exponent = 0
            while residue[multiple] % p == 0:
                residue[multiple] //= p
                exponent += 1
            prime_counts[multiple] += exponent
            # Divisor count is the product of (exponent + 1) over all primes.
            divisor_counts[multiple] *= exponent + 1
    return prime_counts, divisor_counts
def factors(N):
    """Return all divisors of N >= 1 (in no particular order) as a deque.

    Divisors are collected in pairs (i, N // i) for i up to sqrt(N), so this
    runs in O(sqrt(N)).
    """
    from collections import deque
    from math import isqrt

    ret = deque()
    # BUG FIX: int(N ** (1/2)) can be off by one for large N because of
    # float rounding (e.g. near perfect squares around 10**16);
    # math.isqrt is exact for arbitrary-size integers.
    middle = isqrt(N)
    for i in range(1, middle):
        if N % i == 0:
            ret.append(i)
            ret.append(N // i)
    # Handle i == middle separately so a perfect square adds sqrt(N) once.
    if N % middle == 0:
        ret.append(middle)
        if middle != N // middle:
            ret.append(N // middle)
    return ret
# Driver: read an upper bound from stdin and print both tables.
N = int( input())
P, F = Primefactors(N)
print(P, F)
# print(factors(N).pop())
# # (translation) wasn't deque only supposed to be imported locally???
# print([ r for r in factors(N)])
# Timing notes for factors() in CPython:
# factors @python
# 15! > 10**12: 241ms
# 2**40 > 10**12: 224ms
|
import json
import jsonschema
import uuid
import unittest
import websockets
from tornado.testing import AsyncHTTPTestCase
from tornado.httpclient import AsyncHTTPClient
import broadway.api.definitions as definitions
from broadway.api.utils.bootstrap import (
initialize_global_settings,
initialize_database,
initialize_app,
)
from broadway.api.flags import app_flags
import tests.api._utils.database as database_utils
# Fixture identifiers shared by the mixins below. Per the seeding in
# AsyncHTTPMixin.get_app(): course1 accepts token1 plus the query token;
# course2 accepts token1 and token2.
MOCK_COURSE1 = "mock_course1"
MOCK_COURSE2 = "mock_course2"
MOCK_CLIENT_TOKEN1 = "12345"
MOCK_CLIENT_TOKEN2 = "67890"
MOCK_CLIENT_QUERY_TOKEN = "C4OWEM2XHD"
class AsyncHTTPMixin(AsyncHTTPTestCase):
    """Base test case: boots the broadway app from a fixture config and seeds
    the database with two mock courses; the database is cleared after each
    test."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get_app(self):
        """
        Note: this is called by setUp in AsyncHTTPTestCase
        """
        flags = app_flags.parse(
            [
                "tests/api/_fixtures/config.json",
                "--token",
                "test",
                "--debug",
                "--course-config=''",
                # provide an empty path for testing course config
            ],
            use_exc=True,
        )
        self.app = initialize_app(initialize_global_settings(flags), flags)
        initialize_database(self.app.settings, flags)
        # Seed the courses: course1 takes token1 (+ a query token), course2
        # takes both client tokens.
        database_utils.initialize_db(
            self.app.settings,
            {
                MOCK_COURSE1: {
                    "tokens": [MOCK_CLIENT_TOKEN1],
                    "query_tokens": [MOCK_CLIENT_QUERY_TOKEN],
                },
                MOCK_COURSE2: {
                    "tokens": [MOCK_CLIENT_TOKEN1, MOCK_CLIENT_TOKEN2],
                    "query_tokens": [],
                },
            },
        )
        return self.app

    def get_token(self):
        # The cluster-wide token passed via --token above.
        return self.app.settings["FLAGS"]["token"]

    def get_header(self, override=None):
        # Build a Bearer auth header; uses the cluster token unless an
        # override is given.
        # NOTE(review): `if not override` also rejects an empty-string
        # override and silently falls back to the cluster token — confirm
        # that is intended.
        return {
            "Authorization": "Bearer "
            + (self.get_token() if not override else override)
        }

    def tearDown(self):
        super().tearDown()
        database_utils.clear_db(self.app.settings)
class ClientMixin(AsyncHTTPMixin):
    """Helpers that impersonate a course client (instructor) using the REST
    API: uploading grading configs, starting runs and querying their state.

    Every helper asserts the HTTP status against ``expected_code`` and, on
    200, returns the response's ``data`` payload.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pre-built Bearer headers for the seeded course tokens.
        self.client_header1 = {"Authorization": "Bearer " + MOCK_CLIENT_TOKEN1}
        self.client_header2 = {"Authorization": "Bearer " + MOCK_CLIENT_TOKEN2}
        self.client_header_query_token = {
            "Authorization": "Bearer " + MOCK_CLIENT_QUERY_TOKEN
        }
        self.course1 = MOCK_COURSE1
        self.course2 = MOCK_COURSE2

    def upload_grading_config(
        self, course_id, assignment_name, header, grading_config, expected_code
    ):
        """POST a grading config for an assignment; asserts the status code."""
        response = self.fetch(
            self.get_url(
                "/api/v1/grading_config/{}/{}".format(course_id, assignment_name)
            ),
            method="POST",
            body=json.dumps(grading_config),
            headers=header,
        )
        self.assertEqual(response.code, expected_code)

    def get_grading_config(self, course_id, assignment_name, header, expected_code):
        """GET an assignment's grading config; returns its data on 200."""
        response = self.fetch(
            self.get_url(
                "/api/v1/grading_config/{}/{}".format(course_id, assignment_name)
            ),
            method="GET",
            headers=header,
        )
        self.assertEqual(response.code, expected_code)
        if response.code == 200:
            response_body = json.loads(response.body.decode("utf-8"))
            return response_body["data"]

    def start_grading_run(
        self, course_id, assignment_name, header, students, expected_code
    ):
        """POST a grading run for `students`; returns the run id on 200."""
        response = self.fetch(
            self.get_url(
                "/api/v1/grading_run/{}/{}".format(course_id, assignment_name)
            ),
            method="POST",
            headers=header,
            body=json.dumps(students),
        )
        self.assertEqual(response.code, expected_code)
        if response.code == 200:
            response_body = json.loads(response.body.decode("utf-8"))
            return response_body["data"]["grading_run_id"]

    def get_grading_run_state(self, course_id, grading_run_id, header):
        """GET a run's status; asserts 200 and returns the full data dict."""
        response = self.fetch(
            self.get_url(
                "/api/v1/grading_run_status/{}/{}".format(course_id, grading_run_id)
            ),
            method="GET",
            headers=header,
        )
        self.assertEqual(response.code, 200)
        response_body = json.loads(response.body.decode("utf-8"))
        return response_body["data"]

    def check_grading_run_status(
        self, course_id, grading_run_id, header, expected_code, expected_state=None
    ):
        """GET a run's status and, on 200, assert its 'state' field."""
        response = self.fetch(
            self.get_url(
                "/api/v1/grading_run_status/{}/{}".format(course_id, grading_run_id)
            ),
            method="GET",
            headers=header,
        )
        self.assertEqual(response.code, expected_code)
        if response.code == 200:
            response_body = json.loads(response.body.decode("utf-8"))
            self.assertEqual(response_body["data"].get("state"), expected_state)

    def get_grading_run_env(self, course_id, grading_run_id, header):
        """GET a run's environment; asserts 200 and returns the data."""
        response = self.fetch(
            self.get_url(
                "/api/v1/grading_run_env/{}/{}".format(course_id, grading_run_id)
            ),
            method="GET",
            headers=header,
        )
        self.assertEqual(response.code, 200)
        response_body = json.loads(response.body.decode("utf-8"))
        return response_body["data"]

    def get_grading_job_log(self, course_id, job_id, header, expected_code):
        """GET a single job's log; returns the data on 200."""
        response = self.fetch(
            self.get_url("/api/v1/grading_job_log/{}/{}".format(course_id, job_id)),
            method="GET",
            headers=header,
        )
        self.assertEqual(response.code, expected_code)
        if response.code == 200:
            response_body = json.loads(response.body.decode("utf-8"))
            return response_body["data"]

    def get_course_worker_nodes(self, course_id, scope, header, expected_code):
        """GET the course's worker nodes for `scope`; returns data on 200."""
        response = self.fetch(
            self.get_url("/api/v1/worker/{}/{}".format(course_id, scope)),
            method="GET",
            headers=header,
        )
        self.assertEqual(response.code, expected_code)
        if response.code == 200:
            response_body = json.loads(response.body.decode("utf-8"))
            return response_body["data"]

    def get_course_queue_length(self, course_id, header, expected_code):
        """GET the course's queue length; returns the data on 200."""
        response = self.fetch(
            self.get_url("/api/v1/queue/{}/length".format(course_id)),
            method="GET",
            headers=header,
        )
        self.assertEqual(response.code, expected_code)
        if response.code == 200:
            response_body = json.loads(response.body.decode("utf-8"))
            return response_body["data"]

    def get_grading_job_queue_position(
        self, course_id, grading_job_id, header, expected_code
    ):
        """GET a job's position in the queue; returns the data on 200."""
        response = self.fetch(
            self.get_url(
                "/api/v1/queue/{}/{}/position".format(course_id, grading_job_id)
            ),
            method="GET",
            headers=header,
        )
        self.assertEqual(response.code, expected_code)
        if response.code == 200:
            response_body = json.loads(response.body.decode("utf-8"))
            return response_body["data"]

    def get_grading_job_stream(self, course_id, grading_job_id, header, callback):
        """Subscribe to a job's streaming output; chunks go to `callback`."""
        # We have to create a new client as to not block other requests while receiving
        # streaming chunks
        AsyncHTTPClient().fetch(
            self.get_url("/api/v1/stream/{}/{}".format(course_id, grading_job_id)),
            method="GET",
            headers=header,
            header_callback=lambda _: None,
            streaming_callback=callback,
        )
class GraderMixin(AsyncHTTPMixin):
    """Helpers that impersonate a grader worker talking to the REST API."""

    def register_worker(
        self, header, expected_code=200, worker_id=None, hostname="mock_hostname"
    ):
        """Register a worker node; returns its id when registration succeeds."""
        # Generate a fresh id unless the caller pinned one.
        worker_id = worker_id or str(uuid.uuid4())
        response = self.fetch(
            self.get_url("/api/v1/worker/{}".format(worker_id)),
            method="POST",
            headers=header,
            body=json.dumps({"hostname": hostname}),
        )
        self.assertEqual(response.code, expected_code)
        if expected_code == 200:
            return worker_id

    def poll_job(self, worker_id, header):
        """Poll for a grading job; returns the job payload on 200, otherwise
        the raw HTTP status code."""
        response = self.fetch(
            self.get_url("/api/v1/grading_job/{}".format(worker_id)),
            method="GET",
            headers=header,
        )
        if response.code == 200:
            self.assertEqual(response.code, 200)
            response_body = json.loads(response.body.decode("utf-8"))
            # A job payload always carries its id and the stages to run.
            self.assertIn("grading_job_id", response_body["data"])
            self.assertIn("stages", response_body["data"])
            return response_body["data"]
        return response.code

    def post_job_result(
        self, worker_id, header, job_id, job_success=True, expected_code=200
    ):
        """Report a finished job with canned per-container results and logs."""
        body = {
            "grading_job_id": job_id,
            "success": job_success,
            "results": [{"res": "container 1 res"}, {"res": "container 2 res"}],
            "logs": {"stdout": "stdout", "stderr": "stderr"},
        }
        response = self.fetch(
            self.get_url("/api/v1/grading_job/{}".format(worker_id)),
            method="POST",
            headers=header,
            body=json.dumps(body),
        )
        self.assertEqual(response.code, expected_code)

    def send_heartbeat(self, worker_id, header, expected_code=200):
        """POST an empty-body liveness heartbeat for the worker."""
        response = self.fetch(
            self.get_url("/api/v1/heartbeat/{}".format(worker_id)),
            method="POST",
            body="",
            headers=header,
        )
        self.assertEqual(response.code, expected_code)
class EqualityMixin(unittest.TestCase):
    """Deep-comparison helpers for grading configs, pipelines and stages.

    Each helper first validates both sides against the jsonschema
    definitions, then compares field by field, driven by the expected value.
    """

    def assert_equal_grading_config(self, actual_config, expected_config):
        """Compare two grading configs key by key."""
        jsonschema.validate(actual_config, definitions.grading_config)
        jsonschema.validate(expected_config, definitions.grading_config)
        for config_key in expected_config:
            if config_key == "env":
                # env comparison is order-insensitive.
                self.assertEqual(
                    sorted(actual_config.get(config_key)),
                    sorted(expected_config[config_key]),
                )
            else:
                # NOTE(review): every non-"env" key is compared as a grading
                # pipeline — confirm the config schema holds only "env" and
                # pipeline-valued keys.
                self.assert_equal_grading_pipeline(
                    actual_config.get(config_key), expected_config[config_key]
                )

    def assert_equal_grading_pipeline(self, actual_pipeline, expected_pipeline):
        """Compare two pipelines stage by stage (indexed by expected)."""
        jsonschema.validate(actual_pipeline, definitions.grading_pipeline)
        jsonschema.validate(expected_pipeline, definitions.grading_pipeline)
        for i in range(len(expected_pipeline)):
            self.assert_equal_grading_stage(actual_pipeline[i], expected_pipeline[i])

    def assert_equal_grading_stage(self, actual_stage, expected_stage):
        """Compare two stages; the actual env may be a superset of expected."""
        jsonschema.validate(actual_stage, definitions.grading_stage)
        jsonschema.validate(expected_stage, definitions.grading_stage)
        for stage_key in expected_stage:
            if stage_key == "env":
                # All expected env keys must exist in the actual env ...
                self.assertTrue(
                    set(expected_stage["env"].keys()).issubset(
                        set(actual_stage["env"].keys())
                    )
                )
                # ... and carry equal values.
                for env_key in expected_stage[stage_key]:
                    self.assertEqual(
                        actual_stage["env"].get(env_key), expected_stage["env"][env_key]
                    )
            else:
                self.assertEqual(actual_stage.get(stage_key), expected_stage[stage_key])
class WorkerWSMixin(AsyncHTTPMixin):
    """Helpers for driving the worker websocket API at a low level."""

    # lower level conn
    def worker_ws_conn(self, worker_id, headers):
        """Open (but do not register) a websocket connection for worker_id."""
        url = self.get_url("/api/v1/worker_ws/{}".format(worker_id)).replace(
            "http://", "ws://"
        )
        return websockets.connect(url, extra_headers=headers)

    def worker_ws_conn_register(self, conn, hostname):
        """Send the worker 'register' handshake message."""
        return conn.send(
            json.dumps({"type": "register", "args": {"hostname": hostname}})
        )

    def worker_ws_conn_result(self, conn, job_id, job_success):
        """Send a 'job_result' message with canned container results/logs."""
        args = {
            "grading_job_id": job_id,
            "success": job_success,
            "results": [{"res": "container 1 res"}, {"res": "container 2 res"}],
            "logs": {"stdout": "stdout", "stderr": "stderr"},
        }
        return conn.send(json.dumps({"type": "job_result", "args": args}))

    # FIX: the method above was originally published under this misspelled
    # name; keep the old spelling as an alias so existing callers don't break.
    worker_ws_conn_reulst = worker_ws_conn_result

    # need to be closed
    async def worker_ws(self, worker_id, headers, hostname="eniac"):
        """Open and register a worker websocket; caller must close the conn."""
        conn = await self.worker_ws_conn(worker_id=worker_id, headers=headers)
        await self.worker_ws_conn_register(conn, hostname)
        # The server acks the registration before the socket is usable.
        ack = json.loads(await conn.recv())
        self.assertTrue(ack["success"])
        return conn
class BaseTest(WorkerWSMixin, EqualityMixin, ClientMixin, GraderMixin):
    """Aggregate of all helper mixins; concrete test cases subclass this."""
    pass
|
import webbrowser


class Movie:
    """This class provides information about the movies"""

    # Ratings a caller may use; not enforced anywhere in this class.
    VALID_RATINGS = ["G", "PG", "PG-13", "R"]

    def __init__(
            self, movie_title, movie_storyline, poster_image, trailer_youtube):
        """Initiating with the matched information
        :param movie_title: The title of the movie.
        :param movie_storyline: The storyline of the movie.
        :param poster_image: The poster image of the movie.
        :param trailer_youtube: The trailer video of the movie.
        """
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube

    def show_trailer(self):
        """Showing the website on google chrome with the given youtube url"""
        # NOTE(review): this browser command is macOS-specific, and the
        # backslash line-continuation inside the string literal splices the
        # next line (including any leading whitespace) into the command —
        # confirm it still resolves to a valid `open -a ... %s` invocation.
        webbrowser.get("open -a /Applications\
/Google\ Chrome.app %s").open(self.trailer_youtube_url)
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 28 20:08:05 2016
@author: Liberator
"""
import numpy as np
import matplotlib.pyplot as plt
# Formula describing the evolution of the USA population.
def ewolucjaPopulacji(rok):
    """Logistic population model evaluated at year ``rok``.

    Carrying capacity 19,727,300, midpoint year 1913.25, rate 0.03137/year.
    Accepts scalars or numpy arrays (vectorized arithmetic).
    """
    wykladnik = -0.03137 * (rok - 1913.25)
    return 19727300 / (1 + np.e ** wykladnik)
# Range of years between 1790 and 2000 (exclusive), every 10 years.
x = np.arange(1790, 2000, 10)
# Population at each date (vectorized over the year array).
y = ewolucjaPopulacji(x)
# Draw the plot.
plt.plot(x, y)
plt.show()
|
from django.db import models


class Salas(models.Model):
    """Room ("sala") records; a room may reference a parent room."""

    sal_id = models.AutoField(primary_key=True)
    # Room code, free-form text.
    sal_codigo = models.CharField(max_length=250)
    # Optional self-reference to another Salas row; deleted with its parent.
    salas_sal_id = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True)
|
# Read two integers and report which one is greater (or that they are equal).
a=int(input("enter the value of a:"))
b=int(input("enter the value of b:"))
# BUG FIX: the second branch compared b against an undefined name `c`,
# which raised NameError on every run, and the two independent `if`
# statements mishandled the equal case; use one if/elif/else chain.
if a > b:
    print("a is greater than b")
elif b > a:
    print("b is greater than a")
else:
    print("b is equal a")
|
import math
import sys

# Python 2 script: read a radius from stdin and print the circle's area.
try:
    r = int(raw_input("Please enter radius of a circle "))
except ValueError as e:
    # Non-numeric input.
    print "Invalid radius value"
    print e
    sys.exit(-1)
except IOError as e:
    # stdin could not be read.
    print "IOError"
    print e
    sys.exit(-1)
area = math.pi * r * r
print area
|
#!/usr/bin/env python
from __future__ import print_function
import sys

from eTraveler.clientAPI.connection import Connection

# Connect to the 'Dev' eTraveler server as user 'jrb'.
myConn = Connection('jrb', 'Dev', localServer=False)
# Alternative connections kept for reference:
#myConn = Connection('jrb', 'Dev', localServer=True, debug=True)
#myConn = Connection('jrb', 'Raw', prodServer=True)

rsp = {}
try:
    # Fetch results for one run/step, filtered to amp 3.
    run = '4689D'
    step = 'read_noise_raft'
    rsp = myConn.getRunResults(run=run, stepName=step, itemFilter=('amp', 3))
    # Print the top-level metadata first ...
    for k in rsp:
        if k != 'steps':
            print('Value for key ',k, ' is ',rsp[k])
    # ... then walk each step's schemas.
    steps = rsp['steps']
    for s in steps:
        print('For stepName %s \n' % (s) )
        sv = steps[s]
        for schname in sv:
            print('\n\nGot data for schema %s' % (schname))
            instances = sv[schname]
            # Show at most the first two instances of each schema.
            print('Instance 0: ')
            for field in instances[0]:
                print(field, ':', instances[0][field])
            if len(instances) > 1:
                print('\nInstance 1: ')
                for field in instances[1]:
                    print(field, ':', instances[1][field])
    sys.exit(0)
except Exception as msg:
    print('Operation failed with exception: ')
    print(msg)
    sys.exit(1)
|
from pymem import Pymem

# Address of an in-game structure inside the Rebels.exe process.
# NOTE(review): hard-coded for one specific binary build — verify.
fileStruct = (0x6d9100)
# Offset of the third 4-byte slot in that structure (holds a buffer pointer).
bufferOffset = (3-1)*4
pm = Pymem('Rebels.exe')
bufferPtr = pm.read_uint(fileStruct + bufferOffset)
content = []
idx = 0
# Read the buffer one byte at a time until a terminator byte shows up.
# NOTE(review): 0x0D is CR; 0xF0/0xAD/0xBA look like pieces of the
# 0xBAADF00D fill pattern, presumably marking uninitialized memory —
# confirm these are the intended terminators.
while True:
    ch = pm.read_uchar(bufferPtr + idx)
    if ch in [0x0D, 0xF0, 0xAD, 0xBA]:
        break
    content.append(ch)
    idx += 1
print(''.join([chr(x) for x in content]))
from setuptools import setup

# The long description shown on PyPI comes straight from the README.
with open('README.rst', encoding='utf-8') as file:
    long_description = file.read()

# Runtime dependencies.
install_requires = [
    'aiohttp',
    'aioredis',
    'click',
    'structlog[dev]',
    'websockets',
]
# Test-only dependencies (superset of the runtime set).
tests_require = install_requires + [
    'aioresponses',
    'pytest',
    'pytest-asyncio',
]

setup(
    name='socketshark',
    version='0.2.2',
    url='http://github.com/closeio/socketshark',
    license='MIT',
    description='WebSocket message router',
    long_description=long_description,
    test_suite='tests',
    tests_require=tests_require,
    platforms='any',
    install_requires=install_requires,
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    packages=[
        'socketshark',
        'socketshark.backend',
        'socketshark.metrics',
    ],
    # `socketshark` console command runs socketshark.__main__:run.
    entry_points={
        'console_scripts': [
            'socketshark = socketshark.__main__:run',
        ],
    },
)
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from decimal import Decimal
def bmi(weight, height):
    """Return the body-mass index: weight (kg) divided by height (m) squared."""
    squared_height = height * height
    return weight / squared_height
# Prompt for weight (kg) and height (m); Decimal keeps the input exact.
weight = Decimal(input("Введіть вагу (в кг): "))
height = Decimal(input("Введіть зріст (в м): "))
print(bmi(weight,height))
|
# Copyright 2012 Sam Kleinman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`utils.py` is a module that provide a number of basic functions
for core :mod:`dtf` operations and functions.
"""
import os
import sys
def get_name(test):
    """
    Return the base file name of *test* without directories or extension.

    Note: everything after the *first* dot in the base name is discarded,
    so ``a/b/archive.tar.gz`` yields ``archive``.
    """
    base = os.path.basename(test)
    name, _sep, _rest = base.partition('.')
    return name
def get_module_path(path):
    """
    :param string path: The location within the current directory of
       a Python module.
    :returns: The full absolute path of the module.

    Side effect: the rendered path is also appended to ``sys.path`` so the
    module becomes importable.
    """
    full_path = '/'.join((os.getcwd(), path))
    sys.path.append(full_path)
    return full_path
def expand_tree(path, input_extension='yaml'):
    """
    :param string path: A starting path to begin searching for files.
    :param string input_extension: Optional. The extension (without the
       leading dot) that files must carry to be returned.
    :returns: A list of paths gathered recursively from ``path`` whose
       final extension equals ``input_extension``.

    Files whose names contain no dot are skipped.
    """
    file_list = []
    for root, sub_folders, file_names in os.walk(path):
        for file_name in file_names:
            # BUG FIX: the original indexed rsplit('.', 1)[1] on the joined
            # path, which raises IndexError for names without any dot (and
            # could match dots in directory names); test the file name and
            # guard for the dotless case.
            if '.' in file_name and file_name.rsplit('.', 1)[1] == input_extension:
                file_list.append(os.path.join(root, file_name))
    return file_list
def set_or_default(value, default):
    """Return *value* unless it is None, in which case return *default*."""
    return default if value is None else value
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 10:00:45 2020
@author: cpcle
"""
from os import listdir
from os.path import isfile, join
import re

from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result, ResultByKey

# SECURITY NOTE(review): live service credentials are hard-coded below.
# They should be rotated and loaded from the environment/secrets store
# instead of being committed to source control.
serviceUsername = "afddfa00-603a-477e-8cb0-08bbe6fa4cf4-bluemix"
servicePassword = "ae6025c3e11ffea8876ac30bb5c94d30462943daf1ca0b9e03921e0e5a5c2e61"
serviceURL = "https://afddfa00-603a-477e-8cb0-08bbe6fa4cf4-bluemix.cloudantnosqldb.appdomain.cloud"

client = Cloudant(serviceUsername, servicePassword, url=serviceURL)
client.connect()
myDatabase = client['fca']

# Local folder containing the training text files to upload.
mypath = r'C:\Users\cpcle\OneDrive\Documentos\Celso\Maratona Behind the Code 2020\Desafio 8'
# Only plain files named train_<digits>.txt directly inside mypath.
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))
             and re.search('^train_[0-9]*\.txt$' ,f)]

# Create documents using the sample data.
# Go through each row in the array
for document in onlyfiles:
    with open(join(mypath, document), encoding='utf-8') as f:
        texto = f.read()
    # Create a JSON document
    jsonDocument = {
        "arquivo": document,
        "texto": texto
    }
    # Create a document using the Database API.
    newDocument = myDatabase.create_document(jsonDocument)
client.disconnect()
#!/usr/bin/env python
#coding:utf-8
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait,Select
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import ActionChains
import unittest,time
from util import drivers
class checkbaidu(unittest.TestCase):
    """Selenium smoke-test skeleton; the driver comes from util.drivers."""

    def setUp(self):
        # Obtain a configured webdriver and give lookups a 5s implicit wait.
        d = drivers()
        self.driver = d.driver
        self.driver.implicitly_wait(5)

    def test_tools(self):
        # BUG FIX: the body was the junk expression `+++1` (a no-op unary-plus
        # chain left as a placeholder); make the placeholder explicit.
        # TODO: implement the actual browser checks for this test.
        pass

    def tearDown(self):
        self.driver.quit()
# Allow running this test module directly.
if __name__=="__main__":
    unittest.main()
|
import feedback, utilities, constants

# Subcomponent modules that need a reference to the shared client object.
# NOTE(review): `constants` is imported but not wired here — confirm whether
# it also needs a client.
furthersubcomponents = feedback, utilities


def initialize(client):
    # Hand the shared client to every subcomponent module.
    for furthersubcomponent in furthersubcomponents:
        furthersubcomponent.client = client


# Re-bind this module's `feedback` name to the module's `feedback` attribute
# so importers of this package get that object directly.
# NOTE(review): after this line the feedback *module* is only reachable via
# `furthersubcomponents` — confirm the shadowing is intentional.
feedback = feedback.feedback
|
# -*- coding: utf-8 -*-
"""
Задание 9.3
Создать функцию get_int_vlan_map, которая обрабатывает конфигурационный файл коммутатора
и возвращает кортеж из двух словарей:
* словарь портов в режиме access, где ключи номера портов, а значения access VLAN (числа):
{'FastEthernet0/12': 10,
'FastEthernet0/14': 11,
'FastEthernet0/16': 17}
* словарь портов в режиме trunk, где ключи номера портов, а значения список разрешенных VLAN (список чисел):
{'FastEthernet0/1': [10, 20],
'FastEthernet0/2': [11, 30],
'FastEthernet0/4': [17]}
У функции должен быть один параметр config_filename, который ожидает как аргумент имя конфигурационного файла.
Проверить работу функции на примере файла config_sw1.txt
Ограничение: Все задания надо выполнять используя только пройденные темы.
"""
def get_int_vlan_map(config_filename):
    """Parse a switch configuration file into interface-to-VLAN mappings.

    Returns a tuple ``(access, trunk)``:
      * ``access`` -- {interface name: access VLAN (int)}
      * ``trunk``  -- {interface name: list of allowed VLANs (ints)}

    :param config_filename: name of the configuration file to read.
    """
    access_ports = {}
    trunk_ports = {}
    current_mode = ''
    # Accumulates [interface name, vlan token list] for the current interface.
    pending = []
    with open(config_filename, 'r') as cfg:
        for line in cfg:
            if line.find('interface') != -1:
                # "interface " is 10 characters; the rest is the port name.
                pending.append(line[10:].rstrip())
            elif line.find('vlan') != -1:
                current_mode = 'access' if line.find('access') != -1 else 'trunk'
                # Take everything after "vlan " and split on commas.
                pending.append(line[line.find('vlan') + 5:].rstrip().split(','))
            if len(pending) == 2:
                port, vlans = pending
                if current_mode == 'access':
                    access_ports[port] = int(*vlans)
                elif current_mode == 'trunk' and type(vlans) is list:
                    trunk_ports[port] = [int(item) for item in vlans]
                pending.clear()
    return access_ports, trunk_ports
print(get_int_vlan_map("config_sw1.txt")) |
import unittest
import numpy as np
from scipy.linalg import norm
from flowFieldWavy import *

# Module-level fixture shared by the test case below.
# Spectral sizes used throughout: K=2, L=7, M=3, N=21
# Hand-picked index expressions into the (t, x, z, component, y) axes;
# the trailing comments record which Fourier mode / y location each hits.
ind1 = np.index_exp[2,3,1,2,11] # k= 0, l=-4, m=-2, y=yCheb[11]
ind2 = np.index_exp[1,9,2,0,5] # k=-1, l= 2, m=-1, y=yCheb[5]
ind3 = np.index_exp[3,4,0,2,10] # k= 1, l=-3, m=-3, y=0.
ind4 = np.index_exp[0,8,4,1,17] # k=-2, l= 1, m= 1, y=yCheb[17]
ind5 = np.index_exp[2,9,4,0,:] # k= 0, l= 2, m= 1, y=yCheb
ind6 = np.index_exp[0,3,6,1,:] # k=-2, l=-4, m= 3, y=yCheb
ind7 = np.index_exp[1,10,3,2,:] # k=-1, l= 3, m= 0, y=yCheb

testDict = getDefaultDict()
testDict.update({'L':7,'M':3,'N':21,'K':2,'eps':3.0e-2,'alpha':25., 'beta':10.,'omega':5.,'isPois':1})
K = testDict['K']; L = testDict['L']; M = testDict['M']; N = testDict['N']
vf = flowFieldWavy(flowDict=testDict)
# Mode/index arrays shaped to broadcast over the field's five axes.
lArr = np.arange(-L,L+1).reshape((1,vf.nx,1,1,1))
mArr = np.arange(-M,M+1).reshape((1,1,vf.nz,1,1))
yArr = (vf.y).reshape((1,1,1,1,vf.N))
# First test field: vf_{k,l,m}(y) = l^2 m^2 (1 - y^2).
vf[:] = (lArr**2)*(mArr**2)*(1-yArr**2)
# Second test field with energy decaying in higher modes, one expression
# per velocity component (u, v, w).
vNew = flowFieldWavy(flowDict=testDict.copy())
vNew[:,:,:,0:1] = 1./( (1.+lArr**2)*(1.+mArr**2) ) * (1.-yArr**2)
vNew[:,:,:,1:2] = 1./( (1.+lArr**2)*(1.+mArr**4) ) * (1.-yArr**4)*testDict['eps']
vNew[:,:,:,2:3] = 1./( (2.+lArr**2)*(2.+mArr**2) ) * (1.-yArr**6)*testDict['eps']
class WavyTestCase(unittest.TestCase):
""" Defines a simple flowFieldWavy instance, and verifies that operations on it,
such as ddx(), ddy(), etc.. return accurate flowFieldWavy instances
by checking a few of their elements
IMPORTANT: The tests start with first derivatives, and the derivatives are validated
only for the flowFieldWavy defined below. Do not modify it.
To ensure that I don't modify it by mistake, I'm including a copy of it here:
testDict = getDefaultDict()
testDict.update({'L':7,'M':3,'N':21,'K':2,'eps':3.0e-2,'alpha':25., 'beta':10.,'omega':5.})
K = testDict['K']; L = testDict['L']; M = testDict['M']; N = testDict['N']
vf = flowFieldWavy(flowDict=testDict)
lArr = np.arange(-L,L+1).reshape((1,vf.nx,1,1,1))
mArr = np.arange(-M,M+1).reshape((1,1,vf.nz,1,1))
yArr = (vf.y).reshape((1,1,1,1,vf.N))
vf[:] = (lArr**2)*(mArr**2)*(1-yArr**2)
On second thought, the field above isn't appropriate, because it has much more energy in
higher modes than in lower modes, which isn't what happens in my flow fields
So, a second field with the same dictionary, but with fields defined as
vNew[:,:,:,0] = 1/(l^2 +1) 1/(m^2+1) (1-y^2)
vNew[:,:,:,1] = eps* 1/(l^2 +1) 1/(m^4+1) (1-y^4)
vNew[:,:,:,2] = eps* 1/(l^2 +2) 1/(m^2+2) (1-y^6)
"""
print("Note: when testing for derivatives, if looking at the last modes in x or z,"+\
"remember that the general formulae I use do not apply, because one/some of their "+\
"neighbouring modes are missing")
def test_ddy(self):
partialY = vNew.ddy()
yCheb = vNew.y
eps = vNew.flowDict['eps']
# u.ddy() should be 1/(l*l+1)/(m*m+1)* (-2y)
# v.ddy() should be eps/(l*l+1)/(m**4+1)* (-4y**3)
# w.ddy() should be eps/(l*l+2)/(m**2+2)* (-6y**5)
# ind1 = np.index_exp[2,3,1,2,11] # k= 0, l=-4, m=-2, y=yCheb[11]
# At ind1 (refers to w), w.ddy() = eps/18/6*(-6.*yCheb[11]**5)
self.assertAlmostEqual(partialY[ind1] , -eps/18.*(yCheb[11]**5) )
# ind2 = np.index_exp[1,9,2,0,5] # k=-1, l= 2, m=-1, y=yCheb[5]
# At ind2 (refers to u), u.ddy() = 1/5/2*(-2.*yCheb[5]) = -yCheb[5]/5.
self.assertAlmostEqual(partialY[ind2] , -yCheb[5]/5. )
# ind4 = np.index_exp[0,8,4,1,17] # k=-2, l= 1, m= 1, y=yCheb[17]
# At ind4 (refers to v), v.ddy() = eps/2/2*(-4.*yCheb[17]**3) = -eps*yCheb[17]**3
self.assertAlmostEqual(partialY[ind4] , -eps*yCheb[17]**3 )
# ind5 = np.index_exp[2,9,4,0,:] # k= 0, l= 2, m= 1, y=yCheb
# At ind5 (refers to u), u.ddy() = 1/5/2*(-2.*yCheb) = -yCheb/5
self.assertAlmostEqual(norm(partialY[ind5] +yCheb/5.) , 0. )
# ind6 = np.index_exp[0,3,6,1,:] # k=-2, l=-4, m= 3, y=yCheb
# At ind6 (refers to v), v.ddy() = eps/17/82*(-4.*yCheb**3) = -4*eps/17/82*yCheb**3
self.assertAlmostEqual(norm(partialY[ind6] +4.*eps/17./82.*yCheb**3 ), 0.)
return
def test_ddx(self):
""" Refer to testCases.pdf in ./doc/"""
partialX = vNew.ddx()
y = vNew.y; a = vNew.flowDict['alpha']; eps = vNew.flowDict['eps']; g = eps*a;
# ind5 = np.index_exp[2,9,4,0,:] # k= 0, l= 2, m= 1, y=yCheb
# At ind5 (refers to u),
# u.ddx() = i.2.a.(1-y^2)/5/2 + 2.i.g.y.[ 1/2/1 - 1/10/5 ]
tempVec = 2.j*a*(1.-y**2)/5./2. + 2.j*g*y*(1./2. -1./50.)
self.assertAlmostEqual(norm(partialX[ind5]-tempVec) , 0. )
# ind6 = np.index_exp[0,3,6,1,:] # k=-2, l=-4, m= 3, y=yCheb
# At ind6 (refers to v),
# v.ddx() = i.-4.a.eps.(1-y**4)/17/82 + 4.i.g.eps.y**3.[1/26 - 1/10]/82
tempVec = -4.j*a*eps*(1.-y**4)/17./82. + 4.j*g*eps*(y**3)*(1./26./17.)
self.assertAlmostEqual(norm(partialX[ind6] -tempVec ), 0.)
ind7 = np.index_exp[0,8,3,1,:] # k=-2, l=1, m= 0, y=yCheb
# At ind7 (refers to v),
# v.ddx() = i.a.eps.(1-y**4)/2/1 + 4.i.g.y**3.[1 - 1/5]/1
tempVec = 1.j*a*eps*(1.-y**4)/2. + 4.j*g*eps*y**3*(1./1./2. - 1./5./2.)
self.assertAlmostEqual(norm(partialX[ind7] - tempVec ), 0.)
return
def test_ddz(self):
partialZ = vf.ddz()
yCheb = vf.y; b = vf.flowDict['beta']; eps = vf.flowDict['eps']
# Testing with vf_{k,l,m} (y) = l^2 m^2 (1-y^2)
# tilde{z} derivative of vf for mode (k,l,m) should be
# i.b.m^3.l^2.(1-y^2)+ 2i.eps.b.y.[(l-1)^2.(m-1)^2 - (l+1)^2.(m+1)^2]
self.assertAlmostEqual( partialZ[4,3,2,2,0], 2.j*eps*b*100) # l=-4, m=-1, y=1.
self.assertAlmostEqual( partialZ[2,8,3,1,17], 2.j*eps*b*yCheb[17]*(-4.)) # l=1,m=0
self.assertAlmostEqual( partialZ[3,9,1,0,8],
1.j* b*(-8.)*4.*(1.-yCheb[8]**2) )
# l=2, m=-2, y = yCheb[8]
return
    def test_secondDerivatives(self):
        """Verify ddx2(), ddy2() and ddz2() of vNew against hand-derived
        expressions at the module-level index tuples ind5 (u), ind6 (v)
        and ind7 (w). Refer to eq. 0.5 in /doc/testCases.pdf."""
        ddx2 = vNew.ddx2()
        ddy2 = vNew.ddy2()
        ddz2 = vNew.ddz2()
        y = vNew.y
        # g/gz combine the wall-waviness amplitude with the x/z wavenumbers
        eps = vNew.flowDict['eps']; a = vNew.flowDict['alpha']; b = vNew.flowDict['beta']
        g = eps*a; gz = eps*b
        # ind5 = np.index_exp[2,9,4,0,:] # k= 0, l= 2, m= 1, y=yCheb
        # Field is u_xx
        # Refer to eq. 0.5 in /doc/testCases.pdf
        # First, collecting all terms with 'y' in them so that tempVec is defined as an array
        tempVec = -2.*g*a*y*3./2./1. - 2.*g*a*y*(-5.)/10./5. - 4.*a*a*(1.-y**2)/5./2.
        # Now the terms without y
        tempVec += 2.*g*g/1./2. + 2.*g*g/17./10. - 4.*g*g/5./2.
        self.assertAlmostEqual( norm(ddx2[ind5] - tempVec),0.)
        # ind6 = np.index_exp[0,3,6,1,:] # k=-2, l=-4, m= 3, y=yCheb
        # Testing ddx2(), for v:
        tempVec = 12.*g*g*(y**2)/37./2. - 4.*g*a*(y**3)*(-9.)/26./17. -(16.*a*a*(1.-y**4) + 24.*g*g*y**2)/17./82.
        tempVec = tempVec*eps
        self.assertAlmostEqual( norm(ddx2[ind6] - tempVec),0.)
        # ind5 = np.index_exp[2,9,4,0,:] # k= 0, l= 2, m= 1, y=yCheb
        # Testing ddz2() for x:
        tempVec = -2.*gz*b*y*1./2./1. - 2.*gz*b*y*(-3.)/10./5. - 1.*b*b*(1.-y**2)/5./2.
        tempVec += 2.*gz*gz/1./2. + 2.*gz*gz/17./10. - 4.*gz*gz/5./2.
        self.assertAlmostEqual( norm(ddz2[ind5] - tempVec),0.)
        # ind7 = np.index_exp[1,10,3,2,:] # k=-1, l= 3, m= 0, y=yCheb
        # Testing ddz2(), for w:
        tempVec = 30.*(gz**2)*(y**4)* (1./3./6. + 1./27./6.) \
                - 6.*gz*b*(y**5)* (-1./6./3. - 1./18./3.) \
                - 60.*gz*gz*(y**4)/11./2.
        tempVec = eps* tempVec
        self.assertAlmostEqual( norm(ddz2[ind7] - tempVec),0.)
        # Also testing ddy2() for u (ind5) and v(ind6) and w(ind7):
        tempVec = -2./5./2.*np.ones(y.shape)
        self.assertAlmostEqual( norm(ddy2[ind5] - tempVec),0.)
        tempVec = -12.*(y**2)/17./82.*eps
        self.assertAlmostEqual( norm(ddy2[ind6] - tempVec),0.)
        tempVec = -30.*(y**4)/11./2.*eps
        self.assertAlmostEqual( norm(ddy2[ind7] - tempVec),0.)
        return
def test_norm(self):
""" The norm is defined as integral over X,Y,Z of v*v.conj()
in the transformed space. If I redefine the norm later,
expect this test to fail."""
# vf = \sum_k \sum_l \sum_m l^2 m^2 (1-y^2) e^{i(kwt + lax + mbz)}
# ||vf||^2 := 1/(T.L_x.L_z) \int_t \int_x \int_z \int_y vf * vf.conj() dy dz dx dt
# since \int_{x=0}^{2pi/a} e^{ilax} = 0 for any non-zero integer 'l',
# it can be shown that
# ||v||^2 = \sum_k \sum_l \sum_m (\int_y v_klm * v_klm.conj() dy)
# For vf,
# ||vf||^2 = 16/15 * \sum_{k=-2}^2 \sum_{l=-7}^7 \sum_{m=-3}^3 l^4 m^4
# = 16/15 * 5 \sum_{l=-7}^7 \sum_{m=-3}^3 l^4 m^4
# NOTE: The above norm is only for one scalar, not all of them. For 3 scalars, it should be thrice
l4Sum = 0.; m4Sum = 0.
for l in range(-L,L+1): l4Sum += l**4
for m in range(-M,M+1): m4Sum += m**4
Lx = 2.*np.pi/vf.flowDict['alpha']
Lz = 2.*np.pi/vf.flowDict['beta']
# scal = 1./Lx/Lz/2.
scal = 0.5
#
self.assertAlmostEqual(vf.getScalar().norm()**2,scal*16./3.*l4Sum*m4Sum)
self.assertAlmostEqual(vf.norm()**2, scal*16.*l4Sum*m4Sum)
return
def test_gradient(self):
v0 = vf.getScalar()
v0Grad = v0.grad()
xError = v0Grad.getScalar() - v0.ddx()
yError = v0Grad.getScalar(nd=1) - v0.ddy()
zError = v0Grad.getScalar(nd=2) - v0.ddz()
self.assertAlmostEqual(xError.norm(),0.)
self.assertAlmostEqual(yError.norm(),0.)
self.assertAlmostEqual(zError.norm(),0.)
return
def test_laplacian(self):
lapl = vf.laplacian()
laplSum0 = vf.getScalar().ddx2() + vf.getScalar().ddy2() + vf.getScalar().ddz2()
lapl0 = lapl.getScalar()
ind1 = np.index_exp[2,3,1,0,11] # k= 0, l=-4, m=-2, y=yCheb[11]
ind2 = np.index_exp[1,9,2,0,5] # k=-1, l= 2, m=-1, y=yCheb[5]
ind3 = np.index_exp[3,4,0,0,10] # k= 1, l=-3, m=-3, y=0.
ind4 = np.index_exp[0,8,4,0,17] # k=-2, l= 1, m= 1, y=yCheb[17]
self.assertAlmostEqual( laplSum0[ind1] , lapl0[ind1])
self.assertAlmostEqual( laplSum0[ind2] , lapl0[ind2])
self.assertAlmostEqual( laplSum0[ind3] , lapl0[ind3])
self.assertAlmostEqual( laplSum0[ind4] , lapl0[ind4])
return
def test_convection(self):
vTest = vNew.slice(K=0,L=7,M=0) # Not testing time-modes here
vNewSlice = vTest.copy()
vTest.flowDict['omega'] = 0.
vTest.flowDict['beta'] = 0.
vTest[:,:,:,1:] = 0. # Setting v and w to be zero
convTerm = np.zeros(N,dtype=np.complex)
a = vTest.flowDict['alpha']; b = vTest.flowDict['beta']; eps = vTest.flowDict['eps']
y = vTest.y
_L = vTest.flowDict['L']; _M = vTest.flowDict['M']
for l in range(-_L,_L+1):
for m in range(-_M,_M+1):
convTerm += -1.j*l*a*(1.-y**2)**2/ (l*l +1.)**2 / (m*m+1.)**2 \
+ 2.j*a*eps*y*(1.-y**2) / (m*m+1.)**2 / (l*l+1.) * (
1./( (l+1.)**2 + 1.) - 1./( (l-1.)**2 + 1.) )
convFromClass = vTest.convNL().getScalar()[0,_L,_M,0]
self.assertAlmostEqual(norm(convTerm-convFromClass), 0.)
# Next, we add v u_y
for l in range(-_L,_L+1):
for m in range(-_M, _M+1):
convTerm += -2.*eps*y*(1.-y**4)/( (l**2+1.)**2 * (m*m+1.) * (m**4+1.) )
vTest[0,:,:,1] = vNewSlice[0,:,:,1]
convFromClass = vTest.convNL().getScalar()[0,_L,_M,0]
self.assertAlmostEqual(norm(convTerm-convFromClass), 0.)
# Finally, adding w u_x
for l in range(-_L,_L+1):
for m in range(-_M, _M+1):
convTerm += -1.j*m*b*(1.-y**2) * (1.-y**6)/ (l*l +1.) / (l*l+2.) / (m*m+1.) / (m*m+2.) \
+ 2.j*b*eps*y*(1.-y**6) / (m*m+2.) / (l*l+2.) / (l*l+1.) * (
1./( (m+1.)**2 + 1.) - 1./( (m-1.)**2 + 1.) )
vTest[0,:,:,2] = vNewSlice[0,:,:,2]
convFromClass = vTest.convNL().getScalar()[0,_L,_M,0]
self.assertAlmostEqual(norm(convTerm-convFromClass), 0.)
return
def test_weighting(self):
self.assertAlmostEqual( (vNew - weighted2ff(flowDict=vNew.flowDict, arr=vNew.weighted()) ).norm(), 0.)
return
# @unittest.skip("Need to reset epsilon after debugging")
def test_dict(self):
""" Verifies that flowDict entries have not changed during previous tests"""
self.assertEqual( vf.flowDict['eps'],3.0e-2)
self.assertEqual( vf.flowDict['alpha'],25.)
self.assertEqual( vf.flowDict['beta'],10.)
self.assertEqual( vf.flowDict['omega'],5.)
self.assertEqual( vf.size, 5*15*7*3*21)
self.assertEqual( vf.N, 21)
self.assertEqual( vf.nd,3)
self.assertIsNone(vf.verify())
return
# Run the whole test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Thompson's construction
# Johnathan Joyce
def shunt(infix):
    """Convert an infix regular expression to postfix (shunting-yard).

    Operator precedence: '*' (Kleene star) > '.' (concatenation) >
    '|' (alternation). Ordinary characters pass straight to the output;
    parentheses group subexpressions and never appear in the result.
    """
    # Precedence table; anything absent from it (including '(') ranks as 0
    prec = {'*': 50, '.': 40, '|': 30}
    output = []
    ops = []  # operator stack
    for token in infix:
        if token == '(':
            ops.append(token)
        elif token == ')':
            # Pop operators to the output until the matching '(' shows up
            while ops[-1] != '(':
                output.append(ops.pop())
            ops.pop()  # discard the '(' itself
        elif token in prec:
            # Pop operators of higher-or-equal precedence before pushing
            while ops and prec.get(token, 0) <= prec.get(ops[-1], 0):
                output.append(ops.pop())
            ops.append(token)
        else:
            # Ordinary character: emit immediately
            output.append(token)
    # Flush whatever operators remain
    while ops:
        output.append(ops.pop())
    return "".join(output)
class state:
    """A single NFA state.

    label is the character consumed by the outgoing arrow (None for an
    epsilon transition); edge1/edge2 are the (up to two) successor states.
    Fixed: attributes were previously class-level defaults mutated on
    instances; an __init__ makes each instance own its attributes and
    allows optional construction-time initialisation (backward compatible:
    state() still works and all fields default to None).
    """
    def __init__(self, label=None, edge1=None, edge2=None):
        self.label = label
        self.edge1 = edge1
        self.edge2 = edge2
class nfa:
    """An NFA fragment: its single initial state and single accept state.

    Fixed: the class-level `initial`/`accept` attributes were redundant —
    __init__ always assigns instance attributes, so the class defaults were
    dead and are removed.
    """
    def __init__(self, initial, accept):
        self.initial = initial
        self.accept = accept
def compile(pofix):
    """Compile a postfix regular expression into an NFA fragment
    (Thompson's construction).

    Each literal pushes a two-state fragment; '.', '|' and '*' pop
    fragment(s) from the stack and combine them. The final fragment left
    on the stack is the whole expression's NFA.

    NOTE(review): this function shadows the builtin compile(); renaming it
    would break callers, so the name is kept.
    """
    stack = []
    for token in pofix:
        if token == '.':
            # Concatenation: wire the first fragment's accept state to the
            # second fragment's initial state.
            second = stack.pop()
            first = stack.pop()
            first.accept.edge1 = second.initial
            stack.append(nfa(first.initial, second.accept))
        elif token == '|':
            # Alternation: a fresh start state branches into both fragments;
            # both fragments' accept states feed a fresh accept state.
            second = stack.pop()
            first = stack.pop()
            start = state()
            start.edge1 = first.initial
            start.edge2 = second.initial
            end = state()
            first.accept.edge1 = end
            second.accept.edge1 = end
            stack.append(nfa(start, end))
        elif token == '*':
            # Kleene star: new start can skip the fragment or enter it; the
            # fragment's accept loops back to its own start or exits.
            inner = stack.pop()
            start = state()
            end = state()
            start.edge1 = inner.initial
            start.edge2 = end
            inner.accept.edge1 = inner.initial
            inner.accept.edge2 = end
            stack.append(nfa(start, end))
        else:
            # Literal character: initial --token--> accept
            start = state()
            end = state()
            start.label = token
            start.edge1 = end
            stack.append(nfa(start, end))
    return stack.pop()
# Smoke test left in by the author: compiling two sample postfix expressions
# prints the resulting nfa objects (default object reprs), confirming that
# compile() runs end to end at import time.
print(compile("ab.cd.|"))
print(compile("aa.*"))
def followes(state):
#create a new set, with state as its only member
states = set()
states.add(state)
# check if the state has arrows labelled
if state.label is None:
# check if edge 1 is a state
if state.edge1 is not None:
# if there's an edge1, follow it
states |= followes(state.edge1)
if state.edge2 is not None:
# if theres an edge2, follow it.
states |= followes(state.edge2)
return states
def match(infix, string):
    """Return True when *string* is fully matched by the infix regex *infix*.

    The expression is converted to postfix with shunt(), compiled to an NFA
    with compile(), and the NFA is then simulated over the input string.

    Fixed: the locals `next` (shadowed the builtin) and `nfa` (shadowed the
    nfa class) are renamed; behaviour is unchanged.
    """
    # shunt and compile the regular expression.
    postfix = shunt(infix)
    automaton = compile(postfix)
    # the current set of NFA states and the set reached after one character
    current = set()
    nxt = set()
    # seed with the epsilon-closure of the initial state
    current |= followes(automaton.initial)
    for s in string:
        # advance every current state whose arrow consumes this character
        for c in current:
            if c.label == s:
                nxt |= followes(c.edge1)
        # step: the successors become the current states
        current = nxt
        nxt = set()
    # the string matches iff the accept state is reachable at the end
    return (automaton.accept in current)
# Manual check: run match() over a small grid of regexes and candidate
# strings, printing (matched?, regex, string) for visual inspection.
infixes = ["a.b.c*", "a.(b|d).c*", "(a.(b|d))*", "a.(b.b)*.c"]
strings = ["", "abc", "abbc", "abcc", "abad", "abbbc"]
for i in infixes:
    for s in strings:
        print (match(i, s), i, s)
|
"""Utility functions for [pygame.Rect]s."""
import pygame
def create(center, size):
    """Build a [pygame.Rect] of the given [size] whose center sits at [center]."""
    # Rect is positioned at the origin first; assigning .center moves it so
    # the requested point becomes its midpoint.
    r = pygame.Rect((0, 0), size)
    r.center = center
    return r
|
import os
from configurations import Configuration
class Dev(Configuration):
    """Development settings for the bhs_sales Django project
    (django-configurations class-based settings)."""
    # Stripe keys will be blank for pushing to GitHub
    # Fill out the test mode keys here before running
    STRIPE_PRIVATE_KEY = ""
    STRIPE_PUBLIC_KEY = ""
    DEBUG = True
    TEMPLATE_DEBUG = DEBUG
    # NOTE(review): a hard-coded SECRET_KEY is acceptable for local dev only;
    # any production configuration must override it (e.g. from the environment).
    SECRET_KEY = 'a_v@l1i4s0$vutlyf%vvfyupz-fom1xvgsz(e(7-7u0o&tez7('
    BASE_DIR = os.path.dirname(os.path.dirname(__file__))
    ALLOWED_HOSTS = []
    # Application definition
    # Fixed: 'django.contrib.admin' was listed twice; duplicate entries in
    # INSTALLED_APPS raise ImproperlyConfigured on newer Django versions.
    INSTALLED_APPS = (
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.messages',
        'django.contrib.staticfiles',
        'south',
        'json_field',
        'shirts',
    )
    MIDDLEWARE_CLASSES = (
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
    )
    ROOT_URLCONF = 'bhs_sales.urls'
    WSGI_APPLICATION = 'bhs_sales.wsgi.application'
    # Database
    # https://docs.djangoproject.com/en/1.6/ref/settings/#databases
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
    # Internationalization
    # https://docs.djangoproject.com/en/1.6/topics/i18n/
    LANGUAGE_CODE = 'en-us'
    TIME_ZONE = 'America/Los_Angeles'
    USE_I18N = True
    USE_L10N = True
    USE_TZ = True
    # Static files (CSS, JavaScript, Images)
    # https://docs.djangoproject.com/en/1.6/howto/static-files/
    STATIC_URL = '/static/'
    STATICFILES_DIRS = (
        os.path.join(BASE_DIR, "static"),
        os.path.join(BASE_DIR, "shirts/static"),
    )
try:
    # Optional per-machine overrides; the module may legitimately be absent.
    from local_settings import *
except ImportError:
    # Fixed: was a bare `except:` that silently swallowed *every* error
    # raised inside local_settings (including real bugs and SystemExit);
    # only a missing module is expected here.
    pass
import os
import time
import shutil
from conans.client import tools
from conans.model.env_info import EnvInfo
from conans.model.user_info import UserInfo
from conans.paths import CONANINFO, BUILD_INFO, RUN_LOG_NAME, long_paths_support
from conans.util.files import save, rmdir, mkdir, make_read_only
from conans.model.ref import PackageReference
from conans.util.log import logger
from conans.errors import (ConanException, conanfile_exception_formatter,
ConanExceptionInUserConanfileMethod)
from conans.client.packager import create_package
from conans.client.generators import write_generators, TXTGenerator
from conans.model.build_info import CppInfo
from conans.client.output import ScopedOutput
from conans.client.source import config_source
from conans.tools import environment_append
from conans.util.tracer import log_package_built
from conans.util.env_reader import get_env
def _init_package_info(deps_graph, paths):
    """Attach package_folder, cpp_info, env_info and user_info to every node.

    Nodes with a conan reference get a CppInfo rooted at their (predicted)
    package folder; a node without a reference (the consumer/user project)
    gets a CppInfo with an empty root since it has no package folder.
    """
    for node in deps_graph.nodes:
        conan_ref, conan_file = node
        if conan_ref:
            # Predict where the binary package will live in the local cache
            package_id = conan_file.info.package_id()
            package_reference = PackageReference(conan_ref, package_id)
            package_folder = paths.package(package_reference, conan_file.short_paths)
            conan_file.package_folder = package_folder
            conan_file.cpp_info = CppInfo(package_folder)
        else:
            # Consumer conanfile (user project): no package folder exists
            conan_file.cpp_info = CppInfo("")
        conan_file.cpp_info.version = conan_file.version
        conan_file.cpp_info.description = conan_file.description
        # Fresh env/user info containers, filled in later by package_info()
        conan_file.env_info = EnvInfo()
        conan_file.user_info = UserInfo()
def build_id(conan_file):
    """Return the package id produced by the recipe's build_id() method,
    or None when the recipe defines no build_id().

    The user's build_id() mutates a *copy* of the ConanInfo (stored on the
    conanfile as info_build), so the original package info is untouched.
    """
    if not hasattr(conan_file, "build_id"):
        return None
    # construct new ConanInfo
    info_copy = conan_file.info.copy()
    conan_file.info_build = info_copy
    # effectively call the user function to change the package values
    with conanfile_exception_formatter(str(conan_file), "build_id"):
        conan_file.build_id()
    # compute modified ID
    return info_copy.package_id()
class _ConanPackageBuilder(object):
    """Builds and packages a single conan_file binary package"""
    def __init__(self, conan_file, package_reference, client_cache, output):
        self._client_cache = client_cache
        self._conan_file = conan_file
        self._out = output
        self._package_reference = package_reference
        self._conan_ref = self._package_reference.conan
        self._skip_build = False  # If build_id()
        # When the recipe defines build_id(), several package_ids share one
        # build folder; build_reference identifies that shared folder.
        new_id = build_id(self._conan_file)
        self.build_reference = PackageReference(self._conan_ref, new_id) if new_id else package_reference
        self.build_folder = self._client_cache.build(self.build_reference,
                                                     self._conan_file.short_paths)
    def prepare_build(self):
        """Clean the build/package folders and copy the sources into the
        build folder; skipped when a build_id() recipe already has its
        (shared) build folder."""
        if os.path.exists(self.build_folder) and hasattr(self._conan_file, "build_id"):
            self._skip_build = True
            return
        # build_id is not caching the build folder, so actually rebuild the package
        _handle_system_requirements(self._conan_file, self._package_reference,
                                    self._client_cache, self._out)
        package_folder = self._client_cache.package(self._package_reference,
                                                    self._conan_file.short_paths)
        src_folder = self._client_cache.source(self._conan_ref, self._conan_file.short_paths)
        export_folder = self._client_cache.export(self._conan_ref)
        export_source_folder = self._client_cache.export_sources(self._conan_ref,
                                                                 self._conan_file.short_paths)
        try:
            rmdir(self.build_folder)
            rmdir(package_folder)
        except OSError as e:
            raise ConanException("%s\n\nCouldn't remove folder, might be busy or open\n"
                                 "Close any app using it, and retry" % str(e))
        self._out.info('Building your package in %s' % self.build_folder)
        config_source(export_folder, export_source_folder, src_folder,
                      self._conan_file, self._out)
        self._out.info('Copying sources to build folder')
        if getattr(self._conan_file, 'no_copy_source', False):
            # no_copy_source: build in an empty folder and point the recipe
            # at the cached source tree directly
            mkdir(self.build_folder)
            self._conan_file.source_folder = src_folder
        else:
            if not long_paths_support:
                # Windows without long-path support: skip files whose copied
                # path would exceed the OS limit
                from conans.util.windows import ignore_long_path_files
                ignore = ignore_long_path_files(src_folder, self.build_folder, self._out)
            else:
                ignore = None
            shutil.copytree(src_folder, self.build_folder, symlinks=True, ignore=ignore)
            logger.debug("Copied to %s", self.build_folder)
            logger.debug("Files copied %s", os.listdir(self.build_folder))
            self._conan_file.source_folder = self.build_folder
    def build(self):
        """Calls the conanfile's build method"""
        if self._skip_build:
            return
        with environment_append(self._conan_file.env):
            self._build_package()
    def package(self):
        """Generate the info txt files and calls the conanfile package method.
        Receives the build_folder because it can change if build_id() method exists"""
        # FIXME: Is weak to assign here the recipe_hash
        manifest = self._client_cache.load_manifest(self._conan_ref)
        self._conan_file.info.recipe_hash = manifest.summary_hash
        # Creating ***info.txt files
        save(os.path.join(self.build_folder, CONANINFO), self._conan_file.info.dumps())
        self._out.info("Generated %s" % CONANINFO)
        save(os.path.join(self.build_folder, BUILD_INFO), TXTGenerator(self._conan_file).content)
        self._out.info("Generated %s" % BUILD_INFO)
        os.chdir(self.build_folder)
        if getattr(self._conan_file, 'no_copy_source', False):
            source_folder = self._client_cache.source(self._conan_ref,
                                                      self._conan_file.short_paths)
        else:
            source_folder = self.build_folder
        with environment_append(self._conan_file.env):
            package_folder = self._client_cache.package(self._package_reference,
                                                        self._conan_file.short_paths)
            install_folder = self.build_folder  # While installing, the infos goes to build folder
            create_package(self._conan_file, source_folder, self.build_folder, package_folder,
                           install_folder, self._out)
        if get_env("CONAN_READ_ONLY_CACHE", False):
            make_read_only(package_folder)
    def _build_package(self):
        """ builds the package, creating the corresponding build folder if necessary
        and copying there the contents from the src folder. The code is duplicated
        in every build, as some configure processes actually change the source
        code. Receives the build_folder because it can change if the method build_id() exists
        """
        package_folder = self._client_cache.package(self._package_reference,
                                                    self._conan_file.short_paths)
        os.chdir(self.build_folder)
        self._conan_file.build_folder = self.build_folder
        self._conan_file.conanfile_directory = self.build_folder
        self._conan_file.package_folder = package_folder
        # In local cache, install folder always is build_folder
        self._conan_file.install_folder = self.build_folder
        # Read generators from conanfile and generate the needed files
        logger.debug("Writing generators")
        write_generators(self._conan_file, self.build_folder, self._out)
        logger.debug("Files copied after generators %s", os.listdir(self.build_folder))
        # Build step might need DLLs, binaries as protoc to generate source files
        # So execute imports() before build, storing the list of copied_files
        from conans.client.importer import run_imports
        copied_files = run_imports(self._conan_file, self.build_folder, self._out)
        try:
            # This is necessary because it is different for user projects
            # than for packages
            logger.debug("Call conanfile.build() with files in build folder: %s",
                         os.listdir(self.build_folder))
            self._out.highlight("Calling build()")
            with conanfile_exception_formatter(str(self._conan_file), "build"):
                self._conan_file.build()
            self._out.success("Package '%s' built" % self._conan_file.info.package_id())
            self._out.info("Build folder %s" % self.build_folder)
        except Exception as exc:
            self._out.writeln("")
            self._out.error("Package '%s' build failed" % self._conan_file.info.package_id())
            self._out.warn("Build folder %s" % self.build_folder)
            # User-method exceptions are re-raised untouched; anything else is
            # wrapped in a ConanException
            if isinstance(exc, ConanExceptionInUserConanfileMethod):
                raise exc
            raise ConanException(exc)
        finally:
            # Restore conanfile_directory to the export folder whether the
            # build succeeded or not
            export_folder = self._client_cache.export(self._conan_ref)
            self._conan_file.conanfile_directory = export_folder
        # Now remove all files that were imported with imports()
        if not getattr(self._conan_file, "keep_imports", False):
            for f in copied_files:
                try:
                    if f.startswith(self.build_folder):
                        os.remove(f)
                except OSError:
                    self._out.warn("Unable to remove imported file from build: %s" % f)
def _raise_package_not_found_error(conan_file, conan_ref, out):
    """Warn which settings/options were requested, then raise ConanException
    telling the user how to build the missing binary from source."""
    settings = ", ".join(conan_file.info.full_settings.dumps().splitlines())
    options = ", ".join(conan_file.info.full_options.dumps().splitlines())
    warning = '''Can't find a '%s' package for the specified options and settings:
- Settings: %s
- Options: %s
''' % (conan_ref, settings, options)
    out.warn(warning)
    error = '''Missing prebuilt package for '%s'
Try to build it from sources with "--build %s"
Or read "http://docs.conan.io/en/latest/faq/troubleshooting.html#error-missing-prebuilt-package"
''' % (conan_ref, conan_ref.name)
    raise ConanException(error)
def _handle_system_requirements(conan_file, package_reference, client_cache, out):
    """ check first the system_reqs/system_requirements.txt existence, if not existing
    check package/sha1/
    Used after remote package retrieving and before package building
    """
    # Only act when the recipe itself defines system_requirements()
    if "system_requirements" not in type(conan_file).__dict__:
        return
    system_reqs_path = client_cache.system_reqs(package_reference.conan)
    system_reqs_package_path = client_cache.system_reqs_package(package_reference)
    # An existing marker file (global or per-package) means they already ran
    if os.path.exists(system_reqs_path) or os.path.exists(system_reqs_package_path):
        return
    ret = call_system_requirements(conan_file, out)
    try:
        ret = str(ret or "")
    except Exception:
        # Fixed: was a bare `except:`; keep the best-effort behaviour but do
        # not swallow KeyboardInterrupt/SystemExit
        out.warn("System requirements didn't return a string")
        ret = ""
    # Persist the output so the requirements are not installed again
    if getattr(conan_file, "global_system_requirements", None):
        save(system_reqs_path, ret)
    else:
        save(system_reqs_package_path, ret)
def call_system_requirements(conanfile, output):
    """Run the recipe's system_requirements(), logging any failure to
    *output* and converting it into a ConanException."""
    try:
        return conanfile.system_requirements()
    except Exception as e:
        # Boundary handler: log the user-code error, then raise a uniform
        # Conan error for the caller
        output.error("while executing system_requirements(): %s" % str(e))
        raise ConanException("Error in system requirements")
def call_package_info(conanfile, package_folder):
    # Once the node is build, execute package info, so it has access to the
    # package folder and artifacts
    with tools.chdir(package_folder):
        with conanfile_exception_formatter(str(conanfile), "package_info"):
            # The build layout is irrelevant (and possibly gone) at this
            # point, so the folders are reset to None while package_info()
            # runs
            conanfile.source_folder = None
            conanfile.build_folder = None
            conanfile.install_folder = None
            conanfile.package_info()
class ConanInstaller(object):
    """ main responsible of retrieving binary packages or building them from source
    locally in case they are not found in remotes
    """
    def __init__(self, client_cache, output, remote_proxy, build_mode, build_requires):
        self._client_cache = client_cache
        self._out = output
        self._remote_proxy = remote_proxy
        self._build_requires = build_requires
        self._build_mode = build_mode
        self._built_packages = set()  # To avoid re-building twice the same package reference
    def install(self, deps_graph):
        """ given a DepsGraph object, build necessary nodes or retrieve them
        """
        t1 = time.time()
        _init_package_info(deps_graph, self._client_cache)
        # order by levels and propagate exports as download imports
        nodes_by_level = deps_graph.by_levels()
        logger.debug("Install-Process buildinfo %s", (time.time() - t1))
        t1 = time.time()
        skip_private_nodes = self._compute_private_nodes(deps_graph)
        logger.debug("Install-Process private %s", (time.time() - t1))
        t1 = time.time()
        self._build(nodes_by_level, skip_private_nodes, deps_graph)
        logger.debug("Install-build %s", (time.time() - t1))
    def _compute_private_nodes(self, deps_graph):
        """ computes a list of nodes that are not required to be built, as they are
        private requirements of already available shared libraries as binaries.
        If the package requiring a private node has an up to date binary package,
        the private node is not retrieved nor built
        """
        skip_nodes = set()  # Nodes that require private packages but are already built
        for node in deps_graph.nodes:
            conan_ref, conanfile = node
            # Only nodes that declare at least one private requirement matter
            if not [r for r in conanfile.requires.values() if r.private]:
                continue
            if conan_ref:
                build_forced = self._build_mode.forced(conanfile, conan_ref)
                if build_forced:
                    continue
                package_id = conanfile.info.package_id()
                package_reference = PackageReference(conan_ref, package_id)
                check_outdated = self._build_mode.outdated
                # A usable binary for the consumer means its private deps
                # never need to be retrieved or built
                if self._remote_proxy.package_available(package_reference,
                                                        conanfile.short_paths,
                                                        check_outdated):
                    skip_nodes.add(node)
        # Get the private nodes
        skippable_private_nodes = deps_graph.private_nodes(skip_nodes)
        return skippable_private_nodes
    def nodes_to_build(self, deps_graph):
        """Called from info command when a build policy is used in build_order parameter"""
        # Get the nodes in order and if we have to build them
        nodes_by_level = deps_graph.by_levels()
        skip_private_nodes = self._compute_private_nodes(deps_graph)
        nodes = self._get_nodes(nodes_by_level, skip_private_nodes)
        return [(PackageReference(conan_ref, package_id), conan_file)
                for conan_ref, package_id, conan_file, build in nodes if build]
    def _build(self, nodes_by_level, skip_private_nodes, deps_graph):
        """ The build assumes an input of conans ordered by degree, first level
        should be independent from each other, the next-second level should have
        dependencies only to first level conans.
        param nodes_by_level: list of lists [[nodeA, nodeB], [nodeC], [nodeD, ...], ...]
        build_mode => ["*"] if user wrote "--build"
                   => ["hello*", "bye*"] if user wrote "--build hello --build bye"
                   => False if user wrote "never"
                   => True if user wrote "missing"
                   => "outdated" if user wrote "--build outdated"
        """
        # Deterministic flattened ordering used for info propagation
        inverse = deps_graph.inverse_levels()
        flat = []
        for level in inverse:
            level = sorted(level, key=lambda x: x.conan_ref)
            flat.extend(level)
        # Get the nodes in order and if we have to build them
        nodes_to_process = self._get_nodes(nodes_by_level, skip_private_nodes)
        for conan_ref, package_id, conan_file, build_needed in nodes_to_process:
            output = ScopedOutput(str(conan_ref), self._out)
            if build_needed and (conan_ref, package_id) not in self._built_packages:
                # --- Build-from-source path ---
                package_ref = PackageReference(conan_ref, package_id)
                build_allowed = self._build_mode.allowed(conan_file, conan_ref)
                if not build_allowed:
                    _raise_package_not_found_error(conan_file, conan_ref, output)
                if conan_file.build_policy_missing:
                    output.info("Building package from source as defined by build_policy='missing'")
                elif self._build_mode.forced(conan_file, conan_ref):
                    output.warn('Forced build from source')
                # Build requires must be available before building this node
                self._build_requires.install(conan_ref, conan_file, self)
                t1 = time.time()
                # Assign to node the propagated info
                self._propagate_info(conan_file, conan_ref, flat, deps_graph)
                builder = _ConanPackageBuilder(conan_file, package_ref, self._client_cache, output)
                # Write lock while sources are fetched; read lock plus a
                # package lock while building/packaging
                with self._client_cache.conanfile_write_lock(conan_ref):
                    self._remote_proxy.get_recipe_sources(conan_ref, conan_file.short_paths)
                    builder.prepare_build()
                with self._client_cache.conanfile_read_lock(conan_ref):
                    with self._client_cache.package_lock(builder.build_reference):
                        builder.build()
                        builder.package()
                        self._remote_proxy.handle_package_manifest(package_ref, installed=True)
                        package_folder = self._client_cache.package(package_ref, conan_file.short_paths)
                        # Call the info method
                        call_package_info(conan_file, package_folder)
                        # Log build
                        self._log_built_package(conan_file, package_ref, time.time() - t1)
                        self._built_packages.add((conan_ref, package_id))
            else:
                # --- Retrieve-binary path ---
                # Get the package, we have a not outdated remote package
                package_ref = None
                if conan_ref:
                    package_ref = PackageReference(conan_ref, package_id)
                    with self._client_cache.package_lock(package_ref):
                        self._get_remote_package(conan_file, package_ref, output)
                # Assign to the node the propagated info
                # (conan_ref could be None if user project, but of course assign the info
                self._propagate_info(conan_file, conan_ref, flat, deps_graph)
                if package_ref:
                    # Call the info method
                    package_folder = self._client_cache.package(package_ref, conan_file.short_paths)
                    call_package_info(conan_file, package_folder)
    def _get_remote_package(self, conan_file, package_reference, output):
        """Get remote package. It won't check if it's outdated"""
        # Compute conan_file package from local (already compiled) or from remote
        package_folder = self._client_cache.package(package_reference,
                                                    conan_file.short_paths)
        # If already exists do not dirt the output, the common situation
        # is that package is already installed and OK. If don't, the proxy
        # will print some other message about it
        if not os.path.exists(package_folder):
            self._out.info("Retrieving package %s" % package_reference.package_id)
        if self._remote_proxy.get_package(package_reference,
                                          short_paths=conan_file.short_paths):
            _handle_system_requirements(conan_file, package_reference,
                                        self._client_cache, output)
            if get_env("CONAN_READ_ONLY_CACHE", False):
                make_read_only(package_folder)
            return True
        # Neither local nor remote binary exists: report and abort
        _raise_package_not_found_error(conan_file, package_reference.conan, output)
    def _log_built_package(self, conan_file, package_ref, duration):
        """Record a successful build in the tracer log, attaching the run log
        file when one exists in the build folder."""
        build_folder = self._client_cache.build(package_ref, conan_file.short_paths)
        log_file = os.path.join(build_folder, RUN_LOG_NAME)
        log_file = log_file if os.path.exists(log_file) else None
        log_package_built(package_ref, duration, log_file)
    @staticmethod
    def _propagate_info(conan_file, conan_ref, flat, deps_graph):
        """Aggregate cpp/env/user info from this node's upstream closure
        onto the conanfile, and filter env values to the subtree."""
        # Get deps_cpp_info from upstream nodes
        node_order = deps_graph.ordered_closure((conan_ref, conan_file), flat)
        public_deps = [name for name, req in conan_file.requires.items() if not req.private]
        conan_file.cpp_info.public_deps = public_deps
        for n in node_order:
            conan_file.deps_cpp_info.update(n.conanfile.cpp_info, n.conan_ref.name)
            conan_file.deps_env_info.update(n.conanfile.env_info, n.conan_ref.name)
            conan_file.deps_user_info[n.conan_ref.name] = n.conanfile.user_info
        # Update the info but filtering the package values that not apply to the subtree
        # of this current node and its dependencies.
        subtree_libnames = [ref.name for (ref, _) in node_order]
        # NOTE(review): reaches into the private _env_values of the conanfile
        for package_name, env_vars in conan_file._env_values.data.items():
            for name, value in env_vars.items():
                if not package_name or package_name in subtree_libnames or \
                   package_name == conan_file.name:
                    conan_file.info.env_values.add(name, value, package_name)
    def _get_nodes(self, nodes_by_level, skip_nodes):
        """Install the available packages if needed/allowed and return a list
        of nodes to build (tuples (conan_file, conan_ref))
        and installed nodes"""
        nodes_to_build = []
        # Now build each level, starting from the most independent one
        package_references = set()
        for level in nodes_by_level:
            for node in level:
                if node in skip_nodes:
                    continue
                conan_ref, conan_file = node
                # it is possible that the root conans
                # is not inside the storage but in a user folder, and thus its
                # treatment is different
                build_node = False
                package_id = None
                if conan_ref:
                    logger.debug("Processing node %s", repr(conan_ref))
                    package_id = conan_file.info.package_id()
                    package_reference = PackageReference(conan_ref, package_id)
                    # Avoid processing twice the same package reference
                    if package_reference not in package_references:
                        package_references.add(package_reference)
                        check_outdated = self._build_mode.outdated
                        if self._build_mode.forced(conan_file, conan_ref):
                            build_node = True
                        else:
                            # Build only when no usable binary is available
                            available = self._remote_proxy.package_available(package_reference,
                                                                             conan_file.short_paths,
                                                                             check_outdated)
                            build_node = not available
                nodes_to_build.append((conan_ref, package_id, conan_file, build_node))
        # A check to be sure that if introduced a pattern, something is going to be built
        if self._build_mode.patterns:
            to_build = [str(n[0].name) for n in nodes_to_build if n[3]]
            self._build_mode.check_matches(to_build)
        return nodes_to_build
|
#!/usr/bin/env python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)

# LED board pin numbers, led_1 .. led_6
led_6 = 8
led_5 = 10
led_4 = 12
led_3 = 16
led_2 = 18
led_1 = 22
leds = (8, 10, 12, 16, 18, 22)

# Setup LEDs (the six explicit GPIO.setup calls collapsed into one loop)
for led in leds:
    GPIO.setup(led, GPIO.OUT)

# Testing LEDs: chase up the row, then back down
for x in leds:
    GPIO.output(x, 1)
    time.sleep(0.05)
    GPIO.output(x, 0)
for x in reversed(leds):
    GPIO.output(x, 1)
    time.sleep(0.05)
    GPIO.output(x, 0)

# Button board pin numbers, button_1 .. button_6
button_6 = 3
button_5 = 5
button_4 = 7
button_3 = 11
button_2 = 13
button_1 = 15

# Setup buttons with internal pull-ups: a pressed button reads low/False
buttons = (button_1, button_2, button_3, button_4, button_5, button_6)
for button in buttons:
    GPIO.setup(button, GPIO.IN, GPIO.PUD_UP)

# Testing buttons: flash the matching LED while a button is held.
# Fixed: GPIO.cleanup() was placed after the infinite loop, where it could
# never run; the try/finally below guarantees the pins are released when
# the loop is interrupted with Ctrl-C.
button_led_pairs = ((button_1, led_1), (button_2, led_2), (button_3, led_3),
                    (button_4, led_4), (button_5, led_5), (button_6, led_6))
try:
    while True:
        for number, (button, led) in enumerate(button_led_pairs, start=1):
            if not GPIO.input(button):
                print("%d button pressed" % number)
                GPIO.output(led, 1)
                time.sleep(0.2)
                GPIO.output(led, 0)
                time.sleep(0.1)
except KeyboardInterrupt:
    pass
finally:
    # Clear all
    GPIO.cleanup()
|
import datetime
from django.db import models
from django.contrib.auth.models import User
class EventManager(models.Manager):
    """Manager narrowing events to those a given user has subscribed to."""
    def get_subscriptions(self, subscriber):
        # Events having at least one EventSubscription row for `subscriber`.
        event_subscriptions = EventSubscription.objects.filter(subscriber=subscriber)
        return super().get_queryset().filter(eventsubscription__in=event_subscriptions)
class BaseEventCategory(models.Model):
    """Category of a BaseEvent; `trip_or_event` labels which kind it is."""
    name = models.CharField(max_length=200, default='')
    trip_or_event = models.CharField(max_length=200, default='Event')
    def __str__(self):
        return self.name
class BaseEvent(models.Model):
    """Common base for Event and Trip.

    The participant count is denormalised into `num_of_participants`;
    subscribe()/unsubscribe() keep it in sync with EventSubscription rows.
    """
    creater = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    title = models.CharField(max_length=200, default='')
    description = models.TextField()
    num_of_participants = models.IntegerField(default=0)
    preview = models.ImageField(default='', upload_to='previews')
    # NOTE(review): DateTimeField with a *date* (not datetime) default —
    # presumably datetime.datetime.now was intended; confirm before changing.
    date = models.DateTimeField(default=datetime.date.today)
    category = models.ForeignKey(BaseEventCategory, on_delete=models.DO_NOTHING, null=True, blank=True)
    # Custom manager alongside the default one.
    event_manager = EventManager()
    objects = models.Manager()
    def subscribe(self, user):
        # Create the subscription row and bump the cached counter.
        subscription = EventSubscription.objects.create(subscriber=user, event=self)
        self.num_of_participants += 1
        self.save()
    def unsubscribe(self, user):
        # Only decrement when a subscription actually exists.
        if self.is_user_subscribed(user):
            subscription = EventSubscription.objects.all().filter(subscriber=user, event=self)
            self.num_of_participants -= 1
            self.save()
            subscription.delete()
    def is_user_subscribed(self, user):
        # unique_together on EventSubscription makes 0 and 1 the only counts.
        num_of_subscribers = EventSubscription.objects.all().filter(subscriber=user, event=self).count()
        return num_of_subscribers == 1
    def get_subscribers(self):
        # Returns EventSubscription rows, not User objects.
        return EventSubscription.objects.filter(event=self)
    def __str__(self):
        return self.title
class Event(BaseEvent):
    """Concrete event; adds no fields beyond BaseEvent."""
    def delete(self, *args, **kwargs):
        # NOTE(review): no `base_event` attribute is defined anywhere in
        # this file — confirm this related object exists at runtime.
        self.base_event.delete()
        # BUG FIX: super(self.__class__, self) recurses infinitely when the
        # class is subclassed; zero-arg super() resolves the MRO correctly.
        return super().delete(*args, **kwargs)
class Trip(BaseEvent):
    """Event specialisation with a distance and a limited number of places."""
    distance = models.IntegerField(default=0)
    num_of_places = models.IntegerField(default=0)
    def delete(self, *args, **kwargs):
        # NOTE(review): no `base_event` attribute is defined anywhere in
        # this file — confirm this related object exists at runtime.
        self.base_event.delete()
        # BUG FIX: super(self.__class__, self) recurses infinitely when the
        # class is subclassed; zero-arg super() resolves the MRO correctly.
        return super().delete(*args, **kwargs)
class Location(models.Model):
    """Geographic point (lat/lng) plus display address for an event."""
    lat = models.FloatField()
    lng = models.FloatField()
    address = models.CharField(max_length=200, default='')
    event = models.ForeignKey(BaseEvent, on_delete=models.CASCADE, default=None)
class EventSubscription(models.Model):
    """Join table between users and events."""
    subscriber = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    event = models.ForeignKey(BaseEvent, on_delete=models.CASCADE, null=True)
    class Meta:
        # A user can subscribe to a given event at most once.
        unique_together = ('subscriber','event')
class Comment(models.Model):
    """User comment on an event; self-reference allows threaded replies."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    event = models.ForeignKey(BaseEvent, on_delete=models.CASCADE, null=True)
    content = models.TextField()
    date = models.DateTimeField(auto_now=True)
    # NOTE(review): "parrent" is a typo for "parent", but renaming changes
    # the DB schema and any callers — keep until a migration is planned.
    parrent = models.ForeignKey("Comment", on_delete=models.DO_NOTHING, null=True)
class Profile(models.Model):
    """Extra per-user data (avatar, bio, age) attached 1:1 to auth.User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    avatar = models.ImageField(default='/avatars/default-avatar.png', upload_to='avatars')
    about = models.TextField(default='')
    age = models.IntegerField(default=0)
class Message(models.Model):
    """Direct message from `addresser` to `recipient`."""
    addresser = models.ForeignKey(User, on_delete=models.CASCADE, null=False, related_name='addresser')
    recipient = models.ForeignKey(User, on_delete=models.CASCADE, null=False, related_name='recipient')
    # auto_now refreshes on every save; if this is meant as "sent at",
    # auto_now_add may be intended. TODO confirm.
    date = models.DateTimeField(auto_now=True)
    message = models.TextField()
|
from flask import current_app, url_for, flash
from werkzeug.utils import redirect
from view_models.forms.login import LoginForm
from x_app.identity_provider import WrongPasswordError, UserNotFoundError
from x_app.navigation import XNavigationMixin, XNav
from x_app.view_model import XFormPage
class LoginViewModel(XFormPage, XNavigationMixin):
    """Form page handling user sign-in via the app's identity provider."""
    __template_name__ = 'login.html'
    def __init__(self):
        super().__init__("Sign In")
    def on_form_success(self, form):
        # Try the login; on a known auth failure flash a message and
        # re-render the form instead of redirecting.
        try:
            u = current_app.identity_provider.try_login(**form.get_user_data())
            return redirect(f'/user/{u.username}')
        except WrongPasswordError:
            flash('User and password doesn\'t match!', 'danger')
            return self.on_form_error(form)
        except UserNotFoundError:
            flash('User is not found!', 'danger')
            return self.on_form_error(form)
    def on_form_error(self, form):
        # Re-render the login template (validation errors and flashes shown there).
        return super().render_template()
    def make_form(self):
        return LoginForm()
    @property
    def navigation(self):
        # Single nav action: link over to registration.
        return [XNav('Sign Up', url_for('auth.register'), 'btn-light')]
|
#!/usr/bin/env python
import pandas as pd
from miran import Character, battle
# Round-robin tournament between 200 random characters, ranked by wins.
cs = [Character.rand() for i in range(0, 200)]
df = pd.DataFrame(((c.str, c.dex, c.d) for c in cs), columns=['str', 'dex', 'def'])
df['wins'] = 0
for i in range(0, len(cs)):
    for j in range(i + 1, len(cs)):
        assert i != j
        result = battle(cs[i], cs[j])
        if result is None:  # draw: nobody scores
            continue
        if result:
            # BUG FIX: DataFrame.ix was deprecated and removed from pandas;
            # .loc is the label-based replacement.
            df.loc[i, 'wins'] += 1
        else:
            df.loc[j, 'wins'] += 1
# BUG FIX: Python 2 print statement -> print() call (the shebang script
# otherwise fails to even parse under Python 3).
print(df.sort_values('wins', ascending=False))
|
def extractEachKth(inputArray, k):
    """Return a copy of inputArray with every k-th element removed.

    Elements at 1-based positions k, 2k, 3k, ... are dropped; all other
    elements keep their relative order. Raises ZeroDivisionError if k == 0.
    """
    # Comprehension replaces the manual index loop + append.
    return [value for position, value in enumerate(inputArray, start=1)
            if position % k != 0]
# Demo: drop every 3rd element from 1..10.
inputArray = list(range(1, 11))
k = 3
result = extractEachKth(inputArray, k)
print(result)
""" Cloud component
This program act as the component that has to run on the centralised
cloud environment. Currently, it receives a customisable number of images and
temporarily stored them in a provided repository. This component
can be further extended for any excessive computational task.
"""
from socket import *
from time import time
from parameters import *
import logging
import uuid
host = "0.0.0.0"  # To use public IP
addr = (host, port)  # `port` comes from the star import of `parameters`
base_path = 'images/'  # directory where received images are stored
img_current_index = 0
# Rolling record of stored image file names, oldest first.
list_file_names = []
def generate_image_name():
    """Return a random, collision-safe JPEG file name."""
    return "{0}.jpg".format(uuid.uuid4())
def handle():
    """Receive images over UDP and keep at most `max_no_of_images` on disk.

    Each image is streamed into a fresh uuid-named file; a socket timeout
    marks the end of one image. Once the cap is reached, the oldest stored
    image is deleted before the new one is recorded.
    """
    import os  # BUG FIX: os.path.getsize/os.remove were used without importing os

    logger.info("Cloud component running...")
    logger.info("Listening port => {0}".format(port))
    logger.info("Socker timeout=> {0}".format(socket_timeout))
    img_current_index = 0
    while True:
        try:
            s = socket(AF_INET, SOCK_DGRAM)
            s.bind(addr)
            gen_file_name = generate_image_name()
            file_name = base_path + gen_file_name
            f = open(file_name, 'wb')
            data, address = s.recvfrom(buffer_size)
            logger.info("Read start => {0}".format(time()))
            try:
                while data:
                    f.write(data)
                    s.settimeout(socket_timeout)
                    data, address = s.recvfrom(buffer_size)
            except timeout as err:
                # A receive timeout ends the current image.
                logger.info("Timeout => {0} ".format(err))
            f.close()
            s.close()
            logger.info("Read finish => {0}".format(time()))
            fileSize = os.path.getsize(file_name)
            logger.info("Address => {0} , Field Size => {1}".format(address, fileSize))
            # Dealt with images.
            # BUG FIX: the original deleted list_file_names[0] from disk but
            # never removed it from the list, and it stored full paths while
            # prepending base_path again on deletion ("images/images/...").
            # Store bare names and pop the evicted entry instead.
            if len(list_file_names) >= max_no_of_images:
                file_name_to_be_deleted = list_file_names.pop(0)
                full_path = base_path + file_name_to_be_deleted
                if os.path.exists(full_path):
                    os.remove(full_path)
            list_file_names.append(gen_file_name)
            img_current_index = img_current_index + 1
        except Exception as exc:
            # Best-effort server loop: log and keep serving on any error.
            logger.info("Exception occured => {0}".format(exc))
if __name__ == '__main__':
    # `global` at module level is a no-op; handle() reads `logger` as a
    # module global, so it is only defined when the module runs as a script.
    global logger
    logger = logging.getLogger("fd-cloud."+__name__)
    logging.basicConfig(level=logging.DEBUG)
    handle()
from __future__ import print_function
import os,sys
from struct import unpack,pack
# Parse a CAJ (CNKI) document: dump the table of contents, then the text
# of every page, to stdout.
# NOTE(review): the byte handling (unpack into str, .replace('\x00','')
# .decode('cp936').encode('utf-8')) is Python 2 only; a Python 3 port
# needs a wider rework of the string/bytes boundary.
f = open('test.caj','rb')
header = f.read(9*16)
pages_string = f.read(8)
pages = []
contents = []
[pages_count,unknown] = unpack('ii', pages_string)
print('pages: '+str(pages_count))
# extract content index
f.seek(12*16, os.SEEK_CUR)
[contents_count] = unpack('i', f.read(4))
print('contents items: '+str(contents_count))
# BUG FIX: xrange is Python 2 only; range behaves identically here and
# also parses under Python 3.
for x in range(0,contents_count):
    contents_string = f.read(308)
    [contents_title, contents_index, contents_page, unknown, contentd_level] = unpack('256s24s12s12si', contents_string)
    contents.append([contents_title, contents_page, contentd_level])
    # Indent each TOC entry by its nesting level; titles are cp936-encoded.
    print('\t'*(contentd_level-1)+contents_title.replace('\x00','').decode('cp936').encode('utf-8'))
# extract page offsets
for x in range(0,pages_count):
    pageoffset_string = f.read(12)
    f.seek(8, os.SEEK_CUR)
    [page_offset, page_content_offset, rel_index, index] = unpack('IIhh', pageoffset_string)
    pages.append([page_offset, page_content_offset, rel_index, index])
    # print(str(index)+':'+str(page_offset))
for page in pages:
    f.seek(page[0])
    offset = 0
    f.seek(12, os.SEEK_CUR)
    while offset<=page[1]:
        char = f.read(4)
        [u1, u2, u3, u4] = unpack('bbbb', char)
        word = ''
        if u3==0:
            # Single-byte character.
            [word] = unpack('s', pack('b',u4))
        else:
            # Double-byte (GBK) character, stored byte-swapped.
            [word] = unpack('2s', pack('bb',u4,u3))
        print(word, end='')
        offset += 4
    print('\n')
f.close()
os.system('pause >nul')
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'modificacionSectorDePersonal.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated password dialog: two password fields plus
    confirm/cancel buttons. Do not hand-edit widget geometry — regenerate
    from the .ui file instead."""
    def setupUi(self, Form):
        """Create and lay out all widgets on `Form`."""
        Form.setObjectName("Form")
        Form.resize(542, 300)
        self.su_btn_cancelar = QtWidgets.QPushButton(Form)
        self.su_btn_cancelar.setGeometry(QtCore.QRect(290, 250, 131, 31))
        self.su_btn_cancelar.setStyleSheet("color:white;\n"
"font-size:10pt;\n"
"border:none;\n"
"background-color:#ff4e4e;")
        self.su_btn_cancelar.setObjectName("su_btn_cancelar")
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(50, 110, 100, 31))
        self.label.setStyleSheet("font: 11pt \"MS Shell Dlg 2\";\n"
"text-align:center;")
        self.label.setObjectName("label")
        self.label_1 = QtWidgets.QLabel(Form)
        self.label_1.setGeometry(QtCore.QRect(50, 153, 150, 31))
        self.label_1.setStyleSheet("font: 11pt \"MS Shell Dlg 2\";\n"
"text-align:center;")
        self.label_1.setObjectName("label_1")
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(10, 20, 521, 41))
        self.label_2.setStyleSheet("font-size:20px;\n"
"")
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.su_input_1 = QtWidgets.QLineEdit(Form)
        self.su_input_1.setGeometry(QtCore.QRect(200, 110, 300, 31))
        self.su_input_1.setStyleSheet("border:none;\n"
"background-color: rgb(255, 255, 255);\n"
"font: 11pt \"MS Shell Dlg 2\";")
        self.su_input_1.setEchoMode(QtWidgets.QLineEdit.Password)
        self.su_input_1.setMaxLength(10)
        self.su_input_1.setObjectName("su_input_1")
        self.su_input_2 = QtWidgets.QLineEdit(Form)
        self.su_input_2.setGeometry(QtCore.QRect(210, 155, 290, 31))
        self.su_input_2.setStyleSheet("border:none;\n"
"background-color: rgb(255, 255, 255);\n"
"font: 11pt \"MS Shell Dlg 2\";")
        self.su_input_2.setEchoMode(QtWidgets.QLineEdit.Password)
        self.su_input_2.setMaxLength(10)
        self.su_input_2.setObjectName("su_input_2")
        self.su_btn_confirmar = QtWidgets.QPushButton(Form)
        self.su_btn_confirmar.setGeometry(QtCore.QRect(130, 250, 131, 31))
        self.su_btn_confirmar.setStyleSheet("background-color: rgb(99, 206, 104);\n"
"color:white;\n"
"font-size:10pt;\n"
"border:none;")
        self.su_btn_confirmar.setObjectName("su_btn_confirmar")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set all user-visible (translatable) strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Modificación de sector"))
        self.su_btn_cancelar.setText(_translate("Form", "Cancelar"))
        self.label.setText(_translate("Form", "Contraseña"))
        self.label_1.setText(_translate("Form", "Confirmar Contraseña"))
        self.label_2.setText(_translate("Form", "Ingrese Contraseña"))
        self.su_btn_confirmar.setText(_translate("Form", "Confirmar"))
if __name__ == "__main__":
    # Manual smoke test: show the generated form standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
from django import template
from django.utils.safestring import mark_safe
import markdown
register= template.Library()
@register.filter
def field_type(bound_field):
    """Template filter: name of the widget class rendering this field."""
    widget = bound_field.field.widget
    return type(widget).__name__
@register.filter
def input_class(bound_field):
    """Bootstrap CSS classes for a form field.

    Unbound forms get no state class. Bound fields get 'is-invalid' on
    errors, otherwise 'is-valid' — except password inputs, which are never
    marked valid.
    """
    state = ''
    if bound_field.form.is_bound:
        if bound_field.errors:
            state = 'is-invalid'
        elif field_type(bound_field) != 'PasswordInput':
            state = 'is-valid'
    return 'form-control {}'.format(state)
@register.filter(name='markdown')
def markdown_format(text):
    """Render `text` as Markdown and mark the resulting HTML safe.

    NOTE(review): mark_safe exempts the output from autoescaping — only
    feed this filter trusted input.
    """
    return mark_safe(markdown.markdown(text))
|
import sys
#sys.stdin=open("in5.txt","r")
# Binary-search the longest cut length such that the k base ropes can be
# cut into at least n pieces of that length (-100 if none works).
k, n = map(int, input().split())
base = [int(input()) for _ in range(k)]
lo, hi = 1, max(base)
max_len = -100
while lo <= hi:
    mid = (lo + hi) // 2
    pieces = sum(length // mid for length in base)
    if pieces >= n:
        # mid is feasible; remember it and look for a longer cut.
        max_len = mid
        lo = mid + 1
    else:
        hi = mid - 1
print(max_len)
###
#import sys
#sys.stdin=open("c:/Users/jung/Desktop/AA/in1.txt","r")
#k,n=map(int,input().split())
#base=[int(input()) for _ in range(k)]
#max_len=-100
#for i in range(1,max(base)+1):
# nn=0
# for j in range(k):
# nn+=base[j]//i
# if nn>=n and max_len<i:
# max_len=i
#print(max_len)
## Brute force over all lengths: exceeds the time limit (scores 40 points)
|
# -*- coding: ISO-8859-15 -*-
# Copyright (c) 2004 Nuxeo SARL <http://nuxeo.com>
# Author: Encolpe Degoute <edegoute@nuxeo.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
# $Id$
from base64 import encodestring
from urllib import quote, unquote
from DateTime import DateTime
from types import ListType
from Globals import InitializeClass
from AccessControl import ClassSecurityInfo
from ZPublisher.HTTPRequest import HTTPRequest
from Products.CMFCore import CookieCrumbler
from Products.CMFCore.CookieCrumbler import ATTEMPT_NONE, ATTEMPT_RESUME, \
ATTEMPT_DISABLED, ATTEMPT_LOGIN
from zLOG import LOG, DEBUG
class NTLMCookieCrumbler(CookieCrumbler.CookieCrumbler):
    """Cookie crumbler that also accepts an NTLM-authenticated user name
    passed in via the `ntlm_remote_user` query-string/form parameter,
    converting it into Basic-auth credentials for the rest of the stack.
    """
    meta_type = 'NTLM Cookie Crumbler'
    title = 'NTLM Cookie Crumbler'
    security = ClassSecurityInfo()
    # overloaded from CMFCore.CookieCrumbler
    security.declarePrivate('modifyRequest')
    def modifyRequest(self, req, resp):
        """Copy NTLM/cookie credentials into the Basic auth header.

        Returns one of the ATTEMPT_* constants understood by the base
        CookieCrumbler machinery.
        """
        if req.__class__ is not HTTPRequest:
            return ATTEMPT_DISABLED
        if (req[ 'REQUEST_METHOD' ] not in ( 'HEAD', 'GET', 'PUT', 'POST' )
            and not req.has_key(self.auth_cookie)):
            return ATTEMPT_DISABLED
        if req.environ.has_key( 'WEBDAV_SOURCE_PORT' ):
            return ATTEMPT_DISABLED
        # NOTE(review): 'ntml_...' looks like a typo for 'ntlm_...', but it
        # is used consistently within this class, so behaviour is intact.
        username = getattr(req, 'ntml_authenticated_user', None)
        if username is not None:
            pass
        elif req.get('QUERY_STRING') != '':
            # Pull ntlm_remote_user out of the query string, remember the
            # user on the request, and strip the parameter from the QS.
            qs = req.get('QUERY_STRING')
            if '&' in qs:
                split_query = qs.split('&')
                for parameter in split_query:
                    if '&' in parameter:
                        split_query.remove(parameter)
                        for e in parameter.split('&'):
                            split_query.append(e)
            else:
                split_query = qs.split('&')
            for parameter in split_query:
                if parameter.startswith('ntlm_remote_user='):
                    ## XXX len('ntlm_remote_user=') = 17
                    username = parameter[17:]
                    split_query.remove(parameter)
                    setattr(req, 'ntml_authenticated_user', username)
            req.environ['QUERY_STRING'] = '&'.join(split_query)
            # cleaning form, at least
            if req.form.get('ntlm_remote_user'):
                del req.form['ntlm_remote_user']
        elif hasattr(req.form, 'ntlm_remote_user'):
            # NOTE(review): hasattr on a mapping tests attributes, not keys —
            # presumably a key test was intended and this branch may never
            # fire; confirm against the request implementation.
            username = req.form.get('ntlm_remote_user')
            setattr(req, 'ntml_authenticated_user', username)
            del req.form['ntlm_remote_user']
        else:
            username = False
            setattr(req, 'ntml_authenticated_user', None)
        if isinstance(username, ListType):
            username = username[0]
        ## condition for: username is not None and username != ''
        if username:
            user = self.acl_users.getUser(username)
            if user is None:
                # The user in the certificate does not exist
                LOG('NTLM Cookie Crumbler', DEBUG, "User '%s' did not exist\n" % username)
                return ATTEMPT_DISABLED
            ##user._getPassword return nothing usable from LDAPUserGroupsFolder
            #password = user._getPassword()
            #ac = encodestring('%s:%s' % (username, password))
            ac = encodestring('%s:%s' % (username, '__'+username+'__'))
            req._auth = 'Basic %s' % ac
            req._cookie_auth = 1
            resp._auth = 1
            return ATTEMPT_RESUME
        elif req._auth and not getattr(req, '_cookie_auth', 0):
            # Using basic auth.
            return ATTEMPT_DISABLED
        else:
            if req.has_key(self.pw_cookie) and req.has_key(self.name_cookie):
                # Attempt to log in and set cookies.
                name = req[self.name_cookie]
                pw = req[self.pw_cookie]
                ac = encodestring('%s:%s' % (name, pw))
                req._auth = 'Basic %s' % ac
                req._cookie_auth = 1
                resp._auth = 1
                if req.get(self.persist_cookie, 0):
                    # Persist the user name (but not the pw or session)
                    expires = (DateTime() + 365).toZone('GMT').rfc822()
                    resp.setCookie(self.name_cookie, name, path='/',
                                   expires=expires)
                else:
                    # Expire the user name
                    resp.expireCookie(self.name_cookie, path='/')
                method = self.getCookieMethod( 'setAuthCookie'
                                               , self.defaultSetAuthCookie )
                method( resp, self.auth_cookie, quote( ac ) )
                self.delRequestVar(req, self.name_cookie)
                self.delRequestVar(req, self.pw_cookie)
                return ATTEMPT_LOGIN
            elif req.has_key(self.auth_cookie):
                # Copy __ac to the auth header.
                ac = unquote(req[self.auth_cookie])
                req._auth = 'Basic %s' % ac
                req._cookie_auth = 1
                resp._auth = 1
                self.delRequestVar(req, self.auth_cookie)
                return ATTEMPT_RESUME
        return ATTEMPT_NONE
InitializeClass(NTLMCookieCrumbler)
manage_addCCForm = CookieCrumbler.manage_addCCForm
def manage_addCC(self, id, REQUEST=None):
    """ interface to add a NTML Cookie Crumbler """
    # Instantiate, name and register the crumbler inside the container.
    ob = NTLMCookieCrumbler()
    ob.id = id
    self._setObject(id, ob)
    if REQUEST is not None:
        # Called from the web: redraw the ZMI management screen.
        return self.manage_main(self, REQUEST)
    return id
|
import os
import re
from Person import Person
class Displayer():
    """Terminal dashboard for an epidemic simulation.

    Renders a legend, one-line daily statistics and (optionally) a
    coloured fixed-width cell per person; appends a tab-separated
    statistics row to `log` when a log path is given.
    """
    def __init__(self, simulator, medicalModel, config_isolation, config_preventions, log=None, showDetails=False):
        self.simulator = simulator
        self.medicalModel = medicalModel
        self.isolationTag = config_isolation['sign']
        # Map prevention name -> its one-character display sign.
        self.preventions = dict((preventionName,config['sign']) for preventionName,config in config_preventions.items())
        self.illnessStages = {
            'healthy': colourText('● healthy', 'White'),
            'init': colourText('● incubation', 'Yellow'),
            'sick': colourText('● sick', 'Red'),
            'recovered': colourText('● recovered', 'Cyan'),
            'dead': colourText('● dead', 'DarkGray'),
        }
        self.log = log
        if self.log is not None:
            # Write the TSV header once per run.
            with open(self.log, 'a') as f:
                heads = 'day', 'population', 'dailyCase', 'dailyDead', 'totalCase', 'totalDead', 'recovered', 'currentPaitent', 'isolation'
                f.write('\n'+'\t'.join(heads)+'\n')
        self.showDetails = showDetails
        self.columnWidth = 16
        self.width = self.columnWidth * 20
        # Initial case count: people already in the model's initial illness stage.
        self.totalCase = sum([1 if person.illnessStage==self.medicalModel.initIllnessStage else 0 for person in self.simulator.population])
        self.totalDead = 0
        self.recovered = 0
        self.population = len(simulator.population)
    def update(self):
        """Redraw the dashboard (and append to the log) for the current day."""
        peopleState, dailyCase, dailyDead, currentPaitent, isolation = self.renderPeopleState()
        statistics = dailyCase, dailyDead, self.totalCase, self.totalDead, self.recovered, currentPaitent, isolation
        if self.log is not None:
            with open(self.log, 'a') as f:
                f.write(f'{self.simulator.day}\t{self.population}\t')
                f.write('\t'.join([str(data) for data in statistics])+'\n')
        clearScreen()
        print(makeBar('=', self.width), flush=False)
        self.printLegend()
        print(makeBar('-', self.width), flush=False)
        self.printStatistics(*statistics)
        print(makeBar('-', self.width), flush=False)
        # print details
        if self.showDetails:
            # Lay person cells out in fixed-width columns, one row per line.
            buffer = []
            columnNumber = self.width//self.columnWidth
            for index, personStateText in enumerate(peopleState):
                buffer.append(personStateText+'|')
                if index % columnNumber == columnNumber-1:
                    buffer.append('\n')
            print(''.join(buffer).strip(), flush=False)
        print(makeBar('=', self.width), flush=True)
    def renderPeopleState(self):
        """Build one coloured text cell per person.

        Also tallies today's numbers; returns
        (peopleState, dailyCase, dailyDead, currentPaitent, isolation).
        """
        peopleState = []
        dailyCase = 0
        totalDead = 0
        recovered = 0
        currentPaitent = 0
        isolation = 0
        for person in self.simulator.population:
            isNewCase = False
            if person.illnessStage==self.medicalModel.healthStage:
                healthStateColour = 'White'
            elif person.illnessStage==self.medicalModel.initIllnessStage:
                healthStateColour = 'Yellow'
                # Course 0 means the stage started today: a new case.
                if person.illnessStage_course == 0:
                    isNewCase = True
                    dailyCase += 1
            elif person.illnessStage==self.medicalModel.recoveryStage:
                healthStateColour = 'Cyan'
                recovered += 1
            elif person.illnessStage==self.medicalModel.deadStage:
                healthStateColour = 'DarkGray'
                # NOTE(review): only counts dead with course == 0 —
                # presumably the course stays 0 after death; confirm.
                if person.illnessStage_course == 0:
                    totalDead += 1
            else: # riskStages
                healthStateColour = 'Red'
                currentPaitent += 1
            preventionSigns = [self.preventions[prevention] for prevention in person.preventions]
            # Highlight new cases with a magenta background on the type char.
            personType = colourText(person.type, background='Magenta') if isNewCase else person.type
            sign = '●'
            if self.simulator.enableIsolation and person.inIsolation>=0:
                sign = self.isolationTag
                isolation += 1
                personType = colourText(personType, "Black", background='LightGray')
            stageText = colourText(f' {sign} ', healthStateColour)+personType
            preventionText = colourText(''.join(preventionSigns), "White")
            # Pad so every cell is exactly columnWidth visible characters.
            combinedText = stageText+makeBar(' ',self.columnWidth-getTextLength(stageText)-getTextLength(preventionText)-1)+preventionText
            peopleState.append(combinedText)
        dailyDead = totalDead - self.totalDead
        self.totalDead = totalDead
        self.totalCase += dailyCase
        self.recovered = recovered
        return peopleState, dailyCase, dailyDead, currentPaitent, isolation
    def printStatistics(self, dailyCase, dailyDead, totalCase, totalDead, recovered, currentPaitent, isolation):
        """Print the one-line statistics banner."""
        statistics = f"Day: {self.simulator.day} "
        statistics += f"Population: {self.population} " + "| "
        statistics += f"Daily Case: {dailyCase} "
        statistics += f"Daily Dead: {dailyDead} "
        statistics += f"Total Case: {totalCase} "
        statistics += f"Total Dead: {totalDead} " + "| "
        statistics += f"Recovered: {recovered} "
        statistics += f"Current Paitent: {currentPaitent} "
        statistics += f"Isolation: {isolation} "
        print(statistics)
    def printLegend(self):
        """Print the colour legend and prevention sign key."""
        legends = self.illnessStages.values()
        preventions = [f'{preventionName}: {colourText(sign, "White")}' for preventionName,sign in self.preventions.items()]
        text = 'Health State: '+' '.join(legends)+' '+colourText('Daily Case', background='Magenta')+' '
        text += f'{colourText(f"{self.isolationTag} isolation", "Black", background="LightGray")} ' + '| '
        text += 'Preventions: '+' '.join(preventions)+' '
        textLengt = getTextLength(text)
        print(text,makeBar(' ', self.width-textLengt-1),'|',sep='')
def makeBar(sign, length):
    """Return `sign` repeated `length` times ('' for non-positive lengths)."""
    # str * n is the idiomatic (and C-speed) form of joining a repeated list.
    return sign * length
def getTextLength(text):
    """Length of `text` with ANSI colour escape sequences excluded."""
    return len(re.sub(r"\033\[[0-9;]*m", '', text))
def colourText(text, colour='Default', style='Bold', background='Default'):
    """Wrap `text` in ANSI SGR escape codes for style, foreground and
    background colours. Unknown names raise KeyError."""
    styles = {
        'Bold': "1", 'Dim': "2", 'Underlined': "4", 'Blink': "5",
        'Reverse': "7", 'Hidden': "8", 'ResetBold': "21", 'ResetDim': "22",
        'ResetUnderlined': "24", 'ResetBlink': "25", 'ResetReverse': "27",
        'ResetHidden': "28",
    }
    foregrounds = {
        'Default': "39", 'Black': "30", 'Red': "31", 'Green': "32",
        'Yellow': "33", 'Blue': "34", 'Magenta': "35", 'Cyan': "36",
        'LightGray': "37", 'DarkGray': "90", 'LightRed': "91",
        'LightGreen': "92", 'LightYellow': "93", 'LightBlue': "94",
        'LightMagenta': "95", 'LightCyan': "96", 'White': "97",
    }
    # ANSI background codes are the foreground codes shifted by +10.
    backgrounds = {name: str(int(code) + 10) for name, code in foregrounds.items()}
    codes = ';'.join((styles[style], foregrounds[colour], backgrounds[background]))
    return "\033[{0}m{1}\033[0m".format(codes, text)
def clearScreen():
    # Shells out to POSIX `clear`; rebound to the `cls` variant on win32 below.
    os.system('clear')
# ========================== Platform compatibility for Windows ==========================
import sys
platform = sys.platform
# windows cannot colour the texts in terminals; also, windows uses cls instead of clear
if platform=='win32':
    # Plain-text fallback: same signature as colourText, no ANSI escapes.
    def colourText_win(text, colour='Default', style='Bold', background='Default'):
        return text
    colourText = colourText_win
    def clearScreenWin():
        os.system('cls')
    clearScreen = clearScreenWin
import random
import base64
class Config:
    """Flask-style application configuration."""
    # BUG FIX: bytes(random.randint(...)) builds a bytes object of N *zero*
    # bytes (bytes(n) == n NULs, up to ~19 MB here), so the base64 "secret"
    # was a huge constant string of 'A's. Encode 32 random bytes instead.
    # NOTE: `random` is not cryptographically secure — prefer
    # secrets.token_bytes(32) once adding that import is acceptable.
    SECRET_KEY = base64.b64encode(bytes(random.getrandbits(8) for _ in range(32)))
    DEBUG = True  # debug mode on (original comment claimed it was off)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
from torch.nn.utils import spectral_norm
from torchvision.models.video.resnet import r2plus1d_18
from miscc.config import cfg
from torch.autograd import Variable
import numpy as np
import pdb
# Module-level tensor namespace: T.FloatTensor allocates on the GPU when
# CUDA is available, on the CPU otherwise.
if torch.cuda.is_available():
    T = torch.cuda
else:
    T = torch
def conv3x3(in_planes, out_planes, stride=1, use_spectral_norm=False):
    "3x3 convolution with padding"
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
    # Optionally wrap with spectral normalisation (e.g. discriminator use).
    return spectral_norm(conv) if use_spectral_norm else conv
# Upsale the spatial size by a factor of 2
def upBlock(in_planes, out_planes):
    """Double the spatial size: nearest upsample, then conv3x3 + BN + ReLU."""
    layers = [
        nn.Upsample(scale_factor=2, mode='nearest'),
        conv3x3(in_planes, out_planes),
        nn.BatchNorm2d(out_planes),
        nn.ReLU(True),
    ]
    return nn.Sequential(*layers)
class CA_NET(nn.Module):
    """Conditioning augmentation: map a text embedding to a Gaussian
    (mu, logvar) and sample a condition code via the reparametrization
    trick."""
    # some code is modified from vae examples
    # (https://github.com/pytorch/examples/blob/master/vae/main.py)
    def __init__(self):
        super(CA_NET, self).__init__()
        self.t_dim = cfg.TEXT.DIMENSION * cfg.VIDEO_LEN
        self.c_dim = cfg.GAN.CONDITION_DIM
        # Single FC producing mu and logvar concatenated.
        self.fc = nn.Linear(self.t_dim, self.c_dim * 2, bias=True)
        self.relu = nn.ReLU()
    def encode(self, text_embedding):
        # Split the FC output into mean and log-variance halves.
        x = self.relu(self.fc(text_embedding))
        mu = x[:, :self.c_dim]
        logvar = x[:, self.c_dim:]
        return mu, logvar
    def reparametrize(self, mu, logvar):
        # z = mu + eps * std with eps ~ N(0, 1).
        std = logvar.mul(0.5).exp_()
        if cfg.CUDA:
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        return eps.mul(std).add_(mu)
    def forward(self, text_embedding):
        mu, logvar = self.encode(text_embedding)
        c_code = self.reparametrize(mu, logvar)
        return c_code, mu, logvar
class D_GET_LOGITS(nn.Module):
    """Discriminator head: 4x4 feature map -> scalar real/fake probability,
    optionally conditioned on a broadcast condition code."""
    def __init__(self, ndf, nef, bcondition=True):
        super(D_GET_LOGITS, self).__init__()
        self.df_dim = ndf
        self.ef_dim = nef
        self.bcondition = bcondition
        if bcondition:
            # Fuse image features with the condition code, then project to 1.
            self.outlogits = nn.Sequential(
                conv3x3(ndf * 8 + nef, ndf * 8, use_spectral_norm=True),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                spectral_norm(nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4)),
                nn.Sigmoid())
        else:
            self.outlogits = nn.Sequential(
                spectral_norm(nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4)),
                nn.Sigmoid())
    def forward(self, h_code, c_code=None):
        # conditioning output
        if self.bcondition and c_code is not None:
            # Broadcast the condition code over the 4x4 spatial grid.
            c_code = c_code.view(-1, self.ef_dim, 1, 1)
            c_code = c_code.repeat(1, 1, 4, 4)
            # state size (ngf+egf) x 4 x 4
            h_c_code = torch.cat((h_code, c_code), 1)
        else:
            h_c_code = h_code
        output = self.outlogits(h_c_code)
        return output.view(-1)
class R2Plus1dStem(nn.Sequential):
    """R(2+1)D stem is different than the default one as it uses separated 3D convolution
    """
    def __init__(self):
        # Spatial 1x7x7 conv followed by a pointwise channel-mixing conv,
        # both spectrally normalised.
        # NOTE(review): the second conv has kernel (1,1,1) but padding
        # (1,0,0), which pads the temporal axis around a pointwise kernel
        # (torchvision's stem uses a (3,1,1) kernel there) — confirm this
        # is intentional.
        super(R2Plus1dStem, self).__init__(
            spectral_norm(nn.Conv3d(3, 45, kernel_size=(1, 7, 7),
                          stride=(1, 2, 2), padding=(0, 3, 3),
                          bias=False)),
            nn.BatchNorm3d(45),
            nn.ReLU(inplace=True),
            spectral_norm(nn.Conv3d(45, 64, kernel_size=(1, 1, 1),
                          stride=(1, 1, 1), padding=(1, 0, 0),
                          bias=False)),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True))
class BasicBlock(nn.Module):
    """Two-convolution residual block for 3D video networks; the actual
    convolution layout is supplied by `conv_builder`."""
    __constants__ = ['downsample']
    expansion = 1
    def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):
        # Midplanes chosen so the (2+1)D factorisation matches the
        # parameter count of a full 3x3x3 convolution.
        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Sequential(
            conv_builder(inplanes, planes, midplanes, stride),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True)
        )
        self.conv2 = nn.Sequential(
            conv_builder(planes, planes, midplanes),
            nn.BatchNorm3d(planes)
        )
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        if self.downsample is not None:
            # Match residual shape when stride/channel count changes.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class VideoEncoder(nn.Module):
    """Story discriminator: 3-D conv stack -> pooled 512-d latent ->
    small MLP producing one real/fake logit per story."""
    def __init__(self):
        super(VideoEncoder, self).__init__()
        # NOTE(review): this resnet is constructed but never used below —
        # presumably left over from an earlier design; confirm before removal.
        video_resnet = r2plus1d_18(pretrained=False, progress=True)
        padding= 1
        # Alternating spatial (1,k,k) and temporal (k,1,1) spectral-normed
        # 3-D convolutions, downsampling while channels grow 64 -> 512.
        block = [
            R2Plus1dStem(),
            spectral_norm(nn.Conv3d(64, 128, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, padding, padding)
                ,bias=False)),
            nn.BatchNorm3d(128),
            nn.LeakyReLU(0.2),
            spectral_norm(nn.Conv3d(128, 128, kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(padding, 0, 0),
                bias=False)),
            nn.BatchNorm3d(128),
            nn.LeakyReLU(0.2),
            spectral_norm(nn.Conv3d(128, 128, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, padding, padding),
                bias=False)),
            nn.BatchNorm3d(128),
            nn.LeakyReLU(0.2),
            spectral_norm(nn.Conv3d(128, 256, kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(padding, 0, 0),
                bias=False)),
            nn.BatchNorm3d(256),
            nn.LeakyReLU(0.2),
            spectral_norm(nn.Conv3d(256, 256, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, padding, padding),
                bias=False)),
            nn.BatchNorm3d(256),
            nn.LeakyReLU(0.2),
            spectral_norm(nn.Conv3d(256, 512, kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(padding, 0, 0),
                bias=False)),
            nn.BatchNorm3d(512),
            nn.LeakyReLU(0.2),
            spectral_norm(nn.Conv3d(512, 512, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, padding, padding),
                bias=False)),
            nn.BatchNorm3d(512),
            nn.LeakyReLU(0.2),
            spectral_norm(nn.Conv3d(512, 512, kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(padding, 0, 0),
                bias=False)),
            nn.BatchNorm3d(512),
            nn.LeakyReLU(0.2),
        ]
        self.pool = nn.AdaptiveAvgPool3d(1)
        self.story_encoder = nn.Sequential(*block)
        self.detector = nn.Sequential(
            spectral_norm(nn.Linear(512, 128)),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            spectral_norm(nn.Linear(128, 1)),
        )
    def forward(self, story):
        '''
        story: B x T X C X W X H
        B: batch size, N : number of story, T story length
        C: image channel, WxH width and height
        '''
        B = story.shape[0]
        latents = self.story_encoder(story)
        # Global average pool to a flat 512-d vector per story.
        latents = self.pool(latents)
        latents = latents.view(B, -1)
        return self.detector(latents)
# ############# Networks for stageI GAN #############
class StoryGAN(nn.Module):
    def __init__(self, video_len):
        """Story generator. `video_len` is the number of frames per story."""
        super(StoryGAN, self).__init__()
        self.batch_size = cfg.TRAIN.IM_BATCH_SIZE
        self.gf_dim = cfg.GAN.GF_DIM * 8 # 128*8=1024
        self.gf_dim_seg = cfg.GAN.GF_SEG_DIM #512 #48
        self.motion_dim = cfg.TEXT.DIMENSION + cfg.LABEL_NUM # (356+9=365)
        self.content_dim = cfg.GAN.CONDITION_DIM # encoded text dim (124)
        self.noise_dim = cfg.GAN.Z_DIM # noise (100)
        # Motion GRU over [noise, motion]; second GRU maps motion -> content space.
        self.recurrent = nn.GRUCell(self.noise_dim + self.motion_dim, self.motion_dim) # (465,365)
        self.mocornn = nn.GRUCell(self.motion_dim, self.content_dim) # (365,124)
        self.video_len = video_len
        self.n_channels = 3
        self.filter_num = 3
        self.filter_size = 21
        self.image_size = 124
        self.out_num = 1
        # for segment image v1
        self.use_segment = cfg.SEGMENT_LEARNING
        self.segment_size = 4*2*2*2*2 # inital size is 4, upsample 4 times = 64
        self.segment_flat_size = 3*self.segment_size**2 # 12288
        # v2
        self.aux_size = 5
        # Fixed auxiliary input [0, 0.1, 0.2, 0.3, 0.4] kept on the GPU.
        self.fix_input = 0.1*torch.tensor(range(self.aux_size)).float().cuda()
        self.define_module()
    def define_module(self):
        """Build all submodules (split out of __init__ for readability)."""
        from layers import DynamicFilterLayer1D as DynamicFilterLayer
        ninput = self.motion_dim + self.content_dim + self.image_size # (365+124+124=613)
        ngf = self.gf_dim # 128*8=1024
        self.ca_net = CA_NET()
        # -> ngf x 4 x 4
        # Predict dynamic-filter weights from the content code.
        self.filter_net = nn.Sequential(
            nn.Linear(self.content_dim, self.filter_size * self.filter_num * self.out_num),
            nn.BatchNorm1d(self.filter_size * self.filter_num * self.out_num))
        self.image_net = nn.Sequential(
            nn.Linear(self.motion_dim, self.image_size * self.filter_num),
            nn.BatchNorm1d(self.image_size * self.filter_num),
            nn.Tanh())
        # For generate final image
        self.fc = nn.Sequential(
            nn.Linear(ninput, ngf * 4 * 4, bias=False),
            nn.BatchNorm1d(ngf * 4 * 4),
            nn.ReLU(True))
        # Four upsampling stages: 4x4 -> 64x64 while channels shrink /16.
        self.upsample1 = upBlock(ngf, ngf//2)
        # -> ngf/4 x 16 x 16
        self.upsample2 = upBlock(ngf//2, ngf//4)
        # -> ngf/8 x 32 x 32
        self.upsample3 = upBlock(ngf//4, ngf//8)
        # -> ngf/16 x 64 x 64
        self.upsample4 = upBlock(ngf//8, ngf//16)
        # -> 3 x 64 x 64
        self.img = nn.Sequential(
            conv3x3(ngf // 16, 3),
            nn.Tanh())
        if self.use_segment:
            # Parallel decoder producing a 1-channel segment map.
            ngf_seg = self.gf_dim_seg
            self.seg_c = conv3x3(ngf_seg, ngf)
            self.seg_c1 = conv3x3(ngf_seg//2, ngf//2)
            # self.seg_c2 = conv3x3(ngf_seg//4, ngf//4)
            # self.seg_c3 = conv3x3(ngf_seg//8, ngf//8)
            # self.seg_c4 = conv3x3(ngf_seg//16, ngf//16)
            # For generate seg and img v4 and v5 and v6
            self.fc_seg = nn.Sequential(
                nn.Linear(ninput, ngf_seg * 4 * 4, bias=False),
                nn.BatchNorm1d(ngf_seg * 4 * 4),
                nn.ReLU(True))
            # ngf x 4 x 4 -> ngf/2 x 8 x 8
            self.upsample1_seg = upBlock(ngf_seg, ngf_seg // 2)
            # -> ngf/4 x 16 x 16
            self.upsample2_seg = upBlock(ngf_seg // 2, ngf_seg // 4)
            # -> ngf/8 x 32 x 32
            self.upsample3_seg = upBlock(ngf_seg // 4, ngf_seg // 8)
            # -> ngf/16 x 64 x 64
            self.upsample4_seg = upBlock(ngf_seg // 8, ngf_seg // 16)
            # -> 3 x 64 x 64
            self.img_seg = nn.Sequential(
                conv3x3(ngf_seg // 16, 1),
                nn.Tanh())
        self.m_net = nn.Sequential(
            nn.Linear(self.motion_dim, self.motion_dim),
            nn.BatchNorm1d(self.motion_dim))
        self.c_net = nn.Sequential(
            nn.Linear(self.content_dim, self.content_dim),
            nn.BatchNorm1d(self.content_dim))
        self.dfn_layer = DynamicFilterLayer(self.filter_size,
            pad = self.filter_size//2)
def get_iteration_input(self, motion_input):
    """Prepend a fresh Gaussian noise vector to every motion-input row.

    Returns a (batch, noise_dim + motion_dim) tensor used as one GRU step input.
    """
    batch = motion_input.shape[0]
    eps = T.FloatTensor(batch, self.noise_dim).normal_(0, 1)
    return torch.cat([eps, motion_input], dim=1)
def get_gru_initial_state(self, num_samples):
    """Return a random (num_samples, motion_dim) initial hidden state."""
    init = T.FloatTensor(num_samples, self.motion_dim).normal_(0, 1)
    return Variable(init)
def sample_z_motion(self, motion_input, video_len=None):
    """Run the motion GRU over `video_len` frames and stack its outputs.

    motion_input is either (batch, motion_dim) — reused for every frame —
    or (batch, video_len, motion_dim). The initial hidden state (produced by
    m_net from random noise) is NOT part of the returned sequence.

    Returns a (batch * video_len, motion_dim) tensor of per-frame codes.
    """
    if video_len is None:
        video_len = self.video_len
    batch = motion_input.shape[0]
    static_input = len(motion_input.shape) == 2
    hidden = self.m_net(self.get_gru_initial_state(batch))
    states = []
    for t in range(video_len):
        frame = motion_input if static_input else motion_input[:, t, :]
        hidden = self.recurrent(self.get_iteration_input(frame), hidden)
        states.append(hidden.view(-1, 1, self.motion_dim))
    return torch.cat(states, dim=1).view(-1, self.motion_dim)
def motion_content_rnn(self, motion_input, content_input):
    """Condition the content code on the motion sequence via the mocornn GRU.

    A 2-D motion_input is treated as a single frame. The initial state
    (c_net(content_input)) is dropped from the returned sequence.

    Returns a (batch * video_len, content_dim) tensor.
    """
    single_frame = len(motion_input.shape) == 2
    steps = 1 if single_frame else self.video_len
    if single_frame:
        motion_input = motion_input.unsqueeze(1)
    hidden = self.c_net(content_input)
    outputs = []
    for t in range(steps):
        hidden = self.mocornn(motion_input[:, t, :], hidden)
        outputs.append(hidden.view(-1, 1, self.content_dim))
    return torch.cat(outputs, dim=1).view(-1, self.content_dim)
def sample_videos(self, motion_input, content_input, seg=False):
    """Generate a fake video (and optionally segmentation maps) for a story.

    Args:
        motion_input: (batch, video_len, motion_dim) per-frame codes.
        content_input: (batch, video_len, text_dim) story content codes.
        seg: when True and self.use_segment, also return segmentation maps.

    Returns:
        (None, fake_video, m_mu, m_logvar, r_mu, r_logvar, segm_video_or_None)
    """
    bs, video_len = motion_input.shape[0], motion_input.shape[1]
    num_img = bs * video_len  # NOTE(review): unused below
    # flatten the whole story into one conditioning vector per sample
    content_input = content_input.view(-1, cfg.VIDEO_LEN * content_input.shape[2])
    if content_input.shape[0] > 1:
        content_input = torch.squeeze(content_input)
    r_code, r_mu, r_logvar = self.ca_net(content_input)  ## h0 (CA re-parameterization)
    #c_code = r_code.repeat(self.video_len, 1).view(-1, r_code.shape[1])
    # repeat the content mean once per frame
    c_mu = r_mu.repeat(self.video_len, 1).view(-1, r_mu.shape[1])
    #c_logvar = r_logvar.repeat(self.video_len, 1).view(-1, r_logvar.shape[1])
    crnn_code = self.motion_content_rnn(motion_input, r_code)  ## i_t = GRU(s_t)
    temp = motion_input.view(-1, motion_input.shape[2])
    # motion is passed through untransformed (CA net deliberately bypassed)
    m_code, m_mu, m_logvar = temp, temp, temp  #self.ca_net(temp)
    m_code = m_code.view(motion_input.shape[0], self.video_len, self.motion_dim)
    zm_code = self.sample_z_motion(m_code, self.video_len)  ## recurrent motion codes
    # path one: motion codes concatenated with content mean
    zmc_code = torch.cat((zm_code, c_mu), dim = 1)
    # path two: dynamic-filter image from motion + content
    m_image = self.image_net(m_code.view(-1, m_code.shape[2]))  ## project motion to image_size
    m_image = m_image.view(-1, self.filter_num, self.image_size)
    c_filter = self.filter_net(crnn_code)  ## Filter(i_t)
    c_filter = c_filter.view(-1, self.out_num, self.filter_num, self.filter_size)
    mc_image = self.dfn_layer([m_image, c_filter])  ## dynamic filtering
    zmc_all_ = torch.cat((zmc_code, mc_image.squeeze(1)), dim = 1)
    zmc_img = self.fc(zmc_all_).view(-1, self.gf_dim, 4, 4)
    if self.use_segment:
        # parallel segmentation decoder; its features gate the image decoder
        zmc_seg = self.fc_seg(zmc_all_).view(-1, self.gf_dim_seg, 4, 4)
        zmc_img = self.seg_c(zmc_seg) * zmc_img + zmc_img
        h_seg = self.upsample1_seg(zmc_seg)
        h_img = self.upsample1(zmc_img)
        h_img = self.seg_c1(h_seg) * h_img + h_img
        h_seg = self.upsample2_seg(h_seg)
        h_img = self.upsample2(h_img)
        # h_img = self.seg_c2(h_seg) * h_img + h_img
        h_seg = self.upsample3_seg(h_seg)
        h_img = self.upsample3(h_img)
        # h_img = self.seg_c3(h_seg) * h_img + h_img
        h_seg = self.upsample4_seg(h_seg)
        h_img = self.upsample4(h_img)
        # h_img = self.seg_c4(h_seg) * h_img + h_img
        # generate seg maps (1 channel per frame)
        segm_video = self.img_seg(h_seg)
        segm_temp = segm_video.view(-1, self.video_len, 1, self.segment_size, self.segment_size)
        segm_temp = segm_temp.permute(0, 2, 1, 3, 4)
        # generate video, reshaped to (batch, channels, video_len, H, W)
        fake_video = self.img(h_img)
        fake_video = fake_video.view(-1, self.video_len, self.n_channels, self.segment_size, self.segment_size)
        fake_video = fake_video.permute(0, 2, 1, 3, 4)
        if seg==True:
            return None, fake_video, m_mu, m_logvar, r_mu, r_logvar, segm_video # m_mu(60,365), m_logvar(60,365), r_mu(12,124), r_logvar(12,124)
        else:
            return None, fake_video, m_mu, m_logvar, r_mu, r_logvar, None # m_mu(60,365), m_logvar(60,365), r_mu(12,124), r_logvar(12,124)
    else:
        # plain image decoder without segmentation gating
        h_code = self.upsample1(zmc_img) # h_code: batch_size*video_len, 1024, 8, 8
        h_code = self.upsample2(h_code) # h_code: batch_size*video_len, 512, 16, 16
        h_code = self.upsample3(h_code) # h_code: batch_size*video_len, 256, 32, 32
        h_code = self.upsample4(h_code) # h_code: batch_size*video_len=60, 128, 64, 64
        # state size 3 x 64 x 64
        h = self.img(h_code)
        fake_video = h.view( int(h.size(0)/self.video_len), self.video_len, self.n_channels, h.size(3), h.size(3)) # 12, 5, 3, 64, 64
        fake_video = fake_video.permute(0, 2, 1, 3, 4) # 12, 3, 5, 64, 64
        #pdb.set_trace()
        return None, fake_video, m_mu, m_logvar, r_mu, r_logvar, None # m_mu(60,365), m_logvar(60,365), r_mu(12,124), r_logvar(12,124)
def sample_images(self, motion_input, content_input, seg=False):
    """Generate a single frame (and optionally its segmentation map).

    Same pipeline as sample_videos but with one GRU step per sample.

    Returns:
        (None, fake_img, m_mu, m_logvar, c_mu, c_logvar, segm_img_or_None)
    """
    bs, video_len = motion_input.shape[0], motion_input.shape[1]
    num_img = bs  # NOTE(review): unused below
    # motion passed through untransformed (no CA net on the motion path)
    m_code, m_mu, m_logvar = motion_input, motion_input, motion_input
    content_input = content_input.reshape(-1, cfg.VIDEO_LEN * content_input.shape[2])
    c_code, c_mu, c_logvar = self.ca_net(content_input)  ## h0
    crnn_code = self.motion_content_rnn(motion_input, c_mu)  ## GRU over one frame
    zm_code = self.sample_z_motion(m_code, 1)  ## Text2Gist, single step
    # path one: motion code + content mean
    zmc_code = torch.cat((zm_code, c_mu), dim = 1) # (60,365 ; 60,124)->(60,489)
    # path two: dynamic-filter image
    m_image = self.image_net(m_code)
    m_image = m_image.view(-1, self.filter_num, self.image_size) #(60,3,124)
    c_filter = self.filter_net(crnn_code)
    c_filter = c_filter.view(-1, self.out_num, self.filter_num, self.filter_size)
    mc_image = self.dfn_layer([m_image, c_filter]) #(60,1,124)
    zmc_all_ = torch.cat((zmc_code, mc_image.squeeze(1)), dim = 1) # (60,613)
    zmc_img = self.fc(zmc_all_).view(-1, self.gf_dim, 4, 4)
    if self.use_segment:
        # segmentation decoder features gate the image decoder at each scale
        zmc_seg = self.fc_seg(zmc_all_).view(-1, self.gf_dim_seg, 4, 4)
        zmc_img = self.seg_c(zmc_seg) * zmc_img + zmc_img
        h_seg = self.upsample1_seg(zmc_seg)
        h_img = self.upsample1(zmc_img)
        h_img = self.seg_c1(h_seg) * h_img + h_img
        h_seg = self.upsample2_seg(h_seg)
        h_img = self.upsample2(h_img)
        # h_img = self.seg_c2(h_seg) * h_img + h_img
        h_seg = self.upsample3_seg(h_seg)
        h_img = self.upsample3(h_img)
        # h_img = self.seg_c3(h_seg) * h_img + h_img
        h_seg = self.upsample4_seg(h_seg)
        h_img = self.upsample4(h_img)
        # h_img = self.seg_c4(h_seg) * h_img + h_img
        # generate seg map
        segm_img = self.img_seg(h_seg)
        # generate image
        fake_img = self.img(h_img)
        fake_img = fake_img.view(-1, self.n_channels, self.segment_size, self.segment_size)
        if seg==True:
            return None, fake_img, m_mu, m_logvar, c_mu, c_logvar, segm_img
        else:
            return None, fake_img, m_mu, m_logvar, c_mu, c_logvar, None
    else:
        h_code = self.upsample1(zmc_img)
        h_code = self.upsample2(h_code)
        h_code = self.upsample3(h_code)
        h_code = self.upsample4(h_code)
        # state size 3 x 64 x 64
        fake_img = self.img(h_code)
        return None, fake_img, m_mu, m_logvar, c_mu, c_logvar, None
class STAGE1_D_IMG(nn.Module):
    """Stage-1 frame discriminator.

    Encodes a 3x64x64 image into an (ndf*8)x4x4 feature map; conditional
    logits come from the attached D_GET_LOGITS head, and an optional
    category classifier predicts per-image labels.
    """
    def __init__(self, use_categories = True):
        super(STAGE1_D_IMG, self).__init__()
        self.df_dim = cfg.GAN.DF_DIM
        self.ef_dim = cfg.GAN.CONDITION_DIM
        self.text_dim = cfg.TEXT.DIMENSION
        self.label_num = cfg.LABEL_NUM
        self.define_module(use_categories)

    def define_module(self, use_categories):
        """Build the encoder, logits heads and (optional) category classifier."""
        ndf, nef = self.df_dim, self.ef_dim
        # 3x64x64 -> (ndf*8)x4x4; spectral norm on all but the first conv
        self.encode_img = nn.Sequential(
            nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            spectral_norm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size (ndf*2) x 16 x 16
            spectral_norm(nn.Conv2d(ndf*2, ndf * 4, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size (ndf*4) x 8 x 8
            spectral_norm(nn.Conv2d(ndf*4, ndf * 8, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 8),
            # state size (ndf * 8) x 4 x 4
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.seq_consisten_model = None
        self.get_cond_logits = D_GET_LOGITS(ndf, nef + self.text_dim + self.label_num)
        self.get_uncond_logits = None
        if use_categories:
            self.cate_classify = nn.Conv2d(ndf * 8, self.label_num, 4, 4, 1, bias = False)
        else:
            self.cate_classify = None

    def forward(self, image):
        """Encode a batch of images; (N,3,64,64) -> (N, ndf*8, 4, 4)."""
        img_embedding = self.encode_img(image)
        return img_embedding
class STAGE1_D_SEG(nn.Module):
    """Stage-1 segmentation-map discriminator.

    Identical to STAGE1_D_IMG except the input is a single-channel (1x64x64)
    segmentation map instead of an RGB image.
    """
    def __init__(self, use_categories = True):
        super(STAGE1_D_SEG, self).__init__()
        self.df_dim = cfg.GAN.DF_DIM
        self.ef_dim = cfg.GAN.CONDITION_DIM
        self.text_dim = cfg.TEXT.DIMENSION
        self.label_num = cfg.LABEL_NUM
        self.define_module(use_categories)

    def define_module(self, use_categories):
        """Build the encoder, logits heads and (optional) category classifier."""
        ndf, nef = self.df_dim, self.ef_dim
        # 1x64x64 -> (ndf*8)x4x4
        self.encode_img = nn.Sequential(
            nn.Conv2d(1, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            spectral_norm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size (ndf*2) x 16 x 16
            spectral_norm(nn.Conv2d(ndf*2, ndf * 4, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size (ndf*4) x 8 x 8
            spectral_norm(nn.Conv2d(ndf*4, ndf * 8, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 8),
            # state size (ndf * 8) x 4 x 4
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.seq_consisten_model = None
        self.get_cond_logits = D_GET_LOGITS(int(ndf), nef + self.text_dim + self.label_num)
        self.get_uncond_logits = None
        if use_categories:
            self.cate_classify = nn.Conv2d(ndf * 8, self.label_num, 4, 4, 1, bias = False)
        else:
            self.cate_classify = None

    def forward(self, image):
        """Encode a batch of seg maps; (N,1,64,64) -> (N, ndf*8, 4, 4)."""
        img_embedding = self.encode_img(image)
        return img_embedding
class STAGE1_D_STY_V2(nn.Module):
    """Stage-1 story (video-level) discriminator.

    Encodes every frame of a story independently, then averages the frame
    embeddings over time to get one embedding per story.
    """
    def __init__(self):
        super(STAGE1_D_STY_V2, self).__init__()
        self.df_dim = cfg.GAN.DF_DIM
        self.ef_dim = cfg.GAN.CONDITION_DIM
        self.text_dim = cfg.TEXT.DIMENSION
        self.label_num = cfg.LABEL_NUM
        self.define_module()

    def define_module(self):
        """Build the per-frame encoder and logits heads."""
        ndf, nef = self.df_dim, self.ef_dim
        # 3x64x64 -> (ndf*8)x4x4; here even the first conv is spectral-normed
        self.encode_img = nn.Sequential(
            spectral_norm(nn.Conv2d(3, ndf, 4, 2, 1, bias=False)),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            spectral_norm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size (ndf*2) x 16 x 16
            spectral_norm(nn.Conv2d(ndf*2, ndf * 4, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size (ndf*4) x 8 x 8
            spectral_norm(nn.Conv2d(ndf*4, ndf * 8, 4, 2, 1, bias=False)),
            nn.BatchNorm2d(ndf * 8),
            # state size (ndf * 8) x 4 x 4
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.seq_consisten_model = None
        if cfg.USE_SEQ_CONSISTENCY:
            self.seq_consisten_model = VideoEncoder()
            # checkpoint = torch.load('logs/consistencybaseline_0.5/model.pt')
            # self.seq_consisten_model.load_state_dict(checkpoint['model'].state_dict())
        self.get_cond_logits = D_GET_LOGITS(ndf, nef + self.text_dim + self.label_num)
        self.get_uncond_logits = None
        self.cate_classify = None

    def forward(self, story):
        """Encode a story tensor (N, C, video_len, W, H) to one embedding per story."""
        N, C, video_len, W, H = story.shape
        # fold time into the batch dimension so frames are encoded independently
        story = story.permute(0,2,1,3,4)
        story = story.contiguous().view(-1, C,W,H)
        # NOTE(review): squeeze() drops the batch dimension when N == 1
        # (and again after mean) — confirm callers never pass batch size 1.
        story_embedding = torch.squeeze(self.encode_img(story))
        _, C1, W1, H1 = story_embedding.shape
        story_embedding = story_embedding.view(N,video_len, C1, W1, H1)
        # average over the time axis -> one embedding per story
        story_embedding = story_embedding.mean(1).squeeze()
        return story_embedding
"""
class GET_LOGITS():
def __init__(self):
super(GET_LOGITS, self).__init__()
self.project_dim = cfg.GAN.TEXT_CYC_DIS_PROJECT_DIM # 100
self.define_module()
def define_module(self):
self.get_logits = nn.Sequential(
nn.Linear(self.project_dim,1),
nn.Sigmoid())
def forward(self, input):
return self.get_logits(input).view(-1)
class STAGE1_D_TextCyc(nn.Module):
def __init__(self):
super(STAGE1_D_TextCyc, self).__init__()
self.text_dim = cfg.TEXT.DIMENSION # 356
self.project_dim = cfg.GAN.TEXT_CYC_DIS_PROJECT_DIM # 100
self.define_module()
def define_module(self):
self.embedding = nn.Sequential(
nn.Linear(self.text_dim, self.project_dim),
nn.BatchNorm1d(self.project_dim),
nn.LeakyReLU(0.2, inplace=True))
#self.get_uncond_logits = nn.Sequential(
# nn.Linear(self.project_dim,1),
# nn.Sigmoid())
self.get_uncond_logits = GET_LOGITS()
def forward(self, text):
return self.embedding(text)"""
if __name__ == "__main__":
    # Smoke test: push one random batch of 5-frame "videos" through VideoEncoder.
    img = torch.randn(3,3,5,64, 64)
    m = VideoEncoder()
    m(img)
"""
Ansible action plugin to ensure inventory variables are set
appropriately and no conflicting options have been provided.
"""
import collections
import six
from ansible.plugins.action import ActionBase
from ansible import errors
# Error text shown when a master-config string points outside the allowed
# directories. Placeholders: allowed-dirs, allowed-dirs, offending string,
# auto-migrated items.
FAIL_MSG = """A string value that appears to be a file path located outside of
{} has been found in /etc/origin/master/master-config.yaml.
In 3.10 and newer, all files needed by the master must reside inside of
those directories or a subdirectory or it will not be readable by the
master process. Please migrate all files needed by the master into
one of {} or a subdirectory and update your master configs before
proceeding. The string found was: {}
***********************
NOTE: the following items do not need to be migrated, they will be migrated
for you: {}"""

# Dotted config paths that are migrated automatically and therefore must be
# removed from the config before the path scan runs.
ITEMS_TO_POP = (
    ('oauthConfig', 'identityProviders'),
)
# Create csv string of dot-separated dictionary keys:
# eg: 'oauthConfig.identityProviders, something.else.here'
MIGRATED_ITEMS = ", ".join([".".join(x) for x in ITEMS_TO_POP])

# Directory prefixes that master-referenced files are allowed to live under.
ALLOWED_DIRS = (
    '/etc/origin/master/',
    '/var/lib/origin',
    '/etc/origin/cloudprovider',
    '/etc/origin/kubelet-plugins',
    '/usr/libexec/kubernetes/kubelet-plugins',
)
ALLOWED_DIRS_STRING = ', '.join(ALLOWED_DIRS)
def pop_migrated_fields(mastercfg):
    """Remove config fields that are migrated for users automatically.

    Walks each dotted path in ITEMS_TO_POP down the config tree and removes
    the leaf key in place, so those values are not flagged by the later
    path scan.

    Fix: the original raised KeyError on any config that did not contain the
    full path (e.g. no 'oauthConfig' section); absent paths are now skipped.
    """
    for path in ITEMS_TO_POP:
        parent = mastercfg
        try:
            # descend to the direct parent of the leaf key
            for key in path[:-1]:
                parent = parent[key]
        except (KeyError, TypeError):
            continue  # path not present in this config — nothing to remove
        if isinstance(parent, dict):
            parent.pop(path[-1], None)
def do_item_check(val, strings_to_check):
    """Dispatch on val's type: collect strings, recurse into containers.

    Strings are appended to strings_to_check; list-like objects are walked
    with walk_sequence; dict-like objects with walk_mapping; anything else
    (ints, None, ...) is ignored.

    Fixes: `collections.Sequence` / `collections.Mapping` aliases were
    removed in Python 3.10 — use collections.abc. `six.string_types` is
    simply `str` on Python 3.
    """
    from collections.abc import Mapping, Sequence  # alias removed from `collections` in 3.10
    if isinstance(val, str):
        strings_to_check.append(val)
    elif isinstance(val, Sequence):
        # A list-like object (checked after str, which is also a Sequence)
        walk_sequence(val, strings_to_check)
    elif isinstance(val, Mapping):
        # A dictionary-like object
        walk_mapping(val, strings_to_check)
    # If it's not a string, list, or dictionary, we're not interested.

def walk_sequence(items, strings_to_check):
    """Recursively scan a list-like object for strings."""
    for item in items:
        do_item_check(item, strings_to_check)

def walk_mapping(map_to_walk, strings_to_check):
    """Recursively scan a dict-like object's values for strings."""
    for _, val in map_to_walk.items():
        do_item_check(val, strings_to_check)
def check_strings(strings_to_check):
    """Fail if any path-like string points outside the allowed directories.

    A string "looks like" a file path when it starts with '/' or '../'.
    Raises AnsibleModuleError (with the migration guidance in FAIL_MSG) for
    the first such string that does not start with one of ALLOWED_DIRS.
    """
    for candidate in strings_to_check:
        if not (candidate.startswith('/') or candidate.startswith('../')):
            continue  # not a path — ignore
        if not any(candidate.startswith(prefix) for prefix in ALLOWED_DIRS):
            raise errors.AnsibleModuleError(
                FAIL_MSG.format(ALLOWED_DIRS_STRING,
                                ALLOWED_DIRS_STRING,
                                candidate, MIGRATED_ITEMS))
# pylint: disable=R0903
class ActionModule(ActionBase):
    """Action plugin to validate no files are needed by master that reside
    outside of /etc/origin/master as masters will now run as pods and cannot
    utilize files outside of that path as they will not be mounted inside the
    containers."""
    def run(self, tmp=None, task_vars=None):
        """Run this action module.

        Pops the auto-migrated config fields, collects every string found in
        the remaining master config tree, and raises AnsibleModuleError if
        any of them looks like a file path outside ALLOWED_DIRS.
        """
        result = super(ActionModule, self).run(tmp, task_vars)
        # self.task_vars holds all in-scope variables.
        # Ignore setting self.task_vars outside of init.
        # pylint: disable=W0201
        self.task_vars = task_vars or {}
        # mastercfg should be a dictionary from scraping an existing master's
        # config yaml file.
        # NOTE(review): if the task omits 'mastercfg' this is None and the
        # walk below will fail — presumably callers always supply it.
        mastercfg = self._task.args.get('mastercfg')
        # We migrate some paths for users automatically, so we pop those.
        pop_migrated_fields(mastercfg)
        # Create an empty list to append strings from our config file to,
        # to check later.
        strings_to_check = []
        walk_mapping(mastercfg, strings_to_check)
        check_strings(strings_to_check)  # raises on violation
        result["changed"] = False
        result["failed"] = False
        result["msg"] = "Aight, configs looking good"
        return result
|
# Floyd–Warshall solution to the "Kevin Bacon" closeness problem: print the
# 1-indexed node whose summed shortest-path distance to all others is minimal
# (ties broken by the lowest index, courtesy of list.index).
# Fix: removed the two dead sample assignments (n, m and mylist) that were
# unconditionally overwritten by the stdin reads below.
import sys
input = sys.stdin.readline  # NOTE: deliberately shadows builtin input() for speed

n, m = map(int, input().split())
mylist = [list(map(int, input().split())) for _ in range(m)]

INF = 99999999  # effectively "unreachable"
graph = [[INF]*n for _ in range(n)]
for a, b in mylist:
    # undirected edge; input is 1-indexed, matrix is 0-indexed
    graph[a-1][b-1] = 1
    graph[b-1][a-1] = 1

for i in range(n):  # allow node i as an intermediate hop
    for j in range(n):
        for k in range(n):
            graph[j][k] = min(graph[j][k], graph[j][i] + graph[i][k])

# total distance from each node, excluding the (nonzero) self entry
ss = []
for i in range(n):
    ss.append(sum(graph[i])-graph[i][i])
print(ss.index(min(ss))+1)
# ###bfs
# import sys
# input = sys.stdin.readline
# n, m = map(int, input().split())
# mylist = [list(map(int, input().split())) for _ in range(m)]
# nodes = [[] for _ in range(n+1)]
# for a, b in mylist:
# nodes[a].append(b)
# nodes[b].append(a)
# answers = []
# for i in range(1, n+1):
# q = [i]
# answer = [-1]*(n+1)
# answer[i] = 0
# while q:
# x = q.pop(0)
# for j in nodes[x]:
# if answer[j] == -1:
# q.append(j)
# answer[j] = answer[x] + 1
# answers.append(sum(answer)+1)
# print(answers.index(min(answers))+1)
|
import pandas as pd

# Tutorial snippet: dropping columns and rows with pandas.
ufo = pd.read_csv('http://bit.ly/uforeports')  # NOTE: fetched over the network
ufo.shape
ufo.head()
ufo.drop('City', axis=1).head()
# the 'City' column is still there — drop() returned a modified copy
ufo.head()
ufo.drop('City', axis=1, inplace=True)  # now remove it in place
ufo.head()
######
ufo.dropna(how='any')        # drop rows containing any missing value (copy)
ufo.dropna(how='any').shape
# not applied in place yet
ufo.shape
# alternatively, assign the result to a variable instead of using inplace=True
|
from modules.FlaskModule.FlaskModule import flask_app
from opentera.modules.BaseModule import BaseModule, ModuleNames
from opentera.config.ConfigManager import ConfigManager
# Same directory
from .TwistedModuleWebSocketServerFactory import TwistedModuleWebSocketServerFactory
from .TeraWebSocketServerUserProtocol import TeraWebSocketServerUserProtocol
from .TeraWebSocketServerParticipantProtocol import TeraWebSocketServerParticipantProtocol
from .TeraWebSocketServerDeviceProtocol import TeraWebSocketServerDeviceProtocol
# WebSockets
from autobahn.twisted.resource import WebSocketResource, WSGIRootResource
# Twisted
from twisted.internet import reactor, ssl
from twisted.web.http import HTTPChannel
from twisted.web.server import Site
from twisted.web.static import File
from twisted.web import resource
from twisted.web.wsgi import WSGIResource
from twisted.python import log
from OpenSSL import SSL
import sys
import os
class MyHTTPChannel(HTTPChannel):
    """HTTP channel that sanitizes and re-injects identity headers.

    Client-supplied X-Device-UUID / X-Participant-UUID headers are always
    stripped, then re-added only when a client certificate forwarded by
    nginx in 'x-ssl-client-dn' vouches for the identity.
    """
    def allHeadersReceived(self):
        # Verify if we have a client with a certificate...
        # cert = self.transport.getPeerCertificate()
        cert = None
        if getattr(self.transport, "getPeerCertificate", None):
            cert = self.transport.getPeerCertificate()
        # Current request
        req = self.requests[-1]
        # SAFETY: X-Device-UUID, X-Participant-UUID must not be trusted from
        # the client — remove them before any certificate-based injection.
        if req.requestHeaders.hasHeader('X-Device-UUID'):
            req.requestHeaders.removeHeader('X-Device-UUID')
            # TODO raise error?
        if req.requestHeaders.hasHeader('X-Participant-UUID'):
            req.requestHeaders.removeHeader('X-Participant-UUID')
            # TODO raise error ?
        #
        # if cert is not None:
        #     # Certificate found, add information in header
        #     subject = cert.get_subject()
        #     # Get UID if possible
        #     if 'Device' in subject.CN and hasattr(subject, 'UID'):
        #         user_id = subject.UID
        #         req.requestHeaders.addRawHeader('X-Device-UUID', user_id)
        #     if 'Participant' in subject.CN and hasattr(subject, 'UID'):
        #         user_id = subject.UID
        #         req.requestHeaders.addRawHeader('X-Participant-UUID', user_id)
        # Look for nginx headers (can contain a certificate)
        if req.requestHeaders.hasHeader('x-ssl-client-dn'):
            # TODO do better parsing. Working for now...
            # Domain extracted by nginx (much faster)
            client_dn = req.requestHeaders.getRawHeaders('x-ssl-client-dn')[0]
            uuid = ''
            for key in client_dn.split(','):
                # NOTE(review): len(key) == 40 presumably matches 'UID=' plus a
                # 36-character UUID — confirm against the nginx DN format.
                if 'UID' in key and len(key) == 40:
                    uuid = key[4:]
                if 'CN' in key and 'Device' in key:
                    req.requestHeaders.addRawHeader('X-Device-UUID', uuid)
                if 'CN' in key and 'Participant' in key:
                    req.requestHeaders.addRawHeader('X-Participant-UUID', uuid)
        HTTPChannel.allHeadersReceived(self)
class MySite(Site):
    """Twisted Site that uses MyHTTPChannel so identity headers are
    sanitized/injected on every request."""
    # swap in the header-filtering channel for the default HTTPChannel
    protocol = MyHTTPChannel

    def __init__(self, resource, requestFactory=None, *args, **kwargs):
        super().__init__(resource, requestFactory, *args, **kwargs)
class TwistedModule(BaseModule):
    """OpenTera module that hosts the Flask app and the user / participant /
    device WebSocket endpoints on a single Twisted reactor, optionally
    behind TLS."""

    def __init__(self, config: ConfigManager):
        """Build all resources and bind the listening port (side effect)."""
        BaseModule.__init__(self, ModuleNames.TWISTED_MODULE_NAME.value, config)
        # create a Twisted Web resource for our WebSocket server
        # Use IP stored in config
        # USERS
        wss_user_factory = TwistedModuleWebSocketServerFactory(u"wss://%s:%d" % (self.config.server_config['hostname'],
                                                                                 self.config.server_config['port']),
                                                              redis_config=self.config.redis_config)
        wss_user_factory.protocol = TeraWebSocketServerUserProtocol
        wss_user_resource = WebSocketResource(wss_user_factory)
        # PARTICIPANTS
        wss_participant_factory = TwistedModuleWebSocketServerFactory(u"wss://%s:%d" %
                                                                      (self.config.server_config['hostname'],
                                                                       self.config.server_config['port']),
                                                                      redis_config=self.config.redis_config)
        wss_participant_factory.protocol = TeraWebSocketServerParticipantProtocol
        wss_participant_resource = WebSocketResource(wss_participant_factory)
        # DEVICES
        wss_device_factory = TwistedModuleWebSocketServerFactory(u"wss://%s:%d" %
                                                                 (self.config.server_config['hostname'],
                                                                  self.config.server_config['port']),
                                                                 redis_config=self.config.redis_config)
        wss_device_factory.protocol = TeraWebSocketServerDeviceProtocol
        wss_device_resource = WebSocketResource(wss_device_factory)
        # create a Twisted Web WSGI resource for our Flask server
        wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), flask_app)
        # create resource for static assets
        # static_resource = File(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates', 'assets'))
        base_folder = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        static_resource = File(os.path.join(base_folder, 'static'))
        static_resource.contentTypes['.js'] = 'text/javascript'
        static_resource.forbidden = True
        # the path "/assets" served by our File stuff and
        # the path "/wss" served by our WebSocket stuff
        # root_resource = WSGIRootResource(wsgi_resource, {b'wss': wss_resource})
        # Avoid using the wss resource at root level: /wss itself is forbidden,
        # only /wss/user, /wss/participant and /wss/device are reachable.
        wss_root = resource.ForbiddenResource()
        wss_root.putChild(b'user', wss_user_resource)
        wss_root.putChild(b'participant', wss_participant_resource)
        wss_root.putChild(b'device', wss_device_resource)
        # Establish root resource
        root_resource = WSGIRootResource(wsgi_resource, {b'assets': static_resource, b'wss': wss_root})
        # Create a Twisted Web Site
        site = MySite(root_resource)
        # List of available CA clients certificates
        # TODO READ OTHER CERTIFICATES FROM FILE/DB...
        # caCerts=[cert.original]
        caCerts = []
        # Use verify = True to verify certificates
        self.ssl_factory = ssl.CertificateOptions(verify=False, caCerts=caCerts,
                                                  requireCertificate=False,
                                                  enableSessions=False)
        ctx = self.ssl_factory.getContext()
        ctx.use_privatekey_file(self.config.server_config['ssl_path'] + '/'
                                + self.config.server_config['site_private_key'])
        ctx.use_certificate_file(self.config.server_config['ssl_path'] + '/'
                                 + self.config.server_config['site_certificate'])
        # Certificate verification callback
        ctx.set_verify(SSL.VERIFY_NONE, self.verifyCallback)
        # With self-signed certs we have to explicitly tell the server to trust certificates
        ctx.load_verify_locations(self.config.server_config['ssl_path'] + '/'
                                  + self.config.server_config['ca_certificate'])
        if self.config.server_config['use_ssl']:
            reactor.listenSSL(self.config.server_config['port'], site, self.ssl_factory)
        else:
            reactor.listenTCP(self.config.server_config['port'], site)

    def __del__(self):
        pass

    def verifyCallback(self, connection, x509, errnum, errdepth, ok):
        """OpenSSL verify callback: log and accept/reject the peer cert.

        Returning False rejects the connection.
        """
        # 'b707e0b2-e649-47e7-a938-2b949c423f73'
        # errnum 24=invalid CA certificate...
        if not ok:
            print('Invalid cert from subject:', connection, x509.get_subject(), errnum, errdepth, ok)
            return False
        else:
            print("Certs are fine", connection, x509.get_subject(), errnum, errdepth, ok)
        return True

    def setup_module_pubsub(self):
        # Additional subscribe
        pass

    def notify_module_messages(self, pattern, channel, message):
        """
        We have received a published message from redis
        """
        print('TwistedModule - Received message ', pattern, channel, message)
        pass

    def run(self):
        """Start logging to stdout and run the reactor (blocks)."""
        log.startLogging(sys.stdout)
        reactor.run()
|
import subprocess
def webcam_make_screenshot():
    """Capture a single frame from the default webcam using fswebcam.

    Best effort: failures are reported (by fswebcam itself, or here when the
    binary is missing) and the expected filename is still returned, matching
    the original shell-based behavior.

    Returns:
        str: The screenshot filename ("webcam.jpg").
    """
    # Argument list with shell=False: no shell parsing, no injection surface.
    command = ["fswebcam", "-r", "320x240", "--jpeg", "85", "-D", "1", "-S", "2",
               "webcam.jpg"]
    try:
        subprocess.call(command)
    except OSError:
        # fswebcam not installed: the old shell=True variant only printed
        # "command not found" and carried on, so stay silent-but-tolerant here.
        pass
    return "webcam.jpg"
|
import numpy as np
import pandas as pd
import time
import argparse
from sklearn.metrics import f1_score, confusion_matrix, matthews_corrcoef, classification_report,\
balanced_accuracy_score, roc_auc_score
from flair.models import TextClassifier
from flair.data import Sentence
import statistics
import sys
import os
"""
Example use:
to test classification of a single 10 fold CV language model:
python3 classification.py --k_folds=10 --subset=test --model=Glove --dataset=MPD
for preliminary experiment on MPD please use the bash script:
bash ./grid_class_lrp_mpd
for preliminary experiment on TREC6 please use the bash script:
bash ./grid_class_lrp_trec6
to carry out all final experiments from the paper:
bash ./grid_class
"""
""" Argument Parser """
# Parse command-line options for the evaluation run.
parser = argparse.ArgumentParser(description='Classify data')
parser.add_argument('--subset', required=False, type=str, default='test',
                    help='string: which subset to analyze? train,'
                         ' test, dev or whole file?')
parser.add_argument('--k_folds', required=False, type=int, default=10)
parser.add_argument('--block_print', required=False, default=False,
                    action='store_true', help='Block printable output')
parser.add_argument('--model', required=True, default='Glove')
parser.add_argument('--dataset', required=True, type=str, default="MPD")
args = parser.parse_args()
# number of folds for k_folds validation
k_folds = args.k_folds
# file and subset to analyse
subset = args.subset
dataset = args.dataset
# group subject categories: the class-label set of the chosen dataset
if dataset == "MPD":
    gsc = ['subject', 'performance_assessment', 'recommendations', 'time', "technical"]
elif dataset == "TREC6":
    gsc = ['ABBR', 'DESC', 'ENTY', 'HUM', "LOC", "NUM"]
# NOTE(review): any other --dataset value leaves gsc undefined and the script
# later fails with NameError — consider argparse choices=("MPD", "TREC6").
# test run descriptor (embedding/model name, e.g. "Glove")
model_type = args.model
# disable printing out
block_print = args.block_print
if block_print:
    sys.stdout = open(os.devnull, 'w')
# define results path
results_path = "./results/{}/{}".format(dataset, model_type)
# read the data sentences; choose the subset for analysis
if subset == 'test':
    data = pd.read_excel(f"./data/{dataset}/test/test.xlsx")
    data = data[['subject_category', 'sentence']]
# NOTE(review): subsets other than 'test' leave `data` undefined — confirm
# whether the other --subset values were ever wired up.
print('Data file to be analyzed loaded\n')
# placeholders for metrics collected across folds
time_schedule = []
f1_list = []
mcc_list = []
f1sub_list = []
f1perf_list = []
f1rec_list = []
f1tec_list = []
f1time_list = []
f1ll_list = []
bas_list = []
# main loop: evaluate each cross-validation fold's classifier on the subset
for fold in range(k_folds):
    # load the pre-trained classifier of this fold
    try:
        subject_classifier = TextClassifier.load('./data/{}/model_subject_category_{}/{}_best-model.pt'.format(dataset,
                                                                                                              fold,
                                                                                                              model_type))
    except FileNotFoundError:
        print("----- Does such test run exist? Try another name. -----")
        quit()
    # placeholder for results of predictions
    flair_subject = []
    # add timestamp before all classification takes place
    time_schedule.append(time.perf_counter())
    # Main Analysis Loop: start analysis
    for i in range(len(data)):
        # `data` holds exactly two columns: 0 = subject_category, 1 = sentence.
        # BUG FIX: the original read data.iloc[i, 2], which is out of bounds
        # for this two-column frame and raises IndexError on the first row.
        sent = [str(data.iloc[i, 1])]
        for sent in sent:
            # print the sentence with original labels
            print('sentence: ' + sent, '| ' + '[' + str(data.iloc[i, 1]) + ']'
                  + ' | ' + '[' + str(data.iloc[i, 0]) + ']')
            # predict subject category
            sentence = Sentence(sent, use_tokenizer=True)
            subject_classifier.predict(sentence)
            sent_dict = sentence.to_dict()
            # print the results
            print('Subject category: ', sent_dict['labels'][0]['value'],
                  '{:.4f}'.format(sent_dict['labels'][0]['confidence']))
            group_subject = sent_dict['labels'][0]['value']
            # append the results
            flair_subject.append(group_subject)
    # add timestamp after all classification and before data aggregation
    time_schedule.append(time.perf_counter())
    # prepare DF combining predictions with the original labels/sentences
    pred_results = pd.DataFrame(columns=['flair_subject'])
    pred_results['flair_subject'] = flair_subject
    pred_results['label'] = data['subject_category'].values
    pred_results['sentence'] = data['sentence'].values

    def metrics_compute(original_data, prediction, name):
        """Compute evaluation metrics for one fold.

        Returns (f1 as a 4-decimal string, confusion-matrix DataFrame, MCC,
        per-class classification-report DataFrame, balanced accuracy).
        """
        prediction_list = prediction
        # global weighted F1; restrict to labels that actually occur in the
        # predictions so sklearn does not warn about absent classes
        f1_results_score = f1_score(y_true=original_data, y_pred=prediction_list, average='weighted',
                                    labels=np.unique(prediction_list))
        output_string = 'F1 score for {} prediction: '.format(name), f1_results_score
        f1localscore = '{:.4f}'.format(f1_results_score)
        print(output_string)
        # prepare confusion matrix, labelled with the dataset's classes
        conf_matrix = confusion_matrix(y_true=original_data, y_pred=prediction_list)
        conf_matrix = pd.DataFrame(index=gsc, data=conf_matrix, columns=gsc)
        print(conf_matrix)
        # compute matthews correlation coefficient
        mcc = matthews_corrcoef(y_true=original_data, y_pred=prediction_list)
        print(mcc)
        # classification report includes per-class F1 scores
        cr = classification_report(y_true=original_data, y_pred=prediction_list, target_names=gsc, output_dict=True)
        cr = pd.DataFrame(cr).transpose()
        print(cr)
        # compute class-balanced accuracy score
        bas = balanced_accuracy_score(y_true=original_data, y_pred=prediction_list)
        return f1localscore, conf_matrix, mcc, cr, bas

    # compute all metrics for this fold
    computed_metrics = metrics_compute(pred_results['label'].to_list(), pred_results['flair_subject'].to_list(),
                                       'Flair subject')
    # extract metrics
    f1_list.append(float(computed_metrics[0]))
    conf_matrix = computed_metrics[1]
    mcc_list.append(float(computed_metrics[2]))
    cr = computed_metrics[3]
    # NOTE(review): the per-class rows below are MPD label names; for TREC6
    # these .loc lookups would raise KeyError — confirm TREC6 runs exercise
    # this path.
    f1sub_list.append(float(cr.loc["subject"]["f1-score"]))
    f1perf_list.append(float(cr.loc["performance_assessment"]["f1-score"]))
    f1rec_list.append(float(cr.loc["recommendations"]["f1-score"]))
    f1tec_list.append(float(cr.loc["technical"]["f1-score"]))
    f1time_list.append(float(cr.loc["time"]["f1-score"]))
    bas_list.append(float(computed_metrics[4]))
    # BUG FIX: "TREC6" is a dataset name, not a model name; the original
    # compared model_type (e.g. "Glove") so the sixth-class F1 was never kept.
    if dataset == "TREC6":
        f1ll_list.append(float(cr.loc[gsc[5]]["f1-score"]))
    conf_matrix.to_excel('{}/{}_summaryCM_{}_{}.xlsx'.format(results_path, model_type, fold, subset))
    cr.to_excel('{}/{}_summaryCR_{}_{}.xlsx'.format(results_path, model_type, fold, subset))
# compute per-fold classification times from the paired (start, end) timestamps
fold_times = []
for i in range(len(time_schedule)):
    if i % 2 == 0:
        try:
            fold_times.append(time_schedule[i+1]-time_schedule[i])
        except IndexError:
            print("end of range")
# aggregate all class f1 scores and mcc for all k_folds
aggregated_fold_scores = pd.DataFrame(data=f1_list, index=range(k_folds), columns=["F1 global"])
aggregated_fold_scores["MCC scores"] = mcc_list
aggregated_fold_scores["F1 subject"] = f1sub_list
aggregated_fold_scores["F1 performance assessment"] = f1perf_list
aggregated_fold_scores["F1 recommendations"] = f1rec_list
aggregated_fold_scores["F1 technical"] = f1tec_list
aggregated_fold_scores["F1 time"] = f1time_list
aggregated_fold_scores["Classification time"] = fold_times
aggregated_fold_scores["Balanced accuracy"] = bas_list
# BUG FIX: these switches must test the dataset, not the model name — the
# original compared model_type ("Glove", ...) against dataset names, so the
# TREC6 column was never added and parameter_list was left undefined
# (NameError in the loop below) for every real model name.
if dataset == "TREC6":
    aggregated_fold_scores[f"F1 {gsc[5]}"] = f1ll_list
# read time data from training of the model
training_data = pd.read_excel("{}/{}_timetrainingstats.xlsx".format(results_path, model_type))
training_data = training_data["Training time"].to_list()
aggregated_fold_scores["Training time"] = training_data[:k_folds]
if dataset == "MPD":
    parameter_list = [f1_list, mcc_list, f1sub_list, f1perf_list, f1rec_list, f1tec_list, f1time_list, fold_times, bas_list,
                      training_data]
elif dataset == "TREC6":
    parameter_list = [f1_list, mcc_list, f1sub_list, f1perf_list, f1rec_list, f1tec_list, f1time_list, f1ll_list,
                      fold_times, bas_list, training_data]
# compute mean, std, max and min for all metrics and add them as extra rows
mean_list = []
std_list = []
max_list = []
min_list = []
for i in range(len(parameter_list)):
    mean_list.append(statistics.mean(parameter_list[i]))
    try:
        std_list.append(statistics.stdev(parameter_list[i]))
    except statistics.StatisticsError:
        # a single fold has no standard deviation
        std_list.append(0)
    max_list.append(max(parameter_list[i]))
    min_list.append(min(parameter_list[i]))
aggregated_fold_scores.loc["mean"] = mean_list
aggregated_fold_scores.loc["std"] = std_list
aggregated_fold_scores.loc["max"] = max_list
aggregated_fold_scores.loc["min"] = min_list
# print to file all metrics
aggregated_fold_scores.to_excel("{}/{}_aggregated_fold_scores.xlsx".format(results_path, model_type))
|
from flask import Flask, render_template

# Static files under ./static are exposed directly at '/', and templates are
# looked up in ./templates.
app = Flask(__name__, static_url_path='/',
            static_folder='static', template_folder='templates')


# Route for the blog index page.
@app.route('/')
def blog():
    return render_template('index.html')


if __name__ == "__main__":
    # Run the development server with Flask's defaults (the original note
    # claimed host 0.0.0.0 and port 5000 — port 5000 is the Flask default).
    app.run()
|
from .simdis import sdinput, sdstart, sdprint, sdstop

# BUG FIX: the star-import machinery only honors the lowercase name
# ``__all__``; the original spelled it ``__ALL__``, which Python ignores.
__all__ = ["sdstart", "sdstop", "sdprint", "sdinput"]
import random
from time import sleep
import sys
import threading
import os
import argparse
import platform
import subprocess
REQUIRED_PACKAGES = ["selenium", "feedparser", "beautifulsoup4", "setuptools"]
def updateDependencies(dependencies):
    """Install/upgrade each dependency with pip using the current interpreter.

    A failure for one package is reported and does not abort the rest.

    :param dependencies: iterable of pip package names
    :return: None
    """
    for dependency in dependencies:
        try:
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', dependency])
        except (subprocess.CalledProcessError, OSError):
            # Narrowed from a bare except: only pip failures / exec errors are
            # expected here; anything else (e.g. KeyboardInterrupt) propagates.
            print("Unable to update %s\n" % dependency)
def checkDependencies(packageList):
    """Ensure every package in packageList is installed, then refresh them all.

    BUG FIX: the original called ``pip.get_installed_distributions()`` without
    importing pip (NameError), and that API no longer exists in modern pip.
    ``importlib.metadata`` (stdlib, Python 3.8+) is used instead.

    :param packageList: iterable of pip distribution names (e.g. "selenium")
    :return: None
    """
    from importlib import metadata  # local import: only needed on this fallback path
    # Distribution names as pip knows them (project names, not module names).
    flat_installed_packages = {dist.metadata['Name'] for dist in metadata.distributions()}
    for packageName in packageList:
        if packageName not in flat_installed_packages:
            try:
                subprocess.check_call([sys.executable, '-m', 'pip', 'install', packageName])
            except subprocess.CalledProcessError:
                sys.stderr.write("NEED TO INSTALL \"%s\"" % packageName)
                sys.stderr.write("run the command \"pip install -U %s\"" % packageName)
    updateDependencies(packageList)
# Project-local imports. If any fail (typically because a third-party
# dependency such as selenium is missing), install the requirements and retry.
try:
    from FirefoxWebDriver import FirefoxWebDriver
    from ChromeWebDriver import ChromeWebDriver
    from Searches import Searches
except:
    checkDependencies(REQUIRED_PACKAGES)
    # Try the imports again
    from FirefoxWebDriver import FirefoxWebDriver
    from ChromeWebDriver import ChromeWebDriver
    from Searches import Searches
class BingRewards(object):
    """Automates Bing searches in Firefox and/or Chrome with desktop and
    mobile user agents to accumulate Bing Rewards points.

    Browser drivers and the search-term list are initialized on background
    threads so all of them can start up concurrently; __init__ joins them
    before returning.
    """

    # User agents: Edge-like for desktop searches, iPhone Safari for mobile.
    Edge = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299"
    SafariMobile = "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1"
    DESKTOP = "desktop"
    MOBILE = "mobile"
    base_url = "https://www.bing.com/search?q="

    def __init__(self, artifacts_dir, desktopSearches, mobileSearches, UseFirefox, UseChrome, searchesList=None,
                 useHeadless=False, loadcookies=False, load_default_profile=True):
        """Set up search counts, the artifact directory, and browser drivers.

        :param artifacts_dir: directory for webdriver binaries/cookies; None
            means the user's Downloads folder
        :param desktopSearches: number of desktop-UA searches to run
        :param mobileSearches: number of mobile-UA searches to run
        :param UseFirefox: run the searches in Firefox
        :param UseChrome: run the searches in Chrome
        :param searchesList: optional pre-built list of search terms; when
            None the list is fetched on a background thread
        :param useHeadless: run browsers headless
        :param loadcookies: load cookies saved by microsoftLogin.py
        :param load_default_profile: load the default browser profile
            (if False, loading cookies doesn't work)
        :raises Exception: if artifacts_dir is given but does not exist
        """
        self.UseFirefox = UseFirefox
        self.UseChrome = UseChrome
        self.totalSearches = desktopSearches + mobileSearches
        self.numSearches = {BingRewards.DESKTOP: desktopSearches, BingRewards.MOBILE: mobileSearches}
        self.useHeadless = useHeadless
        self.loadcookies = loadcookies
        self.load_default_profile = load_default_profile
        # BUG FIX: the original left downloads_dir unbound on platforms other
        # than Windows/Darwin/Linux, causing a NameError below. Anything that
        # is not Windows now falls back to $HOME/Downloads.
        if platform.system() == "Windows":
            home_dir = os.getenv('HOMEPATH')
        else:
            home_dir = os.getenv('HOME')
        downloads_dir = os.path.join(home_dir, "Downloads")
        if artifacts_dir is None:
            self.artifacts_dir = downloads_dir
        elif os.path.exists(artifacts_dir):
            self.artifacts_dir = artifacts_dir
        else:
            raise Exception("The location %s does not exist" % artifacts_dir)
        # Kick off the background initializers that apply.
        if searchesList is None:
            searchesThread = threading.Thread(name='searches_init', target=self.init_searches)
            searchesThread.start()
        else:
            self.searchesList = searchesList
        if self.UseFirefox:
            startFirefox = threading.Thread(name='startFirefox', target=self.init_firefox, args=())
            startFirefox.start()
        if self.UseChrome:
            startChrome = threading.Thread(name='startChrome', target=self.init_chrome, args=())
            startChrome.start()
        # Wait for everything started above before returning.
        if searchesList is None:
            searchesThread.join()
        if self.UseChrome:
            startChrome.join()
        if self.UseFirefox:
            startFirefox.join()

    def init_searches(self):
        """Fetch the list of search terms (runs on a worker thread)."""
        self.searchesList = Searches(self.totalSearches).getSearchesList()

    def init_chrome(self):
        """Create the Chrome webdriver wrapper (runs on a worker thread)."""
        if self.UseChrome:
            self.chromeObj = ChromeWebDriver(self.artifacts_dir, BingRewards.Edge, BingRewards.SafariMobile,
                                             self.useHeadless, loadCookies=self.loadcookies,
                                             load_default_profile=self.load_default_profile)
            if self.chromeObj is None:
                # BUG FIX: the original raised a plain string, which is itself
                # a TypeError in Python 3; raise a real exception instead.
                raise RuntimeError("ERROR: chromeObj = None")

    def init_firefox(self):
        """Create the Firefox webdriver wrapper (runs on a worker thread)."""
        if self.UseFirefox:
            self.firefoxObj = FirefoxWebDriver(self.artifacts_dir, BingRewards.Edge, BingRewards.SafariMobile,
                                               self.useHeadless, loadCookies=self.loadcookies,
                                               load_default_profile=self.load_default_profile)
            if self.firefoxObj is None:
                # BUG FIX: same string-raise problem as init_chrome.
                raise RuntimeError("ERROR: firefoxObj = None")

    def firefox_search(self, browser):
        """Run the configured number of Firefox searches for `browser`
        (BingRewards.DESKTOP or BingRewards.MOBILE)."""
        if not self.UseFirefox:
            return
        if browser == BingRewards.DESKTOP:
            self.firefoxObj.startDesktopDriver()
        elif browser == BingRewards.MOBILE:
            self.firefoxObj.startMobileDriver()
        for index in range(self.numSearches[browser]):
            if browser == BingRewards.DESKTOP:
                print("Firefox %s search %d : \"%s\"" % (browser, index+1, self.searchesList[index]))
                self.firefoxObj.getDesktopUrl(BingRewards.base_url + self.searchesList[index])
            elif browser == BingRewards.MOBILE:
                # Mobile searches use the second half of the shared search list.
                print("Firefox %s search %d : \"%s\"" % (browser, index+1,
                      self.searchesList[index + self.numSearches[BingRewards.DESKTOP]]))
                self.firefoxObj.getMobileUrl(BingRewards.base_url +
                                             self.searchesList[index + self.numSearches[BingRewards.DESKTOP]])
            # Random delay so the searches look less robotic.
            sleep(random.uniform(1.25, 3.25))
        if browser == BingRewards.DESKTOP:
            self.firefoxObj.closeDesktopDriver()
        elif browser == BingRewards.MOBILE:
            self.firefoxObj.closeMobileDriver()

    def chrome_search(self, browser):
        """Run the configured number of Chrome searches for `browser`
        (BingRewards.DESKTOP or BingRewards.MOBILE)."""
        if not self.UseChrome:
            return
        if browser == BingRewards.DESKTOP:
            self.chromeObj.startDesktopDriver()
        elif browser == BingRewards.MOBILE:
            self.chromeObj.startMobileDriver()
        for index in range(self.numSearches[browser]):
            if browser == BingRewards.DESKTOP:
                print("Chrome %s search %d : \"%s\"" % (browser, index+1, self.searchesList[index]))
                self.chromeObj.getDesktopUrl(BingRewards.base_url + self.searchesList[index])
            elif browser == BingRewards.MOBILE:
                # Mobile searches use the second half of the shared search list.
                print("Chrome %s search %d : \"%s\"" % (browser, index+1,
                      self.searchesList[index + self.numSearches[BingRewards.DESKTOP]]))
                self.chromeObj.getMobileUrl(BingRewards.base_url +
                                            self.searchesList[index + self.numSearches[BingRewards.DESKTOP]])
            # Random delay so the searches look less robotic.
            sleep(random.uniform(1.25, 3.25))
        if browser == BingRewards.DESKTOP:
            self.chromeObj.closeDesktopDriver()
        elif browser == BingRewards.MOBILE:
            self.chromeObj.closeMobileDriver()

    def runDesktopSearches(self):
        """Run desktop searches in Firefox and Chrome concurrently and wait."""
        firefoxDesktopSearches = threading.Thread(name='ff_desktop', target=self.firefox_search, kwargs={
            'browser': BingRewards.DESKTOP})
        firefoxDesktopSearches.start()
        chromeDesktopSearches = threading.Thread(name='chrome_desktop', target=self.chrome_search, kwargs={
            'browser': BingRewards.DESKTOP})
        chromeDesktopSearches.start()
        firefoxDesktopSearches.join()
        chromeDesktopSearches.join()

    def runMobileSearches(self):
        """Run mobile searches in Firefox and Chrome concurrently and wait."""
        firefoxMobileSearches = threading.Thread(name='ff_mobile', target=self.firefox_search, kwargs={
            'browser': BingRewards.MOBILE})
        firefoxMobileSearches.start()
        chromeMobileSearches = threading.Thread(name='chrome_mobile', target=self.chrome_search, kwargs={
            'browser': BingRewards.MOBILE})
        chromeMobileSearches.start()
        firefoxMobileSearches.join()
        chromeMobileSearches.join()
def testChromeMobileGPSCrash():
    """Run a few location-triggering queries on Chrome mobile to check whether
    the webdriver crashes on (or blocks) geolocation permission prompts."""
    location_queries = ["find my location", "near me", "weather"]
    rewards = BingRewards(None, desktopSearches=0, mobileSearches=len(location_queries), UseFirefox=False,
                          UseChrome=True, searchesList=location_queries)
    rewards.runMobileSearches()
    # Leave the browser up briefly so a crash would surface.
    sleep(5)
def parseArgs():
    """Build and parse the command-line options for the Bing rewards tool.

    :return: argparse.Namespace with firefox, chrome, mobile_searches,
        desktop_searches, cookies, headless, artifact_dir, chrome_location
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f', '--firefox', dest='firefox', action='store_true',
                        help='include this option to use firefox')
    parser.add_argument('-c', '--chrome', dest='chrome', action='store_true', help='include this option to use chrome')
    parser.add_argument('-m', '--mobile', dest='mobile_searches', type=int,
                        default=50, help='Number of Mobile Searches')
    parser.add_argument('-d', '--desktop', dest='desktop_searches', type=int,
                        default=70, help='Number of Desktop Searches')
    # Either load the default browser profile or load cookies saved from the microsoftLogin.py script
    # BUG FIX: the original --cookies help text was garbled ("this will work
    # as is this flag was not set"); reworded for clarity.
    parser.add_argument('--cookies', dest='cookies', action='store_true',
                        help="include this option to load cookies that were set using the microsoftLogin.py script. "
                             "If the script was not used or no cookies were saved, this will work as if this flag "
                             "was not set. Use this option if the pc you are running bingtool on doesn't have a GUI")
    parser.add_argument('--headless', dest='headless', action='store_true',
                        help='include this option to use headless mode')
    parser.add_argument('-a', '--artifact', dest='artifact_dir', type=str,
                        help='Directory to both store bing rewards artifacts and look for '
                             "cookies created with the microsoftLogin.py script. If this option is not set, the "
                             "default value of None indicates to use the user's downloads directory. The artifacts "
                             "stored are the downloaded webdriver binaries which get deleted at completion")
    parser.add_argument('-cl', '--chrome_location', dest='chrome_location', action='store_true',
                        help='Check if the chrome mobile webdriver blocks prompts for allowing location. '
                             'If this option is selected nothing else runs')
    return parser.parse_args()
def main():
    """Command-line entry point: parse options, then run the desktop and
    mobile search batches in order."""
    options = parseArgs()
    # This allows the script to work with a windows scheduler
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Location-prompt check mode runs alone and exits.
    if options.chrome_location is True:
        testChromeMobileGPSCrash()
        print("Chrome mobile location test complete, exiting")
        return
    # At least one browser flag is required.
    if (options.firefox is False) and (options.chrome is False):
        print("Error : At least one browser must be selected. run \"%s --help\"" % sys.argv[0])
        sys.exit(0)
    rewards = BingRewards(options.artifact_dir, desktopSearches=options.desktop_searches,
                          mobileSearches=options.mobile_searches, UseFirefox=options.firefox,
                          UseChrome=options.chrome, useHeadless=options.headless, loadcookies=options.cookies)
    print("Init BingRewards Complete")
    rewards.runDesktopSearches()
    print("runDesktopSearches Complete")
    rewards.runMobileSearches()
    print("runMobileSearches Complete")
    print("Main COMPLETE")
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument("--record", help="Record the demo and store the outcome to `temp` directory")
# parser.add_argument("--prepare", help="Get `data,csv` and screenshots of the game, then prepare features and labels in `data` directory")
# args = parser.parse_args()
# print(args)
# print(args.record)
# def a():
# while True:
# pass
# # print('hello')
# def b():
# print('hi')
# for i in range(3):
# print(i)
# try:
# a()
# except KeyboardInterrupt:
# b()
# import os
# for i in range(3):
# os.system("mkdir f_{0}".format(i))
import numpy as np

# Stack the per-episode feature arrays into one array along the first axis.
for i in range(2):
    if i == 0:
        base = np.load("data/X_ep_{0}.npy".format(i))
    else:
        temp = np.load("data/X_ep_{0}.npy".format(i))
        # BUG FIX: np.concatenate returns a new array; the original discarded
        # the result, so `base` never actually grew. Keep the accumulated array.
        base = np.concatenate((base, temp), axis=0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from db.BOW_DB import BOW_DB
from db.LDA_DB import LDA_DB
from vis.TermTopicMatrix2 import TermTopicMatrix2
def index():
    """Serve the full term-topic matrix visualization page.

    NOTE(review): `request` and `response` are not defined in this module;
    they appear to be framework-injected globals (web2py style) — confirm.
    """
    with BOW_DB() as bow_db:
        with LDA_DB() as lda_db:
            handler = TermTopicMatrix2(request, response, bow_db, lda_db)
            return handler.GenerateResponse()
def GetEntry():
    """Return a single term-topic entry serialized as a JSON response.

    NOTE(review): json.dumps(..., encoding='utf-8') is Python-2-only; on
    Python 3 the `encoding` keyword raises TypeError. This module appears to
    target Python 2 — confirm before migrating.
    """
    with BOW_DB() as bow_db:
        with LDA_DB() as lda_db:
            handler = TermTopicMatrix2(request, response, bow_db, lda_db)
            data = handler.GetEntry()
            dataStr = json.dumps(data, encoding='utf-8', indent=2, sort_keys=True)
            response.headers['Content-Type'] = 'application/json'
            return dataStr
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Google Code Jam "Counting Sheep": keep multiplying N until every decimal
# digit 0-9 has appeared in some multiple; N == 0 never sleeps (INSOMNIA).
total_cases = int(input().strip())
for case_no in range(1, total_cases + 1):
    raw = input().strip()
    n = int(raw)
    if n == 0:
        print("Case #{0:s}:".format(str(case_no)), 'INSOMNIA')
    else:
        # Seed the digit set (and the printable answer) from N itself.
        digits_seen = {int(ch) for ch in raw}
        last_seen = ''.join(str(int(ch)) for ch in raw)
        multiplier = 1
        while len(digits_seen) < 10:
            last_seen = str(n * multiplier)
            digits_seen.update(int(ch) for ch in last_seen)
            multiplier += 1
        # last_seen is the multiple at which the tenth digit appeared.
        print("Case #{0:s}:".format(str(case_no)), last_seen)
|
# -*- coding: utf-8 -*-
'''
This function calls the org_netcdf_files function (which organizes the files by date) and returns the file that matches with the date.
It is meant specifically for soundings and prints the file name for reference.
author: Grant Mckercher
'''
import datetime
def gather_sounding_files(date,directory,sound_num):
    # Organize the directory's netCDF files by date.
    # NOTE(review): org_netcdf_files is not imported in this module — confirm
    # it is provided by the execution environment.
    [(dates_sound),(filename_sound)] = org_netcdf_files(directory)
    # Reduce datetimes to calendar dates so we can match on the day alone.
    dates = [i.date() for i in dates_sound]
    # Index of the first sounding on the requested day...
    index_sound = dates.index(date.date())
    # ...offset by the 1-based sounding number within that day.
    index_sound_num = index_sound+(sound_num-1)
    # Python 2 print statement — this module is Python 2 code.
    print 'Reading',filename_sound[index_sound_num]
    return filename_sound[index_sound_num]
import numpy as np
import matplotlib.pyplot as plt
from model.constant_variables import (
D0,
k_i,
k_a,
rho_a,
rho_i,
C_i,
C_a,
ka0,
ka1,
ka2,
L_Cal,
mH2O,
kB,
T_ref_L,
a0,
a1,
a2,
f,
rho_ref,
T_ref_C,
c1,
c2,
c3,
c4,
c5,
c6,
R_v,
)
def update_model_parameters(phi, T, nz, coord, SWVD, form="Calonne"):
    """
    Compute the model's effective parameters.

    Arguments
    ---------
    phi     ice volume fraction [-]
    T       temperature [K]
    nz      number of computational nodes
    coord   coordinates of the computational nodes (kept for API compatibility; unused here)
    SWVD    saturation water vapor density formulation: 'Libbrecht', 'Hansen' or 'Calonne'
    form    formulation for k_eff and D_eff: 'Hansen' or 'Calonne'

    Returns
    -------
    D_eff      effective diffusion coefficient [m2 s-1]
    k_eff      effective thermal conductivity [W m-1 K-1]
    rhoC_eff   effective volumetric heat capacity [J m-3 K-1]
    rho_v      equilibrium water vapor density [kg m-3]
    rho_v_dT   derivative of rho_v w.r.t. T [kg m-3 K-1]

    Raises
    ------
    ValueError  if `form` (or, via sat_vap_dens, `SWVD`) is not a known option.
    """
    # Effective diffusion coefficient and thermal conductivity depend on the
    # same `form` switch, so both are computed in a single branch.
    if form == "Hansen":  # Hansen and Foslien (2015)
        D_eff = phi * (1 - phi) * D0 + D0
        k_eff = phi * ((1 - phi) * k_a + phi * k_i) + k_a
    elif form == "Calonne":  # Calonne et al. (2014) for D, (2011) for k
        # Heaviside factor switches diffusion off once phi exceeds 2/3.
        b = np.heaviside(2 / 3 - phi, 1)
        D_eff = D0 * (1 - 3 / 2 * phi) * b
        k_eff = ka0 + ka1 * (rho_i * phi) + ka2 * (rho_i * phi) ** 2
    else:
        # BUG FIX: the original printed a warning and silently returned
        # placeholder np.ones arrays; fail loudly instead, consistent with
        # sat_vap_dens, which raises on an unknown SWVD.
        raise ValueError("requested method not available, check input")
    # Effective heat capacity — similar formula in Hansen and Foslien (2015)
    # and Löwe et al. (2019): volume-weighted mixture of ice and air.
    rhoC_eff = phi * rho_i * C_i + (np.ones(nz) - phi) * rho_a * C_a
    # Water vapor density rho_v and its derivative rho_v_dT:
    rho_v, rho_v_dT = sat_vap_dens(nz, T, SWVD)
    return D_eff, k_eff, rhoC_eff, rho_v, rho_v_dT
def sat_vap_dens(nz, T, SWVD, plot=False):
    """
    Equilibrium water vapor density formulations and their derivatives as used in Libbrecht (1999), Calonne et al. (2014) and Hansen and Foslien (2015)
    Arguments
    -------------
    nz      number of computational nodes
    T       temperature [K]
    SWVD    equation for saturation water vapor density
            after 'Hansen', 'Calonne', or 'Libbrecht'
    plot    if True, show rho_v and rho_v_dT against T
    Returns
    -------------
    rho_v       equilibrium water vapor density [kgm-3]
    rho_v_dT    derivative w.r.t. temperature of equilibrium water vapor density [kgm-3K-1]
    Raises
    -------------
    ValueError  if SWVD is not one of the three known formulations
    """
    rho_v = np.zeros(nz)
    rho_v_dT = np.zeros(nz)
    if SWVD == "Libbrecht":
        # Libbrecht (1999): polynomial in (T - 273) with exponential prefactor.
        rho_v = (
            np.exp(-T_ref_L / T) / (f * T) * (a0 + a1 * (T - 273) + a2 * (T - 273) ** 2)
        )  # [kg/m^3] Water vapor density
        # Analytical temperature derivative of the expression above.
        rho_v_dT = (
            np.exp(-T_ref_L / T)
            / (f * T ** 2)
            * (
                (a0 - a1 * 273 + a2 * 273 ** 2) * (T_ref_L / T - 1)
                + (a1 - a2 * 2 * 273) * T_ref_L
                + a2 * T ** 2 * (T_ref_L / T + 1)
            )
        )  # [kg/m^3/K]
    elif SWVD == "Calonne":
        # Calonne et al. (2014): Clausius-Clapeyron-type exponential around T_ref_C.
        x = (L_Cal * mH2O) / (rho_i * kB)
        rho_v = rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))
        rho_v_dT = x / T ** 2 * rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))
    elif SWVD == "Hansen":
        # Hansen and Foslien (2015): base-10 saturation-pressure fit converted
        # to density via the ideal gas law (division by R_v * T).
        rho_v = (
            (10.0 ** (c1 / T + c2 * np.log(T) / np.log(10) + c3 * T + c4 * T ** 2 + c5))
            * c6
            / R_v
            / T
        )
        rho_v_dT = (
            rho_v
            * np.log(10)
            * (-c1 / T ** 2 + c2 / (T * np.log(10)) + c3 + 2 * c4 * T)
            - rho_v / T
        )
    else:
        raise ValueError("Saturation water vapor density not available")
    if plot:
        # Diagnostic plots; blocks until the figure windows are closed.
        fig1 = plt.plot(T, rho_v)
        plt.title("Water vapor density with respect to temperature")
        plt.show(fig1)
        fig2 = plt.plot(T, rho_v_dT)
        plt.title("Derivative of water vapor density with respect to temperature")
        plt.show(fig2)
    return rho_v, rho_v_dT
|
import requests, sys, re, configparser
from docx import Document
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from docx.shared import Pt
from docx.oxml.ns import qn
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from docx.enum.style import WD_STYLE_TYPE
class Article(object):
    """A single newspaper article: title and link are required, the remaining
    metadata fields start empty and are filled in during scraping."""

    def __init__(self, title, link):
        self.title = title  # headline text
        self.link = link    # absolute URL of the article page
        # Populated later by SingleDay.get_index().
        self.category = ""
        self.author = ""
        self.date = ""
        self.content = ""
class SingleDay(object):
    """Scrapes one day's edition of the newspaper: collects the page URLs,
    filters articles by keyword, and appends matches to per-category .docx
    files.

    NOTE(review): get_index relies on the module-level globals `keywords` and
    `subkeywords` (defined in the __main__ block) — confirm they are set
    before use.
    """

    def __init__(self, main_url):
        self.main_url = main_url  # front-page URL of the day's edition
        self.urls = []            # per-page URLs of this edition
        self.articles = []        # matched Article objects

    def _getpage(self, url, css_rules):
        # Fetch a page and return the elements matching the CSS selector.
        r = requests.get(url)
        r.raise_for_status()
        c = r.content
        soup = BeautifulSoup(c, "html.parser")
        results = soup.select(css_rules)
        return results

    # cssrules: .right_title-name a
    def get_urls(self, css_rules):
        # Collect the links to each page of the edition.
        pageList = self._getpage(self.main_url, css_rules)
        # from relative to absolute.
        self.urls = [urljoin(self.main_url, i.get('href')) for i in pageList]

    # cssrules1: .one a ; cssrules2:".text_c"
    def get_index(self, css_rules1, css_rules2):
        # Scan every page's article list for keyword matches and build
        # Article objects with author/date/content filled in.
        for url in self.urls:
            pageList = self._getpage(url, css_rules1)
            for i in pageList:
                s = str(i.contents)
                title = s.split('"')[1].strip()
                # NOTE(review): this loop variable shadows the outer `s`
                # (the raw contents string), which is not used afterwards.
                for s in keywords + subkeywords:
                    if s in title:
                        link = urljoin(self.main_url, i.get('href'))
                        article = Article(title, link)
                        # Sub-keyword matches all go to the "collection" doc.
                        if s in subkeywords:
                            article.category = "集锦"
                        else:
                            article.category = s
                        result = self._getpage(link, css_rules2)[0]
                        article.author = result.h4
                        article.date = ''.join(result.div.text.split())
                        paragraph = result.find(id='ozoom').find_all('p')
                        # article.content = (c.text for c in paragraph if len(c.text.rstrip())!=0)
                        # Lazily evaluated generator; consumed in write_to_docx.
                        article.content = (c.text for c in paragraph)
                        self.articles.append(article)
                        break

    def write_to_docx(self):
        # Append each matched article to its category's .docx document,
        # creating the custom title style on first use.
        for i in self.articles:
            filename = i.category + '.docx'
            self.document = Document(filename)
            if "New Title" not in self.document.styles:
                styles = self.document.styles
                new_heading_style = styles.add_style('New Title', WD_STYLE_TYPE.PARAGRAPH)
                new_heading_style.base_style = styles['Title']
                font = new_heading_style.font
                font.name = '宋体'
                font.size = Pt(18)
            # style: title:小二 宋体 paragrapth:三号 仿宋
            # East-Asian font names must also be set on the rPr element.
            self.document.styles['Normal'].font.name = u'仿宋'
            self.document.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋')
            self.document.styles['New Title']._element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')
            self.document.styles['Normal'].font.size = Pt(16)
            head = self.document.add_paragraph(i.title, style="New Title")
            head.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            author = self.document.add_paragraph(i.author)
            author.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            date = self.document.add_paragraph(i.date)
            date.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            for p in i.content:
                self.document.add_paragraph(p)
            self.document.save(filename)
if __name__ == "__main__":
    # Keyword lists and the date range come from config.ini.
    config = configparser.ConfigParser()
    config.read('config.ini', encoding='utf-8')
    keywords = config['DEFAULT']['Keywords'].split(',')
    subkeywords = config['DEFAULT']['SubKeywords'].split(',')
    period = {'year': config['DEFAULT']['Year'], 'month': config['DEFAULT']['Month'],
              'start': config['DEFAULT']['StartDay'], 'end': config['DEFAULT']['EndDay']}
    # Front page of the People's Daily for a given date.
    base_url = "http://paper.people.com.cn/rmrb/html/{year}-{month}/{day}/nbs.D110000renmrb_01.htm"
    # Create one empty .docx per keyword category (plus the collection doc).
    for i in keywords + ["集锦"]:
        doc = Document()
        doc.save(i + '.docx')
    # Scrape every day in the configured range and append matches.
    for i in range(int(period['start']), int(period['end']) + 1):
        main_url = base_url.format(year=period['year'], month=period['month'].zfill(2), day="%02d" % i)
        s = SingleDay(main_url)
        s.get_urls('.right_title-name a')
        s.get_index('#titleList a', ".text_c")
        s.write_to_docx()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 14:11:18 2018
@author: jacky
"""
#import libraries
import sys
import tweepy
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
#import json
#import re
#import matplotlib.pyplot as plt
#import pandas as pd
#from nltk.tokenize import word_tokenize
#from nltk.tokenize import TweetTokenizer
#from os import path
#from scipy.misc import imread
#from wordcloud import WordCloud, STOPWORDS
#twitter developer account information
# SECURITY NOTE(review): credentials are hard-coded in source. They should be
# revoked and moved to environment variables or an untracked config file.
consumer_key = 'WZyeqNt1yUP6OFRBzfQN4ho7Z'
consumer_secret = 'DfMcZsJbSMZcFCLpfxgySqqqbXDCKZSazs8ED8GMjxYREWU71b'
access_token = '1058132943850401792-yEEByQSQDHRu47rWVaA1crfmF95iZv'
access_secret = 'rjkXCbZaZXyb9eekdR2Lrc2MXnAWk5fPkstpv7CC57Nlq'
#set up the API
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
#get tweets from Twitter
class MyListener(StreamListener):
    """Tweepy stream listener that appends up to max_tweets raw tweets
    (one JSON document per line) to Twitter.json, then exits the process."""

    print("StreamListener...")  # NOTE: runs once at class-definition time, not per instance
    tweet_number = 0  # initialize the counter (class default; shadowed per instance once incremented)

    #__init__runs when an instance of the class is created
    def __init__(self, max_tweets):
        # BUG FIX: the original skipped StreamListener.__init__, leaving the
        # base class's attributes (e.g. its API handle) uninitialized.
        super().__init__()
        self.max_tweets = max_tweets  # set max number of tweets
        print("max number of tweets: ", self.max_tweets)

    def on_data(self, data):
        """Handle one raw tweet payload; terminate once the limit is hit."""
        self.tweet_number += 1  # update number of tweets
        print("get tweet # ", self.tweet_number)
        if self.tweet_number > self.max_tweets:
            sys.exit("reach the limit of" + " " + str(self.max_tweets))  # stop point
        try:
            print("writing data into json")
            # output tweets in json format / one tweet per line
            with open('Twitter.json', 'a') as f:
                f.write(data)
            return True
        except Exception:
            # BUG FIX: narrowed from BaseException so KeyboardInterrupt /
            # SystemExit are no longer swallowed; on any I/O failure we report
            # and keep the stream alive, as before.
            print("Wrong")
            return True

    #method for on_error (error handler)
    def on_error(self, status):
        """Report stream errors; disconnect on HTTP 420 (rate limiting)."""
        print("Error")
        if status == 420:
            print("Error ", status, "rate limited")  # Twitter API rate limit
            return False
#get user input to run and do error check
max_tweets = int(input("What is the max number of tweets? (must be an integer bigger than 0) "))
if max_tweets > 30 or max_tweets <= 0:
    sys.exit("the max set is 30 and the min set is 1")
hashtag = str(input("What is the hashtag? (must include # at the beginning) "))
if hashtag.find("#") != 0:
    sys.exit("must include # at the beginning")
# Start streaming; blocks until the listener's limit triggers sys.exit.
twitter_stream = Stream(auth, MyListener(max_tweets)) #assign the max_tweets
twitter_stream.filter(track=[hashtag]) #assign hashtag to filter
|
class Solution:
    def isValid(self, s: str) -> bool:
        """Return True iff every bracket in s is closed by its matching
        bracket in the correct order (LeetCode 20, "Valid Parentheses").

        Assumes s contains only the characters ()[]{}; any non-opening
        character is treated as a closing bracket, as in the original.
        """
        open_brackets = {'(': ')', '[': ']', '{': '}'}
        stack = []
        for c in s:
            if c in open_brackets:  # open bracket: defer until its closer arrives
                stack.append(c)
            else:  # close bracket: must match the most recent opener
                if not stack:
                    # nothing to pop, so this closer is unmatched
                    return False
                if open_brackets[stack.pop()] != c:
                    return False
        # valid iff no opener is still waiting for its closer
        return not stack
|
# Sample data for the sentinel linear search; K (the target) comes from stdin.
Arr=[10,23,45,67,89,24,68,59,39,36,20]
n=len(Arr)
K=int(input())
def SL(Arr, n, K):
    """Sentinel linear search for K in Arr[:n].

    The last element is temporarily replaced by K so the scan loop needs no
    bounds check; it is restored before returning. Arr is left unchanged.

    :param Arr: list to search (mutated temporarily, restored on return)
    :param n: number of elements to consider
    :param K: value to find
    :return: index of the first occurrence of K, or False if absent.
        CAUTION: a match at position 0 also returns a falsy value — callers
        must compare against False explicitly.
    """
    # Guard added: with n == 0 the original corrupted Arr[-1] (Python's
    # negative indexing) and then scanned past the sentinel.
    if n == 0:
        return False
    L, Arr[n-1] = Arr[n-1], K  # plant the sentinel, remembering the old value
    i = 0
    while Arr[i] != K:
        i += 1
    Arr[n-1] = L  # restore the original last element
    # A hit before the sentinel slot, or the sentinel slot genuinely holding K.
    if i < n-1 or Arr[i] == K:
        return i
    return False
I = SL(Arr, n, K)
# BUG FIX: the original tested `if(I)`, which treats a match at position 0
# (I == 0, falsy) as "Not found". Compare against False explicitly.
if I is not False:
    print("Position=", I)
else:
    print("Not found")
#O(n)
#N+2 comparisons
# -*- coding: utf-8 -*-
######################################################################################################
#
# Copyright (C) B.H.C. sprl - All Rights Reserved, http://www.bhc.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
# including but not limited to the implied warranties
# of merchantability and/or fitness for a particular purpose
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# Odoo addon manifest: by convention this file evaluates to a bare dict
# literal describing the module's metadata, dependencies and data files.
{
    'name': 'Driver Location',
    'version': '1.0',
    'category': 'Locate Driver Location',
    'author': 'Blueapple Technology',
    'images': ['static/description/icon.png'],
    'website': 'http://www.blueappleonline.com',
    # Requires HR, project and the Google Maps widget addon.
    'depends': ['hr','project', 'web_google_maps',
                ],
    # View definitions loaded on install/update.
    'data': ['views/location_view.xml',
             'views/project_task.xml'
             ],
    'installable': True,
    'auto_install': False,
    'license': 'AGPL-3',
}
#!/usr/bin/python3
import os
#import time
import configparser
from gpiozero import Button
#from gpiozero import LED
from signal import pause
# Read the GPIO pin assignment from the device configuration file.
config = configparser.ConfigParser()
config.read('/opt/vougen/vougen.conf')
gpio = config['gpio']
#led_number = gpio['led']
key_number = int(gpio['key'])
def send_request():
    # Copy the preconfigured order file into the inbox directory; another
    # process presumably watches that directory for new orders — TODO confirm.
    #os.system('echo 1,1,1d,1 > /opt/vougen/inbox/new.txt')
    os.system('cat /opt/vougen/gpiorder.conf > /opt/vougen/inbox/new.txt')
#led = LED(16)
#led.on()
#time.sleep(3) # Sleep for 3 seconds
#led.off()
#button = Button(21)
# Fire send_request on every press of the configured hardware button, then
# block forever waiting for GPIO events.
button = Button(key_number)
button.when_pressed = send_request
pause()
|
from funcs import readHTTP
# Get the page and print an error if not HTTP 200
# readHTTP returns (body, error_message); an empty error string means success.
addCheck, e = readHTTP("http://172.16.0.198")
if e != "":
    print(e)
else:
    print("Retrieved page successfully")
|
import math
def newtonsAlgorithm(func, funcder, x0=None, m=None, delta=None, epsilon=None):
    """
    Newton's method for finding a root of func.

    Generalized from the original: each tuning value may now be passed as an
    argument; any value left as None is prompted for interactively, so the
    original two-argument call behaves exactly as before. The current
    approximation is printed at every step, and the final approximation is
    returned (the original returned None, so callers ignoring the return
    value are unaffected).

    :param func: Equation
    :param funcder: Derivative of the equation
    :param x0: initial guess (prompted when None)
    :param m: maximum number of iterations (prompted when None)
    :param delta: stop when successive iterates differ by less than this (prompted when None)
    :param epsilon: stop when |func(x)| falls below this (prompted when None)
    :return: the last computed approximation of the root
    """
    if x0 is None:
        x0 = float(input('x0'))
    if m is None:
        m = int(input('M'))
    if delta is None:
        delta = float(input('delta'))
    if epsilon is None:
        epsilon = float(input('epsilon'))
    v = func(x0)
    print('k = 0')
    print('x0 = ' + str(x0))
    print('v = ' + str(v))
    # Initial guess already good enough.
    if abs(v) < epsilon:
        return x0
    for k in range(1, m+1):
        # Newton step: follow the tangent line to its x-intercept.
        x1 = x0 - v / funcder(x0)
        v = func(x1)
        print('k = ' + str(k))
        print('x1 = ' + str(x1))
        print('v = ' + str(v))
        # Converged either in x or in residual.
        if abs(x1 - x0) < delta or abs(v) < epsilon:
            return x1
        x0 = x1
    # Iteration budget exhausted; return the best approximation found.
    return x0
def function1(x: float):
    """f(x) = x - tan(x); roots are the fixed points of tan."""
    return x - math.tan(x)
def function1der(x: float):
    """f'(x) = 1 - sec^2(x), the derivative of x - tan(x)."""
    return 1 - 1 / (math.cos(x)) ** 2
# Script entry point: demo run on f(x) = x - tan(x) with interactive inputs.
if __name__ == '__main__':
    newtonsAlgorithm(function1, function1der)
# Simple CNN model for CIFAR-10
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import MaxPooling3D
from keras.utils import np_utils
from keras import backend as K
K.set_image_dim_ordering('th')
from shapes import Sphere
from shapes import Torus
from shapes import Gridspace
import numpy as np
from scipy.stats import ortho_group
import pydot_ng as pydot
from keras.utils import plot_model
# NET PARAMETERS
stride=(2,2,2)
pad='same'
filters=16
lrate=.1
epochs=5
#sphere = Sphere()
#torus = Torus()
# Voxel grid the shapes get rasterized onto.
grid = Gridspace(stepsize=.5, radius=15)
mysphere = Sphere(dim=3, radius=5)
mytorus = Torus(dim=3, major_radius=4, minor_radius=2)
# RANDOMLY GENERATE DATA
# 0 is voxel that doesn't contain anything
# 1 is a voxel that contains a point
X_train = []
y_train = []
X_test = []
y_test = []
for i in range(0, 5):
    # Random rotation (orthogonal matrix) plus small noise, and a random
    # translation, so every sample is a differently-posed sphere/torus.
    mysphere.transformationmatrix = ortho_group.rvs(dim=3) + np.random.normal(0,.05,(3,3))
    mytorus.transformationmatrix = ortho_group.rvs(dim=3) + np.random.normal(0,.05,(3,3))
    mysphere.translationvector = np.random.normal(0,3,3)
    mytorus.translationvector = np.random.normal(0,3,3)
    # NOTE(review): these sampled point sets are never used afterwards —
    # possibly leftovers. as_grid below presumably does its own sampling.
    myspherepoints = mysphere.sample(20)
    mytoruspoints = mytorus.sample(20)
    # Rasterize to the voxel grid and append a trailing channel axis
    # (channels_last layout for the Conv3D below).
    myspheregrid = mysphere.as_grid(grid)
    desiredsphereshape = myspheregrid.shape + (1,)
    myspheregrid = myspheregrid.reshape(*desiredsphereshape)
    mytorusgrid = mytorus.as_grid(grid)
    desiredtorusshape = mytorusgrid.shape + (1,)
    mytorusgrid = mytorusgrid.reshape(*desiredtorusshape)
    # One-hot labels: [0,1] = sphere, [1,0] = torus.
    class1 = np.array([0, 1])
    class2 = np.array([1, 0])
    # First 4 iterations go to training, the last one to test.
    if i < 4:
        X_train.append(myspheregrid)
        y_train.append(class1)
        X_train.append(mytorusgrid)
        y_train.append(class2)
    if i >= 4:
        X_test.append(myspheregrid)
        y_test.append(class1)
        X_test.append(mytorusgrid)
        y_test.append(class2)
xtrain = np.array(X_train)
ytrain = np.array(y_train)
xtest = np.array(X_test)
ytest = np.array(y_test)
'''
for i in range(0,50):
image1 = sphere.sampleToImage(1000, "sphere.png")
class1 = np.array([0,1])
image2 = torus.sampleToImage(1000, "torus.png")
class2 = np.array([1,0])
if i < 45:
X_train.append(image1)
y_train.append(class1)
X_train.append(image2)
y_train.append(class2)
else:
X_test.append(image1)
y_test.append(class1)
X_test.append(image2)
y_test.append(class2)
xtrain = np.array(X_train)
ytrain = np.array(y_train)
xtest = np.array(X_test)
ytest = np.array(y_test)
'''
# Create the model
# NOTE(review): this uses the old standalone-Keras API (lr=, np_utils,
# keras.layers.convolutional) — it will not run against modern tf.keras.
model = Sequential()
# Single 3D conv over the voxel grid; input shape comes from one sample grid.
model.add(Conv3D(filters, (3, 3, 3), strides=stride, input_shape=myspheregrid.shape, data_format="channels_last",
                 padding=pad, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
#model.add(Conv2D(200, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
# Two-way softmax matching the one-hot sphere/torus labels.
model.add(Dense(2, activation='softmax'))
# Compile model
# Linear learning-rate decay spread over the configured epochs.
decay = lrate / epochs
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd, metrics=['accuracy'])
print(model.summary())
# Fit the model
model.fit(xtrain, ytrain, validation_data=(
    xtest, ytest), epochs=epochs, batch_size=32)
# Final evaluation of the model
scores = model.evaluate(xtest, ytest, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
# Diagram of the architecture (requires pydot/graphviz).
plot_model(model, show_shapes=True, to_file='model.png')
#model.save('conv3D_model.h5')
|
import socket
import time
import os
import subprocess
import threading
import re
import signal
from tools import connect_to_host, client_thread, get_ip
# Map from network-function name to its single-character wire code; the
# *_setget variants address the NF's get/set control interface.
func_dict = {"firewall" : '0',
             "monitor": '1',
             "nat": '2',
             "ids": '3',
             "vpn": '4',
             "firewall_setget" : '5',
             "monitor_setget": '6',
             "nat_setget": '7',
             "ids_setget": '8',
             "vpn_setget": '9'}
def run_client_cmd(_cmd):
    """Run a shell command, echoing each stdout line as it arrives.

    shell=True is kept deliberately: per the original author's note,
    dpdk-pktgen reports errors when launched with shell=False.

    :param _cmd: shell command string
    :return: list of the raw stdout lines (bytes, trailing newline included)
    """
    print(_cmd)
    proc = subprocess.Popen(_cmd, shell=True, stdout=subprocess.PIPE)
    captured = []
    # readline yields b'' at EOF, which terminates the iterator.
    for raw_line in iter(proc.stdout.readline, b''):
        print(raw_line[:-1].decode('utf-8'))
        captured.append(raw_line)
    return captured
# this is for process which must be killed manually
def run_client_cmd_withuserkill(_cmd):
    """Launch a long-running shell command and return its Popen handle so the
    caller can terminate it when done.

    shell=True is kept deliberately: per the original author's note,
    dpdk-pktgen reports errors when launched with shell=False.

    :param _cmd: shell command string
    :return: subprocess.Popen with stdout captured as a pipe
    """
    print(_cmd)
    return subprocess.Popen(_cmd, shell=True, stdout=subprocess.PIPE)
def parse_socket_data(data):
    """Placeholder parser: wrap the raw payload in a one-command list together
    with a dummy type tag (mirrors the (cmd_list, type_list) convention of the
    other parsers)."""
    commands = [data]
    kinds = ["tmp type"]
    return commands, kinds
def parse_pktgen_data(data):
    """Translate a test request ("throughput;rate" or latency-style) into the
    pktgen command(s) to run plus a parallel list of result-type tags.

    The type list exists because an execution may return different kinds of
    results, and the tag tells the result parser what to expect.
    NOTE: if python is not run from
    /home/ubuntu/projects/serverless-nfv/tests/pktgen-3.5.0, pktgen reports
    "lua-shell: module 'Pktgen' not found:".
    For latency runs, line 2 of ./latency.lua is rewritten in place so its
    last argument becomes the requested rate.
    """
    rate = data.split(";")[1]
    base_cmd = ('sudo ./app/x86_64-native-linuxapp-gcc/pktgen '
                '-l 1,3,5,7,9,11,13,15,12 -n 4 --socket-mem 1024,1024 '
                '-- -P -m "[3/5:7/9].0" ')
    if data.find("throughput") != -1:
        return [base_cmd + '-f ./tmp_stdin.lua'], ["tp"]
    # Latency path: patch the rate into line 2 of the lua script.
    patched = []
    with open("./latency.lua", "r") as f:
        for lineno, line in enumerate(f.readlines(), 1):
            if lineno == 2:
                line = line[:line.rfind(',')] + ", " + rate + ")\n"
            patched.append(line)
    with open("./latency.lua", "w") as f:
        for line in patched:
            f.write(line)
    return [base_cmd + '-f ./latency.lua'], ["lt"]
def parse_pktgen_result(ret, test_type):
    """Extract the metric from raw pktgen output lines (bytes).

    test_type "tp": find the last "UP-40000-FD" stats line and return
    [bits-per-second, packets-per-second] as strings taken from the
    slash-separated counter pairs. Any other test_type: locate the latency
    value just before the ";[9;20H" cursor-control sequence (ESC bytes are
    first rewritten to ';') and return it as [int].
    Returns None if no matching line is found (as in the original).
    """
    if test_type == "tp":
        left_of_slash = re.compile("(\d+)/\d+")
        right_of_slash = re.compile("\d+/(\d+)")
        for raw in ret:
            text = str(raw, encoding="utf-8")
            pos = text.rfind("UP-40000-FD")
            if pos == -1:
                continue
            tail = text[pos:]
            # Third pair's left field is bps, first pair's right field is pps.
            bps = left_of_slash.findall(tail)[2]
            pps = right_of_slash.findall(tail)[0]
            return [bps, pps]
    else:
        for raw in ret:
            text = re.sub('\x1b', ';', str(raw, encoding="utf-8"))
            pos = text.rfind(";[9;20H")
            if pos == -1:
                continue
            # Walk left from the marker, collecting digits back to the space.
            digits = ""
            cursor = pos - 1
            while text[cursor] != ' ':
                digits = text[cursor] + digits
                cursor = cursor - 1
            return [int(digits)]
def parse_gateway_data(data):
    """Patch the gateway's TX burst size for the requested test mode and
    return the build/run commands.

    Returns (cmd_list, type_list); both commands share the mode tag
    ("tp" for throughput, "lt" for latency).

    Fix: the rewrite used `open(path, 'w').write(...)`, which leaks the
    file handle; it now uses a context manager so the file is flushed
    and closed deterministically.
    """
    cmd_list = []
    type_list = []
    gateway_main_path = "/home/ubuntu/projects/serverless-nfv/framework/gateway_rtc/main.c"
    s = ""
    with open(gateway_main_path, 'r') as f:
        s = f.read()
    if "throughput" in data:
        MAX_PKTS_BURST_TX = "32"
        type_list.append("tp")
        type_list.append("tp")
    else:
        # Latency mode currently keeps the same burst size; "1" was tried
        # and left here as a reference.
        MAX_PKTS_BURST_TX = "32"
        # MAX_PKTS_BURST_TX = "1"
        type_list.append("lt")
        type_list.append("lt")
    with open(gateway_main_path, 'w') as f:
        f.write(re.sub(r'#define\sMAX_PKTS_BURST_TX\s[0-9]+',
                       "#define MAX_PKTS_BURST_TX " + MAX_PKTS_BURST_TX, s))
    cmd_list.append("./make.sh gateway")
    cmd_list.append("./start.sh gateway")
    return cmd_list, type_list
def parse_gateway_result(ret, test_type):
    """Gateway runs produce no parsed metrics; just report completion."""
    return ["done"]
def parse_executor_old_data(data):
    """(Legacy variant) Rewrite executor/funcbox sources per the control
    message, then return the build/run command list.

    data fields (';'-separated):
        [0] "throughput" or "latency"
        [1] funcbox name (e.g. "funcbox_4")
        [2] NF name (firewall/monitor/nat/ids/vpn)
        [3] value patched into MAX_THREADS_NUM
        [4] value patched into MAX_BATCH_SIZE

    Returns (cmd_list, type_list) where each command is tagged "mk"
    (compile) or "run" (launch).

    NOTE(review): this edits C sources and Makefiles in place under
    /home/amax/projects/serverless-nfv by toggling comment markers and
    rewriting #define lines -- it assumes those files keep the exact
    layout this code expects; verify before reuse.
    """
    # data:
    # throughput/latency;funxbox_x;firewall/monitor/nat/ids/vpn
    funcbox_name = data.split(";")[1]
    nf_name = data.split(";")[2]
    max_threads_num = data.split(";")[3]
    max_batch_num = data.split(";")[4]
    print(max_threads_num)
    cmd_list = []
    type_list = []
    # Dispatcher TX burst: 32 for throughput runs, 1 for latency runs.
    dispatcher_h_path = "/home/amax/projects/serverless-nfv/framework/executor/includes/dispatcher.h"
    s = ""
    with open(dispatcher_h_path, 'r') as f:
        s = f.read()
    if data.find("throughput") != -1:
        MAX_PKTS_BURST_TX = "32"
    else:
        MAX_PKTS_BURST_TX = "1"
    open(dispatcher_h_path, 'w').write(
        re.sub(r'#define\sDISPATCHER_BUFFER_PKTS\s[0-9]+',
        "#define DISPATCHER_BUFFER_PKTS " + MAX_PKTS_BURST_TX, s))
    # In the executor's main.c, uncomment the line for the selected funcbox
    # and comment out every other funcbox_* line.
    dispatcher_main_path = "/home/amax/projects/serverless-nfv/framework/executor/main.c"
    linelist = []
    with open(dispatcher_main_path, "r") as f:
        for line in f.readlines():
            if line.find(funcbox_name) != -1:
                if line[0] == '/':
                    line = line[2:]
            elif line.find("funcbox_") != -1:
                if line[0] != '/':
                    line = "//" + line
            linelist.append(line)
    with open(dispatcher_main_path, "w") as f:
        for line in linelist:
            f.write(line)
    # Patch the selected funcbox's batch size.
    dir_name = "/home/amax/projects/serverless-nfv/funcboxes/" + funcbox_name + "/includes/"
    with open(dir_name + "funcbox.h", 'r') as f:
        s = f.read()
    open(dir_name + "funcbox.h", 'w').write(
        re.sub(r'#define\sMAX_BATCH_SIZE\s[0-9]+',
        "#define MAX_BATCH_SIZE " + max_batch_num, s))
    # dir_name = "/home/amax/projects/serverless-nfv/funcboxes/funcbox4_instance/includes/"
    # with open(dir_name + "funcbox.h", 'r') as f:
    #     s = f.read()
    # open(dir_name + "funcbox.h", 'w').write(
    #     re.sub(r'#define\sMAX_BATCH_SIZE\s[0-9]+',
    #     "#define MAX_BATCH_SIZE " + max_batch_num, s))
    # Patch the thread count in the same header.
    dir_name = "/home/amax/projects/serverless-nfv/funcboxes/" + funcbox_name + "/includes/"
    with open(dir_name + "funcbox.h", 'r') as f:
        s = f.read()
    open(dir_name + "funcbox.h", 'w').write(
        re.sub(r'#define\sMAX_THREADS_NUM\s[0-9]+',
        "#define MAX_THREADS_NUM " + max_threads_num, s))
    # In funcworker.h, comment out every NF #include, then uncomment the
    # one matching the requested NF name.
    linelist = []
    # dir_name = "/home/amax/projects/serverless-nfv/funcboxes/funcbox4_instance/includes/"
    with open(dir_name + "funcworker.h", "r") as f:
        line_cnt = 0
        for line in f.readlines():
            line_cnt = line_cnt + 1
            if line[0] != '/' and line.find("include") != -1 and \
                (line.find("firewall") != -1 or line.find("monitor") != -1 \
                or line.find("nat") != -1 or line.find("ids") != -1\
                or line.find("vpn") != -1):
                line = "//" + line
            if line.find(nf_name.split("_")[0]) != -1:
                line = line[2:]
            linelist.append(line)
    with open(dir_name + "funcworker.h", "w") as f:
        for line in linelist:
            f.write(line)
    # In the Makefile, '#'-comment every NF-related SRCS-y/INC/CFLAGS line,
    # then re-enable the ones for the requested NF.
    dir_name = "/home/amax/projects/serverless-nfv/funcboxes/" + funcbox_name + "/"
    # dir_name = "/home/amax/projects/serverless-nfv/funcboxes/funcbox4_instance/"
    linelist = []
    with open(dir_name + "Makefile", "r") as f:
        line_cnt = 0
        for line in f.readlines():
            line_cnt = line_cnt + 1
            if line[0] != '#' and (line.find("SRCS-y") != -1 or \
                line.find("INC") != -1 or line.find("CFLAGS") != -1) and (line.find("firewall") != -1 or line.find("monitor") != -1 \
                or line.find("nat") != -1 or line.find("ids") != -1\
                or line.find("vpn") != -1):
                line = "# " + line
            if line.find(nf_name) != -1 and \
                (line.find("set") == -1 or nf_name.find("set") != -1) and \
                (line.find("SRCS-y") != -1 or \
                line.find("INC") != -1 or line.find("CFLAGS") != -1):
                # print(">" * 30)
                # print(line[:-1])
                # print(nf_name, line.find(nf_name))
                # print("<" * 30)
                line = line[2:]
            linelist.append(line)
    print("*" * 30)
    with open(dir_name + "Makefile", "w") as f:
        for line in linelist:
            f.write(line)
            print(line[:-1])
    print("*" * 30)
    # Build everything, then launch sandbox and executor.
    cmd_list.append("cd /home/amax/projects/serverless-nfv/framework/ && ./make.sh executor")
    type_list.append("mk")
    cmd_list.append("cd /home/amax/projects/serverless-nfv/funcboxes/" + funcbox_name +" && make clean && make")
    type_list.append("mk")
    # cmd_list.append("cd /home/amax/projects/serverless-nfv/funcboxes/funcbox4_instance && make clean && make")
    # type_list.append("mk")
    cmd_list.append("cd /home/amax/projects/serverless-nfv/framework/ && ./start.sh sandbox")
    type_list.append("run")
    cmd_list.append("cd /home/amax/projects/serverless-nfv/framework/ && ./start.sh executor")
    type_list.append("run")
    # cmd_list.append("cd /home/amax/projects/serverless-nfv/funcboxes/funcbox_4 && ./start.sh ")
    # type_list.append("run")
    return cmd_list, type_list
def parse_executor_data(data):
    """Rewrite executor/funcbox sources per the control message, then
    return the build/run command list.

    data fields (';'-separated):
        [0] "throughput" or "latency"
        [1] funcbox name (e.g. "funcbox_4")
        [2] NF name (firewall/monitor/nat/ids/vpn)
        [3] value patched into MAX_THREADS_NUM
        [4] value patched into MAX_BATCH_SIZE

    Returns (cmd_list, type_list) where each command is tagged "mk"
    (compile) or "run" (launch). "funcbox_4" gets special handling: the
    manager path is disabled and a secondary-process instance is started
    instead of the sandbox.

    NOTE(review): relies on a module-level `func_dict` (NF name -> value
    spliced into main.c / the funcbox command line) defined elsewhere in
    this file, and edits C sources in place under
    /home/amax/projects/serverless-nfv -- verify file layout before reuse.
    """
    # data:
    # throughput/latency;funxbox_x;firewall/monitor/nat/ids/vpn
    funcbox_name = data.split(";")[1]
    nf_name = data.split(";")[2]
    max_threads_num = data.split(";")[3]
    max_batch_num = data.split(";")[4]
    print(max_threads_num)
    cmd_list = []
    type_list = []
    dispatcher_h_path = "/home/amax/projects/serverless-nfv/framework/executor/includes/dispatcher.h"
    s = ""
    with open(dispatcher_h_path, 'r') as f:
        s = f.read()
    if data.find("throughput") != -1:
        MAX_PKTS_BURST_TX = "32"
    else:
        # Latency currently keeps 32 as well; "1" left as a reference.
        MAX_PKTS_BURST_TX = "32"
        # MAX_PKTS_BURST_TX = "1"
    open(dispatcher_h_path, 'w').write(
        re.sub(r'#define\sDISPATCHER_BUFFER_PKTS\s[0-9]+',
        "#define DISPATCHER_BUFFER_PKTS " + MAX_PKTS_BURST_TX, s))
    # main.c: toggle manager_init (off for funcbox_4, on otherwise),
    # enable the selected funcbox line (rewriting its last argument from
    # func_dict), and comment out every other funcbox_* line.
    dispatcher_main_path = "/home/amax/projects/serverless-nfv/framework/executor/main.c"
    linelist = []
    with open(dispatcher_main_path, "r") as f:
        for line in f.readlines():
            if line.find("manager_init") != -1:
                if funcbox_name == "funcbox_4":
                    if line[0] != '/':
                        line = "//" + line
                else:
                    if line[0] == '/':
                        line = line[2:]
            if line.find(funcbox_name) != -1:
                if line[0] == '/':
                    line = line[2:]
                line = line[:line.rfind(',')] + ", " + func_dict[nf_name] + ");\n"
                print(">" * 30)
                print(line)
                print(">" * 30)
            elif line.find("funcbox_") != -1:
                if line[0] != '/':
                    line = "//" + line
            linelist.append(line)
    with open(dispatcher_main_path, "w") as f:
        for line in linelist:
            f.write(line)
    # manager.c: comment/uncomment the fixed block at lines 85-100
    # depending on whether funcbox_4 is selected.
    dispatcher_manager_path = "/home/amax/projects/serverless-nfv/framework/executor/manager.c"
    linelist = []
    with open(dispatcher_manager_path, "r") as f:
        line_cnt = 0
        for line in f.readlines():
            line_cnt = line_cnt + 1
            if 85 <= line_cnt and line_cnt <= 100:
                if funcbox_name == "funcbox_4":
                    if line[0] != '/':
                        line = "//" + line
                else:
                    if line[0] == '/':
                        line = line[2:]
            linelist.append(line)
    with open(dispatcher_manager_path, "w") as f:
        for line in linelist:
            f.write(line)
    # Patch batch size (and the funcbox4_instance copy when applicable).
    dir_name = "/home/amax/projects/serverless-nfv/funcboxes/" + funcbox_name + "/includes/"
    with open(dir_name + "funcbox.h", 'r') as f:
        s = f.read()
    open(dir_name + "funcbox.h", 'w').write(
        re.sub(r'#define\sMAX_BATCH_SIZE\s[0-9]+',
        "#define MAX_BATCH_SIZE " + max_batch_num, s))
    if funcbox_name == "funcbox_4":
        dir_name = "/home/amax/projects/serverless-nfv/funcboxes/funcbox4_instance/includes/"
        with open(dir_name + "funcbox.h", 'r') as f:
            s = f.read()
        open(dir_name + "funcbox.h", 'w').write(
            re.sub(r'#define\sMAX_BATCH_SIZE\s[0-9]+',
            "#define MAX_BATCH_SIZE " + max_batch_num, s))
    # Patch the thread count.
    dir_name = "/home/amax/projects/serverless-nfv/funcboxes/" + funcbox_name + "/includes/"
    with open(dir_name + "funcbox.h", 'r') as f:
        s = f.read()
    open(dir_name + "funcbox.h", 'w').write(
        re.sub(r'#define\sMAX_THREADS_NUM\s[0-9]+',
        "#define MAX_THREADS_NUM " + max_threads_num, s))
    # Build, then launch (sandbox only for non-funcbox_4 runs).
    cmd_list.append("cd /home/amax/projects/serverless-nfv/framework/ && ./make.sh executor")
    type_list.append("mk")
    cmd_list.append("cd /home/amax/projects/serverless-nfv/funcboxes/" + funcbox_name +" && make clean && make")
    type_list.append("mk")
    if funcbox_name == "funcbox_4":
        cmd_list.append("cd /home/amax/projects/serverless-nfv/funcboxes/funcbox4_instance && make clean && make")
        type_list.append("mk")
    else:
        cmd_list.append("cd /home/amax/projects/serverless-nfv/framework/ && ./start.sh sandbox")
        type_list.append("run")
    cmd_list.append("cd /home/amax/projects/serverless-nfv/framework/ && ./start.sh executor")
    type_list.append("run")
    if funcbox_name == "funcbox_4":
        cmd_list.append("cd /home/amax/projects/serverless-nfv/funcboxes/funcbox_4 && "
        "sudo ./build/app/funcbox_4 -l 17 -n 4 --proc-type=secondary -- -r Deliver_rx_0_queue -t "
        "Deliver_tx_0_queue -k Deliver_0_lock -l 17 -f " + func_dict[nf_name])
        type_list.append("run")
    return cmd_list, type_list
def parse_executor_result(ret, test_type):
    """Executor runs produce no parsed metrics; just report completion."""
    return ["done"]
def parse_data(data, cur_ip_addr):
    """Dispatch the raw control message to this host's parser.

    Returns (cmd_list, type_list), or None for an unknown host address.
    """
    handlers = {
        "202.112.237.39": parse_pktgen_data,
        "202.112.237.37": parse_gateway_data,
        "202.112.237.33": parse_executor_data,
    }
    handler = handlers.get(cur_ip_addr)
    if handler is not None:
        return handler(data)
def parse_result(ret, test_type, cur_ip_addr):
    """Dispatch raw command output to this host's result parser.

    Returns the parsed result list, or None for an unknown host address.
    """
    handlers = {
        "202.112.237.39": parse_pktgen_result,
        "202.112.237.37": parse_gateway_result,
        "202.112.237.33": parse_executor_result,
    }
    handler = handlers.get(cur_ip_addr)
    if handler is not None:
        return handler(ret, test_type)
def executorkill_p():
    """Force-kill any running sandbox, executor, and funcbox processes."""
    for proc in ("sandbox", "executor", "funcbox"):
        _cmd = ("ps -ef | grep " + proc +
                " | grep -v grep |awk '{print $2}' | xargs sudo kill -9")
        run_client_cmd_withuserkill(_cmd)
def gatewaykill_p():
    """Force-kill any running gateway process."""
    run_client_cmd_withuserkill(
        "ps -ef | grep gateway | grep -v grep |awk '{print $2}' | xargs sudo kill -9")
if __name__ == '__main__':
    # Client loop: connect to the controller at 202.112.237.33:6132 and
    # execute whatever build/run commands it pushes for this host.
    cur_ip_addr = get_ip()
    print(cur_ip_addr, type(cur_ip_addr))
    host_name = '202.112.237.33'
    port = 6132
    s = connect_to_host(host_name, port)
    # p_list = []
    while True:
        data = s.recv(1024)
        data = str(data, encoding = "utf-8")
        if data:
            print(data)
            # "stop" message: kill this host's long-running processes and ack.
            if data.find("stop") != -1:
                if cur_ip_addr == "202.112.237.37":
                    gatewaykill_p()
                elif cur_ip_addr == "202.112.237.33":
                    executorkill_p()
                s.send(bytes("stopmyend", encoding = "utf-8"))
                continue
            task_list = []
            ret = []
            cmd_list, type_list = parse_data(data, cur_ip_addr)
            print(cmd_list, type_list)
            if cur_ip_addr == "202.112.237.37":
                # Gateway host: run all but the last command to completion,
                # then launch the gateway itself as a user-killed process.
                i = 0
                for cmd in cmd_list[:-1]:
                    task = client_thread(run_client_cmd, (cmd, ))
                    task_list.append(task)
                    task.start()
                    ret = ret + parse_result(task_list[i].get_result(), type_list[i], cur_ip_addr)
                    i = i + 1
                p = run_client_cmd_withuserkill(cmd_list[-1])
                # p_list.append(p)
            elif cur_ip_addr == "202.112.237.39":
                # Pktgen host: run every command and collect all results.
                for cmd in cmd_list:
                    task = client_thread(run_client_cmd, (cmd, ))
                    task_list.append(task)
                    task.start()
                for i in range(len(task_list)):
                    ret = ret + parse_result(task_list[i].get_result(), type_list[i], cur_ip_addr)
            elif cur_ip_addr == "202.112.237.33":
                # Executor host: run the build commands first, then start
                # the sandbox and executor (last two commands) in order.
                for cmd in cmd_list[:-2]:
                    task = client_thread(run_client_cmd, (cmd, ))
                    task_list.append(task)
                    task.start()
                    print(cmd)
                    time.sleep(3)
                for i in range(len(task_list)):
                    ret = ret + parse_result(task_list[i].get_result(), type_list[i], cur_ip_addr)
                # start sandbox
                task = client_thread(run_client_cmd, (cmd_list[-2], ))
                task.start()
                time.sleep(1)
                # start executor
                task = client_thread(run_client_cmd, (cmd_list[-1], ))
                task.start()
            print(ret)
            # Report results back, ';'-separated, terminated by "myend".
            for line in ret:
                s.send(bytes(str(line), encoding = "utf-8"))
                s.send(bytes(";", encoding = "utf-8"))
            s.send(bytes("myend", encoding = "utf-8"))
# -*- coding: utf-8 -*-
# __author__ = 'zs'
# 2018/12/4, 2:20 PM
# String slicing basics: each one-character slice of 'abc'.
print(len(''))
for start in range(3):
    print('abc'[start:start + 1])
# List slicing: copies, dropping the tail, and reversal.
l1 = [1, 2, 3]
print(l1[:])      # shallow copy -> [1, 2, 3]
print(l1[::])     # same thing  -> [1, 2, 3]
print(l1[:-1])    # all but last -> [1, 2]
print(l1[::-1])   # reversed -> [3, 2, 1]
def factorial(n):
    """return n!"""
    # Iterative product; the docstring is kept verbatim because the demo
    # script prints factorial.__doc__.
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
# Apply factorial across 1..5; functions are first-class objects.
l2 = [factorial(i) for i in range(1, 6)]
print(l2)
print(factorial(5))
print(factorial.__doc__)   # the docstring is just an attribute
print(type(factorial))     # -> <class 'function'>
# sorted() builds a new list; list.sort() sorts in place.
L = [2, 1, 4, 3]
L1 = sorted(L)
print('after sort L1:', L1)
fruits = ['strawberry', 'fig', 'apple', 'pear']
fruits.sort()
print(fruits)
# A key function orders tuples by their second element.
_L = [('b', 2), ('a', 1), ('c', 3), ('d', 4)]
_L.sort(key=lambda pair: pair[1])
print(_L)
|
#Dropouts
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.layers import Dropout
from sklearn.model_selection import GridSearchCV
def build_model(d1, d2):
    """Build and compile a small binary-classifier MLP.

    d1/d2 are the dropout rates after the first and second dense layers;
    they are the hyperparameters swept by the grid search below.
    """
    stack = [
        Dense(32, input_shape=(X_train.shape[1],), activation='relu'),
        Dropout(d1),
        Dense(16, activation='relu'),
        Dropout(d2),
        Dense(1, activation='sigmoid'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])
    return model
# Dropout-rate grid to sweep. (The original read
# "parameters = parameters = {...}" -- a harmless but confusing
# duplicated assignment, removed here.)
parameters = {'d1': [0.1, 0.2, 0.3],
              'd2': [0.1, 0.2, 0.3]}
estimator = KerasClassifier(build_fn=build_model, verbose=0, batch_size=16, epochs=100)
# 10-fold cross-validated accuracy for each (d1, d2) combination.
grid_search = GridSearchCV(estimator=estimator, param_grid=parameters, scoring='accuracy', cv=10)
grid_search.fit(X_train, y_train)
# 导包
import requests
class LoginApi:
    """Thin client for the ihrm login endpoint."""

    def __init__(self):
        # Host plus login path, assembled once at construction time.
        self.login_url="http://ihrm-test.itheima.net"+"/api/sys/login"

    def loginapi(self,jsonData,headers):
        """POST the login payload and return the raw response object."""
        response = requests.post(url=self.login_url,
                                 json=jsonData,
                                 headers=headers)
        return response
|
# Prefer the snake_case naming convention (name_second_name); camelCase
# (nameSecondName) also works, but snake_case is the Python norm.
# Identifiers may not contain special symbols, and the first character
# must be a letter or an underscore.
_name="vimal lohani"
number1 =5
print(_name)
print(number1)
print(_name *3)
# Assign several names in one statement.
name,age ="vimal",28
print(name,age)
# Chained assignment binds all three names to the same value.
x=y=z=1
print(x+y+z)
'''
Created on Feb 15, 2012
@author: mjbommar
'''
import dateutil.parser
import lxml.html
import nltk
import nltk_contrib.readability.readabilitytests
import re
class Document(object):
    '''
    The Document class handles parsing and storing
    data about a GPO document.

    NOTE(review): legacy Python 2 code ("except Exception, E" syntax,
    unicode()). parseDocument() calls lxml.etree.fromstring although only
    lxml.html is imported above -- presumably importing lxml.html makes
    lxml.etree available as a side effect; verify before reuse.
    '''
    def __init__(self, buffer=None):
        '''
        Constructor.

        buffer: raw XML bytes of a GPO bill document; parsing starts
        immediately via parseDocument().
        '''
        # Initialize local fields.
        self.buffer = buffer
        self.session = None
        self.date = None
        self.cite = None
        self.type = None
        self.title = None
        self.stage = None
        self.chamber = None
        self.text = None
        self.sentences = None
        self.tokens = []
        self.tokenFreq = {}
        self.stems = []
        self.stemFreq = {}
        self.readability = None
        self.sectionCount = 0
        self.sentenceCount = 0
        self.tokenCount = 0
        self.uniqTokenCount = 0
        self.uniqStemCount = 0
        self.avgWordsPerSentence = 0.0
        self.avgCharsPerWord = 0.0
        # Parsing values
        self.valid = True
        self.exceptions = []
        # NLTK stemmer and stopwords.
        self.stemmer = nltk.stem.PorterStemmer()
        self.stopwords = nltk.corpus.stopwords.words("english")
        self.parseDocument()
    def matchFirstXPath(self, expression):
        '''
        Match the first element that meets the XPath expression.
        Returns None when the expression matches nothing.
        '''
        elementList = self.document.xpath(expression)
        if len(elementList) > 0:
            return elementList[0]
        else:
            return None
    def parseDocument(self):
        '''
        Parse the document.

        On any failure, self.valid is set False and the exception is
        recorded in self.exceptions instead of propagating.
        '''
        # Parse the document or report an error.
        try:
            self.document = lxml.etree.fromstring(self.buffer)
        except Exception, E:
            self.exceptions.append(E)
            self.valid = False
            return
        # Identify each of the document variables.
        try:
            self.parseDate()
            self.parseCongress()
            self.parseSession()
            self.parseLegislationNumber()
            self.parseLegislationType()
            self.parseLegislationTitle()
            self.parseStage()
            self.parseCurrentChamber()
            self.parseText()
        except Exception, E:
            self.exceptions.append(E)
            self.valid = False
            return
    def parseDate(self):
        '''
        Parse the dc:date flag.
        '''
        elementDate = self.matchFirstXPath('/bill/dublinCore/date')
        if elementDate != None:
            self.date = dateutil.parser.parse(' '.join([t for t in elementDate.itertext()]))
    def parseCongress(self):
        '''
        Parse the congress.

        NOTE(review): assigns self.congress, which __init__ does not
        initialize -- the attribute only exists after a successful parse.
        '''
        elementCongress = self.matchFirstXPath('/bill/form/congress')
        if elementCongress != None:
            self.congress = ' '.join([t for t in elementCongress.itertext()])
    def parseSession(self):
        '''
        Parse the session.
        '''
        elementSession = self.matchFirstXPath('/bill/form/session')
        if elementSession != None:
            self.session = ' '.join([t for t in elementSession.itertext()])
    def parseLegislationNumber(self):
        '''
        Parse the legislation number.
        '''
        elementLegislationNumber = self.matchFirstXPath('/bill/form/legis-num')
        if elementLegislationNumber != None:
            self.cite = ' '.join([t for t in elementLegislationNumber.itertext()])
    def parseLegislationType(self):
        '''
        Parse the legislation type.
        '''
        elementLegislationType = self.matchFirstXPath('/bill/form/legis-type')
        if elementLegislationType != None:
            self.type = ' '.join([t for t in elementLegislationType.itertext()])
    def parseLegislationTitle(self):
        '''
        Parse the legislation title.
        '''
        elementLegislationTitle = self.matchFirstXPath('/bill/form/official-title')
        if elementLegislationTitle != None:
            self.title = ' '.join([t for t in elementLegislationTitle.itertext()])
            # Replace RIGHT SINGLE QUOTATION MARK with a plain dash.
            self.title = self.title.replace(u"\u2019", "-")
    def parseStage(self):
        '''
        Parse the stage.
        '''
        if 'bill-stage' in self.document.attrib:
            self.stage = self.document.attrib['bill-stage']
    def parseCurrentChamber(self):
        '''
        Parse the current chamber.
        '''
        elementCurrentChamber = self.matchFirstXPath('/bill/form/current-chamber')
        if elementCurrentChamber != None:
            self.chamber = ' '.join([t for t in elementCurrentChamber.itertext()])
    def parseText(self):
        '''
        Extract the document text.

        Populates text/sentences/tokens/stems, their frequency
        distributions, the various counts, and the readability metrics.
        '''
        # Extract text from legis-body.
        legisBody = self.matchFirstXPath('/bill/legis-body')
        # Number of sections
        sectionList = legisBody.xpath('//section')
        self.sectionCount = len(sectionList)
        textList = legisBody.xpath('//text')
        self.text = unicode("")
        for text in textList:
            curText = unicode(" ").join([unicode(t.strip()) for t in text.itertext() if t.strip()])
            # NOTE(review): re.UNICODE is passed as re.sub's *count*
            # argument here, not as flags -- likely unintended.
            curText = re.sub("\s{2,}", " ", curText, re.UNICODE)
            self.text += curText + unicode("\n")
        # Build sentence, token, and stem lists and sets.
        self.sentences = nltk.sent_tokenize(self.text)
        self.tokens = nltk.word_tokenize(self.text)
        self.stems = [self.stemmer.stem(t) for t in self.tokens]
        self.tokenFreq = nltk.FreqDist(self.tokens)
        self.stemFreq = nltk.FreqDist(self.stems)
        # Get some counts.
        self.sentenceCount = len(self.sentences)
        self.tokenCount = len(self.tokens)
        self.uniqTokenCount = len(self.tokenFreq)
        self.uniqStemCount = len(self.stemFreq)
        # Calculate reading level.
        # NOTE(review): Python 2 division -- these averages truncate to
        # integers if analyzedVars holds ints.
        self.readability = nltk_contrib.readability.readabilitytests.ReadabilityTool(self.text)
        self.readingLevel = self.readability.FleschKincaidGradeLevel()
        self.avgCharsPerWord = self.readability.analyzedVars['charCount'] / self.readability.analyzedVars['wordCount']
        self.avgWordsPerSentence = self.readability.analyzedVars['wordCount'] / self.readability.analyzedVars['sentenceCount']
|
def is_prime(num):
    """Return True if num is prime, by trial division up to sqrt(num).

    Fix: the original only rejected 1, so it returned True for 0 and
    crashed on negative inputs (num ** 0.5 yields a complex number);
    every value below 2 is now rejected explicitly.
    """
    if num < 2:
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True
# Read the count line, then the numbers, and print how many are prime.
n = int(input())
data = [int(tok) for tok in input().split()]
count = sum(1 for value in data if is_prime(value))
print(count)
|
#!/usr/bin/env python3
"""
This module extends Python's re module just a touch, so re will be doing
almost all the work. I love the stock re module, but I'd also like it to
support extensible regular expression syntax.
So that's what this module does. It is a pure Python wrapper around
Python's standard re module that lets you register your own regexp
extensions by calling
RE.extend(name,pattern)
Doing so means that "(?E:name)" in regular expressions used with *this*
module will be replaced with "(pattern)", and "(?E:label=name)" will be
replaced with "(?P<label>pattern)", in any regular expressions you use
with this module. To keep things compatible with the common usage of
Python's standard re module, it's a good idea to import RE like this:
import RE as re
This keeps your code from calling the standard re functions directly
(which will report things like "(?E:anything)" as errors, of course),
and it lets you then create whatever custom extension you'd like in this
way:
    re.extend('last_first',r'([^,]+)\s*,\s*(.*)')
This regexp matches "Flanders, Ned" in this example string:
name: Flanders, Ned
And you can use it this way:
re_name=re.compile(r'name:\s+(?E:last_first)')
That statement is exactly the same as
    re_name=re.compile(r'name:\s+(([^,]+)\s*,\s*(.*))')
but it's much easier to read and understand what's going on. If you use
the extension like this,
re_name=re.compile(r'name:\s+(?E:name=last_first)')
with "name=last_first" rather than just "last_first", that translates to
    re_name=re.compile(r'name:\s+(?P<name>([^,]+)\s*,\s*(.*))')
so you can use the match object's groupdict() method to get the value of
the "name" group.
It turns out having a few of these regexp extensions predefined for your
code can be a handy little step-saver that also tends to increase its
readability, especially if it makes heavy use of regular expressions.
This module comes with several pre-loaded regexp extensions that I've
come to appreciate:
General:
id - This matches login account names and programming language
identifiers (for Python, Java, C, etc., but not SQL or other
more special-purpose languages). Still '(?E:id)' is a nifty
way to match account names.
comment - Content following #, ;, or //, possibly preceded by
whitespace.
Network:
ipv4 - E.g. "1.2.3.4".
ipv6 - E.g. "1234:5678:9abc:DEF0:2:345".
ipaddr - Matches either ipv4 or ipv6.
cidr - E.g. "1.2.3.4/24".
macaddr - Looks a lot like ipv6, but the colons may also
be dashes or dots instead.
hostname - A DNS name.
host - Matches either hostname or ipaddr.
service - Matches host:port.
email - Any valid email address. This RE is well above average, but
not quite perfect. There's also an email_localpart
extension, which is used inside both "email" and "url"
(below), but it's really just for internal use. Take a look
if you're curious.
url - Any URL consisting of:
protocol - req (e.g. "http" or "presto:http:")
designator - req (either "email_localpart@" or "//")
host - req (anything matching our "host" extension)
port - opt (e.g. ":443")
path - opt (e.g. "/path/to/content.html")
params - opt (e.g. "q=regular%20expression&items=10")
Time and Date:
day - Day of week, Sunday through Saturday, or any unambiguous
prefix thereof.
    day3 - First three letters of any day of the week.
DAY - Full name of day of week.
month - January through December, or any unambiguous prefix
thereof.
month3 - First three letters of any month.
MONTH - Full name of any month.
date_YMD - [CC]YY(-|/|.)[M]M(-|/|.)[D]D
date_YmD - [CC]YY(-|/|.)month(-|/|.)[D]D
date_mD - "month DD"
time_HM - [H]H(-|:|.)MM
time_HMS - [H]H(-|:|.)MM(-|:|.)SS
Some of these preloaded RE extensions are computed directly in the
module. For instance the day, day3, DAY, month, month3, and MONTH
extensions are computed according to the current locale when this module
loads. The rest are loaded from /etc/RE.rc and/or ~/.RE.rc (in that
order). For this to work, you need to copy the .RE.rc file that came
with this module to your home directory or copy it to /etc/RE.rc. Or
make your own. It's up to you.
"""
from datetime import date # We use day- and month-names of the current locale.
import re,os
from re import error,escape,purge,template
from re import I,IGNORECASE,L,LOCALE,M,MULTILINE,S,DOTALL,U,UNICODE,X,VERBOSE,DEBUG
# Public symbols:
__all__=[
"compile",
"error",
"escape",
"extend",
"findall",
"match",
"purge",
"read_extensions",
"search",
"split",
"sub",
"subn",
"template",
# "Error",
"I","IGNORECASE", # 2
"L","LOCALE", # 4
"M","MULTILINE", # 8
"S","DOTALL", # 16
"U","UNICODE", # 32
"X","VERBOSE", # 64
"DEBUG", # 128
"_extensions",
]
#class Error(Exception):
# pass
# This dictionary holds all extensions, keyed by name. Values are the
# (possibly pre-expanded) pattern strings substituted for "(?E:name)".
_extensions={}
# This RE matches an RE extension reference, possibly in a larger string:
# "(?E:name)" or "(?E:label=name)".
_extpat=re.compile(r'(\(\?E:[_A-Za-z][_A-Za-z0-9]*(=[_A-Za-z][_A-Za-z0-9]*)?\))')
# This RE matches a definition line in /etc/RE.rc and ~/.RE.rc:
# "name=pattern" stores verbatim, "name<pattern" expands before storing
# (see read_extensions, which maps op '<' to expand=True).
_extdef=re.compile(r'^\s*([_A-Za-z][_A-Za-z0-9]*)\s*([=<])(.*)$')
# This RE matches blank lines and comments in /etc/RE.rc and ~/.RE.rc.
_extcmt=re.compile(r'^\s*(([#;]|//).*)?$')
def _apply_extensions(pattern,allow_named=True):
    """Expand every "(?E:name)" / "(?E:label=name)" reference in pattern,
    repeating until no references remain (extensions may themselves
    contain extension references). Named groups are only emitted for
    references found at the outermost expansion pass, and only when
    allow_named is true."""
    top_level=True
    while True:
        refs=set(m[0] for m in _extpat.findall(pattern))
        if not refs:
            return pattern
        for ref in refs:
            body=ref[4:-1]  # strip the "(?E:" prefix and ")" suffix
            #print('D: ext=%r'%(body,))
            if not body:
                raise error('RE extension name is empty')
            if '=' in body:
                label,body=body.split('=')
            else:
                label=None
            #print('D: label=%r, ext=%r'%(label,body))
            if body not in _extensions:
                raise error('Unregistered RE extension %r'%(body,))
            if label and top_level and allow_named:
                replacement='(?P<%s>%s)'%(label,_extensions[body])
            else:
                replacement='(%s)'%(_extensions[body],)
            pattern=pattern.replace(ref,replacement)
        top_level=False
def extend(name,pattern,expand=False):
    r"""Register (or remove) a regexp extension usable as "(?E:name)".

    Example:

        RE.extend('id',r'[-_0-9A-Za-z]+')

    After that call, r'(?E:id)' in any pattern handled by this module
    expands to r'([-_0-9A-Za-z]+)', and r'(?E:user=id)' expands to the
    named group r'(?P<user>[-_0-9A-Za-z]+)' (retrievable via the match
    object's groupdict()).

    By default the pattern string is stored verbatim in the registry
    (RE._extensions). With expand=True, any extension references inside
    the pattern are expanded first, so e.g.

        RE.extend('cred',r'^\s*cred\s*=\s*(?E:id):(.*)$',expand=True)

    stores r'^\s*cred\s*=\s*([-_0-9A-Za-z]+):(.*)$'. Either way, using
    '(?E:cred)' later behaves identically.

    Passing a false pattern (e.g. None) deregisters name if present.
    """
    if not pattern:
        # Deregister the name, tolerating its absence.
        _extensions.pop(name,None)
    else:
        if expand:
            pattern=_apply_extensions(pattern)
        _extensions[name]=pattern
def read_extensions(filename='~/.RE.rc'):
    """Read RE extension definitions from the given file (default
    ~/.RE.rc). Each non-comment line is "name=pattern" (stored verbatim)
    or "name<pattern" (expanded before storing). A missing file is
    silently ignored; a malformed line raises error.

    Fix: the malformed-line message said "(unknown)" instead of naming
    the file being read (the commented-out original shows the intent).
    """
    filename=os.path.expanduser(filename)
    if os.path.isfile(filename):
        with open(filename) as f:
            count=0
            for line in f:
                count+=1
                if _extcmt.match(line):
                    continue
                m=_extdef.match(line)
                if not m:
                    raise error('%s: Bad extension in line %d: "%s"'%(filename,count,line.rstrip()))
                name,op,pat=m.groups()
                extend(name,pat,expand=op=='<')
def compile(pattern,flags=0):
    "Expand extension references in pattern, then compile it with re.compile."
    expanded=_apply_extensions(pattern)
    return re.compile(expanded,flags)
def findall(pattern,s,flags=0):
    """Extension-aware re.findall: return all non-overlapping matches of
    pattern in s. With one or more groups in the pattern the result is a
    list of groups (tuples when there is more than one group); empty
    matches are included."""
    expanded=_apply_extensions(pattern)
    return re.findall(expanded,s,flags)
def finditer(pattern,s,flags=0):
    """Extension-aware re.finditer: iterate over match objects for all
    non-overlapping matches of pattern in s; empty matches included."""
    expanded=_apply_extensions(pattern)
    return re.finditer(expanded,s,flags)
def match(pattern,s,flags=0):
    """Extension-aware re.match: apply pattern at the start of s,
    returning a match object or None."""
    expanded=_apply_extensions(pattern)
    return re.match(expanded,s,flags)
def search(pattern,s,flags=0):
    """Extension-aware re.search: scan s for the first match of pattern,
    returning a match object or None."""
    expanded=_apply_extensions(pattern)
    return re.search(expanded,s,flags)
def split(pattern,s,maxsplit=0,flags=0):
    """Extension-aware re.split: split s on occurrences of pattern and
    return the list of resulting substrings."""
    expanded=_apply_extensions(pattern)
    return re.split(expanded,s,maxsplit,flags)
def sub(pattern, repl, string, count=0, flags=0):
    """Extension-aware re.sub: return string with the leftmost
    non-overlapping occurrences of pattern replaced by repl. repl may be
    a string (backslash escapes processed) or a callable receiving the
    match object and returning the replacement.

    Fix: the body referenced an undefined name `s` instead of the
    `string` parameter, so every call raised NameError.
    """
    return re.sub(_apply_extensions(pattern),repl,string,count,flags)
def subn(pattern, repl, string, count=0, flags=0):
  """Return a 2-tuple containing (new_string, number). new_string is the
  string obtained by replacing the leftmost non-overlapping occurrences
  of the (extension-expanded) pattern in the source string by the
  replacement repl. number is the number of substitutions that were
  made. repl can be either a string or a callable; if a string,
  backslash escapes in it are processed. If it is a callable, it's
  passed the match object and must return a replacement string to be
  used."""
  # BUG FIX: the original passed an undefined name "s" to re.subn(),
  # raising NameError on every call; the parameter is named "string".
  return re.subn(_apply_extensions(pattern), repl, string, count, flags)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Compute some extensions that are a pain to compose manually.
# TODO: Compute the "day" and "month" extensions like we're doing for day3,
# DAY, month3, and MONTH below. The way we're doing it now only kind of works.
# Day, date, and time matching are fertile ground for improvement.
# day = SUNDAY|MONDAY|TUESDAY|WEDNESDAY|THURSDAY|FRIDAY|SATURDAY
# Case doesn't matter, and any distinct beginning of those day names is
# sufficient to match.
# NOTE: 2001-01-07 fell on a Sunday, so this builds the locale's weekday
# names, lowercased, in Sunday-first order.
dnames=[date(2001,1,x+7).strftime('%A').lower() for x in range(7)]
extend('day',r'(([Ss][Uu]|[Mm]|[Tt][Uu]|[Ww]|[Tt][Hh]|[Ff]|[Ss][Aa])[AEDIONSRUTYaedionsruty]*)')
# day3/DAY: per-character case-insensitive classes built from the first three
# letters (day3) or the whole name (DAY) of each weekday, joined with |.
extend('day3',r'(%s)'%'|'.join(['(%s)'%''.join(['[%s%s]'%(c.upper(),c) for c in d[:3]]) for d in dnames]))
extend('DAY',r'(%s)'%'|'.join(['(%s)'%''.join(['[%s%s]'%(c.upper(),c) for c in d]) for d in dnames]))
# month = JANUARY|FEBRUARY|MARCH|APRIL|MAY|JUNE|JULY|AUGUST|SEPTEMBER|OCTOBER|NOVEMBER|DECEMBER
# This works just like the day extension, but for month names.
mnames=[date(2001,x,1).strftime('%B').lower() for x in range(1,13)]
extend('month',r'(([Jj][Aa]|[Ff]|[Mm][Aa][Rr]|[Aa][Pp]|[Mm][Aa][Yy]|[Jj][Uu][Nn]|[Jj][Uu][Ll]|[Aa][Uu]|[Ss]|[Oo]|[Nn]|[Dd])[ACBEGIHMLONPSRUTVYacbegihmlonpsrutvy]*)')
# month3=JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC
extend('month3',r'(%s)'%'|'.join(['(%s)'%''.join(['[%s%s]'%(c.upper(),c) for c in m[:3]]) for m in mnames]))
extend('MONTH',r'(%s)'%'|'.join(['(%s)'%''.join(['[%s%s]'%(c.upper(),c) for c in m]) for m in mnames]))
if False: # These are defined in ~/.RE.rc now.
  # Account names.
  extend('id',r'[-_0-9A-Za-z]+')
  # Python (Java, C, et al.) identifiers.
  extend('ident',r'[_A-Za-z][_0-9A-Za-z]+')
  # Comments may begin with #, ;, or // and continue to the end of the line.
  # If you need to handle multi-line comments ... feel free to roll your own
  # extension for that. (It CAN be done.)
  extend('comment',r'\s*([#;]|//).*')
  # Network
  extend('ipv4',r'\.'.join([r'\d{1,3}']*4))
  extend('ipv6',':'.join([r'[0-9A-Fa-f]{1,4}']*8))
  extend('ipaddr',r'(?E:ipv4)|(?E:ipv6)')
  extend('cidr',r'(?E:ipv4)/\d{1,2}')
  # 48-bit MAC addresses in colon/dash-separated pairs, dash-separated
  # triples, or dot-separated quads of hex digits.
  extend('macaddr48','(%s)|(%s)|(%s)'%(
    '[-:]'.join(['([0-9A-Fa-f]{2})']*6),
    '[-:]'.join(['([0-9A-Fa-f]{3})']*4),
    r'\.'.join(['([0-9A-Fa-f]{4})']*3)
  ))
  extend('macaddr64','(%s)|(%s)'%(
    '[-:.]'.join(['([0-9A-Fa-f]{2})']*8),
    '[-:.]'.join(['([0-9A-Fa-f]{4})']*4)
  ))
  extend('macaddr',r'(?E:macaddr48)|(?E:macaddr64)')
  extend('hostname',r'[0-9A-Za-z]+(\.[-0-9A-Za-z]+)*')
  extend('host','(?E:ipaddr)|(?E:hostname)')
  # NOTE(review): 'service' and 'hostport' use '\d' inside non-raw strings —
  # functionally identical today, but emits a SyntaxWarning on modern Python.
  extend('service','(?E:host):\d+')
  extend('hostport','(?E:host)(:(\d{1,5}))?') # Host and optional port.
  extend('filename',r'[^/]+')
  extend('path',r'/?(?E:filename)(/(?E:filename))*')
  extend('abspath',r'/(?E:filename)(/(?E:filename))*')
  extend('email_localpart',
    r"(\(.*\))?" # Comments are allowed in email addresses. Who knew!?
    r"([0-9A-Za-z!#$%&'*+-/=?^_`{|}~]+)"
    r"(\.([0-9A-Za-z!#$%&'*+-/=?^_`{|}~])+)*"
    r"(\(.*\))?" # The comment is either before or after the local part.
    r"@"
  )
  extend('email',r"(?E:email_localpart)(?E:hostport)")
  extend('url_scheme',r'([A-Za-z]([-+.]?[0-9A-Za-z]+)*:){1,2}')
  extend('url',
    r'(?E:url_scheme)' # E.g. 'http:' or 'presto:http:'.
    r'((?E:email_localpart)|(//))' # Allows both 'mailto:addr' and 'http://host'.
    r'(?E:hostport)?' # Host (required) and port (optional).
    r'(?E:abspath)?' # Any path that MIGHT follow all that.
    r'(\?' # Any parameters that MIGHT be present.
    r'((.+?)=([^&]*))'
    r'(&((.+?)=([^&]*)))*'
    r')?'
  )
  #r'(([^=]+)=([^&]*))'
  #r'(&(([^=]+)=([^&]*)))*'
# date_YMD = [CC]YY(-|/|.)[M]M(-|/|.)[D]D
# Wow. the BNF format is uglier than the regexp. Just think YYYY-MM-DD
# where the dashes can be / or . instead.
extend('date_YMD',r'(\d{2}(\d{2})?)([-/.])(\d{1,2})([-/.])(\d{1,2})')
# date_YmD is basically "[CC]YY-mon-DD" where mon is the name of a month as
# defined by the "month" extension above.
extend('date_YmD',r'(\d{2}(\d{2})?)([-/.])((?E:month))([-/.])(\d{1,2})')
# date_mD is basically "mon DD" where mon is the name of a month as defined by
# the "month" extension above.
extend('date_mD',r'(?E:month)\s+(\d{1,2})')
# time_HMS = HH:MM:SS
# time_HM = HH:MM
# 12- or 24-hour time will do, and the : can be . or - instead.
extend('time_HM',r'(\d{1,2})([-:.])(\d{2})')
extend('time_HMS',r'(\d{1,2})([-:.])(\d{2})([-:.])(\d{2})')
# TODO: Parse https://www.timeanddate.com/time/zones/ for TZ data, and hardcode
# that into a regexp extension here.
# Load site-wide extensions, then the per-user ones (presumably ~/.RE.rc per
# the comment above — confirm read_extensions' default argument).
read_extensions('/etc/RE.rc')
read_extensions()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Behave like a command if we're being called as one.
#
if __name__=='__main__':
  # Command-line mode: these imports are only needed when run as a program.
  import argparse,os,sys
  from debug import DebugChannel   # project-local debug-trace channel
  from doctest import testmod
  from english import nounf        # project-local "1 test"/"2 tests" pluralizer
  from handy import ProgInfo,die   # project-local program info / fatal-exit helper
  # prog.name vs prog.real_name tells us whether we were invoked via a symlink
  # (e.g. "pygrep" or "test") and should behave as that subcommand only.
  prog=ProgInfo()
def grep(opt):
  """Find all matching content according to the given argparse namespace.

  Scans each file in opt.filenames ('-' means stdin) for opt.pattern,
  honoring the -1/-c/-f/-g/-G/-i/-l/-v options carried in opt. Exits the
  process with status 0 if any line matched, 1 otherwise."""

  def output(line,tgroups,dgroups):
    "Emit one line of results. (This sub-function helps implement -v.)"
    if not (opt.count or opt.list):
      # Show each match.
      if opt.fmt and (tgroups or dgroups):
        line=opt.fmt.format(line,*tgroups,**dgroups)
      if opt.tuple or opt.dict:
        print('')
      if opt.tuple:
        # BUG FIX: this used m.groups() from the enclosing scope, which
        # raised AttributeError when -v passed a non-matching line here
        # (m is None in that case). Use the tgroups argument instead.
        print(repr(tgroups))
      if opt.dict:
        print('{%s}'%', '.join([
          f'"{k}": "{v}"' for k,v in sorted(dgroups.items())
        ]))
      if show_filename:
        print(f"{fn}: {line}")
      else:
        print(line)

  # Translate our options into re flags.
  opt.re_flags=0
  if opt.ignore_case:
    opt.re_flags|=re.IGNORECASE
  if dc:
    dc('Options').indent(1)
    dc('count=%r'%(opt.count,))
    dc('fmt=%r'%(opt.fmt,))
    dc('tuple=%r'%(opt.tuple,))
    dc('dict=%r'%(opt.dict,))
    dc('ignore_case=%r'%(opt.ignore_case,))
    dc('invert=%r'%(opt.invert,))
    dc('ext=%r'%(opt.ext,))
    dc('extensions=%r'%(opt.extensions,))
    dc('re_flags=%r'%(opt.re_flags,))
    # (typo fix: label read "Aguements")
    dc.indent(-1)('Arguments').indent(1)
    dc('pattern=%r'%(opt.pattern,))
    dc('filenames=%r'%(opt.filenames,))
    dc.indent(-1)
  all_matches=0 # Total matches over all scanned input files.
  if not opt.pattern:
    die("No regular expression given.")
  # Compile with this module's extension-aware compile(), not re.compile().
  pat=compile(opt.pattern,opt.re_flags)
  # Silently skip directories; default to stdin when no files remain.
  opt.filenames=[a for a in opt.filenames if not os.path.isdir(a)]
  show_filename=False
  if len(opt.filenames)<1:
    opt.filenames.append('-')
  elif len(opt.filenames)>1 and not opt.one:
    # Prefix output lines with the filename when scanning several files.
    show_filename=True
  for fn in opt.filenames:
    matches=0 # Matches found in this file.
    if fn=='-':
      fn='stdin'
      f=sys.stdin
    else:
      f=open(fn)
    for line in f:
      if line[-1]=='\n':
        line=line[:-1]
      m=pat.search(line)
      # With -v (invert) we report the NON-matching lines.
      if bool(m)!=bool(opt.invert):
        matches+=1
        if m:
          output(line,m.groups(),m.groupdict())
        else:
          output(line,(),{})
    if matches:
      if opt.count:
        if show_filename:
          print('%s: %d'%(fn,matches))
        else:
          print('%d'%matches)
      elif opt.list:
        print(fn)
    all_matches+=matches
    if fn!='stdin':
      f.close()
  # Exit 0 if anything matched anywhere, else 1 (grep convention).
  sys.exit((0,1)[all_matches==0])
def test(opt):
  """Run this module's doctest suite and report the result.

  Exits the process with status 0 when every test passes, 1 otherwise.

  >>> # Just to make testing dict result values predictable ...
  >>> def sdict(d):print('{%s}'%(', '.join(['%r: %r'%(k,v) for k,v in sorted(d.items())])))
  >>> # Basic expansion of a registered extension.
  >>> _apply_extensions(r'user=(?E:user=id)')
  'user=(?P<user>[-_0-9A-Za-z]+)'
  >>> _apply_extensions(r'user=(?E:id)')
  'user=([-_0-9A-Za-z]+)'
  >>> # "id"
  >>> s=' user=account123, client=123.45.6.78 '
  >>> m=search(r'user=(?E:user=id)',s)
  >>> m.groups()
  ('account123',)
  >>> b,e=m.span()
  >>> s[b:e]
  'user=account123'
  >>> # "id" combined with "ipv4"
  >>> m=search(r'user=(?E:user=id),\s*client=(?E:client=ipv4)',s)
  >>> m.groups()
  ('account123', '123.45.6.78')
  >>> sdict(m.groupdict())
  {'client': '123.45.6.78', 'user': 'account123'}
  >>> s='x=5 # This is a comment. '
  >>> m=search(r'(?E:comment)',s)
  >>> s[:m.span()[0]]
  'x=5'
  >>> # "cidr"
  >>> match(r'subnet=(?E:net=cidr)','subnet=123.45.6.78/24').groupdict()['net']
  '123.45.6.78/24'
  >>> # "ipv6"
  >>> s='client=2001:0db8:85A3:0000:0000:8a2e:0370:7334'
  >>> p=compile(r'client=(?E:client=ipv6)')
  >>> m=p.match(s)
  >>> m.groups()
  ('2001:0db8:85A3:0000:0000:8a2e:0370:7334',)
  >>> m.groupdict()['client']==m.groups()[0]
  True
  >>> b,e=m.span()
  >>> s[b:e]==s
  True
  >>> # "ipaddr". This really starts to show off how powerful these
  >>> # extensions can be. If you don't believe what a step-saver this is,
  >>> # run _apply_extensions() on the regexp that's compiled below.
  >>> s='server=2001:0db8:85A3:0000:0000:8a2e:0370:7334, client=123.45.6.78'
  >>> p=compile(r'server=(?E:srv=ipaddr),\s*client=(?E:cli=ipaddr)')
  >>> m=p.match(s)
  >>> m.groups()
  ('2001:0db8:85A3:0000:0000:8a2e:0370:7334', None, '2001:0db8:85A3:0000:0000:8a2e:0370:7334', '123.45.6.78', '123.45.6.78', None)
  >>> sdict(m.groupdict())
  {'cli': '123.45.6.78', 'srv': '2001:0db8:85A3:0000:0000:8a2e:0370:7334'}
  >>> # "macaddr48"
  >>> s='from 01:23:45:67:89:aB to 012-345-678-9aB via 0123.4567.89aB'
  >>> p=r'from\s+(?E:from=macaddr48)\s+to\s+(?E:to=macaddr48)\s+via\s+(?E:mid=macaddr48)'
  >>> sdict(search(p,s).groupdict())
  {'from': '01:23:45:67:89:aB', 'mid': '0123.4567.89aB', 'to': '012-345-678-9aB'}
  >>> # "macaddr64"
  >>> s='from 01:23:45:67:89:ab:cd:EF to 0123.4567.89ab.cdEF'
  >>> p=r'from\s+(?E:from=macaddr64)\s+to\s+(?E:to=macaddr64)'
  >>> sdict(match(p,s).groupdict())
  {'from': '01:23:45:67:89:ab:cd:EF', 'to': '0123.4567.89ab.cdEF'}
  >>> # "macaddr". Again, this is a pretty big regexp that we're getting for
  >>> # very little effort.
  >>> s='from 01:23:45:67:89:ab:cd:EF to 01:23:45:67:89:aB'
  >>> p=r'from\s+(?E:src=macaddr)\s+to\s+(?E:dst=macaddr)'
  >>> sdict(search(p,s).groupdict())
  {'dst': '01:23:45:67:89:aB', 'src': '01:23:45:67:89:ab:cd:EF'}
  >>> # "hostname". This should match any valid DNS name.
  >>> p='\s*host\s*[ :=]?\s*(?E:host=hostname)'
  >>> match(p,'host=this.is.a.test').groupdict()['host']
  'this.is.a.test'
  >>> # "host". Matches "ipaddr" or "hostname".
  >>> p='\s*host\s*[ :=]?\s*(?E:host=host)'
  >>> match(p,'host=this.is.a.test').groupdict()['host']
  'this.is.a.test'
  >>> s='host=123.45.6.78'
  >>> match(p,'host=123.45.6.78').groupdict()['host']
  '123.45.6.78'
  >>> # "hostport". Just like "host", but you can specify a port.
  >>> p='\s*host\s*[ :=]\s*(?E:host=hostport)'
  >>> match(p,'host=this.is.a.test').groupdict()['host']
  'this.is.a.test'
  >>> match(p,'host=123.45.6.78').groupdict()['host']
  '123.45.6.78'
  >>> match(p,'host=this.is.a.test:123').groupdict()['host']
  'this.is.a.test:123'
  >>> match(p,'host=123.45.6.78:99').groupdict()['host']
  '123.45.6.78:99'
  >>> # "filename"
  >>> p='\s*file\s*[ :=]\s*(?E:file=filename)'
  >>> search(p,'file=.file-name.EXT').groupdict()['file']
  '.file-name.EXT'
  >>> # "path"
  >>> p='\s*file\s*[ :=]\s*(?E:file=path)'
  >>> search(p,'file=.file-name.EXT').groupdict()['file']
  '.file-name.EXT'
  >>> search(p,'file=dir1/dir2/file-name.EXT').groupdict()['file']
  'dir1/dir2/file-name.EXT'
  >>> search(p,'file=/dir1/dir2/file-name.EXT').groupdict()['file']
  '/dir1/dir2/file-name.EXT'
  >>> # "abspath"
  >>> p='\s*file\s*[ :=]\s*(?E:file=abspath)'
  >>> search(p,'file=/dir1/dir2/file-name.EXT').groupdict()['file']
  '/dir1/dir2/file-name.EXT'
  >>> print(search(p,'file=dir1/dir2/file-name.EXT'))
  None
  >>> # "email_localpart"
  >>> p='from: (?E:from=email_localpart)'
  >>> match(p,'from: some.person@').groupdict()['from']
  'some.person@'
  >>> match(p,'from: (comment)some.person@').groupdict()['from']
  '(comment)some.person@'
  >>> match(p,'from: some.person(comment)@').groupdict()['from']
  'some.person(comment)@'
  >>> # "email"
  >>> p='from: (?E:from=email)'
  >>> match(p,'from: some.person@someplace').groupdict()['from']
  'some.person@someplace'
  >>> match(p,'from: (comment)some.person@someplace').groupdict()['from']
  '(comment)some.person@someplace'
  >>> match(p,'from: some.person(comment)@someplace').groupdict()['from']
  'some.person(comment)@someplace'
  >>> # "url_scheme"
  >>> p='(?E:proto=url_scheme)'
  >>> match(p,'http:').groupdict()['proto']
  'http:'
  >>> match(p,'ht-tp:').groupdict()['proto']
  'ht-tp:'
  >>> match(p,'h.t-t+p:').groupdict()['proto']
  'h.t-t+p:'
  >>> print(match(p,'-http:'))
  None
  >>> print(match(p,'http-:'))
  None
  >>> match(p,'presto:http:').groupdict()['proto']
  'presto:http:'
  >>> # "url"
  >>> p='(?E:url=url)'
  >>> search(p,'mailto:some.person@someplace.com').groupdict()['url']
  'mailto:some.person@someplace.com'
  >>> search(p,'mailto:some.person@someplace.com?to=me').groupdict()['url']
  'mailto:some.person@someplace.com?to=me'
  >>> search(p,'mailto:some.person@someplace.com?to=me&subject=testing').groupdict()['url']
  'mailto:some.person@someplace.com?to=me&subject=testing'
  >>> search(p,'ftp://vault.com').groupdict()['url']
  'ftp://vault.com'
  >>> search(p,'ftp://vault.com:500').groupdict()['url']
  'ftp://vault.com:500'
  >>> search(p,'ftp://vault.com:500/file').groupdict()['url']
  'ftp://vault.com:500/file'
  >>> search(p,'ftp://vault.com/file').groupdict()['url']
  'ftp://vault.com/file'
  >>> search(p,'ftp://vault.com/path/to/file').groupdict()['url']
  'ftp://vault.com/path/to/file'
  >>> search(p,'ftp://vault.com/path/to/file?encrypt=1').groupdict()['url']
  'ftp://vault.com/path/to/file?encrypt=1'
  >>> search(p,'ftp://vault.com/path/to/file?encrypt=1&compress=0').groupdict()['url']
  'ftp://vault.com/path/to/file?encrypt=1&compress=0'
  >>> # There's something screwy with the way we're matching URL parameters.
  >>> # Maybe we need a url_path (rather than abs_path) that rejects ? as a
  >>> # valid character. (And of course, we're not handling escaping at all,
  >>> # but I'm not sure that can even be expressed regularly.)
  >>> #search(p,'ftp://vault.com/path/to/file?encrypt=1&compress=0').groups()
  >>> p='(?E:day=day)'
  >>> search(p,'Sunday').groupdict()['day']
  'Sunday'
  >>> search(p,'Sun').groupdict()['day']
  'Sun'
  >>> search(p,'M').groupdict()['day']
  'M'
  >>> search(p,'m').groupdict()['day']
  'm'
  >>> search(p,'tuesday').groupdict()['day']
  'tuesday'
  >>> search(p,'tu').groupdict()['day']
  'tu'
  >>> p='(?E:day=day3)'
  >>> search(p,'Sun').groupdict()['day']
  'Sun'
  >>> search(p,'sun').groupdict()['day']
  'sun'
  >>> search(p,'tu')==None
  True
  >>> p='(?E:month=month)'
  >>> search(p,'January').groupdict()['month']
  'January'
  >>> search(p,'ja').groupdict()['month']
  'ja'
  >>> search(p,'May').groupdict()['month']
  'May'
  >>> search(p,'D').groupdict()['month']
  'D'
  >>> p='(?E:month=month3)'
  >>> search(p,'Jan').groupdict()['month']
  'Jan'
  >>> search(p,'ja')==None
  True
  >>> search(p,'May').groupdict()['month']
  'May'
  >>> search(p,'Dec').groupdict()['month']
  'Dec'
  """
  # f = number of failed doctests, t = total doctests attempted.
  f,t=testmod(report=False)
  if f>0:
    print("---------------------------------------------------------------------\n")
  print(("Passed %d of %s."%(t-f,nounf('test',t))))
  sys.exit((1,0)[f==0])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Parse our command line using an ArgumentParser instance with subparsers. If
# this command was symlinked to the name of one of the subparsers, then allow
# only the options for that subparser.
# NOTE(review): dc starts enabled=True and is only re-set from --debug after
# parsing, so dc() calls before dc.enable() below always fire — confirm
# DebugChannel's semantics if that matters.
dc=DebugChannel(label='D',enabled=True)
ap=argparse.ArgumentParser()
ap.add_argument('--debug',action='store_true',help="Turn on debug messages.")
ap.add_argument('-x',dest='ext',action='append',default=[],help="""Add a "name=pattern" RE extension. Use as many -x options as you need.""")
ap.add_argument('--extensions',action='store_true',help="List our RE extensions.")
if prog.name==prog.real_name:
  # Invoked under our own name: expose both subcommands.
  sp=ap.add_subparsers()
  ap_find=sp.add_parser('grep',description="This works a lot like grep.")
  ap_test=sp.add_parser('test',description="Just run internal tests and report the result.")
elif prog.name=='pygrep':
  # Invoked via a "pygrep" symlink: behave as the grep subcommand only.
  ap_find=ap # Add "find" subparser's options to the main parser.
  ap_find.description="This works a lot like grep (but without so many features."
elif prog.name=='test':
  # Invoked via a "test" symlink: behave as the test subcommand only.
  ap_test=ap
if prog.name in (prog.real_name,'pygrep'):
  ap_find.set_defaults(func=grep)
  ap_find.add_argument('-1',dest='one',action='store_true',help="Do not output names of files containing matches, even if more than one file is to be scanned.")
  ap_find.add_argument('-c',dest='count',action='store_true',help="Output only the number of matching lines of each file scanned.")
  ap_find.add_argument('-f',dest='fmt',action='store',help="Use standard Python template syntax to format output.")
  ap_find.add_argument('-g',dest='tuple',action='store_true',help='Output the tuple of matching groups above each matching line.')
  ap_find.add_argument('-G',dest='dict',action='store_true',help='Output the dictionary of matching groups above each matching line.')
  ap_find.add_argument('-i',dest='ignore_case',action='store_true',help="Ignore the case of alphabetic characters when scanning.")
  ap_find.add_argument('-l',dest='list',action='store_true',help="Output only the name of each file scanned. (Trumps -1.)")
  ap_find.add_argument('-v',dest='invert',action='store_true',help="Output (or count) non-matching lines rather than matching lines.")
  ap_find.add_argument('pattern',metavar='RE',action='store',nargs='?',help="A regular expression of the Python dialect, which can also include RE extensions.")
  ap_find.add_argument('filenames',metavar='FILENAME',action='store',nargs='*',help="Optional filenames to scan for the given pattern.")
if prog.name in (prog.real_name,'test'):
  ap_test.set_defaults(func=test)
#from pprint import pprint
#pprint(ap.__dict__)
opt=ap.parse_args()
dc.enable(opt.debug)
# Register any extensions given with -x on the command line.
for x in opt.ext:
  name,pat=x.split('=',1)
  dc('Registering RE %r as "%s"'%(name,pat))
  extend(name,pat)
if opt.extensions:
  # --extensions: dump the extension registry and quit.
  print(('\n'.join(['%s=%s'%(n,p) for n,p in sorted(_extensions.items())])))
  sys.exit(0)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Do whatever our command line says. (Subcommand parsers set opt.func.)
if hasattr(opt,'func'):
  opt.func(opt)
|
import json
countries=countries=[
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Singapore",
"optionValue": "Singapore"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Malaysia",
"optionValue": "Malaysia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Indonesia",
"optionValue": "Indonesia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Philippines",
"optionValue": "Philippines"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Thailand",
"optionValue": "Thailand"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Vietnam",
"optionValue": "Vietnam"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Afghanistan",
"optionValue": "Afghanistan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Albania",
"optionValue": "Albania"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Algeria",
"optionValue": "Algeria"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Andorra",
"optionValue": "Andorra"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Angola",
"optionValue": "Angola"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Antigua & Deps",
"optionValue": "Antigua & Deps"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Argentina",
"optionValue": "Argentina"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Armenia",
"optionValue": "Armenia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Australia",
"optionValue": "Australia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Austria",
"optionValue": "Austria"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Azerbaijan",
"optionValue": "Azerbaijan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Bahamas",
"optionValue": "Bahamas"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Bahrain",
"optionValue": "Bahrain"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Bangladesh",
"optionValue": "Bangladesh"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Barbados",
"optionValue": "Barbados"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Belarus",
"optionValue": "Belarus"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Belgium",
"optionValue": "Belgium"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Belize",
"optionValue": "Belize"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Benin",
"optionValue": "Benin"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Bhutan",
"optionValue": "Bhutan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Bolivia",
"optionValue": "Bolivia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Bosnia Herzegovina",
"optionValue": "Bosnia Herzegovina"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Botswana",
"optionValue": "Botswana"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Brazil",
"optionValue": "Brazil"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Brunei",
"optionValue": "Brunei"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Bulgaria",
"optionValue": "Bulgaria"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Burkina",
"optionValue": "Burkina"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Burundi",
"optionValue": "Burundi"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Cambodia",
"optionValue": "Cambodia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Cameroon",
"optionValue": "Cameroon"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Canada",
"optionValue": "Canada"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Cape Verde",
"optionValue": "Cape Verde"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Central African Rep",
"optionValue": "Central African Rep"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Chad",
"optionValue": "Chad"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Chile",
"optionValue": "Chile"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "China",
"optionValue": "China"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Colombia",
"optionValue": "Colombia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Comoros",
"optionValue": "Comoros"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Congo",
"optionValue": "Congo"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Congo {Democratic Rep}",
"optionValue": "Congo {Democratic Rep}"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Costa Rica",
"optionValue": "Costa Rica"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Croatia",
"optionValue": "Croatia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Cuba",
"optionValue": "Cuba"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Cyprus",
"optionValue": "Cyprus"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Czech Republic",
"optionValue": "Czech Republic"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Denmark",
"optionValue": "Denmark"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Djibouti",
"optionValue": "Djibouti"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Dominica",
"optionValue": "Dominica"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Dominican Republic",
"optionValue": "Dominican Republic"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "East Timor",
"optionValue": "East Timor"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Ecuador",
"optionValue": "Ecuador"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Egypt",
"optionValue": "Egypt"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "El Salvador",
"optionValue": "El Salvador"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Equatorial Guinea",
"optionValue": "Equatorial Guinea"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Eritrea",
"optionValue": "Eritrea"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Estonia",
"optionValue": "Estonia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Ethiopia",
"optionValue": "Ethiopia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Fiji",
"optionValue": "Fiji"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Finland",
"optionValue": "Finland"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "France",
"optionValue": "France"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Gabon",
"optionValue": "Gabon"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Gambia",
"optionValue": "Gambia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Georgia",
"optionValue": "Georgia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Germany",
"optionValue": "Germany"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Ghana",
"optionValue": "Ghana"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Greece",
"optionValue": "Greece"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Grenada",
"optionValue": "Grenada"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Guatemala",
"optionValue": "Guatemala"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Guinea",
"optionValue": "Guinea"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Guinea-Bissau",
"optionValue": "Guinea-Bissau"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Guyana",
"optionValue": "Guyana"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Haiti",
"optionValue": "Haiti"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Honduras",
"optionValue": "Honduras"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Hungary",
"optionValue": "Hungary"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Iceland",
"optionValue": "Iceland"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "India",
"optionValue": "India"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Iran",
"optionValue": "Iran"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Iraq",
"optionValue": "Iraq"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Ireland {Republic}",
"optionValue": "Ireland {Republic}"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Israel",
"optionValue": "Israel"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Italy",
"optionValue": "Italy"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Ivory Coast",
"optionValue": "Ivory Coast"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Jamaica",
"optionValue": "Jamaica"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Japan",
"optionValue": "Japan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Jordan",
"optionValue": "Jordan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Kazakhstan",
"optionValue": "Kazakhstan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Kenya",
"optionValue": "Kenya"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Kiribati",
"optionValue": "Kiribati"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Korea North",
"optionValue": "Korea North"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Korea South",
"optionValue": "Korea South"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Kosovo",
"optionValue": "Kosovo"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Kuwait",
"optionValue": "Kuwait"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Kyrgyzstan",
"optionValue": "Kyrgyzstan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Laos",
"optionValue": "Laos"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Latvia",
"optionValue": "Latvia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Lebanon",
"optionValue": "Lebanon"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Lesotho",
"optionValue": "Lesotho"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Liberia",
"optionValue": "Liberia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Libya",
"optionValue": "Libya"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Liechtenstein",
"optionValue": "Liechtenstein"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Lithuania",
"optionValue": "Lithuania"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Luxembourg",
"optionValue": "Luxembourg"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Macedonia",
"optionValue": "Macedonia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Madagascar",
"optionValue": "Madagascar"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Malawi",
"optionValue": "Malawi"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Maldives",
"optionValue": "Maldives"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Mali",
"optionValue": "Mali"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Malta",
"optionValue": "Malta"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Marshall Islands",
"optionValue": "Marshall Islands"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Mauritania",
"optionValue": "Mauritania"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Mauritius",
"optionValue": "Mauritius"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Mexico",
"optionValue": "Mexico"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Micronesia",
"optionValue": "Micronesia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Moldova",
"optionValue": "Moldova"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Monaco",
"optionValue": "Monaco"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Mongolia",
"optionValue": "Mongolia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Montenegro",
"optionValue": "Montenegro"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Morocco",
"optionValue": "Morocco"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Mozambique",
"optionValue": "Mozambique"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Myanmar, {Burma}",
"optionValue": "Myanmar, {Burma}"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Namibia",
"optionValue": "Namibia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Nauru",
"optionValue": "Nauru"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Nepal",
"optionValue": "Nepal"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Netherlands",
"optionValue": "Netherlands"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "New Zealand",
"optionValue": "New Zealand"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Nicaragua",
"optionValue": "Nicaragua"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Niger",
"optionValue": "Niger"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Nigeria",
"optionValue": "Nigeria"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Norway",
"optionValue": "Norway"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Oman",
"optionValue": "Oman"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Pakistan",
"optionValue": "Pakistan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Palau",
"optionValue": "Palau"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Panama",
"optionValue": "Panama"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Papua New Guinea",
"optionValue": "Papua New Guinea"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Paraguay",
"optionValue": "Paraguay"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Peru",
"optionValue": "Peru"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Poland",
"optionValue": "Poland"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Portugal",
"optionValue": "Portugal"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Qatar",
"optionValue": "Qatar"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Romania",
"optionValue": "Romania"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Russian Federation",
"optionValue": "Russian Federation"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Rwanda",
"optionValue": "Rwanda"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "St Kitts & Nevis",
"optionValue": "St Kitts & Nevis"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "St Lucia",
"optionValue": "St Lucia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Saint Vincent & the Grenadines",
"optionValue": "Saint Vincent & the Grenadines"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Samoa",
"optionValue": "Samoa"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "San Marino",
"optionValue": "San Marino"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Sao Tome & Principe",
"optionValue": "Sao Tome & Principe"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Saudi Arabia",
"optionValue": "Saudi Arabia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Senegal",
"optionValue": "Senegal"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Serbia",
"optionValue": "Serbia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Seychelles",
"optionValue": "Seychelles"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Sierra Leone",
"optionValue": "Sierra Leone"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Slovakia",
"optionValue": "Slovakia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Slovenia",
"optionValue": "Slovenia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Solomon Islands",
"optionValue": "Solomon Islands"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Somalia",
"optionValue": "Somalia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "South Africa",
"optionValue": "South Africa"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "South Sudan",
"optionValue": "South Sudan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Spain",
"optionValue": "Spain"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Sri Lanka",
"optionValue": "Sri Lanka"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Sudan",
"optionValue": "Sudan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Suriname",
"optionValue": "Suriname"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Swaziland",
"optionValue": "Swaziland"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Sweden",
"optionValue": "Sweden"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Switzerland",
"optionValue": "Switzerland"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Syria",
"optionValue": "Syria"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Taiwan",
"optionValue": "Taiwan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Tajikistan",
"optionValue": "Tajikistan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Tanzania",
"optionValue": "Tanzania"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Togo",
"optionValue": "Togo"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Tonga",
"optionValue": "Tonga"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Trinidad & Tobago",
"optionValue": "Trinidad & Tobago"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Tunisia",
"optionValue": "Tunisia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Turkey",
"optionValue": "Turkey"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Turkmenistan",
"optionValue": "Turkmenistan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Tuvalu",
"optionValue": "Tuvalu"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Uganda",
"optionValue": "Uganda"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Ukraine",
"optionValue": "Ukraine"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "United Arab Emirates",
"optionValue": "United Arab Emirates"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "United Kingdom",
"optionValue": "United Kingdom"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "United States",
"optionValue": "United States"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Uruguay",
"optionValue": "Uruguay"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Uzbekistan",
"optionValue": "Uzbekistan"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Vanuatu",
"optionValue": "Vanuatu"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Vatican City",
"optionValue": "Vatican City"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Venezuela",
"optionValue": "Venezuela"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Yemen",
"optionValue": "Yemen"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Zambia",
"optionValue": "Zambia"
},
{
"id": "male",
"active": True,
"defaultOption": True,
"optionText": "Zimbabwe",
"optionValue": "Zimbabwe"
}
]
# Assign sequential string ids ("1", "2", ...) to every country entry and
# pretty-print each one as JSON with a trailing comma, so the output can be
# pasted back into a Python/JSON list literal.
# enumerate() replaces the original hand-maintained counter (c=1; c=c+1).
for c, te in enumerate(countries, start=1):
    te["id"] = str(c)
    print(json.dumps(te, indent=4), end=',\n')
|
# Time Complexity : O(N + mlogm)
# Space Complexity : O(n)
# Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : No
'''
1. Store sentences and their occurrence counts in a hashmap
2. To sort these sentences according to their occurrence I have used a max heap
3. Maximum priority is given to count; when counts are equal, priority is given to the lexicographically smaller sentence
'''
import heapq
from collections import defaultdict
class AutocompleteSystem:
    """Search autocomplete (LeetCode 642).

    Keeps a running count of every historical sentence.  ``input``
    accumulates the current query one character at a time and returns the
    top-3 matching sentences, ranked by descending count and then by
    lexicographic order.

    Fixes over the original: state is per-instance (it was shared class
    state), the counter is ``defaultdict(int)`` (``defaultdict()`` with no
    factory raises KeyError on ``+=``), the undefined name ``s`` is
    replaced by the dict key, and ``heappush``/``heappop`` are called with
    correct arguments and a correct ordering key.
    """

    def __init__(self, sentences, times):
        """Seed the history: sentences[i] was previously typed times[i] times."""
        self.map_ = defaultdict(int)   # sentence -> occurrence count
        self.input_ = ""               # prefix typed so far
        for sentence, count in zip(sentences, times):
            self.map_[sentence] += count

    def input(self, c):
        """Process one typed character.

        '#' terminates the current sentence: its count is incremented, the
        prefix resets, and [] is returned.  Any other character extends the
        prefix; up to 3 historical sentences with that prefix are returned,
        hottest first, ties broken by the lexicographically smaller one.
        """
        if c == '#':
            self.map_[self.input_] += 1
            self.input_ = ""
            return []
        self.input_ += c
        # (-count, sentence): a min-heap then yields the highest count
        # first and, for equal counts, the lexicographically smaller text.
        matches = [(-count, sentence)
                   for sentence, count in self.map_.items()
                   if sentence.startswith(self.input_)]
        heapq.heapify(matches)
        return [heapq.heappop(matches)[1] for _ in range(min(3, len(matches)))]
|
## 2018/01/12 basic_Tkinter_2
## Entry & Text
import tkinter as tk
# Demo window: an Entry whose text can be inserted into a Text widget,
# either at the text cursor ('insert point') or at a fixed index.
window = tk.Tk()
window.title('my window')
window.geometry('300x150')
# show='●' masks every typed character (password-style echo)
#entry
e = tk.Entry(window,show='●')
e.pack()
def insert_point():
    # insert the Entry's current content at the Text widget's cursor position
    var = e.get()
    t.insert('insert',var)
def insert_end():
    var = e.get()
    #t.insert('end',var)
    # index 1.2 means line 1, character 2; tkinter accepts the float form,
    # though the string '1.2' is the conventional spelling
    t.insert(1.2,var)
b1 = tk.Button(window,text='insert point',width=13,height=2,command=insert_point)
b1.pack()
b2 = tk.Button(window,text='insert end',width=13,height=2,command=insert_end)
b2.pack()
#text
# t is created after the callbacks above; that works because the callbacks
# only look the name up when a button is actually clicked
t = tk.Text(window,height=2)
t.pack()
window.mainloop()
|
"""AppAssure 5 REST API"""
from appassure.api import AppAssureAPI
class IReplicationManagement(AppAssureAPI):
    """Thin wrapper mapping AppAssure replication REST endpoints to
    methods; each method builds a URL and delegates to self.session.request.
    Full documentation online at
    http://docs.appassure.com/display/AA50D/IReplicationManagement
    """
    def setAgentReplicationSettings(self, data, agentId):
        """Set replication settings into an agent with given ID."""
        return self.session.request('replication/agents/%s'
            % (agentId), 'PUT',
            self.getXML(data, 'agentReplicationSettings'))
    def getAgentReplicationSettings(self, agentId):
        """Get replication settings for agent with given ID."""
        return self.session.request('replication/agents/%s'
            % (agentId))
    def getReplicationConfiguration(self):
        """Gets replication configuration."""
        return self.session.request('replication/config')
    def setReplicationConfiguration(self, data):
        """Sets replication configuration."""
        return self.session.request('replication/config', 'PUT',
            self.getXML(data, 'replicationServiceConfig'))
    def getRemoteCores(self, forceRefresh):
        """Gets a list of all of the remote cores this core knows
        about, both master and slave.
        """
        return self.session.request('replication/cores/?forceRefresh=%s'
            % (forceRefresh))
    # NOTE(review): the names and docstrings of the next two methods look
    # swapped relative to each other (this one hits the 'failback' path per
    # its docstring) — confirm the intended direction against the API docs.
    def switchReplicatedAgentToFailover(self, coreId, agentId):
        """Converts failover agent to replicated agent.
        failback?ignoreRunningReplicationJobs={ignoreReplication}.
        """
        return self.session.request('replication/cores/%s/agents/%s/'
            % (coreId, agentId), 'POST')
    def switchFailoverAgentToReplicated(self, coreId, agentId):
        """Converts replicated agent to failover."""
        return self.session.request('replication/cores/%s/agents/%s/failover'
            % (coreId, agentId), 'POST')
    def getRemoteAgentsRecoveryPoints(self, coreId, agentid):
        """Gets the replicated recovery points on a remote slave
        core for the specific agent. This won't work with a master core.
        """
        return self.session.request('replication/cores/%s/agents/%s/recoveryPoints'
            % (coreId, agentid))
    def getCountRemoteAgentsRecoveryPoints(self, coreId, agentid):
        """Gets count of replicated recovery points on a remote
        slave core for the specific agent. This won't work with a master
        core.
        """
        return self.session.request('replication/cores/%s/agents/%s/recoveryPointsCount'
            % (coreId, agentid))
    def getRemoteSlaveRecoveryPoint(self, coreId, agentId, recoveryPointId):
        """Gets the details for a replicated recovery point on a
        remote slave core. This won't work with a master core.
        """
        return self.session.request('replication/cores/%s/agents/%s/rps/%s'
            % (coreId, agentId, recoveryPointId))
    def selectRangeRemoteAgentsRecoveryPoints(self, coreId, agentid, skipCount):
        """Gets the range of replicated recovery points on a
        remote slave core for the specific agent. This won't work with a
        master core. maxCount/{maxCount}/recoveryPoints.
        """
        return self.session.request('replication/cores/%s/agents/%s/skipCount/%s/'
            % (coreId, agentid, skipCount))
    def addAgentsByDemand(self, data, coreId):
        """Add agents to existing pairing by demand."""
        return self.session.request('replication/cores/%s/agents/demand'
            % (coreId), 'POST',
            self.getXML(data, 'addAgentsDemand'))
    def addAgentsByRequest(self, data, coreId):
        """Add agents to existing pairing by request."""
        return self.session.request('replication/cores/%s/agents/request'
            % (coreId), 'POST',
            self.getXML(data, 'addAgentsRequest'))
    def getReplicatedAgentsRecoveryPointsInfo(self, coreId):
        """Gets the list of agents which have recovery points on
        a remote slave core.
        """
        return self.session.request('replication/cores/%s/agents/rpsinfo'
            % (coreId))
    def deletePairing(self, coreId):
        """Delete pairing between master and slave cores.
        {deleteRecoveryPoints}.
        """
        # NOTE(review): the deleteRecoveryPoints query parameter is left
        # without a value — confirm whether a flag argument is missing here.
        return self.session.request('replication/cores/%s/pairing?deleteRecoveryPoints='
            % (coreId))
    def getCoreIdByUrl(self, hostUri):
        """Tests connection to a remote core. Returns CoreId.
        Using Anonymous authentication.
        """
        return self.session.request('replication/cores/%s'
            % (hostUri), 'PUT')
    def getCoreIdByDescriptor(self, data):
        """Tests a core descriptor to validate the ability to
        connect to it. Returns CoreId. Using NTLM authentication.
        """
        return self.session.request('replication/cores/descriptor', 'PUT',
            self.getXML(data, 'remoteCoreDescriptor'))
    def demandPairing(self, data):
        """Instructs this core to send a replication demand to a
        remote core. This operation will require admin credentials on
        the remote core, but if successful will take effect right away.
        Returns slave core Id.
        """
        return self.session.request('replication/cores/pairing/demand', 'POST',
            self.getXML(data, 'remoteCoreReplicationPairingDemand'))
    def requestPairing(self, data):
        """Instructs this core to send a replication request to a
        remote core. Replication will start once the remote core
        approves the request. Returns slave core Id.
        """
        return self.session.request('replication/cores/pairing/request', 'POST',
            self.getXML(data, 'remoteCoreReplicationPairingRequest'))
    def verifyAddAgentsByDemandForExistingCore(self, data, coreId):
        """Verifies whether agents can be safely replicated by
        demand.
        """
        return self.session.request('replication/cores/slave/%s/agents/demand/verify'
            % (coreId), 'POST',
            self.getXML(data, 'addAgentsVerificationByDemand'))
    def verifyAddAgentsByRequestForExistingCore(self, data, coreId):
        """Verifies whether agents can be safely replicated by
        request.
        """
        return self.session.request('replication/cores/slave/%s/agents/request/verify'
            % (coreId), 'POST',
            self.getXML(data, 'addAgentsVerificationByRequest'))
    def verifyAddAgentsByDemand(self, data):
        """Verifies whether agents can be safely replicated by
        demand.
        """
        return self.session.request('replication/cores/slave/agents/demand/verify', 'POST',
            self.getXML(data, 'addAgentsVerificationByDemand'))
    def verifyAddAgentsByRequest(self, data):
        """Verifies whether agents can be safely replicated by
        request.
        """
        return self.session.request('replication/cores/slave/agents/request/verify', 'POST',
            self.getXML(data, 'addAgentsVerificationByRequest'))
    def getRemoteMasterCoresForDemand(self, data):
        """Getting remote masers cores info for current slave
        core.
        """
        return self.session.request('replication/cores/slave/masters', 'PUT',
            self.getXML(data, 'remoteCoreDescriptor'))
    def getRemoteCoreRepositories(self, data):
        """Gets the repositories on a remote core. Admin
        credentials on the remote core are required.
        """
        return self.session.request('replication/cores/slave/repositories', 'PUT',
            self.getXML(data, 'remoteCoreDescriptor'))
    def getAgentRepositoryRelationships(self, slaveCoreId):
        """Gets the repositories on a remote core for agents."""
        return self.session.request('replication/cores/slaves/%s/agentRepositoryRelationships'
            % (slaveCoreId))
    def getRemoteCoreRepositoriesForDemand(self, slaveCoreId):
        """Gets the repositories on a remote core for. Uses
        certificate authentication and works only for demanded core.
        """
        return self.session.request('replication/cores/slaves/%s/pairingdemand/repositories'
            % (slaveCoreId), 'PUT')
    def updateSlaveCoreSettings(self, data, slaveCoreId):
        """Sets remote slave core configuration. This work with a
        master core side only.
        """
        return self.session.request('replication/cores/slaves/%s/settings'
            % (slaveCoreId), 'PUT',
            self.getXML(data, 'updateCoreSettingsRequest'))
    def setRemoteSlaveCoreReplicationPolicy(self, data, slaveCoreId):
        """Sets remote slave core replication policy. This work
        with a master core side only.
        """
        return self.session.request('replication/cores/slaves/%s/settings/policy'
            % (slaveCoreId), 'PUT',
            self.getXML(data, 'replicationPolicy'))
    def getRemoteSlaveCoreReplicationPolicy(self, slaveCoreId):
        """Gets remote slave core replication policy. This work
        with a master core side only.
        """
        return self.session.request('replication/cores/slaves/%s/settings/policy'
            % (slaveCoreId))
    def verifyCorePairingAbilityByDemand(self, data):
        """Tests a core descriptor to validate the ability to
        create pairing to remote core. Returns CoreId. Using NTLM
        authentication.
        """
        return self.session.request('replication/cores/verify/demand', 'PUT',
            self.getXML(data, 'remoteCoreDescriptor'))
    def verifyCorePairingAbilityByRequest(self, hostUri):
        """Tests a core descriptor to validate the ability to
        create pairing to remote core. Returns CoreId. Using anonymous
        authentication.
        """
        return self.session.request('replication/cores/verify/request/%s'
            % (hostUri), 'PUT')
    def forceReplication(self, data):
        """Force replication for agents."""
        return self.session.request('replication/force', 'PUT',
            self.getXML(data, 'forceReplicationRequest'))
    def deleteAgentFromMaster(self, coreId, agentId):
        """Delete agent from replication relationship from
        slave's side only. Actual replicated and protected agent on
        master and slave cores stay available.
        ?deleteRecoveryPoints={deleteRecoveryPoints}.
        """
        # NOTE(review): the docstring mentions a deleteRecoveryPoints query
        # parameter, but the URL never includes it — verify the endpoint.
        return self.session.request('replication/masters/%s/replicatedagents/%s/'
            % (coreId, agentId))
    def deleteMasterCore(self, deleteRecoveryPoints):
        """Delete remote master core from replication."""
        # NOTE(review): BUG — the format string expects two values (a core
        # id and the flag) but only one is supplied, so this raises
        # TypeError when called; a coreId parameter appears to be missing.
        return self.session.request('replication/masters/%s?deleteRecoveryPoints=%s'
            % (deleteRecoveryPoints))
    def setAgentReplicationPauseConfigurationForMasterCores(self, data, masterCoreId, agentId):
        """Pauses replication for agent."""
        return self.session.request('replication/masters/%s/agents/%s/pauseConfiguration'
            % (masterCoreId, agentId), 'POST',
            self.getXML(data, 'replicationPauseConfiguration'))
    def requestForceReplication(self, data):
        """Request force replication."""
        return self.session.request('replication/requestForceReplication', 'PUT',
            self.getXML(data, 'forceReplicationRequest'))
    def ignorePairingRequest(self, requestId):
        """Deletes a pending replication request without
        responding to it.
        """
        return self.session.request('replication/requests/%s'
            % (requestId))
    def respondToPairingRequest(self, data, requestId):
        """Responds to a pending replication requests."""
        return self.session.request('replication/requests/pairing/%s'
            % (requestId), 'POST',
            self.getXML(data, 'pendingReplicationPairingResponse'))
    def respondToAddAgentsByRequest(self, data, requestId):
        """Responds to a pending agents from replication requests."""
        return self.session.request('replication/requests/pairing/%s/agents'
            % (requestId), 'POST',
            self.getXML(data, 'pendingReplicationAgents'))
    def getPendingPairingRequest(self, requestId):
        """Gets a the pending request for a specific request ID."""
        return self.session.request('replication/requests/pairing/pending/%s'
            % (requestId))
    def getPendingPairingRequests(self):
        """Gets a list of all pending replication pairing
        requests received by this core from remote master cores.
        """
        return self.session.request('replication/requests/pending')
    def deleteSlaveCore(self, coreId):
        """Delete remote slave core from replication."""
        return self.session.request('replication/slaves/%s'
            % (coreId))
    def deleteAgentFromSlave(self, coreId, agentId):
        """Delete agent from replication relationship from
        master's side only. Actual replicated and protected agent on
        master and slave cores stay available.
        """
        return self.session.request('replication/slaves/%s/replicatedagents/%s'
            % (coreId, agentId))
    def setAgentReplicationPauseConfiguration(self, data, slaveCoreId, agentId):
        """Pauses replication for agent."""
        return self.session.request('replication/slaves/%s/agents/%s/pauseConfiguration'
            % (slaveCoreId, agentId), 'POST',
            self.getXML(data, 'replicationPauseConfiguration'))
    def setAgentReplicationPauseConfigurationForSlaveCores(self, data, agentId):
        """Pauses replication for agent."""
        return self.session.request('replication/slaves/agents/%s/pauseConfiguration'
            % (agentId), 'POST',
            self.getXML(data, 'replicationPauseConfiguration'))
|
\1、什么是线程
# 在传统操作系统中,每个进程有一个地址空间,而且默认就有一个控制线程。线程才是真正的执行单位。
# 线程顾名思义,就是一条流水线工作的过程,一条流水线必须属于一个车间,一个车间的工作过程是一个进程。
# 车间负责把资源整合到一起,是一个资源单位,而一个车间内至少有一个流水线。流水线的工作需要电源,电源就相当于cpu。
# 所以,进程只是用来把资源集中到一起(进程只是一个资源单位,或者说资源集合),而线程才是cpu上的执行单位。
# 多线程(即多个控制线程)的概念是,在一个进程中存在多个控制线程,多个控制线程共享该进程的地址空间,相当于一个车间内有多条流水线,都共用一个车间的资源。
# 例如,北京地铁与上海地铁是不同的进程,而北京地铁里的13号线是一个线程,北京地铁所有的线路共享北京地铁所有的资源,比如所有的乘客可以被所有线路拉。
\2、线程的创建开销小
# 创建进程的开销要远大于线程? 是的
# 如果我们的软件是一个工厂,该工厂有多条流水线,流水线工作需要电源,电源只有一个即cpu(单核cpu)。一个车间就是一个进程,一个车间至少一条流水线(一个进程至少有一个线程)
# 创建一个进程,就是创建一个车间(申请空间,在该空间内建至少一条流水线),而建线程,就只是在一个车间内造一条流水线,无需申请空间,所以创建开销小。创建一个车间肯定比创建一条流水线要慢。
# 进程之间是竞争关系,线程之间是协作关系? 是的
# 不同车间之间是竞争/抢电源的关系,竞争(不同的进程直接是竞争关系,是不同的程序员写的程序运行的,迅雷抢占其他进程的网速,360把其他进程当做病毒干死)
# 一个车间的不同流水线式协同工作的关系(同一个进程的线程之间是合作关系,是同一个程序写的程序内开启动,迅雷内的线程是合作关系,不会自己干自己)
\3、线程与进程的区别
# 线程共享进程的地址空间;进程有自己的地址空间。
# 线程可以直接访问其进程的数据段;进程拥有父进程的数据段的自己的副本。
# 线程可以直接与进程中的其他线程通信;进程必须使用进程间通信来与同级进程通信。
# 新线程很容易创建;新进程需要父进程的复制。
# 线程可以对同一进程的线程进行相当大的控制;进程只能对子进程进行控制。
# 主线程的更改(取消、优先级更改等)可能会影响进程中其他线程的行为;对父进程的更改不影响子进程。
\4、为何要用多线程
# 多线程指的是,在一个进程中开启多个线程,简单的讲:如果多个任务共用一块地址空间,那么必须在一个进程内开启多个线程。详细的讲分为4点:
1. 多线程共享一个进程的地址空间
2. 线程比进程更轻量级,线程比进程更容易创建可撤销,在许多操作系统中,创建一个线程比创建一个进程要快10-100倍,在有大量线程需要动态和快速修改时,这一特性很有用
3. 若多个线程都是cpu密集型的,那么并不能获得性能上的增强,但是如果存在大量的计算和大量的I/O处理,拥有多个线程允许这些活动彼此重叠运行,从而会加快程序执行的速度。
4. 在多cpu系统中,为了最大限度的利用多核,可以开启多个线程,比开进程开销要小的多。(这一条并不适用于python)
\5、线程相关的其他方法
# Thread实例对象的方法
# isAlive(): 返回线程是否活动的。
# getName(): 返回线程名。
# setName(): 设置线程名。
# threading模块提供的一些方法:
# threading.currentThread(): 返回当前的线程变量。
# threading.enumerate(): 返回一个包含正在运行的线程的list。正在运行指线程启动后、结束前,不包括启动前和终止后的线程。
# threading.activeCount(): 返回正在运行的线程数量,与len(threading.enumerate())有相同的结果。
\使用多线程执行
from threading import Thread # Thread class from the threading package
from multiprocessing import Process # Process class from the multiprocessing package
import os
import time
import random
def task():
    print('%s is runing' %os.getpid()) # a thread's pid is simply its process's pid
    time.sleep(random.randint(1,3))
    print('%s is done' %os.getpid())
if __name__ == '__main__':
    t=Thread(target=task,) # start a thread: startup is so fast that the thread's code runs before the main line below is printed
    t.start()
    print('主',os.getpid())
# output:
# 46950 is runing
# 主 46950
# 46950 is done
\使用多进程执行
from threading import Thread
from multiprocessing import Process
import os
import time
import random
def task():
    print('%s is runing' %os.getpid())
    time.sleep(random.randint(1,3))
    print('%s is done' %os.getpid())
if __name__ == '__main__':
    t=Process(target=task,) # start a child process: the parent prints first — process startup is much slower than thread startup
    t.start()
    print('主',os.getpid())
# output:
# 主 46656
# 46658 is runing
# 46658 is done
\使用线程类继承执行
from threading import Thread
from multiprocessing import Process
import os
import time
import random
class Mythread(Thread): # subclass threading.Thread and override run()
    def __init__(self,name):
        super().__init__()
        self.name=name
    def run(self):
        # run() is invoked by start(); same pid as the parent process
        print('%s is runing' %os.getpid())
        time.sleep(random.randint(1,3))
        print('%s is done' %os.getpid())
if __name__ == '__main__':
    t=Mythread('线程1')
    t.start()
    print('主',os.getpid())
# output:
# 46630 is runing
# 主 46630
# 46630 is done
\例子(瞅一瞅pid):
from threading import Thread
from multiprocessing import Process
import os
def work():
    print('hello',os.getpid(),end="\n")
if __name__ == '__main__':
    #part1: threads opened inside the main process all share the main process's pid
    t1=Thread(target=work)
    t2=Thread(target=work)
    t1.start()
    t2.start()
    print('主线程/主进程pid',os.getpid())
    #part2: each spawned process gets its own distinct pid
    p1=Process(target=work)
    p2=Process(target=work)
    p1.start()
    p2.start()
    print('主线程/主进程pid',os.getpid())
'''
hello 55734
hello 55734
主线程/主进程pid 55734
主线程/主进程pid 55734
hello 55735
hello 55736
''' |
# A list holding repeated elements (numbers).
skaiciai = [1, 2, 3, 4, 67, 132, 3, 1, 1, -1 , -1, 1.3, 1.3 , 2.2]
# Count each element's occurrences and drop duplicates, keeping exactly one
# copy of each value.
# BUGFIX: the original removed items from `skaiciai` while iterating over
# it, which makes the iterator skip elements; iterate over a snapshot
# (list(skaiciai)) instead so every element is visited.
for elementas in list(skaiciai):
    pasikartojimai = skaiciai.count(elementas)
    # while more than one copy remains, remove this (first) occurrence
    if pasikartojimai > 1:
        skaiciai.remove(elementas)
    print(elementas, pasikartojimai, sep='\t\t')
print()
print(skaiciai)
# Sort the list (works because all elements are int/float).
skaiciai.sort()
print(skaiciai)
# next task: sort by occurrence count.
# toliau uzduotis surusiuoti pagal pasikartojima.. |
import pandas as pd
import pickle
import numpy as np
import torch
# Loading DataSet
db = pd.read_csv('Pickles/tadpole-preprocessed - Tadpole dataset - Sheet1.csv')
# Diagnosis labels (DX_bl) are used as the prediction targets.
y_pred = db["DX_bl"].to_numpy()
to_add = 0
add_dict = dict()
# Converting predictions to numbers: each distinct label gets the next
# integer id in order of first appearance.
for i in range(len(y_pred)):
    if y_pred[i] not in add_dict:
        add_dict[y_pred[i]] = to_add
        to_add += 1
    y_pred[i] = add_dict[y_pred[i]]
# Getting feature names (whitespace-separated, read from stdin)
inp = input().split()
feats_to_add = []
for feat in inp:
    # NOTE(review): this wraps each name in literal quote characters
    # (e.g. "'AGE'"); db[feats_to_add] below only works if the CSV headers
    # themselves contain those quotes — confirm against the dataset.
    feats_to_add.append("\'" + feat + "\'")
# Removing rows containing Empty values
# NOTE(review): np.delete here removes entries from the *feature-name*
# array at these indices, not dataframe rows as the comment says — verify.
feats_to_add = np.delete(feats_to_add, [113, 202, 222, 286, 313, 393], axis=0)
features = db[feats_to_add].to_numpy()
# Creating edges: connect two subjects (adj[i, j] = 1) iff they share the
# same APOE4 value; adj is a dense 0/1 adjacency matrix.
edge_features = db["APOE4"].to_numpy()
adj = np.zeros((edge_features.shape[0], edge_features.shape[0]))
print(adj[0].shape)
for i in range(edge_features.shape[0]):
    for j in range(edge_features.shape[0]):
        if i != j:
            if edge_features[i] == edge_features[j]:
                adj[i, j] = 1
# Saving data: adjacency, node features, and integer labels as pickles.
with open('Pickles/apoe_adj.pickle', 'wb') as handle:
    pickle.dump(adj, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('Pickles/feats.pickle', 'wb') as handle:
    pickle.dump(features, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('Pickles/preds.pickle', 'wb') as handle:
    pickle.dump(y_pred, handle, protocol=pickle.HIGHEST_PROTOCOL)
|
import sys, inspect, hashlib
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from exceptions import *
class BoxList(list):
    ''' variant of Python's list to store Box contents '''
    # methods we need; then we disable the rest - throw error if called
    implement = [
        '__new__', '__init__', '__str__', '__repr__', '__doc__', '__dir__',
        '__iter__', '__len__', '__contains__', '__getitem__',
        '__getattribute__','__eq__',
        'index', 'count', 'insert' ]
    # Generate a stub for every list method NOT in `implement` that raises
    # BoxImplementationError.  exec() at class-body scope defines each stub
    # directly into the class namespace.  Note: `code` is always None
    # (exec returns None), and the append/insert/remove/clear defs further
    # down override their generated stubs because the class body executes
    # top to bottom.
    for i in dir(list):
        if i not in implement:
            code = exec(f'def {i}(self): raise BoxImplementationError(' +
                f'"{i} is not implemented")')
    @classmethod
    def internal(cls, caller):
        ''' check if request is from inside a Box-type subclass '''
        # nb. diff to that in VesselABC
        # NOTE(review): depends on a Box class being resolvable at call
        # time (defined elsewhere in the package); passes only when
        # `caller` names a Box attribute that is not a global in this
        # frame — confirm against Box's definition.
        frame = sys._getframe()
        if caller not in frame.f_globals and \
            caller in dir(Box):
            return True
        return False
    def append(self, item):
        # we get caller i.e. calling func here instead of inside 'internal'
        # here: caller = method that called append e.g. 'put' <- what we want
        # inside 'internal': would be append <- not what we want
        caller = sys._getframe().f_back.f_code.co_name
        if BoxList.internal(caller):
            super().append(item)
    def insert(self, position, item):
        # silently a no-op unless called from inside a Box-type subclass
        caller = sys._getframe().f_back.f_code.co_name
        if BoxList.internal(caller):
            super().insert(position, item)
    def remove(self, item):
        caller = sys._getframe().f_back.f_code.co_name
        if BoxList.internal(caller):
            super().remove(item)
    def clear(self):
        caller = sys._getframe().f_back.f_code.co_name
        if BoxList.internal(caller):
            super().clear()
import numpy as np
import time as time
import os
from datetime import datetime
from random import shuffle
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
def setup():
    """Reset Keras state and install a TF1-compat session with GPU memory
    growth enabled (avoids TensorFlow grabbing all GPU memory up front);
    device placement is logged for debugging."""
    tf.compat.v1.keras.backend.clear_session()
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    config.log_device_placement = True
    sess = tf.compat.v1.Session(config=config)
    tf.compat.v1.keras.backend.set_session(sess)
setup()
# TensorBoard logging for the runs below.
log_dir = os.path.join("logs","scalars","vgg16_model",)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,histogram_freq=1,write_images=True)
# Unix timestamp of the previous run; identifies the checkpoint to resume from.
LAST_RUN = 1583693488
epochs = 30
files = 149 # number of training_data_<i>.npy shards on disk
v = int(time.time()) # timestamp that versions this run's checkpoint
WIDTH = 480
HEIGHT = 270
MODEL_NAME = 'model_gta_vgg16_v-{}.h5'.format(v)
PREV_MODEL_NAME = 'model_gta_vgg16_v-{}.h5'.format(LAST_RUN)
for e in range(epochs):
    # Fresh session + reload of the last checkpoint at the start of each epoch.
    setup()
    model = load_model(PREV_MODEL_NAME)
    #model.compile(Adam(lr=0.001),loss='categorical_crossentropy',metrics=['accuracy'])
    # Visit the data shards in a new random order every epoch.
    data_order = [i for i in range(1,files+1)]
    shuffle(data_order)
    for count,i in enumerate(data_order):
        print(count)
        print(i)
        file_name = 'training_data_{}.npy'.format(i)
        train_data = np.load(file_name,allow_pickle=True)
        # Last 100 samples of each shard are held out as validation data.
        train_set = train_data[:-100]
        test_set = train_data[-100:]
        # note: the comprehensions below reuse the name i, shadowing the shard index
        X = np.array([i[0] for i in train_set]).reshape(-1,WIDTH,HEIGHT,3)
        Y = np.array([i[1] for i in train_set])
        test_x = np.array([i[0] for i in test_set]).reshape(-1,WIDTH,HEIGHT,3)
        test_y = np.array([i[1] for i in test_set])
        model.fit(X,Y,batch_size=8,epochs=1,validation_data=(test_x,test_y),callbacks=[tensorboard_callback],shuffle=True)
    model.save(MODEL_NAME)
    # The checkpoint just written becomes the resume point for the next epoch.
    LAST_RUN = v
    v = int(time.time())
    MODEL_NAME = 'model_gta_vgg16_v-{}.h5'.format(v)
    PREV_MODEL_NAME = 'model_gta_vgg16_v-{}.h5'.format(LAST_RUN)
#На основе нейрона из видео https://www.youtube.com/watch?v=SEukWq_e3Hs сделать однослойный перцептрон
import numpy as np
import matplotlib.pyplot as plt
import os
# Training inputs (stacked row vectors) and per-neuron target bits;
# populated later by the training loop that reads the image folder.
D = None
Y = None
# Weights: 5 output neurons, 25 inputs each (5x5 binary images).
w = np.zeros((5, 25))
# Learning rate and bias.
a = 0.2
b = -0.4
def c(x):
    """Step activation: 1 for positive input, otherwise 0."""
    return 1 if x > 0 else 0
def f(x, i):
    """Output bit of neuron i for input vector x."""
    return c(b + np.sum(x @ w[i]))
def train(i):
    """Run one learning pass for neuron i over the training set.

    Returns True when any weight changed (training must continue).
    """
    global w
    before = w[i].copy()
    for sample, target in zip(D, Y[i]):
        w[i] += a * (target - f(sample, i)) * sample
    return (w[i] != before).any()
def net(xs):
    """Concatenate every neuron's output bit into a binary string."""
    return ''.join(str(f(xs, neuron)) for neuron in range(np.shape(w)[0]))
# load every training image from the training folder
path_train='2_2_train/'
for name in os.listdir(path_train):
    img = plt.imread('{}{}'.format(path_train, name))
    #print(img)
    xs = (np.dot(img[...,:3], [1, 1, 1])).astype(int).flatten() # to grayscale by dotting the RGB slice with [1, 1, 1]; flatten the matrix into a vector
    #print(xs)
    #print(type(xs))
    ys=np.binary_repr(ord(name.split('.')[0]))[2:]# drop the leading '10' (identical for every letter) to reduce dimensionality
    #ys=int(ys)
    #ys1=np.binary_repr(ord(name.split('.')[0])).split(
    #ys1=np.binary_repr(ord(name.split('.')[0]))
    ys = ' '.join(ys)
    ys=np.array(ys.split(),dtype=int)
    #print(type(ys1))
    #ys=letter_to_bin(name.split('.')[0])
    #ys=(np.array(int(ys1)))
    # TODO: left-pad with zeros
    #print(ys)
    #print(type(ys))
    #print(chr((ord(name.split('.')[0]))))
    if D is None:
        D = xs
        Y=ys
    else:
        D = np.vstack((D, xs))
        Y = np.vstack((Y, ys))
# transpose so Y[i] is the target bit-row for neuron i across all samples
Y = np.swapaxes(Y, 0, 1)
#print(Y)
#print(D)
#print('---')
#print(len(Y))
# train every neuron until its weights stop changing
for i in range(np.shape(w)[0]):
    while train(i):
        print(w[i])
path_test = '2_2_test/'
for name in os.listdir(path_test):
    img = plt.imread('{}{}'.format(path_test, name))
    #print(img)
    ac = np.binary_repr(ord(name.split('.')[0]))[2:]
    print("Бинарный код тестовой буквы 10",ac)
    xs = np.dot(img[..., :3], [1, 1, 1]) .flatten() # to grayscale; flatten the matrix into a vector
    result = net(xs)
    print("Бинарный код распознанной буквы 10",result)
|
from django.urls import path
from . import views
from rest_framework import routers
# URL configuration: classic function/class-based endpoints plus a DRF
# router-backed v2 API for the ToDo viewset.
urlpatterns = [
    path('', views.index_page, name='index_page'), # index page
    path('todos/', views.todos, name='todo'),
    path('api/v1/todos/', views.Todos.as_view(), name='api_todo'),
    path('api/v1/todos/<int:pk>/', views.ToDoDetail.as_view(), name='api_todo_detail')
]
# v2 API: let the router generate list/detail routes for the viewset.
router = routers.SimpleRouter()
router.register('api/v2/todos', views.ToDoViewSet)
urlpatterns += router.urls
|
import schedule
import threading
import time
# this is a class which uses inheritance to act as a normal Scheduler,
# but also can run_continuously() in another thread
class ContinuousScheduler(schedule.Scheduler):
    """schedule.Scheduler subclass that can also run its pending jobs from a
    background thread via run_continuously()."""
    def run_continuously(self, interval=1):
        """Continuously run, while executing pending jobs at each elapsed
        time interval.
        @return cease_continuous_run: threading.Event which can be set to
        cease continuous run.
        Please note that it is *intended behavior that run_continuously()
        does not run missed jobs*. For example, if you've registered a job
        that should run every minute and you set a continuous run interval
        of one hour then your job won't be run 60 times at each interval but
        only once.
        """
        cease_continuous_run = threading.Event()
        # Inner thread class: closes over `self`, `interval` and the stop
        # event rather than taking constructor arguments.
        class ScheduleThread(threading.Thread):
            @classmethod
            def run(cls):
                # I've extended this a bit by adding self.jobs is None
                # now it will stop running if there are no jobs stored on this schedule
                # (the loop exits permanently once the job list is empty).
                while not cease_continuous_run.is_set() and self.jobs:
                    # for debugging
                    # print("ccr_flag: {0}, no. of jobs: {1}".format(cease_continuous_run.is_set(), len(self.jobs)))
                    self.run_pending()
                    time.sleep(interval)
        # NOTE(review): the thread is not marked daemon, so it keeps the
        # process alive until the event is set or the job list empties.
        continuous_thread = ScheduleThread()
        continuous_thread.start()
        return cease_continuous_run
''' # example using this custom scheduler that can be run in a separate thread
your_schedule = ContinuousScheduler()
your_schedule.every().day.do(print)
# it returns a threading.Event when you start it.
halt_schedule_flag = your_schedule.run_continuously()
# you can now do whatever else you like here while that runs
# if your main script doesn't stop the background thread, it will keep running
# and the main script will have to wait forever for it
# if you want to stop it running, just set the flag using set()
halt_schedule_flag.set()
# I've added another way you can stop the schedule to the class above
# if all the jobs are gone it stops, and you can remove all jobs with clear()
your_schedule.clear()
# the third way to empty the schedule is by using Single Run Jobs only
# single run jobs return schedule.CancelJob
def job_that_executes_once():
# Do some work ...
print("I'm only going to run once!")
return schedule.CancelJob
# using a different schedule for this example to avoid some threading issues
another_schedule = ContinuousScheduler()
another_schedule.every(5).seconds.do(job_that_executes_once)
halt_schedule_flag = another_schedule.run_continuously()
'''
|
from django.urls import path
from .views import (
    TaskListView,
    TaskDetailView
)
# Task URL configuration: list view at the root, detail view keyed by pk.
urlpatterns = [
    path('', TaskListView.as_view(), name='task-list'),
    # NOTE(review): '<pk>/' captures pk as a string; '<int:pk>/' would be
    # stricter and reject non-numeric ids at routing time.
    path('<pk>/', TaskDetailView.as_view(), name='task-detail'),
]
import numpy as np
import tensorflow as tf
from functools import partial
class Actor(object):
    """Deterministic policy ("actor") network built with the TF1 graph API.

    Maps an observation batch to actions in (-1, 1)**n_action through a
    ReLU MLP (128 -> 128 -> 64) with a tanh output layer, and is trained by
    applying action-gradients supplied externally (the minus sign in
    build_train turns the update into gradient ascent).
    """
    def __init__(self, n_observation, n_action, name='actor_net'):
        # n_observation/n_action: sizes of the observation and action vectors.
        # name: variable-scope name used to isolate this net's variables.
        self.n_observation = n_observation
        self.n_action = n_action
        self.name = name
        # The tf.Session is injected later via set_session().
        self.sess = None
        self.build_model()
        self.build_train()
    def build_model(self):
        """Build the forward graph: observation placeholder -> action op."""
        activation = tf.nn.relu
        kernel_initializer = tf.truncated_normal_initializer(stddev=0.0001)
        # l2 factor is 0.0, so the regularizer is effectively disabled.
        kernel_regularizer = tf.contrib.layers.l2_regularizer(0.0)
        default_dense = partial(tf.layers.dense, \
                               activation=activation, \
                               kernel_initializer=kernel_initializer, \
                               kernel_regularizer=kernel_regularizer)
        with tf.variable_scope(self.name):
            observation = tf.placeholder(tf.float32, shape=[None, self.n_observation])
            hid1 = default_dense(observation, 128)
            #hid1 = tf.layers.batch_normalization(hid1)
            # NOTE(review): tf.layers.dropout's second positional arg is the
            # *drop* rate (0.7 drops 70%), and with the default
            # training=False these dropout layers are identity at run time —
            # confirm this is intended.
            hid1 = tf.layers.dropout(hid1, 0.7)
            hid2 = default_dense(hid1, 128)
            #hid2 = tf.layers.batch_normalization(hid2)
            hid2 = tf.layers.dropout(hid2, 0.7)
            hid3 = default_dense(hid2, 64)
            # tanh squashes actions into (-1, 1); no bias on the output layer.
            action = default_dense(hid3, self.n_action, activation=tf.nn.tanh, use_bias=False)
        trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
        self.observation, self.action, self.trainable_vars = observation, action, trainable_vars
    def build_train(self, learning_rate=0.0001):
        """Build the update op from externally supplied action gradients."""
        with tf.variable_scope(self.name) as scope:
            action_grads = tf.placeholder(tf.float32, [None, self.n_action])
            #with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            # Negated grads => gradient *ascent* along the supplied direction.
            var_grads = tf.gradients(self.action, self.trainable_vars, -action_grads)
            train_op = tf.train.AdamOptimizer(learning_rate).apply_gradients(zip(var_grads, self.trainable_vars))
        self.action_grads, self.train_op = action_grads, train_op
    def predict_action(self, obs_batch):
        # Forward pass: evaluate the action op for a batch of observations.
        return self.action.eval(session=self.sess, feed_dict={self.observation: obs_batch})
    def train(self, obs_batch, action_grads):
        # Average the supplied gradients over the batch before applying.
        batch_size = len(action_grads)
        self.train_op.run(session=self.sess,
                          feed_dict={self.observation: obs_batch, self.action_grads: action_grads / batch_size})
    def set_session(self, sess):
        # Inject the shared tf.Session used by predict_action()/train().
        self.sess = sess
    def get_trainable_dict(self):
        # Keys are variable names with the scope-name prefix stripped, so
        # nets with different scope names can be matched variable-by-variable
        # (e.g. for target-network syncing — TODO confirm against callers).
        return {var.name[len(self.name):]: var for var in self.trainable_vars}
import unittest
import json
from collections import OrderedDict
from nltk import pos_tag
from testResolvit import WordFrequencyAnalyzer
class Test(unittest.TestCase):
    """Unit tests for WordFrequencyAnalyzer: stopword removal, punctuation
    stripping, lemmatization, and the end-to-end JSON statistics output."""
    def setUp(self):
        # Fresh analyzer instance for each test.
        self.wfa = WordFrequencyAnalyzer()
    def test_remove_stopwords(self):
        # The stopword "be" is dropped; content words survive.
        result = self.wfa.removeStopwords(["be", "honest"])
        self.assertEqual( result, ["honest"])
    def test_remove_stopwords_From_Empty_List(self):
        # Empty input stays empty.
        result = self.wfa.removeStopwords([])
        self.assertEqual( result, [])
    def test_remove_punctuation(self):
        # Punctuation is removed even from inside a token ("hones.t").
        result = self.wfa.removePunct(["be?", "hones.t"])
        self.assertEqual( result, ["be", "honest"])
    def test_lemmatize_Text_From_IS_to_BE_And_Plural(self):
        tagged = pos_tag( "This is so nice dogs".split() ) #After removing punctuation
        result = self.wfa.lemmatizeText(tagged)
        # "is" -> "be", plural "dogs" -> "dog", output lowercased.
        self.assertEqual( result, ["this", "be", "so", "nice", "dog"])
    def test_lemmatize_Text_unique_words(self):
        tagged = pos_tag( "This can be a test".split() ) #After removing punctuation
        result = self.wfa.lemmatizeText(tagged)
        # Already-base-form words pass through unchanged (lowercased).
        self.assertEqual( result, ["this", "can", "be", "a", "test"])
    def test_analyze_Text_And_Get_Stats_OK(self):
        # End-to-end: words are reported alphabetically with their counts
        # and the indexes of the sentences they occur in, as indented JSON.
        text = "Dogs are wonderful. My dog is the best."
        resultList = [
        {
            "word": "best",
            "total-occurances": 1,
            "sentence-indexes": "[1]"
        },
        {
            "word": "dog", #From Dogs and dog
            "total-occurances": 2,
            "sentence-indexes": "[0],[1]"
        },
        {
            "word": "my",
            "total-occurances": 1,
            "sentence-indexes": "[1]"
        },
        {
            "word": "wonderful",
            "total-occurances": 1,
            "sentence-indexes": "[0]"
        }]
        # Reverse-sort each dict's keys to match the analyzer's key order.
        ordList = [OrderedDict(sorted(d.items(), key=lambda t: t[0], reverse=True)) for d in resultList]
        jsonOut = {"results": ordList } # "The" word is removed
        result = self.wfa.analyzeTextAndGetStats(text)
        self.assertEqual(result, json.dumps(jsonOut, indent=4))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#REST API:
from flask import Flask
# Create the Flask application for this module.
app = Flask(__name__)
@app.route("/patient_report/<string:id>/<int:du>")
def hello(id , du):
    """Build and upload an anxiety report for patient *id* covering *du* days.

    Converted from a Jupyter notebook (hence the ``# In[n]:`` cell markers
    and the bare expressions that only displayed output in the notebook).
    Side effects:
      * downloads the patient's Fitbit sleep/steps/heart-rate JSON per day
        from Firebase Storage,
      * merges in visited locations from Firestore,
      * labels each hourly row with an anxiety level via the external
        ``trainData`` model and writes the levels back to Firestore,
      * renders anxiety charts and a location table with matplotlib,
      * assembles a PDF with reportlab and uploads everything to Storage.
    Returns the literal string "HI".

    NOTE(review): ``id`` shadows the builtin; all imports run on every
    request; the local ``app`` below shadows the module-level Flask app
    inside this function — notebook-conversion artifacts worth cleaning up.
    """
    #!/usr/bin/env python
    # coding: utf-8
    # In[1]:
    import datetime
    import pandas as pd
    import numpy as np
    import firebase_admin
    from firebase_admin import credentials
    from firebase_admin import firestore
    from firebase_admin import storage
    import pyrebase
    from datetime import date, timedelta
    import urllib.request, json
    import time
    #get_ipython().run_line_magic('matplotlib', 'inline')
    from matplotlib import pyplot as plt
    import matplotlib.dates as mdates
    import os
    import csv
    from IPython.display import display
    from Model import trainData
    import random
    #from google.cloud import storage
    from matplotlib.patches import Ellipse
    import seaborn as sns
    # signal processing
    from scipy import signal
    from scipy.ndimage import label
    from scipy.stats import zscore
    from scipy.interpolate import interp1d
    from scipy.integrate import trapz
    # misc
    import warnings
    #generate pdf
    from reportlab.pdfgen import canvas
    from reportlab.lib.colors import Color, lightblue, black, HexColor
    # In[2]:
    # NOTE(review): hard-coded absolute credential path breaks on any other
    # machine, and initialize_app raises if the '[DEFAULT]' app already
    # exists (e.g. on a second request to this endpoint).
    cred = credentials.Certificate("/Users/raghadaziz/Desktop/GP2/SereneReports/SereneReport/serene-firebase-adminsdk.json")
    app = firebase_admin.initialize_app(cred , {
        'storageBucket': 'serene-2dfd6.appspot.com',
    }, name='[DEFAULT]')
    db = firestore.client()
    # In[3]:
    duration = du
    userID = id #"UqTdL3T7MteuQHBe1aNfSE9u0Na2"
    # In[4]:
    today = datetime.datetime.now()
    timestamp = today.strftime("%Y-%m-%d %H:%M:%S")
    bucket = storage.bucket(app=app)
    # ## Get data from storage and get list of dates
    # In[5]:
    # Build the list of ISO dates covered by the report (ending yesterday).
    dates =[]
    for x in range(0 ,duration):
        today=date.today()
        yesterday = today - datetime.timedelta(days=1)
        start_date = (yesterday-timedelta(days=duration-x)).isoformat()
        dates.append(start_date)
    # In[6]:
    df= pd.DataFrame()
    # loop through the storage and get the data
    sleep =[]
    for x in range(0 ,len(dates)):
        #Sleep
        blob = bucket.blob(userID+"/fitbitData/"+dates[x]+"/"+dates[x]+"-sleep.json")
        # download the file
        u = blob.generate_signed_url(datetime.timedelta(seconds=300), method='GET')
        # NOTE(review): the bare excepts below silently keep the previous
        # day's sleepMinutes/steps/heart_rate (or raise NameError later if
        # the very first download fails) — confirm this fallback is intended.
        try:
            with urllib.request.urlopen(u) as url:
                data = json.loads(url.read().decode())
                sleepMinutes = data['summary']["totalMinutesAsleep"]
        except:
            pass
        #Activity (Steps)
        blob = bucket.blob(userID+"/fitbitData/"+dates[x]+"/"+dates[x]+"-activity.json")
        # download the file
        u = blob.generate_signed_url(datetime.timedelta(seconds=300), method='GET')
        try:
            with urllib.request.urlopen(u) as url:
                data = json.loads(url.read().decode())
                steps = data['summary']["steps"]
        except:
            pass
        #heartrate
        blob = bucket.blob(userID+"/fitbitData/"+dates[x]+"/"+dates[x]+"-heartrate.json")
        u = blob.generate_signed_url(datetime.timedelta(seconds=300), method='GET')
        try:
            with urllib.request.urlopen(u) as url:
                data = json.loads(url.read().decode())
                # Average intraday heart rate per hour, then attach that
                # day's sleep minutes, step total and date to every row.
                df_heartrate = pd.DataFrame(data['activities-heart-intraday']['dataset'])
                df_heartrate.time.apply(str)
                df_heartrate['time'] = pd.to_datetime(df_heartrate['time'])
                df_heartrate['hour'] = df_heartrate['time'].apply(lambda time: time.strftime('%H'))
                df_heartrate.drop(['time'],axis=1, inplace = True)
                heart_rate = df_heartrate.groupby(["hour"], as_index=False).mean()
                heart_rate['sleepMin'] = sleepMinutes
                heart_rate['TotalSteps'] = steps
                heart_rate['date'] = dates[x]
                heart_rate = heart_rate.astype({"hour": int})
        except:
            pass
        # append dataframe
        df = df.append(heart_rate, ignore_index = True)
    # In[7]:
    # Notebook leftover: bare expression, no effect outside a notebook cell.
    df
    # ### Get user location
    # In[8]:
    # get location from database
    loc_df = pd.DataFrame()
    locID = []
    locations = db.collection(u'PatientLocations').where(u'patientID', u'==', userID ).stream()
    for location in locations:
        loc = location.to_dict()
        locID.append(location.id)
        loc_df = loc_df.append(pd.DataFrame(loc,index=[0]),ignore_index=True)
    loc_df['id'] = locID
    # In[9]:
    loc_df.drop(['anxietyLevel', 'lat','lng', 'patientID' ], axis=1, inplace = True)
    # In[10]:
    # Normalize location timestamps into separate date and hour columns so
    # they can be joined with the hourly heart-rate frame.
    loc_df.time.apply(str)
    loc_df['time'] = pd.to_datetime(loc_df['time'])
    loc_df['date'] = pd.to_datetime(loc_df['time'], format='%Y:%M:%D').dt.date
    loc_df['hour'] = loc_df['time'].apply(lambda time: time.strftime('%H'))
    loc_df.drop(['time'], axis=1, inplace = True)
    loc_df.hour = loc_df.hour.astype(int)
    loc_df.date = loc_df.date.astype(str)
    df.date = df.date.astype(str)
    # In[11]:
    # Left-join locations onto the hourly Fitbit rows; forward-fill gaps.
    dfinal = pd.merge(left=df,
                 right = loc_df,
                 how = 'left',
                 left_on=['hour','date'],
                 right_on=['hour','date']).ffill()
    # ### Test data into model
    # In[12]:
    #test model
    train_df = dfinal.rename(columns={'value': 'Heartrate'})
    # In[13]:
    Labeled_df = pd.DataFrame()
    # External model labels each hourly row with an anxiety Label.
    Labeled_df = trainData(train_df)
    # In[14]:
    Labeled_df.drop(['lon'],axis=1, inplace = True)
    # In[15]:
    # Replace missing values because it doesn't exist
    Labeled_df['name'].fillna("Not given", inplace=True)
    Labeled_df['id'].fillna("Not given", inplace=True)
    # In[16]:
    # Update firebase with the user anxiety level
    # Label -> numeric level: Low/LowA -> 1, 'Meduim' (upstream spelling) -> 2, else 3.
    for row in Labeled_df.itertuples():
        if row.id != 'Not given':
            if row.Label == 'Low' or row.Label == 'LowA':
                anxietyLevel = 1
            elif row.Label == 'Meduim':
                anxietyLevel = 2
            else:
                anxietyLevel = 3
            doc_ref = db.collection(u'PatientLocations').document(row.id)
            doc_ref.update({
                u'anxietyLevel':anxietyLevel
            })
    # ### Show the places with highest anxiety level
    # In[17]:
    # Show the highest level
    df_high = pd.DataFrame()
    df_high = Labeled_df[Labeled_df.Label == 'High']
    # In[18]:
    # Notebook leftover: .head() result is discarded outside a notebook.
    df_high.head(5)
    # # Improvements
    # # Recommendation
    # In[19]:
    # Fetch the patient's profile document for the recommendation section.
    docDf = pd.DataFrame()
    doc_ref = db.collection(u'Patient').document(userID)
    doc = doc_ref.get().to_dict()
    docDf = docDf.append(pd.DataFrame(doc,index=[0]),ignore_index=True)
    # In[20]:
    age1 = docDf['age'].values
    name1 = docDf['name'].values
    emp1 = docDf['employmentStatus'].values
    mar1 = docDf['maritalStatus'].values
    income1 = docDf['monthlyIncome'].values
    chronicD1 = docDf['chronicDiseases'].values
    smoke1 = docDf['smokeCigarettes'].values
    gad1 = docDf['GAD-7ScaleScore'].values
    age = age1[0]
    name = name1[0]
    emp = emp1[0]
    mar = mar1[0]
    income = income1[0]
    chronicD = chronicD1[0]
    smoke = smoke1[0]
    gad = gad1[0]
    compareAge = int(age)
    # In[21]:
    # Derive sleep/steps recommendations from the period averages.
    sleepMin = Labeled_df['sleepMin'].mean()
    totalSteps = Labeled_df['TotalSteps'].mean()
    sleepRecomendation = False
    stepsRecomendation = False
    recomendedSteps = 'No recomendation'
    # Less than 6 hours (360 min) of average sleep triggers the sleep advice.
    if sleepMin < 360:
        sleepRecomendation = True
    # NOTE(review): teens (12-19) get a 6000-step target but adults (20-65)
    # only 3000 — confirm these thresholds are intended.
    if compareAge < 20 and compareAge > 11:
        if totalSteps < 6000:
            stepsRecomendation = True
            recomendedSteps = '6000'
    if compareAge < 66 and compareAge > 19:
        if totalSteps < 3000:
            stepsRecomendation = True
            recomendedSteps = '3000'
    sleepMin = sleepMin / 60
    # NOTE(review): these rounded values are discarded — float(...) returns
    # a new value that is never assigned back.
    float("{:.2f}".format(sleepMin))
    float("{:.2f}".format(totalSteps))
    # In[22]:
    # store recomendation in database
    ID = random.randint(1500000,10000000)
    doc_rec = db.collection(u'LastGeneratePatientReport').document(str(ID))
    doc_rec.set({
        u'steps': totalSteps,
        u'patientID':userID,
        u'sleepMin': sleepMin,
        u'sleepRecomendation': sleepRecomendation,
        u'stepsRecomendation': stepsRecomendation,
        u'recommended_steps': recomendedSteps
    })
    # ## Storage intilization
    # In[113]:
    # NOTE(review): client web config (API key etc.) embedded in source; the
    # pyrebase client is used for uploads below instead of the admin bucket.
    firebaseConfig = {
      "apiKey": "AIzaSyBoxoXwFm9TuFysjQYag0GB1NEPyBINlTU",
      "authDomain": "serene-2dfd6.firebaseapp.com",
      "databaseURL": "https://serene-2dfd6.firebaseio.com",
      "projectId": "serene-2dfd6",
      "storageBucket": "serene-2dfd6.appspot.com",
      "messagingSenderId": "461213981433",
      "appId": "1:461213981433:web:62428e3664182b3e58e028",
      "measurementId": "G-J66VP2Y3CR"
    }
    firebase = pyrebase.initialize_app(firebaseConfig)
    # NOTE(review): rebinding `storage` here shadows the firebase_admin
    # storage module imported above.
    storage = firebase.storage()
    # # AL
    # In[114]:
    # Change Label values to num, to represent them in a barchart
    nums=[]
    for row in Labeled_df.itertuples():
        if row.Label == 'Low' or row.Label == 'LowA':
            nums.append(1)
        elif row.Label == 'Meduim':
            nums.append(2)
        else:
            nums.append(3)
    Labeled_df['numLabel'] = nums
    # In[115]:
    # Get anxiety level by day and store it in a new data frame
    plot_df = pd.DataFrame()
    avgAnxiety = []
    totalAnxiety = 0
    rowCount = 1
    # NOTE(review): totalAnxiety/rowCount are never reset per date, so each
    # entry is a cumulative running average over all days so far, not a
    # per-day average — confirm this is intended.
    for x in range(0 ,len(dates)):
        for row in Labeled_df.itertuples():
            if (row.date == dates[x]):
                rowCount += 1
                totalAnxiety += row.numLabel
        avgAnxiety.append(totalAnxiety/rowCount)
    plot_df['date'] = dates
    plot_df['Anxiety'] = avgAnxiety
    # ## To generate graphs for Android application
    # In[116]:
    #divide dataframe into 15 rows (2 weeks)
    df1 = pd.DataFrame()
    df2 = pd.DataFrame()
    df3 = pd.DataFrame()
    df4 = pd.DataFrame()
    df5 = pd.DataFrame()
    df6 = pd.DataFrame()
    df7 = pd.DataFrame()
    df8 = pd.DataFrame()
    df9 = pd.DataFrame()
    df10 = pd.DataFrame()
    df11 = pd.DataFrame()
    df12 = pd.DataFrame()
    dfarray = []
    count = 0
    # Split plot_df into successive 15-row windows (up to 12 chunks) so each
    # chart stays readable on the phone.
    if(len(plot_df) > 15):
        df1 = plot_df[:15]
        df2 = plot_df[15:]
        dfarray.append(df1)
        dfarray.append(df2)
        if(len(df2)>15):
            count = (df2.last_valid_index() - (len(df2) - 15))
            df3 = df2[count:]
            dfarray.append(df3)
            if(len(df3)>15):
                count = (df3.last_valid_index() - (len(df3) - 15))
                df4 = df3[count:]
                dfarray.append(df4)
                if(len(df4)>15):
                    count = (df4.last_valid_index() - (len(df4) - 15))
                    df5 = df4[count:]
                    dfarray.append(df5)
                    if(len(df5)>15):
                        count = (df5.last_valid_index() - (len(df5) - 15))
                        df6 = df5[count:]
                        dfarray.append(df6)
                        if(len(df6)>15):
                            count = (df6.last_valid_index() - (len(df6) - 15))
                            df7 = df6[count:]
                            dfarray.append(df7)
                            if(len(df7)>15):
                                count = (df7.last_valid_index() - (len(df7) - 15))
                                df8 = df7[count:]
                                dfarray.append(df8)
                                if(len(df8)>15):
                                    count = (df8.last_valid_index() - (len(df8) - 15))
                                    df9 = df8[count:]
                                    dfarray.append(df9)
                                    if(len(df9)>15):
                                        count = (df9.last_valid_index() - (len(df9) - 15))
                                        df10 = df9[count:]
                                        dfarray.append(df10)
                                        if(len(df10)>15):
                                            count = (df10.last_valid_index() - (len(df10) - 15))
                                            df11 = df10[count:]
                                            dfarray.append(df11)
                                            if(len(df11)>15):
                                                count = (df11.last_valid_index() - (len(df11) - 15))
                                                df12 = df11[count:]
                                                dfarray.append(df12)
    # In[117]:
    # Plot AL
    # Single chart when the period fits one window; otherwise one chart per
    # 15-day chunk, each uploaded to Storage and deleted locally.
    if(len(plot_df)<15):
        fig, ax = plt.subplots()
        # Draw the stem and circle
        ax.stem(plot_df.date, plot_df.Anxiety, basefmt=' ')
        plt.tick_params(axis='x', rotation=70)
        # Start the graph at 0
        ax.set_ylim(0, 3)
        ax.set_title('Anxiety level (Throughout week)')
        plt.xlabel('Date')
        plt.ylabel('Low Meduim High', fontsize= 12)
        ax.yaxis.set_label_coords(-0.1, 0.47)
        (markers, stemlines, baseline) = plt.stem(plot_df.date, plot_df.Anxiety)
        plt.setp(stemlines, linestyle="-", color="#4ba0d1", linewidth=2)
        plt.setp(markers, marker='o', markersize=5, markeredgecolor="#4ba0d1", markeredgewidth=1)
        plt.setp(baseline, linestyle="-", color="#4ba0d1", linewidth=0)
        # NOTE(review): `x` is a leftover loop variable here; conv is unused.
        conv = str(x)
        fig.savefig('AL.png', dpi = 100)
        imagePath = 'AL.png'
        storage.child(userID+"/lastGeneratedPatientReport/AL.png").put('AL.png')
        os.remove('AL.png')
    else:
        for x in range(0,len(dfarray)):
            fig, ax = plt.subplots()
            # Draw the stem and circle
            ax.stem(dfarray[x].date, dfarray[x].Anxiety, basefmt=' ')
            plt.tick_params(axis='x', rotation=70)
            # Start the graph at 0
            ax.set_ylim(0, 3)
            ax.set_title('Anxiety level (Throughout week)')
            plt.xlabel('Date')
            plt.ylabel('Low Meduim High', fontsize= 12)
            ax.yaxis.set_label_coords(-0.1, 0.47)
            (markers, stemlines, baseline) = plt.stem(dfarray[x].date, dfarray[x].Anxiety)
            plt.setp(stemlines, linestyle="-", color="#4ba0d1", linewidth=2)
            plt.setp(markers, marker='o', markersize=5, markeredgecolor="#4ba0d1", markeredgewidth=1)
            plt.setp(baseline, linestyle="-", color="#4ba0d1", linewidth=0)
            conv = str(x)
            fig.savefig('ALP'+str(x)+'.png', dpi = 100)
            imagePath = 'ALP'+str(x)+'.png'
            storage.child(userID+"/lastGeneratedPatientReport/ALP"+str(x)+'.png').put('ALP'+str(x)+'.png')
            os.remove('ALP'+str(x)+'.png')
    # ## To generate graphs for PDF report
    # In[108]:
    # For the PDF: split into at most two 90-day chunks.
    df1 = pd.DataFrame()
    df2 = pd.DataFrame()
    dfarray = []
    count = 0
    if(len(plot_df) > 90):
        df1 = plot_df[:90]
        df2 = plot_df[90:]
        dfarray.append(df1)
        dfarray.append(df2)
    # In[111]:
    # Plot AL
    if(len(plot_df)<=90):
        fig, ax = plt.subplots()
        # Draw the stem and circle
        ax.stem(plot_df.date, plot_df.Anxiety, basefmt=' ')
        plt.tick_params(axis='x', rotation=70)
        # Start the graph at 0
        ax.set_ylim(0, 3)
        ax.set_title('Anxiety level (Throughout week)')
        plt.xlabel('Date')
        plt.ylabel('Low Meduim High', fontsize= 12)
        ax.yaxis.set_label_coords(-0.1, 0.47)
        (markers, stemlines, baseline) = plt.stem(plot_df.date, plot_df.Anxiety)
        plt.setp(stemlines, linestyle="-", color="#4ba0d1", linewidth=2)
        plt.setp(markers, marker='o', markersize=5, markeredgecolor="#4ba0d1", markeredgewidth=1)
        plt.setp(baseline, linestyle="-", color="#4ba0d1", linewidth=0)
        conv = str(x)
        fig.savefig('ALpdf.png', dpi = 100)
    else:
        for x in range(0,len(dfarray)):
            fig, ax = plt.subplots()
            # Draw the stem and circle
            ax.stem(dfarray[x].date, dfarray[x].Anxiety, basefmt=' ')
            plt.tick_params(axis='x', rotation=70)
            # Start the graph at 0
            ax.set_ylim(0, 3)
            ax.set_title('Anxiety level (Throughout week)')
            plt.xlabel('Date')
            plt.ylabel('Low Meduim High', fontsize= 12)
            ax.yaxis.set_label_coords(-0.1, 0.47)
            (markers, stemlines, baseline) = plt.stem(dfarray[x].date, dfarray[x].Anxiety)
            plt.setp(stemlines, linestyle="-", color="#4ba0d1", linewidth=2)
            plt.setp(markers, marker='o', markersize=5, markeredgecolor="#4ba0d1", markeredgewidth=1)
            plt.setp(baseline, linestyle="-", color="#4ba0d1", linewidth=0)
            fig.savefig('AL'+str(x)+'pdf.png', dpi = 100)
    # # Location Analysis
    # In[41]:
    # Keep only rows with a known location name.
    loc = pd.DataFrame()
    loc = Labeled_df[Labeled_df.name != 'Not given']
    # In[42]:
    loc.drop(['Heartrate', 'sleepMin','TotalSteps', 'id' ], axis=1, inplace = True)
    # In[43]:
    names = []
    Name =""
    for row in loc.itertuples():
        Name = row.name
        names.append(Name)
    # In[44]:
    new_name =pd.DataFrame()
    new_name ['name']= names
    # In[45]:
    new_name = new_name.drop_duplicates()
    # In[46]:
    # Notebook leftover: bare expression, no effect outside a notebook cell.
    new_name
    # In[47]:
    fnames = []
    fName =""
    for row in new_name.itertuples():
        fName = row.name
        fnames.append(fName)
    # In[61]:
    # Count, per distinct location, how many rows were labeled 'High'.
    analysis = pd.DataFrame()
    count = 0
    i = 0
    label = ""
    locationName = ""
    counts = []
    labels = []
    locationNames = []
    # NOTE(review): label/locationName keep their previous values when a
    # location has no 'High' rows, so those rows carry stale text; they are
    # only removed below because their Frequency is 0.
    for x in range(0,len(fnames)):
        count = 0
        locName = fnames[i]
        for row in loc.itertuples():
            if(locName == row.name):
                if(row.Label=='High'):
                    count+=1
                    label = row.Label
                    locationName = row.name
        i+=1
        counts.append(count)
        labels.append(label)
        locationNames.append(locationName)
    analysis ['Location'] = locationNames
    analysis ['Frequency'] = counts
    analysis ['Anxiety Level'] = labels
    # In[62]:
    # Notebook leftover: bare expression.
    analysis
    # In[63]:
    # NOTE(review): with inplace=True drop() returns None, so newA is None;
    # the zero-frequency rows are still removed in place from `analysis`.
    newA = analysis.drop(analysis[analysis['Frequency'] == 0].index, inplace= True)
    # In[64]:
    # Notebook leftover: bare expression.
    analysis
    # In[65]:
    import six
    # In[66]:
    def render_mpl_table(data, col_width=5.0, row_height=0.625, font_size=14,
                         header_color='#23495f', row_colors=['#e1eff7', 'w'], edge_color='#23495f',
                         bbox=[0, 0, 1, 1], header_columns=0,
                         ax=None, **kwargs):
        # Render a DataFrame as a styled matplotlib table and save it to
        # 'Location.png'.
        # NOTE(review): if a non-None `ax` were passed, `fig` below would be
        # undefined; all call sites here pass ax=None.
        if ax is None:
            size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
            fig, ax = plt.subplots(figsize=size)
            ax.axis('off')
        mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, cellLoc='center' ,**kwargs)
        mpl_table.auto_set_font_size(False)
        mpl_table.set_fontsize(font_size)
        # Style header cells (row 0 / leading columns) differently from the
        # alternating body rows.
        for k, cell in six.iteritems(mpl_table._cells):
            cell.set_edgecolor(edge_color)
            if k[0] == 0 or k[1] < header_columns:
                cell.set_text_props(weight='bold', color='w')
                cell.set_facecolor(header_color)
            else:
                cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
                cell.alignment = 'center'
        fig.savefig('Location.png', dpi = 100)
        return ax
    # In[67]:
    if(len(analysis) > 0):
        render_mpl_table(analysis, header_columns=0, col_width=4)
    # # Genertate patient report and save it in storage
    # In[71]:
    # Assemble the PDF report with reportlab.
    # NOTE(review): "serene .png" (with a space) must exist in the working
    # directory; drawImage raises otherwise.
    pdf = canvas.Canvas('Patient.pdf')
    pdf.setTitle('Patient report')
    #sleepRecomendation
    #recomendedSteps
    pdf.drawImage("serene .png", 150, 730, width=300,height=130, mask= 'auto')
    pdf.setFillColor(HexColor('#e1eff7'))
    pdf.roundRect(57,400, 485,200,4,fill=1, stroke= 0)
    pdf.setFont("Helvetica-Bold", 16)
    pdf.setFillColor(HexColor('#23495f'))
    pdf.drawString(115,570, "Report Duration From: " + dates[0] +" To: "+ dates[len(dates)-1])
    pdf.setFont("Helvetica-Bold", 15)
    pdf.drawString(250,540, "Improvments: ")
    pdf.drawString(200,500, "Highest day of anxiety level: ")
    pdf.setFillColor(HexColor('#e1eff7'))
    pdf.roundRect(57,160, 485,200,4,fill=1, stroke= 0)
    pdf.setFont("Helvetica-Bold", 16)
    pdf.setFillColor(HexColor('#23495f'))
    pdf.drawString(130,330, "Recommendations: ")
    pdf.drawString(150,300, "Sleep Recomendation: ")
    pdf.drawString(150,260, "Steps Recomendation: ")
    pdf.setFont("Helvetica", 16)
    pdf.setFillColor(black)
    if(sleepRecomendation == True):
        pdf.drawString(180,280, "we reccomend you to sleep from 7-9 hours")
    else:
        pdf.drawString(180,280, "keep up the good work")
    if(stepsRecomendation == True):
        pdf.drawString(180,240, "we reccomend you to walk at least " + recomendedSteps)
    else:
        pdf.drawString(180,240, "keep up the good work")
    pdf.showPage()
    # Page 2 (and 3 for long periods): anxiety chart(s) + location analysis.
    pdf.drawImage("serene .png", 150, 730, width=300,height=130, mask= 'auto')
    pdf.setFont("Helvetica-Bold", 20)
    pdf.setFillColor(HexColor('#808080'))
    pdf.drawString(100,650, "Anxiety Level")
    if(len(plot_df)<=90):
        pdf.drawImage("ALpdf.png", 57, 400, width=485,height=200)
        pdf.drawString(100,350, "Location Analysis")
        if(len(analysis) > 0):
            pdf.drawImage("Location.png", 57, 100, width=485,height=200)
        else:
            pdf.setFont("Helvetica", 15)
            pdf.setFillColor(HexColor('#23495f'))
            t = pdf.beginText(130,250)
            text = [
            name +" condition was stable through this period,",
            "no locations with high anxiety level were detected." ]
            for line in text:
                t.textLine(line)
            pdf.drawText(t)
        pdf.showPage()
    else:
        j = 400
        for x in range(0,len(dfarray)):
            pdf.drawImage('AL'+str(x)+'pdf.png', 57, j, width=485,height=200)
            j = j-300
        pdf.showPage()
        pdf.drawImage("serene .png", 150, 730, width=300,height=130, mask= 'auto')
        pdf.setFont("Helvetica-Bold", 20)
        pdf.setFillColor(HexColor('#808080'))
        pdf.drawString(100,650, "Location Analysis")
        if(len(analysis) > 0):
            pdf.drawImage("Location.png", 57, 400, width=485,height=200)
        else:
            pdf.setFont("Helvetica", 15)
            pdf.setFillColor(HexColor('#23495f'))
            t = pdf.beginText(130,550)
            text = [
            name +" condition was stable through this period,",
            "no locations with high anxiety level were detected." ]
            for line in text:
                t.textLine(line)
            pdf.drawText(t)
    pdf.save()
    # In[ ]:
    #new method
    # Upload the PDF, then clean up the local image/PDF artifacts.
    doct = storage.child(userID+"/lastGeneratedPatientReport/patientReport").put('Patient.pdf')
    # In[73]:
    os.remove('Patient.pdf')
    if(len(plot_df)<=90):
        os.remove('ALpdf.png')
    else:
        for x in range(0,len(dfarray)):
            os.remove('AL'+str(x)+'pdf.png')
    # In[ ]:
    return "HI"
if __name__ == "__main__":
    # Run the Flask development server (debug mode; not for production use).
    app.run(debug=True)
|
import sys
import collections
from p4_hlir.main import HLIR
from function import *
def parseControlFLow():
#h = HLIR("./stateful.p4")
h = HLIR("./l2_switch.p4")
#h = HLIR("../../tutorials-master/SIGCOMM_2015/flowlet_switching/p4src/simple_router.p4")
#h = HLIR("../../tutorials-master/SIGCOMM_2015/source_routing/p4src/source_routing.p4")
h.build()
print "\n====start"
# start form p4_ingress_ptr
for table_node, parser_node_set in h.p4_ingress_ptr.items():
#sys.exit(0)
table_p = table_node # table_p is the current table node
hit_p = None
miss_p = None
table_list = [] # table sequence in dataPlane
hit_list = [] # hit_list has not been analysised
i = 0 # test_loop_tag
while table_p != None:
print "======%d loop======"%i
#print i, table_p
i = i+1
appendTableList(table_list, table_p)
''' # table node info.
print table_p.name + ".next_ info: ", table_p.next_
print "==control_flow_parent", table_p.control_flow_parent
print "==conditional_barrier", table_p.conditional_barrier
print '==dependencies_to', table_p.dependencies_to
print '==dependencies_for', table_p.dependencies_for
print '==base_default_next', table_p.base_default_next
'''
miss_p = None
if type(table_p.next_) == dict: # {"hit": **,"miss": **}
for hit_ness, hit_table_node in table_p.next_.items():
if hit_ness == 'hit':
if hit_table_node != None:
hit_list.append(hit_table_node)
else:
miss_p = hit_table_node
if miss_p != None:
table_p = miss_p
else:
table_p = None
else: # {actions: **, actions: **}
#print table_p.next_
for action_node, action_table_node in table_p.next_.items():
table_p = action_table_node
#print "abc", action_node, action_table_node
break
if (len(hit_list) > 0) and (table_p == None):
table_p = hit_list[0]
del hit_list[0]
else:
table_p = table_p
#print "hit_lis:", hit_list
print table_list
print "end===="
p4_field_type = '<class \'p4_hlir.hlir.p4_headers.p4_field\'>'
p4_signature_ref_type = '<class \'p4_hlir.hlir.p4_imperatives.p4_signature_ref\'>'
table_matchWidth_list = [] # list, used to describe the width of each table
table_actionWidth_list = [] # list, used to describe the width of total actions in each table, including parameter and action_bit
table_matchType_list = [] # list, used to describe the type of each table
table_action_matching_list = {} #dict, used to describe the matching relationship of table to actions
table_dep_list = [] # list, used to describe the table dependent to the front table represented by tableID
metadata_list = [] # list, used to describe the field/key should be included in the metadata
table_match_meta_list = [] # list, used to describe the field/key used by each table_match
table_action_meta_list = {} # dict, used to describe the field/key used by each table_action
# add switching_metadata to metadata_list
for header_instances_name, header_instances in h.p4_header_instances.items():
print header_instances_name
if header_instances.header_type.name == 'switching_metadata_t':
for field_p in header_instances.fields:
#print '\t', field_p.name, field_p.width
metadata_list.append(field_p)
# get table_list...
for table_p in table_list:
#print 'match_fields:', table_p.match_fields
#print table_p, table_p.conditional_barrier #table_p.dependencies_to, table_p.dependencies_for
match_width = 0
action_width = 0
match_type = ''
premitive_action_list = []
table_dep_id = 0
table_dep_hitness = ''
eachTable_match_meta_list = []
eachTable_action_meta_list = []
# add table dependence; just supporting "hit" & "miss" in this version
if table_p.conditional_barrier != None:
table_dep_hitness = table_p.conditional_barrier[1]
table_dep_id = findTableID(table_p.conditional_barrier[0].name, table_list)
#print "============table_dep_id:", table_dep_id
else:
table_dep_id = 0
table_dep_hitness = ''
table_dep_list.append((table_dep_hitness, table_dep_id))
# add match_width & match_type
for match_field_p in table_p.match_fields:
match_width += match_field_p[0].width
match_type = str(match_field_p[1])
appendMetadataList(metadata_list, match_field_p[0])
#print type(match_field_p[0]), match_field_p[0].name
table_matchWidth_list.append(match_width)
table_matchType_list.append(match_type)
# calculate table_match_meta_list
for match_field_p in table_p.match_fields:
match_field_startBit = locateField(metadata_list, match_field_p[0])
match_field_endBit = match_field_startBit + match_field_p[0].width
eachTable_match_meta_list.append((match_field_startBit, match_field_endBit))
# add action_width &action_table_matching list
for action_p in table_p.actions:
subAction_list = []
#print "1", action_p.name, action_p.signature, action_p.signature_widths
#action_width += action_p.signature_widths
for signature_width_p in action_p.signature_widths:
action_width += signature_width_p
#print "call_sequence:", action_p.call_sequence
#print "flat_call_sequence:", action_p.flat_call_sequence
eachSubAction_meta_list = []
for subAction in action_p.call_sequence:
#print subAction[0].name, subAction[1]
subAction_list.append(subAction)
#appendMetadataList(metadata_list, action_field_p)
para_meta_list = []
for action_field_p in subAction[1]:
if str(type(action_field_p)) == p4_field_type:
appendMetadataList(metadata_list, action_field_p)
action_field_startBit = locateField(metadata_list, action_field_p)
action_field_endBit = action_field_startBit + action_field_p.width
else:
action_field_startBit = 0
action_field_endBit = 0
para_meta_list.append((action_field_startBit, action_field_endBit))
eachSubAction_meta_list.append((subAction[0], para_meta_list))
'''
if subAction[1] == []:
print "2"
for parameter in subAction[1]:
if str(type(parameter)) == p4_field_type:
print parameter.width
print "3"
elif str(type(parameter)) == p4_signature_ref_type:
print '4', parameter.idx
'''
# each action refrence to 1bit in actionBit
action_width += 1
premitive_action_list.append(subAction_list)
eachTable_action_meta_list.append(eachSubAction_meta_list)
table_actionWidth_list.append(action_width)
table_action_matching_list[ str(table_p.name)] = premitive_action_list
table_match_meta_list.append(eachTable_match_meta_list)
table_action_meta_list[ str(table_p.name) ] = eachTable_action_meta_list
print 'table_matchWidth_list:\t', table_matchWidth_list
print 'table_actionWidth_list:\t', table_actionWidth_list
print 'table_matchType_list:\t', table_matchType_list
print 'table_action_matching_dict:\t', table_action_matching_list
print 'table_dep_list:\t', table_dep_list
print 'metadata_list:'
for field_p in metadata_list:
print '\t', field_p.name, field_p.instance, field_p.width
print 'table_match_meta_list:\t', table_match_meta_list
print 'table_action_matching_dict:\t', table_action_meta_list
metadata_list_pkt = []
for field_p in metadata_list:
if field_p.instance.header_type.name != 'switching_metadata_t':
metadata_list_pkt.append(field_p)
return metadata_list_pkt
#for action_name, action in h.p4_actions.items():
# print action.name+"============="
# print action.call_sequence
#print action.flat_call_sequence
#print action.signature
#print action.signature_widths
#print action.signature_flags
'''
for sigF_name, sigF in action.signature_flags.items():
#print sigF_name,sigF
for sigF_item_name, sigF_item in sigF.items():
#print sigF_item_name, sigF_item
if str(sigF_item_name) == "data_width":
#print action.signature_flags
#print sigF
print type(sigF_item), sigF_item
'''
'''
print a_name.name, type(a_name.name)
for c in a_name.match_fields:
print c[0].name, c[1], c[2]
print "=====actions====="
for d in a_name.actions:
print d.name
print "=====size====="
print a_name.min_size, a_name.max_size
print "=====next====="
print a_name.next_, type(a_name.next_)
if type(a_name.next_) == dict:
print "abc"
for hit_ness, e in a_name.next_.items():
if hit_ness == "miss":
f_miss = e
else:
f_hit = e
print f_miss.next_
print f_hit.next_
print "=====timeOut===="
if a_name.support_timeout == False:
print a_name.support_timeout
'''
# p4_egress_ptr is only a table node
#print h.p4_egress_ptr, type(h.p4_egress_ptr), h.p4_egress_ptr.next_
'''
for c in b_item:
print c.name
'''
#print h.p4_egress_ptr
#p4_tables
"""
for table_name, table in h.p4_tables.items():
print table_name, table.match_fields
"""
#p4_headers
'''
for header_name, header in h.p4_headers.items():
print header.name, type(header.length)
#print header.layout
print header.attributes
#for field, width in header.layout.items():
# print type(field), width
'''
#p4_header_instances
'''
for header_name, header in h.p4_header_instances.items():
print header.name + "===================================="
print header.virtual
#for field, width in header.header_type.layout.items():
# print type(field)
'''
'''
#p4_fields
for field_name, field in h.p4_fields.items():
print field.name, field.calculation
for item in field.calculation:
print item[0]
print item[1].name
print item[2].left, item[2].right, item[2].op
'''
#p4_field_lists
'''
for field_list_name, field_list in h.p4_field_lists.items():
print field_list.name
for field in field_list.fields:
print field.name, field.offset, field.width, field.calculation
for item in field.calculation:
for i in range(3):
print type(item[i])
#print item[1].output_width
'''
#p4_field_list_calculations
'''
for field_list_name, field_list in h.p4_field_list_calculations.items():
print field_list.name, field_list.input, field_list.output_width, field_list.algorithm
for a in field_list.input:
for b in a.fields:
print b
'''
#p4_parser_state
#print type(h.p4_parse_states)
'''
for parser_name, parser in h.p4_parse_states.items():
print parser.name
#call_sequence
#print parser.call_sequence
for se in parser.call_sequence:
print se
if len(se) == 3:
print str(se[0]) == "set"
print se[1].name, se[1].instance, se[1].offset
#branch_on
#print parser.branch_on, type(parser.branch_on)
for field in parser.branch_on:
print field.name
#branch_to
for key, dest in parser.branch_to.items():
print key, dest
#prev
#print parser.prev
for state in parser.prev:
print state.name
'''
#p4_action
'''
for action_name, action in h.p4_actions.items():
print action.name+"============="
for sig_name in action.signature:
print sig_name
print action.signature_widths
#print action.signature_flags
for sigF_name, sigF in action.signature_flags.items():
#print sigF_name,sigF
for sigF_item_name, sigF_item in sigF.items():
#print sigF_item_name, sigF_item
if str(sigF_item_name) == "data_width":
#print action.signature_flags
#print sigF
print type(sigF_item), sigF_item
#call_sequence
print action.call_sequence
for call_function in action.call_sequence:
for i in range(len(call_function)):
if i ==0:
print call_function[0].name, call_function[0].signature
else:
print call_function[i]
for item in call_function[1]:
print item,type(item)
#print "***************"
#print action.flat_call_sequence
'''
#p4_node
'''
for table_name, table in h.p4_nodes.items():
print table.name+"============="
#print table.next_
#match_fields
print table.control_flow_parent
print table.base_default_next
for match_field in table.match_fields:
for field in match_field:
print field
#print table.attached_counters
print "1"+table.control_flow_parent, table.conditional_barrier
print table.base_default_next
print table.dependencies_to
'''
'''
#p4_action_node
for action_node_name, action_node in h.p4_action_nodes.items():
print action_node.name
'''
#p4_conditional_node
# NOTE(review): unlike the surrounding triple-quoted / commented-out blocks,
# this loop is LIVE module-level code -- it prints every P4 conditional node
# (dict key and node name) at import time. Python 2 print-statement syntax.
for action_node_name, action_node in h.p4_conditional_nodes.items():
    print action_node_name, action_node.name
'''
for action_node_name, action_node in h.p4_action_profiles.items():
print action_node.name
'''
#p4_counter
"""
for counter_name, counter in h.p4_counters.items():
print counter.name, counter.type, counter.min_width, counter.saturating
print counter.binding, counter.instance_count
"""
#p4_register
"""
for register_name, register in h.p4_registers.items():
print register.name+"=================="
print register.layout, register.width, register.instance_count
print register.binding
"""
#p4_parser_exception
"""
for parser_ex_name, parser_ex in h.p4_parser_exceptions.items():
print parser_ex
"""
"""
tuple: e.g.,
fruits = ("apple", "banana", "orange")
for i in range(len(fruits)):
print fruits[i]
list: e.g.,
fruits = ["apple","banana","orange"]
for fruit in fruits:
print fruit
dictionary: e.g.,
fruit_dict = {"apple":1, "banana":2, "orange":3}
for key in fruit_dict:
print fruit_dict[key]
"""
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .forms import Formulario, FormularioCursos, TestForm
from django.core.files.storage import FileSystemStorage
from .models import Cursos,Usuarios, CursosDDAF, Cursos_Alumno, asistencia
from django.contrib.auth.models import User
from django.views.generic.edit import CreateView
def index(request):
    """Render the landing page with a welcome title and a section prompt."""
    context = {
        'titulo': 'Bienvenido al Prototipo de Control y Gestión de la Piscina',
        'intro': '¿Qué sección quieres ver?',
    }
    return render(request, 'Main_DDAF/index.html', context)
def cursos(request):
    """List the distinct course names ever offered plus every CursosDDAF row."""
    # distinct() collapses repeated course names across past offerings.
    bienvenida={'titulo':'Cursos registrados', 'intro':"Nombre de los cursos rendidos almenos una vez ",
    'cursos':Cursos.objects.values('nombre').distinct(),'cursosddaf':CursosDDAF.objects.all()}
    return render(request,'Main_DDAF/cursos.html', bienvenida)
def users(request):
    """List all registered users, plus the subset whose PSU score is <= 740."""
    # Lookup syntax is attribute__condition=value; exclude(puntaje_psu__gt=740)
    # keeps users whose score is NOT greater than 740 (i.e. <= 740).
    info={'titulo':'Usuarios registrados en el sistema', 'intro':"Los siguientes usuarios están registrados en nuestra base de datos",
    'user':Usuarios.objects.all(),'user_beau':Usuarios.objects.exclude(puntaje_psu__gt=740)}
    return render(request,'Main_DDAF/users.html', info)
def adduser(request):
    """Show the user-registration form."""
    context = {
        'titulo': 'Agregar Usuarios',
        'intro': "Registrese, Entreguenos todos sus datos aqui",
        'form': Formulario(),
    }
    return render(request, 'Main_DDAF/adduser.html', context)
def added(request):
    """Create a Django auth User plus the linked Usuarios profile from POST data."""
    data = request.POST
    new_user = User.objects.create_user(username=data['username'], password=data['password'])
    new_user.save()
    usuario = Usuarios(
        nombre=data['nombre'],
        apellido=data['apellido'],
        direccion=data['direccion'],
        rut=data['rut'],
        puntaje_psu=data['puntaje_psu'],
        user=new_user,
    )
    usuario.save()
    context = {'Titulo': 'Felicidades', 'comentario': 'Te haz registrado bien!'}
    return render(request, 'Main_DDAF/error.html', context)
def reclamos(request):
    """Render the (placeholder) complaints page."""
    page = {'texto1': 'Reclamos', 'texto2': 'Aquí deberian ir los reclamos'}
    return render(request, 'Main_DDAF/reclamos.html', page)
def indicadores(request):
    """Render the (placeholder) indicators page."""
    page = {'texto1': 'Indicadores', 'texto2': 'Aquí deberian ir los indicadores'}
    return render(request, 'Main_DDAF/indicadores.html', page)
'''CURSOS'''
def addcurso(request):
    """Show the new-course form."""
    context = {
        'texto2': 'Aquí deberian ir los campos del formulario',
        'texto1': 'Agregar curso',
        'form': FormularioCursos(),
    }
    return render(request, 'Main_DDAF/addcurso.html', context)
def addedcurso(request):
    """Persist a new CursosDDAF from the submitted form and show a confirmation."""
    context={'Titulo':request.POST['nombre'], 'comentario':request.POST['tipo'],'comentario2':request.POST['horario']}
    # NOTE(review): capacidad is derived from the 'pista' (lane) field as
    # lanes * 10, not from a dedicated capacity input -- confirm intentional.
    curso = CursosDDAF(nombre=request.POST['nombre'],tipo=request.POST['tipo'],horario=request.POST['horario'],profesor=request.POST['profesor'],
    pista=request.POST['pista'],días=request.POST['dias'],capacidad=int(request.POST['pista'])*10)
    curso.save()
    return render(request, 'Main_DDAF/addedcurso.html', context)
def inscribircurso(request,pk):
    """Enroll the logged-in user in course *pk*, decrementing its capacity.

    Renders curso_inscrito.html on success or duplicate enrollment, and
    lleno.html when the course has no remaining seats.
    """
    # username stored in the session at login
    usuario=request.session['user']
    # id of that username in Django's auth User model
    usuarioobj=User.objects.values_list('id', flat=True).get(username=usuario)
    # the CursosDDAF row being enrolled into
    t=CursosDDAF.objects.get(id=pk)
    # Reserve a seat in memory; only persisted (t.save()) if enrollment succeeds.
    if int(t.capacidad)>0:
        a=int(t.capacidad)-1
        t.capacidad=a
        print(usuarioobj)
        # Already enrolled? Then do not create a duplicate link row.
        if Cursos_Alumno.objects.filter(glosa_alumno=usuario, glosa_curso=t.nombre):
            context={'texto3':"Ya estás inscrite en este curso, intenta otro.",'texto1':request.session['user']}
            return render(request,'Main_DDAF/curso_inscrito.html',context)
        else:
            print("no existe")
            # Create the enrollment link row: [id,id_curso,glosa_curso,id_alumno,glosa_alumno]
            curso_alumno=Cursos_Alumno(glosa_curso=t.nombre, glosa_alumno=usuario, id_curso=t.id, id_alumno=usuarioobj)
            curso_alumno.save()
            t.save()
            context={'texto4':'En el horario:'+t.horario,'texto2':'Curso inscrito:'+t.nombre,'texto1':request.session['user']}
            return render(request,'Main_DDAF/curso_inscrito.html',context)
    else:
        context={'texto1':'No hay cupo para el curso seleccionado','texto2':'Por favor elegir otro curso.'}
        return render(request,'Main_DDAF/lleno.html',context)
def ver_alumnos_curso(request,pk):
    """List the students enrolled in course *pk*."""
    usuario=request.session['user']
    t=CursosDDAF.objects.get(id=pk)
    a=t.nombre
    print(a)  # debug output
    print(Cursos_Alumno.objects.values_list('glosa_alumno', flat=True).filter(glosa_alumno=usuario))  # debug output
    # Lookup syntax: attribute__condition=value (here: WHERE glosa_curso = a).
    info={'titulo':'Usuarios registrados en el sistema', 'intro':"Los siguientes usuarios están registrados en nuestra base de datos",
    'user':Cursos_Alumno.objects.filter(glosa_curso=a).distinct()}
    return render(request,'Main_DDAF/users_curso.html', info)
def asistencias(request,pk):
    """Render the attendance page for course *pk* with its enrolled students."""
    # NOTE(review): pk+"..." only works if pk is a str; an int pk would raise
    # TypeError here -- confirm the URLconf passes pk as a string.
    print(pk+"la ide del curso es:",pk)
    form=TestForm()
    t=CursosDDAF.objects.get(id=pk)
    a=t.nombre
    print(a)
    # Hard-coded choices used to exercise TestForm.hola -- placeholder data.
    pkkk=(('pepito','pepito'),('juanito','juanito'),)
    #print(request.GET())
    texto1=TestForm.hola(pkkk)
    print(Cursos_Alumno.objects.values_list('glosa_alumno', flat=True).filter(id_curso=pk))
    context={'texto2':'Aquí deberian ir las asistencias','texto1':texto1,'form':form,'user':Cursos_Alumno.objects.filter(glosa_curso=a).distinct()}
    return render(request, 'Main_DDAF/asistencias.html',context)
def add_asistencia(request):
    """Render the attendance template with no context (placeholder view)."""
    return render(request, 'Main_DDAF/asistencias.html')
class MyCreateView(CreateView):
    """CreateView for asistencia records whose form also receives the URLconf kwargs."""
    model = asistencia
    form_class = TestForm
    template_name = 'Main_DDAF/asistencias.html'
    def get_form_kwargs(self):
        """Extend the default form kwargs with every URLconf parameter."""
        kwargs = super( MyCreateView, self).get_form_kwargs()
        # update the kwargs for the form init method with yours;
        # self.kwargs contains all url conf params
        kwargs.update(self.kwargs)
        return kwargs
|
import os
import threading
import json
from core import core
from flask import Flask, render_template, request, redirect, url_for
from flask_restful import Resource, Api
# Bootstrap button descriptors rendered next to each runner in the UI;
# 'action' is the POST value submitted when the button is pressed.
bn_config = {
    'action' : 'config',
    'name' : 'Config',
    'class' : 'btn btn-outline-secondary'
}
bn_start = {
    'action' : 'start',
    'name' : 'Start',
    'class' : 'btn btn-outline-secondary'
}
bn_restart = {
    'action' : 'restart',
    'name' : 'Restart',
    'class' : 'btn btn-outline-secondary'
}
bn_stop = {
    'action' : 'stop',
    'name' : 'Stop',
    'class' : 'btn btn-outline-secondary'
}
##### FLASK API #####
api = Flask(__name__)
@api.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@api.route('/debug')
def get_debug():
    """Show the names of all live threads, for debugging runner lifecycles."""
    return render_template('debug.html', threads=[thread.name for thread in threading.enumerate()])
@api.route('/runners', methods=['GET','POST'])
def get_runners():
    """List runners; on POST, create/delete/start/stop/restart one first."""
    if request.method == 'POST':
        retVal = dict(request.form)
        if retVal['action'] == 'create':
            # Form fields are flattened as parameters_<key> / targets_<name>;
            # split on the first underscore to recover the real key.
            extParameters = {key.split("_", 1)[1]: value for key, value in retVal.items() if key.startswith('parameters_')}
            targets = [key.split("_", 1)[1] for key in retVal if key.startswith('targets_')]
            print(str(retVal) + '||' + str(extParameters) + '||' + str(targets))
            myCore.addRunner(runnerID=retVal['runnerID'],
                             module=retVal['module'],
                             runner=retVal['runner'],
                             interval = int(retVal['interval']),
                             targets = targets,
                             extParameters = extParameters
                             )
        elif retVal['action'] == 'delete':
            myCore.removeApp(retVal['runnerID'])
        elif retVal['action'] == 'start':
            myCore.startApp(retVal['runnerID'])
        elif retVal['action'] == 'stop':
            myCore.stopApp(retVal['runnerID'])
        elif retVal['action'] == 'restart':
            myCore.stopApp(retVal['runnerID'])
            myCore.startApp(retVal['runnerID'])
        else:
            return 'Application Error: unknown action'
    # NOTE(review): retVal below is rebuilt from getStatus() and decorated with
    # action buttons, but never passed to the template -- either dead code, or
    # it relies on getStatus() returning shared dicts that the template sees
    # through myCore.config.apps. TODO confirm.
    retVal = myCore.getStatus()
    for key, value in retVal.items():
        retVal[key]['actions'] = [bn_stop if value['status'] else bn_start, bn_restart]
    return render_template('runners.html', runners=myCore.config.apps, modules=myCore.config.modules)
@api.route('/modules', methods=['GET','POST'])
def get_modules():
    """List modules; on POST create one (then jump to its editor) or delete one."""
    if request.method == 'POST':
        retVal = dict(request.form)
        if retVal['action'] == 'create':
            myCore.addModule(moduleID=retVal['moduleID'])
            # Send the user straight to the new module's edit page.
            return redirect(url_for('get_module', moduleID=retVal['moduleID']))
        elif retVal['action'] == 'delete':
            myCore.removeModule(moduleID=retVal['moduleID'])
    return render_template('modules.html', modules=myCore.config.modules)
@api.route('/module/<moduleID>', methods=['GET','POST'])
def get_module(moduleID):
    """Show a module's source; on POST save the edited content and return to the list."""
    content = ''
    if request.method == 'POST':
        content = dict(request.form)['content']
        myCore.updateModule(moduleID=moduleID, content=content)
        return redirect(url_for('get_modules'))
    content = myCore.getModuleContent(moduleID)
    return render_template('module.html', moduleID=moduleID, moduleContent=content)
@api.route('/integration', methods=['GET','POST'])
def get_integration():
    """Show the DXL/ESM integration configs; on POST forward the whole form
    to the integration layer as keyword arguments."""
    if request.method == 'POST':
        retVal = dict(request.form)
        myCore.integration.execute(**retVal)
    return render_template(
        'integration.html',
        dxl=myCore.integration.execute('dxl', 'getCurrentConfig'),
        esm=myCore.integration.execute('esm', 'getCurrentConfig')
    )
##### END FLASK #####
# Global application core shared by all request handlers above.
myCore = core()
appRunning = {}  # NOTE(review): appears unused in this file -- confirm before removing
def main():
    """Run the Flask app with template auto-reload enabled."""
    api.jinja_env.auto_reload = True
    api.config["TEMPLATES_AUTO_RELOAD"] = True
    # NOTE(review): binds on all interfaces with no auth -- intended for
    # trusted networks only.
    api.run(host='0.0.0.0')
if __name__ == "__main__":
    main()
|
import simplejson as json
from index_preprocess import *
from features import *
# Load all posts and rank them by the number of "thanks" each received.
with open('xda_posts.json') as f:
    posts = json.load(f)
posts = sorted(posts, key=lambda k: len(k['thanks']), reverse=True)
top_posts = posts[0:250]
with open('xda_threads.json') as f:
    thread_lookup = build_thread_lookup(json.load(f))
index_preprocess(top_posts, thread_lookup)
compute_features(top_posts, thread_lookup, posts)
# Keep the 100 most-thanked posts that are not the thread's opening post
# (position > 2, or written by someone other than the OP).
top_posts_minus_op = []
for post in top_posts:
    if post['thread_position'] > 2 or not post['author_is_op']:
        top_posts_minus_op.append(post)
    if len(top_posts_minus_op) == 100:
        break
with open('xda_posts_top_100_thanked.json', 'w') as f:
    json.dump(top_posts_minus_op, f, indent=2)
from semmatch.data.fields.field import Field
from semmatch.data.fields.label_field import LabelField
from semmatch.data.fields.text_filed import TextField
from semmatch.data.fields.index_field import IndexField
from semmatch.data.fields.numerical_field import NumericalField
from semmatch.data.fields.vector_fields import VectorField
from semmatch.data.fields.multilabel_field import MultiLabelField |
#!/usr/bin/env python3
import argparse
from util.base_util import *
from util.file_util import *
# Buildbot JSON configs (relative to the Chromium src dir) whose GPU tryjob
# definitions are checked against the local tryjob config.
BUILDBOT_CONFIG = [path.join('testing', 'buildbot', 'chromium.gpu.json'),
                   path.join('testing', 'buildbot', 'chromium.gpu.fyi.json'),
                   path.join('testing', 'buildbot', 'chromium.dawn.json')]
def parse_arguments():
    """Parse CLI args and attach the local tryjob config to the namespace.

    Adds: name_to_type (test name -> (module, backend)), shards, test_args,
    browser_args, receiver (admin email), and a normalized src_dir.
    """
    parser = argparse.ArgumentParser(
        description='Check tryjob configuration',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--src-dir', '--dir', '-d', default='.',
        help='Chromium source directory.\n\n')
    parser.add_argument('--print-job', '-j', action='store_true',
        help='Print the details of each job.\n\n')
    parser.add_argument('--print-task', '-t', action='store_true',
        help='Print the details of each task.\n\n')
    parser.add_argument('--email', '-e', action='store_true',
        help='Send the error by email.\n\n')
    args = parser.parse_args()
    # Fold the tryjob config (provided by util.file_util) into args.
    config = load_tryjob_config()
    args.name_to_type = {}
    for test_name, test_type, _, _ in config['tryjob']:
        args.name_to_type[test_name] = test_type
    args.shards = config['shards']
    args.test_args = config['test_args']
    args.browser_args = config['browser_args']
    args.receiver = config['email']['receiver']['admin']
    # Allow pointing at the checkout root rather than the src/ dir itself.
    args.src_dir = path.abspath(args.src_dir)
    if path.exists(path.join(args.src_dir, 'src')):
        args.src_dir = path.join(args.src_dir, 'src')
    return args
class Task(object):
    """A single swarming test task: its name, optional shard count, and the
    test/browser arguments it runs with."""

    def __init__(self, name):
        self.name = name
        self.shards = None
        self.test_args = []
        self.browser_args = []

    def __eq__(self, other):
        """Tasks are equal when name, shards and both argument lists match."""
        mine = (self.name, self.shards, self.test_args, self.browser_args)
        theirs = (other.name, other.shards, other.test_args, other.browser_args)
        return mine == theirs

    def __str__(self):
        # Optional shard banner, then one line per test arg and browser arg.
        pieces = []
        if self.shards:
            pieces.append(' ===== shards: %d =====\n' % self.shards)
        for arg in self.test_args:
            pieces.append(' %s\n' % arg)
        for arg in self.browser_args:
            pieces.append(' %s\n' % arg)
        return ''.join(pieces)
class TryJob(object):
    """A tryjob bot: its name, platform ('linux'/'win'), GPU string, and the
    list of Tasks it runs."""

    def __init__(self, name, platform, gpu):
        self.name = name
        self.platform = platform
        self.gpu = gpu
        self.tasks = []

    def __str__(self):
        # Header line with platform and GPU, then one line per task name.
        header = ' %s %s\n' % (self.platform, self.gpu)
        body = ''.join(' %s\n' % task.name for task in self.tasks)
        return header + body
def find_gtest_tests(items):
    """Yield a Task for each relevant gtest entry in *items*.

    Keeps gl_tests/vulkan_tests and the angle/dawn end2end suites; entries
    whose name contains 'spvc' are skipped.
    """
    for entry in items:
        test_name = entry['name'] if 'name' in entry else entry['test']
        if 'spvc' in test_name:
            continue
        wanted = (test_name in ['gl_tests', 'vulkan_tests'] or
                  match_any(['angle_end2end', 'dawn_end2end'],
                            lambda prefix: test_name.startswith(prefix)))
        if not wanted:
            continue
        task = Task(test_name)
        task.test_args = entry['args']
        if 'swarming' in entry and 'shards' in entry['swarming']:
            task.shards = entry['swarming']['shards']
        yield task
def find_isolated_scripts(items):
    """Yield a Task for each relevant isolated-script entry, normalizing args.

    Browser args are extracted from --extra-browser-args / --additional-driver-flag;
    per-bot noise (--browser, --target, logging flags, expectation ids, result
    paths) is stripped so tasks can be compared across bots.
    """
    for item in items:
        name = item['name']
        if match_any(['webgl', 'webgpu', 'angle_perf', 'dawn_perf', 'trace', 'info'], lambda x: name.startswith(x)):
            task = Task(name.replace('perftests', 'perf_tests'))
            skip_next = False
            for arg in item['args']:
                if skip_next:
                    # Previous arg was --expected-*-id; drop its value too.
                    skip_next = False
                elif arg.startswith('--extra-browser-args='):
                    for browser_arg in arg[len('--extra-browser-args='):].split(' '):
                        # Logging/js flags vary per bot and are not compared.
                        if not match_any(['--enable-logging', '--js-flags'], lambda x: browser_arg.startswith(x)):
                            task.browser_args.append(browser_arg)
                elif arg.startswith('--additional-driver-flag='):
                    task.browser_args.append(arg[len('--additional-driver-flag='):])
                elif not match_any(['--browser=', '--target=', '--gtest-benchmark-name'], lambda x: arg.startswith(x)):
                    if arg.startswith('--expected-') and arg.endswith('-id'):
                        skip_next = True
                    if arg.startswith('--read-abbreviated-json-results-from='):
                        # Keep only the flag name; the path differs per bot.
                        arg = arg[:len('--read-abbreviated-json-results-from=')]
                    task.test_args.append(arg)
            if 'swarming' in item and 'shards' in item['swarming']:
                task.shards = item['swarming']['shards']
            yield task
def find_tryjob(config_file):
    """Yield a TryJob for each relevant Intel GPU bot in *config_file*.

    Bots whose name lacks 'intel', or that match mac/x86/ozone/deqp/angle/skia,
    are skipped. Bot names are expected to look like '<name>-(<gpu>)'.
    """
    for name, value in read_json(config_file).items():
        name = name.lower().replace(' ', '-')
        if (not 'intel' in name or
            match_any(['mac', 'x86', 'ozone', 'deqp', 'angle', 'skia'], lambda x: x in name)):
            continue
        match = re_match(r'^(.*)-\((.*)\)$', name)
        name, gpu = match.group(1), match.group(2)
        if 'linux' in name:
            platform = 'linux'
        elif 'win10' in name:
            platform = 'win'
        else:
            # BUG FIX: 'platform' was previously left unbound for names that
            # are neither linux nor win10 (e.g. a future win11 bot), raising
            # NameError below. Skip unrecognized platforms instead.
            continue
        name = name.replace('linux-', '').replace('win10-', '').replace('x64-', '')
        name = name.replace('experimental', 'exp').replace('release', 'rel')
        tryjob = TryJob(name, platform, gpu)
        if 'gtest_tests' in value:
            for task in find_gtest_tests(value['gtest_tests']):
                tryjob.tasks.append(task)
        if 'isolated_scripts' in value:
            for task in find_isolated_scripts(value['isolated_scripts']):
                tryjob.tasks.append(task)
        # Only jobs that actually run something are reported.
        if tryjob.tasks:
            yield tryjob
def main():
    """Compare buildbot tryjob definitions against the local tryjob config and
    report missing tests, shard mismatches, and argument mismatches."""
    args = parse_arguments()
    def handle_error(error):
        # Print every error; optionally mail it to the configured admin.
        print(error)
        if args.email:
            send_email(args.receiver, error)
    # defaultdict / path / find_match are assumed to come from the util
    # wildcard imports at the top of the file -- TODO confirm.
    total_jobs, total_tasks = defaultdict(list), defaultdict(dict)
    for config_file in BUILDBOT_CONFIG:
        for tryjob in find_tryjob(path.join(args.src_dir, config_file)):
            total_jobs[tryjob.name].append(tryjob)
            for task in tryjob.tasks:
                # Last writer wins if two jobs on one platform share a task name.
                total_tasks[task.name][tryjob.platform] = task
    if args.print_job:
        for name, tryjobs in total_jobs.items():
            print(name)
            for tryjob in tryjobs:
                print(tryjob)
    if args.print_task:
        for name, tasks in total_tasks.items():
            print(name)
            # Show win/linux separately only when they differ.
            if 'win' in tasks and 'linux' in tasks and tasks['win'] != tasks['linux']:
                print(' [Win]\n%s' % tasks['win'])
                print(' [Linux]\n%s' % tasks['linux'])
            elif 'win' in tasks:
                print(tasks['win'])
            elif 'linux' in tasks:
                print(tasks['linux'])
    for name, tasks in total_tasks.items():
        if not name in args.name_to_type:
            handle_error('Missing test: ' + name)
            continue
        module, backend = args.name_to_type[name]
        # Build lookup keys of increasing specificity: module, module_b1, module_b1_b2, ...
        test_keys = [module] + backend.split('_')
        test_keys = ['_'.join(test_keys[0:i]) for i in range(1, len(test_keys)+1)]
        for platform, task in tasks.items():
            if task.shards:
                key = find_match(test_keys, lambda x: x in args.shards)
                if not key or task.shards != args.shards[key]:
                    handle_error('Shard number mismatch: %s on %s' % (task.name, platform))
            # Expected args = union over all matching keys plus platform-specific keys.
            test_args, browser_args = [], []
            for key in test_keys + ['%s_%s' % (x, platform) for x in test_keys]:
                test_args += args.test_args.get(key, [])
                browser_args += args.browser_args.get(key, [])
            # Bot args must be a subset of the expected args.
            if not set(task.test_args) <= set(test_args):
                handle_error('Test argument mismatch: %s on %s' % (task.name, platform))
            if not set(task.browser_args) <= set(browser_args):
                handle_error('Browser argument mismatch: %s on %s' % (task.name, platform))
if __name__ == '__main__':
    # NOTE(review): main() returns None, so this always exits 0; 'sys' must be
    # provided by the wildcard util imports above -- confirm.
    sys.exit(main())
|
class Customer:
    """A shopper with a name, available funds, and (eventually) a bought bike."""

    def __init__(self, name, fund):
        self.name = name
        self.fund = fund
        # Filled in by BikeShop.sell() once a purchase happens.
        self.bike = None
class Bicycle:
    """A bike model with its weight (lb) and wholesale cost ($)."""

    def __init__(self, model, weight, cost):
        self.model = model
        self.weight = weight
        self.cost = cost

    def __repr__(self):
        # One-line listing: model, cost, then weight.
        return "The {0} | Cost: ${1}, Weight: {2}lb".format(
            self.model, self.cost, self.weight)
class BikeShop:
    """A shop that marks wholesale bikes up by a percentage margin and tracks
    the profit made on each sale."""

    def __init__(self, name, margin, bikes):
        self.name = name
        self.margin = margin  # percent markup over wholesale cost
        self.profit = 0
        self.inventory = {}
        for bike in bikes:
            # Attach markup/price to the bike itself, then index it by model.
            bike.markup = int((bike.cost / 100.0) * self.margin)
            bike.price = bike.cost + bike.markup
            self.inventory[bike.model] = bike

    def __repr__(self):
        listing = "\n".join(str(bike) for bike in self.inventory.values())
        return "\n{0} (${1} profit)\n{2}\n".format(self.name, self.profit, listing)

    def filter(self, budget):
        """Return the in-stock bikes priced within *budget*."""
        return [bike for bike in self.inventory.values() if bike.price <= budget]

    def sell(self, bike, customer):
        """Give *bike* to *customer*, charge them, and book the markup as profit."""
        customer.bike = bike
        customer.fund -= bike.price
        self.profit += bike.markup
        del self.inventory[bike.model]
import sublime
import sys
import io
from datetime import datetime as dt
from unittest import TestCase
from unittest.mock import Mock, patch
from code.SublimePlugin import codeTime
codeTime1 = sys.modules["SE_Fall20_Project-1.code.SublimePlugin.codeTime"]
class TestFunctions(TestCase):
    """Unit tests for the codeTime plugin's activate/deactivate hooks."""

    @patch('time.time', return_value=100)
    def test_when_activated(self, mock_time):
        view = Mock()
        # BUG FIX: the plugin reads view.file_name() (as the sibling test below
        # shows), so stub file_name -- the previous 'view.filename' set an
        # unrelated Mock attribute and the stubbed name was never used.
        view.file_name.return_value = "sample.txt"
        codeTime1.when_activated(view)
        view.window.assert_called_once()

    def test_when_deactivated(self):
        view = Mock()
        view.file_name.return_value = "sample.txt"
        curr_date = dt.now().strftime('%Y-%m-%d')
        codeTime1.file_times_dict[curr_date] = {'sample.txt': ["1234", None]}
        # NOTE(review): this asserts the view Mock itself was *called*, but
        # nothing here ever calls it -- the test probably meant to invoke
        # codeTime1.when_deactivated(view) first and assert on its effects.
        # TODO confirm the intended behavior before changing the assertion.
        view.assert_called_once()
|
def celsius_2_fahrenhit(celsius):
    """Convert a Celsius temperature to Fahrenheit.

    Returns the Fahrenheit value as a float, or an explanatory error string
    when *celsius* is below absolute zero (-273.15 C).
    """
    if celsius < -273.15:
        # BUG FIX: the original message ran words together
        # ("possibletemperature", "physicalmatter") and read awkwardly.
        return ("the lowest possible temperature that physical matter can "
                "reach is -273.15C; values less than that are not allowed")
    else:
        fahrenhit = celsius * (9/5) + 32
        return fahrenhit
#print(celsius_2_fahrenhit(-32433))
temprature_list = [10,-20,-289,100]
# BUG FIX: the file was re-opened in append mode on every iteration; open it
# once and write all converted values inside a single context manager.
with open('answers.txt', 'a+') as file:
    for i in temprature_list:
        file.write(str(celsius_2_fahrenhit(i)) + '\n')
def string_length(str):
    """Return the length of *str*.

    Numeric inputs get a friendly error string instead of a TypeError.
    (The parameter name shadows the builtin ``str``; kept for backward
    compatibility with keyword callers.)
    """
    # BUG FIX: use isinstance instead of type(...) == ... so bools and other
    # int/float subclasses are handled instead of crashing len().
    if isinstance(str, int):
        return "Sorry integers don't have length"
    elif isinstance(str, float):
        return "Sorry floats don't have length"
    return len(str)
# Interactive demo: echo the length of the name typed by the user.
name = input('what is your name: ')
print(string_length(name))
|
from Heap import MinHeap
def k_heap_sort(conjunto, k):
    """Return the k-th smallest element of *conjunto* (1-indexed).

    Builds a min-heap over the whole collection, discards the first k-1
    minima, and returns the next one. Per the original docstring, if k
    exceeds the collection size the result is None -- this relies on
    MinHeap.sacar_primero() returning None on an empty heap (TODO confirm
    against Heap.MinHeap).
    """
    heap = MinHeap()
    heap.heapify(conjunto)
    # BUG FIX: xrange is Python-2-only and raised NameError on Python 3;
    # range behaves identically here on both versions. The unused loop
    # variable is now the conventional '_'.
    for _ in range(k - 1):
        heap.sacar_primero()
    return heap.sacar_primero()
|
import matplotlib.pyplot as plt
# Paths to the train/test check-in corpora: one user per line, each line a
# space-separated sequence of integer place ids.
trainFileStr = "D:/Research/Dataset/checkin/user_checkin_above_10x10x5_us_train - Copy.txt"
testFileStr = "D:/Research/Dataset/checkin/user_checkin_above_10x10x5_us_test - Copy.txt"
counts = []        # per-user fraction of test places already seen in training
trains = []        # per-user train sequence (with repeats)
trainSets = []     # per-user set of distinct train places
docTrainLens = []  # per-user train sequence length
tests = []
testSets = []
docTestLens = []
a = []             # visit count per (user, place) pair, for the histogram
# Load training documents; record each document, its length, its distinct
# places, and per-place visit counts.
with open(trainFileStr, "r") as trainFile:
    for line in trainFile:
        doc = list(map(int, line.rstrip().split(" ")))
        docTrainLens.append(len(doc))
        trains.append(doc)
        places = set(doc)
        trainSets.append(places)
        for place in places:
            a.append(doc.count(place))
# Load test documents the same way (no visit counting).
with open(testFileStr, "r") as testFile:
    for line in testFile:
        doc = list(map(int, line.rstrip().split(" ")))
        docTestLens.append(len(doc))
        places = set(doc)
        testSets.append(places)
        tests.append(doc)
# For each user, measure how much of the test set overlaps the training set.
# Assumes both files list the same users in the same order and every user has
# at least one test place (otherwise ZeroDivisionError) -- TODO confirm.
full_match_count = 0
match_places_len = 0
test_len = 0
for i in range(0, len(trainSets)):
    # Places visited exactly once in training (currently only printed).
    oneTimePlaces = []
    for word in trainSets[i]:
        if trains[i].count(word) == 1:
            oneTimePlaces.append(word)
    # match_places = set(oneTimePlaces) & testSets[i]
    match_places = trainSets[i] & testSets[i]
    match_places_len += len(match_places)
    test_len += len(testSets[i])
    if len(match_places) == len(testSets[i]):
        full_match_count += 1
    counts.append(len(match_places) / len(testSets[i]))
    # Tab-separated per-user stats.
    print(docTrainLens[i], end="\t")
    print(len(trainSets[i]), end="\t")
    print(len(oneTimePlaces), end="\t")
    print(docTestLens[i], end="\t")
    print(len(testSets[i]), end="\t")
    print(len(match_places), end="\t")
    print(len(set(oneTimePlaces) & testSets[i]))
# print(full_match_count)
# print(match_places_len)
# print(test_len)
# print(a)
# Histogram of per-(user, place) visit counts.
n, bins, patches = plt.hist(a, 100, density=False, facecolor='g', alpha=0.75)
# plt.axis([0, 1, 0, 10000])
plt.grid(True)
# plt.show()
|
from docassemble.base.functions import define, defined, value, comma_and_list, word, comma_list, DANav, url_action, showifdef
from docassemble.base.util import Address, Individual, DAEmpty, DAList, Thing, DAObject, Person
from docassemble.assemblylinewizard.interview_generator import map_names
class AddressList(DAList):
    """Store a list of Address objects"""

    def init(self, *pargs, **kwargs):
        super(AddressList, self).init(*pargs, **kwargs)
        self.object_type = Address

    def __str__(self):
        # Render each address on one line and join with commas and "and".
        one_liners = [address.on_one_line() for address in self]
        return comma_and_list(one_liners)
class VCBusiness(Person):
    """A legal entity, like a school, that is not an individual. Has a .name.text
    attribute that must be defined to reduce to text. No behavior beyond Person."""
    pass
class VCBusinessList(DAList):
    """Store a list of VCBusinesses. Includes method .names_and_addresses_on_one_line to reduce to a semicolon-separated list"""

    def init(self, *pargs, **kwargs):
        super(VCBusinessList, self).init(*pargs, **kwargs)
        self.object_type = VCBusiness

    def names_and_addresses_on_one_line(self, comma_string='; '):
        """Returns the name of each business followed by their address, separated by a semicolon"""
        entries = [str(person) + ", " + person.address.on_one_line() for person in self]
        return comma_and_list(entries, comma_string=comma_string)
class PeopleList(DAList):
    """Used to represent a list of people. E.g., defendants, plaintiffs, children"""
    def init(self, *pargs, **kwargs):
        super(PeopleList, self).init(*pargs, **kwargs)
        self.object_type = VCIndividual
    def names_and_addresses_on_one_line(self, comma_string='; '):
        """Returns the name of each person followed by their address, separated by a semicolon"""
        return comma_and_list([str(person) + ', ' + person.address.on_one_line() for person in self], comma_string=comma_string)
    def familiar(self):
        """Return everyone's familiar name, comma-joined with 'and'."""
        return comma_and_list([person.name.familiar() for person in self])
    def familiar_or(self):
        """Like familiar(), but joined with 'or' instead of 'and'."""
        # NOTE(review): word("ord") looks like a typo for word("or") -- confirm
        # against the translation dictionary before changing this runtime string.
        return comma_and_list([person.name.familiar() for person in self],and_string=word("ord"))
class UniquePeopleList(PeopleList):
    """A PeopleList intended to hold each person only once; currently adds no
    behavior of its own -- the distinct type lets interviews treat it differently."""
    pass
class VCIndividual(Individual):
    """Used to represent an Individual on the assembly line/virtual court project.
    Two custom attributes are objects and so we need to initialize: `previous_addresses`
    and `other_addresses`
    """
    def init(self, *pargs, **kwargs):
        super(VCIndividual, self).init(*pargs, **kwargs)
        # Initialize the attributes that are themselves objects. Requirement to work with Docassemble
        # See: https://docassemble.org/docs/objects.html#ownclassattributes
        if not hasattr(self, 'previous_addresses'):
            self.initializeAttribute('previous_addresses', AddressList)
        if not hasattr(self, 'other_addresses'):
            self.initializeAttribute('other_addresses', AddressList)
    def phone_numbers(self):
        """Return the cell and/or other phone numbers as one comma-joined string
        (empty string when neither attribute is set)."""
        nums = []
        if hasattr(self, 'mobile_number') and self.mobile_number:
            nums.append(self.mobile_number + ' (cell)')
        if hasattr(self, 'phone_number') and self.phone_number:
            nums.append(self.phone_number + ' (other)')
        return comma_list(nums)
    def merge_letters(self, new_letters):
        """If the Individual has a child_letters attribute, add the new letters to the existing list"""
        # NOTE(review): filter_letters is not defined or imported in this file --
        # presumably provided by the interview namespace; confirm before reuse.
        if hasattr(self, 'child_letters'):
            self.child_letters = filter_letters([new_letters, self.child_letters])
        else:
            self.child_letters = filter_letters(new_letters)
# TODO: create a class for OtherCases we list on page 1. Is this related
# to the other care/custody proceedings?
class OtherCase(Thing):
    """Placeholder for an 'other case' listed on page 1; no behavior of its own yet."""
    pass
class OtherProceeding(DAObject):
"""Currently used to represents a care and custody proceeding."""
def init(self, *pargs, **kwargs):
super(OtherProceeding, self).init(*pargs, **kwargs)
if not hasattr(self, 'children'):
self.initializeAttribute('children', PeopleList)
if not hasattr(self, 'attorneys'):
self.initializeAttribute('attorneys', PeopleList)
if not hasattr(self, 'attorneys_for_children'):
self.initializeAttribute('attorneys_for_children', PeopleList)
if not hasattr(self, 'other_parties'):
self.initializeAttribute('other_parties', PeopleList)
if not hasattr(self, 'gals'):
self.initializeAttribute('gals', GALList.using(ask_number=True))
# We use a property decorator because Docassemble expects this to be an attribute, not a method
@property
def complete_proceeding(self):
"""Tells docassemble the list item has been gathered when the variables named below are defined."""
# self.user_role # Not asked for adoption cases
self.case_status
self.children.gathered
self.other_parties.gather()
if self.is_open:
self.atty_for_user
if self.atty_for_children:
if len(self.children) > 1:
self.attorneys_for_children.gather()
if self.has_gal:
self.gals.gather()
else:
self.gals.auto_gather=True
self.gals.gathered=True
return True
# We're going to gather this per-attorney instead of
# per-case now
#if self.case_status == 'pending':
# self.attorneys.gather()
def child_letters(self):
"""Return ABC if children lettered A,B,C are part of this case"""
return ''.join([child.letter for child in self.children if child.letter])
def status(self):
"""Should return the status of the case, suitable to fit on Section 7 of the affidavit disclosing care or custody"""
if self.case_status in ['adoption',"adoption-pending", "adoption-closed"]:
return 'Adoption'
elif hasattr(self, 'custody_awarded') and self.custody_awarded:
return "Custody awarded to " + self.person_given_custody + ", " + self.date_of_custody.format("yyyy-MM-dd")
elif not self.is_open:
return "Closed"
elif self.is_open:
return 'Pending'
else:
return self.case_status.title()
def role(self):
    """Return the letter representing user's role in the case.

    Adoption cases have no role letter, so return '' for those.
    (An identical duplicate definition of this method was removed.)
    """
    if self.case_status == 'adoption':
        return ''
    return self.user_role
def case_description(self):
    """Returns a short description of the other case or proceeding meant to display to identify it
    during list gathering in the course of the interview"""
    description = ""
    description += self.case_status.title() + " case in "
    description += self.court_name
    # Only mention the docket number when it is present and non-blank.
    if hasattr(self, 'docket_number') and len(self.docket_number.strip()):
        description += ', case number: ' + self.docket_number
    description += " (" + str(self.children) + ")"
    return description
def __str__(self):
    return self.case_description()
class OtherProceedingList(DAList):
    """Represents a list of care and custody proceedings"""
    def init(self, *pargs, **kwargs):
        super(OtherProceedingList, self).init(*pargs, **kwargs)
        self.object_type = OtherProceeding
        # Docassemble treats each list item as gathered once its
        # `complete_proceeding` property is defined.
        self.complete_attribute = 'complete_proceeding'
    def includes_adoption(self):
        """Returns True if any of the listed proceedings was an adoption proceeding.

        (Previously fell off the end and returned None on a miss; now
        explicitly returns False — backward-compatible, both are falsy.)
        """
        return any(case.case_status == 'adoption' for case in self.elements)
    def get_gals(self, intrinsic_name):
        """Collect the GALs of every case that has one into a single GALList.

        A GAL who represented all of a case's children gets that case's
        full children list assigned; GALList.append() merges duplicates.
        """
        GALs = GALList(intrinsic_name, auto_gather=False, gathered=True)
        for case in self:
            if case.has_gal:
                for gal in case.gals:
                    if gal.represented_all_children:
                        gal.represented_children = case.children
                    # NOTE(review): every GAL of the case is appended, not only
                    # those who represented all children — confirm against the
                    # original indentation.
                    GALs.append(gal, set_instance_name=True)
        return GALs
class GAL(VCIndividual):
    """This object has a helper for printing itself in PDF, as well as a way to merge attributes for duplicates"""
    def status(self):
        """Name followed by the represented children in parentheses."""
        children_text = comma_and_list(self.represented_children)
        return '{} ({})'.format(str(self), children_text)
    def is_match(self, new_gal):
        """Two GALs match when their printed names are identical."""
        return str(new_gal) == str(self)
    def merge(self, new_gal):
        """Fold another GAL's represented children into this one's list."""
        combined = self.represented_children.union(new_gal.represented_children)
        self.represented_children = PeopleList(elements=combined)
class GALList(PeopleList):
    """For storing a list of Guardians ad Litem in Affidavit of Care and Custody"""
    def init(self, *pargs, **kwargs):
        super(GALList, self).init(*pargs, **kwargs)
        self.object_type = GAL
    def append(self, new_item, set_instance_name=False):
        """Only append if this GAL has a unique name; otherwise merge into the match."""
        found_existing = False
        for existing in self:
            if existing.is_match(new_item):
                found_existing = True
                # Same name: combine the lists of represented children instead
                existing.merge(new_item)
        if found_existing:
            return None
        return super().append(new_item, set_instance_name=set_instance_name)
def get_signature_fields(interview_metadata_dict):
    """Returns a list of the signature fields in the list of fields, based on assembly line naming conventions"""
    # Both built-in and custom fields can contain signatures.
    candidate_fields = (interview_metadata_dict.get('built_in_fields_used', [])
                        + interview_metadata_dict.get('fields', []))
    mapped_names = (map_names(entry.get('variable', '')) for entry in candidate_fields)
    return [name for name in mapped_names if name.endswith('.signature')]
def number_to_letter(n):
    """Returns a capital letter representing ordinal position. E.g., 1=A, 2=B, etc. Appends letters
    once you reach 26 in a way compatible with Excel/Google Sheets column naming conventions. 27=AA, 28=AB...

    None (or 0) yields the empty string.
    """
    remaining = 0 if n is None else n
    letters = []
    while remaining > 0:
        # divmod on (remaining - 1) gives a proper bijective base-26 digit.
        remaining, offset = divmod(remaining - 1, 26)
        letters.append(chr(ord('A') + offset))
    return ''.join(reversed(letters))
def mark_unfilled_fields_empty(interview_metadata_dict):
    """Sets the listed fields that are not yet defined to an empty string. Requires an interview metadata dictionary
    as input parameter. Aid for filling in a PDF. This will not set ALL fields in the interview empty;
    only ones mentioned in the specific metadata dict."""
    # There are two dictionaries in the interview-specific metadata that list fields:
    # `fields` and `built_in_fields_used`. Some older interviews may use `field_list`.
    # Use .get() with a default so interviews missing any one key don't raise KeyError
    # (consistent with get_signature_fields).
    field_dicts = (interview_metadata_dict.get('field_list', [])
                   + interview_metadata_dict.get('built_in_fields_used', [])
                   + interview_metadata_dict.get('fields', []))
    # We loop over each field and check if it's already defined in the Docassemble
    # interview's namespace. If it isn't, we set it to an empty string.
    for field_dict in field_dicts:
        try:
            field = field_dict['variable']  # Our list of fields is a dictionary
        except (KeyError, TypeError):
            continue
        mapped = map_names(field)
        # Make sure we don't try to define a method
        # Also, don't set any signatures to DAEmpty
        # This will not work on a method call, other than the 3 we explicitly handle:
        # address.line_two(), address.on_one_line(), and address.block()
        # Empty strings will break this
        if field and not mapped.endswith('.signature') and '(' not in mapped:
            if not defined(mapped):
                define(mapped, '')  # set to an empty string
        # Handle special case of an address that we skipped filling in on the form.
        # For each address method, a different attribute tells us whether the
        # address was actually gathered.
        elif mapped.endswith('address.on_one_line()'):
            _empty_address_if_undefined(mapped, '.on_one_line', '.address')
        elif mapped.endswith('address.line_two()'):
            _empty_address_if_undefined(mapped, '.line_two', '.city')
        elif mapped.endswith('address.block()'):
            _empty_address_if_undefined(mapped, '.block', '.address')
def _empty_address_if_undefined(mapped_name, method_suffix, sentinel_attribute):
    """Set the address object named in `mapped_name` to DAEmpty() when its
    `sentinel_attribute` (e.g. '.address' or '.city') is undefined."""
    address_obj_name = mapped_name.partition(method_suffix)[0]
    if not defined(address_obj_name + sentinel_attribute):
        # NOTE(review): exec of "obj.attr = DAEmpty()" needs the object in this
        # scope; the original wrapped it in a bare try/except for that reason.
        try:
            exec(address_obj_name + "= DAEmpty()")
        except Exception:
            pass
def filter_letters(letter_strings):
    """Used to take a list of letters like ["A","ABC","AB"] and filter out any duplicate letters.

    Accepts a single string or an iterable of strings (None entries are
    skipped) and returns the unique letters, sorted, as one string.
    """
    # Bug fix: the original's `except` branch assigned `reval` (typo), so any
    # failure raised NameError on `return retval`. The try/except around the
    # join was unnecessary anyway, so it has been removed.
    if isinstance(letter_strings, str):
        letter_strings = [letter_strings]
    unique_letters = set()
    for string in letter_strings:
        if string:  # Catch possible None values
            unique_letters.update(string)
    return ''.join(sorted(unique_letters))
def yes_no_unknown(var_name, condition, unknown="Unknown", placeholder=0):
    """Return 'unknown' if the value is None rather than False. Helper for PDF filling with
    yesnomaybe fields"""
    # None means "maybe": the user couldn't answer yes or no.
    if condition is None:
        return unknown
    if condition:
        return value(var_name)
    return placeholder
def section_links(nav):
    """Returns a list of clickable navigation links without animation."""
    links = []
    for section in nav.get_sections():
        # Each section is a mapping of action key -> display label.
        for key, label in section.items():
            links.append('[{}]({})'.format(label, url_action(key)))
    return links
def space(var_name, prefix=' ', suffix=''):
    """If the value as a string is defined, return it prefixed/suffixed. Defaults to prefix
    of a space. Helps build a sentence with less cruft. Equivalent to SPACE function in
    HotDocs."""
    # Guard clause: an undefined variable contributes nothing to the sentence.
    if not defined(var_name):
        return ''
    return prefix + showifdef(var_name) + suffix
def _suffix_maxima(values, count):
    """For each i in 0..count-2, the maximum of values[i+1:count]; a trailing 0 is appended.

    Single right-to-left pass, O(count), replacing the original O(count^2)
    nested rescan (which also shadowed the builtin `max`). Mirrors the
    original's edge behavior: count <= 1 yields just [0].
    """
    if count <= 0:
        return [0]
    result = [0] * count  # last slot stays 0 by the problem's convention
    running = values[count - 1]
    for i in range(count - 2, -1, -1):
        result[i] = running
        if values[i] > running:
            running = values[i]
    return result
def func():
    """Read an integer count, then a line of integers; print, for each
    position, the maximum of the elements strictly to its right (0 for
    the last position)."""
    n = int(input())
    l = [int(x) for x in input().split()]
    print(*_suffix_maxima(l, n))
op = func()  # func() prints and returns None; the name is kept from the original script
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.