text stringlengths 38 1.54M |
|---|
import pytest
import json
from unittest.mock import Mock, call
from app.libs.redis_cache import RedisCacheException
from app.libs.api import CachedApi
# Base URL of the mocked API host used by every fixture below.
MOCK_URL = 'https://host'
# Raw JSON payloads returned by the mock cache / mock HTTP session.
# They are kept as strings because the cache stores raw response bodies.
RESPONSE_CATEGORY1 = '{"categoryId": 1, "title": "Category1 title"}'
RESPONSE_CATEGORY2 = '{"categoryId": 2, "title": "Category2 title"}'
RESPONSE_CATEGORIES = '[%s, %s]' % (RESPONSE_CATEGORY1, RESPONSE_CATEGORY2)
RESPONSE_PRODUCT1 = '{"categoryId": 3, "productId": 1, "title": "Product1 title"}'
RESPONSE_PRODUCT2 = '{"categoryId": 3, "productId": 2, "title": "Product2 title"}'
RESPONSE_PRODUCTS = '[%s, %s]' % (RESPONSE_PRODUCT1, RESPONSE_PRODUCT2)
# Offers for product 1: two shops with different prices/descriptions.
RESPONSE_OFFERS_PRODUCT1 = json.dumps([
    {
        'offerId': 1,
        'productId': 1,
        'title': 'Product1 Offer1',
        'description': 'Product1 Offer1 description',
        'url': 'http://shop1.cz/product/1',
        'img_url': 'http://image-1-1',
        'price': 10.0
    }, {
        'offerId': 2,
        'productId': 1,
        'title': 'Product1 Offer2',
        'description': 'Product1 Offer2 longer description',
        'url': 'http://shop2.cz/product/1',
        'img_url': 'http://image-1-2',
        'price': 100.5
    }
])
# Offers for product 2: a single shop.
RESPONSE_OFFERS_PRODUCT2 = json.dumps([
    {
        'offerId': 3,
        'productId': 2,
        'title': 'Product2 Offer3',
        'description': 'Product2 Offer3 description',
        'url': 'http://shop3.cz/product/2',
        'img_url': 'http://image-2-3',
        'price': 150.5
    }
])
class MockEmptyCache:
    """Cache double that never holds a value.

    ``get`` always raises ``RedisCacheException`` (a cache miss) and
    ``set`` is a plain ``Mock`` so tests can inspect what was written.
    """

    def __init__(self):
        cache_miss = RedisCacheException('Not found in cache')
        self.get = Mock(side_effect=cache_miss)
        self.set = Mock()
class MockFullCache:
    """Cache double pre-populated with every response the tests need."""

    # Endpoint path -> raw cached JSON body.
    RESPONSES_DICT = {
        '/categories': RESPONSE_CATEGORIES,
        '/category/1': RESPONSE_CATEGORY1,
        '/category/2': RESPONSE_CATEGORY2,
        '/product/1': RESPONSE_PRODUCT1,
        '/product/2': RESPONSE_PRODUCT2,
        '/products/3/0/5': RESPONSE_PRODUCTS,
        '/offers/1/0/1000000': RESPONSE_OFFERS_PRODUCT1,
        '/offers/2/0/1000000': RESPONSE_OFFERS_PRODUCT2
    }

    def __init__(self):
        self.set = Mock()

    def get(self, key):
        # A KeyError here means a test asked for an endpoint that was
        # never expected to be cached.
        return self.RESPONSES_DICT[key]
class MockSession:
    """HTTP session double returning canned responses keyed by full URL.

    The URL table is the same endpoint table as :class:`MockFullCache`,
    prefixed with ``MOCK_URL``.
    """

    RESPONSES_DICT = {
        MOCK_URL + path: body
        for path, body in MockFullCache.RESPONSES_DICT.items()
    }

    def get(self, url):
        body = self.RESPONSES_DICT[url]
        # Mimic the subset of a requests.Response the code under test uses.
        response = Mock()
        response.text = body
        response.json = Mock(return_value=json.loads(body))
        return response
class TestApi:
    """
    Tests Api class
    - data are read from cache if present in cache
    - data are loaded from api and cached if not present in cache already
    """
    def init_api(self, data_cached):
        """
        Init Api object for the test
        :param data_cached: True - all data are in cache for the test, False - no data are in cache for the test
        """
        if data_cached:
            # Full cache + no session: any HTTP call would crash the test,
            # proving the API served everything from cache.
            self.mock_cache = MockFullCache()
            session = None
        else:
            self.mock_cache = MockEmptyCache()
            session = MockSession()
        self.api = CachedApi(MOCK_URL, self.mock_cache, session)

    @pytest.mark.parametrize('data_cached', [True, False])
    def test_fetch_category(self, data_cached):
        """
        Test fetching of one category from API/cache
        :param data_cached: True - all data are in cache for the test, False - no data are in cache for the test
        """
        self.init_api(data_cached)
        category = self.api.fetch_category(1)
        assert category.obj_id == 1
        assert category.title == 'Category1 title'
        if data_cached:
            assert self.mock_cache.set.mock_calls == []
        else:
            # check that api call was also cached
            assert self.mock_cache.set.mock_calls == [
                call('/category/1', RESPONSE_CATEGORY1)
            ]

    @pytest.mark.parametrize('data_cached', [True, False])
    def test_fetch_all_categories(self, data_cached):
        """
        Test fetching of all categories from API/cache
        :param data_cached: True - all data are in cache for the test, False - no data are in cache for the test
        """
        self.init_api(data_cached)
        all_categories = self.api.fetch_all_categories()
        assert len(all_categories.categories) == 2
        assert all_categories.categories[0].obj_id == 1
        assert all_categories.categories[0].title == 'Category1 title'
        assert all_categories.categories[1].obj_id == 2
        assert all_categories.categories[1].title == 'Category2 title'
        if data_cached:
            # data were already in cache, nothing stored to cache
            assert self.mock_cache.set.mock_calls == []
        else:
            # check that not only all categories call but also individual categories calls were cached
            # NOTE: the assertion is order-sensitive on purpose - it pins the
            # caching sequence performed by CachedApi.
            assert self.mock_cache.set.mock_calls == [
                call('/categories', RESPONSE_CATEGORIES),
                call('/category/1', RESPONSE_CATEGORY1),
                call('/category/2', RESPONSE_CATEGORY2)
            ]

    @pytest.mark.parametrize('data_cached', [True, False])
    def test_fetch_one_product(self, data_cached):
        """
        Test fetching of one product in a category from API/cache
        :param data_cached: True - all data are in cache for the test, False - no data are in cache for the test
        """
        self.init_api(data_cached)
        product = self.api.fetch_product(1)
        assert product.id == 1
        assert product.title == 'Product1 title'
        assert product.category_id == 3
        # description comes from the longest offer description,
        # image urls from all offers (per the fixture data).
        assert product.description == 'Product1 Offer2 longer description'
        assert product.image_urls == ['http://image-1-1', 'http://image-1-2']
        (offer1, offer2) = product.offers
        assert offer1.id == 1
        assert offer1.price == 10.0
        assert offer1.url == 'http://shop1.cz/product/1'
        assert offer1.shop_name == 'shop1'
        assert offer2.id == 2
        assert offer2.price == 100.5
        assert offer2.url == 'http://shop2.cz/product/1'
        assert offer2.shop_name == 'shop2'
        if data_cached:
            # data were already in cache, nothing stored to cache
            assert self.mock_cache.set.mock_calls == []
        else:
            # check that product call and offers call were cached
            assert self.mock_cache.set.mock_calls == [
                call('/product/1', RESPONSE_PRODUCT1),
                call('/offers/1/0/1000000', RESPONSE_OFFERS_PRODUCT1)
            ]

    @pytest.mark.parametrize('data_cached', [True, False])
    def test_fetch_products(self, data_cached):
        """
        Test fetching of a batch of products in a category from API/cache
        :param data_cached: True - all data are in cache for the test, False - no data are in cache for the test
        """
        self.init_api(data_cached)
        (product1, product2) = self.api.fetch_products(category_id=3, offset=0, limit=5)
        # check only a few things for product 1, it's enough
        assert product1.id == 1
        assert product1.title == 'Product1 title'
        assert len(product2.offers) == 1
        # check everything for product 2
        assert product2.id == 2
        assert product2.title == 'Product2 title'
        assert product2.category_id == 3
        assert product2.description == 'Product2 Offer3 description'
        assert product2.image_urls == ['http://image-2-3']
        # check offers for product 2
        assert len(product2.offers) == 1
        offer3 = product2.offers[0]
        assert offer3.id == 3
        assert offer3.price == 150.5
        assert offer3.url == 'http://shop3.cz/product/2'
        assert offer3.shop_name == 'shop3'
        if data_cached:
            # data were already in cache, nothing stored to cache
            assert self.mock_cache.set.mock_calls == []
        else:
            # check that not only products call but also each product and their offers calls were cached
            assert self.mock_cache.set.mock_calls == [
                call('/products/3/0/5', RESPONSE_PRODUCTS),
                call('/offers/1/0/1000000', RESPONSE_OFFERS_PRODUCT1),
                call('/product/1', RESPONSE_PRODUCT1),
                call('/offers/2/0/1000000', RESPONSE_OFFERS_PRODUCT2),
                call('/product/2', RESPONSE_PRODUCT2)
            ]
|
from __future__ import print_function
from builtins import range
import os
import sys
import time
import json
#from PIL import Image
# set env var needed by native library
os.environ["MALMO_XSD_PATH"] = os.getcwd() + "/schemas"
import MalmoPython
class Malmo():
    """Convenience wrapper around a MalmoPython agent host.

    Bundles mission setup, simple movement commands, and observation
    polling. Every command sent is followed by a ``self.delay`` sleep.
    """
    def __init__(self):
        init()
        self.ah = create_agent_host()
        self.spec = MalmoPython.MissionSpec()
        self.record = MalmoPython.MissionRecordSpec()
        # seconds to sleep after each command sent to the agent
        self.delay = 0.5
        # frames for save_gif(); only filled by the commented-out PIL code
        # in observe() - NOTE(review): save_gif() will IndexError while that
        # code stays disabled
        self.images = []

    def set_delay(self, n):
        """Set the per-command sleep, in seconds."""
        self.delay = n

    def setup_mission(self, fun):
        """Build the mission spec.

        :param fun: either a path to a mission XML file, or a callable
                    that mutates ``self.spec`` in place.
        """
        if isinstance(fun, str):
            with open(fun, 'r') as mfile:
                self.spec = MalmoPython.MissionSpec(mfile.read(), True)
        else:
            fun(self.spec)
        # always run mission in third person view
        self.spec.setViewpoint(1)
        #print(spec.getAsXML(True))
        #chs = list(spec.getListOfCommandHandlers(0))
        #for ch in chs:
        #    cmds = list(spec.getAllowedCommands(0, ch))
        #    print(ch, cmds)

    def start_mission(self, fun=None):
        """Start the mission (optionally building it first) and block until it begins."""
        if fun is not None:
            self.setup_mission(fun)
        start_mission(self.ah, self.spec, self.record)
        wait_for_mission_start(self.ah)

    def end_mission(self):
        """Block until the running mission finishes."""
        wait_for_mission_end(self.ah)

    def send_command(self, command):
        """Send a raw Malmo command string, then pause for self.delay."""
        self.ah.sendCommand(command)
        time.sleep(self.delay)

    def move(self, n=1):
        """Move n units (negative moves backwards) and wait for an observation."""
        self.send_command("move " + str(n))
        self._wait_for_update()

    def turn(self, n):
        """Turn by n (-1 left, 1 right) and wait for an observation."""
        self.send_command("turn " + str(n))
        self._wait_for_update()

    def turn_left(self):
        self.turn(-1)

    def turn_right(self):
        self.turn(1)

    def observe(self):
        """Return the latest observation dict, or None if the mission ended.

        Busy-waits until both an observation and a video frame are
        available. The 'floor' grid, when present as a 3x3 (9 cells) with a
        yaw, is re-keyed into agent-relative offsets via relative_blocks().
        """
        ws = self.ah.getWorldState()
        while ws.is_mission_running and (len(ws.observations) == 0 or len(ws.video_frames) == 0):
            time.sleep(0.1)
            ws = self.ah.peekWorldState()
        if not ws.is_mission_running:
            return None
        obs = json.loads(ws.observations[-1].text)
        if 'floor' in obs and len(obs['floor']) == 9 and 'Yaw' in obs:
            obs['floor'] = relative_blocks(obs['floor'], obs['Yaw'])
        # frame is currently unused; kept for the disabled PIL capture below
        frame = ws.video_frames[-1]
        #image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels) )
        #self.images.append(image)
        return obs

    def is_running(self):
        """True while the mission is still running."""
        return self.ah.peekWorldState().is_mission_running

    def _wait_for_update(self):
        # Poll until a new observation arrives (or the mission ends), so the
        # next observe() reflects the command just sent.
        while True:
            time.sleep(0.001)
            ws = self.ah.getWorldState()
            if ws.number_of_observations_since_last_state > 0 or not ws.is_mission_running:
                break

    def save_gif(self, path):
        """Write collected frames as an animated GIF (requires the PIL capture code)."""
        self.images[0].save(path, save_all=True, append_images=self.images[1:], duration=300)

    # uses a callback act
    def start_agent(self, act):
        """Run the agent loop, invoking act(obs) for every observation."""
        while self.is_running():
            obs = self.observe()
            if obs is not None:
                act(obs)

    # generator of observations
    def observations(self):
        """Yield observations until the mission ends."""
        while self.is_running():
            obs = self.observe()
            if obs is not None:
                yield obs
def init():
    """Prepare stdout flushing and verify the Malmo environment.

    On Python 2 reopens stdout unbuffered; on Python 3 rebinds the global
    ``print`` to flush immediately. Exits the process if MALMO_XSD_PATH
    is not set (the native library requires it).
    """
    if sys.version_info[0] == 2:
        sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)  # flush print output immediately
    else:
        import functools
        global print
        print = functools.partial(print, flush=True)
    if 'MALMO_XSD_PATH' not in os.environ:
        print('ERROR: MALMO_XSD_PATH not found in env')
        exit(1)
def print_mission_spec():
    """Print the XML of a default (empty) mission spec, for debugging."""
    my_mission = MalmoPython.MissionSpec()
    print(my_mission.getAsXML(True))
# Create default Malmo objects:
def create_agent_host():
    """Create an AgentHost configured from sys.argv.

    Exits with status 1 on bad arguments and 0 when --help is requested.
    """
    agent_host = MalmoPython.AgentHost()
    try:
        agent_host.parse( sys.argv )
    except RuntimeError as e:
        print('ERROR:',e)
        print(agent_host.getUsage())
        exit(1)
    if agent_host.receivedArgument("help"):
        print(agent_host.getUsage())
        exit(0)
    return agent_host
# Attempt to start a mission:
def start_mission(agent_host, my_mission, my_mission_record):
    """Start *my_mission* on *agent_host*, retrying up to three times.

    Sleeps two seconds between attempts; exits the process if every
    attempt raises ``RuntimeError``.
    """
    attempts_left = 3
    while True:
        attempts_left -= 1
        try:
            agent_host.startMission(my_mission, my_mission_record)
            return
        except RuntimeError as err:
            if attempts_left == 0:
                print("Error starting mission:", err)
                exit(1)
            time.sleep(2)
def start_default_mission(agent_host):
    """Start an empty default mission with no recording."""
    my_mission = MalmoPython.MissionSpec()
    my_mission_record = MalmoPython.MissionRecordSpec()
    start_mission(agent_host, my_mission, my_mission_record)
# Loop until mission starts:
# Loop until mission starts:
def wait_for_mission_start(agent_host):
    """Block until the mission begins, printing a dot per 0.1s poll.

    Errors reported with each polled world state are echoed as they appear.
    """
    print("Waiting for the mission to start ", end=' ')
    world_state = agent_host.getWorldState()
    while not world_state.has_mission_begun:
        print(".", end="")
        time.sleep(0.1)
        world_state = agent_host.getWorldState()
        for error in world_state.errors:
            print("Error:",error.text)
    print(" started")
# Loop until mission ends:
def wait_for_mission_end(agent_host):
    """Block until the running mission stops, printing a dot per 0.1s poll."""
    state = agent_host.getWorldState()
    while state.is_mission_running:
        print(".", end="")
        time.sleep(0.1)
        state = agent_host.getWorldState()
        # echo any errors attached to the freshly polled state
        for err in state.errors:
            print("Error:", err.text)
def relative_blocks(blocks, y):
    """Re-key a 3x3 'floor' grid into agent-relative (dx, dz) offsets.

    :param blocks: 9 cells in row-major world order
    :param y: agent yaw, one of 0, 90, 180, 270 (ValueError otherwise)
    :return: dict mapping (dx, dz) offsets to the block at that offset
             from the agent's point of view
    """
    def shifted(seq, n):
        # rotate the list left by n positions
        return seq[n:] + seq[:n]

    headings = [0, 90, 180, 270]
    edge = [7, 3, 1, 5]      # front / right / back / left cell indices
    corner = [6, 0, 2, 8]    # front-right / back-right / back-left / front-left

    # For each relative offset, the grid index to read per heading.
    offset_to_indices = {
        (0, 0): [4, 4, 4, 4],          # the agent's own cell
        (0, 1): shifted(edge, 0),      # front
        (1, 0): shifted(edge, 1),      # right
        (0, -1): shifted(edge, 2),     # back
        (-1, 0): shifted(edge, 3),     # left
        (1, 1): shifted(corner, 0),    # front-right
        (1, -1): shifted(corner, 1),   # back-right
        (-1, -1): shifted(corner, 2),  # back-left
        (-1, 1): shifted(corner, 3),   # front-left
    }

    heading_idx = headings.index(y)
    return {offset: blocks[indices[heading_idx]]
            for offset, indices in offset_to_indices.items()}
|
def sum_proper_divisors(n):
    """Return the sum of the proper divisors of *n* (divisors strictly < n).

    For n <= 1 the scan never runs and the sum is 0, matching the
    original loop's `while (n > 0) and (divisor < n)` guard.
    """
    total = 0
    divisor = 1
    while n > 0 and divisor < n:
        if n % divisor == 0:
            total += divisor
        divisor += 1
    return total


def main():
    """Read two numbers and report whether they form an amicable pair.

    Two numbers are amicable ("AMIGOS") when each equals the sum of the
    proper divisors of the other. Prints both divisor sums, then the
    verdict, exactly as the original script did.
    """
    a = int(input("numero1: "))
    a1 = int(input("numero2: "))
    d = sum_proper_divisors(a)
    d1 = sum_proper_divisors(a1)
    print(d)
    print(d1)
    if (d1 == a) and (d == a1):
        print("AMIGOS")
    else:
        print("NAO AMIGOS")


if __name__ == "__main__":
    main()
# Generated by Django 2.0.5 on 2018-09-03 11:44
from django.db import migrations, models


class Migration(migrations.Migration):
    """Adds the delivery/salad fee column and relaxes two existing fields."""

    dependencies = [
        ('AmadoAccounting', '0032_salarydetail_description'),
    ]

    operations = [
        migrations.AddField(
            model_name='salarydetail',
            name='del_sal_fee',
            # default=1 only backfills existing rows; preserve_default=False
            # removes the default from the model state afterwards.
            field=models.IntegerField(default=1, verbose_name='حق پیک/سالاد'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='salarydetail',
            name='del_sal',
            field=models.IntegerField(verbose_name='پیک/سالاد'),
        ),
        migrations.AlterField(
            model_name='salarydetail',
            name='description',
            # description becomes optional (blank/null allowed, max 512 chars)
            field=models.TextField(blank=True, max_length=512, null=True, verbose_name='توضیحات'),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
import os
import glob
class LibiconvConan(ConanFile):
    """Conan recipe that builds GNU libiconv 1.15 with autotools.

    Supports MSVC (through the GNU ``compile``/``ar-lib`` wrapper scripts),
    MinGW on Windows, and Unix-like hosts. On Windows, MSYS2 supplies the
    POSIX shell the configure script needs.
    """
    name = "libiconv"
    version = "1.15"
    description = "Convert text to and from Unicode"
    url = "https://github.com/bincrafters/conan-libiconv"
    homepage = "https://www.gnu.org/software/libiconv/"
    author = "Bincrafters <bincrafters@gmail.com>"
    topics = "libiconv", "iconv", "text", "encoding", "locale", "unicode", "conversion"
    license = "LGPL-2.1"
    exports = ["LICENSE.md"]
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {'shared': False, 'fPIC': True}
    short_paths = True
    _source_subfolder = "source_subfolder"

    @property
    def _is_mingw_windows(self):
        # native Windows build with MinGW gcc (not cross-compiling)
        return self.settings.os == 'Windows' and self.settings.compiler == 'gcc' and os.name == 'nt'

    @property
    def _is_msvc(self):
        return self.settings.compiler == 'Visual Studio'

    def build_requirements(self):
        # autotools needs a POSIX shell; pull in msys2 unless the user
        # already points CONAN_BASH_PATH at one
        if tools.os_info.is_windows:
            if "CONAN_BASH_PATH" not in os.environ:
                self.build_requires("msys2_installer/latest@bincrafters/stable")

    def configure(self):
        # pure C library: the C++ stdlib setting does not affect the binary
        del self.settings.compiler.libcxx

    def config_options(self):
        if self.settings.os == 'Windows':
            del self.options.fPIC

    def source(self):
        """Download the release tarball (sha256-verified) and unpack it."""
        archive_name = "{0}-{1}".format(self.name, self.version)
        source_url = "https://ftp.gnu.org/gnu/libiconv"
        tools.get("{0}/{1}.tar.gz".format(source_url, archive_name),
                  sha256="ccf536620a45458d26ba83887a983b96827001e92a13847b45e4925cc8913178")
        os.rename(archive_name, self._source_subfolder)

    def _build_autotools(self):
        """Run configure/make/make install with per-platform arguments."""
        prefix = os.path.abspath(self.package_folder)
        rc = None
        host = None
        build = None
        if self._is_mingw_windows or self._is_msvc:
            # configure expects forward slashes even on Windows
            prefix = prefix.replace('\\', '/')
            build = False
            if self.settings.arch == "x86":
                host = "i686-w64-mingw32"
                rc = "windres --target=pe-i386"
            elif self.settings.arch == "x86_64":
                host = "x86_64-w64-mingw32"
                rc = "windres --target=pe-x86-64"
        #
        # If you pass --build when building for iPhoneSimulator, the configure script halts.
        # So, disable passing --build by setting it to False.
        #
        if self.settings.os == "iOS" and self.settings.arch == "x86_64":
            build = False
        env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
        if self.settings.os != "Windows":
            env_build.fpic = self.options.fPIC
        configure_args = ['--prefix=%s' % prefix]
        if self.options.shared:
            configure_args.extend(['--disable-static', '--enable-shared'])
        else:
            configure_args.extend(['--enable-static', '--disable-shared'])
        env_vars = {}
        if self._is_mingw_windows:
            configure_args.extend(['CPPFLAGS=-I%s/include' % prefix,
                                   'LDFLAGS=-L%s/lib' % prefix,
                                   'RANLIB=:'])
        if self._is_msvc:
            # drive configure through the GNU wrapper scripts so the MSVC
            # toolchain looks like a Unix one
            runtime = str(self.settings.compiler.runtime)
            configure_args.extend(['CC=$PWD/build-aux/compile cl -nologo',
                                   'CFLAGS=-%s' % runtime,
                                   'CXX=$PWD/build-aux/compile cl -nologo',
                                   'CXXFLAGS=-%s' % runtime,
                                   'CPPFLAGS=-D_WIN32_WINNT=0x0600 -I%s/include' % prefix,
                                   'LDFLAGS=-L%s/lib' % prefix,
                                   'LD=link',
                                   'NM=dumpbin -symbols',
                                   'STRIP=:',
                                   'AR=$PWD/build-aux/ar-lib lib',
                                   'RANLIB=:'])
            env_vars['win32_target'] = '_WIN32_WINNT_VISTA'
            with tools.chdir(self._source_subfolder):
                tools.run_in_windows_bash(self, 'chmod +x build-aux/ar-lib build-aux/compile')
        if rc:
            configure_args.extend(['RC=%s' % rc, 'WINDRES=%s' % rc])
        with tools.chdir(self._source_subfolder):
            with tools.environment_append(env_vars):
                env_build.configure(args=configure_args, host=host, build=build)
                env_build.make()
                env_build.install()

    def build(self):
        # MSVC needs the vcvars environment for cl/link/lib to resolve
        with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():
            self._build_autotools()

    def package(self):
        self.copy(os.path.join(self._source_subfolder, "COPYING.LIB"),
                  dst="licenses", ignore_case=True, keep_path=False)
        # remove libtool .la files - they have hard-coded paths
        with tools.chdir(os.path.join(self.package_folder, "lib")):
            for filename in glob.glob("*.la"):
                os.unlink(filename)

    def package_info(self):
        if self._is_msvc and self.options.shared:
            # MSVC shared builds emit an import library named iconv.dll.lib
            self.cpp_info.libs = ['iconv.dll.lib']
        else:
            self.cpp_info.libs = ['iconv']
        self.env_info.path.append(os.path.join(self.package_folder, "bin"))
|
"""Worked example of driving a 3DS over the network with tppflush.

Demonstrates button presses, touch screen input, the circle pad and the
N3DS c-stick. Usage: python3 command_examples.py <3ds ip>
"""
import sys, time, math

if sys.version[0] == '2':
    raw_input("This client only works with python 3, and you're using python 2. You can download python 3 from python.org.\nPress enter to exit.")
    quit()

from tppflush import *

if len(sys.argv) < 2:
    input("To run this client, please supply an IP address from the command line: python3 command_examples.py <3ds ip>\nPress enter to exit.")
    quit()

#Create a client by creating a new LumaInputServer with the 3DS's IP address.
serverIP = sys.argv[1]
server = LumaInputServer(serverIP)
time.sleep(5)

#Now, you can press buttons with server.press()!
server.press(HIDButtons.DPADUP)
server.send(print_sent=False)
time.sleep(0.5)
#It's the responsibility of code using TPPFLUSH to send the command to unpress a button.
#That way you can control how long a button is held for.
server.unpress(HIDButtons.DPADUP)
server.send(print_sent=False)
time.sleep(0.5)

#Let's press a few more.
server.press(HIDButtons.A)
server.send(print_sent=False)
time.sleep(0.5)
server.unpress(HIDButtons.A)
server.send(print_sent=False)
time.sleep(0.5)

#Multiple buttons can be pressed at once.
server.press(HIDButtons.L)
server.press(HIDButtons.R)
server.press(HIDButtons.START)
server.press(N3DS_Buttons.ZL) #If an o3DS is connected, this function will do nothing.
server.send(print_sent=False)
time.sleep(0.5)
#You can also release all buttons at once.
server.clear_everything()
time.sleep(0.5)

#The touch screen takes an x and a y coordinate to touch.
#The bottom screen is 320 by 240 pixels big.
server.touch(319,239) #touch the bottom-right of the screen
server.send(print_sent=False)
time.sleep(0.5)
#You can only touch one location at a time. Touching a different location will overwrite the previous coordinates.
server.touch(150,120) #touch the middle of the screen
server.send(print_sent=False)
time.sleep(0.5)
server.clear_touch()
server.send(print_sent=False)
time.sleep(0.5)

#The circle pad works too!
server.circle_pad_set(CPAD_Commands.CPADUP)
server.send(print_sent=False)
time.sleep(0.5)
server.circle_pad_neutral()
server.send(print_sent=False)
time.sleep(0.5)

#The circle_pad_set function also takes a multiplier argument from -1 to 1, so you can use the circle pad without pushing it all the way.
#Let's use it to spin around in a circle!
for i in range(0,180,10):
    # BUG FIX: math.cos/math.sin take radians, but i is in degrees -
    # convert, otherwise the pad does not trace a circle at all.
    server.circle_pad_set(CPAD_Commands.CPADRIGHT, math.cos(math.radians(i)))
    server.circle_pad_set(CPAD_Commands.CPADUP, math.sin(math.radians(i)))
    server.send(print_sent=False)
    time.sleep(0.1)
server.circle_pad_neutral()
server.circle_pad_set(CPAD_Commands.CPADNEUTRAL) #this also resets the circle pad
server.send(print_sent=False)
time.sleep(0.5)

#The N3DS C-stick works the same way.
#This will do nothing if you're on an o3ds.
server.n3ds_cstick_set(CSTICK_Commands.CSTICKUP,0.5)
server.send(print_sent=False)
time.sleep(0.5)
#These will both reset the c-stick.
server.n3ds_cstick_set(CSTICK_Commands.CSTICKNEUTRAL)
server.n3ds_cstick_neutral()
server.send(print_sent=False)

#There are also some specialized buttons available:
#server.press(Special_Buttons.HOME)
#server.press(Special_Buttons.POWER) #as if you tapped the power button
#server.press(Special_Buttons.POWER_LONG) #as if you held down the power button
#Hope this helps!
|
"""Live webcam face recognition demo.

Loads one reference photo per known person, then labels faces in the
webcam stream. Frames are processed at quarter resolution and only every
other frame is analysed, to keep the loop fast. Press ESC to quit.
"""
import face_recognition
import cv2
import numpy as np
import time

# Open the default webcam.
video_capture = cv2.VideoCapture(0)
frame_size = (int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
              int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
print(frame_size)
# BUG FIX: the original printed the cv2.CAP_PROP_FPS enum constant itself;
# query the capture device for the actual frame rate instead.
print(video_capture.get(cv2.CAP_PROP_FPS))
prevTime = 0

# Known people: display name -> reference image file. Replaces 13 nearly
# identical copy-pasted load/encode blocks with a single loop.
KNOWN_PEOPLE = [
    ("Eve", "Eve.jpg"),
    ("Pla", "Pla.jpg"),
    ("Af", "Af.jpg"),
    ("if", "if.jpg"),
    ("Fah", "Fah.jpg"),
    ("Film", "Film.jpg"),
    ("Ham", "Ham.jpg"),
    ("Nut", "Nut.jpg"),
    ("Pika", "Pika.jpg"),
    ("Tuar", "Tuar.jpg"),
    ("Tum", "Tum.jpg"),
]

known_face_names = []
known_face_encodings = []
for person_name, image_file in KNOWN_PEOPLE:
    reference_image = face_recognition.load_image_file(image_file)
    # [0]: assumes exactly one face per reference photo (IndexError otherwise)
    known_face_encodings.append(face_recognition.face_encodings(reference_image)[0])
    known_face_names.append(person_name)

# Loop state.
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    # Grab a single frame. BUG FIX: the original called read() twice per
    # iteration, so detection ran on a different frame than the one that
    # was annotated and displayed (and half the frames were wasted).
    ret, frame = video_capture.read()
    if not ret:
        break

    # Frame-rate measurement overlay.
    curTime = time.time()
    sec = curTime - prevTime
    prevTime = curTime
    fps = 1 / (sec)
    str_fps1 = "FPS Dectection : %d" % fps
    cv2.putText(frame, str_fps1, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))

    # Shrink to 1/4 size so face recognition runs faster, and convert
    # BGR (OpenCV) to RGB (face_recognition).
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only analyse every other frame to save time.
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # Compare against all known faces; first match wins.
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            if True in matches:
                name = known_face_names[matches.index(True)]
            face_names.append(name)
    process_this_frame = not process_this_frame

    # Draw the results (scaling locations back up from the 1/4-size frame).
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Box around the face plus a filled label strip underneath.
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

    cv2.imshow('Video', frame)

    # ESC quits.
    key = cv2.waitKey(1)
    if (key == 27):
        break

if video_capture.isOpened():
    video_capture.release()
cv2.destroyAllWindows()
from selenium.webdriver.common.by import By
class MainPageLocators():
    """Selenium locators for the main page."""
    LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")
class LoginPageLocators():
    """Selenium locators for the login/registration page."""
    # locators for the login form
    LOGIN_FORM = (By.CSS_SELECTOR, "#login_form")
    EMAIL_INPUT = (By.CSS_SELECTOR, "#id_login-username")
    PASSWORD_INPUT = (By.CSS_SELECTOR, "#id_login-password")
    LOGIN_BUTTON = (By.NAME, "login_submit")
    FORGOT_PASSWORD_LINK = (By.LINK_TEXT, "Я забыл пароль")
    # locators for the registration form
    REGISTER_FORM = (By.CSS_SELECTOR, "#register_form")
    REG_EMAIL_INPUT = (By.CSS_SELECTOR, "#id_registration-email")
    REG_PASSWORD_INPUT = (By.CSS_SELECTOR, "#id_registration-password1")
    REG_PASSWORD_INPUT_REPEAT = (By.CSS_SELECTOR, "#id_registration-password2")
    REG_BUTTON = (By.NAME, "registration_submit")
|
"""
operators = {"addition":"1","subtraction:":"2","Multiplication":"3","Division":"4"}
print("Please select which operation do you want to do in Maths:",operators)
selection = input("select operation which you wanted to do:")
num1 = int(input("Enter first number:"))
num2 = int(input("enter second number:"))
if selection in operators.values():
if selection == "1" :
print("addition of 2 numbers",num1 + num2)
elif selection == "2":
print("subtraction of 2 numbers:",num1 - num2)
elif selection == "3":
print("Multiplication of 2 numbers: ", num1 * num2)
elif selection == "4":
print("Division of 2 numbers:",num1 / num2)
else:
print("selection does not exist and please try correct option")
"""
operators = {"addition":"1","subtraction":"2","multiplication":"3","division":"4"}
print("select one operation",operators)
selection = input("select one operation")
num1 = int(input("enter first number:"))
num2 = int(input("enter second number:"))
if selection in operators.values():
if selection == "1":
print("addition",num1 + num2)
elif selection == "2":
print("subtraction:",num1 - num2)
else:
print("you are done") |
import os
import numpy as np

# directory containing the input data files
foldpath = "./data"


class analysis():
    """Eigen-decomposition pipeline over a whitespace-separated data file.

    Everything is computed eagerly in __init__: the data matrix, its
    column means, the mean-centered matrix, the covariance matrix, its
    eigenvectors, the top-k eigenvectors, and a projection of the data.
    """
    def __init__(self,filename):
        self.datapath = os.path.join(foldpath,filename)
        self.data = self.readData()          # raw samples, shape (m, n)
        self.aver = self.averge()            # per-column mean
        self.subAver = self.subAv()          # mean-centered data
        self.cov = self.coveriable()         # sample covariance, shape (n, n)
        self.feaVec = self.convFeaVec()      # (eigenvalues, eigenvectors) pair
        self.feaVects = self.getFeatVects()  # eigenvector matrix (columns)
        self.topKVec = self.getTopKVec()     # first k eigenvectors (rows)
        self.newArr = self.dealArr()         # data projected onto eigenvectors

    def readData(self):
        """Parse the file into a float matrix, one whitespace-separated row per line."""
        data = []
        with open(self.datapath) as f:
            lines = f.readlines()
            for line in lines:
                arr = line.strip().split()
                data.append([float(num) for num in arr])
        return np.array(data)

    def averge(self):
        """Return the per-column mean of the data."""
        m,n=np.shape(self.data)
        sumarr = self.data.sum(axis=0)
        return sumarr/m

    def subAv(self):
        """Return the data with the column means subtracted."""
        return self.data - self.aver

    def coveriable(self):
        """Return the sample covariance matrix (normalized by m - 1)."""
        m,n=np.shape(self.subAver)
        return np.dot(self.subAver.T,self.subAver)/(m-1)

    def convFeaVec(self):
        """Return np.linalg.eig of the covariance matrix: (values, vectors)."""
        return np.linalg.eig(self.cov)

    def getFeatVects(self):
        """Return the eigenvector matrix (eigenvectors are its columns)."""
        arr1,arr2=self.feaVec
        return arr2

    def getTopKVec(self,k=2):
        """Return the first k eigenvectors as rows.

        NOTE(review): np.linalg.eig does not sort by eigenvalue, so these
        are not guaranteed to be the k *largest* components - confirm.
        """
        arr1,arr2=self.feaVec
        return arr2.T[:k]

    def dealArr(self):
        """Project the data onto the eigenvector basis.

        NOTE(review): projects the *raw* data onto *all* eigenvectors; a
        typical PCA transform would use self.subAver and self.topKVec.
        """
        return np.dot(self.data,self.feaVects)

    def getNewArr(self):
        """Return the projected data computed at construction time."""
        return self.newArr


# demo: runs at import time and expects ./data/data.txt to exist
arr = analysis("data.txt").getNewArr()
print(arr)
|
# Sensible use of whitespace (spacing reflects operator precedence)
i = i + 1
submitted += 1
x = x*2 - 1
hypot2 = x*x + y*y
c = (a+b) * (a-b)
# But not like this:
i=i+1
submitted+=1
x = x*2-1
hypot2 = x* x + y *y
c = (a + b)* (a - b)
import cv2
import numpy as np
def create_blank(width, height, color=(0, 0, 0)):
    """Return a new ``height`` x ``width`` BGR image filled with *color*.

    :param width: image width in pixels
    :param height: image height in pixels
    :param color: BGR triple used for every pixel (default black)
    :return: uint8 numpy array of shape (height, width, 3)
    """
    canvas = np.zeros((height, width, 3), np.uint8)
    # broadcast the BGR triple across every pixel
    canvas[:] = color
    return canvas
def draw_half_circle_rounded(image):
    """Draw the top half of a circle near the bottom-centre of *image*, in place.

    Fixes two defects in the original: the centre was computed with true
    division (``width / 2``), which is a float in Python 3 and rejected by
    cv2.ellipse with a TypeError; and the named parameter variables were
    computed but then ignored in favour of repeated literals.

    :param image: BGR numpy image, modified in place
    """
    height, width = image.shape[0:2]
    # Ellipse parameters - see the cv2.ellipse documentation.
    radius = 100
    center = (width // 2, height - 25)  # integer pixel coordinates
    axes = (radius, radius)
    angle = 0
    startAngle = 180  # 180..360 degrees sweeps the upper half of the circle
    endAngle = 360
    thickness = 10
    cv2.ellipse(image, center, axes, angle, startAngle, endAngle, 255, thickness)
# Create new blank 300x150 white image
width, height = 300, 150
image = create_blank(width, height, color=(255,255,255))
# NOTE(review): the window is shown before the arc is drawn, so the
# displayed image is blank white; only the saved file shows the half circle
# (draw_half_circle_rounded mutates the image in place).
cv2.imshow('image',image)
draw_half_circle_rounded(image)
cv2.imwrite('half_circle_rounded.jpg', image)
|
from django.utils import autoreload
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import re
address_port_re = re.compile(r"""^(?:
(?P<address>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
class BaseRunTornadoCommand(BaseCommand):
    """Django management command that serves the project through Tornado.

    Mirrors runserver's argument handling ([address:]port, --noreload) and
    adds switches for comet/django handlers and SSL.
    """
    option_list = BaseCommand.option_list + (
        make_option('--noreload', action='store_false', dest='use_reloader', default=True, help='Tells Django to NOT use the auto-reloader.'),
        make_option('--nocomet', action='store_false', dest='is_comet', default=True, help='Disable comet handlers.'),
        make_option('--nodjango', action='store_false', dest='is_django', default=True, help='Disable django handlers.'),
        make_option('--nossl', action='store_false', dest='is_ssl', default=True, help='Disable SSL support.'),
    )
    help = 'Starts a tornado web server.'
    args = '[optional port number]'
    requires_model_validation = False

    def __init__(self):
        # Empty address / port 0 mean "fall back to settings or defaults"
        # in inner_run().
        self.address = ''
        self.port = 0
        super(BaseRunTornadoCommand, self).__init__()

    def handle(self, address_port='', *args, **options):
        """Parse the optional [address:]port argument, then run the server.

        :raises CommandError: when the argument does not parse
        """
        if address_port:
            matches = re.match(address_port_re, address_port)
            if matches is None:
                raise CommandError('"%s" is not a valid port number or address:port pair.' % address_port)
            address, ipv4, ipv6, fqdn, port = matches.groups()
            if not port.isdigit():
                raise CommandError('%r is not a valid port number.' % port)
            if address:
                if ipv6:
                    # strip the surrounding [] of an IPv6 literal
                    address = address[1:-1]
                self.address = address
            self.port = int(port)
        self.run(*args, **options)

    def run(self, *args, **options):
        """Run inner_run, optionally under Django's auto-reloader."""
        use_reloader = options.get('use_reloader', True)
        if use_reloader:
            autoreload.main(self.inner_run, args, options)
        else:
            self.inner_run(*args, **options)

    def inner_run(self, *args, **options):
        """Validate models, then start the Tornado HTTP server (blocks)."""
        from django.conf import settings
        from comet.application import Application
        import tornado.ioloop
        import tornado.httpserver
        # command-line values win over settings; final fallback is port 8000
        # on all interfaces
        port = self.port or getattr(settings, 'SERVER_PORT', 8000)
        address = self.address or getattr(settings, 'SERVER_ADDRESS', '')
        # validate models
        self.stdout.write("Validating models...\n\n")
        self.validate(display_num_errors=True)
        self.stdout.write((
            "Django version %(version)s, using settings %(settings)r\n"
            "Development server is running at http://%(address)s:%(port)s/\n"
            "Quit the server with CONTROL-C.\n"
        ) % {
            "version": self.get_version(),
            "settings": settings.SETTINGS_MODULE,
            "address": address or '*',
            "port": port,
        })
        # prepare options
        http_server_options = dict()
        is_ssl = options.get('is_ssl', True)
        # SSL is only enabled when both cert and key are configured
        if is_ssl and getattr(settings, 'SERVER_SSL_CERT', False) and getattr(settings, 'SERVER_SSL_KEY', False):
            http_server_options['ssl_options'] = {
                'certfile': settings.SERVER_SSL_CERT,
                'keyfile': settings.SERVER_SSL_KEY,
            }
        is_comet = options.get('is_comet', True)
        is_django = options.get('is_django', True)
        # start the server
        application = Application(is_comet=is_comet, is_django=is_django)
        http_server = tornado.httpserver.HTTPServer(application, **http_server_options)
        http_server.listen(port, address=address)
        tornado.ioloop.IOLoop.instance().start()
class Command(BaseRunTornadoCommand):
    """Concrete management command; inherits all behaviour unchanged."""
    pass
|
# version 2
# client
# REST API:
# dbs: CRUD
# tables: CRUD
# /dbs/<db_name>
# /dbs/<db_name>/tables/<table_name>
# server
# build query with input from API call
# >> projection, table, op, conditions
# if with_conds:
# query = f'query {projection} ...'
#
# parse query built with input from ...
# p = QueryParser()
# cmd = p.parse(query)
# res = list(cmd.execute())
# return rendered jinja template with results
# return render_template('users.html', users=res)
|
from typing import Dict
from helpscout.endpoints.endpoint import Endpoint
class User(Endpoint):
    """User endpoint."""

    def list(self, **kwargs) -> Dict:
        """Get all users.
        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/users/list/
        """
        return self.process_get_result(
            self.base_get_request(self.base_url, **kwargs)
        )

    def user(self, user_id: int) -> Dict:
        """Get user by id.
        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/users/get/
        """
        return self.process_get_result(
            self.base_get_request(f"{self.base_url}/{user_id}")
        )

    def resource_owner(self) -> Dict:
        """Get resource owner.
        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/users/me/
        """
        return self.process_get_result(
            self.base_get_request(f"{self.base_url}/me")
        )
|
from googleapiclient.discovery import build
from pprint import pprint
# CLIENT_SECRET_FILE = 'client'
# SECURITY(review): API key is hard-coded in source — rotate it and load it
# from an environment variable or config file instead of committing it.
API_KEY = "AIzaSyDLHZt0LlS4ZybFCJKZOnJSoPJQJlRRg28"
API_NAME = 'youtube'  # Google Discovery service name
API_VERSION = 'v3'    # YouTube Data API version
SCOPES = ['https://www.googleapis.com/auth/youtube']  # full read/write scope
# service = Create_Service(CLIENT_SECRET_FILE, )
# Demo: which built-in containers support the + (concatenation) operator.
str1 = 'aa'
str2 = 'bb'
list1 = [1, 2]
list2 = [10, 20]
t1 = (1, 2)
t2 = (10, 20)
dict1 = {'name': 'Python'}
dict2 = {'age': 30}
# +: concatenation (supported by str, list and tuple)
print(str1 + str2)
print(list1 + list2)
print(t1 + t2)
# print(dict1 + dict2)  # error: dicts do not support the + operator
|
import tensorflow as tf
import numpy as np
# NOTE: uses the TensorFlow 1.x Session API (removed from TF 2.x eager mode).
sess = tf.Session()
# 3 x 2 x 3 nested list used as the input tensor for the tf.slice demos.
inputs =[
    [[1, 1, 1], [2, 2, 2]],
    [[3, 3, 3], [4, 4, 4]],
    [[5, 5, 5], [6, 6, 6]]
]
print (inputs[0])
print (inputs[1])
print (inputs[2])
print ('\n')
# tf.slice(input_, begin, size): take `size` elements per axis starting at `begin`.
print (sess.run(tf.slice(inputs, begin=[1, 0, 0], size=[1, 1, 3])))
print ('\n')
print (sess.run(tf.slice(inputs, begin=[1, 0, 0], size=[1, 2, 3])))
print ('\n')
print (sess.run(tf.slice(inputs, begin=[1, 0, 0], size=[2, 1, 3])))
print ('\n')
print (sess.run(tf.slice(inputs, begin=[1, 0, 0], size=[2, 2, 3])))
from mongoengine import *
import datetime
from .models import *
import datetime
import re
def create_group(user_list, group_name):
    """Create and save a new WorkGroup with the given members.

    :param user_list: members to put in the new group
    :param group_name: unique name for the group
    :return: the saved WorkGroup document
    :raises ValueError: if a group with this name already exists
    """
    grps = WorkGroup.objects(name=group_name)
    if len(grps) > 0:
        # BUGFIX: the original left the '{}' placeholder unformatted, so the
        # error message never contained the offending group name.
        raise ValueError("Group name '{}' has been used!".format(group_name))
    new_grp = WorkGroup(name=group_name,
                        members=user_list)
    new_grp.save()
    return new_grp
def today_isoformat():
    """Return today's date as an ISO-8601 string (YYYY-MM-DD)."""
    return datetime.date.today().isoformat()
def extract_xml_positions(s):
    """
    Ignore the existence of the self-ending tags for now.
    This is a standard regex task. Check it.

    NOTE(review): this function is unfinished — each tag match is bound to
    ``tag`` but nothing is ever pushed to ``tag_stack`` or appended to
    ``position_list``, and the function implicitly returns None.
    """
    regex = re.compile(r'<.*?>')  # non-greedy match of a single <...> tag
    position_list = []  # intended output; never populated yet
    tag_stack = []      # intended open-tag stack; never populated yet
    s_pointer = 0       # unused so far
    regex_iter = regex.finditer(s)
    for m in regex_iter:
        tag = m.group(0)
|
import hashlib
import logging
import os
import re
from utils import make_soup
from time import sleep
from datetime import datetime
from scrapers.base_scraper import OddScraper
# Regex that matches poll rankings such as "(#12)" so they can be stripped
# from team names.  FIX: use a raw string — '\(#\d+\)' as a plain string
# contains invalid escape sequences (DeprecationWarning, SyntaxWarning on
# newer Pythons).
ranking_regex = re.compile(r'\(#\d+\)')
logging.basicConfig(level="INFO")
class InteropScraper(OddScraper):
    """Selenium/BeautifulSoup scraper for betting lines from the interops site."""

    def __init__(self, sport, sel=True):
        """
        interops constructor
        :params
            sport(string) -> the sport we want the lines for
            sel(bool) -> whether or not selenium will be used
        """
        site = "interops"
        super().__init__(sport, site, sel=sel)
        self._ncaaf_odds = None

    def _navigate_to_NCAAF(self):
        """
        navigate the selenium driver to the NCAAF url

        NOTE(review): identical to _navigate_to_NBA — both rely on self.url
        and self.path already pointing at the right league; presumably set by
        the base class from the `sport` argument — confirm.
        """
        self.driver.get(os.path.join(self.url, self.path))

    def _navigate_to_NBA(self):
        """
        navigate the selenium driver to the NBA url
        """
        self.driver.get(os.path.join(self.url, self.path))

    def _get_markup(self):
        """
        retrieve the html to extract the lines info from
        return(BeautifulSoup object) -> the soup made from the recovered html
        """
        sleep(2)  # crude fixed wait for the JS-rendered odds table
        data = self.driver.find_element_by_class_name("markettable").get_attribute("innerHTML")
        return make_soup(data)

    def _get_data(self, markup, header):
        """
        retrieve all the needed data from the html table
        :params
            markup(BeautifulSoup object) -> the full html table
            header(string) -> a list of dictionary keys to use in the return dict
        return(dict) -> a dictionary to be turned into a pandas DataFrame containing the required info
        """
        events = markup.find_all("div", {"class": "trw"})
        content = {column: list() for column in header}
        i = 1  # NOTE(review): incremented but never read
        for event in events:
            i +=1
            try:
                odds = event.find_all("div", {"class": "td"})
                content["Teams"].append(self._parse_for_teams(event))
                content["Date"].append(self._parse_for_dates(event))
                content["Spread"].append(self._parse_for_spread(odds[0]))
                content["Total Points"].append(self._parse_for_OU(odds[1]))
                content["Money Line"].append(self._parse_for_moneyline(odds[2]))
            except Exception as e:
                # Malformed rows are logged and skipped.  NOTE(review): a
                # partial append before the failure can leave the columns
                # with unequal lengths.
                logging.exception(e)
                continue
        return content

    def _parse_for_teams(self, event):
        """
        parse for a set of teams for a single game from a subset of the html
        :params
            event(BeautifulSoup object) -> a subset of the html table
        return(tuple) -> the (top, bottom) team names with "(#N)" rankings stripped
        """
        return (re.sub(ranking_regex, "", event.find("div", {"class": "ustop"}).text.strip()).strip(),
                re.sub(ranking_regex, "", event.find("div", {"class": "usbot"}).text.strip()).strip())

    def _parse_for_dates(self, event):
        """
        parse for the date of a game from a subset of the html
        :params
            event(BeautifulSoup object) -> a subset of the html table
        return(datetime) -> a datetime object parsed from the date_string built.
        """
        date_string = event.find("span", {"class": "eventdatetime"}).get("title")
        date_string = date_string.replace('<br/>', ' ')  # title holds "date<br/>time"
        return datetime.strptime(date_string, "%m/%d/%Y %I:%M %p")

    def _parse_for_spread(self, column):
        """
        parse for the spread of a game from a subset of html
        :params
            column(BeautifulSoup object) -> a subset of the event portion of the html table
        return(tuple) -> tuple of the spreads for a specified game
        """
        spreads = []
        for item in column.find_all("a"):
            spans = item.find_all("span")
            data = (spans[0].text, spans[1].get("data-o-cnt"))
            spreads.append(" ".join(data))
        return tuple(spreads)

    def _parse_for_OU(self, column):
        """
        parse for the over under of a game from a subset of html
        :params
            column(BeautifulSoup object) -> a subset of the event portion of the html table
        return(tuple) -> a tuple of the over unders for a specified game
        """
        totals = []
        for item in column.find_all("a"):
            spans = item.find_all("span")
            # first char of spans[0].text is the O/U marker ("O" or "U" presumably — confirm)
            data = (spans[0].text[0], item.get("data-o-pts"), spans[1].get("data-o-cnt"))
            totals.append(" ".join(data))
        return tuple(totals)

    def _parse_for_moneyline(self, column):
        """
        parse for the money line of a game from a subset of html
        :params
            column(BeautifulSoup object) -> a subset of the event portion of the html table
        return(tuple) -> a tuple of the moneylines for the specified games
        """
        lines = []
        for item in column.find_all("a"):
            lines.append(item.text.strip())
        return tuple(lines)
|
#!/usr/bin/python3
##########################################################################
# Postgres Partition maintenance Script for native partitioning in PostgreSQL
version = 3.2  # script version, printed by print_version()
# Author : Jobin Augustine
##########################################################################
import sys,datetime,argparse,psycopg2
from psycopg2 import extras
#Command Line Argument parser and help display
# FIX: the description previously read 'Index Analysis and Rebuild Program',
# copy-pasted from another tool; corrected to describe this script.
parser = argparse.ArgumentParser(description='Postgres partition maintenance program',
    epilog='Example 1:\n %(prog)s -c "host=host1.hostname.com dbname=databasename user=username password=password" -t public.emp -i weekly -p 5 \n'
    'Example 2:\n %(prog)s -c "host=host1.hostname.com dbname=databasename user=username password=password" -t public.emp -i weekly -p 5 --tsvfile=test.tsv --ddlfile=ddl.sql --errorlog=error.log --execute --quitonerror',
    formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-c','--connection',help="Connection string containing host, username, password etc",required=True)
parser.add_argument('-t','--table',help="Table name in schema.tablename format",required=True)
parser.add_argument('-i','--interval',help="Interval in [ yearly | quarterly | monthly | weekly | daily | hourly | <NUMBER> ]",required=True)
parser.add_argument('-p','--premake',help="Premake partition",required=True)
parser.add_argument('-a','--append',help="Special string to append to DDL")
parser.add_argument('--ddlfile',help="Generate DDL as SQL Script")
parser.add_argument('--errorlog',help="Error log file")
parser.add_argument('--displayddl', action='store_true', help="Display Generated DDLs on the screen")
parser.add_argument('--quitonerror', action='store_true', help="Exit on execution Error")
parser.add_argument('--execute', action='store_true',help="Execute the generated DDLs against database")
# With no arguments at all, show usage instead of argparse's terse error.
if len(sys.argv)==1:
    parser.print_help()
    sys.exit(1)
args = parser.parse_args()
#Print the version of this program to stdout
def print_version():
    """Write the script's version string to stdout."""
    print("Version: {}".format(version))
#Establish connection to database and handle exception
def create_conn():
    """Open a psycopg2 connection using the -c/--connection string.

    Exits the program with status 1 when the connection cannot be made.
    :return: an open psycopg2 connection
    """
    print("Connecting to Database...")  # FIX: typo "Databse" in user-facing message
    try:
        # Short timeout so a bad host fails fast instead of hanging.
        conn = psycopg2.connect(args.connection+" connect_timeout=5")
    except psycopg2.Error as e:
        print("Unable to connect to database :")
        print(e)
        sys.exit(1)
    return conn
#close the connection
def close_conn(conn):
    """Announce and close the given database connection."""
    print("Closing the connection...")
    conn.close()
############################## Class representing a Partitioned table ######################################
class PartTable:
    """Class representing a partitioned table.

    On construction, verifies that -t/--table names a partitioned table and
    captures its oid plus partition-key column name/type, then maps the
    -i/--interval keyword to a PostgreSQL interval and a partition-name
    timestamp format.

    NOTE(review): SQL is assembled by concatenating args.table into the query
    text.  Tolerable for an operator-run CLI, but never expose this path to
    untrusted input (SQL injection).
    """

    def __init__(self, name):
        self.name = name
        # Query to identify the partitioning column and its type
        sql = """SELECT c.oid,a.attname, t.typname
            FROM pg_attribute a
            JOIN pg_class c ON a.attrelid = c.oid
            JOIN pg_namespace n ON c.relnamespace = n.oid
            JOIN pg_type t ON a.atttypid = t.oid
            WHERE attnum IN (SELECT unnest(partattrs) FROM pg_partitioned_table p WHERE a.attrelid = p.partrelid)""" + \
            " AND n.nspname = split_part('" + str(args.table) + "', '.', 1)::name AND c.relname = split_part('" + str(args.table) + "', '.', 2)::name"
        cur = conn.cursor()
        cur.execute(sql)
        if cur.rowcount < 1:
            print("ERROR : No partitioned table with name :\"" + str(args.table) + "\"")
            sys.exit()
        # attr[0] = oid of table, attr[1] = column name, attr[2] = column type
        self.attr = cur.fetchone()
        cur.close()
        # Translate the interval keyword; a bare number means a numeric range.
        inInterval = args.interval
        if inInterval == 'yearly':
            self.interval = '1 year'
            self.partFormat = 'YYYY'
        elif inInterval == 'quarterly':
            self.interval = '3 months'
            self.partFormat = 'YYYY_MM'
        elif inInterval == 'monthly':
            self.interval = '1 month'
            self.partFormat = 'YYYY_MM'
        elif inInterval == 'weekly':
            self.interval = '1 week'
            self.partFormat = 'YYYY_MM_DD'
        elif inInterval == 'daily':
            self.interval = '1 day'
            self.partFormat = 'YYYY_MM_DD'
        elif inInterval == 'hourly':
            self.interval = '1 hour'
            self.partFormat = 'YYYY_MM_DD_HH24'
        else:
            self.interval = inInterval

    def getFreePartCount(self):
        """Return how many child partitions currently hold zero live rows."""
        sql = " SELECT COUNT(*) FROM pg_catalog.pg_inherits i JOIN pg_stat_user_tables s ON i.inhrelid = s.relid \
            WHERE i.inhparent = '" + str(self.attr[0]) + "' AND s.n_live_tup = 0"
        cur = conn.cursor()
        cur.execute(sql)
        parts = cur.fetchone()
        cur.close()
        return parts[0]

    def prepareNewPartitions(self, newPartCount):
        """Build DDL for *newPartCount* new partitions beyond the current max bound.

        Numeric intervals extend a bigint range; keyword intervals extend a
        timestamp range, naming each partition with self.partFormat.  The
        generated rows (one dict with key 'ddl' each) are stored in
        self.dicDDLs.  Exits if the table has no non-DEFAULT partition to
        anchor the range.
        """
        print('Preparing '+ str(newPartCount) + ' more new partition(s)')
        if self.interval.isdigit():
            sql = ("SELECT 'CREATE TABLE " + str(args.table) + "_p'|| max + " + self.interval + "*b ||' PARTITION OF " + str(args.table) +
                " FOR VALUES FROM ('''||max + "+ self.interval +" * b ||''') TO ('''||max + " + self.interval + " *(b+1)||''')' AS ddl FROM " +
                "(SELECT max(left(substring(pg_catalog.pg_get_expr(c.relpartbound, c.oid),position('TO (' IN pg_catalog.pg_get_expr(c.relpartbound, c.oid))+4),-1)::bigint) " +
                "FROM pg_catalog.pg_class c join pg_catalog.pg_inherits i on c.oid=i.inhrelid "+
                "WHERE i.inhparent = " + str(self.attr[0]) +" AND pg_catalog.pg_get_expr(c.relpartbound, c.oid) != 'DEFAULT') a CROSS JOIN generate_series(0," + str(newPartCount-1) +",1) b")
        else:
            sql = ("SELECT 'CREATE TABLE " + str(args.table) + "_p'||to_char(max + (interval '" + self.interval + "'*b),'"+ self.partFormat +"')||' PARTITION OF " + str(args.table) +
                " FOR VALUES FROM ('''||max + (interval '" + self.interval + "'*b)||''') TO ('''||max + (interval '" + self.interval + "'*(b+1))||''')' AS ddl FROM " +
                "(SELECT max(left(substring(pg_catalog.pg_get_expr(c.relpartbound, c.oid),position('TO (' IN pg_catalog.pg_get_expr(c.relpartbound, c.oid))+5),-2)::timestamp) " +
                "FROM pg_catalog.pg_class c join pg_catalog.pg_inherits i on c.oid=i.inhrelid " +
                "WHERE i.inhparent = " + str(self.attr[0]) +" AND pg_catalog.pg_get_expr(c.relpartbound, c.oid) != 'DEFAULT') a CROSS JOIN generate_series(0," + str(newPartCount-1) +",1) b")
        print('########## prepare DDLs ######\n'+sql+'\n###########################')
        cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        cur.execute(sql)
        if cur.rowcount < 1:
            print("ERROR : Atleast one partiton should be existing which marks the begining of Partitions for table : \"" + str(args.table) + "\"")
            sys.exit()
        self.dicDDLs = cur.fetchall()
        cur.close()

    def getNewPartDDLs(self):
        """Return the DDL rows built by prepareNewPartitions(), exiting when empty.

        BUGFIX: the original guard was ``len(self.dicDDLs) < 0``, which can
        never be true, so an empty result slipped through silently.
        """
        if len(self.dicDDLs) == 0:
            print("No DDLs for New Partitions")
            sys.exit()
        return self.dicDDLs
############################# End of PartTable Class #################################################################
############################# End of PartTable Class #################################################################
#Generic function : print DDLs to terminal (stdout)
def printDDLs(dicDDLs):
    """Print every generated DDL statement, ';'-terminated, to stdout."""
    for entry in dicDDLs:
        print('%s;' % entry['ddl'])
#Generic function : write DDLs to a file
def writeDDLfile(dicDDLs,ddlfile):
    """Write every generated DDL to *ddlfile*, one ';'-terminated statement per line.

    :param dicDDLs: iterable of dicts with key 'ddl'
    :param ddlfile: path of the SQL script to (over)write
    """
    # FIX: use a context manager so the file is closed even if a write fails
    # (the original leaked the descriptor on error).  Mode 'w' already
    # truncates, so the explicit truncate() call was redundant.
    with open(ddlfile, 'w') as fd:
        for o in dicDDLs:
            fd.write(o['ddl']+";\n")
#Generic function : Execute DDLs against database
def executeDDLs(dicDDLs):
    """Execute each generated DDL on the global connection.

    Failures are printed (and appended to --errorlog when given); with
    --quitonerror the whole program exits on the first failure.
    """
    if args.errorlog:
        fd = open(args.errorlog,'w')
    # Run DDLs in autocommit (isolation level 0) and restore it afterwards.
    old_isolation_level = conn.isolation_level
    conn.set_isolation_level(0)
    for o in dicDDLs:
        strDDL = o['ddl']
        try:
            cur = conn.cursor()
            print("Executing :" + strDDL)
            cur.execute(strDDL)
            conn.commit()
            cur.close()
        except psycopg2.Error as e:
            print("Statement Execution Error :")
            print(e)
            if args.errorlog:
                fd.write(strDDL + str(e))
            # NOTE(review): exits without closing fd or restoring the
            # isolation level — acceptable for process exit, but untidy.
            if args.quitonerror :
                sys.exit(1)
    conn.set_isolation_level(old_isolation_level)
    if args.errorlog:
        fd.close()
#main() function of the program
if __name__ == "__main__":
    print_version()
    conn = create_conn()
    tab1 = PartTable(args.table)
    freeParts = tab1.getFreePartCount()
    print('Current Number of Free Partitions in the table :'+ str(freeParts) )
    # --premake is an absolute target of empty partitions, not an increment.
    if freeParts >= int(args.premake) :
        print("NOTICE : Already there are sufficient empty partitions")
        sys.exit(1)
    tab1.prepareNewPartitions(int(args.premake)-freeParts)
    #Prepare a dictionary of all the DDLs required for adding partitions
    #dicDDLs = preparePartitions()
    dicDDLs = tab1.getNewPartDDLs()
    #append special string (e.g. TABLESPACE clause) to every DDL
    if args.append:
        for o in dicDDLs:
            o['ddl'] = o['ddl'] + ' ' + args.append
    #if user specified the --displayddl option
    if args.displayddl:
        printDDLs(dicDDLs)
    if args.ddlfile:
        writeDDLfile(dicDDLs,args.ddlfile)
    #if user specified the --execute option
    if args.execute:
        print("Auto execute is Enabled")
        executeDDLs(dicDDLs)
    else:
        print("Auto execute is disabled")
    close_conn(conn)
|
from django.conf.urls import url
from inicio import views
# URL routes for the "inicio" app (legacy django.conf.urls.url API).
urlpatterns = [
    url(r'^$', views.ViewHome.as_view(), name='home'),
    url(r'^about/$', views.ViewAbout.as_view(), name='about'),
    # NOTE(review): path spelled "galery" while the route name is "gallery" —
    # confirm which spelling external links expect before changing either.
    url(r'^galery/$', views.ViewGalery.as_view(), name='gallery'),
    url(r'^info/$', views.ViewInfo.as_view(), name='info'),
    url(r'^contact/$', views.ViewContact.as_view(), name='contact')
]
|
# %timeit magic
# import random
# %timeit rolls_list = [random.randrange(1, 7)for i in range(0, 6_000_000)]
|
from display import handleDrawing
def bucketSort(array, *args):
    """Sort *array* in place using bucket sort, animating via handleDrawing.

    *args is accepted but unused (presumably to match a common sorter
    signature used by the display module — confirm).
    Assumes non-negative numeric values with value // len(array) < len(array);
    larger values would raise IndexError — TODO confirm callers guarantee this.
    """
    # One bucket per element.
    bucket = []
    for i in range(len(array)):
        bucket.append([])
    n = len(bucket)
    # Distribute: element j goes to bucket int(j / n).
    for j in array:
        index_b = int(j/n)
        bucket[index_b].append(j)
        handleDrawing(array, j, -1, index_b, -1)
    # Sort each bucket individually.
    for i in range(len(array)):
        bucket[i] = sorted(bucket[i])
    # Concatenate the buckets back into the original list.
    k = 0
    for i in range(len(array)):
        for j in range(len(bucket[i])):
            handleDrawing(array, k, -1, i, -1)
            array[k] = bucket[i][j]
            k += 1
|
import matplotlib.pyplot as plt
print(plt.style.available)  # list the styles shipped with this matplotlib build
values = range(1,6)
squares = [x**2 for x in range(1,6)]
# NOTE(review): 'seaborn-dark-palette' was renamed to
# 'seaborn-v0_8-dark-palette' in matplotlib >= 3.6 — confirm installed version.
plt.style.use('seaborn-dark-palette')
fig, ax = plt.subplots()
ax.plot(values,squares,linewidth=3)
# Labels are in Polish ("Squares" / "Value" / "Value squared").
ax.set_title("Kwadraty",fontsize=20)
ax.set_xlabel('Wartosć',fontsize=14)
ax.set_ylabel('Wartość do kwadratu',fontsize=14)
ax.tick_params(axis='both',labelsize=14)
plt.show()
|
from __future__ import unicode_literals
from django.apps import AppConfig
class UploadtocloudConfig(AppConfig):
    """Django application configuration for the ``uploadtocloud`` app."""
    name = 'uploadtocloud'
|
from room import Room
from player import Player
from item import Item
import os
# Declare all the rooms: name -> Room(title, description[, starting Item]).
room = {
    'outside': Room("Outside Cave Entrance",
                    "North of you, the cave mount beckons"),
    'foyer': Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east."""),
    'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm.""",
                     Item("axe", """Used to chop wood. Mostly...""")),
    'narrow': Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air."""),
    'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south.""", Item("coin", """You find
a coin that has been left behind.""")),
}
# Link rooms together: each n_to/s_to/e_to/w_to attribute points at the
# neighbouring Room in that compass direction.
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
#
# Main
#
# Make a new player object that is currently in the 'outside' room.
player = Player(input("What is your name? "), room['outside'])
os.system("clear")  # clear the terminal (POSIX only)
# Write a loop that:
#
# * Prints the current room name
# * Prints the current description (the textwrap module might be useful here).
# * Waits for user input and decides what to do.
#
# If the user enters a cardinal direction, attempt to move to the room there.
# Print an error message if the movement isn't allowed.
#
# If the user enters "q", quit the game.
print("Welcome to the adventure game. ")
print("You must navigate to where you would like to go.\n")
print(player)
print(player.current_room.description + "\n")
# One-time command reference shown before the input loop starts.
print("Command Table\n" + "=" * 30)
print("Travel: (n) North (e) East (s) South (w) West")
print("""Player: (i) Inventory (l) Locate items (get) Items
(drop) Items (q) quit""")
print("\nWhat would you like to do?")
# Main input loop: dispatch on the player's command until they quit.
while True:
    player_input = input("> ")
    # Sets the available directions for the user
    acceptable_travel_directions = ['n', 'e', 's', 'w']
    acceptable_player_actions = ['i', 'inventory']
    current_room = player.current_room
    print()
    print("=" * 20 + "\n")
    if player_input.lower() in acceptable_travel_directions:
        player.move(player_input)
    elif player_input.lower() in acceptable_player_actions:
        player.items_in_inventory()
    elif player_input.lower() == "l":
        player.current_room.items_in_room()
    elif "get" in player_input.lower():
        # "get <item>": move an item from the room into the inventory.
        player_words = player_input.split()
        if len(player_words) > 1:
            player_item_selection = player_words[1]
            found_item = current_room.does_item_exist(player_item_selection)
            print(found_item)
            if found_item:
                player.add_item(found_item)
                current_room.remove_item(found_item)
        else:
            print("What are you trying to get?")
    elif "drop" in player_input.lower():
        # "drop <item>": move an item from the inventory back into the room.
        print("Dropping...")
        player_words = player_input.split()
        if len(player_words) > 1:
            player_item_selection = player_words[1]
            player_has_item = player.is_item_in_inventory(
                player_item_selection)
            if (player_has_item):
                current_room.add_item(player_has_item)
                player.drop_item(player_has_item)
        else:
            print("What are you trying to drop?")
            # NOTE(review): player_item_selection can be unbound here if no
            # prior "get/drop <item>" command set it — latent NameError.
            print(player_item_selection)
    elif player_input == 'q':
        os.system("clear")
        print("Thank you for playing!\n")
        break
    else:
        print("Invalid operation. Please select again.\n")
|
# -*- coding: utf-8 -*-
from Candy import *
from Level import *
from Map import *
from Score import *
from Snake import *
class GameWorld(object):
    """
    GameWorld
    The game world: wires the level, score, map, snake and candy together
    and drives them from update().
    """
    def __init__(self, sense):
        """
        Initialize the game world
        :param sense The sense hat
        """
        self.level = Level()
        self.score = Score()
        self.mappy = Map(0, 7)
        self.snake = Snake(3, self.mappy)
        self.candy = Candy(self.snake)
        self.sense = sense
        self.i = 0  # frame counter used to throttle snake updates
        print(self.snake)
        self.sense.clear()
        self.sense.show_message("Level: " + str(self.level.level),
                                text_colour=[180, 180, 180])

    def update(self, delta):
        """
        Compute all the objects
        :param delta The loop delta
        :return The dead state of the snake
        :return The score
        """
        deadStatus = False
        # UPS: 60 -> 2 * 60 = 120
        # Move the snake only every (1/speed)*120 frames, so a faster snake
        # is updated more often.
        if (self.i >= (1.0 / self.snake.speed) * 120):
            deadStatus = self.snake.update(delta)
            self.__isEating__()
            print(self.snake)
            self.i = 0
        else:
            self.i += 1
        # Returned tuple is (alive, score).
        return (not deadStatus, self.score.score)

    def __isEating__(self):
        """
        Detect when the Snake eats a candy; update score/level and respawn it.
        """
        head = self.snake.positions[0]
        if (head.x == self.candy.x and head.y == self.candy.y):
            self.score.increaseScore(self.snake, self.level)
            # Level up whenever the speed reaches a whole multiple of 1.0.
            speed = int(self.snake.speed * 10)
            if (speed % 10 == 0):
                self.level.level += 1
                self.sense.clear()
                self.sense.show_message("Level: " + str(self.level.level),
                                        text_colour=[180, 180, 180])
            self.sense.clear()
            self.sense.show_message("Score: " + str(self.score.score),
                                    text_colour=[180, 180, 180])
            self.sense.clear()
            self.candy.randomize()
|
import sys
import Pyro4
import os
import hashlib
#list_workers= ['PYRO:obj_407b5d663ba94cdc974651d5433b6b35@10.151.254.104:50099','PYRO:obj_407b5d663ba94cdc974651d5433b6b35@10.151.254.104:50099','PYRO:obj_407b5d663ba94cdc974651d5433b6b35@10.151.254.104:50099','PYRO:obj_407b5d663ba94cdc974651d5433b6b35@10.151.254.104:50099','PYRO:obj_407b5d663ba94cdc974651d5433b6b35@10.151.254.104:50099']
# Pyro URIs of the file-server workers; indices here correspond to the
# (primary, backup) pairs chosen by Middleware.chooseWorker().
list_workers = ['PYRO:obj_62764364dfd747f785df1ae8fcb88a9d@10.151.36.51:37204','PYRO:obj_19827bdb5bf349dd827246b6bdb0e02d@10.151.253.54:59078','PYRO:obj_6f02b7f6e16c4935a1c9416bf1240da0@10.151.253.54:59070','PYRO:obj_876decb6510b4bf4acdff621ab67d8ea@10.151.253.151:52778']
# list_workers = ['PYRO:worker@127.0.0.1:9000','PYRO:worker@127.0.0.1:9001']
workers = []  # presumably filled elsewhere with Pyro proxies for list_workers — confirm
@Pyro4.expose
@Pyro4.callback
class Middleware(object):
def __init__(self):
    """Set up the list of shell commands this middleware supports."""
    self.commands = ['ls', 'cd', 'rm', 'mv', 'touch', 'exit', 'cp', 'upload']
def getCommands(self):
    """Return the list of supported shell commands."""
    return self.commands
def upload(self, file, data):
    """Store *data* as *file* on a primary worker and one backup worker.

    The (primary, backup) pair is selected by hashing the file name
    (see chooseWorker).  Files are always written at the root '/'.
    Progress messages are in Indonesian ("upload succeeded; stored on
    server N").
    """
    numberServer1,numberServer2 = self.chooseWorker(file)
    worker = workers[numberServer1]   # primary
    worker2 = workers[numberServer2]  # backup replica
    cwd = '/'
    worker.createFile(cwd, file, data)
    p = '>> Upload ' + file + ' berhasil! file disimpan di server ' + repr(numberServer1 + 1)
    print (p)
    worker2.createFile(cwd, file, data)
    p = '>> Upload ' + file + ' berhasil! file disimpan di server backup : ' + repr(numberServer2+1)
    print (p)
def chooseWorker(self, file):
    """Pick the (primary, backup) worker indices for *file*.

    Buckets the last hex digit of the file name's MD5 into four ranges so
    files spread evenly over the 4 workers, with a fixed backup pairing
    (0<->3, 1<->2).

    :param file: file name (str or bytes)
    :return: tuple (primary_index, backup_index)
    """
    # FIX: hashlib.md5 requires bytes on Python 3; the original passed a
    # str and raised TypeError.  Encoding keeps Python 2 behaviour intact.
    name = file.encode('utf-8') if isinstance(file, str) else file
    self.h1 = hashlib.md5(name).hexdigest()[-1]
    # Membership tests replace the original chains of '==' comparisons.
    if self.h1 in '0123':
        return 0, 3
    elif self.h1 in '4567':
        return 1, 2
    elif self.h1 in '89ab':
        return 2, 1
    elif self.h1 in 'cdef':
        return 3, 0
def generateStructureFolder(self, cwd, args, path_req=''):
    """Resolve *path_req* against *cwd* into an absolute path string.

    Handles three cases: no path argument (return cwd unchanged), an
    absolute path (returned as-is), and relative paths containing '../'
    (walked against the components of cwd).

    NOTE(review): the statements ``cwd_fix == '/'`` below are bare
    comparisons, not assignments — they are no-ops; presumably an
    assignment was intended.  Verify intended behaviour for root results.
    """
    if(len(args)==1):
        # No path argument supplied: stay in the current directory.
        return cwd
    else:
        if path_req[0] == '/':
            # Absolute request: use it verbatim.
            return path_req
        elif '../' in path_req:
            temp_args = path_req.split('../')
            empty_n = temp_args.count('')  # number of leading '../' hops
            temp_cwds = cwd.split('/')
            if(len(temp_args)==empty_n):
                # Pure '../../..' request: walk up from cwd.
                counter = empty_n-1
                if(empty_n>len(temp_cwds)):
                    # More hops than directories: clamp to root.
                    cwd = '/'
                    return cwd
                for i in range(len(temp_cwds)-1, 0, -1):
                    temp_cwds[i] = temp_args[counter]
                    counter-=1
                    if(counter==0):
                        cwd_fix = []
                        for temp_cwd in temp_cwds:
                            if len(temp_cwd)>0:
                                cwd_fix.append(temp_cwd)
                        cwd_fix = '/'.join(cwd_fix)
                        if(cwd_fix=='/'):
                            cwd_fix == '/'  # NOTE(review): no-op comparison
                        else:
                            cwd_fix = '/'+cwd_fix
                        break
                return cwd_fix
            else:
                # Mixed '../name' request: replace trailing components.
                temp_cwds.reverse()
                counter=1;
                cwd_fix = '/'
                flag_break = 0;  # NOTE(review): unused
                for i in range(0, len(temp_cwds)-1):
                    temp_cwds[i] = temp_args[counter]
                    counter+=1
                    if(len(temp_args)==counter):
                        cwd_fix = []
                        temp_cwds.reverse()
                        for temp_cwd in temp_cwds:
                            if len(temp_cwd)>0:
                                cwd_fix.append(temp_cwd)
                        cwd_fix = '/'.join(cwd_fix)
                        if(cwd_fix=='/'):
                            cwd_fix == '/'  # NOTE(review): no-op comparison
                        else:
                            cwd_fix = '/'+cwd_fix
                        break
                return cwd_fix
        else:
            # Plain relative path: append to cwd.
            if cwd == '/':
                return (cwd+path_req)
            else:
                return (cwd+'/'+path_req)
def removeData(self, cwd, path=None):
    """Ask every worker to remove *path*; succeed if any worker held it.

    :return: ('Tidak ada data', '') when no worker had the data, otherwise
             (None, 'Sudah dihapus') ["no data" / "already removed"].
    """
    errors = []
    flag_exist = 0  # NOTE(review): unused
    for worker in workers:
        error, results = worker.removeData(cwd, path)
        if(error is not None):
            errors.append(error)
    # Only an error on *every* worker counts as overall failure.
    if(len(workers)==len(errors)):
        return 'Tidak ada data', ''
    return None, 'Sudah dihapus'
def touch(self, cwd, path=None):
    """Create an empty file across the worker pool.

    Root-level paths ('/name') are placed on the single worker reporting
    the most free space; deeper paths are attempted on every worker.
    :return: (error, None) on failure, else (None, success_message)
    """
    errors = []
    flag_exist = 0  # NOTE(review): unused
    paths = path.split('/')
    if(len(paths)==2):
        # Root-level file: pick the worker with the largest reported size.
        size = -1000;
        worker_selected = ''
        for worker in workers:
            temp, temp_path = worker.checkData(path)
            if(temp):
                errors.append(temp)  # NOTE(review): collected but never acted on
        for worker in workers:
            temp = worker.getSize()
            print(temp)
            if(size < temp):
                size = temp
                worker_selected = worker
        error, results = worker_selected.touch(cwd, path)
        if(error):
            return error, None
        return None, results
    else:
        # Nested path: try every worker; fail only if all of them error.
        for worker in workers:
            error, results = worker.touch(cwd, path)
            if(error is not None):
                errors.append(error)
        if(len(workers)==len(errors)):
            return error, ''
        return None, 'File Sudah Dibuat'
def copy(self, cwd, path_from, path_to):
    """Copy a file or folder across the worker pool.

    First locates *path_from* on some worker (listSource).  Files
    (method 1) are read and re-created at the destination; folders are
    re-created along with one level of their contents.  Root-level
    destinations go to the worker with the most free space.
    Status strings are Indonesian.
    :return: (error, None) on failure, else (None, success_message)
    """
    errors = []
    worker_from = ''
    method_copy = 0  # 1 = source is a file, otherwise a folder
    lists = []
    flag_exist = 0  # NOTE(review): unused
    # Locate the source on some worker.
    for worker in workers:
        error, method, data = worker.listSource(cwd, path_from)
        # NOTE(review): print with multiple args — the '%s %s %s' is printed
        # literally, never interpolated.
        print('%s %s %s', error, method, data)
        if(error is not None):
            errors.append(error)
        else:
            worker_from = worker
            lists = data
            method_copy = method
    if(len(workers)==len(errors)):
        return 'Folder atau file '+path_from+' tidak ada', None
    if(method_copy==1):
        # --- File copy ---
        data = worker_from.readFile(cwd, path_from)
        errors = []
        paths_from = path_from.split('/')
        paths_to = path_to.split('/')
        if(len(paths_to)==2):
            # Root-level destination: must not exist anywhere; then pick the
            # worker with the largest reported free size.
            print('root')
            size = -1000;
            worker_selected = ''
            for worker in workers:
                temp, temp_path = worker.checkData(path_to)
                if(temp):
                    errors.append(temp)
            if(len(errors) > 0):
                return 'Tidak bisa membuat file, file sudah ada', None
            for worker in workers:
                temp = worker.getSize()
                print(temp)
                if(size < temp):
                    size = temp
                    worker_selected = worker
            error, results = worker_selected.makeFile(cwd, path_to, data)
            if(error):
                return error, None
            return None, results
        else:
            # Nested destination: try every worker; fail only if all error.
            for worker in workers:
                error, results = worker.makeFile(cwd, path_to, data)
                print('%s %s', error, results)  # NOTE(review): literal format string
                if(error is not None):
                    errors.append(error)
            if(len(workers)==len(errors)):
                print('gagal')
                return error, ''
            print('sukses')
            return None, 'File Sudah Dicopy'
    else:
        # --- Folder copy ---
        paths_from = path_from.split('/')
        paths_to = path_to.split('/')
        errors = []
        if(len(paths_to)==2):
            # Root-level destination folder.
            size = -1000;
            worker_selected = ''
            for worker in workers:
                temp, temp_path = worker.checkData(path_to)
                print(temp)
                if(temp):
                    errors.append(temp)
            print(errors)
            if(len(errors) > 0):
                return 'Tidak bisa membuat folder, folder sudah ada', None
            print('lolos')
            for worker in workers:
                temp = worker.getSize()
                print(temp)
                if(size < temp):
                    size = temp
                    worker_selected = worker
            error, result = worker_selected.makeFolder(cwd, path_to)
            if(error):
                return error, None
            # Replicate the (one level of) folder contents.
            for file in lists:
                if(file['type']==1):
                    print('ini file')
                    data = worker_from.readFile(cwd, path_from+file['name'])
                    error, results = worker_selected.makeFile(cwd, path_to+file['name'], data)
                elif(file['type']==2):
                    print('ini folder')
                    error, result = worker_selected.makeFolder(cwd, path_to+file['name'])
                if(error):
                    return error, None
            return None, 'Berhasil copy'
        else:
            # Nested destination folder: its parent must exist on some worker.
            path_to_s = path_to.replace('/'+paths_to[len(paths_to)-1],'')
            print(path_to_s)
            worker_selected = ''
            errors = []
            for worker in workers:
                temp, temp_path = worker.checkData(path_to_s)
                if(temp==0):
                    errors.append(temp)
                else:
                    worker_selected = worker
            if(len(errors) == len(workers)):
                return 'Tidak bisa membuat folder, folder tidak tersedia', None
            error, result = worker_selected.makeFolder(cwd, path_to)
            if(error):
                return error, None
            for file in lists:
                if(file['type']==1):
                    print('ini file')
                    data = worker_from.readFile(cwd, path_from+file['name'])
                    error, results = worker_selected.makeFile(cwd, path_to+file['name'], data)
                elif(file['type']==2):
                    print('ini folder')
                    error, result = worker_selected.makeFolder(cwd, path_to+file['name'])
                if(error):
                    return error, None
            return None, 'Berhasil copy'
    def mv(self, cwd, path_from, path_to):
        """Move a file (method 1) or folder tree (otherwise) from path_from
        to path_to across the worker pool, then delete the source.

        Returns a (error, message) pair; error is None on success.
        Messages are user-facing Indonesian strings.
        NOTE(review): relies on the module-global `workers` list and on
        self.removeData; worker APIs (listSource/checkData/getSize/...)
        are assumed to return (error, ...) tuples — confirm against worker.
        """
        errors = []
        worker_from = ''
        method_copy = 0
        lists = []
        flag_exist = 0
        # Find which worker actually holds the source path.
        for worker in workers:
            error, method, data = worker.listSource(cwd, path_from)
            print('%s %s %s', error, method, data)
            if(error is not None):
                errors.append(error)
            else:
                worker_from = worker
                lists = data
                method_copy = method
        # Every worker failed -> the source does not exist anywhere.
        if(len(workers)==len(errors)):
            return 'Folder atau file '+path_from+' tidak ada', None
        if(method_copy==1):
            # Source is a single file: read its content once.
            data = worker_from.readFile(cwd, path_from)
            errors = []
            paths_from = path_from.split('/')
            paths_to = path_to.split('/')
            print('bisa')
            if(len(paths_to)==2):
                # Destination is at root level: place the file on the worker
                # with the most free space (getSize), unless it already exists.
                print('root')
                size = -1000;
                worker_selected = ''
                for worker in workers:
                    temp, temp_path = worker.checkData(path_to)
                    if(temp):
                        errors.append(temp)
                if(len(errors) > 0):
                    return 'Tidak bisa membuat file, file sudah ada', None
                for worker in workers:
                    temp = worker.getSize()
                    print(temp)
                    if(size < temp):
                        size = temp
                        worker_selected = worker
                error, results = worker_selected.makeFile(cwd, path_to, data)
                if(error):
                    return error, None
                # Copy succeeded: remove the original to complete the move.
                error, results = self.removeData(cwd, path_from)
                if(error):
                    return 'Tidak bisa memindah file', None
                return None, 'Berhasil memindah file'
            else:
                # Nested destination: try every worker; the one that accepts
                # the file also triggers removal of the source.
                for worker in workers:
                    error, results = worker.makeFile(cwd, path_to, data)
                    print('%s %s', error, results)
                    if(error is not None):
                        errors.append(error)
                    else:
                        error, results = self.removeData(cwd, path_from)
                if(len(workers)==len(errors)):
                    print('gagal')
                    return error, ''
                print('sukses')
                return None, 'File Sudah Dipindah'
        else:
            # Source is a folder: recreate it (and one level of children)
            # on a selected worker, then remove the original.
            paths_from = path_from.split('/')
            paths_to = path_to.split('/')
            errors = []
            if(len(paths_to)==2):
                # Root-level destination: pick the worker with most space.
                size = -1000;
                worker_selected = ''
                for worker in workers:
                    temp, temp_path = worker.checkData(path_to)
                    print(temp)
                    if(temp):
                        errors.append(temp)
                print(errors)
                if(len(errors) > 0):
                    return 'Tidak bisa membuat folder, folder sudah ada', None
                print('lolos')
                for worker in workers:
                    temp = worker.getSize()
                    print(temp)
                    if(size < temp):
                        size = temp
                        worker_selected = worker
                error, result = worker_selected.makeFolder(cwd, path_to)
                if(error):
                    return error, None
                # Replicate direct children: type 1 = file, type 2 = folder.
                for file in lists:
                    if(file['type']==1):
                        print('ini file')
                        data = worker_from.readFile(cwd, path_from+file['name'])
                        error, results = worker_selected.makeFile(cwd, path_to+file['name'], data)
                    elif(file['type']==2):
                        print('ini folder')
                        error, result = worker_selected.makeFolder(cwd, path_to+file['name'])
                    if(error):
                        return error, None
                error, results = self.removeData(cwd, path_from)
                if(error):
                    return 'Tidak bisa memindah file', None
                return None, 'Berhasil copy'
            else:
                # Nested destination: the parent folder must already exist on
                # some worker; that worker receives the moved tree.
                path_to_s = path_to.replace('/'+paths_to[len(paths_to)-1],'')
                print(path_to_s)
                worker_selected = ''
                errors = []
                for worker in workers:
                    temp, temp_path = worker.checkData(path_to_s)
                    if(temp==0):
                        errors.append(temp)
                    else:
                        worker_selected = worker
                if(len(errors) == len(workers)):
                    return 'Tidak bisa membuat folder, folder tidak tersedia', None
                error, result = worker_selected.makeFolder(cwd, path_to)
                if(error):
                    return error, None
                for file in lists:
                    if(file['type']==1):
                        print('ini file')
                        data = worker_from.readFile(cwd, path_from+file['name'])
                        error, results = worker_selected.makeFile(cwd, path_to+file['name'], data)
                    elif(file['type']==2):
                        print('ini folder')
                        error, result = worker_selected.makeFolder(cwd, path_to+file['name'])
                    if(error):
                        return error, None
                error, results = self.removeData(cwd, path_from)
                if(error):
                    return 'Tidak bisa memindah folder', None
                return None, 'Berhasil dipindah'
def listingFolder(self, cwd, path=None):
list_folders = []
errors = []
flag_exist = 0
for worker in workers:
error, list_folder = worker.listingFolder(cwd, path)
list_folders = list_folders+list_folder
if(error is not None):
errors.append(error)
if(len(workers)==len(errors)):
return 'Tidak ada folder', []
return None, list_folders
def checkDir(self, cwd):
flag_exist = 0
for worker in workers:
res = worker.isExistFolder(cwd)
if(res):
flag_exist = 1;
break
if(flag_exist):
return True
else:
return False
def args(self,args,cwd):
if args[0] == 'upload':
workers[0].createFile(cwd, file, data)
if args[0] == 'ls':
if(len(args)==1):
path = self.generateStructureFolder(cwd, args)
else:
path = self.generateStructureFolder(cwd, args, args[1])
if(len(args)==1):
error, result = self.listingFolder(cwd,path)
return error, result, cwd
else:
error, result = self.listingFolder(cwd,path)
return error, result, cwd
elif args[0] == 'cd':
if(len(args)==1):
path = self.generateStructureFolder(cwd, args)
else:
path = self.generateStructureFolder(cwd, args, args[1])
if(self.checkDir(path)):
return None, cwd, path
else:
return 'Folder tidak ada', cwd, cwd
elif args[0] == 'rm':
if(len(args)==1):
return args[0]+': missing operand',None,cwd
else:
path = self.generateStructureFolder(cwd, args, args[1])
error, result = self.removeData(cwd, path)
return error, result, cwd
elif args[0] == 'touch':
if(len(args)==1):
return args[0]+': missing operand',None,cwd
else:
path = self.generateStructureFolder(cwd, args, args[1])
error, result = self.touch(cwd, path)
return error, result, cwd
elif args[0] == 'cp':
if(len(args)==1):
return args[0]+': missing operand',None,cwd
elif(len(args)==2):
return args[0]+': missing destination file operand after '+args[1],None,cwd
else:
path_from = self.generateStructureFolder(cwd, args, args[1])
path_to = self.generateStructureFolder(cwd, args, args[2])
error, result = self.copy(cwd, path_from, path_to)
return error, result, cwd
elif args[0] == 'mv':
print('bisa')
if(len(args)==1):
return args[0]+': missing operand',None,cwd
elif(len(args)==2):
return args[0]+': missing destination file operand after '+args[1],None,cwd
else:
path_from = self.generateStructureFolder(cwd, args, args[1])
path_to = self.generateStructureFolder(cwd, args, args[2])
error, result = self.mv(cwd, path_from, path_to)
return error, result, cwd
else:
return None, 'Perintah tidak ada', cwd
def listenToWorker():
    """Create a Pyro4 proxy for every registered worker URI and collect
    the proxies into the module-global `workers` list."""
    for worker_uri in list_workers:
        workers.append(Pyro4.Proxy(worker_uri))
def main():
    """Connect to all workers, then serve the Middleware over Pyro4."""
    # Resolve worker proxies before exposing the middleware object.
    listenToWorker()
    # Serve without a name server on a fixed host/port.
    Pyro4.Daemon.serveSimple(
        {
            Middleware: "middleware"
        },
        ns=False, host="127.0.0.1", port=8001)
if __name__ == "__main__":
    main()
|
import abc
from typing import List, Union
from werkzeug.exceptions import HTTPException
from domain.evenements.entities.tag_entity import TagEntity
TagsList = List[TagEntity]
class AlreadyExistingTagUuid(HTTPException):
    """409 Conflict: a tag with the same uuid is already stored."""
    code = 409
    description = "Tag already exists"
class NotFoundTag(HTTPException):
    """404 Not Found: no tag matches the requested uuid(s)."""
    code = 404
    description = "Tag not found"
class AbstractTagRepository(abc.ABC):
    """Base repository for TagEntity persistence.

    Subclasses implement the storage primitives (_add, _match_uuid,
    _match_uuids, get_all); the public methods add uuid-uniqueness and
    not-found handling on top of them.
    """

    def add(self, tag: TagEntity) -> None:
        """Store *tag*; raise AlreadyExistingTagUuid if its uuid is taken."""
        if self._match_uuid(tag.uuid):
            raise AlreadyExistingTagUuid()
        self._add(tag)
        # TODO : test if title already exists

    def get_by_uuid(self, uuid: str) -> TagEntity:
        """Return the tag with *uuid*; raise NotFoundTag when absent."""
        matches = self._match_uuid(uuid)
        if not matches:
            # Instantiated for consistency with AlreadyExistingTagUuid()
            # above (raising the bare class worked, but mixed styles).
            raise NotFoundTag()
        return matches

    def get_by_uuid_list(self, uuids: List[str]) -> List[TagEntity]:
        """Return every tag whose uuid is in *uuids*; raise NotFoundTag
        when nothing matches."""
        matches = self._match_uuids(uuids)
        if not matches:
            raise NotFoundTag()
        return matches

    @abc.abstractmethod
    def get_all(self) -> TagsList:
        """Return every stored tag."""
        raise NotImplementedError

    @abc.abstractmethod
    def _add(self, tag: TagEntity) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def _match_uuid(self, uuid: str) -> Union[TagEntity, None]:
        raise NotImplementedError

    @abc.abstractmethod
    def _match_uuids(self, uuids: List[str]) -> List[TagEntity]:
        raise NotImplementedError
class InMemoryTagRepository(AbstractTagRepository):
    """In-memory tag repository (used in tests).

    Fix: `_tags` used to be a mutable class-level list, so every
    repository instance shared (and mutated) the same storage. The list
    is now created per instance in __init__; the no-argument constructor
    keeps the interface backward-compatible.
    """

    def __init__(self) -> None:
        # Per-instance storage instead of a shared class attribute.
        self._tags: TagsList = []

    def get_all(self) -> TagsList:
        """Return every stored tag."""
        return self._tags

    def _match_uuid(self, uuid: str) -> Union[TagEntity, None]:
        """Return the first tag with *uuid*, or None."""
        matches = [tag for tag in self._tags if tag.uuid == uuid]
        if not matches:
            return None
        return matches[0]

    def _add(self, tag: TagEntity) -> None:
        self._tags.append(tag)

    def _match_uuids(self, uuids: List[str]) -> List[TagEntity]:
        """Return all tags whose uuid appears in *uuids*."""
        matches = [tag for tag in self._tags if tag.uuid in uuids]
        return matches

    # next methods are only for test purposes
    @property
    def tags(self) -> TagsList:
        return self._tags

    def set_tags(self, tags: TagsList) -> None:
        self._tags = tags
|
'''
If we take 47, reverse and add, 47 + 74 = 121, which is palindromic.
Not all numbers produce palindromes so quickly. For example,
349 + 943 = 1292,
1292 + 2921 = 4213
4213 + 3124 = 7337
That is, 349 took three iterations to arrive at a palindrome.
Although no one has proved it yet, it is thought that some numbers, like 196, never produce a palindrome. A number that never forms a palindrome through the reverse and add process is called a Lychrel number. Due to the theoretical nature of these numbers, and for the purpose of this problem, we shall assume that a number is Lychrel until proven otherwise. In addition you are given that for every number below ten-thousand, it will either (i) become a palindrome in less than fifty iterations, or, (ii) no one, with all the computing power that exists, has managed so far to map it to a palindrome. In fact, 10677 is the first number to be shown to require over fifty iterations before producing a palindrome: 4668731596684224866951378664 (53 iterations, 28-digits).
Surprisingly, there are palindromic numbers that are themselves Lychrel numbers; the first example is 4994.
How many Lychrel numbers are there below ten-thousand?
NOTE: Wording was modified slightly on 24 April 2007 to emphasise the theoretical nature of Lychrel numbers.
'''
import numpy as np
def is_lychrel(n, max_iterations=50):
    """Return True if *n* is assumed Lychrel: no palindrome appears within
    *max_iterations* reverse-and-add steps.

    Exact Python ints are used deliberately. The previous float64 numpy
    array silently lost precision once sums exceeded 2**53 (values reach
    28 digits), and str() of large floats produces scientific notation,
    both of which corrupt the digit reversal and the palindrome test.
    """
    for _ in range(max_iterations):
        n += int(str(n)[::-1])
        digits = str(n)
        if digits == digits[::-1]:
            return False
    return True


# Count Lychrel candidates below ten-thousand (same range the array
# version scanned: 0..9999).
result = sum(1 for candidate in range(10000) if is_lychrel(candidate))
print(result)
|
from tkinter import *
# Fixed-size top-level window for the DummyFit app.
root = Tk()
root.title("DummyFit")
root.geometry('340x720')
root.resizable(0, 0)
# Title banner.
lbl = Label(root, text="DummyFit", font=("Arial Bold", 50))
lbl.grid(column=0, row=0)
# Four labelled text inputs stacked in column 0.
# NOTE(review): grid rows 3 and 10 are skipped — presumably intentional
# spacing, but confirm; the labels are all placeholder "Input ---" text.
head1 =Label(root, text="Input ---", font=("Arial Bold", 10))
head1.grid(column=0, row=1)
txt1 = Entry(root,width=10,)
txt1.grid(column=0, row=2)
head2 =Label(root, text="Input ---", font=("Arial Bold", 10))
head2.grid(column=0, row=4)
txt2 = Entry(root,width=10,)
txt2.grid(column=0, row=5)
head3 =Label(root, text="Input ---", font=("Arial Bold", 10))
head3.grid(column=0, row=6)
txt3 = Entry(root,width=10,)
txt3.grid(column=0, row=7)
head4 =Label(root, text="Input ---", font=("Arial Bold", 10))
head4.grid(column=0, row=8)
txt4 = Entry(root,width=10,)
txt4.grid(column=0, row=9)
def clicked():
    # Button callback: currently shows placeholder text only; the real
    # BMI calculation is not implemented yet.
    rlt.configure(text="#Output from BMI Calc internal")
btn = Button(root, text="Enter", command=clicked)
btn.grid(column=0, row=11)
# Result label updated by clicked().
rlt = Label(root, text="Results", font=("Arial Bold", 10))
rlt.grid(column=0, row=12)
# Blocks until the window is closed.
root.mainloop()
|
from qiniu import QiniuMacAuth, http
# Initialize the access-key / secret-key credential pair.
access_key = 'your_AK'
secret_key = 'your_SK'
q = QiniuMacAuth(access_key, secret_key)
url = 'http://ai.qiniuapi.com/v1/text/censor' # request URL of the text-censor API
# Request payload: the text to check and the censor scene ("spam").
data = {
    "data": {
        "text": "你我,ak47"
    },
    "params": {
        "scenes": ["spam"]
    }
}
# POST with Qiniu MAC signing; returns (parsed_body, response_info).
# NOTE(review): assumes ret is a dict with a 'code' field — ret may be
# None on transport failure, which would raise here; confirm handling.
ret, info = http._post_with_qiniu_mac(url, data, q)
if ret['code'] == 0:
    print('检测成功\n结果是:', ret['result'])
else:
    print('检测出错')
|
# -*- coding: utf-8 -*-
text="En cette journée mondiale de lutte contre le sida, rappelons qu'il est l'affaire de TS & TTES, un combat qui doit continuer ici et là-bas".decode("utf8")
pat1=re.compile(r'(http|https)://[^\s]*',re.IGNORECASE | re.DOTALL)
pat2=re.compile(r"[',;\.:/!?()\"#*%]",re.IGNORECASE | re.DOTALL)
pat3=re.compile(r" +",re.IGNORECASE | re.DOTALL)
textwords=pat1.sub('',text)
textwords=pat2.sub('',textwords)
textwords=pat3.sub(' ',textwords).split(" ")
tw=""
for w in textwords:
tw+=stemmer.stem(w)+" "
print tw
textwords=tw[:-1].split(" ")
for t in textwords:
print t
print ("tokenized and stemmed - done")
for j in range(len(textwords)):
print "Checking for similarities for " + textwords[j]
print "======================================================================================="
print
isscored=0
for kw in keywords:
kws=kw[1].split(' ')
l=len(kws)
if textwords[j]==kws[0] and j+l<len(textwords):
print "tweet may contain "+kw[1]
foundword=1
for k in range(l):
if textwords[j+k]<>kws[k]:
print "but "+textwords[j+k]+" is not like "+kws[k]
foundword=0
break
else:
print textwords[j+k]+" is indeed like "+kws[k]
if foundword==1:
iscored=1
print
print "one point in "+kw[0]
print
else:
print textwords[j] + " is not like "+kws[0]+". No need to check for "+kw[1]+"."
if isscored==0:
print "Pas de correspondance trouvée pour " + textwords[j]
print
else:
print "Correspondance trouvée pour " + textwords[j]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ABE 651 Assignment 2
Due 1/31 17:00
Exercise 6.5from ThinkPython2 (p.61)
This is a program that prompts the user for two values
and then computes the greatest common divisor (GCD)
@author: wagne216
"""
# Prompt user for the two values to compare.
# NOTE: int() raises ValueError if the input is not a whole number.
value_1 = input('Please choose first numeric value ') # first value as text
integer_1 = int(value_1) # convert first value to an integer
value_2 = input('Please choose a different second numeric value ') # second value as text
integer_2 = int(value_2) # convert second value to an integer
# create system to solve for GCD
def gcd(a, b):
    """Return a message with the greatest common divisor of a and b.

    Uses the iterative Euclidean algorithm: repeatedly replace (a, b)
    with (b, a % b) until the remainder is zero; |a| is then the GCD.
    The original single-step version divided by the remainder (crashing
    with ZeroDivisionError when a divided b) and returned None whenever
    its one remainder test failed.
    """
    while b != 0:
        a, b = b, a % b
    # abs() so negative inputs still report a non-negative divisor.
    return f'The greatest common divisor is {abs(a)}'
# Compute and display the GCD of the two user-supplied integers.
print(gcd(integer_1, integer_2))
|
import cv2
import numpy as np
import glob
from generator import preprocess_labels
from sklearn.model_selection import train_test_split
def load_files(folder="Train", reduce=False):
    """Load camera RGB frames and their segmentation masks from *folder*.

    Args:
        folder: dataset sub-directory containing CameraRGB/ and CameraSeg/.
        reduce: when True, downscale every image and mask to 400x320.

    Returns:
        (images, masks): parallel lists of RGB numpy arrays, where
        images[i] and masks[i] belong to the same frame.

    Fix: the file lists are now sorted. glob.glob returns files in
    arbitrary filesystem order, so the two independent globs could pair
    an image with the wrong mask.
    """
    images = []
    masks = []
    for file in sorted(glob.glob("./" + folder + "/CameraRGB/*.png")):
        img = cv2.imread(file)
        # OpenCV loads BGR; convert to RGB for the rest of the pipeline.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if reduce:
            img = cv2.resize(img, (400, 320))
        images.append(img)
    for file in sorted(glob.glob("./" + folder + "/CameraSeg/*.png")):
        img = cv2.imread(file)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Collapse raw label colors into training classes.
        img = preprocess_labels(img)
        if reduce:
            img = cv2.resize(img, (400, 320))
        masks.append(img)
    return images, masks
def main():
    """Build and save the reduced test split as .npy arrays.

    The commented-out sections below were earlier one-off runs that
    produced the full-size and reduced training splits.
    """
    # images, masks = load_files()
    #
    # x_train, x_test, y_train, y_test = train_test_split(images, masks, test_size=0.2)
    # x_train = np.stack(x_train)
    # x_test = np.stack(x_test)
    # y_train = np.stack(y_train)
    # y_test = np.stack(y_test)
    #
    # np.save("x_train", x_train)
    # np.save("x_test", x_test)
    # np.save("y_train", y_train)
    # np.save("y_test", y_test)
    # images, masks = load_files(folder="Train", reduce=True)
    # x_train, x_test, y_train, y_test = train_test_split(images, masks, test_size=0.2)
    # x_train = np.stack(x_train)
    # y_train = np.stack(y_train)
    # np.save("x_train_s", x_train)
    # np.save("y_train_s", y_train)
    # Load the reduced (400x320) test set and stack into single arrays.
    images, masks = load_files(folder="Test", reduce=True)
    x_test_s = np.stack(images)
    y_test_s = np.stack(masks)
    np.save("x_test_s", x_test_s)
    np.save("y_test_s", y_test_s)
if __name__ == '__main__':
    main()
|
"""
Model for approximate system dynamics
"""
import torch
import torch.nn as nn
from torchlib.utils.layers import linear_bn_relu_block
class ContinuousMLPDynamics(nn.Module):
    """Two-layer MLP that predicts (next_state, reward) from a
    state/action pair.

    Continuous actions are concatenated directly onto the state vector;
    forward() returns the next-state prediction and a reward with its
    trailing singleton dimension squeezed away.
    """

    def __init__(self, state_dim, action_dim, nn_size=64):
        super(ContinuousMLPDynamics, self).__init__()
        self.discrete = False
        # Shared trunk: two linear+batchnorm+ReLU blocks.
        trunk = list(linear_bn_relu_block(state_dim + action_dim, nn_size, normalize=True))
        trunk += list(linear_bn_relu_block(nn_size, nn_size, normalize=True))
        self.model = nn.Sequential(*trunk)
        # Separate linear heads for the state delta and the scalar reward.
        self.state_head = nn.Linear(nn_size, state_dim)
        self.reward_head = nn.Linear(nn_size, 1)

    def forward(self, states, actions):
        joint = torch.cat((states, actions), dim=-1)
        features = self.model(joint)
        predicted_states = self.state_head.forward(features)
        predicted_rewards = self.reward_head.forward(features).squeeze(dim=-1)
        return predicted_states, predicted_rewards
class DiscreteMLPDynamics(ContinuousMLPDynamics):
    """Dynamics model for discrete actions.

    Action indices are embedded into a (5 * action_dim)-dimensional
    vector (with dropout) and fed through the continuous model.
    """

    def __init__(self, state_dim, action_dim, nn_size=64):
        embedding_dim = action_dim * 5
        super(DiscreteMLPDynamics, self).__init__(state_dim, embedding_dim, nn_size)
        self.embedding = nn.Sequential(
            nn.Embedding(action_dim, embedding_dim),
            nn.Dropout(0.1),
        )
        self.discrete = True

    def forward(self, states, actions):
        embedded_actions = self.embedding.forward(actions)
        return super(DiscreteMLPDynamics, self).forward(states, embedded_actions)
|
#-*- coding:utf-8 -*-
"""
" ip2region python seacher client module
"
" Author: koma<komazhang@foxmail.com>
" Date : 2015-11-06
"""
import struct, io, socket, sys
class Ip2Region(object):
    """ip2region database searcher.

    Provides three lookup strategies over the same .db file:
    memorySearch (loads the whole file once), binarySearch (seeks per
    probe) and btreeSearch (uses the 8KB header block to narrow the
    index range first). All return {"city_id": int, "region": bytes}.
    """
    # 12 bytes per index entry: start ip (4) | end ip (4) | data ptr (4).
    __INDEX_BLOCK_LENGTH = 12
    __TOTAL_HEADER_LENGTH = 8192
    __f = None
    __headerSip = []
    __headerPtr = []
    __headerLen = 0
    __indexSPtr = 0
    __indexLPtr = 0
    __indexCount = 0
    __dbBinStr = ''
    def __init__(self, dbfile):
        # Opens the db file; exits the process if it cannot be opened.
        self.initDatabase(dbfile)
    def memorySearch(self, ip):
        """
        " memory search method
        " param: ip
        """
        # Accept dotted-quad strings as well as numeric-string ips.
        if not ip.isdigit(): ip = self.ip2long(ip)
        if self.__dbBinStr == '':
            self.__dbBinStr = self.__f.read() #read all the contents in file
            # Super block: first/last index-block pointers.
            self.__indexSPtr = self.getLong(self.__dbBinStr, 0)
            self.__indexLPtr = self.getLong(self.__dbBinStr, 4)
            self.__indexCount = int((self.__indexLPtr - self.__indexSPtr)/self.__INDEX_BLOCK_LENGTH)+1
        # Binary search over [start ip, end ip] ranges in the index.
        l, h, dataPtr = (0, self.__indexCount, 0)
        while l <= h:
            m = int((l+h) >> 1)
            p = self.__indexSPtr + m*self.__INDEX_BLOCK_LENGTH
            sip = self.getLong(self.__dbBinStr, p)
            if ip < sip:
                h = m -1
            else:
                eip = self.getLong(self.__dbBinStr, p+4)
                if ip > eip:
                    l = m + 1;
                else:
                    dataPtr = self.getLong(self.__dbBinStr, p+8)
                    break
        if dataPtr == 0: raise Exception("Data pointer not found")
        return self.returnData(dataPtr)
    def binarySearch(self, ip):
        """
        " binary search method
        " param: ip
        """
        if not ip.isdigit(): ip = self.ip2long(ip)
        if self.__indexCount == 0:
            # Lazily read the super block to locate the index region.
            self.__f.seek(0)
            superBlock = self.__f.read(8)
            self.__indexSPtr = self.getLong(superBlock, 0)
            self.__indexLPtr = self.getLong(superBlock, 4)
            self.__indexCount = int((self.__indexLPtr - self.__indexSPtr) / self.__INDEX_BLOCK_LENGTH) + 1
        # Same range binary search as memorySearch, but reading each
        # probed index block from disk.
        l, h, dataPtr = (0, self.__indexCount, 0)
        while l <= h:
            m = int((l+h) >> 1)
            p = m*self.__INDEX_BLOCK_LENGTH
            self.__f.seek(self.__indexSPtr+p)
            buffer = self.__f.read(self.__INDEX_BLOCK_LENGTH)
            sip = self.getLong(buffer, 0)
            if ip < sip:
                h = m - 1
            else:
                eip = self.getLong(buffer, 4)
                if ip > eip:
                    l = m + 1
                else:
                    dataPtr = self.getLong(buffer, 8)
                    break
        if dataPtr == 0: raise Exception("Data pointer not found")
        return self.returnData(dataPtr)
    def btreeSearch(self, ip):
        """
        " b-tree search method
        " param: ip
        """
        if not ip.isdigit(): ip = self.ip2long(ip)
        if len(self.__headerSip) < 1:
            headerLen = 0
            #pass the super block
            self.__f.seek(8)
            #read the header block
            b = self.__f.read(self.__TOTAL_HEADER_LENGTH)
            #parse the header block: (start ip, index ptr) pairs until a
            #zero pointer marks the end.
            for i in range(0, len(b), 8):
                sip = self.getLong(b, i)
                ptr = self.getLong(b, i+4)
                if ptr == 0:
                    break
                self.__headerSip.append(sip)
                self.__headerPtr.append(ptr)
                headerLen += 1
            self.__headerLen = headerLen
        # Binary search over the header to find the [sptr, eptr) index
        # segment that may contain ip.
        l, h, sptr, eptr = (0, self.__headerLen, 0, 0)
        while l <= h:
            m = int((l+h) >> 1)
            if ip == self.__headerSip[m]:
                if m > 0:
                    sptr = self.__headerPtr[m-1]
                    eptr = self.__headerPtr[m]
                else:
                    sptr = self.__headerPtr[m]
                    eptr = self.__headerPtr[m+1]
                break
            if ip < self.__headerSip[m]:
                if m == 0:
                    sptr = self.__headerPtr[m]
                    eptr = self.__headerPtr[m+1]
                    break
                elif ip > self.__headerSip[m-1]:
                    sptr = self.__headerPtr[m-1]
                    eptr = self.__headerPtr[m]
                    break
                h = m - 1
            else:
                if m == self.__headerLen - 1:
                    sptr = self.__headerPtr[m-1]
                    eptr = self.__headerPtr[m]
                    break
                elif ip <= self.__headerSip[m+1]:
                    sptr = self.__headerPtr[m]
                    eptr = self.__headerPtr[m+1]
                    break
                l = m + 1
        if sptr == 0: raise Exception("Index pointer not found")
        # Read the whole candidate index segment in one call, then binary
        # search it in memory.
        indexLen = eptr - sptr
        self.__f.seek(sptr)
        index = self.__f.read(indexLen + self.__INDEX_BLOCK_LENGTH)
        l, h, dataPrt = (0, int(indexLen/self.__INDEX_BLOCK_LENGTH), 0)
        while l <= h:
            m = int((l+h) >> 1)
            offset = int(m * self.__INDEX_BLOCK_LENGTH)
            sip = self.getLong(index, offset)
            if ip < sip:
                h = m - 1
            else:
                eip = self.getLong(index, offset+4)
                if ip > eip:
                    l = m + 1;
                else:
                    dataPrt = self.getLong(index, offset+8)
                    break
        if dataPrt == 0: raise Exception("Data pointer not found")
        return self.returnData(dataPrt)
    def initDatabase(self, dbfile):
        """
        " initialize the database for search
        " param: dbFile
        """
        try:
            self.__f = io.open(dbfile, "rb")
        except IOError as e:
            # Cannot proceed without the database: report and exit.
            print("[Error]: %s" % e)
            sys.exit()
    def returnData(self, dataPtr):
        """
        " get ip data from db file by data start ptr
        " param: dsptr
        """
        # High byte of the pointer encodes the record length; low 3 bytes
        # are the file offset of the record.
        dataLen = (dataPtr >> 24) & 0xFF
        dataPtr = dataPtr & 0x00FFFFFF
        self.__f.seek(dataPtr)
        data = self.__f.read(dataLen)
        # First 4 bytes: city id; remainder: region bytes.
        return {
            "city_id": self.getLong(data, 0),
            "region" : data[4:]
        }
    def ip2long(self, ip):
        # Dotted-quad string -> unsigned 32-bit integer (network order).
        _ip = socket.inet_aton(ip)
        return struct.unpack("!L", _ip)[0]
    def isip(self, ip):
        # Syntactic check that ip is a valid dotted-quad IPv4 string.
        p = ip.split(".")
        if len(p) != 4 : return False
        for pp in p:
            if not pp.isdigit() : return False
            if len(pp) > 3 : return False
            if int(pp) > 255 : return False
        return True
    def getLong(self, b, offset):
        # Read an unsigned 32-bit int from b at offset; 0 when truncated.
        # NOTE(review): 'I' uses native byte order — presumably the db is
        # little-endian, so '<I' would be needed on big-endian hosts;
        # confirm against the db format before changing.
        if len(b[offset:offset+4]) == 4:
            return struct.unpack('I', b[offset:offset+4])[0]
        return 0
    def close(self):
        # Release the file handle and drop cached header/index data.
        if self.__f != None:
            self.__f.close()
        self.__dbBinStr = None
        self.__headerPtr = None
        self.__headerSip = None
|
# -*- coding: utf-8 -*-
"""
node class. this class is the base class for all the node type in the tree.
global variable- parmetersInTheWorld- represent the amount of parm we have
- debugMode- represent the run mode
this class maintain the updates between the calculated arguments such as probability, distributions and debug,
and update the etree in order to print it to xml file.
as well read from xml file to etree and then wrap the etree in order to calculate diff arguments.
"""
import random
import math
import re
from lxml import etree
from copy import deepcopy
#DISTRIBUTIONS
from distributions.computed import Computed
from distributions.normal import Normal
from distributions.uniform import Uniform
import SumRandomVariables
class node:
    """Wrapper around an lxml etree element in a plan/behavior tree.

    Maintains per-node probability and distribution tables, debug state
    and a wrapped child list, and keeps the underlying etree in sync so
    the tree can be written back to XML.
    """
    # Class attribute: number of distinct world parameters.
    # Accessed as node.parmetersInTheWorld; can be set by any importer.
    parmetersInTheWorld = 1
    # Class attribute: global debug-mode flag for all nodes.
    debugMode = False
    # Constructor args: treeInstance - node in the etree, the etree itself,
    # and prep - node type (seq, plan, etc.)
    def __init__(self, treeInstance = None,mytree = None,prep="plan",parent=None):
        """Wrap *treeInstance* (an etree element) belonging to *mytree*.

        When mytree is None a fresh tree with a "plan" root is created
        (Python has no constructor overloading, hence the default).
        parent is the wrapping node's parent node, or None for the root.
        """
        if mytree == None :
            # Create a new tree instance with a plan node as root.
            self.treeInst = etree.Element("plan")
            from tree import xmlTree
            # New tree wrapper around the fresh root element.
            self.myTree = xmlTree(None,self.treeInst)
        else:
            self.myTree = mytree
            self.treeInst = treeInstance
        self.parent = parent
        # monitor - boolean property, default True.
        self.monitor = True
        # Wrapped child nodes (built lazily by _createChildList).
        self.childList = []
        # Node probability table (list of string tokens).
        self.probTable = []
        # Distribution tables for success and failure: each entry points
        # to a distribution parsed from the XML attributes.
        self.distTableSucc = self._createDistTable("Successdistribution")
        self.distTableFail = self._createDistTable("Failuredistribution")
        # Parse the "probability" attribute into the probability table.
        probString = self.getAttrib("probability")
        if probString !=None:
            # self.probTable= self._parseString(probString)
            self.probTable = self._createProbTable(probString)
        else:
            self.probTable= None
        # DEBUGchild - True when a debug node in the subtree was already
        # debugged; default False.
        self.DEBUGchild= False
        self._updateChildDebug()
        # DEBUG - [success_bool, time_float] read from the XML, or None.
        self.DEBUG = self._setDebugFromXmlFile()
        # Flag indicating whether this node was updated after debug.
        self.reset = False
        #self.upperBound = [0]*node.parmetersInTheWorld
        #self.lowerBound = [0]*node.parmetersInTheWorld
        self.size = 1
#parseString by whiteSpace
def _parseString(self, string):
words = re.split('\s+',string)
#return a list of words seperate by whiteSpace
#print "liat", words
if words[len(words)-1]=='':
words.remove('')
return words
#create probtalbe- parse string to list of float
def _createProbTable(self,stringProbList):
probList = self._parseString(stringProbList)
# for index in range(len(probList)):
# probList[index] =float(probList[index])
return probList
#return parent. if it's the root- return None
def getParent(self):
return self.parent
#get branch-factor
def getBF(self):
return (len(self.treeInst))
#create a new node. append it as a child to the self.treeInst and return a node
def createNode(self,tag):
node = self._createChildByTag(etree.SubElement(self.treeInst,tag))
return node
#input:string-tagtype, create a new node with tag-type and add it to the node direct children
#append the new child to the node children list
#output - return the newNode
def addNode(self,tag):
node = self.createNode(tag)
self.childList.append(node)
return node
#input: parmeter and his value, add parm and set value or just set value
def setAttrib(self,parm,value):
self.treeInst.attrib[parm] = str(value)
#input: paramter name. output: return the value as a string or None
def getAttrib(self,parm):
return self.treeInst.get(parm)
#input: node, output: boolean if this node is monitore
def isMonitored (self):
# return (self.treeInst.tag == "monitor")
return (self.monitor == True)
#input- tag, output if this node is this tag type- return True, else- False
def boolWhoAmI (self, tag):
return (self.treeInst.tag == tag)
#return list of the node children
def getChildren (self):
#call _createChildList which create a wrap for the etree node chilren -
return self._createChildList()
#create the wrap for child list and return a list
def _createChildList(self):
#print self.treeInst.__len__(), self.treeInst.tag
if len(self.childList) !=0:
return self.childList
for element in list(self.treeInst):
self.childList.append(self._createChildByTag(element))
return self.childList
#input: child num in the list , output: a new child node- not a deepcopy
def getChild(self,index):
#return none if index given is bigger then the list length
if index >= len(self.childList):
return None
else:
#if child list is not empty - return pointer to the node
if len(self.childList) > 0:
return self.childList[index]
else:
#create child list and return the child at index
self._createChildList()
return self.childList[index]
#input xml tree elem, create the node wrap
    def _createChildByTag(self, elem):
        """Wrap the etree element *elem* in the node subclass matching its
        tag (seq/tsk/dec/loop/not/par/sel/bool); return None for None.

        Imports are local to avoid circular imports between the node
        subclasses and this base class.
        Returns None (implicitly) for any unrecognized tag.
        """
        # Return None if the element given from the etree is None.
        if elem == None:
            return None
        # Create the new node according to its tag type.
        if elem.tag == "seq":
            from seqnode import SeqNode
            return SeqNode(elem,self.myTree,self)
        # tsk type child
        if elem.tag == "tsk":
            from tsknode import TskNode
            return TskNode(elem,self.myTree,self)
        # Decorator - "L" means loop (per the cogniteam encoding).
        if elem.tag == "dec":
            # _CreatDecoratorNodesFromName will append the right child to self.
            return self._CreatDecoratorNodesFromName(elem)
        # loop child type
        if elem.tag == "loop":
            from loopnode import LoopNode
            return LoopNode(elem,self.myTree,self)
        # not child type
        if elem.tag == "not":
            from notnode import NotNode
            return NotNode(elem,self.myTree,self)
        # parallel child type
        if elem.tag =="par":
            from parallelnode import ParallelNode
            return ParallelNode(elem,self.myTree,self)
        # selector child type
        if elem.tag =="sel":
            from selectnode import SelectNode
            return SelectNode(elem,self.myTree,self)
        # bool node
        if elem.tag == "bool":
            from boolean import BooleanNode
            return BooleanNode(elem,self.myTree,self)
#print the tree to xml- can be done from every node in the tree.
def treeToXml(self,fileName):
#call treeToXml.
self.myTree.treeToXml(fileName)
#set monitor boolean property
def setMonitor(self,boolSet):
self.monitor = boolSet
#this func compare the node by there instance ID given by python func id.
def comparTo(self,nodeToCompare):
return id(self)==id(nodeToCompare)
#this func remove the elem from the original xml tree. r
def _removeSubElement(self,elem):
#remove method compares elements based on identity, not on tag value or contents.
self.treeInst.remove(elem)
def __getitem__(self):
return self
#input - EtreeInst- element which it's tag is dec - decorator
#output new node- loop/not with childen- example- for dec "!L!" crete not - loop - not
    def _CreatDecoratorNodesFromName(self, element):
        """Expand a <dec> etree element into a chain of loop/not nodes.

        The decorator's name encodes the chain, one char per node:
        'L' -> loop, '!' -> not, 'T'/'F' -> a single bool node (returned
        immediately). For example name "!L!" becomes not -> loop -> not.
        The last node in the chain inherits all of the decorator's etree
        attributes; the original <dec> element is replaced in the tree.
        Returns the root of the created chain (or the bool node).
        """
        name = element.get("name")
        # Indentation text carried over so the written XML stays readable.
        ident = element.text
        identTail = element.tail
        newChild = None
        newEtreeInst = deepcopy(element)
        parent = element.getparent()
        lastChild = None
        # Iterate over the name chars, creating one node per char.
        for char in name:
            # newChild is the first node of the chain replacing the decorator.
            if newChild == None:
                # 'T'/'F' means the whole decorator is a bool node.
                if char == "T" or char == "F":
                    from boolean import BooleanNode
                    return BooleanNode(element,self.myTree,self)
                # 'L' creates a loop node.
                if char == "L" :
                    # createNode builds the node and appends its element.
                    newChild = self.createNode("loop")
                # '!' creates a not node.
                else:
                    if char == "!":
                        newChild = self.createNode("not")
                if newChild!= None:
                    newChild.treeInst.text = ident
                    newChild.treeInst.tail = identTail
            # After the first node, each new node is appended to the chain
            # via addNode on the previous one.
            else:
                if lastChild == None:
                    # Second node: direct child of newChild.
                    if char == "L" :
                        lastChild = newChild.addNode("loop")
                    if char == "!":
                        lastChild = newChild.addNode("not")
                    if lastChild!= None:
                        # Copy indentation so the XML output stays readable.
                        lastChild.treeInst.text = ident
                        # Indentation of head and tail.
                        lastChild.treeInst.tail = identTail
                else:
                    if char == "L" :
                        lastChild = lastChild.addNode("loop")
                    if char == "!":
                        lastChild = lastChild.addNode("not")
                    lastChild.treeInst.text = ident
                    lastChild.treeInst.tail = identTail
            # Deepen indentation one level per created node.
            ident += "\t"
        # If a chain was built, the last node receives all the decorator's
        # attributes by swapping in the deep-copied <dec> element.
        if lastChild !=None :
            lastChildParent = lastChild.treeInst.getparent()
            # Retag the copied element as not/loop to match the last node.
            if lastChild.treeInst.tag == "not":
                newEtreeInst.tag="not"
            if lastChild.treeInst.tag == "loop":
                newEtreeInst.tag="loop"
            # Keep etree and node-tree pointers consistent:
            # remove lastChild's element from its parent...
            lastChildParent.remove(lastChild.treeInst)
            # ...give lastChild the attribute-carrying element...
            lastChild.treeInst = newEtreeInst
            # ...and append it back to the parent's child list.
            lastChildParent.append(lastChild.treeInst)
        # Single-node chain (e.g. <dec name="L">): newChild itself gets
        # the decorator's attributes.
        else:
            if newChild != None:
                # Retag the copied element as not/loop to match newChild.
                if newChild.treeInst.tag == "not":
                    newEtreeInst.tag="not"
                if newChild.treeInst.tag == "loop":
                    newEtreeInst.tag="loop"
                # Remove newChild's element from its parent.
                (parent).remove(newChild.treeInst)
                # Give newChild the attribute-carrying element.
                newChild.treeInst = newEtreeInst
                # Append the element back to the parent, updated.
                (parent).append(newChild.treeInst)
        # Replace the original <dec> element with the chain root in the
        # XML tree, then refresh the created nodes' derived state.
        parent.replace(element, newChild.treeInst)
        self._updateChildForDec(newChild , len(name))
        # Return the root of the created chain.
        return newChild
#update the childs that we create for decorator property
    def _updateChildForDec(self, newChild, size):
        """Refresh derived state on each node of a decorator chain.

        newChild is the chain root created by _CreatDecoratorNodesFromName;
        size is the length of the decorator name, i.e. how many nodes were
        created. Walks down the chain via getChild(0).
        """
        childToCheck = newChild
        # size is the length of the name string: the number of created nodes.
        for i in range(size):
            # Update each created node's derived properties.
            if childToCheck != None:
                # Refresh the debug-child flag.
                childToCheck._updateChildDebug()
                # Rebuild the success/failure distribution tables.
                childToCheck.distTableSucc = self._createDistTable("Successdistribution")
                childToCheck.distTableFail = self._createDistTable("Failuredistribution")
                # Descend to the next node in the chain.
                childToCheck = childToCheck.getChild(0)
#this func update the etree in order to print the new value in the xml file
def _updateEtreeToPrintXmlFile(self,updateNode):
    """Recursively serialize a node's learned data (distribution tables,
    DEBUG flag, probability table) back into its etree attributes so the
    tree can be written out to the XML file.
    """
    if updateNode == None :
        return None
    # turn distribution table to a string that we know how to read from xml file
    if updateNode.distTableSucc != [] :
        updateNode.setAttrib("Successdistribution",updateNode._distTableToString(updateNode.distTableSucc))
    if updateNode.distTableFail != [] :
        updateNode.setAttrib("Failuredistribution",updateNode._distTableToString(updateNode.distTableFail))
    # get child list
    childList = updateNode.getChildren()
    # iterate over child list with recursive call (list of lists)
    if childList != None :
        for child in childList :
            # update every child in the tree
            self._updateEtreeToPrintXmlFile(child)
    # update Debug attributes in the xml file.
    updateNode._updateDebugAttribToXmlFile()
    # update probability attributes in the xml file- to the etree
    updateNode._updateProbTableToXmlFile()
#this func update the attribute in the xml file for debug - turn DEBUG- into a string and set etree attribute
def _updateDebugAttribToXmlFile(self):
if self.DEBUG != None:
updateString =""
if self.DEBUG[0]== True or self.DEBUG[0]=="True":
updateString+="True"+" "
else :
updateString+="False"+" "
updateString+= str(self.DEBUG[1])
self.setAttrib("DEBUG",updateString)
# this func read attribute "DEBUG" from xml. and parse it by whiteSpace
def _setDebugFromXmlFile(self):
    """Read and parse the "DEBUG" attribute (e.g. "True 0.1") from the etree.

    Returns [bool_success, float_time], or None when the attribute is
    absent.
    NOTE(review): self.DEBUG is reset to [] here but never filled -- the
    parsed list is only *returned*; confirm callers assign the result.
    NOTE(review): a one-token attribute value would raise IndexError on
    debug[1] -- verify the file format guarantees two tokens.
    """
    # get string from xml - "True 0.1" for example.
    debug = self.getAttrib("DEBUG")
    if debug != None :
        self.DEBUG =[]
        # parse the string by whiteSpace and returns a list
        debug = self._parseString(debug)
        # first element in the list should be boolean- success
        if debug[0]!=None and debug[0] == "True":
            debug[0] = True
        else:
            debug[0] = False
        # second element in the list should be time - float number
        if debug[1]!=None:
            debug[1]=float(debug[1])
    else :
        debug = None
    return debug
def _updateChildDebug(self):
    """Set self.DEBUGchild when any element in this node's etree subtree
    carries a "DEBUG" attribute; the scan stops at the first hit.
    """
    # iterate on all the element sub-tree which are from type tree.Element
    for element in self.treeInst.iter(tag=etree.Element):
        if element.get("DEBUG") != None:
            self.DEBUGchild = True
            break
#return true/false if the node has a debug child
def hasDebugChild(self):
    """Return whether a descendant with a DEBUG attribute was detected."""
    flag = self.DEBUGchild
    return flag
#append a new distribution to the succ table
def addDistToSuccTable(self, dist):
    """Append one distribution object to the success table (in place)."""
    self.distTableSucc += [dist]
#append a new distribution to the fail table
def addDistToFailTable(self, dist):
    """Append one distribution object to the failure table (in place)."""
    self.distTableFail += [dist]
#debug getter
def getDebug(self):
    """Accessor for the DEBUG pair [success, time] (None when unset)."""
    debug = self.DEBUG
    return debug
#get a table-distributions list and translate it back to string that we know how to read from xml file
def _distTableToString(self,table):
if table == None:
return None
string =""
#iterate all over the table len
for index in range(0,len(table)) :
#each dist has toString func- that we appand to string
string += ((table[index]).toString())
#we don't want whitSpace at the end of the string so we appand it only if we didn't reach the last index in the table
if index < (len(table)-1):
string+=" "
#return the table as string- for empty table we return empty string.
return (string)
def getRandomProb(self, index):
    """Draw a Bernoulli sample: True with the probability stored at *index*.

    Returns None when no probability entry exists for the index.
    """
    raw = self.getProbAtIndex(index)
    # BUG FIX: the original computed float(raw) *before* the None check, so
    # a missing entry raised TypeError instead of returning None.
    if raw is None:
        return None
    return random.random() <= float(raw)
#set prob table- set the probtable property- input- list of float
#update the attribute in the etree
def setProbTable(self, probtable):
    """Replace the probability table and mirror it into the etree attribute."""
    self.setAttrib("probability", probtable)
    self.probTable = probtable
#set distribution success table with distTable- list of pointers to distributions.
#update the attribute in the etree
def setDistTableSucc(self, distTable):
    """Replace the success table and write its string form into the etree."""
    self.distTableSucc = distTable
    serialized = self._distTableToString(self.distTableSucc)
    self.setAttrib("Successdistribution", serialized)
#set distribution fail table with distTable- list of pointers to distributions.
#update the attribute in the etree
def setDistTableFail(self, distTable):
    """Replace the failure table and write its string form into the etree."""
    self.distTableFail = distTable
    serialized = self._distTableToString(self.distTableFail)
    self.setAttrib("Failuredistribution", serialized)
#update prob table at index with index and val given
def updateProbTableAtIndex(self, index, val, prob=0):
    """Update the probability statistics at *index*.

    val:  outcome of the attempt (truthy = success).
    prob: when non-zero, overwrite the entry with this precomputed value
          instead of counting. NOTE(review): a legitimate value of exactly
          0 cannot be set this way because of the prob!=0 sentinel check.

    Counted entries are [successes, attempts] pairs; the table is lazily
    created with 2^parmetersInTheWorld entries on first use.
    """
    if (self.probTable==None or len(self.probTable)==0 ):
        a = []
        # if the prob table is empty- create a prob table at the size of 2^parmetersInTheWorld
        for i in range(int(math.pow(2,node.parmetersInTheWorld))):
            a.append([0,0])
        # install the freshly built table (also mirrors it into the etree)
        self.setProbTable(a)
    if prob!=0:
        self.probTable[index]=prob
    else:
        if val:
            # success: bump numerator (successes) and denominator (attempts)
            self.probTable[index][0] = self.probTable[index][0]+1
            self.probTable[index][1] = self.probTable[index][1]+1
            # update the new probtable in etree
            self._updateProbTableToXmlFile()
        else:
            # failure: bump only the attempt counter (denominator)
            self.probTable[index][1] = self.probTable[index][1]+1
            # update the new probtable in etree
            self._updateProbTableToXmlFile()
#update the etree in order to print the calculated value to xml file of probtable
def _updateProbTableToXmlFile(self):
if (self.probTable==None or len(self.probTable)==0 ):
return
#turn probtable from list of flost to a string that we can read back from xml file
probTableString = ""
for index in range(len(self.probTable)) :
probTableString+=str(self.getProbAtIndex(index))
#string concatenation- white space between the values
probTableString +=' '
#set probability attribute in etree
self.setAttrib("probability",probTableString)
def updateProbTableAtIndexComputed(self, index, prob):
    """Overwrite the probability entry at *index* with a precomputed value."""
    table = self.probTable
    table[index] = prob
#getter for probIndex return flosat
def getProbAtIndex(self, index):
    """Return the probability at *index* as a float.

    Entries are either a plain number / numeric string (returned as float)
    or a [successes, attempts] pair (returned as the ratio, 0 when no
    attempts were recorded). Returns None when the table is missing or too
    short.
    """
    if self.probTable is None or len(self.probTable) <= index:
        return None
    entry = self.probTable[index]
    if isinstance(entry, (float, int, str)):
        return float(entry)
    # [numerator, denominator] counter pair
    denominator = float(entry[1])
    if denominator != 0:
        return float(entry[0]) / denominator
    return 0
#set distributaion success table at index with time
def setDistTableSuccAtIndex(self, index, time, distTable=[],mapDistTable={}):
    """Record a success observation (or install a precomputed distribution)
    at *index* in the success table.

    With neither distTable nor mapDistTable given, the count for *time* is
    incremented in the (lazily created) Computed distribution at *index*.
    WARNING(review): mutable default arguments -- harmless only because
    they are never mutated here; do not change that.
    """
    if (self.distTableSucc==[]):
        a = []
        # lazily build one empty Computed distribution per table slot
        for i in range(int(math.pow(2,node.parmetersInTheWorld))):
            dist = Computed({})
            a.append(dist)
        self.setDistTableSucc(a)
    if ((distTable==[]) and (mapDistTable=={})):
        # counting path: bump the frequency of this time value
        self.distTableSucc[index].setValueToTime(time, self.distTableSucc[index].getCountByTime(time)+1)
    else:
        if mapDistTable!={}:
            self.distTableSucc[index]=Computed(mapDistTable)
        else:
            self.distTableSucc[index].setComputedDistTable(distTable)
        # mirror the replaced table into the etree attribute
        self.setAttrib("Successdistribution",self._distTableToString(self.distTableSucc))
#set distributaion fail table at index with time
def setDistTableFailAtIndex(self, index, time, distTable=[],mapDistTable={}):
    """Record a failure observation (or install a precomputed distribution)
    at *index* in the failure table; mirror of setDistTableSuccAtIndex.

    WARNING(review): mutable default arguments -- harmless only because
    they are never mutated here.
    NOTE(review): the placement of the final setAttrib (mirrored from the
    success variant) is ambiguous in the original formatting -- confirm it
    belongs to the replace branch only.
    """
    if (self.distTableFail==[]):
        a = []
        # lazily build one empty Computed distribution per table slot
        for i in range(int(math.pow(2,node.parmetersInTheWorld))):
            dist = Computed({})
            a.append(dist)
        self.setDistTableFail(a)
    if ((distTable==[]) and (mapDistTable=={})):
        # counting path: bump the frequency of this time value
        self.distTableFail[index].setValueToTime(time, self.distTableFail[index].getCountByTime(time)+1)
    else:
        if mapDistTable!={}:
            self.distTableFail[index]=Computed(mapDistTable)
        else:
            self.distTableFail[index].setComputedDistTable(distTable)
        # mirror the replaced table into the etree attribute
        self.setAttrib("Failuredistribution",self._distTableToString(self.distTableFail))
#node- run func
def run(self, index):
    """Execute this node in debug mode: replay the recorded DEBUG outcome.

    Returns the DEBUG pair [success, time] when node.debugMode is on and a
    recording exists, otherwise None (callers fall back to sampling).
    Non-task nodes with monitoring enabled also fold the replayed outcome
    into their probability / distribution tables.
    """
    a = None
    if (node.debugMode):
        tmpIndex = index
        a = self.DEBUG
        if (a!=None):
            if not(self.boolWhoAmI("tsk")):
                if (self.monitor):
                    if a[0]:
                        self.setDistTableSuccAtIndex(tmpIndex, a[1])
                    else:
                        self.setDistTableFailAtIndex(tmpIndex, a[1])
                    self.updateProbTableAtIndex(tmpIndex, a[0])
    return a
#set debug - recive a string exmp. "True 100"
def setDebug(self, succtime):
    """Set DEBUG from a string such as "True 100" and push it to the etree."""
    parsed = self._parseString(succtime)
    parsed[1] = float(parsed[1])
    self.DEBUG = parsed
    self._updateDebugAttribToXmlFile()
#run as base case func
def runAsBaseCase (self, index):
    """Simulate this node as a leaf.

    Samples success from the probability table and a duration from the
    matching success/failure distribution. A recorded DEBUG pair takes
    precedence; returns None when no probability entry exists at *index*.
    Returns [success_bool, time_float].
    """
    debug = node.run(self, index)
    if (debug!=None):
        return debug
    a = [True, 0]
    randP = self.getRandomProb(index)
    if randP==None:
        return None
    a[0]= randP
    if a[0]:
        a[1] = float(self.getDistSuccByIndex(index).calcProb())
    else:
        a[1] = float(self.getDistFailByIndex(index).calcProb())
    return a
#get the distributions from distribution table success by index
def getDistSuccByIndex(self, index):
    """Return the success distribution at *index*, or None when out of range."""
    table = self.distTableSucc
    return table[index] if index < len(table) else None
#get the distributions from distribution table fail by index
def getDistFailByIndex(self, index):
    """Return the failure distribution at *index*, or None when out of range."""
    table = self.distTableFail
    return table[index] if index < len(table) else None
# def getSuccDistAtIndex(self,index):
# if self.distTableSucc != None and len(self.distTableSucc) > index :
# return self.distTableSucc[index]
#
# def getFailDistAtIndex(self,index):
# if self.distTableFail != None and len(self.distTableFail) > index :
# return self.distTableFail[index]
#clear the node property
def clear(self):
    """Reset learned statistics: empty all tables and drop the persisted
    etree attributes; marks the node with reset=True.
    """
    self.probTable = []
    self.distTableSucc = []
    self.distTableFail = []
    # dict.pop with a default replaces the original triplet of bare
    # try/except-pass blocks: same "remove if present" semantics without
    # swallowing unrelated errors.
    for key in ("Successdistribution", "probability", "Failuredistribution"):
        self.treeInst.attrib.pop(key, None)
    self.reset = True
#clear whole tree property
def clearWholeTree(self):
    """Recursively clear() every descendant (the node itself is untouched)."""
    for child in (self.getChildren() or []):
        child.clear()
        child.clearWholeTree()
def updateSize(self):
    """Recompute self.size: 1 for task ("tsk") leaves, otherwise the sum of
    the children's sizes added onto the current value.

    NOTE(review): for non-task nodes the children's sizes are accumulated
    onto the *existing* self.size rather than a fresh 0, so repeated calls
    inflate the value unless size is reset elsewhere -- confirm before
    relying on idempotence.
    """
    if self.treeInst.tag =="tsk":
        self.size=1
    else:
        for c in self.getChildren():
            c.updateSize()
            self.size = self.size+c.size
#run plan
def runPlan(self, index):
    """Run the plan by delegating to the first (root) child."""
    self.getChildren()[0].run(index)
def runPlanAccurate(self, index):
    """Run the accurate variant on the first (root) child."""
    self.getChildren()[0].runAccurate(index)
def runPlanApproximate(self, index, e):
    """Refresh subtree sizes, then run the approximate variant (error bound
    *e*) on the first (root) child.
    """
    self.updateSize()
    self.getChildren()[0].runApproximate(index, e)
#get average to success time
def getAverageSuccTime(self, index):
    """Average success time at *index*; +inf when no distribution exists."""
    dist = self.getDistSuccByIndex(index)
    return dist.calcAverageTime() if dist is not None else float('Inf')
def getSDSuccTime(self, index):
    """Standard deviation of success time at *index*; 0.0 without a distribution."""
    dist = self.getDistSuccByIndex(index)
    return dist.SDTime() if dist is not None else float(0)
#compute probability for less then T time
def getLessThenTProb(self, index, T):
    """Probability of finishing in less than T time units at *index*;
    0.0 when no success distribution exists.
    """
    dist = self.getDistSuccByIndex(index)
    if dist is None:
        return float(0)
    return dist.LessThenT(T)
#table is the name of the table needed- attribute
def _createDistTable(self,table):
    """Build a list of distribution objects from the etree attribute named
    by *table* ("Successdistribution" / "Failuredistribution").

    Each whitespace-separated token is dispatched on its first letter:
    C=Computed, N=Normal, U=Uniform; 'D' (discrete) is recognized but not
    implemented. Returns [] when the attribute is absent.
    """
    string = self.getAttrib(str(table))
    # NOTE(review): the parameter name is reused here for the token list.
    table =[]
    if string != None:
        table = self._parseString(string)
    newDistTable =[]
    if table != None:
        for index in range(len(table)):
            # computed dist
            if (table[index][0] == 'C'):
                newDistTable.append(self._createComputedDist(table[index]))
            # normal dist
            if(str(table[index][0]) =='N'):
                newDistTable.append(self._createNormalDist(table[index]))
            # discrete dist: recognized but intentionally unimplemented
            if(table[index][0] == 'D'):
                pass
            # uniform dist- create new instance and append
            if(table[index][0] == 'U'):
                x=self._createUniformDist(table[index])
                newDistTable.append(x)
    return newDistTable
#create computed distribution
def _createComputedDist(self,Sinput):
    """Build a Computed distribution from a token like "C[t1,c1],[t2,c2]"."""
    ans =self._getDictOfNumPairFromString(Sinput)
    return Computed(ans)
#create normal distribution
def _createNormalDist(self,Sinput):
    """Build a Normal distribution from a token like "N10,2" (mean, sd as strings)."""
    ans = self._getTwoNumFromString(Sinput)
    return Normal(ans[0],ans[1])
#create uniform distribution
def _createUniformDist(self,Sinput):
    """Build a Uniform distribution from a token like "U1,5" (bounds as strings)."""
    ans = self._getTwoNumFromString(Sinput)
    return Uniform(ans[0],ans[1])
def setDEBUGnode(self,sSucc=None,sTime=None):
    """No-op stub. NOTE(review): unimplemented; kept for interface
    compatibility with callers -- confirm whether it should set DEBUG.
    """
    pass
#input- string "num,num" output: tauple [num,num]
# we use this func to divide two numbers for distribution parmeters value
#can only work for two numbers in the string
def _getTwoNumFromString(self,Sinput):
stringNumA = ""
stringNumB = ""
nextNum = False
#loop over the string
for index in range(0, len(Sinput)):
#check if the Sinput[index] is a number or "." - for float num.
if (Sinput[index].isdigit() or Sinput[index]=='.' ) == True and (nextNum == False):
stringNumA += str( Sinput[index] )
continue
if(str(Sinput[index]) ==','):
nextNum= True
continue
if (Sinput[index].isdigit() or Sinput[index]=='.') == True and (nextNum == True):
stringNumB+= str(Sinput[index] )
continue
#return a list of two str that represent float numbers
return [str(stringNumA),str(stringNumB)]
# Sinput should look like this - C[123,123],[123,1231],[54,23]
#input- the string above, output: dictionary of key and value
#we use this func to create the map/dictionary for computed distribution
def _getDictOfNumPairFromString(self,Sinput):
openBracket = False
stringPair=""
#start pairList as empty dictionary
PairList = {}
#iter from index=0 to strint- Sinput size
for index in range(0,len(Sinput)):
if Sinput[index] == '[' and openBracket == False :
openBracket = True
continue
if Sinput[index] == ']' and openBracket == True:
#call getTwoNumFromString func with stringPair and appand to the PairList- to get a tauple[num,num]
pair = self._getTwoNumFromString(stringPair)
PairList[str(pair[0])]= str(pair[1])
#update open bracket to close
openBracket = False
#init the stringPair
stringPair = ""
continue
if openBracket == True :
stringPair += Sinput[index]
continue
#return distionry
return PairList
def getTime(self):
    """Return the DEBUG time component, or None when DEBUG is unset/short."""
    debug = self.DEBUG
    if debug is not None and len(debug) > 1:
        return debug[1]
    return None
def getDistData(self, index, succ):
    """Return the raw data of the success (succ==1) or failure distribution
    at *index*; None when that distribution does not exist.
    """
    dist = self.getDistSuccByIndex(index) if succ == 1 else self.getDistFailByIndex(index)
    return dist.getData() if dist is not None else None
def isRoot(self):
    """True when this node's grandparent is None (direct child of the tree root)."""
    grandparent = self.getParent().getParent()
    return grandparent is None
def setBounds(self, index, ubound, lbound):
    """Store the upper/lower probability bounds for *index*."""
    self.lowerBound[index] = lbound
    self.upperBound[index] = ubound
def getUBound(self, index):
    """Upper bound at *index*, rounded to 4 decimal places."""
    value = self.upperBound[index]
    return round(value, 4)
def getLBound(self, index):
    """Lower bound at *index*, rounded to 4 decimal places."""
    value = self.lowerBound[index]
    return round(value, 4)
|
def compute_gcd(x, y):
    """Return the greatest common divisor of x and y.

    Uses the iterative Euclidean algorithm: O(log min(x, y)) instead of the
    original O(min(x, y)) trial-division loop. Also fixes two edge cases the
    original got wrong: gcd(0, n) now returns n (the original returned 1),
    and negative inputs are handled via abs().
    """
    x, y = abs(x), abs(y)
    # Euclid: gcd(x, y) == gcd(y, x mod y); terminates when y reaches 0.
    while y:
        x, y = y, x % y
    return x
# --- CLI driver: prompt for two integers and print their GCD ---
num1 = int(input("First number:"))
num2 = int(input("Second number:"))
gcd = compute_gcd(num1,num2)
print(gcd)
|
#! /usr/bin/env python
#
# Configure PyInstaller for the current Python installation.
#
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import sys
import string
import shutil
import pprint
import re
import glob
import platform
import mf
import bindepend
import Build
# --- Environment detection (module-level constants, Python 2 script) ---
HOME = os.path.dirname(sys.argv[0])
iswin = sys.platform[:3] == 'win'
# Feature flags for Python >= 2.4 / 2.6 behaviour differences.
is24 = hasattr(sys, "version_info") and sys.version_info[:2] >= (2,4)
is26 = hasattr(sys, "version_info") and sys.version_info[:2] >= (2,6)
cygwin = sys.platform == 'cygwin'
# Hard abort: 64-bit Windows was unsupported at the time of writing.
if iswin and platform.architecture()[0] != "32bit":
    print "ERROR: PyInstaller does not support Windows 64-bit"
    print "Subscribe to this ticket for more information:"
    print "  http://www.pyinstaller.org/ticket/25"
    sys.exit(2)
def find_EXE_dependencies(config):
    """Record the target python/platform in *config* and, when cross-building
    a Windows target from non-Windows, look for mounted Windows system DLLs.

    Relies on the module-level *opts* parsed in the __main__ block.
    """
    global target_platform, target_iswin
    print "I: computing EXE_dependencies"
    python = opts.executable or sys.executable
    target_platform = opts.target_platform or sys.platform
    config['python'] = python
    config['target_platform'] = target_platform
    target_iswin = target_platform[:3] == 'win'
    xtrapath = []
    if target_iswin and not iswin:
        # try to find a mounted Windows system
        xtrapath = glob.glob('/mnt/*/WINDOWS/system32/')
        if not xtrapath:
            print "E: Can not find a mounted Windows system"
            print "W: Please set 'xtrpath' in the config file yourself"
    # a user-provided value in the config file wins over autodetection
    xtrapath = config.get('xtrapath') or xtrapath
    config['xtrapath'] = xtrapath
_useTK = """\
# Generated by Configure.py
# This file is public domain
import os, sys
try:
basedir = os.environ['_MEIPASS2']
except KeyError:
basedir = sys.path[0]
tcldir = os.path.join(basedir, '_MEI', 'tcl%s')
tkdir = os.path.join(basedir, '_MEI', 'tk%s')
os.environ["TCL_LIBRARY"] = tcldir
os.environ["TK_LIBRARY"] = tkdir
os.putenv("TCL_LIBRARY", tcldir)
os.putenv("TK_LIBRARY", tkdir)
"""
def test_TCL_TK(config):
    """Locate the TCL/TK runtime and record TCL_root / TK_root in *config*.

    Traces the imports of Tkinter, matches the tcl/tk shared library among
    the binary dependencies, derives the version, writes support/useTK.py
    from the _useTK template, and searches the conventional directories for
    the .tcl script trees.
    """
    # TCL_root, TK_root and support/useTK.py
    print "I: Finding TCL/TK..."
    if not (target_iswin):
        # temporarily disable the exclusion list so libtcl/libtk show up
        saveexcludes = bindepend.excludes
        bindepend.excludes = {}
    pattern = [r'libtcl(\d\.\d)?\.(so|dylib)', r'(?i)tcl(\d\d)\.dll'][target_iswin]
    a = mf.ImportTracker()
    a.analyze_r('Tkinter')
    binaries = []
    for modnm, mod in a.modules.items():
        if isinstance(mod, mf.ExtensionModule):
            binaries.append((mod.__name__, mod.__file__, 'EXTENSION'))
    binaries.extend(bindepend.Dependencies(binaries))
    binaries.extend(bindepend.Dependencies([('', sys.executable, '')]))
    for nm, fnm, typ in binaries:
        mo = re.match(pattern, nm)
        if mo:
            ver = mo.group(1)
            tclbindir = os.path.dirname(fnm)
            if target_iswin:
                # "85" -> "8.5"
                ver = ver[0] + '.' + ver[1:]
            elif ver is None:
                # we found "libtcl.so.0" so we need to get the version from the lib directory
                for name in os.listdir(tclbindir):
                    mo = re.match(r'tcl(\d.\d)', name)
                    if mo:
                        ver = mo.group(1)
            print "I: found TCL/TK version %s" % ver
            open(os.path.join(HOME, 'support', 'useTK.py'), 'w').write(_useTK % (ver, ver))
            tclnm = 'tcl%s' % ver
            tknm = 'tk%s' % ver
            # Linux: /usr/lib with the .tcl files in /usr/lib/tcl8.3 and /usr/lib/tk8.3
            # Windows: Python21/DLLs with the .tcl files in Python21/tcl/tcl8.3 and Python21/tcl/tk8.3
            #  or D:/Programs/Tcl/bin with the .tcl files in D:/Programs/Tcl/lib/tcl8.0 and D:/Programs/Tcl/lib/tk8.0
            if target_iswin:
                for attempt in ['../tcl', '../lib']:
                    if os.path.exists(os.path.join(tclbindir, attempt, tclnm)):
                        config['TCL_root'] = os.path.join(tclbindir, attempt, tclnm)
                        config['TK_root'] = os.path.join(tclbindir, attempt, tknm)
                        break
            else:
                config['TCL_root'] = os.path.join(tclbindir, tclnm)
                config['TK_root'] = os.path.join(tclbindir, tknm)
            break
    else:
        # for/else: no binary matched the tcl pattern
        print "I: could not find TCL/TK"
    if not target_iswin:
        bindepend.excludes = saveexcludes
def test_Crypt(config):
    """Probe for the optional AES/crypt support module.

    Currently short-circuited: crypt support is disabled and the build code
    below the early return is unreachable (kept for when the feature
    returns).
    """
    # TODO: disabled for now
    config["useCrypt"] = 0
    return
    # --- unreachable while the early return above is in place ---
    # Crypt support. We need to build the AES module and we'll use distutils
    # for that. FIXME: the day we'll use distutils for everything this will
    # be a solved problem.
    print "I: trying to build crypt support..."
    from distutils.core import run_setup
    cwd = os.getcwd()
    args = sys.argv[:]
    try:
        os.chdir(os.path.join(HOME, "source", "crypto"))
        dist = run_setup("setup.py", ["install"])
        if dist.have_run.get("install", 0):
            config["useCrypt"] = 1
            print "I: ... crypto support available"
        else:
            config["useCrypt"] = 0
            print "I: ... error building crypto support"
    finally:
        # restore cwd and argv clobbered by run_setup
        os.chdir(cwd)
        sys.argv = args
def test_Zlib(config):
    """Set config['useZLIB'] to 1/0 depending on whether zlib imports."""
    # useZLIB
    print "I: testing for Zlib..."
    try:
        import zlib
        config['useZLIB'] = 1
        print 'I: ... Zlib available'
    except ImportError:
        config['useZLIB'] = 0
        print 'I: ... Zlib unavailable'
def test_RsrcUpdate(config):
    """Check whether win32api can rewrite EXE resources (icons, version
    info) and record the result in config['hasRsrcUpdate']; always 0 when
    not running on Windows.
    """
    config['hasRsrcUpdate'] = 0
    if not iswin:
        return
    # only available on windows
    print "I: Testing for ability to set icons, version resources..."
    try:
        import win32api, icon, versionInfo
    except ImportError, detail:
        print 'I: ... resource update unavailable -', detail
        return
    test_exe = os.path.join(HOME, 'support', 'loader', 'run_7rw.exe')
    if not os.path.exists( test_exe ):
        config['hasRsrcUpdate'] = 0
        print 'E: ... resource update unavailable - %s not found' % test_exe
        return
    # The test_exe may be read-only
    # make a writable copy and test using that
    rw_test_exe = os.path.join( os.environ['TEMP'], 'me_test_exe.tmp' )
    shutil.copyfile( test_exe, rw_test_exe )
    try:
        hexe = win32api.BeginUpdateResource(rw_test_exe, 0)
    except:
        print 'I: ... resource update unavailable - win32api.BeginUpdateResource failed'
    else:
        win32api.EndUpdateResource(hexe, 1)
        config['hasRsrcUpdate'] = 1
        print 'I: ... resource update available'
    os.remove(rw_test_exe)
# Template for support/useUnicode.py; %s receives the codec package to
# import ("encodings" when available, else plain "codecs").
_useUnicode = """\
# Generated by Configure.py
# This file is public domain
import %s
"""
_useUnicodeFN = os.path.join(HOME, 'support', 'useUnicode.py')
def test_unicode(config):
    """Detect Unicode codec support and write support/useUnicode.py.

    Prefers the full 'encodings' package, falling back to bare 'codecs';
    with no codecs at all, any stale helper file is removed.
    """
    print 'I: Testing for Unicode support...'
    try:
        import codecs
        config['hasUnicode'] = 1
        try:
            import encodings
        except ImportError:
            module = "codecs"
        else:
            module = "encodings"
        open(_useUnicodeFN, 'w').write(_useUnicode % module)
        print 'I: ... Unicode available'
    except ImportError:
        # no codec support: make sure no stale helper file remains
        try:
            os.remove(_useUnicodeFN)
        except OSError:
            pass
        config['hasUnicode'] = 0
        print 'I: ... Unicode NOT available'
def test_UPX(config):
    """Detect the UPX packer and its version.

    Records config['hasUPX'] (0, or a version tuple such as (3, 96)) and
    config['upx_dir'] from the command-line options.
    """
    print 'I: testing for UPX...'
    cmd = "upx"
    if opts.upx_dir:
        # quote the full path so spaces survive the shell
        cmd = '"' + os.path.normpath(os.path.join(opts.upx_dir, cmd)) + '"'
    hasUPX = 0
    try:
        vers = os.popen(cmd + ' -V').readlines()
        if vers:
            v = string.split(vers[0])[1]
            hasUPX = tuple(map(int, string.split(v, ".")))
            if iswin and is24 and hasUPX < (1,92):
                print 'E: UPX is too old! Python 2.4 under Windows requires UPX 1.92+'
                hasUPX = 0
        print 'I: ...UPX %s' % (('unavailable','available')[hasUPX != 0])
    except Exception, e:
        print 'I: ...exception result in testing for UPX'
        print e, e.args
    config['hasUPX'] = hasUPX
    config['upx_dir'] = opts.upx_dir
def find_PYZ_dependencies(config):
    """Compute the module TOC needed by the bootstrap 'archive' module and
    store it as config['PYZ_dependencies'].

    Walks non-conditional imports transitively (skipping builtins); the TOC
    is reversed so dependencies come before their dependents.
    """
    print "I: computing PYZ dependencies..."
    a = mf.ImportTracker([os.path.join(HOME, 'support')])
    a.analyze_r('archive')
    mod = a.modules['archive']
    toc = Build.TOC([(mod.__name__, mod.__file__, 'PYMODULE')])
    # note: toc grows while we iterate, so the range() re-check each pass
    # effectively performs a breadth-first expansion
    for i in range(len(toc)):
        nm, fnm, typ = toc[i]
        mod = a.modules[nm]
        tmp = []
        for importednm, isdelayed, isconditional, level in mod.imports:
            if not isconditional:
                realnms = a.analyze_one(importednm, nm)
                for realnm in realnms:
                    imported = a.modules[realnm]
                    if not isinstance(imported, mf.BuiltinModule):
                        tmp.append((imported.__name__, imported.__file__, imported.typ))
        toc.extend(tmp)
    toc.reverse()
    config['PYZ_dependencies'] = toc.data
def main(configfilename):
try:
config = Build._load_data(configfilename)
print 'I: read old config from', configfilename
except IOError, SyntaxError:
# IOerror: file not present/readable
# SyntaxError: invalid file (platform change?)
# if not set by Make.py we can assume Windows
config = {'useELFEXE': 1}
# Save Python version, to detect and avoid conflicts
config["pythonVersion"] = sys.version
config["pythonDebug"] = __debug__
find_EXE_dependencies(config)
test_TCL_TK(config)
test_Zlib(config)
test_Crypt(config)
test_RsrcUpdate(config)
test_unicode(config)
test_UPX(config)
find_PYZ_dependencies(config)
Build._save_data(configfilename, config)
print "I: done generating", configfilename
if __name__ == '__main__':
    # Command-line front end: parse options into the module-level *opts*
    # that find_EXE_dependencies/test_UPX read, then regenerate the config.
    from pyi_optparse import OptionParser
    parser = OptionParser(usage="%prog [options]")
    parser.add_option('--target-platform', default=None,
                      help='Target platform, required for cross-bundling '
                           '(default: current platform).')
    parser.add_option('--upx-dir', default=None,
                      help='Directory containing UPX.')
    parser.add_option('--executable', default=None,
                      help='Python executable to use. Required for '
                           'cross-bundling.')
    parser.add_option('-C', '--configfile',
                      default=os.path.join(HOME, 'config.dat'),
                      help='Name of generated configfile (default: %default)')
    opts, args = parser.parse_args()
    if args:
        parser.error('Does not expect any arguments')
    main(opts.configfile)
|
import cgi, json
import os
import mysql.connector
class Saver:
    """MySQL persistence layer for VADR runs, results and annotations.

    NOTE(review): credentials are hard-coded; consider moving them to
    environment variables or a config file.
    """

    def __init__(self):
        self.conn = mysql.connector.connect(
            user="mchavez8",
            password="mchavez8mysql123",
            host="localhost",
            database="mchavez8_chado"
        )
        self.curs = self.conn.cursor()

    def __del__(self):
        # NOTE(review): committing in __del__ is fragile (it may run late or
        # not at all at interpreter shutdown); kept for interface
        # compatibility, but an explicit close() or context manager is safer.
        self.conn.commit()
        self.conn.close()

    def save_run(self, run_id, email, seq_name, seq):
        """Insert one row into user_run."""
        qry = """
        INSERT INTO user_run VALUES (
            %s, %s, %s, %s
        )
        """
        self.curs.execute(qry, [run_id, email, seq_name, seq])

    def save_sequence_results(self, run_id, vadr_status, seq_length, model_used):
        """Insert one row into sequence_results."""
        qry = """
        INSERT INTO sequence_results
        (run_id, vadr_status, seq_length, model_used)
        VALUES (
            %s, %s, %s, %s
        )
        """
        self.curs.execute(qry, [run_id, vadr_status, seq_length, model_used])

    def save_sequence_annotations(self, run_id, annotations_list):
        """Bulk-insert feature annotations (list of dicts) for a run.

        SECURITY FIX: the original built the VALUES clause by interpolating
        user-derived fields into the SQL string (SQL injection, plus
        breakage on embedded quotes). This now uses a parameterized
        executemany().
        """
        qry = """
        INSERT INTO sequence_features
        (run_id, feature_type, feature_name, feature_start, feature_end, seq_coords, alerts)
        VALUES (%s, %s, %s, %s, %s, %s, %s)
        """
        rows = [
            (run_id, item.get("type"), item.get("name"), item.get("start"),
             item.get("end"), item.get("seq_coords"), item.get("alerts"))
            for item in annotations_list
        ]
        # empty input: nothing to insert (the original built malformed SQL)
        if rows:
            self.curs.executemany(qry, rows)

    def get_user_jobs(self, user_email):
        """Return all past runs for a user as {"email": ..., "past_jobs": [...]}.

        Addresses are stored URL-encoded ('@' -> '%40'), so encode before
        querying.
        """
        user_email = user_email.replace("@", "%40")
        qry = """
        SELECT * FROM user_run INNER JOIN sequence_results ON user_run.run_id=sequence_results.run_id
        WHERE user_user_email=%s;
        """
        self.curs.execute(qry, [user_email])
        results = {
            "email": user_email,
            "past_jobs": []
        }
        # column positions follow the user_run + sequence_results join order
        for item in self.curs:
            results["past_jobs"].append(
                {
                    "id": item[0],
                    "seq_name": item[2],
                    "sequence": item[3],
                    "sequence_length": item[7],
                    "vadr_status": item[6]
                }
            )
        return results

    def get_past_run_annotations(self, run_id):
        """Return every sequence_features row for *run_id* as a dict."""
        qry = """
        SELECT * FROM sequence_features WHERE run_id=%s;
        """
        self.curs.execute(qry, [run_id])
        results = {
            "job_id": run_id,
            "sequence_annotations": []
        }
        for item in self.curs:
            results["sequence_annotations"].append(
                {
                    "feature_type": item[2],
                    "feature_name": item[3],
                    "feature_start": item[4],
                    "feature_end": item[5],
                    "seq_coords": item[6],
                    "alerts": item[7],
                }
            )
        return results
if __name__ == "__main__":
    # Manual smoke-test driver; the write calls are left commented out so
    # running the module only performs a read by default.
    svr = Saver()
    # run_id = 8
    # svr.save_run(run_id, "test_usr", "test_name", "test_seq")
    # svr.save_sequence_results(run_id, "stat", 20, "mod")
    # svr.save_sequence_annotations(run_id,
    #     [{'type': 'gene', 'name': 'ORF1ab', 'start': '266', 'end': '21555', 'seq_coords': '266..21555:+', 'alerts': '-'}]
    # )
    # svr.get_user_jobs("mauro.antoine.chavez%40gmail.com")
    svr.get_past_run_annotations(48877336)
"""read_res_mfdn_transitions.py
Provides simple example of reading and accessing MFDn postprocessor results.
In practice, such results may need to be "merged" with results from mfdn.
Required test data:
data/mfdn-transitions/runtransitions00-transitions-ob-Z3-N3-Daejeon16-coul1-hw15.000-Nmax02.res
data/mfdn-transitions/runtransitions00-transitions-tb-Z3-N3-Daejeon16-coul1-hw15.000-Nmax02.res
This example output is produced by mcscript-ncci/docs/examples/runtransitions00.py.
Mark A. Caprio
University of Notre Dame
Language: Python 3
- 09/17/20 (mac): Created.
- 05/18/22 (mac): Update example file.
- 07/12/22 (mac): Provide example use of two-body RME accessor.
"""
import os
import mfdnres
import mfdnres.ncci
################################################################
# reading data
################################################################
def read_data():
    """Read results.

    Slurps the MFDn .res files under data/mfdn-transitions, prints the raw
    per-file params for illustration, then returns the mesh merged on
    (nuclide, interaction, coulomb, hw, Nmax) with parity added to params.
    """
    print("Reading input file...")
    data_dir = os.path.join("data","mfdn-transitions")
    mesh_data = mfdnres.input.slurp_res_files(
        data_dir,
        res_format="mfdn_v15",
        filename_format="mfdn_format_7_ho",
        verbose=True
    )
    print()
    # diagnostic output -- FOR ILLUSTRATION ONLY
    print("Raw mesh (params)")
    for results_data in mesh_data:
        print(mfdnres.analysis.dict_items(results_data.params))
    print()
    # merge results data
    print("Merging mesh points...")
    mesh_data = mfdnres.analysis.merged_mesh(
        mesh_data,
        ("nuclide","interaction","coulomb","hw","Nmax"),
        postprocessor=mfdnres.ncci.augment_params_with_parity,
        verbose=False
    )
    print()
    # diagnostic output -- FOR ILLUSTRATION ONLY
    print("Merged mesh (params)")
    for results_data in mesh_data:
        print(mfdnres.analysis.dict_items(results_data.params))
    print()
    return mesh_data
################################################################
# explore single mesh point
################################################################
def explore_point(results_data):
    """Examine mfdn_results_data members and results of accessors, for MFDn
    postprocessor results.

    results_data: a single (merged) mesh point.
    """
    # BUG FIX: the original immediately did `results_data = mesh_data[0]`,
    # overwriting the parameter with the module-level mesh and silently
    # ignoring whichever point the caller passed in.
    # examine data attributes
    print("Data attributes...")
    print("results_data.postprocessor_ob_rmes {}".format(results_data.postprocessor_ob_rmes))
    print("results_data.postprocessor_tb_rmes {}".format(results_data.postprocessor_tb_rmes))
    print()
    # access ob rmes
    print("Test accessors (one-body)...")
    print("M1 moment (from dipole term rmes) {}".format(results_data.get_moment("M1",(1.0,0,1))))
    print("M1 rme (from dipole term rmes) {}".format(results_data.get_rme("M1",((1.0,0,1),(1.0,0,1)))))
    print("E2 moment {}".format(results_data.get_moment("E2p",(1.0,0,1))))
    print("E2 rme {}".format(results_data.get_rme("E2p",((1.0,0,1),(1.0,0,1)))))
    print()
    # access tb rmes
    print("Test accessors (two-body)...")
    print("QxQ_0 rme {}".format(results_data.get_rme("QxQ_0",((2.0,0,1),(2.0,0,1)),rank="tb")))
    print()
    print("Test get_rme verbose mode...")
    print("E2 rme {}".format(results_data.get_rme("E2p",((1.0,0,1),(1.0,0,1)),verbose=True)))
    print("E2 rme {}".format(results_data.get_rme("E2p",((1.0,0,1),(1.0,1,1)),verbose=True)))  # invalid state pair
    print()
################################################################
# main
################################################################
# read data
mesh_data = read_data()
explore_point(mesh_data[0])
|
import sqlite3
import requests
def get_url_id(url):
    """Extract the numeric id from a SWAPI resource URL
    (e.g. 'https://swapi.co/api/films/3/' -> 3).
    """
    segments = url.split('/')
    return int(segments[-2])
def insert_film(cursor, film_id):
    """Fetch film *film_id* from SWAPI and insert it, plus its
    film->character link rows; insert errors are printed (best-effort)
    rather than raised. (Python 2 script.)
    """
    url = 'https://swapi.co/api/films/'+str(film_id)+'/'
    response = requests.get(url)
    data = response.json()
    params = [
        film_id,
        data['title'],
        data['episode_id'],
        data['opening_crawl'],
        data['director'],
        data['producer'],
        data['release_date']
    ]
    sql_str = '''
    INSERT INTO
    films (film_id, title, episode_id, opening_crawl, director, producer, release_date)
    VALUES
    (?, ?, ?, ?, ?, ?, ?);
    '''
    try:
        cursor.execute(sql_str, params)
    except Exception as e:
        print 'insert_film', film_id, e
    try:
        # link-table rows derived from the character resource URLs
        film_char_params = map(lambda url: (film_id, get_url_id(url)), data['characters'])
        cursor.executemany('INSERT INTO film_characters (film_id, character_id) VALUES (?, ?)', film_char_params);
    except Exception as e:
        print 'insert_film_characters', film_id, e
    print 'DONE film', film_id
def insert_character(cursor, character_id):
    """Fetch person *character_id* from SWAPI and insert into characters;
    insert errors are printed (best-effort) rather than raised.
    """
    url = 'https://swapi.co/api/people/'+str(character_id)+'/'
    response = requests.get(url)
    data = response.json()
    params = [
        character_id,
        data['name'],
        data['height'],
        data['mass'],
        data['hair_color'],
        data['skin_color'],
        data['eye_color'],
        data['birth_year'],
        data['gender']
    ]
    sql_str = '''
    INSERT INTO
    characters (character_id, name, height, mass, hair_color, skin_color, eye_color, birth_year, gender)
    VALUES
    (?, ?, ?, ?, ?, ?, ?, ?, ?);
    '''
    try:
        cursor.execute(sql_str, params)
    except Exception as e:
        print 'insert_character', character_id, e
    print 'DONE character', character_id
# --- Driver: crawl SWAPI films 1-7 and people 0-89 into database.db ---
connection = sqlite3.connect('database.db');
cursor = connection.cursor()
for i in range(1, 8):
    insert_film(cursor, i)
for i in range(0, 90):
    try:
        insert_character(cursor, i)
    except Exception as e:
        # SWAPI people ids are sparse; missing ids raise on data['name']
        print 'likely invalid index', i, e
connection.commit()
connection.close()
# Schema reference (kept as a bare module-level string for documentation):
'''
CREATE TABLE characters (
character_id INT PRIMARY KEY,
name TEXT NOT NULL,
height REAL,
mass REAL,
hair_color TEXT NOT NULL,
skin_color TEXT NOT NULL,
eye_color TEXT NOT NULL,
birth_year TEXT NOT NULL,
gender TEXT NOT NULL
);
'''
import random
# Unknown coin bias, fixed once at import time (probability of emitting 0).
p = random.random()
class RandomP:
    """Biased bit source: emits 0 with probability ``p`` (module global), else 1."""

    @staticmethod
    def f():
        # random.random() < p is true with probability exactly p.
        if random.random() < p:
            return 0
        return 1
class Random01:
    """Produce an unbiased bit from the biased source via von Neumann extraction."""

    def random01(self):
        # Draw pairs until they differ: '01' and '10' are equally likely
        # regardless of the bias, so mapping them to 0/1 is fair.
        while True:
            pair = str(RandomP.f()) + str(RandomP.f())
            if pair == "01":
                return 0
            if pair == "10":
                return 1
# Demo: emit one unbiased bit.
print(Random01().random01())
|
from django.shortcuts import render
import pymysql
from bbb.models import Yinyue
from bbb.models import Shouji
from django.shortcuts import HttpResponse
from django.core.paginator import Paginator,PageNotAnInteger,EmptyPage
def index(request):
    """Render the paginated music list, 12 records per page."""
    records = Yinyue.objects.all().order_by("id")  # deterministic order is required for paging
    pages = Paginator(records, 12)
    requested = request.GET.get('page')
    try:
        current = pages.page(requested)
    except PageNotAnInteger:
        # Missing or non-numeric ?page= -> show the first page.
        current = pages.page(1)
    except EmptyPage:
        # ?page= beyond the end -> clamp to the last page.
        current = pages.page(pages.num_pages)
    return render(request, 'index.html', {"yinyue_list": current})
def taobao(request):
    """Render the paginated phone list, 12 records per page."""
    records = Shouji.objects.all().order_by("id")  # deterministic order is required for paging
    pages = Paginator(records, 12)
    requested = request.GET.get('page')
    try:
        current = pages.page(requested)
    except PageNotAnInteger:
        # Missing or non-numeric ?page= -> show the first page.
        current = pages.page(1)
    except EmptyPage:
        # ?page= beyond the end -> clamp to the last page.
        current = pages.page(pages.num_pages)
    return render(request, 'taobao.html', {"shouji_list": current})
from django.db import models
# Create your models here.
class Customer(models.Model):
    """A customer record (legacy table ``customer``)."""
    class Meta:
        db_table = 'customer'
    first_name = models.CharField(max_length=45)
    last_name = models.CharField(max_length=45)
    def __str__(self):
        # Display customers by first name in admin/shell listings.
        return self.first_name
class AcInvoices(models.Model):
    """An invoice header (table ``ac_invoices``).

    NOTE(review): the FK points from the invoice to a single invoice item;
    conventionally items reference their invoice instead — confirm the
    intended schema before relying on this relation.
    """
    class Meta:
        db_table = 'ac_invoices'
    ac_invoice_item = models.ForeignKey('AcInvoiceItems', on_delete=models.CASCADE)
    tenant = models.ForeignKey('AcTenant', on_delete=models.CASCADE)
    doc_date = models.CharField(max_length=45)
    doc_number = models.CharField(max_length=45)
    customer = models.ForeignKey('Customer', on_delete=models.CASCADE)
    total_discount = models.IntegerField()
    total_tax = models.IntegerField()
    total_value = models.IntegerField()
    def __str__(self):
        return self.doc_date
class AcTenant(models.Model):
    """A tenant in the multi-tenant accounting schema (table ``ac_tenant``)."""
    class Meta:
        db_table = 'ac_tenant'
    name = models.CharField(max_length=45)
    def __str__(self):
        return self.name
class AcInvoiceItems(models.Model):
    """A single invoice line (table ``ac_invoice_items``)."""
    class Meta:
        db_table = 'ac_invoice_items'
    product = models.ForeignKey('AcProducts', on_delete=models.CASCADE)
    tenant = models.ForeignKey('AcTenant', on_delete=models.CASCADE)
    quantity = models.FloatField()
    unit_value = models.FloatField()
    item_value = models.FloatField()
    def __str__(self):
        return str(self.quantity)
class AcProducts(models.Model):
    """A sellable product belonging to a tenant (table ``ac_products``)."""
    class Meta:
        db_table = 'ac_products'
    tenant = models.ForeignKey('AcTenant', on_delete=models.CASCADE)
    name = models.CharField(max_length=45)
    description = models.TextField()
    list_price = models.FloatField()
    def __str__(self):
        return self.name
|
def strxor(a, b):
    """XOR two strings character-by-character.

    The result has length min(len(a), len(b)) — zip silently trims the
    longer input.
    """
    return "".join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))
# Challenge fixtures: the flag, the reused 42-char OTP key, and thirteen
# plaintexts that will all be XOR-encrypted under the SAME key (which is the
# vulnerability the challenge demonstrates).
flag = "zenseCTF{0tp_0n1y_0nc3}" #n0w y0u kn0w 0tp 15 0n3 71m3}"
print(len(flag))
key = "This is going to be a phrase of length 42"
print(len(key))
p1 = "So one day a guy started out on a journey!"
p2 = "He saw that the secret to the universe = 42"
p3 = "Then he saw he'd get da flag on decryption"
p4 = "As you know, madness is like gravity...all it takes is a little push."
p5 = "I used to think that my life was a tragedy, but now I realize, it’s a comedy."
p6 = "My mother always tells me to smile and put on a happy face. She told me I had a purpose: to bring laughter and joy to the world."
p7 = "I thought it was going to bother me, but it really hasn’t."
p8 = "When you bring me out, can you introduce me as Joker?"
p9 = "What do you get when you cross a mentally ill loner with a society abandons him and treats him like trash?"
p10 = "The worst part of having a mental illness is people expect you to behave as if you don’t."
p11 = "Everybody is awful these days. It’s enough to make anyone crazy. If it was me dying on the sidewalk, you’d walk right over me. I pass you everyday and you don’t notice me!"
p12 = "For my whole life, I didn’t know if I even really existed. But I do, and people are starting to notice."
p13 = "Have you seen what it’s like out there, Murray? Everybody just yells and screams at each other. Nobody’s civil anymore! Nobody thinks what it’s like to be the other guy."
print(len(p7))
l = [p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12,p13]
def encrypt(a, b):
    """XOR `a` against `b` and return the result as a UTF-8 hex string."""
    xored = strxor(a, b)
    return xored.encode('utf-8').hex()
def encrypt_list(c):
    """Encrypt every plaintext in `c` against the module-level `key`."""
    return [encrypt(plaintext, key) for plaintext in c]
# Emit the challenge output: the encrypted flag plus all thirteen ciphertexts,
# formatted as copy-pastable Python assignments.
c_flag = (strxor(flag,key)).encode('utf-8').hex()
print(f'c_flag = "{c_flag}"')
c = encrypt_list(l)
for i in range(len(c)):
    print(f'c{i+1} = "{c[i]}"')
print(f'ciphers = [c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13]')
print(f'target_cipher = "{c_flag}"')
print(c)
'''c1 = (strxor(p1,key)).encode('utf-8').hex()
c2 = (strxor(p2,key)).encode('utf-8').hex()
c3 = (strxor(p3,key)).encode('utf-8').hex()
c4 = encrypt(p4,key)
print(f'c1 = {c1}')
print(f'c2 = {c2}')
print(f'c3 = {c3}')'''
class Solution:
    def canPartition(self, nums):
        """
        Return True if `nums` can be split into two subsets with equal sums.

        :type nums: List[int]
        :rtype: bool

        Uses a bitset subset-sum DP in O(n * sum) time. This replaces the
        original exponential backtracking, which needed a hard-coded answer
        for one specific 99-element input to pass within the time limit.
        """
        total = sum(nums)
        # An odd total can never split into two equal halves.
        if total % 2 == 1:
            return False
        target = total // 2
        # Bit i of `reachable` is set iff some subset of the numbers seen so
        # far sums to exactly i. Bit 0 (the empty subset) starts set.
        reachable = 1
        for num in nums:
            reachable |= reachable << num
            # Early exit once the half-sum is achievable.
            if (reachable >> target) & 1:
                return True
        return (reachable >> target) & 1 == 1
# NOTE: In this problem, the loop form
#     for i in range(len(nums)):
#         if helper(nums[i+1:], target - nums[i]):
#             return True
#     return False
# runs faster than the two-branch recursion
#     return helper(nums[1:], target - nums[0]) or helper(nums[1:], target)
import sys

# Degrees swept per hour/minute/second hand unit on a 12-hour clock face.
DEGREE_COUNT_PER_HOUR = 360 / 12
# Fix: the original referenced the undefined name DEGREES_PER_HOUR (NameError).
DEGREE_COUNT_PER_MINUTE = DEGREE_COUNT_PER_HOUR / 60
DEGREE_COUNT_PER_SECOND = DEGREE_COUNT_PER_MINUTE / 60

# Input: one integer per line on stdin — hours, minutes, seconds.
hour_count, minute_count, second_count = map(int, sys.stdin)
print(
    hour_count * DEGREE_COUNT_PER_HOUR +
    minute_count * DEGREE_COUNT_PER_MINUTE +
    second_count * DEGREE_COUNT_PER_SECOND
)
|
# coding: utf-8
#!/usr/bin/python
#libraries and modules used in our analysis
import sys
import pickle
import pandas as pd
sys.path.append("../tools/")
from sklearn.feature_selection import SelectKBest
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score
from feature_format import featureFormat, targetFeatureSplit
from sklearn.ensemble import AdaBoostClassifier
from tester import*
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import VotingClassifier
import seaborn as sns
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from xgboost import XGBClassifier
### Task 1: Data Preparation and Feature Selection
### Load the dictionary containing the dataset and convert it to Pandas df:
# Fix: Python-3 print() calls throughout (the py2 print statements were
# SyntaxErrors), and the pickle file is opened in binary mode as py3 requires.
with open("final_project_dataset.pkl", "rb") as data_file:
    data_dict = pickle.load(data_file)
df = pd.DataFrame.from_dict(data_dict, orient = 'index', )
#Checking basic information about dataframe
print('Dataframe shape:', df.shape)
print('Dataframe basic info columnwise:', df.info())
#drop 'email_address' column as we don't need it in our analysis
df = df.drop('email_address',1)
#convert columns to numeric, the function will automatically convert boolean values
#in ['poi'] column to 0 and 1
df = df.apply(lambda x: pd.to_numeric(x, errors = 'coerce'))
df.poi = df.poi.astype(int)
#detect and remove anomalies in names
for name in df.index.values.tolist():
    #normally person's name will contain first and last name and middle name letters,
    #so we look for names longer than normal ones
    if len(name.split()) > 3:
        print('Potential anomalies', name)
df = df.drop(['THE TRAVEL AGENCY IN THE PARK'])
print('NaN columnwise:', df.isnull().sum())
#remove three columns with most NaNs
df = df.drop(['loan_advances', 'director_fees', 'restricted_stock_deferred'],1)
print('NaN rowwise:', df.isnull().sum(axis =1).sort_values(ascending = True))
#remove last two rows with most NaNs
df = df.drop(['LOCKHART EUGENE E', 'GRAMM WENDY L'])
#now checking our classes
print('Number of non-poi:', df['poi'][df['poi'] == 0].count())
print('Number of poi:', df['poi'][df['poi'] == 1].count())
# We do imputation for financial and email features separately.
# We impute all missing financial data with 0, and all missing email data with -1.
# Doing it this way, we are not assigning the missing value to any meaningful number but just 'mark'
# missing value so we could distinguish them latter in the analysis.
financial_features = ['salary', 'deferral_payments','total_payments','exercised_stock_options',
'bonus','restricted_stock', 'total_stock_value','expenses','other','deferred_income','long_term_incentive']
df[financial_features] = df[financial_features].fillna(0)
email_features = ['to_messages', 'from_messages', 'shared_receipt_with_poi', 'from_this_person_to_poi',
'from_poi_to_this_person']
df[email_features] = df[email_features].fillna(-1)
col_list = df.columns.tolist()
col_list.remove('poi')
# 'poi' (the label) must come first in the features list for the tester.
features_list = ['poi']
for i in col_list:
    features_list.append(i)
# ### Task 2: Remove outliers
# (Fix: Python-3 print() call below — the py2 print statement was a SyntaxError.)
#Let's plot scatterplot matrix to have a quick visualizations of our data.
#Plot financial data and emails data separately
plot = sns.pairplot(df[['to_messages', 'from_messages', 'shared_receipt_with_poi', 'from_this_person_to_poi',
'from_poi_to_this_person','poi']], hue="poi")
# There seem to be no obvious outliers in emails data.
# Now let's examine financial data
plot = sns.pairplot(df[['salary', 'deferral_payments','total_payments','exercised_stock_options', 'bonus',
'restricted_stock', 'total_stock_value','expenses','other','deferred_income','long_term_incentive',
'poi']], hue="poi")
#we have one obvious outlier in ['salary'] column, let's check it and remove it
df[df['salary'] > 25000000]
df = df.drop(['TOTAL'])
# ### Task 3: Create new feature(s)
#Create two new features for email data: ratio of the emails to/from poi to all to/from emails of the person
#ratio of the emails recieved from poi to the total number of incoming emails
df['from_poi_ratio'] = df['from_poi_to_this_person'] / df['to_messages']
df['to_poi_ratio'] = df['from_this_person_to_poi'] / df['from_messages']
features_list.append('from_poi_ratio')
features_list.append('to_poi_ratio')
features = features_list[1:]
selector = SelectKBest()
selector.fit(df[features], df['poi'])
scores = selector.scores_
#let's have a look on our scores
plt.bar(range(len(features)), scores)
plt.xticks(range(len(features)), features, rotation='vertical')
plt.show()
#first we cut off features with lowest scores, since they will not contribute to our analysis
#for starters let's keep features scored at least 5, however we will find the optimal number of
#features with GridSearch later in the analysis
features_list = ['poi']
feat_score_dict = {}
for feature,score in zip(features, scores):
    if score >5:
        feat_score_dict[feature] = score
        features_list.append(feature)
print('Features to be used:', features_list)
### Store to my_dataset for easy export below.
data_dict = df.to_dict(orient = 'index')
my_dataset = data_dict
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
### Task 4: Try a variety of classifiers
# (Fix: Python-3 print() calls throughout — the py2 print statements were
# SyntaxErrors.)
#### Random Forest
scaler = RobustScaler()
pca = PCA(n_components = 5, random_state = 42)
rf_classifier = RandomForestClassifier(random_state = 42)
selector = SelectKBest(k='all')
#define steps for the pipeline
steps = [('feature_scaling', scaler),
('feature_selection', selector),
('pc_analysis', pca),
('classification_algorithm', rf_classifier)]
clf_rf = Pipeline(steps)
clf_rf.fit(features, labels)
print('Udacity tester results on Random Forest classifier:')
test_classifier(clf_rf, my_dataset, features_list)
#### AdaBoost
scaler = RobustScaler()
pca = PCA(n_components = 5, random_state = 42)
ab_classifier = AdaBoostClassifier(random_state = 42)
selector = SelectKBest(k='all')
steps = [('feature_scaling', scaler),
('feature_selection', selector),
('pc_analysis', pca),
('classification_algorithm', ab_classifier)]
clf_2 = Pipeline(steps)
clf_2.fit(features, labels)
print('Udacity tester results on AdaBoost classifier:')
test_classifier(clf_2, my_dataset, features_list)
#### Decision Tree
scaler = RobustScaler()
pca = PCA(n_components = 5, random_state = 42)
dt_classifier = DecisionTreeClassifier(random_state = 42)
selector = SelectKBest(k='all')
steps = [('feature_scaling', scaler),
('feature_selection', selector),
('pc_analysis', pca),
('classification_algorithm', dt_classifier)]
clf_dt = Pipeline(steps)
clf_dt.fit(features, labels)
print('Udacity tester results on Decision Tree classifier:')
test_classifier(clf_dt, my_dataset, features_list)
#### XGBoost
xgb_classifier = XGBClassifier()
scaler = RobustScaler()
pca = PCA(n_components = 5, random_state = 42)
selector = SelectKBest(k='all')
steps = [('feature_scaling', scaler),
('feature_selection', selector),
('pc_analysis', pca),
('classification_algorithm', xgb_classifier)]
clf_xgb = Pipeline(steps)
clf_xgb.fit(features, labels)
print('Udacity tester results on XGBClassifier:')
test_classifier(clf_xgb, my_dataset, features_list)
#### Naive Bayes
scaler = RobustScaler()
pca = PCA(random_state = 42)
nb_classifier = GaussianNB()
selector = SelectKBest(k='all')
steps = [('feature_scaling', scaler),
('feature_selection', selector),
('pc_analysis', pca),
('classification_algorithm', nb_classifier)]
clf_nb = Pipeline(steps)
clf_nb.fit(features, labels)
print('Udacity tester results on Naive Bayes classifier:')
test_classifier(clf_nb, my_dataset, features_list)
#Now we try to ensemble a voting classifier to see if it will enhance our results.
#To do that we ensemble our three most successful models : Decision Tree,
#Random Forest and Naive Bayes
estimators = []
model1 = clf_dt
estimators.append(('dt', model1))
model2 = clf_rf
estimators.append(('rf', model2))
model3 = clf_nb
estimators.append(('nb', model3))
ensemble = VotingClassifier(estimators, weights = [1,1,1])
print('Udacity tester results for voting classifier:')
test_classifier(ensemble, my_dataset, features_list)
### Task 5: Tuning classifier
param_grid = {'feature_selection__k' : [5, 7,10,'all'],'pc_analysis__n_components': [2,3,5,None]}
c_val = StratifiedShuffleSplit(n_splits= 300, random_state=42)
clf_5 = GridSearchCV(clf_nb, param_grid = param_grid, scoring = 'f1', cv = c_val)
clf_5.fit(features, labels)
print('Udacity tester results for tuned Naive Bayes algorithm:')
test_classifier(clf_5.best_estimator_, my_dataset, features_list)
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
dump_classifier_and_data(clf_5.best_estimator_, my_dataset, features_list)
|
#Python
# Fix: 'Print' was capitalized (NameError/SyntaxError) and the remaining
# lines used Python-2 print statements; all converted to print() calls.
print("Hello World!")
print("I am Rubin.I am coming!")
print('I try change the file from the Git windows client 22:00 10/12')
print('update 22:12 hehe on the website')
#2015-10-15 13:00 by Maidao update
#elice_3_3_27.py
import sklearn.decomposition
import numpy as np
import pandas as pd
import elice_utils
def main():
    """Load the toy dataset, run a 1-component PCA, and plot the projection."""
    df = input_data()
    # 2
    pca, pca_array = run_PCA(df, 1)
    # 4
    print(elice_utils.draw_toy_example(df, pca, pca_array))
def input_data():
    """Build the (x, y) toy DataFrame for the PCA exercise."""
    # 1
    # NOTE(review): X and Y are not defined anywhere in this module — this
    # template step must be completed (define/load the coordinate arrays)
    # or the call raises NameError.
    df = pd.DataFrame({'x': X, 'y': Y})
    return df
def run_PCA(dataframe, num_components):
    """Fit a PCA with `num_components` components to `dataframe`.

    Returns the fitted PCA object and the projected array.
    (Fix: the original template returned the undefined names `pca` and
    `pca_array`, raising NameError when called.)
    """
    # 2
    pca = sklearn.decomposition.PCA(n_components=num_components)
    pca_array = pca.fit_transform(dataframe)
    return pca, pca_array
if __name__ == '__main__':
main()
|
from wallace.db.base import Model, KeyValueModel, RelationalModel
from wallace.db.base import DataType
from wallace.db.base import Boolean, ByteArray, Float, Integer, JSON, Moment
from wallace.db.base import Now, String, Unicode, UUID, UUID4
from wallace.db.base import DBError, DoesNotExist, ValidationError
from wallace.db.mongo import MongoCollection, MongoDocument, MongoPool
from wallace.db.pg import PostgresModel, PostgresPool, PostgresTable
from wallace.db.redisdb import ExpiringRedisHash, RedisHash, RedisSocket
# Public API of the wallace.db package, grouped by backend/category.
__all__ = [
    # base
    'DataType', 'KeyValueModel', 'Model', 'RelationalModel',
    # errors
    'DBError', 'DoesNotExist', 'ValidationError',
    # mongo
    'MongoCollection', 'MongoDocument', 'MongoPool',
    # postgres
    'PostgresModel', 'PostgresPool', 'PostgresTable',
    # redis
    'ExpiringRedisHash', 'RedisHash', 'RedisSocket',
    # types
    'Boolean', 'ByteArray', 'Float', 'Integer', 'JSON', 'Moment', 'Now',
    'String', 'Unicode', 'UUID', 'UUID4',
]
|
from machine import Pin
from apa102 import APA102
clock = Pin(14, Pin.OUT)  # set GPIO14 to output to drive the clock
data = Pin(13, Pin.OUT)  # set GPIO13 to output to drive the data
apa = APA102(clock, data, 128)  # create APA102 driver on the clock and the data pin for 128 pixels
def intensidad(value=128):
    """Light the leading pixels of the 128-LED strip white, blank the rest.

    `value` is taken modulo 128, so intensidad(128) wraps around to 0.
    """
    max_intensidad = 128
    value = value % max_intensidad
    for i in range(max_intensidad):
        if i <= value:
            # NOTE(review): '<=' lights value+1 LEDs; confirm whether a strict
            # '<' (exactly `value` LEDs) was intended.
            apa[i] = (255,255,255,32)
        else:
            apa[i] = (0,0,0,0)
    apa.write() # write data to all pixels
|
import random
from turtle import Turtle
from turtle import Screen
from snake import Snake
import random
class Food(Turtle):
    """ This Class is responsible for creating Food and Bonus Food for snake"""

    # Playfield bounds for random food placement.
    _MIN_COORD = -210
    _MAX_COORD = 210

    def __init__(self):
        super().__init__()
        # self.create_food()

    def _place(self, shape, color, label):
        """Style this turtle as the given food kind and move it to a random spot."""
        self.penup()
        self.shape(shape)
        self.color(color)
        self.x_cordinates = random.randint(self._MIN_COORD, self._MAX_COORD)
        self.y_cordinates = random.randint(self._MIN_COORD, self._MAX_COORD)
        self.goto(self.x_cordinates, self.y_cordinates)
        print(f"This Is {label} {self.x_cordinates} and {self.y_cordinates}")

    def create_food(self):
        """ This Method Create Food for Snake"""
        # Fix: create_food/bonus_food were near-duplicates; both now share _place.
        self._place("circle", "green", "Food")

    def bonus_food(self):
        """ This Method Create Bonus Food for Snake"""
        self._place("turtle", "red", "Bonus Food")
import turtle
import random
wn=turtle.Screen()
t1=turtle.Turtle()
t2=turtle.Turtle()
t3=turtle.Turtle()
t1.speed(7)
t2.shape("turtle")
t2.color("Green")
t2.penup()
t3.shape("turtle")
t3.color("Black")
t3.speed(5)
t3.penup()
size1=100
size2=50
pos1=(-300,0)
pos2=(-150,-200)
pos3=(130,100)
pos4=(-170,150)
pos5=(-100,-50)
pos6=(50,-100)
pos7=(-50,200)
def DrawSquare(size,pos):
    """Draw a filled pink square of side `size` with its top-left corner at `pos`."""
    t1.pencolor("Pink")
    t1.penup()
    t1.goto(pos)
    t1.pendown()
    t1.fillcolor("Pink")
    t1.begin_fill()
    for i in range(0,4):
        t1.fd(size)
        t1.right(90)
    t1.end_fill()
def DrawTriangleup(size,pos):
    """Draw a filled orange equilateral triangle pointing up, starting at `pos`."""
    t1.pencolor("Orange")
    t1.penup()
    t1.goto(pos)
    t1.pendown()
    t1.fillcolor("Orange")
    t1.begin_fill()
    for i in range(0,3):
        t1.fd(size)
        t1.left(120)
    t1.end_fill()
def DrawTriangledown(size,pos):
    """Draw a filled orange equilateral triangle pointing down, starting at `pos`."""
    t1.pencolor("Orange")
    t1.penup()
    t1.goto(pos)
    t1.pendown()
    t1.fillcolor("Orange")
    t1.begin_fill()
    for i in range(0,3):
        t1.fd(size)
        t1.right(120)
    t1.end_fill()
def DrawCircle(size,pos):
    """Draw a filled grey circle of radius `size` at `pos`.

    Fix: the original ignored its `size` parameter and always used the global
    `size2` (all call sites pass size2, so rendering is unchanged).
    """
    t1.pencolor("Grey")
    t1.penup()
    t1.goto(pos)
    # NOTE(review): the pen stays up, so only the fill is drawn (no outline),
    # matching the original behaviour — confirm whether an outline was wanted.
    t1.fillcolor("Grey")
    t1.begin_fill()
    t1.circle(size)
    t1.end_fill()
def Turtle3():
    """Re-style the turtles, then move t3 in random steps/headings forever.

    NOTE(review): the `while True` loop never exits, so any code after a call
    to this function (e.g. the key bindings below) is never reached.
    """
    t1.speed(7)
    t2.shape("turtle")
    t2.color("Green")
    t2.penup()
    t3.shape("turtle")
    t3.color("Black")
    t3.speed(5)
    t3.penup()
    while True:
        fd=random.randrange(1,100)
        head=random.randrange(0,360)
        t3.fd(fd)
        t3.setheading(head)
def Setting():
    """Draw all obstacles, then start the opponent turtle.

    NOTE(review): Turtle3() loops forever, so Setting() never returns.
    """
    DrawSquare(size1,pos1)
    DrawTriangleup(size1,pos2)
    DrawCircle(size2,pos3)
    DrawTriangleup(size2,pos4)
    DrawTriangledown(size2,pos4)
    DrawCircle(size2,pos5)
    DrawSquare(size1,pos6)
    DrawSquare(size1,pos7)
    Turtle3()
# NOTE(review): Setting() never returns (Turtle3 loops forever), so the key
# bindings registered below are unreachable as written.
Setting()
# These module-level x/y are unused: k1() rebinds local x, y from t2.pos().
x=float()
y=float()
def k1():
    """Move the player forward 10 units and reset it if it hit any obstacle.

    Fixes: Python-2 print statements converted to print() calls (SyntaxError
    on py3); removed the unused `global point` declaration (`point` is never
    defined or assigned anywhere).
    """
    t2.fd(10)
    (x,y)=t2.pos()
    # Axis-aligned hit boxes for the three squares.
    if -300<=x<=-200 and -100<=y<=0:
        print("Game Over")
        t2.goto(0,0)
    if -50<=x<=50 and 100<=y<=200:
        print("Game Over")
        t2.goto(0,0)
    if 50<=x<=150 and -200<=y<=-100:
        print("Game Over")
        t2.goto(0,0)
    # Half-plane tests for the big upward triangle (slope = tan(60°) ≈ 1.732).
    if y<=1.73205080756888*x+150*1.73205080756888-200 and y<=1.73205080756888*(-x)-1.73205080756888*50-200 and y>=-200:
        print("Game Over")
        t2.goto(0,0)
    # Band tests for the overlapping small up/down triangles.
    if y<=1.73205080756888*x+170*1.73205080756888+150 and y>=1.73205080756888*x+1.73205080756888*120+150 and y<=-1.73205080756888*x+150-1.73205080756888*120 and y>=-1.73205080756888*x+150-1.73205080756888*170:
        print("Game Over")
        t2.goto(0,0)
    # Circle hit tests: (x - cx)^2 + (y - cy)^2 <= r^2 with r = 50.
    if (x+100)*(x+100)+y*y<=2500:
        print("Game Over")
        t2.goto(0,0)
    if (x-130)*(x-130)+(y-150)*(y-150)<=2500:
        print("Game Over")
        t2.goto(0,0)
def k2():
    """Turn the player 15 degrees left."""
    t2.left(15)
def k3():
    """Turn the player 15 degrees right."""
    t2.right(15)
def k4():
    """Step the player 10 units backwards."""
    t2.back(10)
# Arrow-key bindings for the player turtle.
wn.onkey(k1, "Up")
wn.onkey(k2, "Left")
wn.onkey(k3, "Right")
wn.onkey(k4, "Down")
wn.listen()
wn.exitonclick()
# Fix: converted the Python-2 print statement to a print() call.
if t2.pos()==t3.pos():
    print("You win!!!")
wn.exitonclick()
import requests
import paho.mqtt.client as PahoMQTT
import json
import re
import random, string
def get_random_string(length):
    """Return a string of `length` random lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(length))
class SuperMicroserviceClass(object):
    """Base microservice: REST catalog access plus an MQTT subscriber client."""
    def __init__(self,clientID,address='http://127.0.0.1:8080'):
        self.address=address #### address of the bike catalog
        self.clientID=clientID
        self.broker="" ### message broker for mqtt
        self.port=0
        # Random suffix keeps MQTT client ids unique across instances.
        self._paho_mqtt = PahoMQTT.Client(self.clientID + get_random_string(3), True)
        # NOTE(review): myOnMessageReceived is not defined in this class —
        # presumably supplied by a subclass; confirm.
        self._paho_mqtt.on_message = self.myOnMessageReceived
    def GetCache(self,userID: str,url :str ):
        """Fetch cached catalog data from `url` and store it for `userID`."""
        catalog_cache=json.loads(requests.get(url).text)
        if catalog_cache!={}:
            # Only cache when both payload parts are non-empty.
            if catalog_cache[0] != {} and catalog_cache[1] != {}:
                # NOTE(review): AddToSession is also not defined here —
                # expected from a subclass; confirm.
                self.AddToSession(userID,catalog_cache)
    def StartSubscriber(self,topic_name : str,Broker,Port):
        """Connect (only when broker/port changed) and subscribe at QoS 2."""
        if self.broker!=Broker or self.port!=Port:
            self.broker=Broker
            self.port=Port
            self._paho_mqtt.connect(self.broker, self.port)
            self._paho_mqtt.loop_start()
        self._paho_mqtt.subscribe(topic_name, 2)
        print('StartSubscriber')
    def stop(self):
        """Stop the MQTT network loop and disconnect cleanly."""
        self._paho_mqtt.loop_stop()
        self._paho_mqtt.disconnect()
        print('Microservice disconnected')
class ID_Finder(object):
    """Extract the leading id segment (text before the first '/') from strings."""

    def __init__(self):
        # Non-greedy: capture everything up to the FIRST '/'.
        self.regex=r'^(.*?)/'

    def find(self,string):
        """Return the text before the first '/', or None when there is no '/'.

        (Fix: replaced findall + bare `except: pass` — which silently
        swallowed every error — with an explicit match/None check.)
        """
        match = re.match(self.regex, string)
        return match.group(1) if match else None

    def find_where(self,string):
        """Return True iff the substring 'where' occurs in `string`.

        (Fix: the original relied on an IndexError from findall()[0] plus a
        bare except to produce False; this states the intent directly.)
        """
        return bool(re.search(r'where', string))
|
# Exercise 4.9
# Affordability check for financing a home purchase: the monthly installment
# must not exceed 30% of the buyer's salary.
casa = float(input('Digite o valor do imóvel desejado:'))  # property price
salário = float(input('Digite o valor de seu salário atual:'))  # monthly salary
qtde = float(input('Digite o número de anos em que deseja pagar o imóvel:'))  # years to pay
prestmax = (salário*(30/100))  # maximum affordable installment (30% of salary)
prestreq = (casa/(qtde*12))  # required monthly installment
if prestreq>prestmax:
    print('Seu salário não é compatível com o imóvel desejado!')
elif prestreq <= prestmax:
    print('Parabéns! Você poderá adquirir este imóvel pelo valor mensal de R$ %4.2f.' % prestreq)
|
import os
import sys
import time
import googleapiclient
import googleapiclient.discovery
import googleapiclient.errors
if "YOUTUBE_API_KEY" not in os.environ:
    print("YOUTUBE_API_KEY not provided")
    # Fix: exit non-zero so callers/CI detect the missing configuration
    # (the original exited 0, signalling success).
    sys.exit(1)
# Build playlist set save to file
api_service_name = "youtube"
api_version = "v3"
youtube = googleapiclient.discovery.build(
    api_service_name, api_version, developerKey=os.getenv("YOUTUBE_API_KEY"))
BASE_EMBED_URL = "http://www.youtube.com/embed/videoseries?list="
page_token = ""
hasNextPage = True
counter = 0
# Fix: 'with' guarantees the output file is closed even if an API call raises
# (the original leaked the handle on any exception inside the loop).
with open("channel_playlists_url.txt", "a") as f:
    while hasNextPage:
        time.sleep(3)  # throttle requests to stay within API quota
        request = youtube.playlists().list(
            part="id,localizations,player,snippet",
            channelId="UCTsFhKAF7i0KklHZ8NGqOoQ",
            maxResults=50,
            pageToken=page_token
        )
        playlist = request.execute()
        try:
            page_token = playlist["nextPageToken"]
        except KeyError:
            # Last page: its items are still processed below, then we stop.
            print("Ran out of pages. Stopping.")
            hasNextPage = False
        for item in playlist["items"]:
            title = item["snippet"]["title"]
            embed_url = BASE_EMBED_URL + item["id"]
            print("%s - %s" % (counter, title))
            f.write(title + " - " + embed_url + "\n")
            counter += 1
|
"""MyRunningCar URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from testapp import views
# URL routing table: admin site, course-syllabus pages, and static info pages.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^home/', views.homeview),
    url(r'^python/', views.pythonsyllabus),
    url(r'^corejava/', views.corejavasyllabus),
    url(r'^advjava/', views.advjavasyllabus),
    url(r'^cplusplus/', views.cplusplussyllabus),
    url(r'^c/', views.csyllabus),
    url(r'^register/', views.register),
    url(r'^faq/', views.faq),
    url(r'^contactus/', views.contactus),
    url(r'^about/', views.about),
    url(r'^join/', views.join),
    url(r'^social/', views.social),
    url(r'^fb/', views.fb),
]
|
# Print, then collect, the leading elements of `a` that are < 5.
a = [1, 1, 2, 3, 4, 5, 8, 10, 15, 20, 100]

i = 0
while a[i] < 5:
    print(a[i])
    # Fix: removed the redundant `if a[i] >= 5: break` after the increment —
    # the while condition performs exactly the same test on the next pass.
    i = i + 1

i = 0
# Fix: renamed `list` to `items` to stop shadowing the builtin.
items = []
while a[i] < 5:
    items.append(a[i])
    i = i + 1
print(items)
|
from Crypto.PublicKey import RSA
from Crypto import Random
def setup_ot():
    """Sender-side setup for a 1-of-2 oblivious transfer.

    Returns the RSA public key and two random 8-byte values r1, r2.
    NOTE(review): 1024-bit RSA is below current security recommendations.
    """
    prng = Random.new().read
    key = RSA.generate(1024, prng)
    r1 = Random.get_random_bytes(8)
    r2 = Random.get_random_bytes(8)
    publickey = key.publickey()
    return publickey, r1, r2
def select_one(key, r1, r2, select):
    """Receiver step of the OT: blind the chosen value with an RSA-encrypted nonce.

    NOTE(review): as written this raises TypeError on Python 3 — `nonce` is
    bytes but pow() needs an int, and `select + pow(...)` adds bytes to an
    int.  The bytes must be converted (e.g. int.from_bytes) before this can
    run; confirm the intended protocol arithmetic before fixing.
    """
    nonce = Random.get_random_bytes(8)
    # Truthy `select` picks r1, otherwise r2 (the parameter is rebound).
    select = r1 if select else r2
    result = select + pow(nonce, key.e, key.n)
    return result
|
"""
Title: 'Task 1' - Main
Author: Caleb Otto-Hayes
Date: 4/2/2021
"""
import cipher, testing, sys
def main() -> None:
    """
    Main method to call test cases and user input to convert text.

    Uses sys.argv[1] as the input when provided; otherwise prompts on stdin.
    """
    # Run input unless an argument is provided
    print('Converted text: \'' + cipher.convert_text(input('\nEnter text: ') if len(sys.argv) == 1 else sys.argv[1]) + '\'')
    testing.run_test_cases(testing.TestType.CONVERTED, 'Converted')
# Entry point
if __name__ == '__main__':
main() |
from opentera.db.Base import BaseModel
from opentera.db.SoftDeleteMixin import SoftDeleteMixin
from opentera.db.SoftInsertMixin import SoftInsertMixin
from opentera.db.models.TeraTestTypeProject import TeraTestTypeProject
from sqlalchemy import Column, ForeignKey, Integer, Sequence
from sqlalchemy.orm import relationship
from sqlalchemy.exc import IntegrityError
class TeraTestTypeSite(BaseModel, SoftDeleteMixin, SoftInsertMixin):
    """Join table associating a test type with a site (many-to-many link)."""
    __tablename__ = 't_tests_types_sites'
    id_test_type_site = Column(Integer, Sequence('id_test_type_site_sequence'), primary_key=True,
                               autoincrement=True)
    id_test_type = Column('id_test_type', Integer, ForeignKey('t_tests_types.id_test_type', ondelete='cascade'),
                          nullable=False)
    id_site = Column('id_site', Integer, ForeignKey('t_sites.id_site', ondelete='cascade'), nullable=False)
    # Read-only convenience relationships to the linked rows.
    test_type_site_test_type = relationship("TeraTestType", viewonly=True)
    test_type_site_site = relationship("TeraSite", viewonly=True)
    def to_json(self, ignore_fields=None, minimal=False):
        """Serialize to a dict, always omitting the relationship attributes."""
        if ignore_fields is None:
            ignore_fields = []
        ignore_fields.extend(['test_type_site_test_type', 'test_type_site_site'])
        if minimal:
            ignore_fields.extend([])
        rval = super().to_json(ignore_fields=ignore_fields)
        return rval
    @staticmethod
    def create_defaults(test=False):
        """Insert the default test-type/site associations (test fixtures only)."""
        if test:
            # Local imports avoid circular imports between model modules.
            from opentera.db.models.TeraTestType import TeraTestType
            from opentera.db.models.TeraSite import TeraSite
            pre_test = TeraTestType.get_test_type_by_id(1)
            post_test = TeraTestType.get_test_type_by_id(2)
            general_test = TeraTestType.get_test_type_by_id(3)
            default_site = TeraSite.get_site_by_sitename('Default Site')
            secret_site = TeraSite.get_site_by_sitename('Top Secret Site')
            tts = TeraTestTypeSite()
            tts.id_test_type = pre_test.id_test_type
            tts.id_site = default_site.id_site
            TeraTestTypeSite.db().session.add(tts)
            tts = TeraTestTypeSite()
            tts.id_test_type = post_test.id_test_type
            tts.id_site = default_site.id_site
            TeraTestTypeSite.db().session.add(tts)
            tts = TeraTestTypeSite()
            tts.id_test_type = pre_test.id_test_type
            tts.id_site = secret_site.id_site
            TeraTestTypeSite.db().session.add(tts)
            tts = TeraTestTypeSite()
            tts.id_test_type = general_test.id_test_type
            tts.id_site = secret_site.id_site
            TeraTestTypeSite.db().session.add(tts)
            TeraTestTypeSite.db().session.commit()
    @staticmethod
    def get_test_type_site_by_id(tts_id: int, with_deleted: bool = False):
        """Fetch one association row by its primary key."""
        return TeraTestTypeSite.query.execution_options(include_deleted=with_deleted)\
            .filter_by(id_test_type_site=tts_id).first()
    @staticmethod
    def get_sites_for_test_type(test_type_id: int, with_deleted: bool = False):
        """Return all associations for the given test type."""
        return TeraTestTypeSite.query.execution_options(include_deleted=with_deleted)\
            .filter_by(id_test_type=test_type_id).all()
    @staticmethod
    def get_tests_types_for_site(site_id: int, with_deleted: bool = False):
        """Return all associations for the given site."""
        return TeraTestTypeSite.query.execution_options(include_deleted=with_deleted)\
            .filter_by(id_site=site_id).all()
    @staticmethod
    def get_test_type_site_for_test_type_and_site(site_id: int, test_type_id: int, with_deleted: bool = False):
        """Return the single association row for a (site, test type) pair, or None."""
        return TeraTestTypeSite.query.execution_options(include_deleted=with_deleted)\
            .filter_by(id_site=site_id, id_test_type=test_type_id).first()
    @staticmethod
    def get_test_type_site_for_site_and_service(site_id: int, service_id: int, with_deleted: bool = False):
        """Return associations in `site_id` whose test type belongs to `service_id`."""
        from opentera.db.models.TeraTestType import TeraTestType
        return TeraTestTypeSite.query.execution_options(include_deleted=with_deleted).join(TeraTestType). \
            filter(TeraTestType.id_service == service_id). \
            filter(TeraTestTypeSite.id_site == site_id).all()
    @staticmethod
    def check_integrity(obj_to_check):
        """Ensure the test type's owning service is also associated to the site."""
        # If that test type is related to a service, make sure that the service is associated to that site
        service_sites = [site.id_site for site in
                         obj_to_check.test_type_site_test_type.test_type_service.service_sites]
        if obj_to_check.id_site not in service_sites:
            # We must also associate that service to that site!
            from opentera.db.models.TeraServiceSite import TeraServiceSite
            new_service_site = TeraServiceSite()
            new_service_site.id_service = obj_to_check.test_type_site_test_type.test_type_service.id_service
            new_service_site.id_site = obj_to_check.id_site
            TeraServiceSite.insert(new_service_site)
    @staticmethod
    def delete_with_ids(test_type_id: int, site_id: int, autocommit: bool = True):
        """Delete the association for a (test type, site) pair, if it exists."""
        delete_obj: TeraTestTypeSite = TeraTestTypeSite.query.filter_by(id_test_type=test_type_id,
                                                                        id_site=site_id).first()
        if delete_obj:
            TeraTestTypeSite.delete(delete_obj.id_test_type_site, autocommit=autocommit)
    @classmethod
    def delete(cls, id_todel, autocommit: bool = True):
        """Delete the association and cascade to project-level associations in the site."""
        from opentera.db.models.TeraTestTypeProject import TeraTestTypeProject
        # Delete all association with projects for that site
        delete_obj = TeraTestTypeSite.query.filter_by(id_test_type_site=id_todel).first()
        if delete_obj:
            projects = TeraTestTypeProject.get_projects_for_test_type(delete_obj.id_test_type)
            for tt_project in projects:
                if tt_project.test_type_project_project.id_site == delete_obj.id_site:
                    TeraTestTypeProject.delete(tt_project.id_test_type_project, autocommit=autocommit)
            # Ok, delete it
            super().delete(id_todel, autocommit=autocommit)
    @classmethod
    def insert(cls, tts):
        """Insert the association, then repair service/site integrity if needed."""
        inserted_obj = super().insert(tts)
        TeraTestTypeSite.check_integrity(inserted_obj)
        return inserted_obj
    def delete_check_integrity(self) -> IntegrityError | None:
        """Return an IntegrityError when tests of this type still exist in the site, else None."""
        for project in self.test_type_site_site.site_projects:
            test_type_project = TeraTestTypeProject.get_test_type_project_for_test_type_project(project.id_project,
                                                                                                self.id_test_type)
            if test_type_project:
                cannot_be_deleted_exception = test_type_project.delete_check_integrity()
                if cannot_be_deleted_exception:
                    return IntegrityError('Still have test of that type in the site', self.id_test_type, 't_tests')
        return None
    @classmethod
    def update(cls, update_id: int, values: dict):
        # NOTE(review): intentionally a no-op — association rows appear to be
        # created/deleted only, never updated. Confirm before relying on it.
        return
|
# -*-coding:utf-8 -*-
"""
Created on 2015-05-21
@author: Danny<manyunkai@hotmail.com>
DannyWork Project
"""
from __future__ import unicode_literals
from django.contrib import admin, messages
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from core.modeladmin import BaseModelAdmin
from .models import Feedback, ContactConfig, Link, AccessLog
class FeedbackAdmin(BaseModelAdmin):
    """
    Admin for user feedback entries.
    """
    list_display = ['username_display', 'email', 'ip_address', 'created', 'content', 'is_reply_handler']
    actions = None
    list_display_links = None

    def username_display(self, obj):
        # Registered users are shown by username, anonymous visitors by name or IP.
        return '站点用户 {0}'.format(obj.user.username) if obj.user \
            else '访客 {0}'.format(obj.username or obj.ip_address)
    username_display.short_description = '用户'

    def is_reply_handler(self, obj):
        # Render a "mark as replied" link for entries not yet answered.
        if obj.is_reply:
            return '已回复'
        return '<a href="{0}">标记为已回复</a>'.format(reverse('admin:dsite_feedback_set_to_replied', args=[obj.id]))
    is_reply_handler.short_description = '回复状态'
    is_reply_handler.allow_tags = True

    def get_urls(self):
        from django.conf.urls import patterns, url
        # Fix: the result used to be bound to the name ``patterns``, shadowing
        # the just-imported helper; use a distinct local name instead.
        urls = patterns(
            '',
            url(r'^(\d+)/set_to_replied/$', self.admin_site.admin_view(self.set_to_replied), name='dsite_feedback_set_to_replied'),
        ) + super(FeedbackAdmin, self).get_urls()
        return urls

    def set_to_replied(self, request, f_id):
        """Mark feedback ``f_id`` as replied and return to the changelist."""
        try:
            feedback = Feedback.objects.get(id=f_id, is_deleted=False)
        except Feedback.DoesNotExist:
            raise Http404
        feedback.is_reply = True
        feedback.save()
        messages.info(request, '标记成功!')
        return HttpResponseRedirect(reverse('admin:dsite_feedback_changelist'))
class ContactConfigAdmin(BaseModelAdmin):
    """
    Admin for the site contact configuration (singleton-style record).
    """
    list_display = ['qq', 'weibo', 'email', 'github']
    fields = ['qq', 'weibo', 'email', 'github', 'is_active']
    actions = None
    list_display_links = None

    def save_model(self, request, obj, form, change):
        # Saving always resurrects a soft-deleted record.
        obj.is_deleted = False
        obj.save()

    def add_view(self, request, form_url='', extra_context=None):
        # Only one configuration record may exist: redirect to it instead of
        # offering an "add" form when one is already present.
        existing = ContactConfig.objects.all()
        if not existing.exists():
            return super(ContactConfigAdmin, self).add_view(request, form_url, extra_context)
        return HttpResponseRedirect(reverse('admin:dsite_contactconfig_change', args=[existing[0].id]))

    def delete_view(self, request, object_id, extra_context=None):
        # Soft delete: deactivate the record rather than removing the row.
        ContactConfig.objects.filter(id=object_id).update(is_active=False)
        return HttpResponseRedirect(reverse('admin:dsite_contactconfig_changelist'))
class LinkAdmin(BaseModelAdmin):
    """
    Admin for friendly links (blogroll).
    """
    list_display = ['name', 'url', 'desc', 'is_active']
    fields = ['name', 'url', 'desc', 'is_active']
class AccessLogAdmin(BaseModelAdmin):
    """
    Admin for access log records (read-only listing).
    """
    list_display = ['username_display', 'path', 'referer', 'agent', 'ip_address', 'created']
    actions = None
    list_display_links = None

    def username_display(self, obj):
        # Authenticated hits show the username, anonymous hits the IP address.
        if obj.user:
            return '站点用户 {0}'.format(obj.user.username)
        return '访客 {0}'.format(obj.ip_address)
    username_display.short_description = '用户'
# Hook the admin classes above into the default admin site.
admin.site.register(Feedback, FeedbackAdmin)
admin.site.register(ContactConfig, ContactConfigAdmin)
admin.site.register(Link, LinkAdmin)
admin.site.register(AccessLog, AccessLogAdmin)
|
"A B C D"
class data():
    """A minimal LIFO stack backed by a Python list."""
    def __init__(self):
        # Internal storage; the top of the stack is the end of the list.
        self.items = []
    def push(self, i):
        """Push *i* onto the top of the stack."""
        self.items.append(i)
    def get(self):
        """Return the underlying list, bottom to top."""
        return self.items
    def pop(self):
        """Remove and return the top item (raises IndexError when empty).

        Bug fix: the original discarded the popped value, so callers could
        never retrieve it; returning it is backward-compatible.
        """
        return self.items.pop()
    def empty(self):
        """Return True when the stack holds no items."""
        return self.items == []
    def peek(self):
        """Return the top item without removing it, or None when empty."""
        if not self.empty():
            return self.items[-1]
# Quick manual demo of the stack class.
A=data()
print(A.empty())  # True: nothing pushed yet
A.push("a")
A.push("b")
A.push("c")
print(A.get())    # ['a', 'b', 'c']
print(A.empty())  # False
print(A.peek())   # 'c' (top of the stack)
|
from pandas import DataFrame
import constants as c
import drive_test as drive
import dataresource as collector
import database_handler as db
import pandas as pd
def reorder_columns(data_frame):
    """Rebuild *data_frame* so its columns match the target DB table layout.

    Columns missing from the input are filled with empty strings; an "id"
    column is never copied (the DB generates it); a record-creation
    timestamp column is appended.
    """
    aligned = DataFrame()
    expected = db.get_columns(c.SCHEMA, c.TABLE)
    if "id" in expected:
        expected.remove("id")
    for column in expected:
        if column in data_frame.columns:
            aligned[column] = data_frame[column]
        else:
            aligned[column] = ""
    aligned["record_created_date"] = collector.today("datetime")
    return aligned
def csv_to_db(file_name):
    """Load a previously exported CSV and bulk-insert it into the database table."""
    output_df = pd.read_csv(file_name)
    db.dataframe_to_table(output_df)
class DataCollector(object):
    """Accumulates tweets and news articles into one normalized data frame."""
    def __init__(self):
        # All collected, column-aligned records end up in this frame.
        self.collected_data = DataFrame()
    def collect_tweets(self, locale):
        """Fetch, clean and append tweets for *locale* ('en-it' or 'it-it')."""
        if locale not in ["en-it", "it-it"]:
            return None
        query_set = c.QUERIES
        query_filter = c.TWITTER_PARAMS["GENERIC_QUERY_FILTER"]
        locale = locale.strip().lower()
        if locale == "en-it":
            query_filter = c.TWITTER_PARAMS["IT_EN_QUERY_FILTER"]
        elif locale == "it-it":
            query_set = c.QUERIES_IT
            query_filter = c.TWITTER_PARAMS["IT_EN_QUERY_FILTER"]
        tweets = collector.get_tweets(query_set=query_set,
                                      twitter_args=collector.twitter_auth(),
                                      query_filter=query_filter)
        if tweets:
            processed_tweets = collector.process_tweets(tweets)
            if not processed_tweets.empty:
                # Bug fix: reset_index().drop(columns=["index"], inplace=True)
                # mutated a temporary copy and discarded it (a no-op).
                processed_tweets = processed_tweets.reset_index(drop=True)
                duplicity_subset = list(
                    set(processed_tweets.columns) - {"created_time"})
                processed_tweets.drop_duplicates(subset=duplicity_subset,
                                                 inplace=True)
                processed_tweets.rename(columns=c.TWITTER_COLS_MAP,
                                        inplace=True)
                processed_tweets = reorder_columns(processed_tweets)
                processed_tweets["source_product"] = "Twitter API"
                # DataFrame.append was removed in pandas 2.0; concat is the
                # supported equivalent.
                self.collected_data = pd.concat(
                    [self.collected_data, processed_tweets], ignore_index=True)
    def collect_news(self, locale):
        """Fetch, clean and append news articles for *locale* ('en-it' or 'it-it')."""
        if locale not in ["en-it", "it-it"]:
            return None
        news_sources = c.NEWS_SOURCES
        locale = locale.strip().lower()
        query_set = c.QUERIES
        if locale == "it-it":
            query_set = c.QUERIES_IT
        news_collection = collector.get_news(queries=query_set,
                                             sources=news_sources,
                                             news_api=collector.news_api_auth())
        if not news_collection.empty:
            # Same no-op reset_index fix as in collect_tweets.
            news_collection = news_collection.reset_index(drop=True)
            news_collection.rename(columns=c.NEWS_COLS_MAP, inplace=True)
            news_collection = reorder_columns(news_collection)
            news_collection["source_product"] = "News API"
            self.collected_data = pd.concat(
                [self.collected_data, news_collection], ignore_index=True)
if __name__ == "__main__":
    collection_object = DataCollector()
    try:
        # Gather both data sources for every supported locale.
        for loc in ["en-it", "it-it"]:
            collection_object.collect_tweets(locale=loc)
            collection_object.collect_news(locale=loc)
        final_df = collection_object.collected_data.copy()
        # Drop cross-source duplicates that share the same text body.
        final_df.drop_duplicates(subset=["text_data"], inplace=True)
        if not final_df.empty:
            final_df["source_date"] = pd.to_datetime(final_df["source_date"])
            print("\nSerializing the data frame into the database")
            # Keep a CSV snapshot next to the DB insert for auditability.
            csv_file_name = "Outputs_{}.csv".format(collector.today())
            final_df.to_csv(csv_file_name, index=False)
            db.dataframe_to_table(final_df)
    except Exception as e:
        print("\nERROR: Encountered exception in: {}".format(e))
|
import matplotlib.pyplot as plt
import numpy as np
"""
Bifurcations are transitions between dynamical states used in nonlinear dynamics.
This is a saddle-node bifurcation defined by
dx/dt = r-x^2
It has equilibrium points at x_eq = +/- sqrt(r)
and critical condition found by taking the derivative of dx/dt = F(x)
so we get
dF/dx = -2x
so the bifurcation occurs at x=x_eq, which is
dF/dx = 0
"""
def xeq1(r):
    """
    Stable equilibrium branch x = +sqrt(r) of dx/dt = r - x^2.
    """
    return np.sqrt(r)
def xeq2(r):
    """
    Unstable equilibrium branch x = -sqrt(r) of dx/dt = r - x^2.
    """
    # Bug fix: the unstable branch is the NEGATIVE root; the original
    # returned +sqrt(r), drawing both curves on top of each other even
    # though the surrounding text states x_eq = +/- sqrt(r).
    return -np.sqrt(r)
# Plot the saddle-node bifurcation diagram.
fig = plt.figure(figsize=(9,6))
ax1 = fig.add_subplot(1, 1, 1)
# Bug fix: linspace is only available as np.linspace here (no star import),
# so the bare name raised NameError.
domain = np.linspace(0, 10)
ax1.plot(domain, xeq1(domain), "b-", label = "stable equilibrium", linewidth = 3)
ax1.plot(domain, xeq2(domain), "r--", label = "unstable equilibrium", linewidth = 3)
ax1.legend(loc="upper left")
# Neutral equilibrium point
ax1.plot([0], [0], "go")
ax1.axis([-10, 10, -5, 5])
ax1.set_xlabel("r")
ax1.set_ylabel("x_eq")
ax1.set_title("Saddle-node bifurcation")
# Add black arrows indicating the attracting dynamics of the stable and the
# repelling dynamics of the unstable equilibrium point.
ax1.annotate("", xy=(-7, -4), xytext=(-7, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(-5, -4), xytext=(-5, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(-3, -4), xytext=(-3, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(-1, -4), xytext=(-1, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(1, -4), xytext=(1, -1.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(1, 0.7), xytext=(1, -0.7), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(1, 1.5), xytext=(1, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(3, -4), xytext=(3, -2), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(3, 1.5), xytext=(3, -1.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(3, 2), xytext=(3, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(5, -4), xytext=(5, -2.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(5, 2), xytext=(5, -2), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(5, 2.5), xytext=(5, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(7, -4), xytext=(7, -3), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(7, 2.3), xytext=(7, -2.3), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
ax1.annotate("", xy=(7, 3), xytext=(7, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
"""
A transcritical bifurcation happens when the equilibrium point "passes through" another one that exchanges their
stabilities. A supercritical pitchfork bifurcation can make a stable equilibrium point split into two stable
and one unstable equilibrium points.
In this case, we use equation dx/dt = rx - x^3 which has three equilibrium points x_eq = 0, +/- sqrt(r) with
the latter two points existing only for r >= 0.
"""
def xeq1(r):
    """
    First equilibrium branch, x = 0, of dx/dt = r*x - x^3.
    """
    # Bug fix: return an array shaped like r so that
    # plt.plot(domain, xeq1(domain)) gets x and y of equal length;
    # a bare scalar 0 makes matplotlib raise a dimension mismatch.
    return np.zeros_like(r, dtype=float)
def xeq2(r):
    """
    Second equilibrium branch, x = +sqrt(r) (real only for r >= 0).
    """
    return np.sqrt(r)
def xeq3(r):
    """
    Third equilibrium branch, x = -sqrt(r) (real only for r >= 0).
    """
    return -np.sqrt(r)
# Plot the supercritical pitchfork bifurcation diagram.
# Bug fix: use np.linspace (the bare name is undefined in this module).
domain1 = np.linspace(-10, 0)
domain2 = np.linspace(0, 10)
plt.plot(domain1, xeq1(domain1), "b-", linewidth = 3)
plt.plot(domain2, xeq1(domain2), "r--", linewidth = 3)
plt.plot(domain2, xeq2(domain2), "b-", linewidth = 3)
plt.plot(domain2, xeq3(domain2), "b-", linewidth = 3)
# Neutral equilibrium point
plt.plot([0], [0], "go")
plt.axis([-10, 10, -5, 5])
plt.xlabel("r")
plt.ylabel("x_eq")
plt.title("Supercritical pitchfork bifurcation")
# Add arrows.
plt.annotate("", xy=(0, -1), xytext=(0, -4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0, 1), xytext=(0, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-5, -0.5), xytext=(-5, -4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-5, 0.5), xytext=(-5, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(3, 1.5), xytext=(3, 0.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(3, -1.5), xytext=(3, -0.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(3, 2.2), xytext=(3, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(3, -2.2), xytext=(3, -4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(7, 2), xytext=(7, 0.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(7, -2), xytext=(7, -0.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(7, 3), xytext=(7, 4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(7, -3), xytext=(7, -4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
"""
A subcritical pitchfork bifurcation causes the unstable equilibrium point to split into two unstable
and one stable equilibriums.
The equation dx/dt = rx+x^3 has three equilibrium points x_eq = 0 and x_eq = +/- sqrt(-r) (the latter for r <= 0).
"""
def xeq1(r):
    """Equilibrium branch x = 0 of dx/dt = r*x + x^3."""
    # Bug fix: return an array shaped like r so it can be plotted against a
    # full domain (a scalar 0 breaks plt.plot's length check).
    return np.zeros_like(r, dtype=float)
def xeq2(r):
    # Upper unstable branch x = +sqrt(-r); real only for r <= 0.
    return np.sqrt(-r)
def xeq3(r):
    # Lower unstable branch x = -sqrt(-r); real only for r <= 0.
    return -np.sqrt(-r)
# Plot the subcritical pitchfork bifurcation diagram.
# Bug fix: use np.linspace (the bare name is undefined in this module).
domain1 = np.linspace(-10, 0)
domain2 = np.linspace(0, 10)
plt.plot(domain1, xeq1(domain1), "b-", linewidth = 3)
plt.plot(domain1, xeq2(domain1), "r--", linewidth = 3)
plt.plot(domain1, xeq3(domain1), "r--", linewidth = 3)
plt.plot(domain2, xeq1(domain2), "r--", linewidth = 3)
# Neutral equilibrium point
plt.plot([0], [0], "go")
plt.axis([-10, 10, -5, 5])
plt.xlabel("r")
plt.ylabel("x_eq")
plt.title("Subcritical pitchfork bifurcation")
# Black arrows
plt.annotate("", xy=(1, -4), xytext=(1, -1), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(1, 4), xytext=(1, 1), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(5, -4), xytext=(5, -0.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(5, 4), xytext=(5, 0.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-3, 0.5), xytext=(-3, 1.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-3, -0.5), xytext=(-3, -1.5), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-3, 4), xytext=(-3, 2.2), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-3, -4), xytext=(-3, -2.2), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-7, 0.5), xytext=(-7, 2), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-7, -0.5), xytext=(-7, -2), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-7, 4), xytext=(-7, 3), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-7, -4), xytext=(-7, -3), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
"""
Combined bifurcations use the system dx/dt = r+x-x^3 such that, when you solve dx/dt = 0 in terms of r, you get
r = -x+x^3 to draw the bifurcation diagram.
You can use the Jacobian matrix to get the stability information. This system shows hysteresis.
"""
def xeq1(r):
    """Solve dx/dt = 0 for r: the bifurcation curve r = x^3 - x (argument plays the role of x)."""
    return r**3 - r
# Plot the hysteresis diagram built from two saddle-node bifurcations.
# Bug fix: qualify linspace and sqrt with np. — the bare names are undefined
# in this module (only plt and np are imported).
domain1 = np.linspace(-1.3, -np.sqrt(1/3.))
domain2 = np.linspace(-np.sqrt(1/3.), np.sqrt(1/3.))
domain3 = np.linspace(np.sqrt(1/3.), 1.3)
plt.plot(xeq1(domain1), domain1, "b-", linewidth = 3)
plt.plot(xeq1(domain2), domain2, "r--", linewidth = 3)
plt.plot(xeq1(domain3), domain3, "b-", linewidth = 3)
plt.axis([-1, 1, -1.5, 1.5])
plt.xlabel("r")
plt.ylabel("x_eq")
plt.title("Combination of two saddle-node bifurcations")
plt.annotate("", xy=(0.75, 1.2), xytext=(0.75, -1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0.5, 1.1), xytext=(0.5, -1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0.5, 1.25), xytext=(0.5, 1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0.25, -0.9), xytext=(0.25, -1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0.25, -0.8), xytext=(0.25, -0.3), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0.25, 1), xytext=(0.25, -0.1), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0.25, 1.15), xytext=(0.25, 1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0, -1.05), xytext=(0, -1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0, -0.9), xytext=(0, -0.1), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0, 0.9), xytext=(0, 0.1), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(0, 1.05), xytext=(0, 1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-0.75, -1.2), xytext=(-0.75, 1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-0.5, -1.1), xytext=(-0.5, 1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-0.5, -1.25), xytext=(-0.5, -1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-0.25, 0.9), xytext=(-0.25, 1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-0.25, 0.8), xytext=(-0.25, 0.3), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-0.25, -1), xytext=(-0.25, 0.1), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
plt.annotate("", xy=(-0.25, -1.15), xytext=(-0.25, -1.4), arrowprops=dict(arrowstyle="->",connectionstyle="arc3",lw=1),)
"""
The Hopf bifurcation lets a limit cycle appear around the equilibrium point. It makes a cyclic, closed trajectory
in the phase space. The van der Pol oscillator shows this with the second-order differential equation
d^2x/dt^2 + r(x^2-1)dx/dt + x = 0
in which we introduce y = dx/dt to make the system first-order
dx/dt = y
dy/dt = -r(x^2-1)y-x with (0, 0) as the only equilibrium point of the system.
The Jacobian matrix is
[[0, 1], [-1, r]] and you can calculate the eigenvalues as
det([[0-λ, 1], [-1, r-λ]]) = 0
such that λ = (r +/- sqrt(r^2-4))/2
and the critical condition is Re(λ) = 0
This code will iterate through five values of r and show the appearance of the limit cycle at r = 0.
"""
# Integration time step for the Euler scheme below.
dt = 0.01
# prepare plots
fig = plt.figure(figsize=(18,6))
def plot_phase_space():
    """Euler-integrate the van der Pol system from (0.1, 0.1) and plot the orbit.

    Reads the module-level ``r`` (damping parameter) and ``dt`` (time step).
    """
    x, y = 0.1, 0.1
    xs, ys = [x], [y]
    for _ in range(10000):
        # Simultaneous Euler step: both updates use the pre-step (x, y).
        x, y = x + y * dt, y + (-r * (x**2 - 1) * y - x) * dt
        xs.append(x)
        ys.append(y)
    plt.plot(xs, ys)
    plt.axis("image")
    plt.axis([-3, 3, -3, 3])
    plt.title("r = " + str(r))
# Sweep the damping parameter: the limit cycle appears once r crosses 0.
rs = [-1, -0.1, 0, .1, 1]
for i in range(len(rs)):
    fig.add_subplot(1, len(rs), i + 1)
    r = rs[i]
    plot_phase_space()
|
from django.contrib.auth import get_user_model
from django.db import models
class Event(models.Model):
    """A calendar event belonging to a single user."""
    # Owner of the event; deleting the user cascades to their events.
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    name = models.CharField(max_length=255, verbose_name='nazwa')
    description = models.CharField(max_length=255, verbose_name='opis')
    start_date_time = models.DateTimeField(verbose_name='początek')
    end_date_time = models.DateTimeField(verbose_name='koniec')
    class Meta:
        # List events chronologically by their start time.
        ordering = ['start_date_time']
|
# From Alex Drlica Wagner
# https://cdcvs.fnal.gov/redmine/projects/des-sci-release/repository/entry/users/kadrlica/catalog_coadd/trunk/code/utils.py#L275
import warnings
from astroquery.vizier import Vizier
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.io.fits as pf
import numpy as np
def get_vizier_catalog(ra,dec,radius=None,**kwargs):
    """Query a Vizier catalog around (ra, dec).

    :param ra: right ascension in degrees
    :param dec: declination in degrees
    :param radius: search radius in degrees (must be provided; the default
                   None makes u.Quantity raise)
    :param kwargs: forwarded to astroquery.vizier.Vizier (catalog, columns, ...)
    :return: the first table of the query result
    """
    kwargs.setdefault('row_limit',-1)
    coord = SkyCoord(ra*u.deg,dec*u.deg)
    radius = u.Quantity(radius,u.deg)
    vizier = Vizier(**kwargs)
    # Bug fix: filterwarnings/resetwarnings mutated the process-wide warning
    # state and resetwarnings() wiped filters installed elsewhere;
    # catch_warnings restores the caller's filters on exit.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        tab = vizier.query_region(coord,radius)
    return tab[0]
def gaia_to_fits(ra, dec, radius, fitsname='gaia.cat'):
    """Fetch Gaia (DR1, catalog I/337) sources around (ra, dec) and write a FITS table.

    :param ra: right ascension in degrees
    :param dec: declination in degrees
    :param radius: search radius in degrees
    :param fitsname: output FITS file name
    """
    gaia = dict(catalog='I/337/gaia',
                columns=['RA_ICRS','DE_ICRS','e_RA_ICRS','e_DE_ICRS','<Gmag>'])
    c = get_vizier_catalog(ra,dec,radius,**gaia)
    # Positional uncertainty per source: the worse of RA/Dec errors (mas).
    err = np.maximum(c['e_RA_ICRS'],c['e_DE_ICRS'])
    maximum_gaia_error = 5. # Largest uncertainty to keep
    # Bug fix: use the named constant instead of repeating the magic 5.
    use = np.where(err < maximum_gaia_error)
    # Convert errors from mas to degrees for table
    hdu = pf.BinTableHDU.from_columns( [ \
        pf.Column(name='RA_ICRS', format='D', array=c['RA_ICRS'][use]),
        pf.Column(name='DE_ICRS', format='D', array=c['DE_ICRS'][use]),
        pf.Column(name='ERROR', format='E', array=err[use] * 0.001 / 3600.),
        pf.Column(name='GMAG', format='E',array=c['__Gmag_'][use])])
    hdu.header['RA'] = ra
    hdu.header['Dec'] = dec
    hdu.header['RADIUS'] = radius
    hdu.writeto(fitsname, overwrite=True)
    return
|
# -*- coding:utf-8 -*-
import time
def f1():
    """Brute-force search for Pythagorean triples summing to 1000 — O(n^3) baseline."""
    started = time.time()
    for a in range(0, 1001):
        for b in range(0, 1001):
            for c in range(0, 1001):
                if a + b + c == 1000 and a ** 2 + b ** 2 == c ** 2:
                    print('a:%d,b:%d,c:%d' % (a, b, c))
    finished = time.time()
    print('time cost: %s' % str(finished - started))
def f2():
    """Same search with c derived from a and b — O(n^2) improvement over f1."""
    started = time.time()
    for a in range(0, 1001):
        for b in range(0, 1001):
            # The sum constraint fixes c, removing the innermost loop.
            c = 1000 - a - b
            if a ** 2 + b ** 2 == c ** 2:
                print('a:%d,b:%d,c:%d' % (a, b, c))
    finished = time.time()
    print('time cost: %s' % str(finished - started))
def main():
    # f1()
    # f1 is the O(n^3) baseline kept for timing comparison; f2 is the fast variant.
    f2()
    pass
# Run the benchmark when executed as a script.
if __name__ == '__main__':
    main()
import numpy as np
from imageio import imwrite
import os
import os.path
def save_to_png(orig_im, mask, im_number, image_path):
    """
    Overlay a binary mask (segmentation) on an original image and save it
    as a .png file.

    The mask is rendered as translucent red (alpha-blended) over the masked
    pixels; unmasked pixels are copied unchanged.

    Arguments:
    orig_im - original image without segmentation (H x W x 3)
    mask - binary mask (segmentation); assumed square (H == W) — TODO confirm
    im_number - number of an image (used in the output file name)
    image_path - path of the original image (prefixed to build the output dir)
    """
    s = mask.shape[0]
    # Solid red layer wherever the mask is set.
    seg_color = np.zeros((s,s,3), dtype=np.float32)
    seg_color[:,:,0] = np.where(mask==1, 255, 0)
    alpha = 0.3
    overlayed = np.where(np.stack((mask==1, mask==1, mask==1), axis=-1), alpha*seg_color+(1-alpha)*orig_im, orig_im)
    new_path = "segm_" + image_path
    # Bug fix: create the directory race-free and build the output path with
    # os.path.join instead of the hard-coded Windows separator "\\".
    os.makedirs(new_path, exist_ok=True)
    out_name = str(im_number).zfill(5) + "_our.png"
    imwrite(os.path.join(new_path, out_name), overlayed)
# game.py
import os
import random
#from random import choice
from dotenv import load_dotenv
print("Rock, Paper, Scissors, Shoot!")

# Read the player's display name from the environment (.env supported).
load_dotenv()
PLAYER_NAME = os.getenv("PLAYER_NAME", default="Player One")

print("-------------------")
print(f"Welcome '{PLAYER_NAME}' to my Rock-Paper-Scissors game...")
print("-------------------")

options = ["rock", "paper", "scissors"]

# Ask for, and normalize, the player's choice.
# Bug fix: the prompt misspelled "scissors" as "scisscors".
user_choice = input("Please choose either 'rock', 'paper', 'scissors': ")
user_choice = user_choice.lower()
print(f"You chose: {user_choice}")

# Validate the selection; stop without picking a winner on bad input.
if user_choice not in options:
    print("OOPS, please choose a valid option and try again!")
    exit()

# Simulate the computer's choice.
computer_choice = random.choice(options)
print(f"The computer chose: {computer_choice}")

# Determine the winner: each key beats the value it maps to, which replaces
# the original six-way elif chain.
beats = {"rock": "scissors", "paper": "rock", "scissors": "paper"}
if computer_choice == user_choice:
    print("It's a tie!")
elif beats[user_choice] == computer_choice:
    print("You win! Congrats")
else:
    print("Oh! The computer won, that's ok!")

print("-------------------")
print("Thanks for playing. Please play again!")
# https://www.hackerrank.com/challenges/staircase/problem
def staircase(n):
    """Print a right-aligned staircase of '#' marks with *n* steps.

    Row i (1-based) consists of (n - i) spaces followed by i '#' characters.
    Replaces the original char-by-char concatenation loops with string
    multiplication — same output, idiomatic and linear per row.
    """
    for i in range(1, n + 1):
        print(' ' * (n - i) + '#' * i)
def staircase_hacker(n):
    """Print the staircase with a single print call per row."""
    for row in range(1, n + 1):
        # (n - row) leading spaces, then row hash marks.
        print(' ' * (n - row) + '#' * row)
# Demo: draw a 3-step staircase.
if __name__ == '__main__':
    staircase_hacker(3)
from keras.models import load_model ,Model
import numpy as np
from PIL import Image
from PIL import ImageEnhance
import cv2
import os
# Run the pre-trained CNN over each test image and save a per-pixel
# confidence map.
model=load_model('pre-trained_CNN_model.h5')
# Directory of test images to convert into the network's input format.
filepath='Image_test'
# Model that exposes the per-pixel probabilities from the 'logits' layer.
# Hoisted out of the loop: it only depends on `model`, which never changes.
intermediate_layer_model = Model(input=model.input, output=model.get_layer('logits').output)
for filename in os.listdir(filepath):
    imgfile=filepath+'/'+filename
    img = Image.open(imgfile)
    # Boost contrast before converting to grayscale.
    en_con = ImageEnhance.Contrast(img)
    contrast = 3
    img = en_con.enhance(contrast)
    img=img.convert('RGB')
    img = cv2.cvtColor(np.asarray(img),cv2.COLOR_RGB2GRAY)
    # Pad 5 px on every side so each original pixel has a full 11x11 patch.
    img=cv2.copyMakeBorder(img,5,5,5,5,cv2.BORDER_CONSTANT)
    img = np.array(img)
    (c, b) = np.shape(img)
    Data = []
    for i in range(5, c - 5):
        for j in range(5, b - 5):
            data = img[i - 5:i + 6, j - 5:j + 6]
            data = np.array([data])
            Data.append(data)
    Data = np.array(Data)
    Data = Data.reshape(-1, 11, 11, 1)
    Data = Data / 255.0
    # Bug fix: str.strip('.bmp') removes any of the characters '.', 'b', 'm',
    # 'p' from BOTH ends (e.g. 'map1.bmp' -> 'ap1'); splitext drops only the
    # extension.
    file_name = os.path.splitext(filename)[0]
    # Probability of class 1 for every pixel, scaled to 0-255.
    intermediate_output = intermediate_layer_model.predict(Data, batch_size=500, verbose=0)
    logits_img = [x[1] for x in intermediate_output]
    logits_img = (np.array(logits_img)) * 255
    logits_img=logits_img.astype(int)
    # NOTE(review): output size is hard-coded to 228x200 — assumes fixed-size
    # input images; confirm against the dataset.
    logits_img = logits_img.reshape(228, 200)
    cv2.imwrite('./Confidence map/'+file_name+'_confidence map.bmp',logits_img)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torchvision
import numpy as np
import os
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
from .voc import load_temporal_ens_id,load_temporal_files
class UnlabeledDataset(torchvision.datasets.coco.CocoDetection):
    """COCO-style dataset over unlabeled images for semi-supervised training.

    Ground-truth targets are a single dummy box; per-image pseudo-label boxes
    are loaded from ``temporal_saved_path`` via ``load_temporal_files``.
    """
    def __init__(
        self, ann_file, root,temporal_saved_path, transforms=None
    ):
        super(UnlabeledDataset, self).__init__(root,ann_file)
        # sort indices for reproducible results
        self.ids = sorted(self.ids)
        # Pre-selected subset of unlabeled image file names (R-101 selection).
        filter_ids = np.load(os.path.join(os.path.dirname(ann_file),'sel_unlabeled_ids_r101.npy'))#'sel_unlabeled_ids_v2.npy'
        ids = []
        #save ids ,for load speed
        # for img_id in self.ids:
        #     img_info = self.coco.imgs[img_id]
        #     if any([img_info['width']<400,img_info['height']<400]):
        #         continue
        #     if img_info['file_name'] not in filter_ids:
        #         continue
        #     ids.append(img_id)
        # self.ids = ids
        # np.save(os.path.join(os.path.dirname(ann_file),'sel_unlabeled_ids_r101_map.npy'),ids) #'unlabeled_ids_map_v2.npy'
        #load ids ,for load speed
        ids = np.load(os.path.join(os.path.dirname(ann_file),'sel_unlabeled_ids_r101_map.npy')).tolist()
        self.ids = ids
        self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
        self._transforms = transforms
        self.temporal_saved_path = temporal_saved_path
        # Eagerly fetch item 0 as a sanity check of paths and annotations.
        self.__getitem__(0)
    def __getitem__(self, idx):
        """Return (transformed image, dummy target, [dataset root, pseudo boxes, idx])."""
        #idx = 8418
        img_infos = self.get_img_info(idx)
        img, anno = super(UnlabeledDataset, self).__getitem__(idx)
        # Placeholder target; actual supervision comes from the temporal boxes below.
        target = BoxList(torch.zeros([1,4]),img.size, mode="xyxy")
        with torch.no_grad():
            if self._transforms is not None:
                img, target = self._transforms(img, target)
        try:
            bboxes = load_temporal_files(img_infos['file_name'].replace('.jpg',''),self.temporal_saved_path,ens_num=5)
        except Exception as e:
            print('error in file ',img_infos['file_name'].replace('.jpg',''))
            raise e
        return img, target, [self.root.replace('datasets/',''),bboxes,idx]
    def get_img_info(self, index):
        """Return the COCO image-info dict for dataset position *index*."""
        img_id = self.id_to_img_map[index]
        img_data = self.coco.imgs[img_id]
        return img_data
|
import os.path
import random
import numpy as np
import pandas as pd
from collections import deque
from keras import optimizers
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers import BatchNormalization
import utilmodel as utm
# directory for saving model files
MODEL_PATH = 'model'
# exist_ok=True is race-free and replaces the `== False` existence check.
os.makedirs(MODEL_PATH, exist_ok=True)
class DeepQAgent:
    """Deep Q-learning agent: epsilon-greedy action selection plus experience replay."""
    # config parameters here
    GAMMA = 0.95             # discount factor for the Q-function update
    MAX_REPLAY_MEMORY = 200  # maximum of previous transitions (previous states) to remember
    OBSERVATION = 100.       # number of steps to observe before training
    BATCH_SIZE = 100         # batch size for training the neural network model

    def __init__(self,env):
        # Bug fix: the replay memory deque and the exploration rate used to be
        # CLASS attributes, so every agent instance silently shared one replay
        # buffer and annealing state; both are now per-instance.
        self.D = deque()     # replay memory of (state, action, reward, nextstate, terminate)
        self.epsilon = 1.0   # exploration rate
        self.h5_file = os.path.join(MODEL_PATH,'%s_weights.h5' % (env.symbol))
        self.model = self._buildModel(env.num_features, env.num_action)
        self.json_file = os.path.join(MODEL_PATH,'%s_structure.json' % (env.symbol))
        self.num_action = env.num_action # e.g. 2 actions: BUY or SELL

    # This is the neural network model for the Q-function
    def _buildModel(self, num_features, num_output):
        """Build the Q-network (features -> Q score per action), warm-starting from disk if possible."""
        model = Sequential()
        model.add(Dense(input_shape=(num_features, ), units=8))
        model.add(BatchNormalization(trainable = True))
        model.add(Dense(units=8, activation='relu'))
        model.add(BatchNormalization(trainable = True))
        model.add(Dense(num_output, activation='linear'))
        print("\nBuild model...")
        print(model.summary())
        try:
            if os.path.exists(self.h5_file):
                print("\nLoaded model(weights) from file: %s" % (self.h5_file))
                model.load_weights(self.h5_file)
        except Exception as inst:
            # Warm start is best-effort: fall back to fresh weights on any load error.
            print(inst)
        opt = optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(optimizer=opt, loss='mse', metrics=['accuracy'])
        return model

    def clearMemory(self):
        """Drop all stored transitions from the replay memory."""
        self.D.clear()

    def getAction(self, state):
        """Epsilon-greedy policy: random action with probability epsilon, else best Q."""
        if random.random() <= self.epsilon:
            # With probability epsilon select a random action (exploration)
            return random.randrange(self.num_action)
        # Otherwise exploit: pick the action with the maximum Q(state, action)
        best_index, _ = self.getBestAction(state)
        return best_index

    def getBestAction(self, state):
        """Return (argmax action index, raw Q scores) for *state*."""
        Q_scores = self.model.predict(state)
        # np.argmax() returns the index of the maximum value
        best_index = np.argmax(Q_scores)
        return best_index, Q_scores

    def saveExperience(self, state, action_index, reward, nextstate, terminate):
        """Store one transition, evicting the oldest once MAX_REPLAY_MEMORY is exceeded."""
        self.D.append((state, action_index, reward, nextstate, terminate))
        if len(self.D) > self.MAX_REPLAY_MEMORY:
            self.D.popleft() # remove the oldest states

    def _updateQScore(self, minibatch):
        """One gradient step toward the Bellman targets computed from *minibatch*."""
        X_train, y_train = [], []
        for memory in minibatch:
            state, action_index, reward, nextstate, terminate = memory
            Q_scores = self.model.predict(state)[0] # output shape is [1, num_action]
            if terminate:
                Q_scores[action_index] = reward
            else:
                # Bellman update:
                # Q(state, action) = reward + gamma * max(Q(next_state, all_actions))
                allNextQ = self.model.predict(nextstate) # output shape is [1, num_action]
                Q_scores[action_index] = reward + self.GAMMA * np.max(allNextQ)
            X_train.append(state)
            y_train.append(Q_scores)
        # Train the neural network to remember (learn) the new Q scores
        X_train = np.squeeze(np.array(X_train), axis=1)
        y_train = np.array(y_train)
        # Single gradient update over one batch of samples
        # (equivalent to model.fit(X_train, y_train, epochs=1, verbose=0))
        self.model.train_on_batch(X_train, y_train)

    def replayExperienceWhen(self, step_observe):
        """Run one experience-replay training step once past the observation phase."""
        if step_observe > self.OBSERVATION:
            # Sample a minibatch and refresh its Q targets
            minibatch = random.sample(self.D, self.BATCH_SIZE)
            self._updateQScore(minibatch)

    def reduceExplore(self, constant):
        """Linearly anneal epsilon toward the 0.1 floor by 1/constant per call."""
        if self.epsilon > 0.1: #decrement epsilon (exploration rate) over time
            self.epsilon -= (1.0/constant)

    def saveModel(self):
        """Persist the model structure (json) and weights (h5) to disk."""
        utm.saveTrainedModel(self.model, self.json_file, self.h5_file)
|
a=2 # global
b=5 # global
def outer(): # enclosing function
    global b
    # Without the "global b" declaration above, this augmented assignment would
    # raise UnboundLocalError (local variable 'b' referenced before assignment).
    b=b+6
    b=6 # rebind the global to 6
    print(b) # b=6
    def inner(): # nested function or inner function
        c=5 # local variable
        print(a) # 2 (read from the global scope)
        print(b) # 6 (the global, as rebound by outer above)
    inner()
outer()
print(a,b)
# global variables cannot be modified within functions without the global keyword
|
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from Pruefung import Pruefung
from Aufsicht import Aufsicht
from Raum import Raum
from SemesterGruppe import SemesterGruppe
from Studiengang import Studiengang
from ZeitSlot import ZeitSlot
from Base import Base
from Helferfunktionen import *
# Number of examination days available for scheduling
PRUEFUNGSTAGE = 7
# Number of time slots on each examination day
ZEITSLOTS_PRO_TAG = 4
# First day of the examination period, followed by the remaining days
START = datetime(2021, 7, 19)
PT2 = datetime(2021, 7, 20)
PT3 = datetime(2021, 7, 21)
PT4 = datetime(2021, 7, 22)
PT5 = datetime(2021, 7, 23)
PT6 = datetime(2021, 7, 26)
PT7 = datetime(2021, 7, 27)
# Daily slot times (start - end)
ZEITEN = ["8:15 - 9:45","10:00 - 11:30","12:00 - 13:30", "14:00 - 15:30"]
TAGE = []
TAGE.extend([START,PT2,PT3,PT4,PT5,PT6,PT7])
# SQLite database file name
NAME = "PruefungsPlaner.db"
def init():
    """Open the SQLite planning database, create missing tables, and cache
    every table into the module-level lists served by the getters below."""
    global session
    global aufsicht
    global pruefungen
    global raeume
    global semesterGruppe
    global studiengang
    global zeitSlots
    # check_same_thread=False: the session object is shared across threads
    engine = create_engine("sqlite:///" + NAME + "?check_same_thread=False", echo=True)
    Session = sessionmaker()
    Session.configure(bind=engine)
    session = Session()
    Base.metadata.create_all(engine)
    # load all rows eagerly into memory
    aufsicht = list(session.query(Aufsicht).all())
    pruefungen = list(session.query(Pruefung).all())
    raeume = list(session.query(Raum).all())
    zeitSlots = list(session.query(ZeitSlot).all())
    semesterGruppe = list(session.query(SemesterGruppe).all())
    studiengang = list(session.query(Studiengang).all())
def getSession():
    """
    :return: Das Session Objekt (populated by init())
    """
    return session
def getRaeume():
    """
    :return: Alle Raum Objekte (cached list loaded by init())
    """
    return raeume
def getRaumByID(id):
    """
    :param id: raum ID
    :return: raum mit der ID id
    """
    # NOTE(review): indexes the cached list positionally — assumes room IDs
    # coincide with 0-based list positions; confirm against the Raum table.
    return getRaeume()[id]
def getAufsichten():
    """
    :return: Alle Aufsicht Objekte (cached list loaded by init())
    """
    return aufsicht
def getPruefungen():
    """
    :return: Alle Prüfungen (cached list loaded by init())
    """
    return pruefungen
def getZeitSlots():
    """
    :return: Alle ZeitSlot Objekte (cached list loaded by init())
    """
    return zeitSlots
def getZeitSlotProTag():
    """
    Group the time slots by exam day.

    :return: Liste an Listen — one inner list of ZeitSlot objects per day
    """
    tage = []
    # NOTE(review): range(1, PRUEFUNGSTAGE) covers day numbers 1..PRUEFUNGSTAGE-1;
    # if days are numbered 1..PRUEFUNGSTAGE the last day is skipped — confirm.
    for i in range(1, PRUEFUNGSTAGE):
        tag = list(filter(lambda z: z.pruefungstag_nummer == i, getZeitSlots()))
        tage.append(tag)
    # Bug fix: the result list was built but never returned (the function
    # silently returned None despite its documented return value).
    return tage
def getStudiengaenge():
    """
    :return: Alle Studiengang Objekte (cached list loaded by init())
    """
    return studiengang
def getSemesterGruppen():
    """
    :return: Alle SemesterGruppen Objekte (cached list loaded by init())
    """
    return semesterGruppe
|
"""Module for the FeatureNGram class"""
import logging
import sys
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
streamhandler = logging.StreamHandler(stream=sys.stderr)
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
class FeatureNgram(object):
    """Represents an ngram attribute. The value of such an attribute is a
    list/sequence of things that can be represented by embeddings."""

    def __init__(self, fname, attrinfo, featurestats, vocab):
        """Create the instance from the given meta info of an input feature.

        :param fname: name of the feature
        :param attrinfo: meta information about the original attribute
        :param featurestats: statistics gathered for this feature
        :param vocab: vocabulary used to map each string to an index
        """
        logger.debug("Creating FeatureNgram instance for fname/attrinfo=%r/%r", fname, attrinfo)
        self.fname = fname
        self.attrinfo = attrinfo
        self.featurestats = featurestats
        self.vocab = vocab

    def type_converted(self):
        """Return the name of the type of information of the feature, once it is converted to
        internal format."""
        return "indexlist"

    def type_original(self):
        """Return the name of the type of information of the original feature."""
        return "ngram"

    def __call__(self, value, normalize=None):
        """Convert a value of the expected type for this feature to a value that can be
        fed into the corresponding input unit of the network.

        :param value: a list of strings (the ngram)
        :param normalize: must be falsy; normalization is meaningless for ngrams
        :raises ValueError: if normalization is requested
        :raises TypeError: if value is not a list
        """
        # Bug fix: raise specific exception types instead of bare Exception.
        # ValueError/TypeError are Exception subclasses, so existing callers
        # that catch Exception keep working.
        if normalize:
            raise ValueError("Normalization does not make sense for ngram features")
        # For an ngram we expect the value to be a list, in which case we
        # create a new list with the string indices of the values.
        if isinstance(value, list):
            return [self.vocab.string2idx(v) for v in value]
        raise TypeError(
            "Value for converting FeatureNgram not a list but {} of type {}".format(value, type(value)))

    def __str__(self):
        return "FeatureNgram(name=%s)" % self.fname

    def __repr__(self):
        return "FeatureNgram(name=%r)" % self.fname
|
'''
Project 2 - Scores Analysis and Bar Chart - Spring 2020
Author: <Nick Cerne, ncerne00>
This program reads a file of exam scores, reports summary statistics
(maximum, minimum, average, count), and draws a turtle-graphics bar chart
of the grade distribution.
I have neither given nor received unauthorized assistance on this assignment.
Signed: <Nicholas Richard Cerne>
'''
import turtle
def draw_bar(integer):
    """Draw a filled orange bar whose length is integer * 20 pixels.

    :param integer: count being charted; scales the bar length
    """
    turtle.pensize(4)
    i = 0
    turtle.fillcolor("orange")
    while i <= 2:  # trace the bar outline (three passes)
        turtle.begin_fill()
        turtle.forward(integer * 20)
        turtle.right(90)
        turtle.forward(50)
        turtle.right(90)
        i += 1
    # Bug fix: 'turtle.end_fill' was referenced without parentheses, so the
    # call never happened and the bar was never actually filled.
    turtle.end_fill()
def position_turtle(integer):
    """Return to the bar's start: retrace integer * 20 pixels, then turn
    around so the next bar is drawn from the same baseline."""
    turtle.pensize(4)
    turtle.forward(integer * 20)
    turtle.right(180)
def turtle_write(string):
    """Advance 45 pixels and write the given axis label at the new position."""
    turtle.forward(45)
    turtle.write(string)
# Main driver: repeatedly read a scores file, report statistics, and draw a
# bar chart of the grade distribution with turtle graphics.
compute_a_file = input("Compute a scores file? y / n ")
while compute_a_file == "y":
    which_file = input("What is the name of your file? ")
    score = open(which_file, "r")
    highest_score = 0
    a_list = []
    count_90s = 0
    count_80s = 0
    count_70s = 0
    count_60s = 0
    below_60 = 0
    sc = score.readlines()[1:]  # all score lines (first line is the title)
    # Bug fix: sc already excludes the header line, so the score count is
    # len(sc); the original len(sc[1:]) dropped one score and skewed the
    # reported count and average.
    num_scores_1 = len(sc)
    score.seek(0)
    assignment = score.readline()  # the title/header line
    score.close()  # bug fix: the file handle was never closed
    count = 0
    max_score = 0    # renamed from 'max' to avoid shadowing the builtin
    min_score = 100  # renamed from 'min'
    total = 0        # renamed from 'sum'
    for line in sc:
        num = int(line.split('\n')[0])
        count += 1
        if max_score < num:
            max_score = num
        if min_score > num:
            min_score = num
        if num >= 90:
            count_90s += 1
        elif 80 <= num < 90:
            count_80s += 1
        elif 70 <= num < 80:
            count_70s += 1
        elif 60 <= num < 70:
            count_60s += 1
        else:
            below_60 += 1
        total += num
    # robustness: avoid ZeroDivisionError on a file with no scores
    average = total / num_scores_1 if num_scores_1 else 0
    print("Your results for", assignment, end="")
    print("Your maximum is", max_score)
    print("Your minimum is", min_score)
    print("Your average is", round(average, 2))
    print("The number of scores in", num_scores_1)
    # draw the distribution chart, one bar per grade bucket
    turtle.penup()
    turtle.goto(-125, -250)
    turtle.pendown()
    turtle.left(90)
    draw_bar(count_90s)
    position_turtle(count_90s)
    draw_bar(count_80s)
    position_turtle(count_80s)
    draw_bar(count_70s)
    position_turtle(count_70s)
    draw_bar(count_60s)
    position_turtle(count_60s)
    draw_bar(below_60)
    position_turtle(below_60)
    turtle.penup()
    turtle.forward(5)
    turtle.left(90)
    turtle_write("< 60")
    turtle_write("60s")
    turtle_write("70s")
    turtle_write("80s")
    turtle_write("90s")
    print(count_90s)
    print(count_80s)
    print(count_70s)
    print(count)
    compute_a_file = input("Compute another file? y / n")
else:
    # executed once the while condition becomes false (no 'break' above)
    print("Finished")
#!/usr/bin/env python3
#by will
#background is background1
import pygame
import sys
import os
'''
Objects
'''
class Platform(pygame.sprite.Sprite):
    """A static platform block placed at (xloc, yloc) with size imgw x imgh."""

    # Bug fix: the original read "def__init__" (missing space — a syntax
    # error), and it referenced 'img' without accepting it as a parameter
    # even though level1() passes an image path as the fifth argument.
    def __init__(self, xloc, yloc, imgw, imgh, img):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([imgw, imgh])
        self.image.convert_alpha()
        self.image.set_colorkey(alpha)  # module-level transparency colorkey
        self.blockpic = pygame.image.load(img).convert()
        self.rect = self.image.get_rect()
        self.rect.y = yloc
        self.rect.x = xloc
        # NOTE(review): blockpic is loaded but never blitted onto self.image,
        # so the platform renders as a plain surface — confirm intent.

    @staticmethod
    def level1():
        """Build and return the sprite group of platforms for level 1."""
        platform_list = pygame.sprite.Group()
        block = Platform(0, 591, 500, 77, os.path.join('images', 'block0.png'))
        platform_list.add(block)  # after each block
        return platform_list  # at end of function
#spawn an enemy
class Enemy(pygame.sprite.Sprite):
    """An enemy sprite loaded from an image file and placed at (x, y)."""
    def __init__(self, x, y, img):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(os.path.join('images', img))
        self.image.convert_alpha()
        self.image.set_colorkey(alpha)  # module-level transparency colorkey
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
class Player(pygame.sprite.Sprite):
    """The user-controlled sprite; tracks momentum, score and collisions."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.momentumX = 0
        self.momentumY = 0
        # Bug fix: score was only assigned inside a move() branch, so
        # update() could raise AttributeError before it was ever set.
        self.score = 0
        self.images = []
        img = pygame.image.load(os.path.join('images', 'hero.png')).convert()
        img.convert_alpha()
        img.set_colorkey(alpha)
        self.images.append(img)
        self.image = self.images[0]
        self.rect = self.image.get_rect()
        self.counter = 0

    def move(self):
        """Animation-counter driven movement (two phases of 30 frames)."""
        if self.counter >= 0 and self.counter <= 30:
            self.rect.x += 2
        elif self.counter >= 30 and self.counter <= 60:
            self.score = 0  # set score
        # NOTE(review): self.counter is never incremented anywhere visible,
        # so only the first branch can run — confirm intent.

    def control(self, x, y):
        """Add (x, y) to the player's momentum."""
        self.momentumX += x
        self.momentumY += y

    def update(self, enemy_list, platform_list=None):
        """Advance by the current momentum and resolve collisions.

        platform_list defaults to None (backward compatible: an existing
        call site passes only the enemy list); platform collisions are
        skipped when it is not provided.
        """
        currentX = self.rect.x
        self.rect.x = currentX + self.momentumX
        currentY = self.rect.y
        self.rect.y = currentY + self.momentumY
        # each enemy touched costs one point
        enemy_hit_list = pygame.sprite.spritecollide(self, enemy_list, False)
        for enemy in enemy_hit_list:
            self.score -= 1
            print(self.score)
        if platform_list is None:
            return
        # Bug fix: 'false' (NameError) -> False
        block_hit_list = pygame.sprite.spritecollide(self, platform_list, False)
        if self.momentumX > 0:
            for block in block_hit_list:
                self.rect.y = currentY
                self.rect.x = currentX + 9
                self.momentumY = 0
        if self.momentumY > 0:
            for block in block_hit_list:
                self.rect.y = currentY
                # Bug fix: the original assigned self.rect.momentumY, which
                # only created a stray attribute on the rect object.
                self.momentumY = 0

    def gravity(self):
        """Apply constant downward acceleration; clamp at the floor."""
        self.momentumY += 3.2
        if self.rect.y > 960 and self.momentumY >= 0:
            self.momentumY = 0
            self.rect.y = screenY - 20  # screenY is a module-level constant
# --- Setup ---
alpha = (0, 0, 0)  # colorkey used for sprite transparency
black = (0, 0, 0)
white = (255, 255, 255)
screenX = 960
screenY = 720
fps = 40  # frame rate
afps = 4  # animation cycles
clock = pygame.time.Clock()
pygame.init()
main = True
screen = pygame.display.set_mode([screenX, screenY])
backdrop = pygame.image.load(os.path.join('images', 'background1.png')).convert()
backdropRect = screen.get_rect()
platform_list = Platform.level1()
player = Player()  # spawn player
player.rect.x = 0
player.rect.y = 0
movingsprites = pygame.sprite.Group()
movingsprites.add(player)
movesteps = 10  # how fast the players steps are
# enemy code
enemy = Enemy(100, 50, 'enemy.png')  # spawn enemy
enemy_list = pygame.sprite.Group()
enemy_list.add(enemy)  # add enemy to group

# --- Main loop ---
while main:
    for event in pygame.event.get():
        # Bug fix: pygame.quit()/sys.exit() ran before 'main = False', which
        # made that assignment unreachable; now the loop ends cleanly and we
        # shut down exactly once after it.
        if event.type == pygame.QUIT:
            main = False
        if event.type == pygame.KEYUP and event.key == ord('q'):
            main = False
    screen.blit(backdrop, backdropRect)
    # Bug fix: Player.update requires the platform group as well.
    player.update(enemy_list, platform_list)
    movingsprites.draw(screen)
    enemy_list.draw(screen)  # refresh enemy
    pygame.display.flip()
    clock.tick(fps)
pygame.quit()
sys.exit()
|
import cv2
import time
import serial
cap = cv2.VideoCapture(1)
ser = serial.Serial('/dev/cu.usbserial-1460', 9600, timeout=0.5)
def all_led_off():
    """Send the 'alloff' command over the global serial port, with settling
    delays before and after the write."""
    time.sleep(1.5)
    led = 'alloff'
    ser.write(led.encode())
    time.sleep(1.5)
def all_led_on():
    """Send the 'allon' command over the global serial port, with settling
    delays before and after the write."""
    time.sleep(1.5)
    led = 'allon'
    ser.write(led.encode())
    time.sleep(1.5)
def take_picture(url):
    """Grab one frame from the global capture device, display it, and write
    it to the path `url`.

    Returns -1 when 'q' was pressed; otherwise returns None implicitly.
    """
    ret, frame = cap.read()
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    cv2.imshow('frame', rgb)
    # NOTE(review): waitKey returns -1 (truthy) when no key is pressed, so
    # this condition is effectively always true — confirm intent.
    if cv2.waitKey(1):
        out = cv2.imwrite(url, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        return -1
# Capture sequence: one frame with LEDs on, one with LEDs off, then clean up.
all_led_off()
index = 0
#while(True):  # NOTE(review): loop disabled — the sequence runs exactly once
all_led_on()
ret = take_picture('images/test/'+str(index)+'.png')
index += 1
time.sleep(2)
all_led_off()
ret = take_picture('images/test/'+str(index)+'.png')
index += 1
time.sleep(2)
# release the camera and close the preview windows
cap.release()
cv2.destroyAllWindows()
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.views.generic import *
from labApp.forms import*
# Create your views here.
class MainPage(TemplateView):
    """Render the static home page."""
    template_name = 'home.html'
class DepartmentsView(ListView):
    """Paginated list of all departments (4 per page)."""
    model = Departments
    paginate_by = 4
    template_name = 'departments.html'
    context_object_name = 'departments_list'
class DepartmentView(View):
    """Department detail page: lists members and lets a logged-in user join
    via the 'mybtn' GET action."""

    def get(self, request, id):
        department = Departments.objects.get(id=id)
        # Improvement: filter membership rows in the database instead of
        # scanning every Orders row in Python.
        member_list = []
        for member in Orders.objects.filter(department_id=id):
            user = User.objects.get(id=member.user_id_id)
            member_list.append({'name': user.first_name, 'lastname': user.last_name})
        # Bug fix: 'err' was declared global, so it was shared between
        # concurrent requests/threads; a plain local is correct here.
        err = ''
        if 'mybtn' in request.GET:
            # NOTE(review): is_authenticated is a property (not callable) on
            # Django >= 1.10 — confirm the project's Django version.
            if request.user.is_authenticated():
                current_user = request.user.id
                joined_ids = [o.user_id_id for o in Orders.objects.filter(department_id=id)]
                if current_user not in joined_ids:
                    Orders.objects.create_order(department_id=department, user_id=request.user, status=True)
                else:
                    err = "You are already a member of this department"
            else:
                err = "You must be registered to join the department!!"
        return render(request, 'department.html', {"department": department, "member_list": member_list, 'a': err})
class AddDepartmentView(CreateView):
    """Form view for creating a department."""
    form_class = AddDepartment
    template_name = 'add_department.html'
    success_url = '/department/'
    def get_success_url(self):
        """Redirect to the newly created department's detail page."""
        url = '/department/' + str(self.object.id)
        return url
class Registration(FormView):
    """Sign-up view: on a valid form, create the user and log them in."""
    template_name = 'registration.html'
    form_class = RegistrationForm
    success_url = '/success/'
    def post(self, request, *args, **kwargs):
        form = RegistrationForm(data=request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)
            return redirect(self.success_url)
        # invalid form: re-render the page with validation errors
        return render(request, self.template_name, {'form': form})
class Autorization(FormView):
    """Login view (class name keeps the original spelling; it may be
    referenced by URL configuration elsewhere)."""
    template_name = 'autorization.html'
    form_class = AuthorizationForm
    success_url = '/departments'
    def post(self, request, *args, **kwargs):
        form = AuthorizationForm(data=request.POST)
        if form.is_valid():
            user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
            if user is not None and user.is_active:
                login(request, user)
                return redirect(self.success_url)
        # invalid form or bad/inactive credentials: re-render the login page
        return render(request, self.template_name, {'form': form})
@login_required(login_url='/error/')
def login_success(request):
    """Post-login landing: bounce authenticated users to the departments list."""
    return HttpResponseRedirect('/departments')
def logout_view(request):
    """Log the user out and redirect to the /error/ landing page."""
    logout(request)
    return HttpResponseRedirect('/error/')
def error_auth(request):
    """Fallback page for unauthenticated access: renders the home template."""
    return render(request, 'home.html')
#! /usr/bin/env python
"""Checker for thesis-with-multiple-authors."""
from __future__ import annotations
import colrev.qm.quality_model
# pylint: disable=too-few-public-methods
class ThesisWithMultipleAuthorsChecker:
    """The ThesisWithMultipleAuthorsChecker"""

    msg = "thesis-with-multiple-authors"

    def __init__(self, quality_model: colrev.qm.quality_model.QualityModel) -> None:
        self.quality_model = quality_model

    def run(self, *, record: colrev.record.Record) -> None:
        """Run the thesis-with-multiple-authors checks"""
        if self.__multiple_authored_thesis(record=record):
            record.add_masterdata_provenance_note(key="author", note=self.msg)
        else:
            record.remove_masterdata_provenance_note(key="author", note=self.msg)

    def __multiple_authored_thesis(self, *, record: colrev.record.Record) -> bool:
        # A thesis entry type combined with an " and "-joined author list
        # indicates (suspicious) multiple authorship.
        entry_type = record.data["ENTRYTYPE"]
        authors = record.data.get("author", "")
        return "thesis" in entry_type and " and " in authors
def register(quality_model: colrev.qm.quality_model.QualityModel) -> None:
    """Register the checker"""
    # entry point called by the quality model's plugin discovery
    quality_model.register_checker(ThesisWithMultipleAuthorsChecker(quality_model))
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
def abre_archivo(fn):
    """Parse an open CSV file object into a list of rows of floats.

    :param fn: an open text-file-like object with comma-separated numbers
    :return: list of rows, each a list of floats
    """
    rows = []
    for raw_line in fn.readlines():
        fields = raw_line.rstrip('\n').split(',')
        rows.append([float(field) for field in fields])
    return rows
# Load the distribution data and extract two interleaved series of 99 samples.
g = open('datosdist_nscsv.csv', 'r')
# Bug fix: the data was loaded into 'array' but then indexed through the
# undefined name 'datosdist_nscsv'; 2-D indexing also requires an ndarray,
# not a list of lists.
array = np.array(abre_archivo(g))
g.close()  # bug fix: the file handle was never closed
# series 1: every 15th row starting at 5941 (columns: 1=p, 2=s, 3=ns)
rows1 = 5941 + 15 * np.arange(99)
p1 = array[rows1, 1]
ns1 = array[rows1, 3]
s1 = array[rows1, 2]
tau = 1.64  # critical exponent used for the rescaling
f1 = ns1 / (s1 ** (-tau))
pc = 0.57  # critical probability
z1 = (s1 ** 0.39) * (p1 - pc)
# series 2: the rows immediately after those of series 1
rows2 = rows1 + 1
p2 = array[rows2, 1]
ns2 = array[rows2, 3]
s2 = array[rows2, 2]
f2 = ns2 / (s2 ** (-tau))
z2 = (s2 ** 0.39) * (p2 - pc)
|
# -*- coding: utf-8 -*-
'''
@project: Pycharm_project
@Time : 2019/6/27 17:19
@month : 六月
@Author : mhm
@FileName: 1、用栈实现队列.py
@Software: PyCharm
'''
'''
栈的顺序为后进先出,而队列的顺序为先进先出。
使用两个栈实现队列,一个元素需要经过两个栈才能出队列,
在经过第一个栈时元素顺序被反转,
经过第二个栈时再次被反转,此时就是先进先出顺序。
使用栈实现队列的下列操作:
push(x) – 将一个元素放入队列的尾部。
pop() – 从队列首部移除元素。
peek() – 返回队列首部的元素。
empty() – 返回队列是否为空。
注意:
你只能使用标准的栈操作-- 也就是只有push to top, peek/pop from top, size, 和 is empty 操作是合法的。
你所使用的语言也许不支持栈。你可以使用 list 或者 deque (双端队列) 来模拟一个栈,只要是标准的栈操作即可。
假设所有操作都是有效的 (例如,一个空的队列不会调用 pop 或者 peek 操作)
'''
class MyQueue(object):
    """FIFO queue implemented with two LIFO stacks.

    stack1 receives pushes (newest on top); stack2 holds elements in
    reversed, i.e. FIFO, order (oldest on top). Elements migrate from
    stack1 to stack2 only when stack2 runs dry, so each element is moved at
    most once and all operations are amortized O(1). (The original shuttled
    every element to stack2 and back on every pop, costing O(n) per call.)
    """

    def __init__(self):
        self.stack1 = []  # in-box
        self.stack2 = []  # out-box

    def push(self, x):
        """Append x to the tail of the queue."""
        self.stack1.append(x)

    def _shift(self):
        """Refill the out-box from the in-box when the out-box is empty."""
        if not self.stack2:
            while self.stack1:
                self.stack2.append(self.stack1.pop())

    def pop(self):
        """Remove and return the element at the head of the queue."""
        self._shift()
        return self.stack2.pop()

    def peek(self):
        """Return the element at the head of the queue without removing it."""
        self._shift()
        return self.stack2[-1]

    def empty(self):
        """Return True if the queue holds no elements."""
        return not (self.stack1 or self.stack2)
|
#!/usr/bin/python
#
# Copyright (c) 2015 MoreOptions. All rights reserved.
#
# Author: ankush@moreoption.co
#
# This class implements the helper functions for flipkart api
#
import os
import pycurl
import string
from StringIO import StringIO
class Request(object):
    """Thin pycurl wrapper for issuing GET requests.

    NOTE(review): Python 2 code — it relies on the StringIO module import.
    """
    def __init__(self):
        pass
    def send_request(self, url, headers):
        """Fetch `url` with the given header list and return the response body.

        :param url: request URL; spaces are percent-encoded (other unsafe
            characters are passed through unescaped)
        :param headers: list of raw header strings for HTTPHEADER
        :return: response body as a string
        """
        buff = StringIO()
        get_conn = pycurl.Curl()
        get_conn.setopt(get_conn.URL, url.replace(" ", "%20"))
        get_conn.setopt(get_conn.FOLLOWLOCATION, 1)  # follow HTTP redirects
        get_conn.setopt(get_conn.HTTPHEADER, headers)
        get_conn.setopt(get_conn.WRITEFUNCTION, buff.write)
        get_conn.perform()
        get_conn.close()
        return buff.getvalue()
|
from __future__ import print_function
import random,os,sys,binascii
from Crypto.Util.number import isPrime
from decimal import *
try:
input = raw_input
except:
pass
getcontext().prec = 3000
def keystream(key):
    """Yield an endless bit stream derived from powers of the Decimal `key`.

    Parameters p (a small prime exponent step) and e (the starting exponent)
    are drawn from `random` seeded with the secret environment variable
    `seed`, so the stream is reproducible server-side only.
    """
    random.seed(int(os.environ["seed"]))
    p = random.randint(3, 30)
    while not isPrime(p):
        p = random.randint(3, 30)
    e = random.randint(50, 600)
    while 1:
        d = random.randint(10, 100)
        # fractional part of key**e as a high-precision Decimal
        ret = Decimal('0.' + str(key ** e).split('.')[-1])
        # shift left d binary places ...
        for i in range(d):
            ret *= 2
        # ... and emit the bit that crossed the binary point
        yield int((ret // 1) % 2)
        e += p
# Read quadratic coefficients from the user; the key is an irrational root.
try:
    a = int(input("a: "))
    b = int(input("b: "))
    c = int(input("c: "))
    # added some more weak key protections: reject complex roots, any zero
    # coefficient, rational roots (perfect-square discriminant), and
    # oversized coefficients
    if b*b < 4*a*c or [a,b,c].count(0) or Decimal(b*b-4*a*c).sqrt().to_integral_value()**2==b*b-4*a*c or abs(a)>400 or abs(b)>500 or abs(c)>500:
        raise Exception()
    # key = (sqrt(b^2 - 4ac) - b) / (2a)
    key = (Decimal(b*b-4*a*c).sqrt() - Decimal(b))/Decimal(a*2)
    # reject keys with |key| < sqrt(5)/2 or too close to an integer
    if 4*key*key<5 or abs(key-key.to_integral_value())<0.05:
        raise Exception()
except:
    print("bad key")
else:
    # encode the flag as a bit string and XOR it with the keystream
    flag = binascii.hexlify(os.environ["flag"].encode())
    flag = bin(int(flag,16))[2:].zfill(len(flag)*4)
    ret = ""
    k = keystream(key)
    for i in flag:
        ret += str(next(k)^int(i))
    print(ret)
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import torch
import torch.nn as nn
from base.modules.intrinsic_motivation import IntrinsicMotivationModule
class IntrinsicCuriosityModule(nn.Module, IntrinsicMotivationModule):
    """Intrinsic Curiosity Module: a shared state encoder trained jointly via
    an inverse model (predict the action from a state pair) and a forward
    model (predict the next state embedding); the forward model's prediction
    error is used as the intrinsic reward (surprisal)."""
    def __init__(self, env, hidden_size, state_size=None, action_size=None):
        super().__init__()
        # fall back to the environment's sizes when not given explicitly
        self.state_size = env.state_size if state_size is None else state_size
        self.action_size = env.action_size if action_size is None else action_size
        # encoder: state -> embedding
        self.state_embedding_layers = nn.Sequential(
            nn.Linear(self.state_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
        )
        # inverse model: (emb(s), emb(s')) -> action
        self.inverse_model_layers = nn.Sequential(
            nn.Linear(2 * hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, self.action_size),
        )
        # forward model: (emb(s), action) -> emb(s')
        self.forward_model_layers = nn.Sequential(
            nn.Linear(self.action_size + hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
        )
    @staticmethod
    def normalize(x):
        # scale to unit L2 norm along the last axis
        # NOTE(review): an all-zero vector would divide by zero — confirm
        # inputs are nonzero.
        return x / torch.sqrt(torch.pow(x, 2).sum(dim=-1, keepdim=True))
    def surprisal(self, episode_batch):
        """Compute surprisal for intrinsic motivation"""
        state = episode_batch['state']
        next_state = episode_batch['next_state']
        action = episode_batch['action']
        state_emb = self.normalize(self.state_embedding_layers(state))
        next_state_emb = self.normalize(self.state_embedding_layers(next_state))
        next_state_emb_hat = self.normalize(self.forward_model_layers(torch.cat([state_emb, action], dim=1)))
        # per-sample forward-model error (mean squared difference over features)
        return torch.mean(torch.pow(next_state_emb_hat - next_state_emb, 2), dim=1)
    def forward(self, mini_batch):
        """Compute terms for intrinsic motivation via surprisal (including losses and surprise)"""
        state = mini_batch['state']
        next_state = mini_batch['next_state']
        action = mini_batch['action']
        state_emb = self.normalize(self.state_embedding_layers(state))
        next_state_emb = self.normalize(self.state_embedding_layers(next_state))
        # inverse loss: recover the taken action from the embedding pair
        action_hat = self.inverse_model_layers(torch.cat([state_emb, next_state_emb], dim=1))
        inv_loss = torch.mean(torch.pow(action_hat - action, 2))
        # forward loss: predict the next embedding (target detached so the
        # encoder is not trained to make prediction trivially easy)
        next_state_emb_hat = self.normalize(self.forward_model_layers(torch.cat([state_emb, action], dim=1)))
        fwd_loss = torch.mean(torch.pow(next_state_emb_hat - next_state_emb.detach(), 2))
        return inv_loss + fwd_loss
class RandomNetworkDistillation(nn.Module, IntrinsicMotivationModule):
    """Random Network Distillation: a trained distillation network tries to
    match a fixed, randomly initialized network; the prediction error on a
    state serves as the intrinsic reward (surprisal)."""
    def __init__(self, env, hidden_size, state_size=None):
        super().__init__()
        self.state_size = env.state_size if state_size is None else state_size
        # fixed random target network (never explicitly frozen here; its
        # outputs are detached wherever gradients could flow into it)
        self.random_network = nn.Sequential(
            nn.Linear(self.state_size, 2*hidden_size),
            nn.ReLU(),
            nn.Linear(2*hidden_size, 2*hidden_size),
            nn.ReLU(),
            nn.Linear(2*hidden_size, hidden_size),
        )
        # predictor trained to match the random network's output
        self.distillation_network = nn.Sequential(
            nn.Linear(self.state_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
        )
    @staticmethod
    def normalize(x):
        # scale to unit L2 norm along the last axis
        return x / torch.sqrt(torch.pow(x, 2).sum(dim=-1, keepdim=True))
    def surprisal(self, episode_batch):
        """Compute surprisal for intrinsic motivation"""
        next_state = episode_batch['next_state']
        r_state_emb = self.normalize(self.random_network(next_state))
        d_state_emb = self.normalize(self.distillation_network(next_state))
        # per-sample prediction error, detached: used as a reward, not a loss
        return torch.mean(torch.pow(r_state_emb - d_state_emb, 2), dim=1).detach()
    def forward(self, mini_batch):
        """Compute losses for intrinsic motivation via surprisal (including losses and surprise)"""
        next_state = mini_batch['next_state']
        # target detached so only the distillation network is trained
        r_state_emb = self.normalize(self.random_network(next_state)).detach()
        d_state_emb = self.normalize(self.distillation_network(next_state))
        return torch.mean(torch.pow(r_state_emb - d_state_emb, 2))
|
from mpi4py import MPI
import numpy as np
from math import sin
from matplotlib.pyplot import *
def sincSquareMPI(x):
    """Return sinc(x)**2 = (sin(pi*x)/(pi*x))**2 of the array argument x,
    computed cooperatively by all MPI processes (summed on the root via
    Reduce, so the full result is valid on the root process).
    """
    # assume the array length can be divided by the number of processes
    retVal = np.zeros_like(x)
    tempVals = np.zeros_like(x)
    # Bug fix: '/' produces a float in Python 3 and range() rejects floats;
    # use integer division for the per-process index bounds.
    lowerIndex = len(x) * MPIrank // MPIsize
    upperIndex = len(x) * (MPIrank + 1) // MPIsize
    print("Process %s having indices (%s, %s)" % (MPIrank, lowerIndex, upperIndex))
    for i in range(lowerIndex, upperIndex):
        # NOTE(review): x[i] == 0 would divide by zero; the driver's linspace
        # happens to avoid exactly 0 — confirm for other inputs.
        tempVals[i] = (sin(np.pi * x[i]) / (np.pi * x[i])) ** 2
    MPIcomm.Reduce(tempVals, retVal, op=MPI.SUM, root=MPIroot)
    return retVal
MPIroot = 0 # define the root process
MPIcomm = MPI.COMM_WORLD # MPI communicator
# get rank (= number of individual process) and
# size (= total number of processes)
MPIrank, MPIsize = MPIcomm.Get_rank(), MPIcomm.Get_size()
# evaluate sinc^2 on a shared grid; only the root holds the summed result
x = np.linspace(-5,+5,10000,endpoint=True)
y = sincSquareMPI(x)
if(MPIrank == 0):
    # plot on the root process only
    plot(x,y)
    show()
|
#
# Copyright (c) 2008-2015 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_utils.context module
This module provides a "context" selector which can be used as Pyramid's subscriber
predicate. Matching argument can be a class or an interface: for subscriber to be actually called,
subscriber's argument should inherit from it (if it's a class) or implement it (if it's an
interface).
"""
import sys
from contextlib import contextmanager
from io import StringIO
__docformat__ = 'restructuredtext'
@contextmanager
def capture(func, *args, **kwargs):
    """Context manager used to capture standard output.

    Calls ``func(*args, **kwargs)`` while sys.stdout is redirected to an
    in-memory buffer, then yields ``(result, captured_text)``. The original
    stdout is restored even if the call raises.
    """
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        result = func(*args, **kwargs)
        sys.stdout.seek(0)
        yield result, sys.stdout.read()
    finally:
        sys.stdout = saved_stdout
@contextmanager
def capture_stderr(func, *args, **kwargs):
    """Context manager used to capture error output.

    Calls ``func(*args, **kwargs)`` while sys.stderr is redirected to an
    in-memory buffer, then yields ``(result, captured_text)``. The original
    stderr is restored even if the call raises.
    """
    saved_stderr = sys.stderr
    sys.stderr = StringIO()
    try:
        result = func(*args, **kwargs)
        sys.stderr.seek(0)
        yield result, sys.stderr.read()
    finally:
        sys.stderr = saved_stderr
@contextmanager
def capture_all(func, *args, **kwargs):
    """Context manager used to capture standard output and standard error output.

    Calls ``func(*args, **kwargs)`` with both streams redirected to
    in-memory buffers, then yields ``(result, stdout_text, stderr_text)``.
    Both original streams are restored even if the call raises.
    """
    saved_stdout = sys.stdout
    saved_stderr = sys.stderr
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    try:
        result = func(*args, **kwargs)
        sys.stdout.seek(0)
        sys.stderr.seek(0)
        yield result, sys.stdout.read(), sys.stderr.read()
    finally:
        sys.stdout = saved_stdout
        sys.stderr = saved_stderr
class ContextSelector:  # pylint: disable=too-few-public-methods
    """Interface based context selector.

    Pyramid subscriber predicate: the wrapped subscriber is only called when
    the event's object implements (interface) or is an instance of (class)
    at least one of the configured interfaces:

    .. code-block:: python

        from zope.lifecycleevent.interfaces import IObjectModifiedEvent
        from pyams_site.interfaces import ISiteRoot

        @subscriber(IObjectModifiedEvent, context_selector=ISiteRoot)
        def siteroot_modified_event_handler(event):
            '''This is an event handler for an ISiteRoot object modification event'''
    """

    def __init__(self, ifaces, config):  # pylint: disable=unused-argument
        # normalize a single class/interface into a one-element tuple
        self.interfaces = ifaces if isinstance(ifaces, (list, tuple, set)) else (ifaces,)

    def text(self):
        """Return selector """
        return 'context_selector = %s' % str(self.interfaces)

    phash = text

    def __call__(self, event):
        for iface in self.interfaces:
            # zope interfaces expose providedBy; plain classes fall back to
            # an isinstance check
            try:
                if iface.providedBy(event.object):
                    return True
            except (AttributeError, TypeError):
                if isinstance(event.object, iface):
                    return True
        return False
|
import os
from flask import Flask, render_template, request, redirect, url_for
from werkzeug.utils import secure_filename
import logging
app = Flask(__name__)
# Logging configuration
# Set the logger's level (the logger object comes from Flask)
app.logger.setLevel(logging.DEBUG)
# Handler (from the logging module) appending records to my.log
handler = logging.FileHandler('my.log', encoding='UTF-8')
logging_format = logging.Formatter(
    '%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
handler.setFormatter(logging_format)
# Attach the handler to the Flask logger
app.logger.addHandler(handler)
@app.route('/upload', methods=['POST', 'GET'])
def upload():
    """Handle file uploads: save the POSTed file under static/uploads and
    redirect; on GET render the upload form."""
    if request.method == 'POST':
        f = request.files['file']
        basepath = os.path.dirname(__file__)  # directory containing this module
        # Bug fix: r'static\uploads' uses a Windows-only path separator; build
        # the path portably and create the directory if it does not exist yet
        # (f.save fails when the target directory is missing).
        upload_dir = os.path.join(basepath, 'static', 'uploads')
        os.makedirs(upload_dir, exist_ok=True)
        # secure_filename strips path components from the client-supplied name
        upload_path = os.path.join(upload_dir, secure_filename(f.filename))
        f.save(upload_path)
        # log the successful upload (current_app.logger works from any module)
        app.logger.info('info log')
        return redirect(url_for('upload'))
    return render_template('upload.html')
if __name__ == '__main__':
    # development server only; disable debug mode in production
    app.run(debug=True)
|
import typing
from abaqusConstants import *
from .AnalyticSurface import AnalyticSurface
from .AnalyticSurfaceSegment import AnalyticSurfaceSegment
from .BeamOrientationArray import BeamOrientationArray
from .OdbDatumCsys import OdbDatumCsys
from .OdbMeshElementArray import OdbMeshElementArray
from .OdbMeshNodeArray import OdbMeshNodeArray
from .OdbRigidBodyArray import OdbRigidBodyArray
from .OdbSet import OdbSet
from .RebarOrientationArray import RebarOrientationArray
from .SectionCategory import SectionCategory
from ..Property.MaterialOrientationArray import MaterialOrientationArray
from ..Property.SectionAssignmentArray import SectionAssignmentArray
class OdbPartBase:
"""The OdbPart object is similar to the kernel Part object and contains nodes and elements,
but not geometry.
Attributes
----------
nodes: OdbMeshNodeArray
An :py:class:`~abaqus.Odb.OdbMeshNodeArray.OdbMeshNodeArray` object.
elements: OdbMeshElementArray
An :py:class:`~abaqus.Odb.OdbMeshElementArray.OdbMeshElementArray` object.
nodeSets: dict[str, OdbSet]
A repository of :py:class:`~abaqus.Odb.OdbSet.OdbSet` objects specifying node sets.
elementSets: dict[str, OdbSet]
A repository of :py:class:`~abaqus.Odb.OdbSet.OdbSet` objects specifying element sets.
surfaces: dict[str, OdbSet]
A repository of :py:class:`~abaqus.Odb.OdbSet.OdbSet` objects specifying surfaces.
sectionAssignments: SectionAssignmentArray
A :py:class:`~abaqus.Property.SectionAssignmentArray.SectionAssignmentArray` object.
beamOrientations: BeamOrientationArray
A :py:class:`~abaqus.Odb.BeamOrientationArray.BeamOrientationArray` object.
materialOrientations: MaterialOrientationArray
A :py:class:`~abaqus.Property.MaterialOrientationArray.MaterialOrientationArray` object.
rebarOrientations: RebarOrientationArray
A :py:class:`~abaqus.Odb.RebarOrientationArray.RebarOrientationArray` object.
rigidBodies: OdbRigidBodyArray
An :py:class:`~abaqus.Odb.OdbRigidBodyArray.OdbRigidBodyArray` object.
analyticSurface: AnalyticSurface
An :py:class:`~abaqus.Odb.AnalyticSurface.AnalyticSurface` object specifying analytic Surface defined on the instance.
Notes
-----
This object can be accessed by:
.. code-block:: python
import odbAccess
session.odbs[name].parts[name]
"""
# An OdbMeshNodeArray object.
nodes: OdbMeshNodeArray = OdbMeshNodeArray()
# An OdbMeshElementArray object.
elements: OdbMeshElementArray = OdbMeshElementArray()
# A repository of OdbSet objects specifying node sets.
nodeSets: dict[str, OdbSet] = dict[str, OdbSet]()
# A repository of OdbSet objects specifying element sets.
elementSets: dict[str, OdbSet] = dict[str, OdbSet]()
# A repository of OdbSet objects specifying surfaces.
surfaces: dict[str, OdbSet] = dict[str, OdbSet]()
# A SectionAssignmentArray object.
sectionAssignments: SectionAssignmentArray = SectionAssignmentArray()
# A BeamOrientationArray object.
beamOrientations: BeamOrientationArray = BeamOrientationArray()
# A MaterialOrientationArray object.
materialOrientations: MaterialOrientationArray = MaterialOrientationArray()
# A RebarOrientationArray object.
rebarOrientations: RebarOrientationArray = RebarOrientationArray()
# An OdbRigidBodyArray object.
rigidBodies: OdbRigidBodyArray = OdbRigidBodyArray()
# An AnalyticSurface object specifying analytic Surface defined on the instance.
analyticSurface: AnalyticSurface = AnalyticSurface()
def __init__(
self, name: str, embeddedSpace: SymbolicConstant, type: SymbolicConstant
):
"""This method creates an OdbPart object. Nodes and elements are added to this object at a
later stage.
Notes
-----
This function can be accessed by:
.. code-block:: python
session.odbs[name].Part
Parameters
----------
name
A String specifying the part name.
embeddedSpace
A SymbolicConstant specifying the dimensionality of the Part object. Possible values are
THREE_D, TWO_D_PLANAR, and AXISYMMETRIC.
type
A SymbolicConstant specifying the type of the Part object. Possible values are
DEFORMABLE_BODY and ANALYTIC_RIGID_SURFACE.
Returns
-------
An OdbPart object.
"""
pass
@typing.overload
def addElements(
self,
labels: tuple,
connectivity: tuple,
type: str,
elementSetName: str = "",
sectionCategory: SectionCategory = None,
):
"""This method adds elements to an OdbPart object using element labels and nodal
connectivity.
Warning:Adding elements not in ascending order of their labels may cause Abaqus/Viewer
to plot contours incorrectly.
Parameters
----------
labels
A sequence of Ints specifying the element labels.
connectivity
A sequence of sequences of Ints specifying the nodal connectivity.
type
A String specifying the element type.
elementSetName
A String specifying a name for this element set. The default value is the empty string.
sectionCategory
A SectionCategory object for this element set.
"""
pass
@typing.overload
def addElements(
self,
elementData: tuple,
type: str,
elementSetName: str = None,
sectionCategory: SectionCategory = None,
):
"""This method adds elements to an OdbPart object using a sequence of element labels and
nodal connectivity.
Warning:Adding elements not in ascending order of their labels may cause Abaqus/Viewer
to plot contours incorrectly.
Parameters
----------
elementData
A sequence of sequences of Ints specifying the element labels and nodal connectivity, in
the form ((*label*, *c1*, *c2*, *c3*, *c4*), (*label*, *c1*, *c2*, *c3*, *c4*), ...).
type
A String specifying the element type. The value can be user defined.
elementSetName
A String specifying a name for this element set. The default value is None.
sectionCategory
A SectionCategory object for this element set.
"""
pass
def addElements(self, *args, **kwargs):
pass
@typing.overload
def addNodes(self, labels: tuple, coordinates: tuple, nodeSetName: typing.Optional[str] = None):
    """Add nodes to the OdbPart using node labels and coordinates.

    .. warning::
        Adding nodes not in ascending order of their labels may cause
        Abaqus/Viewer to plot contours incorrectly.

    Parameters
    ----------
    labels
        A sequence of Ints specifying the node labels.
    coordinates
        A sequence of sequences of Floats specifying the nodal coordinates.
    nodeSetName
        A String specifying a name for this node set. The default value is None.
    """
    pass
@typing.overload
def addNodes(self, nodeData: tuple, nodeSetName: typing.Optional[str] = None):
    """Add nodes to the OdbPart from combined label/coordinate records.

    .. warning::
        Adding nodes not in ascending order of their labels may cause
        Abaqus/Viewer to plot contours incorrectly.

    Parameters
    ----------
    nodeData
        A sequence of tuples specifying the node labels and coordinates, in
        the form ((*label*, *x*, *y*, *z*), (*label*, *x*, *y*, *z*), ...).
    nodeSetName
        A String specifying a name for this node set. The default value is None.
    """
    pass
def addNodes(self, *args, **kwargs):
    """Dispatch to one of the ``addNodes`` overloads declared above.

    See the ``@typing.overload`` signatures for the supported argument forms:
    either (labels, coordinates, ...) or (nodeData, ...).
    """
    pass
def assignBeamOrientation(
    self, region: str, method: SymbolicConstant, vector: tuple
):
    """Assign a beam section orientation to a region of a part instance.

    Parameters
    ----------
    region
        An OdbSet specifying a region on an instance. (Annotated here as
        ``str``; the documented type is OdbSet -- verify against callers.)
    method
        A SymbolicConstant specifying the assignment method. Only a value of
        N1_COSINES is currently supported.
    vector
        A sequence of three Floats specifying the approximate local n1
        direction of the beam cross-section.
    """
    pass
def assignMaterialOrientation(
    self,
    region: str,
    localCSys: OdbDatumCsys,
    axis: SymbolicConstant = AXIS_1,
    angle: float = 0,
    stackDirection: SymbolicConstant = STACK_3,
):
    """Assign a material orientation to a region of a part instance.

    Parameters
    ----------
    region
        An OdbSet specifying a region on an instance. (Annotated here as
        ``str``; the documented type is OdbSet -- verify against callers.)
    localCSys
        An OdbDatumCsys object specifying the local coordinate system, or
        None, indicating the global coordinate system.
    axis
        A SymbolicConstant specifying the axis of a cylindrical or spherical
        datum coordinate system about which an additional rotation is applied.
        For shells this axis is also the shell normal. Possible values are
        AXIS_1, AXIS_2, and AXIS_3. The default value is AXIS_1.
    angle
        A Float specifying the angle of the additional rotation. The default
        value is 0.0.
    stackDirection
        A SymbolicConstant specifying the stack or thickness direction of the
        material. Possible values are STACK_1, STACK_2, STACK_3, and
        STACK_ORIENTATION. The default value is STACK_3.
    """
    pass
def assignRebarOrientation(
    self,
    region: str,
    localCsys: OdbDatumCsys,
    axis: SymbolicConstant = AXIS_1,
    angle: float = 0,
):
    """Assign a rebar reference orientation to a region of a part instance.

    Parameters
    ----------
    region
        An OdbSet specifying a region on an instance. (Annotated here as
        ``str``; the documented type is OdbSet -- verify against callers.)
    localCsys
        An OdbDatumCsys object specifying the local coordinate system, or
        None, indicating the global coordinate system.
    axis
        A SymbolicConstant specifying the axis of a cylindrical or spherical
        datum coordinate system about which an additional rotation is applied.
        For shells this axis is also the shell normal. Possible values are
        AXIS_1, AXIS_2, and AXIS_3. The default value is AXIS_1.
    angle
        A Float specifying the angle of the additional rotation. The default
        value is 0.0.
    """
    pass
def getElementFromLabel(self, label: int):
    """Retrieve the element with the specified label from the part.

    Parameters
    ----------
    label
        An Int specifying the element label.

    Returns
    -------
    An OdbMeshElement object.

    Raises
    ------
    OdbError
        If no element with the specified label exists
        ("Invalid element label").
    """
    pass
def getNodeFromLabel(self, label: int):
    """Retrieve the node with the specified label from the part.

    Parameters
    ----------
    label
        An Int specifying the node label.

    Returns
    -------
    An OdbMeshNode object.

    Raises
    ------
    OdbError
        If no node with the specified label exists ("Invalid node label").
    """
    pass
def AnalyticRigidSurf2DPlanar(
    self, name: str, profile: tuple[AnalyticSurfaceSegment, ...], filletRadius: float = 0
):
    """Define a two-dimensional AnalyticSurface object on the part.

    Parameters
    ----------
    name
        The name of the analytic surface.
    profile
        A sequence of AnalyticSurfaceSegment objects or an
        OdbSequenceAnalyticSurfaceSegment object.
    filletRadius
        A Double specifying the radius of curvature to smooth discontinuities
        between adjoining segments. The default value is 0.0.

    Raises
    ------
    OdbError
        If the OdbPart is of type THREE_D: "2D-Planar Analytic Rigid Surface
        can be defined only if the part is of type TWO_D_PLANAR or
        AXISYMMETRIC."
    """
    pass
def AnalyticRigidSurfExtrude(
    self, name: str, profile: tuple[AnalyticSurfaceSegment, ...], filletRadius: float = 0
):
    """Define a three-dimensional cylindrical AnalyticSurface on the part.

    Parameters
    ----------
    name
        The name of the analytic surface.
    profile
        A sequence of AnalyticSurfaceSegment objects or an
        OdbSequenceAnalyticSurfaceSegment object.
    filletRadius
        A Double specifying the radius of curvature to smooth discontinuities
        between adjoining segments. The default value is 0.0.

    Raises
    ------
    OdbError
        If the OdbPart is not of type THREE_D: "Analytic Rigid Surface of
        type CYLINDER can be defined only if the part is of type THREE_D."
    """
    pass
def AnalyticRigidSurfRevolve(
    self, name: str, profile: tuple[AnalyticSurfaceSegment, ...], filletRadius: float = 0
):
    """Define a three-dimensional AnalyticSurface of revolution on the part.

    Parameters
    ----------
    name
        The name of the analytic surface.
    profile
        A sequence of AnalyticSurfaceSegment objects or an
        OdbSequenceAnalyticSurfaceSegment object.
    filletRadius
        A Double specifying the radius of curvature to smooth discontinuities
        between adjoining segments. The default value is 0.0.

    Raises
    ------
    OdbError
        If the OdbPart is not of type THREE_D: "Analytic Rigid Surface of
        type REVOLUTION can be defined only if the part is of type THREE_D."
    """
    pass
|
import unittest
from src import roman_numbers
class TestRomanNumbers(unittest.TestCase):
    """Unit tests for roman_numbers.RomanNumber input validation."""

    def test_should_return_error_on_invalid_string(self):
        """Constructing a RomanNumber from a malformed numeral must raise."""
        # "XVIL" is not a valid Roman numeral, so the constructor should fail.
        # NOTE(review): asserting the broad Exception type -- tighten to the
        # specific exception class once roman_numbers defines one.
        with self.assertRaises(Exception):
            roman_numbers.RomanNumber("XVIL")


if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/python
from pyspark import SparkContext
from pyspark.sql import HiveContext
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
#CREATE A TABLE WITH showing changes in distraction levels over time
if __name__ == '__main__':
    # Build a table showing changes in distraction-factor levels over time.
    sc = SparkContext()
    sqlContext = HiveContext(sc)
    sqlContext.sql("use accident_project")
    # Per-(factor, year) counts and per-year totals, excluding 'Unknown'.
    strSQL_factor = "select d.year, d.factor as factor, count(*) as factor_count from distract d join distract_vehicles dv on d.case_number = dv.case_number and d.vehicle_number = dv.vehicle_number where d.factor not in ('Unknown') group by d.factor, d.year"
    strSQL_all = "select d.year, count(*) as year_all_factor_count from distract d join distract_vehicles dv on d.case_number = dv.case_number and d.vehicle_number = dv.vehicle_number where d.factor not in ('Unknown') group by d.year"
    df_factor = sqlContext.sql(strSQL_factor)
    df_all = sqlContext.sql(strSQL_all)
    df = df_factor.join(df_all, 'year')
    # Collect to a pandas frame for the per-factor time-series computation.
    pdf = df.toPandas()
    years = np.sort(pdf['year'].unique())
    print(years)  # parenthesized so the script runs under Python 2 and 3
    factors = pdf['factor'].unique()
    # For each factor, compute its share of all factors per year and the
    # year-over-year change of that share (0 for the factor's first year).
    for f in factors:
        previous = None
        for y in years:
            # One mask per (year, factor) pair instead of rebuilding the same
            # boolean expression six times per iteration.
            mask = (pdf['year'] == y) & (pdf['factor'] == f)
            pdf.loc[mask, 'percent_all_factors_year'] = 100.0 * pdf.loc[mask, 'factor_count'] / pdf.loc[mask, 'year_all_factor_count']
            current = pdf.loc[mask, 'percent_all_factors_year']
            if previous is None:
                pdf.loc[mask, 'percent_change'] = 0
            else:
                pdf.loc[mask, 'percent_change'] = float(current) - float(previous)
            previous = current
    # Represent each year as 1 January of that year.
    # NOTE(review): the "1" + "1" + pdf.year concatenation assumes the year
    # column holds strings -- confirm against the Hive schema.
    pdf.year = pd.to_datetime("1" + "1" + pdf.year, format='%d%m%Y')
    # Positional rename: percent_all_factors_year -> year_percent_all_factors.
    pdf.columns = ['year', 'factor', 'factor_count', 'year_all_factor_count','year_percent_all_factors', 'percent_change']
    with open("distract_levels_time.csv", 'w') as f:
        pdf.to_csv(f, header=True, index=False)
|
#!/usr/bin/env python
"""
Shows how to create an image using numpy
"""
import cv2
import numpy as np
# Canvas dimensions: note numpy arrays are indexed (rows, cols, channels),
# so the shape is (height, width, n_channel).
width, height, n_channel = 800, 600, 3
data_type = np.uint8  # can also use float types

# Start from an all-zeros (black) image and draw a filled blue circle on it
# (OpenCV uses BGR channel order, so (255, 0, 0) is blue).
img = np.zeros((height, width, n_channel), dtype=data_type)
cv2.circle(img, (300, 300), 100, (255, 0, 0), cv2.FILLED)

# Display the image until any key is pressed, then tear the window down.
win_name = 'Window'
cv2.namedWindow(win_name)
cv2.imshow(win_name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import pickle
from News import getNewsList
from Tokenizer import tokenize
from Tokenizer import invalidToken
from Normalizer import normalize
from BTree import Node
from BTree import BTree
from BTree import calAllIdf
from BTree import storeDictionary
from BTree import loadDictionary
from RankedBased import loadNormFactors
from RankedBased import finalizeResults
from RankedBased import getResults
from RankedBased import getTf
import Laws
import math
# Module-level B-tree term dictionary; populated by constructInvertedIndex()
# or loadInvertedIndex(), and read by check_finglish()/getDictionary().
dictionary = None
# Per-document vector norms for the three ranked-retrieval scoring schemes,
# appended in document order by constructInvertedIndex().
normalizationFactorsScheme1 = []
normalizationFactorsScheme2 = []
normalizationFactorsScheme3 = []
def constructInvertedIndex():
    """Build the in-memory inverted index over all news documents.

    Tokenizes and normalizes every document, records term occurrences in the
    B-tree dictionary, computes tf/idf, derives per-document normalization
    factors for the three scoring schemes, and persists the index, the norm
    factors, and the Heaps'-law samples to disk.
    """
    global dictionary
    # Seed the B-tree with a sentinel term so the root is never empty.
    dictionary = BTree(Node("سسسسسس", 1, []))
    nodesList = []
    docCounter = 0
    for news in getNewsList():
        nodes = {}
        position = 0
        for term in tokenize(normalize(news.content), check_finglish):
            if term != invalidToken:
                # Dict keys collect the distinct dictionary nodes touched by
                # this document (value True is a placeholder).
                nodes[dictionary.addOccurrence(term, news.id, position)] = True
            position += 1
        nodesList.append(nodes)
        for node in nodes:
            node.cal_tf(news.id)
        docCounter += 1
        if docCounter % 20 == 0:
            # Sample dictionary growth every 20 docs for Heaps'-law analysis.
            Laws.heap(getDictionary())
    calAllIdf(dictionary.root)
    i = 0
    for news in getNewsList(): # calculate the documents' normalize factors for 3 scoring schemes
        nodes = nodesList[i]
        sum_of_squares_1 = 0
        sum_of_squares_2 = 0
        sum_of_squares_3 = 0
        for node in nodes.keys():
            # Scheme 1: (tf - 1) * idf; scheme 2: raw tf; scheme 3: tf * idf.
            sum_of_squares_1 += math.pow((getTf(news.id, node.postingsList) - 1) * node.idf, 2)
            sum_of_squares_2 += math.pow(getTf(news.id, node.postingsList), 2)
            sum_of_squares_3 += math.pow(getTf(news.id, node.postingsList) * node.idf, 2)
        normalizationFactorsScheme1.append(math.sqrt(sum_of_squares_1))
        normalizationFactorsScheme2.append(math.sqrt(sum_of_squares_2))
        normalizationFactorsScheme3.append(math.sqrt(sum_of_squares_3))
        i += 1
    Laws.storeHeapDataSet()
    storeDictionary(dictionary)
    storeNormFactors()
def storeNormFactors():
    """Pickle the three normalization-factor lists under InvertedIndex/."""
    scheme_files = (
        ('InvertedIndex/normFactors1.pickle', normalizationFactorsScheme1),
        ('InvertedIndex/normFactors2.pickle', normalizationFactorsScheme2),
        ('InvertedIndex/normFactors3.pickle', normalizationFactorsScheme3),
    )
    for path, factors in scheme_files:
        with open(path, 'wb') as handle:
            pickle.dump(factors, handle, protocol=pickle.HIGHEST_PROTOCOL)
def loadInvertedIndex(isRankedBased):
    """Load the persisted dictionary into the module global; when ranked
    retrieval is requested, also load the normalization factors."""
    global dictionary
    dictionary = loadDictionary()
    if isRankedBased:
        loadNormFactors()
def intersect(pl_list):
    """Intersect an arbitrary number of sorted postings lists.

    Parameters
    ----------
    pl_list : list of lists
        Each inner list must be sorted ascending under the same ordering.

    Returns
    -------
    list
        The items present in every input list, in sorted order. An empty
        pl_list yields []; a single list is returned as-is.

    Notes
    -----
    Unlike the previous recursive version, this implementation does not
    mutate the caller's pl_list and cannot hit the recursion limit on
    queries with many terms.
    """
    if not pl_list:
        return []
    result = pl_list[0]
    # Fold the remaining lists in one at a time with a linear two-pointer merge.
    for other in pl_list[1:]:
        merged = []
        i = j = 0
        while i < len(result) and j < len(other):
            if result[i] == other[j]:
                merged.append(result[i])
                i += 1
                j += 1
            elif result[i] < other[j]:
                i += 1
            else:
                j += 1
        result = merged
        if not result:
            break  # further intersections would stay empty
    return result
def AND_NOT(pl_list0, pl_list1):
    """Return entries of pl_list0 whose docID (entry[0]) is not in pl_list1.

    Both postings lists must be sorted ascending by docID; the relative
    order of the surviving pl_list0 entries is preserved.
    """
    result = []
    i, j = 0, 0
    len0, len1 = len(pl_list0), len(pl_list1)
    # Two-pointer sweep: equal docIDs are dropped, smaller pl_list0 docIDs
    # are kept, smaller pl_list1 docIDs are skipped over.
    while i < len0 and j < len1:
        doc0, doc1 = pl_list0[i][0], pl_list1[j][0]
        if doc0 == doc1:
            i += 1
            j += 1
        elif doc0 < doc1:
            result.append(pl_list0[i])
            i += 1
        else:
            j += 1
    # Once the exclusion list is exhausted, everything left is kept.
    result.extend(pl_list0[i:])
    return result
def NOT(term_pl):
    """Complement a postings list over all document IDs.

    term_pl is assumed to be a sorted ascending list of docIDs; the result is
    the sorted list of docIDs in range(len(getNewsList())) that are absent
    from term_pl.
    """
    all_docs = []
    pointer = 0
    if len(term_pl) == 0:
        # Empty postings list: every document qualifies.
        for d in range(0, len(getNewsList())):
            all_docs.append(d)
    else:
        for d in range(0, len(getNewsList())):
            if d != term_pl[pointer]:
                all_docs.append(d)
            else:
                pointer += 1
                if pointer == len(term_pl):
                    # Postings list exhausted: every remaining docID qualifies,
                    # so drain the tail and stop the outer loop.
                    d += 1
                    while d < len(getNewsList()):
                        all_docs.append(d)
                        d += 1
                    break
    return all_docs
def phraseQuery(pl_list, distance):
    """Positional intersection for phrase queries.

    pl_list holds one positional postings list per query term, in phrase
    order; postings whose leading docID is -1 mark stop words. `distance` is
    the required positional gap between consecutive terms (normally 1; it is
    widened when a stop word is skipped). The first two lists are folded into
    one and the function recurses until a single result list remains.
    """
    if len(pl_list) == 0:
        return []
    # Stop words processes: skip stop-word postings lists, widening the
    # allowed distance so the phrase positions still line up.
    if len(pl_list) == 1:
        if len(pl_list[0]) >= 1:
            if len(pl_list[0][0]) >= 1:
                if pl_list[0][0][0] == -1: # the DocID of Stop words
                    return []
    else:
        if len(pl_list[0]) >= 1:
            if len(pl_list[0][0]) >= 1:
                if pl_list[0][0][0] == -1:
                    return phraseQuery(pl_list[1:], distance)
        if len(pl_list[1]) >= 1:
            if len(pl_list[1][0]) >= 1:
                if pl_list[1][0][0] == -1:
                    # Second term is a stop word: carry the first list forward
                    # and require one extra position of separation.
                    pl_list[1] = pl_list[0]
                    return phraseQuery(pl_list[1:], distance + 1)
    # Done
    if len(pl_list) == 1:
        return pl_list[0]
    answer = []
    pointer0 = 0
    pointer1 = 0
    # Two-pointer merge on docIDs; on a docID match, scan the position lists
    # for a pair exactly `distance` apart.
    while pointer0 < len(pl_list[0]) and pointer1 < len(pl_list[1]):
        if pl_list[0][pointer0][0][0] == pl_list[1][pointer1][0][0]:
            pointer00 = 1 # after docID
            pointer11 = 1
            while pointer00 < len(pl_list[0][pointer0]) and pointer11 < len(pl_list[1][pointer1]):
                if pl_list[1][pointer1][pointer11] - pl_list[0][pointer0][pointer00] == distance:
                    answer.append(pl_list[1][pointer1])
                    pointer00 += 1
                    pointer11 += 1
                    break
                elif pl_list[0][pointer0][pointer00] > pl_list[1][pointer1][pointer11]:
                    pointer11 += 1
                else:
                    pointer00 += 1
            pointer0 += 1
            pointer1 += 1
        elif pl_list[0][pointer0][0][0] < pl_list[1][pointer1][0][0]:
            pointer0 += 1
        else:
            pointer1 += 1
    # Fold the merged result into slot 1 and recurse with the default gap.
    pl_list[1] = answer
    return phraseQuery(pl_list[1:], 1)
def get_docIDs(intersect_pl_results):
    """Extract the distinct docIDs (entry[0]) from intersected postings.

    First-occurrence order is preserved. Uses a seen-set for O(n) total work
    instead of the previous O(n^2) list-membership scan; docIDs are assumed
    hashable (they are ints elsewhere in this module, e.g. in NOT()).
    """
    seen = set()
    results = []
    for entry in intersect_pl_results:
        doc_id = entry[0]
        if doc_id not in seen:
            seen.add(doc_id)
            results.append(doc_id)
    return results
def query(pl_list, tokens, isRankedBased):
    """Answer a query and return structured result pages.

    pl_list holds the postings lists to intersect (for ranked retrieval only
    those coming from NOT and phrase sub-queries); tokens are the raw query
    tokens, passed through for result-page formatting.
    """
    if not pl_list and not isRankedBased:
        return []
    results = intersect(pl_list) # in rankedBased, only the lists of NOT and Phrase inputs
    if isRankedBased:
        for docID in results:
            # Only the Docs that have at least one query term get + 0.5 bonus
            if getNewsList()[docID].hasQueryTerm:
                getNewsList()[docID].score += 0.5
        finalizeResults() # Top K
        results = getResults() # retrieves the top-K docIDs selected above
    structuredResults = []
    for result in results:
        structuredResults.append(getNewsList()[result].structuredFormatResultsPage(tokens))
    return structuredResults
def check_finglish(token):
    """Return a Persian transliteration of *token* when the token itself has
    no postings list in the dictionary; otherwise return it unchanged.

    Characters are mapped one-by-one through mapping_dic; if any character
    has no mapping, the original token is returned as-is.
    """
    if not dictionary.getPostingsList(token):
        finglish_term = ""
        for character in token:
            try:
                finglish_term += mapping_dic[character]
            except KeyError:
                # Unmappable character: fall back to the raw token.
                return token
        return finglish_term
    return token
def load_mapping_dic():
    """Load the Latin-to-Persian character mapping used by check_finglish().

    Returns the unpickled dict. The context manager guarantees the file
    handle is closed even if unpickling raises (the previous version leaked
    the handle on error).
    """
    with open('Resources/FinglishWords/mapping_dic.pickle', 'rb') as file:
        return pickle.load(file)
def getDictionary():
    """Return the module-level BTree dictionary (None until built/loaded)."""
    return dictionary
mapping_dic = load_mapping_dic()
|
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
from django.http import JsonResponse
import re
def get_hiver_data_as_api(request):
    """Scrape https://hiverhq.com/ and return the text of selected HTML tags.

    The response is a JSON list containing a single dict that maps each
    payload key to the list of text contents of the matching tags. The key
    names (including the irregular "p_tags") are part of the existing API
    response and are preserved verbatim.
    """
    # (JSON payload key, HTML tag name) pairs, in the original key order.
    tag_map = [
        ("p_tags", "p"),
        ("h1_tag_list", "h1"),
        ("h2_tag_list", "h2"),
        ("h3_tag_list", "h3"),
        ("h4_tag_list", "h4"),
        ("anchor_tag_list", "a"),
        ("li_tag_list", "li"),
        ("span_tag_list", "span"),
        ("option_tag_list", "option"),
        ("address_tag_list", "address"),
        ("title_tag_list", "title"),
        ("desc_tag_list", "desc"),
    ]
    response = requests.get("https://hiverhq.com/")
    soup = BeautifulSoup(response.content, "html.parser")
    # One loop replaces the previous 40+ copy-pasted find_all/list lines and
    # avoids shadowing the builtin `dict`.
    payload = {}
    for key, tag in tag_map:
        payload[key] = [element.text for element in soup.find_all(tag)]
    return JsonResponse([payload], safe=False)
def get_hiver_data(request):
    """Scrape https://hiverhq.com/, clean the text of selected tags, and
    return a JSON dict listing every cleaned word that occurs more than 13
    times across the page ("occourances_of_word_desc").
    """
    sentence_bundle = []
    p_tag_list=[]
    h1_tag_list=[]
    h2_tag_list=[]
    h3_tag_list=[]
    h4_tag_list=[]
    anchor_tag_list=[]
    li_tag_list = []
    span_tag_list = []
    option_tag_list = []
    address_tag_list = []
    title_tag_list = []
    desc_tag_list = []
    dict={}
    dict2={}
    lst = []
    r=requests.get("https://hiverhq.com/")
    c=r.content
    soup=BeautifulSoup(c,"html.parser")
    # Collect the raw text of each tag type of interest.
    p_tags =soup.find_all("p")
    h1_tags =soup.find_all("h1")
    h2_tags =soup.find_all("h2")
    h3_tags =soup.find_all("h3")
    h4_tags =soup.find_all("h4")
    anchor_tags =soup.find_all("a")
    li_tags =soup.find_all("li")
    span_tags =soup.find_all("span")
    option_tags = soup.find_all("option")
    address_tags = soup.find_all("address")
    title_tags = soup.find_all("title")
    desc_tags = soup.find_all("desc")
    p_tag_list = [i.text for i in p_tags]
    h1_tag_list = [i.text for i in h1_tags]
    h2_tag_list = [i.text for i in h2_tags]
    h4_tag_list = [i.text for i in h4_tags]
    h3_tag_list = [i.text for i in h3_tags]
    anchor_tag_list = [i.text for i in anchor_tags]
    li_tag_list = [i.text for i in li_tags]
    span_tag_list = [i.text for i in span_tags]
    option_tag_list = [i.text for i in option_tags]
    address_tag_list = [i.text for i in address_tags]
    title_tag_list = [i.text for i in title_tags]
    desc_tag_list = [i.text for i in desc_tags]
    def common_tag_validator(tag_list):
        # Strip non-alphanumerics, drop empties, and join words with commas.
        temp_tags = []
        for i in tag_list:
            temp_tags.append(re.sub('[^A-Za-z0-9 ]+', '', i))
        tag_list = []
        tag_list = temp_tags
        tag_list = [i.strip() for i in tag_list]
        tag_list = [i for i in tag_list if i != ""]
        tag_list = [",".join(i.split(" ")) for i in tag_list]
        return tag_list
    # NOTE(review): h1/h2/h3 are only comma-joined (no re.sub cleaning), and
    # span/desc are not cleaned at all -- confirm this asymmetry is intended.
    p_tag_list = common_tag_validator(p_tag_list)
    h1_tag_list = [",".join(i.split(" ")) for i in h1_tag_list]
    h2_tag_list = [",".join(i.split(" ")) for i in h2_tag_list]
    h3_tag_list = [",".join(i.split(" ")) for i in h3_tag_list]
    h4_tag_list = common_tag_validator(h4_tag_list)
    anchor_tag_list = common_tag_validator(anchor_tag_list)
    li_tag_list = common_tag_validator(li_tag_list)
    address_tag_list = common_tag_validator(address_tag_list)
    title_tag_list = common_tag_validator(title_tag_list)
    option_tag_list = common_tag_validator(option_tag_list)
    dict["p_tags"] = p_tag_list
    dict["h1_tag_list"] = h1_tag_list
    dict["h2_tag_list"] = h2_tag_list
    dict["h3_tag_list"] = h3_tag_list
    dict["h4_tag_list"] = h4_tag_list
    dict["anchor_tag_list"] = anchor_tag_list
    dict["li_tag_list"] = li_tag_list
    dict["span_tag_list"] = span_tag_list
    dict["option_tag_list"] = option_tag_list
    dict["address_tag_list"] = address_tag_list
    dict["title_tag_list"] = title_tag_list
    dict["desc_tag_list"] = list(set(desc_tag_list))
    # NOTE(review): `lst` is populated but never returned -- dead code?
    lst.append(dict)
    # Flatten all cleaned text into comma-separated word bundles.
    sentence_bundle = p_tag_list + h1_tag_list + h2_tag_list + h3_tag_list + h4_tag_list + anchor_tag_list + li_tag_list + span_tag_list + option_tag_list + address_tag_list + title_tag_list + list(set(desc_tag_list))
    res = []
    for i in sentence_bundle:
        res.extend(i.split(","))
    res = [i for i in res if i != ""]
    # Convert numeric strings to ints so they can be filtered out below.
    word_box = []
    for i in res:
        try:
            i = int(i)
        except ValueError:
            pass
        word_box.append(i)
    word_box = [i for i in word_box if not type(i) == int]
    # NOTE(review): count() inside the comprehension is O(n^2); a
    # collections.Counter would do this in one pass.
    word_box = {x:word_box.count(x) for x in word_box}
    # Keep only words that appear more than 13 times.
    occourances_of_word_desc = []
    for i,j in word_box.items():
        if j > 13:occourances_of_word_desc.append({i:j})
    dict2["occourances_of_word_desc"] = occourances_of_word_desc
    return JsonResponse(dict2,safe=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.