text stringlengths 38 1.54M |
|---|
import PySimpleGUI as sg
import pymunk
import random
import socket
"""
Demo that shows integrating PySimpleGUI with the pymunk library. This combination
of PySimpleGUI and pymunk could be used to build games.
Note this exact same demo runs with PySimpleGUIWeb by changing the import statement
"""
class Ball():
    """A pymunk rigid body paired with the PySimpleGUI Graph figure that draws it."""

    def __init__(self, x, y, r, graph_elem, *args, **kwargs):
        """Create a ball of radius *r* at (x, y), drawn on *graph_elem*.

        The GUI figure id is filled in later (by Playfield.add_ball), so
        ``gui_circle_figure`` starts as None.
        """
        mass = 10
        # Create a Body with mass and moment
        self.body = pymunk.Body(
            mass, pymunk.moment_for_circle(mass, 0, r, (0, 0)))
        self.body.position = x, y
        # Create a circle shape and attach to body
        self.shape = pymunk.Circle(self.body, r, offset=(0, 0))
        self.shape.elasticity = 0.99999
        self.shape.friction = 0.8
        self.gui_circle_figure = None
        self.graph_elem = graph_elem

    def move(self):
        """Sync the on-screen circle with the physics body's current position."""
        # BUG FIX: the y coordinate originally read the *global* ``ball``
        # (``ball.body.position[1]``) instead of ``self`` — every ball was
        # drawn at the last iterated ball's height (and it crashed when no
        # global ``ball`` existed).
        self.graph_elem.RelocateFigure(
            self.gui_circle_figure, self.body.position[0], self.body.position[1])
class Playfield():
    """The physics arena: a pymunk Space with gravity, boundary walls and tracked balls."""

    def __init__(self, graph_elem):
        """Build the space, gravity and three boundary walls (no ceiling)."""
        self.space = pymunk.Space()
        self.space.gravity = 0, 200
        self.add_wall((0, 400), (600, 400))  # ground
        self.add_wall((0, 0), (0, 600))      # Left side
        self.add_wall((600, 0), (600, 400))  # right side
        self.arena_balls = []  # type: List[Ball]
        self.graph_elem = graph_elem  # type: sg.Graph

    def add_wall(self, pt_from, pt_to):
        """Add a static, frictional wall segment from *pt_from* to *pt_to*."""
        body = pymunk.Body(body_type=pymunk.Body.STATIC)
        ground_shape = pymunk.Segment(body, pt_from, pt_to, 0.0)
        ground_shape.friction = 0.8
        ground_shape.elasticity = .99
        ground_shape.mass = pymunk.inf
        self.space.add(ground_shape)

    def add_random_balls(self):
        """Drop 199 balls of random radius at random positions (stress test)."""
        for i in range(1, 200):
            x = random.randint(0, 600)
            y = random.randint(0, 400)
            r = random.randint(1, 10)
            self.add_ball(x, y, r)

    def add_ball(self, x, y, r, fill_color='black', line_color='red'):
        """Create a ball, register it with this space, draw it, and return it."""
        ball = Ball(x, y, r, self.graph_elem)
        self.arena_balls.append(ball)
        # BUG FIX: originally added to the *global* ``area.space`` instead of
        # ``self.space`` — a Playfield that was not bound to the global name
        # silently put its balls into another instance's simulation.
        self.space.add(ball.body, ball.shape)
        ball.gui_circle_figure = self.graph_elem.draw_circle(
            (x, y), r, fill_color=fill_color, line_color=line_color)
        return ball

    def shoot_a_ball(self, x, y, r, vector=(-10, 0), fill_color='black', line_color='red'):
        """Add a ball and launch it with an impulse of 100x *vector*."""
        ball = self.add_ball(
            x, y, r, fill_color=fill_color, line_color=line_color)
        # ball.shape.surface_velocity=10
        ball.body.apply_impulse_at_local_point(100 * pymunk.Vec2d(vector))
        # Returned for symmetry with add_ball (backward compatible: callers
        # that ignore the return value are unaffected).
        return ball
# ------------------- Build and show the GUI Window -------------------
# The Graph's coordinate system is declared flipped (top-left (0, 400),
# bottom-right (600, 0)) so that pymunk's +200 gravity pulls balls "down"
# on screen.
graph_elem = sg.Graph((600, 400), (0, 400), (600, 0),
                      enable_events=True,
                      key='-GRAPH-',
                      background_color='lightblue')
# hostname = socket.gethostbyname(socket.gethostname())
layout = [[sg.Text('Ball Test'), sg.Text('My IP {}'.format('000000000000'))],
          [graph_elem],
          [sg.Button('Kick'), sg.Button('Player 1 Shoot', size=(15, 2)),
           sg.Button('Player 2 Shoot', size=(15, 2)), sg.Button('Exit')]
          ]
window = sg.Window('Window Title', layout, disable_close=True, finalize=True)
area = Playfield(graph_elem)
area.add_wall((0, 300), (300, 300))          # a mid-air platform...
graph_elem.draw_line((0, 300), (300, 300))   # ...and its visual counterpart
# area.add_random_balls()
# ------------------- GUI Event Loop -------------------
while True:  # Event Loop
    # The 10 ms timeout keeps the physics stepping even when no event arrives.
    event, values = window.read(timeout=10)
    # print(event, values)
    if event in (sg.WIN_CLOSED, 'Exit'):
        break
    # Advance the simulation by a fixed 10 ms step each pass through the loop.
    area.space.step(0.01)
    if event == 'Player 2 Shoot':
        # Player 2 fires from the right edge toward the left.
        area.shoot_a_ball(555, 200, 5, (-10, 0),
                          fill_color='green', line_color='green')
    elif event == 'Player 1 Shoot':
        # Player 1 fires from the left edge toward the right.
        area.shoot_a_ball(10, 200, 5, (10, 0))
    for ball in area.arena_balls:
        if event == 'Kick':
            # Teleport each ball upward by a random amount (y shrinks upward
            # in this flipped coordinate system).
            pos = ball.body.position[0], ball.body.position[1] - random.randint(1, 200)
            ball.body.position = pos
        ball.move()
window.close()
import requests
import json
class PostExperience(object):
    """Client helper that posts an 'experience' event to the service REST API."""

    def __init__(self, common, headers, accesstoken):
        """Remember the base URL and attach the access token to the shared headers."""
        self.accesstoken = accesstoken
        self.baseUrl = common.get('baseUrl')
        self.headers = headers
        self.headers.update({"accesstoken": accesstoken})

    def post_experience(self, p="P90"):
        """POST /services/{p}/experience and return the decoded JSON body."""
        # e.g. POST http://192.168.1.155:55262/services/P90/experience HTTP/1.1
        url = f"{self.baseUrl}/services/{p}/experience"
        self.headers.update({'Content-Length': '0'})
        response = requests.request("POST", url, headers=self.headers)
        return json.loads(response.text)
if __name__ == '__main__':
    # Module is import-only; nothing runs when executed directly.
    pass
def main(j, args, params, tags, tasklet):
    """Tasklet entry point: gather 'storagedriver' nodes and apply them to the doc template."""
    params.result = (args.doc, args.doc)
    storagenodedict = dict()
    # import ipdb; ipdb.set_trace()
    scl = j.clients.osis.getNamespace('system')
    # NOTE(review): the first search-result row is skipped ([1:]) —
    # presumably it is metadata (e.g. a row count) rather than a node;
    # confirm against the osis client API.
    nodes = scl.node.search({"roles": "storagedriver"})[1:]
    for idx, node in enumerate(nodes):
        # Flatten the address list into a display string for the template.
        node["ips"] = ", ".join(node["ipaddr"])
        for nic in node["netaddr"]:
            if nic["name"] == "backplane1":
                # First IP on the backplane NIC is treated as the public address.
                node['publicip'] = nic["ip"][0]
        nodes[idx] = node
    storagenodedict['nodes'] = nodes
    args.doc.applyTemplate(storagenodedict, True)
    return params
def match(j, args, params, tags, tasklet):
    """Unconditional match: this tasklet applies to every document."""
    return True
|
import numpy as np
import pyccl as ccl
def test_hodcl():
    """Benchmark test: HOD halo-model angular power spectrum vs. a reference C_ell.

    Loads a precomputed linear P(k, z) and reference C_ell from
    benchmarks/data/, rebuilds the same prediction through CCL's halo-model
    machinery, and requires agreement to 0.5 %.
    """
    # With many thanks to Ryu Makiya, Eiichiro Komatsu
    # and Shin'ichiro Ando for providing this benchmark.
    # HOD params
    log10Mcut = 11.8
    log10M1 = 11.73
    sigma_Ncen = 0.15
    alp_Nsat = 0.77
    rmax = 4.39
    rgs = 1.17
    # Input power spectrum (tabulated on the benchmark's k and z grids)
    ks = np.loadtxt("benchmarks/data/k_hod.txt")
    zs = np.loadtxt("benchmarks/data/z_hod.txt")
    pks = np.loadtxt("benchmarks/data/pk_hod.txt")
    l_bm, cl_bm = np.loadtxt("benchmarks/data/cl_hod.txt",
                             unpack=True)

    # Set N(z): 2MRS redshift distribution
    def _nz_2mrs(z):
        # From 1706.05422
        m = 1.31
        beta = 1.64
        x = z / 0.0266
        return x**m * np.exp(-x**beta)

    z1 = 1e-5
    z2 = 0.1
    z_arr = np.linspace(z1, z2, 1024)
    dndz = _nz_2mrs(z_arr)
    # CCL prediction
    # Make sure we use the same P(k): feed the tabulated spectrum directly.
    # (zs is reversed because CCL wants scale factors in increasing order.)
    cosmo = ccl.CosmologyCalculator(
        Omega_b=0.05,
        Omega_c=0.25,
        h=0.67,
        n_s=0.9645,
        A_s=2.0E-9,
        m_nu=0.00001,
        mass_split='equal',
        pk_linear={'a': 1./(1.+zs[::-1]),
                   'k': ks,
                   'delta_matter:delta_matter': pks[::-1, :]})
    cosmo.compute_growth()
    # Halo model setup: Delta=200-critical mass definition with the
    # Duffy08 / Tinker08 / Tinker10 ingredient combination.
    mass_def = ccl.halos.MassDef(200, 'critical')
    cm = ccl.halos.ConcentrationDuffy08(mass_def=mass_def)
    hmf = ccl.halos.MassFuncTinker08(mass_def=mass_def)
    hbf = ccl.halos.HaloBiasTinker10(mass_def=mass_def)
    hmc = ccl.halos.HMCalculator(mass_function=hmf, halo_bias=hbf,
                                 mass_def=mass_def)
    # HOD profile; benchmark masses are in Msun/h, CCL wants Msun, hence /h.
    prf = ccl.halos.HaloProfileHOD(
        mass_def=mass_def,
        concentration=cm,
        log10Mmin_0=np.log10(10.**log10Mcut/cosmo['h']),
        siglnM_0=sigma_Ncen,
        log10M0_0=np.log10(10.**log10Mcut/cosmo['h']),
        log10M1_0=np.log10(10.**log10M1/cosmo['h']),
        alpha_0=alp_Nsat,
        bg_0=rgs,
        bmax_0=rmax)
    prf2pt = ccl.halos.Profile2ptHOD()
    # P(k): halo-model power spectrum sampled on the input spline grid.
    a_arr, lk_arr, _ = cosmo.get_linear_power().get_spline_arrays()
    pk_hod = ccl.halos.halomod_Pk2D(cosmo, hmc, prf, prof_2pt=prf2pt,
                                    lk_arr=lk_arr, a_arr=a_arr)
    # C_ell for a number-counts tracer with unit bias (bias lives in the HOD).
    tr = ccl.NumberCountsTracer(cosmo, has_rsd=False, dndz=(z_arr, dndz),
                                bias=(z_arr, np.ones(len(dndz))))
    cl_hod = ccl.angular_cl(cosmo, tr, tr, ell=l_bm, p_of_k_a=pk_hod)
    # Agreement with the external benchmark to better than 0.5 %.
    assert np.all(np.fabs(cl_hod/cl_bm-1) < 0.005)
|
from src.AST import AST
indent_char = '| '
def addToClass(cls):
    """Return a decorator that installs the decorated function on *cls*.

    Used below to attach a ``printTree`` method to each AST node class.
    The function is returned unchanged so it can be stacked or reused.
    """
    def _attach(fn):
        setattr(cls, fn.__name__, fn)
        return fn
    return _attach
class TreePrinter:
    """Registers a ``printTree`` renderer on every AST node class.

    Each method below is attached to its AST class via ``addToClass`` at
    class-creation time, so TreePrinter itself is never instantiated.
    Defining several methods with the same name is intentional: each
    redefinition has already been installed on its AST class by the time
    the next one shadows it here.  Depth is rendered as ``indent``
    repetitions of the module-level ``indent_char``.
    """

    @addToClass(AST.Node)
    def printTree(self, indent=0):
        # Fallback: every concrete node type must provide its own override.
        raise Exception("printTree not defined in class " + self.__class__.__name__)

    @addToClass(AST.Program)
    def printTree(self, indent=0):
        # A program renders as the concatenation of its instructions.
        result = ""
        result += self.instructions.printTree(indent)
        return result

    @addToClass(AST.Instructions)
    def printTree(self, indent=0):
        result = ""
        for i in self.instructions:
            result += i.printTree(indent)
        return result

    @addToClass(AST.ContinueInstruction)
    def printTree(self, indent=0):
        return indent * indent_char + "CONTINUE\n"

    @addToClass(AST.BreakInstruction)
    def printTree(self, indent=0):
        return indent * indent_char + "BREAK\n"

    @addToClass(AST.Constant)
    def printTree(self, indent=0):
        result = indent * indent_char
        result += str(self.value)
        return result + "\n"

    @addToClass(AST.Variable)
    def printTree(self, indent=0):
        result = indent * indent_char
        result += self.name + '\n'
        return result

    @addToClass(AST.Assignment)
    def printTree(self, indent=0):
        # "=" header, then target and value one level deeper.
        result = indent * indent_char + "=\n"
        result += self.variable.printTree(indent + 1)
        result += self.expression.printTree(indent + 1)
        return result

    @addToClass(AST.MatrixElement)
    def printTree(self, indent=0):
        # REF node: matrix name followed by row and column index expressions.
        result = indent_char * indent + "REF\n"
        result += indent_char * (indent + 1) + self.variable + '\n'
        result += self.row.printTree(indent + 1)
        result += self.column.printTree(indent + 1)
        return result

    @addToClass(AST.ZerosInitialization)
    def printTree(self, indent=0):
        result = indent_char * indent
        result += 'ZEROS\n'
        result += self.expression.printTree(indent + 1)
        return result

    @addToClass(AST.EyeInitialization)
    def printTree(self, indent=0):
        result = indent_char * indent
        result += 'EYE\n'
        result += self.expression.printTree(indent + 1)
        return result

    @addToClass(AST.OnesInitialization)
    def printTree(self, indent=0):
        result = indent_char * indent
        result += 'ONES\n'
        result += self.expression.printTree(indent + 1)
        return result

    @addToClass(AST.MatrixAssignment)
    def printTree(self, indent=0):
        result = indent * indent_char + "=\n"
        result += indent_char * (indent + 1) + self.variable.printTree()
        result += self.expression_list.printTree(indent + 1)
        return result

    @addToClass(AST.ListsOfExpressions)
    def printTree(self, indent=0):
        result = indent * indent_char + "LISTS\n"
        for e in self.expression_lists:
            result += e.printTree(indent + 1)
        return result

    @addToClass(AST.ListOfExpressions)
    def printTree(self, indent=0):
        # result = indent * indent_char + "LIST\n"
        # 0 * indent_char is always "" — the list adds no header of its own,
        # its elements render at the caller's indent level.
        result = 0 * indent_char
        for e in self.expression_list:
            result += e.printTree(indent)
        return result

    @addToClass(AST.NegUnaryExpression)
    def printTree(self, indent=0):
        result = indent_char * indent
        result += "-" + '\n'
        result += self.expression.printTree(indent + 1)
        return result

    @addToClass(AST.TransUnaryExpression)
    def printTree(self, indent=0):
        result = indent_char * indent
        result += "TRANSPOSE" + '\n'
        result += self.expression.printTree(indent + 1)
        return result

    @addToClass(AST.BinaryExpression)
    def printTree(self, indent=0):
        # Operator header, then left and right operands one level deeper.
        result = indent_char * indent
        result += self.operator + '\n'
        result += self.expression_left.printTree(indent + 1)
        result += self.expression_right.printTree(indent + 1)
        return result

    @addToClass(AST.CompoundAssignment)
    def printTree(self, indent=0):
        # e.g. "+=", "-=" — the operator itself is the header line.
        result = indent * indent_char + self.operator + "\n"
        result += indent_char * (indent + 1) + self.variable.printTree()
        result += self.expression.printTree(indent + 1)
        return result

    @addToClass(AST.ForInstruction)
    def printTree(self, indent=0):
        result = indent * indent_char + "FOR\n"
        result += indent_char * (indent + 1) + self.variable.printTree()
        result += indent_char * (indent + 1) + "RANGE\n"
        result += self.start.printTree(indent + 2)
        result += self.end.printTree(indent + 2)
        result += self.instruction.printTree(indent + 1)
        return result

    @addToClass(AST.CompoundInstruction)
    def printTree(self, indent=0):
        # A braces block renders transparently at the same indent.
        result = self.instructions.printTree(indent)
        return result

    @addToClass(AST.PrintInstructions)
    def printTree(self, indent=0):
        result = indent * indent_char + "PRINT\n"
        result += self.expressions_list.printTree(indent + 1)
        return result

    # TODO: move the display logic out of here
    @addToClass(AST.IfInstruction)
    def printTree(self, indent=0):
        result = indent * indent_char + "IF\n"
        result += self.condition.printTree(indent + 1)
        result += indent * indent_char + "THEN\n"
        result += self.instruction.printTree(indent + 1)
        return result

    @addToClass(AST.IfElseInstruction)
    def printTree(self, indent=0):
        result = indent * indent_char + "IF\n"
        result += self.condition.printTree(indent + 1)
        result += indent * indent_char + "THEN\n"
        result += self.instruction.printTree(indent + 1)
        result += indent * indent_char + "ELSE\n"
        result += self.else_instruction.printTree(indent + 1)
        return result

    @addToClass(AST.WhileInstruction)
    def printTree(self, indent=0):
        result = indent * indent_char + "WHILE\n"
        result += self.condition.printTree(indent + 1)
        result += self.instruction.printTree(indent + 1)
        return result

    @addToClass(AST.ReturnInstruction)
    def printTree(self, indent=0):
        result = indent * indent_char + "RETURN\n"
        result += self.expression.printTree(indent + 1)
        return result
|
# User-facing message templates for the bunq budget bot.
WELCOME = "Hello there! You can use my funky functionality to be even more happy with bunq!"
# Placeholders: spent amount, budget total, and a trailing qualifier —
# presumably the budget's name or period; confirm against the caller.
UPDATE = "You spent {} Euro of your {} budget {}"
CANCEL = "You canceled the process. Will delete all information."
INVALID_INPUT = "Some of the information you gave were incorrect. Try again :-("
INVALID_INPUT_NUMBER = "Invalid input. Please enter a number."
NO_PERMISSION = "You don't have permission to contact this bot!"
ERROR = "Sorry, but an error occurred on my side."
# Messages guiding the budget-creation conversation flow.
CREATE_START = "You want to create a new Budget! Great! Let's get going right away"
CREATE_NAME = "Please enter a name for the new Budget"
CREATE_IBAN = "Next, select the accounts, which the Budget should monitor and click 'Done' when " \
              "you're finished."
CREATE_DURATION = "How many days should the new Budget cover?"
CREATE_DURATION_MORE = "Enter a number for how many days the Budget should cover"
CREATE_FINISH = "Congratulations! You created a new Budget!"
|
"""Fully Kiosk Browser switch."""
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Fully Kiosk Browser switches from a config entry."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    # One batched call instead of the original three separate calls;
    # False = do not update before add (the coordinator already holds data).
    async_add_entities(
        [
            FullyScreenSaverSwitch(hass, coordinator),
            FullyMaintenanceModeSwitch(hass, coordinator),
            FullyKioskLockSwitch(hass, coordinator),
        ],
        False,
    )
class FullySwitch(CoordinatorEntity, SwitchEntity):
    """Representation of a generic Fully Kiosk Browser switch entity."""

    def __init__(self, hass, coordinator):
        """Initialize the switch."""
        # BUG FIX: CoordinatorEntity.__init__ was never invoked, so any
        # setup the base class performs was silently skipped.  Call it and
        # keep the explicit attribute assignment for compatibility with the
        # original behavior.
        super().__init__(coordinator)
        self.coordinator = coordinator
        self.hass = hass
        self._name = ""
        self._unique_id = ""

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def device_info(self):
        """Return the device info, keyed off the coordinator's device data."""
        return {
            "identifiers": {(DOMAIN, self.coordinator.data["deviceID"])},
            "name": self.coordinator.data["deviceName"],
            "manufacturer": self.coordinator.data["deviceManufacturer"],
            "model": self.coordinator.data["deviceModel"],
            "sw_version": self.coordinator.data["appVersionName"],
        }

    @property
    def unique_id(self):
        """Return the unique id."""
        return self._unique_id

    async def async_added_to_hass(self):
        """Connect to dispatcher listening for entity data notifications."""
        # NOTE(review): recent CoordinatorEntity versions register this
        # listener themselves; kept to preserve the original behavior —
        # confirm against the targeted Home Assistant release before removing.
        self.async_on_remove(
            self.coordinator.async_add_listener(self.async_write_ha_state)
        )

    async def async_update(self):
        """Update Fully Kiosk Browser entity."""
        await self.coordinator.async_request_refresh()
class FullyScreenSaverSwitch(FullySwitch):
    """Switch exposing the Fully Kiosk Browser screensaver state."""

    def __init__(self, hass, coordinator):
        """Initialize the screensaver switch."""
        super().__init__(hass, coordinator)
        device_name = coordinator.data["deviceName"]
        device_id = coordinator.data["deviceID"]
        self._name = f"{device_name} Screensaver"
        self._unique_id = f"{device_id}-screensaver"

    @property
    def is_on(self):
        """Return if switch is on."""
        data = self.coordinator.data
        if not data:
            return None
        if data["appVersionCode"] < 784:
            # Older app builds expose the state via the active fragment name.
            return data["currentFragment"] == "screensaver"
        return data["isInScreensaver"]

    async def async_turn_on(self, **kwargs):
        """Turn on the screensaver."""
        await self.coordinator.fully.startScreensaver()
        await self.coordinator.async_refresh()

    async def async_turn_off(self, **kwargs):
        """Turn off the screensaver."""
        await self.coordinator.fully.stopScreensaver()
        await self.coordinator.async_refresh()
class FullyMaintenanceModeSwitch(FullySwitch):
    """Switch exposing the Fully Kiosk Browser locked (maintenance) mode."""

    def __init__(self, hass, coordinator):
        """Initialize the maintenance mode switch."""
        super().__init__(hass, coordinator)
        device_name = coordinator.data["deviceName"]
        device_id = coordinator.data["deviceID"]
        self._name = f"{device_name} Maintenance Mode"
        self._unique_id = f"{device_id}-maintenance"

    @property
    def is_on(self):
        """Return True while maintenance mode is active."""
        return self.coordinator.data["maintenanceMode"]

    async def async_turn_on(self, **kwargs):
        """Enable maintenance (locked) mode, then refresh state."""
        fully = self.coordinator.fully
        await fully.enableLockedMode()
        await self.coordinator.async_refresh()

    async def async_turn_off(self, **kwargs):
        """Disable maintenance (locked) mode, then refresh state."""
        fully = self.coordinator.fully
        await fully.disableLockedMode()
        await self.coordinator.async_refresh()
class FullyKioskLockSwitch(FullySwitch):
    """Representation of a Fully Kiosk Browser kiosk lock switch."""

    def __init__(self, hass, coordinator):
        """Initialize the kiosk lock switch."""
        super().__init__(hass, coordinator)
        self._name = f"{coordinator.data['deviceName']} Kiosk Lock"
        self._unique_id = f"{coordinator.data['deviceID']}-kiosk"

    @property
    def is_on(self):
        """Return if kiosk lock is on."""
        return self.coordinator.data["kioskLocked"]

    async def async_turn_on(self, **kwargs):
        """Turn on kiosk lock."""
        await self.coordinator.fully.lockKiosk()
        await self.coordinator.async_refresh()

    async def async_turn_off(self, **kwargs):
        """Turn off kiosk lock."""
        await self.coordinator.fully.unlockKiosk()
        await self.coordinator.async_refresh()
|
# Load packages
import os
import pandas as pd
import numpy as np
# This line is needed to display plots inline in Jupyter Notebook
#matplotlib inline
# Required for basic python plotting functionality
import matplotlib.pyplot as plt
# Required for formatting dates later in the case
import datetime
import matplotlib.dates as mdates
# Required to display image inline
#from IPython.display import Image
# Advanced plotting functionality with seaborn
import seaborn as sns
sns.set(style="whitegrid") # can set style depending on how you'd like it to look
#import statsmodels.api as sm
#from statsmodels.formula.api import ols
#import statsmodels
#from scipy import stats
#from pingouin import pairwise_ttests #this is for performing the pairwise tests
#import warnings
#warnings.filterwarnings("ignore") # Suppress all warnings
# Load the prepared school-level dataframe; column 0 holds the row index.
school_df=pd.read_csv('dashboard_school_df.csv', index_col=0)
# Pairwise-relationship grid: scatter plots above the diagonal, density
# contours below it, and a per-variable KDE curve on the diagonal.
g = sns.PairGrid(school_df)
g.map_upper(sns.scatterplot)
g.map_lower(sns.kdeplot)
g.map_diag(sns.kdeplot, lw=3, legend=False)
plt.savefig('pairgrid.png')
|
import discord
import asyncio
import random
import time
import sys
import os
import random
import aiohttp
useproxies = sys.argv[6]
if useproxies == 'True':
proxy_list = open("proxies.txt").read().splitlines()
proxy = random.choice(proxy_list)
con = aiohttp.ProxyConnector(proxy="http://"+proxy)
client = discord.Client(connector=con)
else:
client = discord.Client()
token = sys.argv[1]
tokenno = sys.argv[2]
textchan = sys.argv[3]
allchan = sys.argv[4]
SERVER = sys.argv[5]
#fuck i'm stupid
#i had asc = "" up here instead of in the loop
@client.event
async def on_ready():
txtchan = client.get_channel(textchan)
if allchan == 'true': #wew no sleep
while not client.is_closed:
for c in client.get_server(SERVER).channels:
if c.type != discord.ChannelType.text:
continue
myperms = c.permissions_for(client.get_server(SERVER).get_member(client.user.id))
if not myperms.send_messages:
continue
asc = ''
for x in range(1999):
num = random.randrange(13000)
asc = asc + chr(num)
try:
await client.send_message(c, asc)
except Exception:
return ''
else:
while not client.is_closed:
asc = ''
for x in range(1999):
num = random.randrange(13000)
asc = asc + chr(num)
try:
await client.send_message(txtchan, asc)
except Exception:
return ''
try:
client.run(token, bot=False)
except Exception as c:
print (c)
|
class MotherBoard:
    """Thin model wrapping one motherboard record from an API JSON payload."""

    def __init__(self, json=None):
        """Extract name and image URL from *json* (a dict-like payload).

        BUG FIX: the original crashed with AttributeError when called with
        the default ``json=None``; a missing payload now yields an empty
        record (both attributes None).  Note the parameter name shadows the
        stdlib ``json`` module — kept for caller compatibility.
        """
        data = json if json is not None else {}
        self.name = data.get('Text', None)
        self.image = data.get('ImageURL', None)
|
#!/usr/bin/env python
# -*- coding: gb2312 -*-
#
#
# 2.py
#
# Copyright 2020 天琼懵 <天琼懵@DESKTOP-03B3450>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import sys
import requests
import json
import yaml
#import oss2
#from urllib.parse import urlparse
from datetime import datetime, timedelta, timezone
#from urllib3.exceptions import InsecureRequestWarning
# Get the current UTC time and format it as Beijing time.
def getTimeStr():
    """Return the current Beijing time (UTC+8) as 'YYYY-mm-dd HH:MM:SS'."""
    # datetime.utcnow() returns a naive timestamp and is deprecated; build
    # an aware UTC timestamp directly and convert it to UTC+8.
    bj_dt = datetime.now(timezone.utc).astimezone(timezone(timedelta(hours=8)))
    return bj_dt.strftime("%Y-%m-%d %H:%M:%S")
# Print a debug line and flush the buffer immediately.
def log(content):
    """Print *content* prefixed with a Beijing-time timestamp, flushing stdout."""
    print("{} {}".format(getTimeStr(), content))
    sys.stdout.flush()
# Debug mode: when True, warnings for unverified TLS requests are silenced
# (elsewhere, verify=not debug disables certificate checking).
debug = False
if debug:
    # NOTE(review): InsecureRequestWarning's import is commented out above —
    # enabling debug would raise NameError here; confirm before use.
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Endpoint registry; only the CAS login URL is configured.
apies={}
apies['login-url']='https://authserver.hactcm.edu.cn/authserver/login?service=https%3A%2F%2Fhactcm.cpdaily.com%2Fportal%2Flogin'
#apis['host']=
#apis['host']=
def getSession(loginUrl):
    """Log in through the campus CAS endpoint and return an authenticated
    requests.Session, or None when the login yields no cookies."""
    params = {
        'login_url': loginUrl,
        # With a correct student id and password the two captcha fields
        # can stay empty.
        'needcaptcha_url': '',
        'captcha_url': '',
        'username': '201818****',  # student id (placeholder)
        'password': '******'       # password (placeholder)
    }
    cookies = {}
    # Simulated login via the open login API exposed by a previous project.
    # NOTE(review): this posts the real username/password to an external
    # third-party host over plain HTTP — a serious credential-exposure risk.
    res = requests.post('http://www.zimo.wiki:8080/wisedu-unified-login-api-v1.0/api/login', params, verify=not debug)
    print(res)
    cookieStr = str(res.json()['cookies'])
    log(cookieStr)
    if cookieStr == 'None':
        log(res.json())
        return None
    # Parse the "name=value; name=value" cookie string into a dict.
    for line in cookieStr.split(';'):
        name, value = line.strip().split('=', 1)
        cookies[name] = value
    session = requests.session()
    session.cookies = requests.utils.cookiejar_from_dict(cookies)
    return session
# Form login; needs per-deployment adjustment.
def queryForm(session, apis):
    """Fetch the newest pending collector form for the logged-in student.

    Returns a dict with collectWid/formWid/schoolTaskWid/form on success,
    or an error string when no pending form exists.
    """
    host ='hactcm.cpdaily.com'
    # Mobile-app User-Agent so the campus portal serves the app API.
    headers = {
        'Accept': 'application/json, text/plain, */*',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.4; OPPO R11 Plus Build/KTU84P) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/33.0.0.0 Safari/537.36 yiban/8.1.11 cpdaily/8.1.11 wisedu/8.1.11',
        'content-type': 'application/json',
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Language': 'zh-CN,en-US;q=0.8',
        'Content-Type': 'application/json;charset=UTF-8'
    }
    queryCollectWidUrl = 'https://{host}/wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList'.format(host=host)
    params = {
        'pageSize': 6,
        'pageNumber': 1
    }
    if 'hactcm' in host:
        # Touch the CAS login URL first so the session cookies are refreshed.
        session.get("https://authserver.hactcm.edu.cn/authserver/login?service=https%3A%2F%2Fhactcm.cpdaily.com%2Fportal%2Flogin")
    res = session.post(queryCollectWidUrl, headers=headers, data=json.dumps(params), verify=not debug)
    log("res.json 前")
    log(session)
    print(res.json())
    if len(res.json()['datas']['rows']) < 1:
        # No pending form; the caller receives this string instead of a dict.
        return ("查询失败123")
    # Newest pending collector: its wid and the backing form's wid.
    collectWid = res.json()['datas']['rows'][0]['wid']
    formWid = res.json()['datas']['rows'][0]['formWid']
    detailCollector = 'https://{host}/wec-counselor-collector-apps/stu/collector/detailCollector'.format(host=host)
    res = session.post(url=detailCollector, headers=headers,data=json.dumps({"collectorWid": collectWid}), verify=not debug)
    schoolTaskWid = res.json()['datas']['collector']['schoolTaskWid']
    getFormFields = 'https://{host}/wec-counselor-collector-apps/stu/collector/getFormFields'.format(host=host)
    res = session.post(url=getFormFields, headers=headers, data=json.dumps({"pageSize": 100, "pageNumber": 1, "formWid": formWid, "collectorWid": collectWid}), verify=not debug)
    form = res.json()['datas']['rows']
    return {'collectWid': collectWid, 'formWid': formWid, 'schoolTaskWid': schoolTaskWid, 'form': form}
# begin: top-level driver — log in, then query the newest pending form.
log('当前用户:'+'first' )
apis=apies
log('脚本开始执行。。。')
log('开始模拟登陆。。。')
session = getSession(apis['login-url'])
log(session)
if session != None:
    log('模拟登陆成功。。。')
    log('正在查询最新待填写问卷。。。')
    # params is either the form-description dict or an error string.
    params = queryForm(session, apis)
    print(params)
|
import torch

# Prefer the GPU when one is visible to this process, else fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
number = torch.cuda.device_count()
print(number)
print(device)
# torch.Tensor(3, 4) allocates a 3x4 float tensor with uninitialized values.
tensor = torch.Tensor(3, 4)
# BUG FIX: the original called ``tensor.cuda(0)`` and discarded the result —
# .cuda() returns a *copy*, so the statement was a no-op, and it raised on
# CPU-only hosts.  Move to the selected device and rebind instead.
tensor = tensor.to(device)
# Generated by Django 3.0.5 on 2021-03-22 00:08
from django.db import migrations, models
def _catalog_model(name, verbose_name):
    """Build the CreateModel operation shared by every catalog table.

    All six tables in this initial migration have byte-identical field
    lists; only the model name and verbose_name differ, so the repeated
    10-line field block is factored out here.  The resulting operations
    are equivalent to the originally generated ones.
    """
    return migrations.CreateModel(
        name=name,
        fields=[
            ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ('Titulo', models.CharField(max_length=100)),
            ('Numero', models.CharField(default='null', max_length=10)),
            ('Contenido', models.TextField()),
            ('Estado', models.BooleanField()),
            ('imagen', models.ImageField(default='null', upload_to='Article')),
        ],
        options={
            'verbose_name': verbose_name,
        },
    )


class Migration(migrations.Migration):
    """Initial schema: six structurally identical catalog tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        # NOTE(review): 'Categoria' for the Articulo model looks like a
        # copy/paste slip (every other verbose_name matches its model);
        # preserved as generated — confirm before changing.
        _catalog_model('Articulo', 'Categoria'),
        _catalog_model('Despedida_soltera', 'Despedida_soltera'),
        _catalog_model('Jugetes', 'Jugete'),
        _catalog_model('Lenceria_Hombres', 'Lenceria_Hombre'),
        _catalog_model('Lenceria_Mujeres', 'Lenceria_Mujere'),
        _catalog_model('Lubricante_cosmeticos', 'Lubricante_cosmetico'),
    ]
|
"""
Simple Caesar cipher implementation
"""
import unittest
def encrypt(message, shift_n):
    """
    Encrypts a message via shifting position of each letter by shift_n
    """
    # Shift every character's ordinal by shift_n; mod 256 keeps the result
    # inside the extended-ASCII code-point range (so shift 256 == identity).
    return ''.join(chr((ord(ch) + shift_n) % 256) for ch in message)
def _encrypt_helper(letter, shift_n):
"""
Shifts integer ordinal position of a letter by shift_n
"""
return ord(letter) + shift_n
class SubstitutionTestCase(unittest.TestCase):
    """Tests for encrypt(): known shift-by-1 pairs plus the identity shift."""

    # message -> expected ciphertext for shift_n == 1.  Punctuation and
    # spaces shift too (e.g. ' ' -> '!', '.' -> '/').
    shift_1 = {
        "carl": 'dbsm',
        "Carl": 'Dbsm',
        "This is a sentence.": "Uijt!jt!b!tfoufodf/",
        "Encrypt me!": "Fodszqu!nf\"",
    }
    # The cipher works mod 256, so a shift of 256 maps each message to itself.
    shift_256 = {k: k for k in shift_1}

    def test_shift_by_256(self):
        """A full-wrap shift of 256 must leave every message unchanged."""
        shift_n = 256
        for message in self.shift_256:
            correct = self.shift_256[message]
            self.assertEqual(encrypt(message, shift_n), correct)

    def test_shift_by_1(self):
        """Each known plaintext maps to its shift-by-1 ciphertext."""
        shift_n = 1
        for message in self.shift_1:
            correct = self.shift_1[message]
            self.assertEqual(encrypt(message, shift_n), correct)
if __name__ == '__main__':
    # Run the test suite when executed as a script.
    unittest.main()
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.nn.functional as F
from torch.autograd import Variable
import distiller.quantization as quantization
from distiller.apputils.checkpoint import load_checkpoint
import torchvision
import torchvision.transforms as transforms
import numpy
from bitstring import BitArray
import math
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _pair
import torch.autograd.function as Function
###############################################
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from torch.nn import init
from torch._ops import ops
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _single, _pair, _triple
#from torch.nn.intrinsic import ConvReLU2d
from torch._jit_internal import List
#from ..._jit_internal import List
#from caffe2.python.schema import List
# torch.device('cpu')
######################################## Pre-Processing Cifar 10 ###############################
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
############################## Test 2 ################################
#####################################################################################################
class _ConvNd(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros'):
super(_ConvNd, self).__init__()
if padding_mode != 'zeros':
raise NotImplementedError(
"Currently only zero-padding is supported by quantized conv")
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = False
self.output_padding = 0
self.groups = groups
self.padding_mode = padding_mode
# Initialize as NCHW. set_weight will internally transpose to NHWC.
qweight = torch._empty_affine_quantized(
[out_channels, in_channels // self.groups] + list(kernel_size),
scale=1, zero_point=0, dtype=torch.qint8)
bias_float = (
torch.zeros(out_channels, dtype=torch.float) if bias else None)
self.set_weight_bias(qweight, bias_float)
self.scale = 1.0
self.zero_point = 0
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}, scale={scale}, zero_point={zero_point}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias() is None:
s += ', bias=False'
return s.format(**self.__dict__)
# ===== Serialization methods =====
# The special consideration here is that we have to unpack the weights into
# their regular QTensor form for serialization. Packed weights should not
# live outside the process in which they were created, rather they should be
# derived from the QTensor weight.
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(_ConvNd, self)._save_to_state_dict(destination, prefix, keep_vars)
(w, b) = self._weight_bias()
destination[prefix + 'weight'] = w
destination[prefix + 'scale'] = torch.tensor(self.scale)
destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)
destination[prefix + 'bias'] = b
    @torch.jit.export
    def __getstate__(self):
        """Return a plain tuple of config, unpacked weight/bias and qparams.

        Only supported under TorchScript; eager ``torch.save`` of packed
        params is rejected explicitly (see the linked issue).
        """
        if not torch.jit.is_scripting():
            raise RuntimeError(
                'torch.save() is not currently supported for quantized modules.'
                ' See https://github.com/pytorch/pytorch/issues/24045.'
                ' Please use state_dict or torch.jit serialization.')
        (w, b) = self._weight_bias()
        # Field order here must match the positional indexing in __setstate__.
        return (
            self.in_channels,
            self.out_channels,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            self.transposed,
            self.output_padding,
            self.groups,
            self.padding_mode,
            w,
            b,
            self.scale,
            self.zero_point,
            self.training
        )
# ===== Deserialization methods =====
# Counterpart to the serialization methods, we must pack the serialized
# QTensor weight into its packed format for use by the FBGEMM ops.
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Re-pack weight/bias and restore qparams from a state dict.

        The quantization-specific entries are consumed (popped) before
        delegating, and the base loader is invoked with strict=False since
        the popped keys would otherwise be reported as missing.
        """
        self.set_weight_bias(
            state_dict[prefix + 'weight'], state_dict[prefix + 'bias'])
        state_dict.pop(prefix + 'weight')
        state_dict.pop(prefix + 'bias')
        self.scale = float(state_dict[prefix + 'scale'])
        state_dict.pop(prefix + 'scale')
        self.zero_point = int(state_dict[prefix + 'zero_point'])
        state_dict.pop(prefix + 'zero_point')
        super(_ConvNd, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, False, missing_keys,
            unexpected_keys, error_msgs)
    @torch.jit.export
    def __setstate__(self, state):
        """Restore from the tuple produced by __getstate__ (same field order)."""
        self.in_channels = state[0]
        self.out_channels = state[1]
        self.kernel_size = state[2]
        self.stride = state[3]
        self.padding = state[4]
        self.dilation = state[5]
        self.transposed = state[6]
        self.output_padding = state[7]
        self.groups = state[8]
        self.padding_mode = state[9]
        # Re-packs the serialized QTensor weight for the FBGEMM ops.
        self.set_weight_bias(state[10], state[11])
        self.scale = state[12]
        self.zero_point = state[13]
        self.training = state[14]
######################################
class Conv2d(_ConvNd):
    r"""Quantized 2D convolution.

    Weights are held in FBGEMM-packed form (via
    ``torch.ops.quantized.conv2d_prepack``); ``scale``/``zero_point``
    quantize the output activations.
    """
    _FLOAT_MODULE = nn.Conv2d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        # Normalize scalar arguments to 2-tuples, as nn.Conv2d does.
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(Conv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            groups, bias, padding_mode)

    def _get_name(self):
        return 'QuantizedConv2d'

    def set_weight_bias(self, w, b):
        # type: (torch.Tensor, Optional[torch.Tensor]) -> None
        # (fixed: the original type comment had a stray '#' that made it
        # unparseable by type checkers / TorchScript.)
        self._packed_params = torch.ops.quantized.conv2d_prepack(
            w, b, self.stride, self.padding, self.dilation, self.groups)

    def _weight_bias(self):
        """Unpack and return ``(weight, bias)`` as regular tensors."""
        return torch.ops.quantized.conv2d_unpack(self._packed_params)

    def weight(self):
        (w, _) = torch.ops.quantized.conv2d_unpack(self._packed_params)
        return w

    def bias(self):
        (_, b) = torch.ops.quantized.conv2d_unpack(self._packed_params)
        return b

    def forward(self, input):
        """Apply the quantized convolution to a quantized NCHW input."""
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        # Fully-qualified torch.ops for consistency with the prepack/unpack
        # calls above (the bare `ops` alias relied on a module-level import).
        return torch.ops.quantized.conv2d(
            input, self._packed_params, self.stride, self.padding,
            self.dilation, self.groups, self.scale, self.zero_point)
##################################################################################################
################################
class Simplenet(nn.Module):
    """LeNet-style CNN for 32x32 RGB inputs (e.g. CIFAR-10), 10 classes.

    The first conv layer is the custom quantized ``Conv2d`` defined in this
    file; the remaining layers are standard float ``torch.nn`` modules.
    """

    def __init__(self):
        super(Simplenet, self).__init__()
        self.count = 1  # kept for backward compatibility with older debug code
        self.conv1 = Conv2d(3, 6, 5)       # custom quantized conv
        self.relu_conv1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.relu_conv2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.relu_fc1 = nn.ReLU()
        self.fc2 = nn.Linear(120, 84)
        self.relu_fc2 = nn.ReLU()
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return class log-probabilities of shape (N, 10)."""
        x = self.pool1(self.relu_conv1(self.conv1(x)))
        x = self.pool2(self.relu_conv2(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)  # flatten to (N, 400)
        x = self.relu_fc1(self.fc1(x))
        x = self.relu_fc2(self.fc2(x))
        x = self.fc3(x)
        return F.log_softmax(x, dim=1)
#########################################################################################################
################################## Loading Model and Quantization ###############################
# Build the float model and wrap it in DataParallel so that the checkpoint's
# 'module.'-prefixed keys line up with this model's state dict.
model = Simplenet()
model = torch.nn.DataParallel(model)
# device = 'cpu'
# model.cpu()
# Distiller post-training quantizer, driven by pre-collected activation stats.
quantizer = quantization.PostTrainLinearQuantizer(model, model_activation_stats='/home/soumyadeep/distiller/examples/classifier_compression/logs/2019.10.10-234320/configs/acts_quantization_stats.yaml')
# Dummy batch (4 CIFAR-sized images) used by prepare_model to trace shapes.
dummy_input = Variable(torch.randn(4, 3, 32, 32), requires_grad=True)
quantizer.prepare_model(dummy_input)
#quantizer.prepare_model()
# model.to(device)
# Load the previously saved quantized weights; strict=True so any key
# mismatch fails loudly instead of silently skipping parameters.
checkpoint = torch.load('/home/soumyadeep/distiller/examples/classifier_compression/logs/2019.10.10-234320/quantized_checkpoint.pth.tar')
model.load_state_dict(checkpoint['state_dict'], strict=True)
model = model.to(torch.device('cuda'))
print('<<<<<<<<<<<<<<<<< Model Loaded !!!! >>>>>>>>>>>>>>>>>>')
#################################################################################################
###################################### Prediction Part ########################################
def test_label_predictions(model, device, test_loader):
model.eval()
actuals = []
predictions = []
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
prediction = output.argmax(dim=1, keepdim=True)
actuals.extend(target.view_as(prediction))
predictions.extend(prediction)
correct += prediction.eq(target.view_as(prediction)).sum().item()
return [i.item() for i in actuals], [i.item() for i in predictions], correct
#########################################################################################
########################################## Accuracy ####################################
# Evaluate on the test set and report top-1 accuracy as a percentage.
device_name = 'cuda'
actuals, predictions, correct = test_label_predictions(model, device_name, testloader)
total_num_data = len(testloader.dataset)
accuracy = correct / total_num_data * 100  # true division; percentage
print("--accuracy---", accuracy)
##################################################################################################
######################################### CODE END ############################################## |
import random

# Map gene ID -> genomic location, read from the ZFIN GFF3 annotation file.
d = {}
with open('./E_zfin_gene_alias_2014.12.08.gff3', 'r') as gff:
    for line in gff:
        if not line.startswith('#') and len(line) > 1:
            cols = line.rstrip().split('\t')
            chromosome = cols[0]
            # Unplaced Zv9 scaffolds keep their name; real chromosomes get
            # a UCSC-style 'chr' prefix.
            if not chromosome.startswith('Zv9'):
                chromosome = 'chr' + chromosome
            start, end = cols[3:5]
            # First attribute of column 9 is 'ID=<gene id>'.
            gid = cols[8].split(';')[0].split('=')[1]
            d[gid] = {'chromosome': chromosome,
                      'start': start,
                      'end': end}

# Attach a GO term to each gene seen in the GFF3 file; association lines
# for genes not in `d` are skipped (the original wrote them into a
# throwaway dict, which silently did nothing).
with open('./gene_association.zfin', 'r') as zfin:
    for line in zfin:
        if not line.startswith('!') and len(line) > 1:
            cols = line.rstrip().split('\t')
            if cols[1] in d:
                d[cols[1]]['go'] = cols[4]

# Write 10 files of 500 randomly sampled gene IDs each.
# list() is required: random.sample needs a sequence, and dict.keys()
# is only a view under Python 3.
pop = list(d.keys())
for i in range(10):
    s = random.sample(pop, 500)
    with open('./random{}.bed'.format(i), 'w') as fo:
        for k in s:
            fo.write('{}\n'.format(k))
|
from office365.sharepoint.base_entity import BaseEntity
class UserCustomAction(BaseEntity):
    """SharePoint user custom action entity.

    Placeholder: all behavior is inherited from BaseEntity.
    """
    pass
|
from socket import AF_INET
from pathlib import Path
from flask import render_template
from pyroute2 import IPRoute
from pr2modules.netlink.rtnl.rtmsg import rtmsg
from manager import app
from manager.structures import Route, Address, RTProto, RTScope
class RouteTable:
    '''Manipulates entries in the kernel routing tables.

    All queries and changes go through the main IPv4 table (254) over an
    rtnetlink socket (pyroute2 IPRoute); static-route changes are also
    persisted to per-interface files via save_to_disk.
    '''
    @staticmethod
    def static_routes() -> set:
        '''Returns static routes (proto BOOT or STATIC) from the main table'''
        with IPRoute() as ipr:
            routes = set()
            for route in ipr.get_routes(table=254, family=AF_INET):
                if route['proto'] in [RTProto.BOOT, RTProto.STATIC]:
                    routes.add(RouteTable.parse_route(route))
            return routes
    @staticmethod
    def onlink_routes() -> set:
        '''Returns onlink routes.

        NOTE(review): this filters proto=RTProto.KERNEL, i.e. routes the
        kernel created for directly connected subnets -- confirm this is
        the intended meaning of "onlink" here.
        '''
        with IPRoute() as ipr:
            routes = set()
            for route in ipr.get_routes(table=254, family=AF_INET,
                                        proto=RTProto.KERNEL):
                routes.add(RouteTable.parse_route(route))
            return routes
    @staticmethod
    def add_route(route: Route):
        '''Adds a new static route and persists the interface's route file'''
        with IPRoute() as ipr:
            ipr.route('add', dst=route.dst, mask=route.prefix,
                      gateway=route.gateway)
            # Resolve which interface now carries the destination, then
            # rewrite that interface's on-disk route configuration.
            ifname = RouteTable.dst_to_ifname(route.dst)
            RouteTable.save_to_disk(ifname)
    @staticmethod
    def delete_route(route: Route):
        '''Deletes the static route and re-persists the interface's file'''
        with IPRoute() as ipr:
            ipr.route('del', dst=route.dst, mask=route.prefix)
        RouteTable.save_to_disk(route.ifname)
    @staticmethod
    def route_exists(ipv4: str) -> bool:
        '''Checks if the static route exists (ipv4 given as "dst/prefix")'''
        return any(ipv4 == f'{route.dst}/{route.prefix}'
                   for route in RouteTable.static_routes())
    @staticmethod
    def dst_to_ifname(dst: str) -> str:
        '''Finds an interface name with a destination network'''
        with IPRoute() as ipr:
            # First matching route decides the interface.
            route = ipr.get_routes(dst=dst)[0]
            return RouteTable.parse_route(route).ifname
    @staticmethod
    def parse_route(route: rtmsg) -> Route:
        '''Parses an RTNL message into a Route, resolving the link name'''
        with IPRoute() as ipr:
            ifindex = route.get_attr('RTA_OIF')
            ifname = ipr.get_links(ifindex)[0].get_attr('IFLA_IFNAME')
        # Missing RTA_GATEWAY means a directly connected ("On-link") route.
        return Route(dst=route.get_attr('RTA_DST'),
                     prefix=route['dst_len'],
                     gateway=route.get_attr('RTA_GATEWAY', 'On-link'),
                     ifindex=ifindex,
                     ifname=ifname)
    @staticmethod
    def save_to_disk(ifname: str):
        '''Saves the current routes into file.

        NOTE(review): render_template requires an active Flask application
        context -- confirm callers provide one.
        '''
        routes = [route for route in RouteTable.static_routes()
                  if route.ifname == ifname]
        route_cfg = Path(app.config['ROUTE_CONF'] + ifname)
        content = render_template('route.cfg', routes=routes)
        route_cfg.write_text(content)
class NetworkInterfaces:
    '''Manipulates IPv4 address attached to a network device'''
    @staticmethod
    def addresses() -> set:
        '''Returns addresses attached to network devices'''
        with IPRoute() as ipr:
            found = set()
            for entry in ipr.get_addr(family=AF_INET, scope=RTScope.GLOBAL):
                found.add(Address(ip=entry.get_attr('IFA_ADDRESS'),
                                  prefix=entry['prefixlen'],
                                  ifindex=entry['index'],
                                  ifname=entry.get_attr('IFA_LABEL')))
            return found
    @staticmethod
    def interfaces() -> list:
        '''Returns names of network devices except loopback'''
        with IPRoute() as ipr:
            # Skip the first link (loopback).
            return [link.get_attr('IFLA_IFNAME')
                    for link in ipr.get_links()[1:]]
    @staticmethod
    def address_exists(ipv4: str) -> bool:
        '''Checks if the address exists (given as "ip/prefix")'''
        current = {f'{addr.ip}/{addr.prefix}'
                   for addr in NetworkInterfaces.addresses()}
        return ipv4 in current
    @staticmethod
    def add_address(address: Address):
        '''Attaches a new address to the network interface'''
        index = NetworkInterfaces.ifname_to_ifindex(address.ifname)
        with IPRoute() as ipr:
            ipr.addr('add', index=index, address=address.ip,
                     mask=address.prefix)
        NetworkInterfaces.save_to_disk(address.ifname)
    @staticmethod
    def delete_address(address: Address):
        '''Detaches the address from the network interface'''
        index = NetworkInterfaces.ifname_to_ifindex(address.ifname)
        with IPRoute() as ipr:
            ipr.addr('del', index=index, address=address.ip,
                     mask=address.prefix)
        NetworkInterfaces.save_to_disk(address.ifname)
    @staticmethod
    def ifname_to_ifindex(ifname):
        '''Finds an interface index with an interface name'''
        with IPRoute() as ipr:
            matches = ipr.link_lookup(ifname=ifname)
        return matches[0]
    @staticmethod
    def save_to_disk(ifname):
        '''Saves the current interface's addresses into configuration file'''
        relevant = [addr for addr in NetworkInterfaces.addresses()
                    if addr.ifname == ifname]
        target = Path(app.config['INTERFACE_CONF'] + ifname)
        rendered = render_template('interface.cfg', interface=ifname,
                                   addresses=relevant)
        target.write_text(rendered)
|
import unittest
from advent2020.day5.seat_identifier import SeatIdentifier
from parameterized import parameterized
class TestSeatIdentifier(unittest.TestCase):
    '''
    Boarding-pass reference cases (Advent of Code 2020, day 5):
    BFFFBBFRRR: row 70, column 7, seat ID 567.
    FFFBBBFRRR: row 14, column 7, seat ID 119.
    BBFFBBFRLL: row 102, column 4, seat ID 820.
    '''
    # Each parameterized tuple is (seat string, expected row).
    @parameterized.expand([("BFFFBBFRRR", 70), ("FFFBBBFRRR", 14), ("BBFFBBFRLL", 102)])
    def test_seat_identifier_return_expected_row(self, seat_id: str, expected_row: int):
        # Arrange, Act
        sut = SeatIdentifier(seat_id)
        # Assert
        self.assertEqual(sut.row, expected_row)
    # Each parameterized tuple is (seat string, expected column).
    @parameterized.expand([("BFFFBBFRRR", 7), ("FFFBBBFRRR", 7), ("BBFFBBFRLL", 4)])
    def test_seat_identifier_return_expected_column(self, seat_id: str,
                                                    expected_column: int):
        # Arrange, Act
        sut = SeatIdentifier(seat_id)
        # Assert
        self.assertEqual(sut.column, expected_column)
    # Each parameterized tuple is (seat string, expected seat ID).
    @parameterized.expand([("BFFFBBFRRR", 567), ("FFFBBBFRRR", 119),
                           ("BBFFBBFRLL", 820)])
    def test_seat_identifier_return_expected_id(self, seat_id: str, expected_id: int):
        # Arrange, Act
        sut = SeatIdentifier(seat_id)
        # Assert
        self.assertEqual(sut.id, expected_id)
|
import numpy as np
from colorama import init, Fore,Back
init()
class Board:
    """Fixed-size character grid rendered to the terminal row by row.

    The grid is ``rows`` x ``columns`` single-character cells, initialised
    to spaces. ``print_board`` renders a horizontal window of the grid;
    the total board width is assumed to be 500 columns
    (NOTE(review): hard-coded -- presumably matches ``columns``; confirm).
    """

    def __init__(self, rows, columns):
        self.__rows = rows
        self.__columns = columns
        self.str = ""  # scratch buffer reused by print_board
        # Build each row as its own list so cells mutate independently.
        # (The original leaked the row being built as attribute self.__new.)
        self.__matrix = [[" " for _ in range(columns)] for _ in range(rows)]

    def print_board(self, start_pos, screen_width):
        """Print columns [start_pos, start_pos + screen_width), one row per line.

        If the window would run past column 500, the right-most
        ``screen_width`` columns are printed instead.
        """
        if start_pos + screen_width < 500:
            first, last = start_pos, start_pos + screen_width
        else:
            first, last = 500 - screen_width, 500
        for i in range(self.__rows):
            for j in range(first, last):
                self.str += self.__matrix[i][j]
            print(self.str)
            self.str = ""

    def matrix_get(self):
        """Return the underlying grid (mutable; changes affect the board)."""
        return self.__matrix

    def matrix_set(self, matrix):
        """Replace the underlying grid wholesale."""
        self.__matrix = matrix
|
from nose.tools import eq_
from ..segments_added import segments_added
def test_segments_added():
    # Two revisions' text; the diff is expected to count 2 added segments.
    # NOTE(review): expected value taken as-is from the original test; the
    # exact segmentation rules live in ..segments_added.
    contiguous_segments_added = ["foobar {{herpaA}}", 'A[a]Aa[[Awu]]ta']
    eq_(segments_added(contiguous_segments_added), 2)
|
#from phonenumber_field.modelfields import PhoneNumberField
from django.db import models
from django.utils.translation import ugettext as _
# Create your models here.
from django.contrib.auth.models import (BaseUserManager, AbstractBaseUser)
class UserManager(BaseUserManager):
    """Manager for the custom User model; users are keyed by email."""
    def create_user(self, email, phone_number, nickname ,date_of_birth, password=None):
        """Create and persist a regular (non-business) user.

        Raises:
            ValueError: if ``email`` is falsy.
        """
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(
            email=self.normalize_email(email),
            date_of_birth=date_of_birth,
            nickname = nickname,
            phone_number = phone_number,
            #phoneNumber = phoneNumber,
        )
        #user.is_seller = False
        # Hashes the raw password (or marks it unusable when None).
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_bussiness_user(self, email, business_number, team, phone_number, nickname ,date_of_birth, password=None):
        """Create a business (seller) user with team and business number.

        NOTE(review): method name is misspelled ('bussiness'); kept as-is
        for backward compatibility with existing callers.
        """
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(
            email=self.normalize_email(email),
            date_of_birth=date_of_birth,
            nickname = nickname,
            phone_number = phone_number,
            team = team,
            business_number = business_number,
        )
        #user.is_seller = True
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, phone_number, nickname, date_of_birth, password):
        """Create an admin user (is_admin=True), e.g. for `createsuperuser`."""
        user = self.create_user(
            email,
            password=password,
            date_of_birth=date_of_birth,
            nickname = nickname,
            phone_number = phone_number,
            #phoneNumber = phoneNumber,
        )
        #user.is_seller = False
        user.is_admin = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser):
    """Custom user model authenticated by email (USERNAME_FIELD = 'email')."""
    email = models.EmailField(
        verbose_name='email',
        max_length=255,
        unique=True,
    )
    # Public display name; required and unique.
    nickname = models.CharField(
        max_length=20,
        null=False,
        unique=True
    )
    # City choices (Korean: Seoul, Busan, Daejeon, Daegu, Incheon, Gangneung).
    # NOTE(review): these choices are applied to business_number below, which
    # looks unintended for a "number" field -- confirm against the form.
    MODE_CHOICES = (
        ('서울', '서울'),
        ('부산', '부산'),
        ('대전', '대전'),
        ('대구', '대구'),
        ('인천', '인천'),
        ('강릉', '강릉'),
    )
    phone_number = models.CharField(max_length=14, null = False, unique = True)
    date_of_birth = models.DateField()
    business_number = models.CharField(max_length = 30, null = True, unique = False, choices = MODE_CHOICES)
    # NOTE(review): unique=True on a nullable team field allows only one user
    # per team name -- confirm this is intended.
    team = models.CharField(max_length = 10, null = True, unique = True)
    #seller_name = models.CharField(max_length = 30, null = True, unique = True)
    #phoneNumber = PhoneNumberField(_("phoneNumber"),null=False, blank = False, unique = True)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    #is_seller = models.BooleanField(null = True, default = True)
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['date_of_birth', 'nickname', 'phone_number']
    def __str__(self):
        return self.email
    def has_perm(self, perm, obj=None):
        # Every user is granted every object-level permission.
        return True
    def has_module_perms(self, app_label):
        # Every user can see every app's models in permission checks.
        return True
    @property
    def is_staff(self):
        """Django admin access mirrors the is_admin flag."""
        return self.is_admin
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import re
import itertools
def pywordcountplugin(text):
    """PyWordCount plugin entry point: strip email cruft before counting."""
    return adjust_for_mail(text)
def adjust_for_mail(text):
    """Apply each of the email-specific filters to *text* in turn."""
    for transform in (remove_headers, remove_quoted):
        text = transform(text)
    return text
def remove_headers(text):
    """Drop leading "Name: value" header lines.

    Lines are discarded from the top until the first line that does not
    look like a header; everything after that is kept unchanged.
    """
    header = re.compile(r"^[^:]+: ")
    body = itertools.dropwhile(header.match, text.split("\n"))
    return u"\n".join(body)
def remove_quoted(text):
    """Remove every line that begins with the quote marker '>'."""
    kept = [line for line in text.split("\n") if not line.startswith(">")]
    return u"\n".join(kept)
|
def factorial(n):
    """Return n! computed iteratively (returns 1 for n <= 1)."""
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
# Interactive driver: repeatedly prompt for an integer, print its factorial,
# and pause on the trailing input() until Enter is pressed.
# (The prompt strings are Korean: "factorial calculator" / "enter an
# integer" / "result"; non-integer input raises ValueError and exits.)
print("팩토리얼 값 계산 프로그램")
while 1:
    n = int(input("\n정수 입력: "))
    result = factorial(n)
    print("계산결과 %d! = %d" % (n,result))
    input()
|
from typing import Any, Optional
class BaseEmailBackend:
    """Type stub for Django's email backend base class (signatures only)."""
    fail_silently: Any = ...
    def __init__(self, fail_silently: bool = ..., **kwargs: Any) -> None: ...
    def open(self) -> None: ...   # open a connection if the backend needs one
    def close(self) -> None: ...  # close any open connection
    def __enter__(self) -> BaseEmailBackend: ...
    def __exit__(
        self, exc_type: None, exc_value: None, traceback: None
    ) -> None: ...
    def send_messages(self, email_messages: Any) -> None: ...
|
# Write the processed weapons' data properties into an OWL file via code:
# generate OWL/XML snippets into .txt files that can then be pasted into
# the ontology file directly.
# Generates: DataProperty declarations with their domain and range,
# Individual declarations, and class/object/data property assertions.
import os
import pandas as pd
from tqdm import tqdm
txt_file_1 = 'D:\文件夹汇总\项目\军事知识图谱\\bootstraping_extraction\Weapons\\txts\\Dataproperty_declaration.txt'
txt_file_2 = 'D:\文件夹汇总\项目\军事知识图谱\\bootstraping_extraction\Weapons\\txts\\Individual_declaration.txt'
txt_file_3 = 'D:\文件夹汇总\项目\军事知识图谱\\bootstraping_extraction\Weapons\\txts\\Class_assertion.txt'
txt_file_4 = 'D:\文件夹汇总\项目\军事知识图谱\\bootstraping_extraction\Weapons\\txts\\Objectproperty_assertion.txt'
txt_file_5 = 'D:\文件夹汇总\项目\军事知识图谱\\bootstraping_extraction\Weapons\\txts\\Dataproperty_domain.txt'
txt_file_6 = 'D:\文件夹汇总\项目\军事知识图谱\\bootstraping_extraction\Weapons\\txts\\Dataproperty_range.txt'
txt_file_7 = 'D:\文件夹汇总\项目\军事知识图谱\\bootstraping_extraction\Weapons\\txts\\Dataproperty_assertion.txt'
# One output stream per OWL section; all closed at the end of the script.
Dataproperty_declaration = open(txt_file_1,'w',encoding='utf-8')
Individual_declaration = open(txt_file_2,'w',encoding='utf-8')
Class_assertion = open(txt_file_3,'w',encoding='utf-8')
Objectproperty_assertion = open(txt_file_4,'w',encoding='utf-8')
Dataproperty_domain = open(txt_file_5,'w',encoding='utf-8')
Dataproperty_range = open(txt_file_6,'w',encoding='utf-8')
Dataproperty_assertion = open(txt_file_7,'w',encoding='utf-8')
# Known country/region names (with common abbreviations). A
# Country_of_origin value NOT in this list is treated as a brand-new
# individual that must also be declared and class-asserted.
nations = ('中国 中 丹麦 乌克兰 以色列 伊拉克 伊朗 俄罗斯 俄 保加利亚 利比亚 加拿大 加 卡塔尔 印度 印 印度尼西亚 印尼 '+
           '叙利亚 古巴 哥伦比亚 土耳其 委内瑞拉 巴基斯坦 巴勒斯坦 巴西 希腊 德国 意大利 挪威 新西兰 日本 日 朝鲜 柬埔寨 '+
           '比利时 沙特阿拉伯 法国 波兰 泰国 澳大利亚 瑞士 科威特 秘鲁 缅甸 罗马尼亚 美国 美 老挝 英国 荷兰 菲律宾 葡萄牙 '+
           '蒙古 西班牙 越南 阿富汗 阿根廷 韩国 韩 马来西亚 黎巴嫩').split()
files_directory = 'D:\文件夹汇总\项目\军事知识图谱\\bootstraping_extraction\Weapons\\types'
files = os.listdir(files_directory)
for file in tqdm(files):
    print(file)
    excel_path = files_directory + '\\' + file.strip()
    # astype(str) so that missing cells become the literal string 'nan',
    # which is filtered out below before emitting assertions.
    df = pd.read_excel(excel_path).astype(str)
    length = len(df)
    columns = list(df.columns)
    columns_num = len(columns)
    # Emit each DataProperty's declaration plus its domain (the weapon
    # class, derived from the spreadsheet file name) and range (xsd:string).
    # NOTE(review): property names are read from the first data row
    # (df.iat[0, i]), not from df.columns -- presumably the sheet's first
    # row holds the English property names; confirm the sheet layout.
    for i in range(columns_num):
        column_name = df.iat[0,i]
        if column_name not in ['Height','Length','Weight','Width','Country_of_origin','Name']:
            Dataproperty_declaration.write('    <Declaration>')
            Dataproperty_declaration.write('\n')
            Dataproperty_declaration.write('        <DataProperty IRI="#' + column_name + '"/>')
            Dataproperty_declaration.write('\n')
            Dataproperty_declaration.write('    </Declaration>')
            Dataproperty_declaration.write('\n')
            Dataproperty_declaration.write('\n')
            # file[:-5] strips the '.xlsx' extension -> OWL class name.
            Dataproperty_domain.write('    <DataPropertyDomain>')
            Dataproperty_domain.write('\n')
            Dataproperty_domain.write('        <DataProperty IRI="#' + column_name + '"/>')
            Dataproperty_domain.write('\n')
            Dataproperty_domain.write('        <Class IRI="#' + file[:-5] + '"/>')
            Dataproperty_domain.write('\n')
            Dataproperty_domain.write('    </DataPropertyDomain>')
            Dataproperty_domain.write('\n')
            Dataproperty_domain.write('\n')
            Dataproperty_range.write('    <DataPropertyRange>')
            Dataproperty_range.write('\n')
            Dataproperty_range.write('        <DataProperty IRI="#' + column_name + '"/>')
            Dataproperty_range.write('\n')
            Dataproperty_range.write('        <Datatype abbreviatedIRI="xsd:string"/>')
            Dataproperty_range.write('\n')
            Dataproperty_range.write('    </DataPropertyRange>')
            Dataproperty_range.write('\n')
            Dataproperty_range.write('\n')
        # else:
        #     if column_name not in ['Country_of_origin','Name']:
        #         fh2.write('    <DataPropertyDomain>')
        #         fh2.write('\n')
        #         fh2.write('        <DataProperty IRI="#' + column_name + '"/>')
        #         fh2.write('\n')
        #         fh2.write('        <Class IRI="#武器/设备"/>')
        #         fh2.write('\n')
        #         fh2.write('    </DataPropertyDomain>')
        #         fh2.write('\n')
        #         fh2.write('\n')
        #         fh2.write('    <DataPropertyRange>')
        #         fh2.write('\n')
        #         fh2.write('        <DataProperty IRI="#' + column_name + '"/>')
        #         fh2.write('\n')
        #         fh2.write('        <Datatype abbreviatedIRI="xsd:string"/>')
        #         fh2.write('\n')
        #         fh2.write('    </DataPropertyRange>')
        #         fh2.write('\n')
        #         fh2.write('\n')
    # Emit Individual declarations, both kinds of property assertions, and
    # the class<->individual assertions for every data row.
    for j in range(1,length):
        for k in range(columns_num):
            if df.iat[0,k] == 'Name':
                Individual_declaration.write('    <Declaration>')
                Individual_declaration.write('\n')
                Individual_declaration.write('        <NamedIndividual IRI="#' + df.iat[j,k] + '"/>')
                Individual_declaration.write('\n')
                Individual_declaration.write('    </Declaration>')
                Individual_declaration.write('\n')
                Individual_declaration.write('\n')
                Class_assertion.write('    <ClassAssertion>')
                Class_assertion.write('\n')
                Class_assertion.write('        <Class IRI="#' + file[:-5] + '"/>')
                Class_assertion.write('\n')
                Class_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,k] + '"/>')
                Class_assertion.write('\n')
                Class_assertion.write('    </ClassAssertion>')
                Class_assertion.write('\n')
                Class_assertion.write('\n')
            # Emit the ObjectProperty assertions linking a weapon to its
            # producing country (Produce / HasCountryOfOrigin, both ways).
            if df.iat[0,k] == 'Country_of_origin':
                if df.iat[j,k] not in nations:
                    # Unknown country value: declare it as a new individual
                    # of class 国家/地区 ("country/region") first.
                    Individual_declaration.write('    <Declaration>')
                    Individual_declaration.write('\n')
                    Individual_declaration.write('        <NamedIndividual IRI="#' + df.iat[j,k] + '"/>')
                    Individual_declaration.write('\n')
                    Individual_declaration.write('    </Declaration>')
                    Individual_declaration.write('\n')
                    Individual_declaration.write('\n')
                    Class_assertion.write('    <ClassAssertion>')
                    Class_assertion.write('\n')
                    Class_assertion.write('        <Class IRI="#国家/地区"/>')
                    Class_assertion.write('\n')
                    Class_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,k] + '"/>')
                    Class_assertion.write('\n')
                    Class_assertion.write('    </ClassAssertion>')
                    Class_assertion.write('\n')
                    Class_assertion.write('\n')
                    Objectproperty_assertion.write('    <ObjectPropertyAssertion>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <ObjectProperty IRI="#Produce"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,k] + '"/>')
                    Objectproperty_assertion.write('\n')
                    # '名称' is the Chinese column header for 'Name'.
                    name_index = df.columns.get_loc('名称')
                    Objectproperty_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,name_index] + '"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('    </ObjectPropertyAssertion>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('    <ObjectPropertyAssertion>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <ObjectProperty IRI="#HasCountryOfOrigin"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,name_index] + '"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,k] + '"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('    </ObjectPropertyAssertion>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('\n')
                else:
                    Objectproperty_assertion.write('    <ObjectPropertyAssertion>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <ObjectProperty IRI="#Produce"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,k] + '"/>')
                    Objectproperty_assertion.write('\n')
                    name_index = df.columns.get_loc('名称')
                    Objectproperty_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,name_index] + '"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('    </ObjectPropertyAssertion>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('    <ObjectPropertyAssertion>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <ObjectProperty IRI="#HasCountryOfOrigin"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,name_index] + '"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,k] + '"/>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('    </ObjectPropertyAssertion>')
                    Objectproperty_assertion.write('\n')
                    Objectproperty_assertion.write('\n')
            # Emit the concrete DataProperty value assertions for every
            # other (non-country) column with a real (non-'nan') value.
            # NOTE(review): the 'Name' column also reaches this branch and
            # gets a DataProperty assertion of its own -- confirm intended.
            else:
                if df.iat[j,k] != 'nan':
                    Dataproperty_assertion.write('    <DataPropertyAssertion>')
                    Dataproperty_assertion.write('\n')
                    Dataproperty_assertion.write('        <DataProperty IRI="#' + df.iat[0 ,k] + '"/>')
                    Dataproperty_assertion.write('\n')
                    name_index = df.columns.get_loc('名称')
                    Dataproperty_assertion.write('        <NamedIndividual IRI="#' + df.iat[j,name_index] + '"/>')
                    Dataproperty_assertion.write('\n')
                    Dataproperty_assertion.write('        <Literal>' + df.iat[j,k] + '</Literal>')
                    Dataproperty_assertion.write('\n')
                    Dataproperty_assertion.write('    </DataPropertyAssertion>')
                    Dataproperty_assertion.write('\n')
                    Dataproperty_assertion.write('\n')
Dataproperty_declaration.close()
Individual_declaration.close()
Class_assertion.close()
Objectproperty_assertion.close()
Dataproperty_domain.close()
Dataproperty_range.close()
Dataproperty_assertion.close()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 14:44:06 2018

@author: Administrator
"""
import os
import sys

config_name = 'myapp.cfg'

# Locate the application directory: for a frozen executable (PyInstaller /
# cx_Freeze set sys.frozen) it is the executable's directory; otherwise the
# directory containing this script. abspath() guards against __file__ being
# a bare filename (dirname would return '' when run from the cwd), and the
# plain `else` guarantees application_path is always bound (the original
# `elif __file__:` left it undefined when __file__ was empty).
if getattr(sys, 'frozen', False):
    application_path = os.path.dirname(sys.executable)
else:
    application_path = os.path.dirname(os.path.abspath(__file__))

config_path = os.path.join(application_path, config_name)
"""
Basic tests to check that the core functionalities are at least running.
"""
import os
import itertools
import numpy as np
import Bio
from goldenhinges import (
OverhangsSelector,
list_overhangs,
gc_content,
sequences_differences,
reverse_complement,
OverhangSetOptimizer,
load_record,
)
from goldenhinges.biotools import sequences_differences_array
from goldenhinges.clique_methods import find_compatible_overhangs
from dnachisel import random_dna_sequence, sequence_to_biopython_record, annotate_record
import pytest
@pytest.fixture
def data():
    """Load the phage test sequence shared by the cutting tests."""
    path = os.path.join("tests", "test_data", "phage_sequence.txt")
    with open(path, "r") as handle:
        return {"phage_sequence": handle.read()}
def test_generate_overhangs_collection():
    """Grow an overhang set in two steps and check pairwise compatibility."""
    selector = OverhangsSelector(gc_min=0.5, gc_max=0.5, differences=2, time_limit=2)
    partial = selector.generate_overhangs_set(n_overhangs=18, n_cliques=100)
    full = selector.generate_overhangs_set(start_at=len(partial))
    assert len(full) == 24
    for first, second in itertools.combinations(full, 2):
        assert sequences_differences(first, second) >= 2
        assert sequences_differences(first, reverse_complement(second)) >= 2
def test_generate_overhangs_collection2():
    """A 3s search at 25-75% GC should find at least 22 compatible overhangs."""
    selector = OverhangsSelector(gc_min=0.25, gc_max=0.75, differences=2, time_limit=3)
    overhangs = selector.generate_overhangs_set()
    assert len(overhangs) >= 22
    for left, right in itertools.combinations(overhangs, 2):
        assert sequences_differences(left, right) >= 2
        assert sequences_differences(left, reverse_complement(right)) >= 2
def test_generate_overhangs_collection_with_possible():
    """Only 2 of the 4 supplied candidate overhangs are mutually usable."""
    candidates = ["ATTC", "AAAA", "GAAT", "CTCA"]
    selector = OverhangsSelector(
        gc_min=0.25,
        gc_max=0.75,
        differences=1,
        possible_overhangs=candidates,
        time_limit=2,
    )
    assert len(selector.generate_overhangs_set()) == 2
def test_cut_sequence_into_similar_lengths(data):
    """Cut the phage sequence into 50 segments of roughly equal length."""

    def bad_overhang(candidate):
        # Reject overhangs outside the allowed GC profile: keep 50% GC, or
        # 75% GC provided neither 3-base end is entirely G/C.
        gc = gc_content(candidate)
        three_gc = max([gc_content(candidate[:-1]), gc_content(candidate[1:])]) == 1
        return (gc != 0.5) and (three_gc or (gc != 0.75))

    selector = OverhangsSelector(
        forbidden_overhangs=list_overhangs(filters=[bad_overhang]),
        differences=1,
        time_limit=2,
    )
    sequence = data["phage_sequence"]
    cuts = selector.cut_sequence(
        sequence, equal_segments=50, max_radius=20, include_extremities=False
    )
    positions = [cut["location"] for cut in cuts]
    segment_lengths = np.diff([0] + positions + [len(sequence)])
    assert len(segment_lengths) == 50
    assert int(segment_lengths.mean()) == 970
def test_from_record():
    """End-to-end: cut an annotated Biopython record that carries edit
    zones, protected (@AvoidChanges) flanks and forced '!cut' windows."""
    seq = random_dna_sequence(7202, seed=123)
    record = sequence_to_biopython_record(seq)
    zone = (1900, len(seq) - 1900)
    annotate_record(record, location=zone, label="Gene: acs", color="#8edfff")
    annotate_record(record, location=zone, label="@EnforceTranslation")
    # Freeze 1800 bp on each side of the gene zone.
    annotate_record(
        record, location=(zone[0] - 1800, zone[0], 0), label="@AvoidChanges"
    )
    annotate_record(
        record, location=(zone[1], 1800 + zone[1], 0), label="@AvoidChanges"
    )
    # ADD SEMI-RANDOM CUTTING ZONES (deterministic: derived from sin/cos).
    cut_region_size = 70
    zones = [
        (
            x + int(200 * np.sin(x)),
            x + cut_region_size + int(200 * np.sin(x) - 50 * np.cos(x)),
            0,
        )
        for x in range(50, len(seq), 1030)[1:]
    ]
    for zone in zones:
        annotate_record(record, location=zone, label="!cut")
    # SOLVE PROBLEM
    selector = OverhangsSelector(gc_min=0.25, gc_max=0.75, differences=2)
    solution = selector.cut_sequence(record, allow_edits=True, include_extremities=True)
    assert solution is not None
def test_overhangsetoptimizer():
    """The optimizer must pick exactly 4 overhangs, all drawn from the
    declared candidate pool."""
    number_of_required_overhangs = 4
    optimizer = OverhangSetOptimizer(
        set_size=number_of_required_overhangs,
        possible_overhangs=[
            "TAGG",
            "ATGG",
            "GACT",
            "GGAC",
            "TCCG",
            "CCAG",
            "AAAA",
            "TTTT",
        ],
        external_overhangs=["TAGG", "ACTG"],
    )
    optimizer.optimize(iterations=100)
    assert len(optimizer.selected_overhangs) == number_of_required_overhangs
    # Every selected overhang comes from the candidate pool.
    assert (
        len(optimizer.selected_overhangs & set(optimizer.possible_overhangs))
        == number_of_required_overhangs
    )
def test_find_compatible_overhangs():
    """Smoke test: the randomized search returns a truthy solution."""
    assert find_compatible_overhangs(n_solutions_considered=5, randomize=True)
def test_sequences_differences_array():
    """Length mismatch must raise ValueError."""
    with pytest.raises(ValueError):
        sequences_differences_array("AAA", "AAAT")
    # Only use on same-size sequences (3, 4)
def test_load_record():
    """load_record: rejects unknown extensions, keeps the given id, and
    sanitizes/truncates names to 20 characters."""
    with pytest.raises(ValueError):
        load_record("seq.asd")  # wrong extension
    seq_path = os.path.join("tests", "test_data", "sequence.gb")
    record_name = "Name longer than 20characters"
    record = load_record(filename=seq_path, name=record_name)
    assert type(record) == Bio.SeqRecord.SeqRecord
    assert record.id == record_name
    # Name is truncated to 20 chars with spaces replaced by underscores.
    assert record.name == "Name_longer_than_20c"
    assert type(load_record(filename=seq_path, fmt="gb")) == Bio.SeqRecord.SeqRecord
|
# Generated by Django 2.0.7 on 2018-07-27 23:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.7): add Contest.is_primary, default False."""
    dependencies = [
        ('api', '0075_auto_20180727_1428'),
    ]
    operations = [
        migrations.AddField(
            model_name='contest',
            name='is_primary',
            field=models.BooleanField(default=False),
        ),
    ]
|
"""
A top-most level script to run the Game
"""
from Game.Breakout import Breakout
# Instantiate the Breakout game and hand control to its main loop.
GAME = Breakout()
GAME.start()
|
import unittest
# From: https://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases
class ParametrizedTestCase(unittest.TestCase):
""" TestCase classes that want to be parametrized should
inherit from this class.
"""
def __init__(self, methodName='runTest', param=None):
super(ParametrizedTestCase, self).__init__(methodName)
self.param = param
@staticmethod
def parametrize(testcase_klass, param=None):
""" Create a suite containing all tests taken from the given
subclass, passing them the parameter 'param'.
"""
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(testcase_klass)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(testcase_klass(name, param=param))
return suite
###########
# Test Data
import jgrapht
from jgrapht.generators import grid


def _labels(t_vertices, n):
    """Vertex-attribute dict: label 'T' for ids in *t_vertices*, else 'A'."""
    return {i: {'label': ('T' if i in t_vertices else 'A')} for i in range(n)}


# g1: 5x5 grid with two 'T' islands.
g1 = jgrapht.create_graph(directed=False, weighted=False)
grid(g1, 5, 5)
v_g1 = _labels({6, 7, 13, 17, 18, 19, 23}, 25)

# g2: 5x5 grid, all 'A' (no islands).
g2 = jgrapht.create_graph(directed=False, weighted=False)
grid(g2, 5, 5)
v_g2 = _labels(set(), 25)

# g3: 5x5 grid, all 'T' (one island covering the whole grid).
g3 = jgrapht.create_graph(directed=False, weighted=False)
grid(g3, 5, 5)
v_g3 = _labels(set(range(25)), 25)

# g4: four isolated single-vertex 'T' islands.
g4 = jgrapht.create_graph(directed=False, weighted=False)
grid(g4, 5, 5)
v_g4 = _labels({0, 2, 15, 24}, 25)

# g5: one connected 'T' island.
g5 = jgrapht.create_graph(directed=False, weighted=False)
grid(g5, 5, 5)
v_g5 = _labels({6, 7, 8, 10, 11, 12, 13, 16, 17, 18}, 25)

# g6: same as g5 but vertex 11 is 'A', splitting vertex 10 off.
g6 = jgrapht.create_graph(directed=False, weighted=False)
grid(g6, 5, 5)
v_g6 = _labels({6, 7, 8, 10, 12, 13, 16, 17, 18}, 25)

# g7: single isolated 'A' vertex (no grid).
g7 = jgrapht.create_graph(directed=False, weighted=False)
g7.add_vertex(0)
v_g7 = _labels(set(), 1)

# g8: checkerboard — every even vertex 'T', all isolated.
g8 = jgrapht.create_graph(directed=False, weighted=False)
grid(g8, 5, 5)
v_g8 = _labels(set(range(0, 25, 2)), 25)

# g9: 2x2 grid with an L-shaped 'T' island.
g9 = jgrapht.create_graph(directed=False, weighted=False)
grid(g9, 2, 2)
v_g9 = _labels({0, 1, 2}, 4)
class Test_ilhas(ParametrizedTestCase):
    """Parametrized check that f(g, v_g) returns the expected 'islands'.

    ``self.param`` is (f, g, v_g, expected): the function under test, a
    graph, its vertex labels, and the expected list of islands (each a
    list of vertex ids).
    """
    def test_base (self):
        f,g,v_g,expected = self.param
        result = f(g,v_g)
        # Set-style equality in both directions: every result island matches
        # some expected island (same members), and vice versa.
        self.assertTrue(all(any(all(x in r for x in e) and all(x in e for x in r) for e in expected) for r in result))
        self.assertTrue(all(any(all(x in r for x in e) and all(x in e for x in r) for r in result) for e in expected))
## Note: the order of elements inside the lists, and of the lists in the
## produced result, does not have to match; only the same elements/lists.
# Each entry is [graph, vertex-labels, expected islands]; presumably the
# harness prepends the function under test to build (f, g, v_g, expected)
# for ParametrizedTestCase — confirm against the runner.
params = [[g1,v_g1,[[6, 7], [17, 18, 19, 23, 13]]],
          [g2,v_g2,[]],
          [g3,v_g3,[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]]],
          [g4,v_g4,[[0], [2], [15], [24]]],
          [g5,v_g5,[[16, 17, 18, 6, 7, 8, 10, 11, 12, 13]]],
          [g6,v_g6,[[10],[6,7,16, 17, 18, 8, 12, 13]]],
          [g7,v_g7,[]],
          [g8,v_g8,[[0], [2], [4], [6], [8], [10], [12], [14], [16], [18], [20], [22], [24]]],
          [g9,v_g9,[[0,1,2]]]
          ]
|
#Leetcode 1221. Split a String in Balanced Strings
'''
Balanced strings are those who have equal quantity of 'L' and 'R' characters.
Given a balanced string s split it in the maximum amount of balanced strings.
Return the maximum amount of splitted balanced strings.
Example 1:
Input: s = "RLRRLLRLRL"
Output: 4
Explanation: s can be split into "RL", "RRLL", "RL", "RL", each substring contains same number of 'L' and 'R'.
Example 2:
Input: s = "RLLLLRRRLR"
Output: 3
Explanation: s can be split into "RL", "LLLRRR", "LR", each substring contains same number of 'L' and 'R'.
Example 3:
Input: s = "LLLLRRRR"
Output: 1
Explanation: s can be split into "LLLLRRRR".
Example 4:
Input: s = "RLRRRLLRLL"
Output: 2
Explanation: s can be split into "RL", "RRRLLRLL", since each substring contains an equal number of 'L' and 'R'
Constraints:
1 <= s.length <= 1000
s[i] = 'L' or 'R'
'''
def balancedStringSplit(s):
    """Count the maximum number of balanced 'L'/'R' substrings of s.

    Greedy scan: keep a running balance of R minus L; every time it
    returns to zero, one balanced piece can be cut off.

    Time: O(n). Space: O(1).
    """
    balance = 0
    pieces = 0
    for ch in s:
        if ch == 'R':
            balance += 1
        elif ch == 'L':
            balance -= 1
        if balance == 0:
            pieces += 1
    return pieces
#time complexity: O(n), we have to loop through the entire string s to count the number of l and r
#space complexity: O(1), constant space
#main function to test the test cases:
def main():
    """Exercise balancedStringSplit against the four LeetCode examples,
    printing each result (expected: 4, 3, 1, 2)."""
    print("TESTING SPLIT STRING BALANCED STRING...")
    test_cases = ["RLRRLLRLRL", "RLLLLRRRLR", "LLLLRRRR", "RLRRRLLRLL"]
    for s in test_cases:
        print(balancedStringSplit(s))
    print("END OF TESTING...")


# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
'''
*************************************************
* Title: Creating a dictionary from a .txt file using Python
* Author: miloJ
* Date: 2017
* Code version: python 3
* Availability: https://stackoverflow.com/questions/17714571/creating-a-dictionary-from-a-txt-file-using-python
*****************************************************
'''
# Read a spreadsheet of "number,price,number,price,..." lines and append
# every match for a given number to output.txt. O(total entries).
def CheckPrice(spreadsheet, number):
    """Append "NUM: <number> COST: <price>" lines to output.txt.

    Each input line is parsed into a {number: price} dict (even positions
    are numbers, odd positions their prices).

    Fixes vs. original: files are closed via context managers (the old
    code had ``output_file.close`` without parentheses, so the output
    handle leaked), and the misleading ``num_dict`` name (it was a list
    of dicts) is gone.
    """
    line_dicts = []
    with open(spreadsheet, 'r') as cost_file:
        for line in cost_file:
            elements = line.rstrip().split(",")
            line_dicts.append(dict(zip(elements[::2], elements[1::2])))
    with open("output.txt", "a") as output_file:
        for entry in line_dicts:
            for key, value in entry.items():
                if key == number:
                    output_file.write("\nNUM: {} COST: {}".format(key, value))
if __name__ == "__main__":
    # Demo invocation against the bundled sample spreadsheet.
    CheckPrice("costs-10.txt", "+449916707")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 11:30:52 2020
@author: pitan
"""
# Find and print the largest element. Fixes vs. original: the variable
# named `list` shadowed the builtin, and the manual scan re-implemented
# the builtin max().
values = [4, 78, 52, 7]
large = max(values)
print(large)
from pm4pymdl.algo.mvp.gen_framework.rel_events import being_produced, eventually_follows, existence, rel_events_builder, link, \
rel_dfg
|
# Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import numpy as np
import torch
import scipy.signal as signal
from scipy.ndimage import filters
from scipy import interpolate
from scipy.spatial.transform.rotation import Rotation as R
from iPERCore.tools.utils.geometry import rotations
# Filter mode names accepted by pose2d_temporal_filter.
VALID_FILTERS = ["low-pass", "median"]
def temporal_filter_invalid_kps(seq_kps):
    """Fill in invalid 2D keypoints by temporal linear interpolation.

    A keypoint is invalid when its confidence (3rd channel) is 0. For each
    joint, valid frames are linearly interpolated/extrapolated over time
    to replace the invalid frames, in place.

    Args:
        seq_kps (np.ndarray): (length, num_joints, 3) as (x, y, confidence).

    Returns:
        np.ndarray: the same array with invalid frames filled in.
    """
    length, num_joints, _ = seq_kps.shape
    for i in range(num_joints):
        kps_i = seq_kps[:, i]
        invalid = kps_i[:, 2] == 0
        invalid_ids = np.where(invalid)[0]
        # Skip joints with no invalid frames: the original built an
        # interpolator (and evaluated it on an empty index set) anyway.
        if len(invalid_ids) == 0:
            continue
        valid_ids = np.where(~invalid)[0]
        f = interpolate.interp1d(valid_ids, kps_i[valid_ids], axis=0, kind="linear",
                                 fill_value="extrapolate", assume_sorted=True)
        seq_kps[invalid_ids, i] = f(invalid_ids)
    return seq_kps
def fist_order_low_pass_filter(signal, alpha=0.7):
    """First-order IIR low-pass: Y(n) = alpha * Y(n-1) + (1 - alpha) * X(n).

    Args:
        signal (np.ndarray or torch.Tensor): (n, c);
        alpha (float): smoothing coefficient in [0, 1]; larger = smoother.

    Returns:
        (np.ndarray or torch.Tensor): smoothed signal, (n, c).
    """
    is_numpy = isinstance(signal, np.ndarray)
    smoothed = np.copy(signal) if is_numpy else signal.clone()
    for step in range(1, signal.shape[0]):
        smoothed[step] = alpha * smoothed[step - 1] + (1 - alpha) * signal[step]
    return smoothed
def get_smooth_params(sig, n=5, fc=300):
    """
    Zero-phase Butterworth low-pass filtering along the time axis.
    Args:
        sig (np.ndarray): (length, feature dimension);
        n (int): the Butterworth filter order;
        fc (float): cutoff factor controlling smoothness; the smaller fc,
            the smoother the output.
    Returns:
        smooth_sig (np.ndarray): (length, feature dimension).
    """
    # Normalized cutoff w in (0, 1) relative to the Nyquist rate fs/2.
    # NOTE(review): fs=2208 is a hard-coded sampling rate — confirm origin.
    fs = 2208
    w = fc / (fs / 2)
    b, a = signal.butter(n, w, 'low')
    # filtfilt applies the filter forward and backward -> zero phase shift.
    smooth_sig = signal.filtfilt(b, a, sig.T).T
    return smooth_sig
def mean_filter(sig, kernel_size):
    """
    Sliding-window filter over *sig*.

    NOTE: despite the name, this applies a MEDIAN filter
    (scipy.ndimage.filters.median_filter), not a mean filter; the name is
    kept for interface compatibility.

    Args:
        sig (np.ndarray): (n1, n2, n3, ...., nk);
        kernel_size (tuple): the kernel size, (s1, s2, s3, ..., nk).
    Returns:
        filtered_sig (np.ndarray): same shape as *sig*.
    """
    filtered_sig = filters.median_filter(sig, size=kernel_size, mode="nearest")
    return filtered_sig
def pose2d_distance(kps1, kps2):
    """Per-frame pairwise squared L2 distances between two joint sets.

    Generalized: uses broadcasting instead of np.tile, so num_joints_1 and
    num_joints_2 may differ (the original tiled both arrays with
    kps1.shape[1] and required the joint counts to be equal). Results for
    equal joint counts are unchanged.

    Args:
        kps1 (np.ndarray): (length, num_joints_1, 2)
        kps2 (np.ndarray): (length, num_joints_2, 2)

    Returns:
        dist (np.ndarray): (length, num_joints_1, num_joints_2)
    """
    assert kps1.shape[0] == kps2.shape[0]
    # (L, n1, 1, 2) - (L, 1, n2, 2) -> broadcast to (L, n1, n2, 2)
    diff = kps1[:, :, np.newaxis, :] - kps2[:, np.newaxis, :, :]
    dist = np.sum(diff ** 2, axis=-1)
    return dist
def pose2d_temporal_filter(keypoints, window_size, mode, **kwargs):
    """
    Temporally filter keypoints to fix frames where coordinates are fine
    but the right/left joint order flipped. Strategy:
    1. temporally smooth the keypoints (`median` or `low-pass`);
    2. for each original joint, find its nearest neighbour among the
       smoothed joints of the same frame;
    3. permute the original joints by those nearest-neighbour indexes.
    Args:
        keypoints (np.ndarray): (length, num_joints, 2) or (length, num_joints, 3);
        window_size (int): temporal window (median) / filter order (low-pass);
        mode (str): one of VALID_FILTERS ("median" or "low-pass");
        **kwargs: extra filter parameters, e.g.
            --fc (float): smooth factor for the low-pass filter.
    Returns:
        sm_keypoints (np.ndarray): permuted keypoints, same shape as input.
    """
    global VALID_FILTERS
    if mode == "median":
        # mean_filter actually applies a median filter (see its docstring).
        filtered_kps = mean_filter(keypoints, kernel_size=(window_size, 1, 1))
    elif mode == "low-pass":
        filtered_kps = get_smooth_params(keypoints, n=window_size, fc=kwargs["fc"])
    else:
        raise ValueError(f"{mode} is not valid mode. Currently, it only support {VALID_FILTERS}.")
    # Compare only (x, y); confidence (if present) is ignored for matching.
    his_kps = filtered_kps[:, :, 0:2]
    query_kps = keypoints[:, :, 0:2]
    # (length, num_joints, num_joints)
    dist = pose2d_distance(query_kps, his_kps)
    # For each original joint, the index of its nearest smoothed joint.
    nn_ids = np.argmin(dist, axis=2)
    length, num_joints, c = keypoints.shape
    # Flatten (frame, joint) to one axis so each frame gathers its own
    # permuted joints: global index = frame * num_joints + joint.
    ids = np.arange(length) * num_joints
    ids = ids[:, np.newaxis] + nn_ids
    # TODO, make it more readable and use numpy advanced indexing.
    sm_keypoints = keypoints.reshape(-1, c)[ids, :].reshape(-1, num_joints, c)
    return sm_keypoints
def temporal_smooth_smpls(ref_smpls, pose_fc=300, cam_fc=100):
    """
    Temporally smooth SMPL parameters: body pose (in rot6d space) and the
    first 3 (cam/global) channels.
    Args:
        ref_smpls (np.ndarray): (length, dim) laid out as
            [cam/global (3), pose rotvecs (...), shape (10)].
            NOTE(review): the original docstring said (length, 72), but the
            3:-10 slicing implies cam + pose + shape — confirm the layout.
        pose_fc: low-pass cutoff factor for the pose channels;
        cam_fc: low-pass cutoff factor for the cam channels.
    Returns:
        ref_smpls (np.ndarray): same array, smoothed in place.
    """
    ref_rotvec = ref_smpls[:, 3:-10]
    n = ref_rotvec.shape[0]
    # rotvec -> rotation matrix -> 6D representation; smoothing in rot6d
    # space avoids axis-angle discontinuities.
    ref_rotvec = ref_rotvec.reshape((-1, 3))
    ref_rotmat = R.from_rotvec(ref_rotvec).as_matrix()
    ref_rotmat = torch.from_numpy(ref_rotmat)
    ref_rot6d = rotations.rotmat_to_rot6d(ref_rotmat)
    ref_rot6d = ref_rot6d.numpy()
    ref_rot6d = ref_rot6d.reshape((n, -1))
    ref_rot6d = get_smooth_params(ref_rot6d, fc=pose_fc)
    # Back: rot6d -> rotation matrix -> rotvec, written into the slice.
    ref_rot6d = ref_rot6d.reshape((-1, 6))
    ref_rotmat = rotations.rot6d_to_rotmat(torch.from_numpy(ref_rot6d)).numpy()
    ref_rotvec = R.from_matrix(ref_rotmat).as_rotvec()
    ref_smpls[:, 3:-10] = ref_rotvec.reshape((n, -1))
    # Smooth the camera/global channels separately, more aggressively.
    ref_smpls[:, 0:3] = get_smooth_params(ref_smpls[:, 0:3], fc=cam_fc)
    return ref_smpls
def pose_temporal_smooth(init_pose_rotvec, opt_pose_rotvec, threshold: float = 10):
    """
    Reset abnormal optimized poses back to their initial estimates.

    Both pose sequences are mapped to rot6d space; frames whose L1
    distance between initial and optimized pose exceeds *threshold* are
    overwritten (in place) with the initial pose.
    Args:
        init_pose_rotvec (torch.Tensor): (n, 72);
        opt_pose_rotvec (torch.Tensor): (n, 72);
        threshold (float): L1 distance in rot6d space above which a frame
            counts as abnormal.
    Returns:
        opt_pose_rotvec (torch.Tensor): (n, 72).
    """
    n = opt_pose_rotvec.shape[0]
    init_pose = rotations.rotvec_to_rot6d(init_pose_rotvec.reshape(-1, 3)).reshape(n, -1)
    opt_pose = rotations.rotvec_to_rot6d(opt_pose_rotvec.reshape(-1, 3)).reshape(n, -1)
    # Per-frame L1 distance between the two pose representations.
    diff = torch.sum(torch.abs(init_pose - opt_pose), dim=1)
    abnormal_ids = (diff > threshold)
    opt_pose_rotvec[abnormal_ids] = init_pose_rotvec[abnormal_ids]
    return opt_pose_rotvec
|
import os
from config import BASIC_RESULT, ADVANCED_RESULT
from readers import (
read_xml,
read_json,
read_csv
)
from advnced import advanced
from dataOperating import (
sort,
dictionariate,
)
from writers import create_output
if __name__ == "__main__":
    # Start from a clean slate: drop result files from any previous run.
    if os.path.isfile(BASIC_RESULT):
        os.remove(BASIC_RESULT)
    if os.path.isfile(ADVANCED_RESULT):
        os.remove(ADVANCED_RESULT)
    # Each reader returns (minimum value, parsed rows) for its format.
    csv_min, csv_list = read_csv()
    json_min, json_list = read_json()
    xml_min, xml_list = read_xml()
    min_over = min(csv_min, json_min, xml_min)
    # Build sorted dicts, dropping the first (lowest-key) entry of each.
    # NOTE(review): presumably the first entry is a header/placeholder —
    # confirm against dictionariate().
    csv_dict = dict(sorted(list(dictionariate(csv_list, min_over).items())[1:]))
    json_dict = dict(sorted(list(dictionariate(json_list, min_over).items())[1:]))
    xml_dict = dict(sorted(list(dictionariate(xml_list, min_over).items())[1:]))
    create_output(csv_dict, json_dict, xml_dict)
    sort()
    advanced()
|
import numpy as np
class SpeedEstimator:
    """Per-lane speed-bound estimator for a wrap-around road.

    Infers admissible speed ranges from car positions and the occupancy
    trails they leave behind; speeds are represented as negative numbers
    (a trail of length k implies speed -(k+1)).
    """
    def __init__(self, lanes=10, width=50):
        # NOTE(review): despite the names, min_speed tracks the numerically
        # LARGEST observed value and max_speed the smallest (all values are
        # negative) — confirm the intended naming convention.
        self.min_speed = - np.ones(lanes) * 3
        self.max_speed = np.zeros(lanes)
        self.p = np.zeros(lanes)    # uniform mass per admissible speed value
        self.width = width          # road length; positions wrap modulo width
        self.lanes = lanes
        self.rounds = 0             # number of update() calls so far
    def update(self, cars, occupancy_trails):
        """Tighten per-lane speed bounds from one observation frame."""
        self.rounds += 1
        for x in range(self.lanes):
            for y in range(self.width):
                speed = -1
                # if there is car at the position
                if(cars[x,y] > 0):
                    # Walk the occupancy trail ahead of the car (wrapping)
                    # to measure how far it moved this step.
                    end_trail = (y+1)%self.width
                    while(occupancy_trails[x,end_trail] > 0 and cars[x, end_trail] == 0):
                        end_trail = (end_trail + 1)%self.width
                        speed -= 1
                    # make sure the end of trail is not followed by another car, to exclude ambiguity
                    if(cars[x,end_trail] == 0):
                        # speed estimate could be used
                        if speed < self.max_speed[x]:
                            self.max_speed[x] = speed
                        if speed > self.min_speed[x]:
                            self.min_speed[x] = speed
                        # Probability mass assuming a uniform distribution
                        # over the inferred integer speed range.
                        self.p[x] = 1.0 / (self.min_speed[x] - self.max_speed[x] + 1)
    def p_speed_greater(self, lane, speed):
        """P(lane speed > *speed*) under a uniform distribution over the
        inferred [max_speed, min_speed] range; 0 outside the range."""
        if self.min_speed[lane] < speed:
            return 0.0
        if self.max_speed[lane] > speed:
            return 0.0
        return (speed - self.max_speed[lane]) * self.p[lane]
    def print(self):
        # Debug dump of the per-lane bounds.
        for x in range(self.lanes):
            print(self.min_speed[x], self.max_speed[x])
|
#!usr/bin/env python
#coding: utf-8
import redis
r = redis.Redis(host="localhost", port=6379)
# Load proxy IPs into a sorted set, all with score 1.
# Fixes vs. original: redis-py >= 3.0 removed the positional
# zadd(name, score, member) form — members must be passed as a
# {member: score} mapping — and the input file is now closed
# deterministically via a context manager.
with open("credit11315/proxy_ips.txt", "r") as f:
    for line in f:
        r.zadd("credit11315_proxy_ips:sorted_set", {line.strip(): 1})
|
import types
# Demonstration of runtime type inspection: type() vs isinstance().
class Animal(object):
    pass
class Dog(Animal):
    pass
class Husky(Dog):
    pass
def fn():
    pass
a = Animal()
d = Dog()
h = Husky()
# type() of builtin values, instances and functions.
print(type(123))
print(type('str'))
print(type(None))
print(type(abs))
print(type(a))
print(type(fn))
print()
# type() comparisons are exact: inheritance is NOT taken into account.
print(type(123)==type(465))
print(type(123)==int)
print(type('abc')==type('123'))
print(type('abc')==str)
print(type('abc')==type(123))
print(type([1,2,3])==list)
print()
# The types module names function/builtin/lambda/generator types.
print(type(fn)==types.FunctionType)
print(type(abs)==types.BuiltinFunctionType)
print(type(lambda x:x)==types.LambdaType)
print(type((x for x in range(10)))==types.GeneratorType)
print()
# isinstance() respects inheritance and accepts a tuple of types.
print(isinstance(h, Husky))
print(isinstance(h, Dog))
print(isinstance(h, Animal))
print(isinstance(d, Husky))
print(isinstance('a', str))
print(isinstance(123, int))
print(isinstance(b'a', bytes))
print(isinstance([12,34,45], (list, tuple)))
print(isinstance((1,2,3),(list, tuple)))
|
from django.db import models
from datetime import datetime
from django.contrib.auth.models import User
class Hospital(models.Model):
    """A hospital, with helpers to list its departments and staff."""
    name = models.CharField(max_length=500)
    address = models.CharField(max_length=1000)
    description = models.TextField(max_length=1000)
    registered_at = models.DateTimeField(default=datetime.now)
    added_by = models.ForeignKey(User, on_delete=models.DO_NOTHING, blank=True, null=True)

    def __str__(self) -> str:
        return self.name

    def get_departments(self):
        """All Department rows pointing at this hospital, as a list."""
        return list(self.department_set.all())

    def get_staff(self):
        """All staff profiles across this hospital's departments.

        Flattens each department's profile_set into one list. (Leftover
        debug print() calls from the original were removed.)
        """
        staff_list = []
        for dept in self.get_departments():
            staff_list.extend(dept.profile_set.all())
        return staff_list
|
from path import *
import os
import shutil
class outils_fichier:
    """File-system helpers for the image/phrase pipeline.

    NOTE(review): the working directory is hard-coded to one specific
    Windows user profile — consider extracting it into a constant.
    """
    def requete_py(self, fichier, element, liste):
        """Write *element* to file *fichier*, read it back, and append the
        text to *liste*."""
        self.fichier = fichier
        self.element = element
        self.liste = liste
        with open(str(self.fichier),"w", errors = "ignore") as file:
            file.write(str(self.element))
        with open(self.fichier,"r") as file2:
            b = file2.read()
            self.liste.append(b)
    def dossier_creer_fonction_phrase(self, compteur):
        """Create numbered sub-directories 0 .. compteur/2 - 1 under the
        image directory."""
        self.compteur = compteur
        b = int(self.compteur / 2)
        for i in range(b):
            a = str(i)
            os.chdir(r"C:\Users\jeanbaptiste\ste fois c la bonne\imageimage1.3\image")
            os.mkdir(str(a))
    def suppression_pour_nouvelle_phrase(self):
        """Delete every sub-directory of the image directory (reset state
        before processing a new phrase)."""
        os.chdir(r"C:\Users\jeanbaptiste\ste fois c la bonne\imageimage1.3\image")
        liste = os.listdir()
        for i in liste:
            shutil.rmtree(i)
    def image(self, element, compteur):
        """Move file *element* into the numbered image sub-directory."""
        self.compteur = compteur
        self.element = element
        path_image = r"C:\Users\jeanbaptiste\ste fois c la bonne\imageimage1.3\image\{}".format(self.compteur)
        shutil.move(self.element, path_image)
    def image_in_dossier(self, compteur):
        """chdir into the numbered image sub-directory and print its files."""
        self.compteur = compteur
        os.chdir(r"C:\Users\jeanbaptiste\ste fois c la bonne\imageimage1.3\image\{}".format(self.compteur))
        liste = os.listdir()
        print(liste)
|
#!/usr/bin/env python
import twitter
def main():
    """Ansible module entry point: post a tweet via python-twitter.

    Module params:
        msg:  tweet text (required);
        auth: "consumer_key,consumer_secret,access_token_key,access_token_secret";
        key:  identifier appended to the tweet as a hashtag.
    """
    module = AnsibleModule(
        argument_spec = dict(
            msg=dict(required=True),
            auth=dict(required=True),
            key=dict(required=True),
        ),
        supports_check_mode = False
    )
    msg = module.params['msg']
    auth1 = module.params['auth']
    key_id = module.params['key']
    # Split the comma-separated credential string into its four parts.
    creds = auth1.split(',')
    auth = {}
    auth['ck'] = creds[0]
    auth['cs'] = creds[1]
    auth['atk'] = creds[2]
    auth['ats'] = creds[3]
    api = twitter.Api()
    api = twitter.Api(consumer_key=auth['ck'],consumer_secret=auth['cs'], \
        access_token_key=auth['atk'], access_token_secret=auth['ats'])
    status = api.PostUpdate(msg + ' ' + ' #' + key_id)
    module.exit_json(msg='tweet sent')
# Ansible convention: the module_utils wildcard import sits at the bottom
# of the module file; it provides AnsibleModule before main() runs.
from ansible.module_utils.basic import *
main()
|
from read_info import CFindRegionFieldByTitle
import os
import sqlite3
import time
class CCheck(object):
    """Poll a directory for new files and process each file exactly once.

    Seen file paths are persisted in a local SQLite database so files are
    not re-processed across runs. For every new file <p>, a sibling
    directory <p>.dir is created and CFindRegionFieldByTitle output is
    generated into it.

    NOTE(review): __init__ enters an infinite poll loop and never returns,
    so any statement after constructing CCheck (e.g. an explicit .check()
    call) is unreachable — confirm this is intended.
    """
    def __init__(self, check_path):
        self.m_check_path = check_path
        self.m_dbname = "record.db"
        conn = sqlite3.connect(self.m_dbname)
        try:
            conn.execute("create table if not exists path_info(path varchar(256));")
            conn.commit()
        finally:
            conn.close()
        while True:
            self.check()
            time.sleep(3)
    def check(self):
        """Scan the watched directory once, processing unseen files."""
        for d in os.listdir(self.m_check_path):
            path = os.path.join(self.m_check_path, d)
            if not os.path.isfile(path):
                continue
            if self.path_is_exist(path):
                continue
            self.write_db(path)
            dir_path = path + ".dir"
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            reader = CFindRegionFieldByTitle(path)
            reader.read()
            reader.gen(os.path.join(dir_path, d))
    def path_is_exist(self, path):
        """Return True if *path* was already recorded.

        Uses a parameterized query; the original interpolated the path
        into the SQL string, which breaks on quotes and allows SQL
        injection. The connection is also closed on every exit path now.
        """
        conn = sqlite3.connect(self.m_dbname)
        try:
            cursor = conn.execute(
                "select count(*) from path_info where path = ?;", (path,))
            return cursor.fetchone()[0] > 0
        finally:
            conn.close()
    def write_db(self, path):
        """Record *path* as processed (parameterized insert)."""
        conn = sqlite3.connect(self.m_dbname)
        try:
            conn.execute("insert into path_info values(?);", (path,))
            conn.commit()
        finally:
            conn.close()
if __name__ == "__main__":
    obj_dir = "./workspace"
    if os.path.exists(obj_dir) is False:
        os.makedirs(obj_dir)
    # NOTE(review): CCheck.__init__ loops forever polling the directory,
    # so the explicit check() call below is never reached — confirm intent.
    checker = CCheck(obj_dir)
    checker.check()
|
from typing import List
import os, pickle
from neusum.service.basic_service import meta_str_surgery, easy_post_processing
def iterator(bag):
    """Placeholder — not implemented; kept for interface compatibility."""
    pass
from random import shuffle
import random
def dropword(inp_str: str):
inp_list = inp_str.split(" ")
indc = random.sample(range(0, len(inp_list)), int(len(inp_list) / 10))
inp_list = [x for idx, x in enumerate(inp_list) if idx not in indc]
return " ".join(inp_list)
def replace_lrbrrb(inp_str: str):
    """Rewrite PTB bracket tokens -LRB-/-RRB- as literal parentheses."""
    return inp_str.replace("-LRB-", '(').replace("-RRB-", ')')
import csv
def assign_task(ext_bag, ext_dp_bag, model_bag,see_bag):
    """Build evaluation rows with the four systems in random order.

    For every aligned 4-tuple (ext, extdp, model, see), produce a row of
    2*4 cells alternating [text, system-name], the four systems randomly
    permuted per row (for blind human evaluation).
    """
    cells = []
    num_of_unit = 4
    for ext, extdp, model,see in zip(ext_bag, ext_dp_bag, model_bag,see_bag):
        _tmp = [None for _ in range(2 * num_of_unit)]
        lis = [ext, extdp, model,see]
        nam = ['ext', 'extdp', 'model','see']
        idx = list(range(num_of_unit))
        shuffle(idx)
        for m in range(num_of_unit):
            # Slot 2m holds the text, slot 2m+1 its (shuffled) system label.
            _tmp[int(2 * m)] = lis[idx[m]]
            _tmp[int(2 * m + 1)] = nam[idx[m]]
        cells.append(_tmp)
    return cells
from nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer
# Module-level detokenizer, reused by detok() on every call.
d = TreebankWordDetokenizer()
def detok(inp_str):
    """Detokenize a space-separated token string back to natural text."""
    inp_list = inp_str.split(" ")
    return d.detokenize(inp_list)
def read_abigail_output(path) -> List[str]:
    """Read every file under *path*, detokenize each line, and return all
    lines concatenated in one flat list (file order = os.listdir order)."""
    files = os.listdir(path)
    bag = []
    for fname in files:
        with open(os.path.join(path, fname), 'r') as fd:
            lines = fd.read().splitlines()
            lines = [detok(l) for l in lines]
            bag += lines
    return bag
def rm_head_cnn(inp_str: str):
    """Strip a leading "... CNN -RRB-" source tag from an article string.

    Only the first 50 characters are searched. Fixes vs. original: the
    check ``find(...) > 0`` missed a marker sitting exactly at index 0;
    use an explicit ``!= -1`` test (and call find only once).
    """
    where = inp_str.find("CNN -RRB-", 0, 50)
    if where != -1:
        # Drop everything through the end of the 9-char marker.
        inp_str = inp_str[where + 9:]
    return inp_str
def fix_vowel(inp_str):
    """Fix 'a'/'an' articles based on the next word's first letter.

    The vowel set is "aeio" (deliberately no 'u', matching sound-based
    usage like "a user"); the final token is never treated as an article.
    """
    tokens = inp_str.split(" ")
    vowels = "aeio"
    for pos in range(len(tokens) - 1):
        if tokens[pos] in ("a", "an"):
            follower: str = tokens[pos + 1]
            tokens[pos] = "an" if follower[0] in vowels else "a"
    return " ".join(tokens)
if __name__ == '__main__':
    # Paths to the pickled extractive/model outputs and the See et al.
    # (pointer-generator) baseline outputs.
    path = "/scratch/cluster/jcxu/exComp"
    file = "0.325-0.120-0.289-cnnTrue1.0-1True-1093-cp_0.6"
    see_output = "/scratch/cluster/jcxu/data/cnndm_compar/pointgencov/cnn"
    ext_bag, model_bag, ext_dp_bag, see_bag = [], [], [], []
    see_bag = read_abigail_output(see_output)
    with open(os.path.join(path, file), 'rb') as fd:
        x = pickle.load(fd)
        pred = x['pred']
        ori = x['ori']
    cnt = 0
    for pre, o in zip(pred, ori):
        shuffle(pre)
        shuffle(o)
        # Clean, bracket-fix and lowercase model predictions and originals.
        p = [meta_str_surgery(easy_post_processing(replace_lrbrrb(fix_vowel(x)))).lower() for x in pre]
        o = [meta_str_surgery(easy_post_processing(replace_lrbrrb(rm_head_cnn(x)))).lower() for x in o]
        # Word-dropped variant of the extractive originals.
        o_drop = [dropword(x) for x in o]
        o = [detok(x) for x in o]
        o_drop = [detok(x) for x in o_drop]
        p = [detok(x) for x in p]
        ext_bag += o
        ext_dp_bag += o_drop
        model_bag += p
        cnt += 1
        if cnt > 250:
            # After ~250 examples, shuffle the bags, build the blind
            # evaluation rows, print them and dump them to CSV, then stop.
            shuffle(ext_dp_bag)
            shuffle(model_bag)
            shuffle(ext_bag)
            cells = assign_task(ext_bag, ext_dp_bag, model_bag,see_bag)
            cells_len = len(cells)
            cstep = int(cells_len/2)
            for idx in range(cstep):
                # Two rows per printed line, tab-separated.
                x = "\t".join(cells[2*idx]+cells[2*idx+1])
                print(x)
                if idx >250:
                    exit()
            with open('data_v0.csv', 'w', newline='') as csvfile:
                spamwriter = csv.writer(csvfile, delimiter=' ',
                                        quotechar=',', quoting=csv.QUOTE_MINIMAL)
                for c in cells:
                    print("\t".join(c))
                    spamwriter.writerow(c)
            exit()
|
import tensorflow as tf
import numpy as np
import gym
#from collections import deque # not using tgis for fun
import random # for random sampling from deque
env=gym.make('MountainCar-v0')
env=env.unwrapped
n_obs=2    # MountainCar observation: (position, velocity)
n_act=3    # actions: push left, no push, push right
# Primary (online) Q-network: two hidden layers, linear Q-value head.
qprimary = tf.keras.models.Sequential()
qprimary.add(tf.keras.layers.Dense(units=128,input_dim=n_obs, activation='sigmoid'))
qprimary.add(tf.keras.layers.Dense(units=128, activation="relu"))
qprimary.add(tf.keras.layers.Dense(units=n_act, activation=None))
qprimary.compile(loss="mse", optimizer="RMSprop", metrics=['accuracy'])
qprimary.summary()
# Target network: same architecture; periodically blended toward the
# primary network's weights (see qtraget_train()).
qtarget = tf.keras.models.Sequential()
qtarget.add(tf.keras.layers.Dense(units=128,input_dim=n_obs, activation='sigmoid'))
qtarget.add(tf.keras.layers.Dense(units=128, activation="relu"))
qtarget.add(tf.keras.layers.Dense(units=n_act, activation=None))
qtarget.compile(loss="mse", optimizer="RMSprop", metrics=['accuracy'])
qtarget.summary()
#memory=deque(maxlen=2000)
class my_que:
    """Fixed-capacity FIFO backed by a plain list (deque stand-in)."""

    def __init__(self, maxlen):
        self.max = maxlen    # capacity bound
        self.counter = 0     # total pushes ever made (never decremented)
        self.queue = []

    def push(self, item):
        # Evict the oldest element once capacity has been reached; the
        # counter intentionally keeps counting past the capacity, exactly
        # like the original implementation.
        if self.counter >= self.max:
            self.queue.pop(0)
        self.queue.append(item)
        self.counter += 1

    def show(self):
        # Expose the raw backing list together with the push count.
        return self.queue, self.counter


# Global replay buffer shared by remember()/replay().
mem = my_que(maxlen=2000)
def sampling(seq, size):
    """Return a contiguous slice of *seq* with at most *size* items.

    A random window start is chosen when the sequence is longer than
    *size*; otherwise the whole sequence is returned. (The parameter was
    renamed from ``list``, which shadowed the builtin; callers in this
    file pass arguments positionally.)
    """
    length = len(seq)
    if length <= size:
        start = 0
    else:
        start = np.random.randint(0, length - size)
    return seq[start:start + size]
epsilon=1.0          # exploration rate, decayed multiplicatively per act() call
epsilon_min=0.01     # floor for the exploration rate
epsilon_decay=0.995
def act(s):
    """Epsilon-greedy action for state s (shape (1, n_obs)); decays epsilon."""
    global epsilon
    epsilon=epsilon*epsilon_decay
    epsilon=max(epsilon,epsilon_min)
    if np.random.random()<epsilon:
        # Explore: uniform random action.
        return np.random.choice(np.arange(n_act))
    # Exploit: greedy action from the primary Q-network.
    return np.argmax(qprimary.predict(s)[0])
def remember(s,a,r,ns,d):
    """Store one (state, action, reward, next_state, done) transition."""
    mem.push([s,a,r,ns,d])
def replay(size):
    """One experience-replay training pass over a contiguous sample.

    Q-targets are computed from the (frozen) target network; only the
    primary network is fitted here, so the Q-value "rule book" stays
    stable between target syncs.
    """
    gamma=0.8
    sample_size=size
    memory,length=mem.show()
    # NOTE(review): length is the total push count, which can exceed
    # len(memory) once the buffer wraps — confirm this is acceptable.
    if length<sample_size:
        return
    samples=sampling(memory,sample_size)
    for sample in samples:
        s,a,r,ns,d=sample
        q_target = qtarget.predict(s)
        if d == True:
            # Terminal transition: no bootstrapped future value, keeping the
            # episode from bleeding into the restart state ("reward farming").
            q_target[0][a]=r
        else:
            q_target[0][a]=r+ gamma*max(qtarget.predict(ns)[0])
        qprimary.fit(s,q_target,epochs=1,verbose=0)
def qtraget_train():
    """Blend primary-network weights into the target network.

    NOTE(review): with tau=0.1 this computes
    target = 0.9*primary + 0.1*target, i.e. the target tracks the primary
    almost immediately; the conventional "soft update" is
    target = tau*primary + (1-tau)*target. Confirm which was intended.
    """
    tau=0.1# for soft weight updates
    qprimary_weights= qprimary.get_weights()
    qtraget_weights=qtarget.get_weights()
    for i in range(len(qtraget_weights)):
        qtraget_weights[i]=(1-tau)*qprimary_weights[i]+tau*qtraget_weights[i]
    qtarget.set_weights(qtraget_weights)
def save():
    """Persist the primary network (weights + optimizer state) to disk."""
    FILE=r"C:\\Users\\Dell\\Desktop\\holidASY\\dqn_keras.h5"
    tf.keras.models.save_model(model=qprimary,filepath= FILE ,overwrite=True,include_optimizer=True)
    print("saved")
def load():
    """Load the primary Q-network from disk, replacing the in-memory model.

    Bug fix: without the ``global`` declaration the loaded model was bound to
    a function-local name and discarded, so load() silently had no effect on
    the module-level ``qprimary`` actually used for training.
    """
    global qprimary
    FILE = r"C:\\Users\\Dell\\Desktop\\holidASY\\dqn_keras.h5"
    qprimary = tf.keras.models.load_model(FILE)
    qprimary.compile(loss="mse", optimizer="RMSprop", metrics=['accuracy'])
    print("loaded")
episodes = 200
#load()
# Main training loop: run epsilon-greedy episodes, store transitions, and
# periodically fit the primary network / soft-update the target network.
for i in range(episodes):
  s= env.reset()
  s=np.array([s])  # add batch dimension for model.predict
  done=False
  steps=0
  while not done:
    env.render()
    action=act(s)
    ns,reward,done,info=env.step(action)  # legacy gym 4-tuple step API
    reward=ns[1]  # reward shaping from observation[1] — presumably velocity (MountainCar-style); TODO confirm
    #print(reward)
    ns=np.array([ns])
    if done:
      reward=100  # large bonus on reaching a terminal state
    remember(s,action,reward,ns,done)
    if steps%2 == 0:  # train only every other step to reduce cost
      replay(8)
      qtraget_train()
    s=ns
    steps+=1
  print("episode: "+str(i)+"| steps :"+str(steps) )
  replay(512)  # larger batch update at episode end
  qtraget_train()
save()
|
from upnpavcontrol.web.api import media_proxy
import pytest
import itsdangerous
import itsdangerous.exc
def test_encode_decode():
    """A URL must round-trip through the proxy token, and the token must differ from the URL."""
    original = 'http://fancy.de'
    token = media_proxy.encode_url_proxy_token(original)
    assert media_proxy.decode_url_proxy_token(token) == original
    assert token != original
def test_fake_decode():
    """Decoding a token that was never signed must raise BadSignature."""
    bogus = 'rfegesrekborksbld.fv02314fwf'
    with pytest.raises(itsdangerous.exc.BadSignature):
        media_proxy.decode_url_proxy_token(bogus)
def test_altered_token_decode():
    """Tampering with one character of a valid token must break the signature.

    Bug fix: the original unconditionally replaced token[1] with 'a'; whenever
    the token's second character happened to already be 'a', the "altered"
    token was identical to the original and the test failed spuriously.
    """
    url = 'http://fancy.de'
    token = media_proxy.encode_url_proxy_token(url)
    replacement = 'a' if token[1] != 'a' else 'b'
    altered_token = token[:1] + replacement + token[2:]
    with pytest.raises(itsdangerous.exc.BadSignature):
        media_proxy.decode_url_proxy_token(altered_token)
|
import FWCore.ParameterSet.Config as cms

# cmsRun configuration: SiStrip DQM bad-strips validation on re-reconstructed
# CRAFT cosmics data.  Runs cluster- and track-level SiStrip monitoring over
# PoolSource input and saves the DQM output at job end.
process = cms.Process( "SiStripDQMBadStripsValidationReReco" )
### Miscellaneous ###
## Logging ##
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool( True )
)
process.MessageLogger = cms.Service( "MessageLogger",
    destinations = cms.untracked.vstring(
        'cout'
    ),
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string( 'INFO' )
    )
)
## Profiling ##
# Memory #
process.SimpleMemoryCheck = cms.Service( "SimpleMemoryCheck",
    ignoreTotal = cms.untracked.int32( 0 )
)
### Import ###
## Magnetic field ##
process.load( "Configuration.StandardSequences.MagneticField_38T_cff" )
## Geometry ##
process.load( "Configuration.StandardSequences.GeometryRecoDB_cff" )
## Calibration ##
# Global tag (conditions from the Frontier production database) #
process.load( "Configuration.StandardSequences.FrontierConditions_GlobalTag_cff" )
process.GlobalTag.connect = 'frontier://FrontierProd/CMS_COND_21X_GLOBALTAG'
process.GlobalTag.globaltag = 'CRAFT_ALL_V4::All'
process.es_prefer_GlobalTag = cms.ESPrefer( 'PoolDBESSource', 'GlobalTag' )
### SiStrip DQM ###
## Reconstruction ##
process.load( "RecoTracker.Configuration.RecoTrackerP5_cff" )
## DQM modules ##
# SiStripMonitorCluster: cluster-level monitoring over all detectors #
import DQM.SiStripMonitorCluster.SiStripMonitorCluster_cfi
process.siStripMonitorCluster = DQM.SiStripMonitorCluster.SiStripMonitorCluster_cfi.SiStripMonitorCluster.clone()
process.siStripMonitorCluster.OutputMEsInRootFile = False
process.siStripMonitorCluster.SelectAllDetectors = True
process.siStripMonitorCluster.StripQualityLabel = ''
process.siStripMonitorCluster.TH1ClusterPos.moduleswitchon = True
process.siStripMonitorCluster.TH1nClusters.layerswitchon = True
process.siStripMonitorCluster.TH1nClusters.moduleswitchon = False
process.siStripMonitorCluster.TH1ClusterStoN.moduleswitchon = False
process.siStripMonitorCluster.TH1ClusterStoNVsPos.moduleswitchon = True
process.siStripMonitorCluster.TH1ClusterNoise.moduleswitchon = False
process.siStripMonitorCluster.TH1NrOfClusterizedStrips.moduleswitchon = False
process.siStripMonitorCluster.TH1ModuleLocalOccupancy.moduleswitchon = False
process.siStripMonitorCluster.TH1ClusterCharge.moduleswitchon = False
process.siStripMonitorCluster.TH1ClusterWidth.moduleswitchon = False
# SiStripMonitorTrack: track-level monitoring (tracks are refitted first
# so trajectories are available in the event) #
import RecoTracker.TrackProducer.RefitterWithMaterial_cfi
process.ctfWithMaterialTracksP5Refitter = RecoTracker.TrackProducer.RefitterWithMaterial_cfi.TrackRefitter.clone()
process.ctfWithMaterialTracksP5Refitter.src = 'ctfWithMaterialTracksP5'
process.ctfWithMaterialTracksP5Refitter.TrajectoryInEvent = True
import DQM.SiStripMonitorTrack.SiStripMonitorTrack_cfi
process.SiStripMonitorTrackReal = DQM.SiStripMonitorTrack.SiStripMonitorTrack_cfi.SiStripMonitorTrack.clone()
process.SiStripMonitorTrackReal.TrackProducer = 'ctfWithMaterialTracksP5'
process.SiStripMonitorTrackReal.TrackLabel = ''
process.SiStripMonitorTrackReal.Cluster_src = 'siStripClusters'
process.SiStripMonitorTrackReal.FolderName = 'SiStrip/Tracks'
# process.SiStripMonitorTrackReal.Mod_On = True
### Input ###
## PoolSource: CRAFT re-reco cosmics RECO files ##
process.source = cms.Source( "PoolSource",
    fileNames = cms.untracked.vstring(
        '/store/data/Commissioning08/Cosmics/RECO/CRAFT_ALL_V4_ReReco-v1/0001/DE20B094-1FC2-DD11-90AC-001D0967D5A8.root',
        '/store/data/Commissioning08/Cosmics/RECO/CRAFT_ALL_V4_ReReco-v1/0004/6C368AB3-96C2-DD11-BDAC-001D0967CF86.root',
        '/store/data/Commissioning08/Cosmics/RECO/CRAFT_ALL_V4_ReReco-v1/0007/1A572E65-75C4-DD11-8E97-001D0967C64E.root',
        '/store/data/Commissioning08/Cosmics/RECO/CRAFT_ALL_V4_ReReco-v1/0010/52664B9C-E8C4-DD11-B292-0019B9E48877.root'
    ),
    skipEvents = cms.untracked.uint32(0)
)
## Input steering ##
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32( 1000 )
)
### Output ###
## DQM: save histograms per run and at job end ##
process.load( "DQMServices.Core.DQM_cfg" )
process.DQM.collectorHost = ''
process.load( "DQMServices.Components.DQMEnvironment_cfi" )
process.dqmSaver.convention = 'Online'
process.dqmSaver.dirName = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_TRACKER/DQM/SiStrip/jobs/output'
process.dqmSaver.producer = 'DQM'
process.dqmSaver.saveByRun = 1
process.dqmSaver.saveAtJobEnd = True
process.dqmSaver.referenceHandling = 'qtests'
process.dqmEnv.subSystemFolder = 'SiStrip'
### Scheduling ###
## Paths ##
# DQM path: cluster monitor -> refitter -> track monitor -> saver #
process.p = cms.Path(
    process.siStripMonitorCluster *
    process.ctfWithMaterialTracksP5Refitter *
    process.SiStripMonitorTrackReal *
    process.dqmSaver
)
|
# Read an array from the user, sort it, then binary-search for an element.
arr = []
n = int(input("Enter the size of the array: "))
for i in range(n):
    x = int(input("Enter the element: "))
    arr.append(x)
print("The array is:", arr)
arr.sort()  # binary search requires a sorted sequence
print("Array after sorting:", arr)
item = int(input("Enter the element to search: "))
l = 0
u = n - 1
while l <= u:
    mid = (l + u) // 2
    if arr[mid] == item:
        # 1-based position within the *sorted* array
        print("Element Found at position", mid + 1)
        break
    elif arr[mid] < item:
        l = mid + 1
    else:
        u = mid - 1
else:
    # Bug fix: the original printed nothing at all when the element was
    # absent; the while-else runs only when the loop ends without `break`.
    print("Element not found")
|
import torch
import numpy as np
import torch.nn.functional as F
from survae.distributions import ConditionalDistribution
from survae.transforms.surjections import Surjection
from .utils import integer_to_base, base_to_integer
class BinaryProductArgmaxSurjection(Surjection):
    """Generative argmax surjection over a Cartesian product of binary spaces.

    The argmax is performed over the final dimension.  For an input tensor x
    of shape (B, D, L) with discrete values {0, ..., C-1}, ``encoder`` must be
    a ConditionalDistribution q(z|x) over tensors of shape (B, D, L, K) with
    K = ceil(log2(C)) and support restricted so that x = argmax z.
    E.g. C=27 gives K=5, i.e. 2**5 = 32 representable classes.
    """
    stochastic_forward = True

    def __init__(self, encoder, num_classes):
        super(BinaryProductArgmaxSurjection, self).__init__()
        assert isinstance(encoder, ConditionalDistribution)
        self.encoder = encoder
        self.num_classes = num_classes
        self.dims = self.classes2dims(num_classes)

    @staticmethod
    def classes2dims(num_classes):
        # Number of binary digits needed to represent num_classes values.
        return int(np.ceil(np.log2(num_classes)))

    def idx2base(self, idx_tensor):
        # Integer class indices -> base-2 digit representation.
        return integer_to_base(idx_tensor, base=2, dims=self.dims)

    def base2idx(self, base_tensor):
        # Base-2 digit representation -> integer class indices.
        return base_to_integer(base_tensor, base=2)

    def forward(self, x):
        # Stochastic forward: sample z ~ q(z|x); the log-det-jacobian term
        # of the surjection is -log q(z|x).
        z, log_qz = self.encoder.sample_with_log_prob(context=x)
        return z, -log_qz

    def inverse(self, z):
        # Deterministic inverse: threshold at 0 to recover the binary digits,
        # then decode them back into integer class indices.
        bits = torch.gt(z, 0.0).long()
        return self.base2idx(bits)
|
def find_even_index(arr):
    """Return the smallest index whose left-side sum equals its right-side sum.

    An empty side sums to 0.  Returns -1 when no such index exists.

    Improvement: runs in O(n) using a running left sum against the total,
    instead of the original O(n^2) re-summation of both sides for every
    candidate index; results are identical.
    """
    total = sum(arr)
    left_sum = 0
    for i, value in enumerate(arr):
        # Right-side sum is total minus the left side and the pivot itself.
        if left_sum == total - left_sum - value:
            return i
        left_sum += value
    return -1
def affiche_liste_index(liste):
    """Print every (index, element) pair of *liste*, followed by a blank line."""
    for idx, element in enumerate(liste):
        print('index = ', idx, 'élément = ', element)
    print('\n')
debug = True  # NOTE(review): unused flag — nothing in this script reads it
# Smoke tests / demonstrations for find_even_index.
print(find_even_index([1,2,3,4,3,2,1]))
print(find_even_index([1,100,50,-51,1,1]))
print(find_even_index([1,2,3,4,5,6]))
print(find_even_index([20,10,30,10,10,15,35]))
print(find_even_index([20,10,-80,10,10,15,35]))
print(find_even_index([10,-80,10,10,15,35,20]))
print(find_even_index(list(range(1,100))))
print(find_even_index([0,0,0,0,0]), "Should pick the first index if more cases are valid")
print(find_even_index([-1,-2,-3,-4,-3,-2,-1]))
print(find_even_index(list(range(-100,-1)))) |
import cv2
import os
import numpy as np
from matplotlib import pyplot as plt
import time
import similarity_util
if __name__ == '__main__':
    # Batch image-similarity screening: walk every image under check_path and
    # copy the ones resembling a "standard" image (by colour histogram and/or
    # MD5) into the matching result directory.  Progress is checkpointed to
    # point_file_name so an interrupted scan can resume where it left off.
    # check_path = r'C:\Users\Administrator\Desktop\zawu\similarity\check'
    # check_path = r'F:\pub_pic\004'
    # save_hist_path = r'C:\Users\Administrator\Desktop\zawu\similarity\res_hist'
    # save_MD5_path = r'C:\Users\Administrator\Desktop\zawu\similarity\res_md5'
    # standard_path = r'C:\Users\Administrator\Desktop\zawu\similarity\dui'
    check_path = r'/opt/10.20.31.124.pub.pic'  # root directory to scan
    save_hist_path = r'res_hist'  # histogram matches are copied here
    save_MD5_path = r'res_md5'  # MD5 matches are copied here
    standard_path = r'dui'  # directory of reference ("standard") images
    need_file_count = False  # pre-count files before scanning (slow on big trees)
    point_file_name = r'hist.point'  # resume checkpoint (last processed index)
    img_min_wh = 100  # skip images smaller than this in either dimension
    hist_size = 256  # number of histogram bins
    hist_min_similar = 0.75  # histogram-similarity threshold for a match
    run_hist = True  # enable histogram comparison
    run_MD5 = False  # enable MD5 comparison
    print('run_hist:', run_hist)
    print('run_MD5:', run_MD5)
    if not os.path.exists(save_hist_path):
        os.makedirs(save_hist_path)
    if not os.path.exists(save_MD5_path):
        os.makedirs(save_MD5_path)
    if need_file_count:
        file_count = 0
        for fpathe, dirs, check_img_names in os.walk(check_path):
            for check_img_name in check_img_names:
                file_count += 1
        print('file_count:', file_count)
    # Pre-compute reference histograms / MD5s once.
    duiHistArr = similarity_util.initDuiHistArr(standard_path, hist_size)
    duiMD5Arr = similarity_util.initDuiMD5Arr(standard_path)
    t = time.time()
    i = 0
    point_num = 0
    # Resume from the last checkpointed file index, if any.
    if os.path.exists(point_file_name):
        with open(point_file_name, 'r') as point_file:
            point_num = int(point_file.readline())
    for fpathe, dirs, check_img_names in os.walk(check_path):
        for check_img_name in check_img_names:
            i += 1
            if i < point_num:
                continue  # already processed in a previous run
            try:
                font, ext = os.path.splitext(check_img_name)[0], os.path.splitext(check_img_name)[1]
                if ext == '.gif':
                    continue  # cv2.imread cannot decode GIFs
                check_img_path_name = os.path.join(fpathe, check_img_name)
                # print('start:', os.path.join(fpathe, check_img_name))
                vis = cv2.imread(check_img_path_name)
                # NOTE(review): imread returns None for unreadable files; the
                # resulting AttributeError is swallowed by the except below.
                if vis.shape[0] < img_min_wh or vis.shape[1] < img_min_wh:
                    continue
                if run_hist:
                    corrcuomean, minbc = similarity_util.check_one_img_hist(vis, duiHistArr, hist_size)
                    # print(os.path.join(fpathe, check_img_name), corrcuomean, minbc)
                    if corrcuomean > hist_min_similar:
                        font, ext = os.path.splitext(check_img_name)[0], os.path.splitext(check_img_name)[1]
                        corrcuomean = ('%.2f' % corrcuomean)
                        # Save with the similarity score embedded in the name.
                        cv2.imwrite(os.path.join(save_hist_path, font+"_"+str(corrcuomean)+ext), vis)
                # MD5 similarity
                if run_MD5:
                    is_MD5_similar = similarity_util.check_one_img_md5(check_img_path_name, duiMD5Arr)
                    if is_MD5_similar:
                        cv2.imwrite(os.path.join(save_MD5_path, check_img_name), vis)
                if i % 1000 == 0:
                    # Log elapsed time and checkpoint progress every 1000 files.
                    print('耗时:', str(time.time()-t), check_img_path_name, i)
                    point_num = i
                    with open(point_file_name, 'w+') as point_file:
                        point_file.write(str(i))
            except Exception as e:
                # NOTE(review): broad except keeps the batch running but hides
                # the actual failure; consider logging `e` as well.
                print('error:', os.path.join(fpathe, check_img_name))
|
import sys
import torch
import math
import random
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
import collections
import warnings
import cv2
from PIL import Image, ImageFile
# Python < 3.3 exposed the container ABCs directly on `collections`;
# later versions moved them to `collections.abc`.
if sys.version_info < (3, 3):
    Sequence = collections.Sequence
    Iterable = collections.Iterable
else:
    Sequence = collections.abc.Sequence
    Iterable = collections.abc.Iterable
def _is_pil_image(img):
    """Return True if *img* is a PIL image (or an accimage image when available)."""
    accepted = (Image.Image, accimage.Image) if accimage is not None else (Image.Image,)
    return isinstance(img, accepted)
def _is_tensor_image(img):
return torch.is_tensor(img) and img.ndimension() == 3
def _is_numpy(img):
return isinstance(img, np.ndarray)
def _is_numpy_image(img):
return img.ndim in {2, 3}
def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    See ``ToTensor`` for more details.

    Numpy HWC arrays become CHW tensors; uint8 data is scaled to [0, 1].
    PIL images are decoded according to their mode; integer/float modes
    ('I', 'I;16', 'F') are NOT rescaled, only byte images are divided by 255.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
    Returns:
        Tensor: Converted image.
    Raises:
        TypeError: if ``pic`` is neither a PIL image nor an ndarray.
        ValueError: if an ndarray input is not 2- or 3-dimensional.
    """
    if not(_is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError(
            'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError(
            'pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]  # promote grayscale HW -> HW1
        img = torch.from_numpy(pic.transpose((2, 0, 1)))  # HWC -> CHW
        # backward compatibility: byte images are scaled to [0, 1]
        if isinstance(img, torch.ByteTensor):
            return img.float().div(255)
        else:
            return img
    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage copies directly into a pre-allocated float32 CHW buffer
        nppic = np.zeros(
            [pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic)
    # handle PIL Image: decode pixel data according to the image mode
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    elif pic.mode == 'F':
        img = torch.from_numpy(np.array(pic, np.float32, copy=False))
    elif pic.mode == '1':
        # 1-bit images: expand {0, 1} to {0, 255} so the later /255 maps to {0, 1}
        img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
    else:
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
    # PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
    if pic.mode == 'YCbCr':
        nchannel = 3
    elif pic.mode == 'I;16':
        nchannel = 1
    else:
        nchannel = len(pic.mode)  # one channel per mode letter
    img = img.view(pic.size[1], pic.size[0], nchannel)
    # put it from HWC to CHW format
    # yikes, this transpose takes 80% of the loading time/CPU
    img = img.transpose(0, 1).transpose(0, 2).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.float().div(255)
    else:
        return img
def normalize(tensor, mean, std, inplace=False):
    """Normalize a (C, H, W) tensor image channel-wise: (x - mean) / std.

    Operates on a copy unless ``inplace=True``.  ``mean`` and ``std`` are
    per-channel sequences.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation inplace.
    Returns:
        Tensor: Normalized Tensor image.
    Raises:
        TypeError: if ``tensor`` is not a 3-D torch tensor.
    """
    # Inlined _is_tensor_image check: a torch tensor with exactly 3 dims.
    if not (torch.is_tensor(tensor) and tensor.ndimension() == 3):
        raise TypeError('tensor is not a torch image.')
    out = tensor if inplace else tensor.clone()
    per_channel_mean = torch.as_tensor(mean, dtype=out.dtype, device=out.device)
    per_channel_std = torch.as_tensor(std, dtype=out.dtype, device=out.device)
    out.sub_(per_channel_mean[:, None, None]).div_(per_channel_std[:, None, None])
    return out
def crop(img, i, j, h, w):
    """Crop the region with top-left corner (i, j) and size (h, w) from an
    OpenCV numpy image of shape (H, W) or (H, W, C).

    Raises:
        TypeError: if ``img`` is not a 2- or 3-dimensional ndarray.
    """
    if not (isinstance(img, np.ndarray) and img.ndim in (2, 3)):
        raise TypeError(
            'img should be OpenCV numpy Image. Got {}'.format(type(img)))
    # Bug fix: the old 3-axis index (img[..., :]) failed on 2-D grayscale
    # inputs even though the dimension check accepts them; omitting the
    # trailing slice is equivalent for 3-D and correct for 2-D.
    return img[i:i + h, j:j + w]
def center_crop(img, output_size):
    """Crop the central (th, tw) region of an OpenCV numpy image.

    Args:
        img: numpy array of shape (H, W) or (H, W, C).
        output_size: int for a square crop, or a (th, tw) pair.
    Returns:
        The central crop of shape (th, tw) or (th, tw, C).
    """
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    # Bug fix: `h, w, _ = img.shape` raised on 2-D grayscale inputs; take the
    # first two dims explicitly so both 2-D and 3-D images are supported.
    h, w = img.shape[0], img.shape[1]
    th, tw = output_size
    i = int(round((h - th) / 2.))
    j = int(round((w - tw) / 2.))
    return img[i:i + th, j:j + tw]
def resized_crop(img, i, j, h, w, size, interpolation=cv2.INTER_LINEAR):
    """Crop the (i, j, h, w) region of *img*, then resize it to *size*."""
    assert _is_numpy_image(img), 'img should be OpenCV numpy Image'
    cropped = crop(img, i, j, h, w)
    return resize(cropped, size, interpolation)
def resize(img, size, interpolation=cv2.INTER_LINEAR):
    """Resize an OpenCV numpy image.

    Args:
        img: numpy array of shape (H, W, C).
        size: int — the shorter edge is scaled to this, keeping aspect ratio;
            or a (h, w) pair — resized exactly (note cv2 expects (w, h)).
        interpolation: cv2 interpolation flag.
    Returns:
        The resized image.
    Raises:
        TypeError: on non-image input or malformed ``size``.
    """
    if not _is_numpy_image(img):
        raise TypeError(
            'img should be OpenCV numpy Image. Got {}'.format(type(img)))
    if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))
    # Bug fix: cv2.resize's third positional parameter is `dst`, not
    # `interpolation`; the flag must be passed by keyword or it is silently
    # misinterpreted (or rejected) by the binding.
    if isinstance(size, int):
        h, w, _ = img.shape
        if (w <= h and w == size) or (h <= w and h == size):
            return img  # shorter edge already matches
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
        return cv2.resize(img, (ow, oh), interpolation=interpolation)
    else:
        # size is (h, w); cv2 wants (w, h)
        return cv2.resize(img, size[::-1], interpolation=interpolation)
def hflip(img):
    """Horizontally flip an OpenCV (H, W, C) numpy image, preserving a
    trailing singleton channel axis."""
    if not _is_numpy_image(img):
        raise TypeError(
            'img should be OpenCV numpy Image. Got {}'.format(type(img)))
    flipped = cv2.flip(img, 1)
    if img.shape[2] == 1:
        # cv2.flip drops the singleton channel dimension; restore it.
        flipped = flipped[:, :, np.newaxis]
    return flipped
|
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.engine.http.client import AutoMarshallingHTTPClient
from cloudcafe.glance.models.image import (
Image, Images, ImageUpdate, Member, Members)
from cloudcafe.glance.models.task import Task, Tasks
from cloudcafe.glance.models.versions import Versions
class ImagesClient(AutoMarshallingHTTPClient):
"""@summary: Client for Images"""
def __init__(self, base_url, auth_token, serialize_format,
deserialize_format):
"""@summary: Constructs the Images API client"""
super(ImagesClient, self).__init__(serialize_format,
deserialize_format)
self.auth_token = auth_token
self.serialize_format = serialize_format
self.deserialize_format = deserialize_format
self.default_headers['X-Auth-Token'] = auth_token
content_type = 'application/{0}'.format(self.serialize_format)
accept = 'application/{0}'.format(self.deserialize_format)
self.default_headers['Content-Type'] = content_type
self.default_headers['Accept'] = accept
self.base_url = base_url
def list_images(self, params=None, requestslib_kwargs=None):
"""
@summary: List subset of images
@param params: Parameters to alter the returned list of images
@type params: Dictionary
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images'.format(self.base_url)
return self.request('GET', url, params=params,
response_entity_type=Images,
requestslib_kwargs=requestslib_kwargs)
def get_image_details(self, image_id, requestslib_kwargs=None):
"""
@summary: Get the details of an image
@param image_id: Id of image to be returned
@type params: UUID
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}'.format(self.base_url, image_id)
return self.request('GET', url, response_entity_type=Image,
requestslib_kwargs=requestslib_kwargs)
def register_image(self, auto_disk_config=None, checksum=None,
container_format=None, created_at=None,
disk_format=None, file_=None, id_=None, image_type=None,
min_disk=None, min_ram=None, name=None, os_type=None,
owner=None, protected=None, schema=None, self_=None,
size=None, status=None, tags=None, updated_at=None,
user_id=None, visibility=None,
additional_properties=None, requestslib_kwargs=None):
"""
@summary: Register an image - Not listed in the Images API docs
@param auto_disk_config: Auto disk config for the image being created
@type auto_disk_config: String
@param checksum: Checksum for the image being created
@type checksum: String
@param container_format: Container format for the image being created
@type container_format: String
@param created_at: Created at for the image being created
@type created_at: Datetime
@param disk_format: Disk format for the image being created
@type disk_format: String
@param file_: File location for the image being created
@type file_: String
@param id_: Id for the image being created
@type id_: UUID
@param image_type: Image type for the image being created
@type image_type: String
@param min_disk: Minimum disk for the image being created
@type min_disk: String
@param min_ram: Minimum ram for the image being created
@type min_ram: String
@param name: Name for the image being created
@type name: String
@param os_type: OS type for the image being created
@type os_type: String
@param owner: Owner for the image being created
@type owner: String
@param protected: Protected flag for the image being created
@type protected: Boolean
@param schema: Schema for the image being created
@type schema: String
@param self_: Self location for the image being created
@type self_: String
@param size: Size for the image being created
@type size: String
@param status: Status for the image being created
@type status: String
@param tags: Tags for the image being created
@type tags: Dictionary
@param updated_at: Updated at for the image being created
@type updated_at: Datetime
@param user_id: User id for the image being created
@type user_id: String
@param visibility: Visibility for the image being created
@type visibility: String
@param additional_properties: Additional properties for the image being
created
@type additional_properties: Dictionary
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images'.format(self.base_url)
image = Image(auto_disk_config=auto_disk_config, checksum=checksum,
container_format=container_format, created_at=created_at,
disk_format=disk_format, file_=file_, id_=id_,
image_type=image_type, min_disk=min_disk,
min_ram=min_ram, name=name, os_type=os_type, owner=owner,
protected=protected, schema=schema, self_=self_,
size=size, status=status, tags=tags,
updated_at=updated_at, user_id=user_id,
visibility=visibility,
additional_properties=additional_properties)
return self.request('POST', url, request_entity=image,
response_entity_type=Image,
requestslib_kwargs=requestslib_kwargs)
def store_image_file(self, image_id, file_data, content_type=None,
requestslib_kwargs=None):
"""
@summary: Store an image file data - Not listed in the Images API docs
@param image_id: Id of image to store image file data to
@type image_id: UUID
@param file_data: File date to be stored to the image
@type file_data: Data
@param content_type: Content type of data to be stored to the image
@type content_type: String
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}/file'.format(self.base_url, image_id)
content_type = content_type or 'application/octet-stream'
headers = {'Content-Type': content_type}
return self.request('PUT', url, headers=headers, data=file_data,
requestslib_kwargs=requestslib_kwargs)
def get_image_file(self, image_id, requestslib_kwargs=None):
"""
@summary: Get an image file data - Not listed in the Images API docs
@param image_id: Id of image to return image file data from
@type image_id: UUID
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}/file'.format(self.base_url, image_id)
return self.request('GET', url, response_entity_type=Image,
requestslib_kwargs=requestslib_kwargs)
def update_image(self, image_id, replace=None, add=None, remove=None,
requestslib_kwargs=None):
"""
@summary: Update an image
@param image_id: Id of image to update
@type image_id: UUID
@param replace: Image operation to replace an attribute of an image
including the actual value to replace
@type replace: Dictionary
@param add: Image operation to add an attribute to an image including
the actual value to add
@type add: Dictionary
@param remove: Image operation to remove an attribute from an image
including the actual value to remove
@type remove: Dictionary
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}'.format(self.base_url, image_id)
image_update = ImageUpdate(add, replace, remove)
headers = self.default_headers
headers['Content-Type'] = (
'application/openstack-images-v2.1-json-patch')
return self.request('PATCH', url, headers=headers,
request_entity=image_update,
response_entity_type=Image,
requestslib_kwargs=requestslib_kwargs)
def delete_image(self, image_id, requestslib_kwargs=None):
"""
@summary: Delete an image
@param image_id: Id of image to delete
@type image_id: UUID
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}'.format(self.base_url, image_id)
return self.request('DELETE', url,
requestslib_kwargs=requestslib_kwargs)
def list_image_members(self, image_id, params=None,
requestslib_kwargs=None):
"""
@summary: List all image members
@param image_id: Id of image to list image members for
@type image_id: UUID
@param params: Parameters to alter the returned list of images
@type params: Dictionary
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}/members'.format(self.base_url, image_id)
return self.request('GET', url, params=params,
response_entity_type=Members,
requestslib_kwargs=requestslib_kwargs)
def get_image_member(self, image_id, member_id, requestslib_kwargs=None):
"""
@summary: Get an image member of an image
@param image_id: Id of image to use to get image member id
@type image_id: UUID
@param member_id: Id of image member to return
@type member_id: String
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}/members/{2}'.format(self.base_url, image_id,
member_id)
return self.request('GET', url, response_entity_type=Member,
requestslib_kwargs=requestslib_kwargs)
def create_image_member(self, image_id, member_id,
requestslib_kwargs=None):
"""
@summary: Create an image member
@param image_id: Id of image to add image member id to
@type image_id: UUID
@param member_id: Id of image member to add to the image
@type member_id: String
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}/members'.format(self.base_url, image_id)
member = Member(member_id=member_id)
return self.request('POST', url, request_entity=member,
response_entity_type=Member,
requestslib_kwargs=requestslib_kwargs)
def delete_image_member(self, image_id, member_id,
requestslib_kwargs=None):
"""
@summary: Delete an image member
@param image_id: Id of image to delete image member id from
@type image_id: UUID
@param member_id: Id of image member to delete from the image
@type member_id: String
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}/members/{2}'.format(self.base_url, image_id,
member_id)
return self.request('DELETE', url,
requestslib_kwargs=requestslib_kwargs)
def update_image_member(self, image_id, member_id, status,
requestslib_kwargs=None):
"""@summary: Update an image member
@param image_id: Id of image to update the image member id of
@type image_id: UUID
@param member_id: Id of image member to update from the image
@type member_id: String
@param status: Status to which the image member should be set
@type status: String
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}/members/{2}'.format(self.base_url, image_id,
member_id)
member = Member(status=status)
return self.request('PUT', url, request_entity=member,
response_entity_type=Member,
requestslib_kwargs=requestslib_kwargs)
def add_image_tag(self, image_id, tag, requestslib_kwargs=None):
"""
@summary: Add an image tag
@param image_id: Id of image to add image tag to
@type image_id: UUID
@param tag: Image tag to add to the image
@type tag: String
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}/tags/{2}'.format(self.base_url, image_id, tag)
return self.request('PUT', url, requestslib_kwargs=requestslib_kwargs)
def delete_image_tag(self, image_id, tag, requestslib_kwargs=None):
"""
@summary: Delete an image tag
@param image_id: Id of image to delete image tag from
@type image_id: UUID
@param tag: Image tag to delete from the image
@type tag: String
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/images/{1}/tags/{2}'.format(self.base_url, image_id, tag)
return self.request('DELETE', url,
requestslib_kwargs=requestslib_kwargs)
def list_tasks(self, requestslib_kwargs=None):
"""
@summary: List subset of tasks
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/tasks'.format(self.base_url)
return self.request('GET', url, response_entity_type=Tasks,
requestslib_kwargs=requestslib_kwargs)
def get_task_details(self, task_id, requestslib_kwargs=None):
"""
@summary: Get the details of a task
@param task_id: Id of the task being returned
@type task_id: UUID
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/tasks/{1}'.format(self.base_url, task_id)
return self.request('GET', url, response_entity_type=Task,
requestslib_kwargs=requestslib_kwargs)
def task_to_import_image(self, input_=None, type_=None,
requestslib_kwargs=None):
"""
@summary: Create a task to import an image
@param input_: Container for import input parameters containing
image properties and import from
@type input_: Dictionary
@param type_: Type of task
@type type_: String
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/tasks'.format(self.base_url)
task = Task(input_=input_, type_=type_)
return self.request('POST', url, request_entity=task,
response_entity_type=Task,
requestslib_kwargs=requestslib_kwargs)
def task_to_export_image(self, input_=None, type_=None,
requestslib_kwargs=None):
"""
@summary: Create a task to export an image
@param input_: Container for export input parameters containing
image uuid and receiving swift container
@type input_: Dictionary
@param type_: Type of task
@type type_: String
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/tasks'.format(self.base_url)
task = Task(input_=input_, type_=type_)
return self.request('POST', url, request_entity=task,
response_entity_type=Task,
requestslib_kwargs=requestslib_kwargs)
def delete_task(self, task_id, requestslib_kwargs=None):
"""
@summary: Delete a task - Not listed in the Images API docs
@param task_id: Id of the task being deleted
@type task_id: UUID
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/tasks/{1}'.format(self.base_url, task_id)
return self.request('DELETE', url, response_entity_type=Task,
requestslib_kwargs=requestslib_kwargs)
def get_images_schema(self, requestslib_kwargs=None):
"""
@summary: Get images json schema
@param requestslib_kwargs: Keyword arguments to be passed on to
python requests
@type requestslib_kwargs: Dictionary
@return: Response
@rtype: Object
"""
url = '{0}/{1}'.format(self.base_url, 'schemas/images')
return self.request('GET', url, requestslib_kwargs=requestslib_kwargs)
def get_image_schema(self, requestslib_kwargs=None):
    """
    @summary: Retrieve the json schema describing a single image
    @param requestslib_kwargs: Keyword arguments to be passed on to
        python requests
    @type requestslib_kwargs: Dictionary
    @return: Response
    @rtype: Object
    """
    schema_url = '{0}/{1}'.format(self.base_url, 'schemas/image')
    return self.request('GET', schema_url,
                        requestslib_kwargs=requestslib_kwargs)
def get_image_members_schema(self, requestslib_kwargs=None):
    """
    @summary: Retrieve the json schema describing image member collections
    @param requestslib_kwargs: Keyword arguments to be passed on to
        python requests
    @type requestslib_kwargs: Dictionary
    @return: Response
    @rtype: Object
    """
    schema_url = '{0}/{1}'.format(self.base_url, 'schemas/members')
    return self.request('GET', schema_url,
                        requestslib_kwargs=requestslib_kwargs)
def get_image_member_schema(self, requestslib_kwargs=None):
    """
    @summary: Retrieve the json schema describing a single image member
    @param requestslib_kwargs: Keyword arguments to be passed on to
        python requests
    @type requestslib_kwargs: Dictionary
    @return: Response
    @rtype: Object
    """
    schema_url = '{0}/{1}'.format(self.base_url, 'schemas/member')
    return self.request('GET', schema_url,
                        requestslib_kwargs=requestslib_kwargs)
def get_tasks_schema(self, requestslib_kwargs=None):
    """
    @summary: Retrieve the json schema describing task collections
    @param requestslib_kwargs: Keyword arguments to be passed on to
        python requests
    @type requestslib_kwargs: Dictionary
    @return: Response
    @rtype: Object
    """
    schema_url = '{0}/{1}'.format(self.base_url, 'schemas/tasks')
    return self.request('GET', schema_url,
                        requestslib_kwargs=requestslib_kwargs)
def get_task_schema(self, requestslib_kwargs=None):
    """
    @summary: Retrieve the json schema describing a single task
    @param requestslib_kwargs: Keyword arguments to be passed on to
        python requests
    @type requestslib_kwargs: Dictionary
    @return: Response
    @rtype: Object
    """
    schema_url = '{0}/{1}'.format(self.base_url, 'schemas/task')
    return self.request('GET', schema_url,
                        requestslib_kwargs=requestslib_kwargs)
def list_versions(self, url_addition=None, requestslib_kwargs=None):
    """
    @summary: Retrieve all API versions - Not listed in the Images API docs
    @param url_addition: Additional text to be added to the end of the url
    @type url_addition: String
    @param requestslib_kwargs: Keyword arguments to be passed on to
        python requests
    @type requestslib_kwargs: Dictionary
    @return: Response
    @rtype: Object
    """
    # The versions document lives at the unversioned endpoint root.
    versions_url = self.base_url.replace('/v2', '')
    if url_addition:
        versions_url = '{0}{1}'.format(versions_url, url_addition)
    return self.request('GET', versions_url,
                        response_entity_type=Versions,
                        requestslib_kwargs=requestslib_kwargs)
|
# -*- python -*-

# Bazel BUILD template for a locally-installed Gurobi 9.0.x distribution.
# The {gurobi_home} markers are placeholders, presumably substituted by the
# repository rule that instantiates this template — TODO confirm.
load(
    "@drake//tools/install:install.bzl",
    "install",
)

licenses(["by_exception_only"])  # Gurobi

# This rule is only built if a glob() call fails.
genrule(
    name = "error-message",
    outs = ["error-message.h"],
    cmd = "echo 'error: Gurobi 9.0.2 is not installed at {gurobi_home}' && false",  # noqa
    visibility = ["//visibility:private"],
)

# Fall back to the error-message rule (which fails with a clear message)
# when the Gurobi headers are not present on disk.
GUROBI_C_HDRS = glob([
    "gurobi-distro/include/gurobi_c.h",
]) or [":error-message.h"]

GUROBI_CXX_HDRS = glob([
    "gurobi-distro/include/gurobi_c.h",
    "gurobi-distro/include/gurobi_c++.h",
]) or [":error-message.h"]

# C interface to the pre-built Gurobi shared library.
cc_library(
    name = "gurobi_c",
    hdrs = GUROBI_C_HDRS,
    includes = ["gurobi-distro/include"],
    linkopts = [
        "-L{gurobi_home}/lib",
        "-lgurobi90",
        "-Wl,-rpath,{gurobi_home}/lib",
    ],
    visibility = ["//visibility:public"],
)

# C++ interface; additionally links Gurobi's C++ support library.
cc_library(
    name = "gurobi_cxx",
    hdrs = GUROBI_CXX_HDRS,
    includes = ["gurobi-distro/include"],
    linkopts = [
        "-L{gurobi_home}/lib",
        "-lgurobi90",
        "-lgurobi_stdc++",
        "-Wl,-rpath,{gurobi_home}/lib",
    ],
    visibility = ["//visibility:public"],
)

# For macOS, the Drake install step does not need any additional actions to
# install Gurobi, since Gurobi was already installed system-wide in /Library.
install(
    name = "install",
    visibility = ["//visibility:public"],
)
|
# Minimal interactive HTTP client over a raw TCP socket (Python 2 syntax).
# The user types a path; the script sends a GET request, saves
# application/octet-stream responses to disk, and prints other responses
# as text.  Identifier names are Indonesian: isi = content,
# panjang = length, nama = name.
import socket
import sys
from bs4 import BeautifulSoup

client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('10.181.1.242', 50002)
client_socket.connect(server_address)
request_header = 'GET '
request_header2= ' HTTP/1.0\r\nHost: 10.181.1.242\r\n\r\n'
#client_socket.send(request_header)
try:
    while True:
        response = ''
        isi_file = ''
        # Build "GET <path> HTTP/1.0\r\nHost: ...\r\n\r\n" from user input.
        command=raw_input('request : ')
        request_header_fix=request_header+command+request_header2
        client_socket.send(request_header_fix)
        recv = client_socket.recv(1024)
        # print recv
        # Naive header parsing on the first chunk: assumes Content-Length and
        # Content-Type always arrive in the first 1024 bytes — TODO confirm.
        panjang_isi=int(recv.split('Content-Length:')[1].split('\r\n\r\n')[0])
        is_download=recv.split('Content-Type:')[1].split(';')[0].strip()
        if is_download=='application/octet-stream':
            # File download: take the server-sent filename and any body bytes
            # already received after the blank line that ends the headers.
            nama_file=recv.split('filename:')[1].split('\r')[0]
            # print nama_file
            isi_file=recv.split('\r\n\r\n',1)[1]
            # print isi_file
        panjang_data=len(isi_file)
        response += recv
        # Keep reading until the advertised Content-Length has been consumed.
        while panjang_data < panjang_isi :
            recv = client_socket.recv(1024)
            panjang_data+= len(recv)
            response += recv
            isi_file += recv
        if is_download=='application/octet-stream':
            f=open(nama_file,'wb')
            f.write(isi_file)
            f.close()
        else:
            # Non-download responses: strip markup and show the page text.
            soup = BeautifulSoup(response)
            print soup.get_text()
        # print response
        #/dataset/contoh.html  (example path on the server)
        if not recv:
            break
    #
    client_socket.close()
except KeyboardInterrupt:
    # Ctrl-C: close the socket cleanly before exiting.
    client_socket.close()
    sys.exit(0)
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
from unittest import mock
from click.testing import Result
from functional.run import util
from functional.run.test_run_sql_queries import VDK_DB_DEFAULT_TYPE
from vdk.api.plugin.hook_markers import hookimpl
from vdk.internal.builtin_plugins.run.job_context import JobContext
from vdk.plugin.test_utils.util_funcs import cli_assert_equal
from vdk.plugin.test_utils.util_funcs import CliEntryBasedTestRunner
from vdk.plugin.test_utils.util_funcs import get_test_job_path
from vdk.plugin.test_utils.util_plugins import DB_TYPE_SQLITE_MEMORY
from vdk.plugin.test_utils.util_plugins import SqLite3MemoryDbPlugin
class AppendTemplatePlugin:
    """Test plugin that registers the local 'append' template with the job."""

    @hookimpl
    def initialize_job(self, context: JobContext) -> None:
        # Resolve the template directory relative to this test module.
        this_dir = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
        template_path = get_test_job_path(this_dir, "template-append")
        context.templates.add_template("append", pathlib.Path(template_path))
@mock.patch.dict(os.environ, {VDK_DB_DEFAULT_TYPE: DB_TYPE_SQLITE_MEMORY})
def test_run_job_with_template():
    """Run a job that uses the 'append' template and verify the loaded rows."""
    db_plugin = SqLite3MemoryDbPlugin()
    test_runner = CliEntryBasedTestRunner(db_plugin, AppendTemplatePlugin())
    run_result: Result = test_runner.invoke(
        ["run", util.job_path("job-using-templates")]
    )
    cli_assert_equal(0, run_result)
    # The template should have appended exactly 5 rows into dim_vm.
    row_count = db_plugin.db.execute_query("select count(1) from dim_vm")[0][0]
    assert row_count == 5
|
# Simple character sheet stored as a dict, echoed one stat per line.
Status = {"health": 100, "power": 99, "mana": 77, "armor": 66, "name": "Ethan"}

# Print labels exactly as before (note the capitalized "Mana:" label).
for label, key in [("health:", "health"), ("power:", "power"),
                   ("Mana:", "mana"), ("armor:", "armor"), ("name:", "name")]:
    print(label, Status[key])
|
import csv

# Copy new_vgsale.csv into new_vgsale2.csv, appending two derived columns:
# adjPlatform = "<platform_n>.<Platform>" and adjGenre = "<genre_n>.<Genre>".
# Fixes vs. the original: removed the unused `row1 = csv.reader(...)` and
# renamed the output handle so it no longer shadows the input `csvfile`.
_COLUMNS = ['num', 'Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher',
            'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales',
            'genre_n', 'publisher_n', 'platform_n']

with open('new_vgsale.csv', newline='') as infile:
    rows = csv.DictReader(infile)
    with open('new_vgsale2.csv', 'w', newline='') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(_COLUMNS + ['adjPlatform', 'adjGenre'])
        for row in rows:
            writer.writerow(
                [row[col] for col in _COLUMNS]
                + [row['platform_n'] + "." + row['Platform'],
                   row['genre_n'] + "." + row['Genre']])
|
'''
For the table results aggregate results across context sizes
But then generate a plot for each of the dataset size that shows
performance for these tasks does not depend much on the context size
'''
import numpy as np
import joblib
from collections import defaultdict
import os.path as osp
# Context sizes (number of context trajectories) evaluated for every run.
context_sizes = [1,2,3,4]
# (num demo trajectories, subsampling rate) pairs to aggregate over;
# only (64,1) is active for the current table/plot.
# amounts = [(64,1), (64,20), (16,20), (4,20)]
amounts = [(64,1)]
# Output directories for the Meta-IRL (state-action reward) runs, keyed by
# (num demo trajectories, subsampling rate) as encoded in the dir names.
sa_irl_results = {
    'name': 'Meta-IRL (state-action)',
    (64,1): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-1-state-action-rew-search-normalized-fixed/hc_rand_vel_np_airl_64_demos_sub_1_state_action_rew_search_normalized_fixed_2019_04_18_17_20_04_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-1-state-action-rew-search-normalized-fixed/hc_rand_vel_np_airl_64_demos_sub_1_state_action_rew_search_normalized_fixed_2019_04_18_17_22_04_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-1-state-action-rew-search-normalized-fixed/hc_rand_vel_np_airl_64_demos_sub_1_state_action_rew_search_normalized_fixed_2019_04_18_17_26_05_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-1-state-action-rew-search-normalized-fixed/hc_rand_vel_np_airl_64_demos_sub_1_state_action_rew_search_normalized_fixed_2019_04_18_17_26_35_0003--s-0',
    ],
    (64,20): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_64_demos_rew_search_with_saving_more_rew_search_2019_04_14_22_27_53_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_64_demos_rew_search_with_saving_more_rew_search_2019_04_14_22_27_54_0004--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_64_demos_rew_search_with_saving_more_rew_search_2019_04_14_22_27_54_0007--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_64_demos_rew_search_with_saving_more_rew_search_2019_04_14_22_27_53_0010--s-0',
    ],
    (16,20): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-16-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_16_demos_rew_search_with_saving_more_rew_search_2019_04_15_16_03_04_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-16-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_16_demos_rew_search_with_saving_more_rew_search_2019_04_15_16_06_43_0004--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-16-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_16_demos_rew_search_with_saving_more_rew_search_2019_04_15_16_33_52_0007--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-16-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_16_demos_rew_search_with_saving_more_rew_search_2019_04_15_16_57_06_0010--s-0',
    ],
    (4,20): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-4-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_4_demos_rew_search_with_saving_more_rew_search_2019_04_20_13_22_53_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-4-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_4_demos_rew_search_with_saving_more_rew_search_2019_04_20_13_22_53_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-4-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_4_demos_rew_search_with_saving_more_rew_search_2019_04_20_13_22_54_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-4-demos-rew-search-with-saving-more-rew-search/hc_rand_vel_np_airl_4_demos_rew_search_with_saving_more_rew_search_2019_04_20_13_22_54_0003--s-0',
    ]
}
# Output directories for the Meta-IRL (state-only reward) runs, keyed by
# (num demo trajectories, subsampling rate) as encoded in the dir names.
s_irl_results = {
    'name': 'Meta-IRL (state-only)',
    (64,1): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-1-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_64_demos_sub_1_state_only_rew_search_normalized_correct_2019_04_20_00_00_12_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-1-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_64_demos_sub_1_state_only_rew_search_normalized_correct_2019_04_20_00_00_12_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-1-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_64_demos_sub_1_state_only_rew_search_normalized_correct_2019_04_20_00_00_13_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-1-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_64_demos_sub_1_state_only_rew_search_normalized_correct_2019_04_20_00_01_42_0003--s-0',
    ],
    (64,20): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_64_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_00_02_42_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_64_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_00_22_13_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_64_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_00_25_13_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-64-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_64_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_00_26_13_0003--s-0',
    ],
    (16,20): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-16-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_16_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_00_26_13_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-16-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_16_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_00_35_13_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-16-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_16_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_00_46_43_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-16-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_16_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_00_47_43_0003--s-0',
    ],
    (4,20): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-4-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_4_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_13_21_24_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-4-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_4_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_13_21_25_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-4-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_4_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_13_21_25_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-np-airl-4-demos-sub-20-state-only-rew-search-normalized-correct/hc_rand_vel_np_airl_4_demos_sub_20_state_only_rew_search_normalized_correct_2019_04_20_13_21_26_0003--s-0',
    ]
}
# MLE version
# bc_results = {
# 'name': 'Meta-BC',
# (64,1): [
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-64-demos-sub-1-no-norm-with-saving/hc_rand_vel_64_demos_sub_1_no_norm_with_saving_2019_04_19_21_36_41_0000--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-64-demos-sub-1-no-norm-with-saving/hc_rand_vel_64_demos_sub_1_no_norm_with_saving_2019_04_19_21_36_41_0001--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-64-demos-sub-1-no-norm-with-saving/hc_rand_vel_64_demos_sub_1_no_norm_with_saving_2019_04_19_21_36_41_0002--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-64-demos-sub-1-no-norm-with-saving/hc_rand_vel_64_demos_sub_1_no_norm_with_saving_2019_04_19_21_36_41_0003--s-0',
# ],
# (64,20): [
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-64-demos-sub-20-no-norm-with-saving/hc_rand_vel_64_demos_sub_20_no_norm_with_saving_2019_04_19_21_41_10_0002--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-64-demos-sub-20-no-norm-with-saving/hc_rand_vel_64_demos_sub_20_no_norm_with_saving_2019_04_19_21_41_11_0000--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-64-demos-sub-20-no-norm-with-saving/hc_rand_vel_64_demos_sub_20_no_norm_with_saving_2019_04_19_21_41_11_0001--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-64-demos-sub-20-no-norm-with-saving/hc_rand_vel_64_demos_sub_20_no_norm_with_saving_2019_04_19_21_41_11_0003--s-0',
# ],
# (16,20): [
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-16-demos-sub-20-no-norm-with-saving/hc_rand_vel_16_demos_sub_20_no_norm_with_saving_2019_04_19_21_38_40_0003--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-16-demos-sub-20-no-norm-with-saving/hc_rand_vel_16_demos_sub_20_no_norm_with_saving_2019_04_19_21_38_41_0000--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-16-demos-sub-20-no-norm-with-saving/hc_rand_vel_16_demos_sub_20_no_norm_with_saving_2019_04_19_21_38_41_0001--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-16-demos-sub-20-no-norm-with-saving/hc_rand_vel_16_demos_sub_20_no_norm_with_saving_2019_04_19_21_38_41_0002--s-0',
# ],
# (4,20): [
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-4-demos-sub-20-no-norm-with-saving/hc_rand_vel_4_demos_sub_20_no_norm_with_saving_2019_04_19_22_22_41_0001--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-4-demos-sub-20-no-norm-with-saving/hc_rand_vel_4_demos_sub_20_no_norm_with_saving_2019_04_19_22_22_41_0002--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-4-demos-sub-20-no-norm-with-saving/hc_rand_vel_4_demos_sub_20_no_norm_with_saving_2019_04_19_22_22_41_0003--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-4-demos-sub-20-no-norm-with-saving/hc_rand_vel_4_demos_sub_20_no_norm_with_saving_2019_04_19_22_22_43_0000--s-0',
# ]
# }
# MSE version
# Output directories for the Meta-BC (MSE loss) runs, keyed by
# (num demo trajectories, subsampling rate) as encoded in the dir names.
bc_results = {
    'name': 'Meta-BC',
    (64,1): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-64-demos-sub-1-paper-version/hc_rand_vel_mse_64_demos_sub_1_paper_version_2019_05_25_15_00_00_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-64-demos-sub-1-paper-version/hc_rand_vel_mse_64_demos_sub_1_paper_version_2019_05_25_15_00_01_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-64-demos-sub-1-paper-version/hc_rand_vel_mse_64_demos_sub_1_paper_version_2019_05_25_15_00_01_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-64-demos-sub-1-paper-version/hc_rand_vel_mse_64_demos_sub_1_paper_version_2019_05_25_15_00_02_0003--s-0',
    ],
    (64,20): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-64-demos-sub-20-paper-version/hc_rand_vel_mse_64_demos_sub_20_paper_version_2019_05_25_14_59_10_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-64-demos-sub-20-paper-version/hc_rand_vel_mse_64_demos_sub_20_paper_version_2019_05_25_14_59_12_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-64-demos-sub-20-paper-version/hc_rand_vel_mse_64_demos_sub_20_paper_version_2019_05_25_14_59_12_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-64-demos-sub-20-paper-version/hc_rand_vel_mse_64_demos_sub_20_paper_version_2019_05_25_14_59_13_0003--s-0',
    ],
    (16,20): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-16-demos-sub-20-paper-version/hc_rand_vel_mse_16_demos_sub_20_paper_version_2019_05_25_14_58_30_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-16-demos-sub-20-paper-version/hc_rand_vel_mse_16_demos_sub_20_paper_version_2019_05_25_14_58_31_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-16-demos-sub-20-paper-version/hc_rand_vel_mse_16_demos_sub_20_paper_version_2019_05_25_14_58_31_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-16-demos-sub-20-paper-version/hc_rand_vel_mse_16_demos_sub_20_paper_version_2019_05_25_14_58_33_0003--s-0',
    ],
    (4,20): [
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-4-demos-sub-20-paper-version/hc_rand_vel_mse_4_demos_sub_20_paper_version_2019_05_25_14_57_46_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-4-demos-sub-20-paper-version/hc_rand_vel_mse_4_demos_sub_20_paper_version_2019_05_25_14_57_46_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-4-demos-sub-20-paper-version/hc_rand_vel_mse_4_demos_sub_20_paper_version_2019_05_25_14_57_47_0002--s-0',
        '/scratch/hdd001/home/kamyar/output/hc-rand-vel-mse-4-demos-sub-20-paper-version/hc_rand_vel_mse_4_demos_sub_20_paper_version_2019_05_25_14_57_47_0003--s-0',
    ]
}
# Dagger MSE version
# Output directories for the Meta-Dagger runs; only (64,1) was run.
dagger_results = {
    'name': 'Meta-Dagger',
    (64,1): [
        '/scratch/hdd001/home/kamyar/output/correct-hc-rand-vel-meta-dagger-use-z-sample-det-expert-MSE-64-demos-sub-1-100-updates-per-call/correct_hc_rand_vel_meta_dagger_use_z_sample_det_expert_MSE_64_demos_sub_1_100_updates_per_call_2019_07_28_16_29_06_0000--s-0',
        '/scratch/hdd001/home/kamyar/output/correct-hc-rand-vel-meta-dagger-use-z-sample-det-expert-MSE-64-demos-sub-1-100-updates-per-call/correct_hc_rand_vel_meta_dagger_use_z_sample_det_expert_MSE_64_demos_sub_1_100_updates_per_call_2019_07_28_16_29_07_0001--s-0',
        '/scratch/hdd001/home/kamyar/output/correct-hc-rand-vel-meta-dagger-use-z-sample-det-expert-MSE-64-demos-sub-1-100-updates-per-call/correct_hc_rand_vel_meta_dagger_use_z_sample_det_expert_MSE_64_demos_sub_1_100_updates_per_call_2019_07_28_16_29_09_0002--s-0',
    ]
}
# MLE version
# bc_results = {
# 'name': 'Meta-BC',
# (64,1): [
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-64-demos-sub-1-paper-version/hc_rand_vel_MLE_64_demos_sub_1_paper_version_2019_05_25_16_47_11_0000--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-64-demos-sub-1-paper-version/hc_rand_vel_MLE_64_demos_sub_1_paper_version_2019_05_25_16_47_11_0001--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-64-demos-sub-1-paper-version/hc_rand_vel_MLE_64_demos_sub_1_paper_version_2019_05_25_16_47_11_0002--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-64-demos-sub-1-paper-version/hc_rand_vel_MLE_64_demos_sub_1_paper_version_2019_05_25_16_47_12_0003--s-0',
# ],
# (64,20): [
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-64-demos-sub-20-paper-version/hc_rand_vel_MLE_64_demos_sub_20_paper_version_2019_05_25_16_47_49_0000--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-64-demos-sub-20-paper-version/hc_rand_vel_MLE_64_demos_sub_20_paper_version_2019_05_25_16_47_50_0001--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-64-demos-sub-20-paper-version/hc_rand_vel_MLE_64_demos_sub_20_paper_version_2019_05_25_16_47_50_0002--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-64-demos-sub-20-paper-version/hc_rand_vel_MLE_64_demos_sub_20_paper_version_2019_05_25_16_47_51_0003--s-0',
# ],
# (16,20): [
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-16-demos-sub-20-paper-version/hc_rand_vel_MLE_16_demos_sub_20_paper_version_2019_05_25_16_48_27_0000--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-16-demos-sub-20-paper-version/hc_rand_vel_MLE_16_demos_sub_20_paper_version_2019_05_25_16_48_28_0001--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-16-demos-sub-20-paper-version/hc_rand_vel_MLE_16_demos_sub_20_paper_version_2019_05_25_16_48_28_0002--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-16-demos-sub-20-paper-version/hc_rand_vel_MLE_16_demos_sub_20_paper_version_2019_05_25_16_48_29_0003--s-0',
# ],
# (4,20): [
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-4-demos-sub-20-paper-version/hc_rand_vel_MLE_4_demos_sub_20_paper_version_2019_05_25_16_48_59_0000--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-4-demos-sub-20-paper-version/hc_rand_vel_MLE_4_demos_sub_20_paper_version_2019_05_25_16_49_00_0001--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-4-demos-sub-20-paper-version/hc_rand_vel_MLE_4_demos_sub_20_paper_version_2019_05_25_16_49_00_0002--s-0',
# '/scratch/hdd001/home/kamyar/output/hc-rand-vel-MLE-4-demos-sub-20-paper-version/hc_rand_vel_MLE_4_demos_sub_20_paper_version_2019_05_25_16_49_01_0003--s-0',
# ]
# }
# for method in [sa_irl_results, s_irl_results, bc_results]:
# print('\n{}'.format(method['name']))
# for amount in [(64,1), (64,20), (16,20), (4,20)]:
# d = joblib.load()
def gather_run_costs_for_context_size(d, c_size):
    """Collect the 'run_costs' lists for one context size across all tasks."""
    costs = []
    for per_task in d.values():
        costs += per_task[c_size]['run_costs']
    return costs
# gather the results
def gather_results(method_paths):
    """Load eval stats for every run dir of a method and bucket run costs.

    For each data amount in the module-level `amounts`, loads
    `all_eval_stats.pkl` from every run directory listed in
    `method_paths[data_amount]` and accumulates the run costs per context
    size (module-level `context_sizes`).

    :param method_paths: dict mapping (demos, subsampling) -> list of run dirs
    :return: {data_amount: {context_size: [run_cost, ...]}}
    """
    new_dict = {}
    for data_amount in amounts:
        print('\t{}'.format(data_amount))
        amount_dict = defaultdict(list)
        for path in method_paths[data_amount]:
            print('\t\tGathering: {}'.format(path))
            try:
                d = joblib.load(osp.join(path, 'all_eval_stats.pkl'))['all_eval_stats'][0]
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrow to Exception so Ctrl-C still works.
            except Exception:
                print('FAILED {}'.format(path))
                continue
            for c in context_sizes:
                amount_dict[c].extend(gather_run_costs_for_context_size(d, c))
        new_dict[data_amount] = amount_dict
    return new_dict
# # IF YOU WANT TO REGATHER RESULTS RUN THIS
# save_dict = {}
# # for method in [sa_irl_results, s_irl_results, bc_results]:
# # for method in [bc_results]:
# for method in [dagger_results]:
#     print(method['name'])
#     save_dict[method['name']] = gather_results(method)
# joblib.dump(save_dict, 'hc_rand_vel_save_dict_with_mse_dagger.pkl', compress=3)
# ELSE
# save_dict = joblib.load('hc_rand_vel_save_dict.pkl')
# Load previously-gathered results: {method name: {amount: {context: costs}}}.
save_dict = joblib.load('hc_rand_vel_save_dict_with_mse_dagger.pkl')
print(save_dict.keys())
# save_dict.update(joblib.load('hc_rand_vel_save_dict_mse_bc.pkl'))
# save_dict = joblib.load('hc_rand_vel_save_dict_with_mse_bc.pkl')

# Table: for each (method, data amount), aggregate run costs across ALL
# context sizes and report mean +/- std (scaled by 1/1000).
for name, method_d in save_dict.items():
    print('\n')
    print(name)
    for amount, amount_d in method_d.items():
        print('\t{}'.format(amount))
        all_run_costs = []
        for context_size, costs in amount_d.items():
            all_run_costs.extend(costs)
        print('\t\tDelta: %.3f +/- %.3f' % (np.mean(all_run_costs)/1000, np.std(all_run_costs)/1000))
# plot some things
import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless backend: only saving to file, no display
import matplotlib.pyplot as plt

# Plot mean +/- std of run cost vs. context size for one data amount.
data_amount_to_plot = (64,1)
context_means = defaultdict(list)
context_stds = defaultdict(list)
for name, method_d in save_dict.items():
    # print(name)
    # print(method_d)
    for context_size in context_sizes:
        context_means[name].append(
            np.mean(
                method_d[data_amount_to_plot][context_size]
            ) / 1000.0
        )
        context_stds[name].append(
            np.std(
                method_d[data_amount_to_plot][context_size]
            ) / 1000.0
        )
# print(context_means)
# print(context_stds)
fig, ax = plt.subplots(1)
ax.set_xlabel('Number of Context Trajectories')
ax.set_ylabel('Delta from Target Velocity')
# ax.set_ylim([0.0, 1.6])
ax.set_ylim([0.0, 0.75])
print(context_means.keys())
# print(len(context_means['bc']))
# ax.errorbar(
#     np.array(list(range(1,5))), context_means['Meta-BC'], context_stds['Meta-BC'],
#     elinewidth=2.0, capsize=4.0, barsabove=True, linewidth=2.0, label='Meta-BC'
# )
# ax.errorbar(
#     np.array(list(range(1,5))) + 0.03, context_means['Meta-IRL (state-action)'], context_stds['Meta-IRL (state-action)'],
#     elinewidth=2.0, capsize=4.0, barsabove=True, linewidth=2.0, label='Meta-IRL (state-action)'
# )
# ax.errorbar(
#     np.array(list(range(1,5))) + 0.06, context_means['Meta-IRL (state-action)'], context_stds['Meta-IRL (state-action)'],
#     elinewidth=2.0, capsize=4.0, barsabove=True, linewidth=2.0, label='Meta-IRL (state-only)'
# )
# The +0.06 x-offset keeps error bars from overlapping other methods' bars.
ax.errorbar(
    np.array(list(range(1,5))) + 0.06, context_means['Meta-Dagger'], context_stds['Meta-Dagger'],
    elinewidth=2.0, capsize=4.0, barsabove=True, linewidth=2.0, label='Meta-Dagger'
)
# lgd = ax.legend(loc='upper center', bbox_to_anchor=(0.725, 0.1), shadow=False, ncol=3)
lgd = ax.legend(loc='upper right', shadow=False, ncol=1)
# plt.savefig('hc_context_size_plot.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig('hc_rand_vel_dagger_context_size_plot.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
|
from odoo import models, fields, api
from lxml import etree
from odoo.tools.safe_eval import safe_eval
from odoo.osv.orm import setup_modifiers
class HrEmployee(models.Model):
    """Adds an unverified/verified workflow state to hr.employee and makes
    the form read-only once the record is verified."""
    _inherit = "hr.employee"

    # Verification state; tracked on change, not copied on duplicate.
    state = fields.Selection([
        ('unverified', 'Unverified'),
        ('verified', 'Verified'),
    ], readonly=True, copy=False, index=True, track_visibility='onchange', string="State", default='unverified')

    def verification(self):
        """Mark the selected employees as verified."""
        for rec in self:
            rec.state = 'verified'

    def make_unverified(self):
        """Revert the selected employees to unverified."""
        for rec in self:
            rec.state = 'unverified'

    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        """Rewrite the form view so every field becomes readonly when
        state == 'verified'; fields with an existing readonly domain get
        the condition OR'ed in instead of overwritten.

        NOTE(review): the nesting below was reconstructed from
        indentation-stripped source — confirm against the original file.
        """
        result = super(HrEmployee, self).fields_view_get(view_id, view_type, toolbar=toolbar, submenu=submenu)
        if view_type=="form":
            doc = etree.XML(result['arch'])
            for node in doc.iter(tag="field"):
                if 'readonly' in node.attrib.get("modifiers",''):
                    attrs = node.attrib.get("attrs",'')
                    if 'readonly' in attrs:
                        # Field already has a readonly domain in attrs:
                        # prepend ('state','=','verified') and OR it in.
                        attrs_dict = safe_eval(node.get('attrs'))
                        r_list = attrs_dict.get('readonly',)
                        if type(r_list)==list:
                            r_list.insert(0,('state','=','verified'))
                            # Insert '|' operators so the domain stays valid
                            # polish-notation after adding one more leaf.
                            if len(r_list) == 2:
                                r_list.insert(0,'|')
                            if len(r_list) > 2:
                                r_list.insert(2,'|')
                            attrs_dict.update({'readonly':r_list})
                            node.set('attrs', str(attrs_dict))
                            setup_modifiers(node, result['fields'][node.get("name")])
                        continue
                    else:
                        # readonly came from the field definition itself;
                        # leave it untouched.
                        continue
                # Default case: force readonly once the record is verified.
                node.set('attrs', "{'readonly':[('state','=','verified')]}")
                setup_modifiers(node, result['fields'][node.get("name")])
            result['arch'] = etree.tostring(doc)
        return result
import requests
import json
from lxml import etree
# Weibo trending-topics ("hot search") summary page plus a desktop-browser
# User-Agent so the request is not rejected.
url="https://s.weibo.com/top/summary?Refer=top_hot&topnav=1&wvr=6"
header={'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36'}
def main():
    """Scrape Weibo's trending list and dump the topic titles to hots.json."""
    page = etree.HTML(requests.get(url, headers=header).text)
    # Rank numbers and view counts are extracted but currently unused;
    # kept for parity with the commented-out console table below.
    rank = page.xpath('//td[@class="td-01 ranktop"]/text()')
    affair = page.xpath('//td[@class="td-02"]/a/text()')
    view = page.xpath('//td[@class="td-02"]/span/text()')
    # top=affair[0]
    # affair=affair[1:11]
    # print('{0:<10}\t{1:<40}'.format("top",top))
    # for i in range(0, len(affair)):
    #     print("{0:<10}\t{1:{3}<30}\t{2:{3}>20}".format(rank[i],affair[i],view[i],chr(12288)))
    filename = 'hots.json'
    with open(filename, 'w', encoding='utf-8') as file_obj:
        json.dump(affair, file_obj, ensure_ascii=False)
# Run the scraper only when executed as a script.
if __name__ == "__main__":
    main()
class Node:
    """Graph node holding a property, salary and Tax value (all unset)."""

    def __init__(self):
        # Fields are filled in later by the (unfinished) input parser below.
        self.property = self.salary = self.Tax = None
# Nr of nodes & edges, max nr of turns / player, starting node of Alob and Bice
N,M,K,sa,sb = list(map(int, input().split()))
# Read the M edges as [u, v] pairs.
edges = []
for i in range(M):
    u, v = list(map(int, input().split()))
    edges.append([u,v])
for i in range(N):
    # Salary , tax or property
    # NOTE(review): per-node attribute parsing is unimplemented — this loop
    # is a stub that consumes nothing.
    pass
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency
)
# oTree app metadata shown in the admin interface.
author = 'Fan Yuting & Liu Hang'
doc = """
Fishery app, etc.
"""
class Constants(BaseConstants):
    """Static configuration of the single-round login app."""
    name_in_url = 'login'
    players_per_group = None  # no fixed group size for this app
    num_rounds = 1
    # Views
    instructions_template = 'login/Instructions.html'
class Subsession(BaseSubsession):
    def creating_session(self):
        # No session-creation setup is needed for the login app.
        pass
class Group(BaseGroup):
    # No group-level state in this app.
    pass
class Player(BasePlayer):
    """Login credentials collected from each participant."""
    # Name may be duplicated, use student id as the key
    user_name = models.CharField()
    student_id = models.CharField()
|
import numpy as np
import pickle as pkl
import scipy.sparse as sp
import sys
import re
import datetime
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py

    Applies the substitutions in order: strip disallowed characters, split
    common English contractions into separate tokens, pad punctuation with
    spaces, collapse runs of whitespace, then strip and lowercase.
    """
    substitutions = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
def sample_mask(idx, l):
    """Create a boolean mask of length `l` that is True at positions `idx`.

    :param idx: index or array of indices to set
    :param l: total mask length
    :return: numpy array of dtype bool
    """
    mask = np.zeros(l)
    mask[idx] = 1
    # `np.bool` (an alias of builtin bool) was removed in NumPy 1.24;
    # using the builtin keeps identical behavior on all NumPy versions.
    return np.array(mask, dtype=bool)
def parse_index_file(filename):
    """Parse an index file containing one integer per line.

    :param filename: path to the index file
    :return: list of ints in file order
    """
    # The original iterated `open(filename)` without closing it, leaking
    # the file handle; `with` guarantees it is closed.
    with open(filename) as f:
        return [int(line.strip()) for line in f]
def load_corpus(dataset_str, neg_loss):
    """
    Loads input corpus from data directory
    ind.dataset_str.y => the one-hot labels of the labeled training docs as numpy.ndarray object;
    ind.dataset_str.ty => the one-hot labels of the test docs as numpy.ndarray object;
    ind.dataset_str.ally => the labels for training docs and words as numpy.ndarray object;
    ind.dataset_str.adj => adjacency matrix of word/doc nodes as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.train.index => the indices of training docs in original doc list.
    All objects above must be saved using python pickle module.
    :param dataset_str: Dataset name
    :param neg_loss: when truthy, also load positive/negative edge index lists
        from data/<dataset>_adj_pos.txt and _adj_neg.txt (CSV "i,j" lines).
    :return: All data input files loaded (as well the training/test data).
    """
    names = ['y', 'ty', 'ally', 'adj']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            # latin1 encoding lets Python 3 read pickles written by Python 2
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    y, ty, ally, adj = tuple(objects)
    # print(y.shape, ty.shape, ally.shape)
    # Stack train+word labels on top of test labels: row order matches adj
    labels = np.vstack((ally, ty))
    # print(labels.shape)
    train_idx_orig = parse_index_file(
        "data/{}.train.index".format(dataset_str))
    train_size = len(train_idx_orig)
    val_size = int(0.1 * train_size)  # last 10% of training docs held out
    test_size = ty.shape[0]
    idx_train = range(len(y))
    idx_val = range(len(y), len(y) + val_size)
    idx_test = range(ally.shape[0], ally.shape[0] + test_size)
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    # Symmetrize: keep the larger of adj[i,j] / adj[j,i] in both positions
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    if neg_loss:
        adj_pos_i = list()
        adj_pos_j = list()
        with open('data/' + dataset_str + '_adj_pos.txt', 'r') as f:
            lines = f.readlines()
            for line in lines:
                t = line.strip().split(',')
                adj_pos_i.append([int(t[0])])
                adj_pos_j.append([int(t[1])])
        adj_neg_i = list()
        adj_neg_j = list()
        with open('data/' + dataset_str + '_adj_neg.txt', 'r') as f:
            lines = f.readlines()
            for line in lines:
                t = line.strip().split(',')
                adj_neg_i.append([int(t[0])])
                adj_neg_j.append([int(t[1])])
    else:
        adj_pos_i = None
        adj_pos_j = None
        adj_neg_i = None
        adj_neg_j = None
    return adj, y_train, y_val, y_test, train_mask, val_mask, test_mask, train_size, test_size, adj_pos_i, adj_pos_j, adj_neg_i, adj_neg_j
def preprocess_features(features):
    """Row-normalize the feature matrix and return it as a dense ndarray.

    Rows summing to zero are left as zeros (their inverse sum is clamped to 0).
    """
    row_totals = np.array(features.sum(1))
    inv_totals = np.power(row_totals, -1).flatten()
    inv_totals[np.isinf(inv_totals)] = 0.
    scaler = sp.diags(inv_totals)
    normalized = scaler.dot(features)
    # return sparse_to_tuple(normalized)
    return normalized.A
def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 * A * D^-1/2.

    Returns a scipy COO sparse matrix; zero-degree nodes stay all-zero.
    """
    matrix = sp.coo_matrix(adj)
    degrees = np.array(matrix.sum(1))
    inv_sqrt_deg = np.power(degrees, -0.5).flatten()
    inv_sqrt_deg[np.isinf(inv_sqrt_deg)] = 0.
    d_half = sp.diags(inv_sqrt_deg)
    return matrix.dot(d_half).transpose().dot(d_half).tocoo()
def preprocess_adj(adj):
    """Simple-GCN preprocessing: add self-loops, then symmetrically normalize."""
    with_self_loops = adj + sp.eye(adj.shape[0])
    # return sparse_to_tuple(...) was the original tuple form; dropped upstream
    return normalize_adj(with_self_loops)
def print_log(msg='', end='\n'):
    """Print msg with a '[Y/M/D HH:MM:SS] ' prefix on every line.

    msg may be a multi-line string (each line is prefixed) or any object
    (printed via str()). `end` is applied only to the final line.

    Fix: the original compared each line's *text* against lines[-1], so when
    an earlier line happened to equal the last line it received the custom
    `end` too; compare positions instead.
    """
    now = datetime.datetime.now()
    # Month/day intentionally unpadded to match the original format
    t = str(now.year) + '/' + str(now.month) + '/' + str(now.day) + ' ' \
        + str(now.hour).zfill(2) + ':' + str(now.minute).zfill(2) + ':' + str(now.second).zfill(2)
    if isinstance(msg, str):
        lines = msg.split('\n')
    else:
        lines = [msg]
    last = len(lines) - 1
    for i, line in enumerate(lines):
        print('[' + t + '] ' + str(line), end=(end if i == last else '\n'))
|
from django.urls import path
from . import views
app_name = 'main'  # URL namespace: reverse these as 'main:<name>'

# Read-only system-monitoring endpoints; each path maps to a view in views.py.
urlpatterns = [
    path('sys',views.get_sys_info,name='sys'),
    path('cpu_percent',views.get_cpu_percent, name='cpu_percent'),
    path('cpu_count',views.get_cpu_count, name='cpu_count'),
    path('cpu_times',views.get_cpu_times, name='cpu_times'),
    # NOTE(review): this route serves memory usage but is *named* 'cpu_usage'
    # — looks like a copy-paste slip; confirm no template/reverse() depends
    # on the name before renaming it to 'mem_usage'.
    path('mem_usage',views.get_mem_usage, name='cpu_usage'),
    path('disk_usage',views.get_disk_usage, name='disk_usage'),
    path('disk_count',views.get_disk_count, name='disk_count'),
    path('net_speed',views.get_net_speed,name='net_speed'),
    path('net_info',views.get_net_info,name='net_info'),
    path('process',views.get_process_list,name='process'),
]
|
# Project Euler #1: sum of the natural numbers below 1000 that are multiples
# of 3 or 5.  (An earlier commented-out variant iterated 1..1000 inclusive,
# which wrongly includes 1000 itself; iterating range(1000) matches the
# original live code, which counted index values 0..999.)
count = sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)
print(count)
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import face_recognition
def get_embedding(img):
    """Return the 128-d face encoding for img, or None unless exactly one face is found.

    img is expected to be an H x W x C image array; any alpha channel is
    dropped before encoding.
    """
    rgb = img[:, :, :3].astype('uint8')
    faces = face_recognition.face_encodings(rgb)
    if len(faces) != 1:
        return None
    return faces[0]
class WithdrawalError(Exception):
    """Raised when a withdrawal exceeds the available balance."""
    pass
class DepositError(Exception):
    """Raised when the opening deposit is below the Rs.500 minimum."""
    pass
class SavingsAccount:
    """Savings account requiring a minimum opening balance of Rs.500.

    All feedback is printed rather than raised to the caller, matching the
    original interactive style.  If the opening deposit is rejected the
    instance is created *without* a `bal` attribute (unchanged contract).
    """

    def __init__(self, b=500):
        try:
            if b < 500:
                raise DepositError
            self.bal = b
            print('Account created with balance:', self.bal, '\n')
        except DepositError:
            # Fix: the original also executed `del(self)` here, which only
            # unbinds the local name and has no effect on the new object —
            # removed as dead, misleading code.
            print('Minimum amount to be deposited to create an account is 500.Please deposit Rs.500.\n')

    def deposit(self, a):
        """Add amount a to the balance and report the new balance."""
        self.bal += a
        print('Rs.', a, 'has been deposited. Your available balance:', self.bal, '\n')

    def withdraw(self, x):
        """Withdraw x if covered by the balance; otherwise print a warning."""
        try:
            if x > self.bal:
                raise WithdrawalError
            self.bal -= x
            print('Rs.', x, 'has been withdrawn. Your available balance:', self.bal, '\n')
        except WithdrawalError:
            print('You cannot withdraw more than the account balance. Available balance:', self.bal)
# Demo: the first account is rejected (below the Rs.500 minimum), the second
# succeeds and then exercises deposit plus both withdraw outcomes.
u=SavingsAccount(400)
u=SavingsAccount(1000)
u.deposit(10000)
u.withdraw(200000)
u.withdraw(2000)
|
"""
Exercise 1: Vigenère cyper
"""
letters = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
Ki = lambda c, k: letters.index(k[c % len(k)])
def viginere_cipher_encrypt(D, key):
"""Encrypt: Ei = (Pi + Ki) % 26"""
Pi = lambda i: letters.index(i)
Ei = lambda a, b: letters[(a + b) % 26]
E = [Ei(Pi(i), Ki(c, key)) for c, i in enumerate(D)]
return "".join(E)
def viginere_cipher_decode(E, key):
"""Decrypt: Di = (Ei - Ki + 26) % 26"""
Di = lambda c, i: (letters.index(i) - Ki(c, key) + 26) % 26
D = [letters[Di(c, i)] for c, i in enumerate(E)]
return "".join(D)
def viginiere_test():
    """Round-trip the classic LEMON / ATTACKATDAWN Vigenère example."""
    key, plain, cipher = "LEMON", "ATTACKATDAWN", "LXFOPVEFRNHR"
    assert viginere_cipher_encrypt(plain, key) == cipher
    assert viginere_cipher_decode(cipher, key) == plain
if __name__ == '__main__':
    # Self-check against the textbook example, then decode the exercise text.
    viginiere_test()
    cipher_text = "HIMEZYZIPK"
    key = "GOOGLE"
    D = viginere_cipher_decode(cipher_text, key)
    print(D)
|
def difference(x, y):
    """Print the elements of x that are not in y (the set difference x - y).

    Accepts any iterables (callers pass sets); the result preserves x's
    iteration order and is printed, not returned, matching the original.

    Fix: replaced the O(n*m) nested scan with list.remove by a single pass
    against a membership set.
    """
    exclude = set(y)
    remaining = [item for item in x if item not in exclude]
    print("The difference of these two sets is ", remaining)
def main():
    """Demo: print the difference of two sample sets."""
    set1=set([2,4,6,8,10,11,3])
    set2=set([1,2,3,4])
    difference(set1,set2)
main()
import logging
from datetime import datetime
from django.core.management.base import BaseCommand
from django.db import connections, connection, transaction as db_transaction
from django.db.models import Count
from usaspending_api.awards.models import TransactionFPDS, TransactionNormalized, Award
from usaspending_api.common.helpers import fy
from usaspending_api.common.helpers import timer
from usaspending_api.etl.award_helpers import update_awards, update_contract_awards, update_award_categories
from usaspending_api.etl.broker_etl_helpers import dictfetchall
from usaspending_api.etl.management.load_base import load_data_into_model, format_date
from usaspending_api.references.helpers import canonicalize_location_dict
from usaspending_api.references.models import RefCountryCode, Location, LegalEntity, Agency, ToptierAgency, \
SubtierAgency
BATCH_SIZE = 100000  # rows per Django bulk_create batch

logger = logging.getLogger('console')
exception_logger = logging.getLogger("exceptions")

# Module-level accumulators shared by Command's load_* passes below.  The
# *_bulk lists feed bulk_create; the *_lookup / *_list lists keep exactly one
# entry per source row so later passes can join on the row index.
fpds_bulk = []
pop_bulk = []
lel_bulk = []
legal_entity_lookup = []
legal_entity_bulk = []
awarding_agency_list = []
funding_agency_list = []
award_lookup = []
award_bulk = []
parent_award_lookup = []
parent_award_bulk = []
transaction_normalized_bulk = []

# Maps Location model field -> Broker place-of-performance column
pop_field_map = {
    # not sure place_of_performance_locat maps exactly to city name
    # "city_name": "place_of_performance_locat", # location id doesn't mean it's a city. Can't use this mapping
    "congressional_code": "place_of_performance_congr",
    "state_code": "place_of_performance_state",
    "zip4": "place_of_performance_zip4a",
    "location_country_code": "place_of_perform_country_c"
}

# Maps Location model field -> Broker legal-entity column
le_field_map = {
    "address_line1": "legal_entity_address_line1",
    "address_line2": "legal_entity_address_line2",
    "address_line3": "legal_entity_address_line3",
    "location_country_code": "legal_entity_country_code",
    "city_name": "legal_entity_city_name",
    "congressional_code": "legal_entity_congressional",
    "state_code": "legal_entity_state_code",
    "zip4": "legal_entity_zip4"
}
class Command(BaseCommand):
    """Bulk-load FPDS (contract) transaction data for one fiscal year from the Broker DB."""
    help = "Update historical transaction data for a fiscal year from the Broker."

    # Lookup caches, filled once by set_lookup_maps() and read by the
    # per-row load_* passes below.
    country_code_map = {}
    subtier_agency_map = {}
    subtier_to_agency_map = {}
    toptier_agency_map = {}
    agency_no_sub_map = {}
    agency_sub_only_map = {}
    agency_toptier_map = {}
    award_map = {}
    parent_award_map = {}
    le_map = {}

    def set_lookup_maps(self):
        """Populate the in-memory lookup dicts from the reference tables."""
        self.country_code_map = {country.country_code: country for country in RefCountryCode.objects.all()}
        self.subtier_agency_map = {
            subtier_agency['subtier_code']: subtier_agency['subtier_agency_id']
            for subtier_agency in SubtierAgency.objects.values('subtier_code', 'subtier_agency_id')
        }
        self.subtier_to_agency_map = {
            agency['subtier_agency_id']: {'agency_id': agency['id'], 'toptier_agency_id': agency['toptier_agency_id']}
            for agency in Agency.objects.values('id', 'toptier_agency_id', 'subtier_agency_id')
        }
        self.toptier_agency_map = {
            toptier_agency['toptier_agency_id']: toptier_agency['cgac_code']
            for toptier_agency in ToptierAgency.objects.values('toptier_agency_id', 'cgac_code')
        }
        self.agency_no_sub_map = {
            (agency.toptier_agency.cgac_code, agency.subtier_agency.subtier_code): agency
            for agency in Agency.objects.filter(subtier_agency__isnull=False)
        }
        self.agency_sub_only_map = {
            agency.toptier_agency.cgac_code: agency
            for agency in Agency.objects.filter(subtier_agency__isnull=True)
        }
        self.agency_toptier_map = {
            agency.toptier_agency.cgac_code: agency
            for agency in Agency.objects.filter(toptier_flag=True)
        }
        # Subtier code -> its unique Agency (only for subtiers with exactly one)
        self.agency_subtier_map = {
            sa.subtier_code: sa.agency_set.first()
            for sa in SubtierAgency.objects
            .annotate(n_agencies=Count('agency')).filter(n_agencies=1)
        }
        self.award_map = {award.piid: award for award in Award.objects.filter(piid__isnull=False)}
        self.le_map = {(le.recipient_unique_id, le.recipient_name): le for le in LegalEntity.objects.all()}

    def diff_fpds_data(self, db_cursor, ds_cursor, fiscal_year=None):
        """Compare Broker vs local FPDS ids; return (to_insert, to_delete) id sets."""
        db_query = 'SELECT detached_award_procurement_id ' \
                   'FROM detached_award_procurement'
        db_arguments = []
        ds_query = 'SELECT detached_award_procurement_id ' \
                   'FROM transaction_fpds'
        ds_arguments = []
        if fiscal_year:
            if db_arguments:
                db_query += ' AND'
            else:
                db_query += ' WHERE'
            if ds_arguments:
                ds_query += ' AND'
            else:
                ds_query += ' WHERE'
            fy_begin = '10/01/' + str(fiscal_year - 1)
            fy_end = '09/30/' + str(fiscal_year)
            db_query += ' action_date::Date BETWEEN %s AND %s'
            db_arguments += [fy_begin, fy_end]
            ds_query += ' action_date::Date BETWEEN %s AND %s'
            ds_arguments += [fy_begin, fy_end]
        db_cursor.execute(db_query, db_arguments)
        ds_cursor.execute(ds_query, ds_arguments)
        db_dict = dictfetchall(db_cursor)
        ds_dict = dictfetchall(ds_cursor)
        db_set = set(map(lambda db_entry: int(db_entry['detached_award_procurement_id']), db_dict))
        ds_set = set(map(lambda ds_entry: int(ds_entry['detached_award_procurement_id']), ds_dict))
        to_insert = db_set - ds_set
        to_delete = ds_set - db_set
        logger.info('Number of records to insert: %s' % str(len(to_insert)))
        logger.info('Number of records to delete: %s' % str(len(to_delete)))
        # Return what is not currently in our database (to insert) and what we have that Broker does not (to delete)
        return to_insert, to_delete

    def get_fpds_data(self, db_cursor, fiscal_year=None, to_insert=None):
        """Fetch full Broker rows for to_insert, optionally restricted to a fiscal year."""
        query = 'SELECT * FROM detached_award_procurement'
        arguments = []
        if to_insert:
            if arguments:
                query += ' AND'
            else:
                query += ' WHERE'
            query += ' detached_award_procurement_id IN %s'
            arguments += [tuple(to_insert)]
        if fiscal_year:
            if arguments:
                query += ' AND'
            else:
                query += ' WHERE'
            fy_begin = '10/01/' + str(fiscal_year - 1)
            fy_end = '09/30/' + str(fiscal_year)
            query += ' action_date::Date BETWEEN %s AND %s'
            arguments += [fy_begin, fy_end]
        # Stable ordering keeps row indexes aligned across all load_* passes
        query += ' ORDER BY detached_award_procurement_id'
        logger.info("Executing select query on Broker DB")
        db_cursor.execute(query, arguments)
        logger.info("Running dictfetchall on db_cursor")
        return dictfetchall(db_cursor)

    def load_locations(self, fpds_broker_data, total_rows, pop_flag=False):
        """Create one Location per row: place-of-performance when pop_flag, else legal-entity."""
        start_time = datetime.now()
        for index, row in enumerate(fpds_broker_data, 1):
            if not (index % 10000):
                logger.info('Locations: Loading row {} of {} ({})'.format(str(index),
                                                                          str(total_rows),
                                                                          datetime.now() - start_time))
            if pop_flag:
                location_value_map = {"place_of_performance_flag": True}
                field_map = pop_field_map
            else:
                location_value_map = {'recipient_flag': True}
                field_map = le_field_map
            row = canonicalize_location_dict(row)
            # THIS ASSUMPTION DOES NOT HOLD FOR FPDS SINCE IT DOES NOT HAVE A PLACE OF PERFORMANCE CODE
            # We can assume that if the country code is blank and the place of performance code is NOT '00FORGN', then
            # the country code is USA
            # if pop_flag and not country_code and pop_code != '00FORGN':
            #     row[field_map.get('location_country_code')] = 'USA'
            # Get country code obj
            location_country_code_obj = self.country_code_map.get(row[field_map.get('location_country_code')])
            # Fix state code periods
            state_code = row.get(field_map.get('state_code'))
            if state_code is not None:
                location_value_map.update({'state_code': state_code.replace('.', '')})
            if location_country_code_obj:
                location_value_map.update({
                    'location_country_code': location_country_code_obj,
                    'country_name': location_country_code_obj.country_name
                })
                # State fields only apply to domestic locations
                if location_country_code_obj.country_code != 'USA':
                    location_value_map.update({
                        'state_code': None,
                        'state_name': None
                    })
            else:
                # no country found for this code
                location_value_map.update({
                    'location_country_code': None,
                    'country_name': None
                })
            location_instance_data = load_data_into_model(
                Location(),
                row,
                value_map=location_value_map,
                field_map=field_map,
                as_dict=True)
            loc_instance = Location(**location_instance_data)
            loc_instance.load_city_county_data()
            loc_instance.fill_missing_state_data()
            loc_instance.fill_missing_zip5()
            if pop_flag:
                pop_bulk.append(loc_instance)
            else:
                lel_bulk.append(loc_instance)
        if pop_flag:
            logger.info('Bulk creating POP Locations (batch_size: {})...'.format(BATCH_SIZE))
            Location.objects.bulk_create(pop_bulk, batch_size=BATCH_SIZE)
        else:
            logger.info('Bulk creating LE Locations (batch_size: {})...'.format(BATCH_SIZE))
            Location.objects.bulk_create(lel_bulk, batch_size=BATCH_SIZE)

    def load_legal_entity(self, fpds_broker_data, total_rows):
        """Create/reuse a LegalEntity per row, linked to the LE location of the same row."""
        start_time = datetime.now()
        for index, row in enumerate(fpds_broker_data, 1):
            if not (index % 10000):
                logger.info('Legal Entity: Loading row {} of {} ({})'.format(str(index),
                                                                             str(total_rows),
                                                                             datetime.now() - start_time))
            recipient_name = row['awardee_or_recipient_legal']
            if recipient_name is None:
                recipient_name = ''
            recipient_unique_id = row['awardee_or_recipient_uniqu']
            if recipient_unique_id is None:
                recipient_unique_id = ''
            lookup_key = (recipient_unique_id, recipient_name)
            legal_entity = self.le_map.get(lookup_key)
            if not legal_entity:
                legal_entity = LegalEntity(
                    recipient_unique_id=row['awardee_or_recipient_uniqu'],
                    recipient_name=recipient_name
                )
                # lel_bulk is aligned 1:1 with rows by the earlier load_locations pass
                legal_entity = load_data_into_model(
                    legal_entity,
                    row,
                    value_map={"location": lel_bulk[index - 1]},
                    save=False)
                LegalEntity.update_business_type_categories(legal_entity)
                self.le_map[lookup_key] = legal_entity
                legal_entity_bulk.append(legal_entity)
            legal_entity_lookup.append(legal_entity)
        logger.info('Bulk creating Legal Entities (batch_size: {})...'.format(BATCH_SIZE))
        LegalEntity.objects.bulk_create(legal_entity_bulk, batch_size=BATCH_SIZE)

    def load_parent_awards(self, fpds_broker_data, total_rows):
        """Create/reuse the parent Award (by parent piid) for each row."""
        start_time = datetime.now()
        for index, row in enumerate(fpds_broker_data, 1):
            if not (index % 10000):
                logger.info('Parent Awards: Loading row {} of {} ({})'.format(str(index),
                                                                              str(total_rows),
                                                                              datetime.now() - start_time))
            # If awarding toptier agency code (aka CGAC) is not supplied on the D2 record,
            # use the sub tier code to look it up. This code assumes that all incoming
            # records will supply an awarding subtier agency code
            if row['awarding_agency_code'] is None or len(row['awarding_agency_code'].strip()) < 1:
                awarding_subtier_agency_id = self.subtier_agency_map[row["awarding_sub_tier_agency_c"]]
                awarding_toptier_agency_id = self.subtier_to_agency_map[awarding_subtier_agency_id]['toptier_agency_id']
                awarding_cgac_code = self.toptier_agency_map[awarding_toptier_agency_id]
                row['awarding_agency_code'] = awarding_cgac_code
            # Find the award that this award transaction belongs to. If it doesn't exist, create it.
            awarding_agency = self.agency_no_sub_map.get((
                row['awarding_agency_code'],
                row["awarding_sub_tier_agency_c"]
            ))
            if awarding_agency is None:
                awarding_agency = self.agency_sub_only_map.get(row['awarding_agency_code'])
            # parent_award_id from the row = parent piid
            parent_award_piid = row.get('parent_award_id')
            parent_award = None
            if parent_award_piid:
                parent_award = self.award_map.get(parent_award_piid)
                if not parent_award:
                    create_kwargs = {'awarding_agency': awarding_agency, 'piid': parent_award_piid}
                    parent_award = Award(**create_kwargs)
                    self.award_map[parent_award_piid] = parent_award
                    parent_award_bulk.append(parent_award)
            parent_award_lookup.append(parent_award)
        logger.info('Bulk creating Parent Awards (batch_size: {})...'.format(BATCH_SIZE))
        Award.objects.bulk_create(parent_award_bulk, batch_size=BATCH_SIZE)

    def load_awards(self, fpds_broker_data, total_rows):
        """Create/reuse the Award (by piid) for each row and record agency pairs."""
        start_time = datetime.now()
        for index, row in enumerate(fpds_broker_data, 1):
            if not (index % 10000):
                logger.info('Awards: Loading row {} of {} ({})'.format(str(index),
                                                                       str(total_rows),
                                                                       datetime.now() - start_time))
            # If awarding toptier agency code (aka CGAC) is not supplied on the D2 record,
            # use the sub tier code to look it up. This code assumes that all incoming
            # records will supply an awarding subtier agency code
            if row['awarding_agency_code'] is None or len(row['awarding_agency_code'].strip()) < 1:
                awarding_subtier_agency_id = self.subtier_agency_map[row["awarding_sub_tier_agency_c"]]
                awarding_toptier_agency_id = self.subtier_to_agency_map[awarding_subtier_agency_id]['toptier_agency_id']
                awarding_cgac_code = self.toptier_agency_map[awarding_toptier_agency_id]
                row['awarding_agency_code'] = awarding_cgac_code
            # If funding toptier agency code (aka CGAC) is empty, try using the sub
            # tier funding code to look it up. Unlike the awarding agency, we can't
            # assume that the funding agency subtier code will always be present.
            if row['funding_agency_code'] is None or len(row['funding_agency_code'].strip()) < 1:
                funding_subtier_agency_id = self.subtier_agency_map.get(row["funding_sub_tier_agency_co"])
                if funding_subtier_agency_id is not None:
                    funding_toptier_agency_id = self.subtier_to_agency_map[funding_subtier_agency_id][
                        'toptier_agency_id']
                    funding_cgac_code = self.toptier_agency_map[funding_toptier_agency_id]
                else:
                    funding_cgac_code = None
                row['funding_agency_code'] = funding_cgac_code
            # Find the award that this award transaction belongs to. If it doesn't exist, create it.
            awarding_agency = self.agency_no_sub_map.get((
                row['awarding_agency_code'],
                row["awarding_sub_tier_agency_c"]
            ))
            if awarding_agency is None:
                awarding_agency = self.agency_sub_only_map.get(row['awarding_agency_code'])
            # If we still haven't found the agency, try surmising it from subtier
            if awarding_agency is None:
                awarding_agency = self.agency_subtier_map.get(row['awarding_sub_tier_agency_c'])
            funding_agency = self.agency_no_sub_map.get((
                row['funding_agency_code'],
                row["funding_sub_tier_agency_co"]
            ))
            if funding_agency is None:
                funding_agency = self.agency_sub_only_map.get(row['funding_agency_code'])
            if funding_agency is None:
                funding_agency = self.agency_subtier_map.get(row['funding_sub_tier_agency_co'])
            awarding_agency_list.append(awarding_agency)
            funding_agency_list.append(funding_agency)
            piid = row.get('piid')
            award = self.award_map.get(piid)
            # A cached award under another awarding agency is treated as a different award
            if award and awarding_agency is not None and award.awarding_agency_id is not None:
                if award.awarding_agency_id != awarding_agency.id:
                    award = None
            if not award:
                # create the award since it wasn't found
                create_kwargs = {'awarding_agency': awarding_agency, 'piid': piid}
                award = Award(**create_kwargs)
                award.parent_award = parent_award_lookup[index - 1]
                self.award_map[piid] = award
                award_bulk.append(award)
            award_lookup.append(award)
        logger.info('Bulk creating Awards (batch_size: {})...'.format(BATCH_SIZE))
        Award.objects.bulk_create(award_bulk, batch_size=BATCH_SIZE)

    def load_transaction_normalized(self, fpds_broker_data, total_rows):
        """Build one TransactionNormalized per row, joined by index to earlier passes."""
        start_time = datetime.now()
        for index, row in enumerate(fpds_broker_data, 1):
            if not (index % 10000):
                logger.info('Transaction Normalized: Loading row {} of {} ({})'.format(str(index),
                                                                                       str(total_rows),
                                                                                       datetime.now() - start_time))
            parent_txn_value_map = {
                "award": award_lookup[index - 1],
                "awarding_agency": awarding_agency_list[index - 1],
                "funding_agency": funding_agency_list[index - 1],
                "recipient": legal_entity_lookup[index - 1],
                "place_of_performance": pop_bulk[index - 1],
                "period_of_performance_start_date": format_date(row['period_of_performance_star']),
                "period_of_performance_current_end_date": format_date(row['period_of_performance_curr']),
                "action_date": format_date(row['action_date']),
                "last_modified_date": row['last_modified']
            }
            contract_field_map = {
                "type": "contract_award_type",
                "description": "award_description"
            }
            transaction_normalized = load_data_into_model(
                TransactionNormalized(),
                row,
                field_map=contract_field_map,
                value_map=parent_txn_value_map,
                as_dict=False,
                save=False)
            transaction_normalized.fiscal_year = fy(transaction_normalized.action_date)
            transaction_normalized_bulk.append(transaction_normalized)
        logger.info('Bulk creating Transaction Normalized (batch_size: {})...'.format(BATCH_SIZE))
        TransactionNormalized.objects.bulk_create(transaction_normalized_bulk, batch_size=BATCH_SIZE)

    def load_transaction_fpds(self, fpds_broker_data, total_rows):
        """Build one TransactionFPDS per row, linked to its TransactionNormalized."""
        logger.info('Starting bulk loading for FPDS data')
        start_time = datetime.now()
        for index, row in enumerate(fpds_broker_data, 1):
            if not (index % 10000):
                logger.info('Transaction FPDS: Loading row {} of {} ({})'.format(str(index),
                                                                                 str(total_rows),
                                                                                 datetime.now() - start_time))
            fpds_instance_data = load_data_into_model(
                TransactionFPDS(),  # thrown away
                row,
                as_dict=True)
            fpds_instance = TransactionFPDS(**fpds_instance_data)
            fpds_instance.transaction = transaction_normalized_bulk[index - 1]
            fpds_bulk.append(fpds_instance)
        logger.info('Bulk creating Transaction FPDS (batch_size: {})...'.format(BATCH_SIZE))
        TransactionFPDS.objects.bulk_create(fpds_bulk, batch_size=BATCH_SIZE)

    def delete_stale_fpds(self, to_delete=None):
        """Delete local transactions whose Broker ids are in to_delete."""
        if not to_delete:
            return
        # This cascades deletes for TransactionFPDS & Awards in addition to deleting TransactionNormalized records
        TransactionNormalized.objects.filter(contract_data__detached_award_procurement_id__in=to_delete).delete()

    def add_arguments(self, parser):
        """Register the optional --fiscal_year argument."""
        parser.add_argument(
            '--fiscal_year',
            dest="fiscal_year",
            nargs='+',
            type=int,
            help="Year for which to run the historical load"
        )

    @db_transaction.atomic
    def handle(self, *args, **options):
        """Entry point: diff, delete stale rows, then run the load passes in order."""
        logger.info('Starting FPDS bulk data load...')
        db_cursor = connections['data_broker'].cursor()
        ds_cursor = connection.cursor()
        fiscal_year = options.get('fiscal_year')
        if fiscal_year:
            fiscal_year = fiscal_year[0]
        else:
            fiscal_year = 2017
        logger.info('Processing data for Fiscal Year ' + str(fiscal_year))
        with timer('Diff-ing FPDS data', logger.info):
            to_insert, to_delete = self.diff_fpds_data(db_cursor=db_cursor,
                                                       ds_cursor=ds_cursor,
                                                       fiscal_year=fiscal_year)
        total_rows = len(to_insert)
        total_rows_delete = len(to_delete)
        if total_rows_delete > 0:
            with timer('Deleting stale FPDS data', logger.info):
                self.delete_stale_fpds(to_delete=to_delete)
        if total_rows > 0:
            # Set lookups after deletions to only get latest
            self.set_lookup_maps()
            with timer('Get Broker FPDS data', logger.info):
                fpds_broker_data = self.get_fpds_data(
                    db_cursor=db_cursor, fiscal_year=fiscal_year, to_insert=to_insert)
            with timer('Loading POP Location data', logger.info):
                self.load_locations(fpds_broker_data=fpds_broker_data, total_rows=total_rows, pop_flag=True)
            with timer('Loading LE Location data', logger.info):
                self.load_locations(fpds_broker_data=fpds_broker_data, total_rows=total_rows)
            with timer('Loading Legal Entity data', logger.info):
                self.load_legal_entity(fpds_broker_data=fpds_broker_data, total_rows=total_rows)
            with timer('Loading Parent Award data', logger.info):
                self.load_parent_awards(fpds_broker_data=fpds_broker_data, total_rows=total_rows)
            with timer('Loading Award data', logger.info):
                self.load_awards(fpds_broker_data=fpds_broker_data, total_rows=total_rows)
            with timer('Loading Transaction Normalized data', logger.info):
                self.load_transaction_normalized(fpds_broker_data=fpds_broker_data, total_rows=total_rows)
            with timer('Loading Transaction FPDS data', logger.info):
                self.load_transaction_fpds(fpds_broker_data=fpds_broker_data, total_rows=total_rows)
            award_update_id_list = [award.id for award in award_lookup]
            with timer('Updating awards to reflect their latest associated transaction info', logger.info):
                update_awards(tuple(award_update_id_list))
            with timer('Updating contract-specific awards to reflect their latest transaction info', logger.info):
                update_contract_awards(tuple(award_update_id_list))
            with timer('Updating award category variables', logger.info):
                update_award_categories(tuple(award_update_id_list))
        else:
            logger.info('Nothing to insert...FINISHED!')
|
"""Helper class to get a database engine and to get a session."""
from pollbot.config import config
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Engine configured from the pollbot config; pool sizing comes from the
# [database] section of the config file.
engine = create_engine(config['database']['sql_uri'],
                       pool_size=config['database']['connection_count'],
                       max_overflow=config['database']['overflow_count'],
                       echo=False)
# Declarative base bound to this engine; model modules import `base`.
base = declarative_base(bind=engine)
def get_session(connection=None):
    """Get a new db session.

    NOTE(review): the `connection` parameter is accepted but ignored, and a
    fresh scoped_session registry is created on every call (which defeats the
    usual purpose of scoped_session) — confirm whether callers depend on
    either behavior before changing it.
    """
    session = scoped_session(sessionmaker(bind=engine))
    return session
|
# See LICENSE file for full copyright and licensing details.
{
"name": "Web Digital Signature",
"version": "14.0.1.0.0",
"author": "Serpent Consulting Services Pvt. Ltd.",
"maintainer": "Serpent Consulting Services Pvt. Ltd.",
"complexity": "easy",
"depends": ["web"],
"license": "AGPL-3",
"category": "Tools",
"description": """
This module provides the functionality to store digital signature
Example can be seen into the User's form view where we have
added a test field under signature.
""",
"summary": """
Touch screen enable so user can add signature with touch devices.
Digital signature can be very usefull for documents.
""",
"images": ["static/description/Digital_Signature.jpg"],
"data": ["views/web_digital_sign_view.xml", "views/users_view.xml"],
"website": "http://www.serpentcs.com",
"qweb": ["static/src/xml/digital_sign.xml"],
"installable": True,
"auto_install": False,
}
|
# Password prompt with three attempts.  The messages and control flow are
# identical to the original, which unrolled the three attempts as nested
# if/else copies of the same code.
password = 'a123456'
for tries_left in (2, 1, 0):
    i = input('請輸入密碼: ')
    if i == password:
        print('登入成功')
        break
    if tries_left:
        print('密碼錯誤! 還有%d次機會' % tries_left)
    else:
        print('暫停輸入30分鐘, 請稍後再試!')
|
"""
Pre-Programming 61 Solution
By Teerapat Kraisrisirikul
"""
def main():
    """Read a guess then a target; report Too High / Too Low / Correct!."""
    guess = int(input())
    target = int(input())
    if guess == target:
        print('Correct!')
    elif guess < target:
        print('Too High')
    else:
        print('Too Low')


main()
|
#!/usr/bin/python
# Make graphs of the timing results from V5, V6, Prog-QUBO
# We're comparing order statistics from 100 things (V5 data) with order statistics of 1000 things (V6 and Prog-QUBO data).
# Put on the same graph by downsampling the 1000 samples to 100.
# What does the 87^th lowest value out of 100 (counting from 1) correspond to in 1000 ordered things?
#
# It's approximately the 865^th lowest value, but better is to imagine taking a random
# subset of size 100 from your size 1000 sample and working out the 87^th lowest value of
# that. The rank of that item in your size 1000 sample is a random variable with negative
# hypergeometric distribution:
# P(rank_{1000} = s) = (s-1 choose 86)*(1000-s choose 100-87)/(1000 choose 100)
#
# (Ranks start at 1 in the above example.)
import scipy
import scipy.special
from math import log,exp
def fact(x):
    """x! computed via the gamma function (also defined for non-integer x)."""
    return scipy.special.gamma(x + 1)
def logfact(x):
    """log(x!) via gammaln; avoids overflow for large x."""
    return scipy.special.gammaln(x + 1)
def logbin(n, r):
    """log of the binomial coefficient C(n, r)."""
    return logfact(n) - logfact(r) - logfact(n - r)
def bin(n, r):
    """Binomial coefficient C(n, r) as a float; 0 outside 0 <= r <= n.

    NOTE: intentionally shadows the builtin bin(); the rest of this script
    calls it under this name.
    """
    if 0 <= r <= n:
        return exp(logbin(n, r))
    return 0.
from subprocess import Popen,PIPE
output='png'# 'png' or 'ps'  — gnuplot terminal to use
n=100# Resample down to this  (number of order statistics kept per series)
# Assumed setup and read times in seconds
V5setup=.201;V5read=.29e-3
V6setup=.036;V6read=.126e-3
def add(label,times,setup):
    # Downsample the sorted time list `times` (length N) to n order statistics
    # using the negative-hypergeometric weighting described in the header
    # comment, then record absolute / no-setup / median-scaled series plus the
    # label into the module-level result lists.
    # NOTE: Python 2 only — `(N-1)/2` and `N/2` below rely on integer division.
    N=len(times)
    tr=[]
    for r in range(n):
        t=0
        for s in range(r,N-(n-1-r)): t+=bin(s,r)*bin(N-1-s,n-1-r)/bin(N,n)*times[s]# Yes it's stupidly inefficient
        tr.append(t)
    absres.append([x+setup for x in tr])
    absresnosetup.append(tr)
    med=(times[(N-1)/2]+times[N/2])/2# median of no setup case
    scaleres.append([x/med for x in tr])# scaled no setup case
    labels.append(label)
# Python 2 script body: for each problem size wn, collect the V5/V6/Prog-QUBO
# series, write the resampled data files, then drive gnuplot to render graphs.
for wn in [439,502]:
    scaleres=[]
    absres=[]
    absresnosetup=[]
    labels=[]
    if wn==439:
        # V5 data only exists for the n=439 problem set
        f=open('Fig6.439.sorted','r')
        V5=[]
        for x in f.read().strip().split('\n'):
            if x[0]!='#': V5.append(int(x))
        f.close()
        N=len(V5)
        add("D-Wave V5, McGeoch n=439 set",[1000./V5[N-1-i]*V5read for i in range(n)],V5setup)
    f=open('Fig8.%d.sorted'%wn,'r')
    V6=[]
    for x in f.read().strip().split('\n'):
        if x[0]!='#': V6.append(int(x))
    f.close()
    N=len(V6)
    add("D-Wave V6, McGeoch large n=%d set"%wn,[10000./V6[N-1-i]*V6read for i in range(N)],V6setup)
    for s in [1,3]:#[0,1,3,10]:
        # Prog-QUBO results, one series per search strategy s
        f=open('output/weightmode5/%d.strat%d/summary.sorted-by-TTS'%(wn,s),'r')
        l=[]
        for x in f.read().strip().split('\n'):
            if x[0]!='#': l.append(float(x.split()[2]))
        f.close()
        add("Prog-QUBO-S%d, Set %d (n=%d)"%(s,1+(wn==502),wn),l,0)
    # Emit one whitespace-separated data file per result family (log10 values)
    for (name,res) in [('timecomp',absres),('timecomp-nosetup',absresnosetup),('scaling',scaleres)]:
        fn='%s%d'%(name,wn)
        f=open(fn,'w')
        for i in range(len(labels)):
            print >>f,"# Column %d = %s"%(i+1,labels[i])
        for r in range(n):
            print >>f,"%4d"%(r+1),
            for x in res:
                assert x[r]
                print >>f,"%12g"%(log(x[r])/log(10)),
            print >>f
        f.close()
        print "Written scaling data to",fn
    # Render each data file with gnuplot via a piped script
    for (fn,title,ylabel) in [
        ('scaling%d','"log_10 running time relative to median, n=%d"','"log_10(TTS(p)/TTS(50.5))"'),
        ('timecomp%d','"log_10 running time in seconds, n=%d"','"log_10(TTS(p)/1 second)"'),
        ('timecomp-nosetup%d','"log_10 running time in seconds, n=%d. D-Wave times exlude setup."','"log_10(TTS(p)/1 second)"')]:
        fn=fn%wn;title=title%wn
        p=Popen("gnuplot",shell=True,stdin=PIPE).stdin
        if output=='ps':
            print >>p,'set terminal postscript color solid "Helvetica" 9'
        elif output=='png':
            print >>p,'set terminal pngcairo size 1400,960'
            print >>p,'set bmargin 5;set lmargin 15;set rmargin 15;set tmargin 5'
        else: assert 0
        print >>p,'set output "%s.%s"'%(fn,output)
        print >>p,'set zeroaxis'
        print >>p,'set xrange [0:101]'
        print >>p,'set key left'
        print >>p,'set title',title
        print >>p,'set xlabel "Hardness rank, p, from 1 to 100"'
        print >>p,'set ylabel',ylabel
        print >>p,'set y2tics mirror'
        s='plot '
        for i in range(len(labels)):
            if i>0: s+=', '
            s+='"%s" using ($1):($%d) title "%s"'%(fn,i+2,labels[i])
        print >>p,s
        p.close()
        print "Written graph to %s.%s"%(fn,output)
|
class Stack:
    """Simple LIFO stack backed by a Python list (top of stack = end of list)."""
    def __init__(self):
        self.items = []
    def isEmpty(self):
        # True when no items are stored.
        return self.items == []
    def push(self, item):
        # Place item on top of the stack.
        self.items.append(item)
    def pop(self):
        # Remove and return the top item (raises IndexError when empty).
        return self.items.pop()
    def peek(self):
        # BUG FIX: return the TOP of the stack (last pushed item), not the
        # bottom -- the original returned self.items[0].
        return self.items[-1]
    def size(self):
        return len(self.items)
    def Show(self):
        # Render items bottom-to-top, each followed by a single space.
        # str.join avoids the quadratic += concatenation of the original.
        return "".join(str(i) + " " for i in self.items)
# Match-three game: read space-separated tokens; every run of three identical
# consecutive tokens is a "combo" and is removed from the stack.
inp = input('Enter Input : ').split()
S = Stack()
temp = ""        # token currently being matched
count = 0        # length of the current run of identical tokens
combo = 0        # total number of triples removed so far
isCombo = False  # whether this pass removed at least one triple
for i in inp:
    S.push(i)
    if(temp == i):
        count += 1
    else:
        temp = i
        count = 1
    if(count == 3 ):
        # A run of three: remove all three copies from the stack.
        S.pop()
        S.pop()
        S.pop()
        isCombo = True
        combo += 1
        count += 1   # count becomes 4; temp reset below forces a new run anyway
        temp = ""
# Removing a triple can bring previously separated equal tokens together,
# so repeat the same pass over the survivors until no triple is removed.
while(isCombo):
    inp = S.Show().split()
    S = Stack()
    temp = ""
    count = 0
    isCombo = False
    for i in inp:
        S.push(i)
        if(temp == i):
            count += 1
        else:
            temp = i
            count = 1
        if(count == 3 ):
            S.pop()
            S.pop()
            S.pop()
            isCombo = True
            combo += 1
            count += 1
            temp = ""
if(S.size() == 0):
    print(0)
    print("Empty")
else:
    print(S.size())
    # Show() lists bottom-first with spaces; strip spaces and reverse to print top-first.
    print(S.Show().replace(" ","")[::-1])
if(combo >= 2 ):
    print("Combo : "+ str(combo) + " ! ! !")
### Enter Your Code Here ### |
class Solution(object):
    def countAndSay(self, n):
        """Return the n-th term of the count-and-say sequence.

        Each term "reads aloud" the previous one: '1' -> '11' (one 1) ->
        '21' (two 1s) -> '1211' -> ...

        :type n: int
        :rtype: str
        """
        ret = '1'
        # BUG FIX: xrange is Python-2-only; range behaves identically here
        # and works on both Python 2 and 3.
        for _ in range(n - 1):
            last_ch = ret[0]
            count = 0
            new_ret = ''
            # Run-length encode the previous term.
            for c in ret:
                if c == last_ch:
                    count += 1
                else:
                    new_ret += str(count) + last_ch
                    last_ch = c
                    count = 1
            # Flush the final run.
            new_ret += str(count) + last_ch
            ret = new_ret
        return ret
|
def sherlock(n, m):
    """Return "YES" if some index i of m has equal sums on both sides.

    The element m[i] itself is excluded from both sides. n is the length
    of m (kept for interface compatibility with the stdin driver).

    :param n: number of elements in m
    :param m: list of integers
    :return: "YES" if a balance index exists, otherwise "NO"
    """
    # Running left sum replaces the original prefix-sum list; also avoids
    # shadowing the builtin `sum`.
    total = sum(m)
    left = 0
    for i in range(n):
        # right side = everything after index i
        if left == total - left - m[i]:
            return "YES"
        left += m[i]
    return "NO"
from sys import stdin
#inp = open("in", "r")
inp = stdin  # read test cases from standard input
# First line: number of test cases; each case is a length line then a values line.
t = int(inp.readline().strip())
for i in range(t):
    n = int(inp.readline().strip())                 # array length
    m = map(int, inp.readline().strip().split())    # array values
    print(sherlock(n,list(m)))
|
import dotenv
import oci
import os
dotenv.load_dotenv()
# OCI SDK configuration assembled from environment variables
# (loaded from .env by dotenv.load_dotenv() above).
oci_config = {
    "config": {
        "user": os.getenv("OCI_USER"),
        "fingerprint": os.getenv("OCI_FINGERPRINT"),
        "tenancy": os.getenv("OCI_TENANCY"),
        "region": os.getenv("OCI_REGION"),
        "key_file": os.getenv("OCI_KEY_FILE"),
        "pass_phrase": os.getenv("OCI_PASS_PHRASE"),
    }
}
# service_endpoint = None # Use it to test on a real environment
service_endpoint = "http://localhost:12000" # Use it to test on mock environment
compartment_id = os.getenv("COMPARTMENT_ID")
# NOTE: this namespace lookup performs a network request at import time.
cli = oci.object_storage.ObjectStorageClient(
    oci_config["config"], service_endpoint=service_endpoint
)
r = cli.get_namespace()
namespace_name = r.data  # Object Storage namespace of the tenancy
def list_buckets():
    """Demo: list the buckets in the configured compartment and dump the response."""
    client = oci.object_storage.ObjectStorageClient(
        oci_config["config"], service_endpoint=service_endpoint
    )
    response = client.list_buckets(
        namespace_name=namespace_name, compartment_id=compartment_id
    )
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def compute_client_sample():
    """Demo: list compute instances in the configured compartment (result discarded)."""
    client = oci.core.ComputeClient(
        oci_config["config"], service_endpoint=service_endpoint
    )
    client.list_instances(compartment_id)
def create_bucket():
    """Demo: create a bucket named 'bucket_name' and dump the response."""
    client = oci.object_storage.ObjectStorageClient(
        oci_config["config"], service_endpoint=service_endpoint
    )
    details = oci.object_storage.models.CreateBucketDetails(
        name="bucket_name",
        compartment_id=compartment_id,
        public_access_type="ObjectRead",
        storage_tier="Standard",
        freeform_tags={"tag_name": "tag_value"},
        versioning="Disabled",
    )
    response = client.create_bucket(
        namespace_name=namespace_name, create_bucket_details=details
    )
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def delete_bucket():
    """Demo: delete the bucket named 'bucket_name' and dump the response."""
    client = oci.object_storage.ObjectStorageClient(
        oci_config["config"], service_endpoint=service_endpoint
    )
    response = client.delete_bucket(
        namespace_name=namespace_name, bucket_name="bucket_name"
    )
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def put_object():
    """Demo: upload a small text object into 'bucket_name' and dump the response."""
    client = oci.object_storage.ObjectStorageClient(
        oci_config["config"], service_endpoint=service_endpoint
    )
    response = client.put_object(
        namespace_name=namespace_name,
        bucket_name="bucket_name",
        object_name="folder/file.txt",
        put_object_body=b"teste alo testando",
        content_type="text/plain",
        cache_control="private, Immutable, max-age=31557600",
    )
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def list_objects():
    """Demo: list the objects stored in 'bucket_name' and dump the response."""
    client = oci.object_storage.ObjectStorageClient(
        oci_config["config"], service_endpoint=service_endpoint
    )
    response = client.list_objects(
        namespace_name=namespace_name, bucket_name="bucket_name"
    )
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def delete_object():
    """Demo: delete 'folder/file.txt' from 'bucket_name' and dump the response."""
    client = oci.object_storage.ObjectStorageClient(
        oci_config["config"], service_endpoint=service_endpoint
    )
    response = client.delete_object(
        namespace_name=namespace_name,
        bucket_name="bucket_name",
        object_name="folder/file.txt",
    )
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def create_table():
    """Demo: create a NoSQL table with minimal throughput limits and dump the response."""
    ddl_statement = """
    CREATE TABLE table_name ( campo1 string, campo2 number, campo3 string DEFAULT "[]" NOT NULL, PRIMARY KEY ( SHARD ( campo1 ), campo2 ) )
    """
    # Smallest possible capacity: 1 read unit, 1 write unit, 1 GB storage.
    table_limits = oci.nosql.models.TableLimits(
        max_read_units=1, max_write_units=1, max_storage_in_g_bs=1
    )
    details = oci.nosql.models.CreateTableDetails(
        name="table_name",
        compartment_id=compartment_id,
        ddl_statement=ddl_statement,
        table_limits=table_limits,
    )
    client = oci.nosql.NosqlClient(oci_config["config"], service_endpoint=service_endpoint)
    response = client.create_table(create_table_details=details)
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def create_row():
    """Demo: upsert a single row into 'table_name' and dump the response."""
    row = oci.nosql.models.UpdateRowDetails()
    row.value = {"campo1": "value1", "campo2": 1}
    row.compartment_id = compartment_id
    client = oci.nosql.NosqlClient(oci_config["config"], service_endpoint=service_endpoint)
    response = client.update_row(table_name_or_id="table_name", update_row_details=row)
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def query():
    """Demo: run a SELECT over 'table_name' and dump the response."""
    client = oci.nosql.NosqlClient(oci_config["config"], service_endpoint=service_endpoint)
    statement = "SELECT * FROM table_name"
    details = oci.nosql.models.QueryDetails(
        compartment_id=compartment_id, statement=statement
    )
    response = client.query(details)
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def delete_row():
    """Demo: delete the row keyed (value1, 1) from 'table_name' and dump the response."""
    client = oci.nosql.NosqlClient(oci_config["config"], service_endpoint=service_endpoint)
    response = client.delete_row(
        table_name_or_id="table_name",
        compartment_id=compartment_id,
        key=["campo1:value1", "campo2:1"],
    )
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
def delete_table():
    """Demo: drop 'table_name' and dump the response."""
    client = oci.nosql.NosqlClient(oci_config["config"], service_endpoint=service_endpoint)
    response = client.delete_table(
        table_name_or_id="table_name", compartment_id=compartment_id
    )
    for attr in ("request_id", "headers", "data", "status"):
        print(getattr(response, attr))
# create_bucket()
# put_object()
# create_table()
# create_row()
# query()
# delete_row()
# delete_table()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 10:42:30 2021
@author: William
@email: williams8645@gmail.com
"""
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
# Work from the project data directory (machine-specific Windows path).
os.chdir('D:/Charite/labA/WP2')
# Experiment index -> condition label (cell density x treatment dose).
condition_dict = {1:'high_density_untreated',2:'high_density_0uM',3:'high_density_1.25uM',
                  4:'high_density_2.5uM',5:'high_density_5uM',6:'high_density_10uM',
                  7:'medium_density_untreated',8:'medium_density_0uM',9:'medium_density_1.25uM',
                  10:'medium_density_2.5uM',11:'medium_density_5uM',12:'medium_density_10uM',
                  13:'low_density_untreated',14:'low_density_0uM',15:'low_density_1.25uM',
                  16:'low_density_2.5uM',17:'low_density_5uM',18:'low_density_10uM',
                  19:'very_low_density_untreated',20:'very_low_density_0uM',21:'very_low_density_1.25uM'}
# plot histograms of circadian powers
for i in condition_dict:
    file_name = condition_dict[i]+'_circadian_powerseries'
    # Column '0' holds the average ridge power values for this condition.
    powers_series = pd.read_csv('wider_table/powerseries/%s.csv'% file_name)
    # plot the histogram (density-normalised, bin width 2) with a KDE overlay
    kde = stats.gaussian_kde(powers_series['0'])
    fig, ax = plt.subplots()
    n,bins,patches=ax.hist(
        powers_series['0'],
        bins=np.arange(min(powers_series['0']),max(powers_series['0']),2),
        edgecolor='w',
        density=True)
    ax.plot(bins,kde(bins))
    ax.set_xlim((0,60))
    ax.set_xlabel('Average Ridge Power')
    ax.set_ylabel('Density')
    ax.set_title(condition_dict[i].capitalize()+' '+'n = %d' % len(powers_series))
    plt.savefig('./graph/%s.png' % file_name)
    plt.clf()  # reset the figure for the next condition
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from ranger_performance_tool.ranger_perf_object_stores import random_generators
class ServiceStoreBase(ABC):
    """Interface for per-service generators of randomized Ranger policy resources."""

    @abstractmethod
    def generate_resources(self):
        """Return a dict describing randomized resources for one policy."""
class HDFSServiceStore(ServiceStoreBase):
    """Generates randomized HDFS path resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return a resource dict with a random set of recursive paths."""
        return {
            'path': {
                'values': self.random_generator.generate_string_array(),
                'isExcludes': False,
                'isRecursive': True,
            }
        }
class HBaseServiceStore(ServiceStoreBase):
    """Generates randomized HBase table/column-family/column resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return randomized column-family, column, and table resources."""
        # Preserve the original call order (columns, tables, column families)
        # so the random generator's state is consumed identically.
        gen = self.random_generator.generate_string_array
        columns = gen()
        tables = gen()
        column_families = gen()
        return {
            'column-family': {'values': column_families, 'isExcludes': False, 'isRecursive': False},
            'column': {'values': columns, 'isExcludes': False, 'isRecursive': False},
            'table': {'values': tables, 'isExcludes': False, 'isRecursive': False},
        }
class HiveServiceStore(ServiceStoreBase):
    """Generates randomized Hive table/database/column resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return randomized table, database, and column resources."""
        # Preserve the original call order (tables, databases, columns).
        gen = self.random_generator.generate_string_array
        tables = gen()
        databases = gen()
        columns = gen()
        return {
            'table': {'values': tables, 'isExcludes': False, 'isRecursive': False},
            'database': {'values': databases, 'isExcludes': False, 'isRecursive': False},
            'column': {'values': columns, 'isExcludes': False, 'isRecursive': False},
        }
class YarnServiceStore(ServiceStoreBase):
    """Generates randomized YARN queue resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return a resource dict with a random set of queues."""
        return {
            'queue': {
                'values': self.random_generator.generate_string_array(),
                'isExcludes': False,
                'isRecursive': False,
            }
        }
class KnoxServiceStore(ServiceStoreBase):
    """Generates randomized Knox topology/service resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return randomized topology and service resources."""
        # Preserve the original call order (services, topologies).
        gen = self.random_generator.generate_string_array
        services = gen()
        topologies = gen()
        return {
            'topology': {'values': topologies, 'isExcludes': False, 'isRecursive': False},
            'service': {'values': services, 'isExcludes': False, 'isRecursive': False},
        }
class SolrServiceStore(ServiceStoreBase):
    """Generates randomized Solr collection resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return a resource dict with a random set of collections."""
        return {
            'collection': {
                'values': self.random_generator.generate_string_array(),
                'isExcludes': False,
                'isRecursive': False,
            }
        }
class KafkaServiceStore(ServiceStoreBase):
    """Generates randomized Kafka consumer-group resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return a resource dict with a random set of consumer groups."""
        return {
            'consumergroup': {
                'values': self.random_generator.generate_string_array(),
                'isExcludes': False,
                'isRecursive': False,
            }
        }
class AtlasServiceStore(ServiceStoreBase):
    """Generates randomized Atlas entity-related resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return randomized entity-type/classification/entity/business-metadata resources."""
        # Preserve the original call order so the random generator's state
        # is consumed identically.
        gen = self.random_generator.generate_string_array
        entity_types = gen()
        entity_classifications = gen()
        entity_business_metadatas = gen()
        entities = gen()
        return {
            'entity-type': {'values': entity_types, 'isExcludes': False, 'isRecursive': False},
            'entity-classification': {'values': entity_classifications, 'isExcludes': False,
                                      'isRecursive': False},
            'entity': {'values': entities, 'isExcludes': False, 'isRecursive': False},
            'entity-business-metadata': {'values': entity_business_metadatas, 'isExcludes': False,
                                         'isRecursive': False},
        }
class OzoneServiceStore(ServiceStoreBase):
    """Generates randomized Ozone bucket/key/volume resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return randomized bucket, key, and volume resources."""
        # Preserve the original call order (buckets, keys, volumes).
        gen = self.random_generator.generate_string_array
        buckets = gen()
        keys = gen()
        volume = gen()
        return {
            'bucket': {'values': buckets, 'isExcludes': False, 'isRecursive': False},
            'key': {'values': keys, 'isExcludes': False, 'isRecursive': False},
            'volume': {'values': volume, 'isExcludes': False, 'isRecursive': False},
        }
class AdlsServiceStore(ServiceStoreBase):
    """Generates randomized ADLS container/path/storage-account resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return randomized container, relative-path, and storage-account resources."""
        # Preserve the original call order (containers, paths, storage accounts).
        gen = self.random_generator.generate_string_array
        containers = gen()
        paths = gen()
        storage_accounts = gen()
        return {
            'container': {'values': containers, 'isExcludes': False, 'isRecursive': False},
            'relativepath': {'values': paths, 'isExcludes': False, 'isRecursive': True},
            'storageaccount': {'values': storage_accounts, 'isExcludes': False, 'isRecursive': False},
        }
class S3ServiceStore(ServiceStoreBase):
    """Generates randomized S3 bucket/path resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return randomized bucket and path resources."""
        # Preserve the original call order (buckets, paths).
        gen = self.random_generator.generate_string_array
        buckets = gen()
        paths = gen()
        return {
            'bucket': {'values': buckets, 'isExcludes': False, 'isRecursive': False},
            'path': {'values': paths, 'isExcludes': False, 'isRecursive': False},
        }
class KuduServiceStore(ServiceStoreBase):
    """Generates randomized Kudu table/database/column resources."""
    def __init__(self, random_type="random"):
        self.random_generator = random_generators.get_random_generator(random_type)
    def generate_resources(self):
        """Return randomized table, database, and column resources."""
        # Preserve the original call order (columns, databases, tables).
        gen = self.random_generator.generate_string_array
        columns = gen()
        databases = gen()
        tables = gen()
        return {
            'table': {'values': tables, 'isExcludes': False, 'isRecursive': False},
            'database': {'values': databases, 'isExcludes': False, 'isRecursive': False},
            'column': {'values': columns, 'isExcludes': False, 'isRecursive': False},
        }
|
import cv2

# Load the image; OpenCV's imread returns pixel data in BGR channel order,
# not RGB.
imagem = cv2.imread("frutas.jpg")

# BUG FIX: the loaded image is BGR, so the correct conversion code is
# COLOR_BGR2GRAY. The original used COLOR_RGB2GRAY, which swaps the red and
# blue channel weights and produces slightly wrong grayscale values.
imagem = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)

# Display the grayscale image until a key is pressed, then close the window.
cv2.imshow("Imagem", imagem)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
########
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from cloudify import exceptions
from cloudify.workflows import tasks, tasks_graph
from cloudify_rest_client.operations import Operation, TasksGraph
class _MockCtx(object):
def __init__(self, storage):
self._storage = storage
self.execution_token = 'mock_token'
def _get_current_object(self):
return self
def store_tasks_graph(self, name, operations):
self._storage['name'] = name
self._storage['operations'] = [Operation(op) for op in operations]
return {'id': 'abc'}
def get_operations(self, graph_id):
return self._storage['operations']
def _make_remote_task(kwargs=None):
    """Build a RemoteWorkflowTask with a minimal cloudify context for tests."""
    if not kwargs:
        kwargs = {'a': 1}
    kwargs['__cloudify_context'] = {'task_name': 'x'}
    return tasks.RemoteWorkflowTask(
        kwargs=kwargs,
        cloudify_context=kwargs['__cloudify_context'],
        workflow_context=None,
        info={'info': 'info'}
    )
def _on_success_func(tsk):
pass
class _OnFailureHandler(object):
def __init__(self, value):
self.value = value
def dump(self):
return {
'value': self.value
}
def __call__(self):
return self.value
class TestSerialize(TestCase):
    """Round-trip (dump -> Operation -> restore) tests for RemoteWorkflowTask."""
    def test_task_serialize(self):
        # Core attributes must survive a dump/restore round trip.
        task = _make_remote_task()
        task._state = tasks.TASK_SENT
        serialized = Operation(task.dump())
        deserialized = tasks.RemoteWorkflowTask.restore(
            ctx=_MockCtx({}),
            graph=None,
            task_descr=serialized)
        for attr_name in ['id', 'info', 'total_retries', 'retry_interval',
                          '_state', 'current_retries']:
            self.assertEqual(getattr(task, attr_name),
                             getattr(deserialized, attr_name))
        task_ctx = task._cloudify_context
        deserialized_task_ctx = deserialized._cloudify_context
        # when deserializing, execution_token is added from the workflow ctx
        self.assertEqual(deserialized_task_ctx, {
            'task_name': task_ctx['task_name'],
            'execution_token': 'mock_token'
        })
    def test_handler_serialize_func(self):
        # A module-level function handler is restored as the very same object.
        task = _make_remote_task()
        task.on_success = _on_success_func
        serialized = Operation(task.dump())
        deserialized = tasks.RemoteWorkflowTask.restore(
            ctx=_MockCtx({}),
            graph=None,
            task_descr=serialized)
        self.assertIs(task.on_success, deserialized.on_success)
    def test_handler_serialize_class(self):
        # A handler object exposing dump() is reconstructed and stays callable.
        task = _make_remote_task()
        task.on_success = _OnFailureHandler(42)
        serialized = Operation(task.dump())
        deserialized = tasks.RemoteWorkflowTask.restore(
            ctx=_MockCtx({}),
            graph=None,
            task_descr=serialized)
        self.assertEqual(deserialized.on_success(), 42)
    def test_handler_serialize_error(self):
        # Lambdas cannot be serialized, so dump() must fail loudly.
        task = _make_remote_task()
        task.on_success = lambda tsk: None
        self.assertRaises(
            exceptions.NonRecoverableError,
            task.dump
        )
class TestGraphSerialize(TestCase):
    """Store/restore tests for TaskDependencyGraph."""
    def test_graph_serialize(self):
        # Storing a graph assigns it an id and records its operations.
        _stored = {}
        task = _make_remote_task({'task': 1})
        graph = tasks_graph.TaskDependencyGraph(_MockCtx(_stored))
        graph.add_task(task)
        self.assertIs(graph.id, None)
        self.assertFalse(graph._stored)
        graph.store(name='graph1')
        self.assertIsNot(graph.id, None)
        self.assertTrue(graph._stored)
        self.assertEqual(_stored['name'], 'graph1')
        self.assertEqual(len(_stored['operations']), 1)
    def test_graph_dependencies(self):
        # Subgraph membership and task dependencies survive a restore.
        _stored = {}
        ctx = _MockCtx(_stored)
        task1 = _make_remote_task({'task': 1})
        task2 = _make_remote_task({'task': 2})
        graph = tasks_graph.TaskDependencyGraph(ctx)
        subgraph = graph.subgraph('sub1')
        subgraph.add_task(task1)
        subgraph.add_task(task2)
        graph.add_dependency(task1, task2)
        graph.store(name='graph1')
        deserialized = tasks_graph.TaskDependencyGraph.restore(
            ctx, TasksGraph({'id': graph.id}))
        self.assertEqual(graph.id, deserialized.id)
        deserialized_task1 = deserialized.get_task(task1.id)
        deserialized_task2 = deserialized.get_task(task2.id)
        deserialized_subgraph = deserialized.get_task(subgraph.id)
        self.assertEqual(deserialized_task1.containing_subgraph.id,
                         deserialized_subgraph.id)
        self.assertEqual(deserialized_task2.containing_subgraph.id,
                         deserialized_subgraph.id)
        # this checks dependencies
        self.assertEqual(deserialized._dependencies, graph._dependencies)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
from django.template.context_processors import csrf
from django.contrib import auth, messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from .forms import LoginForm, RegistrationForm, EditProfileForm, DeletionForm, ChangePasswordForm
from news.models import Comment
from forum.models import Thread, Post
from store.models import Cart
from .models import User
# Create your views here.
# Register a new user.
def register(request):
    """Handle new user sign-up via RegistrationForm and auto-login on success."""
    if request.method == 'POST':
        form = RegistrationForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            user = auth.authenticate(username=request.POST.get('username'),
                                     password=request.POST.get('password1'))
            if user:
                messages.success(request, 'Your registration was successful!')
                auth.login(request, user)
                # Honour a ?next= redirect if one was supplied.
                return redirect(request.GET.get('next') or reverse('user_profile'))
        else:
            messages.error(request, 'Sorry, we were unable to register your account. Please try again.')
    else:
        form = RegistrationForm()
    # The page is neither an archive page nor a team page.
    context = {
        'form': form,
        'button_text': 'Register',
        'archive': False,
        'team': False
    }
    context.update(csrf(request))
    return render(request, 'user_details.html', context)
# Log a user in to the site.
def login(request):
    """Authenticate a user with the LoginForm and start a session."""
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            user = auth.authenticate(username=request.POST.get('username'),
                                     password=request.POST.get('password'))
            if user is not None:
                auth.login(request, user)
                messages.success(request, "You have successfully logged in!")
                # Honour a ?next= redirect if one was supplied.
                return redirect(request.GET.get('next') or reverse('user_profile'))
            else:
                messages.error(request, "Your username or password was not recognised. Please try again.")
    else:
        form = LoginForm()
    # The page is neither an archive page nor a team page.
    context = {'form': form,
               'archive': False,
               'team': False}
    context.update(csrf(request))
    return render(request, 'login.html', context)
# Log a user out from the site.
def logout(request):
    """End the current session and send the user back to the login page."""
    auth.logout(request)
    messages.success(request, 'You have successfully logged out.')
    return redirect(reverse('login'))
# Logged in users can view their own profile.
@login_required(login_url='/login/')
def user_profile(request):
    """Show the logged-in user's own profile with their site activity and orders."""
    user = request.user
    comments = Comment.objects.filter(user_id=user.id)
    posts = Post.objects.filter(user_id=user.id)
    threads = Thread.objects.filter(user_id=user.id)
    orders = Cart.objects.filter(user_id=user.id, status__in=['Received', 'Dispatched'])
    subscription_plan = user.subscription_plan
    # Comments first, then forum posts -- same order as before.
    contributions = list(comments) + list(posts)
    return render(request, 'profile.html', {
        'comments': comments, 'threads': threads,
        'posts': posts, 'contributions': contributions,
        'profile_user': user, 'orders': orders,
        'subscription_plan': subscription_plan,
        # The page is neither an archive page nor a team page.
        'archive': False, 'team': False,
    })
# Logged in users can also view the profiles of others.
@login_required(login_url='/login/')
def other_profile(request, user_id):
    """Show another user's public profile; redirect to user_profile for one's own id."""
    user = request.user
    profile_user = get_object_or_404(User, pk=user_id)
    comments = Comment.objects.filter(user_id=profile_user.id)
    posts = Post.objects.filter(user_id=profile_user.id)
    threads = Thread.objects.filter(user_id=profile_user.id)
    # Comments first, then forum posts -- same order as before.
    contributions = list(comments) + list(posts)
    # If a user enters the url with their own profile_id, redirect to the
    # user_profile view as different data will be shown there.
    if user == profile_user:
        return redirect(reverse('user_profile'))
    else:
        return render(request, 'profile.html', {
            'comments': comments, 'threads': threads,
            'posts': posts, 'contributions': contributions,
            'profile_user': profile_user,
            # The page is neither an archive page nor a team page.
            'archive': False, 'team': False,
        })
# Logged in users can edit their own profile.
@login_required(login_url='/login/')
def edit_profile(request):
    """Let the logged-in user update their own account details."""
    user = request.user
    if request.method == 'POST':
        form = EditProfileForm(request.POST, request.FILES, instance=user)
        if form.is_valid():
            form.save()
            messages.success(request, 'Your profile has been updated.')
            return redirect(reverse('user_profile'))
        else:
            messages.error(request, 'Sorry, we were unable to update your details. Please try again.')
    else:
        form = EditProfileForm(instance=user)
    # The page is neither an archive page nor a team page.
    context = {
        'form': form,
        'button_text': 'Update Profile',
        'archive': False,
        'team': False
    }
    context.update(csrf(request))
    return render(request, 'user_details.html', context)
# Logged in users can also delete their own profile.
@login_required(login_url='/login/')
def delete_profile(request):
    """Delete the logged-in user's account after re-checking their password."""
    user = request.user
    if request.method == 'POST':
        form = DeletionForm(request.POST)
        if form.is_valid():
            # Re-authenticate with the supplied password before deleting.
            account = auth.authenticate(username=user.username,
                                        password=request.POST.get('password'))
            if account is not None:
                account.delete()
                messages.success(request, 'Your profile has been deleted.')
                return redirect(reverse('login'))
            else:
                messages.error(request, 'Your password was not recognised. Please try again.')
    else:
        form = DeletionForm()
    # The page is neither an archive page nor a team page.
    context = {
        'form': form,
        'button_text': 'Delete Account',
        'archive': False,
        'team': False
    }
    context.update(csrf(request))
    return render(request, 'delete_profile.html', context)
# Once logged in, a user can change their password.
@login_required(login_url='/login/')
def change_password(request):
    """Change the logged-in user's password after verifying the current one."""
    user = request.user
    if request.method == 'POST':
        form = ChangePasswordForm(request.POST)
        if form.is_valid():
            # Verify the current password before accepting the new one.
            verified = auth.authenticate(username=user.username,
                                         password=request.POST.get('password'))
            if verified is not None:
                user.set_password(request.POST.get('password1'))
                user.save()
                # Re-login so the password change doesn't end the session.
                auth.login(request, user)
                messages.success(request, 'Your password has been changed.')
                return redirect(reverse('user_profile'))
            else:
                messages.error(request, 'Sorry, we were unable to change your password. Please try again.')
    else:
        form = ChangePasswordForm()
    # The page is neither an archive page nor a team page.
    context = {
        'form': form,
        'button_text': 'Change Password',
        'archive': False,
        'team': False
    }
    context.update(csrf(request))
    return render(request, 'change_password.html', context)
|
import sys
sys.path.append('/anaconda3/lib/python3.7/site-packages')
import networkx as nx
import tree_operations_v1 as tree_operations
from networkx.drawing.nx_agraph import graphviz_layout
import matplotlib.pyplot as plt
import inits_v1 as inits
import os
import utiles
import seaborn as sns
import numpy as np
def draw_garph(H, S, G, deleted_nodes, big_size, small_size):
    """Render the hypergraph H (built from gene tree G and species tree S)
    to hyperGraph.png, labelling nodes with their s/t/l attributes."""
    print('Drawing hypergraph...')
    # Per-node attributes of H: 's' (gene-tree label), 't' (species-tree
    # label), 'l' (list of matches); per-edge 'source'/'target'.
    labels1 = nx.get_node_attributes(H, 's')
    labels2 = nx.get_node_attributes(H, 't')
    labels3 = nx.get_node_attributes(H, 'l')
    labels4 = nx.get_edge_attributes(H,'source')
    labels5 = nx.get_edge_attributes(H,'target')
    #labels6 = nx.get_node_attributes(H, 'red_colors')
    #labels7 = nx.get_node_attributes(H, 'black_colors')
    # Join each node's list of matches into one newline-separated string.
    new_labels3 = {}
    for k,match in labels3.items():
        l = ""
        for to_seperate in match:
            l = l+str(to_seperate)+'\n'
        new_labels3.update({k:l})
    # Append the taxon name (when present) to each species-tree label...
    for k,l in labels2.items():
        for x in S.postorder_node_iter():
            if x.taxon != None:
                if x.label == l:
                    l = l+" ("+str(x.taxon)+")"
        labels2.update({k:l})
    # ...and to each gene-tree label.
    for k,l in labels1.items():
        for x in G.postorder_node_iter():
            if x.taxon != None:
                if x.label == l:
                    l = l+" ("+str(x.taxon)+")"
        labels1.update({k:l})
    # Build the display label for every surviving (non-deleted) node index.
    nodes_labels = {}
    for i in range(0,len(labels1)+len(deleted_nodes)):
        if not i in deleted_nodes:
            nodes_labels.update({i:("node_number: "+str(i)+"\ns: "+str(labels1[i])+"\nt: "+str(labels2[i])+"\n"+str(new_labels3[i])+"\nred color:")})
    edges_labels = {}
    for e in H.edges(keys=True):
        edges_labels.update({e:("source: "+str(labels4[e])+"\ntarget: "+str(labels5[e]))})
    # Root node (matching both seed labels) is drawn big; all others small.
    # NOTE(review): Graph.node was removed in networkx 2.4+/3.0 -- this call
    # presumably needs to be H.nodes(data=True) on modern networkx; confirm
    # the pinned networkx version.
    sizes = []
    for nd in H.node(data=True):
        if((nd[1]['s']==G.seed_node.label and nd[1]['t']==S.seed_node.label)):
            sizes = sizes+[big_size]
        else:
            sizes = sizes+[small_size]
    # Lay out bottom-to-top with graphviz and render everything to one PNG.
    pos = graphviz_layout(H, prog='dot', args="-Grankdir=BT")
    plt.figure(12,figsize=(20,20)) #size of fig
    nx.draw(H, pos, arrows=True,node_size=sizes)
    nx.draw_networkx_labels(H, pos,nodes_labels, font_size=7)
    nx.draw_networkx_edges(H, pos, alpha = 0.1, width = 2, edge_color='b')
    nx.draw_networkx_edge_labels(H, pos,font_size=6,labels = edges_labels)
    plt.savefig('hyperGraph.png')
    print('Finished drawing hypergraph.\n')
def draw_new_G(G,G_nodes_identified,colors,sigma,new_G):
    """Render new_G (a DiGraph derived from gene tree G) to new_G.png,
    colouring leaves by their sigma mapping and sizing identified nodes."""
    print('Drawing new G...')
    plt.clf()
    # Rebuild G as a plain DiGraph; node ids are 1-based postorder positions.
    tree_to_draw = nx.DiGraph()
    index = 1
    for u in G.postorder_node_iter():
        tree_to_draw.add_node(index, label=u.label)
        if not tree_operations.is_a_leaf(u):
            child = u.child_nodes()
            i = 0
            while i < len(child):
                if len(child) >= i + 1:
                    tree_to_draw.add_edge(index, list(G.postorder_node_iter()).index(child[i]) + 1)
                i = i + 1
        index += 1
    labels1 = nx.get_node_attributes(new_G, 'label')
    pos1 = graphviz_layout(tree_to_draw, prog='dot')
    plt.figure(12, figsize=(40, 40)) # size of fig
    print("G_nodes_identified = %s" % str(G_nodes_identified))
    # Colour/size each node of new_G: leaves get their sigma colour,
    # "identified" internal nodes are blue and scaled by their count,
    # everything else is white.
    nodes_color = []
    nodes_size = []
    for nd in new_G.nodes(data = True):
        if new_G.out_degree(nd[0]) == 0 and not new_G.in_degree(nd[0]) == 0:
            # Non-root leaf: colour by the species this gene maps to.
            if colors[sigma[nd[1]['label']]] == 'red':
                nodes_color.append('red')
            else:
                nodes_color.append('black')
            nodes_size.append(200)
        elif G_nodes_identified[nd[1]['label']] > 0:
            nodes_color.append('blue')
            nodes_size.append(G_nodes_identified[nd[1]['label']] * 350)
        else:
            nodes_color.append('white')
            nodes_size.append(200)
    # Append the taxon name (when present) to each node label.
    for r,l in labels1.items():
        for x in G.postorder_node_iter():
            if x.taxon != None:
                if x.label == l:
                    l = l+"\n ("+str(x.taxon)+")"
        labels1.update({r:l})
    #edges_width = []
    # Edge colour encodes the dominant horizontal-transfer weight:
    # grey = none, red vs black = which weight component dominates.
    edges_color = []
    for e in new_G.edges(data=True):
        if e[2]['weight'][0] == 0 and e[2]['weight'][1] == 0:
            edges_color.append('grey')
            #edges_width.append(1)
        elif e[2]['weight'][0] > e[2]['weight'][1]: #more red HT
            #edges_width.append(e[2]['weight'][0]*10)
            edges_color.append('red')
        else: #more black HT
            #edges_width.append(e[2]['weight'][1]*10)
            edges_color.append('black')
    #edges_width = utile.normlize (edges_width,20)
    print("len(nodes_size) = %s, len(nodes_color) = %s" % (len(nodes_size),len(nodes_color)))
    nx.draw(tree_to_draw, pos1, arrows=True,node_size = nodes_size, node_color=nodes_color,edge_color = edges_color, width = 1)
    nx.draw_networkx_labels(tree_to_draw, pos1, labels1, font_size=7,)
    #nx.draw_networkx_edges(tree_to_draw, pos1, )
    #nx.draw_networkx_edge_labels(tree_to_draw, pos1, font_size=6, labels=lables2)
    plt.savefig('new_G.png')
    print('Finished drawing new G.\n')
def draw_tree(tree, name, old_sigma, colors, sigma,path,color_tree,x_axis,y_axis,label_flag):
    """Render a dendropy-style tree to <path><name>.png.

    name 'S' colours species nodes from `colors` (when color_tree is set);
    any other name treats the tree as a gene tree and colours leaves via
    sigma. label_flag toggles drawing of (vertical) node labels.
    """
    # Rebuild the tree as a DiGraph; node ids are 1-based postorder positions.
    tree_to_draw = nx.DiGraph()
    index = 1
    nodes_color = []
    for u in tree.postorder_node_iter():
        tree_to_draw.add_node(index, label=u.label)
        if not tree_operations.is_a_leaf(u):
            child = u.child_nodes()
            i = 0
            while i < len(child):
                if len(child) >= i+1:
                    tree_to_draw.add_edge(index, list(tree.postorder_node_iter()).index(child[i]) + 1)
                i = i+1
        index += 1
    # Append taxon names (and, for gene trees, the old sigma mapping) to labels.
    labels = nx.get_node_attributes(tree_to_draw, 'label')
    for k,l in labels.items():
        for x in tree.postorder_node_iter():
            if x.taxon != None:
                if x.label == l:
                    l = l+"\n ("+str(x.taxon)+")"
                    if name == 'G':
                        l = l+'\n'+str(old_sigma[x.taxon.label])
                    labels.update({k:l})
                else:
                    labels.update({k: l})
    if name == 'S' :
        # Species tree: colour nodes straight from the colors mapping.
        for u in tree.postorder_node_iter():
            if color_tree:
                if u.label != None and u.label in colors:
                    if colors[u.label] == 'red':
                        nodes_color.append('red')
                    elif colors[u.label] == 'black':
                        nodes_color.append('grey')
                    else :
                        nodes_color.append('pink')
                else:
                    nodes_color.append('white')
            else:
                nodes_color.append('white')
    else:
        # Gene tree: colour mapped, non-isolated leaves via sigma.
        for u in tree.postorder_node_iter():
            if tree_operations.is_a_leaf(u) and not tree_operations.isolated(u) and sigma[u.label] in colors:
                if colors[sigma[u.label]] == 'red':
                    nodes_color.append('red')
                else:
                    nodes_color.append('grey')
            else:
                nodes_color.append('white')
    postree = graphviz_layout(tree_to_draw, prog='dot')
    plt.figure(12, figsize=(x_axis, y_axis)) # size of fig
    nx.draw(tree_to_draw, postree, arrows=True,node_color=nodes_color)
    if label_flag:
        text = nx.draw_networkx_labels(tree_to_draw, postree, labels, font_size=7)
        # Rotate labels so they do not overlap horizontally.
        for _, t in text.items():
            t.set_rotation('vertical')
    nx.draw_networkx_edges(tree_to_draw, postree)
    plt.savefig(path+name+'.png')
    print('Drawing'+name)
def draw_S_and_G(S,G, old_sigma, colors, sigma,path,sol,ext, to_color):
    """Draw the species tree S and gene tree G side by side into
    <path>/figures/S+G<ext>.png.

    `sol`, when not None, is assumed to be a dict of solutions whose
    'Marked' node and 'list_of_couples' pairs are highlighted blue on G
    (TODO confirm schema against producer). `to_color` toggles the
    red/grey/pink colouring; uncoloured nodes are white.
    """
    plt.clf()
    S_to_draw = nx.DiGraph()
    G_to_draw = nx.DiGraph()
    index = 1
    nodes_color_S = []
    nodes_color_G = []
    ##FOR S
    # Mirror S into a DiGraph (1-based postorder ids), same scheme as draw_tree.
    for u in S.postorder_node_iter():
        S_to_draw.add_node(index, label=u.label)
        if not tree_operations.is_a_leaf(u):
            child = u.child_nodes()
            i = 0
            while i < len(child):
                if len(child) >= i+1:
                    S_to_draw.add_edge(index, list(S.postorder_node_iter()).index(child[i]) + 1)
                i = i+1
        index += 1
    labels_S = nx.get_node_attributes(S_to_draw, 'label')
    # Append "(taxon)" to each labelled S node.
    for k,l in labels_S.items():
        for x in S.postorder_node_iter():
            if x.taxon != None:
                if x.label == l:
                    l = l+"\n ("+str(x.taxon)+")"
                    labels_S.update({k: l})
    for u in S.postorder_node_iter():
        if u.label != None and u.label in colors and to_color:
            if colors[u.label] == 'red':
                nodes_color_S.append('red')
            elif colors[u.label] == 'black':
                nodes_color_S.append('grey')
            else :
                nodes_color_S.append('pink')
        else:
            nodes_color_S.append('white')
    ## FOR G
    index = 1
    for u in G.postorder_node_iter():
        G_to_draw.add_node(index, label=u.label)
        if not tree_operations.is_a_leaf(u):
            child = u.child_nodes()
            i = 0
            while i < len(child):
                if len(child) >= i+1:
                    G_to_draw.add_edge(index, list(G.postorder_node_iter()).index(child[i]) + 1)
                i = i+1
        index += 1
    labels_G = nx.get_node_attributes(G_to_draw, 'label')
    # G labels additionally carry the old_sigma annotation of their taxon.
    for k,l in labels_G.items():
        for x in G.postorder_node_iter():
            if x.taxon != None:
                if x.label == l and (x.taxon.label in old_sigma):
                    l = l+"\n ("+str(x.taxon)+")"
                    l = l+'\n'+str(old_sigma[x.taxon.label])
                    labels_G.update({k:l})
    for u in G.postorder_node_iter():
        degel = False
        if sol != None:
            # NOTE(review): the loop variable `int` shadows the builtin int here.
            for int, temp_sol in sol.items():
                for p in range (0,len(temp_sol['list_of_couples'])):
                    if (u.label == temp_sol['Marked'] or u.label == temp_sol['list_of_couples'][p][0] or u.label == temp_sol['list_of_couples'][p][1]) and not degel:
                        nodes_color_G.append('blue')
                        degel = True
        if not degel and tree_operations.is_a_leaf(u) and u.label in sigma and not tree_operations.isolated(u) and sigma[u.label] in colors and to_color:
            if colors[sigma[u.label]] == 'red':
                nodes_color_G.append('red')
            elif colors[sigma[u.label]] == 'black':
                nodes_color_G.append('grey')
            else:
                nodes_color_G.append('pink')
        elif not degel:
            nodes_color_G.append('white')
    postree_S = graphviz_layout(S_to_draw, prog='dot')
    postree_G = graphviz_layout(G_to_draw, prog='dot')
    for k, v in postree_G.items():
        # Shift the x values of every G node by 100 to the right so the two
        # trees do not overlap (earlier comment said 10; the code adds 100).
        lst = list(v)
        lst[0] = lst[0] + 100
        postree_G.update({k:tuple(lst)})
    fig, axes = plt.subplots(1,2,figsize=(90, 50))
    ax = axes.flatten()
    ax[0].set_title('Species tree',fontsize=50,rotation='vertical',x=-0.1,y=0.5)
    ax[1].set_title('Gene tree',fontsize=50,rotation='vertical',x=-0.1,y=0.5)
    nx.draw(S_to_draw, postree_S, arrows=True,node_color=nodes_color_S,ax=ax[0])
    nx.draw(G_to_draw, postree_G, arrows=True,node_color=nodes_color_G,ax=ax[1])
    t1 = nx.draw_networkx_labels(S_to_draw, postree_S, labels_S, font_size=7,ax=ax[0])
    t2 = nx.draw_networkx_labels(G_to_draw, postree_G, labels_G, font_size=7,ax=ax[1])
    for _, t in t1.items():
        t.set_rotation('vertical')
    for _, t in t2.items():
        t.set_rotation('vertical')
    nx.draw_networkx_edges(S_to_draw, postree_S,ax=ax[0])
    nx.draw_networkx_edges(G_to_draw, postree_G,ax=ax[1])
    to_create = path + '/figures/'
    os.makedirs(os.path.dirname(to_create), exist_ok=True)
    plt.savefig(path + '/figures/S+G'+ext+'.png')
def draw_G_diffrent_optimal_solutions(marked_nodes, colors, sigma, old_sigma, new_G, G, k, path, both, alpha, labels, TH_compare_subtrees, TH_both, TH_pattern_in_subtree,compare_subtrees,evolutinary_event,pattern,iterations, factor,size):
    """Draw G with marked nodes of the different optimal solutions.

    Each marked node is sized proportionally to its accumulated score across
    all solutions (normalised to `size`); leaves are coloured via sigma/colors.
    Saves to <path>/figures/G_different_optimal...png.  Note: several
    parameters (both, alpha, TH_both, iterations) are accepted but unused here.
    """
    print('Drawing new G...')
    plt.clf()
    plt.figure(figsize=(80, 40))
    tree_to_draw = nx.DiGraph()
    index = 1
    # Mirror G into a DiGraph (1-based postorder ids).
    for u in G.postorder_node_iter():
        tree_to_draw.add_node(index, label=u.label)
        if not tree_operations.is_a_leaf(u):
            child = u.child_nodes()
            i = 0
            while i < len(child):
                if len(child) >= i + 1:
                    tree_to_draw.add_edge(index, list(G.postorder_node_iter()).index(child[i]) + 1)
                i = i + 1
        index += 1
    # Labels come from new_G[0] (pre-annotated copy of G) — positions from G itself.
    labels1 = nx.get_node_attributes(new_G[0], 'label')
    pos1 = graphviz_layout(tree_to_draw, prog='dot')
    marked_counter = inits.init_dic(G.nodes(),0)
    max_counts = 0
    i = 1
    # Accumulate, per marked node, the highest of its 2x2 scoring entries
    # over all solutions; track the overall maximum for normalisation.
    for solution in marked_nodes:
        print('solution number '+str(i)+":")
        for item in solution:
            marked_node = item[0]
            scoring = item[1]
            temp_high_score = 0
            for j in range(0,2):
                for m in range(0,2):
                    if temp_high_score < scoring[j][m]:
                        temp_high_score = scoring[j][m]
            print(' %s with score: %s' % (str(marked_node),str(temp_high_score)))
            marked_counter[marked_node] += temp_high_score
            if marked_counter[marked_node] > max_counts:
                max_counts = marked_counter[marked_node]
        i += factor
    # Normalise the accumulated counts into node sizes in [0, size].
    for u,counter in marked_counter.items():
        if max_counts>0:
            marked_counter.update({u:(counter/max_counts)*size})
    #print('max_counter = %s, marked_counter = %s' % (str(max_counts),str(marked_counter)))
    nodes_color = []
    nodes_size = []
    for nd in new_G[0].nodes(data=True):
        # Leaf (out-degree 0, not the isolated root): colour via sigma.
        if new_G[0].out_degree(nd[0]) == 0 and not new_G[0].in_degree(nd[0]) == 0:
            if colors[sigma[nd[1]['label']]] == 'red':
                nodes_color.append('red')
            else:
                nodes_color.append('grey')
            nodes_size.append(200)
        elif marked_counter[nd[1]['label']] > 0:
            # A marked internal node: blue, sized by its normalised score.
            nodes_color.append('blue')
            nodes_size.append(marked_counter[nd[1]['label']])
        else:
            nodes_color.append('white')
            nodes_size.append(200)
    if labels:
        for r, l in labels1.items():
            for x in G.postorder_node_iter():
                if x.taxon != None:
                    if x.label == l:
                        l = l + "\n (" + str(x.taxon) + ")"
                        l = l + '\n' + str(old_sigma[x.taxon.label])
                        labels1.update({r: l})
    nx.draw(tree_to_draw, pos1, arrows=True, node_size=nodes_size, node_color=nodes_color,
            width=1)
    nx.draw_networkx_labels(tree_to_draw, pos1, labels1, font_size=10)
    plt.savefig(path + '/figures/G_different_optimal. k=' + str(k) + '_TH_compare_subtrees = ' + str(TH_compare_subtrees) + '_TH_pattern_in_subtree = ' + str(TH_pattern_in_subtree) +"_pattern="+pattern+"_"+evolutinary_event+"compare_subtrees="+str(compare_subtrees)+'.png')
    print('Finished drawing new G.\n')
def connectpoints(x, y, p1, p2, color='k-'):
    """Draw a thick line segment between points p1 and p2 of the series (x, y).

    Args:
        x, y: indexable sequences of coordinates.
        p1, p2: indices of the two points to connect.
        color: matplotlib format string for the segment. Defaults to 'k-'
            (solid black) — `draw_plot` calls this function without a color
            argument, which previously raised TypeError.
    """
    x1, x2 = x[p1], x[p2]
    y1, y2 = y[p1], y[p2]
    plt.plot([x1, x2], [y1, y2], color, linewidth=4)
def draw_plot(all_vertices_with_noise,path,marked_vertex):
    """Scatter-plot vertex scores against noise level and save the figure.

    all_vertices_with_noise: dict noise_level -> {vertex: (red_score, black_score)}
    marked_vertex: vertices whose black scores get connected by line segments.
    Saves to <path>/figures/plot_noise.png.
    """
    print('Drawing plot..')
    plt.clf()
    plt.figure(12, figsize=(20, 20))  # size of fig
    list_to_draw_reds = []
    list_to_draw_blacks = []
    length = []
    names = []
    to_connect = []
    length_to_connect = []
    # NOTE(review): `max` shadows the builtin max() inside this function.
    max = 0
    max_noise_level = 0
    # Flatten the nested dict into parallel lists and track the ranges.
    for noise_level, all_vertices in all_vertices_with_noise.items():
        for u, couple in all_vertices.items():
            names.append(u)
            length.append(noise_level)
            list_to_draw_reds.append(couple[0])
            list_to_draw_blacks.append(couple[1])
            if max < couple[0]:
                max = couple[0]
            if max < couple[1]:
                max = couple[1]
            if max_noise_level < noise_level:
                max_noise_level = noise_level
            if u in marked_vertex:
                to_connect.append(couple[1])
                length_to_connect.append(noise_level)
    fig, ax = plt.subplots()
    # NOTE(review): both series are drawn with 'ro' (red circles) — presumably
    # the second was meant to be black ('ko'); confirm intent.
    ax.plot(length, list_to_draw_reds, 'ro', length, list_to_draw_blacks, 'ro')
    for X, Y, Z in zip(length, list_to_draw_reds, names):
        # Annotate the points 5 _points_ above and to the left of the vertex
        ax.annotate('{}'.format(Z), xy=(X, Y), xytext=(5, -5), ha='left',
                    textcoords='offset points')
    for X, Y, Z in zip(length, list_to_draw_blacks, names):
        # Annotate the points 5 _points_ above and to the left of the vertex
        ax.annotate('{}'.format(Z), xy=(X, Y), xytext=(5, -5), ha='left',
                    textcoords='offset points')
    sns.set(font_scale=1.4)
    plt.xlabel('Noise', fontsize=10)
    plt.ylabel('Score', fontsize=10)
    for p in range(0, len(length_to_connect) - 1):
        # NOTE(review): connectpoints is defined with a 5th `color` parameter;
        # this 4-argument call relies on it having a default value.
        connectpoints(length_to_connect, to_connect, p, p + 1)
    plt.axis([0, max_noise_level + 5, 0, max + 0.0001])
    plt.savefig(path + '/figures/plot_noise.png')
def draw_compare_k_plot(all_vertices_with_k,path):
    """Heatmap of per-vertex best scores across different k values.

    all_vertices_with_k: dict k -> {vertex: score_pair}; for each cell the
    max of the pair is shown. Saves to <path>/figures/plot_noise.png.
    """
    print('Drawing plot..')
    plt.clf()
    sns.set(font_scale=1.6)
    plt.figure(figsize=(25, 5))  # size of fig
    l = utiles.compare_dict_entries(all_vertices_with_k)
    print(l)
    to_display = []
    xtick = []
    ytick = []
    first = True
    for k, all_vertices_for_k in l.items():
        ytick.append(k)
        to_display_temp = []
        for vertex, couple in all_vertices_for_k.items():
            to_display_temp.append(max(couple))
            # Collect the vertex names once (from the first k) as axis labels.
            if first:
                xtick.append(vertex)
        # NOTE(review): indentation was lost in this file; `first = False` is
        # placed after the inner loop so xtick holds ALL vertices of the first
        # k (one label per heatmap row) — confirm against the original.
        first = False
        to_display = to_display + [to_display_temp]
    print(to_display)
    ax = sns.heatmap(utiles.flip_list(to_display,len(xtick)), yticklabels=xtick, xticklabels=ytick, linewidth=0.5)
    plt.yticks(rotation=0)
    plt.xticks(rotation=0)
    fig = ax.get_figure()
    fig.savefig(path + '/figures/plot_noise.png')
def get_text_positions(text, x_data, y_data, txt_width, txt_height):
    """Compute vertically de-overlapped y positions for point labels.

    For each point, neighbours within twice the text width whose y is not
    below (y - txt_height) are examined; on a collision the label is pushed
    above the highest neighbour, unless a vertical gap between consecutive
    neighbours is wide enough to host it. `text` is unused (kept for the
    caller's signature).
    """
    points = list(zip(y_data, x_data))
    positions = list(y_data)
    for idx, (y, x) in enumerate(points):
        # Neighbours close enough horizontally, not below this label, not itself.
        neighbours = [
            p for p in points
            if p[0] > (y - txt_height)
            and abs(p[1] - x) < txt_width * 2
            and p != (y, x)
        ]
        if not neighbours:
            continue
        nearest = sorted(neighbours)
        if abs(nearest[0][0] - y) >= txt_height:
            continue  # no collision with the closest neighbour
        gaps = np.diff(nearest, axis=0)
        # Default resolution: push the label above the highest neighbour.
        points[idx] = (nearest[-1][0] + txt_height, points[idx][1])
        positions[idx] = nearest[-1][0] + txt_height * 1.01
        for gap_idx, (dy, _dx) in enumerate(gaps):
            # dy is the vertical distance between consecutive neighbours.
            if dy > txt_height * 2:  # room to fit the label inside this gap
                points[idx] = (nearest[gap_idx][0] + txt_height, points[idx][1])
                positions[idx] = nearest[gap_idx][0] + txt_height
                break
    return positions
def text_plotter(text, x_data, y_data, text_positions, txt_width,txt_height):
    """Annotate each point with its label at the adjusted position; when a
    label was displaced from its data point, draw a faint red arrow from the
    label back to the point."""
    for label, x, y, pos in zip(text, x_data, y_data, text_positions):
        plt.annotate(str(label), xy=(x - txt_width / 2, pos), size=12)
        if y == pos:
            continue
        # Displaced label: arrow from the label position back down to the data.
        plt.arrow(x, pos, 0, y - pos, color='red', alpha=0.3,
                  width=txt_width * 0.1, head_width=txt_width,
                  head_length=txt_height * 0.5, zorder=0,
                  length_includes_head=True)
# Generated by Django 3.2.5 on 2021-08-16 17:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the Doctors table.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Doctors',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('doctor_name', models.CharField(max_length=120)),
                ('speciality', models.CharField(max_length=250)),
                ('image', models.ImageField(upload_to='images')),
                ('specialist', models.CharField(max_length=120)),
                ('about', models.CharField(max_length=500)),
                # start_time/end_time are plain hour integers (default 10) —
                # presumably opening hours; confirm against the app's views.
                ('start_time', models.PositiveIntegerField(default=10)),
                ('end_time', models.PositiveIntegerField(default=10)),
                ('availability', models.CharField(max_length=120)),
            ],
        ),
    ]
|
"""
Given a min-heap 'pq' containing N element, return whether or not the value 'val' is lower or equal than
the Kth value of the heap
"""
from typing import List
"""
The simple approach is to pop K value and check => This approach takes O(K log N)
- This is the only approach available if you need the value of the Kth element of the heap
- But we do not need this value for the problem
The next approach is to do a DFS in the heap and count the number of elements lower than 'val'
Since it is a min-heap, we can avoid going down a heap node if the value is higher than 'val' (all sub-nodes are bigger)
"""
def is_lower_than_kth_element(heap: List[int], val: int, k: int = 1) -> bool:
    """Return True when `val` <= the k-th smallest value of min-heap `heap`.

    Equivalence used: val <= k-th smallest  <=>  fewer than k elements are
    strictly lower than val. We DFS the implicit heap tree counting elements
    strictly lower than `val`; since every child is >= its parent we prune
    any subtree whose root is >= val, and we stop as soon as k such elements
    have been found — O(k) time, independent of N.

    Args:
        heap: list laid out as a binary min-heap (children of i at 2i+1, 2i+2),
            assumed to contain at least k elements.
        val: the candidate value.
        k: 1-indexed rank to compare against (new parameter, defaults to 1,
            i.e. "is val <= the minimum?").
    """
    def _count_lower(i: int, cap: int) -> int:
        # Count elements strictly lower than val in the subtree rooted at i,
        # stopping early once `cap` such elements have been found.
        if cap <= 0 or i >= len(heap) or heap[i] >= val:
            return 0
        found = 1  # heap[i] < val
        found += _count_lower(2 * i + 1, cap - found)
        found += _count_lower(2 * i + 2, cap - found)
        return found

    return _count_lower(0, k) < k
|
#!/usr/bin/python3
"""task 0
"""
import requests
def number_of_subscribers(subreddit):
    """Return the subscriber count of `subreddit`, or 0 on any failure.

    Queries Reddit's public about.json endpoint with a custom User-Agent
    (Reddit throttles default agents). `allow_redirects=False` is essential:
    Reddit answers invalid subreddit names with a redirect to a search page
    instead of a 404, so following it would yield bogus data.
    """
    url = "https://www.reddit.com/r/" + subreddit + "/about.json"
    head = {"User-Agent": "linux: didierrevelo:v1.0.0 by /u/didierrevelo"}
    try:
        response = requests.get(url, headers=head, allow_redirects=False)
        data = response.json().get("data")
    except (requests.RequestException, ValueError):
        # Network error or non-JSON body (e.g. a rate-limit HTML page).
        return 0
    if data is None:
        return 0
    return data.get("subscribers", 0)
|
# Generated by Django 3.2.8 on 2021-10-17 22:00
from django.db import migrations
def refactor_suggestion(apps, schema_editor):
    """Forward data migration: copy each ProblemSuggestion's student.user
    onto its (recently added) `user` field.

    Uses the historical model via `apps.get_model`, as required inside
    migrations; select_related avoids one query per row.
    """
    suggestions = apps.get_model('dashboard', 'ProblemSuggestion')
    for s in suggestions.objects.all().select_related('student__user'):
        s.user = s.student.user
        s.save()
class Migration(migrations.Migration):
    dependencies = [
        ('dashboard', '0059_problemsuggestion_user'),
    ]

    # Reverse step is a noop: the copied data is simply ignored on rollback.
    operations = [migrations.RunPython(refactor_suggestion, migrations.RunPython.noop)]
|
import numpy as np
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score)
import warnings
warnings.filterwarnings('ignore')
def my_acc_auc(outputs, targets):
    """Compute accuracy, F1, precision and recall for binary predictions.

    Fixes over the previous version: the `@property` decorator was removed —
    `property` only works on class attributes, and applying it to this
    module-level function turned it into an uncallable property object. The
    docstring also claimed an AUC score that was never computed.

    Args:
        outputs: tensor of raw scores/logits; a score >= 0 is taken as a
            positive prediction.
        targets: tensor of 0/1 ground-truth labels, same shape as `outputs`.

    Returns:
        Tuple of floats: (accuracy, f1, precision, recall).
    """
    y_true = targets.cpu().detach().numpy().squeeze()
    y_pred = outputs.cpu().detach().numpy().squeeze()
    # Threshold raw scores at 0: >= 0 -> 1 (True), < 0 -> 0 (False).
    y_pred = np.where(y_pred >= 0, 1, y_pred)
    y_pred = np.where(y_pred < 0, 0, y_pred)
    acc = accuracy_score(y_true, y_pred)
    f1 = f1_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred)
    recall = recall_score(y_true, y_pred)
    return acc, f1, precision, recall
|
from IPython.display import clear_output
def display_board(board_list): #for displaying the game board, corresponding positions are numpad positions of keypad
    """Print the 3x3 board; cell indices follow the numeric keypad layout
    (7-8-9 on the top row, 1-2-3 on the bottom)."""
    rows = [(7, 8, 9), (4, 5, 6), (1, 2, 3)]
    separator = '---|-----|---'
    for row_number, (a, b, c) in enumerate(rows):
        print(board_list[a]+' | '+ board_list[b]+' | '+ board_list[c])
        if row_number < 2:
            print(separator)
def player_select():
    """Randomly decide which player takes the first turn."""
    from random import randint
    return 'player1' if randint(0, 1) == 0 else 'player2'
def player_input(): #for assigning 2 players with markers 'X' or 'O'
    """Prompt player 1 to pick 'X' or 'O' (case-insensitive) and return the
    tuple (player1_marker, player2_marker)."""
    marker = ' '
    # Keep asking until a valid marker is typed.
    while marker not in ['X','O']:
        marker = input('Player 1: please choose between X or O: ').upper()
    player1 = marker
    player2 = 'O' if player1 == 'X' else 'X'
    print(f'player 1 has selected {player1} and player 2 has selected {player2}')
    return (player1,player2)
def select_position():
    """Prompt until the user types a digit between 1 and 9; return it as int."""
    while True:
        position = input ('choose among positions 1,2,3,4,5,6,7,8,9: ')
        if position.isdigit() and int(position) in range(1,10):
            return int(position)
        print('Wrong selection! please select number among 1,2,3,4,5,6,7,8,9')
def place_marker_on_board(board_list, marker, position): #update board with player input
    """Write `marker` into cell `position` (board mutated in place) and
    return the board for convenience."""
    board_list[position] = marker
    return board_list
def win_check(board_list,marker): #check horizontal, vertical and diagnal positions of the board for win
    """Return True when `marker` fills any row, column or diagonal of the
    keypad-indexed board."""
    winning_lines = (
        (1, 2, 3), (4, 5, 6), (7, 8, 9),   # horizontals
        (1, 4, 7), (2, 5, 8), (3, 6, 9),   # verticals
        (1, 5, 9), (3, 5, 7),              # diagonals
    )
    return any(
        all(board_list[cell] == marker for cell in line)
        for line in winning_lines
    )
#def fullboard_check(board_list):
# for index,value in enumerate(board_list):
# if board_list[index] != ' ':
# return True
# else:
# return False
def position_check(board_list,position):
    """Return True when cell `position` is still free (holds a space)."""
    return board_list[position] == ' '
def fullboard_check(board_list):
    """Return True when no free cell remains among positions 1-9.

    (position_check's `== ' '` test is inlined here.)"""
    return all(board_list[position] != ' ' for position in range(1,10))
def replay():
    """Ask whether to play again; returns True for Y/y, False for N/n."""
    while True:
        answer = input('do you want to play again? Y or N: ')
        if answer in ['Y','y']:
            return True
        if answer in ['N','n']:
            return False
        print('select Y or N')
# --- Top-level game loop: set up a fresh board each round, alternate turns
# until a win or a draw, then offer a replay. ---
while True:
    # Index 0 holds a '*' placeholder so cells 1-9 line up with keypad keys.
    # NOTE(review): this literal has 11 elements (one trailing spare ' ').
    board_list = ['*',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']
    display_board(board_list)
    player1_marker,player2_marker = player_input()
    turn = player_select()
    print(f'{turn} has the first go')
    play_game = input('Ready to play? Y or N')
    if play_game.lower() == 'y':
        game_on = True
    elif play_game.lower() == 'n':
        game_on = False
        print('Run later')
        break
    # NOTE(review): if the answer is neither y nor n, game_on is left unset —
    # NameError on the very first round; confirm intended handling.
    while game_on == True:
        if turn == 'player1':
            display_board(board_list)
            position = select_position()
            place_marker_on_board(board_list,player1_marker,position)
            # NOTE(review): the chosen cell is not checked with position_check,
            # so an occupied cell can be overwritten.
            if win_check(board_list,player1_marker) == True:
                display_board(board_list)
                print('Player 1 has won')
                game_on = False
            else:
                if fullboard_check(board_list) == True:
                    display_board(board_list)
                    print('There is a draw!')
                    break
                else:
                    turn = 'player2'
        else:
            display_board(board_list)
            position = select_position()
            place_marker_on_board(board_list, player2_marker, position)
            if win_check(board_list, player2_marker) == True:
                display_board(board_list)
                print('Player 2 has won')
                game_on = False
            else:
                if fullboard_check(board_list) == True:
                    display_board(board_list)
                    print('There is a draw!')
                    break
                else:
                    turn = 'player1'
    # One round finished: ask to replay or exit the program.
    if replay() == True:
        game_on = True
    else:
        break
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetBackupScheduleGroupResult',
'AwaitableGetBackupScheduleGroupResult',
'get_backup_schedule_group',
'get_backup_schedule_group_output',
]
@pulumi.output_type
class GetBackupScheduleGroupResult:
    """
    The Backup Schedule Group
    """
    def __init__(__self__, id=None, name=None, start_time=None, type=None):
        # Generated result type: validate each field's runtime type and stash
        # it via pulumi.set so the @pulumi.getter properties below can read it.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if start_time and not isinstance(start_time, dict):
            raise TypeError("Expected argument 'start_time' to be a dict")
        pulumi.set(__self__, "start_time", start_time)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The identifier.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> 'outputs.TimeResponse':
        """
        The start time. When this field is specified we will generate Default GrandFather Father Son Backup Schedules.
        """
        return pulumi.get(self, "start_time")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type.
        """
        return pulumi.get(self, "type")
class AwaitableGetBackupScheduleGroupResult(GetBackupScheduleGroupResult):
    # Awaitable wrapper so the result can be used with `await`: __await__
    # yields nothing (the `if False` keeps this a generator function) and
    # immediately returns a plain result copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetBackupScheduleGroupResult(
            id=self.id,
            name=self.name,
            start_time=self.start_time,
            type=self.type)
def get_backup_schedule_group(device_name: Optional[str] = None,
                              manager_name: Optional[str] = None,
                              resource_group_name: Optional[str] = None,
                              schedule_group_name: Optional[str] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBackupScheduleGroupResult:
    """
    The Backup Schedule Group

    :param str device_name: The name of the device.
    :param str manager_name: The manager name
    :param str resource_group_name: The resource group name
    :param str schedule_group_name: The name of the schedule group.
    """
    # Marshal arguments into the camelCase names the provider invoke expects.
    __args__ = dict()
    __args__['deviceName'] = device_name
    __args__['managerName'] = manager_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['scheduleGroupName'] = schedule_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:storsimple/v20161001:getBackupScheduleGroup', __args__, opts=opts, typ=GetBackupScheduleGroupResult).value

    return AwaitableGetBackupScheduleGroupResult(
        id=__ret__.id,
        name=__ret__.name,
        start_time=__ret__.start_time,
        type=__ret__.type)
# Output-typed variant: lift_output_func wraps get_backup_schedule_group so
# each parameter may be a pulumi.Input; the `...` body is intentional.
@_utilities.lift_output_func(get_backup_schedule_group)
def get_backup_schedule_group_output(device_name: Optional[pulumi.Input[str]] = None,
                                     manager_name: Optional[pulumi.Input[str]] = None,
                                     resource_group_name: Optional[pulumi.Input[str]] = None,
                                     schedule_group_name: Optional[pulumi.Input[str]] = None,
                                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBackupScheduleGroupResult]:
    """
    The Backup Schedule Group

    :param str device_name: The name of the device.
    :param str manager_name: The manager name
    :param str resource_group_name: The resource group name
    :param str schedule_group_name: The name of the schedule group.
    """
    ...
|
from django import forms
from .models import Products
class ProductsForm(forms.ModelForm):
    """Form for creating/editing Products.

    Fixes over the previous version:
    - subclasses ModelForm (a plain forms.Form silently ignores model/fields),
    - the inner class must be named ``Meta`` (capital M) for Django to read it,
    - ``fields`` must be a sequence; ``('items')`` is just the string
      ``'items'``, so it is now the one-element tuple ``('items',)``.
    """
    class Meta:
        model = Products
        fields = ('items',)
|
import RestAPI
import BASE_URL
import json
import getcode
import time
# Mining-pool reporting script: authenticate once, then poll the pool for
# its report interval and submit a report every cycle.
main_server = "https://api.blood.land/mining/servers/9"
user_id = "" #miningpool_ID
user_pw = "" #miningpool_PW
secret_key_1 = BASE_URL.secret_key_1
token = RestAPI.getToken(user_id, user_pw)
print(token)
print(BASE_URL.miningurl)
while True:
    # getInfo is a JSON string; it is re-parsed for every field below
    # (could be json.loads'ed once into a dict).
    getInfo = RestAPI.getInfo(token)
    reportInterval = json.loads(getInfo)['reportInterval']
    timestamp = json.loads(getInfo)['timestamp']
    secret_key_2 = json.loads(getInfo)['secret']
    mo = json.loads(getInfo)['mo']
    mx = json.loads(getInfo)['mx']
    mainwallet = json.loads(getInfo)['user']['mainWalletAddress']
    point = json.loads(getInfo)['user']['point']
    print("unix time", timestamp, "secret_key_2", secret_key_2)
    #code_not_working
    # NOTE(review): `code` is never defined anywhere in this file — the next
    # line raises NameError. Presumably it should be produced by the
    # imported `getcode` module (perhaps from secret_key_1/secret_key_2);
    # confirm the intended call before enabling this script.
    print(code)
    report = RestAPI.report(token, code)
    print(report)
    # reportInterval is in milliseconds; sleep in whole seconds.
    reportsec = int(reportInterval / 1000)
    time.sleep(reportsec)
import numpy as np
import pandas as pd
# Number of repayment periods per year, keyed by payment frequency.
N_PERIOD = {
    'monthly': 12,
    'weekly': 52,
}
class LoanProduct:
    '''Standard compounding loan product.

    Computes the periodic repayment, the fee/interest/payment totals and a
    per-period payment history for a fixed-rate amortising loan.
    '''
    def __init__(
        self, loan, rate, num_years=30, loan_years=None,
        payment_freq='monthly', annual_fee=0,
    ):
        self._loan = loan
        self._rate = rate
        self._num_years = num_years
        # loan_years (actual life of the loan) defaults to the full term.
        self._loan_years = num_years if loan_years is None else loan_years
        self._payment_freq = payment_freq
        self._annual_fee = annual_fee
        self._n_period = N_PERIOD[self._payment_freq]
        # n: total number of repayment periods; r: per-period interest rate.
        self._n = self._n_period * self._num_years
        self._r = self._rate / 100 / self._n_period
        self.periodic_payment = self._calc_periodic_payment()
        self.total_fees = self._calc_total_fees()
        self.total_payment = self._calc_total_payment()
        self.interest_payment = self._calc_interest()
        self.payment_hist = self._calc_payment_hist()

    def _calc_periodic_payment(self):
        '''Periodic repayment from the annuity formula:
        discount_factor = {[(1 + r)^n] - 1} / [r(1 + r)^n]
        '''
        growth = (1 + self._r) ** self._n
        annuity_factor = (growth - 1) / self._r / growth
        return self._loan / annuity_factor

    def _calc_total_fees(self):
        '''Total account fees paid over `loan_years`.'''
        return self._annual_fee * self._loan_years

    def _calc_total_payment(self):
        '''Total paid over all `num_years`: repayments plus fees.'''
        return self.periodic_payment * self._n + self.total_fees

    def _calc_interest(self):
        '''Total interest over all `num_years` (total paid minus fees and principal).'''
        return self.total_payment - self.total_fees - self._loan

    def _calc_payment_hist(self) -> pd.DataFrame:
        '''Per-period payment history over `loan_years` (the `year` column
        expresses the period index in years).
        '''
        periods = self._n_period * self._loan_years
        hist = pd.DataFrame(
            index=np.arange(periods),
            columns=[
                'year',
                'amount_paid',
                'amount_owing',
                'interest',
                'interest_paid',
                'fees_paid',
                'offset_total',
                '_periodic_payment',
                '_periodic_fee',
            ],
            dtype=float,
        )
        hist['year'] = (hist.index + 1) / self._n_period
        # Flat per-period figures, then their running totals.
        hist['_periodic_payment'] = self.periodic_payment
        hist['_periodic_fee'] = self._annual_fee / self._n_period
        hist['interest'] = self.interest_payment / self._n
        hist['amount_paid'] = hist['_periodic_payment'].cumsum()
        hist['interest_paid'] = hist['interest'].cumsum()
        hist['fees_paid'] = hist['_periodic_fee'].cumsum()
        hist['amount_owing'] = self._loan - hist['amount_paid']
        hist['offset_total'] = 0  # no offset facility on the base product
        return hist.drop(columns=['_periodic_payment', '_periodic_fee'])
class LoanOffset(LoanProduct):
    '''Loan with offset account / facility

    The offset balance (initial `offset0` plus a fixed `offset_periodic`
    contribution each period) reduces the balance on which interest accrues;
    the loan therefore finishes early, and fees/history are recomputed over
    the actual payoff time.

    Args:
        offset_first_idx (int): period idx of first offset_periodic
            contribution
    '''
    def __init__(
        self, loan, rate,
        offset0, offset_periodic, offset_first_idx=0,
        num_years=30, loan_years=None,
        payment_freq='monthly', annual_fee=0,
    ):
        self._offset0 = offset0
        self.offset_periodic = offset_periodic
        self.offset_first_idx = offset_first_idx
        super().__init__(
            loan, rate, num_years, loan_years, payment_freq, annual_fee)
        # Recompute history and fees: the base-class __init__ used the
        # no-offset formulas; the offset shortens the actual payoff time.
        self.payment_hist = self._calc_payment_hist()
        self.total_fees = self._calc_total_fees2()

    def _calc_total_fees2(self):
        # Fees accrue only until the (possibly early) payoff year.
        return self._annual_fee * self.payment_hist['year'].iloc[-1]

    def _calc_amount_owing(self, amount_owing, offset_total, idx=0) -> dict:
        '''Recursive calculation of amount owing given fixed periodic offset
        contribution

        Returns a dict of per-period lists ('amount_owing', 'interest',
        'offset_total'), one entry per period until the balance is repaid.
        NOTE(review): recursion depth equals the number of periods — long
        terms may approach Python's default recursion limit.
        '''
        if idx >= self.offset_first_idx:
            offset_total += self.offset_periodic
        # Interest accrues only on the balance not covered by the offset.
        interest = np.max([(amount_owing - offset_total) * self._r, 0])
        amount_owing += (interest - self.periodic_payment)
        if amount_owing <= 1e-5:
            # Paid off (within tolerance): this is the final period.
            return {
                'amount_owing': [amount_owing],
                'interest': [interest],
                'offset_total': [offset_total],
            }
        else:
            tmp = self._calc_amount_owing(amount_owing, offset_total, idx + 1)
            return {
                'amount_owing': [amount_owing] + tmp['amount_owing'],
                'interest': [interest] + tmp['interest'],
                'offset_total': [offset_total] + tmp['offset_total'],
            }

    def _calc_payment_hist(self) -> pd.DataFrame:
        '''Historical payments by period (in years)
        '''
        n_loan = self._n_period * self._loan_years
        df = pd.DataFrame(
            index=np.arange(n_loan),
            columns=[
                'year',
                'amount_paid',
                'amount_owing',
                'interest',
                'interest_paid',
                'offset_total',
                'fees_paid',
                '_periodic_payment',
                '_periodic_fee',
            ],
            dtype=float,
        )
        df['year'] = (df.index + 1) / self._n_period
        df['_periodic_payment'] = self.periodic_payment
        df['_periodic_fee'] = self._annual_fee / self._n_period
        # Fill the simulated columns; rows past payoff stay NaN and are
        # dropped below, truncating the history at the early-payoff period.
        cols = ['amount_owing', 'interest', 'offset_total']
        df[cols] = pd.DataFrame(self._calc_amount_owing(
            self._loan, offset_total=self._offset0)
        )
        df['amount_paid'] = df['_periodic_payment'].cumsum()
        df['interest_paid'] = df['interest'].cumsum()
        df['fees_paid'] = df['_periodic_fee'].cumsum()
        return df.drop(columns=['_periodic_payment', '_periodic_fee']).dropna()
|
from django.urls import path
from . import views
# URL routes for the app: page views first, then JSON API endpoints.
urlpatterns = [
    path("", views.index, name="index"),
    path("login", views.login_view, name="login"),
    path("logout", views.logout_view, name="logout"),
    path("register", views.register, name="register"),

    #API route: posts view
    # "compose" must precede the <str:postview> catch-all so it is not
    # swallowed by it; <int:post_id> only matches numeric segments.
    path("posts/compose", views.compose, name="compose"),
    path("posts/<int:post_id>", views.post_info, name="post_info"),
    path("posts/<str:postview>", views.posts, name="posts"),

    #API route: account info
    path("user/<str:username>", views.user_info, name="user_info")
]
|
import math
class SparseTable:
    """
    Sparse table for idempotent range queries.
    Build: O(N log N); query: O(1).
    """
    def __init__(self, values, fn):
        """
        :param list values:
        :param callable fn: an associative, idempotent function such as
            min or max. Non-idempotent functions (e.g. add) are NOT allowed,
            because query windows overlap.
        """
        self._values = values
        self._fn = fn
        # Build the sparse table.
        # self._table[i][p]: index of the value obtained by applying fn
        # over the window [i, i + 2^p).
        self._table = self._build(values, fn)
        # self._msb[i]: most significant bit of i — which power p to use
        # for a query window of length i.
        self._msb = [0] * (len(values) + 1)
        for i in range(2, len(values) + 1):
            self._msb[i] = self._msb[i >> 1] + 1

    @staticmethod
    def _build(values, fn):
        # math.log2 is unavailable on AtCoder's PyPy 2.4.0, hence math.log(x, 2).
        size = int(math.log(len(values), 2)) + 1
        st = [[0] * size for _ in range(len(values))]
        for i in range(len(values)):
            st[i][0] = i
        # Each level p combines two half-windows of size 2^(p-1);
        # st stores the INDEX of the winning value, not the value itself.
        for p in range(1, size):
            for i in range(len(values)):
                q = min(i + (1 << (p - 1)), len(values) - 1)
                l = st[i][p - 1]
                r = st[q][p - 1]
                if values[l] == fn(values[l], values[r]):
                    st[i][p] = l
                else:
                    st[i][p] = r
        return st

    def get(self, a, b):
        """
        Result of applying fn over the half-open interval [a, b);
        returns None for an empty interval.
        :param int a:
        :param int b:
        """
        if b <= a:
            return None
        # Cover [a, b) with two (possibly overlapping) windows of size 2^p;
        # overlap is harmless because fn is idempotent.
        p = self._msb[b - a]
        return self._fn(
            self._values[self._table[a][p]],
            self._values[self._table[b - (1 << p)][p]]
        )
|
from django.db import models
from Account.models import User,Seller,Buyer
from Sell.models import Product
# Create your models here.
class Order(models.Model):
    """An order of `quantity` units of a Product between a Buyer and a Seller,
    with document uploads for each fulfilment stage."""
    seller = models.ForeignKey(Seller, related_name='order', on_delete=models.CASCADE)
    buyer = models.ForeignKey(Buyer, related_name='order', on_delete=models.CASCADE)
    product = models.ForeignKey(Product, related_name='order', on_delete=models.CASCADE)
    # Previously declared as a set literal, whose iteration order is
    # nondeterministic — Django saw a different choice order on every process
    # start, causing spurious makemigrations churn. A tuple keeps it stable.
    # NOTE(review): the stored value 'conplete' is a typo for 'complete'; it
    # is kept as-is because changing a stored value requires a data migration.
    type_choice = (
        ('placed', 'placed'),
        ('dispatched', 'dispatched'),
        ('confirmed', 'confirmed'),
        ('conplete', 'complete'),
    )
    status = models.CharField(max_length=50, null=True, choices=type_choice, default='placed')
    quantity = models.IntegerField(null=True)
    invoice = models.FileField(upload_to='docOrder/invoices/', blank=True, null=True)
    dispatch_slip = models.FileField(upload_to='docOrder/dispatch/', blank=True, null=True)
    payment_slip = models.FileField(upload_to='docOrder/payment/', blank=True, null=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.