text stringlengths 38 1.54M |
|---|
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from localflavor.us.us_states import STATE_CHOICES
from localflavor.us.models import USStateField
class MoveType(models.Model):
    # A customer's moving / change-of-address request.
    #
    # Choice constants — these are the values stored in the database.
    temporary = 'Temporary'
    permanent = 'Permanent'
    individual = 'Individual'
    family = 'Family'
    # NOTE(review): the stored value is misspelled ('Buisness'); fixing it
    # would require a data migration for existing rows, so it is left as-is.
    buisness = 'Buisness'
    own = 'Own'
    rent = 'Rent'
    movingtype_choice = ((temporary, 'Temporary'),
                         (permanent, 'Permanent'))
    moving_status_choices = (
        (individual, 'Individual'),
        (family, 'Family'),
        (buisness, 'Business'))
    TITLE_CHOICES = (
        ('MR', 'Mr.'),
        ('MRS', 'Mrs.'),
        ('MS', 'Ms.'),)
    place_choices = ((own, 'Own'),
                     (rent, 'Rent'))
    # Move details.
    movingtype = models.CharField(max_length=100, choices=movingtype_choice, default=None)
    movingdate = models.DateField(blank=True, null=True)
    movingstatus = models.CharField(max_length=30, choices=moving_status_choices, default=None)
    # Business name only applies when movingstatus is 'Buisness'.
    buisnessname = models.CharField(max_length=100, default=None, blank=True, null=True)
    # Contact information for the person requesting the move.
    title = models.CharField(max_length=10, choices=TITLE_CHOICES, default=None)
    first_name = models.CharField(max_length=100, default=None)
    last_name = models.CharField(max_length=100, default=None)
    phone_number = models.CharField(max_length=12, default=None)
    exclusive_notification = models.BooleanField()
    primary_email = models.EmailField(max_length=50)
    secondary_email = models.EmailField(max_length=50, blank=True)
    person_validate = models.BooleanField(default=None)
    person_authorization = models.BooleanField(default=None)
    # Old address (own_rent records whether the old place was owned or rented).
    own_rent = models.CharField(max_length=5, choices=place_choices, default=None)
    old_street = models.CharField(max_length=100, default=None)
    old_apt_suite = models.CharField(max_length=100, default=None, blank=True)
    old_city = models.CharField(max_length=100, default=None)
    old_state = USStateField(choices=STATE_CHOICES, default=None)
    old_zip_code = models.CharField(max_length=5, default=None, blank=True)
    # New address.
    new_street = models.CharField(max_length=100, default=None)
    new_apt_suite = models.CharField(max_length=100, default=None, blank=True)
    new_city = models.CharField(max_length=100, default=None)
    new_state = USStateField(choices=STATE_CHOICES, default=None)
    new_zip_code = models.CharField(max_length=5, default=None, blank=True)

    def __unicode__(self):
        # Python 2 string representation (legacy Django convention).
        return self.first_name
class Contact(models.Model):
    # A single "contact us" form submission.
    subject = models.CharField(max_length=100)
    message = models.TextField()
    email = models.EmailField()
    # Whether the sender asked to receive a copy of the message.
    cc_myself = models.BooleanField()
|
# Chocolatey package version checking and packing of package
# Written by Hadrien Dussuel
import os
import subprocess
import json
import urllib.request
import hashlib
import zipfile
import pathlib
import shutil
import functions as func
# Check every local package source and automatically pack any package whose
# locally-known version has not been packed yet.
packages = os.listdir('src/')
print(packages)
for p in packages:
    print("Checking -> " + p)
    # versionPublished = subprocess.getoutput('choco search -r --pre ' + p).split('|')[1]
    versionLocalData = func.GetLocalData(p)
    versionLocal = versionLocalData["version"]
    if not os.path.exists("packed/" + p + "." + versionLocal + ".nupkg"):
        print("Packing -> " + p + " | " + versionLocal)
        # Make sure the work and output directories exist before writing into
        # them (os.rename below fails if "packed/" is missing).
        os.makedirs("tmp/tools", exist_ok=True)
        os.makedirs("packed", exist_ok=True)
        # Download the installer once and compute its checksum for the
        # install script template.
        urllib.request.urlretrieve(versionLocalData["url"], "tmp/tmpfile")
        checksum = func.GetFileChecksum("tmp/tmpfile")
        # Load the templated source files.
        fileNuspec = func.GetFileContent("src/" + p + "/" + p + ".nuspec")
        fileInstall = func.GetFileContent("src/" + p + "/tools/chocolateyinstall.ps1")
        # Substitute the template variables.
        fileNuspec = fileNuspec.replace("{{version}}", versionLocal)
        fileInstall = fileInstall.replace("{{checksum}}", checksum).replace("{{url}}", versionLocalData["url"])
        func.WriteFileContent("tmp/" + p + ".nuspec", fileNuspec)
        func.WriteFileContent("tmp/tools/chocolateyinstall.ps1", fileInstall)
        # Run "choco pack" inside the temp directory.
        choco = subprocess.Popen(["choco", "pack"], cwd="tmp/")
        choco.wait()
        # Move the produced package to the output directory.
        os.rename("tmp/" + p + "." + versionLocal + ".nupkg",
                  "packed/" + p + "." + versionLocal + ".nupkg")
        # Clean up the temp directory for the next package.
        shutil.rmtree('tmp/')
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import uuid
from openstack.load_balancer.v2 import health_monitor
from openstack.load_balancer.v2 import l7_policy
from openstack.load_balancer.v2 import l7_rule
from openstack.load_balancer.v2 import listener
from openstack.load_balancer.v2 import load_balancer
from openstack.load_balancer.v2 import member
from openstack.load_balancer.v2 import pool
from openstack.tests.functional import base
from openstack.tests.functional.load_balancer import base as lb_base
@unittest.skipUnless(base.service_exists(service_type='load-balancer'),
                     'Load-balancer service does not exist')
class TestLoadBalancer(lb_base.BaseLBFunctionalTest):
    """Functional tests for the load-balancer (Octavia) v2 API.

    One load balancer — with its listener, pool, member, health monitor,
    L7 policy and L7 rule — is created once in setUpClass and shared by
    all tests to keep the total runtime down.
    """

    HM_NAME = uuid.uuid4().hex
    L7POLICY_NAME = uuid.uuid4().hex
    LB_NAME = uuid.uuid4().hex
    LISTENER_NAME = uuid.uuid4().hex
    MEMBER_NAME = uuid.uuid4().hex
    POOL_NAME = uuid.uuid4().hex
    UPDATE_NAME = uuid.uuid4().hex
    HM_ID = None
    L7POLICY_ID = None
    LB_ID = None
    LISTENER_ID = None
    MEMBER_ID = None
    POOL_ID = None
    VIP_SUBNET_ID = None
    PROJECT_ID = None
    PROTOCOL = 'HTTP'
    PROTOCOL_PORT = 80
    LB_ALGORITHM = 'ROUND_ROBIN'
    MEMBER_ADDRESS = '192.0.2.16'
    WEIGHT = 10
    DELAY = 2
    TIMEOUT = 1
    MAX_RETRY = 3
    HM_TYPE = 'HTTP'
    ACTION = 'REDIRECT_TO_URL'
    REDIRECT_URL = 'http://www.example.com'
    COMPARE_TYPE = 'CONTAINS'
    L7RULE_TYPE = 'HOST_NAME'
    L7RULE_VALUE = 'example'

    # Note: Creating load balancers can be slow on some hosts due to nova
    # instance boot times (up to ten minutes) so we are consolidating
    # all of our functional tests here to reduce test runtime.
    @classmethod
    def setUpClass(cls):
        super(TestLoadBalancer, cls).setUpClass()
        subnets = list(cls.conn.network.subnets())
        cls.VIP_SUBNET_ID = subnets[0].id
        cls.PROJECT_ID = cls.conn.session.get_project_id()
        test_lb = cls.conn.load_balancer.create_load_balancer(
            name=cls.LB_NAME, vip_subnet_id=cls.VIP_SUBNET_ID,
            project_id=cls.PROJECT_ID)
        assert isinstance(test_lb, load_balancer.LoadBalancer)
        # Plain asserts here: unittest assertion methods are instance
        # methods, so the original cls.assertIs(...) calls raised
        # TypeError when invoked through the class in setUpClass.
        assert cls.LB_NAME == test_lb.name
        # Wait for the LB to go ACTIVE. On non-virtualization enabled hosts
        # it can take nova up to ten minutes to boot a VM.
        cls.lb_wait_for_status(test_lb, status='ACTIVE',
                               failures=['ERROR'], interval=1, wait=600)
        cls.LB_ID = test_lb.id
        test_listener = cls.conn.load_balancer.create_listener(
            name=cls.LISTENER_NAME, protocol=cls.PROTOCOL,
            protocol_port=cls.PROTOCOL_PORT, loadbalancer_id=cls.LB_ID)
        assert isinstance(test_listener, listener.Listener)
        assert cls.LISTENER_NAME == test_listener.name
        cls.LISTENER_ID = test_listener.id
        cls.lb_wait_for_status(test_lb, status='ACTIVE',
                               failures=['ERROR'])
        test_pool = cls.conn.load_balancer.create_pool(
            name=cls.POOL_NAME, protocol=cls.PROTOCOL,
            lb_algorithm=cls.LB_ALGORITHM, listener_id=cls.LISTENER_ID)
        assert isinstance(test_pool, pool.Pool)
        assert cls.POOL_NAME == test_pool.name
        cls.POOL_ID = test_pool.id
        cls.lb_wait_for_status(test_lb, status='ACTIVE',
                               failures=['ERROR'])
        test_member = cls.conn.load_balancer.create_member(
            pool=cls.POOL_ID, name=cls.MEMBER_NAME, address=cls.MEMBER_ADDRESS,
            protocol_port=cls.PROTOCOL_PORT, weight=cls.WEIGHT)
        assert isinstance(test_member, member.Member)
        assert cls.MEMBER_NAME == test_member.name
        cls.MEMBER_ID = test_member.id
        cls.lb_wait_for_status(test_lb, status='ACTIVE',
                               failures=['ERROR'])
        test_hm = cls.conn.load_balancer.create_health_monitor(
            pool_id=cls.POOL_ID, name=cls.HM_NAME, delay=cls.DELAY,
            timeout=cls.TIMEOUT, max_retries=cls.MAX_RETRY, type=cls.HM_TYPE)
        assert isinstance(test_hm, health_monitor.HealthMonitor)
        assert cls.HM_NAME == test_hm.name
        cls.HM_ID = test_hm.id
        cls.lb_wait_for_status(test_lb, status='ACTIVE',
                               failures=['ERROR'])
        test_l7policy = cls.conn.load_balancer.create_l7_policy(
            listener_id=cls.LISTENER_ID, name=cls.L7POLICY_NAME,
            action=cls.ACTION, redirect_url=cls.REDIRECT_URL)
        assert isinstance(test_l7policy, l7_policy.L7Policy)
        assert cls.L7POLICY_NAME == test_l7policy.name
        cls.L7POLICY_ID = test_l7policy.id
        cls.lb_wait_for_status(test_lb, status='ACTIVE',
                               failures=['ERROR'])
        test_l7rule = cls.conn.load_balancer.create_l7_rule(
            l7_policy=cls.L7POLICY_ID, compare_type=cls.COMPARE_TYPE,
            type=cls.L7RULE_TYPE, value=cls.L7RULE_VALUE)
        assert isinstance(test_l7rule, l7_rule.L7Rule)
        assert cls.COMPARE_TYPE == test_l7rule.compare_type
        cls.L7RULE_ID = test_l7rule.id
        cls.lb_wait_for_status(test_lb, status='ACTIVE',
                               failures=['ERROR'])

    @classmethod
    def tearDownClass(cls):
        # Tear down in reverse creation order, waiting for the LB to go
        # back to ACTIVE between deletes.
        test_lb = cls.conn.load_balancer.get_load_balancer(cls.LB_ID)
        cls.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        cls.conn.load_balancer.delete_l7_rule(
            cls.L7RULE_ID, l7_policy=cls.L7POLICY_ID, ignore_missing=False)
        cls.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        cls.conn.load_balancer.delete_l7_policy(
            cls.L7POLICY_ID, ignore_missing=False)
        cls.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        cls.conn.load_balancer.delete_health_monitor(
            cls.HM_ID, ignore_missing=False)
        cls.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        cls.conn.load_balancer.delete_member(
            cls.MEMBER_ID, cls.POOL_ID, ignore_missing=False)
        cls.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        cls.conn.load_balancer.delete_pool(cls.POOL_ID, ignore_missing=False)
        cls.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        cls.conn.load_balancer.delete_listener(cls.LISTENER_ID,
                                               ignore_missing=False)
        cls.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        cls.conn.load_balancer.delete_load_balancer(
            cls.LB_ID, ignore_missing=False)

    def test_lb_find(self):
        test_lb = self.conn.load_balancer.find_load_balancer(self.LB_NAME)
        self.assertEqual(self.LB_ID, test_lb.id)

    def test_lb_get(self):
        test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID)
        self.assertEqual(self.LB_NAME, test_lb.name)
        self.assertEqual(self.LB_ID, test_lb.id)
        self.assertEqual(self.VIP_SUBNET_ID, test_lb.vip_subnet_id)

    def test_lb_list(self):
        names = [lb.name for lb in self.conn.load_balancer.load_balancers()]
        self.assertIn(self.LB_NAME, names)

    def test_lb_update(self):
        update_lb = self.conn.load_balancer.update_load_balancer(
            self.LB_ID, name=self.UPDATE_NAME)
        self.lb_wait_for_status(update_lb, status='ACTIVE',
                                failures=['ERROR'])
        test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID)
        self.assertEqual(self.UPDATE_NAME, test_lb.name)
        # Restore the original name so other tests keep working.
        update_lb = self.conn.load_balancer.update_load_balancer(
            self.LB_ID, name=self.LB_NAME)
        self.lb_wait_for_status(update_lb, status='ACTIVE',
                                failures=['ERROR'])
        test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID)
        self.assertEqual(self.LB_NAME, test_lb.name)

    def test_listener_find(self):
        test_listener = self.conn.load_balancer.find_listener(
            self.LISTENER_NAME)
        self.assertEqual(self.LISTENER_ID, test_listener.id)

    def test_listener_get(self):
        test_listener = self.conn.load_balancer.get_listener(self.LISTENER_ID)
        self.assertEqual(self.LISTENER_NAME, test_listener.name)
        self.assertEqual(self.LISTENER_ID, test_listener.id)
        self.assertEqual(self.PROTOCOL, test_listener.protocol)
        self.assertEqual(self.PROTOCOL_PORT, test_listener.protocol_port)

    def test_listener_list(self):
        names = [ls.name for ls in self.conn.load_balancer.listeners()]
        self.assertIn(self.LISTENER_NAME, names)

    def test_listener_update(self):
        test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID)
        self.conn.load_balancer.update_listener(
            self.LISTENER_ID, name=self.UPDATE_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE',
                                failures=['ERROR'])
        test_listener = self.conn.load_balancer.get_listener(self.LISTENER_ID)
        self.assertEqual(self.UPDATE_NAME, test_listener.name)
        self.conn.load_balancer.update_listener(
            self.LISTENER_ID, name=self.LISTENER_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE',
                                failures=['ERROR'])
        test_listener = self.conn.load_balancer.get_listener(self.LISTENER_ID)
        self.assertEqual(self.LISTENER_NAME, test_listener.name)

    def test_pool_find(self):
        test_pool = self.conn.load_balancer.find_pool(self.POOL_NAME)
        self.assertEqual(self.POOL_ID, test_pool.id)

    def test_pool_get(self):
        test_pool = self.conn.load_balancer.get_pool(self.POOL_ID)
        self.assertEqual(self.POOL_NAME, test_pool.name)
        self.assertEqual(self.POOL_ID, test_pool.id)
        self.assertEqual(self.PROTOCOL, test_pool.protocol)

    def test_pool_list(self):
        names = [pool.name for pool in self.conn.load_balancer.pools()]
        self.assertIn(self.POOL_NAME, names)

    def test_pool_update(self):
        test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID)
        self.conn.load_balancer.update_pool(self.POOL_ID,
                                            name=self.UPDATE_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_pool = self.conn.load_balancer.get_pool(self.POOL_ID)
        self.assertEqual(self.UPDATE_NAME, test_pool.name)
        self.conn.load_balancer.update_pool(self.POOL_ID,
                                            name=self.POOL_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_pool = self.conn.load_balancer.get_pool(self.POOL_ID)
        self.assertEqual(self.POOL_NAME, test_pool.name)

    def test_member_find(self):
        test_member = self.conn.load_balancer.find_member(self.MEMBER_NAME,
                                                          self.POOL_ID)
        self.assertEqual(self.MEMBER_ID, test_member.id)

    def test_member_get(self):
        test_member = self.conn.load_balancer.get_member(self.MEMBER_ID,
                                                         self.POOL_ID)
        self.assertEqual(self.MEMBER_NAME, test_member.name)
        self.assertEqual(self.MEMBER_ID, test_member.id)
        self.assertEqual(self.MEMBER_ADDRESS, test_member.address)
        self.assertEqual(self.PROTOCOL_PORT, test_member.protocol_port)
        self.assertEqual(self.WEIGHT, test_member.weight)

    def test_member_list(self):
        names = [mb.name for mb in self.conn.load_balancer.members(
            self.POOL_ID)]
        self.assertIn(self.MEMBER_NAME, names)

    def test_member_update(self):
        test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID)
        self.conn.load_balancer.update_member(self.MEMBER_ID, self.POOL_ID,
                                              name=self.UPDATE_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_member = self.conn.load_balancer.get_member(self.MEMBER_ID,
                                                         self.POOL_ID)
        self.assertEqual(self.UPDATE_NAME, test_member.name)
        self.conn.load_balancer.update_member(self.MEMBER_ID, self.POOL_ID,
                                              name=self.MEMBER_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_member = self.conn.load_balancer.get_member(self.MEMBER_ID,
                                                         self.POOL_ID)
        self.assertEqual(self.MEMBER_NAME, test_member.name)

    def test_health_monitor_find(self):
        test_hm = self.conn.load_balancer.find_health_monitor(self.HM_NAME)
        self.assertEqual(self.HM_ID, test_hm.id)

    def test_health_monitor_get(self):
        test_hm = self.conn.load_balancer.get_health_monitor(self.HM_ID)
        self.assertEqual(self.HM_NAME, test_hm.name)
        self.assertEqual(self.HM_ID, test_hm.id)
        self.assertEqual(self.DELAY, test_hm.delay)
        self.assertEqual(self.TIMEOUT, test_hm.timeout)
        self.assertEqual(self.MAX_RETRY, test_hm.max_retries)
        self.assertEqual(self.HM_TYPE, test_hm.type)

    def test_health_monitor_list(self):
        names = [hm.name for hm in self.conn.load_balancer.health_monitors()]
        self.assertIn(self.HM_NAME, names)

    def test_health_monitor_update(self):
        test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID)
        self.conn.load_balancer.update_health_monitor(self.HM_ID,
                                                      name=self.UPDATE_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_hm = self.conn.load_balancer.get_health_monitor(self.HM_ID)
        self.assertEqual(self.UPDATE_NAME, test_hm.name)
        self.conn.load_balancer.update_health_monitor(self.HM_ID,
                                                      name=self.HM_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_hm = self.conn.load_balancer.get_health_monitor(self.HM_ID)
        self.assertEqual(self.HM_NAME, test_hm.name)

    def test_l7_policy_find(self):
        test_l7_policy = self.conn.load_balancer.find_l7_policy(
            self.L7POLICY_NAME)
        self.assertEqual(self.L7POLICY_ID, test_l7_policy.id)

    def test_l7_policy_get(self):
        test_l7_policy = self.conn.load_balancer.get_l7_policy(
            self.L7POLICY_ID)
        self.assertEqual(self.L7POLICY_NAME, test_l7_policy.name)
        self.assertEqual(self.L7POLICY_ID, test_l7_policy.id)
        self.assertEqual(self.ACTION, test_l7_policy.action)

    def test_l7_policy_list(self):
        names = [l7.name for l7 in self.conn.load_balancer.l7_policies()]
        self.assertIn(self.L7POLICY_NAME, names)

    def test_l7_policy_update(self):
        test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID)
        self.conn.load_balancer.update_l7_policy(
            self.L7POLICY_ID, name=self.UPDATE_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_l7_policy = self.conn.load_balancer.get_l7_policy(
            self.L7POLICY_ID)
        self.assertEqual(self.UPDATE_NAME, test_l7_policy.name)
        self.conn.load_balancer.update_l7_policy(self.L7POLICY_ID,
                                                 name=self.L7POLICY_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_l7_policy = self.conn.load_balancer.get_l7_policy(
            self.L7POLICY_ID)
        self.assertEqual(self.L7POLICY_NAME, test_l7_policy.name)

    def test_l7_rule_find(self):
        test_l7_rule = self.conn.load_balancer.find_l7_rule(
            self.L7RULE_ID, self.L7POLICY_ID)
        self.assertEqual(self.L7RULE_ID, test_l7_rule.id)
        self.assertEqual(self.L7RULE_TYPE, test_l7_rule.type)

    def test_l7_rule_get(self):
        test_l7_rule = self.conn.load_balancer.get_l7_rule(
            self.L7RULE_ID, l7_policy=self.L7POLICY_ID)
        self.assertEqual(self.L7RULE_ID, test_l7_rule.id)
        self.assertEqual(self.COMPARE_TYPE, test_l7_rule.compare_type)
        self.assertEqual(self.L7RULE_TYPE, test_l7_rule.type)
        self.assertEqual(self.L7RULE_VALUE, test_l7_rule.rule_value)

    def test_l7_rule_list(self):
        ids = [l7.id for l7 in self.conn.load_balancer.l7_rules(
            l7_policy=self.L7POLICY_ID)]
        self.assertIn(self.L7RULE_ID, ids)

    def test_l7_rule_update(self):
        test_lb = self.conn.load_balancer.get_load_balancer(self.LB_ID)
        self.conn.load_balancer.update_l7_rule(self.L7RULE_ID,
                                               l7_policy=self.L7POLICY_ID,
                                               rule_value=self.UPDATE_NAME)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_l7_rule = self.conn.load_balancer.get_l7_rule(
            self.L7RULE_ID, l7_policy=self.L7POLICY_ID)
        self.assertEqual(self.UPDATE_NAME, test_l7_rule.rule_value)
        self.conn.load_balancer.update_l7_rule(self.L7RULE_ID,
                                               l7_policy=self.L7POLICY_ID,
                                               rule_value=self.L7RULE_VALUE)
        self.lb_wait_for_status(test_lb, status='ACTIVE', failures=['ERROR'])
        test_l7_rule = self.conn.load_balancer.get_l7_rule(
            self.L7RULE_ID, l7_policy=self.L7POLICY_ID)
        self.assertEqual(self.L7RULE_VALUE, test_l7_rule.rule_value)
|
'''
Created by auto_sdk on 2013-11-07 12:53:22
'''
from top.api.base import RestApi
class TripJipiaoAgentItinerarySendRequest(RestApi):
    """Request object for the taobao.trip.jipiao.agent.itinerary.send TOP API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; the caller fills these in before sending.
        for param in ('company_code', 'express_code', 'itinerary_id',
                      'itinerary_no', 'send_date'):
            setattr(self, param, None)

    def getapiname(self):
        """Return the TOP API method name this request maps to."""
        return 'taobao.trip.jipiao.agent.itinerary.send'
|
from neteaseSpider.proxy.parsing_html import *
from neteaseSpider.proxy.checking_ip import *
def _fetch_via_proxy(fetch_fn, ip_list, ip_pools_path, open_proxy):
    """Try each proxy ip in turn until fetch_fn succeeds once (best effort)."""
    for i, proxy_ip in enumerate(ip_list):
        try:
            print("正在使用第" + str(i) + "条代理ip")
            fetch_fn(proxy_ip, ip_pools_path, open_proxy)
            break
        except Exception:
            # A failing proxy simply falls through to the next candidate.
            pass


def main():
    """Scrape free proxy ips from several sources, then dedupe and validate them."""
    ipPoolsPath = "ipProxy/ip_pools.txt"  # raw scraped ips
    ipFormatPoolsPath = "ipProxy/ipFormatPools.txt"  # formatted ips
    ipUsePath = "ipProxy/ipUse.txt"  # validated, usable ips
    openProxy = False  # whether to scrape through a proxy
    # One fetcher per source site; all share the same (ip, path, flag) signature.
    fetchers = (get_data5u_free_ip, get_kuaidaili_free_ip, get_xsdaili_free_ip,
                get_xicidaili_free_ip, get_89ip_free_ip)
    if not openProxy:
        # Direct mode: hit every source without a proxy.
        for fetch in fetchers:
            fetch(None, ipPoolsPath, openProxy)
    else:
        # Proxy mode: load the currently usable proxies and retry each
        # source through them until one works.
        available_ip_path = "ipProxy/ipUse.txt"
        with open(available_ip_path, "r") as fr:
            ipUseList = [line.replace("\n", "") for line in fr.readlines()]
        for fetch in fetchers:
            _fetch_via_proxy(fetch, ipUseList, ipPoolsPath, openProxy)
    # Normalize the scraped ips and drop duplicates.
    ip_format(ipPoolsPath, ipFormatPoolsPath)
    check_repeat(ipFormatPoolsPath)
    # Validate ip availability.
    ip_batch_inspection(ipFormatPoolsPath, ipUsePath)


if __name__ == '__main__':
    main()
import FWCore.ParameterSet.Config as cms
# Unpacker for L1 trigger raw data (Stage-2 calorimeter setup).
l1tRawToDigi = cms.EDProducer(
    "L1TRawToDigi",
    # Unpacker setup class selecting the payload format.
    Setup = cms.string("stage2::CaloSetup"),
    # Raw data collection to unpack.
    InputLabel = cms.InputTag("l1tDigiToRaw"),
    # FED ids to read; presumably the Stage-2 calo FED — confirm against the FED map.
    FedIds = cms.vint32(1352),
    FWId = cms.uint32(1),
    # When True, FWId overrides the firmware version found in the data.
    FWOverride = cms.bool(False),
    TMTCheck = cms.bool(True),
    # Header/trailer lengths (bytes) for the SLink, AMC and AMC13 framing.
    lenSlinkHeader = cms.untracked.int32(8),
    lenSlinkTrailer = cms.untracked.int32(8),
    lenAMCHeader = cms.untracked.int32(8),
    lenAMCTrailer = cms.untracked.int32(0),
    lenAMC13Header = cms.untracked.int32(8),
    lenAMC13Trailer = cms.untracked.int32(8)
)
|
import tkinter as tk
import sqlite3
LARGE_FONT=("Verdana",12, "bold")
conn = sqlite3.connect('users.db')
conn.execute("CREATE TABLE IF NOT EXISTS pins(id INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, pin TEXT NOT NULL, balance INT NOT NULL)")
#conn.row_factory = lambda cursor, row: row[0]
#try:
# conn.execute("CREATE UNIQUE INDEX idx ON pins(pin)")
#except:
# pass
class container(tk.Tk):
    # Top-level application window. Owns the shared parent frame, the
    # module-level Tk variables used by all page callbacks, and the
    # name -> frame registry used for screen switching.
    def __init__(self):
        tk.Tk.__init__(self)
        self.winfo_toplevel().title("Fake ATM")
        # Single parent frame that every page is gridded into.
        global parent
        parent = tk.Frame(self, bg="#728bd4")
        # Shared Tk variables (entry fields and status messages) used by
        # the module-level callbacks below.
        global regPin
        global regBalance
        global loginPin
        global transactionMoney
        global changemsg
        global oldPin
        global newPin
        global regmsg
        global balanceamt
        global checkmsg
        global transmsg
        global delePin
        global delemsg
        regPin = tk.StringVar()
        regBalance = tk.DoubleVar()
        #loginName = tk.StringVar()
        loginPin = tk.StringVar()
        transactionMoney = tk.DoubleVar()
        changemsg = tk.StringVar()
        oldPin = tk.StringVar()
        newPin = tk.StringVar()
        regmsg = tk.StringVar()
        balanceamt = tk.StringVar()
        checkmsg = tk.StringVar()
        transmsg = tk.StringVar()
        delePin = tk.StringVar()
        delemsg = tk.StringVar()
        parent.pack(side="top", fill="both", expand=True)
        # Keep the centre column (1) fixed; columns 0 and 2 absorb extra width.
        parent.grid_columnconfigure(0, weight=1)
        parent.grid_columnconfigure(2, weight=1)
        # Registry of page frames, keyed by name (see add/show_frame).
        self.frames = {}

    def add(self, frameName, frame):
        # Register a page and stack it in the shared grid cell.
        self.frames[frameName] = frame
        self.frames[frameName].grid(row=0, column=1, sticky="nsew")

    def show_frame(self, cont):
        # Raise the requested page; returning to "Main" resets all fields.
        frame = self.frames[cont]
        frame.tkraise()
        if(cont == "Main"):
            regPin.set("")
            regBalance.set(0.0)
            #loginName = tk.StringVar()
            loginPin.set("")
            transactionMoney.set(0.0)
            changemsg.set("")
            oldPin.set("")
            newPin.set("")
            regmsg.set("")
            balanceamt.set("")
            checkmsg.set("")
            transmsg.set("")
            delePin.set("")
            delemsg.set("")
class page(tk.Frame):
    """A single ATM screen: a titled frame that stacks widgets vertically."""

    def __init__(self, parent, Name):
        # Next free grid row in the centre column.
        self.i = 1
        tk.Frame.__init__(self, parent, bg="#728bd4")
        spacer = tk.Label(self, text="", bg="#728bd4")
        spacer.grid(row=0, column=1, sticky="nsew")
        title = tk.Label(self, text=Name, font=LARGE_FONT, bg="#728bd4", fg="#FFD700")
        title.grid(row=1, column=1, sticky="nsew")
        # Outer columns absorb extra width so the content stays centred.
        for col in (0, 2):
            self.grid_columnconfigure(col, weight=1)
        self.i += 1

    def _place(self, widget):
        # Drop the widget on the next free row of the centre column.
        widget.grid(row=self.i, column=1, sticky="nsew")
        self.i += 1

    def addLabel(self, label):
        """Recolor a label to the page theme and append it."""
        label["fg"] = "#FFD700"
        label["bg"] = "#728bd4"
        self._place(label)

    def addButton(self, button):
        """Append a button below the previous widget."""
        self._place(button)

    def addEntry(self, entry):
        """Append an entry field below the previous widget."""
        self._place(entry)
def checkIfPinExists(pin, msg):
    """Return True (msg "Successful") when `pin` is NOT in the table.

    Despite the name, True means the pin is free to use. The original
    fetched the whole pin column and did a Python membership test, which
    also broke when pins were stored as integers; a parameterized WHERE
    clause is both safe and consistent.
    """
    cur = conn.cursor()
    cur.execute("SELECT 1 FROM pins WHERE pin=?", (pin,))
    if cur.fetchone() is not None:
        msg.set("Error")
        return False
    msg.set("Successful")
    return True
def addUser():
    """Register a new 4-digit pin with an opening balance.

    Sets regmsg to "Error" when the pin exists, is not 4 characters, or
    the insert fails; clears the entry fields afterwards either way.
    """
    try:
        if (checkIfPinExists(regPin.get(), regmsg)) and (len(regPin.get()) == 4):
            # Parameterized query: never build SQL by string concatenation
            # (the original was injectable and mangled pins with leading zeros).
            conn.execute("INSERT INTO pins(pin, balance) VALUES(?, ?)",
                         (regPin.get(), regBalance.get()))
        else:
            regmsg.set("Error")
    except Exception:
        regmsg.set("Error")
    regPin.set(0)
    regBalance.set(0.0)
def check():
    """Validate the entered pin and switch to the transaction screen.

    On success, stores the matching row in the global `out`
    ([id, pin, balance]) for the transaction callbacks.
    """
    try:
        cur = conn.cursor()
        # Parameterized lookup instead of string-concatenated SQL.
        cur.execute("SELECT * FROM pins WHERE pin=?", (loginPin.get(),))
        data = cur.fetchall()
        global out
        out = [item for t in data for item in t]
        if (len(out) == 0):
            checkmsg.set("Error")
        else:
            FakeATM.show_frame("Transaction")
            balanceamt.set("Current Balance: " + str(out[2]))
    except Exception:
        checkmsg.set("Error")
def withdraw():
    """Withdraw the entered amount from the logged-in account (global `out`)."""
    try:
        bal = int(out[2])
        if (transactionMoney.get() > bal):
            # Insufficient funds.
            transmsg.set("Error")
        else:
            bal = bal - transactionMoney.get()
            # Parameterized update instead of string-concatenated SQL.
            conn.execute("UPDATE pins SET balance=? WHERE pin=?", (bal, out[1]))
            balanceamt.set("Current Balance: " + str(bal))
            out[2] = bal
            transmsg.set("Successful")
    except Exception:
        transmsg.set("Error")
def deposit():
    """Add the entered amount to the logged-in account (global `out`)."""
    try:
        bal = int(out[2])
        bal = bal + transactionMoney.get()
        balanceamt.set("Current Balance: " + str(bal))
        # Parameterized update instead of string-concatenated SQL.
        conn.execute("UPDATE pins SET balance=? WHERE pin=?", (bal, out[1]))
        out[2] = bal
        transmsg.set("Successful")
    except Exception:
        transmsg.set("Error")
def change():
    """Replace an existing pin with a new, unused, 4-digit pin."""
    try:
        # checkIfPinExists returns False when the pin IS present, so the
        # old pin must fail the check and the new pin must pass it.
        if not checkIfPinExists(oldPin.get(), changemsg):
            if (checkIfPinExists(newPin.get(), changemsg)) and (len(newPin.get()) == 4):
                # Parameterized update instead of string-concatenated SQL.
                conn.execute("UPDATE pins SET pin=? WHERE pin=?",
                             (newPin.get(), oldPin.get()))
            else:
                changemsg.set("Error")
        else:
            changemsg.set("Error")
    except Exception:
        changemsg.set("Error")
    oldPin.set("")
    newPin.set("")
def delete():
    """Delete the account whose pin was entered on the delete screen."""
    try:
        # checkIfPinExists returns False when the pin IS present.
        if not checkIfPinExists(delePin.get(), delemsg):
            # Parameterized delete instead of string-concatenated SQL.
            conn.execute("DELETE FROM pins WHERE pin=?", (delePin.get(),))
            delemsg.set("Successful")
        else:
            delemsg.set("Error")
    except Exception:
        delemsg.set("Error")
    delePin.set("")
def popupOpen(event):
    # Show the warning popup menu at the cursor (bound to <Enter> on the
    # "More information..." label of the delete screen).
    global pop
    pop = tk.Menu(accSett2, tearoff=0)
    pop.add_command(label="Please make sure to contact your imaginary bank before proceeding and also take the necessary precautions. You have been warned! Click anywhere to close this..")
    try:
        pop.post(event.x_root, event.y_root)
    finally:
        # Release the grab so clicks elsewhere still reach other widgets.
        pop.grab_release()
def popupClose(event):
    # Hide the warning popup again (bound to <Leave>).
    pop.unpost()
# ---- Application assembly: build the window, the pages and their widgets. ----
FakeATM = container()
#regName = tk.StringVar()
# One page instance per screen.
main = page(parent, "Fake ATM")
reg = page(parent, "Register")
login = page(parent, "Enter PIN")
transaction = page(parent, "Transaction")
accSett = page(parent, "Change PIN")
accSett2 = page(parent, "Delete Pin")
# NOTE(review): "Regsiter" is misspelled, but the key is used consistently
# with show_frame("Regsiter") below, so it works; fix both together if ever.
FakeATM.add("Main", main)
FakeATM.add("Regsiter", reg)
FakeATM.add("Login", login)
FakeATM.add("Transaction", transaction)
FakeATM.add("AccSett", accSett)
FakeATM.add("AccSett2", accSett2)
FakeATM.minsize(250, 300)
# Main menu: one navigation button per screen.
main.addLabel(tk.Label(main, text=""))
main.addButton(tk.Button(main, text="Register",
                         command=lambda: FakeATM.show_frame("Regsiter")))
main.addLabel(tk.Label(main, text=""))
main.addButton(tk.Button(main, text="Transaction",
                         command=lambda: FakeATM.show_frame("Login")))
main.addLabel(tk.Label(main, text=""))
main.addButton(tk.Button(main, text="Change PIN",
                         command=lambda: FakeATM.show_frame("AccSett")))
main.addLabel(tk.Label(main, text=""))
main.addButton(tk.Button(main, text="Delete PIN",
                         command=lambda: FakeATM.show_frame("AccSett2")))
# Register screen.
#reg.addLabel(tk.Label(reg, text="User Name"))
#reg.addEntry(tk.Entry(reg, textvariable=))
#reg.addLabel(tk.Label(reg, text=""))
reg.addLabel(tk.Label(reg, text="PIN"))
reg.addEntry(tk.Entry(reg, textvariable=regPin))
reg.addLabel(tk.Label(reg, text=""))
reg.addLabel(tk.Label(reg, text="Balance"))
reg.addEntry(tk.Entry(reg, textvariable=regBalance))
reg.addLabel(tk.Label(reg, textvariable=regmsg))
reg.addButton(tk.Button(reg, text="Regsiter", command=addUser))
reg.addLabel(tk.Label(reg, text=""))
reg.addButton(tk.Button(reg, text="Go back to Main Page",
                        command=lambda: FakeATM.show_frame("Main")))
# Login screen.
#login.addLabel(tk.Label(login, text="User Name"))
#login.addEntry(tk.Entry(login))
#login.addLabel(tk.Label(login, text=""))
login.addLabel(tk.Label(login, text="Insert Imaginary Card"))
login.addLabel(tk.Label(login, text=""))
login.addLabel(tk.Label(login, text="PIN"))
login.addEntry(tk.Entry(login, textvariable=loginPin))
login.addLabel(tk.Label(login, textvariable=checkmsg))
login.addButton(tk.Button(login, text="Submit",
                          command=check))
login.addLabel(tk.Label(login, text=""))
login.addButton(tk.Button(login, text="Go back to Main Page",
                          command=lambda: FakeATM.show_frame("Main")))
# Transaction screen (reached via check() after a successful login).
transaction.addLabel(tk.Label(transaction, textvariable=balanceamt))
transaction.addLabel(tk.Label(transaction, text="Amount:"))
transaction.addEntry(tk.Entry(transaction, textvariable=transactionMoney))
transaction.addLabel(tk.Label(transaction, textvariable=transmsg))
transaction.addButton(tk.Button(transaction, text="Withdraw", command=withdraw))
transaction.addButton(tk.Button(transaction, text="Deposit", command=deposit))
transaction.addLabel(tk.Label(transaction, text=""))
transaction.addButton(tk.Button(transaction, text="Go back to Main Page",
                                command=lambda: FakeATM.show_frame("Main")))
# Change-PIN screen.
#accSett.addLabel(tk.Label(accSett, text="User Name"))
#accSett.addEntry(tk.Entry(accSett))
#accSett.addLabel(tk.Label(accSett, text=""))
accSett.addLabel(tk.Label(accSett, text="Old PIN"))
accSett.addEntry(tk.Entry(accSett, textvariable=oldPin))
accSett.addLabel(tk.Label(accSett, text=""))
accSett.addLabel(tk.Label(accSett, text="New PIN"))
accSett.addEntry(tk.Entry(accSett, textvariable=newPin))
accSett.addLabel(tk.Label(accSett, textvariable=changemsg))
accSett.addButton(tk.Button(accSett, text="Confirm",
                            command=change))
accSett.addLabel(tk.Label(accSett, text=""))
accSett.addButton(tk.Button(accSett, text="Go back to Main Page",
                            command=lambda: FakeATM.show_frame("Main")))
# Delete-PIN screen, with a hover warning popup on the info label.
popupmsg = tk.Label(accSett2, text="More information...")
popupmsg.bind("<Enter>", popupOpen)
popupmsg.bind("<Leave>", popupClose)
accSett2.addLabel(popupmsg)
accSett2.addLabel(tk.Label(accSett2, text="PIN"))
accSett2.addEntry(tk.Entry(accSett2, textvariable=delePin))
accSett2.addLabel(tk.Label(accSett2, textvariable=delemsg))
accSett2.addButton(tk.Button(accSett2, text="Confirm",
                             command=delete))
accSett2.addLabel(tk.Label(accSett2, text=""))
accSett2.addButton(tk.Button(accSett2, text="Go back to Main Page",
                             command=lambda: FakeATM.show_frame("Main")))
FakeATM.show_frame("Main")
FakeATM.mainloop()
# NOTE(review): the commit only runs after the window closes, so all DB
# changes are lost if the process is killed while the GUI is open.
conn.commit()
conn.close()
"""
CREATE TABLE IF NOT EXISTS scripts (id INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, name TEXT NOT NULL, notes TEXT NOT NULL
CREATE UNIQUE INDEX idx_name ON scripts (name)
"""
|
## note: cv2 works with python2. so type in terminal: alias python=python2 after launch
from __future__ import division
import cv2
import numpy as np
from sklearn.metrics import accuracy_score
from namespace import *
def equiv(a, b):
    """Return True when the first three components (e.g. BGR values) coincide."""
    for idx in range(3):
        if a[idx] != b[idx]:
            return False
    return True
def getTP(obj, comporation):
    """True positives: count of pixels predicted as `obj` that really are `obj`."""
    row = comporation[obj]
    return row[obj]
def getFP(obj, comporation):
    """False positives: pixels predicted as `obj` whose ground truth is another class."""
    return sum(comporation[obj][other]
               for other in objectTypes if other != obj)
def getFN(obj, comporation):
    """False negatives: pixels of class `obj` predicted as another class."""
    return sum(comporation[other][obj]
               for other in objectTypes if other != obj)
def getTN(obj, comporation):
    """True negatives: pixels neither predicted as `obj` nor belonging to `obj`."""
    others = [o for o in objectTypes if o != obj]
    return sum(comporation[predicted][actual]
               for predicted in others for actual in others)
def my_accuracy(imgResult = cv2.imread('to_test.tif'), imgIdeal = cv2.imread('to_test1.tif')):
    """Per-class precision/recall/F1 and overall pixel accuracy.

    Compares a segmentation result against the ideal image pixel by pixel,
    treating the colour tuples in `objectTypes` as class labels.

    NOTE(review): the default arguments run cv2.imread at import time and
    the loaded arrays are shared across calls - pass images explicitly.
    """
    try:
        # best-effort copy of the evaluated segmentation to disk
        cv2.imwrite("my_seg.tif", imgResult)
    except:
        pass
    # class -> whether that class appeared in either image
    detectedOnImage = dict.fromkeys(objectTypes, False)
    if (imgResult.shape != imgIdeal.shape):
        print "sizes of images are distinct"
    else:
        high = imgResult.shape[0]
        width = imgResult.shape[1]
        emptyDict = dict.fromkeys(objectTypes, 0)
        # confusion matrix: comporation[predicted][actual] = pixel count
        comporation = dict.fromkeys(objectTypes, {})
        for obj in objectTypes:
            # fromkeys shares ONE dict across keys; replace with per-class copies
            comporation[obj] = emptyDict.copy()
        precision = emptyDict.copy()
        recall = emptyDict.copy()
        accuracy = emptyDict.copy()
        f1 = emptyDict.copy()
        for y in range(high):
            for x in range(width):
                pixelResult = tuple(imgResult[y][x])
                pixelIdeal = tuple(imgIdeal[y][x])
                detectedOnImage[pixelIdeal] = True
                detectedOnImage[pixelResult] = True
                comporation[pixelResult][pixelIdeal] += 1
        for obj in objectTypes:
            if not detectedOnImage[obj]:
                print "no", objectNames[obj], "is detected \n"
                continue
            print "objectType: ", objectNames[obj]
            tp = getTP(obj, comporation)
            tn = getTN(obj, comporation)
            fp = getFP(obj, comporation)
            fn = getFN(obj, comporation)
            # guard against zero denominators before each ratio
            if (tp + fp) > 0:
                precision[obj] = tp / (tp + fp)
                print "precision", precision[obj]
            if (tp + fn) > 0:
                recall[obj] = tp / (tp + fn)
                print "recall", recall[obj]
            if (precision[obj] + recall[obj]) > 0:
                f1[obj] = (2 * precision[obj] * recall[obj]) / (precision[obj] + recall[obj])
                print "f1", f1[obj]
        # micro-averaged accuracy over all classes
        TP = sum([getTP(obj, comporation) for obj in objectTypes])
        TN = sum([getTN(obj, comporation) for obj in objectTypes])
        FP = sum([getFP(obj, comporation) for obj in objectTypes])
        FN = sum([getFN(obj, comporation) for obj in objectTypes])
        accuracy = (TP + TN) / (TP + FP + TN + FN)
        print "\n accuracy", accuracy, "\n"
def compute_accuracy(test_set):
    """Mean pixel-wise accuracy over the image numbers in `test_set`.

    Relies on get_result_image / get_gt_image supplied by the star-imported
    `namespace` module.
    """
    average_accuracy = 0.0
    for num in test_set:
        result_image = get_result_image(num)
        gt_image = get_gt_image(num)
        # fraction of pixels where result equals ground truth
        cur_accuracy = np.mean(result_image == gt_image)
        average_accuracy += cur_accuracy
        print "accuracy of segmentation picture ", num, " = ", cur_accuracy
    average_accuracy /= len(test_set)
    print "avarage accuracy = ", average_accuracy
#accuracy_1()
#cv2.waitKey()
|
import sys
import uinspect
# init the ability to detect static method for main module
# (must run at import time, before main() below inspects the module)
uinspect.enable_static_method_detect(uinspect.main_module)
class TestCase(object):
    """Base class for user-defined test cases; provides assertion helpers."""

    def assert_equal(self, a, b):
        """Raise TestError unless a == b; return None on success."""
        if a == b:
            return
        raise TestError('{0} is not equal to {1}'.format(a, b))
class TestError(Exception):
    """Raised by TestCase assertions when a check fails."""
    pass
# return a wrapper that has a function attr names __utest_cond__
# return a wrapper that has a function attr named __cond_expr__
def cond(cond_expr):
    """Decorator factory: tag a test method with a condition expression.

    The returned decorator wraps the function in a CondWrapper carrying
    `cond_expr` as __cond_expr__, so main(cond=...) can decide whether to
    run it. (Removed an unreachable `pass` after the return.)
    """
    def decorator(f):
        return CondWrapper(f, cond_expr)
    return decorator
class CondWrapper:
    """Wraps a test function, tagging it with its condition expression.

    Exposes the wrapped function's __name__ and the raw condition as
    __cond_expr__ so the test runner can filter by condition.
    """

    def __init__(self, func, cond_expr):
        self.__func = func
        self.__name__ = func.__name__
        self.__cond_expr__ = cond_expr

    def __call__(self, *args, **kwargs):
        # Propagate the wrapped function's return value
        # (previously it was silently discarded).
        return self.__func(*args, **kwargs)
# NOTE: your test cases should be in the main module
# if cond is specified, only call methods which is satisfied the cond
def main(cond=None):
    """Discover TestCase subclasses in the main module and run their tests.

    For each subclass: instantiate it and invoke every public instance
    method it defines itself (methods inherited from TestCase are skipped).
    If `cond` is given, CondWrapper-wrapped methods run only when their
    __cond_expr__ predicate accepts it. Each failure prints the method and
    message; the first failure per class also prints the class header.
    """
    # base_class_func_names = list(map(lambda func: func.__name__, __get_method_from(TestCase)))
    # helper names defined on the TestCase base itself - these are not tests
    base_class_func_names = [func.__name__ for func in __get_target_method_from(TestCase)]
    for test_case in (attr for attr in __get_attrs_form(uinspect.main_module)
                      if uinspect.is_inherit_from(attr, TestCase)):
        # find the target classes
        # create an instance for each of the classes
        # and call its inst methods
        error_raised = False
        inst = test_case()
        for test_func in (func for func in __get_target_method_from(test_case)
                          if func.__name__ not in base_class_func_names and (
                cond is None or not hasattr(func, '__cond_expr__') or getattr(func, '__cond_expr__')(cond))):
            try:
                test_func(inst)
            except TestError as t:
                if not error_raised:
                    error_raised = True
                    print('at', test_case.__name__ + ':')
                print(' at', test_func.__name__ + ':', *t.args)
def __get_target_method_from(cls):
    """Yield the public, non-static methods defined on *cls* (per uinspect)."""
    return (attr for attr in __get_attrs_form(cls)
            if uinspect.is_public_method(attr) and not uinspect.is_static_method(attr))
# TODO: move to utilities
def __get_attrs_form(obj):
    """Yield every attribute value of *obj* (name keeps its historic typo)."""
    for attr_name in dir(obj):
        yield getattr(obj, attr_name)
|
# -*- coding: utf-8 -*-
# @Date : 2019-05-14 16:15:56
# @Author : QilongPan
# @Email : 3102377627@qq.com
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
# NOTE(review): variable is named `tips` but actually loads the "iris"
# dataset - presumably copied from a seaborn tips example; confirm intent.
tips = sns.load_dataset("iris")
print(tips.head())
|
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import MeanSquaredError
from algorithms.dane.models import AutoEncoder
class PreTrainer(object):
    """Pre-trains the DANE auto-encoders on input reconstruction.

    Holds the dimensions and hyper-parameters for both the network-structure
    ('net') and attribute ('att') auto-encoders, read from `config`.
    """

    def __init__(self, config):
        self.config = config
        self.net_input_dim = config['net_input_dim']
        self.att_input_dim = config['att_input_dim']
        self.net_hidden = config['net_hidden']
        self.net_dimension = config['net_dimension']
        self.att_hidden = config['att_hidden']
        self.att_dimension = config['att_dimension']
        self.pretrain_params_path = config['pretrain_params_path']
        self.batch_size = config['batch_size']
        self.learning_rate = config['learning_rate']
        self.dropout = config['drop_prob']
        # reserved for exported per-layer weight/bias initialisers
        self.W_init = {}
        self.b_init = {}

    def pretrain(self, data, model):
        """Train the chosen auto-encoder on `data` and save its weights.

        :param data: training matrix; the auto-encoder reconstructs it onto
                     itself (fit(data, data)).
        :param model: 'net' selects the structure encoder; any other value
                      (i.e. 'att') selects the attribute encoder.
        :return: the trained AutoEncoder instance.
        """
        if model == 'net':
            input_shape = self.net_input_dim
            hidden_shape = self.net_hidden
            dimension_shape = self.net_dimension
        else:  # model == 'att'
            input_shape = self.att_input_dim
            hidden_shape = self.att_hidden
            dimension_shape = self.att_dimension
        autoencoder = AutoEncoder(hidden1_dim=hidden_shape, hidden2_dim=dimension_shape, output_dim=input_shape, dropout=self.dropout)
        # `learning_rate` replaces the deprecated `lr` keyword of Adam
        optimizer = Adam(learning_rate=self.learning_rate)
        autoencoder.compile(optimizer=optimizer, loss=MeanSquaredError())
        autoencoder.fit(data, data, epochs=50, batch_size=self.batch_size, shuffle=True)
        autoencoder.save_weights(self.pretrain_params_path + '_' + model)
        return autoencoder
|
from django.conf.urls import url
from . import views
# Url patterns for this app
# NOTE(review): intentionally empty - the app exposes no routes yet.
urlpatterns = []
|
import os.path
import unittest
import collections
import json
from .. import invitation
from ..base32 import b2a
from ..client import Client
from .. import database
from ..scripts.create_node import create_node
from ..netstring import split_netstrings
class Outbound(unittest.TestCase):
    """Unit tests for the invitation packing/unpacking helpers.

    NOTE(review): failUnless* are long-deprecated unittest aliases of
    assert* - kept here to match the original test style.
    """

    def test_create(self):
        code_ascii = b2a("code_binary")
        # derived HMAC key and sender address must be stable for a given code
        self.failUnlessEqual(b2a(invitation.get_hmac_key(code_ascii)),
                             "fvyuhvbg567wpixb5lzodtkvhpmfccwdrlp5a6zf7vvvqvlhhshq")
        self.failUnlessEqual(invitation.get_sender_address(code_ascii),
                             "channel-ihraxtbpsuxoohiuzjy646zkk5bh25e7gpyhwtpessv2ywn46bvq")
        # round-trip: pack then unpack returns the original message list
        h1, m1 = invitation.pack_messages(code_ascii, "1", "hello world")
        m2 = invitation.unpack_messages(code_ascii, h1, m1)
        self.failUnlessEqual(list(m2), ["1", "hello world"])
        # unpacking with the wrong code must be rejected
        wrong_code = b2a("wrong code")
        self.failUnlessRaises(ValueError,
                              invitation.unpack_messages, wrong_code, h1, m1)
        # a hash computed over different messages must not validate m1
        otherh,otherm = invitation.pack_messages(code_ascii, "different msg")
        self.failUnlessRaises(ValueError,
                              invitation.unpack_messages, code_ascii,
                              otherh, m1)
def testfilepath(*names):
expanded = []
for n in names:
if isinstance(n, (tuple,list)):
expanded.extend(list(n))
else:
expanded.append(n)
names = expanded
for i in range(1,len(names)):
dirname = os.path.join(*names[:i])
if not os.path.isdir(dirname):
os.mkdir(dirname)
return os.path.abspath(os.path.join(*names))
class Nexus:
    """In-memory stand-in for the relay: routes messages between clients."""

    def __init__(self):
        # channel id -> set of subscribed client objects
        self.subscriptions = collections.defaultdict(set)

    def send(self, c, m):
        """Handle one netstring-encoded message from client `c`."""
        parts = split_netstrings(m)
        command = parts[0]
        if command == "subscribe":
            self.subscriptions[parts[1]].add(c)
        elif command == "send":
            # fan the message out to every subscriber of the channel
            for subscriber in self.subscriptions[parts[1]]:
                subscriber.message_received(c, parts)
        else:
            raise ValueError("unrecognized command %s" % command)
class FakeClient(Client):
    """Client wired to an in-memory Nexus instead of a real relay."""

    # set by the test after construction; messages queue until then
    nexus = None

    def maybe_send_messages(self):
        # flush the pending queue into the fake relay, if one is attached
        if not self.nexus:
            return
        while self.pending_messages:
            m = self.pending_messages.popleft()
            self.nexus.send(self, m)

    def message_received(self, fromwho, messages):
        # record every delivery so tests can assert on it
        Client.message_received(self, fromwho, messages)
        self.log.append((fromwho, messages))

    def add_addressbook_entry(self, petname, data, localdata):
        # capture addressbook writes (JSON-decoded) instead of persisting them
        self.book.append( (petname,json.loads(data),json.loads(localdata)) )
class Roundtrip(unittest.TestCase):
    """End-to-end invitation exchange between two FakeClients over a Nexus."""

    def mkfile(self, *names):
        # all test artifacts live under the shared _test scratch tree
        return testfilepath("_test", *names)

    def create_clients(self, *names):
        # Build two node basedirs plus FakeClients c1 (alice) and c2 (bob)
        # joined through one in-memory Nexus; stash them on self.
        base = os.path.join("_test", *names)
        self.mkfile(names, "dummy")
        create_node({"basedir": os.path.join(base, "c1"),
                     "webport": "tcp:0",
                     "relay": "tcp:host=localhost:port=0"})
        dbfile1 = self.mkfile(names, "c1", "toolbed.db")
        c1 = FakeClient(database.get_db(dbfile1)[1])
        create_node({"basedir": os.path.join(base, "c2"),
                     "webport": "tcp:0",
                     "relay": "tcp:host=localhost:port=0"})
        dbfile2 = self.mkfile(names, "c2", "toolbed.db")
        c2 = FakeClient(database.get_db(dbfile2)[1])
        c1.control_setProfileName("alice")
        c1.control_setProfileIcon("alice-icon")
        c2.control_setProfileName("bob")
        c2.control_setProfileIcon("bob-icon")
        n = Nexus()
        c1.nexus = n; c1.log = []; c1.book = []
        c2.nexus = n; c2.log = []; c2.book = []
        # flush anything the clients queued during setup
        c1.maybe_send_messages(); c2.maybe_send_messages()
        self.c1 = c1
        self.c2 = c2
        self.n = n

    def test_contact(self):
        self.create_clients("invitation", "Roundtrip", "contact")
        c1,c2 = self.c1,self.c2
        # a direct relay send must be logged exactly once by the receiver
        c1.send_message_to_relay("send", c2.vk_s, "hello")
        self.failUnlessEqual(len(c2.log), 1)
        self.failUnlessEqual(c2.log[-1][0], c1)
        self.failUnlessEqual(c2.log[-1][1], ["send", c2.vk_s, "hello"])

    def test_invite(self):
        self.create_clients("invitation", "Roundtrip", "invite")
        c1,c2,n = self.c1,self.c2,self.n
        c1.control_sendInvitation("pet-bob")
        data = c1.control_getOutboundInvitationsJSONable()
        self.failUnlessEqual(len(data), 1)
        self.failUnlessEqual(data[0]["petname"], "pet-bob")
        code_ascii = data[0]["code"]
        # c1 should have subscribed to hear about its channel by now
        c1_channel = invitation.get_sender_address(code_ascii)
        self.failUnless(c1_channel in n.subscriptions)
        self.failUnlessEqual(n.subscriptions[c1_channel], set([c1]))
        # all protocol messages complete inside this call
        c2.control_acceptInvitation("pet-alice", code_ascii)
        # alice's addressbook: exactly one entry describing bob
        self.failUnlessEqual(len(c1.book), 1)
        self.failUnlessEqual(c1.book[0][0], "pet-bob")
        d1 = c1.book[0][1]
        self.failUnlessEqual(sorted(d1.keys()),
                             sorted(["my-name", "my-icon", "my-pubkey"]))
        self.failUnlessEqual(d1["my-name"], "bob")
        self.failUnlessEqual(d1["my-icon"], "bob-icon")
        k1 = d1["my-pubkey"]
        d2 = c1.book[0][2]
        self.failUnlessEqual(sorted(d2.keys()),
                             sorted(["my-pubkey", "my-privkey"]))
        k2 = d2["my-pubkey"]
        # bob's addressbook: exactly one entry describing alice
        self.failUnlessEqual(len(c2.book), 1)
        self.failUnlessEqual(c2.book[0][0], "pet-alice")
        d3 = c2.book[0][1]
        self.failUnlessEqual(sorted(d3.keys()),
                             sorted(["my-name", "my-icon", "my-pubkey"]))
        self.failUnlessEqual(d3["my-name"], "alice")
        self.failUnlessEqual(d3["my-icon"], "alice-icon")
        k3 = d3["my-pubkey"]
        d4 = c2.book[0][2]
        self.failUnlessEqual(sorted(d4.keys()),
                             sorted(["my-pubkey", "my-privkey"]))
        k4 = d4["my-pubkey"]
        # each side's local pubkey matches what the peer recorded
        self.failUnlessEqual(k1, k4)
        self.failUnlessEqual(k2, k3)
|
import argparse
import json
import os
from glob import glob
from typing import Dict
def load_correlations(input_dir: str, summarizer_type: str, level: str) -> Dict[str, Dict[str, float]]:
    """Load the correlation coefficients for every metric in `input_dir`.

    Files are named `<metric>-<summarizer_type>.json`; each holds a mapping
    from correlation level to {'pearson': {'r': ...}, 'spearman': {'rho': ...},
    'kendall': {'tau': ...}}.

    :return: metric name -> {'pearson': r, 'spearman': rho, 'kendall': tau}
    """
    correlations_dict = {}
    for input_file in glob(f'{input_dir}/*-{summarizer_type}.json'):
        name = os.path.basename(input_file)[:-len(f'-{summarizer_type}.json')]
        # Close the file deterministically (the original leaked the handle).
        with open(input_file, 'r') as f:
            correlations = json.load(f)
        correlations = correlations[level]
        correlations_dict[name] = {
            'pearson': correlations['pearson']['r'],
            'spearman': correlations['spearman']['rho'],
            'kendall': correlations['kendall']['tau'],
        }
    return correlations_dict
def main(args):
    """Write an HTML table of correlation coefficients to args.output_file.

    Rows are metrics; each dataset contributes three columns holding the
    Pearson r, Spearman rho and Kendall tau at the requested level.
    """
    correlations_dict = {}
    for name, input_dir in zip(args.dataset_names, args.correlation_dirs):
        correlations_dict[name] = load_correlations(input_dir, args.summarizer_type, args.correlation_level)
    # metric rows come from the first dataset; assumes every dataset was
    # evaluated with the same set of metrics - TODO confirm
    metrics = sorted(list(correlations_dict.values())[0].keys())
    dirname = os.path.dirname(args.output_file)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(args.output_file, 'w') as out:
        out.write('<table>\n')
        # Print the header with the dataset names
        out.write('<tr>\n')
        out.write('<th></th>\n')
        for dataset in args.dataset_names:
            out.write(f'<th colspan="3">{dataset}</th>\n')
        out.write('</tr>\n')
        # Print the header with the correlation coefficient names
        out.write('<tr>\n')
        out.write('<th></th>\n')
        for _ in args.dataset_names:
            for coef in ['r', 'p', 'k']:
                out.write(f'<th>{coef}</th>\n')
        out.write('</tr>\n')
        # Print each value in the table
        for metric in metrics:
            out.write('<tr>\n')
            out.write(f'<td>{metric}</td>\n')
            for dataset in args.dataset_names:
                out.write(f'<td>{correlations_dict[dataset][metric]["pearson"]:.2f}</td>\n')
                out.write(f'<td>{correlations_dict[dataset][metric]["spearman"]:.2f}</td>\n')
                out.write(f'<td>{correlations_dict[dataset][metric]["kendall"]:.2f}</td>\n')
            out.write('</tr>\n')
        out.write('</table>\n')
if __name__ == '__main__':
    argp = argparse.ArgumentParser()
    # dataset names and correlation dirs are paired 1:1 by position
    argp.add_argument('--dataset-names', nargs='+')
    argp.add_argument('--correlation-dirs', nargs='+')
    argp.add_argument('--output-file')
    argp.add_argument('--correlation-level', choices=['summary_level', 'system_level', 'global'])
    argp.add_argument('--summarizer-type', choices=['peer', 'reference', 'all'])
    args = argp.parse_args()
    main(args)
import json
import boto3
from boto3.dynamodb.conditions import Key, Attr
"""
JSON Format:
"token" = "auth token ",
"displayName" = "Steve Nash"
"""
def lambda_handler(event, context):
    """Update a user's DisplayName in the PickPocket DynamoDB table.

    Expects event = {"token": ..., "displayName": ...}. Resolves the user
    by auth token; returns {"response": "success"} on update or
    {"response": "USER_DOES_NOT_EXIST"} when no user owns the token.
    """
    output = {}
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('PickPocket')
    token = event['token']
    displayName = event['displayName']
    # NOTE(review): scan() reads the whole table and only inspects the first
    # result page; a GSI on Token queried with query() would be cheaper and
    # correct for large tables - confirm schema before changing.
    queryTable = table.scan(FilterExpression=Key('Token').eq(token))
    if (queryTable['Count'] == 0):
        output["response"] = "USER_DOES_NOT_EXIST"
        return output
    userId = queryTable['Items'][0]['UserId']
    # Removed the unused result binding and the redundant re-initialisation
    # of `output` that followed the update call.
    table.update_item(
        Key={
            'UserId': userId
        },
        UpdateExpression="set DisplayName = :d",
        ExpressionAttributeValues={
            ':d': displayName
        },
        ReturnValues="UPDATED_NEW"
    )
    output["response"] = "success"
    return output
import torch
import numpy as np
from models.basecritic import AbstractCritic
class DenseCritic(torch.nn.Module, AbstractCritic):
    """MLP value function over flattened, zero-padded LP states."""

    def __init__(self, m, n, t, lr=0.001):
        """
        max_input is the size of the maximum state size
        Let t be the max number of timesteps,
        maximum state/action size: (m + t - 1, n+1)

        :param lr: Adam learning rate for the value network.
        """
        super(DenseCritic, self).__init__()
        self.model = torch.nn.Sequential(
            # input layer: flattened maximum-size state
            torch.nn.Linear((m + t - 1) * (n + 1), 256),
            torch.nn.ReLU(),
            torch.nn.Linear(256, 256),
            torch.nn.ReLU(),
            torch.nn.Linear(256, 1)  # scalar value head
        )
        self.m = m
        self.t = t
        self.n = n
        # every state is zero-padded up to this fixed input length
        self.full_length = (m + t - 1) * (n + 1)
        # DEFINE THE OPTIMIZER
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)

    def _pad_state(self, state):
        # Flatten the Ab matrix and zero-pad to the fixed input length
        # (shared by the single and batched value computations).
        Ab, _, _ = state
        x = Ab.flatten()
        return np.append(x, np.zeros(self.full_length - len(x)))

    def _compute_value_torch(self, state):  # use batched version when possible
        padded = self._pad_state(state)
        return self.model(torch.as_tensor(padded, dtype=torch.float32)).flatten()

    def _compute_values_torch(self, states):
        # Stack into a single ndarray first: constructing a tensor from a
        # list of ndarrays is deprecated and extremely slow in torch.
        batch = np.stack([self._pad_state(s) for s in states])
        return self.model(torch.as_tensor(batch, dtype=torch.float32)).flatten()
class NoCritic(AbstractCritic):
    """Critic that supplies a constant zero baseline and never trains."""

    def __init__(self):
        pass

    def _compute_values_torch(self, states):
        # zero value estimate for every state, as a float tensor
        return torch.zeros(len(states))

    def compute_values(self, states):
        # NumPy counterpart of the zero baseline
        return np.zeros(len(states))

    def train(self, memory):
        # nothing to update
        pass
|
import socket
from django.core.exceptions import ImproperlyConfigured
hostname = socket.gethostname()
# pick a per-developer settings module based on the machine's hostname
if hostname == "arun-desktop":
    from settings_arun import *
elif hostname == "Leoankit":
    from settings_ankit import *
elif hostname == "anup-desktop":
    from settings_anup import *
elif hostname == "Leonilesh":
    from settings_nilesh import *
else:
    # fail fast instead of running with no settings at all
    raise ImproperlyConfigured("No settings module found for host: %s" % hostname)
del(hostname)  # keep the settings namespace clean
|
__author__ = 'martin.majer'
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import h5py
import cv2
root = '/storage/plzen1/home/mmajer/pr4/data/sun_full/SUN397'
storage = '/storage/plzen1/home/mmajer/pr4/data/'
filename = storage + 'sun_img_names.hdf5'
def list_filepaths(root):
    '''
    Get paths to all images.
    :param root: root directory
    :return: list of image paths
    '''
    imgs_paths = []
    imgs_names = []
    for dirpath, _subdirs, filenames in os.walk(root):
        for fname in filenames:
            # skip hidden files such as .DS_Store
            if fname.startswith('.'):
                continue
            imgs_paths.append(os.path.join(dirpath, fname))
            imgs_names.append(fname)
    return imgs_paths, imgs_names
# paths to all image files
imgs_paths, img_names = list_filepaths(root)
zipped = zip(imgs_paths, img_names)
i = 0  # index of the next dataset slot to fill
with h5py.File(filename, 'w') as fw:
    # variable-length unicode string datasets, one entry per image
    fw.create_dataset('filename', (len(imgs_paths), ), dtype=h5py.special_dtype(vlen=unicode))
    fw.create_dataset('path', (len(imgs_paths), ), dtype=h5py.special_dtype(vlen=unicode))
    for path, name in zipped:
        if i % 50 == 0:
            print '\r', i, path,
        try:
            # open image using opencv
            img = cv2.imread(path)
            # keep only files that decoded with colour channels
            if len(img.shape) == 3:
                fw['filename'][i] = name
                fw['path'][i] = path
                i += 1
        except:
            # NOTE(review): the bare except also hides real errors, not just
            # failed decodes (imread returns None -> AttributeError above).
            print '\nNot an image'
"""Необходимо вывести все числа кратные 4 между числами 40 и 60 включительно.
Реализовать 2 варианта:
использовать конструкцию if для определения кратности (цикл с шагом 1, i = i + 1);
без использования конструкции if (шаг цикла на ваше усмотрение).
"""
for i in range(40, 61) :
if i%4==0:
print(i)
for i in range(40,61,4):
print(i)
#
# print(40//3)
# for i in range(100,200):
# if i%7 ==0: #Если число кратно 7
# print(i) #то, вывести его |
from contacts.Group import DGroup
from common import pref
from logging import getLogger; log = getLogger('blistsort'); info = log.info
def grouping():
    """True when the buddylist sort preference starts with *status or *service."""
    sort_key = pref('buddylist.sortby', 'none none')
    return sort_key.startswith('*status') or sort_key.startswith('*service')
class SpecialGroup(DGroup):
    """Buddy-list group created by grouping rules rather than by the user."""
    _renderer = 'DGroup'

    def groupkey(self):
        # prefix the class name so special groups never collide with the
        # keys of ordinary DGroups
        return self.__class__.__name__ + '_' + DGroup.groupkey(self)

    def renamable(self):
        # special groups cannot be renamed by the user
        return None
# Canonical ordering of contact statuses, most to least "present".
STATUS_ORDER = ['available',
                'away',
                'idle',
                'mobile',
                'invisible',
                'offline',
                'unknown']
# status name -> its position in STATUS_ORDER. Built with enumerate, which
# works on both Python 2 and 3 (the previous xrange is Python-2-only).
STATUS_ORDER_INDEXES = dict((status, i) for i, status in enumerate(STATUS_ORDER))
|
# coding:utf-8
# 经历了诸多实验,终于开始尝试写作基于用户工作满意度的主观分类器
# 其核心思想是基于提取的17维度的用户JS特征,
# 1. 首先,假设全体用户中存在者许多满意度相近的用户,反映在特征空间中,即是相近的样本聚为一个类;
# 2. 全部用户中,所有用户的工作满意度大致存在三种情况,根据正态分布,绝大多数用户的满意度在一个正常范围,少部分高于或者低于这个范围,
# 而我们所关注的即低于这个范围的用户;
# 为了找到这些用户,我们基于高于或等于正常范围的正常用户进行OCSVM训练,而这些用户通过聚类中心的JS分数高于中位数的群簇提供;
# 3. 通过训练OCSVM的样本整体上位于所有用户中上部分,因而其判断的用户异常可能存在中间水平边界的用户,对于这些用户,考虑JS分数高于均值的,
# 从异常用户集合中剔除,剩下的即为判定的高危用户;
# 为了实现上述算法,按照算法实现步骤,基本需要:
# 1. 17JS特征数据读入
# 2. KMeans聚类的最优K值确定[2,10]:计算
# 2.1 每个样本点的聚合度、分离度;
# 2.2 计算所有样本点的轮廓系数,得到其平均值;
# 2.3 选择轮廓系数最大的K值分类;
# 2.4 每个K值至少聚合三次,计算三组轮廓系数,保留最大的轮廓系数,避免局部最优值
# 3. 计算K个群簇中心的JS分数,然后排序,选择JS分数高于中位数的群簇作为OCSVM训练样本;
# 4. 训练OCSVM,得到一个主观分类器,然后判断所有样本,打标签;
# 5. 筛选出其中的异常样本,并计算这些样本的JS分数,若高于全部样本的均值特征计算得到的JS分数,则剔除;
# 6. 最后剩余的即为认定的内部高危用户集合,并计算其中所有用户的JS分数;
# 本脚本作为JS_CLF-KMeans-v0.1.py,主要用于计算KMeans的轮廓系数,
# 针对17JS特征从[2,10]中选择最好的K值(每个K值随机选择初始点聚类三次,选择最好的轮廓系数作为K值的候选参数;
import sys
from sklearn.cluster import KMeans
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
# 计算两个点的欧式距离
# Euclidean distance between two points
def Distance(Pa, Pb):
    """Euclidean distance between two equal-length points.

    Vectorised with numpy instead of the original element-by-element
    Python while-loop (same result, much faster on long vectors).
    """
    diff = np.asarray(Pa, dtype=float) - np.asarray(Pb, dtype=float)
    return np.sqrt(np.dot(diff, diff))
# 1,计算样本i到同簇其他样本的平均距离ai。ai 越小,说明样本i越应该被聚类到该簇。将ai 称为样本i的簇内不相似度。
# 簇C中所有样本的a i 均值称为簇C的簇不相似度。
# 2,计算样本i到其他某簇Cj 的所有样本的平均距离bij,称为样本i与簇Cj 的不相似度。定义为样本i的簇间不相似度:bi =min{bi1, bi2, ..., bik}
# bi越大,说明样本i越不属于其他簇。
# 计算一个点到一个簇的所有点的平均距离
# Mean distance from one point to every point of a cluster
def Dist_Pt_Clst(Pa, Clst_a):
    """Average distance from point Pa to the members of cluster Clst_a.

    If Pa itself belongs to the cluster, it is excluded from the divisor
    so the mean is taken over the *other* members only. (The original
    duplicated the whole divide-and-return in two mirrored if-blocks.)
    """
    total = 0.0
    for pt in Clst_a:
        total += Distance(Pa, pt)
    divisor = len(Clst_a) - 1 if Pa in Clst_a else len(Clst_a)
    return total / divisor
# 计算样本点Pa的簇间不相似度
# Inter-cluster dissimilarity of a sample point
def B(Pa, Clusters_Users):  # clusters that Pa does NOT belong to
    """Smallest mean distance from Pa to any of the given clusters."""
    return min(Dist_Pt_Clst(Pa, members) for members in Clusters_Users)
# 计算一个群簇用户集合的轮廓系数
# Mean silhouette coefficient of a clustering
def SC_Clusters(k, Clusters_Users_lst):
    """Average silhouette coefficient over all samples in the k clusters.

    Clusters_Users_lst holds k clusters, each a list of feature vectors.
    Per sample: a = mean intra-cluster distance, b = min mean distance to
    any other cluster, silhouette = (b - a) / max(a, b).
    """
    # k is the number of clusters currently under evaluation
    SC_users = []  # silhouette coefficient of every sample
    i = 0
    while i < k:
        cnt_ = 0
        # (elementwise row comparison could use np.all instead of `in`)
        for user in Clusters_Users_lst[i]:
            Other_Clusters_Users = []
            j = 0
            while j < k:
                # print Clusters_Users_lst[j], '\n'
                # c = np.all([user, Clusters_Users_lst[j]], axis=0)
                # print c.all(), '\n'
                if user in Clusters_Users_lst[j]:
                    j += 1
                    continue
                Other_Clusters_Users.append(Clusters_Users_lst[j])
                j += 1
            # Other_Clusters_Users now holds the clusters `user` is NOT in
            B_user = B(user, Other_Clusters_Users)
            A_user = Dist_Pt_Clst(user, Clusters_Users_lst[i])
            Cmp_lst = []
            Cmp_lst.append(A_user)
            Cmp_lst.append(B_user)
            SC_user = (B_user - A_user) / max(Cmp_lst)
            SC_users.append(SC_user)
            # print '第 ', cnt_, '个用户计算完毕...\n'
            cnt_ += 1
        print k, '第 ', i, '群簇轮廓系数计算完毕...\n'
        i += 1
    SC_users_array = np.array(SC_users)
    return SC_users_array.mean()
print '开始测试、选择对于已有用户JS特征最适宜的K值(KMeans)。。。\n'
print '读入JS特征数据...\n'
# read the 26-feature job-satisfaction CSV exported from CERT r4.2
f_cert = open(r'CERT4.2-2009-12-New-26JS.csv', 'r')
f_cert_lst = f_cert.readlines()
f_cert.close()
print '提取原始JS特征中的字段列表...\n'
# CSV column layout:
# user_id,O_Score,C_Score,E_Score,A_Score,N_Score,
# Team_CPB-I-mean,Team_CPB-O-mean,Users-less-mean-A,Users-less-mean-A and C,Users-less-mean-C,Users-High-mean-N,Team_CPB-I-median,Team_CPB-O-median,No-JobState-in-Team,
# Dpt-CPB-I-mean,Dpt_CPB-O-mean,Dpt-Less-A-mean,Dpt-Less-AC-mean,Dpt-less-C-mean,Dpt-High-N-mean,Dpt_CPB-I-median,Dpt_CPB-O-median,No-JobState-in-Dpt,
# Job State,
# Leader_CPB-I,Leader_CPB-O
JS_lst = []
User_lst = []  # user id of every record, parallel to JS_lst
for line in f_cert_lst:
    JS_tmp = []
    line_lst = line.strip('\n').strip(',').split(',')
    if line_lst[0] == 'user_id':
        continue  # skip the header row
    User_lst.append(line_lst[0])
    i = 1
    while i < len(line_lst):
        JS_tmp.append(float(line_lst[i]))
        i += 1
    JS_lst.append(JS_tmp)
# min-max normalised copy of the full feature matrix
JS_lst_ori = MinMaxScaler().fit_transform(JS_lst)
# Second pass: rebuild JS_lst keeping only the selected feature columns.
JS_lst = []
for line in f_cert_lst:
    JS_tmp = []
    line_lst = line.strip('\n').strip(',').split(',')
    if line_lst[0] == 'user_id':
        continue
    # column layout: see the header comment block near the first parse;
    # indices below are 1-based positions after user_id
    i = 1
    while i < len(line_lst):
        # Feature selection: only columns with index >= 25 (the Leader_CPB
        # columns) are kept. The commented conditions are alternative
        # experiment configurations that were tried previously.
        # if i < 6 or i > 14: # team median CPB excluded(12) / included(14)
        # if i < 15 or i > 20:
        if i < 25:
        # if i != 14 and i != 23 and i != 24:
        # if i < 6:
            i += 1
            continue
        # if i == 21 or i == 22: # ignore medians for now
        # i += 1
        # continue
        # if i == 6 or i == 7 or i == 12 or i == 13: # team CPB columns
        # CPB scores are negated so they act as satisfaction-lowering factors
        # if i == 14 or i == 23 or i == 24:
        # if i == 15 or i == 16 or i == 21 or i == 22:
        JS_tmp.append(-1 * float(line_lst[i]))
        # JS_tmp.append(float(line_lst[i]))
        i += 1
        continue
    JS_lst.append(JS_tmp)
# JS_lst holds feature vectors only, without user_id
# JS_array = np.array(JS_lst)
print 'JS特征数据读入完毕...\n'
print '开始测试选择最优的K值(KMeans)...\n'
# Try K in [2, 10]. Each K is clustered several times and the best
# silhouette is kept to avoid a locally bad initialisation.
# Silhouette of one sample: (B(pt) - A(pt)) / max(A(pt), B(pt));
# the clustering score is the mean over all samples - closer to 1 is better.
SC_lst = []  # (K, best silhouette) pairs
a = [2,3,4,5,6,7,8,9,10]
for k in a:
    print '现在实验的K值为 ', k, '\n'
    print '每个K值需要实验三次以避免局部最优偏见...\n'
    # optional PCA reduction to 5 dims (disabled)
    # pca = PCA(n_components=5)
    # pca.fit_transform(JS_lst)
    print 'PCA降维完成...\n'
    SC_tmp = []  # silhouettes of the repeated runs for this K
    i = 0
    # NOTE(review): loops twice although the printed message says three runs
    while i < 2:
        y_pred = KMeans(n_clusters=k).fit(JS_lst).labels_
        print k, 'KMeans单次聚类完成\n'
        clusters_users = []  # feature vectors grouped per cluster label
        cls_no = 0  # current cluster label being collected
        # gather the members of each cluster
        while cls_no < k:
            cluster_users_tmp = []
            j = 0
            while j < len(y_pred):
                if y_pred[j] == cls_no:
                    cluster_users_tmp.append(JS_lst[j])
                    j += 1
                else:
                    j += 1
                    continue
            clusters_users.append(cluster_users_tmp)
            cls_no += 1
        print k, ' 群簇用户列表统计完毕...\n'
        # silhouette for this clustering
        SC_value = SC_Clusters(k, clusters_users)
        print i, '\n'
        SC_tmp.append(SC_value)
        i += 1
    SC_values = []  # the (K, best silhouette) pair selected for this K
    SC_values.append(k)
    SC_values.append(max(SC_tmp))
    SC_lst.append(SC_values)
print 'K = [2, 11]的KMeans聚类的轮廓系数计算完毕...\n'
print '各个K值的K均值聚类的最优轮廓系数为: \n'
for line in SC_lst:
    print 'K值为: ',line[0], ' 轮廓系数为 ',line[1], '\n'
print '其中轮廓系数最高的K值为: \n'
# linear scan for the K with the largest silhouette coefficient
SC_value = 0.0
K_best = 0
for line in SC_lst:
    if line[1] > SC_value:
        SC_value = line[1]
        K_best = line[0]
print K_best, SC_value, '\n'
print 'KMeans'
|
import scrapy
from scrapy import Request
import re
from ..items import RaidforumsItem
class Quotespider(scrapy.Spider):
    """Scrapes thread metadata from the raidforums Databases forum listing."""
    name = 'database_posts'
    # allowed_domains = ["https://raidforums.com"]
    start_urls = [
        'https://raidforums.com/Forum-Databases'
    ]
    base_url = 'https://raidforums.com/'

    def parse(self, response):
        """Yield one RaidforumsItem per thread row of the listing table."""
        items = RaidforumsItem()
        post_div = response.xpath('/html/body/div[1]/main/section[2]/table[3]//tr')
        length = len(post_div)
        counter = 0
        # rows 0-8 are headers/stickies; the final row is pagination
        for post in post_div[9:length - 1]:
            counter += 1
            print("counter", counter)
            items["post_name"] = post.css(".forum-display__thread-subject::text").extract()
            items["post_by"] = post.css(".author span::text").extract()
            try:
                # BUG FIX: `.extract` was missing its call parentheses (the
                # bound method object was stored), and `::attr(title)` is CSS
                # syntax - in XPath the attribute is addressed with /@title.
                post_date = post.xpath('.//div/span[2]/span/@title').extract()
                print(post_date)
                possibility = post.css('.forum-display__thread-date::text').extract()
                if (re.search("ago$", str(possibility)) == None):
                    items["post_date"] = post_date + possibility[-12:]
            except:
                items["post_date"] = post.css('.forum-display__thread-date::text').extract()
            items["post_views_no"] = post.css(".hidden-sm:nth-child(4)::text").extract()
            items["post_replies_no"] = post.css(".hidden-sm > a::text").extract()
            # (the original wrapped this in a try/except whose two branches
            # were identical - the guard added nothing)
            next_page = post.xpath("td[2]/div/div[1]/span[2]/a/@href").extract()
            # BUG FIX: next_page is a list; str(list) previously produced a
            # URL like "https://.../['Thread-x']". Use the first match.
            items["link_to_post"] = self.base_url + (next_page[0] if next_page else '')
            print("next page", items["link_to_post"])
            yield items
            # yield Request(items['link_to_post'],
            #               meta={'items': items},
            #               callback=self.parse_post)

    def parse_post(self, response):
        """Enrich the item passed via meta with the thread's first post."""
        items = response.request.meta['items']
        # BUG FIX: `.extrract()` typo raised AttributeError on every call.
        items['actual_post'] = response.css('#pid_437190::text').extract()
        items['user_name'] = response.css('.rf_god::text').extract()
        items['user_status'] = response.css('.post__user-title::text').extract()
        items['user_posts'] = response.css('.group:nth-child(1) .float_right ::text').extract()
        items['user_threads'] = response.css('.group:nth-child(2) .float_right ::text').extract()
        items['user_joined'] = response.css('.group:nth-child(3) .float_right ::text').extract()
        items['user_reputation'] = response.css('.group:nth-child(4) .float_right ::text').extract()
        try:
            items['user_service'] = response.css('.user_service::text').extract()
        except:
            items['user_service'] = "less than a year"
        print(items)
        yield items
# / html / body / div[1] / main / section[2] / table[3] / tbody / tr[9]
# print("length", len(post_div))
# print(post_div)
# # items["post_name"] = post_div.css(".forum-display__thread-subject::text").extract()
# names = post_div.css(".forum-display__thread-subject::text").extract()
# print("title-length",len(names))
# print("post_names", names)
#
# counter = 0
# for post in post_div:
# counter = counter +1
# name = post.css(".forum-display__thread-subject::text").extract()
#
# yield {
# "name" : name,
# "counter" : counter
# }
# for post in post_div:
# items["post_name"] = post.css(".forum-display__thread-subject::text").extract()
# items["post_by"] = post.css(".rf_i rf_vip::text").extract()
# # items["post_date"] = post.css(".forum-display__thread-date::text").extract()
# try:
# post_date = post_div.xpath('.//div/span[2]/span::attr(title)').extract
# possibility = post_div.css('.forum-display__thread-date::text').extract()
# if (re.search("ago$", str(possibility)) == None):
# items["post_date"] = post_date + possibility[-12:]
# except:
# items["post_date"] = post.css('.forum-display__thread-date::text').extract()
# items["post_views"] = post.css(".hidden-sm > a::text").extract()
# items["post_replies"] = post.css(".trow2 forumdisplay_regular hidden-sm selectorgadget_suggested::text").extract()
#
# yield
# # items["post_by"] = post_div.css(".rf_i rf_vip::text").extract()
# print("post_by", post_div.css(".author span::text").extract())
# # items["post_date"] = post_div.css(".forum-display__thread-date::text").extract()
# print("post_names", post_div.css(".forum-display__thread-date::text").extract())
# # items["post_views"] = post_div.css(".hidden-sm > a::text").extract()
# print("post_names", post_div.css(".hidden-sm > a::text").extract())
# # items["post_replies"] = post_div.css(".hidden-sm > a::text").extract()
# print("post_names", post_div.css(".hidden-sm > a::text").extract())
# # yield items
|
import numpy as np
A = np.arange(12).reshape(3,4)
print(A)
#根据行进行分割
print(np.split(A,2,axis=1)) #根据行分成两组两个列
'''
[array([[0, 1],
[4, 5],
[8, 9]]), array([[ 2, 3],
[ 6, 7],
[10, 11]])]
'''
#等效分割
print(np.split(A, 1, axis=0)) # 根据列分成两组两个列
'''
[array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])]
'''
#不等效的分割
print(np.array_split(A, 1, axis=0)) # 根据列分成两组两个列
#进行横向和纵向的分割
print(np.vsplit(A,3)) #纵向分割
# [array([[0, 1, 2, 3]]), array([[4, 5, 6, 7]]), array([[ 8, 9, 10, 11]])]
print(np.hsplit(A,2)) #横向分割
'''
[array([[0, 1],
[4, 5],
[8, 9]]), array([[ 2, 3],
[ 6, 7],
[10, 11]])]
'''
|
class Solution:
    def twoCitySchedCost(self, costs: 'List[List[int]]') -> int:
        """Minimum cost to fly exactly half the people to city A, half to B.

        Greedy: send everyone to their cheaper city, then move the people
        with the smallest switching penalty until the split is even.

        Fixes: the `List` annotation is now a string so the method defines
        cleanly without a `from typing import List` (the original raised
        NameError at class-body execution); the loop variable no longer
        shadows the `list` builtin.
        """
        total = 0
        a_count = 0
        b_count = 0
        switch_to_a = []  # extra cost to move a B-assigned person to A
        switch_to_b = []  # extra cost to move an A-assigned person to B
        for cost_a, cost_b in costs:
            if cost_a < cost_b:
                total += cost_a
                a_count += 1
                switch_to_b.append(cost_b - cost_a)
            else:
                total += cost_b
                b_count += 1
                switch_to_a.append(cost_a - cost_b)
        half = len(costs) // 2
        # rebalance by paying the cheapest switching penalties
        if a_count < half:
            switch_to_a.sort()
            total += sum(switch_to_a[:half - a_count])
        elif b_count < half:
            switch_to_b.sort()
            total += sum(switch_to_b[:half - b_count])
        return total
import FWCore.ParameterSet.Config as cms
from Calibration.TkAlCaRecoProducers.AlcaSiStripLorentzAngleHarvester_cfi import *
from DQMServices.Components.EDMtoMEConverter_cfi import *
# Convert the MonitorElements stored in the event/lumi products back into
# the DQMStore for this calibration's harvesting step.
EDMtoMEConvertSiStripLorentzAngle = EDMtoMEConverter.clone(
    lumiInputTag = ("MEtoEDMConvertSiStripLorentzAngle","MEtoEDMConverterLumi"),
    runInputTag = ("MEtoEDMConvertSiStripLorentzAngle","MEtoEDMConverterRun")
)
DQMStore = cms.Service("DQMStore")
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
# harvester recording job metadata under the AlCaReco folder
dqmEnvSiStripLorentzAngle = DQMEDHarvester('DQMHarvestingMetadata',
    subSystemFolder = cms.untracked.string('AlCaReco'),
)
# full harvesting sequence for the SiStrip Lorentz-angle calibration
ALCAHARVESTSiStripLorentzAngle = cms.Sequence( EDMtoMEConvertSiStripLorentzAngle + alcaSiStripLorentzAngleHarvester + dqmEnvSiStripLorentzAngle )
|
class ImportanceErrors(Exception):
    """Raised when a task's importance/urgency options are out of range.

    The exception payload echoes the valid options alongside the values
    that were actually received. (The 'recieved_options' key keeps its
    historic misspelling: API clients may depend on the exact key.)
    """

    def __init__(self, data):
        valid_options = {
            "importance": [1, 2],
            "urgency": [1, 2],
        }
        received = {
            "importance": data['importance'],
            "urgency": data['urgency'],
        }
        self.message = {
            "error": {"valid_options": valid_options,
                      },
            "recieved_options": received,
        }
        super().__init__(self.message)
|
"""
This module serves as a web frontend to allow getting requested username
from MySQL via db_connector from a browser (or by json request, too).
made to make testing with selenium easier.
"""
from flask import Flask
import db_connector as db
app = Flask(__name__)
@app.route('/users/get_user_data/<user_id>', methods=['GET'])
def user(user_id):
    """
    queries the database via json request to db_connector for requested
    username by supplied user_id.
    :param user_id: requested user id # to check DB against, in json.
    :return: HTML, test-friendly, response with username or error.
    """
    db_response = db.get_from_db({'id': user_id})
    # db_connector signals "user not found" with the literal int 500
    if db_response == 500:
        return f'<H1 id="error">No such user: {user_id}</H1>'
    else:
        # the id attributes make the elements easy to locate from selenium
        return f'<H1 id="user">' + db_response + '</H1>'
@app.route('/stop_server')
def stop_server():
    """
    Stops the running server by sending a CTRL-C / SIGTERM
    thru signal module
    """
    import os
    import signal
    try:
        # on windows:
        # CTRL_C_EVENT exists only on Windows; elsewhere the attribute
        # lookup raises AttributeError and we fall through below.
        os.kill(os.getpid(), signal.CTRL_C_EVENT)
        print('server stopped - SIGTERM')
        return 'server stopped'
    except AttributeError:
        # on linux:
        os.kill(os.getpid(), signal.SIGTERM)
        print('server stopped')
        return 'server stopped'
if __name__ == '__main__':
    # debug server on a non-default port so it can run beside other apps
    app.run(host='127.0.0.1', debug=True, port=5001)
|
from setuptools import setup
# NOTE(review): setuptools expects `author` to be a single string; passing a
# list is emitted verbatim into the package metadata - confirm intended.
setup(
    name='lenin',
    version='0.1',
    author=['brainopia', 'gazay'],
    packages=['lenin', 'lenin.augmentors', 'lenin.datasets', 'lenin.preloader'],
    install_requires=['torchbearer', 'scikit-learn'],
)
|
import curses
# Minimal curses demo: report mouse coordinates and draw a block character
# at each click position until 'q' is pressed.
screen = curses.initscr()
curses.noecho()     # do not echo typed keys
curses.curs_set(0)  # hide the terminal cursor
screen.keypad(1)    # deliver special keys (incl. KEY_MOUSE) as constants
curses.mousemask(1) # enable mouse click events
screen.addstr("This is a Sample Curses Script\n\n")
while True:
    event = screen.getch()
    if event == ord("q"): break
    if event == curses.KEY_MOUSE:
        _, mx, my, _, _ = curses.getmouse()
        # blank the status line, then print the new coordinates there
        screen.addstr(10, 10, f"{' ' * 20}")
        screen.addstr(10, 10, f"mouse {mx} {my}")
        screen.addstr(my, mx, "█")
        #screen.clear()
curses.endwin()
|
# http://www.practicepython.org/exercise/2014/03/05/05-list-overlap.html
# Build two random sample lists and report the values they share, without
# duplicates, in the order they first appear in `a`.
import random
#a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
#b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
a = random.sample(range(120), k = 10)
b = random.sample(range(101), k = 8)
#common = set(a).intersection(set(b)) #clean version of the below
b_members = set(b)
common = []
for value in a:
    if value in b_members and value not in common:
        common.append(value)
print (a)
print (b)
print (common)
from collections import Counter
from numpy import *
# k-nearest-neighbour (k=1) classifier for the digits dataset: load features
# and labels from CSV, label each test row by its nearest training row
# (squared Euclidean distance) and write predictions to digitsOutput.csv.
with open('hw12Data/digitsDataset/trainFeatures.csv') as f:
    inp = f.readlines()
trainFeatures = []
for line in inp:
    trainFeatures.append(array([float(x) for x in line.split(',')]))
with open('hw12Data/digitsDataset/trainLabels.csv') as f:
    inp = f.readlines()
trainLabels = []
for line in inp:
    trainLabels.append(float(line))
with open('hw12Data/digitsDataset/valFeatures.csv') as f:
    inp = f.readlines()
valFeatures = []
for line in inp:
    valFeatures.append(array([float(x) for x in line.split(',')]))
with open('hw12Data/digitsDataset/valLabels.csv') as f:
    inp = f.readlines()
valLabels = []
for line in inp:
    valLabels.append(float(line))
with open('hw12Data/digitsDataset/testFeatures.csv') as f:
    inp = f.readlines()
testFeatures = []
for line in inp:
    testFeatures.append(array([float(x) for x in line.split(',')]))
# number of neighbours that vote; k=1 = label of the single nearest row
k = 1
with open('digitsOutput.csv', 'w') as f:
    classifications = []
    for i in range(len(testFeatures)):
        distances = []
        for j in range(len(trainFeatures)):
            # squared Euclidean distance (sqrt unnecessary for ranking)
            arr = testFeatures[i] - trainFeatures[j]
            arr = arr*arr
            dist = sum(arr)
            distances.append((dist, trainLabels[j]))
        distances.sort(key=lambda x: x[0])
        nearest = []
        for ind in range(k):
            nearest.append(distances[ind][1])
        # NOTE(review): seeding Counter with [0..9] gives every class a
        # baseline count of 1 -- confirm this prior is intended rather
        # than Counter() over the nearest labels alone.
        counts = Counter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        for label in nearest:
            counts[label] += 1
        classification = counts.most_common(1)[0][0]
        f.write(str(classification) + '\n')
        classifications.append(classification)
# validation-accuracy check, kept for reference:
# total = len(valFeatures)
# numCorrect = 0.0
# for i in range(len(valLabels)):
#     if classifications[i] == valLabels[i]:
#         numCorrect += 1
# print(numCorrect/total)
|
import pytest
def solution_on2(n, arr):
    """Naive O(N*M) MaxCounters: a value 1..n increments that counter; any
    value > n eagerly raises every counter to the running maximum."""
    counters = [0] * n
    peak = float('-inf')  # largest counter value seen so far
    for op in arr:
        if op > n:
            # "max counter" operation: apply immediately to all counters
            counters = [peak] * n
        else:
            counters[op - 1] += 1
            peak = max(peak, counters[op - 1])
    return counters
def solution_better(n, arr):
    """MaxCounters in O(N + M): a "set all to max" operation is deferred by
    remembering a floor value instead of touching every counter."""
    counters = [0] * n
    running_max = float('-inf')  # largest counter value seen so far
    floor = running_max          # level implied by past max-operations
    for op in arr:
        if op > n:
            # lazy "max counter": record the level, apply it on demand
            floor = running_max
        else:
            idx = op - 1
            # lift the counter to the floor first, then increment
            counters[idx] = max(counters[idx], floor) + 1
            if counters[idx] > running_max:
                running_max = counters[idx]
    # settle counters never touched after the last max-operation
    return [max(value, floor) for value in counters]
@pytest.mark.parametrize(
    "n, arr, result",
    [
        # case 1: max-op (6 > n) in the middle; case 2: max-op (5 > n) at
        # the end, so every counter finishes at the running maximum
        (5, [4, 1, 4, 3, 6, 2, 2, 2, 5], [2, 5, 2, 2, 3]),
        (4, [4, 1, 4, 3, 5, 2, 2, 2, 5], [5, 5, 5, 5]),
    ]
)
def test_solution(n, arr, result):
    # both the O(N*M) and the O(N+M) implementations must agree
    assert solution_on2(n, arr) == result
    assert solution_better(n, arr) == result
|
class Anima:
    """Track watching progress for an anime series.

    Attributes:
        animaTitle: title of the series.
        season: current season number (defaults to 1).
        episode: number of episodes in the season (defaults to 12).
        currentlyOnEpisode: episode currently being watched (defaults to 1).
    """

    def __init__(self, animaTitle):
        self.animaTitle = animaTitle
        self.season = 1
        self.episode = 12
        self.currentlyOnEpisode = 1

    # getter method for anima title
    def get_anima_title(self):
        return self.animaTitle

    # setter method for anima title
    def set_anima_title(self, title):
        self.animaTitle = title

    # getter method for season
    def get_season(self):
        return self.season

    # setter method for season
    # (fix: was misnamed set_episode and silently shadowed by the episode
    # setter below, leaving the season impossible to change)
    def set_season(self, season):
        self.season = season

    # getter method for episode
    def get_episode(self):
        return self.episode

    # setter method for episode
    def set_episode(self, episode):
        self.episode = episode

    # getter method for currently-watched episode
    def get_currently_on_episode(self):
        return self.currentlyOnEpisode

    # setter method for currently-watched episode
    # (fix: previously wrote self.currentlyOn, so the getter never saw updates)
    def set_currently_on_episode(self, currentlyOn):
        self.currentlyOnEpisode = currentlyOn
|
from testPipe import *
from pyspark.ml import Pipeline
from pyspark.ml.classification import *
from pyspark.ml.feature import *
def getSpark():
    """Return (or create) the shared SparkSession."""
    return SparkSession.builder.getOrCreate()


def _test_logistic_regressions(index):
    """Run the two LogisticRegression configurations against dataset `index`."""
    for reg_param in (0.1, 0.01):
        lr = LogisticRegression(regParam=reg_param, maxIter=20)
        pipeline = Pipeline(stages=[lr])
        description = "Logistic Regression con regParam=%s e maxIter=20" % reg_param
        testPipeline(pipeline, description, index)


def _test_random_forests(index):
    """Run the two RandomForestClassifier configurations against dataset `index`."""
    for max_depth, num_trees in ((5, 20), (8, 40)):
        clf = RandomForestClassifier(maxDepth=max_depth, numTrees=num_trees, seed=42)
        pipeline = Pipeline(stages=[clf])
        description = "Random Forest con maxDepth=%d e numTrees=%d" % (max_depth, num_trees)
        testPipeline(pipeline, description, index)


# The original script repeated the same two experiment batches verbatim for
# datasets 8, 9 and 10, restarting the Spark session between batches.  The
# loop below reproduces that exact getSpark()/stop() sequence, including the
# quirk that the final Random Forest batch leaves the session open.
spark = getSpark()
for index in (8, 9, 10):
    _test_logistic_regressions(index)
    spark.stop()
    spark = getSpark()
    _test_random_forests(index)
    if index != 10:
        spark.stop()
        spark = getSpark()
import time
def warn_slow(func):
    """Decorator: print a warning when the wrapped call takes more than 2 s.

    The wrapped function's return value is passed through unchanged.
    Fix: apply functools.wraps so the wrapper no longer clobbers the
    decorated function's __name__/__doc__.
    """
    from functools import wraps  # local import keeps the block self-contained

    @wraps(func)
    def inner(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        duration = end - start
        if duration > 2:
            # include the argument values so the slow call can be reproduced
            print(f"execution of {func.__name__} with {(*args, *kwargs.values())} arguments took more than 2 seconds")
        return result
    return inner
@warn_slow
def func_slow(x, y):
    # deliberately exceeds the 2-second threshold so the warning fires
    time.sleep(3)
@warn_slow
def func_fast(x, y):
    # fast path: finishes immediately, so no warning is printed
    print(x, y)
# demo: the first call warns, the second does not
func_slow(1, 2)
func_fast(1, 2)
#!/usr/bin/env python3
import argparse
import collections
import itertools
import math
import sys
def take(c, iterable):
    """Return a list of at most the first c items of iterable.

    Fix: the original compared ``len(out) == c`` after each append, so for
    c <= 0 the test could never be hit and the WHOLE iterable was returned;
    islice gives the intended "at most c items" semantics (c <= 0 -> []).
    """
    return list(itertools.islice(iterable, max(c, 0)))
def skip(c, iterable):
    """Lazily yield every item of `iterable` after discarding the first c.

    A non-positive c yields everything.  Same behavior as the original
    two-loop generator, collapsed into a single counting loop.
    """
    remaining = c
    for item in iterable:
        if remaining > 0:
            remaining -= 1
        else:
            yield item
def repeated(c, iterable):
    """Yield each item of `iterable` c times in a row (c <= 0 yields nothing)."""
    for item in iterable:
        yield from itertools.repeat(item, c)
def m(x, y):
    """Return x * y.

    Generalized: the original only handled x in {-1, 0, 1} (the FFT base
    pattern values) and fell through to an implicit None for any other x;
    plain multiplication is identical for those inputs and correct for all.
    """
    return x * y
PROGRESS = "---///|||\\\\\\"  # spinner frames cycled through while FFT2 works
def FFT(phase, sequence):
    """Run `phase` rounds of FFT2 over `sequence`, echoing progress dots,
    and return the final digits joined into a single string."""
    current = sequence
    for round_index in range(phase):
        current = FFT2(round_index, current)
        print("\x08..", end="", flush=True)
    print()
    return "".join(str(digit) for digit in current)
def FFT2(phase, sequence):
    """One FFT round: output digit i is |dot(sequence, pattern_i)| mod 10,
    where pattern_i is the base pattern with each value repeated (i + 1)
    times, cycled, and shifted left by one position."""
    base = [0, 1, 0, -1]
    out = []
    for pos in range(len(sequence)):
        # secondary sequence we multiply with
        pattern = skip(1, itertools.cycle(repeated(pos + 1, base)))
        total = sum(m(p, s) for p, s in zip(pattern, sequence))
        out.append(abs(total) % 10)
        print("\x08" + PROGRESS[pos % len(PROGRESS)], end="", flush=True)
    return out
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input', type=argparse.FileType('r'), nargs='?', default=sys.stdin)
args = parser.parse_args(sys.argv[1:])
print(repeated(2, [0,1,0,-1]))
print(list(skip(2, [0,1,0,-1])))
print(list(skip(2, repeated(3, [0,1,0,-1]))))
sequence = args.input.read().strip()
#print(FFT(0, list(map(int, "12345678"))))
#print(FFT(1, list(map(int, "12345678"))))
#print(FFT(2, list(map(int, "12345678"))))
#print(FFT(3, list(map(int, "12345678"))))
#print(FFT(100, list(map(int, "80871224585914546619083218645595"))))
#print(FFT(100, list(map(int, "03036732577212944063491565474664" * 10000))))
print(sequence)
offset = int(sequence[:8])
print(offset)
p1 = FFT(100, list(map(int, sequence)))
print(p1)
|
#!/usr/bin/env python
from socket import *
import pickle
class remoteXclient:
    """Python 2 client that buffers arbitrary plotting-style calls and ships
    them, pickled, to a RemoteX server on localhost:9898 whenever show()
    is invoked."""
    def __init__(self):
        # queued calls, each stored as {'func': name, 'args': ..., 'kwargs': ...}
        self.cmdstring_list = []
        #self.socketup()
        #if not self.serverup:
        #    print "RemoteX Client: server not found. Please start the server on your local machine."
        #self.socketdown()
    def __getattr__(self, func):
        # Intercept EVERY unknown attribute as a remote call to record.
        def interceptmethod(*args, **kwargs):
            #self.socketup()
            cmdstring = {'func': func, 'args': args, 'kwargs': kwargs}
            self.cmdstring_list.append(cmdstring)
            # 'show' is the flush point: send the whole queue to the server
            if func == 'show':
                self.socketup()
                if self.serverup:
                    # protocol 2 pickle of the full call list
                    self.socketobj.send(pickle.dumps(self.cmdstring_list,2))
                    self.socketdown()
                else:
                    print "RemoteX Client: server is down, cannot forward."
                    print cmdstring
                self.cmdstring_list = []
            #else:
            #    self.cmdstring_list.append(cmdstring)
        return interceptmethod
    def socketup(self, ip='', port=9898):
        # Connect to the local server; self.serverup records the outcome.
        self.socketobj = socket(AF_INET, SOCK_STREAM)
        try:
            self.socketobj.connect((ip,port))
            self.serverup = True
        except:
            self.serverup = False
    def socketdown(self):
        self.socketobj.close()
if __name__ == '__main__':
    # Demo: queue two figures' worth of plotting calls; each show() flushes
    # the queue to the RemoteX server.
    remX = remoteXclient()
    remX.figure()
    remX.plot(range(20),range(20),linewidth=0.5)
    remX.show()
    remX.figure()
    remX.plot(range(10), range(10))
    remX.title('Test Title, 2nd Figure')
    remX.show()
|
import sys
# SW Expert Academy #5108: for each test case, perform M insertions into the
# list and print the element at index L.  stdin is redirected from a local
# input file so the solution can be run outside the judge.
sys.stdin = open("D3_5108_input.txt", "r")
T = int(input())
for test_case in range(T):
    # N: initial list length, M: number of insertions, L: index to report
    N, M, L = map(int, input().split())
    data = list(map(int, input().split()))
    for _ in range(M):
        idx, val = map(int, input().split())
        data.insert(idx, val)
    print("#{} {}".format(test_case + 1, data[L]))
'''
Created on Oct 24, 2016
@author: Noor Jahan Mukammel
Program: set_method: issuperset()
* x.issuperset(y) returns True, if
* x is a superset of y.
* ">=" is an abbreviation for "issuperset of"
* ">" is used to check if a set is a proper superset of a set.
'''
x = {"a","b","c","d","e"}
y = {"c","d"}
print(x.issuperset(y))
print(x > y)
print(x >= y)
print(x >= x)
print(x > x)
print(x.issuperset(x))
|
import sys
"""The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143 ?"""
def isprime(x):
    """Return True when x is a prime number.

    Fix: the original looped range(x - 1, 2, -1), which stops ABOVE 2 and so
    never tests divisibility by 2 (isprime(4) was True), and it treated
    x < 2 as prime.  Trial division up to sqrt(x) is correct and O(sqrt(x)).
    """
    if x < 2:
        return False
    i = 2
    while i * i <= x:
        if x % i == 0:
            return False
        i += 1
    return True
def isfactor(num, factor):
    """Return True when `factor` divides `num` exactly."""
    remainder = num % factor
    return remainder == 0
def main():
x = 600851475143L
print 'Buscando factor primo: ' + str(x)
i = int(x/2)
while i > 2:
if isprime(i) and isfactor():
print str(i)
sys.exit(0)
i -= 1
if __name__ == '__main__':
main()
|
import math
# For RSA modulus n = p*q, every valid exponent e (coprime to phi) leaves
# exactly (gcd(e-1, p-1)+1) * (gcd(e-1, q-1)+1) messages unencrypted
# (fixed points).  Sum all e for which that count is 9.
p=1009
q=3643
# NOTE: n is computed for reference but unused below.
n = p * q
φ = (p-1)*(q-1)
tmp_list = []
tmp = 0
dic = {}
for e in range(2, φ):
    # e is a valid RSA exponent only when gcd(e, phi) == 1
    if (math.gcd(e, φ) == 1):
        tmp_list.append(e)
        dic[e] = (math.gcd(e-1, p-1)+1) * (math.gcd(e-1, q-1)+1)
for e in tmp_list:
    if dic[e] == 9 :
        tmp = tmp + e
print(tmp)
|
# Write a program that stores the subjects of a course (e.g. Matemáticas,
# Física, Química, Historia, Lengua) in a list, asks the user for the grade
# obtained in each subject, and then prints them as
# "En <asignatura> has sacado <nota>", where <asignatura> is each subject
# in the list and <nota> the corresponding grade entered by the user.
asignaturas = ["Matemáticas", "Física", "Química", "Historia", "Lengua"]
notas=[]
for a in asignaturas:
    c = input("¿Qué has sacado en " + a + "? ")
    notas.append(c)
for i in range(len(asignaturas)): # iterate by index so subject and grade stay paired
    print("En " + asignaturas[i] + " has sacado " + notas[i])
# "for a in asignaturas" makes `a` take each subject value in turn, so the
# script asks about every subject and stores each answer with notas.append.
# The second loop walks the indices of the list and prints the subject at
# position i together with the grade stored at the same position.
__author__ = 'juliewe'
if __name__=='__main__':
    # Python 2 script: parse one tab-separated line of
    # (entry, feat1, score1, feat2, score2, ...) and sum the scores.
    filename='/Volumes/LocalScratchHD/juliewe/Documents/workspace/Compounds/data/WNCompounds/teststuff/oneline'
    instream = open(filename)
    lines=[]
    for line in instream:
        lines.append(line.rstrip())
    fields=lines[0].split('\t')
    print len(fields)
    # first field is the entry name; the rest are (feature, score) pairs
    entry =fields[0]
    print entry
    # NOTE(review): `sum` shadows the builtin -- harmless here, but rename
    # if the builtin is ever needed below.
    sum=0
    for i in range(1,len(fields),2):
        try:
            feat = fields[i]
            score = float(fields[i+1])
            sum+=score
        except(IndexError):
            # odd trailing field: a feature with no matching score
            print i, feat
    print sum
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper classes to simplify common cases of cache implementation."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import collections
import datetime
import logging
import sys
from common import caching
from common import utils
from models import config
from models import counters
from models import transforms
from google.appengine.api import namespace_manager
# Bundle of the classes/objects produced for one cache registration; see
# CacheFactory.build for the role of each field.
CacheFactoryEntry = collections.namedtuple(
    'CacheFactoryEntry',
    ['cache_class',
     'cache_entry_class',
     'connection_class',
     'manager_class',
     'config_property',
     'perf_counter_length',
     'perf_counter_size'])
class CacheFactory(object):
    # Registry of built cache families, keyed by registration name.
    _CACHES = {}
    @classmethod
    def build(cls, name, label, desc, max_size_bytes, ttl_sec, dao_class):
        """Build the family of classes for a process-scoped Entity cache.

        The nested classes below close over this method's arguments
        (max_size_bytes, ttl_sec, dao_class, config_property), which is why
        they are defined inside build() rather than at module level.

        Args:
            name: Name under which cache is registered.  This should be in
                the lower_case_and_underscores naming style.
            label: Label for the course-level setting enabling/disabling
                process-level caching for this entity type.
            desc: Description to add to the course-level setting
                enabling/disabling process level caching for this entity
                type.
            max_size_bytes: Largest size the cache may take on.  If adding
                an item to the cache would make it exceed this size, items
                are LRU'd out until the item fits.
            ttl_sec: Number of seconds after which cached entries are
                considered stale and a (lazy) refresh is performed.
            dao_class: The class of an DAO in the Entity/DTO/DAO scheme
                common for Course Builder data access.  Used for itself
                and also for its references to its matching DTO, Entity
                classes.
        Returns:
            A CacheFactoryEntry containing the constellation of objects
            that interoperate to form a cache.
        """
        # Building is idempotent: re-registration returns the cached family.
        if name in cls._CACHES:
            return cls._CACHES[name]
        config_property = config.ConfigProperty(
            'gcb_can_use_%s_in_process_cache' % name, bool, desc, label=label,
            default_value=True)
        class EntityCache(caching.ProcessScopedSingleton):
            """This class holds in-process global cache of objects."""
            @classmethod
            def get_cache_len(cls):
                # pylint: disable=protected-access
                return len(cls.instance()._cache.items.keys())
            @classmethod
            def get_cache_size(cls):
                # pylint: disable=protected-access
                return cls.instance()._cache.total_size
            def __init__(self):
                self._cache = caching.LRUCache(max_size_bytes=max_size_bytes)
                self._cache.get_entry_size = self._get_entry_size
            def _get_entry_size(self, key, value):
                # Size estimate used for LRU eviction accounting.
                if not value:
                    return 0
                return sys.getsizeof(key) + sys.getsizeof(value)
            @property
            def cache(self):
                return self._cache
        class CacheEntry(caching.AbstractCacheEntry):
            """Cache entry containing an entity."""
            def __init__(self, entity):
                self.entity = entity
                # creation timestamp; drives the TTL expiry check below
                self.created_on = datetime.datetime.utcnow()
            def getsizeof(self):
                return (
                    dao_class.ENTITY.getsizeof(self.entity) +
                    sys.getsizeof(self.created_on))
            def has_expired(self):
                age = (datetime.datetime.utcnow() -
                       self.created_on).total_seconds()
                return age > ttl_sec
            def is_up_to_date(self, key, update):
                # Entry is current when timestamps match, or when both the
                # update and the cached value are "not found" markers.
                if update and self.entity:
                    return update.updated_on == self.entity.updated_on
                return not update and not self.entity
            def updated_on(self):
                if self.entity:
                    return self.entity.updated_on
                return None
            @classmethod
            def externalize(cls, key, entry):
                # Convert the cached Entity into the DTO handed to callers.
                entity = entry.entity
                if not entity:
                    return None
                return dao_class.DTO(
                    entity.key().id_or_name(),
                    transforms.loads(entity.data))
            @classmethod
            def internalize(cls, key, entity):
                return cls(entity)
        class CacheConnection(caching.AbstractCacheConnection):
            PERSISTENT_ENTITY = dao_class.ENTITY
            CACHE_ENTRY = CacheEntry
            @classmethod
            def init_counters(cls):
                caching.AbstractCacheConnection.init_counters()
            @classmethod
            def is_enabled(cls):
                return config_property.value
            def __init__(self, namespace):
                caching.AbstractCacheConnection.__init__(self, namespace)
                self.cache = EntityCache.instance().cache
            def get_updates_when_empty(self):
                """Load in all ResourceBundles when cache is empty."""
                q = self.PERSISTENT_ENTITY.all()
                for entity in caching.iter_all(q):
                    self.put(entity.key().name(), entity)
                    self.CACHE_UPDATE_COUNT.inc()
                # we don't have any updates to apply; all items are new
                return {}
        class ConnectionManager(caching.RequestScopedSingleton):
            """Class that provides access to in-process Entity cache.

            This class only supports get() and does not intercept
            put() or delete() and is unaware of changes to
            Entities made in this very process.  When
            entites change, the changes will be picked up
            when new instance of this class is created.  If you are
            watching perfomance counters, you will see EVICT and
            EXPIRE being incremented, but not DELETE or PUT.
            """
            def __init__(self):
                # Keep a separate CacheConnection for each namespace that
                # makes a get() request.
                self._conns = {}
            def _conn(self, ns):
                connected = self._conns.get(ns)
                if not connected:
                    logging.debug(
                        'CONNECTING a CacheConnection for namespace "%s",', ns)
                    connected = CacheConnection.new_connection(ns)
                    self._conns[ns] = connected
                return connected
            @classmethod
            def _ns(cls, app_context):
                if app_context:
                    return app_context.get_namespace_name()
                return namespace_manager.get_namespace()
            def _get(self, key, namespace):
                # Fast path: serve from the in-process cache when present.
                found, stream = self._conn(namespace).get(key)
                if found and stream:
                    return stream
                # Miss: load from the datastore, then populate the cache
                # (a None entry records "known not to exist").
                with utils.Namespace(namespace):
                    entity = dao_class.ENTITY_KEY_TYPE.get_entity_by_key(
                        dao_class.ENTITY, str(key))
                    if entity:
                        self._conn(namespace).put(key, entity)
                        return dao_class.DTO(
                            entity.key().id_or_name(),
                            transforms.loads(entity.data))
                    self._conn(namespace).CACHE_NOT_FOUND.inc()
                    self._conn(namespace).put(key, None)
                    return None
            def _get_multi(self, keys, namespace):
                return [self._get(key, namespace) for key in keys]
            @classmethod
            def get(cls, key, app_context=None):
                # pylint: disable=protected-access
                return cls.instance()._get(key, cls._ns(app_context))
            @classmethod
            def get_multi(cls, keys, app_context=None):
                # pylint: disable=protected-access
                return cls.instance()._get_multi(keys, cls._ns(app_context))
        # Per-entity performance counters polled from the live cache.
        cache_len = counters.PerfCounter(
            'gcb-models-%sCacheConnection-cache-len' %
            dao_class.ENTITY.__name__,
            'Total number of items in the cache')
        cache_len.poll_value = EntityCache.get_cache_len
        cache_size = counters.PerfCounter(
            'gcb-models-%sCacheConnection-cache-bytes' %
            dao_class.ENTITY.__name__,
            'Total number of bytes in the cache.')
        cache_size.poll_value = EntityCache.get_cache_size
        CacheConnection.init_counters()
        entry = CacheFactoryEntry(
            EntityCache, CacheEntry, CacheConnection, ConnectionManager,
            config_property, cache_len, cache_size)
        cls._CACHES[name] = entry
        return entry
    @classmethod
    def get_cache_instance(cls, name):
        # Returns the process-scoped cache singleton, or None if unregistered.
        if name not in cls._CACHES:
            return None
        return cls._CACHES[name].cache_class.instance()
    @classmethod
    def get_manager_class(cls, name):
        # Returns the ConnectionManager class, or None if unregistered.
        if name not in cls._CACHES:
            return None
        return cls._CACHES[name].manager_class
    @classmethod
    def all_instances(cls):
        return [cls.get_cache_instance(name) for name in cls._CACHES]
|
import pandas as pd
# Print CityTemps.csv row by row with a naive comma split; the string below
# preserves the disabled pandas DataFrame variant for reference.
data = open("CityTemps.csv")
for row in data:
    print(row.split(","))
"""
table = pd.read_csv("CityTemps.csv")
print(table)
print("Fetching only years")
print(table["Year"])
print("----iloc----")
print(table.iloc[1:5])
"""
#!/usr/bin/python
# Collect every line of input_ex5.txt that names a Swiss (.ch) website.
swiss_websites = []
with open('input_ex5.txt', 'r') as source:
    for raw_line in source:
        candidate = raw_line.strip()
        if candidate.endswith('.ch'):
            swiss_websites.append(candidate)
print(swiss_websites)
|
import xml.dom.minidom
# Build a tiny XML document -- a <books> root with a name attribute and a
# text child -- and write it to mybook.xml.
doc = xml.dom.minidom.Document()
# Create the root element, its "name" attribute, and a text node.
root_node = doc.createElement('books')
name_attr = doc.createAttribute('name')
name_attr.value = '马帅哥'
# ....
txt_node = doc.createTextNode('数据')
# Attach the attribute to the root element.
root_node.setAttributeNode(name_attr)
# Attach the text node to the root element.
root_node.appendChild(txt_node)
# Attach the root element to the document.
doc.appendChild(root_node)
fd = open('mybook.xml', 'w')
doc.writexml(fd, indent='', addindent='', newl='\n')
fd.close()
|
class SAMPLESCAN():
    # NOTE(review): relies on scan-server builtins (create_channel_device,
    # caget/caput, linspace, plot, save_dataset, set_exec_pars, log, time,
    # pink_save_bl_snapshot, ...) injected by the PShell-style environment;
    # confirm before running standalone.
    def scan(self, axis, start, end, step, exposure):
        """Scan the sample stage along `axis` ('x' or 'y') from `start` to
        `end`, triggering one Eiger exposure per point and plotting/saving
        the averaged spectrum at each position.

        NOTE(review): `step` is passed to linspace as the POINT COUNT, not
        the increment -- confirm the intended meaning with callers.
        """
        print("Sample scan for eiger")
        ## variables
        sensor = []
        motor = []
        prescan_pos = 0
        DEBUG=0
        ## Eiger channels
        Eiger_acquire = create_channel_device("PINK:EIGER:cam1:Acquire", type='i')
        Eiger_status = create_channel_device("PINK:EIGER:cam1:Acquire_RBV", type='i')
        Eiger_status.setMonitored(True)
        Eiger_frameID = create_channel_device("PINK:EIGER:cam1:ArrayCounter_RBV", type='d')
        Eiger_frameID.setMonitored(True)
        # Eiger_roi_array = create_channel_device("PINK:EIGER:image3:ArrayData", type='[d', size=int(Eiger_ROI_X*Eiger_ROI_Y))
        # Eiger_roi_array.setMonitored(True)
        # Eiger_Spectra = create_channel_device("PINK:EIGER:spectrum_RBV", type='[d', size=Eiger_ROI_X)
        # Eiger_Spectra.setMonitored(True)
        # Eiger_Spectra_sum = create_channel_device("PINK:EIGER:specsum_RBV", type='[d', size=Eiger_ROI_X)
        Eiger_trigger = create_channel_device("PINK:EIGER:cam1:Trigger", type='d')
        SENSOR = create_channel_device("PINK:EIGER:spectra_avg", type='d')
        SENSOR.setMonitored(True)
        ## sample motor (x -> m10, y -> m9)
        if axis=="x":
            MOTOR = create_channel_device("PINK:SMA01:m10.VAL")
            MOTOR_RBV = create_channel_device("PINK:SMA01:m10.RBV")
            MOTOR_RBV.setMonitored(True)
            MOTOR_DMOV = create_channel_device("PINK:SMA01:m10.DMOV")
            MOTOR_DMOV.setMonitored(True)
        if axis=="y":
            MOTOR = create_channel_device("PINK:SMA01:m9.VAL")
            MOTOR_RBV = create_channel_device("PINK:SMA01:m9.RBV")
            MOTOR_RBV.setMonitored(True)
            MOTOR_DMOV = create_channel_device("PINK:SMA01:m9.DMOV")
            MOTOR_DMOV.setMonitored(True)
        ## setup filename
        set_exec_pars(open=False, name="sample_scan_eiger", reset=True)
        ## save initial scan data
        save_dataset("scan/start_time", time.ctime())
        save_dataset("scan/type", "sample scan with eiger")
        ## configure scan positions
        positionarray = linspace(start, end, step)
        ## plot setup (title shows the fixed position of the OTHER axis)
        if axis=='x':
            ypos = '{:.1f}'.format(caget("PINK:SMA01:m9.RBV"))
            xlabel = 'X position'
            plottitle = 'Horizontal Sample Scan (Y='+ypos+')'
        else:
            ypos = '{:.1f}'.format(caget("PINK:SMA01:m10.RBV"))
            xlabel = 'Y position'
            plottitle = 'Vertical Sample Scan (X='+ypos+')'
        p1h=plot([None], [xlabel], title=plottitle)
        p1 = p1h.get(0)
        p1.getAxis(p1.AxisId.X).setRange(min(start, end),max(start,end))
        ## Stop eiger if it is currently acquiring, and wait until idle
        if Eiger_status.read():
            Eiger_acquire.write(0)
            if DEBUG: log("Eiger Stop", data_file = False)
        while(Eiger_status.read()):
            sleep(1)
        if DEBUG: log("Eiger Idle", data_file = False)
        ## setup eiger: one image per trigger, one trigger per scan point
        caput("PINK:EIGER:cam1:AcquireTime", exposure)
        sleep(1)
        caput("PINK:EIGER:cam1:AcquirePeriod", exposure+0.001)
        caput("PINK:EIGER:cam1:NumImages", 1)
        caput("PINK:EIGER:cam1:NumTriggers", len(positionarray))
        # manual trigger enable
        caput("PINK:EIGER:cam1:ManualTrigger", 1)
        sleep(0.5)
        ## arm detector
        Eiger_acquire.write(1)
        print("Scanning...")
        # saving pre scan position so the stage can be restored afterwards
        prescan_pos = MOTOR_RBV.read()
        # move to first position
        pos = positionarray[0]
        MOTOR.write(pos)
        MOTOR_RBV.waitValueInRange(pos, 1.0, 60000)
        MOTOR_DMOV.waitValueInRange(1, 0.5, 60000)
        # Open fast shutter
        caput("PINK:PLCGAS:ei_B01", 1)
        ## Main loop: move, trigger, wait for the frame, record the reading
        for pos in positionarray:
            MOTOR.write(pos)
            MOTOR_RBV.waitValueInRange(pos, 1.0, 60000)
            MOTOR_DMOV.waitValueInRange(1, 0.5, 60000)
            Eiger_trigger.write(1)
            #resp = SENSOR.waitCacheChange(1000*int(exposure+2))
            resp = Eiger_frameID.waitCacheChange(1000*int(exposure+2))
            sleep(0.1)
            if resp==False:
                print("Timeout: No data from eiger")
                continue
            sensor.append(SENSOR.take())
            motor.append(MOTOR_RBV.take())
            p1.getSeries(0).setData(motor, sensor)
        # close fast shutter
        caput("PINK:PLCGAS:ei_B01", 0)
        ## Stop eiger
        if Eiger_status.read():
            Eiger_acquire.write(0)
            if DEBUG: log("Eiger Stop", data_file = False)
        while(Eiger_status.read()):
            sleep(1)
        if DEBUG: log("Eiger Idle", data_file = False)
        ## restore eiger defaults (1 s exposure, single trigger, auto mode)
        caput("PINK:EIGER:cam1:AcquireTime", 1)
        sleep(1)
        caput("PINK:EIGER:cam1:AcquirePeriod", 1.001)
        caput("PINK:EIGER:cam1:NumImages", 1)
        caput("PINK:EIGER:cam1:NumTriggers", 1)
        # manual trigger enable
        caput("PINK:EIGER:cam1:ManualTrigger", 0)
        ## Save data
        save_dataset("raw/sensor", sensor)
        save_dataset("raw/blade", motor)
        ## Save plot data
        save_dataset("plot/title", plottitle)
        save_dataset("plot/xlabel", "Position")
        save_dataset("plot/ylabel", "Counts per second")
        save_dataset("plot/y_desc", "Pass 0")
        save_dataset("plot/x", motor)
        create_dataset("plot/y", 'd', False, (0, len(sensor)))
        append_dataset("plot/y", sensor)
        ## save data
        save_dataset("scan/finish_time", time.ctime())
        ## Move back to original position
        MOTOR.write(prescan_pos)
        # NOTE(review): this waits on `pos` (the LAST scan point), not on
        # prescan_pos -- likely a bug; confirm the intended wait target.
        MOTOR_RBV.waitValueInRange(pos, 1.0, 60000)
        MOTOR_DMOV.waitValueInRange(1, 0.5, 60000)
        ## save beamline/station snapshot
        pink_save_bl_snapshot()
        print("Scan complete")
|
#!/usr/bin/python3
import minimalmodbus
import serial
import time
class HM310P():
    """Modbus-RTU driver for the HM310P bench power supply.

    Every register access is retried because the serial link occasionally
    drops requests.  Improvements over the original: the four identical
    retry loops are factored into ``_retry``, and bare ``except:`` clauses
    (which also swallowed KeyboardInterrupt/SystemExit) were narrowed to
    ``except Exception``.
    """

    # maximum number of retries for a single register access
    rDepth = 100

    def __init__(self):
        self.supply = minimalmodbus.Instrument('/dev/dcpowersupply', 1, minimalmodbus.MODE_RTU)
        self.supply.serial.baudrate = 9600
        self.supply.serial.startbits = 1
        self.supply.serial.stopbits = 1
        self.supply.serial.parity = serial.PARITY_NONE
        self.supply.serial.bytesize = 8
        self.supply.timeout = 0.5

    def _retry(self, operation, *args):
        """Run operation(*args) until it stops returning "Error".

        Returns the operation's result on success, or False after
        rDepth + 1 failed attempts (same contract as the original loops).
        """
        attempts = 0
        while attempts <= self.rDepth:
            value = operation(*args)
            if not value == "Error":
                return value
            attempts += 1
            time.sleep(0.001)
        return False

    ##########################
    #### Power Management ####
    ##########################
    def get_power(self):
        """Read register 1 (output state), with retries; False on failure."""
        return self._retry(self.read_power)

    def read_power(self):
        """Single read of register 1; "Error" on any communication failure."""
        try:
            power = self.supply.read_register(1, 0)
            return power
        except Exception:
            return "Error"

    def power_on(self):
        self.set_power(1)

    def power_off(self):
        self.set_power(0)

    def set_power(self, status):
        """Write register 1 (output state), with retries; False on failure."""
        return self._retry(self.write_power, status)

    def write_power(self, status):
        """Single write of register 1; True on success, "Error" otherwise."""
        try:
            self.supply.write_register(1, status, 0)
            return True
        except Exception:
            return "Error"

    ############################
    #### Voltage Management ####
    ############################
    def get_voltage(self):
        """Read register 16 (voltage, 2 decimals), with retries."""
        return self._retry(self.read_voltage)

    def read_voltage(self):
        """Single read of register 16; "Error" on any communication failure."""
        try:
            voltage = self.supply.read_register(16, 2)
            return voltage
        except Exception:
            return "Error"

    def set_voltage(self, voltage):
        """Write register 48 (voltage set-point, 2 decimals), with retries."""
        return self._retry(self.write_voltage, voltage)

    def write_voltage(self, voltage):
        """Single write of register 48; True on success, "Error" otherwise."""
        try:
            self.supply.write_register(48, voltage, 2)
            return True
        except Exception:
            return "Error"
if __name__ == "__main__":
supply = HM310P()
supply.set_voltage(0)
supply.power_on()
time.sleep(5)
for i in range(0,101):
supply.set_voltage(i/100*5)
time.sleep(3)
supply.power_off() |
import AVFoundation
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestAVCaption(TestCase):
def test_enum_types(self):
self.assertIsEnumType(AVFoundation.AVCaptionAnimation)
self.assertIsEnumType(AVFoundation.AVCaptionDecoration)
self.assertIsEnumType(AVFoundation.AVCaptionFontStyle)
self.assertIsEnumType(AVFoundation.AVCaptionFontWeight)
self.assertIsEnumType(AVFoundation.AVCaptionRegionDisplayAlignment)
self.assertIsEnumType(AVFoundation.AVCaptionRegionScroll)
self.assertIsEnumType(AVFoundation.AVCaptionRegionWritingMode)
self.assertIsEnumType(AVFoundation.AVCaptionRubyAlignment)
self.assertIsEnumType(AVFoundation.AVCaptionRubyPosition)
self.assertIsEnumType(AVFoundation.AVCaptionTextAlignment)
self.assertIsEnumType(AVFoundation.AVCaptionTextCombine)
self.assertIsEnumType(AVFoundation.AVCaptionUnitsType)
def test_structs(self):
v = AVFoundation.AVCaptionDimension()
self.assertIsInstance(v.value, float)
self.assertIsInstance(v.units, int)
self.assertPickleRoundTrips(v)
v = AVFoundation.AVCaptionPoint()
self.assertIsInstance(v.x, AVFoundation.AVCaptionDimension)
self.assertIsInstance(v.y, AVFoundation.AVCaptionDimension)
self.assertPickleRoundTrips(v)
v = AVFoundation.AVCaptionSize()
self.assertIsInstance(v.width, AVFoundation.AVCaptionDimension)
self.assertIsInstance(v.height, AVFoundation.AVCaptionDimension)
self.assertPickleRoundTrips(v)
def test_constants(self):
self.assertEqual(AVFoundation.AVCaptionUnitsTypeUnspecified, 0)
self.assertEqual(AVFoundation.AVCaptionUnitsTypeCells, 1)
self.assertEqual(AVFoundation.AVCaptionUnitsTypePercent, 2)
self.assertEqual(AVFoundation.AVCaptionRegionDisplayAlignmentBefore, 0)
self.assertEqual(AVFoundation.AVCaptionRegionDisplayAlignmentCenter, 1)
self.assertEqual(AVFoundation.AVCaptionRegionDisplayAlignmentAfter, 2)
self.assertEqual(
AVFoundation.AVCaptionRegionWritingModeLeftToRightAndTopToBottom, 0
)
self.assertEqual(
AVFoundation.AVCaptionRegionWritingModeTopToBottomAndRightToLeft, 2
)
self.assertEqual(AVFoundation.AVCaptionRegionScrollNone, 0)
self.assertEqual(AVFoundation.AVCaptionRegionScrollRollUp, 1)
self.assertEqual(AVFoundation.AVCaptionAnimationNone, 0)
self.assertEqual(AVFoundation.AVCaptionAnimationCharacterReveal, 1)
self.assertEqual(AVFoundation.AVCaptionFontWeightUnknown, 0)
self.assertEqual(AVFoundation.AVCaptionFontWeightNormal, 1)
self.assertEqual(AVFoundation.AVCaptionFontWeightBold, 2)
self.assertEqual(AVFoundation.AVCaptionFontStyleUnknown, 0)
self.assertEqual(AVFoundation.AVCaptionFontStyleNormal, 1)
self.assertEqual(AVFoundation.AVCaptionFontStyleItalic, 2)
self.assertEqual(AVFoundation.AVCaptionDecorationNone, 0)
self.assertEqual(AVFoundation.AVCaptionDecorationUnderline, 1 << 0)
self.assertEqual(AVFoundation.AVCaptionDecorationLineThrough, 1 << 1)
self.assertEqual(AVFoundation.AVCaptionDecorationOverline, 1 << 2)
self.assertEqual(AVFoundation.AVCaptionTextCombineAll, -1)
self.assertEqual(AVFoundation.AVCaptionTextCombineNone, 0)
self.assertEqual(AVFoundation.AVCaptionTextCombineOneDigit, 1)
self.assertEqual(AVFoundation.AVCaptionTextCombineTwoDigits, 2)
self.assertEqual(AVFoundation.AVCaptionTextCombineThreeDigits, 3)
self.assertEqual(AVFoundation.AVCaptionTextCombineFourDigits, 4)
self.assertEqual(AVFoundation.AVCaptionTextAlignmentStart, 0)
self.assertEqual(AVFoundation.AVCaptionTextAlignmentEnd, 1)
self.assertEqual(AVFoundation.AVCaptionTextAlignmentCenter, 2)
self.assertEqual(AVFoundation.AVCaptionTextAlignmentLeft, 3)
self.assertEqual(AVFoundation.AVCaptionTextAlignmentRight, 4)
self.assertEqual(AVFoundation.AVCaptionRubyPositionBefore, 0)
self.assertEqual(AVFoundation.AVCaptionRubyPositionAfter, 1)
self.assertEqual(AVFoundation.AVCaptionRubyAlignmentStart, 0)
self.assertEqual(AVFoundation.AVCaptionRubyAlignmentCenter, 1)
self.assertEqual(AVFoundation.AVCaptionRubyAlignmentDistributeSpaceBetween, 2)
self.assertEqual(AVFoundation.AVCaptionRubyAlignmentDistributeSpaceAround, 3)
    @min_os_level("12.0")
    def test_methods(self):
        # Verify PyObjC bridge metadata: argument 1 of each AVCaption
        # accessor is registered as a pass-by-reference "out" parameter.
        self.assertArgIsOut(AVFoundation.AVCaption.textColorAtIndex_range_, 1)
        self.assertArgIsOut(AVFoundation.AVCaption.backgroundColorAtIndex_range_, 1)
        self.assertArgIsOut(AVFoundation.AVCaption.fontWeightAtIndex_range_, 1)
        self.assertArgIsOut(AVFoundation.AVCaption.fontStyleAtIndex_range_, 1)
        self.assertArgIsOut(AVFoundation.AVCaption.decorationAtIndex_range_, 1)
        self.assertArgIsOut(AVFoundation.AVCaption.textCombineAtIndex_range_, 1)
        self.assertArgIsOut(AVFoundation.AVCaption.rubyAtIndex_range_, 1)
    @min_os_level("12.0")
    def test_functions(self):
        # Presence checks only: attribute access raises AttributeError if the
        # function is not exposed by the AVFoundation bindings.
        AVFoundation.AVCaptionDimensionMake
        AVFoundation.AVCaptionPointMake
        AVFoundation.AVCaptionSize
|
from django.contrib.auth.models import User
from django.db import models
from datetime import datetime
class TipoMaterial(models.Model):
    # Catalog of material types referenced by Material.tipo.
    # 'status' acts as a soft-delete flag (True = active).
    nombre=models.CharField(max_length=100,blank=True,default=" ")
    status=models.BooleanField(default=True)
    def __str__(self):
        return '{}'.format(self.nombre)
    class Meta:
        verbose_name = 'Tipo Material'
        verbose_name_plural = 'Tipo Materiales'
        ordering = ['nombre']
class Material(models.Model):
    # Raw material held in stock; prices use whole currency units
    # (decimal_places=0).
    material = models.CharField(max_length=200,default=" ")
    tipo = models.ForeignKey(TipoMaterial, blank=True, null=True, on_delete=models.PROTECT)
    arprecio = models.DecimalField(decimal_places=0, max_digits=19,default=0)
    stock= models.IntegerField(default=0)
    status = models.BooleanField(default=True)  # soft-delete flag
    def __str__(self):
        return '{}'.format(self.material)
    class Meta:
        # NOTE(review): 'Materialll' looks like a typo for 'Material' -- confirm
        # before changing, since it is user-visible in the admin.
        verbose_name = 'Materialll'
        verbose_name_plural = 'Materialll'
        ordering = ['material']
# Movement types for Inventario.tipoinventario (purchase, entry,
# modification, production).
TIPOINVENTARIO = (
    (1,'Compra'),
    (2,'Ingreso'),
    (3,'Modificado'),
    (4,'Produccion'),)
class Inventario(models.Model):
    """General inventory ledger: one row per stock movement of a material."""
    material = models.ForeignKey(Material, on_delete=models.PROTECT)
    # BUG FIX: pass the callable (datetime.now), not its result. The original
    # datetime.now() was evaluated once at import time, so every row was
    # stamped with the server start time instead of the insertion time.
    fechaingreso =models.DateTimeField(default=datetime.now, blank=True)
    fechasalida =models.DateTimeField(default=None, blank=True,null=True)
    cantidad = models.IntegerField(default=0)
    precio = models.DecimalField(decimal_places=0,max_digits=19, default=0)
    tipoinventario = models.IntegerField(choices=TIPOINVENTARIO, default=2)
    status = models.BooleanField(default=True)  # soft-delete flag
    class Meta:
        verbose_name = 'Inventario General'
        verbose_name_plural = 'Inventario General'
        ordering = ['material']
# Role of a CliProentidad row: customer (1) or supplier (2).
TIPOClien_Provee = (
    (1,'Cliente'),
    (2,'Proveedor'))
class CliProentidad(models.Model):
    # A business contact that is either a customer or a supplier (see 'tipo').
    nombre = models.CharField(max_length=200, verbose_name="EMPRESA",default='')
    # NOTE(review): default=0 is not a valid TIPOClien_Provee choice (1 or 2);
    # rows saved without an explicit tipo will fail full_clean -- confirm intent.
    tipo = models.IntegerField(choices=TIPOClien_Provee, default=0)
    direccion = models.CharField(max_length=200,verbose_name="DIRECCION",default='')
    telefono = models.CharField(max_length=200,verbose_name="TELEFONO",default='')
    ced_ruc = models.CharField(max_length=200, verbose_name="Cedula o Ruc",default='')
    status = models.BooleanField(default=True)  # soft-delete flag
    email=models.CharField(max_length=200,default="")
    def __str__(self):
        return '{}'.format(self.nombre)
    class Meta:
        verbose_name = 'Cliente o Proveedor'
        verbose_name_plural = 'Cliente o Proveedor'
        ordering = ['nombre']
class Empresa(models.Model):
    # Company profile attached one-to-one to a Django auth user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    nombre =models.CharField(max_length=60,verbose_name="nombre",default='')
    cedula= models.CharField(max_length=15,verbose_name="cedula",default='')
    direcion= models.CharField(max_length=50,verbose_name="direccion",default='')
    celular =models.CharField(max_length=50,verbose_name="celular",default='')
    # Logo/photo uploads are partitioned by date under MEDIA_ROOT/empresa/.
    image = models.ImageField(upload_to='empresa/%Y/%m/%d/', default='')
    def __str__(self):
        return '{}'.format(self.cedula)
    class Meta:
        verbose_name = 'Empresa dato'
        verbose_name_plural = 'Empresa dato'
        ordering = ['cedula']
class Compra(models.Model):
    """Purchase header; line items live in Comprainventario."""
    # BUG FIX: pass the callable (datetime.now), not its result -- the call
    # was evaluated once at import time, freezing the default timestamp.
    fecha = models.DateTimeField(default=datetime.now, blank=True)
    total = models.DecimalField(decimal_places=0, max_digits=19,default=0)
    cliProentidad = models.ForeignKey(CliProentidad,on_delete=models.PROTECT)
    status = models.BooleanField(default=True)  # soft-delete flag
    class Meta:
        verbose_name = 'Compra'
        verbose_name_plural = 'Compras'
        ordering = ['id']
class Comprainventario(models.Model):
    # One purchased line item (material, quantity, unit value) of a Compra.
    salidacompra = models.ForeignKey(Compra,on_delete=models.PROTECT)
    cantidad = models.IntegerField(default=0)
    valor = models.DecimalField(decimal_places=0, max_digits=19,default=0)
    material = models.ForeignKey(Material, on_delete=models.PROTECT,default=None)
    status = models.BooleanField(default=True)  # soft-delete flag
    class Meta:
        verbose_name = 'Detalle de Compra'
        verbose_name_plural = 'Detalle de Compra'
        ordering = ['id']
class Articulo(models.Model):
    # Finished product offered for sale.
    nombre = models.CharField(max_length=200, verbose_name="articulo",default='')
    descripcion = models.CharField(max_length=200, verbose_name="descripción",default='')
    cantidad = models.IntegerField(verbose_name="cantidad",default=0)
    # Default VAT rate of 12% stored as a fraction.
    iva = models.DecimalField(decimal_places=2, verbose_name="iva",max_digits=19,default=0.12)
    subtotal = models.DecimalField(decimal_places=0, verbose_name="subtotal",max_digits=19,default=0)
    precio = models.DecimalField(decimal_places=0, verbose_name="Precio",max_digits=19,default=0)
    image = models.ImageField(upload_to='Articulo/%Y/%m/%d/', default='')
    status = models.BooleanField(default=True)  # soft-delete flag
    def __str__(self):
        return '{}'.format(self.nombre)
    class Meta:
        verbose_name = 'Articulo'
        verbose_name_plural = 'Articulos'
        ordering = ['nombre']
class Pedido(models.Model):
    """Customer order header; articles live in Pedidoarticulo."""
    descripcion = models.CharField(max_length=150,default='')
    # BUG FIX: pass the callable (datetime.now), not its result -- the call
    # was evaluated once at import time, freezing the default delivery date.
    fecentrega = models.DateTimeField(default=datetime.now)
    coutainicial = models.DecimalField(decimal_places=0, verbose_name="coutainicial",max_digits=19,default=0)
    cliProentidad = models.ForeignKey(CliProentidad, on_delete=models.PROTECT,default=None)
    status = models.BooleanField(default=True)  # soft-delete flag
    # NOTE(review): verbose_name duplicates coutainicial's -- likely a
    # copy-paste slip; left unchanged because it is user-visible.
    abono = models.DecimalField(decimal_places=0, verbose_name="coutainicial", max_digits=19, default=0)
    class Meta:
        verbose_name = 'Pedido de Clientes Articulo'
        verbose_name_plural = 'Pedido de Clientes articulos'
        ordering = ['fecentrega']
class Pedidoarticulo(models.Model):
    # One ordered article line (article, quantity, down payment) of a Pedido.
    articulo = models.ForeignKey(Articulo, on_delete=models.PROTECT,default=None)
    cantidad = models.IntegerField(verbose_name="cantidad", default=0)
    abono = models.DecimalField(default=0, max_digits=10, decimal_places=0)
    pedido = models.ForeignKey(Pedido, on_delete=models.PROTECT,default=None)
    class Meta:
        verbose_name = 'Pedido de detalle Articulo'
        verbose_name_plural = 'Pedido de detalle articulos'
        ordering = ['articulo']
class Pedidodetalle (models.Model):
    # Materials consumed to fulfil an order.
    # NOTE(review): no ForeignKey back to Pedido -- confirm how rows are
    # associated with a specific order.
    cantidad = models.IntegerField(verbose_name="cantidad",default=0)
    material = models.ForeignKey(Material, on_delete=models.PROTECT,default=None)
    descripcion = models.CharField(max_length=150, default='')
    status = models.BooleanField(default=True)  # soft-delete flag
    class Meta:
        verbose_name = 'Materiales que se usaran en el Pedido'
        verbose_name_plural = 'Materiales que se usaran en el Pedido'
        ordering = ['id']
class Factura(models.Model):
    """Sales invoice header; line items live in Detfactura."""
    cliente = models.ForeignKey(CliProentidad, on_delete=models.PROTECT)
    # BUG FIX: pass the callable (datetime.now), not its result -- the call
    # was evaluated once at import time, freezing the default sale date.
    fecventa = models.DateTimeField(default=datetime.now)
    iva = models.DecimalField(default=0, max_digits=10, decimal_places=2)
    subtotal = models.DecimalField(default=0, max_digits=10, decimal_places=2)
    total = models.DecimalField(default=0, max_digits=10, decimal_places=2)
    status = models.BooleanField(default=True)  # soft-delete flag
    class Meta:
        verbose_name = 'Factura'
        verbose_name_plural = 'Factura'
        ordering = ['id']
class Proforma(models.Model):
    """Quotation (pro forma invoice) header; lines live in DetalleProforma."""
    empresa = models.ForeignKey(Empresa,on_delete=models.PROTECT)
    # BUG FIX: pass the callable (datetime.now), not its result -- the call
    # was evaluated once at import time, freezing the default date.
    fec = models.DateTimeField(default=datetime.now)
    total = models.DecimalField(default=0, max_digits=10, decimal_places=0)
    status = models.BooleanField(default=True)  # soft-delete flag
    descripcion = models.CharField(max_length=150, default='')
    class Meta:
        verbose_name = 'Proforma'
        verbose_name_plural = 'Proforma'
        ordering = ['id']
class DetalleProforma(models.Model):
    # One quoted article line of a Proforma.
    proforma = models.ForeignKey(Proforma, on_delete=models.PROTECT)
    articulo = models.ForeignKey(Articulo, on_delete=models.PROTECT)
    descripcion = models.CharField(max_length=150,default='')
    cantidad = models.IntegerField(verbose_name="cantidad",default=0)
    total = models.DecimalField(default=0, max_digits=10, decimal_places=0)
    status = models.BooleanField(default=True)  # soft-delete flag
    # presumably the maximum quotable quantity/amount -- TODO confirm.
    maximo = models.DecimalField(default=0, max_digits=10, decimal_places=0)
    class Meta:
        verbose_name = 'DetalleProforma'
        verbose_name_plural = 'DetalleProforma'
        ordering = ['id']
class Detfactura(models.Model):
    # One invoiced article line of a Factura.
    factura = models.ForeignKey(Factura, on_delete=models.PROTECT)
    articulo = models.ForeignKey(Articulo, on_delete=models.PROTECT)
    cantidad = models.IntegerField(verbose_name="cantidad",default=0)
    total = models.DecimalField(default=0, max_digits=10, decimal_places=2)
    status = models.BooleanField(default=True)  # soft-delete flag
    class Meta:
        verbose_name = 'Detalle factura'
        verbose_name_plural = 'Detalle factura'
        ordering = ['id']
class Reporteventa(models.Model):
    # Monthly sales total: one row per (month, year) pair.
    mes =models.IntegerField(verbose_name='mes',default=0)
    año = models.IntegerField(default=0,verbose_name='año')
    total = models.DecimalField(default=0, max_digits=10, decimal_places=2,verbose_name='total')
    def __str__(self):
        return '{}-{}-{}'.format(self.mes,self.año,self.total)
    class Meta:
        verbose_name ='Reporte'
        verbose_name_plural ='reportes'
        ordering =['id']
def month_string_to_number(string):
    """Map a Spanish month name to its 1-12 number.

    Matching is case-insensitive and uses only the first four characters,
    so 'enero', 'Enero' and ' ENERO ' all resolve to 1.

    :param string: Month name (first four characters are significant).
    :return: Tuple (month_number, True) on success, (0, False) otherwise.
    """
    months = {
        "Ener": 1, "Febr": 2, "Marz": 3, "Abri": 4, "Mayo": 5, "Juni": 6,
        "Juli": 7, "Agos": 8, "Sept": 9, "Octu": 10, "Novi": 11, "Dici": 12,
    }
    key = string.strip()[:4].capitalize()
    # BUG FIX: catch only KeyError -- the bare 'except:' also swallowed
    # unrelated errors (including KeyboardInterrupt/SystemExit).
    try:
        return months[key], True
    except KeyError:
        return 0, False
# // leetcode 351
# //Given an Android 3x3 key lock screen and two integers m and n, where 1 ≤ m ≤ n ≤ 9, count the total number of unlock patterns of the Android lock screen, which consist of minimum of m keys and maximum n keys.
# //| 1 | 2 | 3 |
# //| 4 | 5 | 6 |
# //| 7 | 8 | 9 |
# //Invalid move: 4 - 1 - 3 - 6
# //Line 1 - 3 passes through key 2 which had not been selected in the pattern.
# //Invalid move: 4 - 1 - 9 - 2
# //Line 1 - 9 passes through key 5 which had not been selected in the pattern.
# //Valid move: 2 - 4 - 1 - 3 - 6
# //Line 1 - 3 is valid because it passes through key 2, which had been selected in the pattern
# //Valid move: 6 - 5 - 4 - 1 - 9 - 2
# //Line 1 - 9 is valid because it passes through key 5, which had been selected in the pattern.
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from pyramid import testing
from paasta_tools.api.views import autoscaler
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.marathon_tools import MarathonServiceConfig
@mock.patch("paasta_tools.api.views.autoscaler.get_instance_config", autospec=True)
def test_get_autoscaler_count(mock_get_instance_config):
    """GET reports both desired and calculated instance counts."""
    fake_request = testing.DummyRequest()
    fake_request.swagger_data = dict(service="fake_service", instance="fake_instance")
    fake_config = mock.MagicMock(spec=KubernetesDeploymentConfig)
    fake_config.get_instances.return_value = 123
    mock_get_instance_config.return_value = fake_config
    response = autoscaler.get_autoscaler_count(fake_request)
    assert response.json_body["desired_instances"] == 123
    assert response.json_body["calculated_instances"] == 123
@mock.patch("paasta_tools.api.views.autoscaler.get_instance_config", autospec=True)
def test_update_autoscaler_count_marathon(mock_get_instance_config):
    """Marathon instance: a target inside [min, max] is accepted verbatim."""
    fake_request = testing.DummyRequest()
    fake_request.swagger_data = dict(
        service="fake_marathon_service",
        instance="fake_marathon_instance",
        json_body={"desired_instances": 123},
    )
    fake_config = mock.MagicMock(spec=MarathonServiceConfig)
    fake_config.get_min_instances.return_value = 100
    fake_config.get_max_instances.return_value = 200
    mock_get_instance_config.return_value = fake_config
    response = autoscaler.update_autoscaler_count(fake_request)
    assert response.json_body["desired_instances"] == 123
    assert response.status_code == 202
@mock.patch("paasta_tools.api.views.autoscaler.get_instance_config", autospec=True)
def test_update_autoscaler_count_kubernetes(mock_get_instance_config):
    """Kubernetes instance: a target inside [min, max] is accepted verbatim."""
    fake_request = testing.DummyRequest()
    fake_request.swagger_data = dict(
        service="fake_kubernetes_service",
        instance="fake__kubernetes_instance",
        json_body={"desired_instances": 155},
    )
    fake_config = mock.MagicMock(spec=KubernetesDeploymentConfig)
    fake_config.get_min_instances.return_value = 100
    fake_config.get_max_instances.return_value = 200
    mock_get_instance_config.return_value = fake_config
    response = autoscaler.update_autoscaler_count(fake_request)
    assert response.json_body["desired_instances"] == 155
    assert response.status_code == 202
@mock.patch("paasta_tools.api.views.autoscaler.get_instance_config", autospec=True)
def test_update_autoscaler_count_warning(mock_get_instance_config):
    """A target above max_instances is clamped to max and flagged WARNING."""
    fake_request = testing.DummyRequest()
    fake_request.swagger_data = dict(
        service="fake_service",
        instance="fake_instance",
        json_body={"desired_instances": 123},
    )
    fake_config = mock.MagicMock(spec=KubernetesDeploymentConfig)
    fake_config.get_min_instances.return_value = 10
    fake_config.get_max_instances.return_value = 100
    mock_get_instance_config.return_value = fake_config
    response = autoscaler.update_autoscaler_count(fake_request)
    assert response.json_body["desired_instances"] == 100
    assert "WARNING" in response.json_body["status"]
|
def aprovacao(n):
    """Return 'aprovado' when grade n is at least 7, else 'reprovado'."""
    if n >= 7:
        return 'aprovado'
    return 'reprovado'
if __name__ == '__main__':
    # Smoke-check a spread of grades around the 7.0 pass mark.
    for nota in (10, 8, 6, 0, 7):
        print(aprovacao(nota))
|
# code_report Solution
# https://youtu.be/CvJz_RgTYgU
# Read n queries; for each x print -1 when x is not a multiple of 5,
# otherwise 0 for even x and 1 for odd x.
n = int(input())
for _ in range(n):
    x = int(input())
    if x % 5:
        print(-1)
    elif x % 2:
        print(1)
    else:
        print(0)
|
from django.contrib.auth.models import User
from django.db import models
class FHSUser(models.Model):
    """Extra profile data attached one-to-one to Django's auth User."""
    # BUG FIX: on_delete is a required argument since Django 2.0; CASCADE
    # mirrors the implicit pre-2.0 default, so existing behavior is kept.
    user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE)
    is_married = models.BooleanField(default=False)
    num_kids = models.IntegerField(default=0)
    num_ticket = models.IntegerField(default=0)
    profession = models.CharField(max_length=50)
    current_city = models.CharField(max_length=50)
    current_state = models.CharField(max_length=50)
    def __unicode__(self):
        return '{0}, {1}'.format(self.user.last_name, self.user.first_name)
    # Python 3 looks up __str__; alias it so the display name works there too
    # while __unicode__ is kept for legacy Python 2 callers.
    __str__ = __unicode__
class Image(models.Model):
    # Bare image upload; files are stored under MEDIA_ROOT/images/.
    image = models.ImageField(upload_to='images')
|
import datetime
from pymongo import MongoClient
cl = MongoClient()
db_bank = cl['banking']
# Record today's identical allowance deposit for each kid. A loop replaces
# the three copy-pasted insert_one calls so adding a kid is a one-word edit.
for kid in ("Calvin", "Samuel", "Kay"):
    db_bank[kid].insert_one({
        "kid": kid,
        "type": "deposit",
        "amount": 6.00,
        "description": "School and house work",
        "date": str(datetime.date.today()),
    })
|
# -*- coding: utf-8 -*-
"""
Tests for dnsimple updater. Since we rely on an external library implementing
the actual interfacing with the remote service, the tests in here merely check
the behavior of the Dyndnsc wrapper class.
"""
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
import sys
# Stub out the third-party 'dnsimple_dyndns' dependency before the updater
# module imports it, so the tests run without the real package installed.
sys.modules['dnsimple_dyndns'] = mock.Mock()
class TestDnsimpleUpdater(unittest.TestCase):
    # The real dnsimple_dyndns module is replaced by a Mock in sys.modules,
    # so these tests exercise only the Dyndnsc wrapper behavior.
    def test_mocked_dnsimple(self):
        # Import deferred so the sys.modules stub is in place first.
        from dyndnsc.updater.dnsimple import UpdateProtocolDnsimple
        theip = "127.0.0.1"
        self.assertEqual("dnsimple", UpdateProtocolDnsimple.configuration_key())
        upd = UpdateProtocolDnsimple(hostname="dnsimple_record.example.com", key="1234")
        upd.handler.update_record.return_value = theip
        self.assertEqual(theip, upd.update(theip))
        # The hostname's first label is expected to become the record name.
        upd.handler.update_record.assert_called_once_with(name="dnsimple_record", address=theip)
|
import logging
from datetime import datetime as dt
from io import StringIO
import time
import pandas as pd
import psycopg2.extras
import pytz
from tzlocal import get_localzone
from tagbase_server.utils.db_utils import connect
from tagbase_server.utils.io_utils import compute_file_sha256, make_hash_sha256
from tagbase_server.utils.slack_utils import post_msg
logger = logging.getLogger(__name__)
def process_global_attributes_metadata(
    global_attributes_lines,
    cur,
    submission_id,
    submission_filename,
    line_counter,
):
    """Parse eTUFF global-attribute lines into metadata rows.

    Each line has the form ':name = value'. Names are resolved to ids via
    the metadata_types table; names that cannot be resolved are reported to
    Slack.

    :param global_attributes_lines: Raw ':name = value' lines from the file.
    :param cur: Open database cursor.
    :param submission_id: Submission the metadata belongs to.
    :param submission_filename: Used in the Slack warning message only.
    :param line_counter: Line number used in the Slack warning message.
    :return: List of (submission_id, attribute_id, attribute_value) string tuples.
    """
    attributes_map = {}
    metadata = []
    for line in global_attributes_lines:
        line = line.strip()
        logger.debug("Processing global attribute: %s", line)
        tokens = line[1:].split(" = ")
        # attribute_name = tokens[0], attribute_value = tokens[1]
        if len(tokens) > 1:
            attributes_map[tokens[0]] = tokens[1]
        else:
            logger.warning("Metadata line %s NOT in expected format!", line)
    # BUG FIX: guard the lookup -- with no attributes the original emitted
    # 'IN ()', which is a SQL syntax error.
    if attributes_map:
        # Parameterized IN clause (was string interpolation) so attribute
        # names read from the file cannot break or inject SQL.
        placeholders = ", ".join(["%s"] * len(attributes_map))
        attribute_ids_query = (
            "SELECT attribute_id, attribute_name FROM metadata_types "
            "WHERE attribute_name IN ({})".format(placeholders)
        )
        logger.debug("Query=%s", attribute_ids_query)
        cur.execute(attribute_ids_query, tuple(attributes_map.keys()))
        rows = cur.fetchall()
        str_submission_id = str(submission_id)
        for row in rows:
            attribute_id = row[0]
            attribute_name = row[1]
            attribute_value = attributes_map[attribute_name]
            metadata.append((str_submission_id, str(attribute_id), attribute_value))
            attributes_map.pop(attribute_name)
    # Anything left over was not found in metadata_types -- warn via Slack.
    if len(attributes_map.keys()) > 0:
        not_found_attributes = ", ".join(attributes_map.keys())
        msg = (
            f"*{submission_filename}* _line:{line_counter}_ - "
            f"Unable to locate attribute_names *{not_found_attributes}* in _metadata_types_ table."
        )
        post_msg(msg)
    return metadata
def get_tag_id(cur, dataset_id):
    """
    Retrieve a 'tag_id' for a submission by performing a lookup on the 'dataset_id'.
    If an entry exists for the dataset then grab the existing associated tag_id. If not,
    create a new tag_id.
    :param cur: A database cursor
    :type cur: cursor connection
    :param dataset_id: Dataset ID as described above.
    :type dataset_id: str
    """
    # Parameterized (was str.format) so dataset_id cannot break or inject SQL.
    sql_query = (
        "SELECT COALESCE(MAX(tag_id), NEXTVAL('submission_tag_id_seq')) "
        "FROM submission WHERE dataset_id = %s"
    )
    logger.debug("Executing: %s", sql_query)
    cur.execute(sql_query, (dataset_id,))
    result = cur.fetchone()[0]
    logger.debug("Result: %s", result)
    return result
def get_dataset_id(cur, instrument_name, serial_number, ptt, platform):
    """
    Retrieve or create a dataset entry for a submission. If a dataset entry exists then grab the existing
    id, if not, create a new one.
    :param cur: A database cursor
    :type cur: cursor connection
    :param instrument_name: A unique instrument name, made clear to the end user that it is the primary identifier, e.g., iccat_gbyp0008
    :type instrument_name: str
    :param serial_number: A the device internal ID, e.g., 18P0201
    :type serial_number: str
    :param ptt: A satellite platform ID, e.g., 62342
    :type ptt: str
    :param platform: The species code/common name on which the device was deployed, e.g., Thunnus thynnus
    :type platform: str
    """
    # Parameterized queries (was str.format) so values read from the file
    # cannot break or inject SQL.
    cur.execute(
        "SELECT COALESCE(MAX(dataset_id), NEXTVAL('dataset_dataset_id_seq')) FROM dataset"
        " WHERE instrument_name = %s AND serial_number = %s AND ptt = %s AND platform = %s",
        (instrument_name, serial_number, ptt, platform),
    )
    dataset_id = cur.fetchone()[0]
    logger.debug("Computed dataset_id: %s", dataset_id)
    # ON CONFLICT DO NOTHING makes the insert idempotent for existing datasets.
    cur.execute(
        "INSERT INTO dataset (dataset_id, instrument_name, serial_number, ptt, platform)"
        " VALUES (%s, %s, %s, %s, %s) ON CONFLICT DO NOTHING",
        (dataset_id, instrument_name, serial_number, ptt, platform),
    )
    logger.debug(
        "Successful INSERT of '%s' into 'dataset' table.",
        dataset_id,
    )
    return dataset_id
def get_submission_id(cur, tag_id, dataset_id, data_sha256):
    """Return the submission_id matching (tag_id, dataset_id, data_sha256),
    or None when no such submission exists.

    :param cur: A database cursor
    :param tag_id: Tag id of the submission.
    :param dataset_id: Dataset id of the submission.
    :param data_sha256: SHA256 of the file's data (non-metadata) content.
    """
    # Parameterized (was str.format) so the values cannot break or inject SQL.
    cur.execute(
        "SELECT submission_id FROM submission"
        " WHERE tag_id = %s AND dataset_id = %s"
        " AND data_sha256 = %s",
        (tag_id, dataset_id, data_sha256),
    )
    db_results = cur.fetchone()
    if not db_results:
        return None
    submission_id = db_results[0]
    logger.debug("Found submission_id: %s", submission_id)
    return submission_id
def insert_new_submission(
    cur,
    tag_id,
    submission_filename,
    notes,
    version,
    file_sha256,
    dataset_id,
    md_sha256,
    data_sha256,
):
    """Insert a brand-new row into the 'submission' table, stamped with the
    current local time."""
    # Ingestion timestamp: UTC now, rendered in the server's local timezone.
    ingested_at = dt.now(tz=pytz.utc).astimezone(get_localzone())
    row = (
        tag_id,
        submission_filename,
        ingested_at,
        notes,
        version,
        file_sha256,
        dataset_id,
        md_sha256,
        data_sha256,
    )
    cur.execute(
        "INSERT INTO submission (tag_id, filename, date_time, notes, version, file_sha256, dataset_id, md_sha256, data_sha256) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        row,
    )
    logger.info(
        "Successful INSERT of '%s' into 'submission' table.",
        submission_filename,
    )
def detect_duplicate_file(cursor, file_sha256):
    """
    Detect a duplicate file by performing a lookup on submission.file_sha256.
    Returns True if duplicate.
    :param file_sha256: A SHA256 hash of an entire eTUFF file.
    :type file_sha256: str
    :param cursor: A database cursor
    :type cursor: cursor connection
    """
    logger.debug("Detecting duplicate file submission...")
    cursor.execute(
        "SELECT file_sha256 FROM submission WHERE file_sha256 = %s",
        (file_sha256,),
    )
    match = cursor.fetchone()
    if not match:
        # No row at all: definitely not a duplicate.
        return False
    stored_hash = match[0]
    logger.info(
        "Computed hash: %s Duplicate: %s",
        file_sha256,
        stored_hash,
    )
    return stored_hash is not None
def get_dataset_properties(submission_filename):
    """
    Extract 'instrument_name', 'serial_number', 'ptt', 'platform' and
    'referencetrack_included' values from global attributes and calculate
    an SHA256 signature for the global metadata.
    Returns a tuple: (instrument_name, serial_number, ptt, platform,
    referencetrack_included, content lines, metadata lines,
    zero-based count of metadata lines).
    :param submission_filename: The file from which we wish to extract certain global attributes
    :type submission_filename: str
    """
    # NOTE(review): the key is spelled "ppt" while the file attribute is
    # ":ptt" -- consistent within this function, but confirm it is intentional.
    global_attributes = {
        "instrument_name": "unknown",
        "serial_number": "unknown",
        "ppt": "unknown",
        "platform": "unknown",
        "referencetrack_included": "0",
    }
    content = []
    metadata_content = []
    processed_lines = 0
    with open(submission_filename, "rb") as file:
        for line in file:
            # Decode permissively; undecodable bytes are dropped.
            line = line.decode("utf-8", "ignore").strip()
            # '//' lines are comments in eTUFF and are skipped entirely.
            if line.startswith("//"):
                continue
            # ':' lines are global attributes (metadata); everything else is data.
            if line.startswith(":"):
                processed_lines += 1
                # keeping all metadata together
                metadata_content.append(line)
                value = line[1:].split(" = ")[1].replace('"', "")
                if line.startswith(":instrument_name"):
                    global_attributes["instrument_name"] = value
                elif line.startswith(":serial_number"):
                    global_attributes["serial_number"] = value
                elif line.startswith(":ptt"):
                    global_attributes["ppt"] = value
                elif line.startswith(":platform"):
                    global_attributes["platform"] = value
                elif line.startswith(":referencetrack_included"):
                    # NOTE(review): stored as int here but the default above is
                    # the string "0" -- downstream bool() handles both.
                    global_attributes["referencetrack_included"] = int(value)
            else:
                content.append(line)
    # we use zero-based indexing for accessing array of lines later on
    processed_lines = processed_lines - 1 if processed_lines > 0 else 0
    return (
        global_attributes["instrument_name"],
        global_attributes["serial_number"],
        global_attributes["ppt"],
        global_attributes["platform"],
        global_attributes["referencetrack_included"],
        content,
        metadata_content,
        processed_lines,
    )
def is_only_metadata_change(cursor, metadata_hash, file_content_hash):
    """Return True when a submission exists with identical data content but a
    different metadata hash -- i.e. only the metadata changed."""
    logger.debug("Detecting metadata submitted...")
    cursor.execute(
        "SELECT md_sha256 FROM submission WHERE md_sha256 <> %s AND data_sha256 = %s ",
        (
            metadata_hash,
            file_content_hash,
        ),
    )
    match = cursor.fetchone()
    if not match:
        return False
    stored_md_hash = match[0]
    logger.info(
        "Computed metadata hash: %s stored: %s",
        metadata_hash,
        stored_md_hash,
    )
    return bool(stored_md_hash)
def insert_metadata(cur, metadata, tag_id):
    """Insert one 'metadata' row per (submission_id, attribute_id, value)
    triple, all tied to the given tag_id."""
    for submission_id, attribute_id, value in metadata:
        # mogrify safely renders the VALUES tuple with proper quoting.
        values_sql = cur.mogrify(
            "(%s, %s, %s, %s)",
            (submission_id, attribute_id, str(value).strip('"'), tag_id),
        )
        cur.execute(
            "INSERT INTO metadata (submission_id, attribute_id, attribute_value, tag_id) VALUES "
            + values_sql.decode("utf-8")
        )
def get_current_submission_id(cur):
    """Return the submission_id generated by the most recent INSERT on this
    database session (via the submission_submission_id_seq sequence)."""
    cur.execute("SELECT currval('submission_submission_id_seq')")
    new_id = cur.fetchone()[0]
    logger.debug("New submission_id=%d", new_id)
    return new_id
def update_submission_metadata(
    cur, tag_id, metadata, submission_id, dataset_id, metadata_hash
):
    """Refresh a submission's metadata hash/timestamp and the stored values
    of its metadata attributes.

    :param cur: A database cursor
    :param tag_id: Tag id of the submission.
    :param metadata: Iterable of (submission_id, attribute_id, value) tuples.
    :param submission_id: Submission to update.
    :param dataset_id: Dataset the submission belongs to.
    :param metadata_hash: New SHA256 of the metadata block.
    """
    # update submission information
    current_time = dt.now(tz=pytz.utc).astimezone(get_localzone())
    # Parameterized (was str.format) so the values cannot break or inject SQL.
    cur.execute(
        "UPDATE submission SET md_sha256 = %s, date_time = %s"
        " WHERE tag_id = %s AND dataset_id = %s AND submission_id = %s",
        (metadata_hash, current_time, tag_id, dataset_id, submission_id),
    )
    logger.info(
        "Submission_id=%s updated with metadata hash=%s", submission_id, metadata_hash
    )
    # update metadata attributes
    for x in metadata:
        submission_id = x[0]
        attribute_id = x[1]
        attribute_value = str(x[2]).strip('"')
        cur.execute(
            "UPDATE metadata SET attribute_value = %s WHERE submission_id = %s AND tag_id = %s AND attribute_id = %s",
            (attribute_value, submission_id, tag_id, attribute_id),
        )
    logger.info("Updated metadata attributes: %s", metadata)
def process_etuff_file(file, version=None, notes=None):
    """Ingest a single eTUFF file into the Tagbase DB.

    Parses the file's global attributes (metadata) and observation rows,
    creates or reuses the dataset/tag/submission records, bulk-loads the
    observations into 'proc_observations' via COPY, then calls the
    sp_execute_data_migration stored procedure.

    :param file: Full path of the eTUFF file to ingest.
    :param version: Optional submission version string.
    :param notes: Optional free-text notes stored with the submission.

    NOTE(review): early exits (duplicate file, metadata-only change,
    migration error) return 1 while the success path falls off the end and
    returns None -- confirm callers treat these values as intended.
    """
    start = time.perf_counter()
    submission_filename = file  # full path name is now preferred rather than - file[file.rindex("/") + 1 :]
    logger.info(
        "Processing etuff file: %s",
        submission_filename,
    )
    conn = connect()
    conn.autocommit = True
    # TODO we should read the file once and return the hashes we need (metadata/content/entire-file)
    (
        instrument_name,
        serial_number,
        ptt,
        platform,
        referencetrack_included,
        file_content,
        metadata_content,
        number_global_attributes_lines,
    ) = get_dataset_properties(submission_filename)
    # Three hashes: data-only, metadata-only, and whole-file (duplicate check).
    content_hash = make_hash_sha256(file_content)
    logger.debug("Content Hash: %s", content_hash)
    metadata_hash = make_hash_sha256(metadata_content)
    logger.debug("MD Hash: %s", metadata_hash)
    entire_file_hash = compute_file_sha256(submission_filename)
    logger.debug("File Hash: %s", entire_file_hash)
    with conn:
        with conn.cursor() as cur:
            # Exact byte-for-byte duplicate: skip ingestion entirely.
            # NOTE(review): this early return leaves conn open (close() is
            # only reached on the success path) -- confirm.
            if detect_duplicate_file(cur, entire_file_hash):
                logger.info(
                    "Data file '%s' with SHA256 hash '%s' identified as exact duplicate. No ingestion performed.",
                    submission_filename,
                    entire_file_hash,
                )
                return 1
            dataset_id = get_dataset_id(
                cur, instrument_name, serial_number, ptt, platform
            )
            tag_id = get_tag_id(cur, dataset_id)
            submission_id = get_submission_id(
                cur,
                tag_id,
                dataset_id,
                content_hash,
            )
            # First time we see this data content: create the submission row.
            if not submission_id:
                insert_new_submission(
                    cur,
                    tag_id,
                    submission_filename,
                    notes,
                    version,
                    entire_file_hash,
                    dataset_id,
                    metadata_hash,
                    content_hash,
                )
                submission_id = get_current_submission_id(cur)
            # compute global attributes which are considered as metadata
            metadata = process_global_attributes_metadata(
                metadata_content,
                cur,
                submission_id,
                submission_filename,
                number_global_attributes_lines,
            )
            # Same data, different metadata: update in place and stop here.
            if is_only_metadata_change(cur, metadata_hash, content_hash):
                update_submission_metadata(
                    cur, tag_id, metadata, submission_id, dataset_id, metadata_hash
                )
                return 1
            proc_obs = []
            # Cache of variable_name -> variable_id to avoid repeated lookups.
            variable_lookup = {}
            # at this point we have already read form the file all global attribute lines
            # line_counter = number_global_attributes_lines
            # # TODO we should use the 'content' variable in the following
            s_time = time.perf_counter()
            # with open(file, "rb") as data:
            #     lines = [line.decode("utf-8", "ignore") for line in data.readlines()]
            # lines_length = len(lines)
            num_lines_content = len(file_content)
            logger.debug(
                "len number_global_atttributes_lines: '%s' len lines_length: '%s'",
                number_global_attributes_lines,
                num_lines_content,
            )
            for counter in range(0, num_lines_content):
                line = file_content[counter]
                # 1-based line number for human-facing messages below; the
                # loop variable itself is reassigned each iteration.
                counter += 1
                # Expected CSV layout: datetime, ?, value, variable_name, units
                tokens = line.split(",")
                tokens = [token.replace('"', "") for token in tokens]
                if tokens:
                    variable_name = tokens[3]
                    if variable_name in variable_lookup:
                        variable_id = variable_lookup[variable_name]
                    else:
                        cur.execute(
                            "SELECT variable_id FROM observation_types WHERE variable_name = %s",
                            (variable_name,),
                        )
                        row = cur.fetchone()
                        if row:
                            variable_id = row[0]
                        else:
                            # Unknown variable: register it, racing inserts
                            # are absorbed by ON CONFLICT / the except path.
                            try:
                                logger.debug(
                                    "variable_name=%s\ttokens=%s",
                                    variable_name,
                                    tokens,
                                )
                                cur.execute(
                                    "INSERT INTO observation_types("
                                    "variable_name, variable_units) VALUES (%s, %s) "
                                    "ON CONFLICT (variable_name) DO NOTHING",
                                    (variable_name, tokens[4].strip()),
                                )
                            except (
                                Exception,
                                psycopg2.DatabaseError,
                            ) as error:
                                logger.error(
                                    "variable_id '%s' already exists in 'observation_types'. tokens:"
                                    " '%s. \nerror: %s",
                                    variable_name,
                                    tokens,
                                    error,
                                )
                                conn.rollback()
                            # NOTE(review): nextval() advances the sequence and
                            # may not match the id just inserted -- confirm.
                            cur.execute(
                                "SELECT nextval('observation_types_variable_id_seq')"
                            )
                            variable_id = cur.fetchone()[0]
                        variable_lookup[variable_name] = variable_id
                    date_time = None
                    if tokens[0] != '""' and tokens[0] != "":
                        # NOTE(review): the replace() result is discarded here;
                        # quotes were already stripped from all tokens above,
                        # so this branch appears to be dead code -- confirm.
                        if tokens[0].startswith('"'):
                            tokens[0].replace('"', "")
                        date_time = dt.strptime(
                            tokens[0], "%Y-%m-%d %H:%M:%S"
                        ).astimezone(pytz.utc)
                    else:
                        # Rows without a timestamp are skipped and reported.
                        stripped_line = line.strip("\n")
                        msg = (
                            f"*{submission_filename}* _line:{counter}_ - "
                            f"No datetime... skipping line: {stripped_line}"
                        )
                        post_msg(msg)
                        continue
                    proc_obs.append(
                        [
                            date_time,
                            variable_id,
                            tokens[2],
                            submission_id,
                            tag_id,
                        ]
                    )
            len_proc_obs = len(proc_obs)
            e_time = time.perf_counter()
            sub_elapsed = round(e_time - s_time, 2)
            logger.info(
                "Built raw 'proc_observations' data structure from %s observations in: %s second(s)",
                len_proc_obs,
                sub_elapsed,
            )
            insert_metadata(cur, metadata, tag_id)
            # build pandas df
            s_time = time.perf_counter()
            df = pd.DataFrame.from_records(
                proc_obs,
                columns=[
                    "date_time",
                    "variable_id",
                    "variable_value",
                    "submission_id",
                    "tag_id",
                ],
            )
            e_time = time.perf_counter()
            sub_elapsed = round(e_time - s_time, 2)
            logger.info(
                "Built Pandas DF from %s records. Time elapsed: %s second(s)",
                len_proc_obs,
                sub_elapsed,
            )
            logger.debug("DF Info: %s", df.info)
            logger.debug("DF Memory Usage: %s", df.memory_usage(True))
            # save dataframe to StringIO memory buffer
            s_time = time.perf_counter()
            buffer = StringIO()
            df.to_csv(buffer, header=False, index=False)
            buffer.seek(0)
            e_time = time.perf_counter()
            sub_elapsed = round(e_time - s_time, 2)
            logger.info(
                "Copied Pandas DF to StringIO memory buffer. Time elapsed: %s second(s)",
                sub_elapsed,
            )
            # copy buffer to db
            s_time = time.perf_counter()
            logger.info(
                "Copying memory buffer to 'proc_observations' and executing data migration."
            )
            try:
                # Bulk COPY is far faster than row-by-row INSERTs, then the
                # stored procedure migrates the staged rows.
                cur.copy_from(buffer, "proc_observations", sep=",")
                ref = bool(referencetrack_included)
                logger.debug(
                    "Executing sp_execute_data_migration(%s, %s);",
                    int(submission_id),
                    ref,
                )
                cur.execute(
                    "CALL sp_execute_data_migration(%s, %s);", (int(submission_id), ref)
                )
            except (Exception, psycopg2.DatabaseError) as error:
                logger.error("Error: %s", error)
                conn.rollback()
                return 1
            e_time = time.perf_counter()
            sub_elapsed = round(e_time - s_time, 2)
            logger.info(
                "Successful migration of %s 'proc_observations'. Elapsed time: %s second(s).",
                len_proc_obs,
                sub_elapsed,
            )
            conn.commit()
            cur.close()
    conn.close()
    finish = time.perf_counter()
    elapsed = round(finish - start, 2)
    logger.info(
        "Data file %s successfully ingested into Tagbase DB. Total time: %s second(s)",
        submission_filename,
        elapsed,
    )
|
import os
from selenium import webdriver
#chromeDriPath=os.path.abspath(r'E:\Python3\Ptthon3.6Install\Scripts\chromedriver.exe')
driver=webdriver.Chrome()
# Smoke test: open Baidu's home page and echo its <title>.
driver.get("http://www.baidu.com")
print(driver.title)
# NOTE(review): close() only closes the window; quit() would also terminate
# the chromedriver process -- confirm which is intended.
driver.close()
|
Enoncé
Dans ce challenge, on utilise un format de données qui est une version simplifiée du XML. Les noms des balises ne sont composés que d'une lettre minuscule, une balise ouvrante étant représentée par cette lettre seule et la balise fermante étant représentée par le caractère -, suivi de la lettre.
Par exemple, la chaîne ab-bcd-d-c-ae-e est l'équivalent de <a><b></b><c><d></d></c></a><e></e> en XML. La chaîne fournie sera toujours correctement formée.
On définit à présent la profondeur d'une balise comme 1 + le nombre de balises dans lesquelles elle est incluse.
Dans l'exemple précédent : a et e ont une profondeur de 1,
b et c ont une profondeur de 2
et d a une profondeur de 3.
On définit enfin le poids d'un nom de balise par la somme des inverses des profondeurs de chacune de ses occurrences.
Par exemple, dans la chaîne a-abab-b-a-b, il y a : - deux balises a de profondeurs 1 et 2
- deux balises b de profondeurs 1 et 3.
Le poids de a est donc de (1/1)+(1/2) = 1.5 et le poids de b est donc (1/1)+(1/3) ≈ 1.33.
Dans ce challenge vous devez déterminer la lettre correspondant à la balise de plus grand poids de la chaîne passée en paramètre.
Format des données
Entrée
Sur une seule ligne, une chaîne correctement formée d'au maximum 1024 caractères représentant une imbrication de balises.
Sortie
La lettre correspondant au nom de balise de plus grand poids. Si deux noms de balises ont le même poids, affichez le plus petit dans l'ordre alphabétique.
#**************Solution by Isograd ************************/
from collections import Counter

# A lowercase letter opens a tag; '-' followed by the same letter closes it.
# Each opening tag contributes 1/depth to that letter's weight; print the
# letter with the greatest weight (ties broken alphabetically).
encoded = input()
open_tags = []          # letters of currently open tags; depth == len(open_tags)
weights = Counter()
chars = iter(encoded)
for ch in chars:
    if ch == '-':
        next(chars)     # consume the closing tag's letter
        open_tags.pop()
    else:
        open_tags.append(ch)
        weights[ch] += 1 / len(open_tags)
# Negating the weight makes min() pick the largest weight, smallest letter.
print(min((-w, letter) for letter, w in weights.items())[1])
from ..core import my_player
class Knight(object):
    """Knight character: exposes the skills unlocked at the player's level."""

    def skillset(self):
        """Return a dict mapping skill labels to damage values.

        Damage scales with the player's current health.
        NOTE(review): levels 3-5 match neither branch and implicitly return
        None -- confirm whether a mid-level skill set is missing here.
        """
        level = my_player.get_level()
        hp = my_player.get_health()
        if level < 3:
            return {
                "1. Slash: ": hp * 1 / 6,
                "2. Heavy Strike: ": hp * 1 / 5,
            }
        if level >= 6:
            return {
                "1. Slash: ": hp * 1 / 6,
                "2. Heavy Slash: ": hp * 1 / 5,
                "3. Atomic Punch: ": hp,
            }
|
#looking at profile curves
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
#Switch these out for the appropriate files
directory = '/Users/carmenlee/Desktop/13082020_pip1_1/'  # hard-coded experiment folder
profile = np.genfromtxt(directory +'profile.csv')        # per-frame intensity profiles
horizontal = np.genfromtxt(directory +'horizontal.csv')  # x coordinates matching each profile
frame = np.genfromtxt(directory +'frames.csv')
# Used with np.polyval below -- presumably polynomial coefficients for the
# pipette baseline; verify against how pipette2.csv is produced.
pipette =np.genfromtxt(directory +'pipette2.csv')
time_raw =directory+'metadata.txt'
import metadata_reader
# First 200 metadata rows are dropped -- presumably pre-acquisition frames; confirm.
timedata = metadata_reader.read_txtfile(time_raw)[200:]
# #print(timedata[k] - timedata[k+1])
############################################
# Homemade functions
############################################
def find_max(arrayx, arrayy):
    """Fit a parabola to (arrayx, arrayy) and return its vertex as (x, y)."""
    a, b, c = np.polyfit(arrayx, arrayy, 2)
    x_vertex = -b / (2 * a)
    y_vertex = a * x_vertex ** 2 + b * x_vertex + c
    return (x_vertex, y_vertex)
def find_centroid(arrayx, arrayy):
    """Return the intensity-weighted mean x position (centre of mass)."""
    xs = np.asarray(arrayx)
    weights = np.asarray(arrayy)
    # Work relative to the first x value, then shift back.
    return np.sum((xs - xs[0]) * weights) / np.sum(weights) + xs[0]
def find_nearest(array, value):
    """Return the index of the element of `array` closest to `value`."""
    deltas = np.abs(np.asarray(array) - value)
    return deltas.argmin()
def check_for_breaks(x):
    """Split the sorted index sequence x into contiguous runs.

    Returns (starts, ends): parallel lists of positions in x where each
    run of consecutive integers begins and ends.
    """
    starts = [0]
    ends = []
    for idx, (cur, nxt) in enumerate(zip(x, x[1:])):
        if nxt - cur > 1:       # gap -> a new run begins at idx + 1
            starts.append(idx + 1)
            ends.append(idx)
    ends.append(len(x) - 1)
    return starts, ends
def extrema_checker(horz, vert, averaging):
    """Locate drops along a smoothed profile.

    Smooths the first derivative of vert(horz) with a sliding linear fit of
    width `averaging`, thresholds |slope| > 0.18 to find steep regions, and
    groups those regions into drop edges via one of three code paths
    depending on how many runs are found.

    Returns (xpos, smooth_deriv, drop_start, drop_end, height, height_loc,
    fit_pos, fitheight, centroid).
    """
    # First derivative of the profile, then smooth it with a sliding
    # first-order polynomial fit evaluated at each window centre.
    derive = np.gradient(np.asarray(vert), np.asarray(horz))
    smooth_deriv = []
    xpos = []
    for x in range(len(vert)-averaging):
        a = np.polyfit(horz[x:x+averaging], derive[x:x+averaging], 1)
        smooth_deriv.append(np.polyval(a, horz[x+int(averaging/2)]))
        xpos.append(horz[x+int(averaging/2)])
    height = []
    height_loc = []
    fitheight = []
    fit_pos = []
    centroid = []
    smooth_deriv = np.asarray(smooth_deriv)
    # Second derivative, smoothed the same way.  NOTE: `averaging` is
    # rebound to 30 here, which also affects all the offsets used below.
    doublederiv = np.gradient(smooth_deriv, np.asarray(xpos))
    averaging = 30
    smooth_dblderiv = []
    xposdd = []
    for m in range(len(smooth_deriv)-averaging):
        b = np.polyfit(xpos[m:m+averaging], doublederiv[m:m+averaging], 1)
        smooth_dblderiv.append(np.polyval(b, xpos[m+int(averaging/2)]))
        xposdd.append(xpos[m+int(averaging/2)])
    # Indices where the smoothed slope is steep -- candidate drop edges.
    minima = np.where(abs(smooth_deriv)>0.18)[0]
    #print(minima)
    beginning, end = check_for_breaks(minima)
    print(beginning, end)
    #print(len(beginning), end)
    drop_start = []
    drop_end = []
    volume = []
    #print(beginning,end)
    if len(beginning)>3:
        # path1: many steep runs -- rising runs start drops, falling runs end them.
        print('path1')
        for k in range(len(beginning)):
            if smooth_deriv[minima[beginning[k]]] > 0:
                drop_start.append(minima[beginning[k]])
            if smooth_deriv[minima[end[k]-1]] <0:
                drop_end.append(minima[end[k]-1])
        for k in range(len(beginning)-1):
            # A peak lies between a run ending with positive slope and the
            # next run starting with negative slope.
            # NOTE(review): in the first slice below the bracket placement
            # `minima[beginning[k+1]+int(averaging/2)]` differs from every
            # sibling expression (`minima[beginning[k+1]]+int(averaging/2)`)
            # -- looks like a misplaced bracket; confirm before relying on it.
            if smooth_deriv[minima[end[k]-1]]>0 and smooth_deriv[minima[beginning[k+1]]]<0:
                x, h = find_max(horz[minima[end[k]]+int(averaging/2):minima[beginning[k+1]+int(averaging/2)]], vert[minima[end[k]]+int(averaging/2):minima[beginning[k+1]]+int(averaging/2)])
                fitheight.append(h)
                fit_pos.append(x)
                height.append(max(vert[minima[end[k]]+int(averaging/2):minima[beginning[k+1]]+int(averaging/2)]))
                height_loc.append(np.argmax(vert[minima[end[k]]+int(averaging/2):minima[beginning[k+1]]+int(averaging/2)])+minima[end[k]]+int(averaging/2))
        for m in range(len(drop_start)):
            xmass = find_centroid(horz[drop_start[m]+int(averaging/2):drop_end[m]+int(averaging/2)], vert[drop_start[m]+int(averaging/2):drop_end[m]+int(averaging/2)])
            centroid.append(xmass)
    elif len(beginning)==3:
        # path2: exactly three runs -- hard-coded pairing into two drops.
        print('path2')
        drop_start= [minima[beginning[0]], minima[end[1]]]
        drop_end=[minima[end[1]],minima[end[2]]]
        for k in range(len(drop_start)):
            x, h = find_max(horz[drop_start[k]+int(averaging/2):drop_end[k]+int(averaging/2)], vert[drop_start[k]+int(averaging/2):drop_end[k]+int(averaging/2)])
            fitheight.append(h)
            fit_pos.append(x)
            height.append(max(vert[drop_start[k]+int(averaging/2):drop_end[k]+int(averaging/2)]))
            height_loc.append(np.argmax(vert[drop_start[k]+int(averaging/2):drop_end[k]+int(averaging/2)])+drop_start[k]+int(averaging/2))
        # Only one centroid is computed; a 0 placeholder keeps list lengths aligned.
        xmass = find_centroid(horz[drop_start[0]+int(averaging/2):drop_end[1]+int(averaging/2)], vert[drop_start[0]+int(averaging/2):drop_end[1]+int(averaging/2)])
        centroid.append(xmass)
        centroid.append(0)
        #volume.append(sum(vert[minima[end[k]]+int(averaging/2):minima[beginning[k+1]]+int(averaging/2)]))'''
    else:
        # path3: one or two runs -- treat the whole steep region as one drop.
        print('path3')
        #if smooth_deriv[minima[0]:minima[-1]]
        drop_start.append(minima[0])
        drop_end.append(minima[-1])
        x, h = find_max(horz[minima[0]+int(averaging/2):minima[-1]+int(averaging/2)], vert[minima[0]+int(averaging/2):minima[-1]+int(averaging/2)])
        fitheight.append(h)
        fit_pos.append(x)
        height.append(max(vert[minima[0]+int(averaging/2):minima[end[-1]]+int(averaging/2)]))
        height_loc.append(np.argmax(vert[minima[0]+int(averaging/2):minima[-1]+int(averaging/2)])+minima[0]+int(averaging/2))
        for m in range(len(drop_start)):
            xmass = find_centroid(horz[drop_start[m]+int(averaging/2):drop_end[m]+int(averaging/2)], vert[drop_start[m]+int(averaging/2):drop_end[m]+int(averaging/2)])
            centroid.append(xmass)
        #volume.append(sum(vert[minima[0]+int(averaging/2):minima[-1]+int(averaging/2)]))
    #print(xpos, smooth_deriv, drop_start, drop_end, height, height_loc, volume)
    # Disabled diagnostic plotting, kept for debugging.
    '''figs,[axes1, axes2, axes3] = plt.subplots(nrows=3)
    axes1.plot(horz, vert)
    for k in range(len(beginning)):
        axes1.plot(horz[minima[beginning[k]]], vert[minima[beginning[k]]], '*')
        axes1.plot(horz[minima[end[k]]], vert[minima[end[k]]], 'o')
    #axes1.plot(horz[beginning[0]+height_loc[0]], height[0], 's')
    axes1.vlines(centroid, 0, 100)
    axes2.plot(xpos, smooth_deriv)
    axes3.plot(xpos, doublederiv)
    axes3.plot(xposdd, smooth_dblderiv)'''
    #plt.show()
    return(xpos, smooth_deriv, drop_start, drop_end, height, height_loc, fit_pos, fitheight, centroid)
########Dealing with data
# Per-frame accumulators: one sub-list appended per processed frame.
positions = []       # fitted peak x positions
heights = []         # fitted peak heights (pipette-baseline corrected)
grads = []           # slope between each drop's edges
rs = []              # pipette radius at each drop location
vol = []             # never filled in -- volume computation is commented out
drops_positions =[]  # drop centres of mass
times = []           # timestamp per detected drop
fig = plt.figure(1)
ax1 = fig.add_subplot(211)  # smoothed profiles + detected features
ax2 = fig.add_subplot(212)  # smoothed derivative + edge markers
start = 1               # first frame index to process
avg_interval = 30       # profile smoothing window (samples)
half_interval = 15      # half of avg_interval (window centre offset)
smooth_interval= 30     # derivative smoothing window for extrema_checker
half_smo_interval = 15  # half of smooth_interval
# Process each frame: smooth its profile, find drops, and record their
# geometry.  NOTE(review): frame count 268 is hard-coded -- the commented
# loop bound below suggests it should track len(profile); confirm.
for k in range(268):
    #for k in range(int(len(profile))-start):
    prof_smooth = []
    hzt = []
    thresh = []
    horz_t = []
    '''fig = plt.figure(1)
    ax1 = fig.add_subplot(211)
    ax2 = fig.add_subplot(212)'''
    # Sliding linear fit smooths the raw profile for this frame.
    for m in range(len(profile[k+start])-avg_interval):
        p = np.polyfit(horizontal[k+start][m:m+avg_interval], profile[k+start][m:m+avg_interval], 1)
        height = np.polyval(p,horizontal[k+start][m+half_interval])
        prof_smooth.append(height)
        # NOTE(review): x positions are taken from row `avg_interval`, not
        # row k+start like the fit above -- confirm this is intentional.
        hzt.append(horizontal[avg_interval][m+half_interval])
        #thresh.append(height)
        #horz_t.append(horizontal[10][m+5])
    ax1.plot(hzt, prof_smooth)
    # Per-frame feature lists, one entry per detected drop.
    h = []
    r = []
    grad = []
    pos = []
    drop_pos = []
    volumes = []
    timer = []
    # spot = (xpos, smooth_deriv, drop_start, drop_end, height, height_loc,
    #         fit_pos, fitheight, centroid) -- see extrema_checker.
    spot=extrema_checker(hzt, prof_smooth,smooth_interval)
    ax2.plot(spot[0], spot[1])
    # Iterate only over as many drops as every feature list can supply.
    x = [len(spot[3]),len(spot[2]), len(spot[4]), len(spot[5])]
    #print(x)
    w = min(x)
    for z in range(w):
        # Mark drop edges on the derivative plot and, baseline-corrected
        # with the pipette polynomial, on the profile plot.
        ax2.plot(spot[0][spot[2][z]], spot[1][spot[2][z]], 'o')
        ax2.plot(spot[0][spot[3][z]], spot[1][spot[3][z]], 's')
        ax1.plot(hzt[half_smo_interval+spot[3][z]], prof_smooth[half_smo_interval+spot[3][z]]+np.polyval(pipette, hzt[half_smo_interval+spot[3][z]]),'ro')
        ax1.plot(hzt[half_smo_interval+spot[2][z]], prof_smooth[half_smo_interval+spot[2][z]]+np.polyval(pipette, hzt[half_smo_interval+spot[2][z]]),'bo')
        ax1.plot(hzt[spot[5][z]], spot[4][z]+np.polyval(pipette, hzt[half_smo_interval+spot[5][z]]),'ks')
        ax1.plot([hzt[half_smo_interval+spot[3][z]],hzt[half_smo_interval+spot[2][z]]], [prof_smooth[half_smo_interval+spot[3][z]]+np.polyval(pipette, hzt[half_smo_interval+spot[3][z]]),prof_smooth[half_smo_interval+spot[2][z]]+np.polyval(pipette, hzt[half_smo_interval+spot[2][z]])])
        # Slope between the two (baseline-corrected) drop edges.
        grad.append((prof_smooth[half_smo_interval+spot[3][z]]+np.polyval(pipette, hzt[half_smo_interval+spot[3][z]])-prof_smooth[half_smo_interval+spot[2][z]]-np.polyval(pipette, hzt[half_smo_interval+spot[2][z]]))/(hzt[half_smo_interval+spot[3][z]]-hzt[half_smo_interval+spot[2][z]]))
        h.append(spot[7][z]+np.polyval(pipette, hzt[half_smo_interval]+spot[6][z]))
        #drop_pos.append((spot[0][spot[2][z]]+spot[0][spot[3][z]])/2)
        drop_pos.append(spot[8][z])
        ax1.plot(hzt[spot[5][z]], spot[4][z],'*')
        ax1.plot(spot[6][z], spot[7][z],"o")
        pos.append(spot[6][z])
        r.append(np.polyval(pipette, hzt[half_smo_interval+spot[5][z]]))
        ax1.plot(hzt, prof_smooth+np.polyval(pipette, hzt))
        ax1.vlines(spot[8][z], 0, 100)
        timer.append(float(timedata[k+start-1,0]))
        #volumes.append(spot[6][z])
    ax1.set_xlim(0,1280)
    ax2.set_xlim(0,1280)
    #plt.title(str(k+start))
    # Blocking: the window must be closed each frame before the loop continues.
    plt.show()
    heights.append(h)
    rs.append(r)
    grads.append(grad)
    positions.append(pos)
    drops_positions.append(drop_pos)
    vol.append(volumes)
    times.append(timer)
# Pad every per-frame list with zeros up to the longest frame so the ragged
# lists can be stacked into rectangular arrays.
length = len(sorted(grads,key=len, reverse=True)[0])
pos_array= np.array([xi+[0]*(length-len(xi)) for xi in positions])
grad_array =np.array([xi+[0]*(length-len(xi)) for xi in grads])
rs_array =np.array([xi+[0]*(length-len(xi)) for xi in rs])
heights_array =np.array([xi+[0]*(length-len(xi)) for xi in heights])
drop_pos_array = np.array([xi+[0]*(length-len(xi)) for xi in drops_positions])
time_array = np.array([xi+[0]*(length-len(xi)) for xi in times])
print(grad_array)
print(pos_array.shape)
# One trace per drop column: fitted peak position over time.
figv = plt.figure(3)
axv = figv.add_subplot(111)
for k in range(pos_array.shape[1]):
    axv.plot(time_array[:,k], pos_array[:,k], '.')
# Centre-of-mass position over time.
figm = plt.figure(4)
axm = figm.add_subplot(111)
for k in range(pos_array.shape[1]):
    axm.plot(time_array[:,k], drop_pos_array[:,k], '.')
plt.show()
# Persist all padded arrays next to the input data.
np.savetxt(directory +'drop_positions.csv', pos_array)
np.savetxt(directory +'drop_com.csv', drop_pos_array)
np.savetxt(directory +'drop_height.csv', heights_array)
np.savetxt(directory +'gradients.csv', grad_array)
np.savetxt(directory +'drop_piprad.csv', rs_array)
np.savetxt(directory +'times.csv', time_array)
|
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import scattertext as st
import time
import scattertext.categoryprojector.pairplot

# The timestamps printed along the way profile each stage of the pipeline.
t0 = time.time()
newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
print(time.time() - t0)

vectorizer = TfidfVectorizer()
tfidf_X = vectorizer.fit_transform(newsgroups_train.data)
print(time.time() - t0)

# Build a scattertext corpus, re-using the tf-idf vocabulary so the count
# matrix shares the same feature indexing.
corpus = st.CorpusFromScikit(
    X=CountVectorizer(vocabulary=vectorizer.vocabulary_).fit_transform(newsgroups_train.data),
    y=newsgroups_train.target,
    feature_vocabulary=vectorizer.vocabulary_,
    category_names=newsgroups_train.target_names,
    raw_texts=newsgroups_train.data
).build().get_unigram_corpus()
print(time.time() - t0)

# Use the 'st' alias consistently (the bare 'scattertext' name only resolved
# as a side effect of the submodule import above).
html = st.produce_pairplot(corpus)
print(time.time() - t0)

file_name = 'demo_pair_plot.html'
# Context manager guarantees the handle is flushed and closed
# (the old bare open(...).write(...) leaked the file object).
with open(file_name, 'wb') as out_file:
    out_file.write(html.encode('utf-8'))
print('Open ./%s in Chrome.' % (file_name))
print(time.time() - t0)
|
import pandas as pd
from pandas_datareader import data as pdr
import pymysql, calendar, time, json
from datetime import datetime
from threading import Timer
import pymysql
import sys
import yfinance
class DBUpdater:
    """Keeps the company_info and daily_price tables of finance_global in
    sync with Yahoo Finance, rescheduling itself to run once per day."""

    def __init__(self):
        """Connect to MySQL and create the tables if they do not exist."""
        # NOTE(review): credentials are hard-coded -- consider moving them
        # to a config file or environment variables.
        self.conn = pymysql.connect(host='localhost', user='hong', password='hong', db='finance_global', charset='utf8')
        with self.conn.cursor() as curs:
            sql = """
            CREATE TABLE IF NOT EXISTS company_info (
                code VARCHAR(20),
                company VARCHAR(100),
                last_update DATE,
                PRIMARY KEY(code)
            )
            """
            curs.execute(sql)
            # DOUBLE (not BIGINT) so fractional prices survive storage.
            sql = """
            CREATE TABLE IF NOT EXISTS daily_price (
                code VARCHAR(20),
                date DATE,
                open DOUBLE(40,10),
                high DOUBLE(40,10),
                low DOUBLE(40,10),
                close DOUBLE(40,10),
                volume DOUBLE(40,10),
                PRIMARY KEY (code, date)
            )
            """
            curs.execute(sql)
        self.conn.commit()
        self.codes = dict()  # code -> company name cache

    def __del__(self):
        self.conn.close()

    def read_global_code(self):
        """Read 'global_list' (one 'CODE:COMPANY' entry per line) into a
        DataFrame with columns code/company.  Exits if the file is missing."""
        target_company = []
        try:
            with open('global_list', 'r') as in_file:
                for line in in_file.read().split('\n'):
                    if line != '':
                        target_company.append(line)
        except FileNotFoundError:
            print('Need global list! Exit the program!')
            sys.exit()
        print(target_company)
        rows = []
        for item in target_company:
            parts = item.split(':')
            rows.append([parts[0], parts[1]])
        return pd.DataFrame(rows, columns=['code', 'company'], index=range(len(rows)))

    def update_comp_info(self):
        """Refresh company_info from the global list at most once per day
        and populate self.codes."""
        df = pd.read_sql("SELECT * FROM company_info", self.conn)
        for idx in range(len(df)):
            self.codes[df['code'].values[idx]] = df['company'].values[idx]
        with self.conn.cursor() as curs:
            curs.execute("SELECT max(last_update) FROM company_info")
            rs = curs.fetchone()
            today = datetime.today().strftime('%Y-%m-%d')
            if rs[0] is None or rs[0].strftime('%Y-%m-%d') < today:
                print("Read company list...")
                krx = self.read_global_code()
                for idx in range(len(krx)):
                    code = krx.code.values[idx]
                    company = krx.company.values[idx]
                    # json.dumps quotes and escapes each value for the SQL literal.
                    sql = "REPLACE INTO company_info (code, company, last_update) VALUES ({},{},{})".format(json.dumps(code), json.dumps(company), json.dumps(today))
                    curs.execute(sql)
                    self.codes[code] = company
                    tmnow = datetime.now().strftime('%Y-%m-%d %H:%M')
                    # (fixed 'company_infro' typo in the progress log)
                    print(f"[{tmnow}] #{idx+1:04d} REPLACE INTO company_info VALUES ({code}, {company}, {today})")
                self.conn.commit()

    def get_stock(self, name, start, end):
        """Download OHLCV data for `name` from Yahoo between two ISO dates."""
        y, m, d = map(int, start.split('-'))
        start = datetime(y, m, d)
        y, m, d = map(int, end.split('-'))
        end = datetime(y, m, d)
        return pdr.DataReader(name, 'yahoo', start, end)

    def read_yahoo(self, code, company, start_date):
        """Fetch daily prices for one company and normalise column names."""
        end_date = datetime.now().strftime('%Y-%m-%d')
        print("waiting for {} data...".format(company))
        df = self.get_stock(code, start_date, end_date)
        print("{} data downloaded".format(company))
        df['Date'] = df.index
        df = df[['Date','Close','Open','High','Low','Volume']]
        df = df.rename(columns={'Date':'date','Close':'close','Open':'open','High':'high','Low':'low','Volume':'volume'})
        # NOTE(review): Series.replace only hits exact-match '.' values --
        # confirm this is still needed for Yahoo's date format.
        df['date'] = df['date'].replace('.','-')
        df = df.dropna()
        df[['close','open','high','low','volume']] = df[['close','open','high','low','volume']].astype(float)
        return df

    def replace_into_db(self, df, num, code, company):
        """REPLACE each row of df into daily_price and commit once."""
        with self.conn.cursor() as curs:
            for r in df.itertuples():
                # Values come from Yahoo/our own list, not user input, but a
                # parameterised query would still be safer than str.format.
                sql = "REPLACE INTO daily_price VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}')".format(code, r.date, r.open, r.high, r.low, r.close, r.volume)
                curs.execute(sql)
            self.conn.commit()
        print("[{}] #{:04d} {} ({}) : {} rows > REPLACE_INTO daily_price [OK]".format(datetime.now().strftime('%Y-%m-%d %H:%M'), num+1, company, code, len(df)))

    def update_daily_price(self, start_date):
        """Download and store daily prices for every known company code."""
        for idx, code in enumerate(self.codes):
            df = self.read_yahoo(code, self.codes[code], start_date)
            if df is None:
                continue
            self.replace_into_db(df, idx, code, self.codes[code])

    def execute_daily(self):
        """Run one full update, then schedule the next run for 17:00 tomorrow."""
        self.update_comp_info()
        try:
            with open('config_global.json', 'r') as in_file:
                config = json.load(in_file)
                start_date = config['start_date']
        except FileNotFoundError:
            with open('config_global.json', 'w') as out_file:
                start_date = '2016-01-01'
                # Persist the same default we are about to use (the old code
                # wrote '2021-01-01' while actually using '2016-01-01').
                config = {'start_date': start_date}
                json.dump(config, out_file)
        self.update_daily_price(start_date)
        tmnow = datetime.now()
        lastday = calendar.monthrange(tmnow.year, tmnow.month)[1]
        # datetime.replace takes keyword arguments; the old `year==` /
        # `month==` comparisons raised NameError at month/year boundaries.
        if tmnow.month == 12 and tmnow.day == lastday:
            tmnext = tmnow.replace(year=tmnow.year+1, month=1, day=1, hour=17, minute=0, second=0)
        elif tmnow.day == lastday:
            tmnext = tmnow.replace(month=tmnow.month+1, day=1, hour=17, minute=0, second=0)
        else:
            tmnext = tmnow.replace(day=tmnow.day+1, hour=17, minute=0, second=0)
        secs = (tmnext - tmnow).seconds
        t = Timer(secs, self.execute_daily)
        print("Waiting for next update ({}) ...".format(tmnext.strftime('%Y-%m-%d %H:%M')))
        t.start()
if __name__ == '__main__':
    # Start the first update immediately; execute_daily() reschedules itself
    # via threading.Timer for the following days.
    dbu = DBUpdater()
    dbu.execute_daily()
|
import os
import discord
from discord.ext import commands
class SelfRole(commands.Cog):
    """
    This class does the following given a message ID on the server:
    - For every reaction on the message, it assigns a role to the users
      who have reacted to the message.
    - A mapping of emojis <-> Role can be configured
    - Re-assigns those roles when restarted: checks that every user on the
      guild has the role only if they have reacted to said message

    NOTE(review): several calls below (`bot.get_guild()` with no argument,
    `bot.remove_emoji`, `bot.give_role`, `bot.remove_role`) are not part of
    stock discord.py -- presumably this bot is a custom subclass; verify.
    """
    def __init__(self, bot):
        self.bot = bot
        # Populated in on_ready() from environment variables.
        self.channel_id = 0
        self.message_id = 0
        self.roles_mapping = {}  # emoji name -> discord Role

    def test(self):
        # Validates the required environment variables.
        # NOTE(review): written in unittest style -- `self.fail` does not
        # exist on a Cog, so the except branches would raise AttributeError;
        # confirm how this method is meant to be invoked.
        assert (
            not os.getenv("ROLE_MESSAGE_ID") is None
        ), "ROLE_MESSAGE_ID is not defined"
        assert (
            not os.getenv("ROLE_CHANNEL_ID") is None
        ), "ROLE_CHANNEL_ID is not defined"
        assert (
            not os.getenv("ROLE_EMOJIS") is None
        ), "ROLE_EMOJIS is not defined"
        try:
            _ = int(os.getenv("ROLE_MESSAGE_ID", default="0"))
        except Exception:
            self.fail("ROLE_MESSAGE_ID is not an integer")
        try:
            _ = int(os.getenv("ROLE_CHANNEL_ID", default="0"))
        except Exception:
            self.fail("ROLE_CHANNEL_ID is not an integer")

    @commands.Cog.listener()
    async def on_ready(self):
        # On (re)connect: load config, then reconcile emojis and roles with
        # the current state of the role message.
        self.channel_id = self.get_channel_id()
        self.message_id = self.get_message_id()
        self.extract_roles_mapping()
        channel = self.bot.get_guild().get_channel(self.channel_id)
        message = await channel.fetch_message(self.message_id)
        await self.clean_emojis(message)
        await self.update_roles(message)

    async def clean_emojis(self, message):
        """Remove non wanted emojis from role message"""
        for message_reaction in message.reactions:
            if message_reaction.emoji.name not in self.roles_mapping.keys():
                users = await message_reaction.users().flatten()
                for member in users:
                    await self.bot.remove_emoji(
                        member,
                        message_reaction.emoji,
                        message.channel.id,
                        message.id,
                    )
                # NOTE(review): this break stops after the FIRST unexpected
                # reaction -- later unwanted reactions are left in place;
                # confirm whether that is intentional.
                break

    async def update_roles(self, message):
        """Update users roles based on reaction on the given message and
        according to roles_mapping
        """
        for emoji, role in self.roles_mapping.items():
            # Check the reaction corresponding to that emoji on the message
            users = []
            for message_reaction in message.reactions:
                if message_reaction.emoji.name == emoji:
                    users = await message_reaction.users().flatten()
                    break
            for member in self.bot.get_guild().members:
                # User have the role but has not reacted
                if role in member.roles:
                    if member not in users:
                        await self.bot.remove_role(member, role)
                # User does not have the role but has reacted
                else:
                    if member in users:
                        await self.bot.give_role(member, role)

    def get_channel_id(self):
        """Gives the ID of the channel used to allow users
        to select their roles
        """
        return int(os.getenv("ROLE_CHANNEL_ID", default="0"))

    def get_message_id(self):
        """Gives the ID of the message used to allow users to select
        their roles
        """
        return int(os.getenv("ROLE_MESSAGE_ID", default="0"))

    def extract_roles_mapping(self):
        """Extract mapping of emoji to role to assign roles to users
        that add the emoji to the message with ID ROLE_MESSAGE_ID

        Expected format: ':emoji:,RoleName' entries separated by ';'.
        """
        raw_mapping = os.getenv("ROLE_EMOJIS", default="")
        self.roles_mapping = {}
        try:
            raw_mappings = raw_mapping.split(";")
            for raw_mapping in raw_mappings:
                if raw_mapping == "":
                    continue
                emoji, role_name = raw_mapping.split(",")
                role = discord.utils.get(
                    self.bot.get_guild().roles, name=role_name
                )
                if not role:
                    # Unknown roles are skipped, not fatal.
                    print(
                        "Role {} not found on that server, ignored".format(
                            role_name.encode("ascii", "ignore")
                        )
                    )
                    continue
                self.roles_mapping[emoji] = role
        except Exception:
            raise Exception(
                "ROLE_EMOJIS is badly formatted: \
                :[EMOJI 1]:,[ROLE 1];:[EMOJI 2]:,[ROLE 2]"
            )
        print("Self role feature has loaded the following roles: ")
        for emoji, role in self.roles_mapping.items():
            print(
                ":{}: to {}".format(
                    emoji.encode("ascii", "ignore"),
                    role.name.encode("ascii", "ignore"),
                )
            )

    async def process_reaction(self, payload, remove=False):
        """
        Analyse the payload's emoji to determine which role to add or remove
        """
        member = self.bot.get_guild().get_member(payload.user_id)
        if payload.emoji.name not in self.roles_mapping.keys():
            if remove:
                return
            # Unmapped emoji added: delete it so the message stays clean.
            await self.bot.remove_emoji(
                member, payload.emoji, payload.channel_id, payload.message_id
            )
            return
        role = self.roles_mapping[payload.emoji.name]
        if remove:
            await self.bot.remove_role(member, role)
        else:
            await self.bot.give_role(member, role)

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload):
        # Only react to the configured role message.
        if payload.message_id != self.message_id:
            return
        await self.process_reaction(payload, remove=False)

    @commands.Cog.listener()
    async def on_raw_reaction_remove(self, payload):
        # Only react to the configured role message.
        if payload.message_id != self.message_id:
            return
        await self.process_reaction(payload, remove=True)
def setup(bot):
    """Entry point used by discord.py's load_extension to register the cog."""
    bot.add_cog(SelfRole(bot))
|
import turtle

STEP = 50  # pixels moved per key press


def _move(dx, dy):
    """Jump the turtle by (dx, dy) relative to its current position."""
    turtle.goto(turtle.xcor() + dx, turtle.ycor() + dy)


def up():
    """Move one step up (W key)."""
    _move(0, STEP)


def down():
    """Move one step down (S key)."""
    _move(0, -STEP)


def left():
    """Move one step left (A key)."""
    _move(-STEP, 0)


def right():
    """Move one step right (D key)."""
    _move(STEP, 0)


# WASD key bindings; listen() gives the window keyboard focus.
turtle.onkeypress(up, "w")
turtle.onkeypress(down, "s")
turtle.onkeypress(right, "d")
turtle.onkeypress(left, "a")
turtle.listen()
turtle.mainloop()
|
import os
import subprocess

# mCRL2 toolchain: spec -> LPS -> untimed LPS -> real-eliminated LPS -> LTS.
# subprocess.run with an argument list avoids shell parsing, and check=True
# stops the pipeline as soon as one stage fails (os.system silently ignored
# non-zero exit codes, letting later stages run on stale/missing files).
subprocess.run(['mcrl22lps', '-nfTv', 'ball_game.mcrl2', 'ball_game.lps'], check=True)
subprocess.run(['lpsuntime', '-v', 'ball_game.lps', 'ball_gameu.lps'], check=True)
subprocess.run(['lpsrealelm', '--max=11', '-v', 'ball_gameu.lps', 'ball_gamer.lps'], check=True)
subprocess.run(['lps2lts', '-v', 'ball_gamer.lps', 'ball_game.lts'], check=True)
|
from MaximumDecisionTree import MaximumDecisionTree
import random
import numpy as np
class RandomForest:
    """Ensemble of MaximumDecisionTree models trained on random subsets,
    predicting by majority vote."""

    def __init__(self, n_estimator=5, maximum_samples=100, max_depth=2, sampling_type='bagging'):
        self.n_estimator = n_estimator        # number of trees in the forest
        self.sampling_type = sampling_type    # 'bagging' -> sample with replacement
        self.maximum_sample = maximum_samples # subset size drawn per tree
        self.max_depth = max_depth            # depth limit for each tree

    def sampling(self, x, y):
        """Draw a training subset of size min(maximum_sample, len(x)).

        'bagging' now samples WITH replacement (a true bootstrap — the old
        code used random.sample, i.e. without replacement, regardless of
        sampling_type); any other sampling_type keeps without-replacement
        sampling.  The size is clamped so a dataset smaller than
        maximum_sample no longer raises ValueError.
        """
        k = min(self.maximum_sample, len(x))
        if self.sampling_type == 'bagging':
            subset = random.choices(range(len(x)), k=k)
        else:
            subset = random.sample(range(len(x)), k=k)
        return x[subset], y[subset]

    def fit_single_tree(self, x, y):
        """Fit and return one decision tree on the given subset."""
        decision_tree = MaximumDecisionTree(max_depth=self.max_depth)
        decision_tree.fit(x, y)
        return decision_tree

    def fit(self, x, y):
        """Train n_estimator trees, each on its own random subset of (x, y)."""
        self.random_forest = []
        for _ in range(self.n_estimator):
            sub_x, sub_y = self.sampling(x, y)
            self.random_forest.append(self.fit_single_tree(sub_x, sub_y))

    def predict(self, test):
        """Return the majority-vote class for each test sample.

        Ties resolve toward the smallest label because np.argmax keeps the
        first maximum and np.unique returns sorted values.
        """
        n_test = len(test)
        total_predictions = np.zeros((self.n_estimator, n_test))
        for n in range(self.n_estimator):
            total_predictions[n] = self.random_forest[n].predict(test)
        predictions = []
        for i in range(n_test):
            unique, counts = np.unique(total_predictions[:, i], return_counts=True)
            predictions.append(unique[np.argmax(counts)])
        return predictions
|
# Small demo of shift operators and bit testing.
n = 10
halved = n >> 1   # right shift by one == floor division by 2
print(halved)
doubled = n << 1  # left shift by one == multiplication by 2
print(doubled)
bit_index = 3
# Test whether bit (bit_index - 1), i.e. the 3rd bit counted from 1, is set in 7.
if 7 & (1 << (bit_index - 1)):
    print("set")
|
class MyString():
    """A small string wrapper exposing 1-indexed helper operations."""

    def __init__(self, str=""):
        # The parameter name `str` shadows the builtin but is kept for
        # backward compatibility with existing keyword callers.
        self.str = str

    def getString(self):
        """Return the wrapped string."""
        return self.str

    def getVowels(self):
        """Return every vowel occurrence (a, e, i, o, u; either case),
        preserving each character's original case."""
        return "".join(ch for ch in self.str if ch.lower() in "aeiou")

    def getSubstring(self, start, end):
        """Return the substring from `start` to `end` (both included,
        1-indexed).  Ranges past the end of the string are clipped rather
        than raising, matching the old partial-result behaviour."""
        return self.str[start - 1:end]

    def getCharList(self):
        """Return the string as a list of its characters.
        (The old debug print of the length has been removed.)"""
        return list(self.str)

    def indexOf(self, c):
        """Return the 1-based index of the first occurrence of `c`,
        or 0 if it does not occur.

        Fixes the old implementation, which misused the argument as an
        index into the string instead of searching for it.
        """
        return self.str.find(c) + 1

    def removeChar(self, c):
        """Return a copy of the string with every occurrence of `c` removed."""
        return self.str.replace(c, "")

    def invert(self):
        """Return the string reversed."""
        return self.str[::-1]
|
import pygame, sys
from random import randint
from pygame.sprite import Sprite

#Game parameters
screen_width = 800
screen_height = 800
black = (0,0,0)
white = (255, 255, 255)
red = (255,0,0)
score = 0             # balloons popped so far
button_down = False   # debounce flag: True while the mouse button is held

#Initializing game
pygame.init()
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("Minigame1")
clock = pygame.time.Clock()

#Create Crosshair
# Loaded once; drawn centred on the mouse every frame in the main loop.
crosshair_image = pygame.image.load("crosshair.png").convert_alpha()
crosshair_w, crosshair_h = crosshair_image.get_size()
class Balloon(Sprite):
def __init__(self, screen):
Sprite.__init__(self)
self.screen = screen
self.image = pygame.image.load("new-green-balloon-light-lft-th.png").convert_alpha()
self.image_w, self.image_h = self.image.get_size()
self.speed = randint(25,75) * 0.01
self.x_position = randint(self.image_w/2, self.screen.get_width()-self.image_w/2)
self.y_position = self.screen.get_height() + self.image_h/2
def update(self, time_passed):
self.y_position -= self.speed * time_passed
def blitme(self):
self.draw_pos = self.image.get_rect().move(self.x_position-self.image_w/2, self.y_position-self.image_h/2)
self.screen.blit(self.image, self.draw_pos)
#List of balloons
balloons = [Balloon(screen)]
for x in range(0, randint(3, 5)):
    balloons.append(Balloon(screen))

#Game Loop
gameLoop=True
while gameLoop:
    #Time passed and FPS
    time_passed = clock.tick(150)
    #Mouse position
    mouse_x, mouse_y = pygame.mouse.get_pos()
    #Killing the loop if quit game
    for event in pygame.event.get():
        if (event.type==pygame.QUIT):
            gameLoop=False
    #Drawing and updating balloons
    screen.fill(black)
    # NOTE(review): `balloons` is mutated (remove/append) while being
    # iterated below -- this skips elements; iterate over a copy instead.
    for balloon in balloons:
        #Update then draw balloon
        balloon.update(time_passed)
        balloon.blitme()
        #If balloon shot
        if pygame.mouse.get_pressed()[0] == 1 and button_down == False:
            if balloon.draw_pos.collidepoint(pygame.mouse.get_pos()):
                balloons.remove(balloon)
                score += 1
            # Debounce so one click pops at most one balloon.
            button_down = True
        elif pygame.mouse.get_pressed()[0] == 0 and button_down == True:
            button_down = False
        #If no more balloons, make more
        if len(balloons) == 0:
            for x in range(0, randint(3, 5)):
                balloons.append(Balloon(screen))
        #Draw multiple balloons after a balloon dies
        # Balloon drifted fully above the top edge: retire and respawn more.
        if balloon.y_position < -balloon.image_h/2:
            balloons.remove(balloon)
            if len(balloons) < 7:
                for x in range(0, randint(2, 4)):
                    balloons.append(Balloon(screen))
        # NOTE(review): after 10 s this prints the score and calls
        # pygame.quit() while the loop keeps running -- subsequent pygame
        # calls will fail; presumably the loop should also be exited here.
        if pygame.time.get_ticks() >= 10000:
            print (score)
            pygame.quit()
    #Draw crosshair
    crosshair_x = mouse_x - (crosshair_w/2)
    crosshair_y = mouse_y - (crosshair_h/2)
    screen.blit(crosshair_image, (crosshair_x, crosshair_y))
    pygame.display.flip()
pygame.quit()
|
import pyexcel
import json

# Convert the course records in courses.json to an Excel spreadsheet.
with open('./courses.json') as course_file:
    course_records = json.load(course_file)
pyexcel.save_as(records=course_records, dest_file_name="akashi.xls")
|
# -*- coding: utf-8 -*-
from aiogram import types
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
from django.conf import settings
from django.core.urlresolvers import reverse
from contact.forms import *
def contact(request):
    """Contact-form view: email the message on a valid POST, else show the form.

    Redirects to contact:thankyou after a successful send.  Invalid POSTs
    now re-render the page with the bound form so validation errors are
    shown (previously they were silently redirected to the thank-you page
    without sending anything).
    """
    if request.method == 'POST':  # If the form has been submitted...
        form = ContactForm(request.POST or None)  # A form bound to the POST data
        if form.is_valid():
            subject = form.cleaned_data['subject']
            message = form.cleaned_data['message']
            sender = form.cleaned_data['sender']
            cc_myself = form.cleaned_data['cc_myself']
            # Copy so we never mutate the shared settings.RECIPIENTS list
            # (the old in-place append leaked CC addresses across requests).
            recipients = list(settings.RECIPIENTS)
            if cc_myself:
                recipients.append(sender)
            send_mail(subject, message, sender, recipients)
            return HttpResponseRedirect(reverse('contact:thankyou'))  # Redirect after POST
        # Invalid POST: fall through and re-render with the bound form.
    else:
        form = ContactForm()  # An unbound form
    return render(request, 'contact/contact.html', {'form': form})
|
from schema import *
from flask import Flask, render_template, request, jsonify, redirect, url_for, flash
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, login_user, logout_user, current_user, login_required, UserMixin
from werkzeug.urls import url_parse
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
app = Flask(__name__)
# NOTE(review): the secret key is hard-coded -- move it to environment config.
app.config['SECRET_KEY'] = 'I dont know how this works'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///schema.db'
db = SQLAlchemy(app)
# One-off schema creation, kept for reference:
# with app.test_request_context():
#     db.create_all()
login_manager = LoginManager(app)
# Anonymous users hitting @login_required views are redirected to 'login'.
login_manager.login_view = 'login'
#-------------------------------------------------------
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
class LoginForm(FlaskForm):
    """Sign-in form: username/password with an optional remember-me flag."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """Account-creation form with uniqueness checks on username and email."""
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')

    def validate_username(self, username):
        """WTForms hook (validate_<field>): reject already-taken usernames."""
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please use a different username.')

    def validate_email(self, email):
        """WTForms hook (validate_<field>): reject already-registered emails."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Please use a different email address.')
#-------------------------------------------------------
class User(UserMixin, db.Model):
    """Application user; only a salted password hash is ever stored."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))
    #tasks = db.relationship('Task', backref='person', lazy=True)

    def __repr__(self):
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        # Store a salted hash, never the plain-text password.
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
@login_manager.user_loader
def load_user(id):
    """Flask-Login hook: look up the user for the id stored in the session."""
    return User.query.get(int(id))
# class Post(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# body = db.Column(db.String(140))
# timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
# user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# def __repr__(self):
# return '<Post {}>'.format(self.body)
class Task(db.Model):
    """A kanban card.  status codes: 0 = todo, 1 = doing, 2 = done
    (see the filters in index())."""
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.String(200))
    status = db.Column(db.Integer)
    person_id = db.Column(db.Integer, db.ForeignKey('user.id'),
                          nullable=True)
#-------------------------------------------------------
@app.route('/')
@app.route('/index')
@login_required
def index():
    """Render the kanban board with the current user's tasks by status."""
    def tasks_with_status(code):
        # Only this user's cards in the given column.
        return Task.query.filter_by(status=code, person_id=current_user.id).all()

    return render_template('index.html',
                           todo=tasks_with_status(0),
                           doing=tasks_with_status(1),
                           done=tasks_with_status(2))
@app.route('/add', methods=['POST'])
def add():
    """Create a new card in the 'todo' column for the current user.

    Expects form field ``card_text``. Returns JSON describing the new
    card, or ``{"status": "fail", "err": ...}`` on error.

    Fixes: removed a leftover debug print; roll the session back on
    failure so a broken commit does not poison later requests.
    """
    try:
        text = request.form.get('card_text', type=str)
        task = Task(text=text, status=0, person_id=current_user.id)
        db.session.add(task)
        db.session.commit()
        return jsonify({
            "status": "success",
            "err": None,
            "card_id": str(task.id),
            "session": section_dict_rev[0],  # presumably the 'todo' column name — defined elsewhere
            "text": task.text
        })
    except Exception as e:
        db.session.rollback()
        return jsonify({"status": "fail", "err": str(e)})
@app.route('/updatestat', methods=['POST'])
def updatestat():
    """Move a card to another column.

    Form fields: ``card_id`` (int), ``section`` (column name, looked up
    in ``section_dict``).

    Fixes: an unknown card_id used to surface as a cryptic
    "'NoneType' object has no attribute 'status'" error; now reported
    explicitly. Session is rolled back on failure.
    """
    try:
        card_id = request.form.get('card_id', type=int)
        section = request.form.get('section', type=str)
        section_id = section_dict[section]
        task = Task.query.get(card_id)
        if task is None:
            return jsonify({"status": "fail", "err": "unknown card_id %r" % card_id})
        task.status = section_id
        db.session.commit()
        return jsonify({"status": "success", "err": None})
    except Exception as e:
        db.session.rollback()
        return jsonify({"status": "fail", "err": str(e)})
@app.route('/remove', methods=['POST'])
def remove():
    """Delete a card by ``card_id``.

    Fixes: deleting a non-existent card used to raise from
    ``db.session.delete(None)``; now reported explicitly. Session is
    rolled back on failure.
    """
    try:
        card_id = request.form.get('card_id', type=int)
        task = Task.query.get(card_id)
        if task is None:
            return jsonify({"status": "fail", "err": "unknown card_id %r" % card_id})
        db.session.delete(task)
        db.session.commit()
        return jsonify({"status": "success", "err": None})
    except Exception as e:
        db.session.rollback()
        return jsonify({"status": "fail", "err": str(e)})
#-------------------------------------------------------
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user, then honour a *relative* ?next= redirect."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid username or password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        target = request.args.get('next')
        # Reject absolute URLs (non-empty netloc) to prevent open redirects.
        if not target or url_parse(target).netloc != '':
            target = url_for('index')
        return redirect(target)
    return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
    # End the Flask-Login session and return to the board (which will
    # bounce anonymous users to the login page via @login_required).
    logout_user()
    return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create an account from the registration form, then send to login."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        # Either a GET request or a failed validation: show the form.
        return render_template('register.html', title='Register', form=form)
    new_user = User(username=form.username.data, email=form.email.data)
    new_user.set_password(form.password.data)
    db.session.add(new_user)
    db.session.commit()
    flash('Thanks for registration! Please login in...')
    return redirect(url_for('login'))
#-------------------------------------------------------
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # must be disabled for any production deployment.
    app.run(debug = True)
class Solution:
    def uniqueOccurrences(self, arr: List[int]) -> bool:
        """Return True when every value in *arr* occurs a distinct number of times.

        Simplification: the manual frequency dict and the special cases for
        empty / single-element input were redundant — Counter handles them
        uniformly (an empty Counter trivially has all-distinct counts).
        """
        from collections import Counter  # local import: snippet has no import block
        counts = Counter(arr)
        # Counts are all distinct iff putting them in a set loses nothing.
        return len(set(counts.values())) == len(counts)
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import skimage.io as io
import skimage.transform as transform
import bound as bo
def main(background_path='/home/jayasurya/Desktop/storerack.jpg'):
    """Composite the extracted foreground over a background image and show it.

    background_path -- image file to use as backdrop (generalized from a
                       hard-coded path; default unchanged for compatibility).
    """
    foreground = bo.get_foreground()
    # Binary mask: 1 wherever the foreground has any non-zero pixel.
    mask = foreground.copy()
    mask[mask > 0] = 1
    background = io.imread(background_path) / 255.0
    background = transform.resize(background, foreground.shape[:2])
    # Zero the background under the mask, then drop the foreground in.
    composed_image = background * (1 - mask) + foreground
    plt.imshow(composed_image)
    plt.axis('off')
    plt.show()


main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
from json import JSONDecoder
import simplejson
import requests
import time
# Face++ API credentials and endpoints; fill in your own key/secret.
key = "your key"
secret = "your secret"
url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'     # face detection
url_add = "https://api-cn.faceplusplus.com/imagepp/v1/mergeface"  # face merging
def find_face(imgpath):
    """Detect faces in the local image *imgpath* via Face++ and return the
    first face's rectangle (dict with top/left/width/height keys).

    Fixes: the image file is now closed via a context manager (it was
    previously left open), and a pointless simplejson dumps/loads
    round-trip of the already-decoded response was removed.
    """
    print("Processing Image")
    data = {"api_key": key, "api_secret": secret, "image_url": imgpath, "return_landmark": 1}
    with open(imgpath, "rb") as img_file:
        response = requests.post(url, data=data, files={"image_file": img_file})
    req_con = response.content.decode('utf-8')
    req_dict = JSONDecoder().decode(req_con)
    rectangle = req_dict['faces'][0]['face_rectangle']
    return rectangle
def synthetic_face(image_url_1, image_url_2, image_url, number):
    """Merge the faces of two local images via Face++ and write the result.

    image_url_1 -- template image path (target of the merge)
    image_url_2 -- merge-source image path
    image_url   -- output path for the merged JPEG
    number      -- merge_rate (0-100): how strongly the second face shows

    Fixes: an API error used to surface as a bare KeyError on 'result';
    it is now raised with the API's own message. Also removed a
    redundant str(str(...)) wrapper.
    """
    ff1 = find_face(image_url_1)
    time.sleep(1)  # crude rate limiting between the two detect calls
    ff2 = find_face(image_url_2)
    rectangle1 = str(ff1['top']) + "," + str(ff1['left']) + "," + str(ff1['width']) + "," + str(ff1['height'])
    rectangle2 = str(ff2['top']) + "," + str(ff2['left']) + "," + str(ff2['width']) + "," + str(ff2['height'])
    with open(image_url_1, 'rb') as file:
        f1_64 = base64.b64encode(file.read())
    with open(image_url_2, 'rb') as file:
        f2_64 = base64.b64encode(file.read())
    data = {"api_key": key, "api_secret": secret, "template_base64": f1_64, "template_rectangle": rectangle1,
            "merge_base64": f2_64, "merge_rectangle": rectangle2, "merge_rate": number}
    response = requests.post(url_add, data=data)
    req_con = response.content.decode('utf-8')
    req_dict = JSONDecoder().decode(req_con)
    if 'result' not in req_dict:
        raise RuntimeError("mergeface failed: %s" % req_dict.get('error_message', req_dict))
    imgdata = base64.b64decode(req_dict['result'])
    with open(image_url, 'wb') as file:
        file.write(imgdata)
def main():
    """Demo driver: merge the two sample images at 50% into result.jpg."""
    template_path = "yourIMG1.jpg"
    merge_path = "yourIMG2.jpg"
    output_path = 'result.jpg'
    synthetic_face(template_path, merge_path, output_path, 50)
    print("done!")
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
|
from __future__ import print_function
from tensorflow.python.keras.models import load_model
import tensorflow as tf
import numpy as np
from PIL import Image
MODEL_NAME = 'cat10086.hd5'
#MODEL_NAME = 'cattrain3.hd5'
# Class index -> driving command emitted by the classifier.
# NOTE(review): `dict` shadows the builtin; rename would touch classify() too.
dict={0:'Backward', 1:'Forward', 2:'Left', 3:'Right',4:'Stop'}
# TF1-style: capture the default graph so predictions can re-enter it.
graph = tf.get_default_graph()
def classify(model,image):
    """Run *model* on a preprocessed image batch.

    Returns (label, confidence, class_index) for the most likely class,
    using the module-level `dict` mapping for label names.
    """
    global graph
    # TF1: predict inside the graph the model was loaded into.
    with graph.as_default():
        result = model.predict(image)
    themax = np.argmax(result)
    return (dict[themax],result[0][themax],themax)
def load_image(image_fname):
    """Read *image_fname*, resize to 249x249, scale to [0, 1] and return
    a (1, H, W, C) float array ready for model.predict."""
    image = Image.open(image_fname).resize((249, 249))
    pixels = np.asarray(image) / 255.0
    return pixels[np.newaxis, ...]
def main():
    # Load the trained network once, classify one sample frame, report confidence.
    model = load_model(MODEL_NAME)
    img = load_image("c.JPG")
    label,prob,_ = classify(model,img)
    print("We think with certainty %3.2f that it is %s."%(prob, label))
if __name__=="__main__":
    main()
|
# Reference snippet: Python comparison, identity and membership operators.
# (Not runnable as-is: x, y and collection are undefined placeholders.)
x < y    # less than
x > y    # greater than
x == y   # equal values
x >= y   # greater than or equal
x <= y   # less than or equal
x != y   # not equal
# Compare if two variables point to the same object
x is y
x is not y
# Determine if a value is in a collection
x in collection
x not in collection
|
"""http://www.geeksforgeeks.org/count-triplets-with-sum-smaller-that-a-given-value/"""
import fileinput
inputLines = fileinput.input()
#I didn't manage to solve this one. I looked it up after staring for about an hour.
#Also its currently n^2 and doesn't pass so I'm shelving this one for a day when I've slept.
#Turns out it passes if i just put the whole thing in a function so *shrug*
testCases = int(inputLines.readline())
for l in range(testCases):
    # One test per line pair: (n, target sum s) then the n numbers.
    n, s = list(map(int,inputLines.readline().strip().split()))
    numList = list(sorted(map(int,inputLines.readline().strip().split())))
    #numList.sort()
    count = 0
    # Fix the smallest element v; two-pointer scan counts pairs (j, k)
    # with numList[j] + numList[k] < s - v, i.e. triplets summing < s.
    for i, v in enumerate(numList):
        if v + numList[0] >= s:
            break
        s2 = s - v
        j = i + 1
        k = n - 1
        while j < k:
            if numList[j] + numList[k] < s2:
                # Every index between j and k also works as the third element.
                count += k - j
                j += 1
            else:
                k -= 1
        print(count)
"""
编写一个函数来查找字符串数组中的最长公共前缀。
如果不存在公共前缀,返回空字符串 ""。
示例 1:
输入: ["flower","flow","flight"]
输出: "fl"
示例 2:
输入: ["dog","racecar","car"]
输出: ""
解释: 输入不存在公共前缀。
说明:
所有输入只包含小写字母 a-z 。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/longest-common-prefix
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution(object):
    def longestCommonPrefix(self, strs):
        """Return the longest common prefix of the strings in *strs*.

        :type strs: List[str]
        :rtype: str

        Walks the first string column by column; stops at the first
        position where some other string is exhausted or differs.
        """
        if not strs:
            return ""
        reference = strs[0]
        others = strs[1:]
        for idx, ch in enumerate(reference):
            for other in others:
                # Length check first so other[idx] can never go out of range.
                if idx == len(other) or other[idx] != ch:
                    return reference[:idx]
        return reference
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: libdzyl
#
# Created: 26-05-2014
# Copyright: (c) libdzyl 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import math
def fibo(len):
    """Return the first *len* Fibonacci numbers, starting 0, 1.

    Fix: previously always returned at least [0, 1], even for len <= 1.
    The builtin-shadowing parameter name is kept for caller compatibility.
    """
    if len <= 0:
        return []
    if len == 1:
        return [0]
    a = [0, 1]
    for n in range(len - 2):
        a.append(a[n] + a[n + 1])
    return a
class Circle:
    """Circle defined by a centre `point` (x, y) pair and a `radius`.

    NOTE(review): Python 2 module (print statements below).
    """
    def __init__(self,point,radius):
        self.radius=radius
        self.point=point
    def areaAndGirth(self):
        # Return (area, circumference).
        area=math.pi*pow(self.radius,2)
        girth=2*math.pi*self.radius
        return area,girth
    def printCircle(self):
        # Report centre coordinates, area and circumference to stdout.
        a,b=self.point
        print 'Point:[',a,b,']'
        area,girth=self.areaAndGirth()
        print 'Area:',area
        print 'Girth:',girth
def main():
    # Placeholder entry point; the module currently only defines fibo/Circle.
    pass
if __name__ == '__main__':
    main()
|
# Defino mi funcion
def sumar(num1, num2):
    """Return the sum of the two arguments."""
    resultado = num1 + num2
    return resultado
#region Main
operacion = sumar(4, 5) # Assign the result of sumar(num1, num2) to `operacion`
print(f"El resultado es: {operacion}")
# The function can be called as many times as needed
operacion2 = sumar(3, 2) # Assign the result of sumar(num1, num2) to `operacion2`
print(f"El resultado es: {operacion2}")
#endregion Main
from django.db import models
# TODO: product's price is separated by packing
class Produk(models.Model):
    """A product: name, cost (modal), price (harga), weight and stock."""
    nama = models.CharField(max_length=100)
    modal = models.IntegerField(default=0)
    harga = models.IntegerField(default=0)
    berat = models.IntegerField(default=0)
    stok = models.IntegerField(default=0)
    # NOTE(review): `dibuat_pada` is assigned twice — only the second
    # (auto_now, i.e. "updated at") binding survives, so the created-at
    # column is silently lost. The second was presumably meant to be a
    # differently named "updated" field; renaming changes the DB schema,
    # so confirm intent before fixing.
    dibuat_pada = models.DateTimeField(auto_now_add = True, auto_now = False)
    dibuat_pada = models.DateTimeField(auto_now_add = False, auto_now = True)
    class Meta:
        verbose_name_plural = 'list produk'
    def save(self, *args, **kwargs):
        # TODO: create history if product is created
        print('save method is being called')
        super(Produk, self).save(*args, **kwargs)
    def __str__(self):
        return str(self.nama)
    def get_jumlah_stok(self):
        # Human-readable stock amount, e.g. "12 Kg".
        return str(self.stok) + " Kg"
    @staticmethod
    def get_semua_produk():
        # All products; thin wrapper over the default manager.
        return Produk.objects.all()
class BeratLiter(models.Model):
    """Per-packing weight entry attached to a product."""
    # on_delete made explicit: CASCADE is the historic implicit default and
    # the argument is mandatory as of Django 2.0, so behaviour is unchanged.
    produk = models.ForeignKey(Produk, on_delete=models.CASCADE)
    berat = models.IntegerField(default=0)

    def __str__(self):
        return self.produk.nama
|
import tensorflow as tf
import numpy as np
from PIL import Image
import scipy.misc
import utils
import model
import segmentizer
import net
#test_path = 'learning/00001.jpg'
#test_path = 'learning2/57.jpg'
#test_path = 'input.jpg'
#learning_list = utils.get_files('resized_img/learning')
# Images to feed through the restored network below.
learning_list = utils.get_files('input_dev')
#input0 = utils.get_img(test_path,img_size=[48,48])
#input0 = np.array(Image.fromarray(input0.astype(np.uint8)))#.convert('L'))
#input_org = input0
#input1 = np.reshape(input0, [-1, input0.shape[0], input0.shape[1], 3])
#input = input_#np.zeros([1, 48, 48,3])
#input = np.zeros([1, 48, 48,3])
def imsave(path, img):
    """Write *img* (uint8 array) to *path* as a maximum-quality JPEG."""
    out = Image.fromarray(img)
    out.save(path, quality=100)
# --- TF1-style graph definition: a small conv/fc "SegNet" that maps a
# --- 48x48x3 image to a 48x48 sigmoid mask. ----------------------------------
device_ = '/cpu:0'
x =tf.placeholder(tf.float32, shape=[1,48,48,3],name="input")
#y = tf.placeholder(tf.float32, shape = [None,48*48],name="output_placeholder")
#
#segNet = net.SegNet()
#mask_pred = segNet(x)
# Simple SegNet
weight_kener_size = 5
conv1_weight_shape = [weight_kener_size, weight_kener_size, 3, 64]
conv2_weight_shape = [weight_kener_size, weight_kener_size, 64, 64]
conv3_weight_shape = [3, 3, 64, 64]
fc4_weight_shape = [12 * 12 * 64, 100]
fc5_weight_shape = [100, 400]
fc6_weight_shape = [400, 48 * 48]
print('process')
# Variables for three conv layers and three fully connected layers.
conv1_weight = model.weights_variables(conv1_weight_shape, "conv1_weight")
conv1_bias = model.bias_variables([64], "conv1_bias")
conv2_weight = model.weights_variables(conv2_weight_shape, "conv2_weight")
conv2_bias = model.bias_variables([64], "conv2_bias")
conv3_weight = model.weights_variables(conv3_weight_shape, "conv3_weight")
conv3_bias = model.bias_variables([64], "conv3_bias")
fc4_weight = model.weights_variables(fc4_weight_shape, "fc4_weight")
fc4_bias = model.bias_variables([100], "fc4_bias")
fc5_weight = model.weights_variables(fc5_weight_shape, "fc5_weight")
fc5_bias = model.bias_variables([400], "fc5_bias")
fc6_weight = model.weights_variables(fc6_weight_shape, "fc6_weight")
fc6_bias = model.bias_variables([48 * 48], "fc6_bias")
# conv -> relu -> stride-2 max pool, twice (48 -> 24 -> 12), then conv3.
conv1 = tf.nn.conv2d(x, conv1_weight, strides=[1, 1, 1, 1], padding='SAME', name='conv')
pool1 = tf.nn.relu(conv1+conv1_bias)
pool1 = tf.nn.max_pool(pool1, ksize=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')
kenel2 = conv2_weight# + conv2_bias
conv2 = tf.nn.conv2d(pool1, conv2_weight, strides=[1, 1, 1, 1], padding='SAME', name='conv2')
pool2 = tf.nn.relu(conv2+conv2_bias)
pool2 = tf.nn.max_pool(pool2, ksize=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')
conv3 = tf.nn.conv2d(pool2, conv3_weight, strides=[1, 1, 1, 1], padding='SAME', name='conv2')
conv3 = tf.nn.relu(conv3+conv3_bias)
h_flat = tf.reshape(conv3, [-1, 12 * 12 * 64]) # -1 keeps the batch size
fc4 = (tf.matmul(h_flat, fc4_weight) + fc4_bias)
fc4 = tf.nn.sigmoid(fc4)
print('fc4', fc4)
fc5 = (tf.matmul(fc4, fc5_weight) + fc5_bias)
fc5 = tf.nn.sigmoid(fc5)
print('fc5', fc5)
# h_flat = tf.reshape(fc5, [-1, 12 * 12 * 64]) # (dead code; -1 keeps the batch size)
h = tf.matmul(fc5, fc6_weight) + fc6_bias
# fc6 = tf.nn.sigmoid(h, name="fc6")
output_node = tf.nn.sigmoid(h, name="output")
# mask_pred = segmentizer.inference(x)
model_directory = './weights/'
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with tf.Session() as sess:
    #
    sess.run(init)
    print('evaluate...')
    #saver = tf.train.import_meta_graph('model/model.ckpt.meta')
    saver.restore(sess,"model_test2/model.ckpt")
    #saver.restore(sess,tf.train.latest_checkpoint('model'))
    # Dump every weight tensor as a raw binary under ./weights/, with conv
    # weights transposed to [out_channels, h, w, in_channels] order.
    with open(model_directory + "conv1_W.bin", 'wb') as f:
        W_conv1_p = tf.transpose(conv1_weight, perm=[3, 0, 1, 2])
        f.write(sess.run(W_conv1_p).tobytes())
    with open(model_directory + "conv2_W.bin", 'wb') as f:
        W_conv2_p = tf.transpose(conv2_weight, perm=[3, 0, 1, 2])
        f.write(sess.run(W_conv2_p).tobytes())
    with open(model_directory + "conv3_W.bin", 'wb') as f:
        W_conv3_p = tf.transpose(conv3_weight, perm=[3, 0, 1, 2])
        f.write(sess.run(W_conv3_p).tobytes())
    with open(model_directory + "conv1_b.bin", 'wb') as f:
        f.write(sess.run(conv1_bias).tobytes())
    with open(model_directory + "conv2_b.bin", 'wb') as f:
        f.write(sess.run(conv2_bias).tobytes())
    with open(model_directory + "conv3_b.bin", 'wb') as f:
        f.write(sess.run(conv3_bias).tobytes())
    # FC weights are reshaped to 4-D first so they export in the same layout.
    with open(model_directory + "fc4_W.bin", 'wb') as f:
        W_fc4_shp = tf.reshape(fc4_weight, [12, 12, 64, 100])
        W_fc4_p = tf.transpose(W_fc4_shp, perm=[3, 0, 1, 2])
        f.write(sess.run(W_fc4_p).tobytes())
    with open(model_directory + "fc5_W.bin", 'wb') as f:
        W_fc5_shp = tf.reshape(fc5_weight, [1, 1, 100, 400])
        W_fc5_p = tf.transpose(W_fc5_shp, perm=[3, 0, 1, 2])
        f.write(sess.run(W_fc5_p).tobytes())
    with open(model_directory + "fc6_W.bin", 'wb') as f:
        W_fc6_shp = tf.reshape(fc6_weight, [1, 1, 400, 48 * 48])
        W_fc6_p = tf.transpose(W_fc6_shp, perm=[3, 0, 1, 2])
        f.write(sess.run(W_fc6_p).tobytes())
    with open(model_directory + "fc4_b.bin", 'wb') as f:
        f.write(sess.run(fc4_bias).tobytes())
    with open(model_directory + "fc5_b.bin", 'wb') as f:
        f.write(sess.run(fc5_bias).tobytes())
    with open(model_directory + "fc6_b.bin", 'wb') as f:
        f.write(sess.run(fc6_bias).tobytes())
    # Run every input image through the network and save the predicted
    # 48x48 mask (scaled to 0-255) under eval/.
    len = len(learning_list)  # NOTE(review): shadows the builtin `len`
    for i in range(len):
        test_path = 'learning2/'+str(i+1)+".jpg"
        #test_path = 'input.jpg'
        image_q = utils.get_img(learning_list[i], img_size=[48, 48])
        # img = scipy.misc.imread("learning2/"+str(i+1)+".jpg", mode='RGB')
        # image_q = scipy.misc.imresize(img, [48,48],interp="lanczos")
        image_q = np.array(Image.fromarray(image_q.astype(np.uint8)))#.convert('L'))
        #image_q = np.array(Image.fromarray(image_q.astype(np.uint8)))
        image_q = np.reshape(image_q, [-1, 48, 48, 3])
        output = sess.run(output_node,feed_dict={x:image_q})
        output = np.reshape(output, [48, 48])
        array1 = np.array(output)
        array = 255 * array1
        image2 = Image.fromarray(array.astype(np.uint8)) # Image.fromarray(convert.astype(np.uint8))
        original_yuv = np.array(image2) # .convert('YCbCr'))
        #imsave('eval/restore'+str(i)+".jpg", original_yuv)
        imsave('eval/'+learning_list[i], original_yuv)
        #image2 =np.array(Image.fromarray(input0.astype(np.uint8)))
        #original_yuv = np.array(image2) # .convert('YCbCr'))
        #imsave('org_developing.jpg', original_yuv)
        #print('input',input)
class Record:
    """Simple name/score record.

    Bug fix: ``init`` previously assigned to the *class* (``Record.name``),
    so every instance shared whatever the most recent ``init`` call stored.
    Attributes are now stored per instance via ``self``.
    """
    def init(self, n, s):
        self.name = n
        self.score = s
# Create two records, store values via init, and print the first one's state.
p = Record()
q = Record()
p.init('123', 20)
q.init('456', 30)
print(p.name, p.score)
|
password=input("Enter a password: ")
capital_letter=0
lower_letter=0
digit_number=0
special_character=0
for c in password:
if c.isupper():
capital_letter=capital_letter+1
elif c.islower():
lower_letter=lower_letter+1
elif c.isdigit():
digit_number=digit_number+1
else:
special_character=special_character+1
print(capital_letter)
print(lower_letter)
print(digit_number)
print(special_character)
|
# (share URL, Font Awesome icon class) pairs rendered as social-media buttons.
MEDIA_LINKS = [("https://www.facebook.com/sharer.php?u=http://andrewbberger.com", "fa fa-facebook-official" ),
               ("https://www.twitter.com/share?url=http://andrewbberger.com", "fa fa-twitter-square" ),
               ("https://www.linkedin.com/shareArticle?mini=true&url=http://andrewbberger.com", "fa fa-linkedin-square" ),
               ("https://www.reddit.com/submit?url=http://andrewbberger.com", "fa fa-reddit-alien" ),
               ("https://soundcloud.com/djbergmane", "fa fa-soundcloud" ),
               ("https://www.plus.google.com/share?url=http://andrewbberger.com", "fa fa-google-plus" ),
               ("https://www.github.com/aberger91", "fa fa-github" ),
               ("https://www.stackoverflow.com", "fa fa-stack-overflow" )]
|
import random
# Draw 10 distinct numbers from [1, 500) and report them in ascending order.
res = sorted(random.sample(range(1, 500), 10))
print ("Random number list is : " + str(res))
|
#!/usr/bin/env python
"""This script extracts the partition global tags dependency trees from the CondDB Release Notes.
This script is intended to be used with a cgi interface to extract possible
global tags dependency tries from a release_notes.xml file of a Det/SQLDDDB package.
At a web page form the user have to specify the CondDB partition he wants to analyze
(could be DDDB, LHCBCOND or SIMCOND) and for SIMCOND chose also the Velo(Opened/Closed)
and Magnet (polarity:Up/Off/Down) configuration scheme.
The script has two regimes now:
1) It looks for the most recent one global tag in the partition, and
builds the dependency tree almost to the beginning of time ;), returning for the user
an html page with a result.
2) It also has the functionality to produce the list of all current top global tags for their
branches and the user can choose which global tag branch dependency sequence for the requested
partition he wants to see. The result is returned also as html page.
"""
__author__ = "Illya Shapoval"
__credits__ = "Illya Shapoval <illya.shapoval@cern.ch>, Marco Clemencic <marco.clemencic@cern.ch>"
__version__ = "$Id: presenter.py,v 1.1 2009-12-14 20:00:59 ishapova Exp $"
__maintainer__ = "Illya Shapoval <illya.shapoval@cern.ch>"
import os, sys
import cgi
import cgitb
cgitb.enable()
import xml.etree.ElementTree as ET
# define needed namespaces
# Register prefixes so serialized output uses lhcb:/xsi: instead of ns0:.
# NOTE(review): ET._namespace_map is a private ElementTree API.
ET._namespace_map["http://lhcb.cern.ch"] = "lhcb"
ET._namespace_map["http://www.w3.org/2001/XMLSchema-instance"] = "xsi"
# Helper: qualify a tag name with the LHCb namespace.
_xel = lambda t: ET.QName("http://lhcb.cern.ch",t)
def get_last_GT_name(rootelement, partition, velo_magnet_state, branch_type = None):
    """Finds and returns the most recent global tag name for given partition.

    'velo_magnet_state' variable is used only for SIMCOND partition and
    can be the sum of any following string pairs:
    'vc' - corresponds to opened velo;
    'vo' - velo closed;
    '-mu100' - magnet polarity up;
    '-moff' - magnet off;
    '-md100' - magnet polarity down.
    """
    last_GT_name = ""
    # Assumes the release notes list the newest tags first, so the first
    # matching tag wins — TODO confirm document ordering.
    for element in rootelement:
        if last_GT_name: break
        if element.tag == _xel("global_tag"):
            for subelement in element:
                if subelement.tag == _xel("partition"):
                    # Check if current global tag element corresponds to requested partition
                    if subelement.find(str(_xel("name"))).text == partition:
                        GT_name = element.find(str(_xel("tag"))).text
                        # Checking that requested velo and magnet configuration (valid only for SIMCOND)
                        # or branch type is met here
                        if partition == "SIMCOND" and velo_magnet_state:
                            if velo_magnet_state not in GT_name: continue
                        elif (partition in ["DDDB", "LHCBCOND"]) and branch_type:
                            if branch_type not in GT_name: continue
                        last_GT_name = GT_name
    return last_GT_name
def get_GT_element(rootelement, partition, GT_name):
    """Finds and returns the xml element of global tag.

    It looks for a global tag element with the name 'GT_name'
    for partition 'partition'. Implicitly returns None when no
    matching element exists.
    """
    for element in rootelement:
        if element.tag == _xel("global_tag"):
            for subelement1 in element:
                if subelement1.tag == _xel("tag") and subelement1.text == GT_name:
                    # Before returning the global tag element checking partition to be requested.
                    for subelement2 in element:
                        if subelement2.tag == _xel("partition"):
                            for part_subelement2 in subelement2:
                                if part_subelement2.tag == _xel("name") and part_subelement2.text == partition:
                                    return element
def get_base_and_LTs(partition, GT_element):
    """Finds properties of the chosen global tag element (GT_element) for requested partition.

    For the given partition name (partition) returns the base
    global tag name (base_GT_name) and the list of local
    tags (local_tag_names) of the given global tag element (GT_element) .
    """
    local_tag_names = []
    found_needed_partition = False # Flag to indicate that the element with needed partition was entered
    base_GT_name = ""
    for GT_subelement in GT_element:
        if found_needed_partition: break # When requested partition in the found global tag element is found - return collected 'garbage' :)
        if GT_subelement.tag == _xel("partition"):
            for part_subelement in GT_subelement:
                # Children after the matching <name> are this partition's
                # <base> and <tag> entries (relies on document order).
                if part_subelement.tag == _xel("name") and part_subelement.text == partition:
                    found_needed_partition = True
                    continue
                if found_needed_partition and part_subelement.tag == _xel("base"):
                    base_GT_name = part_subelement.text
                    continue
                if found_needed_partition and part_subelement.tag == _xel("tag"):
                    local_tag_names.append(part_subelement.text)
    return base_GT_name, local_tag_names
def get_all_GTs(rootelement, partition):
    """The function finds all global tags ever created for a partition."""
    all_gts = []
    for element in rootelement:
        if element.tag == _xel("global_tag"):
            # Before returning the global tag element checking partition to be requested.
            for subelement2 in element:
                if subelement2.tag == _xel("partition"):
                    for part_subelement2 in subelement2:
                        if part_subelement2.tag == _xel("name") and part_subelement2.text == partition:
                            # Partition matched: record this element's tag name(s).
                            for subelement1 in element:
                                if subelement1.tag == _xel("tag"):
                                    all_gts.append(subelement1.text)
    return all_gts
def get_branches_hats(rootelement, partition, velo_magnet_state):
    """The function finds the latest global tag for every branch for a partition.

    For doing that it compares the list of all global tags ever created for a
    partition and the list of all global tags which were used as base global
    tags: a "hat" is a tag that was never used as a base, i.e. a branch tip.

    Fix: the duplicate check previously compared the Element object
    (``part_subelement1``) against a list of tag *strings*, so it never
    matched and duplicates were always appended (harmless only because of
    the later set() conversion). It now compares the tag text.
    """
    all_base_gts = []
    all_gts_per_partition = get_all_GTs(rootelement, partition)
    for element in rootelement:
        if element.tag == _xel("global_tag"):
            # Only consider global tag elements belonging to the requested partition.
            for subelement1 in element:
                if subelement1.tag == _xel("partition"):
                    for part_subelement in subelement1:
                        if part_subelement.tag == _xel("name") and part_subelement.text == partition:
                            for part_subelement1 in subelement1:
                                if part_subelement1.tag == _xel("base"):
                                    if (part_subelement1.text in all_gts_per_partition) and (part_subelement1.text not in all_base_gts):
                                        all_base_gts.append(part_subelement1.text)
    branches_hats = list(set(all_gts_per_partition)-set(all_base_gts))
    if velo_magnet_state:
        # Keep only hats matching the requested velo/magnet configuration.
        branches_hats = [tag for tag in branches_hats if velo_magnet_state in tag]
    return branches_hats
def get_descriptions(rootelement, partition, all_LTs):
    """Collects the descriptions for every local tag in 'all_LTs' list for requested partition.

    'all_LTs' - list of all local tag names, included in the very recent one global tag.
    Returns a dict mapping local tag name -> multi-line description text.
    """
    all_LTs_descriptions = {}
    all_LTs_1D = [] # one dimensional list of all local tags, which belong to requested global tag
    for i in all_LTs: all_LTs_1D += i
    for element in rootelement:
        if element.tag == _xel("note"):
            part_elements = element.findall(str(_xel("partition")))
            for part_element in part_elements:
                tag_element = part_element.find(str(_xel("tag")))
                name_element = part_element.find(str(_xel("name")))
                if tag_element.text in all_LTs_1D:
                    if partition == name_element.text:
                        desc_element = element.find(str(_xel("description")))
                        full_desc = ""
                        # This loop is for collecting of all the lines of the local tag description
                        for subsubelement in desc_element:
                            if subsubelement.tag == _xel("di"):
                                full_desc += "\n"+subsubelement.text
                        all_LTs_descriptions[tag_element.text] = full_desc
                        full_desc = ""
    return all_LTs_descriptions
def display_dependency_tree(rootelement, partition, GT_name, search_mode = None):
    """The function builds and displays the dependency sequence for GT_name and partition."""
    found_GT_element = True
    all_GTs = [GT_name]
    all_LTs = []
    # Walk the base-tag chain from GT_name back until no base exists.
    while found_GT_element:
        # Find the global tag element for given partition and global tag name (GT_name)
        found_GT_element = get_GT_element(rootelement, partition, GT_name)
        if not found_GT_element: break
        # Find base global tag name and its local tags included in found global tag element
        base_GT_name, local_tag_names = get_base_and_LTs(partition, found_GT_element)
        all_GTs.append(base_GT_name)
        all_LTs.append(local_tag_names)
        GT_name = base_GT_name
    all_LTs_descriptions = get_descriptions(rootelement, partition, all_LTs)
    # Create an object of empty .xhtml page (tree), which has only general content
    root = initialize_XHTML(partition, search_mode)
    # Make the xhtml entries one by one for global tag "gt" and its list of included local tags "lt"
    for gt,lt in zip(all_GTs, all_LTs):
        root = make_XHTML_entry(root, gt, lt, all_LTs_descriptions)
    # Write the .xhtml tree object to file
    finilize_XHTML(root)
def display_branches_hats(rootelement, partition, branches_hats, search_mode, velo_magnet_state):
    """The function builds and displays the top global tags for every present branch for a partition."""
    branches_hats.sort()
    branches_hats.reverse()
    latest_GT_names = []
    velo_magnet_states = [velo_magnet_state]
    # Collect the latest global tag names for definite branches (for definite symbols in GT names)
    if partition == "SIMCOND":
        if not velo_magnet_state:
            # If for SIMCOND 'all configurations' are activated for branch view
            velo_magnet_states = ["vc-md", "vc-moff", "vc-mu", "vo-md", "vo-moff", "vo-mu", "vc15mm-md"]
        for state in velo_magnet_states:
            latest_GT_names.append(get_last_GT_name(rootelement, partition, state))
    else:
        branch_types = ["hlt", "head"]
        # NOTE(review): loop variable `type` shadows the builtin.
        for type in branch_types:
            latest_GT_names.append(get_last_GT_name(rootelement, partition, velo_magnet_states[0], type))
    # Buid the xhtml page
    root = initialize_XHTML(partition, search_mode, velo_magnet_state)
    for hat in branches_hats:
        root = make_XHTML_hat_entry(root, hat, latest_GT_names)
    finilize_XHTML(root)
def initialize_XHTML(partition, search_mode, velo_magnet_state = None):
    """Prepares the common part of the xhtml page. Adds partition name, buttons and some explanations."""
    #### The HEAD for html ###############################################
    root = ET.Element("html")
    head = ET.SubElement(root, "head")
    title = ET.SubElement(head, "title")
    title.text = "%s partition" % partition
    # Inline JS: toggles a description cell and flips the [+]/[-] link text.
    script = ET.SubElement(head, "script", {"type":"text/javascript"})
    script.text = """
    function toggle(link_id, descr_id)
    {
    var linkText = document.getElementById(link_id).innerHTML;
    //alert(link_id);
    if(linkText == "[+]")
    {
    document.getElementById(link_id).innerHTML = "[-]";
    document.getElementById(descr_id).style.display = "block";
    }
    else
    {
    //alert(linkText);
    document.getElementById(link_id).innerHTML = "[+]";
    document.getElementById(descr_id).style.display = "none";
    }
    }
    """
    body = ET.SubElement(root, "body", {"style":"background-color:#C8C8DA"})
    ########## The title of a page ######################################
    title_table = ET.SubElement(body, "table", {"border":"1", "bgcolor":"#5858AF", "width":"100%"})
    title_row = ET.SubElement(title_table, "tr")
    title_cell = ET.SubElement(title_row, "td")
    title_center = ET.SubElement(title_cell, "center")
    name_head = ET.SubElement(title_center, "h1")
    name_head.text = "CondDB Partition"
    ########## Navigation Main Menu buttons ##############################
    menu_button = ET.SubElement(body, "button", {"type":"button", "name":"Branches", \
                                                 "onclick":'javascript: location.assign("../conddb/form.html")',\
                                                 "style":"color: #006400; "})
    menu_button.text = "Main menu"
    align = ET.SubElement(body, "center")
    name = ET.SubElement(align, "h1")
    name.text = partition
    # Re-inserting the same element repositions it within <body>.
    body.insert(3, menu_button)
    ########## Building the branches hat view ##############################
    if search_mode == "from_branches":
        form = ET.SubElement(align, "form", {"method":"get", "action":"../cgi-bin/presenter.cgi", "name":"hats"})
        if not velo_magnet_state:
            form.text = "This global tags are the hats of all %s global tag branches. Click a branch of interest:" \
                        %partition
        else:
            form.text = "This global tags are the hats of %s global tag branches for '%s' configuration. Click a branch of interest:"\
                        %(partition, velo_magnet_state)
        breakline1 = ET.SubElement(form, "br")
        breakline2 = ET.SubElement(form, "br")
        partition = ET.SubElement(form, "input", {"type":"hidden", "name":"partition", "value":partition})
    # Building the dependency sequence view, entered from branches view ####
    elif not search_mode:
        back_button = ET.Element("button", {"type":"button", "name":"Back", \
                                            "onclick":"javascript: history.go(-1)",\
                                            "style":"color: #006400; "})
        back_button.text = "Branches"
        # NOTE(review): the same Element object is inserted twice — confirm
        # the serializer emits it in both positions as intended.
        body.insert(2, back_button)
        body.insert(5, back_button)
    return root
def make_XHTML_entry(root, GT_name, localTags, all_LTs_descriptions):
    """Creates the xhtml entry for every global tag with its local tags and descriptions."""
    body = root.find("body")
    center = body.find("center")
    # Global tag header line.
    gtagLine = ET.SubElement(center, "b", {"style":"color: #800000; "})
    gtag = ET.SubElement(gtagLine, "big")
    gtag.text = GT_name
    # One table row per local tag: name, [+]/[-] toggle link, hidden description.
    for lt in localTags:
        table = ET.SubElement(center, "table", {"border":"1", "cellspacing":"0", \
                                                "style":"background: #5858AF; color: #FFFFFF; "})
        row = ET.SubElement(table, "tr")
        cell = ET.SubElement(row, "td", {"width":"170", "align":"center"})
        cell.text = lt
        linkCell = ET.SubElement(row, "td", {"width":"25"})
        clickable = ET.SubElement(linkCell, "a", {"title":"show/hide description", "id":"%s_link" %lt,\
                                                  "href":"javascript: void(0);", \
                                                  "onclick":'toggle("%s_link", "%s");' %(lt,lt),\
                                                  "style":"text-decoration: none; color: #00BFFF; text-align:right"})
        clickable.text = "[-]"
        hiddenCell = ET.SubElement(row, "td", {"id":lt, "align":"left"})
        ltagLine = ET.SubElement(hiddenCell, "i")
        ltagLine.text = all_LTs_descriptions[lt]
        # Start each description collapsed by invoking the toggle once on load.
        script = ET.SubElement(center, "script", {"type":"text/javascript"})
        script.text = 'toggle("%s_link","%s");' %(lt,lt)
    # Arrow pointing up to the base tag that follows in the sequence.
    arrow = ET.SubElement(center, "p")
    arrow.text = "/|\\"
    return root
def make_XHTML_hat_entry(root, branch_hat_name, latest_GT_names):
    """Append one submit button for a branch-hat tag to the page's form.

    Hats that are also the latest tag of their branch are rendered with
    the default colour; all others are highlighted blue.
    """
    form = root.find("body").find("center").find("form")
    attrs = {"type": "submit", "name": "branch_hat", "value": branch_hat_name}
    if branch_hat_name not in latest_GT_names:
        ET.SubElement(form, "input", attrs)
    else:
        attrs["style"] = "color: #0000FF; "
        ET.SubElement(form, "input", attrs)
    ET.SubElement(form, "br")
    ET.SubElement(form, "br")
    return root
def finilize_XHTML(root):
    # NOTE: "finilize" is a typo for "finalize"; the name is kept because
    # callers use it. (Python 2 print statements below.)
    """Writing out the xhtml tree object to the stdout, prepending it with a line for cgi processing."""
    tree = ET.ElementTree(root)
    # CGI header: the embedded "\n" yields the blank line that separates
    # the HTTP headers from the document body.
    print "content-type: text/html\n"
    print '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">'
    tree.write(sys.stdout)
    # For the case of file write out
    #file = open("depend.html","w")
    #tree.write(file)
    #file.close()
def main():
    """CGI entry point: validate the submitted form fields and render the
    dependency page for the requested CondDB partition."""
    FormData = cgi.FieldStorage()
    ##########################################################################################
    # Web page form variables manipulations and partition verification
    ##########################################################################################
    partition = FormData["partition"].value
    partitions = ["DDDB", "LHCBCOND", "SIMCOND"]
    if partition not in partitions:
        print "'%s' is not a valid partition name. Allowed: %s" %(partition, partitions)
        return 1
    #################################
    if FormData.has_key("search_mode"):
        search_mode = FormData["search_mode"].value
        #################################
        # SIMCOND needs the velo/magnet configuration to pick the right tags;
        # for the other partitions no configuration applies.
        if partition == "SIMCOND":
            if search_mode == "from_latest":
                velo_magnet_state = FormData["velo_state"].value + FormData["magnet_state"].value
            elif search_mode == "from_branches":
                if not FormData.has_key("all_config"):
                    velo_magnet_state = FormData["velo_state"].value + FormData["magnet_state"].value
                    #all_configurations = None
                elif FormData.has_key("all_config"):
                    # "all configurations" requested: no specific velo/magnet state
                    velo_magnet_state = None
                    #all_configurations = FormData["all_config"]
                else:
                    #all_configurations = None
                    velo_magnet_state = None
        else:
            velo_magnet_state = None
    else:
        # no search mode posted: the user clicked a branch-hat button instead
        branch_hat = FormData["branch_hat"].value
    ##########################################################################################
    # Opening the release notes xml tree
    ##########################################################################################
    from CondDBUI.Admin import ReleaseNotes
    #import ReleaseNotes
    try:
        rn = ReleaseNotes("../conddb/release_notes.xml")
    except IOError:
        print "\nSorry.. Path to Release Notes file is not valid or hardware IO problems occurred.\nCheck path or try again later."
        return 1
    rootelement = rn.tree.getroot()
    ##########################################################################################
    # Parsing the release notes tree
    ##########################################################################################
    if FormData.has_key("search_mode"):
        # Build dependency sequence for latest global tag for the given partition
        if search_mode == "from_latest":
            last_gt_name = get_last_GT_name(rootelement, partition, velo_magnet_state)
            display_dependency_tree(rootelement, partition, last_gt_name, search_mode)
        # Build dependency sequence for particular global tag branch for the given partition
        elif search_mode == "from_branches":
            branches_hats = get_branches_hats(rootelement, partition, velo_magnet_state)
            display_branches_hats(rootelement, partition, branches_hats, search_mode, velo_magnet_state)
    else:
        display_dependency_tree(rootelement, partition, branch_hat)

if __name__ == '__main__':
    sys.exit(main())
|
"""Create yhbatch jobs in GuangZhou TianHe2 computer cluster.
"""
import os
import stat
import argparse
from datetime import datetime
if __name__ == "__main__":
    cmdparser = argparse.ArgumentParser(description="yhbatch jobs")
    cmdparser.add_argument("-I", "--input", dest="input", required=True,
                           help="Input shell file.")
    cmdparser.add_argument("-n", "--number", dest="number", type=int, required=True,
                           help="The number of sub shells.")
    cmdparser.add_argument("-t", "--parallel", dest="t", type=int, required=True,
                           help="The number of parallel tasks.")
    args = cmdparser.parse_args()

    # Collect every non-comment line of the input shell file as one command.
    commands = []
    with open(args.input) as I:
        for line in I:
            if line.startswith("#"):
                continue
            commands.append(line.strip())

    total_cmd = len(commands)
    # Never create more sub-shells than there are commands.
    if args.number > total_cmd:
        args.number = total_cmd
    # Ceiling division: number of commands per sub-shell.
    inter_step_size = int(total_cmd / args.number)
    if inter_step_size * args.number < total_cmd:
        inter_step_size += 1

    # Output directory named after the input file plus a timestamp.
    _, shell_fname = os.path.split(args.input)
    now_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    sub_shell_dir = os.path.abspath("{shell_fname}.{now_time}".format(**locals()))
    if not os.path.exists(sub_shell_dir):
        os.makedirs(sub_shell_dir)

    # Write one work.N.sh per chunk. With -t > 1 commands are backgrounded
    # ("cmd &") and a `wait` barrier is inserted after every args.t commands.
    for i, k in enumerate(range(0, total_cmd, inter_step_size)):
        sub_shell_fname = os.path.join(sub_shell_dir, "work.%d.sh" % (i + 1))
        with open(sub_shell_fname, "w") as OUT:
            OUT.write("#!/bin/bash\n")
            n = 0  # completed-batch counter echoed as a progress marker
            for j, cmd in enumerate(commands[k:k + inter_step_size]):
                OUT.write("%s &\n" % cmd if args.t > 1 else "%s\n" % cmd)
                if (j + 1) % args.t == 0:
                    n += 1
                    if args.t > 1:
                        OUT.write("wait\n")
                    OUT.write("echo \"----------- %d ----------\"\n" % n)
            # final barrier/marker for any trailing partial batch
            OUT.write("wait\n")
            OUT.write("echo \"----------- %d ----------\"\n" % (n + 1))
        os.chmod(sub_shell_fname, stat.S_IRWXU)  # 0700: owner-only rwx
|
import os
from time import localtime, strftime
from flask import Flask, request, render_template, jsonify, redirect, url_for, flash, send_from_directory
from flask_login import LoginManager, login_user, current_user, logout_user, login_required
from flask_socketio import SocketIO, send, emit, join_room, leave_room, close_room
import requests
from PIL import Image
from message import MessageType, send_message
import json
import uuid
from model.user import User
from model.rooms import Room
from model.invites import Invite
from errors import register_error_handlers
from errors import ApplicationError
from security.basic_authentication import generate_password_hash, init_basic_auth
from werkzeug.security import generate_password_hash, check_password_hash
from wtform_fields import *
# --- application wiring (module-level side effects) ---
app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
auth = init_basic_auth()
# NOTE(review): the session secret is hard-coded in source; move it to an
# environment variable / config file before deploying.
app.secret_key = 'takamekefinenemekefibratnqkude33trqqima'
login_manager = LoginManager()
login_manager.init_app(app)
register_error_handlers(app)
socketio = SocketIO(app)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: resolve a stored session id to a User object."""
    user = User.find(user_id)
    return user
@app.route("/", methods=["GET", "POST"])
def main():
    """Landing page: show the login form; on a valid submit, log the user in
    and send them to the chat page."""
    login_form = LoginForm()
    if not login_form.validate_on_submit():
        return render_template("index.html", form=login_form)
    account = User.find_by_name(login_form.username.data)
    login_user(account)
    return redirect(url_for('chat'))
@app.route("/chat", methods=['GET', 'POST'])
@login_required
def chat():
    """Main chat page listing public and private rooms.

    Fix: the handler reads current_user.name, which raises for an anonymous
    user; @login_required (already imported and configured via LoginManager)
    now rejects unauthenticated access instead of crashing with a 500.
    """
    return render_template("chat.html", username=current_user.name,
                           rooms=Room.all_neznam(), private_rooms=Room.all_private())
@app.route("/profiles/<username>", methods=['GET', 'POST'])
def profile(username):
    """Render the profile page for *username*."""
    return render_template("profile.html", user=User.find_by_name(username))
@app.route('/logout', methods=['GET'])
def logout():
    """End the current session and return the user to the login page."""
    farewell = 'You have successfully logged yourself out.'
    logout_user()
    flash(farewell)
    return redirect(url_for('main'))
@app.route("/api/rooms", methods=["POST"])
def create_room():
    """Create a room from a JSON body {"is_private": ..., "name": ...}.

    Returns 201 with the room dict on success, 400 on a malformed body.
    Fix: a malformed body used to return 401 (Unauthorized), which wrongly
    signals an authentication failure; 400 Bad Request is correct.
    """
    room_data = request.get_json(force=True, silent=True)
    if room_data is None:
        return "Bad request", 400
    room = Room(room_data["is_private"], room_data['name'])
    room.save()
    return jsonify(room.to_dict()), 201
@app.route("/api/rooms", methods=["GET"])
def list_rooms():
    """List every room as {"result": [room dicts]}.

    Fix: a GET used to return 201 (Created); 200 OK is correct for a read.
    """
    return jsonify({"result": [room.to_dict() for room in Room.all()]}), 200
@app.route("/api/invites", methods=["POST"])
def create_invite():
    """Create an invite from a JSON body {"room_id": ..., "username": ...}.

    Returns 201 with the invite dict on success, 400 on a malformed body
    (fix: the original returned 401, which signals an auth failure).
    """
    invite_data = request.get_json(force=True, silent=True)
    if invite_data is None:
        return "Bad request", 400
    invite = Invite(invite_data["room_id"], invite_data["username"])
    invite.save()
    return jsonify(invite.to_dict()), 201
@app.route("/api/invites", methods=["GET"])
def list_invites():
    """List every invite as {"result": [invite dicts]}.

    Fix: a GET used to return 201 (Created); 200 OK is correct for a read.
    """
    return jsonify({"result": [invite.to_dict() for invite in Invite.all()]}), 200
@app.route("/api/users", methods=["POST"])
def create_user():
    """Create a user from a JSON body with name/password/room/description/
    picture_location keys.

    Returns 201 with the user dict on success, 400 on a malformed body
    (fix: the original returned 401, which signals an auth failure).
    """
    user_data = request.get_json(force=True, silent=True)
    if user_data is None:
        return "Bad request", 400
    # NOTE(review): the password is stored exactly as received -- the hashing
    # call below is commented out. Re-enable it once the login path verifies
    # against the hash.
    #hashed_password = generate_password_hash(user_data["password"])
    user = User(user_data['name'], user_data['password'], user_data['room'],
                user_data['description'], user_data['picture_location'])
    user.save()
    return jsonify(user.to_dict()), 201
@app.route("/api/users/<user_id>", methods=["GET"])
def get_user(user_id):
    """Return the public (viewable) representation of one user."""
    user = User.find(user_id)
    return jsonify(user.to_viewable())
@app.route("/api/users", methods=["GET"])
def list_users():
    """List every user's public representation as {"result": [...]}.

    Fix: a GET used to return 201 (Created); 200 OK is correct for a read.
    """
    return jsonify({"result": [user.to_viewable() for user in User.all()]}), 200
@app.route("/api/users/<user_id>", methods=["PATCH"])
def update_user(user_id):
    """Apply a JSON patch to an existing user and persist it.

    Fixes: (1) the original parsed the payload but never applied it, so the
    PATCH saved the user unchanged; the supplied fields are now copied onto
    the user before saving. (2) A malformed body returns 400, not 401.
    """
    user_data = request.get_json(force=True, silent=True)
    if user_data is None:
        return "Bad request", 400
    user = User.find(user_id)
    # NOTE(review): assumes payload keys match User attribute names --
    # confirm against model.user.User.
    for field, value in user_data.items():
        setattr(user, field, value)
    return jsonify(user.save().to_dict()), 201
@app.route("/api/users/<user_id>", methods=["DELETE"])
def delete_user(user_id):
    """Delete the given user; responds with an empty body."""
    User.find(user_id).delete(user_id)
    return ""
@socketio.on('message')
def send_text(data):
    """Relay a plain-text chat message to its room."""
    text, sender, room = data['msg'], data['username'], data['room']
    send_message(MessageType.TEXT, text, sender, room)
@socketio.on('send_gif')
def send_gif(data):
    """Relay a GIF (referenced by URL) to the sender's room as an image."""
    send_message(MessageType.IMAGE, data['gif_url'], data['username'], data['room'])
def notify_join_room(username, room_name, invite_check):
    """Place *username* in *room_name* when invited; otherwise park them in
    the public Lounge with an explanatory message."""
    if not invite_check:
        join_room("Lounge")
        send({'msg': "You are not invited, choose another room."})
        User.update_room("Lounge", User.find_by_name(username).name)
        return
    join_room(room_name)
    send({'msg': username + " has joined the " + room_name + " room."}, room=room_name)
    User.update_room(room_name, User.find_by_name(username).name)
@socketio.on('join')
def join(data):
    """Handle a join request; private rooms additionally require an invite."""
    room, username = data['room'], data['username']
    allowed = bool(not Room.private_check(room) or
                   Invite.check_for_invite(room, username))
    notify_join_room(username, room, allowed)
@socketio.on('leave')
def leave(data):
    """Remove the user from a room and announce the departure there."""
    room = data['room']
    leave_room(room)
    send({'msg': data['username'] + " has left the " + room + " room."}, room=room)
def notify_room_creation(username, room_name, is_private):
    """Announce a newly created room to every room and register it.

    Private rooms are stored with flag 1, public with flag 0; the creator is
    always invited to their own room.
    """
    if is_private == True:
        announcement = username + " has created 'private' " + room_name + " room. Refresh page."
        private_flag = 1
    elif is_private == False:
        announcement = username + " has created " + room_name + " room. Refresh page."
        private_flag = 0
    else:
        announcement = None
    if announcement is not None:
        for current_room in Room.all_rooms():
            send({'msg': announcement}, room=current_room)
        Room.add_room(private_flag, room_name)
    Invite.add_invite(room_name, username)
# NOTE(review): this handler rebinds the module-level name `create_room` (the
# HTTP view defined above). Flask keeps a reference to the registered view, so
# routing still works, but renaming one of them would remove the shadowing.
@socketio.on('create_room')
def create_room(data):
    """Socket event: create a public (non-private) room."""
    notify_room_creation(data['username'], data['name'], False)
@socketio.on('create_private_room')
def create_private_room(data):
    """Socket event: create an invite-only (private) room."""
    creator = data['username']
    notify_room_creation(creator, data['name'], True)
# Rooms that ship with the app and must never be deleted.
STATIC_ROOMS = frozenset({"Lounge", "Narga", "Clashka", "Techno boom boom"})

def static_check(room_name):
    """Return True when *room_name* is one of the built-in (undeletable) rooms.

    Improvement: set membership instead of a chained `or` of equality tests.
    """
    return room_name in STATIC_ROOMS
def neznamkvopravi(room_name, username):
    """Delete *room_name* when it exists and *username* holds an invite to it,
    announcing the deletion to every room; otherwise tell the caller no."""
    deletable = room_name in Room.all_rooms() and Invite.check_for_invite(room_name, username)
    if not deletable:
        send({'msg': "Cannot delete this room"})
        return
    for target_room in Room.all_rooms():
        send({'msg': username + " has deleted " + room_name + " room. Refresh page."}, room=target_room)
    Room.delete_room(room_name)
    Invite.delete_invite(room_name)
# NOTE(review): this handler shadows `close_room` imported from flask_socketio
# at the top of the file; rename one of them if the imported helper is needed.
@socketio.on('close_room')
def close_room(data):
    """Socket event: delete a room, unless it is one of the built-in rooms."""
    if static_check(data['name']):
        send({'msg': "Cannot delete static rooms!"}, room=data['room'])
    else:
        neznamkvopravi(data['name'], data['username'])
def search_and_invite(username, invited_user, room_name):
    """Invite *invited_user* to *room_name* if such a user exists.

    Notifies both the inviter and the invitee's current room.
    Returns True when the invite was created, False when no user matched.
    """
    for candidate in User.all():
        if candidate.name != invited_user:
            continue
        Invite.add_invite(room_name, invited_user)
        send({'msg': "Invite sent!"})
        send({'msg': username + " has invited you in the " + room_name + " room."}, room=candidate.room)
        return True
    return False
@socketio.on('invite_user')
def invite_user(data):
    """Socket event: invite another user to a room.

    Self-invites and invites to unknown users are reported to the client and
    raised as ApplicationError. (Removed an unused `check` local from the
    original; the `else` is unnecessary because the first branch raises.)
    """
    if data['invited_user'] == data['username']:
        send({'msg': "Cannot invite yourself!"})
        raise ApplicationError("Can't invite yourself", 404)
    if not search_and_invite(data['username'], data['invited_user'], data['room']):
        send({'msg': "User does not exist!"})
        raise ApplicationError("User doesn't exist", 404)
if __name__ == "__main__":
    # debug=True enables the reloader/debugger -- development use only.
    socketio.run(app, debug=True)
|
from django.db import models
# Create your models here.
class Employee_Category(models.Model):
    """A category/class of employee with an abbreviation and a cost figure."""
    name = models.CharField(max_length=255)
    abbr = models.CharField(max_length=5)
    # per-category cost -- units/meaning not shown here; TODO confirm
    cost = models.DecimalField(max_digits=7, decimal_places=2)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)  # set once on insert
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)  # refreshed on every save
    def __str__(self):
        return self.name
class Employee(models.Model):
    """Employee record: identity, contact info, pay rate, and display helpers.

    Field definitions are kept exactly as-is (migrations depend on them).
    """
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0; add
    # on_delete=... when upgrading.
    employee_category = models.ForeignKey(Employee_Category, null=True)
    ssn = models.CharField(max_length=9, null=True)
    name_last = models.CharField(max_length=255, null=True)
    name_first = models.CharField(max_length=225, null=True)
    name_nick = models.CharField(max_length=255, null=True)
    date_birth = models.DateField(auto_now_add=False, auto_now=False, null=True)
    date_hire = models.DateField(auto_now_add=False, auto_now=True)
    date_fire = models.DateField(auto_now_add=False, auto_now=False, null=True)
    rate_hourly = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
    date_rate_increase = models.DateField(auto_now_add=False, auto_now=False)
    address_street = models.CharField(max_length=255, null=True)
    address_city = models.CharField(max_length=255, null=True)
    address_state = models.CharField(max_length=2, null=True)
    address_zip = models.CharField(max_length=5, null=True)
    phone_cell = models.CharField(max_length=10, null=True)
    phone_home = models.CharField(max_length=10, null=True)
    active = models.BooleanField(default=1)
    pancho = models.BooleanField(default=0)
    daboin = models.BooleanField(default=0)
    deleted = models.DateTimeField(auto_now_add=False, auto_now=False, null=True)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)

    def __str__(self):
        return "%s, %s" % (self.name_last, self.name_first)

    def full_name(self):
        """'Last, First ( Nick )' display string."""
        return "%s, %s ( %s )" % (self.name_last, self.name_first, self.name_nick)

    def full_address(self):
        """Street plus city/state/zip, or a placeholder when no street is set.

        Bug fix: address_street is nullable, so the original len(None) call
        raised TypeError; a truthiness check handles both None and "".
        """
        if self.address_street:
            return "%s\n%s, %s %s" % (self.address_street, self.address_city, self.address_state, self.address_zip)
        return "No address specified"

    def phone_number(self, phone_string):
        """Format a 10-character phone string as XXX-XXX-XXXX; anything else
        (None, wrong length) is returned unchanged."""
        if phone_string is not None and len(phone_string) == 10:
            return phone_string[:3] + '-' + phone_string[3:6] + '-' + phone_string[6:]
        return phone_string

    def phone_cell_display(self):
        return self.phone_number(self.phone_cell)

    def phone_home_display(self):
        return self.phone_number(self.phone_home)

    def ssn_display(self):
        """Masked SSN ('XXX-XX-' plus the last four digits) for 9-character
        values; otherwise None (explicit -- the original fell through a bare
        `pass` and returned None implicitly)."""
        if self.ssn is not None and len(self.ssn) == 9:
            return "XXX-XX-%s" % (self.ssn[5:])
        return None

    def rate_increase_date_display(self):
        """Date of the last raise, or 'Initial rate' when no raise postdates
        the hire date."""
        if self.date_rate_increase is not None and self.date_rate_increase > self.date_hire:
            return self.date_rate_increase
        return "Initial rate"

    def nick_title(self):
        """Preferred short name (nick > first > last) plus the category
        abbreviation when available, e.g. 'Bob ( MGR )'."""
        if self.name_nick is not None:
            _name = self.name_nick
        elif self.name_first is not None:
            _name = self.name_first
        else:
            _name = self.name_last
        if self.employee_category is not None:
            _cat = "( %s )" % self.employee_category.abbr
        else:
            _cat = ""
        return "%s %s" % (_name, _cat)
import numpy as np
import pandas as pd
import glob
import os
import numpy as np
import sys
import masstitr_tools as mt
from Bio import Seq
# %%
# ==============================================================
# // parameters
# ==============================================================
# count cutoff threshold
FILTER_METHOD = 'cells'  # filter on 'cells' or 'reads' (see processed_data2binding_curve)
COUNT_CUTOFF = 10        # minimum count a sequence needs at a concentration to be kept
CLUSTER_READS = True     # collapse reads into amino-acid-sequence clusters before counting
# list of barcodes to exclude from the analysis at this stage
EXCLUDED_BARCODES = ["barcode_5", "barcode_20", "barcode_21"]
# weighting of the different bins: bin number -> weight applied when averaging
# bin counts into a binding signal (see calc_binding_signal)
WEIGHTS = {
    1: 3000,
    2: 300,
    3: 30,
    4: 0
}
# %%
# ==============================================================
# // group gates by concentrations, calculate binding signal for each concentration
# ==============================================================
def calc_binding_signal(df, conc, bin_key, weights):
    """Compute the weighted-mean binding signal at one concentration.

    For each sequence: signal = sum_b(count_b * weights[b]) / sum_b(count_b),
    where b runs over the bins in `weights` and `bin_key[b]` names the column
    holding bin b's counts. The signal is stored in a new column named
    str(conc).

    Generalized from the original, which hard-coded bins 1-4; any bin set
    present in both `weights` and `bin_key` now works (backward compatible --
    callers pass the same 4-bin dicts as before).

    Returns a two-column frame: 'seq' and the new concentration column.
    """
    col = str(conc)
    numerator = sum(df[bin_key[b]] * weights[b] for b in weights)
    denominator = sum(df[bin_key[b]] for b in weights)
    df[col] = numerator / denominator
    return df[['seq', col]]
def process_exp(exp_directory, experiment, sample_key, count_cutoff, excluded_barcodes, weights):
    '''run pipeline for a single experiment

    Loads per-barcode count files, optionally clusters reads by translated
    amino-acid sequence, converts read fractions to cell counts, and builds
    the binding curve. Returns (R, x, b_curve): read counts, cell counts,
    and the binding-curve frame.
    '''
    # all per-barcode nucleotide-count files produced upstream
    file_list = [f for f in glob.glob(os.path.join(exp_directory, "seq_counts_complete", "*nts_only"))]
    # R: per-sequence read counts; T: totals; barcode_cols: count column names
    # (exact semantics defined in masstitr_tools -- TODO confirm)
    R, T, barcode_cols = mt.load_and_merge_data(file_list, excluded_barcodes)
    R['AA_seq'] = R['seq'].apply(mt.trans_string)  # translate nt -> AA
    if CLUSTER_READS:
        R = mt.cluster_AA_seqs(R, barcode_cols)
    read_f = mt.read_count2fraction(R, T, barcode_cols)
    # get cell counts
    x = mt.read_fraction2cell_count(read_f, sample_key, barcode_cols)
    # run main processing
    b_curve = processed_data2binding_curve(x, R, weights, sample_key, experiment, count_cutoff, n_missing_allowed=2)
    return R, x, b_curve
def processed_data2binding_curve(x, R, weights, sample_key, experiment, count_cutoff, n_missing_allowed=2):
    '''calculate binding signal for a single experiment - used in `process_exp()`

    x: per-sequence cell counts; R: per-sequence read counts. Builds one
    binding-signal column per concentration (highest first) and drops
    sequences missing more than `n_missing_allowed` concentration values.
    '''
    concentrations = sorted(list(sample_key['concentration (nM)'].unique()), reverse=True)
    # NOTE(review): b_curve starts with 'seq' as its *index* but is merged
    # below with on='seq' -- verify this works with the pandas version in use.
    b_curve = pd.DataFrame(index=x['seq'])
    for conc in concentrations:
        # get list of barcodes for gates used at `conc`
        barcodes = mt.conc2barcodes(sample_key, experiment, conc)
        # get bin # for each barcode. Bin # needs to correspond to
        # numbering in `weights`
        bin_key = mt.barcodes2bin(sample_key, barcodes)
        # select data for only the gates used at `conc`
        x_conc = x[['seq'] + barcodes].copy()
        R_conc = R[['seq'] + barcodes].copy()
        # filter out sequences below the cutoff (method set by FILTER_METHOD)
        if FILTER_METHOD=='reads':
            x_conc = mt.single_conc_readcount_filter(x_conc, R_conc, count_cutoff)
        if FILTER_METHOD=='cells':
            x_conc = mt.single_conc_cell_count_filter(x_conc, count_cutoff)
        #calculate binding signal
        df = calc_binding_signal(
            x_conc,
            conc,
            bin_key,
            weights
        )
        # merge to create full curve
        # 'outer' merge will keep all sequences even if for some
        # concentrations they have <count_cutoff
        b_curve = b_curve.merge(df, on='seq', how='outer')
    # final filter: keep sequences observed at all but n_missing_allowed points
    b_curve = b_curve[b_curve.isna().sum(1)<=n_missing_allowed]
    return b_curve
def save_exp_params(output_file):
    """Record the module-level processing parameters to *output_file*,
    one `NAME=value` line each (f-string self-documenting form)."""
    param_lines = [
        f"{FILTER_METHOD=}",
        f"{COUNT_CUTOFF=}",
        f"{CLUSTER_READS=}",
        f"{EXCLUDED_BARCODES=}",
        f"{WEIGHTS=}",
    ]
    with open(output_file, 'w') as handle:
        handle.write("\n".join(param_lines) + "\n")
def main(count_cutoff, excluded_barcodes, weights):
    """Run the binding-curve pipeline for every experiment directory.

    Command line: <experiment_directory> <sample_key_csv> <output_folder>.
    Writes one <exp>-binding_curves.csv per experiment plus a parameter log.
    """
    experiment_directory = str(sys.argv[1])
    sample_key_file = str(sys.argv[2])
    binding_curve_folder = str(sys.argv[3])
    sample_key = pd.read_csv(sample_key_file)
    # experiment name -> directory mapping (layout defined by masstitr_tools)
    exp_dirs = mt.get_exp_dir_dict(sample_key_file, experiment_directory)
    if not os.path.exists(binding_curve_folder):
        os.mkdir(binding_curve_folder)
    for exp, exp_directory in exp_dirs.items():
        print("experiment: {}".format(exp_directory))
        R, x, b_curve = process_exp(
            exp_directory=exp_directory,
            experiment=exp,
            sample_key=sample_key,
            count_cutoff=count_cutoff,
            excluded_barcodes=excluded_barcodes,
            weights=weights
        )
        # x.to_csv('{}{}-cell_counts.csv'.format(
        #     os.path.join(binding_curve_folder,''),
        #     exp
        # ), index=False)
        b_curve.to_csv('{}{}-binding_curves.csv'.format(
            os.path.join(binding_curve_folder,''),
            exp
        ), index=False)
        # R.to_csv('{}{}-read_counts.csv'.format(
        #     os.path.join(binding_curve_folder,''),
        #     exp
        # ), index=False)
    # record the parameter values used for this run alongside the outputs
    save_exp_params(os.path.join(binding_curve_folder,'mt04_processing_parameters.txt'))

if __name__ == "__main__":
    main(COUNT_CUTOFF, EXCLUDED_BARCODES, WEIGHTS)
|
"""
Refer to handout for details.
- Build scripts to train your model
- Submit your code to Autolab
"""
import torch
import torch.nn as nn
import torch.utils.data as data_utils
import torch.nn.functional as F
import numpy as np
import hw2.all_cnn
import hw2.preprocessing
def write_results(predictions, output_file='predictions.txt'):
    """
    Write predictions to file for submission.
    One value per line. The submission file should be:
        named 'predictions.txt'
        in the root of your tar file
    :param predictions: iterable of integers
    :param output_file: path to output file.
    :return: None
    """
    lines = ["{}\n".format(y) for y in predictions]
    with open(output_file, 'w') as f:
        f.writelines(lines)
def load_data(path):
    """Load CIFAR-10 features/labels from .npy files under *path* and run the
    hw2 preprocessing on train and test features (32x32 images).

    Returns (train_feats, train_labels, test_feats).
    """
    x = np.load(path + 'train_feats.npy')
    labels = np.load(path + 'train_labels.npy')
    xtest = np.load(path + 'test_feats.npy')
    #N = 100
    x, xtest = hw2.preprocessing.cifar_10_preprocess(x, xtest, image_size=32)
    return x,labels,xtest
def to_tensor(numpy_array):
    """Convert a NumPy array to a float32 torch Tensor."""
    tensor = torch.from_numpy(numpy_array)
    return tensor.float()
def to_variable(tensor):
    """Wrap a tensor in an autograd Variable, moved to the GPU when one is
    available (no-op placement on CPU-only machines)."""
    if torch.cuda.is_available():
        tensor = tensor.cuda()
    variable = torch.autograd.Variable(tensor)
    return variable
def save_model(model, path):
    """Persist only the model's parameters (its state_dict) to *path*."""
    state = model.state_dict()
    torch.save(state, path)
def main(tag):
    """Train the all-CNN model on CIFAR-10 and write test predictions.

    tag -- run identifier used to name the log file, the saved model, and
    the predictions output file.
    """
    # hyperparameters
    lr = 1e-2
    L2 = 1e-3
    n_epochs = 50
    momentum = 0.9
    batch_size = 128
    data_path = './dataset/'
    log_path = './logs/' + tag
    model_path = './models/' + tag + '.pt'
    model = hw2.all_cnn.all_cnn_module()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=lr, momentum=momentum, nesterov=True, weight_decay=L2)
    loss_fn = nn.NLLLoss()  # paired with the log_softmax outputs below
    if torch.cuda.is_available():
        model = model.cuda()
        loss_fn = loss_fn.cuda()
    train_feats, train_labels, test_feats = load_data(data_path)
    train_size = train_labels.shape[0]
    print('Data loading done')
    train = data_utils.TensorDataset(to_tensor(train_feats),
                                     to_tensor(train_labels))
    train_loader = data_utils.DataLoader(train,
                                         batch_size=batch_size, shuffle=True)
    log_numpy = []  # per-epoch [loss, error] pairs, dumped to .npy each epoch
    print('Done testing !!')
    for epoch in range(n_epochs):
        epoch_loss = 0
        correct = 0
        model.train()
        for batch_index, (data, label) in enumerate(train_loader):
            optimizer.zero_grad()
            X, Y = to_variable(data), to_variable(label)
            # NOTE(review): log_softmax without dim= relies on the old implicit
            # default (warns/changes on newer torch); dim=1 is presumably
            # intended -- confirm.
            out = F.log_softmax(model(X.view(label.size()[0], 3, 32, 32)))
            pred = out.data.max(1, keepdim=True)[1].int()
            predicted = pred.eq(Y.data.view_as(pred).int())
            correct += predicted.sum()
            loss = loss_fn(out, Y.long())
            loss.backward()
            optimizer.step()
            epoch_loss += loss.data.sum()
            if (batch_index % 100) == 0:
                print("batchs left : ", int(train_size/batch_size - batch_index))
        total_loss = epoch_loss*batch_size/train_size
        # NOTE(review): `correct` is an integer tensor; correct/train_size may
        # truncate under integer division depending on the torch version.
        train_error = 1 - correct/train_size
        log_numpy.append([total_loss, train_error])
        print("epoch: {:f}, loss: {:f}, error: {:f}".format(epoch+1, total_loss, train_error))
        # checkpoint model and training log after each epoch (best effort)
        try:
            save_model(model, model_path)
        except:
            print("dumping model error!!")
        try:
            np.save(log_path+'.npy', np.array(log_numpy))
        except:
            print("dumping log error!!")
    # final pass over the test set; failures are swallowed and reported only
    try:
        model.eval()
        test_feats = to_variable(to_tensor(test_feats))
        out = F.log_softmax(model(test_feats))
        pred = out.data.max(1, keepdim=True)[1].int()
        # NOTE(review): `tag` is passed as output_file, so predictions land in
        # a file named after the tag rather than 'predictions.txt'; also
        # pred.numpy() fails if the model ran on GPU -- confirm both.
        write_results(pred.numpy().tolist(), tag)
    except:
        print("Testing failed !!")

if __name__ == '__main__':
    main('train1_5epochs_lr_2')
|
from django.urls import path
from django.conf.urls import url
from apps.user.views import RegisterView, ActiveView, LoginView

app_name = 'apps.user'

urlpatterns = [
    url(r'^register/$', RegisterView.as_view(), name='register'),  # registration page
    url(r'^active/(?P<token>.*)$', ActiveView.as_view(), name='active'),  # account activation
    # Fix: the login pattern lacked the leading '^' anchor, so it matched any
    # URL ending in 'login/' (url() regexes are searched, not fully anchored).
    url(r'^login/$', LoginView.as_view(), name='login'),  # login page
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.IoTBPaaSMerchantOrderInfo import IoTBPaaSMerchantOrderInfo
class AlipayOpenIotbpaasMerchantorderRefreshResponse(AlipayResponse):
    """Response wrapper for the IoT BPaaS merchant-order refresh API
    (per the class name; generated-SDK style kept as-is)."""
    def __init__(self):
        super(AlipayOpenIotbpaasMerchantorderRefreshResponse, self).__init__()
        self._order_list = None  # list of IoTBPaaSMerchantOrderInfo once set
    @property
    def order_list(self):
        return self._order_list
    @order_list.setter
    def order_list(self, value):
        # Accepts a list of IoTBPaaSMerchantOrderInfo or raw dicts; dicts are
        # converted via from_alipay_dict. Non-list values are ignored.
        if isinstance(value, list):
            self._order_list = list()
            for i in value:
                if isinstance(i, IoTBPaaSMerchantOrderInfo):
                    self._order_list.append(i)
                else:
                    self._order_list.append(IoTBPaaSMerchantOrderInfo.from_alipay_dict(i))
    def parse_response_content(self, response_content):
        # Populate this object's fields from the decoded gateway payload.
        response = super(AlipayOpenIotbpaasMerchantorderRefreshResponse, self).parse_response_content(response_content)
        if 'order_list' in response:
            self.order_list = response['order_list']
|
class Person:
    """A named person that accumulates messages."""

    def __init__(self, name):
        self.name = name
        # Bug fix: the original had `self.messages = []` in the class body,
        # where `self` is undefined -- the class failed to define with a
        # NameError. Initializing here also gives each instance its own list.
        self.messages = []

    def addMessage(self, message):
        """Append *message* to this person's message list."""
        self.messages.append(message)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.