text stringlengths 38 1.54M |
|---|
import random


def adn(n):
    """Return a random DNA sequence of length ``n``.

    Each position is drawn uniformly from the four bases a, c, t, g.
    Fix: the local list previously shadowed the function's own name.
    """
    bases = ['a', 'c', 't', 'g']
    return ''.join(random.choice(bases) for _ in range(n))
|
# Repeatedly read integers from stdin and classify each one as prime,
# composite, or neither (for n == 1).
while True:
    n = int(input("Enter number: "))
    divisors_found = 0
    candidate = 2
    # Trial division: a factor, if any, exists at or below n/2.
    while candidate <= n / 2:
        if n % candidate == 0:
            divisors_found += 1
            break
        candidate += 1
    if n == 1:
        print("number is niether prime nor composite")
    elif divisors_found == 0:
        print("number is prime")
    else:
        print("number is not prime")
|
def followAndCountTheRedirect(url):
    """Follow HTTP redirects for ``url`` by shelling out to ``curl -I``.

    Returns (redirectionCount, finalUrl, httpResponseCodes) where
    httpResponseCodes is a space-separated string of status codes with the
    most recent response first.

    NOTE(review): relies on the Python 2 `commands` module (removed in
    Python 3) and on a `curl` binary on PATH — confirm the runtime.
    NOTE(review): for an empty/blank url, httpResponseCodes is never bound
    and the final return would raise NameError — confirm intended behavior.
    """
    url = url.strip()
    redirectionCount = 0
    if( len(url) > 0 ):
        indexOfLocation = 0
        httpResponseCodes = ''
        # Keep issuing HEAD requests while the previous response carried a
        # Location header.
        while indexOfLocation > -1:
            co = 'curl -s -I ' + url
            output = commands.getoutput(co)
            # First line is the status line, e.g. "HTTP/1.1 301 Moved";
            # token [1] is the numeric status code.
            indexOfFirstNewLine = output.find('\n')
            if( indexOfFirstNewLine > -1 ):
                httpResponseCodes = output[0:indexOfFirstNewLine].split(' ')[1] + ' ' + httpResponseCodes
            #if( len(httpResponseCodes) > 0 ):
            #    httpResponseCodes = httpResponseCodes[:-1]
            # Header capitalization varies by server; try both spellings.
            indexOfLocation = output.find('location:')
            if( indexOfLocation == -1 ):
                indexOfLocation = output.find('Location:')
            if( indexOfLocation > -1 ):
                # 9 == len('Location:'); the target runs to end of line.
                indexOfNewLine = output.find('\n', indexOfLocation + 9)
                url = output[indexOfLocation + 9:indexOfNewLine]
                url = url.strip()
                redirectionCount = redirectionCount + 1
    return redirectionCount, url, httpResponseCodes
# Inner-function demo: the nested function is defined and invoked right
# away, so knight() returns the finished sentence.
def knight(saying):
    def speak():
        return "We are the knights who say: '%s'" % saying
    return speak()


print(knight('khs'))
# Using an inner function as a closure.
# A closure is a function created dynamically by an outer function that
# remembers the values of that outer function's local variables.
def knight2(saying):
    def _say():
        return "We are the knights who say: '%s'" % saying
    # Return _say itself (not called): each returned function remembers
    # its own `saying`.
    return _say


k1 = knight2('khs')
k2 = knight2('kwon')
# k1 is both a function and a closure.
print(type(k1), k1)
print(k1())
print(k2())
class Neighborhood:
    """Collaborative-filtering helper.

    Finds "neighbor" users whose ratings are close to mine and recommends
    items those neighbors rated that I have not rated myself.
    """

    def __init__(self, myInfo, proximity, maxPoint):
        self._me = myInfo           # my {item: score} ratings
        self._neighbor = []         # list of (proximity, ratings dict) pairs
        self.proximity = proximity  # minimum proximity to accept a neighbor
        self.max = maxPoint         # maximum possible score for an item
        self._S = set()             # neighbor item keys minus my own keys

    def getMyInfo(self):
        return self._me

    def getNeighbors(self):
        return self._neighbor

    def _percent(self, A, B):
        """Similarity of rating dicts A and B.

        Combines key overlap with score distance on the shared keys.
        NOTE(review): the overlap factor is (|A∩B|+|A∪B|)/(2·|A∪B|) —
        possibly intended to be plain Jaccard |A∩B|/|A∪B|; confirm the
        formula before changing it.
        """
        A_keys = set(A.keys())
        B_keys = set(B.keys())
        ANB = A_keys.intersection(B_keys)
        AUB = A_keys.union(B_keys)
        E = 0  # maximum possible total distance over shared keys
        S = 0  # actual total score distance over shared keys
        for e in ANB:
            E += self.max
            S += abs(A[e] - B[e])
        if E == 0:
            # No shared keys: no basis for similarity.
            return 0
        return (((len(ANB)+len(AUB))/(len(AUB)+len(AUB))) * (1 - S/E))

    def setNeighbors(self, users):
        """Keep users whose proximity exceeds the threshold and collect the
        items they rated that I have not."""
        self._S = set()
        mykeys = set(self._me.keys())
        for u in users:
            p = self._percent(self._me, u)
            if p > self.proximity:
                self._neighbor.append((p, u))
                self._S = self._S.union(set(u.keys()))
        self._S = self._S.difference(set(mykeys))

    def recomend(self, infimum):
        """Return items whose proximity-weighted mean score is >= infimum.

        Each result carries the weighted score ('evaluation') and the mean
        proximity of the neighbors who rated the item ('probability').
        """
        rL = []  # recommendation list
        for e in self._S:
            M = 0  # sum of proximities of neighbors who rated e
            D = 0  # sum of proximity * score for e
            R = 0  # number of neighbors who rated e
            for n in self._neighbor:
                p = n[0]
                v = n[1].get(e)
                if v != None:
                    M += p
                    D += p*v
                    R += 1
            # Every e in _S was rated by at least one neighbor, so M > 0.
            if D/M >= infimum:
                rL.append({'element' : e, 'probability' : M/R, 'evaluation' : D/M})
        return rL
if __name__ == '__main__':
    # Smoke test: compare user A against five other rating dicts.
    A = {'a': 5, 'b': 1}
    B = {'a': 5, 'c': 3, 'd': 4}
    C = {'a': 5, 'b': 2, 'c': 2}  # , 'd':0, 'e':3
    D = {'a': 2, 'b': 5}
    E = {'a': 1}
    F = {'a': 5, 'b': 1, 'e': 5}
    hood = Neighborhood(A, 0.5, 5)
    hood.setNeighbors([B, C, D, E, F])
    print(hood.recomend(3))
|
# FizzBuzz is a popular programming problem to test a developer's ability to think logically with code.
# The problem is simple but deceptive.
# Define a fizzbuzz function that accepts a single number as an argument. The function should print every number from 1 to that argument.
# There are a couple caveats.
# If the number is divisible by 3, print "Fizz" instead of the number.
# If the number is divisible by 5, print "Buzz" instead of the number.
# If the number is divisible by both 3 and 5, print "FizzBuzz" instead of the number.
# If the number is not divisible by either 3 or 5, just print the number.
# Example: fizzbuzz(30) should print:
# 1
# 2
# Fizz
# 4
# Buzz
# Fizz
# 7
# 8
# Fizz
# Buzz
# 11
# Fizz
# 13
# 14
# FizzBuzz
# 16
# 17
# Fizz
# 19
# Buzz
# Fizz
# 22
# 23
# Fizz
# Buzz
# 26
# Fizz
# 28
# 29
# FizzBuzz
def FizzBuzz(num1):
    """Print 1..num1, one value per line, replacing multiples of 3 with
    "Fizz", multiples of 5 with "Buzz", and multiples of both with
    "FizzBuzz".

    Fix: the definition was commented out while FizzBuzz(30) was still
    called below, which raised NameError.
    """
    count = 1
    while count <= num1:
        if count % 3 == 0 and count % 5 == 0:
            print("FizzBuzz")
        elif count % 5 == 0:
            print("Buzz")
        elif count % 3 == 0:
            print("Fizz")
        else:
            print(count)
        count += 1


FizzBuzz(30)
# def factorial(num1):
# apple = num1
# procces = 1
# if apple >= procces:
# print(apple)
# if apple == procces:
# return
# factorial(num1 - 1)
# factorial(100)
|
from zgui import models
from zgui.addons.point_of_sale.report.pos_details import pos_details
class PosDetailsCustom(pos_details):
    """Extends the point-of-sale details report with per-category totals.

    NOTE(review): uses the legacy OpenERP report API (self.pool, self.cr,
    self.uid) — confirm the target Odoo version.
    """

    def _pos_sales_details_custom(self, form):
        """Return POS order data grouped by product category for the period
        selected in ``form``, accumulating self.qty/self.total/self.discount
        running totals for the report template."""
        user_obj = self.pool.get('res.users')
        # Fall back to every user when the wizard selected none.
        user_ids = form['user_ids'] or self._get_all_users()
        company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
        res = self.pool.get('report.pos.order').read_group(self.cr, self.uid, groupby=["product_categ_id"], fields=["product_categ_id", "product_qty", "price_total", 'total_discount'], domain=[('date', '>=', form['date_start'] + ' 00:00:00'), ('date', '<=', form['date_end'] + ' 23:59:59'), ('user_id', 'in', user_ids), ('state', 'in', ['done', 'paid', 'invoiced']), ('company_id', '=', company_id)])
        for r in res:
            # Running totals consumed by the rendering template.
            self.qty += r['product_qty']
            self.total += r['price_total']
            self.discount += r['total_discount']
        return res

    def __init__(self, cr, uid, name, context):
        super(PosDetailsCustom, self).__init__(cr, uid, name, context=context)
        # Expose the helper to the report rendering context.
        self.localcontext.update({
            'pos_sales_details_custom': self._pos_sales_details_custom,
        })
class ReportPosDetails(models.AbstractModel):
    """Binds the custom parser class to the existing POS details report."""
    _inherit = 'report.point_of_sale.report_detailsofsales'
    _wrapped_report_class = PosDetailsCustom
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 12:40:52 2021
@author: Maxi
"""
# Some basic commands.
x = 5
print(type(x))

pi_approx = 22 / 7
radius = 2.2
area = pi_approx * (radius ** 2)
radius = radius + 1  # note: `area` keeps the value computed with the old radius
print("The area of the circle is {}".format(float(area)))

## if/else basics
x = int(input('Enter an integer: '))
if x % 2 == 0:          # test
    print('')           # true block
    print('Even')       # true block
else:
    print('')           # false block
    print('Odd')        # false block
print('Done with conditional')  # rest of the program
# Python uses indentation to identify each part of the code.

### Nested conditionals
if x % 2 == 0:
    if x % 3 == 0:
        print('Divisible by 2 and 3')
    else:
        print('Divisible by 2 and not by 3')
elif x % 3 == 0:
    print('Divisible by 3 and not by 2')

### Compound booleans --> sequence of tests
x = 2
y = 3
z = 4
if x < y and x < z:
    print('x is least')
elif y < z:
    print('y is least')
else:
    print("z is least")
|
def sumProblem(x, y):
    """Print a sentence stating the sum of ``x`` and ``y``.

    Fix: the local variable was named ``sum``, shadowing the builtin.
    """
    total = x + y
    sentence = 'The sum of {} and {} is {}.'.format(x, y, total)
    print(sentence)
def main():
    """Demo driver: two fixed examples, then user-supplied integers."""
    sumProblem(1, 6)
    sumProblem(670, 80)
    first_num = int(input("Enter an integer: "))
    second_num = int(input("Enter another integer: "))
    sumProblem(first_num, second_num)


main()

# Personalized greeting after the sum demo.
person = input("Enter the name of person: ")
greetings = "Hello {}".format(person)
print(greetings)
|
import scrapy
from mySpider.items import MyspiderItem
class GushiwenSpider(scrapy.Spider):
    """Scrape poem title/author/content from gushiwen.cn list pages and
    follow the "next page" link recursively."""
    name = 'gushiwen'
    allowed_domains = ['gushiwen.cn']
    start_urls = ['https://www.gushiwen.cn/default_1.aspx']

    def parse(self, response):
        """Yield one item per poem block, then request the next page."""
        div_list = response.xpath('//div[@class="left"]/div[@class="sons"]')
        for div in div_list:
            titles = div.xpath('.//b/text()').extract_first()
            author = div.xpath('.//p[@class="source"]//text()').extract()
            contents = div.xpath('.//div[@class="contson"]//text()').extract()
            contents = ''.join(contents).strip()
            item = MyspiderItem()
            # extract_first() returns None for blocks without a poem title;
            # skip those.
            if titles is not None:
                item["标题"] = titles
                item["作者"] = author
                item["内容"] = contents
                yield item
        href = response.xpath('//div[@class="pagesright"]/a[@id="amore"]/@href').extract_first()
        # Fix: a bare try/except previously swallowed the TypeError raised
        # by len(None) when no next-page link exists; test explicitly.
        if href:
            yield scrapy.Request(
                response.urljoin(href),
                callback=self.parse,
            )
|
# -*- encoding: latin-1 -*-
import RPi.GPIO as GPIO
import time
import curses

# Configure the Raspberry Pi's GPIO.
# Tell the library how we interpret GPIO pin numbers (physical board layout).
GPIO.setmode(GPIO.BOARD)

# Pins used to drive the motors.
motorPins = [11, 12, 15, 16]

# Set the pins in "motorPins" to output mode.
for pin in motorPins:
    GPIO.setup(pin, GPIO.OUT)
    # Make sure everything is switched off before we start.
    GPIO.output(pin, 0)

# One list of (pin, level) tuples per motor operation.
stop = [(11, 0), (12, 0), (15, 0), (16, 0)]
frem = [(12, 1), (15, 1)]     # forward
tilbage = [(11, 1), (16, 1)]  # backward
hoejre = [(11, 1), (15, 1)]   # right
venstre = [(12, 1), (16, 1)]  # left


def robotDo(opperation):
    """Run one motor operation for 2 seconds, then stop for 1 second."""
    for t in opperation:
        GPIO.output(*t)
    time.sleep(2)
    for t in stop:
        GPIO.output(*t)
    time.sleep(1)


# Demo sequence: forward, backward, right, left, then release the pins.
print("Frem.")
robotDo(frem)
print("Tilbage.")
robotDo(tilbage)
print("Højre.")
robotDo(hoejre)
print("Venstre.")
robotDo(venstre)
GPIO.cleanup()
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author:maidou
@contact:QQ4113291000
@time:2018/6/14.上午10:15
'''
# Script entry point; the module currently does nothing when run directly.
if __name__ == '__main__':
    pass
import nester

# Nested list of shows; inner lists group related entries.
movies = ['The Simpsons', 'Eric', ['Rick & Morty', 'Rick'], ['South Park', '2016', 'Kyel']]
# print_lol(list, flag, level) — presumably prints the nested list with
# indentation enabled starting at level 0; confirm against the nester module.
nester.print_lol(movies, True, 0)
|
def bucket_sort(alist, bucket_num):
    """Sort ``alist`` with bucket sort using ``bucket_num`` buckets.

    Elements are distributed into equal-width buckets spanning
    [min, max], each bucket is sorted, and the results are concatenated.
    Returns a new sorted list; the input is not modified.

    Fixes: crashed on an empty list (max() of empty sequence) and printed
    the intermediate buckets (debug leftover).
    """
    if not alist:
        return []
    max_num, min_num = max(alist), min(alist)
    # +1 guarantees the maximum element maps to the last bucket index.
    bucket_size = (max_num - min_num + 1) / bucket_num
    buckets = [[] for _ in range(bucket_num)]
    # Assign each element to its bucket.
    for num in alist:
        buckets[int((num - min_num) / bucket_size)].append(num)
    # Sort each bucket and merge.
    res = []
    for b in buckets:
        res.extend(sorted(b))
    return res


arr = [0, 9, 3, 5, 6, 4, 2, 8, 1, 7]
print(bucket_sort(arr, 5))
|
from django.db import models
from django.contrib.auth.models import User
class Client(models.Model):
    """Shop customer: links a Django auth user to its default addresses."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="Utilisateur associe")
    default_shipping_address = models.ForeignKey("Address", on_delete=models.CASCADE,
                                                 related_name="default_shipping_address",
                                                 null=True,
                                                 verbose_name="Adresse de livraison par défaut"
                                                 )
    default_invoicing_address = models.ForeignKey("Address", on_delete=models.CASCADE,
                                                  related_name="default_invoicing_address",
                                                  null=True,
                                                  verbose_name="Adresse de facturation par défaut"
                                                  )

    def __unicode__(self):
        # NOTE(review): __unicode__ is Python 2 era; Django on Python 3
        # uses __str__ — confirm which runtime this targets.
        return self.user.username + " (" + self.user.first_name + " " + self.user.last_name + ")"

    def addresses(self):
        """All addresses attached to this client."""
        return Address.objects.filter(client_id=self.id)

    def orders(self):
        """This client's orders, most recent first."""
        return Order.objects.filter(client_id=self.id).order_by('-id')
class TVA(models.Model):
    """A VAT rate stored as a decimal fraction (e.g. 0.2 for 20%)."""
    percent = models.FloatField(verbose_name="Taux de TVA (décimal)")

    class Meta:
        verbose_name = 'Taux de TVA'
        verbose_name_plural = 'Taux de TVA'

    def __unicode__(self):
        return str(self.percent * 100) + " %"
class Category(models.Model):
    """Product category, optionally nested under a parent category."""
    name = models.CharField(max_length=150, verbose_name="Nom de la catégorie")
    short_desc = models.CharField(max_length=150, verbose_name="Description courte", blank=True)
    parent_category = models.ForeignKey("Category", on_delete=models.CASCADE, null=True, blank=True, verbose_name="Catégorie parente")

    class Meta:
        verbose_name = 'Catégorie de produits'
        verbose_name_plural = 'Catégories de produits'

    def __unicode__(self):
        return self.name

    # noinspection PyMethodFirstArgAssignment
    def breadcrum(self):
        """Chain of categories from the root down to this one.

        Rebinds ``self`` while walking up the parent links (hence the
        inspection suppression above).
        """
        breadcrum = list()
        breadcrum.append(self)
        while self.parent_category:
            breadcrum.insert(0, self.parent_category)
            self = self.parent_category
        return breadcrum

    def childs_categories(self):
        """Return the direct child categories of this category."""
        childs = Category.objects.filter(parent_category_id__exact=self.id)
        return childs

    def all_products(self):
        """Products of this main category and its subtree.

        NOTE(review): assumes category ids are assigned so a main category's
        subtree occupies a contiguous id range up to the next main category —
        fragile; confirm before relying on it.
        """
        next_main_category = Category.objects.filter(id__gt=self.id, parent_category_id=None).order_by('id').first()
        if not next_main_category:
            products = Product.objects.filter(category_id__gte=self.id)
        else:
            products = Product.objects.filter(category_id__range=(self.id, next_main_category.id-1))
        return products
class Product(models.Model):
    """A sellable product belonging to a category, with a VAT rate."""
    name = models.CharField(max_length=150, verbose_name="Nom du produit")
    category = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name="Catégorie du produit")
    short_desc = models.CharField(max_length=150, verbose_name="Description courte")
    long_desc = models.TextField(verbose_name="Description longue")
    price = models.FloatField(verbose_name="Prix HT du produit")
    # Fix: the VAT model in this file is named TVA; referencing the
    # undefined name VAT raised NameError at import time.
    vat = models.ForeignKey(TVA, on_delete=models.CASCADE, verbose_name="Taux de TVA")
    thumbnail = models.ImageField(verbose_name="Miniature du produit", upload_to='art/media', null=True)

    class Meta:
        verbose_name = 'Produit'
        verbose_name_plural = 'Produits'

    def __unicode__(self):
        return self.name

    def price_including_vat(self):
        """Price with VAT applied, rounded to 2 decimals."""
        return round(self.price + (self.price * self.vat.percent), 2)
class Photo(models.Model):
    """Additional product photo (the main thumbnail lives on Product)."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    photo = models.ImageField(upload_to='art/media')
|
# Modules needed
import mysql.connector
import csv
import smtplib
from mysql.connector import errorcode
from datetime import date
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
# SQL host values
config = {'host': 'mysql-server.domain.com', 'user': 'nameofdatabaseuser', 'password': 'password123', 'database': 'nameofdatabase'}
try:
    # Connect to SQL host and open connection
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    # Get all attachment data from database
    query = ("SELECT DISTINCT c.contentid, c.title AS attachmentTitle, u.username AS uploadedBy, co.title AS pageTitle, cn.longval as size, cd.creationdate, c.CONTENT_STATUS "
             "FROM CONTENT AS c JOIN user_mapping AS u ON u.user_key = c.creator "
             "JOIN CONTENT AS co ON c. pageid = co.contentid "
             "JOIN CONTENT AS cd ON c.creationdate = cd.creationdate "
             "JOIN CONTENTPROPERTIES AS cn ON cn.contentid = c.contentid "
             "WHERE c.contenttype = 'ATTACHMENT' AND cn.longval IS NOT NULL")
    cursor.execute(query)
    # Date stamp for the report filename, e.g. 010224.
    d = date.today().strftime("%m%d%y")
    # Output query data to CSV.
    # Fix: the cursor must stay open until the rows are written — the
    # original closed it before writerows(cursor), which iterates it.
    csvfile = "confluence-attachments-" + d + ".csv"
    with open(csvfile, 'w', encoding='utf-8') as out:
        csv_out = csv.writer(out, lineterminator='\n')
        csv_out.writerow(['attachmentUID', 'attachmentTitle', 'uploadedBy', 'pageTitle', 'attachmentSize', 'attachedDate', 'attachmentStatus'])
        csv_out.writerows(cursor)
    # Fix: the original called .close() on the filename string; the file is
    # closed by the `with` block above. Close the cursor now instead.
    cursor.close()
    # Create email message headers and body.
    fromaddr = "sender@domain.com"
    toaddr = "recipient01@domain.com, recipient02@domain.com"
    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Subject'] = "Weekly Confluence Attachment Report"
    body = "Report of Confluence attachment data."
    msg.attach(MIMEText(body, 'plain'))
    # Attach the CSV report; a context manager guarantees the handle is
    # closed even if a later step fails.
    filename = csvfile
    with open(filename, "rb") as attachment:
        part = MIMEBase('application', 'octet-stream')
        part.set_payload(attachment.read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    msg.attach(part)
    # Send message using SMTP server with no authentication required.
    server = smtplib.SMTP('mail-server.domain.com', 25)
    server.sendmail(fromaddr, toaddr, msg.as_string())
    server.quit()
except mysql.connector.Error as err:
    if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
        print("Something is wrong with your user name or password")
    elif err.errno == errorcode.ER_BAD_DB_ERROR:
        print("Database does not exist")
    else:
        print("Something went wrong: {}".format(err))
else:
    cnx.close()
|
from abc import ABCMeta, abstractmethod


class Element(metaclass=ABCMeta):
    """Abstract base for composable elements; subclasses must implement
    compose().

    Fixes: the class had no name (`class :` is a SyntaxError), and the
    Python 2 `__metaclass__` attribute is inert on Python 3 — the
    metaclass is now passed via the class keyword so @abstractmethod is
    actually enforced.
    """

    @abstractmethod
    def compose(self):
        raise NotImplementedError
from dsl.element import Html
from dsl.element import Input
class Form(Html):
    """An HTML <form> element."""
    tag = "form"


class CharField(Input):
    """Single-line text input."""
    default_attributes = {"type": "text"}


class EmailField(CharField):
    # Same rendering as CharField; kept as a distinct type for semantics.
    pass


class PasswordField(Input):
    """Masked password input."""
    default_attributes = {"type": "password"}
|
import argparse
from subprocess import call
import pandas as pd
from os.path import dirname, join
def main():
    """Generate and qsub one processing job script per run in each runlist.

    Each generated script runs r1 -> dl1 -> hillas extraction, fixes ACLs
    on the outputs, and deletes itself when the final product exists.
    With --dry, only print the qsub commands.
    """
    parser = argparse.ArgumentParser()
    # --dry: print submission commands without actually submitting.
    parser.add_argument('--dry', dest='dry', action="store_true")
    args = parser.parse_args()
    dry = args.dry
    base = "/lfs/l2/chec/userspace/jasonjw/Data/astri_onsky_archive"
    runlist_paths = [
        join(base, "d2019-04-30_cosmicray/runlist.txt"),
        join(base, "d2019-05-01_cosmicray/runlist.txt"),
        join(base, "d2019-05-01_mrk501/runlist.txt"),
        join(base, "d2019-05-02_PG1553+113/runlist.txt"),
        join(base, "d2019-05-02_mrk421/runlist.txt"),
        join(base, "d2019-05-02_mrk501/runlist.txt"),
        join(base, "d2019-05-06_mrk501/runlist.txt"),
        join(base, "d2019-05-07_cosmicray/runlist.txt"),
        join(base, "d2019-05-08_cosmicray/runlist.txt"),
        # join(base, "d2019-05-08_ledflashers_dynrange/runlist.txt"),
        # join(base, "d2019-05-08_mrk501_drift/runlist.txt"),
        # join(base, "d2019-05-08_slowsignal/runlist.txt"),
        # join(base, "d2019-05-09_ledflashers_altscans/runlist.txt"),
        join(base, "d2019-05-09_mrk421/runlist.txt"),
    ]
    # Command templates, filled per run below.
    extract_dl1 = "extract_dl1_onsky -f {}\n"
    extract_hillas = "extract_hillas -f {}\n"
    # Copy the directory's default ACL onto the produced file.
    correct_permissions = "getfacl -d . | setfacl --set-file=- {}\n"
    for runlist_path in runlist_paths:
        df_runlist = pd.read_csv(runlist_path, sep='\t')
        directory = dirname(runlist_path)
        for _, row in df_runlist.iterrows():
            run = row['run']
            shell_path = join(directory, f"Run{run:05d}.sh")
            # Companion file paths derived from the script name.
            r1_path = shell_path.replace(".sh", "_r1.tio")
            dl1_path = r1_path.replace("_r1.tio", "_dl1.h5")
            hillas_path = dl1_path.replace("_dl1.h5", "_hillas.h5")
            with open(shell_path, 'w') as file:
                file.write("source $HOME/.bash_profile\n")
                file.write("source activate cta\n")
                file.write("export NUMBA_NUM_THREADS=6\n")
                file.write(extract_dl1.format(r1_path))
                file.write(extract_hillas.format(dl1_path))
                file.write(correct_permissions.format(dl1_path))
                file.write(correct_permissions.format(hillas_path))
                # Self-delete once the final product exists.
                file.write(f"if [ -f {hillas_path} ]; then\n")
                file.write(f"\trm -f {shell_path}\n")
                file.write("fi\n")
            call("chmod +x {}".format(shell_path), shell=True)
            cmd = "qsub -cwd -V -q lfc.q {}".format(shell_path)
            print(cmd)
            if not dry:
                call(cmd, shell=True)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from builtins import dict, object
from future.utils import raise_from
import base64
import json
import boto3
import requests
from jose import jwk, jwt
from jose.utils import base64url_decode
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import HTTPError
from requests.packages.urllib3.util.retry import Retry
# pennsieve
from pennsieve import log
from pennsieve.models import User
class UnauthorizedException(Exception):
    """Raised on a 401/403 API response; triggers re-authentication."""
    pass
class PennsieveRequest(object):
    """One deferred HTTP request: holds a session method and its arguments
    until call() executes it and post-processes the response."""

    def __init__(self, func, uri, *args, **kwargs):
        self._func = func      # bound session method (get/put/post/delete)
        self._uri = uri
        self._args = args
        self._kwargs = kwargs
        self._response = None
        self._logger = log.get_logger("pennsieve.base.PennsieveRequest")

    def raise_for_status(self, resp):
        """Re-raise HTTP errors, attaching the response body as the message
        when one is available."""
        try:
            resp.raise_for_status()
        except HTTPError as e:  # raise_for_status raises HTTPError, so we can use it to grab the message
            if resp.text:
                raise_from(HTTPError(resp.content, response=resp), e)
            else:
                raise e
        return

    def _handle_response(self, resp):
        """Translate auth failures into UnauthorizedException and attach the
        parsed body to ``resp.data``."""
        self._logger.debug(u"resp = {}".format(resp))
        self._logger.debug(u"resp.content = {}".format(resp.text))  # decoded unicode
        if resp.status_code in [requests.codes.forbidden, requests.codes.unauthorized]:
            raise UnauthorizedException()
        if not resp.status_code in [requests.codes.ok, requests.codes.created]:
            self.raise_for_status(resp)
        try:
            # Return parsed object when the body is JSON.
            resp.data = json.loads(resp.text)
        except:
            # Deliberate best-effort fallback: non-JSON bodies are returned
            # as raw text.
            resp.data = resp.text

    def call(self, timeout=None):
        """Execute the request and return the post-processed response."""
        self._response = self._func(
            self._uri, *self._args, timeout=timeout, **self._kwargs
        )
        self._handle_response(self._response)
        return self._response
class ClientSession(object):
    """Authenticated HTTP session against the Pennsieve API.

    Handles Cognito-based login, request construction with retries, and
    transparent re-authentication when the session token expires.
    """

    def __init__(self, settings):
        self._host = settings.api_host
        self._api_token = settings.api_token
        self._api_secret = settings.api_secret
        self._headers = settings.headers
        self._model_service_host = settings.model_service_host
        self._logger = log.get_logger("pennsieve.base.ClientSession")
        self._session = None       # lazily-created requests.Session
        self._token = None         # current access token (JWT)
        self._secret = None
        self._context = None
        self._organization = None  # current organization node id
        self.profile = None        # User, populated by authenticate()
        self.settings = settings

    def authenticate(self, organization=None):
        """
        An API token is used to authenticate against the Pennsieve platform.
        The token that is returned from the API call will be used for all
        subsequent API calls.
        """
        cognito_config = self._get("/authentication/cognito-config")
        cognito_client_application_id = cognito_config["tokenPool"]["appClientId"]
        cognito_region_name = cognito_config["region"]
        # Authenticate to AWS Cognito
        #
        # Hack: stub the access and secret keys with empty values so boto does
        # not look for AWS credentials in the environment. Some versions of boto
        # fail when they cannot find AWS credentials even though Cognito does
        # not need creds.
        cognito_idp_client = boto3.client(
            "cognito-idp",
            region_name=cognito_region_name,
            aws_access_key_id="",
            aws_secret_access_key="",
        )
        response = cognito_idp_client.initiate_auth(
            AuthFlow="USER_PASSWORD_AUTH",
            AuthParameters={"USERNAME": self._api_token, "PASSWORD": self._api_secret},
            ClientId=cognito_client_application_id,
        )
        # Grab the tokens
        access_token_jwt = response["AuthenticationResult"]["AccessToken"]
        id_token_jwt = response["AuthenticationResult"]["IdToken"]
        # Since we passed the verification, we can now safely use the claims
        claims = jwt.get_unverified_claims(id_token_jwt)
        # Ensures that `self._session` exists
        self.session
        # Parse response, set session access token
        self.token = access_token_jwt
        self.profile = User.from_dict(self._get("/user/"))
        if organization is None:
            organization = claims["custom:organization_node_id"]
        self._set_org_context(organization)

    @property
    def token(self):
        """Current session access token (JWT)."""
        return self._token

    @token.setter
    def token(self, value):
        # Keep the session's Authorization header in sync with the token.
        self._token = value
        self._set_auth(value)

    def _set_org_context(self, organization_id):
        self._organization = organization_id
        self._session.headers["X-ORGANIZATION-ID"] = organization_id

    def _set_auth(self, session_token):
        self._session.headers["Authorization"] = "Bearer {}".format(session_token)

    @property
    def session(self):
        """
        Make requests-futures work within threaded/distributed environment.
        Lazily creates and configures the underlying requests.Session.
        """
        if self._session is None:
            self._session = Session()
            self._set_auth(self.token)
            # Set global headers
            if self._headers:
                self._session.headers.update(self._headers)
            # Enable retries via urllib
            adapter = HTTPAdapter(
                max_retries=Retry(
                    total=self.settings.max_request_timeout_retries,
                    backoff_factor=0.5,
                    status_forcelist=[
                        500,
                        502,
                        503,
                        504,
                    ],  # Retriable errors (but not POSTs)
                )
            )
            self._session.mount("http://", adapter)
            self._session.mount("https://", adapter)
        return self._session

    def _make_request(self, func, uri, *args, **kwargs):
        # Debug-log every outgoing request before wrapping it.
        self._logger.debug("~" * 60)
        self._logger.debug("uri = {} {}".format(func.__func__.__name__, uri))
        self._logger.debug("args = {}".format(args))
        self._logger.debug("kwargs = {}".format(kwargs))
        self._logger.debug("headers = {}".format(self.session.headers))
        return PennsieveRequest(func, uri, *args, **kwargs)

    def _call(self, method, endpoint, base="", reauthenticate=True, *args, **kwargs):
        """Build and execute a request; return the parsed response data.

        NOTE(review): an unrecognized `method` leaves `func` unbound and
        raises UnboundLocalError — callers only pass get/put/post/delete.
        """
        if method == "get":
            func = self.session.get
        elif method == "put":
            func = self.session.put
        elif method == "post":
            func = self.session.post
        elif method == "delete":
            func = self.session.delete
        # serialize data
        if "data" in kwargs:
            kwargs["data"] = json.dumps(kwargs["data"])
        # we might specify a different host
        if "host" in kwargs:
            host = kwargs["host"]
            kwargs.pop("host")
        else:
            host = self._host
        # call endpoint
        uri = self._uri(endpoint, base=base, host=host)
        req = self._make_request(func, uri, *args, **kwargs)
        resp = self._get_response(req, reauthenticate=reauthenticate)
        return resp.data

    def _uri(self, endpoint, base, host=None):
        if host is None:
            host = self._host
        return "{}{}{}".format(host, base, endpoint)

    def _get(self, endpoint, *args, **kwargs):
        return self._call("get", endpoint, *args, **kwargs)

    def _post(self, endpoint, *args, **kwargs):
        return self._call("post", endpoint, *args, **kwargs)

    def _put(self, endpoint, *args, **kwargs):
        return self._call("put", endpoint, *args, **kwargs)

    def _del(self, endpoint, *args, **kwargs):
        return self._call("delete", endpoint, *args, **kwargs)

    def _get_response(self, req, reauthenticate=True):
        """Execute the request; on 401/403, re-authenticate once and retry."""
        try:
            return req.call(timeout=self.settings.max_request_time)
        except UnauthorizedException as e:
            if self._token is None or reauthenticate is False:
                raise e
            # try to refresh the session and re-request
            self.authenticate(self._organization)
            return req.call(timeout=self.settings.max_request_time)

    def register(self, *components):
        """
        Register API component with session. Components should all be of
        APIBase type and have a name and base_uri property.
        The registered component will have reference to base session to
        make higher-level calls outside of its own scope, if needed.
        """
        # initialize
        for component in components:
            c = component(session=self)
            assert len(component.name) > 1, "Invalid API component name"
            # component is accessible via session.(name)
            self.__dict__.update({component.name: c})

    @property
    def headers(self):
        return self.session.headers
|
import sys
import os

sys.path.insert(0, '/project')

from app import app as application

# Fix: the original file ended with shell `export http_proxy=` lines, which
# are invalid Python (SyntaxError on import). Reproduce their intent —
# clearing the proxy settings — via the process environment instead.
os.environ['http_proxy'] = ''
os.environ['https_proxy'] = ''
os.environ['no_proxy'] = ''
import logging
from functools import reduce
def get_productionplans(data):
    """Compute how much power ('p') each plant should produce to meet the
    requested load at minimal cost.

    `data` is expected to hold, in insertion order, the keys: load, fuels,
    powerplants (relies on dict ordering).
    """
    load, fuels, powerplants = data.values()
    # Merit order: cheapest plants first.
    values = sorted(map(lambda powerplant : get_values(powerplant,fuels), powerplants), key=lambda k: k['price'])

    def reducer(data, value):
        # Greedy pass: give each plant as much of the remaining load as its
        # pmax allows; once started, a plant produces at least its pmin.
        name, price, pmax, pmin = value.values()
        if data['rest'] <= 0:
            p = 0
        elif data['rest'] < pmin:
            p = pmin
        elif data['rest'] >= pmax:
            p = pmax
        else:
            p = data['rest']
        data['rest'] -= p
        data['values'].append({'name':name, 'p': p,'price':price, 'pmax':pmax, 'pmin':pmin});
        return data;

    def balancer(data, value):
        # Second pass, most expensive plants first: trim the over-production
        # (negative 'rest') that pmin constraints may have introduced.
        name, p, price, pmax, pmin = value.values()
        if p > pmin:
            if( p < abs(data['rest'])):
                p = 0
                # NOTE(review): adding p right after setting it to 0 is a
                # no-op — possibly these two statements are in the wrong
                # order; confirm intent before changing.
                data['rest'] = data['rest'] + p
            elif p + data['rest'] >= pmin: #check pmin risk
                p = p + data['rest']
                data['rest'] = 0
            else:
                data['rest'] = data['rest'] + ( p - pmin)
                p = pmin
        data['values'].append({'name':name, 'p': p});
        return data

    reduced = reduce(reducer,values,{'values':[], 'rest':load})
    return reduce(balancer, sorted(reduced['values'], key=lambda k: k['price'], reverse=True), {'values':[], 'rest':reduced['rest']})
def gasfired(efficiency, fuels):
    """Marginal cost (euro/MWh) of a gas-fired plant, CO2 cost included."""
    # Computing emissions needs tons of CO2 per MWh; mocked as 1 for now.
    tons_per_mw = 1
    co2_cost = tons_per_mw * fuels['co2(euro/ton)']
    fuel_cost = fuels['gas(euro/MWh)'] / efficiency
    return fuel_cost + co2_cost
def turbojet(efficiency, fuels):
    """Marginal cost (euro/MWh) of a kerosine turbojet, CO2 cost included."""
    # Computing emissions needs tons of CO2 per MWh; mocked as 1 for now.
    tons_per_mw = 1
    co2_cost = tons_per_mw * fuels['co2(euro/ton)']
    fuel_cost = fuels['kerosine(euro/MWh)'] / efficiency
    return fuel_cost + co2_cost
def windturbine(efficiency, fuels):
    """Wind power has no fuel or emission cost."""
    return 0


def gasfired_pmax(efficiency, fuels, pmax):
    """Gas plants can always deliver their nominal pmax."""
    return pmax


def turbojet_pmax(efficiency, fuels, pmax):
    """Turbojets can always deliver their nominal pmax."""
    return pmax


def windturbine_pmax(efficiency, fuels, pmax):
    """Wind output is the nominal pmax scaled by the wind percentage."""
    return pmax * fuels['wind(%)'] / 100
def get_values(powerplant, fuels):
    """Return {name, price, pmax, pmin} for one powerplant dict.

    `powerplant` is expected to hold, in insertion order: name, type,
    efficiency, pmin, pmax.

    Fix: the price/pmax functions were resolved with eval() on data, which
    both shadows the builtin `type` and executes arbitrary names from the
    input; use an explicit dispatch table instead (unknown plant types now
    raise KeyError rather than NameError).
    """
    name, plant_type, efficiency, pmin, pmax = powerplant.values()
    price_funcs = {
        'gasfired': gasfired,
        'turbojet': turbojet,
        'windturbine': windturbine,
    }
    pmax_funcs = {
        'gasfired': gasfired_pmax,
        'turbojet': turbojet_pmax,
        'windturbine': windturbine_pmax,
    }
    price = price_funcs[plant_type]
    pmaxc = pmax_funcs[plant_type]
    return {
        'name': name,
        'price': price(efficiency, fuels),
        'pmax': pmaxc(efficiency, fuels, pmax),
        'pmin': pmin
    }
# 100x100 matrix random floating points inside
# each row is a data point
# each column is a feature
# standardize each feature
import numpy as np


def standardize_features(array: np.ndarray) -> np.ndarray:
    """Standardize each column (feature) of a 2-D array to zero mean and
    unit standard deviation: (x - mean) / std, computed per column.

    Fix: the original computed ``array - (mean / std)`` because division
    binds tighter than subtraction; the subtraction must happen first.
    """
    mean_vector = np.mean(array, axis=0)
    std_vector = np.std(array, axis=0)
    standardized_matrix = (array - mean_vector) / std_vector
    return standardized_matrix
|
from django.contrib import admin
from .models import EmployeeModel
# Register your models here.


class EmployeeModelAdmin(admin.ModelAdmin):
    """Default admin configuration for EmployeeModel."""
    pass


admin.site.register(EmployeeModel, EmployeeModelAdmin)
# Branding shown in the admin site header.
admin.site.site_header = 'Haritha Computers & Technology'
|
import getopt
import sys
def usage():
    """Print command-line help for the OSD provisioning script.

    Fix: the original printed the literal text "sys.args[0]" (a name that
    does not even exist) instead of substituting the invoked script name.
    """
    print(
        """
Usage: {} [option]
-h or --help: 显示帮助信息
-c or --cache-disk: 缓存盘磁盘
-m or --meta-disk: 元数据磁盘
-b or --data-disks: 数据盘
-ws or --wal-disk-size: 日志盘大小
-ds or --db-disk-size: 数据库盘大小
-sid or --start-osd-id: 起始osd id
""".format(sys.argv[0])
    )
#! /bin/bash
# Provision Ceph OSDs: wipe the data/cache/meta disks, carve the cache and
# metadata disks into per-OSD partitions, then prepare, activate and start
# one bluestore OSD per data disk.
set -x

# Option parsing: -c cache disk, -m metadata disk, -b data disks (comma
# separated), -cs cache partition size (GiB), -ws WAL partition size,
# -ds DB partition size, -sid starting OSD id.
while [ $# -gt 0 ];
do
    case $1 in
        -c) cache_disk=$2
            shift
            ;;
        -m) meta_disk=$2
            shift
            ;;
        -b) data_disks=${@:2}
            shift
            ;;
        -cs) cache_size=$2
            shift
            ;;
        -ws) wal_disk_size=$2
            shift
            ;;
        -ds) db_disk_size=$2
            shift
            ;;
        -sid) start_osd_id=$2
            shift
            ;;
    esac
    shift
done

# Turn the comma-separated data-disk list into a whitespace list.
bdisks=${data_disks//,/ }
echo "cache=${cache_disk} meta=${meta_disk} wal_size=${wal_disk_size} db_size=${db_disk_size} block=${blocks}"

# If a cache disk was given: disable write cache, zero the label, fresh GPT.
if [ ${cache_disk} ]
then
    sdparm -s WCE=0 /dev/${cache_disk}
    dd if=/dev/zero of=/dev/${cache_disk} bs=1M count=10
    parted /dev/${cache_disk} --script mktable gpt
fi

# If a metadata disk was given: same wipe-and-relabel treatment.
if [ ${meta_disk} ]
then
    sdparm -s WCE=0 /dev/${meta_disk}
    dd if=/dev/zero of=/dev/${meta_disk} bs=1M count=10
    parted /dev/${meta_disk} --script mktable gpt
fi

# Wipe each data (block) disk.
for disk in ${bdisks[*]}
do
    sdparm -s WCE=0 /dev/${disk}
    dd if=/dev/zero of=/dev/${disk} bs=1M count=10
done

# Scratch directory holding per-disk symlinks to the created partitions.
tmp_link_dir=/tmp/osd/link
mkdir -p ${tmp_link_dir}
rm -rf /${tmp_link_dir}/*

# Carve one cache partition per data disk and symlink it as cache_<disk>.
function split_cache_disk
{
    start=0
    end=0
    i=0
    cdisk=$1
    disk_size=$2
    for disk in ${bdisks[*]}
    do
        let "i+=1"
        let "start=end"
        let "end=start + cache_size"
        parted /dev/${cdisk} --script mkpart primary ${start}G ${end}G
        ln -s /dev/${cdisk}p${i} /${tmp_link_dir}/cache_${disk}
    done
}

# Split the cache disk into one partition per OSD.
if [ ${cache_disk} ]
then
    split_cache_disk ${cache_disk} ${cache_size}
fi

# Carve alternating WAL/DB partitions (one pair per data disk) on the
# metadata disk and symlink them as wal_<disk> / db_<disk>.
function split_meta_disk
{
    echo $1 $2 $3 $4
    end=0
    start=0
    i=0
    mdisk=$1
    wal_size=$2
    db_size=$3
    for disk in ${bdisks[*]}
    do
        let "i+=1"
        let "start=end"
        let "end=start + wal_size"
        parted /dev/${mdisk} --script mkpart primary ${start}G ${end}G
        ln -s /dev/${mdisk}p${i} /${tmp_link_dir}/wal_${disk}
        let "i+=1"
        let "start=end"
        let "end=start + db_size"
        parted /dev/${mdisk} --script mkpart primary ${start}G ${end}G
        ln -s /dev/${mdisk}p${i} /${tmp_link_dir}/db_${disk}
    done
}

# Split the metadata disk into WAL/DB partitions.
if [ ${meta_disk} ]
then
    split_meta_disk ${meta_disk} ${wal_disk_size} ${db_disk_size}
fi

# Fully remove an OSD (down, out, crush, auth, rm) and its data directory.
function clean_osd
{
    osd_curr_id=$1
    $(ceph osd down osd.${osd_curr_id})
    $(ceph osd out osd.${osd_curr_id})
    $(ceph osd crush remove osd.${osd_curr_id})
    $(ceph auth del osd.${osd_curr_id})
    $(ceph osd rm osd.${osd_curr_id})
    rm -rf /var/lib/ceph/osd/ceph-${osd_curr_id}
}

# Free the OSD ids we are about to reuse.
osd_id=${start_osd_id}
for disk in ${bdisks[*]}
do
    clean_osd ${osd_id}
    let "osd_id += 1"
done

# Prepare one bluestore OSD per data disk, wiring in the per-disk cache,
# WAL and DB partitions when their symlinks exist.
osd_id=${start_osd_id}
for disk in ${bdisks[*]}
do
    cache_param=""
    wal_param=""
    db_param=""
    if [ -L /${tmp_link_dir}/cache_${disk} ]; then
        divice=$(readlink /${tmp_link_dir}/cache_${disk})
        cache_param="--block.cache ${divice}"
    fi
    if [ -L /${tmp_link_dir}/wal_${disk} ]; then
        divice=$(readlink /${tmp_link_dir}/wal_${disk})
        wal_param="--block.wal ${divice}"
    fi
    if [ -L /${tmp_link_dir}/db_${disk} ]; then
        divice=$(readlink /${tmp_link_dir}/db_${disk})
        db_param="--block.db ${divice}"
    fi
    # NOTE(review): unquoted `bash -c ulimit -n 32768;...` passes only
    # "ulimit" to bash -c, and ceph-volume then runs as a separate command
    # without the raised limit — likely needs quotes around the whole
    # command string; confirm intent.
    /bin/bash -c ulimit -n 32768;ceph-volume raw prepare --bluestore --data /dev/${disk} ${wal_param} ${db_param} ${cache_param} --osd_id ${osd_id} --no-tmpfs
    let "osd_id += 1"
done

# Activate the OSD disks.
for disk in ${bdisks[*]}
do
    cache_param=""
    wal_param=""
    db_param=""
    if [ -L /${tmp_link_dir}/cache_${disk} ]; then
        divice=$(readlink /${tmp_link_dir}/cache_${disk})
        cache_param="--block.cache ${divice}"
    fi
    if [ -L /${tmp_link_dir}/wal_${disk} ]; then
        divice=$(readlink /${tmp_link_dir}/wal_${disk})
        wal_param="--block.wal ${divice}"
    fi
    if [ -L /${tmp_link_dir}/db_${disk} ]; then
        divice=$(readlink /${tmp_link_dir}/db_${disk})
        db_param="--block.db ${divice}"
    fi
    # ceph-volume lvm activate -h
    # NOTE(review): same unquoted `bash -c` issue as in the prepare loop.
    /bin/bash -c ulimit -n 32768;ceph-volume raw activate --device /dev/${disk} ${wal_param} ${db_param} ${cache_param} --no-tmpfs --no-systemd
done

# Drop the temporary partition symlinks.
rm -rf /${tmp_link_dir}/*

# Enable and start a systemd unit per OSD.
osd_id=${start_osd_id}
for disk in ${bdisks[*]}
do
    systemctl enable ceph-osd@${osd_id}
    systemctl start ceph-osd@${osd_id}
    let "osd_id += 1"
done
|
import scrapy
from scrapy.http import FormRequest
import sys
class FlaskSpider(scrapy.Spider):
    """Crawls mugshot pages from bso.sun-sentinel.com up to a given depth,
    collecting image URLs and writing them into templates/Results.html.

    NOTE(review): Python 2 code (print statements). It also reads its start
    URL and depth from raw sys.argv "key=value" entries rather than
    scrapy's -a spider arguments — confirm how the spider is launched.
    """
    name = 'flaskspider'
    # Class-level state shared across the crawl.
    start_urls = []
    count = 0
    image_urls = []
    depth = 0

    def __init__(self, category=None, *args, **kwargs):
        # Last argv entry carries the depth, third-from-last the start URL,
        # each as "key=value".
        self.depth = int(sys.argv[-1].split('=')[-1].strip())
        self.start_urls = [sys.argv[-3].split('=')[-1].strip()]

    def parse(self, response):
        """Grab the page's image URL(s) and follow the pagination control
        until `depth` pages have been visited, then dump the results."""
        print 'depth : ', self.depth
        count = self.count
        self.count += 1
        current_page = self.count
        print 'current_page : ', current_page
        if current_page < self.depth:
            next_link = ''
            if count:
                # Subsequent pages: the "next" control is the second anchor.
                next_link = response.selector.xpath('//*[@id="mugnav"]/a[2]/@onclick').extract()[0]
            else:
                # First page: also record the image shown on the landing page.
                image = response.url + response.selector.xpath('//*[@id="dropzone"]/div[1]/article/img/@src').extract()[0]
                print image
                self.image_urls.append(image)
                next_link = response.selector.xpath('//*[@id="mugnav"]/a/@onclick').extract()[0]
            # The onclick looks like "paginate(<token>);" — reduce to <token>.
            next_link = next_link.split('(')[-1]
            next_link = next_link.replace(';', '').replace(')', '').strip()
            next_image = 'http://bso.sun-sentinel.com/mugshots/' + next_link + '.png'
            print next_image
            self.image_urls.append(next_image)
            yield FormRequest('http://bso.sun-sentinel.com/index.php', callback=self.parse, formdata={'paginate':next_link})
        else:
            print "Completed"
            filename = 'templates/Results.html'
            image_urls = self.image_urls
            print image_urls
            # Dump the collected URLs as bare <img> tags.
            with open(filename, 'wb') as f:
                for i in range(len(image_urls)):
                    f.write('<img src="' + image_urls[i] + '"/>')
|
from pyb import Pin
# Push-pull GPIO pins driving the LED-matrix shift-register interface.
# NOTE(review): D1/D2 presumably feed two data lines of the panel; confirm
# against the wiring diagram.
PIN_D1 = Pin("Y2", Pin.OUT_PP)   # serial data line 1
PIN_D2 = Pin("Y1", Pin.OUT_PP)   # serial data line 2
PIN_LAT = Pin("Y3", Pin.OUT_PP)  # latch
PIN_OE = Pin("Y4", Pin.OUT_PP)   # output enable
PIN_A1 = Pin("Y5", Pin.OUT_PP)   # row address bit 1
PIN_A0 = Pin("Y6", Pin.OUT_PP)   # row address bit 0
PIN_CLK = Pin("Y7", Pin.OUT_PP)  # shift clock
def write_bit_to_both(bit):
    """Present *bit* on both data lines, then pulse the shift clock."""
    for data_pin in (PIN_D1, PIN_D2):
        data_pin.value(bit)
    # Clock pulse: low then high (bit is shifted on the rising edge).
    PIN_CLK.value(False)
    PIN_CLK.value(True)
def write_colour():
    """Shift out one 48-bit colour pattern: 16 blue bits (two identical
    8-bit halves of seven zeros followed by a one), then 16 zero green
    bits and 16 zero red bits."""
    # Blue channel: two identical halves.
    for _half in range(2):
        for _ in range(7):
            write_bit_to_both(False)
        write_bit_to_both(True)
    # Green then red channels: all off (2 x 16 zeros).
    for _ in range(32):
        write_bit_to_both(False)
def write_test():
    """Fill the panel with 64 colour patterns and latch them onto row 0.

    (Sequence reproduced from Jon Russell's working code, even though it
    differs from the spec.)
    """
    for _column in range(64):
        write_colour()
    # Disable output, select row address 0, pulse the latch, re-enable.
    PIN_OE.value(True)
    PIN_A0.value(False)
    PIN_A1.value(False)
    PIN_LAT.value(True)
    PIN_LAT.value(False)
    PIN_OE.value(False)
write_test()
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
# Create your views here.
def index(request):
    """Render the calculator landing page."""
    template = 'index.html'
    return render(request, template)
def submitquery(request):
    """Evaluate the arithmetic expression passed in the ``query`` GET
    parameter and re-render the page with either the answer or an error
    flag.

    NOTE(review): raises MultiValueDictKeyError (HTTP 500) if ``query``
    is missing -- intentional? Confirm with the form template.
    """
    q = request.GET['query']
    # return HttpResponse(q)
    try:
        # SECURITY: eval() on user-supplied text can execute arbitrary
        # Python.  Acceptable only for a trusted/local toy calculator;
        # replace with an ast-based expression parser before exposing this
        # endpoint publicly.
        ans = eval(q)
        mydictionary = {
            "q" : q,
            "ans" : ans,
            "error" : False,
            "result" : True
        }
        return render(request,'index.html',context = mydictionary)
    # BUG FIX: a bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; catch only real evaluation errors.
    except Exception:
        mydictionary = {
            "error" : True,
            "result" : False
        }
        return render(request,'index.html',context=mydictionary)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import math
import networkx as nx
class Trajectoire():
def __init__(self,obstacle_1_x,obstacle_1_y,obstacle_2_x,obstacle_2_y):
self.obstacle_1_x = obstacle_1_x
self.obstacle_1_y = obstacle_1_y
self.obstacle_2_x = obstacle_2_x
self.obstacle_2_y = obstacle_2_y
self.printmess = False
self.ga=20
def setObstacle(self,obstacle_1_x,obstacle_1_y,obstacle_2_x,obstacle_2_y):
gap = self.ga
gap2 = gap +10
self.Ox21=gap2+obstacle_2_x
self.Ox22=gap2+obstacle_2_x
self.Ox23=obstacle_2_x-gap
self.Ox24=obstacle_2_x-gap
self.Oy21=gap2+obstacle_2_y
self.Oy22=obstacle_2_y-gap
self.Oy23=gap2+obstacle_2_y
self.Oy24=obstacle_2_y-gap
self.Ox11=gap2+obstacle_1_x
self.Ox12=gap2+obstacle_1_x
self.Ox13=obstacle_1_x-gap
self.Ox14=obstacle_1_x-gap
self.Oy11=gap2+obstacle_1_y
self.Oy12=obstacle_1_y-gap
self.Oy13=gap2+obstacle_1_y
self.Oy14=obstacle_1_y-gap
    def InitialisationChemin(self,Departx,Departy,Finx,Finy):
        """Normalise start/end so the search always walks in decreasing x.

        Sets ``Envers`` (path must be reversed at the end) and ``Inverse``
        (x and y axes swapped for vertical segments), stores the normalised
        start/end points, and re-derives the obstacle boxes in the working
        coordinate frame.  ``SortieMax`` is the playable-area bound used by
        EstSortie (216 in the swapped frame, 96 otherwise -- presumably the
        field dimensions; TODO confirm).
        """
        self.Envers = False
        self.Inverse = False
        # Vertical segment going "up": swap axes and walk from Fin.
        if Departx-Finx == 0 and Departy-Finy <= 0 :
            self.Inverse = True
            self.posDepartx =Finy
            self.posDeparty =Finx
            self.posFinx = Departy
            self.posFiny =Departx
            self.setObstacle(self.obstacle_1_y,self.obstacle_1_x,self.obstacle_2_y,self.obstacle_2_x)
            self.SortieMax = 216
        # Vertical segment going "down": swap axes, result reversed.
        elif Departx-Finx == 0 and Departy-Finy > 0 :
            self.Inverse = True
            self.Envers = True
            self.posDepartx =Departy
            self.posDeparty =Departx
            self.posFinx = Finy
            self.posFiny =Finx
            self.setObstacle(self.obstacle_1_y,self.obstacle_1_x,self.obstacle_2_y,self.obstacle_2_x)
            self.SortieMax = 216
        # Start left of the end: walk from Fin, result reversed.
        elif Departx-Finx < 0 :
            self.Envers = True
            self.posDepartx =Finx
            self.posDeparty =Finy
            self.posFinx =Departx
            self.posFiny =Departy
            self.setObstacle(self.obstacle_1_x,self.obstacle_1_y,self.obstacle_2_x,self.obstacle_2_y)
            self.SortieMax = 96
        # Start right of the end: already in the canonical direction.
        elif Departx-Finx > 0:
            self.posDepartx =Departx
            self.posDeparty =Departy
            self.posFinx =Finx
            self.posFiny =Finy
            self.setObstacle(self.obstacle_1_x,self.obstacle_1_y,self.obstacle_2_x,self.obstacle_2_y)
            self.SortieMax = 96
        # Both endpoints inside the central band: push the obstacle boxes
        # far away so they are effectively ignored.
        if 145 < Departx < 205 and 145 < Finx < 205:
            self.setObstacle(1000,1000,1000,1000)
    def PathFinding(self,Departx,Departy,Finx,Finy):
        """Compute and return the obstacle-avoiding waypoint list from
        (Departx, Departy) to (Finx, Finy).

        Builds a visibility-style graph with Trouvetrajectoire, runs
        networkx shortest_path between the "Depart" and "Fin" nodes
        (weighted by euclidean edge length), then converts the node names
        back to coordinates via FaireListe.
        """
        self.InitialisationChemin(Departx,Departy,Finx,Finy)
        self.gr = nx.Graph()
        self.Trouvetrajectoire(self.posDepartx,self.posDeparty,self.posFinx,self.posFiny)
        # NOTE(review): this Graph() is immediately overwritten by the
        # shortest_path node list on the next line -- dead assignment.
        self.grs = nx.Graph()
        self.grs = nx.shortest_path(self.gr,"Depart","Fin","weight")
        if self.printmess:
            print self.grs
        self.FaireListe()
        if self.printmess:
            print self.liste
        return self.liste
    def Trouvetrajectoire(self,Posdx,Posdy,Posfx,Posfy):
        """Grow the path graph self.gr until no new obstacle corner is hit.

        ParcourireLigne sets TrouveO plus one flag per obstacle corner that
        still needs its own line-of-sight walk towards the goal; this loop
        drains those flags until every discovered corner has been processed.
        """
        self.TrouveO = True
        self.TrouveO1 = False
        self.TrouveO2 = False
        self.TrouveO13 = False
        self.TrouveO14 = False
        self.TrouveO23 = False
        self.TrouveO24 = False
        # First walk: straight from the start point.
        self.ParcourireLigne(Posdx,Posdy,Posfx,Posfy,"Depart")
        while(self.TrouveO == True):
            self.TrouveO=False
            if self.printmess:
                print "Tour de faite"
            # Re-walk from each corner flagged during the previous pass.
            if self.TrouveO14:
                if self.printmess:
                    print("Traject 14")
                self.TrouveO14=False
                self.ParcourireLigne(self.Ox14,self.Oy14,Posfx,Posfy,"O14")
            if self.TrouveO13:
                if self.printmess:
                    print("Traject 13")
                self.TrouveO13=False
                self.ParcourireLigne(self.Ox13,self.Oy13,Posfx,Posfy,"O13")
            if self.TrouveO24:
                if self.printmess:
                    print("Traject 24")
                self.TrouveO24=False
                self.ParcourireLigne(self.Ox24,self.Oy24,Posfx,Posfy,"O24")
            if self.TrouveO23:
                #print("Traject 23")
                self.TrouveO23=False
                self.ParcourireLigne(self.Ox23,self.Oy23,Posfx,Posfy,"O23")
    def ParcourireLigne(self,Posdx,Posdy,Posfx,Posfy,depart):
        """Walk the segment from (Posdx, Posdy) towards (Posfx, Posfy) one
        x-step at a time; on the first intersection with an obstacle's
        padded box, add detour-corner edges (weighted by distance) to
        self.gr and flag the corners to be explored next.

        ``depart`` is the graph-node name used as the edge origin.
        NOTE(review): the slope uses Python 2 integer division when the
        coordinates are ints, and several comparisons mix x and y
        attributes (e.g. ``Posdx > self.Oy21``) -- confirm before porting.
        """
        ad = Posfy-Posdy
        bd = Posdx-Posfx
        #print "bd: %d" % bd
        if bd ==0:
            bd = bd+1
        tanA = ad/bd
        b = 1
        self.TrouveO = False
        while bd > b and self.TrouveO ==False :
            a = (tanA * b)
            posy = a + Posdy
            posx = Posdx - b
            #print "posx: %d" % posx
            # print "posy: %d" % posy
            # ---- current point lies inside obstacle 2's padded box ----
            if posy>= self.Oy24 and posy<=self.Oy21 and posx>= self.Ox24 and posx<=self.Ox21:
                if self.printmess:
                    print(" O2")
                # Compute the distances
                Sortie21 = self.EstSortie(self.Oy21)
                Sortie22 = self.EstSortie(self.Oy22)
                Sortie12 = self.EstSortie(self.Oy12)
                Sortie11 = self.EstSortie(self.Oy11)
                distx=self.Ox21 - Posdx
                if Posdx > self.Oy21:
                    TrouveVO21 = self.verifierTrajectoire(Posdx,Posdy,self.Ox21,self.Oy21,2)
                else:
                    TrouveVO21 = self.verifierTrajectoire(self.Ox21,self.Oy21,Posdx,Posdy,2)
                Colision21 = self.verifierTrajectoire(self.Ox21,self.Oy21,self.Ox23,self.Oy23,2)
                # Corner 21 reachable: route Depart -> O21 -> O23.
                if Sortie21==False and TrouveVO21 == False and Colision21 == False:
                    #print "Trouver 21"
                    dist = self.CalculeDiagonal(distx,self.Oy21 - Posdy)
                    self.gr.add_edge(depart,"O21" , weight=dist )
                    dist = self.Ox21-self.Ox23
                    self.gr.add_edge("O21","O23" , weight=dist )
                    self.TrouveO = True
                    self.TrouveO23=True
                if Posdx > self.Oy22:
                    TrouveVO22 = self.verifierTrajectoire(Posdx,Posdy,self.Ox22,self.Oy22,2)
                else:
                    TrouveVO22 = self.verifierTrajectoire(self.Ox22,self.Oy22,Posdx,Posdy,2)
                Colision22=self.verifierTrajectoire(self.Ox22,self.Oy22,self.Ox24,self.Oy24,2)
                # Corner 22 reachable: route Depart -> O22 -> O24.
                if Sortie22==False and TrouveVO22 == False and Colision22 == False:
                    if self.printmess:
                        print "Trouver 22"
                    dist = self.CalculeDiagonal(distx,self.Oy22 - Posdy)
                    self.gr.add_edge(depart,"O22" , weight=dist )
                    dist = self.Ox22-self.Ox24
                    self.gr.add_edge("O22","O24" , weight=dist )
                    self.TrouveO = True
                    self.TrouveO24=True
                # Both top edges blocked: detour through obstacle 1's corners.
                if Colision21 and Colision22 and Sortie21 == False and Sortie22 == False :
                    if self.printmess:
                        print "Execption1"
                    if Sortie11 == False:
                        dist = self.CalculeDiagonal(distx,self.Oy21 - Posdy)
                        self.gr.add_edge(depart,"O21" , weight=dist )
                        dist = self.Ox21 - self.Ox13
                        self.gr.add_edge("O21","O13" , weight=dist)
                        dist = self.CalculeDiagonal(distx,self.Oy22 - Posdy)
                        self.TrouveO = True
                        self.TrouveO13 = True
                    if Sortie12==False:
                        self.gr.add_edge(depart,"O22" , weight=dist )
                        self.gr.add_edge("O22","O14" , weight=dist)
                        self.TrouveO = True
                        self.TrouveO14 = True
                if Colision21 and Sortie21==False and self.TrouveO13 == False:
                    if self.printmess:
                        print "Colisiont 21"
                    if Sortie11==False :
                        dist = self.CalculeDiagonal(distx,self.Oy11 - Posdy)
                        self.gr.add_edge(depart,"O11" , weight=dist)
                        dist = self.Ox11-self.Ox13
                        self.gr.add_edge("O11","O13" , weight=dist)
                        self.TrouveO = True
                        self.TrouveO13 = True
                if Colision22 and Sortie21==False and self.TrouveO14 == False:
                    if self.printmess:
                        print "Colisiont 22"
                    if Sortie12==False:
                        if self.Ox14 < self.Ox24:
                            dist = self.CalculeDiagonal(distx,self.Oy12 - Posdy)
                            self.gr.add_edge(depart,"O12" , weight=dist)
                            dist = self.Ox12-self.Ox14
                            self.gr.add_edge("O12","O14" , weight=dist)
                            self.TrouveO = True
                            self.TrouveO14 = True
                        else :
                            dist = self.CalculeDiagonal(distx,self.Oy21 - Posdy)
                            self.gr.add_edge(depart,"O21" , weight=dist)
                            dist = self.Ox21-self.Ox23
                            self.gr.add_edge("O21","O23" , weight=dist)
                            self.TrouveO = True
                            self.TrouveO23 = True
                if Colision21 and Sortie22:
                    if self.printmess:
                        print "Perdu"
                    if Sortie21 == False:
                        TrouveTO12 =self.verifierTrajectoire(self.Ox21,self.Ox21,self.Ox12,self.Oy12,0)
                        if TrouveTO12 == False:
                            if self.printmess:
                                print "Pas sortie 21"
                            if Sortie11==False:
                                dist = self.CalculeDiagonal(distx,self.Oy21 - Posdy)
                                self.gr.add_edge(depart,"O21" , weight=dist)
                                dist = self.CalculeDiagonal(distx,self.Oy11 - self.Oy21)
                                self.gr.add_edge("O21","O11" , weight=dist)
                                dist = self.Ox11-self.Ox13
                                self.gr.add_edge("O11","O13" , weight=dist)
                                self.TrouveO = True
                                self.TrouveO13 = True
                        else:
                            TrouveTO11 =self.verifierTrajectoire(self.Ox11,self.Ox11,Posdx,Posdy,0)
                            if Sortie11==False :
                                dist = self.CalculeDiagonal(distx,self.Oy11 - Posdy)
                                self.gr.add_edge(depart,"O11" , weight=dist)
                                dist = self.Ox12-self.Ox14
                                self.gr.add_edge("O11","O13" , weight=dist)
                                self.TrouveO=True
                                self.TrouveO13 = True
                    if self.Ox14 > self.Ox24 :
                        TrouveTO14 =self.verifierTrajectoire(Posdx,Posdy,self.Ox14,self.Ox14,0)
                        if TrouveTO14 == False:
                            dist = self.CalculeDiagonal(distx,self.Oy13 -self.Oy23)
                            self.gr.add_edge(depart,"O23" , weight=dist)
                            self.TrouveO = True
                            self.TrouveO23 = True
                if Colision22 and Sortie21:
                    if self.printmess:
                        print "Execption"
                    if Sortie22 == False:
                        if self.printmess:
                            print "Pas sortie 22"
                        TrouveTO22 =self.verifierTrajectoire(self.Ox22,self.Oy22,self.Ox12,self.Oy12,0)
                        if TrouveTO22 == False:
                            if Sortie12==False:
                                dist = self.CalculeDiagonal(distx,self.Oy22 - Posdy)
                                self.gr.add_edge(depart,"O22" , weight=dist)
                                dist = self.CalculeDiagonal(distx,self.Oy12- self.Oy22)
                                self.gr.add_edge("O22","O12" , weight=dist)
                                dist = self.Ox12-self.Ox14
                                self.gr.add_edge("O12","O14" , weight=dist)
                                self.TrouveO = True
                                self.TrouveO14 = True
                        else:
                            if Sortie12==False:
                                dist = self.CalculeDiagonal(distx,self.Oy12 - Posdy)
                                self.gr.add_edge(depart,"O12" , weight=dist)
                                dist = self.Ox12-self.Ox14
                                self.gr.add_edge("O12","O14" , weight=dist)
                                self.TrouveO = True
                                self.TrouveO14 = True
                    if self.Ox14 > self.Ox24:
                        dist = self.CalculeDiagonal(distx,self.Oy14 -self.Oy24)
                        self.gr.add_edge(depart,"O24" , weight=dist)
                        self.TrouveO = True
                        self.TrouveO24 = True
                if Sortie11 and Sortie22 and Colision21==False and Colision22==False:
                    dist = self.CalculeDiagonal(distx,self.Oy21 - Posdy)
                    self.gr.add_edge(depart,"O21" , weight=dist)
                    dist = self.Ox21-self.Ox23
                    self.gr.add_edge("O21","O23" , weight=dist)
                    self.TrouveO = True
                    self.TrouveO23 = True
                if Sortie12 and Sortie21 and Colision21==False and Colision22==False:
                    dist = self.CalculeDiagonal(distx,self.Oy22 - Posdy)
                    self.gr.add_edge(depart,"O22" , weight=dist)
                    dist = self.Ox21-self.Ox23
                    self.gr.add_edge("O22","O24" , weight=dist)
                    self.TrouveO = True
                    self.TrouveO24 = True
                # Nothing worked: shrink the clearance margin and rebuild
                # the obstacle boxes so a later pass may squeeze through.
                if self.TrouveO == False:
                    if self.printmess:
                        print "Pas trouver dobstacle"
                    self.ga = 17
                    self.setObstacle( self.obstacle_1_x, self.obstacle_1_y, self.obstacle_2_x, self.obstacle_2_y)
            # ---- current point lies inside obstacle 1's padded box ----
            if posy >= self.Oy14 and posy<=self.Oy11 and posx>=self.Ox14 and posx<=self.Ox11:
                if self.printmess:
                    print(" O1")
                Sortie21 = self.EstSortie(self.Oy21)
                Sortie22 = self.EstSortie(self.Oy22)
                Sortie12 = self.EstSortie(self.Oy12)
                Sortie11 = self.EstSortie(self.Oy11)
                if Posdx > self.Oy11:
                    TrouveVO11 = self.verifierTrajectoire(Posdx,Posdy,self.Ox11,self.Oy11,1)
                else:
                    TrouveVO11 = self.verifierTrajectoire(self.Ox11,self.Oy11,Posdx,Posdy,1)
                Colision11 =self.verifierTrajectoire(self.Ox11,self.Oy11,self.Ox13,self.Oy13,1)
                # Compute the distances
                distx= self.Ox11 - Posdx
                if Sortie11==False and TrouveVO11 == False and Colision11 == False:
                    if self.printmess:
                        print "trouver 11"
                    dist = self.CalculeDiagonal(distx,self.Oy11 - Posdy)
                    #print dist
                    self.gr.add_edge(depart,"O11" , weight=dist )
                    dist = self.Ox11-self.Ox13
                    self.gr.add_edge("O11","O13" , weight=dist )
                    self.TrouveO = True
                    self.TrouveO13 = True
                if Posdx > self.Oy12:
                    TrouveVO12 = self.verifierTrajectoire(Posdx,Posdy,self.Ox12,self.Oy12,1)
                else:
                    TrouveVO12 = self.verifierTrajectoire(self.Ox12,self.Oy12,Posdx,Posdy,1)
                Colision12 =self.verifierTrajectoire(self.Ox12,self.Oy12,self.Ox14,self.Oy14,1)
                if Sortie12==False and TrouveVO12 == False and Colision12 == False:
                    if self.printmess:
                        print "trouver 12"
                    dist = self.CalculeDiagonal(distx,self.Oy12 - Posdy)
                    #print dist
                    self.gr.add_edge(depart,"O12" , weight=dist)
                    dist = self.Ox12-self.Ox14
                    self.gr.add_edge("O12","O14" , weight=dist )
                    self.TrouveO = True
                    self.TrouveO14 = True
                if Colision11 and Colision12 and Sortie11 == False and Sortie12 == False :
                    #print "Execption"
                    dist = self.CalculeDiagonal(distx,self.Oy12 - Posdy)
                    self.gr.add_edge(depart,"O12" , weight=dist )
                    dist = self.CalculeDiagonal(distx,self.Oy11 - Posdy)
                    self.gr.add_edge(depart,"O11" , weight=dist )
                    dist = self.Ox11 - self.Ox23
                    self.gr.add_edge("O11","O23" , weight=dist)
                    self.gr.add_edge("O12","O24" , weight=dist)
                    self.TrouveO = True
                    self.TrouveO23 = True
                    self.TrouveO24 = True
                if Colision11 and Sortie12==False and self.TrouveO==False:
                    print "Colisiont 11"
                    if Sortie11== False:
                        if self.Ox13 > self.Ox23:
                            if Sortie21 == False:
                                dist = self.CalculeDiagonal(distx,self.Oy21 - Posdy)
                                self.gr.add_edge(depart,"O21" , weight=dist)
                                dist = self.Ox21-self.Ox23
                                #print dist
                                self.gr.add_edge("O21","O23" , weight=dist)
                                self.TrouveO = True
                                self.TrouveO23 = True
                        else:
                            if Sortie21==False:
                                dist = self.CalculeDiagonal(distx,self.Oy11 - Posdy)
                                self.gr.add_edge(depart,"O11" , weight=dist)
                                dist = self.CalculeDiagonal(distx,self.Oy21 - self.Oy11)
                                self.gr.add_edge("O11","O21" , weight=dist)
                                dist = self.Ox21-self.Ox23
                                self.gr.add_edge("O21","O23" , weight=dist)
                                self.TrouveO = True
                                self.TrouveO23 = True
                    if self.Ox14 < self.Ox24:
                        if Sortie21:
                            dist = self.CalculeDiagonal(distx,self.Oy13 -self.Oy23)
                            self.gr.add_edge(depart,"O23" , weight=dist)
                            self.TrouveO = True
                            self.TrouveO23 = True
                if Colision12 and Sortie11==False :
                    print "Colisiont 12"
                    if self.Ox14 < self.Ox24:
                        dist = self.CalculeDiagonal(distx,self.Oy22 - Posdy)
                        self.gr.add_edge(depart,"O22" , weight=dist)
                        dist = self.Ox22-self.Ox24
                        self.gr.add_edge("O22","O24" , weight=dist)
                        self.TrouveO = True
                        self.TrouveO24 = True
                    else :
                        if Sortie22 == False:
                            dist = self.CalculeDiagonal(distx,self.Oy12 - Posdy)
                            self.gr.add_edge(depart,"O12" , weight=dist)
                            dist = self.CalculeDiagonal(distx,self.Oy22 - self.Oy12)
                            self.gr.add_edge("O12","O22" , weight=dist)
                            dist = self.Ox22-self.Ox24
                            self.gr.add_edge("O22","O24" , weight=dist)
                            self.TrouveO = True
                            self.TrouveO24 = True
                    if self.Ox14 < self.Ox24:
                        dist = self.CalculeDiagonal(distx,self.Oy14 -self.Oy24)
                        self.gr.add_edge(depart,"O24" , weight=dist)
                        self.TrouveO = True
                        self.TrouveO24 = True
                if (Colision11 and Sortie12)or(Colision12 and Sortie11):
                    if self.printmess:
                        print "Execption"
                    if Sortie11 == False:
                        if self.printmess:
                            print "Pas sortie 11"
                        TrouveTO12 =self.verifierTrajectoire(self.Ox11,self.Ox11,self.Ox21,self.Oy21,0)
                        if TrouveTO12 == False:
                            if self.printmess:
                                print "TrouveTO12 == False"
                            if Sortie21==False:
                                dist = self.CalculeDiagonal(distx,self.Oy11 - Posdy)
                                self.gr.add_edge(depart,"O11" , weight=dist)
                                dist = self.CalculeDiagonal(distx,self.Oy11 - self.Oy21)
                                self.gr.add_edge("O11","O21" , weight=dist)
                                dist = self.Ox21-self.Ox23
                                self.gr.add_edge("O21","O23" , weight=dist)
                                self.TrouveO = True
                                self.TrouveO23 = True
                        else:
                            dist = self.CalculeDiagonal(distx,self.Oy21-Posdy)
                            self.gr.add_edge(depart,"O22" , weight=dist)
                            self.gr.add_edge("O22","O24" , weight=dist)
                            self.TrouveO24 = True
                    if Sortie12 == False:
                        dist = self.CalculeDiagonal(distx,self.Oy22 - Posdy)
                        self.gr.add_edge(depart,"O12" , weight=dist)
                        dist = self.CalculeDiagonal(distx,self.Oy12- self.Oy22)
                        self.gr.add_edge("O12","O22" , weight=dist)
                        dist = self.Ox22-self.Ox24
                        self.gr.add_edge("O22","O24" , weight=dist )
                        self.TrouveO = True
                        self.TrouveO24 = True
                if self.TrouveO == False:
                    print "Objet pas trouver"
                    self.ga = 17
                    self.setObstacle( self.obstacle_1_x, self.obstacle_1_y, self.obstacle_2_x, self.obstacle_2_y)
            b = b+1
        # No obstacle on the remaining segment: connect directly to "Fin".
        if self.TrouveO == False :
            #print("fin")
            dist = self.CalculeDiagonal(Posfx- Posdx,Posfy- Posdy)
            #print dist
            self.gr.add_edge(depart,"Fin" , weight=dist)
def verifierTrajectoire(self,Posdx,Posdy,Posfx,Posfy,obstacle):
ad = Posdy-Posfy
bd = Posdx-Posfx
if bd ==0:
bd = bd+1
tanA = ad/bd
b =1
bd = abs(Posdx-Posfx)
while bd > b :
a = (tanA * b)
posy = a + Posdy
posx = Posdx - b
if obstacle!=1:
if posy >= self.Oy14 and posy<=self.Oy11 and posx>=self.Ox14 and posx<=self.Ox11:
#print "O1Autre"
return True
if obstacle!=2:
if posy >= self.Oy24 and posy<=self.Oy21 and posx>=self.Ox24 and posx<=self.Ox21:
#print "O2Autre"
return True
b = b + 1
return False
def EstSortie(self,Position):
if Position < self.SortieMax and Position >15:
return False
else:
if self.printmess:
print "Sortie"
return True
def FaireListe(self):
self.liste = []
self.nbrelement = 0
n1 = ""
for n in self.grs:
if self.Envers==False and self.Inverse==False:
self.liste.append((self.TrouverValeurX(n),self.TrouverValeurY(n)))
if self.Envers==False and self.Inverse==True:
self.liste.append((self.TrouverValeurY(n),self.TrouverValeurX(n)))
if self.Envers==True and self.Inverse==True:
self.liste.insert(0,(self.TrouverValeurY(n),self.TrouverValeurX(n)))
if self.Envers==True and self.Inverse==False:
self.liste.insert(0,(self.TrouverValeurX(n),self.TrouverValeurY(n)))
def CalculeDiagonal(self,x,y):
c = x**2 + y**2
return math.sqrt(c)
def TrouverValeurX(self,point):
if point =="Depart":
return self.posDepartx
elif point =="O11":
return self.Ox11
elif point =="O12":
return self.Ox12
elif point =="O13":
return self.Ox13
elif point =="O14":
return self.Ox14
elif point =="O21":
return self.Ox21
elif point =="O22":
return self.Ox22
elif point =="O23":
return self.Ox23
elif point =="O24":
return self.Ox24
elif point =="Fin":
return self.posFinx
def TrouverValeurY(self,point):
if point =="Depart":
return self.posDeparty
elif point =="O11":
return self.Oy11
elif point =="O12":
return self.Oy12
elif point =="O13":
return self.Oy13
elif point =="O14":
return self.Oy14
elif point =="O21":
return self.Oy21
elif point =="O22":
return self.Oy22
elif point =="O23":
return self.Oy23
elif point =="O24":
return self.Oy24
elif point =="Fin":
return self.posFiny
|
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%% [markdown]
# ## Integrantes
#
# 1. Gabriela Alfaro
# 2. Sebastian Guerraty
# 3. Maria Jose Jimenez
#%% [markdown]
# # Instrucciones
#
# El laboratorio tiene 6 ptos, donde obtener 6 ptos equivale a un 7.0 y 0 ptos un 1.0.
#
# El formato de entrega será subir a u-cursos un Jupyter notebook
# laboratorio4.ipynb, que se debe ejecutar sin errores desde la primera celda a la última. Todo el código debe estar en el mismo notebook, el código debe estar comentado y testeado, el notebook debe estar escrito en forma de informe técnico, escribiendo una celda markdown antes o después de cada celda de código que arroja algún output.
#%% [markdown]
# # Laboratorio 4: Clustering
#
# Objetivos:
#
#
#
# 1. Entender en qué casos se puede utilizar clustering y cuál es su fin
# 2. Conocer y aplicar modelos de clustering
# 3. Conocer y aplicar métricas relacionadas a clustering
# 4. Entender diferencia entre clustering y aprendizaje no supervisado
#%% [markdown]
# # Investigación (2 ptos)
#
# Elija 4 de 5 preguntas (`0.5 ptos c/u`)
#
#
#
# 1. Explique un caso en que pueda fallar K-Means y mencione una forma de solucionarlo.
#
# R: k-means al ser basado en la distancia euclidiana está orientado a clusters con forma circular, por lo que en clusters donde los grupos, o puntos pertenecientes a un cluster sean de una forma que se crucen en términos de distancia euclidiana, k-means tendría dificultad para identificar correctamente los clusters apropiados.
# Una forma de corregir el problema es utilizar un método de clustering que permita identificar los cluster sin estar basado en distancia euclidiana, por ejemplo un método basado en densidad de puntos o vecinos más cercanos, como el caso del método DBSCAN
# <img src="images/kmeans_fail.png" style="width:600px;height:300px;">
# 2. ¿Es PCA un método de clustering? Justifique.
#
# R: PCA es un método de compresión de información que es particularmente útil cuando la cantidad de atributos en un DF es grande, en particular cuando se tiene un número interesante de variables que indiquen un mismo concepto central. No es un método de clustering, ya que no asigna un identificador de pertenencia ni total ni parcial a algún grupo como si lo hace un método de clustering .
# 3. Investigue las siguientes métricas: *purity, silhouette score.* Describa ventajas y desventajas.
# 4. ¿En qué consiste el algoritmo Gaussian Mixture Models (GMM)? Comente su relación con K-Means.
#
# R: Mixturas gaussianas asume que los datos se componen de una cantidad N de distribuciones gaussianas de parámetros desconocidos; se puede considerar como una generalización del método k-means ya que incorpora información de la covarianza entre variables.
# Adicionalmente, no tiene la limitación de k-means donde se orientan clusters con la distancia euclidiana, que termina en esferas en N dimensiones, GMM admite elipsoides debido a que asume que los datos dentro del cluster distribuyen como una gausiana.
# 5. Explique como hallaría el número "óptimo" de clusters en un problema de clustering.
#
#%% [markdown]
# # Práctica (4 ptos)
#
#
# Considere el problema que enfrenta una empresa del retail que desea segmentar a sus clientes con el fin de entender mejor su comportamiento y así poder realizar ofertas específicas para cada grupo.
#
# Para lo anterior cuenta con los siguientes datos:
#
#
# 1. Edad, género, educación, lugar dónde vive, teléfono, etc,
# 2. Si es miembro o no del club de puntos, gastos realizados en un año, y una métrica otorgada (spending score) por el departamento de marketing que indica qué tan buenos gastadores son, donde 100 corresponde a lo más alto y 0 a lo más bajo.
#
# **Notas:**
#
# 1. No posee registro de los gastos de quienes no pertenecen al club de puntos, sin embargo según lo indicado por el departamento de marketing es una variable muy importante.
# 2. A priori debería existir una correlación entre en el spending score y los gastos de una persona, aunque no necesariamente es así, por lo que se recomienda estudiar esta relación.
#
#%% [markdown]
# **Tareas:**
#
#
# 1. Realice un análisis exploratorio de los datos (cantidad de registros, medias, medianas, missing values, etc) y muestre al menos 2 gráficos de variables que considere relevantes para el análisis. (`0.5 ptos`)
# 2. Cree una base de datos consistente (limpieza de NAs, transformaciones, imputaciones) y deje claramente expresadas las * features* que utilizará para el clustering (al menos 3). Justifique las variables elegidas/creadas apoyándose en visualizaciones del punto 1. (`0.5 ptos`)
# 3. Utilice K-Means y con la ayuda del método del codo, encuentre el número "adecuado" de clusters, comente si lo encontrado por los métodos hace sentido y justifique su elección. Comente además respecto al tamaño de cada cluster y los centroides. (`1.5 ptos`)
#
# `Nota:Se recomienda utilizar PCA y gráfico de radar con el fin de visualizar los clusters y sus centroides.`
#
# 4. Utilice Clustering Jerárquico con al menos 2 linkage distintos al mostrado en clases y visualice sus respectivos dendogramas. Compare la cantidad de clusters encontrados con K-Means. ¿Se encuentra la misma cantidad? En caso de existir diferencias explique por qué cree que se dan. (`1.5 ptos`)
#
#
#
#%% [markdown]
# # Importar Librerías
#%%
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
import seaborn as sns
import os
#%%
# Load the customer dataset straight from the course repository.
df = pd.read_csv('https://raw.githubusercontent.com/Camiloez/Labs-Data-Mining/master/data.csv')
#%%
df.head(5)
#%%
df.describe()
#%%
# Missing values per column.
df.isna().sum()
#%%
df.dtypes
#%% [markdown]
# Matriz de correlacion para establecer relacion entre variables
#%%
# BUG FIX: this code was attached to a markdown cell; it needs its own
# code-cell marker to execute as a cell.
corr = df.drop('CustomerID', axis=1).corr()
corr.style.background_gradient(cmap='coolwarm')
#%%
# Encode gender as a dummy variable (first column of the one-hot encoding).
df['female'] = pd.get_dummies(df['Genre']).iloc[:, 0]
sns.pairplot(df.iloc[:, 5:13])
#%%
# '-' marks members without recorded expenses; treat them as missing.
df["Expenses"] = df["Expenses"].replace('-', np.nan)
#%%
sns.scatterplot(x="Annual Income (k$)", y="Expenses", data=df[df["Expenses"].notnull()])
#%%
# BUG FIX: this bare URL was a SyntaxError -- keep it as a reference comment.
# https://scikit-learn.org/stable/modules/mixture.html
import graphics
from board import Board
from input import get_move_int
WELCOME_MESSAGE = "Welcome to 15 puzzle"
EXIT_MESSAGE = "WooHoo Genius!"


def main():
    """Run one interactive game of the 15 puzzle until the board is solved."""
    board = Board()
    board.start()
    graphics.display_message(WELCOME_MESSAGE)
    graphics.display_board(board.get_board())
    # Keep prompting for moves until the tiles are in order.
    while not board.in_order():
        graphics.display_board(board.move(get_move_int()))
    graphics.display_message(EXIT_MESSAGE)


if __name__ == '__main__':
    main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GetProjectDetailsNonSqlTaskInput(Model):
    """Input for the task that reads configuration from project artifacts.

    :param project_name: Name of the migration project
    :type project_name: str
    :param project_location: A URL that points to the location to access
     project artifacts
    :type project_location: str
    """
    # Both fields are mandatory; msrest uses this for client-side validation.
    _validation = {
        'project_name': {'required': True},
        'project_location': {'required': True},
    }
    # Python attribute -> wire-format (JSON) key mapping for the serializer.
    _attribute_map = {
        'project_name': {'key': 'projectName', 'type': 'str'},
        'project_location': {'key': 'projectLocation', 'type': 'str'},
    }
    def __init__(self, project_name, project_location):
        super(GetProjectDetailsNonSqlTaskInput, self).__init__()
        self.project_name = project_name
        self.project_location = project_location
|
import os, sys
from os.path import exists
from os import system
import tensorflow as tf
import numpy as np
import scipy.misc
import scipy.stats
from scipy.stats import stats
from utils import data_list_batch_1_30_4
# BUG FIX: NumPy >= 1.14 rejects non-numeric thresholds ('nan' raises);
# np.inf is the supported way to ask for full (untruncated) array printing.
np.set_printoptions(threshold=np.inf)
# Model
length = 30
filter_size = [3, 5, 7]
filter_num = [100, 70, 40]
node_1 = 80
node_2 = 60
l_rate = 0.001
inputs_sg = tf.placeholder(tf.float32, [None, 1, length, 4])
inputs = inputs_sg
y_ = tf.placeholder(tf.float32, [None, 1])
targets = y_
is_training = False
def create_new_conv_layer(input_data, num_input_channels, num_filters, filter_shape, pool_shape, name):
    """Build one conv -> ReLU -> dropout -> average-pool branch.

    input_data: [batch, 1, length, channels] tensor; filter_shape/pool_shape
    are [height, width] pairs; returns the pooled activation tensor.
    """
    # setup the filter input shape for tf.nn.conv_2d
    conv_filt_shape = [filter_shape[0], filter_shape[1], num_input_channels, num_filters]
    # initialise weights and bias for the filter
    weights = tf.Variable(tf.truncated_normal(conv_filt_shape, stddev=0.03), name=name+'_W')
    bias = tf.Variable(tf.truncated_normal([num_filters]), name=name+'_b')
    out_layer = tf.nn.conv2d(input_data, weights, [1, 1, 1, 1], padding='VALID')
    out_layer += bias
    # BUG FIX: tf.layers.dropout's third positional argument is noise_shape,
    # not the training flag -- pass the flag by keyword.
    out_layer = tf.layers.dropout(tf.nn.relu(out_layer), rate=0.3, training=is_training)
    # Average-pool along the sequence axis with stride 2.
    ksize = [1, pool_shape[0], pool_shape[1], 1]
    strides = [1, 1, 2, 1]
    out_layer = tf.nn.avg_pool(out_layer, ksize=ksize, strides=strides, padding='SAME')
    return out_layer
# Three parallel convolution branches with different kernel widths.
L_pool_0 = create_new_conv_layer(inputs, 4, filter_num[0], [1, filter_size[0]], [1, 2], name='conv1')
L_pool_1 = create_new_conv_layer(inputs, 4, filter_num[1], [1, filter_size[1]], [1, 2], name='conv2')
L_pool_2 = create_new_conv_layer(inputs, 4, filter_num[2], [1, filter_size[2]], [1, 2], name='conv3')
with tf.variable_scope('Fully_Connected_Layer1'):
    # Flattened width of each branch: 'VALID' conv then stride-2 pooling.
    layer_node_0 = int((length-filter_size[0])/2)+1
    node_num_0 = layer_node_0*filter_num[0]
    layer_node_1 = int((length-filter_size[1])/2)+1
    node_num_1 = layer_node_1*filter_num[1]
    layer_node_2 = int((length-filter_size[2])/2)+1
    node_num_2 = layer_node_2*filter_num[2]
    L_flatten_0 = tf.reshape(L_pool_0, [-1, node_num_0])
    L_flatten_1 = tf.reshape(L_pool_1, [-1, node_num_1])
    L_flatten_2 = tf.reshape(L_pool_2, [-1, node_num_2])
    # Concatenate all branches into a single feature vector.
    L_flatten = tf.concat([L_flatten_0, L_flatten_1, L_flatten_2], 1, name='concat')
    node_num = node_num_0 + node_num_1 + node_num_2
    W_fcl1 = tf.get_variable("W_fcl1", shape=[node_num, node_1])
    B_fcl1 = tf.get_variable("B_fcl1", shape=[node_1])
    L_fcl1_pre = tf.nn.bias_add(tf.matmul(L_flatten, W_fcl1), B_fcl1)
    L_fcl1 = tf.nn.relu(L_fcl1_pre)
    # BUG FIX: the third positional argument of tf.layers.dropout is
    # noise_shape, not training -- pass the flag by keyword.
    L_fcl1_drop = tf.layers.dropout(L_fcl1, rate=0.3, training=is_training)
with tf.variable_scope('Fully_Connected_Layer2'):
    W_fcl2 = tf.get_variable("W_fcl2", shape=[node_1, node_2])
    B_fcl2 = tf.get_variable("B_fcl2", shape=[node_2])
    L_fcl2_pre = tf.nn.bias_add(tf.matmul(L_fcl1_drop, W_fcl2), B_fcl2)
    L_fcl2 = tf.nn.relu(L_fcl2_pre)
    # BUG FIX: same keyword fix as above.
    L_fcl2_drop = tf.layers.dropout(L_fcl2, rate=0.3, training=is_training)
with tf.variable_scope('Output_Layer'):
    # Single-unit linear regression head.
    W_out = tf.get_variable("W_out", shape=[node_2, 1])#, initializer=tf.contrib.layers.xavier_initializer())
    B_out = tf.get_variable("B_out", shape=[1])#, initializer=tf.contrib.layers.xavier_initializer())
    outputs = tf.nn.bias_add(tf.matmul(L_fcl2_drop, W_out), B_out)
# Define loss function and optimizer
obj_loss = tf.reduce_mean(tf.square(targets - outputs))
optimizer = tf.train.AdamOptimizer(l_rate).minimize(obj_loss)
#def end: def __init__
#class end: DeepCas9
def train_DeepCas9(trainData,trainDataAll,testDataAll,ENZ):
    """Train the module-level graph for 110 epochs, printing per-epoch
    train/test MSE and Spearman correlation.

    trainData: batched training data consumed via data_list_batch_1_30_4;
    trainDataAll: iterable of (features, labels) evaluation batches;
    testDataAll: (features, labels) pair for the held-out test set.
    NOTE(review): ENZ is accepted but never used -- confirm intent.
    """
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    for e in range(110): # for e in range(60):
        # One optimisation pass over the mini-batches.
        for featureData,targetLabel in data_list_batch_1_30_4(trainData):
            sess.run(optimizer, feed_dict={inputs_sg: featureData, y_:targetLabel})
        targetLabelList = []
        outputList = []
        trainMSEList = []
        trainMSEloss = 0
        # Evaluate on the full training set, accumulating per-batch MSE and
        # flattened predictions/targets for the correlation.
        for featureData,targetLabel in trainDataAll:
            output = outputs.eval(feed_dict={inputs_sg: featureData, y_: targetLabel})
            trainMSEloss_temp = obj_loss.eval(feed_dict={inputs_sg: featureData, y_: targetLabel})
            trainMSEList.append(trainMSEloss_temp)
            targetLabel_temp = targetLabel.squeeze().tolist()
            output_temp = output.squeeze().tolist()
            # squeeze() collapses single-element batches to a scalar; re-wrap.
            if not(type(output_temp) == list):
                output_temp = [output_temp]
            if not(type(targetLabel_temp) == list):
                targetLabel_temp = [targetLabel_temp]
            targetLabelList = targetLabelList + targetLabel_temp
            outputList = outputList + output_temp
        trainMSEloss = sum(trainMSEList) / len(trainMSEList)
        train_spcc, _ = stats.spearmanr(targetLabelList, outputList) #spearman rank correlation
        # Held-out test evaluation.
        featureData = testDataAll[0]
        targetLabel = testDataAll[1]
        output = outputs.eval(feed_dict={inputs_sg: featureData, y_: targetLabel})
        testMSEloss = obj_loss.eval(feed_dict={inputs_sg: featureData, y_: targetLabel})
        targetLabel_st = targetLabel.squeeze().tolist()
        output_cdnst = output.squeeze().tolist()
        test_spcc, _ = stats.spearmanr(targetLabel_st, output_cdnst)
        print("epch {0}: train_loss {1}, train_spcc {2} || test_loss {3}, test_spcc {4}".format(e,trainMSEloss,train_spcc,testMSEloss,test_spcc))
|
'''
https://www.hackerrank.com/challenges/find-second-maximum-number-in-a-list/problem
'''
def sol(arr):
    """Return the runner-up score: the second largest *distinct* value in arr."""
    distinct_desc = sorted(set(arr), reverse=True)
    return distinct_desc[1]
if __name__ == '__main__':
    # HackerRank driver: first line is the (unused) element count, second
    # line holds the space-separated scores.  sol() iterates the map object
    # exactly once, so passing the lazy map is fine.
    n = int(input())
    arr = map(int, input().split())
    print(sol(arr))
# -*- coding: utf-8 -*-
'''
Created on 2019-May-02 04:48:56
TICKET NUMBER -AI_1083
@author: Prazi
'''
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
from w3lib.html import remove_tags, replace_escape_chars
from Data_scuff.spiders.AI_1083.items import IaJohnsonIowacityBuildingPermitsSpiderItem
from Data_scuff.spiders.__common import CommonSpider,CustomSettings
from Data_scuff.utils.utils import Utils
from scrapy.http import FormRequest, Request
from inline_requests import inline_requests
from scrapy.selector.unified import Selector
from Data_scuff.utils.JavaScriptUtils import JavaScriptUtils
from Data_scuff.utils.searchCriteria import SearchCriteria
import scrapy
import re
import requests
import json
import operator
import itertools
import datetime
class IaJohnsonIowacityBuildingPermitsSpider(CommonSpider):
    """Scrape building-permit cases from the Iowa City (Johnson County, IA)
    Tidemark search application, following each case's detail page and
    classifying its actions into inspections and violations."""
    name = '1083_ia_johnson_iowacity_building_permits'
    allowed_domains = ['iowa-city.org']
    start_urls = ['http://www.iowa-city.org/IcgovApps/Tidemark/Search']
    main_url='http://www.iowa-city.org'
    # Detail pages can answer 500; accept them so parse_list can skip them
    # instead of Scrapy dropping the response.
    handle_httpstatus_list= [500]
    custom_settings = {
        'FILE_NAME':Utils.getRundateFileName('AI-1083_Permits_Buildings_IA_Johnson_IowaCity_CurationReady'),
        'JIRA_ID':'AI_1083',
        'DOWNLOAD_DELAY':0.5,
        'CONCURRENT_REQUESTS':1,
        'TRACKING_OPTIONAL_PARAMS':['case_number'],
        'COOKIES_ENABLED':True,
        'COOKIES_DEBUG':True,
        'HTTPCACHE_ENABLED':False,
        # 'JOBDIR' : CustomSettings.getJobDirectory('IaJohnsonIowacityBuildingPermitsSpider'),
        'TOP_HEADER':{'case action-notes': 'Case Action-Notes','case actions-date': 'Case Actions-Date','case actions-description': 'Case Actions-Description','case actions-status': 'Case Actions-Status','inspection_date': 'Case Actions-Date.1','inspection_description': '','inspection_pass_fail': 'Case Actions-Status.1','inspection_type': '','location_address_string': 'Address','permit_lic_desc': 'Description','permit_lic_no': 'Case Number','permit_lic_status': 'Status','permit_type': '','violation_date': '','violation_type': ''},
        'FIELDS_TO_EXPORT':['permit_lic_no','permit_lic_status','location_address_string','permit_lic_desc','case actions-date','case actions-description','case actions-status','case action-notes','inspection_date','inspection_pass_fail','inspection_description','inspection_type','violation_date','violation_type','permit_type','sourceName','url','ingestion_timestamp'],
        'NULL_HEADERS':['case actions-date', 'case actions-description', 'case actions-status', 'case action-notes']
    }
    # Case-number prefixes; each one is submitted as a separate search POST.
    SearchCriteria = ['ELE','ABN', 'BLD', 'CSR', 'DAC', 'DEM', 'DRC', 'EXC', 'FAP', 'FLD', 'FSP', 'HPC', 'MEC', 'PLM', 'PSD', 'WTR']
    check_first = True
    def parse(self, response):
        """Submit the next search prefix as a POST to the Tidemark search form."""
        # On the first call, slice the prefix list to this run's window
        # (self.start/self.end presumably arrive as spider arguments -- confirm).
        if self.check_first:
            self.check_first = False
            self.search_element=self.SearchCriteria[int(self.start):int(self.end)]
        if len(self.search_element) > 0:
            param = self.search_element.pop(0)
            form_data={
                'SearchTerms.CaseNumber':str(param),
                'SearchTerms.CaseAddress': ''
            }
            next_url='http://www.iowa-city.org/IcgovApps/Tidemark/Search?Length=10'
            yield FormRequest(url=next_url,formdata=form_data,callback= self.parse_list, dont_filter=True)
    @inline_requests
    def parse_list(self,response):
        """Walk the result rows, emit one item per case, then fetch each
        case's detail page (inline) and emit one item per case action."""
        # Accumulator for a single output row; fields are reset as rows build.
        meta={}
        meta['case_action_date']=meta['inspection_date']=meta['inspection_type']=meta['case_action_notes']=meta['case_action_status']=meta['case_action_description']=meta['description']=meta['address']=meta['status']=meta['case_number']=meta['inspection_pass_fail']=meta['inspection_description']=meta['violation_date']=meta['violation_type']=''
        # NOTE(review): hard-coded row window 16185:19001 looks like a manual
        # resume/sharding range -- confirm before reusing this spider.
        first_table=response.xpath('/html/body/div[2]/table//tr')[16185:19001]
        for i in first_table:
            meta['case_action_date']=meta['case_action_notes']=meta['case_action_status']=meta['case_action_description']=''
            meta['case_number']=i.xpath('td[1]/a/text()').extract_first()
            meta['status']=i.xpath('td[2]/text()').extract_first()
            address=i.xpath('td[3]/text()').extract_first()
            if address:
                meta['address']=address+', IowaCity, IA'
            else:
                meta['address']='IA'
            desc=self.data_clean(i.xpath('td[4]/text()').extract_first())
            if desc:
                meta['description']=desc
            else:
                meta['description']='Building Permit'
            # Base row for the case itself (no action data yet).
            yield self.save_to_csv(response,**meta)
            number_link=i.xpath('td[1]/a/@href').extract_first()
            next_page=self.main_url+str(number_link)
            # inline_requests: this yield blocks until the detail response arrives.
            link = yield scrapy.Request(url=next_page,dont_filter=True,meta={'optional':{'case_number':meta['case_number']}})
            status=link.status
            if status==500:
                pass
            else:
                table=link.xpath('/html/body/div[2]/table[1]//tr')[1:]
                if table:
                    for j in table:
                        meta['inspection_date']=meta['inspection_type']=meta['inspection_pass_fail']=meta['inspection_description']=meta['violation_date']=meta['violation_type']=''
                        meta['case_action_date']=j.xpath('td[1]/text()').extract_first()
                        meta['case_action_description']=self.data_clean(j.xpath('td[2]/text()').extract_first())
                        meta['case_action_status']=j.xpath('td[3]/text()').extract_first()
                        meta['case_action_notes']=j.xpath('td[4]/text()').extract()
                        meta['case_action_notes']=' '.join(meta['case_action_notes'])
                        if meta['case_action_description']:
                            # Inspection-like actions are mirrored into the
                            # inspection_* columns and cleared from the
                            # case-action columns.
                            if 'Inspection' in meta['case_action_description'] or 'Insp -' in meta['case_action_description'] or 'Initial inspection' in meta['case_action_description'] or 'Re-inspection' in meta['case_action_description'] or 'inspection' in meta['case_action_description']:
                                meta['inspection_date']=meta['case_action_date']
                                meta['inspection_type']='building_inspection'
                                meta['inspection_pass_fail']=meta['case_action_status']
                                meta['inspection_description']=meta['case_action_notes']
                                meta['violation_date']=meta['violation_type']=''
                                meta['case_action_status']=meta['case_action_notes']=''
                            if 'VIOLATION' in meta['case_action_description'] or 'violation' in meta['case_action_description']:
                                meta['violation_date']=meta['case_action_date']
                                meta['violation_type']='building_violation'
                        yield self.save_to_csv(response,**meta)
                else:
                    yield self.save_to_csv(response,**meta)
        # Chain to the next search prefix, if any remain.
        if len(self.search_element) > 0:
            yield scrapy.Request(url=self.start_urls[0], callback=self.parse, dont_filter=True)
    def save_to_csv(self,response,**meta):
        """Map the accumulated meta fields onto an exported item."""
        il = ItemLoader(item=IaJohnsonIowacityBuildingPermitsSpiderItem(),response=response)
        # il.default_input_processor = MapCompose(lambda v: v.strip(), remove_tags, replace_escape_chars)
        il.add_value('ingestion_timestamp', Utils.getingestion_timestamp())
        il.add_value('sourceName', 'IA_Johnson_IowaCity_Building_Permits')
        il.add_value('url', 'http://www.iowa-city.org/IcgovApps/Tidemark/Search')
        il.add_value('permit_lic_no',meta['case_number'])
        il.add_value('permit_lic_status',meta['status'])
        il.add_value('location_address_string',meta['address'])
        il.add_value('permit_lic_desc',meta['description'])
        il.add_value('case actions-date',meta['case_action_date'])
        il.add_value('case actions-description',meta['case_action_description'])
        il.add_value('case actions-status',meta['case_action_status'])
        il.add_value('case action-notes',meta['case_action_notes'])
        il.add_value('inspection_date',meta['inspection_date'])
        il.add_value('inspection_type',meta['inspection_type'])
        il.add_value('inspection_pass_fail',meta['inspection_pass_fail'])
        il.add_value('inspection_description',meta['inspection_description'])
        il.add_value('violation_date',meta['violation_date'])
        il.add_value('violation_type',meta['violation_type'])
        il.add_value('permit_type', 'building_permit')
        return il.load_item()
    def data_clean(self, value):
        """Strip HTML tags and collapse whitespace; return '' for falsy input
        or on any cleaning error."""
        if value:
            try:
                clean_tags = re.compile('<.*?>')
                desc_list = re.sub('\s+', ' ', re.sub(clean_tags, '', value))
                # NOTE(review): this replace is a no-op as written -- it was
                # presumably meant to unescape the HTML entity for '&'; confirm.
                desc_list_rep = desc_list.replace('&', '&')
                return desc_list_rep.strip()
            except:
                return ''
        else:
            return ''
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Raindrop.
#
# The Initial Developer of the Original Code is
# Mozilla Messaging, Inc..
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Tarek Ziade <tarek@mozilla.com>
import socket
import unittest
import httplib2
import json
import mock
import urllib2
from linkoauth.util import setup_config
from linkoauth.backends import google_
from linkoauth import Services
from linkoauth import sstatus
from linkoauth.tests.test_base import MockCache
_ACCOUNT = {'oauth_token': 'xxx',
'oauth_token_secret': 'xxx',
'profile': {'emails':
[{'value': 'tarek@ziade.org'}]}}
_CONFIG = {'oauth.yahoo.com.consumer_key': 'xxx',
'oauth.yahoo.com.consumer_secret': 'xxx',
'oauth.linkedin.com.consumer_key': 'xxx',
'oauth.linkedin.com.consumer_secret': 'xxx',
'oauth.twitter.com.consumer_key': 'xxx',
'oauth.twitter.com.consumer_secret': 'xxx'}
class _Res(dict):
    """Minimal stand-in for an httplib2 response: a dict that also exposes
    the HTTP status code as an attribute."""
    def __init__(self, status):
        super(_Res, self).__init__()
        self.status = status
        self['status'] = status
def _request(*args, **kwargs):
    """Fake httplib2 request: ignore all arguments and always succeed with a
    canned 200 response plus a JSON body."""
    payload = {'status': 200, 'id': 123, 'error': '',
               'result': {'status': 200}}
    body = json.dumps(payload)
    return _Res(200), body
class _SMTP(object):
    """Fake SMTP transport: every operation is a no-op, except that
    authenticate()/sendmail() raise socket.timeout while the class-level
    `working` flag is False (used to simulate a broken server)."""
    working = True
    def __init__(self, *args):
        pass
    def quit(self):
        pass
    # The handshake helpers behave exactly like quit() here.
    ehlo_or_helo_if_needed = starttls = quit
    def authenticate(self, *args):
        if self.working:
            return
        raise socket.timeout()
    # Sending mail shares the timeout-simulation behaviour.
    sendmail = authenticate
    def save_capture(self, msg):
        pass
class _FakeResult(object):
    """Fake urllib2 response: empty headers and a canned JSON body."""
    headers = {}
    def read(self):
        payload = {'id': 123, 'status': 200}
        return json.dumps(payload)
def _urlopen(*args):
    # Fake urllib2.urlopen: ignore arguments, return a canned response object.
    return _FakeResult()
class TestBasics(unittest.TestCase):
    """Exercise the Services.sendmessage flow against fully faked
    network/SMTP layers and verify the (ok, successes, failures) status
    counters for both a working and a broken SMTP backend."""

    def setUp(self):
        setup_config(_CONFIG)
        # Patch every outbound channel so no real network is touched.
        self.old_httplib2 = httplib2.Http.request
        httplib2.Http.request = _request
        # NOTE(review): the saved attribute (google_.SMTP) differs from the
        # one patched/restored (google_.SMTPRequestor) -- confirm intent.
        self.old_smtp = google_.SMTP
        google_.SMTPRequestor = _SMTP
        self.old_urlopen = urllib2.urlopen
        urllib2.urlopen = _urlopen
        self.mcclient_patcher = mock.patch('linkoauth.sstatus.Client')
        self.mcclient_patcher.start()
        self.mock_cache = MockCache()
        sstatus.Client.return_value = self.mock_cache

    def tearDown(self):
        # Undo every patch applied in setUp.
        httplib2.Http.request = self.old_httplib2
        google_.SMTPRequestor = self.old_smtp
        urllib2.urlopen = self.old_urlopen
        self.mcclient_patcher.stop()

    def test_callbacks(self):
        message = ''
        args = {'to': 'tarek@ziade.org',
                'subject': 'xxx',
                'title': 'the title',
                'description': 'some description',
                'link': 'http://example.com',
                'shorturl': 'http://example.com'}
        services = Services(['google.com'])
        services.initialize('google.com')
        # this sends a success to the callback
        res, error = services.sendmessage('google.com', _ACCOUNT,
                                          message, args, None)
        status = services.get_status('google.com')
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is the supported spelling.
        self.assertEqual(status, (True, 1, 0))
        # let's break SMTP
        _SMTP.working = False
        try:
            res, error = services.sendmessage('google.com', _ACCOUNT,
                                              message, args, None)
        finally:
            _SMTP.working = True
        status = services.get_status('google.com')
        self.assertEqual(status, (True, 1, 1))
|
# pip install requests
# (or pip3 install requests)
import requests
# Generic way of saving a web file
def save(filename, rsp):
    """Stream the body of *rsp* (a requests Response) to *filename*.

    Writes in 10 kB chunks so large downloads never sit fully in memory.
    (The original loop variable shadowed the builtin `bytes`.)
    """
    with open(filename, 'wb') as fh:
        for chunk in rsp.iter_content(10000):
            fh.write(chunk)
# Fetch the Wikipedia main page and dump its HTML.
resp = requests.get('https://en.wikipedia.org/wiki/Main_Page')
# print(resp.status_code)
resp.raise_for_status() # raise an exception IF I don't get my page
# print('GOOD')
print(resp.text)
# NOTE(review): the '#/media/...' fragment is client-side only and never sent
# to the server, so this request returns the main-page HTML again --
# 'petrel.jpg' will actually contain HTML, not image data.  Use the direct
# upload.wikimedia.org file URL to download the JPEG itself.
resp = requests.get('https://en.wikipedia.org/wiki/Main_Page#/media/File:European_Storm_Petrel_From_The_Crossley_ID_Guide_Eastern_Birds.jpg')
resp.raise_for_status()
# print(resp.text) NO IT'S BINARY
save('petrel.jpg', resp)
|
import string
import random
import os
import re
def moveToBaseDir():
    """Change the working directory to the root of the current drive.

    NOTE: os.getcwdu() exists only in Python 2 (this whole script is
    Python 2 -- print statements, raw_input), and the appended backslash
    assumes Windows drive paths.
    """
    drivePrefix = os.path.splitdrive(os.getcwdu())[0]
    os.chdir(drivePrefix+ '\\')
#get the input
# NOTE: Python 2 script (raw_input, print statements).
moveToBaseDir()
os.chdir("programs")
# Words arrive space-separated in input.txt.
text = open("input.txt", "r").read().split(' ')
puzzle_size = int(raw_input("dimensions"))
puzzle = []
# Refuse to continue if any word cannot possibly fit in the grid.
a_word_too_big = False
for word in text:
    if len(word) > puzzle_size:
        print ' '.join([word, 'is', str(len(word)), 'characters long and the puzzle', str(puzzle_size)])
        a_word_too_big = True
if a_word_too_big:
    print "Aborting process"
    exit()
#None or '' for empty squares? which will require more verbose programming
# Build an empty puzzle_size x puzzle_size grid of None cells.
for x in range(puzzle_size):
    puzzle.append([None for y in range(puzzle_size)])
def getColumn(puzzle, colNumber):
    """Return column *colNumber* of the 2-D grid, top to bottom."""
    column = []
    for row in puzzle:
        column.append(row[colNumber])
    return column
def getColumns(puzzle):
    """Return every column of the grid, left to right."""
    columns = []
    for index in range(len(puzzle[0])):
        columns.append(getColumn(puzzle, index))
    return columns
def getAscendingDiagonals(puzzle):#works only for squares
    """-> all ascending diagonals of length >= 2, as lists of cell values.

    Rotate the grid 90 degrees first to obtain the descending diagonals.
    """
    dimension = len(puzzle[0])
    # Cells on the same ascending diagonal share the same coordinate sum;
    # sums 1 .. (dimension-2)*2 + 1 cover every diagonal longer than one
    # cell (the two corner diagonals of length 1 are excluded).
    diagonals = []
    for total in range(1, (dimension - 2) * 2 + 2):
        cells = [(row, col)
                 for col in range(dimension)
                 for row in range(dimension)
                 if row + col == total]
        if len(cells) != 1:
            diagonals.append([puzzle[row][col] for row, col in cells])
    return diagonals
def getAllDiagonals(puzzle):
    """Return every ascending and descending diagonal of the grid."""
    # Rotating the grid 90 degrees turns descending diagonals into
    # ascending ones, so one helper covers both directions.
    rotated = getColumns(puzzle)
    rotated.reverse()
    ascending = getAscendingDiagonals(puzzle)
    descending = getAscendingDiagonals(rotated)
    return ascending + descending
def maxConsecutiveEmpty(lis):
    """->(endposition, length) in the list of the longest stretch of Nones.

    `endposition` is the index one past the last empty cell of the first
    longest run; any falsy value counts as empty.
    """
    best_len = 0
    best_end = 0
    run = 0
    for index, value in enumerate(lis):
        if not value:
            run += 1
            continue
        # A truthy value terminates the current run; keep it if it is the
        # strictly longest seen so far (first maximum wins on ties).
        if run > best_len:
            best_len = run
            best_end = index
        run = 0
    # A maximal run may extend to the very end of the list.
    if run > best_len:
        best_len = run
        best_end = len(lis)
    return (best_end, best_len)
""" Note to self. I'm going to prototype something that gets
some of the words into the puzzle so I can experiment with
the whole process of generating the puzzle"""
text.sort(lambda x,y: cmp(len(x), len(y)))
row = 0
col = 0
while row < puzzle_size:
try:
word = text.pop()
except:
if len(text) == 0:
break
if len(word) > puzzle_size - col:
row += 1
col = 0
else:
for letter in word:
print row, col
puzzle[row][col] = letter
col += 1
for row in puzzle:
print row
|
def solution(skill, skill_trees):
    """Count the skill trees that respect the prerequisite order *skill*.

    A tree is valid when the characters of *skill* that appear in it form,
    in order, a prefix of *skill* (no prerequisite skipped); characters
    outside *skill* may appear anywhere.

    Fixes: drops the leftover debug print of the intermediate string, and
    uses startswith() so a filtered sequence longer than *skill* (duplicate
    skill characters in a tree) cannot raise an IndexError.
    """
    answer = 0
    for tree in skill_trees:
        # Keep only the characters that belong to the required skill chain.
        ordered = ''.join(ch for ch in tree if ch in skill)
        if skill.startswith(ordered):
            answer += 1
    return answer
solution("CBD", ["BACDE", "CBADF", "AECB", "BDA"])
#!/usr/bin/python
import re
import subprocess
import serial
import time
import os
# Python 2 daemon loop: find the Arduino's serial port, then poll battery
# status and a command file, driving the charger over serial ('1' = charge
# on, '0' = charge off).
while True:
    port = "";
    # Spin until port.sh reports a serial device name.
    while len(port) == 0:
        port = re.sub('\n','',subprocess.check_output('/.../port.sh').decode())
    os.system('beep')
    time.sleep(0.5);
    ser = serial.Serial("/dev/" + port,115200)
    print "hi"
    ser.write('1')
    # Inner loop: poll once per second until any error, then rediscover
    # the port from the top.
    while True:
        try:
            result = subprocess.check_output('/.../power_info')
            result = result.decode()
            result = re.sub(' +','',result)
            result = result.strip()
            lines = result.split('\n')
            # Locate the "percentage" line emitted by power_info.
            for line in lines:
                if line.find("percentage") != -1:
                    percentage_location = lines.index(line)
            split = lines[percentage_location].split(':')
            percentage = int(split[1].replace('%',''))
            state = lines[0].split(':')[1]
            # print percentage, state
            # Start charging below 50%, stop when fully charged.
            if percentage <= 50 and state != 'charging':
                ser.write('1')
            if state == 'fully-charged':
                ser.write('0')
            # Forward any pending manual command, then clear the file.
            try:
                f = open('/.../Arduino_command')
                fl = f.readline()
                if len(fl) > 0:
                    print 'in', fl
                    ser.write(fl)
                    f = open('/.../Arduino_command','w')
                    f.write('')
                    f.close()
            except Exception:
                break
        except Exception:
            break
        time.sleep(1)
|
from argparse import ArgumentParser
from struct import unpack, pack
import sys
import hashlib
from Crypto.Cipher import AES
#aes_key = b'2B63B478DC23D5692B63B478DC23D569'
#aes_key = b'2B63B478DC23D5692B63B478DC23D569'
aes_key = b'ylsuxfhy}w{mh{|k5nn\x86\x87}nmhmxujt|}'
#v9[0] = 0x5A5A3257;
#v9[1] = 0x66975412;
#v9[2] = 0x66975412;
#v9[3] = 0x5A5A3257;
# iv = pack('<I', 0x5A5A3257) + pack('<I', 0x66975412) + \
# pack('<I', 0x66975412) + pack('<I', 0x5A5A3257)
iv = b'1A52A367CB12C458'
# b'\x57\x32\x5a\x5a\x12\x54\x97\x66\x12\x54\x97\x66\x57\x32\x5a\x5a'
def main():
    """Parse a MediaTek seccfg partition dump: report the device lock state
    and re-derive the encrypted SHA-256 integrity blob for comparison with
    the one stored in the partition.

    Fix: the dump is now read inside a ``with`` block, so the file handle is
    no longer leaked when sys.exit(1) fires on an invalid magic value.
    """
    parser = ArgumentParser(
        description='Read security configuration from MediaTek seccfg partition')
    parser.add_argument('file')
    args = parser.parse_args()

    def read_u32(f):
        # All seccfg fields are little-endian unsigned 32-bit words.
        return unpack('<I', f.read(4))[0]

    with open(args.file, 'rb') as f:
        if read_u32(f) != 0x4d4d4d4d:      # leading magic 'MMMM'
            print('Invalid input file')
            sys.exit(1)
        version = read_u32(f)
        unk0 = read_u32(f)
        lock_state = read_u32(f)
        unk1 = read_u32(f)
        unk2 = read_u32(f)
        if read_u32(f) != 0x45454545:      # trailing magic 'EEEE'
            print('Invalid input file')
            sys.exit(1)
        sha256_encrypted = f.read(32)
    print('SECCFGv%d device lock state is %d ' % (version, lock_state), end='')
    if lock_state == 1:
        print('(locked)')
    elif lock_state == 3:
        print('(unlocked)')
    else:
        print('(unknown)')
    print('Encrypted SHA256 sum is:', sha256_encrypted)
    # Recompute the digest over the header exactly as it was read.
    m = hashlib.sha256()
    m.update(pack('<I', 0x4d4d4d4d))
    m.update(pack('<I', version))
    m.update(pack('<I', unk0))
    m.update(pack('<I', lock_state))
    m.update(pack('<I', unk1))
    m.update(pack('<I', unk2))
    m.update(pack('<I', 0x45454545))
    computed_sha256 = m.digest()
    print('computed SHA: ', computed_sha256)
    # Encrypt the computed digest the same way the bootloader does, so it can
    # be compared against the stored sha256_encrypted blob.
    cipher = AES.new(aes_key, AES.MODE_CBC, iv)
    sha256_computed_encrypted = cipher.encrypt(computed_sha256)
    print('Encrypted SHA256 sum is:', sha256_computed_encrypted)
if __name__ == '__main__':
    main()
|
# coding: utf-8
# In[41]:
# Notebook-exported list/dict exercises.
dummy_list=[99, 1, 45, 1, 10, 15, 4]
# Number 2
# In[42]:
print(dummy_list)
# In[43]:
# reverse() mutates the list in place and returns None.
dummy_list.reverse()
# In[44]:
print(dummy_list)
# In[45]:
dummy_list_2 = [2, 200, 16, 4, 1, 0, 9.45, 45.67, 90, 12.01, 12.02]
# In[46]:
# Append every element of dummy_list_2 (a manual extend()).
i = 0
while i < len(dummy_list_2):
    dummy_list.append(dummy_list_2[i])
    i += 1
# In[47]:
print(dummy_list)
# In[48]:
dummy_list.count(dummy_list[1])
# In[49]:
#creating dictionary
# Map each value to its number of occurrences in the list.
dummy_dict={}
i=0
while i<len(dummy_list):
    dummy_dict[dummy_list[i]]=dummy_list.count(dummy_list[i])
    i += 1
# In[50]:
print(dummy_dict)
# In[52]:
dummy_list.sort()
print(dummy_list)
# In[54]:
dummy_list.sort(reverse= True)
print(dummy_list)
# In[60]:
x=200
dummy_list.remove(x)
print(dummy_list)
# In[61]:
# NOTE(review): 20 is not in the list, so this remove() raises ValueError
# (apparently a deliberate notebook demonstration of the failure mode).
x=20
dummy_list.remove(x)
print(dummy_list)
# In[62]:
x=5
del dummy_list[x]
print(dummy_list)
# In[63]:
# NOTE(review): index 50 is out of range -> IndexError (demonstration).
x=50
del dummy_list[x]
print(dummy_list)
# In[65]:
# Slice deletion empties the list in place.
del dummy_list[:]
print (dummy_list)
|
import pytest
@pytest.mark.asyncio
@pytest.mark.buvar_plugins("buvar.plugins.bg")
async def test_bg_error(log_output, Anything):
    """A failing background job must be logged as 'Background job failed'."""
    # TODO XXX FIXME without buvar_stage, I get
    # --- Logging error ---
    # Traceback (most recent call last):
    # File "/home/olli/.pyenv/versions/3.7.4/lib/python3.7/logging/__init__.py", line 1028, in emit
    # stream.write(msg + self.terminator)
    # ValueError: I/O operation on closed file.
    from buvar.plugins import bg
    from buvar import context
    async def make_error():
        raise Exception("foobar")
    jobs = context.get(bg.Jobs)
    jobs.add(make_error())
    # Awaiting the Jobs container waits for every scheduled job to finish.
    await jobs
    assert {
        "event": "Background job failed",
        "exc_info": Anything,
        "log_level": "error",
    } in log_output.entries
@pytest.mark.asyncio
@pytest.mark.buvar_plugins("buvar.plugins.bg")
async def test_bg_semaphore():
    """Jobs guarded by a shared semaphore run in lockstep groups of k."""
    import asyncio
    from buvar.plugins import bg
    from buvar import context
    state = {"counter": 0, "sync": []}
    k = 3
    sem = asyncio.Semaphore(k)
    async def count():
        state["counter"] += 1
        await asyncio.sleep(0)
        state["sync"].append(state["counter"] % k)
    jobs = context.get(bg.Jobs)
    i = k * 10
    for _ in range(i):
        jobs.add(count(), sync=sem)
    await jobs
    # With the semaphore, the counter advances in whole groups of k before
    # any job records it, so counter % k is always 0 at record time.
    assert state == {"counter": i, "sync": [0] * i}
|
"""
You are given a 0-indexed array nums consisting of positive integers, representing targets on a number line. You are also given an integer space.
You have a machine which can destroy targets. Seeding the machine with some nums[i] allows it to destroy all targets with values that can be represented as nums[i] + c * space, where c is any non-negative integer. You want to destroy the maximum number of targets in nums.
Return the minimum value of nums[i] you can seed the machine with to destroy the maximum number of targets.
Example 1:
Input: nums = [3,7,8,1,1,5], space = 2
Output: 1
Explanation: If we seed the machine with nums[3], then we destroy all targets equal to 1,3,5,7,9,...
In this case, we would destroy 5 total targets (all except for nums[2]).
It is impossible to destroy more than 5 targets, so we return nums[3].
Example 2:
Input: nums = [1,3,5,2,4,6], space = 2
Output: 1
Explanation: Seeding the machine with nums[0], or nums[3] destroys 3 targets.
It is not possible to destroy more than 3 targets.
Since nums[0] is the minimal integer that can destroy 3 targets, we return 1.
Example 3:
Input: nums = [6,2,5], space = 100
Output: 2
Explanation: Whatever initial seed we select, we can only destroy 1 target. The minimal seed is nums[1].
Constraints:
1 <= nums.length <= 10^5
1 <= nums[i] <= 10^9
1 <= space <= 10^9
hints:
1 Keep track of nums[i] modulo k.
2 Iterate over nums in sorted order.
analysis:
TC: O(N)
SC: O(N)
"""
import collections
from typing import List
class DestroySequentialTargets:
    """LeetCode 2453: pick the minimum seed value that destroys the maximum
    number of targets, where a seed nums[i] destroys every nums[i] + c*space."""

    def destroyTargets(self, nums: List[int], space: int) -> int:
        # All targets reachable from one seed share the same residue mod space.
        freq = collections.Counter()
        for value in nums:
            freq[value % space] += 1
        best = max(freq.values())
        # Among residue classes of maximal size, return the smallest member.
        return min(value for value in nums if freq[value % space] == best)
import numpy as np
import pandas as pd
import random
from sklearn.metrics import confusion_matrix
import sys
import time
# np.random.seed(21)
# random.seed(21)
# Follows algo from https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/icdm08b.pdf
class IsolationTreeEnsemble:
    """Ensemble of random isolation trees (Liu et al., ICDM 2008).

    Fixes over the original: fit() no longer duplicates its loop body across
    the improved/standard branches, and predict() is expressed in terms of
    anomaly_score() + predict_from_anomaly_scores() instead of copying both.
    """

    def __init__(self, sample_size, n_trees=10):
        self.sample_size = sample_size
        self.n_trees = n_trees
        # Trees are grown to at most log2(sample_size) levels, as in the paper.
        self.height_limit = np.log2(sample_size)
        self.trees = []

    def fit(self, X: np.ndarray, improved=False):
        """
        Given a 2D matrix of observations, create an ensemble of IsolationTree
        objects and store them in a list: self.trees. Converts DataFrames to
        ndarray objects.
        """
        if isinstance(X, pd.DataFrame):
            X = X.values
        n_rows = len(X)
        self.trees = []
        for _ in range(self.n_trees):
            # Each tree sees an independent random sample of the rows.
            sample_idx = random.sample(list(range(n_rows)), self.sample_size)
            tree = IsolationTree(self.height_limit, 0)
            if improved:
                tree = tree.fit_improved(X[sample_idx, :], improved=True)
            else:
                tree = tree.fit(X[sample_idx, :], improved=False)
            self.trees.append(tree)
        return self

    def path_length(self, X: np.ndarray) -> np.ndarray:
        """
        Given a 2D matrix of observations, X, compute the average path length
        for each observation in X over every tree in self.trees. Returns an
        ndarray of shape (len(X), 1).
        """
        if isinstance(X, pd.DataFrame):
            X = X.values
        means = [np.array([path_length_tree(row, t, 0) for t in self.trees]).mean()
                 for row in X]
        return np.array(means).reshape(-1, 1)

    def anomaly_score(self, X: np.ndarray) -> np.ndarray:
        """
        Given a 2D matrix of observations, X, compute the anomaly score
        for each x_i observation, returning an ndarray of them.

        NOTE(review): normalises by c(len(X)) -- the number of scored rows --
        rather than c(self.sample_size) as in the original paper; kept
        unchanged for backward compatibility, worth confirming.
        """
        return 2.0 ** (-1.0 * self.path_length(X) / c(len(X)))

    def predict_from_anomaly_scores(self, scores: np.ndarray, threshold: float) -> np.ndarray:
        """
        Given an array of scores and a score threshold, return an array of
        the predictions: 1 for any score >= the threshold and 0 otherwise.
        """
        return [1 if s[0] >= threshold else 0 for s in scores]

    def predict(self, X: np.ndarray, threshold: float) -> np.ndarray:
        "A shorthand for calling anomaly_score() and predict_from_anomaly_scores()."
        return self.predict_from_anomaly_scores(self.anomaly_score(X), threshold)
class IsolationTree:
    """A single random isolation tree.  Internal nodes hold a random split
    (feature index + threshold); external (leaf) nodes hold only their size."""
    def __init__(self, height_limit, current_height):
        self.height_limit = height_limit
        self.current_height = current_height
        self.split_by = None       # feature index used at this node
        self.split_value = None    # split threshold used at this node
        self.right = None
        self.left = None
        self.size = 0              # number of samples reaching this node
        self.exnodes = 0           # 1 marks an external (leaf) node
        self.n_nodes = 1
    def fit_improved(self, X: np.ndarray, improved=False):
        """
        Add Extra while loop
        """
        # Leaf: a single point left, or the depth budget is exhausted.
        if len(X) <= 1 or self.current_height >= self.height_limit:
            self.exnodes = 1
            self.size = len(X)
            return self
        split_by = random.choice(np.arange(X.shape[1]))
        min_x = X[:, split_by].min()
        max_x = X[:, split_by].max()
        # A constant column cannot be split; make this a leaf.
        if min_x == max_x:
            self.exnodes = 1
            self.size = len(X)
            return self
        condition = True
        # Re-draw the split value until it yields an acceptable partition:
        # small node, clearly unbalanced split, or both sides non-empty.
        while condition:
            split_value = min_x + random.betavariate(0.5,0.5)*(max_x-min_x)
            a = X[X[:, split_by] < split_value]
            b = X[X[:, split_by] >= split_value]
            if len(X) < 10 or a.shape[0] < 0.25 * b.shape[0] or b.shape[0] < 0.25 * a.shape[0] or (
                a.shape[0] > 0 and b.shape[0] > 0):
                condition = False
        self.size = len(X)
        self.split_by = split_by
        self.split_value = split_value
        # NOTE(review): children are grown with improved=False, so the
        # improved split strategy applies at this node only -- confirm intent.
        self.left = IsolationTree(self.height_limit, self.current_height + 1).fit_improved(a, improved=False)
        self.right = IsolationTree(self.height_limit, self.current_height + 1).fit_improved(b, improved=False)
        self.n_nodes = self.left.n_nodes + self.right.n_nodes + 1
        return self
    def fit(self, X:np.ndarray, improved=False):
        """
        Given a 2D matrix of observations, create an isolation tree. Set field
        self.root to the root of that tree and return it.
        If you are working on an improved algorithm, check parameter "improved"
        and switch to your new functionality else fall back on your original code.
        """
        if len(X) <= 1 or self.current_height >= self.height_limit:
            self.exnodes = 1
            self.size = X.shape[0]
            return self
        split_by = random.choice(np.arange(X.shape[1]))
        X_col = X[:, split_by]
        min_x = X_col.min()
        max_x = X_col.max()
        # A constant column cannot be split; make this a leaf.
        if min_x == max_x:
            self.exnodes = 1
            self.size = len(X)
            return self
        else:
            # Beta(0.5, 0.5) biases split values toward the ends of the range.
            split_value = min_x + random.betavariate(0.5, 0.5) * (max_x - min_x)
            w = np.where(X_col < split_value, True, False)
            del X_col
            self.size = X.shape[0]
            self.split_by = split_by
            self.split_value = split_value
            self.left = IsolationTree(self.height_limit, self.current_height + 1).fit(X[w], improved=True)
            self.right = IsolationTree(self.height_limit, self.current_height + 1).fit(X[~w], improved=True)
            self.n_nodes = self.left.n_nodes + self.right.n_nodes + 1
            return self
def find_TPR_threshold(y, scores, desired_TPR):
    """
    Start at score threshold 1.0 and work down until we hit desired TPR.
    Step by 0.001 score increments. For each threshold, compute the TPR
    and FPR to see if we've reached the desired TPR. If so, return the
    score threshold and FPR.
    """
    threshold = 1
    while threshold > 0:
        # scores is an (n, 1) array; each row carries a single score.
        y_pred = [1 if p[0] >= threshold else 0 for p in scores]
        tn, fp, fn, tp = confusion_matrix(y, y_pred).ravel()
        TPR = tp / (tp + fn)
        FPR = fp / (fp + tn)
        if TPR >= desired_TPR:
            return threshold, FPR
        threshold = threshold - 0.001
    # Fell through without reaching desired_TPR: return the last values seen.
    return threshold, FPR
def c(n):
    """Average path length of an unsuccessful BST search over n points
    (Eq. 1 of the isolation-forest paper); 1 for n == 2, 0 for n == 1."""
    if n > 2:
        # 0.5772156649 is the Euler-Mascheroni constant.
        harmonic = np.log(n - 1) + 0.5772156649
        return 2.0 * harmonic - 2.0 * (n - 1.0) / (n * 1.0)
    if n == 2:
        return 1
    if n == 1:
        return 0
def path_length_tree(x, t, e):
    """Path length of observation x in isolation tree t, starting at depth e.

    Leaves contribute e + c(size): the unbuilt subtree below a truncated
    leaf is accounted for by the average-path-length estimate c().
    """
    if t.exnodes == 1:
        return e + c(t.size)
    feature = t.split_by
    if x[feature] < t.split_value:
        return path_length_tree(x, t.left, e + 1)
    if x[feature] >= t.split_value:
        return path_length_tree(x, t.right, e + 1)
|
import keras
import os
from keras.callbacks import ModelCheckpoint, TensorBoard
from load_hand_data import load_data, shuffle_data, preprocess_label, preprocess_feature
from keras.applications.mobilenet import MobileNet
from keras.applications.vgg16 import VGG16
from keras.applications.inception_v3 import InceptionV3
from keras import models
from keras import layers
model_name = "cv2_batch_8_epoch_10000_data_50_adam_random_inception"
# loading datasets
train_X, train_Y = load_data(['Train1'], ['female'], read_labels=True)
train_X = preprocess_feature(train_X)
train_Y = preprocess_label(train_Y)
print("train x {} train y {}".format(train_X.shape, train_Y.shape))
# building model
conv_base = InceptionV3(weights=None,
input_shape=(224, 224, 3),
include_top=False)
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='selu'))
model.add(layers.Dense(128, activation='selu'))
model.add(layers.Dense(40))
conv_base.trainable = True
model.summary()
os.makedirs("/media/dawars/hdd/dawars/Hand/temalab_ckpt/" + model_name, exist_ok=True)
checkpointer = ModelCheckpoint(
"/media/dawars/hdd/dawars/Hand/temalab_ckpt/" + model_name + "/weights.{epoch:02d}-{val_loss:.2f}-{val_acc:.2f}.hdf5",
monitor='val_loss', verbose=0,
save_best_only=False, save_weights_only=True, mode='auto', period=3)
tensorboard = TensorBoard(log_dir='./logs/' + model_name)
model.compile(loss='mean_squared_error',
optimizer=keras.optimizers.Adam(),
metrics=['acc'])
# serialize model to JSON
model_json = model.to_json()
with open(model_name + ".json", "w") as json_file:
json_file.write(model_json)
# model.fit_generator(generate_batches(train_X, train_Y), steps_per_epoch=num_samples // 64)
model.fit(train_X, train_Y, batch_size=8, validation_split=.3, epochs=10000, shuffle=True,
callbacks=[checkpointer, tensorboard])
# serialize weights to HDF5
model.save_weights(model_name + ".h5")
print("Saved model to disk, finished training")
import atexit
def exit_handler():
print('Exiting app!')
model.save_weights(model_name + "_exit.h5")
print("Saved model to disk")
atexit.register(exit_handler)
|
"""server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
#from card.views import index, send
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Swagger/OpenAPI schema endpoint configuration (drf-yasg), publicly readable.
schema_view = get_schema_view(
    openapi.Info(
        title="Quiz API",
        default_version='v1',
        description=''' Documentation
The `ReDoc` view can be found [here](/doc).
''',
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="zdimon77@gmail.com"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
from quiz.web.views import index
urlpatterns = [
    # Versioned API namespace: /v1/quiz/... delegates to the quiz app.
    path('v1/',include([
        path('quiz/',include('quiz.api.urls')),
    ])),
    path('admin/', admin.site.urls),
    path('web', index),
    # Swagger UI served at the site root.
    path('', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
]
from django.conf import settings
from django.conf.urls.static import static
# Serve uploaded media files (development-style static serving).
urlpatterns += [
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import re
specialSyllables_en = """
tottered 2
chummed 1
peeped 1
moustaches 2
shamefully 3
messieurs 2
satiated 4
sailmaker 4
sheered 1
disinterred 3
propitiatory 6
bepatched 2
particularized 5
caressed 2
trespassed 2
sepulchre 3
flapped 1
hemispheres 3
pencilled 2
motioned 2
poleman 2
slandered 2
sombre 2
etc 4
sidespring 2
mimes 1
effaces 2
mr 2
mrs 2
ms 1
dr 2
st 1
sr 2
jr 2
truckle 2
foamed 1
fringed 2
clattered 2
capered 2
mangroves 2
suavely 2
reclined 2
brutes 1
effaced 2
quivered 2
h'm 1
veriest 3
sententiously 4
deafened 2
manoeuvred 3
unstained 2
gaped 1
stammered 2
shivered 2
discoloured 3
gravesend 2
60 2
lb 1
unexpressed 3
greyish 2
unostentatious 5
"""
# Cache mapping word -> syllable count; seeded below from specialSyllables_en
# and extended lazily by count().
fallback_cache = {}
# @AddSyl and @SubSyl list regexps to massage the basic count.
# Each match from @AddSyl adds 1 to the basic count, each @SubSyl match -1
# Keep in mind that when the regexps are checked, any final 'e' will have
# been removed, and all '\'' will have been removed.
fallback_subsyl = ["cial", "tia", "cius", "cious", "gui", "ion", "iou",
                   "sia$", ".ely$"]
fallback_addsyl = ["ia", "riet", "dien", "iu", "io", "ii",
                   "[aeiouy]bl$", "mbl$",
                   "[aeiou]{3}",
                   "^mc", "ism$",
                   "(.)(?!\\1)([aeiouy])\\2l$",
                   "[^l]llien",
                   "^coad.", "^coag.", "^coal.", "^coax.",
                   "(.)(?!\\1)[gq]ua(.)(?!\\2)[aeiou]",
                   "dnt$"]
# Compile our regular expressions
# (the pattern strings are replaced in place by compiled pattern objects)
for i in range(len(fallback_subsyl)):
    fallback_subsyl[i] = re.compile(fallback_subsyl[i])
for i in range(len(fallback_addsyl)):
    fallback_addsyl[i] = re.compile(fallback_addsyl[i])
def _normalize_word(word):
return word.strip().lower()
# Read our syllable override file and stash that info in the cache
for line in specialSyllables_en.splitlines():
line = line.strip()
if line:
toks = line.split()
assert len(toks) == 2
fallback_cache[_normalize_word(toks[0])] = int(toks[1])
def count(word):
word = _normalize_word(word)
if not word:
return 0
# Check for a cached syllable count
count = fallback_cache.get(word, -1)
if count > 0:
return count
# Remove final silent 'e'
if word[-1] == "e":
word = word[:-1]
# Count vowel groups
count = 0
prev_was_vowel = 0
for c in word:
is_vowel = c in ("a", "e", "i", "o", "u", "y")
if is_vowel and not prev_was_vowel:
count += 1
prev_was_vowel = is_vowel
# Add & subtract syllables
for r in fallback_addsyl:
if r.search(word):
count += 1
for r in fallback_subsyl:
if r.search(word):
count -= 1
# Cache the syllable count
fallback_cache[word] = count
return count
|
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Find `target` in a rotated ascending array of distinct values.

        Returns the index of `target`, or -1 when absent (including for an
        empty array). Single-pass O(log n) binary search: at every step one
        half of [lo, hi] is sorted, so we can decide which half may hold
        the target. Replaces the original two-phase pivot hunt, which also
        leaked debug print() output.
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            if nums[lo] <= nums[mid]:
                # Left half [lo, mid] is sorted.
                if nums[lo] <= target < nums[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:
                # Right half [mid, hi] is sorted.
                if nums[mid] < target <= nums[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return -1
x = Solution()
print(x.search([1,2], 0))
from recmd.algorithm.CB import cb_recommend_by_items
from recmd.constants import user_activity
from recmd.database import get_id
def filter_user(item_ls, user_id):
    """Drop items the user has already interacted with.

    Anonymous users (None or a negative id) get the list back unchanged.
    """
    if user_id is None or user_id < 0:
        return item_ls
    seen = user_activity[user_id]
    return [item for item in item_ls if get_id(item) not in seen]
def rcmd_by_item(item_id, filter_user_id=None, _max=20):
    """Content-based recommendations for one item, optionally filtered
    against a user's activity history."""
    candidates = cb_recommend_by_items([item_id], _max)
    return filter_user(candidates, filter_user_id)
|
# Programa 3.3: programa3_03.py
# Convertir un entero a una cadena en base 2-16
cadenaConversion = "0123456789ABCDEF"
def aCadena(n,base):
    """Return non-negative integer `n` as a string in `base` (2-16).

    FIX: the recursive call used `n / base`, which in Python 3 is float
    division — the float then breaks both the recursion's base case and
    the indexing into cadenaConversion. Floor division keeps it an int.
    """
    if n < base: return cadenaConversion[n]
    else:
        return aCadena(n // base,base) + cadenaConversion[n%base]
# Asignatura de Estructuras de Datos
# Dr. Ing. Mauricio Orozco Alzate
# Departamento de Informatica y Computacion
# Universidad Nacional de Colombia Sede Manizales |
class Message:
    """A leveled log message; LEVELS maps a numeric level back to its name."""
    INFO = 6
    NOTICE = 5
    WARN = 4
    ERROR = 3
    CRITICAL = 2
    CHECKERROR = 1
    LEVELS = ["", "CHECKERROR", "CRITICAL", "ERROR", "WARN", "NOTICE", "INFO"]

    def __init__(self, module, level, text):
        self.module = module
        self.level = level
        self.text = text

    def __iter__(self):
        # Yield (key, value) pairs so dict(message) produces a plain mapping.
        for key in ('module', 'level', 'text'):
            yield key, getattr(self, key)

    def __repr__(self):
        level_name = Message.LEVELS[self.level]
        return f'Message({self.module!r}, {level_name}, {self.text!r})'
# coding:utf-8
# Author : microease
# Date : 2019/4/21
import urllib.request, urllib.parse, urllib.error
import xml.etree.ElementTree as ET
import ssl
import requests
from bs4 import BeautifulSoup
# Fetch the assignment's XML feed and total every <count> element (py4e exercise).
response = requests.get("http://py4e-data.dr-chuck.net/comments_189317.xml")
soup = BeautifulSoup(response.text, 'xml')
# Parse each <count> text into an int in one pass; strip() kept to match the
# original's normalisation of surrounding whitespace.
counts = [int(tag.text.strip()) for tag in soup.find_all('count')]
# FIX: the original accumulated into a variable named `sum`, shadowing the
# builtin; use the builtin directly.
total = sum(counts)
print(total)
|
#不带壳的Tree
#这里介绍一个很重要的思想:遍历
#tree的很多method都会用到遍历,包括深度优先搜索和广度优先搜索
#我已经get了tree的遍历(用到recursion)的题的诀窍了
#直接就对tree的左支和右支call当前function,把他当做已经得到了你要的结果了,再想之后要怎么办,再凑我们最终要的答案
#只做当前一层要做的事!
#还有!在class里面的function必须判断左支右支为不为空!
class BTNode():
    """Binary-tree node that carries its own recursive algorithms.

    Each method follows the same traversal pattern: recurse into whichever
    children exist, then combine the children's results with this node's.
    """
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

    def is_leaf(self):
        """True iff this node has no children."""
        return self.left is None and self.right is None

    def traversal(self):
        """In-order traversal: print every datum (left subtree, this node,
        right subtree). Swap the three steps for pre-/post-order."""
        if self.left is not None:
            self.left.traversal()
        print(self.data)
        if self.right is not None:
            self.right.traversal()

    def find(self, data):
        """Return True iff the subtree rooted here contains `data`."""
        if self.data == data:
            return True
        found_left = self.left.find(data) if self.left is not None else False
        found_right = self.right.find(data) if self.right is not None else False
        return found_left or found_right

    def node_num(self):
        """Number of nodes in this subtree, this node included."""
        total = 1
        if self.left is not None:
            total += self.left.node_num()
        if self.right is not None:
            total += self.right.node_num()
        return total

    def find_max(self):
        """Largest datum stored anywhere in this subtree."""
        best = self.data
        if self.left is not None:
            best = max(best, self.left.find_max())
        if self.right is not None:
            best = max(best, self.right.find_max())
        return best

    def height(self):
        """Maximum depth of this subtree, counting this node as 1."""
        depth_left = self.left.height() if self.left is not None else 0
        depth_right = self.right.height() if self.right is not None else 0
        return 1 + max(depth_left, depth_right)
#壳
class BinaryTree():
    """Wrapper holding the root of a binary tree; offers iterative traversals
    (depth-first via an explicit stack, breadth-first via a queue)."""
    def __init__(self, root=None):
        self.root = root

    def traversal_pre(self):
        """Iterative pre-order (middle -> left -> right); prints one datum per line.
        Right child is pushed first so the left child is popped (visited) first."""
        stack = [self.root]
        while stack:
            node = stack.pop()
            if node is not None:
                print(node.data)
                stack.append(node.right)
                stack.append(node.left)

    def traversal_post(self):
        """Iterative post-order (left -> right -> middle): perform a
        middle-right-left walk, then print the collected nodes in reverse."""
        stack = [self.root]
        visited = []
        while stack:
            node = stack.pop()
            if node is not None:
                visited.append(node)
                stack.append(node.left)
                stack.append(node.right)
        while visited:
            print(visited.pop().data)

    def DFS(self):
        """Depth-first search with an explicit stack; prints data separated by
        spaces (this is the same order as a pre-order walk)."""
        stack = [self.root]
        while stack:
            node = stack.pop()
            if node is not None:
                print(node.data, end=' ')
                stack.append(node.right)
                stack.append(node.left)

    def BFS(self):
        """Breadth-first search: the list is used as a FIFO queue, so children
        are appended in order and popped from the front level by level."""
        queue = [self.root]
        while queue:
            node = queue.pop(0)
            if node is not None:
                print(node.data, end=' ')
                queue.append(node.left)
                queue.append(node.right)
# Sample tree: 1 with children 2 (children 4, 5) and 3 (child 6).
a = BTNode(1,BTNode(2,BTNode(4), BTNode(5)),BTNode(3, BTNode(6)))
t = BinaryTree(a) |
class Node:
    """A binary-tree node holding a value and optional left/right children."""
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    def set_left(self, value):
        """Attach a fresh node holding `value` as the left child."""
        self.left = Node(value)

    def set_right(self, value):
        """Attach a fresh node holding `value` as the right child."""
        self.right = Node(value)

    def is_leaf(self):
        """Return True when this node has neither child."""
        return self.left is None and self.right is None
class BinaryTree:
    """A binary tree addressed through its root node."""
    def __init__(self):
        self.root = None

    def get_root(self):
        """Print the root's value and return the root node.

        When no root is set, prints a notice and returns None."""
        if self.root is None:
            print("Root not set yet.")
            return None
        print(self.root.value)
        return self.root

    def set_root(self, value):
        """Install a new root node holding `value`."""
        self.root = Node(value)

    def is_empty(self):
        """Report (by printing) whether the tree has a root."""
        message = "Tree is empty." if self.root is None else "Tree is not empty."
        print(message)

    def count_nodes(self, node):
        """Recursively count the nodes of the subtree rooted at `node`."""
        if node is None:
            return 0
        if node.is_leaf():
            return 1
        return 1 + self.count_nodes(node.left) + self.count_nodes(node.right)

    def no_of_nodes(self):
        """Total node count of the whole tree (0 when empty)."""
        return 0 if self.root is None else self.count_nodes(self.root)

    def count_height(self, node):
        """Recursively compute the height of the subtree rooted at `node`."""
        if node is None:
            return 0
        if node.is_leaf():
            return 1
        return 1 + max(self.count_height(node.left), self.count_height(node.right))

    def height_of_tree(self):
        """Height of the whole tree (0 when empty)."""
        return 0 if self.root is None else self.count_height(self.root)
# Exercise the tree: root 2 with children 3 and 4, then a grandchild 5.
bt = BinaryTree()
bt.get_root()  # prints "Root not set yet."
bt.set_root(2)
bt.get_root()  # prints 2
bt.is_empty()  # prints "Tree is not empty."
root = bt.get_root()
root.set_left(3)
root.set_right(4)
print(bt.no_of_nodes())  # 3
root_left = root.left  # NOTE(review): binding is never used afterwards
root.left.set_left(5)
print(bt.height_of_tree())  # 3
|
from django.conf import settings
# Media-server connection settings; every value can be overridden from the
# Django settings module via getattr.
MEDIA_SERVER_HOST = getattr(settings, "MEDIA_SERVER_HOST", "")
MEDIA_SERVER_USER = getattr(settings, "MEDIA_SERVER_USER", "")
MEDIA_SERVER_PASSWORD = getattr(settings, "MEDIA_SERVER_PASSWORD", "")
MEDIA_SERVER_PORT = getattr(settings, "MEDIA_SERVER_PORT", 22)  # default suggests SSH/SFTP
MEDIA_SERVER_VIDEO_BUCKET = getattr(settings, "MEDIA_SERVER_VIDEO_BUCKET", "")
MEDIA_SERVER_AUDIO_BUCKET = getattr(settings, "MEDIA_SERVER_AUDIO_BUCKET", "")
MEDIA_SERVER_AUDIO_PATH = getattr(settings, "MEDIA_SERVER_AUDIO_PATH", "")
MEDIA_SERVER_VIDEO_PATH = getattr(settings, "MEDIA_SERVER_VIDEO_PATH", "")
MULTIMEDIA_NOTIFICATION_EMAIL = getattr(settings, "MULTIMEDIA_NOTIFICATION_EMAIL", "")
# Default encoding profiles. Commands are %-format templates filled with
# input/output paths (and offset for the thumbnail grab).
DEFAULT_VIDEO_PROFILES = {
    'f4v': {
        'encode_cmd': 'ffmpeg -y -i "%(input)s" -f mp4 -acodec libfaac -ab 128k -vcodec libx264 -vpre slow -b 690k -ac 1 -s 620x350 -r 30 "%(output)s"',
        'encode':True,
        'name':'Flash Video',
        'container':'f4v',
        'thumbnail_cmd': 'ffmpeg -y -itsoffset -%(offset)s -i "%(input)s" -vcodec mjpeg -vframes 1 -an -f rawvideo -s 620x350 "%(output)s"'
    },
}
MULTIMEDIA_VIDEO_PROFILES = getattr(settings, "MULTIMEDIA_VIDEO_PROFILES", DEFAULT_VIDEO_PROFILES)
DEFAULT_AUDIO_PROFILES = {
    'audio': {
        'encode_cmd': 'ffmpeg -y -i "%(input)s" "%(output)s"',
        'encode':True,
        'name':'MP3 Audio',
        'container':'mp3',
    },
}
MULTIMEDIA_AUDIO_PROFILES = getattr(settings, "MULTIMEDIA_AUDIO_PROFILES", DEFAULT_AUDIO_PROFILES)
MULTIMEDIA_APP_LABLEL = getattr(settings, "MULTIMEDIA_APP_LABEL", "Multimedia") |
# Rewrite the program that prompts the user for a list of numbers and prints out the maximum and minimum of the numbers at the end when the user enters “done”. Write the program to store the numbers the user enters in a list and use the max() and min() functions to compute the maximum and minimum numbers after the loop completes.
# Collect numbers until the user types "done", then report their max and min.
# Fixes: the original shadowed the builtin `list`, used a bare `except`
# (which also swallows KeyboardInterrupt), and crashed with ValueError on
# max()/min() when the user entered no numbers at all.
numbers = []
while True:
    inp = input('enter a number (type done to quit): ')
    if inp == 'done':
        break
    try:
        numbers.append(float(inp))
    except ValueError:
        print('invalid input')
if numbers:
    print('max:', max(numbers), 'min:', min(numbers))
else:
    print('no numbers entered')
|
import random
def generate_random_list(number_of_items):
    """Return a list of `number_of_items` random integers in [0, 100]."""
    return [random.randint(0, 100) for _ in range(number_of_items)]
# Demo: print random lists of lengths 0 through 9.
for i in range(10):
    random_list = generate_random_list(i)
    print(random_list)
|
# encoding: utf-8
# dp[i] is the maximum sum of a subarray ending at index i.
# Recurrence: if dp[i-1] <= 0: dp[i] = array[i]
#             if dp[i-1] > 0:  dp[i] = dp[i-1] + array[i]
def findMaxSubArray(array):
    """Print the maximum subarray sum of `array` (Kadane-style DP, Python 2).

    dp[idx] holds the best sum of a subarray ending at idx. Prints the
    answer rather than returning it; assumes `array` is non-empty
    (max() of an empty dp would raise).
    """
    dp = [None] * len(array)
    for idx in range(0, len(array)):
        # Restart the running sum whenever the previous best is non-positive.
        if idx == 0 or dp[idx - 1] <= 0:
            dp[idx] = array[idx]
        else:
            dp[idx] = dp[idx - 1] + array[idx]
    print max(dp)
if __name__ == '__main__':
    # Expected output: 18 (subarray [3, 10, -4, 7, 2]).
    array = [1, -2, 3, 10, -4, 7, 2, -5]
    findMaxSubArray(array=array)
|
#!/usr/bin/env python
__author__ = "Dihia BOULEGANE"
__copyright__ = ""
__credits__ = ["Dihia BOULEGANE"]
__license__ = "GPL"
__version__ = "0.1"
__maintainer__ = "Dihia BOULEGANE"
__email__ = "dihia.boulegane@telecom-paristech.fr"
__status__ = "Development"
from ade.arbitrated_ensemble_abstaining_threshold import ArbitratedEnsembleAbstainingThreshold
from diversity.diversity_factory import DiversityMeasuresFactory
from skmultiflow.utils import *
from utils.functions import *
from utils.metrics import *
from ade.selection_methods import select_experts_diversity
class ArbitratedEnsembleAbstainingThresholdDiversity(ArbitratedEnsembleAbstainingThreshold):
    """Arbitrated abstaining ensemble that additionally filters base models
    through a diversity criterion when selecting experts at predict time."""
    def __init__(self, meta_models, base_models, meta_error_metric='MAPE', competence_threshold=30,
                 threshold_update_method='product', threshold_update_step=0.01, meta_confidence_level=False,
                 diversity_method='sliding_window', diversity_measure='disagree', diversity_threshold=0.7,
                 n_sliding=200, fading_factor=None, output_file=None):
        # Build the argument dict expected by the diversity-evaluator factory;
        # each method takes a different set of parameters.
        if diversity_method == 'sliding_window':
            data = {'model_list': base_models, 'window_size': n_sliding}
        elif diversity_method == 'fading factor':
            data = {'model_list': base_models, 'alpha': fading_factor}
        elif diversity_method == 'incremental':
            data = {'model_list': base_models}
        else:
            raise NotImplementedError
        self.diversity_method = diversity_method
        # Diversity selection parameters
        self.diversity_measure = diversity_measure
        #TODO: change threshold based on diversity_measure
        self.correlation_threshold = diversity_threshold
        self.n_sliding = n_sliding
        self.fading_factor = fading_factor
        div = DiversityMeasuresFactory()
        self.diversity_evaluator = div.get_diversity_evaluator(diversity_method=diversity_method, diversity_measure=diversity_measure, args_dict=data)
        super(ArbitratedEnsembleAbstainingThresholdDiversity, self).__init__(meta_models=meta_models,
                                                                             base_models=base_models,
                                                                             meta_error_metric=meta_error_metric,
                                                                             competence_threshold=competence_threshold,
                                                                             threshold_update_method=threshold_update_method,
                                                                             threshold_update_step=threshold_update_step,
                                                                             meta_confidence_level=meta_confidence_level,
                                                                             output_file=output_file)
        if self.output_file is not None:
            super(ArbitratedEnsembleAbstainingThresholdDiversity, self)._init_file()
    def fit(self, X, y, classes=None, weight=None):
        # Batch fitting is intentionally unsupported; this is a streaming model.
        raise NotImplementedError
    def partial_fit(self, X, y, classes=None, weight=None):
        """ performs a partial fit for all meta and base model with an update strategy of the threshold
        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)
            The array of samples used to fit the model.
        y: Array-like
            An array-like with the labels of all samples in X.
        classes: Array-like, optional (default=None)
            Contains all possible labels. Applicability varies depending on the algorithm.
        weight: Array-like, optional (default=None)
            Instance weight. If not provided, uniform weights are assumed.
            Applicability varies depending on the algorithm.
        """
        # Add first predictions to buffer in the first run
        super(ArbitratedEnsembleAbstainingThresholdDiversity, self).partial_fit(X=X, y=y, classes=classes, weight=weight)
    def predict(self, X):
        """ Predicts target using the arbitrated ensemble model.
        Parameters
        ----------
        X : Numpy.ndarray of shape (n_samples, n_features)
            The matrix of samples one wants to predict.
        Returns
        -------
        list of all predicted for samples in X.
        """
        r, _ = get_dimensions(X)
        predictions = []
        for i in range(r):
            # Select the (meta, base) expert subset for this sample using both
            # competence and diversity thresholds.
            meta_models, base_models, meta_predictions, base_predictions, step_results = select_experts_diversity(self.meta_models, self.base_models, np.array([X[i]]),
                                                                                                                  self.diversity_evaluator, self.diversity_measure,
                                                                                                                  self.meta_confidence_level,
                                                                                                                  self.competence_threshold, self.correlation_threshold,
                                                                                                                  2)
            if len(base_models) > 0:
                """ Sub set of experts found"""
                base_predictions = np.array(base_predictions).reshape(1, -1)
                weights = weight_predictions(meta_predictions)
            else:
                # No experts found ==> fall back to all base models.
                meta_predictions = [z.predict([X[i]])[0] for z in self.meta_models]
                base_predictions = np.array([m.predict([X[i]])[0] for m in self.base_models]).reshape(1, -1)
                weights = weight_predictions(meta_predictions)
                # All base models have been selected
                step_results['selected_experts_idx'] = [1 for i in range(len(self.base_models))]
            try:
                final_prediction = get_aggregated_result(base_predictions, weights, method='weighted_average')
                predictions.append(final_prediction)
                # TODO: get all base_predictions and all meta_predictions and all selected indices
                super(ArbitratedEnsembleAbstainingThresholdDiversity, self)._update_outputs(
                    global_prediction=final_prediction,
                    base_predictions=step_results['all_base_predictions'],
                    meta_predictions=step_results['all_meta_predictions'],
                    base_selected_idx=step_results['selected_experts_idx']
                )
            except Exception as exc:
                raise exc
        self.previous_predictions.enqueue(predictions)
        return predictions
    def predict_proba(self, X):
        # Regression ensemble: probability estimates are not defined.
        raise NotImplementedError
    def score(self, X, y):
        raise NotImplementedError
    def get_class_type(self):
        return 'ArbitratedEnsembleAbstaining'
    def get_info(self):
        """Append the diversity-specific parameters to the parent's info string."""
        info = super(ArbitratedEnsembleAbstainingThresholdDiversity, self).get_info()
        # Diversity selection parameters
        info += ' - diversity_measure: {}'.format(self.diversity_measure)
        info += ' - n_sliding: {}'.format(self.n_sliding) if self.n_sliding is not None else ''
        info += ' - fading_factor: {}'.format(self.fading_factor) if self.fading_factor is not None else ''
        info += ' - correlation_threshold: {}'.format(self.correlation_threshold)
        return info
    def get_model_name(self):
        """Parent model name suffixed with the diversity configuration."""
        model_name = super(ArbitratedEnsembleAbstainingThresholdDiversity, self).get_model_name()
        model_name = '_'.join([model_name, 'DIVERSITY', self.diversity_method, self.diversity_measure])
        return model_name
|
from utils import equals, digits
# Python 2 code (xrange, print statement, integer "/" division).
def isGood( n ):
    """True iff n, 2n, ..., 6n all contain exactly the same digits
    (permuted multiples - Project Euler style)."""
    return equals( *map( lambda x : sorted( digits( x ) ),
        ( n * i for i in xrange( 1, 7 ) ) ) )
def find():
    """Return the smallest such n, scanning digit-lengths in order.

    Upper bound 10**(e+1)/6 ensures 6n still has e+1 digits, so all six
    multiples share a digit count."""
    for e in xrange( 1, 10 ):
        for x in xrange( 10 ** e, 10 ** ( e + 1 ) / 6 + 1 ):
            if( isGood( x ) ):
                return x
print find()
|
from collections import OrderedDict
from datetime import datetime, timedelta, timezone
from typing import Optional
# ############### DATETIME ############### #
# Coarse-to-fine units used for human-readable relative times.
TIMEDELTA_INTERVALS = (
    (timedelta(days=365.2425), 'year'),
    (timedelta(days=30.436875), 'month'),
    # (timedelta(days=7), 'week'),
    (timedelta(days=1), 'day'),
    (timedelta(hours=1), 'hour'),
    (timedelta(minutes=1), 'minute'),
    (timedelta(seconds=1), 'second'),
    # (timedelta(milliseconds=1), 'millisecond'),
    # (timedelta(microseconds=1), 'microsecond'),
)
def timedelta_(td: timedelta) -> str:
    """Render td as a single coarse unit, e.g. '2.0 days ago'.

    Returns 'just now' for anything under one second."""
    for unit, unit_name in TIMEDELTA_INTERVALS:
        if unit <= td:
            return f'''{td / unit:.1f} {unit_name}s ago'''
    return 'just now'
def timedelta_multi(td: timedelta, max_levels: Optional[int] = 2):
    """Render td as up to `max_levels` units, e.g. '2 hours and 5 minutes'.

    max_levels of None/0 means "all non-zero units". Returns 'just now'
    when every unit is zero.

    FIX: the original's for/else always appended ' and ' even when only a
    single unit was non-zero, producing strings like ' and 5 minutes';
    single-unit inputs now render cleanly.
    """
    parts = OrderedDict()
    for unit, unit_name in TIMEDELTA_INTERVALS:
        if td // unit != 0:
            parts[unit_name] = td // unit
        td %= unit
    if not parts:
        return 'just now'
    levels = min(max_levels, len(parts)) if max_levels else len(parts)
    pieces = []
    for _ in range(levels):
        unit_name, value = parts.popitem(last=False)
        pieces.append(f'''{value} {unit_name}{'s' if value != 1 else ''}''')
    if len(pieces) == 1:
        return pieces[0]
    # Oxford-free join: comma-separate all but the last, then ' and '.
    return ', '.join(pieces[:-1]) + ' and ' + pieces[-1]
def datetime_(dt: datetime, now: Optional[datetime] = None) -> str:
    """Relative-time string for dt measured against now (default: naive UTC now)."""
    if now is None: now = datetime.utcnow()
    if dt.tzinfo != now.tzinfo:
        # Put both datetimes on a common tzinfo so the subtraction is valid.
        # NOTE(review): astimezone() on a naive datetime assumes local time -
        # confirm callers only mix naive/aware when both are effectively UTC.
        tz = dt.tzinfo or now.tzinfo
        dt, now = dt.astimezone(tz), now.astimezone(tz)
    return timedelta_(now - dt)
def strftime(dt: Optional[datetime] = None) -> str:
    """Format dt (default: current UTC time) in the long, human-readable form."""
    if dt is None: dt = datetime.now(timezone.utc)
    elif dt.tzinfo is None: dt = dt.astimezone(timezone.utc)
    return f'{dt:%A, %d %B %Y %H:%M:%S %Z}' # Monday, 02 January 2017 14:25:37 UTC
def strptime(string: str) -> datetime:
    """Inverse of strftime: parse the long date format back into a datetime."""
    return datetime.strptime(string, '%A, %d %B %Y %H:%M:%S %Z')
# ############### NUMBERS ############### #
def percentage(pct: float) -> str: return f'{pct:3.1%}'
|
# 图
# 2020/08/07
# author : tanjiaxian
from abc import ABC, abstractmethod
from enum import Enum
from queue import Queue
from typing import Any, List
import numpy as np
from DataStructuresAndAlgorithms.stack import Stack
class VStatus(Enum):
    # Vertex state during a graph traversal.
    UNDISCOVERD = 0
    DISCOVERD = 1
    VISITED = 2
class EType(Enum):
    # Classification of an edge within the traversal tree.
    UNDETERMINED = 0
    TREE = 1
    CROSS = 2
    FORWARD = 3
    BACKWARD = 4
class Vertex:
    """Vertex object: payload plus per-traversal bookkeeping fields."""
    def __init__(self, data: Any):
        self.data = data
        self.inDegree = 0
        self.outDegree = 0
        self.status = VStatus.UNDISCOVERD
        self.dTime = -1   # discovery time stamp
        self.fTime = -1   # finish time stamp
        self.parent = -1  # parent index in the traversal tree
        self.priority = np.inf  # priority used by pfs / Prim / Dijkstra
    def __repr__(self):
        return str(self.data)
class Edge:
    """Edge object: payload, weight, and the type assigned during traversal."""
    def __init__(self, data: Any, weight: int):
        self.data = data
        self.weight = weight
        self.type = EType.UNDETERMINED
    def __bool__(self):
        # An edge is truthy iff it carries a payload.
        return bool(self.data is not None)
    def __repr__(self):
        return "<- " + str(self.data) + ": " + str(self.weight) + "->"
class PrimPU:
    """Vertex priority updater for Prim's minimum-spanning-tree algorithm."""
    def __call__(self, g, uk: int, v: int):  # g: Graph
        if VStatus.UNDISCOVERD == g.status(v):  # for each undiscovered neighbour v of uk
            if g.priority(v) > g.weight(uk, v):  # relax by Prim's strategy (edge weight only)
                V = g.V[v]
                V.priority = g.weight(uk, v)  # update the priority
                V.parent = uk  # update the parent
class DijkstraPU:
    """Vertex priority updater for Dijkstra's shortest-path algorithm."""
    def __call__(self, g, uk: int, v: int):
        if VStatus.UNDISCOVERD == g.status(v):
            # Relax: path through uk is shorter than the best known path to v.
            if g.priority(v) > (g.priority(uk) + g.weight(uk, v)):
                V = g.V[v]
                V.priority = g.priority(uk) + g.weight(uk, v)
                V.parent = uk
class Graph(ABC):
    """Abstract graph template: vertex/edge bookkeeping plus the classic traversals.

    Fixes relative to the original:
    - __BFS marked the start vertex UNDISCOVERD (instead of DISCOVERD) when
      enqueuing it, so a self-loop could re-enqueue it, and wrote the
      non-existent attribute ``dtime`` (typo for ``dTime``), leaving BFS
      discovery times at -1.
    - __TSORT classified FORWARD/CROSS edges with the comparison inverted
      relative to __DFS and __BCC.
    """
    def __init__(self):
        self.n = 0  # total number of vertices
        self.e = 0  # total number of edges
        self.V: List[Vertex] = []
        self.E: List[List[Edge]] = []
    def __reset(self):
        """Reset the auxiliary state of every vertex and edge."""
        for i in range(self.n):
            v = self.V[i]
            v.status = VStatus.UNDISCOVERD
            v.dTime = -1
            v.fTime = -1
            v.parent = -1
            v.priority = np.inf
            for j in range(self.n):
                if self.exists(i, j):
                    e = self.E[i][j]
                    e.type = EType.UNDETERMINED
    @abstractmethod
    def insert(self, v):
        """Insert a vertex; return its index."""
        pass
    @abstractmethod
    def remove(self, i: Any):
        """Remove vertex i and its incident edges; return the vertex data."""
        pass
    @abstractmethod
    def vertex(self, i: Any):
        """Data of vertex i (assumed to exist)."""
        pass
    @abstractmethod
    def inDegree(self, i: Any):
        """In-degree of vertex i (assumed to exist)."""
        pass
    @abstractmethod
    def outDegree(self, i: Any):
        """Out-degree of vertex i (assumed to exist)."""
        pass
    @abstractmethod
    def firstNbr(self, i: Any):
        """First adjacent vertex of vertex i."""
        pass
    @abstractmethod
    def nextNbr(self, i: Any, j: Any):
        """Next adjacent vertex of i after neighbour j."""
        pass
    @abstractmethod
    def status(self, i: Any):
        """Traversal status of vertex i."""
        pass
    @abstractmethod
    def dTime(self, i: Any):
        """Discovery time stamp of vertex i."""
        pass
    @abstractmethod
    def fTime(self, i: Any):
        """Finish time stamp of vertex i."""
        pass
    @abstractmethod
    def parent(self, i: Any):
        """Parent of vertex i in the traversal tree."""
        pass
    @abstractmethod
    def priority(self, i: Any):
        """Priority of vertex i in the traversal tree."""
        pass
    @abstractmethod
    def exists(self, i: Any, j: Any):
        """Whether edge (i, j) exists."""
        pass
    @abstractmethod
    def insert_edge(self, e, i: Any, j: Any, w):
        """Insert edge e with weight w between vertices i and j."""
        pass
    @abstractmethod
    def remove_edge(self, i: Any, j: Any):
        """Remove the edge between i and j; return its data."""
        pass
    @abstractmethod
    def type(self, i: Any, j: Any):
        """Traversal type of edge (i, j)."""
        pass
    @abstractmethod
    def edge(self, i: Any, j: Any):
        """Data of edge (i, j) (assumed to exist)."""
        pass
    @abstractmethod
    def weight(self, i: Any, j: Any):
        """Weight of edge (i, j)."""
        pass
    def bfs(self, s: int):
        """Breadth-first search over the whole graph, starting from s."""
        assert (0 <= s) and (s < self.n)
        self.__reset()
        self.clock = 0
        v = s
        while True:
            if VStatus.UNDISCOVERD == self.status(v):
                self.__BFS(v)
            v += 1
            v %= self.n
            if s == v:
                break
    def __BFS(self, v: int):
        """Breadth-first search within one connected component."""
        Q = Queue()  # auxiliary FIFO queue
        V = self.V[v]
        # FIX: mark the start vertex DISCOVERD on enqueue (the original left
        # it UNDISCOVERD, so a self-loop could re-enqueue it as a TREE edge).
        V.status = VStatus.DISCOVERD
        Q.put(v)  # seed the queue with the start vertex
        while not Q.empty():
            v = Q.get()
            V = self.V[v]
            self.clock += 1
            # FIX: the original assigned V.dtime (typo), so dTime stayed -1.
            V.dTime = self.clock
            u = self.firstNbr(v)
            while -1 < u:
                U = self.V[u]
                E = self.E[v][u]
                if VStatus.UNDISCOVERD == U.status:  # u unseen:
                    U.status = VStatus.DISCOVERD  # discover it,
                    Q.put(u)
                    E.type = EType.TREE
                    U.parent = v  # and grow the spanning tree
                    print(U, end='\t')
                else:  # u already discovered or even fully visited
                    E.type = EType.CROSS
                u = self.nextNbr(v, u)
            V.status = VStatus.VISITED
    def dfs(self, s: int):
        """Depth-first search over the whole graph, starting from s."""
        assert (0 <= s) and (s < self.n)
        self.__reset()
        self.clock = 0
        v = s
        while True:
            if VStatus.UNDISCOVERD == self.status(v):  # each still-undiscovered vertex
                self.__DFS(v)
            v += 1
            v %= self.n
            if s == v:
                break
    def __DFS(self, v: int):
        """Depth-first search within one connected component."""
        assert (0 <= v) and (v < self.n)
        V = self.V[v]
        self.clock += 1
        V.dTime = self.clock
        V.status = VStatus.DISCOVERD  # discover vertex v
        u = self.firstNbr(v)
        while -1 < u:
            U = self.V[u]
            E = self.E[v][u]
            if VStatus.UNDISCOVERD == U.status:  # u unseen: the tree can grow here
                E.type = EType.TREE
                U.parent = v
                print(U, end='\t')
                self.__DFS(u)
            elif VStatus.DISCOVERD == U.status:  # u discovered but unfinished: an ancestor
                E.type = EType.BACKWARD
            else:  # u already VISITED (digraph): forward or cross edge by dTime order
                E.type = EType.FORWARD if V.dTime < U.dTime else EType.CROSS
            u = self.nextNbr(v, u)
        V.status = VStatus.VISITED
        self.clock += 1
        V.fTime = self.clock
    def bcc(self, s: Any):
        """DFS-based biconnected-component decomposition over the whole graph."""
        assert (0 <= s) and (s < self.n)
        self.__reset()
        self.clock = 0
        v = s
        S = Stack([], self.n)
        while True:
            if VStatus.UNDISCOVERD == self.status(v):
                self.__BCC(v, S)
                S.pop()
            v += 1
            v %= self.n
            if s == v:
                break
    def __BCC(self, v: int, S: Stack):
        """BCC decomposition within one connected component.

        fTime is reused here as the 'highest connected ancestor' tag."""
        assert (0 <= v) and (v < self.n)
        self.clock += 1
        V = self.V[v]
        V.dTime = self.clock
        V.fTime = self.clock
        V.status = VStatus.DISCOVERD
        S.push(v)
        u = self.firstNbr(v)
        while -1 < u:
            U = self.V[u]
            E = self.E[v][u]
            if VStatus.UNDISCOVERD == U.status:
                U.parent = v
                E.type = EType.TREE
                print(U, end='\t')
                self.__BCC(u, S)
                if self.fTime(u) < self.dTime(v):
                    # u's subtree reaches above v: propagate the higher ancestor.
                    V.fTime = min(V.fTime, U.fTime)
                else:
                    # v is an articulation point: pop one whole BCC off the stack,
                    # then push v back as it may belong to further components.
                    while v != S.pop():
                        pass
                    S.push(v)
            elif VStatus.DISCOVERD == U.status:
                E.type = EType.BACKWARD
                if u != V.parent:
                    V.fTime = min(V.fTime, U.dTime)
            else:
                E.type = EType.FORWARD if V.dTime < U.dTime else EType.CROSS
            u = self.nextNbr(v, u)
        V.status = VStatus.VISITED
    def tSort(self, s: int):
        """DFS-based topological sort: in the resulting linear order no vertex
        has an edge pointing back to a predecessor."""
        assert (0 <= s) and (s < self.n)
        self.__reset()
        self.clock = 0
        v = s
        S = Stack([], self.n)  # sorted vertices accumulate on this stack
        while True:
            if VStatus.UNDISCOVERD == self.status(v):
                if not self.__TSORT(v, S):
                    while not S.empty():  # some component (hence the graph) is not a DAG
                        S.pop()
                    break  # no point continuing
            v += 1
            v %= self.n
            if s == v:
                break
    def __TSORT(self, v: int, S: Stack):
        """Topological sort within one component; False when a cycle is found."""
        assert (0 <= v) and (v < self.n)
        V = self.V[v]
        self.clock += 1
        V.dTime = self.clock
        V.status = VStatus.DISCOVERD
        u = self.firstNbr(v)
        while -1 < u:
            U = self.V[u]
            E = self.E[v][u]
            if VStatus.UNDISCOVERD == U.status:
                U.parent = v
                E.type = EType.TREE
                print(U, end='\t')
                if not self.__TSORT(u, S):  # recurse from u
                    return False  # u's subtree cannot be sorted, so neither can the graph
            elif VStatus.DISCOVERD == U.status:  # a back edge: not a DAG, stop here
                E.type = EType.BACKWARD
                return False
            else:
                # FIX: the original used '>', inverting FORWARD/CROSS relative
                # to __DFS and __BCC (forward = ancestor discovered earlier).
                E.type = EType.FORWARD if V.dTime < U.dTime else EType.CROSS
            u = self.nextNbr(v, u)
        V.status = VStatus.VISITED
        S.push(self.vertex(v))
        return True  # v and its descendants are topologically sortable
    def prim(self, v: Any):
        """Minimum spanning tree (see pfs with a PrimPU updater)."""
        pass
    def dijkstra(self, v: Any):
        """Single-source shortest paths (see pfs with a DijkstraPU updater)."""
        pass
    def pfs(self, s: int, prioUpdate):
        """Priority-first search framework over the whole graph."""
        assert (0 <= s) and (s < self.n)
        self.__reset()
        v = s
        while True:
            if VStatus.UNDISCOVERD == self.status(v):
                self.__PFS(v, prioUpdate)
            v += 1
            v %= self.n
            if s == v:
                break
    def __PFS(self, s: int, prioUpdate):
        """Priority-first search within one connected component."""
        assert (0 <= s) and (s < self.n)
        S = self.V[s]
        S.priority = 0
        S.status = VStatus.VISITED
        S.parent = -1
        while True:
            w = self.firstNbr(s)
            while -1 < w:
                prioUpdate(self, s, w)  # update priority/parent of neighbour w
                w = self.nextNbr(s, w)
            # Pick the undiscovered vertex with minimal priority as the next s.
            shortest = np.inf
            w = 0
            while w < self.n:
                if VStatus.UNDISCOVERD == self.status(w):
                    if shortest > self.priority(w):
                        shortest = self.priority(w)
                        s = w
                w += 1
            if VStatus.VISITED == self.status(s):
                break
            S = self.V[s]
            S.status = VStatus.VISITED
            E = self.E[S.parent][s]
            E.type = EType.TREE
            print(S, end='\t')
class GraphMatrix(Graph):
    """Adjacency-matrix implementation: E[i][j] is the Edge from i to j, or None."""
    def __init__(self):
        super(GraphMatrix, self).__init__()
        self.n = 0
        self.e = 0
        self.V: List[Vertex] = []  # vertex set
        self.E: List[List[Edge]] = []  # edge set (n x n matrix of Edge / None)
    def vertex(self, i: Any):
        return self.V[i].data
    def inDegree(self, i: Any):
        return self.V[i].inDegree
    def outDegree(self, i: Any):
        return self.V[i].outDegree
    def firstNbr(self, i: Any):
        # First neighbour = highest-indexed neighbour, probing down from n.
        return self.nextNbr(i, self.n)
    def nextNbr(self, i: Any, j: Any):
        """
        Next neighbour of vertex i after vertex j (an adjacency list would be
        more efficient); reverse linear probe over row i.
        """
        while -1 < j:
            j -= 1
            if self.exists(i, j):
                break
        return j
    def status(self, i: Any):
        return self.V[i].status
    def dTime(self, i: Any):
        return self.V[i].dTime
    def fTime(self, i: Any):
        return self.V[i].fTime
    def parent(self, i: Any):
        return self.V[i].parent
    def priority(self, i: Any):
        return self.V[i].priority
    def insert(self, v: Vertex):
        """Insert a vertex; return its index (extends the matrix by one row/column)."""
        for i in range(self.n):
            self.E[i].append(None)
        self.n += 1
        self.E.append([None] * self.n)
        self.V.append(v)
        return self.n - 1
    def remove(self, i: Any):
        """Remove vertex i and its incident edges (0 <= i < n); return its data."""
        for j in range(self.n):  # all outgoing edges
            if self.exists(i, j):  # delete them one by one
                self.E[i][j] = None
                self.V[j].inDegree -= 1
                self.e -= 1
        self.E.pop(i)  # drop row i
        self.n -= 1
        vBak = self.V.pop(i).data
        for j in range(self.n):  # drop column i, fixing out-degrees
            e = self.E[j].pop(i)
            if e:
                e = None
                self.V[j].outDegree -= 1
        return vBak
    def exists(self, i: Any, j: Any):
        return (0 <= i) and (i < self.n) and (0 <= j) and (j < self.n) and (self.E[i][j] is not None)
    def type(self, i: Any, j: Any):
        return self.E[i][j].type
    def edge(self, i: Any, j: Any):
        return self.E[i][j].data
    def weight(self, i: Any, j: Any):
        return self.E[i][j].weight
    def insert_edge(self, e, i: Any, j: Any, w):
        """Insert edge e = (i, j) with weight w; silently ignores duplicates."""
        if self.exists(i, j):
            return
        self.E[i][j] = Edge(e, w)
        self.e += 1
        self.V[i].outDegree += 1
        self.V[j].inDegree += 1
    def remove_edge(self, i: Any, j: Any):
        """Remove the edge between i and j; return its data (None when absent)."""
        if not self.exists(i, j):
            return
        eBak = self.edge(i, j)
        self.E[i][j] = None
        self.e -= 1
        self.V[i].outDegree -= 1
        self.V[j].inDegree -= 1
        return eBak
if __name__ == '__main__':
    # Build a 5-vertex chain 0->1->2->3->4 plus a self-loop on vertex 4,
    # then exercise every traversal starting from vertex 1.
    graph = GraphMatrix()
    v0 = Vertex(0)
    v1 = Vertex(1)
    v2 = Vertex(2)
    v3 = Vertex(3)
    v4 = Vertex(4)
    v = [v0, v1, v2, v3, v4]
    for x in v: graph.insert(x)
    graph.insert_edge(7, 0, 1, 0.1)
    graph.insert_edge(8, 1, 2, 0.1)
    graph.insert_edge(9, 2, 3, 0.1)
    graph.insert_edge(10, 3, 4, 0.1)
    graph.insert_edge(11, 4, 4, 0.1)  # self-loop
    # graph.remove(0)
    # print(graph.V)
    # print(graph.E)
    # graph.remove_edge(3, 3)
    # print(graph.V)
    # print(graph.E)
    # print(graph.n)
    # print(graph.e)
    graph.bfs(1)
    print()
    graph.dfs(1)
    print()
    graph.tSort(1)
    print()
    graph.bcc(1)
    pu = PrimPU()
    dpu = DijkstraPU()
    print()
    graph.pfs(1, pu)   # minimum spanning tree via Prim updates
    print()
    graph.pfs(1, dpu)  # shortest paths via Dijkstra updates
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
#Modelo de los productos
class Producto(models.Model):
    """Inventory product; on-hand stock is derived from purchase/sale lines."""
    _name = 'inventario.producto'
    name = fields.Char(string="Nombre", required=True)
    #duration = fields.Integer(string="Cantidad", required=True)
    #calculo_stock = fields.Integer(string = "Stock", compute = "_calculo_stock")
    price = fields.Monetary('Precio', 'currency_id')
    currency_id = fields.Many2one('res.currency')
    date_contract = fields.Date(string = "Fecha de llegada")
    existencia = fields.Integer(compute='_calculo_stock')  # computed stock on hand
    active = fields.Boolean('Disponibilidad', default=True)
    categorias_id = fields.Many2one('inventario.categoria_producto', string="Categoría")
    # NOTE(review): @api.one was removed in Odoo 13 - presumably this targets
    # an older Odoo version; confirm before upgrading.
    @api.one
    def _calculo_stock(self):
        """Compute on-hand stock as total purchased minus total sold."""
        compras = self.env['compra.detalle'].search([('producto_id','=', self.id)])
        total_compra = 0
        for compra in compras:
            total_compra += compra.cantidad
        ventas = self.env['venta.detalle_boleta'].search([('producto_id','=', self.id)])
        total_venta = 0
        for venta in ventas:
            total_venta += venta.cantidad
        self.existencia = total_compra - total_venta
#Modelo de las categorias de productos
class CategoriaProducto(models.Model):
    """Product category grouping inventario.producto records."""
    _name = 'inventario.categoria_producto'
    name = fields.Char(string="Nombre", required=True)
    producto_ids = fields.One2many(
        'inventario.producto',
        'categorias_id',
        string = 'Productos')
    total_productos = fields.Integer(string = "Total Productos", compute = "_total_productos")
    # NOTE(review): @api.one was removed in Odoo 13 - presumably targets an
    # older Odoo version; confirm before upgrading.
    @api.one
    def _total_productos(self):
        """Count of products currently linked to this category."""
        self.total_productos = len(self.producto_ids)
|
#-------------------------------------------------------------------------------
# Name: DBSearchLoanBook
# Version: 1.0
# Purpose:
#
# Author: Matthew
#
# Created: 05/31/2014
# Copyright: (c) Matthew 2014
# Licence: <your licence>
# Modified: 05/31/2014
#-------------------------------------------------------------------------------
import MySQLdb
db = MySQLdb.connect("localhost","root","***","exchangedatabase")
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
print "Database version : %s " % data
ContractHeaderPrinted = False
ContractFound = False
SearchParameter = raw_input("Search by: ")
SearchParameter = SearchParameter.upper()
ParameterCheck = "DESCRIBE LoanBook"
try:
cursor.execute(ParameterCheck)
TableDescription = cursor.fetchall()
for Row in TableDescription:
TargetParameter = Row[0]
if TargetParameter.upper() == SearchParameter or SearchParameter == "CONTRACT NUMBER" or SearchParameter == "DATE ENTERED" or SearchParameter == "INTEREST COMPOUND RATE" or SearchParameter == "INTEREST RATE" or SearchParameter == "END POINT" or SearchParameter == "DIVIDEND TYPE" or SearchParameter == "MINIMUM BORROWER CONSTRAINTS" or SearchParameter == "USER INTERVENTION CONSTRAINTS" or SearchParameter == "USER REQUESTS":
break;
while TargetParameter.upper() != SearchParameter and SearchParameter != "CONTRACT NUMBER" and SearchParameter != "DATE ENTERED" and SearchParameter != "INTEREST COMPOUND RATE" and SearchParameter != "INTEREST RATE" and SearchParameter != "END POINT" and SearchParameter != "DIVIDEND TYPE" and SearchParameter != "MINIMUM BORROWER CONSTRAINTS" and SearchParameter != "USER INTERVENTION CONSTRAINTS" and SearchParameter != "USER REQUESTS":
print "Cannot search by that attribute. Please enter again:"
print "Choices: " + str([Row[0] for Row in TableDescription])
SearchParameter = raw_input("Search by: ")
SearchParameter = SearchParameter.upper()
for Row in TableDescription:
TargetParameter = Row[0]
if TargetParameter.upper() == SearchParameter or SearchParameter == "CONTRACT NUMBER" or SearchParameter == "DATE ENTERED" or SearchParameter == "INTEREST COMPOUND RATE" or SearchParameter == "INTEREST RATE" or SearchParameter == "END POINT" or SearchParameter == "DIVIDEND TYPE" or SearchParameter == "MINIMUM BORROWER CONSTRAINTS" or SearchParameter == "USER INTERVENTION CONSTRAINTS" or SearchParameter == "USER REQUESTS":
break;
except:
print "ERROR: Database execution unsuccessful"
'''Standardizing Parameter Names'''
if SearchParameter == "CONTRACTNUMBER":
SearchParameter = "CONTRACT NUMBER"
print "Searching by: Contract Number"
elif SearchParameter == "INTERESTCOMPOUNDRATE":
SearchParameter = "INTEREST COMPOUND RATE"
print "Searching by: Interest Compound Rate"
elif SearchParameter == "INTERESTRATE":
SearchParameter = "INTEREST RATE"
print "Searching by: Interest Rate"
elif SearchParameter == "ENDPOINT":
SearchParameter = "END POINT"
print "Searching by: End Point"
elif SearchParameter == "DIVIDENDTYPE":
SearchParameter = "DIVIDEND TYPE"
print "Searching by: Dividend Type"
elif SearchParameter == "MINIMUMBORROWERCONSTRAINTS":
SearchParameter = "MINIMUM BORROWER CONSTRAINTS"
print "Searching by: Minimum Borrower Constraints"
elif SearchParameter == "USERINTERVENTIONCONSTRAINTS":
SearchParameter = "USER INTERVENTION CONSTRAINTS"
print "Searching by: User Intervention Constraints"
elif SearchParameter == "DATEENTERED":
SearchParameter = "DATE ENTERED"
print "Searching by: Date Entered"
else:
print "Searching by: " + SearchParameter.title()
'''Prompting For Extra Inputs'''
if SearchParameter == "INTEREST COMPOUND RATE" or SearchParameter == "DURATION":
SearchValueInterval = raw_input("Search for interval: ")
SearchValueValue = raw_input("Search for value: ")
elif SearchParameter != "DATE ENTERED":
SearchValue = raw_input("Search for value: ")
else:
DateSearchYear = raw_input("Search Year: ")
try:
DateSearchYear = int(DateSearchYear)
DateSearchYearOmit = False
except:
if DateSearchYear == "":
DateSearchYearOmit = True
else:
DateSearchYearOmit = False
DateSearchMonth = raw_input("Search Month: ")
try:
DateSearchMonth = int(DateSearchMonth)
DateSearchMonthOmit = False
except:
if DateSearchMonth == "":
DateSearchMonthOmit = True
else:
DateSearchMonthOmit = False
DateSearchDay = raw_input("Search Day: ")
try:
DateSearchDay = int(DateSearchDay)
DateSearchDayOmit = False
except:
if DateSearchDay == "":
DateSearchDayOmit = True
else:
DateSearchDayOmit = False
DateSearchHour = raw_input("Search Hour: ")
try:
DateSearchHour = int(DateSearchHour)
DateSearchHourOmit = False
except:
if DateSearchHour == "":
DateSearchHourOmit = True
else:
DateSearchHourOmit = False
DateSearchMinute = raw_input("Search Minute: ")
try:
DateSearchMinute = int(DateSearchMinute)
DateSearchMinuteOmit = False
except:
if DateSearchMinute == "":
DateSearchMinuteOmit = True
else:
DateSearchMinuteOmit = False
DateSearchSecond = raw_input("Search Second: ")
try:
DateSearchSecond = int(DateSearchSecond)
DateSearchSecondOmit = False
except:
if DateSearchSecond == "":
DateSearchSecondOmit = True
else:
DateSearchSecondOmit = False
'''Checking Parameters'''
if SearchParameter == "CONTRACT NUMBER":
SearchValue = int(SearchValue)
try:
sql = "SELECT * FROM LoanBook WHERE ContractNumber = %d" % (SearchValue)
cursor.execute(sql)
Contract = cursor.fetchall()
print ""
if Contract != ():
Contract = Contract[0]
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: Loan"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "USERNAME":
SearchValue = str(SearchValue.capitalize())
try:
sql = """SELECT * FROM LoanBook WHERE Username = "%s" """ % (SearchValue)
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: Loan"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "MEDIUM":
SearchValue = str(SearchValue)
try:
sql = """SELECT * FROM LoanBook WHERE Medium = "%s" """ % (SearchValue)
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: Loan"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "VOLUME":
SearchValue = float(SearchValue)
try:
sql = "SELECT * FROM LoanBook WHERE Volume = %f" % (SearchValue)
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: Loan"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "ACTION":
SearchValue = str(SearchValue.capitalize())
try:
sql = """SELECT * FROM LoanBook WHERE Action = "%s" """ % (SearchValue)
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: Loan"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "INTEREST COMPOUND RATE":
SearchValueInterval = str(SearchValueInterval.upper())
SearchValueValue = float(SearchValueValue)
SearchValue = str(SearchValueValue) + " " + SearchValueInterval
#print "Interest Compound Rate: " + str(SearchValue)
try:
sql = """SELECT * FROM LoanBook WHERE InterestCompoundRate = "%s" """ % (SearchValue)
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: MTC"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "INTEREST RATE":
SearchValue = float(SearchValue)
try:
sql = "SELECT * FROM LoanBook WHERE InterestRate = %f" % (SearchValue)
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: Loan"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "DURATION":
SearchValueInterval = str(SearchValueInterval.upper())
SearchValueValue = float(SearchValueValue)
SearchValue = str(SearchValueValue) + " " + SearchValueInterval
#print "Duration: " + str(SearchValue)
try:
sql = """SELECT * FROM LoanBook WHERE Duration = "%s" """ % (SearchValue)
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: MTC"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "DIVIDEND TYPE":
SearchValue = str(SearchValue.capitalize())
try:
sql = """SELECT * FROM LoanBook WHERE DividendType = "%s" """ % (SearchValue)
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: MTC"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "MINIMUM BORROWER CONSTRAINTS":
SearchValue = int(SearchValue)
sql = "SELECT * FROM LoanBook WHERE MinimumBorrowerConstraints = %d" % (SearchValue)
try:
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: Loan"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if SearchParameter == "USER INTERVENTION CONSTRAINTS":
SearchValue = int(SearchValue)
sql = "SELECT * FROM LoanBook WHERE UserInterventionConstraints = %d" % (SearchValue)
try:
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if ContractHeaderPrinted != True:
print ""
print "Contracts that meet search criteria: "
ContractHeaderPrinted = True
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: Loan"
print "Action: " + Contract[4]
print "Medium: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
#Test SQL Checking After FormattedDate
if SearchParameter == "DATEENTERED" or SearchParameter == "DATE ENTERED":
try:
sql = "SELECT * FROM LoanBook"
cursor.execute(sql)
ContractList = cursor.fetchall()
for Contract in ContractList:
if DateSearchYearOmit == True:
YearValue = ""
for Character in str(Contract[13])[:4]:
YearValue += str(Character)
DateSearchYear = YearValue
if DateSearchMonthOmit == True:
MonthValue = ""
for Character in str(Contract[13])[5:7]:
MonthValue += str(Character)
DateSearchMonth = MonthValue
if DateSearchDayOmit == True:
DayValue = ""
for Character in str(Contract[13])[8:10]:
DayValue += str(Character)
DateSearchDay = DayValue
if DateSearchHourOmit == True:
HourValue = ""
for Character in str(Contract[13])[11:13]:
HourValue += str(Character)
DateSearchHour = HourValue
if DateSearchMinuteOmit == True:
MinuteValue = ""
for Character in str(Contract[13])[14:16]:
MinuteValue += str(Character)
DateSearchMinute = MinuteValue
if DateSearchSecondOmit == True:
SecondValue = ""
for Character in str(Contract[13])[17:19]:
SecondValue += str(Character)
DateSearchSecond = SecondValue
if DateSearchMonth < 10:
DateSearchMonth = "0" + str(DateSearchMonth)
if DateSearchDay < 10:
DateSearchDay = "0" + str(DateSearchDay)
if DateSearchHour < 10:
DateSearchHour = "0" + str(DateSearchHour)
if DateSearchMinute < 10:
DateSearchMinute = "0" + str(DateSearchMinute)
if DateSearchSecond < 10:
DateSearchSecond = "0" + str(DateSearchSecond)
if str(DateSearchYear) == str(Contract[13])[:4] and str(DateSearchMonth) == str(Contract[13])[5:7] and str(DateSearchDay) == str(Contract[13])[8:10] and str(DateSearchHour) == str(Contract[13])[11:13] and str(DateSearchMinute) == str(Contract[13])[14:16] and str(DateSearchSecond) == str(Contract[13])[17:19]:
if ContractHeaderPrinted != True:
ContractHeaderPrinted = True
print ""
print "Orders that meet search parameters:"
print ""
print "Contract Number: " + str(Contract[0])
print "Username: " + Contract[1]
print "Type: Loan"
print "Action: " + Contract[4]
print "Price: " + str(Contract[2])
print "Volume: " + str(Contract[3])
print "Date Entered: " + str(Contract[13])
ContractFound = True
except:
print "ERROR: Database fetch exception"
if ContractFound != True:
print ""
print "No orders meet search criteria"
db.close() |
from ibmcloudant import CloudantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
import os
#getting dotenv file
load_dotenv(verbose=True)
class Db_coneection():
    """Wrapper around a shared IBM Cloudant client.

    NOTE(review): the class name is misspelled ("coneection") but is kept
    because callers may import it under this name.
    """

    # Class-level attributes: created once at import time and shared by ALL
    # instances -- every Db_coneection object uses this same CloudantV1 client.
    authenticator = IAMAuthenticator(os.getenv('IBM_CLOUDANT_API_KEY'))
    service = CloudantV1(authenticator=authenticator)

    def __init__(self):
        # Point the shared client at the URL from the environment (.env file).
        self.service.set_service_url(os.getenv('IBM_CLOUDANT_URL'))

    def get_service(self):
        """Return the shared CloudantV1 client."""
        return self.service
|
#Implement a progam to convert the input string to lower case ( without using standard library)
str=input("string in uppercase letter\n")
new_string=''
for char in str:
#print(ch)
new_string+= chr(ord(char) + 32)
print("string in lowercase:",new_string) |
from django.urls import path
from webapp2.views import webapp2_view
# URL routing for the webapp2 Django app: a single /demo endpoint.
urlpatterns = [
    path('demo', webapp2_view),
]
|
''' #####################################################
''' # Auto Sell SBD in BitTrex v1
''' # by Murat Tatar
''' # January 2018
''' #####################################################
''' #####################################################
''' # --!-- WARNING! --!--
''' # YOU MUST, FIRST TRY WITH yesreal ==0
''' # WHILE SETUP OR CONFIGURATION
''' # YOU MAY LOSE SBD/STEEM
''' # !! ALL RESPONSIBILITIES YOUR OWN !!
''' ##################################################### '''
''' import needed moduls '''
from selenium import webdriver as web
from selenium.webdriver.common.keys import Keys
import win32api, win32con
# NOTE(review): 'time' is used below but never imported here; it presumably
# leaks in via the star import from controls -- verify, or import it explicitly.
from controls import *
''' #####################################################
''' # --!-- WARNING! --!--
''' # YOU MUST, FIRST TRY WITH yesreal ==0
''' # WHILE SETUP OR CONFIGURATION
''' # YOU MAY LOSE SBD/STEEM
''' # !! ALL RESPONSIBILITIES YOUR OWN !!
''' ##################################################### '''
# Dry-run flag: only when set to 1 does the script click the final Confirm
# button and actually place the sell order.
yesreal = 0
''' ##################################################### '''
def e():
    """Shorthand helper: terminate the script immediately."""
    raise SystemExit
''' ## Auto Sell SBD @ BitTrex ######################### '''
# NOTE(review): credentials are hard-coded in plain text -- move them to
# environment variables or a config file kept out of version control.
bitTrexUser = 'root@yahoo.com'
bitTrexPass = 'imnotroot123'
''' # Call the chromedriver '''
driver = web.Chrome("chromedriver.exe")
''' # set chromedriver window size and position '''
''' # if you get an error about the click location, '''
''' # you should set the pixel location '''
# Window geometry must match the hard-coded Cliq() pixel coordinates below.
driver.set_window_size(1360, 768)
driver.set_window_position(0,0)
''' # You have already logged in '''
logurl = 'https://bittrex.com/Account/Login'
driver.get(logurl)
driver.implicitly_wait(30)
time.sleep(1)
''' # bitTrex user is your login e-mail '''
userbox = driver.find_element_by_name('UserName')
userbox.send_keys(bitTrexUser)
time.sleep(.2)
keybox = driver.find_element_by_name('Password')
keybox.send_keys(bitTrexPass)
time.sleep(.2)
butonSL = driver.find_element_by_xpath(".//button[@class='g-recaptcha btn btn-primary login']")
time.sleep(.2)
keybox.send_keys(Keys.RETURN)
time.sleep(2)
''' # Wait for manuel loggin or recaptcha '''
time.sleep(48)
''' # Bring an address specific to SBD '''
sbdurl = 'https://bittrex.com/Market/Index?MarketName=BTC-SBD'
driver.get(sbdurl)
driver.implicitly_wait(30)
''' # Wait for driver done '''
time.sleep(7)
''' # find element created via javasctipt that there is NOT in Ctrl + U '''
''' ## like as <span data-bind="text: summary.lastUsd()">6.20</span> '''
js_code = '''
b = document.getElementsByTagName('span');
return b
'''
spns = driver.execute_script(js_code)
# NOTE(review): the price is taken from the 39th <span> on the page; this
# is extremely fragile and breaks as soon as BitTrex changes its layout.
x=0
for element in spns:
    element_text = element.text
    if x==38: price_text= element_text
    x= x+1
''' # may be you want, calculate %10+ and sell after that '''
sbd_price = float(price_text)
print 'SBD price: ', sbd_price
# Cliq / PressHoldRelease / Write come from the star import of controls and
# drive the UI by absolute screen coordinates.
''' # scroll down at page '''
Cliq(1346,427)
time.sleep(1)
''' # unit box '''
Cliq(950,245)
time.sleep(1)
''' # select all '''
PressHoldRelease('ctrl', 'a')
time.sleep(1)
''' # clear box '''
PressHoldRelease('del')
time.sleep(1)
''' # write amount '''
''' # you can adjust the units as you want amount. '''
''' # Or you can click on "Max button" using Clicq. '''
Write('1.25')
time.sleep(1)
''' # Price box '''
Cliq(800,285)
time.sleep(1)
''' # select bid '''
Cliq(877,344)
time.sleep(1)
''' # clik to Sell button '''
Cliq(988,423)
time.sleep(1)
''' # First, Read Warning! '''
# Only place the real order when the dry-run flag was switched off above.
if yesreal == 1:
    ''' # clik to Confirm '''
    Cliq(912,692)
e() |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
core.py
Created on `{% now 'local', '%Y-%m-%d' %}
by {{ cookiecutter.author_name }}
{{ cookiecutter.author_email }}
"""
import logging as log
def main():
    """Entry point for the generated project; real processing is still to be implemented."""
    # TODO: run its processes
if __name__ == "__main__":
    # Configure root logging (debug level, timestamped lines) before handing
    # control to main().
    log.basicConfig(level=log.DEBUG,
                    format='%(asctime)s %(message)s',
                    datefmt="%b %d %H:%M:%S %Z")
    main()
|
import unittest
from datetime import datetime
from oaipmh.formatters import oai_dc_openaire
class FetchPubTypeFromVocabularyTests(unittest.TestCase):
    """Maps JATS article-type values onto the info:eu-repo semantics vocabulary."""

    def test_research_article_returns_article(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('research-article'),
                         'info:eu-repo/semantics/article')

    # Renamed from test_article_commentary_returns_article: the asserted
    # mapping is 'other', so the old name contradicted the assertion.
    def test_article_commentary_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('article-commentary'),
                         'info:eu-repo/semantics/other')

    def test_book_review_returns_review(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('book-review'),
                         'info:eu-repo/semantics/review')

    def test_brief_report_returns_report(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('brief-report'),
                         'info:eu-repo/semantics/report')

    def test_case_report_returns_report(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('case-report'),
                         'info:eu-repo/semantics/report')

    def test_correction_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('correction'),
                         'info:eu-repo/semantics/other')

    def test_editorial_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('editorial'),
                         'info:eu-repo/semantics/other')

    def test_in_brief_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('in-brief'),
                         'info:eu-repo/semantics/other')

    def test_letter_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('letter'),
                         'info:eu-repo/semantics/other')

    def test_other_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('other'),
                         'info:eu-repo/semantics/other')

    def test_partial_retraction_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('partial-retraction'),
                         'info:eu-repo/semantics/other')

    def test_rapid_communication_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('rapid-communication'),
                         'info:eu-repo/semantics/other')

    def test_reply_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('reply'),
                         'info:eu-repo/semantics/other')

    def test_retraction_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('retraction'),
                         'info:eu-repo/semantics/other')

    def test_review_article_returns_article(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('review-article'),
                         'info:eu-repo/semantics/article')

    def test_unknown_value_returns_other(self):
        self.assertEqual(oai_dc_openaire.fetch_pubtype_from_vocabulary('foo'),
                         'info:eu-repo/semantics/other')
class MakeTitleTests(unittest.TestCase):
    """dc:title elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        # Titles are (language, text) pairs in the resource record.
        self.resource = {'title': [('en', 'foo'), ('en', 'bar')]}

    def test_titles_are_multivalued(self):
        title_elements = oai_dc_openaire.make_title(self.resource)
        self.assertEqual(len(title_elements), 2)

    def test_titles_are_ordered(self):
        title_elements = oai_dc_openaire.make_title(self.resource)
        self.assertEqual(title_elements[0].text, 'foo')
        self.assertEqual(title_elements[1].text, 'bar')

    def test_titles_have_no_attrs(self):
        title_elements = oai_dc_openaire.make_title(self.resource)
        for element in title_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        title_elements = oai_dc_openaire.make_title(self.resource)
        for element in title_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}title')
class MakeCreatorTests(unittest.TestCase):
    """dc:creator elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        self.resource = {'creator': ['foo', 'bar']}

    def test_creators_are_multivalued(self):
        creator_elements = oai_dc_openaire.make_creator(self.resource)
        self.assertEqual(len(creator_elements), 2)

    def test_creators_are_ordered(self):
        creator_elements = oai_dc_openaire.make_creator(self.resource)
        self.assertEqual(creator_elements[0].text, 'foo')
        self.assertEqual(creator_elements[1].text, 'bar')

    def test_creators_have_no_attrs(self):
        creator_elements = oai_dc_openaire.make_creator(self.resource)
        for element in creator_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        creator_elements = oai_dc_openaire.make_creator(self.resource)
        for element in creator_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}creator')
class MakeContributorTests(unittest.TestCase):
    """dc:contributor elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        self.resource = {'contributor': ['foo', 'bar']}

    def test_contributors_are_multivalued(self):
        contributor_elements = oai_dc_openaire.make_contributor(self.resource)
        self.assertEqual(len(contributor_elements), 2)

    def test_contributors_are_ordered(self):
        contributor_elements = oai_dc_openaire.make_contributor(self.resource)
        self.assertEqual(contributor_elements[0].text, 'foo')
        self.assertEqual(contributor_elements[1].text, 'bar')

    def test_contributors_have_no_attrs(self):
        contributor_elements = oai_dc_openaire.make_contributor(self.resource)
        for element in contributor_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        contributor_elements = oai_dc_openaire.make_contributor(self.resource)
        for element in contributor_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}contributor')
class MakeDescriptionTests(unittest.TestCase):
    """dc:description elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        # Descriptions are (language, text) pairs in the resource record.
        self.resource = {'description': [('en', 'foo'), ('en', 'bar')]}

    def test_descriptions_are_multivalued(self):
        description_elements = oai_dc_openaire.make_description(self.resource)
        self.assertEqual(len(description_elements), 2)

    # Renamed from test_contributors_are_ordered: copy-paste name from the
    # contributor test class; this class tests descriptions.
    def test_descriptions_are_ordered(self):
        description_elements = oai_dc_openaire.make_description(self.resource)
        self.assertEqual(description_elements[0].text, 'foo')
        self.assertEqual(description_elements[1].text, 'bar')

    # Renamed from test_contributors_have_no_attrs (same copy-paste error).
    def test_descriptions_have_no_attrs(self):
        description_elements = oai_dc_openaire.make_description(self.resource)
        for element in description_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        description_elements = oai_dc_openaire.make_description(self.resource)
        for element in description_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}description')
class MakePublisherTests(unittest.TestCase):
    """dc:publisher elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        self.resource = {'publisher': ['foo', 'bar']}

    def test_publishers_are_multivalued(self):
        publisher_elements = oai_dc_openaire.make_publisher(self.resource)
        self.assertEqual(len(publisher_elements), 2)

    def test_publishers_are_ordered(self):
        publisher_elements = oai_dc_openaire.make_publisher(self.resource)
        self.assertEqual(publisher_elements[0].text, 'foo')
        self.assertEqual(publisher_elements[1].text, 'bar')

    def test_publishers_have_no_attrs(self):
        publisher_elements = oai_dc_openaire.make_publisher(self.resource)
        for element in publisher_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        publisher_elements = oai_dc_openaire.make_publisher(self.resource)
        for element in publisher_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}publisher')
class MakeDateTests(unittest.TestCase):
    """dc:date elements: multivalued, order-preserving, ISO-formatted, DC namespace."""

    def setUp(self):
        self.resource = {'date': [datetime(2017, 6, 29), datetime(2016, 6, 1)]}

    def test_dates_are_multivalued(self):
        date_elements = oai_dc_openaire.make_date(self.resource)
        self.assertEqual(len(date_elements), 2)

    def test_dates_are_ordered(self):
        # datetimes are rendered as YYYY-MM-DD text.
        date_elements = oai_dc_openaire.make_date(self.resource)
        self.assertEqual(date_elements[0].text, '2017-06-29')
        self.assertEqual(date_elements[1].text, '2016-06-01')

    # Renamed from test_publishers_have_no_attrs: copy-paste name from the
    # publisher test class; this class tests dates.
    def test_dates_have_no_attrs(self):
        date_elements = oai_dc_openaire.make_date(self.resource)
        for element in date_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        date_elements = oai_dc_openaire.make_date(self.resource)
        for element in date_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}date')
class MakeTypeTests(unittest.TestCase):
    """dc:type elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        self.resource = {'type': ['foo', 'bar']}

    def test_type_are_multivalued(self):
        type_elements = oai_dc_openaire.make_type(self.resource)
        self.assertEqual(len(type_elements), 2)

    # Renamed from test_dates_are_ordered: copy-paste name from the date
    # test class; this class tests types.
    def test_types_are_ordered(self):
        type_elements = oai_dc_openaire.make_type(self.resource)
        self.assertEqual(type_elements[0].text, 'foo')
        self.assertEqual(type_elements[1].text, 'bar')

    # Renamed from test_publishers_have_no_attrs (same copy-paste error).
    def test_types_have_no_attrs(self):
        type_elements = oai_dc_openaire.make_type(self.resource)
        for element in type_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        type_elements = oai_dc_openaire.make_type(self.resource)
        for element in type_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}type')
class MakeFormatTests(unittest.TestCase):
    """dc:format elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        self.resource = {'format': ['foo', 'bar']}

    def test_formats_are_multivalued(self):
        format_elements = oai_dc_openaire.make_format(self.resource)
        self.assertEqual(len(format_elements), 2)

    def test_formats_are_ordered(self):
        format_elements = oai_dc_openaire.make_format(self.resource)
        self.assertEqual(format_elements[0].text, 'foo')
        self.assertEqual(format_elements[1].text, 'bar')

    def test_formats_have_no_attrs(self):
        format_elements = oai_dc_openaire.make_format(self.resource)
        for element in format_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        format_elements = oai_dc_openaire.make_format(self.resource)
        for element in format_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}format')
class MakeIdentifierTests(unittest.TestCase):
    """dc:identifier elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        self.resource = {'identifier': ['foo', 'bar']}

    def test_identifiers_are_multivalued(self):
        identifier_elements = oai_dc_openaire.make_identifier(self.resource)
        self.assertEqual(len(identifier_elements), 2)

    def test_identifiers_are_ordered(self):
        identifier_elements = oai_dc_openaire.make_identifier(self.resource)
        self.assertEqual(identifier_elements[0].text, 'foo')
        self.assertEqual(identifier_elements[1].text, 'bar')

    def test_identifiers_have_no_attrs(self):
        identifier_elements = oai_dc_openaire.make_identifier(self.resource)
        for element in identifier_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        identifier_elements = oai_dc_openaire.make_identifier(self.resource)
        for element in identifier_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}identifier')
class MakeLanguageTests(unittest.TestCase):
    """dc:language elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        self.resource = {'language': ['foo', 'bar']}

    def test_languages_are_multivalued(self):
        language_elements = oai_dc_openaire.make_language(self.resource)
        self.assertEqual(len(language_elements), 2)

    def test_languages_are_ordered(self):
        language_elements = oai_dc_openaire.make_language(self.resource)
        self.assertEqual(language_elements[0].text, 'foo')
        self.assertEqual(language_elements[1].text, 'bar')

    def test_languages_have_no_attrs(self):
        language_elements = oai_dc_openaire.make_language(self.resource)
        for element in language_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        language_elements = oai_dc_openaire.make_language(self.resource)
        for element in language_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}language')
class MakeMetadataTests(unittest.TestCase):
    """Smoke-test make_metadata with a fully-populated resource record."""

    def setUp(self):
        # A representative resource carrying every field the formatter consumes.
        self.resource = {
            'ridentifier': 'oai:arXiv:cs/0112017',
            'datestamp': datetime(2017, 6, 14),
            'setspec': ['set1', 'set2'],
            'title': [('en', 'MICROBIAL COUNTS OF DARK RED...')],
            'creator': ['Vieira, Francisco Cleber Sousa'],
            'subject': [('en', 'bacteria'), ('pt', 'bactéria')],
            'description': [('en', 'The number of colony forming units (CFU)...')],
            'publisher': ['Sociedade Brasileira de Microbiologia'],
            'contributor': ['Evans, R. J.'],
            'date': [datetime(1998, 9, 1)],
            'type': ['research-article'],
            'format': ['text/html'],
            'identifier': ['https://ref.scielo.org/7vy47j'],
            'source': ['Revista de Microbiologia v.29 n.3 1998'],
            'language': ['en'],
            'relation': [],
            'rights': ['http://creativecommons.org/licenses/by-nc/4.0/'],
        }

    def test_xml_creation(self):
        # Only the root tag is asserted; the per-field formatters are covered
        # by the dedicated test classes in this module.
        metadata = oai_dc_openaire.make_metadata(self.resource)
        self.assertEqual(metadata.tag, 'metadata')
class MakeSourceTests(unittest.TestCase):
    """dc:source elements: multivalued, order-preserving, attribute-free, DC namespace."""

    def setUp(self):
        self.resource = {'source': ['foo', 'bar']}

    def test_sources_are_multivalued(self):
        source_elements = oai_dc_openaire.make_source(self.resource)
        self.assertEqual(len(source_elements), 2)

    def test_sources_are_ordered(self):
        source_elements = oai_dc_openaire.make_source(self.resource)
        self.assertEqual(source_elements[0].text, 'foo')
        self.assertEqual(source_elements[1].text, 'bar')

    def test_sources_have_no_attrs(self):
        source_elements = oai_dc_openaire.make_source(self.resource)
        for element in source_elements:
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        source_elements = oai_dc_openaire.make_source(self.resource)
        for element in source_elements:
            self.assertEqual(element.tag,
                             '{http://purl.org/dc/elements/1.1/}source')
class MakeRightsTests(unittest.TestCase):
    """Tests for oai_dc_openaire.make_rights."""

    def setUp(self):
        self.resource = {'rights': ['foo', 'bar']}

    def test_rights_are_multivalued(self):
        elements = oai_dc_openaire.make_rights(self.resource)
        self.assertEqual(len(elements), 2)

    def test_rights_are_ordered(self):
        # Output order must follow the input list order.
        elements = oai_dc_openaire.make_rights(self.resource)
        self.assertEqual(elements[0].text, 'foo')
        self.assertEqual(elements[1].text, 'bar')

    def test_rights_have_no_attrs(self):
        for element in oai_dc_openaire.make_rights(self.resource):
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        for element in oai_dc_openaire.make_rights(self.resource):
            self.assertEqual(
                element.tag, '{http://purl.org/dc/elements/1.1/}rights')
class MakeSubjectTests(unittest.TestCase):
    """Tests for oai_dc_openaire.make_subject."""

    def setUp(self):
        # Subjects are (language, term) pairs; only the term is rendered.
        self.resource = {'subject': [('en', 'foo'), ('en', 'bar')]}

    def test_subjects_are_multivalued(self):
        elements = oai_dc_openaire.make_subject(self.resource)
        self.assertEqual(len(elements), 2)

    def test_subjects_are_ordered(self):
        elements = oai_dc_openaire.make_subject(self.resource)
        self.assertEqual(elements[0].text, 'foo')
        self.assertEqual(elements[1].text, 'bar')

    def test_subjects_have_no_attrs(self):
        for element in oai_dc_openaire.make_subject(self.resource):
            self.assertEqual(element.attrib, {})

    def test_dc_namespace(self):
        for element in oai_dc_openaire.make_subject(self.resource):
            self.assertEqual(
                element.tag, '{http://purl.org/dc/elements/1.1/}subject')
|
from flaskblog import create_app

# Used to inject debug to templates (http://flask.pocoo.org/docs/1.0/templating/#context-processors)
# @app.context_processor
# def inject_debug():
#     return dict(debug=app.debug)

# Application-factory call: builds the configured Flask app instance.
app = create_app()

# this allows you to run the app without "flask run"
# you can just type: "python flaskblog.py"
if __name__ == '__main__':
    # Debug server is for local development only -- never use in production.
    app.run(debug=True)

# run in dev mode:
#export FLASK_ENV=development
|
#!/bin/python
# NOTE(review): Python 2 code (raw_input, print statement) -- will not run
# under Python 3 without conversion.

# Read a line and print it as an int, or "Bad String" when it does not parse.
S = raw_input().strip()
try:
    i = int(S)  # python will error if it cannot make the specified conversion
    print i
except Exception as msg:  # msg holds the exception description
    #e = sys.exc_info()[0] # Gets the first line of the error message, may have to 'import sys'
    #write_to_page( "<p>Error: %s</p>" % e )
    print "Bad String"

# Disabled HackerRank exercise kept for reference (string literal, not executed).
"""
import math
#Write your code here
class Calculator:
    def power(self,n,p):
        if any((n < 0, p < 0)):
            raise ValueError('n and p should be non-negative')
        return int(math.pow(n,p))
myCalculator=Calculator()
T=int(raw_input())
for i in range(T):
    n,p = map(int, raw_input().split())
    try:
        ans=myCalculator.power(n,p)
        print ans
    except Exception,e:
        print e
"""
|
# coding=utf-8
import math
#G(s)=400/(s^2+50s)
class ControlledObjectOne:
    """Discrete-time model of the plant G(s) = 400 / (s^2 + 50*s).

    The difference equation in OutputPv() is the discretized form of the
    transfer function (coefficients precomputed for the controller's sample
    period).  Fix: state used to live in class attributes with no __init__,
    so until first assignment all instances shadowed shared class values;
    state is now initialized per instance.
    """

    def __init__(self):
        # Previous two control inputs u(k-1), u(k-2).
        self.__Uk_1 = 0.0
        self.__Uk_2 = 0.0
        # Current and previous two outputs y(k), y(k-1), y(k-2).
        self.__Yk = 0.0
        self.__Yk_1 = 0.0
        self.__Yk_2 = 0.0
        # Latest control input u(k).
        self.__Uk = 0.0

    def InputCv(self, Uk):
        """Receive the control value u(k) from the controller."""
        self.__Uk = Uk

    def OutputPv(self):
        """Advance the plant by one sample period and return the process value y(k)."""
        self.__Yk = (1.9512 * self.__Yk_1 - 0.9512 * self.__Yk_2
                     + 0.00019671 * self.__Uk_1 + 0.00019346 * self.__Uk_2)
        # Shift the input/output history for the next period.
        self.__Uk_2 = self.__Uk_1
        self.__Uk_1 = self.__Uk
        self.__Yk_2 = self.__Yk_1
        self.__Yk_1 = self.__Yk
        return self.__Yk
# Incremental (velocity-form) digital PID control algorithm.
class PIDcontroller:
    """Incremental digital PID controller.

    Gains follow the proportional-band convention: Kp = 1/Delta,
    Ki = Kp*TC/Ti, Kd = Kp*Td/TC, where TC is the sample period.
    Fix: error/output history used to live in class attributes; it is now
    initialized per instance in __init__ so instances cannot interact.
    """

    def __init__(self, TC, Delta, Ti, Td):
        """TC: sample period (ms); Delta: proportional band; Ti/Td: integral/derivative time."""
        self.__TC = TC
        self.__Delta = Delta
        self.__Ti = Ti
        self.__Td = Td
        # Error history e(k), e(k-1), e(k-2).
        self.__error = 0.0
        self.__error_1 = 0.0
        self.__error_2 = 0.0
        # Absolute output u(k), previous output u(k-1), increment du(k).
        self.__Uk = 0.0
        self.__Uk_1 = 0.0
        self.__dUk = 0.0
        self.__CalPidParameters()

    def __CalPidParameters(self):
        # Derive the three PID gains from the user-facing tuning parameters.
        self.__Kp = 1 / self.__Delta
        self.__Ki = self.__Kp * self.__TC / self.__Ti
        self.__Kd = self.__Kp * self.__Td / self.__TC

    def Exec(self, SV, PV):
        """Run one control period with setpoint SV and sampled value PV.

        Returns the absolute controller output u(k) = u(k-1) + du(k).
        """
        self.__error = SV - PV
        # Incremental PID: du(k) = Kp*(e-e1) + Ki*e + Kd*(e - 2*e1 + e2).
        self.__dUk = (self.__Kp * (self.__error - self.__error_1)
                      + self.__Ki * self.__error
                      + self.__Kd * (self.__error - 2 * self.__error_1 + self.__error_2))
        self.__Uk = self.__Uk_1 + self.__dUk
        # Shift history for the next period.
        self.__error_2 = self.__error_1
        self.__error_1 = self.__error
        self.__Uk_1 = self.__Uk
        return self.__Uk
# Closed-loop simulation: PID (TC=1 ms, Delta=0.2, Ti=800, Td=0.01) driving the plant.
pid1 = PIDcontroller(1,0.2,800,0.01)
co1 = ControlledObjectOne()
SvList = []
PvList = []
# 15000 periods of 1 ms (15 s simulated) with a constant setpoint step of 10.
for i in range(0,15000,1):
    sv = 10
    pv = co1.OutputPv()  # sample the process value from the controlled object
    cv = pid1.Exec(sv,pv)  # compute the control output value
    co1.InputCv(cv)  # feed the control value into the controlled object
    SvList.append(sv)
    PvList.append(pv)
# Plot setpoint (red) against process value (blue) over time.
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot(SvList,color='r', linestyle='-')
plt.plot(PvList,color='b', linestyle='-')
plt.xlabel('Time')
plt.ylabel('PV')
plt.show() |
# module1.py
def prog(n):
    """Return 'gaegul' repeated once, plus once more for every unit of n above 1.

    Iterative equivalent of the original recursion: each step above the base
    case (n <= 1) contributes one extra word.
    """
    words = ['gaegul']
    remaining = n
    while remaining > 1:
        words.append('gaegul')
        remaining -= 1
    return ' '.join(words)
# Module-level constant; importers see it as module1.maru.
maru = 'bulgom'

# Demo runs only when the module is executed directly, not on import.
if __name__ == '__main__':
    print('module1 starting')
    print(prog(7))
|
# coding: utf-8
# flake8: noqa
"""
NBA v3 Scores
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from sportsdata.nba_scores.nba_scores.default_api import DefaultApi
# import ApiClient
from sportsdata.nba_scores.api_client import ApiClient
from sportsdata.nba_scores.configuration import Configuration
# import models into sdk package
from sportsdata.nba_scores.models.nba_scores_game import NbaScoresGame
from sportsdata.nba_scores.models.nba_scores_news import NbaScoresNews
from sportsdata.nba_scores.models.nba_scores_opponent_season import NbaScoresOpponentSeason
from sportsdata.nba_scores.models.nba_scores_player import NbaScoresPlayer
from sportsdata.nba_scores.models.nba_scores_quarter import NbaScoresQuarter
from sportsdata.nba_scores.models.nba_scores_referee import NbaScoresReferee
from sportsdata.nba_scores.models.nba_scores_season import NbaScoresSeason
from sportsdata.nba_scores.models.nba_scores_stadium import NbaScoresStadium
from sportsdata.nba_scores.models.nba_scores_standing import NbaScoresStanding
from sportsdata.nba_scores.models.nba_scores_team import NbaScoresTeam
from sportsdata.nba_scores.models.nba_scores_team_game import NbaScoresTeamGame
from sportsdata.nba_scores.models.nba_scores_team_season import NbaScoresTeamSeason
|
import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
import numpy as np
import pandas as pd

# Minimal Streamlit demo: render a title, a caption, and a two-column table.
st.title('My first app')
st.write("Here's our first attempt at using data to create a table:")
st.write(pd.DataFrame({
    'first column': [1, 2, 3, 4],
    'second column': [10, 20, 30, 40]
}))
|
def judge_score(in_score):
    """Map a numeric score to a verdict string.

    Returns "Invalid score" outside [0, 100], "Excellent" for (90, 100],
    "Passable" for (50, 90], and "Bad" for [0, 50].
    """
    if in_score < 0 or in_score > 100:
        # Bug fix: the negative branch used to print and fall through,
        # returning None; both out-of-range cases now return the same string.
        return "Invalid score"
    if in_score > 90:
        return "Excellent"
    if in_score > 50:
        return "Passable"
    return "Bad"
# Read a score from stdin and print its verdict.
score = float(input("Enter score: "))
print(judge_score(score))
|
from cgo import write_go_mod
import re
from suffix import get_version_type, get_major, get_revision_type
from glide import get_hash
import requests
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
import json
import random
import pymysql
import chardet
from dealdep import deal_local_repo_dir, get_db_search, get_db_insert, deal_local_repo, get_requires_from_file
from missing import *
import os
import subprocess
from dealdep import get_mod_require, deal_dep_version, get_repo_name
from download import *
import hashlib
def get_results(url, headers):
    """GET *url* with *headers* and return the body decoded as JSON."""
    req = Request(url, headers=headers)
    body = urlopen(req).read()
    return json.loads(body.decode())
def get_token():
    """Read the GitHub API token from ../tokens/tk.txt.

    Note: duplicated in the download module.
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open('../tokens/tk.txt', 'r') as f:
        return f.read()
def get_headers():
    """Build GitHub API request headers carrying the personal access token."""
    return {
        'User-Agent': 'Mozilla/5.0',
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'Authorization': 'token ' + get_token(),
    }
def get_last_version(fullname):
    """Return (tag_name, semantic) for the newest release of a GitHub repo.

    *fullname* may carry a sub-directory after owner/repo; in that case only
    the first two path components are used for the API call.  Falls back to
    ('master', False) when the lookup fails.
    """
    headers = get_headers()
    parts = fullname.split('/')
    if len(parts) > 2:
        # sub-directory present: query the owning repository instead
        api_url = 'https://api.github.com/repos/' + parts[0] + '/' + parts[1]
    else:
        api_url = 'https://api.github.com/repos/' + fullname
    try:
        info = get_results(api_url, headers)
        return get_version(info['releases_url'])
    except Exception as exp:
        print("************** get search releases_url error", exp, '*******************************************')
        return 'master', False
def get_version(releases_url):
    """Resolve a GitHub releases_url template to (tag_name, semantic_flag).

    Returns ('', False) when the repo publishes no releases; otherwise the
    /latest release tag (falling back to the first listed release) and True.
    """
    headers = get_headers()
    listing = get_results(releases_url.replace('{/id}', ''), headers)
    if not listing:
        return '', False
    try:
        latest = get_results(releases_url.replace('{/id}', '/latest'), headers)
    except Exception as exp:
        latest = listing[0]
        print("When find version: get search error", exp, '-------------------------------------------------------')
    return latest['tag_name'], True
def get_last_hash(repo_name):
    """Return the 7-char short hash of the newest commit of owner/repo, or '' on failure."""
    parts = repo_name.split('/')
    fullname = parts[0] + '/' + parts[1]
    # e.g. https://api.github.com/repos/robfig/cron/commits
    url = 'https://api.github.com/repos/' + fullname + '/commits'
    headers = get_headers()
    try:
        commits = get_results(url, headers)
        return commits[0]["sha"][0:7]
    except Exception as exp:
        print("************** get search releases_url error", exp, '*******************************************')
        return ''
def get_last_version_or_hashi(repo_name, search_e):
    """Return (version_tag, short_hash, search_e) for the newest revision of *repo_name*.

    Without a semantic release, the latest commit hash is used.  With one,
    the hash is scraped from the release tree page via get_hash().
    """
    v_hashi = ''
    (v_name, semantic) = get_last_version(repo_name)
    if not semantic:
        v_hashi = get_last_hash(repo_name)
    else:
        url = "https://github.com/" + repo_name + '/tree/' + v_name
        # Bug fix: the result used to be bound to a throwaway name (v_hash),
        # so the returned hash was always '' on this path.
        (v_hashi, search_e) = get_hash(url, search_e)
    return v_name, v_hashi, search_e
def check_repo_exist_web(repo_name):
    """Probe github.com for *repo_name*: 1 = exists, -1 = missing, 0 = request failed."""
    url = 'https://github.com/' + repo_name.replace('github.com/', '').strip()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0'
    }
    try:
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.content.decode('utf-8'), "lxml")
        page = str(soup).strip('').replace('\n', '')
        # GitHub's 404 page pulls in _error.js and says "Not Found".
        missing = (re.findall(r"https://github.githubassets.com/_error.js", page)
                   or re.findall(r"Not Found", page))
        return -1 if missing else 1
    except Exception as exp:
        print("get repo error:", exp, "**************************************************")
        return 0
def check_version_exist(repo_name, repo_version):
    """Probe github.com for a specific tag/branch tree page.

    Return codes: -1 when the tree page shows GitHub's error page; otherwise
    -2 when the repo itself is found on the web, pass-through of the probe
    result (-1/0) when it is not; 0 on a failed request.
    NOTE(review): the success page mapping to -2 looks inverted -- confirm
    against callers before relying on the sign.
    """
    url = ('https://github.com/' + repo_name.replace('github.com/', '').strip()
           + '/tree/' + repo_version.strip())
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0'
    }
    try:
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.content.decode('utf-8'), "lxml")
        page = str(soup).strip('').replace('\n', '')
        missing = (re.findall(r"https://github.githubassets.com/_error.js", page)
                   or re.findall(r"Not Found", page))
        if missing:
            return -1
        repo_exit = check_repo_exist_web(repo_name)
        if repo_exit == 1:
            repo_exit = -2
        # -1 and 0 from check_repo_exist_web are passed through unchanged
        return repo_exit
    except Exception as exp:
        print("get repo error:", exp, "**************************************************")
        return 0
def check_repo_exist(repo_name):
    """Check repo existence: web probe first, GitHub API fallback on probe failure.

    Returns 1 when found, -1 when missing.
    """
    name = repo_name.replace('github.com/', '').strip()
    repo_exit = check_repo_exist_web(name)
    if repo_exit != 0:
        return repo_exit
    repo_url = 'https://api.github.com/repos/' + name
    headers = get_headers()
    try:
        page_detail = get_results(repo_url, headers)
        if 'message' in page_detail and page_detail['message'] == 'Not Found':
            return -1
        return 1
    except Exception as exp:
        print("The repo name is not correct: ", exp, '**************************************************')
        return -1
def check_repo_valid(in_repo_name, in_version):
    """Validate a repo/version pair: 0 = ok, 1 = repo missing, 2 = version missing."""
    name = in_repo_name.replace('github.com/', '')
    if check_version_exist(name, in_version) > 0:
        # web probe already confirmed the pair
        return 0
    headers = get_headers()
    try:
        # Listing the tree contents at the requested ref proves both exist.
        get_results('https://api.github.com/repos/' + name + '/contents?ref=' + in_version, headers)
        return 0
    except Exception as exp:
        print("Maybe cannot find version: ", exp, '**************************************************')
    try:
        # Repo reachable without the ref -> only the version is wrong.
        get_results('https://api.github.com/repos/' + name, headers)
        print(name, 2, 'The repo version name is not correct!')
        return 2
    except Exception as exp:
        print(name, 1, 'The repo name is not correct:', exp, '*************************')
        return 1
def check_repo_db_v_name(repo_name, repo_version):
    """Fetch rows of the `repo` table matching repo_name + v_name.

    Returns the result tuple(s) when found, None when absent or on error.
    """
    check_db_name = 'repo'
    (host, user, password, db_name) = get_db_insert()
    # Parameterized query: the previous version %-interpolated user-controlled
    # strings straight into the SQL (injection risk).
    sql = "SELECT * FROM " + check_db_name + " WHERE repo_name = %s AND v_name = %s"
    db_check = pymysql.connect(host=host, user=user, password=password, database=db_name)
    try:
        with db_check.cursor() as check_cursor:
            check_cursor.execute(sql, (repo_name, repo_version))
            check_result = check_cursor.fetchall()
        if check_result:
            return check_result
        return None
    except Exception as exp:
        print(check_db_name, " error",
              exp, '%%%%%%%%%%%%%')
        return None
    finally:
        # Always release the connection, even on error (previously leaked).
        db_check.close()
def check_repo_db_v_hash(repo_name, repo_hash):
    """Fetch rows of the `repo` table matching repo_name + v_hash.

    Returns the result tuple(s) when found, None when absent or on error.
    """
    check_db_name = 'repo'
    (host, user, password, db_name) = get_db_insert()
    # Parameterized query: the previous version %-interpolated user-controlled
    # strings straight into the SQL (injection risk).
    sql = "SELECT * FROM " + check_db_name + " WHERE repo_name = %s AND v_hash = %s"
    db_check = pymysql.connect(host=host, user=user, password=password, database=db_name)
    try:
        with db_check.cursor() as check_cursor:
            check_cursor.execute(sql, (repo_name, repo_hash))
            check_result = check_cursor.fetchall()
        if check_result:
            return check_result
        return None
    except Exception as exp:
        print(check_db_name, " error",
              exp, '%%%%%%%%%%%%%')
        return None
    finally:
        # Always release the connection, even on error (previously leaked).
        db_check.close()
def check_repo_db_for_valid(repo_name, repo_version, repo_hash):
    """Return 0 when the repo is already recorded in the DB (by version or hash), else -1."""
    if repo_version != "":
        row = check_repo_db_v_name(repo_name, repo_version)
    else:
        row = check_repo_db_v_hash(repo_name, repo_hash)
    if not row:
        return -1
    print(row[0])
    return 0
def get_redirect_repo(old_repo):
    """Look up the current name of a renamed repo in repo_name_update; '' when unknown."""
    check_db_name = 'repo_name_update'
    (host, user, password, db_name) = get_db_search()
    # Parameterized query (was %-interpolated: injection-prone) and keyword
    # connect args (positional connect args were removed in PyMySQL 1.0;
    # also matches the style of the other DB helpers in this file).
    sql = ("SELECT now_repo_name FROM " + check_db_name +
           " WHERE now_repo_name!='0' and old_repo_name=%s")
    db_check = pymysql.connect(host=host, user=user, password=password, database=db_name)
    try:
        with db_check.cursor() as check_cursor:
            check_cursor.execute(sql, (old_repo,))
            check_result = check_cursor.fetchall()
        if check_result:
            return check_result[0][0]
        return ''
    except Exception as exp:
        print("get redirected repo name from ", check_db_name, " error",
              exp, '%%%%%%%%%%%%%')
        print(sql)
        return ''
    finally:
        # Always release the connection (previously leaked on error).
        db_check.close()
def get_new_url(old_url):
    """Look up a migrated URL in the new_web_name table; '' when unknown.

    Matches both the bare path and its 'github.com/'-prefixed form.
    """
    check_db_name = 'new_web_name'
    (host, user, password, db_name) = get_db_search()
    # Parameterized query (was %-interpolated: injection-prone) and keyword
    # connect args (positional connect args were removed in PyMySQL 1.0).
    sql = ("SELECT now_url FROM " + check_db_name +
           " WHERE old_url=%s or old_url=%s")
    db_check = pymysql.connect(host=host, user=user, password=password, database=db_name)
    try:
        with db_check.cursor() as check_cursor:
            check_cursor.execute(sql, (old_url, 'github.com/' + old_url))
            check_result = check_cursor.fetchall()
        if check_result:
            return check_result[0][0]
        return ''
    except Exception as exp:
        print("2. get new url from ", check_db_name, " error",
              exp, '%%%%%%%%%%%%%')
        print(sql)
        return ''
    finally:
        # Always release the connection (previously leaked on error).
        db_check.close()
def get_diffs(reqlist, all_direct_r, all_direct_dep):
    """Compare the working package's go.mod against *reqlist*.

    Returns [dep_entry, code] pairs: code 1 = dependency absent from reqlist,
    code 2 = present but at a different version.  Replace-entries (flag 3)
    are skipped.  *all_direct_r* and *all_direct_dep* are unused here.
    """
    requires = []
    replaces = []
    mod_dep_list = []
    diffs = []
    (requires, replaces) = get_mod_require('./pkg/hgfgdsy=migtry@v0.0.0/go.mod', requires, replaces)
    # Classify each go.mod require line: 3 = replace, 2 = indirect, 1 = normal.
    for m in requires:
        dep = m.replace('+replace', '').replace('// indirect', '').strip().split(' ')
        if len(dep) > 1:
            dep_version = deal_dep_version(dep[1])
            if re.findall(r"\+replace", m) and dep:
                mod_dep_list.append([dep[0], dep_version, 3])  # replace
            elif re.findall(r"// indirect", m) and dep:
                mod_dep_list.append([dep[0], dep_version, 2])  # dep from old repo
            elif dep:
                mod_dep_list.append([dep[0], dep_version, 1])  # normal
    # Match every classified dependency against reqlist.
    for d in mod_dep_list:
        repo = d[0]
        ver = d[1]
        rec = None
        recver = ''
        if d[2] == 3:
            # replace entries are handled elsewhere
            continue
        for r in reqlist:
            vr = r[1]
            if vr[0] != 'v':
                # not a semantic tag: compare on the 7-char commit-hash prefix
                vr = vr[0:7]
            if r[0] == repo:
                rec = r
                if vr == ver:
                    recver = vr
                    break
        if rec is None:  # a new dependency
            diffs.append([d, 1])
        else:
            if recver == '':
                # repo found in reqlist, but at a different version
                diffs.append([d, 2])
    return diffs
def out_to_list(a, b):
    """Parse command output *b* into the list of dependency-chain lines.

    Drops ERROR: lines, skips the first two remaining (header) lines, then
    keeps every non-empty line that looks like a module path (a dot followed
    later by a slash).  *a* is unused but kept for call compatibility.
    """
    kept = [ln for ln in b.split('\n') if not re.findall(r'^ERROR:', ln)]
    return [ln for ln in kept[2:]
            if ln != '' and re.findall(r'^.+?\..+?/', ln)]
def download_a_repo(repo, version):
    """Ensure repo@version is unpacked under ./pkg.

    Returns [0, pkg_dir_name] on success or cache hit, [1, ''] for an
    unresolvable name, [-1, ''] on download failure.
    """
    if re.findall(r'^github.com/', repo):
        repo_name = repo.replace('github.com/', '')
    else:
        (repo_name, siv_path) = get_repo_name(repo)
    if repo_name == '':
        return [1, '']
    pkg_name = repo_name.replace('/', '=') + '@' + version
    if os.path.isdir('./pkg/' + pkg_name):
        # already downloaded and unpacked
        return [0, pkg_name]
    fetcher = DOWNLOAD([repo_name, version])
    fetcher.down_load_unzip()
    if fetcher.download_result == -1:
        return [-1, '']
    return [0, pkg_name]
def write_modify_to_mod(modifies):
    """Rewrite ./pkg/hgfgdsy=migtry@v0.0.0/go.mod, substituting the version of
    every require entry whose module path appears in *modifies*.

    modifies: list of [module_path, new_version] pairs.
    """
    repos = []
    dic = {}
    for m in modifies:
        repos.append(m[0])
        dic[m[0]] = m[1]
    f = open('./pkg/hgfgdsy=migtry@v0.0.0/go.mod', 'r')
    go_mod_content = f.read()
    require_part = go_mod_content.replace('"', '')
    f.close()
    requires_list = []
    # requires wrapped in parentheses: require ( ... )
    mod_requires = re.findall(r"require\s*\(\n*(.+?)\n*\)", require_part, re.S)
    if mod_requires:
        require_l = mod_requires[0].split('\n')
        for require_r in require_l:
            require_r = require_r.strip().replace('+incompatible', '')
            # skip pure comment lines and duplicates
            if require_r and (not re.findall(r"^//.+?", require_r)) and (require_r not in requires_list):
                requires_list.append(require_r)
    # single-line requires that are not wrapped in parentheses
    mod_requires = re.findall(r"^require\s+([^(]+?)$", require_part, re.M)
    for require_r in mod_requires:
        require_r = require_r.strip().replace('+incompatible', '')
        if require_r and (require_r not in requires_list):
            requires_list.append(require_r)
    # Build the new require entries, substituting versions for modified repos.
    ansr = []
    for r in requires_list:
        temp = r.split()
        if temp[0] in repos:
            msg = temp[0] + ' ' + dic[temp[0]]
            ansr.append(msg)
        else:
            ansr.append(r)
    # Re-emit the file: copy everything before the require block, write one
    # consolidated parenthesized require block, then everything from the
    # replace section onward.  tag: 0 = before require, 1 = inside the old
    # require region, 2 = past it; label records whether replace was seen.
    tag = 0
    msg = ''
    lines = go_mod_content.split('\n')
    label = 0
    for line in lines:
        if re.findall(r'^require\s*', line):
            tag = 1
        if tag == 0:
            msg = msg + line + '\n'
            continue
        if tag == 1:
            if re.findall(r"^replace", line):
                tag = 2
                label = 1
                msg = msg + 'require' + ' (' + '\n'
                for r in ansr:
                    msg = msg + r + '\n'
                msg = msg + ')\n'
            else:
                continue
        if tag == 2:
            msg = msg + line + '\n'
    if label == 0:
        # No replace section found: rebuild from scratch, appending the
        # require block right where the first require line appeared.
        msg = ''
        tag = 0
        for line in lines:
            if re.findall(r'^require\s*', line):
                tag = 1
            if tag == 0:
                msg = msg + line + '\n'
                continue
            if tag == 1:
                msg = msg + 'require' + ' (' + '\n'
                for r in ansr:
                    msg = msg + r + '\n'
                msg = msg + ')\n'
                break
    f = open('./pkg/hgfgdsy=migtry@v0.0.0/go.mod', 'w')
    f.write(msg)
    f.close()
# get all require
# mod_requires = re.findall(r"require\s*\(\n*(.+?)\n*\)", require_part, re.S) # 括号括起来的requires
# if mod_requires:
# require_l = mod_requires[0].split('\n')
# for require_r in require_l:
# require_r = require_r.strip().replace('+incompatible', '')
#
# # (not re.findall(r"^[0-9a-zA-Z]+?/[0-9a-zA-Z]+?$", require_r))
# # and (not re.findall(r"^[0-9a-zA-Z]+?$", require_r)) and
# if require_r and (not re.findall(r"^//.+?", require_r)):
# rp = require_r[0]
# if rp in repos:
#
# # print(require_r)
# mod_requires = re.findall(r"^require\s+([^(]+?)$", require_part, re.M) # 不是括号括起来的requires
# for require_r in mod_requires:
# require_r = require_r.strip().replace('+incompatible', '')
# if require_r and (require_r not in requires_list):
# requires_list.append(require_r)
def write_extra_rps_to_mod(rps):
    """Append replace directives to ./pkg/hgfgdsy=migtry@v0.0.0/go.mod.

    rps: list of (module_path, target, version) triples.  Existing replace
    entries are preserved and merged into a single `replace ( ... )` block.
    """
    # 'with' closes the read handle (previously leaked).
    with open('./pkg/hgfgdsy=migtry@v0.0.0/go.mod', 'r') as f:
        go_mod_content = f.read()
    lines = go_mod_content.split('\n')
    label = 0
    msg = ''
    asr = []
    for line in lines:
        if re.findall(r'^replace', line):
            label = 1
            if re.findall(r'=>', line):
                # single-line form: `replace a => b v1`
                asr.append(line.replace('replace', '').strip())
                break
        if label == 0:
            # Bug fix: the original dropped the newline here, collapsing the
            # whole preamble onto one line and corrupting the rewritten go.mod.
            msg = msg + line + '\n'
        if label == 1:
            if re.findall(r'\)', line):
                break
            if re.findall(r'=>', line):
                asr.append(line.replace('replace', '').strip())
    msg = msg + '\n'
    msg = msg + 'replace (' + '\n'
    for rep in asr:
        msg = msg + rep + '\n'
    for r in rps:
        msg = msg + r[0] + ' ' + ' => ' + r[1] + ' ' + r[2] + '\n'
    msg = msg + ')\n'
    with open('./pkg/hgfgdsy=migtry@v0.0.0/go.mod', 'w') as f:
        f.write(msg)
    return
def hash_name(repo):
    """Return the hex SHA-1 digest of *repo*, used as a filesystem-safe key."""
    return hashlib.sha1(repo.encode('utf-8')).hexdigest()
def write_modify_to_go_file(old, new, file_url):
    """Replace every occurrence of *old* with *new* in the file at *file_url*.

    The encoding is sniffed with chardet; undetectable or EUC-TW content is
    decoded as UTF-8 with errors ignored.
    """
    # 'with' guarantees handle cleanup even when the decode raises.
    with open(file_url, 'rb') as f:
        raw = f.read()
    char_info = chardet.detect(raw)
    if not char_info['encoding'] or char_info['encoding'] == 'EUC-TW':
        # chardet gave up (or picked EUC-TW, which Python cannot decode):
        # fall back to permissive UTF-8.
        file_content = raw.decode('utf-8', 'ignore')
    else:
        file_content = raw.decode(char_info['encoding'], errors='ignore')
    # Plain textual substitution; callers pre-check the import list so this
    # only runs on files that actually reference *old*.
    with open(file_url, 'w') as f:
        f.write(file_content.replace(old, new))
    return
def modify_go_files(old, new, file_url):
    """Rewrite *file_url* only when its import list actually references *old*."""
    imports_found = get_requires_from_file(file_url, [])
    if any(imp == old for imp in imports_found):
        write_modify_to_go_file(old, new, file_url)
    return
def add_suffix(old, new, repo_url, go_list):
    """Apply the old -> new module-path rename to every go file under *repo_url*."""
    for rel_path in go_list:
        modify_go_files(old, new, repo_url + rel_path)
    return
def check_now_repo(old):
    # Thin wrapper: resolve a moved repo URL via the new_web_name table.
    return get_new_url(old)
def check_redirected(old, github_repo_name):
    """Resolve redirection for a dependency.

    Returns (0, '') when not redirected, (1, repo) for a redirect staying on
    github.com (host prefix stripped), (2, path) for a redirect elsewhere.
    """
    new_path = get_redirect_repo(github_repo_name)
    if new_path != '':
        return 1, new_path
    new_path = check_now_repo(old)
    if new_path == '':
        return 0, ''
    # Bug fix: re.findall returns a LIST, so the old comparison
    # `domain == 'github.com'` was always False and github-hosted redirects
    # were misclassified as external (code 2).
    domain = re.findall(r'^([^/]+?)/', new_path)
    if domain and domain[0] == 'github.com':
        return 1, new_path.replace('github.com/', '')
    return 2, new_path
def revision_major(origin_repo_name, file_type_descriptor, errors, redirected, replaces, requires,
                   reqlist, github_repo_name, r, repo_url, go_list):
    """Record require/replace entries for a dependency pinned by commit revision.

    Consolidates the original copy-pasted branches: every use_version case
    records the same require/replace bookkeeping, differing only in the
    module-path suffix applied for major versions >= 2.  Returns the four
    mutated lists (errors, replaces, requires, reqlist).
    """
    use_version = get_revision_type(github_repo_name, r.Revision)

    def _record(require_path, replace_path):
        # One bookkeeping step, honouring redirection: a redirected repo is
        # pinned via a replace directive and required at v0.0.0.
        if redirected == 1:
            replaces.append((origin_repo_name, replace_path, r.Revision))
            requires.append(origin_repo_name + ' ' + 'v0.0.0')
            reqlist.append([origin_repo_name, 'v0.0.0'])
        else:
            requires.append(require_path + ' ' + r.Revision)
            reqlist.append([origin_repo_name, r.Revision])

    if use_version == -10:
        # revision lookup failed entirely
        errors.append(MessageMiss(origin_repo_name, r.Revision, -10, file_type_descriptor))
        _record(origin_repo_name, 'github.com/' + github_repo_name)
    if use_version == -1:
        print("It should not occur!(where major version doesn't equal to version in module path)")
    if use_version == 0:
        # no go.mod in the destination package
        _record(origin_repo_name, 'github.com/' + github_repo_name)
    if use_version == 1:
        # has go.mod but no version suffix in its module path
        _record(origin_repo_name, 'github.com/' + github_repo_name)
    if use_version >= 2:
        # major version >= 2 requires the /vN suffix in the module path
        suffix = '/' + 'v' + str(use_version)
        _record(origin_repo_name + suffix, 'github.com/' + github_repo_name + suffix)
        errors.append(MessageMiss(origin_repo_name, r.Revision, 7, file_type_descriptor))
        add_suffix(origin_repo_name, origin_repo_name + suffix, repo_url, go_list)
    return errors, replaces, requires, reqlist
def re_module_path(f_content, modulediff):
    """Parse `go` error output for module-path mismatches.

    Scans for the pattern
        <mod>@<version>: parsing go.mod:
        module declares its path as: <real>
        but was required as: <required>
    and appends (real, required, version) triples to *modulediff*, which is
    returned.
    """
    lines = f_content.split('\n')
    label = 0
    real = ''
    # Bug fix: *version* was only assigned inside the version-line branch, so
    # a mismatch line arriving before any version line raised NameError.
    version = ''
    for line in lines:
        if label == 1:
            # previous line declared the real path; expect the required-as line
            require = re.findall(r'but was required as: (.*)$', line)
            if require and version != '' and real != '':
                modulediff.append((real, require[0], version))
            version = ''
            real = ''
            label = 0
            continue
        versionll = re.findall(r'@(.*): parsing go.mod:$', line)
        if versionll:
            version = versionll[0]
            continue
        reallist = re.findall(r'module declares its path as: (.*)$', line)
        if reallist:
            real = reallist[0]
            label = 1
    return modulediff
def simple_repo_exist(repo):
    """Quick existence check: 0 if the GitHub API resolves the repo, 1 if not,
    -1 for paths that are not hosted on github.com."""
    if not re.findall(r'^github.com/', repo):
        return -1
    header = get_headers()
    # Bug fix: stripping 'github.com' without the slash left a leading '/',
    # producing 'repos//owner/name' which the API rejects.
    repo_url = 'https://api.github.com/repos/' + repo.replace('github.com/', '')
    try:
        get_results(repo_url, header)
        return 0
    except Exception as exp:
        print('repo', repo, 'does not exist, cause is ####', exp, '####')
        return 1
def download_extra_repo(need, version):
    """Fetch need@version into the extra_module_path_wrong_pkgs cache.

    Returns [0, relative_pkg_path] on success or cache hit, [-1, ''] on
    download failure.
    """
    pkg_name = need.replace('github.com/', '').replace('/', '=') + '@' + version
    rel_path = './extra_module_path_wrong_pkgs/' + pkg_name
    if os.path.isdir('./pkg/hgfgdsy=migtry@v0.0.0/extra_module_path_wrong_pkgs/' + pkg_name):
        # already cached
        return [0, rel_path]
    fetcher = DOWNLOAD([need, version])
    fetcher.down_load_unzip_extra()
    if fetcher.download_result == -1:
        return [-1, '']
    return [0, rel_path]
def module_path_wrong(rps, need, real, version):
    """Queue a replace entry pointing *need* at a locally downloaded copy.

    *real* is unused but kept for call compatibility.  Returns *rps*.
    """
    (status, local_path) = download_extra_repo(need, version)
    if status == 0:
        rps.append((need, local_path, ''))
    return rps
def read_in_file(pathname, file_type_descriptor, rrf, input_module_path):
dic_rec_ver = {}
errors = []
replaces = []
if file_type_descriptor != 0:
# path = os.path.join(pathname, 'Gopkg.lock')
# f = open(path)
# data = f.read()
# f.close()
# reference = parse_gopkg_lock(file_type_descriptor, data)
reference = rrf
repo_id = re.findall(r'/([^/]+?)$', pathname)[0]
requires = []
reqlist = []
upgrade_list = []
go_list = deal_local_repo_dir(repo_id, 0, reference)
nd_path = os.path.join('.', 'pkg')
repo_url = os.path.join(nd_path, repo_id)
(all_direct_r, all_direct_dep) = deal_local_repo_dir(repo_id, 1, reference)
count = 0
shut_down = 0
for d in all_direct_dep:
redirected = 0
r = all_direct_r[count]
count = count + 1
origin_repo_name = d[2]
github_repo_name = d[0]
if r.Version != '':
if d[4] != '':
github_repo_name = d[4]
replaces.append((origin_repo_name, r.Source, r.Version))
requires.append(origin_repo_name + ' ' + r.Version)
reqlist.append([origin_repo_name, r.Version])
continue
path = 'github.com/' + github_repo_name
if github_repo_name != '':
# valid = check_repo_db_for_valid(github_repo_name, r.Version, "")
# if valid == -1:
valid = check_repo_valid(path, r.Version)
new_path = ''
if valid == 1:
new_path = get_redirect_repo(github_repo_name)
if new_path == '':
err = MessageMiss(origin_repo_name, r.Version, 1, file_type_descriptor)
errors.append(err)
shut_down = 1
break
# new_path = get_new_url(path)
else:
# replaces.append((origin_repo_name, 'github.com/' + new_path, r.Version))
github_repo_name = new_path
redirected = 1
valid = 0
err = MessageMiss(origin_repo_name, 'github.com/' + github_repo_name, 8, file_type_descriptor)
errors.append(err)
if redirected == 0:
(redirected, new_path) = check_redirected(origin_repo_name, github_repo_name)
if redirected == 2:
err = MessageMiss(origin_repo_name, new_path, 8,
file_type_descriptor)
use_version = get_version_type(github_repo_name, r.Version)
if use_version >= 2:
replaces.append((origin_repo_name,
new_path + '/' + 'v' + str(use_version),
r.Version))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
continue
errors.append(err)
replaces.append((origin_repo_name, new_path, r.Version))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
continue
elif redirected == 1:
err = MessageMiss(origin_repo_name, 'github.com/' + new_path, 8,
file_type_descriptor)
errors.append(err)
github_repo_name = new_path
if valid == 2:
err = MessageMiss(origin_repo_name, r.Version, 2, file_type_descriptor)
errors.append(err)
# valid = check_repo_db_for_valid(origin_repo_name, "", r.Revision)
#
# if valid == -1:
valid = check_repo_valid(path, r.Revision)
if origin_repo_name == 'github.com/kataras/iris':
print(valid)
if valid == 2: # TODO get last version here
(v_name, v_hash, search_e) = get_last_version_or_hashi(github_repo_name, 0)
print('This repo is ' + origin_repo_name + ', and its version gone. \n vname is ' + v_name +
', v_hash is ' + v_hash)
if v_name != '':
if redirected == 1:
replaces.append((origin_repo_name, 'github.com/' + github_repo_name, v_name))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
else:
requires.append(origin_repo_name + ' ' + v_name)
reqlist.append([origin_repo_name, v_name])
err = MessageMiss(origin_repo_name, v_name, 3, file_type_descriptor)
errors.append(err)
continue
elif v_hash != '':
if redirected == 1:
replaces.append((origin_repo_name, 'github.com/' + github_repo_name, v_hash))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
else:
requires.append(origin_repo_name + ' ' + v_hash)
reqlist.append([origin_repo_name, v_hash])
err = MessageMiss(origin_repo_name, v_hash, 3, file_type_descriptor)
errors.append(err)
continue
if valid == 0:
(errors, replaces, requires, reqlist) = revision_major(origin_repo_name,
file_type_descriptor, errors,
redirected, replaces,
requires, reqlist,
github_repo_name, r,
repo_url, go_list)
continue
else:
err = MessageMiss(origin_repo_name, r.Revision, 4, file_type_descriptor)
errors.append(err)
continue
use_version = get_version_type(github_repo_name, r.Version)
if use_version == -11:
if redirected == 1:
replaces.append((origin_repo_name, 'github.com/' + github_repo_name, r.Version))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
else:
requires.append(origin_repo_name + ' ' + r.Version)
reqlist.append([origin_repo_name, r.Version])
if use_version == -10:
err = MessageMiss(origin_repo_name, r.Version, -10, file_type_descriptor)
errors.append(err)
if redirected == 1:
replaces.append((origin_repo_name, 'github.com/' + github_repo_name, r.Version))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
else:
requires.append(origin_repo_name + ' ' + r.Version)
reqlist.append([origin_repo_name, r.Version])
if use_version == -1:
i = re.findall(r'gopkg.in/', origin_repo_name)
if not i:
raw_replaces_suffix = []
raw_replaces_suffix = module_path_wrong(raw_replaces_suffix, origin_repo_name, ' ',
r.Version)
if raw_replaces_suffix:
reqlist.append((origin_repo_name, 'v0.0.0'))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
replaces.append(raw_replaces_suffix[0])
print("It should not occur!(where major version doesn't equal to version in module path)")
if use_version == 0: # no go.mod in dst pkg
i = re.findall(r'gopkg.in/', origin_repo_name)
if not i:
if redirected == 1:
replaces.append((origin_repo_name, 'github.com/' + github_repo_name, r.Version))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
else:
requires.append(origin_repo_name + ' ' + r.Version + '+incompatible')
reqlist.append([origin_repo_name, r.Version])
if use_version == 1: # has go.mod but in module path no version suffix
i = re.findall(r'gopkg.in/', origin_repo_name)
if not i:
if redirected == 1:
replaces.append((origin_repo_name, 'github.com/' + github_repo_name, r.Revision))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
else:
requires.append(origin_repo_name + ' ' + r.Revision)
reqlist.append([origin_repo_name, r.Revision])
else:
requires.append(origin_repo_name + ' ' + r.Version)
reqlist.append([origin_repo_name, r.Version])
if use_version >= 2:
if redirected == 1:
replaces.append((origin_repo_name, 'github.com/' + github_repo_name + '/' + 'v' + str(use_version), r.Version))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
else:
requires.append(origin_repo_name + '/' + 'v' + str(use_version) + ' ' + r.Version)
reqlist.append([origin_repo_name, r.Version])
err = MessageMiss(origin_repo_name, r.Version, 7, file_type_descriptor)
errors.append(err)
add_suffix(origin_repo_name, origin_repo_name + '/' + 'v' + str(use_version), repo_url, go_list)
else:
requires.append(origin_repo_name + ' ' + r.Version)
reqlist.append([origin_repo_name, r.Version])
else:
if d[4] != '':
github_repo_name = d[4]
replaces.append((origin_repo_name, r.Source, r.Revision))
requires.append(origin_repo_name + ' ' + r.Revision)
reqlist.append([origin_repo_name, r.Revision])
continue
path = 'github.com/' + github_repo_name
if github_repo_name != '':
# valid = check_repo_db_for_valid(github_repo_name, "", r.Revision)
#
# if valid == -1:
valid = check_repo_valid(path, r.Revision)
if valid == 1:
new_path = get_redirect_repo(github_repo_name)
if new_path == '':
err = MessageMiss(origin_repo_name, r.Revision, 1, file_type_descriptor)
errors.append(err)
shut_down = 1
break
else:
# replaces.append((origin_repo_name, 'github.com/' + new_path, r.Revision))
github_repo_name = new_path
redirected = 1
valid = 0
err = MessageMiss(origin_repo_name, 'github.com/' + github_repo_name, 8,
file_type_descriptor)
errors.append(err)
if redirected == 0:
(redirected, new_path) = check_redirected(origin_repo_name, github_repo_name)
if redirected == 2:
err = MessageMiss(origin_repo_name, new_path, 8,
file_type_descriptor)
errors.append(err)
replaces.append((origin_repo_name, new_path, r.Version))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
continue
elif redirected == 1:
err = MessageMiss(origin_repo_name, 'github.com/' + github_repo_name, 8,
file_type_descriptor)
errors.append(err)
github_repo_name = new_path
if valid == 2: # TODO get latest version or hash here
(v_name, v_hash, search_e) = get_last_version_or_hashi(github_repo_name, 0)
if v_name != '':
if redirected == 1:
replaces.append((origin_repo_name, 'github.com/' + github_repo_name, v_name))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
else:
requires.append(origin_repo_name + ' ' + v_name)
reqlist.append([origin_repo_name, v_name])
err = MessageMiss(origin_repo_name, v_name, 3, file_type_descriptor)
errors.append(err)
continue
elif v_hash != '':
if redirected == 1:
replaces.append((origin_repo_name, 'github.com/' + github_repo_name, v_hash))
requires.append(origin_repo_name + ' ' + 'v0.0.0')
reqlist.append([origin_repo_name, 'v0.0.0'])
else:
requires.append(origin_repo_name + ' ' + v_hash)
reqlist.append([origin_repo_name, v_hash])
err = MessageMiss(origin_repo_name, v_hash, 3, file_type_descriptor)
errors.append(err)
continue
if valid != 0:
err = MessageMiss(origin_repo_name, r.Revision, 4, file_type_descriptor)
errors.append(err)
continue
(errors, replaces, requires, reqlist) = revision_major(origin_repo_name,
file_type_descriptor, errors,
redirected, replaces,
requires, reqlist,
github_repo_name, r,
repo_url, go_list)
# if redirected == 1:
# replaces.append((origin_repo_name, 'github.com/' + github_repo_name, r.Revision))
# requires.append(origin_repo_name + ' ' + 'v0.0.0')
# reqlist.append([origin_repo_name, 'v0.0.0'])
# else:
# requires.append(origin_repo_name + ' ' + r.Revision)
# reqlist.append([origin_repo_name, r.Revision])
else:
requires.append(origin_repo_name + ' ' + r.Revision)
reqlist.append([origin_repo_name, r.Revision])
if shut_down == 1:
print('some dependency has missing')
msg = tackle_errors(errors)
return [0, msg]
for r in reference:
if r not in all_direct_r:
if r.Source != '':
path = r.Path
if r.Version != '':
replaces.append((r.Path, r.Source, r.Version))
else:
replaces.append((r.Path, r.Source, r.Revision))
else:
path = r.Path
if r.Version != '':
major = get_major(r.Version)
if int(major) >= 2:
if re.findall(r'$github\.com/', r.Source):
use_version = get_version_type(r.Source, r.Version)
if use_version < 2:
requires.append(path + ' ' + r.Version)
reqlist.append([path, r.Version])
else:
requires.append(path + ' ' + r.Version)
reqlist.append([path, r.Version])
elif r.Revision != '':
(repo_name, siv_path) = get_repo_name(path)
if repo_name != '':
use_version = get_revision_type(repo_name, r.Revision)
if use_version >= 2:
continue
requires.append(path + ' ' + r.Revision)
reqlist.append([path, r.Revision])
# TODO write a initial go.mod
write_go_mod(requires, replaces, reqlist, input_module_path)
repnames = []
for rep in replaces:
repnames.append(rep[0])
(a, b) = subprocess.getstatusoutput('cd pkg/hgfgdsy=migtry@v0.0.0 && go mod tidy')
f = open('../tokens/recordtidy.txt', 'w+')
f.write(b)
f.close()
raw_replaces = []
rpsfirst = []
if a != 0:
bcon = re.findall('module declares its path as', b)
if not bcon:
print('encounter errors when migrate(in go mod tidy)')
msg = tackle_errors(errors)
return [1, msg]
else:
mm = []
mm = re_module_path(b, mm)
if mm:
for r in mm:
real = r[0]
need = r[1]
version = r[2]
valid = simple_repo_exist(need)
if valid == -1:
print('encounter errors when migrate')
msg = tackle_errors(errors)
return [2, msg]
else:
raw_replaces = module_path_wrong(raw_replaces, need, real, version)
rpsfirst.append((need, 'v0.0.0'))
write_modify_to_mod(rpsfirst)
write_extra_rps_to_mod(raw_replaces)
diffs = get_diffs(reqlist, all_direct_r, all_direct_dep)
modifies = []
f = open('../tokens/recordwhy.txt', 'w+')
f.write(b)
f.close()
for dif in diffs: # 可以优化
after = dif[0]
diff_type = dif[1]
print(after[0])
(a, b) = subprocess.getstatusoutput('cd pkg/hgfgdsy=migtry@v0.0.0 && go mod why ' + after[0])
f = open('../tokens/recordwhy.txt', 'a+')
f.write(b)
f.close()
chain = out_to_list(a, b) # chain is start with the project itself
length = len(chain)
print(length)
for i in chain:
print(i)
print('\n')
if length == 1 or length == 0:
continue
if chain[0] in repnames:
continue
now_dep_list = []
for d in all_direct_dep:
now_dep_list.append([d[2], d[1]])
if diff_type == 1:
moditag = 0
rec_name = ''
rec_version = ''
cnt = 0
for repo in chain:
ver = ''
if not now_dep_list:
err = MessageMiss(repo, chain[0], 9, file_type_descriptor)
errors.append(err)
moditag = 1
break
for d in now_dep_list:
if d[0] == repo:
ver = d[1]
moditag = 1
break
if ver == '':
err = MessageMiss(repo, chain[0], 5, file_type_descriptor)
errors.append(err)
moditag = 1
break
else:
cnt = cnt + 1
rec_name = repo
rec_version = ver
if cnt >= length:
break
hname = str(hash_name(repo))
if hname in dic_rec_ver.keys():
now_dep_list = dic_rec_ver[hname]
else:
ret = download_a_repo(repo, ver)
if ret[0] != 0:
err = MessageMiss(repo, chain[0], 6, file_type_descriptor)
errors.append(err)
moditag = 1
break
all_deps = deal_local_repo_dir(ret[1], 2, [])
dic_rec_ver[hname] = all_deps
now_dep_list = all_deps
if rec_name != '' and rec_version != '' and moditag == 0:
if rec_version != after[1]:
err = MessageMiss(after[1], chain[0], 90, file_type_descriptor)
errors.append(err)
modifies.append([rec_name, rec_version])
else:
moditag = 0
rec_name = ''
rec_version = ''
cnt = 0
for repo in chain:
ver = ''
if not now_dep_list:
err = MessageMiss(repo, chain[0], 9, file_type_descriptor)
errors.append(err)
moditag = 1
break
for d in now_dep_list:
if d[0] == repo:
ver = d[1]
moditag = 1
break
if ver == '':
err = MessageMiss(repo, chain[0], 5, file_type_descriptor)
errors.append(err)
moditag = 1
break
else:
cnt = cnt + 1
rec_name = repo
rec_version = ver
if cnt >= length:
break
hname = str(hash_name(repo))
if hname in dic_rec_ver.keys():
now_dep_list = dic_rec_ver[hname]
else:
ret = download_a_repo(repo, ver)
if ret[0] != 0:
err = MessageMiss(repo, chain[0], 6, file_type_descriptor)
errors.append(err)
moditag = 1
break
all_deps = deal_local_repo_dir(ret[1], 2, [])
dic_rec_ver[hname] = all_deps
now_dep_list = all_deps
if rec_name != '' and rec_version != '' and moditag == 0:
if rec_version != after[1]:
err = MessageMiss(after[1], chain[0], 90, file_type_descriptor)
errors.append(err)
modifies.append([rec_name, rec_version])
write_modify_to_mod(modifies)
msg = tackle_errors(errors)
return [3, msg]
else:
f = open(pathname + "/glide.lock")
data = f.read() |
import datetime
from django.http import HttpResponsePermanentRedirect
from django.conf import settings
from django.core.cache import cache
from django.core.files import storage
from django.core.xheaders import populate_xheaders
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import dates
from django.utils.http import urlquote
from middleware import get_current_user
import os
import random
import subprocess
def renders(template, request_context=True, mimetype=None):
    """Shortcut decorator for render_to_response; takes a template filename.

    The wrapped view may return a dict, which is rendered with *template*
    (or the dict's own 'template' entry). Any non-dict return value passes
    through untouched. When the dict carries a unique 'object', X-headers
    are populated for it; a 'canonical' entry sets Content-Location. One
    random novelty X- header is attached to every rendered response.
    """
    def dec(f):
        def new_view(req, *args, **kwargs):
            result = f(req, *args, **kwargs)
            if not isinstance(result, dict):
                return result
            tmpl = result.get('template', template)
            ctx = request_context and RequestContext(req) or None
            resp = render_to_response(tmpl, result, context_instance=ctx,
                                      mimetype=mimetype)
            # If there is a unique object for this view, add headers.
            obj = result.get('object')
            if obj is not None:
                populate_xheaders(req, resp, obj.__class__, obj.pk)
            if 'canonical' in result:
                resp['Content-Location'] = result['canonical']
            header, val = random.choice(extra_headers)
            resp['X-' + header.replace(' ', '-')] = val
            return resp
        # Impersonate the original view function.
        new_view.__name__ = f.__name__
        new_view.__module__ = f.__module__
        return new_view
    return dec
# Novelty HTTP headers; renders() picks one at random per response and
# attaches it as "X-<name>: <value>".
# NOTE(review): relying on .items() returning an appendable list is
# Python 2 only; under Python 3 the append calls below raise
# AttributeError (dict views have no append).
extra_headers = {
    'Godot': 'waiting', 'Coffee': 'hotter and more bitter than Hell itself',
    'They Told Me': '"Kid, you\'re special. You\'ll do great things."'
    ' You know what? They were right.',
    'Chandrasekhar Limit': '1.4 solar masses', 'Spiral Power': 'infinite',
    'Sagittarius A*': 'four million solar masses',
    'Singularity': 'impossible to observe', 'Buster Machine #7': 'Nono',
    'Schwarzchild Radius': 'decreasing', 'Header': 'mispelled',
    'Kyon-kun': 'denwa', 'Policy 9': 'violated',
    'Lawsuit': 'pending', 'YUKI.N': 'sleeping beauty'}.items()
# Duplicate keys are fine here: extra_headers is a list of pairs, not a dict.
extra_headers.append(("Schrodinger's cat", 'dead'))
extra_headers.append(("Schrodinger's cat", 'alive'))
def send_file(url):
    """Redirect permanently to *url*, adding X-Sendfile for media paths.

    When *url* lives under MEDIA_URL, the X-Sendfile header is set to the
    corresponding filesystem path so the web server can serve the file
    directly.
    """
    response = HttpResponsePermanentRedirect(url)
    media_prefix = settings.MEDIA_URL
    if url.startswith(media_prefix):
        relative = url[len(media_prefix):]
        response['X-Sendfile'] = settings.MEDIA_ROOT + relative
    return response
def unescape(html):
    """Return the given HTML with ampersands, quotes and carets decoded.

    Non-string inputs are stringified first. '&amp;' is decoded LAST so
    that escaped entity text such as '&amp;lt;' becomes '&lt;' rather
    than being double-unescaped into '<'.
    """
    if not isinstance(html, basestring):
        html = str(html)
    return (html.replace('&lt;', '<').replace('&gt;', '>')
                .replace('&quot;', '"').replace('&#39;', "'")
                .replace('&amp;', '&'))
def cache_with_key(key_func, not_found=object()):
    """Decorator factory caching a function's result under key_func(*args).

    The cache key is suffixed with a staff flag so staff and non-staff
    users never share cached entries. *not_found* is a private sentinel
    distinguishing a cache miss from a cached falsy value.
    """
    def decorate(f):
        def decorated(*args, **kwargs):
            base_key = key_func(*args, **kwargs)
            user = get_current_user()
            staff_flag = 'Y' if (user and user.is_staff) else 'N'
            full_key = base_key + '_staff' + staff_flag
            hit = cache.get(full_key, not_found)
            if hit is not not_found:
                return hit
            result = f(*args, **kwargs)
            cache.set(full_key, result)
            return result
        # Impersonate the wrapped function for debugging/introspection.
        decorated.__name__ = f.__name__
        decorated.__module__ = f.__module__
        return decorated
    return decorate
def date_tuple(date):
    """Return (year, abbreviated-month-name, day) for *date*."""
    month_name = unicode(dates.MONTHS_3[date.month])
    return (date.year, month_name, date.day)
def parse_ymd(y, m, d):
    """Parse year / 3-letter-month / day strings into a datetime.date.

    Returns None when the components do not form a valid date (unknown
    month abbreviation, non-numeric or out-of-range values).
    """
    try:
        return datetime.date(int(y), dates.MONTHS_3_REV[m], int(d))
    except (ValueError, TypeError, KeyError):
        # Narrowed from a bare ``except:``: only conversion or month-lookup
        # failures mean "not a date"; anything else should propagate.
        return None
def format_ymd(y, m, d):
    """Format year / 3-letter-month / day strings as 'YYYY-MM-DD'.

    Returns None when the components are not a valid date.
    """
    parsed = parse_ymd(y, m, d)
    if parsed is None:
        return None
    return parsed.strftime('%Y-%m-%d')
class FileSystemStorage(storage.FileSystemStorage):
    """Storage backend whose URLs have each path segment percent-encoded."""

    def url(self, name):
        """Urlencode the file path properly."""
        parts = name.split(os.path.sep)
        quoted = '/'.join(urlquote(part) for part in parts)
        return super(FileSystemStorage, self).url(quoted)
def imagemagick(cmd, *args):
    """Run an ImageMagick command and return its stripped stdout.

    Only 'identify' and 'convert' are allowed (AssertionError otherwise).
    Raises Exception carrying the captured stderr when the tool exits
    with a non-zero status.
    """
    assert cmd in ('identify', 'convert')
    proc = subprocess.Popen(
        (cmd,) + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise Exception(err.strip())
    return out.strip()
# Sanity check at import time: fail fast if ImageMagick is not installed.
# NOTE(review): ``except Exception, e`` is Python 2 syntax; this module
# cannot be imported under Python 3 as written.
try:
    imagemagick('identify', '-version')
except Exception, e:
    raise Exception("There is a problem with imagemagick: %s" % e)
# vi: set sw=4 ts=4 sts=4 tw=79 ai et nocindent:
|
import os
import math
import pickle
import random
import argparse
from util import read_pickles
from sys import argv
import operator as op
from functools import reduce
def ncr(n, r):
    """Return the binomial coefficient C(n, r) as a float."""
    r = min(r, n - r)
    numerator = 1
    for k in range(n, n - r, -1):
        numerator *= k
    denominator = 1
    for k in range(1, r + 1):
        denominator *= k
    return numerator * 1.0 / denominator
def get_phones(alphabet):
    """Return (vowels, consonants, phones) for a phone alphabet.

    *alphabet* may be 'arpabet', 'graphemic', or a path to a phone-list
    file (one phone per line; the first whitespace-separated token is
    used). For a file, vowel/consonant membership is unknown, so
    (None, None, phones) is returned. 'sil' (silence) is always grouped
    with the consonants.
    """
    if alphabet == 'arpabet':
        vowels = ['aa', 'ae', 'eh', 'ah', 'ea', 'ao', 'ia', 'ey', 'aw', 'ay', 'ax', 'er', 'ih', 'iy',
                  'uh', 'oh', 'oy', 'ow', 'ua', 'uw']
        consonants = ['el', 'ch', 'en', 'ng', 'sh', 'th', 'zh', 'w', 'dh', 'hh', 'jh', 'em', 'b', 'd',
                      'g', 'f', 'h', 'k', 'm', 'l', 'n', 'p', 's', 'r', 't', 'v', 'y', 'z'] + ['sil']
        phones = vowels + consonants
        return vowels, consonants, phones
    if alphabet == 'graphemic':
        vowels = ['a', 'e', 'i', 'o', 'u']
        # BUGFIX: the consonant list previously included 'o', which is a
        # vowel and so appeared twice in the combined phone list.
        consonants = ['b', 'c', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'p', 'q', 'r', 's', 't',
                      'v', 'w', 'x', 'y', 'z'] + ['sil']
        phones = vowels + consonants
        return vowels, consonants, phones
    with open(alphabet, 'r') as fin:
        return None, None, [l.split()[0] for l in fin.readlines()]
def get_phone_instances(pkl):
    """Group PLP feature entries by speaker and phone label.

    *pkl* maps 'plp' and 'phone' to parallel nested lists indexed as
    [speaker][utterance][word][phone]. Returns a dict of the form
    {speaker_index: {phone_label: {instance_index: plp_features}}}.
    """
    instances = {}
    for spk, utterances in enumerate(pkl['plp']):
        per_speaker = {}
        instances[spk] = per_speaker
        for utt, words in enumerate(utterances):
            for w, phone_feats in enumerate(words):
                for ph in range(len(phone_feats)):
                    label = pkl['phone'][spk][utt][w][ph]
                    bucket = per_speaker.setdefault(label, {})
                    bucket[len(bucket)] = phone_feats[ph]
    return instances
def get_pairs(instance_dict,N_total):
    """Sample labelled phone-instance pairs from all speakers.

    For each speaker, draws roughly N_total/len(instance_dict) same-phone
    pairs (label 0) proportionally to each phone's pair count, plus
    cross-phone pairs (label 1). Returns {index: (feat_a, feat_b, label)}.

    NOTE(review): ``f`` is 0 when every phone has a single instance, and
    the sqrt denominator is 0 when a speaker has exactly one phone — both
    would raise ZeroDivisionError; presumably the data never hits these
    cases, verify against callers.
    """
    pairs = {}
    for spk in instance_dict:
        instances = instance_dict[spk]
        if len(instances) > 0:
            # Per-speaker share of the requested pair budget.
            N = N_total*1.0/len(instance_dict)
            ks = [k for k in instances]
            # Scale factor so same-phone pairs total roughly N per speaker.
            f = sum([ncr(len(instances[ph]),2) for ph in instances])*1.0/N
            for ph in instances:
                # Number of same-phone pairs to draw for this phone.
                n = int(math.ceil(ncr(len(instances[ph]), 2)*1.0/f))
                ln = len(instances[ph])
                if ln < n:
                    # Few instances: pair each one with several partners.
                    for j in range(ln):
                        for i in random.sample(range(j+1,ln), min(int(round(n*1.0/ln)),ln-j-1)):
                            pairs[len(pairs)] = (instances[ph][j],instances[ph][i],0)
                else:
                    # Many instances: sample n anchors, one partner each.
                    for j in random.sample(range(ln), n):
                        for i in random.sample(range(j+1,ln),min(1,ln-j-1)):
                            pairs[len(pairs)] = (instances[ph][j],instances[ph][i],0)
            # Cross-phone (label 1) pairs: n instances per phone pairing.
            n = int(math.ceil(math.sqrt(2.0*N/(len(instances)*(len(instances)-1)))))
            for j in range(len(instances)):
                for l in random.sample(range(len(instances[ks[j]])), min(n, len(instances[ks[j]]))):
                    for i in range(j+1,len(instances)):
                        for m in random.sample(range(len(instances[ks[i]])),min(n,len(instances[ks[i]]))):
                            pairs[len(pairs)] = (instances[ks[j]][l],instances[ks[i]][m],1)
    return pairs
def pkl2pairs(path, out_path, N = 1000000):
    """Build same/different phone pairs from pickled features and save them.

    Reads feature pickles from *path*, generates labelled pairs (0 = same
    phone, 1 = different phones), downsamples uniformly to at most 2*N
    pairs, and pickles the result to *out_path* with protocol 2.
    """
    pkl = read_pickles(path)
    instances = get_phone_instances(pkl)
    pairs = get_pairs(instances, N)
    if len(pairs) > 2 * N:
        inds = [p for p in pairs]
        inds = random.sample(inds, 2 * N)
        pairs = {i: pairs[i] for i in inds}
    # BUGFIX: use a context manager so the output file handle is closed
    # (the old bare open() leaked the handle).
    with open(out_path, 'wb') as fout:
        pickle.dump(pairs, fout, protocol=2)
if __name__ == "__main__":
if not os.path.exists('CMDs'):
os.mkdir('CMDs')
with open('CMDs/get_pairs.cmds','a') as f:
f.write(' '.join(argv)+'\n')
parser = argparse.ArgumentParser()
parser.add_argument('-IN_PATH', nargs='+', required=True)
parser.add_argument('-OUT_PATH', required=True)
parser.add_argument('-N', required=False, default=100000, type=int)
parser.add_argument('-SEED', required=False, default=100, type=int)
args = parser.parse_args()
random.seed(args.SEED)
pkl2pairs(args.IN_PATH, args.OUT_PATH, args.N)
|
from gamegrid import *
import random
class Dwarf(Actor):
    """A named dwarf sprite; equality and ordering follow its size."""

    def __init__(self, name, size):
        Actor.__init__(self, "sprites/dwarf" + str(size) + ".png")
        self.name = name
        self.size = size

    def __eq__(self, other):          # ==
        return self.size == other.size

    def __ne__(self, other):          # !=
        return self.size != other.size

    def __gt__(self, other):          # >
        return self.size > other.size

    def __lt__(self, other):          # <
        return self.size < other.size

    def __ge__(self, other):          # >=
        return self.size >= other.size

    def __le__(self, other):          # <=
        return self.size <= other.size

    def __str__(self):                # str() function
        return self.name
def compare(dwarf1, dwarf2):
    """cmp-style comparator: -1, 0 or 1 for <, == and > respectively."""
    if dwarf1 > dwarf2:
        return 1
    if dwarf1 < dwarf2:
        return -1
    return 0
def updateGrid():
    """Redraw the grid: one dwarf sprite plus its name label per cell."""
    removeAllActors()
    for position, dwarf in enumerate(row):
        cell = Location(position, 0)
        addActor(dwarf, cell)
        addActor(TextActor(str(dwarf)), cell)
# Build a row of seven dwarfs (one per weekday name), shuffle them, then
# sort by size on a key press to demonstrate the comparison dunders.
n = 7
row = []
names = ["Monday", "Tuesday", "Wednesday", "Thursday",
         "Friday", "Saturday", "Sunday"]
makeGameGrid(n, 1, 170, Color.red, False)
setBgColor(Color.white)
show()
for i in range(0 , n):
    dwarf = Dwarf(names[i], i)
    row.append(dwarf)
random.shuffle(row)
updateGrid()
setTitle("Press any key to get result...")
getKeyCodeWait()
# NOTE(review): sorted(..., cmp=...) is Python 2 / Jython only; Python 3
# would need key=functools.cmp_to_key(compare).
row = sorted(row, cmp = compare)
updateGrid()
setTitle("All done.")
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unit tests for the fielddetail servlet."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mox
import unittest
import logging
import webapp2
from third_party import ezt
from framework import permissions
from proto import project_pb2
from proto import tracker_pb2
from services import service_manager
from testing import fake
from testing import testing_helpers
from tracker import fielddetail
from tracker import tracker_bizobj
from tracker import tracker_views
class FieldDetailTest(unittest.TestCase):
def setUp(self):
self.services = service_manager.Services(
user=fake.UserService(),
config=fake.ConfigService(),
project=fake.ProjectService())
self.servlet = fielddetail.FieldDetail(
'req', 'res', services=self.services)
self.project = self.services.project.TestAddProject('proj')
self.mr = testing_helpers.MakeMonorailRequest(
project=self.project, perms=permissions.OWNER_ACTIVE_PERMISSIONSET)
self.config = self.services.config.GetProjectConfig(
'fake cnxn', self.project.project_id)
self.services.config.StoreConfig('fake cnxn', self.config)
self.fd = tracker_bizobj.MakeFieldDef(
123, 789, 'CPU', tracker_pb2.FieldTypes.INT_TYPE, None,
'', False, False, False, None, None, '', False, '', '',
tracker_pb2.NotifyTriggers.NEVER, 'no_action', 'doc', False)
self.config.field_defs.append(self.fd)
self.services.user.TestAddUser('gatsby@example.com', 111)
self.services.user.TestAddUser('sport@example.com', 222)
self.mr.field_name = 'CPU'
# Approvals
self.approval_def = tracker_pb2.ApprovalDef(
approval_id=234, approver_ids=[111], survey='Question 1?')
self.sub_fd = tracker_pb2.FieldDef(
field_name='UIMocks', approval_id=234, applicable_type='')
self.sub_fd_deleted = tracker_pb2.FieldDef(
field_name='UIMocksDeleted', approval_id=234, applicable_type='',
is_deleted=True)
self.config.field_defs.extend([self.sub_fd, self.sub_fd_deleted])
self.config.approval_defs.append(self.approval_def)
self.approval_fd = tracker_bizobj.MakeFieldDef(
234, 789, 'UIReview', tracker_pb2.FieldTypes.APPROVAL_TYPE, None,
'', False, False, False, None, None, '', False, '', '',
tracker_pb2.NotifyTriggers.NEVER, 'no_action', 'doc', False)
self.config.field_defs.append(self.approval_fd)
self.mox = mox.Mox()
def tearDown(self):
self.mox.UnsetStubs()
self.mox.ResetAll()
def testGetFieldDef_NotFound(self):
self.mr.field_name = 'NeverHeardOfIt'
self.assertRaises(
webapp2.HTTPException,
self.servlet._GetFieldDef, self.mr)
def testGetFieldDef_Normal(self):
actual_config, actual_fd = self.servlet._GetFieldDef(self.mr)
self.assertEqual(self.config, actual_config)
self.assertEqual(self.fd, actual_fd)
def testAssertBasePermission_AnyoneCanView(self):
self.servlet.AssertBasePermission(self.mr)
self.mr.perms = permissions.COMMITTER_ACTIVE_PERMISSIONSET
self.servlet.AssertBasePermission(self.mr)
self.mr.perms = permissions.CONTRIBUTOR_ACTIVE_PERMISSIONSET
self.servlet.AssertBasePermission(self.mr)
self.mr.perms = permissions.READ_ONLY_PERMISSIONSET
self.servlet.AssertBasePermission(self.mr)
def testAssertBasePermission_MembersOnly(self):
self.project.access = project_pb2.ProjectAccess.MEMBERS_ONLY
# The project members can view the field definition.
self.servlet.AssertBasePermission(self.mr)
self.mr.perms = permissions.COMMITTER_ACTIVE_PERMISSIONSET
self.servlet.AssertBasePermission(self.mr)
self.mr.perms = permissions.CONTRIBUTOR_ACTIVE_PERMISSIONSET
self.servlet.AssertBasePermission(self.mr)
# Non-member is not allowed to view anything in the project.
self.mr.perms = permissions.EMPTY_PERMISSIONSET
self.assertRaises(
permissions.PermissionException,
self.servlet.AssertBasePermission, self.mr)
def testGatherPageData_ReadWrite(self):
page_data = self.servlet.GatherPageData(self.mr)
self.assertEqual(self.servlet.PROCESS_TAB_LABELS,
page_data['admin_tab_mode'])
self.assertTrue(page_data['allow_edit'])
self.assertEqual('', page_data['initial_admins'])
field_def_view = page_data['field_def']
self.assertEqual('CPU', field_def_view.field_name)
self.assertEqual(page_data['approval_subfields'], [])
self.assertEqual(page_data['initial_approvers'], '')
def testGatherPageData_ReadOnly(self):
self.mr.perms = permissions.READ_ONLY_PERMISSIONSET
page_data = self.servlet.GatherPageData(self.mr)
self.assertEqual(self.servlet.PROCESS_TAB_LABELS,
page_data['admin_tab_mode'])
self.assertFalse(page_data['allow_edit'])
self.assertEqual('', page_data['initial_admins'])
field_def_view = page_data['field_def']
self.assertEqual('CPU', field_def_view.field_name)
self.assertEqual(page_data['approval_subfields'], [])
self.assertEqual(page_data['initial_approvers'], '')
def testGatherPageData_Approval(self):
self.mr.field_name = 'UIReview'
page_data = self.servlet.GatherPageData(self.mr)
self.assertEqual(page_data['approval_subfields'], [self.sub_fd])
self.assertEqual(page_data['initial_approvers'], 'gatsby@example.com')
field_def_view = page_data['field_def']
self.assertEqual(field_def_view.field_name, 'UIReview')
self.assertEqual(field_def_view.survey, 'Question 1?')
def testProcessFormData_Permission(self):
"""Only owners can edit fields."""
mr = testing_helpers.MakeMonorailRequest(
project=self.project,
perms=permissions.CONTRIBUTOR_ACTIVE_PERMISSIONSET)
mr.field_name = 'CPU'
post_data = fake.PostData(
name=['CPU'],
deletefield=['Submit'])
self.assertRaises(permissions.PermissionException,
self.servlet.ProcessFormData, mr, post_data)
self.servlet.ProcessFormData(self.mr, post_data)
def testProcessFormData_Delete(self):
post_data = fake.PostData(
name=['CPU'],
deletefield=['Submit'])
url = self.servlet.ProcessFormData(self.mr, post_data)
self.assertTrue('/adminLabels?deleted=1&' in url)
fd = tracker_bizobj.FindFieldDef('CPU', self.config)
self.assertEqual('CPU', fd.field_name)
self.assertTrue(fd.is_deleted)
def testProcessFormData_Cancel(self):
post_data = fake.PostData(
name=['CPU'],
cancel=['Submit'],
max_value=['200'])
url = self.servlet.ProcessFormData(self.mr, post_data)
logging.info(url)
self.assertTrue('/adminLabels?ts=' in url)
config = self.services.config.GetProjectConfig(
self.mr.cnxn, self.mr.project_id)
fd = tracker_bizobj.FindFieldDef('CPU', config)
self.assertIsNone(fd.max_value)
self.assertIsNone(fd.min_value)
def testProcessFormData_Edit(self):
post_data = fake.PostData(
name=['CPU'],
field_type=['INT_TYPE'],
min_value=['2'],
max_value=['98'],
notify_on=['never'],
is_required=[],
is_multivalued=[],
docstring=['It is just some field'],
applicable_type=['Defect'],
admin_names=[''])
url = self.servlet.ProcessFormData(self.mr, post_data)
self.assertTrue('/fields/detail?field=CPU&saved=1&' in url)
config = self.services.config.GetProjectConfig(
self.mr.cnxn, self.mr.project_id)
fd = tracker_bizobj.FindFieldDef('CPU', config)
self.assertEqual('CPU', fd.field_name)
self.assertEqual(2, fd.min_value)
self.assertEqual(98, fd.max_value)
def testProcessDeleteField(self):
self.servlet._ProcessDeleteField(self.mr, self.config, self.fd)
self.assertTrue(self.fd.is_deleted)
def testProcessDeleteField_subfields(self):
approval_fd = tracker_bizobj.MakeFieldDef(
3, 789, 'Legal', tracker_pb2.FieldTypes.APPROVAL_TYPE, None,
'', False, False, False, None, None, '', False, '', '',
tracker_pb2.NotifyTriggers.NEVER, 'no_action', 'doc', False)
self.fd.approval_id=3
self.config.field_defs.append(approval_fd)
self.servlet._ProcessDeleteField(self.mr, self.config, approval_fd)
self.assertTrue(self.fd.is_deleted)
self.assertTrue(approval_fd.is_deleted)
def testProcessEditField_Normal(self):
post_data = fake.PostData(
name=['CPU'], field_type=['INT_TYPE'], min_value=['2'],
admin_names=[''])
self.servlet._ProcessEditField(
self.mr, post_data, self.config, self.fd)
fd = tracker_bizobj.FindFieldDef('CPU', self.config)
self.assertEqual('CPU', fd.field_name)
self.assertEqual(2, fd.min_value)
def testProcessEditField_Reject(self):
post_data = fake.PostData(
name=['CPU'], field_type=['INT_TYPE'], min_value=['4'],
max_value=['1'], admin_names=[''])
self.mox.StubOutWithMock(self.servlet, 'PleaseCorrect')
self.servlet.PleaseCorrect(
self.mr, field_def=mox.IgnoreArg(),
initial_applicable_type='',
initial_choices='',
initial_admins='',
initial_approvers='')
self.mox.ReplayAll()
url = self.servlet._ProcessEditField(
self.mr, post_data, self.config, self.fd)
self.assertEqual('Minimum value must be less than maximum.',
self.mr.errors.min_value)
self.assertIsNone(url)
fd = tracker_bizobj.FindFieldDef('CPU', self.config)
self.assertIsNone(fd.min_value)
self.assertIsNone(fd.max_value)
    def testProcessEditField_RejectApproval(self):
        """An approval edit without any approvers is rejected."""
        self.mr.field_name = 'UIReview'
        post_data = fake.PostData(
            name=['UIReview'], admin_names=[''],
            survey=['WIll there be UI changes?'],
            approver_names=[''])
        self.mox.StubOutWithMock(self.servlet, 'PleaseCorrect')
        # Expect the servlet to re-render the form rather than save the edit.
        self.servlet.PleaseCorrect(
            self.mr, field_def=mox.IgnoreArg(),
            initial_applicable_type='',
            initial_choices='',
            initial_admins='',
            initial_approvers='')
        self.mox.ReplayAll()
        url = self.servlet._ProcessEditField(
            self.mr, post_data, self.config, self.approval_fd)
        self.assertEqual('Please provide at least one default approver.',
                         self.mr.errors.approvers)
        self.assertIsNone(url)
    def testProcessEditField_Approval(self):
        """A valid approval edit saves the parsed approver IDs."""
        self.mr.field_name = 'UIReview'
        post_data = fake.PostData(
            name=['UIReview'], admin_names=[''],
            survey=['WIll there be UI changes?'],
            approver_names=['sport@example.com, gatsby@example.com'])
        url = self.servlet._ProcessEditField(
            self.mr, post_data, self.config, self.approval_fd)
        # Successful save redirects back to the detail page.
        self.assertTrue('/fields/detail?field=UIReview&saved=1&' in url)
        approval_def = tracker_bizobj.FindApprovalDef('UIReview', self.config)
        self.assertEqual(len(approval_def.approver_ids), 2)
        # The two email addresses resolve to user ids 111 and 222.
        self.assertEqual(sorted(approval_def.approver_ids), sorted([111, 222]))
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QHBoxLayout, QLabel
from commun.constants.colors import color_bleu_gris, color_blanc
from commun.constants.stylesheets import black_14_label_stylesheet, line_edit_stylesheet
from commun.ui.public.image import Image
from commun.ui.public.mondon_widget import MondonWidget
from commun.ui.public.text_edit import TextEdit
from gestion.stores.filter_store import filter_store
from gestion.ui.selector_ui.selector_collum_filter import SelectorCollumFilter
class SelectorFilter(MondonWidget):
    """Horizontal bar of per-column filter selectors, plus a search box in
    'bobine' mode. Collapses to zero height in 'perfo' mode."""

    def __init__(self, parent):
        super(SelectorFilter, self).__init__(parent=parent)
        self.setObjectName("SelectorFilter")
        # "perfo" has no filter columns: collapse the widget entirely.
        if filter_store.data_type == "perfo":
            self.setFixedHeight(0)
        self.setMinimumWidth(1150)
        self.set_background_color(color_bleu_gris)
        self.search_code = TextEdit(upper_mode=True)
        self.init_widget()

    def init_widget(self):
        """Build the layout: optional search bar + one column filter per column."""
        hbox = QHBoxLayout()
        hbox.setContentsMargins(10, 10, 30, 10)
        hbox.setSpacing(10)
        data_type = filter_store.data_type
        if data_type == "bobine":
            hbox.addLayout(self.get_search_bar())
            self._add_column_filters(hbox,
                                     filter_store.title_filter_bobine_fille,
                                     filter_store.list_filter_bobine_fille,
                                     filter_store.sort_mode_bobine_fille,
                                     filter_store.filter_mode_bobine_fille)
        elif data_type == "poly":
            self._add_column_filters(hbox,
                                     filter_store.title_filter_poly,
                                     filter_store.list_filter_poly,
                                     filter_store.sort_mode_poly,
                                     filter_store.filter_mode_poly)
        elif data_type == "refente":
            self._add_column_filters(hbox,
                                     filter_store.title_filter_refente,
                                     filter_store.list_filter_refente,
                                     filter_store.sort_mode_refente,
                                     filter_store.filter_mode_refente)
        elif data_type == "papier":
            self._add_column_filters(hbox,
                                     filter_store.title_filter_papier,
                                     filter_store.list_filter_papier,
                                     filter_store.sort_mode_papier,
                                     filter_store.filter_mode_papier)
        self.setLayout(hbox)

    def _add_column_filters(self, hbox, titles, names, sort_modes, filter_modes):
        """Add one SelectorCollumFilter widget per configured column.

        The four parallel lists come from filter_store and describe, per
        column: display title, filter name, sort mode and filter mode.
        """
        for title, name_filter, sort_mode, filter_mode in zip(titles, names, sort_modes, filter_modes):
            hbox.addWidget(SelectorCollumFilter(parent=self,
                                                title=title,
                                                name_filter=name_filter,
                                                sort_mode=sort_mode,
                                                filter_mode=filter_mode))

    @staticmethod
    def get_label(text):
        """Return a QLabel styled with the standard black 14pt stylesheet."""
        label = QLabel(text)
        label.setStyleSheet(black_14_label_stylesheet)
        return label

    def get_search_bar(self):
        """Build and return the search-box layout (text edit + search icon)."""
        self.search_code.setStyleSheet(line_edit_stylesheet)
        self.search_code.textChanged.connect(self.handle_search_code_changed)
        # Total bar width 250px: text edit plus the 21px icon.
        self.search_code.setFixedWidth(250-21)
        icone_search = Image(parent=self,
                             img="commun/assets/images/icon_search.png",
                             size=21,
                             background_color=color_blanc)
        layout_search_bar = QHBoxLayout()
        layout_search_bar.setSpacing(0)
        layout_search_bar.setContentsMargins(0, 0, 0, 0)
        layout_search_bar.addWidget(self.search_code)
        layout_search_bar.addWidget(icone_search)
        return layout_search_bar

    def handle_search_code_changed(self):
        """Push the (upper-cased) search text into the shared filter store."""
        filter_store.set_search_code(search_code=self.search_code.text())
|
import os
import numpy as np
import pandas as pd
import geopandas as gpd
import shapely
import geohunter
class Data(object):
    """Loads sensor samples, sensor metadata and city geometry for a study area.

    Args:
        folder_path: directory containing samples.csv, a 'sensors' layer
            readable by geopandas, and city.geojson (plus an 'osm' cache
            directory when geodata is requested).
        geodata: falsy to skip the OSM landmark download; otherwise a dict of
            kwargs forwarded to geohunter.features.Landmarks.fit().
        grid_resolution: resolution passed to geohunter.features.Grid.
    """

    def __init__(self, folder_path, geodata=False, grid_resolution=1):
        self.samples = pd.read_csv(os.path.join(folder_path,'samples.csv'),
                index_col=0).set_index(['Variable','Sensor Name','Timestamp'])
        # 'Sensor Nam' is presumably the shapefile's 10-character truncation of
        # 'Sensor Name' — TODO confirm against the actual sensors layer schema.
        self.metadata = gpd.read_file(os.path.join(folder_path,'sensors')).set_index('Sensor Nam')
        self.metadata['lon'] = self.metadata.geometry.x
        self.metadata['lat'] = self.metadata.geometry.y
        self.city = gpd.read_file(os.path.join(folder_path,'city.geojson'))
        # City bounding box; indexes follow GeoDataFrame.bounds column order
        # (minx, miny, maxx, maxy).
        # NOTE(review): bbox is computed but never stored or used below.
        bbox = {'north':self.city.bounds.max().values[3],
                'east':self.city.bounds.max().values[2],
                'south':self.city.bounds.min().values[1],
                'west':self.city.bounds.min().values[0]}
        self.geodata = {}
        if geodata:
            print('@ Data: Requesting geodata from OSM ...')
            self.geodata = geohunter.features.Landmarks(self.city,
                    osm_folder=os.path.join(folder_path,'osm')).fit(**geodata)
            print('@ Data: geodata loaded!')
        self.grid = geohunter.features.Grid(grid_resolution).fit(self.city) #make_grid(self.city, resolution=)
|
import requests
import wget
import os
import hashlib
import tarfile
def has_new_ver(ver_fname, ver_url):
    """Return True when the version published at ver_url differs from the
    locally recorded one (or when no local version file exists yet)."""
    if not os.path.isfile(ver_fname):
        # No local record: treat as "a new version is available".
        return True
    with open(ver_fname) as fobj:
        local_ver = fobj.read()
    # Fetch the remote version string and compare verbatim.
    remote_ver = requests.get(ver_url).text
    return local_ver != remote_ver
def has_error(fname, md5_url):
    """Return True when fname's MD5 does not match the checksum at md5_url."""
    digest = hashlib.md5()
    with open(fname, 'rb') as fobj:
        # Hash the file in 4 KiB chunks to keep memory use flat.
        for chunk in iter(lambda: fobj.read(4096), b''):
            digest.update(chunk)
    expected = requests.get(md5_url).text.strip()
    return digest.hexdigest() != expected
def deploy(app_fname):
    """Unpack app_fname (e.g. /var/www/download/myweb-1.0.tar.gz) under
    /var/www/deploy and repoint the web symlink at the new release."""
    deploy_dir = '/var/www/deploy/'
    # NOTE(review): extractall() trusts archive member paths; acceptable here
    # only because the tarball comes from our own deploy server.
    with tarfile.open(app_fname, 'r:gz') as tar:
        tar.extractall(path=deploy_dir)
    # myweb-1.0.tar.gz -> myweb-1.0 -> /var/www/deploy/myweb-1.0
    app_name = os.path.basename(app_fname).replace('.tar.gz', '')
    app_path = os.path.join(deploy_dir, app_name)
    # Point the served path at the freshly extracted release, replacing any
    # previous symlink.
    link = '/var/www/html/nsd1811'
    if os.path.exists(link):
        os.remove(link)
    os.symlink(app_path, link)
if __name__ == '__main__':
    app_dir = '/var/www/download'
    ver_fname = '/var/www/deploy/live_ver'
    ver_url = 'http://192.168.4.4/deploy/live_ver'
    if not has_new_ver(ver_fname,ver_url):  # check for a newer remote version
        print('没有新版本')
        exit(1)
    # A new version exists: download it.
    r = requests.get(ver_url)
    ver = r.text.strip()  # version number published on the server
    app_url = 'http://192.168.4.4/deploy/packages/myweb-%s.tar.gz' % ver  # URL of the latest package
    wget.download(app_url, app_dir)  # download the latest package
    # Verify the downloaded archive is not corrupted.
    app_fname = app_url.split('/')[-1]
    app_fname = os.path.join(app_dir , app_fname)  # absolute path of the archive
    md5_url = app_url + '.md5'  # URL of the published md5 checksum
    if has_error(app_fname, md5_url):
        print('文件已损坏')
        os.remove(app_fname)  # delete the corrupted download
        exit(2)
    print('文件未损坏')
    # Archive is intact: deploy it.
    deploy(app_fname)
    # Record the now-live version locally.
    with open(ver_fname,'w') as fobj:
        fobj.write(r.text)  # r still holds the requests.get(ver_url) response
|
# This function takes a list of lists, each with 2 positive integers [start,end] and returns the total sum of (end-start).
# Some of the starts/ends from different list elements may overlap.
# All list values are positive integers less than 2^30-1.
def answer(intervals):
    """Return the total length covered by the union of [start, end] intervals.

    Overlapping intervals are counted once. `intervals` is sorted in place;
    an empty list yields 0.

    Fix: the original `continue` skipped adding the length of an interval
    that was disjoint from its successor (e.g. [[1, 2], [3, 4]] returned 1
    instead of 2). Rewritten as a standard merge sweep.
    """
    # Sort by start so overlapping intervals become adjacent.
    intervals.sort(key=lambda pair: pair[0])
    total = 0
    covered_end = None  # right edge of the merged region swept so far
    for start, end in intervals:
        if covered_end is None or start > covered_end:
            # Disjoint from everything seen so far: count the full length.
            total += end - start
            covered_end = end
        elif end > covered_end:
            # Overlaps the swept region: count only the newly covered part.
            total += end - covered_end
            covered_end = end
    return total
#importing libraries
import argparse
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader


# The Dataset classes live at module level (not inside dataloader) because
# torch.save pickles the DataLoader, and pickle cannot serialize classes
# defined in a function scope ("Can't pickle local object ..."). The unused
# joblib import was removed.
class _TrainData(Dataset):
    """Dataset pairing feature rows with labels."""

    def __init__(self, X_data, y_data):
        self.X_data = X_data
        self.y_data = y_data

    def __getitem__(self, index):
        return self.X_data[index], self.y_data[index]

    def __len__(self):
        return len(self.X_data)


class _TestData(Dataset):
    """Dataset of feature rows only (no labels)."""

    def __init__(self, X_data):
        self.X_data = X_data

    def __getitem__(self, index):
        return self.X_data[index]

    def __len__(self):
        return len(self.X_data)


def dataloader(X_train, y_train, X_test):
    """Load .npy arrays, wrap them in DataLoaders and save them to disk.

    Args:
        X_train: path to the training-features .npy file.
        y_train: path to the training-labels .npy file.
        X_test: path to the test-features .npy file.

    Side effects:
        Writes 'train_loader.pth' and 'test_loader.pth' to the current
        working directory.
    """
    X_train = np.load(X_train)
    X_test = np.load(X_test)
    y_train = np.load(y_train)
    train_data = _TrainData(torch.FloatTensor(X_train), torch.FloatTensor(y_train))
    test_data = _TestData(torch.FloatTensor(X_test))
    BATCH_SIZE = 10  # training mini-batch size; test set is evaluated one row at a time
    train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
    test_loader = DataLoader(dataset=test_data, batch_size=1, num_workers=0)
    torch.save(train_loader, 'train_loader.pth')
    torch.save(test_loader, 'test_loader.pth')
#defining and parsing arguments
if __name__ == '__main__':
    # Parse the three .npy input paths from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--X_train')
    parser.add_argument('--y_train')
    parser.add_argument('--X_test')
    args = parser.parse_args()
    dataloader(args.X_train, args.y_train, args.X_test)
    # Report success only after the loaders were actually built and saved
    # (the original printed this message before doing any work).
    print('Done with loading data')
|
from betfair.api import API
from time import sleep, time
# Created by Birchy 06/02/2012
# bespokebots.com
# NOTE:
# To make this bot fully automated for use on a (Linux) VPS server, you will
# need to remove the "print" statements and write the data to a log file.
# This is because the "print" will fail after you logout of the remote server
# due to the terminal no longer being available. You should launch the bot
# with the command "python simplebot.py &" rather than "python simplebot.py".
# Adding the "&" will detach the process from the terminal so it will continue
# running when you logout of the remote server.
class SimpleBot(object):
    """lay all odds-on horses in UK win-only races"""

    def __init__(self):
        # Rate-limit state: one data request per (1/rps) seconds.
        rps = 1 # Refreshes Per Second
        self.api = API('uk') # exchange ('uk' or 'aus')
        self.no_session = True
        self.throttle = {'rps': 1.0 / rps, 'next_req': time()}

    def login(self, uname = '', pword = '', prod_id = '', vend_id = ''):
        """login to betfair"""
        if uname and pword and prod_id and vend_id:
            resp = self.api.login(uname, pword, prod_id, vend_id)
            # 'OK' means we now hold a valid session token.
            if resp == 'OK': self.no_session = False
            return resp
        else:
            return 'login() ERROR: INCORRECT_INPUT_PARAMETERS'

    def get_markets(self):
        """returns a list of markets or an error string"""
        # NOTE: get_all_markets is NOT subject to data charges!
        markets = self.api.get_all_markets(
            events = ['7'], # horse racing
            hours = 0.5, # starting in the next 30 mins (0.25 = 15 mins, 2 = 120 mins, etc)
            include_started = False, # exclude in-play markets
            countries = ['GBR'] # British racing only
        )
        if type(markets) is list:
            # sort markets by start time + filter
            # Every entry is removed, then matching ones are re-appended as
            # [seconds_til_start, market] pairs so the final sort() orders by
            # start time.
            for market in markets[:]: # loop through a COPY of markets as we're modifying it on the fly...
                markets.remove(market)
                if (market['bsp_market'] == 'Y' # BSP markets only
                    and market['market_name'] != 'To Be Placed' # NOT place markets
                    and market['market_status'] == 'ACTIVE' # market is active
                    and market['market_type'] == 'O' # Odds market only
                    and market['no_of_winners'] == 1 # single winner market
                    ):
                    # calc seconds til start of race
                    delta = market['event_date'] - self.api.API_TIMESTAMP
                    sec_til_start = delta.days * 86400 + delta.seconds # 1 day = 86400 sec
                    temp = [sec_til_start, market]
                    markets.append(temp)
            markets.sort() # sort into time order (earliest race first)
            return markets
        elif markets == 'API_ERROR: NO_SESSION':
            self.no_session = True
            # NOTE(review): this branch returns None implicitly; start()
            # treats any non-list result as "no markets", so that is tolerated.
        else:
            return markets

    def do_throttle(self):
        """return only when it is safe to send another data request"""
        wait = self.throttle['next_req'] - time()
        if wait > 0: sleep(wait)
        self.throttle['next_req'] = time() + self.throttle['rps']

    def check_strategy(self, market_id = ''):
        """check market for suitable bet"""
        if market_id:
            # get market prices
            self.do_throttle()
            prices = self.api.get_market_prices(market_id)
            if type(prices) is dict and prices['status'] == 'ACTIVE':
                # loop through runners and prices and create bets
                bets = []
                for runner in prices['runners']:
                    if runner['back_prices']: # make sure prices are available!
                        back_price = runner['back_prices'][0]['price']
                        # Odds-on threshold: best back price under 1.99.
                        if back_price < 1.99:
                            # horse is odds-on, so lets lay it...
                            # set price to current back price + 1 pip (i.e. put our bet at front of queue)
                            bet_price = self.api.set_betfair_odds(price = back_price, pips = +1)
                            bet_size = 2.00 # minimum stake
                            bet = {
                                'marketId': market_id,
                                'selectionId': runner['selection_id'],
                                'betType': 'L',
                                'price': '%.2f' % bet_price, # set string to 2 decimal places
                                'size': '%.2f' % bet_size,
                                'betCategoryType': 'E',
                                'betPersistenceType': 'NONE',
                                'bspLiability': '0',
                                'asianLineId': '0'
                            }
                            bets.append(bet)
                # place bets (if any have been created)
                if bets:
                    resp = self.api.place_bets(bets)
                    s = 'PLACING BETS...\n'
                    s += 'Bets: ' + str(bets) + '\n'
                    s += 'Place bets response: ' + str(resp) + '\n'
                    s += '---------------------------------------------'
                    print s
                    # check session
                    if resp == 'API_ERROR: NO_SESSION':
                        self.no_session = True
            elif prices == 'API_ERROR: NO_SESSION':
                self.no_session = True
            elif type(prices) is not dict:
                s = 'check_strategy() ERROR: prices = ' + str(prices) + '\n'
                s += '---------------------------------------------'
                print s

    def start(self, uname = '', pword = '', prod_id = '', vend_id = ''):
        """start the main loop"""
        # login/monitor status
        login_status = self.login(uname, pword, prod_id, vend_id)
        while login_status == 'OK':
            # get list of markets starting soon
            markets = self.get_markets()
            if type(markets) is list:
                if len(markets) == 0:
                    # no markets found...
                    s = 'No markets found. Sleeping for 30 seconds...'
                    print s
                    sleep(30) # bandwidth saver!
                else:
                    print 'Found', len(markets), 'markets. Checking strategy...'
                    for market in markets:
                        # do we have bets on this market?
                        # (markets entries are [sec_til_start, market] pairs)
                        market_id = market[1]['market_id']
                        mu_bets = self.api.get_mu_bets(market_id)
                        if mu_bets == 'NO_RESULTS':
                            # we have no bets on this market...
                            self.check_strategy(market_id)
            # check if session is still OK
            if self.no_session:
                login_status = self.login(uname, pword, prod_id, vend_id)
                s = 'API ERROR: NO_SESSION. Login resp =' + str(login_status) + '\n'
                s += '---------------------------------------------'
                print s
        # main loop ended...
        s = 'login_status = ' + str(login_status) + '\n'
        s += 'MAIN LOOP ENDED...\n'
        s += '---------------------------------------------'
        print s
# Instantiate and run the bot; replace the placeholder credentials before use.
bot = SimpleBot()
bot.start('username', 'password', '82', '0') # product id 82 = free api
|
'''
######### Parallel Confusion Matrix #########
ang hap sad neu fea sur
^ang| 184 10 3 20 1 11
^hap| 4 232 7 16 1 1
^sad| 5 17 147 58 1 2
^neu| 53 45 64 244 3 8
^fea| 0 0 0 0 0 0
^sur| 0 0 0 0 0 0
#### . Sequential
ccuracy: 0.7836879432624113
--------------------
precision recall f1-score support
0 0.85 0.74 0.79 222
1 0.90 0.93 0.92 333
2 0.78 0.50 0.61 195
3 0.67 0.88 0.76 353
6 1.00 0.11 0.20 9
7 0.00 0.00 0.00 16
accuracy 0.78 1128
macro avg 0.70 0.53 0.55 1128
weighted avg 0.79 0.78 0.77 1128
--------------------
######### Confusion Matrix #########
ang hap sad neu fea sur
^ang| 164 1 22 6 0 1
^hap| 2 309 2 25 0 4
^sad| 12 0 98 10 3 2
^neu| 44 23 73 312 5 9
^fea| 0 0 0 0 1 0
^sur| 0 0 0 0 0 0
'''
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
emotions = ['Anger', 'Happy', 'Sad', 'Neutral', 'Fear', 'Surprise']
def draw_parallel(out_path='/Users/martin/Documents/UNIVERSIDAD/CLASES/4º/2o Cuatri/TFG/scripts/plotting/conf-matrix/parallel_conf_matrix'):
    """Render the parallel-model confusion matrix as a heatmap and save it.

    Args:
        out_path: destination image path. Defaults to the original
            hard-coded location for backward compatibility (the original
            also used a needless f-string with no placeholders here).
    """
    # Counts copied from the run recorded in the module docstring above.
    parallel_matrix = [[184, 10, 3, 20, 1, 11],
                       [4, 232, 7, 16, 1, 1],
                       [5, 17, 147, 58, 1, 2],
                       [53, 45, 64, 244, 3, 8],
                       [0, 0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0, 0]]
    matrix = pd.DataFrame(parallel_matrix, index=emotions, columns=emotions)
    plt.figure(figsize=(8, 8))
    sn.heatmap(matrix, annot=True, cmap='BuGn_r', fmt='g')
    plt.savefig(out_path)
draw_parallel()
|
def transcription():
dna = open('rosalind_rna.txt' , 'r')
strand = ''
count = {}
for line in dna:
strand += line.strip()
for itme in strand:
print itme
ntcount()
|
# this file loads in text files
import os
import glob2
import datetime
# Work inside the sample directory and collect every .txt file in it.
my_path = '/Users/Bryan/Documents/Programming/Udemy_Python/Sample-Files'
os.chdir(my_path)
filenames = glob2.glob('*.txt')
def combine_file_data(files):
    """Concatenate the contents of `files` into a new timestamped .txt file.

    The output file is created in the current working directory and named
    after the current time (microsecond precision, so repeated runs do not
    collide). Each source file's content is followed by a newline.
    """
    stamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')
    with open(stamp + '.txt', 'w') as out:
        for name in files:
            with open(name, 'r') as src:
                out.write(src.read() + '\n')
combine_file_data(filenames) |
import os
def getResult():
    """Read one test case from stdin and print the minimum total cost.

    Input: a line "b w" (quantities of the two item kinds) followed by a
    line "x y z", where x and y are the unit prices and z is the cost of
    converting one kind into the other.
    """
    b, w = map(int, input().split())
    x, y, z = map(int, input().split())
    if x > y + z:
        # Cheaper to obtain every b-item as a converted y-item.
        total = (y + z) * b + y * w
    elif y > x + z:
        # Cheaper to obtain every w-item as a converted x-item.
        total = (x + z) * w + x * b
    else:
        # Direct purchase is optimal for both kinds.
        total = b * x + w * y
    print(total)
def main():
    """Run getResult once per test case; the first stdin line is the count."""
    for _ in range(int(input())):
        getResult()


if __name__ == "__main__":
    main()
|
import os
import sys
import itertools
import glob
sys.path.append('.')
import sphc
import commonlib.helpers
import commonlib.readconf
import conf_default
commonlib.helpers.setdefaultencoding()
# Site configuration is parsed once at import time.
config = commonlib.readconf.parse_config()

def sitecust(s):
    """Return the site-customised spelling of `s` (falls back to `s` itself)."""
    return config.words.get(s, s)

# Expose sitecust as the builtin `__` so page templates can call it anywhere.
commonlib.helpers.push_to_builtins('__', sitecust)
import fe
import fe.src.pages
import fe.src.pages as rootpages
import fe.src.pages.invoicing
import fe.src.pages.team
import fe.src.pages.member as memberpages
import fe.src.pages.bizplace as bizplacepages
import fe.src.pages.plan as planpages
import fe.src.pages.resource as resourcepages
import fe.src.pages.booking as bookingpages
import commonlib.shared.static as static
import fe.src.pages.taxes
# Command-line switch to skip the (slow) theme compilation step.
option_no_themes = '--nothemes' in sys.argv

pathjoin = os.path.join

# Build-tree layout.
pubroot = 'pub'
buildroot = 'build'
contribroot = 'fe/contrib'
srcroot = 'fe/src'
contribs = ['js', 'css', 'images']
roles = ['admin', 'director', 'host', 'member', 'new']
themeroot = static.themeroot
# Every theme directory except 'base', which only provides shared assets.
themedirs = [os.path.basename(name) for name in glob.glob(themeroot + '/*') if os.path.isdir(name)]
themedirs.remove('base')

# Locate the compass executable: user gems first, then the system gem paths.
compass_bin = os.path.join(os.environ['HOME'], 'gems/bin/compass')
if not os.path.exists(compass_bin):
    try:
        compass_bin = (glob.glob('/var/lib/gems/*/gems/compass-*/bin/compass')[0])
    except IndexError:  # was a bare `except:`; only an empty glob result is expected here
        sys.exit('Error: compass executable not found')
def exec_cmd(cmd, fail_on_err=True):
    """Run `cmd` through the shell and return its os.system() status.

    Args:
        cmd: shell command line to execute.
        fail_on_err: when True (default), abort the whole build via
            sys.exit() if the command exits non-zero.

    Returns:
        The raw os.system() status (0 on success).
    """
    print("Executing :" + cmd)
    ret = os.system(cmd)
    if fail_on_err and ret != 0:  # was `not ret == 0`
        sys.exit("Command failed: %s" % cmd)
    return ret
def compile_scss(prjdir):
    """Create a compass project in `prjdir` and compile its scss to css.

    Uses the susy grid framework; the project reads from scss/ and writes
    to css/ inside `prjdir`.
    """
    # The original applied "% locals()" to this string, which contains no
    # format placeholders — that was a no-op and has been removed.
    opts = "-q -r susy -u susy --relative-assets --sass-dir scss --css-dir css"
    project_cmd = compass_bin + " create %(prjdir)s %(opts)s" % locals()
    exec_cmd(project_cmd)
    compile_cmd = compass_bin + " compile %(prjdir)s -e production --force " % locals()
    exec_cmd(compile_cmd)
# Theme metadata comes from static config; languages are English-only for now.
themes = static.themes
theme_map = dict((theme['name'], theme) for theme in themes)
theme_codes = themedirs
languages = [dict(label=label, name=code) for label, code in [ ('English', 'en') ]]
lang_map = dict((lang['name'], lang) for lang in languages)
lang_codes = tuple(lang_map.keys())
class BuilderBase(object):
    """Base for build steps that render `page` at the path template `path`."""

    def __init__(self, page, path):
        self.page = page
        self.path = path

    def gen_path_combinations(self):
        # The path template may contain %(role)s / %(theme)s / %(lang)s
        # segments; produce every value combination for exactly the
        # variables the template actually uses.
        build_data = dict(role=roles, theme=theme_codes, lang=lang_codes)
        pathvars = [var[2:-2] for var in self.path.split(os.path.sep) if var.startswith('%')]
        combinations = itertools.product(*([{var: v} for v in build_data[var]] for var in pathvars))
        return combinations

    def build(self):
        """
        To be implemented by concrete class
        """
class PageBuilder(BuilderBase):
    """Renders one page class under pub/ for each role/theme/lang combination."""

    def build(self):
        for path_data in self.gen_path_combinations():
            # NOTE: dict.items()[0] relies on Python 2 (items() returns a list).
            page_data = dict(d.items()[0] for d in path_data)
            path = pathjoin(pubroot, (self.path % page_data))
            print("Building page: %s" % path)
            page = self.page(page_data)
            # rroot: relative prefix ('../..' etc.) from the page back to pub root.
            page_data['rroot'] = os.path.sep.join('..' for p in self.path.split(os.path.sep))
            page.write(path, page_data)
class JSBuilder(BuilderBase):
    """
    Placeholder builder for per-context scripts; no build() override yet.
    """
# Path templates: every context-dependent page lives under lang/role/theme/.
prefix = '%(lang)s/%(role)s/%(theme)s/'
host_prefix = '%(lang)s/%(role)s/%(theme)s/'
# Registry of every page to render; consumed by build_all().
pages = [PageBuilder(rootpages.InvoicingPage, prefix + 'invoices/home'),
    PageBuilder(memberpages.MemberCreate, prefix + 'member/new'),
    PageBuilder(memberpages.ListMember, prefix + 'member/list'),
    PageBuilder(rootpages.Login, 'login'),
    PageBuilder(rootpages.Activation, 'activate'),
    PageBuilder(bizplacepages.Create, prefix + 'bizplace/new'),
    PageBuilder(bizplacepages.List, prefix + 'bizplaces'),
    PageBuilder(planpages.CreateTariff, prefix + 'tariff/new'),
    PageBuilder(planpages.ListTariff, prefix + 'tariffs'),
    PageBuilder(resourcepages.ResourceCreate, prefix + 'resource/new'),
    PageBuilder(resourcepages.ResourceManage, prefix + 'resources'),
    PageBuilder(rootpages.Dashboard, prefix + 'dashboard'),
    PageBuilder(memberpages.EditProfile, prefix + 'member/edit'),
    PageBuilder(fe.src.pages.invoicing.New, prefix + 'invoices/new'),
    PageBuilder(rootpages.LogoutPage, 'logout'),
    PageBuilder(fe.src.pages.invoicing.Preferences, prefix + 'invoices/preferences'),
    PageBuilder(fe.src.pages.invoicing.History, prefix + 'invoices/history'),
    PageBuilder(fe.src.pages.invoicing.Uninvoiced, prefix + 'invoices/uninvoiced'),
    PageBuilder(bookingpages.Booking, prefix + '/booking/new'),
    PageBuilder(bookingpages.WeekAgenda, prefix + '/booking/week'),
    PageBuilder(fe.src.pages.team.List, prefix + 'team'),
    PageBuilder(fe.src.pages.taxes.Taxes, prefix + 'taxes')
    ]
def copydirs(srcs, dst, verbose=False, overwrite=True):
    """Copy one or more source dirs/files into dst.

    overwrite=True uses rsync for all sources at once; overwrite=False walks
    a single source tree and copies only files that are missing or newer.
    """
    src = srcs
    if isinstance(srcs, basestring):  # Python 2: accept a single path string
        srcs = [srcs]
    else:
        srcs = list(srcs)
    if not srcs:
        raise Exception("No source specified")
    print "%s -> %s" % (srcs, dst)
    v = verbose and 'v' or ''
    dstdir = os.path.dirname(dst)
    if dstdir and not os.path.exists(dstdir):
        os.makedirs(dstdir)
    if overwrite:
        srcs = ' '.join(srcs)
        cmd = "/usr/bin/rsync -r %s --exclude='.git' %s %s" % (v, srcs, dst)
        exec_cmd(cmd)
    else:
        # NOTE(review): this branch walks `src` (the original argument), so it
        # only supports a single source directory, not a list.
        for root, dirs, files in os.walk(src):
            folder_path = os.path.join(dst, os.path.relpath(root, src))
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
            for each_file in files:
                dst_file = os.path.join(folder_path, each_file)
                src_file = os.path.join(root, each_file)
                # Copy only files that are missing or newer than the destination.
                if not os.path.isfile(dst_file) or os.stat(src_file).st_mtime > os.stat(dst_file).st_mtime:
                    cmd = "/bin/cp --remove-destination %s %s" % (src_file, dst_file)
                    exec_cmd(cmd)
def copy_contribs():
    """Sync every contrib asset directory (js, css, images) into pub/."""
    copydirs([pathjoin(contribroot, name) for name in contribs], pubroot)
def build_themes():
    """
    theme dir (built) goes to <pub>/themes/<theme-name>
    """
    copydirs(themeroot, pubroot, overwrite=False)
    base_themedir = pathjoin(themeroot, 'base')
    src_scssdir = pathjoin(base_themedir, 'scss')
    dst_scssdir = pathjoin(pubroot, 'themes/base/scss')
    # Detect whether any shared (base) scss file changed since the last build.
    change_in_base = False
    for each_file in os.listdir(src_scssdir):
        if each_file.endswith(".scss") and (not os.path.isfile(pathjoin(dst_scssdir, each_file)) or os.stat(pathjoin(src_scssdir, each_file)).st_mtime > os.stat(pathjoin(dst_scssdir, each_file)).st_mtime):
            change_in_base = True
            break
    for themedir in themedirs:
        # cp -r fe/src/themes pub
        # cp -r contrib/css/* pub/themes/default/scss/
        # cp -r themes/base/scss pub/themes/default/scss/base
        # cp pub/themes/default/scss/base/main.scss pub/themes/default/scss/
        # compass create . --sass-dir themes/default/scss --css-dir themes/default/css
        # rm pub/themes/default/scss
        src_themedir = pathjoin(themeroot, themedir)
        dst_themedir = pathjoin(pubroot, 'themes', themedir)
        # 1. copy images (base images first, then theme-specific overrides)
        base_imagedir = pathjoin(base_themedir, 'images')
        src_imagedir = pathjoin(src_themedir, 'images')
        dst_imagedir = pathjoin(dst_themedir, 'images')
        copydirs(base_imagedir, dst_imagedir)
        if os.path.exists(src_imagedir) and os.listdir(src_imagedir):
            copydirs(src_imagedir + '/*', dst_imagedir)
        # 2. compile style
        src_scssdir = pathjoin(src_themedir, 'css')
        dst_scssdir = pathjoin(dst_themedir, 'scss')
        dst_cssdir = pathjoin(dst_themedir, 'css')
        # Recompile only when a theme scss file is missing or newer.
        change_in_theme = False
        for each_file in os.listdir(pathjoin(src_themedir, 'scss')):
            if not os.path.isfile(pathjoin(dst_scssdir, each_file)) or os.stat(pathjoin(src_themedir, 'scss', each_file)).st_mtime > os.stat(pathjoin(dst_scssdir, each_file)).st_mtime:
                change_in_theme = True
                break
        if change_in_base or change_in_theme:
            copydirs(themeroot, pubroot)
            copydirs(pathjoin(contribroot, 'css/'), pathjoin(dst_scssdir, 'contrib'))
            copydirs(pathjoin(base_themedir, 'scss/'), pathjoin(dst_scssdir, 'base'))
            copydirs(pathjoin(dst_scssdir, 'base', 'main.scss'), dst_scssdir)
            compile_scss(dst_themedir)
        # 3. copy jquery-ui images
        src_jqui_imagedir = pathjoin(contribroot, 'js', 'jquery-ui', 'images')
        copydirs(src_jqui_imagedir, dst_cssdir)
def build_scripts():
    """Copy fe/src/js/* into pub/js.

    Source scripts need to know their context (lang, theme, role), which is
    resolved at page level, so they all ship from a single pub/js tree.
    """
    copydirs(pathjoin(srcroot, 'js', '*'), pathjoin(pubroot, 'js'))
def copy_favicon():
    """Place favicon.ico at the published web root."""
    favicon = pathjoin(srcroot, 'favicon.ico')
    exec_cmd('/bin/cp %s %s' % (favicon, pubroot))
def build_be_template_styles():
    """Recompile be/templates scss to css, but only when something changed."""
    base_dir = 'be/templates'
    src_dir = pathjoin(base_dir, 'scss')
    dst_dir = pathjoin(base_dir, 'css')
    changes_in_template = False
    for each_file in os.listdir(src_dir):
        if not os.path.isdir(dst_dir):
            # No css output directory yet: everything needs building.
            changes_in_template = True
            break
        elif each_file.startswith('_'):
            # scss partials are inlined by their includers; skip them.
            continue
        elif not os.path.isfile(pathjoin(dst_dir, each_file.split('.')[0]+'.css')) or os.stat(pathjoin(src_dir, each_file)).st_mtime > os.stat(pathjoin(dst_dir, each_file.split('.')[0]+'.css')).st_mtime:
            # Output css missing or older than its scss source.
            changes_in_template = True
            break
    if changes_in_template:
        # Rebuild from scratch so stale css files cannot linger.
        cmd = "/bin/rm -rf %s" % (dst_dir)
        exec_cmd(cmd)
        compile_scss(base_dir)
def build_all():
    """Render every registered page for all role/theme/language combinations."""
    for builder in pages:
        builder.build()
def build_help(help_format='html'):
    """Build the help files in /help substituting the params from site config."""
    docs_dir = 'help/'
    exec_cmd("make -C %s %s" % (docs_dir, help_format))
def main():
    """Full site build: assets, favicon, styles, themes, scripts, pages, help."""
    if not os.path.exists(pubroot):
        os.makedirs(pubroot)
    copy_contribs()
    copy_favicon()
    build_be_template_styles()
    # Theme compilation is slow; it can be skipped with --nothemes.
    if not option_no_themes: build_themes()
    build_scripts()
    build_all()
    build_help()


if __name__ == '__main__':
    main()
|
from ocelot.services.mappers.pipeline import PipelineMapper
from ocelot.tests import DatabaseTestCase
class TestPipelineMapper(DatabaseTestCase):
    """Round-trip tests for PipelineMapper (record <-> entity)."""

    def setUp(self):
        # Provides self.pipeline for every test.
        self.install_fixture('pipeline')

    def test_to_entity(self):
        """Test that a record can be converted into an entity."""
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(
            PipelineMapper.to_entity(self.pipeline).to_native(),
            {
                'id': self.pipeline.id,
                'name': self.pipeline.name,
            }
        )

    def test_to_record(self):
        """Test that an entity can be converted into a record."""
        entity = PipelineMapper.to_entity(self.pipeline)
        record = PipelineMapper.to_record(entity)
        # Every column must survive the entity -> record conversion unchanged.
        for c in record.__table__.columns:
            self.assertEqual(
                getattr(record, c.name),
                getattr(self.pipeline, c.name),
            )
|
import json
from string import Template
import boto
from boto.mturk.question import Overview, QuestionContent, SelectionAnswer, Question, AnswerSpecification, QuestionForm
from gtd.turk import Task, get_mturk_connection, standard_quals
from gtd.utils import Config
from textmorph import data
"""
To review completed HITs:
- Go to: https://requester.mturk.com/mturk/manageHITs
To do a HIT:
- Go to: https://worker.mturk.com/
- Search for "percy liang"
- Click "Accept & Work"
- For some reason, I had trouble viewing these HITs on Google Chrome (invalid URL parameter error).
- On Firefox, things are fine.
"""
# Shared workspace config and a single live (non-sandbox) MTurk connection
# used by every task class below.
config = Config.from_file(data.workspace.config)
mtc = get_mturk_connection(config.aws_access_key_id,
                           config.aws_secret_access_key, sandbox=False)
class SimilarityTask(Task):
    """MTurk HIT type for the sentence-similarity rating task."""

    def __init__(self, debug):
        """Register the HIT type on MTurk.

        Args:
            debug: when True, the reward is 0.0 and debug-level quals are used.
        """
        # load from configuration
        conf = Config.from_file(data.workspace.turk.similarity.config.txt)
        self.title = conf.title
        self.description = conf.description
        self.keywords = conf.keywords
        self.price = conf.price
        # NOTE(review): eval() on config values — tolerable only because the
        # config file is trusted local input.
        self.duration = eval(conf.duration)
        self.approval_delay = eval(conf.approval_delay)
        # store form specification as JSON, to be built automatically on launch
        with open(data.workspace.turk.similarity.form.json) as form_json:
            self.form_json = form_json.read()
        price_per_hit = 0.0 if debug else self.price
        quals = standard_quals(debug)
        hit_type_ids = mtc.register_hit_type(title=self.title, description=self.description, reward=price_per_hit,
                                             duration=self.duration,
                                             keywords=self.keywords, approval_delay=self.approval_delay, qual_req=quals)
        hit_type_id = hit_type_ids[0].HITTypeId
        super(SimilarityTask, self).__init__(hit_type_id, mtc)

    def launch(self, data=None):
        """Create one HIT, substituting `data` into the JSON form template.

        `data` now defaults to None instead of a shared mutable {} default;
        None is treated as an empty substitution mapping.
        """
        data = {} if data is None else data
        qf = QuestionForm()
        form_json = BotoFormGenerator.inject_data(self.form_json, data)
        BotoFormGenerator.from_json(qf, form_json)
        return self.create_hit(qf)
class CoherenceTask(Task):
    """MTurk HIT type for the sentence-coherence rating task."""

    def __init__(self, debug):
        """Register the HIT type on MTurk.

        Args:
            debug: when True, the reward is 0.0 and debug-level quals are used.
        """
        # load from configuration
        conf = Config.from_file(data.workspace.turk.coherence.config.txt)
        self.title = conf.title
        self.description = conf.description
        self.keywords = conf.keywords
        self.price = conf.price
        # NOTE(review): eval() on config values — tolerable only because the
        # config file is trusted local input.
        self.duration = eval(conf.duration)
        self.approval_delay = eval(conf.approval_delay)
        # store form specification as JSON, to be built automatically on launch
        with open(data.workspace.turk.coherence.form.json) as form_json:
            self.form_json = form_json.read()
        price_per_hit = 0.0 if debug else self.price
        quals = standard_quals(debug)
        hit_type_ids = mtc.register_hit_type(title=self.title, description=self.description, reward=price_per_hit,
                                             duration=self.duration,
                                             keywords=self.keywords, approval_delay=self.approval_delay, qual_req=quals)
        hit_type_id = hit_type_ids[0].HITTypeId
        super(CoherenceTask, self).__init__(hit_type_id, mtc)

    def launch(self, data=None):
        """Create one HIT, substituting `data` into the JSON form template.

        `data` now defaults to None instead of a shared mutable {} default;
        None is treated as an empty substitution mapping.
        """
        data = {} if data is None else data
        qf = QuestionForm()
        form_json = BotoFormGenerator.inject_data(self.form_json, data)
        BotoFormGenerator.from_json(qf, form_json)
        return self.create_hit(qf)
class BotoFormGenerator(object):
    """Builds boto.mturk.question objects from a JSON form specification."""

    # Form object type names that may appear as keys in the JSON spec.
    form_types = {'Overview', 'QuestionContent', 'SelectionAnswer', 'Question', 'AnswerSpecification', 'QuestionForm', 'FormattedContent'}

    @staticmethod
    def from_json(question_form, json_data):
        """
        Construct a QuestionForm from a JSON specification
        """
        form_data = json.loads(json_data, strict=False)
        # construct objects and build QuestionForm
        for obj_data in form_data['form']:
            obj = BotoFormGenerator._from_data(obj_data)
            question_form.append(obj)

    @staticmethod
    def _from_data(form_data):
        """
        Generates and populates boto.mturk.question objects from a specification.

        Non-dict values pass through unchanged. A dict must have exactly one
        key naming a form type; its value holds the constructor arguments
        (possibly nested specs) plus an optional "fields" list of Field
        entries or appended children.
        """
        if type(form_data) is not dict:
            return form_data

        # Each make_* helper splits args_dict into the positional arguments
        # named in args_ and keyword arguments for everything else. To add a
        # new form type, add a make_* helper and an entry in _makers below.
        def make_args(args_dict, args_):
            # positional arguments
            args = [args_dict[k] for k in args_]
            # keyword arguments
            kwargs = {k: v for k, v in args_dict.iteritems() if k not in args_}
            return args, kwargs

        def add_field(obj, field):
            (fl_name, fl_value) = next(field.iteritems())
            obj.append_field(fl_name, fl_value)

        def add_append(obj, append):
            obj.append(append)

        def make_Overview(args_dict, args_=[]):
            args, kwargs = make_args(args_dict, args_)
            return boto.mturk.question.Overview(*args, **kwargs)

        def make_Question(args_dict, args_=['identifier', 'content', 'answer_spec']):
            args, kwargs = make_args(args_dict, args_)
            return boto.mturk.question.Question(*args, **kwargs)

        def make_QuestionContent(args_dict, args_=[]):
            args, kwargs = make_args(args_dict, args_)
            return boto.mturk.question.QuestionContent(*args, **kwargs)

        def make_SelectionAnswer(args_dict, args_=[]):
            args, kwargs = make_args(args_dict, args_)
            return boto.mturk.question.SelectionAnswer(*args, **kwargs)

        def make_AnswerSpecification(args_dict, args_=['spec']):
            args, kwargs = make_args(args_dict, args_)
            return boto.mturk.question.AnswerSpecification(*args, **kwargs)

        def make_FormattedContent(args_dict, args_=['content']):
            args, kwargs = make_args(args_dict, args_)
            return boto.mturk.question.FormattedContent(*args, **kwargs)

        # Explicit dispatch table instead of the original eval("make_{}"),
        # which was fragile and needlessly dynamic. NOTE: 'QuestionForm' is
        # listed in form_types but has no maker — the original eval would
        # also have failed for it (NameError vs KeyError now).
        _makers = {
            'Overview': make_Overview,
            'Question': make_Question,
            'QuestionContent': make_QuestionContent,
            'SelectionAnswer': make_SelectionAnswer,
            'AnswerSpecification': make_AnswerSpecification,
            'FormattedContent': make_FormattedContent,
        }

        k, v = next(form_data.iteritems())
        if k in BotoFormGenerator.form_types:
            make_fn = _makers[k]
            args = {}  # arguments to the object, that may be other objects
            # list of to-be-appended form objects (Field-type or otherwise)
            fields = []
            for arg_k, arg_v in v.iteritems():  # iterate over arguments to the form object
                # Fields _or_ form objects to be appended (e.g. FormattedContent)
                if arg_k == "fields":
                    fields = arg_v
                else:  # recurse and build form object argument
                    args[arg_k] = BotoFormGenerator._from_data(arg_v)
            obj = make_fn(args)
            for fl in fields:
                fl_k, fl_v = next(fl.iteritems())
                if fl_k == "field":
                    add_field(obj, fl_v)
                if fl_k == "append":
                    ap = BotoFormGenerator._from_data(fl_v)
                    add_append(obj, ap)
            return obj
        return None

    @staticmethod
    def inject_data(json_data, data):
        """
        Insert data into the JSON format specification.

        This is used to dynamically create forms with different questions
        using the same specification ($-style string.Template placeholders).
        """
        return Template(json_data).substitute(**data)
|
from django.db import models
from django.utils.safestring import mark_safe
import requests
class Student(models.Model):
    """A student record with enrolment details and ID-card helper URLs.

    Exposes computed URLs for a roll-number barcode, a QR code and a
    printable PDF, plus image-preview helpers for the Django admin.
    """

    # Grouped choices: (group label, ((stored code, human-readable name), ...)).
    COURSES = [
        ('B.A.', (
            ('BAHENG', 'B.A (Hons) English'),
            ('BAPENG', 'B.A. (Programme) compulsory English course'),
            ('BAHHIN', 'B.A (Hons) Hindi'),
            ('BAPHIN', 'B.A. (Programme) compulsory Hindi course'),
            ('BAHHIS', 'B.A. (Hons) History'),
            ('BAPHIS', 'B.A. (Programme)with History'),
            ('BAHPOL', 'B.A (Hons) Political Science'),
            ('BAPPOL', 'B.A (Prog.) with Political Science'),
            ('BAPECO', 'B.A. (Programme) with Economics'),
            ('BAPAC', 'B.A. Program Discipline and Application Course'),
            )
        ),
        ('B.Com', (
            ('BCPENG', 'B.Com (Programme) compulsory English course'),
            ('BCPHIN', 'B.Com (Programme ) compulsory Hindi course'),
            ('BCH', 'B.Com (Hons)'),
            ('BCP', 'B.Com (Prog.)'),
            )
        ),
        ('B.Sc.', (
            ('BSHCS', 'B.Sc. (Hons) Computer Science'),
            ('BSHGEO', 'B.Sc. (Hons) Geology'),
            ('BSHMIC', 'B.Sc (Hons) Microbiology'),
            ('BSHSTA', 'B.Sc. (Hon.) Statistics'),
            ('BSHMAT', 'B.Sc(Hons) Mathematics'),
            )
        ),
        # Ungrouped single choices.
        ('BMS','Bachelor of Management Studies'),
        ('BJMC','BJMC'),
    ]

    # Study year stored as an integer 1-3.
    YEARS= (
        (1, '1st Year'),
        (2, '2nd Year'),
        (3, '3rd Year'),
    )

    # Blood groups grouped by ABO type; the stored value includes Rh sign.
    BLOOD_GROUPS = [
        ('A',(
            ('A+','A +ve'),
            ('A-','A -ve'),
            ),
        ),
        ('B',(
            ('B+','B +ve'),
            ('B-','B -ve'),
            ),
        ),
        ('AB',(
            ('AB+','AB +ve'),
            ('AB-','AB -ve'),
            ),
        ),
        ('O',(
            ('O+','O +ve'),
            ('O-','O -ve'),
            ),
        ),
    ]

    name = models.CharField(max_length=50)
    # Roll number doubles as the value encoded in the barcode/QR URLs below.
    roll = models.PositiveSmallIntegerField(unique=True)
    # NOTE(review): default=1 is an integer default on a CharField and is not
    # one of the COURSES codes -- confirm the intended default choice.
    course= models.CharField(max_length=50, choices = COURSES, default=1)
    year = models.IntegerField(choices = YEARS, default = 1)
    father_name = models.CharField(max_length=50)
    mother_name = models.CharField(max_length=50)
    address = models.TextField()
    email = models.EmailField(unique=True)
    date_of_birth = models.DateField()
    date_of_admission = models.DateField()
    # NOTE(review): same integer-default-on-CharField issue as `course` --
    # 1 is not a valid BLOOD_GROUPS code.
    blood_group = models.CharField(max_length=3, choices = BLOOD_GROUPS, default =1)
    # Fallback avatar used when no photo URL is supplied for the student.
    default_image_url = 'https://pwcenter.org/sites/default/files/default_images/default_profile.png'
    image = models.URLField(max_length = 500, default = default_image_url)

    def __str__(self):
        """Render as "<name> (<roll>)" for admin lists and shell output."""
        label = self.name +' (' + str(self.roll) + ')'
        return label

    @property
    def barcode(self):
        """URL of a Code-128 barcode image encoding the roll number.

        NOTE(review): relies on the third-party barcodes4.me service over
        plain http -- confirm availability/https requirements.
        """
        barcode_url = 'http://barcodes4.me/barcode/c128a/'+ str(self.roll)+ '.png?margin=5&height=40&resolution=2'
        return barcode_url

    @property
    def qrcode(self):
        """URL of a QR-code image pointing at the student's profile page."""
        qrcode_url = 'http://barcodes4.me/barcode/qr/usercode.png?value=http://text.ujjawal.co/' + str(self.roll)
        return qrcode_url

    @property
    def pdf(self):
        """Site-relative URL that generates this student's printable PDF."""
        pdf_url = '/generate/'+str(self.roll)
        return pdf_url

    def image_tag(self):
        """Large (160px) image preview for the admin detail page."""
        if self.image:
            return mark_safe('<img src="%s" style="width: 160px; height:160px;" />' % self.image)
        else:
            return 'No Image Found'
    image_tag.short_description = 'Image Preview'

    def image_tag_list(self):
        """Small (25px) thumbnail for the admin changelist column."""
        if self.image:
            return mark_safe('<img src="%s" style="width: 25px; height:25px;" />' % self.image)
        else:
            return 'No Image Found'
    image_tag_list.short_description = 'Image'
|
#!/usr/bin/env python3
"""Basic smoke tests of utility functions in krb_side_car.py.

Prints a line per check; exits 0 when everything passes, 1 on the first
failure.
"""
import sys

import krb_side_car


def _fail(message):
    """Print a uniform error banner and abort with exit status 1."""
    print("**ERROR** [Test] %s" % message)
    sys.exit(1)


def _expect_exception(label, fn, *args):
    """Check that fn(*args) raises; fail the test run otherwise."""
    raised = False
    try:
        fn(*args)
    # Narrowed from the original bare `except:` so Ctrl-C / SystemExit still
    # abort the run instead of being counted as an expected failure.
    except Exception:
        raised = True
    if not raised:
        _fail("%s exception test failed" % label)
    print("[Test] %s exception test passed" % label)


_expect_exception("get_secret", krb_side_car.get_secret,
                  "us-west-1", "non_secret_arn")

# NOTE(review): this check depends on live DNS resolution of info.cern.ch and
# on the resolved alias staying stable -- confirm this is acceptable in CI.
hostname = krb_side_car.get_dc_server_name("info.cern.ch")
if hostname != 'webafs706.cern.ch':
    # Message made specific (the original printed just "failed").
    _fail("get_dc_server_name hostname check failed")
print("[Test] get_dc_server_name hostname check passed")

_expect_exception("create_keytab", krb_side_car.create_keytab,
                  "username", "nonexistent_password", "non-existent-dir",
                  "non-existent-spn", "/tmp/mykeytabfile")

_expect_exception("execute_kinit_cmd", krb_side_car.execute_kinit_cmd,
                  "username", "non-existent-password", "non-existent-dir")

env_vars = {}
if krb_side_car.check_ldap_info(env_vars):
    # Banner fixed: the original omitted the closing '**' here.
    _fail("check_ldap_info exception test failed")
print("[Test] check_ldap_info exception test passed")

print("All tests passed")
sys.exit(0)
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from subprocess import call
import random
import json
import os
from os import listdir
from os.path import isfile, join
import seaborn as sns
import pickle
from matplotlib import cm

# Mapping seed -> indices of the non-dominated policies kept for that seed.
with open('nondom-tracker/seed_policy_adjust.json') as fp:
    seed_policy_adjust = json.load(fp)

# Bug fix: json.load() expects an open file object, not a path string --
# the original passed the path directly, which raises AttributeError.
with open('misc-files/basline_testing_scenarios.json') as fp:
    baseline_ind = json.load(fp)

robust_dist = np.array([])  # robustness score of every evaluated policy
robust_pols_dicts = {}      # seed -> policy indices passing the 0.8 threshold

for seed in range(10):  # NOTE(review): original said "#9 also" -- confirm range
    robust_seed_pols = []
    optrun = 'training_scenarios_seed_%s' % seed
    with open('snapshots/%s.pkl' % optrun, 'rb') as fp:
        snapshots = pickle.load(fp)
    f = snapshots['best_f'][-1]  # final-snapshot objective values
    P = snapshots['best_P'][-1]  # final-snapshot policies (tree objects)

    # Accumulate objective values across seeds, restricted to the
    # non-dominated policies recorded for this seed.
    if seed == 0:
        f_all = f[seed_policy_adjust['%s' % seed]]
    else:
        f_all = np.concatenate((f_all, f[seed_policy_adjust['%s' % seed]]))

    for j, pol_num in enumerate(seed_policy_adjust['%s' % seed]):
        # Stitch together the per-scenario-set test results for this policy.
        df = pd.DataFrame()
        for scset in range(5):
            dfscset = pd.read_csv(
                'testing_outputs/ind_sc/seed_%s_pol_%s_scset_%s.csv' % (seed, pol_num, scset),
                index_col=0)
            df = pd.concat([df, dfscset], axis=1, sort=False)

        # Count scenarios where the policy meets the baseline on all four
        # criteria (opt[1] appears to be stored negated, hence the -1*
        # flip -- TODO confirm against the objective definitions).
        meets_baseline = 0
        for sc in df.columns:
            baseline = baseline_ind[sc]
            opt = df[sc].values
            if (-1 * opt[1] > baseline[1] * 0.9) & (opt[2] < baseline[2] * 1.3) \
                    & (opt[3] < baseline[3]) & (opt[0] < 900):
                meets_baseline += 1

        score = meets_baseline / (47 * 5)  # fraction of the 47x5 test scenarios met
        robust_dist = np.append(robust_dist, score)
        if score > 0.8:
            # Export sufficiently robust policies and remember their indices.
            P[pol_num].graphviz_export('trees/nondom/seed_%s/pol_%s.pdf' % (seed, pol_num))
            robust_seed_pols.append(pol_num)

    robust_pols_dicts[seed] = robust_seed_pols

with open('nondom-tracker/seed_policy_adjust_robust.json', 'w') as fp:
    json.dump(robust_pols_dicts, fp, indent=4)
|
from celery.task import task


@task
def test():
    """Trivial smoke-test task: prints one line and hands back a string."""
    message = 'This is print text'
    print(message)
    return 'This is return text'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.