text stringlengths 38 1.54M |
|---|
# Generated by Django 3.2.4 on 2021-06-20 11:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `canceled` boolean flag (default False) to the Order model."""

    dependencies = [
        # Must run after the previous order-app migration.
        ('order', '0012_auto_20210611_0144'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='canceled',
            # blank=True lets forms leave it unset; stored value defaults to False.
            field=models.BooleanField(blank=True, default=False),
        ),
    ]
|
import datetime
import decimal
import ipaddress
import uuid
import pytest
from pysandra import constants, exceptions, messages
from pysandra.constants import Consistency, Events, Opcode, SchemaChangeTarget
from pysandra.core import SBytes
from pysandra.types import Row
def test_messages_response_create_bad():
    """create() on a ResponseMessage subclass without build() must raise."""
    with pytest.raises(
        exceptions.InternalDriverError, match=r"subclass should implement method"
    ):

        class MyResponse(messages.ResponseMessage):
            pass

        MyResponse.create(1, 1, 1, SBytes(b""))


# NOTE(review): "emtpy" typo kept — renaming would change the test id.
def test_messages_response_create_emtpy():
    """build() returning None must be rejected by create()."""
    with pytest.raises(
        exceptions.InternalDriverError, match=r"didn't generate a response message"
    ):

        class MyResponse(messages.ResponseMessage):
            @staticmethod
            def build(version, flags, strem_id, body):
                return None

        MyResponse.create(1, 1, 1, SBytes(b""))


def test_messages_response_create_remains():
    """create() must raise if build() leaves unread bytes in the body."""
    with pytest.raises(exceptions.InternalDriverError, match=r"left data remains"):

        class MyResponse(messages.ResponseMessage):
            @staticmethod
            def build(version, flags, strem_id, body):
                return MyResponse(1, 2, 3)

        MyResponse.create(1, 1, 1, SBytes(b"asdf"))


def test_messages_response_create_good():
    """A build() that consumes the whole body yields a message instance."""

    class MyResponse(messages.ResponseMessage):
        @staticmethod
        def build(version, flags, strem_id, body):
            body.grab(4)
            return MyResponse(1, 2, 3)

    msg = MyResponse.create(1, 1, 1, SBytes(b"asdf"))
    assert msg.version == 1
def test_messages_requestmessage_basic():
    """bytes() of a request renders the frame header plus empty body."""
    msg = messages.RequestMessage(1, 2, 3, 4)
    msg.opcode = 1
    assert bytes(msg) == b"\x01\x02\x00\x03\x01\x00\x00\x00\x00"


def test_messages_requestmessage_compress(monkeypatch):
    """Bodies above COMPRESS_MINIMUM are compressed and the flags bit set."""
    monkeypatch.setattr(messages, "COMPRESS_MINIMUM", 20)
    # The fake "compressor" just slices bytes 1..19 so output is predictable.
    msg = messages.RequestMessage(1, 2, 3, lambda x: x[1:20])

    def encode_body(*args):
        return b"row row row your boat, gently down the stream" * 2

    msg.encode_body = encode_body
    msg.opcode = 1
    assert bytes(msg) == b"\x01\x03\x00\x03\x01\x00\x00\x00\x13ow row row your boa"


def test_messages_responsemessage_basic():
    """Constructor stores the flags argument."""
    msg = messages.ResponseMessage(1, 2, 3)
    assert msg.flags == 2


def test_messages_responsemsg_build_err():
    """Base-class build() is abstract and must raise."""
    with pytest.raises(
        exceptions.InternalDriverError, match=r"subclass should implement method"
    ):
        msg = messages.ResponseMessage(1, 2, 3)
        msg.build(1, 2, 3, b"123")
def test_messages_readymsg_build():
    """READY frames decode to a message with the READY opcode."""
    msg = messages.ReadyMessage.build(1, 2, 3, b"")
    assert msg.opcode == Opcode.READY


def test_messages_supportedmsg_build():
    """SUPPORTED body (string multimap) decodes into the options dict."""
    msg = messages.SupportedMessage.build(
        1, 2, 3, SBytes(b"\x00\x01\x00\x01a\x00\x02\x00\x01b\x00\x01c")
    )
    assert msg.options["a"] == ["b", "c"]


def test_messages_errormsg_build_good():
    """A known error code decodes to the matching ErrorCode enum."""
    msg = messages.ErrorMessage.build(
        1,
        2,
        3,
        SBytes(
            b'\x00\x00"\x00\x00;Invalid STRING constant (hillary) for "user_id" of type int'
        ),
    )
    assert msg.error_code == constants.ErrorCode.INVALID


def test_messages_errormsg_build_err():
    """An unknown error code must raise with the code in the message."""
    with pytest.raises(
        exceptions.InternalDriverError, match=r"unknown error_code=feebad"
    ):
        messages.ErrorMessage.build(
            1, 2, 3, SBytes(b"\x00\xfe\xeb\xad\x00"),
        )


def test_messages_errormsg_build_unavailable():
    """UNAVAILABLE errors decode to the UNAVAILABLE_EXCEPTION code."""
    body = b"\x00\x00\x10\x00\x00&Cannot achieve consistency level THREE\x00\x03\x00\x00\x00\x03\x00\x00\x00\x01"
    msg = messages.ErrorMessage.build(1, 2, 3, SBytes(body),)
    assert msg.error_code == constants.ErrorCode.UNAVAILABLE_EXCEPTION
def test_messages_event_build_good():
    """A well-formed SCHEMA_CHANGE event decodes its event type."""
    body = b"\x00\rSCHEMA_CHANGE\x00\x07CREATED\x00\x08KEYSPACE\x00\x0ctestkeyspace"
    msg = messages.EventMessage.build(1, 2, 3, SBytes(body),)
    assert msg.event_type == Events.SCHEMA_CHANGE


def test_messages_event_build_badevent():
    """Event names are case-sensitive: unknown names must raise."""
    with pytest.raises(
        exceptions.UnknownPayloadException, match=r"got unexpected event=SCHEMA_change"
    ):
        body = b"\x00\rSCHEMA_change\x00\x07CREATED\x00\x08KEYSPACE\x00\x0ctestkeyspace"
        messages.EventMessage.build(
            1, 2, 3, SBytes(body),
        )


def test_messages_event_build_badchange():
    """Unknown change types must raise UnknownPayloadException."""
    with pytest.raises(
        exceptions.UnknownPayloadException, match=r"got unexpected change_type=CRE4TED"
    ):
        body = b"\x00\rSCHEMA_CHANGE\x00\x07CRE4TED\x00\x08KEYSPACE\x00\x0ctestkeyspace"
        messages.EventMessage.build(
            1, 2, 3, SBytes(body),
        )


def test_messages_event_build_badtargete():
    """Unknown schema-change targets must raise UnknownPayloadException."""
    with pytest.raises(
        exceptions.UnknownPayloadException, match=r"got unexpected target=K3YSPACE"
    ):
        body = b"\x00\rSCHEMA_CHANGE\x00\x07CREATED\x00\x08K3YSPACE\x00\x0ctestkeyspace"
        messages.EventMessage.build(
            1, 2, 3, SBytes(body),
        )


def test_messages_event_build_good_table():
    """TABLE schema changes carry the table name in the event options."""
    body = (
        b"\x00\rSCHEMA_CHANGE\x00\x07CREATED\x00\x05TABLE\x00\x07mykeysp\x00\x07mytable"
    )
    msg = messages.EventMessage.build(1, 2, 3, SBytes(body),)
    assert (
        msg.event.options["target_name"] == "mytable"
        and msg.event.target == SchemaChangeTarget.TABLE
    )


def test_messages_event_build_good_function():
    """FUNCTION schema changes carry the argument type list."""
    body = b"\x00\rSCHEMA_CHANGE\x00\x07CREATED\x00\x08FUNCTION\x00\x07mykeysp\x00\x07mytable\x00\x02\x00\x03cat\x00\x04book"
    msg = messages.EventMessage.build(1, 2, 3, SBytes(body),)
    assert (
        msg.event.options["argument_types"] == ["cat", "book"]
        and msg.event.target == SchemaChangeTarget.FUNCTION
    )
def test_messages_preparedresults_meta():
    """PREPARED result metadata decodes into col_specs entries."""
    body = (
        b"\x00\x00\x00\x04\x00\x10\xac\xfc\x0fW\xa9\x9c\x1cr\xaf\xcaP9<\xd2c\x8d\x00\x00\x00"
        + b"\x01\x00\x00\x00\x03\x00\x00\x00\x01\x00\x00\x00\x08uprofile\x00\x04user\x00\x07user_id\x00"
        + b"\t\x00\tuser_name\x00\r\x00\nuser_bcity\x00\r\x00\x00\x00\x04\x00\x00\x00\x00"
    )
    msg = messages.ResultMessage.build(1, 2, 3, SBytes(body),)
    assert msg.col_specs == [
        {"ksname": "uprofile", "name": "user_id", "option_id": 9, "tablename": "user"},
        {
            "ksname": "uprofile",
            "name": "user_name",
            "option_id": 13,
            "tablename": "user",
        },
        {
            "ksname": "uprofile",
            "name": "user_bcity",
            "option_id": 13,
            "tablename": "user",
        },
    ]


def test_messages_rowresults_global():
    """ROWS result with the global-tables-spec flag decodes col_specs."""
    body = (
        b"\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\x00\x08uprofile\x00\x04user"
        + b"\x00\x07user_id\x00\t\x00\tuser_name\x00\r\x00\nuser_bcity\x00\r\x00\x00\x00\x01"
        + b"\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x06Ehtevs\x00\x00\x00\x04Pune"
    )
    msg = messages.ResultMessage.build(1, 2, 3, SBytes(body),)
    assert msg.rows.col_specs == [
        {"ksname": "uprofile", "name": "user_id", "option_id": 9, "tablename": "user"},
        {
            "ksname": "uprofile",
            "name": "user_name",
            "option_id": 13,
            "tablename": "user",
        },
        {
            "ksname": "uprofile",
            "name": "user_bcity",
            "option_id": 13,
            "tablename": "user",
        },
    ]


def test_messages_rowresults_noglobal():
    """ROWS result without global tables spec still yields per-column specs."""
    body = (
        b"\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\x00\x08uprofile\x00\x04user\x00"
        + b"\x07user_id\x00\t\x00\tuser_name\x00\r\x00\nuser_bcity\x00\r\x00\x00\x00\x01\x00"
        + b"\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x06Ehtevs\x00\x00\x00\x04Pune"
    )
    msg = messages.ResultMessage.build(1, 2, 3, SBytes(body),)
    assert msg.rows.col_specs[0]["name"] == "user_id"


def test_messages_rowresults_pages():
    """ROWS result with has-more-pages flag exposes the paging state bytes."""
    body = (
        b"\x00\x00\x00\x02\x00\x00\x00\x07\x00\x00\x00\x03\x00\x00\x00\x18\x04\x00\x00\x00"
        + b"\x04\x08\x00\x06Ehtevs\xf0\x7f\xff\xff\xfb\xf0\x7f\xff\xff\xfe\x00\x00\x00\x01"
        + b"\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x06Ehtevs\x00\x00\x00\x04Pune"
    )
    msg = messages.ResultMessage.build(1, 2, 3, SBytes(body),)
    assert (
        msg.rows.paging_state
        == b"\x04\x00\x00\x00\x04\x08\x00\x06Ehtevs\xf0\x7f\xff\xff\xfb\xf0\x7f\xff\xff\xfe"
    )


def test_messages_voidresults():
    """VOID result kind decodes to a VoidResultMessage."""
    body = b"\x00\x00\x00\x01"
    msg = messages.ResultMessage.build(1, 2, 3, SBytes(body),)
    assert isinstance(msg, messages.VoidResultMessage)


def test_messages_setkeyspaceresult():
    """SET_KEYSPACE result carries the keyspace name."""
    body = b"\x00\x00\x00\x03\x00\x08uprofile"
    msg = messages.ResultMessage.build(1, 2, 3, SBytes(body),)
    assert msg.keyspace == "uprofile"


def test_messages_register_bad():
    """Registering an unknown event name must raise TypeViolation."""
    with pytest.raises(
        exceptions.TypeViolation,
        match=r"unknown event=asdf. please use pysandra.Events",
    ):
        msg = messages.RegisterMessage(["asdf"], 1, 1, 1)
        msg.encode_body()
def test_messages_rowresults_alltypes():
    """A ROWS result covering every supported CQL type decodes into the
    expected typed Row values (ints, decimals, dates, uuids, inet, ...)."""
    body = (
        b"\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x13\x00\x08uprofile\x00\x08"
        + b"alltypes\x00\x05myint\x00\t\x00\x07myascii\x00\x01\x00\x08mybigint\x00\x02\x00"
        + b"\x06myblob\x00\x03\x00\tmyboolean\x00\x04\x00\x06mydate\x00\x11\x00\tmydecimal"
        + b"\x00\x06\x00\x08mydouble\x00\x07\x00\x07myfloat\x00\x08\x00\x06myinet\x00\x10"
        + b"\x00\nmysmallint\x00\x13\x00\x06mytext\x00\r\x00\x06mytime\x00\x12\x00\x0b"
        + b"mytimestamp\x00\x0b\x00\nmytimeuuid\x00\x0f\x00\tmytinyint\x00\x14\x00\x06myuuid"
        + b"\x00\x0c\x00\tmyvarchar\x00\r\x00\x08myvarint\x00\x0e\x00\x00\x00\x01\x00\x00\x00"
        + b"\x04\x00\x00\x00\n\x00\x00\x00\x011\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00"
        + b"\x02\x00\x00\x00\x02\x03\x06\x00\x00\x00\x01\x00\x00\x00\x00\x04\x80\x00G5\x00"
        + b"\x00\x00\t\x00\x00\x00\x08\r\xf9\x03C?\x00\x00\x00\x08@\x1c~M\xe3\xb8\xa1\x9d\x00"
        + b"\x00\x00\x04A\x05\x82\xe4\x00\x00\x00\x10&\x07\xf8\xb0@\x06\x08\x13\x00\x00\x00"
        + b"\x00\x00\x00 \x0e\x00\x00\x00\x02\x00\x0b\x00\x00\x00\x0212\x00\x00\x00\x08\x00"
        + b"\x00\x00\x00\x00\x00\x00\r\x00\x00\x00\x08\x00\x00\x01n\xb8@\xa3\x1b\x00\x00\x00"
        + b"\x10v\x92\x80\xc8\x12\xf0\x11\xea\x88\x99`\xa4L\xe9tb\x00\x00\x00\x01\x10\x00\x00"
        + b"\x00\x10\xf9&0\xa6\xd9\x94D\x0e\xa2\xdc\xfek(\xe98)\x00\x00\x00\x0218\x00\x00\x00\x01\x13"
    )
    msg = messages.ResultMessage.build(1, 2, 3, SBytes(body),)
    assert msg.rows[0] == Row(
        myint=10,
        myascii="1",
        mybigint=2,
        myblob=b"\x03\x06",
        myboolean=False,
        mydate=datetime.date(2019, 11, 29),
        mydecimal=decimal.Decimal("600.12315455"),
        mydouble=7.123344,
        myfloat=8.34445571899414,
        myinet=ipaddress.IPv6Address("2607:f8b0:4006:813::200e"),
        mysmallint=11,
        mytext="12",
        mytime=13,
        mytimestamp=datetime.datetime(2019, 11, 29, 17, 41, 14, 139000),
        mytimeuuid=uuid.UUID("769280c8-12f0-11ea-8899-60a44ce97462"),
        mytinyint=16,
        myuuid=uuid.UUID("f92630a6-d994-440e-a2dc-fe6b28e93829"),
        myvarchar="18",
        myvarint=19,
    )
def test_messages_execute_alltypes():
    """EXECUTE encoding round-trip: a query id plus one value of every
    supported CQL type must encode to the exact expected wire bytes."""
    expected_body = (
        b"\x00\x10W\xa5g\xe7\xd3r'\xc1\x85\xf7\x06}<\xc3\xadp\x00\x01\x03\x00\x13\x00"
        + b"\x00\x00\x011\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x02"
        + b"\x00\x00\x00\x02\x03\x06\x00\x00\x00\x01\x00\x00\x00\x00\x04\x80\x00G5\x00"
        + b"\x00\x00\t\x00\x00\x00\x08\r\xf9\x03C?\x00\x00\x00\x08@\x1c~M"
        + b"\xe3\xb8\xa1\x9d\x00\x00\x00\x04A\x05\x82\xe4\x00\x00\x00\x10&\x07\xf8\xb0"
        + b"@\x06\x08\x13\x00\x00\x00\x00\x00\x00 \x0e\x00\x00\x00\x04\x00\x00\x00\n"
        + b"\x00\x00\x00\x02\x00\x0b\x00\x00\x00\x0212\x00\x00\x00\x08\x00\x00\x00\x00"
        + b"\x00\x00\x00\r\x00\x00\x00\x08\x00\x00\x01n\xb8@\xa3\x1b\x00\x00\x00\x10"
        + b"v\x92\x80\xc8\x12\xf0\x11\xea\x88\x99`\xa4L\xe9tb\x00\x00\x00\x01"
        + b"\x10\x00\x00\x00\x10\xf9&0\xa6\xd9\x94D\x0e\xa2\xdc\xfek(\xe98)\x00\x00\x00"
        + b"\x0218\x00\x00\x00\x01\x13"
    )
    # Positional args: query_id, values, send_metadata, consistency,
    # col_specs, version, flags, stream_id (per the call below).
    msg = messages.ExecuteMessage(
        b"W\xa5g\xe7\xd3r'\xc1\x85\xf7\x06}<\xc3\xadp",
        [
            "1",
            2,
            b"\x03\x06",
            False,
            datetime.date(2019, 11, 29),
            decimal.Decimal("600.12315455"),
            7.123344,
            8.344455999,
            ipaddress.IPv6Address("2607:f8b0:4006:813::200e"),
            10,
            11,
            "12",
            13,
            datetime.datetime(2019, 11, 29, 17, 41, 14, 138904),
            uuid.UUID("769280c8-12f0-11ea-8899-60a44ce97462"),
            16,
            uuid.UUID("f92630a6-d994-440e-a2dc-fe6b28e93829"),
            "18",
            19,
        ],
        False,
        Consistency.ONE,
        # One col_spec per value, in server-declared order.
        [
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "myascii",
                "option_id": 1,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mybigint",
                "option_id": 2,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "myblob",
                "option_id": 3,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "myboolean",
                "option_id": 4,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mydate",
                "option_id": 17,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mydecimal",
                "option_id": 6,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mydouble",
                "option_id": 7,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "myfloat",
                "option_id": 8,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "myinet",
                "option_id": 16,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "myint",
                "option_id": 9,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mysmallint",
                "option_id": 19,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mytext",
                "option_id": 13,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mytime",
                "option_id": 18,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mytimestamp",
                "option_id": 11,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mytimeuuid",
                "option_id": 15,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "mytinyint",
                "option_id": 20,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "myuuid",
                "option_id": 12,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "myvarchar",
                "option_id": 13,
            },
            {
                "ksname": "uprofile",
                "tablename": "alltypes",
                "name": "myvarint",
                "option_id": 14,
            },
        ],
        4,
        0,
        0,
    )
    assert msg.encode_body() == expected_body
|
import os
from PyTabParser import ByteReader, TabException
from abc import ABC, abstractmethod
class BaseGuitarproTab(ABC):
    """Base reader for Guitar Pro tab files.

    Validates the path, then reads the version header and the string
    metadata block (title, artist, album, ...) through a ByteReader.
    """

    def __init__(self, file_path):
        """Open *file_path* and parse version + metadata.

        Raises:
            Exception: if the path is missing, not a file, or unparseable.
            TabException: propagated unchanged from the parser.
        """
        if not os.path.exists(file_path):
            raise Exception(f'[{file_path}] does not exist')
        if not os.path.isfile(file_path):
            raise Exception(f'[{file_path}] is not a file')
        try:
            self.file_path = file_path
            with open(file_path, 'rb') as file:
                self.byte_reader = ByteReader(file)
                self.metadata = dict()
                self.__initialize()
        except TabException:
            # Parser errors are meaningful to callers; re-raise as-is.
            raise
        except Exception as error:
            # FIX: was a bare `except:` that discarded the original cause.
            raise Exception(f"Corrupt file [{self.file_path}]") from error

    def initialize(self):
        """Read the metadata string block (subclasses may override)."""
        self.metadata["title"] = self.readStringByteSizeOfInteger()
        # Subtitle field is read but intentionally discarded.
        self.readStringByteSizeOfInteger()
        self.metadata["artist"] = self.readStringByteSizeOfInteger()
        self.metadata["album"] = self.readStringByteSizeOfInteger()
        self.metadata["author"] = self.readStringByteSizeOfInteger()
        self.metadata["copyright"] = self.readStringByteSizeOfInteger()
        self.metadata["writer"] = self.readStringByteSizeOfInteger()

    def __initialize(self):
        # Version header always precedes the metadata block.
        self._read_version()
        self.initialize()

    def _read_version(self):
        """Read the 30-byte padded version string at the top of the file."""
        try:
            length = self.byte_reader.readByteAsInt()
            # Clamp to the fixed 30-byte field.
            stringLength = length if length >= 0 and length <= 30 else 30
            self.version = self.byte_reader.readStringBytes(stringLength)
            # NOTE(review): if length > 30, pad_length is negative — confirm
            # ByteReader.skip() tolerates that.
            pad_length = 30 - length
            self.byte_reader.skip(pad_length)
            self.version = self.version[0:stringLength]
        except UnicodeDecodeError:
            self.version = 'CANT READ VERSION'

    def get_metadata(self):
        """Return the parsed metadata dict."""
        return self.metadata

    def get_version(self):
        """Return the version string read from the file header."""
        return self.version

    def readStringByteSizeOfInteger(self):
        """Read a string whose field size is an int and whose actual length
        is the following byte (GP's int-prefixed string format)."""
        return self.readStringByte(self.byte_reader.readInt() - 1)

    def readStringByte(self, size):
        """Read a byte-length-prefixed string inside a *size*-byte field."""
        return self.readString(size, self.byte_reader.readByteAsInt())

    def readString(self, size, length):
        """Read *length* chars from a *size*-byte field, skipping padding.

        (Parameter renamed from `len`, which shadowed the builtin.)
        """
        string_len = length if length >= 0 and length <= size else size
        pad_bytes = length - string_len
        result = self.byte_reader.readStringBytes(string_len)
        self.byte_reader.skip(pad_bytes)
        return result
|
import asyncio
from typing import NoReturn
from gloop.models.remote_player import RemotePlayer
class RemoteParty(list):
    """A list of RemotePlayer with fan-out send helpers."""

    async def broadcast(self, message: str) -> None:
        """Send *message* to every player, in list order.

        FIX: return annotation was `NoReturn`, which means "never returns";
        these coroutines return normally, so the correct annotation is None.
        """
        player: RemotePlayer
        for player in self:
            await player.send(message)

    async def multicast(self, emitter: RemotePlayer, message: str) -> None:
        """Send *message* to every player except *emitter* (identity check)."""
        player: RemotePlayer
        for player in self:
            if player is not emitter:
                await player.send(message)


class RemotePartyFactory:
    """Collects players until `limit` is reached, then releases all waiting
    gather() callers with the same RemoteParty instance."""

    def __init__(self, limit: int):
        self._limit = limit
        self._buffer = list()
        self._signals = list()
        # FIX: annotation said plain RemoteParty but the attribute holds None
        # between batches.
        self._party: "RemoteParty | None" = None

    async def gather(self, player: RemotePlayer) -> RemoteParty:
        """Register *player*; block until a full party forms, then return it.

        NOTE(review): a gather() from a *new* batch resets self._party before
        earlier waiters wake — confirm callers never overlap batches.
        """
        self._party = None
        signal = asyncio.Event()
        self._buffer.append(player)
        self._signals.append(signal)
        if self._is_full():
            self._party = self._create_party()
        else:
            await signal.wait()
        return self._party

    def _is_full(self):
        # Full once the buffered players reach the configured limit.
        return len(self._buffer) >= self._limit

    def _create_party(self):
        # TODO: test with a single loop
        party = RemoteParty()
        for player in self._buffer:
            party.append(player)
        self._buffer.clear()
        # Wake every waiter; they read self._party after this.
        for signal in self._signals:
            signal.set()
        self._signals.clear()
        return party
|
import datetime
import time
import random
from flask import request
import data_handler
import bcrypt
def from_timestamp_datetime(user_questions):
    """Convert each question's epoch 'submission_time' (local time) to a
    'YYYY-MM-DD HH:MM:SS' string, in place; returns the same list."""
    fmt = '%Y-%m-%d %H:%M:%S'
    for entry in user_questions:
        epoch = int(entry['submission_time'])
        entry['submission_time'] = datetime.datetime.fromtimestamp(epoch).strftime(fmt)
    return user_questions
def key_generator():
    """Return a pseudo-random integer id in [1000000, 10000000]."""
    return random.randint(1000000, 10000000)
def get_current_timestamp():
    """Return the current Unix time rounded to the nearest whole second."""
    return round(time.time())
def get_current_datetime():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    fmt = '%Y-%m-%d %H:%M:%S'
    return datetime.datetime.now().strftime(fmt)
def add_question_wrapper():
    """Build a question dict from the current Flask request form, persist it
    via data_handler, and return the generated question id."""
    question = {
        'id': key_generator(),
        'submission_time': get_current_datetime(),
        # NOTE(review): counters stored as strings — confirm data_handler
        # expects str rather than int here.
        'view_number': '0',
        'vote_number': '0',
        'title': request.form.get('title'),
        'message': request.form.get('message'),
        'image': None
    }
    data_handler.add_new_question(question)
    question_id = question['id']
    return question_id
def add_answer_wrapper(question_id):
    """Build an answer dict for *question_id* from the current Flask request
    form and persist it via data_handler."""
    answer = {
        'id': key_generator(),
        'submission_time': get_current_datetime(),
        'vote_number': '0',
        'question_id': question_id,
        'message': request.form.get('message')
    }
    data_handler.add_new_answer(answer)
def hash_password(plain_text_password):
    """Hash a plaintext password with bcrypt; returns the hash as a str."""
    # By using bcrypt, the salt is saved into the hash itself
    hashed_bytes = bcrypt.hashpw(plain_text_password.encode('utf-8'), bcrypt.gensalt())
    return hashed_bytes.decode('utf-8')
def verify_password(plain_text_password, hashed_password):
    """Return True if *plain_text_password* matches the bcrypt hash
    *hashed_password* (constant-time check via bcrypt)."""
    hashed_bytes_password = hashed_password.encode('utf-8')
    return bcrypt.checkpw(plain_text_password.encode('utf-8'), hashed_bytes_password)
|
from pymongo import MongoClient
class MongoHandler:
    """Thin wrapper exposing the berman_monitoring MongoDB database as .db."""

    def __init__(self):
        # SECURITY(review): credentials are hard-coded in the connection
        # string — move them to environment variables / secret storage and
        # rotate them; anyone with this source can access the database.
        self.__client = MongoClient(
            'mongodb://admin:admin123@ds215563.mlab.com:15563/?authSource=berman_monitoring&authMechanism=SCRAM-SHA-1&retryWrites=false')
        self.db = self.__client.berman_monitoring
|
#
#
#
def find_device(vc,vop,arg):
    """Dispatch a list-view operation against the 'mr_chooser_list' view.

    NOTE(review): view_list_op and VID_TYPE_NAME are assumed to be module
    globals defined outside this excerpt — confirm.
    """
    vn = 'mr_chooser_list' # vn: view name
    vid_type = VID_TYPE_NAME
    return view_list_op(vc,vn,vid_type,vop,arg)
#
#
#
def is_mr_chooser_list(vc,vop):
    """Run a view operation asking whether 'mr_chooser_list' is current."""
    vn = 'mr_chooser_list' # vn: view name
    vid_type = VID_TYPE_NAME
    return view_op(vc,vn,vid_type,vop,vpackage=None,debug=False)
#
# main screen dispatch
#
def mr_chooser_list(vc,scmd,vop,arg=None): # scmd: screen command
    """Route screen commands for the chooser list; False when unhandled."""
    vc.dump()
    if scmd == SCMD_DEVICE:
        return find_device(vc,vop,arg)
    elif scmd == SCMD_IS_MR_CHOOSER_LIST:
        return is_mr_chooser_list(vc,vop)
    else:
        return False
|
from typing import List, Optional
from django.conf import settings
from django.template.loader import render_to_string
from subscribe.models import Subscribe, Letter
# from .models import News
SUBSCRIPTION_CODE = 'news'
SITE_DOMAIN = getattr(settings, 'SITE_DOMAIN', 'localhost:8000')
#
# def create_subscription_letter(news_list: List[News]) -> Optional[Letter]:
# if not len(news_list):
# return
#
# subscribe = Subscribe.objects.filter(code=SUBSCRIPTION_CODE).first()
#
# if subscribe is None:
# subscribe = Subscribe(
# code=SUBSCRIPTION_CODE,
# name='Новости'
# )
# subscribe.save()
#
# content = render_to_string('news/email/subscription.html', {
# 'news_list': news_list,
# 'subscribe': subscribe,
# 'SITE_DOMAIN': SITE_DOMAIN
# })
#
# letter = Letter(
# subscribe=subscribe,
# subject='Новости',
# content_format='html',
# content=content
# )
#
# letter.save()
# return letter |
# Greeting function.
def saludar():
    """Print the welcome banner (Spanish output kept verbatim)."""
    print ("Hola, bienvenido a mi primer programa de funciones!")
# Multiplication function.
def multi():
    """Prompt for two integers and print their product."""
    print("Escribe un numero: ")
    numX = int(input())
    print("Escribe un segundo numero: ")
    numY = int(input())
    result = numX * numY
    print ("El resultado de la multiplicacion es: "+str(result))
def division():
    """Prompt for two integers and print their quotient.

    FIX: a divisor of 0 previously crashed with ZeroDivisionError; now it
    prints a message and returns.
    """
    print("Escribe un numero: ")
    numX = int(input())
    print("Escribe un segundo numero: ")
    numY = int(input())
    if numY == 0:
        print("No se puede dividir entre cero.")
        return
    result = numX / numY
    print ("El resultado de la division es: "+str(result))
def pot():
    """Prompt for an integer and print its square."""
    print("Escribe un numero: ")
    numX = int(input())
    result = numX * numX
    print ("El cuadrado de tu numero es: "+str(result))
# Main entry point of the program.
def main():
    """Show the menu, read a choice, and run the selected operation."""
    saludar ()
    print()
    print ("Escribe 1 si quieres multiplicar.")
    print ("Escribe 2 si quieres dividir.")
    print ("Escribe 3 si quieres saber el cuadrado del numero.")
    num1 = int(input())
    # The three comparisons are mutually exclusive by value, so plain
    # (non-elif) ifs behave the same here.
    if (num1 == 1):
        multi ()
    if (num1 == 2):
        division ()
    if (num1 == 3):
        pot()


if __name__ == "__main__":
    main()
import numpy as np
import pandas as pd
import warnings
from .sniffer import CSVSnifferList
from .helpers_ui import *
# ******************************************************************
# helpers
# ******************************************************************
def sniff_settings_csv(fname_list):
    """Sniff shared CSV dialect settings (delimiter, skiprows, header)
    across all files in *fname_list*."""
    sniffer = CSVSnifferList(fname_list)
    return {
        'delim': sniffer.get_delim(),
        'skiprows': sniffer.count_skiprows(),
        'has_header': sniffer.has_header(),
        'header': 0 if sniffer.has_header() else None,
    }
def apply_select_rename(dfg, columns_select, columns_rename):
    """Apply a column rename map, then a column selection, to *dfg*.

    Renames that would collide with an existing column name are dropped
    iteratively (with a UserWarning) until the mapping is conflict-free.
    Returns a new dataframe; the input is not mutated.
    """
    if columns_rename:
        # check no naming conflicts
        # NOTE(review): `collections` and `warnings` resolve via this
        # module's imports / the helpers_ui star import — confirm.
        columns_select2 = [columns_rename[k] if k in columns_rename.keys() else k for k in dfg.columns.tolist()]
        df_rename_count = collections.Counter(columns_select2)
        if df_rename_count and max(df_rename_count.values()) > 1: # would the rename create naming conflict?
            warnings.warn('Renaming conflict: {}'.format([(k,v) for k,v in df_rename_count.items() if v>1]), UserWarning)
        while df_rename_count and max(df_rename_count.values())>1:
            # remove key value pair causing conflict
            conflicting_keys = [i for i,j in df_rename_count.items() if j>1]
            # Keep only renames whose *source* is itself a conflicting name
            # (they move the clash away); all other renames are dropped.
            columns_rename = {k:v for k,v in columns_rename.items() if k in conflicting_keys}
            columns_select2 = [columns_rename[k] if k in columns_rename.keys() else k for k in dfg.columns.tolist()]
            df_rename_count = collections.Counter(columns_select2)
        dfg = dfg.rename(columns=columns_rename)
    if columns_select:
        if columns_rename:
            columns_select2 = list(dict.fromkeys([columns_rename[k] if k in columns_rename.keys() else k for k in columns_select])) # set of columns after rename
        else:
            columns_select2 = columns_select
        dfg = dfg.reindex(columns=columns_select2)
    return dfg
def create_sql_connection(cnxn_string):
    """Open a SQLAlchemy connection for *cnxn_string* with multi-value
    INSERT enabled (speeds up bulk loads on supporting dialects)."""
    from sqlalchemy.engine import create_engine

    connection = create_engine(cnxn_string).connect()
    connection.dialect.supports_multivalues_insert = True
    return connection
def convert_to_sql(df, connection, table_name, if_exists='replace', chunksize=5000):
    """Write *df* to SQL table *table_name* via pandas; returns True."""
    df.to_sql(
        table_name,
        connection,
        schema=None,
        if_exists=if_exists,
        index=True,
        index_label=None,
        chunksize=chunksize,
        dtype=None,
    )
    return True
# ******************************************************************
# combiner
# ******************************************************************
class CombinerCSV(object):
"""
Core combiner class. Checks columns, generates preview, combines.
Args:
fname_list (list): file names, eg ['a.csv','b.csv']
sep (string): CSV delimiter, see pandas.read_csv()
has_header (boolean): data has header row
all_strings (boolean): read all values as strings (faster)
header_row (int): header row, see pandas.read_csv()
skiprows (int): rows to skip at top of file, see pandas.read_csv()
nrows_preview (int): number of rows in preview
add_filename (bool): add filename column to output data frame. If `False`, will not add column.
columns_select (list): list of column names to keep
columns_rename (dict): dict of columns to rename `{'name_old':'name_new'}
apply_after_read (function): function to apply after reading each file. needs to return a dataframe
logger (object): logger object with send_log()
"""
    def __init__(self, fname_list, sep=',', has_header = True, all_strings=False, nrows_preview=3, read_csv_params=None,
                 add_filename=True, columns_select=None, columns_rename=None, apply_after_read=None, logger=None):
        # See the class docstring for argument semantics.
        if not fname_list:
            raise ValueError("Filename list should not be empty")
        self.fname_list = fname_list
        self.all_strings = all_strings
        self.nrows_preview = nrows_preview
        self.read_csv_params = read_csv_params
        if not self.read_csv_params:
            self.read_csv_params = {}
        # sep/has_header always win over caller-supplied read_csv_params.
        self.read_csv_params['header'] = 0 if has_header else None
        self.read_csv_params['sep'] = sep
        self.logger = logger
        # Cached result of preview_columns(); populated lazily.
        self.col_preview = None
        self.add_filename = add_filename
        self.columns_select = columns_select
        self.columns_rename = columns_rename
        self.apply_after_read = apply_after_read
        if not self.columns_select:
            self.columns_select = []
        else:
            # NOTE(review): `collections` resolves via the helpers_ui star
            # import — confirm.
            if max(collections.Counter(columns_select).values())>1:
                raise ValueError('Duplicate entries in columns_select')
        if not self.columns_rename:
            self.columns_rename = {}
def read_csv(self, fname, is_preview=False, chunksize=None):
cfg_dype = str if self.all_strings else None
cfg_nrows = self.nrows_preview if is_preview else None
df = pd.read_csv(fname, dtype=cfg_dype, nrows=cfg_nrows, chunksize=chunksize,
**self.read_csv_params)
if self.apply_after_read:
df = self.apply_after_read(df)
return df
    def read_csv_all(self, msg=None, is_preview=False, chunksize=None, columns_select=None,
                     columns_rename=None):
        """Read every file in fname_list and return a list of dataframes.

        Optionally logs progress (when a logger and *msg* are set), applies
        select/rename, and appends a 'filename' column when add_filename is
        enabled.
        """
        dfl_all = []
        if not columns_select:
            columns_select = []
        if not columns_rename:
            columns_rename = {}
        for fname in self.fname_list:
            if self.logger and msg:
                # NOTE(review): `ntpath` resolves via the helpers_ui star
                # import — confirm.
                self.logger.send_log(msg + ' ' + ntpath.basename(fname), 'ok')
            df = self.read_csv(fname, is_preview=is_preview, chunksize=chunksize)
            if columns_select or columns_rename:
                df = apply_select_rename(df, columns_select, columns_rename)
            if self.add_filename:
                df['filename'] = ntpath.basename(fname)
            dfl_all.append(df)
        return dfl_all
    def preview_columns(self):
        """
        Checks column consistency in list of files. It checks both presence and order of columns in all files
        Returns:
            col_preview (dict): results dictionary with
                files_columns (dict): dictionary with information, keys = filename, value = list of columns in file
                columns_all (list): all columns in files
                columns_common (list): only columns present in every file
                is_all_equal (boolean): all files equal in all files?
                df_columns_present (dataframe): which columns are present in which file?
                df_columns_order (dataframe): where in the file is the column?
        """
        dfl_all = self.read_csv_all(msg='scanning colums of', is_preview=True)
        dfl_all_col = [df.columns.tolist() for df in dfl_all]
        if self.add_filename:
            # Strip the synthetic 'filename' column before comparing schemas.
            [df.remove('filename') for df in dfl_all_col]
        col_files = dict(zip(self.fname_list, dfl_all_col))
        # NOTE(review): list_common/list_unique/columns_all_equal are assumed
        # to come from the helpers_ui star import — confirm.
        col_common = list_common(list(col_files.values()))
        col_all = list_unique(list(col_files.values()))
        col_unique = list(set(col_all) - set(col_common))
        # find index in column list so can check order is correct
        df_col_present = {}
        for iFileName, iFileCol in col_files.items():
            df_col_present[iFileName] = [ntpath.basename(iFileName), ] + [iCol in iFileCol for iCol in col_all]
        df_col_present = pd.DataFrame(df_col_present, index=['filename'] + col_all).T
        df_col_present.index.names = ['file_path']
        # find index in column list so can check order is correct
        df_col_order = {}
        for iFileName, iFileCol in col_files.items():
            df_col_order[iFileName] = [ntpath.basename(iFileName), ] + [
                iFileCol.index(iCol) if iCol in iFileCol else np.nan for iCol in col_all]
        df_col_order = pd.DataFrame(df_col_order, index=['filename'] + col_all).T
        col_preview = {'files_columns': col_files, 'columns_all': col_all, 'columns_common': col_common,
                       'columns_unique': col_unique, 'is_all_equal': columns_all_equal(dfl_all_col),
                       'df_columns_present': df_col_present, 'df_columns_order': df_col_order}
        self.col_preview = col_preview
        return col_preview
def _preview_available(self):
if not self.col_preview:
self.preview_columns()
def is_all_equal(self):
"""
Return all files equal after checking if preview_columns has been run. If not run it.
Returns:
is_all_equal (boolean): If all files equal?
"""
self._preview_available()
return self.col_preview['is_all_equal']
def is_col_present(self):
"""
Checks if columns are present
Returns:
bool: if columns present
"""
self._preview_available()
return self.col_preview['df_columns_present'].reset_index(drop=True)
def is_col_present_unique(self):
"""
Shows unique columns by file
Returns:
bool: if columns present
"""
self._preview_available()
return self.is_col_present().set_index('filename')[self.col_preview['columns_unique']]
def is_col_present_common(self):
"""
Shows common columns by file
Returns:
bool: if columns present
"""
self._preview_available()
return self.is_col_present().set_index('filename')[self.col_preview['columns_common']]
def preview_combine(self, is_col_common=False):
"""
Preview of combines all files
Note:
Unlike `CombinerCSVAdvanced.combine()` this function supports simple combine operations
Args:
is_col_common (bool): keep only common columns? If `false` returns all columns filled with nans
Returns:
df_all (dataframe): pandas dataframe with combined data from all files, only self.nrows_preview top rows
"""
return self.combine(is_col_common, is_preview=True)
def combine(self, is_col_common=False, is_preview=False):
"""
Combines all files. This is in-memory. For out-of-core use `combine_save()`
Args:
is_col_common (bool): keep only common columns? If `false` returns all columns filled with nans
is_preview (bool): read only self.nrows_preview top rows
Returns:
df_all (dataframe): pandas dataframe with combined data from all files
"""
dfl_all = self.read_csv_all('reading full file', is_preview=is_preview, columns_select=self.columns_select,
columns_rename=self.columns_rename)
if self.logger:
self.logger.send_log('combining files', 'ok')
if is_col_common:
df_all = pd.concat(dfl_all, join='inner', sort=False)
else:
df_all = pd.concat(dfl_all, sort=False)
self.df_all = df_all
return df_all
def combine_preview_save(self, fname_out):
"""
Save preview to CSV
Args:
fname_out (str): filename
"""
df_all_preview = self.preview_combine()
df_all_preview.to_csv(fname_out, index=False)
return True
def get_output_filename(self, fname, prefix, parquet_output=False):
basename = os.path.basename(fname)
name_with_ext = os.path.splitext(basename)
new_name = prefix + name_with_ext[0]
if parquet_output:
new_name += ".parquet"
elif len(name_with_ext) == 2:
new_name += name_with_ext[1]
return new_name
def create_output_dir(self, output_dir):
if output_dir and not os.path.exists(output_dir):
os.makedirs(output_dir)
    def get_columns_for_save(self, is_col_common=False):
        """Columns for output files: the (renamed, de-duplicated) selected
        columns when columns_select is set, otherwise the previewed
        common/all columns plus 'filename' when add_filename is enabled."""
        if self.columns_select:
            # set of columns after rename
            columns_select2 = list(collections.OrderedDict.fromkeys([self.columns_rename[k]
                                                                     if k in self.columns_rename.keys() else k
                                                                     for k in self.columns_select]))
            return columns_select2
        else:
            self._preview_available()
            import copy
            # Deep-copy so callers mutating the result don't corrupt the
            # cached col_preview.
            columns = copy.deepcopy(self.col_preview['columns_common'] if is_col_common
                                    else self.col_preview['columns_all'])
            if self.add_filename:
                columns += ['filename', ]
            return columns
    def save_files(self, columns, out_filename=None, output_dir=None, prefix='d6tstack-', overwrite=False, chunksize=1e10,
                   columns_select2=None, parquet_output=False):
        """Stream each input file chunk-wise to CSV or parquet output.

        With *out_filename* everything is appended into one output file;
        otherwise one prefixed output file is written per input file.

        NOTE(review): CSV file handles are never explicitly closed, and
        `pqwriter`/`fhandle` can be unbound on some skip/empty-file paths —
        confirm these cases cannot occur in practice.
        """
        if parquet_output:
            import pyarrow as pa
            import pyarrow.parquet as pq
        # Header-only frame used to emit the CSV header row once per file.
        df_all_header = pd.DataFrame(columns=columns)
        if out_filename and not overwrite and os.path.isfile(out_filename):
            warnings.warn("File already exists. Please pass overwrite=True for overwriting")
            return True
        if out_filename:
            if not parquet_output:
                fhandle = open(out_filename, 'w')
                df_all_header.to_csv(fhandle, header=True, index=False)
            first = True
        for fname in self.fname_list:
            if self.logger:
                self.logger.send_log('processing ' + ntpath.basename(fname), 'ok')
            new_name = self.get_output_filename(fname, prefix, parquet_output=parquet_output)
            if output_dir:
                fname_out = os.path.join(output_dir, new_name)
            else:
                # Default: write next to the input file.
                fname_out = os.path.join(os.path.dirname(fname), new_name)
            if not out_filename and not overwrite and os.path.isfile(fname_out):
                warnings.warn("File already exists. Please pass overwrite=True for overwriting")
            else:
                if not out_filename:
                    if not parquet_output:
                        fhandle = open(fname_out, 'w')
                        df_all_header.to_csv(fhandle, header=True, index=False)
                    first = True
                for df_chunk in self.read_csv(fname, chunksize=chunksize):
                    if columns_select2 or self.columns_rename:
                        df_chunk = apply_select_rename(df_chunk, columns_select2, self.columns_rename)
                    if self.add_filename:
                        df_chunk['filename'] = ntpath.basename(new_name)
                    if parquet_output:
                        table = pa.Table.from_pandas(df_chunk)
                        if first:
                            # Lazily open the writer with the first chunk's schema.
                            if out_filename:
                                pqwriter = pq.ParquetWriter(out_filename, table.schema)
                            else:
                                pqwriter = pq.ParquetWriter(fname_out, table.schema)
                            first = False
                        pqwriter.write_table(table)
                    else:
                        df_chunk.to_csv(fhandle, header=False, index=False)
                if not out_filename and parquet_output:
                    pqwriter.close()
        if out_filename and parquet_output:
            pqwriter.close()
        return True
def align_save(self, output_dir=None, prefix='d6tstack-', overwrite=False, chunksize=1e10,
               is_col_common=False, parquet_output=False):
    """
    Save matched columns data directly to CSV for each of the files.

    Args:
        output_dir (str): output directory to save, default input file directory, optional
        prefix (str): prefix prepended to each input filename to create the output file name, optional
        overwrite (bool): overwrite file if exists, default False, optional
        chunksize (int): rows read per chunk from each input, optional
        is_col_common (bool): Use common columns else all columns, default False, optional
        parquet_output (bool): write parquet instead of CSV, optional
    """
    columns_select2 = self.get_columns_for_save(is_col_common=is_col_common)
    # NOTE(review): `columns` aliases columns_select2 (no copy), so the `+=`
    # below mutates both; and get_columns_for_save already appends 'filename'
    # when self.add_filename — confirm the double-append here is intended.
    columns = columns_select2
    if self.add_filename and self.columns_select:
        columns += ['filename', ]
    return self.save_files(columns, output_dir=output_dir, prefix=prefix, overwrite=overwrite,
                           chunksize=chunksize, columns_select2=columns, parquet_output=parquet_output)
def combine_save(self, fname_out, chunksize=1e10, is_col_common=False, parquet_output=False, overwrite=True):
    """
    Save combined data directly to CSV. This implements out-of-core combine
    functionality to combine large files. For in-memory use `combine()`.

    Args:
        fname_out (str): output filename
        chunksize (int): rows read per chunk from each input, optional
        is_col_common (bool): use common columns else all columns, optional
        parquet_output (bool): write parquet instead of CSV, optional
        overwrite (bool): overwrite file if exists, default True, optional
    """
    columns_select2 = self.get_columns_for_save(is_col_common=is_col_common)
    # NOTE(review): `columns` aliases columns_select2, so the `+=` below also
    # extends columns_select2 passed on to save_files — confirm intended.
    columns = columns_select2
    if self.add_filename and self.columns_select:
        columns += ['filename', ]
    self.create_output_dir(os.path.dirname(fname_out))
    return self.save_files(columns, out_filename=fname_out, chunksize=chunksize, columns_select2=columns_select2,
                           overwrite=overwrite, parquet_output=parquet_output)
def to_sql(self, cnxn_string, table_name, is_col_common=False, is_preview=False,
           if_exists='replace', chunksize=5000):
    """
    Save combined files to sql (in-memory combine, then bulk insert).

    Args:
        cnxn_string (str): connection string to connect to database
        table_name (str): table name to be used to store the data to database
        is_col_common (bool): Use common columns else all columns, default False, optional
        is_preview (bool): read only self.nrows_preview top rows
        if_exists (str): replace or append to existing table, optional
        chunksize (int): Number of rows to be inserted to table at one time.
    """
    combined = self.combine(is_col_common=is_col_common, is_preview=is_preview)
    conn = create_sql_connection(cnxn_string)
    convert_to_sql(combined, conn, table_name, if_exists=if_exists, chunksize=chunksize)
    conn.close()
    return True
def to_sql_stream(self, cnxn_string, table_name, if_exists='replace',
                  chunksize=1e10, sql_chunksize=5000, is_col_common=False):
    """
    Save combined large files in chunks to sql (out-of-core).

    Args:
        cnxn_string (str): connection string to connect to database
        table_name (str): table name to be used to store the data to database
        if_exists (str): replace or append to existing table, optional
        chunksize (int): Number of lines to be used to extract from file each time.
        sql_chunksize (int): Number of rows to be inserted to table at one time.
        is_col_common (bool): Use common columns else all columns, default False, optional
    """
    cols = self.columns_select
    if not cols:
        cols = self.get_columns_for_save(is_col_common=is_col_common)
    connection = create_sql_connection(cnxn_string)
    wrote_any = False
    for fname in self.fname_list:
        if self.logger:
            self.logger.send_log('processing ' + ntpath.basename(fname), 'ok')
        for df_chunk in self.read_csv(fname, chunksize=chunksize):
            if cols or self.columns_rename:
                df_chunk = apply_select_rename(df_chunk, cols, self.columns_rename)
            if self.add_filename:
                df_chunk['filename'] = ntpath.basename(fname)
            # First chunk honours the caller's if_exists; every later chunk appends.
            mode = 'append' if wrote_any else if_exists
            wrote_any = True
            convert_to_sql(df_chunk, connection, table_name, if_exists=mode,
                           chunksize=sql_chunksize)
    connection.close()
    return True
def convert_to_csv_parquet(self, out_filename=None, separate_files=True, output_dir=None, prefix='d6tstack-',
                           is_col_common=False, overwrite=False, streaming=True, chunksize=1e10,
                           parquet_output=False):
    """Dispatch to the appropriate save path.

    separate_files=True -> one aligned output per input (align_save);
    otherwise streaming combine (combine_save) or in-memory combine into
    out_filename. Raises ValueError when combining without out_filename.
    """
    if separate_files:
        self.align_save(output_dir=output_dir, prefix=prefix, overwrite=overwrite, is_col_common=is_col_common,
                        chunksize=chunksize, parquet_output=parquet_output)
    elif streaming and out_filename:
        self.combine_save(out_filename, chunksize=chunksize, parquet_output=parquet_output, overwrite=overwrite)
    elif out_filename:
        df = self.combine(is_col_common=is_col_common)
        if parquet_output:
            import pyarrow as pa
            import pyarrow.parquet as pq
            table = pa.Table.from_pandas(df)
            pq.write_table(table, out_filename)
        else:
            # Bug fix: the file handle was previously opened and never
            # closed; use a context manager so the CSV is flushed/closed.
            with open(out_filename, 'w') as fhandle:
                df.to_csv(fhandle, header=True, index=False)
    else:
        raise ValueError("out_filename is required")
def to_csv(self, out_filename=None, separate_files=True, output_dir=None, prefix='d6tstack-',
           is_col_common=False, overwrite=False, streaming=False, chunksize=1e10):
    """
    Write the input files as CSV with aligned columns — combined or one file per input.

    Args:
        out_filename (str): mandatory when combining into one file
        separate_files (bool): save separately instead of combining, default True
        output_dir (str): output directory for separate files, default input file directory, optional
        prefix (str): prepended to each input filename, optional
        is_col_common (bool): use common columns else all columns, optional
        overwrite (bool): overwrite file if exists, optional
        streaming (bool): combine out-of-core in chunks, optional
        chunksize (int): chunksize used when writing large files in chunks
    """
    self.convert_to_csv_parquet(
        out_filename=out_filename,
        separate_files=separate_files,
        output_dir=output_dir,
        prefix=prefix,
        is_col_common=is_col_common,
        overwrite=overwrite,
        streaming=streaming,
        chunksize=chunksize)
def to_parquet(self, out_filename=None, separate_files=True, output_dir=None, prefix='d6tstack-',
               is_col_common=False, overwrite=False, streaming=False, chunksize=1e10):
    """
    Write the input files as parquet with aligned columns — combined or one file per input.

    Args:
        out_filename (str): mandatory when combining into one file
        separate_files (bool): save separately (aligned) instead of combining, default True
        output_dir (str): output directory for separate files, default input file directory, optional
        prefix (str): prepended to each input filename, optional
        is_col_common (bool): use common columns else all columns, optional
        overwrite (bool): overwrite file if exists, optional
        streaming (bool): combine out-of-core in chunks, optional
        chunksize (int): chunksize used when writing large files in chunks
    """
    self.convert_to_csv_parquet(
        out_filename=out_filename,
        separate_files=separate_files,
        output_dir=output_dir,
        prefix=prefix,
        is_col_common=is_col_common,
        overwrite=overwrite,
        streaming=streaming,
        chunksize=chunksize,
        parquet_output=True)
|
from pandas import read_csv

# Stream the CSV in ~50k-row chunks so the whole file never sits in memory.
for chunk in read_csv('clean2.csv', chunksize=50001):
    print(chunk)
|
import torch
from .base_optimizer import Optimizer
class Adagrad(Optimizer):
    """Adagrad: each parameter's update is scaled by the inverse square root
    of its accumulated squared gradients (plus epsilon for stability)."""

    def __init__(self, model_params, lr=1e-3, momentum=0.9):
        super(Adagrad, self).__init__(model_params, lr, momentum)
        # One squared-gradient accumulator per parameter, same shape/device.
        self.acc_sqr_grads = [torch.zeros_like(param) for param in self.model_params]

    @torch.no_grad()
    def step(self, lr=None):
        """Apply one Adagrad update; a truthy lr overrides the stored rate."""
        if lr:
            self.lr = lr
        eps = 1e-8
        for param, accum in zip(self.model_params, self.acc_sqr_grads):
            accum.add_(param.grad * param.grad)            # G += g^2
            denom = accum.sqrt().add(eps)                  # sqrt(G) + eps
            param.sub_((self.lr / denom) * param.grad)     # p -= lr * g / denom
from django.conf.urls import url
from . import views
from django.contrib.auth import views as authViews
# URL routes for the userauth app (legacy Django `url()` regex style).
urlpatterns=[
    url(r'^signup/$',views.signup,name='signup'),
    url(r'^login/$',views.login_user,name="login.html"),
    # NOTE(review): no `name=` kwarg, so this route cannot be reversed by name.
    url(r'^logout/$',authViews.logout,{'template_name':'userauth/logout.html'}),
]
|
from django.db import models
from django_serializable_model import SerializableModel
class User(SerializableModel):
    """User model whose serialization is restricted to whitelisted fields."""
    email = models.CharField(max_length=765, blank=True)
    name = models.CharField(max_length=100)

    # whitelisted fields that are allowed to be seen
    WHITELISTED_FIELDS = set([
        'name',
    ])

    def serialize(self, *args, **kwargs):
        """Override serialize method to only serialize whitelisted fields.

        Callers may pass fields=... to override the whitelist. Bug fix: any
        other **kwargs are now forwarded to super().serialize (previously
        they were silently dropped after popping 'fields').
        """
        fields = kwargs.pop('fields', self.WHITELISTED_FIELDS)
        return super(User, self).serialize(*args, fields=fields, **kwargs)
class Settings(SerializableModel):
    """Per-user settings; one-to-one with User (user is also the PK)."""
    user = models.OneToOneField(User, primary_key=True,
                                on_delete=models.CASCADE)
    email_notifications = models.BooleanField(default=False)

    def serialize(self, *args):
        """Override serialize method to not serialize the user field"""
        return super(Settings, self).serialize(*args, exclude=['user'])
class Post(SerializableModel):
    """A text post authored by a User; serialized with the default behavior."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    text = models.TextField()
|
import tempfile
import os
SLACKVIEWER_TEMP_PATH = os.path.join(tempfile.gettempdir(), "_slackviewer")
|
from flask import abort, Flask, jsonify, request
import json
# Raw input records and the target file for transformed ("x-morged") objects.
SOURCE = 'raw_content/items.json'
TARGET = 'contents/items.json'


def read_raw_records():
    """Load the raw records from SOURCE."""
    with open(SOURCE, 'r') as fh:
        return json.load(fh)


def load_objects():
    """Load the already-transformed objects from TARGET."""
    with open(TARGET, 'r') as fh:
        return json.load(fh)


# Bug fix: these module-level calls previously ran BEFORE the helper
# definitions, raising NameError on import; helpers are now defined first.
records = read_raw_records()
id_counter = records[0]['id']
objects = load_objects()

app = Flask(__name__)


@app.route('/record', methods=['GET'])
def get_next_record():
    """Respond with the next record to be x-morged."""
    # Bug fix: the counter is module state; without `global` the += raised
    # UnboundLocalError.
    global id_counter
    next_record = records[id_counter]
    id_counter += 1
    return jsonify(next_record)


@app.route('/record/<int:record_id>', methods=['GET'])  # bug fix: was method=
def get_specific_record(record_id):
    """Respond with the record for record_id, or 404 when absent."""
    # Assumes `records` is a mapping keyed by record id — confirm SOURCE shape.
    specific_record = records.get(record_id, None)
    if specific_record is None:
        abort(404)
    else:
        return jsonify(specific_record)


@app.route('/record', methods=['POST'])
def save_new_object():
    """Save a x-morged object."""
    # Bug fix: request.json is a property, not a callable.
    new_object = request.json
    objects.append(new_object)
    with open(TARGET, 'w') as fh:
        json.dump(objects, fh)
    # Bug fix: a view must return a response; echo the stored object.
    return jsonify(new_object)


if __name__ == '__main__':
    app.run()
|
from torch_snippets import stem, logger, os, unzip_file, P
from torch_snippets.registry import Config, AttrDict, registry
from fastai.vision.all import *
import torch_snippets
from auto_train.classification.custom_functions import *
from auto_train.classification.timmy import create_timm_model
from auto_train.common import Task
class ClassificationModel(Task):
    """Image-classification task: builds a timm backbone with a custom head
    and (conditionally) a fastai Learner over the configured dataloaders."""

    def __init__(self, config, inference_only=True):
        super().__init__(config)
        # Task.__init__ may normalize/augment the config; re-read from self.
        config = self.config
        self.model = create_timm_model(
            config.architecture.backbone.model,
            custom_head=config.architecture.head,
            n_out=config.project.num_classes)
        # NOTE(review): dataloaders + Learner are built when inference_only is
        # True — this reads inverted (training machinery for inference only);
        # confirm the intended semantics of the flag.
        if inference_only:
            self.dls = self.get_dataloaders()
            self.learn = Learner(
                self.dls, self.model,
                splitter=default_split,
                metrics=[accuracy])

    def get_dataloaders(self):
        """Return dataloaders built from the config; download the dataset
        first if the training directory does not exist yet."""
        training_dir = str(P(self.config.training.dir).expanduser())
        if not os.path.exists(training_dir):
            print(f'downloading data to {training_dir}...')
            self.download_data()
        dls = self.config.training.data.load_datablocks(self.config)
        return dls
|
# -*-coding:utf-8-*-
from pymongo import MongoClient
from scrapy import Item
class MongoDBPipeline:
    """Scrapy item pipeline that persists scraped items into MongoDB."""

    def open_spider(self, spider):
        """Connect to MongoDB using the spider's settings."""
        db_uri = spider.settings.get('MONGODB_URI', 'mongodb://localhost:27017')
        db_name = spider.settings.get('MONGODB_DB_NAME', 'scrapy_default')
        # Bug fix: MONGODB_URI was read but ignored — a hard-coded localhost
        # URI was used instead. Honour the configured URI.
        self.db_client = MongoClient(db_uri)
        self.db = self.db_client[db_name]

    def close_spider(self, spider):
        """Release the MongoDB connection when the spider finishes."""
        self.db_client.close()

    def process_item(self, item, spider):
        """Persist each item and pass it along unchanged."""
        self.insert_db(item)
        return item

    def insert_db(self, item):
        """Insert one item (converted to a plain dict) into `books`."""
        if isinstance(item, Item):
            item = dict(item)
        self.db.books.insert_one(item)
import json
import r2pipe
# Path of the binary to analyze with radare2.
# (The original `global FILE` at module level was a no-op and was removed.)
FILE = "nuevo"
def main():
    # End-to-end pass (Python 2 script): open the binary in radare2, run full
    # analysis, dump the `int 0x80` hit addresses to disk, read them back,
    # then dump the disassembly around each address.
    addr_int = []
    r2file = open_file()
    aaanal_file(r2file)
    export_int_file(r2file)
    addr_int = import_int_file()
    extract_eax(r2file, addr_int)
def open_file():
    # Open the target binary (module constant FILE) in an r2pipe session.
    ff = r2pipe.open(FILE)
    return ff
def aaanal_file(ff):
    # Run radare2's full auto-analysis ("aaa") on the open session.
    ff.cmd("aaa")
def export_int_file(ff):
    # Search for `int 0x80` syscall instructions and write the raw search
    # output to interrupt.txt.
    intr = ff.cmd("/c int 0x80")
    # NOTE(review): `listaa` collects the 0x0-prefixed tokens but is never
    # used — the raw `intr` text is what gets written below.
    listaa = []
    a = intr.split("\n")
    for i in a:
        b = i.split(" ")
        for j in b:
            if "0x0" in j:
                listaa.append(j)
    f = open("interrupt.txt", "w")
    # Iterating a string writes it character by character; NOTE(review): the
    # file handle is never closed (relies on interpreter exit to flush).
    for i in intr:
        f.write(i)
def import_int_file():
    # Read interrupt.txt back and return the first whitespace-separated token
    # of each line (the hit address). NOTE(review): handle never closed.
    f = open("interrupt.txt", "r")
    addrmem = []
    for line in f:
        addrmem.append(line.split(" ")[0])
    return addrmem
def extract_eax(ff, addrss):
    # For each hit address, append the disassembly of its enclosing function
    # ("pdf @addr") to eeax_file.txt. Python 2 print statements.
    # NOTE(review): file opened in append mode and never closed.
    f = open("eeax_file.txt", "a")
    for i in addrss:
        print "************"
        print "Analyzing "+i+" \n"
        eaax = ff.cmd("pdf @"+ i)
        f.write(eaax)
# Script entry point.
if __name__ == "__main__":
    main()
|
def solution(S, P, Q):
    """For each query k, return the minimal impact factor (A=1, C=2, G=3,
    T=4) of any nucleotide occurring in S[P[k]..Q[k]] inclusive."""
    impact_order = (('A', 1), ('C', 2), ('G', 3), ('T', 4))
    answers = []
    for start, end in zip(P, Q):
        window = S[start:end + 1]
        # Check nucleotides from lowest impact upward; first hit wins.
        for nucleotide, factor in impact_order:
            if nucleotide in window:
                answers.append(factor)
                break
    return answers
# Smoke test from the problem statement; expected output: [2, 4, 1].
A = 'CAGCCTA'
B = [2, 5, 0]
C = [4, 5, 6]
print(solution(A, B, C))
|
# Error-code strings shared by the TRError hierarchy below.
INVALID_ARGUMENT = 'invalid argument'
UNKNOWN = 'unknown'
NOT_FOUND = 'not found'
SERVER_UNAVAILABLE = 'service unavailable'
HEALTH_CHECK_ERROR = 'health check failed'
class TRError(Exception):
    """Base error carrying a code/message payload for JSON error responses."""

    def __init__(self, code, message, type_='fatal'):
        super().__init__()
        # Fall back to generic values when falsy arguments are supplied.
        self.code = code if code else UNKNOWN
        self.message = message if message else 'Something went wrong.'
        self.type_ = type_

    @property
    def json(self):
        """Serializable view of this error."""
        return {
            'type': self.type_,
            'code': self.code,
            'message': self.message,
        }
class CybercrimetNotFoundError(TRError):
    """Raised when the CyberCrime entity cannot be located."""

    def __init__(self):
        super().__init__(code=NOT_FOUND, message='The CyberCrime not found')
class CybercrimeUnexpectedError(TRError):
    """Raised for unclassified upstream failures; the message comes from the
    payload's 'error' field when present."""

    def __init__(self, payload):
        detail = payload.get('error', 'unexpected error')
        super().__init__(code=UNKNOWN, message=detail)
class CybercrimeUnavailableError(TRError):
    """Raised when the upstream CyberCrime service is down."""

    def __init__(self):
        super().__init__(
            code=SERVER_UNAVAILABLE,
            message='The CyberCrime is unavailable. Please, try again later.',
        )
class BadRequestError(TRError):
    """Raised for malformed client input."""

    def __init__(self, error_message):
        super().__init__(code=INVALID_ARGUMENT, message=error_message)
class CybercrimeSSLError(TRError):
    """Raised when the upstream TLS certificate cannot be verified."""

    def __init__(self, error):
        # Unwrap the nested exception (presumably a requests/urllib3-style
        # wrapper: error.args[0].reason is the underlying SSL error).
        inner = error.args[0].reason.args[0]
        message = getattr(inner, 'verify_message', inner.args[0]).capitalize()
        super().__init__(
            UNKNOWN,
            f'Unable to verify SSL certificate: {message}'
        )
class CybercrimeWatchdogError(TRError):
    """Raised when the service health check fails."""

    def __init__(self):
        super().__init__(code=HEALTH_CHECK_ERROR, message='Invalid Health Check')
|
#The insult machine!
import random
#Lol wow! Just having fun here
#Pretty simple program idea
#Getting familiar with the random module
# Word pools, drawn from in order to assemble the insult.
insult_list1 = ['greasy', 'ludicrous', 'vicious', 'dozy', 'idiotic', 'imbecilic', 'pointless', 'mindless', 'moronic']
insult_list2 = ['piece']
insult_list3 = ['close', 'close-grained', 'compact', 'compressed', 'concentrated']
insult_list4 = ['monkey', 'dog', 'cow', 'goat', 'sheep', 'miser', 'wiener', 'turtleneck']
insult_list5 = ['vomit', 'disgorge', 'emit', 'eject', 'regurgitate']

# One random pick per pool (same order as before, so RNG use is unchanged).
word_1, word_2, word_3, word_4, word_5 = (
    random.choice(pool)
    for pool in (insult_list1, insult_list2, insult_list3, insult_list4, insult_list5)
)
print('You', word_1, word_2, 'of', word_3, word_4, word_5)
|
import requests
import random
import re
from lxml import html
import execjs
from generate_function import generate_eid, generate_d
session = requests.Session()
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
def get_pubKey_sq_token():
    # Fetch the JD login page and scrape the hidden form fields the login
    # flow needs: RSA public key, sa_token and uuid.
    url = 'https://passport.jd.com/new/login.aspx'
    r = session.get(url, headers=headers, verify=False)  # verify=False: TLS verification disabled deliberately
    root = html.fromstring(r.text)
    pubKey = root.xpath('//*[@id="pubKey"]/@value')[0]
    sa_token = root.xpath('//*[@id="sa_token"]/@value')[0]
    uuid = root.xpath('//*[@id="uuid"]/@value')[0]
    return pubKey, sa_token, uuid
def get_seqSid():
    # Fetch the risk-control page and extract the sessionId embedded in its
    # inline JavaScript.
    url = 'https://seq.jd.com/jseqf.html?bizId=passport_jd_com_login_pc&platform=js&version=1'
    r = session.get(url, headers=headers, verify=False)
    seqSid = re.compile('sessionId="(.+?)"').findall(r.text)[0]
    return seqSid
def get_authcode_c(eid):
    # Request a slide-captcha challenge for the given device id (eid) and
    # return the 'challenge' token from the JSON response.
    url = 'https://iv.jd.com/slide/g.html'
    payload = {
        'appId': '1604ebb2287',
        'scene': 'login',
        'product': 'bind-suspend',
        'e': eid,
        'callback': '',
    }
    r = session.get(url, params=payload, verify=False)
    return r.json()['challenge']
def get_authcode(d, c, eid, seqSid, username):
    # Submit the slide-captcha solution: d is the generated track data, c the
    # challenge token. Returns the 'validate' token used later as authcode.
    url = 'https://iv.jd.com/slide/s.html'
    payload = {
        'd': d,
        'c': c,
        'w': '0',
        'appId': '1604ebb2287',
        'scene': 'login',
        'product': 'bind-suspend',
        'e': eid,
        's': seqSid,
        'o': username,
        'callback': '',
    }
    r = session.get(url, params=payload, verify=False)
    print(r.text)
    return r.json()['validate']
# The response payload also contains an authcode field.
def encrypt(key, pwd):
    """RSA-encrypt the password via jd_rsa.js (execjs).

    Bug fix: the original left the script's file handle open; a context
    manager now closes it deterministically.
    """
    with open('./jd_rsa.js') as fh:
        obj = execjs.compile(fh.read())
    password = obj.call('encryptStr', key, pwd)
    return password
def login(uuid, eid, authcode, pubKey, sa_token, seqSid, username, password):
    # Post the assembled login form (RSA-encrypted password plus all the
    # tokens gathered earlier) to the JD login service and print the reply.
    url = 'https://passport.jd.com/uc/loginService'
    data = {
        'uuid': uuid,
        'eid': eid,
        'fp': '',
        '_t': '_t',
        'loginType': 'c',
        'loginname': username,
        'nloginpwd': encrypt(pubKey, password),  # password never sent in clear
        'authcode': authcode,
        'pubKey': pubKey,
        'sa_token': sa_token,
        'seqSid': seqSid,
        'useSlideAuthCode': '1',
    }
    payload = {
        'uuid': uuid,
        'r': random.random(),  # cache-buster
        'version': '2015',
    }
    r = session.post(url, params=payload, data=data, verify=False)
    print(r.text)
def get_info():
    # Sanity check: fetch the logged-in user's info page with the session
    # cookies and print it.
    r = session.get('http://i.jd.com/user/info')
    print(r.text)
if __name__ == '__main__':
    # Fill in credentials before running; the final login/get_info calls are
    # left commented out.
    username = ''
    password = ''
    eid = generate_eid()
    pubKey, sa_token, uuid = get_pubKey_sq_token()
    seqSid = get_seqSid()
    challenge = get_authcode_c(eid)
    d = generate_d()
    validate = get_authcode(d, challenge, eid, seqSid, username)
    # login(uuid, eid, validate, pubKey, sa_token, seqSid, username, password)
    # get_info()
|
number = int(input("Enter a number, and I will tell you all of that numbers' divisors: "))
# Collect every candidate in [1, number] that divides it evenly.
divisor_list = [candidate for candidate in range(1, number + 1) if number % candidate == 0]
print(divisor_list)
#7/31/14
#Determine whether a number (N) is prime or not
#We only have to check numbers up to the integer square root of N.
import math
#This determines whether n is prime
#INPUT - a number to check
#OUTPUT - True if it is prime or False if it is not
def is_prime(n):
    """Return True iff n is prime.

    Trial division up to floor(sqrt(n)). Bug fixes: n < 2 (0, 1, negatives)
    previously returned True; sqrt(n) was also recomputed on every loop
    iteration and is now hoisted.
    """
    if n < 2:
        return False
    limit = int(math.sqrt(n))  # largest divisor worth testing
    for divisor in range(2, limit + 1):
        if n % divisor == 0:
            return False
    return True
def main():
    # Print the first 1000 primes, right-aligned in 8-character columns
    # (no newline between entries).
    how_many_primes = 1000
    counter_primes = 0
    n = 2
    while (counter_primes < how_many_primes):
        if is_prime(n) == True:
            counter_primes += 1
            print("%8d" % n, end="")
        n += 1

main()
|
import os
from keras.utils.np_utils import to_categorical
import cv2
from keras.preprocessing import image
from keras.preprocessing import image
import json
import params
import numpy as np
import random
def prep_image(filepath, train_dir):
    """Crop labeled bounding boxes (with contextual padding) out of the image
    paired with an fMoW JSON file, resize to params.target_img_size, write
    the crops under train_dir/<category>/, and return them.

    Returns (np.ndarray of crops, list of one-hot labels) for a newly
    written crop, or (None, category) when the crop already exists on disk.
    """
    filename = os.path.basename(filepath)  # NOTE(review): unused
    jsonData = json.load(open(filepath))
    # The image lives next to the JSON with the same stem and a .jpg suffix.
    img_filename = filepath[:-5] + '.jpg'
    basename = os.path.basename(img_filename[:-4])
    allResults = []
    allCats = []
    for bb in jsonData['bounding_boxes']:
        category = bb['category']
        box = bb['box']  # presumably [x, y, width, height] — confirm upstream
        outBaseName = basename+'_'+ ('%d' % bb['ID']) + '.jpg'
        cat_dir = os.path.join(train_dir, category+'/')
        currOut = os.path.join(cat_dir, outBaseName)
        if not os.path.exists(currOut):
            # Create the output directory tree lazily.
            if not os.path.exists(cat_dir):
                if not os.path.exists(train_dir):
                    os.mkdir(train_dir)
                os.mkdir(cat_dir)
            img_pil = image.load_img(img_filename)
            img = image.img_to_array(img_pil)
            imgPath = os.path.join(currOut, img_filename)  # NOTE(review): unused
            # train with context around box
            contextMultWidth = 0.15
            contextMultHeight = 0.15
            # NOTE(review): width ratio divides by img.shape[0] (rows) and
            # height by img.shape[1] (cols) — axes look swapped; confirm.
            wRatio = float(box[2]) / img.shape[0]
            hRatio = float(box[3]) / img.shape[1]
            # Smaller boxes get proportionally more surrounding context.
            if wRatio < 0.5 and wRatio >= 0.4:
                contextMultWidth = 0.2
            if wRatio < 0.4 and wRatio >= 0.3:
                contextMultWidth = 0.3
            if wRatio < 0.3 and wRatio >= 0.2:
                contextMultWidth = 0.5
            if wRatio < 0.2 and wRatio >= 0.1:
                contextMultWidth = 1
            if wRatio < 0.1:
                contextMultWidth = 2
            if hRatio < 0.5 and hRatio >= 0.4:
                contextMultHeight = 0.2
            if hRatio < 0.4 and hRatio >= 0.3:
                contextMultHeight = 0.3
            if hRatio < 0.3 and hRatio >= 0.2:
                contextMultHeight = 0.5
            if hRatio < 0.2 and hRatio >= 0.1:
                contextMultHeight = 1
            if hRatio < 0.1:
                contextMultHeight = 2
            widthBuffer = int((box[2] * contextMultWidth) / 2.0)
            heightBuffer = int((box[3] * contextMultHeight) / 2.0)
            # Padded crop window, clamped to the image bounds below.
            r1 = box[1] - heightBuffer
            r2 = box[1] + box[3] + heightBuffer
            c1 = box[0] - widthBuffer
            c2 = box[0] + box[2] + widthBuffer
            if r1 < 0:
                r1 = 0
            if r2 > img.shape[0]:
                r2 = img.shape[0]
            if c1 < 0:
                c1 = 0
            if c2 > img.shape[1]:
                c2 = img.shape[1]
            # Degenerate window after clamping: skip this box.
            if r1 >= r2 or c1 >= c2:
                continue
            subImg = img[r1:r2, c1:c2, :]
            subImg = cv2.resize(subImg, params.target_img_size)
            allResults.append(subImg)
            cv2.imwrite(currOut, subImg)
            cat_value = params.category_names.index(category)
            allCats.append(to_categorical(cat_value, params.num_labels))
            # NOTE(review): this return sits INSIDE the loop, so only the
            # first (non-skipped) bounding box is ever processed despite the
            # plural accumulators — confirm whether it should be post-loop.
            return np.asarray(allResults), allCats
        else:
            return None, category
def load_from_full(data_dir,train_dir='train/'):
    """Walk data_dir/<category>/<folder>/*.json, crop each annotation via
    prep_image (side effect: writes crops under train_dir), and cache the
    shuffled list of JSON paths as train_dir/fmow_all_filenames.npy.
    On later runs the cached list is loaded instead of re-walking."""
    if not os.path.exists(os.path.join(train_dir,'fmow_all_filenames.npy')):
        all_jsons = []
        counter = 0  # NOTE(review): unused
        cats = os.listdir(data_dir)
        for cat in cats:
            cat_folder = os.path.join(data_dir,cat)
            folders = os.listdir(cat_folder)
            for folder in folders:
                direc = os.path.join(cat_folder,folder)
                for filename in os.listdir(direc):
                    if filename.endswith('.json'):
                        final_path = os.path.join(direc,filename)
                        all_jsons.append(final_path)
                        # Called for its file-writing side effect; the
                        # returned arrays are discarded here.
                        x,y = prep_image(final_path,train_dir)
        all_jsons = np.asarray(all_jsons)
        random.shuffle(all_jsons)
        np.save((os.path.join(train_dir,'fmow_all_filenames.npy')), all_jsons)
    else:
        all_jsons = np.load(os.path.join(train_dir,'fmow_all_filenames.npy'))
if __name__ == '__main__':
    # Pin to GPU 0 and build the training crops from the configured dataset.
    os.environ['CUDA_VISIBLE_DEVICES'] = "0"
    data_dir = params.directories['dataset']
    load_from_full(data_dir)
|
from django.db import models
# Create your models here.
class Designation(models.Model):
    # Staff designation/rank lookup table.
    id=models.AutoField(primary_key=True)
    type=models.CharField(max_length=50)  # NOTE: shadows the builtin `type`
    objects = models.Manager()
class Station(models.Model):
    # A fire station.
    id=models.AutoField(primary_key=True)
    # NOTE(review): IntegerField drops leading zeros / '+' — if this is a
    # phone number, CharField would be safer; confirm.
    contact=models.IntegerField()
    address=models.CharField(max_length=200)
    objects = models.Manager()
class Vehicle_type(models.Model):
    # A class of vehicle (name, contact number, tank capacity).
    id=models.AutoField(primary_key=True)
    name=models.CharField(max_length=50)
    mobile=models.IntegerField()
    water_capacity=models.IntegerField()
    objects = models.Manager()
class Vehicle(models.Model):
    # A physical vehicle belonging to a station.
    id=models.AutoField(primary_key=True)
    vehicle_number=models.CharField(max_length=100)
    purchase_date=models.DateTimeField()
    status=models.CharField(max_length=100)
    # NOTE(review): default="" on a ForeignKey is not a valid PK value —
    # it will fail on save if relied upon; confirm intent.
    vehicle_type_id = models.ForeignKey(Vehicle_type, on_delete= models.CASCADE,default="")
    station_id = models.ForeignKey(Station, on_delete=models.CASCADE,default="")
    objects = models.Manager()
class Staff(models.Model):
    # A staff member assigned to a station with a designation.
    id=models.AutoField(primary_key=True)
    name=models.CharField(max_length=50)
    gender=models.CharField(max_length=25)
    dob=models.DateField()   # date of birth
    contact=models.IntegerField()
    doj=models.DateField()   # date of joining
    salary=models.IntegerField()
    # NOTE(review): default="" is not a valid FK default — see Vehicle.
    station_id=models.ForeignKey(Station, on_delete=models.CASCADE,default="")
    designation_id = models.ForeignKey(Designation, on_delete=models.CASCADE,default="")
    objects = models.Manager()
class Reporter(models.Model):
    # The person who reported an incident.
    id=models.AutoField(primary_key=True)
    name=models.CharField(max_length=50)
    email=models.CharField(max_length=50)
    contact = models.IntegerField()
    reporting_date = models.DateField(default="")  # NOTE(review): "" is not a valid date default
    objects=models.Manager()
class Complaint(models.Model):
    # An incident report with its location.
    id=models.AutoField(primary_key=True)
    description=models.CharField(max_length=2000)
    pin=models.IntegerField()  # postal PIN code
    street=models.CharField(max_length=100)
    city=models.CharField(max_length=50)
    state=models.CharField(max_length=50)
    objects = models.Manager()
class Act(models.Model):
    # A response action taken for an incident.
    id=models.AutoField(primary_key=True)
    status=models.CharField(max_length=100)
    # NOTE(review): both timestamps use auto_now_add, so each is frozen at
    # row creation — reaching_time can never differ from creation; confirm.
    leaving_time=models.DateTimeField(auto_now_add=True)
    reaching_time=models.DateTimeField(auto_now_add=True)
    description=models.CharField(max_length=2000)
    objects = models.Manager()
|
## solution1 .DFS
def riverSizes(matrix):
    """DFS solution: return the sizes of all 4-directionally connected
    regions of 1s in the binary matrix (order of outer scan)."""
    seen = [[False] * len(matrix[0]) for _ in matrix]
    region_sizes = []
    for r, row_vals in enumerate(matrix):
        for c, cell in enumerate(row_vals):
            if seen[r][c]:
                continue
            if cell == 1:
                region_sizes.append(calculateSize(r, c, matrix, seen))
            seen[r][c] = True
    return region_sizes
def calculateSize(row, col, matrix, visited):
    """Recursive flood fill from (row, col); marks cells visited and returns
    the number of connected 1-cells reached."""
    in_bounds = 0 <= row < len(matrix) and 0 <= col < len(matrix[0])
    if not in_bounds or visited[row][col]:
        return 0
    visited[row][col] = True
    if matrix[row][col] == 0:
        return 0
    total = 1
    for d_row, d_col in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        total += calculateSize(row + d_row, col + d_col, matrix, visited)
    return total
##solution2 BFS
def riverSizes2(matrix):
    """BFS solution: return the sizes of all 4-directionally connected
    regions of 1s in the binary matrix."""
    seen = [[False] * len(matrix[0]) for _ in matrix]
    region_sizes = []
    for r in range(len(matrix)):
        for c in range(len(matrix[0])):
            if seen[r][c]:
                continue
            seen[r][c] = True
            traverse(matrix, r, c, seen, region_sizes)
    return region_sizes
def traverse(matrix, row, col, visited, sizes):
    """BFS out from (row, col); append the region's size to sizes if > 0."""
    frontier = [[row, col]]
    size = 0
    while frontier:
        cur_row, cur_col = frontier.pop(0)
        if matrix[cur_row][cur_col] == 0:
            # Water outside the river: mark and do not expand from it.
            visited[cur_row][cur_col] = True
            continue
        size += 1
        for nbr_row, nbr_col in findNeighbors(matrix, cur_row, cur_col, visited):
            frontier.append([nbr_row, nbr_col])
            visited[nbr_row][nbr_col] = True
    if size > 0:
        sizes.append(size)
def findNeighbors(matrix, row, col, visited):
    """Return the unvisited, in-bounds 4-neighbours of (row, col) as
    [row, col] pairs (down, up, left, right order)."""
    candidates = ([row + 1, col], [row - 1, col], [row, col - 1], [row, col + 1])
    return [
        [r, c]
        for r, c in candidates
        if 0 <= r < len(matrix) and 0 <= c < len(matrix[0]) and not visited[r][c]
    ]
"""
For this to work you have to install pdfkit and wkhtmltopdf see:
https://github.com/JazzCore/python-pdfkit
VERY IMPORTANT:
IN LINUX a bug had to be solved by patching the pdfkit library:
Had to install xvfb:
sudo apt-get install xvfb
And then add 'xvfb-run' in front of the wkhtmltopdf command inside the library, which is done by adding:
yield 'xvfb-run'
in line 83 of /usr/local/lib/python3.5/dist-packages/pdfkit.py internal library...
see:
https://unix.stackexchange.com/questions/192642/wkhtmltopdf-qxcbconnection-could-not-connect-to-display/223694#223694
"""
import os
import sys
import platform
from django.core.wsgi import get_wsgi_application
# Make the Django project importable from this standalone script: add the
# project root (dir_depth levels above the CWD) to sys.path.
dir_separator = '\\' if 'Windows' == platform.system() else '/'
# how deep is this file from the project working directory?
dir_depth = 1
path_to_add = dir_separator.join(os.getcwd().split(dir_separator)[:-dir_depth])
sys.path.insert(0, path_to_add)
# Environment can use the models as if inside the Django app
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testing_webpage.settings')
application = get_wsgi_application()

# These imports require the Django environment configured above, so they
# intentionally come after get_wsgi_application().
import pdfkit
from django.db.models import Q
from django.template.loader import get_template
from dashboard.models import Candidate
from decouple import config
from raven import Client

SENTRY_CLIENT = Client(config('sentry_dsn'))
MAX_NUM_OF_RENDERS = 10  # cap per run so a single invocation stays bounded
def render_cv(candidate, pdf_path='cv_tmp.pdf'):
    """Render a candidate's CV to a PDF at pdf_path.

    Renders templates/nice/elon_cv.html with the candidate as context and
    converts the HTML string via pdfkit/wkhtmltopdf, applying
    static/nice/css/cv.css. (An earlier URL-based approach using
    pdfkit.from_url was abandoned and previously sat here as dead code.)
    """
    nice_dir = os.path.dirname(os.path.realpath(__file__))
    template_path = os.path.join(nice_dir, 'templates', 'nice', 'elon_cv.html')
    template = get_template(template_path)
    html = template.render({'candidate': candidate})  # Renders the template with the context data.
    css = os.path.join(nice_dir, 'static', 'nice', 'css', 'cv.css')
    pdfkit.from_string(html, pdf_path, css=css)
if __name__ == '__main__':
    # Render up to MAX_NUM_OF_RENDERS CVs for candidates that have
    # experiences and have not been rendered yet.
    for c in Candidate.objects.filter(~Q(user__experiences=None), render_cv=False)[:MAX_NUM_OF_RENDERS]:
        try:
            render_cv(c, 'cv/{}.pdf'.format(c.id))
            c.render_cv = True
            c.save()
        except Exception:
            # Bug fix: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; report the failure and continue with the
            # next candidate.
            SENTRY_CLIENT.captureException()
    # render_cv(Candidate.objects.get(pk=23000))
|
# coding:utf-8
####################################################
#//E:/wamp/www/application/pycharm/phantomjs-2.1.1-windows/phantomjs-2.1.1-windows/bin/phantomjs.exe E:/wamp/www/application/pycharm/project1/Rendering.js 354
import sys
import re
import urllib
import urllib2
from bs4 import BeautifulSoup
from PIL import Image
import cStringIO
import os
import random
import sys
import time
# Python 2 script: renders page `pages` of the target site through PhantomJS
# and downloads every image found in the rendered HTML into ./jandan/.
pages = 376
command = '''
E:/wamp/www/application/pycharm/phantomjs-2.1.1-windows/phantomjs-2.1.1-windows/bin/phantomjs.exe --disk-cache=yes --ignore-ssl-errors=true --config=E:/wamp/www/application/pycharm/project1/config.json E:/wamp/www/application/pycharm/project1/Rendering.js %(page)d
'''
command = command % {"page": pages}
user_agent='Mozilla/5.0 (Windows NT 6.1; WOW64)'
headers = {'User-Agent': user_agent}
try:
    #time1 = time.time()
    # Run PhantomJS; its stdout is the fully rendered HTML.
    result = os.popen(command)
    res = result.read()
    #time2 = time.time()
    #print time2-time1
    soup = BeautifulSoup(str(res), "html.parser")
    imgs = soup.findAll('img')
    for i in range(len(imgs)):
        #print imgs[i].get("src")
        if imgs[i].get("org_src") != None:
            #pass
            # Lazily loaded image: the real URL is in org_src.
            picurl = imgs[i].get("org_src")
            picurl_arr = picurl.split("/")
            imgname = picurl_arr[len(picurl_arr)-1]
            print imgname
            urllib.urlretrieve(picurl, './jandan/'+str(imgname))
        else:
            # Regular image: fetch via urllib2 with a browser User-Agent,
            # then re-save through PIL as PNG.
            srcs = imgs[i].get("src")
            srcs_arr = srcs.split("/")
            imgname2 = srcs_arr[len(srcs_arr) - 1]
            print imgname2
            imgcontent = urllib2.Request(srcs, headers=headers)
            imgcontents = cStringIO.StringIO(urllib2.urlopen(imgcontent, timeout=30).read())
            im = Image.open(imgcontents)
            im.save('./jandan/' + "/" + imgname2, 'PNG')
    # NOTE(review): sys.exit() here terminates after the loop; redundant at
    # end of script but kept as-is.
    sys.exit()
except Exception, e:
    print e
|
# --- Day 21: RPG Simulator 20XX ---
# Little Henry Case got a new video game for Christmas. It's an RPG, and he's stuck on a boss.
# He needs to know what equipment to buy at the shop. He hands you the controller.
#
# In this game, the player (you) and the enemy (the boss) take turns attacking. The player always goes first. Each
# attack reduces the opponent's hit points by at least 1. The first character at or below 0 hit points loses.
#
# Damage dealt by an attacker each turn is equal to the attacker's damage score minus the defender's armor score. An
# attacker always does at least 1 damage. So, if the attacker has a damage score of 8, and the defender has an armor
# score of 3, the defender loses 5 hit points. If the defender had an armor score of 300, the defender would still
# lose 1 hit point.
#
# Your damage score and armor score both start at zero. They can be increased by buying items in exchange for gold.
# You start with no items and have as much gold as you need. Your total damage or armor is equal to the sum of those
# stats from all of your items. You have 100 hit points.
#
# Here is what the item shop is selling:
#
# Weapons: Cost Damage Armor
# Dagger 8 4 0
# Shortsword 10 5 0
# Warhammer 25 6 0
# Longsword 40 7 0
# Greataxe 74 8 0
#
# Armor: Cost Damage Armor
# Leather 13 0 1
# Chainmail 31 0 2
# Splintmail 53 0 3
# Bandedmail 75 0 4
# Platemail 102 0 5
#
# Rings: Cost Damage Armor
# Damage +1 25 1 0
# Damage +2 50 2 0
# Damage +3 100 3 0
# Defense +1 20 0 1
# Defense +2 40 0 2
# Defense +3 80 0 3
# You must buy exactly one weapon; no dual-wielding. Armor is optional, but you can't use more than one. You can buy
# 0-2 rings (at most one for each hand). You must use any items you buy. The shop only has one of each item,
# so you can't buy, for example, two rings of Damage +3.
#
# For example, suppose you have 8 hit points, 5 damage, and 5 armor, and that the boss has 12 hit points, 7 damage,
# and 2 armor:
#
# The player deals 5-2 = 3 damage; the boss goes down to 9 hit points.
# The boss deals 7-5 = 2 damage; the player goes down to 6 hit points.
# The player deals 5-2 = 3 damage; the boss goes down to 6 hit points.
# The boss deals 7-5 = 2 damage; the player goes down to 4 hit points.
# The player deals 5-2 = 3 damage; the boss goes down to 3 hit points.
# The boss deals 7-5 = 2 damage; the player goes down to 2 hit points.
# The player deals 5-2 = 3 damage; the boss goes down to 0 hit points.
# In this scenario, the player wins! (Barely.)
#
# You have 100 hit points. The boss's actual stats are in your puzzle input. What is the least amount of gold you can
# spend and still win the fight?
from itertools import combinations, count
# constants
# Shop inventory from the puzzle text: each entry maps an item name to its
# gold cost and the damage/armor it contributes to the wearer's totals.
WEAPONS = {
    'Dagger': {
        'cost': 8, 'damage': 4, 'armor': 0
    },
    'Shortsword': {
        'cost': 10, 'damage': 5, 'armor': 0
    },
    'Warhammer': {
        'cost': 25, 'damage': 6, 'armor': 0
    },
    'Longsword': {
        'cost': 40, 'damage': 7, 'armor': 0
    },
    'Greataxe': {
        'cost': 74, 'damage': 8, 'armor': 0
    }
}
# all armor with additional 'None' Armor for convenience
# ('Bare' costs nothing, so iterating ARMOR covers the "no armor" case too).
ARMOR = {
    'Leather': {
        'cost': 13, 'damage': 0, 'armor': 1
    },
    'Chainmail': {
        'cost': 31, 'damage': 0, 'armor': 2
    },
    'Splintmail': {
        'cost': 53, 'damage': 0, 'armor': 3
    },
    'Bandedmail': {
        'cost': 75, 'damage': 0, 'armor': 4
    },
    'Platemail': {
        'cost': 102, 'damage': 0, 'armor': 5
    },
    'Bare': {
        'cost': 0, 'damage': 0, 'armor': 0
    }
}
RINGS = {
    'Damage +1': {
        'cost': 25, 'damage': 1, 'armor': 0
    },
    'Damage +2': {
        'cost': 50, 'damage': 2, 'armor': 0
    },
    'Damage +3': {
        'cost': 100, 'damage': 3, 'armor': 0
    },
    'Defense +1': {
        'cost': 20, 'damage': 0, 'armor': 1
    },
    'Defense +2': {
        'cost': 40, 'damage': 0, 'armor': 2
    },
    'Defense +3': {
        'cost': 80, 'damage': 0, 'armor': 3
    }
}
# boss input here (the puzzle-specific boss stats)
BOSS = {
    'damage': 8,
    'armor': 2,
    'hp': 100,
}
def game_handler() -> int:
    """Try every legal loadout and return the lowest total gold cost that
    still beats the boss.

    A loadout is exactly one weapon, one ARMOR entry (the zero-cost 'Bare'
    entry models "no armor"), and 0-2 distinct rings.
    """
    best_cost = float("inf")
    # All ring selections of size 0, 1 and 2 (shop stocks one of each ring).
    ring_choices = [rings for n in range(3) for rings in combinations(RINGS, n)]
    for weapon_stats in WEAPONS.values():
        for armor_stats in ARMOR.values():
            for rings in ring_choices:
                ring_stats = [RINGS[name] for name in rings]
                attack = weapon_stats['damage'] + sum(s['damage'] for s in ring_stats)
                defense = armor_stats['armor'] + sum(s['armor'] for s in ring_stats)
                cost = weapon_stats['cost'] + armor_stats['cost'] + sum(s['cost'] for s in ring_stats)
                # Keep the cheapest loadout that survives the fight.
                if simulate_fight_boss(attack, defense, 100):
                    best_cost = min(best_cost, cost)
    return best_cost
def simulate_fight_boss(attack: int, armor: int, hp: int) -> bool:
    """Simulate the turn-based fight against the module-level BOSS.

    The player (with the given attack/armor/hp) strikes first; turns then
    alternate.  Each hit deals attacker_damage - defender_armor, but always
    at least 1 (per the puzzle rules).

    Returns True if the boss's hit points reach 0 first (player wins).
    """
    player_stats = {
        'damage': attack,
        'armor': armor,
        'hp': hp,
    }
    boss_stats = BOSS.copy()  # copy so repeated simulations don't mutate BOSS
    for game_round in count(0):
        # even - player's turn, odd - boss's turn.
        # Bug fix: the original used `(dmg - armor) or 1`, which only treats
        # an exact 0 as "minimum 1 damage".  When the defender's armor
        # exceeds the attacker's damage the difference is negative (truthy),
        # so the defender would *gain* hit points.  max(..., 1) enforces the
        # "at least 1 damage" rule for all cases.
        if game_round % 2 == 0:
            boss_stats['hp'] -= max(player_stats['damage'] - boss_stats['armor'], 1)
        else:
            player_stats['hp'] -= max(boss_stats['damage'] - player_stats['armor'], 1)
        # check if someone lost
        if boss_stats['hp'] <= 0:  # fight won, return True
            return True
        if player_stats['hp'] <= 0:  # fight lost, return False
            return False
if __name__ == '__main__':
    # Exhaustively search every loadout and report the cheapest winning one.
    result = game_handler()
    print(f"Lowest cost to defeat boss: {result}")
|
from tkinter import *
class Window(Frame):
    """Main application frame: builds the menu bar (File, Edit and an
    intentionally empty demo menu) inside the given Tk master window."""

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.master = master
        self.init_window()

    def init_window(self):
        """Configure the master window title and populate its menu bar."""
        self.master.title("GUI")
        self.pack(fill=BOTH, expand=1)
        menubar = Menu(self.master)
        self.master.config(menu=menubar)
        # File menu: a no-op Save entry plus Exit.
        file_menu = Menu(menubar)
        file_menu.add_command(label="Save")
        file_menu.add_command(label="Exit", command=self.client_exit)
        menubar.add_cascade(label="File", menu=file_menu)
        # Edit menu with a single Undo placeholder.
        edit_menu = Menu(menubar)
        edit_menu.add_command(label="Undo")
        menubar.add_cascade(label="Edit", menu=edit_menu)
        # Deliberately empty cascade, kept from the original demo.
        empty_menu = Menu(menubar)
        menubar.add_cascade(label="emptymenu", menu=empty_menu)

    def client_exit(self):
        """Terminate the application."""
        exit()
# Build the root window, attach the application frame and run the event loop.
root = Tk()
root.geometry("400x300")
app = Window(root)
root.mainloop()
|
# Classify an integer read from stdin as positive, zero or negative.
a=int(input("enter a number:"))
if a>0:
    print("positive number")
elif a==0:
    print("0")
else:
    print("negative number")
|
import numpy as np
import cv2
import random

# Contour visualization over a video using the legacy OpenCV 2.x API.
# NOTE(review): cv2.BackgroundSubtractorMOG as a constructor and the
# two-value findContours return are OpenCV 2.x-isms -- this will not run
# unchanged on OpenCV 3/4.  Also, frame is not checked for None at end of
# stream, so the loop likely errors out when the video is exhausted.
cap = cv2.VideoCapture('rally2.mpg')
fgbg = cv2.BackgroundSubtractorMOG(1, 1, 0.9, 25)
while(cap.isOpened()):
    ret, frame = cap.read()
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    b,g,r = cv2.split(frame)
    # Blur then Canny: edges feed the contour search below.
    blur = cv2.GaussianBlur(frame, (5, 5), 0)
    edges = cv2.Canny(blur, 10, 80)
    #gmask = fgbg.apply(frame)
    #cv2.rectangle(frame, (100, 100), (50, 50), (0,255,0), 1)
    contours, hierarchy = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    #print('hierarchy', contours, hierarchy)
    #cv2.drawContours(frame, contours, -1, (0,255,0), 3)
    nr = 0
    for c in contours:
        rect = cv2.boundingRect(c)
        minrect = cv2.minAreaRect(c)
        # Area of the rotated bounding box, used as a size filter below.
        size = minrect[1][0] * minrect[1][1]
        color = (random.randint(0,255),random.randint(0,255),random.randint(0,255))
        color = ( 0, 255, 0 )  # NOTE(review): overrides the random color above
        print('contour', rect)
        # Only draw contours whose rotated bounding box is "large enough".
        if (size > 500):
            cv2.drawContours(frame, contours[nr], -1, color, 3)
        nr = nr + 1
    #gray = cv2.cvtColor(frame, cv2.CV_BGR2GRAY)
    #threshold(gray, gray,30, 255,THRESH_BINARY_INV) //Threshold the gray
    #omg, lol = cv2.threshold(hsv_img,150,160,cv2.THRESH_TOZERO)
    #if ret == True:
    #    img = cv2.rectangle(frame, (0, 0), (100, 100), 255,2)
    #    cv2.imshow('frame', img)
    #mask = cv2.inRange(hsv_img, lower_purple, upper_purple)
    #contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow('edges', edges)
    cv2.imshow('frame', frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
http://docs.scipy.org/doc/scipy/reference/scipy-optimize-modindex.html
scipy
scipy.cluster
scipy.cluster.hierarchy
scipy.cluster.vq
scipy.constants
scipy.fftpack
scipy.fftpack.convolve
scipy.integrate
scipy.interpolate
scipy.io
scipy.io.arff
scipy.io.netcdf
scipy.io.wavfile
scipy.linalg
scipy.linalg.blas
scipy.linalg.cython_blas
scipy.linalg.cython_lapack
scipy.linalg.interpolative
scipy.linalg.lapack
scipy.misc
scipy.ndimage
scipy.ndimage.filters
scipy.ndimage.fourier
scipy.ndimage.interpolation
scipy.ndimage.measurements
scipy.ndimage.morphology
scipy.odr
scipy.optimize
scipy.optimize.nonlin
scipy.signal
scipy.sparse
scipy.sparse.csgraph
scipy.sparse.linalg
scipy.spatial
scipy.spatial.distance
scipy.special
scipy.stats
scipy.stats.mstats
scipy.weave
scipy.weave.ext_tools |
class Node:
    """A graph vertex: a key/value pair, an adjacency list and an optional
    2-coloring slot used by the bipartiteness check."""

    def __init__(self, key, val):
        self.key = key        # lookup key within the owning Graph
        self.val = val        # payload value
        self.children = []    # adjacent Node objects
        self.color = None     # None = uncolored; set to 1 or 2 by BFS

    def addChild(self, c):
        """Record *c* as adjacent to this vertex."""
        self.children.append(c)
class Graph:
    """Undirected graph stored as a dict mapping vertex key -> Node."""

    def __init__(self):
        self.vertices = {}

    def addVertex(self, key, val):
        """Insert a vertex with the given key and payload.

        Bug fix: the original built a Node but stored the raw *val*
        (`self.vertices[key] = val`), so printGraph/isBipartite would crash
        on `.val`/`.children` access for vertices added this way.
        """
        self.vertices[key] = Node(key, val)

    def addEdge(self, key1, key2):
        """Add an undirected edge (two directed child links)."""
        self.addChild(key1, key2)
        self.addChild(key2, key1)

    def addChild(self, parent, child):
        """Add a directed edge parent -> child, auto-creating missing
        vertices (the key doubles as the payload for those)."""
        if parent not in self.vertices:
            self.vertices[parent] = Node(parent, parent)
        if child not in self.vertices:
            self.vertices[child] = Node(child, child)
        self.vertices[parent].addChild(self.vertices[child])

    def printGraph(self):
        """Print each vertex's value, color and neighbours to stdout."""
        for d in self.vertices.keys():
            print(self.vertices[d].val, 'color', self.vertices[d].color)
            for c in self.vertices[d].children:
                print(c.val)
            print('###')
def isBipartite(g, s):
    """BFS 2-coloring from the vertex keyed *s*.

    Returns True iff no edge in the component containing *s* joins two
    vertices of the same color (other components are not examined).
    Mutates Node.color along the way.  Raises if *s* is not in *g*.
    """
    if s not in g.vertices:
        raise Exception("start vertex does not exist")
    start = g.vertices[s]
    start.color = 1
    frontier = [start]
    seen = set()
    while frontier:
        next_frontier = []
        for node in frontier:
            # Children must take the color opposite to this node's.
            child_color = 1 if node.color == 2 else 2
            for child in node.children:
                if child.color != None and child.color != child_color:
                    return False  # same-color edge -> not bipartite
                if child.key not in seen:
                    child.color = child_color
                    next_frontier.append(child)
                    seen.add(child.key)
        frontier = next_frontier
    return True
# Demo: triangle (odd cycle) -> expected False; 4-cycle -> expected True;
# g3 appears bipartite as well ({1, 4, 5} vs {2, 3}) -- expected True.
g = Graph()
g.addEdge(1,2)
g.addEdge(1,3)
g.addEdge(2,3)
print(isBipartite(g,1))
g2 = Graph()
g2.addEdge(1,2)
g2.addEdge(1,3)
g2.addEdge(3,4)
g2.addEdge(2,4)
print(isBipartite(g2,1))
g3 = Graph()
g3.addEdge(1,2)
g3.addEdge(1,3)
g3.addEdge(4,2)
g3.addEdge(5,2)
g3.addEdge(3,4)
g3.addEdge(3,5)
print(isBipartite(g3,1))
|
import pandas as pd
def parseData(filename, outfilename=None):
    """Read a lease spreadsheet (.xlsx or .csv), clean it into the fixed
    14-column schema, write the cleaned frame to CSV and return it.

    Rows whose first cell is empty are skipped.  A handful of columns get
    default values when empty (rebate=0, term=36, yearly_mileage=10000,
    due='1st+plates+doc', deposit=0).  Other empty cells are silently
    dropped, which assumes the remaining columns are always filled --
    otherwise the per-column lists end up with unequal lengths and the
    DataFrame construction fails (same behavior as before, now documented).

    The output path is `outfilename` (forced to a .csv extension) when
    given, otherwise `<filename>_cleaned.csv`.

    Raises ValueError (a subclass of the previously raised Exception, so
    existing handlers still work) for unsupported file types.
    """
    if filename.endswith('.xlsx'):
        data = pd.read_excel(filename)
    elif filename.endswith('.csv'):
        data = pd.read_csv(filename)
    else:
        raise ValueError('File Type Not Supported: file should be xlsx or csv')
    df_key = ['year', 'make', 'model', 'trim', 'msrp', 'sale_price', 'rebate', 'res', 'money_factor', 'term',
              'yearly_mileage', 'payment', 'due', 'deposit']
    # Default values substituted for empty cells, keyed by column position.
    defaults = {6: 0, 9: 36, 10: 10000, 12: '1st+plates+doc', 13: 0}
    df_data = [[] for _ in range(data.shape[1])]
    for _, row in data.iterrows():
        # Skip blank/separator rows (empty leading cell).
        # .iloc throughout: positional Series access with plain integer
        # keys on a label index is deprecated in pandas.
        if pd.isnull(row.iloc[0]):
            continue
        for i in range(row.shape[0]):
            value = row.iloc[i]
            if not pd.isnull(value):
                df_data[i].append(value)
            elif i in defaults:
                df_data[i].append(defaults[i])
    df_dict = {df_key[i]: df_data[i] for i in range(len(df_key))}
    car_df = pd.DataFrame.from_dict(df_dict)
    if outfilename:
        car_df.to_csv(outfilename.split('.')[0] + '.csv', index=False)
    else:
        car_df.to_csv(filename.split('.')[0] + '_cleaned.csv', index=False)
    return car_df
|
# Seat a couple on a bus: read the number of rows then each row's seat
# string; replace the first adjacent free pair "OO" with "++" and print
# the modified layout, or "NO" if no row has two adjacent free seats.
rows = int(input())
bus = []
answer = "NO"  # flips to "YES" once one pair has been seated
for row in range(rows):
    seats = input()
    # Only the first row with an adjacent free pair is modified.
    if "OO" in seats and answer == "NO":
        seats = seats.replace("OO", "++", 1)
        answer = "YES"
    bus.append(seats)
if answer == "YES":
    print(answer)
    for row in bus:
        print(row)
else:
    print("NO")
import signal
from threading import Event
from types import FrameType
from django.core.management.base import BaseCommand
class DaemonCommand(BaseCommand):
    """Base class for long-running management commands that should stop
    cleanly on SIGTERM."""

    def _setup_exit_event(self):  # pragma: no cover
        """Install a SIGTERM handler and return the Event it sets.

        Callers wait on (or poll) the returned Event to know when to shut
        down.  The local was renamed from `exit` to avoid shadowing the
        builtin of the same name.
        """
        stop_event = Event()

        def _quit(signo: int, frame: FrameType | None) -> None:
            stop_event.set()

        signal.signal(signal.SIGTERM, _quit)
        return stop_event
|
from Automatos import Automato
class EpsilonTransicao(Automato):
    """Builds an epsilon-free automaton by folding epsilon-reachable
    productions into each state of the given automaton, in place.

    NOTE(review): Estados/Alfabeto/Finais are shared (not copied) with the
    source `automato`, so the source is mutated too -- confirm intended.
    """
    def __init__(self, automato):
        super(EpsilonTransicao, self).__init__()
        self.Estados = automato.Estados
        self.Alfabeto = automato.Alfabeto
        self.Finais = automato.Finais
    def imprimir(self):
        """Print this automaton under an 'epsilon-transition-free' banner."""
        return super().imprimir('\n\n# LIVRE DE EPSILON TRANSICOES:\n')
    def eliminarEpsilonTransicoes(self):
        """Entry point: remove every epsilon transition from the automaton."""
        self.buscarEpsilonTransicoes()
    def buscarEpsilonTransicoes(self):
        """Scan states for epsilon transitions and fold their productions."""
        if super().EPSILON not in self.Alfabeto:
            return
        producoesComEpsilon = set()
        qtdEpsilon = len(producoesComEpsilon)  # NOTE(review): always 0 and never read -- appears unused
        qtdEstados = len(self.Estados)
        idxEpsilon = self.Alfabeto  # NOTE(review): appears unused
        i = 0
        while i < qtdEstados:  # Loop over the states
            if i in self.Estados and len(self.Estados[i][super().EPSILON]) > 0:  # If this state has an epsilon transition
                self.removerEpsilonTransicoes(i, self.Estados[i][super().EPSILON][0], producoesComEpsilon)
                qtdEstados = len(self.Estados)  # Refresh the state count
            i += 1
        self.removerEpsilonTransicoesEstados()
    def removerEpsilonTransicoes(self, transicaoOriginal, transicaoEpsilon, producoesComEpsilon):
        """Merge the origin state's productions into the epsilon target,
        recursing through chains of epsilon transitions (cycle-guarded by
        `producoesComEpsilon`)."""
        if len(self.Estados[transicaoOriginal][self.EPSILON]) > 0:
            # Union the origin's productions into the epsilon-target state.
            for producao in list(self.Estados[transicaoEpsilon]):
                if producao != self.EPSILON and len(self.Estados[transicaoEpsilon][producao]) > 0:
                    self.Estados[transicaoEpsilon][producao] = (list(set(self.Estados[transicaoEpsilon][producao] + self.Estados[transicaoOriginal][producao])))
            producoesComEpsilon.update(set(self.Estados[transicaoOriginal][self.EPSILON]))
        if len(self.Estados[transicaoEpsilon][self.EPSILON]) > 0:
            # Follow epsilon chains not yet processed.
            if self.Estados[transicaoEpsilon][self.EPSILON][0] not in producoesComEpsilon:
                self.removerEpsilonTransicoes(transicaoEpsilon, self.Estados[transicaoEpsilon][self.EPSILON][0], producoesComEpsilon)
    def removerEpsilonTransicoesEstados(self):
        """Drop the epsilon productions from every state and remove epsilon
        from the alphabet."""
        if self.EPSILON not in self.Alfabeto:
            return
        qtdEstados = len(self.Estados)
        i = 0
        while i < qtdEstados:  # Loop over the states
            if i in self.Estados:  # If the state exists
                self.Estados[i].pop(self.EPSILON)  # Drop the epsilon productions
                qtdEstados = len(self.Estados)  # Refresh the state count
            i += 1
        self.Alfabeto.remove(self.EPSILON)
|
import enum
__all__ = ("FrequencyDbName",)
class FrequencyDbName(str, enum.Enum):
    """Canonical database identifiers for recurrence frequencies.

    Mixing in ``str`` means members compare equal to (and serialize as)
    their plain string values.
    """

    DAILY = "daily"
    EASTER = "easter"
    IRREGULAR = "irregular"
    MONTHLY = "monthly"
    ONCE = "once"
    WEEKLY = "weekly"
    XDAYS = "xdays"
    YEARLY = "yearly"

    def __str__(self) -> str:
        # Render as the bare value ("daily"), not "FrequencyDbName.DAILY".
        return self.value
|
# 떡볶이 떡 만들기 (rice-cake cutting: parametric binary search on cutter height)
import sys
input = sys.stdin.readline  # NOTE: shadows builtin input for faster reads
# N = number of cakes, M = required total cut length.
N, M = map(int, input().split())
rice_cakes = list(map(int, input().split()))
rice_cakes.sort()
def binary_search(target, rice_cakes):
    """Return the greatest cutter height H such that the cake trimmed off
    (sum of cake - H over cakes taller than H) totals at least `target`.

    Returns 0 when no height in [1, max(rice_cakes)] satisfies the target.

    Generalization: the original took the search upper bound from
    rice_cakes[-1], silently requiring a sorted list; using max() lifts
    that precondition while giving identical results for sorted input.
    """
    start, end = 1, max(rice_cakes)
    answer = 0
    while start <= end:
        mid = (start + end) // 2
        # Total length trimmed when the cutter is set to height `mid`.
        length = sum(cake - mid for cake in rice_cakes if cake > mid)
        if length >= target:
            answer = mid       # feasible: remember it and try a higher cutter
            start = mid + 1
        else:
            end = mid - 1      # infeasible: the cutter must go lower
    return answer
# Print the tallest cutter height that yields at least M length of cake.
print(binary_search(M, rice_cakes))
# TestCase (expected output: 15)
# 4 6
# 19 15 17 10
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/11/13 15:23
# @Author : Zhangyp
# @File : response_jsonify.py
# @Software: PyCharm
# @license : Copyright(C), eWord Technology Co., Ltd.
# @Contact : yeahcheung213@163.com
"""返回的数据json序列化"""
from flask import jsonify
# Query responses: wrap result data in the standard JSON envelope.
def res_jsonify(data):
    """Wrap query results in the standard response envelope.

    `status` is True when there is data, False otherwise, and the message
    text mirrors that.  The 'massage' key (presumably a typo for
    'message') is part of the wire API and is kept as-is.
    """
    has_data = bool(data)
    msg = '请求成功,有数据返回' if has_data else '请求成功,但无数据返回'
    return jsonify({
        'status': has_data,
        'data': data,
        'massage': msg
    })
# Operation (mutation) responses: wrap a human-readable message only.
def opt_jsonify(msg):
    # NOTE(review): 'massage' looks like a typo for 'message', but it is a
    # runtime API key -- renaming it would break existing clients.
    return jsonify({'massage': msg})
|
from jinja2 import Environment, FileSystemLoader, select_autoescape
# Shared Jinja2 environment: loads templates from ./templates and
# auto-escapes HTML/XML output.
JINJA_ENV = Environment(
    loader=FileSystemLoader('templates'),
    autoescape=select_autoescape(['html', 'xml'])
)
# Site-wide constants consumed by the templates.
NAME = 'Connor Wallace'
WEBSITE = 'cowalla.me'
|
import os
import chainer
from chainer.datasets import mnist
from chainer import iterators
from chainer import optimizers
from chainer import training
from chainer.training import extensions
from fw.base_trainer import base_trainer
class chainer_trainer(base_trainer):
    """Chainer-specific trainer scaffold on top of base_trainer.

    Subclasses must implement set_device, set_model and set_target; the
    remaining hooks wire up datasets, iterators, the optimizer and the
    standard set of Trainer extensions.
    """
    def __init__(self):
        super(chainer_trainer, self).__init__()
        # All artifacts go under a trainer-specific subdirectory.
        self.out = os.path.join(self.out, 'chainer_trainer')
        # Filled in by subclasses / later setup steps.
        self.updater_class = None
        self.converter = None
        self.classifier = None
        self.target = None
    def set_param(self):
        """Extend base parameters; retain_num=-1 keeps every snapshot."""
        super(chainer_trainer, self).set_param()
        if self.retain_num is None:
            self.retain_num = -1
    # (Not Implemented)seed
    # device
    def set_device(self):
        raise NotImplementedError
    # Dataset
    def set_dataset(self):
        """Load train/validation datasets; only MNIST is supported."""
        if self.dataset == 'mnist':
            self.train_dataset, self.valid_dataset = mnist.get_mnist()
        else:
            raise ValueError
    # DataLoader, Iterator
    def set_dataloader(self):
        """Build serial iterators; validation runs one fixed-order pass."""
        self.train_loader = iterators.SerialIterator(self.train_dataset, self.batch_size)
        self.valid_loader = iterators.SerialIterator(self.valid_dataset, self.batch_size, repeat=False, shuffle=False)
    # model
    def set_model(self):
        raise NotImplementedError
    # Optimizer
    def set_optimizer(self):
        """Attach a Momentum-SGD optimizer to the model."""
        # self.optimizer = optimizers.SGD(lr=self.lr)
        self.optimizer = optimizers.MomentumSGD(lr=self.lr, momentum = self.momentum)
        # self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
    # pre trainer
    def pre_set_trainer(self):
        pass
    # post trainer
    def post_set_trainer(self):
        pass
    # Ignite, Updater
    def set_trainer(self):
        """Assemble updater + Trainer, with pre/post hooks around it."""
        updater = self.updater_class(self.train_loader, self.optimizer, converter=self.converter, device=self.device, loss_func=self.classifier)
        self.pre_set_trainer()
        self.trainer = training.Trainer(updater, (self.max_epochs, 'epoch'), out=self.out)
        self.post_set_trainer()
    def set_target(self):
        raise NotImplementedError
    def set_additonal_event_handler(self):
        # NOTE(review): name misspells "additional" but is part of the
        # subclass interface, so it is kept.
        pass
    # event handler
    def set_event_handler(self):
        """Register the standard extensions: evaluation, reporting,
        LR scheduling, statistics, plots and snapshots."""
        self.set_target()
        # (Not Implemented)Evaluator(train)
        self.trainer.extend(extensions.Evaluator(self.valid_loader, self.target, converter=self.converter, device=self.device,), trigger=(self.eval_interval, 'epoch'), call_before_training=self.call_before_training)
        self.trainer.extend(extensions.ProgressBar())
        self.trainer.extend(extensions.observe_lr())
        # self.trainer.extend(extensions.MicroAverage('loss', 'lr', 'mav'))
        self.trainer.extend(extensions.LogReport(trigger=(self.log_interval, 'epoch')), call_before_training=self.call_before_training)
        self.trainer.extend(extensions.FailOnNonNumber())
        # Exponential LR decay starting from 10x the base rate.
        # self.trainer.extend(extensions.ExponentialShift('lr', rate=0.9))
        self.trainer.extend(extensions.ExponentialShift('lr', rate=0.99, init=self.lr*10.0))
        # (Not Implemented)InverseShift
        # (Not Implemented)LinearShift
        # (Not Implemented)MultistepShift
        # (Not Implemented)PolynomialShift
        # (Not Implemented)StepShift
        # (Not Implemented)WarmupShift
        self.trainer.extend(extensions.ParameterStatistics(self.model, trigger=(self.eval_interval, 'epoch')))
        self.trainer.extend(extensions.VariableStatisticsPlot(self.model))
        self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'validation/main/loss', 'validation/main/accuracy', 'elapsed_time']), call_before_training=self.call_before_training)
        self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png'), call_before_training=self.call_before_training)
        self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png'), call_before_training=self.call_before_training)
        self.trainer.extend(extensions.snapshot(n_retains=self.retain_num), trigger=(self.log_interval, 'epoch'))
        self.set_additonal_event_handler()
    def resume(self):
        """Resume trainer state from a snapshot file, if one was given."""
        if self.resume_filepath is not None:
            chainer.serializers.load_npz(self.resume_filepath, self.trainer)
    def run(self):
        self.trainer.run()
|
from texture_builder import TextureBuilder
from texture import Texture
from palette import Palette
from folder import Folder
import os.path
from functools import partial
from PyQt5.QtCore import QDir
from PyQt5.QtGui import QImage, QPalette, QPixmap
from PyQt5.QtWidgets import (QAction, QFileDialog, QLabel,
QMainWindow, QMenu, QScrollArea, QSizePolicy, QComboBox)
from color_ramp_widget import ColorRampWidget
from image_gallery import ImageGallery
class ViewerApp(QMainWindow):
    """Main window of the Parkan Image Viewer: opens palette-indexed
    textures, renders them with a selected palette, and supports zooming,
    galleries and PNG/BMP export."""

    def __init__(self):
        super(ViewerApp, self).__init__()
        self.name = 'Parkan Image Viewer'
        # Zoom state; 0.0 means "no image loaded yet".
        self.scale_factor = 0.0
        self.scale_step = 0.25
        self.min_zoom = 0.333
        self.max_zoom = 3
        self.init_window_elements()
        self.setWindowTitle(self.name)
        self.resize(500, 400)
        self.palette = None         # currently selected Palette, if any
        self.last_image = None      # last loaded Texture, if any
        self.palette_window = None  # ColorRampWidget showing the palette
        self.gallery = ImageGallery()

    def init_window_elements(self):
        """Build the label, scroll area, actions and menus (in order)."""
        self.create_image_label()
        self.create_scroll_area()
        self.create_actions()
        self.create_menus()

    def create_scroll_area(self):
        """Wrap the image label in a scroll area used as central widget."""
        scroll_area = QScrollArea()
        scroll_area.setBackgroundRole(QPalette.Dark)
        if not self.image_label:
            # Bug fix: the original raised a plain string, which is itself a
            # TypeError in Python 3 (exceptions must derive from BaseException).
            raise RuntimeError('Image label not initialized!')
        scroll_area.setWidget(self.image_label)
        self.setCentralWidget(scroll_area)
        self.scroll_area = scroll_area

    def create_image_label(self):
        """Create the QLabel that displays the current pixmap."""
        label = QLabel()
        label.setBackgroundRole(QPalette.Base)
        label.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
        label.setScaledContents(True)
        self.image_label = label

    def open(self):
        """Ask for a texture file and display it (requires a palette)."""
        file_name, _ = QFileDialog.getOpenFileName(self, 'Open image', os.path.join(QDir.currentPath(), Texture.get_textures_folder()))
        if file_name and self.palette:
            self.last_image = TextureBuilder().get_texture(file_name)
            self.scale_factor = 1.0
            self.update_image()
            self.update_actions()

    def choose_palette(self, palette_file):
        """Load the palette by (base)name and re-render the current image."""
        palette_file = Palette.get_abs_path(palette_file)
        if palette_file:
            self.palette = Palette(palette_file)
            self.update_image()
            self.update_actions()
            self.palette_window = ColorRampWidget(self.palette)

    def update_image(self):
        """Re-render self.last_image with the current palette."""
        if not self.last_image:
            return
        image_data = self.last_image.get_pixels(self.palette)
        ht, wd, channels = image_data.shape
        image = QImage(image_data, wd, ht, channels * wd, QImage.Format_RGB888)
        if not image.isNull():
            image = image.rgbSwapped()  # BGR -> RGB
        # (Removed a stray `QPixmap.fromImage(image)` whose result was
        # discarded -- dead code in the original.)
        self.image_label.setPixmap(QPixmap.fromImage(image))
        self.fit_to_window_act.setEnabled(True)
        if not self.fit_to_window_act.isChecked():
            self.image_label.adjustSize()

    def zoom_in(self):
        """Enlarge the view by one zoom step."""
        self.scale_image(1 + self.scale_step)

    def zoom_out(self):
        """Shrink the view by one zoom step (inverse of zoom_in)."""
        self.scale_image(1 / (1 + self.scale_step))

    def normal_size(self):
        """Reset zoom to 100%."""
        self.image_label.adjustSize()
        self.scale_factor = 1.0

    def fit_to_window(self):
        """Toggle scaling the image to the scroll area's size."""
        fit_to_window = self.fit_to_window_act.isChecked()
        self.scroll_area.setWidgetResizable(fit_to_window)
        if not fit_to_window:
            self.normal_size()
        self.update_actions()

    def create_choose_palette_action(self, palette_file):
        """Build the menu action that switches to `palette_file`."""
        return QAction(os.path.basename(palette_file), self,
                       enabled=True, triggered=partial(self.choose_palette, os.path.basename(palette_file)))

    def create_actions(self):
        """Create all QActions; most start disabled until a palette loads."""
        self.open_act = QAction('&Open image...', self, shortcut='Ctrl+O',
                                enabled=False, triggered=self.open)
        self.choose_palette_act_dict = {os.path.basename(palette_file): self.create_choose_palette_action(palette_file)
                                        for palette_file in Palette.get_palette_files()}
        self.exit_act = QAction('E&xit', self, shortcut='Ctrl+Q',
                                triggered=self.close)
        self.zoom_in_act = QAction('Zoom &In ({}%)'.format(int(100*self.scale_step)), self, shortcut='Ctrl++',
                                   enabled=False, triggered=self.zoom_in)
        self.zoom_out_act = QAction('Zoom &Out ({}%)'.format(int(100*self.scale_step)), self, shortcut='Ctrl+-',
                                    enabled=False, triggered=self.zoom_out)
        self.normal_size_act = QAction('&Normal Size', self, shortcut='Ctrl+N',
                                       enabled=False, triggered=self.normal_size)
        self.fit_to_window_act = QAction('&Fit to Window', self, enabled=False,
                                         checkable=True, shortcut='Ctrl+F', triggered=self.fit_to_window)
        self.save_single_image_act = QAction('&Save image', self, enabled=False,
                                             shortcut='Ctrl+S', triggered=self.save_single_image)
        self.create_gallery_act = QAction('&Create gallery', self, shortcut='Ctrl+G',
                                          enabled=False, triggered=self.create_gallery_folder)

    def create_gallery_folder(self):
        """Ask for a folder and build a gallery from its textures."""
        folder_path = QFileDialog.getExistingDirectory(self, 'Choose folder with images',
                                                       os.path.join(QDir.currentPath(), Texture.get_textures_folder()))
        if folder_path:
            self.create_gallery(folder_path)

    def create_gallery(self, folder_path):
        """Render every texture in `folder_path` and show the gallery."""
        if not self.palette:
            return
        folder = Folder(os.path.basename(folder_path), folder_path)
        all_textures = folder.get_texture_files()
        images = []
        for texture_filename in all_textures:
            cur_texture = TextureBuilder().get_texture(texture_filename)
            if not cur_texture:
                continue
            image_data = cur_texture.get_pixels(self.palette)
            ht, wd, channels = image_data.shape
            image = QImage(image_data, wd, ht, channels * wd, QImage.Format_RGB888)
            if not image.isNull():
                image = image.rgbSwapped()
                images.append(QPixmap.fromImage(image))
        self.gallery.populate(images=images, size=50)
        self.gallery.show()

    def create_menus(self):
        """Assemble the File and View menus (plus a palette combo box)."""
        self.fileMenu = QMenu("&File", self)
        self.fileMenu.addAction(self.open_act)
        self.fileMenu.addAction(self.save_single_image_act)
        combo = QComboBox(self)
        for palette_file in self.choose_palette_act_dict:
            combo.addItem(palette_file)
        combo.activated[str].connect(self.choose_palette)
        combo.move(400,30)
        choose_palette_menu = self.fileMenu.addMenu('&Choose Palette')
        for palette_file, choose_palette_action in self.choose_palette_act_dict.items():
            choose_palette_menu.addAction(choose_palette_action)
        self.fileMenu.addAction(self.create_gallery_act)
        self.fileMenu.addSeparator()
        self.fileMenu.addAction(self.exit_act)
        self.viewMenu = QMenu("&View", self)
        self.viewMenu.addAction(self.zoom_in_act)
        self.viewMenu.addAction(self.zoom_out_act)
        self.viewMenu.addAction(self.normal_size_act)
        self.viewMenu.addSeparator()
        self.viewMenu.addAction(self.fit_to_window_act)
        self.menuBar().addMenu(self.fileMenu)
        self.menuBar().addMenu(self.viewMenu)

    def has_palette(self):
        return self.palette is not None

    def has_image(self):
        return self.last_image is not None

    def update_actions(self):
        """Enable/disable actions to match the current palette/image state."""
        self.open_act.setEnabled(self.has_palette())
        self.create_gallery_act.setEnabled(self.has_palette())
        self.zoom_in_act.setEnabled(not self.fit_to_window_act.isChecked())
        self.zoom_out_act.setEnabled(not self.fit_to_window_act.isChecked())
        self.normal_size_act.setEnabled(not self.fit_to_window_act.isChecked())
        self.save_single_image_act.setEnabled(self.has_image())

    def save_single_image(self):
        """Save the current image (rendered with the palette) as PNG/BMP."""
        if not self.last_image or not self.palette:
            return
        file_name, _ = QFileDialog.getSaveFileName(self, 'Save image',
                                                   os.path.join(QDir.currentPath(), Texture.get_textures_folder()),
                                                   filter='*.png;;*.bmp')
        if not file_name:
            return
        self.last_image.save(file_name, self.palette)

    def scale_image(self, factor):
        """Multiply the zoom by `factor`, resizing label and scroll bars."""
        self.scale_factor *= factor
        self.image_label.resize(self.scale_factor * self.image_label.pixmap().size())
        self.adjust_scroll_bar(self.scroll_area.horizontalScrollBar(), factor)
        self.adjust_scroll_bar(self.scroll_area.verticalScrollBar(), factor)
        # Clamp further zooming at the configured limits.
        self.zoom_in_act.setEnabled(self.scale_factor < self.max_zoom)
        self.zoom_out_act.setEnabled(self.scale_factor > self.min_zoom)

    def adjust_scroll_bar(self, scroll_bar, factor):
        """Keep the viewport centered on the same point after zooming."""
        scroll_bar.setValue(int(factor * scroll_bar.value()
                                + ((factor - 1) * scroll_bar.pageStep() / 2)))
|
# -*- coding:utf-8 -*-
'''
@author: leisun
@contact: leisun98@gmail.com
@file: eng_K-STATE_parser.py
@time: 11/19/18 2:18 PM
'''
from utils.connection import *
import time
from db.operateSql import People, connect_db
db_url = "mysql+pymysql://root:123456@localhost/sc"
session = connect_db(db_url)
def getInfo(url, frontstr, org="Kansas State University"):
    """Scrape a K-State faculty listing page: for each table cell with an
    email link, collect name/homepage/photo, download the photo, and
    persist a People row.  `frontstr` is the site root used to absolutize
    relative URLs.

    NOTE(review): the bare `except: return getInfo(...)` retries forever on
    a persistently failing URL (unbounded recursion), and the bare excepts
    below swallow all errors -- kept as-is, flagged for follow-up.
    """
    try:
        res = fetch(url)
    except:
        # Retry on any fetch failure (see NOTE above).
        return getInfo(url, frontstr, org)
    tmp = extract("//tbody/tr/td", res, True)
    # print(tmp)
    major = extract("//div[@id='ksu-unitbar']/h2/a/text()", res)
    for each in tmp:
        source = str(etree.tostring(each))
        # print(source)
        email = extract("//a[contains(@href, '@')]/text()", source)
        if not email:
            continue  # skip cells without a mailto-style link
        name = extract("//strong/text()", source)
        if not name:
            name = extract("//strong/a/span/text()", source)
        web_url = extract("//a[contains(@href, '/people')]/@href", source)
        # print(web_url)
        img_url = ""
        if web_url:
            web_url= frontstr + web_url
            try:
                text = fetch(web_url)
                img_url = extract("//img[contains(@src, '/docs/people')]/@src", text)
            except:
                pass
        else:
            continue
        # NOTE(review): frontstr is prepended here for logging *and* again
        # below before download -- this log line likely doubles the prefix.
        print(frontstr+str(img_url), " ", name, " ", email, " ", web_url, " ", major, " ", org)
        if img_url:
            try:
                img_url = frontstr + img_url
                pic = requests.Session().get(img_url, timeout=30)
                with open("/Users/sunlei/scholar-private/out_of_data_module/pic/" + email + ".jpg", "wb") as f:
                    f.write(pic.content)
                    f.close()  # NOTE(review): redundant inside `with`
            except:
                # Record failed downloads for later inspection.
                with open("/Users/sunlei/scholar-private/out_of_data_module/timeout.txt", "a") as f:
                    f.write(email + " : " + img_url + "\n")
        # NOTE(review): 'orginazation' is the (misspelled) column name in the
        # People model -- keep in sync with db.operateSql.
        user = People(email=email, name=name, major=major, web=web_url, orginazation=org)
        session.add(user)
        try:
            session.commit()
        except:
            session.rollback()
        time.sleep(1)  # throttle requests
getInfo("http://www.ece.k-state.edu/people/faculty/index.html", "http://www.ece.k-state.edu")
|
# @Time : 2020/3/15 13:20
# @Author : AeishenLin
# @File : useCombobox.py
# @Describe: 使用列表框
# 列表框:(Listbox) 可供用户单选或多选所列条目以形成人机交互。
# 列表框控件的主要方法:
# 方法 功能描述
# curselection() 返回光标选中项目编号的元组,注意并不是单个的整数
# delete(起始位置,终止位置) 删除项目,终止位置可省略,全部清空为delete(0,END)
# get(起始位置,终止位) 返回范围所含项目文本的元组,终止位置可忽略
# insert(位置,项目元素) 插入项目元素(若有多项,可用列表或元组类型赋值),若位置为END,则将项目元素添加在最后
# size() 返回列表框行数
#
# 执行自定义函数时,通常使用“实例名.surselection()” 或 “selected” 来获取选中项的位置索引。
# 由于列表框实质上就是将Python 的列表类型数据可视化呈现,在程序实现时,也可直接对相关列表数据进行操作,
# 然后再通过列表框展示出来,而不必拘泥于可视化控件的方法。
# 看下面的一个例子:实现列表框的初始化、添加、插入、修改、删除和清空操作,如下:
from tkinter import *
# Listbox demo window (title: "listbox usage").
root = Tk()
root.title('列表框使用')
root.geometry('320x320')
def initLstBox1():
    """Reset the listbox to the five default school subjects."""
    LstBox1.delete(0, END)
    for subject in ("数学", "物理", "化学", "语文", "外语"):
        LstBox1.insert(END, subject)
def clearLstBox1():
    # Remove every entry (the 0..END range covers the whole listbox).
    LstBox1.delete(0, END)
def insertLstBox1():
    """Insert the entry-field text at the current selection, or append at
    the end when nothing is selected.  Empty text is ignored."""
    text = entry.get()
    if text == '':
        return
    selection = LstBox1.curselection()
    if selection == ():
        LstBox1.insert(LstBox1.size(), text)
    else:
        LstBox1.insert(selection, text)
def updateLstBox1():
    """Replace the selected entry with the entry-field text (requires both
    a non-empty entry field and a selection)."""
    text = entry.get()
    selection = LstBox1.curselection()
    if text != '' and selection != ():
        idx = selection[0]
        LstBox1.delete(idx)
        LstBox1.insert(idx, text)
def deleteLstBox1():
    """Delete the currently selected entry (no-op when nothing is selected).

    Bug fix: the original referenced the misspelled name `Lstbox1`
    (NameError at runtime) and passed the whole curselection() tuple to
    delete(); pass the first selected index instead.
    """
    selection = LstBox1.curselection()
    if selection != ():
        LstBox1.delete(selection[0])
# Left frame holds the listbox; right frame holds the entry and buttons.
frame1 = Frame(root, relief=RAISED)
frame1.place(relx=0.0)
frame2 = Frame(root, relief=GROOVE)
frame2.place(relx=0.5)
LstBox1 = Listbox(frame1)
LstBox1.pack()
entry = Entry(frame2)
entry.pack()
# Buttons: initialize / add / insert / update / delete / clear.
btn1 = Button(frame2, text='初始化', command=initLstBox1)
btn1.pack(fill=X)
btn2 = Button(frame2, text='添加', command=insertLstBox1)
btn2.pack(fill=X)
btn3 = Button(frame2, text='插入', command=insertLstBox1)  # "add" and "insert" are effectively the same operation
btn3.pack(fill=X)
btn4 = Button(frame2, text='修改', command=updateLstBox1)
btn4.pack(fill=X)
btn5 = Button(frame2, text='删除', command=deleteLstBox1)
btn5.pack(fill=X)
btn6 = Button(frame2, text='清空', command=clearLstBox1)
btn6.pack(fill=X)
root.mainloop()
|
import timeit
from adk.knapsack import record_best, Item
def trials():
    """
    Search small space to determine input set to knapsack that offers greatest
    difference between dynamic programming and approximate. Once computed, use
    these values in adk.book.chapter11.py
    """
    # Best (a, b, c, d, diffTotal) rows seen in earlier runs:
    # 83 250 4 2 457
    # 103 207 4 2 523
    a = 23
    b = 56
    c = 8
    d = 5
    maxBestTotal = 0
    # Sweep the (a, b, c, d) item-generation parameters.
    for a in range(23, 113, 10):
        for b2 in range(1, 8):
            b = a*b2 + 1
            for c in [4, 8, 16, 32, 64]:
                for d in range(2, 7):
                    diffTotal = 0
                    W = 10
                    numReps = 1
                    numTrials = 1
                    # Roughly double the knapsack capacity each iteration.
                    while W <= 65536:
                        items = []
                        # Item (Value, Weight)
                        for i in range(a,b,c):
                            items.append(Item(i,i))
                            items.append(Item(d*i+1,d*i+1))
                        # Build the timeit setup snippet that recreates `items`
                        # inside the timed process.
                        itemSet = 'items=[]\n'
                        for item in items:
                            itemSet = itemSet + 'items.append(Item(' + str(item.value) + ',' + str(item.weight) + '))\n'
                        #itemSet = itemSet + 'items.append(Item(' + str(W//4) + "," + str(W//4) + '))\n'
                        setup= '''
from adk.knapsack import knapsack_unbounded, knapsack_01, knapsack_approximate, Item, record_best
import random\n''' + itemSet + '''
'''
                        executeUnbound = '''
record_best (knapsack_unbounded(items,''' + str(W) + ''')[0])
'''
                        totalUnbound = min(timeit.Timer(executeUnbound, setup=setup).repeat(numReps,numTrials))
                        executeApproximate = '''
record_best (knapsack_approximate(items,''' + str(W) + ''')[0])
'''
                        totalApproximate = min(timeit.Timer(executeApproximate, setup=setup).repeat(numReps,numTrials))
                        #print (W, totalUnbound, totalApproximate, record_best())
                        # record_best() returns the recorded results; entry 0
                        # minus entry 1 is the DP-vs-approximate gap.
                        best2 = record_best()
                        if len(best2) > 0:
                            diffTotal += best2[0] - best2[1]
                        W = W * 2 + 1
                    # Report any parameter set beating the best gap so far.
                    if diffTotal > maxBestTotal:
                        print (a,b,c,d,diffTotal)
                        maxBestTotal = diffTotal
if __name__ == '__main__':
    trials()
|
from netCDF4 import Dataset
from netCDF4 import num2date
import datetime as dt
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import sys
import os
from matplotlib import rc
# rc('text', usetex=True)
# For every NetCDF file named on the command line, produce a sibling
# "<file>.pdf" QC report: one page of global attributes, then for each
# TIME-dimensioned variable an attribute page plus a time-series plot whose
# points are coloured by their ancillary QC flags (good kept, suspect yellow,
# bad red, not-deployed green).
for path_file in sys.argv[1:]:
    nc = Dataset(path_file)
    nc_dims = [dim for dim in nc.dimensions] # list of nc dimensions
    nctime = nc.variables['TIME'][:]
    t_unit = nc.variables['TIME'].units # get unit "days since 1950-01-01T00:00:00Z"
    try:
        t_cal = nc.variables['TIME'].calendar
    except AttributeError: # Attribute doesn't exist
        t_cal = u"gregorian" # or standard
    # Convert the numeric time axis into datetime objects for plotting.
    dt_time = [num2date(t, units=t_unit, calendar=t_cal) for t in nctime]
    # get a list of all variables
    nc_vars_to_plot = [var for var in nc.variables]
    # remove any dimensions from the list to plot
    for i in nc_dims:
        try:
            nc_vars_to_plot.remove(i)
        except ValueError:
            print('did not remove ', i)
    # collect auxiliary (ancillary/QC) variables so they are not plotted directly
    aux_vars = list()
    for var in nc.variables:
        try:
            aux_vars.append(nc.variables[var].getncattr('ancillary_variables'))
        except AttributeError:
            pass
    # keep only variables that have a TIME dimension and are neither
    # dimensions themselves nor auxiliary QC variables
    to_plot = list()
    for var in nc.variables:
        # print var
        if var in nc_dims:
            continue
        if var in aux_vars:
            continue
        if 'TIME' in nc.variables[var].dimensions:
            print('to plot ', var)
            to_plot.append(var)
    # pdffile = path_file[path_file.rfind('/')+1:len(path_file)] + '-' + nc.getncattr('deployment_code') + '-plot.pdf'
    pdffile = path_file + '.pdf'
    pp = PdfPages(pdffile)
    # first page: all global attributes rendered as monospace text
    fig = plt.figure(figsize=(11.69, 8.27))
    text = 'file name : ' + os.path.basename(path_file) + '\n'
    # print "NetCDF Global Attributes:"
    for nc_attr in nc.ncattrs():
        # print '\t%s:' % nc_attr, repr(nc.getncattr(nc_attr))
        text += nc_attr + ' : ' + str(nc.getncattr(nc_attr)) + '\n'
    plt.text(-0.1, -0.1, text, fontsize=8, family='monospace')
    plt.axis('off')
    pp.savefig(fig)
    for plot in to_plot:
        plot_var = nc.variables[plot]
        var = plot_var[:]
        shape_len = len(var.shape)
        # attribute page for this variable (and its QC variable, if any)
        fig = plt.figure(figsize=(11.69, 8.27))
        text = "Variable : " + plot_var.name + str(plot_var.dimensions) + "\n"
        nc_attrs = plot_var.ncattrs()
        # print "NetCDF Variable Attributes:"
        for nc_attr in nc_attrs:
            attrVal = plot_var.getncattr(nc_attr)
            print('\t%s:' % nc_attr, repr(plot_var.getncattr(nc_attr)), type(attrVal))
            text += nc_attr + ' : ' + str(attrVal) + '\n'
        if hasattr(plot_var, 'ancillary_variables'):
            qc_var_name = plot_var.getncattr('ancillary_variables')
            qc_var = nc.variables[qc_var_name];
            text += "\nAUX : " + qc_var.name + str(qc_var.dimensions) + "\n"
            nc_attrs = qc_var.ncattrs()
            # print "NetCDF AUX Variable Attributes:"
            for nc_attr in nc_attrs:
                # print '\t%s:' % nc_attr, repr(nc.getncattr(nc_attr))
                text += nc_attr + ' : ' + str(qc_var.getncattr(nc_attr)) + '\n'
            qc = nc.variables[qc_var_name][:]
            if plot_var.dimensions[0] != 'TIME':
                qc = np.transpose(qc)
            qc = np.squeeze(qc)
        else:
            # NOTE(review): with no ancillary variable, qc stays a scalar 0,
            # so the flag masks below hide every point of this variable --
            # confirm whether such variables should be plotted unmasked.
            qc = 0
        plt.text(-0.1, 0.0, text, fontsize=8, family='monospace')
        plt.axis('off')
        pp.savefig(fig)
        plt.close(fig)
        print(plot_var.name, " shape ", var.shape, " len ", shape_len)
        # time-series page: plot only data flagged good (1) or nominal (7)
        fig = plt.figure(figsize=(11.69, 8.27))
        if plot_var.dimensions[0] != 'TIME':
            var = np.transpose(var)
        var = np.squeeze(var)
        qc_m = np.ma.masked_where(~((qc == 1) | (qc == 7)), var)
        mx = qc_m.max()
        mi = qc_m.min()
        marg = (mx - mi) * 0.1  # 10% headroom above and below the data
        print("max ", mx, " min ", mi)
        plt.ylim([mi - marg, mx + marg])
        # build the legend from serial numbers and sensor depths
        if hasattr(plot_var, 'sensor_serial_number'):
            sn = plot_var.getncattr('sensor_serial_number').split('; ')
        else:
            sn = nc.getncattr('instrument_serial_number').split('; ')
        if hasattr(plot_var, 'sensor_depth'):
            dpth = plot_var.getncattr('sensor_depth').split('; ')
        elif hasattr(plot_var, 'sensor_height'):
            dpth = plot_var.getncattr('sensor_height').split('; ')
        elif hasattr(nc, 'instrument_nominal_depth'):
            dpth = str(nc.getncattr('instrument_nominal_depth')).split('; ')
        else:
            dpth = 'unknown'
        print("depth ", dpth)
        leg = [x + ' (' + y + ' m)' for x, y in zip(sn, dpth)]
        # add point markers as well as lines for short series
        plot_marks = '-'
        if len(dt_time) < 200:
            plot_marks = '.-'
        pl = plt.plot(dt_time, qc_m, plot_marks)
        plt.legend(iter(pl), leg)
        #plt.legend(iter(pl), leg, bbox_to_anchor=(0.0, -0.2, 1.0, -0.15), loc=3, ncol=6, mode="expand", borderaxespad=0.0, fontsize='x-small')
        # mark suspect (2) or probably bad (3) as yellow
        qc_m = np.ma.masked_where(~((qc == 2) | (qc == 3)), var)
        plt.plot(dt_time, qc_m, 'yo')
        # mark bad (4) points as red
        qc_m = np.ma.masked_where((qc != 4), var)
        plt.plot(dt_time, qc_m, 'ro')
        # mark not_deployed (6) points as green
        qc_m = np.ma.masked_where((qc != 6), var)
        plt.plot(dt_time, qc_m, 'go')
        #fig.autofmt_xdate()
        plt.grid()
        # add deployment/instrument/standard name as title
        # plt.title(nc.getncattr('deployment_code') + ' : ' + plot_var.sensor_name + ' ' + \
        # plot_var.sensor_serial_number + ' : ' + plot_var.name, fontsize=10)
        # plt.title(nc.getncattr('deployment_code') + ' : ' + plot_var.getncattr('name'), fontsize=10)
        try:
            plt.title(nc.getncattr('deployment_code'), fontsize=10)
        except AttributeError:
            pass
        # add units to Y axis
        plt.ylabel(plot + ' (' + plot_var.units + ')')
        # plot only the time of deployment
        # date_time_start = dt.datetime.strptime(nc.getncattr('time_deployment_start'), '%Y-%m-%dT%H:%M:%SZ')
        # date_time_end = dt.datetime.strptime(nc.getncattr('time_deployment_end'), '%Y-%m-%dT%H:%M:%SZ')
        date_time_start = dt.datetime.strptime(nc.getncattr('time_coverage_start'), '%Y-%m-%dT%H:%M:%SZ')
        date_time_end = dt.datetime.strptime(nc.getncattr('time_coverage_end'), '%Y-%m-%dT%H:%M:%SZ')
        plt.xlim(date_time_start, date_time_end)
        # plt.savefig(plot + '.pdf')
        # NOTE(review): `papertype` was removed from recent matplotlib
        # releases -- confirm the pinned matplotlib version still accepts it.
        pp.savefig(fig, papertype='a4')
        plt.close(fig)
        # plt.show()
    pp.close()
    nc.close()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MyPlugin.ui'
#
# Created: Mon Mar 20 16:04:46 2017
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated (pyuic4) UI definition: a form containing a
    three-column QTreeWidget with two top-level items, one child each."""
    def setupUi(self, Form):
        """Build the widget hierarchy and layout on ``Form``."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(881, 573)
        self.verticalLayout = QtGui.QVBoxLayout(Form)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.paramTree = QtGui.QTreeWidget(Form)
        self.paramTree.setEnabled(True)
        self.paramTree.setObjectName(_fromUtf8("paramTree"))
        # Two top-level items, each with a single child row; their texts are
        # assigned in retranslateUi().
        item_0 = QtGui.QTreeWidgetItem(self.paramTree)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        item_0 = QtGui.QTreeWidgetItem(self.paramTree)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        self.verticalLayout.addWidget(self.paramTree)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Apply (translatable) strings to the title, headers and items."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.paramTree.setSortingEnabled(True)
        self.paramTree.headerItem().setText(0, _translate("Form", "Name", None))
        self.paramTree.headerItem().setText(1, _translate("Form", "Type", None))
        self.paramTree.headerItem().setText(2, _translate("Form", "Value", None))
        # Temporarily disable sorting while the item texts are assigned.
        __sortingEnabled = self.paramTree.isSortingEnabled()
        self.paramTree.setSortingEnabled(False)
        self.paramTree.topLevelItem(0).setText(0, _translate("Form", "parent", None))
        self.paramTree.topLevelItem(0).child(0).setText(0, _translate("Form", "Child", None))
        self.paramTree.topLevelItem(1).setText(0, _translate("Form", " NewParent", None))
        self.paramTree.topLevelItem(1).child(0).setText(0, _translate("Form", "NewChild", None))
        self.paramTree.setSortingEnabled(__sortingEnabled)
# Manual test harness: build the widget on a bare QWidget and run the loop.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    Form = QtGui.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
|
#@+leo-ver=5-thin
#@+node:ekr.20101110092851.5812: * @file ../plugins/initinclass.py
#@+<< docstring >>
#@+node:ekr.20101112180523.5421: ** << docstring >>
""" Modifies the Python @auto importer so that the importer
puts the __init__ method (ctor) into the body of the class node.
This makes it easier to keep the instance variable docs in the class
docstring in sync. with the ivars as manipulated by __init__, saves
repeating explanations in both places.
Note that this is done *after* the consistency checks by the @auto
import code, so using this plugin is at your own risk. It will change
the order of declarations if other methods are declared before __init__.
"""
#@-<< docstring >>
__plugin_name__ = "__init__ in class"
from leo.core import leoGlobals as g
from leo.core import leoPlugins
#@@language python
#@@tabwidth -4
#@+others
#@+node:ekr.20101110093301.5816: ** InitInClass
def InitInClass(tag, keywords):
    """Move __init__ into the class node body in python @auto imports"""
    cull = [] # __init__ nodes to remove
    parent = keywords['p']
    def moveInit(parent):
        # Recursively scan children; when a node's headline mentions
        # __init__, splice its body text into the parent (class) node.
        for p in parent.children_iter():
            if '__init__' in p.headString():
                cull.append(p.copy())
                old = parent.bodyString().strip().split('\n')
                # Re-indent the __init__ body for the class body; blank
                # lines stay blank.
                new = '\n'.join([' ' + i if i.strip() else ''
                    for i in p.bodyString().strip().split('\n')])
                new = '\n%s\n' % new
                # insert before @others
                for n, i in enumerate(old):
                    if i.strip() == '@others':
                        if parent.numberOfChildren() == 1:
                            # __init__ was the only child, so @others
                            # would now be empty -- drop the directive.
                            del old[n]
                        old.insert(n, new)
                        old.append('')
                        break
                else:
                    # no @others directive found: append at the end
                    old.append(new)
                parent.setBodyString('\n'.join(old))
            moveInit(p)
    moveInit(parent)
    cull.reverse() # leaves first
    for i in cull:
        i._unlink()
#@+node:ekr.20101110093301.5817: ** init
def init():
    """Return True if the plugin has loaded successfully."""
    # Splice __init__ bodies after every @auto import completes.
    leoPlugins.registerHandler("after-auto", InitInClass)
    g.plugin_signon(__name__)
    return True
#@-others
#@-leo
|
# Read a factor and a count from stdin, then print the first `counter`
# multiples of `factor` as a list.
factor = int(input())
counter = int(input())
multiple = factor * counter
mylist = list(range(factor, multiple + 1, factor))
print(mylist)
from django.contrib.auth import decorators
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.utils import timezone
from django.views import generic

from ..forms import NewHourlyRateForm, NewUserForm, NewStaffForm, ResetPasswordForm, EditConfigForm
from ..models import Bill, HourlyRate, Parent, User, Config
from ..utils import reset_password_send_mail
# Contient les views concernant l'administrateur du site
# Accueil / panneau de contrôle de l'admin - statique et entièrement défini par son HTML
class AdminIndexView(generic.TemplateView):
    """Static admin landing page / control panel; layout lives in the template."""
    template_name="garderie/admin_index.html"
# Formulaire de création d'un nouveau taux horaire
class NewHourlyRateView(LoginRequiredMixin, UserPassesTestMixin, generic.edit.CreateView):
    """Superuser-only form creating a new hourly rate."""
    template_name='garderie/forms/edit_rate_form.html'
    form_class = NewHourlyRateForm
    success_url = reverse_lazy('admin_index')
    def test_func(self):
        # Restrict access to site administrators.
        return self.request.user.is_superuser
    def get_initial(self):
        """Pre-fill the form with the most recently created rate's value."""
        initial=super().get_initial()
        initial['value']=HourlyRate.objects.latest('id').value
        return initial
# Liste des parents
class ParentListView(LoginRequiredMixin, UserPassesTestMixin, generic.ListView):
    """Superuser-only list of every parent registered on the site."""
    template_name="garderie/parent_list.html"
    context_object_name='parent_list'
    def get_queryset(self):
        return Parent.objects.all()
    def test_func(self):
        # Restrict access to site administrators.
        return self.request.user.is_superuser
# Formulaire de création d'un nouveau parent
class NewUserView(LoginRequiredMixin, UserPassesTestMixin, generic.edit.CreateView):
    """Superuser-only form creating a new parent account."""
    template_name = 'garderie/forms/new_user.html'
    form_class = NewUserForm
    success_url = '/parent/'
    def test_func(self):
        return self.request.user.is_superuser
# Formulaire de suppression d'un utilisateur
class UserDeleteView(LoginRequiredMixin, UserPassesTestMixin, generic.edit.DeleteView):
    """Superuser-only deletion of a user account.

    Deleting one's own account is forbidden: in that case nothing is
    deleted and the admin is simply redirected back to the index.
    """
    model = User
    success_url = reverse_lazy('admin_index')

    def delete(self, request, *args, **kwargs):
        """Delete the target user unless it is the requesting admin."""
        user = self.get_object()
        if user.email == request.user.email:
            # TODO: surface an explicit error message to the user instead
            # of silently redirecting.
            return HttpResponseRedirect(self.success_url)
        # BUG FIX: the original called super().delete(self, request, ...),
        # passing `self` twice through the bound method, and referenced
        # HttpResponseRedirect without importing it.
        return super().delete(request, *args, **kwargs)

    def test_func(self):
        # Restrict access to site administrators.
        return self.request.user.is_superuser
class ResetPasswordView(LoginRequiredMixin, UserPassesTestMixin, generic.edit.FormView):
    """Superuser-only form that e-mails a password reset to a chosen user."""
    template_name="garderie/forms/reset_password.html"
    form_class=ResetPasswordForm
    success_url=reverse_lazy('admin_index')
    def form_valid(self, form):
        # Send the reset e-mail to the user selected in the form.
        user=form.cleaned_data['user']
        reset_password_send_mail(user)
        return super().form_valid(form)
    def test_func(self):
        return self.request.user.is_superuser
class EditConfigView(LoginRequiredMixin, UserPassesTestMixin, generic.edit.UpdateView):
    """Superuser-only form editing the site-wide Config object."""
    # NOTE(review): this reuses the reset-password template -- looks like a
    # copy/paste leftover; confirm a dedicated config template is not needed.
    template_name="garderie/forms/reset_password.html"
    model=Config
    form_class=EditConfigForm
    success_url=reverse_lazy('admin_index')
    def get_object(self, queryset=None):
        # Always edit the single configuration row.
        return Config.objects.get_config()
    def test_func(self):
        return self.request.user.is_superuser
# Formulaire de création d'un nouvel employé
class NewStaffView(LoginRequiredMixin, UserPassesTestMixin, generic.edit.CreateView):
    """Superuser-only form creating a new staff (employee) account."""
    template_name = 'garderie/forms/new_user.html'
    form_class = NewStaffForm
    success_url = reverse_lazy('admin_index')
    def test_func(self):
        return self.request.user.is_superuser
# Liste des employés
class StaffListView(LoginRequiredMixin, UserPassesTestMixin, generic.ListView):
    """Superuser-only list of all staff users."""
    template_name="garderie/staff_list.html"
    context_object_name='user_list'
    def get_queryset(self):
        return User.objects.filter(is_staff=True)
    def test_func(self):
        return self.request.user.is_superuser
# Liste des factures par ordre de date décroissante, fournit au HTML tous les Bills
class BillsListView(LoginRequiredMixin, UserPassesTestMixin, generic.TemplateView):
    """Superuser-only page listing, per parent, the current month's bills
    and their summed amount."""
    template_name="garderie/bills_list.html"
    # context_object_name='parents_list'
    # def get_queryset(self):
    # return Parent.objects.all().order_by()
    def get_context_data(self, **kwargs):
        context=super().get_context_data(**kwargs)
        today=timezone.now()
        context['parents_list']=[]
        for parent in Parent.objects.all():
            # One entry per parent: {parent: (bills, total amount)}
            bills=parent.get_bills(today.month,today.year)
            context['parents_list'].append({parent: (bills, sum(b.amount for b in bills))})
        return context
    def test_func(self):
        return self.request.user.is_superuser
    # NOTE(review): form_valid is never invoked on a TemplateView -- likely a
    # leftover from a FormView; confirm before removing.
    def form_valid(self, form):
        form.save()
        return super().form_valid(form)
# Page d'aide, variant selon le type d'utilisateur (template vierge initialement)
class HelpView(generic.TemplateView):
    """Help page; content varies by user type in the template (initially blank)."""
    template_name="garderie/help.html"
|
#!/usr/bin/env python3
# For each of t test cases: read target sum m, array length n and the array,
# then print the (1-based) indices of one pair summing to m using a
# two-pointer scan over the sorted copy.  A value -> original-indices map
# recovers positions after sorting.
t=int(input().strip())
for i in range(t):
    m=int(input().strip())
    n=int(input().strip())
    arr=list(map(int,input().split()))
    temp=[i for i in arr]  # NOTE(review): unused copy of the input
    indexdict=dict()
    # Map each value to the list of indices where it occurs.
    for i in range(len(arr)):
        if not arr[i] in indexdict:
            a=[]
            a.append(i)
            indexdict[arr[i]]=a
            del a
        else:
            indexdict[arr[i]].append(i)
    arr.sort()
    i=0
    j=len(arr)-1
    # Two-pointer search on the sorted array.
    while(i<=j):
        s=arr[i]+arr[j]
        #print(s,m)
        if(s==m):
            #print(arr[i],arr[j])
            if(arr[i]==arr[j]):
                # Both halves are the same value: print all its positions.
                # (The loop variable deliberately shadows i; the break below
                # ends the scan immediately afterwards.)
                for i in indexdict[arr[i]]:
                    print(i+1,end=' ')
            else:
                # Print the two first-occurrence positions, smaller first.
                if(indexdict[arr[i]][0]+1<indexdict[arr[j]][0]+1):
                    print(indexdict[arr[i]][0]+1,indexdict[arr[j]][0]+1)
                else:
                    print(indexdict[arr[j]][0]+1,indexdict[arr[i]][0]+1)
            break
        elif s<m:
            i+=1
        else:
            j-=1
|
"""
HTML Page module.
"""
import polyglot.text
import re
import parsel
import selectolax.parser
import sys
class HtmlPage:
    """Lazily-evaluated views over a raw HTML document.

    Wraps the markup with parsel (xpath queries), selectolax (visible-text
    extraction) and polyglot (language detection and tokenisation).  All
    ``@property`` accessors recompute on each access.
    """

    def __init__(self, html):
        """Store the raw markup and build a parsel selector over it.

        :param html: full HTML document as a string
        """
        self.html = html
        self.selector = parsel.Selector(text=self.html)
        # Collapses runs of non-alphanumeric characters.  BUG FIX: raw
        # string -- the original '[\W_]+' relied on an invalid escape
        # sequence, which is deprecated and a SyntaxWarning on modern Python.
        self.alpha_numeric_regex = re.compile(r'[\W_]+', re.UNICODE)

    def __str__(self):
        return self.title or ''

    def __repr__(self):
        return 'HtmlPage(title={}, bytes={})'.format(self.title, self.bytes)

    @property
    def bytes(self):
        return self._get_byte_size()

    @property
    def meta_keywords(self):
        return self._get_meta_keywords()

    @property
    def docx(self):
        # polyglot document built from the extracted page text.
        return polyglot.text.Text(self.text)

    @property
    def meta_description(self):
        return self._get_meta_description()

    @property
    def title(self):
        return self._get_title()

    @property
    def text(self):
        return self._get_text()

    @property
    def keywords(self):
        # NOTE(review): no _get_keywords() is defined in this class, so
        # accessing this property raises AttributeError -- confirm whether
        # the implementation was lost or lives in a subclass.
        return self._get_keywords()

    @property
    def language_code(self):
        return self._get_language_code()

    @property
    def language_name(self):
        return self._get_language_name()

    #@property
    #def lda_tokens(self):
    #    return self._lda_tokenize()

    @property
    def words(self):
        return self._word_tokenize()

    @property
    def sentences(self):
        return self._sentence_tokenize()

    def _match_first_xpath(self, xpaths):
        """
        Return first matching xpath from `xpaths`.
        :param xpaths: list of xpaths
        :type xpaths: list
        :returns: extracted string for the first expression that matches,
            or None when nothing matches
        """
        for xpath in xpaths:
            try:
                return self.selector.xpath(xpath).extract()[0]
            except IndexError:
                # No node matched this expression; try the next one.
                pass
        return None

    def _get_byte_size(self):
        """
        Return size of HTML page in bytes (shallow size of the string).
        :returns: int
        """
        return sys.getsizeof(self.html)

    def _get_meta_keywords(self):
        """
        Return content of meta keywords tag.
        :returns: str or None
        """
        xpaths = [
            '//meta[@name="keywords"]/@content'
        ]
        return self._match_first_xpath(xpaths)

    def _get_meta_description(self):
        """
        Return content of the OpenGraph meta description tag.
        :returns: str or None
        """
        xpaths = [
            '//meta[@property="og:description"]/@content'
        ]
        return self._match_first_xpath(xpaths)

    def _get_title(self):
        """
        Return title of page (<title> first, og:title as a fallback).
        :returns: str or None
        """
        xpaths = [
            '//title/text()',
            '//meta[@property="og:title"]/@content'
        ]
        return self._match_first_xpath(xpaths)

    def _get_text(self):
        """
        Return visible text of the page with scripts and styles removed.
        :returns: str, or None when the document has no <body>
        """
        tree = selectolax.parser.HTMLParser(self.html)
        if tree.body is None:
            return None
        for tag in tree.css('script'):
            tag.decompose()
        for tag in tree.css('style'):
            tag.decompose()
        return tree.body.text(separator='\n')

    def _get_language_code(self):
        """
        Return language code of page as detected by polyglot.
        """
        return self.docx.language.code

    def _get_language_name(self):
        """
        Return language name of page as detected by polyglot.
        """
        return self.docx.language.name

    def _word_tokenize(self):
        """
        Return word tokens from text of page.  Single-character tokens are
        stripped of punctuation and dropped when nothing remains.
        """
        tokens = []
        for token in self.docx.words:
            if len(token) == 1:
                token = self.alpha_numeric_regex.sub('', token)
            if token:
                tokens.append(token)
        return tokens

    def _sentence_tokenize(self):
        """
        Return sentence tokens from text of page.
        """
        return self.docx.sentences

    # def _lda_tokenize(self):
    #     lda_tokens = []
    #     parser = English()
    #     tokens = parser(self.text)
    #     for token in tokens:
    #         if token.orth_.isspace():
    #             continue
    #         elif token.like_url:
    #             lda_tokens.append('URL')
    #         elif token.orth_.startswith('@'):
    #             lda_tokens.append('SCREEN_NAME')
    #         else:
    #             lda_tokens.append(token.lower_)
    #     return lda_tokens

    #def _get_lemma(self, word):
    #    lemma = wn.morphy(word)
    #    if lemma is None:
    #        return word
    #    else:
    #        return lemma

    #def _get_lemma2(self, word):
    #    return wordNetLemmatizer().lemmatize(word)

    #def _prepare_text_for_lda(self, text):
    #    tokens = self._lda_tokenize(text)
    #    tokens = [token for token in tokens if len(token) > 4]
    #    tokens = [token for token in tokens if token not in EN_STOP]
    #    tokens = [self._get_lemma(token) for token in tokens]
    #    return tokens
|
"""
332. Reconstruct Itinerary
Medium
1265
792
Add to List
Share
Given a list of airline tickets represented by pairs of departure and arrival airports [from, to], reconstruct the itinerary in order.
All of the tickets belong to a man who departs from JFK. Thus, the itinerary must begin with JFK.
Note:
If there are multiple valid itineraries, you should return the itinerary that has the smallest lexical order when read as a single string.
For example, the itinerary ["JFK", "LGA"] has a smaller lexical order than ["JFK", "LGB"].
All airports are represented by three capital letters (IATA code).
You may assume all tickets form at least one valid itinerary.
Example 1:
Input: [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]
Output: ["JFK", "MUC", "LHR", "SFO", "SJC"]
Example 2:
Input: [["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]]
Output: ["JFK","ATL","JFK","SFO","ATL","SFO"]
Explanation: Another possible reconstruction is ["JFK","SFO","ATL","JFK","ATL","SFO"].
But it is larger in lexical order.
"""
# Every ticket can't be valid answer for that turn
# So we need to check everything
# [["JFK","KUL"],["JFK","NRT"],["NRT","JFK"]]
# In this case
# => if we choose ["JFK","KUL"] as the first one(because it is prior to ["JFK","NRT"] in lexical order)
# => it can't make valid answer
from collections import deque, defaultdict
# Every ticket can't be valid answer for that turn
# So we need to check everything
# [["JFK","KUL"],["JFK","NRT"],["NRT","JFK"]]
# In this case
# => if we choose ["JFK","KUL"] as the first one(because it is prior to ["JFK","NRT"] in lexical order)
# => it can't make valid answer
# Every ticket can't be valid answer for that turn
# So we need to check everything
# [["JFK","KUL"],["JFK","NRT"],["NRT","JFK"]]
# In this case
# => if we choose ["JFK","KUL"] as the first one(because it is prior to ["JFK","NRT"] in lexical order)
# => it can't make valid answer
import collections
class Solution(object):
    """Reconstruct an itinerary from airline tickets (LeetCode 332).

    BUG FIX: the original defined ``findItinerary`` three times in the same
    class, so only the last definition was reachable and the first two were
    silently shadowed.  The earlier drafts are kept for reference under
    versioned names; the public ``findItinerary`` is unchanged.
    """

    def findItinerary_v1_list_stack(self, tickets):
        """Draft 1: DFS with an explicit stack (list-valued frames) over
        tickets sorted in reverse lexical order."""
        n = len(tickets)
        revLexTickets = sorted(tickets)[::-1]
        revIdMap = collections.defaultdict(list)
        for i, (d, a) in enumerate(revLexTickets):
            revIdMap[d].append(i)
        stack = [["JFK", [], []]]
        while stack:
            f, path, seen = stack.pop()
            if len(path) == n:
                return path + [f]
            # Tickets are reverse-sorted, so the lexically smallest option is
            # pushed last and popped first: the first complete path found is
            # the answer.
            for i in revIdMap[f]:
                if i not in seen:
                    stack.append([revLexTickets[i][1], path + [f], seen + [i]])
        return

    def findItinerary_v2_queue(self, tickets):
        """Draft 2: same search using a FIFO queue (``pop(0)``).

        Kept for comparison: it visits about as many nodes, but
        ``list.pop(0)`` is O(k) and this version exceeded the time limit.
        """
        n = len(tickets)
        lexTickets = sorted(tickets)
        idMap = collections.defaultdict(list)
        for i, (d, a) in enumerate(lexTickets):
            idMap[d].append(i)
        queue = [["JFK", [], []]]
        while queue:
            f, path, seen = queue.pop(0)
            if len(path) == n:
                return path + [f]
            for i in idMap[f]:
                if i not in seen:
                    queue.append([lexTickets[i][1], path + [f], seen + [i]])
        return

    def findItinerary(self, tickets):
        """Return the lexicographically smallest itinerary starting at JFK
        that uses every ticket exactly once."""
        n = len(tickets)
        revLexTickets = sorted(tickets)[::-1]
        revIdMap = collections.defaultdict(list)
        for i, (d, a) in enumerate(revLexTickets):
            revIdMap[d].append(i)
        stack = [("JFK", [], [])]
        while stack:
            f, path, seen = stack.pop()
            if len(path) == n:
                return path + [f]
            for i in revIdMap[f]:
                if i not in seen:
                    stack.append((revLexTickets[i][1], path + [f], seen + [i]))
        return
# Quick smoke test of the final findItinerary implementation.
S = Solution()
# BUG FIX: renamed from ``input`` so the builtin is not shadowed.
sample_tickets = [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]
result = S.findItinerary(sample_tickets)
print(result)
|
from selenium import webdriver
import time
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
# Connect to the locally installed chromedriver (machine-specific path).
driver = webdriver.Chrome('/Users/quatre/PycharmProjects/Bogo_hackathon/chromedriver')
driver.implicitly_wait(3)
# (translated notes) Crawling of product names, prices and image URLs is
# complete.  Convenience stores covered: 7-Eleven, CU, GS25, emart24,
# Ministop.  Open question: appending to an array in parallel -- try after
# the main features are done.
# Connect to page
driver.get('http://gs25.gsretail.com/gscvs/ko/products/event-goods')
# GS25 unfortunately loads differently from the other stores: items come
# eight per page via a JS pager call.
num = 1  # 1-based item index within the current page
col = 0  # number of pages already consumed
while 1:
    try:
        print((num+(col*8), "번째 상품 ->"))
        # Item name, price and (optional) image URL.
        print(driver.find_element_by_css_selector('#contents > div.cnt > div.cnt_section.mt50 > div > div > div:nth-child(3) > ul > li:nth-child(%s) > div > p.tit' % num).text)
        print(driver.find_element_by_css_selector('#contents > div.cnt > div.cnt_section.mt50 > div > div > div:nth-child(3) > ul > li:nth-child(%s) > div > p.price' % num).text)
        try:
            print(driver.find_element_by_css_selector('#contents > div.cnt > div.cnt_section.mt50 > div > div > div:nth-child(3) > ul > li:nth-child(%s) > div > p.img' % num).find_element_by_tag_name('img').get_attribute('src'))
        except NoSuchElementException:
            # Some items have no image; skip silently.
            pass
        num += 1
        # BUG FIX: `num is 8` compared int identity (a CPython small-int
        # accident); use equality.
        if num == 8:
            col += 1
            num = 1
            driver.execute_script('goodsPageController.moveControl(1);')
    except NoSuchElementException:
        print("은 없다 ㅎ")
        break
    except StaleElementReferenceException:
        # Page is mid-refresh after the pager call; wait and retry.
        time.sleep(2)
|
"""
Alexandra Triampol
6/21/16
This code creates a greeting that uses user input
such as their full name, age, and favorite ice cream.
"""
# takes in user input for the greeting
fullName = input("What is your first and last name?: ")
age = input("How old are you? (if you don't mind me asking): ")
iceCream = input("What kind of ice cream do you like?: ")
# prints out the greeting
print("") # aesthetic spacing
print("Welcome,",fullName + "!")
print("Congratulations on surviving for",age,"years!")
print("You deserve all the", iceCream, "ice cream in the world!")
# use .format{fullName,age,iceCream} |
import html
# Convert processed_ingredients.txt into an Android <string-array> resource:
# each input line becomes one Title-Cased, HTML-escaped <item> element.
with open('processed_ingredients.txt', 'r') as infile:
    with open('strings.xml', 'w') as outfile:
        outfile.write("<string-array name=\"master_ingredients\">\n")
        for line in infile:
            htmlStuff = html.escape(line.strip())
            words = htmlStuff.split(' ')
            pieces = []
            for word in words:
                # BUG FIX: str.replace returns a new string; the original
                # discarded the result, so apostrophe entities were never
                # backslash-escaped for the resource file.
                word = word.replace('&#x27;', "\\'")
                # Guard against empty tokens (e.g. double spaces), which
                # previously raised IndexError on word[0].
                if word:
                    word = word[0].upper() + word[1:]
                pieces.append(word)
            writeThis = ' '.join(pieces)
            outfile.write("\t<item>{}</item>\n".format(writeThis))
        outfile.write("</string-array>")
# Load the raw phone numbers, one per line (module level: runs on import).
with open('phone_data.txt', 'r') as file:
    phone_numbers = file.read().splitlines()
# change lines to dictionaries
def parse(line):
    """Parse a raw phone-number string into its three components.

    Dashes are removed and a single leading country-code digit is dropped,
    leaving ten digits split as area (3) / exchange (3) / subscriber (4).
    Returns a dict with keys 'area', 'exchange' and 'subscriber'.
    """
    digits = line.replace("-", "")
    if len(digits) > 10:
        # Drop the leading country-code digit.
        digits = digits[1:]
    return {
        'area': digits[:3],
        'exchange': digits[3:6],
        'subscriber': digits[6:],
    }
# change dictionaries to strings
def format_phone(d):
    """Render a parsed phone dict as the display string '(aaa) eee-ssss'."""
    return '({}) {}-{}'.format(d['area'], d['exchange'], d['subscriber'])
# py.test exercise_10_31_16_redux.py --cov=exercise_10_31_16_redux.py --cov-report=html
def test_parse():
    """Exercise parse() with and without dashes and country codes."""
    cases = {
        "1-476-177-8875": ('476', '177', '8875'),
        "8586359388": ('858', '635', '9388'),
        "17461538482": ('746', '153', '8482'),
        "218-690-7902": ('218', '690', '7902'),
    }
    for raw, (area, exchange, subscriber) in cases.items():
        assert parse(raw) == {
            'area': area,
            'exchange': exchange,
            'subscriber': subscriber,
        }
def test_format_phone():
    """A parsed dict renders back into the canonical display format."""
    parts = {'area': '476', 'exchange': '177', 'subscriber': '8875'}
    assert format_phone(parts) == "(476) 177-8875"
if __name__ == '__main__':
    # Normalise every number from phone_data.txt and write the formatted
    # results, one per line, to new_phone_data.txt.
    with open('new_phone_data.txt', 'w') as file:
        for phone in phone_numbers:
            file.write(format_phone(parse(phone)) + '\n')
|
import pandas as pd
import pandas.io as web
import numpy as np
def HA(df):
    """First (vectorised) Heikin-Ashi attempt; the iterative redefinition of
    HA() later in this file replaces this binding at import time.

    NOTE(review): ``df["HA_Open"] == np.nan`` is always False (NaN never
    compares equal), so np.where always takes the HA_O/2 branch, and HA_O is
    computed while HA_Open is still all zeros.  Kept byte-for-byte; do not
    reuse without review.
    """
    df["HA_Close"] = (df["Open"] + df["High"] + df["Low"] + df["Close"])/4
    ha_o = df["Open"] + df["Close"]
    df["HA_Open"] = 0.0
    HA_O = df["HA_Open"].shift(1) + df["HA_Close"].shift(1)
    df["HA_Open"] = np.where(df["HA_Open"] == np.nan, ha_o/2, HA_O/2)
    df["HA_High"] = df[["HA_Open", "HA_Close", "High"]].max(axis=1)
    df["HA_Low"] = df[["HA_Open", "HA_Close", "Low"]].min(axis=1)
    return df
start = "2016-1-1"
end = "2016-10-30"
DAX = web.datareader("^GDAXI", "Yahoo", start, end)
del DAX["Volume"]
del DAX["Adj Close"]
print(HA(DAX).head(10).round(2))
def HA(df):
    """Append Heikin-Ashi columns (HA_Open/HA_Close/HA_High/HA_Low) to ``df``.

    HA_Open is recursive: the first bar is seeded with the raw bar midpoint,
    every later bar is the midpoint of the previous HA bar.  The frame is
    modified in place and also returned; expects 'Open'/'High'/'Low'/'Close'
    columns.

    BUG FIX: ``DataFrame.get_value``/``set_value`` were removed in pandas
    1.0; this uses the ``.at`` accessor instead.
    """
    df['HA_Close'] = (df['Open'] + df['High'] + df['Low'] + df['Close']) / 4
    idx = df.index.name
    # Work on a plain positional index so rows can be addressed by i / i-1.
    df.reset_index(inplace=True)
    for i in range(0, len(df)):
        if i == 0:
            # First bar: seed with the raw bar's midpoint.
            df.at[i, 'HA_Open'] = (df.at[i, 'Open'] + df.at[i, 'Close']) / 2
        else:
            # Later bars: midpoint of the *previous* HA bar.
            df.at[i, 'HA_Open'] = (df.at[i - 1, 'HA_Open'] + df.at[i - 1, 'HA_Close']) / 2
    if idx:
        df.set_index(idx, inplace=True)
    df['HA_High'] = df[['HA_Open', 'HA_Close', 'High']].max(axis=1)
    df['HA_Low'] = df[['HA_Open', 'HA_Close', 'Low']].min(axis=1)
    return df
def heikin_ashi(df):
    """Return a new DataFrame of Heikin-Ashi candles computed from ``df``.

    Expects lowercase 'open'/'high'/'low'/'close' columns; the input frame
    is left untouched.
    """
    ha = pd.DataFrame(index=df.index.values,
                      columns=['open', 'high', 'low', 'close'])
    ha['close'] = (df['open'] + df['high'] + df['low'] + df['close']) / 4
    for row in range(len(df)):
        if row == 0:
            # Seed the first HA open with the first raw open.
            ha.iat[0, 0] = df['open'].iloc[0]
        else:
            # Midpoint of the previous HA open/close pair.
            ha.iat[row, 0] = (ha.iat[row - 1, 0] + ha.iat[row - 1, 3]) / 2
    ha['high'] = ha.loc[:, ['open', 'close']].join(df['high']).max(axis=1)
    ha['low'] = ha.loc[:, ['open', 'close']].join(df['low']).min(axis=1)
    return ha
# This file should be used to store all of the magic numbers
# so that it is easy to modify them.
c_puct = 1 # This controls how much exploration happens in the MCST (PUCT exploration constant)
#!/usr/bin/python3
# (C)2014 L0j1k@L0j1k.com
"""Perform a variety of byte-level operations on files or strings.
optool.py [file1] [file2]
-> xor file1 with file2
optool.py -b [bytes1] [file1]
-> xor bytes1 with file1
optool.py [file1] -b [bytes1]
-> xor file1 with bytes1
optool.py -B [bytes1] [bytes2]
-> xor bytes1 with bytes2
optool.py -r
-> reverse file per-byte
optool.py -i
-> detail file info
optool.py -o [offset] -x [length]
-> extract chunk length x starting offset o
"""
import argparse
import sys
__version__ = 'v0.5a'
# Text encodings the tool accepts; the default must be one of them.
DEFAULT_ENCODING = 'utf-8'
ACCEPTED_ENCODINGS = ['utf-8', 'utf-16', 'latin', 'ebcdic']
assert DEFAULT_ENCODING in ACCEPTED_ENCODINGS
def command(name):
    """Return a decorator that tags a func_* handler with CLI metadata.

    The decorated function gains two attributes:

    * ``name`` -- the sub-command name used on the command line
    * ``help`` -- the first line of the function's docstring

    The function object itself is returned unchanged, so ``@command("foo")``
    behaves exactly like a plain ``def`` apart from the added attributes.
    """
    def attach_metadata(handler):
        """Annotate ``handler`` and hand it straight back."""
        handler.name = name
        handler.help = handler.__doc__.strip().split("\n")[0]
        return handler
    return attach_metadata
@command("extract")
def func_extract(args):
"""Extract a segment of specified length from specified offset in target file.
"""
if args.address:
args.offset = int("0x" + args.address, 0) + args.offset
if args.file1:
filedata = args.file1.read()
if args.length == 0:
outputdata = filedata[args.offset:len(filedata):1]
elif args.length > 0:
outputdata = filedata[args.offset:args.offset + args.length:1]
else:
outputdata = filedata[args.offset:args.offset + args.length:-1]
sys.stdout.write(outputdata)
return 0
@command("find")
def func_find(args):
"""Attempt to find separate files inside input file, such as JPG, GIF, PNG, etc.
"""
print("[+] find")
return 0
## desired output format:
#
# 00000 0011 2233 4455 6677 8899 aabb ccdd eeff ................
# 00010 0011 2233 4455 6677 8899 aabb ccdd eeff ................
##
@command("hexdump")
def func_hexdump(args):
    """Output target file into hexadecimal-formatted output.

    Each line shows the offset, up to eight two-byte groups in hex, and an
    ASCII gutter with non-printable bytes rendered as '.'.

    BUG FIX: the original built each line with a 16-placeholder format
    string and a single argument, raising IndexError on the first byte.
    """
    #debug
    print("[+] hex")
    if args.encoding in ACCEPTED_ENCODINGS:
        print(args.encoding)
    else:
        return usage()
    filedata = args.file1.read()
    # The file is opened in text mode elsewhere, so re-encode to raw bytes.
    # NOTE(review): 'ebcdic' is not a registered Python codec name and will
    # raise LookupError here -- confirm the intended codec (e.g. 'cp500').
    data = filedata.encode(args.encoding) if isinstance(filedata, str) else filedata
    for base in range(0, len(data), 16):
        chunk = data[base:base + 16]
        # Eight groups of two bytes, hex-encoded and space-separated.
        groups = " ".join(
            "".join("{:02x}".format(byte) for byte in chunk[k:k + 2])
            for k in range(0, len(chunk), 2)
        )
        gutter = "".join(chr(byte) if 32 <= byte < 127 else "." for byte in chunk)
        print("{:05x} {:<39} {}".format(base, groups, gutter))
    return 0
@command("info")
def func_info(args):
"""Display detailed information about target and system.
"""
encoding = args.encoding
filedata = args.file1.read()
#debug
print("BLAH[", filedata[0], "]")
minbyte = bytes(min(filedata), encoding)
maxbyte = bytes(max(filedata), encoding)
print("[+] file information")
print("[name]:", args.file1.name)
print("[size]:", len(filedata), "bytes")
print("[minbyte]:", minbyte)
print("[maxbyte]:", maxbyte)
print("\n[+] system information")
print("[byteorder]:", sys.byteorder)
return 0
@command("output")
def func_output(args):
"""Outputs specified byte sequence.
"""
print("[+] output")
if args.encoding in ACCEPTED_ENCODINGS:
print(args.encoding)
else:
return usage()
print()
return 0
@command("reverse")
def func_reverse(args):
"""Reverse an input.
"""
filedata = args.file1.read()
print(filedata[::-1])
return 0
@command("swap")
def func_swap(args):
"""Swap byte order of input (toggles big-/little-endian).
"""
print("[+] swap")
return 0
@command("xor")
def func_xor(args):
"""XOR provided targets with one another.
"""
# todo: byte sequences
if args.offset1:
offset1 = args.offset1
else:
offset1 = 0
if args.offset2:
offset2 = args.offset2
else:
offset2 = 0
if args.length:
length = True
else:
length = False
args.file1.seek(offset1)
args.file2.seek(offset2)
thislength = 0
for thischar in args.file1.read():
thislength += 1
if length:
if thislength > args.length:
break
otherchar = args.file2.read(1)
xored = hex(ord(thischar) ^ ord(otherchar))
sys.stdout.write(xored)
return 0
def usage():
    """Placeholder usage message; prints a blank line and returns 0."""
    print("")
    return 0
def make_parser():
    """Create ArgumentParser instance with the specs we need.

    Builds the top-level parser plus one subparser per func_* handler;
    each subparser stores its handler in the ``func`` default so main()
    can dispatch on args.func.
    """
    parser = argparse.ArgumentParser(
        description="Perform a variety of byte-level operations on files or byte sequences.",
    )
    # here goes [OPTIONS] you want to feed to your command
    parser.add_argument("--version",
        action='version',
        version='optool.py ' + __version__ + ' by L0j1k'
    )
    # parser.set_defaults(func=None)
    subparsers = parser.add_subparsers(help="sub-command help")
    def add_subparser(func):
        """Make a func_* subparser.

        Uses the .name/.help metadata presumably attached by the
        @command decorator — confirm against its definition.
        """
        subparser = subparsers.add_parser(func.name, help=func.help)
        subparser.set_defaults(func=func)
        return subparser
    # Shared help fragment describing the accepted encodings.
    encodings_message = (
        "valid options are {0}. "
        "default is {1}.".format(
            ', '.join(repr(encoding) for encoding in ACCEPTED_ENCODINGS),
            repr(DEFAULT_ENCODING)
        )
    )
    # extract subparser
    parser_extract = add_subparser(func_extract)
    parser_extract.add_argument("-a", "--address",
        help="(hex) base hexadecimal address for extraction",
        const=0,
        default=0,
        metavar='hex_address',
        nargs='?',
        type=str
    )
    parser_extract.add_argument("offset",
        help="(int) extraction offset. use zero for file start. negative values reference from EOF",
        metavar='offset',
        type=int
    )
    parser_extract.add_argument("length",
        help="(int) length of bytes to extract. 0 extracts data from offset to EOF. if negative, returns reversed output (extracts backwards from offset)",
        metavar='length',
        type=int
    )
    parser_extract.add_argument("file1",
        help="primary target input file",
        type=argparse.FileType('r')
    )
    # find subparser
    parser_find = add_subparser(func_find)
    parser_find.add_argument("file1",
        help="primary target input file",
        type=argparse.FileType('r')
    )
    # hex subparser
    parser_hexdump = add_subparser(func_hexdump)
    parser_hexdump.add_argument("-e", "--encoding",
        help="encoding for hexdump output. " + encodings_message,
        default=DEFAULT_ENCODING,
        metavar='encoding',
        nargs='?',
        type=str
    )
    parser_hexdump.add_argument("file1",
        help="primary target input file",
        type=argparse.FileType('r')
    )
    # info subparser
    parser_info = add_subparser(func_info)
    parser_info.add_argument("-e", "--encoding",
        help="encoding to use for file. " + encodings_message,
        default=DEFAULT_ENCODING,
        metavar='encoding',
        nargs='?',
        type=str
    )
    parser_info.add_argument("file1",
        help="primary target input file",
        type=argparse.FileType('r')
    )
    # output subparser
    parser_output = add_subparser(func_output)
    parser_output.add_argument("output1",
        help="sequence to output",
        type=str
    )
    parser_output.add_argument("-e", "--encoding",
        help="character encoding to use for decoding to bytes. " + encodings_message,
        default=DEFAULT_ENCODING,
        metavar='encoding',
        nargs='?',
        type=str
    )
    # reverse subparser
    parser_reverse = add_subparser(func_reverse)
    parser_reverse.add_argument("file1",
        help="primary target input file",
        type=argparse.FileType('r')
    )
    # swap subparser
    parser_swap = add_subparser(func_swap)
    parser_swap.add_argument("file1",
        help="primary target input file",
        type=argparse.FileType('r')
    )
    # xor subparser
    parser_xor = add_subparser(func_xor)
    parser_xor.add_argument("file1",
        help="primary input file",
        nargs='?',
        type=argparse.FileType('r')
    )
    parser_xor.add_argument("file2",
        help="secondary input file",
        nargs='?',
        type=argparse.FileType('r')
    )
    parser_xor.add_argument("-b", "--byte",
        help="byte sequence to xor with entire input file",
        default=False,
        metavar='byte',
        type=str
    )
    parser_xor.add_argument("-B", "--bytes",
        help="use provided byte sequences for xor operation instead",
        default=False,
        metavar=('byte1', 'byte2'),
        nargs=2,
        type=str
    )
    parser_xor.add_argument("-l", "--length",
        help="xor for specified length. default all",
        metavar='length',
        nargs='?',
        type=int
    )
    parser_xor.add_argument("-o1", "--offset1",
        help="xor beginning at provided offset in first input file. default 0",
        default=0,
        metavar='offset',
        nargs='?',
        type=int
    )
    parser_xor.add_argument("-o2", "--offset2",
        help="xor beginning at provided offset in second input file. default 0",
        default=0,
        metavar='offset',
        nargs='?',
        type=int
    )
    return parser
def main():
    """Entry point orchestrating what module does when run as a script.

    Dispatches to the subcommand handler stored in args.func, or shows
    help (and exits with status 1) when no subcommand was given.
    """
    parser = make_parser()
    args = parser.parse_args()
    if 'func' in args:
        status = args.func(args)
    else:
        parser.print_help()
        status = 1
    sys.exit(status or 0)
if __name__ == "__main__":
main()
## argparse subparsers:
# subparser xor [-o offset] file1 file2 | -b byte1 file1 | file1 -b byte1 | -B byte1 byte2
# subparser info
# subparser extract length offset file1
# subparser find
# subparser swap
# subparser reverse
# todo:
# xor files
# xor file with byteseq
# xor two byteseqs
# reverse file -- DONE
# info about file -- DONE
# extract from file -- DONE
# swap file
############################################################
#open files
#with open(infileone, 'r') as inputone:
# indataone = inputone.read()
#print status and impending operations
#if xorfiles:
# with open(infiletwo, 'r') as inputtwo:
# indatatwo = inputtwo.read()
# print("xor",infileone,"and",infiletwo,"...")
#xor
#if xorfiles:
# for i in range(0, totallen):
# byte1 = indataone[i:1]
# byte2 = indatatwo[i:1]
# outputdata = outputdata + (byte1 ^ byte2)
|
from gmail import GMail, Message
from random import randint
# Candidate illnesses for the excuse email (Vietnamese).
sickness_list = ["trĩ", "đau bụng", "tiêu chảy", "thương hàn"]
# 1. Select a sickness randomly.
i = randint(0, len(sickness_list) - 1)
sickness = sickness_list[i]
html_template = '''
<p>Chào sếp,</p>
<p style="padding-left: 30px;">Ngày mai em bị <strong>{{sickness}}</strong>, xin sếp cho em nghỉ 1 hôm ạ.</p>
<p>Em cảm ơn <img src="https://html5-editor.net/tinymce/plugins/emoticons/img/smiley-tongue-out.gif" alt="tongue-out" /></p>
'''
# 2. Substitute the chosen sickness into the HTML template.
html_content = html_template.replace("{{sickness}}", sickness)
# SECURITY(review): credentials are hardcoded in source — move them to an
# environment variable or config file before sharing this script.
gmail = GMail('Trung<trngle0409@gmail.com>','colenemmei')
# bugfix: removed the stray '>' that was appended to the recipient address.
msg = Message('Đơn xin nghỉ ốm',to='c4e.techkidsvn@gmail.com',html=html_content)
gmail.send(msg)
# -----------------------------
# Using the PdfFileWriter Class
# -----------------------------
from PyPDF2 import PdfFileWriter
pdf_writer = PdfFileWriter()
# Width/height are in PDF user-space units (presumably points — confirm).
page = pdf_writer.addBlankPage(width=72, height=72)
print(type(page))
from pathlib import Path # noqa
# Write the single blank page out as blank.pdf in the working directory.
with Path("blank.pdf").open(mode="wb") as output_file:
    pdf_writer.write(output_file)
# -----------------------------------
# Extracting a Single Page From a PDF
# -----------------------------------
from pathlib import Path # noqa
from PyPDF2 import PdfFileReader, PdfFileWriter # noqa
# Change the path to work on your computer if necessary
pdf_path = (
    Path.home()
    / "creating-and-modifying-pdfs"
    / "practice_files"
    / "Pride_and_Prejudice.pdf"
)
input_pdf = PdfFileReader(str(pdf_path))
# Copy page 0 of the source PDF into a fresh writer and save it.
first_page = input_pdf.getPage(0)
pdf_writer = PdfFileWriter()
pdf_writer.addPage(first_page)
with Path("first_page.pdf").open(mode="wb") as output_file:
    pdf_writer.write(output_file)
# ------------------------------------
# Extracting Multiple Pages From a PDF
# ------------------------------------
from PyPDF2 import PdfFileReader, PdfFileWriter # noqa
from pathlib import Path # noqa
pdf_path = (
    Path.home()
    / "creating-and-modifying-pdfs"
    / "practice_files"
    / "Pride_and_Prejudice.pdf"
)
input_pdf = PdfFileReader(str(pdf_path))
pdf_writer = PdfFileWriter()
# Index-based copy of pages 1..3.
for n in range(1, 4):
    page = input_pdf.getPage(n)
    pdf_writer.addPage(page)
print(pdf_writer.getNumPages())
# Rebinding pdf_writer discards the pages added above; this second pass
# demonstrates the equivalent slice-based approach.
pdf_writer = PdfFileWriter()
for page in input_pdf.pages[1:4]:
    pdf_writer.addPage(page)
with Path("chapter1_slice.pdf").open(mode="wb") as output_file:
    pdf_writer.write(output_file)
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from slack_g_cal.parse import JSON
from .datetime_ import WitDatetimeContainer
class MessageResponse(JSON):
    """ Container wrapping responses from the Message API """
    def __init__(self, **res_json):
        # Attributes named here get converted to WitDatetimeContainer.
        self.datetime_keys = ['datetime']
        entities = res_json.pop('entities')
        # Flatten the 'entities' sub-dict into the top-level properties.
        super(MessageResponse, self).__init__(**dict(res_json, **entities))
        self._parse_dt_props()
    def _parse_dt_props(self):
        # Wrap any raw datetime JSON values in WitDatetimeContainer.
        for key in self.datetime_keys:
            if hasattr(self, key):
                dt_json = getattr(self, key).__dict__
                setattr(self, key, WitDatetimeContainer(**dt_json))
    def set_prop(self, fmted_key, val):
        # Lists of candidates are collapsed to the most confident entry.
        if isinstance(val, list):
            val_to_set = self.get_highest_confidence_field(val)
            super(MessageResponse, self).set_prop(fmted_key, val_to_set)
        else:
            super(MessageResponse, self).set_prop(fmted_key, val)
    def get_highest_confidence_field(self, field_):
        """ Returns the value with the highest confidence if there is more than one """
        if isinstance(field_, list):
            if not field_:
                # bugfix: the original indexed field_[0] and raised
                # IndexError on an empty candidate list.
                return None
            # max() keeps the first maximum, matching the original's
            # strict '>' comparison on ties.
            return max(field_, key=lambda entry: entry['confidence'])
        if isinstance(field_, str):
            return field_
        # Other types (including None) are passed over, as before.
        return None
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import sys
import time
import json
import subprocess
import argparse
from framework.print.buffer import PrintBuffer
from framework.cmd.repository import RepositoryCmd
class ScriptTestCmd(RepositoryCmd):
    """
    Superclass for structuring a command that invokes a script in order to test
    it.
    """
    def __init__(self, settings):
        super().__init__(settings)
        self.title = "script test cmd superclass"

    def _exec(self, tests):
        # Time the test callable and report how long it took.
        began = time.time()
        tests(self.settings)
        elapsed = time.time() - began
        return {'elapsed_time': elapsed}

    def _output(self, results):
        # Render a short success banner framed by separators.
        buf = PrintBuffer()
        buf.separator()
        buf.add_green("%s passed!\n" % self.title)
        buf.add("Elapsed time: %.2fs\n" % results['elapsed_time'])
        buf.separator()
        return str(buf)
|
import pickle
from flask import Flask, request
app = Flask(__name__)
@app.route('/get_tf-idf', methods=['GET'])
def print_square():
    """Transform the posted sentences with the pre-fitted TF-IDF model.

    Expects a JSON body with a 'sents' list (parsed even on GET via
    force=True) and returns the dense matrix as nested lists.
    """
    received_docs = request.get_json(force=True)['sents']
    tf_idf_matrix = tf_idf.transform(received_docs).toarray().tolist()
    return {'output': tf_idf_matrix}
if __name__ == "__main__":
    # NOTE(review): tf_idf is only bound here, so running the app under a
    # WSGI server (which skips this block) would raise NameError in the
    # handler — consider loading the model at import time.
    with open('./tfidf_model.pkl', 'rb') as f:
        tf_idf = pickle.load(f)
    app.run(host='0.0.0.0')
|
# NOTE: Python 2 syntax (print statements) throughout this tutorial script.
from collections import deque
squares = [1, 4, 9, 16, 25]
# slice operators return a new copy of the list on which the operator was invoked
var = squares[:]
print "squares[0] was not changed: " + str(squares)
# the concat operator can be used to append a list to another list
l1 = [1, 2]
l2 = [3, 4]
print "concat lists: " + str(l1+l2)
# the method append can be used to add items to a list
squares.append(36)
print "squares with appended element: " + str(squares)
# objects of any type can be added to a list!
squares.append("hmmm")
squares.append(u"\u1234")
print "squares with new obj type: " + str(squares)
# len can also be applied to lists
print "length of squares is " + str(len(squares))
# lists can be nested to create a two-dimensional array like type
nest = [["baby bird 1", "baby bird 2"], ["baby bird 3", "baby bird 4"]]
print nest[0][1]
# extend appends the elements of a given list to the end of the list on which the method was invoked
squares.extend([121, 144])
print squares
# insert inserts an item at the specified index, shifting as necessary
squares.insert(0, 0)
print squares
# remove deletes the first occurrence of the given VALUE (not an index),
# shifting as necessary — here it removes the 0 inserted above
squares.remove(0)
print squares
# index returns the index of the first occurrence of the element given in the parameter,
# and raises an error if the element is not present
squares.append(4)
print squares
print "found 4 at", squares.index(4)
# pop removes AND returns the element at the specified index. If no index is given,
# the last item in the list is removed
squares.pop()
print squares
# count counts the number of occurrences of the parameter in the list
print squares.count(36)
# swap the first and fifth elements via a temporary variable
temp = squares[0]
squares[0] = squares[4]
squares[4] = temp
print squares
squares.sort()
print squares
############## DEQUE ################
# deques have fast append and pop operation from both ends
queue = deque(["taco", "siesta", "trabajo", "tetas"])
# remove and get first item in the list
print queue.popleft()
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2014, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# or favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#
#}}}
'''Ingest CSV files and parse them according to a sensor defintion.'''
from collections import namedtuple
from datetime import datetime, timedelta
import json
import os
import sys
import pytz
import dateutil.parser
from .csvfile import CSVFile
class IngestError(ValueError):
    '''Base error for column parsing failures.

    Subclasses define a ``_fmt`` template which is formatted with the
    instance itself to produce the message. Instances are *returned*
    from column parsers rather than raised (see the parsers below).
    '''
    def __init__(self, value, column, exc=None):
        self.value = value
        self.column = column
        msg = self.__class__._fmt.format(self)
        super().__init__(msg, value, column)
        if exc is not None:
            # Preserve the originating exception as the cause.
            self.__cause__ = exc
    @property
    def column_num(self):
        '''Get the one-based index of the column source(s).'''
        column = self.column.column
        if not isinstance(column, int):
            # Multi-column sources (e.g. split date/time) map to a list.
            return [i + 1 for i in column]
        return column + 1
    @property
    def data_type(self):
        # Mirrors the data_type of the column parser that failed.
        return self.column.data_type
class ParseError(IngestError):
    '''The raw string could not be converted to the column's data type.'''
    _fmt = 'could not convert string to {0.data_type}: {0.value!r}'
class OutOfRangeError(IngestError):
    '''A parsed value violated its column's minimum/maximum bounds.'''
    _fmt = '{0.value} is out of range [{0._min},{0._max}]'
    @property
    def _min(self):
        # Show an empty string only when no lower bound was configured;
        # the original `or ''` also blanked out a legitimate bound of 0.
        return '' if self.column.minimum is None else self.column.minimum
    @property
    def _max(self):
        return '' if self.column.maximum is None else self.column.maximum
class BaseColumn:
    '''Base class for column parsers.
    Provides default handling of column number and default value as well
    as a repr formatter. Also allows minimum and maximum keyword values
    to be passed through without error. Parsing is performed by calling
    the class instance. The default value is returned if the column is
    blank and a ValueError is raised if parsing fails.
    '''
    def __init__(self, column, *, default=None, minimum=None, maximum=None):
        # minimum/maximum are accepted (and ignored) so subclasses without
        # bounds support can still be constructed uniformly.
        self.column = column
        self.default = default
    def __repr__(self, *args, **kwargs):
        # options starts with '' so the join below yields a leading ', '
        # before the first real option; the `opt and ...` expression maps
        # that sentinel to the empty string.
        options = ['']
        if self.default:
            options.append(('default', self.default))
        for name in args:
            attr = getattr(self, name)
            if attr:
                options.append((name, attr))
        options.extend(kwargs.items())
        return '{}({}{})'.format(self.__class__.__name__, self.column,
                                 ', '.join(opt and '{}={!r}'.format(*opt)
                                           for opt in options))
class DateTimeColumn(BaseColumn):
    '''Parse columns of date/time data.
    In this case column can be a single column or a list or tuple of
    columns, in which case the values will be concatenated together
    (separated by sep -- a space by default) before being parsed.
    Multiple formatting strings may be attempted by passing them via the
    formats argument. Final parsing is attempted by dateutil.
    '''
    data_type = 'datetime'
    def __init__(self, column, *, formats=(), sep=' ', tzinfo=pytz.utc,
                 time_offset=0, **kwargs):
        super().__init__(column, **kwargs)
        self.formats = formats
        self.sep = sep
        self.tzinfo = tzinfo
        self.time_offset = time_offset
    def _ensure_tz(self, dt):
        # Localize naive datetimes, apply the configured offset (seconds),
        # and normalize everything to UTC.
        if not dt.tzinfo and self.tzinfo:
            dt = self.tzinfo.localize(dt)
        if self.time_offset:
            dt += timedelta(seconds=self.time_offset)
        return dt.astimezone(pytz.utc)
    def __call__(self, row):
        columns = [self.column] if isinstance(self.column, int) else self.column
        raw_value = self.sep.join([row[i].strip() for i in columns])
        if not raw_value.strip():
            return self.default
        # Try each explicit format string first...
        for fmt in self.formats:
            try:
                return self._ensure_tz(datetime.strptime(raw_value, fmt))
            except ValueError:
                pass
        # ...then fall back to dateutil's permissive parser.
        try:
            return self._ensure_tz(dateutil.parser.parse(raw_value))
        except (ValueError, TypeError):
            pass
        # Errors are returned, not raised — module-wide convention.
        return ParseError(raw_value, self)
    def __repr__(self):
        kwargs = {'sep': self.sep} if self.sep != ' ' else {}
        return super().__repr__('formats', **kwargs)
class StringColumn(BaseColumn):
    '''Parse a column as string data (this one is easy).'''
    data_type = 'string'
    def __call__(self, row):
        # A blank/empty cell falls back to the configured default.
        value = row[self.column]
        if value:
            return value
        return self.default
class IntegerColumn(BaseColumn):
    '''Parse a column as integer data.
    Automatic detection of the numeric base is performed and the value
    is tested against the minimum and maximum, if given. Failures are
    returned as ParseError/OutOfRangeError instances, not raised.
    '''
    data_type = 'integer'
    def __init__(self, column, *,
                 minimum=None, maximum=None, **kwargs):
        super().__init__(column, **kwargs)
        self.minimum = minimum
        self.maximum = maximum
    def __call__(self, row):
        raw_value = row[self.column]
        if not raw_value:
            return self.default
        # Detect the base from a leading-zero prefix: 0x, 0o/0N (octal), 0b.
        base = 10
        if raw_value.startswith('0') and len(raw_value) > 1:
            prefix = raw_value[:2].lower()
            if prefix == '0x':
                base = 16
            elif prefix == '0o' or prefix[1].isdigit():
                base = 8
            elif prefix == '0b':
                base = 2
        try:
            value = int(raw_value, base)
        except ValueError:
            return ParseError(raw_value, self)
        # bugfix: compare against None so a bound of 0 is still enforced
        # (the original truthiness test silently skipped zero bounds).
        if ((self.minimum is not None and value < self.minimum) or
                (self.maximum is not None and value > self.maximum)):
            return OutOfRangeError(value, self)
        return value
    def __repr__(self):
        return super().__repr__('minimum', 'maximum')
class FloatColumn(BaseColumn):
    '''Parse a float column.

    Blank cells yield the default; unparseable strings yield ParseError
    and bound violations yield OutOfRangeError (returned, not raised).
    '''
    data_type = 'float'
    def __init__(self, column, *, minimum=None, maximum=None, **kwargs):
        super().__init__(column, **kwargs)
        self.minimum = minimum
        self.maximum = maximum
    def __call__(self, row):
        raw_value = row[self.column]
        if not raw_value:
            return self.default
        try:
            value = float(raw_value)
        except ValueError:
            return ParseError(raw_value, self)
        # bugfix: compare against None so a bound of 0.0 is still enforced
        # (the original truthiness test silently skipped zero bounds).
        if ((self.minimum is not None and value < self.minimum) or
                (self.maximum is not None and value > self.maximum)):
            return OutOfRangeError(value, self)
        return value
    def __repr__(self):
        return super().__repr__('minimum', 'maximum')
class BooleanColumn(BaseColumn):
    '''Parse a boolean column.

    Numeric strings are truth-tested via float(); otherwise the value is
    matched case-insensitively against common yes/no spellings.
    '''
    data_type = 'boolean'
    parse_map = {
        'true': True,
        'yes': True,
        'y': True,
        't': True,
        'false': False,
        'no': False,
        'n': False,
        'f': False
    }
    def __call__(self, row):
        raw_value = row[self.column]
        if not raw_value:
            return self.default
        try:
            number = float(raw_value)
        except ValueError:
            pass
        else:
            return bool(number)
        key = raw_value.strip().lower()
        if key in self.parse_map:
            return self.parse_map[key]
        return ParseError(raw_value, self)
# line_num: 1-based CSV line; position: byte offset for progress reporting;
# columns: parsed values (or IngestError instances) in parser order.
Row = namedtuple('Row', 'line_num position columns')
def ingest_file(file, columns):
    '''Return a generator to parse a file according to a column map.
    The file should be seekable and opened for reading. columns should
    be a list or tuple of column parser instances. For each row read
    from file, a list of parsed columns is generated in the order they
    were specified in the columns argument and returned in the columns
    attribute of a Row instances. Errors are indicated by the column
    value being an instance of IngestError. This function will not close
    the file object.
    '''
    csv_file = CSVFile(file)
    if csv_file.has_header:
        # Skip the header row so only data rows are parsed.
        next(csv_file)
    return (Row(csv_file.reader.line_num, file.tell(),
                [col(row) for col in columns]) for row in csv_file if row)
def get_sensor_parsers(datamap, files):
    '''Generate a mapping of files and sensor paths to columns.
    Returns a dictionary with the files from datamap['files'] as keys
    and a list of 3-tuples as values, with each tuple containing a
    sensor path, a data type, and a column parser. The first entry in
    the list has a path of None and is the timestamp parser.
    '''
    def column_number(filename, column):
        # Resolve a header name (or list of names) to its 0-based index.
        if isinstance(column, (list, tuple)):
            return [column_number(filename, col) for col in column]
        if isinstance(column, str):
            file = datamap['files'][filename]
            return file['signature']['headers'].index(column)
        return column
    def get_tz(fileid):
        return pytz.timezone(files[fileid].get('time_zone') or 'UTC')
    def date_format(file):
        fmt = file['timestamp'].get('format')
        return [fmt] if fmt else []
    path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                        'static', 'projects', 'json', 'general_definition.json')
    # The timestamp parser is always the first entry for every file.
    columns = {name: [(None, DateTimeColumn.data_type,
                       DateTimeColumn(column_number(name, file['timestamp']['columns']),
                                      tzinfo=get_tz(name),
                                      time_offset=files[name]['time_offset'],
                                      formats=date_format(file)))]
               for name, file in datamap['files'].items()}
    with open(path) as file:
        prototypes = json.load(file)['sensors']
    for name, sensor in sorted(datamap['sensors'].items()):
        if 'type' not in sensor:
            continue
        # Renamed from `type` to avoid shadowing the builtin.
        sensor_type = sensor['type']
        filename = sensor['file']
        column = column_number(filename, sensor['column'])
        proto = prototypes[sensor_type]
        minimum = proto.get('minimum')
        maximum = proto.get('maximum')
        data_type = proto['data_type']
        # Dispatch to e.g. IntegerColumn/FloatColumn via this module's namespace.
        cls = globals()[data_type.capitalize() + 'Column']
        obj = cls(column, minimum=minimum, maximum=maximum)
        columns[filename].append((name, cls.data_type, obj))
    return columns
IngestFile = namedtuple('IngestFile', 'name size sensors types rows time_zone time_offset')
def ingest_files(datamap, files):
    '''Iterate over each file_dict in files to return a file parser iterator.
    file_dict is a dictionary with file, time_offset, and time_zone as keys.
    Creates a generator to iterate over each file in files and yield
    IngestFile objects with the following attributes:
    name - File name mapping keys from files to datamap['files'].
    size - Total size of the file.
    sensors - List of sensor names from datamap['sensors'].
    types - List of data types to expect in data.
    rows - Iterator to return Row instances of parsed file data.
    There are the same number of elements in sensors, types, and each
    row.columns representing columns of sensors. The first item in
    sensors, types, and rows.columns is the timestamp and is represented
    by a sensor name of None.
    '''
    columnmap = get_sensor_parsers(datamap, files)
    if hasattr(files, 'items'):
        # Accept a mapping as well as an iterable of (id, dict) pairs.
        files = sorted(files.items())
    for file_id, file_dict in files:
        file = file_dict['file']
        time_zone = file_dict['time_zone']
        time_offset = file_dict['time_offset']
        try:
            # Uploaded file objects may expose .size directly...
            size = file.size
        except AttributeError:
            # ...otherwise fall back to stat() on the file descriptor.
            size = os.stat(file.fileno()).st_size
        # Unzip (sensor path, data type, parser) triples into parallel tuples.
        names, types, columns = zip(*columnmap[file_id])
        rows = ingest_file(file, columns)
        yield IngestFile(file_id, size, names, types, rows, time_zone, time_offset)
def iter_rows(file):
    """Yield each row's parsed columns, mapping errors to plain dicts.

    Successful values pass through unchanged; IngestError instances are
    replaced with a JSON-friendly record describing the failure.
    """
    for row in file.rows:
        yield [
            {
                'file': file.name,
                'row': row.line_num,
                'column': col.column_num,
                'error': str(col)
            } if isinstance(col, IngestError) else col
            for col in row.columns
        ]
def main(argv=sys.argv):
    '''Parse input files according to a given sensor map definition.
    The first argument is a sensor map definition file. The remaining
    arguments are name-file pairs mapping the sensor map files to real
    files.
    '''
    # NOTE(review): this helper is currently unused.
    def log(name, row, col, exc):
        sys.stderr.write('error: {}:{}:{}: {}\n'.format(name, row, col, exc))
    with open(argv[1]) as file:
        datamap = json.load(file)
    # NOTE(review): ingest_files() reads 'file'/'time_zone'/'time_offset'
    # keys from each entry, but plain (name, fileobj) pairs are built
    # here — this CLI path looks stale relative to that API; confirm.
    files = zip(argv[2::2],
                [open(filename, 'rb') for filename in argv[3::2]])
    # NOTE(review): errmsg is unused.
    errmsg = 'error: {0.name}:{1.line_num}:{1.column}[{1.index}]: {1.exc}\n'
    for file in ingest_files(datamap, files):
        print(file.name)
        print(file.sensors)
        for row in file.rows:
            # position/size gives a rough progress percentage.
            print(row.columns, row.position * 100 // file.size)
if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of dumping a traceback.
        pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
# Root logging configuration: INFO-level messages to stdout, plus an
# optional file handler when LOGFILE is set to a non-empty path.
LEVEL = logging.INFO
LOGFILE, LOGFILEMODE = '', 'w'
HANDLERS = [logging.StreamHandler(sys.stdout)]
if LOGFILE:
    HANDLERS.append(logging.FileHandler(filename=LOGFILE, mode=LOGFILEMODE))
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(name)s %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S', handlers=HANDLERS, level=LEVEL)
logger = logging.getLogger('[iTOL]')
# Short aliases intended for use by the rest of the module.
warn, info, error = logger.warning, logger.info, logger.error
if __name__ == '__main__':
    pass
|
# Pool of words the player must type; shuffled during play.
# (Entries are kept verbatim — they are runtime game data.)
words = ['Grapes','Mango','Tabel','Apple','Cricket','Republic','Elephant','College','Traveller','Achieve','Beleiver','America','Indi','Facebook','Birthday','Monsoon','Movies','Finished','Process','Untitled','Script','Project']
def labelSlider():
    """Animate the banner text one character at a time in fontLabel."""
    global count, sliderWords
    text = 'Welcome to typing speed Increaser'
    if count >= len(text):
        # Restart the marquee once the full message has been shown.
        count = 0
        sliderWords = ''
    sliderWords = sliderWords + text[count]
    count = count + 1
    fontLabel.configure(text=sliderWords)
    # Re-arm the animation every 150 ms.
    fontLabel.after(150, labelSlider)
def time():
    """Count the game clock down once per second via Tk's after().

    Reads/updates the globals timeleft, score and miss; when the clock
    hits zero it shows the final tally and offers a reset.
    """
    global timeleft,score,miss
    if(timeleft >=11):
        pass
    else:
        # Final 10 seconds: turn the countdown red as a warning.
        timeLableCount.configure(fg='red')
    if(timeleft>0):
        timeleft -= 1
        timeLableCount.configure(text=timeleft)
        # Re-arm this function to fire again in one second.
        timeLableCount.after(1000,time)
    else:
        gamePlayDetailLabel.configure(text='Hit = {} | Miss = {} | Total Score = {}'.format(score,miss,score-miss))
        rr= messagebox.askyesnocancel('Notification', 'For Play Agin Hit Try Button')
        if(rr==True):
            # Reset the game state for another round.
            score = 0
            timeleft= 60
            miss = 0
            timeLableCount.configure(text=timeleft)
            worldLabel.configure(text=words[0])
            scoreLableount.configure(text=score)
            # NOTE(review): the countdown is not re-armed here; it restarts
            # on the next Enter press (startGame checks timeleft == 60).
def startGame(event):
    """Handle <Return>: score the typed word and present the next one."""
    global score,miss
    if(timeleft == 60):
        # First Enter press starts the countdown and clears the hint text.
        time()
        gamePlayDetailLabel.configure(text='')
    if(worldEntry.get() == worldLabel['text']):
        score +=1
        scoreLableount.configure(text=score)
    else:
        miss +=1
    # Show a fresh word and clear the entry for the next attempt.
    random.shuffle(words)
    worldLabel.configure(text=words[0])
    worldEntry.delete(0,END)
from tkinter import *
import random
from tkinter import messagebox
################################################## ROOT METHOD
from tkinter import Label
root = Tk()
root.geometry('800x500+400+100')
root.configure(bg='powder blue')
root.title('Typing game')
# NOTE(review): iconbitmap() normally expects a .ico file; passing a .png
# here may raise TclError — confirm on the target platform.
root.iconbitmap('img\Typing speed.png')
################################################# Variable
score = 0
timeleft = 60
count = 0
# bugfix: was 'sliderwords' (lowercase w), so labelSlider()'s
# 'global sliderWords' would raise NameError on first use.
sliderWords = ''
miss = 0
################################################# LABEL METHOD
fontLabel = Label(root,text='Welcome to typing Speed Increase',font=('airal',25,'italic bold'),
                  bg='powder blue',fg='red',width=40)
fontLabel.place(x=10,y=10)
random.shuffle(words)
worldLabel = Label(root,text=words[0],font=('airal',40,'italic bold'),bg='powder blue')
worldLabel.place(x=370,y=200)
scoreLabel = Label(root,text='Your Score : ',font=('airal',25,'italic bold'),bg='powder blue')
scoreLabel.place(x=10,y=100)
scoreLableount = Label(root,text=score,font=('airal',25,'italic bold'),bg='powder blue',fg='blue')
scoreLableount.place(x=80,y=180)
timerLabel = Label(root,text='Time Left:',font=('airal',25,'italic bold'),bg='powder blue')
timerLabel.place(x=600,y=100)
timeLableCount = Label(root,text=60,font=('airal',25,'italic bold'),bg='powder blue',fg='blue')
timeLableCount.place(x=680,y=180)
gamePlayDetailLabel = Label(root,text='Type Word and Hit Enter Button',font=('arial',30,'italic bold'),
                            bg='powder blue',fg='dark grey')
gamePlayDetailLabel.place(x=150,y=400)
################################################### Entery Method
worldEntry = Entry(root,font=('airal',25,'italic bold'),bd=10,justify='center')
worldEntry.place(x=250,y=300)
worldEntry.focus_set()
#####################################################
# Enter key submits the current word; startGame also starts the timer.
root.bind('<Return>',startGame)
root.mainloop()
from django.urls import path
# NOTE(review): "EmpresaUpdadte" looks misspelled, but it must keep
# matching the class name declared in .views.
from .views import EmpresaCreate, EmpresaUpdadte

# URL routes for the empresa app. Modern Django requires urlpatterns to
# be a list (the original used a tuple).
urlpatterns = [
    path('novo', EmpresaCreate.as_view(), name='create_empresa'),
    path('editar/<int:pk>', EmpresaUpdadte.as_view(), name='update_empresa'),
]
|
# Created by: Gavin Zhou
# Created on: Oct 12 2017
# Created for: ICS3U
import ui
# Flat sales-tax rate applied to the subtotal.
tax = 0.13
def calculate_touch_up_inside(sender):
    """Compute the pizza price from the UI fields and display it.

    Reads the size and topping-count text fields from the global
    Pythonista `view`, validates them, and writes the taxed total to
    price_textfield (or an error message to sorry_textfield).
    """
    pizza_size = view['pizza_size_textfield'].text
    topping = view['amount_topping_textfield'].text
    if pizza_size == 'large':
        pizza_size_cost = 6
    elif pizza_size == 'extra large':
        pizza_size_cost = 10
    else:
        # bugfix: unknown sizes used to fall through and raise NameError
        # on the undefined pizza_size_cost below.
        view['sorry_textfield'].text = 'size must be large or extra large'
        return
    if topping == '1':
        pizza_topping_cost = 1.00
    elif topping == '2':
        pizza_topping_cost = 1.75
    elif topping == '3':
        pizza_topping_cost = 2.50
    elif topping == '4':
        pizza_topping_cost = 3.25
    else:
        view['sorry_textfield'].text = 'follow what i put topping only can be 1-4 thanks'
        # bugfix: stop here; the original continued and crashed on the
        # undefined pizza_topping_cost.
        return
    sub_total = pizza_size_cost + pizza_topping_cost
    price = sub_total * tax + sub_total
    view['price_textfield'].text = 'show price: ' + str(price)
view = ui.load_view()
view.present('full screen')
|
from .api import goods_jingfen_query
from .api import goods_promotiongoodsinfo_query
from .api import goods_query
|
def getHint(index):
    """Return the hint text for the quiz question at the given index."""
    hints = (
        "11 players.",
        "Tired and very far.",
        "Yellow ball and racket.",
        "The sport from china.",
        "Shuttlecock.",
    )
    return hints[index]
def getAnswer(index):
    """Return the expected answer for the quiz question at the given index."""
    answers = (
        "soccer",
        "marathon",
        "tennis",
        "tabletennis",
        "badminton",
    )
    return answers[index]
from time import process_time as pt
def runtime(function):
    """Run *function*, print its CPU runtime in seconds, and return its result.

    Returning the result is a backward-compatible improvement: callers
    that ignored the previous implicit None are unaffected.
    """
    start = pt()
    result = function()
    print(f'Runtime: {pt() - start} seconds')
    return result
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from architecture import CNN
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1].
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# bugfix: trainset must load the train split — the original passed
# train=False and silently trained on the 10k test images.
trainset = CIFAR10(root='../../../data', train=True, download=False, transform=transform)
testset = CIFAR10(root='../../../data', train=False, download=False, transform=transform)
trainloader = DataLoader(trainset, batch_size=4, shuffle=True, num_workers=0)
testloader = DataLoader(testset, batch_size=4, shuffle=False, num_workers=0)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
cnn = CNN()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(cnn.parameters(), lr=0.001, momentum=0.9)
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = cnn(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('>>>>>>> Training is Done! <<<<<<<<<')
PATH = './cifar_10.mdl'
torch.save(cnn.state_dict(), PATH)
print('Model is saved! in: ', PATH)
# Overall accuracy on the test split (no gradients needed for evaluation).
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = cnn(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(f'Accuracy of the network on the 10000 test images: {100 * correct / total} %')
# Per-class accuracy.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = cnn(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(4):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1
for i in range(10):
    # bugfix: the original f-string printed the literal text "100 *"
    # instead of multiplying; compute the percentage inside the braces.
    print(f'Accuracy of {classes[i]} : {100 * class_correct[i] / class_total[i]:0.2f} %')
|
from __future__ import print_function
from RegOptim.image_utils import check_for_padding
from RegOptim.utils import get_subset
from RegOptim.utils import save_params, import_func
from RegOptim.optimization.pipeline_utils import create_exp_folders, create_template, \
pad_template_data_after_loop
import numpy as np
import json
import os
from sklearn.model_selection import StratifiedShuffleSplit
def metric_learning_to_template(PATH):
    """Run a metric-learning-to-template experiment from the JSON config at
    ``PATH``: load the data, create the experiment folder tree, split the
    samples into train+val / test, build (and, if needed, pad) the template,
    then hand everything to the configured main loop.

    :param PATH: path to the experiment's JSON config file.
    """
    # path to experiment config
    print("START EXPERIMENT")
    # FIX: close the config file instead of leaking the handle.
    with open(PATH, 'r') as config_file:
        pipeline_params = json.load(config_file)
    # extract params to shorter usage
    pipeline_main_loop = import_func(**pipeline_params['pipeline_main_loop'])
    random_state = pipeline_params['random_state']
    experiment_name = pipeline_params['experiment_name']
    experiment_path = os.path.join(pipeline_params['path_to_exp'], experiment_name)
    path_to_template = os.path.join(experiment_path, 'templates/')
    template_name = 'template_0.nii'
    # create folder and path
    create_exp_folders(experiment_path, params=pipeline_params)
    print('experiment name: ', experiment_name)
    load_data = import_func(**pipeline_params['load_func'])
    data, y = load_data(**pipeline_params['load_params'])
    # FIX: np.unicode / np.str / np.string_ / np.unicode_ were deprecated
    # aliases removed in NumPy 1.24; np.str_ / np.bytes_ are the supported
    # spellings and cover the same set of types.
    if isinstance(data[0], (str, np.str_, np.bytes_)):
        np.savez(os.path.join(experiment_path, 'data_path.npz'), np.array(data))
    if pipeline_params['subset'] != 1.:
        data, y = get_subset(data, y, pipeline_params['subset'], pipeline_params['random_state'])
    np.savez(os.path.join(experiment_path, 'target.npz'), y)
    print("Data size: ", data.shape, " target mean: ", y.mean())
    # create splits for (train+val) and test
    idx_out_train, idx_out_test = list(StratifiedShuffleSplit(n_splits=1, test_size=0.3,
                                                              random_state=random_state).split(
        np.arange(len(data)), y))[0]
    splits = {'train_val': idx_out_train.tolist(), 'test': idx_out_test.tolist()}
    save_params(experiment_path, 'splits_indices', splits)
    # create template on train data and save it
    template = create_template(path_to_data=data, train_idx=idx_out_train,
                               path_to_template=os.path.join(experiment_path, 'templates/'),
                               template_name=template_name, resolution=pipeline_params['resolution'],
                               load_func_template=pipeline_params['load_func_template'])
    # check if template needs padding
    if check_for_padding(template):
        template = pad_template_data_after_loop(
            template.copy(),
            os.path.join(path_to_template, template_name),
            pad_size=pipeline_params['pipeline_optimization_params']['pad_size'],
            ndim=pipeline_params['ndim']
        )
        pipeline_params['pipeline_optimization_params']['add_padding'] = True
        pad_size = pipeline_params['pipeline_optimization_params']['pad_size']
    else:
        pad_size = 0
    pipeline_main_loop(data=data, template=template, y=y, idx_out_train=idx_out_train,
                       idx_out_test=idx_out_test, experiment_path=experiment_path,
                       path_to_template=path_to_template, template_name=template_name,
                       pipeline_params=pipeline_params, pad_size=pad_size)
    print("FINISHED")
|
# Read two values and print them in swapped order.
print("Введите числа для обмена")
first = input()
second = input()
# Tuple assignment swaps without a temporary variable.
first, second = second, first
print(first, second)
# Generated by Django 2.1.5 on 2019-05-25 18:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the SocialUrl model: a configurable link to a social network
    # (title, URL, HTML icon code, ordering weight, on/off flag) shown on the
    # site; higher weight sorts the link further left.

    dependencies = [
        ('settings', '0007_auto_20190525_2015'),
    ]
    operations = [
        migrations.CreateModel(
            name='SocialUrl',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('social_network_title', models.CharField(max_length=255, verbose_name='Название социальной сети')),
                ('url', models.URLField(verbose_name='Ссылка')),
                ('icon', models.CharField(max_length=255, verbose_name='HTML-код иконки')),
                ('weight', models.IntegerField(default=1000, help_text='Чем выше вес, тем левее элемент в списке ссылок на соц. сети.', verbose_name='Вес')),
                ('is_enabled', models.BooleanField(default=True, verbose_name='Включено')),
            ],
            options={
                'verbose_name': 'Ссылка на социальную сеть',
                'verbose_name_plural': 'Ссылки на социальные сети',
            },
        ),
    ]
|
'''
Script to plot PPSD data, based on
https://docs.obspy.org/tutorial/code_snippets/probabilistic_power_spectral_density.html
'''
import os
import glob
from obspy.signal import PPSD
from obspy.imaging.cm import pqlx
from parameters_py.config import (
OUTPUT_FIGURE_DIR,TIME_OF_WEEKDAY_DAY,TIME_OF_WEEKDAY_START_HOUR,TIME_OF_WEEKDAY_FINAL_HOUR
)
# ==================================
# Function to plot TOTAL PPSD DATA
# ==================================
def plot_PPSD_TOTAL_data(date_lst):
    """Aggregate every PPSD ``.npz`` file found in the directory ``date_lst``
    and save the total PPSD plot (PQLX colormap) as a PDF under
    ``OUTPUT_FIGURE_DIR/TOTAL/<station>/``.

    Note: changes the process working directory to ``date_lst``.
    """
    os.chdir(date_lst)
    files = sorted(glob.glob('*.npz'))
    ppsd = PPSD.load_npz(files[0])
    # IDIOM FIX: use a plain loop for side effects instead of building a
    # throwaway list with a comprehension.
    for extra in files[1:]:
        ppsd.add_npz(extra)
    os.makedirs(OUTPUT_FIGURE_DIR+'TOTAL/'+ppsd.station+'/',exist_ok=True)
    ppsd.plot(cmap=pqlx,filename=OUTPUT_FIGURE_DIR+'TOTAL/'+ppsd.station+'/'+ppsd.network+'.'+ppsd.station+'.'+ppsd.channel+'.'+str(ppsd.times_processed[0].year)+'.pdf')
def plot_PPSD_WINDOWED_data(date_lst):
    """Aggregate every PPSD ``.npz`` file in ``date_lst``, restrict the
    histogram to the configured weekday/hour window, and save the plot as a
    PDF under ``OUTPUT_FIGURE_DIR/WINDOWED_<start>_<end>/<station>/``.

    Note: changes the process working directory to ``date_lst``.
    """
    os.chdir(date_lst)
    files = sorted(glob.glob('*.npz'))
    ppsd = PPSD.load_npz(files[0])
    # IDIOM FIX: use a plain loop for side effects instead of building a
    # throwaway list with a comprehension.
    for extra in files[1:]:
        ppsd.add_npz(extra)
    # Recompute the histogram restricted to the configured time window.
    ppsd.calculate_histogram(time_of_weekday=[(TIME_OF_WEEKDAY_DAY, TIME_OF_WEEKDAY_START_HOUR, TIME_OF_WEEKDAY_FINAL_HOUR)])
    folder_output = OUTPUT_FIGURE_DIR+'WINDOWED_'+str(int(TIME_OF_WEEKDAY_START_HOUR))+'_'+str(int(TIME_OF_WEEKDAY_FINAL_HOUR))+'/'+ppsd.station+'/'
    os.makedirs(folder_output,exist_ok=True)
    ppsd.plot(cmap=pqlx,filename=folder_output+ppsd.network+'.'+ppsd.station+'.'+ppsd.channel+'.'+str(ppsd.times_processed[0].year)+'.pdf')
import os

try:
    # Spam the console forever, clearing the screen each iteration
    # (Windows `cls`), until the user hits Ctrl+C.
    while True:
        print(" FUS RO DA!!")
        os.system('cls')
except KeyboardInterrupt:
    # BUG FIX: `os.systme` was a typo that raised AttributeError on exit;
    # the intent was to run the Windows `Pause` command.
    os.system("Pause")
|
from WealthStream import app
from flask import render_template
@app.route("/")
def index():
return render_template('index.html')
@app.after_request
def add_header(r):
    """
    Add headers to both force latest IE rendering engine or Chrome Frame,
    and also to cache the rendered page for 10 minutes.
    """
    # Same four headers as before, applied from a single mapping.
    forced_headers = {
        "Pragma": "no-cache",
        "Expires": "0",
        "X-UA-Compatible": "IE=Edge,chrome=1",
        "Cache-Control": "no-cache, no-store, must-revalidate, public, max-age=0",
    }
    for header_name, header_value in forced_headers.items():
        r.headers[header_name] = header_value
    return r
|
import os
import sys
import argparse
import optparse
import platform
import pkgutil
# Per-platform user-configuration directory defaults; '~' is expanded later
# (see config_dirs, which calls os.path.expanduser on each candidate).
UNIX_DIR_FALLBACK = '~/.config'
WINDOWS_DIR_VAR = 'APPDATA'
WINDOWS_DIR_FALLBACK = '~\\AppData\\Roaming'
MAC_DIR = '~/Library/Application Support'
def iter_first(sequence):
    """Get the first element from an iterable or raise a ValueError if
    the iterator generates no values.

    :param sequence: any iterable.
    :return: the first element produced by ``iter(sequence)``.
    :raises ValueError: if the iterable is empty.
    """
    it = iter(sequence)
    try:
        return next(it)
    except StopIteration:
        # Give callers a message, and suppress the StopIteration context,
        # which is just noise in the traceback.
        raise ValueError('iterable is empty') from None
def namespace_to_dict(obj):
    """If obj is argparse.Namespace or optparse.Values we'll return
    a dict representation of it, else return the original object.
    Redefine this method if using other parsers.
    :param obj: *
    :return:
    :rtype: dict or *
    """
    parser_value_types = (argparse.Namespace, optparse.Values)
    return vars(obj) if isinstance(obj, parser_value_types) else obj
def build_dict(obj, sep='', keep_none=False):
    """Recursively builds a dictionary from an argparse.Namespace,
    optparse.Values, or dict object.
    Additionally, if `sep` is a non-empty string, the keys will be split
    by `sep` and expanded into a nested dict. Keys with a `None` value
    are dropped by default to avoid unsetting options but can be kept
    by setting `keep_none` to `True`.
    :param obj: Namespace, Values, or dict to iterate over. Other
    values will simply be returned.
    :type obj: argparse.Namespace or optparse.Values or dict or *
    :param sep: Separator to use for splitting properties/keys of `obj`
    for expansion into nested dictionaries.
    :type sep: str
    :param keep_none: Whether to keep keys whose value is `None`.
    :type keep_none: bool
    :return: A new dictionary or the value passed if obj was not a
    dict, Namespace, or Values.
    :rtype: dict or *
    """
    # We expect our root object to be a dict, but it may come in as
    # a namespace
    obj = namespace_to_dict(obj)
    # We only deal with dictionaries
    if not isinstance(obj, dict):
        return obj
    # Get keys iterator
    keys = obj.keys()
    if sep:
        # Splitting keys by `sep` needs sorted keys to prevent parents
        # from clobbering children
        keys = sorted(list(keys))
    output = {}
    for key in keys:
        value = obj[key]
        if value is None and not keep_none:  # Avoid unset options.
            continue
        # `save_to` walks down into nested dicts when `sep` expansion kicks
        # in; by default we write at the top level of `output`.
        save_to = output
        result = build_dict(value, sep, keep_none)
        if sep:
            # Split keys by `sep` as this signifies nesting
            split = key.split(sep)
            if len(split) > 1:
                # The last index will be the key we assign result to
                key = split.pop()
                # Build the dict tree if needed and change where
                # we're saving to
                for child_key in split:
                    if child_key in save_to and \
                            isinstance(save_to[child_key], dict):
                        save_to = save_to[child_key]
                    else:
                        # Clobber or create
                        save_to[child_key] = {}
                        save_to = save_to[child_key]
        # Save
        if key in save_to:
            # An earlier (sorted) key already created this slot: merge into
            # it rather than replacing it.
            save_to[key].update(result)
        else:
            save_to[key] = result
    return output
# Config file paths, including platform-specific paths and in-package
# defaults.
def find_package_path(name):
    """Returns the path to the package containing the named module or
    None if the path could not be identified (e.g., if
    ``name == "__main__"``).
    """
    # Based on get_root_path from Flask by Armin Ronacher.
    # FIX: pkgutil.get_loader() is deprecated since Python 3.12 and removed
    # in 3.14; resolve the module location via importlib.util.find_spec.
    import importlib.util

    if name == '__main__':
        return None
    try:
        spec = importlib.util.find_spec(name)
    except (ImportError, ValueError):
        return None
    if spec is None:
        return None
    if spec.origin and spec.origin not in ('built-in', 'frozen', 'namespace'):
        # Regular module/package: origin is the source file path.
        filepath = spec.origin
    else:
        # Fall back to importing the specified module.
        __import__(name)
        filepath = sys.modules[name].__file__
    return os.path.dirname(os.path.abspath(filepath))
def xdg_config_dirs():
    """Return candidate config directories from the XDG_CONFIG_HOME and
    XDG_CONFIG_DIRS environment variables (when set), always followed by
    the /etc fallback.
    """
    env = os.environ
    dirs = []
    home = env.get('XDG_CONFIG_HOME')
    if home is not None:
        dirs.append(home)
    extra = env.get('XDG_CONFIG_DIRS')
    if extra is not None:
        dirs.extend(extra.split(':'))
    else:
        dirs.append('/etc/xdg')
    dirs.append('/etc')
    return dirs
def config_dirs():
    """Return a platform-specific list of candidate user configuration
    directories, highest priority first; the final entry is the fallback
    used when no higher-priority config file exists.
    """
    system = platform.system()
    candidates = []
    if system == 'Darwin':
        candidates.append(UNIX_DIR_FALLBACK)
        candidates.append(MAC_DIR)
        candidates.extend(xdg_config_dirs())
    elif system == 'Windows':
        candidates.append(WINDOWS_DIR_FALLBACK)
        if WINDOWS_DIR_VAR in os.environ:
            candidates.append(os.environ[WINDOWS_DIR_VAR])
    else:
        # Anything else is treated as Unix.
        candidates.append(UNIX_DIR_FALLBACK)
        candidates.extend(xdg_config_dirs())
    # Expand '~', normalize to absolute paths, and drop duplicates while
    # preserving priority order.
    unique = []
    for candidate in candidates:
        expanded = os.path.abspath(os.path.expanduser(candidate))
        if expanded not in unique:
            unique.append(expanded)
    return unique
|
class xPaths(object):
    # XPath selectors (plus the expected column keys) for scraping a
    # paginated company-ranking table.
    keys = ['rank', 'company', '3-yr growth', 'revenue', 'industry', 'state', 'metro', 'years']
    # Rows-per-page dropdown control.
    view_per_page_input = '//*[@id="show_input"]'
    # Element inside the dropdown used to scroll its option list.
    scroll_down = '//*[@id="show_input"]/div[3]'
    # "200 per page" option inside the dropdown.
    two_hundred_per_page = '//*[@id="show_input"]/div[3]/div[6]'
    # Control that reveals additional table columns.
    more_columns = '//*[@id="column_headers"]/div[6]'
    # Pagination "next page" button.
    next_page = '//*[@id="page_tuner"]/div[9]'
|
import dataclasses
import json
import logging
from typing import List
import pyfuse3
import pyfuse3_asyncio
from telethon.hints import Entity
from telethon.tl import types
from telethon.utils import get_display_name
from tqdm import tqdm
from .tgclient import TelegramFsClient
from .tgvfs import TelegramFsAsync
from .util import DateTimeEncoder
async def list_dialogs(client: TelegramFsClient, limit=None, json_output=False, offset_id=0):
    """Print the available dialogs, either as a JSON array or as
    tab-separated "<id>\\t<name>" lines."""
    dialogs = await client.get_dialogs_dict(limit=limit, offset_id=offset_id)
    entries = []
    for title, dialog in dialogs.items():
        entries.append({
            'name': title,
            'id': dialog.id,
        })
    if json_output:
        print(json.dumps(entries))
    else:
        for entry in entries:
            print("%s\t%s" % (entry['id'], entry['name']))
async def list_documents(client, id, offset_id: int = 0, limit: int = None,
                         filter_music=False, reverse=False, json_output=False):
    """Print the documents of dialog ``id``, either as a JSON array or as
    tab-separated "<message_id>\\t<file_name>" lines."""
    logging.debug("list_documents(id=%s, offset_id=%s, limit=%s)" %
                  (id, offset_id, limit))
    logging.debug("Querying entity %s(%s)" % (type(id), id))
    entity = await client.get_entity(id)
    logging.debug("Querying documents")
    _messages, handles = await client.get_documents(entity,
                                                    limit=limit,
                                                    offset_id=offset_id,
                                                    filter_music=filter_music,
                                                    reverse=reverse)
    rows = [dataclasses.asdict(handle.document) for handle in handles]
    if not json_output:
        for row in rows:
            print("%s\t%s" % (row['message_id'], row['attributes']['file_name']))
    else:
        print(json.dumps(rows, cls=DateTimeEncoder))
def create_new_files_handler(client: TelegramFsClient, telegram_fs, entity: Entity):
    """Return an async update handler that adds documents newly posted in
    ``entity`` to the mounted filesystem ``telegram_fs``."""
    async def new_files_handler(update):
        # Only new-message updates (chat or channel) are of interest.
        if not isinstance(update, (types.UpdateNewMessage, types.UpdateNewChannelMessage)):
            # logging.debug("Not instance UpdateNewMessage or UpdateNewChannelMessage")
            return
        # Make sure the update belongs to the mounted entity; channel and
        # chat updates expose the originating id differently.
        if isinstance(update, types.UpdateNewChannelMessage):
            if not update.message.to_id:
                return
            update_entity_id = update.message.to_id.channel_id
            if update_entity_id != entity.id:
                # logging.debug("Not required channel id %d != %d" % (update_entity_id, entity.id))
                return
        elif isinstance(update, types.UpdateNewMessage):
            update_entity_id = update.message.chat_id
            if update_entity_id != entity.id:
                # logging.debug("Not required chat id %d != %d" % (update_entity_id, entity.id))
                return
        msg = update.message
        # Ignore messages without an attached document.
        if not getattr(msg, 'media', None):
            return
        if not getattr(msg.media, 'document', None):
            return
        document_handle = client.get_document_handle(msg)
        if not document_handle:
            return
        logging.debug(f'new msg: {msg}')
        logging.debug(f'new file: {document_handle.document}')
        telegram_fs.add_file(msg, document_handle)
    return new_files_handler
async def mount(client, id, destination: str, offset_id=0, limit=None,
                filter_music=False, debug_fuse=False, reverse=False, updates=False, fsname="tgfs"):
    """Mount the documents of dialog ``id`` as a FUSE filesystem at
    ``destination`` and serve it until the FUSE main loop exits.

    :param updates: when True, also subscribe to new messages so freshly
        posted files appear in the mounted filesystem.
    """
    pyfuse3_asyncio.enable()
    fuse_options = set(pyfuse3.default_options)
    fuse_options.add('fsname=' + fsname)
    if debug_fuse:
        fuse_options.add('debug')
    # in order to use numeric id
    if isinstance(id, int):
        await client.get_dialogs()
    logging.debug("Querying entity %s" % id)
    entity: Entity = await client.get_entity(id)
    logging.debug("Got '%s'" % get_display_name(entity))
    logging.info("Querying %s messages starting with message_id %d, music: %s" %
                 (limit if limit else "all", offset_id, filter_music))
    messages, documents_handles = await client.get_documents(entity,
                                                             limit=limit,
                                                             filter_music=filter_music,
                                                             offset_id=offset_id,
                                                             reverse=reverse)
    logging.info("Mounting %d files to %s" % (len(documents_handles), destination))
    # logging.debug("Files: %s" % ([doc['id'] for msg, doc in documents], ))
    telegram_fs = TelegramFsAsync()
    # messages and documents_handles are parallel sequences.
    for msg, dh in zip(messages, documents_handles):
        telegram_fs.add_file(msg, dh)
    if updates:
        client.add_event_handler(
            create_new_files_handler(client, telegram_fs, entity),
        )
    pyfuse3.init(telegram_fs, destination, fuse_options)
    # Blocks here serving the filesystem until it is unmounted.
    await pyfuse3.main(min_tasks=10)
async def download(client: TelegramFsClient, id, destination: str, files: List[int]):
    """Download the documents whose message ids are in ``files`` from dialog
    ``id`` into ``destination``, showing a per-file tqdm progress bar.

    NOTE(review): elsewhere in this module ``get_documents`` returns a
    (messages, handles) pair, but here its result is iterated as
    (msg, doc) pairs with dict-style docs — confirm the ``ids=...`` call
    path really returns that shape.
    """
    logging.info("Download files %s from %s to %s" %
                 (files, id, destination))
    logging.debug("Querying entity %s(%s)" % (type(id), id))
    entity = await client.get_entity(id)
    documents = await client.get_documents(entity, ids=files)
    logging.info("Files %s" %
                 ([d['attributes']['file_name'] for m, d in documents],))
    # logging.debug("Files %s" % ([m.id for m, d in documents], ))
    for (msg, doc) in documents:
        # Skip anything not explicitly requested.
        if msg.id not in files:
            logging.error("Wrong message id %d" % msg.id)
            continue
        file_name = doc['attributes']['file_name']
        size = doc['size']
        logging.info("Downloading %s, %d bytes" % (file_name, doc['size']))
        # Progress is counted in KB; the callback assumes 128 KiB chunks.
        with tqdm(total=int(size / 1024), unit='KB') as t:
            await client.download_media(
                msg,
                "%s/%d %s" % (destination, msg.id, file_name),
                progress_callback=lambda recvd, total: t.update(int(131072 / 1024)))
|
import os
import sys

# One-line, comma-separated summary of interpreter and host facts.
print(f"CPU:{os.cpu_count()}", end=',')
print(f"RECLIMIT:{sys.getrecursionlimit()}", end=',')
print(f"EXEPATH:{sys.executable}", end=',')
print(f"ENDIAN:{sys.byteorder}", end=',')
print(f"UNICODE:{sys.maxunicode}")
|
# -*- coding: utf-8 -*-
from odoo import fields, models, api
from collections import namedtuple
from odoo.exceptions import UserError
from odoo.tools.float_utils import float_compare
import re
# Budget Expense Report
class Bex(models.Model):
    """Budget Expense Report (BEX): tracks actual expenses against a budget
    request (purchase.order) and creates the matching goods-receipt picking
    ("BE") when the report is posted."""
    _name = 'budget.expense.report'
    _inherit = ['mail.thread']
    _order = "date desc, id desc"
    _description = "Budget Expense Report"

    # Related goods-receipt pickings and their count (computed, not stored).
    be_lie_count = fields.Integer(compute='_compute_be_lie')
    be_ids = fields.One2many('stock.picking', string="be_ids", compute='_compute_be_lie')

    @api.multi
    def _compute_be_lie(self):
        """Collect the stock.picking records that point back at this BEX."""
        for bex in self:
            be_ids = self.env['stock.picking'].search([('bex_id','=',bex.id)])
            if be_ids:
                bex.be_ids = be_ids
            bex.be_lie_count = len(be_ids)

    def get_number(self, chaine, prefixe):
        """Return ``prefixe`` followed by the last run of digits found in
        ``chaine``; raise UserError when ``chaine`` is empty."""
        if not chaine or chaine=='':
            raise UserError(u"Caractère en paramètre vide pour la fonction get_number()")
        liste_entier = re.findall("\d+", chaine)
        res = prefixe+''+str(liste_entier[len(liste_entier)-1])
        return res

    def _name_change(self):
        """Compute ``name`` as "BEX" + the last digit group of ``origin``."""
        for bex in self :
            if bex.origin:
                res = re.findall("\d+", bex.origin)
                longeur_res = len(res)
                res_final = res[longeur_res-1]
                bex.name = "BEX" + "".join(res_final)

    @api.multi
    def action_view_be(self):
        """Open the related pickings: list view when several, form view when
        exactly one."""
        action = self.env.ref('stock.action_picking_tree')
        result = action.read()[0]
        result.pop('id', None)
        result['context'] = {}
        pick_ids = sum([bex.be_ids.ids for bex in self], [])
        if len(pick_ids) > 1:
            result['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
        elif len(pick_ids) == 1:
            res = self.env.ref('stock_heri.view_be_picking_form_advanced', False)
            result['views'] = [(res and res.id or False, 'form')]
            result['res_id'] = pick_ids and pick_ids[0] or False
        return result

    @api.depends('remise','bex_lines','bex_lines.qty_done','bex_lines.taxes_id','bex_lines.prix_unitaire')
    def _amount_all(self):
        """Sum the lines' realised amounts, apply the global discount
        (``remise``, percent) and store untaxed / tax / total amounts."""
        for bex in self:
            amount_untaxed_bex = 0.0
            amount_taxed_bex = 0.0
            remise = 0.0
            if bex.remise:
                remise = bex.remise
            for line in bex.bex_lines :
                amount_untaxed_bex += line.montant_realise
                amount_taxed_bex += line.montant_realise_taxe
            amount_untaxed_bex = amount_untaxed_bex*(1-(remise/100))
            amount_taxed_bex = amount_taxed_bex*(1-(remise/100))
            bex.update({
                'amount_untaxed_bex': amount_untaxed_bex,
                'amount_tax_bex': amount_taxed_bex-amount_untaxed_bex,
                'amount_total_bex': amount_taxed_bex,
            })

    @api.depends('amount_total_en_ar')
    def _amount_en_ar(self):
        # NOTE(review): this depends on the very field it assigns and
        # computes total = untaxed - tax (not +) — confirm the intended
        # formula with the business side.
        for res in self :
            res.amount_total_en_ar = res.amount_untaxed_en_ar - res.amount_tax_en_ar

    # --- Workflow transitions: each sets ``state`` and stamps the date. ---
    def attente_hierarchie(self):
        self.state = 'attente_hierarchie'
        self.change_state_date = fields.Datetime.now()

    def annuler_attente_hierarchie(self):
        self.state = 'draft'
        self.change_state_date = fields.Datetime.now()

    def hierarchie_ok(self):
        self.state = 'hierarchie_ok'
        self.change_state_date = fields.Datetime.now()

    def annuler_hierarchie_ok(self):
        self.state = 'attente_hierarchie'
        self.change_state_date = fields.Datetime.now()

    def annuler_comptabiliser(self):
        self.state = 'hierarchie_ok'
        self.change_state_date = fields.Datetime.now()

    def comptabiliser(self):
        """Post the report: imports require a non-zero Ariary amount; create
        the goods receipt unless the purchase is non-stored; then move to
        the 'comptabilise' state."""
        if self.breq_id and self.breq_id.purchase_type == 'purchase_import' and self.state == 'hierarchie_ok' and self.amount_untaxed_en_ar == 0.0:
            raise UserError("Merci de remplir le Montant Hors Taxes en Ar!")
        if self.breq_id and self.breq_id.purchase_type != 'purchase_not_stored':
            self.create_be()
        self.state = 'comptabilise'

    @api.one
    def _get_is_manager(self):
        """True when the current user is the employee's coach/manager."""
        self.is_manager = False
        current_employee_id = self.env['hr.employee'].search([('user_id','=',self.env.uid)]).id
        manager_id = self.employee_id.coach_id.id
        if current_employee_id == manager_id:
            self.is_manager = True

    @api.one
    def _get_is_creator(self):
        """True when the current user is the employee on the report."""
        self.is_creator = False
        current_employee_id = self.env['hr.employee'].search([('user_id','=',self.env.uid)], limit=1).id
        employee_id = self.employee_id.id
        if current_employee_id == employee_id:
            self.is_creator = True

    def _currency_en_ar(self):
        # The *_en_ar amounts are always expressed in Ariary (MGA).
        for bex in self :
            bex.currency_en_ar = bex.env.ref('base.MGA').id

    # --- Field declarations ---
    name = fields.Char(compute="_name_change", readonly=True)
    breq_id = fields.Many2one('purchase.order', string=u"Budget Request lié", readonly=True)
    state = fields.Selection([
        ('draft', 'Nouveau'), ('cancel', 'Cancelled'),
        ('attente_hierarchie','Avis supérieur hierarchique'),
        ('hierarchie_ok','Validation supérieur hierarchique'),
        ('comptabilise','Comptabilisé')], string='Statut', track_visibility='onchange',
        help="Etat", default='draft')
    partner_id = fields.Many2one('res.partner',related='breq_id.partner_id', readonly=True)
    location_id = fields.Many2one('stock.location', "Source Location Zone", readonly=True)
    location_dest_id = fields.Many2one('stock.location', "Source Location Zone", readonly=True)
    purchase_type = fields.Selection(related='breq_id.purchase_type')
    employee_id = fields.Many2one('hr.employee', related='breq_id.employee_id', readonly=True)
    department_id = fields.Many2one('hr.department', related='breq_id.department_id', readonly=True)
    objet = fields.Text(related='breq_id.objet', readonly=True)
    section = fields.Char(related='breq_id.section', readonly=True)
    nature = fields.Char(related='breq_id.nature', readonly=True)
    manager_id = fields.Many2one('hr.employee', related='breq_id.manager_id', readonly=True)
    is_manager = fields.Boolean(compute="_get_is_manager", string='Est un manager')
    currency_id = fields.Many2one('res.currency', related='breq_id.currency_id', string='Devise', readonly=True)
    currency_en_ar = fields.Many2one('res.currency',compute="_currency_en_ar", readonly=True)
    is_creator = fields.Boolean(compute="_get_is_creator", string='Est le demandeur')
    date = fields.Datetime('Date', default=fields.Datetime.now, readonly=True)
    origin = fields.Char('Document d\'origine', readonly=True)
    change_state_date = fields.Datetime(string="Date changement d\'état", readonly=True, help="Date du dernier changement d\'état.")
    budgetise = fields.Float("Budgetisé")
    cumul = fields.Float("Cumul Real. + ENgag.")
    solde = fields.Float("Solde de budget")
    journal_id = fields.Many2one('account.journal', string='Mode de paiement', domain=[('type', 'in', ('bank', 'cash'))])
    # Amounts realised on this BEX (computed from the lines).
    amount_untaxed_bex = fields.Float(compute='_amount_all', string='Montant HT', readonly=True, store=True)
    amount_tax_bex = fields.Float(compute='_amount_all', string='Taxes', readonly=True, store=True)
    amount_total_bex = fields.Float(compute='_amount_all', string='Total', readonly=True, store=True)
    #Budget request
    amount_untaxed_breq = fields.Float('Montant HT', readonly=True)
    amount_tax_breq = fields.Float('Taxes', readonly=True)
    amount_total_breq = fields.Float('Total', readonly=True)
    #Budget expenses Montant en Ariary
    amount_untaxed_en_ar = fields.Float('Montant HT')
    amount_tax_en_ar = fields.Float('Taxes')
    amount_total_en_ar = fields.Float(compute='_amount_en_ar',string='Total')
    observation = fields.Text("Obsevations")
    solde_rembourser = fields.Monetary('Solde à rembourser/payer')
    bex_lines = fields.One2many('bex.line', 'bex_id', string="BEX LINES")
    remise = fields.Float('Remise (%)')
    move_type = fields.Selection([
        ('direct', 'Partial'), ('one', 'All at once')], 'Delivery Type',
        default='direct',
        states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
        help="It specifies goods to be deliver partially or all at once")
    picking_type_id = fields.Many2one('stock.picking.type', 'Type de préparation', readonly=True)
    company_id = fields.Many2one('res.company', 'Company', readonly=True)
    group_id = fields.Many2one('procurement.group', 'Procurement Group', readonly=True)

    #Fonction dans achat
    @api.model
    def _prepare_picking(self):
        """Build the stock.picking values dict for this BEX's goods receipt.

        NOTE(review): ``_()`` is used below but no gettext import is visible
        in this file's header — confirm it resolves at runtime.
        """
        if not self.group_id:
            self.group_id = self.group_id.create({
                'name': self.origin,
                'partner_id': self.partner_id.id
            })
        if not self.partner_id.property_stock_supplier.id:
            raise UserError(_("You must set a Vendor Location for this partner %s") % self.partner_id.name)
        return {
            'picking_type_id': self.picking_type_id.id,
            'partner_id': self.partner_id.id,
            'bex_id': self.id,
            'date': self.date,
            'origin': self.name,
            'location_dest_id': self.picking_type_id.default_location_dest_id.id,
            'location_id': self.partner_id.property_stock_supplier.id,
            'company_id': self.company_id.id,
            'mouvement_type': 'be',
            'name': self.get_number(self.name,'BE'),
        }

    @api.multi
    def create_be(self):
        """Create one picking per BEX, attach its stock moves, confirm and
        force-assign them, and log the origin link in the chatter."""
        StockPicking = self.env['stock.picking']
        for bex in self:
            res = bex._prepare_picking()
            picking = StockPicking.create(res)
            moves = bex.bex_lines._create_stock_moves(picking)
            moves = moves.filtered(lambda x: x.state not in ('done', 'cancel')).action_confirm()
            moves.force_assign()
            picking.message_post_with_view('mail.message_origin_link',
                values={'self': picking, 'origin': bex},
                subtype_id=self.env.ref('mail.mt_note').id)
        return True
class BexLine(models.Model):
    """A single expense line on a Budget Expense Report."""
    _name = "bex.line"

    name = fields.Char('Désignation')
    bex_id = fields.Many2one('budget.expense.report', 'Reference Bex')
    product_id = fields.Many2one('product.product', 'Article')
    product_qty = fields.Float('Quantité BReq',readonly=True)
    qty_done = fields.Float('Quantité reçue')
    prix_unitaire = fields.Float('PUMP', readonly=True)
    montant_br = fields.Float('Montant BReq HT',readonly=True)
    # Realised amounts, recomputed from qty_done / taxes (see _compute_amount).
    montant_realise = fields.Float(compute='_compute_amount', string='Montant HT', readonly=True, store=True)
    montant_realise_taxe = fields.Float(compute='_compute_amount', string='Montant TTC', readonly=True, store=True)
    taxes_id = fields.Many2many('account.tax', string='Taxes', domain=['|', ('active', '=', False), ('active', '=', True)])
    purchase_type = fields.Selection([
        ('purchase_stored', 'Achats locaux stockés'),
        ('purchase_not_stored', 'Achats locaux non stockés'),
        ('purchase_import', 'Achats à l\'importation')
    ], string='Type d\'achat')
    breq_id = fields.Many2one('purchase.order', string='ID Breq')
    purchase_line_id = fields.Many2one('purchase.order.line', string='ID ligne de commande')
    product_uom = fields.Many2one('product.uom', string='Unité de mesure')

    @api.depends('qty_done', 'taxes_id')
    def _compute_amount(self):
        """Compute realised amounts (excl./incl. tax) from the received
        quantity; reject quantities outside [0, product_qty]."""
        for line in self:
            if line.qty_done > line.product_qty or line.qty_done < 0.0:
                raise UserError(u'La quantité reçue doit être inférieur ou égale à la quantité du BReq')
            taxes = line.taxes_id.compute_all(line.prix_unitaire, line.bex_id.currency_id, line.qty_done, product=line.product_id, partner=False)
            line.update({
                'montant_realise': taxes['total_excluded'],
                'montant_realise_taxe': taxes['total_included'],
            })

    @api.multi
    def _create_stock_moves(self, picking):
        """Create one draft stock.move per stockable/consumable line attached
        to ``picking``; return the created moves as a recordset."""
        moves = self.env['stock.move']
        done = self.env['stock.move'].browse()
        for line in self:
            # Services and other non-stockable products get no stock move.
            if line.product_id.type not in ['product', 'consu']:
                continue
            template = {
                'name': line.product_id.name or '',
                'product_id': line.product_id.id,
                'product_uom': line.purchase_line_id.product_uom.id,
                'product_uom_qty': line.qty_done,
                'date': line.bex_id.date,
                'date_expected': line.purchase_line_id.date_planned,
                'location_id': line.bex_id.location_id.id,
                'location_dest_id': line.bex_id.location_dest_id.id,
                'picking_id': picking.id,
                'partner_id': line.bex_id.breq_id.dest_address_id.id,
                'move_dest_id': False,
                'state': 'draft',
                'purchase_line_id': line.purchase_line_id.id,
                'company_id': line.bex_id.breq_id.company_id.id,
                'price_unit': line.prix_unitaire,
                'picking_type_id': line.bex_id.picking_type_id.id,
                'group_id': line.bex_id.group_id.id,
                'procurement_id': False,
                'origin': line.bex_id.name,
                'route_ids': line.bex_id.breq_id.picking_type_id.warehouse_id and [(6, 0, [x.id for x in line.bex_id.breq_id.picking_type_id.warehouse_id.route_ids])] or [],
                'warehouse_id':line.bex_id.breq_id.picking_type_id.warehouse_id.id,
            }
            done += moves.create(template)
        return done
class SignalPoint(object):
    """A geographic sample: latitude, longitude and signal strength."""

    def __init__(self):
        self.lat = 0.0
        self.lon = 0.0
        self.sig = 0.0

    def new_point(self, lat, lon, sig):
        """Overwrite this point's coordinates and signal value."""
        self.lat = lat
        self.lon = lon
        self.sig = sig

    def print_point(self):
        """Print "lat lon sig" on one line."""
        # FIX: the original used the Python-2 print *statement*, which is a
        # SyntaxError on Python 3; the parenthesized single-argument form
        # behaves identically on both interpreters.
        print(str(self.lat) + " " + str(self.lon) + " " + str(self.sig))

    def print_loc(self):
        """Print "lat, lon" on one line."""
        print(str(self.lat) + ", " + str(self.lon))
class Tuple3(object):
    """Container for three point objects exposing ``print_loc()``."""

    def __init__(self, sp1=None, sp2=None, sp3=None):
        # FIX: previously the sp1/sp2/sp3 attributes only existed after
        # newTuple() was called, so printTuple() on a fresh instance raised
        # AttributeError. Initialize them (backward-compatible: no-arg
        # construction still works).
        self.sp1 = sp1
        self.sp2 = sp2
        self.sp3 = sp3

    def newTuple(self, sp1, sp2, sp3):
        """(Re)assign the three stored points."""
        self.sp1 = sp1
        self.sp2 = sp2
        self.sp3 = sp3

    def printTuple(self):
        """Print each stored point's location, one per line."""
        self.sp1.print_loc()
        self.sp2.print_loc()
        self.sp3.print_loc()
|
"""terraform_to_ansible/cli.py"""
import argparse
from terraform_to_ansible.release import __package_name__, __version__
def cli_args():
    """Parse and validate the terraform_to_ansible command-line arguments."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--ansibleHost",
        help="Use private or public IPs",
        choices=["private", "public"],
        default=None,
    )
    # Consul-backend options, currently disabled:
    #   --backend {local,consul}       which Terraform backend to parse
    #   --consulHost                   Consul host (consul backend only)
    #   --consulKV                     Consul KV pair to query, e.g. Azure/Test
    #   --consulPort                   Consul host port (default 8500)
    #   --consulScheme {http,https}    Consul connection scheme
    arg_parser.add_argument("--force", help="Force overwrite", action="store_true")
    arg_parser.add_argument(
        "--output", help="Output file to save Ansible inventory to"
    )
    # Disabled: --logLevel {CRITICAL,ERROR,WARNING,INFO,DEBUG} (default INFO)
    arg_parser.add_argument(
        "--format",
        help="Format to output inventory as",
        choices=["json", "yaml"],
        default="yaml",
    )
    arg_parser.add_argument("--tfstate", help="Terraform tftstate file to parse")
    arg_parser.add_argument("--tfstatedir", help="Terraform tftstate dir to parse")
    arg_parser.add_argument(
        "--version",
        action="version",
        version=f"{__package_name__} {__version__}",
    )
    parsed = arg_parser.parse_args()
    # At least one state source must be supplied.
    if parsed.tfstate is None and parsed.tfstatedir is None:
        arg_parser.error("--tfstate or --tfstatedir are required!")
    # Disabled along with the Consul options above:
    #   consul backend requires --consulHost and --consulKV
    return parsed
|
import logging
import numpy as np
import sys
import topi.nn
import tvm
from tvm import autotvm
def bitpack(Anp, dtype="uint32"):
A = tvm.placeholder(Anp.shape, name="A", dtype=str(Anp.dtype))
B = topi.nn.bitpack(A, 1, 1, 1, dtype)
s = tvm.create_schedule(B.op)
fn = tvm.build(s, [A, B], "llvm -mcpu=core-avx2")
ctx = tvm.context("llvm -mcpu=core-avx2")
Bshape = topi.util.get_const_tuple(B.shape)
b = tvm.nd.array(np.zeros(Bshape, dtype=dtype), ctx)
fn(tvm.nd.array(Anp, ctx), b)
return b.asnumpy() # .reshape(b.shape[0], b.shape[2])
@autotvm.template
def bgemm_topi(Y, X, K, dtype="uint64"):
    """AutoTVM template for a binary GEMM: popcount of AND between
    bit-packed operands.

    :param Y: output rows (data rows).
    :param X: output cols (weight rows).
    :param K: reduction length in packed words.
    :param dtype: packed-word dtype, also used as the accumulation dtype.
    :return: (schedule, [data_packed, weight_packed, matmul])
    """
    # Single data/weight bit-plane each.
    DB = 1
    WB = 1
    out_dtype = dtype
    data_packed = tvm.placeholder((Y, DB, K), dtype=dtype, name="A")
    weight_packed = tvm.placeholder((X, WB, K), dtype=dtype, name="B")
    oshape = (Y, X)
    k = tvm.reduce_axis((0, K), name='k')
    db = tvm.reduce_axis((0, DB), name='db')
    wb = tvm.reduce_axis((0, WB), name='wb')
    # out[i, j] = sum over bit-planes and k of popcount(w & d) << (db + wb);
    # the shift weights higher bit-planes (a no-op here since DB = WB = 1).
    matmul = tvm.compute(oshape, lambda i, j: tvm.sum(
        tvm.popcount(weight_packed[j, wb, k] & data_packed[i, db, k]).astype(out_dtype)
        << (db+wb).astype(out_dtype), axis=[wb, db, k]), tag='bitserial_dense')
    s = tvm.create_schedule(matmul.op)
    cfg = autotvm.get_config()
    CC = s.cache_write(matmul, "global")
    y, x = s[matmul].op.axis
    # Tunable 2-D output tiling; inner tile sizes capped at 8.
    yo, yi = cfg.define_split("tile_y", y, num_outputs=2, filter=lambda x: x.size[-1] <= 8)
    xo, xi = cfg.define_split("tile_x", x, num_outputs=2, filter=lambda x: x.size[-1] <= 8)
    yo, yi = cfg["tile_y"].apply(s, matmul, y)
    xo, xi = cfg["tile_x"].apply(s, matmul, x)
    s[matmul].reorder(yo, xo, yi, xi)
    # Tunable placement of the cached-write stage within the tile loops.
    cfg.define_knob("compute_at_axis", [0, 1, 2])
    if cfg["compute_at_axis"].val == 0:
        s[CC].compute_at(s[matmul], xo)
    elif cfg["compute_at_axis"].val == 1:
        s[CC].compute_at(s[matmul], yi)
    elif cfg["compute_at_axis"].val == 2:
        s[CC].compute_at(s[matmul], xi)
    yc, xc = s[CC].op.axis
    wb, db, k = s[CC].op.reduce_axis
    # Tunable loop order inside the cache stage.
    cfg.define_reorder("reorder_0", [k, yc, xc], policy="all")
    cfg["reorder_0"].apply(s, CC, [k, yc, xc])
    # FLOP count for GFLOPS reporting: 2*Y*X*(K in bits); int(dtype[4:])
    # extracts the word width from e.g. "uint64".
    cfg.add_flop(2 * Y * X * K * int(dtype[4:]))
    return s, [data_packed, weight_packed, matmul]
def bgemm_topi_tuned(Y, X, K, dtype="uint64"):
    """Instantiate bgemm_topi with the best autotuned config recorded in
    matmul_wc.log and build it for LLVM/AVX2.

    :return: (built function, schedule, argument buffers)
    """
    with autotvm.apply_history_best("matmul_wc.log"):
        with tvm.target.create("llvm -mcpu=core-avx2"):
            # BUG FIX: honor the dtype argument (it was ignored and
            # "uint64" was hard-coded in the call below).
            s, arg_bufs = bgemm_topi(Y, X, K, dtype)
            return tvm.build(s, arg_bufs), s, arg_bufs
def test_bgemm(name, bgemm, dtype, M=2, N=2, K=8192, verbose=True):
    """Validate a packed binary GEMM builder against numpy, then time it.

    `bgemm` is a callable like bgemm_topi_tuned returning
    (built_function, schedule, buffers). Random 0/1 matrices of shapes
    (M, K) and (N, K) are packed with `bitpack`, multiplied on the TVM
    side, checked against a.dot(b.T), and benchmarked over 40 runs with
    the throughput printed in GFLOPS.
    NOTE(review): the `verbose` flag is never consulted in the body.
    """
    ctx = tvm.context("llvm -mcpu=core-avx2")
    a = np.random.randint(0, 2, (M, K))
    b = np.random.randint(0, 2, (N, K))
    pa = bitpack(a, dtype=dtype)
    pb = bitpack(b, dtype=dtype)
    # pa.shape[2] is the number of packed words along K after bitpacking.
    fn, s, bufs = bgemm(pa.shape[0], pb.shape[0], pa.shape[2], dtype)
    print(tvm.lower(s, bufs, simple_mode=True))
    c = tvm.nd.array(np.zeros((pa.shape[0], pb.shape[0]), dtype=dtype), ctx)
    fn(tvm.nd.array(pa, ctx), tvm.nd.array(pb, ctx), c)
    tvm.testing.assert_allclose(c.asnumpy(), a.dot(b.T))
    evaluator = fn.time_evaluator(fn.entry_name, ctx, number=40)
    time = evaluator(tvm.nd.array(pa, ctx), tvm.nd.array(pb, ctx), c).mean
    # 2 * M * K * N multiply-accumulate operations per product.
    print("{} {}/{}/{} GFLOPS: {}".format(
        name,
        M, N, K,
        2 * a.shape[0] * a.shape[1] * b.shape[0] / time / 1e9))
# Problem size: 32x32 output over 1024 bits packed into 64-bit words.
Y, X, K = 32, 32, 1024//64
if True:  # toggle off to skip the (slow) tuning sweep and reuse the log
    task = autotvm.task.create(bgemm_topi, args=(Y, X, K, "uint64"), target="llvm -mcpu=core-avx2")
    print(task.config_space)
    # Mirror autotvm progress to stdout while tuning.
    logging.getLogger('autotvm').setLevel(logging.DEBUG)
    logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
    measure_option = autotvm.measure_option(
        builder='local',
        runner=autotvm.LocalRunner(number=40))
    # Exhaustive sweep; results land in matmul_wc.log, which
    # bgemm_topi_tuned replays via apply_history_best.
    tuner = autotvm.tuner.GridSearchTuner(task)
    tuner.tune(n_trial=288,
               measure_option=measure_option,
               callbacks=[autotvm.callback.log_to_file('matmul_wc.log')])
# K*64 converts the packed-word count back to a bit count for test_bgemm.
test_bgemm("bgemm_topi_tuned", bgemm_topi_tuned, "uint64", Y, X, K*64)
|
# External Libraries
import bcrypt
from quart import abort, request
# Sayonika Internals
from framework.models import Mod, User, ModAuthor
from framework.objects import jwt_service
class Authenticator:
    """
    Class for checking permissions of users, and hashing passwords.
    """

    @classmethod
    async def has_authorized_access(cls, _, **kwargs) -> bool:
        """Checks if a user has a valid token and has verified email."""
        # Token comes from the Authorization header, falling back to the
        # "token" cookie. Quart's abort() raises, so each failed check
        # below stops the request.
        token = request.headers.get("Authorization", request.cookies.get("token"))
        if token is None:
            abort(401, "No token")
        parsed_token = await jwt_service.verify_login_token(token, True)
        if parsed_token is False:
            abort(400, "Invalid token")
        user = await User.get(parsed_token["id"])
        if not user.email_verified:
            abort(401, "User email needs to be verified")
        # XXX: this gives site devs unrestricted access. Limit in v2.
        if user.developer:
            return True
        if request.method != "GET":  # Check all methods other than get
            # Mutating requests targeting a specific mod require the caller
            # to be one of that mod's authors (or a moderator).
            if "mod_id" in kwargs:
                # NOTE(review): this filters Mod rows by a ModAuthor column
                # without an explicit join — verify Gino produces the
                # intended "mods authored by this user" result here.
                user_mods = await Mod.query.where(
                    ModAuthor.user_id == user.id
                ).gino.all()
                # NOTE(review): `user.developer` is always False at this
                # point (developers already returned above), so only the
                # moderator check is effective in this condition.
                if not (user.developer or user.moderator) and kwargs["mod_id"] not in (
                    mod.id for mod in user_mods
                ):
                    abort(
                        403,
                        "User does not have the required permissions to fulfill the request.",
                    )
        return True

    @classmethod
    async def has_supporter_features(cls) -> bool:
        """Check if a user is a supporter."""
        # Same token resolution and validation as has_authorized_access.
        token = request.headers.get("Authorization", request.cookies.get("token"))
        if token is None:
            abort(401)
        parsed_token = await jwt_service.verify_login_token(token, True)
        if parsed_token is False:
            abort(400, "Invalid token")
        user = await User.get(parsed_token["id"])
        if not user.supporter:
            abort(
                403,
                "User does not have the required permissions to fulfill the request.",
            )
        return True

    @classmethod
    async def has_admin_access(cls, developer_only: bool = False) -> bool:
        """Check if a user is an admin.

        With developer_only=True only site developers pass; otherwise both
        moderators and developers pass.
        """
        token = request.headers.get("Authorization", request.cookies.get("token"))
        if token is None:
            abort(401)
        parsed_token = await jwt_service.verify_login_token(token, True)
        if parsed_token is False:
            abort(400, "Invalid token")
        user = await User.get(parsed_token["id"])
        if (developer_only and not user.developer) or (
            not developer_only and (not user.moderator and not user.developer)
        ):
            abort(
                403,
                "User does not have the required permissions to fulfill the request.",
            )
        return True

    @classmethod
    def hash_password(cls, password: str) -> bytes:
        """Hashes a password and returns the digest."""
        # gensalt() produces a fresh salt that bcrypt embeds in the digest.
        return bcrypt.hashpw(password.encode(), bcrypt.gensalt())

    @classmethod
    def compare_password(cls, password: str, hash_: bytes) -> bool:
        """Compares a password against hash_"""
        return bcrypt.checkpw(password.encode(), hash_)
|
def count_products_below(values, limit):
    """Count single elements below *limit* plus ordered pairs (i < j) whose
    product is below *limit*, scanning the values in ascending order.

    Mirrors the original two-loop scan: the outer loop stops at the first
    element >= limit, and each inner scan stops at the first partner whose
    product reaches the limit (early exits assume non-negative values —
    matching the original's break logic).
    """
    ordered = sorted(values)
    total = 0
    for i, a in enumerate(ordered):
        if a >= limit:
            break  # every later element is at least as large
        total += 1  # the element alone qualifies
        for b in ordered[i + 1:]:
            if a * b < limit:
                total += 1
            else:
                break  # later partners only grow the product
    return total


if __name__ == "__main__":
    arr = list(map(int, input().split(' ')))
    k = int(input())
    # The original also printed the sorted array here; that looked like
    # leftover debug output and was removed.
    print(count_products_below(arr, k))
# Apple checkout: 8.5 yuan per jin, 7.5 jin picked, flat 5-yuan rebate.
price = 8.5   # unit price of apples (yuan per jin)
weight = 7.5  # amount of apples picked (jin)
# Amount due: subtotal minus the flat "buy apples, get 5 yuan back" rebate.
money = weight * price - 5
print(money)
#!/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import json
import tornado.ioloop
import tornado.websocket
import do_sql
from tornado.web import url
class BaseHandler(tornado.web.RequestHandler):
    """Shared cookie-based session handling for all page handlers."""

    # Name of the secure cookie that stores the logged-in username.
    cookie_username = 'username'

    def get_current_user(self):
        """Return the current username as utf-8 bytes, or None if not logged in."""
        username = self.get_secure_cookie(self.cookie_username)
        if not username:
            return None
        return tornado.escape.utf8(username)

    def set_current_user(self, username):
        """Persist *username* in the secure session cookie."""
        self.set_secure_cookie(self.cookie_username, tornado.escape.utf8(username))

    # Backward-compatible alias: the original method name carried a typo
    # ("cuurent") and is still called by LoginHandler, so it must keep working.
    def set_cuurent_user(self, username):
        self.set_current_user(username)

    def clear_current_user(self):
        """Delete the session cookie, logging the user out."""
        self.clear_cookie(self.cookie_username)
class HomeHandler(BaseHandler):
    """Landing page after login: shows the current user's friend list."""

    @tornado.web.authenticated
    def get(self):
        # current_user is bytes (see BaseHandler.get_current_user), so it
        # is decoded before being used as a lookup key.
        friend_list = do_sql.DoSql.select_friends(self.current_user.decode('utf-8'))
        self.render('home.html', friend_list = friend_list)
class TalkHandler(BaseHandler):
    """Serves the chat page; the page itself connects to ChatHandler via WebSocket."""

    @tornado.web.authenticated
    def get(self):
        self.render('talk.html')
class ChatHandler(tornado.websocket.WebSocketHandler, BaseHandler):
    """WebSocket endpoint that broadcasts chat messages to other users."""

    # Open connections (handler instances) and the shared message history,
    # both shared across all connections via the class.
    waiters = set()
    messages = []

    def open(self, *args, **kwargs):
        # Bug fix: the original stored self.current_user (a bytes username)
        # in `waiters` and later called .write_message() on those bytes,
        # which would raise AttributeError. Store the handler instance
        # itself so broadcasting works.
        ChatHandler.waiters.add(self)
        # Replay the accumulated history to the newly connected client.
        self.write_message({'messages': self.messages})

    def on_message(self, message):
        message = json.loads(message)
        self.messages.append(message)
        for waiter in ChatHandler.waiters:
            # Skip connections belonging to the sender, mirroring the
            # original's intent of not echoing back to the same user.
            if waiter.current_user == self.current_user:
                continue
            waiter.write_message({'message': message['message']})

    def on_close(self):
        # discard() instead of remove(): closing a connection that never
        # fully opened must not raise KeyError.
        ChatHandler.waiters.discard(self)
class LoginHandler(BaseHandler):
    """Renders the login form and authenticates posted credentials."""

    def get(self):
        self.render('login.html')

    def post(self):
        username = self.get_argument('username')
        password = self.get_argument('password')
        try:
            # select_user is expected to return the matching (username,
            # password) pair when the credentials are valid.
            authenticated = (username, password) == (do_sql.DoSql.select_user(username, password))
        except Exception:
            # Bug fix: the original bare `except:` also swallowed
            # SystemExit/KeyboardInterrupt; catch Exception only.
            authenticated = False
        if authenticated:
            self.set_cuurent_user(username)  # (sic) method name from BaseHandler
            self.redirect('/home')
        else:
            # Bug fix: a credential mismatch previously fell through
            # without sending any response; send the user back to login.
            self.redirect('/')
class LogoutHanler(BaseHandler):
    """Logs the user out by clearing the session cookie.

    NOTE(review): the class name is misspelled ("Hanler"); it is kept
    as-is because URL routes defined elsewhere may reference it.
    """

    @tornado.web.authenticated
    def get(self):
        self.clear_current_user()
        self.redirect('/')
class EntryHandler(tornado.web.RequestHandler):
    """Sign-up page: renders the form and registers a new user."""

    def get(self):
        self.render('entry.html')

    def post(self):
        try:
            username = self.get_argument('username')
            email = self.get_argument('email')
            password = self.get_argument('password')
            # do_sql.DoSql.entry_user(username,email,password) #insert to db(no protected)
            # Bug fix: the original called self.render('/'), which treats
            # '/' as a template *name* and fails (sending every successful
            # post into the except branch); a redirect to the site root is
            # what was intended.
            self.redirect('/')
        except Exception:
            # Missing form fields (get_argument raises) land here.
            self.redirect('/entry')
class ProfileHandler(BaseHandler):
    """Shows the logged-in user's profile page."""

    @tornado.web.authenticated
    def get(self):
        self.render('profile.html')
|
import demistomock as demisto # noqa
import ExpanseRefreshIssueAssets
# Incident fixture as returned by demisto.incident(): three stale assets
# whose ids should be replaced by a refresh.
EXAMPLE_INCIDENT = {
    'CustomFields': {
        'expanseasset': [
            {'assettype': 'Certificate', 'assetkey': 'fakeMD5', 'id': 'id-old-certificate'},
            {'assettype': 'IpRange', 'assetkey': 'fakeIPRange', 'id': 'id-old-iprange'},
            {'assettype': 'Domain', 'assetkey': 'fakeDomain', 'id': 'id-old-domain'},
        ]
    }
}
# Expected payload passed to setIncident after refreshing: fresh ids plus
# newline-joined tags and attribution reasons per asset.
REFRESH_RESULT = {'expanseasset': [{'assettype': 'Certificate',
                                    'assetkey': 'fakeMD5',
                                    'tags': 'tag-certificate',
                                    'id': 'id-certificate',
                                    'attributionReasons': 'fake-certificate-reason1\nfake-certificate-reason2'},
                                   {'assettype': 'IpRange',
                                    'assetkey': 'fakeIPRange',
                                    'tags': 'tag-iprange1\ntag-iprange2',
                                    'id': 'id-iprange',
                                    'attributionReasons': 'fake-iprange-reason'},
                                   {'assettype': 'Domain',
                                    'assetkey': 'fakeDomain',
                                    'tags': 'tag-domain',
                                    'id': 'id-domain',
                                    'attributionReasons': 'fake-domain-reason'},
                                   ]}
# Raw asset payloads returned by the mocked expanse-get-* commands below.
ASSET_CERTIFICATE = {
    'annotations': {
        'tags': [{'name': 'tag-certificate'}]
    },
    'attributionReasons': [{'reason': 'fake-certificate-reason1'}, {'reason': 'fake-certificate-reason2'}],
    'id': 'id-certificate'
}
ASSET_IPRANGE = {
    'annotations': {
        'tags': [{'name': 'tag-iprange1'}, {'name': 'tag-iprange2'}]
    },
    'attributionReasons': [{'reason': 'fake-iprange-reason'}],
    'id': 'id-iprange'
}
ASSET_DOMAIN = {
    'annotations': {
        'tags': [{'name': 'tag-domain'}]
    },
    'attributionReasons': [{'reason': 'fake-domain-reason'}],
    'id': 'id-domain'
}
def test_refresh_issue_assets_command(mocker):
    """
    Given:
        - current incident with iprange, domain and certificate assets
    When
        - Refreshing Expanse assets for an incident
    Then
        - commands are invoked to refresh asset data
        - incident is updated with the refreshed asset data
    """
    # Map (command, argument-name, expected-value) to the canned response.
    canned = {
        ('expanse-get-domain', 'domain', 'fakeDomain'): ASSET_DOMAIN,
        ('expanse-get-iprange', 'id', 'fakeIPRange'): ASSET_IPRANGE,
        ('expanse-get-certificate', 'md5_hash', 'fakeMD5'): ASSET_CERTIFICATE,
    }

    def fake_execute_command(name, args):
        if name == "setIncident":
            return "OK"
        for (command, arg_key, arg_value), asset in canned.items():
            if name == command and args.get(arg_key) == arg_value:
                return [{'Contents': asset}]
        raise ValueError(f"Error: Unknown command or command/argument pair: {name} {args!r}")

    mocker.patch.object(demisto, 'incident', return_value=EXAMPLE_INCIDENT)
    ec_mock = mocker.patch.object(demisto, 'executeCommand', side_effect=fake_execute_command)

    result = ExpanseRefreshIssueAssets.refresh_issue_assets_command({})

    assert result.readable_output == "OK"
    # Three per-asset refresh calls followed by the final setIncident.
    assert len(ec_mock.call_args_list) == 4
    assert ec_mock.call_args_list[3][0][0] == "setIncident"
    assert ec_mock.call_args_list[3][0][1] == REFRESH_RESULT
|
# Python is dynamic: instances created from a class can be given arbitrary
# attributes at any time, either through an instance variable or via `self`.
class Student(object):
    """Minimal student record holding just a name."""

    def __init__(self, name):
        self.name = name


s = Student('Bob')
s.score = 90  # attribute attached dynamically to this single instance
# When writing programs, never give an instance attribute the same name as a
# class attribute: the instance attribute shadows the class attribute, and
# once the instance attribute is deleted, the same name resolves to the
# class attribute again.
|
class Solution:
    # @param {character[][]} board
    # @return {void} Do not return anything, modify board in-place instead.
    def bfs(self, x, y, board):
        """Flood-fill: mark the '0' region reachable from (x, y) with '*'.

        Despite the name, `queue.pop()` takes from the end, so this runs in
        DFS order — correctness is unaffected.
        """
        m = len(board)
        n = len(board[0])
        if board[x][y] != '0':
            return
        board[x][y] = '*'
        queue = [(x, y)]
        directions = [[-1, 0], [1, 0], [0, 1], [0, -1]]
        while queue:
            x, y = queue.pop()
            for dx, dy in directions:
                nx, ny = x + dx, y + dy
                if 0 <= nx < m and 0 <= ny < n and board[nx][ny] == '0':
                    board[nx][ny] = '*'
                    queue.append((nx, ny))

    def solve(self, board):
        """Capture '0' regions fully surrounded by 'X' (in place).

        Border-connected '0' regions are protected by marking them '*',
        every remaining '0' is captured to 'X', and the marks are restored.
        The (possibly unchanged) board is returned for convenience.

        Bug fixes vs. the original:
        - `board is None` was tested *after* len(board), raising TypeError
          for a None board; the guard order is corrected.
        - early exits returned None while the main path returned the board;
          every path now returns the board object consistently.
        """
        if board is None or len(board) <= 2:
            return board  # nothing enclosable in <= 2 rows
        m = len(board)
        n = len(board[0])
        if n <= 2:
            return board  # nothing enclosable in <= 2 columns
        # Protect every region touching the left/right and top/bottom edges.
        for i in range(m):
            self.bfs(i, 0, board)
            self.bfs(i, n - 1, board)
        for j in range(n):
            self.bfs(0, j, board)
            self.bfs(m - 1, j, board)
        for i in range(m):
            for j in range(n):
                if board[i][j] == '*':
                    board[i][j] = '0'  # border-connected region survives
                elif board[i][j] == '0':
                    board[i][j] = 'X'  # enclosed region is captured
        return board
# Quick manual check: only the '0' touching the bottom border survives;
# the fully enclosed ones are captured to 'X'.
test = Solution()
board = [list(row) for row in ("XXXX",
                               "X00X",
                               "XX0X",
                               "X0XX")]
print(test.solve(board))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.