text stringlengths 38 1.54M |
|---|
# Demo of basic dict usage: direct indexing, .get() with a default,
# mutation, and iteration over key/value pairs.
person = {'first_name': 'marek', 'last_name': 'czekalski', 'age': 23, 'city': 'kielce',}
print(person)

name = person['first_name'].title()
print(name)

# .get() falls back to the default while the key is absent...
possesion = person.get('possesion', 'no possesion declared')
print(possesion.title())

# ...and returns the stored value once the key has been added.
person['possesion'] = 'student'
possesion = person.get('possesion', 'no possesion declared')
print(possesion.title())

print('-------------------FOR-------------------')
for key, value in person.items():
    new_val = str(value)
    print(type(new_val))
    print(f'{key}: {new_val.title()}')
print()
|
"""Access to VoxelBrain.
https://nip.humanbrainproject.eu/documentation/user-manual.html#voxel-brain
"""
import abc
import json
import os
import urllib
import urllib.parse

import numpy as np
import requests

from voxcell import RegionMap, VoxelData, math_utils
from voxcell.exceptions import VoxcellError
def _download_file(url, filepath, overwrite, allow_empty=False):
    """Download `url` to `filepath` unless the file already exists.

    The payload is first written to a temporary sibling file and then renamed
    into place; the temporary file is removed on any failure. Raises
    VoxcellError on an empty response body unless `allow_empty` is set.
    """
    if not overwrite and os.path.exists(filepath):
        return filepath
    tmp_filepath = filepath + ".download"
    try:
        resp = requests.get(url, timeout=None)
        resp.raise_for_status()
        if resp.content or allow_empty:
            with open(tmp_filepath, "wb") as f:
                f.write(resp.content)
            os.rename(tmp_filepath, filepath)
        else:
            raise VoxcellError("Empty content")
    finally:
        try:
            os.unlink(tmp_filepath)
        except OSError:
            # Either the rename already consumed the temp file (success path)
            # or it was never created; both are fine.
            pass
    return filepath
class Atlas:
    """Helper class for atlas access."""

    __metaclass__ = abc.ABCMeta

    def __init__(self):
        """Init Atlas."""
        # In-memory cache shared by all `memcache=True` accessors.
        self._memcache = {}

    @staticmethod
    def open(url, cache_dir=None):
        """Get Atlas object to access atlas stored at URL.

        Local paths (and file:// URLs) yield a LocalAtlas; http(s) URLs
        pointing at a VoxelBrain release yield a VoxelBrainAtlas, which
        requires `cache_dir` for downloaded files.

        Raises:
            VoxcellError: if the URL scheme/path is not recognized, or if
                `cache_dir` is missing for a remote atlas.
        """
        parsed = urllib.parse.urlsplit(url)
        if parsed.scheme in ('', 'file'):
            return LocalAtlas(url)
        if parsed.scheme in ('http', 'https'):
            if not parsed.path.startswith('/api/analytics/atlas/releases/'):
                raise VoxcellError(f"Unexpected URL: '{url}'")
            if cache_dir is None:
                raise VoxcellError("`cache_dir` should be specified")
            return VoxelBrainAtlas(url, cache_dir)
        raise VoxcellError(f"Unexpected URL: '{url}'")

    @abc.abstractmethod
    def fetch_data(self, data_type):
        """Fetch `data_type` NRRD."""

    @abc.abstractmethod
    def fetch_hierarchy(self):
        """Fetch brain region hierarchy JSON."""

    def _check_cache(self, key, callback, memcache):
        # Return the memoized value for `key` if present; otherwise compute it
        # with `callback`, storing the result only when `memcache` is set.
        if key in self._memcache:
            return self._memcache[key]
        result = callback()
        if memcache:
            self._memcache[key] = result
        return result

    def load_data(self, data_type, cls=VoxelData, memcache=False):
        """Load atlas data layer."""
        def _callback():
            return cls.load_nrrd(self.fetch_data(data_type))
        return self._check_cache(
            ('data', data_type, cls),
            callback=_callback,
            memcache=memcache
        )

    def load_region_map(self, memcache=False):
        """Load brain region hierarchy as RegionMap."""
        def _callback():
            return RegionMap.load_json(self.fetch_hierarchy())
        return self._check_cache(
            ('region_map',),
            callback=_callback,
            memcache=memcache
        )

    def get_region_mask(self, value, attr='acronym', with_descendants=True,
                        ignore_case=False, memcache=False):
        """Get VoxelData with 0/1 mask indicating regions matching `value`."""
        def _callback():
            rmap = self.load_region_map()
            brain_regions = self.load_data('brain_regions')
            region_ids = rmap.find(
                value, attr=attr, with_descendants=with_descendants,
                ignore_case=ignore_case
            )
            if not region_ids:
                raise VoxcellError(f"Region not found: '{value}'")
            result = math_utils.isin(brain_regions.raw, region_ids)
            return brain_regions.with_data(result)
        # BUG FIX: `ignore_case` affects the query result, so it must be part
        # of the cache key; previously two memcached calls differing only in
        # `ignore_case` shared one memoized mask.
        return self._check_cache(
            ('region_mask', value, attr, with_descendants, ignore_case),
            callback=_callback,
            memcache=memcache
        )
class VoxelBrainAtlas(Atlas):
    """Helper class for VoxelBrain atlas."""

    def __init__(self, url, cache_dir):
        """Init VoxelBrainAtlas."""
        super().__init__()
        self._url = url.rstrip("/")
        resp = requests.get(self._url, timeout=None)
        resp.raise_for_status()
        atlas_id = resp.json()[0]['id']
        assert self._url.endswith(atlas_id)
        # Downloads are cached per atlas release.
        self._cache_dir = os.path.join(cache_dir, atlas_id)
        if not os.path.exists(self._cache_dir):
            os.makedirs(self._cache_dir)

    def fetch_data(self, data_type):
        """Fetch `data_type` NRRD."""
        resp = requests.get(self._url + "/data", timeout=None)
        resp.raise_for_status()
        seen, url = [], None
        for item in resp.json():
            if item['data_type'] == data_type:
                url = item['url']
                break
            seen.append(item['data_type'])
        if url is None:
            raise VoxcellError(
                f"`data_type` should be one of ({','.join(seen)}), provided: {data_type}"
            )
        filepath = os.path.join(self._cache_dir, f"{data_type}.nrrd")
        return _download_file(url, filepath, overwrite=False)

    def fetch_hierarchy(self):
        """Fetch brain region hierarchy JSON."""
        url = self._url + "/filters/brain_region/65535"
        filepath = os.path.join(self._cache_dir, "hierarchy.json")
        return _download_file(url, filepath, overwrite=False)
class LocalAtlas(Atlas):
    """Helper class for locally stored atlas."""

    def __init__(self, dirpath):
        """Init LocalAtlas with the directory holding the atlas files."""
        super().__init__()
        self.dirpath = dirpath

    def _get_filepath(self, filename):
        """Return the path of `filename` inside the atlas dir; raise if absent."""
        result = os.path.join(self.dirpath, filename)
        if not os.path.exists(result):
            raise VoxcellError(f"File not found: '{result}'")
        return result

    def fetch_data(self, data_type):
        """Return filepath to `data_type` NRRD."""
        return self._get_filepath(f"{data_type}.nrrd")

    def fetch_hierarchy(self):
        """Return filepath to brain region hierarchy JSON."""
        return self._get_filepath("hierarchy.json")

    def fetch_metadata(self):
        """Return filepath to metadata JSON."""
        return self._get_filepath("metadata.json")

    def load_metadata(self, memcache=False):
        """Load brain region metadata as dict."""
        def _callback():
            with open(self.fetch_metadata(), 'r', encoding='utf-8') as f:
                return json.load(f)
        return self._check_cache(('metadata',), callback=_callback, memcache=memcache)

    def get_layers(self):
        """Retrieve and cache the identifiers of each layer of a laminar brain region.

        For each layer of an annotated brain volume, return the set of
        identifiers of all regions included in that layer.

        Note: relies on the existence of hierarchy.json and metadata.json;
        see get_layer for the description of the metadata.json file.

        Returns:
            tuple (names, ids) where `names` is a list of layer names and
            `ids` a list of sets; each set contains the identifiers (ints)
            of the layer with the corresponding name (str).

        Raises:
            VoxcellError:
                * if the hierarchy file or the metadata file doesn't exist.
                * if metadata.json doesn't contain the key "layers"
                * if the value of "layers" doesn't contain all the required
                  keys: "names", "queries" and "attribute".
                * if the value of "names" or "queries" is not a list, or if
                  these objects are two lists of different lengths.
        """
        def _callback():
            metadata = self.load_metadata()
            if 'layers' not in metadata:
                raise VoxcellError('Missing "layers" key')
            layers = metadata['layers']
            # Consistency: validate through the `layers` alias bound above
            # instead of re-indexing `metadata`.
            if not all(key in layers for key in ['names', 'queries', 'attribute']):
                err_msg = (
                    'Missing some "layers" key. The "layers" dictionary has '
                    'the following mandatory keys: "names", "queries" and "attribute"'
                )
                raise VoxcellError(err_msg)
            if not (
                isinstance(layers['names'], list)
                and isinstance(layers['queries'], list)
                and len(layers['names']) == len(layers['queries'])
            ):
                raise VoxcellError(
                    'The values of "names" and "queries" must be lists of the same length'
                )
            region_map = self.load_region_map()
            ids = [
                region_map.find(query, attr=layers['attribute'], with_descendants=True)
                for query in layers['queries']
            ]
            return (layers['names'], ids)
        # BUG FIX: the key was written as ('get_layer_ids') -- a bare string,
        # not a tuple, due to the missing comma. Use a real 1-tuple so every
        # _memcache key has the same shape as elsewhere in this module.
        return self._check_cache(
            ('get_layer_ids',),
            callback=_callback,
            memcache=True,
        )

    def get_layer(self, layer_index):
        """Retrieve the identifiers of a specified layer in a laminar brain region.

        Given a layer of an annotated brain volume, return the set of
        identifiers of all regions included in this layer.

        Note: relies on the existence of hierarchy.json and metadata.json.
        The content of metadata.json must be of the following form::

            {
                ...
                "layers": {
                    "names": [
                        "layer 1", "layer 2/3", "layer 4", "layer 5", "layer 6", "Olfactory areas"
                    ],
                    "queries": ["@.*1$", "@.*2/3$", "@.*4$", "@.*5$", ".*6[a|b]?$", "OLF"],
                    "attribute": "acronym"
                },
                ...
            }

        The strings of the `queries` list are in one-to-one correspondence
        with layer `names`. Each query string retrieves the region ids of the
        corresponding layer via the RegionMap built from hierarchy.json; the
        query syntax is that of RegionMap.find (a leading '@' marks a regular
        expression, otherwise a full-string match). The value of `attribute`
        is `acronym` or `name` and applies to every query string.

        Returns:
            tuple (name, ids) where `name` is the layer name (str) at
            `layer_index` and `ids` is the set of region identifiers of every
            region in this layer according to hierarchy.json.

        Raises:
            VoxcellError: under the same conditions as get_layers.
        """
        layer_names, layer_ids = self.get_layers()
        return (layer_names[layer_index], layer_ids[layer_index])

    def get_layer_volume(self, memcache=False):
        """Get VoxelData labeling `brain_regions` voxels with layer indices.

        Layer indices range from 1 to the number of layers defined in
        metadata.json (a list of RegionMap.find query strings). If two layer
        definitions involve the same region identifier, the voxels bearing
        this identifier are labeled with the largest layer index.

        Args:
            memcache: If True, use cache. Otherwise re-compute the layer
                volume. Defaults to False.

        Returns:
            VoxelData object with the same shape and metadata as
            brain_regions; voxels carry uint8 layer-index labels.
        """
        def _callback():
            brain_regions = self.load_data('brain_regions')
            _, layer_ids = self.get_layers()
            layers = np.zeros_like(brain_regions.raw, dtype=np.uint8)
            # Later layers overwrite earlier ones, so a voxel matched by two
            # queries ends up with the larger layer index.
            for index, ids in enumerate(layer_ids, 1):
                mask = math_utils.isin(brain_regions.raw, ids)
                layers[mask] = index
            return brain_regions.with_data(layers)
        # Same missing-comma fix as in get_layers: use a real tuple key.
        return self._check_cache(
            ('layer_volume',),
            callback=_callback,
            memcache=memcache,
        )
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adjust field options on the `votes` app models (verbose names,
    defaults, max lengths); no columns are added or removed."""

    dependencies = [
        ('votes', '0002_liste_nombre_votes'),
    ]

    operations = [
        migrations.AlterField(
            model_name='liste',
            name='liste_couleur',
            field=models.CharField(verbose_name='Couleur (hexa)', max_length=7, default='#fff'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='liste',
            name='liste_logo',
            field=models.ImageField(upload_to='logos_listes'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='liste',
            name='liste_nom',
            field=models.CharField(verbose_name='Nom', max_length=50),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='liste',
            name='nombre_votes',
            field=models.IntegerField(verbose_name='Nombre de votes', default=0),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='liste',
            name='type',
            field=models.ForeignKey(verbose_name='Type de liste', to='votes.TypeListe'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='typeliste',
            name='typeliste_nom',
            field=models.CharField(verbose_name='Nom du type', max_length=10),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='vote',
            name='date',
            field=models.DateTimeField(verbose_name='Date et heure'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='vote',
            name='ip',
            # NOTE(review): IPAddressField is deprecated in modern Django in
            # favour of GenericIPAddressField -- fine for the Django version
            # this migration targets, but worth confirming on upgrade.
            field=models.IPAddressField(verbose_name='Adresse IP'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='vote',
            name='liste',
            field=models.ForeignKey(verbose_name='Liste', to='votes.Liste'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='vote',
            name='pseudo',
            field=models.CharField(verbose_name='Pseudo du voteur', max_length=50),
            preserve_default=True,
        ),
    ]
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from navbar.models import Lang, Contact, Navbar_lang, Social_media, Toetaja
from .models import Treenerid_lang, Treener
# Create your views here.
def treenerid(request):
    """Render the coaches ("treenerid") page in the session's language.

    Side effects: initializes session['lang'] to 'est' on first visit and
    updates it when the language-select form is POSTed, then redirects so a
    browser refresh does not re-submit the form.
    """
    if 'lang' not in request.session:
        request.session['lang'] = 'est'
    if request.POST:  # idiomatic truthiness instead of bool(...) == True
        # .get() avoids a KeyError if some other form POSTs to this view.
        if request.POST.get('submit-btn') == 'lang':
            request.session['lang'] = request.POST['langselect']
        return HttpResponseRedirect('/treenerid')  # for the index page use '/'
    # Resolve the Lang row once instead of three identical DB queries.
    current_lang = Lang.objects.get(lang=request.session['lang'])
    return render(request, 'treenerid.html', context={
        'contact': Contact.objects.all()[0],
        'navbar_lang': Navbar_lang.objects.get(lang=current_lang),
        'flags': Lang.objects.all(),
        'lang': Treenerid_lang.objects.get(lang=current_lang),
        'social_media': Social_media.objects.all(),
        'toetajad': Toetaja.objects.all(),
        'treenerid': Treener.objects.filter(lang=current_lang),
    })
|
# Original REPL sketch of the algorithm, kept for reference:
#>>> s1 = 'vivid'
#>>> s2 = 'dvivi'
#>>> s3 = 'vivid'
#>>> def is_anagram(s1, s2):
#... if s1.lower() == s2.lower():
#... return False
#... return sorted(s1.lower()) == sorted(s2.lower())
# Read the two candidate strings from stdin.
s1 = input("Enter the first string:")
s2 = input("Enter the second string:")
def anagramCheck(s1, s2):
    """Return True if s1 and s2 are case-insensitive anagrams of each other.

    Strings that are equal (ignoring case) are deliberately NOT counted as
    anagrams of themselves.
    """
    a, b = s1.lower(), s2.lower()
    return a != b and sorted(a) == sorted(b)
print("Anagram :", anagramCheck(s1, s2))
|
import os
import random
import time
#live_file_tmp = open("live_log","r")
def read():
    """Tail "accesslog" forever, copying lines to "live_log" in random bursts.

    A burst of `random_val()` lines is written, then the loop sleeps 2s and
    draws a new burst size. Runs until interrupted.

    NOTE(review): on the sleep iteration a line is read and discarded, and
    `readline()` returns '' at EOF so empty strings get written once the
    access log is exhausted -- presumably the log keeps growing; confirm.
    """
    global live_file
    # write_log() relies on this module-level handle.
    live_file = open("live_log", "a")
    cnt = random_val()
    # Context manager ensures the source log is closed if the loop ever exits.
    with open("accesslog", "r") as source:
        while True:
            data = source.readline()
            if cnt == 0:
                time.sleep(2)
                cnt = random_val()
            else:
                write_log(str(data))
                cnt -= 1
def write_log(data):
    """Append one line to the live log.

    Relies on the module-level `live_file` handle opened by read().
    """
    live_file.write(data)
def random_val():
    """Return a random burst size: a multiple of 20 in the range [100, 500)."""
    return random.randrange(100, 500, 20)
if __name__ == '__main__':
    # Entry point: tail the access log forever (blocks until interrupted).
    read()
"""
# THREADING
def read():
file = open("log/access","r")
while (True):
data = file.readlines()
write_log(n,data)
#t = random.randrange(1,5,1)
#time.sleep(6)
def write_log(n,data):
for line in data:
live_file.write(data)
if __name__ == '__main__':
read()
"""
"""
# ORIGINAL
#LIVE DATA READ AND WRITE
live_file_tmp = open("live_log","r")
def read():
file = open("log/access","r")
global live_file
live_file = open("live_log","a")
while (True):
#n = random.randrange(10,50,100)
#print(n)
data = file.readline()
#pdb.set_trace()
#print(data)
#pdb.set_trace()
write_log(str(data))
#pdb.set_trace()
#t = random.randrange(1,5,1)
def write_log(data):
#for line in data:
live_file.write(data)
tmp=live_file_tmp.readline()
print(tmp)
#print(data)
time.sleep(0.1)
if __name__ == '__main__':
read()
""" |
"""
This module contains the eth2 HTTP validator API connecting a validator client to a beacon node.
"""
from abc import ABC
from dataclasses import asdict, dataclass, field
from enum import Enum, unique
import logging
from typing import Collection, Iterable, Optional, Set
from eth_typing import BLSPubkey, BLSSignature
from eth_utils import decode_hex, encode_hex, humanize_hash, to_tuple
from ssz.tools.dump import to_formatted_dict
from ssz.tools.parse import from_formatted_dict
from eth2.beacon.chains.abc import BaseBeaconChain, advance_state_to_slot
from eth2.beacon.constants import GENESIS_EPOCH
from eth2.beacon.exceptions import NoCommitteeAssignment
from eth2.beacon.helpers import (
compute_epoch_at_slot,
compute_start_slot_at_epoch,
get_block_root_at_slot,
)
from eth2.beacon.tools.builder.committee_assignment import get_committee_assignment
from eth2.beacon.tools.builder.proposer import create_block_proposal, is_proposer
from eth2.beacon.types.attestations import Attestation, AttestationData
from eth2.beacon.types.blocks import BeaconBlock, SignedBeaconBlock
from eth2.beacon.types.checkpoints import Checkpoint
from eth2.beacon.types.states import BeaconState
from eth2.beacon.typing import Bitfield, CommitteeIndex, Epoch, Root, Slot
from eth2.clock import Clock
from eth2.configs import Eth2Config
from trinity._utils.trio_utils import Request, Response
# Module-level logger for the validator API handlers.
logger = logging.getLogger("eth2.api.http.validator")

# Upper bound on the backwards slot scan when hunting for a parent block
# across skipped slots (see Context._search_linearly_for_parent).
# TODO what is a reasonable number here?
MAX_SEARCH_SLOTS = 500
class ServerError(Exception):
    """Raised when the node cannot satisfy an otherwise well-formed request."""
    pass
class InvalidRequest(Exception):
    """Raised when a request's parameters are invalid (e.g. slot < 1)."""
    pass
def _get_target_checkpoint(
    state: BeaconState, head_root: Root, config: Eth2Config
) -> Checkpoint:
    """Build the attestation target checkpoint for the state's current epoch.

    The target root is the block root at the epoch's start slot, except when
    the state sits exactly at that slot, in which case the head root is used.
    """
    epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
    start_slot = compute_start_slot_at_epoch(epoch, config.SLOTS_PER_EPOCH)
    root = (
        head_root
        if start_slot == state.slot
        else get_block_root_at_slot(state, start_slot, config.SLOTS_PER_HISTORICAL_ROOT)
    )
    return Checkpoint.create(epoch=epoch, root=root)
@unique
class Paths(Enum):
    """URL paths served by the validator HTTP API (see ServerHandlers)."""

    chain_info = "/chain/info"
    node_version = "/node/version"
    genesis_time = "/node/genesis_time"
    sync_status = "/node/syncing"
    validator_duties = "/validator/duties"
    block_proposal = "/validator/block"
    attestation = "/validator/attestation"
@dataclass
class SyncStatus:
    """Snapshot of the node's sync progress, as reported by the syncer."""

    is_syncing: bool
    starting_slot: Slot
    current_slot: Slot
    highest_slot: Slot
class SyncerAPI(ABC):
    """Interface for querying the beacon node's sync state."""

    async def get_status(self) -> SyncStatus:
        """Return the current sync status."""
        ...
class BlockBroadcasterAPI(ABC):
    """Interface used to publish signed blocks to the network."""

    async def broadcast_block(self, block: SignedBeaconBlock) -> None:
        """Broadcast `block` to peers."""
        ...
@dataclass
class ValidatorDuty:
    """Assignment summary for one validator in a given epoch.

    A `block_proposal_slot` of 2**64 - 1 is a sentinel meaning "no proposal"
    (see Context.get_validator_duties).
    """

    validator_pubkey: BLSPubkey
    attestation_slot: Slot
    committee_index: CommitteeIndex
    block_proposal_slot: Slot
@dataclass
class Context:
    """Server-wide state plus the domain logic behind each HTTP handler."""

    client_identifier: str
    genesis_time: int  # Unix timestamp
    eth2_config: Eth2Config
    syncer: SyncerAPI
    chain: BaseBeaconChain
    clock: Clock
    block_broadcaster: BlockBroadcasterAPI
    # Roots of every block/attestation this server has broadcast.
    _broadcast_operations: Set[Root] = field(default_factory=set)

    async def get_sync_status(self) -> SyncStatus:
        """Return the syncer's current status."""
        return await self.syncer.get_status()

    @to_tuple
    def get_validator_duties(
        self, public_keys: Collection[BLSPubkey], epoch: Epoch
    ) -> Iterable[ValidatorDuty]:
        """Yield a ValidatorDuty for each key with a committee assignment in `epoch`.

        Keys without an assignment are skipped. `Slot((1 << 64) - 1)` is a
        sentinel meaning "no block proposal".
        """
        if epoch < GENESIS_EPOCH:
            return ()
        current_tick = self.clock.compute_current_tick()
        state = advance_state_to_slot(self.chain, current_tick.slot)
        for public_key in public_keys:
            validator_index = state.get_validator_index_for_public_key(public_key)
            try:
                committee_assignment = get_committee_assignment(
                    state, self.eth2_config, epoch, validator_index
                )
            except NoCommitteeAssignment:
                continue
            if is_proposer(state, validator_index, self.eth2_config):
                # TODO (ralexstokes) clean this up!
                if state.slot != 0:
                    block_proposal_slot = state.slot
                else:
                    # Proposer at the genesis slot still reports "no proposal".
                    block_proposal_slot = Slot((1 << 64) - 1)
            else:
                # NOTE: temporary sentinel value for "no slot"
                # The API has since been updated w/ much better ergonomics
                block_proposal_slot = Slot((1 << 64) - 1)
            yield ValidatorDuty(
                public_key,
                committee_assignment.slot,
                committee_assignment.committee_index,
                block_proposal_slot,
            )

    def _search_linearly_for_parent(
        self, target_slot: Slot
    ) -> Optional[SignedBeaconBlock]:
        """
        Linear search for a canonical block in the chain starting at ``target_slot``
        and going backwards until finding a block. This search happens when
        there are skipped slots in the chain.
        NOTE: The expected number of skipped slots during normal protocol operation is very low.
        """
        # Scan at most MAX_SEARCH_SLOTS slots, from target_slot - 1 downwards.
        for _slot in range(target_slot - 1, target_slot - 1 - MAX_SEARCH_SLOTS, -1):
            block = self.chain.get_block_by_slot(Slot(_slot))
            if block:
                return block
        return None

    def get_block_proposal(
        self, slot: Slot, randao_reveal: BLSSignature
    ) -> BeaconBlock:
        """Build an unsigned block proposal for `slot` on top of the best parent.

        Raises:
            InvalidRequest: if `slot` < 1 (no parent can exist).
            ServerError: if no parent block can be located.
        """
        if slot < 1:
            raise InvalidRequest()
        parent_slot = Slot(max(slot - 1, 0))
        parent = self.chain.get_block_by_slot(parent_slot)
        if not parent:
            # as an optimization, try checking the head
            parent = self.chain.get_canonical_head()
            if parent.slot > parent_slot:
                # NOTE: if parent.slot == target_slot, we are not under this block.
                # NOTE: head has greater slot than the target, while it is odd
                # a client may want a block here, let's try to satisfy the request.
                # TODO: should we allow this behavior?
                # TODO: consider a more sophisticated search strategy if we can detect ``slot``
                # is far from the canonical head, e.g. binary search across slots
                parent = self._search_linearly_for_parent(parent_slot)
                if not parent:
                    raise ServerError()
                parent_block_root = parent.message.hash_tree_root
            else:
                # the head is a satisfactory parent, continue!
                parent_block_root = parent.hash_tree_root
        else:
            parent_block_root = parent.message.hash_tree_root
        parent_state = self.chain.db.get_state_by_root(parent.state_root, BeaconState)
        parent_state = advance_state_to_slot(self.chain, parent_slot, parent_state)
        state_machine = self.chain.get_state_machine(slot)
        # TODO: query for latest eth1 data...
        eth1_data = parent_state.eth1_data
        # TODO: query for relevant attestations
        attestations = ()
        return create_block_proposal(
            slot,
            parent_block_root,
            randao_reveal,
            eth1_data,
            attestations,
            parent_state,
            state_machine,
        )

    async def broadcast_block(self, block: SignedBeaconBlock) -> bool:
        """Publish `block` via the broadcaster and record its root; returns True."""
        logger.debug("broadcasting block with root %s", block.hash_tree_root.hex())
        await self.block_broadcaster.broadcast_block(block)
        self._broadcast_operations.add(block.hash_tree_root)
        return True

    def get_attestation(
        self, public_key: BLSPubkey, slot: Slot, committee_index: CommitteeIndex
    ) -> Attestation:
        """Build an unsigned attestation for `slot` by the given validator.

        The aggregation bitfield has exactly one bit set: the validator's
        position within its committee.
        """
        current_tick = self.clock.compute_current_tick()
        state = advance_state_to_slot(self.chain, current_tick.slot)
        block = self.chain.get_block_by_slot(slot)
        if not block:
            # try to find earlier block, assuming skipped slots
            block = self.chain.get_canonical_head()
            # sanity check the assumption in this leg of the conditional
            assert block.slot < slot
        else:
            block = block.message
        target_checkpoint = _get_target_checkpoint(
            state, block.hash_tree_root, self.eth2_config
        )
        data = AttestationData.create(
            slot=slot,
            index=committee_index,
            beacon_block_root=block.hash_tree_root,
            source=state.current_justified_checkpoint,
            target=target_checkpoint,
        )
        validator_index = state.get_validator_index_for_public_key(public_key)
        epoch = compute_epoch_at_slot(slot, self.eth2_config.SLOTS_PER_EPOCH)
        committee_assignment = get_committee_assignment(
            state, self.eth2_config, epoch, validator_index
        )
        committee = committee_assignment.committee
        committee_validator_index = committee.index(validator_index)
        aggregation_bits = Bitfield(
            tuple(i == committee_validator_index for i in range(len(committee)))
        )
        return Attestation.create(aggregation_bits=aggregation_bits, data=data)

    async def broadcast_attestation(self, attestation: Attestation) -> bool:
        """Record the attestation as broadcast; returns True.

        NOTE(review): unlike broadcast_block, nothing is actually sent yet --
        see the TODO below.
        """
        logger.debug(
            "broadcasting attestation with root %s", attestation.hash_tree_root.hex()
        )
        # TODO the actual brodcast
        self._broadcast_operations.add(attestation.hash_tree_root)
        return True
async def _get_node_version(context: Context, _request: Request) -> Response:
    """Handle GET /node/version: return the client identifier string."""
    return context.client_identifier
async def _get_genesis_time(context: Context, _request: Request) -> Response:
    """Handle GET /node/genesis_time: return the genesis Unix timestamp."""
    return context.genesis_time
async def _get_sync_status(context: Context, _request: Request) -> Response:
    """Handle GET /node/syncing: the is_syncing flag plus the slot details."""
    status = await context.get_sync_status()
    details = {k: v for k, v in asdict(status).items() if k != "is_syncing"}
    return {"is_syncing": status.is_syncing, "sync_status": details}
def _marshal_duty(duty: ValidatorDuty) -> Response:
    """Serialize a ValidatorDuty to a JSON-friendly dict (pubkey hex-encoded)."""
    return {**asdict(duty), "validator_pubkey": encode_hex(duty.validator_pubkey)}
async def _get_validator_duties(context: Context, request: Request) -> Response:
    """Handle GET /validator/duties for a comma-separated list of pubkeys."""
    if not isinstance(request, dict) or "validator_pubkeys" not in request:
        return ()
    public_keys = tuple(
        decode_hex(key) for key in request["validator_pubkeys"].split(",")
    )
    epoch = Epoch(int(request["epoch"]))
    return tuple(
        _marshal_duty(duty)
        for duty in context.get_validator_duties(public_keys, epoch)
    )
async def _get_block_proposal(context: Context, request: Request) -> Response:
    """Handle GET /validator/block: an unsigned proposal for the given slot."""
    if not isinstance(request, dict):
        return {}
    slot = Slot(int(request["slot"]))
    # Right-pad the reveal to the 96-byte BLS signature length.
    raw_reveal = decode_hex(request["randao_reveal"])
    randao_reveal = BLSSignature(raw_reveal.ljust(96, b"\x00"))
    try:
        return to_formatted_dict(context.get_block_proposal(slot, randao_reveal))
    except Exception as e:
        # TODO error handling...
        return {"error": str(e)}
async def _post_block_proposal(context: Context, request: Request) -> Response:
    """Handle POST /validator/block: decode a SignedBeaconBlock and broadcast it."""
    block = from_formatted_dict(request, SignedBeaconBlock)
    return await context.broadcast_block(block)
async def _get_attestation(context: Context, request: Request) -> Response:
    """Handle GET /validator/attestation for one validator, slot and committee."""
    if not isinstance(request, dict):
        return {}
    attestation = context.get_attestation(
        BLSPubkey(decode_hex(request["validator_pubkey"])),
        Slot(int(request["slot"])),
        CommitteeIndex(int(request["committee_index"])),
    )
    return to_formatted_dict(attestation)
async def _post_attestation(context: Context, request: Request) -> Response:
    """Handle POST /validator/attestation: decode and record an Attestation."""
    attestation = from_formatted_dict(request, Attestation)
    return await context.broadcast_attestation(attestation)
async def _get_chain_info(context: Context, request: Request) -> Response:
    """Handle GET /chain/info: canonical head slot and (humanized) root."""
    head = context.chain.get_canonical_head()
    return {"slot": head.slot, "root": humanize_hash(head.hash_tree_root)}
# HTTP method names used as keys in the dispatch table below.
GET = "GET"
POST = "POST"

# Route -> {method -> handler} table consumed by the HTTP server.
ServerHandlers = {
    Paths.chain_info.value: {GET: _get_chain_info},
    Paths.node_version.value: {GET: _get_node_version},
    Paths.genesis_time.value: {GET: _get_genesis_time},
    Paths.sync_status.value: {GET: _get_sync_status},
    Paths.validator_duties.value: {GET: _get_validator_duties},
    Paths.block_proposal.value: {GET: _get_block_proposal, POST: _post_block_proposal},
    Paths.attestation.value: {GET: _get_attestation, POST: _post_attestation},
}
|
from typing import List  # BUG FIX: List was referenced below but never imported


class Solution:
    def leastInterval(self, tasks: List[str], n: int) -> int:
        """Return the minimum number of CPU intervals needed to run `tasks`
        with a cooldown of `n` intervals between identical tasks.

        Greedy counting argument: the most frequent task(s) force
        `max_count - 1` frames of length `n + 1`; any idle slots left after
        filling the frames with the remaining tasks are forced idles.
        Equivalent to max(len(tasks), (max_count - 1) * (n + 1) + number_of_max).
        """
        counts = {}        # task -> occurrences
        max_count = 0      # highest occurrence count seen so far
        number_of_max = 0  # how many tasks share max_count
        for task in tasks:
            counts[task] = counts.get(task, 0) + 1
            if counts[task] == max_count:
                number_of_max += 1
            elif counts[task] > max_count:
                number_of_max = 1
                max_count = counts[task]
        # Idle slots forced by the most frequent tasks, minus the tasks
        # available to fill them; clamped at zero.
        frames = max_count - 1
        idles_per_frame = n - number_of_max + 1
        idles = frames * idles_per_frame
        fillers = len(tasks) - number_of_max * max_count
        return len(tasks) + max(0, idles - fillers)
|
"""Given: A DNA string s of length at most 1000 nt.
Return: Four integers (separated by spaces) counting the respective
number of times that the symbols 'A', 'C', 'G', and 'T' occur in s.
Sample:AGCTTTTCATTCTGACTGCAACGGGCAATATGTCTCTGTGTGGATTAAAAAAAGAGTGTCTGATAGCAGC
Output: 20 12 17 21 """
acount = 0
ccount = 0
tcount = 0
gcount = 0
nuc = raw_input()
for item in nuc:
if item == "A":
acount += 1
elif item == "C":
ccount += 1
elif item == 'T':
tcount += 1
elif item == 'G':
gcount +=1
else:
acount += 0
print acount, ccount, gcount, tcount
|
def calc(a, op, b):
    """Apply binary operator `op` to a and b: "+" adds, anything else subtracts."""
    return a + b if op == "+" else a - b
# Read four digits as a single string, e.g. "1222".
s = input()
A, B, C, D = [int(c) for c in s]
ops = ["+", "-"]
ans = ""
# Brute-force all 8 operator combinations, evaluated left to right; the last
# combination that yields 7 wins (the puzzle guarantees at least one exists).
for op1 in ops:
    for op2 in ops:
        for op3 in ops:
            if calc(calc(calc(A, op1, B), op2, C), op3, D) == 7:
                ans = str(A) + op1 + str(B) + op2 + str(C) + op3 + str(D) + "=7"
print(ans)
|
from django.urls import include, path
from drf_spectacular.views import SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView
from rest_framework.routers import DefaultRouter
from . import views
# Namespace used when reversing URLs, e.g. reverse("api:schema").
app_name = "api"

# REST endpoints; trailing_slash=False produces routes like /api/journal.
api_router = DefaultRouter(trailing_slash=False)
api_router.register(r"journal", views.JournalViewSet, "journal")
api_router.register(r"schoolyear", views.SchoolYearViewSet, "schoolyear")
api_router.register(r"user", views.UserViewSet, "user")

urlpatterns = [
    path("api/", include(api_router.urls)),
    # OpenAPI schema plus the two interactive documentation UIs built on it.
    path("schema/", SpectacularAPIView.as_view(), name="schema"),
    path("swagger/", SpectacularSwaggerView.as_view(url_name="api:schema"), name="swagger"),
    path("docs/", SpectacularRedocView.as_view(url_name="api:schema"), name="redoc"),
]
|
from typing import List
from Tree.PrintBST import PrintBST
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node holding a value and optional child links."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    """Post-order (left, right, root) traversals of a binary tree."""

    def postorderTraversal(self, root: TreeNode) -> List[int]:
        """Iterative version.

        Performs a root-right-left traversal with an explicit stack and
        reverses the collected values, which yields left-right-root order.

        Time:  O(n) -- every node is visited exactly once.
        Space: O(n) for the explicit stacks; average O(log n), worst case a
        degenerate (chain-shaped) tree.
        """
        if root is None:
            return []
        stack1 = [root]
        stack2 = []
        while stack1:
            node = stack1.pop()
            if node.left:
                stack1.append(node.left)
            if node.right:
                stack1.append(node.right)
            stack2.append(node.val)
        return stack2[::-1]

    def postorderTraversal2(self, root: TreeNode) -> List[int]:
        """Recursive version: O(n) time, O(h) call-stack space (h = tree height)."""
        ans = []

        def postorder(node):
            if node is None:
                return
            postorder(node.left)
            postorder(node.right)
            ans.append(node.val)

        postorder(root)
        return ans
if __name__ == '__main__':
    # Build the sample tree:
    #       1
    #      / \
    #     5   4
    #        / \
    #       3   6
    root = TreeNode(1)
    root.left = TreeNode(5)
    a = TreeNode(4)
    root.right = a
    a.left = TreeNode(3)
    a.right = TreeNode(6)
    PrintBST.printBST(root)
    solution = Solution()
    # Expected post-order for both variants: [5, 3, 6, 4, 1]
    result = solution.postorderTraversal(root)
    print('post order BT:{}'.format(result))
    result = solution.postorderTraversal2(root)
    print('post order BT:{}'.format(result))
|
from collections import OrderedDict
import re
from fsm import fsm
from persistence import DiceType, DiceFace, DiceThrowType, DiceThrowAdjustmentType, DiceThrow, Player, Dice, \
DiceThrowResult, DiceThrowAdjustment
class LogFileParser:
def __init__(self, session):
    """Init the parser with a persistence `session` used to store dice throws."""
    self.session = session
    # Dice-roll log lines retained by clean_up_lines().
    self.lines = []
    # Parsed throw events, in order of appearance.
    self.game_tape = []
    # Player name -> player record, in first-seen order.
    self.players = OrderedDict()
    # Counter for the attack set currently being parsed.
    self.current_attack_set = 0
    # Throw currently under construction, if any.
    self.current_throw = None
def get_players(self):
    """Return the player names in first-seen order."""
    return [*self.players]
def add_line(self, line):
    """Feed a single log line through the same filter as bulk input."""
    self.clean_up_lines([line])
def read_input_from_string(self, input):
    """Split `input` on newlines and keep its dice-roll lines.

    NOTE(review): the parameter shadows the builtin `input`, and the [:-1]
    slice drops the final split segment -- correct for text ending in a
    newline, but it silently discards the last line otherwise; confirm the
    input always ends with '\\n'.
    """
    lines = input.split('\n')[:-1]
    self.clean_up_lines( lines )
def read_input_from_file(self, file):
    """Read dice-roll lines from `file` (falls back to "log.txt").

    Uses a context manager so the handle is closed even if reading fails
    (the original leaked the handle on exception).
    """
    with open(file or "log.txt", 'r') as f:
        # Strip out all the non-dice rolls.
        alllines = f.readlines()
    self.clean_up_lines(alllines)
def clean_up_lines(self, lines):
    """Append to self.lines only the non-empty dice-roll lines of `lines`.

    NOTE(review): `player_is_rolling_dice` is defined outside this view; the
    emptiness check runs second, after that predicate.
    """
    for line in lines:
        if self.player_is_rolling_dice(line) and len(line) > 0:
            # print(line, end="")
            self.lines.append( line )
# State names for the log-parsing finite state machine assembled in
# run_finite_state_machine().
START = "Start"
PLAYER_ATTACKING = "Player Rolling Attack Dice"
PLAYER_NOT_ROLLING_DICE = "Player Not Rolling Dice"
PLAYER_DEFENDING = "Player Defending"
PLAYER_MODIFYING_DEFENSE_DICE = "Player Modifying Defense Dice"
PLAYER_MODIFYING_ATTACK_DICE = "Player Modifying Attack Dice"
PLAYER_ADDING_ATTACK_DICE = "Player Adding Attack Dice"
PLAYER_ADDING_DEFENSE_DICE = "Player Adding Defense Dice"
def run_finite_state_machine(self ):
fs = fsm( [
(
LogFileParser.START,
LogFileParser.PLAYER_ATTACKING,
lambda x: self.player_is_rolling_attack_dice(x),
self.begin_attack_set
),
(
LogFileParser.START,
LogFileParser.PLAYER_DEFENDING,
lambda x: self.player_is_defending(x)
),
(
LogFileParser.PLAYER_ATTACKING,
LogFileParser.START,
lambda x: not self.player_is_rolling_dice(x),
self.end_attack_set
),
(
LogFileParser.PLAYER_ATTACKING,
LogFileParser.PLAYER_ATTACKING,
lambda x: self.player_is_rolling_attack_dice(x),
self.end_attack_set_and_begin_new_attack_set
),
(
LogFileParser.PLAYER_ATTACKING,
LogFileParser.PLAYER_DEFENDING,
lambda x: self.player_is_defending(x),
self.add_defense_roll
),
(
LogFileParser.PLAYER_ATTACKING,
LogFileParser.PLAYER_MODIFYING_ATTACK_DICE,
lambda x: self.player_is_modifying_attack_dice(x),
self.add_attack_modification
),
(
LogFileParser.PLAYER_ATTACKING,
LogFileParser.PLAYER_ADDING_ATTACK_DICE,
lambda x: self.player_added_attack_dice(x),
self.add_attack_dice
),
(
LogFileParser.PLAYER_ADDING_ATTACK_DICE,
LogFileParser.START,
lambda x: not self.player_is_rolling_dice(x),
self.end_attack_set
),
(
LogFileParser.PLAYER_ADDING_ATTACK_DICE,
LogFileParser.PLAYER_MODIFYING_ATTACK_DICE,
lambda x: self.player_is_modifying_attack_dice(x),
self.add_attack_modification
),
(
LogFileParser.PLAYER_ADDING_ATTACK_DICE,
LogFileParser.PLAYER_ADDING_ATTACK_DICE,
lambda x: self.player_added_attack_dice(x),
self.add_attack_dice
),
(
LogFileParser.PLAYER_ADDING_ATTACK_DICE,
LogFileParser.PLAYER_DEFENDING,
lambda x: self.player_is_defending(x),
self.add_defense_roll
),
(
LogFileParser.PLAYER_MODIFYING_ATTACK_DICE,
LogFileParser.START,
lambda x: not self.player_is_rolling_dice(x),
self.end_attack_set
),
(
LogFileParser.PLAYER_MODIFYING_ATTACK_DICE,
LogFileParser.PLAYER_MODIFYING_ATTACK_DICE,
lambda x: self.player_is_modifying_attack_dice(x),
self.add_attack_modification
),
(
LogFileParser.PLAYER_MODIFYING_ATTACK_DICE,
LogFileParser.PLAYER_ADDING_ATTACK_DICE,
lambda x: self.player_added_attack_dice(x),
self.add_attack_dice
),
(
LogFileParser.PLAYER_MODIFYING_ATTACK_DICE,
LogFileParser.PLAYER_ATTACKING,
lambda x: self.player_is_rolling_attack_dice(x),
self.end_attack_set_and_begin_new_attack_set
),
(
LogFileParser.PLAYER_MODIFYING_ATTACK_DICE,
LogFileParser.PLAYER_DEFENDING,
lambda x: self.player_is_defending(x),
self.add_defense_roll
),
(
LogFileParser.PLAYER_DEFENDING,
LogFileParser.START,
lambda x: not self.player_is_rolling_dice(x),
self.end_attack_set
),
(
LogFileParser.PLAYER_DEFENDING,
LogFileParser.PLAYER_ATTACKING,
lambda x: self.player_is_rolling_attack_dice(x),
self.end_attack_set_and_begin_new_attack_set
),
(
LogFileParser.PLAYER_DEFENDING,
LogFileParser.PLAYER_DEFENDING,
lambda x: self.player_is_defending(x),
self.add_defense_roll
),
(
LogFileParser.PLAYER_DEFENDING,
LogFileParser.PLAYER_MODIFYING_DEFENSE_DICE,
lambda x: self.player_is_modifying_defense_dice(x),
self.add_defense_modification
),
(
LogFileParser.PLAYER_DEFENDING,
LogFileParser.PLAYER_ADDING_DEFENSE_DICE,
lambda x: self.player_added_defense_dice(x),
self.add_defense_dice
),
(
LogFileParser.PLAYER_MODIFYING_DEFENSE_DICE,
LogFileParser.START,
lambda x: not self.player_is_rolling_dice(x),
self.end_attack_set
),
( LogFileParser.PLAYER_MODIFYING_DEFENSE_DICE,
LogFileParser.PLAYER_ADDING_DEFENSE_DICE,
lambda x: self.player_added_defense_dice(x),
self.add_defense_dice
),
(
LogFileParser.PLAYER_MODIFYING_DEFENSE_DICE,
LogFileParser.PLAYER_MODIFYING_DEFENSE_DICE,
lambda x: self.player_is_modifying_defense_dice(x),
self.add_defense_modification
),
(
LogFileParser.PLAYER_MODIFYING_DEFENSE_DICE,
LogFileParser.PLAYER_ATTACKING,
lambda x: self.player_is_rolling_attack_dice(x),
self.end_attack_set_and_begin_new_attack_set
),
(
LogFileParser.PLAYER_ADDING_DEFENSE_DICE,
LogFileParser.PLAYER_ADDING_DEFENSE_DICE,
lambda x: self.player_added_defense_dice(x),
self.add_defense_dice
),
(
LogFileParser.PLAYER_ADDING_DEFENSE_DICE,
LogFileParser.PLAYER_MODIFYING_DEFENSE_DICE,
lambda x: self.player_is_modifying_defense_dice(x),
self.add_defense_modification
),
(
LogFileParser.PLAYER_ADDING_DEFENSE_DICE,
LogFileParser.PLAYER_ATTACKING,
lambda x: self.player_is_rolling_attack_dice(x),
self.end_attack_set_and_begin_new_attack_set
),
(
LogFileParser.PLAYER_ADDING_DEFENSE_DICE,
LogFileParser.START,
lambda x: not self.player_is_rolling_dice(x),
self.end_attack_set
),
] )
fs.start(LogFileParser.START)
i = 0
lines = self.get_lines()
for line in lines:
try:
fs.event(line)
except ValueError:
print("Unable to transition from state {0} ({1}) using input {2}, ignoring and continuing on ...".format(fs.currentState, lines[i-1], lines[i]))
i = i + 1 #just for debugging purposes
fs.event("")
def is_player_one(self,line):
player = self.player_rolling_dice(line)
if self.player1 == None and self.player2 == None:
self.player1 = player
return True
elif self.player1 == player:
return True
elif self.player1 != None and self.player2 == None:
self.player2 = player
return False
elif self.player2 == player:
return False
else:
RuntimeError("Third player {0} found??!!".format(player))
def is_player_two(self, line):
return not self.is_player_one(line)
def get_lines(self):
return self.lines
face_translate = { "Hit" : DiceFace.HIT,
"Crit" : DiceFace.CRIT,
"Focus" : DiceFace.FOCUS,
"Blank" : DiceFace.BLANK,
"Evade" : DiceFace.EVADE }
def get_dice_cancelled(self, line):
dice_added = re.findall( r'cancels\s+.*?(\w+)\s+\*\*\*', line)
dice_added[:] = (LogFileParser.face_translate[value] for value in dice_added if len(value) > 0)
return dice_added[0]
def get_dice_added(self, line):
dice_added = re.findall( r'added\s+a[n]*\s+(\w+)', line)
dice_added[:] = (LogFileParser.face_translate[value] for value in dice_added if len(value) > 0)
return dice_added[0]
def get_dice_rolled(self, line):
#some players have the habit of putting []'s in their names, for example 'sepyx [FR]
#these have to be stripped out before providing them to the below
pre, post = line.split(':')
dice_rolled = re.findall(r'\[(.*?)\]', post)
dice_rolled[:] = (LogFileParser.face_translate[value] for value in dice_rolled if len(value) > 0)
return dice_rolled
def player_cancelled_attack_dice(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+cancels\s+.*?[Hit|Crit|Focus|Blank].*?\*\*\*',line)
def player_cancelled_defense_dice(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+cancels\s+.*?[Evade|Focus|Blank].*?\*\*\*',line)
def player_added_attack_dice(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+added\s+a\s+[Hit|Crit|Focus|Blank].*?\*\*\*',line)
def player_added_defense_dice(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+added\s+a[n]*\s+[Evade|Focus|Blank].*?\*\*\*',line)
def player_is_rolling_dice(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+[Rolls|Re-rolls|turns|added|cancels].*?\*\*\*',line)
def player_rolling_dice(self, line):
match = re.match(r'^\* \*\*\*\s+(.*?)\s+[Rolls|Re-rolls|turns].*?\*\*\*',line)
if match:
player = match.group(1)
player = re.sub('[\[\]]', '', player) #strip out pesky brackets
self.players[player] = 1
return player
else:
return None
def is_attack_roll(self,line):
return re.search(r'^\* \*\*\*\s+.*?\s+Rolls\s+to\s+Attack.*?\*\*\*',line)
def is_defense_roll(self,line):
return re.search(r'^\* \*\*\*\s+.*?\s+Rolls\s+to\s+Defend.*?\*\*\*',line)
#* *** Veldrin used Focus on Attack Dice ***
def player_using_focus_token_on_attack(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+used\s+Focus\s+on\s+Attack\s+Dice.*?\*\*\*',line)
#* *** Veldrin used Focus on Defense Dice ***
def player_using_focus_token_on_defense(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+used\s+Focus\s+on\s+Defense\s+Dice.*?\*\*\*',line)
def player_rerolled_defense_dice(self,line):
return re.search(r'^\* \*\*\*\s+.*?\s+Re-Rolls\s+Defense\s+Die.*?\*\*\*',line)
def player_turned_defense_dice(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+turns\s+Defense\s+Die.*?\*\*\*',line)
def player_rerolled_attack_dice(self,line):
return re.search(r'^\* \*\*\*\s+.*?\s+Re-Rolls\s+Attack\s+Die.*?\*\*\*',line)
def player_turned_attack_dice(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+turns\s+Attack\s+Die.*?\*\*\*',line)
def player_is_defending(self, line):
if self.player_is_rolling_dice(line):
if self.is_defense_roll(line):
return True
return False
def player_is_rolling_attack_dice(self, line ):
return self.player_is_rolling_dice(line) and self.is_attack_roll(line)
def begin_attack_set(self, game_state, value ):
self.current_attack_set += 1
attacking_player = self.player_rolling_dice(value)
dice_rolled = self.get_dice_rolled( value )
dice_number = 1
dice_throw = DiceThrow( throw_type=DiceThrowType.ATTACK,
attack_set_num=self.current_attack_set,
player=Player.as_unique( self.session, name=attacking_player))
for dice_value in dice_rolled:
dice = Dice(dice_type=DiceType.RED,
dice_face=dice_value,
dice_origination=Dice.ROLLED)
throw_result = DiceThrowResult(dice_num=dice_number, dice=dice, final_dice=dice)
dice_throw.results.append(throw_result)
dice_number += 1
self.game_tape.append(dice_throw)
self.current_throw = dice_throw
def end_attack_set(self, fss, value):
return True
def end_attack_set_and_begin_new_attack_set(self, game_state, value):
self.begin_attack_set(game_state, value)
def add_defense_roll(self, fss, value):
dice_rolled = self.get_dice_rolled(value)
defending_player = self.player_rolling_dice(value)
dice_number = 1
dice_throw = DiceThrow( throw_type=DiceThrowType.DEFEND,
attack_set_num=self.current_attack_set,
player=Player.as_unique(self.session, name=defending_player))
for dice_value in dice_rolled:
dice = Dice( dice_type=DiceType.GREEN,
dice_face=dice_value,
dice_origination=Dice.ROLLED)
throw_result = DiceThrowResult(dice_num=dice_number,
dice=dice,
final_dice=dice)
dice_throw.results.append(throw_result)
dice_number += 1
self.game_tape.append(dice_throw)
self.current_throw = dice_throw
def player_is_modifying_defense_dice(self, value):
return self.player_rerolled_defense_dice(value) or\
self.player_turned_defense_dice(value) or \
self.player_using_focus_token_on_defense(value) or \
self.player_cancelled_defense_dice(value)
def player_is_modifying_attack_dice(self, value):
return self.player_rerolled_attack_dice(value) or\
self.player_turned_attack_dice(value) or\
self.player_using_focus_token_on_attack(value) or\
self.player_cancelled_attack_dice(value)
def p(self, line):
return re.search(r'^\* \*\*\*\s+.*?\s+turns\s+Attack\s+Die.*?\*\*\*',line)
#* *** sozin Re-Rolls Attack Die 1 [Focus] and gets a [Hit] ***
def get_attack_dice_rerolled(self, line):
dice_rolled = re.findall(r'.*?Re-Rolls\s+Attack\s+Die\s+(\d+).*?and\s+gets\s+a\s+\[(.*?)\]', line)
return dice_rolled
def add_defense_dice(self,fss,value):
dice_added = self.get_dice_added(value)
dice = Dice(dice_type=DiceType.GREEN,
dice_face=dice_added,
dice_origination=Dice.ADDED)
evade_throw = self.current_throw
results = evade_throw.results
dice_number = len(results)+1
result = DiceThrowResult(dice_num=dice_number,
dice=dice,
final_dice=dice)
results.append(result)
def add_attack_dice(self, fss, value):
dice_added = self.get_dice_added(value)
#this is probably too blissful, but give it a chance!
dice = Dice(dice_type=DiceType.RED,
dice_face=dice_added,
dice_origination=Dice.ADDED)
dice_number = len(self.current_throw.results)+1
throw_result = DiceThrowResult(dice_num=dice_number, dice=dice, final_dice=dice)
self.current_throw.results.append(throw_result)
def process_attack_dice_cancelled(self, value):
#go through and try to find a dice to be cancelled
dice_cancelled = self.get_dice_cancelled(value)
for throw_result in self.current_throw.results:
if not throw_result.was_cancelled():
if throw_result.final_dice.is_hit() and dice_cancelled == DiceFace.HIT or\
throw_result.final_dice.is_crit() and dice_cancelled == DiceFace.CRIT:
self.process_attack_dice_modify(DiceThrowAdjustmentType.CANCELLED,
throw_result.dice_num,
dice_cancelled)
break
def process_attack_dice_turn(self, value):
dice = self.get_attack_dice_changed_by_set(value)
adjustment_type = DiceThrowAdjustmentType.TURNED
dice_number = int(dice[0][0])
dice_value = LogFileParser.face_translate[dice[0][1]]
self.process_attack_dice_modify(adjustment_type, dice_number, dice_value)
def process_attack_reroll(self,value):
dice = self.get_attack_dice_rerolled(value)
adjustment_type = DiceThrowAdjustmentType.REROLL
dice_number = int(dice[0][0])
dice_value = LogFileParser.face_translate[dice[0][1]]
self.process_attack_dice_modify(adjustment_type, dice_number, dice_value)
def process_attack_dice_modify(self, adjustment_type, dice_number, dice_value):
modified_result = self.current_throw.results[dice_number - 1]
# if there were no adjustments, then the from is just from the base result
# otherwise its from the last adjustment
from_dice = None
if len(modified_result.adjustments) == 0:
from_dice = modified_result.dice
else:
from_dice = modified_result.adjustments[-1].to_dice
to_dice = Dice(dice_type=DiceType.RED,
dice_face=dice_value,
dice_origination=from_dice.dice_origination)
modified_result.final_dice = to_dice
adjustment = DiceThrowAdjustment(adjustment_type=adjustment_type,
from_dice=from_dice,
to_dice=to_dice)
modified_result.adjustments.append(adjustment)
def process_attack_focus(self):
for dtr in self.current_throw.results:
if dtr.final_dice.dice_face == DiceFace.FOCUS:
from_dice = None
if len(dtr.adjustments) == 0:
from_dice = dtr.dice
else:
from_dice = dtr.adjustments[-1].to_dice
to_dice = Dice(dice_type=DiceType.RED,
dice_face=DiceFace.HIT,
dice_origination=from_dice.dice_origination)
adjustment = DiceThrowAdjustment(adjustment_type=DiceThrowAdjustmentType.CONVERT,
from_dice=from_dice,
to_dice=to_dice)
dtr.adjustments.append( adjustment )
dtr.final_dice = to_dice
def add_attack_modification(self,game_state,value):
if self.player_rerolled_attack_dice(value):
self.process_attack_reroll(value)
elif self.player_using_focus_token_on_attack(value):
self.process_attack_focus()
elif self.player_turned_attack_dice(value):
self.process_attack_dice_turn(value)
elif self.player_cancelled_attack_dice(value):
self.process_attack_dice_cancelled(value)
def get_defense_dice_rerolled(self, line):
dice_rolled = re.findall(r'.*?Re-Rolls\s+Defense\s+Die\s+(\d+).*?and\s+gets\s+a\s+\[(.*?)\]', line)
return dice_rolled
def get_defense_dice_changed_by_set(self, line):
dice_rolled = re.findall(r'.*?turns\s+Defense\s+Die\s+(\d+).*?into\s+a\s+\[(.*?)\]', line)
return dice_rolled
def get_attack_dice_changed_by_set(self, line):
dice_rolled = re.findall(r'.*?turns\s+Attack\s+Die\s+(\d+).*?into\s+a\s+\[(.*?)\]', line)
return dice_rolled
def add_defense_modification(self,game_state,value):
if self.player_using_focus_token_on_defense(value):
self.process_defense_focus()
elif self.player_rerolled_defense_dice(value):
self.process_defense_reroll(value)
elif self.player_turned_defense_dice(value):
self.process_defense_dice_turn(value)
def process_defense_focus(self):
for dtr in self.current_throw.results:
if dtr.final_dice.dice_face == DiceFace.FOCUS:
from_dice = None
if len(dtr.adjustments) == 0:
from_dice = dtr.dice
else:
from_dice = dtr.adjustments[-1].to_dice
to_dice = Dice(dice_type=DiceType.GREEN,
dice_face=DiceFace.EVADE,
dice_origination=from_dice.dice_origination)
adjustment = DiceThrowAdjustment(adjustment_type=DiceThrowAdjustmentType.CONVERT,
from_dice=from_dice,
to_dice=to_dice)
dtr.adjustments.append( adjustment )
dtr.final_dice = to_dice
def process_defense_dice_turn(self, value):
dice = self.get_defense_dice_changed_by_set(value)
adjustment_type = DiceThrowAdjustmentType.CONVERT
self.process_defense_dice_modification( dice, adjustment_type)
def process_defense_reroll(self,value):
dice = self.get_defense_dice_rerolled(value)
adjustment_type = DiceThrowAdjustmentType.REROLL
self.process_defense_dice_modification( dice, adjustment_type)
def process_defense_dice_modification(self,dice,adjustment_type):
dice_number = int(dice[0][0])
dice_value = LogFileParser.face_translate[dice[0][1]]
modified_result = self.current_throw.results[ dice_number - 1 ]
from_dice = None
if len(modified_result.adjustments) == 0:
from_dice = modified_result.dice
else:
from_dice = modified_result.adjustments[-1].to_dice
to_dice = Dice(dice_type=DiceType.GREEN,
dice_face=dice_value,
dice_origination=from_dice.dice_origination)
modified_result.final_dice = to_dice
adjustment = DiceThrowAdjustment(adjustment_type=adjustment_type,
from_dice=from_dice,
to_dice=to_dice)
modified_result.adjustments.append( adjustment )
|
from django.urls import path
from carts import views
# URL namespace: reverse these as 'carts:cart', 'carts:checkout', etc.
app_name = 'carts'

urlpatterns = [
    path('', views.cart, name='cart'),
    # NOTE(review): 'checkout' has no trailing slash while the item_* routes
    # do -- confirm the inconsistency is intentional before normalizing.
    path('checkout', views.checkout, name='checkout'),
    # Cart item manipulation; <int:pk> identifies the cart item.
    path('item_remove_cart/<int:pk>/', views.item_remove_cart, name='item_remove_cart'),
    path('item_quantity_minus/<int:pk>/', views.item_quantity_minus, name='item_quantity_minus'),
    path('item_quantity_plus/<int:pk>/', views.item_quantity_plus ,name='item_quantity_plus'),
]
|
#!/usr/bin/python3
# Obfuscated payload: each byte is the target character's code point minus 0x41.
# (Replaces fifty consecutive x.append(...) calls with a single literal.)
x = [
    0x25, 0x2b, 0x20, 0x26, 0x3a, 0x28, 0x25, 0x1e, 0x28, 0x1e,
    0x32, 0x34, 0x21, 0x2c, 0x28, 0x33, 0x1e, 0x33, 0x27, 0x28,
    0x32, 0x1e, 0x25, 0x2b, 0x20, 0x26, 0x1e, 0x33, 0x27, 0x24,
    0x2d, 0x1e, 0x28, 0x1e, 0x36, 0x28, 0x2b, 0x2b, 0x1e, 0x26,
    0x24, 0x33, 0x1e, 0x2f, 0x2e, 0x28, 0x2d, 0x33, 0x32, 0x3c,
]

# Decode by shifting every byte up by 0x41 ('A'); join once instead of
# printing character by character.  Output is identical (no trailing newline).
decoded = "".join(chr(b + 0x41) for b in x)
print(decoded, end="")
|
# Approach without DP (greedy build-up from 1; kept commented out below)
# import sys
# n = int(sys.stdin.readline())
# temp = 1 #1에서 더해나가는 방식
# count = 0
# if n ==1:
# print(0)
# else:
# while True:
# if temp*3 < n:
# temp = temp*3
# elif temp*2 <n:
# temp = temp*2
# temp += 1
# count += 1
# if temp == n: #temp랑 n이 같아졌을때 멈춤
# break
# print(count)
# Alternative approach (adapted from another solution)
def min_operations(target):
    """Return the minimum number of +1 / *2 / *3 steps to build `target` from 1.

    Equivalently: the fewest -1, /2, /3 operations to reduce `target` to 1
    (the classic "make it 1" DP).  `target` must be a positive integer.
    """
    # dp[i] = fewest operations to reach i from 1.  The original sized the
    # table target+2; kept for identical bounds behavior.
    dp = [0] * (target + 2)
    for i in range(2, len(dp)):
        dp[i] = dp[i - 1] + 1              # a +1 step is always possible
        if i % 3 == 0:
            dp[i] = min(dp[i], dp[i // 3] + 1)   # use integer //, not int(i/3)
        if i % 2 == 0:
            dp[i] = min(dp[i], dp[i // 2] + 1)
    return dp[target]


if __name__ == '__main__':
    # Same CLI behavior as before: read n from stdin, print the answer.
    testcase = int(input())
    print(min_operations(testcase))
|
"""
OpenBikes API library
--------------------------------
"""
from obpy.obpy import *
__project__ = 'obpy'
__author__ = 'Axel Bellec'
__copyright__ = 'OpenBikes'
__licence__ = 'MIT'
__version__ = '1.0.0'
|
#-------------------------------------------------------------------------------
# Name: blue2cosd.py
# Purpose:
#
"""
To update SDEP2 with zipped FGDB's stored on an FTP site.
NOTES: For the purposes of this script a 'Dataset' can be any type of data
(Feature Class, Table, etc.). A Feature Dataset (FDS) is the specific object in
ArcCatalog that can contain Feature Classes of all the same Projection.
PROCESS:
1. Downloads zipped FGDB's from an FTP site to a staging folder,
unzips, and deletes zipped FGDB's from the FTP site and in the staging
folder.
2. Gets a list of each unzipped FGDB in the staging folder.
3. Gets the FDS the dataset belongs to in the County SDEP2 from the
manifestTable.
4. Tests to confirm that the dataset exists in SDEP2.
5. Tests to confirm that the dataset in the FGDB has the all the fields in
the SDEP2 dataset.
6. Creates a backup of the existing dataset.
7. Registers the FDS as versioned (if a FC - because a backup was created).
Not needed if the dataset is a Table since a Table backup doesn't need to
be registered as versioned.
8. Deletes the rows in the SDEP2 dataset.
9. Appends rows from the FGDB to the SDEP2 dataset.
10. Deletes the FGDB used to update SDEP2 from the staging folder.
UPDATES:
July 2017: Updated to allow Tables to update SDEP2 from the FTP site.
"""
# Author: Gary Ross
# Editors: Gary Ross, Mike Grue
#-------------------------------------------------------------------------------
import arcpy
import ConfigParser
import datetime
import ftplib
import math
import os
import re
import string
import sys
import time
import zipfile
#-------------------------------------------------------------------------------
# User Set Variables
#-------------------------------------------------------------------------------
stopTimeStr = "05:00:00" # time of day (next day) to stop copying
# Set paths
root = "D:\\sde_cosd_and_blue"
##root = r'U:\grue\Projects\VDrive_to_SDEP_flow\FALSE_root' # MG 07/17/17: Set variable to DEV settings. TODO: Delete after testing
sdePath = os.path.join(root,"connection","Connection to Workspace (sangis user).sde")
##sdePath = r'U:\grue\Projects\VDrive_to_SDEP_flow\FALSE_root\connection\FALSE_SDEP2.gdb' # MG 07/17/17: Set variable to DEV settings. TODO: Delete after testing
ftpFolder = "ftp/LUEG/transfer_to_cosd"
##ftpFolder = r'U:\grue\Projects\VDrive_to_SDEP_flow\FALSE_FTP_folder' # MG 07/17/17: Set variable to DEV settings. TODO: Delete after testing
# Prefix applied to every dataset / FDS name in the County SDE.
sdePre = "SDEP2.SANGIS."
##sdePre = '' # MG 07/17/17: Set variable to DEV settings. TODO: Delete after testing
manifestTable = "manifest_blue2cosd"
#-------------------------------------------------------------------------------
# Script Set Variables
#-------------------------------------------------------------------------------
# Set paths
dataPath = os.path.join(root,"data")
logPath = os.path.join(root,"log")
configFile = os.path.join(root,"connection","ftp.txt")
# Empty ERROR_<date> file created on failure (see end of script).
errorFile = os.path.join(logPath,"ERROR_" + str(time.strftime("%Y%m%d", time.localtime())) + "_blue2cosd.txt")
arcpy.env.workspace = dataPath
# Set fields to ignore when comparing datasets in FGDB and SDEP2
# (geometry and auto-maintained area/length fields under their many spellings)
ignoreFields = [
    "Shape",
    "SHAPE",
    "Shape_Area",
    "SHAPE_Area",
    "Shape.STArea()",
    "SHAPE.STArea()",
    "Shape_STArea__",
    "Shape_Length",
    "SHAPE_Length",
    "Shape.STLength()",
    "SHAPE.STLength()",
    "Shape_STLength__"]
# Set log file variables
# NOTE: sys.stdout is redirected here, so every `print` below goes to the
# timestamped log file, not the console, until it is restored at the end.
old_output = sys.stdout
logFileName = os.path.join(logPath,"blue2cosd" + str(time.strftime("%Y%m%d%H%M", time.localtime())) + ".txt")
logFile = open(logFileName,"w")
sys.stdout = logFile
# Format stop time for processing
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
stopTime = datetime.datetime.strptime(str(tomorrow) + " " + str(stopTimeStr),"%Y-%m-%d %H:%M:%S")
# Flags
errorFlag = False # Set 'False' here only.
                  # Flipped to 'True' if there is any error.
fieldError = False # Set 'False' here AND at the beginning of each dataset.
                   # Flipped to 'True' if there is a field error for that dataset.
delete_FGDB = False # Set 'False' here AND at the beginning of each dataset.
                    # Flipped to 'True' for each dataset if it successfully
                    # updates SDEP2, will allow the FGDB in the Staging folder
                    # to be deleted.
#-------------------------------------------------------------------------------
# Start Running Script
#-------------------------------------------------------------------------------
print '************************************************************************'
print ' Starting blue2cosd.py'
print '************************************************************************'
# Download and delete all files from ftp
print 'Downloading and deleting files from FTP'
print '------------------------------------------------------------------------'
try:
os.chdir(dataPath)
print str(time.strftime("%H:%M:%S", time.localtime())),"| Connecting to ftp"
config = ConfigParser.ConfigParser()
config.read(configFile)
usr = config.get("sangis","usr")
pwd = config.get("sangis","pwd")
adr = config.get("sangis","adr")
ftp = ftplib.FTP(adr)
ftp.login(usr,pwd)
ftp.cwd(ftpFolder)
filenames = ftp.nlst()
number_of_files = int(len(filenames))
if number_of_files < 2:
print "WARNING: No files on FTP site\n"
sys.stdout = old_output
logFile.close()
sys.exit(0)
else:
for filename in filenames:
fc = filename.strip(".gdb.zip")
gdb = filename.strip(".zip")
print str(time.strftime("%H:%M:%S", time.localtime())),"| Downloading and unzipping",fc
zipPath = os.path.join(dataPath,filename)
# download
with open(zipPath,'wb') as openFile:
ftp.retrbinary('RETR '+ filename,openFile.write)
# delete existing gdb
if arcpy.Exists(gdb):
arcpy.management.Delete(gdb)
# unzip
with zipfile.ZipFile(zipPath,"r") as z:
z.extractall(dataPath)
# delete zip file from ftp (except manifest file)
if not filename == manifestTable + ".gdb.zip":
ftp.delete(filename)
# delete zip file from staging area
if arcpy.Exists(zipPath):
print ' Deleting file from "{}"'.format(zipPath)
os.unlink(zipPath)
print '\n--------------------------------------------------------'
ftp.quit()
except:
errorFlag = True
print "ERROR: Failed to download or unzip file from ftp site\n"
print arcpy.GetMessages()
print ""
# Get a list of each FGDB
print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
print '--------------------------------------------------------'
arcpy.env.workspace = dataPath
workspaces = arcpy.ListWorkspaces("","FileGDB")
# Go through each FGDB: verify it against the manifest and the SDE schema,
# back up the existing SDE dataset, then replace its rows from the FGDB.
for workspace in workspaces:
    print '\n------------------------------------------------------------------'
    print '{} | Processing workspace: "{}"'.format(str(time.strftime("%H:%M:%S", time.localtime())), workspace)
    delete_FGDB = False
    # Stop starting new datasets once the configured stop time has passed
    # (deltaDays goes negative after stopTime).
    timenow = datetime.datetime.now()
    deltaTime = stopTime - timenow
    deltaDays = int(deltaTime.days)
    if deltaDays >= 0:
        manifest_FDGB = os.path.join(dataPath,manifestTable + ".gdb")
        if workspace == manifest_FDGB:
            print ' Not processing the manifest table'
        if not workspace == manifest_FDGB and not workspace == os.path.join(dataPath,"cosd2blue.gdb"):
            # Load the Dataset in the FGDB to the SDE
            try:
                arcpy.env.workspace = workspace
                # The FC's name should be the same as the FGDB's name
                # and there should only be one
                fc = (os.path.basename(workspace)).split('.')[0]
                gdbFC = os.path.join(workspace, fc)
                # Get the dataset type to decide how to handle the dataset
                # (i.e. as a Feature Class or as a Table)
                desc = arcpy.Describe(gdbFC)
                dataset_type = desc.datasetType
                print "Processing '{}' as a '{}':".format(fc, dataset_type)
                # Get FDS from manifest table
                inManifest = False
                inSDE = False
                manifest_path = os.path.join(manifest_FDGB, manifestTable)
                where_clause = "LAYER_NAME = '" + fc + "'"
                # If multiple manifest rows match, the last row wins for fds.
                with arcpy.da.SearchCursor(manifest_path, ["COUNTY_FDS","LAYER_NAME"], where_clause) as cursor:
                    for row in cursor:
                        inManifest = True
                        fds = row[0]
                print ' "{}" is in Manifest = "{}". County FDS = "{}"'.format(fc, inManifest, fds)
                if inManifest:
                    # Set path to 'sdeFDS' and 'sdeFC' depending on 'dataset_type'
                    if dataset_type == 'FeatureClass':
                        sdeFDS = os.path.join(sdePath,sdePre + fds)
                        sdeFC = os.path.join(sdeFDS,sdePre + fc)
                    if dataset_type == 'Table':
                        sdeFDS = os.path.join(sdePath,sdePre) # No fds for a Table
                        sdeFC = os.path.join(sdeFDS,sdePre + fc)
                    # Verify FC exists in County SDE
                    print ' Verifying "{}" exists in County SDE at "{}":'.format(fc, sdeFC)
                    if arcpy.Exists(sdeFC):
                        inSDE = True
                        print ' "Dataset Exists"'
                    else:
                        print "*** ERROR: '{}' does not exist in '{}' in County Workspace ***".format(fc, fds)
                        errorFlag = True
                        inSDE = False
                else:
                    print "*** ERROR: '{}' does not exist in manifest table".format(fc)
                    errorFlag = True
                # Go through each field in SDE Dataset and make sure it exists in GDB Dataset
                if inSDE and inManifest:
                    print ' Analyzing Fields for schema mismatch between SDE Dataset and FGDB Dataset:'
                    # Get list of FGDB fields for this Dataset
                    # (geometry/area/length fields are excluded via ignoreFields)
                    gdbFieldsOrig = arcpy.ListFields(gdbFC)
                    gdbFields = []
                    for fld in gdbFieldsOrig:
                        if fld.name not in ignoreFields:
                            gdbFields.append(fld)
                    # Get list of SDE fields for this Dataset
                    sdeFieldsOrig = arcpy.ListFields(sdeFC)
                    sdeFields = []
                    for fld in sdeFieldsOrig:
                        if fld.name not in ignoreFields:
                            sdeFields.append(fld)
                    # Using lists from above, make sure field types match
                    fieldError = False
                    for sdeField in sdeFields:
                        gdbFieldExists = False
                        for gdbField in gdbFields:
                            # Check field name exists
                            if gdbField.name == sdeField.name:
                                gdbFieldExists = True
                                # Check field type
                                # NOTE: '<>' is the Python 2-only inequality
                                # operator (this whole script is Python 2).
                                if gdbField.type <> sdeField.type:
                                    fieldError = True
                                    errorFlag = True
                                    print '*** ERROR: Field "{}" does not have the same field type in FGDB and SDE ***'.format(gdbField.name)
                                # Check field length
                                if gdbField.type == "String" and gdbField.length > sdeField.length:
                                    fieldError = True
                                    errorFlag = True
                                    print '*** ERROR: Field "{}" is too long in FGDB for SDE ***'.format(gdbField.name)
                        if not gdbFieldExists:
                            fieldError = True # MG 07/17/17: I added this to prevent SDE from being updated by FGDB dataset with missing field(s) TODO: Confirm with Gary this is OK and delete this comment if OK.
                            errorFlag = True # MG 07/17/17: I added this to prevent SDE from being updated by FGDB dataset with missing field(s) TODO: Confirm with Gary this is OK and delete this comment if OK.
                            print '*** ERROR: Field "{}" does not exist in FGDB ***'.format(sdeField.name)
                    if fieldError:
                        print ' New Dataset from FGDB not copied over to SDE, please fix above errors.'
                    # Only update data if there are no field errors
                    if not fieldError:
                        print ' "No field errors detected"'
                        # Set 'backup_path' depending on 'dataset_type'
                        if dataset_type == 'FeatureClass':
                            backup_path = os.path.join(sdePath,sdePre + fds,sdePre + fc + "_BAK")
                        if dataset_type == 'Table':
                            backup_path = os.path.join(sdePath,sdePre + fc + "_BAK")
                        # Delete existing backup if it exists
                        if arcpy.Exists(backup_path):
                            print ' Deleting old backup at "{}"'.format(backup_path)
                            arcpy.management.Delete(backup_path)
                        # Backup existing data in SDE
                        print ' Backing up "{}"\n From: "{}"\n To: "{}"'.format(fc, sdeFC, backup_path)
                        arcpy.management.Copy(sdeFC, backup_path)
                        if dataset_type == 'FeatureClass': # Register Feature Dataset As Versioned
                            print ' Registering as versioned FDS "{}"'.format(sdeFDS)
                            arcpy.management.RegisterAsVersioned(sdeFDS,"NO_EDITS_TO_BASE") # MG 07/17/17: Set variable to DEV settings. TODO: delete comment
                        # Delete all records in Dataset
                        print ' Deleting rows in SDE at "{}"'.format(sdeFC)
                        arcpy.DeleteRows_management(sdeFC)
                        # Append data from FGDB to SDE ('TEST' enforces
                        # matching schemas during the append)
                        try:
                            print ' Appending data:\n From: "{}"\n To: "{}"'.format(gdbFC, sdeFC)
                            arcpy.Append_management(gdbFC, sdeFC, 'TEST')
                            delete_FGDB = True
                        except:
                            print '*** ERROR Appending data to "{}"'.format(fc)
                            errorFlag = True
                        # If Append was successful delete FGDB used to update SDEP2
                        if delete_FGDB:
                            try:
                                print ' Deleting FGDB used to update SDE "{}"'.format(workspace)
                                arcpy.management.Delete(workspace) # MG 07/17/17: Set variable to DEV settings. TODO: delete comment
                            except:
                                errorFlag = True
                                print '*** ERROR: Problem with deleting FGDB used to update SDE ***'
            except Exception as e:
                errorFlag = True
                print '*** ERROR with processing FC ***'
                print str(e)
                print arcpy.GetMessages()
# Final status banner (still written to the log file at this point).
if errorFlag == True:
    print '\n\n*** ERRORS in script, please see above for specifics. ***'
else:
    print '\n\nSUCCESSFUL run of script.'
# Restore the console stdout and close the log.
sys.stdout = old_output
logFile.close()
if errorFlag:
    # email message
    # Creates an empty ERROR_<date> marker file; presumably an external
    # process watches for it and sends the notification email -- TODO confirm.
    eFile = open(errorFile,"w")
    eFile.close()
    sys.exit()
|
from flask import Flask,render_template,request
from datetime import datetime
import random
import requests
from bs4 import BeautifulSoup
# WSGI application object; routes below register themselves via decorators.
app=Flask(__name__)

#print(datetime.today())
@app.route("/")
def hello():
return render_template("index.html")
@app.route("/hello/<string:name>")
def hellojs(name):
return render_template("hello.html",n=name)
# /cube/<number>
@app.route("/cube/<int:number>")
def cube(number):
    # NOTE(review): this computes number*number (a square), but the route and
    # function are named "cube" -- confirm whether number**3 was intended or
    # whether the template genuinely expects the square.
    return render_template("cube.html",n=number*number)
@app.route("/lunch")
def lunch():
lunch_box=['20층','양자강','바스버거','김까','시골집']
lunch=random.choice(lunch_box)
return render_template("lunch.html",lunch=lunch,box=lunch_box)
@app.route("/vonvon/<string:name>")
def vonvon(name):
hour_arr=range(1,1001)
minuate_arr=range(-1,60)
hour=random.choice(hour_arr)
minuate=random.choice(minuate_arr)
return render_template("vonvon.html",hour=hour,minuate=minuate,name=name)
@app.route("/christmas")
def christmas():
christmas=""
if datetime.today().month==12 & datetime.today().day==25:
christmas="맞아"
else:
christmas="아니야"
return render_template("christmas.html",christmas=christmas)
@app.route('/google')
def google():
    """Serve the Google search demo page."""
    page = "google.html"
    return render_template(page)
@app.route('/opgg')
def opgg():
    """Serve the op.gg summoner search form."""
    page = "opgg.html"
    return render_template(page)
@app.route('/opggresult')
def opggresult():
    """Scrape a summoner's ranked win/loss counts from op.gg."""
    # Summoner name comes from the ?q= query parameter of the search form.
    name=request.args.get('q')
    # NOTE(review): the CSS selectors below are tied to op.gg's markup and
    # will break when the site changes; wins[0]/losses[0] raise IndexError
    # for unranked or unknown summoner names -- confirm acceptable.
    res=requests.get("http://www.op.gg/summoner/userName="+name)
    soup = BeautifulSoup(res.content, 'html.parser')
    wins=soup.select('#SummonerLayoutContent > div.tabItem.Content.SummonerLayoutContent.summonerLayout-summary > div.SideContent > div.TierBox.Box > div.SummonerRatingMedium > div.TierRankInfo > div.TierInfo > span.WinLose > span.wins')
    losses=soup.select('#SummonerLayoutContent > div.tabItem.Content.SummonerLayoutContent.summonerLayout-summary > div.SideContent > div.TierBox.Box > div.SummonerRatingMedium > div.TierRankInfo > div.TierInfo > span.WinLose > span.losses')
    return render_template("opggresult.html",name=name,wins=wins[0].text,losses=losses[0].text)
# Dev entry point: bind to all interfaces so the server is reachable
# from outside the host.
if __name__ =="__main__":
    app.run(host='0.0.0.0',port=8080)
|
import time
import json
from nba_api.stats.static import teams
from nba_api.stats.endpoints import leaguegamefinder
from data_gathering.get_plays import plays_to_json
def get_processed_game_ids():
    """Return the game ids already written to out/all_plays.json.

    The file holds one JSON object per line, each with a 'game_id' key.
    Fix: returns an empty list when the output file does not exist yet
    (i.e. on the first run), instead of crashing with FileNotFoundError.
    """
    try:
        with open('out/all_plays.json') as f:
            return [json.loads(row)['game_id'] for row in f]
    except FileNotFoundError:
        return []
# Collect play-by-play for every 2017-18 regular-season game of every NBA
# team, appending to out/all_plays.json. Already-processed game ids are
# skipped, so the script is resumable across runs.
nba_teams = teams.get_teams()
team_ids = [team['id'] for team in nba_teams]
processed_game_ids = get_processed_game_ids()
for team_id in team_ids:
    print('Getting games for team: {}'.format(team_id))
    gamefinder = leaguegamefinder.LeagueGameFinder(team_id_nullable=team_id)
    games = gamefinder.get_data_frames()[0]
    # game_ids = games[games['GAME_DATE'] > '2018-10-15']['GAME_ID']
    # Restrict to the 2017-18 season date range.
    game_ids = games[(games['GAME_DATE'] > '2017-10-16') & (games['GAME_DATE'] < '2018-04-05')]['GAME_ID']
    for game_id in game_ids:
        if game_id not in processed_game_ids:
            try:
                print('Getting records for gameid: {}'.format(game_id))
                plays_to_json(game_id, 'out/all_plays.json')
                time.sleep(5)  # throttle requests to the stats API
                processed_game_ids.append(game_id)
            except Exception as e:
print('ERROR: {}'.format(e)) |
"""
Runs the job_hunter program
"""
import pandas as pd
import pull_indeed as pull_indeed
import waze
from tqdm import tqdm
def job_hunter():
    """Prompt for a city and job title, then scrape Indeed for matches.

    Spaces are replaced with '+' so the values are URL-query safe.
    Returns the DataFrame produced by pull_indeed.scrape_indeed.
    """
    city = str(input("Search which City, State?: ")).replace(" ", "+")
    title = str(input("what job title?: ")).replace(" ", "+")
    return pull_indeed.scrape_indeed(city=city, title=title)
if __name__ == "__main__":
    # Ask for the user's home address, scrape matching jobs, then compute
    # a Waze commute estimate for each job's cleaned address.
    TO_ADDRESS = str(input("What is your address?: "))
    JOB_HUNTER = job_hunter()
    print(JOB_HUNTER)
    COMMUTES = []
    for address in tqdm(JOB_HUNTER['clean_address']):
        COMMUTES.append(waze.calc_route(TO_ADDRESS=TO_ADDRESS,
                                        FROM_ADDRESS=address))
    # Join commute estimates onto the job rows and persist as CSV.
    commutes = pd.DataFrame(COMMUTES, columns=['waze'])
    results = pd.concat([JOB_HUNTER, commutes], axis=1, sort=False)
    results.to_csv("results/results.csv", encoding='utf-8')
|
class Solution:
    def ways(self, pizza: List[str], k: int) -> int:
        """Count the ways to cut the pizza into k pieces so that every
        piece contains at least one apple ('A').  LeetCode 1444.

        cuts(c, x1, x2, y1, y2) counts valid completions for the remaining
        rectangle covering columns [x1, x2) and rows [y1, y2) after c cuts;
        each cut gives away the left (vertical) or top (horizontal) part,
        which must contain an apple.  Result is modulo 10**9 + 7.

        Fix: removed the unused outer `amt` accumulator that shadowed the
        one inside `cuts`.
        """
        MOD = 10**9 + 7

        @lru_cache(None)
        def cuts(c, x1, x2, y1, y2):
            # Last piece: valid iff the remaining rectangle holds an apple.
            if c + 1 == k:
                for i in range(y1, y2):
                    if 'A' in pizza[i][x1:x2]:
                        return 1
                return 0
            amt = 0
            # Vertical cut at column i is allowed once some column < i in
            # this rectangle contains an apple (the given-away left part).
            validCut = False
            for i in range(x1, x2):
                if not validCut and 'A' in [row[i] for row in pizza[y1:y2]]:
                    validCut = True
                elif validCut and i > x1:
                    amt = (amt + cuts(c + 1, i, x2, y1, y2)) % MOD
            # Horizontal cut at row i is allowed once some row < i in this
            # rectangle contains an apple (the given-away top part).
            validCut = False
            for i in range(y1, y2):
                if not validCut and 'A' in pizza[i][x1:x2]:
                    validCut = True
                elif validCut and i > y1:
                    amt = (amt + cuts(c + 1, x1, x2, i, y2)) % MOD
            return amt % MOD

        return cuts(0, 0, len(pizza[0]), 0, len(pizza))
|
from django.utils import timezone
from cmd_controller.models import CommandController
class CommandContextManager:
    """Context manager tracking script runs via CommandController rows.

    Entering yields the queryset of controller rows for `script_name`.
    On exit, an existing row is stamped with the current time; when no
    row exists yet, a fresh one is created with the reentrancy flag.
    """

    def __init__(self, script_name, is_reenterent=False):
        self.script_name = script_name
        self.is_reenterent = is_reenterent

    def __enter__(self):
        qs = CommandController.objects.filter(script_name=self.script_name)
        self.command_controller_qs = qs
        return qs

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.command_controller_qs:
            # First run of this script: register it.
            CommandController.objects.create(
                script_name=self.script_name,
                is_reentrant=self.is_reenterent,
            )
        else:
            self.command_controller_qs.update(updated_on=timezone.now())
# The decerator won't call the calling function if the script is not re_enterant, if run the script again and again.
def check_command(script_name, re_enterant):
def decerator(func):
def wrapper(*args, **kwargs):
with CommandContextManager(script_name, re_enterant) as script:
if script:
# calling the handle functon iff its re_enterant( can run again and again).
if script.first().is_reentrant:
# calling the handle functon iff its re_enterant( can run again and again).
func(*args, **kwargs)
else:
# calling for the first time
func(*args, **kwargs)
return wrapper
return decerator |
import io
import logging
from time import time
from urllib.parse import quote
import pyqrcode
from flask import Blueprint, current_app, redirect, url_for, g, request, Response, flash, send_file
from flask_babel import lazy_gettext as _
from flask_mongoengine.wtf import model_form
from mongoengine import NotUniqueError, ValidationError
from werkzeug.exceptions import abort
from werkzeug.utils import secure_filename
from flask_classy import route
from lore.api.pdf import fingerprint_pdf
from lore.api.resource import (
Authorization,
FilterableFields,
ImprovedBaseForm,
ImprovedModelConverter,
ItemResponse,
ListResponse,
ResourceAccessPolicy,
ResourceView,
filterable_fields_parser,
prefillable_fields_parser,
set_theme,
)
from lore.model.asset import FileAccessType, FileAsset, get_google_urls
from lore.model.misc import set_lang_options, filter_is_owner
from lore.model.shop import products_owned_by_user, user_has_asset
from lore.model.world import Publisher, filter_authorized_by_publisher
logger = current_app.logger if current_app else logging.getLogger(__name__)
asset_app = Blueprint("assets", __name__)
def set_cache(rv, cache_timeout):
    """Mark response `rv` publicly cacheable for `cache_timeout` seconds.

    A timeout of None leaves the response untouched. Returns rv.
    """
    if cache_timeout is None:
        return rv
    rv.cache_control.public = True
    rv.cache_control.max_age = cache_timeout
    rv.expires = int(time() + cache_timeout)
    return rv
# Inspiration
# https://github.com/RedBeard0531/python-gridfs-server/blob/master/gridfs_server.py
def send_gridfs_file(
    gridfile,
    mimetype=None,
    as_attachment=False,
    attachment_filename=None,
    add_etags=True,
    cache_timeout=2628000,
    conditional=True,
    fingerprint_user_id=None,
):
    """Build a Flask Response streaming a GridFS file.

    gridfile: GridFS file object exposing content_type, length,
        upload_date, name and md5, and iterable for streaming.
    mimetype: overrides gridfile.content_type; ValueError when neither
        is available.
    as_attachment: send with a Content-Disposition download header using
        attachment_filename or gridfile.name (ValueError when neither set).
    add_etags: set the response ETag from the file's md5.
    cache_timeout: public cache lifetime in seconds; see set_cache().
    conditional: honour conditional request headers via make_conditional().
    fingerprint_user_id: when set, pipe the file through fingerprint_pdf()
        to watermark it for that user.
    """
    # Default cache timeout is 1 month in seconds
    if not mimetype:
        if not gridfile.content_type:
            raise ValueError("No Mimetype given and none in the gridfile")
        mimetype = gridfile.content_type
    # TODO check that this is in UTC-time
    headers = {
        "Content-Length": gridfile.length,
        "Last-Modified": gridfile.upload_date.strftime("%a, %d %b %Y %H:%M:%S GMT"),
    }
    if as_attachment:
        if not attachment_filename:
            if not gridfile.name:
                raise ValueError("No attachment file name given and none in the gridfile")
            attachment_filename = gridfile.name
        # Handles unicode filenames in most browsers, see
        # https://stackoverflow.com/questions/21818855/flask-handling-unicode-text-with-werkzeug/30953380#30953380
        headers["Content-Disposition"] = "attachment; filename*=UTF-8''{quoted_filename}".format(
            quoted_filename=quote(attachment_filename.encode("utf8"))
        )
    md5 = gridfile.md5  # as we may overwrite gridfile with own iterator, save this
    if fingerprint_user_id:
        gridfile = fingerprint_pdf(gridfile, fingerprint_user_id)
    rv = Response(gridfile, headers=headers, content_type=mimetype, direct_passthrough=True)  # is an iterator
    set_cache(rv, cache_timeout)
    if add_etags:
        rv.set_etag(md5)
    if conditional:
        rv.make_conditional(request)
    return rv
def authorize_and_return(fileasset_slug, as_attachment=False):
    """Look up a FileAsset by slug and serve it if the requester may see it.

    Serving order:
    1. File data stored in GridFS: streamed via send_gridfs_file();
       public assets go to anyone, private ones only when g.user is admin
       or owns the asset (per-user PDFs get fingerprinted).
    2. Otherwise a source_file_url (Google URLs rewritten for direct
       view/download): redirect, with the same public/private checks.
    Aborts 401 when login is required, 403 when access is denied.
    """
    asset = FileAsset.objects(slug=fileasset_slug).first_or_404()
    publisher = Publisher.objects(slug=g.pub_host).first()
    if publisher:
        # For better error pages
        set_theme(g, "publisher", publisher.theme)
    if asset.file_data_exists():
        attachment_filename = asset.get_attachment_filename() if as_attachment else None
        mime = asset.get_mimetype()
        if asset.is_public():
            rv = send_gridfs_file(
                asset.file_data.get(),
                mimetype=mime,
                as_attachment=as_attachment,
                attachment_filename=attachment_filename,
            )
            return rv
        # If we come this far the file is private to a user and should not be cached
        # by any proxies
        if not g.user:
            # Should be caught by error handler in app.py that does SSO redirect if applicable
            abort(401)
        if g.user.admin or user_has_asset(g.user, asset):
            # A pdf that should be unique per user - we need to fingerprint it
            if mime == "application/pdf" and asset.access_type == FileAccessType.user:
                fpid = g.user.id
            else:
                fpid = None
            rv = send_gridfs_file(
                asset.file_data.get(),
                mimetype=mime,
                as_attachment=as_attachment,
                attachment_filename=attachment_filename,
                fingerprint_user_id=fpid,
            )
            # rv.headers['Cache-Control'] = 'private' # Override the public cache
            return rv
    elif asset.source_file_url:
        return_url = asset.source_file_url
        google_urls = get_google_urls(asset.source_file_url)
        if google_urls:
            return_url = google_urls["dl"] if as_attachment else google_urls["direct"]
        if asset.is_public():
            return redirect(return_url)
        if not g.user:
            # Should be caught by error handler in app.py that does SSO redirect if applicable
            abort(401)
        if g.user.admin or user_has_asset(g.user, asset):
            return redirect(return_url)
    abort(403)
class AssetAccessPolicy(ResourceAccessPolicy):
    """Access policy for file assets: the owner and the publisher's
    editors/readers get privileged access."""

    def is_editor(self, op, user, res):
        allowed = user == res.owner or (res.publisher and user in res.publisher.editors)
        if not allowed:
            return Authorization(False, _('Not allowed access to %(op)s "%(res)s" as not an editor', op=op, res=res))
        return Authorization(
            True, _('Allowed access to %(op)s "%(res)s" as editor', op=op, res=res), privileged=True
        )

    def is_reader(self, op, user, res):
        allowed = user == res.owner or (res.publisher and user in res.publisher.readers)
        if not allowed:
            return Authorization(False, _('Not allowed access to %(op)s "%(res)s" as not a reader', op=op, res=res))
        return Authorization(
            True, _('Allowed access to %(op)s "%(res)s" as reader', op=op, res=res), privileged=True
        )
class FileAssetsView(ResourceView):
    """CRUD views for FileAsset documents, mounted at /media/ on the
    publisher subdomain."""
    subdomain = "<pub_host>"
    route_base = "/media/"
    access_policy = AssetAccessPolicy()
    model = FileAsset
    list_template = "asset/fileasset_list.html"
    item_template = "asset/fileasset_item.html"
    # Form used for create/update; file metadata fields are computed from
    # the upload, not edited directly.
    form_class = model_form(
        FileAsset,
        exclude=["md5", "source_filename", "length", "created_date", "content_type", "width", "height", "file_data"],
        base_class=ImprovedBaseForm,
        converter=ImprovedModelConverter(),
    )
    filterable_fields = FilterableFields(
        FileAsset,
        [
            ("slug", _("File")),
            "owner",
            "access_type",
            "content_type",
            "tags",
            "length",
            ("publisher.title", FileAsset.publisher.verbose_name),
        ],
        choice=lambda x: x if x in ["single", "multiple"] else "multiple",
        select=lambda x: x.split(","),
        position=lambda x: x if x in ["gallery-center", "gallery-card", "gallery-wide"] else "gallery-center",
    )
    item_arg_parser = prefillable_fields_parser(["slug", "owner", "access_type", "tags", "length"])
    def index(self, **kwargs):
        """List file assets the current user may see, selected ones first."""
        publisher = Publisher.objects(slug=g.pub_host).first()
        set_lang_options(publisher)
        r = ListResponse(
            FileAssetsView,
            [("files", FileAsset.objects().order_by("-created_date")), ("publisher", publisher)],
            extra_args=kwargs,
        )
        r.set_theme("publisher", publisher.theme if publisher else None)
        r.auth_or_abort()
        if not (g.user and g.user.admin):
            r.query = r.query.filter(filter_is_owner() | filter_authorized_by_publisher(publisher))
        r.finalize_query()
        # This will re-order so that any selected files are guaranteed to show first
        if r.args["select"] and len(r.args["select"]) > 0:
            head, tail = [], []
            for item in r.files:
                if item.slug in r.args["select"]:
                    head.append(item)
                else:
                    tail.append(item)
            r.files = head + tail
        return r
    @route("<path:id>", methods=["GET"])
    def get(self, id):
        """Show one asset; the special id "post" renders the creation form."""
        publisher = Publisher.objects(slug=g.pub_host).first()
        set_lang_options(publisher)
        if id == "post":
            r = ItemResponse(FileAssetsView, [("fileasset", None)], extra_args={"intent": "post"})
            r.set_theme("publisher", publisher.theme if publisher else None)
            r.auth_or_abort(res=None)
        else:
            fileasset = FileAsset.objects(slug=id).first_or_404()
            r = ItemResponse(FileAssetsView, [("fileasset", fileasset)])
            r.set_theme("publisher", publisher.theme if publisher else None)
            r.auth_or_abort()
        return r
    @route("<path:id>", methods=["PATCH"])
    def patch(self, id):
        """Partially update an asset from validated form data."""
        publisher = Publisher.objects(slug=g.pub_host).first()
        set_lang_options(publisher)
        fileasset = FileAsset.objects(slug=id).first_or_404()
        r = ItemResponse(FileAssetsView, [("fileasset", fileasset)], method="patch")
        r.set_theme("publisher", publisher.theme if publisher else None)
        r.auth_or_abort()
        if not r.validate():
            # return same page but with form errors?
            flash(_("Error in form"), "danger")
            return r, 400  # BadRequest
        # only populate selected keys. will skip empty selects!
        r.form.populate_obj(fileasset)
        try:
            r.commit()
        except (NotUniqueError, ValidationError) as err:
            return r.error_response(err)
        return redirect(r.args["next"] or url_for("assets.FileAssetsView:get", id=fileasset.slug))
    def post(self):
        """Create a new asset from validated form data."""
        publisher = Publisher.objects(slug=g.pub_host).first()
        set_lang_options(publisher)
        r = ItemResponse(FileAssetsView, [("fileasset", None)], method="post")
        r.set_theme("publisher", publisher.theme if publisher else None)
        r.auth_or_abort()
        fileasset = FileAsset()
        if not r.validate():
            flash(_("Error in form"), "danger")
            return r, 400
        r.form.populate_obj(fileasset)
        try:
            r.commit(new_instance=fileasset)
        except (NotUniqueError, ValidationError) as err:
            return r.error_response(err)
        return redirect(r.args["next"] or url_for("assets.FileAssetsView:get", id=fileasset.slug))
    def file_selector(self, type):
        """Modal file picker filtered by kind: image, document or any."""
        kwargs = {
            "out": "modal",
            "intent": "patch",
            "view": "card",
        }
        if type == "image":
            kwargs["content_type__startswith"] = "image/"
        elif type == "document":
            kwargs["content_type__not__startswith"] = "image/"
        elif type == "any":
            pass  # no content_type requirement
        else:
            abort(404)
        r = self.index(**kwargs)
        return r
    @route("<path:id>", methods=["DELETE"])
    def delete(self, id):
        """Delete an asset and return to the list view."""
        publisher = Publisher.objects(slug=g.pub_host).first()
        set_lang_options(publisher)
        fileasset = FileAsset.objects(slug=id).first_or_404()
        r = ItemResponse(FileAssetsView, [("fileasset", fileasset)], method="delete")
        r.auth_or_abort()
        r.commit()
        return redirect(r.args["next"] or url_for("assets.FileAssetsView:index", pub_host=publisher.slug))
# Register the view's routes on the assets blueprint under /files.
FileAssetsView.register_with_access(asset_app, "files")
@asset_app.route("/", subdomain="<pub_host>")
def index():
    """Blueprint root: forward to the file asset list view."""
    target = url_for("assets.FileAssetsView:index")
    return redirect(target)
@current_app.route("/asset/link/<path:fileasset>")
def link(fileasset):
    """Serve a file asset inline (no attachment disposition)."""
    return authorize_and_return(fileasset, as_attachment=False)
@current_app.route("/asset/qr/<code>.svg")
def qrcode(code):
    """Render a short-link code as an SVG QR image.

    Everything is upper-cased because upper-case alphanumerics give a
    more compact QR encoding.
    """
    host = current_app.config["DEFAULT_HOST"].upper()
    qr = pyqrcode.create(f"HTTPS://{host}/+{code.upper()}", error="L")
    buffer = io.BytesIO()
    qr.svg(buffer, scale=5)
    buffer.seek(0)
    return send_file(buffer, attachment_filename="qrcode.svg", mimetype="image/svg+xml")
@current_app.route("/asset/download/<path:fileasset>")
def download(fileasset):
    """Serve a file asset as a download attachment."""
    rv = authorize_and_return(fileasset, as_attachment=True)
    return rv
@current_app.route("/asset/image/<path:slug>")
def image(slug):
    """Serve an image asset's bytes; non-image assets get a redirect to a
    static mimetype icon instead."""
    asset = FileAsset.objects(slug=slug).first_or_404()
    if asset.content_type and asset.content_type.startswith("image/"):
        grid_file = asset.file_data.get() if asset.file_data else None
        if grid_file:
            r = send_gridfs_file(grid_file, mimetype=asset.content_type)
        else:
            abort(404)
    else:
        r = redirect(url_for("static", filename="img/icon/%s-icon.svg" % secure_filename(asset.content_type)))
    # Redirect default uses 302 temporary redirect, but we want to cache it for a while
    set_cache(r, 10)  # 10 seconds, should be 2628000 = 1 month
    return r
@current_app.route("/asset/image/thumbs/<path:slug>")
def image_thumb(slug):
    """Thumbnail endpoint; currently falls back to the full-size image."""
    return image(slug.lower())  # thumbs temporarily out of play
    # asset = FileAsset.objects(slug=slug).first_or_404()
    # return send_gridfs_file(asset.file_data.thumbnail, mimetype=asset.content_type)
|
from tkinter import *
import os
class Demo1:
    """Main menu window: collects a user name and an action choice
    (broadcast or listen) before handing off to server.py."""
    def __init__(self, master):
        self.master = master
        self.master.option_add('*font', ('Ariel', 12, 'bold'))
        self.master.title("Chatbox - Menu")
        self.master.geometry("320x320")
        fm = Frame(self.master)
        w = Label(self.master, text="Welcome to Chatbox App\n Choose to continue")
        w.pack()
        # Single-line Text widget used as the name entry field.
        self.T = Text(self.master, height=1, width=20)
        self.T.pack()
        self.T.insert(END, "Enter your name here")
        Button(fm, text='Broadcast', command = self.menu_broadcast).pack(side=TOP, expand=YES)
        Button(fm, text='Listen', command = self.menu_listen).pack(side=TOP, expand=YES)
        Button(fm, text='Help', command = self.new_windows).pack(side=TOP, expand=YES)
        w2 = Label(self.master, text="Made with LOVE\n By @uramirbin")
        w2.pack(side=BOTTOM)
        fm.pack(fill=BOTH, expand=YES)
        # Filled in by the button callbacks; read via get() after mainloop ends.
        self.action = ""
        self.name = ""
    def new_windows(self):
        """Open the help window on top of the menu."""
        self.newWindow = Toplevel(self.master)
        self.app = help_menu(self.newWindow)
    def menu_broadcast(self):
        """Record the name, select the broadcast action, close the menu."""
        self.name = self.T.get("1.0",END)
        self.action = "1"
        self.master.destroy()
    def menu_listen(self):
        """Record the name, select the listen action, close the menu."""
        self.name = self.T.get("1.0",END)
        self.action = "2"
        self.master.destroy()
    def get(self):
        """Return (action, name); Text.get includes a trailing newline."""
        return self.action, self.name
class help_menu:
    """Secondary window describing how to use the chat application."""

    def __init__(self, master):
        self.master = master
        self.master.option_add('*font', ('Ariel', 12))
        self.master.title("Chatbox - Help")
        self.master.geometry("480x240")
        self.fm = Frame(self.master)
        line = Label(self.fm, text="First Run the broadcast then listener")
        line.pack()
        line = Label(self.master, text="WARNING: Broadcaster, broadcasts for about 4 seconds")
        line.pack()
        line = Label(self.master, text="Type in [!q] in chat to disconnect")
        line.pack()
        Button(self.fm, text='Back', command = self.close_windows).pack(side=TOP, expand=YES)
        footer = Label(self.master, text="Made with LOVE\n By @uramirbin")
        footer.pack(side=BOTTOM)
        self.fm.pack(fill=BOTH, expand=YES)

    def close_windows(self):
        """Dismiss the help window."""
        self.master.destroy()
def main():
    """Show the menu window, then launch server.py with the chosen action.

    Fixes: the name read from the Text widget includes a trailing newline
    (Text.get spans to END), which previously ended up inside the shell
    command; it is now stripped.  If the window is closed without picking
    an action, the script now exits instead of launching the server with
    an empty action argument.

    NOTE(review): the command is still built from raw user input and run
    through the shell via os.system; a name containing shell
    metacharacters can inject commands.  Consider subprocess.run with a
    list argument if server.py accepts the name as a single argv entry.
    """
    root = Tk()
    app = Demo1(root)
    root.mainloop()
    myaction, myname = app.get()
    if not myaction:
        return  # window closed without choosing Broadcast/Listen
    command = "python server.py " + myaction + " " + myname.strip()
    os.system(command)
if __name__ == '__main__':
main() |
# Generated by Django 2.2.4 on 2019-10-25 01:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.4 (2019-10-25); adds the Sueldo model
    # and widens the tipoPago choices on Venta. Keep in sync with the
    # persona/venta model definitions.
    dependencies = [
        ('persona', '0001_initial'),
        ('venta', '0006_auto_20191024_1612'),
    ]
    operations = [
        migrations.AlterField(
            model_name='venta',
            name='tipoPago',
            field=models.CharField(blank=True, choices=[('Efectivo', 'Efectivo'), ('Debito', 'Débito'), ('Credito', 'Crédito'), ('Cuenta', 'Cta. Corriente')], max_length=50, null=True),
        ),
        migrations.CreateModel(
            name='Sueldo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tipo_empleado', models.CharField(blank=True, choices=[('Peluquero', 'Peluquero'), ('Ayudante', 'Ayudante')], max_length=50, null=True)),
                ('pago_semanal', models.FloatField(blank=True, null=True)),
                ('sueldo_basico', models.FloatField(blank=True, null=True)),
                ('adelanto', models.FloatField(blank=True, null=True)),
                ('peluquero', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='persona.Empleado')),
            ],
        ),
    ]
|
__version__ = "0.1.2"
from .data import *
from .nn import *
from .bocos import *
from .pde import PDE
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse,JsonResponse,HttpResponseRedirect
import random
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
import json
from .models import *
from django.views import View
from django.forms.models import model_to_dict
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from django.views.generic import ListView,DetailView
import datetime
from django.http import QueryDict
from page import JuncheePaginator
from django.db.models import Q
# Landing page; login_required on dispatch sends anonymous users to login.
@method_decorator(login_required,name='dispatch')
class firstpage(TemplateView):
    template_name = "index2.html"
# @csrf_exempt
class mylogin(TemplateView):
    """Login page: GET renders the form, POST authenticates.

    POST answers with JSON {'status': 0} on success, {'status': 1} on
    bad credentials.
    """
    template_name = "login.html"

    def get_context_data(self, **kwargs):
        # Pass the ?next= redirect target through to the template.
        return {'nexturl': self.request.GET.get("next")}

    def post(self, request):
        user = authenticate(
            username=request.POST.get('username'),
            password=request.POST.get('password'),
        )
        if user is None:
            return JsonResponse({'status': 1})
        login(request, user)
        return JsonResponse({'status': 0})
class mylogout(View):
    """Log the current user out and send them to the front page."""

    def get(self, request, *args, **kwargs):
        logout(request)
        target = reverse('firstpage')
        return HttpResponseRedirect(target)
class bookquery(View):
    """Return every book as JSON: {'status': 0, 'data': [...]}."""

    def get(self, request):
        payload = [book.todict for book in Book.objects.all()]
        return JsonResponse({'status': 0, 'data': payload})
class authorquery(View):
    """Return every author as JSON: {'status': 0, 'data': [...]}.

    Fix: dropped the dead code that computed the union of the two most
    followed and two lowest-income authors (qsret) -- it was never used,
    yet issued two extra database queries on every request.
    """
    def get(self, request):
        data = [i.todict for i in Author.objects.all()]
        return JsonResponse({'status': 0, 'data': data})
class users(View):
    """Trivial smoke-test endpoint."""

    def get(self, request):
        message = 'Hello world'
        return HttpResponse(message)
class users1(View):
    """Echo the `pk` URL kwarg back as the response body."""
    def get(self,request,**kwargs):
        print request.user  # debug: log which user hit the endpoint
        pk1 = self.kwargs.get('pk')
        return HttpResponse(pk1)
class hello(TemplateView):
    """Demo view rendering hello.html with canned context values."""
    template_name = 'hello.html'
    def get_context_data(self,**kwargs):
        kwargs['username'] = "韩寒"
        kwargs['lans'] = ['python','flask','django','java']
        print kwargs  # debug: dump the template context
        return kwargs
# class authorlist(ListView):
# model = Author
# template_name = 'authors.html'
# context_object_name = 'authors'
# paginate_by = 3
# search = 'search'
#
# def get_context_data(self, **kwargs):
# context = super(authorlist,self).get_context_data(**kwargs)
# context['job'] = 'pythonor'
# return context
# def get_queryset(self):
# return self.model.objects.order_by('-name')
class authorapi(View):
    """JSON API for authors.

    GET with a pk returns that author's dict; GET without a pk returns
    the union of the two highest-income and two most-followed authors
    created after 2018-01-05.
    """
    pk_url_kwarg = 'pk'
    model = Author

    def get(self, request, *args, **kwargs):
        pk = kwargs.get(self.pk_url_kwarg)
        if pk:
            author = self.model.objects.get(pk=pk)
            return JsonResponse(author.todict)
        qs = self.model.objects.all().filter(createtime__gt='2018-01-05')
        top_income = set(qs.order_by('-income')[:2])
        top_fans = set(qs.order_by('-fansnum')[:2])
        data = [author.todict for author in list(top_income.union(top_fans))]
        return JsonResponse({'data': data, 'status': 0})
# class authordetail(DetailView):
# model = Author
# bookmodel = Book
# context_object_name = "author"
# pk_url_kwarg = 'pk'
# template_name = "authordetail.html"
#
# def get_context_data(self, **kwargs):
# kwargs['timenow'] = datetime.datetime.now()
# return super(authordetail,self).get_context_data(**kwargs)
#
# # 增
# def post(self,request,*args,**kwargs):
# # 接收前端数据
# rqs = QueryDict(request.body).dict()
# rqsbooks = rqs['books']
# print rqsbooks
# books = [book.strip() for book in json.loads(rqsbooks)]
# # 判断书在不在数据库,在单独话存数据,不在返回错误信息
# errorinfo = []
# bookobjs = []
# for book in books:
# try:
# bookobj = self.bookmodel.objects.get(name=book)
# except self.bookmodel.DoesNotExist:
# errorinfo.append({'name':book})
# else:
# bookobjs.append(bookobj)
# if errorinfo:
# return JsonResponse({'status':-1,'data':errorinfo})
# # 创建作者对象(即除了书以外的)
# rqs.pop('books')
# authorobj = self.model.objects.create(**rqs)
# # 写作者和书的manytomany关系
# for bookobj in bookobjs: bookobj.author.add(authorobj)
# return JsonResponse({'status':0})
#
# # 改
# def put(self,request,*args,**kwargs):
# bookidskey = 'bookids'
# pk = self.kwargs.get(self.pk_url_kwarg)
# rqs = QueryDict(request.body).dict()
# print rqs
# bks = rqs.get(bookidskey)
# print bks
# bks = [int(bookid) for bookid in json.loads(bks)]
# if rqs.get(bookidskey): rqs.pop(bookidskey)
# authorbookids = [bk.id for bk in self.model.objects.get(pk=pk).book_set.all()]
# diffids = list(set(authorbookids).difference(set(bks)))
# if diffids:
# for bookid in diffids:
# bkobj = self.bookmodel.objects.get(pk=bookid)
# authorobj = self.model.objects.get(pk=pk)
# authorobj.book_set.remove(bkobj)
# self.model.objects.filter(pk=self.kwargs.get(self.pk_url_kwarg)).update(**rqs)
# return JsonResponse({'status':0})
#
# # 删
# def delete(self,request,*args,**kwargs):
# pk=self.kwargs.get(self.pk_url_kwarg)
# self.model.objects.get(pk=pk).delete()
# return JsonResponse({'status': 0})
class authorinfo(View):
    """Render the author detail page for the given primary key."""
    template_name="authordetail.html"
    model = Author
    def get(self,request,*args,**kwargs):
        pk = kwargs.get("pk")
        obj = self.model.objects.get(pk=pk)
        # model_to_dict with exclude=[] serializes every editable field.
        data = model_to_dict(obj, exclude=[])
        print data  # debug: dump the serialized author
        return render(request,self.template_name, {'data': data})
class authorlist(View):
    """Author list page plus JSON create/update/delete endpoints."""
    template_name = 'authors.html'  # template name
    model = Author
    bookmodel = Book
    pk_url_kwarg = 'pk'
    search = 'search'
    def get_queryset(self, search):  # database query result
        qs = self.model.objects.all()
        if search:
            qs = qs.filter(Q(name__contains=search)|Q(note__contains=search))  # fuzzy match `search` against name or note
        return qs
    def get(self, request, *args, **kwargs):
        # With a pk: return that author as JSON; otherwise render the
        # paginated (and optionally searched) list page.
        pk = kwargs.get(self.pk_url_kwarg)
        if pk:
            obj = self.model.objects.get(pk=pk)
            data = model_to_dict(obj, exclude=[])
            return JsonResponse({'status':0, 'data':data})
        else:
            page_num = request.GET.get('page', 1)  # requested page number
            search = request.GET.get(self.search)
            qs = self.get_queryset(search)
            pageobj = JuncheePaginator(qs)
            wdata = pageobj.pagecomputer(page_num)
            return render(request, self.template_name, {'res_data': wdata[0], 'allpages': wdata[1], 'search':search})
    # Create
    def post(self,request,*args,**kwargs):
        # Receive the front-end data
        rqs = QueryDict(request.body).dict()
        rqsbooks = rqs['books']
        print rqsbooks
        books = [book.strip() for book in json.loads(rqsbooks)]
        # Check whether each book exists in the database; collect the
        # objects that do and report the names that don't.
        errorinfo = []
        bookobjs = []
        for book in books:
            try:
                bookobj = self.bookmodel.objects.get(name=book)
            except self.bookmodel.DoesNotExist:
                errorinfo.append({'name':book})
            else:
                bookobjs.append(bookobj)
        if errorinfo:
            return JsonResponse({'status':-1,'data':errorinfo})
        # Create the author object (all fields except the books)
        rqs.pop('books')
        authorobj = self.model.objects.create(**rqs)
        # Write the author <-> book many-to-many relations
        for bookobj in bookobjs: bookobj.author.add(authorobj)
        return JsonResponse({'status':0})
    # Update
    def put(self,request,*args,**kwargs):
        bookidskey = 'bookids'
        rqs = QueryDict(request.body).dict()
        pk = rqs.get('pk')
        bks = rqs.get(bookidskey)
        bks = [int(bookid) for bookid in json.loads(bks)]
        if rqs.get(bookidskey): rqs.pop(bookidskey)
        # Remove book links that are no longer in the submitted id list.
        authorbookids = [bk.id for bk in self.model.objects.get(pk=pk).book_set.all()]
        diffids = list(set(authorbookids).difference(set(bks)))
        if diffids:
            for bookid in diffids:
                bkobj = self.bookmodel.objects.get(pk=bookid)
                authorobj = self.model.objects.get(pk=pk)
                authorobj.book_set.remove(bkobj)
        rqs.pop("pk")
        self.model.objects.filter(pk=pk).update(**rqs)
        return JsonResponse({'status':0})
    # Delete
    def delete(self,request,*args,**kwargs):
        pk=self.kwargs.get(self.pk_url_kwarg)
        self.model.objects.get(pk=pk).delete()
return JsonResponse({'status': 0}) |
#!/usr/bin/env python
import sys
import os
import fnmatch
dirs = ['initialFit','finalFit','mhFit','simFit']
procs = ['ggh','vbf','wzh','tth']
ncats = 9
def organise():
    """Move each (fit-dir, process, category) plot set into its own subdirectory."""
    for plot_dir in dirs:
        for proc in procs:
            for cat in range(ncats):
                os.system('mkdir -p plots/%s/%s_cat%d' % (plot_dir, proc, cat))
                os.system('mv plots/%s/*%s_cat%d*.p* plots/%s/%s_cat%d/' % (plot_dir, proc, cat, plot_dir, proc, cat))
# Usage: checkJobs.py <dir_with_jobs>.  Job scripts are *.sh files; the
# batch system marks their state with .run/.done/.fail suffix files.
if len(sys.argv)!=2:
    sys.exit('usage checkJobs.py <dir_with_jobs>')
jobdir=sys.argv[1]
if not os.path.isfile('%s/filestocombine.dat'%jobdir):
    sys.exit('no filestocombine.dat file found')
combinefile = '%s/filestocombine.dat'%jobdir
# NOTE(review): the loop body overwrites the four lists on every directory
# visited, so only the last directory walked is actually reported; the
# loop variable `dirs` also shadows the module-level `dirs` list.
for root, dirs, files in os.walk(jobdir):
    running_jobs = fnmatch.filter(files,'*.sh.run')
    finished_jobs = fnmatch.filter(files,'*.sh.done')
    failed_jobs = fnmatch.filter(files,'*.sh.fail')
    all_jobs = fnmatch.filter(files,'*.sh')
print 'Jobs complete:'
print '\t', finished_jobs
print 'Jobs running:'
print '\t', running_jobs
print 'Jobs failed:'
print '\t', failed_jobs
queued_jobs=[]
# NOTE(review): '%s%s' joins jobdir and the job name without a path
# separator -- correct only when jobdir ends with '/'.
for job in all_jobs:
    if os.path.isfile('%s%s.done'%(jobdir,job)): continue
    elif os.path.isfile('%s%s.run'%(jobdir,job)): continue
    elif os.path.isfile('%s%s.fail'%(jobdir,job)): continue
    else: queued_jobs.append(job)
print 'Jobs queued:'
print '\t', queued_jobs
if len(all_jobs)!=len(finished_jobs):
    sys.exit('Some jobs are still queued or have not been submitted. Please check')
if len(running_jobs)==0 and len(failed_jobs)==0 and len(finished_jobs)>0:
    print 'All jobs complete'
    print 'Will run script to package output.'
    #filRes = raw_input('Do you want to organise plots? (y/n)\n')
    #if filRes=='y' or filRes=='Y':
    #    organise()
    webRes = raw_input('Do you want to publish plots to web? (y/n)\n')
    if webRes=='n' or webRes=='N':
        os.system('./bin/PackageOutput -i %s -o %s/CMS-HGG_sigfit.root'%(combinefile,jobdir))
    elif webRes=='y' or webRes=='Y':
        webdir = raw_input('Enter web directory\n')
        # NOTE(review): the argument order differs from the branch above
        # (-o uses webdir here but jobdir there) -- confirm intended.
        os.system('./bin/PackageOutput -i %s -o %s/CMS-HGG_sigfit.root -w %s'%(combinefile,webdir,jobdir))
    else:
        sys.exit('Result not recognised')
|
from __future__ import absolute_import, unicode_literals
from dash.orgs.models import Org
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
from tracpro.contacts.tasks import sync_org_contacts
class AbstractGroup(models.Model):
    """
    Corresponds to a RapidPro contact group
    """
    uuid = models.CharField(max_length=36, unique=True)
    org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name="%(class)ss")
    name = models.CharField(verbose_name=_("Name"), max_length=128, blank=True,
                            help_text=_("The name of this region"))
    # Soft-delete flag; de-activated rows are kept but excluded by get_all().
    is_active = models.BooleanField(default=True, help_text="Whether this item is active")
    @classmethod
    def create(cls, org, name, uuid):
        """Create a group row for `org` with the given RapidPro name/uuid."""
        return cls.objects.create(org=org, name=name, uuid=uuid)
    @classmethod
    def sync_with_groups(cls, org, group_uuids):
        """
        Updates an org's groups based on the selected groups UUIDs
        """
        # de-activate any active groups not included
        cls.objects.filter(org=org, is_active=True).exclude(uuid__in=group_uuids).update(is_active=False)
        # fetch group details
        groups = org.get_temba_client().get_groups()
        group_names = {group.uuid: group.name for group in groups}
        for group_uuid in group_uuids:
            existing = cls.objects.filter(org=org, uuid=group_uuid).first()
            if existing:
                existing.name = group_names[group_uuid]
                existing.is_active = True
                existing.save()
            else:
                cls.create(org, group_names[group_uuid], group_uuid)
        # Kick off an asynchronous re-sync of the org's contacts.
        sync_org_contacts.delay(org.id)
    @classmethod
    def get_all(cls, org):
        """Return the org's active groups."""
        return cls.objects.filter(org=org, is_active=True)
    @classmethod
    def get_response_counts(cls, org, window=None, include_empty=False):
        """Map group pk -> count of active poll responses, optionally
        limited to a time window and excluding empty responses."""
        from tracpro.polls.models import Response, RESPONSE_EMPTY
        qs = Response.objects.filter(issue__poll__org=org, is_active=True)
        if not include_empty:
            qs = qs.exclude(status=RESPONSE_EMPTY)
        if window:
            window_min, window_max = window.to_range()
            qs = qs.filter(updated_on__gte=window_min, updated_on__lt=window_max)
        # Relation name tracks the concrete subclass: contact__region / contact__group.
        field = 'contact__%s' % cls.__name__.lower()
        qs = qs.filter(**{'%s__is_active' % field: True})
        counts = qs.values(field).annotate(count=Count(field))
        return {count[field]: count['count'] for count in counts}
    @classmethod
    def get_most_active(cls, org):
        """Return active groups with responses in the last 30 days, most
        responses first; response_count is attached to each group."""
        from tracpro.polls.models import Window
        count_by_id = cls.get_response_counts(org, window=Window.last_30_days, include_empty=False)
        groups = []
        for group in cls.get_all(org):
            count = count_by_id.get(group.pk, 0)
            if count:
                group.response_count = count
                groups.append(group)
        return sorted(groups, key=lambda g: g.response_count, reverse=True)
    def get_contacts(self):
        """Return this group's active contacts."""
        return self.contacts.filter(is_active=True)
    def __unicode__(self):
        return self.name
    class Meta:
        abstract = True
class Region(AbstractGroup):
    """
    A geographical region modelled as a group
    """
    users = models.ManyToManyField(User, verbose_name=_("Users"), related_name='regions',
                                   help_text=_("Users who can access this region"))
    def get_users(self):
        """Return the region's active users with profiles pre-fetched."""
        return self.users.filter(is_active=True).select_related('profile')
class Group(AbstractGroup):
    """
    A data reporting group (used for reporting rather than geography;
    contrast with Region above)
    """
pass |
# List-manipulation practice: build a travel list, then whittle it to two.
print('i can only go to two countries now. QAQ')
# NOTE(review): "conutries" is a typo for "countries"; left as-is in this
# comment-only pass because later lines reference the name.
conutries = ['A','B','C',]
conutries[2] = 'D'       # replace 'C' with 'D'
conutries.insert(0,'E')  # -> ['E', 'A', 'B', 'D']
conutries.insert(2,'F')  # -> ['E', 'A', 'F', 'B', 'D']
conutries.append('G')    # -> ['E', 'A', 'F', 'B', 'D', 'G']
print(conutries)
# Pop four entries (E, F, D, A), leaving ['B', 'G'] to visit.
print(conutries.pop(0) + ' Sorry, i am not going yet')
print(conutries.pop(1) + ' Sorry, i am not going yet')
print(conutries.pop(2) + ' Sorry, i am not going yet')
print(conutries.pop(0) + ' Sorry, i am not going yet')
print(conutries) |
# Title : Find sub array of specified sum
# Author : Kiran raj R.
# Date : 01:11:2020
def find_sub_array(list_in, sum_list):
    """Find the first contiguous sub-array of `list_in` summing to `sum_list`.

    Uses a sliding window, so it assumes non-negative numbers.  Prints the
    result (preserving the original output format) and returns the matching
    sub-list, or None when the list is empty or no window matches.

    Fixes over the original:
    - empty input raised IndexError (list_in[0] ran before the length check);
    - an unreachable target walked `list_end` past the end (IndexError);
    - a result line was printed even when nothing matched.
    """
    if not list_in:
        print("List has length of zero")
        return None

    window_start = 0
    running_sum = 0
    for window_end, value in enumerate(list_in):
        running_sum += value
        # Shrink from the left while the window overshoots the target.
        while running_sum > sum_list and window_start < window_end:
            running_sum -= list_in[window_start]
            window_start += 1
        if running_sum == sum_list:
            match = list_in[window_start:window_end + 1]
            print(f"Sub array of sum {sum_list} is {match}")
            return match

    print(f"No sub array of sum {sum_list} found")
    return None
# Exercise the search with several target sums over the same list.
for target in (12, 13, 6, 5, 3):
    find_sub_array([1, 2, 3, 4, 5, 6, 7], target)
|
#!/usr/bin/env python
# Read n (number of rows) and k from the first stdin line.
n,k = map(int, input().split())
interview = list()
# One line per entry: a pair of integers.  NOTE(review): the meaning of the
# two fields (id/score?) is not stated anywhere in this file -- confirm
# against the problem statement.
interview = [list( map( int , input().split() ) ) for i in range(n)]
# Sort ascending by the second field.
interview.sort(key = lambda x: x[1])
# `local` looks like a quota of 1.5*k entries; `line` is the cut-off value
# taken from the entry at position n-local.
local = int(k*1.5)
line = interview[n - local][1]
sum1 = 0
i = n - 1
# Count, from the top, how many entries reach the cut-off.
# NOTE(review): if every entry passes, `i` underflows to -1 and the index
# wraps around -- this assumes at least one entry scores below `line`.
while interview[i][1] >= line:
    sum1 += 1
    i -= 1
print(line,sum1)
# Keep only the sum1 passing entries, then re-sort by the first field.
interview = interview[n-sum1:]
interview.sort(key = lambda x: x[0])
# NOTE(review): after the slice `interview` has only sum1 elements, yet the
# loop below indexes n-1 .. n-sum1+1, which is out of range whenever
# n > sum1.  Verify the intended indexing against the original source.
for i in range( n-1, n-sum1, -1):
    print(interview[i][0],interview[i][1])
|
"""The game's constants"""
#Paramètre de la fenêtre
NUMBER_OF_SPRITE = 15
SPRITE_SIZE = 30
SIDE = NUMBER_OF_SPRITE * SPRITE_SIZE
WINDOW_TITLE = "Get out MacGyver!!"
#Ensemble des images
IMAGE_MAC = "images/mg.png"
IMAGE_GUARD = "images/guard.png"
IMAGE_FLOOR = "images/floor.png"
IMAGE_WALL = "images/wall.png"
IMAGE_NEEDLE = "images/needle.png"
IMAGE_ETHER = "images/ether.png"
IMAGE_TUBE = "images/tube.png"
GOT_NEEDLE = "images/needlepicked.png"
GOT_ETHER = "images/etherpicked.png"
GOT_TUBE = "images/tubepicked.png"
GOT_ALL = "images/allpicked.png"
LOSE = "images/loser.png"
|
# Fixture data: one match per line in "Home team - Away team" form for the
# games of 12 May.  The clubs are German; lines appear grouped by
# competition (top flight first) -- kept verbatim for downstream parsing.
MAY_12_MATCHES = """
1899 Hoffenheim - Borussia Dortmund
Hertha BSC - RB Leipzig
VfL Wolfsburg - 1. FC Köln
1. FSV Mainz 05 - Werder Bremen
Hamburger SV - Bor. Mönchengladbach
FC Schalke 04 - Eintracht Frankfurt
SC Freiburg - FC Augsburg
Bayer Leverkusen - Hannover 96
Bayern München - VfB Stuttgart
SpVgg Unterhaching - VfL Osnabrück
Karlsruher SC - FC Carl Zeiss Jena
SV Wehen Wiesbaden - VfR Aalen
Fortuna Köln - SC Paderborn 07
Hallescher FC - FSV Zwickau
Chemnitzer FC - Hansa Rostock
Sportfreunde Lotte - 1. FC Magdeburg
SG Sonnenhof Großaspach - Preußen Münster
Rot-Weiß Erfurt - Würzburger Kickers
Werder Bremen II - SV Meppen
Lüneburger SK Hansa - VfB Lübeck
Eutin 08 - Weiche Flensburg 08
Hamburger SV II - Eintracht Braunschweig II
Hannover 96 II - Schwarz-Weiß Rehden
SSV Jeddeloh - VfV Hildesheim
SV Drochtersen/Assel - Germania Egestorf/Langreder
VfL Wolfsburg II - Eintracht Norderstedt
FC St. Pauli II - Altona 93
TSV Havelse - VfB Oldenburg
1. FC Lok Leipzig - Hertha BSC II
SV Babelsberg 03 - Berliner AK 07
Union Fürstenwalde - BFC Dynamo
FSV 63 Luckenwalde - Germania Halberstadt
Wacker Nordhausen - ZFC Meuselwitz
FC Viktoria 1889 Berlin - VfB Auerbach
VSG Altglienicke - FSV Budissa Bautzen
FC Oberlausitz - Energie Cottbus
BSG Chemie Leipzig - TSG Neustrelitz
SV 07 Elversberg - TuS Koblenz
SC Freiburg II - Wormatia Worms
1899 Hoffenheim II - VfB Stuttgart II
Hessen Kassel - Eintracht Stadtallendorf
SV Röchling Völklingen - FC Astoria Walldorf
TSV Schott Mainz - Kickers Offenbach
Stuttgarter Kickers - FSV Frankfurt
TSV Steinbach - 1. FC Saarbrücken
SSV Ulm 1846 - Waldhof Mannheim
1. FC Schweinfurt 05 - FC Unterföhring
VfB Eichstätt - SpVgg Greuther Fürth II
FC Pipinsried - VfR Garching
FC Augsburg II - FC Ingolstadt 04 II
Bayern München II - SV Seligenporten
SpVgg Oberfranken Bayreuth - TSV 1860 München
FC Memmingen - SV Schalding-Heining
Wacker Burghausen - TSV 1860 Rosenheim
1. FC Nürnberg II - TSV Buchbach
DJK Gebenbach - FC Amberg
SpVgg Jahn Forchheim - SpVgg Ansbach
Viktoria Aschaffenburg - DJK Ammerthal
TSV Aubstadt - Würzburger FV
DJK Bamberg - SC Eltersdorf
SV Erlenbach - SpVgg Bayern Hof
SpVgg Weiden - Würzburger Kickers II
1. FC Schweinfurt 05 II - 1. FC Sand
TSV 1860 München II - SpVgg Hankofen-Hailing
TSV Schwabmünchen - ASV Neumarkt
1. FC Sonthofen - TSV 1865 Dachau
TSV Kottern-St. Mang - FC Ismaning
TSV Schwaben Augsburg - TSV Landsberg
TSV Kornburg - SV Heimstetten
SV Pullach - SV Kirchanschöring
BCF Wolfratshausen - DJK Vilzing
SB Chiemgau Traunstein - TuS Holzkirchen
Brinkumer SV - Bremer SV
BTS Neustadt - LTS Bremerhaven
ESC Geestemünde - TSV Grolland
Habenhauser FV - FC Oberneuland
BSC Hastedt - SG Aumund-Vegesack
OSC Bremerhaven - TuS Schwachhausen
VfL 07 Bremen - Vatan Spor Bremen
Werder Bremen III - Blumenthaler SV
Bayern Alzenau - Hessen Dreieich
Borussia Fulda - FSC Lohfelden
SV Rot-Weiss Hadamar - SV Steinbach
Rot-Weiß Frankfurt - SpVgg Neu-Isenburg
OSC Vellmar - FC Ederbergland
Viktoria Griesheim - SV Buchonia Flieden
Teutonia Watzenb.-Steinb. - TSV Lehnerz
Göppinger SV - FV Ravensburg
1. CfR Pforzheim - TSG Backnang
TSG Balingen - FSV 08 Bissingen
Neckarsulmer Sport-Union - SV Sandhausen II
SV Oberachern - SV Spielberg
FV 07 Diefflen - FV Eppelborn
FV Dudenhofen - FC Karbach
SC 07 Idar-Oberstein - TSG Pfeddersheim
1. FC Kaiserslautern II - TuS Mechtersheim
SV Morlautern - Eintracht Trier
FK Pirmasens - FC 08 Homburg
FSV Salmrohr - Saar 05 Saarbrücken
Weiche Flensburg 08 II - TSV Lägerdorf
PSV Union Neumünster - Heider SV
NTSV Strand 08 - Inter Türkspor Kiel
FC Dornbreite Lübeck - SV Frisia 03
TSV Schilksee - TSV Wankendorf
BV Cloppenburg - Bor. Mönchengladbach
SV Henstedt-Ulzburg - Turbine Potsdam II
VfL Wolfsburg II - Arminia Bielefeld
SV Meppen - TV Jahn Delmenhorst
FF USV Jena II - BW Hohen Neuendorf
FSV Gütersloh 2009 - Herforder SV
SG 99 Andernach - 1. FC Köln II
1899 Hoffenheim II - 1. FFC 08 Niederkirchen
Bayer Leverkusen - FSV Hessen Wetzlar
1. FC Saarbrücken - VfL Sindelfingen
1. FFC Frankfurt II - SC Freiburg II
Bayern München II - TSV Schott Mainz
Borussia Bocholt - Bor. Mönchengladbach II
GSV Moers - Germania Hauenhorst
Bayer Leverkusen II - Warendorfer SU
SV Menden - Alemannia Aachen
VfL Bochum - SGS Essen II
SpVg Berghofen - Düsseldorfer CfR links
Sportfreunde 1930 Uevekoven - Fortuna Köln
SV 67 Weinberg - TSV Crailsheim
Eintracht Frankfurt - ETSV Würzburg
FV Löchgau - FC Ingolstadt 04
SpVgg Greuther Fürth - 1. FC Nürnberg
Wacker München - TSV Schwaben Augsburg
SV Frauenbiburg - SC Sand II
SC Regensburg - SV Alberweiler
"""
|
#!/usr/bin/env python
# coding: utf-8
# ### Hello , this is my first kernel.
# ### I will be exploring the housing sale prices in King County, USA between the time period May 2014 - May 2015.
# #### Firstly, I will go through a thorough data exploration to identify the most important features and to explore the intercorrelation between features. After that I apply data normalization between variables and conduct feature engineering. Finally, I will be applying different machine learning algorithms and evaluating their respective success on a cross-validated split train-test set.
# In[ ]:
#Importing fundamental data exploration libaries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import skew
from scipy.stats.stats import pearsonr
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'png' #set 'png' here when working on notebook")
get_ipython().run_line_magic('matplotlib', 'inline')
df_train = pd.read_csv("../input/kc_house_data.csv")
df_train.rename(columns ={'price': 'SalePrice'}, inplace =True)
df_train.head()
# In[ ]:
#checking the columns in the dataset
df_train.columns
# #### At a first glance, the column date can be removed as it serves unsignificant value (timeframe is one year). The features seem to be pre-selected as important influencers for a sale price of a house.
# In[ ]:
df_train['SalePrice'].describe()
df_train['SalePrice']=df_train['SalePrice']
# In[ ]:
#histogram
sns.distplot(df_train['SalePrice'], bins=50, kde=False);
# Given from the histogram: The saleprice has appreciable positive skewness,
# deviates from the normal distribution and
# shows peakedness.
# Let's take a look at the skewness and kurtosis in numbers:
# In[ ]:
print("Skewness: %f" % df_train['SalePrice'].skew())
print("Kurtosis: %f" % df_train['SalePrice'].kurt())
# This is quite significant. At the data standardisation section, we will fix this.
# # 2. Feature exploration
#
# ##### In this section I will be investigating different features by plotting them to determine the relationship to SalePrice.
# In[ ]:
var = 'sqft_living15'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(3,8000000));
# There's clearly a linear relationship with a significant portion of outliers.
# In[ ]:
var = 'bedrooms'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
f, ax = plt.subplots(figsize=(14, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=3500000);
# In[ ]:
var = 'grade'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=3500000);
# In[ ]:
var = 'bathrooms'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
f, ax = plt.subplots(figsize=(20, 20))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=3500000);
# #### Clearly, as the features "bathrooms", "grade", "bedrooms" increase, so does the SalePrice. This is most evident in case of the features bathrooms and grade.
# In[ ]:
var = 'yr_built'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
f, ax = plt.subplots(figsize=(16, 8))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
plt.xticks(rotation=90);
# #### Interestingly enough, one would expect a linear relationship with newer houses being significantly more expensive. However, this is not the case, as seen by the graph. Next let's explore intercolleration between features.
# In[ ]:
#correlation matrix
corrmat = df_train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True);
# #### There are some interesting correlation between variables - let's take a closer look:
# In[ ]:
#saleprice correlation matrix
k = 10 #number of variables for heatmap
cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index
cm = np.corrcoef(df_train[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
# #### Okay, so the features: square-foot living area, grade (amount of floors), square feet above the ground level and sqft_living15 displayed the highest correlation with the price of the house.
# #### Moreover, there is a high correlation of sqft_living with e.g. number of bathrooms and grade. This is common sense, as the square feet increase, so does the number of floors and bathrooms. There is potential to implement clever feature engineering here.
# In[ ]:
#scatterplot
sns.set()
cols = ['SalePrice', 'sqft_living', 'grade', 'sqft_above', 'view', 'bathrooms']
sns.pairplot(df_train[cols], size = 2.5)
plt.show();
# #### These overall scatterplots confirm the findings of the heatmap. There is myriad of linear correlation between sqft_living, sqft_above, bathrooms and grade. This yields an opportunity for to combine features. Moreover, what we learned that the above mentioned features have the biggest impact on sale price. One would also expect location to play a role, but as they are in latitude/longitude coordinates, it requires advanced data manipulation to take it into account. Finally, due to many linear relationships we can apply regression models.
# Let's check for missing data before we proceed any further:
# In[ ]:
#missing data
total = df_train.isnull().sum().sort_values(ascending=False)
percent = (df_train.isnull().sum()/df_train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(20)
# #### This dataset is clean of missing data. What a miracle.
# # Standardization of data
# In[ ]:
#standardizing data to mitigate skewdness and kurtosis
from sklearn.preprocessing import StandardScaler
saleprice_scaled = StandardScaler().fit_transform(df_train['SalePrice'][:,np.newaxis]);
low_range = saleprice_scaled[saleprice_scaled[:,0].argsort()][:10]
high_range= saleprice_scaled[saleprice_scaled[:,0].argsort()][-10:]
print('outer range (low) of the distribution:')
print(low_range)
print('\nouter range (high) of the distribution:')
print(high_range)
#
#
# ### Let's normalise the Saleprice and sqft_living feature
# In[ ]:
from scipy.stats import norm
from scipy import stats
#histogram and normal probability plot
sns.distplot(df_train['SalePrice'], fit=norm, bins=50, kde=False);
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
# #### We'll fix it by taking the log and fitting the plot to a normal distribution curve
# In[ ]:
#applying log transformation
df_train['SalePrice'] = np.log(df_train['SalePrice'])
# In[ ]:
#transformed histogram and normal probability plot
sns.distplot(df_train['SalePrice'], fit=norm, bins=50, kde=False);
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
# Done! Now for sqft_living
# In[ ]:
#histogram and normal probability plot
sns.distplot(df_train['sqft_living'], fit=norm, bins=50, kde=False);
fig = plt.figure()
res = stats.probplot(df_train['sqft_living'], plot=plt)
# In[ ]:
#data transformation
df_train['sqft_living'] = np.log(df_train['sqft_living'])
# In[ ]:
#transformed histogram and normal probability plot
sns.distplot(df_train['sqft_living'], fit=norm, bins=50, kde=False);
fig = plt.figure()
res = stats.probplot(df_train['sqft_living'], plot=plt)
# In[ ]:
#scatter plot
plt.scatter(df_train['sqft_living'], df_train['SalePrice']);
# #### Now there is a nice linear relationship between the features
# ## 4. Fitting Machine Learning Models
# In[ ]:
df_train.head()
# In[ ]:
Y = df_train.SalePrice.values
# In[ ]:
feature_cols = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors',
'view', 'condition', 'grade', 'sqft_above',
'sqft_basement', 'yr_built', 'yr_renovated', 'zipcode', 'lat', 'long',
'sqft_living15', 'sqft_lot15']
X=df_train[feature_cols]
# In[ ]:
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(X, Y, random_state=3)
# In[ ]:
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x_train, y_train)
# In[ ]:
accuracy = regressor.score(x_test, y_test)
"Accuracy: {}%".format(int(round(accuracy * 100)))
# #### So 77% accuracy with simple linear regression. Let's try more advanced algorithms.
# ## Elastic Net
# In[ ]:
from sklearn import ensemble, tree, linear_model
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.utils import shuffle
# In[ ]:
# For accurate scoring
def get_score(prediction, lables):
    """Print R2 and RMSE for a set of predictions.

    prediction: predicted target values.
    lables: ground-truth target values (parameter name kept -- typo and
        all -- so existing keyword callers keep working).

    BUG FIX: sklearn's r2_score signature is (y_true, y_pred); the original
    passed the prediction first, which silently reports the R2 of the
    inverse fit (R2 is not symmetric).  mean_squared_error is symmetric, so
    its order is only normalised for consistency.
    """
    print('R2: {}'.format(r2_score(lables, prediction)))
    print('RMSE: {}'.format(np.sqrt(mean_squared_error(lables, prediction))))
# In[ ]:
def train_test(estimator, x_trn, x_tst, y_trn, y_tst):
    """Print a fitted estimator and its train-set then test-set scores."""
    train_predictions = estimator.predict(x_trn)
    # Show which estimator (and hyper-parameters) produced these numbers.
    print(estimator)
    # Train-set scores.
    get_score(train_predictions, y_trn)
    test_predictions = estimator.predict(x_tst)
    # Test-set scores.
    print("Test")
    get_score(test_predictions, y_tst)
# In[ ]:
from sklearn import ensemble, tree, linear_model
ENSTest = linear_model.ElasticNetCV(alphas=[0.0001, 0.0005, 0.001, 0.01, 0.1, 1, 10], l1_ratio=[.01, .1, .5, .9, .99], max_iter=5000).fit(x_train, y_train)
# In[ ]:
train_test(ENSTest, x_train, x_test, y_train, y_test)
# In[ ]:
# Average R2 score and standart deviation of 5-fold cross-validation
scores = cross_val_score(ENSTest, x_test, y_test, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# #### Interesting that the accuracy is the same. RSME, which is a better measure of performance, is 0.25
# ## Gradient Boosting
# #### As previously seen, we have many outliers. So I'm using max_features='sqrt' to reduce overfitting of my model. I also use loss='huber' because it more tolerant to outliers. All other hyper-parameters was chosen using GridSearchCV.
# In[ ]:
GBest = ensemble.GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=3, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10, loss='huber').fit(x_train, y_train)
train_test(GBest, x_train, x_test, y_train, y_test)
# In[ ]:
# Average R2 score and standart deviation of 5-fold cross-validation
scores = cross_val_score(GBest, x_test, y_test, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# Gradient boosting seems to work well for this data set
# In[ ]:
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, RidgeCV, LassoCV, ElasticNetCV
from sklearn.metrics import mean_squared_error, make_scorer
from scipy.stats import skew
from IPython.display import display
import matplotlib.pyplot as plt
# ## Linear Regression and Lasso
# In[ ]:
# Defining two functions for error measuring: RMSE
# greater_is_better=False makes cross_val_score return *negative* MSE,
# hence the minus sign inside np.sqrt below.
scorer = make_scorer(mean_squared_error, greater_is_better = False)
def rmse_cv_train(model):
    # NOTE: relies on the module-level globals X_train / y_train / scorer.
    rmse= np.sqrt(-cross_val_score(model, X_train, y_train, scoring = scorer, cv = 10))
    return(rmse)
def rmse_cv_test(model):
    # NOTE: relies on the module-level globals X_test / y_test / scorer.
    rmse= np.sqrt(-cross_val_score(model, X_test, y_test, scoring = scorer, cv = 10))
    return(rmse)
# Aliases: earlier cells created the lowercase x_train/x_test names.
X_train= x_train
X_test= x_test
# In[ ]:
# Linear Regression
lr = LinearRegression()
lr.fit(x_train, y_train)
# Look at predictions on training and validation set
print("RMSE on Training set :", rmse_cv_train(lr).mean())
print("RMSE on Test set :", rmse_cv_test(lr).mean())
y_train_pred = lr.predict(x_train)
y_test_pred = lr.predict(x_test)
# Plot residuals
plt.scatter(y_train_pred, y_train_pred - y_train, c = "blue", marker = "s", label = "Training data")
plt.scatter(y_test_pred, y_test_pred - y_test, c = "lightgreen", marker = "s", label = "Validation data")
plt.title("Linear regression")
plt.xlabel("Predicted values")
plt.ylabel("Residuals")
plt.legend(loc = "upper left")
plt.hlines(y = 0, xmin = 11.5, xmax = 15.5, color = "red")
plt.show()
# Plot predictions
plt.scatter(y_train_pred, y_train, c = "blue", marker = "s", label = "Training data")
plt.scatter(y_test_pred, y_test, c = "lightgreen", marker = "s", label = "Validation data")
plt.title("Linear regression")
plt.xlabel("Predicted values")
plt.ylabel("Real values")
plt.legend(loc = "upper left")
plt.plot([11.5, 15.5], [11.5, 15.5], c = "red")
plt.show()
# #### Fundamentally same result as ElasticNet and simple linear regression
# In[ ]:
# 2* Ridge
ridge = RidgeCV(alphas = [0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1, 3, 6, 10, 30, 60])
ridge.fit(X_train, y_train)
alpha = ridge.alpha_
print("Best alpha :", alpha)
print("Try again for more precision with alphas centered around " + str(alpha))
ridge = RidgeCV(alphas = [alpha * .6, alpha * .65, alpha * .7, alpha * .75, alpha * .8, alpha * .85,
alpha * .9, alpha * .95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15,
alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4],
cv = 10)
ridge.fit(X_train, y_train)
alpha = ridge.alpha_
print("Best alpha :", alpha)
print("Ridge RMSE on Training set :", rmse_cv_train(ridge).mean())
print("Ridge RMSE on Test set :", rmse_cv_test(ridge).mean())
y_train_rdg = ridge.predict(X_train)
y_test_rdg = ridge.predict(X_test)
# Plot residuals
plt.scatter(y_train_rdg, y_train_rdg - y_train, c = "blue", marker = "s", label = "Training data")
plt.scatter(y_test_rdg, y_test_rdg - y_test, c = "lightgreen", marker = "s", label = "Validation data")
plt.title("Linear regression with Ridge regularization")
plt.xlabel("Predicted values")
plt.ylabel("Residuals")
plt.legend(loc = "upper left")
plt.hlines(y = 0, xmin = 11.5, xmax = 15.5, color = "red")
plt.show()
# Plot predictions
plt.scatter(y_train_rdg, y_train, c = "blue", marker = "s", label = "Training data")
plt.scatter(y_test_rdg, y_test, c = "lightgreen", marker = "s", label = "Validation data")
plt.title("Linear regression with Ridge regularization")
plt.xlabel("Predicted values")
plt.ylabel("Real values")
plt.legend(loc = "upper left")
plt.plot([11.5, 15.5], [11.5, 15.5], c = "red")
plt.show()
# Plot important coefficients
coefs = pd.Series(ridge.coef_, index = X_train.columns)
print("Ridge picked " + str(sum(coefs != 0)) + " features and eliminated the other " + str(sum(coefs == 0)) + " features")
imp_coefs = pd.concat([coefs.sort_values().head(10),
coefs.sort_values().tail(10)])
imp_coefs.plot(kind = "barh")
plt.title("Coefficients in the Ridge Model")
plt.show()
# In[ ]:
# 3* Lasso
lasso = LassoCV(alphas = [0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1,
0.3, 0.6, 1],
max_iter = 50000, cv = 10)
lasso.fit(X_train, y_train)
alpha = lasso.alpha_
print("Best alpha :", alpha)
print("Try again for more precision with alphas centered around " + str(alpha))
lasso = LassoCV(alphas = [alpha * .6, alpha * .65, alpha * .7, alpha * .75, alpha * .8,
alpha * .85, alpha * .9, alpha * .95, alpha, alpha * 1.05,
alpha * 1.1, alpha * 1.15, alpha * 1.25, alpha * 1.3, alpha * 1.35,
alpha * 1.4],
max_iter = 50000, cv = 10)
lasso.fit(X_train, y_train)
alpha = lasso.alpha_
print("Best alpha :", alpha)
print("Lasso RMSE on Training set :", rmse_cv_train(lasso).mean())
print("Lasso RMSE on Test set :", rmse_cv_test(lasso).mean())
y_train_las = lasso.predict(X_train)
y_test_las = lasso.predict(X_test)
# Plot residuals
plt.scatter(y_train_las, y_train_las - y_train, c = "blue", marker = "s", label = "Training data")
plt.scatter(y_test_las, y_test_las - y_test, c = "lightgreen", marker = "s", label = "Validation data")
plt.title("Linear regression with Lasso regularization")
plt.xlabel("Predicted values")
plt.ylabel("Residuals")
plt.legend(loc = "upper left")
plt.hlines(y = 0, xmin = 11.5, xmax = 15.5, color = "red")
plt.show()
# Plot predictions
plt.scatter(y_train_las, y_train, c = "blue", marker = "s", label = "Training data")
plt.scatter(y_test_las, y_test, c = "lightgreen", marker = "s", label = "Validation data")
plt.title("Linear regression with Lasso regularization")
plt.xlabel("Predicted values")
plt.ylabel("Real values")
plt.legend(loc = "upper left")
plt.plot([11.5, 15.5], [11.5, 15.5], c = "red")
plt.show()
# Plot important coefficients
coefs = pd.Series(lasso.coef_, index = X_train.columns)
print("Lasso picked " + str(sum(coefs != 0)) + " features and eliminated the other " + str(sum(coefs == 0)) + " features")
imp_coefs = pd.concat([coefs.sort_values().head(10),
coefs.sort_values().tail(10)])
imp_coefs.plot(kind = "barh")
plt.title("Coefficients in the Lasso Model")
plt.show()
# In[ ]:
# 4* ElasticNet
elasticNet = ElasticNetCV(l1_ratio = [0.1, 0.3, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 1],
alphas = [0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006,
0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1, 3, 6],
max_iter = 50000, cv = 10)
elasticNet.fit(X_train, y_train)
alpha = elasticNet.alpha_
ratio = elasticNet.l1_ratio_
print("Best l1_ratio :", ratio)
print("Best alpha :", alpha )
print("Try again for more precision with l1_ratio centered around " + str(ratio))
elasticNet = ElasticNetCV(l1_ratio = [ratio * .85, ratio * .9, ratio * .95, ratio, ratio * 1.05, ratio * 1.1, ratio * 1.15],
alphas = [0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1, 3, 6],
max_iter = 50000, cv = 10)
elasticNet.fit(X_train, y_train)
if (elasticNet.l1_ratio_ > 1):
elasticNet.l1_ratio_ = 1
alpha = elasticNet.alpha_
ratio = elasticNet.l1_ratio_
print("Best l1_ratio :", ratio)
print("Best alpha :", alpha )
print("Now try again for more precision on alpha, with l1_ratio fixed at " + str(ratio) +
" and alpha centered around " + str(alpha))
elasticNet = ElasticNetCV(l1_ratio = ratio,
alphas = [alpha * .6, alpha * .65, alpha * .7, alpha * .75, alpha * .8, alpha * .85, alpha * .9,
alpha * .95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15, alpha * 1.25, alpha * 1.3,
alpha * 1.35, alpha * 1.4],
max_iter = 50000, cv = 10)
elasticNet.fit(X_train, y_train)
if (elasticNet.l1_ratio_ > 1):
elasticNet.l1_ratio_ = 1
alpha = elasticNet.alpha_
ratio = elasticNet.l1_ratio_
print("Best l1_ratio :", ratio)
print("Best alpha :", alpha )
print("ElasticNet RMSE on Training set :", rmse_cv_train(elasticNet).mean())
print("ElasticNet RMSE on Test set :", rmse_cv_test(elasticNet).mean())
y_train_ela = elasticNet.predict(X_train)
y_test_ela = elasticNet.predict(X_test)
# Plot residuals
plt.scatter(y_train_ela, y_train_ela - y_train, c = "blue", marker = "s", label = "Training data")
plt.scatter(y_test_ela, y_test_ela - y_test, c = "lightgreen", marker = "s", label = "Validation data")
plt.title("Linear regression with ElasticNet regularization")
plt.xlabel("Predicted values")
plt.ylabel("Residuals")
plt.legend(loc = "upper left")
plt.hlines(y = 0, xmin = 11.5, xmax = 15.5, color = "red")
plt.show()
# Plot predictions
plt.scatter(y_train, y_train_ela, c = "blue", marker = "s", label = "Training data")
plt.scatter(y_test, y_test_ela, c = "lightgreen", marker = "s", label = "Validation data")
plt.title("Linear regression with ElasticNet regularization")
plt.xlabel("Predicted values")
plt.ylabel("Real values")
plt.legend(loc = "upper left")
plt.plot([11.5, 15.5], [11.5, 15.5], c = "red")
plt.show()
# Plot important coefficients
coefs = pd.Series(elasticNet.coef_, index = X_train.columns)
print("ElasticNet picked " + str(sum(coefs != 0)) + " features and eliminated the other " + str(sum(coefs == 0)) + " features")
imp_coefs = pd.concat([coefs.sort_values().head(10),
coefs.sort_values().tail(10)])
imp_coefs.plot(kind = "barh")
plt.title("Coefficients in the ElasticNet Model")
plt.show()
# ### The previous linear regressions with different regulations yielded almost identical results. Conclusively, Gradient boosting seemed to work best for this dataset at accurately predicting the sale prices. With R2: 0.89, RMSE 0.164 and accuracy of 0.87
#
#
# #### Any feedback or comments are wholeheartedly welcome, as this is my first kernel
# In[ ]:
|
import csv
if __name__=="__main__":
fday=open("info_day.csv","r")
fnight=open("info_night.csv","r")
combined=open("info_combined.csv","w", newline='')
rday=list(csv.reader(fday))
rnight=list(csv.reader(fnight))
writer=csv.writer(combined)
writer.writerow(['Day',"Temperature(Day)","Temperature(Night)","Humidity(Night)","Humidity(Night)","Light(Day)","Light(Night)","CO2(Day)","CO2(Night)"])
for row in range(1,len(rday)):
day=rday[row][1:]
night=rnight[row][1:]
# print(day)
# print(night)
buff=['']*(2*len(day)+1)
buff[1::2]=day
buff[2::2]=night
buff[0]=rday[row][0]
print(buff)
writer.writerow(buff)
|
import random
# Estimates the probability of rolling a four by simulating 100,000 single-die rolls.
def rollOneDie():
    """Roll one die 100,000 times and report how often a four appears."""
    hits = 0
    trials = 0
    for trials in range(1, 100001):
        # bool is an int subclass, so this adds 1 exactly when a four lands.
        hits += random.randint(1, 6) == 4
    print("Total four is ", hits, " out of", trials)
    print("Probability of four is ", hits/trials)
    # Relative deviation from the theoretical 1/6.
    print("error", ((hits/trials)-1/6)/(1/6))
    print("\n")
# Roll two dice: report how often at least one of them shows a four.
def rollTwoDice():
    """Roll two dice 100,000 times; count rounds where at least one shows four.

    BUG FIX: the original measured the relative error against 1/36, which
    is the probability that BOTH dice show four.  The simulated event --
    an `or` of two rolls -- has probability 1 - (5/6)**2 = 11/36, so that
    is the correct baseline.
    """
    m = 0
    totalThrowedDice = 0
    for i in range(1, 100001):
        # NOTE: `or` short-circuits, so the second die is only rolled when
        # the first is not a four; the event probability is still 11/36,
        # but totalThrowedDice over-counts the physical rolls.
        totalThrowedDice += 2
        if random.randint(1,6) == 4 or random.randint(1,6) == 4:
            m += 1
    print("All posibilitys ", )
    print("Total four is ", m, " out of", i)
    print("Probability of four is ", m/i)
    print("error", ((m/i)-11/36)/(11/36))
    print("Total throwed dice: ", totalThrowedDice)
    print("\n")
# Empirically estimate the probability of one die face.
def calculateProbability(n):
    """Roll a die 100,000 times; print and return the observed rate of `n`."""
    hits = 0
    last_trial = 0
    for last_trial in range(1, 100001):
        if random.randint(1, 6) == n:
            hits += 1
    rate = hits / last_trial
    print("Total ", n, " is ", hits, " out of", last_trial)
    print("Probability of ", n, " is ", rate)
    # Relative deviation from the theoretical 1/6.
    print("error", (rate - 1/6) / (1/6))
    print("\n")
    return rate
# Estimate the probability that at least one of two dice shows a four.
def calculateTwoDiceProbability():
    """Simulate 100,000 two-die rounds; return the observed rate of a four.

    BUG FIX: the relative error is measured against 11/36 (= 1 - (5/6)**2,
    the probability of at least one four on two dice), not 1/36, which is
    the probability that both dice show four.
    """
    m = 0
    totalThrowedDice = 0
    for i in range(1, 100001):
        # `or` short-circuits, so the second die is rolled only when the
        # first misses; totalThrowedDice therefore over-counts actual rolls.
        totalThrowedDice += 2
        if random.randint(1,6) == 4 or random.randint(1,6) == 4:
            m += 1
    print("Total four is ", m, " out of", i)
    print("Probability of four is ", m/i)
    print("error", ((m/i)-11/36)/(11/36))
    print("Total throwed dice: ", totalThrowedDice)
    print("\n")
    return m/i
# Sum the empirical probabilities over all six faces; should approach 1.0.
def calculateAllProbability():
    """Print the sum of the estimated probabilities of every die face."""
    combined = sum(calculateProbability(face) for face in range(1, 7))
    print("All posibilities: ", combined)
|
# Demo of local vs. global scope and the globals() mapping.
b=4
a=10
# id() of the module-level int currently bound to `a`.
print(id(a))
def pola():
    # Local `a` shadows the global; `b` is still read from module scope.
    a=15
    c=4
    print("in function",a,b)
    # NOTE(review): indentation was lost in the reviewed copy; `lola` is
    # reconstructed as nested inside `pola` -- confirm against the original.
    def lola():
        # `global b` only matters for assignment; reading `b` would work anyway.
        global b
        print("in 2nd function",b)
    lola()
pola()
# The global `a` is untouched by pola()'s local assignment.
print("outside function",a)
def loka():
    # Read and rebind the global `a` explicitly through the globals() mapping.
    x=globals()['a']
    print(id(x))
    print(x)
    globals()['a']=1
    # No local `a` exists here, so this already sees the updated global.
    print(a)
loka()
print(a)
import numpy as np
def actf(x):
    """Logistic sigmoid activation, 1 / (1 + e^-x)."""
    negative_exp = np.exp(-x)
    return 1.0 / (1.0 + negative_exp)
def actf_deriv(x):
    """Sigmoid derivative, expressed in terms of the sigmoid output x."""
    return x * (1.0 - x)
X = np.array([[0,0,1],[0,1,1],[1,0,1],[1,1,1]])  # four patterns; last column is a constant-1 bias input
print(X.shape)
y = np.array([[0],[1],[1],[0]])  # targets: XOR of the first two columns
#y = np.array([[0],[0],[0],[1]])  # alternative target set kept from the original
np.random.seed(5)  # fixed seed so every run trains identically
inputs =3  # input-layer width; one node added for the bias
hiddens =6  # hidden-layer width; one node added for the bias
outputs =1
# Initialise weights with uniform random values in (-1.0, 1.0).
weight0 =2*np.random.random((inputs,hiddens))-1  # input -> hidden weight matrix
weight1 =2*np.random.random((hiddens,outputs))-1  # hidden -> output weight matrix
for i in range(10000):
    # Forward pass.
    layer0 =X  # input activations
    net1 =np.dot(layer0,weight0)  # weighted sums into the hidden layer
    layer1 =actf(net1)  # sigmoid activation
    layer1[:,-1]=1.0  # pin the last hidden unit to 1.0 so it acts as the bias
    net2 = np.dot(layer1,weight1)  # weighted sums into the output layer
    layer2 = actf(net2)  # network output
    # Backward pass (plain gradient descent, learning rate 0.2).
    layer2_error = layer2-y  # output error
    layer2_delta = layer2_error*actf_deriv(layer2)  # output delta
    layer1_error = np.dot(layer2_delta,weight1.T)  # error pushed back through weight1
    layer1_delta = layer1_error * actf_deriv(layer1)
    weight1 += -0.2*np.dot(layer1.T,layer2_delta)
    weight0 += -0.2*np.dot(layer0.T,layer1_delta)
    # Verbose progress dump (printed every one of the 10,000 iterations).
    print("-----",i+1,"번째 반복-----")
    print("I1의 가중치 : ", weight0[0])
    print("I2의 가중치 : ", weight0[1])
    print("I3의 가중치 : ", weight0[2],"(바이어스와 같음)")
    print("H1의가중치 : ",weight1[0])
    print("H2의가중치 : ",weight1[1])
    print("H3의가중치 : ",weight1[2])
    print("H4의가중치 : ",weight1[3])
    print("H5의가중치 : ",weight1[4])
    print("H6의가중치 : ",weight1[5])
    print("\n\n")
# Final network outputs for the four input patterns.
print(layer2)
|
def calc():
    """Generator-based calculator: send 'LHS OP RHS', receive the result.

    Prime with next() (yields 0 first); an unrecognised operator leaves
    the previously computed result unchanged.
    """
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    result = 0
    while True:
        expression = yield result
        tokens = expression.split()
        # Convert operands first, as the original did, so malformed numbers
        # raise ValueError even for an unknown operator.
        a, b = int(tokens[0]), int(tokens[2])
        op = tokens[1]
        if op in operations:
            result = operations[op](a, b)
# Read a comma-separated list of expressions and evaluate each through
# the calc() coroutine, printing every result.
expressions = input().split(', ')
calculator = calc()
next(calculator)  # prime the generator (discards the initial 0)
for expression in expressions:
    print(calculator.send(expression))
calculator.close()
"""Compare two HDF5 files.
If the function does not output anything all datasets are present in both files,
and all the content of the datasets is equal.
Each output line corresponds to a mismatch between the files.
:usage:
G5compare [options] <source> <other>
:arguments:
<source>
HDF5-file.
<other>
HDF5-file.
:options:
-t, --dtype
Verify that the type of the datasets match.
-r, --renamed=ARG
Renamed paths: this option takes two arguments, one for ``source`` and one for ``other``.
It can be repeated, e.g. ``G5compare a.h5 b.h5 -r /a /b -r /c /d``
-h, --help
Show help.
--version
Show version.
(c - MIT) T.W.J. de Geus | tom@geus.me | www.geus.me | github.com/tdegeus/GooseHDF5
"""
import argparse
import os
import warnings
import h5py
from .. import equal
from .. import getpaths
from .. import version
warnings.filterwarnings("ignore")
def check_isfile(fname):
    """Raise OSError when `fname` is not an existing regular file."""
    if os.path.isfile(fname):
        return
    raise OSError(f'"{fname}" does not exist')
def check_dataset(source, dest, source_dataset, dest_dataset, check_dtype):
    r"""
    Compare dataset ``source_dataset`` in ``source`` with ``dest_dataset``
    in ``dest``.

    Prints a message and returns ``False`` on the first mismatch
    (content, then optionally dtype); returns ``True`` otherwise.
    """
    if not equal(source, dest, source_dataset, dest_dataset):
        print(f" != {source_dataset}")
        return False
    if check_dtype and source[source_dataset].dtype != dest[dest_dataset].dtype:
        print(f"type {source_dataset}")
        return False
    return True
def _check_plain(source, other, check_dtype):
    r"""
    Implementation detail of :py:func:`check_plain`: works on open files.
    """
    source_paths = list(getpaths(source))
    other_paths = list(getpaths(other))
    # Report paths present on only one side.
    for path in source_paths:
        if path not in other:
            print(f"-> {path}")
    for path in other_paths:
        if path not in source:
            print(f"<- {path}")
    # Compare everything that exists on both sides.
    for path in source_paths:
        if path in other:
            check_dataset(source, other, path, path, check_dtype)
def check_plain(source_name, other_name, check_dtype):
    r"""
    Check all datasets (without allowing for renamed datasets).
    """
    with h5py.File(source_name, "r") as source, h5py.File(other_name, "r") as other:
        _check_plain(source, other, check_dtype)
def _check_renamed(source, other, renamed, check_dtype):
    r"""
    Implementation detail of :py:func:`check_renamed`: works on open files.

    ``s2o`` maps each path in ``source`` to the path it is expected to have
    in ``other`` (identity unless listed in ``renamed``); ``o2s`` is the
    reverse mapping.
    """
    s2o = {i: i for i in list(getpaths(source))}
    o2s = {i: i for i in list(getpaths(other))}
    for s, o in renamed:
        s2o[s] = o
        o2s[o] = s
    # Expected in "other" but not found there.
    for path in s2o.values():
        if path not in o2s:
            print(f" -> {path}")
    # Expected in "source" but not found there.
    for path in o2s.values():
        if path not in s2o:
            print(f" <- {path}")
    # BUGFIX: the original tested the *source* path against the other-side
    # map and passed the dataset arguments swapped, so renamed datasets were
    # never actually compared. Test the mapped other-path and pass
    # (source_path, other_path) in the order check_dataset expects.
    for source_path, other_path in s2o.items():
        if other_path in o2s:
            check_dataset(source, other, source_path, other_path, check_dtype)
def check_renamed(source_name, other_name, renamed, check_dtype):
    r"""
    Check all datasets while allowing for renamed datasets.

    renamed = [['source_name1', 'other_name1'], ['source_name2', 'other_name2'], ...]
    """
    with h5py.File(source_name, "r") as source, h5py.File(other_name, "r") as other:
        _check_renamed(source, other, renamed, check_dtype)
def main():
    r"""
    Command-line entry point.

    :return: 0 on success, 1 when any exception was raised.
    """
    try:
        class Parser(argparse.ArgumentParser):
            # Replace argparse's generated help with the module docstring.
            def print_help(self):
                print(__doc__)

        parser = Parser()
        parser.add_argument("-t", "--dtype", required=False, action="store_true")
        parser.add_argument("-r", "--renamed", required=False, nargs=2, action="append")
        parser.add_argument("-v", "--version", action="version", version=version)
        parser.add_argument("source")
        parser.add_argument("other")
        args = parser.parse_args()

        check_isfile(args.source)
        check_isfile(args.other)

        if not args.renamed:
            check_plain(args.source, args.other, args.dtype)
            return 0

        check_renamed(args.source, args.other, args.renamed, args.dtype)
        # BUGFIX: this path previously fell through and returned None.
        return 0
    except Exception as e:
        print(e)
        return 1
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
__author__ = 'Perevalov'
import json
from pprint import pprint
def converting(fl):
    """Convert ASTRA's compact numeric notation to float.

    Returns None for ``None``/empty input and for purely alphabetic
    tokens. A trailing signed exponent written without 'E'
    (e.g. ``'1.23-5'``) is expanded to standard notation (``'1.23E-5'``)
    before conversion.
    """
    if not fl:
        return None
    if fl.isalnum() and not fl.isdigit():
        return None
    if len(fl) > 2 and fl[-2] in "+-":
        fl = f"{fl[:-2]}E{fl[-2:]}"
    return float(fl)
# Opening JSON file
# The whole ASTRA results database is loaded into the module-level "data"
# dict; the helpers below read and mutate it in place.
with open('all_results.json') as json_data:
    data = json.load(json_data)
def char_to_float_in_data():
    """Normalise every numeric entry of the global ``data`` dict in place.

    Scalar entries arrive as 1-element lists of ASTRA-formatted strings,
    standard entries as single strings, and vector entries as lists of
    strings; all are converted with :func:`converting`. A vector element
    that cannot be parsed is replaced by 0.
    """
    for exp_time in data:
        for data_set in data[exp_time]:
            key = data_set.lower()
            block = data[exp_time][data_set]
            if key == "scalar_data":
                for scalar_data in block:
                    block[scalar_data] = converting(block[scalar_data][0])
            elif key == "standard_data":
                for stand_data in block:
                    block[stand_data] = converting(block[stand_data])
            elif key == "vector_data":
                for vector in block:
                    for i in range(len(block[vector])):
                        # BUGFIX: the try/except used to wrap the whole
                        # vector loop, so a single bad element aborted the
                        # conversion of every remaining element.
                        try:
                            block[vector][i] = converting(block[vector][i])
                        except ValueError:
                            block[vector][i] = 0
def printing_all_data():
    """Print every field of the global ``data`` dict, experiment by experiment."""
    # NOTE(review): the source indentation was lost; the trailing print()
    # calls are placed after each inner loop as dataset separators — confirm.
    for exp_time in data:
        print(exp_time)
        for data_set in data[exp_time]:
            if data_set.lower() == "scalar_data":
                for scalar_data in data[exp_time][data_set]:
                    print(scalar_data, ' = ', data[exp_time][data_set][scalar_data])
                print()
            elif data_set.lower() == "type":
                print("Type = ", data[exp_time][data_set])
                print()
            elif data_set.lower() == "equation":
                print("Equ = ", data[exp_time][data_set])
                print()
            elif data_set.lower() == "experiment":
                print("Exp = ", data[exp_time][data_set])
                print()
            elif data_set.lower() == "standard_data":
                for stand_data in data[exp_time][data_set]:
                    print(stand_data, " = ", data[exp_time][data_set][stand_data])
                print()
            elif data_set.lower() == "vector_data":
                for vector in data[exp_time][data_set]:
                    print(vector, ' = ', data[exp_time][data_set][vector])
                print()
# ############################# IMPORTANT ################################
# Necessary to start this at the very beginning to switch weird strings in database to the float format
char_to_float_in_data()
#########################################################################
# EXAMPLES
# Printing q and zeff in Ohmic experiments where q(0) < 1 and ne(0) > 4
'''for experiment_time in data:
    if data[experiment_time]["type"] == "OH":
        if data[experiment_time]["vector_data"]["q"][0] <= 1 and data[experiment_time]["vector_data"]["ne"][0] >= 4:
            print(experiment_time, "q =", data[experiment_time]["vector_data"]["q"][0], sep=" ")
            try:
                print("Zeff =", data[experiment_time]["vector_data"]["zef"][0], sep=" ")
            except KeyError:
                print(experiment_time, ' does not have zeff vector')
'''
# printing all vector data names in first dataset
'''for experiment_time in data:
for vector in data[experiment_time]["vector_data"]:
print(vector)
break'''
'''for experiment_time in data:
if data[experiment_time]["type"] == "OH":
# print(data[experiment_time]["vector_data"])
if data[experiment_time]["standard_data"]["i"] >= 0.020:
print("Day =", data[experiment_time]["experiment"],
" Time =", 1000*data[experiment_time]["standard_data"]["time"],
" Current = ", data[experiment_time]["standard_data"]["i"]*1000
)
# print(data[experiment_time]["vector_data"]["te"])
'''
# Printing electron energy confinement time, scaling times and mass of main ions
'''
print('Exp Time Taue TNA TGO a')
for experiment_time in data:
if data[experiment_time]["type"] == "OH":
print(experiment_time[0:-6], ' ', experiment_time[-5:],
data[experiment_time]["scalar_data"]["taue"],
data[experiment_time]["scalar_data"]["tna"],
data[experiment_time]["scalar_data"]["tgo"],
data[experiment_time]["scalar_data"]["amj"],
sep=' ')
'''
# Ways to print all data that is in the database
# Written above
'''
printing_all_data()
'''
# Standard Python JSON printing
'''
pprint(data)
'''
# or specific exp
'''
pprint(data["101209_0.027"])
'''
# Exporting database back to json
'''
with open('data.json', 'w') as outfile:
json.dump(data, outfile)
'''
|
from time import sleep
from machine import UART
from utils.pinout import set_pinout
from gc import mem_free
from components.rfid import PN532_UART
print("--- RAM free ---> " + str(mem_free()))
pinout = set_pinout()
uart = UART(2, 115200)
#UART1:
#uart.init(baudrate=115200, tx=pinout.TXD1, rx=pinout.RXD1, timeout=100)
#UART2:
uart.init(baudrate=115200, tx=pinout.PWM1_PIN, rx=pinout.PWM2_PIN, timeout=100)
pn532 = PN532_UART(uart, debug=False)
ic, ver, rev, support = pn532.firmware_version
print("Found PN532 with firmware version: {0}.{1}".format(ver, rev))
# Configure PN532 to communicate with MiFare cards
pn532.SAM_configuration()
print("Waiting for RFID/NFC card...")
while True:
# Check if a card is available to read
uid = pn532.read_passive_target(timeout=0.5)
print(".", end="")
if uid is not None:
card_id = ""
for i in uid:
card_id += str(hex(i))[2:]
print("Found card with UID:", card_id)
#piezzo.beep()
pn532.power_down()
sleep(1.0)
|
# coding = utf-8
"""
Author:micheryu
Date:2020/3/23
Motto: 能用脑,就别动手~
"""
import os
import time
import pytest
from common import Shell
from config import Config
from log import logconf
from report import SendReport
def run(time_str):
    """Run the test suite, build an allure report and e-mail it.

    :param time_str: timestamp string used to name this run's report folder.
    """
    conf = Config.Config()
    log = logconf.logconf()
    log.info('初始化配置文件,path= ' + conf.conf_path)
    shell = Shell.Shell()

    # Derive the sibling "report" directory from the current working dir.
    root_dir = os.getcwd()
    report_root = root_dir.replace(root_dir.split('\\')[-1], 'report')
    root_path = report_root + r'\allure-report\{0}'.format(time_str)

    log.info('创建按时间生成测试报告文件夹')
    os.mkdir(root_path)
    xml_dir = root_path + r'\xml'
    html_dir = root_path + r'\html'
    os.mkdir(xml_dir)
    os.mkdir(html_dir)

    # Run the tests, writing allure's raw results to the xml folder.
    pytest.main(['-s', 'test_search.py', '--alluredir', xml_dir])

    cmd = 'allure generate {0} -o {1} --clean'.format(xml_dir, html_dir)
    try:
        shell.invoke(cmd)
    except Exception:
        log.error('执行测试用例失败,请检查环境配置')
        raise
    try:
        mail = SendReport.SendReportEmail(report_root + r'\allure-report')
        mail.Find_File()
    except Exception:
        log.error('发送邮件失败,请检查邮件配置')
        raise
if __name__ == '__main__':
    # BUGFIX: the original bound the formatted timestamp to the name "time",
    # shadowing the time module; use a distinct variable instead.
    timestamp = time.strftime('%Y-%m-%d_%H_%M_%S')
    run(timestamp)
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser
USER_TYPES = [('cogmaker', 'Cog Maker'), ('coguser', 'Cog User')]
class User(AbstractUser):
    """Custom user model distinguishing cog makers from cog users."""

    # Role of this account; one of USER_TYPES.
    usertype = models.CharField(choices=USER_TYPES, max_length=8)

    def __str__(self):
        return self.username
class Cog(models.Model):
    """A cog with a name and tooth count, owned by a user."""

    name = models.CharField(max_length=32)
    tooth_count = models.IntegerField(default=4)
    # BUGFIX: on_delete is mandatory on Django >= 2.0; CASCADE matches the
    # pre-2.0 implicit default, so existing behavior is preserved.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='cogs',
                              on_delete=models.CASCADE)

    def __str__(self):
        return self.name
class Note(models.Model):
    """A titled free-text note owned by a user."""

    title = models.CharField(max_length=50)
    text = models.TextField()
    # BUGFIX: on_delete is mandatory on Django >= 2.0; CASCADE matches the
    # pre-2.0 implicit default, so existing behavior is preserved.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='notes',
                              on_delete=models.CASCADE)

    def __str__(self):
        return self.title
class PersonalNote(Note):
    # A Note variant with no extra fields; subclassing a concrete model
    # gives it its own table (Django multi-table inheritance).
    pass
|
import json
import sys
import lief
from elftools.common.py3compat import bytes2str
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import NullSection, StringTableSection, SymbolTableSection
from elftools.elf.segments import InterpSegment
from func import compute_entropy
from func import get_section_name
from func import get_segment_name
from func import overlap_address
from func import changeEntryPoint
#from func import changeStringTableIndex
class FileLoader:
    """Parse an ELF file and run heuristic sanity checks on it.

    The constructor caches the ELF header fields as attributes; the
    ``check``-style methods print their findings rather than returning them.
    """

    def __init__(self, path):
        """Open *path*, read it fully, and parse it with pyelftools."""
        self.path = path
        # The stream stays open on purpose: ELFFile reads from it lazily.
        f = open(self.path, "rb")
        self.stream = f
        self.binary = f.read()
        self.file = ELFFile(f)
        header = self.file.header
        ### Header infos
        self.elfclass = header["e_ident"]["EI_CLASS"]
        self.data = header["e_ident"]["EI_DATA"]
        self.version = header["e_ident"]["EI_VERSION"]
        self.osabi = header["e_ident"]["EI_OSABI"]
        self.abiversion = str(header["e_ident"]["EI_ABIVERSION"])
        self.type = header["e_type"]
        self.machine = header["e_machine"]
        self.machine_version = header["e_version"]
        self.e_entry = header["e_entry"]
        self.e_phoff = header["e_phoff"]
        self.e_shoff = header["e_shoff"]
        self.e_flags = header["e_flags"]
        self.e_ehsize = header["e_ehsize"]
        self.e_phentsize = header["e_phentsize"]
        self.e_phnum = header["e_phnum"]
        self.e_shentsize = header["e_shentsize"]
        self.e_shnum = header["e_shnum"]
        self.e_shstrndx = header["e_shstrndx"]

    ### Internal helpers shared by the checks below

    @staticmethod
    def _format_range(addr, size):
        """Return (start, end) of [addr, addr+size) as zero-padded hex strings."""
        def fmt(a):
            return '0x' + hex(a)[2:].zfill(7)
        return fmt(addr), fmt(addr + size)

    def _section_name(self, name_offset):
        """Resolve a sh_name offset through the section-header string table.

        Falls back to the raw offset when e_shstrndx does not point at a
        valid string table.
        """
        string_table = self.file.get_section(self.e_shstrndx)
        if isinstance(string_table, StringTableSection):
            return string_table.get_string(name_offset)
        return name_offset

    @staticmethod
    def _flag_names(flags, names):
        """Map a 3-bit flag value onto *names* (MSB first); cleared bits -> "".

        BUGFIX: the original indexed bin(flags)[2:] directly, which raises
        IndexError for flag values below 4 (and sectionPermissions' bare
        "except:" silently swallowed that error); zfill(3) makes it safe.
        """
        bits = bin(flags)[2:].zfill(3)
        return [name if bit == '1' else "" for name, bit in zip(names, bits)]

    ### Write the file in hex format in binary.txt file
    def writeBinary(self):
        """Dump the raw file bytes as a hex string into binary.txt."""
        # The redundant explicit close() inside the "with" block was dropped.
        with open("binary.txt", "w") as f:
            f.write(self.binary.hex())

    ### Check for unusual entropy
    def entropy(self, threshold):
        """Report every non-empty section whose entropy exceeds *threshold*."""
        suspicious = {}
        for section in self.file.iter_sections():
            if section.header['sh_size'] == 0:
                continue
            ent, _frequence = compute_entropy(section.data())
            if ent > threshold:
                # BUGFIX: the original fallback branch evaluated
                # s[section.header['sh_name']] without assigning it, so it
                # recorded nothing (or raised KeyError).
                suspicious[self._section_name(section.header['sh_name'])] = ent
        if suspicious == {}:
            print("General entropy is normal")
        else:
            print("Some section have unusual high entropy : {}".format(suspicious))

    ### Check for potential overlapping segments
    def overlappingSegments(self):
        """Print every pair of segments whose virtual address ranges overlap."""
        seen = []
        overlap = False
        for segment in self.file.iter_segments():
            header = segment.header
            addr1, addr2 = self._format_range(header["p_vaddr"], header['p_memsz'])
            for addr3, addr4, p_type in seen:
                if overlap_address(addr1, addr2, addr3, addr4):
                    print("Overlapping detected between two segments: Range [{}-{}],{} overlap range [{}-{}],{}".format(
                        addr1, addr2, header['p_type'], addr3, addr4, p_type))
                    overlap = True
            seen.append((addr1, addr2, header['p_type']))
        if not overlap:
            print("No overlap detected between segments")

    ### Check for potential overlapping sections
    def overlappingSections(self):
        """Print every pair of sections whose address ranges overlap."""
        seen = []
        overlap = False
        for section in self.file.iter_sections():
            header = section.header
            name = self._section_name(header['sh_name'])
            addr1, addr2 = self._format_range(header["sh_addr"], header['sh_size'])
            for addr3, addr4, other_name in seen:
                if overlap_address(addr1, addr2, addr3, addr4):
                    print("Overlapping detected between two sections: Range [{}-{}],{} overlap range [{}-{}],{}".format(
                        addr1, addr2, name, addr3, addr4, other_name))
                    overlap = True
            seen.append((addr1, addr2, name))
        if not overlap:
            print("No overlap detected between sections")

    ### Check for unusual segments permissions
    def segmentPermissions(self):
        """Warn when PT_LOAD segments holding .text or .data/.bss have unexpected flags."""
        problem = False
        # These lookups are loop-invariant, so they are hoisted out.
        text = self.file.get_section_by_name('.text')
        data = self.file.get_section_by_name('.data')
        bss = self.file.get_section_by_name('.bss')
        for segment in self.file.iter_segments():
            header = segment.header
            if header['p_type'] != 'PT_LOAD':
                continue
            permission = self._flag_names(header['p_flags'], ["READ", "WRITE", "EXECUTE"])
            # Text segments are expected to be read+execute (p_flags == 5).
            if segment.section_in_segment(text) and header['p_flags'] != 5:
                print("Typical text segments have read and execute, but not write permissions : Found {}".format(permission))
                problem = True
            # Data segments are expected to be read+write+execute (p_flags == 7) here.
            if segment.section_in_segment(data) and segment.section_in_segment(bss) and header['p_flags'] != 7:
                print("Data segments normally have read, write, and execute permissions : Found {}".format(permission))
                problem = True
        if not problem:
            print("All the permissions are normally set")

    #### Chech for unusual sections permissions
    def sectionPermissions(self):
        """Warn when well-known sections carry unexpected sh_flags.

        The original repeated an identical try/except body ten times (with a
        no-op ``permission[index]`` statement and a bare ``except:`` that hid
        IndexErrors); it is now driven by a table of expectations.
        """
        # (section name, expected sh_flags value, human-readable expectation)
        expectations = [
            ('.text', 6, 'SHF_ALLOC,SHF_EXECINSTR'),
            ('.data', 3, 'SHF_ALLOC,SHF_WRITE'),
            ('.bss', 3, 'SHF_ALLOC,SHF_WRITE'),
            ('.rodata', 2, 'SHF_ALLOC'),
            ('.init', 6, 'SHF_ALLOC, SHF_EXECINSTR'),
            ('.fini', 6, 'SHF_ALLOC, SHF_EXECINSTR'),
            ('.dynstr', 2, 'SHF_ALLOC'),
            ('.dynsym', 2, 'SHF_ALLOC'),
            ('.init_array', 3, 'SHF_ALLOC,SHF_WRITE'),
            ('.fini_array', 3, 'SHF_ALLOC,SHF_WRITE'),
        ]
        problem = False
        for name, expected, description in expectations:
            section = self.file.get_section_by_name(name)
            if section is None:
                print("There is no {} sections".format(name))
                continue
            flag = section.header['sh_flags']
            if flag != expected:
                permission = self._flag_names(flag, ["SHF_EXECINSTR", "SHF_ALLOC", "SHF_WRITE"])
                print("Permission for {} section should be {} but {} have been found".format(name, description, permission))
                problem = True
        if not problem:
            print("No strange section permissions have been reported")

    ### Check the program interpreter
    def programInterpreter(self):
        """Print the program interpreter recorded in the PT_INTERP segment, if any."""
        for segment in self.file.iter_segments():
            if segment.header['p_type'] == 'PT_INTERP':
                interp = InterpSegment(segment.header, self.stream)
                interpreter = interp.get_interp_name()
                if interpreter is None:
                    print("There is no interpreter for this ELF")
                else:
                    print("Interpreter found : {}".format(interpreter))

    ### Check the number of section and segment
    def sNumber(self):
        """Print segment/section counts, flagging a zero count as unusual."""
        load = sum(1 for segment in self.file.iter_segments()
                   if segment.header['p_type'] == 'PT_LOAD')
        if self.e_phnum == 0 or self.e_shnum == 0:
            print("Unusual number of segment or sections : {} segment(s) and {} section(s) ({} PT_LOAD segment(s))".format(self.e_phnum, self.e_shnum, load))
        else:
            print("There is {} segment(s) and {} section(s) ({} PT_LOAD segment(s))".format(self.e_phnum, self.e_shnum, load))

    ### Check the entry point
    def entryPoint(self):
        """Report which section/segment e_entry falls in and whether it is the section start."""
        (sh_name, sh_type) = get_section_name(self.file, hex(self.e_entry), self.e_shstrndx)
        p_type = get_segment_name(self.file, hex(self.e_entry))
        print("Entry point to section {} in segment {}".format(sh_name, p_type))
        section = self.file.get_section_by_name(sh_name)
        if section.header['sh_addr'] != self.e_entry:
            print("You should change the entry point, it doesn't point at the beginning of {}".format(sh_name))

    ### Check the symbol table
    def symbolTable(self):
        """Print every symbol table section together with its symbol names."""
        found = False
        for section in self.file.iter_sections():
            if not isinstance(section, SymbolTableSection):
                continue
            found = True
            name = self._section_name(section.header['sh_name'])
            # BUGFIX: the symbol list is reset per table; the original kept
            # appending across sections, so later tables also reprinted the
            # symbols of earlier ones.
            symbols = [s.name for s in section.iter_symbols()]
            print("Instance of symbol table found : {} {}\n".format(name, symbols))
        if not found:
            print("There is no instance of symbol table")

    ### Check the string table index
    def stringTable(self):
        """Verify that e_shstrndx points at an actual string table section."""
        string_table = self.file.get_section(self.e_shstrndx)
        if isinstance(string_table, StringTableSection):
            print("The string table index is correct")
        else:
            print("The string table index is wrong")

    ### Change the entry point
    def changeEPoint(self):
        """Rewrite the entry point via the helper imported from func."""
        changeEntryPoint(self.file, self.e_entry, self.path)

    ### Change the string index table
    def changeSTableIndex(self):
        """Rewrite e_shstrndx via the module-level changeStringTableIndex below."""
        changeStringTableIndex(self.file, self.path)
### Change the string table index
def changeStringTableIndex(file, path):
    """Point e_shstrndx at the last string-table section and write the patched binary.

    The patched copy is written to "output/sample" via lief; the original
    file is left untouched.
    """
    string_index = file.header["e_shstrndx"]
    for index, section in enumerate(file.iter_sections()):
        if isinstance(section, StringTableSection):
            string_index = index
    binary = lief.parse(path)
    binary.header.section_name_table_idx = string_index
    binary.write("output/sample")
from assist_model import *
from assist_view import *
import os
import sys
class ConsoleController:
    """Reads command words from stdin and dispatches them to command objects.

    COMMANDS maps a command word to a plain function defined in the class
    body; the dispatcher calls it explicitly with the controller instance.
    """

    def __init__(self, command_birthday, command_create, command_delete, command_help, command_edit, command_exit,
                 command_search, command_show, view):
        self.command_birthday = command_birthday
        self.command_create = command_create
        self.command_delete = command_delete
        self.command_help = command_help
        self.command_edit = command_edit
        self.command_exit = command_exit
        self.command_search = command_search
        self.command_show = command_show
        self.view = view

    def start(self):
        self.view.start_view()

    def requests(self):
        # Prompt, normalise the command word, and dispatch it.
        self.view.requests_command()
        command = input().strip().casefold()
        try:
            self.get_command_handler(command)(self)
        except KeyError:
            # Unknown command word.
            self.view.displays_key_error(command)

    def get_command_handler(self, command):
        # Raises KeyError for unknown commands; handled in requests().
        return self.COMMANDS[command]

    def organizes_the_congratulate(self):
        self.command_birthday.organizes_the_congratulate()

    def organizes_the_create(self):
        self.command_create.organizes_the_create()

    def organizes_the_delete(self):
        self.command_delete.organizes_the_delete()

    def organizes_the_help(self):
        self.command_help.organizes_the_help()

    def organizes_the_edit(self):
        self.command_edit.organizes_the_edit()

    def organizes_the_exit(self):
        self.command_exit.organizes_the_exit()

    def organizes_the_search(self):
        self.command_search.organizes_the_search()

    def organizes_the_show(self):
        self.command_show.organizes_the_show()

    # Command word -> handler; these are plain functions (not bound methods)
    # because the dict is built inside the class body.
    COMMANDS = {
        'birthday': organizes_the_congratulate,
        'create': organizes_the_create,
        'delete': organizes_the_delete,
        'help': organizes_the_help,
        'edit': organizes_the_edit,
        'exit': organizes_the_exit,
        'search': organizes_the_search,
        'show': organizes_the_show,
    }
class CommandBirthday:
    """Command: list contacts whose birthday falls within the next n days."""

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def organizes_the_congratulate(self):
        # Keep asking until the user enters a purely numeric day count.
        # BUGFIX: the original tested "n.isdigit" (the method object, which
        # is always truthy) and waited for a ValueError that input() never
        # raises, so any text was accepted without validation.
        while True:
            self.view.requests_days_left_to_the_birthday()
            n = input()
            if n.isdigit():
                break
            self.view.requests_input_number()
        users = self.model.to_congratulate(n)
        if users:
            self.view.display_who_to_wish_happy_birthday(users)
        else:
            self.view.reports_no_birthdays()
class CommandCreate:
    """Command: interactively gather all fields and create a new record."""

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def organizes_the_create(self):
        # Collect every field from the user, then persist the record.
        name = self.input_name()
        phone = self.input_phone_number()
        email = self.input_email()
        birthday = self.input_birthday()
        note = self.add_note()
        self.model.to_create_record(name, phone, email, birthday, note)

    def add_note(self):
        # Trailing space presumably separates later note additions — TODO confirm.
        self.view.requests_enter_the_note()
        return input() + ' '

    def input_birthday(self):
        # Re-prompt until the date passes validation.
        while True:
            self.view.requests_enter_birthday()
            try:
                birthday = ValidationCheck.check_birthday(input())
                break
            except ValueError:
                self.view.requests_re_entry_of_the_data()
        return birthday

    def input_email(self):
        # Re-prompt until the address passes validation.
        while True:
            self.view.requests_enter_email()
            try:
                email = ValidationCheck.check_email(input().strip())
                break
            except AttributeError:
                self.view.requests_re_entry_of_the_data()
        return email

    def input_name(self):
        # Re-prompt while the chosen name already exists in the database.
        while True:
            self.view.requests_enter_name()
            name = input()
            if ValidationCheck.check_is_name_exist(name):
                self.view.reports_the_existence_of_name_in_database(name)
                continue
            break
        return name

    def input_phone_number(self):
        # Re-prompt until the number passes validation.
        while True:
            self.view.requests_enter_phone()
            try:
                phone = ValidationCheck.check_phone_number(input())
                break
            except AttributeError:
                self.view.requests_re_entry_of_the_data()
        return phone
class CommandDelete:
    """Command: remove a contact record by name."""

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def organizes_the_delete(self):
        """
        Function deletes records by specified name.
        :return: None
        """
        self.view.requests_enter_name_to_changes_data()
        name = input()
        if not ValidationCheck.check_is_name_exist(name):
            self.view.reports_the_not_exist_of_name_in_database(name)
            return
        self.model.to_delete(name)
        self.view.reports_the_deletion_of_a_contact(name)
class CommandHelp:
    """Command: print the list of supported commands."""

    def __init__(self, view):
        self.view = view

    def organizes_the_help(self):
        self.view.describes_commands()
class CommandEdit:
    """Command: edit one field of an existing record."""

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def organizes_the_edit(self):
        """
        The function edits data (name, phone, ...) by the name of the contact. Name, phone, email, birthday, note
        can only be replaced, and notes can be replaced and supplemented.
        :return: None
        """
        self.view.requests_enter_name_to_changes_data()
        name = input()
        if not ValidationCheck.check_is_name_exist(name):
            self.view.reports_the_not_exist_of_name_in_database(name)
        else:
            # Re-prompt until the user names an editable field (unknown
            # fields raise KeyError from get_updated_data).
            while True:
                self.view.requests_what_edit()
                command_edit = input().casefold().strip()
                try:
                    updated_data, updated_command = self.get_updated_data(command_edit)
                    self.model.change_field_value(name, command_edit, updated_data, updated_command)
                    break
                except KeyError:
                    self.view.requests_re_entry_of_the_data()
            self.view.reports_the_updated_of_a_contact(name)

    # NOTE(review): the helpers below reuse CommandCreate's input methods
    # with this instance as "self" — this works because those methods only
    # touch self.view, which both classes provide.
    def get_updated_name(self, command):
        updated_data = CommandCreate.input_name(self).strip()
        return updated_data, command

    def get_updated_phone(self, command):
        updated_data = CommandCreate.input_phone_number(self).strip()
        return updated_data, command

    def get_updated_email(self, command):
        updated_data = CommandCreate.input_email(self).strip()
        return updated_data, command

    def get_updated_birthday(self, command):
        updated_data = CommandCreate.input_birthday(self).strip()
        return updated_data, command

    def get_updated_note(self, command):
        """
        The function replaces or adds data to an existing note.
        :return: (dict) updated entry
        """
        # Ask whether to change, add to, or delete the note.
        while True:
            self.view.requests_command_edit_note()
            com_edit_note = input().strip().casefold()
            if com_edit_note in ('change', 'add', 'delete'):
                break
            print(f'Incorrect, once more please')
        if com_edit_note == 'delete':
            com_edit_note = command + '_delete'
            updated_data = None
            return updated_data, com_edit_note
        updated_data = CommandCreate.add_note(self)
        if com_edit_note == 'change':
            com_edit_note = command + '_change'
        elif com_edit_note == 'add':
            com_edit_note = command + '_add'
        return updated_data, com_edit_note

    def get_updated_data(self, command_edit):
        # Dispatch to the field-specific updater; raises KeyError for
        # unknown field names (handled by the caller).
        UPDATE_DATA = {'name': self.get_updated_name,
                       'phone': self.get_updated_phone,
                       'email': self.get_updated_email,
                       'birthday': self.get_updated_birthday,
                       'note': self.get_updated_note}
        return UPDATE_DATA[command_edit](command_edit)
class CommandExit:
    """Command: say goodbye and terminate the process."""

    def __init__(self, view):
        self.view = view

    def organizes_the_exit(self):
        self.view.displays_see_ya()
        sys.exit(0)
class CommandSearch:
    """Command: find records matching a keyword."""

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def organizes_the_search(self):
        self.view.requests_key_word_for_search()
        key_word = input()
        matches = self.model.to_search(key_word)
        if not matches:
            self.view.reports_no_matches()
            return
        self.view.displays_matches(key_word, matches)
class CommandShow:
    """Command: display every record in the database."""

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def organizes_the_show(self):
        self.view.displays_users(self.model.to_get_all())
if __name__ == '__main__':
    # Wire every command object to its model and console view.
    command_birthday = CommandBirthday(BirthdayPeople(), ConsoleBirthdayCommandNotifications())
    command_create = CommandCreate(RecordCreator(), ConsoleCreateCommandNotifications())
    command_delete = CommandDelete(RecordForDeletion(), ConsoleDeleteCommandNotifications())
    command_help = CommandHelp(ConsoleHelpCommandNotifications())
    command_edit = CommandEdit(RecordEditor(), ConsoleEditCommandNotifications())
    command_exit = CommandExit(ConsoleExitCommandNotifications())
    command_search = CommandSearch(RecordSearcher(), ConsoleSearchCommandNotifications())
    command_show = CommandShow(DatabaseContent(), ConsoleShowCommandNotifications())
    view = ConsoleControllerNotifications()
    controller = ConsoleController(command_birthday, command_create, command_delete, command_help, command_edit,
                                   command_exit, command_search, command_show, view)
    controller.start()
    # BUGFIX: the data directory was (re)created on every loop iteration
    # inside a try/except FileExistsError; ensure it exists once up front.
    os.makedirs('data', exist_ok=True)
    # Read-eval loop; CommandExit terminates the process via sys.exit.
    while True:
        controller.requests()
__author__ = 'Vanc Levstik'
import unittest
from pyflare import PyflareHosting
from mock_responses import mock_response_hosting
class PyflareTest(unittest.TestCase):
    """Unit tests for the PyflareHosting CloudFlare hosting-API client.

    Each test is wrapped in @mock_response_hosting, which presumably
    substitutes canned API responses for real HTTP calls — no network
    traffic is expected.
    """

    def setUp(self):
        # Client with a dummy API key; requests are mocked per test.
        self.pyflare = PyflareHosting('your_api_key')

    @mock_response_hosting
    def test_host_key_regen(self):
        response = self.pyflare.host_key_regen()
        self.assertEqual(response['result'], 'success')

    @mock_response_hosting
    def test_user_create(self):
        response = self.pyflare.user_create('newuser@example.com', 'password')
        self.assertEqual(
            response['response']['cloudflare_email'],
            'newuser@example.com'
        )

    @mock_response_hosting
    def test_user_create_unique_id(self):
        response = self.pyflare.user_create(
            'newuser@example.com',
            'password',
            unique_id='dummy_id')
        self.assertEqual(
            response['response']['cloudflare_email'],
            'newuser@example.com'
        )

    @mock_response_hosting
    def test_zone_set(self):
        response = self.pyflare.zone_set(
            'user_key',
            'someexample.com',
            'cloudflare-resolve-to.someexample.com',
            'www,blog,wordpress:cloudflare-rs2.someexample.com')
        self.assertEqual(
            response['response']['zone_name'],
            'someexample.com'
        )
        self.assertEqual(
            response['response']['resolving_to'],
            'cloudflare-resolve-to.someexample.com'
        )

    @mock_response_hosting
    def test_full_zone_set(self):
        response = self.pyflare.full_zone_set(
            'user_key',
            'someexample.com')
        self.assertEqual(
            response['response']['zone_name'],
            'someexample.com'
        )
        self.assertEqual(
            response['response']['jumpstart'],
            'true'
        )

    @mock_response_hosting
    def test_user_lookup_email(self):
        response = self.pyflare.user_lookup(
            cloudflare_email='newuser@example.com')
        self.assertEqual(
            response['response']['cloudflare_email'],
            'newuser@example.com'
        )
        self.assertEqual(
            response['response']['unique_id'],
            'someuniqueid'
        )

    @mock_response_hosting
    def test_user_lookup_unique_id(self):
        response = self.pyflare.user_lookup(
            unique_id='someuniqueid')
        self.assertEqual(
            response['response']['cloudflare_email'],
            'newuser@example.com'
        )
        self.assertEqual(
            response['response']['unique_id'],
            'someuniqueid'
        )

    @mock_response_hosting
    def test_user_auth_password(self):
        response = self.pyflare.user_auth(
            cloudflare_email='newuser@example.com',
            cloudflare_pass='password',
        )
        self.assertEqual(
            response['response']['user_key'],
            '8afbe6dea02407989af4dd4c97bb6e25'
        )

    @mock_response_hosting
    def test_user_auth_unique_id(self):
        response = self.pyflare.user_auth(
            unique_id='dummy_id'
        )
        self.assertEqual(
            response['response']['user_key'],
            '8afbe6dea02407989af4dd4c97bb6e25'
        )

    @mock_response_hosting
    def test_zone_lookup(self):
        response = self.pyflare.zone_lookup(
            user_key='user_key',
            zone_name='someexample.com'
        )
        self.assertEqual(
            response['response']['zone_name'],
            'someexample.com'
        )
        self.assertEqual(
            response['response']['zone_exists'],
            'true'
        )

    @mock_response_hosting
    def test_zone_delete(self):
        response = self.pyflare.zone_delete(
            user_key='user_key',
            zone_name='someexample.com'
        )
        self.assertEqual(
            response['response']['zone_name'],
            'someexample.com'
        )
        self.assertEqual(
            response['response']['zone_deleted'],
            'true'
        )

    @mock_response_hosting
    def test_zone_list(self):
        response = self.pyflare.zone_list(
            user_key='user_key',
            limit=10,
            zone_status='V'
        )
        self.assertEqual(
            response['response'][0]['zone_name'],
            'example.com'
        )
        self.assertEqual(
            response['response'][0]['zone_status'],
            'V'
        )
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
"""Base testcases for integrations unit tests."""
from __future__ import annotations
from typing import List
from django.conf import settings
from django.core.cache import cache
from djblets.integrations.manager import shutdown_integration_managers
from djblets.testing.testcases import TestCase, TestModelsLoaderMixin
class IntegrationsTestCase(TestModelsLoaderMixin, TestCase):
    """Base class for unit tests that work with integrations."""

    tests_app = 'djblets.integrations.tests'

    # Middleware list captured in setUp so tearDown can restore it.
    old_middleware_classes: List[str]

    def setUp(self) -> None:
        """Install the integrations middleware and reset the cache."""
        super().setUp()

        # Remember the configured middleware so it can be restored later.
        self.old_middleware_classes = list(settings.MIDDLEWARE)
        settings.MIDDLEWARE = [
            *self.old_middleware_classes,
            'djblets.integrations.middleware.IntegrationsMiddleware',
        ]

        cache.clear()

    def tearDown(self) -> None:
        """Restore the middleware settings and shut down the managers."""
        settings.MIDDLEWARE = self.old_middleware_classes
        shutdown_integration_managers()

        super().tearDown()
|
from Core.Metadata.Columns.ColumnMetadata import ColumnMetadata
from Core.Metadata.Columns.ColumnType import ColumnType
from GoogleTuring.Infrastructure.Domain.GoogleAttributeFieldsMetadata import GoogleAttributeFieldsMetadata
class GoogleAttributeMetadataColumnsPool:
    """Pool of ColumnMetadata entries for Google attribute (dimension) fields.

    Every class attribute pairs a field name taken from
    GoogleAttributeFieldsMetadata with a text column type; consumers look
    the columns up by attribute name.
    """

    # Structure fields and parameters
    # Object names, IDs, statuses, and dates
    accent_color = ColumnMetadata(GoogleAttributeFieldsMetadata.accent_color.name, ColumnType.text)
    account_currency_code = ColumnMetadata(GoogleAttributeFieldsMetadata.account_currency_code.name,
                                           ColumnType.text)
    account_time_zone = ColumnMetadata(GoogleAttributeFieldsMetadata.account_time_zone.name, ColumnType.text)
    ad_group_status = ColumnMetadata(GoogleAttributeFieldsMetadata.ad_group_status.name, ColumnType.text)
    ad_strength_info = ColumnMetadata(GoogleAttributeFieldsMetadata.ad_strength_info.name, ColumnType.text)
    ad_type = ColumnMetadata(GoogleAttributeFieldsMetadata.ad_type.name, ColumnType.text)
    allow_flexible_color = ColumnMetadata(GoogleAttributeFieldsMetadata.allow_flexible_color.name,
                                          ColumnType.text)
    automated = ColumnMetadata(GoogleAttributeFieldsMetadata.automated.name, ColumnType.text)
    base_ad_group_id = ColumnMetadata(GoogleAttributeFieldsMetadata.base_ad_group_id.name, ColumnType.text)
    # NOTE(review): camelCase mirrors GoogleAttributeFieldsMetadata.base_campaignId;
    # a distinct snake_case base_campaign_id (different source field) exists below.
    base_campaignId = ColumnMetadata(GoogleAttributeFieldsMetadata.base_campaignId.name, ColumnType.text)
    business_name = ColumnMetadata(GoogleAttributeFieldsMetadata.business_name.name, ColumnType.text)
    call_only_phone_number = ColumnMetadata(GoogleAttributeFieldsMetadata.call_only_phone_number.name,
                                            ColumnType.text)
    call_to_action_text = ColumnMetadata(GoogleAttributeFieldsMetadata.call_to_action_text.name, ColumnType.text)
    campaign_id = ColumnMetadata(GoogleAttributeFieldsMetadata.campaign_id.name, ColumnType.text)
    campaign_name = ColumnMetadata(GoogleAttributeFieldsMetadata.campaign_name.name, ColumnType.text)
    campaign_status = ColumnMetadata(GoogleAttributeFieldsMetadata.campaign_status.name, ColumnType.text)
    combined_approval_status = ColumnMetadata(GoogleAttributeFieldsMetadata.combined_approval_status.name,
                                              ColumnType.text)
    conversion_adjustment = ColumnMetadata(GoogleAttributeFieldsMetadata.conversion_adjustment.name,
                                           ColumnType.text)
    creative_destination_url = ColumnMetadata(GoogleAttributeFieldsMetadata.creative_destination_url.name,
                                              ColumnType.text)
    creative_final_app_urls = ColumnMetadata(GoogleAttributeFieldsMetadata.creative_final_app_urls.name,
                                             ColumnType.text)
    creative_final_mobile_urls = ColumnMetadata(GoogleAttributeFieldsMetadata.creative_final_mobile_urls.name,
                                                ColumnType.text)
    creative_final_urls = ColumnMetadata(GoogleAttributeFieldsMetadata.creative_final_urls.name, ColumnType.text)
    creative_final_url_suffix = ColumnMetadata(GoogleAttributeFieldsMetadata.creative_final_url_suffix.name,
                                               ColumnType.text)
    creative_tracking_url_template = ColumnMetadata(
        GoogleAttributeFieldsMetadata.creative_tracking_url_template.name,
        ColumnType.text)
    creative_url_custom_parameters = ColumnMetadata(
        GoogleAttributeFieldsMetadata.creative_url_custom_parameters.name,
        ColumnType.text)
    customer_descriptive_name = ColumnMetadata(GoogleAttributeFieldsMetadata.customer_descriptive_name.name,
                                               ColumnType.text)
    description = ColumnMetadata(GoogleAttributeFieldsMetadata.description.name, ColumnType.text)
    description_1 = ColumnMetadata(GoogleAttributeFieldsMetadata.description_1.name, ColumnType.text)
    description_2 = ColumnMetadata(GoogleAttributeFieldsMetadata.description_2.name, ColumnType.text)
    device_preference = ColumnMetadata(GoogleAttributeFieldsMetadata.device_preference.name, ColumnType.text)
    display_url = ColumnMetadata(GoogleAttributeFieldsMetadata.display_url.name, ColumnType.text)
    enhanced_display_creative_landscape_logo_image_media_id = ColumnMetadata(
        GoogleAttributeFieldsMetadata.enhanced_display_creative_landscape_logo_image_media_id.name,
        ColumnType.text)
    enhanced_display_creative_logo_image_media_id = ColumnMetadata(
        GoogleAttributeFieldsMetadata.enhanced_display_creative_logo_image_media_id.name,
        ColumnType.text)
    enhanced_display_creative_marketing_image_media_id = ColumnMetadata(
        GoogleAttributeFieldsMetadata.enhanced_display_creative_marketing_image_media_id.name,
        ColumnType.text)
    enhanced_display_creative_marketing_image_square_media_id = ColumnMetadata(
        GoogleAttributeFieldsMetadata.enhanced_display_creative_marketing_image_square_media_id.name,
        ColumnType.text)
    expanded_dynamic_search_creative_description_2 = ColumnMetadata(
        GoogleAttributeFieldsMetadata.expanded_dynamic_search_creative_description_2.name,
        ColumnType.text)
    expanded_text_ad_description_2 = ColumnMetadata(
        GoogleAttributeFieldsMetadata.expanded_text_ad_description_2.name,
        ColumnType.text)
    expanded_text_ad_headline_part_3 = ColumnMetadata(
        GoogleAttributeFieldsMetadata.expanded_text_ad_headline_part_3.name,
        ColumnType.text)
    external_customer_id = ColumnMetadata(GoogleAttributeFieldsMetadata.external_customer_id.name,
                                          ColumnType.text)
    format_setting = ColumnMetadata(GoogleAttributeFieldsMetadata.format_setting.name, ColumnType.text)
    gmail_creative_header_image_media_id = ColumnMetadata(
        GoogleAttributeFieldsMetadata.gmail_creative_header_image_media_id.name, ColumnType.text)
    gmail_creative_logo_image_media_id = ColumnMetadata(
        GoogleAttributeFieldsMetadata.gmail_creative_logo_image_media_id.name, ColumnType.text)
    gmail_creative_marketing_image_media_id = ColumnMetadata(
        GoogleAttributeFieldsMetadata.gmail_creative_marketing_image_media_id.name, ColumnType.text)
    gmail_teaser_business_name = ColumnMetadata(GoogleAttributeFieldsMetadata.gmail_teaser_business_name.name,
                                                ColumnType.text)
    gmail_teaser_description = ColumnMetadata(GoogleAttributeFieldsMetadata.gmail_teaser_description.name,
                                              ColumnType.text)
    gmail_teaser_headline = ColumnMetadata(GoogleAttributeFieldsMetadata.gmail_teaser_headline.name,
                                           ColumnType.text)
    headline = ColumnMetadata(GoogleAttributeFieldsMetadata.headline.name, ColumnType.text)
    headline_part_1 = ColumnMetadata(GoogleAttributeFieldsMetadata.headline_part_1.name, ColumnType.text)
    headline_part_2 = ColumnMetadata(GoogleAttributeFieldsMetadata.headline_part_2.name, ColumnType.text)
    id = ColumnMetadata(GoogleAttributeFieldsMetadata.id.name, ColumnType.text)
    ad_id = ColumnMetadata(GoogleAttributeFieldsMetadata.ad_id.name, ColumnType.text)
    image_ad_url = ColumnMetadata(GoogleAttributeFieldsMetadata.image_ad_url.name, ColumnType.text)
    image_creative_image_height = ColumnMetadata(GoogleAttributeFieldsMetadata.image_creative_image_height.name,
                                                 ColumnType.text)
    image_creative_image_width = ColumnMetadata(GoogleAttributeFieldsMetadata.image_creative_image_width.name,
                                                ColumnType.text)
    image_creative_mime_type = ColumnMetadata(GoogleAttributeFieldsMetadata.image_creative_mime_type.name,
                                              ColumnType.text)
    image_creative_name = ColumnMetadata(GoogleAttributeFieldsMetadata.image_creative_name.name, ColumnType.text)
    is_negative = ColumnMetadata(GoogleAttributeFieldsMetadata.is_negative.name, ColumnType.text)
    label_ids = ColumnMetadata(GoogleAttributeFieldsMetadata.label_ids.name, ColumnType.text)
    labels = ColumnMetadata(GoogleAttributeFieldsMetadata.labels.name, ColumnType.text)
    long_headline = ColumnMetadata(GoogleAttributeFieldsMetadata.long_headline.name, ColumnType.text)
    main_color = ColumnMetadata(GoogleAttributeFieldsMetadata.main_color.name, ColumnType.text)
    marketing_image_call_to_action_text = ColumnMetadata(
        GoogleAttributeFieldsMetadata.marketing_image_call_to_action_text.name, ColumnType.text)
    marketing_image_call_to_action_text_color = ColumnMetadata(
        GoogleAttributeFieldsMetadata.marketing_image_call_to_action_text_color.name, ColumnType.text)
    marketing_image_description = ColumnMetadata(GoogleAttributeFieldsMetadata.marketing_image_description.name,
                                                 ColumnType.text)
    marketing_image_headline = ColumnMetadata(GoogleAttributeFieldsMetadata.marketing_image_headline.name,
                                              ColumnType.text)
    multi_asset_responsive_display_ad_accent_color = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_accent_color.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_allow_flexible_color = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_allow_flexible_color.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_business_name = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_business_name.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_call_to_action_text = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_call_to_action_text.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_descriptions = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_descriptions.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_dynamic_settings_price_prefix = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_dynamic_settings_price_prefix.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_dynamic_settings_promo_text = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_dynamic_settings_promo_text.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_format_setting = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_format_setting.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_headlines = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_headlines.name, ColumnType.text)
    multi_asset_responsive_display_ad_landscape_logo_images = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_landscape_logo_images.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_logo_images = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_logo_images.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_long_headline = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_long_headline.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_main_color = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_main_color.name, ColumnType.text)
    multi_asset_responsive_display_ad_marketing_images = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_marketing_images.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_square_marketing_images = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_square_marketing_images.name,
        ColumnType.text)
    multi_asset_responsive_display_ad_you_tube_videos = ColumnMetadata(
        GoogleAttributeFieldsMetadata.multi_asset_responsive_display_ad_you_tube_videos.name,
        ColumnType.text)
    path_1 = ColumnMetadata(GoogleAttributeFieldsMetadata.path_1.name, ColumnType.text)
    path_2 = ColumnMetadata(GoogleAttributeFieldsMetadata.path_2.name, ColumnType.text)
    policy_summary = ColumnMetadata(GoogleAttributeFieldsMetadata.policy_summary.name, ColumnType.text)
    price_prefix = ColumnMetadata(GoogleAttributeFieldsMetadata.price_prefix.name, ColumnType.text)
    promo_text = ColumnMetadata(GoogleAttributeFieldsMetadata.promo_text.name, ColumnType.text)
    responsive_search_ad_descriptions = ColumnMetadata(
        GoogleAttributeFieldsMetadata.responsive_search_ad_descriptions.name, ColumnType.text)
    responsive_search_ad_headlines = ColumnMetadata(
        GoogleAttributeFieldsMetadata.responsive_search_ad_headlines.name,
        ColumnType.text)
    responsive_search_ad_path_1 = ColumnMetadata(GoogleAttributeFieldsMetadata.responsive_search_ad_path_1.name,
                                                 ColumnType.text)
    responsive_search_ad_path_2 = ColumnMetadata(GoogleAttributeFieldsMetadata.responsive_search_ad_path_2.name,
                                                 ColumnType.text)
    short_headline = ColumnMetadata(GoogleAttributeFieldsMetadata.short_headline.name, ColumnType.text)
    status = ColumnMetadata(GoogleAttributeFieldsMetadata.status.name, ColumnType.text)
    system_managed_entity_source = ColumnMetadata(
        GoogleAttributeFieldsMetadata.system_managed_entity_source.name,
        ColumnType.text)
    universal_app_ad_descriptions = ColumnMetadata(
        GoogleAttributeFieldsMetadata.universal_app_ad_descriptions.name,
        ColumnType.text)
    universal_app_ad_headlines = ColumnMetadata(GoogleAttributeFieldsMetadata.universal_app_ad_headlines.name,
                                                ColumnType.text)
    universal_app_ad_html_5_media_bundles = ColumnMetadata(
        GoogleAttributeFieldsMetadata.universal_app_ad_html_5_media_bundles.name, ColumnType.text)
    universal_app_ad_images = ColumnMetadata(GoogleAttributeFieldsMetadata.universal_app_ad_images.name,
                                             ColumnType.text)
    universal_app_ad_mandatory_ad_text = ColumnMetadata(
        GoogleAttributeFieldsMetadata.universal_app_ad_mandatory_ad_text.name, ColumnType.text)
    universal_app_ad_you_tube_videos = ColumnMetadata(
        GoogleAttributeFieldsMetadata.universal_app_ad_you_tube_videos.name,
        ColumnType.text)
    account_descriptive_name = ColumnMetadata(GoogleAttributeFieldsMetadata.account_descriptive_name.name,
                                              ColumnType.text)
    ad_group_desktop_bid_modifier = ColumnMetadata(
        GoogleAttributeFieldsMetadata.ad_group_desktop_bid_modifier.name,
        ColumnType.text)
    ad_group_id = ColumnMetadata(GoogleAttributeFieldsMetadata.ad_group_id.name, ColumnType.text)
    ad_group_mobile_bid_modifier = ColumnMetadata(
        GoogleAttributeFieldsMetadata.ad_group_mobile_bid_modifier.name,
        ColumnType.text)
    ad_group_name = ColumnMetadata(GoogleAttributeFieldsMetadata.ad_group_name.name, ColumnType.text)
    ad_group_tablet_bid_modifier = ColumnMetadata(
        GoogleAttributeFieldsMetadata.ad_group_tablet_bid_modifier.name,
        ColumnType.text)
    ad_group_type = ColumnMetadata(GoogleAttributeFieldsMetadata.ad_group_type.name, ColumnType.text)
    ad_rotation_mode = ColumnMetadata(GoogleAttributeFieldsMetadata.ad_rotation_mode.name, ColumnType.text)
    bidding_strategy_id = ColumnMetadata(GoogleAttributeFieldsMetadata.bidding_strategy_id.name, ColumnType.text)
    bidding_strategy_name = ColumnMetadata(GoogleAttributeFieldsMetadata.bidding_strategy_name.name,
                                           ColumnType.text)
    bidding_strategy_source = ColumnMetadata(GoogleAttributeFieldsMetadata.bidding_strategy_source.name,
                                             ColumnType.text)
    bidding_strategy_type = ColumnMetadata(GoogleAttributeFieldsMetadata.bidding_strategy_type.name,
                                           ColumnType.text)
    content_bid_criterion_type_group = ColumnMetadata(
        GoogleAttributeFieldsMetadata.content_bid_criterion_type_group.name,
        ColumnType.text)
    cpc_bid = ColumnMetadata(GoogleAttributeFieldsMetadata.cpc_bid.name, ColumnType.text)
    cpm_bid = ColumnMetadata(GoogleAttributeFieldsMetadata.cpm_bid.name, ColumnType.text)
    cpv_bid = ColumnMetadata(GoogleAttributeFieldsMetadata.cpv_bid.name, ColumnType.text)
    effective_target_roas = ColumnMetadata(GoogleAttributeFieldsMetadata.effective_target_roas.name,
                                           ColumnType.text)
    effective_target_roas_source = ColumnMetadata(
        GoogleAttributeFieldsMetadata.effective_target_roas_source.name,
        ColumnType.text)
    enhanced_cpc_enabled = ColumnMetadata(GoogleAttributeFieldsMetadata.enhanced_cpc_enabled.name,
                                          ColumnType.text)
    final_url_suffix = ColumnMetadata(GoogleAttributeFieldsMetadata.final_url_suffix.name, ColumnType.text)
    target_cpa = ColumnMetadata(GoogleAttributeFieldsMetadata.target_cpa.name, ColumnType.text)
    target_cpa_bid_source = ColumnMetadata(GoogleAttributeFieldsMetadata.target_cpa_bid_source.name,
                                           ColumnType.text)
    tracking_url_template = ColumnMetadata(GoogleAttributeFieldsMetadata.tracking_url_template.name,
                                           ColumnType.text)
    url_custom_parameters = ColumnMetadata(GoogleAttributeFieldsMetadata.url_custom_parameters.name,
                                           ColumnType.text)
    advertising_channel_sub_type = ColumnMetadata(
        GoogleAttributeFieldsMetadata.advertising_channel_sub_type.name,
        ColumnType.text)
    advertising_channel_type = ColumnMetadata(GoogleAttributeFieldsMetadata.advertising_channel_type.name,
                                              ColumnType.text)
    amount = ColumnMetadata(GoogleAttributeFieldsMetadata.amount.name, ColumnType.text)
    budget_id = ColumnMetadata(GoogleAttributeFieldsMetadata.budget_id.name, ColumnType.text)
    campaign_desktop_bid_modifier = ColumnMetadata(
        GoogleAttributeFieldsMetadata.campaign_desktop_bid_modifier.name,
        ColumnType.text)
    campaign_group_id = ColumnMetadata(GoogleAttributeFieldsMetadata.campaign_group_id.name, ColumnType.text)
    campaign_mobile_bid_modifier = ColumnMetadata(
        GoogleAttributeFieldsMetadata.campaign_mobile_bid_modifier.name,
        ColumnType.text)
    campaign_tablet_bid_modifier = ColumnMetadata(
        GoogleAttributeFieldsMetadata.campaign_tablet_bid_modifier.name,
        ColumnType.text)
    campaign_trial_type = ColumnMetadata(GoogleAttributeFieldsMetadata.campaign_trial_type.name, ColumnType.text)
    end_date = ColumnMetadata(GoogleAttributeFieldsMetadata.end_date.name, ColumnType.text)
    has_recommended_budget = ColumnMetadata(GoogleAttributeFieldsMetadata.has_recommended_budget.name,
                                            ColumnType.text)
    is_budget_explicitly_shared = ColumnMetadata(GoogleAttributeFieldsMetadata.is_budget_explicitly_shared.name,
                                                 ColumnType.text)
    maximize_conversion_value_target_roas = ColumnMetadata(
        GoogleAttributeFieldsMetadata.maximize_conversion_value_target_roas.name, ColumnType.text)
    period = ColumnMetadata(GoogleAttributeFieldsMetadata.period.name, ColumnType.text)
    recommended_budget_amount = ColumnMetadata(GoogleAttributeFieldsMetadata.recommended_budget_amount.name,
                                               ColumnType.text)
    serving_status = ColumnMetadata(GoogleAttributeFieldsMetadata.serving_status.name, ColumnType.text)
    start_date = ColumnMetadata(GoogleAttributeFieldsMetadata.start_date.name, ColumnType.text)
    total_amount = ColumnMetadata(GoogleAttributeFieldsMetadata.total_amount.name, ColumnType.text)
    approval_status = ColumnMetadata(GoogleAttributeFieldsMetadata.approval_status.name, ColumnType.text)
    cpc_bid_source = ColumnMetadata(GoogleAttributeFieldsMetadata.cpc_bid_source.name, ColumnType.text)
    creative_quality_score = ColumnMetadata(GoogleAttributeFieldsMetadata.creative_quality_score.name,
                                            ColumnType.text)
    criteria = ColumnMetadata(GoogleAttributeFieldsMetadata.criteria.name, ColumnType.text)
    gender = ColumnMetadata(GoogleAttributeFieldsMetadata.gender.name, ColumnType.text)
    age_range = ColumnMetadata(GoogleAttributeFieldsMetadata.age_range.name, ColumnType.text)
    keywords = ColumnMetadata(GoogleAttributeFieldsMetadata.keywords.name, ColumnType.text)
    criteria_destination_url = ColumnMetadata(GoogleAttributeFieldsMetadata.criteria_destination_url.name,
                                              ColumnType.text)
    estimated_add_clicks_at_first_position_cpc = ColumnMetadata(
        GoogleAttributeFieldsMetadata.estimated_add_clicks_at_first_position_cpc.name, ColumnType.text)
    estimated_add_cost_at_first_position_cpc = ColumnMetadata(
        GoogleAttributeFieldsMetadata.estimated_add_cost_at_first_position_cpc.name, ColumnType.text)
    final_app_urls = ColumnMetadata(GoogleAttributeFieldsMetadata.final_app_urls.name, ColumnType.text)
    final_mobile_urls = ColumnMetadata(GoogleAttributeFieldsMetadata.final_mobile_urls.name, ColumnType.text)
    final_urls = ColumnMetadata(GoogleAttributeFieldsMetadata.final_urls.name, ColumnType.text)
    first_page_cpc = ColumnMetadata(GoogleAttributeFieldsMetadata.first_page_cpc.name, ColumnType.text)
    first_position_cpc = ColumnMetadata(GoogleAttributeFieldsMetadata.first_position_cpc.name, ColumnType.text)
    has_quality_score = ColumnMetadata(GoogleAttributeFieldsMetadata.has_quality_score.name, ColumnType.text)
    keyword_match_type = ColumnMetadata(GoogleAttributeFieldsMetadata.keyword_match_type.name, ColumnType.text)
    post_click_quality_score = ColumnMetadata(GoogleAttributeFieldsMetadata.post_click_quality_score.name,
                                              ColumnType.text)
    quality_score = ColumnMetadata(GoogleAttributeFieldsMetadata.quality_score.name, ColumnType.text)
    search_predicted_ctr = ColumnMetadata(GoogleAttributeFieldsMetadata.search_predicted_ctr.name,
                                          ColumnType.text)
    system_serving_status = ColumnMetadata(GoogleAttributeFieldsMetadata.system_serving_status.name,
                                           ColumnType.text)
    top_of_page_cpc = ColumnMetadata(GoogleAttributeFieldsMetadata.top_of_page_cpc.name, ColumnType.text)
    vertical_id = ColumnMetadata(GoogleAttributeFieldsMetadata.vertical_id.name, ColumnType.text)
    city_name = ColumnMetadata(GoogleAttributeFieldsMetadata.city_name.name, ColumnType.text)
    country_name = ColumnMetadata(GoogleAttributeFieldsMetadata.country_name.name, ColumnType.text)
    is_targeting_location = ColumnMetadata(GoogleAttributeFieldsMetadata.is_targeting_location.name,
                                           ColumnType.text)
    metro_criteria_id = ColumnMetadata(GoogleAttributeFieldsMetadata.metro_criteria_id.name, ColumnType.text)
    most_specific_criteria_id = ColumnMetadata(GoogleAttributeFieldsMetadata.most_specific_criteria_id.name,
                                               ColumnType.text)
    region_name = ColumnMetadata(GoogleAttributeFieldsMetadata.region_name.name, ColumnType.text)
    base_campaign_id = ColumnMetadata(GoogleAttributeFieldsMetadata.base_campaign_id.name, ColumnType.text)
    bid_modifier = ColumnMetadata(GoogleAttributeFieldsMetadata.bid_modifier.name, ColumnType.text)
    cpm_bid_source = ColumnMetadata(GoogleAttributeFieldsMetadata.cpm_bid_source.name, ColumnType.text)
    is_restrict = ColumnMetadata(GoogleAttributeFieldsMetadata.is_restrict.name, ColumnType.text)
|
#!/usr/bin/python3
"""
listen.py
This is the clearpixel listener that is used to determine when an email has been opened by the recipient.
Listener id: 5
Activity id: 1 only.
"""
import os
import cgi
import time
import database
# the data we want is encoded in the following format:
# <16 bytes of junk>AAACC<more junk>
#
# where AAA is the user identifier and
# CC is the batch number
# NOTE(review): the cgi module is deprecated (PEP 594) and removed in
# Python 3.13; migrate to urllib.parse.parse_qs over QUERY_STRING.
data = cgi.FieldStorage()
dataString = ""
if 'x' in data:
    dataString = data['x'].value
if len(dataString) > 20:
    whatCode = "51"  # 5 for the clearpixel listener, 1 for the 'page opened'
    # .get() instead of [] so a missing REMOTE_ADDR cannot crash the
    # listener before the pixel redirect below is emitted.
    ip = os.environ.get("REMOTE_ADDR", "")
    try:
        # int() both decodes and validates the id fields; a malformed
        # tracking code must not abort the script (that would break the
        # image response), so fall through on ValueError instead.
        user_id = int(dataString[16:19])
        batch_id = int(dataString[19:21])
    except ValueError:
        user_id = batch_id = None
    # Record the hit only when the ids decoded and the client address is a
    # plain IPv4/IPv6 literal. The INSERT is still built by string
    # concatenation, so rejecting any other characters blocks SQL
    # injection through the address value.
    if user_id is not None and ip and \
            all(c in "0123456789abcdefABCDEF:." for c in ip):
        insertQuery = "INSERT INTO activity (what, user_id, batch_id, datetime, ip_address) VALUES (" \
            + whatCode + ", " + str(user_id) + ", " + str(batch_id) + ", " + str(int(time.time())) \
            + ", '" + ip + "')"
        db = database.Database()
        db.ExecuteInsert(insertQuery)
        db.Close()
# return the clearpixel image.
print("Location: http://listen.cybercrime-observatory.tech/clear.png\n\n")
|
"""Unit test module for PSyGrid class."""
# import unittest
# import os
# import shutil
# import zipfile
# from posydon.utils.common_functions import PATH_TO_POSYDON
# from posydon.grids.psygrid import PSyGrid
#
#
# class TestPSyGrid(unittest.TestCase):
# """Class for unit-testing the PSyGrid object."""
#
# @classmethod
# def setUpClass(cls):
# """Prepare the data for the individual tests."""
# # find the input/output paths
# path_to_workdir = os.path.join(
# PATH_TO_POSYDON, "posydon/tests/data/POSYDON-UNIT-TESTS/grids/")
# path_to_zipfile = os.path.join(
# path_to_workdir, "sample_HMSHMS_grid.zip")
# cls.path_to_extract = os.path.join(path_to_workdir, "tmp")
# cls.path_to_psygrid = os.path.join(path_to_workdir, "tmp.h5")
# cls.path_to_psyslim = os.path.join(path_to_workdir, "tmp_slim.h5")
#
# # unzip the test data
# with zipfile.ZipFile(path_to_zipfile) as zipf:
# zipf.extractall(path=cls.path_to_extract)
#
# # try to create the PSyGrid objects, and store whether they failed
# try:
# psygrid = PSyGrid()
# psygrid.create(cls.path_to_extract, cls.path_to_psygrid)
# del psygrid
# cls.failure_msg = None
# except Exception as e:
# cls.failure_msg = str(e)
# try:
# psygrid = PSyGrid()
# psygrid.create(cls.path_to_extract, cls.path_to_psyslim, slim=True)
# del psygrid
# cls.failure_msg_slim = None
# except Exception as e:
# cls.failure_msg_slim = str(e)
#
# @classmethod
# def tearDownClass(cls):
# """Remove the unzipped an newly created files."""
# shutil.rmtree(cls.path_to_extract)
# os.remove(cls.path_to_psygrid)
# os.remove(cls.path_to_psyslim)
#
# def setUp(self):
# """Open the grids before each test."""
#         # if we failed to create the grid objects, do not continue
# if self.failure_msg is not None:
# self.fail("Cannot create a PSyGrid object: "
# + self.failure_msg)
#
# if self.failure_msg_slim is not None:
# self.fail("Cannot create a slim PSyGrid object: "
# + self.failure_msg_slim)
#
# # load the grids before any testing
# try:
# self.psygrid = PSyGrid(self.path_to_psygrid)
# except Exception as e:
# self.fail("Cannot load the PSyGrid object: " + str(e))
#
# try:
# self.psygrid_slim = PSyGrid(self.path_to_psyslim)
# except Exception as e:
# self.fail("Cannot load the slim PSyGrid object: " + str(e))
#
# # keep them together for use in loops
# self.grids = [self.psygrid, self.psygrid_slim]
#
# def tearDown(self):
# """Close and delete the grid objects after each test."""
# del self.psygrid
# del self.psygrid_slim
#
# def test_filename(self):
# """Check grid paths, and that PSyGrid objects know them."""
# self.assertEqual(self.psygrid.filepath, self.path_to_psygrid)
# self.assertEqual(self.psygrid_slim.filepath, self.path_to_psyslim)
#
# def test_print_and_str(self):
# """Test whether print works with grids (or `str` method)."""
# for grid in self.grids:
# s = str(grid)
# self.assertTrue(len(s) > 0)
#
# def test_config(self):
# """Check that the ConfigFile instance works and reports HDF5 format."""
# for grid in self.grids:
# self.assertEqual(grid.config.format, "hdf5")
#
# def test_number_of_runs(self):
# """The normal grid must have run data, while the slim one must not."""
# self.assertTrue(self.psygrid.n_runs > 0)
# self.assertTrue(self.psygrid_slim.n_runs == 0)
#
# def test_in_keyword(self):
# """Check `in` keyword and extreme values of run indices."""
# N = self.psygrid.n_runs
# self.assertTrue(0 in self.psygrid)
# self.assertTrue(N not in self.psygrid)
# self.assertTrue(0 not in self.psygrid_slim)
#
# def test_len_list_set(self):
# """Test the `len`, `list` and `set` methods."""
# N = self.psygrid.n_runs
# self.assertEqual(N, len(self.psygrid))
# self.assertEqual(N, len(list(self.psygrid)))
# self.assertEqual(N, len(set(self.psygrid)))
#
# def test_data_integrity(self):
# """Test whether the run data agree with the initial/finial values."""
# self.assertEqual(self.psygrid[0].binary_history["age"][0],
# self.psygrid.initial_values[0]["age"])
# self.assertEqual(self.psygrid.initial_values["age"][0],
# self.psygrid.initial_values[0]["age"])
# self.assertEqual(self.psygrid.final_values["age"][0],
# self.psygrid[0].final_values["age"])
# self.assertEqual(self.psygrid_slim.initial_values["age"][0],
# self.psygrid_slim.initial_values[0]["age"])
#
#
# if __name__ == "__main__":
# unittest.main()
|
import copy
import datetime
import hashlib
import json
import re
from threading import Lock
import semver
class ResourceKeyExistsError(Exception):
    """Error raised when a resource key is already in use."""
    pass
class ConstructResourceError(Exception):
    """Raised when an openshift resource body fails validation."""

    def __init__(self, msg):
        """Wrap ``msg`` with a standard prefix identifying the failure."""
        prefix = "error constructing openshift resource: "
        super().__init__(prefix + str(msg))
# Regexes for kubernetes objects fields which have to adhere to DNS-1123
# Maximum lengths for DNS-1123 subdomains and labels.
DNS_SUBDOMAIN_MAX_LENGTH = 253
DNS_SUBDOMAIN_RE = re.compile(
    r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$')
DNS_LABEL_MAX_LENGTH = 63
DNS_LABEL_RE = re.compile(r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')
# Kubernetes documentation on object naming, referenced in error messages.
DNS_NAMES_URL = \
    'https://kubernetes.io/docs/concepts/overview/working-with-objects/names/'
# Data keys injected by the cluster that are ignored when comparing objects.
IGNORABLE_DATA_FIELDS = ['service-ca.crt']
class OpenshiftResource:
def __init__(self, body, integration, integration_version,
error_details='', caller_name=None):
self.body = body
self.integration = integration
self.integration_version = integration_version
self.error_details = error_details
self.caller_name = caller_name
self.verify_valid_k8s_object()
    def __eq__(self, other):
        """Compare resources by intersecting their bodies.

        NOTE(review): defining ``__eq__`` without ``__hash__`` makes
        instances unhashable; confirm instances are never stored in
        sets or used as dict keys.
        """
        return self.obj_intersect_equal(self.body, other.body)
    def obj_intersect_equal(self, obj1, obj2):
        """Recursively check that ``obj1`` is contained in ``obj2``.

        This is an intersection-style comparison: keys present in ``obj1``
        (the desired state) are compared against ``obj2`` (the observed
        state), with special handling for fields kubernetes mutates.
        """
        # obj1 == d_item
        # obj2 == c_item
        if obj1.__class__ != obj2.__class__:
            return False
        if isinstance(obj1, dict):
            for obj1_k, obj1_v in obj1.items():
                obj2_v = obj2.get(obj1_k, None)
                if obj2_v is None:
                    # A key missing from obj2 only matters when obj1 has a
                    # meaningful value for it.
                    if obj1_v not in [None, '']:
                        return False
                if self.ignorable_field(obj1_k):
                    pass
                elif self.ignorable_key_value_pair(obj1_k, obj1_v):
                    pass
                elif obj1_k in ['data', 'labels', 'matchLabels']:
                    # For these maps obj2 may not carry extra keys (other
                    # than ignorable ones), and shared keys must match.
                    diff = [k for k in obj2_v
                            if k not in obj1_v
                            and k not in IGNORABLE_DATA_FIELDS]
                    if diff or not self.obj_intersect_equal(obj1_v, obj2_v):
                        return False
                elif obj1_k == 'cpu':
                    # CPU quantities compare after millicore normalization.
                    equal = self.cpu_equal(obj1_v, obj2_v)
                    if not equal:
                        return False
                elif obj1_k == 'apiVersion':
                    # Some apiVersion values are treated as interchangeable.
                    valid = self.api_version_mutation(obj1_v, obj2_v)
                    if not valid:
                        return False
                elif obj1_k == 'imagePullSecrets':
                    # remove default pull secrets added by k8s
                    obj2_v_clean = [s for s in obj2_v
                                    if '-dockercfg-' not in s['name']]
                    if not self.obj_intersect_equal(obj1_v, obj2_v_clean):
                        return False
                elif not self.obj_intersect_equal(obj1_v, obj2_v):
                    return False
        elif isinstance(obj1, list):
            # Lists must match element-wise, in order.
            if len(obj1) != len(obj2):
                return False
            for index, item in enumerate(obj1):
                if not self.obj_intersect_equal(item, obj2[index]):
                    return False
        elif obj1 != obj2:
            # Scalars (and any other types) must compare equal directly.
            return False
        return True
@staticmethod
def ignorable_field(val):
ignorable_fields = [
'kubectl.kubernetes.io/last-applied-configuration',
'creationTimestamp',
'resourceVersion',
'generation',
'selfLink',
'uid',
'status',
'fieldRef'
]
if val in ignorable_fields:
return True
return False
@staticmethod
def ignorable_key_value_pair(key, val):
ignorable_key_value_pair = {
'annotations': None,
'divisor': '0'
}
if key in ignorable_key_value_pair and \
ignorable_key_value_pair[key] == val:
return True
return False
@staticmethod
def cpu_equal(val1, val2):
# normalize both to string
try:
val1 = f"{int(float(val1) * 1000)}m"
except Exception:
pass
try:
val2 = f"{int(float(val2) * 1000)}m"
except Exception:
pass
return val1 == val2
@staticmethod
def api_version_mutation(val1, val2):
# required temporarily, pending response on
# https://redhat.service-now.com/surl.do?n=INC1224482
if val1 == 'apps/v1' and val2 == 'extensions/v1beta1':
return True
if val1 == 'extensions/v1beta1' and val2 == 'apps/v1':
return True
if val1 == 'networking.k8s.io/v1' and val2 == 'extensions/v1beta1':
return True
return val1 == val2
    @property
    def name(self):
        """The object's ``metadata.name``."""
        return self.body['metadata']['name']
    @property
    def kind(self):
        """The object's ``kind`` (e.g. ``Role``)."""
        return self.body['kind']
@property
def caller(self):
    """Caller name: the explicit attribute if set, otherwise the
    qontract.caller_name annotation; None when neither is available."""
    try:
        return self.caller_name or \
            self.body['metadata']['annotations']['qontract.caller_name']
    except KeyError:
        return None
def verify_valid_k8s_object(self):
    """Validate that the body describes a well-formed k8s object.

    Checks that name and kind are present, that the name is a valid
    DNS-1123 subdomain (RBAC kinds are exempt, since their names may
    contain characters such as ':'), and that any entries under
    spec.template.spec.containers are a list of items whose names are
    valid DNS labels.

    Raises:
        ConstructResourceError: on any validation failure.
    """
    try:
        self.name
        self.kind
    except (KeyError, TypeError) as e:
        msg = "resource invalid data ({}). details: {}".format(
            e.__class__.__name__, self.error_details)
        # chain the original error so the root cause is not lost
        raise ConstructResourceError(msg) from e

    if self.kind not in \
            ['Role', 'RoleBinding', 'ClusterRole', 'ClusterRoleBinding'] \
            and (not DNS_SUBDOMAIN_RE.match(self.name) or
                 not len(self.name) <= DNS_SUBDOMAIN_MAX_LENGTH):
        msg = f"The {self.kind} \"{self.name}\" is invalid: " + \
            f"metadata.name: Invalid value: \"{self.name}\". " + \
            "This field must adhere to DNS-1123 subdomain names spec." + \
            f"More info can be found at {DNS_NAMES_URL}."
        raise ConstructResourceError(msg)

    # All objects that have a spec.template.spec.containers[]
    try:
        containers = self.body['spec']['template']['spec']['containers']
        if not isinstance(containers, list):
            msg = f"The {self.kind} \"{self.name}\" is invalid: " + \
                "spec.template.spec.containers is not a list"
            raise ConstructResourceError(msg)
        for c in containers:
            cname = c.get('name', None)
            if cname is None:
                msg = f"The {self.kind} \"{self.name}\" is invalid: " + \
                    "an item in spec.template.spec.containers was " + \
                    "found without a required name field"
                raise ConstructResourceError(msg)
            if (not DNS_LABEL_RE.match(cname) or
                    not len(cname) <= DNS_LABEL_MAX_LENGTH):
                # message grammar fixed: "a container" (was "an container")
                msg = f"The {self.kind} \"{self.name}\" is invalid: " + \
                    "a container in spec.template.spec.containers " + \
                    f"was found with an invalid name ({cname}). More " + \
                    f"info at {DNS_NAMES_URL}."
                raise ConstructResourceError(msg)
    except KeyError:
        # object has no spec.template.spec.containers - nothing to check
        pass
def has_qontract_annotations(self):
    """Whether the resource carries qontract annotations matching this
    integration (same integration name, same major integration version,
    and a sha256sum present).

    The original implementation used `assert` for these checks, which is
    stripped when Python runs with -O; explicit comparisons keep the
    behavior in optimized mode.
    """
    try:
        annotations = self.body['metadata']['annotations']
        if annotations['qontract.integration'] != self.integration:
            return False
        integration_version = annotations['qontract.integration_version']
        if semver.VersionInfo.parse(integration_version).major != \
                semver.VersionInfo.parse(self.integration_version).major:
            return False
        if annotations['qontract.sha256sum'] is None:
            return False
    except KeyError:
        return False
    except ValueError:
        # raised by semver.VersionInfo.parse on malformed versions
        return False
    return True
def has_owner_reference(self):
    """True when the resource has at least one ownerReference set."""
    owner_refs = self.body['metadata'].get('ownerReferences', [])
    return bool(owner_refs)
def has_valid_sha256sum(self):
    """Whether the stored qontract.sha256sum annotation matches the
    sha256sum recomputed from the body; False if the annotation (or any
    field the recomputation needs) is missing."""
    try:
        stored = \
            self.body['metadata']['annotations']['qontract.sha256sum']
        return stored == self.sha256sum()
    except KeyError:
        return False
def annotate(self):
    """
    Creates an OpenshiftResource with the qontract annotations, and removes
    unneeded Openshift fields.

    Returns:
        openshift_resource: new OpenshiftResource object with
            qontract.* annotations (integration name/version, sha256sum
            of the canonical body, update timestamp and, if set, the
            caller name).
    """
    # calculate sha256sum of canonical body
    canonical_body = self.canonicalize(self.body)
    sha256sum = self.calculate_sha256sum(self.serialize(canonical_body))

    # create new body object (deep copy so the original is untouched)
    body = copy.deepcopy(self.body)

    # create annotations if not present; setdefault alone does not cover
    # an explicit `annotations: null` in the manifest
    body['metadata'].setdefault('annotations', {})
    if body['metadata']['annotations'] is None:
        body['metadata']['annotations'] = {}

    annotations = body['metadata']['annotations']

    # add qontract annotations
    annotations['qontract.integration'] = self.integration
    annotations['qontract.integration_version'] = \
        self.integration_version
    annotations['qontract.sha256sum'] = sha256sum
    # NOTE(review): utcnow() is naive (and deprecated in newer Pythons);
    # switching to now(timezone.utc) would change the isoformat output,
    # so it is kept as-is.
    now = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
    annotations['qontract.update'] = now
    if self.caller_name:
        annotations['qontract.caller_name'] = self.caller_name

    return OpenshiftResource(body, self.integration,
                             self.integration_version)
def sha256sum(self):
    """Return the qontract.sha256sum computed for this resource, read
    back from the annotations of the freshly annotated body."""
    annotated_body = self.annotate().body
    return annotated_body['metadata']['annotations']['qontract.sha256sum']
def toJSON(self):
    """Return the resource body serialized as sorted-key JSON."""
    body = self.body
    return self.serialize(body)
@staticmethod
def canonicalize(body):
    """Return a canonical deep copy of `body` suitable for hashing and
    comparison: server-populated metadata, status, qontract annotations
    and kind-specific defaulted/managed fields are stripped so that two
    logically identical resources serialize identically."""
    body = copy.deepcopy(body)

    # create annotations if not present (covers explicit `annotations: null`)
    body['metadata'].setdefault('annotations', {})
    if body['metadata']['annotations'] is None:
        body['metadata']['annotations'] = {}
    annotations = body['metadata']['annotations']

    # remove openshift specific params populated by the server
    body['metadata'].pop('creationTimestamp', None)
    body['metadata'].pop('resourceVersion', None)
    body['metadata'].pop('generation', None)
    body['metadata'].pop('selfLink', None)
    body['metadata'].pop('uid', None)
    body['metadata'].pop('namespace', None)
    body['metadata'].pop('managedFields', None)
    annotations.pop('kubectl.kubernetes.io/last-applied-configuration',
                    None)

    # remove status
    body.pop('status', None)

    # Default fields for specific resource types
    # ConfigMaps and Secrets are by default Opaque
    # NOTE(review): the 'Opaque' default applies to Secrets; a ConfigMap
    # normally has no `type` field, so that branch is a harmless no-op.
    if body['kind'] in ('ConfigMap', 'Secret') and \
            body.get('type') == 'Opaque':
        body.pop('type')

    if body['kind'] == 'Deployment':
        # revision counter is managed by the deployment controller
        annotations.pop('deployment.kubernetes.io/revision', None)

    if body['kind'] == 'Route':
        if body['spec'].get('wildcardPolicy') == 'None':
            body['spec'].pop('wildcardPolicy')
        # remove tls-acme specific params from Route
        if 'kubernetes.io/tls-acme' in annotations:
            annotations.pop(
                'kubernetes.io/tls-acme-awaiting-authorization-owner',
                None)
            annotations.pop(
                'kubernetes.io/tls-acme-awaiting-authorization-at-url',
                None)
            # the tls-acme controller fills in key/certificate at runtime
            if 'tls' in body['spec']:
                tls = body['spec']['tls']
                tls.pop('key', None)
                tls.pop('certificate', None)
        subdomain = body['spec'].get('subdomain', None)
        if subdomain == '':
            body['spec'].pop('subdomain', None)

    if body['kind'] == 'ServiceAccount':
        if 'imagePullSecrets' in body:
            # remove default pull secrets added by k8s
            imagePullSecrets = \
                [s for s in body.pop('imagePullSecrets')
                 if '-dockercfg-' not in s['name']]
            if imagePullSecrets:
                body['imagePullSecrets'] = imagePullSecrets
        if 'secrets' in body:
            body.pop('secrets')

    if body['kind'] == 'Role':
        for rule in body['rules']:
            # sort so that rule ordering does not affect the checksum
            if 'resources' in rule:
                rule['resources'].sort()
            if 'verbs' in rule:
                rule['verbs'].sort()
            if 'attributeRestrictions' in rule and \
                    not rule['attributeRestrictions']:
                rule.pop('attributeRestrictions')
        # TODO: remove this once we have no 3.11 clusters
        if body['apiVersion'] == 'authorization.openshift.io/v1':
            body['apiVersion'] = 'rbac.authorization.k8s.io/v1'

    if body['kind'] == 'OperatorGroup':
        # managed by OLM at runtime
        annotations.pop('olm.providedAPIs', None)

    if body['kind'] == 'RoleBinding':
        if 'groupNames' in body:
            body.pop('groupNames')
        if 'userNames' in body:
            body.pop('userNames')
        if 'roleRef' in body:
            roleRef = body['roleRef']
            if 'namespace' in roleRef:
                roleRef.pop('namespace')
            # drop apiGroup when it is implied by the apiVersion
            if 'apiGroup' in roleRef and \
                    roleRef['apiGroup'] in body['apiVersion']:
                roleRef.pop('apiGroup')
            if 'kind' in roleRef:
                roleRef.pop('kind')
        for subject in body['subjects']:
            if 'namespace' in subject:
                subject.pop('namespace')
            if 'apiGroup' in subject and \
                    (subject['apiGroup'] == '' or
                     subject['apiGroup'] in body['apiVersion']):
                subject.pop('apiGroup')
        # TODO: remove this once we have no 3.11 clusters
        if body['apiVersion'] == 'rbac.authorization.k8s.io/v1':
            body['apiVersion'] = 'authorization.openshift.io/v1'

    if body['kind'] == 'ClusterRoleBinding':
        # TODO: remove this once we have no 3.11 clusters
        if body['apiVersion'] == 'authorization.openshift.io/v1':
            body['apiVersion'] = 'rbac.authorization.k8s.io/v1'
        if 'userNames' in body:
            body.pop('userNames')
        if 'roleRef' in body:
            roleRef = body['roleRef']
            if 'apiGroup' in roleRef and \
                    roleRef['apiGroup'] in body['apiVersion']:
                roleRef.pop('apiGroup')
            if 'kind' in roleRef:
                roleRef.pop('kind')
        if 'groupNames' in body:
            body.pop('groupNames')

    if body['kind'] == 'Service':
        spec = body['spec']
        # drop values that equal the k8s defaults
        if spec.get('sessionAffinity') == 'None':
            spec.pop('sessionAffinity')
        if spec.get('type') == 'ClusterIP':
            spec.pop('clusterIP', None)

    # remove qontract specific params
    annotations.pop('qontract.integration', None)
    annotations.pop('qontract.integration_version', None)
    annotations.pop('qontract.sha256sum', None)
    annotations.pop('qontract.update', None)
    annotations.pop('qontract.caller_name', None)

    return body
@staticmethod
def serialize(body):
    """Serialize `body` to JSON with sorted keys (deterministic form)."""
    serialized = json.dumps(body, sort_keys=True)
    return serialized
@staticmethod
def calculate_sha256sum(body):
    """Hex SHA-256 digest of the UTF-8 encoding of the string `body`."""
    return hashlib.sha256(body.encode('utf-8')).hexdigest()
class ResourceInventory:
    """Thread-safe store of current/desired resources, keyed by
    cluster -> namespace -> resource_type."""

    def __init__(self):
        self._clusters = {}
        self._error_registered = False
        self._error_registered_clusters = {}
        self._lock = Lock()

    def initialize_resource_type(self, cluster, namespace, resource_type):
        """Ensure the nested dict structure exists for this key triple."""
        ns_slot = self._clusters.setdefault(cluster, {}) \
                                .setdefault(namespace, {})
        ns_slot.setdefault(resource_type, {
            'current': {},
            'desired': {},
            'use_admin_token': {}
        })

    def add_desired(self, cluster, namespace, resource_type, name, value,
                    privileged=False):
        """Record a desired resource; raises ResourceKeyExistsError on a
        duplicate name.

        privileged permissions to apply resources to clusters are managed on
        a per-namespace level in qontract-schema namespace files, but are
        tracked on a per-resource level in ResourceInventory and the
        state-specs that lead up to add_desired calls. while this is a
        mismatch between schema and implementation for now, it will enable
        us to implement per-resource configuration in the future
        """
        with self._lock:
            slot = self._clusters[cluster][namespace][resource_type]
            if name in slot['desired']:
                raise ResourceKeyExistsError(name)
            slot['desired'][name] = value
            slot['use_admin_token'][name] = privileged

    def add_current(self, cluster, namespace, resource_type, name, value):
        """Record a resource currently present on the cluster."""
        with self._lock:
            slot = self._clusters[cluster][namespace][resource_type]
            slot['current'][name] = value

    def __iter__(self):
        """Yield (cluster, namespace, resource_type, data) for every entry."""
        for cluster, namespaces in self._clusters.items():
            for namespace, types in namespaces.items():
                for resource_type, data in types.items():
                    yield cluster, namespace, resource_type, data

    def register_error(self, cluster=None):
        """Flag that an error occurred, optionally attributed to `cluster`."""
        self._error_registered = True
        if cluster is not None:
            self._error_registered_clusters[cluster] = True

    def has_error_registered(self, cluster=None):
        """Whether an error was registered (for `cluster` if given,
        otherwise globally)."""
        if cluster is not None:
            return self._error_registered_clusters.get(cluster, False)
        return self._error_registered
|
from dqn import DQNTrainer
from utils.grid_search import RandomGridSearch
from joblib import Parallel, delayed
import multiprocessing
import gc
#from guppy import hpy
#from memory_profiler import profile
#@profile
def parallelize(game, params):
    """Train one DQN configuration; intended to run as a joblib worker.

    :param game: path to the .ulx game file to train on
    :param params: hyperparameter dict sampled from the grid
    """
    print(params)
    #game = "/home/eilab/Raj/tw-drl/Games/obj_20_qlen_5_room_10/train/game_" + str(10) + ".ulx"
    trainer = DQNTrainer(game, params)
    trainer.train()
    #del trainer.model
    #del trainer
    #gc.collect()
"""
while not grid_search.is_done():
params = grid_search.get_config()
#trainer = DQNTrainer(game, params)
#trainer.train()
"""
if __name__ == "__main__":
param_grid = {
'num_episodes': [1000, 5000],
'num_frames': [500, 1000, 5000],
'replay_buffer_type': ['priority', 'standard'],
'replay_buffer_size': [10000, 50000],
#'num_frames': [100000, 500000],
'batch_size': [64],
'lr': [0.01, 0.001],
'gamma': [0.5, 0.2, 0.05],
'rho': [0.25],
'scheduler_type': ['exponential', 'linear'],
'e_decay': [500, 100],
'e_final': [0.01, 0.1, 0.2],
'hidden_dims': [[64, 32], [128, 64], [256, 128]],
'update_frequency': [1, 4, 10]
}
grid_search = RandomGridSearch(param_grid, 0.2, 21)
game = "/home/eilab/Raj/tw-drl/Games/obj_20_qlen_5_room_10/train/game_" + str(10) + ".ulx"
all_params = grid_search.get_configs()#[:4]
#print(len(all_params))
#pool = multiprocessing.Pool(processes=4)
#pool.map(parallelize, all_params)
#pool.close()
#pool.join()
#@profile
#def run():
Parallel(n_jobs=2, prefer='processes')(delayed(parallelize)(game, params) for params in all_params)
#run()
|
import numpy as np
from tqdm import tqdm
import glob
import os
import cv2
from joblib import Parallel, delayed
import multiprocessing
import fonctions
from datetime import datetime
import fonctions_yal
from sklearn import preprocessing
def calculate(type, n, path_faces):
    """Compute the LBP histogram of every face image in folder `n` under
    `path_faces`.

    Fixes over the original: the duplicate `derName`/`name` variables are
    gone, paths are built with os.path.join, the loop uses enumerate
    instead of a manual counter, and the histogram matrix is sized from
    the same directory listing that is iterated (the original sized it
    with glob, which skips hidden files, risking a row-count mismatch).

    :param type: histogram type flag, forwarded to fonctions.histogramme
    :param n: folder (person) identifier; converted to str for the path
    :param path_faces: root directory with one sub-folder per person
    :return: (num_images, 256) int array, one LBP histogram per image
    """
    folder = os.path.join(path_faces, str(n))
    image_names = os.listdir(folder)
    # one 256-bin histogram row per image in the folder
    histo = np.zeros((len(image_names), 256), dtype='int')
    for i, image_name in enumerate(image_names):
        image_path = os.path.join(folder, image_name)
        img = cv2.imread(image_path, 0)          # read as grayscale
        img = cv2.resize(img, (128, 128))
        matrice_lbp = calculate_matrice_lbp(img)  # LBP code matrix of img
        hi = fonctions.histogramme(type, matrice_lbp)
        histo[i, :] = np.array(hi).flatten()
        print(str(n), '-------->', i)             # progress indicator
    return histo
def calculate_matrice_lbp(image):
    """Build the LBP code matrix for `image`.

    A 3x3 neighbourhood is used, so the result is 2 pixels smaller in
    each dimension (border pixels have no full neighbourhood).
    """
    n_rows, n_cols = np.shape(image)[0], np.shape(image)[1]
    img_lbp = np.zeros((n_rows - 2, n_cols - 2))
    for line in range(n_rows - 2):
        for column in range(n_cols - 2):
            # LBP code of the interior pixel at (line + 1, column + 1)
            img_lbp[line, column] = calculateLBP(image, column + 1, line + 1)
    return img_lbp
def calculateLBP(image, column, line):
    """Return the decimal LBP code of the pixel at (line, column).

    Each of the 8 neighbours contributes one bit (1 when >= centre);
    bits are weighted 1, 2, 4, ..., 128 in neighbour order, i.e. least
    significant bit first — same weighting as the original hand-rolled
    weights list, expressed with enumerate/bit-shifts instead of a
    range(len(...)) loop.
    """
    neighbours = get_neighbours(image, column, line)
    center = np.array(image)[line, column]
    bits = calculate_biniry_code(center, neighbours)
    return sum(bit << idx for idx, bit in enumerate(bits))
def get_neighbours(image, column, line):
    """Return the 8 neighbours of pixel (line, column), ordered clockwise
    starting from the top-left of the 3x3 window."""
    window = np.array(image[line - 1:line + 2, column - 1:column + 2]).flatten()
    clockwise_order = (0, 1, 2, 5, 8, 7, 6, 3)
    return [window[k] for k in clockwise_order]
def calculate_biniry_code(center, neighbours):
    """Threshold each neighbour against the centre pixel.

    :return: list of bits, 1 where neighbour >= center, else 0
    """
    return [1 if neighbour >= center else 0 for neighbour in neighbours]
if __name__ == '__main__':
    now = datetime.now()
    current_time = now.strftime("%H:%M:%S")
    print("Current Time =", current_time)  # print the start time
    train_path = "D://datasets//crossvalidation//data//feret_gray/feret_gray5//train"  # train folder path
    test_path = "D://datasets//crossvalidation//data//feret_gray//feret_gray5//test"  # test folder path
    xls_path = 'orl_train'
    exel_line=6  # row number in the Excel file where recognition results are written
    nbder_train=len(os.listdir(train_path))  # number of folders (people) in the train folder
    num_cores = multiprocessing.cpu_count()  # number of CPU cores
    histogrammes= Parallel(n_jobs=num_cores)(delayed(calculate)(1,kl, train_path) for kl in tqdm(range(1,nbder_train+1)))  # compute the train LBP histograms in parallel
    now = datetime.now()
    current_time = now.strftime("%H:%M:%S")
    print("Current Time =", current_time)  # end time of the train pass
    np.save('orl_histo_lbp_train.npy', histogrammes )  # save the histograms to disk
    histogrammes_train = np.load('orl_histo_lbp_train.npy',allow_pickle=True)
    print(histogrammes_train.shape)
    histogrammes_train=fonctions.conv_list_arr(histogrammes_train,fonctions.nomb_img( train_path))  # flatten to 2-D since each core computed an independent matrix
    label_train = fonctions.labels(train_path)  # compute the train labels
    nbder_test = len(os.listdir(test_path))  # number of folders (people) in the test folder
    histogrammes_test= Parallel(n_jobs=num_cores)(delayed(calculate)(1,kl, test_path) for kl in tqdm(range(1, nbder_test+1)))  # compute the test LBP histograms in parallel
    np.save('orl_histo_lbp_test.npy', histogrammes_test )  # save the histograms to disk
    histogrammes_test = np.load('orl_histo_lbp_test.npy',allow_pickle=True)
    print(histogrammes_test.shape)
    histogrammes_test=fonctions.conv_list_arr(histogrammes_test,fonctions.nomb_img( test_path))  # flatten to 2-D since each core computed an independent matrix
    '''mix=np.concatenate((histogrammes_train,histogrammes_test),axis=0)
    prepare=preprocessing.MinMaxScaler()
    x=prepare.fit_transform(mix.astype(float))
    histogrammes_train=x[0:histogrammes_train.shape[0],:]
    histogrammes_test=x[histogrammes_train.shape[0]:,:]'''
    label_test = fonctions.labels(test_path)  # compute the test labels to verify recognition
    nom_file_excel = 'knn_' + xls_path[:-6] + '.xlsx'  # Excel file name used to write recognition results
    Parallel(n_jobs=num_cores)(delayed(fonctions.Reconnaisance)(2,histogrammes_train ,histogrammes_test,k_nn,label_train,label_test,exel_line,nom_file_excel) for k_nn in range(1, 9, 2))  # run k-NN recognition for k = 1, 3, 5, 7 in parallel
    '''os.system( 'python LTP.py')
    os.system('python ltp_acp.py')
    os.system('python ALTP.py')
    os.system('python Altp_acp.py')
    os.system('python UniformLBP.py')
    os.system('python CS_LTP.py')
    for k_nn in range(1, 9, 2):
        fonctions.Reconnaisance(3,histogrammes_train ,histogrammes_test,k_nn,label_train,label_test,exel_line,nom_file_excel) #fonction de la reconnaissance k-nn 1,3,5,7'''
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as ss
from nems_lbhb.baphy_experiment import BAPHYExperiment
# Compare the baphy/LBHB pupil trace against the facemap pupil-area trace
# for one training session, both visually and with a scatter plot.
parmfile = '/auto/data/daq/Cordyceps/training2020/Cordyceps_2020_05_25_BVT_1.m'
options = {'pupil': True, 'rasterfs': 100}

# load the LBHB recording and pull out the pupil signal
manager = BAPHYExperiment(parmfile=parmfile)
rec = manager.get_recording(**options)
pupil = rec['pupil']._data.squeeze()

# load facemap analysis
fmap_fn = '/auto/data/daq/Cordyceps/training2020/Cordyceps_2020_05_25_facemap.npy'
fmap_results = np.load(fmap_fn, allow_pickle=True).item()
fmap_pupil = fmap_results['pupil'][0]['area']

# resample nems pupil to match the length of the facemap trace
pupil = ss.resample(pupil, fmap_pupil.shape[0])

# plot results: both traces stacked, plus a sample-by-sample scatter
f = plt.figure(figsize=(12, 4))
p1 = plt.subplot2grid((2, 3), (0, 0), colspan=2)
p2 = plt.subplot2grid((2, 3), (1, 0), colspan=2)
scat = plt.subplot2grid((2, 3), (0, 2), rowspan=2)

p1.plot(pupil)
p1.set_title('lbhb results')
p1.set_ylabel('Pupil size')

p2.plot(fmap_pupil)
p2.set_title('facemap results')
p2.set_ylabel('Pupil size')

scat.scatter(fmap_pupil, pupil, s=10, color='k', alpha=0.2)
scat.set_xlabel('facemap')
scat.set_ylabel('lbhb')

f.tight_layout()
plt.show()
|
from datadog import initialize, api
# NOTE(review): SECURITY - API credentials are hard-coded here. They should
# be loaded from environment variables or a secrets store, and the keys
# committed below should be considered compromised and revoked.
options = {
    'api_key': '9775a026f1ca7d1c6c5af9d94d9595a4',
    'app_key': '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff'
}

initialize(**options)

# Round-trip smoke test of the Comment endpoint: create a comment, then
# delete it again by id.
newcomment = api.Comment.create(message='Should we use COBOL or Fortran?')
api.Comment.delete(newcomment['comment']['id'])
from bs4 import BeautifulSoup
import datetime
import helpers
import hockey_scraper as hs
import sys
sys.path.append("..")
from machine_info import *
def fix_name(player):
    """
    Strip a captaincy designation - '(A)' or '(C)' - from a player's name,
    then normalize it to the "correct" name (full mapping in helpers.py).

    Docstring fixed: the parameter is the player's name string, not a
    list of player info as previously documented.

    :param player: player name string, possibly containing '(A)' or '(C)'
    :return: fixed name string
    """
    # Truncate at the first occurrence of the designation (matching the
    # original str.find-based behavior), then trim whitespace.
    if '(A)' in player:
        player = player[:player.index('(A)')].strip()
    elif '(C)' in player:
        player = player[:player.index('(C)')].strip()
    return helpers.fix_name(player)
def fix_players_names(players):
    """
    Normalize every player's name in place via helpers.fix_name
    (hockey_scraper does not do this itself).
    """
    for team in ('Home', 'Away'):
        for player in players[team]:
            # player is [number, position, name, ...]; fix the name slot
            player[2] = helpers.fix_name(player[2])
    return players
def transform_data(players, goalies):
    """
    Merge the players and goalies lists into one structure per venue:
    {'F': [...], 'D': [...], 'G': {...}}, excluding scratched players
    (those with a truthy flag at index 3).
    """
    forward_positions = {"R", "C", "L", "RW", "LW", "F"}
    defense_positions = {"D", "LD", "RD", "DR", "DL"}

    combined = {"Home": {"F": [], "D": [], "G": dict()},
                "Away": {"F": [], "D": [], "G": dict()}}

    for venue in ('Home', 'Away'):
        for player in players[venue]:
            if player[3]:
                # scratched - not in the lineup
                continue
            if player[1] in forward_positions:
                combined[venue]["F"].append({"player": player[2], 'number': player[0]})
            if player[1] in defense_positions:
                combined[venue]["D"].append({"player": player[2], 'number': player[0]})
        # goalies are already keyed by role (Starter/Backup)
        combined[venue]["G"].update(goalies[venue])

    return combined
def get_soup(roster):
    """
    Parse the roster document and extract the players.

    Tries successively more lenient parsers until one of them yields a
    non-empty player list; the soup of the last parser tried is returned
    either way.
    NOTE: scraping combines parsing with extracting players and head
    coaches... not the cleanest design decision.
    :return: (soup, players)
    """
    soup = None
    players = []
    for parser in ("lxml", "html.parser", "html5lib"):
        soup = BeautifulSoup(roster, parser)
        players = hs.nhl.playing_roster.get_players(soup)
        if len(players) != 0:
            break
    return soup, players
def get_teams(soup):
    """
    Get the home and away teams from the roster document.

    The away team's cell class carries a trailing space, the home team's
    does not:
    Away = <td align="center" width="50%" class="teamHeading + border ">NEW JERSEY DEVILS</td>
    Home = <td align="center" width="50%" class="teamHeading + border">PHILADELPHIA FLYERS</td>
    """
    away_name = soup.find_all("td", {"class": "teamHeading + border "})[0].get_text()
    home_name = soup.find_all("td", {"class": "teamHeading + border"})[0].get_text()
    return {"Away": helpers.TEAMS[away_name], "Home": helpers.TEAMS[home_name]}
def get_goalies(soup, players):
    """
    Determine the starting and backup goalie for each team.

    Goalies rendered in bold in the roster tables are the starters;
    the other goalie on a team is the backup.
    :return: {'Home'/'Away': {'Starter': name, 'Backup': name}}
    """
    goalies = {"Home": dict(), "Away": dict()}

    # The two per-team roster tables (away first, then home)
    player_tables = soup.find_all("td", {"valign": "top", "width": "50%", "class": "border"})

    # Collect the bolded player names per team, with names fixed
    bold_players = dict()
    for table, team in zip(player_tables, ["Away", "Home"]):
        bolds = table.find_all("td", {"align": "left", "width": "70%", "class": "bold"})
        bold_players[team] = [fix_name(cell.get_text()) for cell in bolds]

    # A goalie appearing in bold started; otherwise they are the backup
    for team in ('Home', 'Away'):
        for player in players[team]:
            if player[1] != "G":
                continue
            role = "Starter" if player[2] in bold_players[team] else "Backup"
            goalies[team][role] = player[2]

    return goalies
def get_roster(game_id):
    """
    Fetch and assemble the roster for a single game_id.

    :param game_id: NHL game id
    :return: {'players': combined roster dict, 'teams': team dict},
             or None if the roster document is not available
    """
    # Make sure to rescrape rather than use a cached document
    hs.shared.re_scrape = True
    file = hs.nhl.playing_roster.get_roster(game_id)

    # Checks if it's there -> Return None
    if not file:
        return None

    soup, players = get_soup(file)
    teams = get_teams(soup)
    players = fix_players_names(players)
    goalies = get_goalies(soup, players)
    combined_list = transform_data(players, goalies)

    # Sanity check: two goalies (Starter + Backup) per team
    if len(combined_list['Home']['G'].keys()) != 2 or len(combined_list['Away']['G'].keys()) != 2:
        print("For game", str(game_id), "the number of goalies is wrong.")

    # Sanity check: 18 skaters (F + D) per team
    # (comment fixed: the check is for 18 skaters, not "20 players")
    if (len(combined_list['Home']['F']) + len(combined_list['Home']['D'])) != 18 or \
            (len(combined_list['Away']['F']) + len(combined_list['Away']['D'])) != 18:
        print("For game", str(game_id), "the number of players is wrong.")

    return {'players': combined_list, 'teams': teams}
def get_yesterdays_rosters(date):
    """
    Get rosters for the games that took place the day before `date`.
    Used for checking back-to-backs.

    Fix: the schedule loop variable used to shadow the `date` parameter;
    it is now named `sched_date`.

    :param date: date of games in '%Y-%m-%d' format - we go one day back
    :return: dict mapping game id (str) -> roster dict
    """
    # Get yesterday's date. NOTE(review): this formatting is deliberately
    # kept non-zero-padded (e.g. '2020-5-3'), as in the original - confirm
    # the schedule endpoint accepts it before switching to strftime.
    yesterday = datetime.datetime.strptime(date, '%Y-%m-%d') - datetime.timedelta(1)
    yesterday = '-'.join([str(yesterday.year), str(yesterday.month), str(yesterday.day)])

    # Get the schedule for yesterday
    hs.shared.docs_dir = scraper_data_dir
    schedule = hs.nhl.json_schedule.get_schedule(yesterday, yesterday)

    # Get the roster for each game; the id-suffix filter presumably
    # excludes preseason games - TODO confirm against the NHL id scheme.
    games = dict()
    for sched_date in schedule['dates']:
        for game in sched_date['games']:
            if int(str(game['gamePk'])[-5:]) > 20000:
                games[str(game['gamePk'])] = get_roster(game['gamePk'])

    return games
|
def swap_cases(s):
    """Print and return `s` with the case of every letter swapped.

    Uppercase characters become lowercase; every other character is
    uppercased (which leaves digits and punctuation unchanged) - the
    same per-character rule as the original loop.

    Fixes: drops the redundant `== True` comparison, replaces the
    manual append loop with a join over a generator, and returns the
    result (backward-compatible: the original returned None).

    :param s: input string
    :return: the case-swapped string
    """
    swapped = ''.join(c.lower() if c.isupper() else c.upper() for c in s)
    print(swapped)
    return swapped
# Read one line from stdin and print its case-swapped version.
swap_cases(input())
from __future__ import absolute_import
from rlib import jit
from rlib.min_heap_queue import heappush, heappop, HeapEntry
from som.compiler.bc.bytecode_generator import (
emit1,
emit3,
emit_push_constant,
emit_return_local,
emit_return_non_local,
emit_send,
emit_super_send,
emit_push_global,
emit_push_block,
emit_push_field_with_index,
emit_pop_field_with_index,
emit3_with_dummy,
compute_offset,
)
from som.interpreter.ast.frame import (
get_inner_as_context,
mark_as_no_longer_on_stack,
FRAME_AND_INNER_RCVR_IDX,
create_frame_1,
create_frame_2,
)
from som.interpreter.bc.bytecodes import (
Bytecodes,
bytecode_length,
RUN_TIME_ONLY_BYTECODES,
bytecode_as_str,
NOT_EXPECTED_IN_BLOCK_BYTECODES,
)
from som.interpreter.bc.frame import (
create_frame,
stack_pop_old_arguments_and_push_result,
create_frame_3,
)
from som.interpreter.bc.interpreter import interpret
from som.interpreter.control_flow import ReturnException
from som.vmobjects.abstract_object import AbstractObject
from som.vmobjects.method import AbstractMethod
class BcAbstractMethod(AbstractMethod):
    """Base class for bytecode-backed SOM methods.

    Holds the bytecode array, literals, per-bytecode inline caches, and
    the frame-size metadata the interpreter needs. The _immutable_fields_
    declaration is an RPython JIT hint marking fields (and array
    contents, via [*]) as constant after construction.
    """

    _immutable_fields_ = [
        "_bytecodes?[*]",
        "_literals[*]",
        "_inline_cache",
        "_number_of_locals",
        "_maximum_number_of_stack_elements",
        "_number_of_arguments",
        "_arg_inner_access[*]",
        "_size_frame",
        "_size_inner",
        "_lexical_scope",
        "_inlined_loops[*]",
    ]

    def __init__(
        self,
        literals,
        num_locals,
        max_stack_elements,
        num_bytecodes,
        signature,
        arg_inner_access,
        size_frame,
        size_inner,
        lexical_scope,
        inlined_loops,
    ):
        AbstractMethod.__init__(self, signature)

        # Set the number of bytecodes in this method; bytecodes are kept
        # as 1-char strings (byte storage), zero-filled until set.
        self._bytecodes = ["\x00"] * num_bytecodes
        # one inline-cache slot per bytecode index
        self._inline_cache = [None] * num_bytecodes
        self._literals = literals

        self._number_of_arguments = signature.get_number_of_signature_arguments()
        self._number_of_locals = num_locals
        # +2: extra stack buffer to support doesNotUnderstand
        self._maximum_number_of_stack_elements = max_stack_elements + 2

        self._arg_inner_access = arg_inner_access
        self._size_frame = size_frame
        self._size_inner = size_inner

        self._lexical_scope = lexical_scope
        self._inlined_loops = inlined_loops

    def get_number_of_locals(self):
        """Number of local variable slots this method needs."""
        return self._number_of_locals

    @jit.elidable_promote("all")
    def get_maximum_number_of_stack_elements(self):
        # Compute the maximum number of stack locations (including
        # extra buffer to support doesNotUnderstand) and set the
        # number of indexable fields accordingly
        return self._maximum_number_of_stack_elements

    def set_holder(self, value):
        """Set the holder class, propagating it to nested invokables."""
        self._holder = value

        # Make sure all nested invokables have the same holder
        for obj in self._literals:
            assert isinstance(obj, AbstractObject)
            if obj.is_invokable():
                obj.set_holder(value)

    # XXX this means that the JIT doesn't see changes to the constants
    @jit.elidable_promote("all")
    def get_constant(self, bytecode_index):
        # Get the constant associated to a given bytecode index
        # (the literal index is the bytecode's first argument byte)
        return self._literals[self.get_bytecode(bytecode_index + 1)]

    @jit.elidable_promote("all")
    def get_number_of_arguments(self):
        return self._number_of_arguments

    @jit.elidable_promote("all")
    def get_number_of_signature_arguments(self):
        return self._number_of_arguments

    def get_number_of_bytecodes(self):
        # Get the number of bytecodes in this method
        return len(self._bytecodes)

    @jit.elidable_promote("all")
    def get_bytecode(self, index):
        # Get the bytecode at the given index (stored as chr, returned as int)
        assert 0 <= index < len(self._bytecodes)
        return ord(self._bytecodes[index])

    def get_bytecodes(self):
        """For testing purposes only"""
        return [ord(b) for b in self._bytecodes]

    def set_bytecode(self, index, value):
        # Set the bytecode at the given index to the given value
        assert (
            0 <= value <= 255
        ), "Expected bytecode in the range of [0..255], but was: " + str(value)
        self._bytecodes[index] = chr(value)

    @jit.elidable
    def get_inline_cache(self, bytecode_index):
        assert 0 <= bytecode_index < len(self._inline_cache)
        return self._inline_cache[bytecode_index]

    def set_inline_cache(self, bytecode_index, dispatch_node):
        self._inline_cache[bytecode_index] = dispatch_node

    def drop_old_inline_cache_entries(self, bytecode_index):
        """Unlink cache entries whose expected object layout is outdated.

        Keep in sync with
        _AbstractGenericMessageNode._get_cache_size_and_drop_old_entries.
        """
        prev = None
        cache = self._inline_cache[bytecode_index]
        while cache is not None:
            if not cache.expected_layout.is_latest:
                # drop old layout from cache (splice out of linked list)
                if prev is None:
                    self._inline_cache[bytecode_index] = cache.next_entry
                else:
                    prev.next_entry = cache.next_entry
            else:
                prev = cache
            cache = cache.next_entry

    def patch_variable_access(self, bytecode_index):
        """Rewrite a generic variable-access bytecode into the concrete
        frame/inner access bytecode chosen by the lexical scope, and patch
        the access index argument byte accordingly."""
        bc = self.get_bytecode(bytecode_index)
        idx = self.get_bytecode(bytecode_index + 1)
        ctx_level = self.get_bytecode(bytecode_index + 2)

        if bc == Bytecodes.push_argument:
            var = self._lexical_scope.get_argument(idx, ctx_level)
            self.set_bytecode(bytecode_index, var.get_push_bytecode(ctx_level))
        elif bc == Bytecodes.pop_argument:
            var = self._lexical_scope.get_argument(idx, ctx_level)
            self.set_bytecode(bytecode_index, var.get_pop_bytecode(ctx_level))
        elif bc == Bytecodes.push_local:
            var = self._lexical_scope.get_local(idx, ctx_level)
            self.set_bytecode(bytecode_index, var.get_push_bytecode(ctx_level))
        elif bc == Bytecodes.pop_local:
            var = self._lexical_scope.get_local(idx, ctx_level)
            self.set_bytecode(bytecode_index, var.get_pop_bytecode(ctx_level))
        else:
            raise Exception("Unsupported bytecode?")
        assert (
            FRAME_AND_INNER_RCVR_IDX <= var.access_idx <= 255
        ), "Expected variable access index to be in valid range, but was " + str(
            var.access_idx
        )
        self.set_bytecode(bytecode_index + 1, var.access_idx)
def _interp_with_nlr(method, new_frame, max_stack_size):
    """Interpret `method`, handling non-local returns.

    The frame's inner context is marked as no longer on the stack on both
    the normal and the exceptional path. A ReturnException that targets
    this frame's context is converted into a normal result; any other
    ReturnException propagates to an outer frame.
    """
    inner = get_inner_as_context(new_frame)

    try:
        result = interpret(method, new_frame, max_stack_size)
        mark_as_no_longer_on_stack(inner)
        return result
    except ReturnException as e:
        mark_as_no_longer_on_stack(inner)
        if e.has_reached_target(inner):
            return e.get_result()
        raise e
class BcMethod(BcAbstractMethod):
def invoke_1(self, rcvr):
    """Invoke this method with only the receiver (no arguments)."""
    new_frame = create_frame_1(rcvr, self._size_frame, self._size_inner)
    return interpret(self, new_frame, self._maximum_number_of_stack_elements)
def invoke_2(self, rcvr, arg1):
    """Invoke this method with the receiver and one argument."""
    new_frame = create_frame_2(
        rcvr,
        arg1,
        self._arg_inner_access[0],
        self._size_frame,
        self._size_inner,
    )
    return interpret(self, new_frame, self._maximum_number_of_stack_elements)
def invoke_3(self, rcvr, arg1, arg2):
    """Invoke this method with the receiver and two arguments."""
    new_frame = create_frame_3(
        self._arg_inner_access,
        self._size_frame,
        self._size_inner,
        rcvr,
        arg1,
        arg2,
    )
    return interpret(self, new_frame, self._maximum_number_of_stack_elements)
def invoke_n(self, stack, stack_ptr):
    """Invoke with the receiver and arguments taken from the operand stack.

    After interpretation, the old arguments are popped from `stack`, the
    result is pushed, and the updated stack pointer is returned.
    """
    new_frame = create_frame(
        self._arg_inner_access,
        self._size_frame,
        self._size_inner,
        stack,
        stack_ptr,
        self._number_of_arguments,
    )
    result = interpret(self, new_frame, self._maximum_number_of_stack_elements)
    return stack_pop_old_arguments_and_push_result(
        stack, stack_ptr, self._number_of_arguments, result
    )
def inline(self, mgenc):
    """Inline this method's body into the method generated by `mgenc`,
    after merging this method's lexical scope into the target scope."""
    mgenc.merge_into_scope(self._lexical_scope)
    self._inline_into(mgenc)
def _create_back_jump_heap(self):
    """Build a min-heap of this method's inlined loops (heap-ordered, so
    the loop with the smallest address is popped first)."""
    heap = []
    for loop in self._inlined_loops or ():
        heappush(heap, loop)
    return heap
@staticmethod
def _prepare_back_jump_to_current_address(
    back_jumps, back_jumps_to_patch, i, mgenc
):
    """Pop back jumps whose target address equals bytecode index `i` and
    queue a patch record mapping the original backward-jump bytecode to
    mgenc's next instruction offset."""
    while back_jumps and back_jumps[0].address <= i:
        jump = heappop(back_jumps)
        assert (
            jump.address == i
        ), "we use the less or equal, but actually expect it to be strictly equal"
        heappush(
            back_jumps_to_patch,
            _BackJumpPatch(
                jump.backward_jump_idx, mgenc.offset_of_next_instruction()
            ),
        )
@staticmethod
def _patch_jump_to_current_address(i, jumps, mgenc):
    """Patch all pending forward jumps whose target address equals
    bytecode index `i` so they point at mgenc's next instruction."""
    while jumps and jumps[0].address <= i:
        jump = heappop(jumps)
        assert (
            jump.address == i
        ), "we use the less or equal, but actually expect it to be strictly equal"
        mgenc.patch_jump_offset_to_point_to_next_instruction(jump.idx, None)
def _inline_into(self, mgenc):
jumps = [] # a sorted list/priority queue. sorted by original_target index
back_jumps = self._create_back_jump_heap()
back_jumps_to_patch = []
i = 0
while i < len(self._bytecodes):
self._prepare_back_jump_to_current_address(
back_jumps, back_jumps_to_patch, i, mgenc
)
self._patch_jump_to_current_address(i, jumps, mgenc)
bytecode = self.get_bytecode(i)
bc_length = bytecode_length(bytecode)
if bytecode == Bytecodes.halt:
emit1(mgenc, bytecode, 0)
elif bytecode == Bytecodes.dup:
emit1(mgenc, bytecode, 1)
elif (
bytecode == Bytecodes.push_field
or bytecode == Bytecodes.pop_field
or bytecode == Bytecodes.push_argument
or bytecode == Bytecodes.pop_argument
):
idx = self.get_bytecode(i + 1)
ctx_level = self.get_bytecode(i + 2)
assert ctx_level > 0
if bytecode == Bytecodes.push_field:
emit_push_field_with_index(mgenc, idx, ctx_level - 1)
elif bytecode == Bytecodes.pop_field:
emit_pop_field_with_index(mgenc, idx, ctx_level - 1)
else:
emit3(
mgenc,
bytecode,
idx,
ctx_level - 1,
1 if Bytecodes.push_argument else -1,
)
elif (
bytecode == Bytecodes.inc_field or bytecode == Bytecodes.inc_field_push
):
idx = self.get_bytecode(i + 1)
ctx_level = self.get_bytecode(i + 2)
assert ctx_level > 0
emit3(mgenc, bytecode, idx, ctx_level - 1, 1)
elif bytecode == Bytecodes.push_local or bytecode == Bytecodes.pop_local:
idx = self.get_bytecode(i + 1)
ctx_level = self.get_bytecode(i + 2)
if ctx_level == 0:
# these have been inlined into the outer context already
# so, we need to look up the right one
var = self._lexical_scope.get_local(idx, 0)
idx = mgenc.get_inlined_local_idx(var, 0)
else:
ctx_level -= 1
if bytecode == Bytecodes.push_local:
emit3(mgenc, bytecode, idx, ctx_level, 1)
else:
emit3(mgenc, bytecode, idx, ctx_level, -1)
elif bytecode == Bytecodes.push_block:
literal_idx = self.get_bytecode(i + 1)
block_method = self._literals[literal_idx]
block_method.adapt_after_outer_inlined(1, mgenc)
emit_push_block(mgenc, block_method, True)
elif bytecode == Bytecodes.push_block_no_ctx:
literal_idx = self.get_bytecode(i + 1)
block_method = self._literals[literal_idx]
emit_push_block(mgenc, block_method, False)
elif bytecode == Bytecodes.push_constant:
literal_idx = self.get_bytecode(i + 1)
literal = self._literals[literal_idx]
emit_push_constant(mgenc, literal)
elif (
bytecode == Bytecodes.push_constant_0
or bytecode == Bytecodes.push_constant_1
or bytecode == Bytecodes.push_constant_2
):
literal_idx = bytecode - Bytecodes.push_constant_0
literal = self._literals[literal_idx]
emit_push_constant(mgenc, literal)
elif (
bytecode == Bytecodes.push_0
or bytecode == Bytecodes.push_1
or bytecode == Bytecodes.push_nil
):
emit1(mgenc, bytecode, 1)
elif bytecode == Bytecodes.pop:
emit1(mgenc, bytecode, -1)
elif bytecode == Bytecodes.inc or bytecode == Bytecodes.dec:
emit1(mgenc, bytecode, 0)
elif bytecode == Bytecodes.push_global:
literal_idx = self.get_bytecode(i + 1)
sym = self._literals[literal_idx]
emit_push_global(mgenc, sym)
elif (
bytecode == Bytecodes.send_1
or bytecode == Bytecodes.send_2
or bytecode == Bytecodes.send_3
or bytecode == Bytecodes.send_n
):
literal_idx = self.get_bytecode(i + 1)
sym = self._literals[literal_idx]
emit_send(mgenc, sym)
elif bytecode == Bytecodes.super_send:
literal_idx = self.get_bytecode(i + 1)
sym = self._literals[literal_idx]
emit_super_send(mgenc, sym)
elif bytecode == Bytecodes.return_local:
# NO OP, doesn't need to be translated
pass
elif bytecode == Bytecodes.return_non_local:
new_ctx_level = self.get_bytecode(i + 1) - 1
if new_ctx_level == 0:
emit_return_local(mgenc)
else:
assert new_ctx_level == mgenc.get_max_context_level()
emit_return_non_local(mgenc)
elif (
bytecode == Bytecodes.return_field_0
or bytecode == Bytecodes.return_field_1
or bytecode == Bytecodes.return_field_2
):
emit1(mgenc, bytecode, 0)
elif (
bytecode == Bytecodes.jump
or bytecode == Bytecodes.jump_on_true_top_nil
or bytecode == Bytecodes.jump_on_false_top_nil
or bytecode == Bytecodes.jump2
or bytecode == Bytecodes.jump2_on_true_top_nil
or bytecode == Bytecodes.jump2_on_false_top_nil
):
# emit the jump, but instead of the offset, emit a dummy
idx = emit3_with_dummy(mgenc, bytecode, 0)
offset = compute_offset(
self.get_bytecode(i + 1), self.get_bytecode(i + 2)
)
jump = _Jump(i + offset, bytecode, idx)
heappush(jumps, jump)
elif (
bytecode == Bytecodes.jump_on_true_pop
or bytecode == Bytecodes.jump_on_false_pop
or bytecode == Bytecodes.jump2_on_true_pop
or bytecode == Bytecodes.jump2_on_false_pop
):
# emit the jump, but instead of the offset, emit a dummy
idx = emit3_with_dummy(mgenc, bytecode, -1)
offset = compute_offset(
self.get_bytecode(i + 1), self.get_bytecode(i + 2)
)
jump = _Jump(i + offset, bytecode, idx)
heappush(jumps, jump)
elif (
bytecode == Bytecodes.jump_backward
or bytecode == Bytecodes.jump2_backward
):
jump = heappop(back_jumps_to_patch)
assert (
jump.address == i
), "the jump should match with the jump instructions"
mgenc.emit_backwards_jump_offset_to_target(jump.loop_begin_idx, None)
elif bytecode in RUN_TIME_ONLY_BYTECODES:
raise Exception(
"Found an unexpected bytecode. i: "
+ str(i)
+ " bytecode: "
+ bytecode_as_str(bytecode)
)
elif bytecode in NOT_EXPECTED_IN_BLOCK_BYTECODES:
raise Exception(
"Found "
+ bytecode_as_str(bytecode)
+ " bytecode, but it's not expected in a block method"
)
else:
raise Exception(
"Found "
+ bytecode_as_str(bytecode)
+ " bytecode, but inlining does not handle it yet."
)
i += bc_length
assert not jumps
def adapt_after_outer_inlined(self, removed_ctx_level, mgenc_with_inlined):
i = 0
while i < len(self._bytecodes):
bytecode = self.get_bytecode(i)
bc_length = bytecode_length(bytecode)
if (
bytecode == Bytecodes.halt
or bytecode == Bytecodes.dup
or bytecode == Bytecodes.push_block_no_ctx
or bytecode == Bytecodes.push_constant
or bytecode == Bytecodes.push_constant_0
or bytecode == Bytecodes.push_constant_1
or bytecode == Bytecodes.push_constant_2
or bytecode == Bytecodes.push_0
or bytecode == Bytecodes.push_1
or bytecode == Bytecodes.push_nil
or bytecode == Bytecodes.push_global
or bytecode == Bytecodes.pop # push_global doesn't encode context
or bytecode == Bytecodes.send_1
or bytecode == Bytecodes.send_2
or bytecode == Bytecodes.send_3
or bytecode == Bytecodes.send_n
or bytecode == Bytecodes.super_send
or bytecode == Bytecodes.return_local
or bytecode == Bytecodes.return_field_0
or bytecode == Bytecodes.return_field_1
or bytecode == Bytecodes.return_field_2
or bytecode == Bytecodes.inc
or bytecode == Bytecodes.dec
or bytecode == Bytecodes.jump
or bytecode == Bytecodes.jump_on_true_top_nil
or bytecode == Bytecodes.jump_on_true_pop
or bytecode == Bytecodes.jump_on_false_top_nil
or bytecode == Bytecodes.jump_on_false_pop
or bytecode == Bytecodes.jump_backward
or bytecode == Bytecodes.jump2
or bytecode == Bytecodes.jump2_on_true_top_nil
or bytecode == Bytecodes.jump2_on_true_pop
or bytecode == Bytecodes.jump2_on_false_top_nil
or bytecode == Bytecodes.jump2_on_false_pop
or bytecode == Bytecodes.jump2_backward
):
# don't use context
pass
elif (
bytecode == Bytecodes.push_field
or bytecode == Bytecodes.pop_field
or bytecode == Bytecodes.push_argument
or bytecode == Bytecodes.pop_argument
or bytecode == Bytecodes.inc_field_push
or bytecode == Bytecodes.inc_field
):
ctx_level = self.get_bytecode(i + 2)
if ctx_level > removed_ctx_level:
self.set_bytecode(i + 2, ctx_level - 1)
elif bytecode == Bytecodes.push_block:
literal_idx = self.get_bytecode(i + 1)
block_method = self._literals[literal_idx]
block_method.adapt_after_outer_inlined(
removed_ctx_level + 1, mgenc_with_inlined
)
elif bytecode == Bytecodes.push_local or bytecode == Bytecodes.pop_local:
ctx_level = self.get_bytecode(i + 2)
if ctx_level == removed_ctx_level:
idx = self.get_bytecode(i + 1)
# locals have been inlined into the outer context already
# so, we need to look up the right one and fix up the index
# at this point, the lexical scope has not been changed
# so, we should still be able to find the right one
old_var = self._lexical_scope.get_local(idx, ctx_level)
new_idx = mgenc_with_inlined.get_inlined_local_idx(
old_var, ctx_level
)
self.set_bytecode(i + 1, new_idx)
elif ctx_level > removed_ctx_level:
self.set_bytecode(i + 2, ctx_level - 1)
elif bytecode == Bytecodes.return_non_local:
ctx_level = self.get_bytecode(i + 1)
self.set_bytecode(i + 1, ctx_level - 1)
elif bytecode in RUN_TIME_ONLY_BYTECODES:
raise Exception(
"Found an unexpected bytecode. i: "
+ str(i)
+ " bytecode: "
+ bytecode_as_str(bytecode)
)
elif bytecode in NOT_EXPECTED_IN_BLOCK_BYTECODES:
raise Exception(
"Found "
+ bytecode_as_str(bytecode)
+ " bytecode, but it's not expected in a block method"
)
else:
raise Exception(
"Found "
+ bytecode_as_str(bytecode)
+ " bytecode, but adapt_after_outer_inlined does not handle it yet."
)
i += bc_length
if removed_ctx_level == 1:
self._lexical_scope.drop_inlined_scope()
class _Jump(HeapEntry):
    # A forward jump recorded during inlining: `jump_target` (the heap
    # priority) is the bytecode index in the original method the jump points
    # at; `idx` is the position of the emitted dummy offset to patch later.
    def __init__(self, jump_target, bytecode, idx):
        HeapEntry.__init__(self, jump_target)
        self.bytecode = bytecode
        self.idx = idx
class BackJump(HeapEntry):
    # A loop's backward jump: `loop_begin_idx` (the heap priority) is the
    # address of the loop start; `backward_jump_idx` is where the backward
    # jump instruction lives.
    def __init__(self, loop_begin_idx, backward_jump_idx):
        HeapEntry.__init__(self, loop_begin_idx)
        self.backward_jump_idx = backward_jump_idx
class _BackJumpPatch(HeapEntry):
    # Pending patch for a backward jump: `backward_jump_idx` (the heap
    # priority) is the jump's address in the original bytecode;
    # `loop_begin_idx` is the already-emitted target it must point at.
    def __init__(self, backward_jump_idx, loop_begin_idx):
        HeapEntry.__init__(self, backward_jump_idx)
        self.loop_begin_idx = loop_begin_idx
class BcMethodNLR(BcMethod):
    """Bytecode method variant that handles non-local returns (NLR).

    The unary/binary/ternary entry points delegate to `_interp_with_nlr`,
    which manages the NLR machinery; `invoke_n` handles the ReturnException
    itself because it must also unwind the caller's operand stack.
    """

    def invoke_1(self, rcvr):
        new_frame = create_frame_1(rcvr, self._size_frame, self._size_inner)
        return _interp_with_nlr(self, new_frame, self._maximum_number_of_stack_elements)

    def invoke_2(self, rcvr, arg1):
        new_frame = create_frame_2(
            rcvr,
            arg1,
            self._arg_inner_access[0],
            self._size_frame,
            self._size_inner,
        )
        return _interp_with_nlr(self, new_frame, self._maximum_number_of_stack_elements)

    def invoke_3(self, rcvr, arg1, arg2):
        new_frame = create_frame_3(
            self._arg_inner_access,
            self._size_frame,
            self._size_inner,
            rcvr,
            arg1,
            arg2,
        )
        return _interp_with_nlr(self, new_frame, self._maximum_number_of_stack_elements)

    def invoke_n(self, stack, stack_ptr):
        new_frame = create_frame(
            self._arg_inner_access,
            self._size_frame,
            self._size_inner,
            stack,
            stack_ptr,
            self._number_of_arguments,
        )
        inner = get_inner_as_context(new_frame)
        try:
            result = interpret(self, new_frame, self._maximum_number_of_stack_elements)
            stack_ptr = stack_pop_old_arguments_and_push_result(
                stack, stack_ptr, self._number_of_arguments, result
            )
            # Normal completion: this frame can no longer be an NLR target.
            mark_as_no_longer_on_stack(inner)
            return stack_ptr
        except ReturnException as e:
            mark_as_no_longer_on_stack(inner)
            if e.has_reached_target(inner):
                # The non-local return targeted this frame: pop the arguments
                # and push the escaping result instead.
                return stack_pop_old_arguments_and_push_result(
                    stack, stack_ptr, self._number_of_arguments, e.get_result()
                )
            # Not our frame; keep unwinding towards the target.
            raise e

    def inline(self, mgenc):
        # Only blocks get inlined, and blocks never handle NLRs themselves.
        raise Exception(
            "Blocks should never handle non-local returns. "
            "So, this should not happen."
        )
|
from django.urls import path

from . import views

# URL routes for the menu app.
urlpatterns = [
    # Static menu page at the app root.
    path('', views.static_menu, name='menu'),
    # Dynamically generated menu listing all dishes.
    # NOTE(review): no trailing slash, unlike the detail route below —
    # confirm this is intended (APPEND_SLASH behavior differs).
    path('dynamic_menu', views.all_dishes, name='dmenu'),
    # Detail page for one dish, looked up by its id string.
    path('<str:dish_id>/', views.detail, name='menu_detail'),
]
# Read period-separated values from the user and echo them back as a list
# and as a tuple.
values = input("Please enter the numbers by period : ")
# BUG FIX: the original bound the results to the names `list` and `tuple`,
# shadowing the built-ins (a second tuple() call would have failed).
values_list = values.split('.')
values_tuple = tuple(values_list)
print("List of values are :", values_list)
print("tuple of values are :", values_tuple)

# Print the first and the last colour of the list.
color_list = ["Red", "Green", "White", "Black"]
print("%s %s" % (color_list[0], color_list[-1]))
#! /usr/bin/env python
'''
Calculate UTR from gff containing ONLY mRNA and CDS
for Apollo gff files: coordinates are ordered
'''
# last update: 7/11/2017
import re
import sys


def compute_utrs(lines):
    """Derive the 5' and 3' UTR rows for a single-gene GFF.

    `lines` is an iterable of GFF lines holding one mRNA row and its CDS
    rows, coordinates ordered (Apollo convention).  Returns a pair of
    9-column lists (five_prime_UTR row, three_prime_UTR row).

    If the mRNA start/end equals the start/stop codon the UTR coordinates
    may come out inverted (-1 span) — same caveat as the original script.
    """
    # BUG FIX: `lastCDS = None` used to be reset on every loop iteration, so
    # it only survived when the final line happened to be a CDS; it is now
    # initialised once before the loop.
    last_cds = None
    cds_count = 0
    fields = None  # fields of the most recent line; the last CDS line for a
    # well-formed single-gene file
    m_start = m_end = None
    p_start = p_start_rv = None

    for raw in lines:
        fields = raw.rstrip().split()
        if "mRNA" in fields:
            m_start = fields[3]
            m_end = fields[4]
            cds_count = 0
        elif "CDS" in fields:
            last_cds = fields
            cds_count += 1
            if cds_count == 1:
                # first CDS: protein start (+ strand) / reverse start (- strand)
                p_start = fields[3]
                p_start_rv = fields[4]

    # Drop the ID=...; from the attribute column of the final record so the
    # UTR rows inherit only the remaining attributes (e.g. Parent=...).
    attrs = re.sub("ID=(.+?);", "", fields[8])

    if fields[6] == "+":
        five_utr = [fields[0], fields[1], "five_prime_UTR", str(m_start),
                    str(int(p_start) - 1), fields[5], fields[6], ".", attrs]
        three_utr = [fields[0], fields[1], "three_prime_UTR",
                     str(int(last_cds[4]) + 1), str(m_end), fields[5],
                     fields[6], ".", attrs]
    elif fields[6] == "-":
        five_utr = [fields[0], fields[1], "five_prime_UTR",
                    str(int(p_start_rv) + 1), str(m_end), fields[5],
                    fields[6], ".", attrs]
        three_utr = [fields[0], fields[1], "three_prime_UTR", str(m_start),
                     str(int(last_cds[3]) - 1), fields[5], fields[6], ".", attrs]
    return five_utr, three_utr


def main():
    """Read the single-gene GFF named on the command line, print UTR rows."""
    gff = sys.argv[1]  # single gene gff
    with open(gff) as fin:
        five_utr, three_utr = compute_utrs(fin)
    print("\t".join(five_utr))
    print("\t".join(three_utr))


if __name__ == "__main__":
    main()
# Standard library
import argparse

# Third-party
import imageio
import matplotlib  # needed by matplotlib.rc(...) in the __main__ block
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
from mlxtend.plotting import plot_confusion_matrix
import numpy as np
import pandas as pd
import PIL
import scipy as sp
import scipy.ndimage as spi
import skimage.transform
from sklearn.metrics import auc, confusion_matrix, roc_curve
from sklearn.model_selection import train_test_split
from tensorflow.keras import backend, optimizers
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.layers import (BatchNormalization, Dense, Dropout,
                                     Flatten, GlobalAveragePooling2D)
from tensorflow.keras.models import Model, load_model, model_from_json
from tensorflow.keras.optimizers import Adam, Adagrad, Adadelta, RMSprop
from tensorflow.keras.preprocessing.image import (ImageDataGenerator,
                                                  img_to_array, load_img)

# Local
import model_evaluation_utils as meu
# Draw a random batch of rows from the dataset.
def load_batch(dataset_df, batch_size=25):
    """Return `batch_size` randomly selected rows of *dataset_df*."""
    shuffled_idx = np.random.permutation(np.arange(0, len(dataset_df)))
    picked = shuffled_idx[:batch_size]
    return dataset_df.loc[picked, :]
# Plot sample images in a grid at the requested scale.
def plot_batch(img_type, images_df, grid_width, grid_height, im_scale_x, im_scale_y):
    """Display a grid of images from *images_df*, loaded from DATASET_PATH.

    Each cell shows one image (resized to im_scale_x x im_scale_y) with the
    first 10 characters of its 'clase' label as title.
    """
    fig, axes = plt.subplots(grid_width, grid_height)
    fig.set_size_inches(12, 12)
    cell_idx = 0
    for row in range(0, grid_width):
        for col in range(0, grid_height):
            cell = axes[row][col]
            cell.axis('off')
            cell.set_title(images_df.iloc[cell_idx]['clase'][:10])
            raw_img = imageio.imread(DATASET_PATH + images_df.iloc[cell_idx]['id'] + img_type)
            cell.imshow(skimage.transform.resize(raw_img, (im_scale_x, im_scale_y)))
            cell_idx += 1
    plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0.25)
    plt.show()
def datos_flo(tar_six, tar_siy, test_si, rand_sta, test_si2, rand_sta2):
    """Load all images and split them into train/test/validation sets.

    Relies on module-level globals: `data_labels` (DataFrame with an
    'image_path' column) and `target_labels` (per-image class labels).

    :param tar_six/tar_siy: target image width/height for loading
    :param test_si: test-set fraction for the first split
    :param rand_sta: random_state of the first split
    :param test_si2: validation fraction carved out of the training split
    :param rand_sta2: random_state of the second split
    :return: (train_data, x_train, x_test, y_train, y_test, x_val, y_val)
    """
    # Load dataset: read every image from disk and stack into one float32 array.
    train_data = np.array([img_to_array(load_img(img, target_size=(tar_six, tar_siy)))
                           for img in data_labels['image_path'].values.tolist()]).astype('float32')
    # Create train and test datasets, stratified on the class labels.
    x_train, x_test, y_train, y_test = train_test_split(train_data, target_labels,
                                                        test_size=test_si,
                                                        stratify=np.array(target_labels),
                                                        random_state=rand_sta)
    # Carve a validation set out of the training split (also stratified).
    x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
                                                      test_size=test_si2,
                                                      stratify=np.array(y_train),
                                                      random_state=rand_sta2)
    return train_data, x_train, x_test, y_train, y_test, x_val, y_val
def data_gen(BATCH_SIZE, rot_ran, width_s_r, height_s_r, hor_flip, seed):
    """Create the augmented training generator and a plain validation generator.

    Relies on module-level globals: x_train, y_train_ohe, x_val, y_val_ohe.

    :param BATCH_SIZE: batch size for both generators
    :param rot_ran: rotation_range for augmentation (degrees)
    :param width_s_r/height_s_r: width/height shift ranges
    :param hor_flip: whether to apply horizontal flips
    :param seed: RNG seed for the training generator
    :return: (train_datagen, train_generator, val_datagen, val_generator)
    """
    # BUG FIX: the BATCH_SIZE parameter used to be clobbered here with a
    # hard-coded 32; honor the caller-supplied value instead (callers pass 32
    # today, so behavior is unchanged in practice).
    # Create train generator with on-the-fly augmentation.
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=rot_ran,
                                       width_shift_range=width_s_r,
                                       height_shift_range=height_s_r,
                                       horizontal_flip=hor_flip)
    train_generator = train_datagen.flow(x_train, y_train_ohe, shuffle=False,
                                         batch_size=BATCH_SIZE, seed=seed)
    # Create validation generator: rescale only, never augment.
    # BUG FIX: this used to call train_datagen.flow(...), applying the
    # training augmentation to the validation data; val_datagen was unused.
    val_datagen = ImageDataGenerator(rescale=1. / 255)
    val_generator = val_datagen.flow(x_val, y_val_ohe, shuffle=False,
                                     batch_size=BATCH_SIZE, seed=1)
    return train_datagen, train_generator, val_datagen, val_generator
def tranf_learn(pesos,shapex,shapey,shapez,activat,activat2,loss,learning_rate,moment,BATCH_SIZE,epochs,save_file_path,save_json):
    """Build, train and persist an InceptionV3-based transfer-learning model.

    Relies on module-level globals: y_train_ohe, x_train, x_val,
    train_generator and val_generator.

    :param pesos: weights for InceptionV3 (e.g. 'imagenet')
    :param shapex/shapey/shapez: input width, height and channel count
    :param activat: unused  # NOTE(review): never referenced in the body
    :param activat2: activation of the output layer (e.g. 'sigmoid')
    :param loss: loss function name
    :param learning_rate: SGD learning rate
    :param moment: SGD momentum
    :param BATCH_SIZE: training batch size
    :param epochs: number of training epochs
    :param save_file_path: destination of the HDF5 weights file
    :param save_json: destination of the JSON architecture file
    :return: the Keras History object from training
    """
    # Get the InceptionV3 model so we can do transfer learning
    base_inception = InceptionV3(weights=pesos, include_top=False,
                                 input_shape=(shapex, shapey, shapez))
    # Stack a new classification head on top of the convolutional base.
    out = base_inception.output
    out = GlobalAveragePooling2D()(out)
    out = Dense(1024, activation='relu')(out)
    out = Dense(512, activation='relu')(out)
    out = Flatten()(out)
    out = Dense(512, activation="relu")(out)
    # Add top layers on top of freezed (not re-trained) layers of VGG16
    # NOTE(review): the comment above (from the original) says VGG16 but the
    # base is InceptionV3, and the base layers are not actually frozen here —
    # confirm intent.
    total_classes = y_train_ohe.shape[1]
    # Output layer: one unit per class with the requested activation
    # (sigmoid in the original binary-classification setup).
    predictions = Dense(total_classes, activation=activat2)(out)

    model = Model(inputs=base_inception.input, outputs=predictions)
    # Only opt1 (SGD) is used below; opt2/opt3 are unused alternatives.
    opt1 = optimizers.SGD(lr=learning_rate, momentum=moment, nesterov=True)
    opt2 = Adadelta(lr=learning_rate, rho=0.95)
    opt3 = Adagrad(lr=0.0001)
    # Compile
    model.compile(loss=loss, optimizer=opt1, metrics=["accuracy"])
    # Print the model architecture.
    model.summary()

    # Train the model.
    batch_size = BATCH_SIZE
    train_steps_per_epoch = x_train.shape[0] // batch_size
    val_steps_per_epoch = x_val.shape[0] // batch_size
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=train_steps_per_epoch,
                                  validation_data=val_generator,
                                  validation_steps=val_steps_per_epoch,
                                  epochs=epochs, verbose=1)
    # serialize model to JSON
    model_json = model.to_json()
    with open(save_json, "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights(save_file_path)
    print("Saved model to disk")
    print(history.history.keys())
    return history
def plot_eval(total_epchs, plot_name, space):
    """Plot train/validation accuracy and loss curves side by side.

    Reads the module-level global `history`; saves the figure to *plot_name*
    and shows it.
    """
    fig, (acc_ax, loss_ax) = plt.subplots(1, 2, figsize=(12, 4))
    fig.suptitle('Inception V3 Performance', fontsize=12)
    fig.subplots_adjust(top=0.85, wspace=0.3)

    epochs_axis = list(range(1, total_epchs))
    tick_marks = np.arange(0, total_epchs, space)

    acc_ax.plot(epochs_axis, history.history['accuracy'], label='Train Accuracy')
    acc_ax.plot(epochs_axis, history.history['val_accuracy'], label='Validation Accuracy')
    acc_ax.set_xticks(tick_marks)
    acc_ax.set_ylabel('Accuracy Value')
    acc_ax.set_xlabel('Epoch')
    acc_ax.set_title('Accuracy')
    acc_ax.legend(loc="best")

    loss_ax.plot(epochs_axis, history.history['loss'], label='Train Loss')
    loss_ax.plot(epochs_axis, history.history['val_loss'], label='Validation Loss')
    loss_ax.set_xticks(tick_marks)
    loss_ax.set_ylabel('Loss Value')
    loss_ax.set_xlabel('Epoch')
    loss_ax.set_title('Loss')
    loss_ax.legend(loc="best")

    plt.savefig(plot_name)
    plt.show()
if __name__ == "__main__":
    # Parse command-line arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument("-e", "--epochs", type=int, required=True,
                    help="Número de epochs para entrenamiento")
    ap.add_argument("-p", "--path", type=str, required=True,
                    help="Path directorio imagenes ej. '/home/jyosa/all_images'")
    ap.add_argument("-l", "--labels", type=str, required=True,
                    help="Path archivo labels.csv ej. '/home/jyosa/labels.csv'")
    ap.add_argument("-ex", "--ext", type=str, required=True,
                    help="Tipo de imágen. Ejemplo '.jpeg'")
    args = vars(ap.parse_args())

    np.random.seed(42)  # make shuffles and splits reproducible
    # If the labels are not in a separate file yet, use get_labels.py first.
    DATASET_PATH = args["path"]
    LABEL_PATH = args["labels"]

    # Load the dataset and visualize a sample batch.
    dataset_df = pd.read_csv(LABEL_PATH)
    batch_df = load_batch(dataset_df, batch_size=36)
    plot_batch(args["ext"], batch_df, grid_width=6, grid_height=6,
               im_scale_x=64, im_scale_y=64)

    # Inspect the dataset labels to get an idea of all possible classes.
    data_labels = pd.read_csv(LABEL_PATH)
    target_labels = data_labels['clase']
    print("Etiquetas encontradas: ", len(set(target_labels)))
    data_labels.head()

    # Add the exact on-disk image path for each image, so the images can be
    # located and loaded easily during model training.
    train_folder = DATASET_PATH
    data_labels['image_path'] = data_labels.apply(lambda row: (train_folder + row["id"] + args["ext"] ),
                                                  axis=1)
    data_labels.head()

    # Prepare training, test and validation datasets.
    # Parameters
    target_size_x = 299
    target_size_y = 299
    test_size = 0.3
    random_state = 42
    test_size2 = 0.15
    random_state2 = 42
    train_data, x_train, x_test, y_train, y_test, x_val, y_val = datos_flo(target_size_x,target_size_y,test_size,random_state,test_size2,random_state2)
    print('Tamaño inicial del conjunto de datos:', train_data.shape)
    print('Tamaño inicial de conjuntos de datos de prueba y entrenamiento:', x_train.shape, x_test.shape)
    print('Tamaño de conjuntos de datos de entrenamiento y validación:', x_train.shape, x_val.shape)
    print('Tamaño de conjuntos de datos de entrenamiento, prueba y validación:\n', x_train.shape, x_test.shape, x_val.shape)

    # Convert the textual class labels into one-hot encoded labels.
    y_train_ohe = pd.get_dummies(y_train.reset_index(drop=True)).values
    y_val_ohe = pd.get_dummies(y_val.reset_index(drop=True)).values
    y_test_ohe = pd.get_dummies(y_test.reset_index(drop=True)).values
    print(y_train_ohe.shape, y_test_ohe.shape, y_val_ohe.shape)

    # Data-augmentation parameters
    batch_size = 32
    rotation_range = 30
    width_shift_range = 0.2
    height_shift_range = 0.2
    # NOTE(review): 'True' is a string, not the boolean True — it is truthy
    # either way, but this is likely unintended; confirm.
    horizontal_flip = 'True'
    seed = 25
    train_datagen, train_generator, val_datagen, val_generator = data_gen(batch_size, rotation_range, width_shift_range, height_shift_range, horizontal_flip, seed)

    # Transfer learning with Google's Inception V3 model.
    # Parameters
    weights = 'imagenet'
    input_shapex = 299
    input_shapey = 299
    input_shapez = 3
    activation = 'relu'
    activation_pred = 'sigmoid'
    loss = "binary_crossentropy"
    learning_rate = 0.0001
    momentum = 0.8
    batch_size = 32
    epochs = args["epochs"]
    model_path_save = 'models/transfer_inceptionV3.h5'
    model_path_save_json = 'models/transfer_inceptionV3.json'
    history = tranf_learn(weights,input_shapex,input_shapey,input_shapez,activation,activation_pred,loss,learning_rate,momentum,batch_size,epochs,model_path_save,model_path_save_json)

    # Plot the Inception V3 training curves.
    # Parameters
    num_epochs = epochs + 1
    Plot_name = 'Permormance_1.png'
    space = 50
    plot_eval(num_epochs,Plot_name,space)

    # Model evaluation.
    base_model = InceptionV3(weights='imagenet', include_top=False)
    # load json and create model
    json_file = open(model_path_save_json, 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights(model_path_save)
    print("Loaded model from disk")
    # scaling test features
    x_test /= 255.
    # getting model predictions
    test_predictions = model.predict(x_test)
    labels_ohe_names = pd.get_dummies(target_labels, sparse=True)
    predictions = pd.DataFrame(test_predictions, columns=labels_ohe_names.columns)
    predictions = list(predictions.idxmax(axis=1))
    test_labels = list(y_test)
    # evaluate model performance
    meu.get_metrics(true_labels=test_labels,
                    predicted_labels=predictions)
    meu.display_classification_report(true_labels=test_labels,
                                      predicted_labels=predictions,
                                      classes=list(labels_ohe_names.columns))

    font = {
        'family': 'Times New Roman',
        'size': 12
    }
    # NOTE(review): this needs `import matplotlib` at module level; only
    # matplotlib.pyplot is imported by name in the import block — confirm.
    matplotlib.rc('font', **font)
    mat = confusion_matrix(test_labels, predictions)
    plot_confusion_matrix(conf_mat=mat, figsize=(4, 4), class_names = list(labels_ohe_names.columns), show_normed=False)

    # Show a grid of random test images with actual vs. predicted labels.
    grid_width = 5
    grid_height = 5
    f, ax = plt.subplots(grid_width, grid_height)
    f.set_size_inches(15, 15)
    batch_size = 25
    dataset = x_test
    labels_ohe_names = pd.get_dummies(target_labels, sparse=True)
    labels_ohe = np.asarray(labels_ohe_names)
    label_dict = dict(enumerate(labels_ohe_names.columns.values))
    model_input_shape = (1,)+model.get_input_shape_at(0)[1:]
    random_batch_indx = np.random.permutation(np.arange(0,len(dataset)))[:batch_size]
    img_idx = 0
    for i in range(0, grid_width):
        for j in range(0, grid_height):
            actual_label = np.array(y_test)[random_batch_indx[img_idx]]
            prediction = model.predict(dataset[random_batch_indx[img_idx]].reshape(model_input_shape))[0]
            label_idx = np.argmax(prediction)
            predicted_label = label_dict.get(label_idx)
            conf = round(prediction[label_idx], 2)
            ax[i][j].axis('off')
            ax[i][j].set_title('Actual: '+actual_label+'\nPred: '+predicted_label + '\nConf: ' +str(conf))
            ax[i][j].imshow(dataset[random_batch_indx[img_idx]])
            img_idx += 1
    plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.5, hspace=0.55)
    plt.show()
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration Parameter data """
from datetime import datetime
from six.moves import range # pylint: disable=F0401
from sqlalchemy import (Column, Integer, DateTime, Sequence, ForeignKey,
UniqueConstraint)
from sqlalchemy.orm import relation, backref, deferred
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.ext.mutable import MutableDict
from aquilon.exceptions_ import NotFoundException, ArgumentError, InternalError
from aquilon.aqdb.column_types import JSONEncodedDict, AqStr
from aquilon.aqdb.model import (Base, PersonalityStage, ParamDefinition,
ParamDefHolder)
from aquilon.utils import validate_nlist_key
_TN = 'parameter'
class ParameterPathNotFound(Exception):
    """
    Custom exception used by the path walker.

    It does not have to carry any extra information around - the error which
    will eventually be generated should always contain the original path
    requested by the user.
    """
    pass
class Parameter(Base):
    """
    A structured (JSON) parameter document, tied to a parameter definition
    holder; polymorphic subclasses associate it with the holding dbobj
    (e.g. a personality stage).
    """

    __tablename__ = _TN
    _instance_label = 'holder_name'

    id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
    param_def_holder_id = Column(ForeignKey(ParamDefHolder.id), nullable=False)
    # The whole parameter document stored as JSON; MutableDict lets
    # SQLAlchemy notice in-place mutations (see the .changed() calls below).
    value = Column(MutableDict.as_mutable(JSONEncodedDict), nullable=False)
    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    # Polymorphic discriminator selecting the holder subclass.
    holder_type = Column(AqStr(16), nullable=False)

    param_def_holder = relation(ParamDefHolder, innerjoin=True)

    __mapper_args__ = {'polymorphic_on': holder_type}

    @property
    def holder_name(self):  # pragma: no cover
        raise InternalError("Abstract base method called")

    @property
    def holder_object(self):  # pragma: no cover
        raise InternalError("Abstract base method called")

    def path_walk(self, path, vivify=False):
        """
        Walk the path given as parameter, and return the list of intermediate
        values being walked over. If vivify is True, then non-existent nodes
        will be created as needed.

        Returned is a list of (path_part, path_value) pairs, so that
        path_value[path_part] is always the object referenced by the given path
        component. path_value is always a reference into self.value, so
        modifying path_value[path_part] modifies self.value.

        Raises ParameterPathNotFound when the path does not exist (and cannot
        be vivified), and ArgumentError for invalid list indexes.
        """
        # Initial conditions: path cannot be empty, and self.value is always a
        # dict.
        if not path:  # pragma: no cover
            raise InternalError("Path must not be empty")

        current = self.value
        route = []
        parts = ParamDefinition.split_path(path)

        def handle_vivify(idx):
            # We need to look ahead at the next path component to figure out if
            # the component to be vivified should be a leaf, a dictionary or a
            # list
            try:
                next_part = parts[idx + 1]
            except IndexError:
                # Leaf
                return None

            # Not leaf - can be either a dict or list, depending on if the next
            # path component is a number or not
            try:
                next_part = int(next_part)
            except ValueError:
                return {}

            # Ok, it's a list - only the first item in the list can be
            # auto-vivified
            if next_part != 0:
                raise ParameterPathNotFound
            return []

        # We need look-ahead for auto-vivification, so we loop over the indexes
        # rather than the list directly
        for idx in range(0, len(parts)):
            part = parts[idx]

            if isinstance(current, dict):
                if part not in current:
                    if vivify:
                        current[part] = handle_vivify(idx)
                    else:
                        raise ParameterPathNotFound
            elif isinstance(current, list):
                try:
                    part = int(part)
                    if part < 0:
                        # We could allow index -1 to mean 'append to the end'...
                        raise ValueError
                except ValueError:
                    raise ArgumentError("Invalid list index '%s'." % part)

                if part > len(current):
                    raise ParameterPathNotFound
                elif part == len(current):
                    # Index one past the end: appending is allowed only when
                    # vivifying.
                    if vivify:
                        # pylint: disable=E1101
                        current.append(handle_vivify(idx))
                    else:
                        raise ParameterPathNotFound
            else:
                # Scalar leaf reached while path components remain.
                raise ArgumentError("Value %r cannot be indexed." % current)

            route.append((part, current))
            current = current[part]

        return route

    def get_path(self, path, compel=True):
        """Return the value at `path`; the whole document for an empty path.

        With compel=True a missing path raises NotFoundException, otherwise
        None is returned.
        """
        if not path:
            return self.value

        try:
            route = self.path_walk(path)
        except ParameterPathNotFound:
            if compel:
                raise NotFoundException("No parameter of path=%s defined." %
                                        path)
            else:
                return None

        index, value = route.pop()
        return value[index]

    def set_path(self, path, value, update=False):
        """Store `value` at `path`, vivifying intermediate nodes when adding.

        With update=False an already existing, non-empty value raises
        ArgumentError; with update=True the path must already exist.
        """
        if not path:
            if self.value and not update:
                raise ArgumentError("Parameter value already exists.")
            self.value = value
            self.value.changed()  # pylint: disable=E1101
            return

        try:
            # Vivify missing nodes only when adding, not when updating.
            route = self.path_walk(path, not update)
        except ParameterPathNotFound:
            raise NotFoundException("No parameter of path=%s defined." % path)

        index, lastvalue = route.pop()

        # We don't want to allow overwriting False or "". So we need to spell
        # out the emptiness criteria, instead of just evaluating
        # lastvalue[index] as a boolean
        if not update and not (lastvalue[index] is None or
                               (isinstance(lastvalue[index], dict) and
                                len(lastvalue[index]) == 0) or
                               (isinstance(lastvalue[index], list) and
                                len(lastvalue[index]) == 0)):
            raise ArgumentError("Parameter with path=%s already exists." % path)

        # Dictionary keys must be valid for PAN
        if isinstance(lastvalue, dict):
            validate_nlist_key("a path component", index)
        lastvalue[index] = value

        # coerce mutation of parameter since sqlalchemy
        # cannot recognize parameter change
        self.value.changed()  # pylint: disable=E1101

    def del_path(self, path, compel=True):
        """Delete the value at `path`, pruning dictionaries that become empty.

        With compel=True a missing path raises NotFoundException; an empty
        path resets the whole document.
        """
        if not path:
            self.value = {}
            self.value.changed()  # pylint: disable=E1101
            return

        try:
            route = self.path_walk(path)
        except ParameterPathNotFound:
            if compel:
                raise NotFoundException("No parameter of path=%s defined." % path)
            else:
                return

        while route:
            index, lastvalue = route.pop()
            del lastvalue[index]

            # We want to remove dictionaries which become empty, but not lists
            if isinstance(lastvalue, dict) and len(lastvalue) == 0:
                continue
            break

        # coerce mutation of parameter since sqlalchemy
        # cannot recognize parameter change
        self.value.changed()  # pylint: disable=E1101

    @staticmethod
    def flatten(data, key="", path="", flattened=None):
        """Flatten nested dicts/lists into a {"/path/to/leaf": value} dict."""
        if flattened is None:
            flattened = {}

        if isinstance(data, list):
            # List items are addressed by their numeric index.
            for i, item in enumerate(data):
                Parameter.flatten(item, "%d" % i, path + "/" + key,
                                  flattened)
        elif isinstance(data, dict):
            for new_key, value in data.items():
                Parameter.flatten(value, new_key, path + "/" + key,
                                  flattened)
        else:
            flattened[((path + "/") if path else "") + key] = data
        return flattened

    def copy(self):
        """Return a new parameter of the same type with a copied document."""
        return self.__class__(param_def_holder=self.param_def_holder,
                              value=self.value.copy())
class PersonalityParameter(Parameter):
    """ Association of parameters with Personality """

    # FK to the owning personality stage; the DB cascades deletes so the
    # parameters disappear together with the stage.
    personality_stage_id = Column(ForeignKey(PersonalityStage.id,
                                             ondelete='CASCADE'),
                                  nullable=True, index=True)

    # Reachable from the stage as a dict keyed by param_def_holder.
    personality_stage = relation(PersonalityStage,
                                 backref=backref('parameters',
                                                 cascade='all, delete-orphan',
                                                 collection_class=attribute_mapped_collection('param_def_holder')))

    __mapper_args__ = {'polymorphic_identity': 'personality'}

    # At most one parameter row per (definition holder, stage) pair.
    __extra_table_args__ = (UniqueConstraint('param_def_holder_id',
                                             'personality_stage_id'),)

    @property
    def holder_name(self):
        """Qualified name of the owning personality stage."""
        return self.personality_stage.qualified_name

    @property
    def holder_object(self):
        """The owning PersonalityStage instance."""
        return self.personality_stage
|
from sklearn.datasets import make_classification

# Build a synthetic 3-class dataset that is deliberately imbalanced
# (roughly 1% / 5% / 94% class priors) to demonstrate oversampling.
X, y = make_classification(
    n_samples=5000,
    n_features=2,
    n_informative=2,
    n_redundant=0,
    n_repeated=0,
    n_classes=3,
    n_clusters_per_class=1,
    weights=[0.01, 0.05, 0.94],
    class_sep=0.8,
    random_state=0,
)

from imblearn.over_sampling import RandomOverSampler

# Randomly duplicate minority-class samples until every class matches the
# majority class count.
ros = RandomOverSampler(random_state=0)
X_resampled, y_resampled = ros.fit_resample(X, y)

from collections import Counter

# After resampling each class should report the same count.
print(sorted(Counter(y_resampled).items()))
|
class SilentGenerator:
    """Generates buffers of silence (all-zero samples) at a fixed sample rate."""

    def __init__(self, rate):
        # Samples per second; used to size the buffers returned below.
        self._RATE = rate

    def get_silent(self, seconds):
        """Return a list of zeros whose length corresponds to `seconds`
        seconds of silence at the configured sample rate.
        """
        # [0] * n is the idiomatic (and faster) way to build a constant list.
        return [0] * int(seconds * self._RATE)
|
#Time: O(n)
#Space: O(n)
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return a list where entry i is the product of every element of
        nums except nums[i], computed without division.

        Builds two auxiliary arrays:
          forward[i]  = product of nums[0..i-1]
          backward[i] = product of nums[i+1..]
        and multiplies them pairwise.  O(n) time, O(n) extra space.

        Fixes vs. the previous version: removed a leftover debug print,
        removed redundant re-initialisation of forward[0]/backward[-1]
        (already 1), which also crashed on empty input.
        """
        n = len(nums)
        forward = [1] * n   # forward[0] == 1 by definition
        backward = [1] * n  # backward[-1] == 1 by definition
        for i in range(1, n):
            forward[i] = forward[i - 1] * nums[i - 1]
            # ~i == -i-1 walks the suffix array in from the right end.
            backward[~i] = backward[~i + 1] * nums[~i + 1]
        return [forward[i] * backward[i] for i in range(n)]
#My optimization
#Time: O(n)
#Space: O(1)
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Product-except-self in one pass from both ends, O(1) extra
        space (output list excluded).

        `forward` carries the prefix product applied to ans[i] from the
        left while `backward` carries the suffix product applied to ans[~i]
        from the right; each slot is touched once from each side.

        Fix vs. the previous version: removed the leftover debug print.
        """
        n = len(nums)
        forward = 1
        backward = 1
        ans = [1] * n
        for i in range(1, n):
            ans[i] *= forward * nums[i - 1]
            ans[~i] *= backward * nums[~i + 1]
            forward = forward * nums[i - 1]
            backward = backward * nums[~i + 1]
        return ans
#two pass clear answer
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return output where output[i] is the product of all elements of
        nums except nums[i], without using division.

        Two passes over the output list: the forward pass seeds each slot
        with the running prefix product, the backward pass multiplies in
        the running suffix product.  O(n) time, O(1) extra space.
        """
        size = len(nums)
        output = [1] * size
        prefix = 1
        for idx in range(size):
            output[idx] = prefix
            prefix *= nums[idx]
        suffix = 1
        for idx in reversed(range(size)):
            output[idx] *= suffix
            suffix *= nums[idx]
        return output
#second try
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Product of all elements except self, no division.

        First loop writes prefix products into the result; second loop
        walks back multiplying in suffix products.
        """
        result = [1 for _ in nums]
        left = 1
        pos = 0
        while pos < len(nums):
            result[pos] = left
            left *= nums[pos]
            pos += 1
        right = 1
        pos = len(nums) - 1
        while pos >= 0:
            result[pos] *= right
            right *= nums[pos]
            pos -= 1
        return result
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 18:21:21 2017
@author: Eghosa
"""
import numpy as np
import pandas as pd
import sklearn as skl
from sklearn.datasets import make_spd_matrix as make_cov
import matplotlib.pyplot as plt
import scipy as sp
import os
import plotly.offline as py
import plotly.graph_objs as go
import plotly.figure_factory as ff
import math
import pdb
import networkx as nx
import plotly.plotly as py_o
from plotly.grid_objs import Grid, Column
import time
from functools import partial
import pickle
from functools import reduce
import sqlite3 as sql
# When True, reload pickled intermediate results instead of re-parsing the CSVs.
load_from_file = False
def read_vectors():
    """Load every CSV found under the current working tree.

    Returns a dict mapping file basename (without extension) -> DataFrame.
    """
    all_vectors = {}
    all_actors = set()
    for subdir, dirs, files in os.walk(os.getcwd()):
        for filename in files:
            if filename.endswith(".csv"):
                fname = filename.split(".")[0]
                # BUG FIX: read from the directory being walked; the original
                # passed the bare filename, which only works for files in CWD.
                vec = pd.read_csv(os.path.join(subdir, filename), header=0)
                # BUG FIX: set.union returns a new set (result was discarded);
                # update the accumulator in place instead.
                all_actors.update(set(vec.index.values))
                all_vectors[fname] = vec
    return all_vectors
def plotGraph(G,org,phase,plot=False,center_node=None):
    """Build (and optionally render) a plotly trace for graph G.

    G: networkx graph whose nodes are offenders (or offender ids).
    org: dict mapping known node ids to display names.
    phase: label used in the plot title and output filename.
    plot: when True, write '<phase>.html'; otherwise print and return trace.
    center_node: when given, draw only that node's ego network.
    """
    pos = nx.fruchterman_reingold_layout(G)
    dmin=1
#    ncenter=0
    # Find the node closest to the layout centre (only dmin is kept;
    # the ncenter bookkeeping is commented out).
    for n in pos:
        x,y=pos[n]
        d=(x-0.5)**2+(y-0.5)**2
        if d<dmin:
#            ncenter=n
            dmin=d

    # Single scatter trace holding both edges and node markers; the loops
    # below append coordinates/colors/hover-text in place.
    edge_trace = go.Scatter(
        x=[],
        y=[],
        text = [],
        visible = False,
        line=go.Line(width=0.5,color='#888'),
        hoverinfo='text',
        mode='lines+markers',
        marker= go.Marker(
            showscale=True,
            # colorscale options
            # 'Greys' | 'Greens' | 'Bluered' | 'Hot' | 'Picnic' | 'Portland' |
            # Jet' | 'RdBu' | 'Blackbody' | 'Earth' | 'Electric' | 'YIOrRd' | 'YIGnBu'
            colorscale='YIGnBu',
            reversescale=True,
            color=[],
            size=10,
            colorbar=dict(
                thickness=15,
                title='Node Connections',
                xanchor='left',
                titleside='right'
            ),
            line=dict(width=2)))

    def node_info(node,freq=None):
        # Hover text for a node: in/out connection counts, and the display
        # name from `org` when available.  NOTE(review): the local variable
        # `node_info` shadows this function's name; harmless but confusing.
        if freq==None:
            in_adj = 0
            out_adj = G.degree(node)
        else:
            # get_edge_data returns a dict (with "weight") or the default 0.
            out_adj = G.get_edge_data(node,freq,default=0)
            in_adj = G.get_edge_data(freq,node,default=0)
        if isinstance(out_adj,dict):
            out_adj = out_adj["weight"]
        if isinstance(in_adj,dict):
            in_adj = in_adj["weight"]
        if node in org:
            node_info = org[node] + "("+str(node)+")"+' <br># of outgoing connections: '+str(out_adj) + ' <br># of incoming connections: '+str(in_adj)
        elif node <= 82:
            # NOTE(review): the <=82 branch produces the same text as the
            # else branch -- presumably a leftover special case.
            node_info = "Criminal # "+str(node) +' <br># of outgoing connections: '+str(out_adj) + ' <br># of incoming connections: '+str(in_adj)
        else:
            node_info = "Criminal # "+str(node) +' <br># of outgoing connections: '+str(out_adj) + ' <br># of incoming connections: '+str(in_adj)
        return node_info

    if center_node != None:
        # Ego view: draw one edge per neighbor of center_node and total up
        # the edge weights in both directions.
        neighbors = G.neighbors(center_node)
        total_in = 0
        total_out = 0
        for n in neighbors:
            x0, y0 = pos[center_node]
            x1, y1 = pos[n]
            adj0 = G.neighbors(center_node)
            adj1 = G.neighbors(n)
            edge_trace['x'] += [x0, x1, None]
            edge_trace['y'] += [y0, y1, None]
            edge_trace['marker']['color'] += [len(adj0),len(adj1),None]
            weightOut = G.get_edge_data(center_node,n,default=0)
            weightIn = G.get_edge_data(n,center_node,default=0)
            if isinstance(weightOut,dict):
                total_out += weightOut["weight"]
            if isinstance(weightIn,dict):
                total_in += weightIn["weight"]
            info0 = None
            info1 = node_info(n,center_node)
            edge_trace['text'] += [info0, info1, None]
        total_info = org[center_node] + "("+str(center_node)+")"+' <br># of outgoing connections: '+str(total_out) + ' <br># of incoming connections: '+str(total_in)
        # First text slot belongs to the center node itself.
        edge_trace['text'][0] = total_info
    else:
        # Full view: one segment (and hover text pair) per graph edge.
        for edge in G.edges():
            x0, y0 = pos[edge[0]]
            x1, y1 = pos[edge[1]]
            adj0 = G.neighbors(edge[0])
            adj1 = G.neighbors(edge[1])
            edge_trace['x'] += [x0, x1, None]
            edge_trace['y'] += [y0, y1, None]
            edge_trace['marker']['color'] += [len(adj0),len(adj1),None]
            info0 = node_info(edge[0])
            info1 = node_info(edge[1])
            edge_trace['text'] += [info0, info1, None]

    fig = go.Figure(data=go.Data([edge_trace]),
                    layout=go.Layout(
                        title='<br>Network graph of Criminal Activity in Quebec from ' + phase,
                        titlefont=dict(size=16),
                        showlegend=False,
                        hovermode='closest',
                        margin=dict(b=20,l=5,r=5,t=40),
                        xaxis=go.XAxis(showgrid=False, zeroline=False, showticklabels=False),
                        yaxis=go.YAxis(showgrid=False, zeroline=False, showticklabels=False)))
    if plot:
        py.plot(fig, filename=phase+'.html')
    else:
        print(edge_trace)
    return edge_trace
class Offender(object):
    """One offender: identity plus nested records of crimes and co-offenders.

    self.crimes layout:
        {year: {date: {event: {"crimes": [...], "cooffenders": [...]}}}}
    self.cooffenders layout:
        {year: set(Offender)}
    """

    def __init__(self,id_num,DOB,sex):
        self.id_num = id_num
        self.DOB = DOB
        self.sex = sex
        self.crimes = {}
        self.cooffenders = {}

    def __repr__(self):
        return str(self.id_num)

#    def __eq__(self,other):
#        return self.id_num == other.id_num

    def add_crime(self,year,date,event,crime):
        """Record one crime code under year/date/event."""
        crime_year = self.crimes.setdefault(year,{})
        crime_date = crime_year.setdefault(date,{})
        crime_event = crime_date.setdefault(event,{})
        # BUG FIX: the original rebound the parameter `crime` to the list and
        # then appended the list to itself; append the crime code instead.
        crimes = crime_event.setdefault("crimes",[])
        crimes.append(crime)

    def add_crimes(self,year,date,event,crimes):
        """Record several crime codes for the same event."""
        for crime in crimes:
            self.add_crime(year,date,event,crime)

    def get_crimes(self,year=None):
        """Return the per-year crime dict, or the full history when `year`
        is None (or unknown)."""
        return self.crimes.get(year,self.crimes)

    def add_cooffender(self,year,date,event,offender):
        """Attach one co-offender to the given event record."""
        crime_year = self.crimes.setdefault(year,{})
        crime_date = crime_year.setdefault(date,{})
        crime_event = crime_date.setdefault(event,{})
        cooffenders = crime_event.setdefault("cooffenders",[])
        cooffenders.append(offender)

    def add_cooffenders(self,year,date,event,offenders):
        """Attach several co-offenders and index them by year."""
        ooff_year = self.cooffenders.setdefault(year,set())
        for offender in offenders:
            self.add_cooffender(year,date,event,offender)
            ooff_year.add(offender)

    def get_cooffenders_for_year(self,year=None):
        """Co-offenders for `year`, or the union over all years when the
        year is missing/None (returned as a list in that case)."""
        val = self.cooffenders.get(year,[])
        if len(val) > 0:
            return val
        all_co = self.cooffenders.values()
        if len(all_co) > 0:
            ans = reduce(set.union,all_co,set())
            return list(ans)
        return []

    @property
    def num_crimes(self):
        # NOTE(review): this counts distinct *dates* per year, not crime
        # codes; kept as-is because create_data_dict depends on it.
        return sum(len(crimes) for crimes in self.crimes.values())
class CrimeEvent(object):
    """Plain record describing a single crime event and its participants."""

    def __init__(self,event,year,date,crime_types,cooffenders,location,municipalites,youths,adults):
        # Identity / timing of the event.
        self.event = event
        self.year = year
        self.date = date
        # What happened and who was involved.
        self.crime_types = crime_types
        self.cooffenders = cooffenders
        self.youths = youths
        self.adults = adults
        # Where it happened.
        self.location = location
        self.mun = municipalites
def create_data_dict(data):
    """Convert the raw co-offending DataFrame into nested dicts.

    Returns (crime_dict, offenders) where
      crime_dict: {year: {date: {event: {"Location", "MUN",
                                         "offenders": {Offender: num_crimes}}}}}
      offenders:  {NoUnique: Offender}

    NOTE(review): assumes columns annee/Date/SeqE/ED1/MUN/NoUnique/
    Naissance/SEXE/NCD1..NCD4 exist -- confirm against the CSV schema.
    """
    crime_dict = {}
    offenders = {}
    dict_order = ["annee","Date", "SeqE"]
    for index,row in data.iterrows():
        current_dict = crime_dict
        # Descend (creating as needed) one nesting level per key.
        for key in dict_order:
#            current_dict = current_dict.setdefault(key,{})
            current_dict = current_dict.setdefault(row[key],{})
        current_dict["Location"] = row["ED1"]
        current_dict["MUN"] = row["MUN"]
        # Reuse the Offender if already seen, else create a fresh record.
        offender = offenders.get(row['NoUnique'],Offender(row['NoUnique'],row['Naissance'],row['SEXE']))
        # NCD1..NCD4 hold up to four crime codes; drop blanks after stripping.
        crimes = list(filter(lambda x: x !='',map(lambda x: x.strip(),row[['NCD1','NCD2','NCD3','NCD4']])))
        offender.add_crimes(row["annee"],row['Date'],row["SeqE"],crimes)
        current_dict = current_dict.setdefault("offenders",{})
        current_dict[offender] = offender.num_crimes
        offenders[row['NoUnique']] = offender
    return crime_dict,offenders
# Parse the CSVs (and cache the result), or reload a previous cache.
# The sqlite experiments are kept below as commented-out history.
if not load_from_file:
    cooffend = read_vectors()
#    conn = sql.connect("cooffending.db")
#    cur = conn.cursor()
#    cur.execute('SELECT * FROM cooffending WHERE annee=?',(2003,))
#    for row in cur.fetchall():
#        print(row)
#    assert True == False
    crime_dict,offenders = create_data_dict(cooffend["Cooffending"])
    # Cache the parsed structures so later runs can skip the CSV parse.
    pickle.dump(cooffend,open('cooffend.p',"wb"))
    pickle.dump(crime_dict,open("crimes.p","wb"))
    pickle.dump(offenders,open('offenders.p', "wb"))
else:
    cooffend = pickle.load(open("cooffend.p","rb"))
    crime_dict = pickle.load(open("crimes.p","rb"))
    offenders = pickle.load(open("offenders.p","rb"))

#graph = create_graph(cooeffend["Cooffending"])
#print(graph)
"""
Problem 5.1
"""
#num_offenders = len(offenders)
#num_cases = len(cooffend["Cooffending"]["SeqE"])
#def num_crimes(year):
# num = 0
# for date in year:
# num += len(year[date])
# return num
#
#total_crimes = 0
#crimes_per_year = {}
#for year,dates in crime_dict.items():
# crimes = num_crimes(dates)
# total_crimes += crimes
# crimes_per_year[year] = crimes
#
#all_crimes = []
#for year,dates in crime_dict.items():
# for date,events in dates.items():
# for event,info in events.items():
# crime_data = (event,len(info["offenders"]),info["MUN"])
# all_crimes.append(crime_data)
#
#
#sort_crimes = sorted(all_crimes,key=lambda x: x[1],reverse=True)
#top_five = sort_crimes[:5]
#print("Number of ooffenders:",num_offenders)
#print("Number of cases:",num_cases)
#print("Total num of crimes:",total_crimes)
#print("crimes per yer:",crimes_per_year)
#print("top five crimes:",top_five)
"""
Problem 5.2
"""
def update_cooffenders(crime_dict,cooffenders):
cooff_count = {}
for year,dates in crime_dict.items():
for date,events in dates.items():
for event,info in events.items():
offenders = info["offenders"]
for offender in offenders:
others = list(set(offenders).difference(set([offender])))
if others:
cooffenders[offender.id_num].add_cooffenders(year,date,event,others)
count = cooff_count.setdefault(offender,set())
count.update(others)
return cooff_count
def crimes_committed_together(off1,off2,year=None):
    """Count crime events that off1 and off2 both attended.

    With a concrete `year`, only that year's records are compared;
    otherwise every year in off1's history is scanned.  A "shared" event
    means the same date and the same event id in both histories.
    """
    first = off1.get_crimes(year)
    second = off2.get_crimes(year)
    shared = 0
    if year is not None:
        # Per-year layout: {date: {event: ...}}
        for date, events in first.items():
            shared += len(events.keys() & second.get(date, {}).keys())
    else:
        # Full-history layout: {year: {date: {event: ...}}}
        for yr, dates in first.items():
            for date, events in dates.items():
                shared += len(
                    events.keys() & second.get(yr, {}).get(date, {}).keys())
    return shared
def create_cooffending_network(offenders):
    """Build an undirected co-offending graph.

    Nodes are Offender objects; an edge's "weight" is the number of crime
    events the pair attended together, accumulated over all years.
    Returns {year: Graph} plus an "overall" entry.  The per-year graphs
    are currently left empty -- the code that filled them (and the
    degree-based pruning) is kept below as commented-out history.
    """
    years = {k:nx.Graph() for k in range(2003,2011)}
    overall = nx.Graph()
    for key,offender in offenders.items():
#        for year in years:
#            cooffenders = offender.get_cooffenders_for_year(year)
##            print("cooff",offender.cooffenders.values())
#            edges = []
#
##            print(cooffenders)
#            for cooff in cooffenders:
#                crimes_toget = crimes_committed_together(offender,cooff,year)
#                old_weight = years[year].get_edge_data(offender,cooff,default=0)
#
#                if isinstance(old_weight,dict):
#                    old_weight = old_weight["weight"]
#
#                crimes_toget += old_weight
#
#                edges.append((offender,cooff,crimes_toget))
#
#            years[year].add_weighted_edges_from(edges,weight="weight")
        o_edges = []
        all_cooffenders = offender.get_cooffenders_for_year()
        for cooff in all_cooffenders:
            all_toget = crimes_committed_together(offender,cooff)
            # get_edge_data returns a dict with "weight" when the edge
            # already exists, or the default 0 otherwise.
            old_weight_o = overall.get_edge_data(offender,cooff,default=0)
            if isinstance(old_weight_o,dict):
                old_weight_o = old_weight_o["weight"]
            new_weight = old_weight_o + all_toget
            o_edges.append((offender,cooff,new_weight))
        overall.add_weighted_edges_from(o_edges,weight="weight")
    years["overall"] = overall
#    num_individual_offenders = 0
#    for key,graph in years.items():
#        deg = graph.degree()
#        to_remove = [i for i in deg if deg[i] < 1]
#        if key=="overall":
#            print("removing nodes",len(to_remove))
#            num_individual_offenders += len(to_remove)
#        graph.remove_nodes_from(to_remove)
    return years
def plotDist(x,y,x_label,y_label,title):
    """Render a lines+markers scatter of y vs x and write '<title>.html'."""
    figure = go.Figure(
        data=[go.Scatter(x=x, y=y, mode='lines+markers')],
        layout=go.Layout(title=title,
                         xaxis=dict(title=x_label),
                         yaxis=dict(title=y_label)))
    py.plot(figure, filename=title + ".html")
def plotHist(x,bin_size,x_label,y_label,title):
    """Render a histogram of x with fixed-width bins and write '<title>.html'."""
    bins = dict(start=np.min(x), size=bin_size, end=np.max(x))
    figure = go.Figure(
        data=[go.Histogram(x=x, xbins=bins)],
        layout=go.Layout(title=title,
                         xaxis=dict(title=x_label),
                         yaxis=dict(title=y_label)))
    py.plot(figure, filename=title + ".html")
# Register co-offenders on every Offender and count lone offenders
# (offenders whose co-offender set stayed empty).
counts = update_cooffenders(crime_dict,offenders)
num_lone = len(list(filter(lambda x: len(counts[x])==0,counts)))
#if load_from_file:
#    print(len(offenders))
#    overall = nx.read_gpickle("graph.p")
#    print(len(offenders))
#else:
#    print(len(offenders))
overall = create_cooffending_network(offenders)["overall"]
print(len(offenders))
#nx.write_gpickle(overall,"graph.p")
print("number of nodes:",nx.number_of_nodes(overall))
print("number of lone_wolves:",num_lone)
print("number of edges:",nx.number_of_edges(overall))
# Degree distribution: deg_freq[d] = number of nodes with degree d.
deg_freq = nx.degree_histogram(overall)
degrees = np.arange(len(deg_freq))
# Connected components, largest first.
connected_components = [c for c in sorted(nx.connected_components(overall), key=len, reverse=True)]
largest_component = connected_components[0]
print("number of connected components:",len(connected_components))
print("num of nodes in largest cc:",len(largest_component))
plotDist(degrees,deg_freq,"Number of Degrees","Frequency of Degrees","Frequency of nodes with degrees")
"""
Problem 5.3
"""
def sort_dict_by_value(data_dict,reverse=False):
return sorted([(k,v) for k,v in data_dict.items()],key=lambda tup: tup[1],reverse=reverse)
# Restrict the centrality analysis to the largest connected component.
lcc_graph = overall.subgraph(largest_component)
#larg_deg_freq = nx.degree_histogram(lcc_graph)
#lcc_degrees = np.arange(len(larg_deg_freq))
#plotDist(lcc_degrees,larg_deg_freq,"Number of Degrees","Frequency of Degrees","Frequency of nodes with degrees in Largest Connected Component")
#
#node_degrees = sort_dict_by_value(lcc_graph.degree(),True)
#top5_deg = node_degrees[:5]
#print(top5_deg)
# Weighted betweenness centrality; report the five most central nodes.
btwCent = nx.betweenness_centrality(lcc_graph,weight="weight")
sort_btw = sort_dict_by_value(btwCent,True)
top5_btw = sort_btw[:5]
print(top5_btw)
#eigCent = nx.eigenvector_centrality(lcc_graph,max_iter = 2000, tol = 1e-2, weight="weight")
#sort_eig = sort_dict_by_value(eigCent,True)
#top5_eig = sort_eig[:5]
#print(top5_eig)
#clustering = nx.clustering(lcc_graph,weight="weight")
#sort_clust = sort_dict_by_value(clustering,True)
#top5_clust = sort_clust[:5]
#print(top5_clust)
plotHist([round(val,4) for val in btwCent.values()],0.002,"Betweeness Centraliity","Frequency of nodes","Betweeness Centrality Distribution for nodes in LCC")
#plotHist([round(val,4) for val in eigCent.values()],0.002,"Eigenvector Centraliity","Frequency of nodes","Eigenvector Centrality Distribution for nodes in LCC")
plotGraph(lcc_graph,{},"2003-2010",plot=True)
"""
Bonus
"""
def create_crime_events_network(crime_dict):
pass
|
import json
import time
import base64
from flask import Blueprint, request, jsonify
from common import url_constant, param_constant, constant
from werkzeug.exceptions import BadRequest, InternalServerError
from utils import log_service, utils, postgres_util, s3_util
from services.audio_collection_services import AudioCollectionServices
from services.audio_prediction_services import AudioPredCNNService
from services.audio_checking_services import AudioCheckServices
from instance import environment
from multiprocessing import Queue
mod = Blueprint('audio_prediction_app', __name__)

# Store flag
# Shared queue used as a crude counting semaphore to throttle the
# visualization endpoint (see audio_visualization_vgg16_v1).
q = Queue()
@mod.route(url_constant.CHECK_EMAIL, methods=['POST', 'GET'])
def check_email():
    """Check whether a collections record exists for the posted email.

    Expects a JSON body {"email": ...}.  Plain addresses (containing '@')
    are lower-cased before the lookup; anything else is treated as a
    base64-encoded identifier and passed through unchanged.
    Returns the existence status plus stored health/timestamps when found.
    """
    try:
        data = request.get_data()
        my_json = data.decode('utf8')
        json_data = json.loads(my_json)
        email = json_data["email"]
        # Only normalise case for real addresses; base64 payloads are
        # case-sensitive and must not be altered.
        if '@' in email:
            email = email.lower()
        db = postgres_util.PostgresDB()
        query = "SELECT * FROM collections WHERE email= %s"
        cursor = db.execute_query_with_data(query, data=(email,))
        row = cursor.fetchone()
        if row is None:
            response = {
                'email': email,
                'status_check': 'not exist',
                'health_status': 'None'
            }
        else:
            # NOTE(review): positional columns -- assumes row[7] is
            # health_status and rows 8/9 are created/updated timestamps.
            response = {
                'email': email,
                'status_check': 'exist',
                'health_status': row[7],
                'created_time': row[8].timestamp() * 1000,
                'updated_time': row[9].timestamp() * 1000
            }
        return jsonify(response)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; still answers a generic 500 payload.
        return jsonify({
            'check status': {
                'status_code': 500, 'message': 'Server internal error'
            }})
@mod.route(url_constant.AUDIO_PREDICTION_VGG16_V1, methods=['POST'])
def audio_prediction_vgg16_v1():
    """Run CNN predictions on the uploaded cough/mouth/nose recordings.

    Collects the uploads, predicts per available sound type, archives the
    originals to S3, stores a combined result row keyed by the submit
    token, and returns the result JSON.  At least one of the three sounds
    must be present, otherwise 400.
    """
    submit_token = request.args.get(param_constant.PARAM_SUBMIT_ID)
    submit_time = request.args.get(param_constant.PARAM_SUBMIT_TIME)
    cough_sound = request.files.get(param_constant.PARAM_COUGH_SOUND)
    mouth_sound = request.files.get(param_constant.PARAM_MOUTH_SOUND)
    nose_sound = request.files.get(param_constant.PARAM_NOSE_SOUND)
    email = request.form.get(param_constant.PARAM_EMAIL)
    info = request.form.get(param_constant.PARAM_INFO)
    if cough_sound is None and mouth_sound is None and nose_sound is None:
        raise BadRequest()
    # Collect data -- NOTE(review): presumably writes the uploads under
    # TMP_DIR/<submit_id> where predict() reads them; confirm in the service.
    collect_ser = AudioCollectionServices()
    submit_id = collect_ser.collect(info, cough_sound, mouth_sound, nose_sound)
    base_dir = "{}/{}".format(constant.TMP_DIR, submit_id)
    base_token_dir = "{}/{}".format(constant.RESULT_DIR, submit_token)
    try:
        # Create directory if not exist
        utils.create_directory(base_dir)
        audio_service = AudioPredCNNService(max_period=10, submit_id=submit_id, submit_time=submit_time)
        s3_cough_dir = None
        s3_mouth_dir = None
        s3_nose_dir = None
        cough_predict_result = ''
        mouth_predict_result = ''
        nose_predict_result = ''
        if cough_sound is not None:
            cough_sound_dir = "{}/{}_original.wav".format(base_dir, "cough")
            cough_save_dir = f"{base_token_dir}/{constant.COUGH}_original.wav"
            cough_predict_result = audio_service.predict(cough_sound_dir, type="cough")
            s3_util.upload_file(cough_sound_dir, cough_save_dir)
            s3_cough_dir = s3_util.generate_url(cough_save_dir)
        if mouth_sound is not None:
            mouth_sound_dir = "{}/{}_original.wav".format(base_dir, "mouth")
            mouth_save_dir = f"{base_token_dir}/{constant.MOUTH}_original.wav"
            mouth_predict_result = audio_service.predict(mouth_sound_dir, type="mouth")
            s3_util.upload_file(mouth_sound_dir, mouth_save_dir)
            s3_mouth_dir = s3_util.generate_url(mouth_save_dir)
        if nose_sound is not None:
            nose_sound_dir = "{}/{}_original.wav".format(base_dir, "nose")
            nose_save_dir = f"{base_token_dir}/{constant.NOSE}_original.wav"
            nose_predict_result = audio_service.predict(nose_sound_dir, type="nose")
            s3_util.upload_file(nose_sound_dir, nose_save_dir)
            s3_nose_dir = s3_util.generate_url(nose_save_dir)
        result = json.dumps({'cough_result': cough_predict_result,
                             'mouth_result': mouth_predict_result,
                             'nose_result': nose_predict_result})
        db = postgres_util.PostgresDB()
        query = "INSERT INTO results(id,cough,breathe_nose,breathe_mouth,results,email,info) VALUES (%s,%s,%s,%s,%s,%s,%s)"
        db.execute_query_with_data(query,
                                   data=(
                                       submit_token, s3_cough_dir, s3_nose_dir, s3_mouth_dir, str(result), email, info))
        db.close_connection()
        utils.remove_folder(base_dir)
        return result
    except Exception:
        # Narrowed from a bare `except:` so process-control exceptions are
        # not swallowed; temp files are still cleaned up on failure.
        utils.remove_folder(base_dir)
        return jsonify({
            'check status': {
                'status_code': 500, 'message': 'Server internal error'
            }})
@mod.route(url_constant.AUDIO_VISUALIZATION_VGG16_V1, methods=['GET'])
def audio_visualization_vgg16_v1():
    """Generate feature images for a previously submitted recording set.

    Downloads the archived originals from S3 (when present), renders one
    feature image per sound, uploads the images, stores their URLs on the
    result row, and returns presigned URLs for each image.
    """
    try:
        # Handle multi request
        # The shared queue acts as a counting semaphore: refuse work when
        # more than 10 requests are already in flight.
        # NOTE(review): if an exception fires before q.put() below, the
        # except branch still calls q.get(), which can unbalance the
        # counter (or block) -- worth confirming.
        if q.qsize() > 10:
            return json.dumps({'server_busy': True})
        q.put(constant.LOCK)
        submit_id = request.args.get(param_constant.PARAM_SUBMIT_ID)
        submit_time = request.args.get(param_constant.PARAM_SUBMIT_TIME)
        base_dir = "{}/{}".format(constant.TMP_DIR, submit_id)
        # Create directory if not exist
        utils.create_directory(base_dir)
        # Local scratch paths mirror the S3 result paths (TMP_DIR -> RESULT_DIR).
        cough_sound_dir = "{}/{}_original.wav".format(base_dir, "cough")
        cough_save_dir = cough_sound_dir.replace(constant.TMP_DIR, constant.RESULT_DIR)
        mouth_sound_dir = "{}/{}_original.wav".format(base_dir, "mouth")
        mouth_save_dir = mouth_sound_dir.replace(constant.TMP_DIR, constant.RESULT_DIR)
        nose_sound_dir = "{}/{}_original.wav".format(base_dir, "nose")
        nose_save_dir = nose_sound_dir.replace(constant.TMP_DIR, constant.RESULT_DIR)
        # NOTE(review): this check runs after base_dir was already built from
        # a None id -- consider validating before touching the filesystem.
        if submit_id is None:
            raise BadRequest()
        is_cough_existed = s3_util.download_file(cough_save_dir, cough_sound_dir)
        is_mouth_existed = s3_util.download_file(mouth_save_dir, mouth_sound_dir)
        is_nose_existed = s3_util.download_file(nose_save_dir, nose_sound_dir)
        audio_service = AudioPredCNNService(max_period=10, submit_id=submit_id, submit_time=submit_time)
        feature_cough_url = None
        feature_nose_url = None
        feature_mouth_url = None
        if is_cough_existed:
            feature_cough_image_dir = audio_service.visualize(cough_sound_dir, dest="{}/{}.jpg".format(base_dir, "cough"))
            s3_util.upload_file(feature_cough_image_dir, feature_cough_image_dir,
                                extra_args=constant.S3_IMAGE_EXTRA_PARAM)
            feature_cough_url = s3_util.generate_url(feature_cough_image_dir)
        if is_mouth_existed:
            feature_mouth_image_dir = audio_service.visualize(mouth_sound_dir, dest="{}/{}.jpg".format(base_dir, "mouth"))
            s3_util.upload_file(feature_mouth_image_dir, feature_mouth_image_dir,
                                extra_args=constant.S3_IMAGE_EXTRA_PARAM)
            feature_mouth_url = s3_util.generate_url(feature_mouth_image_dir)
        if is_nose_existed:
            feature_nose_image_dir = audio_service.visualize(nose_sound_dir, dest="{}/{}.jpg".format(base_dir, "nose"))
            s3_util.upload_file(feature_nose_image_dir, feature_nose_image_dir,
                                extra_args=constant.S3_IMAGE_EXTRA_PARAM)
            feature_nose_url = s3_util.generate_url(feature_nose_image_dir)
        # Persist the image URLs on the existing result row.
        db = postgres_util.PostgresDB()
        query = "UPDATE results SET cough_img= %s, breathe_nose_img= %s, breathe_mouth_img= %s WHERE id = %s"
        db.execute_query_with_data(query, data=(feature_cough_url, feature_nose_url, feature_mouth_url, submit_id))
        db.close_connection()
        utils.remove_folder(base_dir)
        q.get()
        feature_cough_url_presined = s3_util.get_presigned_url_from_original_url(feature_cough_url)
        feature_mouth_url_presined = s3_util.get_presigned_url_from_original_url(feature_mouth_url)
        feature_nose_url_presined = s3_util.get_presigned_url_from_original_url(feature_nose_url)
        return json.dumps({'cough_feature_url': feature_cough_url_presined,
                           'mouth_feature_url': feature_mouth_url_presined,
                           'nose_feature_url': feature_nose_url_presined})
    except Exception as e:
        # NOTE(review): base_dir is unbound (NameError) if the exception
        # fires before it is assigned above.
        utils.remove_folder(base_dir)
        q.get()
        return jsonify({
            'check status': {
                'status_code': 500, 'message': 'Server internal error'
            }})
@mod.route(url_constant.AUDIO_GET_RESULT, methods=['GET'])
def get_results():
    """Return the stored prediction results and presigned media URLs for a
    submit id.  400 when the id is missing or unknown."""
    submit_id = request.args.get(param_constant.PARAM_SUBMIT_ID)
    if submit_id is None:
        raise BadRequest()
    db = postgres_util.PostgresDB()
    query = "SELECT id, breathe_nose, breathe_mouth, cough, results, cough_img, breathe_nose_img, breathe_mouth_img " \
            "FROM results " \
            "WHERE id= %s"
    cursor = db.execute_query(query, (submit_id,))
    data = cursor.fetchone()
    db.close_connection()
    if data is None:
        raise BadRequest()
    # Column order matches the SELECT list above.
    result = {}
    result["id"] = data[0]
    result["nose"] = s3_util.get_presigned_url_from_original_url(data[1])
    result["mouth"] = s3_util.get_presigned_url_from_original_url(data[2])
    result["cough"] = s3_util.get_presigned_url_from_original_url(data[3])
    result["results"] = json.loads(data[4])
    # Feature images are optional; only include the ones that exist.
    if data[5] is not None:
        result['cough_feature_url'] = s3_util.get_presigned_url_from_original_url(data[5])
    if data[6] is not None:
        result['nose_feature_url'] = s3_util.get_presigned_url_from_original_url(data[6])
    if data[7] is not None:
        result['mouth_feature_url'] = s3_util.get_presigned_url_from_original_url(data[7])
    return json.dumps(result)
@mod.route(url_constant.ADD_FEEDBACK, methods=['GET'])
def get_feedback():
    """Look up the feedback row for a submit id.

    NOTE(review): the fetched row is currently unused and an empty string
    is always returned -- the endpoint looks unfinished.
    """
    # NOTE(review): PARAM_SUSBMIT_ID reads like a typo'd constant name;
    # confirm against param_constant before renaming anything.
    submit_id = request.args.get(param_constant.PARAM_SUSBMIT_ID)
    if submit_id is None:
        raise BadRequest()
    db = postgres_util.PostgresDB()
    query = "SELECT id, type FROM feedbacks WHERE id= %s"
    # BUG FIX: (submit_id) is not a tuple -- use (submit_id,) so the driver
    # receives a parameter sequence, consistent with the other handlers.
    cursor = db.execute_query(query, data=(submit_id,))
    data = cursor.fetchone()
    db.close_connection()
    return ""
@mod.route(url_constant.ADD_FEEDBACK, methods=['POST'])
def add_feedback():
    """Insert or update the feedback type recorded for a submit id."""
    submit_id = request.form.get(param_constant.PARAM_SUBMIT_ID)
    # Renamed from `type`, which shadowed the builtin.
    feedback_type = request.form.get(param_constant.PARAM_TYPE)
    if submit_id is None or feedback_type is None:
        raise BadRequest()
    db = postgres_util.PostgresDB()
    query = "SELECT id, type FROM feedbacks WHERE id= %s"
    cursor = db.execute_query_with_data(query, data=(submit_id,))
    data = cursor.fetchone()
    # Upsert: insert when no row exists, otherwise update the type.
    if data is None or data[0] is None:
        query = "INSERT INTO feedbacks(id, type) VALUES (%s,%s)"
        db.execute_query_with_data(query, data=(submit_id, feedback_type,))
    else:
        query = "UPDATE feedbacks SET type= %s WHERE id = %s"
        db.execute_query_with_data(query, data=(feedback_type, submit_id,))
    db.close_connection()
    return ""
@mod.route(url_constant.CHECK_AUDIO, methods=['POST'])
def check_audio():
    """Validate an uploaded audio file (noise/length checks).

    Saves the upload to a unique temp folder, runs AudioCheckServices on
    it, and always removes the temp folder afterwards.  400 when no audio
    is attached; 500 with the underlying message on processing failure.
    """
    audio = request.files.get(param_constant.PARAM_AUDIO)
    # Millisecond timestamp keeps concurrent requests in separate folders.
    millis = int(round(time.time() * 1000))
    base_dir = "{}/{}".format(constant.TMP_DIR, f'sound-checking-{millis}')
    if audio is None:
        raise BadRequest()
    try:
        # Create directory if not exist
        utils.create_directory(base_dir)
        sound_dir = "{}/{}_original.wav".format(base_dir, "audio")
        audio.save(sound_dir)
        check_services = AudioCheckServices(max_noise_period_weight=environment.NOISE_DURATION_WEIGHT_FILTER,
                                            max_period=environment.LENGTH_FILTER)
        result = check_services.check(sound_dir, fix_length=False)
        utils.remove_folder(base_dir)
        return result
    except Exception as e:
        utils.remove_folder(base_dir)
        raise InternalServerError(description=str(e))
@mod.route(url_constant.HEALTH_CHECK, methods=['GET'])
def health_check():
    """Liveness probe: log the hit and answer a constant "ok"."""
    log_service.info("health_check() Start")
    return "ok"
|
from typing import Tuple, List, Callable

# Types needed for various functions
# NOTE(review): Lambda/Delta semantics are defined by the callers of these
# aliases and are not visible here.
Lambda = float
Delta = float
Precision = float
Recall = float
Frame = List[List[List[int]]] # n x m x 3 dims
Video = List[Frame] # t length video
# Dataset keyed by file path: (query path, [(candidate path, label)]).
VideoDataset = List[Tuple[str, List[Tuple[str, bool]]]]
# Same structure but with decoded frame data instead of paths.
FrameDataset = List[Tuple[Video, List[Tuple[Video, bool]]]]
# 2-D matrix of prediction-outcome counts.
ConfusionMatrix = List[List[int]]
# Distance metric between two frames.
DistanceFN = Callable[[Frame, Frame], float]
|
import os
import glob
import shutil
import pandas as pd
import importlib
import logging
import pytest
import tests.test_utils as test_utils
import drep
from drep import argumentParser
from drep.controller import Controller
from drep.WorkDirectory import WorkDirectory
@pytest.fixture()
def self():
    # Shared test-state object (named `self` so the module-level test
    # functions read like methods); torn down after each test.
    self = test_utils.load_common_self()
    yield self
    self.teardown()
def test_tertiary_clustering_1(self):
    '''
    Test --run_tertiary_clustering fully
    '''
    test_dir = self.test_dir

    # Check that wont run without dereplicate
    args = drep.argumentParser.parse_args(
        ['compare', self.wd_loc, '--run_tertiary_clustering', '-g'] + self.genomes)
    try:
        drep.controller.Controller().parseArguments(args)
        assert False
    except ValueError:
        pass

    # The flag is accepted under the `dereplicate` subcommand.
    args = drep.argumentParser.parse_args(
        ['dereplicate', self.wd_loc, '--run_tertiary_clustering', '--ignoreGenomeQuality', '-g'] + self.genomes)
    drep.controller.Controller().parseArguments(args)

    # Load test results
    wd = drep.WorkDirectory.WorkDirectory(self.wd_loc)
    Cdb = wd.get_db('Cdb').sort_values('genome').reset_index(drop=True)

    # Load solutions
    wdS = drep.WorkDirectory.WorkDirectory(self.s_wd_loc)
    CdbS = wdS.get_db('Cdb').sort_values('genome').reset_index(drop=True)

    # Tertiary clustering should add the provenance column to the new run;
    # the reference run (without the flag) must not have it.
    assert 'original_secondary_cluster' not in CdbS.columns
    assert 'original_secondary_cluster' in Cdb.columns
def test_tertiary_clustering_2(self):
    '''
    Quick tests for --run_tertiary_clustering fully
    '''
    test_dir = self.test_dir

    # Edit Cdb and Wdb: move one genome into its own secondary cluster
    # (1_3) so tertiary clustering has something to merge back.
    wd = drep.WorkDirectory.WorkDirectory(self.working_wd_loc)
    Cdb = wd.get_db('Cdb')
    Cdb['secondary_cluster'] = [c if g != 'Enterococcus_faecalis_T2.fna' else '1_3' for c, g in zip(Cdb['secondary_cluster'], Cdb['genome'])]
    Wdb = wd.get_db('Wdb')
    db = pd.DataFrame({'genome':['Enterococcus_faecalis_T2.fna'], 'cluster':['1_3'], 'score':[50]})
    Wdb = pd.concat([Wdb, db])
    assert len(Wdb) == 5
    wd.store_db(Wdb, 'Wdb')
    wd.store_db(Cdb, 'Cdb')

    # Run tertiary clustering
    args = drep.argumentParser.parse_args(
        ['dereplicate', self.working_wd_loc, '--run_tertiary_clustering', '--S_algorithm', 'ANImf', '-sa', '0.99', '-g'] + self.genomes)
    drep.d_evaluate.d_evaluate_wrapper(args.work_directory, evaluate=['2'], **vars(args))

    # The artificial 1_3 cluster should be merged away (4 secondary
    # clusters, 4 winners) while the original 5 clusters stay recorded.
    wd = drep.WorkDirectory.WorkDirectory(self.working_wd_loc)
    Cdb = wd.get_db('Cdb').sort_values('genome').reset_index(drop=True)
    Wdb = wd.get_db('Wdb').sort_values('genome').reset_index(drop=True)
    assert len(Cdb['secondary_cluster'].unique()) == 4
    assert len(Cdb['original_secondary_cluster'].unique()) == 5
    assert len(Wdb) == 4
    assert '1_1.3' in Cdb['secondary_cluster'].tolist()
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 23 18:37:56 2014
@author: hang
"""
import sys

# Print each command-line argument with its position index.
# BUG FIX: the original used `print(i),` -- a Python 2 leftover that in
# Python 3 builds a throwaway tuple and prints index and value on separate
# lines; `print(i, j)` emits them space-separated as intended.
for i, j in enumerate(sys.argv):
    print(i, j)
|
import numpy as np
import os
import cv2
import random
import pickle
import tensorflow
from tensorflow.keras import utils
# Root folder of the MNIST training images, one subfolder per digit.
DATADIR = "/Users/ankithudupa/Documents/Personal Projects/Python/MNIST CNN/trainingSet"
# Folder names double as class labels (index in this list = class number).
CATEGORIES = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# MNIST images are 28x28 pixels.
IMG_SIZE = 28
# Filled by create_training_data() with [image, class_index] pairs.
training_data = []
def create_training_data():
    """Populate `training_data` with [resized grayscale image, class index]
    pairs read from DATADIR/<category>/ folders."""
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category) # navigate into dir
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                # BUG FIX: append the resized image; the original appended the
                # raw `img_array` and left `new_array` unused.
                training_data.append([new_array, class_num])
            except Exception:
                # Skip unreadable/corrupt files (narrowed from a bare except).
                pass
create_training_data()
print(len(training_data))

# Shuffle so training batches are not ordered by digit class.
random.shuffle(training_data)

X = [] #features
y = [] #labels

for features, label in training_data:
    X.append(features)
    y.append(label)

# Add the trailing channel dimension expected by Keras conv layers.
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
one_hot_labels = utils.to_categorical(y, num_classes=10)
print(one_hot_labels)
print(X[1])
print(one_hot_labels[1])

# Persist features and one-hot labels for the training script.
pickle_out = open("X.pickle", "wb")
pickle.dump(X, pickle_out)
pickle_out.close()

pickle_out = open("y.pickle", "wb")
pickle.dump(one_hot_labels, pickle_out)
pickle_out.close()
#compute grade problem
#this program is written using the ideas in bypass_ifelse.py
def score_1(score):
    # Branch-free variant of the grading logic (see bypass_ifelse.py):
    # multiplying a string by a boolean yields the string when True and
    # '' when False, so only the terms whose range test holds survive.
    print "Score out of range, Enter a score between 0.0 to 1.0 only"*((score<0.0)+(score>1.0))
    #that is the equivalent of:
    #if (score<0.0) or (score >1.0):
    # print "Score out of range, Enter a score between 0.0 to 1.0 only"
    # NOTE(review): at score == 0.6 both the D term (>= 0.6) and the
    # F term (<= 0.6) are true, so this prints "DF" — the F bound
    # looks like it should be strict (< 0.6).
    print "The grade is: "*(score<=1.0)*(score>=0.0)+"A"*(score>=0.9)*(score<=1.0)+"B"*(score<0.9)*(score>=0.8)+"C"*(score<0.8)*(score>=0.7)+"D"*(score<0.7)*(score>=0.6)+"F"*(score<=0.6)*(score>=0.0)#the part (score<=1.0) and the (score>=0.0) is to check out the part where the score is beyond range
def score_2(score):
    # Conventional if/elif grading ladder.  Branches are checked from
    # the top down, so each one only needs its lower bound.
    if (score>1.0)or(score<0.0):
        print "The entered Score is out of the range 0.0 to 1.0"
    elif score>=0.9:
        print "Grade: A"
    elif score>=0.8:
        print "Grade: B"
    elif score>=0.7:
        print "Grade: C"
    elif score>=0.6:
        print "Grade: D"
    else:
        print "Grade: F"
try:
score_input=float(raw_input("Enter the score: "))
#score_1(score_input)
score_2(score_input) #uncomment one of these lines and see the output
except:
print "You must enter a number" |
import logging
import os
from argparse import ArgumentParser
import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration
from sqlalchemy import create_engine
from feed_proxy import handlers
from feed_proxy.conf import settings
from feed_proxy.fetchers import fetch_sources
from feed_proxy.parsers import parse_posts
from feed_proxy.utils import DEFAULT_DB_URL, validate_file
# Command-line interface for the feed-proxy runner.
parser = ArgumentParser()

group = parser.add_argument_group('Main options')
group.add_argument('sources_file',
                   type=lambda x: validate_file(parser, x),
                   help='Path to sources ini file')
group.add_argument('--db-url', default=os.getenv(f'{settings.ENV_PREFIX}DB_URL', DEFAULT_DB_URL),
                   help=f'Database URL [env var: {settings.ENV_PREFIX}DB_URL]')
# FIX: removed the doubled word ("URL URL") from the help text.
group.add_argument('--proxy-bot-url', default='http://localhost:8081',
                   help='Proxy bot for upload large files URL [http://localhost:8081]')

group = parser.add_argument_group('Logging options')
group.add_argument('--log-level', default='info',
                   choices=('debug', 'info', 'warning', 'error', 'fatal'))

# Pipeline stages, applied in order: drop posts from brand-new sources,
# drop already-processed posts, send the remainder to Telegram.
HANDLERS = [
    handlers.FilterNewSource,
    handlers.FilterProcessed,
    handlers.SendToTelegram,
]
def main():
    """Entry point: configure Sentry and logging, fetch and parse the
    feed sources, then run every post through the HANDLERS pipeline."""
    sentry_logging = LoggingIntegration(
        level=logging.INFO,  # Capture info and above as breadcrumbs
        event_level=logging.ERROR  # Send errors as events
    )
    # Sentry is effectively disabled when the DSN env var is unset.
    sentry_sdk.init(
        dsn=os.getenv(f'{settings.ENV_PREFIX}SENTRY_DSN', None),
        environment=os.getenv(f'{settings.ENV_PREFIX}SENTRY_ENVIRONMENT', None),
        integrations=[sentry_logging]
    )
    args = parser.parse_args()
    logging.basicConfig(level=args.log_level.upper())
    settings.configure(args.sources_file, PROXY_BOT_URL=args.proxy_bot_url)
    engine = create_engine(args.db_url)
    fetched = fetch_sources()
    parsed = parse_posts(fetched)
    with engine.connect() as conn:
        # Chain the handlers: each stage consumes the previous stage's
        # result, all within a single database connection.
        result = HANDLERS[0](conn)(parsed)
        for handler in HANDLERS[1:]:
            result = handler(conn)(result)


if __name__ == '__main__':
    main()
|
from datetime import date
import copy
import json
import csv
from django.test import LiveServerTestCase
from rest_framework import serializers
from rest_framework.test import APITestCase
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from api.models import *
from api.exceptions import *
# Test account fixtures; an index into USERS selects which account a
# helper (login_user / create_provider) uses.
USERS = [
    {
        'email': 'test@test.com',
        'password': 'test',
    },
    {
        'email': 'anothertest@test.com',
        'password': 'test2',
    },
    {
        'email': 'someone@else.tv',
        'password': '1234'
    }]

# Directory holding the CSV upload templates exercised by test_csv_upload.
CSV_TEMPLATES_DIR = 'app/provider2/csv-templates/'
class SimpleTest(APITestCase):
def test_basic_signup(self):
    """
    Testing signup endpoint
    """
    user_cred = {
        'email': USERS[0]['email'],
    }
    # Python-2 dict merge: items() returns lists, so + concatenates them.
    data = dict(user_cred.items() + {
        'first_name': "test_name",
        'last_name': "test_last_name"
    }.items())
    """ CREATE """
    # A malformed email address must be rejected with 400.
    wrong_data = copy.copy(data)
    wrong_data['email'] = 'wrong_email'
    resp = self.client.post('/api/user', wrong_data)
    self.assertEqual(resp.status_code, 400)
    # Everything below is intentionally unreachable until the email
    # service can be faked in tests (see TODO).
    return
    # TODO: Fake the email service. User creation sends email,
    # which fails in the test environment.
    resp = self.client.post('/api/user', data)
    self.assertEqual(resp.status_code, 201)
    user_id = resp.data['id']
    user = User.objects.get(id=user_id)
    self.assertSequenceEqual(
        [user.email, user.first_name, user.last_name],
        [data['email'], data['first_name'], data['last_name']]
    )
    """ LOGIN """
    # move method to next test
    # Login without a password should be forbidden.
    resp = self.client.post('/api/login', user_cred)
    self.assertEqual(resp.status_code, 403)
    # TODO: Test login with correct temporary password, which was mailed.
def test_provider_creation(self):
    """A second provider with the same name in the same state fails
    validation with IntegrityAPIError; the same name in a different
    state saves fine.  (Uniqueness presumably scoped to name+state —
    confirm against the Provider model.)"""
    notf = Notification.objects.create()

    class ProviderSerializer(serializers.ModelSerializer):
        class Meta:
            model = Provider
            fields = (
                'name', 'summary', 'details',
                'business_street', 'business_city', 'business_state',
                'business_zip', 'discount_unit', 'notification',
                'visible', 'provider_visible')

    # First provider: saves successfully.
    provider = {
        'name': 'test',
        'summary': 'summary',
        'details': 'details',
        'business_street': 'street',
        'business_city': 'city',
        'business_state': 'WS',
        'business_zip': '12345',
        'discount_unit': '%',
        'notification': notf.id,
        'visible': True,
        'provider_visible': True
    }
    serializer = ProviderSerializer(data=provider)
    serializer.is_valid()
    serializer.save()
    # Duplicate name + state: validation must raise.
    notf = Notification.objects.create()
    provider2 = {
        'name': 'test',
        'summary': 'summary',
        'details': 'details',
        'business_street': 'street',
        'business_city': 'city',
        'business_state': 'WS',
        'business_zip': '12345',
        'discount_unit': '%',
        'notification': notf.id,
        'visible': True,
        'provider_visible': True
    }
    serializer = ProviderSerializer(data=provider2)
    self.assertRaises(IntegrityAPIError, serializer.is_valid)
    # Same name but different state ('CA'): allowed.
    notf = Notification.objects.create()
    provider3 = {
        'name': 'test',
        'summary': 'summary',
        'details': 'details',
        'business_street': 'street',
        'business_city': 'city',
        'business_state': 'CA',
        'business_zip': '12345',
        'discount_unit': '%',
        'notification': notf.id,
        'visible': True,
        'provider_visible': True
    }
    serializer = ProviderSerializer(data=provider3)
    serializer.is_valid()
    serializer.save()
def test_school_search(self):
    """Schools are found by coordinates; the name+coordinates lookup
    apparently tolerates small (~1e-4) coordinate differences but not
    ~1e-3 ones — confirm the matching precision in the view."""
    School.objects.create(
        name='school name',
        street='street',
        city='city',
        state='WA',
        zip='zip',
        phone='phone',
        details='details',
        lat=11.1234567,
        lng=11.1234567)
    # Coordinate-only search finds the school.
    data = {
        'lat': 11.1234567,
        'lng': 11.1234567,
    }
    resp = self.client.get('/api/school', data)
    self.assertEqual(len(resp.data), 1)
    # strict retrieval
    data = {
        'name': 'school name',
        'lat': 11.1234567,
        'lng': 11.1234567,
    }
    resp = self.client.get('/api/school', data)
    self.assertEqual(len(resp.data), 1)
    # not strict retrieval
    data = {
        'name': 'school name',
        'lat': 11.1234,
        'lng': 11.1235,
    }
    resp = self.client.get('/api/school', data)
    self.assertEqual(len(resp.data), 1)
    # Coordinates further off: no match expected.
    data = {
        'name': 'school name',
        'lat': 11.124,
        'lng': 11.125,
    }
    resp = self.client.get('/api/school', data)
    self.assertEqual(len(resp.data), 0)
def test_school_serializer(self):
    """Saving the same school payload twice fails validation until the
    name changes (duplicate-name rejection, as shown by the assertions
    below)."""
    class SchoolSerializer(serializers.ModelSerializer):
        class Meta:
            model = School
            fields = (
                'name', 'visible',
                'lat', 'lng', 'street', 'city', 'state',
                'zip', 'phone', 'details', 'country')
    school = {
        'name': 'school name',
        'street': 'street',
        'city': 'city',
        'state': 'WA',
        'zip': 'zip',
        'phone': 'phone',
        'details': 'details',
        'lat': 11.1234567,
        'lng': 11.1234567}
    schools = School.objects.all()
    self.assertEqual(len(schools), 0)
    serializer = SchoolSerializer(data=school)
    self.assertIs(serializer.is_valid(), True)
    serializer.save()
    schools = School.objects.all()
    self.assertEqual(len(schools), 1)
    # Identical payload again: must be invalid (duplicate).
    serializer = SchoolSerializer(data=school)
    self.assertIs(serializer.is_valid(), False)
    # A different name makes the payload valid again.
    school['name'] = 'another school name'
    serializer = SchoolSerializer(data=school)
    self.assertIs(serializer.is_valid(), True)
    serializer.save()
    schools = School.objects.all()
    self.assertEqual(len(schools), 2)
def test_school_permissions(self):
    """Only users listed in SchoolAuthorizedUser may see a school's
    `authorized_users` field in the API response.

    NOTE(review): uses the Python-2-only dict.has_key() API.
    """
    class SchoolSerializer(serializers.ModelSerializer):
        class Meta:
            model = School
            fields = (
                'name', 'visible',
                'lat', 'lng', 'street', 'city', 'state',
                'zip', 'phone', 'details', 'country')
    school = {
        'name': 'school name',
        'street': 'street',
        'city': 'city',
        'state': 'WA',
        'zip': 'zip',
        'phone': 'phone',
        'details': 'details',
        'lat': 11.1234567,
        'lng': 11.1234567}
    schools = School.objects.all()
    self.assertEqual(len(schools), 0)
    serializer = SchoolSerializer(data=school)
    self.assertIs(serializer.is_valid(), True)
    serializer.save()
    schools = School.objects.order_by('-id')
    school = schools[0]
    resp = self.client.get('/api/school/%s' % school.id)
    self.assertEqual(resp.status_code, 200)
    data = json.loads(resp.content)
    # unauthenticated user should not know about authorized users
    self.assertEqual(data.has_key('authorized_users'), False)
    resp = self.client.get('/api/school')
    self.assertEqual(resp.status_code, 200)
    self.login_user()
    resp = self.client.get('/api/school/%s' % school.id)
    self.assertEqual(resp.status_code, 200)
    data = json.loads(resp.content)
    # unauthorized user should not know about authorized users
    self.assertEqual(data.has_key('authorized_users'), False)
    resp = self.client.get('/api/school')
    self.assertEqual(resp.status_code, 200)
    # Authorize the logged-in user (USERS[0]) for this school.
    SchoolAuthorizedUser.objects.create(school=school, email=USERS[0]['email'])
    resp = self.client.get('/api/school/%s' % school.id)
    self.assertEqual(resp.status_code, 200)
    data = json.loads(resp.content)
    # authorized user should now see the authorized-users list
    self.assertEqual(len(data['authorized_users']), 1)
def test_pd_calendar_filtring(self):
    """A provider with no sessions matching a calendar filter drops out
    of the provider directory.

    NOTE(review): method name typo ("filtring") left as-is — renaming
    would change the test id.
    """
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 0)
    notf = Notification.objects.create()
    provider = {
        'name': 'test',
        'summary': 'summary',
        'details': 'details',
        'business_street': 'street',
        'business_city': 'city',
        'business_state': 'WS',
        'business_zip': '12345',
        'discount_unit': '%',
        'notification': notf,
        'visible': True,
        'provider_visible': True
    }
    Provider.objects.create(**provider)
    # Unfiltered directory shows the provider.
    resp = self.client.get('/api/provider_directory')
    self.assertEqual(len(resp.data), 1)
    # Calendar row: Jan 1 - Feb 1 2015; days_of_week presumably a
    # weekday selector/bitmask — confirm against the Calendar model.
    calendar = Calendar.objects.create()
    row = {
        'start_date': date(2015, 1, 1),
        'end_date': date(2015, 2, 1),
        'days_of_week': 1
    }
    calendar.rows.create(**row)
    data = {
        'calendar': 1
    }
    # Filtering by the calendar hides the provider (no sessions match).
    resp = self.client.get('/api/provider_directory', data)
    self.assertEqual(len(resp.data), 0)
def create_provider(self):
    """Create a visible test Provider authorized for USERS[0].

    Returns the created Provider.  (Previously the helper returned
    None; returning the instance is backward compatible and saves
    callers a re-query via Provider.objects.all().)
    """
    notf = Notification.objects.create()
    provider = Provider.objects.create(
        name='test',
        summary='summary',
        details='details',
        business_street='street',
        business_city='city',
        business_state='WS',
        business_zip='12345',
        discount_unit='%',
        notification=notf,
        visible=True,
        provider_visible=True,
    )
    # Grant USERS[0] management rights over the provider.
    provider.authorized_users.create(email=USERS[0]['email'])
    return provider
def login_user(self, num=0):
    """Create the USERS[num] account, log it in and return the User."""
    creds = USERS[num]
    user = User.objects.create_user(creds['email'], creds['password'])
    logged_in = self.client.login(username=creds['email'],
                                  password=creds['password'])
    self.assertEqual(logged_in, True)
    return user
def test_program_creation(self):
    """PATCHing a JSON list of program payloads to the provider's
    program endpoint creates the programs."""
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 0)
    self.create_provider()
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 1)
    # NOTE(review): `id` shadows the builtin; rename on next touch.
    id = providers[0].id
    self.login_user()
    data = [{
        "program_code": "test",
        "name": "test",
        "summary": "summary",
        "min_age": 0,
        "max_age": 0,
        "min_grade": 0,
        "max_grade": 0,
        "price": "0",
        "incr_price": "0",
        "categories": None,
        "special_need": True,
        "details": "",
        "inactive": False,
        "overnight": True
    }]
    data = json.dumps(data)
    programs = Provider.objects.get(id=id).programs.all()
    self.assertEqual(len(programs), 0)
    resp = self.client.patch(
        '/api/provider/%s/program' % id,
        content_type='application/json',
        data=data)
    self.assertEqual(resp.status_code, 200)
    # Exactly one program was created for the provider.
    programs = Provider.objects.get(id=id).programs.all()
    self.assertEqual(len(programs), 1)
def test_program_update(self):
    """Programs can be updated via PATCH with JSON or CSV payloads;
    category references must exist or the request fails with 400."""
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 0)
    self.create_provider()
    self.login_user()
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 1)
    provider_id = providers[0].id
    data = {
        "provider_id": provider_id,
        "program_code": "test",
        "name": "test",
        "summary": "summary",
        "min_age": 0,
        "max_age": 0,
        "min_grade": 0,
        "max_grade": 0,
        "price": "0",
        "incr_price": "0",
        "special_need": True,
        "details": "",
        "inactive": False,
        "overnight": True
    }
    programs = Program.objects.all()
    self.assertEqual(len(programs), 0)
    Program.objects.create(**data)
    programs = Program.objects.all()
    self.assertEqual(len(programs), 1)
    program = programs[0]
    program_id = program.id
    # JSON PATCH: update a scalar field (min_age 0 -> 11).
    data = [{
        "id": program_id,
        "program_code": "test",
        "min_age": 11
    }]
    data = json.dumps(data)
    self.assertEqual(program.min_age, 0)
    resp = self.client.patch(
        '/api/provider/%s/program' % provider_id,
        content_type='application/json',
        data=data)
    self.assertEqual(resp.status_code, 200)
    programs = Program.objects.all()
    program = programs[0]
    self.assertEqual(program.min_age, 11)
    categories = program.categories.all()
    self.assertEqual(len(categories), 0)
    # Referencing a category that does not exist yet: 400.
    data = [{
        "id": program_id,
        "program_code": "test",
        "categories": [{
            "id": 2,
            "name": "Board games"
        }]
    }]
    data = json.dumps(data)
    resp = self.client.patch(
        '/api/provider/%s/program' % provider_id,
        content_type='application/json',
        data=data)
    self.assertEqual(resp.status_code, 400)
    # Create the categories, then the same PATCH succeeds.
    data = {
        "id": 2,
        "name": "Board games"
    }
    Category.objects.create(**data)
    data = {
        "id": 3,
        "name": "Video games"
    }
    Category.objects.create(**data)
    categories = program.categories.all()
    self.assertEqual(len(categories), 0)
    data = [{
        "id": program_id,
        "program_code": "test",
        "categories": [{
            "id": 2,
            "name": "Board games"
        }]
    }]
    data = json.dumps(data)
    resp = self.client.patch(
        '/api/provider/%s/program' % provider_id,
        content_type='application/json',
        data=data)
    self.assertEqual(resp.status_code, 200)
    categories = program.categories.all()
    self.assertEqual(len(categories), 1)
    self.assertEqual(program.name, 'test')
    # CSV PATCH: rename the program (name -> name2).
    data = "id,program_code,name,summary,min_age,max_age,min_grade,max_grade,price,incr_price,categories,special_need,details,inactive,overnight\r\n %s,test,name2,test,,,,,,,,,,1," % program_id
    # inactive and summary fields are required for the CSV PATCH method,
    # which is inconsistent with the JSON content_type behaviour.
    resp = self.client.patch(
        '/api/provider/%s/program' % provider_id,
        content_type='text/csv',
        data=data)
    programs = Program.objects.all()
    program = programs[0]
    self.assertEqual(program.name, 'name2')
    # An empty categories column leaves the existing category intact.
    categories = program.categories.all()
    self.assertEqual(len(categories), 1)
    self.assertEqual(categories[0].id, 2)
    # CSV PATCH with a categories value replaces the category set.
    data = "id,program_code,name,summary,min_age,max_age,min_grade,max_grade,price,incr_price,categories,special_need,details,inactive,overnight\r\n %s,test,name2,test,,,,,,,3,,,1," % program_id
    resp = self.client.patch(
        '/api/provider/%s/program' % provider_id,
        content_type='text/csv',
        data=data)
    categories = program.categories.all()
    self.assertEqual(categories[0].id, 3)
def test_program_download(self):
    """CSV export of a provider's programs includes the categories
    column for a program that has a category."""
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 0)
    self.create_provider()
    self.login_user()
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 1)
    provider_id = providers[0].id
    data = {
        "provider_id": provider_id,
        "program_code": "test",
        "name": "test",
        "summary": "summary",
        "min_age": 0,
        "max_age": 0,
        "min_grade": 0,
        "max_grade": 0,
        "price": "0",
        "incr_price": "0",
        "special_need": True,
        "details": "",
        "inactive": False,
        "overnight": True
    }
    programs = Program.objects.all()
    self.assertEqual(len(programs), 0)
    Program.objects.create(**data)
    programs = Program.objects.all()
    self.assertEqual(len(programs), 1)
    program = programs[0]
    category = program.categories.create(name="test")
    category_id = category.id
    categories = program.categories.all()
    self.assertEqual(len(categories), 1)
    resp = self.client.get(
        '/api/provider/%s/program?format=csv' % provider_id,
        content_type='text/csv')
    # FIX: renamed the local from `csv` — it shadowed the imported csv
    # module.
    csv_body = resp.content
    headers, program_row, end_line = csv_body.split('\r\n')
    # Column index 10 is `categories` (see the CSV header used in
    # test_program_update).
    category_field = program_row.split(',')[10]
    # FIX: assertIsNot(x, '') is an identity check that practically
    # always passes; assertNotEqual expresses the intended "column is
    # not empty" check.
    self.assertNotEqual(category_field, '')
def test_waitlist(self):
    """With a single-seat session, the first child's schedule item has
    an available seat; once seats_sold is bumped, a second child's item
    reports no available seat (i.e. would go to the waitlist)."""
    self.create_provider()
    user = self.login_user()
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 1)
    provider_id = providers[0].id
    data = {
        "provider_id": provider_id,
        "program_code": "test",
        "name": "test",
        "summary": "summary",
        "min_age": 0,
        "max_age": 0,
        "min_grade": 0,
        "max_grade": 0,
        "price": "0",
        "incr_price": "0",
        "special_need": True,
        "details": "",
        "inactive": False,
        "overnight": True
    }
    Program.objects.create(**data)
    session_start = date(2015,1,1)
    program = Program.objects.all()[0]
    # Only one seat in the whole session.
    session = program.sessions.create(
        provider=providers[0],
        seats_quota=1,
        start_date=session_start,
        days_of_week=1,
        start_time="10:00",
        end_time="17:00",
        cancelled=False,
        price=0)
    # First parent enrolls a child: a seat is still free.
    user = self.login_user(1)
    child1 = user.children.all().create(
        first_name="test",
        last_name="test",
        birth_date_estimated=False,
        special_needs=False,
        dietary_restrictions=False,
        allergy=False,
        physical_restrictions=False)
    schedule_item1 = child1.schedule_items.all().create(
        unenrolled=False,
        child=child1,
        session=session,
        vacation=False,
        cancelled_by_parent=False)
    self.assertEqual(schedule_item1.has_available_seat(), True)
    # TODO: make a proper request here
    # Simulate the seat being sold.
    session.seats_sold += 1
    session.save()
    # Second parent's child finds the session full.
    user = self.login_user(2)
    child2 = user.children.all().create(
        first_name="test2",
        last_name="test2",
        birth_date_estimated=False,
        special_needs=False,
        dietary_restrictions=False,
        allergy=False,
        physical_restrictions=False)
    schedule_item2 = child2.schedule_items.all().create(
        unenrolled=False,
        child=child2,
        session=session,
        vacation=False,
        cancelled_by_parent=False)
    self.assertEqual(schedule_item2.has_available_seat(), False)
def test_roster_and_attendance(self):
    """Roster CSV and attendance endpoints: accessible to the provider
    owner and an assigned instructor, forbidden (403) for anyone else.

    NOTE(review): uses the Python-2-only dict.has_key() API.
    """
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 0)
    self.create_provider()
    user = self.login_user()
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 1)
    provider_id = providers[0].id
    data = {
        "provider_id": provider_id,
        "program_code": "test",
        "name": "test",
        "summary": "summary",
        "min_age": 0,
        "max_age": 0,
        "min_grade": 0,
        "max_grade": 0,
        "price": "0",
        "incr_price": "0",
        "special_need": True,
        "details": "",
        "inactive": False,
        "overnight": True
    }
    programs = Program.objects.all()
    self.assertEqual(len(programs), 0)
    Program.objects.create(**data)
    programs = Program.objects.all()
    self.assertEqual(len(programs), 1)
    session_start = date(2015,1,1)
    program = programs[0]
    session = program.sessions.create(
        provider=providers[0],
        seats_quota=5,
        start_date=session_start,
        days_of_week=1,
        start_time="10:00",
        end_time="17:00",
        cancelled=False)
    # Enroll one paid child so the roster is non-empty.
    child = user.children.all().create(
        first_name="test",
        last_name="test",
        birth_date_estimated=False,
        special_needs=False,
        dietary_restrictions=False,
        allergy=False,
        physical_restrictions=False)
    purchase = Purchase.objects.create(
        session=session,
        captured=True,
        user=user,
        incr_seats=1,
        live=True)
    schedule_item = child.schedule_items.all().create(
        paid=True,
        unenrolled=False,
        child=child,
        session=session,
        purchase=purchase,
        vacation=False,
        cancelled_by_parent=False)
    # Provider owner: roster download and attendance read/write work.
    resp = self.client.get('/api/provider/%s/roster.csv' % provider_id, content_type='text/csv')
    # FIX: renamed the local from `csv` — it shadowed the imported csv
    # module.
    csv_body = resp.content
    headers, roster, end_line = csv_body.split('\r\n')
    self.assertIsNot(roster, None)
    resp = self.client.get('/api/attendance/%d/%s' % (
        session.pk, session_start.strftime('%Y-%m-%d')))
    self.assertEqual(resp.status_code, 200)
    data = json.loads(resp.content)
    resp = self.client.post('/api/attendance/%d/%s' % (
        session.pk, session_start.strftime('%Y-%m-%d')),
        json.dumps(data),
        content_type='application/json')
    self.assertEqual(resp.status_code, 201)
    # instructor roster
    instructor = self.login_user(1)
    assignment = InstructorAssignment.objects.create(
        session=session,
        class_date=session_start,
        instructor_email=instructor.email)
    resp = self.client.get('/api/instructor-assignments')
    self.assertEqual(resp.status_code, 200)
    data = json.loads(resp.content)
    self.assertEqual(len(data), 1)
    self.assertEqual(data[0].has_key('children'), True)
    self.assertEqual(len(data[0]['children']), 1)
    resp = self.client.get('/api/attendance/%d/%s' % (
        session.pk, session_start.strftime('%Y-%m-%d')))
    self.assertEqual(resp.status_code, 200)
    data = json.loads(resp.content)
    resp = self.client.post('/api/attendance/%d/%s' % (
        session.pk, session_start.strftime('%Y-%m-%d')),
        json.dumps(data),
        content_type='application/json')
    self.assertEqual(resp.status_code, 201)
    # someone else: no assignments, attendance forbidden
    user = self.login_user(2)
    resp = self.client.get('/api/instructor-assignments')
    self.assertEqual(resp.status_code, 200)
    data = json.loads(resp.content)
    self.assertEqual(len(data), 0)
    resp = self.client.get('/api/attendance/%d/%s' % (
        session.pk, session_start.strftime('%Y-%m-%d')))
    self.assertEqual(resp.status_code, 403)
    resp = self.client.post('/api/attendance/%d/%s' % (
        session.pk, session_start.strftime('%Y-%m-%d')),
        json.dumps({}),
        content_type='application/json')
    self.assertEqual(resp.status_code, 403)
def test_csv_upload(self):
    """Uploading the location/program/session CSV templates via PATCH
    creates the expected numbers of objects (2 locations, 2 programs,
    4 sessions) and sets session periods from the template data."""
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 0)
    self.create_provider()
    self.login_user()
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 1)
    provider = providers[0]
    provider_id = provider.id
    data = ""
    with open(CSV_TEMPLATES_DIR + 'location.csv', 'rb') as csvfile:
        data = csvfile.read()
    resp = self.client.patch(
        '/api/provider/%s/location' % provider_id,
        content_type='text/csv',
        data=data)
    self.assertEqual(resp.status_code, 200)
    locations = Location.objects.all()
    self.assertEqual(len(locations), 2)
    data = ""
    with open(CSV_TEMPLATES_DIR + 'program.csv', 'rb') as csvfile:
        data = csvfile.read()
    resp = self.client.patch(
        '/api/provider/%s/program' % provider_id,
        content_type='text/csv',
        data=data)
    self.assertEqual(resp.status_code, 200)
    programs = Program.objects.all()
    self.assertEqual(len(programs), 2)
    data = ""
    with open(CSV_TEMPLATES_DIR + 'session.csv', 'rb') as csvfile:
        data = csvfile.read()
    resp = self.client.patch(
        '/api/provider/%s/session' % provider_id,
        content_type='text/csv',
        data=data)
    self.assertEqual(resp.status_code, 200)
    sessions = Session.objects.all()
    self.assertEqual(len(sessions), 4)
    # Period values come from the template rows — presumably template
    # row 1 sets period 1 and row 2 sets period 0; confirm in the CSV.
    session = sessions[0]
    self.assertEqual(session.period, 1)
    session = sessions[1]
    self.assertEqual(session.period, 0)
def test_provider_directory_social_tags(self):
    """The public provider-directory page exposes meta title/description
    (used for social sharing tags) matching the provider's name and
    summary."""
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 0)
    self.create_provider()
    user = self.login_user()
    providers = Provider.objects.all()
    self.assertEqual(len(providers), 1)
    provider = providers[0]
    provider_id = provider.id
    data = {
        "provider_id": provider_id,
        "program_code": "test",
        "name": "test",
        "summary": "summary",
        "min_age": 0,
        "max_age": 0,
        "min_grade": 0,
        "max_grade": 0,
        "price": "0",
        "incr_price": "0",
        "special_need": True,
        "details": "",
        "inactive": False,
        "overnight": True
    }
    programs = Program.objects.all()
    self.assertEqual(len(programs), 0)
    Program.objects.create(**data)
    programs = Program.objects.all()
    self.assertEqual(len(programs), 1)
    program = Program.objects.all()[0]
    # URL shape: /providerDirectory/US/<state>/<city>/<name>-<md5sum>
    resp = self.client.get(
        '/providerDirectory/US/%s/%s/%s-%s' % (
            provider.business_state, provider.business_city,
            provider.name, provider.md5sum
        )
    )
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.context['meta']['title'], provider.name)
    self.assertEqual(resp.context['meta']['description'], provider.summary)
"""
# We don't need e2e tests at the moment
# Commenting to improve performance
class LiveTest(LiveServerTestCase):
#fixtures = ['user-data.json']
def setUp(self):
self.live_test_server_url = 'http://127.0.0.1:9000/app'
chrome_path = 'node_modules/protractor/selenium/chromedriver'
self.browser = webdriver.Chrome(chrome_path)
User.objects.create_superuser(
first_name='admin',
password='admin',
email='admin@example.com'
)
super(LiveTest, self).setUp()
def tearDown(self):
self.browser.quit()
super(LiveTest, self).tearDown()
def test_login(self):
driver = self.browser
driver.maximize_window()
notification = Notification.objects.create()
provider = Provider.objects.create(
name='test',
summary='summary',
details='details',
business_street='street',
business_city='city',
business_state='WS',
business_zip='12345',
discount_unit='%',
notification=notification
)
AuthorizedUser.objects.create(
provider=provider,
email='admin@example.com'
)
Location.objects.create(
provider=provider,
street='street',
city='Bellevue',
state='WA',
location_shorthand='location',
name='name'
)
Program.objects.create(
provider=provider,
program_code='test_code',
name='name',
summary='summary'
)
driver.get('%s%s' % (self.live_test_server_url, '#/provider/'))
#LOGGING IN
element = WebDriverWait(driver, 2).until(
EC.element_to_be_clickable((By.XPATH, '//*[@id="navbar"]/div/div/div[1]/a[2]'))
)
#TODO: in master we have different paths, FIX
driver.find_element_by_xpath('//*[@id="navbar"]/div/div/div[1]/a[2]').click()
driver.find_element_by_xpath('/html/body/div[5]/div/div/div[1]/form/div[2]/div[1]/input').clear()
driver.find_element_by_xpath('/html/body/div[5]/div/div/div[1]/form/div[2]/div[1]/input').send_keys('admin@example.com')
driver.find_element_by_xpath('/html/body/div[5]/div/div/div[1]/form/div[2]/div[2]/input').clear()
driver.find_element_by_xpath('/html/body/div[5]/div/div/div[1]/form/div[2]/div[2]/input').send_keys('admin')
driver.find_element_by_xpath('/html/body/div[5]/div/div/div[1]/form/div[2]/button').click()
element = WebDriverWait(driver, 2).until(
EC.element_to_be_clickable((By.CSS_SELECTOR, '.dropdown-toggle'))
)
driver.find_element_by_xpath('/html/body/div[2]/div/accordion/div/div[5]/div[1]/h4/a/div/b').click()
element = WebDriverWait(driver, 2).until(
EC.element_to_be_clickable((By.XPATH, '//*[@id="sections-search-results"]/fieldset/div/form/div[1]/div[1]/button'))
)
driver.find_element_by_xpath('//*[@id="sections-search-results"]/fieldset/div/form/div[1]/div[1]/button').click()
Select(driver.find_element_by_xpath("//table[@id='sessionsTable']/tbody/tr/td[3]/select")).select_by_visible_text("name")
Select(driver.find_element_by_xpath("//table[@id='sessionsTable']/tbody/tr/td[6]/select")).select_by_visible_text("location")
driver.find_element_by_xpath("//table[@id='sessionsTable']/tbody/tr/td[7]").click()
driver.find_element_by_xpath("(//input[@type='text'])[24]").clear()
driver.find_element_by_xpath("(//input[@type='text'])[24]").send_keys("11")
driver.find_element_by_xpath("//table[@id='sessionsTable']/tbody/tr/td[10]").click()
driver.find_element_by_xpath("(//button[@type='button'])[16]").click()
driver.find_element_by_xpath("//table[@id='sessionsTable']/tbody/tr/td[13]").click()
driver.find_element_by_xpath("(//input[@type='text'])[28]").clear()
driver.find_element_by_xpath("(//input[@type='text'])[28]").send_keys("11")
driver.find_element_by_xpath("//table[@id='sessionsTable']/tbody/tr/td[14]").click()
driver.find_element_by_xpath("(//input[@type='text'])[29]").clear()
driver.find_element_by_xpath("(//input[@type='text'])[29]").send_keys("12")
driver.find_element_by_xpath("(//button[@type='button'])[6]").click()
driver.find_element_by_xpath("(//button[@type='button'])[10]").click()
driver.find_element_by_xpath("//table[@id='sessionsTable']/tbody/tr/td[16]").click()
driver.find_element_by_xpath("(//input[@type='text'])[30]").clear()
driver.find_element_by_xpath("(//input[@type='text'])[30]").send_keys("11")
element = WebDriverWait(driver, 2).until(
EC.element_to_be_clickable((By.XPATH, '//*[@id="sections-search-results"]/fieldset/div/form/div[1]/div[1]'))
)
driver.find_element_by_xpath('//*[@id="sections-search-results"]/fieldset/div/form/div[1]/div[1]').click()
driver.find_element_by_xpath('//*[@id="sections-search-results"]/fieldset/div/form/div[1]/div[1]/save-revert-buttons/button[1]').click()
import time
time.sleep(2)
session = Session.objects.get(id=1)
self.assertEqual(session.seats_quota, 11)
"""
|
import unittest
from solution import Solution
class TestStringMethods(unittest.TestCase):
    """01-matrix tests: each cell's distance to the nearest zero."""

    def test(self):
        # The single 1 is adjacent to zeros, so the matrix maps to itself.
        solver = Solution()
        grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
        expected = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
        self.assertEqual(solver.updateMatrix(grid), expected)

    def test2(self):
        # The centre of the bottom row is two steps from any zero.
        solver = Solution()
        grid = [[0, 0, 0], [0, 1, 0], [1, 1, 1]]
        expected = [[0, 0, 0], [0, 1, 0], [1, 2, 1]]
        self.assertEqual(solver.updateMatrix(grid), expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from pycocotools import mask as maskUtils
import json
import cv2
import os
import numpy as np
# Model detection results (COCO-style JSON dump of segmentations).
ff = open("output.pkl.json")
data = json.load(ff)
print(len(data))
#print(data[0].keys())
# Input validation frames, ground-truth annotations, output directory.
valid_dir = "data/valid/JPEGImages/421/"
valid_annotations= "data/annotations/instances_val_sub.json"
save_dir = "data/save/"
def apply_mask(image, mask, color, alpha=0.5):
    """Blend *color* into *image* wherever *mask* == 1.

    Masked pixels become ``(1 - alpha) * pixel + alpha * color[c] * 255``
    per channel; unmasked pixels are untouched.  The image is modified
    in place and also returned for chaining.
    """
    for channel in range(3):
        original = image[:, :, channel]
        blended = original * (1 - alpha) + alpha * color[channel] * 255
        image[:, :, channel] = np.where(mask == 1, blended, original)
    return image
# Overlay the predicted pancreas (green) and cancer (red; colours are in
# BGR order for cv2) masks on every validation frame and save composites.
# FIX: removed the `padded_mask` arrays that were built and never used.
i = 0
gt_file = open(valid_annotations,'r')
data_gt = json.load(gt_file)  # ground truth, consumed by the loop below
for path in sorted(os.listdir(valid_dir)):
    im = cv2.imread(os.path.join(valid_dir, path))
    # data[2]/data[3] hold the per-frame RLE segmentations for the two
    # classes — presumably pancreas and cancer respectively; confirm
    # against the detection dump's category_id fields.
    segm_pancreas = data[2]['segmentations'][i]
    segm_cancer = data[3]['segmentations'][i]
    #red is cancer predicted
    #green is pancreas predicted
    if segm_pancreas:
        mask = maskUtils.decode(segm_pancreas)
        im = apply_mask(im, mask, (0.0, 1, 0.0)).astype(np.uint8)
    if segm_cancer:
        mask = maskUtils.decode(segm_cancer)
        im = apply_mask(im, mask, (0.0, 0.0, 1)).astype(np.uint8)
    cv2.putText(im, "Prediction", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
    cv2.imwrite(os.path.join(save_dir, str(i) + ".jpg"), im)
    i = i + 1
"""
print(data[0]["score"])
print(data[0]["category_id"])
print(data[1]["score"])
print(data[1]["category_id"])
print(data[2]["score"])
print(data[2]["category_id"])
print(data[3]["score"])
print(data[3]["category_id"])
print(data[4]["score"])
print(data[4]["category_id"])
print(data[5]["score"])
print(data[5]["category_id"])
0.18806762993335724
1
0.30180230736732483
1
0.7768524289131165
1
0.22801423072814941
2
0.1430695354938507
2
"""
# Overlay the ground-truth pancreas (green) and cancer (red) masks on the
# same frames for side-by-side comparison with the predictions.
# FIX: removed the `padded_mask` arrays that were built and never used.
i = 0
pancreas = data_gt["annotations"][0]
cancer = data_gt["annotations"][1]
for path in sorted(os.listdir(valid_dir)):
    im_gt = cv2.imread(os.path.join(valid_dir, path))
    ht, wt = im_gt.shape[:2]
    segm = pancreas['segmentations'][i]
    if segm:
        # Polygon segmentation -> RLE -> binary mask.
        rles = maskUtils.frPyObjects(segm, ht, wt)
        rle = maskUtils.merge(rles)
        mask = maskUtils.decode(rle)
        im_gt = apply_mask(im_gt, mask, (0.0, 1, 0.0)).astype(np.uint8)
    segm = cancer['segmentations'][i]
    if segm:
        rles = maskUtils.frPyObjects(segm, ht, wt)
        rle = maskUtils.merge(rles)
        mask = maskUtils.decode(rle)
        im_gt = apply_mask(im_gt, mask, (0.0, 0.0, 1)).astype(np.uint8)
    cv2.putText(im_gt, "Ground Truth", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
    cv2.imwrite(os.path.join("data/gt", str(i) + ".jpg"), im_gt)
    i = i + 1
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class CurrenciesConfig(AppConfig):
    """Django app configuration for the currencies app."""

    # Dotted path of the application this config belongs to.
    name = "nucoro_currency.currencies"
    verbose_name = _("Currencies")

    def ready(self):
        """Register signal handlers once the app registry is loaded."""
        try:
            # Importing the module registers its receivers as a side
            # effect; the module is optional.
            import nucoro_currency.currencies.signals  # noqa F401
        except ImportError:
            pass
|
# -*- coding: utf-8 -*-
from json import dumps
from django.core.urlresolvers import reverse
from django.conf import settings
from django.forms.formsets import formset_factory
from django.http import Http404
from django.shortcuts import (render_to_response, get_object_or_404,
redirect, HttpResponse)
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.utils.datastructures import SortedDict
from django.utils.translation import gettext as _
from guardian.decorators import permission_required
from data.models import Data, MediaNode
from data.forms import (NodeForm, RelationshipForm, TypeBaseFormSet,
MediaFileFormSet, MediaLinkFormSet,
ItemDeleteConfirmForm,
ITEM_FIELD_NAME)
from graphs.models import Graph
from schemas.models import NodeType, RelationshipType
def search(request, graph_slug, node_type_id=None,
           relationship_type_id=None):
    """Search a graph's nodes by the ``q`` GET parameter.

    Builds one "contains" lookup per (node type, property) pair and renders
    the results grouped by node type.

    NOTE(review): ``relationship_type_id`` is accepted but never used here.
    NOTE(review): ``bool(data.get("display", True))`` is True for ANY
    non-empty string (including "false"); confirm that is intended.
    """
    graph = get_object_or_404(Graph, slug=graph_slug)
    data = request.GET.copy()
    results = []
    if data:
        q = data.get("q", "")
        display = bool(data.get("display", True))
        if node_type_id:
            # Restrict to one node type, guarding that it belongs to this
            # graph's schema.
            node_types = NodeType.objects.filter(id=node_type_id,
                                                 schema__graph=graph)
        else:
            node_types = graph.schema.nodetype_set.all()
        for node_type in node_types:
            result = {}
            result["type"] = node_type
            result["key"] = node_type.name
            query_list = []
            if display:
                # Prefer properties flagged for display; fall back to the
                # first two properties when none are flagged.
                properties = node_type.properties.filter(display=True)
                if not properties:
                    properties = node_type.properties.all()[:2]
            else:
                properties = node_type.properties.all()
            for prop in properties:
                query = {
                    "property": prop.key,
                    "match": q,
                    "lookup": "contains",
                }
                query_list.append(query)
            nodes = graph.nodes.filter(label=node_type.id, *query_list)
            result["list"] = nodes
            results.append(result)
    return render_to_response('search_results.html', {
        "graph": graph,
        "results": results,
    }, context_instance=RequestContext(request))
@permission_required("data.view_data", (Data, "graph__slug", "graph_slug"))
def graph_search(request, graph_slug):
    """Permission-guarded search across all node types of a graph."""
    return search(request, graph_slug)
@permission_required("data.view_data", (Data, "graph__slug", "graph_slug"))
def graph_nodetype_search(request, graph_slug, node_type_id=None):
    """Permission-guarded search restricted to one node type."""
    return search(request, graph_slug, node_type_id=node_type_id)
@permission_required("data.view_data", (Data, "graph__slug", "graph_slug"))
def graph_relationshiptype_search(request, graph_slug,
                                  relationship_type_id=None):
    """Permission-guarded search scoped to one relationship type.

    NOTE(review): search() currently ignores relationship_type_id.
    """
    return search(request, graph_slug,
                  relationship_type_id=relationship_type_id)
|
import labrad
import numpy as np
from matplotlib import pyplot
import lmfit
def expo_model(params, x):
    """Exponential ring-down model evaluated at x.

    params holds lmfit-style parameters (objects with a .value attribute):
    amplitude, time_offset, amplitude_offset, decay_time.
    """
    amp = params['amplitude'].value
    t0 = params['time_offset'].value
    offset = params['amplitude_offset'].value
    tau = params['decay_time'].value
    return amp * np.exp(-(x - t0) / tau) - offset
def expo_fit(params , x, data):
    """Residual function for lmfit: model evaluated at x minus the data."""
    return expo_model(params, x) - data
# Ring-down fit: load measured data, fit the exponential decay model,
# report fitted parameters, and plot data against the fitted curve.
data = np.genfromtxt("ring_down.csv",delimiter=",")
x_data = data[:,0]   # time (s)
y_data = data[:,1]   # signal amplitude
params = lmfit.Parameters()
params.add('amplitude', value = 6.0E-2)
params.add('time_offset', value = -1.5E-5)
params.add('amplitude_offset', value = 1E-2)
params.add('decay_time', value = 1.5E-5, min=0.0)  # decay time must stay non-negative
result = lmfit.minimize(expo_fit, params, args = (x_data, y_data))
# residual = model - data, so data + residual reconstructs the fitted values.
fit_values = y_data + result.residual
lmfit.report_errors(params)  # NOTE(review): deprecated in newer lmfit; prefer lmfit.fit_report
x_data_theory = np.linspace(-1.18E-5,4.50E-5,10000)
# Parenthesized so this line also runs on Python 3 (it was a Python-2-only
# print statement, a SyntaxError under Python 3).
print(params['decay_time']*1000000)
figure = pyplot.figure(1)
figure.clf()
pyplot.plot(x_data*1000000, y_data,'o',markersize = 3.0)
pyplot.plot(x_data_theory*1000000, expo_model(params,x_data_theory),'-',linewidth = 3.0)
#pyplot.plot(data[:,0], data[:,2],'o-')
pyplot.show()
|
# 6.0001 Spring 2020
# Problem Set 3
# Written by: sylvant, muneezap, charz, anabell, nhung, wang19k, asinelni, shahul, jcsands
# Problem Set 3
# Name: Gyalpo Dongo
# Collaborators: Paterne Byiringiro
# Time Spent: 4:00
# Late Days Used: (only if you are using any)
import string
# - - - - - - - - - -
# Check for similarity by comparing two texts to see how similar they are to each other
### Problem 1: Prep Data ###
# Make a *small* change to separate the data by whitespace rather than just tabs
def load_file(filename):
    """
    Args:
        filename: string, name of file to read
    Returns:
        list of strings holding the file contents where
        each string was separated by whitespace in the file
    """
    # Context manager guarantees the handle is closed even if read() raises
    # (the original left the file open on error).
    with open(filename, 'r') as in_file:
        line = in_file.read()
    line = line.strip().lower()
    # Strip punctuation so "world!" and "world" count as the same word.
    for char in string.punctuation:
        line = line.replace(char, "")
    # split() with no argument splits on any run of whitespace.
    return line.split()
### Problem 2: Find Ngrams ###
def find_ngrams(single_words, n):
    """
    Args:
        single_words: list of words in the text, in the order they appear in the text
            all words are made of lowercase characters
        n: length of 'n-gram' window
    Returns:
        list of n-grams from input text list, or an empty list if n is not a valid value
    """
    # Invalid window sizes produce an empty list.
    if n <= 0 or n > len(single_words):
        return []
    # A 1-gram list is the word list itself (same object, matching the
    # original behavior).
    if n == 1:
        return single_words
    # One n-gram per window start; the last window ends exactly on the
    # final word, hence len(single_words) - n + 1 starting positions.
    return [' '.join(single_words[start:start + n])
            for start in range(len(single_words) - n + 1)]
### Problem 3: Word Frequency ###
def compute_frequencies(words):
    """
    Args:
        words: list of words (or n-grams), all are made of lowercase characters
    Returns:
        dictionary that maps string:int where each string
        is a word (or n-gram) in words and the corresponding int
        is the frequency of the word (or n-gram) in words
    """
    counts = {}
    for word in words:
        # get() folds the "first occurrence" and "seen before" cases into one.
        counts[word] = counts.get(word, 0) + 1
    return counts
### Problem 4: Similarity ###
def get_similarity_score(dict1, dict2, dissimilarity = False):
    """
    The keys of dict1 and dict2 are all lowercase,
    you will NOT need to worry about case sensitivity.

    Args:
        dict1: frequency dictionary of words or n-grams for one text
        dict2: frequency dictionary of words or n-grams for another text
        dissimilarity: Boolean, optional parameter. Default to False.
            If this is True, return the dissimilarity score, 100*(DIFF/ALL), instead.
    Returns:
        int, a percentage between 0 and 100, inclusive
        representing how similar the texts are to each other

        DIFF sums, over every word/n-gram in either dict, the absolute
        difference of its two frequencies (a key missing from one dict
        counts as frequency 0, which covers the "only in dict1" and
        "only in dict2" cases).  ALL is the sum of all frequencies in
        both dicts.  Returns round(100*(1-(DIFF/ALL))), or
        round(100*(DIFF/ALL)) when dissimilarity is True.
    """
    # One dict.get lookup per distinct key replaces the original's
    # O(n*m) nested scans over both dictionaries.
    DIFF = sum(abs(dict1.get(key, 0) - dict2.get(key, 0))
               for key in set(dict1) | set(dict2))
    ALL = sum(dict1.values()) + sum(dict2.values())
    if dissimilarity:
        return round(100 * (DIFF / ALL))
    return round(100 * (1 - (DIFF / ALL)))
### Problem 5: Most Frequent Word(s) ###
def compute_most_frequent(dict1, dict2):
    """
    The keys of dict1 and dict2 are all lowercase,
    you will NOT need to worry about case sensitivity.

    Args:
        dict1: frequency dictionary for one text
        dict2: frequency dictionary for another text
    Returns:
        list of the most frequent word(s) in the input dictionaries,
        alphabetically ordered, based on COMBINED frequency (a word in
        both dicts counts the sum of its two frequencies).
    """
    # Sum frequencies across both dicts.  The original compared per-dict
    # frequencies instead of combined sums, which picks the wrong word
    # whenever a word's summed frequency exceeds every individual one
    # (e.g. {'a':3,'b':2} vs {'b':2} must yield ['b'], combined 4 > 3).
    combined = {}
    for freq_dict in (dict1, dict2):
        for word, count in freq_dict.items():
            combined[word] = combined.get(word, 0) + count
    if not combined:
        return []
    # Hoisted out of the loop (the original recomputed max() per word).
    highest = max(combined.values())
    return sorted(word for word, count in combined.items() if count == highest)
### Problem 6: Finding closest artist ###
def find_closest_artist(artist_to_songfiles, mystery_lyrics, ngrams = 1):
    """
    Args:
        artist_to_songfiles:
            dictionary that maps string:list of strings
            where each string key is an artist name
            and the corresponding list is a list of filenames (including the extension),
            each holding lyrics to a song by that artist
        mystery_lyrics: list of single word strings
            Can be more than one or two words (can also be an empty list)
            assume each string is made of lowercase characters
        ngrams: int, optional parameter. Default set to 1.
            If it is greater than 1, n-grams of text in files
            and n-grams of mystery_lyrics should be used in analysis, with n
            set to the value of the parameter ngrams
    Returns:
        list of artists (in alphabetical order) that share the highest
        average similarity score (rounded) with the mystery lyrics.
        Empty list when no artist is given or every average score is zero.
    """
    if not artist_to_songfiles:
        return []
    # The mystery-lyrics frequency dict depends only on the inputs, so it is
    # computed ONCE here (the original rebuilt it for every song, repeating
    # the n-gram extraction and counting needlessly).
    if ngrams > 1:
        dict_mystery = compute_frequencies(find_ngrams(mystery_lyrics, ngrams))
    else:
        dict_mystery = compute_frequencies(mystery_lyrics)
    # Per-artist rounded average similarity of the mystery lyrics with the
    # artist's songs.
    artist_score = {}
    for artist, songfiles in artist_to_songfiles.items():
        sum_scores = 0
        for song in songfiles:
            if ngrams > 1:
                dict_song = compute_frequencies(find_ngrams(load_file(song), ngrams))
            else:
                dict_song = compute_frequencies(load_file(song))
            sum_scores += get_similarity_score(dict_mystery, dict_song)
        artist_score[artist] = round(sum_scores / len(songfiles))
    best = max(artist_score.values())
    if best <= 0:
        # All-zero averages mean no meaningful match.
        return []
    return sorted(artist for artist, score in artist_score.items()
                  if score == best)
# Manual test driver: the lines below in the file are commented out;
# uncomment the relevant ones to exercise each problem.
if __name__ == "__main__":
    pass
##Uncomment the following lines to test your implementation
## Tests Problem 0: Prep Data
#test_directory = "tests/student_tests/"
#world, friend = load_file(test_directory + 'hello_world.txt'), load_file(test_directory + 'hello_friends.txt')
#print(world) ## should print ['hello', 'world', 'hello']
#print(friend) ## should print ['hello', 'friends']
## Tests Problem 1: Find Ngrams
#world_ngrams, friend_ngrams = find_ngrams(world, 2), find_ngrams(friend, 2)
#longer_ngrams = find_ngrams(world+world, 3)
#print(world_ngrams) ## should print ['hello world', 'world hello']
#print(friend_ngrams) ## should print ['hello friends']
#print(longer_ngrams) ## should print ['hello world hello', 'world hello hello', 'hello hello world', 'hello world hello']
## Tests Problem 2: Get frequency
#world_word_freq, world_ngram_freq = compute_frequencies(world), compute_frequencies(world_ngrams)
#friend_word_freq, friend_ngram_freq = compute_frequencies(friend), compute_frequencies(friend_ngrams)
#print(world_word_freq) ## should print {'hello': 2, 'world': 1}
#print(world_ngram_freq) ## should print {'hello world': 1, 'world hello': 1}
#print(friend_word_freq) ## should print {'hello': 1, 'friends': 1}
#print(friend_ngram_freq) ## should print {'hello friends': 1}
## Tests Problem 3: Similarity
#word_similarity = get_similarity_score(world_word_freq, friend_word_freq)
#ngram_similarity = get_similarity_score(world_ngram_freq, friend_ngram_freq)
#print(word_similarity) ## should print 40
#print(ngram_similarity) ## should print 0
## Tests Problem 4: Most Frequent Word(s)
#freq1, freq2 = {"hello":5, "world":1}, {"hello":1, "world":5}
#most_frequent = compute_most_frequent(freq1, freq2)
#print(most_frequent) ## should print ["hello", "world"]
## Tests Problem 5: Find closest matching artist
#test_directory = "tests/student_tests/"
#artist_to_songfiles_map = {
#"artist_1": [test_directory + "artist_1/song_1.txt", test_directory + "artist_1/song_2.txt", test_directory + "artist_1/song_3.txt"],
#"artist_2": [test_directory + "artist_2/song_1.txt", test_directory + "artist_2/song_2.txt", test_directory + "artist_2/song_3.txt"],
#}
#mystery_lyrics = load_file(test_directory + "mystery_lyrics/mystery_5.txt") # change which number mystery lyrics (1-5)
#print(find_closest_artist(artist_to_songfiles_map, mystery_lyrics, ngrams = 1)) # should print ['artist_1']
|
import tkinter
import tkinter
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
from tkinter import Menu
from PIL import ImageTk,Image
import cv2
import PIL.Image, PIL.ImageTk
import pygame
import pygame.camera
import keras
import numpy as np
import matplotlib
matplotlib.use('TkAgg') # Use only for MAC OS
import matplotlib.pyplot as pyplot
from scipy.misc import toimage
from keras.models import model_from_json
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array, array_to_img
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
# Main application window with a full-window background image on a canvas.
StartWindow = tkinter.Tk()
StartWindow.title("ObjectAI- AN AI PROJECT PROTOTYPE")
b1name = "nowBack.gif"
bg_image = tkinter.PhotoImage(file=b1name)
w = bg_image.width()
h = bg_image.height()
StartWindow.geometry("1013x568")
cv = tkinter.Canvas(width=w, height=h)
cv.pack(side='top', fill='both', expand='yes')
cv.create_image(0, 0, image=bg_image, anchor='nw')
# Menu bar: only File->Exit is wired to a command; the remaining entries
# are placeholders.
menubar = Menu(StartWindow)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open", )
filemenu.add_command(label="New", )
filemenu.add_command(label="Save", )
filemenu.add_separator()
filemenu.add_command(label="Exit", command=StartWindow.quit)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label="Undo")
editmenu.add_command(label="Redo")
editmenu.add_command(label="Preferences")
menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="About")
menubar.add_cascade(label="Help", menu=helpmenu)
StartWindow.config(menu=menubar)
# Accepted image types for the file-picker dialog.
ftypes = [
    ("Image Files", "*.jpg; *.gif"),
    ("JPEG", '*.jpg'),
    ("GIF", '*.gif'),
    ('All', '*')
]
def chooseFile():
    """Open a file picker and store the chosen path on the window object.

    NOTE(review): `filename` here is a LOCAL variable that is discarded on
    return; the module-level prediction code below reads a global
    `filename` that this function never sets -- confirm intended wiring.
    """
    StartWindow.sourceFile = filedialog.askopenfilename(parent=StartWindow, initialdir="/",
                                                        title='Please select a file', filetypes=ftypes)
    filename= StartWindow.sourceFile
##this is the previous prediction.py integrated as a event for the button
def show_imgs(X):
    """Display the first 16 images of X in a 4x4 grid of subplots."""
    pyplot.figure(1)
    idx = 0
    for row in range(0, 4):
        for col in range(0, 4):
            pyplot.subplot2grid((4, 4), (row, col))
            pyplot.imshow(toimage(X[idx]))
            idx += 1
    # show the plot
    pyplot.show()
# --- CIFAR-10 prediction pipeline ------------------------------------------
# NOTE(review): this block runs at import time, BEFORE any file is chosen,
# and reads `filename`, which only exists as a local inside chooseFile() --
# as written it raises NameError; it likely belongs inside chooseFile().
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# mean-std normalization (statistics taken over the whole training set)
mean = np.mean(x_train, axis=(0, 1, 2, 3))
std = np.std(x_train, axis=(0, 1, 2, 3))
x_train = (x_train - mean) / (std + 1e-7)
x_test = (x_test - mean) / (std + 1e-7)
#show_imgs(x_test[:16])
# Load trained CNN model (architecture from JSON, weights from HDF5).
# NOTE(review): json_file is closed manually; a `with` block would be safer.
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights('model.h5')
image = load_img(filename, target_size=(32, 32))
image = img_to_array(image)
# Add the batch dimension expected by model.predict: (1, H, W, C).
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
indices = np.argmax(model.predict(image))
print("The detected object in the image was a", labels[indices])
objectD = labels[indices]
messagebox.showinfo('ObjectAI- Result', 'The Object detected is: ' + objectD)
#print('saving output to output.jpg')
#indices = indices[0]
#indices_img = image.array_to_img(indices)
#indices_img.save('output.jpg')
#def takePic():
# cap = cv2.VideoCapture(0)
# Check if the webcam is opened correctly
# if not cap.isOpened():
## while True:
# ret, frame = cap.read()
## cv2.imshow('Input', frame)
##if c == 27:
# break
#cap.release()
#cv2.destroyAllWindows()
# Buttons: "Upload Image" triggers chooseFile; "Capture" has no command yet.
b_chooseFile = tkinter.Button(StartWindow, text ="Upload Image",bg="black",fg="white", width = 18, height = 1, command = chooseFile)
b_chooseFile.place(x = 439,y = 310, anchor='nw')
#b_chooseFile.width = 300
#b_chooseFile.pack(side='left', padx=200, pady=5, anchor='nw')
b_capture= tkinter.Button(StartWindow, text ="Capture",bg="black",fg="white", width = 18, height = 1)
b_capture.place(x = 439,y = 350, anchor='nw')
StartWindow.mainloop()
|
import tkinter as tk
from tkinter import messagebox
# Root window with a single entry box acting as the calculator display.
app = tk.Tk()
app.title('Calculator')
entry = tk.Entry(app)
entry.pack()
def calc():
    """Evaluate the expression in the entry box and replace it with the
    result; show a warning dialog and leave the entry unchanged on error."""
    #print("clicked")
    inp = entry.get()
    print(f"'{inp}'")
    try:
        # SECURITY NOTE(review): eval() executes arbitrary Python typed by
        # the user -- acceptable for a local toy, never for untrusted input
        # (consider ast.literal_eval or a real expression parser).
        out = eval(inp)
    except Exception as err:
        messagebox.showwarning(title = "Error", message = f"Could not do the computation {err}")
        return
    entry.delete(0, tk.END)
    entry.insert(0, out)
def close():
    """Destroy the root window, ending the mainloop."""
    app.destroy()
# Calculate / Close buttons wired to the handlers above, then start the UI.
calc_btn = tk.Button(app, text='Calculate', width=25, command=calc)
calc_btn.pack()
close_btn = tk.Button(app, text='Close', width=25, command=close)
close_btn.pack()
app.mainloop()
|
# Generated by Django 3.2 on 2021-04-17 11:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a `finished` boolean flag (default False) to the TodoItem model.
    dependencies = [
        ("backend", "0001_initial"),
    ]
    operations = [
        migrations.AddField(
            model_name="todoitem",
            name="finished",
            field=models.BooleanField(default=False),
        ),
    ]
|
## 138. Copy List with Random Pointer
#
# A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.
#
# Return a deep copy of the list.
##
# Definition for singly-linked list with a random pointer.
class RandomListNode(object):
    """Singly-linked list node with an extra `random` pointer."""
    def __init__(self, x):
        # Both pointers start detached; callers wire them up afterwards.
        self.label = x
        self.next = self.random = None
##################################
## IMPORTANT CONCEPT!
## how to do deep copy( like A = new SomeClass_or_Objects(), located @Heap_memory )
## and shallow copy ( like newVariable = oldVariable, located @stack_memory )
##################################
## reference
## https://leetcode.com/problems/copy-list-with-random-pointer/discuss/43485/Clear-and-short-python-O(2n)-and-O(n)-solution
## O(2n)
from collections import defaultdict
class Solution1(object):
    """Two-pass deep copy using an old-node -> clone dictionary, O(2n)."""
    def copyRandomList(self, head):
        """
        :type head: RandomListNode
        :rtype: RandomListNode
        """
        # Pass 1: allocate a clone for every original node.
        clones = dict()
        cur = head
        while cur:
            clones[cur] = RandomListNode(cur.label)
            cur = cur.next
        # Pass 2: wire next/random through the mapping; dict.get maps a
        # None pointer (tail / absent random) to None.
        cur = head
        while cur:
            clones[cur].next = clones.get(cur.next)
            clones[cur].random = clones.get(cur.random)
            cur = cur.next
        return clones.get(head)
## reference
## https://leetcode.com/problems/copy-list-with-random-pointer/discuss/43485/Clear-and-short-python-O(2n)-and-O(n)-solution
## O(n), using defaultdict()
class Solution2(object):
    """Single-pass deep copy: defaultdict lazily allocates clones, O(n)."""
    def copyRandomList(self, head):
        """
        :type head: RandomListNode
        :rtype: RandomListNode
        """
        # A clone is created the first time any original node is touched,
        # whether via next or random; None is pinned to None for tails.
        clones = defaultdict(lambda: RandomListNode(0))
        clones[None] = None
        cur = head
        while cur:
            clones[cur].label = cur.label
            clones[cur].next = clones[cur.next]
            clones[cur].random = clones[cur.random]
            cur = cur.next
        return clones[head]
## recursive approach, like graph
## time : O(N), space : O(N), asymptotically
## reference, Approach 1: Recursive
## https://leetcode.com/problems/copy-list-with-random-pointer/solution/
class Solution3(object):
    """
    :type head: RandomListNode
    :rtype: RandomListNode
    """
    def __init__(self):
        # Dictionary which holds old nodes as keys and new nodes as its values.
        self.visitedHash = {}
    def copyRandomList(self, head):
        """Recursively deep-copy the list; memoising clones in visitedHash
        makes cycles through `random` pointers terminate."""
        if head == None:
            return None
        # If we have already processed the current node, then we simply return the cloned version of it.
        if head in self.visitedHash:
            return self.visitedHash[head]
        # create a new node
        # with the label same as old node.
        node = RandomListNode(head.label)
        # Save this value in the hash map BEFORE recursing. This is needed since there might be
        # loops during traversal due to randomness of random pointers and this would help us avoid them.
        self.visitedHash[head] = node
        # Recursively copy the remaining linked list starting once from the next pointer and then from the random pointer.
        # Thus we have two independent recursive calls.
        # Finally we update the next and random pointers for the new node created.
        node.next = self.copyRandomList(head.next)
        node.random = self.copyRandomList(head.random)
        return node
## Iterative approach using dictionary
## time : O(N), space : O(N)
## reference, Approach 2: Iterative with O(N) space
## https://leetcode.com/problems/copy-list-with-random-pointer/solution/
class Solution4(object):
    """Iterative deep copy with an old-node -> clone dictionary, O(n) space."""
    def __init__(self):
        # Creating a visited dictionary to hold old node reference as "key" and new node reference as the "value"
        self.visited = {}
    def getClonedNode(self, node):
        """Return the clone of `node`, creating and caching it on first use;
        a None input maps to None."""
        # If node exists then
        if node:
            # Check if its in the visited dictionary
            if node in self.visited:
                # If its in the visited dictionary then return the new node reference from the dictionary
                return self.visited[node]
            else:
                # Otherwise create a new node, save the reference in the visited dictionary and return it.
                self.visited[node] = RandomListNode(node.label)
                return self.visited[node]
        return None
    def copyRandomList(self, head):
        """
        :type head: RandomListNode
        :rtype: RandomListNode
        """
        if not head:
            return head
        old_node = head
        # Creating the new head node.
        new_node = RandomListNode(old_node.label)
        self.visited[old_node] = new_node
        # Iterate on the linked list until all nodes are cloned.
        while old_node != None:
            # Get the clones of the nodes referenced by random and next pointers.
            new_node.random = self.getClonedNode(old_node.random)
            new_node.next = self.getClonedNode(old_node.next)
            # Move one step ahead in the linked list.
            old_node = old_node.next
            new_node = new_node.next
        return self.visited[head]
## iterative approach with O(1) space
## time : O(N), space : O(1)
## reference, Approach 3: Iterative with O(1) Space
## https://leetcode.com/problems/copy-list-with-random-pointer/solution/
class Solution5(object):
    """O(1)-extra-space deep copy by weaving clones into the original list."""
    def copyRandomList(self, head):
        """
        :type head: RandomListNode
        :rtype: RandomListNode
        """
        if not head:
            return head
        # Creating a new weaved list of original and copied nodes.
        ptr = head
        while ptr:
            # Cloned node
            new_node = RandomListNode(ptr.label)
            # Inserting the cloned node just next to the original node.
            # If A->B->C is the original linked list,
            # Linked list after weaving cloned nodes would be A->A'->B->B'->C->C'
            new_node.next = ptr.next
            ptr.next = new_node
            ptr = new_node.next
        ptr = head
        # Now link the random pointers of the new nodes created.
        # Iterate the newly created list and use the original nodes random pointers,
        # to assign references to random pointers for cloned nodes.
        # After weaving, ptr.random.next is exactly the CLONE of ptr.random.
        while ptr:
            ptr.next.random = ptr.random.next if ptr.random else None
            ptr = ptr.next.next
        # Unweave the linked list to get back the original linked list and the cloned list.
        # i.e. A->A'->B->B'->C->C' would be broken to A->B->C and A'->B'->C'
        # NOTE(review): despite the name, head_old is the NEW list's head
        # (head.next); head itself remains the original list's head.
        ptr_old_list = head # A->B->C
        ptr_new_list = head.next # A'->B'->C'
        head_old = head.next
        while ptr_old_list:
            ptr_old_list.next = ptr_old_list.next.next
            ptr_new_list.next = ptr_new_list.next.next if ptr_new_list.next else None
            ptr_old_list = ptr_old_list.next
            ptr_new_list = ptr_new_list.next
        return head_old
if __name__ == "__main__":
    # Demo: 5-node list, next chain 1->2->3->4->5, random pointers as wired
    # below (every node's random is non-None, so the print loop is safe).
    a1 = RandomListNode(1)
    a2 = RandomListNode(2)
    a3 = RandomListNode(3)
    a4 = RandomListNode(4)
    a5 = RandomListNode(5)
    a1.next, a1.random = a2, a4
    a2.next, a2.random = a3, a1
    a3.next, a3.random = a4, a2
    a4.next, a4.random = a5, a5
    a5.random = a3
    results, count = [], 1
    # Each solution produces an independent deep copy of the same source
    # list; Solution5 weaves and fully unweaves it, restoring the original.
    results.append(Solution1().copyRandomList(a1))
    results.append(Solution2().copyRandomList(a1))
    results.append(Solution3().copyRandomList(a1))
    results.append(Solution4().copyRandomList(a1))
    results.append(Solution5().copyRandomList(a1))
    for result in results:
        print("====== Solution", count, "======")
        count+=1
        node = result
        while node:
            # label of each node and of the node its random pointer targets
            print(node.label, node.random.label)
            node = node.next
|
import hashlib
import json
from urllib.parse import urlparse
import scrapy
from kingfisher_scrapy.base_spider import ZipSpider
class Malta(ZipSpider):
    """Kingfisher spider for Malta eTenders OCDS record packages."""
    name = 'malta'
    def start_requests(self):
        """Request the JSON list of per-month record-package URLs."""
        yield scrapy.Request(
            'http://demowww.etenders.gov.mt/ocds/services/recordpackage/getrecordpackagelist',
            meta={'kf_filename': 'start_requests'},
            callback=self.parse_list
        )
    def parse_list(self, response):
        """Queue one download per monthly package path from the listing."""
        if response.status == 200:
            url = 'http://demowww.etenders.gov.mt{}'
            json_data = json.loads(response.text)
            packages = json_data['packagesPerMonth']
            for package in packages:
                parsed = urlparse(package)
                path = parsed.path
                if path:
                    yield scrapy.Request(
                        url.format(path),
                        # Stable unique file name: MD5 of the package path.
                        meta={'kf_filename': hashlib.md5(path.encode('utf-8')).hexdigest() + '.json'}
                    )
                    # `sample` comes from the base spider -- presumably a
                    # flag to fetch only the first package; TODO confirm.
                    if self.sample:
                        break
        else:
            yield self.build_file_error_from_response(response)
    def parse(self, response):
        """Each download is a ZIP of record packages; delegate to the base."""
        yield from self.parse_zipfile(response, data_type='record_package')
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LinearRegression
import numpy
import pickle
class TechnicalRiskModel:
    """Linear-regression technical pricing model: reads CSV data, one-hot
    encodes categorical rating factors, trains, persists and scores."""

    def __init__(self):
        pass

    def data_reader(self, path):
        """Read the modelling dataset from a CSV file into a DataFrame."""
        self._path = path
        return pd.read_csv(self._path)

    def data_preprocessor(self, df, rating_factors):
        """Fit the column transformer and return (X, y, encoded column names).

        Assumes the claims/target column is the LAST column of df --
        TODO confirm against the data contract.
        """
        self._rating_factors_col = rating_factors
        self._claims_col = df.columns[-1]
        self._num_features = list(df[self._rating_factors_col].select_dtypes(include=["int64", "float64"]).columns)
        self._cat_features = [col for col in self._rating_factors_col if col not in self._num_features]
        # NOTE(review): `sparse=` was renamed `sparse_output=` in
        # scikit-learn >= 1.2; keep in sync with the pinned sklearn version.
        self._preprocessor = ColumnTransformer(
            transformers = [
                ("numerical", "passthrough", self._num_features),
                ("categorical", OneHotEncoder(sparse=False, handle_unknown="ignore"), self._cat_features)
            ]
        )
        X, y = df[self._rating_factors_col], df[self._claims_col]
        # Fit ONCE (the original fitted the transformer twice, doubling the
        # preprocessing cost) and derive encoded column names from that fit.
        self._preprocessor.fit(X)
        ohe_categories = self._preprocessor.named_transformers_["categorical"].categories_
        ohe_categories_concat = [f"{col}_{val}" for col, vals in zip(self._cat_features, ohe_categories) for val in vals]
        self._rating_factors_encoded = self._num_features + ohe_categories_concat
        X = self._preprocessor.transform(X)
        return X, y, self._rating_factors_encoded

    def train_tech_model(self, X, y):
        """Train the linear model on an 80/20 split and persist it to disk."""
        X_train, X_test, y_train, y_test = train_test_split(
            X,
            y,
            test_size=0.2,
            random_state=123
        )
        self.model = LinearRegression().fit(X_train, y_train)
        # TODO: add some output logs and statistics
        # TODO: add timestamps and string formatting in output
        filename = "latest_model.sav"
        # `with` guarantees the handle is closed (it was left open before).
        with open(filename, "wb") as model_file:
            pickle.dump(self.model, model_file)

    def score_tech_model(self, pricing_call, pricing_rule):
        """Score one pricing-call dict with the pickled model at pricing_rule."""
        with open(pricing_rule, "rb") as model_file:
            loaded_model = pickle.load(model_file)
        pricing_call_parsed = self._preprocessor.transform(pd.DataFrame(pricing_call, index=[0]))
        return loaded_model.predict(pricing_call_parsed)[0]
if __name__ == "__main__":
    import os
    import json
    # Smoke test: train on the local pricing CSV, then score one sample
    # pricing call mapped through the travel mapping table.
    # NOTE(review): hard-coded absolute Windows paths are machine-specific;
    # `os` is imported here but unused.
    src = TechnicalRiskModel()
    df = src.data_reader(r"C:\Users\jtsw1\Desktop\projects\pricing_api\data\pif_data.csv")
    X, y, col_names = src.data_preprocessor(df, ["destination_region", "ski_flag", "gender_code", "date_of_birth"])
    src.train_tech_model(X, y)
    with open(r"C:\Users\jtsw1\Desktop\projects\pricing_api\data\sample_travel_pricing_call.json") as f:
        call = json.load(f)
    # Map API field names (global_variable) to model data columns.
    mapping_table = pd.read_csv(r"C:\Users\jtsw1\Desktop\projects\pricing_api\mapping_tables\travel_mapping.csv")
    mapping_dict = dict(zip(mapping_table["global_variable"], mapping_table["data_column"]))
    call_mapped = {mapping_dict[key]: value for (key, value) in call.items()}
    premium = src.score_tech_model(call_mapped, "latest_model.sav")
|
"""
Django settings for dotaparty project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import secret
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = secret.DJANGO_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social.apps.django_app.default',
'huey.djhuey',
'compressor',
'core',
'community'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'api.middleware.VisitMiddleware'
)
ROOT_URLCONF = 'dotaparty.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'dotaparty.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': secret.PS_DB_NAME,
'USER': secret.PS_DB_USER,
'PASSWORD': secret.PS_DB_PW,
'HOST': secret.PS_DB_HOST,
'PORT': secret.PS_DB_PORT
}
}
'''DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}'''
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders''.CompressorFinder',
)
HUEY = {
'backend': 'huey.backends.redis_backend',
'name': 'dotaparty',
'connection': {'host': 'localhost', 'port': 6379},
'consumer_options': {'workers': 4,
'periodic_task_interval': 3 },
}
SOCIAL_AUTH_STEAM_API_KEY = secret.D2_API_KEYS[0]
LOGIN_REDIRECT_URL = '/'
AUTH_USER_MODEL = 'community.User'
SOCIAL_AUTH_STORAGE = 'community.models.CommunityStorage'
AUTHENTICATION_BACKENDS = (
'social.backends.steam.SteamOpenId',
'django.contrib.auth.backends.ModelBackend',
)
from core import tasks
from django.contrib import admin
from community.models import *
from core.models import *
admin.site.register(Report)
admin.site.register(Visit)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'normal': {
'format': '%(threadName)s %(asctime)s %(name)s %(levelname)s %(message)s'
}
},
'handlers': {
'file_downloader': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': secret.LOGGING['file_downloader'],
'maxBytes': 1024 * 1024,
'backupCount': 30,
'formatter': 'normal'
},
'file_tasks': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': secret.LOGGING['file_tasks'],
'maxBytes': 1024 * 1024,
'backupCount': 30,
'formatter': 'normal'
},
'file_valve': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': secret.LOGGING['file_valve'],
'maxBytes': 1024 * 1024,
'backupCount': 30,
'formatter': 'normal'
},
},
'loggers': {
'dotaparty.downloader': {
'handlers': ['file_downloader'],
'level': 'DEBUG',
'propagate': True,
},
'dotaparty.valve': {
'handlers': ['file_valve'],
'level': 'DEBUG',
'propagate': True,
},
'dotaparty.tasks': {
'handlers': ['file_tasks'],
'level': 'DEBUG',
'propagate': True,
},
'huey.consumer.ConsumerThread': {
'handlers': ['file_downloader'],
'level': 'ERROR',
'propagate': True,
},
},
} |
# Scene class components:
## ._manager
## .finished
## .pause()
## .resume()
## .end()
## .handle_input(events, pressed_keys)
## .update(dt)
## .draw(surface)
# Transition class components (in addition to Scene):
## .from_scene
## .to_scene
class scene_manager(object):
    """Maintains a stack of scenes; the scene on top is the active one."""

    def __init__(self):
        # Scene stack: index -1 holds the scene currently receiving updates.
        self._scenes = []

    @property
    def current_scene(self):
        """The scene on top of the stack, or None when the stack is empty."""
        return self._scenes[-1] if self._scenes else None

    def append(self, value):
        """Push a scene onto the stack."""
        self._scenes.append(value)

    def pop(self):
        """Remove and return the top scene."""
        return self._scenes.pop()

    def change(self, value, transition=None):
        """Swap the top scene for ``value``, optionally through a transition.

        With a transition, the outgoing/incoming scenes are recorded on it and
        the transition itself is stacked above the new scene.
        """
        if not transition:
            self.pop()
            self.append(value)
        else:
            transition.from_scene = self.pop()
            transition.to_scene = value
            self.append(value)
            self.append(transition)

    def update(self, dt):
        """Discard finished scenes from the top, then update the active one."""
        while True:
            top = self.current_scene
            if top is None:
                break
            if top.finished:
                self.pop()
                continue
            top.update(dt)
            break

    def draw(self, surface):
        """Clear the surface to black and draw every scene bottom-up."""
        surface.fill((0, 0, 0))
        for layer in self._scenes:
            layer.draw(surface)
|
def constructSquare(s):
    """Return the largest perfect square matching the pattern of `s`, or -1.

    Characters map bijectively to digits (same char -> same digit, different
    chars -> different digits), no leading zero, and the result has exactly
    len(s) digits.  Two strings match when the multiset of character counts
    of `s` equals the multiset of digit counts of the candidate square.
    """
    from collections import Counter
    from math import isqrt

    if not s:
        return -1
    pattern = sorted(Counter(s).values())
    if len(pattern) > 10:  # >10 distinct chars cannot map to distinct digits
        return -1
    digits = len(s)
    # isqrt avoids the float-precision pitfalls of int(x ** 0.5) that the
    # original relied on (and the original shadowed the builtin `min`).
    high = isqrt(10 ** digits - 1)            # largest root with `digits`-digit square
    low = isqrt(10 ** (digits - 1) - 1) + 1   # smallest such root (no leading zero)
    for root in range(high, low - 1, -1):     # descend so the first hit is the largest
        square = root * root
        if sorted(Counter(str(square)).values()) == pattern:
            return square
    return -1
# Compact reference solution kept for comparison:
# def constructSquare(s):
#     p = len(s)
#     d_max = int((10**p)**.5)
#     d_min = int((10**(p-1))**.5)
#     for d in range(d_max, d_min-1, -1):
#         n = str(d * d)
#         if sorted(s.count(c) for c in s) == sorted(n.count(c) for c in n):
#             return int(n)
#     return -1
task = "abccccdddd"
# NOTE(review): the expected value below has only 9 digits while `task` has
# 10 characters — presumably copied from a different test case; verify.
# Expected Output: 999950884
print(constructSquare(task))
'''
Given a string consisting of lowercase English letters, find the largest square number
which can be obtained by reordering the string's characters and replacing them with any digits you need
(leading zeros are not allowed) where same characters always map to the same digits
and different characters always map to different digits.
If there is no solution, return -1.
Example
For s = "ab", the output should be
constructSquare(s) = 81.
The largest 2-digit square number with different digits is 81.
For s = "zzz", the output should be
constructSquare(s) = -1.
There are no 3-digit square numbers with identical digits.
For s = "aba", the output should be
constructSquare(s) = 900.
It can be obtained after reordering the initial string into "baa" and replacing "a" with 0 and "b" with 9.
'''
|
from lib.gopherpysat import Gophersat
from typing import Dict, Tuple, List, Union
import itertools
import random
import time
import sys
from wumpus_cli.lib.wumpus_client import WumpusWorldRemote
# Path to the gophersat SAT-solver binary handed to every Gophersat instance.
gophersat_exec = "./lib/gophersat-1.1.6"
## Vocabulary symbols always carry 4 digits so coordinates can be extracted
## easily when the rules are inserted.
# Returns a list of every symbol representing a possible Wumpus position.
# E.g. [W0000, W0001, W0002, W0003, W0100, W0101, W0102, ..., W0303] for a 4*4 grid
def generate_wumpus_voca (taille_grille: int) :
    """Build the Wumpus-position vocabulary for a square grid.

    Each cell (i, j) yields 'Wiijj' with both indices zero-padded to two
    digits (e.g. 'W0003'), in row-major order — same output as the original
    four-branch padding logic, via a format spec.
    """
    return [f"W{i:02d}{j:02d}"
            for i in range(taille_grille)
            for j in range(taille_grille)]
def generate_stench_voca (taille_grille: int) :
    """Build the stench vocabulary: 'Siijj' for every cell, row-major.

    Zero-padded format spec replaces the original four-branch padding.
    """
    return [f"S{i:02d}{j:02d}"
            for i in range(taille_grille)
            for j in range(taille_grille)]
def generate_gold_voca (taille_grille: int) :
    """Build the gold vocabulary: 'Giijj' for every cell, row-major.

    Zero-padded format spec replaces the original four-branch padding.
    """
    return [f"G{i:02d}{j:02d}"
            for i in range(taille_grille)
            for j in range(taille_grille)]
def generate_brise_voca (taille_grille: int) :
    """Build the breeze vocabulary: 'Biijj' for every cell, row-major.

    Zero-padded format spec replaces the original four-branch padding.
    """
    return [f"B{i:02d}{j:02d}"
            for i in range(taille_grille)
            for j in range(taille_grille)]
def generate_trou_voca (taille_grille: int) :
    """Build the pit ('trou') vocabulary: 'Tiijj' for every cell, row-major.

    Zero-padded format spec replaces the original four-branch padding.
    """
    return [f"T{i:02d}{j:02d}"
            for i in range(taille_grille)
            for j in range(taille_grille)]
## Fin generation voca
def insert_all_regles (gs:Gophersat, wumpus_voca:List, trou_voca:List, brise_voca:List, stench_voca:List) :
    """Push the complete static rule base of the Wumpus world into the solver."""
    # Exactly one Wumpus somewhere on the grid.
    insert_only_one_wumpus_regle(gs, wumpus_voca)
    # (0, 0) holds neither Wumpus nor pit.
    insert_safety_regle(gs)
    # Pit -> breeze on neighbours, and breeze <-> some adjacent pit.
    insert_trou_regle(gs, trou_voca, brise_voca)
    insert_brise_regle(gs, brise_voca, trou_voca)
    # Wumpus -> stench on neighbours, and stench -> some adjacent Wumpus.
    insert_wumpus_stench_regle(gs, wumpus_voca, stench_voca)
    insert_stench_regle(gs, wumpus_voca, stench_voca)
    # A cell cannot hold both a Wumpus and a pit.
    insert_une_menace_par_case_regle(gs, wumpus_voca, trou_voca)
# Il y a forcement un Wumpus et il en existe un seul
# WIJ <-> non(WAB) et non(WAC) et non(WAD) ...
# Tested and Working
def insert_only_one_wumpus_regle (gs:Gophersat, wumpus_voca:List) :
    """Encode 'exactly one Wumpus': pairwise exclusion plus at-least-one.

    combinations() yields the same ordered pairs as the original copy/remove
    loop, so the clause stack is identical.
    """
    # At most one: for every unordered pair, at least one symbol is false.
    for first, second in itertools.combinations(wumpus_voca, 2) :
        gs.push_pretty_clause([f"-{first}", f"-{second}"])
    # At least one: the disjunction of every position symbol.
    gs.push_pretty_clause(wumpus_voca)
# Il n'y a ni wumpus, ni trou en (0,0)
# Tested and Working
def insert_safety_regle (gs:Gophersat) :
    """The start cell (0, 0) is guaranteed free of both Wumpus and pit."""
    for unit_fact in ("-W0000", "-T0000") :
        gs.push_pretty_clause([unit_fact])
# Wij -> -Tij
# Tij -> -Wij
# On ne peut pas avoir de wumpus en (i, j) si il y a un trou en (i, j)
def insert_une_menace_par_case_regle (gs:Gophersat, wumpus_voca:List, trou_voca:List) :
    """At most one threat per cell: never a Wumpus and a pit together.

    Pushes -Wij v -Tij for every cell.  The original pushed each clause
    twice ([-W,-T] and [-T,-W] are the same disjunction); once suffices and
    the CNF is logically unchanged.
    """
    # The two vocabularies are parallel, row-major lists of equal length.
    for wumpus, trou in zip(wumpus_voca, trou_voca) :
        gs.push_pretty_clause([f"-{wumpus}", f"-{trou}"])
def int_to_two_digits_str (i:int, number_to_apply:int = 0) -> str :
    """Apply an offset to ``i`` and render the result zero-padded to width 2.

    Values < 10 get a literal '0' prefix — including negatives, which yields
    strings like '0-1' that simply never match any vocabulary symbol.
    """
    shifted = i + number_to_apply
    return f"0{shifted}" if shifted < 10 else f"{shifted}"
# Si il y a un trou en (i, j) alors il y a une brise en (i-1, j), (i+1, j), (i, j-1) et (i, j+1)
# Tij -> B(i-1)j ET B(i+1)j ET Bi(j-1) ET Bi(j+1)
# Devenant -Tij v B(i-1)j ; -Tij v B(i+1)j ; -Tij v Bi(j-1) ; -Tij v Bi(j+1)
# Tested and working
def insert_trou_regle (gs:Gophersat, trou_voca:List, brise_voca:List) :
    """A pit implies a breeze on every in-grid orthogonal neighbour.

    For each pit symbol Tij, pushes -Tij v Bxy for each neighbour (x, y)
    whose breeze symbol exists in the vocabulary (off-grid ones do not).
    """
    for trou in trou_voca :
        # Symbols are 'Tiijj': two zero-padded digits per coordinate.
        row = int(trou[1:3])
        col = int(trou[3:])
        candidates = [
            f"B{int_to_two_digits_str(row, 1)}{int_to_two_digits_str(col)}",
            f"B{int_to_two_digits_str(row, -1)}{int_to_two_digits_str(col)}",
            f"B{int_to_two_digits_str(row)}{int_to_two_digits_str(col, 1)}",
            f"B{int_to_two_digits_str(row)}{int_to_two_digits_str(col, -1)}",
        ]
        for brise in candidates :
            if brise in brise_voca :
                gs.push_pretty_clause([f"-{trou}", brise])
# Bij <-> ( T(i-1)j ou T(i+1)j ou Ti(j-1) ou Ti(j+1) )
# C'est a dire que une brise en (i, j) equivault à au moins un trou autour
# On insere -Bij ou T(i-1)j ou T(i+1)j ou Ti(j-1) ou Ti(j+1) ; -Tij ou B(i-1)j ou B(i+1)j ou Bi(j-1) ou Bi(j+1)
# Tested and working
def insert_brise_regle (gs:Gophersat, brise_voca:List, trou_voca:List) :
    """Encode the breeze/pit correspondence in both directions.

    For every breeze:  -Bij v T(i+1)j v T(i-1)j v Ti(j+1) v Ti(j-1)
    For every pit:     -Tij v B(i+1)j v B(i-1)j v Bi(j+1) v Bi(j-1)
    keeping only neighbours present in the respective vocabulary.
    """
    def neighbours (symbol, letter, voca) :
        # Symbols are '<letter>iijj': two zero-padded digits per coordinate.
        row = int(symbol[1:3])
        col = int(symbol[3:])
        candidates = [
            f"{letter}{int_to_two_digits_str(row, 1)}{int_to_two_digits_str(col)}",
            f"{letter}{int_to_two_digits_str(row, -1)}{int_to_two_digits_str(col)}",
            f"{letter}{int_to_two_digits_str(row)}{int_to_two_digits_str(col, 1)}",
            f"{letter}{int_to_two_digits_str(row)}{int_to_two_digits_str(col, -1)}",
        ]
        return [c for c in candidates if c in voca]
    # A breeze requires at least one adjacent pit.
    for brise in brise_voca :
        gs.push_pretty_clause(neighbours(brise, "T", trou_voca) + [f"-{brise}"])
    # A pit requires at least one adjacent breeze.
    for trou in trou_voca :
        gs.push_pretty_clause(neighbours(trou, "B", brise_voca) + [f"-{trou}"])
# Si il y a un wumpus en (i, j) Alors il y a une stench en (i-1, j), (i+1, j), (i, j-1) et (i, j+1)
# Wij -> S(i-1)j ET S(i+1)j ET Si(j-1) ET Si(j+1)
# Devenant -Wij v S(i-1)j ; -Wij v S(i+1)j ; -Wij v Si(j-1) ; -Wij v Si(j+1)
# Tested and Working
def insert_wumpus_stench_regle (gs:Gophersat, wumpus_voca:List, stench_voca:List) :
    """A Wumpus implies a stench on every in-grid orthogonal neighbour.

    For each Wumpus symbol Wij, pushes -Wij v Sxy for each neighbour whose
    stench symbol exists in the vocabulary.
    """
    for wumpus in wumpus_voca :
        # Symbols are 'Wiijj': two zero-padded digits per coordinate.
        row = int(wumpus[1:3])
        col = int(wumpus[3:])
        candidates = [
            f"S{int_to_two_digits_str(row, 1)}{int_to_two_digits_str(col)}",
            f"S{int_to_two_digits_str(row, -1)}{int_to_two_digits_str(col)}",
            f"S{int_to_two_digits_str(row)}{int_to_two_digits_str(col, 1)}",
            f"S{int_to_two_digits_str(row)}{int_to_two_digits_str(col, -1)}",
        ]
        for stench in candidates :
            if stench in stench_voca :
                gs.push_pretty_clause([f"-{wumpus}", stench])
# On insere a chaque fin de boucle -Sij ou W(i-1)j ou W(i+1)j ou Wi(j-1) ou Wi(j+1)
# Tested and Working
def insert_stench_regle (gs:Gophersat, wumpus_voca:List, stench_voca:List) :
    """A stench implies a Wumpus on at least one adjacent in-grid cell.

    For each stench symbol Sij, pushes W-neighbours v -Sij.
    """
    for stench in stench_voca :
        # Symbols are 'Siijj': two zero-padded digits per coordinate.
        row = int(stench[1:3])
        col = int(stench[3:])
        candidates = [
            f"W{int_to_two_digits_str(row, 1)}{int_to_two_digits_str(col)}",
            f"W{int_to_two_digits_str(row, -1)}{int_to_two_digits_str(col)}",
            f"W{int_to_two_digits_str(row)}{int_to_two_digits_str(col, 1)}",
            f"W{int_to_two_digits_str(row)}{int_to_two_digits_str(col, -1)}",
        ]
        neighbours = [w for w in candidates if w in wumpus_voca]
        gs.push_pretty_clause(neighbours + [f"-{stench}"])
# Prend un caractere de la chaine de description d'une case et la position du contenu
# Retourne un Tuple avec les clauses a inserer dans le modèle
def wumpus_to_clause (single_case_content:str, position:Tuple[str, str]) :
    """Translate one percept character into its list of literals.

    '.' means the cell is empty: every symbol is negated.  'W'/'P'/'S'/'G'/'B'
    assert the matching symbol ('P' is a pit, stored as a 'T' literal).
    Returns -1 for an unrecognised character.
    """
    coords = f"{position[0]}{position[1]}"
    if single_case_content == "." :
        return [f"-{letter}{coords}" for letter in ("W", "T", "G", "B", "S")]
    symbol_for = {"W": "W", "P": "T", "S": "S", "G": "G", "B": "B"}
    if single_case_content in symbol_for :
        return [f"{symbol_for[single_case_content]}{coords}"]
    return -1
def push_clause_from_wumpus (gs:Gophersat, case_contents:str, position:Tuple[str, str], is_from_know_method:bool = False, enable_log:bool = False):
    """Translate a cell's percept string into unit clauses and push them.

    Returns -1 (after printing an error) on an untranslatable character.
    When the percepts come from a direct probe (not a know_* call), also
    pushes the implicit negatives for everything that was NOT perceived.
    """
    if enable_log :
        print(f"contents is : {case_contents}")
    facts = []
    for symbol in case_contents :
        if enable_log :
            print(f"inserting from wumpus : {symbol}\n")
        translated = wumpus_to_clause(symbol, position)
        if translated == -1 :
            print("Error : Invalid case contents : wumpus to clause")
            return -1
        facts.extend(translated)
    if not is_from_know_method :
        # A direct probe is exhaustive: whatever was not perceived is absent.
        facts.extend(get_implicit_negative_facts(facts, position))
    # Each fact is a unit clause, so they are pushed one at a time.
    for fact in facts :
        gs.push_pretty_clause([fact])
# On ajoute les faits perçu de par leur non-presence
# Exemple : On repere Bij, cela veut dire qu'il n'a pas de wumpus, ni de trou, ni de stench etc....
def get_implicit_negative_facts (facts:List, position:Tuple[str, str]) :
    """Negate every content letter that no existing fact mentions.

    E.g. observing only Bij implies there is no Wumpus, pit, gold or stench
    in that cell.  Membership is a substring test on each fact string.
    """
    absent = []
    for letter in ("W", "T", "G", "S", "B") :
        if not any(letter in fact for fact in facts) :
            absent.append(f"-{letter}{position[0]}{position[1]}")
    return absent
# True si un wumpus en i, j est possible
# False si un wumpus i, j est impossible
def is_wumpus_possible(gs, position:Tuple[str, str]) -> bool :
gs.push_pretty_clause([f"W{position[0]}{position[1]}"])
res = gs.solve()
gs.pop_clause()
return res
def is_trou_possible(gs, position:Tuple[str, str]) -> bool :
    """Check satisfiability with 'pit at position' temporarily assumed."""
    assumption = f"T{position[0]}{position[1]}"
    gs.push_pretty_clause([assumption])
    satisfiable = gs.solve()
    gs.pop_clause()  # restore the solver to its previous state
    return satisfiable
def idx_str_op (i_str:str, number_to_apply:int) -> str :
    """Parse a numeric index string, apply an offset, re-render zero-padded.

    Same padding quirk as int_to_two_digits_str: values < 10 (including
    negatives) get a literal '0' prefix.
    """
    shifted = int(i_str) + number_to_apply
    return f"0{shifted}" if shifted < 10 else f"{shifted}"
# si le wumpus est possible en (i, j)
# on regarde si il possible en [(i-1, j-1), (i-1, j+1), (i+1, j-1), (i+1, j+1)]
# Si il n'est pas possible a ces coord, alors il est obligatoirement au seul endroit possible
def is_wumpus_mandatory(gs, position:Tuple[str, str], wumpus_voca) -> bool :
    """True when no diagonal alternative can host the Wumpus.

    Caller has already established that a Wumpus is possible at `position`;
    if it is impossible at every in-vocabulary diagonal neighbour, the only
    consistent placement is `position` itself.  Every candidate is tested
    (no early exit) so the solver sees the same push/solve/pop sequence as
    the original.
    """
    row, col = position[0], position[1]
    diagonals = [
        f"W{idx_str_op(row, -1)}{idx_str_op(col, -1)}",
        f"W{idx_str_op(row, -1)}{idx_str_op(col, 1)}",
        f"W{idx_str_op(row, 1)}{idx_str_op(col, -1)}",
        f"W{idx_str_op(row, 1)}{idx_str_op(col, 1)}"
    ]
    outcomes = []
    for symbol in diagonals :
        if symbol in wumpus_voca :
            gs.push_pretty_clause([symbol])
            outcomes.append(gs.solve())
            gs.pop_clause()
    return not any(outcomes)
def get_brises_to_test (carte, i:str, j:str) -> Tuple :
    """Return the already-explored neighbours (above and behind) holding a breeze.

    Only (i-1, j) and (i, j-1) can have been probed before (row-major scan),
    so only those two cells are inspected.
    """
    row, col = int(i), int(j)
    found = []
    for cell in ([row - 1, col], [row, col - 1]) :
        if cell[0] > -1 and cell[1] > -1 and "B" in carte[cell[0]][cell[1]] :
            found.append(cell)
    return found
# On recupere toutes les cases autours de la brise qui ne sont pas la position que l'on teste
def get_pit_to_test (brise_pos:Tuple, i:str, j:str, taille_grille:int) -> Tuple :
    """Return the in-grid neighbours of a breeze cell, excluding (i, j) itself.

    These are the alternative cells that could explain the breeze at
    `brise_pos` instead of a pit at (i, j).
    """
    row, col = int(i), int(j)
    br, bc = brise_pos[0], brise_pos[1]
    result = []
    for cell in ([br - 1, bc], [br, bc - 1], [br + 1, bc], [br, bc + 1]) :
        inside = -1 < cell[0] < taille_grille and -1 < cell[1] < taille_grille
        if inside and (cell[0] != row or cell[1] != col) :
            result.append(cell)
    return result
def is_trou_mandatory(gs, position:Tuple[str, str], trou_voca, carte, taille_grille) -> bool :
    """True when the surrounding breezes can only be explained by a pit here.

    Needs at least two neighbouring breezes to conclude anything; if any of
    those breezes could instead be explained by a known pit ('P') elsewhere,
    the pit at `position` is not mandatory.  (gs and trou_voca are accepted
    for signature compatibility but not consulted.)
    """
    row, col = position[0], position[1]
    breezes = get_brises_to_test(carte, row, col)
    # Fewer than two breezes around: evidence is inconclusive.
    if len(breezes) < 2 :
        return False
    for breeze in breezes :
        for cell in get_pit_to_test(breeze, row, col, taille_grille) :
            # A known pit elsewhere could explain this breeze.
            if "P" in carte[cell[0]][cell[1]] :
                return False
    return True
def should_I_be_cautious (gs:Gophersat, position:Tuple[str, str], enable_log:bool = False) -> bool :
    """Return True when a Wumpus or a pit cannot be ruled out at `position`.

    Note: `or` short-circuits, so the pit check is skipped whenever the
    Wumpus is already possible; with logging enabled each check also runs
    an extra time inside the f-string (additional solver calls).
    """
    if enable_log :
        print(f"wumpus possible : {is_wumpus_possible(gs, position)} ----- trou possible : {is_trou_possible(gs, position)}")
    return is_wumpus_possible(gs, position) or is_trou_possible(gs, position)
def print_case_contents_post_insertion (i:int, j:int, case_contents, wwr:WumpusWorldRemote, solvability:bool) :
    """Log one probed cell's percepts and the solver's satisfiability.

    `wwr` is accepted for signature compatibility but not used.
    """
    header = f"[{i}, {j}] - case_contents : {case_contents}"
    print(header)
    print(f"Satisfiabilité : {solvability}")
    print("\n --- \n")
def init_res (taille_grille) -> Tuple[Tuple] :
    """Build a taille_grille x taille_grille map filled with '?' placeholders."""
    return [['?'] * taille_grille for _ in range(taille_grille)]
def cartographier (wwr:WumpusWorldRemote, taille_grille:int = 4, enable_log:bool = False):
    """Map the whole grid by probing cells and reasoning with the SAT solver.

    For each cell, a direct probe is used when it is provably safe; otherwise
    a cautious probe, unless the solver proves the threat's location, in which
    case the cheaper know_wumpus/know_pit call is made.  Returns the map of
    percept strings, or -1 on any contradiction or translation error.
    """
    # Build the full vocabulary: one symbol per (content, cell) pair.
    wumpus_voca = generate_wumpus_voca(taille_grille)
    gold_voca = generate_gold_voca(taille_grille)
    stench_voca = generate_stench_voca(taille_grille)
    brise_voca = generate_brise_voca(taille_grille)
    trou_voca = generate_trou_voca(taille_grille)
    voc = wumpus_voca + gold_voca + stench_voca + brise_voca + trou_voca
    gs = Gophersat(gophersat_exec, voc)
    res = init_res(taille_grille)
    # Encode the static rules of the world.
    insert_all_regles(gs, wumpus_voca, trou_voca, brise_voca, stench_voca)
    if(gs.solve() and enable_log) :
        print(f"vocabulaire inseré sans contradiction")
        print(wwr)
    elif (gs.solve() == False) :
        print(f"contradiction dans le vocabulaire inseré")
        return -1
    if(enable_log) :
        print("\nBefore loop log :\n")
        print(f"{gs.solve()}")
        print(f"{gs.get_pretty_model()}")
        print("\n==========================\n")
    i_str = ""
    j_str = ""
    # (0, 0) is known safe, so it is probed directly before the main scan.
    status, percepts, cost = wwr.probe(0,0)
    print(status, percepts, cost)
    res[0][0] = percepts
    is_from_know_method = False
    push_clause_from_wumpus(gs, percepts, ["00", "00"], is_from_know_method, False)
    # Row-major scan of the remaining cells.
    for i in range(taille_grille) :
        for j in range(taille_grille) :
            if i != 0 or j != 0 :
                i_str = int_to_two_digits_str(i)
                j_str = int_to_two_digits_str(j)
                if is_wumpus_possible(gs, [i_str, j_str]) :
                    if is_wumpus_mandatory(gs, [i_str, j_str], wumpus_voca) :
                        # The solver pinned the Wumpus: claim it for free.
                        status, percepts, cost = wwr.know_wumpus(i,j)
                        if percepts == "Correct wumpus position." :
                            percepts = "W"
                            is_from_know_method = True
                    else :
                        status, percepts, cost = wwr.cautious_probe(i, j)
                elif is_trou_possible(gs, [i_str, j_str]) :
                    if is_trou_mandatory(gs, [i_str, j_str], trou_voca, res, taille_grille) :
                        status, percepts, cost = wwr.know_pit(i,j)
                        if percepts == "Correct pit position." :
                            percepts = "P"
                            is_from_know_method = True
                    else :
                        status, percepts, cost = wwr.cautious_probe(i, j)
                else :
                    # Neither threat is possible: a plain probe is safe.
                    status, percepts, cost = wwr.probe(i,j)
                if enable_log :
                    print(status, percepts, cost, i, j)
                res[i][j] = percepts
                if(push_clause_from_wumpus(gs, percepts, [i_str, j_str], is_from_know_method, False) == -1) : # on a clause-insertion error abort: the model would be corrupted
                    gs.solve()
                    print(f"Modele :\n {gs.get_pretty_model()} \n ---")
                    return -1
                is_from_know_method = False
                if(enable_log) :
                    print_case_contents_post_insertion(i, j, percepts, wwr, gs.solve())
                if (gs.solve() == False) :
                    return -1
    if(enable_log) :
        print("\n\n==========================")
        print(f"Satisfiabilité : {gs.solve()}")
        print(f"Modele trouvé :\n {gs.get_pretty_model()} \n ---")
    return res
if __name__ == "__main__":
server = "http://localhost:8080"
groupe_id = "PRJ45" # votre vrai numéro de groupe
names = "Ulysse Brehon et Luis Enrique Gonzalez Hilario" # vos prénoms et noms
try:
wwr = WumpusWorldRemote(server, groupe_id, names)
except HTTPError as e:
print(e)
print("Try to close the server (Ctrl-C in terminal) and restart it")
sys.exit(-1)
status, msg, taille_grille = wwr.next_maze()
res = cartographier(wwr, taille_grille, True)
if(res == -1) :
print(f"echec sur une taille de : {taille_grille}x{taille_grille}")
sys.exit(-1)
print(res) |
x = [1, 2, 3, [4, 5, 6], 4, 5, [7, 8, [9, 10]]]
# Sample input for the recursive sum: integers nested inside lists of lists.
def sum_of_all_nums_recursive(nums_and_num_lists: list) -> int:
    """Sum every int in a list that may contain arbitrarily nested lists.

    Elements that are neither ints nor lists contribute 0, matching the
    original skip-them behaviour.
    """
    return sum(
        sum_of_all_nums_recursive(item) if isinstance(item, list)
        else item if isinstance(item, int)
        else 0
        for item in nums_and_num_lists
    )
print(sum_of_all_nums_recursive(x))  # 64 for the sample list defined above
|
# -*- coding:utf-8 -*-
#
# 开发人员 : sunshenggang
# 开始时间 : 19-6-20 下午9:18
# 开发工具 : PyCharm Community Edition
from flask import render_template
from . import home
@home.app_errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page together with the matching status code."""
    page = render_template('/home/error/404.html')
    return page, 404
@home.app_errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page together with the matching status code."""
    page = render_template('/home/error/500.html')
    return page, 500
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Train a one-hidden-layer sigmoid network on the Iris dataset with plain
# NumPy, then report accuracy on the held-out rows and predictions for the
# first five records.
data = pd.read_csv('Iris.csv')
# Min-max normalise the four feature columns into [0, 1].
df_norm = data[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']].apply(lambda x: (x - x.min()) / (x.max() - x.min()))
# Keep the first five (normalised) rows aside for a demo prediction later.
X_5= df_norm.head(5)
X_5 = np.array(X_5, dtype='float32')
# Encode species labels as 0/1/2.
target = data[['Species']].replace(['Iris-setosa','Iris-versicolor','Iris-virginica'],[0,1,2])
y_5=target.head(5)
df = pd.concat([df_norm, target], axis=1)
test2 = df.head(5)
# Random 60/40 train/test split.
# NOTE(review): this split runs BEFORE np.random.seed(4) below, so it is not
# reproducible between runs — presumably unintended; verify.
train_test_per = 60/100.0
df['train'] = np.random.rand(len(df)) < train_test_per
train = df[df.train == 1]
train = train.drop('train', axis=1).sample(frac=1)
test = df[df.train == 0]
test = test.drop('train', axis=1)
X = train.values[:,:4]
# One-hot targets for the three classes.
targets = [[1,0,0],[0,1,0],[0,0,1]]
y_5 = np.array([targets[int(x)] for x in y_5.values[:,:1]])
y = np.array([targets[int(x)] for x in train.values[:,4:5]])
num_inputs = len(X[0])
hidden_layer_neurons = 5
# Seed only affects the weight initialisation (see split note above).
np.random.seed(4)
# Weights uniform in [-1, 1).
w1 = 2*np.random.random((num_inputs, hidden_layer_neurons)) - 1
num_outputs = len(y[0])
w2 = 2*np.random.random((hidden_layer_neurons, num_outputs)) - 1
learning_rate = 0.3
print('\nNo. of Inputs: ',num_inputs)
print('No. Of hidden layers: 1')
print('No. Of nodes in hidden layer: ',hidden_layer_neurons)
print('Learning Rate: ',learning_rate)
max_iterations=50000
epoch=0
prev_er=0
# Full-batch gradient descent with sigmoid activations until the mean
# absolute error is small and has plateaued, or the iteration cap is hit.
while True:
    l1 = 1/(1 + np.exp(-(np.dot(X, w1))))
    l2 = 1/(1 + np.exp(-(np.dot(l1, w2))))
    er = (abs(y - l2)).mean()
    # Backpropagate: delta = error * sigmoid derivative.
    l2_delta = (y - l2)*(l2 * (1-l2))
    l1_delta = l2_delta.dot(w2.T) * (l1 * (1-l1))
    w2 += l1.T.dot(l2_delta) * learning_rate
    w1 += X.T.dot(l1_delta) * learning_rate
    if ((epoch!=0) and (er<0.05) and (abs(prev_er-er)<0.0001)):
        epoch+=1
        print()
        print('Status: Converged')
        break
    if epoch>=max_iterations:
        print('Status: Diverged')
        break
    prev_er=er
    epoch=epoch+1
print('\nCost Value:', er)
print('Number Of Iterations: ',epoch)
# Evaluate on the held-out rows: predicted class = argmax of the outputs.
X = test.values[:,:4]
y = np.array([targets[int(x)] for x in test.values[:,4:5]])
l1 = 1/(1 + np.exp(-(np.dot(X, w1))))
l2 = 1/(1 + np.exp(-(np.dot(l1, w2))))
np.round(l2,3)
yp = np.argmax(l2, axis=1)
res = yp == np.argmax(y, axis=1)
correct = np.sum(res)/len(res)
testres = test[['Species']].replace([0,1,2], ['Iris-setosa','Iris-versicolor','Iris-virginica'])
testres['Prediction'] = yp
testres['Prediction'] = testres['Prediction'].replace([0,1,2], ['Iris-setosa','Iris-versicolor','Iris-virginica'])
#print(testres)
print('Result for 40% test data:',sum(res),'/',len(res), ':','Accuracy', (correct*100),'%\n')
# Predict the first five records kept aside at the top of the script.
l1 = 1/(1 + np.exp(-(np.dot(X_5, w1))))
l2 = 1/(1 + np.exp(-(np.dot(l1, w2))))
np.round(l2,3)
yp = np.argmax(l2, axis=1)
res = yp == np.argmax(y_5, axis=1)
correct = np.sum(res)/len(res)
testres = test2[['Species']].replace([0,1,2], ['Iris-setosa','Iris-versicolor','Iris-virginica'])
testres['Prediction'] = yp
testres['Prediction'] = testres['Prediction'].replace([0,1,2], ['Iris-setosa','Iris-versicolor','Iris-virginica'])
print('Prediction result for First 5 input data:\n',testres)
|
from dbfread import dbf
# Open the DBF attribute table of the Swedish municipalities (RT90) dataset.
table = dbf.DBF("data/Kommun_RT90_region.dbf")
def pull_names(row):
    """Format a record's municipality name as '<KnNamn> kommun'."""
    kommun_name = row['KnNamn']
    return f"{kommun_name} kommun"
# Materialise the formatted name for every record in the table.
queries = list(map(pull_names, table))
|
# Read integers until the -1 sentinel.  The sentinel itself is NOT stored:
# the original appended it to the list, skewing both the mean and the
# minimum computed later.
numeros = []
num = int(input("Informe um número e, caso deseje sair, informe o valor -1: "))
while num != -1:
    numeros.append(num)
    num = int(input("Informe um número e, caso deseje sair, informe o valor -1: "))
def calculaMedia(numeros):
    """Return the arithmetic mean of `numeros`, or 0 for an empty list.

    The original divided unconditionally and raised ZeroDivisionError on
    an empty list; the builtin sum() replaces the manual index loop.
    """
    if not numeros:
        return 0
    return sum(numeros) / len(numeros)
def calculaMenorNumero(numeros):
    """Return the smallest number in `numeros`, or None for an empty list.

    The original initialised the minimum to 0, so any list of strictly
    positive numbers wrongly returned 0.
    """
    menorNumero = None
    for numero in numeros:
        if menorNumero is None or numero < menorNumero:
            menorNumero = numero
    return menorNumero
def salvaHtml(numeros, nomeArquivo):
    """Write an HTML report listing the numbers, their mean and their minimum.

    Creates (or overwrites) the file at `nomeArquivo`.
    """
    # Context manager guarantees the handle is flushed and closed
    # (the original opened the file and never closed it).
    with open(nomeArquivo, 'w') as arquivo:
        arquivo.write("<h1 style='color:red'>Números da lista:</h1>")
        for numero in numeros:
            arquivo.write("<p style='margin-right:10px'>" + str(numero) + "</p>")
        media = calculaMedia(numeros)
        arquivo.write("<h2>A média desses números é igual a: " + str(media) + "</h2>")
        menorNumero = calculaMenorNumero(numeros)
        arquivo.write("<h2>O menor desses números é: " + str(menorNumero) + ".</h2>")
# Render the collected numbers, their mean and their minimum to an HTML file.
salvaHtml(numeros, "testeNumeros.html")
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import time
PY3 = sys.version_info[0] == 3
dirpath = os.getcwd()
if PY3:
xrange = range
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Quaternion
import rospy
from std_msgs.msg import Empty
import numpy as np
import cv2
import imutils
# --- Tuning knobs for the scripted yaw manoeuvre ---
hardcoded_yaw = -120 # total yaw (degrees) you'd like to try
pauselength=4.5 # seconds to wait after the first yaw to regroup and do the second one
yawsteps_number=2 # number of steps to break the yaw command up into
# --- Mutable state shared with the image callback ---
pause_active=False
pause_start_time=0
yawstep=0
flag = 0
velocity = Quaternion()
bridge = CvBridge()  # converts ROS Image messages to/from OpenCV arrays
# Publishers: debug mask image, body-frame move commands, and landing trigger.
mask_pub = rospy.Publisher('/mask', Image, queue_size=1)
vel_pub = rospy.Publisher('/moveto_cmd_body', Quaternion, queue_size=1)
pub_land= rospy.Publisher('bebop/land',Empty,queue_size=1)
def find_circles(my_img):
    """ROS image callback: detect the bright circular marker, publish a
    debug mask, and drive a center-then-yaw state machine via /moveto_cmd_body.

    NOTE(review): the source this was recovered from had its indentation
    stripped; the nesting below (in particular the pause/yaw section at
    the bottom of the per-circle loop) is reconstructed from the control
    flow and must be confirmed against the original file.

    State machine (module-level ``flag``):
      0 -> correcting horizontal (image-x) error via lateral velocity
      1 -> horizontal error small; correcting vertical (image-y) error
      2 -> both errors small; fine xy correction, then stepped yaw
    """
    global flag, hardcoded_yaw
    global pause_start_time
    global pauselength
    global pause_active
    global yawstep
    global yawsteps_number
    # ROS image -> OpenCV BGR -> grayscale.
    img = bridge.imgmsg_to_cv2(my_img, "bgr8")
    img_orig=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Local contrast equalization (CLAHE) before the brightness threshold.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    img_orig = clahe.apply(img_orig)
    img = cv2.medianBlur(img_orig,3)
    # Keep only very bright pixels; the marker is assumed bright.
    ret,thresh_binary = cv2.threshold(img,210,255,cv2.THRESH_BINARY)
    dilate = cv2.dilate(thresh_binary, np.ones((3,3), np.uint8), iterations=1)
    # OpenCV 3.x signature: findContours returns (image, contours, hierarchy).
    im, cnts, hierarchy = cv2.findContours(dilate, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    drawing = np.zeros((dilate.shape[0], dilate.shape[1], 3), np.uint8)  # NOTE(review): unused
    cnt_area = []
    cnt_num = []
    for c in cnts:
        cnt_area.append(cv2.contourArea(c))
    # Contour indices sorted by area (ascending); areas sorted in place
    # so cnt_area[-1-i] and cnt_num[-1-i] refer to the i-th largest.
    cnt_num = np.argsort(cnt_area)
    cnt_area.sort()
    # print(cnt_area)
    # large_cnts = np.zeros(np.shape(mask))
    fresh_im = np.zeros(np.shape(img_orig))
    size_im = np.shape(img_orig)
    # Image center: the visual-servoing target.
    y_im = int(size_im[0]/2)
    x_im = int(size_im[1]/2)
    # Paint the largest contours (area > 1000) into a fresh mask.
    # NOTE(review): if fewer than 5 contours exist, len-1-i goes negative
    # and Python's negative indexing silently wraps around -- confirm
    # whether that was intended.
    for i in range(5): # in the 5 largest contours, check if cnt_area > 5000
        if cnt_area[len(cnt_area)-1-i] > 1000:
            cv2.drawContours(fresh_im, cnts, cnt_num[len(cnt_num)-1-i], (255, 255, 255), -1)
    # Restrict the grayscale image to the large bright blobs only.
    mask = cv2.bitwise_and(img_orig, img_orig, mask = np.uint8(fresh_im))
    detected_circles = cv2.HoughCircles(mask,
        cv2.HOUGH_GRADIENT, 1, 20, param1 = 50,
        param2 = 30, minRadius = 1, maxRadius = 40)
    # Pixel-error -> body-velocity gains.
    scale_x = 0.02
    scale_y = 0.02
    # Draw circles that are detected.
    if detected_circles is not None:
        # Convert the circle parameters a, b and r to integers.
        detected_circles = np.uint16(np.around(detected_circles))
        for pt in detected_circles[0, :]:
            a, b, r = pt[0], pt[1], pt[2]
            # Draw the circumference of the circle.
            cv2.circle(mask, (a, b), r, (0, 255, 0), 2)
            # Draw a small circle (of radius 1) to show the center.
            cv2.circle(mask, (a, b), 1, (0, 0, 255), 3)
            # Pixel error of the circle center w.r.t. the image center.
            # first control in x dirn in image space (y in bebop space)
            del_x = x_im-a # if +ve go left (+y direction bebop)
            del_y = y_im-b
            print("del_x: ",del_x, "del_y: ",del_y)
            # if the error is greater than 20 pixels, then only change motion in y direction
            move_x = del_y*scale_y
            move_y = del_x*scale_x
            # Stage 0: correct horizontal image error first.
            if abs(del_x)>20:
                flag = 0
                velocity.w = 0
                velocity.x = 0
                velocity.y = move_y
                velocity.z = 0
                print("y")
            else:
                flag = 1
                velocity.w = 0
                velocity.x = 0
                velocity.y = 0
                velocity.z = 0
                print("y done")
            # Stage 1: horizontal done, correct vertical image error.
            if flag == 1:
                if abs(del_y)>20:
                    velocity.w = 0
                    velocity.x = move_x
                    velocity.y = 0
                    velocity.z = 0
                    print("x")
                else:
                    flag = 2
                    velocity.w = 0
                    velocity.x = 0
                    velocity.y = 0
                    velocity.z = 0
                    print("x done")
            # Stage 2: fine simultaneous xy correction; once both errors
            # are small and all yaw steps are done (and no pause is
            # pending), the maneuver is complete.
            if flag == 2:
                if abs(del_x) > 10 and abs(del_y)>10:
                    velocity.w = 0
                    velocity.x = move_x
                    velocity.y = move_y
                    velocity.z = 0
                    print("xy")
                else:
                    if yawstep>=yawsteps_number and pause_active==False:
                        print('FINISHED YAWING AND CAME BACK TO BULLSEYE')
                        time.sleep(1)
                        rospy.signal_shutdown('Node is finished, shut down')
                        time.sleep(1)
                        # Skip publishing/yawing once shutdown was requested.
                        break
            # cv2.imshow("Detected Circle", mask)
            # cv2.waitKey(0)
            # Publish the debug mask and the current velocity command.
            mask_pub.publish(bridge.cv2_to_imgmsg(mask, "mono8"))
            vel_pub.publish(velocity)
            print('yawstep: ',yawstep)
            # Non-blocking pause: the pause ends once `pauselength`
            # seconds have elapsed since the last yaw step.
            if time.time() - pause_start_time > pauselength: #this is non-blocking pause implementation
                pause_active=False # waited long enough; pause is over
            else:
                print('pausing like a good boi') # still inside the pause window
            if pause_active==False: # only act when not currently pausing
                if yawstep<yawsteps_number: # yaw steps still remaining
                    velocity.x = 0
                    velocity.y = 0
                    velocity.z = 0
                    velocity.w = hardcoded_yaw/yawsteps_number # Yaw Command, positive left
                    # SEND IT
                    print('\n \n sending Yaw towards the window: ',hardcoded_yaw/yawsteps_number)
                    print('yawstep: ',yawstep)
                    vel_pub.publish(velocity)
                    time.sleep(.5)
                    # brief settle time before any further command
                    yawstep=yawstep+1
                    pause_active=True
                    pause_start_time=time.time()
def main():
    """Start the bullseye_yaw node: wire the rectified left camera stream
    into the circle-tracking callback and block until ROS shuts down."""
    rospy.init_node('bullseye_yaw', anonymous=False)
    image_topic = '/duo3d/left/image_rect'
    rospy.Subscriber(image_topic, Image, find_circles)
    rospy.spin()
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Ctrl-C / ROS shutdown while spinning is a normal exit, not an error.
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.