id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
11338018 | from __future__ import unicode_literals
import dataent
def execute():
    """One-off migration patch: map legacy DocPerm `cancel` rights onto the
    newer `delete` right, and clear `cancel` where `submit` is not granted.

    Operates directly on the ``tabDocPerm`` table via raw SQL.
    """
    dataent.reload_doc("core", "doctype", "docperm")
    # delete same as cancel (map old permissions)
    dataent.db.sql("""update tabDocPerm set `delete`=ifnull(`cancel`,0)""")
    # can't cancel if can't submit
    dataent.db.sql("""update tabDocPerm set `cancel`=0 where ifnull(`submit`,0)=0""")
dataent.clear_cache() | StarcoderdataPython |
249525 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# When using bytestrings in Python 2, Windows requires full unicode
# filenames and paths. Therefore any bytestring paths *must* be utf-8
# encoded as they will need to be converted on the fly to full unicode
# for Windows platforms.
#
# Both Linunx and Mac OS X platforms will happily support utf-8 encoded paths
#
# These are simple support routines to ease use of utf-8 encoded bytestrings
# as paths in main program to be converted on the fly to full unicode as
# temporary un-named values to prevent the potential for inadvertent mixing
# of unicode and bytestring and auto promotion issues elsewhere in the main
# program
#
# These include routines for path manipulation and encoding and decoding uri/iri
import sys, os
import locale
import codecs
from utf8_utils import utf8_str
from urllib import unquote
import path
import unicodedata
# True when running on any Windows platform (needs full-unicode paths).
_iswindows = sys.platform.startswith('win')
# UNIBYTE_CHARS = set(chr(x) for x in xrange(256))
# All single-byte characters in the ASCII range 0-127.
ASCII_CHARS = set(chr(x) for x in xrange(128))
# Characters that may appear unescaped in a URL path.
URL_SAFE = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               'abcdefghijklmnopqrstuvwxyz'
               '0123456789' '_.-/~')
# ASCII characters that must be percent-encoded when quoting an IRI.
IRI_UNSAFE = ASCII_CHARS - URL_SAFE
# returns a utf-8 encoded quoted IRI (not a URI)
def quoteurl(href):
    """Return *href* as a utf-8 encoded, percent-quoted IRI (not a URI).

    Only ASCII characters outside URL_SAFE are escaped; non-ASCII bytes
    pass through unchanged, which is what makes the result an IRI.
    """
    href = utf8_str(href)
    result = []
    for char in href:
        if char in IRI_UNSAFE:
            # note: emits lowercase hex escapes (e.g. "%2f")
            char = "%%%02x" % ord(char)
        result.append(char)
    return ''.join(result)
# unquotes url to create a utf-8 encoded string
def unquoteurl(href):
    """Unquote a url, returning a utf-8 encoded bytestring."""
    href = utf8_str(href)
    href = unquote(href)
    return href
# convert utf-8 encoded path string to proper type
# on windows that is full unicode
# on macosx and linux this is utf-8
def pathof(s):
    """Convert a utf-8 encoded path bytestring to the platform's native form.

    On Windows returns full unicode; on Mac OS X / Linux returns a utf-8
    bytestring.  ``None`` passes through untouched.
    """
    if s is None:
        return None
    if isinstance(s, unicode):
        # Callers are expected to pass bytestrings; warn but cope.
        print "Warning: pathof expects utf-8 encoded byestring: ", s
        if _iswindows:
            return s
        return s.encode('utf-8')
    if _iswindows:
        return s.decode('utf-8')
    return s
def exists(s):
    # os.path.exists with utf-8 bytestring path handling
    return os.path.exists(pathof(s))
def isfile(s):
    # os.path.isfile with utf-8 bytestring path handling
    return os.path.isfile(pathof(s))
def isdir(s):
    # os.path.isdir with utf-8 bytestring path handling
    return os.path.isdir(pathof(s))
def mkdir(s):
    # os.mkdir with utf-8 bytestring path handling
    return os.mkdir(pathof(s))
def listdir(s):
    """List directory *s*, returning utf-8 encoded bytestring names."""
    rv = []
    for file in os.listdir(pathof(s)):
        rv.append(utf8_str(file, enc=sys.getfilesystemencoding()))
    return rv
def getcwd():
    # os.getcwdu() returns unicode; re-encode to the utf-8 convention
    return utf8_str(os.getcwdu())
def walk(top):
    """Recursively walk *top*, returning file paths relative to it.

    Paths are utf-8 encoded bytestrings; only files are returned,
    directories themselves are not included.
    """
    toppath = top
    rv = []
    for base, dnames, names in os.walk(pathof(top)):
        base = utf8_str(base, enc=sys.getfilesystemencoding())
        for name in names:
            name = utf8_str(name, enc=sys.getfilesystemencoding())
            filepath = relpath(os.path.join(base,name), toppath)
            rv.append(filepath)
    return rv
def relpath(path, start=None):
    """Return *path* made relative to *start*, as a utf-8 bytestring."""
    # NOTE(review): start=None is fed straight into utf8_str() --
    # presumably utf8_str passes None through; confirm, otherwise
    # calling relpath(p) without start would fail.
    rpath = os.path.relpath(utf8_str(path),utf8_str(start))
    return rpath
def abspath(path):
    # absolute path returned as a utf-8 encoded bytestring
    return utf8_str(os.path.abspath(pathof(path)))
| StarcoderdataPython |
1944746 | <filename>src/schemathesis/specs/graphql/schemas.py
from functools import partial
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, cast
from urllib.parse import urlsplit
import attr
import graphql
from hypothesis import strategies as st
from hypothesis.strategies import SearchStrategy
from hypothesis_graphql import strategies as gql_st
from ...checks import not_a_server_error
from ...hooks import HookDispatcher
from ...models import Case, CheckFunction, Endpoint
from ...schemas import BaseSchema
from ...stateful import Feedback
from ...utils import GenericResponse
@attr.s()  # pragma: no mutate
class GraphQLCase(Case):
    """A single generated GraphQL test case.

    The GraphQL query is kept in ``self.body`` and is always transmitted as
    a JSON payload of the form ``{"query": ...}``.
    """

    def as_requests_kwargs(
        self, base_url: Optional[str] = None, headers: Optional[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """Build keyword arguments suitable for ``requests.request``."""
        resolved_headers = self._get_headers(headers)
        resolved_url = self._get_base_url(base_url)
        return {
            "method": self.method,
            "url": resolved_url,
            "json": {"query": self.body},
            "headers": resolved_headers,
        }

    def as_werkzeug_kwargs(self, headers: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
        """Build keyword arguments for a Werkzeug test client call."""
        resolved_headers = self._get_headers(headers)
        kwargs: Dict[str, Any] = {
            "method": self.method,
            "path": self.endpoint.schema.get_full_path(self.formatted_path),
            "headers": resolved_headers,
            "query_string": self.query,
            "json": {"query": self.body},
        }
        return kwargs

    def validate_response(
        self,
        response: GenericResponse,
        checks: Tuple[CheckFunction, ...] = (),
    ) -> None:
        """Run ``checks`` against ``response``; defaults to the server-error check."""
        selected = checks if checks else (not_a_server_error,)
        return super().validate_response(response, selected)
@attr.s()  # pragma: no mutate
class GraphQLSchema(BaseSchema):
    """Schemathesis schema wrapper for GraphQL APIs.

    A GraphQL API exposes a single POST endpoint, so endpoint discovery and
    path handling are far simpler than for Open API schemas.
    """

    def get_full_path(self, path: str) -> str:
        # Every GraphQL request goes to the same path, whatever ``path`` says.
        return self.base_path

    @property  # pragma: no mutate
    def verbose_name(self) -> str:
        return "GraphQL"

    @property
    def base_path(self) -> str:
        # Prefer an explicitly configured base URL over the schema location.
        if not self.base_url:
            return self._get_base_path()
        return urlsplit(self.base_url).path

    def _get_base_path(self) -> str:
        return cast(str, urlsplit(self.location).path)

    def get_all_endpoints(self) -> Generator[Endpoint, None, None]:
        # GraphQL exposes exactly one endpoint: POST to the schema location.
        endpoint = Endpoint(
            base_url=self.location, path=self.base_path, method="POST", schema=self, definition=None  # type: ignore
        )
        yield endpoint

    def get_case_strategy(
        self, endpoint: Endpoint, hooks: Optional[HookDispatcher] = None, feedback: Optional[Feedback] = None
    ) -> SearchStrategy:
        """Build a Hypothesis strategy that generates ``GraphQLCase`` instances."""
        make_case = partial(GraphQLCase, endpoint=endpoint)
        client_schema = graphql.build_client_schema(self.raw_schema)
        return st.builds(make_case, body=gql_st.query(client_schema))

    def get_strategies_from_examples(self, endpoint: Endpoint) -> List[SearchStrategy[Case]]:
        # GraphQL schemas carry no examples.
        return []

    def get_hypothesis_conversion(self, endpoint: Endpoint, location: str) -> Optional[Callable]:
        return None
| StarcoderdataPython |
4940740 | # Generated by Django 3.1.5 on 2021-02-06 16:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the ``core`` app: drops the
    # ``authors`` field from Presentation, adds a ``type`` choice field,
    # and makes ``location`` optional.

    dependencies = [
        ('core', '0005_publication_file'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='presentation',
            name='authors',
        ),
        migrations.AddField(
            model_name='presentation',
            name='type',
            field=models.CharField(choices=[('guest_lecture', 'Guest lecture'), ('conference', 'Conference presentation'), ('other', 'Other')], default='guest_lecture', max_length=20),
        ),
        migrations.AlterField(
            model_name='presentation',
            name='location',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| StarcoderdataPython |
178316 | import os, pickle,uuid
class ControlBase(object):
    """Base class for GUI form controls.

    Holds a value and a label, assigns each control a unique id, and fires
    ``changed_event`` whenever the value changes.  Subclasses override the
    no-op hooks (``show``, ``hide``, the popup-menu methods, ...) as needed.
    """

    # Class-level defaults, overridden per instance in __init__.
    _value = None
    _label = None
    _controlHTML = ""

    def __init__(self, *args, **kwargs):
        # The label may be given positionally (first arg) or via ``label=``;
        # the initial value via ``default=``.
        self._id = uuid.uuid4()
        self._value = kwargs.get('default', None)
        self._parent = 1
        self._label = kwargs.get('label', args[0] if len(args) > 0 else '')

    def init_form(self): pass

    def load_form(self, data, path=None):
        """Restore the control's value from a ``data`` dict.

        NOTE(review): the ``value`` setter already fires ``changed_event``
        on change, so a changed value triggers the event twice here --
        preserved for backwards compatibility.
        """
        oldvalue = self.value
        self.value = data.get('value', None)
        if oldvalue != self.value: self.changed_event()

    def changed_event(self):
        """
        Function called when ever the Control value is changed
        """
        return True

    def show(self): pass
    def hide(self): pass

    def open_popup_menu(self, position): pass
    def add_popup_submenu_option(self, label, options): pass
    def add_popup_menu_option(self, label, functionAction = None): pass

    def __repr__(self):
        # Bug fix: __repr__ must return a str. The previous implementation
        # returned self.value directly, which raised TypeError for any
        # non-string value (including the default None).
        return str(self.value)

    ############################################################################
    ############ Properties ####################################################
    ############################################################################

    @property
    def enabled(self): return True

    @enabled.setter
    def enabled(self, value): pass

    ############################################################################

    @property
    def value(self): return self._value

    @value.setter
    def value(self, value):
        # Fire changed_event only when the value actually changes.
        oldvalue = self._value
        self._value = value
        if oldvalue != value: self.changed_event()

    ############################################################################

    @property
    def label(self): return self._label

    @label.setter
    def label(self, value): self._label = value

    ############################################################################

    @property
    def form(self): return None

    ############################################################################

    @property
    def parent(self): return self._parent

    @parent.setter
    def parent(self, value): self._parent = value
| StarcoderdataPython |
3331268 | """
nmeta flows.py Unit Tests
Note: no testing of max_interpacket_interval and
min_interpacket_interval as they become imprecise due
to floating point and when tried using decimal module
found that would not serialise into Pymongo db.
Note that packets + metadata are imported from local packets_* modules
TBD duplicate packets (retx / diff switch)
TBD: test flow.tcp_urg(), flow.tcp_ece(), flow.tcp_cwr()
TBD: IPv6 tests
TBD: ARP
TBD: ICMP
TBD: UDP
"""
#*** Handle tests being in different directory branch to app code:
import sys
import struct
sys.path.insert(0, '../nmeta')
import logging
#*** JSON imports:
import json
from json import JSONEncoder
import binascii
#*** For timestamps:
import datetime
import time
#*** Import dpkt for packet parsing:
import dpkt
#*** Testing imports:
import mock
import unittest
#*** Ryu imports:
from ryu.base import app_manager # To suppress cyclic import
from ryu.controller import controller
from ryu.controller import handler
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.ofproto import ofproto_protocol
from ryu.ofproto import ofproto_parser
from ryu.lib import addrconv
#*** nmeta imports:
import nmeta
import config
import flows as flows_module
import policy as policy_module
import identities as identities_module
import nethash
#*** nmeta test packet imports:
import packets_ipv4_http as pkts
import packets_ipv4_http2 as pkts2
import packets_ipv4_tcp_reset as pkts3
import packets_lldp as pkts_lldp
import packets_ipv4_ARP_2 as pkts_ARP_2
#*** Instantiate Config class:
config = config.Config()  # NOTE: rebinds module name ``config`` to the instance
logger = logging.getLogger(__name__)
#*** Test DPIDs and in ports:
DPID1 = 1
DPID2 = 2
INPORT1 = 1
INPORT2 = 2
#======================== flows.py Unit Tests ============================
def test_flow_ipv4_http():
    """
    Test ingesting packets from an IPv4 HTTP flow, with a packet
    from a different flow ingested mid-stream.
    This flow is not torn down.
    """
    #*** Sanity check can read into dpkt:
    eth = dpkt.ethernet.Ethernet(pkts.RAW[0])
    eth_src = mac_addr(eth.src)
    assert eth_src == '08:00:27:2a:d6:dd'
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)
    #*** Test Flow 1 Packet 1 (Client TCP SYN):
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    pkt_test(flow, pkts, 1, 1)
    #*** Test Flow 1 Packet 2 (Server TCP SYN ACK):
    flow.ingest_packet(DPID1, INPORT2, pkts.RAW[1], datetime.datetime.now())
    pkt_test(flow, pkts, 2, 2)
    #*** Test Flow 1 Packet 3 (Client ACK):
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[2], datetime.datetime.now())
    pkt_test(flow, pkts, 3, 3)
    #*** Random packet to ensure it doesn't count against flow 1:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[1], datetime.datetime.now())
    #*** Test Flow 1 Packet 4 (Client to Server HTTP GET):
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[3], datetime.datetime.now())
    pkt_test(flow, pkts, 4, 4)
    #*** Test Flow 1 Packet 5 (Server ACK):
    flow.ingest_packet(DPID1, INPORT2, pkts.RAW[4], datetime.datetime.now())
    pkt_test(flow, pkts, 5, 5)
    #*** Test Flow 1 Packet 6 (Server to Client HTTP 400 Bad Request):
    flow.ingest_packet(DPID1, INPORT2, pkts.RAW[5], datetime.datetime.now())
    pkt_test(flow, pkts, 6, 6)
    #*** Test Flow 1 Packet 7 (Client ACK):
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[6], datetime.datetime.now())
    pkt_test(flow, pkts, 7, 7)
    #*** Test Flow 1 Packet 7 (Client ACK) - different DPID:
    #*** (the same packet seen by a second switch must not bump the counts)
    flow.ingest_packet(DPID2, INPORT1, pkts.RAW[6], datetime.datetime.now())
    pkt_test(flow, pkts, 7, 7)
def test_flow_ipv4_http2():
    """
    Test ingesting packets from an IPv4 HTTP flow, with a packet
    from a different flow ingested mid-stream. This flow is a
    successful retrieval of an HTTP object with connection close
    so TCP session nicely torn down with FINs
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)
    #*** Test Flow 2 Packet 1 (Client TCP SYN):
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], datetime.datetime.now())
    pkt_test(flow, pkts2, 1, 1)
    #*** Test Flow 2 Packet 2 (Server TCP SYN ACK):
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[1], datetime.datetime.now())
    pkt_test(flow, pkts2, 2, 2)
    #*** Test Flow 2 Packet 3 (Client ACK):
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[2], datetime.datetime.now())
    pkt_test(flow, pkts2, 3, 3)
    #*** Random packet to ensure it doesn't count against flow 2:
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())
    #*** Test Flow 2 Packet 4 (Client HTTP GET):
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[3], datetime.datetime.now())
    pkt_test(flow, pkts2, 4, 4)
    #*** Test Flow 2 Packet 5 (Server ACK):
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[4], datetime.datetime.now())
    pkt_test(flow, pkts2, 5, 5)
    #*** Test Flow 2 Packet 6 (Server HTTP 200 OK):
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[5], datetime.datetime.now())
    pkt_test(flow, pkts2, 6, 6)
    #*** Test Flow 2 Packet 7 (Client ACK):
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[6], datetime.datetime.now())
    pkt_test(flow, pkts2, 7, 7)
    #*** Test Flow 2 Packet 8 (Server sends HTML Page to Client):
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[7], datetime.datetime.now())
    pkt_test(flow, pkts2, 8, 8)
    #*** Test Flow 2 Packet 9 (Client ACK):
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[8], datetime.datetime.now())
    pkt_test(flow, pkts2, 9, 9)
    #*** Test Flow 2 Packet 10 (Server FIN ACK):
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[9], datetime.datetime.now())
    pkt_test(flow, pkts2, 10, 10)
    #*** Test Flow 2 Packet 11 (Client FIN ACK):
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[10], datetime.datetime.now())
    pkt_test(flow, pkts2, 11, 11)
    #*** Test Flow 2 Packet 12 (Final ACK from Server):
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[11], datetime.datetime.now())
    pkt_test(flow, pkts2, 12, 12)
def test_flow_ipv4_tcp_reset():
    """
    Test ingesting packets from an IPv4 TCP flow that is immediately
    shutdown with a TCP RST
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)
    #*** (NOTE(review): comments below say "Flow 2" but this uses pkts3)
    #*** Test Flow 2 Packet 1 (Client SYN on TCP-81):
    flow.ingest_packet(DPID1, INPORT1, pkts3.RAW[0], datetime.datetime.now())
    pkt_test(flow, pkts3, 1, 1)
    #*** Test Flow 2 Packet 2 (Server RST):
    flow.ingest_packet(DPID1, INPORT2, pkts3.RAW[1], datetime.datetime.now())
    pkt_test(flow, pkts3, 2, 2)
def test_flow_LLDP():
    """
    Test ingesting LLDP (non-IP) packets
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)
    #*** Test LLDP ingestion:
    flow.ingest_packet(DPID1, INPORT1, pkts_lldp.RAW[0],
                       datetime.datetime.now())
    assert flow.packet_count() == 1
    assert flow.packet.length == pkts_lldp.LEN[0]
    assert flow.packet.eth_src == pkts_lldp.ETH_SRC[0]
    assert flow.packet.eth_dst == pkts_lldp.ETH_DST[0]
    #*** Ingest same packet again, shouldn't increase flow count as isn't flow:
    flow.ingest_packet(DPID1, INPORT1, pkts_lldp.RAW[0],
                       datetime.datetime.now())
    assert flow.packet_count() == 1
    assert flow.packet.length == pkts_lldp.LEN[0]
    assert flow.packet.eth_src == pkts_lldp.ETH_SRC[0]
    assert flow.packet.eth_dst == pkts_lldp.ETH_DST[0]
def test_classification_static():
    """
    Test that classification returns correct information for a static
    classification.
    Create a classification object, record it to DB then check
    that classification can be retrieved
    """
    #*** Initial main_policy won't match as looking for tcp-1234:
    policy = policy_module.Policy(config,
                pol_dir_default="config/tests/regression",
                pol_dir_user="config/tests/foo",
                pol_filename="main_policy_regression_static.yaml")
    #*** Instantiate flow and identities objects:
    flow = flows_module.Flow(config)
    ident = identities_module.Identities(config, policy)
    #*** Ingest Flow 2 Packet 0 (Client TCP SYN):
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], datetime.datetime.now())
    #*** Base classification state:
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 0
    assert flow.classification.classification_tag == ""
    assert flow.classification.classification_time == 0
    assert flow.classification.actions == {}
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    #*** Unmatched classification state:
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == ""
    assert flow.classification.classification_time == 0
    assert flow.classification.actions == {}
    #*** Initial main_policy that matches tcp-80:
    policy = policy_module.Policy(config,
                pol_dir_default="config/tests/regression",
                pol_dir_user="config/tests/foo",
                pol_filename="main_policy_regression_static_3.yaml")
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    #*** Matched classification state:
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == "Constrained Bandwidth Traffic"
    assert flow.classification.actions == {'qos_treatment': 'constrained_bw',
                                'set_desc': 'Constrained Bandwidth Traffic'}
    #*** Now test that classification remains after ingesting more packets
    #*** on same flow.
    #*** Load main_policy that matches dst tcp-80:
    policy = policy_module.Policy(config,
                pol_dir_default="config/tests/regression",
                pol_dir_user="config/tests/foo",
                pol_filename="main_policy_regression_static_4.yaml")
    #*** Ingest Flow 1 Packet 0 (Client TCP SYN):
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    logger.debug("pkt0 flow classification is %s", flow.classification.dbdict())
    #*** Matched classification state:
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == "Constrained Bandwidth Traffic"
    assert flow.classification.actions == {'qos_treatment': 'constrained_bw',
                                'set_desc': 'Constrained Bandwidth Traffic'}
    #*** Write classification result to classifications collection:
    flow.classification.commit()
    #*** Ingest Flow 1 Packet 1 (Client TCP SYN+ACK):
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())
    logger.debug("pkt1a flow classification is %s", flow.classification.dbdict())
    assert flow.classification.classified == 1
    #*** We would never run this as otherwise above test would have failed.
    #*** Left it in here to make the point that you shouldn't classify if
    #*** classified is set.
    if not flow.classification.classified:
        #*** Classify the packet:
        policy.check_policy(flow, ident)
    logger.debug("pkt1b flow classification is %s", flow.classification.dbdict())
    #*** Matched classification state (shouldn't be changed by second packet):
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == "Constrained Bandwidth Traffic"
    assert flow.classification.actions == {'qos_treatment': 'constrained_bw',
                                'set_desc': 'Constrained Bandwidth Traffic'}
def test_record_removal():
    """
    Test the recording of an idle-timeout flow removal message
    sent by a switch into the flow_rems database collection
    Synthesise flow removal messages to test with.
    """
    #*** Supports OpenFlow version 1.3:
    OFP_VERSION = ofproto_v1_3.OFP_VERSION
    #*** Instantiate Flow class:
    flow = flows_module.Flow(config)
    #*** Load JSON representations of flow removed messages:
    with open('OFPMsgs/OFPFlowRemoved_1.json', 'r') as json_file:
        json_str_tx = json_file.read()
        json_dict_tx = json.loads(json_str_tx)
    with open('OFPMsgs/OFPFlowRemoved_2.json', 'r') as json_file:
        json_str_rx = json_file.read()
        json_dict_rx = json.loads(json_str_rx)
    #*** Set up fake datapath and synthesise messages:
    datapath = ofproto_protocol.ProtocolDesc(version=OFP_VERSION)
    datapath.id = 1
    msg_tx = ofproto_parser.ofp_msg_from_jsondict(datapath, json_dict_tx)
    msg_rx = ofproto_parser.ofp_msg_from_jsondict(datapath, json_dict_rx)
    logger.debug("msg_tx=%s", msg_tx)
    #*** Call our method that we're testing with the synthesised flow rems:
    flow.record_removal(msg_tx)
    flow.record_removal(msg_rx)
    #*** Check that messages recorded correctly in database collection:
    db_data_tx = {'ip_A': '10.1.0.1', 'tp_B': 80}
    result = flow.flow_rems.find(db_data_tx).sort('$natural', -1).limit(1)
    result_tx = list(result)[0]
    logger.debug("result=%s", result_tx)
    assert result_tx['table_id'] == 1
    assert result_tx['ip_B'] == '10.1.0.2'
    assert result_tx['tp_A'] == 43297
    assert result_tx['packet_count'] == 10
    assert result_tx['flow_hash'] == nethash.hash_flow(('10.1.0.1',
                                            '10.1.0.2', 43297, 80, 6))
    assert result_tx['cookie'] == 23
    assert result_tx['direction'] == 'forward'
    #*** Return leg of flow:
    db_data_tx = {'ip_B': '10.1.0.1', 'tp_A': 80}
    result = flow.flow_rems.find(db_data_tx).sort('$natural', -1).limit(1)
    result_tx = list(result)[0]
    logger.debug("result=%s", result_tx)
    assert result_tx['table_id'] == 1
    assert result_tx['ip_A'] == '10.1.0.2'
    assert result_tx['tp_B'] == 43297
    assert result_tx['packet_count'] == 9
    assert result_tx['flow_hash'] == nethash.hash_flow(('10.1.0.2',
                                            '10.1.0.1', 80, 43297, 6))
    assert result_tx['cookie'] == 1000000023
    assert result_tx['direction'] == 'reverse'
def test_classification_identity():
    """
    Test that classification returns correct information for an identity
    classification.
    Create a classification object, record it to DB then check
    that classification can be retrieved
    """
    #*** Load main_policy that matches identity pc1
    #*** and has action to constrain it's bandwidth:
    policy = policy_module.Policy(config,
                pol_dir_default="config/tests/regression",
                pol_dir_user="config/tests/foo",
                pol_filename="main_policy_regression_identity_2.yaml")
    #*** Instantiate flow and identities objects:
    flow = flows_module.Flow(config)
    ident = identities_module.Identities(config, policy)
    #*** Ingest and harvest LLDP Packet 2 (lg1) that shouldn't match:
    # 206 08:00:27:21:4f:ea 01:80:c2:00:00:0e LLDP NoS = 08:00:27:21:4f:ea
    # TTL = 120 System Name = lg1.example.com
    flow.ingest_packet(DPID1, INPORT1, pkts_lldp.RAW[2], datetime.datetime.now())
    ident.harvest(pkts_lldp.RAW[2], flow.packet)
    #*** Ingest a packet from pc1:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    #*** Unmatched classification state:
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == ""
    assert flow.classification.actions == {}
    #*** Ingest ARP response for pc1 so we know MAC to IP mapping:
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[1], datetime.datetime.now())
    ident.harvest(pkts_ARP_2.RAW[1], flow.packet)
    #*** Ingest and harvest LLDP Packet 0 (pc1) that should match:
    # 206 08:00:27:2a:d6:dd 01:80:c2:00:00:0e LLDP NoS = 08:00:27:2a:d6:dd
    # TTL = 120 System Name = pc1.example.com
    flow.ingest_packet(DPID1, INPORT1, pkts_lldp.RAW[0], datetime.datetime.now())
    ident.harvest(pkts_lldp.RAW[0], flow.packet)
    #*** Ingest a packet from pc1:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    #*** Matched classification state:
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == "Constrained Bandwidth Traffic"
    assert flow.classification.actions == {'qos_treatment': 'constrained_bw',
                                'set_desc': 'Constrained Bandwidth Traffic'}
def test_indexing():
    """
    Test indexing of packet_ins and classification database collections
    Packets are ingested from 3 flows.
    Packets from one of the flows are too old to be significant
    The most recent packet is the one that the flow context is in
    and it only has one other packet ingested (i.e. packet_count == 2)
    """
    #*** Initial main_policy won't match as looking for tcp-1234:
    policy = policy_module.Policy(config,
                pol_dir_default="config/tests/regression",
                pol_dir_user="config/tests/foo",
                pol_filename="main_policy_regression_static.yaml")
    #*** Instantiate flow and identities objects:
    flow = flows_module.Flow(config)
    ident = identities_module.Identities(config, policy)
    #*** Ingest packets older than flow timeout:
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[0], datetime.datetime.now() - datetime.timedelta \
        (seconds=config.get_value("flow_time_limit")+1))
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[1], datetime.datetime.now() - datetime.timedelta \
        (seconds=config.get_value("flow_time_limit")+1))
    #*** Ingest current packets from two different flows:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[1], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[2], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[3], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[4], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[5], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[6], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[7], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[8], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[9], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[10], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[11], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[2], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    #*** Test packet_ins collection indexing...
    #*** Should be 16 documents in packet_ins collection:
    assert flow.packet_ins.count() == 16
    #*** Get query execution statistics:
    explain = flow.packet_count(test=1)
    #*** Check an index is used:
    assert explain['queryPlanner']['winningPlan']['inputStage']['stage'] == 'IXSCAN'
    #*** Check how query ran:
    assert explain['executionStats']['executionSuccess'] == True
    assert explain['executionStats']['nReturned'] == 2
    #*** MongoDB returns 2 or 3 for this, not sure why...???:
    assert explain['executionStats']['totalKeysExamined'] > 1
    assert explain['executionStats']['totalKeysExamined'] < 4
    assert explain['executionStats']['totalDocsExamined'] == 2
    #*** Test classifications collection indexing...
    #*** Should be 4 documents in classifications collection:
    assert flow.classifications.count() == 4
    #*** Get query execution statistics:
    explain2 = flow.classification.test_query()
    #*** Check an index is used:
    assert explain2['queryPlanner']['winningPlan']['inputStage']['stage'] == 'FETCH'
    #*** Check how query ran:
    assert explain2['executionStats']['executionSuccess'] == True
    assert explain2['executionStats']['nReturned'] == 1
    assert explain2['executionStats']['totalKeysExamined'] == 1
    assert explain2['executionStats']['totalDocsExamined'] == 1
def test_not_suppressed():
    """
    Test this query that checks to see if a flow mod to a switch
    is not suppressed (preventing possible duplicate flow mods)
    """
    #*** Instantiate Flow class:
    flow = flows_module.Flow(config)
    #*** Create a sample result to use:
    ipv4_src='10.1.0.1'
    ipv4_dst='10.1.0.2'
    result = {'match_type': 'single', 'forward_cookie': 1,
        'forward_match': {'eth_type': 0x0800,
        'ipv4_src': ipv4_src, 'ipv4_dst': ipv4_dst,
        'ip_proto': 6}, 'reverse_cookie': 0, 'reverse_match': {},
        'client_ip': ipv4_src}
    #*** Ingest a packet from pc1:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    #*** Check to see if this flow is suppressed:
    assert flow.not_suppressed(DPID1, 'suppress') == 1
    #*** Record suppressing this flow:
    flow.record_suppression(DPID1, 'suppress', result)
    #*** Check to see if this flow is suppressed now:
    assert flow.not_suppressed(DPID1, 'suppress') == 0
    #*** Check to see if this flow is not suppressed for different DPID:
    assert flow.not_suppressed(DPID2, 'suppress') == 1
    #*** Record suppressing this flow for DPID2:
    flow.record_suppression(DPID2, 'suppress', result)
    #*** Check to see if this flow is now suppressed for DPID2:
    assert flow.not_suppressed(DPID2, 'suppress') == 0
    #*** Check to see if this flow is not suppressed for different
    #*** suppress_type:
    assert flow.not_suppressed(DPID1, 'drop') == 1
    #*** Record suppressing this flow for suppress_type drop:
    flow.record_suppression(DPID1, 'drop', result)
    #*** Check to see if this flow is now suppressed for drop
    assert flow.not_suppressed(DPID1, 'drop') == 0
def test_record_suppression():
    """
    Test the recording of a flow suppression event
    """
    #*** Instantiate Flow class:
    flow = flows_module.Flow(config)
    #*** Create a sample result to use:
    ipv4_src='10.1.0.1'
    ipv4_dst='10.1.0.2'
    result = {'match_type': 'single', 'forward_cookie': 1,
        'forward_match': {'eth_type': 0x0800,
        'ipv4_src': ipv4_src, 'ipv4_dst': ipv4_dst,
        'ip_proto': 6}, 'reverse_cookie': 0, 'reverse_match': {},
        'client_ip': ipv4_src}
    #*** Ingest a packet from pc1:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    #*** Record suppressing this flow
    flow.record_suppression(DPID1, 'forward', result)
    #*** Note: don't need further tests as it gets worked out by
    #*** test_api_external in test_flow_mods
def test_origin():
    """
    Test origin method that returns tuple of client IP and first DPID
    We ingest multiple packets on flow but origin should always return
    the first source IP and DPID
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)
    #*** First packet, this should lock as the source IP and DPID:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], datetime.datetime.now())
    assert flow.origin()[0] == pkts2.IP_SRC[0]
    assert flow.origin()[1] == DPID1
    #*** Same packet, different DPID, should be ignored:
    flow.ingest_packet(DPID2, INPORT1, pkts2.RAW[0], datetime.datetime.now())
    assert flow.origin()[0] == pkts2.IP_SRC[0]
    assert flow.origin()[1] == DPID1
    #*** Another packet, should be ignored:
    flow.ingest_packet(DPID2, INPORT1, pkts2.RAW[1], datetime.datetime.now())
    assert flow.origin()[0] == pkts2.IP_SRC[0]
    assert flow.origin()[1] == DPID1
def test_max_interpacket_interval():
    """
    Test max_interpacket_interval method
    Remember, assessed per direction in flow
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)
    #*** Create some packet times to use, based of current time as otherwise
    #*** will break db search time limits:
    base_time = datetime.datetime.now()
    time_2 = base_time + datetime.timedelta(milliseconds=10)
    time_3 = base_time + datetime.timedelta(milliseconds=30)
    time_4 = base_time + datetime.timedelta(milliseconds=80)
    time_5 = base_time + datetime.timedelta(milliseconds=90)
    time_6 = base_time + datetime.timedelta(milliseconds=190)
    #*** Ingest packets, note 3rd packet is duplicate from diff DPID to ignore:
    #*** Forward direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], base_time)
    #*** Reverse direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[1], time_2)
    flow.ingest_packet(DPID2, INPORT1, pkts2.RAW[1], time_3)
    #*** Forward direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[2], time_4)
    #*** Largest interpacket interval is in forward direction between base_time
    #*** and time_4
    assert flow.max_interpacket_interval() == 0.080
    #*** Forward direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[3], time_5)
    #*** Reverse direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[4], time_6)
    #*** Largest interpacket interval is in reverse direction between time_2
    #*** (time_3 excluded as different DPID) and time_6
    assert flow.max_interpacket_interval() == 0.180
def test_min_interpacket_interval():
    """
    Test min_interpacket_interval method
    Remember, assessed per direction in flow
    """
    # Build a fresh flow object for this test:
    flow = flows_module.Flow(config)
    # Packet timestamps are offsets from "now"; older timestamps would fall
    # outside the flow's database search time window:
    start = datetime.datetime.now()
    stamps = [start + datetime.timedelta(milliseconds=ms)
              for ms in (0, 10, 30, 80, 90, 190)]
    # Ingest the first four packets; the third repeats packet 1 from a
    # different DPID and must therefore be ignored by the flow:
    first_batch = (
        (DPID1, 0, stamps[0]),   # forward direction
        (DPID1, 1, stamps[1]),   # reverse direction
        (DPID2, 1, stamps[2]),   # duplicate from different DPID (ignored)
        (DPID1, 2, stamps[3]),   # forward direction
    )
    for dpid, pkt_idx, when in first_batch:
        flow.ingest_packet(dpid, INPORT1, pkts2.RAW[pkt_idx], when)
    # Smallest gap so far is forward: stamps[0] -> stamps[3] = 80 ms
    assert flow.min_interpacket_interval() == 0.080
    # Forward direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[3], stamps[4])
    # Reverse direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[4], stamps[5])
    # Smallest gap is now forward between stamps[3] and stamps[4] (10 ms):
    assert flow.min_interpacket_interval() == 0.010
def test_packet_directions():
    """
    Test packet_directions method
    """
    flow = flows_module.Flow(config)
    # (dpid, raw packet index) pairs to ingest; the second copy of packet 1
    # arrives from a different DPID and must be ignored by the flow:
    ingest_plan = (
        (DPID1, 0),   # forward
        (DPID1, 1),   # reverse
        (DPID2, 1),   # duplicate from different DPID (ignored)
        (DPID1, 2),   # forward
        (DPID1, 3),   # forward
        (DPID1, 4),   # reverse
    )
    for dpid, pkt_idx in ingest_plan:
        flow.ingest_packet(dpid, INPORT1, pkts2.RAW[pkt_idx],
                           datetime.datetime.now())
    # Directions of the five retained packets (1=forward, 0=reverse):
    assert flow.packet_directions() == [1, 0, 1, 1, 0]
def test_packet_sizes():
    """
    Test packet_sizes method
    """
    flow = flows_module.Flow(config)
    # (dpid, raw packet index) pairs to ingest; the second copy of packet 1
    # arrives from a different DPID and must be ignored by the flow:
    ingest_plan = (
        (DPID1, 0),   # forward
        (DPID1, 1),   # reverse
        (DPID2, 1),   # duplicate from different DPID (ignored)
        (DPID1, 2),   # forward
        (DPID1, 3),   # forward
        (DPID1, 4),   # reverse
    )
    for dpid, pkt_idx in ingest_plan:
        flow.ingest_packet(dpid, INPORT1, pkts2.RAW[pkt_idx],
                           datetime.datetime.now())
    # Sizes of the five retained packets, in ingestion order:
    assert flow.packet_sizes() == [74, 74, 66, 321, 66]
#================= HELPER FUNCTIONS ===========================================
def pkt_test(flow, pkts, pkt_num, flow_packet_count):
    """
    Passed a flow object, packets object, packet number
    from the packets object and the number of unique packets
    in the flow and check parameters match
    """
    idx = pkt_num - 1
    assert flow.packet_count() == flow_packet_count
    # Plain attribute-for-attribute comparisons against the packets fixture:
    attr_map = (
        ("length", "LEN"),
        ("eth_src", "ETH_SRC"),
        ("eth_dst", "ETH_DST"),
        ("eth_type", "ETH_TYPE"),
        ("ip_src", "IP_SRC"),
        ("ip_dst", "IP_DST"),
        ("proto", "PROTO"),
        ("tp_src", "TP_SRC"),
        ("tp_dst", "TP_DST"),
        ("tp_seq_src", "TP_SEQ_SRC"),
        ("tp_seq_dst", "TP_SEQ_DST"),
    )
    for attr, column in attr_map:
        assert getattr(flow.packet, attr) == getattr(pkts, column)[idx]
    # TCP flags are exposed as methods on the packet object:
    for flag in ("syn", "fin", "rst", "psh", "ack"):
        assert getattr(flow.packet, "tcp_" + flag)() == \
                            getattr(pkts, "TCP_" + flag.upper())[idx]
    assert flow.packet.payload.encode("hex") == pkts.PAYLOAD[idx]
    assert flow.client() == pkts.FLOW_IP_CLIENT
    assert flow.server() == pkts.FLOW_IP_SERVER
    assert flow.packet_direction() == pkts.DIRECTION[idx]
    assert flow.max_packet_size() == max(pkts.LEN[0:pkt_num])
def mac_addr(address):
    """
    Convert a MAC address to a readable/printable string

    Accepts the address as a raw byte sequence (Python 2 str or
    Python 3 bytes) and returns colon-separated lower-case hex,
    e.g. '00:11:22:33:44:55'.
    """
    # Iterating bytes yields ints on Python 3 but 1-char strings on
    # Python 2, so normalise each element before formatting; the original
    # ord(b) raised TypeError for Python 3 bytes input.
    return ':'.join('%02x' % (b if isinstance(b, int) else ord(b))
                    for b in address)
def _ipv4_t2i(ip_text):
    """
    Turns an IPv4 address in text format into an integer.
    Borrowed from rest_router.py code
    """
    # Pass the value 0 straight through (used as a wildcard/sentinel):
    if ip_text == 0:
        return ip_text
    assert isinstance(ip_text, str)
    # Convert the dotted-quad to 4 network-order bytes, then read them back
    # as one unsigned 32-bit big-endian integer:
    return struct.unpack('!I', addrconv.ipv4.text_to_bin(ip_text))[0]
| StarcoderdataPython |
11295633 | <filename>simple_notes/notes/tasks.py
from celery import shared_task
from .utils import send_email
from django.utils.translation import gettext as _
from .models import Reminder
@shared_task
def send_email_task(subject: str, email: str, content: str):
    """Celery task: send one email via utils.send_email and log the outcome.

    Args:
        subject: Subject line of the message.
        email: Recipient address.
        content: Message body.
    """
    print(f'SEND_EMAIL_TASK: Sending an email to {email}')
    if send_email(subject, email, content):
        print(f'SEND_EMAIL_TASK: Email was sent to {email}')
        return
    # send_email() reported failure - say which recipient failed instead of
    # the original uninformative placeholder-free f-string 'Error':
    print(f'ERROR_DURING_SEND_EMAIL_TASK: Failed to send email to {email}')
@shared_task
def send_reminder_task(
    username: str,
    email: str,
    notebook_title: str,
    note_name: str,
    note_full_link: str
):
    """Celery task: email a one-off reminder about a note, then delete the
    matching Reminder row so it does not fire again.

    Args:
        username: Owner of the note (used in the greeting and for lookup).
        email: Recipient address.
        notebook_title: Title of the notebook containing the note.
        note_name: Title of the note being reminded about.
        note_full_link: Absolute URL that opens the note.
    """
    print(f'SEND_REMINDER_TASK: Sending a reminder for {username}')
    # Subject and body fragments pass through gettext so the email is
    # localised to the active language:
    send_email(
        email=email,
        subject=_('Don\'t forget about {note_name} | Simple Notes Reminder').format(note_name=note_name),
        content='''
        <html>
            <body>
                <p>{greeting}</p>
                <p>{text}</p>
                <p>
                    <a href="{note_full_url}">{go_to_note}</a>
                </p>
            </body>
        </html>
        '''.format(
            greeting=_('Hey {username}!').format(username=username),
            text=_('You wanted to be reminded about {note_name}. Click on the link below in order to open it.')
            .format(note_name=note_name),
            note_full_url=note_full_link,
            go_to_note=_('Open {note_name}').format(note_name=note_name),
        )
    )
    # One-shot reminder: remove the row once the email has been sent.
    # NOTE(review): .get() raises DoesNotExist if the reminder was already
    # removed - presumably acceptable here; confirm against task retry policy.
    Reminder.objects.get(
        note__notebook__user__username=username,
        note__notebook__title=notebook_title,
        note__title=note_name,
    ).delete()
    print(f'SEND_REMINDER_TASK: Reminder for {username} was sent')
| StarcoderdataPython |
6669364 | <reponame>Samuel-Melo890/Python-Desafios<gh_stars>0
def tabela(list):
    """Print the 3x3 tic-tac-toe board with row and column indices."""
    # NOTE(review): the parameter shadows the built-in `list`; renaming it
    # (e.g. to `board`) would be clearer, but the code is left unchanged here.
    print(f''' 0 1 2
0 | {list[0][0]} | {list[0][1]} | {list[0][2]} |
1 | {list[1][0]} | {list[1][1]} | {list[1][2]} |
2 | {list[2][0]} | {list[2][1]} | {list[2][2]} |
''')
from os import system
from module.interface import *
import random
from time import sleep
# Clear the console and show the game banner:
system('cls')
print('='*8,'Jogo da Velha', '='*8)
# 3x3 board, initially empty:
l = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]
# nj = moves played this round, v = wins, d = defeats, e = draws:
nj = v = d = e = 0
while True:
    # One full round per iteration of this inner loop:
    while True:
        system('cls')
        print('')
        tabela(l)
        # Check rows, columns and both diagonals for a player ('X') win:
        if l[0][0] == l[0][1] == l[0][2] == 'X' or l[1][0] == l[1][1] == l[1][2] == 'X' or l[2][0] == l[2][1] == l[2][2] == 'X':
            print('\033[32mO jogador venceu! Parabéns!\033[m')
            v += 1
            break
        elif l[0][0] == l[1][0] == l[2][0] == 'X' or l[0][1] == l[1][1] == l[2][1] == 'X' or l[0][2] == l[1][2] == l[2][2] == 'X':
            print('\033[32mO jogador venceu! Parabéns!\033[m')
            v += 1
            break
        elif l[0][0] == l[1][1] == l[2][2] == 'X' or l[2][0] == l[1][1] == l[0][2] == 'X':
            print('\033[32mO jogador venceu! Parabéns!\033[m')
            v += 1
            break
        # Same checks for a computer ('O') win:
        elif l[0][0] == l[0][1] == l[0][2] == 'O' or l[1][0] == l[1][1] == l[1][2] == 'O' or l[2][0] == l[2][1] == l[2][2] == 'O':
            print('\033[31mO jogador perdeu! Que pena!\033[m')
            d += 1
            break
        elif l[0][0] == l[1][0] == l[2][0] == 'O' or l[0][1] == l[1][1] == l[2][1] == 'O' or l[0][2] == l[1][2] == l[2][2] == 'O':
            print('\033[31mO jogador perdeu! Que pena!\033[m')
            d += 1
            break
        elif l[0][0] == l[1][1] == l[2][2] == 'O' or l[2][0] == l[1][1] == l[0][2] == 'O':
            print('\033[31mO jogador perdeu! Que pena!\033[m')
            d += 1
            break
        else:
            # Board full with no winner -> draw ("deu velha"):
            if nj == 9:
                print('\033[36mDeu velha! Empate!\033[m')
                e += 1
                break
        try:
            sleep(1)
            print('')
            titulo('Escolha a posição')
            # Player's move: keep asking until a valid empty cell is chosen.
            while True:
                jl = int(input('Qual linha deseja escolher? '))
                if jl < 0 or jl > 2:
                    print('\033[31mPreencha os dados corretamente!\033[m')
                    continue
                jc = int(input('Qual coluna deseja escolher? '))
                if jc < 0 or jc > 2:
                    print('\033[31mPreencha os dados corretamente!\033[m')
                    continue
                if l[jl][jc] in ' ':
                    l[jl][jc] = 'X'
                else:
                    print('\033[31mLocal inválido!\033[m')
                    continue
                nj += 1
                sleep(1)
                break
            # If the player just filled the last cell, skip the computer's
            # turn and let the win/draw checks above run:
            if nj == 9:
                continue
            # Computer's move: random cells until an empty one is found.
            while True:
                cl = random.randint(0, 2)
                cc = random.randint(0, 2)
                if l[cl][cc] in ' ':
                    l[cl][cc] = 'O'
                    nj += 1
                    break
        except (ValueError, TypeError, KeyboardInterrupt):
            # Non-integer input (or Ctrl+C) during the prompts: warn and retry.
            # NOTE(review): "Degite" is a typo for "Digite" in this
            # user-facing message; left unchanged because it is runtime text.
            print('\033[31mERRO! Degite um número inteiro válido!\033[m')
    # Ask whether to play another round:
    r = ' '
    while r not in 'SN':
        r = str(input('Deseja jogar novamente? [S/N] ')).strip().upper()[0]
    if r in 'N':
        break
    else:
        # Reset move counter and board for the next round:
        nj = 0
        l[0][0] = l[0][1] = l[0][2] = ' '
        l[1][0] = l[1][1] = l[1][2] = ' '
        l[2][0] = l[2][1] = l[2][2] = ' '
# Final scoreboard:
system('cls')
print('\033[36mPrograma Finalizado!\033[m')
print(f'\033[35mVitórias:\033[m {v}\n\033[35mDerrotas:\033[m {d}\n\033[35mEmpates:\033[m {e}\n\033[32mVolte sempre!\033[m')
| StarcoderdataPython |
12876 | import os
import databases
import sqlalchemy
# Database connection settings come from environment variables so the same
# codebase can target different databases per deployment:
DB_CONNECTOR = os.getenv('APP_DB_CONNECTOR')
DB_USERNAME = os.getenv('APP_DB_USERNAME')
DB_PASSWORD = os.getenv('APP_DB_PASSWORD')
DB_HOST = os.getenv('APP_DB_HOST')
DB_PORT = os.getenv('APP_DB_PORT')
DB_DATABASE = os.getenv('APP_DB_DATABASE')
# DSN of the form connector://user:password@host:port/database
# NOTE(review): os.getenv returns None for unset variables, which would embed
# the literal string "None" in the URL - confirm deployment always sets them.
DB_URL = f'{DB_CONNECTOR}://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_DATABASE}'

# Shared async database handle and SQLAlchemy metadata registry:
db: databases.Database = databases.Database(DB_URL)
metadata: sqlalchemy.MetaData = sqlalchemy.MetaData()
| StarcoderdataPython |
15270 | # Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.stateful."""
from absl.testing import absltest
from haiku._src import base
from haiku._src import module
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
class StatefulTest(absltest.TestCase):
  """Tests for Haiku's stateful wrappers around JAX transforms.

  Covers stateful.grad / value_and_grad / jit / remat / cond: each must
  thread Haiku parameter/state bookkeeping through the corresponding JAX
  transform, and must refuse to run outside an hk.transform context.
  """

  @test_utils.transform_and_run
  def test_grad(self):
    x = jnp.array(3.)
    g = stateful.grad(SquareModule())(x)
    # d/dx x**2 == 2x (SquareModule's parameter p is initialised to 2).
    np.testing.assert_allclose(g, 2 * x, rtol=1e-4)

  def test_grad_no_transform(self):
    x = jnp.array(3.)
    # Outside hk.transform the wrapper must raise rather than silently work.
    with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
      stateful.grad(lambda x: x**2)(x)

  @test_utils.transform_and_run
  def test_value_and_grad(self):
    x = jnp.array(2.)
    y, g = stateful.value_and_grad(SquareModule())(x)
    self.assertEqual(y, x ** 2)
    np.testing.assert_allclose(g, 2 * x, rtol=1e-4)

  def test_value_and_grad_no_transform(self):
    x = jnp.array(3.)
    with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
      stateful.value_and_grad(lambda x: x**2)(x)

  @test_utils.transform_and_run
  def test_grad_aux(self):
    # Auxiliary output (has_aux=True) must be passed through by identity.
    o = object()

    def f(x):
      m = SquareModule()
      return m(x), o

    x = jnp.array(3.)
    g, aux = stateful.grad(f, has_aux=True)(x)
    np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
    self.assertIs(aux, o)

  @test_utils.transform_and_run
  def test_value_and_grad_aux(self):
    o = object()

    def f(x):
      m = SquareModule()
      return m(x), o

    x = jnp.array(3.)
    (y, aux), g = stateful.value_and_grad(f, has_aux=True)(x)
    self.assertEqual(y, x ** 2)
    np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
    self.assertIs(aux, o)

  def test_grad_and_jit(self):
    # stateful.grad must compose with jax.jit applied to init/apply.
    def f(x):
      g = stateful.grad(SquareModule())(x)
      return g

    x = jnp.array(3.)
    f = transform.transform_with_state(f)
    params, state = jax.jit(f.init)(None, x)
    g, state = jax.jit(f.apply)(params, state, None, x)
    np.testing.assert_allclose(g, 2 * x, rtol=1e-3)

  def test_value_and_grad_and_jit(self):
    def f(x):
      y, g = stateful.value_and_grad(SquareModule())(x)
      return y, g

    x = jnp.array(3.)
    f = transform.transform_with_state(f)
    params, state = jax.jit(f.init)(None, x)
    (y, g), state = jax.jit(f.apply)(params, state, None, x)
    np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
    np.testing.assert_allclose(g, 2 * x, rtol=1e-3)

  @test_utils.transform_and_run
  def test_jit(self):
    mod = SquareModule()
    x = jnp.array(2)
    y = stateful.jit(mod)(x)
    self.assertEqual(y, x ** 2)

  def test_jit_no_transform(self):
    x = jnp.array(2)
    with self.assertRaises(ValueError, msg="Use jax.jit() instead"):
      stateful.jit(lambda x: x**2)(x)

  @test_utils.transform_and_run
  def test_remat(self):
    # Count forward/backward executions of a custom primitive to observe
    # rematerialisation: with remat the forward pass must run extra times.
    forward, backward = [], []
    callback = _callback_prim(lambda: forward.append(None),
                              lambda: backward.append(None))

    def test(remat):
      x = jnp.array(3.)
      mod = CountingModule()
      self.assertEqual(mod.count, 0)
      f = lambda x: callback(mod(x))
      if remat:
        f = stateful.remat(f)
      y, g = stateful.value_and_grad(f)(x)
      np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
      np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
      # Haiku state must be updated exactly once regardless of remat.
      self.assertEqual(mod.count, 1)
      num_forward = len(forward)
      num_backward = len(backward)
      del forward[:], backward[:]
      return num_forward, num_backward

    # Sanity check.
    self.assertEqual(test(remat=True), test(remat=True))
    self.assertEqual(test(remat=False), test(remat=False))

    # NOTE: JAX does not guarantee to execute primitives once and only once for
    # a given function (we observe f=2,b=1 without remat and f=5,b=1 with
    # remat), but we do expect that JAX will execute our primitive forward at
    # least one more time with remat than without it.
    num_forward_remat, num_backward_remat = test(remat=True)
    num_forward_no_remat, num_backward_no_remat = test(remat=False)
    self.assertGreater(num_forward_remat, num_forward_no_remat)
    self.assertEqual(num_backward_remat, num_backward_no_remat)

  def test_remat_no_transform(self):
    x = jnp.array(3.)
    with self.assertRaises(ValueError, msg="Use jax.remat() instead"):
      stateful.remat(lambda x: x**2)(x)

  def test_cond(self):
    # Both branches use the same module instance; state must reflect the
    # branch actually taken.
    def f(x):
      mod = SquareModule()
      return stateful.cond(x == 2, x, mod, x, lambda x: mod(x + 1))

    f = transform.transform_with_state(f)
    for x, y in ((1, 4), (2, 4), (3, 16)):
      x, y = map(jnp.array, (x, y))
      params, state = f.init(None, x)
      out, state = f.apply(params, state, None, x)
      self.assertEqual(state, {"square_module": {"y": y}})
      self.assertEqual(out, y)

  def test_cond_no_transform(self):
    x = jnp.array(3.)
    with self.assertRaises(ValueError, msg="Use jax.cond() instead"):
      stateful.cond(x == 2, x, lambda x: x**2, x, lambda x: (x + 1)**2)
def _callback_prim(forward, backward):
  """Returns a bound JAX primitive acting as identity that invokes
  `forward` when evaluated and `backward` when transposed (differentiated).
  Used by test_remat to count primitive executions."""
  def f_impl(x):
    forward()
    return x

  def b_impl(x):
    backward()
    return (x,)

  prim = jax.core.Primitive("hk_callback")
  prim.def_impl(f_impl)
  # Identity: the abstract value of the output mirrors the input, so the
  # concrete impl doubles as the abstract eval rule.
  prim.def_abstract_eval(f_impl)
  # Registering as linear makes b_impl the transpose (backward) rule.
  jax.ad.deflinear(prim, b_impl)
  return prim.bind
class CountingModule(module.Module):
  """Squares its input while tracking the number of calls in Haiku state."""

  @property
  def count(self):
    # Call counter held in Haiku state, initialised to zero on first access.
    return base.get_state("count", [], init=jnp.zeros)

  def __call__(self, x):
    out = x ** 2
    base.set_state("count", self.count + 1)
    return out
class SquareModule(module.Module):
  """Raises a scalar input to a learned integer power (initially 2),
  stashing the result in Haiku state under "y"."""

  def __call__(self, x):
    assert x.ndim == 0
    exponent = base.get_parameter(
        "p", [], jnp.int32, init=lambda *_: jnp.array(2))
    result = x ** exponent
    base.set_state("y", result)
    return result
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  absltest.main()
| StarcoderdataPython |
29107 | <filename>itertable/gis/mixins.py<gh_stars>10-100
import fiona
from shapely import wkt, geometry
from ..loaders import FileLoader
from ..parsers.base import BaseParser
from ..mappers import TupleMapper
class FionaLoaderParser(FileLoader, BaseParser):
    """
    Composite loader & parser mixin for GIS data, powered by Fiona
    """
    layer_id = None   # set to parse a single layer; None means all layers
    meta = {}         # Fiona dataset metadata (driver, schema, crs, ...)
    key_field = 'id'

    def load(self):
        """Discover the layers in the file, or mark it as a new/empty file."""
        try:
            self.layers = fiona.listlayers(self.filename)
        except (ValueError, IOError):
            # File is missing or unreadable: remember a driver guess so
            # save() can create it later.
            driver = guess_driver(self.filename)
            self.meta = {'driver': driver}
            self.empty_file = True

    def parse(self):
        """Parse features, or build an index of layers when there are many."""
        # If multiple layers, parse all of them (!)
        if len(self.layers) > 1 and self.layer_id is None:
            # Represent each layer as a nested instance of this same class,
            # pinned to a single layer via layer_id:
            cls = type(self)
            self.data = [{
                'id': id,
                'name': name,
                'data': cls(filename=self.filename, layer_id=id)
            } for id, name in enumerate(self.layers)]
        else:
            # One layer, load & parse GIS data
            with fiona.open(self.filename, layer=self.layer_id) as f:
                self.meta = f.meta
                if 'id' in f.meta.get('schema', {}).get('properties', {}):
                    # TODO: Is this correct?
                    del f.meta['schema']['properties']['id']
                self.data = list(map(self.parse_feature, f))

    def parse_feature(self, f):
        """Flatten one Fiona feature into a single-level dict."""
        # Flatten Fiona's GeoJSON-style representation into something more
        # amenable to namedtuple-ing
        feat = {key: value for key, value in f['properties'].items()}
        if 'id' not in feat and 'ID' not in feat:
            feat['id'] = f['id']
        feat['geometry'] = f['geometry']
        return feat

    def dump_feature(self, feat, i):
        """Rebuild the GeoJSON-style feature structure for writing.

        `i` is used as a fallback feature id when the record has none.
        """
        # Undo aforementioned flattening
        return {
            'id': feat.get('id', feat.get('ID', i)),
            'geometry': feat['geometry'],
            'properties': {
                key: value for key, value in feat.items()
                if key not in ('geometry', 'id',)
            }
        }

    def dump(self):
        # Dump and save the dataset at the same time via Fiona
        # NOTE(review): intentionally a no-op here, it appears - save()
        # below performs the actual write; confirm against base class usage.
        pass

    def save(self):
        """Write all records to self.filename using the stored Fiona meta."""
        with fiona.open(self.filename, 'w', **self.meta) as f:
            for i, feat in enumerate(self.data):
                f.write(self.dump_feature(feat, i))
class GisMapper(TupleMapper):
    """
    GIS-aware tuple mapper
    """
    def as_dataframe(self):
        """Return the records as a GeoDataFrame indexed by the key field."""
        # Mimic BaseIter.as_dataframe() but with GeoDataFrame
        # (also, key_field is always set)
        from geopandas import GeoDataFrame
        key = self.get_key_field()
        data = [self.item_dict(row) for row in self.values()]
        df = GeoDataFrame(data)
        df.set_index(key, inplace=True)
        return df

    def item_dict(self, uitem):
        """Convert a usable item to a GeoDataFrame-friendly dict."""
        # Turn usable item into GeoDataFrame-friendly dict
        data = uitem._asdict()
        # Geometry arrives in GeoJSON-style form; convert to a Shapely shape:
        data['geometry'] = geometry.shape(data['geometry'])
        return data
class ShapeMapper(GisMapper):
    """
    Map Fiona's GeoJSON-style geometries to and from Shapely shapes
    """
    def map_value(self, field, value):
        # Only the geometry field needs conversion on the way in:
        mapped = super(ShapeMapper, self).map_value(field, value)
        if field != 'geometry':
            return mapped
        return geometry.shape(mapped)

    def unmap_value(self, field, value):
        # Convert Shapely shapes back to GeoJSON-style on the way out:
        unmapped = geometry.mapping(value) if field == 'geometry' else value
        return super(ShapeMapper, self).unmap_value(field, unmapped)

    def item_dict(self, uitem):
        # Geometry is already a Shapely shape; no further conversion needed.
        return uitem._asdict()
class WktMapper(ShapeMapper):
    """
    Map geometries to and from WKT (good for Django integration)
    """
    def map_value(self, field, value):
        # Shapely shape (from ShapeMapper) -> WKT string on the way in:
        mapped = super(WktMapper, self).map_value(field, value)
        if field != 'geometry':
            return mapped
        return wkt.dumps(mapped)

    def unmap_value(self, field, value):
        # WKT string -> Shapely shape before handing off to ShapeMapper:
        unmapped = wkt.loads(value) if field == 'geometry' else value
        return super(WktMapper, self).unmap_value(field, unmapped)

    def item_dict(self, uitem):
        # Parse the WKT back to a shape for GeoDataFrame consumption:
        data = uitem._asdict()
        data['geometry'] = wkt.loads(data['geometry'])
        return data
def guess_driver(filename):
    """Pick a Fiona driver name from the file extension (GeoJSON default)."""
    return "ESRI Shapefile" if filename.endswith(".shp") else "GeoJSON"
| StarcoderdataPython |
1775943 | import pytest
from indy import pool
from indy.error import ErrorCode, IndyError
@pytest.mark.asyncio
async def test_create_pool_ledger_config_works(pool_ledger_config):
    """The pool_ledger_config fixture performs the actual config creation;
    reaching this body without an exception is the whole assertion."""
    pass
@pytest.mark.asyncio
async def test_create_pool_ledger_config_works_for_empty_name():
    """An empty pool name must be rejected with CommonInvalidParam2
    (the config name is parameter #2 of create_pool_ledger_config)."""
    with pytest.raises(IndyError) as e:
        await pool.create_pool_ledger_config("", None)
    assert ErrorCode.CommonInvalidParam2 == e.value.error_code
| StarcoderdataPython |
4800915 | <reponame>atentas/ennemi<filename>tests/unit/test_entropy_estimators.py
# MIT License - Copyright <NAME> and contributors
# See the LICENSE.md file included in this source code package
"""Tests for ennemi._estimate_single_mi() and friends."""
import math
from math import log
import numpy as np
from scipy.special import gamma, psi
from scipy.stats import gamma as gamma_dist
import unittest
from ennemi._entropy_estimators import _estimate_single_entropy,\
_estimate_single_mi, _estimate_conditional_mi,\
_estimate_semidiscrete_mi, _estimate_conditional_semidiscrete_mi
class TestEstimateSingleEntropy(unittest.TestCase):
    """Accuracy tests for _estimate_single_entropy() against closed-form
    differential entropies of known distributions (fixed RNG seeds)."""

    def test_univariate_gaussian(self) -> None:
        # Each case: (standard deviation, sample size, k neighbors, tolerance)
        cases = [ (1, 100, 2, 0.2),
                  (1, 200, 3, 0.05),
                  (1, 2000, 3, 0.02),
                  (0.5, 2000, 3, 0.02),
                  (2.0, 2000, 1, 0.002),
                  (2.0, 2000, 3, 0.02),
                  (2.0, 2000, 30, 0.02), ]
        for (sd, n, k, delta) in cases:
            with self.subTest(sd=sd, n=n, k=k):
                rng = np.random.default_rng(0)
                x = rng.normal(0, sd, size=n)

                actual = _estimate_single_entropy(x, k=k)
                # Analytic entropy of N(0, sd^2):
                expected = 0.5 * log(2 * math.pi * math.e * sd**2)
                self.assertAlmostEqual(actual, expected, delta=delta)

    def test_uniform(self) -> None:
        # Each case: (lower, upper, sample size, k neighbors, tolerance)
        cases = [ (0, 1, 1000, 3, 0.05),
                  (1, 2, 1000, 3, 0.05),
                  (-1, 1, 1000, 3, 0.05),
                  (-0.1, 0.1, 1000, 3, 0.05), ]
        for (a, b, n, k, delta) in cases:
            with self.subTest(a=a, b=b, n=n, k=k):
                rng = np.random.default_rng(1)
                x = rng.uniform(a, b, size=n)

                actual = _estimate_single_entropy(x, k=k)
                # Entropy of U(a, b) is log(b - a):
                expected = log(b - a)
                self.assertAlmostEqual(actual, expected, delta=delta)

    def test_bivariate_gaussian(self) -> None:
        # Each case: (correlation, variance of x1, n, k, tolerance)
        cases = [ (0, 1, 200, 3, 0.09),
                  (0, 2, 2000, 3, 0.03),
                  (0.2, 1, 2000, 3, 0.03),
                  (0.2, 2, 2000, 5, 0.03),
                  (0.6, 1, 2000, 1, 0.02),
                  (0.6, 0.5, 2000, 3, 0.04),
                  (0.9, 1, 2000, 3, 0.04),
                  (-0.5, 1, 2000, 5, 0.03), ]
        for (rho, var1, n, k, delta) in cases:
            with self.subTest(rho=rho, var1=var1, n=n, k=k):
                rng = np.random.default_rng(2)
                cov = np.array([[var1, rho], [rho, 1]])
                data = rng.multivariate_normal([0, 0], cov, size=n)

                actual = _estimate_single_entropy(data, k=k)
                # Multivariate Gaussian entropy: 0.5 * log det(2*pi*e*cov)
                expected = 0.5 * log(np.linalg.det(2 * math.pi * math.e * cov))
                self.assertAlmostEqual(actual, expected, delta=delta)

    def test_4d_gaussian(self) -> None:
        rng = np.random.default_rng(3)
        cov = np.array([
            [ 1.0, 0.5, 0.6, -0.2],
            [ 0.5, 1.0, 0.7, -0.5],
            [ 0.6, 0.7, 2.0, -0.1],
            [-0.2, -0.5, -0.1, 0.5]])
        data = rng.multivariate_normal([0, 0, 0, 0], cov, size=2000)

        actual = _estimate_single_entropy(data, k=3)
        expected = 0.5 * log(np.linalg.det(2 * math.pi * math.e * cov))
        self.assertAlmostEqual(actual, expected, delta=0.05)

    def test_gamma_exponential(self) -> None:
        # As in the MI test, the analytical result is due to doi:10.1109/18.825848.
        #
        # x1 ~ Gamma(rate, shape)
        # x2 | x1 ~ Exp(t * x1)
        rng = np.random.default_rng(4)
        r = 1.2
        s = 3.4
        t = 0.56
        x1 = rng.gamma(shape=s, scale=1/r, size=1000)
        x2 = rng.exponential(x1 * t)
        data = np.asarray([x1, x2]).T

        raw = _estimate_single_entropy(data)
        trans = _estimate_single_entropy(np.log(data))

        # The estimate with unlogarithmed data is very bad
        expected = 1 + s - s*psi(s) + log(gamma(s)) - log(t)
        self.assertAlmostEqual(raw, expected, delta=0.65)
        self.assertAlmostEqual(trans, expected, delta=0.01)
class TestEstimateSingleMi(unittest.TestCase):
    """Accuracy tests for _estimate_single_mi() against analytic mutual
    information values of known joint distributions (fixed RNG seeds)."""

    def test_bivariate_gaussian(self) -> None:
        # Each case: (correlation, sample size, k neighbors, tolerance)
        cases = [ (0, 40, 3, 0.1),
                  (0, 200, 3, 0.06),
                  (0, 2000, 3, 0.005),
                  (0, 2000, 5, 0.006),
                  (0, 2000, 20, 0.003),
                  (0.5, 200, 3, 0.05),
                  (0.5, 200, 5, 0.02),
                  (0.5, 2000, 3, 0.02),
                  (-0.9, 200, 3, 0.05),
                  (-0.9, 2000, 3, 0.05),
                  (-0.9, 2000, 5, 0.02), ]
        for (rho, n, k, delta) in cases:
            with self.subTest(rho=rho, n=n, k=k):
                rng = np.random.default_rng(0)
                cov = np.array([[1, rho], [rho, 1]])

                data = rng.multivariate_normal([0, 0], cov, size=n)
                x = data[:,0]
                y = data[:,1]

                actual = _estimate_single_mi(x, y, k=k)
                # Gaussian MI: -0.5 * log(1 - rho^2)
                expected = -0.5 * log(1 - rho**2)
                self.assertAlmostEqual(actual, expected, delta=delta)

    def test_sum_of_exponentials(self) -> None:
        # We define X ~ Exp(1), W ~ Exp(2) and Y = X + W.
        # Now by arXiv:1609.02911, Y has known, closed-form entropy.
        cases = [ (1, 2), (0.2, 0.3), (3, 3.1) ]
        for (a, b) in cases:
            with self.subTest(a=a, b=b):
                rng = np.random.default_rng(20200302)
                x = rng.exponential(1/a, 1000)
                w = rng.exponential(1/b, 1000)
                y = x + w

                actual = _estimate_single_mi(x, y, k=5)
                expected = np.euler_gamma + log((b-a)/a) + psi(b/(b-a))
                self.assertAlmostEqual(actual, expected, delta=0.025)

    def test_independent_uniform(self) -> None:
        # We have to use independent random numbers instead of linspace,
        # because the algorithm has trouble with evenly spaced values
        rng = np.random.default_rng(1)
        x = rng.uniform(0.0, 1.0, 1024)
        y = rng.uniform(0.0, 1.0, 1024)

        actual = _estimate_single_mi(x, y, k=8)
        # MI must also be symmetric in its arguments:
        actual2 = _estimate_single_mi(y, x, k=8)
        self.assertAlmostEqual(actual, 0, delta=0.04)
        self.assertAlmostEqual(actual, actual2, delta=0.00001)

    def test_independent_transformed_uniform(self) -> None:
        # Very non-uniform density, but MI should still be zero
        rng = np.random.default_rng(1)
        x = rng.uniform(0.0, 10.0, 1024)
        y = np.exp(rng.uniform(0.0, 1.0, 1024))

        actual = _estimate_single_mi(x, y, k=8)
        self.assertAlmostEqual(actual, 0, delta=0.02)

    def test_gamma_exponential(self) -> None:
        # Kraskov et al. mention that this distribution is hard to estimate
        # without logarithming the values.
        # The analytical result is due to doi:10.1109/18.825848.
        #
        # x1 ~ Gamma(rate, shape)
        # x2 | x1 ~ Exp(t * x1)
        rng = np.random.default_rng(2)
        r = 1.2
        s = 3.4
        t = 0.56
        x1 = rng.gamma(shape=s, scale=1/r, size=1000)
        x2 = rng.exponential(x1 * t)

        raw = _estimate_single_mi(x1, x2)
        trans = _estimate_single_mi(np.log(x1), np.log(x2))

        expected = psi(s) - np.log(s) + 1/s
        self.assertAlmostEqual(raw, expected, delta=0.04)
        self.assertAlmostEqual(trans, expected, delta=0.005)
class TestEstimateConditionalMi(unittest.TestCase):
    """Accuracy tests for _estimate_conditional_mi() against analytic
    conditional MI values (fixed RNG seeds)."""

    def test_gaussian_with_independent_condition(self) -> None:
        # In this case, the results should be same as in ordinary MI
        cases = [ (0.5, 200, 3, 0.03),
                  (0.75, 400, 3, 0.01),
                  (-0.9, 4000, 5, 0.03), ]
        for (rho, n, k, delta) in cases:
            with self.subTest(rho=rho, n=n, k=k):
                rng = np.random.default_rng(0)
                cov = np.array([[1, rho], [rho, 1]])

                data = rng.multivariate_normal([0, 0], cov, size=n)
                x = data[:,0]
                y = data[:,1]
                # The condition is independent of (x, y):
                cond = rng.uniform(0, 1, size=n)

                actual = _estimate_conditional_mi(x, y, cond, k=k)
                expected = -0.5 * log(1 - rho**2)
                self.assertAlmostEqual(actual, expected, delta=delta)

    def test_gaussian_with_condition_equal_to_y(self) -> None:
        # MI(X;Y | Y) should be equal to 0
        rng = np.random.default_rng(4)
        cov = np.array([[1, 0.6], [0.6, 1]])

        data = rng.multivariate_normal([0, 0], cov, size=314)
        x = data[:,0]
        y = data[:,1]

        actual = _estimate_conditional_mi(x, y, y, k=4)
        self.assertAlmostEqual(actual, 0.0, delta=0.001)

    def test_three_gaussians(self) -> None:
        # First example in doi:10.1103/PhysRevLett.99.204101,
        # we know the analytic expression for conditional MI of a three-
        # dimensional Gaussian random variable. Here, the covariance matrix
        # is not particularly interesting. The expected CMI expression
        # contains determinants of submatrices.
        rng = np.random.default_rng(5)
        cov = np.array([[1, 1, 1], [1, 4, 1], [1, 1, 9]])

        data = rng.multivariate_normal([0, 0, 0], cov, size=1000)

        actual = _estimate_conditional_mi(data[:,0], data[:,1], data[:,2])
        expected = 0.5 * (log(8) + log(35) - log(9) - log(24))
        self.assertAlmostEqual(actual, expected, delta=0.015)

    def test_four_gaussians(self) -> None:
        # As above, but now the condition is two-dimensional.
        # The covariance matrix is defined by transforming a standard normal
        # distribution (u1, u2, u3, u4) as follows:
        #  x  = u1,
        #  y  = u2 + u3 + 2*u4,
        #  z1 = 2*u1 + u3,
        #  z2 = u1 + u4.
        # Unconditionally, x and y are independent, but conditionally they aren't.
        rng = np.random.default_rng(25)
        cov = np.array([[1, 0, 2, 1],
                        [0, 6, 1, 2],
                        [2, 1, 5, 2],
                        [1, 2, 2, 2]])

        # The data needs to be normalized for estimation accuracy,
        # and the sample size must be quite large
        data = rng.multivariate_normal([0, 0, 0, 0], cov, size=8000)
        data = data / np.sqrt(np.var(data, axis=0))

        actual = _estimate_conditional_mi(data[:,0], data[:,1], data[:,2:])
        expected = 0.64964
        self.assertAlmostEqual(actual, expected, delta=0.04)
class TestEstimateSemiDiscreteMi(unittest.TestCase):
    """Tests for _estimate_semidiscrete_mi(): MI between a continuous
    variable and a discrete variable (fixed RNG seeds)."""

    def test_independent_variables(self) -> None:
        # Each case: (number of discrete levels, n, k neighbors, tolerance)
        cases = [ (2, 200, 3, 0.04),
                  (2, 400, 1, 0.02),
                  (2, 400, 3, 0.02),
                  (2, 800, 8, 0.02),
                  (4, 2000, 2, 0.01) ]
        for (discrete_count, n, k, delta) in cases:
            with self.subTest(count=discrete_count, n=n, k=k):
                rng = np.random.default_rng(50)
                x = rng.normal(0.0, 1.0, size=n)
                y = rng.choice(np.arange(discrete_count), size=n)

                mi = _estimate_semidiscrete_mi(x, y, k)
                # The estimate may be slightly negative; clamp before check.
                self.assertAlmostEqual(max(mi, 0.0), 0.0, delta=delta)

    def test_two_disjoint_uniforms(self) -> None:
        # Y takes two equally probable values, and then X is sampled
        # from two disjoint distributions depending on Y.
        # Therefore I(X;Y) = H(Y) = log(2).
        rng = np.random.default_rng(51)
        y = rng.choice([0, 2], size=800)
        x = rng.uniform(y, y+1)

        mi = _estimate_semidiscrete_mi(x, y)
        self.assertAlmostEqual(mi, log(2), delta=0.02)

    def test_three_disjoint_uniforms(self) -> None:
        # As above, but with three equally probable values for Y.
        rng = np.random.default_rng(51)
        y = rng.choice([0, 2, 5], size=800)
        x = rng.uniform(y, y+1)

        mi = _estimate_semidiscrete_mi(x, y)
        self.assertAlmostEqual(mi, log(3), delta=0.02)

    def test_two_overlapping_uniforms(self) -> None:
        # Here there are two values for Y, but the associated X intervals overlap.
        # Additionally, one of the values is more likely than the other.
        rng = np.random.default_rng(52)
        y = rng.choice([0, 0.7, 0.7], size=2000)
        x = rng.uniform(y, y+1)

        mi = _estimate_semidiscrete_mi(x, y)
        expected = log(3)*7/30 + log(1)*9/30 + log(3/2)*14/30
        self.assertAlmostEqual(mi, expected, delta=0.05)
class TestEstimateConditionalSemiDiscreteMi(unittest.TestCase):
    """Tests for _estimate_conditional_semidiscrete_mi(): MI between a
    continuous and a discrete variable given a continuous condition."""

    def test_independent_vars_and_condition(self) -> None:
        # Each case: (number of discrete levels, n, k neighbors, tolerance)
        cases = [ (2, 200, 3, 0.02),
                  (2, 400, 1, 0.05),
                  (2, 400, 3, 0.02),
                  (2, 800, 8, 0.02),
                  (4, 2000, 2, 0.02) ]
        for (discrete_count, n, k, delta) in cases:
            with self.subTest(count=discrete_count, n=n, k=k):
                rng = np.random.default_rng(50)
                x = rng.normal(0.0, 1.0, size=n)
                y = rng.choice(np.arange(discrete_count), size=n)
                z = rng.normal(0.0, 1.0, size=n)

                mi = _estimate_conditional_semidiscrete_mi(x, y, z, k)
                self.assertAlmostEqual(max(mi, 0.0), 0.0, delta=delta)

    def test_irrelevant_condition(self) -> None:
        # Two disjoint uniforms, but with independent condition
        rng = np.random.default_rng(51)
        y = rng.choice([0, 2], size=800)
        x = rng.uniform(y, y+1)
        z = rng.beta(2, 3, size=800)

        mi = _estimate_conditional_semidiscrete_mi(x, y, z)
        self.assertAlmostEqual(mi, log(2), delta=0.02)

    def test_condition_equal_to_x(self) -> None:
        # Y = sign(X), and I(Y;X | X) = 0
        rng = np.random.default_rng(52)
        x = rng.normal(0.0, 1.0, size=800)
        y = np.sign(x)

        # Consistency check: the unconditional MI should be equal to y entropy
        uncond = _estimate_semidiscrete_mi(x, y)
        self.assertAlmostEqual(uncond, log(2), delta=0.01)

        mi = _estimate_conditional_semidiscrete_mi(x, y, x)
        self.assertAlmostEqual(mi, 0.0, delta=0.02)

    def test_condition_increases_mi(self) -> None:
        # X, Y are normal, Z = X + Y
        # W = sign(X)
        # I(Z; W) < I(X; W), but I(Z; W | Y) = I(X; W)
        rng = np.random.default_rng(53)
        x = rng.normal(0.0, 1.0, size=1500)
        y = rng.normal(0.0, 0.3, size=1500)
        z = x + y
        w = np.sign(x)

        # Consistency check
        uncond_zw = _estimate_semidiscrete_mi(z, w)
        uncond_xw = _estimate_semidiscrete_mi(x, w)
        self.assertLess(uncond_zw, uncond_xw - 0.2)
        self.assertAlmostEqual(uncond_xw, log(2), delta=0.01)

        # Conditioning should increase MI
        cond_zw = _estimate_conditional_semidiscrete_mi(z, w, y)
        self.assertAlmostEqual(cond_zw, log(2), delta=0.02)

    def test_multiple_levels(self) -> None:
        # X is a Gamma random variable, Y takes levels based on X,
        # and W is X with added Gaussian noise. Conditioning on noise increases MI.
        rng = np.random.default_rng(54)
        x = rng.gamma(shape=1.5, scale=1.0, size=2000)
        z = rng.normal(size=x.shape)
        w = x + z

        # The 1e-4 level would cause issues with the continuous-continuous algorithm
        # as it would be picked up in neighbor searches on the y=0 plane
        y = np.zeros(x.shape)
        y[x < 0.5] = 1e-4
        y[x > 2.0] = 4

        uncond = _estimate_semidiscrete_mi(w, y, k=1)
        cond = _estimate_conditional_semidiscrete_mi(w, y, z, k=1)

        # The expected MI is the discrete entropy of Y
        p_low = gamma_dist.cdf(0.5, a=1.5)
        p_high = 1 - gamma_dist.cdf(2.0, a=1.5)
        p_mid = 1 - p_low - p_high
        expected = -log(p_low)*p_low - log(p_mid)*p_mid - log(p_high)*p_high

        self.assertLess(uncond, cond - 0.6)
        self.assertAlmostEqual(cond, expected, delta=0.06)
# Test our custom implementation of the digamma function
from ennemi._entropy_estimators import _psi
class TestPsi(unittest.TestCase):
    """Validate the custom digamma implementation against SciPy's psi."""

    def test_comparison_with_scipy(self) -> None:
        # The pole at zero maps to positive infinity.
        self.assertEqual(_psi(0), np.inf)
        # Small arguments (1..19) and large arguments (20..9990, step 10).
        arguments = list(range(1, 20)) + [10 * i for i in range(2, 1000)]
        for arg in arguments:
            with self.subTest(x=arg):
                self.assertAlmostEqual(_psi(arg), psi(arg), delta=0.0001)
| StarcoderdataPython |
1986178 | import torch.nn as nn
import torch
from torch.autograd import Variable
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
# logistic regression model
def createlogisticRegression():
    """Build a logistic-regression classifier.

    Returns an ``nn.Sequential`` consisting of a 2-in/1-out affine layer
    (with bias) followed by a sigmoid, so the output is a probability.
    """
    return nn.Sequential(
        nn.Linear(2, 1, bias=True),
        nn.Sigmoid(),
    )
# relu classifier
def createReLUModel():
    """Build a classifier that rectifies its 2-d input before the affine map.

    Layer order is ReLU -> Linear(2, 1, bias) -> Sigmoid, exactly as in the
    original model (note the ReLU is applied to the raw input).
    """
    layers = [nn.ReLU(), nn.Linear(2, 1, bias=True), nn.Sigmoid()]
    return nn.Sequential(*layers)
1978659 | <gh_stars>0
import torch
class GlobalConfig:
    """Global hyper-parameter / configuration container for CIFAR-10 training."""

    seed = 1992        # RNG seed for reproducibility
    num_classes = 10   # CIFAR-10 has 10 classes
    batch_size = 128
    EPOCHS = 70
    # Prefer the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # data
    input_image_size = (3, 32, 32)   # (channels, height, width)
    # trainloader = train_loader
    # testloader = test_loader
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    # model data normalization: which layer to use plus per-layer ctor kwargs
    channel_norm = 'BatchNorm2d'
    channel_norm_params = {'GroupNorm': {'num_groups': 2},
                           'BatchNorm2d': {'affine': True},
                           'LayerNorm': {'normalized_shape': 10}}

    # regularizer
    dropout_value = 0.05

    # loss
    loss_function = 'CrossEntropyLoss'

    # scheduler
    # lr_scheduler = 'StepLR'
    # lr_scheduler_params = {'StepLR': {'step_size':7, 'gamma':0.5},
    #                        'ReduceLROnPlateau': {'mode':'min', 'factor':0.5, 'patience':2, 'threshold':0.0001,
    #                                              'threshold_mode':'rel', 'cooldown':0, 'min_lr':1e-4,
    #                                              'eps':1e-08, 'verbose':True},
    #                        'OneCycleLR': {'max_lr': 0.2, 'steps_per_epoch':len(trainloader), 'div_factor':20, 'epochs':EPOCHS }}

    # optimizer
    optimizer = 'SGD'
    optimizer_params = {'SGD': {'lr': 0.01, 'momentum': 0.9}}

    # store model stats, appended to during training/evaluation
    model_results = {'TestAccuracy': [],
                     'TrainAccuracy': [],
                     'TestLoss': [],
                     'TrainLoss': []}

    # Params
    misclassified = True   # collect misclassified samples
    L1Lambda = False       # L1 regularization disabled
| StarcoderdataPython |
6409398 | <reponame>TrinhQuocNguyen/labelme_DOTA<gh_stars>1-10
# -*- coding: utf-8 -*-
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import sys
QTextCodec.setCodecForTr(QTextCodec.codecForName("utf8"))
from libs.shape import Shape
class MouseEvent(QMainWindow):
    """Demo window that collects left-click points and draws a quadrilateral.

    The status bar permanently shows the live cursor position; up to four
    left-click points are accumulated in ``self.line`` and connected (and
    closed into a quad) in :meth:`paintEvent`.

    NOTE(review): the original file's indentation was lost; the nesting of a
    few statements below (marked) was reconstructed and should be confirmed
    against the upstream repository.
    """

    def __init__(self, parent=None):
        super(MouseEvent, self).__init__(parent)
        self.resize(800, 800)
        # Number of corner points collected so far (0..4).
        self.count = 0
        self.drawingLineColor = QColor(0, 0, 255)
        self.drawingRectColor = QColor(0, 0, 255)
        # Shape accumulating the clicked corner points.
        self.line = Shape(line_color=self.drawingLineColor)
        labelStatus = QLabel();
        labelStatus.setText(self.tr("Mouse Position:"))
        labelStatus.setFixedWidth(100)
        self.labelMousePos = QLabel();
        self.labelMousePos.setText(self.tr(""))
        self.labelMousePos.setFixedWidth(100)
        self._painter = QPainter()
        # Permanent status-bar widgets showing the live cursor position.
        self.sBar = self.statusBar()
        self.sBar.addPermanentWidget(labelStatus)
        self.sBar.addPermanentWidget(self.labelMousePos)

    def mouseMoveEvent(self, e):
        # Continuously mirror the cursor position into the status bar.
        self.labelMousePos.setText("(" + QString.number(e.x()) + "," + QString.number(e.y()) + ")")
        self.update()

    def mousePressEvent(self, e):
        # NOTE: shadows the builtin `str` (kept as-is; rename would change code).
        str = "(" + QString.number(e.x()) + "," + QString.number(e.y()) + ")"
        if e.button() == Qt.LeftButton:
            self.sBar.showMessage(self.tr("Mouse Left Button Pressed:") + str)
            x = QString.number(e.x()).toInt()[0]
            y = QString.number(e.y()).toInt()[0]
            q = QPoint(x, y)
            print(x, y)
            if self.count == 4:
                # Fifth click: start over with an empty quad.
                self.count = 0
                self.line.clear()
            else:
                self.count = self.count + 1
            # NOTE(review): reconstructed nesting — the point is added on every
            # left click; confirm whether it belonged inside the else branch.
            self.line.addPoint(q)
            self.update()
        elif e.button() == Qt.RightButton:
            self.sBar.showMessage(self.tr("Mouse Right Button Pressed:") + str)
        elif e.button() == Qt.MidButton:
            self.sBar.showMessage(self.tr("Mouse Middle Button Pressed:") + str)

    def paintEvent(self, event):
        qp = QPainter()
        qp.begin(self)
        qp.setPen(QPen(Qt.red, 5))  ###### could also try a brush via setBrush; the 5 sets the point/line width
        l = len(self.line.points)
        if l == 0:
            # Nothing collected yet.
            # NOTE(review): end() also runs at the bottom, so the painter is
            # ended twice on this path — appears harmless, but confirm.
            qp.end()
        elif l < 4:
            # Fewer than four points: draw the first point, then connect
            # consecutive points with line segments.
            p = QPoint(self.line.points[0])
            qp.drawPoint(p)
            for i in range(0, l-1):
                p = QPoint(self.line.points[i%4])
                q = QPoint(self.line.points[(i+1)%4])
                qp.drawLine(q, p)
        else:
            # Four points: draw the closed quadrilateral.
            for i in range(0, 3):
                p = QPoint(self.line.points[i%4])
                q = QPoint(self.line.points[(i+1)%4])
                qp.drawLine(q, p)
            p = QPoint(self.line.points[3])
            q = QPoint(self.line.points[0])
            qp.drawLine(q, p)
        # self.drawLines(qp)     ###### draw lines
        # self.drawPoints(qp)    ### draw points
        # self.drawRect(qp)      ## draw a rectangle
        # self.drawEllipse(qp)   ## draw a circle / ellipse
        qp.end()
# Launch the demo only when executed as a script; importing this module for
# its MouseEvent class previously opened the GUI as a side effect.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    dialog = MouseEvent()
    dialog.show()
    # Propagate Qt's exit status to the shell.
    sys.exit(app.exec_())
4893088 | import argparse
import cv2
import numpy as np
import imutils
# Command-line parsing is disabled; the image path is hard-coded below.
args = {}
"""
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
"""
args["image"] = "../jp.png"

# Load and display the original image; waitKey blocks until a key press.
image = cv2.imread(args["image"])
cv2.imshow("original", image)
cv2.waitKey()

# Crop Jessica's face via NumPy slicing: rows 10:220, columns 80:290.
cropped = image[10:220, 80:290]
cv2.imshow("Crop of Jessica's Face", cropped)
cv2.waitKey()
cv2.destroyAllWindows()
83022 | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Test that `LocalSnapshot` and `LocalSnapshotGPU` work."""
from copy import deepcopy
import hoomd
from hoomd.data.array import HOOMDGPUArray
import numpy as np
import pytest
try:
    # This try block is purely to allow testing locally without mpi4py. We could
    # require it for testing, and simplify the logic here. The CI containers all
    # have mpi4py.
    from mpi4py import MPI
except ImportError:
    skip_mpi4py = True
else:
    skip_mpi4py = False

# Reuse the boolean as a pytest skip marker so MPI-dependent tests are
# skipped cleanly when mpi4py is unavailable.
skip_mpi4py = pytest.mark.skipif(skip_mpi4py,
                                 reason='mpi4py could not be imported.')
try:
    # We use the CUPY_IMPORTED variable to allow for local GPU testing without
    # CuPy installed. This code could be simplified to only work with CuPy, by
    # requiring its installation for testing. The CI containers already have
    # CuPy installed when build for the GPU.
    import cupy
    CUPY_IMPORTED = True
except ImportError:
    CUPY_IMPORTED = False
"""
_N and _types are distinct in that the local snapshot does not know about them.
We use the underscore to signify this. Those keys are skipped when testing the
local snapshots, though are still used to define the state.
"""
Np = 5
_particle_data = dict(
_N=Np,
position=dict(np_type=np.floating,
value=[[-1, -1, -1], [-1, -1, 0], [-1, 0, 0], [1, 1, 1],
[1, 0, 0]],
new_value=[[5, 5, 5]] * Np,
shape=(Np, 3)),
velocity=dict(np_type=np.floating,
value=np.linspace(-4, 4, Np * 3).reshape((Np, 3)),
new_value=np.linspace(4, 8, Np * 3).reshape((Np, 3)),
shape=(Np, 3)),
acceleration=dict(np_type=np.floating,
value=np.linspace(-4, 4, Np * 3).reshape((Np, 3)),
new_value=np.linspace(4, 8, Np * 3).reshape((Np, 3)),
shape=(Np, 3)),
typeid=dict(np_type=np.integer,
value=[0, 0, 0, 1, 1],
new_value=[1, 1, 1, 0, 0],
shape=(Np,)),
mass=dict(np_type=np.floating,
value=[5, 4, 3, 2, 1],
new_value=[1, 2, 3, 4, 5],
shape=(Np,)),
charge=dict(np_type=np.floating,
value=[1, 2, 3, 2, 1],
new_value=[-1, -1, -3, -2, -1],
shape=(Np,)),
diameter=dict(np_type=np.floating,
value=[5, 2, 3, 2, 5],
new_value=[2, 1, 0.5, 1, 2],
shape=(Np,)),
image=dict(np_type=np.integer,
value=np.linspace(-10, 20, Np * 3, dtype=int).reshape(Np, 3),
new_value=np.linspace(-20, 10, Np * 3, dtype=int).reshape(Np, 3),
shape=(Np, 3)),
tag=dict(np_type=np.unsignedinteger, value=None, shape=(Np,)),
_types=['p1', 'p2'])
_particle_local_data = dict(
net_force=dict(np_type=np.floating,
value=np.linspace(0.5, 4.5, Np * 3).reshape((Np, 3)),
new_value=np.linspace(6, 12, Np * 3).reshape((Np, 3)),
shape=(Np, 3)),
net_torque=dict(np_type=np.floating,
value=np.linspace(-0.5, 2.5, Np * 3).reshape((Np, 3)),
new_value=np.linspace(12.75, 25, Np * 3).reshape((Np, 3)),
shape=(Np, 3)),
net_virial=dict(np_type=np.floating,
value=np.linspace(-1.5, 6.5, Np * 6).reshape((Np, 6)),
new_value=np.linspace(9.75, 13.12, Np * 6).reshape((Np, 6)),
shape=(Np, 6)),
net_energy=dict(np_type=np.floating,
value=np.linspace(0.5, 3.5, Np),
new_value=np.linspace(0, 4.2, Np),
shape=(Np,)),
)
# Topology sections (bonds, angles, dihedrals, impropers, constraints, pairs)
# follow the same table layout as _particle_data above.
Nb = 2
_bond_data = dict(_N=Nb,
                  typeid=dict(np_type=np.unsignedinteger,
                              value=[0, 1],
                              new_value=[1, 0],
                              shape=(Nb,)),
                  group=dict(np_type=np.unsignedinteger,
                             value=[[0, 1], [2, 3]],
                             new_value=[[1, 0], [3, 2]],
                             shape=(Nb, 2)),
                  tag=dict(np_type=np.unsignedinteger, value=None, shape=(Nb,)),
                  _types=['b1', 'b2'])

Na = 2
_angle_data = dict(_N=Na,
                   typeid=dict(np_type=np.unsignedinteger,
                               value=[1, 0],
                               new_value=[0, 1],
                               shape=(Na,)),
                   group=dict(np_type=np.unsignedinteger,
                              value=[[0, 1, 2], [2, 3, 4]],
                              new_value=[[1, 3, 4], [0, 2, 4]],
                              shape=(Na, 3)),
                   tag=dict(np_type=np.unsignedinteger, value=None,
                            shape=(Na,)),
                   _types=['a1', 'a2'])

Nd = 2
_dihedral_data = dict(_N=Nd,
                      typeid=dict(np_type=np.unsignedinteger,
                                  value=[1, 0],
                                  new_value=[0, 1],
                                  shape=(Nd,)),
                      group=dict(np_type=np.unsignedinteger,
                                 value=[[0, 1, 2, 3], [1, 2, 3, 4]],
                                 new_value=[[4, 3, 2, 1], [2, 4, 0, 1]],
                                 shape=(Nd, 4)),
                      tag=dict(np_type=np.unsignedinteger,
                               value=None,
                               shape=(Nd,)),
                      _types=['d1', 'd2'])

Ni = 2
_improper_data = dict(_N=Ni,
                      typeid=dict(np_type=np.unsignedinteger,
                                  value=[0, 0],
                                  shape=(Ni,)),
                      group=dict(np_type=np.unsignedinteger,
                                 value=[[3, 2, 1, 0], [1, 2, 3, 4]],
                                 new_value=[[1, 2, 3, 0], [4, 2, 3, 1]],
                                 shape=(Ni, 4)),
                      tag=dict(np_type=np.unsignedinteger,
                               value=None,
                               shape=(Ni,)),
                      _types=['i1'])

Nc = 3
_constraint_data = dict(
    _N=Nc,
    value=dict(np_type=np.floating,
               value=[2.5, 0.5, 2.],
               new_value=[3., 1.5, 1.],
               shape=(Nc,)),
    group=dict(np_type=np.unsignedinteger,
               value=[[0, 1], [2, 3], [1, 3]],
               new_value=[[4, 1], [3, 1], [2, 4]],
               shape=(Nc, 2)),
    tag=dict(np_type=np.unsignedinteger, value=None, shape=(Nc,)),
)

Npa = 2
_pair_data = dict(_N=Npa,
                  typeid=dict(np_type=np.unsignedinteger,
                              value=[0, 1],
                              new_value=[1, 0],
                              shape=(Npa,)),
                  group=dict(np_type=np.unsignedinteger,
                             value=[[0, 1], [2, 3]],
                             new_value=[[4, 1], [0, 3]],
                             shape=(Npa, 2)),
                  tag=dict(np_type=np.unsignedinteger, value=None,
                           shape=(Npa,)),
                  _types=['p1', 'p2'])

# Global (read-only) properties; currently only the reverse-tag tables.
_global_dict = dict(rtag=dict(
    particles=dict(np_type=np.unsignedinteger, value=None, shape=(Np,)),
    bonds=dict(np_type=np.unsignedinteger, value=None, shape=(Nb,)),
    angles=dict(np_type=np.unsignedinteger, value=None, shape=(Na,)),
    dihedrals=dict(np_type=np.unsignedinteger, value=None, shape=(Nd,)),
    impropers=dict(np_type=np.unsignedinteger, value=None, shape=(Ni,)),
    constraints=dict(np_type=np.unsignedinteger, value=None, shape=(Nc,)),
    pairs=dict(np_type=np.unsignedinteger, value=None, shape=(Npa,)),
))
@pytest.fixture(scope='session')
def base_snapshot(device):
    """Defines a snapshot using the data given above."""

    def set_snapshot(snap, data, base):
        """Sets individual sections of snapshot (e.g. particles)."""
        snap_section = getattr(snap, base)
        for k in data:
            if k.startswith('_'):
                # _N / _types configure the section directly (strip the underscore).
                setattr(snap_section, k[1:], data[k])
                continue
            elif data[k]['value'] is None:
                continue
            try:
                # Prefer in-place assignment into the existing buffer.
                array = getattr(snap_section, k)
                array[:] = data[k]['value']
            except TypeError:
                # Fall back to replacing the attribute wholesale.
                setattr(snap_section, k, data[k]['value'])

    snapshot = hoomd.Snapshot(device.communicator)
    # Only rank 0 populates the snapshot; other ranks receive it via MPI.
    if snapshot.communicator.rank == 0:
        snapshot.configuration.box = [2.1, 2.1, 2.1, 0, 0, 0]
        set_snapshot(snapshot, _particle_data, 'particles')
        set_snapshot(snapshot, _bond_data, 'bonds')
        set_snapshot(snapshot, _angle_data, 'angles')
        set_snapshot(snapshot, _dihedral_data, 'dihedrals')
        set_snapshot(snapshot, _improper_data, 'impropers')
        set_snapshot(snapshot, _constraint_data, 'constraints')
        set_snapshot(snapshot, _pair_data, 'pairs')
    return snapshot
@pytest.fixture(params=[
    'particles', 'bonds', 'angles', 'dihedrals', 'impropers', 'constraints',
    'pairs'
])
def snapshot_section(request):
    """Parameterize over every section name of the snapshot."""
    return request.param
@pytest.fixture(scope="function",
                params=[(section_name, prop_name, prop_dict)
                        for prop_name, global_prop_dict in _global_dict.items()
                        for section_name, prop_dict in global_prop_dict.items()
                        ],
                ids=lambda x: x[0] + '-' + x[1])
def global_property(request):
    """Parameterize over global (read-only) properties, e.g. the rtag tables."""
    return request.param
@pytest.fixture(
    scope='function',
    # One parameter per (section, property) pair; underscore-prefixed keys
    # (_N, _types) are configuration, not snapshot properties, and are skipped.
    params=[(name, prop_name, prop_dict)
            for name, section_dict in [('particles', {
                **_particle_data,
                **_particle_local_data
            }), ('bonds', _bond_data),
                ('angles', _angle_data),
                ('dihedrals', _dihedral_data),
                ('impropers', _improper_data),
                ('constraints', _constraint_data), ('pairs', _pair_data)]
            for prop_name, prop_dict in section_dict.items()
            if not prop_name.startswith('_')],
    ids=lambda x: x[0] + '-' + x[1])
def section_name_dict(request):
    """Parameterization of expected values for local_snapshot properties.

    Examples include ``('particles', 'position', position_dict)`` where
    ``position_dict`` is the dictionary with the expected typecodes, shape, and
    value of particle positions.
    """
    # Deep-copy so a test mutating the dict cannot leak into other tests.
    return deepcopy(request.param)
@pytest.fixture(scope='function',
                params=['', 'ghost_', '_with_ghost'],
                ids=lambda x: x.strip('_'))
def affix(request):
    """Parameterizes over the different variations of a local_snapshot property.

    These include ``property``, ``ghost_property``, and
    ``property_with_ghosts``.
    """
    return request.param
def get_property_name_from_affix(name, affix):
    """Combine a base property name with an affix.

    A leading-underscore affix is appended as a suffix (``tag_with_ghost``),
    a trailing-underscore affix is prepended (``ghost_tag``), and an empty
    affix returns the name unchanged.
    """
    if affix.startswith('_'):
        return name + affix
    if affix.endswith('_'):
        return affix + name
    return name
def general_array_equality(arr1, arr2):
    """Equality check that works for both HOOMDArray and HOOMDGPUArray."""
    is_float = any(np.issubdtype(a.dtype, np.floating) for a in (arr1, arr2))
    if not is_float:
        # Integer/boolean data: exact element-wise comparison.
        return all(arr1.ravel() == arr2.ravel())
    # Floating-point data needs a tolerance; dispatch to CuPy only when an
    # operand actually lives on the GPU.
    if any(isinstance(a, HOOMDGPUArray) for a in (arr1, arr2)):
        return cupy.allclose(arr1, arr2)
    return np.allclose(arr1, arr2)
def check_type(data, prop_dict, tags):
    """Check that the expected dtype is found for local snapshots."""
    expected_kind = prop_dict['np_type']
    assert np.issubdtype(data.dtype, expected_kind)
def check_shape(data, prop_dict, tags):
    """Check shape of properties in the snapshot."""
    # checks size of prop_dict values and tags.
    if isinstance(data, HOOMDGPUArray):
        # GPU buffers may collapse to an empty 1-D array when no tags exist.
        if len(tags) == 0:
            assert data.shape == (0,)
        else:
            assert data.shape == (len(tags),) + prop_dict['shape'][1:]
    else:
        assert data.shape == (len(tags),) + prop_dict['shape'][1:]
def check_getting(data, prop_dict, tags):
    """Checks getting properties of the state through a local snapshot."""
    # Check to end test early
    if isinstance(data, HOOMDGPUArray) and not CUPY_IMPORTED:
        pytest.skip("Not available for HOOMDGPUArray without CuPy.")
    if len(tags) == 0 or prop_dict['value'] is None:
        return None

    # Build the expected array on the same device as the data.
    if isinstance(data, HOOMDGPUArray):
        expected_values = cupy.array(prop_dict['value'])
    else:
        expected_values = np.array(prop_dict['value'])
    # Compare only the locally owned entries, selected by tag.
    assert general_array_equality(data, expected_values[tags.tolist()])
def check_setting(data, prop_dict, tags):
    """Checks setting properties of the state through a local snapshot.

    Also tests error raising for read only arrays.
    """
    # Test if test should be skipped or just return
    if isinstance(data, HOOMDGPUArray) and not CUPY_IMPORTED:
        pytest.skip("Not available for HOOMDGPUArray without CuPy.")
    if 'new_value' not in prop_dict:
        return None

    # Select the replacement values for the locally owned entries, on the
    # same device as the data.
    if isinstance(data, HOOMDGPUArray):
        new_values = cupy.array(prop_dict['new_value'])[tags.tolist()]
    else:
        new_values = np.array(prop_dict['new_value'])[tags]
    if data.read_only:
        # Read-only buffers must refuse assignment.
        with pytest.raises(ValueError):
            data[:] = new_values
    else:
        data[:] = new_values
        assert general_array_equality(data, new_values)
@pytest.fixture(scope='function',
                params=[check_type, check_shape, check_getting, check_setting])
def property_check(request):
    """Parameterizes different types of checks on local_snapshot properties."""
    return request.param
class TestLocalSnapshots:
    """Base class for CPU and GPU based localsnapshot tests."""

    @staticmethod
    def check_box(local_snapshot, global_box, ranks):
        """General check that ``box`` and ``local_box`` properties work."""
        assert type(local_snapshot.global_box) == hoomd.Box
        assert type(local_snapshot.local_box) == hoomd.Box

        assert local_snapshot.global_box == global_box
        # The local box and global box are equal if and only if
        # we run on a single rank.
        assert (local_snapshot.local_box == global_box) == (ranks == 1)

    def test_box(self, base_simulation, base_snapshot):
        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr) as data:
                self.check_box(data, sim.state.box,
                               sim.device.communicator.num_ranks)

    @staticmethod
    def check_tag_shape(base_snapshot, local_snapshot, group, ranks):
        # Broadcast the global N from rank 0 so every rank can validate.
        mpi_comm = MPI.COMM_WORLD
        if base_snapshot.communicator.rank == 0:
            N = getattr(base_snapshot, group).N
        else:
            N = None
        N = mpi_comm.bcast(N, root=0)

        # check particles tag size
        if group == 'particles':
            # Particles are distributed: the local tag counts must sum to N.
            total_len = mpi_comm.allreduce(len(local_snapshot.particles.tag),
                                           op=MPI.SUM)
            assert total_len == N
        else:
            local_snapshot_section = getattr(local_snapshot, group)
            if ranks > 1:
                assert len(local_snapshot_section.tag) <= N
            else:
                assert len(local_snapshot_section.tag) == N

    @skip_mpi4py
    @pytest.mark.cupy_optional
    def test_tags_shape(self, base_simulation, base_snapshot, snapshot_section):
        """Checks that tags are the appropriate size from local snapshots.

        tags are used for checking other shapes so this is necessary to validate
        those tests.
        """
        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr) as data:
                self.check_tag_shape(base_snapshot, data, snapshot_section,
                                     sim.device.communicator.num_ranks)

    @staticmethod
    def check_global_properties(prop, global_property_dict, N):
        assert prop.shape == global_property_dict['shape']
        assert np.issubdtype(prop.dtype, global_property_dict['np_type'])
        if isinstance(prop, HOOMDGPUArray) and not CUPY_IMPORTED:
            # Cannot inspect GPU buffers without CuPy.
            return
        else:
            if global_property_dict['value'] is not None:
                general_array_equality(prop, global_property_dict['value'])
            # Global properties are read-only; writes must raise.
            with pytest.raises(ValueError):
                prop[:] = 1

    @skip_mpi4py
    @pytest.mark.cupy_optional
    def test_cpu_global_properties(self, base_simulation, base_snapshot,
                                   global_property):
        section_name, prop_name, prop_dict = global_property
        sim = base_simulation()
        snapshot = sim.state.snapshot
        # Broadcast the global N from rank 0 (only rank 0 holds the snapshot).
        mpi_comm = MPI.COMM_WORLD
        if snapshot.communicator.rank == 0:
            N = getattr(snapshot, section_name).N
        else:
            N = None
        N = mpi_comm.bcast(N, root=0)
        with sim.state.cpu_local_snapshot as data:
            self.check_global_properties(
                getattr(getattr(data, section_name), prop_name), prop_dict, N)

    @pytest.mark.cupy_optional
    def test_arrays_properties(self, base_simulation, section_name_dict, affix,
                               property_check):
        """This test makes extensive use of parameterizing in pytest.

        This test tests the type, shape, getting, and setting of array values in
        the local snapshot. We test all properties including ghost and both
        ghost and normal particles, bonds, etc.
        """
        name, property_name, property_dict = section_name_dict
        property_name = get_property_name_from_affix(property_name, affix)
        tag_name = get_property_name_from_affix('tag', affix)

        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr) as data:
                # gets the particle, bond, etc data
                snapshot_section = getattr(data, name)
                hoomd_buffer = getattr(snapshot_section, property_name)
                tags = getattr(snapshot_section, tag_name)
                property_check(hoomd_buffer, property_dict, tags)

    def test_run_failure(self, base_simulation):
        # Running the simulation while a local snapshot is open must fail.
        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr):
                with pytest.raises(RuntimeError):
                    sim.run(1)

    def test_setting_snapshot_failure(self, base_simulation, base_snapshot):
        # Replacing the state's snapshot while a local snapshot is open must fail.
        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr):
                with pytest.raises(RuntimeError):
                    sim.state.snapshot = base_snapshot

    @pytest.fixture
    def base_simulation(self, simulation_factory, base_snapshot):
        """Creates the simulation from the base_snapshot."""

        def factory():
            sim = simulation_factory(base_snapshot)
            # Seed the local-only particle properties (net force/torque/...)
            # which cannot be set through the global snapshot.
            with sim.state.cpu_local_snapshot as snap:
                particle_data = getattr(snap, 'particles')
                tags = snap.particles.tag
                for attr, inner_dict in _particle_local_data.items():
                    arr_values = np.array(inner_dict['value'])[tags]
                    getattr(particle_data, attr)[:] = arr_values
            return sim

        return factory

    def get_snapshot_attr(self, sim):
        # CPU devices expose only the CPU local snapshot; GPU devices expose both.
        if isinstance(sim.device, hoomd.device.CPU):
            yield 'cpu_local_snapshot'
        else:
            yield 'cpu_local_snapshot'
            yield 'gpu_local_snapshot'
| StarcoderdataPython |
8132721 | import sms
sms.send_sms("+639959064795", "BAL")
| StarcoderdataPython |
8127890 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.prestige import prestige
def test_prestige():
    """Test module prestige.py by downloading
    prestige.csv and testing shape of
    extracted data has 102 rows and 6 columns
    """
    test_path = tempfile.mkdtemp()
    try:
        x_train, metadata = prestige(test_path)
        assert x_train.shape == (102, 6)
    except Exception:
        # Clean up the download directory, then re-raise the original error.
        # The original code used a bare `except:` and called `raise()`, which
        # itself raises "TypeError: exceptions must derive from BaseException"
        # and masks the real failure; a bare `raise` re-raises correctly.
        shutil.rmtree(test_path)
        raise
| StarcoderdataPython |
3476489 | import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
def calc(n):
    """Fuel required to launch a module of mass ``n``: floor(n / 3) - 2."""
    fuel = (n // 3) - 2
    return fuel
def solve(d):
    """Total fuel requirement: sum of per-module fuel over all masses in ``d``."""
    total = 0
    for mass in d:
        total += calc(mass)
    return total
def read_and_solve():
    """Read one module mass per line from input_1.txt and return the total fuel."""
    with open('input_1.txt') as f:
        # int() tolerates the trailing newline, so no explicit rstrip is needed.
        masses = [int(line) for line in f]
    return solve(masses)
# Script entry point: compute and print the puzzle answer.
if __name__ == '__main__':
    print(read_and_solve())
8167819 | <gh_stars>0
# coding=utf-8
"""
<NAME>, CC3501, 2019-2
vertices and indices for simple shapes
"""
import numpy as np
# A simple class container to store vertices and indices that define a shape
class Shape:
    """Container for the vertex/index data (and optional texture) of a shape."""

    def __init__(self, vertices, indices, textureFileName=None):
        # Flat list of interleaved per-vertex data (positions + colors or
        # texture coordinates, and optionally normals).
        self.vertices = vertices
        # Flat list of vertex indices describing the drawing primitives.
        self.indices = indices
        # Optional path to a texture image for textured shapes.
        self.textureFileName = textureFileName
def createAxis(length=1.0):
    """RGB coordinate axes centered at the origin, meant for GL_LINES.

    Each axis spans [-length, +length]; the positive end carries the axis
    color (X=red, Y=green, Z=blue) and the negative end is black.
    """
    axis_colors = ((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))
    black = (0.0, 0.0, 0.0)

    vertices = []
    for axis, color in enumerate(axis_colors):
        for sign in (-1.0, 1.0):
            position = [0.0, 0.0, 0.0]
            position[axis] = sign * length
            vertices += position
            vertices += color if sign > 0 else black

    # One line per axis: vertex pairs (0,1), (2,3), (4,5).
    indices = [0, 1, 2, 3, 4, 5]

    return Shape(vertices, indices)
def createColorTriangle(r, g, b):
    """Triangle in the z=0 plane with a single solid (r, g, b) color."""
    color = [r, g, b]
    corners = [[-0.5, -0.5, 0.0], [0.5, -0.5, 0.0], [0.0, 0.5, 0.0]]

    vertices = []
    for corner in corners:
        vertices += corner + color

    # A single triangle.
    return Shape(vertices, [0, 1, 2])
def createColorCircle(r, g, b):
    """Filled circle of radius 0.5 at the origin in the z=0 plane.

    Built as a triangle fan of 25 segments around a center vertex, all in a
    single solid (r, g, b) color.
    """
    n = 25
    color = [r, g, b]

    # Center vertex first (index 0), then the n perimeter vertices
    # (indices 1..n) at angles 2*pi*j/n.
    vertices = [0, 0, 0] + color
    for j in range(n):
        angle = j * 2 * np.pi / n
        vertices += [0.5 * np.cos(angle), 0.5 * np.sin(angle), 0] + color

    # One triangle per segment: center, vertex i, vertex i+1.
    indices = []
    for i in range(1, n + 1):
        indices += [0, i, i + 1]
    # Close the fan: the last triangle wraps back to perimeter vertex 1.
    indices[-1] = 1

    return Shape(vertices, indices)
def createRainbowTriangle():
    """Triangle in the z=0 plane with red, green and blue corners."""
    corner_data = [
        ((-0.5, -0.5, 0.0), (1.0, 0.0, 0.0)),
        ((0.5, -0.5, 0.0), (0.0, 1.0, 0.0)),
        ((0.0, 0.5, 0.0), (0.0, 0.0, 1.0)),
    ]

    vertices = []
    for position, color in corner_data:
        vertices.extend(position)
        vertices.extend(color)

    # A single triangle.
    return Shape(vertices, [0, 1, 2])
def createRainbowQuad():
    """Quad in the z=0 plane with a different color at each corner."""
    corner_data = [
        ((-0.5, -0.5, 0.0), (1.0, 0.0, 0.0)),
        ((0.5, -0.5, 0.0), (0.0, 1.0, 0.0)),
        ((0.5, 0.5, 0.0), (0.0, 0.0, 1.0)),
        ((-0.5, 0.5, 0.0), (1.0, 1.0, 1.0)),
    ]

    vertices = []
    for position, color in corner_data:
        vertices.extend(position)
        vertices.extend(color)

    # Two triangles covering the quad.
    indices = [
        0, 1, 2,
        2, 3, 0]

    return Shape(vertices, indices)
def createColorQuad(r, g, b):
    """Quad in the z=0 plane with a single solid (r, g, b) color."""
    color = [r, g, b]
    corners = [[-0.5, -0.5, 0.0], [0.5, -0.5, 0.0],
               [0.5, 0.5, 0.0], [-0.5, 0.5, 0.0]]

    vertices = []
    for corner in corners:
        vertices += corner + color

    # Two triangles covering the quad.
    indices = [
        0, 1, 2,
        2, 3, 0]

    return Shape(vertices, indices)
def createTextureQuad(image_filename, nx=1, ny=1):
    """Textured quad in the z=0 plane; nx/ny repeat the texture that many times."""
    vertices = [
        # positions         texture
        -0.5, -0.5, 0.0,    0, ny,
        0.5, -0.5, 0.0,     nx, ny,
        0.5, 0.5, 0.0,      nx, 0,
        -0.5, 0.5, 0.0,     0, 0]

    # Two triangles covering the quad.
    indices = [
        0, 1, 2,
        2, 3, 0]

    return Shape(vertices, indices, image_filename)
def createRainbowCube():
    """Unit cube with a distinct color on each of its 8 shared corners."""
    # Defining the location and colors of each vertex of the shape
    vertices = [
        # positions        colors
        -0.5, -0.5, 0.5, 1.0, 0.0, 0.0,
        0.5, -0.5, 0.5, 0.0, 1.0, 0.0,
        0.5, 0.5, 0.5, 0.0, 0.0, 1.0,
        -0.5, 0.5, 0.5, 1.0, 1.0, 1.0,
        -0.5, -0.5, -0.5, 1.0, 1.0, 0.0,
        0.5, -0.5, -0.5, 0.0, 1.0, 1.0,
        0.5, 0.5, -0.5, 1.0, 0.0, 1.0,
        -0.5, 0.5, -0.5, 1.0, 1.0, 1.0]

    # Defining connections among vertices
    # We have a triangle every 3 indices specified (two per face).
    indices = [
        0, 1, 2, 2, 3, 0,
        4, 5, 6, 6, 7, 4,
        4, 5, 1, 1, 0, 4,
        6, 7, 3, 3, 2, 6,
        5, 6, 2, 2, 1, 5,
        7, 4, 0, 0, 3, 7]

    return Shape(vertices, indices)
def createColorCube(r, g, b):
    """Unit cube with a single solid (r, g, b) color on all 8 shared corners."""
    # Defining the location and colors of each vertex of the shape
    vertices = [
        # positions        colors
        -0.5, -0.5, 0.5, r, g, b,
        0.5, -0.5, 0.5, r, g, b,
        0.5, 0.5, 0.5, r, g, b,
        -0.5, 0.5, 0.5, r, g, b,
        -0.5, -0.5, -0.5, r, g, b,
        0.5, -0.5, -0.5, r, g, b,
        0.5, 0.5, -0.5, r, g, b,
        -0.5, 0.5, -0.5, r, g, b]

    # Defining connections among vertices
    # We have a triangle every 3 indices specified (two per face).
    indices = [
        0, 1, 2, 2, 3, 0,
        4, 5, 6, 6, 7, 4,
        4, 5, 1, 1, 0, 4,
        6, 7, 3, 3, 2, 6,
        5, 6, 2, 2, 1, 5,
        7, 4, 0, 0, 3, 7]

    return Shape(vertices, indices)
def createTextureCube(image_filename):
    """Unit cube with per-face texture coordinates (4 vertices per face)."""
    # Defining locations and texture coordinates for each vertex of the shape
    vertices = [
        # positions         texture coordinates
        # Z+
        -0.5, -0.5, 0.5, 0, 1,
        0.5, -0.5, 0.5, 1, 1,
        0.5, 0.5, 0.5, 1, 0,
        -0.5, 0.5, 0.5, 0, 0,

        # Z-
        -0.5, -0.5, -0.5, 0, 1,
        0.5, -0.5, -0.5, 1, 1,
        0.5, 0.5, -0.5, 1, 0,
        -0.5, 0.5, -0.5, 0, 0,

        # X+
        0.5, -0.5, -0.5, 0, 1,
        0.5, 0.5, -0.5, 1, 1,
        0.5, 0.5, 0.5, 1, 0,
        0.5, -0.5, 0.5, 0, 0,

        # X-
        -0.5, -0.5, -0.5, 0, 1,
        -0.5, 0.5, -0.5, 1, 1,
        -0.5, 0.5, 0.5, 1, 0,
        -0.5, -0.5, 0.5, 0, 0,

        # Y+
        -0.5, 0.5, -0.5, 0, 1,
        0.5, 0.5, -0.5, 1, 1,
        0.5, 0.5, 0.5, 1, 0,
        -0.5, 0.5, 0.5, 0, 0,

        # Y-
        -0.5, -0.5, -0.5, 0, 1,
        0.5, -0.5, -0.5, 1, 1,
        0.5, -0.5, 0.5, 1, 0,
        -0.5, -0.5, 0.5, 0, 0
        ]

    # Defining connections among vertices
    # We have a triangle every 3 indices specified (two per face).
    indices = [
        0, 1, 2, 2, 3, 0,  # Z+
        7, 6, 5, 5, 4, 7,  # Z-
        8, 9, 10, 10, 11, 8,  # X+
        15, 14, 13, 13, 12, 15,  # X-
        19, 18, 17, 17, 16, 19,  # Y+
        20, 21, 22, 22, 23, 20]  # Y-

    return Shape(vertices, indices, image_filename)
def createRainbowNormalsCube():
    """Rainbow cube with per-corner averaged (diagonal) normals."""
    # 1/sqrt(3): component of a unit normal pointing along a cube diagonal.
    sq3 = 0.57735027

    # Defining the location and colors of each vertex of the shape
    vertices = [
        # positions        colors            normals
        -0.5, -0.5, 0.5, 1.0, 0.0, 0.0, -sq3, -sq3, sq3,
        0.5, -0.5, 0.5, 0.0, 1.0, 0.0, sq3, -sq3, sq3,
        0.5, 0.5, 0.5, 0.0, 0.0, 1.0, sq3, sq3, sq3,
        -0.5, 0.5, 0.5, 1.0, 1.0, 1.0, -sq3, sq3, sq3,
        -0.5, -0.5, -0.5, 1.0, 1.0, 0.0, -sq3, -sq3, -sq3,
        0.5, -0.5, -0.5, 0.0, 1.0, 1.0, sq3, -sq3, -sq3,
        0.5, 0.5, -0.5, 1.0, 0.0, 1.0, sq3, sq3, -sq3,
        -0.5, 0.5, -0.5, 1.0, 1.0, 1.0, -sq3, sq3, -sq3]

    # Defining connections among vertices
    # We have a triangle every 3 indices specified (two per face).
    indices = [0, 1, 2, 2, 3, 0,
               4, 5, 6, 6, 7, 4,
               4, 5, 1, 1, 0, 4,
               6, 7, 3, 3, 2, 6,
               5, 6, 2, 2, 1, 5,
               7, 4, 0, 0, 3, 7]

    return Shape(vertices, indices)
def createColorNormalsCube(r, g, b):
    """Solid-color cube with per-face normals (4 vertices per face)."""
    # Defining the location and colors of each vertex of the shape
    vertices = [
        # positions        colors   normals
        # Z+
        -0.5, -0.5, 0.5, r, g, b, 0, 0, 1,
        0.5, -0.5, 0.5, r, g, b, 0, 0, 1,
        0.5, 0.5, 0.5, r, g, b, 0, 0, 1,
        -0.5, 0.5, 0.5, r, g, b, 0, 0, 1,

        # Z-
        -0.5, -0.5, -0.5, r, g, b, 0, 0, -1,
        0.5, -0.5, -0.5, r, g, b, 0, 0, -1,
        0.5, 0.5, -0.5, r, g, b, 0, 0, -1,
        -0.5, 0.5, -0.5, r, g, b, 0, 0, -1,

        # X+
        0.5, -0.5, -0.5, r, g, b, 1, 0, 0,
        0.5, 0.5, -0.5, r, g, b, 1, 0, 0,
        0.5, 0.5, 0.5, r, g, b, 1, 0, 0,
        0.5, -0.5, 0.5, r, g, b, 1, 0, 0,

        # X-
        -0.5, -0.5, -0.5, r, g, b, -1, 0, 0,
        -0.5, 0.5, -0.5, r, g, b, -1, 0, 0,
        -0.5, 0.5, 0.5, r, g, b, -1, 0, 0,
        -0.5, -0.5, 0.5, r, g, b, -1, 0, 0,

        # Y+
        -0.5, 0.5, -0.5, r, g, b, 0, 1, 0,
        0.5, 0.5, -0.5, r, g, b, 0, 1, 0,
        0.5, 0.5, 0.5, r, g, b, 0, 1, 0,
        -0.5, 0.5, 0.5, r, g, b, 0, 1, 0,

        # Y-
        -0.5, -0.5, -0.5, r, g, b, 0, -1, 0,
        0.5, -0.5, -0.5, r, g, b, 0, -1, 0,
        0.5, -0.5, 0.5, r, g, b, 0, -1, 0,
        -0.5, -0.5, 0.5, r, g, b, 0, -1, 0
        ]

    # Defining connections among vertices
    # We have a triangle every 3 indices specified (two per face).
    indices = [
        0, 1, 2, 2, 3, 0,  # Z+
        7, 6, 5, 5, 4, 7,  # Z-
        8, 9, 10, 10, 11, 8,  # X+
        15, 14, 13, 13, 12, 15,  # X-
        19, 18, 17, 17, 16, 19,  # Y+
        20, 21, 22, 22, 23, 20]  # Y-

    return Shape(vertices, indices)
def createTextureNormalsCube(image_filename):
    """Textured cube with per-face normals (4 vertices per face)."""
    # Defining locations, texture coordinates and normals for each vertex of the shape
    vertices = [
        # positions         tex coords  normals
        # Z+
        -0.5, -0.5, 0.5, 0, 1, 0, 0, 1,
        0.5, -0.5, 0.5, 1, 1, 0, 0, 1,
        0.5, 0.5, 0.5, 1, 0, 0, 0, 1,
        -0.5, 0.5, 0.5, 0, 0, 0, 0, 1,

        # Z-
        -0.5, -0.5, -0.5, 0, 1, 0, 0, -1,
        0.5, -0.5, -0.5, 1, 1, 0, 0, -1,
        0.5, 0.5, -0.5, 1, 0, 0, 0, -1,
        -0.5, 0.5, -0.5, 0, 0, 0, 0, -1,

        # X+
        0.5, -0.5, -0.5, 0, 1, 1, 0, 0,
        0.5, 0.5, -0.5, 1, 1, 1, 0, 0,
        0.5, 0.5, 0.5, 1, 0, 1, 0, 0,
        0.5, -0.5, 0.5, 0, 0, 1, 0, 0,

        # X-
        -0.5, -0.5, -0.5, 0, 1, -1, 0, 0,
        -0.5, 0.5, -0.5, 1, 1, -1, 0, 0,
        -0.5, 0.5, 0.5, 1, 0, -1, 0, 0,
        -0.5, -0.5, 0.5, 0, 0, -1, 0, 0,

        # Y+
        -0.5, 0.5, -0.5, 0, 1, 0, 1, 0,
        0.5, 0.5, -0.5, 1, 1, 0, 1, 0,
        0.5, 0.5, 0.5, 1, 0, 0, 1, 0,
        -0.5, 0.5, 0.5, 0, 0, 0, 1, 0,

        # Y-
        -0.5, -0.5, -0.5, 0, 1, 0, -1, 0,
        0.5, -0.5, -0.5, 1, 1, 0, -1, 0,
        0.5, -0.5, 0.5, 1, 0, 0, -1, 0,
        -0.5, -0.5, 0.5, 0, 0, 0, -1, 0
        ]

    # Defining connections among vertices
    # We have a triangle every 3 indices specified (two per face).
    indices = [
        0, 1, 2, 2, 3, 0,  # Z+
        7, 6, 5, 5, 4, 7,  # Z-
        8, 9, 10, 10, 11, 8,  # X+
        15, 14, 13, 13, 12, 15,  # X-
        19, 18, 17, 17, 16, 19,  # Y+
        20, 21, 22, 22, 23, 20]  # Y-

    return Shape(vertices, indices, image_filename)
324415 | # -*- coding: utf-8 -*-
"""
Created on Sep 20, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=unused-wildcard-import,unused-variable
import os
import logging
import sys
import getpass
import codecs
from builtins import object
from libs.ConsoleColors import *
try:
from urllib.parse import quote, quote_plus
except ImportError:
from urllib import quote, quote_plus
from sqlalchemy import create_engine
from tornado.options import options
class DatabaseConnection(object):
def __init__(
    self, database, hostname="", port="", username="", password="", dialect=""
):
    """Store connection settings; str(self) builds the SQLAlchemy URL."""
    self.database = database
    self.hostname = hostname
    self.port = port
    self.username = username
    self.password = password
    # One of 'sqlite', 'postgres'/'postgresql', or 'mysql'.
    self.dialect = dialect
def __str__(self):
    """ Construct the database connection string """
    if self.dialect == "sqlite":
        db_conn = self._sqlite()
    elif self.dialect.startswith("postgres"):
        # Accepts both "postgres" and "postgresql".
        db_conn = self._postgresql()
    elif self.dialect == "mysql":
        db_conn = self._mysql()
    else:
        raise ValueError("Database dialect not supported")
    # NOTE(review): the result is discarded; _mysql/_postgresql already
    # verify connectivity, so this extra probe only matters for sqlite —
    # confirm whether failures here should be surfaced.
    self._test_connection(db_conn)
    return db_conn
def _postgresql(self):
    """
    Configured to use postgresql, there is no built-in support
    for postgresql so make sure we can import the 3rd party
    python lib 'pypostgresql'
    """
    logging.debug("Configured to use Postgresql for a database")
    try:
        import pypostgresql
    except ImportError:
        # Hard exit: the server cannot run without the driver.
        print(WARN + "You must install 'pypostgresql'")
        os._exit(1)
    db_host, db_name, db_user, db_password = self._db_credentials()
    # NOTE(review): credentials are interpolated un-escaped; passwords
    # containing '@', ':' or '/' would corrupt the URL — confirm.
    postgres = "postgresql+pypostgresql://%s:%s@%s/%s" % (
        db_user,
        db_password,
        db_host,
        db_name,
    )
    if self._test_connection(postgres):
        return postgres
    else:
        logging.fatal("Cannot connect to database with any available driver")
        os._exit(1)
def _sqlite(self):
"""
SQLite connection string, always save db file to cwd, or in-memory
"""
logging.debug("Configured to use SQLite for a database")
db_name = self.database
if not len(db_name):
db_name = "rtb"
return "sqlite:///%s.db" % db_name
def _mysql(self):
""" Configure db_connection for MySQL """
logging.debug("Configured to use MySQL for a database")
db_server, db_name, db_user, db_password = self._db_credentials()
db_charset = "utf8mb4"
db_connection = "%s:%s@%s/%s?charset=%s" % (
db_user,
db_password,
db_server,
db_name,
db_charset,
)
codecs.register(
lambda name: codecs.lookup("utf8") if name == "utf8mb4" else None
)
__mysql = "mysql://%s" % db_connection
__mysqlclient = "mysql+mysqldb://%s" % db_connection
__pymysql = "mysql+pymysql://%s" % db_connection
__mysqlconnector = "mysql+mysqlconnector://%s" % db_connection
if self._test_connection(__mysql):
return __mysql
elif self._test_connection(__mysqlclient):
return __mysqlclient
elif self._test_connection(__pymysql):
return __pymysql
elif self._test_connection(__mysqlconnector):
return __mysqlconnector
else:
logging.fatal(
"Cannot connect to database with any available driver. Verify correct username & password in rootthebox.cfg and db dependecies."
)
os._exit(1)
def _test_connection(self, connection_string):
"""
Test the connection string to see if we can connect to the database
"""
try:
engine = create_engine(connection_string)
connection = engine.connect()
connection.close()
return True
except Exception as e:
if options.debug:
logging.exception("Database connection failed: %s" % e)
return False
def _db_credentials(self):
""" Pull db creds and return them url encoded """
if self.password == "" or self.password == "<PASSWORD>":
sys.stdout.write(PROMPT + "Database password: ")
sys.stdout.flush()
self.password = <PASSWORD>()
elif self.password == "ENV":
self.password = os.environ["sql_password"]
db_host = quote(self.hostname)
db_name = quote(self.database)
db_user = quote(self.username)
db_password = <PASSWORD>_<PASSWORD>(self.password)
return db_host, db_name, db_user, db_password
| StarcoderdataPython |
4904684 | <filename>chat/consumers.py
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from channels.db import database_sync_to_async
from django.core.serializers import serialize
from django.utils import timezone
from django.core.paginator import Paginator
import json
import asyncio
from chat.models import RoomChatMessage, PrivateChatRoom, UnreadChatRoomMessages
from friend.models import FriendList
from account.utils import LazyAccountEncoder
from chat.utils import calculate_timestamp, LazyRoomChatMessageEncoder
from chat.exceptions import ClientError
from chat.constants import *
from account.models import Account
class ChatConsumer(AsyncJsonWebsocketConsumer):
    """
    Websocket consumer for a single private (1:1) chat room.

    One instance exists per websocket connection; ``self.room_id`` tracks the
    room (if any) this connection has currently joined.
    """
    async def connect(self):
        """
        Called when the websocket is handshaking as part of initial connection.
        """
        print("ChatConsumer: connect: " + str(self.scope["user"]))
        # let everyone connect. But limit read/write to authenticated users
        await self.accept()
        # the room_id will define what it means to be "connected". If it is not None, then the user is connected.
        self.room_id = None
    async def receive_json(self, content):
        """
        Called when we get a text frame. Channels will JSON-decode the payload
        for us and pass it as the first argument.
        """
        # Messages will have a "command" key we can switch on
        print("ChatConsumer: receive_json")
        command = content.get("command", None)
        try:
            if command == "join":
                print("joining room: " + str(content['room_id']))
                await self.join_room(content["room_id"])
            elif command == "leave":
                # Leave the room
                await self.leave_room(content["room_id"])
            elif command == "send":
                if len(content["message"].lstrip()) == 0:
                    raise ClientError(422,"You can't send an empty message.")
                await self.send_room(content["room_id"], content["message"])
            elif command == "get_room_chat_messages":
                await self.display_progress_bar(True)
                room = await get_room_or_error(content['room_id'], self.scope["user"])
                payload = await get_room_chat_messages(room, content['page_number'])
                if payload != None:
                    payload = json.loads(payload)
                    await self.send_messages_payload(payload['messages'], payload['new_page_number'])
                else:
                    raise ClientError(204,"Something went wrong retrieving the chatroom messages.")
                await self.display_progress_bar(False)
            elif command == "get_user_info":
                await self.display_progress_bar(True)
                room = await get_room_or_error(content['room_id'], self.scope["user"])
                # NOTE: get_user_info is called synchronously (no await); see
                # the comment above its definition.
                payload = get_user_info(room, self.scope["user"])
                if payload != None:
                    payload = json.loads(payload)
                    await self.send_user_info_payload(payload['user_info'])
                else:
                    raise ClientError(204, "Something went wrong retrieving the other users account details.")
                await self.display_progress_bar(False)
        except ClientError as e:
            await self.handle_client_error(e)
    async def disconnect(self, code):
        """
        Called when the WebSocket closes for any reason.
        """
        # Leave the room
        print("ChatConsumer: disconnect")
        try:
            if self.room_id != None:
                await self.leave_room(self.room_id)
        except Exception as e:
            print("EXCEPTION: " + str(e))
            pass
    async def join_room(self, room_id):
        """
        Called by receive_json when someone sent a join command.
        """
        # The logged-in user is in our scope thanks to the authentication ASGI middleware (AuthMiddlewareStack)
        print("ChatConsumer: join_room: " + str(room_id))
        try:
            room = await get_room_or_error(room_id, self.scope["user"])
        except ClientError as e:
            return await self.handle_client_error(e)
        # Add user to "users" list for room
        await connect_user(room, self.scope["user"])
        # Store that we're in the room
        self.room_id = room.id
        await on_user_connected(room, self.scope["user"])
        # Add them to the group so they get room messages
        await self.channel_layer.group_add(
            room.group_name,
            self.channel_name,
        )
        # Instruct their client to finish opening the room
        await self.send_json({
            "join": str(room.id),
        })
    async def leave_room(self, room_id):
        """
        Called by receive_json when someone sent a leave command.
        """
        # The logged-in user is in our scope thanks to the authentication ASGI middleware
        print("ChatConsumer: leave_room")
        room = await get_room_or_error(room_id, self.scope["user"])
        # Remove user from "connected_users" list
        await disconnect_user(room, self.scope["user"])
        # Remove that we're in the room
        self.room_id = None
        # Remove them from the group so they no longer get room messages
        await self.channel_layer.group_discard(
            room.group_name,
            self.channel_name,
        )
        # Instruct their client to finish closing the room
        await self.send_json({
            "leave": str(room.id),
        })
    async def send_room(self, room_id, message):
        """
        Called by receive_json when someone sends a message to a room.
        """
        print("ChatConsumer: send_room")
        # Check they are in this room
        if self.room_id != None:
            if str(room_id) != str(self.room_id):
                print("CLIENT ERRROR 1")
                raise ClientError("ROOM_ACCESS_DENIED", "Room access denied")
        else:
            print("CLIENT ERRROR 2")
            raise ClientError("ROOM_ACCESS_DENIED", "Room access denied")
        # Get the room and send to the group about it
        room = await get_room_or_error(room_id, self.scope["user"])
        # get list of connected_users
        connected_users = room.connected_users.all()
        # Execute these functions asychronously
        # (bump unread counters for any offline member and persist the
        # message row, all concurrently)
        await asyncio.gather(*[
            append_unread_msg_if_not_connected(room, room.user1, connected_users, message),
            append_unread_msg_if_not_connected(room, room.user2, connected_users, message),
            create_room_chat_message(room, self.scope["user"], message)
        ])
        await self.channel_layer.group_send(
            room.group_name,
            {
                "type": "chat.message",
                "profile_image": self.scope["user"].profile_image.url,
                "username": self.scope["user"].username,
                "user_id": self.scope["user"].id,
                "message": message,
            }
        )
    async def chat_message(self, event):
        """
        Called when someone has messaged our chat.
        """
        # Send a message down to the client
        print("ChatConsumer: chat_message")
        timestamp = calculate_timestamp(timezone.now())
        await self.send_json(
            {
                "msg_type": MSG_TYPE_MESSAGE,
                "username": event["username"],
                "user_id": event["user_id"],
                "profile_image": event["profile_image"],
                "message": event["message"],
                "natural_timestamp": timestamp,
            },
        )
    async def send_messages_payload(self, messages, new_page_number):
        """
        Send a payload of messages to the ui
        """
        print("ChatConsumer: send_messages_payload. ")
        await self.send_json(
            {
                "messages_payload": "messages_payload",
                "messages": messages,
                "new_page_number": new_page_number,
            },
        )
    async def send_user_info_payload(self, user_info):
        """
        Send a payload of user information to the ui
        """
        print("ChatConsumer: send_user_info_payload. ")
        await self.send_json(
            {
                "user_info": user_info,
            },
        )
    async def display_progress_bar(self, is_displayed):
        """
        1. is_displayed = True
            - Display the progress bar on UI
        2. is_displayed = False
            - Hide the progress bar on UI
        """
        print("DISPLAY PROGRESS BAR: " + str(is_displayed))
        await self.send_json(
            {
                "display_progress_bar": is_displayed
            }
        )
    async def handle_client_error(self, e):
        """
        Called when a ClientError is raised.
        Sends error data to UI.
        """
        errorData = {}
        errorData['error'] = e.code
        if e.message:
            errorData['message'] = e.message
        await self.send_json(errorData)
        return
@database_sync_to_async
def get_room_or_error(room_id, user):
    """
    Fetch the private chat room for `user`, raising ClientError for an
    unknown room, a non-member, or a pair of users who are not friends.
    """
    try:
        room = PrivateChatRoom.objects.get(pk=room_id)
    except PrivateChatRoom.DoesNotExist:
        raise ClientError("ROOM_INVALID", "Invalid room.")
    # Only the two participants may enter the room.
    if user != room.user1 and user != room.user2:
        raise ClientError("ROOM_ACCESS_DENIED", "You do not have permission to join this room.")
    # The participants must also be friends with each other.
    friend_list = FriendList.objects.get(user=user).friends.all()
    if room.user1 not in friend_list and room.user2 not in friend_list:
        raise ClientError("ROOM_ACCESS_DENIED", "You must be friends to chat.")
    return room
# I don't think this requires @database_sync_to_async since we are just accessing a model field
# https://docs.djangoproject.com/en/3.1/ref/models/instances/#refreshing-objects-from-database
def get_user_info(room, user):
    """
    Serialize the account info of the *other* participant in a private room.

    Returns a JSON string of the form {"user_info": {...}}.
    Raises ClientError("DATA_ERROR", ...) if serialization fails.
    """
    try:
        # Determine who is who
        other_user = room.user1 if room.user1 != user else room.user2
        s = LazyAccountEncoder()
        # convert to list for serializer and select first entry (there will be only 1)
        payload = {'user_info': s.serialize([other_user])[0]}
        return json.dumps(payload)
    except Exception:
        # The original caught ClientError here, but nothing in the try block
        # raises ClientError, so that handler (and the unreachable
        # `return None` after it) could never run. Catch any failure and
        # surface it as a ClientError instead.
        raise ClientError("DATA_ERROR", "Unable to get that users information.")
@database_sync_to_async
def create_room_chat_message(room, user, message):
    """Persist a single chat message authored by `user` in `room`."""
    return RoomChatMessage.objects.create(room=room, user=user, content=message)
@database_sync_to_async
def get_room_chat_messages(room, page_number):
    """
    Return one page of chat messages for `room` as a JSON string.

    The payload contains the serialized messages (or the literal string
    "None" when `page_number` is past the last page) and the next page
    number to request. Returns None if anything goes wrong.
    """
    try:
        paginator = Paginator(RoomChatMessage.objects.by_room(room),
                              DEFAULT_ROOM_CHAT_MESSAGE_PAGE_SIZE)
        page = int(page_number)
        payload = {'messages': "None", 'new_page_number': page}
        if page <= paginator.num_pages:
            payload['new_page_number'] = page + 1
            encoder = LazyRoomChatMessageEncoder()
            payload['messages'] = encoder.serialize(
                paginator.page(page_number).object_list)
        return json.dumps(payload)
    except Exception as e:
        print("EXCEPTION: " + str(e))
        return None
@database_sync_to_async
def connect_user(room, user):
    """Add the user's Account to the room's list of connected users."""
    return room.connect_user(Account.objects.get(pk=user.id))
@database_sync_to_async
def disconnect_user(room, user):
    """Remove the user's Account from the room's list of connected users."""
    return room.disconnect_user(Account.objects.get(pk=user.id))
# If the user is not connected to the chat, increment "unread messages" count
@database_sync_to_async
def append_unread_msg_if_not_connected(room, user, connected_users, message):
    """
    Record `message` as unread for `user` when they are not currently
    connected to the room, incrementing their unread counter.
    """
    if user not in connected_users:
        try:
            unread_msgs = UnreadChatRoomMessages.objects.get(room=room, user=user)
            unread_msgs.most_recent_message = message
            unread_msgs.count += 1
            unread_msgs.save()
        except UnreadChatRoomMessages.DoesNotExist:
            # First unread message for this user/room pair.
            # NOTE(review): most_recent_message is not set on the newly
            # created row - confirm whether it should also store `message`.
            UnreadChatRoomMessages(room=room, user=user, count=1).save()
# When a user connects, reset their unread message count
@database_sync_to_async
def on_user_connected(room, user):
    """
    Reset the unread-message counter for `user` once they are confirmed to
    be in the room's connected_users list.
    """
    # confirm they are in the connected users list
    if user in room.connected_users.all():
        try:
            # reset count
            unread_msgs = UnreadChatRoomMessages.objects.get(room=room, user=user)
            unread_msgs.count = 0
            unread_msgs.save()
        except UnreadChatRoomMessages.DoesNotExist:
            # No counter row exists yet for this user/room; create one.
            UnreadChatRoomMessages(room=room, user=user).save()
1748545 | # Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import datetime
import os
from oslo_utils import uuidutils
import time
import pki
subcas_available = True
try:
import pki.authority as authority
import pki.feature as feature
except ImportError:
subcas_available = False
import pki.cert
import pki.client
import pki.crypto as cryptoutil
import pki.key as key
import pki.kra
import pki.profile
from requests import exceptions as request_exceptions
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
# we want to keep the dogtag config options separated. That way we
# do not need to import every dogtag requirement to generate the
# sample config
import barbican.plugin.dogtag_config_opts # noqa
import barbican.plugin.interface.certificate_manager as cm
import barbican.plugin.interface.secret_store as sstore
# reuse the conf object to not call config.new_config() twice
CONF = barbican.plugin.dogtag_config_opts.CONF
LOG = utils.getLogger(__name__)
# PEM delimiters used when handling certificate text.
CERT_HEADER = "-----BEGIN CERTIFICATE-----"
CERT_FOOTER = "-----END CERTIFICATE-----"
# Nickname under which the KRA transport certificate is stored in the NSS DB.
KRA_TRANSPORT_NICK = "KRA transport cert"
def _create_nss_db_if_needed(nss_db_path, nss_password):
    """Create the NSS database unless one already exists at the given path.

    :returns: True if a new database was created, False otherwise.
    """
    if os.path.exists(nss_db_path):
        # An existing path is taken to mean a previously initialized DB.
        LOG.info("The nss_db_path provided already exists, so the "
                 "database is assumed to be already set up.")
        return False
    cryptoutil.NSSCryptoProvider.setup_database(
        nss_db_path, nss_password, over_write=True)
    return True
def _setup_nss_db_services(conf):
    """Initialize the NSS crypto provider and its backing database.

    Returns a configured NSSCryptoProvider, or None when no nss_db_path is
    configured. If the database had to be created, the KRA transport cert
    is imported into it as well. Raises ValueError when a path is given
    without a password.
    """
    nss_db_path = conf.dogtag_plugin.nss_db_path
    nss_password = conf.dogtag_plugin.nss_password
    if nss_db_path is None:
        LOG.warning("nss_db_path was not provided so the crypto "
                    "provider functions were not initialized.")
        return None
    if nss_password is None:
        raise ValueError(u._("nss_password is required"))
    freshly_created = _create_nss_db_if_needed(nss_db_path, nss_password)
    crypto = cryptoutil.NSSCryptoProvider(nss_db_path, nss_password)
    if freshly_created:
        _import_kra_transport_cert_to_nss_db(conf, crypto)
    return crypto
def _import_kra_transport_cert_to_nss_db(conf, crypto):
    """Fetch the KRA transport certificate and import it into the NSS DB."""
    try:
        kraclient = pki.kra.KRAClient(create_connection(conf, 'kra'), crypto)
        transport_cert = kraclient.system_certs.get_transport_cert()
        crypto.import_cert(KRA_TRANSPORT_NICK, transport_cert, "u,u,u")
    except Exception as e:
        # Best-effort: a missing/unreachable KRA is logged, not fatal.
        LOG.error("Error in importing transport cert."
                  " KRA may not be enabled: %s", e)
def create_connection(conf, subsystem_path):
    """Build an authenticated PKIConnection to the given Dogtag subsystem."""
    cert_path = conf.dogtag_plugin.pem_path
    if cert_path is None:
        raise ValueError(u._("pem_path is required"))
    conn = pki.client.PKIConnection(
        'https',
        conf.dogtag_plugin.dogtag_host,
        # port is string type in PKIConnection
        str(conf.dogtag_plugin.dogtag_port),
        subsystem_path)
    conn.set_authentication_cert(cert_path)
    return conn
# Initialize the NSS crypto provider once at module import time; `crypto`
# stays None when no nss_db_path is configured.
crypto = _setup_nss_db_services(CONF)
if crypto:
    crypto.initialize()
class DogtagPluginAlgorithmException(exception.BarbicanException):
    """Raised when a requested algorithm cannot be mapped to a Dogtag one."""
    message = u._("Invalid algorithm passed in")
class DogtagPluginNotSupportedException(exception.NotSupported):
    """Raised when a caller requests an operation Dogtag cannot perform."""
    message = u._("Operation not supported by Dogtag Plugin")

    def __init__(self, msg=None):
        # Fall back to the class-level default when no message is supplied.
        super(DogtagPluginNotSupportedException, self).__init__(
            msg or self.message)
class DogtagPluginArchivalException(exception.BarbicanException):
    """Raised when the KRA fails to return a key id during archival."""
    message = u._("Key archival failed. Error returned from KRA.")
class DogtagPluginGenerationException(exception.BarbicanException):
    """Raised when the KRA fails to return a key id during generation."""
    message = u._("Key generation failed. Error returned from KRA.")
class DogtagKRAPlugin(sstore.SecretStoreBase):
    """Implementation of the secret store plugin with KRA as the backend."""
    # metadata constants - keys used in the metadata dicts this plugin
    # returns to (and later receives back from) Barbican core
    ALG = "alg"
    BIT_LENGTH = "bit_length"
    GENERATED = "generated"
    KEY_ID = "key_id"
    SECRET_MODE = "secret_mode" # nosec
    PASSPHRASE_KEY_ID = "passphrase_key_id" # nosec
    CONVERT_TO_PEM = "convert_to_pem"
    # string constants
    DSA_PRIVATE_KEY_HEADER = '-----BEGIN DSA PRIVATE KEY-----'
    DSA_PRIVATE_KEY_FOOTER = '-----END DSA PRIVATE KEY-----'
    DSA_PUBLIC_KEY_HEADER = '-----BEGIN DSA PUBLIC KEY-----'
    DSA_PUBLIC_KEY_FOOTER = '-----END DSA PUBLIC KEY-----'
    def __init__(self, conf=CONF):
        """Constructor - create the keyclient."""
        LOG.debug("starting DogtagKRAPlugin init")
        connection = create_connection(conf, 'kra')
        # create kraclient
        kraclient = pki.kra.KRAClient(connection, crypto)
        self.keyclient = kraclient.keys
        self.keyclient.set_transport_cert(KRA_TRANSPORT_NICK)
        self.plugin_name = conf.dogtag_plugin.plugin_name
        self.retries = conf.dogtag_plugin.retries
        LOG.debug("completed DogtagKRAPlugin init")
    def get_plugin_name(self):
        return self.plugin_name
    def store_secret(self, secret_dto):
        """Store a secret in the KRA
        If secret_dto.transport_key is not None, then we expect
        secret_dto.secret to include a base64 encoded PKIArchiveOptions
        structure as defined in section 6.4 of RFC 2511. This package contains
        a transport key wrapped session key, the session key wrapped secret
        and parameters to specify the symmetric key wrapping.
        Otherwise, the data is unencrypted and we use a call to archive_key()
        to have the Dogtag KRA client generate the relevant session keys.
        The secret_dto contains additional information on the type of secret
        that is being stored. We will use that shortly. For, now, lets just
        assume that its all PASS_PHRASE_TYPE
        Returns a dict with the relevant metadata (which in this case is just
        the key_id
        """
        data_type = key.KeyClient.PASS_PHRASE_TYPE
        key_id = None
        attempts = 0
        offset_time = 1
        # Retry with a linearly growing delay until the KRA returns a key_id
        # or the configured retry budget is exhausted.
        while attempts <= self.retries and key_id is None:
            client_key_id = uuidutils.generate_uuid(dashed=False)
            if secret_dto.transport_key is not None:
                # TODO(alee-3) send the transport key with the archival request
                # once the Dogtag Client API changes.
                response = self.keyclient.archive_pki_options(
                    client_key_id,
                    data_type,
                    secret_dto.secret,
                    key_algorithm=None,
                    key_size=None)
            else:
                response = self.keyclient.archive_key(
                    client_key_id,
                    data_type,
                    secret_dto.secret,
                    key_algorithm=None,
                    key_size=None)
            key_id = response.get_key_id()
            if key_id is None:
                LOG.warning("key_id is None. attempts: {}".format(attempts))
                attempts += 1
                time.sleep(offset_time)
                offset_time += 1
        if key_id is None:
            raise DogtagPluginArchivalException
        meta_dict = {DogtagKRAPlugin.KEY_ID: key_id}
        self._store_secret_attributes(meta_dict, secret_dto)
        return meta_dict
    def get_secret(self, secret_type, secret_metadata):
        """Retrieve a secret from the KRA
        The secret_metadata is simply the dict returned by a store_secret() or
        get_secret() call.  We will extract the key_id from this dict.
        Note: There are two ways to retrieve secrets from the KRA.
        The first method calls retrieve_key without a wrapping key.  This
        relies on the KRA client to generate a wrapping key (and wrap it with
        the KRA transport cert), and is completely transparent to the
        Barbican server.  What is returned to the caller is the
        unencrypted secret.
        The second way is to provide a wrapping key that would be generated
        on the barbican client.  That way only the client will be
        able to unwrap the secret.  This wrapping key is provided in the
        secret_metadata by Barbican core.
        Format/Type of the secret returned in the SecretDTO object.
        -----------------------------------------------------------
        The type of the secret returned is always dependent on the way it is
        stored using the store_secret method.
        In case of strings - like passphrase/PEM strings, the return will be a
        string.
        In case of binary data - the return will be the actual binary data.
        In case of retrieving an asymmetric key that is generated using the
        dogtag plugin, then the binary representation of, the asymmetric key in
        PEM format, is returned
        """
        key_id = secret_metadata[DogtagKRAPlugin.KEY_ID]
        key_spec = sstore.KeySpec(
            alg=secret_metadata.get(DogtagKRAPlugin.ALG, None),
            bit_length=secret_metadata.get(DogtagKRAPlugin.BIT_LENGTH, None),
            mode=secret_metadata.get(DogtagKRAPlugin.SECRET_MODE, None),
            passphrase=None
        )
        generated = secret_metadata.get(DogtagKRAPlugin.GENERATED, False)
        passphrase = self._get_passphrase_for_a_private_key(
            secret_type, secret_metadata, key_spec)
        recovered_key = None
        twsk = DogtagKRAPlugin._get_trans_wrapped_session_key(secret_type,
                                                              secret_metadata)
        if DogtagKRAPlugin.CONVERT_TO_PEM in secret_metadata:
            # Case for returning the asymmetric keys generated in KRA.
            # Asymmetric keys generated in KRA are not generated in PEM format.
            # This marker DogtagKRAPlugin.CONVERT_TO_PEM is set in the
            # secret_metadata for asymmetric keys generated in KRA to
            # help convert the returned private/public keys to PEM format and
            # eventually return the binary data of the keys in PEM format.
            if secret_type == sstore.SecretType.PUBLIC:
                # Public key should be retrieved using the get_key_info method
                # as it is treated as an attribute of the asymmetric key pair
                # stored in the KRA database.
                key_info = self.keyclient.get_key_info(key_id)
                recovered_key = serialization.load_der_public_key(
                    key_info.public_key,
                    backend=default_backend()
                ).public_bytes(
                    serialization.Encoding.PEM,
                    serialization.PublicFormat.PKCS1)
            elif secret_type == sstore.SecretType.PRIVATE:
                key_data = self.keyclient.retrieve_key(key_id)
                private_key = serialization.load_der_private_key(
                    key_data.data,
                    password=None,
                    backend=default_backend()
                )
                if passphrase is not None:
                    e_alg = serialization.BestAvailableEncryption(passphrase)
                else:
                    e_alg = serialization.NoEncryption()
                recovered_key = private_key.private_bytes(
                    encoding=serialization.Encoding.PEM,
                    format=serialization.PrivateFormat.PKCS8,
                    encryption_algorithm=e_alg
                )
        else:
            # TODO(alee-3) send transport key as well when dogtag client API
            # changes in case the transport key has changed.
            key_data = self.keyclient.retrieve_key(key_id, twsk)
            if twsk:
                # The data returned is a byte array.
                recovered_key = key_data.encrypted_data
            else:
                recovered_key = key_data.data
        # TODO(alee) remove final field when content_type is removed
        # from secret_dto
        if generated:
            recovered_key = base64.b64encode(recovered_key)
        ret = sstore.SecretDTO(
            type=secret_type,
            secret=recovered_key,
            key_spec=key_spec,
            content_type=None,
            transport_key=None)
        return ret
    def delete_secret(self, secret_metadata):
        """Delete a secret from the KRA
        There is currently no way to delete a secret in Dogtag.
        We will be implementing such a method shortly.
        """
        pass
    def generate_symmetric_key(self, key_spec):
        """Generate a symmetric key
        This calls generate_symmetric_key() on the KRA passing in the
        algorithm, bit_length and id (used as the client_key_id) from
        the secret.  The remaining parameters are not used.
        Returns a metadata object that can be used for retrieving the secret.
        """
        usages = [key.SymKeyGenerationRequest.DECRYPT_USAGE,
                  key.SymKeyGenerationRequest.ENCRYPT_USAGE]
        algorithm = self._map_algorithm(key_spec.alg.lower())
        if algorithm is None:
            raise DogtagPluginAlgorithmException
        passphrase = key_spec.passphrase
        if passphrase:
            raise DogtagPluginNotSupportedException(
                u._("Passphrase encryption is not supported for symmetric"
                    " key generating algorithms."))
        key_id = None
        attempts = 0
        offset_time = 1
        # Same linear-backoff retry pattern as store_secret().
        while attempts <= self.retries and key_id is None:
            client_key_id = uuidutils.generate_uuid()
            response = self.keyclient.generate_symmetric_key(
                client_key_id,
                algorithm,
                key_spec.bit_length,
                usages)
            key_id = response.get_key_id()
            if key_id is None:
                LOG.warning("generate_symkey: key_id is None. attempts: {}"
                            .format(attempts))
                attempts += 1
                time.sleep(offset_time)
                offset_time += 1
        if key_id is None:
            raise DogtagPluginGenerationException
        # Barbican expects stored keys to be base 64 encoded.  We need to
        # add flag to the keyclient.generate_symmetric_key() call above
        # to ensure that the key that is stored is base64 encoded.
        #
        # As a workaround until that update is available, we will store a
        # parameter "generated" to indicate that the response must be base64
        # encoded on retrieval.  Note that this will not work for transport
        # key encoded data.
        return {DogtagKRAPlugin.ALG: key_spec.alg,
                DogtagKRAPlugin.BIT_LENGTH: key_spec.bit_length,
                DogtagKRAPlugin.KEY_ID: response.get_key_id(),
                DogtagKRAPlugin.GENERATED: True}
    def generate_asymmetric_key(self, key_spec):
        """Generate an asymmetric key.
        Note that barbican expects all secrets to be base64 encoded.
        """
        usages = [key.AsymKeyGenerationRequest.DECRYPT_USAGE,
                  key.AsymKeyGenerationRequest.ENCRYPT_USAGE]
        client_key_id = uuidutils.generate_uuid()
        algorithm = self._map_algorithm(key_spec.alg.lower())
        passphrase = key_spec.passphrase
        if algorithm is None:
            raise DogtagPluginAlgorithmException
        passphrase_key_id = None
        passphrase_metadata = None
        if passphrase:
            if algorithm == key.KeyClient.DSA_ALGORITHM:
                raise DogtagPluginNotSupportedException(
                    u._("Passphrase encryption is not "
                        "supported for DSA algorithm")
                )
            stored_passphrase_info = self.keyclient.archive_key(
                uuidutils.generate_uuid(),
                self.keyclient.PASS_PHRASE_TYPE,
                base64.b64encode(passphrase))
            passphrase_key_id = stored_passphrase_info.get_key_id()
            passphrase_metadata = {
                DogtagKRAPlugin.KEY_ID: passphrase_key_id
            }
        # Barbican expects stored keys to be base 64 encoded.  We need to
        # add flag to the keyclient.generate_asymmetric_key() call above
        # to ensure that the key that is stored is base64 encoded.
        #
        # As a workaround until that update is available, we will store a
        # parameter "generated" to indicate that the response must be base64
        # encoded on retrieval.  Note that this will not work for transport
        # key encoded data.
        response = self.keyclient.generate_asymmetric_key(
            client_key_id,
            algorithm,
            key_spec.bit_length,
            usages)
        public_key_metadata = {
            DogtagKRAPlugin.ALG: key_spec.alg,
            DogtagKRAPlugin.BIT_LENGTH: key_spec.bit_length,
            DogtagKRAPlugin.KEY_ID: response.get_key_id(),
            DogtagKRAPlugin.CONVERT_TO_PEM: "true",
            DogtagKRAPlugin.GENERATED: True
        }
        private_key_metadata = {
            DogtagKRAPlugin.ALG: key_spec.alg,
            DogtagKRAPlugin.BIT_LENGTH: key_spec.bit_length,
            DogtagKRAPlugin.KEY_ID: response.get_key_id(),
            DogtagKRAPlugin.CONVERT_TO_PEM: "true",
            DogtagKRAPlugin.GENERATED: True
        }
        if passphrase_key_id:
            private_key_metadata[DogtagKRAPlugin.PASSPHRASE_KEY_ID] = (
                passphrase_key_id
            )
        return sstore.AsymmetricKeyMetadataDTO(private_key_metadata,
                                               public_key_metadata,
                                               passphrase_metadata)
    def generate_supports(self, key_spec):
        """Key generation supported?
        Specifies whether the plugin supports key generation with the
        given key_spec.
        For now, we will just check the algorithm.  When dogtag adds a
        call to check the bit length as well, we will use that call to
        take advantage of the bit_length information
        """
        return self._map_algorithm(key_spec.alg) is not None
    def store_secret_supports(self, key_spec):
        """Key storage supported?
        Specifies whether the plugin supports storage of the secret given
        the attributes included in the KeySpec
        """
        return True
    @staticmethod
    def _map_algorithm(algorithm):
        """Map Barbican algorithms to Dogtag plugin algorithms.
        Note that only algorithms supported by Dogtag will be mapped.
        """
        if algorithm is None:
            return None
        if algorithm.lower() == sstore.KeyAlgorithm.AES.lower():
            return key.KeyClient.AES_ALGORITHM
        elif algorithm.lower() == sstore.KeyAlgorithm.DES.lower():
            return key.KeyClient.DES_ALGORITHM
        elif algorithm.lower() == sstore.KeyAlgorithm.DESEDE.lower():
            return key.KeyClient.DES3_ALGORITHM
        elif algorithm.lower() == sstore.KeyAlgorithm.DSA.lower():
            return key.KeyClient.DSA_ALGORITHM
        elif algorithm.lower() == sstore.KeyAlgorithm.RSA.lower():
            return key.KeyClient.RSA_ALGORITHM
        elif algorithm.lower() == sstore.KeyAlgorithm.DIFFIE_HELLMAN.lower():
            # may be supported, needs to be tested
            return None
        elif algorithm.lower() == sstore.KeyAlgorithm.EC.lower():
            # asymmetric keys not yet supported
            return None
        else:
            return None
    @staticmethod
    def _store_secret_attributes(meta_dict, secret_dto):
        # store the following attributes for retrieval
        key_spec = secret_dto.key_spec
        if key_spec is not None:
            if key_spec.alg is not None:
                meta_dict[DogtagKRAPlugin.ALG] = key_spec.alg
            if key_spec.bit_length is not None:
                meta_dict[DogtagKRAPlugin.BIT_LENGTH] = key_spec.bit_length
            if key_spec.mode is not None:
                meta_dict[DogtagKRAPlugin.SECRET_MODE] = key_spec.mode
    def _get_passphrase_for_a_private_key(self, secret_type, secret_metadata,
                                          key_spec):
        """Retrieve the passphrase for the private key stored in the KRA."""
        if secret_type is None:
            return None
        if key_spec.alg is None:
            return None
        passphrase = None
        if DogtagKRAPlugin.PASSPHRASE_KEY_ID in secret_metadata:
            if key_spec.alg.upper() == key.KeyClient.RSA_ALGORITHM:
                passphrase = self.keyclient.retrieve_key(
                    secret_metadata.get(DogtagKRAPlugin.PASSPHRASE_KEY_ID)
                ).data
            else:
                if key_spec.alg.upper() == key.KeyClient.DSA_ALGORITHM:
                    raise sstore.SecretGeneralException(
                        u._("DSA keys should not have a passphrase in the"
                            " database, for being used during retrieval.")
                    )
                raise sstore.SecretGeneralException(
                    u._("Secrets of type {secret_type} should not have a "
                        "passphrase in the database, for being used during "
                        "retrieval.").format(secret_type=secret_type)
                )
        # note that Barbican expects the passphrase to be base64 encoded when
        # stored, so we need to decode it.
        if passphrase:
            passphrase = base64.b64decode(passphrase)
        return passphrase
    @staticmethod
    def _get_trans_wrapped_session_key(secret_type, secret_metadata):
        twsk = secret_metadata.get('trans_wrapped_session_key', None)
        if secret_type in [sstore.SecretType.PUBLIC,
                           sstore.SecretType.PRIVATE]:
            if twsk:
                raise DogtagPluginNotSupportedException(
                    u._("Encryption using session key is not supported when "
                        "retrieving a {secret_type} "
                        "key.").format(secret_type=secret_type)
                )
        return twsk
def _catch_request_exception(ca_related_function):
def _catch_ca_unavailable(self, *args, **kwargs):
try:
return ca_related_function(self, *args, **kwargs)
except request_exceptions.RequestException:
return cm.ResultDTO(
cm.CertificateStatus.CA_UNAVAILABLE_FOR_REQUEST)
return _catch_ca_unavailable
def _catch_enrollment_exceptions(ca_related_function):
    """Decorator: map Dogtag enrollment errors onto Barbican results."""
    def _catch_enrollment_exception(self, *args, **kwargs):
        try:
            return ca_related_function(self, *args, **kwargs)
        except pki.BadRequestException as e:
            # Malformed enrollment data is the client's problem; report
            # it as a data issue rather than a server failure.
            return cm.ResultDTO(
                cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
                status_message=e.message)
        except pki.PKIException as e:
            # Any other CA-side error is unexpected.
            raise cm.CertificateGeneralException(
                u._("Exception thrown by enroll_cert: {message}").format(
                    message=e.message))
    return _catch_enrollment_exception
def _catch_subca_creation_exceptions(ca_related_function):
    """Decorator: translate sub-CA creation errors to Barbican exceptions."""
    def _catch_subca_exception(self, *args, **kwargs):
        try:
            return ca_related_function(self, *args, **kwargs)
        except pki.BadRequestException as e:
            raise exception.BadSubCACreationRequest(reason=e.message)
        except pki.PKIException as e:
            raise exception.SubCACreationErrors(reason=e.message)
        except request_exceptions.RequestException:
            raise exception.SubCACreationErrors(
                reason="Unable to connect to CA")
    return _catch_subca_exception
def _catch_subca_deletion_exceptions(ca_related_function):
    """Decorator: translate sub-CA deletion errors to Barbican exceptions."""
    def _catch_subca_exception(self, *args, **kwargs):
        try:
            return ca_related_function(self, *args, **kwargs)
        except pki.ResourceNotFoundException as e:
            # Deleting an already-deleted sub-CA is treated as success.
            LOG.warning("Sub-CA already deleted")
            pass
        except pki.PKIException as e:
            raise exception.SubCADeletionErrors(reason=e.message)
        except request_exceptions.RequestException:
            # NOTE(review): this raises SubCACreationErrors from the
            # *deletion* decorator — looks like a copy/paste of the
            # creation decorator above; confirm whether
            # SubCADeletionErrors was intended before changing the
            # exception type callers may be catching.
            raise exception.SubCACreationErrors(
                reason="Unable to connect to CA")
    return _catch_subca_exception
class DogtagCAPlugin(cm.CertificatePluginBase):
    """Implementation of the cert plugin with Dogtag CA as the backend."""

    # order_metadata fields
    PROFILE_ID = "profile_id"      # enrollment profile requested by the user

    # plugin_metadata fields
    REQUEST_ID = "request_id"      # Dogtag request id stored per order
def __init__(self, conf=CONF):
    """Constructor - create the cert clients.

    Sets up the Dogtag connection, the working directory used to
    persist the CA-info expiration timestamp and the host authority
    id, and probes the backend for sub-CA support.
    """
    connection = create_connection(conf, 'ca')
    self.certclient = pki.cert.CertClient(connection)
    self.simple_cmc_profile = conf.dogtag_plugin.simple_cmc_profile
    self.auto_approved_profiles = conf.dogtag_plugin.auto_approved_profiles
    self.working_dir = conf.dogtag_plugin.plugin_working_dir
    if not os.path.isdir(self.working_dir):
        os.mkdir(self.working_dir)
    # Backing fields for the `expiration` and `host_aid` properties,
    # which lazily load their values from the files below.
    self._expiration = None
    self._expiration_delta = conf.dogtag_plugin.ca_expiration_time
    self._expiration_data_path = os.path.join(self.working_dir,
                                              "expiration_data.txt")
    self._host_aid_path = os.path.join(self.working_dir, "host_aid.txt")
    self._host_aid = None
    if not os.path.isfile(self._expiration_data_path):
        # Seed with "now" so the first get_ca_info() refreshes CA data.
        self.expiration = datetime.datetime.utcnow()
    # Module-level flag shared with supports_create_ca()/create_ca().
    global subcas_available
    subcas_available = self._are_subcas_enabled_on_backend(connection)
    if subcas_available:
        self.authority_client = authority.AuthorityClient(connection)
        if not os.path.isfile(self._host_aid_path):
            self.host_aid = self.get_host_aid()
@property
def expiration(self):
    """Cached CA-info expiration timestamp, persisted in the working dir.

    Lazily loaded from expiration_data.txt.  A corrupt or unreadable
    value is replaced with "now" so CA info is refreshed on the next
    get_ca_info() call.
    """
    if self._expiration is None:
        try:
            with open(self._expiration_data_path) as expiration_fh:
                self._expiration = datetime.datetime.strptime(
                    expiration_fh.read(),
                    "%Y-%m-%d %H:%M:%S.%f"
                )
        except (ValueError, TypeError):
            LOG.warning("Invalid data read from expiration file")
            # Bug fix: the module is imported as `datetime`, so utcnow()
            # lives on datetime.datetime (the bare `datetime.utcnow()`
            # raised AttributeError; matches usage in __init__ and
            # get_ca_info).
            self.expiration = datetime.datetime.utcnow()
    return self._expiration
@expiration.setter
def expiration(self, val):
    """Persist the expiration timestamp to disk and cache it in memory."""
    with open(self._expiration_data_path, 'w') as expiration_fh:
        expiration_fh.write(val.strftime("%Y-%m-%d %H:%M:%S.%f"))
    self._expiration = val
@property
def host_aid(self):
    """Authority id of the host CA, lazily loaded from host_aid.txt."""
    if self._host_aid is None:
        with open(self._host_aid_path) as host_aid_fh:
            self._host_aid = host_aid_fh.read()
    return self._host_aid
@host_aid.setter
def host_aid(self, val):
    """Persist the host authority id to disk and cache it.

    A None value (host CA not found) is neither written nor cached.
    """
    if val is not None:
        with open(self._host_aid_path, 'w') as host_aid_fh:
            host_aid_fh.write(val)
        self._host_aid = val
def _are_subcas_enabled_on_backend(self, connection):
    """Check if subca feature is available

    SubCA creation must be supported in both the Dogtag client as well
    as on the back-end server.  Moreover, it must be enabled on the
    backend server.  The caller (__init__) stores the result in the
    module-level subcas_available flag.

    :return: True/False
    """
    global subcas_available
    if subcas_available:
        # subcas are supported in the Dogtag client
        try:
            feature_client = feature.FeatureClient(connection)
            authority_feature = feature_client.get_feature("authority")
            if authority_feature.enabled:
                LOG.info("Sub-CAs are enabled by Dogtag server")
                return True
            else:
                LOG.info("Sub-CAs are not enabled by Dogtag server")
        except (request_exceptions.HTTPError,
                pki.ResourceNotFoundException):
            # Older servers do not expose the feature endpoint at all.
            LOG.info("Sub-CAs are not supported by Dogtag server")
    else:
        LOG.info("Sub-CAs are not supported by Dogtag client")
    return False
def _get_request_id(self, order_id, plugin_meta, operation):
    """Look up the Dogtag request id stored in the plugin metadata.

    :raises cm.CertificateGeneralException: when no request id was
        recorded for this order (no enrollment was issued).
    """
    request_id = plugin_meta.get(self.REQUEST_ID, None)
    if request_id:
        return request_id
    raise cm.CertificateGeneralException(
        u._(
            "{request} not found for {operation} for "
            "order_id {order_id}"
        ).format(
            request=self.REQUEST_ID,
            operation=operation,
            order_id=order_id
        )
    )
@_catch_request_exception
def _get_request(self, request_id):
    """Fetch a cert request from the CA, or None if it does not exist."""
    try:
        request = self.certclient.get_request(request_id)
    except pki.RequestNotFoundException:
        request = None
    return request
@_catch_request_exception
def _get_cert(self, cert_id):
    """Fetch a certificate from the CA, or None if it does not exist."""
    try:
        cert = self.certclient.get_cert(cert_id)
    except pki.CertNotFoundException:
        cert = None
    return cert
def get_default_ca_name(self):
    """Symbolic name under which the host (default) Dogtag CA is exposed."""
    return "Dogtag CA"

def get_default_signing_cert(self):
    """Signing cert of the default CA (not yet implemented)."""
    # TODO(alee) Add code to get the signing cert
    return None

def get_default_intermediates(self):
    """Intermediate chain of the default CA (not yet implemented)."""
    # TODO(alee) Add code to get the cert chain
    return None
def check_certificate_status(self, order_id, order_meta, plugin_meta,
                             barbican_meta_dto):
    """Check the status of a certificate request.

    :param order_id: ID of the order associated with this request
    :param order_meta: order_metadata associated with this order
    :param plugin_meta: data populated by previous calls for this order,
        in particular the request_id
    :param barbican_meta_dto: additional data needed to process order.
    :return: cm.ResultDTO
    """
    request_id = self._get_request_id(order_id, plugin_meta, "checking")
    request = self._get_request(request_id)
    if not request:
        raise cm.CertificateGeneralException(
            u._(
                "No request found for request_id {request_id} for "
                "order {order_id}"
            ).format(
                request_id=request_id,
                order_id=order_id
            )
        )
    request_status = request.request_status
    # Map each Dogtag request state onto the corresponding Barbican
    # certificate status.
    if request_status == pki.cert.CertRequestStatus.REJECTED:
        return cm.ResultDTO(
            cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
            status_message=request.error_message)
    elif request_status == pki.cert.CertRequestStatus.CANCELED:
        return cm.ResultDTO(
            cm.CertificateStatus.REQUEST_CANCELED)
    elif request_status == pki.cert.CertRequestStatus.PENDING:
        return cm.ResultDTO(
            cm.CertificateStatus.WAITING_FOR_CA)
    elif request_status == pki.cert.CertRequestStatus.COMPLETE:
        # get the cert issued for this request
        cert_id = request.cert_id
        if not cert_id:
            raise cm.CertificateGeneralException(
                u._(
                    "Request {request_id} reports status_complete, but no "
                    "cert_id has been returned"
                ).format(
                    request_id=request_id
                )
            )
        cert = self._get_cert(cert_id)
        if not cert:
            raise cm.CertificateGeneralException(
                u._("Certificate not found for cert_id: {cert_id}").format(
                    cert_id=cert_id
                )
            )
        return cm.ResultDTO(
            cm.CertificateStatus.CERTIFICATE_GENERATED,
            certificate=cert.encoded,
            intermediates=cert.pkcs7_cert_chain)
    else:
        raise cm.CertificateGeneralException(
            u._("Invalid request_status returned by CA"))
@_catch_request_exception
def issue_certificate_request(self, order_id, order_meta, plugin_meta,
                              barbican_meta_dto):
    """Issue a certificate request to the Dogtag CA

    Call the relevant certificate issuance function depending on the
    Barbican defined request type in the order_meta.

    :param order_id: ID of the order associated with this request
    :param order_meta: dict containing all the inputs for this request.
        This includes the request_type.
    :param plugin_meta: Used to store data for status check
    :param barbican_meta_dto: additional data needed to process order.
    :return: cm.ResultDTO
    """
    request_type = order_meta.get(
        cm.REQUEST_TYPE,
        cm.CertificateRequestType.CUSTOM_REQUEST)
    jump_table = {
        cm.CertificateRequestType.SIMPLE_CMC_REQUEST:
        self._issue_simple_cmc_request,
        cm.CertificateRequestType.FULL_CMC_REQUEST:
        self._issue_full_cmc_request,
        cm.CertificateRequestType.STORED_KEY_REQUEST:
        self._issue_stored_key_request,
        cm.CertificateRequestType.CUSTOM_REQUEST:
        self._issue_custom_certificate_request
    }
    if request_type not in jump_table:
        # Bug fix: str.format() does not substitute %s placeholders, so
        # the original message never contained the request type.  Use a
        # named field, matching the format style used elsewhere here.
        raise DogtagPluginNotSupportedException(u._(
            "Dogtag plugin does not support {request_type} request "
            "type").format(request_type=request_type))
    return jump_table[request_type](order_id, order_meta, plugin_meta,
                                    barbican_meta_dto)
@_catch_enrollment_exceptions
def _issue_simple_cmc_request(self, order_id, order_meta, plugin_meta,
                              barbican_meta_dto):
    """Issue a simple CMC (pkcs10) request to the Dogtag CA.

    Uses the server-generated CSR when one is present on the DTO,
    otherwise decodes the client-supplied base64 PEM CSR.

    :return: cm.ResultDTO
    """
    csr = barbican_meta_dto.generated_csr
    if csr is None:
        # we expect the CSR to be base64 encoded PEM
        # Dogtag CA needs it to be unencoded
        csr = base64.b64decode(order_meta.get('request_data'))
    profile_id = order_meta.get('profile', self.simple_cmc_profile)
    inputs = {'cert_request_type': 'pkcs10',
              'cert_request': csr}
    return self._issue_certificate_request(
        profile_id, inputs, plugin_meta, barbican_meta_dto)
@_catch_enrollment_exceptions
def _issue_full_cmc_request(self, order_id, order_meta, plugin_meta,
                            barbican_meta_dto):
    """Issue a full CMC request to the Dogtag CA.

    Full CMC is not implemented; this always raises
    DogtagPluginNotSupportedException.
    """
    # Bug fix: str.format() does not substitute %s placeholders, so the
    # original message never contained the request type.  Use a named
    # field instead.
    raise DogtagPluginNotSupportedException(u._(
        "Dogtag plugin does not support {request_type} request "
        "type").format(
        request_type=cm.CertificateRequestType.FULL_CMC_REQUEST))
@_catch_enrollment_exceptions
def _issue_stored_key_request(self, order_id, order_meta, plugin_meta,
                              barbican_meta_dto):
    """Issue a stored-key request to the Dogtag CA.

    The CSR travels in barbican_meta_dto / order_meta exactly as for a
    simple CMC request, so the simple CMC path is reused unchanged.

    :return: cm.ResultDTO
    """
    return self._issue_simple_cmc_request(order_id, order_meta,
                                          plugin_meta, barbican_meta_dto)
@_catch_enrollment_exceptions
def _issue_custom_certificate_request(self, order_id, order_meta,
                                      plugin_meta, barbican_meta_dto):
    """Issue a custom certificate request to Dogtag CA

    The order metadata carries the inputs for a specific enrollment
    profile; one of them must be the profile_id.  Fields irrelevant to
    the chosen profile are ignored by the CA.

    :return: cm.ResultDTO
    """
    profile_id = order_meta.get(self.PROFILE_ID, None)
    if not profile_id:
        return cm.ResultDTO(
            cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
            status_message=u._("No profile_id specified"))
    # we expect the csr to be base64 encoded PEM data. Dogtag CA expects
    # PEM data though so we need to decode it.
    profile_inputs = copy.deepcopy(order_meta)
    if 'cert_request' in profile_inputs:
        profile_inputs['cert_request'] = base64.b64decode(
            profile_inputs['cert_request'])
    return self._issue_certificate_request(
        profile_id, profile_inputs, plugin_meta, barbican_meta_dto)
def _issue_certificate_request(self, profile_id, inputs, plugin_meta,
                               barbican_meta_dto):
    """Actually send the cert request to the Dogtag CA

    If the profile_id is one of the auto-approved profiles, then use
    the convenience enroll_cert() method to create and approve the request
    using the Barbican agent cert credentials.  If not, then submit the
    request and wait for approval by a CA agent on the Dogtag CA.

    :param profile_id: enrollment profile
    :param inputs: dict of request inputs
    :param plugin_meta: Used to store data for status check.
    :param barbican_meta_dto: Extra data to aid in processing.
    :return: cm.ResultDTO
    """
    ca_id = barbican_meta_dto.plugin_ca_id or self.get_default_ca_name()
    if profile_id in self.auto_approved_profiles:
        # The default (host) CA is addressed by omitting the ca_id.
        if ca_id == self.get_default_ca_name():
            results = self.certclient.enroll_cert(profile_id, inputs)
        else:
            results = self.certclient.enroll_cert(
                profile_id, inputs, ca_id)
        return self._process_auto_enrollment_results(
            results, plugin_meta, barbican_meta_dto)
    else:
        request = self.certclient.create_enrollment_request(
            profile_id, inputs)
        if ca_id == self.get_default_ca_name():
            results = self.certclient.submit_enrollment_request(request)
        else:
            results = self.certclient.submit_enrollment_request(
                request, ca_id)
        return self._process_pending_enrollment_results(
            results, plugin_meta, barbican_meta_dto)
def _process_auto_enrollment_results(self, enrollment_results,
                                     plugin_meta, barbican_meta_dto):
    """Process results received from Dogtag CA for auto-enrollment

    This processes data from enroll_cert, which submits, approves and
    gets the cert issued and returns as a list of CertEnrollment objects.

    :param enrollment_results: list of CertEnrollmentResult objects
    :param plugin_meta: metadata dict for storing plugin specific data
    :param barbican_meta_dto: object containing extra data to help process
        the request
    :return: cm.ResultDTO
    """
    # Barbican can only track a single cert per order, so only the
    # first enrollment result is considered.
    enrollment_result = enrollment_results[0]
    request = enrollment_result.request
    if not request:
        raise cm.CertificateGeneralException(
            u._("No request returned in enrollment_results"))
    # Remember the request so later status checks can find it.
    plugin_meta[self.REQUEST_ID] = request.request_id
    return self._create_dto(request.request_status,
                            request.request_id,
                            request.error_message,
                            enrollment_result.cert)
def _process_pending_enrollment_results(self, results, plugin_meta,
                                        barbican_meta_dto):
    """Process results received from Dogtag CA for pending enrollment

    This method processes data returned by submit_enrollment_request(),
    which creates requests that still need to be approved by an agent.

    :param results: CertRequestInfoCollection object
    :param plugin_meta: metadata dict for storing plugin specific data
    :param barbican_meta_dto: object containing extra data to help process
        the request
    :return: cm.ResultDTO
    """
    # Although it is possible to create multiple certs in an invocation
    # of enroll_cert, Barbican cannot handle this case.  Assume
    # only one cert and request generated for now.
    cert_request_info = results.cert_request_info_list[0]
    status = cert_request_info.request_status
    request_id = getattr(cert_request_info, 'request_id', None)
    error_message = getattr(cert_request_info, 'error_message', None)
    # store the request_id in the plugin metadata for status checks
    if request_id:
        plugin_meta[self.REQUEST_ID] = request_id
    # No cert yet for a pending request, hence cert=None.
    return self._create_dto(status, request_id, error_message, None)
def _create_dto(self, request_status, request_id, error_message, cert):
    """Build a ResultDTO from a Dogtag request status and certificate.

    :param request_status: a pki.cert.CertRequestStatus value
    :param request_id: Dogtag request id (used in error messages)
    :param error_message: status message reported by the CA, if any
    :param cert: the issued certificate, or None
    :return: cm.ResultDTO
    :raises cm.CertificateGeneralException: when status is COMPLETE
        without a cert, or the status value is not recognized
    """
    dto = None
    if request_status == pki.cert.CertRequestStatus.COMPLETE:
        if cert is not None:
            # Barbican is expecting base 64 encoded PEM, so we base64
            # encode below.
            #
            # Currently there is an inconsistency in what Dogtag returns
            # for certificates and intermediates.  For certs, we return
            # PEM, whereas for intermediates, we return headerless PEM.
            # This is being addressed in Dogtag ticket:
            # https://fedorahosted.org/pki/ticket/1374
            #
            # Until this is addressed, simply add the missing headers
            cert_chain = (CERT_HEADER + "\r\n" + cert.pkcs7_cert_chain +
                          CERT_FOOTER)
            dto = cm.ResultDTO(cm.CertificateStatus.CERTIFICATE_GENERATED,
                               certificate=base64.b64encode(cert.encoded),
                               intermediates=base64.b64encode(cert_chain))
        else:
            raise cm.CertificateGeneralException(
                u._("request_id {req_id} returns COMPLETE but no cert "
                    "returned").format(req_id=request_id))
    elif request_status == pki.cert.CertRequestStatus.REJECTED:
        dto = cm.ResultDTO(cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
                           status_message=error_message)
    elif request_status == pki.cert.CertRequestStatus.CANCELED:
        dto = cm.ResultDTO(cm.CertificateStatus.REQUEST_CANCELED)
    elif request_status == pki.cert.CertRequestStatus.PENDING:
        dto = cm.ResultDTO(cm.CertificateStatus.WAITING_FOR_CA)
    else:
        raise cm.CertificateGeneralException(
            u._("Invalid request_status {status} for "
                "request_id {request_id}").format(
                status=request_status,
                request_id=request_id)
        )
    return dto
def modify_certificate_request(self, order_id, order_meta, plugin_meta,
                               barbican_meta_dto):
    """Modify a certificate request.

    Once a certificate request is generated, it cannot be modified.
    The only alternative is to cancel the request (if it has not already
    completed) and attempt a fresh enrollment.  That is what will be
    attempted here.

    :param order_id: ID for this order
    :param order_meta: order metadata.  It is assumed that the newly
        modified request data will be present here.
    :param plugin_meta: data stored on behalf of the plugin for further
        operations
    :param barbican_meta_dto: additional data needed to process order.
    :return: ResultDTO:
    """
    result_dto = self.cancel_certificate_request(
        order_id, order_meta, plugin_meta, barbican_meta_dto)
    if result_dto.status == cm.CertificateStatus.REQUEST_CANCELED:
        # The old request is gone; re-issue with the new order data.
        return self.issue_certificate_request(
            order_id, order_meta, plugin_meta, barbican_meta_dto)
    elif result_dto.status == cm.CertificateStatus.INVALID_OPERATION:
        return cm.ResultDTO(
            cm.CertificateStatus.INVALID_OPERATION,
            status_message=u._(
                "Modify request: unable to cancel: "
                "{message}").format(message=result_dto.status_message)
        )
    else:
        # other status (ca_unavailable, client_data_issue)
        # return result from cancel operation
        return result_dto
@_catch_request_exception
def cancel_certificate_request(self, order_id, order_meta, plugin_meta,
                               barbican_meta_dto):
    """Cancel a certificate request.

    :param order_id: ID for the order associated with this request
    :param order_meta: order metadata for this request
    :param plugin_meta: data stored by plugin for further processing.
        In particular, the request_id
    :param barbican_meta_dto: additional data needed to process order.
    :return: cm.ResultDTO:
    """
    request_id = self._get_request_id(order_id, plugin_meta, "cancelling")
    try:
        # Dogtag requires a fresh review of the request before it can
        # be canceled.
        review_response = self.certclient.review_request(request_id)
        self.certclient.cancel_request(request_id, review_response)
        return cm.ResultDTO(cm.CertificateStatus.REQUEST_CANCELED)
    except pki.RequestNotFoundException:
        return cm.ResultDTO(
            cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
            status_message=u._("no request found for this order"))
    except pki.ConflictingOperationException as e:
        # e.g. the request already completed and cannot be canceled.
        return cm.ResultDTO(
            cm.CertificateStatus.INVALID_OPERATION,
            status_message=e.message)
def supports(self, certificate_spec):
    """Return whether this plugin can service the given certificate spec."""
    if cm.CA_TYPE in certificate_spec:
        return certificate_spec[cm.CA_TYPE] == cm.CA_PLUGIN_TYPE_DOGTAG
    if cm.CA_PLUGIN_TYPE_SYMANTEC in certificate_spec:
        # TODO(alee-3) Handle case where SKI is provided
        pass
    # No explicit CA type requested: assume this plugin can handle it.
    return True
def supported_request_types(self):
    """Returns the request_types supported by this plugin.

    Note: FULL_CMC_REQUEST is deliberately absent; see
    _issue_full_cmc_request, which rejects it as unsupported.

    :returns: a list of the Barbican-core defined request_types
        supported by this plugin.
    """
    return [cm.CertificateRequestType.SIMPLE_CMC_REQUEST,
            cm.CertificateRequestType.STORED_KEY_REQUEST,
            cm.CertificateRequestType.CUSTOM_REQUEST]
def supports_create_ca(self):
    """Returns if this plugin and the backend CA supports subCAs

    :return: True/False (module-level flag determined during __init__)
    """
    return subcas_available
@_catch_subca_creation_exceptions
def create_ca(self, ca_create_dto):
    """Creates a subordinate CA upon request

    :param ca_create_dto:
        Data transfer object :class:`CACreateDTO` containing data
        required to generate a subordinate CA.  This data includes
        the subject DN of the new CA signing certificate, a name for
        the new CA and a reference to the CA that will issue the new
        subordinate CA's signing certificate,

    :return: ca_info:
        Dictionary containing the data needed to create a
        models.CertificateAuthority object
    """
    if not subcas_available:
        raise exception.SubCAsNotSupported(
            "Subordinate CAs are not supported by this Dogtag CA")
    # Translate the Barbican plugin_ca_id of the parent into the Dogtag
    # authority id (the host CA keeps its symbolic name externally).
    parent_ca_id = self._get_correct_ca_id(ca_create_dto.parent_ca_id)
    ca_data = authority.AuthorityData(
        dn=ca_create_dto.subject_dn,
        parent_aid=parent_ca_id,
        description=ca_create_dto.name)
    new_ca_data = self.authority_client.create_ca(ca_data)
    cert = self.authority_client.get_cert(new_ca_data.aid, "PEM")
    chain = self.authority_client.get_chain(new_ca_data.aid, "PEM")
    return {
        cm.INFO_NAME: new_ca_data.description,
        cm.INFO_CA_SIGNING_CERT: cert,
        cm.INFO_EXPIRATION: self.expiration.isoformat(),
        cm.INFO_INTERMEDIATES: chain,
        cm.PLUGIN_CA_ID: new_ca_data.aid
    }
def _get_correct_ca_id(self, plugin_ca_id):
    """Returns the correct authority id

    When the Dogtag plugin updates its CA list, any subcas will
    have a plugin_ca_id that matches the authority_id (aid) as
    returned from the backend CA.

    For migration purposes, though, ie. migrating from a non-subca
    environment to a subca one, we want the host CA to keep the same
    plugin_ca_id (which is the default_ca_name) so that no disruption
    occurs.  Therefore, we need to store the host CA's authority ID
    (in get_ca_info) and return it here instead.
    """
    is_host_ca = plugin_ca_id == self.get_default_ca_name()
    return self.host_aid if is_host_ca else plugin_ca_id
@_catch_subca_deletion_exceptions
def delete_ca(self, ca_id):
    """Deletes a subordinate CA

    :param ca_id: id for the CA as specified by the plugin
    :return: None
    """
    if not subcas_available:
        raise exception.SubCAsNotSupported(
            "Subordinate CAs are not supported by this Dogtag CA")
    # ca must be disabled first before Dogtag will allow deletion
    self.authority_client.disable_ca(ca_id)
    self.authority_client.delete_ca(ca_id)
def get_ca_info(self):
    """Return a dict describing every enabled CA on the backend.

    Also pushes the cached expiration time forward by the configured
    ca_expiration_time delta, since fresh CA data is being returned.
    """
    if not subcas_available:
        return super(DogtagCAPlugin, self).get_ca_info()
    self.expiration = (datetime.datetime.utcnow() + datetime.timedelta(
        days=int(self._expiration_delta)))
    ret = {}
    cas = self.authority_client.list_cas()
    for ca_data in cas.ca_list:
        if not ca_data.enabled:
            continue
        cert = self.authority_client.get_cert(ca_data.aid, "PEM")
        chain = self.authority_client.get_chain(ca_data.aid, "PEM")
        ca_info = {
            cm.INFO_NAME: ca_data.description,
            cm.INFO_CA_SIGNING_CERT: cert,
            cm.INFO_INTERMEDIATES: chain,
            cm.INFO_EXPIRATION: self.expiration.isoformat()
        }
        # handle the migration case.  The top level CA should continue
        # to work as before (keyed by symbolic name, aid cached).
        if ca_data.is_host_authority:
            ret[self.get_default_ca_name()] = ca_info
            self.host_aid = ca_data.aid
        else:
            ret[ca_data.aid] = ca_info
    return ret
def get_host_aid(self):
    """Return the authority id of the host CA, or None if none is found."""
    cas = self.authority_client.list_cas()
    return next(
        (ca_data.aid for ca_data in cas.ca_list
         if ca_data.is_host_authority),
        None)
| StarcoderdataPython |
1952375 | <gh_stars>10-100
import numpy as np
import sys
import matplotlib.pyplot as plt
from matplotlib import cm
import tensorflow.keras.layers as layers
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
import os
import colorsys
import progressbar
# Command-line configuration: latent dimension and a rate multiplier that
# scales both the number of training batches and the plotting interval.
LATENT_DIM = int(sys.argv[1])
RATE = int(sys.argv[2])
NUM_BATCHES = 600 * RATE
BATCH_SIZE = 512
PLOT_EVERY = 1 * RATE
# Base name of this script (without extension), used for the output dirs.
FILE = ".".join(os.path.basename(__file__).split(".")[:-1])
THETA_MAPPING = np.arange(8).reshape((-1, 1)) / 8  # 8 rotation fractions in [0, 1)
def target_function(Z):
    """
    Map Z (N, 2 + onehot(8)) to points drawn from a ring of 8 gaussians.

    The first two columns are gaussian noise; the remaining eight are a
    one-hot selector for which of the 8 cluster centres to use.
    """
    # Decode the one-hot block into an angle fraction in [0, 1).
    theta = (Z[:, 2:] @ THETA_MAPPING).flatten()
    radius = 2
    points = 0.2 * Z[:, :2]
    points[:, 0] += radius * np.cos(theta * 2 * np.pi)
    points[:, 1] += radius * np.sin(theta * 2 * np.pi)
    return points
def generate_noise(samples):
    """
    Generate `samples` standard-normal latent vectors of size LATENT_DIM.

    NOTE(review): the previous docstring said "uniform noise in
    ([-1,1], [-1,1])", but the code draws from N(0, 1).
    """
    noise = np.random.normal(0, 1, (samples, LATENT_DIM))
    return noise
def sample_from_target_function(samples):
    """
    Draw `samples` points from the target 8-gaussian mixture.
    """
    latent = np.zeros((samples, 2 + 8))
    latent[:, :2] = np.random.normal(0, 1, (samples, 2))
    # Pick one of the 8 clusters uniformly at random per sample.
    chosen = np.random.randint(0, 8, samples)
    latent[np.arange(samples), 2 + chosen] = 1
    return target_function(latent)
def build_generator():
    """
    Build a generator mapping LATENT_DIM-dim noise to points in R^2.
    """
    z_in = layers.Input((LATENT_DIM,))
    hidden = z_in
    for _ in range(3):
        hidden = layers.Dense(256, activation='relu')(hidden)
    point_out = layers.Dense(2)(hidden)
    return Model(z_in, point_out)
def build_discriminator():
    """
    Build a discriminator mapping a 2-D point to a real/fake probability.
    """
    point_in = layers.Input((2,))
    hidden = point_in
    for _ in range(5):
        hidden = layers.Dense(128)(hidden)
        hidden = layers.LeakyReLU(0.01)(hidden)
    prob_out = layers.Dense(1, activation="sigmoid")(hidden)
    model = Model(point_in, prob_out)
    model.compile(
        Adam(learning_rate=0.001, beta_1=0.5),
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )
    return model
def build_GAN(G, D):
    """
    Given a generator and a discriminator, build a GAN

    D.trainable is set to False *before* compiling the combined model,
    so GAN.train_on_batch only updates the generator; D is still
    trained directly via D.train_on_batch in the training loop.
    """
    D.trainable = False
    input_layer = layers.Input((LATENT_DIM,))
    X = G(input_layer)
    output_layer = D(X)
    GAN = Model(input_layer, output_layer)
    GAN.compile(
        Adam(learning_rate=0.0002, beta_1=0.5),
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )
    return GAN
# Directory tree for output frames: ims/<script name>/l<latent>_r<rate>
image_dir = os.path.join("ims")
script_dir = os.path.join(image_dir, FILE)
RESULTS_DIR = os.path.join(script_dir, f'l{LATENT_DIM}_r{RATE}')
for i in [image_dir, script_dir, RESULTS_DIR, ]:
    if not os.path.exists(i):
        os.makedirs(i)

# Fixed evaluation sets, reused by plot() so successive frames are
# comparable.  (The original sampled these twice; the first pair of
# draws was rebound before any use, so the dead pair was removed.)
test_samples = sample_from_target_function(5000)
test_noise = generate_noise(5000)
def plot(
    G,
    D,
    GAN,
    step,
    step_count,
    D_accuracy,
    D_loss,
    G_accuracy,
    G_loss,
    filename,
):
    """
    Plots for the GAN gif

    Draws the generator's output for the fixed `test_noise` set (red)
    over the fixed `test_samples` target set (grey), then saves the
    frame to `filename`.

    NOTE(review): only G, D and filename are used in this body; the
    remaining parameters (GAN, step and the history lists) are accepted
    but ignored — kept for signature compatibility with the caller.
    """
    fake_samples = G.predict(test_noise, batch_size=len(test_noise))
    # Discriminator confidences are computed but currently unused.
    confidences = D.predict(fake_samples).flatten()
    c = 'r'
    plt.figure(figsize=(6, 6))
    plt.scatter(
        fake_samples[:, 0],
        fake_samples[:, 1],
        edgecolor=c,
        facecolor="None",
        s=5,
        alpha=1,
        linewidth=1,
        zorder=50,
    )
    plt.scatter(
        test_samples[:, 0],
        test_samples[:, 1],
        edgecolor="#AAAAAA",
        facecolor="None",
        s=5,
        alpha=0.8,
        linewidth=1,
        zorder=30,
    )
    plt.xlim(-3, 3)
    plt.ylim(-3, 3)
    plt.gca().set_aspect(1)
    plt.tight_layout()
    plt.savefig(filename)
    plt.close()
## Part 1: GAN
G = build_generator()
G.summary()
D = build_discriminator()
D.summary()
GAN = build_GAN(G, D)
# Training history, appended to every PLOT_EVERY steps.
step_count = []
D_accuracy = []
G_accuracy = []
D_loss = []
G_loss = []
# Index of the next saved animation frame.
count = 0
for step in progressbar.progressbar(range(NUM_BATCHES)):
    # --- Train the discriminator on a half-real / half-fake batch ---
    D.trainable = True
    real_data = sample_from_target_function(BATCH_SIZE // 2)
    fake_data = G.predict(
        generate_noise(BATCH_SIZE // 2), batch_size=BATCH_SIZE // 2
    )
    data = np.concatenate((real_data, fake_data), axis=0)
    real_labels = np.ones((BATCH_SIZE // 2, 1))
    fake_labels = np.zeros((BATCH_SIZE // 2, 1))
    labels = np.concatenate((real_labels, fake_labels), axis=0)
    _D_loss, _D_accuracy = D.train_on_batch(data, labels)
    # --- Train the generator through the combined model (D frozen) ---
    D.trainable = False
    noise = generate_noise(BATCH_SIZE)
    # The generator tries to make D output "real" (1) for fakes.
    labels = np.ones((BATCH_SIZE, 1))
    _G_loss, _G_accuracy = GAN.train_on_batch(noise, labels)
    if step % PLOT_EVERY == 0:
        # Record history and save one animation frame.
        step_count.append(step)
        D_loss.append(_D_loss)
        D_accuracy.append(_D_accuracy)
        G_loss.append(_G_loss)
        G_accuracy.append(_G_accuracy)
        plot(
            G=G,
            D=D,
            GAN=GAN,
            step=step,
            step_count=step_count,
            D_accuracy=D_accuracy,
            D_loss=D_loss,
            G_accuracy=G_accuracy,
            G_loss=G_loss,
            filename=os.path.join(RESULTS_DIR, f"{count:06d}.png"),
        )
        count += 1
else:
    # for/else: runs once when the loop finishes normally (there is no
    # `break` above); emits a newline after the progress bar output.
    print()
# Stitch the saved frames into a video.
# NOTE(review): os.system runs a shell command assembled from config
# values — acceptable for a local experiment, not for untrusted input.
os.system(
    f"ffmpeg -r 20 -i {os.path.join(RESULTS_DIR,'%06d.png')}"
    f" -crf 15 {os.path.join(RESULTS_DIR, 'training.mp4')}"
)
5016765 | <filename>examples/ttgo_tdisplay_rp2040/truetype/chango.py<gh_stars>10-100
"""
chango.py
Test for font2bitmap converter for the driver.
See the font2bitmap program in the utils directory.
"""
from machine import Pin, SoftSPI
import st7789py as st7789
import gc
from truetype import chango_16 as font_16
from truetype import chango_32 as font_32
from truetype import chango_64 as font_64
gc.collect()  # reclaim memory after importing the three bitmap font modules
def main():
    """Render the Chango font at 16/32/64 px on a 135x240 ST7789 display."""
    # enable display and clear screen
    spi = SoftSPI(
        baudrate=20000000,
        polarity=1,
        phase=0,
        sck=Pin(18),
        mosi=Pin(19),
        miso=Pin(13))

    tft = st7789.ST7789(
        spi,
        135,
        240,
        reset=Pin(23, Pin.OUT),
        cs=Pin(5, Pin.OUT),
        dc=Pin(16, Pin.OUT),
        backlight=Pin(4, Pin.OUT),
        rotation=1)

    tft.fill(st7789.BLACK)

    # One sample line per font size, stacking rows down the screen.
    row = 0
    tft.write(font_16, "abcdefghijklmnopqrst", 0, row, st7789.RED)
    row += font_16.HEIGHT

    tft.write(font_32, "abcdefghij", 0, row, st7789.GREEN)
    row += font_32.HEIGHT

    tft.write(font_64, "abcd", 0, row, st7789.BLUE)
    row += font_64.HEIGHT


main()
| StarcoderdataPython |
3222524 | # Для записи цифр римляне использовали буквы латинского алфафита:
# I, V, X, L, C, D, M. Например:
#
# 1 обозначалась с помощью буквы I
# 10 с помощью Х
# 7 с помощью VII
# Число 2020 в римской записи — это MMXX (2000 = MM, 20 = XX).
#
# Реализуйте функцию to_roman, которая переводит арабские числа в римские.
# Функция принимает на вход целое число из диапазона от 1 до 3000,
# а возвращает строку с римским представлением этого числа.
#
# Реализуйте функцию to_arabic, которая переводит число в римской записи
# в число, записанное арабскими цифрами.
# def to_arabic(number): # noqa: WPS210
# numbers = []
# for char in number:
# numbers.append(NUMERALS[char])
# # Сдвиг чисел с повтором последнего
# # Пример: [1,2,3,4] -> [2,3,4,4]
# shifted_numbers = numbers[1:] + numbers[-1:]
# result = 0
# for previous, current in zip(numbers, shifted_numbers):
# if previous >= current:
# result += previous
# else:
# result -= previous
# return result
# Roman numeral values, including the subtractive two-letter pairs.
# `arabic_numbers` is the dict's bound .get, so unknown tokens map to None.
arabic_numbers = {
    "I": 1, "IV": 4, "V": 5, "IX": 9,
    "X": 10, "XL": 40, "L": 50, "XC": 90,
    "C": 100, "CD": 400, "D": 500, "CM": 900, "M": 1000,
}.get


def to_arabic(number: str) -> int:
    """Convert a Roman numeral string to an integer (empty string -> 0)."""
    total = 0
    position = 0
    while position < len(number):
        # Prefer the two-character subtractive pair when one matches.
        pair = number[position:position + 2]
        pair_value = arabic_numbers(pair) if len(pair) == 2 else None
        if pair_value is not None:
            total += pair_value
            position += 2
        else:
            total += arabic_numbers(number[position])
            position += 1
    return total


print(to_arabic("MCDXCVIII"))  # 1498
def test_to_arabic():
    """Spot-check to_arabic against known Roman/Arabic pairs (1..3000)."""
    assert to_arabic('') == 0
    assert to_arabic('I') == 1
    assert to_arabic('II') == 2
    assert to_arabic('IV') == 4
    assert to_arabic('V') == 5
    assert to_arabic('VI') == 6
    assert to_arabic('XXVII') == 27
    assert to_arabic('XLVIII') == 48
    assert to_arabic('LIX') == 59
    assert to_arabic('XCIII') == 93
    assert to_arabic('CXLI') == 141
    assert to_arabic('CLXIII') == 163
    assert to_arabic('CDII') == 402
    assert to_arabic('DLXXV') == 575
    assert to_arabic('CMXI') == 911
    assert to_arabic('MXXIV') == 1024
    assert to_arabic('MMXX') == 2020
    assert to_arabic('MMM') == 3000


test_to_arabic()
| StarcoderdataPython |
8084261 | from scrounger.core.module import BaseModule
# helper functions
from scrounger.utils.android import ApktoolYaml
from scrounger.utils.config import Log
from scrounger.modules.misc.android.app.manifest import Module as ManifestModule
from os.path import exists
class Module(BaseModule):
    # Scrounger module descriptor consumed by the framework.
    meta = {
        "author": "RDC",
        "description": "Returns the contents of the application's apktool.yml \
in object format",
        "certainty": 100
    }

    # User-settable options; `decompiled_apk` becomes self.decompiled_apk.
    options = [
        {
            "name": "decompiled_apk",
            "description": "local folder containing the decompiled apk file",
            "required": True,
            "default": None
        }
    ]

    def run(self):
        """Parse <decompiled_apk>/apktool.yml into an ApktoolYaml object.

        :return: dict mapping "<identifier>_yaml" to the ApktoolYaml
            object, or {"print": ...} when the file is missing
        """
        Log.info("Checking for apktool.yml file")

        # find apktool yml file
        filename = "{}/apktool.yml".format(self.decompiled_apk)
        if exists(filename):
            Log.info("Creating apktool yaml object")

            # get info
            yaml = ApktoolYaml(filename)

            # get identifier: fall back to the apk file name, but prefer
            # the package name from the manifest when it can be parsed
            # (a "print" key signals the manifest lookup failed).
            identifier = yaml.apk_filename().lower().rsplit(".", 1)[0]

            manifest_module = ManifestModule()
            manifest_module.decompiled_apk = self.decompiled_apk
            self.manifest = manifest_module.run()
            if "print" not in self.manifest:
                identifier = self.manifest.popitem()[1].package()

            return {
                "{}_yaml".format(identifier): yaml
            }

        return {"print": "Could not find apktool.yml"}
| StarcoderdataPython |
1695472 | from script.model.sklearn_like_model.NetModule.InceptionSructure.BaseInceptionNetModule import \
BaseInceptionNetModule
from script.util.Stacker import Stacker
from script.util.tensor_ops import *
class InceptionV4NetModule(BaseInceptionNetModule):
def stem(self, stacker, name='stem'):
    """Inception-v4 stem: initial conv stack with three channel-concat joins.

    :param stacker: Stacker holding the running tensor
    :param name: variable scope name
    :return: the same stacker with the stem layers appended
    """
    with tf.variable_scope(name):
        def mix_0(x, name='mix_0'):
            # Parallel 3x3/stride-2 max-pool and conv, joined on channels.
            with tf.variable_scope(name):
                a = max_pooling(x, CONV_FILTER_3322, name='a')
                b = conv_block(x, self.n_channel * 6, CONV_FILTER_3322, relu, name='b')
                return concat([a, b], axis=3)

        stacker.conv_block(self.n_channel * 2, CONV_FILTER_3322, relu)
        stacker.conv_block(self.n_channel * 2, CONV_FILTER_3311, relu)
        stacker.conv_block(self.n_channel * 4, CONV_FILTER_3311, relu)
        stacker.add_layer(mix_0)

        def mix_1(x, name='mix_1'):
            # Two conv towers: plain 3x3 vs factorized 1x7 + 7x1 then 3x3.
            with tf.variable_scope(name):
                a = conv_block(x, self.n_channel * 4, CONV_FILTER_1111, relu, name='a0')
                a = conv_block(a, self.n_channel * 6, CONV_FILTER_3311, relu, name='a1')

                b = conv_block(x, self.n_channel * 4, CONV_FILTER_1111, relu, name='b0')
                b = conv_block(b, self.n_channel * 4, CONV_FILTER_1711, relu, name='b1')
                b = conv_block(b, self.n_channel * 4, CONV_FILTER_7111, relu, name='b2')
                b = conv_block(b, self.n_channel * 6, CONV_FILTER_3311, relu, name='b3')
                return concat([a, b], axis=3)

        stacker.add_layer(mix_1)

        def mix_2(x, name='mix_2'):
            # Downsampling join: stride-2 conv next to stride-2 max-pool.
            # NOTE(review): 196 filters is hard-coded here, unlike the
            # n_channel-scaled widths elsewhere — confirm intended.
            with tf.variable_scope(name):
                a = conv_block(x, 196, CONV_FILTER_3322, relu, name='a0')
                b = max_pooling(x, CONV_FILTER_3322, name='b0')
                return concat([a, b], axis=3)

        stacker.add_layer(mix_2)

    return stacker
def inception_A(self, x, name='inception_A'):
with tf.variable_scope(name):
a = conv_block(x, self.n_channel * 6, CONV_FILTER_1111, relu, name='a0')
b = conv_block(x, self.n_channel * 4, CONV_FILTER_1111, relu, name='b0')
b = conv_block(b, self.n_channel * 6, CONV_FILTER_3311, relu, name='b1')
c = conv_block(x, self.n_channel * 4, CONV_FILTER_1111, relu, name='c0')
c = conv_block(c, self.n_channel * 6, CONV_FILTER_3311, relu, name='c1')
c = conv_block(c, self.n_channel * 6, CONV_FILTER_3311, relu, name='c2')
d = avg_pooling(x, CONV_FILTER_3311, name='d0')
d = conv_block(d, self.n_channel * 6, CONV_FILTER_1111, relu, name='d1')
return concat([a, b, c, d], axis=3)
def inception_B(self, x, name='inception_B'):
with tf.variable_scope(name):
a = conv_block(x, self.n_channel * 24, CONV_FILTER_1111, relu, name='a0')
b = conv_block(x, self.n_channel * 12, CONV_FILTER_1111, relu, name='b0')
b = conv_block(b, self.n_channel * 14, CONV_FILTER_7111, relu, name='b1')
b = conv_block(b, self.n_channel * 16, CONV_FILTER_1711, relu, name='b2')
c = conv_block(x, self.n_channel * 12, CONV_FILTER_1111, relu, name='c0')
c = conv_block(c, self.n_channel * 14, CONV_FILTER_7111, relu, name='c1')
c = conv_block(c, self.n_channel * 14, CONV_FILTER_1711, relu, name='c2')
c = conv_block(c, self.n_channel * 14, CONV_FILTER_7111, relu, name='c3')
c = conv_block(c, self.n_channel * 16, CONV_FILTER_1711, relu, name='c4')
d = avg_pooling(x, CONV_FILTER_3311, name='d0')
d = conv_block(d, self.n_channel * 8, CONV_FILTER_1111, relu, name='d1')
return concat([a, b, c, d], axis=3)
def inception_C(self, x, name='inception_C'):
with tf.variable_scope(name):
a = conv_block(x, self.n_channel * 16, CONV_FILTER_1111, relu, name='a0')
b = conv_block(x, self.n_channel * 24, CONV_FILTER_1111, relu, name='b0')
b0 = conv_block(b, self.n_channel * 16, CONV_FILTER_3111, relu, name='b1-0')
b1 = conv_block(b, self.n_channel * 16, CONV_FILTER_1311, relu, name='b1-1')
c = conv_block(x, self.n_channel * 24, CONV_FILTER_1111, relu, name='c0')
c = conv_block(c, self.n_channel * 28, CONV_FILTER_3111, relu, name='c1')
c = conv_block(c, self.n_channel * 32, CONV_FILTER_1311, relu, name='c2')
c0 = conv_block(c, self.n_channel * 16, CONV_FILTER_3111, relu, name='c3-0')
c1 = conv_block(c, self.n_channel * 16, CONV_FILTER_1311, relu, name='c3-1')
d = avg_pooling(x, CONV_FILTER_3311, name='d0')
d = conv_block(d, self.n_channel * 16, CONV_FILTER_1111, relu, name='d1')
return concat([a, b0, b1, c0, c1, d], axis=3)
def reduction_A(self, x, name='reduction_A'):
with tf.variable_scope(name):
a = conv_block(x, self.n_channel * 24, CONV_FILTER_3322, relu, name='a0')
b = conv_block(x, self.n_channel * 12, CONV_FILTER_1111, relu, name='b0')
b = conv_block(b, self.n_channel * 14, CONV_FILTER_3311, relu, name='b1')
b = conv_block(b, self.n_channel * 16, CONV_FILTER_3322, relu, name='b2')
c = max_pooling(x, CONV_FILTER_3322, name='c0')
return concat([a, b, c], axis=3)
def reduction_B(self, x, name='reduction_B'):
with tf.variable_scope(name):
a = conv_block(x, self.n_channel * 12, CONV_FILTER_1111, relu, name='a0')
a = conv_block(a, self.n_channel * 12, CONV_FILTER_3322, relu, name='a1')
b = conv_block(x, self.n_channel * 16, CONV_FILTER_1111, relu, name='b0')
b = conv_block(b, self.n_channel * 16, CONV_FILTER_7111, relu, name='b1')
b = conv_block(b, self.n_channel * 20, CONV_FILTER_1711, relu, name='b2')
b = conv_block(b, self.n_channel * 20, CONV_FILTER_3322, relu, name='b3')
c = max_pooling(x, CONV_FILTER_3322, name='c0')
return concat([a, b, c], axis=3)
def aux(self, x, n_classes, name='aux'):
with tf.variable_scope(name):
stack = Stacker(x)
stack.avg_pooling(CONV_FILTER_5533)
stack.conv_block(self.n_channel * 8, CONV_FILTER_1111, relu)
filter_ = list(stack.last_layer.shape[1:3]) + [1, 1]
stack.conv_block(self.n_channel * 48, filter_, relu)
stack.flatten()
logit = stack.linear(n_classes)
proba = stack.softmax()
return logit, proba
def build(self):
with tf.variable_scope(self.name):
self.stacker = Stacker(self.x)
self.stacker.resize_image(self.resize_shape)
stacker = self.stem(self.stacker)
for i in range(4):
stacker.add_layer(self.inception_A)
stacker.add_layer(self.reduction_A)
for i in range(7):
stacker.add_layer(self.inception_B)
self.aux_logit, self.aux_proba = self.aux(stacker.last_layer, self.n_classes)
stacker.add_layer(self.reduction_B)
for i in range(3):
stacker.add_layer(self.inception_C)
stacker.max_pooling((8, 8, 8, 8))
# dropout
self.flatten_layer = stacker.flatten()
stacker.linear_block(self.n_channel * 64, relu)
self.logit = stacker.linear(self.n_classes)
self.proba = stacker.softmax()
| StarcoderdataPython |
11237665 | import fontforge
from sys import argv
from typing import Iterable
from os import path
import json
DEFAULT_CONFIG_PATH = './font-subset.json'
def open_font(font_path) -> fontforge.font:
    """Open *font_path* with FontForge and return the font object."""
    return fontforge.open(font_path)
def subset_of_font(source_font: fontforge.font, subset: Iterable[str]) -> fontforge.font:
    """Strip *source_font* down (in place) to the glyphs listed in *subset*."""
    # Select every requested glyph by its unicode codepoint name (e.g. u4e2d).
    for char in subset:
        glyph_name = 'u%04x' % ord(char)
        source_font.selection.select(("more", None), glyph_name)
    # Invert the selection and drop everything that was not requested.
    source_font.selection.invert()
    for glyph in source_font.selection.byGlyphs:
        source_font.removeGlyph(glyph)
    return source_font
def save_font(source_font: fontforge.font, save_path: str):
    """Save *source_font* as a FontForge project file (.sfd) at *save_path*."""
    source_font.save(save_path)
def generate_font(source_font: fontforge.font, generate_path: str):
    """Generate an installable font file from *source_font* at *generate_path*."""
    source_font.generate(generate_path)
def main():
    """Read the subset configuration and generate one subset font per entry.

    The configuration path is taken from the first command line argument when
    it points to an existing file; otherwise DEFAULT_CONFIG_PATH is used.
    """
    # Fall back to the default config unless an existing path was supplied
    # (original duplicated this decision across two nested branches).
    config_path = DEFAULT_CONFIG_PATH
    if len(argv) >= 2 and path.exists(argv[1]):
        config_path = argv[1]

    print(f"loading config from {path.abspath(config_path)}...")
    with open(config_path, 'r', encoding='utf-8') as config_file:
        # Parse straight from the file handle instead of read()+loads().
        config = json.load(config_file)
    print("config loaded!")

    for entry in config:
        source_font_path = entry['source_font_path']
        project_save_path = entry['project_save_path']
        target_font_path = entry['target_font_path']
        subset = entry['subset']

        print(f'loading source font from "{path.abspath(source_font_path)}"...')
        source_font = open_font(source_font_path)

        print('selecting subset...')
        target_font = subset_of_font(source_font, subset)

        print(f'saving project file to "{path.abspath(project_save_path)}"...')
        save_font(target_font, project_save_path)

        print(f'generating target font file to "{path.abspath(target_font_path)}"...')
        generate_font(target_font, target_font_path)

    print("succeed!")
# Script entry point.
if __name__ == '__main__':
    main()
3334013 | <reponame>blorente/Open-Publisher
#!/usr/bin/env python3
from pathlib import Path
import argparse
import logging
import subprocess
import os
import shutil
from layouts.epub import EPUB_LAYOUT
from layouts.paperback import PAPERBACK_LAYOUT
project_dir = Path(__file__).parent.parent
POSSIBLE_FORMATS = ["epub", "paperback"]
log = logging.getLogger("Binder")
def pandoc(cmd, check=False):
    """Run pandoc with *cmd* arguments and return the CompletedProcess.

    Output is captured; a non-zero exit status is logged (and raises
    CalledProcessError when *check* is true).
    """
    log.debug(f"Running: pandoc {' '.join(cmd)}")
    result = subprocess.run(["pandoc"] + cmd, check=check, capture_output=True)
    if result.returncode != 0:
        log.error(f"Pandoc failed: {result}")
    return result
def stitch_document(manuscript: Path, outdir: Path, layout) -> Path:
    """Concatenate the layout's files into one markdown file inside *outdir*.

    Every literal "manuscript" entry in *layout* is replaced by the
    manuscript path before the files are joined.
    """
    resolved = [manuscript if part == "manuscript" else part for part in layout]
    stitched = outdir / f"{manuscript.name}.stitched.md"
    content = "\n".join(part.read_text() for part in resolved)
    stitched.write_text(content)
    log.info(f"Written stiched content to {stitched}")
    log.debug(f"Stitched content: {content}")
    return stitched
def compile_epub(manuscript: Path, book_name: str, outdir: Path):
    """Build the epub edition of *manuscript* into *outdir*."""
    stitched = stitch_document(manuscript, outdir, EPUB_LAYOUT)
    target = f"{outdir}/{book_name}.epub"
    pandoc_args = [
        "--top-level-division=chapter",
        "--toc-depth=1",
        "--template=src/pandoc/templates/custom-epub.html",
        "--css=src/pandoc/css/style.css",
        "-f",
        "markdown+smart",
        "-o",
        str(target),
        str(stitched),
    ]
    if pandoc(pandoc_args).returncode == 0:
        log.info(f"=> Compiled with epub at {target}")
def compile_paperback(manuscript: Path, book_name: str, outdir: Path):
    """Build the 5x8 paperback PDF edition of *manuscript* into *outdir*."""
    stitched = stitch_document(manuscript, outdir, PAPERBACK_LAYOUT)
    target = f"{outdir}/{book_name}-paperback.pdf"
    pandoc_args = [
        "--top-level-division=chapter",
        "--template=src/pandoc/templates/cs-5x8-pdf.latex",
        "--pdf-engine=xelatex",
        '--pdf-engine-opt=-output-driver="xdvipdfmx -V 3 -z 0"',
        "-f",
        "markdown+backtick_code_blocks",
        "-o",
        str(target),
        str(stitched),
    ]
    if pandoc(pandoc_args).returncode == 0:
        log.info(f"=> Compiled with paperback at {target}")
def main(args):
    """Bind the requested book: reset the output directory and compile each
    selected format.

    Args:
        args: parsed argparse namespace from parse_args().
    """
    logging.basicConfig(level=args.log_level)

    book_file = Path(args.book_file[0])
    book_name = book_file.name
    log.info(f"=== Binding {book_name} ===")
    log.debug(f"Args are {args}")

    # Start from a clean output directory on every run.
    outdir = Path(args.out)
    if outdir.exists():
        log.info(f"=> Outdir exists: {outdir}. Removing.")
        shutil.rmtree(outdir)
    log.info(f"=> Creating outdir: {outdir}.")
    outdir.mkdir(parents=True)

    # (removed unused local `partialdir = outdir / "partial"`)
    formats = POSSIBLE_FORMATS if args.format == "all" else [args.format]
    if "epub" in formats:
        compile_epub(book_file, book_name=book_name, outdir=outdir)
    if "paperback" in formats:
        compile_paperback(book_file, book_name=book_name, outdir=outdir)
def parse_args():
    """Build and evaluate the command line interface.

    Returns:
        The parsed argparse namespace (format, template, out, log_level,
        book_file).
    """
    parser = argparse.ArgumentParser("Borja's Amazing Book Binder")
    parser.add_argument(
        "--format", "-f", choices=["all"] + POSSIBLE_FORMATS, default="all"
    )
    parser.add_argument(
        "--template",
        "-t",
        type=str,
        default=None,
        # Fixed typo: "Oherwise" -> "Otherwise".
        help="Optional template file. Otherwise, default will be applied.",
    )
    parser.add_argument("--out", "-o", type=str, default=f"{project_dir}/out")
    parser.add_argument("--log-level", "-l", default=logging.INFO, type=int)
    parser.add_argument("book_file", nargs=1, type=str)
    return parser.parse_args()
# Script entry point: run from the project root so the relative template
# paths inside the pandoc commands resolve correctly.
if __name__ == "__main__":
    os.chdir(project_dir)
    main(parse_args())
| StarcoderdataPython |
6600030 | import json
def load_commands():
    """Load the command mapping from command_mapping.json in the cwd."""
    with open('command_mapping.json') as mapping_file:
        return json.load(mapping_file)
# structure for storing the graph
# TODO: persist graphs and decide whether a newly observed node is the same
#       as an existing one - currently there is no way to tell nodes apart.
# TODO: research approaches for node identity / graph consolidation.
# Goes to some keyboard interface or api for game
def run_command(command):
    """Placeholder command dispatcher.

    Eventually this should forward *command* to a keyboard interface or the
    game API and report whether movement occurred; for now it always claims
    success.
    """
    # Detect if movement occurs
    return True
# Feedback mechanism - has to be visual
# Controls the actual interface
def controller(command_mapping, graph_storage_structure):
    """Main control-loop skeleton (work in progress).

    Repeatedly replays every mapped command; successful commands are meant
    to grow/refine the world graph (not implemented yet).

    NOTE(review): auto_movement_engaged is never set to False, so this loop
    never terminates on its own; graph_storage_structure is currently unused.
    """
    auto_movement_engaged = True
    # movement loop
    while auto_movement_engaged:
        # command loop
        for command in command_mapping:
            command_successful = run_command(command)
            if command_successful:
                # Determine if it is a new node
                # Add it if it is a new node
                # Consolidate/improve the graph structure underneath
                pass
        pass
| StarcoderdataPython |
1968667 |
import numpy as np
import rllab.spaces
def build_space(shape, space_type, info=None):
    """Construct an rllab space of the requested type.

    Args:
        shape: tuple describing the space shape; for 'Discrete' it must
            have length one.
        space_type: either 'Box' or 'Discrete'.
        info: optional dict; for 'Box' it may carry explicit 'low'/'high'
            bound arrays whose shapes must match *shape*.

    Returns:
        An rllab.spaces.Box or rllab.spaces.Discrete instance.

    Raises:
        ValueError: if *space_type* is not supported.
    """
    # Avoid the shared mutable-default pitfall; semantics are unchanged.
    if info is None:
        info = {}
    if space_type == 'Box':
        if 'low' in info and 'high' in info:
            low = info['low']
            high = info['high']
            msg = 'shape = {}\tlow.shape = {}\thigh.shape={}'.format(
                shape, low.shape, high.shape)
            assert shape == np.shape(low) and shape == np.shape(high), msg
            return rllab.spaces.Box(low=low, high=high)
        return rllab.spaces.Box(low=-np.inf, high=np.inf, shape=shape)
    if space_type == 'Discrete':
        assert len(shape) == 1, 'invalid shape for Discrete space'
        # NOTE(review): Discrete spaces are usually built from the element
        # count (shape[0]); passing the whole shape mirrors the original
        # code - confirm against rllab.spaces.Discrete before changing.
        return rllab.spaces.Discrete(shape)
    raise ValueError('space type not implemented: {}'.format(space_type))
3439980 | # -*- coding: utf-8 -*-
import urllib, urllib2, re, os, sys, math
import xbmcgui, xbmc, xbmcaddon, xbmcplugin
from urlparse import urlparse, parse_qs
#nie chciało mi się więc
# @autor - http://svn.sd-xbmc.org/
# Umieszczam stosowne info w changelogu
if sys.version_info >= (2, 7):
import json as json
else:
import simplejson as json
scriptID = 'plugin.video.mrknow'
scriptname = "Filmy online www.mrknow.pl - tvnplayer"
ptv = xbmcaddon.Addon(scriptID)
_thisPlugin = int(sys.argv[1])
BASE_RESOURCE_PATH = os.path.join( ptv.getAddonInfo('path'), "../resources" )
sys.path.append( os.path.join( BASE_RESOURCE_PATH, "lib" ) )
import mrknow_pLog
log = mrknow_pLog.pLog()
#log.info(BASE_RESOURCE_PATH1)
BASE_RESOURCE_PATH1 = os.path.join( ptv.getAddonInfo('path'), 'lib')
sys.path.append( os.path.join( BASE_RESOURCE_PATH1, "utils" ) )
import os, sys, time
import xbmcaddon, xbmcgui
import traceback
if sys.version_info >= (2,7): import json as _json
else: import simplejson as _json
from hashlib import sha1
import crypto.cipher.aes_cbc
import crypto.cipher.base, base64
import binascii
import urllib2, urllib, re
import mrknow_pLog, mrknow_pCommon, mrknow_Parser, mrknow_urlparser, mrknow_Pageparser, mrknow_Player
if sys.version_info >= (2,7): import json as _json
else: import simplejson as _json
dbg=False
SERVICE = 'tvn'
THUMB_SERVICE = 'http://sd-xbmc.org/repository/xbmc-addons/' + SERVICE + '.png'
platform = {
'Samsung': {
'platform': 'ConnectedTV',
'terminal': 'Samsung2',
'authKey': '453198a80ccc99e8485794789292f061',
'header': {'User-Agent': 'Mozilla/5.0 (SmartHub; SMART-TV; U; Linux/SmartTV; Maple2012) AppleWebKit/534.7 (KHTML, like Gecko) SmartTV Safari/534.7'},
'base_url': 'http://api.tvnplayer.pl/api',
'api': '3.6',
'fallback': 'Panasonic'
},
'Android': {
'platform': 'Mobile',
'terminal': 'Android',
'authKey': 'b4bc971840de63d105b3166403aa1bea',
'base_url': 'http://api.tvnplayer.pl/api',
'header': {'User-Agent': 'Apache-HttpClient/UNAVAILABLE (java 1.4)'},
'api': '3.0',
'fallback': 'Android2'
},
'Android2': {
'platform': 'Mobile',
'terminal': 'Android',
'authKey': 'b4bc971840de63d105b3166403aa1bea',
'header': {'User-Agent': 'Apache-HttpClient/UNAVAILABLE (java 1.4)'},
'base_url': 'http://api.tvnplayer.pl/api',
'api': '2.0',
'fallback': ''
},
'Android3': {
'platform': 'Mobile',
'terminal': 'Android',
'authKey': '<KEY>',
'base_url': 'http://api.tvnplayer.pl/api',
'header': {'User-Agent': 'Apache-HttpClient/UNAVAILABLE (java 1.4)'},
'api': '3.1',
'fallback': 'Android4'
},
'Android4': {
'platform': 'Mobile',
'terminal': 'Android',
'authKey': '<KEY>',
'base_url': 'http://tvnplayer2-prev-c.stage.online.tvwisla.com.pl/api2',
'header': {'User-Agent': 'Player/3.3.4 tablet Android/4.1.1 net/wifi', 'X-Api-Version': '3.7',
'Accept-Encoding': 'gzip'},
'api': '3.7',
'fallback': ''
},
#/api/?v=3.7&authKey=8a8a70a71f12073b24fea556f6a271f1&platform=Mobile&terminal=Apple&format=json&m=mainInfo&showTmobileContent=no
#https://api.tvnplayer.pl/api/?v=3.7&authKey=8a8a70a71f12073b24fea556f6a271f1&platform=Mobile&terminal=Apple&format=json&m=mainInfo&showTmobileContent=no
'Apple': {
'platform': 'Mobile',
'terminal': 'Apple',
'authKey': '<KEY>',
'base_url': 'http://api.tvnplayer.pl/api',
'header': {'User-Agent': 'Player/3.3.4 tablet Android/4.1.1 net/wifi', 'X-Api-Version': '3.7',
'Accept-Encoding': 'gzip'},
'api': '3.7',
'fallback': ''
},
'Panasonic': {
'platform': 'ConnectedTV',
'terminal': 'Panasonic',
'authKey': '<KEY>',
'header': {'User-Agent': 'Mozilla/5.0 (Linux; U; Android 2.3.4; en-us; Kindle Fire Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Accept-Encoding': 'gzip'},
'api': '3.1',
'base_url': 'http://api.tvnplayer.pl/api',
'fallback': 'Android2'
},
}
# Stream quality labels as reported by the TVN API, ordered best to worst;
# used as a fallback search order when the configured quality is missing.
qualities = [
    'HD',
    'Bardzo wysoka',
    'SD',
    'Wysoka',
    'Standard',
    # A missing comma in the original fused 'Średnia' and 'Niska' into the
    # single label 'ŚredniaNiska', so neither quality could ever match.
    'Średnia',
    'Niska',
    'Bardzo niska',
]
# Addon settings, read once at import time.
tvn_proxy = ptv.getSetting('tvn_proxy')  # 'true' -> route API/video requests through the proxy
tvn_quality = ptv.getSetting('tvn_quality')  # preferred stream quality label (see `qualities`)
tvn_sort = ptv.getSetting('tvn_sort')  # 'Alfabetycznie' enables alphabetical sorting of series
tvn_platform = ptv.getSetting('tvn_platform')  # key into the `platform` table above
pl_proxy = ptv.getSetting('pl_proxy')  # proxy host
pl_proxy_port = ptv.getSetting('pl_proxy_port')  # proxy port ('' -> host only)
# Url parameters that uniquely identify an item; used to build stable plugin
# urls so Kodi's "watched" status works.
tvn_url_keys = ("service", "id", "seriesId", "category")
MAINURL = 'https://api.tvnplayer.pl'
IMAGEURL = 'http://dcs-193-111-38-250.atmcdn.pl/scale/o2/tvn/web-content/m/'
class sdGUI:
    """Kodi (XBMC) GUI helper for the addon.

    Builds directory listings and context menus, and plays or resolves
    video/audio items.  Python 2 / old Kodi API code.
    """

    def __init__(self):
        self.cm = mrknow_pCommon.common()
        #self.history = sdCommon.history()
        self.parser = mrknow_Parser.mrknow_Parser()

    def searchInput(self, SERVICE, heading='Wyszukaj'):
        # Ask the user for a search phrase via the on-screen keyboard;
        # returns the text (and records it in history) or None on cancel.
        keyboard = xbmc.Keyboard('', heading, False)
        keyboard.doModal()
        if keyboard.isConfirmed():
            text = keyboard.getText()
            self.history.addHistoryItem(SERVICE, text)
            return text

    def dialog(self):
        # Plain modal dialog factory.
        return xbmcgui.Dialog()

    def percentDialog(self):
        # Progress dialog factory.
        return xbmcgui.DialogProgress()

    def notification(self, title=" ", msg=" ", time=5000):
        # Fire-and-forget toast notification (time in milliseconds).
        xbmc.executebuiltin("XBMC.Notification(" + title + "," + msg + "," + str(time) + ")")

    def getBaseImagePath(self):
        return 'http://sd-xbmc.org/repository/xbmc-addons/'

    def getThumbNext(self):
        # Thumbnail used for "next page" entries.
        return self.getBaseImagePath() + "dalej.png"

    def getLogoImage(self, title, ext="png"):
        return self.getBaseImagePath() + title + "." + ext

    def __setInfoLabels(self, params, pType):
        # Keep only the keys Kodi understands for the given media type.
        InfoLabels = {}
        if pType == "video":
            infoLabelsKeys = ["genre", "year", "episode", "season", "top250", "tracknumber", "rating", "playcount",
                              "overlay",
                              "cast", "castandrole", "director", "mpaa", "plot", "plotoutline", "title",
                              "originaltitle", "sorttitle",
                              "duration", "studio", "tagline", "writer", "tvshowtitle", "premiered", "status", "code",
                              "aired", "credits",
                              "lastplayed", "album", "artist", "votes", "trailer", "dateadded"]
        elif pType == "music":
            infoLabelsKeys = ["tracknumber", "duration", "year", "genre", "album", "artist", "title", "rating",
                              "lyrics", "playcount", "lastplayed"]
        for key, value in params.items():
            if key in infoLabelsKeys:
                InfoLabels[key] = value
        return InfoLabels

    def __play(self, params, isPlayable=False, isFolders=False, pType="video", params_keys_needed=None):
        # Build one directory entry (folder or playable item) from *params*
        # and add it to the plugin listing.
        if pType == "video":
            params['name'] = 'playSelectedVideo'
        elif pType == "music":
            params['name'] = 'playSelectedAudio'
        # Simplified urls - required for the "watched" status to work.
        if params_keys_needed == None:
            u = sys.argv[0] + self.parser.setParams(params)
        else:
            needed_params = {}
            for k in params_keys_needed:
                if params.has_key(k):
                    needed_params[k] = params[k]
            u = sys.argv[0] + self.parser.setParams(needed_params)
        pType = pType.replace("dir_", "")
        params['icon'] = params.get('icon') or "DefaultVideo.png"
        if dbg == True:
            log.info(" - " + pType + ": ")
            self.parser.debugParams(params, True)
        params['title'] = params.get('title') or None
        if params['title'] == None: return False
        params['series'] = params.get('series') or None
        params['file_name'] = params['title']
        if params['series'] != None:
            params['file_name'] = "%s - %s" % (params['series'], params['title'])
        liz = xbmcgui.ListItem(params['title'], iconImage="DefaultFolder.png", thumbnailImage=params['icon'])
        if isPlayable:
            liz.setProperty("IsPlayable", "true")
        params['fanart'] = params.get('fanart') or "http://sd-xbmc.org/repository/repository.sd-xbmc.org/fanart.jpg"
        params['banner'] = params.get('banner') or params['icon']
        params['poster'] = params.get('poster') or params['icon']
        meta = self.__setInfoLabels(params, pType)
        liz.setProperty("fanart_image", params['fanart'])
        liz.setArt({'banner': params['banner'], 'poster': params['poster']})
        liz.setInfo(type=pType, infoLabels=meta)
        # Simplified urls = support for the "watched" toggle.
        if isPlayable and params_keys_needed != None:
            liz.addContextMenuItems([('Oznacz jako (nie)obejrzane', 'Action(ToggleWatched)')])
            # liz.addStreamInfo('video', { 'codec': 'h264', 'aspect': 1.78, 'width': 1280,'height': 720})
        if self.cm.isEmptyDict(params, 'page'): params['page'] = ''
        # Offer a download context menu when a destination path is set.
        if (not self.cm.isEmptyDict(params, 'dstpath')) and pType == "video":
            cm = self.__addDownloadContextMenu(
                {'service': params['service'], 'title': params['file_name'], 'url': params['page'],
                 'path': os.path.join(params['dstpath'], params['service'])})
            liz.addContextMenuItems(cm, replaceItems=False)
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=isFolders)

    def __addDownloadContextMenu(self, params={}):
        # Build "Download" / "Info" context-menu entries.
        # NOTE(review): mutable default argument; every caller passes a dict.
        params['action'] = 'download'
        param = self.parser.setParams(params)
        cm = []
        cm.append(('Ściągnij', "XBMC.RunPlugin(%s%s)" % (sys.argv[0], param)))
        cm.append(('Informacje', "XBMC.Action(Info)",))
        return cm

    def playVideo(self, params, isPlayable=False, isFolders=False, params_keys_needed=None):
        # Add a playable video entry.
        self.__play(params, isPlayable, isFolders, "video", params_keys_needed)

    def playAudio(self, params, isPlayable=False, isFolders=False, params_keys_needed=None):
        # Add a playable audio entry.
        self.__play(params, isPlayable, isFolders, "music", params_keys_needed)

    def addDir(self, params, isFolders=True, params_keys_needed=None):
        # Add a folder entry.
        self.__play(params, False, isFolders, "dir_video", params_keys_needed)

    def endDir(self, sort=False, content=None, viewMode=None, ps=None):
        '''
        Finish the directory listing: optionally enable label sorting, set
        the content type and apply a Confluence skin view mode.

        ToDo:
        Check is Confluence, not? other View Mode
        Confluence View Modes:
        http://www.xbmchub.com/forums/general-python-development/717-how-set-default-view-type-xbmc-lists.html#post4683
        https://github.com/xbmc/xbmc/blob/master/addons/skin.confluence/720p/MyVideoNav.xml
        '''
        if ps == None:
            ps = int(sys.argv[1])
        if sort == True:
            xbmcplugin.addSortMethod(ps, xbmcplugin.SORT_METHOD_LABEL)
        canBeContent = ["files", "songs", "artists", "albums", "movies", "tvshows", "episodes", "musicvideos"]
        if content in canBeContent:
            xbmcplugin.setContent(ps, content)
        if viewMode != None:
            viewList = {}
            # View-mode ids are Confluence-specific.
            if 'confluence' in xbmc.getSkinDir():
                viewList = {
                    'List': '50',
                    'Big List': '51',
                    'ThumbnailView': '500',
                    'PosterWrapView': '501',
                    'PosterWrapView2_Fanart': '508',
                    'MediaInfo': '504',
                    'MediaInfo2': '503',
                    'MediaInfo3': '515',
                    'WideIconView': '505',
                    'MusicVideoInfoListView': '511',
                    'AddonInfoListView1': '550',
                    'AddonInfoThumbView1': '551',
                    'LiveTVView1': '560'
                }
            if viewMode in viewList:
                view = viewList[viewMode]
            else:
                view = 'None'
            xbmc.executebuiltin("Container.SetViewMode(%s)" % (view))
        xbmcplugin.endOfDirectory(ps)

    def new_playlist(self, playlist='audio'):
        # Return a cleared XBMC playlist of the requested type.
        # NOTE(review): an unknown name is only logged here, then raises
        # KeyError on the lookup below - confirm that is acceptable.
        playlists = {'audio': 0, 'video': 1}
        if playlist not in playlists.keys():
            log.info('Playlista "%s" jest inwalidą ;).' % playlist)
        selected_playlist = xbmc.PlayList(playlists[playlist])
        selected_playlist.clear()
        return selected_playlist

    def add_to_playlist(self, playlist, items):
        # Accepts either a list of items or a single url string.
        if isinstance(items, list):
            for item in items:
                playlist.add(item)
        elif isinstance(items, str):
            playlist.add(items)

    def __LOAD_AND_PLAY(self, url, title, player=True, pType='video'):
        # Start playback with xbmc.Player (or a caller-supplied player);
        # shows error dialogs and returns False on failure.
        if url == '':
            d = xbmcgui.Dialog()
            d.ok('Nie znaleziono streamingu', 'Może to chwilowa awaria.', 'Spróbuj ponownie za jakiś czas')
            return False
        thumbnail = xbmc.getInfoImage("ListItem.Thumb")
        liz = xbmcgui.ListItem(title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
        # NOTE(review): type="pType" passes the literal string, not the
        # variable - looks unintended; confirm before changing.
        liz.setInfo(type="pType", infoLabels={"Title": title})
        try:
            if player != True:
                print "custom player pCommon"
                xbmcPlayer = player
            else:
                print "default player pCommon"
                xbmcPlayer = xbmc.Player()
            xbmcPlayer.play(url, liz)
        except:
            d = self.dialog()
            if pType == "video":
                d.ok('Wystąpił błąd!', 'Błąd przy przetwarzaniu, lub wyczerpany limit czasowy oglądania.',
                     'Zarejestruj się i opłać abonament.', 'Aby oglądać za darmo spróbuj ponownie za jakiś czas.')
            elif pType == "music":
                d.ok('Wystąpił błąd!', 'Błąd przy przetwarzaniu.', 'Aby wysłuchać spróbuj ponownie za jakiś czas.')
            return False
        return True

    def __LOAD_AND_PLAY_WATCHED(self, url,
                                pType='video'):  # NEW: uses xbmcplugin.setResolvedUrl, supports "watched" status
        if url == '':
            d = xbmcgui.Dialog()
            d.ok('Nie znaleziono streamingu', 'Może to chwilowa awaria.', 'Spróbuj ponownie za jakiś czas')
            return False
        liz = xbmcgui.ListItem(path=url)
        try:
            return xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, liz)
        except:
            d = self.dialog()
            if pType == "video":
                d.ok('Wystąpił błąd!', 'Błąd przy przetwarzaniu, lub wyczerpany limit czasowy oglądania.',
                     'Zarejestruj się i opłać abonament.', 'Aby oglądać za darmo spróbuj ponownie za jakiś czas.')
            elif pType == "music":
                d.ok('Wystąpił błąd!', 'Błąd przy przetwarzaniu.', 'Aby wysłuchać spróbuj ponownie za jakiś czas.')
            return False

    def LOAD_AND_PLAY_VIDEO(self, url, title, player=True):
        if url != False:
            self.__LOAD_AND_PLAY(url, title, player, "video")
        else:
            d = xbmcgui.Dialog()
            d.ok('Brak linku!', 'Przepraszamy, chwilowa awaria.', 'Zapraszamy w innym terminie.')

    def LOAD_AND_PLAY_VIDEO_WATCHED(self, url):  # NEW: uses xbmcplugin.setResolvedUrl, supports "watched" status
        if url != False:
            return self.__LOAD_AND_PLAY_WATCHED(url, 'video')
        else:
            d = xbmcgui.Dialog()
            d.ok('Brak linku!', 'Przepraszamy, chwilowa awaria.', 'Zapraszamy w innym terminie.')
            return False

    def LOAD_AND_PLAY_AUDIO(self, url, title, player=True):
        if url != False:
            self.__LOAD_AND_PLAY(url, title, player, "music")
        else:
            d = xbmcgui.Dialog()
            d.ok('Brak linku!', 'Przepraszamy, chwilowa awaria.', 'Zapraszamy w innym terminie.')

    def LOAD_AND_PLAY_AUDIO_WATCHED(self, url):  # NEW: uses xbmcplugin.setResolvedUrl, supports "watched" status
        if url != False:
            return self.__LOAD_AND_PLAY_WATCHED(url, 'audio')
        else:
            d = xbmcgui.Dialog()
            d.ok('Brak linku!', 'Przepraszamy, chwilowa awaria.', 'Zapraszamy w innym terminie.')
            return False
class tvn:
    """Service handler for tvnplayer.pl.

    Lists categories/items via the TVN API and resolves episode video urls.
    Python 2 / old Kodi API code.
    """

    def __init__(self):
        log.info('Loading ' + SERVICE)
        self.parser = mrknow_Parser.mrknow_Parser()
        self.gui = sdGUI()
        self.common = mrknow_pCommon.common()
        self.api = API()

    def getMenu(self, args):
        # List the top-level API categories, skipping non-browsable types.
        data = self.api.getAPI(args)
        for item in data['categories']:
            # skip FAVOURITES and CONTINUE / PACKAGES
            if item['type'] != 'favorites' and item['type'] != 'pauses' and item['type'] != 'open_market' and item[
                'type'] != 'landing_page' and item['type'] != 'stream':
                if item['thumbnail'] != None:
                    icon = self.api.getImage(item['thumbnail'][0]['url'])
                else:
                    icon = THUMB_SERVICE
                params = {'service': SERVICE, 'category': item['type'], 'id': item['id'],
                          'title': item['name'].encode('UTF-8'), 'icon': icon}
                self.gui.addDir(params, params_keys_needed=tvn_url_keys)
        self.gui.endDir()

    def getItems(self, args):
        # List either the items of a category / episodes of a season, or
        # the seasons of a series when the API reports any.
        sort = True
        data = self.api.getAPI(args)
        if (not 'seasons' in data) or (len(data['seasons']) == 0) or (
                'season=' in args):  # no seasons at all, or episodes within a season
            for item in data['items']:
                try:
                    icon = self.api.getImage(item['thumbnail'][0]['url'])
                except Exception, exception:
                    icon = THUMB_SERVICE
                title = item['title'].encode('UTF-8')
                if item['type'] == 'episode':
                    sort = False
                    if item['season'] != 0 and item['season'] != None:
                        title = title + ', sezon ' + str(item['season'])
                    if item['episode'] != 0 and item['episode'] != None:
                        title = title + ', odcinek ' + str(item['episode'])
                    # 'preview_catchup' or 'preview_prepremier'
                    if ('preview_' in item['type_episode']):
                        title = title + ' [COLOR FFFF0000](' + item['start_date'].encode('UTF-8') + ')[/COLOR]'
                if item['type'] == 'series':
                    # Alphabetical sorting of series is a user setting.
                    if tvn_sort == "Alfabetycznie":
                        sort = True
                    else:
                        sort = False
                    if item['season'] != 0 and item['season'] != None:
                        title = title + ', sezon ' + str(item['season'])
                subtitle = item.get('sub_title', None)
                if subtitle != None and len(subtitle) > 0:
                    title = title + ' - ' + subtitle.encode('UTF-8')
                params = {'service': SERVICE, 'category': item['type'], 'id': item['id'], 'title': title.strip(),
                          'icon': icon, 'fanart': icon}
                # Duration comes from |end_credits_start| (or |run_time|).
                duration = item.get('end_credits_start', None)
                if duration != None and len(duration) == 8:  # format 00:23:34
                    l = duration.split(':')
                    sec = int(l[0]) * 3600 + int(l[1]) * 60 + int(l[2])
                    params.update({'duration': str(sec)})
                rating = item.get('rating', None)
                if rating != None and len(rating) > 0:
                    if rating != '0':
                        params.update({'mpaa': 'Od ' + rating + ' lat'})
                    else:
                        params.update({'mpaa': 'Bez ograniczeń'})
                plot = item.get('lead', None)
                if plot != None:
                    # NOTE(review): this replace is a no-op in the source as
                    # received - probably meant '&quot;' -> '"'; confirm.
                    params.update({'plot': plot.replace('"', '"').encode('UTF-8')})
                if item['type'] == 'episode':
                    self.gui.playVideo(params, isPlayable=True, params_keys_needed=tvn_url_keys)
                else:
                    self.gui.addDir(params, params_keys_needed=tvn_url_keys)
        else:  # list seasons
            for item in data['seasons']:
                if item['thumbnail'] != None:
                    icon = self.api.getImage(item['thumbnail'][0]['url'])
                else:
                    icon = THUMB_SERVICE
                t = data['items'][0]['title'].encode('UTF-8')
                params = {'service': SERVICE, 'category': item['type'], 'id': item['id'],
                          'title': t + ' - ' + item['name'].encode('UTF-8'), 'icon': icon, 'fanart': icon,
                          'seriesId': item['vdp_id']}
                self.gui.addDir(params, params_keys_needed=tvn_url_keys)
        self.gui.endDir(sort)

    def getVideoUrl(self, args):
        # Resolve the playable url for an episode, honouring the configured
        # quality and falling back to another API version when needed.
        ret = ''
        fallback = False
        if tvn_proxy == 'true':
            useProxy = True
        else:
            useProxy = False
        data = self.api.getAPI(args, useProxy)
        # No video (or Widevine DRM in api 3.6) - retry with the fallback api.
        if data['item']['videos']['main']['video_content'] == None or len(data['item']['videos']['main']['video_content']) == 0 or \
                ('video_content_license_type' in data['item']['videos']['main'] and data['item']['videos']['main']['video_content_license_type'] == 'WIDEVINE'):  # DRM v3.6
            data = self.api.getAPI(args, useProxy, 'fallback')
            fallback = True
        # The fallback API sometimes returns nothing usable either.
        if not ('item' in data) or not ('videos' in data['item']) or not (
                'main' in data['item']['videos']):
            d = xbmcgui.Dialog()
            d.ok(SERVICE, 'Brak materiału video', '')
            exit()
        # Find the quality selected in the addon settings.
        if data['item']['videos']['main']['video_content'] != None and len(data['item']['videos']['main']['video_content']) != 0:
            url = ''
            for item in data['item']['videos']['main']['video_content']:
                if item['profile_name'].encode('UTF-8') == tvn_quality:
                    url = item['url']  # found the selected quality
                    break;
            # Quality not found (or "maximum") - take the first/highest
            # quality that is available, in `qualities` order.
            if url == '':
                for q in qualities:
                    for item in data['item']['videos']['main']['video_content']:
                        if str(item['profile_name'].encode('UTF-8')) == str(q):
                            url = item['url']
                            break
                    if url != '':
                        break
            if fallback:
                pl = platform[tvn_platform]['fallback']
            else:
                pl = tvn_platform
            # Append the auth token only for Android terminals.
            if 'Android' in pl:  # pl == AndroidX
                ret = self.api.generateToken(url).encode('UTF-8')
            else:
                ret = url
            #query_data = {'url': url, 'use_host': False, 'use_header': True,'header': platform[pl]['header'],
            #              'use_cookie': False, 'use_post': False, 'return_data': True}
            #try:
            #    ret = self.common.getURLRequestData(query_data)
            #except Exception, exception:
            #    #traceback.print_exc()
            #    #self.exception.getError(str(exception))
            #    log.info('getVideoUrl Exception: %s' % exception)
            #    exit()
            # 02/07/2016
            # When proxying, resolve the redirect manually so the Location
            # header can be rewritten to a fixed CDN node below.
            if useProxy:
                opener = urllib2.build_opener(NoRedirectHandler())
                urllib2.install_opener(opener)
                response = urllib2.urlopen(urllib2.Request(ret))
                ret = response.info().getheader('Location')
            ret = re.sub('n-(.+?)\.dcs\.redcdn\.pl', 'n-1-25.dcs.redcdn.pl', ret)
        log.info('############ RET FINAL %s FALLBACK:%s URL:%s' % (ret, fallback, args))
        return ret

    def handleService(self):
        # Dispatch based on the plugin url parameters.
        params = self.parser.getParams()
        category = str(self.parser.getParam(params, "category"))
        id = str(self.parser.getParam(params, "id"))
        seriesId = str(self.parser.getParam(params, "seriesId"))
        # MAINMENU
        if category == 'None':
            if self.api.geoCheck():
                self.getMenu('m=mainInfo')
        # EVERYTHING in a category
        if category != 'None' and category != 'episode' and seriesId == 'None':
            self.getItems('m=getItems&sort=newest&limit=500&type=' + category + '&id=' + id)
        # EPISODES IN A SEASON
        if seriesId != 'None':
            self.getItems('m=getItems&sort=newest&limit=500&type=series&id=' + seriesId + '&season=' + id)
        # VIDEO
        if category == 'episode':
            #videoUrl = self.getVideoUrl('m=getItem&type=' + category + '&id=' + id)
            videoUrl = self.getVideoUrl(
                'showContentContractor=free%2Csamsung%2Cstandard&m=getItem&android23video=1&deviceType=Tablet&os=4.1.1&playlistType=&connectionType=WIFI&deviceScreenWidth=1920&deviceScreenHeight=1080&appVersion=3.3.4&manufacturer=unknown&model=androVMTablet&id=' + id)
            self.gui.LOAD_AND_PLAY_VIDEO_WATCHED(videoUrl)
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that stops urllib2 from following 3xx responses.

    Instead of re-issuing the request at the new location, each redirect
    response is returned to the caller unchanged so the ``Location``
    header can be inspected directly.
    """

    def http_error_302(self, req, fp, code, msg, headers):
        # Wrap the raw response instead of re-dispatching the request.
        result = urllib.addinfourl(fp, headers, req.get_full_url())
        result.status = code
        result.code = code
        return result

    # Every other redirect status behaves exactly like 302: do not follow.
    http_error_300 = http_error_302
    http_error_301 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
class API:
def __init__(self):
#self.exception = sdErrors.Exception()
self.common = mrknow_pCommon.common()
#self.proxy = sdCommon.proxy()
def geoCheck(self):
ret = True
if tvn_proxy != 'true':
data = self.getAPI('m=checkClientIp', False)
if data['result'] == False:
d = xbmcgui.Dialog()
d.ok(SERVICE, 'Serwis niedostepny na terenie twojego kraju.',
'Odwiedz sd-xbmc.org w celu uzyskania dostepu.')
ret = data['result']
return ret
def getAPIurl(self, fallback=''):
if fallback == 'fallback':
pl = platform[tvn_platform]['fallback']
else:
pl = tvn_platform
myurl = '%s/?platform=%s&terminal=%s&format=json&authKey=%s&v=%s&' % (
platform[pl]['base_url'],platform[pl]['platform'], platform[pl]['terminal'],
platform[pl]['authKey'], platform[pl]['api'])
return '%s/?platform=%s&terminal=%s&format=json&authKey=%s&v=%s&' % (
platform[pl]['base_url'],platform[pl]['platform'], platform[pl]['terminal'],
platform[pl]['authKey'], platform[pl]['api'])
def getAPI(self, args, useProxy=False, fallback=''):
url = self.getAPIurl(fallback) + args
if fallback == 'fallback':
pl = platform[tvn_platform]['fallback']
else:
pl = tvn_platform
query_data = {'url': url, 'use_host': False, 'use_header': True,'header': platform[pl]['header'],
'use_cookie': False, 'use_post': False, 'return_data': True}
try:
if useProxy:
myproxy = pl_proxy
if pl_proxy_port != '': myproxy = myproxy + ':' + pl_proxy_port
myproxy_check = self.is_bad_proxy(myproxy)
if not myproxy_check == '':
d = xbmcgui.Dialog()
d.ok('TVN PLayer.pl', 'Proxy error %s' % myproxy_check, '')
exit()
data = self.go_proxy(url,myproxy, platform[pl]['header'])
log.info('##DATA PROXY %s' % data)
else:
data = self.common.getURLRequestData(query_data)
#log.info(data)
result = _json.loads(data)
if not 'status' in result or result['status'] != 'success':
d = xbmcgui.Dialog()
d.ok(SERVICE, 'Blad API', '')
exit()
return result
except Exception, exception:
#traceback.print_exc()
#self.exception.getError(str(exception))
log.info("GetAPI ERROR %s" % exception)
exit()
def getImage(self, path):
return IMAGEURL + path + '?quality=85&dstw=870&dsth=560&type=1'
def generateToken(self, url):
url = url.replace('http://redir.atmcdn.pl/http/', '')
SecretKey = '<KEY>'
iv = 'ab5ef983454a21bd'
KeyStr = '0f12f35aa0c542e45926c43a39ee2a7b38ec2f26975c00a30e1292f7e137e120e5ae9d1cfe10dd682834e3754efc1733'
salt = sha1()
salt.update(os.urandom(16))
salt = salt.hexdigest()[:32]
tvncrypt = crypto.cipher.aes_cbc.AES_CBC(SecretKey, padding=crypto.cipher.base.noPadding(), keySize=32)
key = tvncrypt.decrypt(binascii.unhexlify(KeyStr), iv=iv)[:32]
expire = 3600000L + long(time.time() * 1000) - 946684800000L
unencryptedToken = "<PASSWORD>&<PASSWORD>" % (url, expire)
pkcs5_pad = lambda s: s + (16 - len(s) % 16) * chr(16 - len(s) % 16)
pkcs5_unpad = lambda s: s[0:-ord(s[-1])]
unencryptedToken = pkcs5_pad(unencryptedToken)
tvncrypt = crypto.cipher.aes_cbc.AES_CBC(binascii.unhexlify(key), padding=crypto.cipher.base.noPadding(),
keySize=16)
encryptedToken = tvncrypt.encrypt(unencryptedToken, iv=binascii.unhexlify(salt))
encryptedTokenHEX = binascii.hexlify(encryptedToken).upper()
return "http://redir.atmcdn.pl/http/%s?salt=%s&token=%s" % (url, salt, encryptedTokenHEX)
#return "http://redir.atmcdn.pl/http/%s?salt=%s&token=%s" % (url, salt, encryptedTokenHEX)
def is_bad_proxy(self, pip):
try:
proxy_handler = urllib2.ProxyHandler({'http': pip})
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib2.install_opener(opener)
req = urllib2.Request('http://kodi.filmkodi.com') # change the url address here
sock = urllib2.urlopen(req, timeout=20)
except urllib2.HTTPError, e:
log.info('Error code: %s' % e.code)
return 'Error code: %s' % e.code
except Exception, detail:
log.info("ERROR: %s" % detail)
return "ERROR: %s" % detail
log.info("OK: %s" % sock)
return ""
def go_proxy(self, url, pip, headers):
try:
proxy_handler = urllib2.ProxyHandler({'http': pip})
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [headers]
urllib2.install_opener(opener)
req = urllib2.Request(url) # change the url address here
response = urllib2.urlopen(req, timeout=20)
data = response.read()
response.close()
except urllib2.HTTPError, e:
log.info('Error code: %s' % e.code)
return 'Error code: %s' % e.code
except Exception, detail:
log.info("ERROR: %s" % detail)
return "ERROR: %s" % detail
log.info("OK PROXY: %s" % data)
return data | StarcoderdataPython |
3564569 | <gh_stars>0
# Generated by Django 2.0 on 2017-12-19 13:24
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the catalog app: Author, Book, BookInstance, Genre.

    Auto-generated by Django 2.0. Applied migrations must not be edited by
    hand; schema changes belong in a new migration.
    """

    # First migration of this app: no parent migration to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        # Author: name fields plus optional birth/death dates.
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100, verbose_name='Primer Apellido')),
                ('last_name', models.CharField(max_length=100, verbose_name='Segundo Apellido')),
                ('date_of_birth', models.DateField(blank=True, null=True, verbose_name='Fecha Nacimiento')),
                ('date_of_death', models.DateField(blank=True, null=True, verbose_name='Died')),
            ],
        ),
        # Book: bibliographic record pointing at a single (nullable) Author.
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, verbose_name='Titulo')),
                ('summary', models.TextField(help_text='Entre una breve descripcion del libro.', max_length=1000, verbose_name='Descripcion')),
                ('isbn', models.CharField(help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>', max_length=13, verbose_name='ISBN')),
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Author')),
            ],
        ),
        # BookInstance: one physical copy of a Book; UUID primary key,
        # single-letter loan status, ordered by due date.
        migrations.CreateModel(
            name='BookInstance',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, help_text='ID unico para el libro en toda la Libreria', primary_key=True, serialize=False, verbose_name='ID')),
                ('imprint', models.CharField(max_length=200, verbose_name='Version')),
                ('due_back', models.DateField(blank=True, null=True, verbose_name='Fecha reintegro')),
                ('status', models.CharField(blank=True, choices=[('m', 'Mantenimiento'), ('o', 'Prestado'), ('a', 'Disponible'), ('r', 'Reservado')], default='m', help_text='Disponibilidad', max_length=1, verbose_name='Disponibilidad')),
                ('book', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Book')),
            ],
            options={
                'ordering': ['due_back'],
            },
        ),
        # Genre: simple lookup table of genre names.
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Entre el genero del libro. Ej: Ciencia ficcion, Policiaca, Aventura, Romance, etc.)', max_length=200, verbose_name='Nombre de Genero')),
            ],
        ),
        # Many-to-many Book <-> Genre, added after both models exist.
        migrations.AddField(
            model_name='book',
            name='genre',
            field=models.ManyToManyField(help_text='Seleccione un genero para el libro.', to='catalog.Genre', verbose_name='Genero'),
        ),
    ]
| StarcoderdataPython |
6478465 | <gh_stars>10-100
"""Import metric implementations so they can register themselves."""
from metrics import base
from metrics import absolute_coverage
from metrics import cherrypick_issue_count
from metrics import circleci_flakiness
from metrics import circleci_greenness
from metrics import circleci_presubmit_latency
from metrics import release_granularity
| StarcoderdataPython |
9717735 | <reponame>vovchykbratyk/geoindexer
"""Crawl a directory tree for geospatial datasets and summarize their extents as GeoJSON features or GeoPackage layers."""
from area import area
from collections import OrderedDict
from datetime import datetime
import fiona
from fiona.crs import from_epsg
from handlers import Container, Exif, Lidar, Log, Raster, Shapefile
import json
import os
from pathlib import Path
import sys
from tqdm import tqdm
def now(iso8601=True):
    """Return the current local time as a formatted string.

    :param iso8601: when True (default) use the ISO-8601 form
        ``YYYY-MM-DDTHH:MM:SS``; otherwise the compact ``YYYYMMDDTHHMMSS``
        form (useful for file names).
    :return: str
    """
    fmt = '%Y-%m-%dT%H:%M:%S' if iso8601 else '%Y%m%dT%H%M%S'
    return datetime.now().strftime(fmt)
class GeoCrawler:
    """Collects candidate files of the requested types under a directory."""

    def __init__(self, path, types):
        """
        GeoCrawler constructor

        :param path: The path to be crawled
        :type path: str
        :param types: List of file extensions (without the leading dot)
        :type types: list
        """
        self.path = path
        self.types = types

    def get_file_list(self, recursive=True):
        """
        Searches path (default recursive) for filetypes and returns list of matches.

        :param recursive: Traverse directories recursively (default: True)
        :type recursive: bool
        :return: list of resolved file paths; empty list when the path
            cannot be read (the original implicitly returned None here,
            which broke downstream iteration).
        """
        pattern = "**/*" if recursive else "*"
        try:
            return [str(p.resolve()) for p in Path(self.path).glob(pattern)
                    if p.suffix[1:] in self.types]
        except PermissionError:
            # Degrade gracefully: an unreadable directory yields no matches.
            return []
class GeoIndexer:
    """Extracts spatial extents from a list of files and reports statistics.

    Files are dispatched on extension: container formats (gdb/gpkg/db/
    sqlite), EXIF-tagged JPEGs, lidar point clouds (las/laz), rasters
    (tiff/nitf/DTED) and shapefiles. Results accumulate as GeoJSON
    features; unreadable files/layers are collected in ``failures``.
    """

    def __init__(self, file_list):
        """
        GeoIndexer constructor
        """
        # Paths to process, plus accumulators for error strings and the
        # files/layers that could not be handled.
        self.file_list = file_list
        self.errors = []
        self.failures = {'files': [],
                         'layers': []}

    def get_extents(self, logging=None):
        """Process every file and return ``(extents, stats, failures)``.

        :param logging: optional directory path; when given, accumulated
            errors are written to a log file there and its file:// URI is
            recorded in the stats.
        :return: (GeoJSON FeatureCollection dict, stats dict, failures
            dict); exits the process when the file list is empty.
        """
        # Get total number of datasets to process, including geodatabase layers
        if len(self.file_list) > 0:
            to_process = 0
            for f in self.file_list:
                to_process += GeoIndexer.get_layer_num(self, f)

            # Set up the output
            points = []
            polygons = []
            extents = {'type': 'FeatureCollection',
                       'features': []}

            # Set up the report
            stats = {'container_layers': 0,
                     'web_images': 0,
                     'lidar_point_clouds': 0,
                     'rasters': 0,
                     'shapefiles': 0}

            # Main iterator
            for f in tqdm(self.file_list, desc='GeoIndexer progress', total=len(self.file_list), dynamic_ncols=True):
                fext = GeoIndexer.get_extension(f)

                # Multi-layer container formats: each layer yields a feature.
                if fext in ['gdb', 'gpkg', 'db', 'sqlite']:
                    try:
                        cf = Container(f).get_props()
                        for feat in cf['feats']:
                            if feat:
                                polygons.append(feat)
                                stats['container_layers'] += 1
                            else:
                                self.errors.append(f'{now()} - Problem processing layer {feat} in {f}')
                                self.failures['layers'].append(f'{feat} ({f})')
                        if len(cf['errors']) > 0:
                            self.errors.append([e for e in cf['errors']])
                            # NOTE(review): the comprehension variable f
                            # shadows the outer file path here -- confirm.
                            self.failures['layers'].append([f for f in cf['failed_layers']])

                    except Exception as e:
                        self.errors.append(f'{now()} - {e} - [{f}]')
                        self.failures['files'].append(f)
                        pass

                # Geotagged web images become point features.
                elif fext in ['jpg', 'jpeg']:
                    try:
                        points.append(Exif(f).get_props())
                        stats['web_images'] += 1
                    except Exception as e:
                        self.errors.append(f'{now()} - {e} - [{f}]')
                        self.failures['files'].append(f)
                        pass

                # Lidar point clouds: bounding polygon per file.
                elif fext in ['laz', 'las']:
                    try:
                        lf = Lidar(f).get_props()
                        if lf:
                            polygons.append(lf)
                            stats['lidar_point_clouds'] += 1
                        else:
                            self.errors.append(f'{now()} - Problem processing Lidar file {f}')
                            self.failures['files'].append(f)
                    except Exception as e:
                        # NOTE(review): unlike the other branches, this one
                        # does not record the file in failures['files'].
                        self.errors.append(f'{now()} - {e} - [{f}]')
                        pass

                # Raster formats (GeoTIFF, NITF, DTED levels 0-2).
                elif fext in ['tiff', 'tif', 'ntf', 'nitf', 'dt0', 'dt1', 'dt2']:
                    try:
                        feat = Raster(f).get_props()
                        if feat:
                            polygons.append(feat)
                            stats['rasters'] += 1
                        else:
                            self.errors.append(f'{now()} - Problem accessing Raster {f}')
                            self.failures['files'].append(f)
                    except Exception as e:
                        self.errors.append(f'{now()} - {e} - [{f}]')
                        self.failures['files'].append(f)
                        pass

                # Shapefiles: one extent feature per file.
                elif fext == 'shp':
                    try:
                        feat = Shapefile(f).get_props()
                        if feat:
                            polygons.append(feat)
                            stats['shapefiles'] += 1
                        else:
                            self.errors.append(f'{now()} - Problem accessing Shapefile {f}')
                            self.failures['files'].append(f)
                    except Exception as e:
                        self.errors.append(f'{now()} - {e} - [{f}]')
                        self.failures['files'].append(f)
                        pass

            # Assemble the GeoJSON object (polygons first, then points).
            if len(polygons) > 0:
                for poly in polygons:
                    extents['features'].append(poly)
            if len(points) > 0:
                for point in points:
                    extents['features'].append(point)

            # Summary statistics. total_processed must be computed before
            # the bookkeeping keys below are added, since it sums every
            # existing value in the dict.
            stats['total_processed'] = sum([val for key, val in stats.items()])
            stats['total_datasets'] = to_process
            # NOTE(review): raises ZeroDivisionError when to_process is 0.
            stats['success_rate'] = round(
                ((float(stats.get('total_processed', 0)) / float(stats.get('total_datasets', 0))) * 100), 2)

            # Output log if true
            if logging:
                log = Log(self.errors)
                logname = log.to_file(logging)
                stats['logfile'] = f'file:///{str(os.path.join(logging, logname))}'.replace("\\", "/")

            return extents, stats, self.failures

        else:
            sys.exit('No files found to process.')

    def get_layer_num(self, filepath: str):
        """
        Get the number of layers within a container, if the file is a container and can be read by fiona.
        Otherwise, return 0 (if the container cannot be read) or 1 (if the file is not a container).
        :return: int
        """
        extension = GeoIndexer.get_extension(filepath)
        if extension in ['gdb', 'gpkg', 'db', 'sqlite']:
            try:
                numlayers = len(fiona.listlayers(filepath))
                return numlayers
            except Exception as e:
                # Unreadable container: record the error, count 0 layers.
                self.errors.append(f'{now()} - {e} - [{filepath}]')
                return 0
        else:
            return 1

    @staticmethod
    def get_extension(filepath: str):
        """Return the lower-cased extension (without the dot), or None."""
        if filepath:
            return os.path.splitext(os.path.split(filepath)[1])[1][1:].lower()
        return None

    @staticmethod
    def geojson_container():
        """Return an empty GeoJSON FeatureCollection skeleton."""
        return {'type': 'FeatureCollection',
                'features': []}

    @staticmethod
    def get_schema(img_popup=False):
        """Return the fiona schema used for output layers.

        :param img_popup: when True, a Point schema carrying an image-popup
            attribute (for geotagged photos); otherwise the Polygon schema.
        """
        if img_popup:
            return {'geometry': 'Point',
                    'properties': OrderedDict([
                        ('dataType', 'str'),
                        ('fname', 'str'),
                        ('path', 'str'),
                        ('img_popup', 'str'),
                        ('native_crs', 'int'),
                        ('lastmod', 'str')])}
        return {'geometry': 'Polygon',
                'properties': OrderedDict([
                    ('path', 'str'),
                    ('lastmod', 'str'),
                    ('fname', 'str'),
                    ('dataType', 'str'),
                    ('native_crs', 'int')])}

    @staticmethod
    def to_geopackage(features: dict, path: str, scoped=True):
        """
        Outputs to a geopackage container, with different layers of polygons based on size:
            -- lv0: >= 175,000,000
            -- lv1: >= 35,000,000 < 175,000,000
            -- lv2: >= 5,000,000 < 35,000,000
            -- lv3: >= 1,000,000, < 5,000,000
            -- lv4: >= 500,000, < 1,000,000
            -- lv5: >= 100,000, < 500,000
            -- lv6: >= 50,000, < 100,000
            -- lv7: > 0, < 50,000
        """
        # NOTE(review): the docstring lists 8 levels (lv0-lv7) but the code
        # below creates only 7 (level_00-level_06) with different break
        # points -- confirm which binning is intended.
        driver = "GPKG"

        if scoped:
            layers = {'level_00': GeoIndexer.geojson_container(),
                      'level_01': GeoIndexer.geojson_container(),
                      'level_02': GeoIndexer.geojson_container(),
                      'level_03': GeoIndexer.geojson_container(),
                      'level_04': GeoIndexer.geojson_container(),
                      'level_05': GeoIndexer.geojson_container(),
                      'level_06': GeoIndexer.geojson_container()}

            for f in features['features']:
                try:
                    # area() result divided by 1e6 -- presumably m^2 -> km^2;
                    # TODO confirm the units against the thresholds below.
                    feat_area = float(area(f['geometry']) / 1000000)
                    if feat_area >= 175000000:  # lv0, world
                        layers['level_00']['features'].append(f)
                    elif 35000000 <= feat_area < 175000000:
                        layers['level_01']['features'].append(f)
                    elif 5000000 <= feat_area < 35000000:
                        layers['level_02']['features'].append(f)
                    elif 1000000 <= feat_area < 5000000:
                        layers['level_03']['features'].append(f)
                    elif 500000 <= feat_area < 1000000:
                        layers['level_04']['features'].append(f)
                    elif 100000 <= float(feat_area) < 500000:
                        layers['level_05']['features'].append(f)
                    elif 0 < float(feat_area) < 100000:
                        layers['level_06']['features'].append(f)
                except (TypeError, KeyError, AttributeError):
                    # Features without a measurable geometry are skipped.
                    pass

            # Write every non-empty bucket as its own layer.
            for k, v in layers.items():
                if len(v['features']) >= 1:
                    with fiona.open(path, 'w',
                                    schema=GeoIndexer.get_schema(),
                                    driver=driver,
                                    crs=from_epsg(4326),
                                    layer=k) as outlyr:
                        outlyr.writerecords(v['features'])
                    # print(f'wrote layer {k}:')
                    # print(f'{json.dumps(v)}')

                    # Uncomment below to use geopandas instead of fiona
                    # import geopandas as gpd
                    # gdf = gpd.GeoDataFrame.from_features(v)
                    # gdf.crs = 'EPSG:4326'
                    # gdf.to_file(path, driver=driver, layer=k)

            return True
        else:
            # Unscoped: everything goes into a single timestamped layer.
            layername = f"coverages_{now(iso8601=False)}"
            with fiona.open(path, 'w',
                            schema=GeoIndexer.get_schema(),
                            driver=driver,
                            crs=from_epsg(4326),
                            layer=layername) as outlyr:
                outlyr.writerecords(features['features'])

            # Uncomment below to use geopandas instead of fiona
            # import geopandas as gpd
            # gdf = gpd.GeoDataFrame.from_features(features)
            # gdf.crs = 'EPSG:4326'
            # gdf.to_file(path, driver=driver, layer=layername)
| StarcoderdataPython |
349085 | import codecs
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname* resolved relative to this file.

    The file is opened with :func:`codecs.open` and closed deterministically
    via a context manager (the original left the handle open until garbage
    collection).
    """
    with codecs.open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
# Distribution metadata for the pinax-theme-foundation package.
PACKAGE = "pinax_theme_foundation"
NAME = "pinax-theme-foundation"
DESCRIPTION = "Pinax theme based on Zurb's Foundation"
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
URL = "http://github.com/chrisdev/pinax-theme-foundation"
# Single-source the version from the package's __version__ attribute.
VERSION = __import__(PACKAGE).__version__


setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    # The README doubles as the long description shown on PyPI.
    long_description=read('README.rst'),
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    # NOTE(review): license says MIT but the classifier below says BSD --
    # confirm which is intended.
    license="MIT",
    url=URL,
    # Ship every package except the tests and the example project.
    packages=find_packages(exclude=[
        "tests.*",
        "tests", "example_project"]),
    include_package_data=True,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Framework :: Django",
    ],
    zip_safe=False,
)
| StarcoderdataPython |
3283627 | <reponame>madtyn/mvcPython<filename>view/widgets/timepicker.py
import time
import datetime as dt
import tkinter as tk
from tkinter import ttk
class Timepicker(ttk.Frame):
    """Two linked hour/minute spinboxes with wrap-around carry.

    Minutes rolling over 59 -> 00 increment the hour; rolling back
    00 -> 59 decrement it. Key input is validated so the fields can never
    hold an illegal value.
    """

    DAY_HOURS = 24
    MAX_HOUR = 23
    MAX_MINUTES = 59

    def __init__(self, parent, hour=None, minute=None, *args, **kwargs):
        """Build the widget.

        :param parent: containing Tk widget
        :param hour: initial hour (0-23); defaults to the current hour
        :param minute: initial minute (0-59); defaults to the current minute

        Bug fix: the original used ``if not hour:`` / ``if not minute:``,
        which treated an explicit 0 (midnight / on the hour) as "missing"
        and silently replaced it with the current time. ``is None`` checks
        preserve explicit zeros. (*args/**kwargs are accepted for
        signature compatibility but unused.)
        """
        super().__init__(parent)
        if hour is None:
            hour = int(time.strftime('%H'))
        self.parent = parent
        self.hourstr = tk.StringVar(self, f'{hour:02}')
        self.hour = ttk.Spinbox(self, from_=0, to=23, wrap=True, name='hourspin',
                                textvariable=self.hourstr, width=4, format='%02.0f')
        # Validate on every keystroke; '%P' passes the prospective value.
        vcmd = (parent.register(self._validate_hour), '%P')
        self.hour.configure(validate='key', validatecommand=vcmd)
        if minute is None:
            minute = int(time.strftime('%M'))
        self.minstr = tk.StringVar(self, f'{minute:02}')
        # "w" (write) trace drives the hour carry; note this trace mode is
        # deprecated in favour of trace_add("write", ...) -- kept for
        # compatibility with older Tk/py versions.
        self.minstr.trace("w", self.trace_var)
        self.last_value = f'{self.minstr.get()}'
        self.min = ttk.Spinbox(self, from_=0, to=59, wrap=True, name='minspin',
                               textvariable=self.minstr, width=4, format='%02.0f')
        vcmd = (parent.register(self._validate_minutes), '%P')
        self.min.configure(validate='key', validatecommand=vcmd)
        self.hour.grid(row=0, column=0)
        ttk.Label(self, text=':').grid(row=0, column=1)
        self.min.grid(row=0, column=2)

    def trace_var(self, *args):
        """
        Traces the variables so that when minutes exceed 59 we add 1 to hours
        and when minutes decrease from 0 to 59 we substract 1 from hours
        :param args: the args
        """
        # 59 -> 00: carry one hour forward (wrapping at 24).
        if self.last_value == f"{Timepicker.MAX_MINUTES}" and self.minstr.get().strip() and int(self.minstr.get()) == 0:
            self.hourstr.set(f'{(int(self.hourstr.get()) + 1) % Timepicker.DAY_HOURS:02}')
        # 00 -> 59: borrow one hour backward (wrapping at 0).
        elif self.last_value.strip() and int(self.last_value) == 0 and self.minstr.get() == f"{Timepicker.MAX_MINUTES}":
            self.hourstr.set(f'{(int(self.hourstr.get()) - 1) % Timepicker.DAY_HOURS:02}')
        self.last_value = self.minstr.get()

    def _validate_hour(self, new_value):
        """
        Validates every key press from a blank state so that no character
        entered makes the input illegal
        :param new_value: the new_value as it would be with the new character
        :return: True if we allow the edit to happen, False to prevent it
        """
        if new_value == '':
            return True
        result = self._validate_generic(new_value, Timepicker.MAX_HOUR)
        return result

    def _validate_minutes(self, new_value):
        """
        Validates every key press from a blank state so that no character
        entered makes the input illegal
        :param new_value: the new_value as it would be with the new character
        :return: True if we allow the edit to happen, False to prevent it
        """
        if new_value == '':
            return True
        result = self._validate_generic(new_value, Timepicker.MAX_MINUTES)
        return result

    def _validate_generic(self, new_value, maxvalue):
        """
        Validates that new_value is a two-figure number
        less or equal than maxvalue
        :param new_value: the new_value
        :param maxvalue: the maxvalue we won't exceed
        :return: True if this validates, False otherwise
        """
        if not new_value.isdigit():
            return False
        return len(new_value) <= 2 \
               and 0 <= int(new_value) <= maxvalue

    def validate_time(self):
        """Return True when both fields currently hold legal values."""
        hour_value = self.hourstr.get().strip()
        if hour_value == '' or not self._validate_hour(hour_value):
            return False
        minute_value = self.minstr.get().strip()
        if minute_value == '' or not self._validate_minutes(minute_value):
            return False
        return True

    def get_time(self):
        """Return the selected time as a ``datetime.time`` (seconds zeroed)."""
        return dt.time(int(self.hourstr.get()), int(self.minstr.get()), 0, 0)
# Manual smoke test: show a standalone Timepicker in a bare Tk window.
if __name__ == '__main__':
    root = tk.Tk()
    Timepicker(root).pack()
    root.mainloop()
| StarcoderdataPython |
4971254 | <filename>vizsgaremek/tc01_registration_test.py<gh_stars>0
def test_registration():
    """End-to-end UI test: register a new user and verify the success toast.

    Drives headless Chrome against the locally served app at
    http://localhost:1667. Requires selenium, webdriver_manager and a
    running app instance.
    """
    import time
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    from webdriver_manager.chrome import ChromeDriverManager

    options = Options()
    options.add_argument("--headless")
    options.add_argument("--disable-gpu")
    driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)
    try:
        driver.get("http://localhost:1667/#/")

        # Accept the cookie-consent banner so it does not block later clicks.
        accept_btn = driver.find_element_by_xpath('//*[@id="cookie-policy-panel"]/div/div[2]/button[2]')
        accept_btn.click()

        # Open the sign-up form.
        sign_up_btn = driver.find_element_by_xpath('//*[@id="app"]/nav/div/ul/li[3]/a')
        sign_up_btn.click()
        time.sleep(2)  # TODO(review): replace fixed sleeps with WebDriverWait

        def registration(un, em, pw):
            """Fill the three registration fields and submit the form."""
            user_name = driver.find_element_by_xpath('//*[@id="app"]//fieldset[1]/input')
            email = driver.find_element_by_xpath('//*[@id="app"]//fieldset[2]/input')
            password = driver.find_element_by_xpath('//*[@id="app"]//fieldset[3]/input')
            button = driver.find_element_by_xpath('//*[@id="app"]//form/button')
            user_name.send_keys(un)
            email.send_keys(em)
            password.send_keys(pw)
            button.click()

        registration("Milvus", "<EMAIL>", "Ab<PASSWORD>$")
        time.sleep(3)

        # Check feedback message, then dismiss the dialog.
        feedback = driver.find_element_by_xpath('/html/body/div[2]/div/div[3]')
        assert feedback.text == "Your registration was successful!"
        driver.find_element_by_xpath('/html/body/div[2]/div/div[4]/div/button').click()
    finally:
        # Always release the browser; the original leaked it
        # (driver.close() was commented out and finally did nothing).
        driver.quit()
| StarcoderdataPython |
3221604 | <gh_stars>0
# Parameter:
# config-file: path to cfg file
# weight_path: path to the pretrained weight
# dataset_path: path to a directory of images
# This script predicts bboxes of every image in the dataset path,
# write the ground truth into yolo format .txt filess
import argparse
import glob
import multiprocessing as mp
import os
import time
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
def setup_cfg(args):
    """Create a frozen detectron2 config from the parsed CLI arguments.

    Merges the config file and any ``--opts`` overrides, then applies the
    requested confidence threshold to every builtin model head.
    """
    cfg = get_cfg()
    # Load the YAML config, then apply command-line overrides on top.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # The same score threshold is applied to all builtin predictors.
    threshold = args.confidence_threshold
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = threshold
    cfg.freeze()
    return cfg
def get_parser():
    """Build the command-line argument parser for the batch-prediction script."""
    ap = argparse.ArgumentParser(description="Detectron2 predict whole folder")
    # Model configuration YAML (defaults to a quick-schedule test config).
    ap.add_argument(
        "--config-file",
        default="configs/quick_schedules/e2e_mask_rcnn_R_50_FPN_inference_acc_test.yaml",
        metavar="FILE",
        help="path to config file",
    )
    ap.add_argument("--dataset_folder", help="A folder of images to predict")
    ap.add_argument(
        "--output",
        help="A file or directory to save output visualizations. "
        "If not given, will show output in an OpenCV window.",
    )
    ap.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    # REMAINDER swallows everything after --opts as raw config overrides.
    ap.add_argument(
        "--opts",
        help="Modify model config options using the command-line",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return ap
if __name__ == "__main__":
    # "spawn" avoids sharing process state (e.g. CUDA contexts) with
    # forked workers.
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    logger = setup_logger()
    logger.info("Arguments: " + str(args))

    cfg = setup_cfg(args)

    demo = VisualizationDemo(cfg)
    # NOTE(review): this loop runs exactly once (unconditional break below);
    # the glob over args.dataset_folder is commented out and replaced by
    # hard-coded debug image paths -- presumably leftover debugging code.
    while True:
        #for each_img in glob.glob(os.path.join(args.dataset_folder, "*.jpg")):
        # use PIL, to be consistent with evaluation
        each_img = "/home/cambricon/Cambricon-MLU100/datasets_old/Tsinghua_traffic_sign/test_imgs/41372.jpg"
        each_img = "/home/cambricon/Cambricon-MLU100/datasets_old/COCO/interested_val/000000011197.jpg"
        img = read_image(each_img, format="BGR")
        start_time = time.time()
        predictions, visualized_output = demo.run_on_image(img)
        logger.info(
            "{}: detected {} instances in {:.2f}s".format(
                each_img, len(predictions["instances"]), time.time() - start_time
            )
        )

        pred_classes = predictions["instances"].pred_classes.cpu().numpy()
        pred_boxes = predictions["instances"].pred_boxes.tensor.cpu().numpy()
        records = []
        print(pred_boxes)
        for each_cls, each_box in zip(pred_classes, pred_boxes):
            # NOTE(review): `interested_cls` is never defined in this file;
            # this branch raises NameError if reached -- confirm its source.
            if int(each_cls) in interested_cls:
                cls_id = interested_cls.index(int(each_cls))
                [x1, y1, x2, y2] = each_box
                w, h = x2 - x1, y2 - y1
                (img_h, img_w) = predictions["instances"].image_size
                # Convert absolute xyxy boxes to normalized YOLO xywh format.
                x_center = (x1 + w/2)/img_w
                y_center = (y1 + h/2)/img_h
                w /= img_w
                h /= img_h
                records.append(" ".join([str(x) for x in [cls_id, x_center, y_center, w, h]]))
        break
        #each_txt = each_img.replace(".jpg", ".txt")
        #txt_writer = open(each_txt, "a+")
        #txt_writer.write("\n".join(records) + "\n")
1825228 | <filename>service/FunRep/ml.py
from .util import *
from .similarity import *
import os
import numpy as np
from . import lang
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from gensim import corpora, models, similarities
def cosine_kNearest(method, solutions, dictionary, model, k):
    """ Provided a single vector and a list of vectors with the same dimensions, finds closest
        based on cosine similarity. Features is just a list of features, so it also provides
        a list of which features contributed to the cosine
        Returns a list of tuples containing (score, solutions_index, similarityDictionary)
    """
    # One similarity score per candidate solution, in input order.
    similarities = [ gensim_lang_cossim(method, solution, dictionary, model) for solution in solutions]
    # since we need indexes
    sortedIndices = np.argsort(similarities)
    # argsort is ascending, so the last k entries are the k most similar.
    kNearestIndices = sortedIndices[-k:]
    results = []
    # Bag-of-words ids of the query; used to explain each match below.
    method_bow = dictionary.doc2bow(method.tokens)
    method_bow_keys = set([tup[0] for tup in method_bow])
    # Walk the neighbours from most to least similar.
    for i in kNearestIndices[::-1]:
        # NOTE(review): intersectingTokens is never used (superseded by
        # intersect_tokens below).
        intersectingTokens = []
        solution_bow = dictionary.doc2bow(solutions[i].tokens)
        solution_bow_keys = set([tup[0] for tup in solution_bow])
        # Tokens shared by query and neighbour explain the similarity score.
        intersect_keys = method_bow_keys & solution_bow_keys
        intersect_tokens = [dictionary[key] for key in intersect_keys]
        results.append((float(np.round(similarities[i], decimals = 4)), i, intersect_tokens))
    return results
def create_tfIdf_model(documents):
    """Train gensim language models for similarity over *documents*.

    :param documents: iterable of token lists
    :return: tuple of (gensim Dictionary, trained TfidfModel)
    """
    vocabulary = corpora.Dictionary(documents)
    # Drop the rarest words before building the bag-of-words corpus.
    vocabulary.filter_extremes(no_below=1)
    bow_corpus = [vocabulary.doc2bow(doc) for doc in documents]
    return vocabulary, models.TfidfModel(bow_corpus)
def proposed_kNearest(method, solutions, nl_dict, nl_model, k, weights):
    """Rank solutions with the proposed similarity kernel; return the top k.

    :return: list of (score, solution_index, explanation) tuples, most
        similar first; scores rounded to 3 decimals.
    """
    scored = [proposed_similarity(method, solution, nl_dict, nl_model, weights)
              for solution in solutions]
    # Ascending argsort over the scores; the tail holds the k best matches.
    order = np.argsort([score for score, _ in scored])
    return [(float(np.round(scored[i][0], decimals=3)), i, scored[i][1])
            for i in order[-k:][::-1]]
def concept_tag_kNearest(method, solutions, nl_dict, nl_model, k):
    """Rank solutions by concept-tag similarity; return the top k.

    :return: list of (score, solution_index, explanation) tuples, most
        similar first; scores rounded to 3 decimals.
    """
    scored = [concept_tags_similarity(method, solution, nl_dict, nl_model)
              for solution in solutions]
    # Ascending argsort over the scores; the tail holds the k best matches.
    order = np.argsort([score for score, _ in scored])
    return [(float(np.round(scored[i][0], decimals=3)), i, scored[i][1])
            for i in order[-k:][::-1]]
| StarcoderdataPython |
9735289 | <filename>src/contato.py
class Contato:
    """Simple contact-form record: name, e-mail, telephone and message.

    The original exposed each field through a pass-through property pair
    with no validation (Java-style getter/setter boilerplate); plain public
    attributes keep the exact same ``contato.name`` read/write interface
    with far less code. Reintroduce ``@property`` only if validation is
    ever needed.
    """

    def __init__(self) -> None:
        # All fields start as empty strings.
        self.name = ""
        self.email = ""
        self.phone = ""
        self.message = ""

    def __str__(self):
        """Render the contact as a four-line, human-readable summary."""
        return f'''Name: {self.name}\nEmail: {self.email}\nTelephone: {self.phone}\nMessage: {self.message}'''
# contato = Contato()
#
# contato.name = '<NAME>'
# contato.email = '<EMAIL>'
# contato.phone = '0800010101010'
# contato.message = 'Lorem ipsum dolor sit, amet consectetur adipisicing elit. Quasi aut rerum cupiditate vel dolore ipsa, doloremque error modi? In, facere provident maxime modi quia dignissimos iusto voluptatem. Iusto consequuntur at ea quaerat voluptates odio culpa a laboriosam dolores accusamus, ipsa quos sed vitae quam fugit autem, dolorum quod eius quo?'
#
# print(contato) | StarcoderdataPython |
3335008 | <reponame>enqack/price-blotter<gh_stars>0
from __future__ import print_function
import sys
from tabulate import tabulate
def print_title(s):
    """
    Print a string as a title with a strong underline

    Args:
        s: string to print as a title
    """
    underline = "=" * len(s)
    print(s)
    print(underline)
    print("")
def print_subtitle(s):
    """
    Print a string as a subtitle with an underline

    Args:
        s: string to print as a title
    """
    underline = "-" * len(s)
    print(s)
    print(underline)
    print("")
def print_entity(entity, title=None, headers=True):
    """
    Print an entity as a title along with the tabular representation
    of the entity.

    Args:
        entity: The entity to print; must expose ``fields()`` yielding
            objects with ``displayName``, ``value``, ``typeClass`` and
            ``typeName`` attributes.
        title: Optional title printed (underlined) above the table.
        headers: Unused; kept for backward compatibility. The table is
            always rendered without a header row (the original assigned
            a header list and immediately overwrote it with []).
    """
    if title is not None and len(title) > 0:
        print_title(title)

    body = []
    for field in entity.fields():
        name = field.displayName
        value = field.value
        # Collapse container values to short placeholders instead of
        # dumping their full contents.
        if field.typeClass.startswith("array"):
            value = "[{}]".format(len(field.value))
        elif field.typeClass.startswith("object"):
            value = "<{}>".format(field.typeName)
        body.append([name, value])

    # Write encoded bytes directly so unicode survives redirected or
    # ascii-configured stdout on both Python 2 and 3.
    getattr(sys.stdout, 'buffer', sys.stdout).write(
        tabulate(body, [], tablefmt="rst").encode('utf-8')
    )
    print("")
def price_to_string(price):
    """Render a price tick as ``<instrument> (<time>) bid: <b> ask: <a>``.

    Uses the top-of-book entries (first bid and first ask).
    """
    best_bid = price.bids[0].price
    best_ask = price.asks[0].price
    return "{} ({}) bid: {} ask: {}".format(
        price.instrument, price.time, best_bid, best_ask
    )
def heartbeat_to_string(heartbeat):
    """Render a heartbeat event as ``HEARTBEAT (<time>)``."""
    return "HEARTBEAT ({})".format(heartbeat.time)
6568489 | import unittest
from lbrynet.daemon.Daemon import sort_claim_results
class ClaimsComparatorTest(unittest.TestCase):
    """Checks that sort_claim_results orders claims by each tie-break field.

    Each test varies exactly one field (claim_id, height, name, txid, nout)
    while holding the others constant, then asserts ascending order of that
    field in the sorted output.
    """

    def test_sort_claim_results_when_sorted_by_claim_id(self):
        results = [{"height": 1, "name": "res", "claim_id": "ccc", "nout": 0, "txid": "fdsafa"},
                   {"height": 1, "name": "res", "claim_id": "aaa", "nout": 0, "txid": "w5tv8uorgt"},
                   {"height": 1, "name": "res", "claim_id": "bbb", "nout": 0, "txid": "aecfaewcfa"}]
        self.run_test(results, 'claim_id', ['aaa', 'bbb', 'ccc'])

    def test_sort_claim_results_when_sorted_by_height(self):
        results = [{"height": 1, "name": "res", "claim_id": "ccc", "nout": 0, "txid": "aecfaewcfa"},
                   {"height": 3, "name": "res", "claim_id": "ccc", "nout": 0, "txid": "aecfaewcfa"},
                   {"height": 2, "name": "res", "claim_id": "ccc", "nout": 0, "txid": "aecfaewcfa"}]
        self.run_test(results, 'height', [1, 2, 3])

    def test_sort_claim_results_when_sorted_by_name(self):
        results = [{"height": 1, "name": "res1", "claim_id": "ccc", "nout": 0, "txid": "aecfaewcfa"},
                   {"height": 1, "name": "res3", "claim_id": "ccc", "nout": 0, "txid": "aecfaewcfa"},
                   {"height": 1, "name": "res2", "claim_id": "ccc", "nout": 0, "txid": "aecfaewcfa"}]
        self.run_test(results, 'name', ['res1', 'res2', 'res3'])

    def test_sort_claim_results_when_sorted_by_txid(self):
        results = [{"height": 1, "name": "res1", "claim_id": "ccc", "nout": 2, "txid": "111"},
                   {"height": 1, "name": "res1", "claim_id": "ccc", "nout": 1, "txid": "222"},
                   {"height": 1, "name": "res1", "claim_id": "ccc", "nout": 3, "txid": "333"}]
        self.run_test(results, 'txid', ['111', '222', '333'])

    def test_sort_claim_results_when_sorted_by_nout(self):
        results = [{"height": 1, "name": "res1", "claim_id": "ccc", "nout": 2, "txid": "aecfaewcfa"},
                   {"height": 1, "name": "res1", "claim_id": "ccc", "nout": 1, "txid": "aecfaewcfa"},
                   {"height": 1, "name": "res1", "claim_id": "ccc", "nout": 3, "txid": "aecfaewcfa"}]
        self.run_test(results, 'nout', [1, 2, 3])

    def run_test(self, results, field, expected):
        # Shared assertion helper: sort and compare the projected field.
        actual = sort_claim_results(results)
        self.assertEqual(expected, [r[field] for r in actual])
# Allow running the tests directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()
11304634 | """
A frequent baseline is to take the first three sentences of the article, which works especially well with news articles.
For our pre-processed data this should be relatively easy to extract, since it is already sentence-split, and we can
therefore simply copy the first few lines.
However, we first need to verify that these are properly split, since I already found some files that don't really
conform to this expectation. This has to do with some of the weird naming conventions of Wikipedia, which involves a
frequent use of ";" or bracket notation, which spaCy unfortunately interprets as some kind of sentence splitter.
See analysis/fix_lead_sentences.py for several fixes, and please execute that file first. Note that this will change
the underlying "raw" files in slight ways.
In contrast to news articles, Wikipedia actually provides a full *section* that is acting as a kind of summary.
In this example, we extract not a fixed number of (3) sentences, but instead take all content before the first "content
section" (we call this lead-k). Note that this can either lead to more *or less* content, since articles vary in length.
However, we can assume that there exists a correlation between the lengths of the Wikipedia and Klexikon articles,
so this variable-length baseline should consistently perform better than a fixed cutoff.
Another baseline could be to take the same text as lead-k, but remove bracket content, since this is "difficult" text.
TL;DR: lead-3: Standard, first three sentences.
lead-k: Take the entire introduction section of a Wiki page
lead-k-simplified: Remove bracket content (round and square brackets) for "easier" text.
"""
import regex
import os
from .utils import directory_iterator
def generate_lead_3_summary(lines):
    """Return the first three content sentences, consuming entries from `lines`.

    Blank lines and section headings (lines starting with "==") are popped
    but never included in the summary.
    """
    summary = []
    while lines:
        if len(summary) == 3:
            break
        candidate = lines.pop(0)
        if not candidate.strip("\n "):
            continue
        if candidate.startswith("=="):
            continue
        summary.append(candidate)
    return summary
def generate_lead_k_summary(lines):
    """Return every non-blank line before the first "==" section heading.

    Consumes entries from `lines`, including the heading line that stops
    the collection.
    """
    intro = []
    while lines:
        current = lines.pop(0)
        if current.startswith("=="):
            # First real content section reached; the intro ends here.
            return intro
        if current.strip("\n "):
            intro.append(current)
    return intro
def generate_lead_k_simplified_summary(lines):
    """Return the intro section with bracketed content stripped.

    Like generate_lead_k_summary, but every kept line is passed through
    strip_bracket_content and runs of two or more whitespace characters are
    collapsed to a single space. Consumes entries from `lines`.
    """
    simplified = []
    while lines:
        current = lines.pop(0)
        # This indicates the first content section
        if current.startswith("=="):
            break
        if not current.strip("\n "):
            continue
        cleaned = strip_bracket_content(current)
        # TODO: This doesn't capture all resulting whitespace issues, since there is something like
        # "this particular model (in brackets), ...", which results in a space between "model" and ",".
        cleaned = regex.sub(r"\s{2,}", " ", cleaned)
        simplified.append(cleaned)
    return simplified
def strip_bracket_content(line: str) -> str:
    """Remove all (...) and [...] spans, including nested ones, from `line`.

    Iterates backwards so that spans can be cut out on the fly: every edit
    happens to the right of the current index, so indices of characters
    still to be visited stay valid.

    Fix: the square-bracket branch previously decremented
    ``parentheses_offset`` instead of ``square_bracket_offset``, which left
    the square-bracket depth stuck and broke every [...] pair after the
    first one in a line.
    """
    # Parentheses and square brackets are tracked independently.
    parentheses_close_idx = 0
    parentheses_offset = 0        # current (...) nesting depth
    square_bracket_close_idx = 0
    square_bracket_offset = 0     # current [...] nesting depth
    # Iterate backwards so we can remove content on the fly; the reversed
    # snapshot of `line` is taken once, which is safe because edits only
    # ever remove text to the right of the current index.
    backwards_idx_iter = range(len(line) - 1, -1, -1)
    for idx, char in zip(backwards_idx_iter, line[::-1]):
        if char == ")":
            if parentheses_offset == 0:
                parentheses_close_idx = idx + 1
            parentheses_offset += 1
        # Check for larger > 0 to work with nested parentheses
        elif char == "(" and parentheses_offset > 0:
            if parentheses_offset == 1:
                # cut out bracket part from line
                line = line[:idx] + line[parentheses_close_idx:]
            parentheses_offset -= 1
        if char == "]":
            if square_bracket_offset == 0:
                square_bracket_close_idx = idx + 1
            square_bracket_offset += 1
        elif char == "[" and square_bracket_offset > 0:
            if square_bracket_offset == 1:
                # cut out bracket part from line
                line = line[:idx] + line[square_bracket_close_idx:]
            # BUG FIX: was `parentheses_offset -= 1` in the original.
            square_bracket_offset -= 1
    return line
if __name__ == "__main__":
    # Fix: the last line carried fused dataset residue ("| StarcoderdataPython |")
    # which made it a syntax error, and the three baseline loops were
    # copy-paste triplicates; both are addressed by the helper below.
    def run_baseline(target_dir, summarizer):
        """Apply `summarizer` to every article and write the summary file."""
        os.makedirs(target_dir, exist_ok=True)
        for in_fp, out_fp in directory_iterator(target_dir=target_dir):
            with open(in_fp) as f:
                lines = f.readlines()
            summary = summarizer(lines)
            with open(out_fp, "w") as f:
                f.write("".join(summary))

    # lead-3: first three sentences.
    run_baseline("./data/baselines_all_articles/lead_3/", generate_lead_3_summary)
    # lead-k: the whole introduction section.
    run_baseline("./data/baselines_all_articles/lead_k", generate_lead_k_summary)
    # lead-k-simplified: introduction with bracketed content removed.
    run_baseline("./data/baselines_all_articles/lead_k_simplified",
                 generate_lead_k_simplified_summary)
import re
from functools import lru_cache
from validate_email import validate_email
import ipaddress
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import uuid
import struct
from jinja2 import Template
import time
import sys
printer = ""
# Well known regex mapping.
regex_map = {
"UNKNOWN": "",
"HTTP_HEADER_NAME": r'^:?[0-9a-zA-Z!#$%&\'*+-.^_|~\x60]+$',
"HTTP_HEADER_VALUE": r'^[^\u0000-\u0008\u000A-\u001F\u007F]*$',
"HEADER_STRING": r'^[^\u0000\u000A\u000D]*$'
}
class ValidatingMessage(object):
    """Hashable wrapper that keys a proto message by its message class.

    protoc-gen-validate defines one validate function per message class, so
    wrapping a message this way lets lru_cache reuse an already generated
    function for every instance of the same class.
    """

    def __init__(self, proto_message):
        self.DESCRIPTOR = proto_message.DESCRIPTOR

    def __hash__(self):
        return hash(self.DESCRIPTOR.full_name)

    def __eq__(self, other):
        # Two wrappers are equal iff they wrap the same message class.
        return (isinstance(other, ValidatingMessage)
                and self.DESCRIPTOR.full_name == other.DESCRIPTOR.full_name)
def validate(proto_message):
    """Return the (cached) generated validate function for this message's class."""
    cache_key = ValidatingMessage(proto_message)
    return _validate_inner(cache_key)
# Cache generated functions to avoid the performance issue caused by repeated proto messages,
# which generate the same functions repeatedly.
@lru_cache()
def _validate_inner(proto_message):
    """Generate, exec, and return the validate function for one message class.

    `proto_message` is a ValidatingMessage wrapper, so the lru_cache key is
    the message class rather than the instance.
    """
    # Render the per-class validator source from the Jinja templates.
    func = file_template(proto_message)
    global printer
    # Keep the generated source around so print_validate() can dump it.
    printer += func + "\n"
    exec(func)
    # exec() defines generate_validate in this frame; depending on the
    # Python version it may only be reachable through locals().
    try:
        return generate_validate
    except NameError:
        return locals()['generate_validate']
def print_validate(proto_message):
    """Return all generated validator source so far, with blank lines removed.

    The `proto_message` argument is unused; it is kept for API compatibility.
    """
    non_blank = (s for s in printer.splitlines(True) if s.strip())
    return "".join(non_blank)
def has_validate(field):
    """Return True if the field descriptor carries a validate.rules option."""
    options = field.GetOptions()
    if options is None:
        return False
    return any(descriptor.full_name == "validate.rules"
               for descriptor, _ in options.ListFields())
def byte_len(s):
    """Return the length of `s` in bytes: UTF-8 length for text, raw length otherwise.

    Fix: the original used a bare `except:`, which also swallows
    KeyboardInterrupt/SystemExit; only the expected failures are caught now.
    """
    try:
        return len(s.encode('utf-8'))
    except (AttributeError, UnicodeError):
        # bytes on Python 3 (no usable .encode) or an undecodable Python 2
        # byte string: fall back to the raw element count.
        return len(s)
def _validateHostName(host):
if not host:
return False
if len(host) > 253:
return False
if host[-1] == '.':
host = host[:-1]
for part in host.split("."):
if len(part) == 0 or len(part) > 63:
return False
# Host names cannot begin or end with hyphens
if part[0] == "-" or part[-1] == '-':
return False
for r in part:
if (r < 'A' or r > 'Z') and (r < 'a' or r > 'z') and (r < '0' or r > '9') and r != '-':
return False
return True
def _validateEmail(addr):
    """Validate an email address, accepting the "Display Name <addr>" form."""
    if '<' in addr and '>' in addr:
        # Unwrap "Display Name <local@host>" down to the bare address.
        addr = addr.split("<")[1].split(">")[0]
    if not validate_email(addr):
        return False
    if len(addr) > 254:
        return False
    pieces = addr.split("@")
    if len(pieces[0]) > 64:
        return False
    return _validateHostName(pieces[1])
def _has_field(message_pb, property_name):
# NOTE: As of proto3, HasField() only works for message fields, not for
# singular (non-message) fields. First try to use HasField and
# if it fails (with a ValueError) we manually consult the fields.
try:
return message_pb.HasField(property_name)
except:
all_fields = set([field.name for field in message_pb.DESCRIPTOR.fields])
return property_name in all_fields
def const_template(option_value, name):
    """Render code enforcing a `const` rule for string/bool/enum/bytes fields.

    Bytes constants need a py2/py3 split because the literal syntax differs.
    """
    const_tmpl = """{%- if str(o.string) and o.string.HasField('const') -%}
    if {{ name }} != \"{{ o.string['const'] }}\":
        raise ValidationFailed(\"{{ name }} not equal to {{ o.string['const'] }}\")
    {%- elif str(o.bool) and o.bool['const'] != "" -%}
    if {{ name }} != {{ o.bool['const'] }}:
        raise ValidationFailed(\"{{ name }} not equal to {{ o.bool['const'] }}\")
    {%- elif str(o.enum) and o.enum['const'] -%}
    if {{ name }} != {{ o.enum['const'] }}:
        raise ValidationFailed(\"{{ name }} not equal to {{ o.enum['const'] }}\")
    {%- elif str(o.bytes) and o.bytes.HasField('const') -%}
    {% if sys.version_info[0] >= 3 %}
    if {{ name }} != {{ o.bytes['const'] }}:
        raise ValidationFailed(\"{{ name }} not equal to {{ o.bytes['const'] }}\")
    {% else %}
    if {{ name }} != b\"{{ o.bytes['const'].encode('string_escape') }}\":
        raise ValidationFailed(\"{{ name }} not equal to {{ o.bytes['const'].encode('string_escape') }}\")
    {% endif %}
    {%- endif -%}
    """
    return Template(const_tmpl).render(sys = sys, o = option_value, name = name, str = str)
def in_template(value, name):
    """Render code enforcing `in` / `not_in` membership rules for a field."""
    in_tmpl = """
    {%- if value['in'] %}
    if {{ name }} not in {{ value['in'] }}:
        raise ValidationFailed(\"{{ name }} not in {{ value['in'] }}\")
    {%- endif -%}
    {%- if value['not_in'] %}
    if {{ name }} in {{ value['not_in'] }}:
        raise ValidationFailed(\"{{ name }} in {{ value['not_in'] }}\")
    {%- endif -%}
    """
    return Template(in_tmpl).render(value = value, name = name)
def string_template(option_value, name):
    """Render validation code for a string field's rules.

    Side effect: when a well_known_regex is configured, option_value.string.pattern
    is overwritten with the concrete pattern (the permissive HEADER_STRING one
    when header validation is not strict).

    Fix: the `hostname` rule's failure message previously said
    "is not a valid email" (copy/paste from the email branch).
    """
    if option_value.string.well_known_regex:
        known_regex_type = option_value.string.DESCRIPTOR.fields_by_name['well_known_regex'].enum_type
        regex_value = option_value.string.well_known_regex
        regex_name = known_regex_type.values_by_number[regex_value].name
        # Non-strict header validation downgrades to the permissive pattern.
        if regex_name in ["HTTP_HEADER_NAME", "HTTP_HEADER_VALUE"] and not option_value.string.strict:
            option_value.string.pattern = regex_map["HEADER_STRING"]
        else:
            option_value.string.pattern = regex_map[regex_name]
    str_templ = """
    {{ const_template(o, name) -}}
    {{ in_template(o.string, name) -}}
    {%- set s = o.string -%}
    {%- if s['len'] %}
    if len({{ name }}) != {{ s['len'] }}:
        raise ValidationFailed(\"{{ name }} length does not equal {{ s['len'] }}\")
    {%- endif -%}
    {%- if s['min_len'] %}
    if len({{ name }}) < {{ s['min_len'] }}:
        raise ValidationFailed(\"{{ name }} length is less than {{ s['min_len'] }}\")
    {%- endif -%}
    {%- if s['max_len'] %}
    if len({{ name }}) > {{ s['max_len'] }}:
        raise ValidationFailed(\"{{ name }} length is more than {{ s['max_len'] }}\")
    {%- endif -%}
    {%- if s['len_bytes'] %}
    if byte_len({{ name }}) != {{ s['len_bytes'] }}:
        raise ValidationFailed(\"{{ name }} length does not equal {{ s['len_bytes'] }}\")
    {%- endif -%}
    {%- if s['min_bytes'] %}
    if byte_len({{ name }}) < {{ s['min_bytes'] }}:
        raise ValidationFailed(\"{{ name }} length is less than {{ s['min_bytes'] }}\")
    {%- endif -%}
    {%- if s['max_bytes'] %}
    if byte_len({{ name }}) > {{ s['max_bytes'] }}:
        raise ValidationFailed(\"{{ name }} length is greater than {{ s['max_bytes'] }}\")
    {%- endif -%}
    {%- if s['pattern'] %}
    if re.search(r\'{{ s['pattern'] }}\', {{ name }}) is None:
        raise ValidationFailed(\"{{ name }} pattern does not match {{ s['pattern'] }}\")
    {%- endif -%}
    {%- if s['prefix'] %}
    if not {{ name }}.startswith(\"{{ s['prefix'] }}\"):
        raise ValidationFailed(\"{{ name }} does not start with prefix {{ s['prefix'] }}\")
    {%- endif -%}
    {%- if s['suffix'] %}
    if not {{ name }}.endswith(\"{{ s['suffix'] }}\"):
        raise ValidationFailed(\"{{ name }} does not end with suffix {{ s['suffix'] }}\")
    {%- endif -%}
    {%- if s['contains'] %}
    if not \"{{ s['contains'] }}\" in {{ name }}:
        raise ValidationFailed(\"{{ name }} does not contain {{ s['contains'] }}\")
    {%- endif -%}
    {%- if s['not_contains'] %}
    if \"{{ s['not_contains'] }}\" in {{ name }}:
        raise ValidationFailed(\"{{ name }} contains {{ s['not_contains'] }}\")
    {%- endif -%}
    {%- if s['email'] %}
    if not _validateEmail({{ name }}):
        raise ValidationFailed(\"{{ name }} is not a valid email\")
    {%- endif -%}
    {%- if s['hostname'] %}
    if not _validateHostName({{ name }}):
        raise ValidationFailed(\"{{ name }} is not a valid hostname\")
    {%- endif -%}
    {%- if s['address'] %}
    try:
        ipaddress.ip_address({{ name }})
    except ValueError:
        if not _validateHostName({{ name }}):
            raise ValidationFailed(\"{{ name }} is not a valid address\")
    {%- endif -%}
    {%- if s['ip'] %}
    try:
        ipaddress.ip_address({{ name }})
    except ValueError:
        raise ValidationFailed(\"{{ name }} is not a valid ip\")
    {%- endif -%}
    {%- if s['ipv4'] %}
    try:
        ipaddress.IPv4Address({{ name }})
    except ValueError:
        raise ValidationFailed(\"{{ name }} is not a valid ipv4\")
    {%- endif -%}
    {%- if s['ipv6'] %}
    try:
        ipaddress.IPv6Address({{ name }})
    except ValueError:
        raise ValidationFailed(\"{{ name }} is not a valid ipv6\")
    {%- endif %}
    {%- if s['uri'] %}
    url = urlparse.urlparse({{ name }})
    if not all([url.scheme, url.netloc, url.path]):
        raise ValidationFailed(\"{{ name }} is not a valid uri\")
    {%- endif %}
    {%- if s['uri_ref'] %}
    url = urlparse.urlparse({{ name }})
    if not all([url.scheme, url.path]) and url.fragment:
        raise ValidationFailed(\"{{ name }} is not a valid uri ref\")
    {%- endif -%}
    {%- if s['uuid'] %}
    try:
        uuid.UUID({{ name }})
    except ValueError:
        raise ValidationFailed(\"{{ name }} is not a valid UUID\")
    {%- endif -%}
    """
    return Template(str_templ).render(o = option_value, name = name, const_template = const_template, in_template = in_template)
def required_template(value, name):
    """Render code raising ValidationFailed when a `required` field is not set."""
    req_tmpl = """{%- if value['required'] -%}
    if not _has_field(p, \"{{ name.split('.')[-1] }}\"):
        raise ValidationFailed(\"{{ name }} is required.\")
    {%- endif -%}
    """
    return Template(req_tmpl).render(value = value, name = name)
def message_template(option_value, name, repeated = False):
    """Render validation code for an embedded message field.

    Emits the `required` check, honours `skip`, and otherwise recurses by
    calling validate() on the embedded message when it is present.
    """
    message_tmpl = """{%- if m.message %}
    {{- required_template(m.message, name) }}
    {%- endif -%}
    {%- if m.message and m.message['skip'] %}
    # Skipping validation for {{ name }}
    {%- else %}
    {% if repeated %}
    if {{ name }}:
    {% else %}
    if _has_field(p, \"{{ name.split('.')[-1] }}\"):
    {% endif %}
        embedded = validate(p.{{ name }})(p.{{ name }})
        if embedded is not None:
            return embedded
    {%- endif -%}
    """
    return Template(message_tmpl).render(m = option_value, name = name, required_template = required_template, repeated = repeated)
def bool_template(option_value, name):
    """Render validation code for a bool field (only the const rule applies)."""
    bool_tmpl = """
    {{ const_template(o, name) -}}
    """
    return Template(bool_tmpl).render(o = option_value, name = name, const_template = const_template)
def num_template(option_value, name, num):
    """Render validation code for a numeric field (const, in/not_in, bounds).

    Float consts are round-tripped through struct pack/unpack to match
    single-precision storage. When both an upper and a lower bound are set,
    their relative order decides between an inside-range and an
    outside-range (wrapping) check.
    """
    num_tmpl = """{%- if num.HasField('const') and str(o.float) == "" -%}
    if {{ name }} != {{ num['const'] }}:
        raise ValidationFailed(\"{{ name }} not equal to {{ num['const'] }}\")
    {%- endif -%}
    {%- if num.HasField('const') and str(o.float) != "" %}
    if {{ name }} != struct.unpack(\"f\", struct.pack(\"f\", ({{ num['const'] }})))[0]:
        raise ValidationFailed(\"{{ name }} not equal to {{ num['const'] }}\")
    {%- endif -%}
    {{ in_template(num, name) }}
    {%- if num.HasField('lt') %}
    {%- if num.HasField('gt') %}
    {%- if num['lt'] > num['gt'] %}
    if {{ name }} <= {{ num['gt'] }} or {{ name }} >= {{ num ['lt'] }}:
        raise ValidationFailed(\"{{ name }} is not in range {{ num['lt'], num['gt'] }}\")
    {%- else %}
    if {{ name }} >= {{ num['lt'] }} and {{ name }} <= {{ num['gt'] }}:
        raise ValidationFailed(\"{{ name }} is not in range {{ num['gt'], num['lt'] }}\")
    {%- endif -%}
    {%- elif num.HasField('gte') %}
    {%- if num['lt'] > num['gte'] %}
    if {{ name }} < {{ num['gte'] }} or {{ name }} >= {{ num ['lt'] }}:
        raise ValidationFailed(\"{{ name }} is not in range {{ num['lt'], num['gte'] }}\")
    {%- else %}
    if {{ name }} >= {{ num['lt'] }} and {{ name }} < {{ num['gte'] }}:
        raise ValidationFailed(\"{{ name }} is not in range {{ num['gte'], num['lt'] }}\")
    {%- endif -%}
    {%- else %}
    if {{ name }} >= {{ num['lt'] }}:
        raise ValidationFailed(\"{{ name }} is not lesser than {{ num['lt'] }}\")
    {%- endif -%}
    {%- elif num.HasField('lte') %}
    {%- if num.HasField('gt') %}
    {%- if num['lte'] > num['gt'] %}
    if {{ name }} <= {{ num['gt'] }} or {{ name }} > {{ num ['lte'] }}:
        raise ValidationFailed(\"{{ name }} is not in range {{ num['lte'], num['gt'] }}\")
    {%- else %}
    if {{ name }} > {{ num['lte'] }} and {{ name }} <= {{ num['gt'] }}:
        raise ValidationFailed(\"{{ name }} is not in range {{ num['gt'], num['lte'] }}\")
    {%- endif -%}
    {%- elif num.HasField('gte') %}
    {%- if num['lte'] > num['gte'] %}
    if {{ name }} < {{ num['gte'] }} or {{ name }} > {{ num ['lte'] }}:
        raise ValidationFailed(\"{{ name }} is not in range {{ num['lte'], num['gte'] }}\")
    {%- else %}
    if {{ name }} > {{ num['lte'] }} and {{ name }} < {{ num['gte'] }}:
        raise ValidationFailed(\"{{ name }} is not in range {{ num['gte'], num['lte'] }}\")
    {%- endif -%}
    {%- else %}
    if {{ name }} > {{ num['lte'] }}:
        raise ValidationFailed(\"{{ name }} is not lesser than or equal to {{ num['lte'] }}\")
    {%- endif -%}
    {%- elif num.HasField('gt') %}
    if {{ name }} <= {{ num['gt'] }}:
        raise ValidationFailed(\"{{ name }} is not greater than {{ num['gt'] }}\")
    {%- elif num.HasField('gte') %}
    if {{ name }} < {{ num['gte'] }}:
        raise ValidationFailed(\"{{ name }} is not greater than or equal to {{ num['gte'] }}\")
    {%- endif -%}
    """
    return Template(num_tmpl).render(o = option_value, name = name, num = num, in_template = in_template, str = str)
def dur_arr(dur):
    """Convert an iterable of Duration protos to a list of float seconds."""
    return [entry.seconds + (10**-9 * entry.nanos) for entry in dur]
def dur_lit(dur):
    """Convert a single Duration proto to float seconds."""
    return dur.seconds + (10**-9 * dur.nanos)
def duration_template(option_value, name, repeated = False):
    """Render validation code for a google.protobuf.Duration field.

    The duration is flattened to float seconds (`dur`), bound constants are
    precomputed into locals, then const/in/not_in and range checks follow;
    as in num_template, bound ordering picks inside- vs outside-range logic.
    """
    dur_tmpl = """
    {{- required_template(o.duration, name) }}
    {% if repeated %}
    if {{ name }}:
    {% else %}
    if _has_field(p, \"{{ name.split('.')[-1] }}\"):
    {% endif %}
        dur = {{ name }}.seconds + round((10**-9 * {{ name }}.nanos), 9)
        {%- set dur = o.duration -%}
        {%- if dur.HasField('lt') %}
        lt = {{ dur_lit(dur['lt']) }}
        {% endif %}
        {%- if dur.HasField('lte') %}
        lte = {{ dur_lit(dur['lte']) }}
        {% endif %}
        {%- if dur.HasField('gt') %}
        gt = {{ dur_lit(dur['gt']) }}
        {% endif %}
        {%- if dur.HasField('gte') %}
        gte = {{ dur_lit(dur['gte']) }}
        {% endif %}
        {%- if dur.HasField('const') %}
        if dur != {{ dur_lit(dur['const']) }}:
            raise ValidationFailed(\"{{ name }} is not equal to {{ dur_lit(dur['const']) }}\")
        {%- endif -%}
        {%- if dur['in'] %}
        if dur not in {{ dur_arr(dur['in']) }}:
            raise ValidationFailed(\"{{ name }} is not in {{ dur_arr(dur['in']) }}\")
        {%- endif -%}
        {%- if dur['not_in'] %}
        if dur in {{ dur_arr(dur['not_in']) }}:
            raise ValidationFailed(\"{{ name }} is not in {{ dur_arr(dur['not_in']) }}\")
        {%- endif -%}
        {%- if dur.HasField('lt') %}
        {%- if dur.HasField('gt') %}
        {%- if dur_lit(dur['lt']) > dur_lit(dur['gt']) %}
        if dur <= gt or dur >= lt:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['lt']), dur_lit(dur['gt']) }}\")
        {%- else -%}
        if dur >= lt and dur <= gt:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['gt']), dur_lit(dur['lt']) }}\")
        {%- endif -%}
        {%- elif dur.HasField('gte') %}
        {%- if dur_lit(dur['lt']) > dur_lit(dur['gte']) %}
        if dur < gte or dur >= lt:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['lt']), dur_lit(dur['gte']) }}\")
        {%- else -%}
        if dur >= lt and dur < gte:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['gte']), dur_lit(dur['lt']) }}\")
        {%- endif -%}
        {%- else -%}
        if dur >= lt:
            raise ValidationFailed(\"{{ name }} is not lesser than {{ dur_lit(dur['lt']) }}\")
        {%- endif -%}
        {%- elif dur.HasField('lte') %}
        {%- if dur.HasField('gt') %}
        {%- if dur_lit(dur['lte']) > dur_lit(dur['gt']) %}
        if dur <= gt or dur > lte:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['lte']), dur_lit(dur['gt']) }}\")
        {%- else -%}
        if dur > lte and dur <= gt:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['gt']), dur_lit(dur['lte']) }}\")
        {%- endif -%}
        {%- elif dur.HasField('gte') %}
        {%- if dur_lit(dur['lte']) > dur_lit(dur['gte']) %}
        if dur < gte or dur > lte:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['lte']), dur_lit(dur['gte']) }}\")
        {%- else -%}
        if dur > lte and dur < gte:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['gte']), dur_lit(dur['lte']) }}\")
        {%- endif -%}
        {%- else -%}
        if dur > lte:
            raise ValidationFailed(\"{{ name }} is not lesser than or equal to {{ dur_lit(dur['lte']) }}\")
        {%- endif -%}
        {%- elif dur.HasField('gt') %}
        if dur <= gt:
            raise ValidationFailed(\"{{ name }} is not greater than {{ dur_lit(dur['gt']) }}\")
        {%- elif dur.HasField('gte') %}
        if dur < gte:
            raise ValidationFailed(\"{{ name }} is not greater than or equal to {{ dur_lit(dur['gte']) }}\")
        {%- endif -%}
    """
    return Template(dur_tmpl).render(o = option_value, name = name, required_template = required_template, dur_lit = dur_lit, dur_arr = dur_arr, repeated = repeated)
def timestamp_template(option_value, name, repeated = False):
    """Render validation code for a google.protobuf.Timestamp field.

    The timestamp is flattened to float seconds (`ts`); supports const,
    in/not_in, absolute bounds, and the relative lt_now/gt_now/within rules
    evaluated against time.time() at validation time.
    """
    timestamp_tmpl = """
    {{- required_template(o.timestamp, name) }}
    {% if repeated %}
    if {{ name }}:
    {% else %}
    if _has_field(p, \"{{ name.split('.')[-1] }}\"):
    {% endif %}
        ts = {{ name }}.seconds + round((10**-9 * {{ name }}.nanos), 9)
        {%- set ts = o.timestamp -%}
        {%- if ts.HasField('lt') %}
        lt = {{ dur_lit(ts['lt']) }}
        {% endif -%}
        {%- if ts.HasField('lte') %}
        lte = {{ dur_lit(ts['lte']) }}
        {% endif -%}
        {%- if ts.HasField('gt') %}
        gt = {{ dur_lit(ts['gt']) }}
        {% endif -%}
        {%- if ts.HasField('gte') %}
        gte = {{ dur_lit(ts['gte']) }}
        {% endif -%}
        {%- if ts.HasField('const') %}
        if ts != {{ dur_lit(ts['const']) }}:
            raise ValidationFailed(\"{{ name }} is not equal to {{ dur_lit(ts['const']) }}\")
        {% endif %}
        {%- if ts['in'] %}
        if ts not in {{ dur_arr(ts['in']) }}:
            raise ValidationFailed(\"{{ name }} is not in {{ dur_arr(ts['in']) }}\")
        {%- endif %}
        {%- if ts['not_in'] %}
        if ts in {{ dur_arr(ts['not_in']) }}:
            raise ValidationFailed(\"{{ name }} is not in {{ dur_arr(ts['not_in']) }}\")
        {%- endif %}
        {%- if ts.HasField('lt') %}
        {%- if ts.HasField('gt') %}
        {%- if dur_lit(ts['lt']) > dur_lit(ts['gt']) %}
        if ts <= gt or ts >= lt:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['lt']), dur_lit(ts['gt']) }}\")
        {%- else -%}
        if ts >= lt and ts <= gt:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['gt']), dur_lit(ts['lt']) }}\")
        {%- endif -%}
        {%- elif ts.HasField('gte') %}
        {%- if dur_lit(ts['lt']) > dur_lit(ts['gte']) %}
        if ts < gte or ts >= lt:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['lt']), dur_lit(ts['gte']) }}\")
        {%- else -%}
        if ts >= lt and ts < gte:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['gte']), dur_lit(ts['lt']) }}\")
        {%- endif -%}
        {%- else -%}
        if ts >= lt:
            raise ValidationFailed(\"{{ name }} is not lesser than {{ dur_lit(ts['lt']) }}\")
        {%- endif -%}
        {%- elif ts.HasField('lte') %}
        {%- if ts.HasField('gt') %}
        {%- if dur_lit(ts['lte']) > dur_lit(ts['gt']) %}
        if ts <= gt or ts > lte:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['lte']), dur_lit(ts['gt']) }}\")
        {%- else -%}
        if ts > lte and ts <= gt:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['gt']), dur_lit(ts['lte']) }}\")
        {%- endif -%}
        {%- elif ts.HasField('gte') %}
        {%- if dur_lit(ts['lte']) > dur_lit(ts['gte']) %}
        if ts < gte or ts > lte:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['lte']), dur_lit(ts['gte']) }}\")
        {%- else -%}
        if ts > lte and ts < gte:
            raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['gte']), dur_lit(ts['lte']) }}\")
        {%- endif -%}
        {%- else -%}
        if ts > lte:
            raise ValidationFailed(\"{{ name }} is not lesser than or equal to {{ dur_lit(ts['lte']) }}\")
        {%- endif -%}
        {%- elif ts.HasField('gt') %}
        if ts <= gt:
            raise ValidationFailed(\"{{ name }} is not greater than {{ dur_lit(ts['gt']) }}\")
        {%- elif ts.HasField('gte') %}
        if ts < gte:
            raise ValidationFailed(\"{{ name }} is not greater than or equal to {{ dur_lit(ts['gte']) }}\")
        {%- elif ts.HasField('lt_now') %}
        now = time.time()
        {%- if ts.HasField('within') %}
        within = {{ dur_lit(ts['within']) }}
        if ts >= now or ts <= now - within:
            raise ValidationFailed(\"{{ name }} is not within range {{ dur_lit(ts['within']) }}\")
        {%- else %}
        if ts >= now:
            raise ValidationFailed(\"{{ name }} is not lesser than now\")
        {%- endif -%}
        {%- elif ts.HasField('gt_now') %}
        now = time.time()
        {%- if ts.HasField('within') %}
        within = {{ dur_lit(ts['within']) }}
        if ts <= now or ts >= now + within:
            raise ValidationFailed(\"{{ name }} is not within range {{ dur_lit(ts['within']) }}\")
        {%- else %}
        if ts <= now:
            raise ValidationFailed(\"{{ name }} is not greater than now\")
        {%- endif -%}
        {%- elif ts.HasField('within') %}
        now = time.time()
        within = {{ dur_lit(ts['within']) }}
        if ts >= now + within or ts <= now - within:
            raise ValidationFailed(\"{{ name }} is not within range {{ dur_lit(ts['within']) }}\")
        {%- endif -%}
    """
    return Template(timestamp_tmpl).render(o = option_value, name = name, required_template = required_template, dur_lit = dur_lit, dur_arr = dur_arr, repeated = repeated)
def wrapper_template(option_value, name, repeated = False):
    """Render validation for google.protobuf wrapper types (Int32Value etc.).

    Dispatches on which scalar rule set is present on the option, validates
    the wrapper's `.value` (indented into the presence check), and enforces
    `message.required` via the trailing else branch.
    """
    wrapper_tmpl = """
    {% if repeated %}
    if {{ name }}:
    {% else %}
    if p.HasField(\"{{ name[2:] }}\"):
    {% endif %}
    {%- if str(option_value.float) %}
    {{- num_template(option_value, name + ".value", option_value.float)|indent(4,True) -}}
    {% endif -%}
    {%- if str(option_value.double) %}
    {{- num_template(option_value, name + ".value", option_value.double)|indent(4,True) -}}
    {% endif -%}
    {%- if str(option_value.int32) %}
    {{- num_template(option_value, name + ".value", option_value.int32)|indent(4,True) -}}
    {% endif -%}
    {%- if str(option_value.int64) %}
    {{- num_template(option_value, name + ".value", option_value.int64)|indent(4,True) -}}
    {% endif -%}
    {%- if str(option_value.uint32) %}
    {{- num_template(option_value, name + ".value", option_value.uint32)|indent(4,True) -}}
    {% endif -%}
    {%- if str(option_value.uint64) %}
    {{- num_template(option_value, name + ".value", option_value.uint64)|indent(4,True) -}}
    {% endif -%}
    {%- if str(option_value.bool) %}
    {{- bool_template(option_value, name + ".value")|indent(4,True) -}}
    {% endif -%}
    {%- if str(option_value.string) %}
    {{- string_template(option_value, name + ".value")|indent(4,True) -}}
    {% endif -%}
    {%- if str(option_value.bytes) %}
    {{- bytes_template(option_value, name + ".value")|indent(4,True) -}}
    {% endif -%}
    {%- if str(option_value.message) and option_value.message['required'] %}
    else:
        raise ValidationFailed(\"{{ name }} is required.\")
    {%- endif %}
    """
    return Template(wrapper_tmpl).render(option_value = option_value, name = name, str = str, num_template = num_template, bool_template = bool_template, string_template = string_template, bytes_template = bytes_template, repeated = repeated)
def enum_values(field):
    """List the numeric values declared by the field's enum type."""
    return [value.number for value in field.enum_type.values]
def enum_template(option_value, name, field):
    """Render validation code for an enum field (const, in/not_in, defined_only)."""
    enum_tmpl = """
    {{ const_template(option_value, name) -}}
    {{ in_template(option_value.enum, name) -}}
    {% if option_value.enum['defined_only'] %}
    if {{ name }} not in {{ enum_values(field) }}:
        raise ValidationFailed(\"{{ name }} is not defined\")
    {% endif %}
    """
    return Template(enum_tmpl).render(option_value = option_value, name = name, const_template = const_template, in_template = in_template, field = field, enum_values = enum_values)
def any_template(option_value, name, repeated = False):
    """Render validation for a google.protobuf.Any field.

    Enforces `required` plus `in`/`not_in` membership of the Any's type_url,
    each guarded by a presence (or non-empty, when repeated) check.
    """
    any_tmpl = """
    {{- required_template(o, name) }}
    {%- if o['in'] %}
    {% if repeated %}
    if {{ name }}:
    {% else %}
    if _has_field(p, \"{{ name.split('.')[-1] }}\"):
    {% endif %}
        if {{ name }}.type_url not in {{ o['in'] }}:
            raise ValidationFailed(\"{{ name }} not in {{ o['in'] }}\")
    {%- endif %}
    {%- if o['not_in'] %}
    {% if repeated %}
    if {{ name }}:
    {% else %}
    if _has_field(p, \"{{ name.split('.')[-1] }}\"):
    {% endif %}
        if {{ name }}.type_url in {{ o['not_in'] }}:
            raise ValidationFailed(\"{{ name }} in {{ o['not_in'] }}\")
    {%- endif %}
    """
    return Template(any_tmpl).render(o = option_value.any, name = name, required_template = required_template, repeated = repeated)
def bytes_template(option_value, name):
    """Render validation code for a bytes field.

    Covers const/in, length bounds, ip/ipv4/ipv6 forms, and the
    pattern/contains/prefix/suffix rules with py2/py3 literal handling.

    NOTE(review): the py3 'pattern' failure message embeds the text
    "b['pattern'].encode('unicode-escape')" literally instead of
    interpolating it - looks like a template slip; left as-is here.
    """
    bytes_tmpl = """
    {{ const_template(o, name) -}}
    {{ in_template(o.bytes, name) -}}
    {%- if b['len'] %}
    if len({{ name }}) != {{ b['len'] }}:
        raise ValidationFailed(\"{{ name }} length does not equal {{ b['len'] }}\")
    {%- endif -%}
    {%- if b['min_len'] %}
    if len({{ name }}) < {{ b['min_len'] }}:
        raise ValidationFailed(\"{{ name }} length is less than {{ b['min_len'] }}\")
    {%- endif -%}
    {%- if b['max_len'] %}
    if len({{ name }}) > {{ b['max_len'] }}:
        raise ValidationFailed(\"{{ name }} length is more than {{ b['max_len'] }}\")
    {%- endif -%}
    {%- if b['ip'] %}
    try:
        ipaddress.ip_address({{ name }})
    except ValueError:
        raise ValidationFailed(\"{{ name }} is not a valid ip\")
    {%- endif -%}
    {%- if b['ipv4'] %}
    try:
        ipaddress.IPv4Address({{ name }})
    except ValueError:
        raise ValidationFailed(\"{{ name }} is not a valid ipv4\")
    {%- endif -%}
    {%- if b['ipv6'] %}
    try:
        ipaddress.IPv6Address({{ name }})
    except ValueError:
        raise ValidationFailed(\"{{ name }} is not a valid ipv6\")
    {%- endif -%}
    {% if b['pattern'] %}
    {% if sys.version_info[0] >= 3%}
    if re.search({{ b['pattern'].encode('unicode-escape') }}, {{ name }}) is None:
        raise ValidationFailed(\"{{ name }} pattern does not match b['pattern'].encode('unicode-escape')\")
    {% else %}
    if re.search(b\"{{ b['pattern'].encode('unicode-escape') }}\", {{ name }}) is None:
        raise ValidationFailed(\"{{ name }} pattern does not match \")
    {% endif %}
    {% endif %}
    {% if b['contains'] %}
    {% if sys.version_info[0] >= 3 %}
    if not {{ b['contains'] }} in {{ name }}:
        raise ValidationFailed(\"{{ name }} does not contain {{ b['contains'] }}\")
    {% else %}
    if not b\"{{ b['contains'].encode('string_escape') }}\" in {{ name }}:
        raise ValidationFailed(\"{{ name }} does not contain \")
    {% endif %}
    {% endif %}
    {% if b['prefix'] %}
    {% if sys.version_info[0] >= 3 %}
    if not {{ name }}.startswith({{ b['prefix'] }}):
        raise ValidationFailed(\"{{ name }} does not start with prefix {{ b['prefix'] }}\")
    {% else %}
    if not {{name}}.startswith(b\"{{ b['prefix'].encode('string_escape') }}\"):
        raise ValidationFailed(\"{{ name }} does not start with prefix {{ b['prefix'].encode('string_escape') }}\")
    {% endif %}
    {% endif %}
    {% if b['suffix'] %}
    {% if sys.version_info[0] >= 3 %}
    if not {{ name }}.endswith({{ b['suffix'] }}):
        raise ValidationFailed(\"{{ name }} does not end with suffix {{ b['suffix'] }}\")
    {% else %}
    if not {{name}}.endswith(b\"{{ b['suffix'].encode('string_escape') }}\"):
        raise ValidationFailed(\"{{ name }} does not end with suffix {{ b['suffix'] }}\")
    {% endif %}
    {% endif %}
    """
    return Template(bytes_tmpl).render(sys=sys,o = option_value, name = name, const_template = const_template, in_template = in_template, b = option_value.bytes)
def switcher_template(accessor, name, field, map = False):
    """Render validation code for a single scalar value.

    Dispatches on whichever rule sub-message of *accessor* is populated
    (probed via ``str(...)`` being non-empty) and delegates to the matching
    ``*_template`` helper.  *name* is the expression naming the value inside
    the generated code (e.g. ``'item'``); with ``map=True`` enum rules are
    resolved against the map entry's value field instead of *field*.

    Returns the rendered Python snippet as a string.
    """
    # NOTE(review): str(accessor.message) is tested twice below; the second
    # branch is unreachable -- presumably a copy/paste leftover.  Confirm
    # before removing.
    switcher_tmpl = """
{%- if str(accessor.float) %}
{{- num_template(accessor, name, accessor.float)|indent(4,True) -}}
{%- elif str(accessor.double) %}
{{- num_template(accessor, name, accessor.double)|indent(4,True) -}}
{%- elif str(accessor.int32) %}
{{- num_template(accessor, name, accessor.int32)|indent(4,True) -}}
{%- elif str(accessor.int64) %}
{{- num_template(accessor, name, accessor.int64)|indent(4,True) -}}
{%- elif str(accessor.uint32) %}
{{- num_template(accessor, name, accessor.uint32)|indent(4,True) -}}
{%- elif str(accessor.uint64) %}
{{- num_template(accessor, name, accessor.uint64)|indent(4,True) -}}
{%- elif str(accessor.sint32) %}
{{- num_template(accessor, name, accessor.sint32)|indent(4,True) -}}
{%- elif str(accessor.sint64) %}
{{- num_template(accessor, name, accessor.sint64)|indent(4,True) -}}
{%- elif str(accessor.fixed32) %}
{{- num_template(accessor, name, accessor.fixed32)|indent(4,True) -}}
{%- elif str(accessor.fixed64) %}
{{- num_template(accessor, name, accessor.fixed64)|indent(4,True) -}}
{%- elif str(accessor.sfixed32) %}
{{- num_template(accessor, name, accessor.sfixed32)|indent(4,True) -}}
{%- elif str(accessor.sfixed64) %}
{{- num_template(accessor, name, accessor.sfixed64)|indent(4,True) -}}
{%- elif str(accessor.bool) %}
{{- bool_template(accessor, name)|indent(4,True) -}}
{%- elif str(accessor.string) %}
{{- string_template(accessor, name)|indent(4,True) -}}
{%- elif str(accessor.enum) and map %}
{{- enum_template(accessor, name, field.message_type.fields[1])|indent(4,True) -}}
{%- elif str(accessor.enum) and not map %}
{{- enum_template(accessor, name, field)|indent(4,True) -}}
{%- elif str(accessor.duration) %}
{{- duration_template(accessor, name, True)|indent(4,True) -}}
{%- elif str(accessor.timestamp) %}
{{- timestamp_template(accessor, name, True)|indent(4,True) -}}
{%- elif str(accessor.message) %}
{{- message_template(accessor, name, True)|indent(4,True) -}}
{%- elif str(accessor.any) %}
{{- any_template(accessor, name, True)|indent(4,True) -}}
{%- elif str(accessor.message) %}
{{- message_template(accessor, name, True)|indent(4,True) -}}
{%- endif %}
"""
    return Template(switcher_tmpl).render(accessor = accessor, name = name, str = str, num_template = num_template, bool_template = bool_template, string_template = string_template, enum_template = enum_template, duration_template = duration_template, timestamp_template = timestamp_template, any_template = any_template, message_template = message_template, field = field, map = map)
def repeated_template(option_value, name, field):
    """Render validation code for a repeated field.

    Emits, as configured by ``option_value.repeated``: min/max item-count
    checks, a uniqueness check, recursive ``validate(item)`` calls for
    message-typed elements (unless ``items.message.skip`` is set), and
    per-item scalar rules dispatched through :func:`switcher_template`.

    Args:
        option_value: the ``validate.rules`` option message, or None when
            only the recursive message validation applies.
        name: expression naming the repeated value in the generated code.
        field: the protobuf FieldDescriptor for the repeated field.

    Returns:
        The rendered Python snippet as a string.
    """
    rep_tmpl = """
{%- if o and o.repeated['min_items'] %}
    if len({{ name }}) < {{ o.repeated['min_items'] }}:
        raise ValidationFailed(\"{{ name }} needs to contain at least {{ o.repeated['min_items'] }} items\")
{%- endif %}
{%- if o and o.repeated['max_items'] %}
    if len({{ name }}) > {{ o.repeated['max_items'] }}:
        raise ValidationFailed(\"{{ name }} needs to contain at most {{ o.repeated['max_items'] }} items\")
{%- endif %}
{%- if o and o.repeated['unique'] %}
    seen = set()
    for item in {{ name }}:
        if item in seen:
            raise ValidationFailed(\"{{ name }} must contain unique items. %s has been repeated.\" %item)
        else:
            seen.add(item)
{%- endif %}
{%- if message_type %}
    for item in {{ name }}:
{%- if o and o.repeated and o.repeated.items.message.skip %}
        pass
{% else %}
        validate(item)(item)
{% endif %}
{%- endif %}
{%- if o and str(o.repeated['items']) %}
    for item in {{ name }}:
{%- set accessor = o.repeated['items'] -%}
{{ switcher_template(accessor, 'item', field) }}
        pass
{%- endif %}
"""
    return Template(rep_tmpl).render(o = option_value, name = name, message_type = field.message_type, str = str, field = field, switcher_template = switcher_template)
def is_map(field):
    """Return True when *field* is a protobuf map field.

    Protobuf compiles ``map<K, V>`` fields into a repeated synthetic entry
    message with exactly two fields named "key" and "value"; this checks
    for that shape.

    Args:
        field: a protobuf FieldDescriptor.

    Returns:
        bool: True for map fields, False otherwise.  (The original
        returned whatever falsy operand short-circuited the ``and`` chain,
        e.g. ``None``; callers only rely on truthiness, so an explicit
        bool is backward-compatible.)
    """
    # label == 3 is FieldDescriptor.LABEL_REPEATED.
    if field.label != 3 or not field.message_type:
        return False
    entry_fields = field.message_type.fields
    return (len(entry_fields) == 2
            and entry_fields[0].name == "key"
            and entry_fields[1].name == "value")
def map_template(option_value, name, field):
    """Render validation code for a protobuf map field.

    Emits, as configured by ``option_value.map``: min/max pair-count
    checks, an unconditional failure for the unsupported ``no_sparse``
    rule, per-key scalar rules (inline dispatch below), per-value rules
    via :func:`switcher_template`, and -- when no rules are set but the
    value type is a message -- recursive ``validate`` calls on each value.

    Args:
        option_value: the ``validate.rules`` option message, or None.
        name: expression naming the map value in the generated code.
        field: the protobuf FieldDescriptor for the map field.

    Returns:
        The rendered Python snippet as a string.
    """
    map_tmpl = """
{%- if o and o.map['min_pairs'] %}
    if len({{ name }}) < {{ o.map['min_pairs'] }}:
        raise ValidationFailed(\"{{ name }} needs to contain at least {{ o.map['min_pairs'] }} items\")
{%- endif %}
{%- if o and o.map['max_pairs'] %}
    if len({{ name }}) > {{ o.map['max_pairs'] }}:
        raise ValidationFailed(\"{{ name }} can contain at most {{ o.map['max_pairs'] }} items\")
{%- endif %}
{%- if o and o.map['no_sparse'] -%}
    raise UnimplementedException(\"no_sparse validation is not implemented because protobuf maps cannot be sparse in Python\")
{%- endif %}
{%- if o and (str(o.map['keys']) or str(o.map['values']))%}
    for key in {{ name }}:
{%- set keys = o.map['keys'] -%}
{%- set values = o.map['values'] -%}
{%- if str(keys.double) %}
{{- num_template(keys, 'key', keys.double)|indent(4,True) -}}
{%- elif str(keys.int32) %}
{{- num_template(keys, 'key', keys.int32)|indent(4,True) -}}
{%- elif str(keys.int64) %}
{{- num_template(keys, 'key', keys.int64)|indent(4,True) -}}
{%- elif str(keys.uint32) %}
{{- num_template(keys, 'key', keys.uint32)|indent(4,True) -}}
{%- elif str(keys.uint64) %}
{{- num_template(keys, 'key', keys.uint64)|indent(4,True) -}}
{%- elif str(keys.sint32) %}
{{- num_template(keys, 'key', keys.sint32)|indent(4,True) -}}
{%- elif str(keys.sint64) %}
{{- num_template(keys, 'key', keys.sint64)|indent(4,True) -}}
{%- elif str(keys.fixed32) %}
{{- num_template(keys, 'key', keys.fixed32)|indent(4,True) -}}
{%- elif str(keys.fixed64) %}
{{- num_template(keys, 'key', keys.fixed64)|indent(4,True) -}}
{%- elif str(keys.sfixed32) %}
{{- num_template(keys, 'key', keys.sfixed32)|indent(4,True) -}}
{%- elif str(keys.sfixed64) %}
{{- num_template(keys, 'key', keys.sfixed64)|indent(4,True) -}}
{%- elif str(keys.bool) %}
{{- bool_template(keys, 'key')|indent(4,True) -}}
{%- elif str(keys.string) %}
{{- string_template(keys, 'key')|indent(4,True) -}}
{%- endif %}
{%- set values = o.map['values'] -%}
{{ switcher_template(values, name +'[key]', field, True) }}
        pass
{%- elif field.message_type.fields[1].message_type %}
    for key in {{ name }}:
        validate({{ name }}[key])({{ name }}[key])
{%- endif %}
"""
    return Template(map_tmpl).render(o = option_value, name = name, message_type = field.message_type, str = str, field = field, switcher_template = switcher_template, num_template = num_template, string_template = string_template, bool_template = bool_template)
def rule_type(field):
    """Render the validation snippet for one message field.

    Scalar fields (no message type) dispatch on whichever ``validate.rules``
    sub-message is populated.  Message-typed fields first try rule-based
    templates (duration/timestamp/wrappers/any/...), then fall back to
    structural handling: well-known google.protobuf types are skipped,
    maps and repeated fields recurse, and plain messages get a recursive
    ``validate`` call.  Fields without applicable rules render to "".

    Args:
        field: a protobuf FieldDescriptor of the message being generated.

    Returns:
        The rendered Python snippet as a string (possibly empty).
    """
    # Generated code validates attribute "p.<field>" of the message object.
    name = "p."+ field.name
    if has_validate(field) and field.message_type is None:
        for option_descriptor, option_value in field.GetOptions().ListFields():
            if option_descriptor.full_name == "validate.rules":
                # Order matters: the first populated rule group wins.
                if str(option_value.string):
                    return string_template(option_value, name)
                elif str(option_value.message):
                    return message_template(option_value, field.name)
                elif str(option_value.bool):
                    return bool_template(option_value, name)
                elif str(option_value.float):
                    return num_template(option_value, name, option_value.float)
                elif str(option_value.double):
                    return num_template(option_value, name, option_value.double)
                elif str(option_value.int32):
                    return num_template(option_value, name, option_value.int32)
                elif str(option_value.int64):
                    return num_template(option_value, name, option_value.int64)
                elif str(option_value.uint32):
                    return num_template(option_value, name, option_value.uint32)
                elif str(option_value.uint64):
                    return num_template(option_value, name, option_value.uint64)
                elif str(option_value.sint32):
                    return num_template(option_value, name, option_value.sint32)
                elif str(option_value.sint64):
                    return num_template(option_value, name, option_value.sint64)
                elif str(option_value.fixed32):
                    return num_template(option_value, name, option_value.fixed32)
                elif str(option_value.fixed64):
                    return num_template(option_value, name, option_value.fixed64)
                elif str(option_value.sfixed32):
                    return num_template(option_value, name, option_value.sfixed32)
                elif str(option_value.sfixed64):
                    return num_template(option_value, name, option_value.sfixed64)
                elif str(option_value.enum):
                    return enum_template(option_value, name, field)
                elif str(option_value.bytes):
                    return bytes_template(option_value, name)
                elif str(option_value.repeated):
                    return repeated_template(option_value, name, field)
                elif str(option_value.map):
                    return map_template(option_value, name, field)
                elif str(option_value.required):
                    return required_template(option_value, name)
    if field.message_type:
        for option_descriptor, option_value in field.GetOptions().ListFields():
            if option_descriptor.full_name == "validate.rules":
                if str(option_value.duration):
                    return duration_template(option_value, name)
                elif str(option_value.timestamp):
                    return timestamp_template(option_value, name)
                elif str(option_value.float) or str(option_value.int32) or str(option_value.int64) or \
                     str(option_value.double) or str(option_value.uint32) or str(option_value.uint64) or \
                     str(option_value.bool) or str(option_value.string) or str(option_value.bytes):
                    # Scalar rules on a message field mean a well-known
                    # wrapper type (e.g. google.protobuf.Int32Value).
                    return wrapper_template(option_value, name)
                elif str(option_value.message) != "":
                    return message_template(option_value, field.name)
                elif str(option_value.any):
                    return any_template(option_value, name)
                elif str(option_value.repeated):
                    return repeated_template(option_value, name, field)
                elif str(option_value.map):
                    return map_template(option_value, name, field)
                elif str(option_value.required):
                    return required_template(option_value, name)
        # No explicit rules: handle by structure.
        if field.message_type.full_name.startswith("google.protobuf"):
            return ""
        elif is_map(field):
            return map_template(None, name, field)
        elif field.label == 3:
            return repeated_template(None, name, field)
        else:
            return message_template(None, field.name)
    return ""
def file_template(proto_message):
    """Render the module-level ``generate_validate`` function for one
    protobuf message, emitting per-field validation via :func:`rule_type`.

    Messages carrying the ``validate.ignored`` file option render nothing;
    messages with ``validate.disabled`` get a body that returns None
    immediately.  Oneofs are checked for presence and, when the oneof has
    the ``required`` option, a missing member raises ValidationFailed.

    Bug fix: the loop detecting ``validate.ignored`` stored its result in
    ``ns.found`` while the guard tested ``ns.ignored``, so ignored
    messages were generated anyway.  The loop now sets ``ns.ignored``.

    Args:
        proto_message: the protobuf message instance whose DESCRIPTOR
            drives generation.

    Returns:
        The rendered Python source as a string (empty when ignored).
    """
    file_tmp = """
{%- set ns = namespace(ignored=false) -%}
{%- for option_descriptor, option_value in p.DESCRIPTOR.GetOptions().ListFields() -%}
{%- if option_descriptor.full_name == "validate.ignored" and option_value -%}
{%- set ns.ignored = true -%}
{%- endif -%}
{%- endfor -%}
{%- if not ns.ignored -%}
# Validates {{ p.DESCRIPTOR.name }}
def generate_validate(p):
{%- for option_descriptor, option_value in p.DESCRIPTOR.GetOptions().ListFields() %}
{%- if option_descriptor.full_name == "validate.disabled" and option_value %}
    return None
{%- endif -%}
{%- endfor -%}
{%- for oneof in p.DESCRIPTOR.oneofs %}
    present = False
{%- for field in oneof.fields %}
    if _has_field(p, \"{{ field.name }}\"):
        present = True
{{ rule_type(field)|indent(4,True) }}
{%- endfor %}
{% for option in oneof.GetOptions().ListFields() %}
{% if option[0].name == 'required' and option[1] %}
    if not present:
        raise ValidationFailed(\"Oneof {{ oneof.name }} is required\")
{% endif %}
{% endfor %}
{%- endfor %}
{%- for field in p.DESCRIPTOR.fields -%}
{%- if not field.containing_oneof %}
{{ rule_type(field) -}}
{%- endif %}
{%- endfor %}
    return None
{%- endif -%}
"""
    return Template(file_tmp).render(rule_type = rule_type, p = proto_message)
class UnimplementedException(Exception):
    """Raised by generated code for validation rules that have no Python
    implementation (e.g. map ``no_sparse``)."""
    pass
class ValidationFailed(Exception):
    """Raised by generated ``generate_validate`` code when a field value
    violates one of its validate.rules constraints."""
    pass
| StarcoderdataPython |
182892 | <reponame>JuniorCru/coastline
import json
from . import config
# Currently EnvState class is just a dict sub-class.
#
# A dict-like interface may be all we need, but we use a sub-class so we
# can change construction later, possibly use an IoC container or type
# checking, etc.
class EnvState(dict):
    """Per-environment state: a dict of values plus bookkeeping about
    where it came from.

    Behaves exactly like a dict, with two extra attributes
    (``file_path`` and ``env_name``) and the guarantee that every
    instance -- even an empty one -- is truthy.
    """

    def __init__(self, *args, file_path=None, env_name=None, **kwargs):
        self.file_path = file_path
        self.env_name = env_name
        super().__init__(*args, **kwargs)

    def __repr__(self):
        """Dict repr wrapped with the class name and the extra attributes."""
        return '{}({}, file_path={!r}, env_name={!r})'.format(
            type(self).__name__,
            super().__repr__(),
            self.file_path,
            self.env_name,
        )

    def __bool__(self):
        """An EnvState is always truthy, even when it holds no keys."""
        return True
def envstate_from_path(state_path, env_name):
    """Load the state subtree for *env_name* from the JSON file at *state_path*.

    Args:
        state_path: path to a JSON file mapping environment names to dicts.
        env_name: which environment's subtree to extract; a missing name
            yields an empty EnvState rather than an error.

    Returns:
        EnvState populated with the subtree and tagged with its origin.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original `json.load(open(path))` left closing to the GC.
    with open(state_path) as f:
        json_tree = json.load(f)
    subtree = json_tree.get(env_name, {})
    return EnvState(subtree, file_path=state_path, env_name=env_name)
def save_envstate_to_path(envstate, state_path, env_name):
    """Write *envstate* into the JSON file at *state_path* under *env_name*.

    The file must already exist and contain a JSON object; other
    environments in it are preserved, and the whole tree is rewritten
    indented with sorted keys.

    Raises:
        ValueError: if *state_path* or *env_name* is falsy.
    """
    for label, value in (("state_path", state_path), ("env_name", env_name)):
        if not value:
            raise ValueError("Need a valid {}, not {!r}".format(label, value))

    with open(state_path, 'r') as fh:
        tree = json.load(fh)
    tree[env_name] = dict(envstate)
    with open(state_path, 'w') as fh:
        json.dump(tree, fh, indent=4, sort_keys=True)
| StarcoderdataPython |
8109564 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.interpolate import griddata

# Plot a wifi signal-strength heatmap from a logged roomba survey run.
# The .npz holds robot positions ("points") and signal readings ("values").
npzfile = np.load("/home/henry/dev/roomba/logger/2018-08-05_123107_wifi.npz")
points = npzfile["points"] * 11.8 # convert to mm
values = npzfile["values"]

# Bounding box of the surveyed area (mm), used for the interpolation grid
# and the image extent below.
minx=np.amin(points, axis=0)[0]
maxx=np.amax(points, axis=0)[0]
miny=np.amin(points, axis=0)[1]
maxy=np.amax(points, axis=0)[1]

fig, ax = plt.subplots()
#ax.set_title(u"Floor map", fontsize=20)
# fixed aspect ratio
ax.set_aspect('equal')
# set background colors
fig.patch.set_facecolor('#300030')
ax.set_facecolor('#300030')
ax.grid(which="both", zorder=5)
# Gridlines every 500 mm on both axes.
ax.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(500))
ax.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(500))
# Hide the axes frame by painting the spines the background color.
ax.spines['bottom'].set_color('#300030')
ax.spines['top'].set_color('#300030')
ax.spines['right'].set_color('#300030')
ax.spines['left'].set_color('#300030')
#ax.xaxis.label.set_color('grey')
ax.tick_params(axis='x', colors='grey')
ax.tick_params(axis='y', colors='grey')
plt.set_cmap("inferno")

# Interpolate the scattered samples onto a 100x100 grid, draw the heatmap,
# and overlay the raw sample positions as black dots.
grid_x, grid_y = np.mgrid[minx:maxx:100j, miny:maxy:100j]
raw = griddata(points, values, (grid_x, grid_y), method='linear')
#im = plt.imshow(raw, interpolation='lanczos', vmax=abs(raw).max(), vmin=-abs(raw).max())
plt.imshow(raw.T, origin='lower', extent=(minx,maxx,miny,maxy), alpha=.8, zorder=10)
plt.plot(points[:,0], points[:,1], 'k.', ms=5, zorder=11)
#plt.colorbar()
plt.show() | StarcoderdataPython |
1990947 | <reponame>salmanAndroidDev/shoply
from decimal import Decimal
from django.conf import settings
from coupons.models import Coupon
from shop.models import Product
class Cart:
    """Session-backed shopping cart.

    The cart lives in the Django session under
    ``settings.CART_SESSION_ID`` as a mapping of product-id strings to
    ``{'quantity': int, 'price': str}`` entries; the applied coupon id is
    tracked separately under the ``'coupon_id'`` session key.
    """

    def __init__(self, request):
        self.session = request.session
        stored = self.session.get(settings.CART_SESSION_ID)
        if not stored:
            # First touch: create an empty cart in the session.
            stored = self.session[settings.CART_SESSION_ID] = {}
        self.cart = stored
        self.coupon_id = self.session.get('coupon_id')

    def add(self, product, quantity=1, override=False):
        """Add *quantity* of *product*; replace the count when *override*."""
        key = str(product.id)
        entry = self.cart.setdefault(
            key, {'quantity': 0, 'price': str(product.price)})
        entry['quantity'] = quantity if override else entry['quantity'] + quantity
        self.save()

    def save(self):
        """Flag the session as dirty so Django persists the cart."""
        self.session.modified = True

    def remove(self, product):
        """Drop *product* from the cart if it is present."""
        if self.cart.pop(str(product.id), None) is not None:
            self.save()

    def __iter__(self):
        """Yield cart entries enriched with the Product and line totals."""
        snapshot = self.cart.copy()
        for product in Product.objects.filter(id__in=self.cart.keys()):
            snapshot[str(product.id)]['product'] = product
        for entry in snapshot.values():
            entry['price'] = Decimal(entry['price'])
            entry['total_price'] = entry['price'] * entry['quantity']
            yield entry

    def __len__(self):
        """Total number of items across every cart entry."""
        return sum(entry['quantity'] for entry in self.cart.values())

    def get_total_price(self):
        """Sum of price * quantity over all entries (Decimal; 0 if empty)."""
        return sum(
            Decimal(entry['price']) * entry['quantity']
            for entry in self.cart.values())

    def clear(self):
        """Remove the whole cart from the session."""
        del self.session[settings.CART_SESSION_ID]
        self.save()

    @property
    def coupon(self):
        """The Coupon for the stored coupon id, or None."""
        if self.coupon_id:
            try:
                return Coupon.objects.get(id=self.coupon_id)
            except Coupon.DoesNotExist:
                pass
        return None

    def get_discount(self):
        """Amount to subtract from the total under the current coupon."""
        if self.coupon:
            return (self.coupon.discount / Decimal(100)) * self.get_total_price()
        return Decimal(0)

    def get_total_price_after_discount(self):
        """Cart total with the coupon discount applied."""
        return self.get_total_price() - self.get_discount()
| StarcoderdataPython |
3430822 | from .scicar import scicar_mouse_kidney
| StarcoderdataPython |
11211107 | <filename>source/bot/database/models/guild.py<gh_stars>0
import sqlalchemy
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from bot.database import tableBase
class Guild(tableBase):
    """One Discord guild's configuration row: team/general channel ids,
    the teams message, its emoji labels, and the related players."""
    __tablename__ = 'guild'
    # Discord snowflake ids are stored as strings.
    guild_id = Column(String(20), primary_key=True)
    team_1_channel_id = Column(String(20))
    team_2_channel_id = Column(String(20))
    general_channel_id = Column(String(20))
    teams_message_id = Column(String(20))
    team_1_emoji_text = Column(String(50), default=':one:')
    team_2_emoji_text = Column(String(50), default=':two:')
    players = relationship('ScrimPlayer')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): these assignments run after super().__init__() and
        # therefore overwrite any emoji values passed in kwargs, duplicating
        # the Column defaults above -- confirm this is intentional.
        self.team_1_emoji_text = ':one:'
        self.team_2_emoji_text = ':two:'
| StarcoderdataPython |
1882536 | import re, argparse
def replace_include(infile, outfile):
    """Expand ``!include <file>`` directives in *infile* into *outfile*.

    Each line containing ``!include`` is replaced by the referenced
    file's lines joined with a single space (preserving the original
    joining behavior).  Included content is not scanned recursively.

    Args:
        infile: path of the markdown file to expand.
        outfile: path the expanded document is written to.
    """
    with open(infile, 'r') as data:
        lines = data.readlines()
    # Iterate a snapshot with explicit indices.  The original used
    # lines.index(line) while mutating the very list it iterated, which
    # finds the *first* matching line and can patch the wrong occurrence
    # (or re-match replaced content) when directives repeat.
    for idx, line in enumerate(list(lines)):
        if re.search("!include", line):
            # Filename is the last space-separated token, newline stripped.
            extfile = line.split(' ')[-1].split('\n')[0]
            with open(extfile, 'r') as ext:
                lines[idx] = ' '.join(ext.readlines())
    with open(outfile, 'w') as newfile:
        newfile.write(''.join(lines))
def _main():
    """CLI entry point: parse arguments and expand include directives."""
    parser = argparse.ArgumentParser(description='Pandoc filter for include statement')
    # required=True: the original accepted a missing --infile (default None)
    # and then crashed inside open(None); fail with a clear argparse error
    # instead.
    parser.add_argument('--infile', required=True, help='Give input filename.')
    parser.add_argument('--outfile', default='new.md', help='Give output filename.')
    args = parser.parse_args()
    print("Arguments:", args)
    replace_include(args.infile, args.outfile)


# Guard the entry point so importing this module no longer parses
# sys.argv and runs the filter as a side effect.
if __name__ == '__main__':
    _main()
| StarcoderdataPython |
8171795 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-20 16:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10): creates the Change model, which
    records a path plus an operation string per Commit (reverse accessor
    ``commit.changes``)."""

    dependencies = [
        ('mfctracker', '0003_branch_is_trunk'),
    ]

    operations = [
        migrations.CreateModel(
            name='Change',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('path', models.CharField(max_length=1024)),
                ('operation', models.CharField(max_length=8)),
                ('commit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='changes', to='mfctracker.Commit')),
            ],
        ),
    ]
| StarcoderdataPython |
# Analysis script: train an L1 logistic classifier on embedding features,
# keep only the dimensions it finds useful, then evaluate agglomerative
# clustering within each gold partition.
path_lbls = "../data/test_labels.npy"
path_inpt = "../trial/kudo18.npy"

############
# analysis #
############

from sklearn import metrics
import matplotlib.pyplot as plt
import numpy as np

inpt = np.load(path_inpt)

# load labels
# Labels are "topic-stance-reason" strings; build several index encodings:
# topic alone, (topic, stance), (topic, reason), and the full triple.
true_lbl = np.load(path_lbls)
topic, stance, reason = zip(*[lbl.split("-") for lbl in true_lbl])
labels = tuple(zip(topic, stance, reason))
stance = tuple(zip(topic, stance))
reason = tuple(zip(topic, reason))
top2idx = {lbl: idx for idx, lbl in enumerate(sorted(set(topic)))}
top_ids = np.array([top2idx[top] for top in topic])
stn2idx = {stn: idx for idx, stn in enumerate(sorted(set(stance)))}
stn_ids = np.array([stn2idx[stn] for stn in stance])
rsn2idx = {rsn: idx for idx, rsn in enumerate(sorted(set(reason)))}
rsn_ids = np.array([rsn2idx[rsn] for rsn in reason])
lbl2idx = {lbl: idx for idx, lbl in enumerate(sorted(set(labels)))}
lbl_ids = np.array([lbl2idx[lbl] for lbl in labels])

########################################
# pick the partition for analysis
gold, gold2idx = stn_ids, stn2idx
########################################

###################
# classifiication #
###################

from sklearn.linear_model import LogisticRegression

# train logistic classifier with l1 regularization
log_reg = LogisticRegression(penalty='l1', C=0.1, solver='liblinear', multi_class='auto')
log_reg.fit(inpt, gold)
pred = log_reg.predict(inpt)
# NOTE(review): this f1 score is computed but never stored or printed.
metrics.f1_score(gold, pred, average='weighted')
conf = metrics.confusion_matrix(gold, pred)
plt.imshow(conf, cmap='gray')
plt.show()

# pick dimensions that the classifier sees as useful
# (L1 zeroes out coefficients; keep dims with any non-zero weight.)
use_dim = np.array([idx for idx,val in enumerate(log_reg.coef_.T) if not np.allclose(val, 0.0)])
np.save("../data/useful_dimension.npy",use_dim)
# reduce the dimensions of the clustering input
new_inpt = np.array([instance[use_dim] for instance in inpt])

############################
# embedding visualizatioin #
############################
#from sklearn.manifold import TSNE
#
#x = new_inpt
#x = new_inpt / np.linalg.norm(new_inpt, axis=-1, keepdims=True)
#
#e = TSNE(n_components=2).fit_transform(x)
#
#plt.scatter(e[:,0], e[:,1], c=gold)
#plt.show()

##############
# clustering #
##############

from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import adjusted_rand_score, v_measure_score

# clustering by selection (topic, stance, reason)
# Each selector restricts to the matching instances; selector shape decides
# which label component(s) to match on.
for sel in sorted(gold2idx):
    if type(sel) == str:
        points = np.array([i for i,(t,_,_) in enumerate(labels) if t == sel])
    elif len(sel[1]) == 1:
        points = np.array([i for i,(t,s,_) in enumerate(labels) if (t,s) == sel])
    else:
        points = np.array([i for i,(t,_,r) in enumerate(labels) if (t,r) == sel])
    # use instances with only the earlier determined useful dimensions
    x, y = new_inpt[points], lbl_ids[points]
    agglo_cluster = AgglomerativeClustering(n_clusters=len(set(y)))
    agglo_cluster.fit(x)
    pred_lbl = agglo_cluster.labels_
    ars = adjusted_rand_score(y, pred_lbl) # [-1,1], 1 is perfect, 0 is random
    v_msr = v_measure_score(y, pred_lbl) # [0,1], 1 is perfect
    print("{:<20s}\tARS: {:.4F}\tV_MSR: {:.4f}".format(str(sel), ars, v_msr))
| StarcoderdataPython |
4964623 | <reponame>bacuarabrasil/krenak<filename>api/krenak_api/apps/common/models/__init__.py
from .core import CoreManager, CoreModel, CoreQuerySet
__all__ = ["CoreModel", "CoreManager", "CoreQuerySet"]
| StarcoderdataPython |
1712148 | <filename>mtp_noms_ops/apps/security/views/views.py<gh_stars>1-10
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
class PolicyChangeView(TemplateView):
    """Page describing the Nov 2nd 2020 policy changes.

    Both the page title and the template are chosen from the
    ``NOVEMBER_SECOND_CHANGES_LIVE`` feature flag: a warning page before
    go-live, an informational page after.
    """
    # Class body runs at import time, so `title` is fixed when the module
    # loads; the template (below) is re-evaluated on every request.
    if settings.NOVEMBER_SECOND_CHANGES_LIVE:
        title = _('What the Nov 2nd policy changes mean')
    else:
        title = _('Policy changes made on Nov 2nd 2020')

    def get_template_names(self):
        """Pick the info or warning template from the feature flag."""
        if settings.NOVEMBER_SECOND_CHANGES_LIVE:
            return ['security/policy-change-info.html']
        else:
            return ['security/policy-change-warning.html']
class FAQView(TemplateView):
    """Static FAQ page; exposes the FIU contact address to the template."""
    template_name = 'security/faq.html'
    title = _('What do you need help with?')

    def get_context_data(self, **kwargs):
        """Add ``fiu_email`` (from settings) to the template context."""
        context = super().get_context_data(**kwargs)
        context['fiu_email'] = settings.FIU_EMAIL
        return context
| StarcoderdataPython |
1724670 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 28 16:39:43 2018
Covariance Matrix Decomposition
@author: Satie
"""
import numpy as np
from numpy.linalg import matrix_rank
from multiprocessing import Pool
class Decomposer(object):
    def __init__(self, data, preavg, delta):
        """Set up the decomposer.

        Args:
            data: (N, p) array of observations.
            preavg: (p, p) pre-averaged covariance estimate to decompose.
            delta: observation frequency.
        """
        self.__data = data
        self.__sigma = preavg
        self.__N, self.__p = data.shape  # N by p
        self.delta = delta  # observation frequency
def __l1_norm_off_diag(self, matrix):
matrix_off = np.abs(matrix - np.diag(np.diag(matrix)))
return matrix_off.sum()
def __Obj(self, S, F, T, lam):
err = 0.5 * (np.linalg.norm(S - F - T, 'fro') ** 2)
reg_F = lam[0] * np.linalg.norm(F, 'nuc')
reg_T = lam[1] * self.__l1_norm_off_diag(T)
return err + reg_F + reg_T
def __Lag(self, S, F, T, F_cp, T_cp, LAM1, LAM2, mu, lam):
penF = ((2 * mu[0]) ** (-1)) * (np.linalg.norm(F - F_cp, 'fro') ** 2)
penT = ((2 * mu[1]) ** (-1)) * (np.linalg.norm(T - T_cp, 'fro') ** 2)
duaF = (LAM1 * (F_cp - F)).sum()
duaT = (LAM2 * (T_cp - T)).sum()
return self.__Obj(S, F, T, lam) + penF + penT + duaF + duaT
def Proj_SoftThres(self, matrix, eps):
_p, _q = matrix.shape
assert _p == _q
diag = matrix * np.eye(_p)
off = matrix - diag
truc = np.abs(off) - eps
sign = np.sign(off)
truc[truc < 0] = 0
sign[truc < 0] = 0
return sign * truc + diag
def Proj_SVT(self, matrix, eps):
_p, _q = matrix.shape
assert _p == _q
s, V = np.linalg.eig(matrix)
s = s - eps
s[s <= 0] = 0
return np.dot(V, np.dot(np.diag(s), V.T))
def Proj_PD(self, matrix, eps):
_p, _q = matrix.shape
assert _p == _q
e, V = np.linalg.eig(matrix)
# Handle complex eigenvalues due to numerical rounding.
if isinstance(e[0], complex):
if np.allclose(matrix, matrix.T):
e = np.real(e)
V = np.real(V)
else:
raise ValueError('Proj_PD: Complex eigens encountered.')
e[e < eps] = eps
return np.dot(V, np.dot(np.diag(e), V.T))
    def GCV_Alt(self, L, S, lam1, eps, dof = 1):
        """Model-selection scores for a candidate (low-rank, sparse) pair.

        Args:
            L: low-rank component estimate.
            S: sparse component estimate.
            lam1: nuclear-norm penalty level used to obtain L.
            eps: floor applied to the eigenvalues of (sigma - S) before
                the degrees-of-freedom computation.
            dof: 1 or 2, selecting which DoF formula to use.

        Returns:
            (gcv, aic) tuple.  gcv is set to the sentinel 999999 when the
            total degrees of freedom reach p^2 (denominator would be
            non-positive).
        """
        # Temp version
        def DoF1(eigens, lam):
            # Divided-differences style DoF for the eigenvalue thresholding,
            # counting only directions with positive eigenvalues of L.
            b, V = np.linalg.eig(L)
            b = np.real(b)
            sig2 = np.outer(eigens, np.ones(len(eigens)))
            deno = (sig2 - sig2.T)
            # Guard the reciprocal: diagonal and coincidentally-equal
            # eigenvalue pairs are mapped to inf so they contribute zero.
            np.fill_diagonal(deno, np.inf)
            deno[deno == 0] = np.inf
            assert np.all(deno != 0)
            deno = deno ** -1
            cons = np.sqrt(eigens) * (np.sqrt(eigens) - lam)
            dof = 1 + 2 * cons * deno.sum(0)
            ind = (b > 0).astype('int')
            return np.dot(1 + 2 * dof, ind)
        def DoF2(eigens, lam):
            ind = (eigens >= lam).astype('int'); res = 0
            for i in range(ind.sum()):
                res1 = 0; res2 = 0
                for jj in range(len(eigens)):
                    if jj != i:
                        # NOTE(review): `eigens[i] / eigens[i] - eigens[jj]`
                        # divides before subtracting; a ratio
                        # eigens[i] / (eigens[i] - eigens[jj]) may have been
                        # intended -- confirm against the derivation.
                        res2 += eigens[i] / eigens[i] - eigens[jj]
                    if jj > ind.sum():
                        res1 += eigens[jj] / eigens[i] - eigens[jj]
                res += res1 - 2 * lam * res2
            return (2 * len(eigens) - ind.sum()) * ind.sum() + res
        s, V = np.linalg.eig(self.__sigma - S)
        s[s < eps] = eps; s = np.real(s) # Note s is already sorted.
        if dof == 1:
            df1 = DoF1(s, lam1)
        else:
            df1 = DoF2(s, lam1)
        # Total DoF = low-rank part + one per non-zero sparse entry.
        tot_df = df1 + np.count_nonzero(S)
        err = np.linalg.norm(self.__sigma - L - S, 'fro')
        aic = np.log(err) + (2 * tot_df) / (self.__p ** 2)
        if self.__p ** 2 <= tot_df:
            gcv = 999999
        else:
            gcv = err / (self.__p ** 2 - tot_df)
        return gcv, aic
def __Initializer(self, S, mode, lam, verbose = False):
_p, _q = S.shape
if mode == 'SP':
res = self.Proj_SoftThres(S, lam)
elif mode == 'LR':
res = self.Proj_SVT(S, lam)
elif mode == 'random':
res = np.random.uniform(S.min(), S.max(), size = _p * _p)
res = res.reshape((_p, _p))
res = 0.5 * (res + res.T)
else:
res = np.zeros_like(S)
return res
    def Solver_ADMM(self, lam, verbose = 2, args_dict = {}):
        """ADMM solver for the low-rank + sparse split of the stored
        covariance estimate.

        Alternates projected updates for the low-rank component F
        (positive-semidefinite projection, then eigenvalue thresholding)
        and the sparse component T (positive-definite projection, then
        off-diagonal soft-thresholding), each with a dual-variable step.

        Args:
            lam: (lam1, lam2) nuclear-norm and l1 penalty levels.
            verbose: 0 silent; >=1 prints convergence; >=2 prints per-epoch
                diagnostics.
            args_dict: overrides for the defaults in ``params`` below
                (tol, max_iter, eps, mu, monitor).

        Returns:
            (F, T): the low-rank and sparse estimates.
        """
        # def Solver_ADMM(self, params):
        #     lam, verbose, args_dict = map(params.get, ['lam', 'verbose', 'args_dict'])
        params = {'tol': 1e-4, 'max_iter': 200,
                  'eps': 1e-4, 'mu': (2, 2), 'monitor': 1}
        params.update(args_dict)
        _S = self.__sigma
        _p, _q = _S.shape
        assert _p == _q
        # Initialize.
        if verbose >= 2:
            print('------------------------------------')
            print('Solver_ADMM: Initializing.')
        lam1, lam2 = lam
        mu1, mu2 = params['mu']
        LAM1 = np.zeros((_p, _p))
        LAM2 = np.zeros((_p, _p))
        F = self.__Initializer(0.5 * _S, 'LR', lam1)
        T = self.__Initializer(0.5 * _S, 'SP', lam2)
        epoch = 1; converge = False; err = np.linalg.norm(_S - F - T, 'fro')
        while (epoch <= params['max_iter']) and (not converge):
            if verbose == 2:
                print('Epoch: {}'.format(epoch))
            last_F = F; last_T = T
            if params['monitor'] >= 2:
                # monitor >= 2 tracks eigen-decomposition drift instead of
                # the cheaper Frobenius step size.
                last_e, last_V = np.linalg.eig(last_F)
            # Low-rank: Projection.
            F_cp = self.Proj_PD(F + mu1 * LAM1, 0.)
            # Low-rank: Main update.
            F = (1 + mu1) ** (-1) * self.Proj_SVT(mu1 * (_S - T - LAM1) + F_cp, lam1 * mu1)
            # Low-rank: Dual update.
            LAM1 = LAM1 + mu1 ** (-1) * (F - F_cp)
            # Sparse: Projection.
            T_cp = self.Proj_PD(T + mu2 * LAM2, params['eps'])
            # Sparse: Main update.
            T = (1 + mu2) ** (-1) * self.Proj_SoftThres(mu2 * (_S - F - LAM2) + T_cp, lam2 * mu2)
            # Sparse: Dual update.
            LAM2 = LAM2 - mu2 ** (-1) * (T_cp - T)
            # Post processing.
            epoch += 1
            if params['monitor'] >= 2:
                cur_e, cur_V = np.linalg.eig(F)
                err = np.linalg.norm(cur_e - last_e, 2)
                err += np.linalg.norm(cur_V - last_V, 'fro')
                err += np.linalg.norm(T - last_T, 'fro')
            else:
                err = np.linalg.norm(last_F + last_T - F - T, 'fro')
            if verbose >= 2:
                print('Solver_ADMM: Frobenius error: {}.'.format(
                        np.linalg.norm(_S - F - T, 'fro')))
                print('Solver_ADMM: Objective value: {}.'.format(
                        self.__Obj(_S, F, T, lam)))
                print('Solver_ADMM: Lag value: {}.'.format(
                        self.__Lag(_S, F, T, F_cp, T_cp, LAM1, LAM2, params['mu'], lam)))
            if np.abs(err) < params['tol']:
                converge = True
                if verbose:
                    print('Solver_ADMM: Converged with achieved tol {},'.format(err))
        if epoch > params['max_iter'] and verbose:
            print('Solver_ADMM: Maximum iteration {} reached.'.format(params['max_iter']))
        return F, T
def __D(self, F, T, F_next, T_next):
return np.linalg.norm(F - F_next, 'fro') + np.linalg.norm(T - T_next, 'fro')
def Estimator(self, params_lam, params_gam, solver_args_dict = {},
verbose = 3, grid = False, use = 'GCV'):
# Fixme: Reduce args
solver_args = {'tol': 1e-3, 'max_iter': 100, 'eps': 1e-4, 'monitor': 1}
solver_args.update(solver_args_dict); solver_args['eps']
# Low rank penalty
if params_lam is None:
params_lam = (-2, 2, 20)
# Sparse penalty
if params_gam is None:
params_gam = (-2, 2, 20)
lam_try = 10 ** (np.linspace(*params_lam))
gam_try = 10 ** (np.linspace(*params_gam))
nl, ng = (params_lam[2], params_gam[2])
D = {'GCV': {'lam1': np.zeros(nl), 'lam2': np.zeros(ng)},
'AIC': {'lam1': np.zeros(nl), 'lam2': np.zeros(ng)}}
# Deal with use
if len(use) == 4:
dof = int(use[-1])
use = use[:3]
assert dof <= 2 and dof > 0
assert use in ['GCV', 'AIC']
elif use in ['GCV', 'AIC']:
dof = 1
else:
raise ValueError()
# Select lambda
for l in range(nl):
if verbose:
print("Estimator: Tuning lambda {} / {}".format(l + 1, nl))
lam_cur = (lam_try[l], gam_try[0])
f, t = self.Solver_ADMM(lam_cur, verbose - 1, solver_args)
D['GCV']['lam1'][l], D['AIC']['lam1'][l] = self.GCV_Alt(f, t, lam_cur[0], eps, dof)
if verbose:
print("Estimator: Current GCV {}".format(D['GCV']['lam1'][l]))
print("Estimator: Current AIC {}".format(D['AIC']['lam1'][l]))
lam_final = lam_try[D[use]['lam1'].argmin()] # Fixme: possible duplication
# Select gamma for lam_final
for g in range(ng):
if verbose:
print("Estimator: Tuning gamma {} / {}".format(g + 1, nl))
lam_cur = (lam_final, gam_try[g])
f, t = self.Solver_ADMM(lam_cur, verbose - 1, solver_args)
D['GCV']['lam2'][g], D['AIC']['lam2'][g] = self.GCV_Alt(f, t, lam_cur[0], eps)
if verbose:
print("Estimator: Current GCV {}".format(D['GCV']['lam2'][g]))
print("Estimator: Current AIC {}".format(D['AIC']['lam2'][g]))
gam_final = gam_try[D[use]['lam2'].argmin()]
lam_final_pair = (lam_final, gam_final)
if verbose:
print("Finalizing Results.")
print("Selected lam: {}".format(lam_final_pair))
print("Best {}: {}".format(use, D[use]['lam2'].min()))
# Finalize
f, t = self.Solver_ADMM(lam_final_pair, verbose - 1, solver_args)
if grid:
return f, t, D, lam_final_pair
else:
return f, t
    def Estimator_Parallel_FullGrid(self, params_lam, params_gam, npool,
                                    solver_args_dict = {}, grid = False,
                                    use = 'GCV'):
        """Exhaustive M x N penalty grid search, parallelized over gamma.

        For each lambda value, the ADMM solver is run across the whole
        gamma grid in a multiprocessing Pool of *npool* workers, and the
        selection criteria are tabulated; the global minimizer is then
        refit serially.

        Args:
            params_lam: (lo, hi, n) exponents; lambda grid is 10**linspace.
                None selects (-2, 2, 20).
            params_gam: same for the gamma grid.
            npool: number of worker processes per lambda sweep.
            solver_args_dict: overrides for the ADMM solver settings.
            grid: when True, also return the score grids and chosen pair.
            use: 'GCV' or 'AIC', optionally suffixed with the DoF formula
                index ('GCV1', 'AIC2', ...).

        Returns:
            (F, T), or (F, T, D, (lam, gam)) when grid=True.
        """
        # M by N grid search. Parallel computation over N for each m in [M].
        solver_args = {'tol': 1e-3, 'max_iter': 100, 'eps': 1e-4, 'monitor': 1}
        solver_args.update(solver_args_dict); eps = solver_args['eps']
        # Low rank penalty
        if params_lam is None:
            params_lam = (-2, 2, 20)
        # Sparse penalty
        if params_gam is None:
            params_gam = (-2, 2, 20)
        lam_try = 10 ** (np.linspace(*params_lam))
        gam_try = 10 ** (np.linspace(*params_gam))
        D = {'GCV': np.zeros((len(lam_try), len(gam_try))),
             'AIC': np.zeros((len(lam_try), len(gam_try)))}
        # Deal with use: an optional trailing digit picks the DoF formula.
        if len(use) == 4:
            dof = int(use[-1])
            use = use[:3]
            assert dof <= 2 and dof > 0
            assert use in ['GCV', 'AIC']
        elif use in ['GCV', 'AIC']:
            dof = 1
        else:
            raise ValueError()
        for l in range(len(lam_try)):
            # Fresh pool per lambda; workers run the solver silently.
            pool = Pool(npool)
            iteg = [((lam_try[l], g), 0, solver_args) for g in gam_try]
            ft = pool.starmap(self.Solver_ADMM, iteg)
            pool.terminate()
            for g in range(len(gam_try)):
                D['GCV'][l, g], D['AIC'][l, g] = self.GCV_Alt(ft[g][0], ft[g][1],
                                                              lam_try[l], eps, dof)
        best_pos = np.unravel_index(D[use].argmin(), D[use].shape)
        lam_final_pair = (lam_try[best_pos[0]], gam_try[best_pos[1]])
        print("Finalizing Results.")
        print("Selected lam: {}".format(lam_final_pair))
        print("Best {}: {}".format(use, D[use][best_pos]))
        print('-' * 30)
        f, t = self.Solver_ADMM(lam_final_pair, 2, solver_args)
        if grid:
            return f, t, D, lam_final_pair
        else:
            return f, t
def Estimator_Parallel_Simplified(self, params_lam, params_gam, npool,
                                  solver_args_dict=None, grid=False,
                                  use='GCV'):
    """Simplified M + N grid search over the two penalty parameters.

    First selects lam over M candidates (holding gam at its smallest
    candidate), then selects gam over N candidates at the chosen lam;
    each sweep runs in parallel over `npool` workers. Cheaper than the
    full M*N grid at the cost of ignoring lam/gam interactions.

    :param params_lam: (start, stop, num) exponents for the low-rank grid
        (log10 space), or None for the default (-2, 2, 20)
    :param params_gam: (start, stop, num) exponents for the sparsity grid
        (log10 space), or None for the default (-2, 2, 20)
    :param npool: number of worker processes
    :param solver_args_dict: optional overrides for the ADMM solver settings
    :param grid: when True, also return the per-axis score vectors and pair
    :param use: 'GCV' or 'AIC', optionally suffixed with a dof digit
        (e.g. 'AIC2')
    :return: (f, t) or (f, t, D, lam_final_pair) when `grid` is True
    """
    # Fresh dict per call: the original used a mutable default argument.
    solver_args = {'tol': 1e-3, 'max_iter': 100, 'eps': 1e-4, 'monitor': 1}
    solver_args.update(solver_args_dict or {})
    eps = solver_args['eps']
    # Low rank penalty grid
    if params_lam is None:
        params_lam = (-2, 2, 20)
    # Sparse penalty grid
    if params_gam is None:
        params_gam = (-2, 2, 20)
    lam_try = 10 ** (np.linspace(*params_lam))
    gam_try = 10 ** (np.linspace(*params_gam))
    nl, ng = (params_lam[2], params_gam[2])
    D = {'GCV': {'lam1': np.zeros(nl), 'lam2': np.zeros(ng)},
         'AIC': {'lam1': np.zeros(nl), 'lam2': np.zeros(ng)}}
    # Decode `use`: an optional trailing digit selects the dof variant.
    if len(use) == 4:
        dof = int(use[-1])
        use = use[:3]
        assert dof <= 2 and dof > 0
        assert use in ['GCV', 'AIC']
    elif use in ['GCV', 'AIC']:
        dof = 1
    else:
        raise ValueError(
            "use must be 'GCV' or 'AIC', optionally with a trailing dof "
            "digit (1 or 2); got {!r}".format(use))
    # Sweep 1: select lambda with gamma fixed at its smallest candidate.
    # Context managers guarantee pool cleanup even on a failed solve.
    with Pool(npool) as pool:
        ite1 = [((l, gam_try[0]), 0, solver_args) for l in lam_try]
        ft = pool.starmap(self.Solver_ADMM, ite1)
    for l in range(nl):
        D['GCV']['lam1'][l], D['AIC']['lam1'][l] = self.GCV_Alt(ft[l][0], ft[l][1], lam_try[l], eps, dof)
    lam_final = lam_try[D[use]['lam1'].argmin()]  # Fixme: possible duplication
    # Sweep 2: select gamma with lambda fixed at lam_final.
    with Pool(npool) as pool:
        ite2 = [((lam_final, g), 0, solver_args) for g in gam_try]
        ft = pool.starmap(self.Solver_ADMM, ite2)
    for g in range(ng):
        D['GCV']['lam2'][g], D['AIC']['lam2'][g] = self.GCV_Alt(ft[g][0], ft[g][1], lam_final, eps, dof)
    gam_final = gam_try[D[use]['lam2'].argmin()]
    lam_final_pair = (lam_final, gam_final)
    # Finalize: refit once at the selected penalty pair (verbose solve).
    print("Finalizing Results.")
    print("Selected lam: {}".format(lam_final_pair))
    print("Best {}: {}".format(use, D[use]['lam2'].min()))
    print('-' * 30)
    f, t = self.Solver_ADMM(lam_final_pair, 2, solver_args)
    if grid:
        return f, t, D, lam_final_pair
    else:
        return f, t
| StarcoderdataPython |
9620396 | <filename>huxley/api/views/register.py
# Copyright (c) 2011-2017 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
from django.db import transaction
from rest_framework import generics, response, status
from rest_framework.authentication import SessionAuthentication
from rest_framework.exceptions import PermissionDenied
from huxley.api.serializers import CreateUserSerializer, RegistrationSerializer
from huxley.core.models import Conference, School
class Register(generics.GenericAPIView):
    """Open-registration endpoint.

    Atomically creates a user (whose serializer also produces a school)
    and that school's conference registration from a single POST whose
    body contains both a 'user' and a 'registration' object.
    """
    authentication_classes = (SessionAuthentication, )
    # One serializer per payload section of the request body.
    serializer_classes = {
        'user': CreateUserSerializer,
        'registration': RegistrationSerializer
    }

    def post(self, request, *args, **kwargs):
        # Standard DRF entry point; defers to create().
        return self.create(request, *args, **kwargs)

    def create(self, request, *args, **kwargs):
        # Registration is only allowed while the current conference is open.
        if Conference.get_current().open_reg:
            return self.register(request, *args, **kwargs)
        raise PermissionDenied('Conference registration is closed.')

    def register(self, request, *args, **kwargs):
        """Validate and persist the user + registration pair atomically."""
        user_data = request.data['user']
        registration_data = request.data['registration']
        with transaction.atomic():
            user_serializer = self.serializer_classes['user'](data=user_data)
            user_is_valid = user_serializer.is_valid()
            if not user_is_valid:
                # Also validate the registration payload (without a school,
                # which only exists after the user is saved) so the client
                # receives the combined error set in a single response.
                registration_serializer = self.serializer_classes[
                    'registration'](data=registration_data)
                registration_serializer.is_valid()
                errors = registration_serializer.errors
                errors.update(user_serializer.errors)
                return response.Response(
                    errors, status=status.HTTP_400_BAD_REQUEST)
            user_serializer.save()
            # Assumes CreateUserSerializer exposes a nested school with an
            # 'id' after save — confirm against the serializer definition.
            school_id = user_serializer.data['school']['id']
            registration_data['school'] = school_id
            registration_serializer = self.serializer_classes['registration'](
                data=registration_data)
            # raise_exception rolls the whole transaction back on failure.
            registration_serializer.is_valid(raise_exception=True)
            registration_serializer.save()

            data = {'user': user_serializer.data,
                    'registration': registration_serializer.data}
            return response.Response(data, status=status.HTTP_200_OK)
| StarcoderdataPython |
5093907 | import mmh3 # type: ignore
from neo3.core import serialization, types, Size as s, utils
from neo3 import vm
class StorageKey(serialization.ISerializable):
    """Key of a contract storage entry: a contract id plus raw key bytes."""

    def __init__(self, id_: int, key: bytes):
        # Contract id that owns the storage entry.
        self.id = id_
        # Raw storage key bytes.
        self.key = key

    def __len__(self) -> int:
        # Serialized size: 4-byte id + the raw key bytes.
        return s.uint32 + len(self.key)

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return self.id == other.id and self.key == other.key

    def __hash__(self):
        # Combine the id's hash with a deterministic MurmurHash3 of the
        # key bytes (fixed seed, unsigned) so equal keys hash equally.
        return hash(self.id) + mmh3.hash(self.key, seed=0, signed=False)

    def __repr__(self):
        return f"<{self.__class__.__name__} at {hex(id(self))}> [{self.id}] {self.key}"

    def __add__(self, other):
        # Appending bytes (or a serializable's byte form) yields a new,
        # longer key under the same contract id.
        if type(other) in [bytes, bytearray]:
            return StorageKey(self.id, self.key + other)
        if isinstance(other, (serialization.ISerializable, vm.BigInteger)):
            return StorageKey(self.id, self.key + other.to_array())
        else:
            return NotImplemented

    def serialize(self, writer: serialization.BinaryWriter) -> None:
        writer.write_int32(self.id)
        # The key is written raw, without a length prefix; deserialize()
        # therefore consumes everything remaining in the stream.
        writer.write_bytes(self.key)

    def deserialize(self, reader: serialization.BinaryReader) -> None:
        self.id = reader.read_int32()
        # NOTE(review): reaches into the reader's private `_stream` to size
        # the remainder — prefer a public API if one exists.
        remaining_stream_size = len(reader) - reader._stream.tell()
        self.key = reader.read_bytes(remaining_stream_size)

    @classmethod
    def _serializable_init(cls):
        # Blank instance used by the deserialization machinery.
        return cls(0, b'')
| StarcoderdataPython |
5051536 | <reponame>gatech-sysml/sam
import argparse
import os
from pathlib import Path
import GPUtil
import numpy as np
import torch
from model.smooth_cross_entropy import smooth_crossentropy
from model.wide_res_net import WideResNet_Embeds
from sam import SAM
from utility.bypass_bn import disable_running_stats, enable_running_stats
from utility.cifar_utils import load_dataset
from utility.initialize import initialize
from utility.log import Log
from utility.step_lr import StepLR
def get_project_root() -> Path:
    """Return the repository root: two directory levels above this file."""
    return Path(__file__).parent.parent
import sys

# Make the project root importable. Bug fix: the original appended the
# function object itself (`get_project_root`) to sys.path instead of the
# path string, which silently added a useless entry.
sys.path.append(str(get_project_root()))
if __name__ == "__main__":
    # CLI for training WideResNet_Embeds with the SAM optimizer.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--gpu", default=-1, type=int, help="Index value for the GPU to use",
    )
    parser.add_argument("--fine_classes", dest="use_fine_classes", action="store_true")
    parser.add_argument(
        "--coarse_classes", dest="use_fine_classes", action="store_false",
    )
    parser.set_defaults(use_fine_classes=True)
    parser.add_argument(
        "--superclass", default="all", type=str, help="Superclass we want to use",
    )
    # NOTE(review): argparse `type=bool` treats ANY non-empty string as True,
    # so `--adaptive False` still yields True — confirm intended.
    parser.add_argument(
        "--adaptive",
        default=True,
        type=bool,
        help="True if you want to use the Adaptive SAM.",
    )
    parser.add_argument(
        "--crop_size",
        default=32,
        type=int,
        help="Crop size used in data transformations.",
    )
    parser.add_argument(
        "--kernel_size",
        default=3,
        type=int,
        help="Kernel size for max pooling layer in WideResNet_Embeds",
    )
    parser.add_argument(
        "--batch_size",
        default=128,
        type=int,
        help="Batch size used in the training and validation loop.",
    )
    parser.add_argument("--depth", default=16, type=int, help="Number of layers.")
    parser.add_argument("--dropout", default=0.0, type=float, help="Dropout rate.")
    parser.add_argument(
        "--epochs", default=200, type=int, help="Total number of epochs."
    )
    # NOTE(review): label_smoothing is parsed but never used below —
    # smooth_crossentropy is called without it.
    parser.add_argument(
        "--label_smoothing",
        default=0.1,
        type=float,
        help="Use 0.0 for no label smoothing.",
    )
    parser.add_argument(
        "--learning_rate",
        default=0.1,
        type=float,
        help="Base learning rate at the start of the training.",
    )
    parser.add_argument("--momentum", default=0.9, type=float, help="SGD Momentum.")
    parser.add_argument(
        "--threads", default=2, type=int, help="Number of CPU threads for dataloaders."
    )
    # NOTE(review): default is the float 2.0 while type=int truncates CLI
    # values — the default and parsed values have different types.
    parser.add_argument("--rho", default=2.0, type=int, help="Rho parameter for SAM.")
    parser.add_argument(
        "--weight_decay", default=0.0005, type=float, help="L2 weight decay."
    )
    parser.add_argument(
        "--width_factor",
        default=8,
        type=int,
        help="How many times wider compared to normal ResNet.",
    )
    args = parser.parse_args()
    print(args)

    # Resolve label granularity; fine labels require a superclass.
    if args.use_fine_classes:
        args.granularity = "fine"
        # NOTE(review): this bare `ValueError(...)` expression is never
        # raised — the validation is currently a no-op.
        if not args.superclass:
            ValueError("Must provide superclass when training with fine labels")
        # NOTE(review): `superclass` is assigned but never read afterwards
        # (the path below uses args.superclass directly).
        superclass = str(args.superclass)
    else:
        args.granularity = "coarse"
        if not args.superclass:
            superclass = "all"

    initialize(args, seed=42)

    # Device selection: -1 means "pick the first free GPU automatically".
    if args.gpu == -1:
        # Set CUDA_DEVICE_ORDER so the IDs assigned by CUDA match those from nvidia-smi
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        # Get the first available GPU
        DEVICE_ID_LIST = GPUtil.getFirstAvailable()
        DEVICE_ID = DEVICE_ID_LIST[0]  # grab first element from list
        device = torch.device(
            f"cuda:{DEVICE_ID}" if torch.cuda.is_available() else "cpu"
        )
        # # Set CUDA_VISIBLE_DEVICES to mask out all other GPUs than the first available device id
        # os.environ["CUDA_VISIBLE_DEVICES"] = str(DEVICE_ID)
        # # Since all other GPUs are masked out, the first available GPU will now be identified as GPU:0
        # print('Device ID (unmasked): ' + str(DEVICE_ID))
        # print('Device ID (masked): ' + str(0))
        # device = torch.device(f"cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device(
            f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu"
        )

    dataset_train = load_dataset("train", args)
    dataset_test = load_dataset("test", args)

    train_set = torch.utils.data.DataLoader(
        dataset_train,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.threads,
    )
    test_set = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.threads,
    )

    # Checkpoint path encodes the hyper-parameter combination.
    fp = (
        get_project_root()
        / "models"
        / args.granularity
        / args.superclass
        / f"crop{str(args.crop_size)}"
        / f"kernel{str(args.kernel_size)}"
        / f"width{str(args.width_factor)}"
        / f"depth{str(args.depth)}"
        / f"model_{args.granularity}_{args.superclass}_crop{args.crop_size}_kernel{args.kernel_size}_width{args.width_factor}_depth{args.depth}.pt"
    )
    fp.parent.mkdir(parents=True, exist_ok=True)

    log = Log(log_each=10)

    # 100 fine labels vs 20 coarse labels — presumably CIFAR-100; confirm
    # against load_dataset.
    if args.use_fine_classes:
        model = WideResNet_Embeds(
            depth=args.depth,
            width_factor=args.width_factor,
            dropout=args.dropout,
            kernel_size=args.kernel_size,
            in_channels=3,
            labels=100,
        ).to(device)
    else:
        model = WideResNet_Embeds(
            depth=args.depth,
            width_factor=args.width_factor,
            dropout=args.dropout,
            kernel_size=args.kernel_size,
            in_channels=3,
            labels=20,
        ).to(device)

    # SAM wraps SGD as its base optimizer.
    base_optimizer = torch.optim.SGD
    optimizer = SAM(
        model.parameters(),
        base_optimizer,
        rho=args.rho,
        adaptive=args.adaptive,
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
    )
    scheduler = StepLR(optimizer, args.learning_rate, args.epochs)

    lowest_loss = np.inf
    for epoch in range(args.epochs):
        model.train()
        log.train(len_dataset=len(train_set))

        for batch in train_set:
            inputs, targets = (b.to(device) for b in batch)

            # first forward-backward step (SAM perturbation step)
            enable_running_stats(model)
            predictions, _ = model(inputs)  # Ignore the embedding output
            loss = smooth_crossentropy(predictions, targets)
            loss.mean().backward()
            optimizer.first_step(zero_grad=True)

            # second forward-backward step (SAM update step); BN statistics
            # are frozen so they only reflect the first pass
            disable_running_stats(model)
            predictions_2nd, _ = model(inputs)  # Ignore the embedding output
            smooth_crossentropy(predictions_2nd, targets).mean().backward()
            optimizer.second_step(zero_grad=True)

            with torch.no_grad():
                correct = torch.argmax(predictions.data, 1) == targets
                log(model, loss.cpu(), correct.cpu(), scheduler.lr())
                scheduler(epoch)

        # Validation pass: accumulate loss/accuracy over the test split.
        model.eval()
        log.eval(len_dataset=len(test_set))
        epoch_loss = 0.0
        epoch_correct = 0.0
        epoch_count = 0.0

        with torch.no_grad():
            for batch in test_set:
                inputs, targets = (b.to(device) for b in batch)

                predictions, _ = model(inputs)  # XXXXX add embedding outputs
                loss = smooth_crossentropy(predictions, targets)
                batch_loss = loss.sum().item()
                epoch_loss += batch_loss
                correct = torch.argmax(predictions, 1) == targets
                batch_correct = correct.sum().item()
                epoch_correct += batch_correct
                epoch_count += len(targets)
                log(model, loss.cpu(), correct.cpu())

        log.flush()

        # Checkpoint whenever the summed validation loss improves.
        if epoch_loss < lowest_loss:
            print(
                f"Epoch {epoch} achieved a new lowest_loss of {epoch_loss}. Saving model to disk."
            )
            lowest_loss = epoch_loss
            torch.save(
                {
                    "epoch": epoch,
                    "model_state_dict": model.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                    "loss": epoch_loss,
                    "correct": epoch_correct,
                    "size": epoch_count,
                    "accuracy": epoch_correct / epoch_count,
                },
                str(fp),
            )
3576368 | import src.GameOfLife as GoL
import src.BriansBrain as BB
from src import App
from src.const import *
def GameOfLife():
    """Run Conway's Game of Life seeded with a long vertical line."""
    board = App.new_grid()
    # Vertical line down the centre column, leaving a 10-cell dead margin
    # at the top and bottom of the map.
    column = CELLMAP_WIDTH // 2
    for row in range(10, CELLMAP_HEIGHT - 10):
        App.set_cell(board, column, row, ALIVE)
    app = App.App(App.Simulation(GoL.rule, board))
    app.run(paused=False)
def BriansBrain():
    """Run a Brian's Brain simulation, starting paused.

    The grid is seeded with four live cells: two vertical pairs at
    columns 99-100, offset five rows above and below row 100.
    (Removed a block of commented-out alternative seeds.)
    """
    grid = App.new_grid()
    App.set_cell(grid, 99, 100+5, ALIVE)
    App.set_cell(grid, 99, 100-5, ALIVE)
    App.set_cell(grid, 100, 100+5, ALIVE)
    App.set_cell(grid, 100, 100-5, ALIVE)
    sim = App.Simulation(BB.rule, grid)
    app = App.App(sim)
    app.run(paused=True)
if __name__ == '__main__':
    # Swap the calls below to run the alternate automaton instead.
    GameOfLife()
    #BriansBrain()
6649366 | import pandas as pd
import yaml
import argparse
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from fast_image_classification.models import get_model_classification
from fast_image_classification.training_utilities import (
dataframe_to_list_samples,
batch_generator,
)
def train_from_csv(csv_path, data_config_path, training_config_path):
    """Train an image classifier from a CSV listing and two YAML configs.

    The CSV is split 80/20 into train/validation sets; data layout and
    training hyper-parameters come from the YAML files, and the best
    model (lowest validation loss) is checkpointed to
    ``training_config['model_path']``.
    """
    frame = pd.read_csv(csv_path)
    train_frame, val_frame = train_test_split(frame, test_size=0.2, random_state=1337)

    with open(data_config_path, "r") as handle:
        data_config = yaml.load(handle, yaml.SafeLoader)
    with open(training_config_path, "r") as handle:
        training_config = yaml.load(handle, yaml.SafeLoader)

    def to_samples(split_frame):
        # Both splits are converted with identical settings.
        return dataframe_to_list_samples(
            split_frame,
            binary_targets=data_config["targets"],
            base_path=data_config["images_base_path"],
            image_name_col=data_config["image_name_col"],
        )

    train_samples = to_samples(train_frame)
    val_samples = to_samples(val_frame)

    model = get_model_classification(
        input_shape=tuple(data_config["input_shape"]),
        n_classes=len(data_config["targets"]),
    )

    # Augmentation is applied to the training stream only.
    train_gen = batch_generator(
        train_samples,
        resize_size=data_config["resize_shape"],
        augment=training_config["use_augmentation"],
    )
    val_gen = batch_generator(val_samples, resize_size=data_config["resize_shape"])

    callbacks = [
        # Keep only the best weights by validation loss.
        ModelCheckpoint(
            training_config["model_path"],
            monitor="val_loss",
            verbose=1,
            save_best_only=True,
            mode="min",
        ),
        ReduceLROnPlateau(monitor="val_loss", mode="min", patience=10, min_lr=1e-7),
        EarlyStopping(monitor="val_loss", mode="min", patience=30),
    ]

    model.fit_generator(
        train_gen,
        steps_per_epoch=len(train_samples) // training_config["batch_size"],
        validation_data=val_gen,
        validation_steps=len(val_samples) // training_config["batch_size"],
        epochs=training_config["epochs"],
        callbacks=callbacks,
    )
if __name__ == "__main__":
    """
    python train.py --csv_path "../example/data.csv" \
                    --data_config_path "../example/data_config.yaml" \
                    --training_config_path "../example/training_config.yaml"
    """
    # CLI entry point: all three paths default to the bundled example files.
    parser = argparse.ArgumentParser()
    parser.add_argument("--csv_path", help="csv_path", default="../example/data.csv")
    parser.add_argument(
        "--data_config_path",
        help="data_config_path",
        default="../example/data_config.yaml",
    )
    parser.add_argument(
        "--training_config_path",
        help="training_config_path",
        default="../example/training_config.yaml",
    )
    args = parser.parse_args()
    csv_path = args.csv_path
    data_config_path = args.data_config_path
    training_config_path = args.training_config_path

    train_from_csv(
        csv_path=csv_path,
        data_config_path=data_config_path,
        training_config_path=training_config_path,
    )
11344839 | from pathlib import Path
from rlbot.parsing.bot_config_bundle import BotConfigBundle
from autoleague.paths import WorkingDir
BotID = str # type alias
def make_bot_id(working_dir: 'WorkingDir', bot_config: 'BotConfigBundle') -> 'BotID':
    """Build a BotID: the bot's config-file path expressed relative to the
    working directory's bots folder. (Annotations are forward references.)"""
    config_file = Path(bot_config.config_directory, bot_config.config_file_name)
    return str(config_file.relative_to(working_dir.bots))
| StarcoderdataPython |
137771 | from ..utils import to_value, len_batch
from .callback_tensorboard import CallbackTensorboardBased
from ..train import utilities
from ..train import outputs_trw as O
import functools
import collections
import torch
import numpy as np
import logging
logger = logging.getLogger(__name__)
def get_as_image(images):
    """
    Return `images` as (N, C, H, W) when it looks like a batch of images,
    else None.

    TODO: smarter image detection!

    :param images: the object to check
    :return: None when not image-like, otherwise the unchanged (N, C, H, W)
        array/tensor
    """
    if not isinstance(images, (np.ndarray, torch.Tensor)):
        return None
    # Heuristic: 4D with a 1- or 3-channel second axis.
    if len(images.shape) == 4 and images.shape[1] in (1, 3):
        return images
    return None
def keep_small_features(feature_name, feature_value):
    """
    Keep only the small features (e.g., len(shape) == 1) for the embedding
    metadata.

    :return: True to keep the feature, False to discard it
    """
    is_large_tensor = isinstance(feature_value, torch.Tensor) and len(feature_value.shape) > 1
    return not is_large_tensor
def is_batch_vector(value, batch_size):
    """
    Return True when `value` is a per-sample vector of length `batch_size`.

    :param value: the value to test (tensor, ndarray or list)
    :param batch_size: the expected size of the batch
    """
    if isinstance(value, (torch.Tensor, np.ndarray)):
        # 0-dim tensors/arrays have no leading axis; treat their size as 0.
        leading = value.shape[0] if len(value.shape) != 0 else 0
        return leading == batch_size
    if isinstance(value, list):
        return len(value) == batch_size
    return False
def add_classification_strings_from_output(dataset_name, split_name, output, datasets_infos, prefix=''):
    """
    Special classification helper: add the class name (output and output_truth) as a string using the class
    mapping contained in `datasets_infos`

    :param dataset_name: the dataset name
    :param split_name: the split name
    :param output: per-batch output dict; classification outputs carry
        'output_ref', 'output' and 'output_truth'
    :param datasets_infos: should contain the mapping
    :param prefix: the output and output_truth will be prefixed with `prefix`
    :return: the additional strings in a dictionary (empty when the output is
        not a classification output or no mapping is available)
    """
    output_dict = {}
    is_classification = False
    output_ref = output.get('output_ref')
    if output_ref is not None:
        is_classification = isinstance(output_ref, O.OutputClassification)

    if is_classification:
        # special handling of the classification node: add class names in string too so it is easier
        # to review the results, specially when we have many classes
        mapping = utilities.get_classification_mapping(datasets_infos, dataset_name, split_name, output_ref.classes_name)
        if mapping is not None:
            output_values = output.get('output')
            nb_samples = len(output_values)
            output_strs = []
            output_truth_strs = []
            for n in range(nb_samples):
                # Map the predicted and true class ids to readable names.
                output_str = utilities.get_class_name(mapping, output_values[n])
                # NOTE(review): 'output_truth' is re-fetched every iteration;
                # it could be hoisted out of the loop.
                output_truth_values = output.get('output_truth')
                output_truth_str = utilities.get_class_name(mapping, output_truth_values[n])
                output_strs.append(output_str)
                output_truth_strs.append(output_truth_str)

            output_dict[prefix + 'output_str'] = output_strs
            output_dict[prefix + 'output_truth_str'] = output_truth_strs

    return output_dict
class CallbackTensorboardEmbedding(CallbackTensorboardBased):
    """
    This callback records the embedding to be displayed with tensorboard

    Note: we must recalculate the embedding as we need to associate a specific input (i.e., we can't store
    everything in memory so we need to collect what we need batch by batch)
    """
    def __init__(self, embedding_name, dataset_name=None, split_name=None, image_name=None, maximum_samples=2000, keep_features_fn=keep_small_features):
        """
        :param embedding_name: the name of the embedding to be used
        :param dataset_name: the name of the dataset to export the embedding. If `None`,
            we will try to find the best match
        :param split_name: the split of the dataset to export the embedding
        :param image_name: the image name to be used in tensorboard. If `None`, we will try to find
            an image like tensor to be used. If the `image_name` is not None but is not found in the batch,
            no image will be exported
        :param maximum_samples: the maximum number of samples to be exported for this embedding
        :param keep_features_fn: predicate `f(feature_name, feature_value)` selecting which
            batch features are exported as embedding metadata
        """
        self.embedding_name = embedding_name
        self.dataset_name = dataset_name
        self.split_name = split_name
        self.image_name = image_name
        self.maximum_samples = maximum_samples
        self.keep_features_fn = keep_features_fn
        # features never exported as metadata (internal object references)
        self.features_to_discard = ['output_ref']

    def first_time(self, datasets, options):
        """Lazily resolve the dataset/split and an image-like feature on first use."""
        self.dataset_name, self.split_name = utilities.find_default_dataset_and_split_names(
            datasets,
            default_dataset_name=self.dataset_name,
            default_split_name=self.split_name,
            train_split_name=options.workflow_options.train_split
        )
        if self.dataset_name is None:
            return

        if self.image_name is None:
            # try to find a tensor that has the shape of images
            # (only the first batch is inspected)
            for batch in datasets[self.dataset_name][self.split_name]:
                for feature_name, feature in batch.items():
                    as_image = get_as_image(feature)
                    if as_image is not None:
                        self.image_name = feature_name
                        break
                break

        if self.image_name is None:
            # we haven't found a suitable image for the given dataset/split
            # so use an impossible name
            self.image_name = ''

    def __call__(self, options, history, model, losses, outputs, datasets, datasets_infos, callbacks_per_batch, **kwargs):
        """Recompute the embedding over the configured split and export it to tensorboard."""
        root = options.workflow_options.current_logging_directory
        logger.info('root={}, nb_samples={}'.format(root, self.maximum_samples))
        logger_tb = CallbackTensorboardBased.create_logger(root)
        if logger_tb is None:
            return

        if self.dataset_name is None or self.image_name is None:
            self.first_time(datasets, options)
        if self.dataset_name is None or self.image_name is None:
            logger.info('embedding can not be calculated: dataset={}, split={}'.format(self.dataset_name, self.split_name))
            return None
        if datasets.get(self.dataset_name) is None or datasets[self.dataset_name].get(self.split_name) is None:
            logger.info('embedding can not be calculated: dataset={}, split={}'.format(self.dataset_name, self.split_name))
            return
        logger.info('parameters: dataset={}, split={}, embedding={}, image_name={}'.format(self.dataset_name, self.split_name, self.embedding_name, self.image_name))
        device = options.workflow_options.device

        logger.info('collecting embeddings')
        # collect the embeddings, images and metadata, batch by batch
        embedding = collections.defaultdict(list)
        nb_samples_collected = 0

        def fill_embedding(batch_size, output, prefix=''):
            # (the unused `output_name` parameter of the original was removed)
            # special handling of the classification node: add class names in
            # string too so it is easier to review the results
            additional_strings = add_classification_strings_from_output(
                self.dataset_name,
                self.split_name,
                output,
                datasets_infos,
                prefix=prefix
            )
            for name, value in additional_strings.items():
                embedding[name].append(value)

            # record the output metrics; only per-sample vectors are kept
            # (global features such as learning rate are skipped)
            for feature_name, feature_values in output.items():
                if feature_name == self.image_name:
                    continue
                if not self.keep_features_fn(feature_name, feature_values):
                    continue
                if feature_name in self.features_to_discard:
                    continue
                full_name = prefix + feature_name
                if is_batch_vector(feature_values, batch_size):
                    embedding[full_name].append(to_value(feature_values))

        def collect_embedding(dataset_name, split_name, batch, loss_terms, embedding, embedding_name, image_name, **kwargs):
            batch_size = len_batch(batch)
            embedding_values = loss_terms.get(embedding_name)
            if embedding_values is not None:
                embedding['output'].append(to_value(embedding_values['output']))

            for output_name, output in loss_terms.items():
                if output_name == embedding_name:
                    continue
                fill_embedding(batch_size, output, prefix=output_name + '-')

            images = batch.get(image_name)
            if images is not None:
                images = get_as_image(images)
                embedding['images'].append(to_value(images))

            fill_embedding(batch_size, batch)

            nonlocal nb_samples_collected
            if nb_samples_collected >= self.maximum_samples:
                # we have exceeded the number of samples to collect, stop the loop
                raise StopIteration()
            nb_samples_collected += batch_size

        # local import, presumably to avoid a circular import — confirm
        from ..train.trainer import eval_loop
        eval_loop(
            device,
            self.dataset_name,
            self.split_name,
            datasets[self.dataset_name][self.split_name],
            model,
            losses[self.dataset_name],
            history,
            callbacks_per_batch=callbacks_per_batch,
            callbacks_per_batch_loss_terms=[functools.partial(collect_embedding, embedding=embedding, embedding_name=self.embedding_name, image_name=self.image_name)]
        )
        logger.info('collecting embeddings done!')

        # merge the per-batch collections into single arrays
        merged_embedding = {}
        for name, values in embedding.items():
            merged_embedding[name] = np.concatenate(values)

        embedding_values = merged_embedding.get('output')
        if embedding_values is None:
            logger.info('No embedding `output` could be found!')
            return

        images = merged_embedding.get('images')
        if images is not None:
            assert len(images.shape) == 4 and (images.shape[1] == 1 or images.shape[1] == 3), \
                'Expected images format (N, C, H, W), got shape={}'.format(images.shape)
            images = torch.Tensor(images)

        # export the metadata: one column per kept feature, one row per sample
        metadata_header = []
        metadata = []
        for name, values in merged_embedding.items():
            if name != 'output' and name != 'images':
                metadata_header.append(name)
                # sanitize: tensorboard metadata is TSV, so strip tabs/newlines
                values_str = [str(v).replace('\n', ' ').replace('\t', ' ') for v in values]
                metadata.append(values_str)
        if len(metadata_header) != 0:
            metadata = np.stack(metadata, axis=1)
            metadata_list = metadata.tolist()
        else:
            # Bug fix: `metadata.tolist()` was previously called
            # unconditionally, raising AttributeError (plain list has no
            # `tolist`) whenever no metadata column was collected.
            metadata_list = None
            metadata_header = None

        # export the embedding to the tensorboard log
        logger.info('adding embedding...')
        logger_tb.add_embedding(
            embedding_values,
            label_img=images,
            global_step=len(history) - 1,
            metadata=metadata_list,
            metadata_header=metadata_header)
        logger.info('embedding successfully added!')
| StarcoderdataPython |
357835 | import json
import logging
from gala_wit import GalaWit
from intenthandlers.utils import get_highest_confidence_entity
from intenthandlers.misc import say_quote
from intenthandlers.misc import randomize_options
from intenthandlers.misc import flip_coin
from intenthandlers.conversation_matching import onboarding_conversation_match
from intenthandlers.conversation_matching import nag_conversation_match
from intenthandlers.galastats import count_galateans
from intenthandlers.google_helpers import GoogleCredentials
from intenthandlers.drive import view_drive_file
from intenthandlers.drive import create_drive_file
from intenthandlers.drive import delete_drive_file
from intenthandlers.google_helpers import send_email
from intenthandlers.drive import get_google_drive_list
from state import WaitState
from state import ConversationState
from slack_clients import is_direct_message
from oauth2client import client
import os
from intenthandlers.google_helpers import SCOPES
logger = logging.getLogger(__name__)
# List of users for the bot to ignore
user_ignore_list = ['USLACKBOT']
# A list of intents which are part of conversations. Could be merged into intents as a separate entry in the tuple
conversation_intent_types = {
# NOTE: none of the functions have been implemented. This is an important TODO for conversation matching!
'accounts-setup': onboarding_conversation_match,
'desk-setup': onboarding_conversation_match,
'phones-setup': onboarding_conversation_match,
'email-setup': onboarding_conversation_match,
'slack-setup': onboarding_conversation_match,
'onboarding-start': None,
'nag-users': None,
'nag-response': nag_conversation_match
}
class RtmEventHandler(object):
def __init__(self, slack_clients, msg_writer, event_processing_q, state_updating_q):
    """Wire up the wit.ai client, the work queues, and the intent table.

    :param slack_clients: wrapper around the Slack RTM/web APIs
    :param msg_writer: helper used to post responses back to Slack
    :param event_processing_q: queue of intent-handling jobs to execute
    :param state_updating_q: queue of internal state updates (OAuth results,
        conversation/wait-state changes)
    """
    self.state_updating_q = state_updating_q  # this q holds objects which update some internal state
    self.event_processing_q = event_processing_q  # this q holds objects representing events to act upon
    self.clients = slack_clients
    self.msg_writer = msg_writer
    self.wit_client = GalaWit()
    # Ongoing conversations and states awaiting an external event (OAuth).
    self.conversations = {}
    self.wait_states = {}
    self.credentials = GoogleCredentials(msg_writer, slack_clients)

    # this is a mapping of wit.ai intents to (handler, example phrase shown
    # in the help prompt) that will handle those intents.
    # NOTE(review): '<NAME>' / '<EMAIL>' look like scrubbed placeholder
    # strings — confirm the intended example phrases.
    self.intents = {
        'movie-quote': (say_quote, 'movie quote'),
        'galatean-count': (count_galateans, 'How many Galateans are in Boston?'),
        'randomize': (randomize_options, 'Decide between burgers and tacos'),
        'coin-flip': (flip_coin, 'flip a coin'),
        'get-google-drive': (get_google_drive_list, "What is in your google drive?"),
        'view-drive-file': (view_drive_file, "show getting started"),
        'create-drive-file': (create_drive_file, "create filename"),
        'delete-drive-file': (delete_drive_file, "delete filename"),
        'nag-users': (self.clients.nag_users, "<NAME> about hal"),
        'nag-response': (self.clients.nag_response, "I did the task"),
        'send-email': (send_email, "hello <EMAIL>"),
    }
def state_check(self):
    """
    Called regularly by the slack bot; drains one pending state update,
    if any, so internal state stays current.
    """
    if self.state_updating_q.empty():
        return
    self._process_q()
def handle(self, event):
    """Dispatch an RTM event to the typed handler; untyped events are ignored."""
    if 'type' not in event:
        return
    self._handle_by_type(event['type'], event)
def _handle_by_type(self, event_type, event):
    """Route one RTM event by its type.

    See https://api.slack.com/rtm for a full list of event types; any type
    not handled below is silently ignored.
    """
    if event_type == 'error':
        # surface the raw event payload in the channel
        self.msg_writer.write_error(event['channel'], json.dumps(event))
    elif event_type == 'message':
        # message was sent to channel
        self._handle_message(event)
    elif event_type in ('channel_joined', 'group_joined'):
        # the bot joined a channel or a private group — greet it
        self.msg_writer.say_hi(event['channel'], event.get('user', ""))
def _handle_message(self, event):
    """Interpret a channel message with wit.ai and enqueue the matched intent handler."""
    # Drop messages that should not be processed (ignored users, etc.).
    if not self._proof_message(event):
        return

    msg_txt = event['text']
    channel_id = event['channel']

    # Remove mention of the bot so that the rest of the code doesn't need to
    msg_txt = self.clients.remove_mention(msg_txt).strip()

    # Ask wit to interpret the text and send back a list of entities
    logger.info("Asking wit to interpret| {}".format(msg_txt))
    wit_resp = self.wit_client.interpret(msg_txt)

    # Add username, channel name, user dm, and cleaned text to the event object
    user_name = self.clients.get_user_name_from_id(event['user'])
    if is_direct_message(channel_id):
        channel_name = "Direct Message"
    else:
        channel_name = self.clients.get_channel_name_from_id(channel_id)
    event.update({
        "user_name": user_name,
        "channel_name": channel_name,
        "user_dm": self.clients.get_dm_id_from_user_id(event['user']),
        "cleaned_text": msg_txt
    })

    # Find the intent with the highest confidence that met our default threshold
    intent_entity = get_highest_confidence_entity(wit_resp['entities'], 'intent')

    # If we couldn't find an intent entity, show the help prompt instead
    if intent_entity is None:
        self.msg_writer.write_prompt(channel_id, self.intents)
        return

    intent_value = intent_entity['value']

    # Conversation-aware intents: attach the matching ongoing conversation,
    # if any, so the handler can continue it.
    if intent_value in conversation_intent_types:
        match = self._conversation_match(intent_value, wit_resp, event)
        if match:
            event.update({"conversation": match})

    # Enqueue the handler job for asynchronous processing.
    if intent_value in self.intents:
        t = {
            'intent': self.intents[intent_value][0],
            'msg_writer': self.msg_writer,
            'event': event,
            'wit_entities': wit_resp['entities'],
            'credentials': self.credentials,
            'state_q': self.state_updating_q
        }
        self.event_processing_q.put(t)
    else:
        raise ReferenceError("No function found to handle intent {}".format(intent_value))
def _process_q(self):
    """Pop one state-update message off the queue and route it by its 'type'."""
    message = self.state_updating_q.get()
    handlers = {
        'flask_response': self._check_flask,
        'state_update': self._handle_state_change,
    }
    handler = handlers.get(message['type'])
    if handler is not None:
        handler(message)
    def _check_flask(self, auth_json):
        """
        _check_flask checks to see if there are any messages from the flask thread. If there are, it processes them
        by finishing the authentication flow, and then resuming the interrupted user command.

        :param auth_json: dict from the flask thread; may carry 'auth_code' (OAuth
            authorization code) and 'encrypted_state' (opaque state token).
        :return: None
        """
        auth_code = auth_json.get('auth_code')
        encrypted_state = auth_json.get('encrypted_state')
        if auth_code is not None:
            # Finish the Google OAuth2 flow: exchange the auth code for credentials.
            flow = client.OAuth2WebServerFlow(client_id=os.getenv("GOOGLE_CLIENT_ID", ""),
                                              client_secret=os.getenv("GOOGLE_CLIENT_SECRET", ""),
                                              scope=SCOPES,
                                              redirect_uri=os.getenv("CALLBACK_URI", ""))
            credentials = flow.step2_exchange(auth_code)
            # Store the credentials and recover the id of the interrupted command.
            state_id = self.credentials.add_credential_return_state_id(credentials, encrypted_state)
            logger.info('state id {}'.format(state_id))
            logger.info('waitstates {}'.format(self.wait_states))
            state = self.wait_states.get(state_id)
            logger.info('state {}'.format(state))
            if state is None:
                # NOTE(review): bare KeyError loses context — consider KeyError(state_id).
                raise KeyError
            # Re-enqueue the interrupted command with the now-available credentials.
            t = {
                'intent': self.intents[state.get_intent_value()][0],
                'msg_writer': self.msg_writer,
                'event': state.get_event(),
                'wit_entities': state.get_wit_entities(),
                'credentials': state.get_credentials(),
                'state_q': self.state_updating_q
            }
            self.event_processing_q.put(t)
            self.wait_states.pop(state_id)
        else:
            # No auth code: the user declined/failed auth — discard the wait state.
            # NOTE(review): .pop() raises KeyError if the state id is absent — confirm intended.
            state_id = self.credentials.return_state_id(encrypted_state)
            self.wait_states.pop(state_id)
def _handle_state_change(self, state_json):
"""
:param state_json: The state returned by the intent handling function
Updates the state dicts based on they type of the state returned.
:return: None
"""
state = state_json['state']
if isinstance(state, ConversationState):
self._conversations_update(state)
elif isinstance(state, WaitState):
self.wait_states.update({state.get_id(): state})
def _proof_message(self, event):
"""
:param event: The triggering message event
Checks the event to see if this is a message that should be processed
:return: Bool indicating whether or not the Rtm should continue processing the message
"""
# Event won't have a user if slackbot is unfurling messages for you
if 'user' not in event:
return False
# Filter out messages from the bot itself
if self.clients.is_message_from_me(event['user']):
return False
msg_txt = event['text']
channel_id = event['channel']
# Filter out message unless this bot is mentioned or it is a direct message
if not (is_direct_message(channel_id) or self.clients.is_bot_mention(msg_txt)):
return False
# Ensure that we don't go to wit with messages posted by an ignored user
if event['user'] in user_ignore_list:
return False
return True
def _conversation_match(self, intent, wit_resp, event):
"""
:param intent: The most likely intended intent returned by wit
:param wit_resp: The total response from wit
:param event: The triggering event
_conversation_match attempts to return the conversation connected to the event based on event information and
the wit response
:return: A Conversation State from self.conversations
"""
possible_matches = []
for conversation in self.conversations:
if intent in self.conversations[conversation].get_waiting_for():
possible_matches.append(self.conversations[conversation])
if not possible_matches:
return
elif len(possible_matches) == 1:
return possible_matches[0]
else:
# Not fully implemented, will certainly break if called
return conversation_intent_types[intent](possible_matches, wit_resp, event)
def _conversations_update(self, conversation):
"""
:param conversation: A Conversation that needs to be updated, added to, or removed from self.conversations
_conversations_update adds to, updates, or removes from self.conversations based on the id and the state of the
passed conversation
:return: None
"""
conv_id = conversation.get_id()
if conv_id in self.conversations:
if conversation.complete():
self.conversations.pop(conv_id)
else:
self.conversations[conv_id] = conversation
else:
self.conversations[conv_id] = conversation
| StarcoderdataPython |
347481 | from distillation.datasets.imagenet_dataset import ImageNet
from distillation.datasets.cifar_dataset import CIFAR100
from distillation.datasets.mit67_datasets import MITScenes
def dataset_factory(dataset_name, *args, **kwargs):
    """Instantiate the dataset class registered under *dataset_name*.

    Any extra positional/keyword arguments are forwarded to the dataset
    constructor. Raises KeyError for an unknown dataset name.
    """
    registry = {
        'ImageNet': ImageNet,
        'CIFAR100': CIFAR100,
        'MITScenes': MITScenes,
    }
    return registry[dataset_name](*args, **kwargs)
| StarcoderdataPython |
7109 | import numpy as np
# Voxel dtype for loaded image volumes.
img_dtype = np.float32
# Full-resolution volume dimensions (X, Y, Z) and source directories.
imgX, imgY, imgZ = (256, 256, 150)
imgs_path_withfaces = '../dataset/withfaces'
imgs_path_nofaces = '../dataset/nofaces'
# Half-resolution dimensions/paths after one level of DWT decomposition.
imgX_dwt1, imgY_dwt1, imgZ_dwt1 = (128, 128, 75)
imgs_path_withfaces_dwt = './dataset/withfaces'
imgs_path_nofaces_dwt = './dataset/nofaces'
# Toggle: index [0] selects True (use DWT data); flip to [1] for raw volumes.
dwt_flag = (True, False)[0]
if dwt_flag:
    # Replace the default dimensions/paths with the DWT variants.
    imgX, imgY, imgZ = imgX_dwt1, imgY_dwt1, imgZ_dwt1
    imgs_path_withfaces = imgs_path_withfaces_dwt
    imgs_path_nofaces = imgs_path_nofaces_dwt
| StarcoderdataPython |
1790861 | #!/usr/bin/env python3
# precision.py - Precision program by Sergey 2015
# AlgoArt - The Art of Algorithms (github.com/algoart/algoart)
"""
Arbitrary precision math calculations.
decimal.getcontext().prec = p - setting the precision (Default: 30)
pi() - Calculates Pi with required precision
PI - precalculated value of Pi
inv(x, b, e, f) - calculates inverse function for a growing function f (b < e)
sin(x)/cos(x)/tan() - sine, cosine and tan functions
asin(x)/acos(x)/atan(x) - arcsine, arccosine and arctan functions
Time complexity: O(p^2)
"""
import unittest
import sys
import decimal
###############################################################################
# Precision (Main Program)
###############################################################################
# Default precision — note this mutates the process-wide decimal context
# as a side effect of importing this module.
decimal.getcontext().prec = 30
def pi():
    """Compute pi to the current decimal context precision.

    Uses the series from the ``decimal`` module recipes. Two guard digits
    are added during summation and dropped again; the final unary ``+``
    rounds the result back to the context precision.
    """
    ctx = decimal.getcontext()
    ctx.prec += 2
    total = decimal.Decimal(3)
    term = decimal.Decimal(3)
    prev = decimal.Decimal(0)
    num, num_step = 1, 0
    den, den_step = 0, 24
    while total != prev:
        prev = total
        num, num_step = num + num_step, num_step + 8
        den, den_step = den + den_step, den_step + 32
        term = (term * num) / den
        total += term
    ctx.prec -= 2
    return +total

# Precalculated value of pi at the precision in effect at import time.
PI = pi()
def inv(x, b, e, f):
    """Invert a growing function *f* by bisection on the bracket [b, e].

    Returns the largest probed point whose image does not exceed *x*.
    Stops when the midpoint collapses onto an endpoint (numeric resolution)
    or the bracket width drops below the context precision.
    """
    ctx = decimal.getcontext()
    ctx.prec += 2
    while True:
        mid = (b + e) / 2
        # Interval can no longer be split: numeric resolution reached.
        if mid == e or mid == b:
            break
        if f(mid) <= x:
            b = mid
        else:
            e = mid
        # Converged once |e - b| is below the working precision.
        width_log = decimal.Decimal(e - b).copy_abs().log10()
        if width_log <= -ctx.prec:
            break
    ctx.prec -= 2
    return +b
def sin(x):
    """Sine of *x* (a Decimal, in radians) via Taylor series.

    *x* must be a Decimal: remainder_near is a Decimal method, used here to
    reduce the argument into [-pi, pi] for fast series convergence. Two
    guard digits are used during summation, as in the decimal recipes.
    """
    decimal.getcontext().prec += 2
    # Range-reduce modulo 2*pi so the series converges quickly.
    x = x.remainder_near(PI * 2)
    i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1
    while s != lasts:
        lasts = s
        i += 2
        fact *= i * (i-1)
        num *= x * x
        sign *= -1
        s += num / fact * sign
    decimal.getcontext().prec -= 2
    # Unary + rounds back to the restored context precision.
    return +s
def cos(x):
    """Cosine via the identity cos(x) = sin(x + pi/2)."""
    return sin(x + PI/2)
def tan(x):
    """Tangent as sin/cos; raises on division by zero at odd multiples of pi/2."""
    return sin(x)/cos(x)
def asin(x):
    """Arcsine by bisecting sin over [-pi/2, pi/2] (where it is increasing)."""
    return inv(x, -PI/2, PI/2, sin)
def acos(x):
    """Arccosine via acos(x) = pi/2 - asin(x)."""
    return PI/2 - asin(x)
def atan(x):
    """Arctangent via atan(x) = asin(x / sqrt(x^2 + 1)); *x* must be a Decimal."""
    return asin(x / (x*x + 1).sqrt())
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
    def test_Precision_(self):
        """ Precision class testing """
        # Pi
        self.assertEqual(PI, pi())
        # Inv
        self.assertEqual(inv(10, 1, 100, abs), 10)
        # Sin/Cos/Tan
        invsq2 = 1 / decimal.Decimal(2).sqrt()
        self.assertEqual(sin(PI * 100), 0)
        self.assertAlmostEqual(sin(PI/4 + 2*PI), invsq2)
        self.assertAlmostEqual(cos(PI/4), invsq2)
        # Pythagorean identity across a sweep of angles in [-2.5, 2.4].
        for i in range(50):
            t = decimal.Decimal(0.1*(i-25))
            self.assertAlmostEqual(cos(t)**2 + sin(t)**2, 1)
        self.assertAlmostEqual(tan(PI/4), 1)
        # Asin/Acos/Atan: round-trips inside the principal branch.
        for i in range(5):
            t = decimal.Decimal(0.1*(i-2))
            self.assertAlmostEqual(asin(sin(t)), t)
            self.assertAlmostEqual(atan(tan(t)), t)
if __name__ == "__main__":
    # Run the unit tests when executed directly (argv padded for unittest.main).
    unittest.main(argv=[" "])
| StarcoderdataPython |
6529214 | <reponame>nparkstar/nauta<filename>applications/cli/commands/experiment/tests/test_view.py
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from click.testing import CliRunner
import pytest
from unittest.mock import MagicMock
from kubernetes.client import V1Pod, V1PodStatus, V1Event, V1ObjectReference, V1ObjectMeta
from commands.experiment import view
from platform_resources.run import Run, RunStatus
from platform_resources.experiment import Experiment
from cli_text_consts import ExperimentViewCmdTexts as Texts
from util.k8s.k8s_statistics import ResourceUsage
from util.k8s.k8s_info import PodStatus
# Fixture data shared by the tests below: two training Runs (one RUNNING,
# one COMPLETE) returned by the mocked Run.get.
TEST_RUNS = [
    Run(
        name='test-experiment',
        parameters=('a 1', 'b 2'),
        metadata={'labels': {'runKind': 'training'}},
        creation_timestamp='2018-04-26T13:43:01Z',
        namespace='namespace-1',
        state=RunStatus.RUNNING,
        template_name='test-ex-template',
        metrics={'any metrics': 'a'},
        experiment_name='experiment_name',
        pod_count=1,
        pod_selector={}),
    Run(
        name='test-experiment-2',
        metadata={'labels': {'runKind': 'training'}},
        parameters=('a 1', 'b 2'),
        creation_timestamp='2018-05-08T13:05:04Z',
        namespace='namespace-2',
        state=RunStatus.COMPLETE,
        template_name='test-ex-template',
        metrics={'any metrics': 'a'},
        experiment_name='experiment_name',
        pod_count=1,
        pod_selector={})
]
# NOTE(review): QUEUED_RUN appears unused in this module — confirm before removing.
QUEUED_RUN = [
    Run(
        name='test-experiment',
        parameters=('a 1', 'b 2'),
        metadata={'labels': {'runKind': 'training'}},
        creation_timestamp='2018-04-26T13:43:01Z',
        namespace='namespace-1',
        state=RunStatus.QUEUED,
        template_name='test-ex-template',
        metrics={'any metrics': 'a'},
        experiment_name='experiment_name',
        pod_count=1,
        pod_selector={})
]
TEST_EXPERIMENT = Experiment(
    name='test-experiment',
    template_name='template-name',
    template_namespace='namespace',
    template_version='0.1.0')
# A generic pod returned by the mocked get_namespaced_pods.
mocked_test_pod = MagicMock(spec=V1Pod)
mocked_test_pod.metadata.name = "test"
mocked_test_pod.metadata.uid = "uid"
TEST_PODS = [mocked_test_pod]
# A pod stuck in Pending, used by the QUEUED-state display test.
pending_pod = MagicMock(spec=V1Pod)
pending_pod.status = V1PodStatus(phase=PodStatus.PENDING.value)
pending_pod.metadata.name = "test"
pending_pod.metadata.uid = "uid"
PENDING_POD = [pending_pod]
TOP_USERS = [ResourceUsage(user_name="user_name", cpu_usage=2, mem_usage=1000)]
# A Kubernetes event explaining why the pending pod cannot be scheduled.
event = MagicMock(spec=V1Event)
event.message = "insufficient memory"
event.reason = "insufficient memory"
event.involved_object = V1ObjectReference(name="test-experiment")
event.metadata = V1ObjectMeta(name="test-experiment")
EVENTS = [event]
class ViewMocks:
    """Bundle of pytest-mock patches for commands.experiment.view.

    Each attribute is the MagicMock returned by mocker.patch, pre-loaded
    with a sensible default return value so the happy-path test passes
    unmodified; individual tests override return_value/side_effect.
    """
    def __init__(self, mocker):
        self.get_run = mocker.patch('commands.experiment.view.Run.get')
        self.get_run.return_value = TEST_RUNS[0]
        self.get_pods = mocker.patch('commands.experiment.view.get_namespaced_pods')
        self.get_pods.return_value = TEST_PODS
        self.get_namespace = mocker.patch('commands.experiment.view.get_kubectl_current_context_namespace')
        self.format_timestamp = mocker.patch('platform_resources.run.format_timestamp_for_cli')
        self.format_timestamp.return_value = '2018-04-26 13:43:01'
        self.sum_cpu_resources = mocker.patch("commands.experiment.view.sum_cpu_resources")
        self.sum_cpu_resources.return_value = "100m"
        self.sum_mem_resources = mocker.patch("commands.experiment.view.sum_mem_resources")
        self.sum_mem_resources.return_value = "1Gi"
        self.get_experiment = mocker.patch('commands.experiment.view.Experiment.get')
        self.get_experiment.return_value = TEST_EXPERIMENT
@pytest.fixture
def prepare_mocks(mocker) -> ViewMocks:
    """Pytest fixture: patch the view module and hand the mocks to the test."""
    return ViewMocks(mocker=mocker)
# --- CLI-level tests: invoke the `view` click command against the mocks. ---
def test_view_experiment_success(prepare_mocks: ViewMocks):
    runner = CliRunner()
    result = runner.invoke(view.view, [TEST_RUNS[0].name], catch_exceptions=False)
    assert prepare_mocks.get_run.call_count == 1, "Run was not retrieved"
    assert TEST_RUNS[0].name in result.output, "Bad output."
    assert TEST_RUNS[0].namespace in result.output, "Bad output."
    assert "2018-04-26 13:43:01" in result.output, result.output
    assert "100m" in result.output, "Bad output"
    assert "1Gi" in result.output, "Bad output"
    assert result.exit_code == 0
def test_view_experiment_cpu_resources_parse_fail(prepare_mocks: ViewMocks):
    # A ValueError from CPU summing should surface as a parse-error message.
    prepare_mocks.sum_cpu_resources.side_effect = ValueError("error")
    runner = CliRunner()
    result = runner.invoke(view.view, [TEST_RUNS[0].name], catch_exceptions=False)
    assert Texts.RESOURCES_SUM_PARSING_ERROR_MSG.format(error_msg="error") in result.output, "Bad output"
    assert result.exit_code == 1
def test_view_experiment_mem_resources_parse_fail(prepare_mocks: ViewMocks):
    # Same as above, but for the memory-summing helper.
    prepare_mocks.sum_mem_resources.side_effect = ValueError("error")
    runner = CliRunner()
    result = runner.invoke(view.view, [TEST_RUNS[0].name], catch_exceptions=False)
    assert Texts.RESOURCES_SUM_PARSING_ERROR_MSG.format(error_msg="error") in result.output, "Bad output"
    assert result.exit_code == 1
def test_view_experiments_not_found(prepare_mocks: ViewMocks):
    prepare_mocks.get_run.return_value = None
    runner = CliRunner()
    result = runner.invoke(view.view, ["missing"])
    assert prepare_mocks.get_run.call_count == 1, "Run retrieval was not called"
    assert result.exit_code == 2
    assert Texts.NOT_FOUND_ERROR_MSG.format(experiment_name="missing") in result.output, "Bad output."
def test_view_experiments_no_argument(prepare_mocks: ViewMocks):
    runner = CliRunner()
    result = runner.invoke(view.view, [])  # missing argument
    assert prepare_mocks.get_run.call_count == 0, "Experiments retrieval was not called"
    assert "Usage:" in result.output, "Bad output."
def test_view_experiment_failure(prepare_mocks: ViewMocks):
    prepare_mocks.get_run.side_effect = RuntimeError
    runner = CliRunner()
    result = runner.invoke(view.view, ["missing"])
    assert prepare_mocks.get_run.call_count == 1, "Experiments retrieval was not called"
    assert result.exit_code == 1
def test_view_experiment_no_pods(prepare_mocks: ViewMocks):
    prepare_mocks.get_pods.return_value = []
    runner = CliRunner()
    result = runner.invoke(view.view, [TEST_RUNS[0].name])
    assert prepare_mocks.get_run.call_count == 1, "Experiments were not retrieved"
    # With no pods the command prints a fixed-size report (17 lines).
    assert result.output.count("\n") == 17, "Bad output."
# --- Unit tests for the view module's pure formatting helpers. ---
def test_container_volume_mounts_to_msg():
    volume_mount = MagicMock()
    volume_mount.name = 'mount_name'
    volume_mount.mount_path = 'mount_path'
    volume_mount.rwro = 'ro'
    volume_mounts = [volume_mount]
    msg = view.container_volume_mounts_to_msg(volume_mounts=volume_mounts)
    assert f'{volume_mount.name} <{volume_mount.rwro}> @ {volume_mount.mount_path}' in msg
def test_unify_units():
    # Each check maps a raw resource string to its normalized display form.
    cpu_checks = [{'test': '4.3', 'expected': '4300m'},
                  {'test': '1m', 'expected': '1m'},
                  {'test': '0.1', 'expected': '100m'}
                  ]
    mem_checks = [{'test': '5Gi', 'expected': '5GiB'},
                  {'test': '2Mi', 'expected': '2MiB'},
                  {'test': '1kb', 'expected': '1kb'}
                  ]
    for check in cpu_checks:
        resource_values = view.unify_units("cpu", check['test'])
        assert check['expected'] in resource_values
    for check in mem_checks:
        resource_values = view.unify_units("memory", check['test'])
        assert check['expected'] in resource_values
def test_container_resources_to_msg():
    resources = MagicMock()
    resources.requests = {'cpu': '1.0', 'memory': '1Gi'}
    resources.limits = {'cpu': '4000m', 'memory': '2gi'}
    msg = view.container_resources_to_msg(resources=resources)
    assert Texts.CONTAINER_REQUESTS_LIST_HEADER.format("") in msg
    assert 'cpu: 1000m' in msg
    assert f'memory: {resources.requests["memory"]}B' in msg
    assert '- Limits:' in msg
    assert f'cpu: {resources.limits["cpu"]}' in msg
    assert f'memory: {resources.limits["memory"]}' in msg
# --- Unit tests for summing CPU/memory resource strings (None entries skipped). ---
def test_sum_cpu_resources_empty_list():
    cpu_resources = []
    expected_result = "0m"
    result = view.sum_cpu_resources(cpu_resources)
    assert result == expected_result
def test_sum_cpu_resources_example_list():
    cpu_resources = ["30m", "40m", "700m"]
    expected_result = "770m"
    result = view.sum_cpu_resources(cpu_resources)
    assert result == expected_result
def test_sum_cpu_resources_example_list_with_none():
    cpu_resources = ["30m", "40m", "700m", None, "700m", "33m"]
    expected_result = "1503m"
    result = view.sum_cpu_resources(cpu_resources)
    assert result == expected_result
def test_sum_cpu_resources_example_list_with_mixed():
    # Whole/fractional cores ("1", "2.5") mix with millicore strings.
    cpu_resources = ["30m", "40m", "700m", None, "700m", "33m", "1", "2.5"]
    expected_result = "5003m"
    result = view.sum_cpu_resources(cpu_resources)
    assert result == expected_result
def test_sum_mem_resources_empty_list():
    mem_resources = []
    expected_result = "0KiB"
    result = view.sum_mem_resources(mem_resources)
    assert result == expected_result
def test_sum_mem_resources_example_list():
    mem_resources = ["10Gi", "34Mi", "50Mi", "950Mi", "50Ki", "60Ei"]
    expected_result = "60EiB 11GiB 10MiB 50KiB"
    result = view.sum_mem_resources(mem_resources)
    assert result == expected_result
def test_sum_mem_resources_example_with_none():
    mem_resources = [None, "10Gi", "34Mi", None, "50Mi", "950Mi", None, "50Ki", "60Ei"]
    expected_result = "60EiB 11GiB 10MiB 50KiB"
    result = view.sum_mem_resources(mem_resources)
    assert result == expected_result
def test_sum_mem_resources_example_with_mixed():
    # Binary (Ki) and decimal (K / bare bytes) units are combined.
    mem_resources = ["50Ki", "1000K", "1024", "1000000", "52Ki"]
    expected_result = "2MiB 8KiB"
    result = view.sum_mem_resources(mem_resources)
    assert result == expected_result
def test_displaying_pending_pod(prepare_mocks: ViewMocks, mocker):
    """A QUEUED experiment with a Pending pod shows the cause and top consumers."""
    prepare_mocks.get_pods.return_value = PENDING_POD
    highest_usage_mock = mocker.patch("commands.experiment.view.get_highest_usage")
    # Returns a (top CPU users, top memory users) pair.
    highest_usage_mock.return_value = TOP_USERS, TOP_USERS
    pod_events_mock = mocker.patch("commands.experiment.view.get_pod_events")
    pod_events_mock.return_value = EVENTS
    runner = CliRunner()
    result = runner.invoke(view.view, [TEST_RUNS[0].name], catch_exceptions=False)
    assert "Experiment is in QUEUED state due to insufficient amount of memory." in result.output
    assert "Top CPU consumers: user_name" in result.output
    assert "Top memory consumers: user_name" in result.output
| StarcoderdataPython |
1898570 | import os
import argparse
import sys
import pickle
from pathlib import Path
from jax import random
from sklearn.decomposition import PCA
from generate_data import gen_source_data
from models import init_invertible_mlp_params, invertible_mlp_fwd
from train import train
def parse():
    """Argument parser for all configs.

    Groups: data generation, RNG seeds, training/optimization, CUDA, output.
    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='')
    # data generation args
    parser.add_argument('-n', type=int, default=5,
                        help="number of latent components")
    parser.add_argument('-k', type=int, default=11,
                        help="number of latent states")
    parser.add_argument('-t', type=int, default=100000,
                        help="number of time steps")
    parser.add_argument('--mix-depth', type=int, default=4,
                        help="number of mixing layers")
    parser.add_argument('--prob-stay', type=float, default=0.99,
                        help="probability of staying in a state")
    # NOTE(review): store_true with default=True means this flag can never be
    # turned off from the command line — confirm intent (same for --cuda below).
    parser.add_argument('--whiten', action='store_true', default=True,
                        help="PCA whiten data as preprocessing")
    # set seeds
    parser.add_argument('--data-seed', type=int, default=0,
                        help="seed for initializing data generation")
    parser.add_argument('--mix-seed', type=int, default=0,
                        help="seed for initializing mixing mlp")
    parser.add_argument('--est-seed', type=int, default=7,
                        help="seed for initializing function estimator mlp")
    parser.add_argument('--distrib-seed', type=int, default=7,
                        help="seed for estimating distribution paramaters")
    # training & optimization parameters
    parser.add_argument('--hidden-units', type=int, default=10,
                        help="num. of hidden units in function estimator MLP")
    parser.add_argument('--learning-rate', type=float, default=3e-4,
                        help="learning rate for training")
    parser.add_argument('--num-epochs', type=int, default=100,
                        help="number of training epochs")
    parser.add_argument('--subseq-len', type=int, default=100,
                        help="length of subsequences")
    parser.add_argument('--minibatch-size', type=int, default=64,
                        help="number of subsequences in a minibatch")
    parser.add_argument('--decay-rate', type=float, default=1.,
                        help="decay rate for training (default to no decay)")
    parser.add_argument('--decay-interval', type=int, default=15000,
                        help="interval (in iterations) for full decay of LR")
    # CUDA settings
    parser.add_argument('--cuda', action='store_true', default=True,
                        help="use GPU training")
    # saving
    parser.add_argument('--out-dir', type=str, default="output/",
                        help="location where data is saved")
    args = parser.parse_args()
    return args
def main():
    """Generate HM-nICA data, train the model, and pickle the results."""
    args = parse()
    # check theoretical assumption satisfied
    # NOTE(review): assert is stripped under `python -O`; raise ValueError
    # instead if this check must always run.
    assert args.k > 2*args.n, "K not set high enough for given N"
    # generate source data
    s_data, state_seq, mu, D, A = gen_source_data(args.n, args.k, args.t,
                                                  args.prob_stay,
                                                  random_seed=args.data_seed)
    # mix the sources to create observable signals
    mix_key = random.PRNGKey(args.mix_seed)
    mix_params = init_invertible_mlp_params(mix_key, args.n,
                                            args.mix_depth)
    x_data = invertible_mlp_fwd(mix_params, s_data)
    # preprocessing
    if args.whiten:
        pca = PCA(whiten=True)
        x_data = pca.fit_transform(x_data)
    # create variable dicts for training
    data_dict = {'x_data': x_data,
                 's_data': s_data,
                 'state_seq': state_seq}
    train_dict = {'mix_depth': args.mix_depth,
                  'hidden_size': args.hidden_units,
                  'learning_rate': args.learning_rate,
                  'num_epochs': args.num_epochs,
                  'subseq_len': args.subseq_len,
                  'minib_size': args.minibatch_size,
                  'decay_rate': args.decay_rate,
                  'decay_steps': args.decay_interval}
    seed_dict = {'est_mlp_seed': args.est_seed,
                 'est_distrib_seed': args.distrib_seed}
    # set up dict to save results
    results_dict = {}
    results_dict['data_config'] = {'N': args.n, 'K': args.k, 'T': args.t,
                                   'mix_depth': args.mix_depth,
                                   'p_stay': args.prob_stay,
                                   'data_seed': args.data_seed,
                                   'mix_seed': args.mix_seed}
    results_dict['train_config'] = {'train_vars': train_dict,
                                    'train_seeds': seed_dict}
    results_dict['results'] = []
    # train HM-nICA model
    s_est, sort_idx, results_dict, est_params = train(
        data_dict, train_dict, seed_dict, results_dict
    )
    # save (appends to the pickle file on repeated runs: mode 'ab')
    # NOTE(review): string concatenation assumes out-dir ends with a slash;
    # os.path.join would be safer — confirm callers always pass "output/".
    if not os.path.exists(args.out_dir):
        Path(args.out_dir).mkdir(parents=True)
    with open(args.out_dir+"all_results.pickle", 'ab') as out:
        pickle.dump(results_dict, out, pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
3420832 | <filename>flask_flatpages/page.py<gh_stars>100-1000
"""Define flatpage instance."""
import yaml
from werkzeug.utils import cached_property
class Page(object):
    """Simple class to store all necessary information about a flatpage.
    Main purpose is to render the page's content with a ``html_renderer``
    function.
    """
    def __init__(self, path, meta, body, html_renderer, folder):
        """Initialize Page instance.
        :param path: Page path.
        :param meta: Page meta data in YAML format.
        :param body: Page body.
        :param html_renderer: HTML renderer function.
        :param folder: Name of the folder the page is contained in.
        """
        #: Path this page was obtained from, as in ``pages.get(path)``
        self.path = path
        #: Content of the page
        self._meta = meta
        self.body = body
        #: Renderer function
        self.html_renderer = html_renderer
        #: The name of the folder the page is contained in.
        self.folder = folder
    def __getitem__(self, name):
        """Shortcut for accessing metadata.
        ``page['title']`` or, in a template, ``{{ page.title }}`` are
        equivalent to ``page.meta['title']``.
        """
        return self.meta[name]
    def __html__(self):
        """
        Return HTML for use in Jinja templates.
        In a template, ``{{ page }}`` is equivalent to
        ``{{ page.html|safe }}``.
        """
        return self.html
    def __repr__(self):
        """Machine representation of :class:`Page` instance."""
        return '<Page %r>' % self.path
    @cached_property
    def html(self):
        """Content of the page, rendered as HTML by the configured renderer."""
        # cached_property: the renderer runs at most once per Page instance.
        return self.html_renderer(self)
    @cached_property
    def meta(self):
        """Store a dict of metadata parsed from the YAML header of the file.
        :raises ValueError: if the YAML header parses to a non-dict value.
        """
        meta = yaml.safe_load(self._meta)
        # YAML documents can be any type but we want a dict
        # eg. yaml.safe_load('') -> None
        # yaml.safe_load('- 1\n- a') -> [1, 'a']
        if not meta:
            return {}
        if not isinstance(meta, dict):
            raise ValueError("Expected a dict in metadata for '{0}', got {1}".
                             format(self.path, type(meta).__name__))
        return meta
| StarcoderdataPython |
6488832 |
# setuptools, not distutils: distutils.core.setup silently ignores the
# setuptools-only keywords used below (python_requires, install_requires,
# entry_points), so the original packaging metadata never took effect.
from setuptools import setup

# Keep runtime dependencies in requirements.txt so pip and setup.py agree.
with open('requirements.txt') as f:
    requirements = f.read().splitlines()

setup(
    name='vimspector',
    # PEP 440: ">=3.6.*" is an invalid specifier; plain ">=3.6" is correct.
    python_requires=">=3.6",
    packages=[
        'module',
        'module.foobar'
    ],
    install_requires=requirements,
    entry_points={
        'console_scripts': [
            "mod=module.main:test_group",
        ]
    }
)
| StarcoderdataPython |
258682 | <reponame>xiaohalo/LeetCode
from __future__ import print_function
# Time: O(n)
# Space: O(1)
#
# Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word in the string.
#
# If the last word does not exist, return 0.
#
# Note: A word is defined as a character sequence consists of non-space characters only.
#
# For example,
# Given s = "Hello World",
# return 5.
#
class Solution:
    # @param s, a string
    # @return an integer
    def lengthOfLastWord(self, s):
        """Length of the last space-separated word in *s* (0 if there is none)."""
        count = 0
        # Scan from the right: count non-space characters, and stop at the
        # first space that follows at least one word character.
        for ch in reversed(s):
            if ch != ' ':
                count += 1
            elif count:
                break
        return count
# Time: O(n)
# Space: O(n)
class Solution2:
    # @param s, a string
    # @return an integer
    def lengthOfLastWord(self, s):
        """Length of the last word via string splitting (0 if there is none)."""
        # rsplit with maxsplit=1 isolates the final space-delimited token;
        # for an empty or all-space input the token is '' with length 0.
        return len(s.strip().rsplit(" ", 1)[-1])
if __name__ == "__main__":
print(Solution().lengthOfLastWord("Hello World"))
print(Solution2().lengthOfLastWord(""))
| StarcoderdataPython |
243820 | # Основной каркас программы
from app.player import Player
from app.rate import Rate
from app.track import Track
from app.session import Session
from app.file import File
from app.utils import *
from app.make import *
from app.string import *
from app.default import default__info
# Program data: defaults, or loaded from file if it exists.
info = make__python(File.read(file__path, file__name, default__info))
# BLOCK: menu-action handlers for the console interface.
# Mutating handlers persist `info` back to disk after each change.
def add_player():
    """Add a player and save."""
    caption(caption__1__1)
    Player.add__player(info)
    File.write(file__path, file__name, make__json(info))
def change_player():
    """Edit a player and save."""
    caption(caption__1__2)
    Player.change__player(info)
    File.write(file__path, file__name, make__json(info))
def add_session():
    """Add a session and save."""
    caption(caption__2__1)
    Session.add__session(info)
    File.write(file__path, file__name, make__json(info))
def current_rate_in_interval():
    """Show sessions within a time interval (read-only)."""
    caption(caption__2__2)
    Session.sessions__in__interval(info)
def rezults_in_session():
    """Show the results of one session (read-only)."""
    caption(caption__2__3)
    Session.rezults__in__session(info)
def summary_rezult_in_session():
    """Show the summary result of one session (read-only)."""
    caption(caption__2__4)
    Session.summary__rezult__in__session(info)
def summary_rezult_in_all():
    """Show the summary result over all sessions (read-only)."""
    caption(caption__2__5)
    Session.summary__rezult__in__all(info)
def add_rate():
    """Add a rate and save."""
    caption(caption__3__1)
    Rate.add__rate(info)
    File.write(file__path, file__name, make__json(info))
def change_rate():
    """Edit a rate and save."""
    caption(caption__3__2)
    Rate.change__rate(info)
    File.write(file__path, file__name, make__json(info))
def club_profit():
    """Show the club's profit (read-only)."""
    caption(caption__3__3)
    Rate.club__profit(info)
def add_track():
    """Add a track and save."""
    caption(caption__4__1)
    Track.add__track(info)
    File.write(file__path, file__name, make__json(info))
def change_track():
    """Edit a track and save."""
    caption(caption__4__2)
    Track.change__track(info)
    File.write(file__path, file__name, make__json(info))
# Main menu loop: top level selects a section (players / sessions / rates /
# tracks); each section runs its own submenu until the user exits.
# NOTE(review): int(a)/int(b) raise ValueError on non-numeric input — confirm
# whether crashing on bad input is acceptable here.
caption(app__info)
while True:
    caption(init__menu)
    a = input()
    if int(a) == 1:
        while True:
            caption(caption__1)
            Player.player__list(info)
            b = input(menu__1)
            if int(b) == 1:
                loop(add_player, menu__1__1)
            elif int(b) == 2:
                loop(change_player, menu__1__2)
            else:
                break
    elif int(a) == 2:
        while True:
            caption(caption__2)
            Session.session__table(info)
            b = input(menu__2)
            if int(b) == 1:
                loop(add_session, menu__2__1)
            elif int(b) == 2:
                loop(current_rate_in_interval, menu__2__2)
            elif int(b) == 3:
                loop(rezults_in_session, menu__2__3)
            # NOTE(review): options 4 and 5 reuse menu__2__3 — looks like a
            # copy-paste slip (menu__2__4 / menu__2__5 may be intended).
            elif int(b) == 4:
                loop(summary_rezult_in_session, menu__2__3)
            elif int(b) == 5:
                loop(summary_rezult_in_all, menu__2__3)
            else:
                break
    elif int(a) == 3:
        while True:
            caption(caption__3)
            Rate.rate__table(info)
            b = input(menu__3)
            if int(b) == 1:
                loop(add_rate, menu__3__1)
            elif int(b) == 2:
                loop(change_rate, menu__3__2)
            elif int(b) == 3:
                loop(club_profit, menu__3__3)
            else:
                break
    elif int(a) == 4:
        while True:
            caption(caption__4)
            Track.track__list(info)
            b = input(menu__4)
            if int(b) == 1:
                loop(add_track, menu__4__1)
            elif int(b) == 2:
                loop(change_track, menu__4__2)
            else:
                break
    else:
        break
6465593 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServerResponse.
From build dir, run: ctest -R PyQgsServerResponse -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import unittest
__author__ = '<NAME>'
__date__ = '29/04/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
from qgis.server import QgsBufferServerResponse
class QgsServerResponseTest(unittest.TestCase):
    """Exercises QgsBufferServerResponse: headers, status code, body buffering."""
    def test_responseHeaders(self):
        """Test response headers"""
        headers = {'header-key-1': 'header-value-1', 'header-key-2': 'header-value-2'}
        response = QgsBufferServerResponse()
        for k, v in headers.items():
            response.setHeader(k, v)
        for k, v in response.headers().items():
            self.assertEqual(headers[k], v)
        # Removing one header leaves the rest intact.
        response.removeHeader('header-key-1')
        self.assertEqual(response.headers(), {'header-key-2': 'header-value-2'})
        # Re-adding restores the original set.
        response.setHeader('header-key-1', 'header-value-1')
        for k, v in response.headers().items():
            self.assertEqual(headers[k], v)
    def test_statusCode(self):
        """Test return status HTTP code"""
        response = QgsBufferServerResponse()
        response.setStatusCode(222)
        self.assertEqual(response.statusCode(), 222)
    def test_write(self):
        """Test that writing on the buffer sets the body"""
        # Set as str — the body is only visible after finish() flushes it.
        response = QgsBufferServerResponse()
        response.write('Greetings from Essen Linux Hotel 2017 Hack Fest!')
        self.assertEqual(bytes(response.body()), b'')
        response.finish()
        self.assertEqual(bytes(response.body()), b'Greetings from Essen Linux Hotel 2017 Hack Fest!')
        # finish() also sets Content-Length from the buffered body.
        self.assertEqual(response.headers(), {'Content-Length': '48'})
        # Set as a byte array
        response = QgsBufferServerResponse()
        response.write(b'Greetings from Essen Linux Hotel 2017 Hack Fest!')
        self.assertEqual(bytes(response.body()), b'')
        response.finish()
        self.assertEqual(bytes(response.body()), b'Greetings from Essen Linux Hotel 2017 Hack Fest!')
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3436529 | # 给定不同面额的硬币 coins 和一个总金额 amount。编写一个函数来计算可以凑成总金额所需的最少的硬币个数。如果没有任何一种硬币组合能组成总金额,返回 -1。
#
# 示例 1:
#
# 输入: coins = [1, 2, 5], amount = 11
# 输出: 3
# 解释: 11 = 5 + 5 + 1
# 示例 2:
#
# 输入: coins = [2], amount = 3
# 输出: -1
# 说明:
# 你可以认为每种硬币的数量是无限的。
#
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/coin-change
# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
from typing import List
class Solution:
    def coinChange(self, coins: List[int], amount: int) -> int:
        """Return the minimum number of coins from *coins* needed to sum to *amount*.

        Classic unbounded-knapsack DP: dp[i] holds the fewest coins that make
        amount i. Returns -1 when the amount cannot be formed, and 0 for
        amount == 0 (no coins needed).

        Improvements over the original: drops the unused ``n = len(coins)``
        local, and seeds dp[0] = 0 up front instead of special-casing i == 0
        inside the loop.
        """
        INF = float('inf')
        # dp[0] = 0: zero coins make amount zero; everything else starts unreachable.
        dp = [0] + [INF] * amount
        for i in range(1, amount + 1):
            for c in coins:
                # Coin c can extend the best solution for amount i - c.
                if c <= i and dp[i - c] + 1 < dp[i]:
                    dp[i] = dp[i - c] + 1
        return -1 if dp[amount] == INF else dp[amount]
if dp[amount] == float('inf'):
return -1
return dp[amount]
if __name__ == '__main__':
    # Demo: the two examples from the problem statement plus a larger case.
    s = Solution()
    print(s.coinChange([2], 3))
    print(s.coinChange([1, 2, 5], 11))
    print(s.coinChange([3, 7, 405, 436], 8839))
5083075 | """
This test is only for Chrome!
(Verify that your chromedriver is compatible with your version of Chrome.)
"""
import colorama
from seleniumbase import BaseCase
class ChromedriverTests(BaseCase):
    def test_chromedriver_matches_chrome(self):
        """Compare the chromedriver major version against the Chrome browser
        major version and print upgrade instructions when they differ."""
        if self.browser != "chrome":
            print("\n This test is only for Chrome!")
            print(" (Run with: '--browser=chrome')")
            self.skip("This test is only for Chrome!")
        driver_capabilities = self.driver.capabilities
        # Older drivers report "version"; newer ones "browserVersion".
        if "version" in driver_capabilities:
            chrome_version = driver_capabilities["version"]
        else:
            chrome_version = driver_capabilities["browserVersion"]
        # BUG FIX: major versions must be compared as integers; the original
        # compared strings, so e.g. '73' < '100' evaluated to False.
        major_chrome_version = int(chrome_version.split('.')[0])
        chrome_dict = self.driver.capabilities["chrome"]
        chromedriver_version = chrome_dict["chromedriverVersion"]
        # "chromedriverVersion" looks like "83.0.4103.39 (commit-hash...)".
        chromedriver_version = chromedriver_version.split(' ')[0]
        major_chromedriver_version = int(chromedriver_version.split('.')[0])
        colorama.init(autoreset=True)
        c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
        c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
        c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
        c4 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
        c5 = colorama.Fore.RED + colorama.Back.LIGHTGREEN_EX
        cr = colorama.Style.RESET_ALL
        pr_chromedriver_version = c3 + chromedriver_version + cr
        pr_chrome_version = c2 + chrome_version + cr
        message = (
            "\n"
            "* Your version of chromedriver is: %s\n"
            "*\n* And your version of Chrome is: %s"
            "" % (pr_chromedriver_version, pr_chrome_version))
        print(message)
        if major_chromedriver_version < major_chrome_version:
            # Driver is older than the browser: suggest upgrading the driver.
            install_sb = (
                "seleniumbase install chromedriver %s" % major_chrome_version)
            pr_install_sb = c1 + install_sb + cr
            up_msg = "You may want to upgrade your version of chromedriver:"
            up_msg = c4 + up_msg + cr
            message = ("*\n* %s\n*\n* >>> %s" % (up_msg, pr_install_sb))
            print(message)
        elif major_chromedriver_version > major_chrome_version:
            # Driver is newer than the browser: suggest upgrading Chrome.
            up_msg = "You may want to upgrade your version of Chrome:"
            up_msg = c5 + up_msg + cr
            up_url = c1 + "chrome://settings/help" + cr
            message = ("*\n* %s\n*\n* See: %s" % (up_msg, up_url))
            print(message)
        else:
            up_msg = (
                "Success! Your chromedriver is compatible with your Chrome!")
            up_msg = c1 + up_msg + cr
            message = ("*\n* %s\n" % up_msg)
            print(message)
| StarcoderdataPython |
12826675 | <reponame>mhorowitz/pykrb5
# Copyright (c) 2013, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This is a hokey test client. It is nowhere near a real zephyr
notice creator."""
import socket
import struct
import sys
import time
import krb5.client
def z_make_ascii_16(value):
    """Render a 16-bit integer in the zephyr ASCII wire format, e.g. '0x00FF'."""
    return '0x{0:04X}'.format(value)
def z_make_ascii_32(value):
    """Render a 32-bit integer in the zephyr ASCII wire format, e.g. '0x00000001'."""
    return '0x{0:08X}'.format(value)
def z_make_ascii(value):
    # Hex-encode *value* (a byte string) four bytes at a time, producing
    # space-separated groups such as "0x41424344 0x45".
    # NOTE(review): Python 2 only -- relies on xrange and on iterating a str
    # yielding one-character byte strings suitable for ord().
    return " ".join("0x" + "".join("%02X" % ord(c) for c in value[i:i+4])
                    for i in xrange(0, len(value), 4))
def z_make_zcode(value):
    """Z-code a string: prefix 'Z' and escape 0xFF -> 0xFF 0xF1 and
    0x00 -> 0xFF 0xF0 (0xFF is escaped first so inserted bytes survive)."""
    escaped = value.replace("\xff", "\xff\xf1")
    escaped = escaped.replace("\x00", "\xff\xf0")
    return "Z" + escaped
# --- Build and send one zephyr notice via the local host manager -----------
# Fields are joined with NUL delimiters; the Kerberos session provides the
# authenticator and the keyed checksum over the notice.
DELIM = "\0"
REALM = "ATHENA.MIT.EDU"
KEY_USAGE = 1027
# Sender's IPv4 address in packed network byte order.
from_ip = socket.inet_aton(socket.gethostbyname(socket.gethostname()))
kclient = krb5.client.Client()
session = kclient.get_session("zephyr/zephyr@" + REALM)
version = "ZEPH0.2"
kind = 0 # unsafe
# Unique id: sender IP + timestamp (seconds) + 0.
uid = struct.pack("!4sii", from_ip, time.time(), 0)
ztime = time.time()  # NOTE(review): appears unused below
port = 0
auth = 1 # yes
authent = session.make_ap_req_bytes()
class_ = "message"
class_inst = "personal"
opcode = ""
sender = str(session.client)
recipient = sys.argv[1]
default_format = ""
multiuid = uid
checksum = 0  # placeholder; recomputed once the header is assembled
multinotice = ""
sig = "py"
message = sys.argv[2]
# Qualify bare recipients with the default realm.
if "@" not in recipient:
    recipient += "@" + REALM
# Header fields that precede the checksum; slot 1 (None) is patched with the
# total header count below.
before_checksum = [
    version,
    None,
    z_make_ascii_32(kind),
    z_make_ascii(uid),
    z_make_ascii_16(port),
    z_make_ascii_32(auth),
    z_make_ascii_32(len(authent)),
    z_make_zcode(authent),
    class_,
    class_inst,
    opcode,
    sender,
    recipient,
    default_format
]
after_checksum = [
    multinotice,
    z_make_ascii(multiuid)
]
body = [
    sig,
    message
]
# +1 accounts for the checksum field itself.
header_count = len(before_checksum) + 1 + len(after_checksum)
before_checksum[1] = z_make_ascii_32(header_count)
# The checksum covers everything except the checksum field.
checksum_data = DELIM.join(before_checksum + after_checksum + body)
checksum = z_make_zcode(session.key.make_checksum(KEY_USAGE, checksum_data))
fields = before_checksum + [checksum] + after_checksum + body
notice = DELIM.join(fields)
# Deliver via UDP to the local zephyr host manager.
addr = socket.getaddrinfo("localhost", "zephyr-hm", 0, 0, socket.IPPROTO_UDP)[0]
s = socket.socket(*addr[0:3])
s.sendto(notice, addr[4])
| StarcoderdataPython |
11333213 | <gh_stars>0
import os
import sys
import shutil
import subprocess
import config
from utils import colors
from template import next_step
def log_error(logfile='error.log', error="", msg="", exit_on_error=True):
    """Persist *error* (utf-8 bytes) to *logfile*, print a colored hint to
    the user, and optionally abort the process with exit code 2.

    No-op when *error* is empty.
    """
    if not error:
        return
    with open(logfile, 'w') as fd:
        fd.write(error.decode('utf-8'))
    template = "{red}{msg} Please consult {yellow}{logfile}{red} file for details.{end}"
    print(template.format(
        msg=msg,
        logfile=logfile,
        red=colors.FAIL,
        yellow=colors.WARNING,
        end=colors.ENDC
    ))
    if exit_on_error:
        sys.exit(2)
def run(params, logfile, msg, exit_on_error=True):
    """Execute *params* as a subprocess, capture its stderr, and route any
    error output through log_error() (which may exit the process)."""
    process = subprocess.Popen(
        params,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    output, error = process.communicate()
    log_error(
        logfile,
        error,
        msg,
        exit_on_error
    )
class External():
    # Base class for wrappers around external command-line tools.
    # `_util` caches the resolved executable path per class; `errors` is a
    # single list shared by ALL subclasses, accumulating every missing-tool
    # message across the module.
    _util = None
    errors = []
    cmd_name = ""
    error_msg = ""
    @classmethod
    def cmd(cls):
        # Lazily resolve the executable with shutil.which() and cache the
        # result on the calling class (each subclass gets its own cache slot
        # the first time cmd() is invoked on it).
        if cls._util is None:
            cls._util = shutil.which(cls.cmd_name)
            if not cls._util:
                cls.errors.append(cls.error_msg)
        return cls._util
class Bower(External):
    # Wrapper around the `bower` front-end package manager.
    cmd_name = "bower"
    error_msg = 'Bower executable could not be found.'
    @classmethod
    def install(cls, static_dir, dependencies):
        # NOTE: changes the process working directory as a side effect, since
        # bower installs relative to the current directory.
        os.chdir(static_dir)
        for dependency in dependencies:
            cls.install_dependency(dependency)
    @classmethod
    @next_step("Bower")
    def install_dependency(cls, dependency):
        # Install one bower package; failures are logged but non-fatal
        # (exit_on_error=False passed to run()).
        print("{}...\t\t\t".format(dependency.title()), end="", flush=True)
        run(
            [cls.cmd(), 'install', dependency],
            config.LOG_BOWER,
            "An error occured during the installation of {dep}.".format(
                dep=dependency
            ),
            False
        )
class Virtualenv(External):
    # Wrapper around the `virtualenv` tool: creates a venv and installs a
    # requirements file into it with the venv's own pip.
    cmd_name = "virtualenv"
    error_msg = 'Virtualenv executable could not be found.'
    @classmethod
    def venv_bin_dir(cls, venv_path):
        # Path of the venv's bin/ directory (POSIX layout).
        return os.path.join(venv_path, 'bin')
    @classmethod
    def pip_bin(cls, venv_path):
        # Path of the pip executable inside the venv.
        return os.path.join(cls.venv_bin_dir(venv_path), 'pip')
    @classmethod
    def install(cls, venv_path, requirements_file):
        # Create the venv, then install the project's requirements into it.
        cls.install_venv(venv_path)
        cls.install_dependencies(venv_path, requirements_file)
    @classmethod
    @next_step("Creating the virtualenv...\t")
    def install_venv(cls, venv_path):
        # If virtualenv is requested, then create it and install the required libs to work
        # NOTE(review): the flag is usually spelled '--no-site-packages'
        # (plural) -- confirm against the virtualenv version in use.
        run(
            [cls.cmd(), venv_path, '--no-site-package'],
            config.LOG_VIRTUALENV,
            "An error occured during the creation of the virtualenv."
        )
    @classmethod
    @next_step("Installing Python Dependencies...")
    def install_dependencies(cls, venv_path, requirements_file):
        # `pip install -r <requirements_file>` using the venv's pip.
        run(
            [cls.pip_bin(venv_path), 'install', '-r', requirements_file],
            config.LOG_PIP,
            "An error occured during the installation of dependencies.",
        )
class Git(External):
    # Wrapper around the `git` tool: initializes a repository and generates
    # a .gitignore from a template. `_util` is pre-set so cmd() never probes
    # PATH for git.
    _util = "git"
    cmd_name = "git"
    @classmethod
    def install(cls, app_path, gitignore_template, gitignore_file):
        """Initialize a git repository at *app_path* and create its .gitignore."""
        cls.install_git(app_path)
        cls.install_gitignore(gitignore_template, gitignore_file)
    @classmethod
    @next_step("Git Init...\t\t\t")
    def install_git(cls, app_path):
        # Run `git init <app_path>`.
        # BUG FIX: the error message was copy-pasted from Virtualenv and
        # wrongly mentioned "the creation of the virtualenv".
        run(
            [cls.cmd(), 'init', app_path],
            config.LOG_GIT,
            "An error occured during the initialization of the git repository."
        )
    @classmethod
    @next_step("Generating Gitignore...\t\t")
    def install_gitignore(cls, gitignore_template, gitignore_file):
        # Copy the template .gitignore into place.
        shutil.copyfile(gitignore_template, gitignore_file)
| StarcoderdataPython |
4934341 | """!
@brief Examples of usage and demonstration of abilities of CURE algorithm in cluster analysis.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.utils import read_sample
from pyclustering.utils import timedcall
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.cure import cure
def template_clustering(number_clusters, path, number_represent_points=5, compression=0.5, draw=True, ccore_flag=True):
    """Run CURE on the sample stored at *path*, print the processing time,
    and (optionally) visualize the clusters together with their
    representor points ('*') and means ('o')."""
    points = read_sample(path)
    algorithm = cure(points, number_clusters, number_represent_points, compression, ccore_flag)
    (elapsed, _) = timedcall(algorithm.process)
    clusters = algorithm.get_clusters()
    representors = algorithm.get_representors()
    means = algorithm.get_means()
    print("Sample: ", path, "\t\tExecution time: ", elapsed, "\n")
    if draw is True:
        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, points)
        for index in range(len(clusters)):
            visualizer.append_cluster_attribute(0, index, representors[index], '*', 10)
            visualizer.append_cluster_attribute(0, index, [means[index]], 'o')
        visualizer.show()
# ---------------------------------------------------------------------------
# One small driver per dataset: each runs CURE with hand-tuned parameters
# (cluster count, number of representor points, compression factor).
# ---------------------------------------------------------------------------
def cluster_sample1():
    template_clustering(2, SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
def cluster_sample2():
    template_clustering(3, SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
def cluster_sample3():
    template_clustering(4, SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
def cluster_sample4():
    template_clustering(5, SIMPLE_SAMPLES.SAMPLE_SIMPLE4)
def cluster_sample5():
    template_clustering(4, SIMPLE_SAMPLES.SAMPLE_SIMPLE5)
def cluster_sample6():
    template_clustering(2, SIMPLE_SAMPLES.SAMPLE_SIMPLE6)
def cluster_elongate():
    template_clustering(2, SIMPLE_SAMPLES.SAMPLE_ELONGATE)
def cluster_lsun():
    template_clustering(3, FCPS_SAMPLES.SAMPLE_LSUN, 5, 0.3)
def cluster_target():
    template_clustering(6, FCPS_SAMPLES.SAMPLE_TARGET, 10, 0.3)
def cluster_two_diamonds():
    template_clustering(2, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 5, 0.3)
def cluster_wing_nut(ccore_flag=True):
    template_clustering(2, FCPS_SAMPLES.SAMPLE_WING_NUT, 4, 0.3, ccore_flag=ccore_flag)
def cluster_chainlink():
    template_clustering(2, FCPS_SAMPLES.SAMPLE_CHAINLINK, 30, 0.2)
def cluster_hepta():
    template_clustering(7, FCPS_SAMPLES.SAMPLE_HEPTA)
def cluster_tetra():
    template_clustering(4, FCPS_SAMPLES.SAMPLE_TETRA)
def cluster_engy_time():
    template_clustering(2, FCPS_SAMPLES.SAMPLE_ENGY_TIME, 50, 0.5)
def cluster_golf_ball():
    template_clustering(1, FCPS_SAMPLES.SAMPLE_GOLF_BALL)
def cluster_atom():
    # The three bare strings below are the original author's notes on
    # parameter tuning for the ATOM dataset (kept verbatim; they are no-op
    # string statements, not docstrings of this function body).
    "Impossible to obtain parameters that satisfy us, it seems to me that compression = 0.2 is key parameter here, because results of clustering doesn't depend on number of represented points, except 0."
    "Thus the best parameters is following: number of points for representation: [5, 400]; compression: [0.2, 0.204]"
    "Results of clustering is not so dramatically, but clusters are not allocated properly"
    template_clustering(2, FCPS_SAMPLES.SAMPLE_ATOM, 20, 0.2)
def experiment_execution_time(draw, ccore):
    # Time every FCPS dataset in a row; `ccore` toggles the C++ backend
    # (note: the last call hard-codes defaults instead of passing draw/ccore).
    template_clustering(3, FCPS_SAMPLES.SAMPLE_LSUN, 5, 0.3, draw, ccore)
    template_clustering(6, FCPS_SAMPLES.SAMPLE_TARGET, 10, 0.3, draw, ccore)
    template_clustering(2, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 5, 0.3, draw, ccore)
    template_clustering(2, FCPS_SAMPLES.SAMPLE_WING_NUT, 1, 1, draw, ccore)
    template_clustering(2, FCPS_SAMPLES.SAMPLE_CHAINLINK, 5, 0.5, draw, ccore)
    template_clustering(4, FCPS_SAMPLES.SAMPLE_TETRA, 5, 0.5, draw, ccore)
    template_clustering(7, FCPS_SAMPLES.SAMPLE_HEPTA, 5, 0.5, draw, ccore)
    template_clustering(2, FCPS_SAMPLES.SAMPLE_ATOM, 20, 0.2)
# Run every demo scenario sequentially, then time all scenarios with and
# without the C-core backend.
cluster_sample1()
cluster_sample2()
cluster_sample3()
cluster_sample4()
cluster_sample5()
cluster_sample6()
cluster_elongate()
cluster_lsun()
cluster_target()
cluster_two_diamonds()
cluster_wing_nut()
cluster_chainlink()
cluster_hepta()
cluster_tetra()
cluster_atom()
cluster_engy_time()
cluster_golf_ball()
experiment_execution_time(True, False)
experiment_execution_time(True, True)
| StarcoderdataPython |
1919668 | # -*- coding: utf-8 -*-
from os import listdir
import pandas
import numpy as np
from sklearn.externals import joblib
# ---------------------------------------------------------------------------
# Extract 77 per-trip features (speed, acceleration, curvature, turning,
# stopping behaviour, ...) for every driver/route CSV, then pickle the
# resulting matrix. Each route contributes one row of `data`.
# NOTE(review): `result` grows via np.vstack inside the loop, which is
# quadratic in the number of drivers; preallocating would be faster.
# ---------------------------------------------------------------------------
DIR = '/home/emil/Code/Kaggle/driver telematics analysis/'
drivers = listdir(DIR + 'drivers')
countdriver = len(drivers)
result = np.empty((0,77))
drivernames = np.empty((countdriver))
countdone = 0
#for every driver
for driver in drivers:
    data = np.empty((200,77))
    # NOTE(review): assigning a directory-name string into a float array --
    # this only works when the folder names are numeric; confirm.
    drivernames[countdone] = driver
    #for every route
    for route in range(1,201):
        pwd = DIR + 'drivers/' + str(driver) + '/' + str(route) + '.csv'
        csvfile = pandas.read_csv(pwd)
        current = np.array(csvfile.values[:,:], dtype = "float64")
        #speed
        # Per-second displacement magnitude between consecutive (x, y) rows.
        size = current.shape[0]
        speed = np.sqrt(np.sum(np.power(current[1:,:]-current[:size-1,:], 2), axis=1))
        #quantile of speed
        data[route-1,0:10] = np.percentile(speed, [10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        #acceleration
        acceleration = speed[1:] - speed[:speed.size-1]
        #quantile of acceleration
        data[route-1,10:20] = np.percentile(acceleration, [10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        #normal acceleration
        # Circumscribed-circle radius through each consecutive point triple;
        # normal (centripetal) acceleration = v^2 / r. Collinear triples
        # (d == 0) are zeroed after the division.
        ax = current[:size-2,0]
        bx = current[1:size-1,0]
        cx = current[2:size,0]
        ay = current[:size-2,1]
        by = current[1:size-1,1]
        cy = current[2:size,1]
        d = 2*(ax*(by-cy)+bx*(cy-ay)+cx*(ay-by))
        ux = ((ax*ax+ay*ay)*(by-cy)+(bx*bx+by*by)*(cy-ay)+(cx*cx+cy*cy)*(ay-by))/d
        uy = ((ax*ax+ay*ay)*(cx-bx)+(bx*bx+by*by)*(ax-cx)+(cx*cx+cy*cy)*(bx-ax))/d
        r = np.sqrt((ax-ux)*(ax-ux)+(ay-uy)*(ay-uy))
        normalacceleration = speed[1:size-1]*speed[1:size-1]/r
        normalacceleration[d == 0] = 0
        #quantile of acceleration
        data[route-1,20:30] = np.percentile(normalacceleration, [10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        #angle
        # Cosine of the heading change between 5-sample displacement vectors;
        # NaNs (zero-length vectors) and sharp reversals (< 0.5) are set to 1.
        x_coor = (current[5:,0]-current[:size-5,0])
        y_coor = (current[5:,1]-current[:size-5,1])
        norm = np.sqrt(np.power(x_coor,2)+np.power(y_coor,2))
        x_coor /= norm
        y_coor /= norm
        cos_angle = np.abs(x_coor[1:]*x_coor[:x_coor.size-1]+y_coor[1:]*y_coor[:y_coor.size-1])
        cos_angle[np.isnan(cos_angle)] = 1
        cos_angle[cos_angle < 0.5] = 1
        count_angle = 0
        # A "turn" is three consecutive samples with cos below 0.99.
        # NOTE(review): `iter` shadows the builtin throughout this script.
        for iter in range(1,cos_angle.size-2):
            if np.sum(cos_angle[iter-1:iter+2] < 0.99) == 3:
                count_angle += 1
        #quantile of angle cos
        data[route-1,30:40] = np.percentile(cos_angle, [10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        #speed param
        #mean speed without stop
        data[route-1,40] = np.sum(speed[speed > 0]) / np.sum(speed > 0)
        #mean speed with stop
        data[route-1,41] = speed.mean()
        #max speed
        data[route-1,42] = speed.max()
        #speed deviation
        data[route-1,43] = speed.std()
        #acceleration param
        #mean acceleration without stop
        data[route-1,44] = np.sum(acceleration[acceleration != 0]) / np.sum(acceleration != 0)
        #mean acceleration with stop
        data[route-1,45] = acceleration.mean()
        #mean abs of acceleration with stop
        data[route-1,46] = np.abs(acceleration).mean()
        #max acceleration
        data[route-1,47] = acceleration.max()
        #max deceleration
        data[route-1,48] = acceleration.min()
        #max acceleration or deceleration
        data[route-1,49] = np.abs(acceleration).max()
        #accelertation deviation
        data[route-1,50] = acceleration.std()
        #mean acceleration and deceleration
        data[route-1,51] = acceleration[acceleration > 0].mean()
        data[route-1,52] = acceleration[acceleration < 0].mean()
        #normalacceleration param
        #mean without stop
        data[route-1,53] = (normalacceleration[normalacceleration != 0]).mean()
        #mean with stop
        data[route-1,54] = normalacceleration.mean()
        #max
        data[route-1,55] = normalacceleration.max()
        #deviation
        data[route-1,56] = normalacceleration.std()
        #angle param
        #mean all
        data[route-1,57] = cos_angle.mean()
        #mean without 1
        data[route-1,58] = cos_angle[cos_angle != 1].mean()
        #min
        data[route-1,59] = cos_angle.min()
        #deviation
        data[route-1,60] = cos_angle.std()
        #count angle
        data[route-1,61] = count_angle / cos_angle.size
        #count angle per km
        data[route-1,62] = count_angle
        #distance
        data[route-1,63] = np.sum(speed)
        data[route-1,62] /= data[route-1,63]
        #moving
        # NOTE(review): column 64 (straight-line displacement from the
        # origin) is immediately overwritten by the trip duration below,
        # so the "moving" feature is lost -- likely an off-by-one in the
        # column index.
        data[route-1,64] = np.sqrt(np.sum(np.power(current[current.shape[0]-1,:], 2)))
        #time
        #all
        data[route-1,64] = size / 60
        #desceleration time and acceleration time
        data[route-1,65] = np.sum(acceleration > 0) / data[route-1,64]
        data[route-1,66] = np.sum(acceleration < 0) / data[route-1,64]
        #time of big normalacceleration
        data[route-1,67] = np.sum(normalacceleration > 3) / data[route-1,64]
        #number of turn
        data[route-1,68] = 0
        for iter in range(2,size-4):
            if normalacceleration[iter] > 3 and normalacceleration[iter] == normalacceleration[iter-2:iter+3].max():
                data[route-1,68] += 1
        #number of stops
        data[route-1,69] = 0
        #number of long stop > 10 seconds
        data[route-1,70] = 0
        #time of stops
        data[route-1,71] = 0
        tmp = 0
        for iter in range(2,size-1):
            if speed[iter] < 0.1:
                tmp += 1
                if tmp == 10:
                    data[route-1,70] += 1
                data[route-1,71] += 1
                if speed[iter-1] != 0:
                    data[route-1,69] += 1
            else:
                tmp = 0
        #mean time of stop
        if data[route-1,69] != 0:
            data[route-1,72] = data[route-1,71] / data[route-1,69]
        else:
            data[route-1,72] = 0
        #stop per km
        if data[route-1,63] != 0:
            data[route-1,73] = data[route-1,69] / data[route-1,63]
        else:
            data[route-1,73] = 0
        #long stop per km
        # NOTE(review): unlike its neighbours this branch has no `else`, so
        # when distance is 0 column 74 keeps whatever garbage np.empty left
        # in it -- probably a bug.
        if data[route-1,63] != 0:
            data[route-1,74] = data[route-1,70] / data[route-1,63]
        #stop per minute
        if data[route-1,64] != 0:
            data[route-1,75] = data[route-1,69] / (60 * data[route-1,64])
        else:
            data[route-1,75] = 0
        #proport to energy
        # Sum of positive kinetic-energy deltas (proportional to v2^2 - v1^2).
        tmp = np.power(speed[1:], 2)-np.power(speed[:speed.size-1],2)
        data[route-1,76] = np.sum(tmp[tmp > 0])
    result = np.vstack((result, data))
    countdone += 1
#saving
joblib.dump(result, 'features77.pkl')
joblib.dump(drivernames, 'drivernames.pkl')
| StarcoderdataPython |
269717 | <reponame>toonarmycaptain/deal<gh_stars>0
# built-in
import sys
from io import StringIO
from pathlib import Path
from textwrap import dedent
# external
import pytest
# project
import deal
from deal._cli._test import (
fast_iterator, format_coverage, format_exception,
has_pure_contract, run_cases, sys_path, test_command as command,
)
from deal._testing import TestCase
from deal._trace import TraceResult
from deal.linter._func import Func
# A contract-violating pure function must make `deal test` fail and report
# the counter-example plus both exceptions.
def test_safe_violation(tmp_path: Path, capsys):
    if 'example' in sys.modules:
        del sys.modules['example']
    text = """
        import deal
        @deal.pure
        def func(a: int, b: int) -> float:
            return a / b
    """
    path = (tmp_path / 'example.py')
    path.write_text(dedent(text))
    stream = StringIO()
    result = command(['--count', '1', str(path)], root=tmp_path, stream=stream)
    assert result == 1
    stream.seek(0)
    captured = stream.read()
    assert '/example.py' in captured
    assert 'running func' in captured
    assert 'func(a=0, b=0)' in captured
    assert 'ZeroDivisionError' in captured
    assert 'RaisesContractError' in captured
# Only pure functions are exercised; impure siblings are skipped and the
# command succeeds.
def test_no_violations(tmp_path: Path):
    if 'example' in sys.modules:
        del sys.modules['example']
    text = """
        import deal
        @deal.pure
        def func(a: int, b: int) -> float:
            return a + b
        def not_pure1(a: int, b: int) -> float:
            return a / b
        @deal.post(lambda result: result > 0)
        def not_pure2(a: int, b: int) -> float:
            return a / b
    """
    path = (tmp_path / 'example.py')
    path.write_text(dedent(text))
    stream = StringIO()
    result = command(['--count', '5', str(path)], root=tmp_path, stream=stream)
    assert result == 0
    stream.seek(0)
    captured = stream.read()
    assert '/example.py' in captured
    assert 'running func' in captured
    assert 'not_pure' not in captured
    assert 'func(' not in captured
# With no pure functions at all, the file is not even mentioned.
def test_no_matching_funcs(tmp_path: Path):
    if 'example' in sys.modules:
        del sys.modules['example']
    text = """
        import deal
        def not_pure1(a: int, b: int) -> float:
            return a / b
        @deal.post(lambda result: result > 0)
        def not_pure2(a: int, b: int) -> float:
            return a / b
    """
    path = (tmp_path / 'example.py')
    path.write_text(dedent(text))
    stream = StringIO()
    result = command(['--count', '5', str(path)], root=tmp_path, stream=stream)
    assert result == 0
    stream.seek(0)
    captured = stream.read()
    assert '/example.py' not in captured
def test_sys_path():
    # sys_path() must prepend the path on entry and restore sys.path on exit,
    # even if the inserted entry was removed inside the context.
    path = Path('example')
    size = len(sys.path)
    assert sys.path[0] != 'example'
    with sys_path(path):
        assert sys.path[0] == 'example'
    assert sys.path[0] != 'example'
    assert len(sys.path) == size
    with sys_path(path):
        del sys.path[0]
    assert len(sys.path) == size
# Both the bare decorator and the call forms of @deal.pure / @deal.has()
# must be recognized as "pure" contracts.
@pytest.mark.parametrize('source, has', [
    ('@deal.pure \ndef f(): 0', True),
    ('@deal.pure() \ndef f(): 0', True),
    ('@deal.has() \ndef f(): 0', True),
    # ('@deal.has\ndef f(): 0', True),
])
def test_has_pure_contract(source: str, has: bool) -> None:
    funcs = Func.from_text(source)
    assert len(funcs) == 1
    assert has_pure_contract(funcs[0]) is has
def test_fast_iterator():
    """fast_iterator must yield every item of the wrapped iterator, in order."""
    items = [1, 2, 3, 4]
    assert list(fast_iterator(iter(items))) == items
def test_print_exception():
    # format_exception() must render the traceback of the exception currently
    # being handled, colorized (hence the trailing ANSI reset sequence).
    try:
        raise deal.PreContractError
    except deal.PreContractError:
        text = format_exception()
    assert text.startswith(' Traceback (most recent call last):\n')
    assert 'test_test.py' in text
    assert 'PreContractError' in text
    assert text.endswith('\x1b[39;49;00m')
# Coverage formatting: green at 100%, yellow with missing-line ranges in
# between, red at 0%. Placeholder color codes make the output assertable.
@pytest.mark.parametrize('cov_l, all_l, exp', [
    ({2, 3, 4}, {2, 3, 4}, ' coverage <G>100%<E>'),
    ({2, 4}, {2, 3, 4}, ' coverage <Y>67%<E> (missing 3)'),
    ({2, 5}, {2, 3, 4, 5}, ' coverage <Y>50%<E> (missing 3-4)'),
    (set(), {2, 3, 4, 5}, ' coverage <R>0%<E>'),
])
def test_format_coverage_100(cov_l, all_l, exp):
    fake_colors = dict(
        red='<R>',
        yellow='<Y>',
        green='<G>',
        end='<E>',
    )
    tr = TraceResult(0, 0, covered_lines=cov_l, all_lines=all_l)
    text = format_coverage(tr, colors=fake_colors)
    assert text == exp
# run_cases() with a passing case: returns True and prints the header line.
def test_run_cases_ok():
    def func():
        return 123
    case = TestCase(
        args=(),
        kwargs={},
        func=func,
        exceptions=(),
        check_types=False,
    )
    cases = [case]
    colors = dict(
        blue='<B>',
        yellow='<Y>',
        end='<E>',
    )
    stream = StringIO()
    ok = run_cases(cases=cases, func_name='fname', stream=stream, colors=colors)
    assert ok
    stream.seek(0)
    captured = stream.read()
    assert captured
    assert captured.split('\n')[0] == '  <B>running fname<E>'
# run_cases() with a raising case: returns False and reports the failing
# call signature plus the traceback.
def test_run_cases_bad():
    def func(a, b):
        raise ZeroDivisionError
    case = TestCase(
        args=(1, ),
        kwargs=dict(b=2),
        func=func,
        exceptions=(),
        check_types=False,
    )
    cases = [case]
    colors = dict(
        blue='<B>',
        yellow='<Y>',
        end='<E>',
    )
    stream = StringIO()
    ok = run_cases(cases=cases, func_name='fname', stream=stream, colors=colors)
    assert not ok
    stream.seek(0)
    captured = stream.read()
    assert captured
    assert captured.split('\n')[0] == '  <B>running fname<E>'
    assert 'ZeroDivisionError' in captured
    assert 'Traceback' in captured
    assert '    <Y>fname(1, b=2)<E>' in captured
| StarcoderdataPython |
9765815 | <reponame>rancher/management-api<filename>tests/integration/core/common_fixtures.py<gh_stars>1-10
import base64
import cattle
import os
import pytest
import random
import time
import inspect
from datetime import datetime, timedelta
import requests
import fcntl
import logging
@pytest.fixture(scope='session', autouse=bool(os.environ.get('DEBUG')))
def log():
    """Session fixture: switch on DEBUG-level logging.

    Auto-used only when the DEBUG environment variable is set. The original
    passed the raw env-var value (a str or None) as ``autouse``; coerce it
    to a proper bool.
    """
    logging.basicConfig(level=logging.DEBUG)
@pytest.fixture(scope='session')
def api_url():
    # Base URL of the local API schema endpoint used by all tests.
    return 'http://localhost:1234/v3/schemas'
@pytest.fixture
def client(api_url):
    # Function-scoped cattle API client bound to the session-wide api_url.
    return cattle.from_env(url=api_url)
def random_str():
    """Return a unique-ish name of the form 'random-<num>-<epoch seconds>'."""
    return '-'.join(['random', str(random_num()), str(int(time.time()))])
def random_num():
    """Return a uniformly random integer in the inclusive range [0, 1000000]."""
    return random.randrange(1000001)
| StarcoderdataPython |
135049 | <reponame>flyingraijin98/Naive-Bayes-Spam-Classifier<filename>index.py
import random
import string
from collections import Counter

import pandas
import pandas as pd
import stop_words
class NLP():
    """Minimal text pre-processing helper: lower-casing, punctuation and
    stop-word removal, and bag-of-words count vectorization."""
    def __init__(self):
        # Sorted vocabulary of the most recent training corpus; set by
        # count_vectorizer(train=True).
        self.vocab = None
    def count_vectorizer(self, text, train=True, stop_word=None, view=False):
        """Turn an iterable of document strings into a bag-of-words DataFrame.

        Each document is lower-cased, stripped of punctuation, split on
        whitespace, and filtered against *stop_word* (defaults to the
        English stop-word list). With train=True, returns a DataFrame of
        per-document word counts with alphabetically sorted columns and
        records the vocabulary in self.vocab; with train=False, returns the
        token lists instead. *view* prints progress for debugging.
        """
        lower_case_documents = [document.lower() for document in text]
        if view:
            print('Step: Applying Lower Case.... Done\n')
        # Drop every punctuation character (relies on the module-level
        # `string` import instead of re-importing per call).
        sans_punctuation_documents = [
            ''.join(ch for ch in document if ch not in string.punctuation)
            for document in lower_case_documents
        ]
        if view:
            print('Step: Removed Punctuation....\n')
            print(sans_punctuation_documents)
        if stop_word is None:
            stop_word = list(stop_words.ENGLISH_STOP_WORDS)
        # A set gives O(1) membership tests instead of scanning the list.
        stop_set = set(stop_word)
        preprocessed_documents = [
            [word for word in document.split() if word not in stop_set]
            for document in sans_punctuation_documents
        ]
        if train != True:
            return preprocessed_documents
        if view:
            print('Step: Bag of Words... Done\n')
            print(preprocessed_documents)
        # Per-document word frequencies.
        frequency_list = [dict(Counter(document)) for document in preprocessed_documents]
        if view:
            print('Step: Frequency of words... Done\n')
        # Corpus vocabulary; pad every document's dict with zero counts so
        # the DataFrame has no NaNs.
        all_words = set(word for document in preprocessed_documents for word in document)
        for doc_counts in frequency_list:
            for word in all_words:
                doc_counts.setdefault(word, 0)
        df = pd.DataFrame(frequency_list)
        df = df[sorted(df.columns)]
        self.vocab = df.columns.to_list()
        if view:
            print('Step: Count vectorizer... Done\n')
            print(df.head())
        return df
nlp = NLP()
class NaiveBayes():
    # Multinomial-style Naive Bayes spam classifier over the bag-of-words
    # produced by the module-level `nlp` instance. Labels are expected to be
    # the strings '0' and '1' (see predict(), which indexes class_prob by
    # those keys).
    def __init__(self, text, label):
        self.text = text            # iterable of raw document strings
        self.label = label          # iterable of '0'/'1' label strings
        self.cond_probs = {}        # per-word, per-class probabilities (set by fit)
        self.features = []          # training vocabulary (set by fit)
        self.classes = []           # distinct labels seen (set by fit)
        self.class_prob = {}        # prior probability per class (set by fit)
    def fit(self, view=False):
        # Build the bag-of-words, group rows by class label, and derive the
        # class priors and per-word conditional probabilities.
        text = self.text
        label = self.label
        bow = nlp.count_vectorizer(text)
        self.features = bow.columns.to_list()
        if view:
            print('Your BoW is:\n', bow)
        classes = label
        self.classes = list(Counter(classes).keys())
        bow['out'] = classes
        bow_class = bow.groupby(by='out', axis=0)
        if view:
            print('Your BoW is testing:\n')
        # count of each class examples
        counts = bow_class.count()
        if view:
            print(counts)
            print
        # used for prediction
        # Prior P(class) = class document count / total documents (mean over
        # the identical per-word columns collapses the frame to one value).
        class_prob = counts / counts.sum(axis=0)
        class_prob = dict(class_prob.mean(axis=1))
        self.class_prob = class_prob
        if view:
            print(class_prob)
        # count of each word on each class
        self.count_words_class = bow_class.sum()
        # find prob of word in each class.... no. of that word in class / total word in class
        # NOTE(review): the denominator is the per-class DOCUMENT count, not
        # the per-class total word count, and +1 smoothing is applied to the
        # numerator only -- confirm this matches the intended model.
        prob_w_c = (bow_class.sum() + 1) / (counts )
        if view:
            print("chenck the testing prob")
            print(prob_w_c)
        # find p(word/class)
        prob_w_c = round(prob_w_c, 5)
        self.cond_probs = prob_w_c
        if view:
            print(prob_w_c)
    def classes_(self):
        """
        A method to see all classes counts for each word.
        """
        return self.count_words_class
    def predict(self, example):
        # Score a single document (passed as a one-element list) and return
        # 1 for the positive class, 0 otherwise.
        txt = nlp.count_vectorizer(example, train=False)
        words = dict(Counter(txt[0]))
        vocab = self.features
        classes = self.classes
        class_prob = self.class_prob
        p = self.cond_probs
        prob_zero = class_prob['0']
        prob_one = class_prob['1']
        for w in words.keys():
            if w in vocab:
                # p[w] is the per-class column; rows are the class labels.
                prob_zero = prob_zero * p[w][0]
                prob_one = prob_one * p[w][1]
            else:
                # NOTE(review): multiplying BOTH class scores by 10 for an
                # unseen word is a constant factor that cancels in the final
                # comparison -- it has no effect on the decision.
                prob_zero = prob_zero * 10
                prob_one = prob_one * 10
        if (prob_zero < prob_one):
            return 1
        else:
            return 0
# --- 7-fold cross-validation driver -----------------------------------------
# Each line of the dataset is "<text><label>" where the label is the final
# character ('0' or '1').
with open("dataset_NB.txt", "r") as f:
    lines = f.read().splitlines()
# Shuffle once so the folds are random.
shuffled = random.sample(lines, len(lines))
X = [line[:-1] for line in shuffled]
Y = [line[-1] for line in shuffled]
fold_size = 1000 // 7
train_range_1 = 0
for k in range(7):
    train_range_2 = train_range_1 + fold_size
    # Train on everything outside the current fold; test on the fold itself.
    all_txt = X[train_range_2:] + X[0:train_range_1]
    classes = Y[train_range_2:] + Y[0:train_range_1]
    testX = X[train_range_1:train_range_2]
    testY = [int(label) for label in Y[train_range_1:train_range_2]]
    nb = NaiveBayes(all_txt, classes)
    nb.fit()
    # BUG FIX: the original reset its test-label index to 0 inside the loop
    # body on every iteration, so every prediction was compared against
    # testY[0]; iterate texts and labels in lockstep instead.
    count_metrics = 0
    for text, label in zip(testX, testY):
        if nb.predict([text]) == label:
            count_metrics += 1
    accuracy_final = count_metrics / len(testX)
    print("Accuracy of fold %d is" % k, end="")
    print(accuracy_final)
    train_range_1 = train_range_1 + fold_size
| StarcoderdataPython |
4814011 | #!/usr/bin/env python
import os, sys
import json
from .common import parse_input
def write_metadata_file(dest_dir: str, source: dict, attribute: str):
    """Write source[attribute] into a file named *attribute* under *dest_dir*."""
    target_path = os.path.join(dest_dir, attribute)
    with open(target_path, 'w') as metadata_file:
        metadata_file.write(source[attribute])
def in_(dest_dir, stdin):
    """Concourse 'in' step: record the requested image digest under *dest_dir*.

    Reads the resource payload from *stdin*, writes the digest to a
    'timestamp' file, and returns the version/metadata structure Concourse
    expects. Raises when no version is supplied.
    """
    config = parse_input(stdin)
    image_id = config.get('version', {}).get('image-digest')
    if not image_id:
        raise Exception("Expected a version but got none")
    timestamp_path = os.path.join(dest_dir, 'timestamp')
    with open(timestamp_path, 'w') as metadata_file:
        metadata_file.write(image_id)
    return {
        'version': {'image-digest': image_id},
        'metadata': []
    }
def main():
    # Concourse resource entry point: destination dir comes as argv[1], the
    # JSON payload on stdin; the resulting version structure is echoed as JSON.
    versions = in_(sys.argv[1], sys.stdin)
    print(json.dumps(versions))
| StarcoderdataPython |
4987960 | <filename>example/datasets.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME> @ UvA
"""
import pandas as pd
### REGRESSION
def abalone(wd):
    '''
    4176 x 7
    The first categorical feature is removed
    http://archive.ics.uci.edu/ml/datasets/Abalone
    '''
    # Headerless CSV: name all columns X_0..X_{n-2}, last column is the target.
    frame = pd.read_csv(wd + 'abalone.csv', header=None)
    feature_names = ['X_' + str(i) for i in range(len(frame.columns) - 1)]
    frame.columns = feature_names + ['y']
    return frame
def energy(wd):
    '''
    19735 x 26
    https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction
    '''
    # Headerless CSV: name all columns X_0..X_{n-2}, last column is the target.
    frame = pd.read_csv(wd + 'energydata_complete.csv', header=None)
    feature_names = ['X_' + str(i) for i in range(len(frame.columns) - 1)]
    frame.columns = feature_names + ['y']
    return frame
| StarcoderdataPython |
1697402 | import json
import functools
IOS_OSS_APPS_DATASET = "../oss_ios_apps/contents_july_2018.json"
@functools.lru_cache()
def get_project(gh_user, gh_project):
    """Return the dataset entry whose 'source' contains gh_user/gh_project,
    or None when no project matches. Results are memoized."""
    wanted = f"{gh_user}/{gh_project}"
    for project in _read_app_dataset()['projects']:
        if wanted in project['source']:
            return project
    return None
@functools.lru_cache()
def get_itunes_id(gh_user, gh_project):
    """Return the numeric iTunes id for the given GitHub project, or None
    when the project is unknown or has no iTunes URL."""
    project = get_project(gh_user, gh_project)
    if not project:
        # Robustness: get_project returns None for unknown projects; the
        # original crashed with AttributeError on project.get().
        return None
    itunes_url = project.get('itunes')
    if itunes_url:
        # URLs look like ".../idNNNNNNNN"; keep the trailing id digits.
        return itunes_url.split('/id')[-1]
    return None
@functools.lru_cache()
def _read_app_dataset():
    """Parse json object with app informatino."""
    # Loads the dataset from the fixed relative path once; the lru_cache
    # keeps the parsed structure for all subsequent lookups.
    with open(IOS_OSS_APPS_DATASET, 'r') as input_file:
        datastore = json.load(input_file)
    return datastore
| StarcoderdataPython |
8131789 |
from __future__ import print_function
import os
import sys
import time
import pickle
import itertools
import numpy as np
import theano
import lasagne
from lasagne.utils import floatX
from utils import BColors, print_net_architecture
import theano.tensor as T
from data_pool import DataPool
from batch_iterators import threaded_generator_from_iterator
class Network(object):
    """
    Neural Network

    Training/prediction wrapper around a lasagne layer graph: compiles
    theano train/valid/predict functions lazily and implements a training
    loop with early stopping, learn-rate refinement and optional debugging.
    (Python 2 code: uses xrange and text-mode pickling.)
    """

    def __init__(self, net, print_architecture=True):
        """
        Constructor

        net: output layer of a lasagne network.
        print_architecture: if True, print a detailed layer summary.
        """
        self.net = net
        # compiled theano functions are created lazily on first use
        self.compute_output = None
        self.compute_output_dict = dict()
        self.saliency_function = None

        # get input shape of network
        l_in = lasagne.layers.helper.get_all_layers(self.net)[0]
        self.input_shape = l_in.output_shape

        if print_architecture:
            print_net_architecture(net, detailed=True)

    def fit(self, data, training_strategy, dump_file=None, log_file=None):
        """ Train model

        data: either raw arrays under 'X_train'/'y_train'/'X_valid'/'y_valid'
            or ready-made pools under 'train'/'valid'.
        training_strategy: bundles objective, update rule, learn-rate schedule,
            batch iterators, patience and refinement settings.
        dump_file: optional path where the best parameters are pickled.
        log_file: optional path where the loss/accuracy evolution is pickled.

        Returns the best validation accuracy or the best validation loss,
        depending on training_strategy.best_model_by_accurary.
        """
        print("Training neural network...")
        col = BColors()

        # create data pool if raw data is given
        if "X_train" in data:
            data_pools = dict()
            data_pools['train'] = DataPool(data['X_train'], data['y_train'])
            data_pools['valid'] = DataPool(data['X_valid'], data['y_valid'])
        else:
            data_pools = data

        # check if out_path exists
        if dump_file is not None:
            out_path = os.path.dirname(dump_file)
            if out_path != '' and not os.path.exists(out_path):
                os.mkdir(out_path)

        # log model evolution
        if log_file is not None:
            out_path = os.path.dirname(log_file)
            if out_path != '' and not os.path.exists(out_path):
                os.mkdir(out_path)

        # adaptive learning rate (shared variable so compiled updates see changes)
        learn_rate = training_strategy.ini_learning_rate
        learning_rate = theano.shared(floatX(learn_rate))
        learning_rate.set_value(training_strategy.adapt_learn_rate(training_strategy.ini_learning_rate, 0))

        # initialize evaluation output
        pred_tr_err, pred_val_err, overfitting = [], [], []
        tr_accs, va_accs = [], []

        print("Compiling theano train functions...")
        iter_funcs = self._create_iter_functions(y_tensor_type=training_strategy.y_tensor_type,
                                                 objective=training_strategy.objective, learning_rate=learning_rate,
                                                 l_2=training_strategy.L2,
                                                 compute_updates=training_strategy.update_parameters,
                                                 use_weights=training_strategy.use_weights,
                                                 debug_mode=training_strategy.debug_mode,
                                                 layer_update_filter=training_strategy.layer_update_filter)

        print("Starting training...")
        now = time.time()
        try:
            # initialize early stopping
            last_improvement = 0
            best_model = lasagne.layers.get_all_param_values(self.net)

            # iterate training epochs
            best_va_dice = 0.0
            # sentinels make the first epoch always count as an improvement,
            # so best_epoch / best_opt_state below are bound before any use
            prev_tr_loss, prev_va_loss = 1e7, 1e7
            prev_acc_tr, prev_acc_va = 0.0, 0.0
            for epoch in self._train(iter_funcs, data_pools, training_strategy.build_train_batch_iterator(),
                                     training_strategy.build_valid_batch_iterator(), training_strategy.report_dices,
                                     debug_mode=training_strategy.debug_mode):
                print("Epoch {} of {} took {:.3f}s".format(epoch['number'], training_strategy.max_epochs, time.time() - now))
                now = time.time()

                # --- collect train output ---
                tr_loss, va_loss = epoch['train_loss'], epoch['valid_loss']
                train_acc, valid_acc = epoch['train_acc'], epoch['valid_acc']
                train_dices, valid_dices = epoch['train_dices'], epoch['valid_dices']
                overfit = epoch['overfitting']

                # prepare early stopping
                if training_strategy.best_model_by_accurary:
                    improvement = valid_acc > prev_acc_va
                else:
                    improvement = va_loss < prev_va_loss

                if improvement:
                    last_improvement = 0
                    best_model = lasagne.layers.get_all_param_values(self.net)
                    best_epoch = epoch['number']
                    # snapshot optimizer state so refinement can restart from it
                    best_opt_state = [_u.get_value() for _u in iter_funcs['updates'].keys()]

                    # dump net parameters during training
                    if dump_file is not None:
                        with open(dump_file, 'wb') as fp:
                            pickle.dump(best_model, fp)

                last_improvement += 1

                # print train output (green = improved over the best so far)
                txt_tr = 'costs_tr %.5f ' % tr_loss
                if tr_loss < prev_tr_loss:
                    txt_tr = col.print_colored(txt_tr, BColors.OKGREEN)
                    prev_tr_loss = tr_loss
                txt_tr_acc = '(%.3f)' % train_acc
                if train_acc > prev_acc_tr:
                    txt_tr_acc = col.print_colored(txt_tr_acc, BColors.OKGREEN)
                    prev_acc_tr = train_acc
                txt_tr += txt_tr_acc + ', '

                txt_val = 'costs_val %.5f ' % va_loss
                if va_loss < prev_va_loss:
                    txt_val = col.print_colored(txt_val, BColors.OKGREEN)
                    prev_va_loss = va_loss
                txt_va_acc = '(%.3f)' % valid_acc
                if valid_acc > prev_acc_va:
                    txt_va_acc = col.print_colored(txt_va_acc, BColors.OKGREEN)
                    prev_acc_va = valid_acc
                txt_val += txt_va_acc + ', '

                print(' lr: %.7f, patience: %d' % (learn_rate, training_strategy.patience - last_improvement + 1))
                print(' ' + txt_tr + txt_val + 'tr/val %.3f' % overfit)

                # report dice coefficients (per segmentation threshold)
                if training_strategy.report_dices:

                    train_str = ' train |'
                    for key in np.sort(train_dices.keys()):
                        train_str += ' %.2f: %.3f |' % (key, train_dices[key])
                    print(train_str)
                    # best-threshold dice replaces the accuracy for logging
                    train_acc = np.max(train_dices.values())

                    valid_str = ' valid |'
                    for key in np.sort(valid_dices.keys()):
                        txt_va_dice = ' %.2f: %.3f |' % (key, valid_dices[key])
                        if valid_dices[key] > best_va_dice and valid_dices[key] == np.max(valid_dices.values()):
                            best_va_dice = valid_dices[key]
                            txt_va_dice = col.print_colored(txt_va_dice, BColors.OKGREEN)
                        valid_str += txt_va_dice
                    print(valid_str)
                    valid_acc = np.max(valid_dices.values())

                # collect model evolution data
                tr_accs.append(train_acc)
                va_accs.append(valid_acc)
                pred_tr_err.append(tr_loss)
                pred_val_err.append(va_loss)
                overfitting.append(overfit)

                # save results
                exp_res = dict()
                exp_res['pred_tr_err'] = pred_tr_err
                exp_res['tr_accs'] = tr_accs
                exp_res['pred_val_err'] = pred_val_err
                exp_res['va_accs'] = va_accs
                exp_res['overfitting'] = overfitting

                if log_file is not None:
                    # NOTE(review): text-mode 'w' with pickle is Python-2
                    # specific; under Python 3 this would need 'wb'
                    with open(log_file, 'w') as fp:
                        pickle.dump(exp_res, fp)

                # --- early stopping: preserve best model ---
                if last_improvement > training_strategy.patience:
                    print(col.print_colored("Early Stopping!", BColors.WARNING))
                    status = "Epoch: %d, Best Validation Loss: %.5f: Acc: %.5f" % (
                        best_epoch, prev_va_loss, prev_acc_va)
                    print(col.print_colored(status, BColors.WARNING))

                    if training_strategy.refinement_strategy.n_refinement_steps <= 0:
                        break
                    else:
                        status = "Loading best parameters so far and refining (%d) with decreased learn rate ..." % \
                            training_strategy.refinement_strategy.n_refinement_steps
                        print(col.print_colored(status, BColors.WARNING))

                        # reset net to best weights
                        lasagne.layers.set_all_param_values(self.net, best_model)

                        # reset optimizer
                        for _u, value in zip(iter_funcs['updates'].keys(), best_opt_state):
                            _u.set_value(value)

                        # update learn rate
                        learn_rate = training_strategy.refinement_strategy.adapt_learn_rate(learn_rate)
                        training_strategy.patience = training_strategy.refinement_strategy.refinement_patience
                        last_improvement = 0

                # maximum number of epochs reached
                if epoch['number'] >= training_strategy.max_epochs:
                    break

                # update learning rate
                learn_rate = training_strategy.adapt_learn_rate(learn_rate, epoch['number'])
                learning_rate.set_value(learn_rate)

        except KeyboardInterrupt:
            # allow interactive interruption; best weights are restored below
            pass

        # set net to best weights
        lasagne.layers.set_all_param_values(self.net, best_model)

        # return best validation loss
        if training_strategy.best_model_by_accurary:
            return prev_acc_va
        else:
            return prev_va_loss

    def predict_proba(self, input):
        """
        Predict on test samples

        input: array or list of arrays; a single un-batched sample is
        reshaped to a batch of one. Returns the network output.
        """
        # prepare input for prediction
        if not isinstance(input, list):
            input = [input]

        # reshape to network input
        if input[0].ndim < len(self.input_shape):
            input[0] = input[0].reshape([1] + list(input[0].shape))

        if self.compute_output is None:
            self.compute_output = self._compile_prediction_function()

        return self.compute_output(*input)

    def predict(self, input):
        """
        Predict class labels on test samples
        """
        # argmax over the class axis of the probability output
        return np.argmax(self.predict_proba(input), axis=1)

    def compute_layer_output(self, input, layer):
        """
        Compute output of given layer
        layer: either a string (name of layer) or a layer object
        """
        # prepare input for prediction
        if not isinstance(input, list):
            input = [input]

        # reshape to network input
        if input[0].ndim < len(self.input_shape):
            input[0] = input[0].reshape([1] + list(input[0].shape))

        # get layer by name
        if not isinstance(layer, lasagne.layers.Layer):
            for l in lasagne.layers.helper.get_all_layers(self.net):
                if l.name == layer:
                    layer = l
                    break

        # compile prediction function for target layer (cached per layer)
        if layer not in self.compute_output_dict:
            self.compute_output_dict[layer] = self._compile_prediction_function(target_layer=layer)

        return self.compute_output_dict[layer](*input)

    def compute_saliency(self, input, nonlin=lasagne.nonlinearities.rectify):
        """
        Compute saliency maps using guided backprop
        """
        # prepare input for prediction
        if not isinstance(input, list):
            input = [input]

        # reshape to network input
        if input[0].ndim < len(self.input_shape):
            input[0] = input[0].reshape([1] + list(input[0].shape))

        if not self.saliency_function:
            self.saliency_function = self._compile_saliency_function(nonlin)

        return self.saliency_function(*input)

    def save(self, file_path):
        """
        Save model to disk
        """
        # NOTE(review): protocol -1 is binary, but the file is opened in
        # text mode 'w' — works on Python 2 / Unix only; 'wb' is safer
        with open(file_path, 'w') as fp:
            params = lasagne.layers.get_all_param_values(self.net)
            pickle.dump(params, fp, -1)

    def load(self, file_path):
        """
        load model from disk
        """
        with open(file_path, 'r') as fp:
            params = pickle.load(fp)
        lasagne.layers.set_all_param_values(self.net, params)

    def _compile_prediction_function(self, target_layer=None):
        """
        Compile theano prediction function

        target_layer: layer whose (deterministic) output to compute;
        defaults to the network output layer.
        """
        # get network output nad compile function
        if target_layer is None:
            target_layer = self.net

        # collect input vars
        all_layers = lasagne.layers.helper.get_all_layers(target_layer)
        input_vars = []
        for l in all_layers:
            if isinstance(l, lasagne.layers.InputLayer):
                input_vars.append(l.input_var)

        net_output = lasagne.layers.get_output(target_layer, deterministic=True)
        return theano.function(inputs=input_vars, outputs=net_output)

    def _create_iter_functions(self, y_tensor_type, objective, learning_rate, l_2, compute_updates, use_weights,
                               debug_mode, layer_update_filter):
        """ Create functions for training, validation and testing to iterate one epoch.

        Returns a dict with 'train', 'valid', 'test' theano functions, the
        'updates' OrderedDict (exposes optimizer shared variables), and two
        optional debug functions ('compute_grad_norms', 'compute_layer_outputs').
        """

        # init target tensor
        targets = y_tensor_type('y')
        weights = y_tensor_type('w').astype("float32")

        # get input layer
        all_layers = lasagne.layers.helper.get_all_layers(self.net)

        # collect input vars
        input_vars = []
        for l in all_layers:
            if isinstance(l, lasagne.layers.InputLayer):
                input_vars.append(l.input_var)

        # compute train costs (stochastic/non-deterministic pass)
        tr_output = lasagne.layers.get_output(self.net, deterministic=False)
        if use_weights:
            tr_cost = objective(tr_output, targets, weights)
            tr_input = input_vars + [targets, weights]
        else:
            tr_cost = objective(tr_output, targets)
            tr_input = input_vars + [targets]

        # regularization costs
        tr_reg_cost = 0

        # regularize RNNs: norm-preserving penalty on hidden-state norms of
        # layers named "norm_reg_rnn"
        for l in all_layers:

            # if l.name == "norm_reg_rnn":
            #
            #     H = lasagne.layers.get_output(l, deterministic=False)
            #     H_l2 = T.sqrt(T.sum(H ** 2, axis=-1))
            #     norm_diffs = (H_l2[:, 1:] - H_l2[:, :-1]) ** 2
            #     norm_preserving_loss = T.mean(norm_diffs)
            #
            #     beta = 1.0
            #     tr_cost += beta * norm_preserving_loss

            if l.name == "norm_reg_rnn":

                H = lasagne.layers.get_output(l, deterministic=False)
                steps = T.arange(1, l.output_shape[1])

                def compute_norm_diff(k, H):
                    # squared difference of consecutive hidden-state L2 norms
                    n0 = ((H[:, k - 1, :]) ** 2).sum(1).sqrt()
                    n1 = ((H[:, k, :]) ** 2).sum(1).sqrt()
                    return (n1 - n0) ** 2

                norm_diffs, _ = theano.scan(fn=compute_norm_diff, outputs_info=None,
                                            non_sequences=[H], sequences=[steps])

                beta = 1.0
                norm_preserving_loss = T.mean(norm_diffs)
                tr_reg_cost += beta * norm_preserving_loss

        # compute validation costs (deterministic pass)
        va_output = lasagne.layers.get_output(self.net, deterministic=True)

        # estimate accuracy: ivector targets -> multi-class argmax accuracy,
        # vector targets -> binary accuracy at threshold 0.5
        if y_tensor_type == T.ivector:
            va_acc = 100.0 * T.mean(T.eq(T.argmax(va_output, axis=1), targets), dtype=theano.config.floatX)
            tr_acc = 100.0 * T.mean(T.eq(T.argmax(tr_output, axis=1), targets), dtype=theano.config.floatX)
        elif y_tensor_type == T.vector:
            va_acc = 100.0 * T.mean(T.eq(T.ge(va_output.flatten(), 0.5), targets), dtype=theano.config.floatX)
            tr_acc = 100.0 * T.mean(T.eq(T.ge(tr_output.flatten(), 0.5), targets), dtype=theano.config.floatX)
        else:
            va_acc, tr_acc = None, None

        # collect all parameters of net and compute updates
        all_params = lasagne.layers.get_all_params(self.net, trainable=True)

        # filter parameters to update by layer name
        if layer_update_filter:
            all_params = [p for p in all_params if layer_update_filter in p.name]

        # add weight decay
        if l_2 is not None:
            all_layers = lasagne.layers.get_all_layers(self.net)
            tr_reg_cost += l_2 * lasagne.regularization.regularize_layer_params(all_layers, lasagne.regularization.l2)

        # compute updates (gradients taken on data cost + regularization cost)
        all_grads = lasagne.updates.get_or_compute_grads(tr_cost + tr_reg_cost, all_params)
        updates = compute_updates(all_grads, all_params, learning_rate)

        # compile iter functions
        tr_outputs = [tr_cost, tr_output]
        if tr_acc is not None:
            tr_outputs.append(tr_acc)
        iter_train = theano.function(tr_input, tr_outputs, updates=updates)

        va_inputs = input_vars + [targets]
        va_cost = objective(va_output, targets)
        va_outputs = [va_cost, va_output]
        if va_acc is not None:
            va_outputs.append(va_acc)
        iter_valid = theano.function(va_inputs, va_outputs)

        # network debugging
        compute_grad_norms = None
        compute_layer_outputs = None
        if debug_mode:

            # compile gradient norm computation for weights
            grad_norms = []
            for i, p in enumerate(all_params):
                if "W" in p.name:
                    g = all_grads[i]
                    grad_norm = T.sqrt(T.sum(g**2))
                    grad_norms.append(grad_norm)
            compute_grad_norms = theano.function(tr_input, grad_norms)

            # compute output of each layer
            layer_outputs = lasagne.layers.get_output(all_layers)
            compute_layer_outputs = theano.function(input_vars, layer_outputs)

        return dict(train=iter_train, valid=iter_valid, test=iter_valid, updates=updates,
                    compute_grad_norms=compute_grad_norms,
                    compute_layer_outputs=compute_layer_outputs)

    def _compile_saliency_function(self, nonlin=lasagne.nonlinearities.rectify):
        """
        Compiles a function to compute the saliency maps and predicted classes
        for a given mini batch of input images (guided backpropagation:
        the given nonlinearity is replaced by a variant whose gradient only
        passes where both the activation and the gradient are positive).
        """

        class ModifiedBackprop(object):

            def __init__(self, nonlinearity):
                self.nonlinearity = nonlinearity
                self.ops = {}  # memoizes an OpFromGraph instance per tensor type

            def __call__(self, x):
                # OpFromGraph is oblique to Theano optimizations, so we need to move
                # things to GPU ourselves if needed.
                if theano.sandbox.cuda.cuda_enabled:
                    maybe_to_gpu = theano.sandbox.cuda.as_cuda_ndarray_variable
                else:
                    maybe_to_gpu = lambda x: x

                # We move the input to GPU if needed.
                x = maybe_to_gpu(x)

                # We note the tensor type of the input variable to the nonlinearity
                # (mainly dimensionality and dtype); we need to create a fitting Op.
                tensor_type = x.type

                # If we did not create a suitable Op yet, this is the time to do so.
                if tensor_type not in self.ops:

                    # For the graph, we create an input variable of the correct type:
                    inp = tensor_type()

                    # We pass it through the nonlinearity (and move to GPU if needed).
                    outp = maybe_to_gpu(self.nonlinearity(inp))

                    # Then we fix the forward expression...
                    op = theano.OpFromGraph([inp], [outp])

                    # ...and replace the gradient with our own (defined in a subclass).
                    op.grad = self.grad

                    # Finally, we memoize the new Op
                    self.ops[tensor_type] = op

                # And apply the memorized Op to the input we got.
                return self.ops[tensor_type](x)

        class GuidedBackprop(ModifiedBackprop):

            def grad(self, inputs, out_grads):
                # pass gradient only where input AND gradient are positive
                (inp,) = inputs
                (grd,) = out_grads
                dtype = inp.dtype
                return (grd * (inp > 0).astype(dtype) * (grd > 0).astype(dtype),)

        def fix_nonlins(l_out, nonlin):
            """ Replace relus with guided-back-prop """
            nonlin_layers = [layer for layer in lasagne.layers.get_all_layers(l_out)
                             if getattr(layer, 'nonlinearity', None) is nonlin]
            modded_nonlin = GuidedBackprop(nonlin)  # important: only instantiate this once!
            for layer in nonlin_layers:
                layer.nonlinearity = modded_nonlin
            return l_out

        # fix non-linearities
        l_out = fix_nonlins(self.net, nonlin=nonlin)

        # collect input vars
        all_layers = lasagne.layers.helper.get_all_layers(l_out)
        input_vars = []
        for l in all_layers:
            if isinstance(l, lasagne.layers.InputLayer):
                input_vars.append(l.input_var)

        # NOTE(review): the gradient is taken w.r.t. the output of
        # l_out.input_layer (the layer feeding the output layer) — confirm
        # this is the intended saliency source
        outp = lasagne.layers.get_output(l_out.input_layer, deterministic=True)
        max_outp = T.max(outp, axis=1)
        saliency = theano.grad(max_outp.sum(), wrt=input_vars)

        return theano.function(input_vars, saliency)

    def _train(self, iter_funcs, data_pools, train_batch_iter, valid_batch_iter, estimate_dices, debug_mode):
        """
        Generator that trains the model with mini-batch training and yields
        one result dict per epoch (losses, accuracies, dice scores, and the
        train/valid overfitting ratio). Runs forever; the caller decides
        when to stop iterating.
        """
        col = BColors()
        from segmentation_utils import dice

        for epoch in itertools.count(1):

            # evaluate various thresholds
            if estimate_dices:
                threshs = [0.3, 0.4, 0.5, 0.6, 0.7]
                tr_dices = dict()
                for thr in threshs:
                    tr_dices[thr] = []
                va_dices = dict()
                for thr in threshs:
                    va_dices[thr] = []
            else:
                tr_dices = None
                va_dices = None

            # iterate train batches
            batch_train_losses, batch_train_accs = [], []
            iterator = train_batch_iter(data_pools['train'])
            generator = threaded_generator_from_iterator(iterator)

            batch_times = np.zeros(5, dtype=np.float32)
            start, after = time.time(), time.time()
            for i_batch, train_input in enumerate(generator):
                batch_res = iter_funcs['train'](*train_input)
                batch_train_losses.append(batch_res[0])

                # collect classification accuracies
                if len(batch_res) > 2:
                    batch_train_accs.append(batch_res[2])

                # estimate dices for various thresholds
                if estimate_dices:
                    y_b = train_input[1]
                    pred = batch_res[1]
                    for thr in threshs:
                        for i in xrange(pred.shape[0]):
                            seg = pred[i, 0] > thr
                            tr_dices[thr].append(100 * dice(seg, y_b[i, 0]))

                # train time
                batch_time = time.time() - after
                after = time.time()
                train_time = (after - start)

                # estimate updates per second (running avg over last 5 batches)
                batch_times[0:4] = batch_times[1:5]
                batch_times[4] = batch_time
                ups = 1.0 / batch_times.mean()

                # report loss during training
                perc = 100 * (float(i_batch) / train_batch_iter.n_batches)
                dec = int(perc // 4)
                progbar = "|" + dec * "#" + (25 - dec) * "-" + "|"
                vals = (perc, progbar, train_time, ups, np.mean(batch_train_losses))
                loss_str = " (%d%%) %s time: %.2fs, ups: %.2f, loss: %.5f" % vals
                print(col.print_colored(loss_str, col.WARNING), end="\r")
                sys.stdout.flush()

            # some debug plots on gradients and hidden activations
            if debug_mode:
                import matplotlib.pyplot as plt

                # compute gradient norm for last batch
                grad_norms = iter_funcs['compute_grad_norms'](*train_input)
                plt.figure("Gradient Norms")
                plt.clf()
                plt.plot(grad_norms, "g-", linewidth=3, alpha=0.7)
                plt.grid('on')
                plt.title("Gradient Norms")
                plt.ylabel("Gradient Norm")
                plt.xlabel("Weight $W_l$")
                plt.draw()

                # compute layer output for last batch
                layer_outputs = iter_funcs['compute_layer_outputs'](*train_input[:-1])
                n_outputs = len(layer_outputs)
                sub_plot_dim = np.ceil(np.sqrt(n_outputs))
                plt.figure("Hidden Activation Distributions")
                plt.clf()
                plt.subplots_adjust(bottom=0.05, top=0.98)
                for i, l_out in enumerate(layer_outputs):
                    l_out = np.asarray(l_out).flatten()
                    h, b = np.histogram(l_out, bins='auto')
                    plt.subplot(sub_plot_dim, sub_plot_dim, i + 1)
                    plt.plot(b[:-1], h, "g-", linewidth=3, alpha=0.7,
                             label="%.2f $\pm$ %.5f" % (l_out.mean(), l_out.std()))
                    span = (b[-1] - b[0])
                    x_min = b[0] - 0.05 * span
                    x_max = b[-1] + 0.05 * span
                    plt.xlim([x_min, x_max])
                    plt.legend(fontsize=10)
                    plt.grid('on')
                    plt.yticks([])
                plt.draw()
                plt.pause(0.1)

            # clear the progress line
            print("\x1b[K", end="\r")
            print(' ')
            avg_train_loss = np.mean(batch_train_losses)
            avg_train_acc = np.mean(batch_train_accs) if len(batch_train_accs) > 0 else 0.0
            if estimate_dices:
                for thr in threshs:
                    tr_dices[thr] = np.mean(tr_dices[thr])

            # evaluate classification power of data set
            # iterate validation batches
            batch_valid_losses, batch_valid_accs = [], []
            iterator = valid_batch_iter(data_pools['valid'])
            generator = threaded_generator_from_iterator(iterator)
            for va_input in generator:
                batch_res = iter_funcs['valid'](*va_input)
                batch_valid_losses.append(batch_res[0])

                # collect classification accuracies
                if len(batch_res) > 2:
                    batch_valid_accs.append(batch_res[2])

                # estimate dices for various thresholds
                if estimate_dices:
                    y_b = va_input[1]
                    pred = batch_res[1]
                    for thr in threshs:
                        for i in xrange(pred.shape[0]):
                            seg = pred[i, 0] > thr
                            va_dices[thr].append(100 * dice(seg, y_b[i, 0]))

            avg_valid_loss = np.mean(batch_valid_losses)
            avg_valid_accs = np.mean(batch_valid_accs) if len(batch_valid_accs) > 0 else 0.0
            if estimate_dices:
                for thr in threshs:
                    va_dices[thr] = np.mean(va_dices[thr])

            # collect results
            yield {
                'number': epoch,
                'train_loss': avg_train_loss,
                'train_acc': avg_train_acc,
                'valid_loss': avg_valid_loss,
                'valid_acc': avg_valid_accs,
                'valid_dices': va_dices,
                'train_dices': tr_dices,
                'overfitting': avg_train_loss / avg_valid_loss,
            }
class SegmentationNetwork(Network):
    """
    Segmentation Neural Network

    Extends Network with dense (per-pixel) prediction; inputs larger than
    the network's input layer are handled with an overlapping sliding
    window, blended with a 2D Hamming window.
    """

    def predict_proba(self, input, squeeze=True, overlap=0.5):
        """
        Predict on test samples

        input: image batch; if its spatial size differs from the network
            input layer, sliding-window prediction is used.
        squeeze: drop singleton dimensions from the result.
        overlap: fraction of window overlap for sliding-window prediction.
        """
        if self.compute_output is None:
            self.compute_output = self._compile_prediction_function()

        # get network input shape
        l_in = lasagne.layers.helper.get_all_layers(self.net)[0]
        in_shape = l_in.output_shape[-2::]

        # standard prediction
        if input.shape[-2::] == in_shape:
            proba = self.compute_output(input)

        # sliding window prediction if images do not match
        else:
            proba = self._predict_proba_sliding_window(input, overlap=overlap)

        if squeeze:
            proba = proba.squeeze()

        return proba

    def predict(self, input, thresh=0.5):
        """
        Predict label map on test samples

        thresh: probability threshold used for single-channel (binary) output.
        """
        P = self.predict_proba(input, squeeze=False)

        # binary segmentation
        if P.shape[1] == 1:
            return (P > thresh).squeeze()

        # categorical segmentation
        else:
            return np.argmax(P, axis=1).squeeze()

    def _predict_proba_sliding_window(self, images, overlap=0.5):
        """
        Sliding window prediction for images larger than the input layer

        Pads images to a multiple of the network output size, predicts
        overlapping windows, blends them with a 2D Hamming weighting and
        normalizes by the accumulated weights.
        """
        images = images.copy()
        n_images = images.shape[0]
        h, w = images.shape[2:4]
        _, Nc, sh, sw = self.net.output_shape

        # pad images for sliding window prediction
        missing_h = int(sh * np.ceil(float(h) / sh) - h)
        missing_w = int(sw * np.ceil(float(w) / sw) - w)

        pad_top = missing_h // 2
        pad_bottom = missing_h - pad_top
        pad_left = missing_w // 2
        pad_right = missing_w - pad_left

        images = np.pad(images, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant')

        # window start/end coordinates with the requested overlap
        step_h = int(sh * (1.0 - overlap))
        row_0 = np.arange(0, images.shape[2] - sh + 1, step_h)
        row_1 = row_0 + sh

        step_w = int(sw * (1.0 - overlap))
        col_0 = np.arange(0, images.shape[3] - sw + 1, step_w)
        col_1 = col_0 + sw

        # import pdb
        # pdb.set_trace()

        # hamming window weighting (down-weights window borders in the blend)
        window_h = np.hamming(sh)
        window_w = np.hamming(sw)
        ham2d = np.sqrt(np.outer(window_h, window_w))[np.newaxis, np.newaxis]

        # initialize result image: R accumulates weighted predictions,
        # V accumulates the weights for normalization
        R = np.zeros((n_images, Nc, images.shape[2], images.shape[3]))
        V = np.zeros((n_images, Nc, images.shape[2], images.shape[3]))
        for ir in xrange(len(row_0)):
            for ic in xrange(len(col_0)):
                I = images[:, :, row_0[ir]:row_1[ir], col_0[ic]:col_1[ic]]
                P = self.compute_output(I)
                R[:, :, row_0[ir]:row_1[ir], col_0[ic]:col_1[ic]] += P * ham2d
                V[:, :, row_0[ir]:row_1[ir], col_0[ic]:col_1[ic]] += ham2d

        # clip to original image size again
        R = R[:, :, pad_top:images.shape[2] - pad_bottom, pad_left:images.shape[3] - pad_right]
        V = V[:, :, pad_top:images.shape[2] - pad_bottom, pad_left:images.shape[3] - pad_right]

        # import matplotlib.pyplot as plt
        # plt.figure()
        # plt.imshow(V[0, 0])
        # plt.colorbar()
        # plt.show(block=True)

        # normalize predictions
        R /= V

        return R
| StarcoderdataPython |
1831533 | import RPi.GPIO as GPIO
import time
# Half-second on/off interval for each blink phase.
sleep_time = 0.5
# Physical (BOARD-numbered) header pin driving the LED.
led_pin = 12
# Use physical pin numbering and configure the LED pin as an output.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(led_pin, GPIO.OUT)
def led_blink(sleep_time):
    """Blink the LED once: on for sleep_time seconds, then off for the same."""
    for state in (True, False):
        GPIO.output(led_pin, state)
        time.sleep(sleep_time)
try:
    # Blink forever; Ctrl-C breaks out of the loop.
    while True:
        led_blink(sleep_time)
except KeyboardInterrupt:
    # Release the GPIO pins on exit.
    GPIO.cleanup()
| StarcoderdataPython |
3353662 |
import os, sys
import torch
def read_policy(filename, section='init', debug=False, verbose=print):
    """Parse one section of a quantization policy file.

    File format (as implemented below): a section starts with a line like
    "on <section>: <int> <int> ...", whose integers become the 'trigger'
    list. Subsequent "key: value" lines fill policy dicts; a key containing
    'by_' starts a new policy entry. Values with commas or spaces become
    lists, and True/False tokens are converted to booleans.

    Returns a list: [{'trigger': [...]}, {policy dict}, ...], or [] when the
    file does not exist.
    """
    if not os.path.isfile(filename):
        verbose("file no exist: %s" % filename)
        return []

    policies = []
    attr = None      # policy dict currently being filled
    valid = False    # attr has at least one non-'by_' key and may be flushed
    found = False  # found policy for the section
    with open(filename) as f:
        while(True):
            line = f.readline()
            if len(line) == 0:
                # EOF
                break

            items = line.strip('\n').strip(' ')
            # skip blank lines and full-line comments
            if len(items) == 0 or items[0] == "#":
                continue

            # drop trailing comments, then split "key: value"
            items = items.split('#')
            items = items[0]
            items = items.split(':')
            if debug:
                verbose(items)
            # NOTE(review): a non-empty line without ':' aborts the whole
            # parse (break, not continue) — confirm this is intended
            if len(items) < 2:
                break

            # section header, e.g. "on init: 10 20 30"
            if 'on' in items[0].split():
                if section in items[0].split():
                    # restart collection for our section; trigger epochs/iterations
                    policies = [{"trigger": [ int(x) for x in items[1].split() ] }]
                    attr = None
                    valid = False
                    found = True
                    continue
                else:
                    # header of some other section: ignore until our section
                    found = False

            if not found:
                continue

            # a 'by_' key opens a new policy entry; flush the previous one
            if 'by_' in items[0]:
                if attr is None:
                    attr = dict()
                elif valid:
                    policies.append(attr)
                    valid = False
                    attr = dict()

            # attribute lines before any 'by_' line are ignored
            if attr is None:
                continue

            items[0] = items[0].strip()
            items[1] = items[1].strip()
            # comma- or space-separated values become lists of stripped tokens
            if ',' in items[1]:
                items[1] = items[1].split(',')
            if isinstance(items[1], list):
                items[1] = [ i.strip() for i in items[1]]
            elif ' ' in items[1]:
                items[1] = items[1].split(' ')
            # convert boolean tokens (no-op for plain scalar strings, where
            # this iterates characters and never matches)
            for i, t in enumerate(items[1]):
                if t in ['True', 'true']:
                    items[1][i] = True
                elif t in ['False', 'false']:
                    items[1][i] = False
            attr[items[0]] = items[1]
            # only a non-'by_' attribute marks the entry as complete
            if 'by_' not in items[0]:
                valid = True

    # flush the last pending policy entry
    if attr is not None and valid:
        policies.append(attr)
    return policies
def deploy_on_init(model, filename, verbose=print):
    """Apply the init-time sections of a quantization policy file to a model.

    For each layer kind (conv/fc, norm, eltwise, shuffle, split, concat) the
    corresponding ``update_*_quantization_parameter`` hook is first called
    with a running ``index`` for every module that provides it, then the
    matching section of the policy file is read with :func:`read_policy`
    and each policy dict is applied to every such module.

    The original implementation repeated the same three-step sequence six
    times verbatim (with a dead ``index`` counter in the 'norm' apply loop);
    this version drives the identical sequence from a section table, so
    behavior — including call order and the exact verbose messages — is
    unchanged.
    """
    if not hasattr(model, 'modules'):
        return

    # (policy-file section, per-module hook name), in the original order.
    sections = [
        ('init', 'update_quantization_parameter'),          # conv / fc layer
        ('norm', 'update_norm_quantization_parameter'),     # norm layer
        ('eltwise', 'update_eltwise_quantization_parameter'),
        ('shuffle', 'update_shuffle_quantization_parameter'),
        ('split', 'update_split_quantization_parameter'),
        ('concat', 'update_concat_quantization_parameter'),
    ]
    for section, method_name in sections:
        # number the layers of this kind in traversal order
        index = 0
        for m in model.modules():
            if hasattr(m, method_name):
                getattr(m, method_name)(index=index)
                index = index + 1

        policies = read_policy(filename, section, verbose=verbose)
        verbose("loading '%s' section of policy" % section)
        verbose(policies)
        for attributes in policies:
            assert isinstance(attributes, dict), "Error attributes"
            for m in model.modules():
                if hasattr(m, method_name):
                    getattr(m, method_name)(**attributes)
def deploy_on_epoch(model, policies, epoch, optimizer=None, verbose=print):
    """Apply runtime quantization policies when `epoch` hits a trigger.

    policies: list whose first entry must carry a 'trigger' list of epoch
        numbers; the remaining entries are attribute dicts passed to each
        module's update_quantization_parameter hook.
    optimizer: optional torch.optim.SGD whose momentum buffers are zeroed
        for the tensors listed in each hook's 'reset_momentum_list' feedback.
    """
    if not hasattr(model, 'modules'):
        return
    if len(policies) < 1:
        return

    assert 'trigger' in policies[0], "No trigger provided"
    feedbacks = []
    if epoch in policies[0]['trigger']:
        for p in policies:
            attributes = p
            assert isinstance(attributes, dict), "Error attributes"
            for m in model.modules():
                if hasattr(m, 'update_quantization_parameter'):
                    feedback = m.update_quantization_parameter(**attributes)
                    feedbacks.append(feedback)

    if optimizer is not None:
        assert isinstance(optimizer, torch.optim.SGD), 'reset_momentum is only supported on SGD optimizer currently'
        with torch.no_grad():
            for fd in feedbacks:
                # NOTE(review): assumes each hook returns a dict (or at least
                # something supporting `in`); a hook returning None would
                # raise TypeError here — confirm the hook contract
                if 'reset_momentum_list' in fd and isinstance(fd['reset_momentum_list'], list):
                    for p in fd['reset_momentum_list']:
                        param_state = optimizer.state[p]
                        if 'momentum_buffer' in param_state:
                            buf = param_state['momentum_buffer']
                            # zero in place so the optimizer keeps the same buffer
                            buf.mul_(0)
                            verbose("reset the momentum_buffer for tensor with id: %s" % id(p))
def deploy_on_iteration(model, policies, iteration, optimizer=None, verbose=print):
    """Iteration-based deployment: identical to epoch-based deployment,
    with the policy 'trigger' list interpreted as iteration numbers."""
    deploy_on_epoch(model, policies, iteration, optimizer, verbose)
if __name__ == "__main__":
    # Ad-hoc smoke test: parse a sample policy file and dump the result.
    print("Loading policy")
    policies = read_policy('config/srresnet-policy.txt', debug=True)
    print(policies)
| StarcoderdataPython |
327584 | # pylint: disable=invalid-name
"""
Tests for shilellagh.adapters.api.gsheets.fields.
"""
import datetime
import dateutil.tz
from shillelagh.adapters.api.gsheets.fields import GSheetsBoolean
from shillelagh.adapters.api.gsheets.fields import GSheetsDate
from shillelagh.adapters.api.gsheets.fields import GSheetsDateTime
from shillelagh.adapters.api.gsheets.fields import GSheetsNumber
from shillelagh.adapters.api.gsheets.fields import GSheetsString
from shillelagh.adapters.api.gsheets.fields import GSheetsTime
from shillelagh.fields import ISODateTime
from shillelagh.fields import Order
def test_comparison():
    """
    Test that a GSheets field is different from a standard field.
    """
    def make(field_cls):
        # build a field with identical constructor arguments each time
        return field_cls([], Order.NONE, True)

    # a GSheets-specific field never equals the generic ISO field...
    assert make(GSheetsDateTime) != make(ISODateTime)
    # ...but two GSheets fields built alike compare equal
    assert make(GSheetsDateTime) == make(GSheetsDateTime)
def test_GSheetsDateTime():
    """
    Test ``GSheetsDateTime``.
    """
    # empty cell values parse to None
    assert GSheetsDateTime().parse(None) is None
    assert GSheetsDateTime().parse("") is None
    # the sheet's number pattern drives parsing of serialized values
    assert GSheetsDateTime(pattern="M/d/yyyy H:mm:ss").parse(
        "12/31/2020 12:34:56",
    ) == datetime.datetime(
        2020,
        12,
        31,
        12,
        34,
        56,
    )
    # None formats back to the empty cell value
    assert GSheetsDateTime().format(None) == ""
    assert (
        GSheetsDateTime(pattern="M/d/yyyy H:mm:ss").format(
            datetime.datetime(2020, 12, 31, 12, 34, 56),
        )
        == "12/31/2020 12:34:56"
    )
    # quoting produces the GViz query literal; empty values become null
    assert GSheetsDateTime().quote(None) == "null"
    assert GSheetsDateTime().quote("") == "null"
    assert (
        GSheetsDateTime(pattern="M/d/yyyy H:mm:ss").quote("12/31/2020 12:34:56")
        == "datetime '2020-12-31 12:34:56'"
    )
def test_GSheetsDateTime_timezone():
    """
    Test GSheetsDateTime when timezone is set.
    """
    timezone = dateutil.tz.gettz("America/Los_Angeles")
    # parsed values become aware datetimes in the sheet's timezone
    assert (
        GSheetsDateTime(pattern="M/d/yyyy H:mm:ss", timezone=timezone).parse(
            "12/31/2020 12:34:56",
        )
        == datetime.datetime(2020, 12, 31, 12, 34, 56, tzinfo=timezone)
    )
    # formatting converts from UTC to the sheet's timezone (PST: UTC-8)
    assert (
        GSheetsDateTime(pattern="M/d/yyyy H:mm:ss", timezone=timezone).format(
            datetime.datetime(2020, 12, 31, 12, 34, 56, tzinfo=datetime.timezone.utc),
        )
        == "12/31/2020 4:34:56"
    )
def test_GSheetsDate():
    """
    Test ``GSheetsDate``.
    """
    # empty cell values parse to None and quote to SQL null
    for empty in (None, ""):
        assert GSheetsDate().parse(empty) is None
        assert GSheetsDate().quote(empty) == "null"
    assert GSheetsDate().format(None) == ""
    # a pattern-aware field round-trips between cell text and date objects
    new_years_eve = datetime.date(2020, 12, 31)
    cell_text = "12/31/2020"
    assert GSheetsDate(pattern="M/d/yyyy").parse(cell_text) == new_years_eve
    assert GSheetsDate(pattern="M/d/yyyy").format(new_years_eve) == cell_text
    # quoting emits the GViz date literal
    assert GSheetsDate(pattern="M/d/yyyy").quote(cell_text) == "date '2020-12-31'"
def test_GSheetsTime():
    """
    Test ``GSheetsTime``.
    """
    # empty cell values parse to None and quote to SQL null
    for empty in (None, ""):
        assert GSheetsTime().parse(empty) is None
        assert GSheetsTime().quote(empty) == "null"
    assert GSheetsTime().format(None) == ""
    # 12-hour pattern: "12:34:56 AM" is 00:34:56, and noon formats with PM
    pattern = "h:mm:ss am/pm"
    assert GSheetsTime(pattern=pattern).parse("12:34:56 AM") == datetime.time(0, 34, 56)
    assert GSheetsTime(pattern=pattern).format(datetime.time(12, 34, 56)) == "12:34:56 PM"
    # quoting emits the GViz timeofday literal in 24-hour form
    assert GSheetsTime(pattern=pattern).quote("12:34:56 AM") == "timeofday '00:34:56'"
def test_GSheetsBoolean():
    """
    Test ``GSheetsBoolean``.
    """
    # empty cell values parse to None and quote to SQL null
    for empty in (None, ""):
        assert GSheetsBoolean().parse(empty) is None
        assert GSheetsBoolean().quote(empty) == "null"
    assert GSheetsBoolean().format(None) == ""
    # cell text round-trips with Python booleans; quoting lowercases
    for serialized, value in (("TRUE", True), ("FALSE", False)):
        assert GSheetsBoolean().parse(serialized) is value
        assert GSheetsBoolean().format(value) == serialized
        assert GSheetsBoolean().quote(serialized) == serialized.lower()
def test_GSheetsNumber():
    """
    Test ``GSheetsNumber``.

    Exercises parsing, formatting and quoting; parsing preserves the
    int-vs-float distinction of the source string.
    """
    field = GSheetsNumber()

    # parse: empty-ish inputs map to None
    assert field.parse(None) is None
    assert field.parse("") is None
    parsed_int = field.parse("1")
    parsed_float = field.parse("1.0")
    assert parsed_int == 1.0
    assert parsed_float == 1.0
    # "1" parses as int, "1.0" as float
    assert isinstance(parsed_int, int)
    assert isinstance(parsed_float, float)

    # format: None maps to the empty string
    assert field.format(None) == ""
    assert field.format(1) == "1"
    assert field.format(1.0) == "1.0"

    # quote: numbers are emitted unquoted
    assert field.quote(None) == "null"
    assert field.quote("") == "null"
    assert field.quote(1) == "1"
    assert field.quote(1.0) == "1.0"
def test_GSheetsString():
    """
    Test ``GSheetsString``.

    Exercises parsing, formatting and quoting, including the None/empty
    edge cases.
    """
    field = GSheetsString()

    # parse: empty-ish inputs map to None, other strings pass through
    assert field.parse(None) is None
    assert field.parse("") is None
    assert field.parse("test") == "test"

    # format: None maps to the empty string
    assert field.format(None) == ""
    assert field.format("test") == "test"

    # quote: strings are single-quoted
    assert field.quote(None) == "null"
    assert field.quote("") == "null"
    assert field.quote("test") == "'test'"
| StarcoderdataPython |
3521090 | import boto3
import grovepi
import random
import time
# GrovePi PIR motion sensor wired to digital port 8.
pir_sensor = 8
motion=0
grovepi.pinMode(pir_sensor,"INPUT")
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
# Configure the AWS IoT MQTT client; the placeholder endpoint/credential
# paths below must be filled in before running.
mqtt = AWSIoTMQTTClient
mq = mqtt("zombie_sensor")
mq.configureEndpoint("YOUR.ENDPOINT",8883)
mq.configureCredentials("YOUR/ROOT/CA/PATH", "PRIVATE/KEY/PATH", "CERTIFICATE/PATH")
# Exponential reconnect backoff: base 1s, max 32s, stable window 20s.
mq.configureAutoReconnectBackoffTime(1, 32, 20)
# -1 = queue an unlimited number of messages while offline.
mq.configureOfflinePublishQueueing(-1)
mq.configureDrainingFrequency(2)
mq.configureMQTTOperationTimeout(5)
mq.connect()
# [name, latitude, longitude] entries used for simulated detections.
cities = [
    ['London', 51.507351, -0.127758],
    ['Las Vegas', 36.169941, -115.139830],
    ['New York', 40.712784, -74.005941],
    ['Singapore', 1.352083, 103.819836],
    ['Sydney', -33.867487, 151.206990],
    ['Paris', 48.856614, 2.352222],
    ['Seattle', 47.606209, -122.332071],
    ['San Francisco', 37.774929, -122.419416],
    ['Montreal', 45.501689, -73.567256],
    ['Rio De Janeiro', -22.906847, -43.172896],
    ['Beijing', 39.904211, 116.407395],
    ['Moscow', 55.755826, 37.617300],
    ['Buenos Aires', -34.603684, -58.381559],
    ['New Dehli', 28.613939, 77.209021],
    ['Cape Town', -33.924869, 18.424055],
    ['Lagos', 6.524379, 3.379206],
    ['Munich', 48.135125, 11.581981],
]


def pickCity():
    """Return a uniformly random ``[name, latitude, longitude]`` entry."""
    return random.choice(cities)
def generateAlert():
    """Publish a zombie-detection alert for a randomly chosen city.

    The payload is a JSON object with ``message``, ``longitude`` and
    ``latitude`` keys, published on the ``zombie-alert`` topic at QoS 1.
    """
    import json  # local import keeps the fix self-contained

    city = pickCity()
    # json.dumps guarantees well-formed JSON (correct escaping of quotes,
    # backslashes, etc.), unlike the previous hand-concatenated string.
    message = json.dumps({
        "message": "A Zombie has been detected in " + city[0] + "!",
        "longitude": str(city[2]),
        "latitude": str(city[1]),
    })
    print(message)
    mq.publish('zombie-alert', message, 1)
def periodicActivity():
    """Poll the PIR sensor once per second, raising an alert on motion."""
    while True:
        # 1 is treated as motion, 0 as all clear.
        reading = grovepi.digitalRead(pir_sensor)
        if reading in (0, 1):
            if reading == 1:
                generateAlert()
            else:
                print('--------------------All Clear--------------------')
        time.sleep(1)


periodicActivity()
| StarcoderdataPython |
5190918 | <reponame>rhubarbdog/mpr121-keypad<gh_stars>1-10
# gnd rq 3.3v sda scl
import pyb
import pyboard_keypad as keypad
import time
# MicroPython (pyboard) script: live-print keypad state until the user
# switch is pressed, then dump per-key press statistics.
i2c = pyb.I2C(2, pyb.I2C.MASTER)
switch = pyb.Switch()
# MPR121-based keypad on I2C bus 2, interrupt pin 'Y12'.
keypad = keypad.KEYPAD(i2c, 'Y12')
# Digits 1-9 plus the '*', 0 and '#' keys.
ALL_KEYS = [ j+1 for j in range(9) ] + ['*', 0, '#']
while not switch.value():
    # "P" when proximity is detected, "-" otherwise.
    if keypad.keypad.is_near():
        print("P", end = ' ')
    else:
        print("-", end = ' ')
    # One character per key: its label when pressed, '.' otherwise.
    for i in ALL_KEYS:
        if keypad.key[i].is_pressed():
            print(str(keypad.key[i]), end = ' ')
        else:
            print('.', end = ' ')
    # Carriage return (no newline) so the status line updates in place.
    print('', end = '\r')
    time.sleep_ms(10)
print("")
# After the switch is pressed, report press counters for every key.
# NOTE(review): get_presses()/was_pressed() are each called twice per key --
# confirm whether these calls reset state (second call may differ).
for i in ALL_KEYS:
    print(i,keypad.key[i].get_presses(),
          keypad.key[i].was_pressed(),
          keypad.key[i].get_presses(),
          keypad.key[i].was_pressed())
| StarcoderdataPython |
12827456 | from django.apps import AppConfig
class TogglReportAppConfig(AppConfig):
    """Django application configuration for the ``toggl_report_app`` app."""
    # Dotted module path Django uses to register this application.
    name = 'toggl_report_app'
1923648 | <gh_stars>0
import os
from dash_extensions.enrich import DashProxy, MultiplexerTransform
# import dash
from flask import Flask
from flask_login import login_required
import dash_bootstrap_components as dbc
def create_app() -> Flask:
    """Application factory: build the Flask server with two embedded Dash apps.

    Mounts a login-protected dashboard under ``/dashboard/`` and an
    unprotected chat app under ``/chat/``, then wires up extensions and
    blueprints.
    """
    server = Flask(__name__)
    # Secret key comes from the environment; empty fallback for local dev.
    server.secret_key = os.environ.get("FLASK_SECRET_KEY", "")
    # Imports are local to avoid circular imports at module load time.
    from app.dashboard.layout import layout as layout1
    from app.dashchat.layout import layout as layout2
    from app.dashboard.callbacks import register_callbacks as register_callbacks1
    from app.dashchat.callbacks import register_callbacks as register_callbacks2
    register_dashapp(server, 'Dashboard', 'dashboard', layout1, register_callbacks1, True) # Create admin dashboard
    register_dashapp(server, 'Chat', 'chat', layout2, register_callbacks2, False) # Create chat page
    register_extensions(server)
    register_blueprints(server)
    return server
# def register_dashapps(app):
# from app.dashapp1.layout import layout
# from app.dashapp1.callbacks import register_callbacks
#
# # Meta tags for viewport responsiveness
# meta_viewport = {
# "name": "viewport",
# "content": "width=device-width, initial-scale=1, shrink-to-fit=no"}
#
# dashapp1 = dash.Dash(__name__,
# server=app,
# url_base_pathname='/dashboard/',
# assets_folder=get_root_path(__name__) + '/dashboard/assets/',
# meta_tags=[meta_viewport])
#
# with app.app_context():
# dashapp1.title = 'Dashapp 1'
# dashapp1.layout = layout
# register_callbacks(dashapp1)
#
# _protect_dashviews(dashapp1)
def register_dashapp(app, title, base_pathname, layout, register_callbacks_fun, is_protected):
    """Create a Dash app mounted on *app* under ``/<base_pathname>/``.

    Args:
        app: the Flask server to attach the Dash app to.
        title: browser/page title for the Dash app.
        base_pathname: URL prefix the app is served from.
        layout: Dash layout object assigned to the app.
        register_callbacks_fun: callable that registers the app's callbacks.
        is_protected: when True, wrap all of the app's views with login_required.
    """
    # Meta tags for viewport responsiveness
    meta_viewport = {"name": "viewport", "content": "width=device-width, initial-scale=1, shrink-to-fit=no"}
    # DashProxy + MultiplexerTransform allows multiple callbacks to target
    # the same Output.
    my_dashapp = DashProxy(__name__,
                           server=app,
                           external_stylesheets=[dbc.themes.BOOTSTRAP],
                           url_base_pathname=f'/{base_pathname}/',
                           assets_folder='static/',
                           meta_tags=[meta_viewport],
                           prevent_initial_callbacks=True,
                           transforms=[MultiplexerTransform()])
    with app.app_context():
        my_dashapp.title = title
        my_dashapp.layout = layout
        register_callbacks_fun(my_dashapp)
    if is_protected:
        _protect_dashviews(my_dashapp)
def _protect_dashviews(dashapp):
    """Wrap every view of *dashapp* with ``login_required``."""
    prefix = dashapp.config.url_base_pathname
    views = dashapp.server.view_functions
    for endpoint in list(views):
        # Only guard endpoints that belong to this Dash app's URL space.
        if endpoint.startswith(prefix):
            views[endpoint] = login_required(views[endpoint])
def register_extensions(server):
    """Initialise the login manager extension on *server*."""
    from app.extensions import login
    login.init_app(server)
    # Endpoint users are redirected to when authentication is required.
    login.login_view = 'main.login'
def register_blueprints(server):
    """Attach the main web blueprint to *server*."""
    from app.webapp import server_bp
    server.register_blueprint(server_bp)
| StarcoderdataPython |
11346200 | <reponame>gregbuehler/DeepChat
import torch
import logging
import transformers
from abc import ABC, abstractmethod
class AbstractModel(ABC):
    """
    Base abstract class for the model.

    Subclasses must implement ``predict`` and ``fine_tune``; the private
    helpers handle device resolution and decoding of generated output.
    """

    @abstractmethod
    def predict(self, user_input, conversation):
        """Produce a model response for *user_input* given *conversation*."""
        raise NotImplementedError()

    @abstractmethod
    def fine_tune(self, data):
        """Fine-tune the underlying model on *data*."""
        raise NotImplementedError()

    def __get_device(self, device):
        """Resolve *device* to a ``torch.device``, falling back to CPU when
        CUDA is requested but unavailable."""
        if device == "cuda" and not torch.cuda.is_available():
            logging.info("Cuda not available. Defaulting to CPU.")
            device = "cpu"
        return torch.device(device)

    def __decode_bot_response(self, bot_output, input_ids):
        """Decode only the newly generated tokens (everything after the
        prompt) from the model output."""
        generated = bot_output[:, input_ids.shape[-1]:]
        return self.tokenizer.decode(generated[0], skip_special_tokens=True)
| StarcoderdataPython |
3507121 | <gh_stars>0
# !/usr/bin/env python3
# -*- Coding: UTF-8 -*- #
# -*- System: Linux -*- #
# -*- Usage: *.py -*- #
# Owner: Cloud-Technology LLC.
# Source: gitlab.cloud-technology.io
# License: BSD 3-Clause License
"""
...
"""
# =============================================================================
# Local Imports
# =============================================================================
from . import *
import Database.Business.Schemas.Implicit
# =============================================================================
# Reference Type(s)
# =============================================================================
# Alias so the nested ``Config`` classes below inherit the shared
# configuration defined on ``Scheme``.
Configuration = Scheme.Config

# =============================================================================
# Class (Schema) Initialization
# =============================================================================

import Database.Business.Schemas
import Database.Business.Schemas.Company

__module__ = __name__

# Type alias: a list of implicit-user schema types.
# NOTE(review): ``Type(...)`` is *called* here rather than subscripted --
# confirm this yields the intended typing construct.
User = List[Type(Database.Business.Schemas.Implicit.User)]
class Base(Scheme):
    """
    Base schema exposing the shared ``Name`` field.
    """
    # Defaults to "Research & Development" when no name is supplied.
    Name: String = Field("Research & Development")
    class Config(Configuration): title = "%s" % __name__
class Shard(Base):
    """
    ``Base`` plus optional identity, associated users and record timestamps.
    """
    ID: Optional[Union[UUID, String, Integer]]
    User: Optional[User]
    # NOTE(review): UID() is evaluated once at class-definition time, so every
    # instance shares the same default value -- confirm whether a
    # default_factory is needed instead.
    Creation: Optional[Date] = Field(UID(), alias = "Record-Creation-Date")
    Modification: Optional[Date] = Field(None, alias = "Record-Modification-Date")
    class Config(Configuration): title = "%s" % __name__ + "." + "Shard"
class Association(Shard):
    """
    ``Shard`` linked to its parent company through the ``Company-ID`` alias.
    """
    # Foreign key to the owning company record (required).
    FK: Union[String, UUID, Integer] = Field(..., alias = "Company-ID")
    class Config(Configuration): title = "%s" % __name__ + "." + "Association"
class Create(Base):
    """
    Input schema for creating a record; identical fields to ``Base``.
    """
    class Config(Base.Config): title = Base.Config.title + "-" + "Create"
class Update(Base):
    """
    Input schema for updating a record; ``ID`` is required, ``Name`` optional.
    """
    ID: UUID
    Name: Optional[String]
    class Config(Base.Config): title = Base.Config.title + "-" + "Update"
class Delete(Scheme):
    """
    Input schema for deleting a record by its optional ``ID``.
    """
    ID: Optional[String] = None
    class Config(Base.Config): title = Base.Config.title + "-" + "Delete"
class Reference(Base):
    """
    Read schema: ``Base`` fields plus identity and record timestamps.
    """
    ID: Union[UUID, String, Integer]
    # NOTE(review): UID() is evaluated once at class-definition time (shared
    # default across instances) -- confirm intended.
    Creation: Optional[Date] = Field(UID(), alias = "Record-Creation-Date")
    Modification: Optional[Date] = Field(None, alias = "Record-Modification-Date")
    class Config(Base.Config): title = Base.Config.title + "-" + "Reference"
class Schema(Create):
    """Full record schema: identity, company foreign key and timestamps."""
    ID: Union[UUID, String, Integer]
    # Required foreign key to the owning company record.
    CFK: Union[UUID, String, Integer] = Field(..., alias = "Company-ID")
    Creation: Date = Field(..., alias = "Record-Creation-Date")
    Modification: Optional[Date] = Field(alias = "Record-Modification-Date")
    class Config(Base.Config): title = Base.Config.title + "-" + "Schema"
| StarcoderdataPython |
1898306 | import os
import copy
import pickle
from threading import main_thread
import numpy as np
from torch.optim import lr_scheduler
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torch_xla
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
import torch_xla.utils.utils as xu
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla.test.test_utils as test_utils
from src.data.utils import _collate_fn_raw, _collate_fn_raw_multiclass
from torch.utils.data import DataLoader, sampler
from src.data.raw_transforms import get_raw_transforms, get_raw_transforms_v2, simple_supervised_transforms
from src.utilities.config_parser import parse_config, get_data_info, get_config
from src.models.model_helper import get_feature_extractor
from src.optim.agc import adaptive_clip_grad
from src.utilities.training_utils import setup_tpu_dataloaders, tpu_optimization_helper
import argparse
from src.data.raw_dataset import RawWaveformDataset as SpectrogramDataset
import wandb
from src.data.mixup import do_mixup, mixup_criterion
from src.models.contrastive_model import get_pretrained_weights_for_transfer
from src.utilities.map import calculate_mAP
class BaselineModel(nn.Module):
    """Supervised baseline: config-built feature extractor + linear classifier.

    Outputs raw logits over ``cfg['num_classes']`` classes; callers apply
    the appropriate loss/activation.
    """
    def __init__(self, cfg):
        super().__init__()
        num_classes = cfg['num_classes']
        self.features = get_feature_extractor(cfg)
        # Classifier input width must match the extractor's projection size.
        self.fc = nn.Linear(cfg['proj_out_dim'], num_classes)

    def forward(self, x):
        # The feature extractor returns (embedding, extra); only the
        # embedding is used here.
        x, _ = self.features(x)
        x = F.relu(x, inplace=True)
        x = self.fc(x)
        return x
def save_checkpoint(model, optimizer, scheduler, epoch,
                    tr_loss, tr_acc, val_acc):
    """Write a checkpoint for *epoch* into ``ARGS.output_directory``.

    The filename encodes the epoch and the training/validation metrics;
    the archive bundles model, optimizer and scheduler state together
    with those metrics.
    """
    filename = "epoch={:03d}_tr_loss={:.6f}_tr_acc={:.6f}_val_acc={:.6f}.pth".format(
        epoch, tr_loss, tr_acc, val_acc)
    ckpt_path = os.path.join(ARGS.output_directory, filename)
    archive = {
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "scheduler_state_dict": scheduler.state_dict(),
        "epoch": epoch,
        "tr_loss": tr_loss,
        "tr_acc": tr_acc,
        "val_acc": val_acc,
    }
    # xm.save handles serialization correctly in the multi-core TPU setting.
    xm.save(archive, ckpt_path)
    xm.master_print("Checkpoint written to -> {}".format(ckpt_path))
# Command-line interface for the training script.
parser = argparse.ArgumentParser()
parser.description = "Training script for FSD50k baselines"
parser.add_argument("--cfg_file", type=str,
                    help='path to cfg file')
parser.add_argument("--expdir", "-e", type=str,
                    help="directory for logging and checkpointing")
parser.add_argument('--epochs', default=250, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--cw", type=str, required=False,
                    help="path to serialized torch tensor containing class weights")
parser.add_argument("--resume_from", type=str,
                    help="checkpoint path to continue training from")
parser.add_argument('--mixer_prob', type=float, default=0.75,
                    help="background noise augmentation probability")
parser.add_argument("--fp16", action="store_true",
                    help='flag to train in FP16 mode')
parser.add_argument("--random_clip_size", type=float, default=5)
parser.add_argument("--val_clip_size", type=float, default=5)
parser.add_argument("--use_mixers", action="store_true")
parser.add_argument("--use_mixup", action="store_true")
parser.add_argument("--prefetch_factor", type=int, default=4)
parser.add_argument("--tpus", type=int, default=1)
parser.add_argument("--log_steps", default=10, type=int)
# Logging / experiment-tracking options.
parser.add_argument("--no_wandb", action="store_true")
parser.add_argument("--high_aug", action="store_true")
parser.add_argument("--wandb_project", type=str, default="pgr-thesis-contrastive-baseline")
parser.add_argument("--wandb_group", type=str, default="dataset")
parser.add_argument("--labels_delimiter", type=str, default=",")
parser.add_argument("--wandb_watch_model", action="store_true")
parser.add_argument("--random_seed", type=int, default=8881)
parser.add_argument("--continue_from_ckpt", type=str, default=None)
ARGS = parser.parse_args()
# Derived paths: checkpoints and logs live under the experiment directory.
ARGS.output_directory = os.path.join(ARGS.expdir, "ckpts")
ARGS.log_directory = os.path.join(ARGS.expdir, "logs")
def _train_update(device, step, loss, tracker, epoch, writer):
    """Step-closure helper: print a periodic training progress line.

    Invoked via ``xm.add_step_closure`` so ``loss.item()`` is evaluated
    at a safe point in the XLA step.
    """
    test_utils.print_training_update(
        device,
        step,
        loss.item(),
        tracker.rate(),
        tracker.global_rate(),
        epoch,
        summary_writer=writer)
def load_checkpoint(ckpt_path, model, optimizer, scheduler):
    """Restore model/optimizer/scheduler state from *ckpt_path*.

    Returns the epoch number stored in the checkpoint.
    """
    state = torch.load(ckpt_path)
    for target, key in ((model, 'model_state_dict'),
                        (optimizer, 'optimizer_state_dict'),
                        (scheduler, 'scheduler_state_dict')):
        target.load_state_dict(state[key])
    return state['epoch']
def train(ARGS):
    """Per-process supervised training loop (one TPU core).

    Builds datasets/loaders, the baseline model, optimizer and scheduler
    from the config file, then runs up to ``ARGS.epochs`` epochs of
    training with per-epoch validation, checkpointing and optional wandb
    logging. Returns the best validation metric (accuracy for multiclass,
    mAP for multilabel) seen on this process.
    """
    # cfg = parse_config(ARGS.cfg_file)
    # Seed every process identically for reproducibility.
    np.random.seed(ARGS.random_seed)
    torch.manual_seed(ARGS.random_seed)
    cfg = get_config(ARGS.cfg_file)
    # data_cfg = get_data_info(cfg['data'])
    # cfg['data'] = data_cfg
    # assert cfg['model']['pretrained_hparams_path']
    # assert cfg['model']['pretrained_ckpt_path']
    # "multiclass" or "multilabel"; selects collate fn, loss and metric below.
    mode = cfg['model']['type']
    tpu_world_size = xm.xrt_world_size()
    tpu_local_rank = xm.get_ordinal()
    # random_clip_size = int(ARGS.random_clip_size * cfg['audio_config']['sample_rate'])
    # val_clip_size = int(ARGS.val_clip_size * cfg['audio_config']['sample_rate'])
    ac = cfg['audio_config']
    # Clip lengths are given in seconds in the config; convert to samples.
    random_clip_size = int(ac['random_clip_size'] * ac['sample_rate'])
    val_clip_size = int(ac['val_clip_size'] * ac['sample_rate'])
    # --- transforms and datasets -----------------------------------------
    if ARGS.high_aug:
        tr_tfs = get_raw_transforms_v2(True, random_clip_size,
                                       sample_rate=ac['sample_rate'])
        val_tfs = get_raw_transforms_v2(False, val_clip_size, center_crop_val=True,
                                        sample_rate=ac['sample_rate'])
    else:
        tr_tfs = simple_supervised_transforms(True, random_clip_size,
                                              sample_rate=ac['sample_rate'])
        val_tfs = simple_supervised_transforms(False, val_clip_size,
                                               sample_rate=ac['sample_rate'])
    train_set = SpectrogramDataset(cfg['data']['train'],
                                   cfg['data']['labels'],
                                   cfg['audio_config'],
                                   mode=mode, augment=True,
                                   mixer=None, delimiter=ARGS.labels_delimiter,
                                   transform=tr_tfs, is_val=False,
                                   use_tpu=True,
                                   tpu_local_rank=tpu_local_rank,
                                   tpu_world_rank=tpu_world_size)
    val_set = SpectrogramDataset(cfg['data']['val'],
                                 cfg['data']['labels'],
                                 cfg['audio_config'],
                                 mode=mode, augment=False,
                                 mixer=None, delimiter=ARGS.labels_delimiter,
                                 transform=val_tfs, is_val=True,
                                 use_tpu=True,
                                 tpu_local_rank=tpu_local_rank,
                                 tpu_world_rank=tpu_world_size)
    # --- model, loaders, optimizer ---------------------------------------
    batch_size = cfg['opt']['batch_size']
    device = xm.xla_device()
    # model = model_helper(cfg['model']).to(device)
    model = BaselineModel(cfg['model']).to(device)
    collate_fn = _collate_fn_raw_multiclass if mode == "multiclass" else _collate_fn_raw
    train_loader, val_loader = setup_tpu_dataloaders(train_set, val_set,
                                                     tpu_world_size=tpu_world_size, tpu_local_rank=tpu_local_rank,
                                                     batch_size=batch_size, collate_fn=collate_fn,
                                                     num_workers=ARGS.num_workers)
    train_device_loader = pl.MpDeviceLoader(train_loader, device)
    val_device_loader = pl.MpDeviceLoader(val_loader, device)
    num_steps_per_epoch = len(train_loader)
    optimizer, scheduler, scheduler_name = tpu_optimization_helper(model.parameters(), cfg, ARGS.tpus,
                                                                   reduce_on_plateau_mode="max",
                                                                   num_tr_steps_per_epoch=num_steps_per_epoch,
                                                                   num_epochs=ARGS.epochs)
    # --- optional resume from a checkpoint -------------------------------
    # NOTE(review): load_checkpoint returns the epoch stored at save time,
    # so that epoch is re-run on resume -- confirm this is intended.
    if ARGS.continue_from_ckpt:
        xm.master_print("Attempting to load checkpoint {}".format(ARGS.continue_from_ckpt))
        start_epoch = load_checkpoint(ARGS.continue_from_ckpt, model, optimizer, scheduler)
        xm.master_print("Checkpoint loading successful.. Continuing training from Epoch {}".format(start_epoch))
    else:
        start_epoch = 1
    writer = None
    wandb_logger = None
    # Only the master ordinal creates directories, the summary writer, wandb
    # and the hparams pickle.
    if xm.is_master_ordinal():
        if not os.path.exists(ARGS.output_directory):
            os.makedirs(ARGS.output_directory)
        if not os.path.exists(ARGS.log_directory):
            os.makedirs(ARGS.log_directory)
        log_name = ARGS.log_directory.split("/")[-2]
        print("RUN NAME:", log_name)
        writer = test_utils.get_summary_writer(ARGS.log_directory)
        if not ARGS.no_wandb:
            wandb_logger = wandb.init(project='{}'.format(ARGS.wandb_project),
                                      group="{}".format(ARGS.wandb_group),
                                      config=cfg, name=log_name)
        print(model)
        with open(os.path.join(ARGS.expdir, "hparams.pickle"), "wb") as handle:
            args_to_save = copy.deepcopy(ARGS)
            args_to_save.cfg = cfg
            pickle.dump(args_to_save, handle, protocol=pickle.HIGHEST_PROTOCOL)
    clip_factor = float(cfg['opt'].get("agc_clip_factor", 0.001))
    if mode == "multiclass":
        loss_fn = nn.CrossEntropyLoss()
    elif mode == "multilabel":
        loss_fn = nn.BCEWithLogitsLoss()
    mixup_enabled = cfg["audio_config"].get("mixup", False) # and mode == "multilabel"
    if mixup_enabled:
        xm.master_print("Attention: Will use mixup while training..")
    torch.set_grad_enabled(True)
    if wandb_logger and ARGS.wandb_watch_model:
        wandb_logger.watch(model, log="all", log_freq=100)
    agc_clip = bool(cfg['opt'].get("agc_clipping", False))
    accuracy, max_accuracy = 0.0, 0.0
    # --- epoch loop -------------------------------------------------------
    for epoch in range(start_epoch, ARGS.epochs + 1):
        xm.master_print("Epoch {:03d} train begin {}".format(epoch, test_utils.now()))
        tr_step_counter = 0
        model.train()
        tracker = xm.RateTracker()
        tr_loss = []
        tr_correct = 0
        tr_total_samples = 0
        tr_preds = []
        tr_gts = []
        for batch in train_device_loader:
            x, _, y = batch
            if mixup_enabled:
                if mode == "multilabel":
                    x, y, _, _ = do_mixup(x, y, mode=mode)
                elif mode == "multiclass":
                    x, y_a, y_b, lam = do_mixup(x, y, mode=mode)
            pred = model(x)
            if mode == "multiclass":
                # Accuracy bookkeeping on the argmax predictions.
                pred_labels = pred.max(1, keepdim=True)[1]
                tr_correct += pred_labels.eq(y.view_as(pred_labels)).sum()
                tr_total_samples += x.size(0)
                # NOTE(review): mixup_criterion uses y_a/y_b/lam, which are
                # only bound when mixup is enabled -- confirm multiclass mode
                # is always run with mixup on.
                loss = mixup_criterion(loss_fn, pred, y_a, y_b, lam)
            else:
                # Multilabel: accumulate sigmoid scores for mAP computation.
                y_pred_sigmoid = torch.sigmoid(pred)
                tr_preds.append(y_pred_sigmoid.detach().cpu().float())
                tr_gts.append(y.detach().cpu().float())
                loss = loss_fn(pred, y)
            optimizer.zero_grad()
            loss.backward()
            if agc_clip:
                adaptive_clip_grad(model.features.parameters(), clip_factor=clip_factor)
            xm.optimizer_step(optimizer)
            tracker.add(batch_size)
            if tr_step_counter % ARGS.log_steps == 0:
                xm.add_step_closure(
                    _train_update, args=(device, tr_step_counter, loss, tracker, epoch, writer)
                )
            # if wandb_logger:
            #     wandb_logger.log({"batch_tr_loss": loss})
            tr_loss.append(loss.item())
            tr_step_counter += 1
            # Per-step schedulers advance inside the batch loop.
            if scheduler_name == "warmupcosine":
                scheduler.step()
        # --- end-of-epoch training metrics (reduced across cores) --------
        mean_tr_loss = np.mean(tr_loss)
        epoch_tr_loss = xm.mesh_reduce("tr_loss", mean_tr_loss, np.mean)
        if mode == "multiclass":
            tr_acc = tr_correct.item() / tr_total_samples
        else:
            # calculate mAP
            tr_acc = calculate_mAP(tr_preds, tr_gts, mixup_enabled, mode="weighted")
        tr_acc = xm.mesh_reduce("train_accuracy", tr_acc, np.mean)
        xm.master_print('Epoch {} train end {} | Mean Loss: {} | Mean Acc:{}'.format(epoch,
            test_utils.now(), epoch_tr_loss, tr_acc))
        # --- validation ---------------------------------------------------
        val_step_counter = 0
        model.eval()
        total_samples = 0
        correct = 0
        del tr_gts, tr_preds
        if xm.is_master_ordinal():
            curr_lr = scheduler.get_lr()
        xm.master_print("Validating..")
        val_preds = []
        val_gts = []
        for batch in val_device_loader:
            x, _, y = batch
            with torch.no_grad():
                pred = model(x)
            # xm.master_print("pred.shape:", pred.shape)
            if mode == "multiclass":
                pred = pred.max(1, keepdim=True)[1]
                correct += pred.eq(y.view_as(pred)).sum()
                total_samples += x.size()[0]
            else:
                y_pred_sigmoid = torch.sigmoid(pred)
                val_preds.append(y_pred_sigmoid.detach().cpu().float())
                val_gts.append(y.detach().cpu().float())
        # NOTE(review): validation metric is not mesh-reduced across cores
        # (see commented line) -- each process reports its own shard; confirm
        # this is intended.
        if mode == "multiclass":
            accuracy = correct.item() / total_samples
            # accuracy = xm.mesh_reduce('test_accuracy', accuracy, np.mean)
        else:
            accuracy = calculate_mAP(val_preds, val_gts)
        # val_preds = torch.cat(val_preds, 0)
        # val_gts = torch.cat(val_gts, 0)
        # all_val_preds = xm.mesh_reduce("all_val_preds", val_preds, torch.cat)
        # xm.master_print("after all reduce, preds shape:", all_val_preds.shape)
        xm.master_print('Epoch {} test end {}, Accuracy={:.4f}'.format(
            epoch, test_utils.now(), accuracy))
        max_accuracy = max(accuracy, max_accuracy)
        # --- logging, checkpointing, scheduler step ----------------------
        dict_to_write = {
            "tr_loss": epoch_tr_loss,
            "tr_acc": tr_acc,
            "val_acc": accuracy
        }
        del val_gts, val_preds
        if wandb_logger:
            wandb_logger.log(dict_to_write)
        test_utils.write_to_summary(
            writer,
            epoch,
            dict_to_write=dict_to_write,
            write_xla_metrics=True)
        save_checkpoint(model, optimizer, scheduler, epoch, epoch_tr_loss, tr_acc, accuracy)
        if scheduler_name == "reduce":
            scheduler.step(tr_acc)
        else:
            scheduler.step()
    test_utils.close_summary_writer(writer)
    xm.master_print("Training done, best acc: {}".format(max_accuracy))
    if wandb_logger:
        wandb_logger.finish()
    return max_accuracy
def _mp_fn(index, flags):
    """Per-process entry point for ``xmp.spawn``: run training on one core.

    Args:
        index: process ordinal supplied by xmp.spawn (unused directly;
            the training code queries xm for its ordinal).
        flags: the parsed command-line ARGS namespace.
    """
    # Unused return-value assignment removed; train() performs all work.
    train(flags)
if __name__ == "__main__":
    # Launch one training process per requested TPU core.
    xmp.spawn(_mp_fn, args=(ARGS,), nprocs=ARGS.tpus)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.