content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Console print helper
import simplejson as json
import pprint
INDENT = 2
pp = pprint.PrettyPrinter(indent=INDENT)
| [
198,
2,
24371,
3601,
31904,
198,
198,
11748,
2829,
17752,
355,
33918,
198,
11748,
279,
4798,
628,
198,
12115,
3525,
796,
362,
198,
198,
381,
796,
279,
4798,
13,
35700,
6836,
3849,
7,
521,
298,
28,
12115,
3525,
8,
198
] | 3 | 40 |
# Copyright The IETF Trust 2010-2020, All Rights Reserved
# -*- coding: utf-8 -*-
# Taken from http://code.google.com/p/soclone/source/browse/trunk/soclone/utils/html.py
"""Utilities for working with HTML."""
import bleach
import copy
import html2text
import lxml.etree
import lxml.html
import lxml.html.clean
import debug # pyflakes:ignore
from django import forms
from django.utils.functional import keep_lazy
from ietf.utils.mime import get_mime_type
acceptable_tags = ('a', 'abbr', 'acronym', 'address', 'b', 'big',
'blockquote', 'body', 'br', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'font',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head', 'hr', 'html', 'i', 'ins', 'kbd',
'li', 'ol', 'p', 'pre', 'q', 's', 'samp', 'small', 'span', 'strike', 'style',
'strong', 'sub', 'sup', 'table', 'title', 'tbody', 'td', 'tfoot', 'th', 'thead',
'tr', 'tt', 'u', 'ul', 'var')
acceptable_protocols = ['http', 'https', 'mailto', 'xmpp', ]
def unescape(text):
"""
Returns the given text with ampersands, quotes and angle brackets decoded
for use in URLs.
This function undoes what django.utils.html.escape() does
"""
return text.replace(''', "'").replace('"', '"').replace('>', '>').replace('<', '<' ).replace('&', '&')
@keep_lazy(str)
def remove_tags(html, tags):
"""Returns the given HTML sanitized, and with the given tags removed."""
allowed = set(acceptable_tags) - set([ t.lower() for t in tags ])
return bleach.clean(html, tags=allowed)
# ----------------------------------------------------------------------
# Html fragment cleaning
bleach_cleaner = bleach.sanitizer.Cleaner(tags=acceptable_tags, protocols=acceptable_protocols, strip=True)
# ----------------------------------------------------------------------
# Page cleaning
# We will be saving as utf-8 later, so set that in the meta tag.
lxml_cleaner = Cleaner(allow_tags=acceptable_tags, remove_unknown_tags=None, style=False, page_structure=False, charset='utf-8')
# ----------------------------------------------------------------------
# Text field cleaning
| [
2,
15069,
383,
314,
22274,
9870,
3050,
12,
42334,
11,
1439,
6923,
33876,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
30222,
422,
2638,
1378,
8189,
13,
13297,
13,
785,
14,
79,
14,
35634,
75,
505,
14,
10... | 2.853816 | 773 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le contexte éditeur EdtBrouillons"""
from primaires.interpreteur.editeur import Editeur
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
from primaires.communication.editeurs.medit import EdtMedit
from primaires.communication.mudmail import BROUILLON
from primaires.format.fonctions import couper_phrase
class EdtBrouillons(Editeur):
"""Classe définissant le contexte-éditeur 'brouillons'.
Ce contexte liste les brouillons et propose des options d'édition.
"""
def __init__(self, pere, objet=None, attribut=None):
"""Constructeur de l'éditeur"""
Editeur.__init__(self, pere, objet, attribut)
self.ajouter_option("e", self.opt_editer)
self.ajouter_option("s", self.opt_supprimer)
def accueil(self):
"""Méthode d'accueil"""
joueur = self.pere.joueur
mails = type(self).importeur.communication.mails.get_mails_pour(
joueur, BROUILLON)
msg = "||tit| " + "Brouillons".ljust(76) + "|ff||\n"
msg += self.opts.separateur + "\n"
msg += self.aide_courte + "\n\n"
if not mails:
msg += "|att|Aucun message enregistré dans ce dossier.|ff|"
else:
taille = 0
for mail in mails:
t_sujet = len(couper_phrase(mail.sujet, 33))
if t_sujet > taille:
taille = t_sujet
taille = (taille < 5 and 5) or taille
msg += "+" + "-".ljust(taille + 41, "-") + "+\n"
msg += "| |tit|N°|ff| | |tit|" + "Sujet".ljust(taille)
msg += "|ff| | |tit|Destinataire|ff| | |tit|" + "Date".ljust(16)
msg += "|ff| |\n"
i = 1
for mail in mails:
msg += "| |rg|" + str(i).rjust(2) + "|ff| | "
msg += "|vr|" + couper_phrase(mail.sujet, 33).ljust( \
taille) + "|ff| | |blc|"
msg += couper_phrase(mail.aff_dest,12).ljust(12) + "|ff| | "
msg += "|jn|" + mail.date.isoformat(" ")[:16] + "|ff| |\n"
i += 1
msg += "+" + "-".ljust(taille + 41, "-") + "+"
return msg
def opt_editer(self, arguments):
"""Option éditer"""
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \
"message.|ff|"
return
mails = type(self).importeur.communication.mails.get_mails_pour(
self.pere.joueur, BROUILLON)
try:
num = int(arguments.split(" ")[0])
except ValueError:
self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \
"valide.|ff|"
else:
i = 1
e_mail = None
for mail in mails:
if num == i:
e_mail = mail
break
i += 1
if e_mail is None:
self.pere.joueur << "|err|Le numéro spécifié ne correspond " \
"à aucun message.|ff|"
return
brouillon = type(self).importeur.communication.mails.creer_mail( \
e_mail.expediteur, source=e_mail)
enveloppe = EnveloppeObjet(EdtMedit, brouillon, None)
enveloppe.parent = self
contexte = enveloppe.construire(self.pere.joueur)
contexte.opts.rci_ctx_prec = ""
self.pere.joueur.contextes.ajouter(contexte)
contexte.actualiser()
def opt_supprimer(self, arguments):
"""Option supprimer"""
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \
"message.|ff|"
return
mails = type(self).importeur.communication.mails.get_mails_pour(
self.pere.joueur, BROUILLON)
try:
num = int(arguments.split(" ")[0])
except ValueError:
self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \
"valide.|ff|"
else:
i = 1
s_mail = None
for mail in mails:
if num == i:
s_mail = mail
break
i += 1
if s_mail is None:
self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \
"aucun message.|ff|"
return
del type(self).importeur.communication.mails[s_mail.id]
self.pere.joueur << "|att|Ce message a bien été supprimé.|ff|"
| [
2,
532,
9,
12,
66,
7656,
25,
18274,
69,
12,
23,
532,
9,
198,
198,
2,
15069,
357,
66,
8,
3050,
12,
5539,
12509,
10351,
5777,
18653,
198,
2,
1439,
2489,
10395,
13,
198,
2,
220,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
... | 2.054462 | 3,048 |
from django.contrib import admin
from .models import Hora
# Register your models here.
admin.site.register(Hora) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
367,
5799,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
28482,
13,
15654,
13,
30238,
7,
39,
5799,
8
] | 3.285714 | 35 |
import pytest
| [
11748,
12972,
9288,
628
] | 3.75 | 4 |
import torch
import torch.nn as nn
from timm.models.layers import DropPath, trunc_normal_
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
4628,
76,
13,
27530,
13,
75,
6962,
1330,
14258,
15235,
11,
40122,
62,
11265,
62,
628,
628
] | 3.133333 | 30 |
# -*- coding: utf-8 -*-
import os
import unittest
from pelican.contents import Article
from pelican.generators import Generator
from pelican.settings import DEFAULT_CONFIG
from categorytpl import category_template
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
6738,
16176,
7490,
13,
3642,
658,
1330,
10172,
198,
6738,
16176,
7490,
13,
8612,
2024,
1330,
35986,
198,
6738,
16176,
... | 3.105882 | 85 |
import json
from collections import defaultdict
board = None
with open('trello.json', 'r') as file:
board = json.loads(file.read())
cards = board['cards']
lists = board['lists']
list_id_to_name = {}
list_names_to_pos = {}
for _list in lists:
if _list['closed']:
continue
list_id_to_name[_list['id']] = _list['name']
list_names_to_pos[_list['name']] = _list['pos']
keyset = set(str())
cards_cleaned = []
for card in cards:
votes = 0
if 'votes' in card['badges']:
votes = card['badges']['votes']
if votes == 0:
continue
cards_cleaned.append(
{
'name': card['name'],
'list': list_id_to_name[card['idList']],
'votes': votes
}
)
# separate by list
list_to_cards = defaultdict(list)
for card in cards_cleaned:
list_to_cards[card['list']].append(card)
for list_name in list(list_to_cards.keys()):
list_to_cards[list_name] = sorted(
list_to_cards[list_name],
key=lambda x: x['votes'],
reverse=True
)
with open('trello.csv', 'w') as file:
list_names = list(list_to_cards.keys())
list_names = sorted(list_names, key=lambda x: int(list_names_to_pos[x]))
header = ''
for list_name in list_names:
header += 'votes,{},'.format(list_name.encode('utf-8'))
header = header[:-1] + '\n'
file.write(header)
row_index = 0
rows = []
while True:
row = []
none_counter = 0
for list_name in list_names:
list_cards = list_to_cards[list_name]
if row_index < len(list_cards):
card = list_cards[row_index]
else:
card = None
none_counter += 1
row.append(card)
if none_counter == len(list_names):
break
else:
rows.append(row)
row_index += 1
for row in rows:
row_str = ''
for item in row:
if item is None:
row_str += ',,'
else:
row_str += '{},"{}",'.format(
item['votes'],
item['name'].encode('utf-8').replace('"', '\'')
)
row_str = row_str[:-1] + '\n'
file.write(row_str)
| [
11748,
33918,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
3526,
796,
6045,
198,
4480,
1280,
10786,
83,
11252,
78,
13,
17752,
3256,
705,
81,
11537,
355,
2393,
25,
198,
220,
220,
220,
3096,
796,
33918,
13,
46030,
7,
7753,
13,
961,
... | 1.999117 | 1,133 |
"""Tests for the Atag config flow."""
from pyatag import AtagException
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.atag import DOMAIN
from homeassistant.const import CONF_DEVICE, CONF_EMAIL, CONF_HOST, CONF_PORT
from tests.async_mock import PropertyMock, patch
from tests.common import MockConfigEntry
FIXTURE_USER_INPUT = {
CONF_HOST: "127.0.0.1",
CONF_EMAIL: "test@domain.com",
CONF_PORT: 10000,
}
FIXTURE_COMPLETE_ENTRY = FIXTURE_USER_INPUT.copy()
FIXTURE_COMPLETE_ENTRY[CONF_DEVICE] = "device_identifier"
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_one_config_allowed(hass):
"""Test that only one Atag configuration is allowed."""
MockConfigEntry(domain="atag", data=FIXTURE_USER_INPUT).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_connection_error(hass):
"""Test we show user form on Atag connection error."""
with patch(
"homeassistant.components.atag.config_flow.AtagOne.authorize",
side_effect=AtagException(),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
data=FIXTURE_USER_INPUT,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "connection_error"}
async def test_full_flow_implementation(hass):
"""Test registering an integration and finishing flow works."""
with patch("homeassistant.components.atag.AtagOne.authorize",), patch(
"homeassistant.components.atag.AtagOne.update",
), patch(
"homeassistant.components.atag.AtagOne.id",
new_callable=PropertyMock(return_value="device_identifier"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
data=FIXTURE_USER_INPUT,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == FIXTURE_COMPLETE_ENTRY[CONF_DEVICE]
assert result["data"] == FIXTURE_COMPLETE_ENTRY
| [
37811,
51,
3558,
329,
262,
1629,
363,
4566,
5202,
526,
15931,
198,
6738,
12972,
265,
363,
1330,
1629,
363,
16922,
198,
198,
6738,
1363,
562,
10167,
1330,
4566,
62,
298,
1678,
11,
1366,
62,
13000,
62,
11125,
198,
6738,
1363,
562,
10167... | 2.506115 | 1,063 |
from tests.integration.integration_test_case import IntegrationTestCase
ACCOUNT_SERVICE_SURVEYS_URL = "/surveys/todo"
| [
6738,
5254,
13,
18908,
1358,
13,
18908,
1358,
62,
9288,
62,
7442,
1330,
38410,
14402,
20448,
198,
198,
26861,
28270,
62,
35009,
27389,
62,
50,
4261,
6089,
16309,
62,
21886,
796,
12813,
11793,
303,
893,
14,
83,
24313,
1,
628
] | 3 | 40 |
"""Sans-I/O ventilator backend server protocol."""
from typing import Optional, Union, Tuple
import logging
import time as _time
import attr
from ventserver.protocols import backend
from ventserver.protocols import events
from ventserver.protocols import frontend
from ventserver.protocols import mcu
from ventserver.protocols import file
from ventserver.protocols import rotary_encoder
from ventserver.protocols import exceptions
from ventserver.sansio import channels
from ventserver.sansio import protocols
# Events
@attr.s
class FrontendConnectionEvent(events.Event):
"""Server frontend connection status event."""
last_connection_time: float = attr.ib(default=None)
is_frontend_connected: bool = attr.ib(default=False)
def has_data(self) -> bool:
"""Return whether the event has data."""
return (
self.last_connection_time is not None
and self.is_frontend_connected is not None
)
@attr.s
class ReceiveEvent(events.Event):
"""Server receive input event."""
time: Optional[float] = attr.ib(default=None)
serial_receive: Optional[bytes] = attr.ib(default=None)
websocket_receive: Optional[bytes] = attr.ib(default=None)
rotary_encoder_receive: Tuple[int, bool] = attr.ib(default=None)
file_receive: Optional[file.StateData] = attr.ib(default=None)
def has_data(self) -> bool:
"""Return whether the event has data."""
return (
self.time is not None
or bool(self.serial_receive)
or self.websocket_receive is not None
or self.rotary_encoder_receive is not None
or self.file_receive is not None
)
@attr.s
class ReceiveOutputEvent(events.Event):
"""Server receive output/send event."""
server_send: Optional[backend.OutputEvent] = attr.ib(default=None)
frontend_delayed: bool = attr.ib(default=False)
def has_data(self) -> bool:
"""Return whether the event has data."""
return self.server_send is not None and self.server_send.has_data()
SendEvent = Union[backend.Announcement, backend.OutputEvent]
@attr.s
class SendOutputEvent(events.Event):
"""Server send output/send event."""
serial_send: Optional[bytes] = attr.ib(default=None)
websocket_send: Optional[bytes] = attr.ib(default=None)
file_send: Optional[file.StateData] = attr.ib(default=None)
def has_data(self) -> bool:
"""Return whether the event has data."""
return (bool(self.serial_send) or
self.websocket_send is not None or
self.file_send is not None)
def make_serial_receive(
serial_receive: bytes,
time: float
) -> ReceiveEvent:
"""Make a ReceiveEvent from serial receive data."""
return ReceiveEvent(serial_receive=serial_receive, time=time)
def make_websocket_receive(
ws_receive: bytes,
time: float
) -> ReceiveEvent:
"""Make a ReceiveEvent from websocket receive data."""
return ReceiveEvent(websocket_receive=ws_receive, time=time)
def make_rotary_encoder_receive(
re_receive: Tuple[int, bool],
time: float
) -> ReceiveEvent:
"""Make a ReceiveEvent from rotary encoder receive data."""
return ReceiveEvent(rotary_encoder_receive=re_receive, time=time)
# Frontend kill props
@attr.s
class FrontendKillProps():
"""Variables used to implement frozen frontend kill logic."""
# fe = frontend
last_fe_event: float = attr.ib(default=0)
fe_connected: bool = attr.ib(default=False)
fe_connection_time: float = attr.ib(default=0)
last_fe_kill: float = attr.ib(factory=_time.time)
fe_delayed_duration: int = attr.ib(default=5)
# Filters
@attr.s
class ReceiveFilter(protocols.Filter[ReceiveEvent, ReceiveOutputEvent]):
"""Filter which transforms receive bytes into high-level events."""
_logger = logging.getLogger('.'.join((__name__, 'ReceiveFilter')))
_buffer: channels.DequeChannel[
Union[ReceiveEvent, FrontendConnectionEvent]
] = attr.ib(factory=channels.DequeChannel)
current_time: float = attr.ib(default=0)
_kill_props: FrontendKillProps = attr.ib(factory=FrontendKillProps)
_mcu: mcu.ReceiveFilter = attr.ib(factory=mcu.ReceiveFilter)
_frontend: frontend.ReceiveFilter = attr.ib(factory=frontend.ReceiveFilter)
_rotary_encoder: rotary_encoder.ReceiveFilter = attr.ib(
factory=rotary_encoder.ReceiveFilter
)
_backend: backend.ReceiveFilter = attr.ib(factory=backend.ReceiveFilter)
_file: file.ReceiveFilter = attr.ib(
factory=file.ReceiveFilter
)
def input(self, event: Optional[
Union[ReceiveEvent, FrontendConnectionEvent
]
]) -> None:
"""Handle input events."""
if event is None or not event.has_data():
return
self._buffer.input(event)
def output(self) -> Optional[ReceiveOutputEvent]:
"""Emit the next output event."""
self._process_buffer()
any_updated = False
# Process mcu output
any_updated = self._process_mcu() or any_updated
# Process frontend output
any_updated = self._process_frontend() or any_updated
# Process rotary encoder output
any_updated = self._process_rotary_encoder() or any_updated
# Process file output
try:
any_updated = self._process_file() or any_updated
except exceptions.ProtocolDataError as err:
self._logger.error(err)
# Process time
if not any_updated:
self._backend.input(backend.ReceiveEvent(time=self.current_time))
# Process backend output
# Consume any outputs as long as the backend is indicating that it still
# has receive data to process, even if it has no data to output
while True:
backend_output = self._backend.output()
if backend_output is None:
break
if backend_output.has_data():
break
any_updated = any_updated or backend_output is not None
if not any_updated:
return None
# Kill frontend process if it stops responding.
# The frontend service will automatically restart the frontend process.
delayed = False
if int(self.current_time - self._kill_props.last_fe_event) >\
self._kill_props.fe_delayed_duration:
if int(self.current_time - self._kill_props.last_fe_kill) > 2:
connection_duration = int(
self.current_time - self._kill_props.fe_connection_time
)
if self._kill_props.fe_connected and connection_duration > 2:
self._kill_props.last_fe_kill = self.current_time
delayed = True
output = ReceiveOutputEvent(
server_send=backend_output, frontend_delayed=delayed
)
return output
def _process_buffer(self) -> None:
"""Process the next event in the input buffer."""
event = self._buffer.output()
if event is None:
return
if isinstance(event, FrontendConnectionEvent):
self._kill_props.fe_connection_time = event.last_connection_time
self._kill_props.fe_connected = event.is_frontend_connected
return
if event.time is not None:
self.current_time = event.time
self._mcu.input(event.serial_receive)
self._frontend.input(event.websocket_receive)
self._rotary_encoder.input(
rotary_encoder.ReceiveEvent(
time=self.current_time,
re_data=event.rotary_encoder_receive
)
)
self._file.input(event.file_receive)
def _process_mcu(self) -> bool:
"""Process the next event from the mcu protocol."""
mcu_output = self._mcu.output()
if mcu_output is None:
return False
self._backend.input(backend.ReceiveEvent(
time=self.current_time, mcu_receive=mcu_output,
frontend_receive=None
))
return True
def _process_frontend(self) -> bool:
"""Process the next event from the frontend protocol."""
frontend_output = self._frontend.output()
if frontend_output is None:
return False
self._backend.input(backend.ReceiveEvent(
time=self.current_time, mcu_receive=None,
frontend_receive=frontend_output
))
self._kill_props.last_fe_event = self.current_time
return True
def _process_rotary_encoder(self) -> bool:
"""Process the next event from the rotary encoder."""
rotary_encoder_output = self._rotary_encoder.output()
if rotary_encoder_output is None:
return False
self._backend.input(backend.ReceiveEvent(
time=self.current_time, mcu_receive=None,
frontend_receive=rotary_encoder_output
))
return True
def _process_file(self) -> bool:
"""Process the next event from the file."""
file_output = self._file.output() # throws ProtocolDataError
if file_output is None:
return False
self._backend.input(backend.ReceiveEvent(
time=self.current_time, mcu_receive=None,
frontend_receive=None, file_receive=file_output
))
return True
def input_serial(self, serial_receive: bytes) -> None:
"""Input a ReceiveEvent corresponding to serial data.
This is just a convenience function intended for writing unit tests
more concisely.
"""
self.input(make_serial_receive(serial_receive, self.current_time))
def input_websocket(self, websocket: bytes) -> None:
"""Input a ReceiveEvent corresponding to websocket data.
This is just a convenience function intended for writing unit tests
more concisely.
"""
self.input(make_websocket_receive(websocket, self.current_time))
@property
def backend(self) -> backend.ReceiveFilter:
"""Return the backend receiver."""
return self._backend
@property
def file(self) -> file.ReceiveFilter:
"""Return the file receiver"""
return self._file
@attr.s
class SendFilter(protocols.Filter[SendEvent, SendOutputEvent]):
"""Filter which transforms high-level events into send bytes."""
_buffer: channels.DequeChannel[SendEvent] = attr.ib(
factory=channels.DequeChannel
)
_backend: backend.SendFilter = attr.ib(factory=backend.SendFilter)
_mcu: mcu.SendFilter = attr.ib(factory=mcu.SendFilter)
_frontend: frontend.SendFilter = attr.ib(factory=frontend.SendFilter)
_file: file.SendFilter = attr.ib(factory=file.SendFilter)
def input(self, event: Optional[SendEvent]) -> None:
"""Handle input events."""
if event is None or not event.has_data():
return
self._buffer.input(event)
def output(self) -> Optional[SendOutputEvent]:
"""Emit the next output event."""
any_updated = False
self._process_buffer()
backend_output = self._backend.output()
any_updated = (backend_output is not None) or any_updated
self._mcu.input(backend.get_mcu_send(backend_output))
mcu_output = self._mcu.output()
any_updated = (mcu_output is not None) or any_updated
self._frontend.input(backend.get_frontend_send(backend_output))
frontend_output = self._frontend.output()
any_updated = (frontend_output is not None) or any_updated
self._file.input(backend.get_file_send(backend_output))
file_output = self._file.output()
any_updated = (file_output is not None) or any_updated
if not any_updated:
return None
output = SendOutputEvent(
serial_send=mcu_output, websocket_send=frontend_output,
file_send=file_output
)
return output
def _process_buffer(self) -> None:
"""Process the next event in the input buffer."""
try:
event = self._buffer.output()
self._backend.input(event)
except IndexError:
pass
@property
def file(self) -> file.SendFilter:
"""Return file sendfilter"""
return self._file
# Protocols
@attr.s
class Protocol(protocols.Protocol[
ReceiveEvent, ReceiveOutputEvent, SendEvent, SendOutputEvent
]):
"""Backend communication protocol."""
_receive: ReceiveFilter = attr.ib(factory=ReceiveFilter)
_send: SendFilter = attr.ib(factory=SendFilter)
@property
def receive(self) -> ReceiveFilter:
"""Return a Filter interface for receive events."""
return self._receive
@property
def send(self) -> SendFilter:
"""Return a Filter interface for send events."""
return self._send
| [
37811,
50,
504,
12,
40,
14,
46,
7435,
346,
1352,
30203,
4382,
8435,
526,
15931,
198,
198,
6738,
19720,
1330,
32233,
11,
4479,
11,
309,
29291,
198,
11748,
18931,
198,
11748,
640,
355,
4808,
2435,
198,
198,
11748,
708,
81,
198,
198,
6... | 2.412016 | 5,393 |
'''Module to make metrics for evaluating reco'''
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
import tensorflow as tf
sig=((tf.TensorSpec(shape=(None, 5), dtype=tf.float64, name=None),
tf.SparseTensorSpec(tf.TensorShape([None, None]), tf.float64),
tf.TensorSpec(shape=(None,), dtype=tf.int64, name=None)),
tf.TensorSpec(shape=(None, 6), dtype=tf.float64, name=None)) | [
7061,
6,
26796,
284,
787,
20731,
329,
22232,
664,
78,
7061,
6,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
11192,
273,
11125,
62,
1676,
65,
1... | 2.559524 | 168 |
import pytest
import sqlalchemy as sa
from flexmock import flexmock
from sqlalchemy_utils import ColorType, types # noqa
@pytest.fixture
@pytest.fixture
@pytest.mark.skipif('types.color.python_colour_type is None')
| [
11748,
12972,
9288,
198,
11748,
44161,
282,
26599,
355,
473,
198,
6738,
7059,
76,
735,
1330,
7059,
76,
735,
198,
198,
6738,
44161,
282,
26599,
62,
26791,
1330,
5315,
6030,
11,
3858,
220,
1303,
645,
20402,
628,
198,
31,
9078,
9288,
13,... | 2.934211 | 76 |
if __name__ == '__main__':
s = Stack()
print(s)
print(s.is_empty())
s.push(1)
s.push(2)
print(s)
s.push(3)
print(s)
print(s.top())
print(s.pop())
print(s)
"""
[]
True
[1, 2]
[1, 2, 3]
3
3
[1, 2]
""" | [
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
264,
796,
23881,
3419,
198,
220,
220,
220,
3601,
7,
82,
8,
198,
220,
220,
220,
3601,
7,
82,
13,
271,
62,
28920,
28955,
198,
2... | 1.636364 | 154 |
#standard imports
import arcpy
import os
from dnppy import core
from landsat_metadata import landsat_metadata
if arcpy.CheckExtension('Spatial')=='Available':
arcpy.CheckOutExtension('Spatial')
arcpy.env.overwriteOutput = True
__all__=['atsat_bright_temp_8', # complete
'atsat_bright_temp_457'] # complete
def atsat_bright_temp_8(meta_path, outdir = False):
"""
Converts Landsat 8 TIRS bands to at satellite brightnes temperature in Kelvins
To be performed on raw Landsat 8 level 1 data. See link below for details
see here http://landsat.usgs.gov/Landsat8_Using_Product.php
:param band_nums: A list of desired band numbers, which should be [10,11]
:param meta_path: The full filepath to the metadata file for those bands
:param outdir: Output directory to save converted files. If left False it will save ouput
files in the same directory as input files.
:return output_filelist: A list of all files created by this function
"""
#enforce the list of band numbers and grab metadata from the MTL file
band_nums = ["10", "11"]
meta_path = os.path.abspath(meta_path)
meta = landsat_metadata(meta_path)
output_filelist = []
#cycle through each band in the list for calculation, ensuring each is in the list of TIRS bands
for band_num in band_nums:
#scrape data from the given file path and attributes in the MTL file
band_path = meta_path.replace("MTL.txt","B{0}.tif".format(band_num))
Qcal = arcpy.Raster(band_path)
#get rid of the zero values that show as the black background to avoid skewing values
null_raster = arcpy.sa.SetNull(Qcal, Qcal, "VALUE = 0")
#requires first converting to radiance
Ml = getattr(meta,"RADIANCE_MULT_BAND_{0}".format(band_num)) # multiplicative scaling factor
Al = getattr(meta,"RADIANCE_ADD_BAND_{0}".format(band_num)) # additive rescaling factor
TOA_rad = (null_raster * Ml) + Al
#now convert to at-sattelite brightness temperature
K1 = getattr(meta,"K1_CONSTANT_BAND_{0}".format(band_num)) # thermal conversion constant 1
K2 = getattr(meta,"K2_CONSTANT_BAND_{0}".format(band_num)) # thermal conversion constant 2
#calculate brightness temperature at the satellite
Bright_Temp = K2/(arcpy.sa.Ln((K1/TOA_rad) + 1))
#save the data to the automated name if outdir is given or in the parent folder if not
if outdir:
outdir = os.path.abspath(outdir)
outname = core.create_outname(outdir, band_path, "ASBTemp", "tif")
else:
folder = os.path.split(meta_path)[0]
outname = core.create_outname(folder, band_path, "ASBTemp", "tif")
Bright_Temp.save(outname)
output_filelist.append(outname)
print("Saved output at {0}".format(outname))
del TOA_rad, null_raster
return output_filelist
def atsat_bright_temp_457(meta_path, outdir = None):
    """
    Converts band 6 from Landsat 4 and 5 or bands 6 VCID 1 and 2 from Landsat 7
    to at satellite brightness temperature in Kelvins

    To be performed on raw Landsat 4, 5, or 7 level 1 data.

    :param meta_path: The full filepath to the metadata file, labeled '_MTL.txt', which must
                      be in the same folder as band 6 or 6_VCID_1 and 6_VCID_2
    :param outdir:    Output directory to save converted files. If left None it will save
                      output files in the same directory as input files.

    :return output_filelist: A list of all files created by this function

    :raises ValueError: if the metadata does not describe a Landsat 4, 5, or 7 scene.
    """
    output_filelist = []
    meta_path = os.path.abspath(meta_path)
    metadata = landsat_metadata(meta_path)
    spacecraft = getattr(metadata, "SPACECRAFT_ID")

    # map the spacecraft to its thermal band name(s)
    if "4" in spacecraft or "5" in spacecraft:
        band_nums = ["6"]
    elif "7" in spacecraft:
        band_nums = ["6_VCID_1", "6_VCID_2"]
    else:
        # BUGFIX: this branch previously only printed a warning and then
        # crashed below with a NameError on 'band_nums'; fail fast instead.
        raise ValueError("Enter the MTL file corresponding to a Landsat 4, 5, or 7 dataset")

    # metadata format was changed August 29, 2012. This tool can process either
    # the new or old format. The presence of a PRODUCT_CREATION_TIME category
    # identifies old metadata; otherwise the metadata is considered new.
    # BUGFIX: the file handle was previously left open until the very end of
    # the function; a context manager guarantees it is closed.
    with open(meta_path) as f:
        MText = f.read()

    if "PRODUCT_CREATION_TIME" in MText:
        Meta = "oldMeta"
    else:
        Meta = "newMeta"

    # The tile name is located using the newMeta/oldMeta indexes and the date of capture is recorded
    if Meta == "newMeta":
        TileName = getattr(metadata, "LANDSAT_SCENE_ID")
        year = TileName[9:13]
        jday = TileName[13:16]
        date = getattr(metadata, "DATE_ACQUIRED")
    elif Meta == "oldMeta":
        TileName = getattr(metadata, "BAND1_FILE_NAME")
        year = TileName[13:17]
        jday = TileName[17:20]
        date = getattr(metadata, "ACQUISITION_DATE")

    # Calculating values for each thermal band
    for band_num in band_nums:
        print("Processing Band {0}".format(band_num))
        pathname = meta_path.replace("MTL.txt", "B{0}.tif".format(band_num))
        Oraster = arcpy.Raster(pathname)

        # get rid of the zero values that show as the black background to avoid skewing values
        null_raster = arcpy.sa.SetNull(Oraster, Oraster, "VALUE = 0")

        # using the oldMeta/newMeta indexes to pull the min/max for radiance/digital numbers
        if Meta == "newMeta":
            LMax = getattr(metadata, "RADIANCE_MAXIMUM_BAND_{0}".format(band_num))
            LMin = getattr(metadata, "RADIANCE_MINIMUM_BAND_{0}".format(band_num))
            QCalMax = getattr(metadata, "QUANTIZE_CAL_MAX_BAND_{0}".format(band_num))
            QCalMin = getattr(metadata, "QUANTIZE_CAL_MIN_BAND_{0}".format(band_num))
        elif Meta == "oldMeta":
            LMax = getattr(metadata, "LMAX_BAND{0}".format(band_num))
            LMin = getattr(metadata, "LMIN_BAND{0}".format(band_num))
            QCalMax = getattr(metadata, "QCALMAX_BAND{0}".format(band_num))
            QCalMin = getattr(metadata, "QCALMIN_BAND{0}".format(band_num))

        # rescale digital numbers to top-of-atmosphere radiance
        Radraster = (((LMax - LMin)/(QCalMax-QCalMin)) * (null_raster - QCalMin)) + LMin
        Oraster = 0

        # convert radiance to at-satellite brightness temperature using the
        # sensor-specific thermal conversion constants for the TM/ETM+ band 6
        if "4" in spacecraft or "5" in spacecraft:
            Refraster = 1260.56/(arcpy.sa.Ln((607.76/Radraster) + 1.0))
        if "7" in spacecraft:
            Refraster = 1282.71/(arcpy.sa.Ln((666.09/Radraster) + 1.0))

        band_temp = "{0}_B{1}".format(TileName, band_num)

        # save the data to the automated name if outdir is given or in the parent folder if not
        if outdir:
            outdir = os.path.abspath(outdir)
            BandPath = core.create_outname(outdir, band_temp, "ASBTemp", "tif")
        else:
            folder = os.path.split(meta_path)[0]
            BandPath = core.create_outname(folder, band_temp, "ASBTemp", "tif")

        Refraster.save(BandPath)
        output_filelist.append(BandPath)

        del Refraster, Radraster, null_raster
        print("Temperature Calculated for Band {0}".format(band_num))

    return output_filelist
| [
198,
2,
20307,
17944,
198,
11748,
10389,
9078,
198,
11748,
28686,
198,
6738,
288,
77,
14097,
1330,
4755,
198,
6738,
8604,
265,
62,
38993,
1330,
8604,
265,
62,
38993,
198,
198,
361,
10389,
9078,
13,
9787,
11627,
3004,
10786,
4561,
34961,... | 2.43419 | 3,153 |
from cv2 import cv2

# Haar cascade trained for frontal faces
face_cascade = cv2.CascadeClassifier("../haarcascades/haarcascade_frontalface_default.xml")

cap = cv2.VideoCapture("../assets/video.mp4")
while True:
    ret, frame = cap.read()
    if not ret:
        # end of stream (or read failure) -- stop processing
        break
    # halve the resolution, then feed an equalized grayscale image to the detector
    frame = cv2.resize(frame, None, fx=1 / 2, fy=1 / 2, interpolation=cv2.INTER_AREA)
    gray = cv2.equalizeHist(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    for (x, y, w, h) in face_cascade.detectMultiScale(gray):
        # outline each detected face with a magenta ellipse
        cv2.ellipse(frame, (x + w // 2, y + h // 2), (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)
    cv2.imshow('Capture - Face detection', frame)
    # 'q' quits the preview loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
| [
6738,
269,
85,
17,
1330,
269,
85,
17,
198,
198,
2550,
62,
66,
28966,
796,
269,
85,
17,
13,
34,
28966,
9487,
7483,
7203,
40720,
3099,
5605,
3372,
2367,
14,
3099,
5605,
28966,
62,
8534,
1604,
558,
62,
12286,
13,
19875,
4943,
198,
19... | 2.064439 | 419 |
if __name__ == '__main__':
    import sys
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')
    from progress import Progress

# answers_list[k] holds the known answer for problem k (index 0 is a dummy)
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt') as answers:
    for line in answers:
        answers_list.append(int(line))

progress_ = Progress("Problem 063: Powerful digit counts", 0, answers_list[63])

# a**n has n digits only while a < 10, so n never needs to exceed 21
for n in range(1, 22):
    progress_.count += sum(1 for a in range(1, 10) if len(str(a ** n)) == n)
    progress_.progress()

if __name__ == '__main__':
    input()
| [
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1330,
25064,
198,
220,
220,
220,
25064,
13,
6978,
13,
28463,
7,
15,
11,
705,
34,
25,
6852,
14490,
6852,
14731,
32294,
6852,
38354,
6852,
16775,
412,
18173,
... | 2.495968 | 248 |
__author__ = 'Justin Scholz'
__copyright__ = "Copyright 2015 - 2017, Justin Scholz"
def recognize_user_input(user_choice: str, possibilities: [], default_answer=None):
    """
    :param default_answer: the default answer that is returned if user just hits enter
    :param user_choice: what the user entered
    :param possibilities: a list containing all valid (integer) answers; make sure to include "" if you want
        default answers with "enter" key pressing
    :return: user_chosen_answer: the matched answer (kept in the user's original string form, or the
        default_answer for empty input, or None if nothing matched); user_input_understood: True if the
        input was matched, False otherwise.
    """
    user_input_understood = False
    # BUGFIX: this was previously initialised to the builtin *type* ``str``,
    # which leaked out as the "answer" whenever nothing matched.
    user_chosen_answer = None
    for answer in possibilities:
        try:
            if int(user_choice) == answer:
                # keep the caller's original string form; callers convert as needed
                user_chosen_answer = user_choice
                user_input_understood = True
                # stop at the first match so later iterations cannot disturb the result
                break
        except ValueError:
            # user_choice is not numeric, so it cannot match any numeric option
            user_input_understood = False
    if user_choice == "":
        user_chosen_answer = default_answer
        user_input_understood = True
    return user_chosen_answer, user_input_understood
def ask_user_for_input(question: dict):
    """Interactively ask the user one question on the console and record the answer.

    :param question: a dictionary with the keys: "question_title", "question_text",
        "default_answer", "optiontype" (one of "yes_no", "multi_choice", "free_choice",
        "free_text", "2_indeces", "multi_indeces"), "valid_options" (only needed for
        multi_choice), and "valid_options_lower_limit" / "valid_options_upper_limit" /
        "valid_options_steplength" in case of free_choice. The "default_answer" value is
        True/False for a yes/no question, a float for free choice, and for multi_choice
        the index of the answer out of valid_options.
    :type question: dict
    :return: a copy of the question dict with an added "answer" key holding the parsed answer.

    - a sample dictionary for multi_choice:
        {"question_title": "Measurement Mode",
         "question_text": "Please choose which measurement mode to use",
         "default_answer": 2,
         "optiontype": "multi_choice",
         "valid_options": ["2-point", "3-point", "4-point"]}

    - a sample dictionary for yes/no:
        {"question_title": "Connection check",
         "question_text": "Connection check wasn't performed but is optional. Do you want to check the "
                          "connections?",
         "default_answer": True,
         "optiontype": "yes_no"}

    - a sample dictionary for an interval (free_choice):
        {"question_title": "Excitation voltage",
         "question_text": "Please enter an excitation voltage between 0 and 3 V. Maximum accuracy is 0.1 V.",
         "default_answer": 1.0,
         "optiontype": "free_choice",
         "valid_options_lower_limit": 0.0,
         "valid_options_upper_limit": 3.0,
         "valid_options_steplength": 1e1}
      Note: steplength is the *inverse* steplength. If 0.05 is the allowed steplength, enter 20 here.

    - a sample dictionary to ask for free text:
        {"question_title": "Working directory",
         "question_text": "Please choose a working directory for the following session with this program",
         "default_answer": "C:\\Data\\DiBaMePro",
         "optiontype": "free_text"}

    - a sample dictionary to ask for two indices:
        {"question_title": "Same-level-merge selection",
         "question_text": "Please enter the two indeces, (you will get two input prompts) for the "
                          "two which are to be merged.",
         "default_answer": "0",
         "optiontype": "2_indeces"}

    - a sample dictionary to ask for multiple indices:
        {"question_title": "Same-level-merge selection",
         "question_text": "Please enter one or more indeces separated only by a comma",
         "default_answer": "0,4,8,12",
         "optiontype": "multi_indeces"}
    """
    # work on a copy so the caller's dict is not mutated; the answer is added to it
    result = question.copy()
    # first we get the dictionary's values into local variables to ease the handling
    question_title = question["question_title"] # type: str
    question_title = "-------------" + question_title + "-------------"
    question_text = question["question_text"] # type: str
    default_answer = question["default_answer"] # can be int or True or str
    optiontype = question["optiontype"] # type: str
    valid_options = None
    valid_options_lower_limit = None
    valid_options_upper_limit = None
    valid_options_steplength = None
    print(question_title)
    print(question_text)
    # valid_options and option specifics only exist if it's not a yes/no question
    if optiontype != "yes_no":
        # in case of multi_choice, key "valid_options" exists and we need to match it
        if optiontype == "multi_choice":
            valid_options = question["valid_options"] # type: []
        elif optiontype == "free_choice":
            valid_options_lower_limit = question["valid_options_lower_limit"]
            valid_options_upper_limit = question["valid_options_upper_limit"]
            valid_options_steplength = question["valid_options_steplength"]
    # Now let's make different parsing for the (currently) three question types:
    # Yes/No questions are mapped to Bool True or False
    if optiontype == "yes_no":
        # here we can easily centrally exchange this to some logic to talk to a potential GUI
        answer_understood = False
        user_chosen_answer = None # type: bool
        default_literal_answer = ""
        if default_answer:
            default_literal_answer = "yes"
        elif not default_answer:
            default_literal_answer = "no"
        # re-prompt until recognize_user_input_yes_or_no accepts the input
        while not answer_understood:
            user_entered_response = input(
                "Default answer is: " + default_literal_answer + ". What do you want? Type y, n or confirm default: ")
            user_chosen_answer, answer_understood = recognize_user_input_yes_or_no(user_entered_response,
                                                                                  default_answer)
        result['answer'] = user_chosen_answer
    # Code Block for handling multi-option question type
    elif optiontype == "multi_choice":
        # we will later need a list of valid responses from the user
        valid_answers = []
        # enumerate yields the index AND the value, so we can use both then
        for index, item in enumerate(valid_options):
            print(str(index) + ": " + item)
            valid_answers.append(index)
        answer_understood = False
        user_chosen_answer = None # type: str
        while not answer_understood:
            print("The default is option #" + str(default_answer))
            user_entered_response = input("Confirm with enter or put in your own choice and confirm: ")
            user_chosen_answer, answer_understood = recognize_user_input(user_entered_response, valid_answers,
                                                                        default_answer)
        # the answer is stored as the integer index into valid_options
        result['answer'] = int(user_chosen_answer)
    elif optiontype == "free_choice":
        answer_understood = False
        user_chosen_answer = None
        while not answer_understood:
            print("Default answer is: " + str(default_answer))
            user_entered_response = input("Please enter your desired value with '.' as decimal separator: ")
            try:
                user_entered_response = float(user_entered_response)
                # validate against the configured [lower, upper] interval and step length
                user_chosen_answer, answer_understood = parse_user_input_lower_upper_limit_with_interval(
                    user_entered_response, valid_options_lower_limit, valid_options_upper_limit,
                    valid_options_steplength)
            except ValueError:
                # not a float: an empty string means "take the default"
                try:
                    if user_entered_response == "":
                        user_chosen_answer = default_answer
                        answer_understood = True
                except ValueError:
                    answer_understood = False
        result['answer'] = user_chosen_answer
    elif optiontype == "free_text":
        answer_understood = False
        user_chosen_answer = None
        while not answer_understood:
            print("Default answer is: " + str(default_answer))
            user_chosen_answer = input("Type your own now or confirm default with enter: ")
            # empty input falls back to the default; any text is accepted
            if user_chosen_answer == "":
                user_chosen_answer = default_answer
            answer_understood = True
        result['answer'] = user_chosen_answer
    elif optiontype == "2_indeces":
        # ask for exactly two integer indices, each re-prompted until valid
        answer1_understood = False
        answer2_understood = False
        index1 = None
        index2 = None
        print("Default answer is: " + str(default_answer))
        while not answer1_understood:
            user_chosen_answer = input("Index 1: ")
            try:
                index1 = int(user_chosen_answer)
                answer1_understood = True
            except ValueError:
                print("Please make sure to only enter a number!")
        while not answer2_understood:
            user_chosen_answer = input("Index 2: ")
            try:
                index2 = int(user_chosen_answer)
                answer2_understood = True
            except ValueError:
                print("Please make sure to only enter a number!")
        result["answer"] = [index1, index2]
    elif optiontype == "multi_indeces":
        # ask for a comma-separated list of integers; re-prompt on any bad item
        answer_understood = False
        user_chosen_answer = default_answer
        indeces = None
        while not answer_understood:
            indeces = []
            print("Default answer is: " + str(default_answer))
            user_chosen_answer = input("Type your own now or confirm default with enter: ") # type: str
            user_chosen_answer_list = user_chosen_answer.split(",")
            error_happened = False
            for item in user_chosen_answer_list:
                try:
                    indeces.append(int(item))
                except ValueError:
                    error_happened = True
                    print("Please make sure to only enter integer numbers separated by commas.")
            if not error_happened:
                answer_understood = True
        result["answer"] = indeces
    else:
        print("You are trying to use a question type that is not supported or have a typoo in your code.+")
    return result # type: dict
| [
834,
9800,
834,
796,
705,
33229,
3059,
349,
89,
6,
198,
834,
22163,
4766,
834,
796,
366,
15269,
1853,
532,
2177,
11,
10799,
3059,
349,
89,
1,
628,
628,
198,
4299,
7564,
62,
7220,
62,
15414,
7,
7220,
62,
25541,
25,
965,
11,
12779,
... | 2.35109 | 4,543 |
import matplotlib.pyplot as plt
import numpy as np
import os
import matplotlib.dates as mdates

from datetime import datetime

# read in timetable: one ./data/YYYYMMDD.txt file per day, each line being
# "tick,activity,mood,note"
dates = []
sleep = []
for file in os.listdir("./data"):
    if file.endswith(".txt"):
        # BUGFIX: str.rstrip('.txt') strips *characters* from the end, not the
        # suffix, so a stem ending in 't'/'x'/'.' would be mangled; use
        # os.path.splitext to drop the extension safely.
        dates.append(os.path.splitext(file)[0])
        with open(str('./data/'+file), "r") as filestream:
            tick = []
            radii = []
            radii_mood = []
            notes = []
            for line in filestream:
                currentline = line.rstrip('\n')
                currentline = currentline.split(',')
                tick.append(int(currentline[0]))
                # empty activity/mood fields carry the previous value forward
                # NOTE(review): this assumes the first line of each file has
                # non-empty activity and mood fields -- confirm the data format
                if currentline[1].strip():
                    radii.append(currentline[1].strip())
                else:
                    radii.append(radii[-1])
                if currentline[2].strip():
                    radii_mood.append(int(currentline[2]))
                else:
                    radii_mood.append(radii_mood[-1])
                notes.append(currentline[3].strip())
            # number of ticks spent sleeping that day
            sleep.append(radii.count('sleep'))

dates_format = [datetime.strptime(d, "%Y%m%d") for d in dates]

# Create figure and plot a stem plot with the date
fig, ax = plt.subplots(figsize=(8.8, 4))
ax.set(title="SLEEP TRACKER")
markerline, stemline, baseline = ax.stem(dates_format, sleep,
                                         linefmt="C3-", basefmt="k-")
plt.setp(markerline, mec="k", mfc="w", zorder=3)
# ax.get_xaxis().set_major_formatter(mdates.WeekdayLocator(byweekday=MO))
ax.get_xaxis().set_major_locator(mdates.DayLocator(interval=3))
ax.get_xaxis().set_major_formatter(mdates.DateFormatter("%a, %d %b"))
plt.setp(ax.get_xticklabels(), rotation=30, ha="right")
plt.savefig(str('./figures/'+'sleep_tracker.png'), dpi=100,bbox_inches='tight',
            transparent=True,
            pad_inches=0)
plt.show()
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
2603,
29487,
8019,
13,
19581,
355,
285,
19581,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628,
198,
2,
1100,
287,
... | 1.960929 | 947 |
# This is the gtk-dependent HTTPRequest module.
# For the pyjamas/javascript version, see platform/HTTPRequestPyJS.py
import sys
import pygwt
from __pyjamas__ import JS
if sys.platform not in ['mozilla', 'ie6', 'opera', 'oldmoz', 'safari']:
from __pyjamas__ import get_main_frame
import pyjd
handlers = {}
| [
2,
770,
318,
262,
308,
30488,
12,
21186,
14626,
18453,
8265,
13,
198,
2,
1114,
262,
12972,
73,
17485,
14,
37495,
2196,
11,
766,
3859,
14,
40717,
18453,
20519,
20120,
13,
9078,
198,
198,
11748,
25064,
198,
11748,
12972,
70,
46569,
198,... | 2.864865 | 111 |
# -*-: coding utf-8 -*-
""" Mopidy skill for Snips. """
from __future__ import unicode_literals
from mopidyclient import MopidyClient
GAIN = 4
class SnipsMopidy:
"""Mopidy skill for Snips.
:param mopidy_host: The hostname of the Mopidy http server
"""
| [
2,
532,
9,
12,
25,
19617,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
337,
404,
19325,
5032,
329,
5489,
2419,
13,
37227,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
285,
404,
19325,
16366,
1... | 2.605769 | 104 |
import django
from django.test import TestCase
from django.urls import reverse_lazy, reverse
from django.contrib.auth.models import User, Group, Permission
from django.core.exceptions import PermissionDenied
from .models import ApplicationTemplate, ApplicationQuestion, Application, ApplicationResponse
import uuid
| [
11748,
42625,
14208,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
62,
75,
12582,
11,
9575,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
11,
4912,
... | 4.051282 | 78 |
from django.conf import settings
from django.db import models
from django.utils import timezone
from events.models import Event
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
6738,
2995,
13,
27530,
1330,
8558,
628,
628,
198
] | 3.8 | 35 |
import orm
import sqlalchemy
from ..config import database, metadata
| [
11748,
393,
76,
198,
11748,
44161,
282,
26599,
198,
198,
6738,
11485,
11250,
1330,
6831,
11,
20150,
628
] | 3.944444 | 18 |
##############################################################################
# Copyright (c) 2007 Open Kernel Labs, Inc. (Copyright Holder).
# All rights reserved.
#
# 1. Redistribution and use of OKL4 (Software) in source and binary
# forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# (a) Redistributions of source code must retain this clause 1
# (including paragraphs (a), (b) and (c)), clause 2 and clause 3
# (Licence Terms) and the above copyright notice.
#
# (b) Redistributions in binary form must reproduce the above
# copyright notice and the Licence Terms in the documentation and/or
# other materials provided with the distribution.
#
# (c) Redistributions in any form must be accompanied by information on
# how to obtain complete source code for:
# (i) the Software; and
# (ii) all accompanying software that uses (or is intended to
# use) the Software whether directly or indirectly. Such source
# code must:
# (iii) either be included in the distribution or be available
# for no more than the cost of distribution plus a nominal fee;
# and
# (iv) be licensed by each relevant holder of copyright under
# either the Licence Terms (with an appropriate copyright notice)
# or the terms of a licence which is approved by the Open Source
# Initative. For an executable file, "complete source code"
# means the source code for all modules it contains and includes
# associated build and other files reasonably required to produce
# the executable.
#
# 2. THIS SOFTWARE IS PROVIDED ``AS IS'' AND, TO THE EXTENT PERMITTED BY
# LAW, ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. WHERE ANY WARRANTY IS
# IMPLIED AND IS PREVENTED BY LAW FROM BEING DISCLAIMED THEN TO THE
# EXTENT PERMISSIBLE BY LAW: (A) THE WARRANTY IS READ DOWN IN FAVOUR OF
# THE COPYRIGHT HOLDER (AND, IN THE CASE OF A PARTICIPANT, THAT
# PARTICIPANT) AND (B) ANY LIMITATIONS PERMITTED BY LAW (INCLUDING AS TO
# THE EXTENT OF THE WARRANTY AND THE REMEDIES AVAILABLE IN THE EVENT OF
# BREACH) ARE DEEMED PART OF THIS LICENCE IN A FORM MOST FAVOURABLE TO
# THE COPYRIGHT HOLDER (AND, IN THE CASE OF A PARTICIPANT, THAT
# PARTICIPANT). IN THE LICENCE TERMS, "PARTICIPANT" INCLUDES EVERY
# PERSON WHO HAS CONTRIBUTED TO THE SOFTWARE OR WHO HAS BEEN INVOLVED IN
# THE DISTRIBUTION OR DISSEMINATION OF THE SOFTWARE.
#
# 3. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ANY OTHER PARTICIPANT BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint: disable-msg=R0903
"""
A set of useful stuff
"""
import types
class IntString(long):
    """A sub-type of integer that allows you to associate
    a string with a given integer"""
    # NOTE(review): appears intended to be overridden/populated by subclasses,
    # mapping integer values to their display strings -- confirm with callers
    _show = {}
    # string used when a value has no mapping; None means no default
    _default_string = None
def align_up(val, alignment):
    """Round val up to the next multiple of alignment.

    A falsy alignment (0 or None) leaves val unchanged."""
    if not alignment:
        return val
    remainder = val % alignment
    if remainder:
        val += alignment - remainder
    return val
def align_down(val, alignment):
    """Round val down to a multiple of alignment.

    A falsy alignment (0 or None) leaves val unchanged."""
    if not alignment:
        return val
    return val - (val % alignment)
def is_integer(val):
    """Return true if the val is an integer or long type (Python 2)."""
    # isinstance accepts a tuple of types -- one call instead of an 'or' chain
    return isinstance(val, (types.IntType, types.LongType))
class TransformableMixin:
    """This is a mix-in class which allows a class instance to be
    specialised after creation. For example this allows us to make
    subclasses of ElfSection, and then change existing instance into
    the subclass. This is very useful if you consider that the subclass
    may be being pointed to from various different places, and having back
    links to them all would really complicate things. This mixin is used
    for ElfSection."""

    # This class is pretty magic, and pylint doesn't deal with magic very
    # well. In particular, it doesn't like the fact we don't have __init__,
    # and it doesn't realise that there is a builtin __class__ instance
    # variable.
    #pylint: disable-msg=W0232,W0201,E0203

    def transform(self, cls):
        """Transform is called to morph the current instance into
        a new class 'cls'. The new class must be a subclass of the
        current instance's class. After changing the class,
        transformer() will be called to do any post fixups.

        A subclass should provide a "transformer(self)" method.
        That method is called immediately after the class transformation
        and allows the subclass to do any fixup necessary; it is designed
        to be overridden by subclasses.
        """
        if not issubclass(cls, self.__class__):
            # NOTE: Python 2 raise syntax; this module is Python 2 code
            raise Exception, "Can only transform into subclassess"
        # rebinding __class__ re-types the live instance in place
        self.__class__ = cls
        self.transformer()
class Span:
    """A simple little helper class that lets you easily test if
    a value is in between a given span of numbers. E.g:

    >>> 3 in Span(1, 5)
    True
    >>> 0 in Span(1, 5)
    False
    >>> 1 in Span(1, 5)
    True
    >>> 5 in Span(1, 5)
    False
    """
    def __init__(self, base, end):
        """Initialise a half-open span [base, end)."""
        self.base = base
        self.end = end

    def __contains__(self, val):
        """Return True if val lies within the span."""
        return self.base <= val < self.end
class Prepared(Exception):
    """Raised when a method requires the object to be in the
    unprepared state, but the object is actually in the
    prepared state."""
class Unprepared(Exception):
    """Raised when a method requires the object to be in the
    prepared state, but the object is actually in the
    unprepared state."""
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
15069,
357,
66,
8,
4343,
4946,
32169,
23500,
11,
3457,
13,
357,
15269,
24210,
737,
198,
2,
1439,
2489,
10395,
13,
198,
2,
220,
198,
2,
352,
13,
2297,
396,
3890,
290,
779,
286,
7477,
43,
19,
... | 3.037317 | 2,117 |
import pyglet
from pyglet.gl import *

window = pyglet.window.Window()
label = pyglet.text.Label('Hello, world',
                          font_name='Times New Roman',
                          font_size=36,
                          x=window.width//2, y=window.height//2,
                          anchor_x='center', anchor_y='center')


# BUGFIX: the @window.event decorator previously had no function attached
# (the next statement was pyglet.app.run()), which is a SyntaxError.
# Restore the standard pyglet draw handler that renders the label.
@window.event
def on_draw():
    window.clear()
    label.draw()


pyglet.app.run()
| [
11748,
12972,
70,
1616,
198,
6738,
12972,
70,
1616,
13,
4743,
1330,
1635,
198,
198,
17497,
796,
12972,
70,
1616,
13,
17497,
13,
27703,
3419,
198,
18242,
796,
12972,
70,
1616,
13,
5239,
13,
33986,
10786,
15496,
11,
995,
3256,
198,
220,... | 1.826733 | 202 |
from django import forms
from django.contrib.admin.helpers import ActionForm
from common.forms import AudioAssetCreateFormBase
from .models import AudioAsset, Playlist, Rotator, RotatorAsset
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
28482,
13,
16794,
364,
1330,
7561,
8479,
198,
198,
6738,
2219,
13,
23914,
1330,
13491,
45869,
16447,
8479,
14881,
198,
198,
6738,
764,
27530,
1330,
13491,
45869,
... | 3.716981 | 53 |
import cv2
import numpy as np
import time
import os
import Command

# Batch regression test for the Command pipeline: for every folder listed in
# test/testInput.txt, run detection on each listed image and write a
# per-folder testOutput.txt with "pass/fail;counts;timing;filename;response".
path = os.path.dirname(os.path.abspath(__file__))
pathTeste = path+"/test/"

arqTestInput = open(pathTeste+"testInput.txt", 'r')
textTestInput = arqTestInput.readlines()

for folder in textTestInput:
    # each line is "<folder-name>,..." -- only the first field is used
    splitFolder = folder.split(",")
    nameFolder = splitFolder[0]

    arqInput = open(pathTeste+nameFolder+"/testInput.txt", 'r')
    textInput = arqInput.readlines()
    textOutput =""
    for linha in textInput:
        start_time = time.time()
        # each line is "<image-file>;<expected-result>"
        split = linha.split(";")
        nameFile = split[0]
        resultExpected = split[1]
        resultExpected = resultExpected.replace("\n", "")

        # normalise the image to a width of 1100px, preserving aspect ratio
        image = cv2.imread(pathTeste+nameFolder+"/" + nameFile)
        r = 1100.0 / image.shape[1]
        dim = (1100, int(image.shape[0] * r))
        image = cv2.resize(image,dim, interpolation = cv2.INTER_AREA)

        # run the detection pipeline: preprocess -> contours -> commands -> text
        pre_proc = Command.preprocess_image(image)
        #cv2.imwrite(path+"/testeResultadoP"+ nameFile +".jpeg", pre_proc);
        cnts, qntd_found, qtnd_squard = Command.find_cnts_commands(pre_proc)
        commands = Command.find_commands(cnts, image)
        response = Command.responseCommands(commands)

        # "1;" marks a case-insensitive match with the expected result, "0;" a miss
        if response.lower().strip() == resultExpected.lower().strip():
            textOutput = textOutput + "1;"
        else:
            textOutput = textOutput +"0;"
        textOutput = textOutput + str(qntd_found) +"---"
        textOutput = textOutput + str(qtnd_squard) +"---"
        timeFormat = "%.2f" % (time.time() - start_time)
        textOutput = textOutput + str(timeFormat) +"---"
        textOutput = textOutput + nameFile +";"
        textOutput = textOutput + response +"\n"

        # annotate the image with each detected command's contour and label
        temp_cnts = []
        for y in range(len(commands)):
            for x in range(len(commands[y])):
                temp_cnts.append(commands[y][x].contour)
                cv2.drawContours(image,temp_cnts, -1, (255,0,0), 2)
                cv2.putText(image,commands[y][x].best_command_match,(commands[y][x].center[0]-60, commands[y][x].center[1]+25),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,0),3,cv2.LINE_AA)
        cv2.imwrite(pathTeste+nameFolder+"/testeResultado/"+ nameFile +".jpeg", image);

    arqOutput = open(pathTeste+nameFolder+"/testOutput.txt", 'w')
    arqOutput.write(textOutput)
    arqOutput.close()
| [
11748,
269,
85,
17,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
640,
201,
198,
11748,
28686,
201,
198,
11748,
9455,
201,
198,
201,
198,
6978,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,... | 2.304394 | 933 |
from os import link
from django.contrib.auth.models import User, Group
from videoapp.models import videos
from rest_framework import serializers
| [
6738,
28686,
1330,
2792,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
11,
4912,
198,
6738,
2008,
1324,
13,
27530,
1330,
5861,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
628,
198
] | 3.972973 | 37 |
import sys
import socket
import string
import re
HOST = ""
PORT = 6667
DEBUG_FLAG = False
| [
11748,
25064,
198,
11748,
17802,
198,
11748,
4731,
198,
11748,
302,
198,
198,
39,
10892,
796,
13538,
198,
15490,
796,
718,
28933,
198,
198,
30531,
62,
38948,
796,
10352,
628
] | 3.1 | 30 |
#!/usr/bin/python
## Python Launcher
import platform
import sys
from subprocess import call
import subprocess
import logging
#Make it a thread!
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2235,
11361,
26385,
198,
11748,
3859,
198,
11748,
25064,
198,
6738,
850,
14681,
1330,
869,
198,
11748,
850,
14681,
198,
11748,
18931,
628,
220,
220,
220,
1303,
12050,
340,
257,
4704,
0,
220,
... | 3.235294 | 51 |
import gen
import network as ann


# Python 2 script: build (word, image) training pairs for samples 100..10099
# and pack them into the tensor layout the network expects.
words, imgs = gen.get_tuples(range(100, 10100))
word_mat, img_mat = gen.prepare_input_tensors(words, imgs)
print "Input images >>", img_mat.shape
print "Input labels >>", word_mat.shape
# Input spec is [100, image-height, image-width]; output spec matches the label
# tensor. NOTE(review): the meaning of the leading 100 and the trailing 100
# argument is not visible here -- confirm against network.Network's signature.
nn = ann.Network([100, img_mat.shape[1], img_mat.shape[2]], word_mat.shape, gen.get_default_total_code(), 100)
# Train in batches of 100 for up to 100 iterations, saving weights under
# ../artifacts/test_weight; continue_from_last=False starts from scratch.
nn.train(img_mat, word_mat, "../artifacts/" + "test_weight", batch_size=100, max_iteration=100, continue_from_last=False)
| [
11748,
2429,
198,
11748,
3127,
355,
1529,
628,
198,
10879,
11,
545,
14542,
796,
2429,
13,
1136,
62,
28047,
2374,
7,
9521,
7,
3064,
11,
8949,
405,
4008,
198,
4775,
62,
6759,
11,
33705,
62,
6759,
796,
2429,
13,
46012,
533,
62,
15414,
... | 2.714286 | 168 |
"""Advent of Code 2021 - Day 3"""
import os
import statistics
from copy import deepcopy
def filter_numbers(numbers: list[str], position: int, value: str) -> list[str]:
    """
    Keep only the numbers whose bit at ``position`` equals ``value``.

    :param numbers: List of binary numbers as strings
    :param position: The bit position to check
    :param value: The bit value required at that position
    :return: List of the numbers that match, as strings
    """
    kept = []
    for candidate in numbers:
        if candidate[position] == value:
            kept.append(candidate)
    return kept
def find_mode(numbers: list[str], position: int) -> str:
    """
    Return the most common digit at ``position`` across ``numbers``.

    Ties between 0 and 1 resolve to "1": the digits are sorted in
    descending order before calling statistics.mode, which returns the
    first of the equally-common values it encounters.

    :param numbers: List of binary numbers as strings
    :param position: The bit position
    :return: Mode of the digits at that position, as a string
    """
    digits = sorted((int(number[position]) for number in numbers), reverse=True)
    return str(statistics.mode(digits))
def flip_bit(b: str) -> str:
    """
    Invert a single bit: "0" -> "1" and "1" -> "0".

    :param b: Bit to be flipped
    :return: Flipped bit
    """
    if b == "1":
        return "0"
    return "1"
if __name__ == '__main__':
main()
| [
37811,
2782,
1151,
286,
6127,
33448,
532,
3596,
513,
37811,
628,
198,
11748,
28686,
198,
11748,
7869,
198,
6738,
4866,
1330,
2769,
30073,
628,
628,
198,
198,
4299,
8106,
62,
77,
17024,
7,
77,
17024,
25,
1351,
58,
2536,
4357,
2292,
25,... | 3.021792 | 413 |
from enum import Enum
class Priority(Enum):
    """
    Enum defining the priority of the notification.

    The values match Apple's apns-priority header values used by the
    APNs hosts defined below (10 = immediate, 5 = power-considerate).
    """
    # deliver the notification immediately
    immediately = 10
    # deliver at a time that takes device power into account
    normal = 5
PRODUCTION_HOST = "api.push.apple.com"
SANDBOX_HOST = "api.development.push.apple.com"
DEFAULT_PORT = 443
ALT_PORT = 2197
MAX_NOTIFICATION_PAYLOAD_SIZE_VOIP = 5120
MAX_NOTIFICATION_PAYLOAD_SIZE_OTHER = 4096
| [
6738,
33829,
1330,
2039,
388,
628,
198,
4871,
34416,
7,
4834,
388,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2039,
388,
16215,
262,
8475,
286,
262,
14483,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
3393,
796,
838,
198... | 2.659259 | 135 |
import pytest
from databases import Database
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData,
String,
Table,
create_engine,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy_to_ormar import ormar_model_str_repr, sqlalchemy_to_ormar
# Shared SQLAlchemy declarative base for the test models.
Base = declarative_base()
# Test fixtures run against a local on-disk SQLite database.
Database_URL = "sqlite:///test.db"
engine = create_engine(Database_URL)
database = Database(Database_URL)  # async `databases` wrapper over the same URL
metadata = MetaData(engine)
# Many-to-many link table between the "left" and "right" tables; rows are
# removed automatically when either side is deleted (ON DELETE CASCADE).
association_table = Table(
    "association",
    Base.metadata,
    Column("id", Integer, primary_key=True),
    Column("parent", Integer, ForeignKey("left.id", ondelete="CASCADE")),
    Column("child", Integer, ForeignKey("right.id", ondelete="CASCADE")),
)
@pytest.fixture(autouse=True, scope="module")
@pytest.mark.asyncio
| [
11748,
12972,
9288,
198,
6738,
20083,
1330,
24047,
198,
6738,
44161,
282,
26599,
1330,
357,
198,
220,
220,
220,
29201,
11,
198,
220,
220,
220,
8708,
9218,
11,
198,
220,
220,
220,
34142,
11,
198,
220,
220,
220,
30277,
6601,
11,
198,
... | 2.818182 | 297 |
# Copyright 2021 Rikai Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for python reflections.
"""
import importlib
def has_func(func_name: str) -> bool:
    """Return True when ``func_name`` resolves to an importable attribute.

    Given ``x.y.z.name``, first checks ``from x.y import z; z.name``; on
    failure falls back to ``from x.y.z import name``.  Never raises: any
    unresolvable name yields False.
    """
    try:
        mod_path, cls_name, attr_name = func_name.rsplit(".", 2)
        container = getattr(importlib.import_module(mod_path), cls_name)
        return hasattr(container, attr_name)
    except (AttributeError, ValueError):
        # Either the middle segment was not an attribute of the module, or
        # the name had fewer than three segments: treat the last segment as
        # an attribute of the remaining dotted module path.
        try:
            mod_path, attr_name = func_name.rsplit(".", 1)
            return hasattr(importlib.import_module(mod_path), attr_name)
        except (ValueError, ModuleNotFoundError):
            return False
    except ModuleNotFoundError:
        return False
def find_func(func_name: str):
    """Resolve ``func_name`` to the object it names and return it.

    Given ``x.y.z.name``, tries ``from x.y import z; z.name`` first, and then
    falls back to ``from x.y.z import name``.  Two-segment names such as
    ``x.name`` are resolved directly by the fallback (previously they raised
    ValueError because the split ran outside the try, unlike ``has_func``).

    :raises ModuleNotFoundError: if no candidate module can be imported.
    :raises AttributeError: if the module imports but lacks the attribute.
    """
    try:
        # Interpret the last two segments as <cls>.<attr> on module "x.y".
        module, cls, func = func_name.rsplit(".", 2)
        mod = importlib.import_module(module)
        return getattr(getattr(mod, cls), func)
    except (AttributeError, ValueError):
        # Fewer than three segments, or the attribute lookup failed:
        # treat the final segment as an attribute of module "x.y.z".
        module, cls = func_name.rsplit(".", 1)
        mod = importlib.import_module(module)
        return getattr(mod, cls)
| [
2,
220,
15069,
33448,
50067,
1872,
46665,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
... | 2.610619 | 678 |
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import tempfile
import os
import re
import sys
from contextlib import contextmanager
from lxml import etree
from mapproxy.test import mocker
from mapproxy.compat import string_type, PY2
from nose.tools import eq_
class Mocker(object):
    """
    This is a base class for unit-tests that use ``mocker``. This class follows
    the nosetest naming conventions for setup and teardown methods.
    `setup` will initialize a `mocker.Mocker`. The `teardown` method
    will run ``mocker.verify()``.
    """
    # NOTE(review): the setup/teardown methods described above are not visible
    # in this excerpt; ``self.mocker`` is assumed to be initialized before any
    # of the helpers below run.
    def expect_and_return(self, mock_call, return_val):
        """
        Register a return value for the mock call.
        :param mock_call: the recorded mock invocation (accepted for
            readability; ``mocker.result`` presumably applies to the most
            recently recorded call — confirm against the mocker docs)
        :param return_val: The value mock_call should return.
        """
        self.mocker.result(return_val)
    def replay(self):
        """
        Finish mock-record phase.
        """
        self.mocker.replay()
    def mock(self, base_cls=None):
        """
        Return a new mock object.
        :param base_cls: check method signatures of the mock-calls with this
            base_cls signature (optional)
        """
        # Only pass base_cls through when one was supplied.
        if base_cls:
            return self.mocker.mock(base_cls)
        return self.mocker.mock()
class TempFiles(object):
    """
    This class is a context manager for temporary files.
    >>> with TempFiles(n=2, suffix='.png') as tmp:
    ...     for f in tmp:
    ...         assert os.path.exists(f)
    >>> for f in tmp:
    ...     assert not os.path.exists(f)
    """
    # NOTE(review): no implementation (__init__/__enter__/__exit__) is visible
    # in this excerpt; only the documented contract is shown here.
def assert_re(value, regex):
    """
    Assert that ``regex`` matches somewhere inside ``value``.
    >>> assert_re('hello', 'l+')
    >>> assert_re('hello', 'l{3}')
    Traceback (most recent call last):
    ...
    AssertionError: hello ~= l{3}
    """
    assert re.search(regex, value) is not None, '%s ~= %s' % (value, regex)
def strip_whitespace(data):
    """
    Remove all whitespace from *data*, which may be ``str`` or ``bytes``.
    >>> strip_whitespace(' <foo> bar\\n zing\\t1')
    '<foo>barzing1'
    """
    # Raw literals: '\s' inside a non-raw string is an invalid escape
    # sequence (DeprecationWarning today, an error in future Pythons).
    if isinstance(data, bytes):
        return re.sub(rb'\s+', b'', data)
    else:
        return re.sub(r'\s+', '', data)
@contextmanager | [
2,
770,
2393,
318,
636,
286,
262,
9347,
44148,
1628,
13,
198,
2,
15069,
357,
34,
8,
3050,
31816,
2304,
1000,
1279,
4023,
1378,
296,
77,
2304,
1000,
13,
2934,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
... | 2.618022 | 1,021 |
"""Module that generates valid dataset manifest files from various data sources."""
from typing import Dict, List
import sqlalchemy
from sqlalchemy.engine import Engine
from fideslang import manifests
from fideslang.models import Dataset, DatasetCollection, DatasetField
from .utils import get_db_engine, echo_green
def get_db_collections_and_fields(engine: Engine) -> Dict[str, Dict[str, List[str]]]:
    """
    Get the name of every field in each table within database(s).

    Args:
        engine: A sqlalchemy DB connection engine

    Returns:
        A mapping of each field in each table of a database, i.e.
        ``{schema: {"schema.table_name": [field, ...]}}``
    """
    inspector = sqlalchemy.inspect(engine)
    # System schemas are never included in generated manifests; MySQL ships
    # several extra ones that must also be skipped.
    excluded_schemas = ["information_schema"]
    if engine.dialect.name == "mysql":
        excluded_schemas.extend(["mysql", "performance_schema", "sys"])
    return {
        schema_name: {
            f"{schema_name}.{table_name}": [
                column_info["name"]
                for column_info in inspector.get_columns(table_name, schema=schema_name)
            ]
            for table_name in inspector.get_table_names(schema=schema_name)
        }
        for schema_name in inspector.get_schema_names()
        if schema_name not in excluded_schemas
    }
def create_dataset_collections(
    db_tables: Dict[str, Dict[str, List[str]]]
) -> List[Dataset]:
    """
    Build one Dataset manifest object per schema, with generated placeholder
    descriptions and empty data categories where real values are unknown.
    """
    datasets = []
    for schema_name, schema in db_tables.items():
        collections = []
        for table_name, table in schema.items():
            fields = []
            for column in table:
                fields.append(
                    DatasetField(
                        name=column,
                        description=f"Fides Generated Description for Column: {column}",
                        data_categories=[],
                    )
                )
            collections.append(
                DatasetCollection(
                    name=table_name,
                    description=f"Fides Generated Description for Table: {table_name}",
                    fields=fields,
                )
            )
        datasets.append(
            Dataset(
                fides_key=schema_name,
                name=schema_name,
                description=f"Fides Generated Description for Schema: {schema_name}",
                collections=collections,
            )
        )
    return datasets
def create_dataset(engine: Engine, collections: List[DatasetCollection]) -> Dataset:
    """
    Build a Dataset manifest, named after the connected database, that wraps
    the supplied collections (tables/fields are produced elsewhere).
    """
    dataset_name = engine.url.database
    return Dataset(
        fides_key=dataset_name,
        name=dataset_name,
        description=f"Fides Generated Description for Dataset: {dataset_name}",
        collections=collections,
    )
def generate_dataset(connection_string: str, file_name: str) -> str:
    """
    Introspect every table/field reachable through ``connection_string`` and
    write a boilerplate dataset manifest to ``file_name``; returns the path.
    """
    engine = get_db_engine(connection_string)
    collections = create_dataset_collections(get_db_collections_and_fields(engine))
    manifests.write_manifest(file_name, [c.dict() for c in collections], "dataset")
    echo_green(f"Generated dataset manifest written to {file_name}")
    return file_name
| [
37811,
26796,
326,
18616,
4938,
27039,
10561,
3696,
422,
2972,
1366,
4237,
526,
15931,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
198,
198,
11748,
44161,
282,
26599,
198,
6738,
44161,
282,
26599,
13,
18392,
1330,
7117,
198,
198,
6738,
... | 2.313325 | 1,516 |
"""Eto SDK fluent API for managing jobs"""
import time
from typing import Iterable, Union
import pandas as pd
from eto._internal.model.dataset_details import DatasetDetails
from eto._internal.model.job import Job
from eto.connectors.coco import CocoConnector, CocoSource
from eto.connectors.rikai import RikaiConnector
from eto.fluent.client import get_api
def ingest_coco(
    dataset_name: str,
    source: Union[CocoSource, dict, Iterable[CocoSource], Iterable[dict]],
    mode: str = "append",
    partition: str = None,
) -> Job:
    """Submit an ingestion job that converts Coco data to Rikai format and
    registers the result in the Eto dataset registry.

    Parameters
    ----------
    dataset_name: str
        Name of the new Eto dataset, either ``project.dataset`` or a bare
        dataset name (placed under the ``default`` project).
    source: dict, Iterable[dict], CocoSource, Iterable[CocoSource]
        One or more raw Coco source specifications, e.g.
        ``{'image_dir': 's3://path/to/images',
        'annotation': 's3://path/to/annotation',
        'extras': {'split': 'train'}}``
    mode: str, default 'append'
        Behavior when the dataset already exists: 'overwrite' replaces the
        existing data, 'append' adds the new data.
    partition: str or list of str
        Which field(s) to partition on (ex. 'split').
    """
    conn = CocoConnector(get_api("jobs"))
    if "." in dataset_name:
        conn.project_id, conn.dataset_id = dataset_name.split(".", 1)
    else:
        conn.project_id, conn.dataset_id = "default", dataset_name
    # Normalize a single source spec into a list, then coerce raw dicts.
    source_list = [source] if isinstance(source, (CocoSource, dict)) else source
    for spec in source_list:
        conn.add_source(spec if isinstance(spec, CocoSource) else CocoSource(**spec))
    conn.mode = mode or "append"
    if partition is not None:
        conn.partition = [partition] if isinstance(partition, str) else partition
    return conn.ingest()
def ingest_rikai(
    dataset_name: str,
    url: str,
    mode: str = "append",
    partition: str = None,
) -> Job:
    """Submit an ingestion job that registers existing Rikai-format data as a
    new dataset in the Eto catalog.

    Parameters
    ----------
    dataset_name: str
        Name of the new Eto dataset, either ``project.dataset`` or a bare
        dataset name (placed under the ``default`` project).
    url: str
        Location of the existing Rikai-format data to add to the catalog.
    mode: str, default 'append'
        Behavior when the dataset already exists: 'overwrite' replaces the
        existing data, 'append' adds the new data.
    partition: str or list of str
        Which field(s) to partition on (ex. 'split').
    """
    conn = RikaiConnector(get_api("jobs"))
    if "." in dataset_name:
        conn.project_id, conn.dataset_id = dataset_name.split(".", 1)
    else:
        conn.project_id, conn.dataset_id = "default", dataset_name
    conn.url = url
    conn.mode = mode or "append"
    if partition is not None:
        conn.partition = [partition] if isinstance(partition, str) else partition
    return conn.ingest()
def list_jobs(
    project_id: str = "default", _page_size: int = 50, _start_page_token: int = 0
) -> pd.DataFrame:
    """List all jobs for a given project.

    Parameters
    ----------
    project_id: str, default 'default'
        Show jobs under this project
    _page_size: int, default 50
        Number of jobs fetched per API call
    _start_page_token: int, default 0
        Page token to start listing from

    Returns
    -------
    pd.DataFrame
        One row per job, de-duplicated on the ``id`` column.  Empty (with
        just an ``id`` column) when the project has no jobs.
    """
    jobs = get_api("jobs")
    frames = []
    page = jobs.list_ingest_jobs(
        project_id, page_size=_page_size, page_token=_start_page_token
    )
    while page["jobs"]:
        frames.append(pd.DataFrame([j.to_dict() for j in page["jobs"]]))
        page = jobs.list_ingest_jobs(
            project_id, page_size=_page_size, page_token=page["next_page_token"]
        )
    # Guard the empty case: pd.concat([]) raises ValueError ("No objects to
    # concatenate"), so a project with no jobs used to crash here.
    if not frames:
        return pd.DataFrame(columns=["id"])
    return pd.concat(frames, ignore_index=True).drop_duplicates(
        ["id"], ignore_index=True
    )
def _wait_for_job(self, max_seconds: int = -1, poke_interval: int = 10) -> str:
    """Block until the job reaches a terminal state ("failed" or "success").

    Parameters
    ----------
    max_seconds: int, default -1
        Max number of seconds to wait. If -1 wait forever.
    poke_interval: int, default 10
        Interval between checks in seconds.

    Returns
    -------
    str
        The last observed status (may be non-terminal if the wait timed out).
    """
    # Never sleep longer than the caller's total time budget.
    interval = poke_interval if max_seconds < 0 else min(poke_interval, max_seconds)
    waited = 0
    status = self.status
    while status not in ("failed", "success"):
        time.sleep(interval)
        status = self.check_status()
        waited += poke_interval
        if 0 <= max_seconds < waited:
            break
    return status
def _convert_types(schema: Union[str, dict]):
    """Convert schema fields recursively into a compact display form."""
    if isinstance(schema, str):
        # Simple/primitive types are already display-ready.
        return schema
    kind = schema["type"]
    if kind == "array":
        return f"[{_convert_types(schema['elementType'])}]"
    if kind == "struct":
        return {field["name"]: _convert_types(field["type"]) for field in schema["fields"]}
    if kind == "map":
        return {_convert_types(schema["keyType"]): _convert_types(schema["valueType"])}
    if kind == "udt":
        # Show only the unqualified class name of the user-defined type.
        return schema.get("pyClass", schema["class"]).rsplit(".", 1)[-1]
    raise ValueError(f"Unrecognized field type {kind}")
| [
37811,
36,
1462,
26144,
43472,
7824,
329,
11149,
3946,
37811,
198,
11748,
640,
198,
6738,
19720,
1330,
40806,
540,
11,
4479,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
304,
1462,
13557,
32538,
13,
19849,
13,
19608,
292,
... | 2.591768 | 2,065 |
# Just so this appears as an importable module | [
2,
2329,
523,
428,
3568,
355,
281,
1330,
540,
8265
] | 4.6 | 10 |
# adapted from https://github.com/open-mmlab/mmcv or
# https://github.com/open-mmlab/mmdetection
from collections import OrderedDict
from vedacore.misc import registry
from .base_hook import BaseHook
@registry.register_module('hook')
| [
2,
16573,
422,
3740,
1378,
12567,
13,
785,
14,
9654,
12,
3020,
23912,
14,
3020,
33967,
393,
198,
2,
3740,
1378,
12567,
13,
785,
14,
9654,
12,
3020,
23912,
14,
3020,
15255,
3213,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198... | 3.16 | 75 |
from flask import Flask, session, redirect, url_for, escape, request , render_template
# Flask application instance for this module.
app = Flask(__name__)
# Set the secret key to some random bytes. Keep this really secret!
# (It signs the session cookie; hard-coding it in source is only acceptable
# for demos — load it from the environment in production.)
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
@app.route('/')
@app.route('/login', methods=['GET', 'POST'])
@app.route('/logout')
# Run the development server.  Debug mode reloads on change and shows
# tracebacks in the browser; never enable it in production.
if __name__ == "__main__":
    app.run(debug= True)
| [
6738,
42903,
1330,
46947,
11,
6246,
11,
18941,
11,
19016,
62,
1640,
11,
6654,
11,
2581,
837,
8543,
62,
28243,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
2,
5345,
262,
3200,
1994,
284,
617,
4738,
9881,
13,
9175,
42... | 2.571429 | 140 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| [
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
11504,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
2,
22686,
2116,
13,
2100,
796,
2124,
198,
2,
22686,
2116,
13,
9464,
796,
6045,
198,
2,... | 3.09434 | 53 |
import time
import Adafruit_DHT
import bluetooth
import sys
# Connect to the bluetooth server, then poll the DHT11 sensor forever and
# push each humidity reading over RFCOMM.
bd_addr = "DC:A6:32:37:EE:FC" # Server bluetooth address
port = 1
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((bd_addr, port))
DHT_SENSOR = Adafruit_DHT.DHT11
DHT_PIN = 4  # GPIO pin the DHT11 data line is wired to
while True:
    humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
    # read_retry() returns (None, None) on failure; check both values
    # before formatting (the original only checked humidity).
    if humidity is not None and temperature is not None:
        try:
            y = "NO.3 Humidity={1:0.2f}%".format(temperature, humidity)
            sock.send(y)
            print(y)
            time.sleep(0.5)
        except Exception:
            # The connection likely dropped: try to re-establish it.
            # ``except Exception`` (not a bare ``except``) keeps
            # KeyboardInterrupt/SystemExit able to stop the script.
            try:
                print('Try to conncent again')
                sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
                sock.connect(('DC:A6:32:37:EE:FC', 1))
            except Exception:
                pass  # reconnect is best-effort; retry on the next loop
    else:
        print("Error!")
| [
11748,
640,
198,
11748,
1215,
1878,
4872,
62,
35,
6535,
198,
11748,
48208,
16271,
198,
11748,
25064,
198,
198,
17457,
62,
29851,
796,
366,
9697,
25,
32,
21,
25,
2624,
25,
2718,
25,
6500,
25,
4851,
1,
1303,
9652,
48208,
16271,
2209,
... | 1.876016 | 492 |
from unittest import mock
from flowp import ftypes
import types
################# CORE #################
############### ADAPTERS ###############
############## CONVERTERS ##############
########### OTHER STRUCTURES ###########
#@when('executes list')
#@when('executes list')
#@when('executes list')
#@when('executes list')
| [
6738,
555,
715,
395,
1330,
15290,
198,
6738,
5202,
79,
1330,
277,
19199,
198,
11748,
3858,
628,
198,
14468,
2,
327,
6965,
1303,
14468,
628,
628,
628,
198,
7804,
4242,
21017,
5984,
2969,
51,
4877,
1303,
7804,
4242,
2235,
628,
628,
198,... | 2.99187 | 123 |
import unittest
import numpy as np
import numpy.testing as npt
import wisdem.commonse.frustum as f
from wisdem.commonse import eps
# Shared fixture values for the frustum tests.
myones = np.ones((100,))  # vector of ones used to broadcast scalar parameters
rb = 4.0  # base radius (rb/rt naming — confirm against the frustum API)
rt = 2.0  # top radius
t = 0.1  # wall thickness
h = 3.0  # height
# Run the suite directly; the process exit code reflects pass/fail for CI.
# NOTE(review): suite() is not defined in this excerpt — presumably defined
# elsewhere in the file.
if __name__ == "__main__":
    result = unittest.TextTestRunner().run(suite())
    if result.wasSuccessful():
        exit(0)
    else:
        exit(1)
| [
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
33407,
355,
299,
457,
198,
11748,
266,
9409,
368,
13,
11321,
325,
13,
8310,
436,
388,
355,
277,
198,
6738,
266,
9409,
368,
13,
11321,
325,
13... | 2.251613 | 155 |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 22:46:41 2020
@author: D071127

Estimates the diesel/CO2 savings from merging farm parcels ("Flurstuecke")
into larger plots ("Schlaege") after land-swap meetings ("Tauschabende"),
using KTBL cost/consumption polynomials over plot areas from PostGIS.
"""
# encoding: utf-8
import platform
platform.architecture()  # queried for information only; result is discarded
import sys
from psycopg2 import connect
import sys  # NOTE(review): duplicate import of sys
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
# Open a connection to the local PostGIS database and switch to autocommit.
con = None
con = connect(user='postgres', host='localhost', password='postgres', database='kuppingen')
con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = con.cursor()
#cur.execute("SELECT * from geo_flurstuecke inner join tausch on (geo_flurstuecke.gid = tausch.flurstuecke_id) WHERE tausch.tauschabende_id IN (SELECT MAX(id) FROM tauschabende);")
# Select with ST_Dump: merges parcels into contiguous plots ("Schlaege"),
# joined on the latest swap meeting ("Tauschabend") for one specific farm
# operator ("Bewirtschafter").
# NOTE(review): getMaxTauschabend() is not defined in this excerpt —
# presumably defined elsewhere in the file.
cur.execute("SELECT ST_AsText((ST_DUMP(ST_UNION(geom))).geom), bewirtschafter_id FROM geo_flurstuecke g inner join tausch on (g.gid = tausch.flurstuecke_id) INNER JOIN flurstuecke f ON(f.geo_flurstuecke_id = g.gid) WHERE tausch.bewirtschafter_id = 188 and tausch.tauschabende_id ="+getMaxTauschabend(con)+"and f.nutzung LIKE '%Acker%' GROUP BY bewirtschafter_id;")
# The returned rows for the new (merged) plots go into their own variable.
neueSchlaege = cur.fetchall()
cur.close()
former = con.cursor()
# Same query for the initial plot state (tauschabende_id = 1).
former.execute("SELECT ST_AsText((ST_DUMP(ST_UNION(geom))).geom), bewirtschafter_id FROM geo_flurstuecke g inner join tausch on (g.gid = tausch.flurstuecke_id) INNER JOIN flurstuecke f ON(f.geo_flurstuecke_id = g.gid) WHERE tausch.bewirtschafter_id = 188 and tausch.tauschabende_id = 1 and f.nutzung LIKE '%Acker%' GROUP BY bewirtschafter_id;")
alteSchlaege = former.fetchall()
tauschmax = getMaxTauschabend(con)
con.close()
former.close()
# VARIANT A)
# CO2 saving = diesel saving
# diesel saving = total diesel per farm before - total diesel per farm after
#              = average plot size before * plots - average plot size after * plots
# 1. determine diesel consumption per plot size from the KTBL curve --> interpolate the curve
# 2. for one operator, compute total area / number of plots before
# VARIANT B)
# SUM(plot size * interpolated KTBL diesel value) BEFORE - SUM(plot size * interpolated KTBL diesel value) AFTER
# CURRENT PROBLEMS:
# 1. transformation from parcels ("Flurstuecke") into plots ("Schlaege") (not there yet)
# 2. list of SQL statements (request from Jens)
## import pandas/geopandas for the area computation
import pandas as pd
# build a dataframe from the neueSchlaege python list
dfObjpd=pd.DataFrame(neueSchlaege, columns =['geometry', 'bewirtschafterID'])
## the same for the initial plot state at the earliest swap meeting
dfObjpdformer=pd.DataFrame(alteSchlaege, columns=['geometry', 'bewirtschafterID'] )
#print (dfObj)
import geopandas as gpd
# = gpd.read_file(dfObj)
#print (dfObj.crs)
from shapely import wkt
import shapely.wkt
#dfObj = dfObjpd
# Parse the WKT strings into shapely geometries.
dfObjpd['geometry'] = dfObjpd['geometry'].map(shapely.wkt.loads)
# NOTE(review): unlike dfObjpd above, this rebinds dfObjpdformer to a Series
# (the 'geometry' column only), dropping the bewirtschafterID column —
# presumably unintended; confirm against the dfObjpd handling.
dfObjpdformer = dfObjpdformer['geometry'].map(shapely.wkt.loads)
gdf = gpd.GeoDataFrame(dfObjpd, geometry ='geometry')
gdfformer = gpd.GeoDataFrame(dfObjpdformer, geometry ='geometry')
# Gauss-Krueger zone 3 (DHDN), so .area is in square meters.
gdf.crs = "EPSG:31467"
gdfformer.crs = "EPSG:31467"
## plot size (hectares) of the merged plots, based on the newest swap meeting
gdf["area"] = gdf['geometry'].area/ 10**4
gdf.head(2)
# cost per plot afterwards = KTBL cost polynomial of area, times area
gdf["kosten"]= (0.4313*gdf["area"]**4 - 8.8792*gdf["area"]**3 + 66.062*gdf["area"]**2 - 223.83*gdf["area"] + 809.24)*gdf["area"]
#TBD: operator matrix with cost per operator ("Bewirtschafter")
#bewirtschafter[]
#for n in gdf['bewirtschafter']:
#    bewirtschafter[n] = gdf['bewirtschafter'.sum().kosten
# totals over all plots (after the swaps)
schlaggroeseavg=gdf.sum().area/gdf.index.size
gdfgesamtkosten = gdf.sum().kosten
gdfgesamthektar = gdf.sum().area
kostenperarea = gdfgesamtkosten/gdfgesamthektar
# operator totals: after minus before
## state before the swaps:
gdfformer["area"] = gdfformer['geometry'].area/ 10**4
# cost per plot before = KTBL cost polynomial of area, times area
# NOTE(review): the trailing factor uses gdf["area"] (the NEW plot sizes)
# instead of gdfformer["area"] — looks like a copy-paste bug; compare with
# the gdf["kosten"] line above and the gdfformer["diesel"] line below.
gdfformer["kosten"]= (0.4313*gdfformer["area"]**4 - 8.8792*gdfformer["area"]**3 + 66.062*gdfformer["area"]**2 - 223.83*gdfformer["area"] + 809.24)*gdf["area"]
# totals over all plots (before the swaps)
schlaggroeseavgformer=gdfformer.sum().area/gdfformer.index.size
gdfgesamtkostenformer = gdfformer.sum().kosten
gdfgesamthektarformer = gdfformer.sum().area
kostenperareaformer = gdfgesamtkostenformer/gdfgesamthektarformer
Ersparnis = gdfgesamtkostenformer - gdfgesamtkosten
# compute the perimeter ("Umfang") for each plot
#gdf["Umfang"]= gdf['geometry'].
# diesel consumption per plot = KTBL polynomial of area, times area
gdf["diesel"]= (5.3254*gdf["area"]**2- 0.463*gdf["area"]**3 - 22.897*gdf["area"] + 156.67)*gdf["area"]
gdfformer["diesel"] = (-0.463*gdfformer["area"]**3 + 5.3254*gdfformer["area"]**2 - 22.897*gdfformer["area"] + 156.67)*gdfformer["area"]
Dieselersparnis = gdfformer.sum().diesel - gdf.sum().diesel
# 2.6 kg CO2 per liter of diesel — confirm the emission factor source
CO2_aequivalent_kg = Dieselersparnis * 2.6
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
2892,
2758,
1511,
2534,
25,
3510,
25,
3901,
12131,
201,
198,
201,
198,
31,
9800,
25,
360,
2998,
14686,
22,
201,
198,
37811,
201,
198,... | 2.245045 | 2,220 |
from models import DefiProtocolInfo, DefiLlamaLendingDailyStats
import pydash
from utils.date_util import DateUtil
| [
6738,
4981,
1330,
2896,
72,
19703,
4668,
12360,
11,
2896,
72,
43,
75,
1689,
43,
1571,
28545,
29668,
198,
11748,
279,
5173,
1077,
198,
6738,
3384,
4487,
13,
4475,
62,
22602,
1330,
7536,
18274,
346,
628,
628,
628,
198
] | 3.102564 | 39 |
# Copyright (C) 2015-2021 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
from virgil_crypto_lib._libs import *
from ctypes import *
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t
from virgil_crypto_lib.common._c_bridge import vsc_data_t
from ._vscf_impl import vscf_impl_t
class VscfSeedEntropySource(object):
    """Deterministic entropy source that is based only on the given seed."""
    # NOTE(review): ``vscf_seed_entropy_source_t`` used below is not among the
    # visible imports of this excerpt — presumably provided elsewhere in the
    # package; confirm before use.
    def __init__(self):
        """Create underlying C context."""
        # Load the native libraries and keep a handle to the foundation lib,
        # which exports all vscf_* symbols used below.
        self._ll = LowLevelLibs()
        self._lib = self._ll.foundation
    def vscf_seed_entropy_source_is_strong(self, ctx):
        """Defines that implemented source is strong.

        ``ctx`` is a pointer to the native seed-entropy-source context;
        returns the C bool result.
        """
        vscf_seed_entropy_source_is_strong = self._lib.vscf_seed_entropy_source_is_strong
        vscf_seed_entropy_source_is_strong.argtypes = [POINTER(vscf_seed_entropy_source_t)]
        vscf_seed_entropy_source_is_strong.restype = c_bool
        return vscf_seed_entropy_source_is_strong(ctx)
    def vscf_seed_entropy_source_gather(self, ctx, len, out):
        """Gather entropy of the requested length.

        ``len`` is the number of bytes to produce into the ``out`` buffer;
        returns the native status code (c_int).  Note: the parameter name
        shadows the builtin ``len`` (kept for API fidelity).
        """
        vscf_seed_entropy_source_gather = self._lib.vscf_seed_entropy_source_gather
        vscf_seed_entropy_source_gather.argtypes = [POINTER(vscf_seed_entropy_source_t), c_size_t, POINTER(vsc_buffer_t)]
        vscf_seed_entropy_source_gather.restype = c_int
        return vscf_seed_entropy_source_gather(ctx, len, out)
    def vscf_seed_entropy_source_reset_seed(self, ctx, seed):
        """Set a new seed as an entropy source.

        ``seed`` is passed by value as a ``vsc_data_t``; no return value.
        """
        vscf_seed_entropy_source_reset_seed = self._lib.vscf_seed_entropy_source_reset_seed
        vscf_seed_entropy_source_reset_seed.argtypes = [POINTER(vscf_seed_entropy_source_t), vsc_data_t]
        vscf_seed_entropy_source_reset_seed.restype = None
        return vscf_seed_entropy_source_reset_seed(ctx, seed)
| [
2,
15069,
357,
34,
8,
1853,
12,
1238,
2481,
16310,
37718,
4765,
11,
3457,
13,
198,
2,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
... | 2.753884 | 1,223 |
from django.utils.timezone import now
from django.core.cache import cache
from django.db.models import Count
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from utils.api import APIView, validate_serializer
from utils.constants import CacheKey, CONTEST_PASSWORD_SESSION_KEY
from utils.shortcuts import datetime2str, check_is_id
from account.models import AdminType
from utils.decorators import login_required, check_contest_permission, check_contest_password
from utils.constants import ContestStatus
from ..models import ContestAnnouncement, Contest, ACMContestRank, ProblemBank
from problem.models import Problem
from ..serializers import ACMContestRankNoPenaltySerializer, ContestAnnouncementSerializer
from ..serializers import ContestSerializer, ContestPasswordVerifySerializer
from ..serializers import ACMContestRankSerializer
import random
import json
| [
6738,
42625,
14208,
13,
26791,
13,
2435,
11340,
1330,
783,
198,
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
2764,
198,
6738,
1553,
69,
62,
88,
292,
70,
13,
26791,
1330,
150... | 3.581673 | 251 |
'''
Source codes for PyTorch 1.0 Reinforcement Learning (Packt Publishing)
Chapter 6: Scaling up Learning with Function Approximation
Author: Yuxi (Hayden) Liu
'''
import torch
from torch.autograd import Variable
import math
# Smoke-test the estimator: print features, fit a few points, then predict.
# NOTE(review): Estimator is not defined in this excerpt — presumably defined
# earlier in the file.
if __name__ == "__main__":
    estimator = Estimator(10, 2, 1)
    s1 = [0.5, 0.1]
    print(estimator.get_feature(s1))
    s_list = [[1, 2], [2, 2], [3, 4], [2, 3], [2, 1]]
    target_list = [1, 1.5, 2, 2, 1.5]
    for s, target in zip(s_list, target_list):
        feature = estimator.get_feature(s)  # NOTE(review): computed but unused
        estimator.update(s, 0, target)
    print(estimator.predict([0.5, 0.1]))
    print(estimator.predict([2, 3]))
| [
7061,
6,
198,
7416,
12416,
329,
9485,
15884,
354,
352,
13,
15,
22299,
13442,
18252,
357,
11869,
83,
23499,
8,
198,
14126,
718,
25,
1446,
4272,
510,
18252,
351,
15553,
2034,
13907,
18991,
198,
13838,
25,
575,
2821,
72,
357,
31306,
6559... | 2.323843 | 281 |
# -*- coding: utf-8 -*-
"""
Main script for reproducing results of the paper "Crowdsourcing Airway Segmentation"
Authors: Veronika Cheplygina, Adria Perez-Rovira
URL: https://github.com/adriapr/crowdairway
"""
import data as crowddata
import analysis as crowdanalysis
import figures as crowdfigures
import tables as crowdtables
#####################
# Data
#####################
# Process data and save the processed data frames. This only needs to be done if the preprocessing code changes
use_processed_data = True
# Redo all tables/plots
do_figures = False
do_tables = True
# Reprocess the raw data only when requested above.
if use_processed_data == False:
    crowddata.process_data()
# Load all the processed files
df_task, df_res, df_annot, df_truth, df_subject = crowddata.get_df_processed()
if do_figures:
    # Example tasks illustrating one valid and two invalid crowd results.
    crowdfigures.show_task(df_task, df_res, df_annot, task_id=27, result_index=0, save_file=True) #Valid
    crowdfigures.show_task(df_task, df_res, df_annot, task_id=27, result_index=3, save_file=True) #Invalid - does not see an airway
    crowdfigures.show_task(df_task, df_res, df_annot, task_id=27, result_index=10, save_file=True) #Invalid - unpaired ellipse, not resized
#####################
# Analysis
#####################
# Select valid results
df_res_valid, df_res_invalid = crowdanalysis.get_valid_results(df_res)
#Combine results per task in different ways
df_task_random = crowdanalysis.get_task_random(df_task, df_res_valid)
df_task_median = crowdanalysis.get_task_median(df_task, df_res_valid)
df_task_best = crowdanalysis.get_task_best(df_task, df_res_valid, df_truth) #optimistically biased!
# Tasks where invalid results indicate "can't see an airway".
df = crowdanalysis.get_cantsee(df_task,df_res_invalid,df_truth)
# From here on, median combining is selected where only one combining method is used.
# TODO this should be handled by a single variable
df_task_combined = df_task_median
combine_type = 'median'
#Get correlations between crowd and expert
df_corr = crowdanalysis.get_subject_correlation(df_subject, df_task_combined, df_truth, combine_type)
#####################
# Table
#####################
if do_tables:
    # Statistics about workers and results
    crowdtables.print_result(df_res_valid, df_res_invalid)
    crowdtables.print_worker(df_res)
    # Table 2- Correlations of different combining methods vs the expert
    #crowdtables.print_corr_table(df_task_random, df_task_median, df_task_best, df_truth, df_res_valid)
    # Table 3 - Characteristics per subjects
    #crowdtables.print_subject(df_subject, df_task_median, df_truth, combine_type)
    # Table 4 - Correlations between crowd quality and subject characteristics
    crowdtables.print_subject_correlation(df_subject, df_task_median, df_truth, combine_type)
    # Additions after revision 1
    #crowdtables.print_airway_generation(df_truth)
#####################
# Figures
#####################
# Figures 1 to 3 are illustrating the method and are not produced from the data
if do_figures:
    # Figure 4, statistics about workers and results
    crowdfigures.plot_result_worker(df_res_valid) #
    crowdfigures.scatter_worker_valid(df_res_valid, df_res_invalid)
    # Figure 5, Inner airway
    crowdfigures.scatter_correlation_by_part(df_task_random, df_task_median, df_task_best, df_truth, 'inner')
    # Figure 6, Outer airway
    crowdfigures.scatter_correlation_by_part(df_task_random, df_task_median, df_task_best, df_truth, 'outer')
    # Figure 7, WAP (wall area percentage)
    crowdfigures.scatter_correlation_by_part(df_task_random, df_task_median, df_task_best, df_truth, 'wap')
    # Figure 8, WTR (wall thickness ratio) — confirm abbreviation meanings
    crowdfigures.scatter_correlation_by_part(df_task_random, df_task_median, df_task_best, df_truth, 'wtr')
    #Figure 9, Correlation vs minimum number of available valid results
    crowdfigures.plot_correlation_valid(df_task_combined, df_truth, combine_type)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
13383,
4226,
329,
8186,
2259,
2482,
286,
262,
3348,
366,
34,
3986,
82,
29985,
3701,
1014,
1001,
5154,
341,
1,
220,
198,
198,
30515,
669,
25,
4643,
261,
923... | 2.773145 | 1,415 |
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import apache_beam as beam
from backend_jobs.pipeline_utils import database_schema
from backend_jobs.pipeline_utils.data_types import VisibilityType
from backend_jobs.pipeline_utils.firestore_database import initialize_db, RANGE_OF_BATCH
import random
# pylint: disable=abstract-method
class GetBatchedImageDataset(beam.DoFn):
    """Gets the images data set by batches as requested by
    the pipeline's input from the project's Firestore database.

    Input:
        integer index.

    Output:
        generator of image's documents in a Python dictionary form.
        Each image is represented by a dict containing all the fields
        of the document in the database and their values.
    """

    def setup(self):
        """Creates a Firestore client once per DoFn instance.

        Bug fix: ``self.db`` was read in ``process`` but never assigned
        anywhere, which raises AttributeError at runtime.  The
        ``initialize_db`` import at the top of this file was otherwise
        unused, which strongly suggests this lifecycle hook was intended.
        """
        self.db = initialize_db()

    # pylint: disable=arguments-differ
    def process(self, index, ingestion_provider=None, ingestion_run=None):
        """Queries firestore database for images from
        the ingestion_provider within a random range (by batch).

        Args:
            index: the index used for querying the database by the random field.
            ingestion_provider: the input of the pipeline, determines the images dataset.
            ingestion_run: the input of the pipeline, determines the dataset.
                Only one out of ingestion_provider and ingestion_run is provided.

        Returns:
            A generator of dictionaries with all the information (fields and id)
            of each one of the Firestore data set's image documents as stored in
            the database_schema.COLLECTION_IMAGES.

        Raises:
            ValueError: if both ingestion_provider and ingestion_run are
                provided, or if neither is.
        """
        if ingestion_provider and ingestion_run:
            raise ValueError(
                'both ingestion provider and run are provided - only one should be provided')
        if not ingestion_provider and not ingestion_run:
            raise ValueError(
                'neither ingestion provider nor run was provided - one should be provided')
        # The half-open interval [random_min, random_max) selects this batch
        # by the precomputed random field stored on each image document.
        random_min = index * RANGE_OF_BATCH
        random_max = random_min + RANGE_OF_BATCH
        # The two query variants differ only in which array field is filtered,
        # so build the common query once.
        if ingestion_run:
            array_field = database_schema.COLLECTION_IMAGES_FIELD_INGESTED_RUNS
            array_value = ingestion_run
        else:
            array_field = database_schema.COLLECTION_IMAGES_FIELD_INGESTED_PROVIDERS
            array_value = ingestion_provider
        query = self.db.collection(database_schema.COLLECTION_IMAGES)\
            .where(array_field, u'array_contains', array_value)\
            .where(database_schema.COLLECTION_IMAGES_FIELD_RANDOM, u'>=', random_min)\
            .where(database_schema.COLLECTION_IMAGES_FIELD_RANDOM, u'<', random_max)\
            .stream()
        return (add_id_to_dict(doc) for doc in query)
def add_id_to_dict(doc):
    """Return the document's field dictionary augmented with its id.

    Args:
        doc: a Firestore document snapshot exposing ``to_dict()`` and ``id``.

    Returns:
        dict with every field of the document plus an ``'id'`` entry
        holding the document's id.
    """
    fields = dict(doc.to_dict(), id=doc.id)
    return fields
class UpdateImageLabelsInDatabase(beam.DoFn):
    """Stores parallelly the label information in the project's database
    in the database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS.
    """

    def setup(self):
        """Creates a Firestore client once per DoFn instance.

        Bug fix: ``self.db`` was read in ``process`` but never assigned,
        which raises AttributeError at runtime; ``initialize_db`` was
        imported at the top of this file but unused, suggesting this
        lifecycle hook was intended.
        """
        self.db = initialize_db()

    # pylint: disable=arguments-differ
    def process(self, image_and_labels, run_id, provider_id):
        """Updates the project's database to contain documents with the correct
        fields for each label in the Labels subcollection of each image.

        Args:
            image_and_labels: tuple of image document dictionary (Each image is
                represented by a Python dictionary containing all the fields of
                the document in the database_schema.COLLECTION_IMAGES and their
                values) and a list of all labels: (image_doc_dict, labels).
            run_id: id of the current pipeline run, stored on every label doc.
            provider_id: id of the labeling provider, stored on every label doc.
        """
        image_doc, labels = image_and_labels
        doc_id = image_doc['id']
        subcollection_ref = self.db.collection(database_schema.COLLECTION_IMAGES).document(doc_id).\
            collection(database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS)
        for label in labels:
            # document() with no argument creates a new doc with an auto id.
            doc = subcollection_ref.document()
            doc.set({
                database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS_FIELD_PROVIDER_ID:
                    provider_id,
                database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS_FIELD_PROVIDER_VERSION:
                    '2.0.0',
                database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS_FIELD_LABEL_NAME: label,
                # New labels start out invisible until verified elsewhere.
                database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS_FIELD_VISIBILITY:
                    VisibilityType.INVISIBLE.value,
                database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS_FIELD_PARENT_IMAGE_ID:
                    doc_id,
                database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS_FIELD_PIPELINE_RUN_ID:
                    run_id,
                # Redundant for query optimisation reasons.
                database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS_FIELD_HASHMAP:
                    image_doc[database_schema.COLLECTION_IMAGES_FIELD_HASHMAP],
                # Redundant for query optimisation reasons.
                database_schema.COLLECTION_IMAGES_SUBCOLLECTION_LABELS_FIELD_RANDOM:
                    random.random()
            })
| [
37811,
198,
220,
15069,
12131,
3012,
11419,
198,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
220,
921,
... | 2.406061 | 2,640 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import date
from decimal import Decimal as D
from ralph_scrooge import models
from ralph_scrooge.plugins.cost.support import SupportPlugin
from ralph_scrooge.tests import ScroogeTestCase
from ralph_scrooge.tests.utils.factory import PricingObjectFactory
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,... | 3.476563 | 128 |
import csv
import subprocess
import sys
import os
import pytest
from typing import Optional, List
from pypeid import PEiDScanner, format_as_katc_peid
@pytest.fixture
@pytest.mark.parametrize(
"test_bin", ["TestExe_x64.exe", "TestExe_x86.exe", "TestDotNet.dll"]
)
| [
11748,
269,
21370,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
12972,
9288,
198,
6738,
19720,
1330,
32233,
11,
7343,
198,
6738,
279,
2981,
312,
1330,
18468,
72,
35,
33351,
1008,
11,
5794,
62,
292,
62,
41826... | 2.556604 | 106 |
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt

# Zig-zag demo series: a small dip followed by a larger peak.
series = [1, 2, 3, 2, 1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]
plt.plot(series)
plt.show()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
198,
489,
83,
13,
29487,
26933,
16,
11,
17,
11,
18,
11,
17,
11,
16,
11,
17,
11,
18,
11,
19,
... | 1.681159 | 69 |
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Test the vtkImageOpenClose3D filter.
# Image pipeline: read a PNG slice (fullhead15.png) and binarize it.
reader = vtk.vtkPNGReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
thresh = vtk.vtkImageThreshold()
thresh.SetInputConnection(reader.GetOutputPort())
thresh.SetOutputScalarTypeToUnsignedChar()
# Voxels >= 2000 map to the in-value (255); everything else to 0.
thresh.ThresholdByUpper(2000.0)
thresh.SetInValue(255)
thresh.SetOutValue(0)
thresh.ReleaseDataFlagOff()
# Morphological open (on value 0) then close (on value 255) with a
# 5x5x3 structuring element.
my_close = vtk.vtkImageOpenClose3D()
my_close.SetInputConnection(thresh.GetOutputPort())
my_close.SetOpenValue(0)
my_close.SetCloseValue(255)
my_close.SetKernelSize(5,5,3)
my_close.ReleaseDataFlagOff()
# for coverage (we could compare results to see if they are correct).
my_close.DebugOn()
my_close.DebugOff()
my_close.GetOutput()
my_close.GetCloseValue()
my_close.GetOpenValue()
#my_close AddObserver ProgressEvent {set pro [my_close GetProgress]; puts "Completed $pro"; flush stdout}
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(my_close.GetOutputPort())
viewer.SetColorWindow(255)
viewer.SetColorLevel(127.5)
viewer.Render()
# --- end of script --
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
410,
30488,
198,
6738,
410,
30488,
13,
22602,
13,
44374,
1330,
410,
30488,
3855,
6601,
30016,
198,
36392,
42,
62,
26947,
62,
13252,
2394,
796,
410,
30488,
3855,
6601,
30016,
3419,... | 2.754808 | 416 |
"""GetOptions request.
Gets list of available product options.
"""
from ccapi.cc_objects import ProductOption, ProductOptions
from ..apirequest import APIRequest
class GetOptions(APIRequest):
    """Wrapper for GetOptions request."""

    uri = "Handlers/ProductOption/getOptions.ashx"

    def process_response(self, response):
        """Handle request response."""
        option_data = response.json()
        options = [ProductOption(entry) for entry in option_data]
        return ProductOptions(options)

    def get_data(self):
        """Get data for request."""
        return {"brandID": "341", "strOptionTypes": "1,+2,+6"}
| [
37811,
3855,
29046,
2581,
13,
198,
198,
38,
1039,
1351,
286,
1695,
1720,
3689,
13,
198,
37811,
198,
198,
6738,
269,
11128,
72,
13,
535,
62,
48205,
1330,
8721,
19722,
11,
8721,
29046,
198,
198,
6738,
11485,
499,
557,
6138,
1330,
7824,
... | 2.946341 | 205 |
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers.core import Dense, Activation
import numpy as np
from time import time
from keras.models import load_model

# Ground truth for the synthetic regression task: y = m * x + c.
m = .3
c = 7
x_train = np.linspace(1, 100, 1000)
y_train = m * x_train + c
x_test = np.linspace(3, 200, 400)
y_test = m * x_test + c

# A single Dense unit with a linear activation == linear regression.
output_dim = 1
input_dim = 1
model = Sequential()
model.add(Dense(output_dim, input_dim=input_dim))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='rmsprop')

# Bug fix: `tensorboard` and `checkpointer` were passed to fit() below but
# were never created, raising NameError.  Instantiate the callbacks that
# were imported at the top of the file.
tensorboard = TensorBoard(log_dir='logs/{}'.format(time()))
checkpointer = ModelCheckpoint(filepath='weights_lin_reg.hdf5', verbose=1,
                               save_best_only=True)

model.fit(x_train, y_train, epochs=100, batch_size=16, verbose=1, validation_data=(x_test, y_test),
          callbacks=[tensorboard, checkpointer])
loss = model.evaluate(x_test, y_test, batch_size=16)
print("LOSS", loss)
model.save('my_model_lin_reg.h5')

#loading the model and predicting
# model = load_model('my_model_lin_reg.h5')
# x = model.predict(np.array([100]))
# print(x)
| [
6738,
41927,
292,
13,
13345,
10146,
1330,
309,
22854,
29828,
198,
6738,
41927,
292,
13,
13345,
10146,
1330,
9104,
9787,
4122,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
13,
7295,
1330,
360,... | 2.654494 | 356 |
from django.conf.urls import url

from pages.views import PageDetailView

urlpatterns = [
    # Bug fix: anchor with '^' so the slug pattern only matches from the
    # start of the URL path.  Django resolves URL regexes with re.search,
    # so the previous unanchored pattern would also match deeper paths
    # such as 'anything/<slug>/'.
    url(r'^(?P<slug>[-_\w]+)/$', PageDetailView.as_view(), name='page_detail'),
]
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
5468,
13,
33571,
1330,
7873,
11242,
603,
7680,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
7,
30,
47,
27,
6649,
1018,
36937,
12,
62... | 2.522388 | 67 |
from typing import Callable, Generic, NamedTuple, TypeVar
from gym.vector import VectorEnv
from rl.agents.base import Agent
from rl.algorithms.base import Algorithm
from rl.modelbased.env_model import EnvModel
from rl.modelbased.simulated_env import SimulatedEnv
from rl.utils.sampler import EnvSampler
__all__ = ['SimulatedPolicyLearning']
M = TypeVar('M', bound=EnvModel)
B = TypeVar('B')
T = TypeVar('T', bound=NamedTuple)
class SimulatedPolicyLearning(Algorithm, Generic[M]):
    """Model-based policy learning: each ``update`` collects real
    experience, fits the environment model on it, then trains an agent
    with model-free RL inside the simulated environment.

    NOTE(review): several attributes used below (``_env_model``,
    ``_real_env``, ``_real_sampler``, ``_simulated_sampler``,
    ``real_data_buffer``, ``collect_real_data_fn``, ``train_supervised_fn``,
    ``train_rl_fn`` and the two ``_setup_*_sampler`` methods) are not
    assigned anywhere in this class body -- presumably provided by
    ``__init__`` or the ``Algorithm`` base class; confirm against the
    rest of the module.
    """

    @property
    def env_model(self) -> M:
        """The learned environment model used for simulated rollouts."""
        return self._env_model

    @property
    def real_env(self) -> VectorEnv:
        """The real (non-simulated) vectorized environment."""
        return self._real_env

    @property
    def real_agent(self) -> Agent:
        """Agent that acts in the real environment to collect data."""
        return self._real_agent

    @real_agent.setter
    def real_agent(self, agent: Agent) -> None:
        """Set the real-environment agent, validating its spaces.

        The agent may live either in the model's simulated spaces or in
        its real spaces; in each case its action space must match.
        """
        if agent.observation_space == self._env_model.simulated_observation_space:
            # agent in simulation space
            assert agent.action_space == self._env_model.simulated_action_space
        elif agent.observation_space == self._env_model.real_observation_space:
            # agent in real space
            assert agent.action_space == self._env_model.real_action_space
        # NOTE(review): an agent matching neither observation space is
        # accepted silently here -- confirm whether that is intended.
        self._real_agent = agent
        if self._real_sampler is not None:
            self._setup_real_sampler()

    @property
    def simulated_agent(self) -> Agent:
        """Agent trained inside the simulated environment."""
        return self._simulated_agent

    @simulated_agent.setter
    def simulated_agent(self, agent) -> None:
        """Set the simulated-environment agent; its spaces must match the model's."""
        assert agent.observation_space == self._env_model.simulated_observation_space
        assert agent.action_space == self._env_model.simulated_action_space
        self._simulated_agent = agent
        if self._simulated_sampler is not None:
            self._setup_simulated_sampler()

    def start(self, initial_iteration: int = 0) -> None:
        """Initialize the iteration counter and set up both samplers."""
        self.iteration = initial_iteration
        self._setup_real_sampler()
        self._setup_simulated_sampler()

    def update(self) -> None:
        """Run one iteration of the model-based training loop."""
        # sample from real environment and store in buffer
        self.collect_real_data_fn(self._real_sampler, self.real_data_buffer, self.iteration)
        # train model supervised using collected data
        self.train_supervised_fn(self.env_model, self.real_data_buffer, self.iteration)
        # train agent model-free with simulated experience
        self.train_rl_fn(self._simulated_sampler, self.iteration)
        self.iteration += 1
| [
6738,
19720,
1330,
4889,
540,
11,
42044,
11,
34441,
51,
29291,
11,
5994,
19852,
198,
198,
6738,
11550,
13,
31364,
1330,
20650,
4834,
85,
198,
198,
6738,
374,
75,
13,
49638,
13,
8692,
1330,
15906,
198,
6738,
374,
75,
13,
282,
7727,
9... | 2.507016 | 1,069 |
import os
import pathlib
import pytest
from click.testing import CliRunner
from flytekit.clis.sdk_in_container import pyflyte
from flytekit.clis.sdk_in_container.run import get_entities_in_file
WORKFLOW_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "workflow.py")
@pytest.mark.parametrize(
"working_dir, wf_path",
[
(pathlib.Path("test_nested_wf"), os.path.join("a", "b", "c", "d", "wf.py")),
(pathlib.Path("test_nested_wf", "a"), os.path.join("b", "c", "d", "wf.py")),
(pathlib.Path("test_nested_wf", "a", "b"), os.path.join("c", "d", "wf.py")),
(pathlib.Path("test_nested_wf", "a", "b", "c"), os.path.join("d", "wf.py")),
(pathlib.Path("test_nested_wf", "a", "b", "c", "d"), os.path.join("wf.py")),
],
)
@pytest.mark.parametrize(
"wf_path",
[("collection_wf.py"), ("map_wf.py"), ("dataclass_wf.py")],
)
| [
11748,
28686,
198,
11748,
3108,
8019,
198,
198,
11748,
12972,
9288,
198,
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
198,
6738,
6129,
660,
15813,
13,
565,
271,
13,
21282,
74,
62,
259,
62,
34924,
1330,
12972,
12254,
660,
198,
67... | 2.064665 | 433 |
from warnings import warn
| [
6738,
14601,
1330,
9828,
628
] | 5.4 | 5 |
import Teams
import csv
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
from constants import _Const
import dataLoader

CONST = _Const()

# Gather the run parameters from the user.
date_text = input("Date (dd/mm/yy format): ")
window_text = input("Window size for rolling average: ")
stop_date = dt.datetime.strptime(date_text, '%d/%m/%y')
avg_window = int(window_text)

# Pull every team's accumulated totals up to the cut-off date.
teams = dataLoader.GetAllTeamsWithTotals(stop_date)

# One rolling-average chart per team, written to <OUTPUT_PATH><team>_avg.jpg.
for team in teams.keys():
    print(team)
    stats = teams.get(team, None)
    rolling_shots = Teams.mov_avg(stats.shots, avg_window)
    rolling_on_target = Teams.mov_avg(stats.shots_on_target, avg_window)
    rolling_corners = Teams.mov_avg(stats.corners, avg_window)
    plt.plot(rolling_shots, label = "Shots on goal")
    plt.plot(rolling_on_target, label = "Shots on target")
    plt.plot(rolling_corners, label = "Corners")
    plt.axis([0, 35, 0, 25])
    plt.title("Rolling average")
    plt.xlabel("Number of matches")
    plt.ylabel("Average value")
    plt.legend()
    plt.savefig(CONST.OUTPUT_PATH + team + '_avg.jpg')
    plt.clf()
| [
11748,
24690,
198,
11748,
269,
21370,
198,
11748,
4818,
8079,
355,
288,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
38491... | 2.363636 | 539 |
from django.conf.urls import url

from . import views

# URL namespace, e.g. reverse('create_dataset:index').
app_name = 'create_dataset'

urlpatterns = [
    # ''                      -> views.index
    url(r'^$', views.index, name='index'),
    # 'results/'              -> views.generate_results
    url(r'^results/$', views.generate_results, name='generate-dataset-results'),
    # 'results/<dataset_id>/' -> views.results (dataset_id is numeric)
    url(r'^results/(?P<dataset_id>[0-9]+)/$', views.results, name='create-dataset-results'),
    # 'download/<dataset_id>/' -> views.serve_file
    url(r'^download/(?P<dataset_id>[0-9]+)/$', views.serve_file, name='download-dataset-results'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
17953,
62,
19608,
292,
316,
6,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,... | 2.424419 | 172 |
"""noisy_parameter_sweep_htcondor.py
NOTE: Jobs parameters (sweep values, repetitions) must be modified directly in
this script. This script is configured to be dispatched based on a single
int $(Process) passed in by the htcondor submission.
"""
import argparse
import os
import numpy as np
import time
from z2_sim.src.QuantumCircuits.Cirq_Code import production
from z2_sim.src.QuantumCircuits.Cirq_Code import io
from z2_sim.src.QuantumCircuits.Cirq_Code.noise.zz_crosstalk_v1 import ZZCrossTalkV1
from z2_sim.src.QuantumCircuits.Cirq_Code.noise.two_local_depol import TwoLocalDepol
import qsimcirq
# assert qsimcirq.qsim_gpu is not None
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-proc', metavar='proc', type=int, nargs=1,
help='htcondor process number')
parser.add_argument('-dest', metavar='dest', type=str, nargs=1,
help='Directory to save results.')
args = parser.parse_args()
dest = args.dest[0]
### DISPATCHER ###
proc = args.proc[0]
##################
n = 5
tstart = 1
tstop = 51
n_trajectories = 1000
# Hardcoded physical parameters based on Hank's input
dt = 0.25
#### Hardcoded TABLE of noise parameter sweep ####
j_sweep = [0.714285, 0.625, .555556]
zeta_sweep = [0, 150000, 300000, 450000, 600000, 750000]
eps_sweep = [0, 0.0005, 0.001, 0.0015, 0.002, 0.0025, 0.003]
##################################################
j_zeta_eps_table = []
for j in j_sweep:
for zeta in zeta_sweep:
for eps in eps_sweep:
j_zeta_eps_table.append((j, zeta, eps))
# Now dispatch.
jcoup, zeta, eps = j_zeta_eps_table[proc]
print(f"PROC {proc}: (zeta, epsilon)=({zeta}, {eps})")
target_gate="SIS"
GATE_DURATION = 1e-8
# Pre-compose noise models
zeta_model = ZZCrossTalkV1(zeta, target_gate, gate_duration=GATE_DURATION, sampled_error=False)
eps_noise_model = TwoLocalDepol(err_1q=eps / 10, err_2q=eps, sampled_error=False, verbose=False)
# Initialize simulators and noise model
N_FUSE = 4
qsim_options = qsimcirq.QSimOptions(
max_fused_gate_size=N_FUSE,
ev_noisy_repetitions=n_trajectories,
use_gpu=True,
gpu_sim_threads=256,
gpu_state_threads=512,
gpu_data_blocks=16,
verbosity=0,
denormals_are_zeros=True,
)
t0 = time.time()
# TODO: if eps == 0 we need to manually implement noiseless simulation
if eps < 1e-9:
print("Dispatching to noiseless simulator for eps=0")
# Reroute to a unitary simulation with intermediate state vectors.
out = production.compute_obs_with_intermediate_state_vector(
n=n,
trotter_steps=tstop - tstart,
jcoup=jcoup,
dt=dt,
all_observables=True,
qsim_options=dict(t=8, f=4, g=False), # GPU not necessary
decompbasis=target_gate,
obc=True,
noise_models=[zeta_model],
)
else:
print("Dispatching to triangle simulator for eps!=0")
out = production.compute_noisy_obs(
n=n,
trotter_start=tstart,
trotter_stop=tstop,
jcoup=jcoup,
dt=dt,
all_observables=True,
qsim_options=qsim_options,
noise_models=[zeta_model, eps_noise_model],
obc=True,
decompbasis=target_gate,
)
delta_time = time.time() - t0
print(f"size {n}, one pass, trotter interval=({tstart}, {tstop}), {n_trajectories} trajectories")
print(f"\t{delta_time}")
# This is to expedite file transfers from the submission nodes, since the .sub
# doesn't know the output file name
# !!!!!!!!
# Proc is hardcoded to `0` in the file output for forwards compatibility
# with the CondorCollector
fout = io.make_noisy_htcondor_run_fname(0, n, jcoup, dt, tstart, tstop, zeta, eps, n_trajectories)
np.save(os.path.join(dest, fout), out) | [
37811,
3919,
13560,
62,
17143,
2357,
62,
46280,
538,
62,
4352,
17561,
273,
13,
9078,
198,
198,
16580,
25,
19161,
10007,
357,
46280,
538,
3815,
11,
46152,
1756,
8,
1276,
307,
9518,
3264,
287,
198,
5661,
4226,
13,
770,
4226,
318,
17839,... | 2.384467 | 1,558 |
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

# NOTE(review): `htmlTable` and `ciPlot` are referenced below but are not
# defined or imported in this file, so running it as-is raises NameError.
# Confirm whether they were meant to be imported from a sibling module.
if __name__ == "__main__":
    # Load the raw sweep results: rows whose first two entries are
    # (drop_rate, n) -- presumably; confirm against the producer script.
    with open('droprate.pickle', 'rb') as f:
        result = pickle.load(f)
    # Distinct sorted drop rates and n values present in the data.
    # (`ns` is currently unused.)
    rates = sorted(list(set([x[0] for x in result])))
    ns = sorted(list(set([x[1] for x in result])))
    with open('droprate.html', 'w') as wf:
        wf.write(htmlTable(result))
    for rate in rates:
        # Subset of rows belonging to this drop rate only.
        sub = np.array([x for x in result if x[0] == rate])
        print(sub)
        fig = ciPlot(rate,sub)
        # Save the same figure in vector (pdf) and raster (png) form.
        fig.savefig('ci95-droprate-{:08.4f}.pdf'.format(rate), bbox_inches='tight')
        fig.savefig('ci95-droprate-{:08.4f}.png'.format(rate), bbox_inches='tight')
        plt.close()
| [
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
83,
15799,
355,
4378,
263,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
... | 2.034667 | 375 |
"""
Authors: Shubham Ugare.
Copyright:
Copyright (c) 2020 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy
import os
import _pickle as pickle
import re
| [
37811,
201,
198,
201,
198,
30515,
669,
25,
911,
549,
2763,
24384,
533,
13,
201,
198,
201,
198,
15269,
25,
201,
198,
15269,
357,
66,
8,
12131,
5413,
4992,
201,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1... | 3.433428 | 353 |
"""
Tests for metrics/completeness.py
"""
import copy
from .helpers import fixture, np
from vip_hci.psfsub import pca
from vip_hci.metrics import completeness_curve, completeness_map
from vip_hci.preproc import frame_crop
from vip_hci.fm import cube_planet_free
@fixture(scope="module")
def get_cube_empty(example_dataset_adi):
"""
Get the ADI sequence from conftest.py.
Parameters
----------
example_dataset_adi : fixture
Taken automatically from ``conftest.py``.
Returns
-------
dsi : VIP Dataset
"""
dsi = copy.copy(example_dataset_adi)
starphot = 764939.6 # Absil et al. (2013)
r_b = 0.452/0.0271 # Absil et al. (2013)
theta_b = 211.2+90 # Absil et al. (2013)
f_b = 648.2
psfn = frame_crop(dsi.psf[1:, 1:], 11)
dsi.cube = cube_planet_free([(r_b, theta_b, f_b)], dsi.cube, dsi.angles,
psfn=psfn)
return dsi, starphot
| [
37811,
198,
51,
3558,
329,
20731,
14,
785,
1154,
43205,
13,
9078,
198,
198,
37811,
198,
11748,
4866,
198,
6738,
764,
16794,
364,
1330,
29220,
11,
45941,
198,
6738,
410,
541,
62,
71,
979,
13,
862,
69,
7266,
1330,
279,
6888,
198,
6738... | 2.221698 | 424 |
from stripstream.algorithms.search.fast_downward import search_options, get_fd_root
from subprocess import call
import argparse
import os

# Path to the Fast Downward driver script, relative to this file.
COMMAND = '../../fast-downward.py'
MAX_TIME = 'infinity'
MAX_COST = 'infinity'

# NOTE(review): `main` is not defined or imported anywhere in this file, so
# executing the script raises NameError; the argparse/os/call imports are
# also unused here.  Confirm whether the script body was lost.
if __name__ == '__main__':
    main()
6738,
10283,
5532,
13,
282,
7727,
907,
13,
12947,
13,
7217,
62,
2902,
904,
1330,
2989,
62,
25811,
11,
651,
62,
16344,
62,
15763,
198,
6738,
850,
14681,
1330,
869,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
9858,
44,
6981,
... | 2.886364 | 88 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AssessmentSizingCriterion',
'AssessmentStage',
'AzureHybridUseBenefit',
'AzureLocation',
'AzureOfferCode',
'AzurePricingTier',
'AzureStorageRedundancy',
'Currency',
'Percentile',
'ProvisioningState',
'TimeRange',
]
class AssessmentSizingCriterion(str, Enum):
    """
    Assessment sizing criterion.
    """
    # Size recommendations derived from collected performance data.
    PERFORMANCE_BASED = "PerformanceBased"
    # Size recommendations mirror the as-is on-premises configuration.
    AS_ON_PREMISES = "AsOnPremises"
class AssessmentStage(str, Enum):
    """
    User configurable setting that describes the status of the assessment.
    """
    # Workflow states an assessment moves through.
    IN_PROGRESS = "InProgress"
    UNDER_REVIEW = "UnderReview"
    APPROVED = "Approved"
class AzureHybridUseBenefit(str, Enum):
    """
    AHUB (Azure Hybrid Use Benefit) discount on windows virtual machines.
    """
    UNKNOWN = "Unknown"
    YES = "Yes"
    NO = "No"
class AzureLocation(str, Enum):
    """
    Target Azure location for which the machines should be assessed. These enums are the same as used by Compute API.
    """
    # Member values are the Compute API region names in PascalCase.
    UNKNOWN = "Unknown"
    EAST_ASIA = "EastAsia"
    SOUTHEAST_ASIA = "SoutheastAsia"
    AUSTRALIA_EAST = "AustraliaEast"
    AUSTRALIA_SOUTHEAST = "AustraliaSoutheast"
    BRAZIL_SOUTH = "BrazilSouth"
    CANADA_CENTRAL = "CanadaCentral"
    CANADA_EAST = "CanadaEast"
    WEST_EUROPE = "WestEurope"
    NORTH_EUROPE = "NorthEurope"
    CENTRAL_INDIA = "CentralIndia"
    SOUTH_INDIA = "SouthIndia"
    WEST_INDIA = "WestIndia"
    JAPAN_EAST = "JapanEast"
    JAPAN_WEST = "JapanWest"
    KOREA_CENTRAL = "KoreaCentral"
    KOREA_SOUTH = "KoreaSouth"
    UK_WEST = "UkWest"
    UK_SOUTH = "UkSouth"
    NORTH_CENTRAL_US = "NorthCentralUs"
    EAST_US = "EastUs"
    WEST_US2 = "WestUs2"
    SOUTH_CENTRAL_US = "SouthCentralUs"
    CENTRAL_US = "CentralUs"
    EAST_US2 = "EastUs2"
    WEST_US = "WestUs"
    WEST_CENTRAL_US = "WestCentralUs"
    GERMANY_CENTRAL = "GermanyCentral"
    GERMANY_NORTHEAST = "GermanyNortheast"
    CHINA_NORTH = "ChinaNorth"
    CHINA_EAST = "ChinaEast"
class AzureOfferCode(str, Enum):
    """
    Offer code according to which cost estimation is done.
    """
    # Azure offer identifiers; member names append '_P' where the raw
    # code ends in 'P' so the generated names stay uniform.
    UNKNOWN = "Unknown"
    MSAZR0003_P = "MSAZR0003P"
    MSAZR0044_P = "MSAZR0044P"
    MSAZR0059_P = "MSAZR0059P"
    MSAZR0060_P = "MSAZR0060P"
    MSAZR0062_P = "MSAZR0062P"
    MSAZR0063_P = "MSAZR0063P"
    MSAZR0064_P = "MSAZR0064P"
    MSAZR0029_P = "MSAZR0029P"
    MSAZR0022_P = "MSAZR0022P"
    MSAZR0023_P = "MSAZR0023P"
    MSAZR0148_P = "MSAZR0148P"
    MSAZR0025_P = "MSAZR0025P"
    MSAZR0036_P = "MSAZR0036P"
    MSAZR0120_P = "MSAZR0120P"
    MSAZR0121_P = "MSAZR0121P"
    MSAZR0122_P = "MSAZR0122P"
    MSAZR0123_P = "MSAZR0123P"
    MSAZR0124_P = "MSAZR0124P"
    MSAZR0125_P = "MSAZR0125P"
    MSAZR0126_P = "MSAZR0126P"
    MSAZR0127_P = "MSAZR0127P"
    MSAZR0128_P = "MSAZR0128P"
    MSAZR0129_P = "MSAZR0129P"
    MSAZR0130_P = "MSAZR0130P"
    MSAZR0111_P = "MSAZR0111P"
    MSAZR0144_P = "MSAZR0144P"
    MSAZR0149_P = "MSAZR0149P"
    MSMCAZR0044_P = "MSMCAZR0044P"
    MSMCAZR0059_P = "MSMCAZR0059P"
    MSMCAZR0060_P = "MSMCAZR0060P"
    MSMCAZR0063_P = "MSMCAZR0063P"
    MSMCAZR0120_P = "MSMCAZR0120P"
    MSMCAZR0121_P = "MSMCAZR0121P"
    MSMCAZR0125_P = "MSMCAZR0125P"
    MSMCAZR0128_P = "MSMCAZR0128P"
    MSAZRDE0003_P = "MSAZRDE0003P"
    MSAZRDE0044_P = "MSAZRDE0044P"
class AzurePricingTier(str, Enum):
    """
    Pricing tier for Size evaluation.
    """
    STANDARD = "Standard"
    BASIC = "Basic"
class AzureStorageRedundancy(str, Enum):
    """
    Storage Redundancy type offered by Azure.
    """
    UNKNOWN = "Unknown"
    # LRS / ZRS / GRS / RA-GRS redundancy options.
    LOCALLY_REDUNDANT = "LocallyRedundant"
    ZONE_REDUNDANT = "ZoneRedundant"
    GEO_REDUNDANT = "GeoRedundant"
    READ_ACCESS_GEO_REDUNDANT = "ReadAccessGeoRedundant"
class Currency(str, Enum):
    """
    Currency to report prices in.
    """
    # ISO 4217 currency codes.
    UNKNOWN = "Unknown"
    USD = "USD"
    DKK = "DKK"
    CAD = "CAD"
    IDR = "IDR"
    JPY = "JPY"
    KRW = "KRW"
    NZD = "NZD"
    NOK = "NOK"
    RUB = "RUB"
    SAR = "SAR"
    ZAR = "ZAR"
    SEK = "SEK"
    # Trailing underscore is emitted by the generator -- presumably to
    # avoid reserved-word clashes in other target languages.
    TRY_ = "TRY"
    GBP = "GBP"
    MXN = "MXN"
    MYR = "MYR"
    INR = "INR"
    HKD = "HKD"
    BRL = "BRL"
    TWD = "TWD"
    EUR = "EUR"
    CHF = "CHF"
    ARS = "ARS"
    AUD = "AUD"
    CNY = "CNY"
class Percentile(str, Enum):
    """
    Percentile of performance data used to recommend Azure size.
    """
    PERCENTILE50 = "Percentile50"
    PERCENTILE90 = "Percentile90"
    PERCENTILE95 = "Percentile95"
    PERCENTILE99 = "Percentile99"
class ProvisioningState(str, Enum):
    """
    Provisioning state of the project.
    """
    ACCEPTED = "Accepted"
    CREATING = "Creating"
    DELETING = "Deleting"
    FAILED = "Failed"
    MOVING = "Moving"
    SUCCEEDED = "Succeeded"
class TimeRange(str, Enum):
    """
    Time range of performance data used to recommend a size.
    """
    DAY = "Day"
    WEEK = "Week"
    MONTH = "Month"
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
26144,
35986,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 2.021463 | 2,516 |
from selenium.webdriver.common.by import By
| [
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
628,
628,
628
] | 3.0625 | 16 |
from celery import Celery

from zsl.interface.importer import initialize_web_application
# Runs before the zsl imports below -- presumably those modules require the
# web application to be initialized at import time; confirm before
# reordering these imports.
initialize_web_application()

from zsl.application.service_application import service_application
from zsl.interface.celery.worker import CeleryTaskQueueOutsideWorkerModule

# Register the Celery outside-worker bindings with the injector.
service_application.add_injector_module(CeleryTaskQueueOutsideWorkerModule)

# Celery app: results returned over RPC, broker on local Redis.
app = Celery(backend='rpc', broker='redis://localhost')
| [
6738,
18725,
1924,
1330,
15248,
1924,
198,
198,
6738,
1976,
6649,
13,
39994,
13,
320,
26634,
1330,
41216,
62,
12384,
62,
31438,
198,
36733,
1096,
62,
12384,
62,
31438,
3419,
198,
198,
6738,
1976,
6649,
13,
31438,
13,
15271,
62,
31438,
... | 3.576577 | 111 |
import json
import os
import secrets
from pathlib import Path
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
import dj_database_url
from elasticsearch import RequestsHttpConnection
from requests_aws4auth import AWS4Auth
from cfgov.util import admin_emails
# Repository root is 4 levels above this file
REPOSITORY_ROOT = Path(__file__).resolve().parents[3]
# This is the root of the Django project, 'cfgov'
PROJECT_ROOT = REPOSITORY_ROOT.joinpath("cfgov")
V1_TEMPLATE_ROOT = PROJECT_ROOT.joinpath("jinja2", "v1")
SECRET_KEY = os.environ.get("SECRET_KEY", os.urandom(32))
# Deploy environment
DEPLOY_ENVIRONMENT = os.getenv("DEPLOY_ENVIRONMENT")
# In certain environments, we allow DEBUG to be enabled
DEBUG = os.environ.get("DJANGO_DEBUG") == "True"
# signal that tells us that this is a proxied HTTPS request
# effects how request.is_secure() responds
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
USE_X_FORWARDED_HOST = True
# in some environments, we want to respect X-Forwarded-Port
USE_X_FORWARDED_PORT = os.environ.get("USE_X_FORWARDED_PORT") == "True"
# Use the django default password hashing
PASSWORD_HASHERS = global_settings.PASSWORD_HASHERS
# Application definition
INSTALLED_APPS = (
"permissions_viewer",
"wagtail.core",
"wagtail.admin",
"wagtail.documents",
"wagtail.snippets",
"wagtail.users",
"wagtail.images",
"wagtail.embeds",
"wagtail.contrib.frontend_cache",
"wagtail.contrib.redirects",
"wagtail.contrib.forms",
"wagtail.sites",
"wagtail.contrib.routable_page",
"wagtail.contrib.modeladmin",
"wagtail.contrib.table_block",
"wagtail.contrib.postgres_search",
"localflavor",
"modelcluster",
"taggit",
"wagtailinventory",
"wagtailsharing",
"flags",
"wagtailautocomplete",
"wagtailflags",
"watchman",
"ask_cfpb",
"agreements",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"django.contrib.humanize",
"wagtail.search",
"storages",
"data_research",
"v1",
"core",
"legacy",
"django_extensions",
"jobmanager",
"wellbeing",
"search",
"paying_for_college",
"prepaid_agreements",
"regulations3k",
"retirement_api",
"treemodeladmin",
"housing_counselor",
"hmda",
"youth_employment",
"diversity_inclusion",
"mega_menu.apps.MegaMenuConfig",
"form_explainer.apps.FormExplainerConfig",
"teachers_digital_platform",
"wagtailmedia",
"django_elasticsearch_dsl",
"corsheaders",
# Satellites
"ccdb5_ui",
"complaint_search",
"countylimits",
"crtool",
"mptt",
"ratechecker",
"rest_framework",
)
WAGTAILSEARCH_BACKENDS = {
# The default search backend for Wagtail is the db backend, which does not
# support the custom search_fields defined on Page model descendents when
# using `Page.objects.search()`.
#
# Other backends *do* support those custom search_fields, so for now to
# preserve the current behavior of /admin/pages/search (which calls
# `Page.objects.search()`), the default backend will remain `db`.
#
# This also preserves the current behavior of our external link search,
# /admin/external-links/, which calls each page model's `objects.search()`
# explicitly to get results, but which returns fewer results with the
# Postgres full text backend.
#
# An upcoming effort to overhaul search within consumerfinance.gov and
# Wagtail should address these issues. In the meantime, Postgres full text
# search with the custom search_fields defined on our models is available
# with the "fulltext" backend defined below.
'default': {
'BACKEND': 'wagtail.search.backends.db',
},
'fulltext': {
'BACKEND': 'wagtail.contrib.postgres_search.backend',
},
}
MIDDLEWARE = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.http.ConditionalGetMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"core.middleware.PathBasedCsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"core.middleware.ParseLinksMiddleware",
"core.middleware.DownstreamCacheControlMiddleware",
"core.middleware.SelfHealingMiddleware",
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
"core.middleware.DeactivateTranslationsMiddleware",
"django.middleware.security.SecurityMiddleware",
)
CSP_MIDDLEWARE = ("csp.middleware.CSPMiddleware",)
if "CSP_ENFORCE" in os.environ:
MIDDLEWARE += CSP_MIDDLEWARE
ROOT_URLCONF = "cfgov.urls"
# We support two different template engines: Django templates and Jinja2
# templates. See https://docs.djangoproject.com/en/dev/topics/templates/
# for an overview of how Django templates work.
wagtail_extensions = [
"wagtail.core.jinja2tags.core",
"wagtail.admin.jinja2tags.userbar",
"wagtail.images.jinja2tags.images",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
# Look for Django templates in these directories
"DIRS": [PROJECT_ROOT.joinpath("templates")],
# Look for Django templates in each app under a templates subdirectory
"APP_DIRS": True,
"OPTIONS": {
"builtins": [],
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
{
"NAME": "wagtail-env",
"BACKEND": "django.template.backends.jinja2.Jinja2",
# Look for Jinja2 templates in these directories
"DIRS": [
V1_TEMPLATE_ROOT,
V1_TEMPLATE_ROOT.joinpath("_includes"),
V1_TEMPLATE_ROOT.joinpath("_layouts"),
PROJECT_ROOT.joinpath("static_built"),
],
# Look for Jinja2 templates in each app under a jinja2 subdirectory
"APP_DIRS": True,
"OPTIONS": {
"environment": "v1.jinja2_environment.environment",
"extensions": wagtail_extensions
+ [
"jinja2.ext.do",
"jinja2.ext.i18n",
"jinja2.ext.loopcontrols",
"flags.jinja2tags.flags",
"core.jinja2tags.filters",
"agreements.jinja2tags.agreements",
"mega_menu.jinja2tags.MegaMenuExtension",
"prepaid_agreements.jinja2tags.prepaid_agreements",
"regulations3k.jinja2tags.regulations",
"v1.jinja2tags.datetimes_extension",
"v1.jinja2tags.fragment_cache_extension",
"v1.jinja2tags.v1_extension",
],
},
},
]
WSGI_APPLICATION = "cfgov.wsgi.application"
# Admin Url Access
ALLOW_ADMIN_URL = os.environ.get("ALLOW_ADMIN_URL", False)
if ALLOW_ADMIN_URL:
DATA_UPLOAD_MAX_NUMBER_FIELDS = 2000 # For heavy Wagtail pages
# Databases
DATABASES = {}
# If DATABASE_URL is defined in the environment, use it to set the Django DB
if os.getenv("DATABASE_URL"):
DATABASES["default"] = dj_database_url.config()
# Internationalization
# https://docs.djangoproject.com/en/stable/topics/i18n/
LANGUAGE_CODE = "en-us"
LANGUAGES = (
("en", _("English")),
("es", _("Spanish")),
)
LOCALE_PATHS = (os.path.join(PROJECT_ROOT, "locale"),)
TIME_ZONE = "America/New_York"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/stable/howto/static-files/
STATIC_URL = "/static/"
MEDIA_ROOT = os.environ.get("MEDIA_ROOT", os.path.join(PROJECT_ROOT, "f"))
MEDIA_URL = "/f/"
# List of finder classes that know how to find static files in
# various locations
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"django.contrib.staticfiles.finders.FileSystemFinder",
]
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
# Used to include directories not traditionally found,
# app-specific 'static' directories.
STATICFILES_DIRS = [
PROJECT_ROOT.joinpath("static_built"),
PROJECT_ROOT.joinpath("templates", "wagtailadmin"),
]
# Also include any directories under static.in
STATICFILES_DIRS += [
d for d in REPOSITORY_ROOT.joinpath("static.in").iterdir() if d.is_dir()
]
ALLOWED_HOSTS = ["*"]
EXTERNAL_URL_ALLOWLIST = (
r"^https:\/\/facebook\.com\/cfpb$",
r"^https:\/\/twitter\.com\/cfpb$",
r"^https:\/\/www\.linkedin\.com\/company\/consumer-financial-protection-bureau$", # noqa 501
r"^https:\/\/www\.youtube\.com\/user\/cfpbvideo$",
r"https:\/\/www\.flickr\.com\/photos\/cfpbphotos$",
)
# Wagtail settings
WAGTAIL_SITE_NAME = "consumerfinance.gov"
WAGTAILIMAGES_IMAGE_MODEL = "v1.CFGOVImage"
TAGGIT_CASE_INSENSITIVE = True
WAGTAIL_USER_CREATION_FORM = "v1.auth_forms.UserCreationForm"
WAGTAIL_USER_EDIT_FORM = "v1.auth_forms.UserEditForm"
# LEGACY APPS
MAPBOX_ACCESS_TOKEN = os.environ.get("MAPBOX_ACCESS_TOKEN")
HOUSING_COUNSELOR_S3_PATH_TEMPLATE = (
"https://s3.amazonaws.com/files.consumerfinance.gov"
"/a/assets/hud/{file_format}s/{zipcode}.{file_format}"
)
# ElasticSearch 7 Configuration
ES7_HOST = os.getenv('ES7_HOST', 'localhost')
ES_PORT = os.getenv("ES_PORT", "9200")
ELASTICSEARCH_BIGINT = 50000
ELASTICSEARCH_DEFAULT_ANALYZER = "snowball"
if os.environ.get('USE_AWS_ES', False):
awsauth = AWS4Auth(
os.environ.get('AWS_ES_ACCESS_KEY'),
os.environ.get('AWS_ES_SECRET_KEY'),
'us-east-1',
'es'
)
ELASTICSEARCH_DSL = {
'default': {
'hosts': [{'host': ES7_HOST, 'port': 443}],
'http_auth': awsauth,
'use_ssl': True,
'connection_class': RequestsHttpConnection,
'timeout': 60
},
}
else:
ELASTICSEARCH_DSL = {
"default": {"hosts": f"http://{ES7_HOST}:{ES_PORT}"}
}
ELASTICSEARCH_DSL_SIGNAL_PROCESSOR = 'search.elasticsearch_helpers.WagtailSignalProcessor'
# S3 Configuration
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_LOCATION = "f" # A path prefix that will be prepended to all uploads
AWS_QUERYSTRING_AUTH = False # do not add auth-related query params to URL
AWS_S3_FILE_OVERWRITE = False
AWS_S3_SECURE_URLS = True # True = use https; False = use http
AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_STORAGE_BUCKET_NAME")
AWS_DEFAULT_ACL = None # Default to using the ACL of the bucket
if os.environ.get("S3_ENABLED", "False") == "True":
AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"]
AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"]
if os.environ.get("AWS_S3_CUSTOM_DOMAIN"):
AWS_S3_CUSTOM_DOMAIN = os.environ["AWS_S3_CUSTOM_DOMAIN"]
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
MEDIA_URL = os.path.join(os.environ.get("AWS_S3_URL"), AWS_LOCATION, "")
# GovDelivery
GOVDELIVERY_ACCOUNT_CODE = os.environ.get("GOVDELIVERY_ACCOUNT_CODE")
# Removes wagtail version update check banner from admin page
WAGTAIL_ENABLE_UPDATE_CHECK = False
# Email
ADMINS = admin_emails(os.environ.get("ADMIN_EMAILS"))
if DEPLOY_ENVIRONMENT:
EMAIL_SUBJECT_PREFIX = "[{}] ".format(DEPLOY_ENVIRONMENT.title())
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
WAGTAILADMIN_NOTIFICATION_FROM_EMAIL = os.environ.get(
"WAGTAILADMIN_NOTIFICATION_FROM_EMAIL"
)
# Password Policies
# cfpb_common password rules
CFPB_COMMON_PASSWORD_RULES = [
[r".{12,}", "Minimum allowed length is 12 characters"],
[r"[A-Z]", "Password must include at least one capital letter"],
[r"[a-z]", "Password must include at least one lowercase letter"],
[r"[0-9]", "Password must include at least one digit"],
[
r"[@#$%&!]",
"Password must include at least one special character (@#$%&!)",
],
]
# cfpb_common login rules
# in seconds
LOGIN_FAIL_TIME_PERIOD = os.environ.get("LOGIN_FAIL_TIME_PERIOD", 120 * 60)
# number of failed attempts
LOGIN_FAILS_ALLOWED = os.environ.get("LOGIN_FAILS_ALLOWED", 5)
LOGIN_REDIRECT_URL = "/admin/"
LOGIN_URL = "/login/"
# When we generate an full HTML version of the regulation, we want to
# write it out somewhere. This is where.
OFFLINE_OUTPUT_DIR = ""
DATE_FORMAT = "n/j/Y"
GOOGLE_ANALYTICS_ID = ""
GOOGLE_ANALYTICS_SITE = ""
# Regulations.gov environment variables
REGSGOV_BASE_URL = os.environ.get("REGSGOV_BASE_URL")
REGSGOV_API_KEY = os.environ.get("REGSGOV_API_KEY")
# CDNs
WAGTAILFRONTENDCACHE = {}
ENABLE_AKAMAI_CACHE_PURGE = os.environ.get("ENABLE_AKAMAI_CACHE_PURGE", False)
if ENABLE_AKAMAI_CACHE_PURGE:
WAGTAILFRONTENDCACHE["akamai"] = {
"BACKEND": "v1.models.caching.AkamaiBackend",
"CLIENT_TOKEN": os.environ.get("AKAMAI_CLIENT_TOKEN"),
"CLIENT_SECRET": os.environ.get("AKAMAI_CLIENT_SECRET"),
"ACCESS_TOKEN": os.environ.get("AKAMAI_ACCESS_TOKEN"),
}
ENABLE_CLOUDFRONT_CACHE_PURGE = os.environ.get(
"ENABLE_CLOUDFRONT_CACHE_PURGE", False
)
if ENABLE_CLOUDFRONT_CACHE_PURGE:
WAGTAILFRONTENDCACHE["files"] = {
"BACKEND": "wagtail.contrib.frontend_cache.backends.CloudfrontBackend",
"DISTRIBUTION_ID": {
"files.consumerfinance.gov": os.environ.get(
"CLOUDFRONT_DISTRIBUTION_ID_FILES"
)
},
}
# CSP Allowlists
# These specify what is allowed in <script> tags
CSP_SCRIPT_SRC = (
"'self'",
"'unsafe-inline'",
"'unsafe-eval'",
"*.consumerfinance.gov",
"*.google-analytics.com",
"*.googletagmanager.com",
"*.googleoptimize.com",
"tagmanager.google.com",
"optimize.google.com",
"ajax.googleapis.com",
"search.usa.gov",
"api.mapbox.com",
"js-agent.newrelic.com",
"dnn506yrbagrg.cloudfront.net",
"bam.nr-data.net",
"*.youtube.com",
"*.ytimg.com",
"trk.cetrk.com",
"universal.iperceptions.com",
"cdn.mouseflow.com",
"n2.mouseflow.com",
"us.mouseflow.com",
"geocoding.geo.census.gov",
"tigerweb.geo.census.gov",
"about:",
"connect.facebook.net",
"www.federalregister.gov",
"storage.googleapis.com",
"*.qualtrics.com",
)
# These specify valid sources of CSS code
CSP_STYLE_SRC = (
"'self'",
"'unsafe-inline'",
"*.consumerfinance.gov",
"fast.fonts.net",
"tagmanager.google.com",
"optimize.google.com",
"api.mapbox.com",
"fonts.googleapis.com",
)
# These specify valid image sources
CSP_IMG_SRC = (
"'self'",
"*.consumerfinance.gov",
"www.ecfr.gov",
"s3.amazonaws.com",
"www.gstatic.com",
"ssl.gstatic.com",
"stats.g.doubleclick.net",
"img.youtube.com",
"*.google-analytics.com",
"trk.cetrk.com",
"searchstats.usa.gov",
"gtrk.s3.amazonaws.com",
"*.googletagmanager.com",
"tagmanager.google.com",
"maps.googleapis.com",
"optimize.google.com",
"api.mapbox.com",
"*.tiles.mapbox.com",
"stats.search.usa.gov",
"blob:",
"data:",
"www.facebook.com",
"www.gravatar.com",
"*.qualtrics.com",
"*.mouseflow.com",
)
# These specify what URL's we allow to appear in frames/iframes
CSP_FRAME_SRC = (
"'self'",
"*.consumerfinance.gov",
"*.googletagmanager.com",
"*.google-analytics.com",
"*.googleoptimize.com",
"optimize.google.com",
"www.youtube.com",
"*.doubleclick.net",
"universal.iperceptions.com",
"www.facebook.com",
"staticxx.facebook.com",
"mediasite.yorkcast.com",
"*.qualtrics.com",
)
# These specify where we allow fonts to come from
CSP_FONT_SRC = (
"'self'",
"data:",
"*.consumerfinance.gov",
"fast.fonts.net",
"fonts.google.com",
"fonts.gstatic.com",
)
# These specify hosts we can make (potentially) cross-domain AJAX requests to
CSP_CONNECT_SRC = (
"'self'",
"*.consumerfinance.gov",
"*.google-analytics.com",
"*.googleoptimize.com",
"*.tiles.mapbox.com",
"api.mapbox.com",
"bam.nr-data.net",
"s3.amazonaws.com",
"public.govdelivery.com",
"n2.mouseflow.com",
"api.iperceptions.com",
"*.qualtrics.com",
"raw.githubusercontent.com",
)
# These specify valid media sources (e.g., MP3 files)
CSP_MEDIA_SRC = (
"'self'",
"*.consumerfinance.gov",
)
# FEATURE FLAGS
# Flags can be declared here with an empty list, which will evaluate as false
# until the flag is enabled in the Wagtail admin, or with a list of conditions.
# Each condition should be a tuple or dict in one of these forms:
# (condition-string, value) or {"condition": condition-string, "value": value}
# An optional 3rd value, "required," can be set to True. It defaults to False.
# Flags can also be created (and deleted) in the Wagtail admin.
FLAGS = {
# Ask CFPB search spelling correction support
# When enabled, spelling suggestions will appear in Ask CFPB search and
# will be used when the given search term provides no results
"ASK_SEARCH_TYPOS": [],
# Beta banner, seen on beta.consumerfinance.gov
# When enabled, a banner appears across the top of the site proclaiming
# "This beta site is a work in progress."
"BETA_NOTICE": [("environment is", "beta")],
# When enabled, include a recruitment code comment in the base template
"CFPB_RECRUITING": [],
# When enabled, display a "technical issues" banner on /complaintdatabase
"CCDB_TECHNICAL_ISSUES": [],
# When enabled, display a banner stating the complaint intake form is down
"COMPLAINT_INTAKE_TECHNICAL_ISSUES": [
{
"condition": "path matches",
"value": r"^/complaint",
"required": True,
},
# Boolean to turn it off explicitly unless enabled by another condition
{"condition": "boolean", "value": False},
],
# When enabled, display a banner stating that the complaint intake form is
# offline for maintenance. A combination of 'after date'/'before date'
# conditions is expected.
"COMPLAINT_INTAKE_MAINTENANCE": [
{
"condition": "path matches",
"value": r"^/complaint",
"required": True,
},
# Boolean to turn it off explicitly unless enabled by another condition
{"condition": "boolean", "value": False},
],
# Google Optimize code snippets for A/B testing
# When enabled this flag will add various Google Optimize code snippets.
# Intended for use with path conditions.
"AB_TESTING": [],
# Email popups.
"EMAIL_POPUP_OAH": [("boolean", True)],
"EMAIL_POPUP_DEBT": [("boolean", True)],
# Ping google on page publication in production only
"PING_GOOGLE_ON_PUBLISH": [("environment is", "production")],
# Manually enabled when Beta is being used for an external test.
# Controls the /beta_external_testing endpoint, which Jenkins jobs
# query to determine whether to refresh Beta database.
"BETA_EXTERNAL_TESTING": [],
# During a Salesforce system outage, the following flag should be enabled
# to alert users that the Collect community is down.
"COLLECT_OUTAGE": [
{
"condition": "path matches",
"value": (
r"^/data-research/credit-card-data/terms-credit-card-plans-survey/$|" # noqa: E501
r"^/data-research/prepaid-accounts/$"
),
"required": True,
},
# Boolean to turn it off explicitly unless enabled by another condition
{"condition": "boolean", "value": False},
],
# During a Salesforce system outage, the following flag
# should be enabled to alert users that
# the OMWI assessment form and inclusivity portal are down.
"OMWI_SALESFORCE_OUTAGE": [
{
"condition": "path matches",
"value": (
r"^/about-us/diversity-and-inclusion/$|"
r"^/about-us/diversity-and-inclusion/self-assessment-financial-institutions/$"
), # noqa: E501
"required": True,
},
# Boolean to turn it off explicitly unless enabled by another condition
{"condition": "boolean", "value": False},
],
# Controls whether or not to include Qualtrics Web Intercept code for the
# Q42020 Ask CFPB customer satisfaction survey.
"ASK_SURVEY_INTERCEPT": [],
# Hide archive filter options in the filterable UI
"HIDE_ARCHIVE_FILTER_OPTIONS": [],
# Supports testing of a new 2021 version of the website home page.
# Enable by appending ?home_page_2021=True to home page URLs.
"HOME_PAGE_2021": [
("environment is not", "production", True),
("parameter", "home_page_2021", True),
],
}
# Watchman tokens, a comma-separated string of tokens used to authenticate
# global status endpoint. The Watchman status URL endpoint is only included if
# WATCHMAN_TOKENS is defined as an environment variable. A blank value for
# WATCHMAN_TOKENS will make the status endpoint accessible without a token.
WATCHMAN_TOKENS = os.environ.get("WATCHMAN_TOKENS")
# This specifies what checks Watchman should run and include in its output
# https://github.com/mwarkentin/django-watchman#custom-checks
WATCHMAN_CHECKS = (
"alerts.checks.elasticsearch_health",
)
# We want the ability to serve the latest drafts of some pages on beta
# This value is read by v1.wagtail_hooks
SERVE_LATEST_DRAFT_PAGES = []
# To expose a previously-published page's latest draft version on beta,
# add its primary key to the list below
if DEPLOY_ENVIRONMENT == "beta":
SERVE_LATEST_DRAFT_PAGES = []
# Email popup configuration. See v1.templatetags.email_popup.
EMAIL_POPUP_URLS = {
"debt": [
"/ask-cfpb/what-is-a-statute-of-limitations-on-a-debt-en-1389/",
"/ask-cfpb/what-is-the-best-way-to-negotiate-a-settlement-with-a-debt-collector-en-1447/", # noqa 501
"/ask-cfpb/what-should-i-do-when-a-debt-collector-contacts-me-en-1695/", # noqa 501
"/consumer-tools/debt-collection/",
],
"oah": ["/owning-a-home/", "/owning-a-home/mortgage-estimate/"],
}
REGULATIONS_REFERENCE_MAPPING = [
(
r"(?P<section>[\w]+)-(?P<paragraph>[\w-]*-Interp)",
"Interp-{section}",
"{section}-{paragraph}",
),
]
# See core.middleware.ParseLinksMiddleware. Normally all HTML responses get
# processed by this middleware so that their link content gets the proper
# markup (e.g., download icons). We want to exclude certain pages from this
# middleware. This list of regular expressions defines a set of URLs against
# which we don't want this logic to be run.
PARSE_LINKS_EXCLUSION_LIST = [
# Wagtail admin pages, except preview, draft, and debug views
(
r"^/admin/(?!"
r"pages/\d+/(edit/preview|view_draft)/|"
r"mega_menu/menu/preview/\w+/|"
r"template_debug/"
r")"
),
# Django admin pages
r"^/django-admin/",
# Our custom login pages
r"^/login/",
# Regulations pages that have their own link markup
r"^/policy-compliance/rulemaking/regulations/\d+/",
# DjangoRestFramework API pages where link icons are intrusive
r"^/oah-api/",
# External site interstitial (if we're here, the links have already been
# parsed)
r"^/external-site/",
]
# Required by django-extensions to determine the execution directory used by
# scripts executed with the "runscript" management command
# See https://django-extensions.readthedocs.io/en/latest/runscript.html
BASE_DIR = "scripts"
WAGTAILADMIN_RICH_TEXT_EDITORS = {
"default": {
"WIDGET": "wagtail.admin.rich_text.DraftailRichTextArea",
"OPTIONS": {
"features": [
"h2",
"h3",
"h4",
"h5",
"blockquote",
"hr",
"ol",
"ul",
"bold",
"italic",
"link",
"document-link",
"image",
]
},
},
}
# Serialize Decimal(3.14) as 3.14, not "3.14"
REST_FRAMEWORK = {
"COERCE_DECIMAL_TO_STRING": False
}
# We require CSRF only on authenticated paths. This setting is handled by our
# core.middleware.PathBasedCsrfViewMiddleware.
#
# Any paths listed here that are public-facing will receive an "
# "Edge-Control: no-store" header from our
# core.middleware.DownstreamCacheControlMiddleware and will not be cached.
CSRF_REQUIRED_PATHS = (
"/login",
"/admin",
"/django-admin",
)
# Django 2.2 Baseline required settings
# exempt beta from CSRF settings until it's converted to https
if DEPLOY_ENVIRONMENT and DEPLOY_ENVIRONMENT != "beta":
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_HSTS_SECONDS = 600
SECURE_CONTENT_TYPE_NOSNIFF = True
# Cache Settings
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'cfgov_default_cache',
'TIMEOUT': None,
},
'post_preview': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'post_preview_cache',
'TIMEOUT': None,
}
}
# Set our CORS allowed origins based on a JSON list in the
# CORS_ALLOWED_ORIGINS environment variable.
try:
CORS_ALLOWED_ORIGINS = json.loads(
os.environ.get("CORS_ALLOWED_ORIGINS", "[]")
)
except (TypeError, ValueError):
raise ImproperlyConfigured(
"Environment variable CORS_ALLOWED_ORIGINS is not valid JSON. "
"Expected a JSON array of allowed origins."
)
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
13141,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
3298,
62,
33692,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522... | 2.370031 | 11,018 |
import pytest
choice = '''
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
# raises IndexError if seq is empty
return seq[self._randbelow(len(seq))]
'''
match = '''
def match(pattern, string, flags=0):
"""Try to apply the pattern at the start of the string, returning
a Match object, or None if no match was found."""
return _compile(pattern, flags).match(string)
'''
getsource = '''
def getsource(object):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
OSError is raised if the source code cannot be retrieved."""
lines, lnum = getsourcelines(object)
return ''.join(lines)
'''
SOURCE_CODE = dict(
choice=choice,
match=match,
getsource=getsource
)
@pytest.fixture(scope='module')
| [
11748,
12972,
9288,
198,
198,
25541,
796,
705,
7061,
198,
4299,
3572,
7,
944,
11,
33756,
2599,
198,
220,
220,
220,
37227,
31851,
257,
4738,
5002,
422,
257,
1729,
12,
28920,
8379,
526,
15931,
198,
220,
220,
220,
1303,
12073,
12901,
123... | 3.009772 | 307 |
from typing import Dict, Any, Callable, Optional
from .comm import Comm
| [
6738,
19720,
1330,
360,
713,
11,
4377,
11,
4889,
540,
11,
32233,
198,
198,
6738,
764,
9503,
1330,
1520,
628
] | 3.7 | 20 |
import pytest
from api import _get_wikipedia_page, _select_random_wiki_entry_from_page
| [
11748,
12972,
9288,
198,
6738,
40391,
1330,
4808,
1136,
62,
31266,
62,
7700,
11,
4808,
19738,
62,
25120,
62,
15466,
62,
13000,
62,
6738,
62,
7700,
628
] | 3.259259 | 27 |
#!/bin/env python
##
# @file This file is part of the ExaHyPE project.
# @author ExaHyPE Group (exahype@lists.lrz.de)
#
# @section LICENSE
#
# Copyright (c) 2016 http://exahype.eu
# All rights reserved.
#
# The project has received funding from the European Union's Horizon
# 2020 research and innovation programme under grant agreement
# No 671698. For copyrights and licensing, please consult the webpage.
#
# Released under the BSD 3 Open Source License.
# For the full license text, see LICENSE.txt
#
#
# @section DESCRIPTION
#
# ArgumentParser
#
# @note
# requires python3
import argparse
from enum import Enum
class ArgumentParser:
"""Public API
"""
class ArgType(Enum):
"""Types of arguments for the command line API"""
MandatoryString =1 # name, type, help
MandatoryInt =2 # name, type, help
OptionalBool =11 # name (will add --), type, help
OptionalInt =12 # name (will add --), type, help, default value, metavar
# List of all expected arguments for the command line or input validation
args = [
# mandatory arguments
("pathToApplication", ArgType.MandatoryString, "path to the application as given by the ExaHyPE specification file (application directory as root)"),
("pathToOptKernel", ArgType.MandatoryString, "desired relative path to the generated code (application directory as root)"),
("namespace", ArgType.MandatoryString, "desired namespace for the generated code"),
("solverName", ArgType.MandatoryString, "name of the user-solver"),
("numberOfVariables", ArgType.MandatoryInt, "the number of quantities"),
("numberOfParameters", ArgType.MandatoryInt, "the number of parameters (fixed quantities)"),
("order", ArgType.MandatoryInt, "the order of the approximation polynomial"),
("dimension", ArgType.MandatoryInt, "the number of spatial dimensions in the simulation (2 or 3)"),
("numerics", ArgType.MandatoryString, "linear or nonlinear"),
("architecture", ArgType.MandatoryString, "the microarchitecture of the target device"),
# optional arguments
("useFlux", ArgType.OptionalBool, "enable flux"),
("useFluxVect", ArgType.OptionalBool, "enable vectorized flux (include useFlux)"),
("useNCP", ArgType.OptionalBool, "enable non conservative product"),
("useNCPVect", ArgType.OptionalBool, "enable vectorized non conservative product (include useNCP)"),
("useSource", ArgType.OptionalBool, "enable source terms"),
("useSourceVect", ArgType.OptionalBool, "enable vectorized source terms (include useSource)"),
("useFusedSource", ArgType.OptionalBool, "enable fused source terms (include useSource)"),
("useFusedSourceVect", ArgType.OptionalBool, "enable vectorized fused source terms (include useFusedSource and useSourceVect)"),
("useMaterialParam", ArgType.OptionalBool, "enable material parameters"),
("useMaterialParamVect",ArgType.OptionalBool, "enable vectorized material parameters"),
("usePointSources", ArgType.OptionalInt , "enable numberOfPointSources point sources", -1, "numberOfPointSources"),
("useCERKGuess", ArgType.OptionalBool, "use CERK for SpaceTimePredictor inital guess (nonlinear only)"),
("useSplitCKScalar", ArgType.OptionalBool, "use split Cauchy–Kowalevski formulation (linear only)"),
("useSplitCKVect", ArgType.OptionalBool, "use split Cauchy–Kowalevski formulation with vect PDE (linear only)"),
("useGaussLobatto", ArgType.OptionalBool, "use Gauss Lobatto Quadrature instead of Gauss Legendre"),
("useLimiter", ArgType.OptionalInt, "enable limiter with the given number of observable", -1, "numberOfObservable"),
("ghostLayerWidth", ArgType.OptionalInt, "use limiter with the given ghostLayerWidth, requires useLimiter option, default = 0", 0, "width"),
("tempVarsOnStack", ArgType.OptionalBool, "put the big scratch arrays on the stack instead of the heap (you can use ulimit -s to increase the stack size)"),
]
@staticmethod
def parseArgs():
"""Process the command line arguments"""
parser = argparse.ArgumentParser(description="This is the front end of the ExaHyPE code generator.")
for arg in ArgumentParser.args:
key = arg[0]
type = arg[1]
info = arg[2]
if type == ArgumentParser.ArgType.MandatoryString:
parser.add_argument(key, help=info)
elif type == ArgumentParser.ArgType.MandatoryInt:
parser.add_argument(key, type=int, help=info)
elif type == ArgumentParser.ArgType.OptionalBool:
parser.add_argument("--"+key, action="store_true", help=info)
elif type == ArgumentParser.ArgType.OptionalInt:
parser.add_argument("--"+key, type=int, default=arg[3], metavar=arg[4], help=info)
return vars(parser.parse_args())
@staticmethod
def validateInputConfig(inputConfig):
"""Validate a config and add the default value of missing optional arguments"""
for arg in ArgumentParser.args:
key = arg[0]
type = arg[1]
#check mandatory and raise error if not set or wrong type
if type == ArgumentParser.ArgType.MandatoryString:
if key not in inputConfig or not isinstance(inputConfig[key], str):
raise ValueError("Invalid codegenerator configuration, argument "+key+" missing or of wrong type (string expected)")
elif type == ArgumentParser.ArgType.MandatoryInt:
if key not in inputConfig or not isinstance(inputConfig[key], int):
raise ValueError("Invalid codegenerator configuration, argument "+key+" missing or of wrong type (int expected)")
#check optional and set it to default if not set
elif type == ArgumentParser.ArgType.OptionalBool:
if key not in inputConfig:
inputConfig[key] = False
elif type == ArgumentParser.ArgType.OptionalInt:
if key not in inputConfig:
inputConfig[key] = arg[3] #default value
@staticmethod
def buildCommandLineFromConfig(inputConfig):
"""Build a valid command line for the given config"""
commandLine = "codegenerator "
for arg in ArgumentParser.args:
key = arg[0]
type = arg[1]
# add mandatory parameters
if type == ArgumentParser.ArgType.MandatoryString:
commandLine += inputConfig[key] + " "
elif type == ArgumentParser.ArgType.MandatoryInt:
commandLine += str(inputConfig[key]) + " "
# check optional and add them if set and non default
elif type == ArgumentParser.ArgType.OptionalBool:
if key in inputConfig and inputConfig[key]:
commandLine += "--" + key + " "
elif type == ArgumentParser.ArgType.OptionalInt:
if key in inputConfig and inputConfig[key] != arg[3]:
commandLine += "--" + key + " " + str(inputConfig[key]) + " "
return commandLine
| [
2,
48443,
8800,
14,
24330,
21015,
198,
2235,
198,
2,
2488,
7753,
770,
2393,
318,
636,
286,
262,
1475,
64,
21217,
11401,
1628,
13,
198,
2,
2488,
9800,
1475,
64,
21217,
11401,
4912,
357,
1069,
993,
2981,
31,
20713,
13,
14050,
89,
13,
... | 2.521914 | 2,989 |
from typing import List, Union
| [
6738,
19720,
1330,
7343,
11,
4479,
628,
198
] | 4.125 | 8 |
#!/usr/bin/env python
#<!-- coding=UTF-8 -->
from __future__ import absolute_import, with_statement
import re
import datetime
from .apps import first_template
from .apps import send_email
from .apps import hello
from .apps import layouts
from .apps import simplest
from .apps import standard
import webify
from webify.tests import get, post, difference
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
27,
28112,
19617,
28,
48504,
12,
23,
14610,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
351,
62,
26090,
198,
11748,
302,
198,
11748,
4818,
8079,
198,
198,
6738,
764,
... | 3.524272 | 103 |
''' FUN3D Wrapper '''
# --- Python/system level imports
import numpy as np
from string import Template
from scipy.optimize import fsolve
# --- OpenMDAO main and library imports
from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Float
# --- Local Python imports
from StdAtm import Atmosphere
class Fun3D(Component):
''' OpenMDAO component for executing Fun3D Simulations '''
# -----------------------------------
# --- Initialize Input Parameters ---
# -----------------------------------
d_inf = Float(1.0,
iotype='in',
desc='freestream static density',
units='kg/m**3')
p_inf = Float(1.0,
iotype='in',
desc='freestream static pressure',
units='Pa')
t_inf = Float(1.0,
iotype='in',
desc='freestream static temperature',
units='K')
M_inf = Float(1.0, iotype='in', desc='freestream Mach No.')
alpha = Float(0.0, iotype='in', desc='vehicle AoA', units='deg')
R = Float(287.053,
iotype='in',
desc='specific gas constant',
units='J/kg/K')
alt = Float(0.0, iotype='in', desc='flight altitude', units='ft')
T4_0 = Float(1583.889,
iotype='in',
desc='nozzle plenum stagnation temperature',
units='K')
mdot = Float(1.0, iotype='in', desc='engine mass flow rate', units='kg/s')
pt2_ptL = Float(1.0,
iotype='in',
desc='total to freestream pressure ratio at engine face')
tt2_ttL = Float(
1.0,
iotype='in',
desc='total to freestream temperature ratio at engine face')
M2 = Float(0.0, iotype='in', desc='Mach No. at engine face')
A2 = Float(0.0,
iotype='in',
desc='Flow through area of the engine face',
units='m**2')
if __name__ == "__main__":
# -------------------------
# --- Default Test Case ---
# -------------------------
Fun3D_Comp = Fun3D()
Fun3D_Comp.run()
| [
7061,
6,
29397,
18,
35,
27323,
2848,
705,
7061,
198,
198,
2,
11420,
11361,
14,
10057,
1241,
17944,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4731,
1330,
37350,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
43458,
6442,
198,
198... | 2.182927 | 984 |
from .builder import (build_schedule, build_lr_scheduler, build_model, build_optimizer, build_optimizer_wrapper,
build_layer, build_loss, build_hooks, build_dataset, build_transform, build_data_sampler,
build_gradient_handler)
from .pipeline import ModelInitializer
__all__ = [
'build_schedule', 'build_lr_scheduler', 'build_model', 'build_optimizer', 'build_optimizer_wrapper',
'build_layer', 'build_loss', 'build_hooks', 'build_dataset', 'build_transform', 'build_data_sampler',
'build_gradient_handler', 'ModelInitializer'
]
| [
6738,
764,
38272,
1330,
357,
11249,
62,
15952,
5950,
11,
1382,
62,
14050,
62,
1416,
704,
18173,
11,
1382,
62,
19849,
11,
1382,
62,
40085,
7509,
11,
1382,
62,
40085,
7509,
62,
48553,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.565789 | 228 |
from outputformat.base import BaseOutput
| [
6738,
5072,
18982,
13,
8692,
1330,
7308,
26410,
628
] | 4.666667 | 9 |
'''
Created on Oct 28, 2018
@author: nilson.nieto
'''
cadena = "You are Awesome "
paraffin= """
You are the creator
of your destiny"""
print(cadena)
print(paraffin)
print(cadena*3)
print(cadena[1:])
print(cadena[2:6:1])
print("opposite")
print(cadena[::-1])
print("remove spaces")
print(cadena.strip())
print("Find index 'me'")
print(cadena.find("me"))
print("Count appers e in",paraffin)
print(">",paraffin.count("e"))
print("Upper case >",cadena.upper())
print("Lower case >",cadena.lower())
print("Title >",cadena.title())
| [
7061,
6,
198,
41972,
319,
2556,
2579,
11,
2864,
198,
198,
31,
9800,
25,
18038,
1559,
13,
77,
1155,
78,
198,
7061,
6,
198,
198,
66,
38047,
796,
366,
1639,
389,
25020,
366,
628,
198,
1845,
2001,
259,
28,
37227,
198,
1639,
389,
262,
... | 2.549763 | 211 |
"""Million Galleries Tools
(c) 2012 Marc Boeren
"""
import os
import sys
import math
from PIL import Image
from PIL import ImageFilter
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def list_files(directory, extlist = None, depth = -1):
"""Return a list of files (including relative pathnames) found in
the given directory that have a match in the given extension-list.
Subdirectories are included up to a given depth. The filenames do
not include the base directory.
Parameters:
directory: base directory to start the listing in
extlist: list of allowed extension, e.g. [".jpg", ".gif"]
if not specified or empty: any extension is allowed
depth: max. depth to descend subdirectories
if not specified all subdirectories are descended into
Example:
filelist = tools.list_files("sourcedir", [".jpg", ".gif"])
"""
dirlist = [os.path.normcase(f) for f in os.listdir(directory)]
filepaths = [f for f in dirlist if os.path.isfile(os.path.join(directory, f)) and (not extlist or os.path.splitext(f)[1] in extlist)]
#for p in dirlist:
# if depth and os.path.isdir(os.path.join(directory, p)):
# filepaths+= [os.path.join(p, f) for f in list_files(os.path.join(directory, p), extlist, depth-1)]
return filepaths
def convert_images(sourcedir, destdir, variations, callback = None):
"""Convert images from a folder plus all subfolders. The identical subfolder
structure will be copied to the destination folder. The images will be sized
to a max-box (i.e. aspect ratio will be preserved, image is downscaled so it
fits entirely in the box (note: NO upscale!)) and saved in the destination
format. Alternatively a portion of the image can be cropped, it will auto-select
a maximum box from the center and scale it down to the given dimensions.
Parameters:
sourcedir: source directory for conversion
destdir: destination directory for converted images
variations: a list of different variations of each image, defined by
name: append this to the original name (before the extension)
crop: a tuple of width, height
maxpixels: resize the image so it contains no more than the given number
of pixels (width * height)
Dependencies:
list_files()
"""
filelist = list_files(sourcedir, ['.jpg',])
f = open(os.path.join(sourcedir, 'index.json'), 'w')
f.write('[');
sep = ''
n = 0
# src.has_key('ext') and src['ext'] or None)
for filepath in filelist:
for variation in variations:
img = None
if 1:
img = Image.open(os.path.join(sourcedir, filepath))
if img.mode == '1':
img = img.convert("L")
elif img.mode == 'L':
pass
img = img.convert('RGB')
imgsize = img.size
imgratio = 1.0 * imgsize[0]/imgsize[1]
if 'crop' in variation:
destratio = 1.0 * variation['crop'][0]/variation['crop'][1]
if imgratio < destratio: # width bound
w = variation['crop'][0]
h = rounddown((1.0*w/imgsize[0])*imgsize[1])
yoffset = rounddown((h - variation['crop'][1]) / 2.0)
xoffset = 0
destsize = (w, h)
else: # height bound
h = variation['crop'][1]
w = rounddown((1.0*h/imgsize[1])*imgsize[0])
xoffset = rounddown((w - variation['crop'][0]) / 2.0)
yoffset = 0
destsize = (w, h)
img.thumbnail(destsize, Image.ANTIALIAS) # right scale
box = (xoffset, yoffset, variation['crop'][0]+xoffset, variation['crop'][1]+yoffset)
img = img.crop(box)
#imgcopy = img.resize(variation['crop']) # right size canvas
#print destsize, variation['crop'], img.size, imgcopy.size, box
#imgcopy.paste(img, box)
#img = imgcopy
#img = img.filter(ImageFilter.SHARPEN)
pass
elif 'maxpixels' in variation:
h = math.sqrt((1.0*variation['maxpixels'])/imgratio)
w = imgratio * h
h = rounddown(h)
w = rounddown(w)
destsize = (w, h)
img.thumbnail(destsize, Image.ANTIALIAS)
#print imgsize, variation['maxpixels'], imgratio, destsize, w*h
#img = img.filter(ImageFilter.SHARPEN)
pass
else:
# plain copy
pass
try:
basedir, filename= os.path.split(filepath)
directory = os.path.join(destdir, basedir)
if not os.path.exists(directory):
os.makedirs(directory)
destfile = os.path.splitext(filename)[0]+variation['name']+'.jpg'
img.save(os.path.join(directory, destfile))
if callback:
callback(os.path.join(sourcedir, filepath),
os.path.join(directory, destfile),
imgsize
)
except:
print("Save error:", filepath)
#except:
# print "Open error:", filepath
del img
f.write(sep + '{"name":"' + os.path.splitext(filename)[0] + '", "size":[' + str(imgsize[0]) + ', ' + str(imgsize[1]) + ']}\n');
sep = ','
n+= 1
f.write(']\n');
f.write(str(n));
| [
37811,
44,
1131,
7096,
10640,
20003,
198,
7,
66,
8,
2321,
13067,
3248,
14226,
198,
37811,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
10688,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
350,
4146,
1330,
7412,
22417,
198,
198,
2,
... | 2.028951 | 2,936 |
import urllib2
import json
import argparse
import csv
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
parser = argparse.ArgumentParser(description='Lists members on a Dropbox for Business Team')
parser.add_argument( '-q', '--quota', action='store_const', const=True, default=False, dest='quota',
help='Include usage quota statistics - may increase script time to completion')
parser.add_argument( '-l', '--links', action='store_const', const=True, default=False, dest='links',
help='Include shared link count - may increase script time to completion')
parser.add_argument( '-f', '--folders', action='store_const', const=True, default=False, dest='folders',
help='Include shared folder count - may increase script time to completion')
args = parser.parse_args()
dfbToken = raw_input('Enter your Dropbox Business API App token (Team Member File Access permission): ')
# Get all DfB members, paging through results if necessary
# Get a member's info (account details, quota usage)
# Get a dict of groupid - group name
# Get the count of shared links for the member
# Get the count of shared folders for the member
csvwriter = csv.writer(sys.stdout)
header = ['Email', 'First Name', 'Last Name', 'Status', 'Groups']
if args.quota:
header = header + ['Locale', 'Normal Usage', 'Normal Usage (bytes)', 'Team Shared Usage', 'Team Shared Usage (bytes)']
if args.links:
header = header + ['Shared Links']
if args.folders:
header = header + ['Shared Folders (Total)', 'Shared Folders (Owner)', 'Shared Folders (Member)']
csvwriter.writerow(header)
groupMap = getGroups()
for member in getDfbMembers(None):
# Get the group names from the ID array
groupstr = ''
if 'groups' in member["profile"]:
for group in member["profile"]["groups"]:
if group in groupMap:
if groupstr != '':
groupstr = groupstr + ", "
groupstr = groupstr + groupMap[group]
member_row = [member["profile"]["email"], \
member["profile"]["given_name"], \
member["profile"]["surname"], \
member["profile"]["status"],
groupstr]
# Member info & quota
if args.quota:
if member["profile"]["status"] == "active":
info = getMemberInfo(member["profile"]["member_id"])
member_row = member_row + [info["locale"], \
formatSize(info["quota_info"]["normal"]), \
str(info["quota_info"]["normal"]), \
formatSize(info["quota_info"]["shared"]), \
str(info["quota_info"]["shared"])]
else:
member_row = member_row + ['-', '-', '-', '-', '-']
# Shared links count
if args.links:
if member["profile"]["status"] == "active":
member_row = member_row + [countSharedLinks(member["profile"]["member_id"])]
else:
member_row = member_row + ['-']
# Shared folder count
if args.folders:
if member["profile"]["status"] == "active":
shares = countSharedFolders(member["profile"]["member_id"])
member_row = member_row + [shares["total"], shares["owner"], shares["member"]]
else:
member_row = member_row + ['-', '-', '-']
csvwriter.writerow(member_row)
| [
11748,
2956,
297,
571,
17,
198,
11748,
33918,
198,
11748,
1822,
29572,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
198,
260,
2220,
7,
17597,
8,
198,
17597,
13,
2617,
12286,
12685,
7656,
10786,
48504,
23,
11537,
198,
198,
48610,
796,... | 2.364005 | 1,478 |
import numpy as np
from Channel import ChannelHub
from Ensemble import Ensemble
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11102,
1330,
11102,
16066,
198,
6738,
2039,
15140,
1330,
2039,
15140,
628,
198
] | 3.952381 | 21 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import *
p = process("craxme")
magic_addr = 0x804a038
p.recvline()
p.recv()
payload = p32(magic_addr)
payload += p32(magic_addr+2)
payload += "%{}c".format(0xb00c-8)
payload += "%7$hn"
payload += "%{}c".format(0xface-0xb00c)
payload += "%8$hn"
payload += "A"
p.send(payload)
p.recvuntil("A")
p.interactive()
p.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
279,
675,
1330,
1635,
198,
198,
79,
796,
1429,
7203,
66,
32040,
1326,
4943,
198,
198,
32707,
62,
29851,
796,
65... | 2.065217 | 184 |
from .app import Fair
__version__ = '0.2.0'
| [
198,
6738,
764,
1324,
1330,
7011,
198,
198,
834,
9641,
834,
796,
705,
15,
13,
17,
13,
15,
6,
198
] | 2.3 | 20 |
from logging import disable
from tkinter import Tk
from tkinter.constants import DISABLED
from tkinter.font import NORMAL
from tokenize import Double
from src import Anritsu_MS2830A as SPA
from src import Utils
import logging
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
from tkinter import ttk | [
6738,
18931,
1330,
15560,
198,
6738,
256,
74,
3849,
1330,
309,
74,
198,
6738,
256,
74,
3849,
13,
9979,
1187,
1330,
13954,
6242,
30465,
198,
6738,
256,
74,
3849,
13,
10331,
1330,
25273,
42126,
198,
6738,
11241,
1096,
1330,
11198,
198,
... | 2.853333 | 150 |
from bot.ts.ThreadSafeTSConnection import default_exception_handler
class User():
"""
Class that interfaces the Teamspeak-API with user-specific calls more convenient.
Since calls to the API are penalised, the class also tries to minimise those calls
by only resolving properties when they are actually needed and then caching them (if sensible).
"""
@property
@property
@property
@property
@property
| [
6738,
10214,
13,
912,
13,
16818,
31511,
4694,
32048,
1330,
4277,
62,
1069,
4516,
62,
30281,
628,
198,
4871,
11787,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5016,
326,
20314,
262,
24690,
36729,
12,
17614,
351,
2836,
12,
11... | 3.592 | 125 |
import sys, os, pprint
def normalise_path(path):
"""
Normalises the path with os.path.normpath and then normalise the case with os.path.normcase.
"""
return os.path.normcase(os.path.normpath(path))
def scanDirectory(directory = os.curdir, extension = '.py', in_here = True):
"""
Find the largest file with a given extension in the current working directory if in_here is true (default),
else search the entire directory starting in the passed directory.
"""
visited = set()
allsizes = []
if in_here:
os.chdir(directory)
allsizes = [(os.path.getsize(f), normalise_path(os.path.join(directory, f))) for f in os.listdir('.') if os.path.isfile(f)] #Get all files in the CWD with the correct extension
allsizes = [(size, file) for (size, file) in allsizes if file.endswith(extension)]
visited = {directory: True} #The CWD is the only directory visited
else:
for (thisDir, subDir, filesHere) in os.walk(directory): #walk through the subdirectories of directory, starting in directory
fixcase = normalise_path(thisDir)
if fixcase in visited: #if a directory has already been visited, ignore it
continue
else:
visited.add(fixcase)
for filename in filesHere:
if filename.endswith(extension):
path = os.path.join(thisDir, filename)
try:
size = os.path.getsize(path)
except os.error:
print('skipping', path, sys.exc_info()[0])
else:
allsizes.append((size, path))
allsizes.sort()
return (visited, allsizes)
| [
11748,
25064,
11,
28686,
11,
279,
4798,
198,
198,
4299,
3487,
786,
62,
6978,
7,
6978,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
14435,
2696,
262,
3108,
351,
28686,
13,
6978,
13,
27237,
6978,
290,
788,
3487,
786,
262,
1339... | 2.309019 | 754 |
#!/usr/bin/python3 | [
2,
48443,
14629,
14,
8800,
14,
29412,
18
] | 2.25 | 8 |
# Creating more complex squares
import turtle as t
my_square()
t.penup()
t.setpos(-100,100)
t.pencolor("lightblue")
t.pendown()
my_square()
t.done() | [
2,
30481,
517,
3716,
24438,
198,
11748,
28699,
355,
256,
198,
198,
1820,
62,
23415,
3419,
198,
198,
83,
13,
3617,
929,
3419,
198,
83,
13,
2617,
1930,
32590,
3064,
11,
3064,
8,
198,
83,
13,
3617,
8043,
7203,
2971,
17585,
4943,
198,
... | 2.559322 | 59 |
"""
Created on September 18, 2015
@author: oleg-toporkov
"""
from datetime import datetime
import logging.config
import os
from utilities.config import Config
| [
37811,
198,
41972,
319,
2693,
1248,
11,
1853,
198,
198,
31,
9800,
25,
267,
1455,
12,
4852,
967,
709,
198,
37811,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
18931,
13,
11250,
198,
11748,
28686,
198,
198,
6738,
20081,
13,
1125... | 3.6 | 45 |
import json
import socket
import ssl
from urllib.request import Request, urlopen
from osbot_utils.utils.Files import save_bytes_as_file, file_size, file_bytes, file_open_bytes
from osbot_utils.utils.Python_Logger import Python_Logger
logger = Python_Logger('OSBot-utils').setup()
| [
11748,
33918,
198,
11748,
17802,
198,
11748,
264,
6649,
198,
6738,
220,
220,
2956,
297,
571,
13,
25927,
1330,
19390,
11,
19016,
9654,
198,
198,
6738,
28686,
13645,
62,
26791,
13,
26791,
13,
25876,
1330,
3613,
62,
33661,
62,
292,
62,
7... | 3.097826 | 92 |
import time
import serial
from time import sleep
import numpy as np
import cv2
from sys import argv
from array import array
import struct
np.set_printoptions(linewidth=np.inf)
INIT_FILE = "iwr1443_init_log_only_range"
#RS232Tx/RX is for config port
#AR_mss_logger is for data port
RPI = 1
Main()
quit()
#add to the end of config file if only dynamic points needed
#mmwDemo:/>clutterRemoval 1
#Done
| [
11748,
640,
198,
11748,
11389,
198,
6738,
640,
1330,
3993,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
6738,
25064,
1330,
1822,
85,
198,
6738,
7177,
1330,
7177,
198,
11748,
2878,
198,
198,
37659,
13,
2617,
62,
47... | 2.818182 | 143 |
from email.message import Message
from typing import (
Text
)
from sifter.grammar.test import Test
from sifter.grammar.state import EvaluationState
# section 5.2
| [
6738,
3053,
13,
20500,
1330,
16000,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
8255,
198,
8,
198,
198,
6738,
264,
18171,
13,
4546,
3876,
13,
9288,
1330,
6208,
198,
6738,
264,
18171,
13,
4546,
3876,
13,
5219,
1330,
34959,
9012,
... | 3.38 | 50 |
"""Markdown Calendar Generator"""
import calendar
from datetime import datetime
import sys
if __name__ == "__main__":
argv = sys.argv
if len(argv) == 1:
today = datetime.now()
print_calendar(today.year, today.month)
elif len(argv) == 2:
year = int(argv[1])
for month in range(1, 13):
print_calendar(year, month, with_isoweek=True)
elif len(argv) == 3:
year, month = [int(a) for a in argv[1:3]]
print_calendar(year, month)
else:
print('Usage: python mdcal.py [year] [month]')
| [
37811,
9704,
2902,
26506,
35986,
37811,
198,
11748,
11845,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
25064,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1822,
85,
796,
25064... | 2.19305 | 259 |
# -*- encoding: utf-8 -*-
import json
import ovh
import time
from datetime import datetime, timezone
import logging
import sys
import traceback
import argparse
appName = "snapshot-check"
try:
from systemd.journal import JournalHandler
logger = logging.getLogger(appName)
logger.addHandler(JournalHandler(SYSLOG_IDENTIFIER=appName))
except ImportError:
logger = logging.getLogger(appName)
stdout = logging.StreamHandler(sys.stdout)
logger.addHandler(stdout)
finally:
logger.setLevel(logging.INFO)
if __name__ == '__main__':
try:
main()
except Exception as e:
logger.error('An unexpected error occurred')
logger.error("".join(traceback.format_exception(None,e, e.__traceback__)).replace("\n",""))
sys.exit(2)
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
11748,
19643,
71,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
198,
11748,
18931,
198,
11748,
25064,
198,
11748,
12854,
189... | 2.714286 | 287 |