id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5034274 | <filename>tests/utils/test_is_connected.py<gh_stars>100-1000
import socket
import pytest
from janitor.utils import is_connected
"""
Tests the is_connected helper function,
which is a function to check if the client
is connected to the internet.
Example:
print(is_connected("www.google.com"))
console >> True
Test 1: happy path, ensures function work
Test 2: web addresses that are not recognized
will return false (comzzz is not a tld).
Test 3: web addresses that are not recognized
will return false (aadsfff.com does not exist
at time of testing).
If test 3 fails, perhaps this is because
the website now exists. If that is the case,
alter or delete the test.
"""
def test_is_connected():
    """Exercise is_connected for reachable and unresolvable hosts.

    A reachable host returns True; an unresolvable hostname makes the
    underlying DNS lookup raise socket.gaierror.
    """
    assert is_connected("www.google.com")
    # Fix: the original wrote `assert is_connected(...) is False` *inside*
    # the pytest.raises block.  The call raises before the `is False`
    # comparison ever runs, so that check was dead code; asserting the
    # exception alone is the real intent.
    with pytest.raises(socket.gaierror):
        is_connected("www.google.comzzz")
    with pytest.raises(socket.gaierror):
        is_connected("aadsfff.com")
| StarcoderdataPython |
9758995 | <reponame>cfginn/sap-simulation-package
import unittest
from pysapets.sloth import Sloth
from pysapets.animal import Animal
import pysapets.constants as constants
from unittest.mock import patch
from io import StringIO
from copy import deepcopy
class SlothTest(unittest.TestCase):
    """Unit tests for the Sloth pet: base stats, stat boosts, and ability metadata."""

    def setUp(self):
        # A fresh sloth plus a five-slot team (sloth first) for ability tests.
        self.sloth = Sloth()
        self.friends = [self.sloth, Animal(2, 2), Animal(2, 2), Animal(2, 2), Animal(2, 2)]

    # test that get_type returns the correct type
    def test_get_type(self):
        self.assertEqual(self.sloth.get_type(), constants.SLOTH)

    # test that sloth starts with base health of 1
    def test_get_health(self):
        self.assertEqual(self.sloth.get_health(), 1)

    # test that sloth starts with base attack of 1
    def test_get_attack(self):
        self.assertEqual(self.sloth.get_attack(), 1)

    # test that initializing sloth with additional health increases health
    def test_init_add_health(self):
        newSloth = Sloth(addHealth = 3)
        self.assertEqual(newSloth.get_health(), 1 + 3)

    # test that initializing an sloth with additional attack increases attack
    def test_init_add_attack(self):
        newSloth = Sloth(addAttack = 3)
        self.assertEqual(newSloth.get_attack(), 1 + 3)

    # test that initializing sloth with additional health and attack increases health and attack
    def test_init_add_health_attack(self):
        newSloth = Sloth(addHealth = 3, addAttack = 3)
        self.assertEqual(newSloth.get_health(), 1 + 3)
        self.assertEqual(newSloth.get_attack(), 1 + 3)

    # test that sloth ability has correct trigger
    def test_get_ability_trigger(self):
        self.assertEqual(self.sloth.get_ability_trigger(), constants.SELL)

    # test that sloth ability has correct triggeredBy
    def test_get_ability_triggeredBy(self):
        self.assertEqual(self.sloth.get_ability_triggeredBy(), constants.SELF)

    # TODO add relevant tests for sloth ability
    def test_run_ability(self):
        pass

    def test_run_ability_level_1(self):
        pass

    def test_run_ability_level_2(self):
        pass

    def test_run_ability_level_3(self):
        pass
| StarcoderdataPython |
1845993 | <gh_stars>10-100
#
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
from .client import client
# .Lambda imports greengrass_common, which only applies within Greengrass Core.
# Guard the import so the SDK can still be imported outside of Greengrass Core.
try:
    from .Lambda import StreamingBody
except ImportError:
    # Fix: was a bare `except:`, which also swallowed SystemExit,
    # KeyboardInterrupt and genuine bugs inside the Lambda module.
    # Outside Greengrass Core, StreamingBody simply isn't exported.
    pass

__version__ = '1.6.0'
INTERFACE_VERSION = '1.5'
| StarcoderdataPython |
6630160 | """This demo shows how to use Traits TreeEditors with PyTables to walk the
heirarchy of an HDF5 file. This only picks out arrays and groups, but could
easily be extended to other structures, like tables.
In the demo, the path to the selected item is printed whenever the selection
changes. In order to run, a path to an existing HDF5 database must be given
at the bottom of this file.
"""
from __future__ import print_function
from traits.api import HasTraits, Str, List, Instance
from traitsui.api import TreeEditor, TreeNode, View, Item, Group
import sys
import tables as tb
import h5py
# Empty view for tree nodes that shouldn't open an editor when activated.
no_view = View()
# HDF5 Nodes in the tree
class Hdf5ArrayNode(HasTraits):
    """Tree node describing a single HDF5 array (leaf)."""
    name = Str('<unknown>')         # array name within its parent group
    path = Str('<unknown>')         # full HDF5 path to the array
    parent_path = Str('<unknown>')  # full HDF5 path of the parent group
class Hdf5GroupNode(HasTraits):
    """Tree node describing an HDF5 group: child groups plus child arrays."""
    name = Str('<unknown>')         # group name
    path = Str('<unknown>')         # full HDF5 path to the group
    parent_path = Str('<unknown>')  # full HDF5 path of the parent group
    # Can't have recursive traits? Really?
    #groups = List( Hdf5GroupNode )
    groups = List                   # child Hdf5GroupNode instances (untyped; see note above)
    arrays = List(Hdf5ArrayNode)    # child arrays
    groups_and_arrays = List        # combined children, used by the tree editor
class Hdf5FileNode(HasTraits):
    """Tree root node representing the HDF5 file itself."""
    name = Str('<unknown>')       # file name shown as the root label
    path = Str('/')               # root group path
    groups = List(Hdf5GroupNode)  # top-level groups
    arrays = List(Hdf5ArrayNode)  # top-level arrays
    groups_and_arrays = List      # combined children, used by the tree editor
# Recursively build the tree; there is probably a better way of doing this.
def _get_sub_arrays(group, h5file):
    """Return a list of all arrays immediately below a group in an HDF5 file."""
    return [
        Hdf5ArrayNode(
            name=node._v_name,
            path=node._v_pathname,
            parent_path=node._v_parent._v_pathname,
        )
        for node in h5file.iter_nodes(group, classname='Array')
    ]
def _get_sub_groups(group, h5file):
    """Return a list of all groups and arrays immediately below a group in an HDF5 file."""
    nodes = []
    for child in h5file.iter_nodes(group, classname='Group'):
        node = Hdf5GroupNode(
            name=child._v_name,
            path=child._v_pathname,
            parent_path=child._v_parent._v_pathname,
        )
        # Recurse: collect this group's own arrays and subgroups.
        child_arrays = _get_sub_arrays(child, h5file)
        child_groups = _get_sub_groups(child, h5file)
        if child_arrays:
            node.arrays = child_arrays
        if child_groups:
            node.groups = child_groups
        # Groups first, then arrays, matching the display order.
        node.groups_and_arrays = list(child_groups) + list(child_arrays)
        nodes.append(node)
    return nodes
def _new_get_sub_arrays(group, h5file):
    """Return a list of all arrays immediately below a group in an HDF5 file.

    Fix: this function was a verbatim copy of _get_sub_arrays; delegate to
    it so the two code paths cannot drift apart.
    """
    return _get_sub_arrays(group, h5file)
def _new_get_sub_groups(group, h5file):
    """Return a list of all groups and arrays immediately below a group in an HDF5 file.

    NOTE(review): the leading loop is leftover debug code -- it prints the
    detector positions for one key and then calls exit(), so the traversal
    below it never runs.  Left in place (it is clearly deliberate in this
    demo) but ported to Python 3.
    """
    l = []
    # Debug probe: dump one detector table and terminate the process.
    # Fix: dict.iteritems() is Python 2 only; .items() works on both.
    for key, val in dict(group).items():
        det = group[key + '/instrument/detector/detector_positions']
        print(key, det.value)
        exit()
    for subgroup in h5file.iter_nodes(group, classname='Group'):
        g = Hdf5GroupNode(
            name=subgroup._v_name,
            path=subgroup._v_pathname,
            parent_path=subgroup._v_parent._v_pathname,
        )
        subarrays = _get_sub_arrays(subgroup, h5file)
        if subarrays != []:
            g.arrays = subarrays
        subgroups = _get_sub_groups(subgroup, h5file)
        if subgroups != []:
            g.groups = subgroups
        g.groups_and_arrays = []
        g.groups_and_arrays.extend(subgroups)
        g.groups_and_arrays.extend(subarrays)
        l.append(g)
    return l
def _hdf5_tree(filename):
    """Return an Hdf5FileNode tree for the groups and arrays of an HDF5 file.

    Builds the tree with PyTables and/or h5py depending on the two local
    flags below.  Fix: with try_pytables hard-coded to False the original
    `return file_tree` always raised NameError (file_tree was never
    bound), and the h5py handle was never closed.
    """
    file_tree = None
    try_pytables = False
    if try_pytables:
        h5file = tb.open_file(filename, 'r')
        try:
            print("\nPyTables\n-------")
            print(h5file.root)
            file_tree = Hdf5FileNode(
                name=filename,
                groups=_get_sub_groups(h5file.root, h5file),
                arrays=_get_sub_arrays(h5file.root, h5file),
            )
            file_tree.groups_and_arrays = []
            file_tree.groups_and_arrays.extend(file_tree.groups)
            file_tree.groups_and_arrays.extend(file_tree.arrays)
        finally:
            h5file.close()

    # h5py attempt
    try_h5py = True
    if try_h5py:
        h5py_file = h5py.File(filename, 'r')
        try:
            print("\nh5py \n-------")
            print(h5py_file.parent)
            print(h5py_file.name)
            new_file_tree = Hdf5FileNode(
                name=filename,
                groups=_new_get_sub_groups(h5py_file.parent, h5py_file),
                arrays=_new_get_sub_arrays(h5py_file.parent, h5py_file),
            )
        finally:
            h5py_file.close()
        # Prefer the PyTables tree when both were built (original intent:
        # return file_tree); otherwise fall back to the h5py tree.
        if file_tree is None:
            file_tree = new_file_tree
    return file_tree
# Get a tree editor
def _hdf5_tree_editor(selected=''):
    """Return a TreeEditor specifically for HDF5 file trees.

    `selected` names the trait on the handler object that receives the
    currently selected node.
    """
    return TreeEditor(
        nodes=[
            # File root: opened automatically, children are its groups/arrays.
            TreeNode(
                node_for=[Hdf5FileNode],
                auto_open=True,
                children='groups_and_arrays',
                label='name',
                view=no_view,
            ),
            # Groups: collapsed by default, expandable to their children.
            TreeNode(
                node_for=[Hdf5GroupNode],
                auto_open=False,
                children='groups_and_arrays',
                label='name',
                view=no_view,
            ),
            # Arrays: leaves (no children trait).
            TreeNode(
                node_for=[Hdf5ArrayNode],
                auto_open=False,
                children='',
                label='name',
                view=no_view,
            ),
        ],
        editable=False,
        selected=selected,
    )
if __name__ == '__main__':
    from traits.api import Any

    class ATree(HasTraits):
        """Demo handler: shows an HDF5 tree and prints the selected node's path."""
        h5_tree = Instance(Hdf5FileNode)
        node = Any

        traits_view = View(
            Group(
                Item('h5_tree',
                     editor=_hdf5_tree_editor(selected='node'),
                     resizable=True
                     ),
                orientation='vertical',
            ),
            title='HDF5 Tree Example',
            buttons=['Undo', 'OK', 'Cancel'],
            resizable=True,
            width=.3,
            height=.3
        )

        def _node_changed(self):
            # Traits notification: fires whenever the tree selection changes.
            print(self.node.path)

    if len(sys.argv) == 2:
        a_tree = ATree(h5_tree=_hdf5_tree(sys.argv[1]))
        a_tree.configure_traits()
    else:
        # Fix: typo "arguements" -> "arguments" in the user-facing message.
        print("ERROR: Wrong number of arguments.")
        print("Usage: python HDF5_tree_demo.py <hdf5 file>")
    # a_tree.edit_traits()
| StarcoderdataPython |
5157037 | <reponame>Cadair/ginga<filename>ginga/Mixins.py
#
# Mixins.py -- Mixin classes for FITS viewer.
#
# <NAME> (<EMAIL>)
#
# Copyright (c) <NAME>. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
from ginga.misc.Callback import Callbacks
class UIMixin(object):
    """Mixin adding UI-event callback plumbing to a viewer layer.

    Intended to be mixed into a Callbacks subclass (it relies on
    ``enable_callback`` / ``make_callback`` via ``super``).  If the host
    object also has an ``objects`` list of child layers, UI callbacks are
    dispatched down through those layers first.
    """

    def __init__(self):
        # Whether this layer currently accepts UI events.
        self.ui_active = False

        # Register the standard set of UI event callbacks.
        for name in ('motion', 'button-press', 'button-release',
                     'key-press', 'key-release', 'drag-drop',
                     'scroll', 'map', 'focus', 'enter', 'leave',
                     ):
            self.enable_callback(name)

    def ui_isActive(self):
        """Return True if this layer is accepting UI events."""
        return self.ui_active

    def ui_setActive(self, tf):
        """Enable or disable UI event handling for this layer."""
        # if tf:
        #     print "Layer %s set to active" % str(self)
        #     traceback.print_stack()
        self.ui_active = tf

    ## def make_callback(self, name, *args, **kwdargs):
    ##     if hasattr(self, 'objects'):
    ##         # Invoke callbacks on all our layers that have the UI mixin
    ##         for obj in self.objects:
    ##             if isinstance(obj, UIMixin) and obj.ui_isActive():
    ##                 obj.make_callback(name, *args, **kwdargs)
    ##     return super(UIMixin, self).make_callback(name, *args, **kwdargs)

    def make_ui_callback(self, name, *args, **kwdargs):
        """Invoke callbacks on all objects (i.e. layers) from the top to
        the bottom, returning when the first one returns True.  If none
        returns True, then make the callback on our 'native' layer.
        """
        if hasattr(self, 'objects'):
            # Invoke callbacks on all our layers that have the UI mixin,
            # topmost layer (end of the list) first.
            num = len(self.objects) - 1
            while num >= 0:
                obj = self.objects[num]
                if isinstance(obj, UIMixin) and obj.ui_isActive():
                    #if hasattr(obj, 'ui_isActive') and obj.ui_isActive():
                    res = obj.make_ui_callback(name, *args, **kwdargs)
                    # A truthy result means the event was consumed; stop here.
                    if res:
                        return res
                num -= 1

        if self.ui_active:
            return super(UIMixin, self).make_callback(name, *args, **kwdargs)

    def make_callback_children(self, name, *args, **kwdargs):
        """Invoke the callback on all child objects (i.e. layers) from the
        top to the bottom, then make the callback on our 'native' layer.

        NOTE(review): unlike make_ui_callback, child results are ignored
        here (the early-return below is deliberately commented out) and
        dispatch only requires the child to be a Callbacks instance, not
        an active UIMixin.
        """
        if hasattr(self, 'objects'):
            # Invoke callbacks on all our layers that support callbacks.
            num = len(self.objects) - 1
            while num >= 0:
                obj = self.objects[num]
                if isinstance(obj, Callbacks):
                    res = obj.make_callback(name, *args, **kwdargs)
                    ## if res:
                    ##     return res
                num -= 1

        return super(UIMixin, self).make_callback(name, *args, **kwdargs)
# END
| StarcoderdataPython |
1813548 | <gh_stars>0
from keras.layers import *
from keras.models import *
# Model definitions
def get_my_model(shape=(64, 64, 1)):
    """Build a small CNN binary classifier (commented lines are pruned
    alternative layers kept for experimentation).

    shape -- input tensor shape, default single-channel 64x64.
    Returns an uncompiled keras Model with a 2-way softmax head.
    """
    nclass = 2
    inp = Input(shape=shape)

    # Block 1: 16 filters, downsample by 2.
    x = Convolution2D(16, (3,3), padding="same")(inp)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPool2D(strides=(2, 2))(x)

    # x = Convolution2D(16, (3,3), padding="same")(x)
    # x = BatchNormalization()(x)
    # x = Activation("relu")(x)
    # x = MaxPool2D(strides=(2, 2))(x)
    # x = Convolution2D(32, (3,3), padding="same")(x)
    # x = BatchNormalization()(x)
    # x = Activation("relu")(x)
    # # x = MaxPool2D(strides=(2, 2))(x)

    # Block 2: 32 filters, downsample by 2.
    x = Convolution2D(32, (3,3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPool2D(strides=(2, 2))(x)

    # x = Convolution2D(64, (3,3), padding="same")(x)
    # x = BatchNormalization()(x)
    # x = Activation("relu")(x)
    # # x = MaxPool2D(strides=(2, 2))(x)

    # Block 3: 64 filters, downsample by 2.
    x = Convolution2D(64, (3,3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPool2D(strides=(2, 2))(x)

    # Block 4: 128 filters, no pooling before global average pooling.
    x = Convolution2D(128, (3,3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # x = MaxPool2D(strides=(2, 2))(x)
    #
    # x = Convolution2D(256, (3,3), padding="same")(x)
    # x = BatchNormalization()(x)
    # x = Activation("relu")(x)

    # Head: GAP -> light dropout -> 2-way softmax.
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.1)(x)
    # x = Dense(256)(x)
    # x = BatchNormalization()(x)
    # x = Dropout(0.5)(x)
    # x = Activation("relu")(x)
    out = Dense(nclass, activation='softmax', name='softmax')(x)
    model = Model(inputs=inp, outputs=out)
    # model.summary()
    # print(len(model.layers))
    return model
def get_NASNetMobile():
    """Build an untrained NASNetMobile-based binary classifier for 64x64x1 input.

    weights=None: the backbone is randomly initialised (no pretrained weights).
    Returns an uncompiled keras Model with a 2-way softmax head.
    """
    # Fix: dropped the unused MobileNetV2 import (that variant lives in
    # get_MobileNetV2 below).
    from keras.applications.nasnet import NASNetMobile
    input_tensor = Input(shape=(64, 64, 1))
    # create the base model (include_top=False strips the ImageNet head)
    base_model = NASNetMobile(input_tensor=input_tensor, weights=None, include_top=False)
    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.2)(x)
    out_tensor = Dense(2, activation='softmax')(x)
    # this is the model we will train
    model = Model(inputs=input_tensor, outputs=out_tensor)
    # model.summary()
    # print(len(model.layers))
    return model
def get_MobileNetV2():
    """Build an untrained MobileNetV2-based binary classifier for 64x64x1 input.

    weights=None: the backbone is randomly initialised (no pretrained weights).
    Returns an uncompiled keras Model with a 2-way softmax head.
    """
    # Fix: dropped the unused NASNetMobile import (that variant lives in
    # get_NASNetMobile above).
    from keras.applications.mobilenetv2 import MobileNetV2
    input_tensor = Input(shape=(64, 64, 1))
    # create the base model (include_top=False strips the ImageNet head)
    base_model = MobileNetV2(input_tensor=input_tensor, weights=None, include_top=False)
    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.2)(x)
    output_tensor = Dense(2, activation='softmax')(x)
    # this is the model we will train
    model = Model(inputs=input_tensor, outputs=output_tensor)
    # model.summary()
    # print(len(model.layers))
    return model
def _conv1d_block(x, filters, pool=True):
    """Conv1D(kernel 3, same padding) -> BatchNorm -> ReLU, optionally
    followed by MaxPooling1D(pool 3, stride 2)."""
    x = Convolution1D(filters, 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    if pool:
        x = MaxPooling1D(3, strides=2)(x)
    return x


def get_my_model_1d():
    """Build a small 1-D CNN binary classifier for inputs of shape (64, 1).

    Refactor: the four copy-pasted conv/BN/ReLU/pool stanzas are folded
    into _conv1d_block; the layer sequence is unchanged.
    Returns an uncompiled keras Model with a 2-way softmax head.
    """
    x_in = Input(shape=(64, 1))
    x = _conv1d_block(x_in, 16)
    x = _conv1d_block(x, 32)
    x = _conv1d_block(x, 64)
    # Final block has no pooling: GlobalAveragePooling1D follows directly.
    x = _conv1d_block(x, 128, pool=False)
    x = GlobalAveragePooling1D()(x)
    x_out = Dense(2, activation='softmax')(x)
    model = Model(inputs=x_in, outputs=x_out)
    return model
1900598 | <reponame>KeyoungLau/py4insect-specimen
# -*- coding:utf-8 -*-
# author:keyoung
# email:<EMAIL>
# date:2019-10-11
def draw_grid(amount_card, read_file, savefile_name):
    """Render insect-specimen record cards into an Excel workbook.

    amount_card   -- number of card frames to draw (rounded up to an even count)
    read_file     -- CSV of specimen records, loaded via read_csv_to_list
    savefile_name -- path of the .xlsx file to write

    Each card occupies an 18-row block.  Cards fill column block A-D
    (colnum 0) first, then F-I (colnum 5).  Records sharing the same
    scientific name (row index 7) are grouped onto the same card.
    """
    from readList import read_csv_to_list
    import xlsxwriter
    workbook = xlsxwriter.Workbook(savefile_name)  # create the workbook
    worksheet = workbook.add_worksheet('sheet1')   # single sheet named "sheet1"

    # Right-aligned format with a thin bottom rule (used for the card number).
    align_right = workbook.add_format()
    align_right.set_align("right")
    align_right.set_bottom(1)
    align_right.set_bottom_color("black")
    # Format 1: thin bottom rule only (header underline cells).
    border1 = workbook.add_format()
    border1.set_bottom(1)
    border1.set_bottom_color("black")
    # Format 2: thick box left/right/top, thin bottom, centered (table headers).
    border2 = workbook.add_format()
    border2.set_left(2)
    border2.set_left_color("black")
    border2.set_right(2)
    border2.set_right_color("black")
    border2.set_top(2)
    border2.set_top_color("black")
    border2.set_bottom(1)
    border2.set_bottom_color("black")
    border2.set_valign("vcenter")
    border2.set_align("center")
    # Format 3: thin box on all sides (ordinary table cells).
    border3 = workbook.add_format()
    border3.set_left(1)
    border3.set_left_color("black")
    border3.set_right(1)
    border3.set_right_color("black")
    border3.set_top(1)
    border3.set_top_color("black")
    border3.set_bottom(1)
    border3.set_bottom_color("black")
    # Same box, 9-pt font (for slightly-too-long text).
    border3_with_smaller_font = workbook.add_format()
    border3_with_smaller_font.set_left(1)
    border3_with_smaller_font.set_left_color("black")
    border3_with_smaller_font.set_right(1)
    border3_with_smaller_font.set_right_color("black")
    border3_with_smaller_font.set_top(1)
    border3_with_smaller_font.set_top_color("black")
    border3_with_smaller_font.set_bottom(1)
    border3_with_smaller_font.set_bottom_color("black")
    border3_with_smaller_font.set_font_size(9)
    # Same box, 8-pt font (for very long text).
    border3_with_very_smaller_font = workbook.add_format()
    border3_with_very_smaller_font.set_left(1)
    border3_with_very_smaller_font.set_left_color("black")
    border3_with_very_smaller_font.set_right(1)
    border3_with_very_smaller_font.set_right_color("black")
    border3_with_very_smaller_font.set_top(1)
    border3_with_very_smaller_font.set_top_color("black")
    border3_with_very_smaller_font.set_bottom(1)
    border3_with_very_smaller_font.set_bottom_color("black")
    border3_with_very_smaller_font.set_font_size(8)
    # Same box, horizontally centered (for dates).
    border3_with_center = workbook.add_format()
    border3_with_center.set_left(1)
    border3_with_center.set_left_color("black")
    border3_with_center.set_right(1)
    border3_with_center.set_right_color("black")
    border3_with_center.set_top(1)
    border3_with_center.set_top_color("black")
    border3_with_center.set_bottom(1)
    border3_with_center.set_bottom_color("black")
    border3_with_center.set_align("center")

    # rewrite drawGrid -- pass 1: draw the empty card frames.
    rownum = 0
    colnum = 0
    print("绘制卡片中......")
    # Round the card count up to an even number so printed sheets always
    # pair up; this also keeps the data pass below from drifting.
    if amount_card % 2 == 0:
        draw_card_amount = amount_card
    else:
        draw_card_amount = amount_card + 1
    for page in range(draw_card_amount):
        if rownum >= (amount_card * 18) / 2:  # each card occupies 18 rows
            # Move to the second column block (F-I).
            colnum = 5
            rownum = 0
        # First three header rows: family / scientific / Chinese name labels.
        worksheet.write(rownum, colnum, "科 名")
        worksheet.write(rownum + 1, colnum, "学 名")
        worksheet.write(rownum + 2, colnum, "中 名")
        worksheet.write(rownum, colnum + 1, None, border1)
        worksheet.write(rownum + 1, colnum + 1, None, border1)
        worksheet.write(rownum + 2, colnum + 1, None, border1)
        # Two record-table headers per card (rows +4 and +9).
        worksheet.write(rownum + 4, colnum, "登记号", border2)
        worksheet.write(rownum + 4, colnum + 1, "采集地点", border2)
        worksheet.write(rownum + 4, colnum + 2, "采集日期", border2)
        worksheet.write(rownum + 4, colnum + 3, "标本概况", border2)
        worksheet.write(rownum + 9, colnum, "登记号", border2)
        worksheet.write(rownum + 9, colnum + 1, "采集地点", border2)
        worksheet.write(rownum + 9, colnum + 2, "采集日期", border2)
        worksheet.write(rownum + 9, colnum + 3, "标本概况", border2)
        # Card number in the top-right corner; comment out if not wanted.
        worksheet.write(rownum, colnum + 3, "第{}张".format(page + 1), align_right)
        # Column widths (mirrored between the A-D and F-I blocks).
        worksheet.set_column(0, 0, 7.22)    # column A
        worksheet.set_column(5, 5, 7.22)    # column F
        worksheet.set_column(2, 2, 11.22)   # column C
        worksheet.set_column(7, 7, 11.22)   # column H
        worksheet.set_column(1, 1, 14.22)   # column B
        worksheet.set_column(6, 6, 14.22)   # column G
        worksheet.set_column(3, 3, 25.22)   # column D
        worksheet.set_column(8, 8, 25.22)   # column I
        worksheet.set_column(4, 4, 5.11)    # column E: gutter for cutting the cards apart
        # Row heights for this card block.
        worksheet.set_row(rownum, 25.0, None)
        worksheet.set_row(rownum + 1, 25.0, None)
        worksheet.set_row(rownum + 2, 25.0, None)
        worksheet.set_row(rownum + 4, 20.6, None)
        worksheet.set_row(rownum + 5, 20.6, None)
        worksheet.set_row(rownum + 6, 20.6, None)
        worksheet.set_row(rownum + 7, 20.6, None)
        worksheet.set_row(rownum + 9, 20.6, None)
        worksheet.set_row(rownum + 10, 20.6, None)
        worksheet.set_row(rownum + 11, 20.6, None)
        worksheet.set_row(rownum + 12, 20.6, None)
        worksheet.set_row(rownum + 13, 20.6, None)
        worksheet.set_row(rownum + 14, 20.6, None)
        worksheet.set_row(rownum + 15, 20.6, None)
        worksheet.set_row(rownum + 16, 20.6, None)
        worksheet.set_row(rownum + 17, 118.7, None)  # tall blank row between cards
        # Underlines for the name-header rows.
        worksheet.write_blank(rownum, colnum + 2, "", border1)
        worksheet.write_blank(rownum + 1, colnum + 2, "", border1)
        worksheet.write_blank(rownum + 1, colnum + 3, "", border1)
        worksheet.write_blank(rownum + 2, colnum + 2, "", border1)
        worksheet.write_blank(rownum + 2, colnum + 3, "", border1)
        # Empty record cells: rows +5..+7 (first table) and +10..+16 (second).
        for j in range(5, 8):
            for q in range(0, 4):
                worksheet.write_blank(rownum + j, colnum + q, "", border3)
        for j in range(10, 17):
            for q in range(0, 4):
                worksheet.write_blank(rownum + j, colnum + q, "", border3)
        rownum = rownum + 18

    # Helper: split a scientific name into the genus+species part and the
    # author part.
    def split_scientific_name(scientific_name: str):
        import re
        # NOTE(review): module-level globals are unnecessary here -- the
        # values are also returned; consider removing the global statement.
        global specific_generic_name, author_name
        specific_generic_name = re.findall("(^[A-Z].*? .*?) .*", scientific_name)
        if specific_generic_name:
            specific_generic_name = specific_generic_name[0]
        author_name = scientific_name[len(specific_generic_name) + 1:]
        if len(author_name) == 0:
            author_name = " "
        return specific_generic_name, author_name

    # rewrite handleListDate -- pass 2: fill the cards with records.
    print("处理数据中......")
    lst = read_csv_to_list(read_file)
    amount_of_lst = len(lst)
    current_row = lst.pop()  # take the first record
    italic = workbook.add_format({'italic': True})
    rownum = 0  # current card's top row
    colnum = 0  # current column block (0 = A-D, 5 = F-I)
    # Records are grouped by scientific name (index 7); the Chinese name
    # (index 1) could be used instead, but the scientific name is safer.
    check_name = current_row[7]
    # Write the first record; subsequent records are compared against it.
    generic_name, author = split_scientific_name(current_row[7])
    worksheet.write(rownum, colnum + 1, current_row[2], border1)  # family name
    worksheet.write_rich_string(rownum + 1, colnum + 1, italic, f"{generic_name} ", f"{author}", border1)  # scientific name (genus/species in italics)
    worksheet.write(rownum + 2, colnum + 1, current_row[1], border1)  # Chinese name
    worksheet.write(rownum + 5, colnum, current_row[0], border3)  # registration number
    # Collection site: shrink the font when the text is long.
    if len(current_row[10]) < 8:
        worksheet.write(rownum + 5, colnum + 1, current_row[10], border3)
    else:
        worksheet.write(rownum + 5, colnum + 1, current_row[10], border3_with_smaller_font)
    worksheet.write(rownum + 5, colnum + 2, current_row[13], border3_with_center)  # collection date
    # Specimen summary ("<collector>采集 <condition>"): 3-tier font shrink.
    if len(current_row[11] + "采集 " + current_row[12]) < 17:
        worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12], border3)
    elif 17 <= len(current_row[11] + "采集 " + current_row[12]) < 24:
        worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12], border3_with_smaller_font)
    else:
        worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12],
                        border3_with_very_smaller_font)
    # Row offset of the next record within the current card.
    row_counter = 1
    while lst:  # keep popping records until the list is empty
        if rownum > (amount_card * 18) / 2:
            # Switch to the second column block.
            colnum = 5
            rownum = 0
        current_row = lst.pop()  # next record
        if current_row[7] == check_name:
            if row_counter == 3:
                # Skip the second header row between the two record tables.
                row_counter = 5
            if row_counter > 11:
                # Card is full: start a new card for the same species.
                row_counter = 0
                rownum = rownum + 18
                # NOTE(review): `page` here retains its final value from the
                # drawing loop above; the original author flagged this
                # threshold as unverified.
                if rownum >= (page * 9) + 1:
                    rownum = 0
                    colnum = 5
                generic_name, author = split_scientific_name(current_row[7])
                worksheet.write(rownum, colnum + 1, current_row[2], border1)  # family name
                worksheet.write_rich_string(rownum + 1, colnum + 1, italic, f"{generic_name} ", f"{author}", border1)  # scientific name
                worksheet.write(rownum + 2, colnum + 1, current_row[1], border1)  # Chinese name
            # Same species: the name header is already on the card, so only
            # the record row is written.
            worksheet.write(rownum + 5 + row_counter, colnum, current_row[0], border3)  # registration number
            if len(current_row[10]) < 8:
                worksheet.write(rownum + 5 + row_counter, colnum + 1, current_row[10], border3)  # site
            else:
                worksheet.write(rownum + 5 + row_counter, colnum + 1, current_row[10], border3_with_smaller_font)  # site
            worksheet.write(rownum + 5 + row_counter, colnum + 2, current_row[13], border3_with_center)  # date
            if len(current_row[11] + "采集 " + current_row[12]) < 17:
                worksheet.write(rownum + 5 + row_counter, colnum + 3, current_row[11] + "采集 " + current_row[12], border3)
            elif 17 <= len(current_row[11] + "采集 " + current_row[12]) < 24:
                worksheet.write(rownum + 5 + row_counter, colnum + 3, current_row[11] + "采集 " + current_row[12],
                                border3_with_smaller_font)
            else:
                worksheet.write(rownum + 5 + row_counter, colnum + 3, current_row[11] + "采集 " + current_row[12],
                                border3_with_very_smaller_font)
            row_counter = row_counter + 1
        else:
            # Different species: jump to the next card.
            rownum = rownum + 18
            # Column switch also has to be considered when the species changes.
            if rownum >= (page * 9) + 1:
                rownum = 0
                colnum = 5
            generic_name, author = split_scientific_name(current_row[7])
            worksheet.write(rownum, colnum + 1, current_row[2], border1)  # family name
            worksheet.write_rich_string(rownum + 1, colnum + 1, italic, f"{generic_name} ", f"{author}", border1)  # scientific name
            worksheet.write(rownum + 2, colnum + 1, current_row[1], border1)  # Chinese name
            worksheet.write(rownum + 5, colnum, current_row[0], border3)  # registration number
            # NOTE(review): this truthiness test is probably meant to be
            # `< 8` like the other branches -- as written, any non-empty
            # site gets the normal font and only an empty one is shrunk.
            if len(current_row[10]):
                worksheet.write(rownum + 5, colnum + 1, current_row[10], border3)  # site
            else:
                worksheet.write(rownum + 5, colnum + 1, current_row[10], border3_with_smaller_font)  # site
            worksheet.write(rownum + 5, colnum + 2, current_row[13], border3_with_center)  # date
            if len(current_row[11] + "采集 " + current_row[12]) < 17:
                worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12], border3)
            elif 17 <= len(current_row[11] + "采集 " + current_row[12]) < 24:
                worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12], border3_with_smaller_font)
            else:
                worksheet.write(rownum + 5, colnum + 3, current_row[11] + "采集 " + current_row[12],
                                border3_with_very_smaller_font)
            # New grouping key; reset the per-card row offset.
            check_name = current_row[7]
            row_counter = 1
    workbook.close()
    print(f"数据处理完成,一共处理{amount_of_lst}条数据。")
    print(f"保存文件<{savefile_name}>。")
    print("-"*46)
8145334 | <filename>python/tests/references/test_references.py
# Copyright 2019 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from subprocess import Popen, PIPE, STDOUT
import pytest
from mtap import EventsClient, RemoteProcessor, Pipeline, Event, GenericLabel
from mtap.utilities import subprocess_events_server, find_free_port
@pytest.fixture(name='python_events')
def fixture_python_events():
    """Launch an MTAP events server in a subprocess and yield its address."""
    with subprocess_events_server(cwd=Path(__file__).parents[2]) as address:
        yield address
@pytest.fixture(name='python_references_processor')
def fixture_python_references_processor(python_events, processor_watcher):
    """Start the Python example references processor on a free port and
    yield its address via the processor_watcher fixture."""
    env = dict(os.environ)
    port = str(find_free_port())
    p = Popen(['python', '-m', 'mtap.examples.example_references_processor', '-p', port,
               '--events', python_events, '--log-level', 'DEBUG'], stdin=PIPE, stdout=PIPE, stderr=STDOUT, env=env)
    yield from processor_watcher(address="127.0.0.1:" + port, process=p)
@pytest.fixture(name="java_references_processor")
def fixture_java_references_processor(python_events, processor_watcher):
mtap_jar = os.environ['MTAP_JAR']
mtap_jar = mtap_jar + ':' + str(Path(__file__).parents[1] / 'slf4j-simple-1.7.30.jar')
env = dict(os.environ)
port = str(find_free_port())
p = Popen(['java', '-cp', mtap_jar,
'edu.umn.nlpie.mtap.examples.ReferenceLabelsExampleProcessor',
'-p', port, '-e', python_events], stdin=PIPE, stdout=PIPE, stderr=STDOUT, env=env)
yield from processor_watcher(address="127.0.0.1:" + port, process=p)
@pytest.mark.integration
def test_java_references(python_events, java_references_processor):
    """End-to-end check that the Java processor round-trips label references.

    Runs the processor over a 4-character document and checks direct,
    map-valued and list-valued references between generic labels.
    """
    with EventsClient(address=python_events) as client, Pipeline(
            RemoteProcessor('mtap-java-reference-labels-example-processor',
                            address=java_references_processor)
    ) as pipeline:
        with Event(event_id='1', client=client) as event:
            document = event.create_document('plaintext', 'abcd')
            pipeline.run(document)
            # Direct references: each label points at two other labels.
            references = document.labels['references']
            assert references[0].a == GenericLabel(0, 1)
            assert references[0].b == GenericLabel(1, 2)
            assert references[1].a == GenericLabel(2, 3)
            assert references[1].b == GenericLabel(3, 4)
            # Map-valued reference: dict of name -> label.
            map_references = document.labels['map_references']
            assert map_references[0].ref == {
                'a': GenericLabel(0, 1),
                'b': GenericLabel(1, 2),
                'c': GenericLabel(2, 3),
                'd': GenericLabel(3, 4)
            }
            # List-valued references.
            list_references = document.labels['list_references']
            assert list_references[0].ref == [GenericLabel(0, 1), GenericLabel(1, 2)]
            assert list_references[1].ref == [GenericLabel(2, 3), GenericLabel(3, 4)]
@pytest.mark.integration
def test_python_references(python_events, python_references_processor):
    """End-to-end check that the Python processor round-trips label
    references; mirrors test_java_references for the Python implementation."""
    with EventsClient(address=python_events) as client, Pipeline(
            RemoteProcessor('mtap-python-references-example', address=python_references_processor)
    ) as pipeline:
        with Event(event_id='1', client=client) as event:
            document = event.create_document('plaintext', 'abcd')
            pipeline.run(document)
            # Direct references: each label points at two other labels.
            references = document.labels['references']
            assert references[0].a == GenericLabel(0, 1)
            assert references[0].b == GenericLabel(1, 2)
            assert references[1].a == GenericLabel(2, 3)
            assert references[1].b == GenericLabel(3, 4)
            # Map-valued reference: dict of name -> label.
            map_references = document.labels['map_references']
            assert map_references[0].ref == {
                'a': GenericLabel(0, 1),
                'b': GenericLabel(1, 2),
                'c': GenericLabel(2, 3),
                'd': GenericLabel(3, 4)
            }
            # List-valued references.
            list_references = document.labels['list_references']
            assert list_references[0].ref == [GenericLabel(0, 1), GenericLabel(1, 2)]
            assert list_references[1].ref == [GenericLabel(2, 3), GenericLabel(3, 4)]
| StarcoderdataPython |
1782222 | from abc import ABCMeta, abstractmethod
from bisect import bisect_right
from random import uniform
import numpy as np
from numpy.random import choice
from pandas import Series
from generative_models.data_synthesiser_utils.utils import normalize_given_distribution
class AbstractAttribute(object):
    """Base class for dataset attributes used in data synthesis.

    Holds a pandas Series of raw values, infers a binned distribution over
    the non-null data, and supports encoding/sampling against those bins.
    Subclasses specialise for concrete data types.

    NOTE(review): ``__metaclass__ = ABCMeta`` is Python 2 syntax; on
    Python 3 it is just an ordinary class attribute, so abstract-method
    enforcement does not actually apply here.
    """
    __metaclass__ = ABCMeta

    def __init__(self, name, data, histogram_size):
        self.name = name                      # attribute (column) name
        self.data = data                      # raw pandas Series
        self.histogram_size = histogram_size  # number of histogram bins
        self.data_dropna = self.data.dropna()
        # Fraction of missing values; `or 1` guards a zero-length Series.
        self.missing_rate = (self.data.size - self.data_dropna.size) / (self.data.size or 1)
        # Populated later by subclasses / set_domain / infer_distribution.
        self.is_categorical = None
        self.is_numerical = None
        self.data_type = None
        self.min = None
        self.max = None
        self.distribution_bins = None
        self.distribution_probabilities = None
        self.domain_size = None

    def set_domain(self, domain):
        # NOTE(review): this *returns* the exception instead of raising it,
        # so a forgotten override fails silently at the call site.
        return NotImplementedError('Method needs to be overwritten.')

    @abstractmethod
    def infer_distribution(self):
        """Infer distribution_probabilities over distribution_bins from the
        non-null data (categorical: value counts; numerical: histogram)."""
        if self.is_categorical:
            histogram = self.data_dropna.value_counts()
            # Ensure every known bin value appears, even with zero count.
            for value in set(self.distribution_bins) - set(histogram.index):
                histogram[value] = 0
            # Reorder counts to match the bin order before normalising.
            histogram = histogram[self.distribution_bins]
            self.distribution_probabilities = normalize_given_distribution(histogram)
        else:
            histogram, _ = np.histogram(self.data_dropna, bins=self.distribution_bins)
            self.distribution_probabilities = normalize_given_distribution(histogram)

    def encode_values_into_bin_idx(self):
        """
        Encode values into bin indices for Bayesian Network construction.

        Missing values are encoded as len(distribution_bins), one past the
        last real bin index.
        """
        if self.is_categorical:
            value_to_bin_idx = {value: idx for idx, value in enumerate(self.distribution_bins)}
            encoded = self.data.map(lambda x: value_to_bin_idx[x], na_action='ignore')
        else:
            # bisect_right over the bin edges gives the containing bin.
            encoded = self.data.map(lambda x: bisect_right(self.distribution_bins[:-1], x) - 1, na_action='ignore')
        encoded.fillna(len(self.distribution_bins), inplace=True)
        return encoded.astype(int, copy=False)

    def to_json(self):
        """Encode attribution information in JSON format / Python dictionary.
        """
        return {"name": self.name,
                "data_type": self.data_type.value,
                "is_categorical": self.is_categorical,
                "min": self.min,
                "max": self.max,
                "missing_rate": self.missing_rate,
                "distribution_bins": self.distribution_bins.tolist(),
                "distribution_probabilities": self.distribution_probabilities.tolist()}

    @abstractmethod
    def generate_values_as_candidate_key(self, n):
        """When attribute should be a candidate key in output dataset.
        """
        return np.arange(n)

    def sample_binning_indices_in_independent_attribute_mode(self, n):
        """Sample an array of binning indices.
        """
        return Series(choice(len(self.distribution_probabilities), size=n, p=self.distribution_probabilities))

    @abstractmethod
    def sample_values_from_binning_indices(self, binning_indices):
        """Convert binning indices into values in domain. Used by both independent and correlated attribute mode.
        """
        return binning_indices.apply(lambda x: self.uniform_sampling_within_a_bin(x))

    def uniform_sampling_within_a_bin(self, bin_idx):
        """Map a bin index back to a concrete value: NaN for the missing-value
        bin, the bin label for categorical data, or a uniform draw within the
        bin's edges for numerical data."""
        num_bins = len(self.distribution_probabilities)
        if bin_idx == num_bins:
            return np.nan
        elif self.is_categorical:
            return self.distribution_bins[bin_idx]
        else:
            return uniform(self.distribution_bins[bin_idx], self.distribution_bins[bin_idx + 1])
| StarcoderdataPython |
6523971 | # Generated by Django 2.0.13 on 2019-05-24 09:51
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated schema migration for the terra_layer app.
class Migration(migrations.Migration):

    dependencies = [("terra_layer", "0001_initial")]

    operations = [
        # Repoint FilterField.field at django_geosource.Field with a
        # cascading delete and a reverse accessor named "filter_field".
        migrations.AlterField(
            model_name="filterfield",
            name="field",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="filter_field",
                to="django_geosource.Field",
            ),
        ),
        # filter_type becomes a plain integer defaulting to 0.
        migrations.AlterField(
            model_name="filterfield",
            name="filter_type",
            field=models.IntegerField(default=0),
        ),
    ]
| StarcoderdataPython |
9610160 | import arcade # pip install arcade
# Draw a simple smiley face with the arcade library.
WINDOW_WIDTH = 700
WINDOW_HEIGHT = 700

# Create and open the window.
arcade.open_window(WINDOW_WIDTH, WINDOW_HEIGHT, "Smiley")
arcade.set_background_color(arcade.color.BLACK)
arcade.start_render()  # Always use this before starting

# Face: large yellow circle centred in the window.
arcade.draw_circle_filled(350, 350, 200, arcade.color.YELLOW)
# Right eye.
arcade.draw_circle_filled(420, 420, 25, arcade.color.BLACK)
# Left eye.
arcade.draw_circle_filled(280, 420, 25, arcade.color.BLACK)
# Smile: lower half of an ellipse (180-360 degrees).
arcade.draw_arc_outline(340, 310, 120, 100, arcade.color.BLACK, 180, 360, 10)

arcade.finish_render()  # Ends the rendering pass
arcade.run()  # Keeps the window running until closed
def _find_patterns(content, pos, patterns):
    """Scan content from pos for the first occurrence of any pattern.

    Returns struct(pos = match index, pattern = index of the matched
    pattern in `patterns`), or None when nothing matches.
    """
    # Renamed local `max` -> `limit`: it shadowed the builtin.
    limit = len(content)
    for i in range(pos, limit):
        for pattern_index, pattern in enumerate(patterns):
            if content.startswith(pattern, i):
                return struct(
                    pos = i,
                    pattern = pattern_index,
                )
    return None
# Opening characters and the closing character each one must be matched by.
_find_ending_escapes = {
    '(': ')',
    '"': '"',
    "'": "'",
    '{': '}',
}

def _find_ending(content, pos, endch, escapes = _find_ending_escapes):
    """Return the index of the `endch` that closes the construct starting
    before `pos`, skipping over nested bracketed/quoted regions, or None
    when the ending is never found.
    """
    # Renamed local `max` -> `limit`: it shadowed the builtin.
    limit = len(content)
    # Stack of closers still to be matched; head is the innermost one.
    ending_search_stack = [endch]
    for i in range(pos, limit):
        ch = content[i]
        if ch == ending_search_stack[0]:
            ending_search_stack.pop(0)
            if not ending_search_stack:
                return i
            continue
        for start, end in escapes.items():
            if ch == start:
                # Entered a nested construct; its closer must come first.
                ending_search_stack.insert(0, end)
                break
    return None
# Characters considered whitespace by _is_whitespace.
_whitespace_chars = [' ', '\t', '\n']

def _is_whitespace(content, pos, end_pos, ws = _whitespace_chars):
    """Report whether every character of content[pos:end_pos] is whitespace."""
    for index in range(pos, end_pos):
        if content[index] in ws:
            continue
        return False
    return True
# Public namespace bundling the parser helpers; consumers call e.g.
# parse.find_patterns(...) rather than the underscore-private functions.
parse = struct(
    find_patterns = _find_patterns,
    find_ending = _find_ending,
    is_whitespace = _is_whitespace,
)
import sys, os, inspect

# Drop a stale shared checkout from the import path so the local package wins.
if '/home/ubuntu/shared/GitHub' in sys.path:
    sys.path.remove('/home/ubuntu/shared/GitHub')
try:
    import generative_playground
except ImportError:  # bug fix: was a bare `except:`, which hid unrelated errors
    import sys, os, inspect
    # Fall back to locating the package relative to this file.
    my_location = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    # sys.path.append('../../../../../..')
    # sys.path.append('../../../../DeepRL')
    # sys.path.append('../../../../../transformer_pytorch')
# from deep_rl import *
# from generative_playground.models.problem.rl.network_heads import CategoricalActorCriticNet
# from generative_playground.train.rl.run_iterations import run_iterations
# from generative_playground.models.problem.rl.DeepRL_wrappers import BodyAdapter, MyA2CAgent
from generative_playground.molecules.model_settings import get_settings
from generative_playground.molecules.train.pg.hypergraph.main_train_policy_gradient_minimal import train_policy_gradient
from generative_playground.molecules.guacamol_utils import guacamol_goal_scoring_functions
from generative_playground.models.temperature_schedule import toothy_exp_schedule, shifted_cosine_schedule, \
    seesaw_exp_schedule
from generative_playground.models.pg_runner import PolicyGradientRunner

# Run configuration.
batch_size = 20  # 20
num_batches = 5
grammar_cache = 'hyper_grammar_guac_10k_with_clique_collapse.pickle'  # 'hyper_grammar.pickle'
grammar = 'hypergraph:' + grammar_cache
ver = 'v2'
obj_num = 0
reward_funs = guacamol_goal_scoring_functions(ver)
# this accepts a list of SMILES strings
reward_fun = reward_funs[obj_num]
root_name = 'xtest9' + ver + '_' + str(obj_num) + '_lr0.02'
max_steps = 60
# Checkpoints are stored under <repo root>/pretrained/.
root_location = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
root_location = root_location + '/../../../'
save_location = os.path.realpath(root_location + 'pretrained/')
def runner_factory(x=None):
    """Build a fresh PolicyGradientRunner with the configuration above.

    Bug fix: this used to be ``lambda x: PolicyGradientRunner(...)`` but was
    invoked below as ``runner_factory()`` with no argument, which raised
    TypeError.  The unused parameter is kept (with a default) so any caller
    that still passes an argument keeps working.
    """
    return PolicyGradientRunner(grammar,
                                BATCH_SIZE=batch_size,
                                reward_fun=reward_fun,
                                max_steps=max_steps,
                                num_batches=num_batches,
                                lr=0.02,
                                entropy_wgt=0.1,
                                # lr_schedule=shifted_cosine_schedule,
                                root_name=root_name,
                                preload_file_root_name=None,
                                plot_metrics=True,
                                save_location=save_location,
                                metric_smooth=0.0,
                                decoder_type='graph_conditional',  # 'rnn_graph',# 'attention',
                                on_policy_loss_type='advantage_record',
                                rule_temperature_schedule=None,  # lambda x: toothy_exp_schedule(x, scale=num_batches),
                                eps=0.0,
                                priors='conditional',
                                # preload_file='policy_gradient_run.h5')
                                )


# Train once, reload the saved checkpoint, and train again under a new name.
runner = runner_factory()
runner.set_root_name('whatever')
save_fn = runner.run()
runner = PolicyGradientRunner.load(save_fn)
runner.set_root_name('whatever2')
runner.run()
| StarcoderdataPython |
319434 | from .normal import Normal, HomoskedasticNormal
from .laplace import Laplace
from .lognormal import LogNormal, HomoskedasticLogNormal
from .loglaplace import LogLaplace
| StarcoderdataPython |
1723711 | from .abs_state import AbsState
class Waiting(AbsState):
    """State that polls the Numerai API for the availability of a new round."""

    def check(self):
        """Poll for a new round; on success move the model to getting_data."""
        model = self._model
        model.logger.info('Checking for new round')
        round_available = model.napi.check_new_round() or model.test
        if not round_available:
            model.logger.info('No new round')
            model.new_round = False
            return
        model.logger.info('New round available')
        model.state = model.getting_data
        model.new_round = True

    def stop_waiting(self):
        """Force the transition to the getting_data state."""
        self._model.state = self._model.getting_data
4965852 | <filename>deploy/slim/prune/sensitivity_anal.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
__dir__ = os.path.dirname(__file__)
sys.path.append(__dir__)
sys.path.append(os.path.join(__dir__, '..', '..', '..'))
sys.path.append(os.path.join(__dir__, '..', '..', '..', 'tools'))
import json
import cv2
from paddle import fluid
import paddleslim as slim
from copy import deepcopy
from tools.eval_utils.eval_det_utils import eval_det_run
from tools import program
from ppocr.utils.utility import initial_logger
from ppocr.data.reader_main import reader_main
from ppocr.utils.save_load import init_model
from ppocr.utils.character import CharacterOps
from ppocr.utils.utility import create_module
from ppocr.data.reader_main import reader_main
logger = initial_logger()
def get_pruned_params(program):
    """Collect the names of prunable parameters in a Paddle program.

    A parameter qualifies when it is 4-D (a conv kernel) and its name marks
    neither a depthwise nor a transpose convolution.
    """
    return [
        param.name
        for param in program.global_block().all_parameters()
        if len(param.shape) == 4
        and 'depthwise' not in param.name
        and 'transpose' not in param.name
    ]
def eval_function(eval_args, mode='eval'):
    """Run detection evaluation for the packed args and return the hmean."""
    executor = eval_args['exe']
    cfg = eval_args['config']
    info = eval_args['eval_info_dict']
    metrics = eval_det_run(executor, cfg, info, mode=mode)
    return metrics['hmean']
def main():
    """Build the eval graph, measure a baseline hmean, then run PaddleSlim's
    per-parameter pruning sensitivity analysis."""
    # Load the YAML config and apply CLI overrides.
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)
    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        # Recognition algorithms need a character-set mapper.
        config['Global']['char_ops'] = CharacterOps(config['Global'])
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_prog = fluid.Program()
    eval_program = fluid.Program()
    # Build the evaluation graph in test mode and grab its fetch targets.
    eval_build_outputs = program.build(
        config, eval_program, startup_prog, mode='test')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    # Restore pretrained weights before measuring sensitivity.
    init_model(config, eval_program, exe)
    eval_reader = reader_main(config=config, mode="eval")
    eval_info_dict = {'program':eval_program,\
                      'reader':eval_reader,\
                      'fetch_name_list':eval_fetch_name_list,\
                      'fetch_varname_list':eval_fetch_varname_list}
    eval_args = dict()
    eval_args = {'exe': exe, 'config': config, 'eval_info_dict': eval_info_dict}
    # Baseline metric before any pruning.
    metrics = eval_function(eval_args)
    print("Baseline: {}".format(metrics))
    params = get_pruned_params(eval_program)
    print('Start to analyze')
    # Measure hmean degradation for each parameter at several pruned ratios;
    # results are cached in sensitivities_0.data so the run can resume.
    sens_0 = slim.prune.sensitivity(
        eval_program,
        place,
        params,
        eval_function,
        sensitivities_file="sensitivities_0.data",
        pruned_ratios=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
        eval_args=eval_args,
        criterion='geometry_median')
if __name__ == '__main__':
    # Parse the CLI arguments (config path + key=value overrides) and run.
    parser = program.ArgsParser()
    FLAGS = parser.parse_args()
    main()
| StarcoderdataPython |
6665250 | <filename>panoptes_aggregation/scripts/reduce_panoptes_csv.py
from collections import OrderedDict
from multiprocessing import Pool
import io
import os
import progressbar
import yaml
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import numpy as np
import pandas
from panoptes_aggregation import reducers
from panoptes_aggregation.csv_utils import flatten_data, unflatten_data, order_columns
def first_filter(data):
    """Keep only the rows whose created_at equals the earliest timestamp."""
    earliest = data.created_at.min()
    return data[data.created_at == earliest]
def last_filter(data):
    """Keep only the rows whose created_at equals the latest timestamp."""
    latest = data.created_at.max()
    return data[data.created_at == latest]
# Dispatch table mapping the `filter` option to the de-duplication strategy
# applied when one user classified the same subject more than once.
FILTER_TYPES = {
    'first': first_filter,
    'last': last_filter
}
def get_file_instance(file):
    """Return *file* unchanged when it is already a file object, otherwise
    treat it as a path and open it for UTF-8 text reading."""
    if isinstance(file, io.IOBase):
        return file
    return open(file, 'r', encoding='utf-8')  # pragma: no cover
def reduce_subject(
    subject,
    classifications,
    task,
    reducer_name=None,
    workflow_id=None,
    filter=None,
    keywords=None
):
    """Reduce all classifications of one task for a single subject.

    `classifications` holds the extract rows for this (subject, task) pair;
    `filter` ('first'/'last') picks which classification to keep when a user
    classified the subject more than once; `keywords` are forwarded to the
    reducer.  Returns a list of OrderedDicts, one per reduction produced.
    """
    # Bug fix: `keywords={}` was a shared mutable default argument.
    if keywords is None:
        keywords = {}
    reduced_data_list = []
    classifications = classifications.drop_duplicates()
    unique_users = classifications['user_name'].unique().shape[0]
    # Apply the per-user de-duplication only when someone classified twice.
    if (filter in FILTER_TYPES) and (unique_users < classifications.shape[0]):
        classifications = classifications.groupby(['user_name'], group_keys=False).apply(FILTER_TYPES[filter])
    data = [unflatten_data(c) for cdx, c in classifications.iterrows()]
    user_ids = [c.user_id for cdx, c in classifications.iterrows()]
    reduction = reducers.reducers[reducer_name](data, user_id=user_ids, **keywords)
    # A reducer may return either one reduction or a list of them.
    if isinstance(reduction, list):
        for r in reduction:
            reduced_data_list.append(OrderedDict([
                ('subject_id', subject),
                ('workflow_id', workflow_id),
                ('task', task),
                ('reducer', reducer_name),
                ('data', r)
            ]))
    else:
        reduced_data_list.append(OrderedDict([
            ('subject_id', subject),
            ('workflow_id', workflow_id),
            ('task', task),
            ('reducer', reducer_name),
            ('data', reduction)
        ]))
    return reduced_data_list
# Default output directory: the working directory at import time.
CURRENT_PATH = os.path.abspath('.')
def reduce_csv(
    extracted_csv,
    reducer_config,
    filter='first',
    output_name='reductions',
    output_dir=CURRENT_PATH,
    order=False,
    stream=False,
    cpu_count=1
):
    """Reduce a Panoptes extracts CSV into one reduction per (subject, task).

    `extracted_csv` / `reducer_config` are paths or open files; `filter`
    ('first'/'last') picks which classification to keep for repeat users;
    `stream=True` appends results to disk as they finish and lets an
    interrupted run resume; `cpu_count > 1` fans subjects out to a
    multiprocessing pool.  Returns the path of the written CSV.
    """
    extracted_csv = get_file_instance(extracted_csv)
    with extracted_csv as extracted_csv_in:
        extracted = pandas.read_csv(
            extracted_csv_in,
            infer_datetime_format=True,
            parse_dates=['created_at'],
            encoding='utf-8'
        )
    extracted.sort_values(['subject_id', 'created_at'], inplace=True)
    resume = False
    subjects = extracted.subject_id.unique()
    tasks = extracted.task.unique()
    # NOTE(review): assumes the whole extract shares one workflow_id — taken
    # from the first row only; confirm against upstream extract format.
    workflow_id = extracted.workflow_id.iloc[0]
    reducer_config = get_file_instance(reducer_config)
    with reducer_config as config:
        config_yaml = yaml.load(config, Loader=yaml.SafeLoader)
    assert (len(config_yaml['reducer_config']) == 1), 'There must be only one reducer in the config file.'
    # Single-entry mapping: reducer name -> keyword arguments for it.
    for key, value in config_yaml['reducer_config'].items():
        reducer_name = key
        keywords = value
    assert (reducer_name in reducers.reducers), 'The reducer in the config files does not exist.'
    output_base_name, _ = os.path.splitext(output_name)
    output_path = os.path.join(output_dir, '{0}_{1}.csv'.format(reducer_name, output_base_name))
    if stream:
        if os.path.isfile(output_path):
            print('resuming from last run')
            resume = True
            with open(output_path, 'r', encoding='utf-8') as reduced_file:
                reduced_csv = pandas.read_csv(reduced_file, encoding='utf-8')
            # Skip subjects already present in the existing output file.
            subjects = np.setdiff1d(subjects, reduced_csv.subject_id)
    reduced_data = []
    sdx = 0
    apply_keywords = {
        'reducer_name': reducer_name,
        'workflow_id': workflow_id,
        'filter': filter,
        'keywords': keywords
    }
    widgets = [
        'Reducing: ',
        progressbar.Percentage(),
        ' ', progressbar.Bar(),
        ' ', progressbar.ETA()
    ]
    number_of_rows = len(subjects) * len(tasks)
    pbar = progressbar.ProgressBar(widgets=widgets, max_value=number_of_rows)

    def callback(reduced_data_list):
        # Accumulates results (and flushes them to disk in stream mode);
        # also drives the progress bar.  Runs in the main process even when
        # a Pool is used (apply_async callback).
        nonlocal reduced_data
        nonlocal sdx
        nonlocal pbar
        nonlocal stream
        reduced_data += reduced_data_list
        if stream:
            if (sdx == 0) and (not resume):
                # First chunk of a fresh run: write the header.
                pandas.DataFrame(reduced_data).to_csv(
                    output_path,
                    mode='w',
                    index=False,
                    encoding='utf-8'
                )
            else:
                pandas.DataFrame(reduced_data).to_csv(
                    output_path,
                    mode='a',
                    index=False,
                    header=False,
                    encoding='utf-8'
                )
            reduced_data.clear()
        sdx += 1
        pbar.update(sdx)
    pbar.start()
    if cpu_count > 1:
        pool = Pool(cpu_count)
    for subject in subjects:
        idx = extracted.subject_id == subject
        for task in tasks:
            jdx = extracted.task == task
            classifications = extracted[idx & jdx]
            if cpu_count > 1:
                pool.apply_async(
                    reduce_subject,
                    args=(
                        subject,
                        classifications,
                        task
                    ),
                    kwds=apply_keywords,
                    callback=callback
                )
            else:
                reduced_data_list = reduce_subject(
                    subject,
                    classifications,
                    task,
                    **apply_keywords
                )
                callback(reduced_data_list)
    if cpu_count > 1:
        pool.close()
        pool.join()
    pbar.finish()
    if stream:
        # Re-read the streamed file so the final flatten/ordering pass can run.
        reduced_csv = pandas.read_csv(output_path, encoding='utf-8')
        if 'data' in reduced_csv:
            def eval_func(a):
                # pandas uses a local namespace, make sure it has the correct imports
                # NOTE(review): eval() on CSV content — safe only because the
                # file was written by this same tool; do not point this at
                # untrusted input.
                from collections import OrderedDict  # noqa
                from numpy import nan  # noqa
                return eval(a)
            reduced_csv.data = reduced_csv.data.apply(eval_func)
            flat_reduced_data = flatten_data(reduced_csv)
        else:
            return output_path
    else:
        non_flat_data = pandas.DataFrame(reduced_data)
        flat_reduced_data = flatten_data(non_flat_data)
    if order:
        flat_reduced_data = order_columns(flat_reduced_data, front=['choice', 'total_vote_count', 'choice_count'])
    flat_reduced_data.to_csv(output_path, index=False, encoding='utf-8')
    return output_path
| StarcoderdataPython |
6433714 | <reponame>ericchen12377/Leetcode-Algorithm-Python
class Solution(object):
    def maxArea(self, height):
        """Container-with-most-water: two-pointer scan from both ends.

        :type height: List[int]
        :rtype: int
        """
        best = 0
        lo, hi = 0, len(height) - 1
        while lo < hi:
            width = hi - lo
            # The shorter wall bounds the area; advance that pointer.
            if height[lo] < height[hi]:
                best = max(best, height[lo] * width)
                lo += 1
            else:
                best = max(best, height[hi] * width)
                hi -= 1
        return best
# Quick manual check: the expected maximum area for [1, 2, 1] is 2.
height = [1, 2, 1]
p = Solution()
print(p.maxArea(height))
| StarcoderdataPython |
1655810 | from rest_framework.permissions import IsAuthenticated
from django.shortcuts import get_object_or_404
from django.db.models import Count, Q
from drinks.models import Recipe, Quantity, Ingredient, UserIngredient
from drinks.serializers import RecipeSerializer, RecipeListSerializer
from drinks.grammar import parse_search_and_filter
from .base import LazyViewSet, BookPermission
class RecipePermission(BookPermission):
    """Book-scoped permission rules for recipe objects."""
    owner_only = True

    def get_book_from_body(self, data):
        """Pull the target book id out of a request payload."""
        return data.get('book')

    def get_book_from_obj(self, obj):
        """A recipe's book is its ``book`` foreign key."""
        return obj.book_id

    def check_user_object(self, obj, user):
        """Recipes can only be modified by the creator or staff."""
        if user.is_staff:
            return True
        return obj.added_by.id == user.id
class RecipeViewSet(LazyViewSet):
    """CRUD + search endpoint for recipes, restricted to books the
    requesting user is allowed to see."""
    permission_classes = (IsAuthenticated, RecipePermission)
    queryset = Recipe.objects.all().order_by('name')
    serializer_class = RecipeSerializer
    # Lighter serializer used only for list GETs (see get_serializer_class).
    list_serializer_class = RecipeListSerializer
    filter_fields = {
        'id': ['in'],
        'name': ['exact'],
        'book_id': ['exact'],
        'created': ['gte', 'gt', 'lt'],
    }

    def get_queryset(self):
        queryset = super(RecipeViewSet, self).get_queryset()
        # Apply permissions first - users can only see what they're permitted
        # to see, either public or groups they're a member of
        permissions = Q(book__public=True) | Q(book__users=self.request.user)
        queryset = queryset.filter(permissions)
        # Set up eager loading to avoid N+1 selects
        queryset = self.get_serializer_class().setup_eager_loading(queryset)
        # Create various annotations
        queryset = queryset.annotate(comment_count=Count('comments', distinct=True))
        # ul_count: how many of the requesting user's lists contain the recipe.
        queryset = queryset.annotate(ul_count=Count(
            'userlistrecipe',
            filter=Q(userlistrecipe__user_list__user=self.request.user),
            distinct=True
        ))
        # uc_count: how many comments the requesting user left on the recipe.
        queryset = queryset.annotate(uc_count=Count(
            'comments',
            filter=Q(comments__user=self.request.user),
            distinct=True
        ))
        return queryset

    def get_serializer_class(self):
        # Compact serializer only for the top-level list GET.
        if self.request.path == '/api/v1/recipes/' and self.request.method == 'GET':
            return self.list_serializer_class
        return self.serializer_class

    def filter_queryset(self, *args, **kwargs):
        qs = self.get_queryset()
        # Searching names and ingredients
        if self.request.GET.get('search'):
            terms = self.request.GET.getlist('search')
            for term in terms:
                qs = parse_search_and_filter(term, qs, self.request.user)
            qs = qs.distinct()
        return super(RecipeViewSet, self).filter_queryset(qs)

    def perform_create(self, serializer):
        # Stamp the authenticated user as the recipe's creator.
        serializer.save(added_by=self.request.user)

    def get_object(self):
        """
        Get recipe by slug or PK
        """
        queryset = self.get_queryset()
        filter = {}
        pk = self.kwargs['pk']
        if pk.isdigit():
            filter['pk'] = pk
        else:
            filter['slug'] = pk
        obj = get_object_or_404(queryset, **filter)
        self.check_object_permissions(self.request, obj)
        return obj
| StarcoderdataPython |
8044234 | <reponame>yalina2787/NovPython
# Demonstrate simple variable assignment and printing.
print("This file tests variable definitions. ")
number_value = 5
text_value = "abc"
print(number_value)
print(text_value)
246867 | <reponame>akshayanadahalli/vpp_ietf97
# Declare this package as a setuptools-style namespace package so other
# distributions can contribute modules under the same package name.
__import__('pkg_resources').declare_namespace(__name__)
# Re-export the public vpp_papi API at the package level.
from . vpp_papi import *
| StarcoderdataPython |
6440482 | <reponame>brandonjbryant/regression-exercises<filename>evaluate.py
import math
import sklearn.metrics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def residuals(actual, predicted):
    """Element-wise residuals (actual - predicted)."""
    return actual - predicted


def sse(actual, predicted):
    """Sum of squared errors."""
    squared_errors = residuals(actual, predicted) ** 2
    return squared_errors.sum()


def mse(actual, predicted):
    """Mean squared error."""
    n_observations = actual.shape[0]
    return sse(actual, predicted) / n_observations


def rmse(actual, predicted):
    """Root mean squared error."""
    return math.sqrt(mse(actual, predicted))


def ess(actual, predicted):
    """Explained sum of squares (predictions vs. the mean of actual)."""
    explained_deviation = predicted - actual.mean()
    return (explained_deviation ** 2).sum()


def tss(actual):
    """Total sum of squares (actual vs. its own mean)."""
    total_deviation = actual - actual.mean()
    return (total_deviation ** 2).sum()


def regression_errors(actual, predicted):
    """Bundle SSE/ESS/TSS/MSE/RMSE for a model into a pandas Series."""
    return pd.Series({
        'sse': sse(actual, predicted),
        'ess': ess(actual, predicted),
        'tss': tss(actual),
        'mse': mse(actual, predicted),
        'rmse': rmse(actual, predicted),
    })


def baseline_mean_errors(actual):
    """Error metrics for the mean-of-actual baseline model."""
    baseline_prediction = actual.mean()
    return {
        'sse': sse(actual, baseline_prediction),
        'mse': mse(actual, baseline_prediction),
        'rmse': rmse(actual, baseline_prediction),
    }


def better_than_baseline(actual, predicted):
    """True when the model RMSE beats the mean-baseline RMSE."""
    return rmse(actual, predicted) < rmse(actual, actual.mean())
def model_significance(ols_model):
    """Extract R^2 and the F-test p-value from a fitted statsmodels OLS."""
    r_squared = ols_model.rsquared
    p_value = ols_model.f_pvalue
    return {
        'r^2 -- variance explained': r_squared,
        'p-value -- P(data|model == baseline)': p_value,
    }
def plot_residuals(actual, predicted):
    """Scatter-plot residuals against actual values with a zero reference line."""
    residuals = actual - predicted
    # Dotted zero-error reference line across the range of actual values.
    plt.hlines(0, actual.min(), actual.max(), ls=':')
    plt.scatter(actual, residuals)
    plt.ylabel('residual ($y - \hat{y}$)')
    plt.xlabel('actual value ($y$)')
    plt.title('Actual vs Residual')
    plt.show()
9666305 | <filename>plugin_bot/plugins/__init__.py<gh_stars>0
"""Module for the BasePlugin class, as well as basic plugin recipies.
"""
import logging
class BasePlugin:
    """A convenience class to inherit from when making plugins."""

    def __init_subclass__(cls, **kwargs):
        """Automatically create the unique logger object for the subclass.

        The logger is named ``PluginBot.<SubclassName>``, so every plugin
        gets its own child of the ``PluginBot`` logger hierarchy.
        """
        # Bug fix: propagate to the next __init_subclass__ in the MRO so
        # cooperative multiple inheritance keeps working.
        super().__init_subclass__(**kwargs)
        cls.logger = logging.getLogger(f'PluginBot.{cls.__name__}')
| StarcoderdataPython |
6513097 | <filename>tcrdist/mixcr.py
import sys
import re
import pandas as pd
import numpy as np
from tcrdist import repertoire_db
import warnings
def mixcr_to_tcrdist2(chain:str,
                      organism:str,
                      seqs_fn:str = None,
                      clones_fn:str = None):
    """
    Converts .clns.txt or .result.txt outputs from mixcr to tcrdist2
    formatted input.

    Parameters
    ----------
    chain : str
        'alpha', 'beta', 'gamma', or 'delta'
    organism : str
        'human' or 'mouse'
    seqs_fn : str or None
        path to mixcr parsed sequences file, which can contain duplicates
    clones_fn : str or None
        path to mixcr parsed clones file (.clns.txt); clones have a clone_id and count

    Returns
    -------
    df : pd.DataFrame
        DataFrame with column names specified in notes.

    Notes
    -----
    A seqs_fn or clones_fn may be passed as input but not both.

    Columns of output `df` are:
    "v_[abgd]_gene", "d_[abgd]_gene", "j_[abgd]_gene",
    "cdr3_[abgd]_nucseq", "cdr3_[abgd]_aa" where [abgd] matches the
    chain argument.

    If clones_fn is specified, the df returned will contain
    "clone_id" and "count" columns.
    """
    if seqs_fn is not None and clones_fn is not None:
        raise ValueError("one of seq_fn or clones_fn must be left blank")
    if seqs_fn is None and clones_fn is None:
        raise ValueError("one of seq_fn or clones_fn must be provided")

    gene_names = {'alpha': ['v_a_gene', 'd_a_gene', 'j_a_gene', "cdr3_a_nucseq", "cdr3_a_aa"],
                  'beta' : ['v_b_gene', 'd_b_gene', 'j_b_gene', "cdr3_b_nucseq", "cdr3_b_aa"],
                  'gamma': ['v_g_gene', 'd_g_gene', 'j_g_gene', "cdr3_g_nucseq", "cdr3_g_aa"],
                  'delta': ['v_d_gene', 'd_d_gene', 'j_d_gene', "cdr3_d_nucseq", "cdr3_d_aa"]}
    if chain not in gene_names.keys():
        raise KeyError("chain must be 'alpha','beta','gamma', or 'delta'")

    # Shared rename mapping from the mixcr column names to tcrdist2 names.
    rename_map = {'allVHitsWithScore': gene_names[chain][0],
                  'allDHitsWithScore': gene_names[chain][1],
                  'allJHitsWithScore': gene_names[chain][2],
                  'nSeqCDR3'         : gene_names[chain][3],
                  'aaSeqCDR3'        : gene_names[chain][4]}

    if seqs_fn is not None:
        # Bug fix: `sep` must be passed by keyword; the positional form
        # (`pd.read_csv(fn, "\t")`) is deprecated and removed in pandas 2.x.
        seqs_df = pd.read_csv(seqs_fn, sep="\t")
        seqs_df = seqs_df[['allVHitsWithScore', 'allDHitsWithScore', 'allJHitsWithScore', 'nSeqCDR3', 'aaSeqCDR3']].copy()
        for k in ['allVHitsWithScore', 'allDHitsWithScore', 'allJHitsWithScore']:
            # cleanup: take only the top hit, convert allele *00 to *01,
            # and reconcile TRAV..DV.. names to the TRAV../DV.. form.
            seqs_df[k] = seqs_df[k].apply(_take_top_mixcr_gene_hit).\
                apply(_allele_00_to_01).\
                apply(_change_TRAVDV_to_TRAVdashDV)
        df = seqs_df.rename(columns=rename_map).copy()
    elif clones_fn is not None:
        # Bug fix: keyword `sep` (see above).
        clones_df = pd.read_csv(clones_fn, sep="\t")
        clones_df = clones_df[['cloneId', 'cloneCount', 'allVHitsWithScore', 'allDHitsWithScore', 'allJHitsWithScore', 'nSeqCDR3', 'aaSeqCDR3']].copy()
        for k in ['allVHitsWithScore', 'allDHitsWithScore', 'allJHitsWithScore']:
            # cleanup: same top-hit / allele / TRAVDV normalisation as above.
            clones_df[k] = clones_df[k].apply(_take_top_mixcr_gene_hit).\
                apply(_allele_00_to_01).\
                apply(_change_TRAVDV_to_TRAVdashDV)
        # Clone files additionally carry an id and a cell count per clone.
        clones_rename = dict(rename_map)
        clones_rename.update({'cloneId': "clone_id", 'cloneCount': "count"})
        df = clones_df.rename(columns=clones_rename).copy()
    return(df)
def remove_entries_with_invalid_vgene(df, chain:str, organism:str):
    """
    Uses _validate_gene_names to remove cells, or clones rows that lack a valid v_gene name

    This is based on checking gene name against:
    repertoire_db.RefGeneSet(db_file = "gammadelta_db.tsv" OR "alphabesta_db.tsv").all_genes
    It also removes genes not associated with the specified chain.
    Reports any gene names deemed invalid.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame produced by mixcr.mixcr_to_tcrdist2
    chain : str
        'alpha', 'beta', 'gamma', or 'delta'
    organism : str
        'human' or 'mouse'

    Returns
    -------
    df : pd.DataFrame
        a copied subset of the original dataframe containing only rows with valid v gene names
    """
    gene_names = {'alpha': ['v_a_gene', 'd_a_gene', 'j_a_gene'],
                  'beta' : ['v_b_gene', 'd_b_gene', 'j_b_gene'],
                  'gamma': ['v_g_gene', 'd_g_gene', 'j_g_gene'],
                  'delta': ['v_d_gene', 'd_d_gene', 'j_d_gene']}
    v_gene_col = gene_names[chain][0]
    valid_mask = _validate_gene_names(series=df[v_gene_col], chain=chain, organism=organism)
    invalid_rows = df[valid_mask == False]
    n_invalid_v_names = invalid_rows.shape[0]
    if n_invalid_v_names > 0:
        sys.stderr.write(f"Because of invalid v_gene names, dropping {n_invalid_v_names} with names:\n")
        for n in invalid_rows[v_gene_col].unique():
            sys.stderr.write(f"{n}\n")
    return df[valid_mask].copy()
def _valid_cdr3(cdr3):
"""
Examples
--------
>>> _valid_cdr3("AAAA")
True
>>> _valid_cdr3("AA.A")
False
"""
amino_acids = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
valid = np.all([aa in amino_acids for aa in cdr3])
return valid
def remove_entries_with_invalid_cdr3(df, chain:str):
    """Drop rows whose CDR3 amino-acid sequence contains non-amino-acid
    characters (per _valid_cdr3), warning about what was dropped.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame produced by mixcr.mixcr_to_tcrdist2
    chain : str
        'alpha', 'beta', 'gamma', or 'delta'

    Returns
    -------
    df : pd.DataFrame
        a copied subset of the original dataframe containing only rows with valid CDR3s
    """
    chain_names = {'alpha': 'cdr3_a_aa',
                   'beta' : 'cdr3_b_aa',
                   'gamma': 'cdr3_g_aa',
                   'delta': 'cdr3_d_aa'}
    cdr3_x_aa = chain_names[chain]
    v = df[cdr3_x_aa].apply(lambda x: _valid_cdr3(x))
    n_invalid_cdr3 = df[v == False].shape[0]
    invalid_names = df[v == False][cdr3_x_aa].unique()
    # Bug fixes: removed a leftover debug print of the column name; the
    # warning no longer hard-codes "cdr3a" for every chain; and (matching
    # remove_entries_with_invalid_vgene) nothing is reported when no rows
    # are dropped.
    if n_invalid_cdr3 > 0:
        warnings.warn(f"Because of invalid {cdr3_x_aa} values, dropping {n_invalid_cdr3}: {invalid_names}\n")
    return df[v].copy()
def _change_TRAVDV_to_TRAVdashDV(s:str):
"""
Reconciles mixcr name like TRAV29/DV5*01 to tcrdist2 name TRAV29DV5*01
Parameters
----------
s : str
Examples
--------
>>> _change_TRAVDV_to_TRAVdashDV('TRAV29DV5*01')
'TRAV29/DV5*01'
>>> _change_TRAVDV_to_TRAVdashDV('TRAV38-2DV8*01')
'TRAV38-2/DV8*01'
>>> _change_TRAVDV_to_TRAVdashDV('TRDV*01')
'TRDV*01'
Notes
-----
This reconciles such gene names to match the tcrdist2 reference db.
see database for more details: repertoire_db.RefGeneSet(db_file = "gammadelta_db.tsv").all_genes
"""
if isinstance(s, str):
m = re.match(pattern = "(TRAV[0-9]+)(DV.*)", string = s)
m2 = re.match(pattern = "(TRAV[0-9]+-[1-2])(DV.*)", string = s)
if m:
new_s = "/".join(m.groups())
return(new_s)
elif m2:
new_s = "/".join(m2.groups())
return(new_s)
else:
return(s)
else:
return(np.NaN)
def _allele_00_to_01(s:str):
"""
Converts gene names from X*00 to X*01
Parameters
----------
s : str
Example
-------
>>> _allele_00_to_01('TRDD3*00')
'TRDD3*01'
"""
if isinstance(s, str):
allele01 = s.replace("*00","*01")
else:
allele01 = np.NaN
return(allele01)
def _take_top_mixcr_gene_hit(s):
"""
Parameters
----------
s : str
Examples
--------
>> _take_top_mixcr_gene_hit('TRDD3*00(45),TRDD2*00(40)')
'TRDD3*00'
>> _take_top_mixcr_gene_hit('TRDD3*00(45)')
'TRDD3*00'
>> _take_top_mixcr_gene_hit(None)
None
Tests
-----
assert _take_top_mixcr_gene_hit('TRDD3*00(45),TRDD2*00(40)') == 'TRDD3*00'
assert _take_top_mixcr_gene_hit('TRDD3*00(45)') == 'TRDD3*00'
assert isinstance(_take_top_mixcr_gene_hit(np.NaN),float)
assert _take_top_mixcr_gene_hit(np.NaN) is np.NaN
"""
if isinstance(s, str):
top_hit = s.split(",")[0].split("(")[0]
else:
top_hit = np.NaN
return(top_hit)
def _validate_gene_names(series, chain:str, organism:str):
    """
    For efficiency reasons define the list of valid genes based on organism and chain,
    then test an entire series of gene names against it.

    Parameters
    ----------
    series : pd.Series
        series containing gene names to be validated
    chain : str
        'alpha','beta','gamma', or 'delta'
    organism : str
        'human' or 'mouse"

    Returns
    -------
    valid : pd.Series
        series of booleans where True means name is valid and in tcrdist database

    Example
    -------
    >>> df = pd.DataFrame({'v_d_gene':['TRDV3*01','TRDV1*01', 'TRAV29/DV5*01', 'TRAV38-2/DV8*01', "TRBV1*01"]})
    >>> v = mixcr._validate_gene_names( series = df['v_d_gene'], chain = 'delta', organism = 'human')
    >>> assert np.all(v == pd.Series([True,True,True,True,False]))
    True
    """
    # Check inputs
    if organism not in ['human','mouse']:
        raise KeyError("organism must be 'human' or 'mouse")
    if chain not in ['alpha','beta','gamma','delta']:
        raise KeyError("chain must be 'alpha','beta','gamma', or 'delta'")
    if chain in ['gamma','delta']:
        # Lookup appropriate gammadelta_db
        all_genes = repertoire_db.RefGeneSet(db_file = "gammadelta_db.tsv").all_genes
        # Lookup appropriate organism
        all_genes= all_genes[organism]
        # NOTE(review): in this database gamma genes appear to be stored
        # under chain code 'A' and delta under 'B' — confirm against
        # repertoire_db before changing.
        if chain == 'gamma':
            # After this step all_genes is a *list* of gene names.
            all_genes = [x for x in all_genes if all_genes[x].chain =='A']
        if chain == 'delta':
            all_genes = [x for x in all_genes if all_genes[x].chain =='B']
    if chain in ['alpha','beta']:
        # Lookup appropriate alphabeta_db
        all_genes = repertoire_db.RefGeneSet(db_file = "alphabeta_db.tsv").all_genes
        # Lookup appropriate organism
        all_genes = all_genes[organism]
        if chain == 'alpha':
            all_genes = [x for x in all_genes if all_genes[x].chain =='A']
        if chain == 'beta':
            all_genes = [x for x in all_genes if all_genes[x].chain =='B']
    # Membership test against the filtered gene-name list.
    valid = series.apply(lambda x : x in all_genes )
    return(valid)
| StarcoderdataPython |
3205424 | #!/usr/bin/python3
# (C) <NAME> 2019
import os,sys,tty,termios
from datetime import datetime
from rpi.inputs import *
from rpi.camerainfo import *
# Key codes recognised by the interactive prompts.
ESC=27
ENTER=13
SPACE=32
# Capture defaults (exposure in the unit used later in the script).
exposure=1
framenumber=1
frame_default=1
digits=4
digits_default=4
quality_default=90
artist=""
artistfile="artist.txt"
# Uncomment to override red and blue gains
# Calibration gains for Manfrotto Lumie LEDs
#awbg_red=1.6
#awbg_blue=1.4
# Save the terminal settings so getch() can restore them after raw-mode reads.
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
    """Read a single character from stdin in raw (unbuffered) mode.

    The terminal is always restored to the settings captured at startup
    (``old_settings``), even if the read raises.
    """
    try:
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd,termios.TCSADRAIN,old_settings)
    return ch
print("Raspberry Pi capture pictures")
print("")
if camera_detected==0:
print("Raspberry Pi camera module not found!")
exit(0)
quality_default=90
quality=inputValue("image quality",1,100,quality_default,"","Value out of range!",True)
print("\nList disk and partitions:")
os.system('lsblk')
print("\nCurrent directory:")
os.system("pwd")
path=input('\nPath to images (current directory: <Enter>): ')
name=input('Project name (default=pic: <Enter>): ')
iso=100
iso_default=100
iso_modes=[100,200,320,400,500,640,800]
iso=inputListValue("ISO",iso_modes,iso_default,"Not a valid ISO value!",False)
print("")
# Exposure unit: µs
exp_min=1
exp_max=330000
exp_default=2000
exposure=inputValue("exposure time",exp_min,exp_max,exp_default,"µs","Exposure is out of range!",True)
# Gain value: 1.0 to 12.0 for the IMX219 sensor on Camera Module V2
print("")
# Make sure the manual AWB gains exist before they are used below (and when
# they are unconditionally written to the log file later).  They are normally
# supplied by rpi.camerainfo or by uncommenting the overrides at the top of
# this file; fall back to the Manfrotto Lumie calibration values so the
# script cannot crash with a NameError when neither source defines them.
try:
    awbg_red
except NameError:
    awbg_red=1.6
try:
    awbg_blue
except NameError:
    awbg_blue=1.4
awb_on="n"
default_awb="y"
awb_on=inputYesNo("AWB","AWB mode on",default_awb)
if awb_on=="n":
    print("")
    awbg_red=inputValue("red gain",1.0,8.0,awbg_red,"","Value out of range!",False)
    awbg_blue=inputValue("blue gain",1.0,8.0,awbg_blue,"","Value out of range!",False)
# Digits used for the zero-padded frame number in the file names
min_digits=len(str(framenumber))
max_digits=8
if min_digits>digits_default:
    digits_default=min_digits
print("")
digits=inputValue("digits",min_digits,max_digits,digits_default,"","Digits is out of range!",True)
# Start frame (the upper bound is limited by the digit count)
frame_min=1
frame_max=10**digits-1
frame_default=1
framenumber=inputValue("first frame",frame_min,frame_max,frame_default,"","Frame number is out of range!")
# Create a log file
logname=""
if (path!=""):
    logname=path+"/"
    artistfile=path+"/"+artistfile
if name=="":
    name="pic"
logname+=name+".log"
now = datetime.now()
dt_string = now.strftime("%Y.%m.%d-%H:%M:%S")
file=open(logname,"w")
file.write("Log created on "+dt_string+"\n\n")
if (path!=""):
    file.write("File path: "+path+"\n\n")
else:
    file.write("File path: Not defined\n\n")
# Optional artist name read from artist.txt (used for EXIF tags)
try:
    f=open(artistfile,"r")
    artist=f.readline()
    artist=artist.strip()
    print("Artist: "+artist)
    f.close()
except IOError:
    artist=""
#    print("No artist.txt file")
print("")
quick_preview=inputYesNo("quick preview","Quick preview mode","y")
if artist!="":
    file.write("Artist: "+artist+"\n")
# Record all capture parameters in the log
file.write("Capture pictures parameters:\n")
file.write("Resolution: "+str(camera_maxx)+"x"+str(camera_maxy)+"\n")
file.write("Sensor: "+camera_revision+"\n")
file.write("Quality: "+str(quality)+"\n")
file.write("ISO value: "+str(iso)+"\n")
file.write("Exposure: "+str(exposure)+" µs\n")
file.write("AWB mode: ")
if awb_on=="y":
    file.write("Enabled\n")
else:
    file.write("Disabled\n")
# NOTE(review): awbg_red/awbg_blue must be defined even when AWB is enabled;
# they are presumably supplied by rpi.camerainfo -- confirm.
file.write("Red gain: "+str(awbg_red)+"\n")
file.write("Blue gain: "+str(awbg_blue)+"\n")
file.write("Digits: "+str(digits)+"\n")
file.write("Start frame: "+str(framenumber)+"\n")
file.write("First file name: "+name+"_"+str(framenumber).rjust(digits,'0')+".png\n\n")
print("\nStart capturing images: ENTER")
print("Capture image: SPACE")
print("Exit program: ESC\n")
while True:
ch=getch()
if ch==chr(ENTER):
print("Capture mode enabled.")
break
if ch==chr(ESC):
file.close()
sys.exit()
while framenumber<10**digits:
ch=getch()
if ch==chr(SPACE):
fname=name+"_"+str(framenumber).rjust(digits,'0')
print(fname)
framenumber+=1
tmp="raspistill "
if quick_preview=="n":
tmp+="-n "
tmp+="-t 1 "
tmp+="-ISO "+str(iso)+" "
tmp+="-q "
tmp+=str(quality)+" "
tmp+="-ss "+str(exposure)+" "
# tmp+="-ex off "
#tmp+="-bm -drc high "
if awb_on=="n":
tmp+="-awb off -awbg "+str(awbg_red)+","+str(awbg_blue)+" "
if artist!="":
tmp+='-x IFD0.Artist="'+artist+'" '
tmp+='-x IFD0.Copyright="'+artist+'" '
if (path!=""):
tmp+='-o '+path+'/'+fname
else:
tmp+='-o '+fname
tmp=tmp+".png"
os.system(tmp)
file.write(tmp+"\n")
if ch==chr(ESC):
break
file.close()
| StarcoderdataPython |
77704 | <reponame>garysnake/structural-probes<gh_stars>0
from run_experiment import setup_new_experiment_dir, execute_experiment
import yaml
import torch
# CHANGE PATH
# Absolute paths to the experiment YAML config and the results directory.
CONFIG_FILE = '/home/garysnake/Desktop/structural-probes/experiments/config/bert_base_distance_cola.yaml'
EXPERIMENT_NAME = '/home/garysnake/Desktop/structural-probes/experiments/results/bert_base_distance_cola'
# Random seed recorded in the experiment arguments
SEED = 123
class Object:
    """Empty attribute container, used as a stand-in for an argparse namespace."""
    pass
# Build a minimal argparse-like namespace for run_experiment.
cli_args = Object()
cli_args.experiment_config = CONFIG_FILE
cli_args.results_dir = EXPERIMENT_NAME
cli_args.train_probe = True
cli_args.report_results = True
cli_args.seed = SEED

# Load the experiment configuration.  Use a context manager so the config
# file handle is closed (the original leaked the handle from a bare open()).
with open(cli_args.experiment_config) as config_file:
    yaml_args = yaml.load(config_file, Loader=yaml.FullLoader)

setup_new_experiment_dir(cli_args, yaml_args, cli_args.results_dir)

# Run on the first CUDA device when available, otherwise the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
yaml_args['device'] = device

execute_experiment(yaml_args, train_probe=cli_args.train_probe, report_results=cli_args.report_results)
6527459 | <reponame>Harry73/Senpai
from lib import Command
from lib.Command import CommandType, register_command
from lib.Message import Message
def _get_help(request, author, channel, command_types, base_command):
    """Build the help responses for a '/<base_command> [command]' request.

    With no *request*, compile an overview of all commands (clips omitted);
    with a specific *request*, return that command's help text, or an error
    message if it is unknown.  Responses are sent to *author* as a DM, with
    a "Sent you a DM." notice added when invoked outside a DM channel.
    """
    # Flatten every help entry of the requested command types into a single
    # name -> help-text mapping.
    available = {
        name: text
        for kind in command_types
        for name, text in Command.HELP_TEXT[kind].items()
    }

    if request:
        if request not in available:
            return Message(message=f'No {request} command', channel=author,
                           cleanup_original=False, cleanup_self=False)
        responses = [Message(message=available[request], channel=author,
                             cleanup_original=False, cleanup_self=False)]
    else:
        # Overview: one section per command type, wrapped in a code fence.
        sections = ['```']
        sections.append(f'Use /{base_command} <command> for more information about a command.\n\n')
        for kind in command_types:
            # Skip reporting the clips, they make the help text too large
            if kind == CommandType.CLIP:
                continue
            listing = ', '.join(sorted(f'/{name}' for name in Command.HELP_TEXT[kind]))
            sections.append(f'{kind.name.capitalize()} commands: \n{listing}\n\n')
        sections.append('```')
        responses = [Message(message=''.join(sections), channel=author,
                             cleanup_original=False, cleanup_self=False)]

    if 'Direct Message' not in str(channel) and responses:
        responses.append(Message(message='Sent you a DM.'))
    return responses
@register_command(lambda m: m.content.startswith('/senpai'))
async def senpai(bot, message):
    """```
    Senpai's help function. Responses will be PM'd.
    Usages:
    * /senpai -> gives a list of available commands
    * /senpai command -> gives information about a specific command
    ```"""
    # NOTE: the docstring above doubles as the user-facing help text, so its
    # exact wording and Markdown code fences must be preserved.
    # '/senpai ' is 8 characters; slice off the prefix and pass the remaining
    # command name (possibly empty) to the shared help builder.
    return _get_help(message.content[8:].strip(), message.author, message.channel,
                     [CommandType.GENERAL, CommandType.VOICE, CommandType.CLIP], 'senpai')
@register_command(lambda m: m.content.startswith('/secrets'), command_type=CommandType.OWNER)
async def secrets(bot, message):
    """```
    List Senpai's owner-only commands. Responses will be PM'd.
    Usage:
    * /secrets
    ```"""
    # NOTE: the docstring above doubles as the user-facing help text.
    # '/secrets ' is 9 characters; the remainder is an optional command name.
    return _get_help(message.content[9:].strip(), message.author, message.channel,
                     [CommandType.OWNER], 'secrets')
73114 | <filename>scripts/angle_integrals.py
#!/home/colm.talbot/virtualenvironents/py-2.7/bin/python
from __future__ import division, print_function
import numpy as np
import pandas as pd
import sys
from gwmemory.angles import gamma
"""
Script to calculate the spherical harmonic decomposition of the output memory.
<NAME>
"""
# Azimuthal-number difference between the two modes in each pair, taken from
# the command line.
delta_m = int(sys.argv[1])
# delta_m = 0

ells = np.arange(2, 5, 1)

# One column per (l1 m1, l2 m2) pair; the 'l' column indexes the output modes.
coefficients = pd.DataFrame()
coefficients['l'] = np.arange(2, 21, 1)

for ell1 in ells:
    for ell2 in ells:
        # if ell2 > ell1:
        #     continue
        for m1 in np.arange(-ell1, ell1+1, 1):
            m2 = m1 - delta_m
            # Skip pairs whose azimuthal numbers fall outside |m| <= ell
            if (m1 < -ell1) or (m1 > ell1) or (m2 < -ell2) or (m2 > ell2):
                continue
            lm1 = str(ell1) + str(m1)
            lm2 = str(ell2) + str(m2)
            print(lm1+lm2)
            # Store the real part of the gamma overlap coefficients
            coefficients[lm1+lm2] = np.real(gamma(lm1, lm2))

out_file = "data/gamma_coefficients_delta_m_{}.dat".format(delta_m)
print("Saving to {}".format(out_file))
coefficients.to_csv(out_file, sep='\t', index=False)
| StarcoderdataPython |
4852595 | <gh_stars>10-100
__version__ = "1.1.0"
from nvelope.nvelope import *
from nvelope.conversions import *
| StarcoderdataPython |
132262 | <filename>road_damage.py
# -*- coding: utf-8 -*-
import six.moves.urllib as urllib
import os
# Verify urllib.request is importable (i.e. we are on Python 3);
# six.moves.urllib above keeps compatibility with older call sites.
try:
    import urllib.request
except ImportError:
    raise ImportError('You should use Python 3.x')

# Download the dataset archive only when it is not already on disk.
if not os.path.exists('./RoadDamageDataset.tar.gz'):
    url_base = 'https://s3-ap-northeast-1.amazonaws.com/mycityreport/RoadDamageDataset.tar.gz'
    urllib.request.urlretrieve(url_base, './RoadDamageDataset.tar.gz')
    print("Download RoadDamageDataset.tar.gz Done")
else:
    print("You have RoadDamageDataset.tar.gz")

# Same for the pre-trained models archive.
if not os.path.exists('./trainedModels.tar.gz'):
    url_base = 'https://s3-ap-northeast-1.amazonaws.com/mycityreport/trainedModels.tar.gz'
    urllib.request.urlretrieve(url_base, './trainedModels.tar.gz')
    print("Download trainedModels.tar.gz Done")
else:
    print("You have trainedModels.tar.gz")
from xml.etree import ElementTree
from xml.dom import minidom
import collections
import os
import matplotlib.pyplot as plt
import matplotlib as matplot
import seaborn as sns
base_path = os.getcwd() + '/RoadDamageDataset/'

damageTypes = ["D00", "D01", "D10", "D11", "D20", "D40", "D43", "D44"]

# govs corresponds to municipality name.
govs = ["Adachi", "Chiba", "Ichihara",
        "Muroran", "Nagakute", "Numazu", "Sumida"]

# Count the total number of images and of bounding-box labels.
cls_names = []
total_images = 0
for gov in govs:

    file_list = os.listdir(base_path + gov + '/Annotations/')

    for file in file_list:
        total_images = total_images + 1
        if file == '.DS_Store':
            pass
        else:
            try:
                # Context manager closes the annotation file (the original
                # leaked the handle), and `except Exception` replaces a bare
                # except so KeyboardInterrupt/SystemExit still propagate.
                with open(base_path + gov + '/Annotations/' + file) as infile_xml:
                    tree = ElementTree.parse(infile_xml)
                root = tree.getroot()
                for obj in root.iter('object'):
                    cls_name = obj.find('name').text
                    cls_names.append(cls_name)
            except Exception:
                # Unparseable annotation files are simply skipped.
                pass

print("total")
print('# of images: ' + str(total_images))
print("# of labels:" + str(len(cls_names)))
# the number of each class labels.
import collections
count_dict = collections.Counter(cls_names)
cls_count = []
for damageType in damageTypes:
    print(str(damageType) + ' : ' + str(count_dict[damageType]))
    cls_count.append(count_dict[damageType])

sns.set_palette("winter", 8)
# NOTE(review): positional x/y arguments were removed in newer seaborn
# releases; sns.barplot(x=damageTypes, y=cls_count) may be required --
# confirm against the pinned seaborn version.
sns.barplot(damageTypes, cls_count)
# the number of each class labels for each municipality
for gov in govs:

    cls_names = []
    total_images = 0

    file_list = os.listdir(base_path + gov + '/Annotations/')

    for file in file_list:
        total_images = total_images + 1
        if file == '.DS_Store':
            pass
        else:
            try:
                # Context manager closes the annotation file (the original
                # leaked the handle); `except Exception` replaces a bare
                # except so system exits still propagate.
                with open(base_path + gov + '/Annotations/' + file) as infile_xml:
                    tree = ElementTree.parse(infile_xml)
                root = tree.getroot()
                for obj in root.iter('object'):
                    cls_name = obj.find('name').text
                    cls_names.append(cls_name)
            except Exception:
                pass

    print(gov)
    print("# of images:" + str(total_images))
    print("# of labels:" + str(len(cls_names)))

    count_dict = collections.Counter(cls_names)
    cls_count = []
    for damageType in damageTypes:
        print(str(damageType) + ' : ' + str(count_dict[damageType]))
        cls_count.append(count_dict[damageType])

    print('**************************************************')
import cv2
import random
def draw_images(image_file):
    """Load the JPEG for annotation file *image_file* and draw its boxes.

    *image_file* is an annotation file name like 'Adachi_00001.xml'; the
    municipality prefix selects the sub-directory.  Returns the BGR image
    with every ground-truth bounding box and class label drawn in green.
    """
    gov = image_file.split('_')[0]
    img = cv2.imread(base_path + gov + '/JPEGImages/' +
                     image_file.split('.')[0] + '.jpg')

    # Parse the annotation inside a context manager so the file handle is
    # closed (the original left it open).
    with open(base_path + gov + '/Annotations/' + image_file) as infile_xml:
        tree = ElementTree.parse(infile_xml)
    root = tree.getroot()

    for obj in root.iter('object'):
        cls_name = obj.find('name').text
        xmlbox = obj.find('bndbox')
        xmin = int(xmlbox.find('xmin').text)
        xmax = int(xmlbox.find('xmax').text)
        ymin = int(xmlbox.find('ymin').text)
        ymax = int(xmlbox.find('ymax').text)

        font = cv2.FONT_HERSHEY_SIMPLEX
        # class label just above the box
        cv2.putText(img, cls_name, (xmin, ymin - 10),
                    font, 1, (0, 255, 0), 2, cv2.LINE_AA)
        # bounding box
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)

    return img
# For each damage type, collect the annotation files of images whose flag in
# "<type>_trainval.txt" is 1, then (optionally) visualise a random sample.
for damageType in damageTypes:
    tmp = []
    for gov in govs:
        # Context manager closes the index file (the original leaked the
        # handle); 'index_file' also avoids shadowing the builtin 'file'.
        with open(base_path + gov + '/ImageSets/Main/%s_trainval.txt' % damageType, 'r') as index_file:
            for line in index_file:
                line = line.rstrip('\n').split('/')[-1]
                # Three space-separated fields: the image id is first and
                # the inclusion flag is third.
                if line.split(' ')[2] == '1':
                    tmp.append(line.split(' ')[0] + '.xml')

    random.shuffle(tmp)
    fig = plt.figure(figsize=(6, 6))
    # (sample visualisation intentionally left commented out, as in the
    # original)
    # for number, image in enumerate(tmp[0:1]):
    #     img = draw_images(image)
    #     plt.subplot(1, 1, number)
    #     plt.axis('off')
    #     plt.title('The image including ' + damageType)
    #     plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
5196759 | <filename>venv/lib/python3.9/site-packages/PyObjCTools/TestSupport.py
"""
Helper code for implementing unittests.
This module is unsupported and is primairily used in the PyObjC
testsuite.
"""
import contextlib
import gc as _gc
import os as _os
import re as _re
import struct as _struct
import sys as _sys
import typing as _typing
import unittest as _unittest
import subprocess as _subprocess
import pickle as _pickle
from distutils.sysconfig import get_config_var as _get_config_var
import objc
# Ensure that methods in this module get filtered in the tracebacks
# from unittest
__unittest = False

# Have a way to disable the autorelease pool behaviour
_usepool = not _os.environ.get("PYOBJC_NO_AUTORELEASE")

# XXX: Python 2 Compatibility for the PyObjC Test Suite
# On Python 3 the Python 2 builtins below no longer exist; alias them to
# their Python 3 equivalents so tests can reference either name.
try:
    unicode
except NameError:
    unicode = str

try:
    long
except NameError:
    long = int

try:
    basestring
except NameError:
    basestring = str

try:
    unichr
except NameError:
    unichr = chr
def _typemap(tp):
if tp is None:
return None
return (
tp.replace(b"_NSRect", b"CGRect")
.replace(b"_NSPoint", b"CGPoint")
.replace(b"_NSSize", b"CGSize")
)
@contextlib.contextmanager
def pyobjc_options(**kwds):
    """Temporarily set attributes on ``objc.options``.

    Each keyword names an option; the previous values are restored on exit,
    even when the body raises.
    """
    saved = {}
    try:
        for name, value in kwds.items():
            saved[name] = getattr(objc.options, name)
            setattr(objc.options, name, value)
        yield
    finally:
        for name, value in saved.items():
            setattr(objc.options, name, value)
def sdkForPython(_cache=[]):  # noqa: B006, M511
    """
    Return the SDK version used to compile Python itself,
    or None if no framework was used
    """
    # The mutable default argument is deliberately used as a per-process
    # memoization cache (hence the noqa markers above).
    if not _cache:
        cflags = _get_config_var("CFLAGS")
        m = _re.search(r"-isysroot\s+([^ ]*)(\s|$)", cflags)
        if m is None:
            # Python was not built against an explicit SDK.
            _cache.append(None)
            return None

        path = m.group(1)
        if path == "/":
            # Building against '/' means the running OS itself is the SDK.
            result = tuple(map(int, os_release().split(".")))
            _cache.append(result)
            return result

        # Derive the version from the SDK directory name,
        # e.g. 'MacOSX10.11.sdk' -> '10.11' (a trailing 'u' marks the
        # "universal" SDK variant and is stripped).
        bn = _os.path.basename(path)
        version = bn[6:-4]
        if version.endswith("u"):
            version = version[:-1]

        result = tuple(map(int, version.split(".")))
        _cache.append(result)
        return result

    return _cache[0]
def fourcc(v):
    """Decode a four-character code (e.g. b'abcd') into its big-endian
    signed 32-bit integer value.
    """
    (code,) = _struct.unpack(">i", v)
    return code
def cast_int(value):
    """
    Cast value to a signed 32bit integer (two's complement).

    Usage:
        cast_int(1 << 31) == -2147483648
        cast_int(0xFFFFFFFF) == -1

        (where as: 1 << 31 == 2147483648)
    """
    # The original docstring claimed cast_int(1 << 31) == -1, which is
    # wrong; the examples above reflect the actual (unchanged) behavior.
    value = value & 0xFFFFFFFF
    if value & 0x80000000:
        # High bit set: interpret as a negative two's-complement number.
        return value - 0x100000000
    return value
def cast_longlong(value):
    """
    Cast value to a signed 64bit integer (two's complement).

    Usage:
        cast_longlong(1 << 63) == -9223372036854775808
        cast_longlong(0xFFFFFFFFFFFFFFFF) == -1
    """
    # The original docstring claimed cast_longlong(1 << 63) == -1, which is
    # wrong; the examples above reflect the actual (unchanged) behavior.
    value = value & 0xFFFFFFFFFFFFFFFF
    if value & 0x8000000000000000:
        # High bit set: interpret as a negative two's-complement number.
        return value - 0x10000000000000000
    return value
def cast_uint(value):
    """
    Cast value to an unsigned 32bit integer.

    Usage:
        cast_uint(1 << 31) == 2147483648
        cast_uint(-1) == 4294967295
    """
    # (The original docstring referred to 'cast_int' by mistake.)
    return value & 0xFFFFFFFF
def cast_ulonglong(value):
    """Cast value to an unsigned 64bit integer."""
    return value & 0xFFFFFFFFFFFFFFFF
# Cached result of os_release(); populated on first call.
_os_release = None


def os_release():
    """
    Returns the release of macOS (for example 10.5.1).

    The value is queried once through ``sw_vers -productVersion`` and then
    cached in the module-level ``_os_release``.
    """
    global _os_release
    if _os_release is None:
        output = _subprocess.check_output(["sw_vers", "-productVersion"])
        _os_release = output.decode().strip()
    return _os_release
def arch_only(arch):
    """
    Usage::

        class Tests (unittest.TestCase):
            @arch_only("arm64")
            def testArm64(self):
                pass

    The test runs only when the specified architecture matches
    """

    def decorator(function):
        skip_wrapper = _unittest.skipUnless(objc.arch == arch, f"{arch} only")
        return skip_wrapper(function)

    return decorator
def min_python_release(version):
    """
    Usage::

        class Tests (unittest.TestCase):
            @min_python_release('3.2')
            def test_python_3_2(self):
                pass
    """
    required = tuple(int(part) for part in version.split("."))
    return _unittest.skipUnless(
        _sys.version_info[:2] >= required, f"Requires Python {version} or later"
    )
def _sort_key(version):
parts = version.split(".")
if len(parts) == 2:
parts.append("0")
if len(parts) != 3:
raise ValueError(f"Invalid version: {version!r}")
return tuple(int(x) for x in parts)
def os_level_key(release):
    """
    Return an object that can be used to compare two releases.
    """
    # Parse 'major.minor[.patch]' into a tuple; missing patch defaults to 0.
    parts = release.split(".")
    if len(parts) == 2:
        parts.append("0")
    if len(parts) != 3:
        raise ValueError(f"Invalid version: {release!r}")
    return tuple(int(part) for part in parts)
def min_sdk_level(release):
    """
    Usage::

        class Tests (unittest.TestCase):
            @min_sdk_level('10.6')
            def testSnowLeopardSDK(self):
                pass
    """
    build = objc.PyObjC_BUILD_RELEASE
    # PyObjC_BUILD_RELEASE encodes the SDK version as major*100 + minor.
    sdk_version = (build // 100, build % 100, 0)
    return _unittest.skipUnless(
        sdk_version >= os_level_key(release),
        f"Requires build with SDK {release} or later",
    )
def max_sdk_level(release):
    """
    Usage::

        class Tests (unittest.TestCase):
            @max_sdk_level('10.5')
            def testUntilLeopardSDK(self):
                pass
    """
    # PyObjC_BUILD_RELEASE encodes the SDK version as major*100 + minor.
    v = (objc.PyObjC_BUILD_RELEASE // 100, objc.PyObjC_BUILD_RELEASE % 100, 0)
    return _unittest.skipUnless(
        v <= os_level_key(release),
        # Fixed skip reason: this decorator enforces an upper bound; the
        # original message said "or later".
        f"Requires build with SDK {release} or earlier",
    )
def min_os_level(release):
    """
    Usage::

        class Tests (unittest.TestCase):
            @min_os_level('10.6')
            def testSnowLeopardCode(self):
                pass
    """
    current = os_level_key(os_release())
    return _unittest.skipUnless(
        current >= os_level_key(release),
        f"Requires OSX {release} or later",
    )
def max_os_level(release):
    """
    Usage::

        class Tests (unittest.TestCase):
            @max_os_level('10.5')
            def testUntilLeopard(self):
                pass
    """
    current = os_level_key(os_release())
    return _unittest.skipUnless(
        current <= os_level_key(release),
        f"Requires OSX up to {release}",
    )
def os_level_between(min_release, max_release):
    """
    Usage::

        class Tests (unittest.TestCase):
            @os_level_between('10.5', '10.8')
            def testUntilLeopard(self):
                pass
    """
    current = os_level_key(os_release())
    in_range = os_level_key(min_release) <= current <= os_level_key(max_release)
    return _unittest.skipUnless(
        in_range,
        f"Requires OSX {min_release} up to {max_release}",
    )
# Class used to create/drain autorelease pools around each test.
_poolclass = objc.lookUpClass("NSAutoreleasePool")

# NOTE: On at least OSX 10.8 there are multiple proxy classes for CFTypeRef...
_nscftype = tuple(cls for cls in objc.getClassList() if "NSCFType" in cls.__name__)

# Integer type encodings treated as interchangeable when comparing type
# strings (long long vs. long, unsigned variants).
_typealias = {}

_typealias[objc._C_LNG_LNG] = objc._C_LNG
_typealias[objc._C_ULNG_LNG] = objc._C_ULNG
class TestCase(_unittest.TestCase):
"""
A version of TestCase that wraps every test into its own
autorelease pool.
This also adds a number of useful assertion methods
"""
# New API for testing function/method signatures, with one assert for
# the callable and one assert each for every return value and argument.
#
# Primary reason for the new API is to ensure that all metadata overrides
# are explicitly tested.
    def assertManualBinding(self, func):
        """Assert that *func* does not expose ``__metadata__`` (i.e. it is a manual binding)."""
        if hasattr(func, "__metadata__"):
            self.fail(f"{func} has automatic bindings")
    def assertIsCFType(self, tp, message=None):
        """Assert that *tp* is a distinct CFTypeRef proxy class (an NSCFType subclass that is not one of the shared roots)."""
        if not isinstance(tp, objc.objc_class):
            self.fail(message or f"{tp!r} is not a CFTypeRef type")
        if any(x is tp for x in _nscftype):
            self.fail(message or f"{tp!r} is not a unique CFTypeRef type")

        for cls in tp.__bases__:
            if "NSCFType" in cls.__name__:
                return

        self.fail(message or f"{tp!r} is not a CFTypeRef type")

        # NOTE: Don't test if this is a subclass of one of the known
        #       CF roots, this tests is mostly used to ensure that the
        #       type is distinct from one of those roots.
        # NOTE: With the next two lines enabled there are spurious test
        #       failures when a CF type is toll-free bridged to an
        #       (undocumented) Cocoa class.  It might be worthwhile to
        #       look for these, but not in the test suite.
        # if not issubclass(tp, _nscftype):
        #     self.fail(message or "%r is not a CFTypeRef subclass"%(tp,))
    def assertIsEnumType(self, tp):
        """Assert that *tp* is a ``typing.NewType`` over ``int`` (as generated for enum types)."""
        tmp = _typing.NewType("tmp", int)
        if isinstance(tmp, type(lambda: 1)):
            # typing.NewType is not a type in Python 3.7 or earlier; it is a
            # plain function, so fall back to duck-typed checks on the repr.
            if not isinstance(tp, type(lambda: 1)):
                self.fail(f"{tp!r} is not a typing.NewType")
            if "NewType" not in str(tp) or "new_type" not in str(tp):
                self.fail(f"{tp!r} is not a typing.NewType")
            if tp.__supertype__ != int:
                self.fail(f"{tp!r} is not a typing.NewType based on 'int'")
            return

        if not isinstance(tp, _typing.NewType):
            self.fail(f"{tp!r} is not a typing.NewType")
        if tp.__supertype__ != int:
            self.fail(f"{tp!r} is not a typing.NewType based on 'int'")
    def assertIsOpaquePointer(self, tp, message=None):
        """Assert that *tp* is an opaque-pointer wrapper (has ``__pointer__`` and ``__typestr__``)."""
        if not hasattr(tp, "__pointer__"):
            self.fail(message or f"{tp!r} is not an opaque-pointer")

        if not hasattr(tp, "__typestr__"):
            self.fail(message or f"{tp!r} is not an opaque-pointer")
    def assertResultIsNullTerminated(self, method, message=None):
        """Assert that the result of *method* is annotated as a null-terminated C array."""
        info = method.__metadata__()
        if not info.get("retval", {}).get("c_array_delimited_by_null"):
            self.fail(message or f"result of {method!r} is not a null-terminated array")
    def assertIsNullTerminated(self, method, message=None):
        """Assert that *method* is variadic with a null-terminated argument list."""
        info = method.__metadata__()
        if not info.get("c_array_delimited_by_null") or not info.get("variadic"):
            self.fail(
                message
                or "%s is not a variadic function with a "
                "null-terminated list of arguments" % (method,)
            )
    def assertArgIsNullTerminated(self, method, argno, message=None):
        """Assert that argument *argno* of *method* is a null-terminated C array."""
        if isinstance(method, objc.selector):
            # Selectors carry two implicit arguments (self, _cmd).
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            if not info["arguments"][argno + offset].get("c_array_delimited_by_null"):
                self.fail(
                    message
                    or "argument %d of %r is not a null-terminated array"
                    % (argno, method)
                )
        except (KeyError, IndexError):
            self.fail(
                message
                or "argument %d of %r is not a null-terminated array" % (argno, method)
            )
    def assertArgIsVariableSize(self, method, argno, message=None):
        """Assert that argument *argno* of *method* is a variable-sized C array."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            if not info["arguments"][argno + offset].get("c_array_of_variable_length"):
                self.fail(
                    message
                    or "argument %d of %r is not a variable sized array"
                    % (argno, method)
                )
        except (KeyError, IndexError):
            self.fail(
                message
                or "argument %d of %r is not a variable sized array" % (argno, method)
            )
    def assertResultIsVariableSize(self, method, message=None):
        """Assert that the result of *method* is a variable-sized C array."""
        info = method.__metadata__()
        if not info.get("retval", {}).get("c_array_of_variable_length", False):
            self.fail(message or f"result of {method!r} is not a variable sized array")
    def assertArgSizeInResult(self, method, argno, message=None):
        """Assert that the length of array argument *argno* is given by the return value."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            if not info["arguments"][argno + offset].get("c_array_length_in_result"):
                self.fail(
                    message
                    or "argument %d of %r does not have size in result"
                    % (argno, method)
                )
        except (KeyError, IndexError):
            self.fail(
                message
                or "argument %d of %r does not have size in result" % (argno, method)
            )
    def assertArgIsPrintf(self, method, argno, message=None):
        """Assert that *method* is variadic with a printf-style format string at argument *argno*."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        if not info.get("variadic"):
            self.fail(message or f"{method!r} is not a variadic function")

        try:
            if not info["arguments"][argno + offset].get("printf_format"):
                self.fail(
                    message
                    or "%r argument %d is not a printf format string" % (method, argno)
                )
        except (KeyError, IndexError):
            self.fail(
                message
                or "%r argument %d is not a printf format string" % (method, argno)
            )
    def assertArgIsCFRetained(self, method, argno, message=None):
        """Assert that argument *argno* of *method* is marked 'already_cfretained'."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            if not info["arguments"][argno + offset]["already_cfretained"]:
                self.fail(
                    message or f"Argument {argno} of {method!r} is not cfretained"
                )
        except (KeyError, IndexError):
            self.fail(message or f"Argument {argno} of {method!r} is not cfretained")
    def assertArgIsNotCFRetained(self, method, argno, message=None):
        """Assert that argument *argno* of *method* is not marked 'already_cfretained'."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            if info["arguments"][argno + offset]["already_cfretained"]:
                self.fail(message or f"Argument {argno} of {method!r} is cfretained")
        except (KeyError, IndexError):
            # Missing metadata means the flag is not set, which is what we want.
            pass
    def assertResultIsCFRetained(self, method, message=None):
        """Assert that the result of *method* is marked 'already_cfretained'."""
        info = method.__metadata__()
        if not info.get("retval", {}).get("already_cfretained", False):
            self.fail(message or f"{method!r} is not cfretained")
    def assertResultIsNotCFRetained(self, method, message=None):
        """Assert that the result of *method* is not marked 'already_cfretained'."""
        info = method.__metadata__()
        if info.get("retval", {}).get("already_cfretained", False):
            self.fail(message or f"{method!r} is cfretained")
    def assertArgIsRetained(self, method, argno, message=None):
        """Assert that argument *argno* of *method* is marked 'already_retained'."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            if not info["arguments"][argno + offset]["already_retained"]:
                self.fail(message or f"Argument {argno} of {method!r} is not retained")
        except (KeyError, IndexError):
            self.fail(message or f"Argument {argno} of {method!r} is not retained")
    def assertArgIsNotRetained(self, method, argno, message=None):
        """Assert that argument *argno* of *method* is not marked 'already_retained'."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            if info["arguments"][argno + offset]["already_retained"]:
                self.fail(message or f"Argument {argno} of {method!r} is retained")
        except (KeyError, IndexError):
            # Missing metadata means the flag is not set, which is what we want.
            pass
    def assertResultIsRetained(self, method, message=None):
        """Assert that the result of *method* is marked 'already_retained'."""
        info = method.__metadata__()
        if not info.get("retval", {}).get("already_retained", False):
            self.fail(message or f"Result of {method!r} is not retained")
    def assertResultIsNotRetained(self, method, message=None):
        """Assert that the result of *method* is not marked 'already_retained'."""
        info = method.__metadata__()
        if info.get("retval", {}).get("already_retained", False):
            self.fail(message or f"Result of {method!r} is retained")
def assertResultHasType(self, method, tp, message=None):
info = method.__metadata__()
typestr = info.get("retval").get("type", b"v")
if (
typestr != tp
and _typemap(typestr) != _typemap(tp)
and _typealias.get(typestr, typestr) != _typealias.get(tp, tp)
):
self.fail(
message
or f"result of {method!r} is not of type {tp!r}, but {typestr!r}"
)
    def assertArgHasType(self, method, argno, tp, message=None):
        """Assert that argument *argno* of *method* has type encoding *tp* (aliases accepted)."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            i = info["arguments"][argno + offset]

        except (KeyError, IndexError):
            self.fail(
                message
                or "arg %d of %s has no metadata (or doesn't exist)" % (argno, method)
            )

        else:
            typestr = i.get("type", b"@")
            if (
                typestr != tp
                and _typemap(typestr) != _typemap(tp)
                and _typealias.get(typestr, typestr) != _typealias.get(tp, tp)
            ):
                self.fail(
                    message
                    or "arg %d of %s is not of type %r, but %r"
                    % (argno, method, tp, typestr)
                )
    def assertArgIsFunction(self, method, argno, sel_type, retained, message=None):
        """Assert that argument *argno* of *method* is a function pointer with
        signature *sel_type* and the expected *retained* flag.
        """
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            i = info["arguments"][argno + offset]

        except (KeyError, IndexError):
            self.fail(
                message
                or "arg %d of %s has no metadata (or doesn't exist)" % (argno, method)
            )

        else:
            typestr = i.get("type", b"@")
            if typestr != b"^?":
                self.fail(
                    message
                    or "arg %d of %s is not of type function_pointer" % (argno, method)
                )

            st = i.get("callable")
            if st is None:
                self.fail(
                    message
                    or "arg %d of %s is not of type function_pointer" % (argno, method)
                )

            # Reconstruct the signature string: return type followed by the
            # argument types.
            try:
                iface = st["retval"]["type"]
                for a in st["arguments"]:
                    iface += a["type"]
            except KeyError:
                self.fail(
                    message
                    or "arg %d of %s is a function pointer with incomplete type information"
                    % (argno, method)
                )

            if iface != sel_type:
                self.fail(
                    message
                    or "arg %d of %s is not a function_pointer with type %r, but %r"
                    % (argno, method, sel_type, iface)
                )

            st = info["arguments"][argno + offset].get("callable_retained", False)
            if bool(st) != bool(retained):
                self.fail(
                    message
                    or "arg %d of %s; retained: %r, expected: %r"
                    % (argno, method, st, retained)
                )
    def assertResultIsFunction(self, method, sel_type, message=None):
        """Assert that the result of *method* is a function pointer with signature *sel_type*."""
        info = method.__metadata__()
        try:
            i = info["retval"]

        except (KeyError, IndexError):
            self.fail(
                message or f"result of {method} has no metadata (or doesn't exist)"
            )

        else:
            typestr = i.get("type", b"@")
            if typestr != b"^?":
                self.fail(message or f"result of {method} is not of type function_pointer")

            st = i.get("callable")
            if st is None:
                self.fail(message or f"result of {method} is not of type function_pointer")

            # Reconstruct the signature string: return type followed by the
            # argument types.
            try:
                iface = st["retval"]["type"]
                for a in st["arguments"]:
                    iface += a["type"]
            except KeyError:
                self.fail(
                    message
                    or "result of %s is a function pointer with incomplete type information"
                    % (method,)
                )

            if iface != sel_type:
                self.fail(
                    message
                    or "result of %s is not a function_pointer with type %r, but %r"
                    % (method, sel_type, iface)
                )
    def assertArgIsBlock(self, method, argno, sel_type, message=None):
        """Assert that argument *argno* of *method* is a block with signature *sel_type*."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            typestr = info["arguments"][argno + offset]["type"]
        except (IndexError, KeyError):
            self.fail("arg %d of %s does not exist" % (argno, method))

        if typestr != b"@?":
            self.fail(
                message
                or "arg %d of %s is not of type block: %s" % (argno, method, typestr)
            )

        st = info["arguments"][argno + offset].get("callable")
        if st is None:
            self.fail(
                message
                or "arg %d of %s is not of type block: no callable" % (argno, method)
            )

        # Reconstruct the signature: return type plus the argument types,
        # skipping the implicit first block argument (must be ^v).
        try:
            iface = st["retval"]["type"]
            if st["arguments"][0]["type"] != b"^v":
                self.fail(
                    message
                    or "arg %d of %s has an invalid block signature %r for argument 0"
                    % (argno, method, st["arguments"][0]["type"])
                )
            for a in st["arguments"][1:]:
                iface += a["type"]
        except KeyError:
            self.fail(
                message
                or "result of %s is a block pointer with incomplete type information"
                % (method,)
            )

        if iface != sel_type:
            self.fail(
                message
                or "arg %d of %s is not a block with type %r, but %r"
                % (argno, method, sel_type, iface)
            )
    def assertResultIsBlock(self, method, sel_type, message=None):
        """Assert that the result of *method* is a block with signature *sel_type*."""
        info = method.__metadata__()

        try:
            typestr = info["retval"]["type"]
            if typestr != b"@?":
                self.fail(
                    message or f"result of {method} is not of type block: {typestr}"
                )
        except KeyError:
            self.fail(
                message or "result of {} is not of type block: {}".format(method, b"v")
            )

        st = info["retval"].get("callable")
        if st is None:
            self.fail(
                message
                or "result of %s is not of type block: no callable specified" % (method)
            )

        # Reconstruct the signature: return type plus the argument types,
        # skipping the implicit first block argument (must be ^v).
        try:
            iface = st["retval"]["type"]
            if st["arguments"][0]["type"] != b"^v":
                self.fail(
                    message
                    or "result %s has an invalid block signature %r for argument 0"
                    % (method, st["arguments"][0]["type"])
                )
            for a in st["arguments"][1:]:
                iface += a["type"]
        except KeyError:
            self.fail(
                message
                or "result of %s is a block pointer with incomplete type information"
                % (method,)
            )

        if iface != sel_type:
            self.fail(
                message
                or "result of %s is not a block with type %r, but %r"
                % (method, sel_type, iface)
            )
    def assertResultIsSEL(self, method, sel_type, message=None):
        """Assert that the result of *method* is a SEL whose target has signature *sel_type*."""
        info = method.__metadata__()
        try:
            i = info["retval"]
        except (KeyError, IndexError):
            self.fail(
                message or f"result of {method} has no metadata (or doesn't exist)"
            )

        typestr = i.get("type", b"@")
        if typestr != objc._C_SEL:
            self.fail(message or f"result of {method} is not of type SEL")

        st = i.get("sel_of_type")
        if st != sel_type and _typemap(st) != _typemap(sel_type):
            self.fail(
                message
                or "result of %s doesn't have sel_type %r but %r"
                % (method, sel_type, st)
            )
    def assertArgIsSEL(self, method, argno, sel_type, message=None):
        """Assert that argument *argno* of *method* is a SEL whose target has signature *sel_type*."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        try:
            i = info["arguments"][argno + offset]
        except (KeyError, IndexError):
            self.fail(
                message
                or "arg %d of %s has no metadata (or doesn't exist)" % (argno, method)
            )

        typestr = i.get("type", b"@")
        if typestr != objc._C_SEL:
            self.fail(message or "arg %d of %s is not of type SEL" % (argno, method))

        st = i.get("sel_of_type")
        if st != sel_type and _typemap(st) != _typemap(sel_type):
            self.fail(
                message
                or "arg %d of %s doesn't have sel_type %r but %r"
                % (argno, method, sel_type, st)
            )
    def assertResultIsBOOL(self, method, message=None):
        """Assert that the result of *method* is an Objective-C BOOL."""
        info = method.__metadata__()
        typestr = info["retval"]["type"]
        if typestr != objc._C_NSBOOL:
            self.fail(
                message or f"result of {method} is not of type BOOL, but {typestr!r}"
            )
    def assertArgIsBOOL(self, method, argno, message=None):
        """Assert that argument *argno* of *method* is an Objective-C BOOL."""
        if isinstance(method, objc.selector):
            offset = 2
        else:
            offset = 0
        info = method.__metadata__()
        typestr = info["arguments"][argno + offset]["type"]
        if typestr != objc._C_NSBOOL:
            self.fail(
                message
                or "arg %d of %s is not of type BOOL, but %r" % (argno, method, typestr)
            )
def assertArgIsFixedSize(self, method, argno, count, message=None):
if isinstance(method, objc.selector):
offset = 2
else:
offset = 0
info = method.__metadata__()
try:
cnt = info["arguments"][argno + offset]["c_array_of_fixed_length"]
if cnt != count:
self.fail(
message
or "arg %d of %s is not a C-array of length %d"
% (argno, method, count)
)
except (KeyError, IndexError):
self.fail(
message
or "arg %d of %s is not a C-array of length %d" % (argno, method, count)
)
def assertResultIsFixedSize(self, method, count, message=None):
info = method.__metadata__()
try:
cnt = info["retval"]["c_array_of_fixed_length"]
if cnt != count:
self.fail(
message
or "result of %s is not a C-array of length %d" % (method, count)
)
except (KeyError, IndexError):
self.fail(
message
or "result of %s is not a C-array of length %d" % (method, count)
)
def assertArgSizeInArg(self, method, argno, count, message=None):
if isinstance(method, objc.selector):
offset = 2
else:
offset = 0
info = method.__metadata__()
try:
cnt = info["arguments"][argno + offset]["c_array_length_in_arg"]
except (KeyError, IndexError):
self.fail(
message
or "arg %d of %s is not a C-array of with length in arg %s"
% (argno, method, count)
)
if isinstance(count, (list, tuple)):
count2 = tuple(x + offset for x in count)
else:
count2 = count + offset
if cnt != count2:
self.fail(
message
or "arg %d of %s is not a C-array of with length in arg %s"
% (argno, method, count)
)
def assertResultSizeInArg(self, method, count, message=None):
if isinstance(method, objc.selector):
offset = 2
else:
offset = 0
info = method.__metadata__()
cnt = info["retval"]["c_array_length_in_arg"]
if cnt != count + offset:
self.fail(
message
or "result %s is not a C-array of with length in arg %d"
% (method, count)
)
def assertArgIsOut(self, method, argno, message=None):
if isinstance(method, objc.selector):
offset = 2
else:
offset = 0
info = method.__metadata__()
typestr = info["arguments"][argno + offset]["type"]
if not typestr.startswith(b"o^") and not typestr.startswith(b"o*"):
self.fail(
message or "arg %d of %s is not an 'out' argument" % (argno, method)
)
def assertArgIsInOut(self, method, argno, message=None):
if isinstance(method, objc.selector):
offset = 2
else:
offset = 0
info = method.__metadata__()
typestr = info["arguments"][argno + offset]["type"]
if not typestr.startswith(b"N^") and not typestr.startswith(b"N*"):
self.fail(
message or "arg %d of %s is not an 'inout' argument" % (argno, method)
)
def assertArgIsIn(self, method, argno, message=None):
if isinstance(method, objc.selector):
offset = 2
else:
offset = 0
info = method.__metadata__()
typestr = info["arguments"][argno + offset]["type"]
if not typestr.startswith(b"n^") and not typestr.startswith(b"n*"):
self.fail(
message or "arg %d of %s is not an 'in' argument" % (argno, method)
)
def assertStartswith(self, value, test, message=None): # pragma: no cover
if not value.startswith(test):
self.fail(message or f"{value!r} does not start with {test!r}")
def assertHasAttr(self, value, key, message=None):
if not hasattr(value, key):
self.fail(message or f"{key} is not an attribute of {value!r}")
def assertNotHasAttr(self, value, key, message=None):
if hasattr(value, key):
self.fail(message or f"{key} is an attribute of {value!r}")
def assertIsSubclass(self, value, types, message=None):
if not issubclass(value, types):
self.fail(message or f"{value} is not a subclass of {types!r}")
def assertIsNotSubclass(self, value, types, message=None):
if issubclass(value, types):
self.fail(message or f"{value} is a subclass of {types!r}")
def assertClassIsFinal(self, cls):
if not isinstance(cls, objc.objc_class):
self.fail(f"{cls} is not an Objective-C class")
elif not cls.__objc_final__:
self.fail(f"{cls} is not a final class")
def assertProtocolExists(self, name):
ok = True
try:
proto = objc.protocolNamed(name)
except objc.ProtocolError:
ok = False
if not ok:
self.fail(f"Protocol {name!r} does not exist")
if not isinstance(proto, objc.formal_protocol):
# Should never happen
self.fail(f"Protocol {name!r} is not a protocol, but {type(proto)}")
def assertPickleRoundTrips(self, value):
try:
buf = _pickle.dumps(value)
clone = _pickle.loads(buf)
except Exception:
self.fail(f"{value} cannot be pickled")
self.assertEqual(clone, value)
self.assertIsInstance(clone, type(value))
def __init__(self, methodName="runTest"):
super().__init__(methodName)
testMethod = getattr(self, methodName)
if getattr(testMethod, "_no_autorelease_pool", False):
self._skip_usepool = True
else:
self._skip_usepool = False
    def run(self, *args):
        """
        Run the test, same as unittest.TestCase.run, but every test is
        run with a fresh autorelease pool.

        The pool is created before the test starts and released afterwards so
        that autoreleased Objective-C objects created by one test are freed
        deterministically instead of leaking into later tests.
        """
        if _usepool and not self._skip_usepool:
            # Fresh NSAutoreleasePool for this test run.
            p = _poolclass.alloc().init()
        else:
            # Dummy placeholder so ``del p`` in the cleanup path always works.
            p = 1
        try:
            _unittest.TestCase.run(self, *args)
        finally:
            # Collect twice: once to drop Python references into the pool,
            # and again after the pool itself is released by ``del p``.
            _gc.collect()
            del p
            _gc.collect()
# Convenience re-exports so test modules can import these standard unittest
# helpers from this module instead of importing unittest directly.
main = _unittest.main
expectedFailure = _unittest.expectedFailure
skipUnless = _unittest.skipUnless
def expectedFailureIf(condition):
    """Return ``expectedFailure`` when *condition* is true, otherwise a no-op decorator."""
    if not condition:
        return lambda func: func
    return expectedFailure
def no_autorelease_pool(func):
    """Mark *func* so that ``TestCase.run`` does not wrap it in a fresh autorelease pool."""
    setattr(func, "_no_autorelease_pool", True)
    return func
| StarcoderdataPython |
220287 | <reponame>Novel-Public-Health/Novel-Public-Health
# Generated by Django 3.1.2 on 2021-03-26 08:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adjusts help_text / null / blank options on several ``Movie`` fields;
    no columns are added or removed. Generated code — do not hand-edit the
    operations beyond comments.
    """
    # Must be applied after migration 0052 of the ``catalog`` app.
    dependencies = [
        ('catalog', '0052_auto_20210326_0248'),
    ]
    operations = [
        # Fields below marked "overwritten if given a valid IMDB id" are
        # populated automatically from IMDB metadata elsewhere in the app.
        migrations.AlterField(
            model_name='movie',
            name='director',
            field=models.ForeignKey(blank=True, help_text='This field will be overwritten if given a valid IMDB id.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.director'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='found_articles',
            field=models.TextField(blank=True, help_text='HTML list output of found research articles on Google Scholar. Clear the text to find new articles.', max_length=5000, null=True, verbose_name='Found Research Articles'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='genre',
            field=models.ForeignKey(blank=True, help_text='This field will be overwritten if given a valid IMDB id.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.genre'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='imdb_link',
            field=models.CharField(blank=True, help_text='For example, here is <a target="_blank" href="https://www.imdb.com/title/tt3322364/">Concussion\'s link</a>.', max_length=100, verbose_name='IMDB Link'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='language',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.language'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='summary',
            field=models.TextField(blank=True, help_text='Enter a brief description of the movie. If left blank, the summary from a valid IMDB link will be used.', max_length=1000, null=True),
        ),
        migrations.AlterField(
            model_name='movie',
            name='year',
            field=models.CharField(blank=True, help_text='This field will be overwritten if given a valid IMDB id.', max_length=200, null=True),
        ),
    ]
| StarcoderdataPython |
6611464 | import copy
import logging
import time
import math
import numpy as np
import torch
import torch.utils.data as td
from sklearn.utils import shuffle
from PIL import Image
from torch.autograd import Variable
import torchvision.transforms.functional as trnF
from torch.nn import functional as F
from utils import *
class IncrementalLoader(td.Dataset):
    def __init__(self, data, labels, classes, step_size, mem_sz, mode, transform=None, loader=None, shuffle_idx=None,
                 model=None,
                 base_classes=50, approach='wa'):
        """Incremental-learning dataset wrapper.

        data/labels are sorted by class label so that each class occupies a
        contiguous index range; per-class start/end offsets are precomputed.
        `mem_sz` is the total exemplar-memory budget, `base_classes` the size
        of the first task and `step_size` the number of classes per later task.
        NOTE(review): `model` is accepted but never used.
        """
        if shuffle_idx is not None:
            # Optional class-label permutation ("label shuffle") applied
            # before sorting, so class order differs between runs.
            print("Label shuffled")
            labels = shuffle_idx[labels]
        set_seed(1)
        # Sort samples by label so each class is a contiguous slice.
        sort_index = np.argsort(labels)
        self.data = data[sort_index]
        labels = np.array(labels)
        self.labels = labels[sort_index]
        self.labelsNormal = np.copy(self.labels)
        self.transform = transform
        self.loader = loader
        self.total_classes = classes
        self.labels_arr = np.unique(self.labels) # for sampling method
        self.step_size = step_size
        self.base_classes = base_classes
        # t counts completed task transitions (incremented by task_change).
        self.t = 0
        self.mem_sz = mem_sz
        # 10% of the memory budget is reserved for the BiC validation split.
        self.validation_buffer_size = int(mem_sz / 10) * 1
        self.mode = mode
        # [start, end) is the class range of the current task.
        self.start = 0
        self.end = base_classes
        self.start_idx = 0
        # np.argmax on a boolean array gives the first index past the range.
        self.end_idx = np.argmax(self.labelsNormal > (self.end - 1)) # end data index
        if self.end == classes:
            # NOTE(review): uses len(labels) - 1 here but len(labels) in
            # task_change/end_point — possibly drops the last sample; confirm.
            self.end_idx = len(labels) - 1
        self.tr_idx = range(self.end_idx)
        self.len = len(self.tr_idx)
        self.current_len = self.len
        self.approach = approach
        self.memory_buffer = []
        self.exemplar = []
        self.validation_buffer = []
        # Per-class [start_point[i], end_point[i]) index ranges into self.data.
        self.start_point = []
        self.end_point = []
        for i in range(classes):
            self.start_point.append(np.argmin(self.labelsNormal < i))
            self.end_point.append(np.argmax(self.labelsNormal > (i)))
            self.memory_buffer.append([])
        self.end_point[-1] = len(labels)
        # variables for sampling strategy (forgetting counts, gradient and
        # knowledge-distillation statistics used by the adaptive exemplar
        # selection in update_exemplar_classAdaptively)
        self.count_forgetting = np.zeros(len(self.data))
        self.count_forgetting_list = []
        self.prev_forgetting = np.zeros(len(self.data))
        self.new_forgetting = np.zeros(len(self.data))
        self.class_index_list = []
        self.class_forgetting_count = []
        self.cls_sb_hard_idx = []
        self.grad_list = []
        self.temp_input_grad = np.zeros(len(self.data))
        self.class_grad_list = []
        self.class_grad_checkList = []
        self.KD_prob_diff = np.zeros(len(self.data))
        self.class_kd_diff_list = []
    def task_change(self):
        """Advance to the next incremental task.

        Shifts the active class window [start, end) by step_size, recomputes
        the training index range and, for approaches that replay exemplars,
        extends the reported length by the exemplar count.
        """
        self.t += 1
        self.start = self.end
        self.end += self.step_size
        print('dataset start, end: ', self.start, self.end)
        self.start_idx = np.argmin(self.labelsNormal < self.start) # start data index
        self.end_idx = np.argmax(self.labelsNormal > (self.end - 1)) # end data index
        if self.end_idx == 0:
            # argmax returned 0 because no label exceeds end-1: the window
            # reaches the end of the dataset.
            self.end_idx = self.labels.shape[0]
        self.tr_idx = range(self.start_idx, self.end_idx)
        # validation set for bic: carve the tail of each new class off into
        # the bias-correction validation buffer.
        if self.approach == 'bic' and self.start < self.total_classes and self.mode != 'test':
            val_per_class = (self.validation_buffer_size // 2) // self.step_size
            self.tr_idx = []
            for i in range(self.step_size):
                end = self.end_point[self.start + i]
                start = self.start_point[self.start + i]
                self.validation_buffer += range(end - val_per_class, end)
                self.tr_idx += range(start, end - val_per_class)
            print('exemplar, validation: ', len(self.exemplar), len(self.validation_buffer))
            # NOTE(review): `arr` is built but never used — looks like
            # leftover debugging code.
            arr = []
            for idx in self.validation_buffer:
                arr.append(self.labelsNormal[idx])
        self.len = len(self.tr_idx)
        self.current_len = self.len
        # Replay-based approaches see current-task data plus stored exemplars.
        if self.approach == 'ft' or self.approach == 'icarl' or self.approach == 'bic' or self.approach == 'wa' or self.approach == 'eeil' or self.approach == "CTL" or self.approach == "vanilla" or self.approach == "joint":
            print(self.approach)
            self.len += len(self.exemplar)
def update_bft_buffer(self):
self.bft_buffer = copy.deepcopy(self.memory_buffer)
min_len = 1e8
for arr in self.bft_buffer:
min_len = max(min_len, len(arr))
buffer_per_class = min_len
buffer_per_class = math.ceil(self.mem_sz / self.start)
if buffer_per_class > 500:
buffer_per_class = 500
for i in range(self.start, self.end):
start_idx = self.start_point[i]
end_idx = self.end_point[i]
idx = range(start_idx, start_idx + buffer_per_class)
self.bft_buffer[i] += list(idx)
for arr in self.bft_buffer:
if len(arr) > buffer_per_class:
arr.pop()
self.bft_exemplar = []
for arr in self.bft_buffer:
self.bft_exemplar += arr
    def update_exemplar(self, memory_mode=None, seed=None):
        """Refresh the exemplar memory after a task (herding-free, first-k).

        Each class keeps the first ``ceil(mem_sz / end)`` samples of its
        contiguous index range; old classes are truncated to the new budget,
        then single samples are popped from the largest classes until the
        total fits ``mem_sz``. NOTE(review): ``memory_mode`` and ``seed`` are
        accepted but never used.
        """
        buffer_per_class = math.ceil(self.mem_sz / self.end)
        if buffer_per_class > 5000:
            buffer_per_class = 5000
        # first, add new exemplars: the first buffer_per_class indices of
        # each newly seen class.
        for i in range(self.start, self.end):
            start_idx = self.start_point[i]
            self.memory_buffer[i] += range(start_idx, start_idx + buffer_per_class)
        # second, shrink previously stored classes down to the new budget
        if buffer_per_class > 0:
            for i in range(self.start):
                if len(self.memory_buffer[i]) > buffer_per_class:
                    del self.memory_buffer[i][buffer_per_class:]
        # third, if the total still exceeds mem_sz, pop one sample from each
        # of the currently-largest classes until it fits.
        length = sum([len(i) for i in self.memory_buffer])
        remain = length - self.mem_sz
        if remain > 0:
            imgs_per_class = [len(i) for i in self.memory_buffer]
            selected_classes = np.argsort(imgs_per_class)[-remain:]
            for c in selected_classes:
                self.memory_buffer[c].pop()
        # Flatten per-class buffers into the replay index list.
        self.exemplar = []
        for arr in self.memory_buffer:
            self.exemplar += arr
        # validation set for bic: move the tail of each class buffer into the
        # bias-correction validation split.
        if self.approach == 'bic':
            self.bic_memory_buffer = copy.deepcopy(self.memory_buffer)
            self.validation_buffer = []
            validation_per_class = (self.validation_buffer_size // 2) // self.end
            if validation_per_class > 0:
                for i in range(self.end):
                    self.validation_buffer += self.bic_memory_buffer[i][-validation_per_class:]
                    del self.bic_memory_buffer[i][-validation_per_class:]
            remain = self.validation_buffer_size // 2 - validation_per_class * self.end
            if remain > 0:
                imgs_per_class = [len(i) for i in self.bic_memory_buffer]
                selected_classes = np.argsort(imgs_per_class)[-remain:]
                for c in selected_classes:
                    self.validation_buffer.append(self.bic_memory_buffer[c].pop())
            self.exemplar = []
            for arr in self.bic_memory_buffer:
                self.exemplar += arr
    def update_exemplar_classAdaptively(self, mode=None, bal=None, selection=None, memory_mode=None, method=None,
                                        throw_away=None):
        """Adaptive exemplar selection mixing diversity-maximisation (DM) and
        knowledge-distillation (KD) criteria.

        Per class: if the mean positive KD probability difference exceeds a
        hard-coded threshold (20), exemplars are chosen by evenly-spaced
        sampling over the gradient ranking ("diversity"); otherwise the
        samples with the highest KD differences are kept ("highKD").
        ``method`` selects the gradient statistic ('weight_avg', 'var' or
        'metric'). NOTE(review): ``mode``, ``bal``, ``selection`` and
        ``throw_away`` are accepted but never used; if ``method`` is none of
        the three values, ``gradList`` is unbound and a NameError follows.
        """
        print("Exemplar Choosing by adaptively DM+KD")
        print("start : {}, end : {}".format(self.start, self.end))
        change_rank = True
        if memory_mode == "fixed":
            buffer_per_class = 20
        else:
            buffer_per_class = math.ceil(self.mem_sz / self.end)
        print("buffer_per_class :", buffer_per_class)
        print("Arrange the KD information")
        kd_diff_List = self.KD_prob_diff.copy()
        print(np.where(kd_diff_List > 0)[0].shape)
        print("Arrange the gradient information")
        # grad_list is expected to hold 10 per-sample gradient snapshots
        # (weights 1/1 .. 1/10 favour the most recent entries).
        gradHist = np.stack(self.grad_list, axis=0)
        weight = np.arange(1, 11)
        weight = 1 / weight
        weight_avg_grad = np.average(gradHist, weights=weight, axis=0) # weight avg
        var_grad = np.var(gradHist, axis=0) # varaince
        lamb = 0.5
        newMeasure = (lamb * self.normalize(weight_avg_grad)) + (
                (1 - lamb) * self.normalize(var_grad)) # to be revised later
        check_gradList = np.sum(gradHist, axis=0)
        if method == "weight_avg":
            gradList = weight_avg_grad
        elif method == "var":
            gradList = var_grad
        elif method == "metric":
            gradList = newMeasure
        # class_min = int(buffer_per_class / 5)
        # shared_memory = (buffer_per_class - class_min) * self.end
        # for the unbalanced but reasonable data set
        # class_weight = class_min + np.array(class_weight * shared_memory)
        # buffer_per_class = class_weight.astype(np.int)
        print(np.where(check_gradList > 0)[0].shape)
        temp = 0
        exemplarRatio = buffer_per_class / 500
        print("exemplar Ratio : ", exemplarRatio)
        temp = 0
        # Reset the per-class bookkeeping lists before re-selection.
        self.class_kd_diff_list = []
        self.class_index_list = []
        self.cls_sb_hard_idx = []
        self.class_grad_list = []
        self.class_grad_checkList = []
        # throw away previous exemplar by highKD diff
        for i in (self.labels_arr[0:self.end]):
            self.class_index_list.append(np.where(self.labelsNormal == i)[0])
            self.class_kd_diff_list.append(kd_diff_List[self.class_index_list[i]])
            self.class_grad_list.append(gradList[self.class_index_list[i]])
            self.class_grad_checkList.append(
                check_gradList[self.class_index_list[i]]) # sum of gradient to check whether it
            # Rank this class's samples by KD difference, descending.
            check_KD_list = np.where(self.class_kd_diff_list[i] > 0)
            kd_argsort = np.argsort(self.class_kd_diff_list[i][check_KD_list])[::-1] # descending order
            selection_check_idx = check_KD_list[0][kd_argsort]
            selection_check = self.class_kd_diff_list[i][selection_check_idx].mean()
            # Sanity check: the ranking must be non-increasing.
            if self.class_kd_diff_list[i][selection_check_idx][0] < self.class_kd_diff_list[i][selection_check_idx][1]:
                print("Something is wrong please Check")
            if selection_check > 20: # hyperparameter threshold switching DM vs highKD
                selection_check_flag = "diversity"
            else:
                selection_check_flag = "highKD"
            ##############################################################################################################################
            # Branch 1: diversity maximisation — evenly-spaced picks over the
            # gradient-sorted sample ranking.
            if selection_check_flag == "diversity":
                print("Choose the exemplar by diveristy maximization")
                rm_class_forget = np.where(self.class_grad_checkList[temp] > 0)
                temp_argsort = np.argsort(self.class_grad_list[temp][rm_class_forget])
                temp_argsort = rm_class_forget[0][temp_argsort]
                temp_index = []
                for j in range(buffer_per_class):
                    '''
                    if exemplarRatio < 0.1 :
                        exemplar_end = len(temp_argsort) * 0.5
                    elif (exemplarRatio < 0.5) and (exemplarRatio >= 0.1) :
                        exemplar_end = len(temp_argsort) * 0.75
                    else :
                        exemplar_end = len(temp_argsort)
                    '''
                    exemplar_end = len(temp_argsort)
                    num_index = exemplar_end / buffer_per_class
                    a = int(j * num_index)
                    temp_index.append(a)
                temp_index = np.stack(temp_index)
                temp_argsort = temp_argsort[temp_index]
                temp_class_index = self.class_index_list[i][temp_argsort]
            ##########################################################################################################################################
            # Branch 2: highKD — keep the samples with the largest KD
            # probability differences.
            else:
                print("Choose the exemplar by highKD")
                temp_argsort = selection_check_idx.copy()
                # temp_argsort = np.argsort(rm_class_forget) # ascending order // select the hard one
                # temp_argsort = np.argsort(rm_class_forget)[::-1] # descending order // select the easy one
                temp_value = int(500 - int(buffer_per_class)) # select Hard
                temp_argsort = temp_argsort[:buffer_per_class] # Easy
                bad_index = temp_argsort[:(buffer_per_class - 10)]
                temp_class_index = self.class_index_list[i][temp_argsort]
                print("high diff mean :", self.KD_prob_diff[temp_class_index].mean())
                print("more high diff mean : ", self.KD_prob_diff[self.class_index_list[i][bad_index]].mean())
            self.cls_sb_hard_idx.append(temp_class_index)
            temp += 1
        # Install the freshly selected per-class exemplar indices.
        for i in range(0, self.end):
            start_idx = self.start_point[i]
            self.memory_buffer[i] = self.cls_sb_hard_idx[i].copy()
        '''
        if change_rank == False:
            # second, throw away the previous samples
            if buffer_per_class > 0:
                for i in range(self.start):
                    if len(self.memory_buffer[i]) > buffer_per_class:
                        self.memory_buffer[i] = self.memory_buffer[i][:buffer_per_class]
            # third, select classes from previous classes, and throw away only 1 samples per class
            # randomly select classes. **random seed = self.t or start** <-- IMPORTANT!
        '''
        # Trim one sample from the largest classes until the budget fits.
        length = sum([len(i) for i in self.memory_buffer])
        remain = length - self.mem_sz
        total_mem = 0
        for i in range(self.end):
            total_mem = total_mem + buffer_per_class
        print(total_mem)
        print(remain)
        if remain > 0:
            imgs_per_class = [len(i) for i in self.memory_buffer]
            selected_classes = np.argsort(imgs_per_class)[-remain:]
            for c in selected_classes:
                self.memory_buffer[c] = self.memory_buffer[c][:-1]
                # self.memory_buffer[c] = self.memory_buffer[c].pop()
        # Flatten into the replay index list (per-class entries are ndarrays).
        self.exemplar = []
        for arr in self.memory_buffer[:self.end]:
            print(arr)
            arr = arr.tolist()
            self.exemplar += arr
        print(self.exemplar.__len__())
        # self.exemplar = np.concatenate(self.exemplar)
        # print(self.exemplar)
        # validation set for bic: same split logic as update_exemplar.
        if 'bic' in self.approach:
            self.bic_memory_buffer = copy.deepcopy(self.memory_buffer)
            self.validation_buffer = []
            validation_per_class = (self.validation_buffer_size // 2) // self.end
            if validation_per_class > 0:
                for i in range(self.end):
                    self.validation_buffer += self.bic_memory_buffer[i][-validation_per_class:]
                    del self.bic_memory_buffer[i][-validation_per_class:]
            remain = self.validation_buffer_size // 2 - validation_per_class * self.end
            if remain > 0:
                imgs_per_class = [len(i) for i in self.bic_memory_buffer]
                selected_classes = np.argsort(imgs_per_class)[-remain:]
                for c in selected_classes:
                    self.validation_buffer.append(self.bic_memory_buffer[c].pop())
            self.exemplar = []
            for arr in self.bic_memory_buffer:
                self.exemplar += arr
def normalize(self, input):
ma = np.max(input)
mi = np.min(input)
output = (input - mi) / (ma - mi)
return output
def __len__(self):
if self.mode == 'train':
return self.len
elif self.mode == 'bias':
return len(self.validation_buffer)
elif self.mode == 'b-ft':
return len(self.bft_exemplar)
else:
return self.end_idx
    def __getitem__(self, index):
        """Return (image, label, dataset_index) for position *index*.

        In 'train' mode, positions past ``current_len`` map into the exemplar
        replay buffer; 'bias' and 'b-ft' modes index their own buffers.
        """
        # time.sleep(0.1)
        if self.mode == 'train':
            if index >= self.current_len: # for bic, ft, icarl, il2m
                index = self.exemplar[index - self.current_len]
            else:
                index = self.tr_idx[index]
        elif self.mode == 'bias': # for bic bias correction
            index = self.validation_buffer[index]
        elif self.mode == 'b-ft':
            index = self.bft_exemplar[index]
        img = self.data[index]
        try:
            img = Image.fromarray(img)
        except:
            # NOTE(review): bare except — falls back to self.loader when the
            # stored entry is a path/handle rather than a pixel array, but it
            # will also mask unrelated errors from Image.fromarray.
            img = self.loader(img)
        if self.transform is not None:
            img = self.transform(img)
        return img, self.labelsNormal[index], index
class ResultLoader(td.Dataset):
    """Evaluation dataset: one-hot labels plus the original integer label.

    ``self.labels`` is converted to one-hot vectors at construction time,
    while ``self.labelsNormal`` keeps the original integer labels that
    ``__getitem__`` actually returns.
    """
    def __init__(self, data, labels, transform=None, loader=None):
        self.data = data
        self.labels = labels
        # Keep the integer labels before one-hot encoding overwrites them.
        self.labelsNormal = np.copy(self.labels)
        self.transform = transform
        self.loader = loader
        self.transformLabels()
    def transformLabels(self):
        '''Change labels to one hot coded vectors'''
        b = np.zeros((self.labels.size, self.labels.max() + 1))
        b[np.arange(self.labels.size), self.labels] = 1
        self.labels = b
    def __len__(self):
        # Row count of the one-hot matrix == number of samples.
        return self.labels.shape[0]
    def __getitem__(self, index):
        # time.sleep(0.1)
        img = self.data[index]
        try:
            img = Image.fromarray(img)
        except:
            # NOTE(review): bare except — fallback for non-array entries, but
            # also masks unrelated Image.fromarray errors.
            img = self.loader(img)
        if self.transform is not None:
            img = self.transform(img)
        return img, self.labelsNormal[index], index
def make_ResultLoaders(data, labels, classes, step_size, transform=None, loader=None, shuffle_idx=None,
                       base_classes=50):
    """Build one ResultLoader per incremental task.

    Sorts samples by (optionally shuffled) label, then slices them into
    class windows of ``base_classes`` followed by ``step_size``-sized steps.
    Returns the list of per-task loaders.
    """
    if shuffle_idx is not None:
        # Same label permutation as IncrementalLoader, for consistency.
        labels = shuffle_idx[labels]
    sort_index = np.argsort(labels)
    data = data[sort_index]
    labels = np.array(labels)
    labels = labels[sort_index]
    start = 0
    end = base_classes
    loaders = []
    while (end <= classes):
        start_idx = np.argmin(labels < start) # start data index
        end_idx = np.argmax(labels > (end - 1)) # end data index
        if end_idx == 0:
            # argmax returned 0: no label exceeds end-1, i.e. the window
            # extends to the end of the sorted dataset.
            end_idx = data.shape[0]
        loaders.append(
            ResultLoader(data[start_idx:end_idx], labels[start_idx:end_idx], transform=transform, loader=loader))
        start = end
        end += step_size
    return loaders
def iterator(dataset_loader, batch_size, shuffle=False, drop_last=False):
    """Wrap *dataset_loader* in a single-process, non-pinned DataLoader."""
    return torch.utils.data.DataLoader(
        dataset_loader,
        batch_size=batch_size,
        shuffle=shuffle,
        drop_last=drop_last,
        num_workers=0,
        pin_memory=False,
    )
177180 | <reponame>sajjadt/competitive-programming
import time
from math import sqrt
from sys import stdout
# Contest solution: for each test case find distinct integers x < y < z with
# x + y + z = a, x * y * z = b and x^2 + y^2 + z^2 = c, or report no solution.
num_tests = int(input())
cases = 0
start = time.time()
for t in range(num_tests):
    a, b, c = list(map(int, input().split()))
    # C0: xyz = b (positive) -> since the triple is printed sorted, at least
    #     z > 0 must hold, so z only needs to range over positive cube roots.
    # C1: -100 < x, y, z < 100
    # C2: abs value of one of numbers must be less than 23 -> xyz < 10000
    # NOTE(review): pow(b, 0.3333) assumes b > 0 (per C0); a negative b would
    # produce a complex result and crash on int().
    root3_b = int(pow(b, 0.3333)) + 2
    root_c = int(sqrt(c))
    found = False
    for z in range(-root3_b, root3_b):
        if found:
            break
        # NOTE(review): Z and Y below are computed but never used.
        Z = z*z
        for y in range(-root_c, root_c + 1):
            if found:
                break
            if y == z:
                continue
            Y = y*y
            # x is forced by the sum constraint, so only check the bounds
            # and the remaining product / sum-of-squares constraints.
            x = a - y - z
            if x == y or x ==z :
                continue
            if x > root_c:
                continue
            if x < -root_c:
                # x decreases as y increases, so no later y can work either.
                break
            if x * y * z != b:
                continue
            if x*x + y*y + z*z == c:
                stdout.write(" ".join(map(str, sorted([x, y, z]))) + "\n")
                found = True
    if not found:
        stdout.write("No solution.\n")
end = time.time()
# print(end - start)
| StarcoderdataPython |
11297263 | import tkinter as tk
# Simple tkinter form: a name field and an email field with Quit / Show /
# Clear buttons. Widgets are created against the single top-level window.
# create container
window = tk.Tk()
window.geometry("312x200")
window.resizable(0, 0) # deactivate resizing
# create label and entry for firstname and lastname
FN = tk.Label(text="<NAME>", fg="black", bg="yellow", width=20, height=2)
FNbox = tk.Entry()
EM = tk.Label(text=" Email Address", fg="black", bg="red", width=20, height=2)
EMbox = tk.Entry()
# buttons and their callbacks
def close(): # close the top-level window, ending mainloop
    window.destroy()
button1 = tk.Button(text="Quit", fg="black", highlightbackground="white",
                    command=close) # button that uses the previous function
def name(): # print the entered name and email to stdout
    print("Full name :", FNbox.get() + " " + "Email Address :",EMbox.get()) # read both entry widgets
button2 = tk.Button(text="Show", fg="black", highlightbackground="white",command=name) # button that uses the previous function
def clearme(): # wipe both entry widgets
    FNbox.delete(0, tk.END)
    EMbox.delete(0, tk.END) # deletes the whole text that was written
button3 = tk.Button(text="Clear me!",width=10, fg="red", command=clearme) # button to clear the current text
# insert GUI elements into window (simple top-to-bottom pack layout)
FN.pack()
FNbox.pack()
EM.pack()
EMbox.pack()
button1.pack()
button2.pack()
button3.pack()
# keep focus on window: blocks here until the window is closed
window.mainloop()
| StarcoderdataPython |
# Demonstrates two safe strategies for removing entries from a dict that
# cannot be mutated while it is being iterated directly.
individuals = {'Pluto': 'active', 'Goofy': 'inactive', 'Sofie': 'active'}

# Strategy 1: iterate over a snapshot so deleting from the original is safe.
for individual, status in individuals.copy().items():
    if status == 'inactive':
        del individuals[individual]

# Strategy 2: build a new collection containing only the entries to keep.
active_individuals = {}
for individual, status in individuals.items():
    if status == 'active':
        active_individuals[individual] = status

# Create a sample collection
users = {'Hans': 'active', 'Éléonore': 'inactive', '景太郎': 'active'}

# Strategy: Iterate over a copy
for user, status in users.copy().items():
    if status == 'inactive':
        del users[user]

# Strategy: Create a new collection
active_users = {}
for user, status in users.items():
    if status == 'active':
        active_users[user] = status

# Print the numbers 0..99 (kept from the original script).
for i in range(100):
    print(i)

# Use a context manager so the file handle is always closed; the original
# opened the file, wrote to it and never closed it. The name ``file`` stays
# bound afterwards for backward compatibility.
with open("sample.bin", "wb") as file:
    file.write(b"This binary string will be written to sample.bin")
11286239 | <filename>src/directories.py
import numpy as np
import os as os
def return_dirs():
    """Construct a ``directories`` object and populate it from directories.cat."""
    placeholder = '.'
    result = directories(*([placeholder] * 5))
    result.get_dirs()
    return result
class directories( dict ):
    """Holds the five pipeline directory paths and (de)serialises them to
    the ``directories.cat`` file.

    NOTE(review): paths are stored in ``self.__dict__`` rather than the dict
    contents, so dict-style access goes through the overridden ``keys`` /
    ``__getitem__`` below.
    """
    def __init__( self, data_dir, sex_files, psf_model_dir, code_dir, stilts_dir):
        self.__dict__['sex_files'] = sex_files
        self.__dict__['data_dir'] = data_dir
        self.__dict__['psf_model_dir'] = psf_model_dir
        self.__dict__['code_dir'] = code_dir
        self.__dict__['stilts_dir'] = stilts_dir
    def write_dirs( self ):
        """Write the five paths to directories.cat as 'NAME: PATH' lines."""
        # NOTE(review): the handle is never closed; a with-statement would be
        # safer, though CPython closes it on garbage collection.
        file_obj = open('directories.cat',"w")
        file_obj.write("DATA_DIR: %s \n" %self.data_dir)
        file_obj.write("SEX_FILES: %s \n" %self.sex_files)
        file_obj.write("PSF_MODEL_DIR: %s \n" %self.psf_model_dir)
        file_obj.write("CODE_DIR: %s \n" %self.code_dir)
        file_obj.write("STILTS_DIR: %s \n" %self.stilts_dir)
    def get_dirs( self ):
        """Load the five paths back from directories.cat (row order matters
        and must match the order written by write_dirs)."""
        dtypes = [('NAME', object), ('PATH', object)]
        directories = np.loadtxt('directories.cat',
                                 dtype=dtypes)
        self.__dict__['data_dir'] = directories['PATH'][0]
        self.__dict__['sex_files'] = directories['PATH'][1]
        self.__dict__['psf_model_dir'] = directories['PATH'][2]
        self.__dict__['code_dir'] = directories['PATH'][3]
        self.__dict__['stilts_dir'] = directories['PATH'][4]
    def check_dirs( self ):
        """Raise ValueError if any stored path is not an existing directory."""
        keys = list(self.keys())
        for iKey in keys:
            if not (os.path.isdir(self.__dict__[iKey])):
                raise ValueError('Cant find directory, ensure the path is correct (%s)' % self.__dict__[iKey])
    def keys(self):
        # Expose the attribute names as the mapping keys.
        return list(self.__dict__.keys())
    def __getitem__(self, key):
        # Look items up in the attribute store, not the dict contents.
        return self.__dict__[key]
| StarcoderdataPython |
9763888 | <filename>algovenv/lib/python3.8/site-packages/algosdk/abi/bool_type.py
from typing import Union
from algosdk.abi.base_type import ABIType
from algosdk import error
class BoolType(ABIType):
    """
    Represents a Bool ABI Type for encoding.
    """

    def __init__(self) -> None:
        super().__init__()

    def __eq__(self, other: object) -> bool:
        return isinstance(other, BoolType)

    def __str__(self) -> str:
        return "bool"

    def byte_len(self) -> int:
        # A bool always occupies exactly one byte.
        return 1

    def is_dynamic(self) -> bool:
        # Fixed-width type: never dynamic.
        return False

    def encode(self, value: bool) -> bytes:
        """
        Encode a boolean value

        Args:
            value (bool): value to be encoded

        Returns:
            bytes: encoded bytes ("0x80" if True, "0x00" if False) of the boolean
        """
        assert isinstance(value, bool)
        # True is encoded with the most significant bit set (0x80).
        return b"\x80" if value else b"\x00"

    def decode(self, bytestring: Union[bytes, bytearray]) -> bool:
        """
        Decodes a bytestring to a single boolean.

        Args:
            bytestring (bytes | bytearray): bytestring to be decoded that contains a single boolean, i.e. "0x80" or "0x00"

        Returns:
            bool: boolean from the encoded bytestring
        """
        is_one_byte = (
            isinstance(bytestring, (bytes, bytearray)) and len(bytestring) == 1
        )
        if not is_one_byte:
            raise error.ABIEncodingError(
                "value string must be in bytes and correspond to a bool: {}".format(
                    bytestring
                )
            )
        data = bytes(bytestring)
        if data == b"\x80":
            return True
        if data == b"\x00":
            return False
        raise error.ABIEncodingError(
            "boolean value could not be decoded: {}".format(bytestring)
        )
| StarcoderdataPython |
9779883 | <reponame>andreatomassetti/open-cas-linux<filename>test/functional/tests/incremental_load/test_core_pool.py
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from api.cas import casadm
from api.cas.core import CoreStatus
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.disk_utils import Filesystem
from test_utils.output import CmdException
from test_utils.size import Size, Unit
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_attach_core_pool():
    """
    title: Attaching from core pool on cache load.
    description: |
        Check that CAS has the ability on cache load to attach core devices that were added to
        core device pool if those devices were previously used by cache instance being loaded.
        Prevent attaching core device if they were not previously used.
    pass_criteria:
        - No system crash while reloading CAS modules.
        - Core device was added successfully to core pool.
        - Core device has been successfully attached to cache on cache load.
        - Second core device was not attached to the cache instance.
    """
    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_disk = TestRun.disks["core"]
        # Two core partitions: one that will belong to the cache, one that
        # never does (it must stay detached after the load).
        core_disk.create_partitions([Size(2, Unit.GibiByte), Size(2, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]
        second_core_dev = core_disk.partitions[1]
    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, force=True)
    with TestRun.step("Add core device."):
        # Makes core_dev a known member of this cache's configuration.
        cache.add_core(core_dev)
    with TestRun.step("Stop cache."):
        cache.stop()
    with TestRun.step("Add previously used core device to core pool using --try-add flag."):
        first_core = casadm.try_add(core_dev, cache.cache_id)
    with TestRun.step("Add different core device to core pool using --try-add flag."):
        second_core = casadm.try_add(second_core_dev, cache.cache_id)
    with TestRun.step("Load cache."):
        # Loading must re-attach only the core that was previously part of
        # this cache instance.
        cache = casadm.load_cache(cache_dev)
    with TestRun.step("Check each core status."):
        if first_core.get_status() is not CoreStatus.active:
            TestRun.fail(f"First core status should be active but is {first_core.get_status()}.")
        if second_core.get_status() is not CoreStatus.detached:
            TestRun.fail(
                f"Second core status should be detached but is {second_core.get_status()}.")
    with TestRun.step("Stop cache and remove core from core pool."):
        casadm.remove_all_detached_cores()
        cache.stop()
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_core_pool_exclusive_open():
    """
    title: Exclusive open of core pool.
    description: |
        Check that CAS exclusively opens core devices from core device pool so that the core device
        cannot be used in any other way.
    pass_criteria:
        - No system crash while reloading CAS modules.
        - Core device was added successfully to core pool.
        - Core device is exclusively open in the core pool and cannot be used otherwise.
    """
    with TestRun.step("Prepare core device and create filesystem on it."):
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]
        core_dev.create_filesystem(Filesystem.ext4)
    with TestRun.step("Add core device to core device pool using --try-add flag."):
        # Cache id 1 is a placeholder here — presumably the core is only bound to a
        # real cache when one with this id is loaded; TODO confirm against casadm docs.
        core = casadm.try_add(core_dev, 1)
    with TestRun.step("Check if core status of added core in core pool is detached."):
        status = core.get_status()
        if status is not CoreStatus.detached:
            TestRun.fail(f"Core status should be detached but is {status}.")
    with TestRun.step("Check if it is impossible to add core device from core pool to "
                      "running cache."):
        TestRun.disks["cache"].create_partitions([Size(2, Unit.GibiByte)])
        cache_dev = TestRun.disks["cache"].partitions[0]
        cache = casadm.start_cache(cache_dev, force=True)
        try:
            cache.add_core(core_dev)
            TestRun.fail("Core from core pool added to cache, this is unexpected behaviour.")
        except CmdException:
            TestRun.LOGGER.info("Adding core from core pool to cache is blocked as expected.")
        cache.stop()
    with TestRun.step("Check if it is impossible to start cache with casadm start command on the "
                      "core device from core pool."):
        try:
            cache = casadm.start_cache(core_dev)
            cache.stop()
            TestRun.fail("Cache started successfully on core device from core pool, "
                         "this is unexpected behaviour.")
        except CmdException:
            TestRun.LOGGER.info("Using core device from core pool as cache is blocked as expected.")
    with TestRun.step("Check if it is impossible to make filesystem on the core device "
                      "from core pool."):
        try:
            core_dev.create_filesystem(Filesystem.ext4, force=False)
            TestRun.fail("Successfully created filesystem on core from core pool, "
                         "this is unexpected behaviour.")
        except Exception:
            TestRun.LOGGER.info("Creating filesystem on core device from core pool is "
                                "blocked as expected.")
    with TestRun.step("Check if it is impossible to mount the core device from core pool."):
        try:
            core_dev.mount("/mnt")
            TestRun.fail("Successfully mounted core pool device, this is unexpected behaviour.")
        except Exception:
            # NOTE(review): "form" (for "from") typo kept verbatim in the log message below.
            TestRun.LOGGER.info("Mounting core device form core pool is blocked as expected.")
    with TestRun.step("Remove core from core pool."):
        casadm.remove_all_detached_cores()
| StarcoderdataPython |
4842317 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import os
import nn_utils.network_utils as network_utils
import gen_utils.seed_generator as seed_generator
import gen_utils.sequence_generator as sequence_generator
from data_utils.parse_files import *
import config.nn_config as nn_config
# Flat script: load a trained LSTM, seed it with a training example, and write
# a generated audio sequence out as a WAV file.
config = nn_config.get_neural_net_configuration()
sample_frequency = config['sampling_frequency']
inputFile = config['model_file']
model_basename = config['model_basename']
cur_iter = 0
model_filename = "bestmodel"#model_basename + str(cur_iter)
output_filename = './generated_song.wav'
#Load up the training data
print ('Loading training data')
#X_train is a tensor of size (num_train_examples, num_timesteps, num_frequency_dims)
#y_train is a tensor of size (num_train_examples, num_timesteps, num_frequency_dims)
#X_mean is a matrix of size (num_frequency_dims,) containing the mean for each frequency dimension
#X_var is a matrix of size (num_frequency_dims,) containing the variance for each frequency dimension
X_train = np.load(inputFile + '_x.npy')
y_train = np.load(inputFile + '_y.npy')
X_mean = np.load(inputFile + '_mean.npy')
X_var = np.load(inputFile + '_var.npy')
print ('Finished loading training data')
#Figure out how many frequencies we have in the data
freq_space_dims = X_train.shape[2]
hidden_dims = config['hidden_dimension_size']
#Creates a lstm network
model = network_utils.create_lstm_network(num_frequency_dimensions=freq_space_dims, num_hidden_dimensions=hidden_dims)
X=X_train
y=y_train
print(X.shape)
#Load existing weights if available
if os.path.isfile(model_filename):
    # The zero-epoch fit presumably forces the model to be built before the
    # saved weights are loaded — TODO confirm; `nb_epoch` is the legacy Keras 1.x
    # keyword (renamed to `epochs` in Keras 2).
    history = model.fit(X, y, batch_size=8, nb_epoch=0, verbose=1,validation_split=0.0)
    model.load_weights(model_filename)
    history = model.fit(X, y, batch_size=8, nb_epoch=1, verbose=1,validation_split=0.0)
else:
    # NOTE(review): generation still proceeds below even when no weights were
    # loaded, i.e. with an untrained network.
    print('Model filename ' + model_filename + ' could not be found!')
print ('Starting generation!')
#Here's the interesting part
#We need to create some seed sequence for the algorithm to start with
#Currently, we just grab an existing seed sequence from our training data and use that
#However, this will generally produce verbatum copies of the original songs
#In a sense, choosing good seed sequences = how you get interesting compositions
#There are many, many ways we can pick these seed sequences such as taking linear combinations of certain songs
#We could even provide a uniformly random sequence, but that is highly unlikely to produce good results
seed_len = 1
seed_seq = seed_generator.generate_copy_seed_sequence(seed_length=seed_len, training_data=X_train)
max_seq_len = 30 #Defines how long the final song is. Total song length in samples = max_seq_len * example_len
output = sequence_generator.generate_from_seed(model=model, seed=seed_seq,
    sequence_length=max_seq_len, data_variance=X_var, data_mean=X_mean)
print ('Finished generation!')
#Save the generated sequence to a WAV file
save_generated_example(output_filename, output, sample_frequency=sample_frequency)
4892582 | <filename>modules/info.py
#!/usr/bin/env python
"""
info.py - Phenny Information Module
Copyright 2008, <NAME>, inamidst.com
Licensed under the Eiffel Forum License 2.
http://inamidst.com/phenny/
"""
def doc(phenny, input):
    """Shows a command's documentation, and possibly an example."""
    name = input.group(1)
    name = name.lower()
    # FIX: dict.has_key() is deprecated (and removed in Python 3); use `in`,
    # which is equivalent under Python 2 as well.
    if name in phenny.doc:
        phenny.reply(phenny.doc[name][0])
        if phenny.doc[name][1]:
            phenny.say('e.g. ' + phenny.doc[name][1])
# Raw string for the regex (consistent with help.rule below); value unchanged.
doc.rule = ('$nick', r'(?i)(?:help|doc) +([A-Za-z]+)(?:\?+)?$')
doc.example = '$nickname: doc tell?'
doc.priority = 'low'
def commands(phenny, input):
    """Privately list the names of every command the bot knows."""
    # This function only works in private message
    if input.sender.startswith('#'): return
    # FIX: dict.iterkeys() is Python-2-only; sorted() iterates the keys directly.
    names = ', '.join(sorted(phenny.doc))
    phenny.say('Commands I recognise: ' + names + '.')
    phenny.say(("For help, do '%s: help example?' where example is the " +
                "name of the command you want help for.") % phenny.nick)
commands.commands = ['commands']
commands.priority = 'low'
def help(phenny, input):
    """Reply with a short self-description and a pointer to the command list.

    Note: intentionally shadows the builtin `help` — the bot framework looks
    commands up by function name.
    """
    owner = phenny.config.owner
    response = (
        'Hi, I\'m a bot. Say ".commands" to me in private for a list '
        'of my commands, or see http://inamidst.com/phenny/ for more '
        'general details. My owner is %s.' % owner
    )
    phenny.reply(response)
help.rule = ('$nick', r'(?i)help(?:[?!]+)?$')
help.priority = 'low'
def stats(phenny, input):
    """Show information on command usage patterns."""
    commands = {}
    users = {}
    channels = {}
    # Bookkeeping pseudo-commands that should not appear in the ranking.
    ignore = set(['f_note', 'startup', 'message', 'noteuri'])
    for (name, user), count in phenny.stats.items():
        if name in ignore: continue
        if not user: continue
        if not user.startswith('#'):
            # Private-message usage: attributed to the user only.
            users[user] = users.get(user, 0) + count
        else:
            # Channel usage: counts toward both the command and the channel.
            commands[name] = commands.get(name, 0) + count
            channels[user] = channels.get(user, 0) + count
    # FIX: dict.iteritems() is Python-2-only (AttributeError on Python 3);
    # .items() behaves identically here.  The try/except KeyError accumulation
    # above was also replaced by the single-lookup dict.get() idiom.
    comrank = sorted([(b, a) for (a, b) in commands.items()], reverse=True)
    userank = sorted([(b, a) for (a, b) in users.items()], reverse=True)
    charank = sorted([(b, a) for (a, b) in channels.items()], reverse=True)
    # most heavily used commands
    creply = 'most used commands: '
    for count, command in comrank[:10]:
        creply += '%s (%s), ' % (command, count)
    phenny.say(creply.rstrip(', '))
    # most heavy users
    reply = 'power users: '
    for count, user in userank[:10]:
        reply += '%s (%s), ' % (user, count)
    phenny.say(reply.rstrip(', '))
    # most heavy channels
    chreply = 'power channels: '
    for count, channel in charank[:3]:
        chreply += '%s (%s), ' % (channel, count)
    phenny.say(chreply.rstrip(', '))
stats.commands = ['stats']
stats.priority = 'low'
if __name__ == '__main__':
    # Python 2 print *statement* — this line is a syntax error under Python 3.
    print __doc__.strip()
| StarcoderdataPython |
1683231 | DATABRIDGE_START = "c_bridge_start"
# Event-name constants for the contracting data bridge; the string values are
# the identifiers written to structured logs ("c_bridge_*" prefix throughout).
DATABRIDGE_RESTART = "c_bridge_restart"
DATABRIDGE_RECONNECT = "c_bridge_reconnect"
DATABRIDGE_GET_CREDENTIALS = "c_bridge_get_tender_credentials"
DATABRIDGE_GOT_CREDENTIALS = "c_bridge_got_tender_credentials"
DATABRIDGE_FOUND_MULTILOT_COMPLETE = "c_bridge_found_multilot"
DATABRIDGE_FOUND_NOLOT_COMPLETE = "c_bridge_found_nolot"
DATABRIDGE_CONTRACT_TO_SYNC = "c_bridge_contract_to_sync"
DATABRIDGE_CONTRACT_EXISTS = "c_bridge_contract_exists"
DATABRIDGE_COPY_CONTRACT_ITEMS = "c_bridge_prepare_items"
DATABRIDGE_MISSING_CONTRACT_ITEMS = "c_bridge_missing_c_items"
DATABRIDGE_GET_EXTRA_INFO = "c_bridge_get_credentials"
DATABRIDGE_GOT_EXTRA_INFO = "c_bridge_got_credentials"
DATABRIDGE_CREATE_CONTRACT = "c_bridge_create_contract"
DATABRIDGE_CONTRACT_CREATED = "c_bridge_contract_created"
DATABRIDGE_RETRY_CREATE = "c_bridge_create_retry"
DATABRIDGE_TENDER_PROCESS = "c_bridge_tender_process"
DATABRIDGE_SKIP_NOT_MODIFIED = "c_bridge_not_modified"
DATABRIDGE_SYNC_SLEEP = "c_bridge_sleep"
DATABRIDGE_SYNC_RESUME = "c_bridge_resume"
DATABRIDGE_WORKER_DIED = "c_bridge_worker_died"
DATABRIDGE_EXCEPTION = "c_bridge_exception"
DATABRIDGE_CACHED = "c_bridge_cached"
DATABRIDGE_INFO = "c_bridge_info"
| StarcoderdataPython |
339434 | <gh_stars>0
from django import forms
from django.contrib.auth.models import User
from django.db.utils import OperationalError
from django.utils.translation import gettext_lazy as _
from .helper import generate_email, validate_emails
class DraftEmailForm(forms.Form):
    """
    Form for composing a draft email to staff and/or external recipients.

    Field notes:
    other_recipients is inputted with each email taking up a line and then seperated
    by a comma and new line.
    """
    # NOTE(review): the choices below are computed ONCE at import time, so users
    # created after startup never appear; the except-branch exists to survive
    # `User.objects.all()` failing before migrations have run (OperationalError).
    # A lazy/callable choices source would avoid both issues — TODO confirm and fix.
    try:
        everyone = [(user.pk, user) for user in list(User.objects.all())]
        staff_recipients = forms.MultipleChoiceField(choices=everyone, required=False, label=_('スタッフからの受取人'))
    except OperationalError:
        staff_recipients = forms.MultipleChoiceField(choices=[(None, None)], required=False, label=_('スタッフからの受取人'))
    all_staff = forms.BooleanField(initial=False, required=False, label=_('全スタッフ'))
    other_recipients = forms.CharField(widget=forms.Textarea(attrs={'rows':4}), required=False, validators=[validate_emails], label=_('他の受取人'))
    subject = forms.CharField(max_length=100, label=_('件名'))
    content = forms.CharField(widget=forms.Textarea, label=_('内容'))
    admin_email = forms.BooleanField(initial=False, required=False, label=_('スタッフ専用'))
    def __init__(self, *args, **kwargs):
        """Build the form and attach usage help text to other_recipients."""
        super().__init__(*args, **kwargs)
        # NOTE(review): original indentation of this f-string was lost; the
        # whitespace inside the triple-quoted literal is part of the help text.
        self.fields['other_recipients'].help_text = (
            _('各メールは、最後にコンマを入れて、別々の行に入れてください。') +
            f"""
            e.g.
            <EMAIL>,*{_('NEWLINE')}*
            <EMAIL>,*{_('NEWLINE')}*
            etc.
            """
        )
    def save_draft(self, request):
        """Persist the cleaned form data as a draft via generate_email()."""
        form = generate_email(request, self.cleaned_data)
        form.save()
| StarcoderdataPython |
1677752 | #!/usr/bin/env python
############################################################################
# Copyright (C) by <NAME> #
# #
# You can redistribute and/or modify this program under the #
# terms of the SeisComP Public License. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# SeisComP Public License for more details. #
############################################################################
import sys
import seiscomp3.Client
class SendJournal(seiscomp3.Client.Application):
    """SeisComP client that sends a single journal entry to the EVENT group.

    Positional CLI arguments: {objectID} {action} [parameters].
    """
    def __init__(self, argc, argv):
        seiscomp3.Client.Application.__init__(self, argc, argv)
        # No database needed; messaging only, on the EVENT group.
        self.setDatabaseEnabled(False, False)
        self.setMessagingEnabled(True)
        self.setMessagingUsername("")
        self.setPrimaryMessagingGroup("EVENT")
    def init(self):
        """Validate the command line; require at least objectID and action."""
        if not seiscomp3.Client.Application.init(self): return False
        self.params = self.commandline().unrecognizedOptions()
        if len(self.params) < 2:
            sys.stderr.write(self.name() + " [opts] {objectID} {action} [parameters]\n")
            return False
        return True
    def run(self):
        """Build the JournalEntry notifier message and send it."""
        msg = seiscomp3.DataModel.NotifierMessage()
        entry = seiscomp3.DataModel.JournalEntry()
        entry.setCreated(seiscomp3.Core.Time.GMT())
        entry.setObjectID(self.params[0])
        entry.setSender(self.author())
        entry.setAction(self.params[1])
        sys.stderr.write("Sending entry (" + entry.objectID() + "," + entry.action() + ")\n")
        # Optional third positional argument carries the action parameters.
        if len(self.params) > 2:
            entry.setParameters(self.params[2])
        n = seiscomp3.DataModel.Notifier(seiscomp3.DataModel.Journaling.ClassName(), seiscomp3.DataModel.OP_ADD, entry)
        msg.attach(n)
        self.connection().send(msg)
        return True
def main(argc, argv):
    """Instantiate the SendJournal application and execute it, returning its exit code."""
    return SendJournal(argc, argv)()
if __name__ == "__main__":
    sys.exit(main(len(sys.argv), sys.argv))
| StarcoderdataPython |
5194760 | <filename>dataObjectClass.py
class dataObject:
    """
    One record of the database.  Its values live in the ``attributes``
    vector, which is pre-sized to the requested length and zero-filled.
    """
    def __init__(self, numberOfAttributes):
        self.attributes = [0.0 for _ in range(numberOfAttributes)]
    def setAttributes(self, attributesIn):
        """Copy the given values into the front of the attributes vector."""
        for position, value in enumerate(attributesIn):
            self.attributes[position] = value
class Database:
    """
    A container class for the dataObjects, keeping both the original objects
    and a flat row-major copy of all their attribute values.
    """
    def __init__(self, database_in):
        self.datapoints = database_in
        self.numElements = len(database_in)
        self.numAttributes = len(database_in[0].attributes)
        # Flatten every datapoint's attribute vector into one row-major list.
        self.pureData = [
            point.attributes[column]
            for point in self.datapoints
            for column in range(self.numAttributes)
        ]
    def getVectorWithAttributeNr(self, x):
        """Return a copy of the attribute vector of the x-th datapoint."""
        start = x * self.numAttributes
        return list(self.pureData[start:start + self.numAttributes])
9687957 | <filename>LintCode/DataStructure/20200106_158_valid_anagram.py
# -*-coding:utf-8 -*-
#Reference:**********************************************
# @Time : 2020-01-07 00:33
# @Author : <NAME>
# @File : 20200106_158_valid_anagram.py
# @User : liyihao
# @Software : PyCharm
# @Description: Write a method anagram(s,t) to decide if two strings are anagrams or not.
#Reference:**********************************************
"""
What is Anagram?
Two strings are anagram if they can be the same after change the order of characters.
Input: s = "ab", t = "ab"
Output: true
Input: s = "abcd", t = "dcba"
Output: true
Input: s = "ac", t = "ab"
Output: false
Challenge
O(n) time, O(1) extra space
"""
import collections
class Solution:
    def isAnagram(self, s: str, t: str) -> bool:
        """Return True when t uses exactly the same characters as s."""
        # Strings of different lengths can never be anagrams; bail out early.
        if len(s) != len(t):
            return False
        # Equal character multisets <=> anagram.
        return collections.Counter(s) == collections.Counter(t)
if __name__ == '__main__':
    # Ad-hoc smoke test: "abcd" vs "dcba" should print True.
    s = Solution()
    st = 'abcd'
    t = 'dcba'
    print(s.isAnagram(st, t))
| StarcoderdataPython |
3279361 | # MIT License
#
# Copyright (c) 2018 <NAME>, <NAME>, <EMAIL>, <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import json
import logging
import os
import shutil
from time import time
from bitcoinetl.jobs.export_blocks_job import ExportBlocksJob
from bitcoinetl.jobs.enrich_transactions import EnrichTransactionsJob
from bitcoinetl.jobs.exporters.blocks_and_transactions_item_exporter import blocks_and_transactions_item_exporter
from bitcoinetl.rpc.bitcoin_rpc import BitcoinRpc
from blockchainetl.file_utils import smart_open
from blockchainetl.logging_utils import logging_basic_config
from blockchainetl.misc_utils import filter_items
from blockchainetl.thread_local_proxy import ThreadLocalProxy
logging_basic_config()
logger = logging.getLogger('export_all')
def export_all(chain, partitions, output_dir, provider_uri, max_workers, batch_size, enrich):
    """Export blocks and transactions for every partition.

    Each partition is a tuple ``(start_block, end_block, partition_dir[, date])``;
    when a trailing date is present, the exported JSON files are additionally
    filtered down to items whose timestamp falls on that UTC day.
    """
    for batch_start_block, batch_end_block, partition_dir, *args in partitions:
        # # # start # # #
        start_time = time()
        padded_batch_start_block = str(batch_start_block).zfill(8)
        padded_batch_end_block = str(batch_end_block).zfill(8)
        block_range = '{padded_batch_start_block}-{padded_batch_end_block}'.format(
            padded_batch_start_block=padded_batch_start_block,
            padded_batch_end_block=padded_batch_end_block,
        )
        file_name_suffix = '{padded_batch_start_block}_{padded_batch_end_block}'.format(
            padded_batch_start_block=padded_batch_start_block,
            padded_batch_end_block=padded_batch_end_block,
        )
        # # # blocks_and_transactions # # #
        blocks_output_dir = '{output_dir}/blocks{partition_dir}'.format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(blocks_output_dir), exist_ok=True)
        transactions_output_dir = '{output_dir}/transactions{partition_dir}'.format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(transactions_output_dir), exist_ok=True)
        blocks_file = '{blocks_output_dir}/blocks_{file_name_suffix}.json'.format(
            blocks_output_dir=blocks_output_dir,
            file_name_suffix=file_name_suffix,
        )
        transactions_file = '{transactions_output_dir}/transactions_{file_name_suffix}.json'.format(
            transactions_output_dir=transactions_output_dir,
            file_name_suffix=file_name_suffix,
        )
        enriched_transactions_file = '{transactions_output_dir}/enriched_transactions_{file_name_suffix}.json'.format(
            transactions_output_dir=transactions_output_dir,
            file_name_suffix=file_name_suffix,
        )
        logger.info('Exporting blocks {block_range} to {blocks_file}'.format(
            block_range=block_range,
            blocks_file=blocks_file,
        ))
        logger.info('Exporting transactions from blocks {block_range} to {transactions_file}'.format(
            block_range=block_range,
            transactions_file=transactions_file,
        ))
        job = ExportBlocksJob(
            chain=chain,
            start_block=batch_start_block,
            end_block=batch_end_block,
            batch_size=batch_size,
            bitcoin_rpc=ThreadLocalProxy(lambda: BitcoinRpc(provider_uri)),
            max_workers=max_workers,
            item_exporter=blocks_and_transactions_item_exporter(blocks_file, transactions_file),
            export_blocks=blocks_file is not None,
            export_transactions=transactions_file is not None)
        job.run()
        if enrich:
            # BUG FIX: the original bound the opened file object to the name
            # `transactions_file`, shadowing the path string; the date-filtering
            # step below then operated on a closed file object instead of the
            # file path.  Bind the handle to a distinct name.
            with smart_open(transactions_file, 'r') as transactions_fh:
                job = EnrichTransactionsJob(
                    transactions_iterable=(json.loads(transaction) for transaction in transactions_fh),
                    batch_size=batch_size,
                    bitcoin_rpc=ThreadLocalProxy(lambda: BitcoinRpc(provider_uri)),
                    max_workers=max_workers,
                    item_exporter=blocks_and_transactions_item_exporter(None, enriched_transactions_file),
                    chain=chain
                )
                job.run()
        if args is not None and len(args) > 0:
            date = args[0]
            logger.info('Filtering blocks {blocks_file} by date {date}'.format(
                blocks_file=blocks_file,
                date=date,
            ))

            def filter_by_date(item, field):
                # Keep only items whose timestamp falls on `date` (UTC calendar day).
                return datetime.datetime.fromtimestamp(item[field]).astimezone(datetime.timezone.utc) \
                    .strftime('%Y-%m-%d') == date.strftime('%Y-%m-%d')

            filtered_blocks_file = blocks_file + '.filtered'
            filter_items(blocks_file, filtered_blocks_file, lambda item: filter_by_date(item, 'timestamp'))
            shutil.move(filtered_blocks_file, blocks_file)
            logger.info('Filtering transactions {transactions_file} by date {date}'.format(
                transactions_file=transactions_file,
                date=date,
            ))
            filtered_transactions_file = transactions_file + '.filtered'
            filter_items(transactions_file, filtered_transactions_file, lambda item: filter_by_date(item, 'block_timestamp'))
            shutil.move(filtered_transactions_file, transactions_file)
        # # # finish # # #
        end_time = time()
        time_diff = round(end_time - start_time, 5)
        logger.info('Exporting blocks {block_range} took {time_diff} seconds'.format(
            block_range=block_range,
            time_diff=time_diff,
        ))
| StarcoderdataPython |
174961 | <reponame>rkislov/122callcenter
from django.contrib import admin
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from .models import Subject, Sub_subject, Patient, Manipulation, City, Hospital, Call_result, Address, Call, Journal
class CallResource(resources.ModelResource):
    """django-import-export resource describing how Call rows are (de)serialized."""
    class Meta:
        model = Call
class CallAdmin(ImportExportModelAdmin):
    """Admin for Call records with import/export support via CallResource."""
    # Columns shown in the change list.
    list_display = (
        'pk',
        'date',
        'call_number',
        'question',
        'hospital',
        'call_result',
        'manipulation',
        'registration_covid_date',
        'subject',
        'sub_subject',
    )
    # Sidebar filters.  FIX: the original assigned `list_filter` twice; only the
    # second assignment ever took effect, so that one is kept (and the dead
    # first assignment plus commented-out search_fields block were removed).
    list_filter = (
        'date',
        'subject',
        'sub_subject',
        'hospital',
        'manipulation',
        'call_operator',
    )
    # Shown for empty values in every column ("-пусто-" = "-empty-").
    empty_value_display = '-пусто-'
    search_fields = [
        'call_number',
    ]
    resource_class = CallResource
class HospitalAdmin(admin.ModelAdmin):
    """Admin for Hospital records."""
    # Columns shown in the hospital change list.
    list_display = (
        'pk',
        'name',
        'email',
    )
# Register every call-center model with the default admin site; Hospital and
# Call get the customized admin classes defined above.
admin.site.register(Subject)
admin.site.register(Sub_subject)
admin.site.register(Patient)
admin.site.register(Manipulation)
admin.site.register(City)
admin.site.register(Hospital, HospitalAdmin)
admin.site.register(Call_result)
admin.site.register(Address)
admin.site.register(Journal)
admin.site.register(Call, CallAdmin)
| StarcoderdataPython |
32943 | from app import app
from flask import render_template, flash, redirect, url_for
from app.forms import LoginForm
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page (served at both '/' and '/index')."""
    return render_template('index.html')
@app.route('/contato', methods=['GET','POST'])
def contato():
    """Render the contact form; on a valid POST, flash a success message and redirect home."""
    form = LoginForm()
    if form.validate_on_submit():
        # FIX: flash() returns None, so binding its result to `mensagem` (as the
        # original did) was a useless assignment; it only queues the message.
        flash('A mensagem foi enviada com sucesso.')
        return redirect('/index')
    return render_template('contato.html', form=form)
@app.route('/features')
def features():
    """Render the features page."""
    return render_template('features.html')
| StarcoderdataPython |
3516538 |
import os
import numpy as np
import argparse
def compare_tables(my_checkpoint_path, ta_checkpoint_path):
    """Compare two saved ``.npy`` tables element by element.

    Prints every coordinate where the arrays disagree together with both
    values, and returns True when the tables match exactly, False otherwise.

    FIX: the original walked np.where() output by hand and only reset its
    scratch `location` list on one branch, and it compared with `== False`;
    np.argwhere() yields one coordinate row per differing element directly.
    """
    my_checkpoint = np.load(my_checkpoint_path)
    ta_checkpoint = np.load(ta_checkpoint_path)
    mismatches = np.argwhere(my_checkpoint != ta_checkpoint)
    for coords in mismatches:
        location = tuple(coords)
        print("\nAt : " + str(list(location)) + " , the values are: ")
        print("Your's: " + str(my_checkpoint[location]))
        print("Solution's: " + str(ta_checkpoint[location]))
    return len(mismatches) == 0
def main():
    """Parse CLI arguments and compare the user's Q/N tables against the TA's.

    The TA reference tables are expected under ./data/checkpoint{1,2,3}[_N].npy.
    """
    parser = argparse.ArgumentParser(description='CS440 MP7 Snake Table Comparator')
    parser.add_argument('--test', dest = 'test', type=int, required=True, help='local test number (either 1, 2, or 3)')
    parser.add_argument('--checkpoint', dest = 'checkpoint', type=str, required=True, help='path to checkpoint Q table')
    parser.add_argument('--checkpoint-n', dest = 'checkpoint_n', type=str, required=True, help='path to checkpoint N table file')
    args = parser.parse_args()
    if args.test in [1, 2, 3]:
        checkpoint_q = args.checkpoint
        checkpoint_n = args.checkpoint_n
        ta_checkpoint_q = './data/checkpoint' + str(args.test) + '.npy'
        ta_checkpoint_n = './data/checkpoint' + str(args.test) + '_N.npy'
        if os.path.exists(checkpoint_q) and os.path.exists(checkpoint_n):
            # compare_tables prints the differences itself; return values unused here.
            compare_tables(checkpoint_q, ta_checkpoint_q)
            compare_tables(checkpoint_n, ta_checkpoint_n)
        else:
            print('Please specify a valid path to both saved tables.')
    else:
        print('Please specify valid test number 1, 2, or 3. ')
if __name__ == "__main__":
    # CLI entry point.
    main()
1731380 | import pickle
import sys
from abc import abstractmethod
from threading import Lock
import logging
import zlib
from PyQt5.QtCore import QByteArray, QDataStream, QIODevice, QObject, pyqtSignal
from PyQt5.QtNetwork import QTcpSocket, QAbstractSocket
from PyQt5.QtTest import QSignalSpy
from PyQt5.QtWidgets import qApp
COMPRESS = True
class Transaction(QObject):
"""Use parent class for server and client transactions. Abstracts TCPSocket-Access"""
# empirical benchmarks using the echo command (send + recieve) shows following performance characteristics:
# - a server can handle thousands of messages on one open socket per second, more CPU performance --> faster server
# - as long as you don't saturate your network connection by increasing the message size there
# should be no problems at all
# - increasing the message size beyond the saturation of the network connection may lead to message drops
# and crash the server
#
# some examples:
# - a server on a laptop connected over a 2-3 mb/s wifi would crash if the message size is greater then aprox 6mb
# - the same laptop with an ethernet cable can handle up to aprox 20 mb per message
# - a server over localhost and the loopback interface could handle up to aprox 100mb
# TODO longer testing
# TODO close and reopen the socket after each / after n messages
# TODO investigate message drops and server crashed
# TODO maybe rewrite the class to split messages over certain size
# splitting messages refers to writing at most n bytes to the QDataStream and flushing regulary
# alternatively split the message altogether
# this depends if the QTcpSocket or the QDataStream is the bottleneck
bufferReady = pyqtSignal()
socketClosed = pyqtSignal()
_stopWaiting = pyqtSignal()
_staging = pyqtSignal()
def __init__(self):
    """Set up an unconnected transaction with empty send/receive state."""
    # QObject.__init__(self)
    super(Transaction, self).__init__()
    self.tcpsocket = None  # type: QTcpSocket
    self.messageBuffer = QByteArray()  # bytes of the message currently being received
    self.recieveBuffer = QByteArray()  # (sic) not used by the methods visible in this chunk
    self.messageOutput = list()  # fully decoded messages awaiting consumption
    self.messageSize = 0  # declared size of the in-flight message (from its UInt32 header)
    self.isRecieving = False  # (sic) True while a partially-read message is buffered
    # Dispatch each completed message to the subclass handler.
    self.bufferReady.connect(self.processMessage)
    self._isConnected = False
    self._hasError = False
    self.lock = Lock()  # guards messageOutput (socket thread vs. readers)
def connect(self, host, port):
    """Open a TCP connection to host:port and block until it is established.

    Note: this shadows no Qt signal API in PyQt5, but the name deliberately
    mirrors socket-style connect(); error state is reset before each attempt.
    """
    logging.debug("Connecting to %s:%i", host, port)
    self._hasError = False
    self.tcpsocket = QTcpSocket()
    self.tcpsocket.error.connect(self.processError)
    self.tcpsocket.connected.connect(self._connected)
    self.tcpsocket.connected.connect(lambda: self._stopWaiting.emit())
    self.tcpsocket.readyRead.connect(self.receive)
    self.tcpsocket.connectToHost(host, port)
    # waitForConnection() is not defined in this chunk — presumably an
    # asynchronous wait on _stopWaiting; TODO confirm in the rest of the file.
    self.waitForConnection()
def processError(self, error):
    """
    Process the errors on the tcpsocket
    See: http://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum
    """
    logging.debug("Error %i", error)
    if error == 0:
        # Remote Host can't be resolved
        self.close()
    elif error == 1:
        # Remote Host closed the connection.
        self.close()
    else:
        # TODO: somehow handle errors
        # Code 7 is detected far too late, it waits till the socket itself times out
        errortxt = ""
        for k in dir(QAbstractSocket):
            v = getattr(QAbstractSocket, k)
            # BUG FIX: the original compared `str(v) is str(error)`. `is` tests
            # object identity, and two freshly built strings are distinct
            # objects, so the enum-name lookup (almost) never matched and
            # errortxt stayed empty. Compare by value instead.
            if type(v) is type(error) and str(v) == str(error):
                errortxt = k
        sys.stderr.write("QTcpSocket@Transaction Error Code: " + str(error) + " " + errortxt + "\n")
        self.close()
    # Any socket error leaves the transaction disconnected and flagged, and
    # wakes up anyone blocked in an asynchronous wait.
    self._isConnected = False
    self._hasError = True
    self._stopWaiting.emit()
def _connected(self):
    """Slot for QTcpSocket.connected: mark this transaction as connected and error-free."""
    logging.debug("Socket connected.")
    self._isConnected = True
    self._hasError = False
def acceptClient(self, socket):
    """Adopt an externally accepted client socket (server side) and wire its signals."""
    logging.debug("Client socket connected.")
    self.tcpsocket = socket
    self._isConnected = True
    self._hasError = False
    self.tcpsocket.error.connect(self.processError)
    self.tcpsocket.readyRead.connect(self.receive)
def receive(self):
    """Slot for readyRead: read the 4-byte UInt32 size header, then buffer the payload.

    A message may arrive split over many readyRead events; state is kept in
    isRecieving / messageSize / messageBuffer between calls.
    """
    logging.debug("Receiving message")
    while self.tcpsocket.bytesAvailable() > 0:
        # while some unread bytes available
        stream = QDataStream(self.tcpsocket)
        stream.setVersion(QDataStream.Qt_5_3)
        if not self.isRecieving:  # For a new Message get the Message Size
            # BUG FIX: the original tested `self.tcpsocket.bytesAvailable >= 4`,
            # comparing the *bound method* object instead of calling it, so the
            # guard never behaved as a byte-count check. Call the method.
            if self.tcpsocket.bytesAvailable() >= 4:  # since reading UInt32
                self.messageSize = stream.readUInt32()
                logging.debug("Start of new message of size %i", self.messageSize)
                self.isRecieving = True
            else:
                break
        else:  # For a continued message keep reading until whole message is in buffer
            s = min(self.tcpsocket.bytesAvailable(), self.messageSize - len(self.messageBuffer))
            self.messageBuffer.append(stream.readRawData(s))
            if len(self.messageBuffer) == self.messageSize:
                logging.debug("Finished receiving message of size %i", self.messageSize)
                self._processBuffer()
def send(self, msg):
    """Send first the message size, then the message in Pickle"""
    if self.isConnected():
        pmsg = pickle.dumps(msg)
        if COMPRESS:
            pmsg = zlib.compress(pmsg)
        # Frame: UInt32 payload length followed by the raw pickled payload.
        buffer = QByteArray()
        stream = QDataStream(buffer, QIODevice.WriteOnly)
        stream.setVersion(QDataStream.Qt_5_3)
        stream.writeUInt32(len(pmsg))
        stream.writeRawData(pmsg)
        bytesWritten = self.tcpsocket.write(buffer)
        self.tcpsocket.flush()
        self.tcpsocket.waitForBytesWritten()
        # qApp.processEvents() # send data immediately and don't wait for next mainloop
        logging.debug("Bytes written: %i", bytesWritten)
        if bytesWritten > 0:
            return True
    else:
        logging.debug("Message not send. Not connected")
    # NOTE(review): original indentation was lost; this final `return False` is
    # reconstructed at function level so both the not-connected path and a
    # failed write report falsy — confirm against the upstream source.
    return False
def isConnected(self):
    """Return True while the underlying socket is believed to be connected."""
    return self._isConnected
def close(self):
    """close this socket and fire signal socketClosed to notify slots to delete this obj"""
    if self.tcpsocket:
        logging.debug("socket closing")
        self._isConnected = False
        self.tcpsocket.close()
        # self.tcpsocket = None
        self.socketClosed.emit()
def _processBuffer(self):
    """If message is complete: read Pickle and move to output buffer"""
    # NOTE(review): str() on a QByteArray only yields the raw bytes under
    # Python 2; under Python 3 this would produce a repr string and break
    # unpickling — the surrounding code appears to target Python 2. Confirm.
    pmsg = str(self.messageBuffer)
    if COMPRESS:
        pmsg = zlib.decompress(pmsg)
    msg = pickle.loads(pmsg)
    logging.debug("Message received: %s", str(msg))
    # messageOutput is shared with readers on other threads — append under lock.
    self.lock.acquire()
    self.messageOutput.append(msg)
    self.lock.release()
    # reset state
    self.isRecieving = False
    self.messageBuffer.clear()
    self._stopWaiting.emit()
    self.bufferReady.emit()
@abstractmethod
def processMessage(self):
    """Handle one buffered message; implemented by the server/client subclasses."""
    pass
def asyncRead(self, timeout=5000, staging=False, attr=None):
    """For direct reading access, pop the first message in buffer

    Explanation:
    timeout: stop after waiting for 5s and return NONE object.
        this is usefull if the server does not answer (e.g. crash)
        or the networkconnection breaks
    staging: If True the asynchronous wait will wait for a the staging signal instead of the messageOutput signal.
        the staging signal should be used if you have a class which uses both: a signal/slot based processMessage
        methode and asyncRead calls. This gives the processMessage calls priority.
        !!!Don't forget to call the stage() method if your processMessage ignores the message!!!
    attr: In some cases you have multiple asyncRead calls on the same transaction Item.
        This may lead to preemtive behaviour resulting in reading the wrong messages and key errors.
        (e.g. READ#1 reads messages destined for READ#2)
        By providing a tupel (attr, value) the message is parsed and only poped if the value matches.
        Not matching messages are appended back into the buffer and other waiting asyncReads are notified
    """
    if self.isConnected():
        turns = 0
        while True:
            turns += 1
            # Hard cap so a message that never matches `attr` cannot spin forever.
            if turns > 100:
                logging.debug("Timeout on read after 100 iterations")
                return None
            result = True
            logging.debug("MessageOutput size: %i", len(self.messageOutput))
            # Only block when there is neither a message nor a matching one.
            if len(self.messageOutput) == 0 and not self.containsAttr(attr):
                logging.debug("Waiting for new message.")
                if not staging:
                    # spy = QSignalSpy(self._stopWaiting)
                    spy = QSignalSpy(self.bufferReady)
                else:
                    spy = QSignalSpy(self._staging)
                result = spy.wait(timeout)  # Asynchronous wait, Timeout 5s
            if result and not self._hasError:
                self.lock.acquire()
                if len(self.messageOutput) == 0:
                    # Another reader drained the buffer between signal and lock.
                    self.lock.release()
                    logging.debug("Race condition triggered. Wait for next message.")
                    continue
                found = False
                result = self.messageOutput[0]
                if attr is not None:
                    # Scan for the first message whose attr[0] equals attr[1].
                    for msg in self.messageOutput:
                        if attr[0] in msg:
                            if attr[1] == msg[attr[0]]:
                                found = True
                                result = msg
                                break
                if found or attr is None:
                    del self.messageOutput[self.messageOutput.index(result)]
                    logging.debug("MessageOutput size: %i", len(self.messageOutput))
                    self.lock.release()
                    # Guarantee 'error'/'status' keys so callers can rely on them.
                    if "error" not in result:
                        result["error"] = []
                    if "status" not in result:
                        result["status"] = True
                    return result
                else:
                    # Not our message: wake the other waiting readers instead.
                    logging.debug("Message not found. Release of lock.")
                    if attr is not None:
                        logging.debug("Miss '%s' with value '%s'", str(attr[0]), str(attr[1]))
                    self.lock.release()
                    self.bufferReady.emit()
                    qApp.processEvents()
            else:
                logging.debug("Nothing to read.")
                break
    else:
        logging.debug("Not connected. Did not read.")
    return None
def stage(self):
    """Emit the staging signal to hand a message over to waiting asyncRead calls."""
    logging.debug("Stage!")
    self._staging.emit()
def waitForConnection(self):
    """sometimes it takes some time to establish the connection. wait half a second"""
    logging.debug("Establishing connection")
    if self.tcpsocket is not None:
        result = True
        if not self.isConnected() and not self._hasError:
            # spy = QSignalSpy(self.tcpsocket.connected)
            spy = QSignalSpy(self._stopWaiting)
            result = spy.wait(5000)  # Asynchronous wait, Timeout 5 s
            if not result:
                # it is bad if the socket needs longer than the timeout and connects
                # after the connection is deemed dead
                self.close()
                logging.debug("Connection not established, manually closing socket.")
        return result and not self._hasError
    # No socket object at all: nothing to wait for.
    return False
def getAttrOfFirst(self, attrlist):
    """check the attributes of the first item in the output buffer

    :param attrlist: attribute names to look up on the first buffered message
    :return: list of values in the same order as ``attrlist``; None for any
             attribute the message lacks, and all None when the buffer is empty.
    """
    self.lock.acquire()
    try:
        # Previously an empty buffer raised IndexError *while holding the
        # lock*, dead-locking every other reader; return all-None instead.
        if not self.messageOutput:
            return [None for _ in attrlist]
        first = self.messageOutput[0]
        result = []
        for attr in attrlist:
            if attr in first:
                result.append(first[attr])
            else:
                result.append(None)
        return result
    finally:
        # try/finally guarantees release even if a lookup raises.
        self.lock.release()
def containsAttr(self, attr):
    """check if any message in the output buffer contains the desired attribute

    :param attr: (key, value) tuple or None
    :return: True when some buffered message has ``attr[0]`` equal to ``attr[1]``
    """
    if attr is None:
        return False
    self.lock.acquire()
    found = False
    try:
        for msg in self.messageOutput:
            # Use == (value equality) instead of `is`: identity comparison
            # silently failed for equal-but-distinct strings/ints, making
            # this inconsistent with the == test in asyncRead().
            if attr[0] in msg and msg[attr[0]] == attr[1]:
                found = True
                break
    finally:
        # Guarantee the lock is released even if a message lookup raises.
        self.lock.release()
    return found
| StarcoderdataPython |
240479 | import os.path
from collections import OrderedDict
from functools import partial
from typing import List, Union, Dict
import ancpbids
from ancpbids import CustomOpExpr, EntityExpr, AllExpr, ValidationPlugin
from . import load_dataset, LOGGER
from .plugins.plugin_query import FnMatchExpr, AnyExpr
from .utils import deepupdate
class BIDSLayout:
    """A convenience class to provide access to an in-memory representation of a BIDS dataset.

    .. code-block::

        dataset_path = 'path/to/your/dataset'
        layout = BIDSLayout(dataset_path)

    Parameters
    ----------
    ds_dir:
        the (absolute) path to the dataset to load
    """
    def __init__(self, ds_dir: str, **kwargs):
        # Parse the dataset from disk and keep its schema for type checks.
        self.dataset = load_dataset(ds_dir)
        self.schema = self.dataset.get_schema()

    def _to_any_expr(self, value, ctor):
        """Build one expression per value; OR-combine them when value is a list."""
        # if the value is a list, then wrap it in an AnyExpr
        if isinstance(value, list):
            ops = []
            for v in value:
                ops.append(ctor(v))
            return AnyExpr(*ops)
        # else just return using the constructor function
        return ctor(value)

    def __getattr__(self, key, **kwargs):
        # Dynamic accessors: layout.get_subjects(), layout.get_tasks(), ...
        # delegate to get(return_type='id', target=<entity>).
        k = key if not key.startswith("get_") else key[4:]
        return partial(self.get, return_type='id', target=k, **kwargs)

    def get_metadata(self, *args, **kwargs) -> dict:
        """Returns a dictionary of metadata matching the provided criteria (see :meth:`ancpbids.BIDSLayout.get`).

        Also takes the BIDS inheritance principle into account, i.e. any metadata defined at dataset level
        may be overridden by a more specific metadata entry at a lower level such as the subject level.
        As of the BIDS specification, metadata is kept in JSON files,
        i.e. only JSON files will be assumed to contain metadata.
        """
        # Only JSON metadata files carry metadata.
        qry_result = filter(lambda a: isinstance(a, self.schema.MetadataFile), self.get(*args, **kwargs))
        # build lists of ancestors + the leaf (metadata file)
        ancestors = list(map(lambda e: (list(reversed(list(e.iterancestors()))), e), qry_result))
        # sort by number of ancestors
        # TODO must sort by the items within the list not just by length of list
        # example: [xyz,abc] would be treated the same when it should be [abc, xyz]
        ancestors.sort(key=lambda e: len(e[0]))
        metadata = {}
        if ancestors:
            # start with first metadata file
            deepupdate(metadata, ancestors[0][1].contents)
            if len(ancestors) > 1:
                # More specific (deeper) files override earlier entries.
                for i in range(1, len(ancestors)):
                    # FIXME ancestors handling is unstable, disable it for now
                    if False:
                        a0 = ancestors[i - 1][0]
                        a1 = ancestors[i][0]
                        # remove the ancestors from a0 and make sure it is empty, i.e. both nodes have same ancestors
                        remaining_ancestors = set(a0).difference(*a1)
                        if remaining_ancestors:
                            # if remaining ancestors list is not empty,
                            # this is interpreted as having the leaves from different branches
                            # for example, metadata from func/sub-01/...json must not be mixed with func/sub-02/...json
                            LOGGER.warn("Query returned metadata files from incompatible sources.")
                    deepupdate(metadata, ancestors[i][1].contents)
        return metadata

    def _require_artifact(self, expr) -> AllExpr:
        """Wraps the provided expression in an expression that makes sure the context of evaluation is an Artifact.

        Parameters
        ----------
        expr :
            the expression to wrap

        Returns
        -------
        a wrapping expression to make sure that the provided object is an instance of Artifact
        """
        return AllExpr(CustomOpExpr(lambda m: isinstance(m, self.schema.Artifact)), expr)

    def get(self, return_type: str = 'object', target: str = None, scope: str = None,
            extension: Union[str, List[str]] = None, suffix: Union[str, List[str]] = None,
            **entities) -> Union[List[str], List[object]]:
        """Depending on the return_type value returns either paths to files that matched the filtering criteria
        or :class:`Artifact <ancpbids.model_v1_7_0.Artifact>` objects for further processing by the caller.

        Note that all provided filter criteria are AND combined, i.e. subj='02',task='lang' will match files containing
        '02' as a subject AND 'lang' as a task. If you provide a list of values for a criteria, they will be OR combined.

        .. code-block::

            file_paths = layout.get(subj='02', task='lang', suffix='bold', return_type='files')
            file_paths = layout.get(subj=['02', '03'], task='lang', return_type='files')

        Parameters
        ----------
        return_type:
            Either 'files' to return paths of matched files
            or 'object' to return :class:`Artifact <ancpbids.model_v1_7_0.Artifact>` object, defaults to 'object'
        target:
            Either `suffixes`, `extensions` or one of any valid BIDS entities key
            (see :class:`EntityEnum <ancpbids.model_v1_7_0.EntityEnum>`, defaults to `None`
        scope:
            a hint where to search for files
            If passed, only nodes/directories that match the specified scope will be
            searched. Possible values include:
            'all' (default): search all available directories.
            'derivatives': search all derivatives directories.
            'raw': search only BIDS-Raw directories.
            'self': search only the directly called BIDSLayout.
            <PipelineName>: the name of a BIDS-Derivatives pipeline.
        extension:
            criterion to match any files containing the provided extension only
        suffix:
            criterion to match any files containing the provided suffix only
        entities
            a list of key-values to match the entities of interest, example: subj='02',task='lang'

        Returns
        -------
        depending on the return_type value either paths to files that matched the filtering criteria
        or Artifact objects for further processing by the caller
        """
        if scope is None:
            scope = 'all'
        if return_type == 'id':
            if not target:
                raise ValueError("return_type=id requires the target parameter to be set")
        context = self.dataset
        ops = []
        target_type = self.schema.File
        if scope.startswith("derivatives"):
            # Narrow the query context to the requested derivatives sub-folder.
            context = self.dataset.derivatives
            # we already consumed the first path segment
            segments = os.path.normpath(scope).split(os.sep)[1:]
            for segment in segments:
                context = context.get_folder(segment)
            # derivatives may contain non-artifacts which should also be considered
            target_type = self.schema.File
        select = context.select(target_type)
        if scope == 'raw':
            # the raw scope does not consider derivatives folder but everything else
            select.subtree(CustomOpExpr(lambda m: not isinstance(m, self.schema.DerivativeFolder)))
        result_extractor = None
        if target:
            # NOTE(review): substring membership — `target in 'suffixes'` matches
            # 'suffix' and 'suffixes', but also any substring such as 'suf' or 's';
            # verify this looseness is intended.
            if target in 'suffixes':
                suffix = '*'
                result_extractor = lambda artifacts: [a.suffix for a in artifacts]
            elif target in 'extensions':
                extension = '*'
                result_extractor = lambda artifacts: [a.extension for a in artifacts]
            else:
                target = self.schema.fuzzy_match_entity_key(target)
                entities = {**entities, target: '*'}
                result_extractor = lambda artifacts: [entity.value for a in artifacts for entity in
                                                      filter(lambda e: e.key == target, a.entities)]
        # AND-combine all entity criteria (each may itself OR a value list).
        for k, v in entities.items():
            entity_key = self.schema.fuzzy_match_entity(k)
            v = self.schema.process_entity_value(k, v)
            ops.append(
                self._require_artifact(self._to_any_expr(v, lambda val: EntityExpr(self.schema, entity_key, val))))
        if extension:
            ops.append(self._require_artifact(
                self._to_any_expr(extension, lambda ext: FnMatchExpr(self.schema.Artifact.extension, ext))))
        if suffix:
            ops.append(
                self._require_artifact(
                    self._to_any_expr(suffix, lambda suf: FnMatchExpr(self.schema.Artifact.suffix, suf))))
        select.where(AllExpr(*ops))
        if return_type and return_type.startswith("file"):
            return list(select.get_file_paths_absolute())
        else:
            artifacts = select.objects()
            if result_extractor:
                return sorted(set(result_extractor(artifacts)))
            return list(artifacts)

    def get_entities(self, scope: str = None, sort: bool = False) -> dict:
        """Returns a unique set of entities found within the dataset as a dict.

        Each key of the resulting dict contains a list of values (with at least one element).

        Example dict:

        .. code-block::

            {
                'sub': ['01', '02', '03'],
                'task': ['gamblestask']
            }

        Parameters
        ----------
        scope:
            see BIDSLayout.get()
        sort: default is `False`
            whether to sort the keys by name

        Returns
        -------
        dict
            a unique set of entities found within the dataset as a dict
        """
        artifacts = filter(lambda m: isinstance(m, self.schema.Artifact), self.get(scope=scope))
        result = OrderedDict()
        # Collect every (key, value) entity pair into a set per key.
        for e in [e for a in artifacts for e in a.entities]:
            if e.key not in result:
                result[e.key] = set()
            result[e.key].add(e.value)
        if sort:
            result = {k: sorted(v) for k, v in sorted(result.items())}
        return result

    def get_dataset_description(self) -> dict:
        """
        Returns
        -------
        the dataset's dataset_description.json as a dictionary or None if not provided
        """
        return self.dataset.dataset_description

    def get_dataset(self) -> object:
        """
        Returns
        -------
        the in-memory representation of this layout/dataset
        """
        return self.dataset

    def write_derivative(self, derivative):
        """Writes the provided derivative folder to the dataset.

        Note that a 'derivatives' folder will be created if not present.

        Parameters
        ----------
        derivative:
            the derivative folder to write
        """
        assert isinstance(derivative, self.schema.DerivativeFolder)
        ancpbids.write_derivative(self.dataset, derivative)

    def validate(self) -> ValidationPlugin.ValidationReport:
        """Validates a dataset and returns a report object containing any detected validation errors.

        Example:

        .. code-block::

            report = layout.validate()
            for message in report.messages:
                print(message)
            if report.has_errors():
                raise "The dataset contains validation errors, cannot continue".

        Returns
        -------
        a report object containing any detected validation errors or warning
        """
        return ancpbids.validate_dataset(self.dataset)
| StarcoderdataPython |
1613405 | """
Provides QtGui classes and functions.
.. warning:: All PyQt4/PySide gui classes are exposed but when you use
PyQt5, those classes are not available. Therefore, you should treat/use
this package as if it was ``PyQt5.QtGui`` module.
"""
import os
from pyqode.qt import QT_API
from pyqode.qt import PYQT5_API
from pyqode.qt import PYQT4_API
from pyqode.qt import PYSIDE_API
from pyqode.qt import PYSIDE2_API
if os.environ[QT_API] in PYQT5_API:
from PyQt5.QtGui import *
elif os.environ[QT_API] in PYQT4_API:
from PyQt4.QtGui import *
elif os.environ[QT_API] in PYSIDE_API:
from PySide.QtGui import *
elif os.environ[QT_API] in PYSIDE2_API:
from PySide2.QtGui import *
else:
raise ImportError('No Qt bindings could be found')
| StarcoderdataPython |
5134534 | <filename>penn/news.py
from .base import WrapperBase
# Root of Penn's Open Data ESB API.
BASE_URL = "https://esb.isc-seo.upenn.edu/8091/open_data/"
# Endpoint map used by the News client below.
ENDPOINTS = {
    'SEARCH': BASE_URL + 'news_events_maps'
}
class News(WrapperBase):
    """Client for the Penn News Search API.

    :param bearer: The user code for the API
    :param token: The password code for the API

    Usage::

        >>> from penn import News
        >>> n = News('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
    """

    def search(self, keyword):
        """Return all news related to the provided query.

        :param keyword:
            The keyword for your news search

        >>> results = n.search('interview')
        """
        query = {
            "source": "news",
            "description": keyword,
        }
        data = self._request(ENDPOINTS['SEARCH'], query)
        # Drop any malformed (non-dict) entries from the result list.
        cleaned = []
        for entry in data['result_data']:
            if isinstance(entry, dict):
                cleaned.append(entry)
        data['result_data'] = cleaned
        return data
| StarcoderdataPython |
#!/usr/bin/env python3
from __future__ import print_function
# dsl1.py -- tiny line-oriented DSL runner:
# each non-comment line is "<module> <function> <arg1> <arg2>"
import sys
import importlib

# the source file is the 1st argument to the script
if len(sys.argv) != 2:
    print('usage: %s <src.dsl>' % sys.argv[0])
    sys.exit(1)

# make the DSL command modules importable
sys.path.insert(0, '/Users/nathan/code/dsl/modules')

with open(sys.argv[1], 'r') as src:
    for raw_line in src:
        stripped = raw_line.strip()
        # skip blank lines and comment lines
        if not stripped or stripped.startswith('#'):
            continue
        tokens = stripped.split()
        print(tokens)
        command_module = importlib.import_module(tokens[0])
        print(command_module)
        handler = getattr(command_module, tokens[1])
        handler(tokens[2], tokens[3])
| StarcoderdataPython |
# Scroll a greeting across the Sense HAT LED matrix, then light one pixel.
from sense_hat import SenseHat
sense = SenseHat()
# 0 = keep the display in its default orientation
sense.set_rotation(0)
# scroll "halo" in pure blue on a darker blue background
sense.show_message("halo",text_colour=(0,0,255), back_colour=(4, 34, 180))
# light the pixel at column 3, row 3 in pale cyan
sense.set_pixel(3,3, (116,255,231))
| StarcoderdataPython |
8055835 | <reponame>candyninja001/pypad
from enum import Enum
from .monster_type import MonsterType
from .dev import Dev
class LatentAwakening(Enum):
    """Latent awakenings, each defined as (game id, latent slots consumed,
    list of MonsterTypes the latent is restricted to; [] = unrestricted)."""
    UNKNOWN = (-1, 2, [])
    NONE = (0, 1, [])
    IMPROVED_HP = (1, 1, [])
    IMPROVED_ATTACK = (2, 1, [])
    IMPROVED_RECOVERY = (3, 1, [])
    EXTENDED_MOVE_TIME = (4, 1, [])
    AUTO_RECOVER = (5, 1, [])
    FIRE_DAMAGE_REDUCTION = (6, 1, [])
    WATER_DAMAGE_REDUCTION = (7, 1, [])
    WOOD_DAMAGE_REDUCTION = (8, 1, [])
    LIGHT_DAMAGE_REDUCTION = (9, 1, [])
    DARK_DAMAGE_REDUCTION = (10, 1, [])
    SKILL_DELAY_RESISTANCE = (11, 1, [])
    ALL_STATS_IMPROVED = (12, 2, [])
    EVO_MATERIAL_KILLER = (16, 2, [])
    AWOKEN_MATERIAL_KILLER = (17, 2, [])
    ENHANCED_MATERIAL_KILLER = (18, 2, [])
    REDEEMABLE_MATERIAL_KILLER = (19, 2, [])
    # Killer latents: restricted to the listed monster types.
    GOD_KILLER = (20, 2, [MonsterType.BALANCED, MonsterType.DEVIL, MonsterType.MACHINE])
    DRAGON_KILLER = (21, 2, [MonsterType.BALANCED, MonsterType.HEALER])
    DEVIL_KILLER = (22, 2, [MonsterType.BALANCED, MonsterType.GOD, MonsterType.ATTACKER])
    MACHINE_KILLER = (23, 2, [MonsterType.BALANCED, MonsterType.DRAGON, MonsterType.PHYSICAL])
    BALANCED_KILLER = (24, 2, [MonsterType.BALANCED, MonsterType.MACHINE])
    ATTACKER_KILLER = (25, 2, [MonsterType.BALANCED, MonsterType.HEALER])
    PHYSICAL_KILLER = (26, 2, [MonsterType.BALANCED, MonsterType.ATTACKER])
    HEALER_KILLER = (27, 2, [MonsterType.BALANCED, MonsterType.DRAGON, MonsterType.PHYSICAL])
    IMPROVED_HP_PLUS = (28, 2, [])
    IMPROVED_ATTACK_PLUS = (29, 2, [])
    IMPROVED_RECOVERY_PLUS = (30, 2, [])
    EXTENDED_MOVE_TIME_PLUS = (31, 2, [])
    FIRE_DAMAGE_REDUCTION_PLUS = (32, 2, [])
    WATER_DAMAGE_REDUCTION_PLUS = (33, 2, [])
    WOOD_DAMAGE_REDUCTION_PLUS = (34, 2, [])
    LIGHT_DAMAGE_REDUCTION_PLUS = (35, 2, [])
    DARK_DAMAGE_REDUCTION_PLUS = (36, 2, [])

    def __new__(cls, value, slots=None, types=None):
        # During member creation the full (value, slots, types) tuple is
        # passed; the bare-value branch defers to Enum for plain lookups.
        if slots == None and types == None: # value lookup instead of creation
            return Enum.__new__(cls, value)
        latent = object.__new__(cls)
        latent._value_ = value
        latent._slots = slots  # number of latent slots this awakening occupies
        latent._types = types  # allowed MonsterTypes ([] means unrestricted)
        return latent

    @classmethod
    def _missing_(cls, value):
        # Unrecognised ids are logged and mapped to UNKNOWN instead of raising.
        Dev.log(f'Unknown latent awakening: {value}')
        return LatentAwakening.UNKNOWN

    def slots(self) -> int:
        """Number of latent slots this awakening consumes."""
        return self._slots

    def allowed_types(self) -> [MonsterType]:
        """Monster types this latent may be assigned to ([] = every type but NONE)."""
        if len(self._types) == 0:
            return [t for t in MonsterType if t != MonsterType.NONE]
        return self._types

    def allowed_for_types(self, types: (MonsterType,)) -> bool:
        """True when a monster with any of *types* may equip this latent."""
        if len(self._types) == 0:
            return True
        return any(t in self._types for t in types)

    @classmethod
    def get_killers_for_types(cls, types: (MonsterType,)) -> ['LatentAwakening']:
        """All type-restricted latents whose restriction matches any of *types*."""
        return [l for l in LatentAwakening if any(t in l._types for t in types)]

    @classmethod
    def get_latents_for_types(cls, types: (MonsterType,)) -> ['LatentAwakening']:
        """All latents a monster with *types* may equip (restricted or not)."""
        return [l for l in LatentAwakening if l.allowed_for_types(types)]
| StarcoderdataPython |
11310297 | # -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8:noet:tabstop=4:softtabstop=4:shiftwidth=8:expandtab
""" python3 method """
# Copyright (c) 2010 - 2020, © Badassops LLC / <NAME>
# All rights reserved.
# BSD 3-Clause License : http://www.freebsd.org/copyright/freebsd-license.html
from logging import warning
def create_resource_tag(**kwargs):
    """Create a tag directly on a boto3 resource object.

    Keyword Args:
        resource: boto3 resource exposing ``create_tags``
        tag_name: tag key to set
        tag_value: tag value to set

    Returns:
        bool: True when the tag was created, False on any failure
        (the error is logged, not raised).
    """
    # None (not a throwaway dict) is the right "missing" default for
    # required keyword arguments; a missing resource then fails cleanly below.
    resource = kwargs.get('resource')
    tag_name = kwargs.get('tag_name')
    tag_value = kwargs.get('tag_value')
    try:
        resource.create_tags(
            Tags=[{'Key': tag_name, 'Value': tag_value},]
        )
        return True
    except Exception as err:
        # Best-effort by design: report the failure and signal it to the caller.
        warning('Unable to set the {} tag, error: {}'.format(tag_name, err))
        return False
def create_resource_id_tag(**kwargs):
    """Create a tag on an EC2 resource identified by its resource id.

    Keyword Args:
        session: session object exposing ``get_client_session(service=...)``
        resource_id: id of the resource to tag
        tag_name: tag key to set
        tag_value: tag value to set

    Returns:
        bool: True when the tag was created, False on any failure
        (the error is logged, not raised).
    """
    # None (not a throwaway dict) is the right "missing" default for
    # required keyword arguments; a missing session then fails cleanly below.
    session = kwargs.get('session')
    resource_id = kwargs.get('resource_id')
    tag_name = kwargs.get('tag_name')
    tag_value = kwargs.get('tag_value')
    try:
        tag_session = session.get_client_session(service='ec2')
        tag_session.create_tags(
            Resources=[resource_id],
            Tags=[{'Key': tag_name, 'Value': tag_value},]
        )
        return True
    except Exception as err:
        # Best-effort by design: report the failure and signal it to the caller.
        warning('Unable to set the {} tag, error: {}'.format(tag_name, err))
        return False
| StarcoderdataPython |
4958765 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from dataclasses import dataclass
from itertools import chain, islice
from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Set
from libcst.metadata import FullRepoManager, TypeInferenceProvider
if TYPE_CHECKING:
from logging import Logger
from libcst.metadata.base_provider import ProviderT
# Default number of file paths handed to each FullRepoManager batch.
BATCH_SIZE: int = 100
# Empty fallback caches substituted when cache resolution fails, keyed by provider.
PLACEHOLDER_CACHES: Dict["ProviderT", object] = {TypeInferenceProvider: {"types": []}}
@dataclass(frozen=True)
class FullRepoMetadataConfig:
    """Settings controlling full-repo metadata (cache) resolution."""
    # metadata providers to resolve caches for
    providers: Set["ProviderT"]
    # per-batch timeout (seconds) passed to FullRepoManager's pyre query
    timeout_seconds: int
    # root directory of the repository the paths belong to
    repo_root_dir: str = ""
    # number of paths per FullRepoManager batch
    batch_size: int = BATCH_SIZE
    # optional logger used to report cache-resolution failures
    logger: Optional["Logger"] = None
def get_repo_caches(
    paths: Iterable[str],
    config: FullRepoMetadataConfig,
) -> Mapping[str, Dict["ProviderT", object]]:
    """
    Generate metadata caches by instantiating a :class:`~libcst.metadata.FullRepoManager`
    per batch of paths, with ``config.providers`` passed to its ``providers`` parameter.

    :param paths: An iterable of paths to files to pass to :class:`~libcst.metadata.FullRepoManager`
        constructor's ``paths`` argument, processed in batches of ``config.batch_size``.
    :param config: batching, timeout, provider and logging settings
        (see :class:`FullRepoMetadataConfig`).
    :return: mapping of each path to its per-provider cache; paths whose batch
        failed to resolve map to empty placeholder caches instead.
    """
    caches = {}
    paths_iter = iter(paths)
    head: Optional[str] = next(paths_iter, None)
    while head is not None:
        # Consume the next batch of up to `batch_size` paths from the iterator.
        paths_batch = tuple(chain([head], islice(paths_iter, config.batch_size - 1)))
        head = next(paths_iter, None)
        frm = FullRepoManager(
            repo_root_dir=config.repo_root_dir,
            paths=paths_batch,
            providers=config.providers,
            timeout=config.timeout_seconds,
        )
        try:
            frm.resolve_cache()
        except Exception:
            # We want to fail silently since some metadata providers can be flaky. If a logger is provided by the caller, we'll add a log here.
            logger = config.logger
            if logger is not None:
                logger.warning(
                    "Failed to retrieve metadata cache.",
                    exc_info=True,
                    extra={"paths": paths_batch},
                )
            # Populate with placeholder caches to avoid failures down the line. This will however result in reduced functionality in cache-dependent lint rules.
            caches.update(
                dict.fromkeys(
                    paths_batch,
                    {
                        provider: PLACEHOLDER_CACHES[provider]
                        for provider in config.providers
                    },
                )
            )
        else:
            # TODO: remove access of private variable when public `cache` property is available in libcst.metadata.FullRepoManager API.
            # Invert provider->path->cache into path->provider->cache.
            batch_caches = defaultdict(dict)
            for provider, files in frm._cache.items():
                for _path, cache in files.items():
                    batch_caches[_path][provider] = cache
            caches.update(batch_caches)
    return caches
| StarcoderdataPython |
1675956 | import configparser
from datetime import datetime
from math import cos
from skimage import filters
from skimage import measure
from math import radians
from scipy.interpolate import splprep, splev
import numpy as np
import pandas as pd
import scipy.ndimage as img
""" Tools to manipulate and analyze data """
def canopy_cover(data, radius):
    """Count covered (height > 0) cells in a square window around each cell.

    For every (row, col, date) cell, counts how many cells of the same date
    layer within a (2*radius + 1)-sided square window have positive height.

    :param data: 3D numpy array of height data (rows, cols, dates)
    :param radius: radius of the region (in number of 0.1 m squares)
    :return: int array of per-cell neighborhood counts, same shape as data
    """
    c = np.zeros_like(data, int)
    kernel = np.ones((radius * 2 + 1, radius * 2 + 1))
    for x in range(data.shape[2]):
        # Binary presence mask, computed on a fresh array: the original
        # wrote the 1s back into the caller's `data` through a view, and
        # left negative heights to contribute negative counts.
        d = (data[:, :, x] > 0).astype(float)
        # scipy.ndimage.convolve (the old `filters` namespace is deprecated)
        c[:, :, x] = img.convolve(d, kernel)
    return c
def create_path_splines(points, data_shape, converter):
    """Create a manageable path spline from the thousands of GPS data points.

    :param points: DataFrame whose rows carry (lat, lng) in the first two columns
    :param data_shape: unused; kept for backward compatibility with callers
    :param converter: object with lat_to_y()/lng_to_x() mapping GPS to data coords
    :return: the spline representation ``tck`` returned by scipy's splprep
    """
    # Convert the gps information into integer data coordinates.
    path_pts = []
    for p in points.itertuples():
        # This gives us the data coordinates from the gps location
        data_y = int(converter.lat_to_y(p[1]))
        data_x = int(converter.lng_to_x(p[2]))
        path_pts.append([data_x, data_y])
    # (The original also evaluated np.transpose(path_pts) here and
    # discarded the result -- a no-op, removed.)
    # Remove any duplicate points from the path,
    # keeping the original array order.
    path_vals, ind = np.unique(path_pts, axis=0, return_index=True)
    ind = sorted(ind)
    path_pts = [path_pts[i] for i in ind]
    # Create an interpolating spline from the remaining path points.
    # noinspection PyTupleAssignmentBalance
    tck, u = splprep(np.transpose(path_pts), u=None, s=0.0)
    return tck
def create_stress_map(height, canopy, rad, threshold):
    """Create map showing frequently stressed areas of plot.

    Averages the per-date stress masks over all dates up to and including
    each date, so areas that are repeatedly stressed trend toward 1.

    :param height: 3D array (rows, cols, dates) of height data
    :param canopy: 3D array of canopy-cover counts matching ``height``
    :param rad: averaging/desampling radius forwarded to the mask builder
    :param threshold: (high, low) percentile pair forwarded to the mask builder
    :return: 3D map of stress frequency, same shape as the per-date masks
    """
    # Create the suggestion mask for each snapshot (v1 and its normalized
    # height+canopy preprocessing are superseded and have been removed).
    stress_dates = create_suggestion_mask_v2(height, canopy, rad, threshold)
    # Running mean over the date axis: cumulative sum divided by the number
    # of dates seen so far (replaces the original per-layer Python loop).
    n_dates = stress_dates.shape[2]
    stress_map = np.cumsum(stress_dates, axis=2) / np.arange(1, n_dates + 1)
    return stress_map
def create_suggestion_mask_v1(d, rad, threshold):
    """Keep this here for a little while, then delete it. Suggestion mask v1

    Uses statistical methods to determine outliers below the general
    population of the data, and uses image processing techniques to discount
    the edges of the plot from skewing the results.

    NOTE: superseded by create_suggestion_mask_v2.

    :param d: The input data to create the mask
    :param rad: The radius to average and desample the data
    :param threshold: The percentile above which points will be filtered
    :return: The mask from which suggested points are chosen
    """
    # Create a new copy of the data to work on
    data = np.copy(d)
    # filter out data less than zero
    data[data < 0] = 0
    # Calculates each point as sum of nearby values within radius r
    c = np.zeros_like(data, float)
    kernel = np.ones((rad * 2 + 1, rad * 2 + 1))
    for x in range(data.shape[2]):
        # NOTE(review): scipy.ndimage.filters is a deprecated namespace;
        # img.convolve is the supported spelling.
        conv = img.filters.convolve(data[:, :, x], kernel)
        c[:, :, x] = conv
    # Downsample array into pixels with same size as convolve
    c = c[::rad * 2 + 1, ::rad * 2 + 1, :]
    fullmask = np.zeros_like(d)
    for i in range(c.shape[2]):
        # Extract the ith layer of data
        mask = c[:, :, i]
        # Use image processing morphology to smooth out data
        mask = img.grey_closing(mask, structure=np.ones((3, 3)))
        # Use Sobel edge detection to decrease weight of edges
        gx = img.sobel(mask, axis=0)
        gy = img.sobel(mask, axis=1)
        grad = np.hypot(gx, gy)
        grad = (np.divide(grad, np.amax(grad))) * 100
        mask = (np.divide(mask, np.amax(mask))) * 100
        mask -= grad
        # Calculate the threshold percentile, ignoring zeros
        mask[mask <= 0] = np.nan
        percent = np.nanpercentile(mask, threshold)
        mask = np.nan_to_num(mask)
        # Filter out data and create mask
        mask[mask > percent] = 0
        mask[mask > 0] = 1
        # Perform binary opening to remove small regions
        mask = img.binary_opening(mask)
        # Rescale mask to fit data size
        scale = np.divide(fullmask[:, :, 0].shape, mask.shape)
        fullmask[:, :, i] = img.zoom(mask, scale, order=0)
    return fullmask
def create_suggestion_mask_v2(height, canopy, rad=4, threshold=(20, 40)):
    """Build a per-date stress-level mask from height and canopy data.

    Cells are binned by height percentile into high (0.99), medium (0.5)
    and low (0.01) stress levels; zero means no data / no stress.

    :param height: 3D array (rows, cols, dates) of height data
    :param canopy: 3D array of canopy-cover counts matching ``height``
    :param rad: downscaling radius (block side is 2*rad + 1)
    :param threshold: (high/med, med/low) percentile boundaries
    :return: float mask rescaled back to ``height``'s shape
    """
    # Copy the data
    height_data = np.copy(height)
    # Silence isolated points (low canopy)
    height_data[canopy < 5] = 0
    # Downscale dataset to 0.5m squares, taking the max within each
    height_data = downscale_max(height_data, rad)
    # Place points into stress levels
    stress_data = np.zeros_like(height_data)
    for x in range(stress_data.shape[2]):
        stress_layer = stress_data[:, :, x]
        height_layer = height_data[:, :, x]
        # NOTE(review): np.percentile raises on an all-zero layer (the
        # nonzero selection is empty) -- confirm callers never pass one.
        high_med_stress = np.percentile(height_layer[np.nonzero(height_layer)],
                                        threshold[0])
        med_low_stress = np.percentile(height_layer[np.nonzero(height_layer)],
                                       threshold[1])
        stress_layer[height_layer >= med_low_stress] = 0.01  # Low
        height_layer[stress_layer > 0] = 0  # silence low points
        stress_layer[height_layer >= high_med_stress] = 0.5  # Medium
        height_layer[stress_layer > 0] = 0  # silence med points
        stress_layer[0 < height_layer] = 0.99  # High
        stress_data[:, :, x] = stress_layer
    # Blow the coarse mask back up to the original resolution.
    stress_data = rescale_like(stress_data, height)
    return stress_data
def define_regions(data, rad):
    """Identify connected regions of high-stress areas in a 2D stress map.

    Otsu-thresholds a downsampled copy of ``data``, cleans it with binary
    opening, labels connected components and drops components of 500 cells
    or fewer.

    :param data: 2D stress map
    :param rad: downsampling radius (sample step is 2*rad + 1)
    :return: StressMapWrapper of the label image and remaining region slices
    """
    region_map = np.copy(data)
    # Downsample before thresholding to suppress pixel-level noise.
    region_map = region_map[::rad * 2 + 1, ::rad * 2 + 1]
    val = filters.threshold_otsu(region_map)
    mask = region_map > val
    mask = img.binary_opening(mask, iterations=2)
    # Zoom the mask back up to the original resolution.
    scale = np.divide(data.shape, mask.shape)
    mask = img.zoom(mask, scale, order=0)
    labels = measure.label(mask, background=0)
    regions = img.find_objects(labels)
    # Discard regions with at most 500 cells (label i+1 maps to regions[i]).
    small_regions = []
    for i in range(len(regions)):
        if np.nonzero(labels == i + 1)[0].size <= 500:
            labels[regions[i]] = 0
            small_regions.append(i)
    # Delete from the end so earlier indices stay valid.
    for i in small_regions[::-1]:
        del regions[i]
    return StressMapWrapper(labels, regions)
def downscale_avg(data, radius):
    """Downsample each date layer by summing square neighborhoods.

    NOTE: despite the name, each output cell holds the *sum* (not the mean)
    of the (2*radius + 1)^2 window around the sampled cell; normalise
    separately if an average is needed.

    :param data: 3D array (rows, cols, dates)
    :param radius: neighborhood radius in cells
    :return: float array sampled every (2*radius + 1) cells on the first two axes
    """
    diam = 2 * radius + 1
    kernel = np.ones((diam, diam))
    fullmap = np.zeros_like(data, float)
    for x in range(data.shape[2]):
        # scipy.ndimage.convolve (the old `filters` namespace is deprecated)
        fullmap[:, :, x] = img.convolve(data[:, :, x], kernel)
    # Downsample: keep one cell per diam x diam block.
    return fullmap[::diam, ::diam, :]
def downscale_max(data, radius):
    """Downsample each date layer by taking the max of each square block.

    Partitions the first two axes into blocks of side (2*radius + 1) and
    keeps each block's maximum; trailing partial blocks are included.

    :param data: 3D array (rows, cols, dates)
    :param radius: half-width of a block; block side is 2*radius + 1
    :return: float array of block maxima, one cell per block
    """
    # Turn radius into diameter centered at original point
    diam = 2 * radius + 1
    fullmap = np.zeros_like(data[::diam, ::diam, :], float)
    for x in range(data.shape[2]):
        # Iterate over every output cell; slicing clamps at the array edge,
        # so partial trailing blocks are handled too. The original looped
        # range(int(shape/diam) - 1) and silently left the last block
        # row(s)/column(s) zeroed.
        for r in range(fullmap.shape[0]):
            for c in range(fullmap.shape[1]):
                block = data[(r * diam):(r * diam + diam),
                             (c * diam):(c * diam + diam), x]
                fullmap[r, c, x] = np.amax(block)
    return fullmap
def evaluate_path_spline(spline, num_points):
    """Sample ``num_points`` evenly spaced points along a fitted path spline.

    :param spline: the ``tck`` tuple returned by scipy's splprep
    :param num_points: number of samples to evaluate along the curve
    :return: DataFrame with one row per sample and one column per dimension
    """
    parameter_values = np.linspace(0, 1, num_points)
    coordinates = splev(parameter_values, spline)
    return pd.DataFrame(np.transpose(coordinates))
def filter_outliers(data, rmin = 0.2, rmax = 1.0):
    """Zero out per-layer outliers, in place.

    For each date layer, computes the average and standard deviation of the
    nonzero values, then zeroes values below ``avg - rmin*std`` and above
    ``avg + rmax*std``. Negative values are always zeroed first.
    (Docstring corrected: the old text described different bounds.)

    Note: ``data`` is modified in place and also returned.

    :param data: 3D numpy array of data to filter (rows, cols, dates)
    :param rmin: values below (average - rmin * standard deviation) are zeroed
    :param rmax: values above (average + rmax * standard deviation) are zeroed
    :return: the filtered array (same object as ``data``)
    """
    for x in range(data.shape[2]):
        d = np.nan_to_num(data[:, :, x])
        # Silence negative values
        d[d < 0] = 0
        nz = d[np.nonzero(d)]
        if nz.size == 0:
            # All-zero layer: nothing to filter (np.average of an empty
            # selection would raise).
            data[:, :, x] = d
            continue
        # Calculate average and std of the nonzero population.
        avg = np.average(nz)
        std = np.std(nz)
        # d is non-negative here, so the original np.absolute() was a no-op.
        d[d < avg - (rmin * std)] = 0  # filter points below avg
        d[d > avg + (rmax * std)] = 0  # filter points above avg
        data[:, :, x] = d
    return data
def rescale_like(data, like):
    """Nearest-neighbour rescale of each layer of ``data`` to ``like``'s shape.

    :param data: 3D array (rows, cols, dates)
    :param like: 3D array whose first two axes give the target resolution
    :return: float array shaped like ``like``; layers beyond ``data``'s
             date count remain zero
    """
    zoom_factor = np.true_divide(like[:, :, 0].shape, data[:, :, 0].shape)
    result = np.zeros_like(like, float)
    for layer in range(data.shape[2]):
        result[:, :, layer] = img.zoom(data[:, :, layer], zoom_factor, order=0)
    return result
class DataSet4D:
"""This class contains all of the data for a particular mode.
This class is responsible for handling the datasets for each different
mode or filter. It has behaviors to change the date, perform statistics,
and manipulate the data to some extent."""
def __init__(self, data, dates):
    """Wrap a 2D/3D height array and its date list; pads data to match dates.

    :param data: 2D (rows, cols) or 3D (rows, cols, dates) numpy array
    :param dates: iterable of datetime objects, one per data layer
    """
    # Expand dimensions if necessary
    if len(data.shape) == 2:
        data = np.expand_dims(data, axis=2)
    self.data = np.nan_to_num(data)
    self.dates = sorted(dates)
    # NOTE(review): stores the *builtin* filter function -- looks
    # unintentional; confirm nothing reads self.filter.
    self.filter = filter
    date_length = self.dates.__len__()
    data_length = self.data.shape[2]
    # Duplicate last data element to match date length
    # NOTE(review): the duplicated layer comes from the raw `data`
    # (pre-nan_to_num), not self.data -- confirm intended.
    while data_length < date_length:
        self.data = np.concatenate((self.data,
                                    np.expand_dims(data[:, :, -1], axis=2)),
                                   axis=2)
        data_length += 1
    self._n_samples = self.data.shape[2]
    # Index of the currently selected date and its 2D data view.
    self._active_sample = 0
    self._active_data = self.data[:, :, self._active_sample]
    # Statistics for the active layer, filled in by refresh_statistics().
    self.max_val = 0
    self.min_val = 0
    self.average = 0
    self.std_dev = 0
    self.pct_coverage = 0
    self.refresh_statistics()
def get_data(self):
"""Get the 2D array of the map at the current date."""
data = self._active_data
return data
def get_date(self):
"""Return the current date as a datetime object."""
return self.dates[self._active_sample]
def get_dates(self):
"""Return the backing array of datetime objects."""
return self.dates
def get_date_ind(self):
"""Return the index for the current date."""
return self._active_sample
def set_dates(self, dataset):
"""Copy the date object from another dataset."""
self._n_samples = dataset.data.shape[2]
self.dates = dataset.get_dates()
def derivative(self):
"""Take the derivative of the dataset over time."""
# If there is only one data set, return unchanged
if self.data.shape[2] == 1:
return self.data
derivatives = np.empty([self.data.shape[0], self.data.shape[1],
self._n_samples - 1])
diff = self.data[:, :, 1::] - self.data[:, :, 0:-1]
for i in range(len(self.dates) - 1):
date_interval = self.dates[i + 1] - self.dates[i]
derivatives[:, :, i] = np.divide(diff[:, :, i], date_interval.days)
return derivatives
def next_data(self):
"""Advance the active data to the next date, if possible."""
if self._active_sample >= self._n_samples - 1:
self._active_sample = 0
else:
self._active_sample += 1
self._active_data = self.data[:, :, self._active_sample]
self.refresh_statistics()
def prev_data(self):
"""Reqind the active data to the previous date, if possible."""
if self._active_sample == 0:
self._active_sample = self._n_samples - 1
else:
self._active_sample -= 1
self._active_data = self.data[:, :, self._active_sample]
self.refresh_statistics()
def refresh_statistics(self):
"""Recalculate statistics for this dataset."""
self.max_val = np.max(self._active_data)
self.min_val = np.min(self._active_data)
self.average = np.average(self._active_data[np.nonzero(
self._active_data)])
self.std_dev = np.std(self._active_data[np.nonzero(self._active_data)])
self.pct_coverage = np.count_nonzero(self._active_data)\
/ (self._active_data.shape[0] * self._active_data.shape[1]) * 100
def reset_date(self):
"""Reset the data to the first date in the dataset."""
self._active_sample = 0
self._active_data = self.data[:, :, 0]
class LatLngConverter:
    """This utility class converts x and y positions to coordinates.

    Positions are in grid units of ``size`` metres relative to the configured
    origin.  Conversions use the equirectangular shortcut: 1 degree of
    latitude ~= 111111 m, and 111111 * cos(lat) m per degree of longitude.
    """

    def __init__(self, config):
        """
        :param config: mapping with a 'GPS' section providing
            'origin_lat', 'origin_lng' and 'size' (cell size in metres)
        """
        self.lng0 = float(config['GPS']['origin_lng'])
        self.lat0 = float(config['GPS']['origin_lat'])
        self.origin = (self.lat0, self.lng0)
        self.diam = float(config['GPS']['size'])

    def data_to_latlng(self, points):
        """Convert a two-column (x, y) DataFrame into a lat/lng DataFrame."""
        latlng = pd.DataFrame.copy(points)
        latlng.columns = ["field.latitude", "field.longitude"]
        for pt in points.itertuples():
            # .loc avoids pandas chained indexing, which can silently write
            # to a temporary copy instead of ``latlng`` itself.
            latlng.loc[pt[0], "field.latitude"] = self.y_to_lat(pt[2])
            latlng.loc[pt[0], "field.longitude"] = self.x_to_lng(pt[1])
        return latlng

    def lat_to_y(self, lat):
        # Inverse of y_to_lat.  The delta must be (lat - lat0) -- the original
        # (lat0 - lat) had the sign flipped, so round-trips did not return
        # the input: y_to_lat(lat_to_y(lat)) gave 2*lat0 - lat.
        d_lat = lat - self.lat0
        m = d_lat * 111111
        y = m / self.diam
        return y

    def lng_to_x(self, lng):
        # Inverse of x_to_lng (sign fixed, see lat_to_y).
        d_lng = lng - self.lng0
        m = d_lng * (111111 * cos(radians(self.lat0)))
        x = m / self.diam
        return x

    def x_to_lng(self, x):
        # Convert data point to distance in meters
        m = x * self.diam
        # Convert data point to longitude with shortcut:
        # 1 deg lng = 111111 * cos(lat) * m
        d_lng = m / (111111 * cos(radians(self.lat0)))
        # Determine new longitude from base longitude
        return self.lng0 + d_lng

    def y_to_lat(self, y):
        # Convert data point to distance in meters
        m = y * self.diam
        # Convert data point to latitude with shortcut:
        # 1 deg lat = 111111 m
        d_lat = m / 111111
        # Determine new latitude from base latitude
        return self.lat0 + d_lat
class StressMapWrapper:
    """Bundle a stress map together with the regions it was derived from."""

    def __init__(self, stress_map, regions):
        # Store references only; no copying or validation is performed.
        self.regions = regions
        self.map = stress_map
if __name__ == '__main__':
    # Simple script to convert the lat/lng of a point relative to a given anchor.
    config = configparser.ConfigParser()
    config['GPS'] = {'origin_lat': '31.52036604680005',
                     'origin_lng': '-83.54861912284196',
                     'size': '0.2'}
    converter = LatLngConverter(config)
    point_x, point_y = -296, -601
    print("lat: " + str(converter.y_to_lat(point_y)))
    print("lng: " + str(converter.x_to_lng(point_x)))
| StarcoderdataPython |
4821993 | #!/usr/bin/env python3
# Usage:
# $0 -o <output-zip> <toplevel-directory>
#
# zips all files under <toplevel-directory>. includes .log of process and .tsv of contents
import zipfile
from xdfile.metadatabase import xd_sources_row, xd_sources_header
from xdfile.utils import find_files_with_time, get_log, get_args, filetime, args_parser, parse_pathname
from xdfile.utils import log, info, iso8601, open_output, strip_toplevel
def main():
    """Catalog every input file into the output archive, recording a sources
    row for each, then emit the .tsv catalog and the run log."""
    p = args_parser('catalog source files and create source.tsv')
    p.add_argument('-s', '--source', default=None, help='ExternalSource')
    args = get_args(parser=p)

    info("importing from %s" % args.source)

    outf = open_output()

    sources = []
    for input_source in args.inputs:
        for path, contents, mtime in find_files_with_time(input_source):
            # Skip zero-byte files; they carry no puzzle data.
            if not contents:
                info("ignoring empty file")
                continue
            outf.write_file(strip_toplevel(path), contents, mtime)
            sources.append(xd_sources_row(path, args.source or input_source, iso8601(mtime)))

    info("%s files cataloged" % len(sources))

    outbase = parse_pathname(args.output).base
    outf.write_file("%s.tsv" % outbase, xd_sources_header + "".join(sources))
    outf.write_file("%s.log" % outbase, get_log())
main()
| StarcoderdataPython |
5053365 | <gh_stars>10-100
from __future__ import absolute_import
from base64 import b64encode, b64decode
from irods.message.ordered import OrderedProperty
import six
if six.PY3:
from html import escape
else:
from cgi import escape
class MessageProperty(OrderedProperty):
    """Base descriptor for message fields serialized as XML-style tags.

    Subclasses supply ``format`` (python value -> wire text) and ``parse``
    (wire text -> python value).
    """
    def __get__(self, obj, cls):
        return obj._values[self.name]
    def __set__(self, obj, value):
        obj._values[self.name] = value
    def dub(self, name):
        # Record the attribute name this descriptor is bound under.
        self.name = name
        return self
    def pack(self, value):
        """Serialize ``value`` as <name>formatted-value</name>."""
        body = self.format(value)
        if six.PY3 and isinstance(body, bytes):
            body = body.decode("utf-8")
        return "<%s>%s</%s>" % (self.name, body, self.name)
    def unpack(self, els):
        """Parse the first element of ``els``; return None when empty."""
        if not len(els):
            return None
        return self.parse(els[0].text)
class IntegerProperty(MessageProperty):
    """Message field holding an integer, serialized as its decimal string."""
    def format(self, value):
        return str(value)
    def parse(self, value):
        return int(value)
class LongProperty(MessageProperty):
    """Message field for long integers; identical wire form to IntegerProperty
    (Python ints are unbounded, so both serialize via str/int)."""
    def format(self, value):
        return str(value)
    def parse(self, value):
        return int(value)
class BinaryProperty(MessageProperty):
    """Message field holding binary data, base64-encoded on the wire."""
    def __init__(self, length=None):
        # ``length`` is stored but not enforced here.
        self.length = length
        super(BinaryProperty, self).__init__()
    if six.PY2:
        def format(self, value):
            return b64encode(value)
    else:
        # Python 3
        def format(self, value):
            if isinstance(value, bytes):
                return b64encode(value)
            else:
                # Text input is encoded to bytes before base64.
                return b64encode(value.encode())
    def parse(self, value):
        # Returns raw bytes decoded from the base64 wire form.
        val = b64decode(value)
        return val
class StringProperty(MessageProperty):
    """Message field holding text, XML-escaped (&, <, >) on the wire."""
    def __init__(self, length=None):
        # ``length`` is stored but not enforced here.
        self.length = length
        super(StringProperty, self).__init__()
    @staticmethod
    def escape_xml_string(string):
        # quote=False leaves single and double quotes untouched.
        return escape(string, quote=False)
    if six.PY2:
        def format(self, value):
            if isinstance(value, str) or isinstance(value, unicode):
                return self.escape_xml_string(value)
            return self.escape_xml_string(str(value))
    else:
        # Python 3
        def format(self, value):
            if isinstance(value, str):
                return self.escape_xml_string(value)
            if isinstance(value, bytes):
                return self.escape_xml_string(value.decode())
            return self.escape_xml_string(str(value))
    def parse(self, value):
        # Returned as-is; no unescaping is performed here.
        return value
class ArrayProperty(MessageProperty):
    """Message field holding a sequence of values, each packed via ``prop``."""
    def __init__(self, prop):
        self.prop = prop
        super(ArrayProperty, self).__init__()
    def pack(self, values):
        # The inner property is packed under this array's own tag name.
        self.prop.dub(self.name)
        return "".join(self.prop.pack(item) for item in values)
    def unpack(self, els):
        # Each element is unpacked individually through the inner property.
        return [self.prop.unpack([element]) for element in els]
class SubmessageProperty(MessageProperty):
    """Message field whose value is itself a message of ``message_cls``."""
    def __init__(self, message_cls=None):
        self.message_cls = message_cls
        super(SubmessageProperty, self).__init__()
    def pack(self, value):
        # The submessage renders its own tags; no extra wrapper is added.
        return value.pack()
    def unpack(self, els):
        # Build an empty submessage and let it consume the first element;
        # returns None when there is nothing to unpack.
        if len(els):
            el = els[0]
            msg = self.message_cls()
            msg.unpack(el)
            return msg
        return None
| StarcoderdataPython |
class DefaultConfig(object):
    # NOTE(review): ``data`` is a class-level dict, so it is shared by every
    # instance of DefaultConfig.  Confirm this registry behaviour is
    # intentional; if per-instance storage was meant, it belongs in __init__.
    data = {}
    def get(self, path, getter):
        # NOTE(review): despite the name, this *stores* ``getter`` under
        # ``path`` rather than retrieving anything -- verify against callers.
        self.data[path] = getter
__all__ = ["entries", "getters"]
| StarcoderdataPython |
9655800 | <gh_stars>10-100
import random
import struct
import sys
from bxutils.logging.log_level import LogLevel
from bxgateway.btc_constants import BTC_HDR_COMMON_OFF
from bxgateway.messages.btc.btc_message import BtcMessage
# FIXME dedup this against pongbtcmessage
from bxgateway.messages.btc.btc_message_type import BtcMessageType
class PingBtcMessage(BtcMessage):
    # Bitcoin "ping" message: common header plus an 8-byte random nonce payload.
    MESSAGE_TYPE = BtcMessageType.PING
    def __init__(self, magic=None, buf=None):
        """Build a new ping (when ``buf`` is None) or wrap a received buffer.
        :param magic: network magic used for the header when constructing
        :param buf: raw message bytes when wrapping an incoming ping
        """
        if buf is None:
            # Construct: allocate header + 8 bytes, then write a random
            # little-endian 64-bit nonce right after the header.
            buf = bytearray(BTC_HDR_COMMON_OFF + 8)
            self.buf = buf
            off = BTC_HDR_COMMON_OFF
            struct.pack_into("<Q", buf, off, random.randint(0, sys.maxsize))
            off += 8
            BtcMessage.__init__(self, magic, self.MESSAGE_TYPE, off - BTC_HDR_COMMON_OFF, buf)
        else:
            # Wrap: header fields are left as None and decoded lazily.
            self.buf = buf
            self._memoryview = memoryview(buf)
            self._magic = self._command = self._payload_len = self._checksum = None
            self._payload = None
        # Nonce is decoded on first access in nonce().
        self._nonce = None
    def nonce(self):
        """Return the ping nonce, decoding it from the buffer on first use.
        Returns -1 when the buffer is header-only (no payload); a 4-byte
        payload is read as unsigned 32-bit, anything else as unsigned 64-bit.
        """
        if self._nonce is None:
            if len(self.buf) == BTC_HDR_COMMON_OFF:
                self._nonce = -1
            elif len(self.buf) == BTC_HDR_COMMON_OFF + 4:
                self._nonce = struct.unpack_from("<L", self.buf, BTC_HDR_COMMON_OFF)[0]
            else:
                self._nonce = struct.unpack_from("<Q", self.buf, BTC_HDR_COMMON_OFF)[0]
        return self._nonce
    def log_level(self) -> LogLevel:
        # Pings are frequent housekeeping traffic; log at debug only.
        return LogLevel.DEBUG
| StarcoderdataPython |
1719708 | from utils import *
from rtid_out_info import RtidOutInfo
from rtid_config import RTIDConfig
from content_manager import ContentManager
from datetime import datetime
from os import path, makedirs
import json
import praw
import secret
import sys
class RTID(Logger):
    """Downloads hot image submissions from a configured subreddit."""

    def __init__(self, rtid_config: RTIDConfig):
        super().__init__()
        self.rtid_config = rtid_config
        self.reddit = None
        self.subreddit_instance = None
        self.init()
        self.rtid_out_info = RtidOutInfo(self.rtid_config.subreddit_name)
        self.content_manager = ContentManager(self.subreddit_instance, self.rtid_config)

    def init(self):
        """Authenticate against Reddit and resolve the target subreddit.

        Exits the process (status 1) if Reddit cannot be reached or the
        subreddit does not exist.
        """
        self.log.info("Starting RTID")
        self.log.info("Instantiating Reddit instance")
        self.reddit = praw.Reddit(
            client_id=secret.reddit_client_id,
            client_secret=secret.reddit_client_secret,
            username=secret.reddit_username,
            # Fixed: this argument previously contained a redaction
            # placeholder (<PASSWORD>), which is a syntax error; the secret
            # module follows the reddit_* naming used by the other fields.
            password=secret.reddit_password,
            user_agent=secret.reddit_user_agent,
        )
        # Subreddits is a Listing class that provides various subreddit lists
        try:
            subreddits = self.reddit.subreddits
        except Exception as e:
            print(e)
            sys.exit(1)
        self.subreddit_instance = self.reddit.subreddit(self.rtid_config.subreddit_name)
        sub_exist = check_subreddit_exists(subreddits, self.rtid_config.subreddit_name)
        if not sub_exist:
            print(f"r/{self.rtid_config.subreddit_name} does not exist.")
            sys.exit(1)

    def run(self):
        """Download the image of every hot submission to the output directory."""
        hot_submission_contents = self.content_manager.get_hot_submission_contents()
        for content in hot_submission_contents:
            content.print_content_info()
            img_download_path = path.join(self.rtid_out_info.subreddit_download_path, content.content_full_name)
            self.log.info(f"Downloading [{content.title}] to path [{img_download_path}] . Image url: {content.content_url}")
            self.content_manager.download_img(img_download_path=img_download_path, img_url=content.content_url)
        self.log.info("Finished.")
| StarcoderdataPython |
5066356 | import json
import math
import numpy
from colorful.fields import RGBColorField
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.gis.gdal import Envelope, OGRGeometry, SpatialReference
from django.contrib.postgres.fields import ArrayField
from django.db.models import Max, Min
from django.db.models.signals import m2m_changed, post_save, pre_save
from django.dispatch import receiver
from .const import WEB_MERCATOR_SRID
from .utils import hex_to_rgba
from .valuecount import ValueCountMixin
class LegendSemantics(models.Model):
    """
    Labels for pixel types (urban, forest, warm, cold, etc).
    Shared vocabulary that individual LegendEntry rows reference.
    """
    name = models.CharField(max_length=50)
    description = models.TextField(null=True, blank=True)
    # NOTE: max_length on a TextField is used by forms only; it is not
    # enforced at the database level.
    keyword = models.TextField(null=True, blank=True, max_length=100)
    def __str__(self):
        return self.name
class LegendEntry(models.Model):
    """
    One row in a Legend: a semantics label, a pixel-value expression and
    the color used to render matching pixels.
    """
    semantics = models.ForeignKey(LegendSemantics)
    # Fixed: the implicitly-concatenated help_text strings were missing a
    # space between "the" and "pixel", rendering as "thepixel value".
    expression = models.CharField(max_length=500,
                                  help_text='Use a number or a valid numpy logical expression where x is the '
                                            'pixel value. For instance: "(-3.0 < x) & (x <= 1)" or "x <= 1".')
    color = RGBColorField()

    def __str__(self):
        return '{}, {}, {}'.format(self.semantics.name,
                                   self.expression,
                                   self.color)
class Legend(models.Model):
    """
    Legend object for Rasters: a collection of LegendEntry rows with a
    denormalized JSON copy kept in the ``json`` field.
    """
    title = models.CharField(max_length=200)
    description = models.TextField(null=True, blank=True)
    entries = models.ManyToManyField(LegendEntry)
    json = models.TextField(null=True, blank=True)
    modified = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.title
    def update_json(self):
        """Refresh the denormalized JSON representation of the entries."""
        serialized = [
            {
                'name': entry.semantics.name,
                'expression': entry.expression,
                'color': entry.color,
            }
            for entry in self.entries.all()
        ]
        self.json = json.dumps(serialized)
    @property
    def colormap(self):
        """Map each entry expression to its RGBA color tuple."""
        return {
            entry['expression']: hex_to_rgba(entry['color'])
            for entry in json.loads(self.json)
        }
    def save(self, *args, **kwargs):
        # Keep the JSON field in sync whenever an existing legend is saved.
        if self.id:
            self.update_json()
        super(Legend, self).save(*args, **kwargs)
def legend_entries_changed(sender, instance, action, **kwargs):
    """m2m_changed handler: refresh a Legend's JSON after its entries change.

    Only the post-phase add/remove signals trigger a refresh; pre-phase and
    all other actions are ignored.
    """
    if action not in ('post_add', 'post_remove'):
        return
    instance.update_json()
    instance.save()
m2m_changed.connect(legend_entries_changed, sender=Legend.entries.through)
@receiver(post_save, sender=LegendEntry)
def update_dependent_legends_on_entry_change(sender, instance, **kwargs):
    """
    Updates dependent Legends on a change in Legend entries.
    """
    # Re-serialize every legend that includes the modified entry.
    for legend in Legend.objects.filter(entries__id=instance.id):
        legend.update_json()
        legend.save()
@receiver(post_save, sender=LegendSemantics)
def update_dependent_legends_on_semantics_change(sender, instance, **kwargs):
    """
    Updates dependent Legends on a change in Semantics.
    """
    # Walk semantics -> entries -> legends and refresh each affected legend.
    # NOTE(review): a legend referenced by several entries of the same
    # semantics gets refreshed and saved once per entry -- confirm the
    # duplicate saves are acceptable.
    for entry in LegendEntry.objects.filter(semantics_id=instance.id):
        for legend in Legend.objects.filter(entries__id=entry.id):
            legend.update_json()
            legend.save()
class RasterLayer(models.Model, ValueCountMixin):
    """
    Source data model for raster layers.
    """
    # Datatype choices: continuous measurements vs discrete classifications.
    CONTINUOUS = 'co'
    CATEGORICAL = 'ca'
    MASK = 'ma'
    RANK_ORDERED = 'ro'
    DATATYPES = (
        (CONTINUOUS, 'Continuous'),
        (CATEGORICAL, 'Categorical'),
        (MASK, 'Mask'),
        (RANK_ORDERED, 'Rank Ordered')
    )
    name = models.CharField(max_length=100, blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    datatype = models.CharField(max_length=2, choices=DATATYPES, default='co')
    rasterfile = models.FileField(upload_to='rasters', null=True, blank=True)
    # Fixed: the implicitly-concatenated help_text strings were missing a
    # space between "nodata" and "value", rendering as "nodatavalue".
    nodata = models.CharField(max_length=100, null=True, blank=True,
                              help_text='Leave blank to keep the internal band nodata values. If a nodata '
                                        'value is specified here, it will be used for all bands of this raster.')
    legend = models.ForeignKey(Legend, blank=True, null=True)
    modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        return '{} {} (type: {})'.format(self.id, self.name, self.datatype)

    @property
    def discrete(self):
        """
        Returns true for discrete rasters.
        """
        return self.datatype in (self.CATEGORICAL, self.MASK, self.RANK_ORDERED)

    # Per-instance cache for extent(); None until first computed.
    _bbox = None

    def extent(self, srid=WEB_MERCATOR_SRID):
        """
        Returns bbox for layer as (xmin, ymin, xmax, ymax) in ``srid``.

        NOTE(review): the cached value is not keyed on ``srid`` -- calling
        extent() again with a different srid returns the first projection.
        Confirm callers only use one srid per instance.
        """
        if not self._bbox:
            # Get bbox for raster in original coordinates
            meta = self.metadata
            xmin = meta.uperleftx
            ymax = meta.uperlefty
            xmax = xmin + meta.width * meta.scalex
            ymin = ymax + meta.height * meta.scaley
            # Create Polygon box
            geom = OGRGeometry(Envelope((xmin, ymin, xmax, ymax)).wkt)
            # Set original srs
            if meta.srs_wkt:
                geom.srs = SpatialReference(meta.srs_wkt)
            else:
                geom.srid = meta.srid
            # Transform to requested srid
            geom.transform(srid)
            # Calculate value range for bbox
            coords = geom.coords[0]
            xvals = [x[0] for x in coords]
            yvals = [x[1] for x in coords]
            # Set bbox
            self._bbox = (min(xvals), min(yvals), max(xvals), max(yvals))
        return self._bbox

    def index_range(self, zoom):
        """
        Returns the min/max tile x and y indices for this layer at ``zoom``.
        """
        return self.rastertile_set.filter(tilez=zoom).aggregate(
            Min('tilex'), Max('tilex'), Min('tiley'), Max('tiley')
        )
@receiver(pre_save, sender=RasterLayer)
def reset_parse_log_if_data_changed(sender, instance, **kwargs):
    # Before saving, compare against the stored row; if the raster file
    # changed, clear the parse log so the layer is re-parsed on post_save.
    try:
        obj = RasterLayer.objects.get(pk=instance.pk)
    except RasterLayer.DoesNotExist:
        # New layer: nothing to compare against.
        pass
    else:
        if obj.rasterfile.name != instance.rasterfile.name:
            instance.parsestatus.log = ''
@receiver(post_save, sender=RasterLayer)
def parse_raster_layer_if_log_is_empty(sender, instance, created, **kwargs):
    # On creation, attach the one-to-one status and metadata rows.
    if created:
        RasterLayerParseStatus.objects.create(rasterlayer=instance)
        RasterLayerMetadata.objects.create(rasterlayer=instance)
    # An empty parse log marks a layer (or replaced file) that still needs
    # parsing; dispatch to celery when configured, otherwise parse inline.
    if instance.rasterfile.name and instance.parsestatus.log == '':
        if hasattr(settings, 'RASTER_USE_CELERY') and settings.RASTER_USE_CELERY:
            from raster.tasks import parse_raster_layer_with_celery
            parse_raster_layer_with_celery.delay(instance)
        else:
            from raster.parser import RasterLayerParser
            parser = RasterLayerParser(instance)
            parser.parse_raster_layer()
class RasterLayerMetadata(models.Model):
    """
    Stores meta data for a raster layer: origin, scale and skew of the
    source raster (consumed by RasterLayer.extent) plus its SRS.
    """
    rasterlayer = models.OneToOneField(RasterLayer, related_name='metadata')
    # NOTE: the "uperleft*" field names misspell "upperleft" but are part of
    # the stored schema/API and must not be renamed casually.
    uperleftx = models.FloatField(null=True, blank=True)
    uperlefty = models.FloatField(null=True, blank=True)
    width = models.IntegerField(null=True, blank=True)
    height = models.IntegerField(null=True, blank=True)
    scalex = models.FloatField(null=True, blank=True)
    scaley = models.FloatField(null=True, blank=True)
    skewx = models.FloatField(null=True, blank=True)
    skewy = models.FloatField(null=True, blank=True)
    numbands = models.IntegerField(null=True, blank=True)
    srs_wkt = models.TextField(null=True, blank=True)
    srid = models.PositiveSmallIntegerField(null=True, blank=True)
    max_zoom = models.PositiveSmallIntegerField(null=True, blank=True)
    def __str__(self):
        return self.rasterlayer.name
class RasterLayerParseStatus(models.Model):
    """
    Tracks the parsing status of the raster layer.
    """
    # Status codes, in pipeline order (FAILED is terminal).
    UNPARSED = 0
    DOWNLOADING_FILE = 1
    REPROJECTING_RASTER = 2
    CREATING_TILES = 3
    DROPPING_EMPTY_TILES = 4
    FINISHED = 5
    FAILED = 6
    STATUS_CHOICES = (
        (UNPARSED, 'Layer not yet parsed'),
        (DOWNLOADING_FILE, 'Downloading file'),
        (REPROJECTING_RASTER, 'Reprojecting'),
        (CREATING_TILES, 'Creating tiles'),
        (DROPPING_EMPTY_TILES, 'Dropping empty tiles'),
        (FINISHED, 'Finished parsing'),
        (FAILED, 'Failed parsing'),
    )
    rasterlayer = models.OneToOneField(RasterLayer, related_name='parsestatus')
    status = models.IntegerField(choices=STATUS_CHOICES, default=UNPARSED)
    tile_level = models.IntegerField(null=True, blank=True)
    # Free-form parse log; an empty log means the layer needs (re)parsing.
    log = models.TextField(default='', editable=False)
    def __str__(self):
        return '{0} - {1}'.format(self.rasterlayer.name, self.get_status_display())
class RasterLayerBandMetadata(models.Model):
    """Per-band statistics and value histogram for a raster layer."""
    # Number of bins used for the precomputed value histogram.
    HISTOGRAM_BINS = 100
    rasterlayer = models.ForeignKey(RasterLayer)
    band = models.PositiveIntegerField()
    nodata_value = models.FloatField(null=True)
    max = models.FloatField()
    min = models.FloatField()
    hist_values = ArrayField(models.FloatField(), size=HISTOGRAM_BINS)
    hist_bins = ArrayField(models.FloatField(), size=HISTOGRAM_BINS + 1)
    def __str__(self):
        return '{} - Min {} - Max {}'.format(self.rasterlayer.name, self.min, self.max)
    def save(self, *args, **kwargs):
        # On first save only, seed an all-zero histogram whose bin edges span
        # [floor(min), ceil(max)]; the counts start at zero.
        if not self.pk:
            # Construct empty histogram
            hist = numpy.histogram(
                [],
                range=(math.floor(self.min), math.ceil(self.max)),
                bins=self.HISTOGRAM_BINS
            )
            # Set empty histogram values
            self.hist_values = hist[0].tolist()
            self.hist_bins = hist[1].tolist()
        super(RasterLayerBandMetadata, self).save(*args, **kwargs)
class RasterTile(models.Model):
    """
    Store individual tiles of a raster data source layer.
    """
    # Allowed tile zoom levels (1-18), as (value, label) pairs.
    ZOOMLEVELS = (
        (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7),
        (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13),
        (14, 14), (15, 15), (16, 16), (17, 17), (18, 18)
    )
    rid = models.AutoField(primary_key=True)
    # Tile raster stored in web mercator projection.
    rast = models.RasterField(null=True, blank=True, srid=WEB_MERCATOR_SRID)
    rasterlayer = models.ForeignKey(RasterLayer, null=True, blank=True, db_index=True)
    # XYZ tile address of this tile within the layer's tile pyramid.
    tilex = models.IntegerField(db_index=True, null=True)
    tiley = models.IntegerField(db_index=True, null=True)
    tilez = models.IntegerField(db_index=True, null=True, choices=ZOOMLEVELS)
    def __str__(self):
        return '{0} {1}'.format(self.rid, self.filename)
| StarcoderdataPython |
3485095 | <gh_stars>1000+
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import parlai.utils.testing as testing_utils
class TestUnigram(unittest.TestCase):
    """Smoke test for the plain unigram baseline agent."""
    def test_unigram(self):
        # Train for a tiny fraction of an epoch; we only check that the
        # model produces a nonzero validation F1, not that it converges.
        valid, test = testing_utils.train_model(
            {'model': 'unigram', 'task': 'integration_tests', 'num_epochs': 0.01}
        )
        assert valid['f1'] > 0
class TestUnigramTorchAgent(unittest.TestCase):
    """Smoke test for the TorchAgent-based unigram test agent."""
    def test_unigram(self):
        # One full (tiny) epoch with batching and truncation exercised;
        # again we only require a nonzero validation F1.
        valid, test = testing_utils.train_model(
            {
                'model': 'test_agents/unigram',
                'task': 'integration_tests',
                'num_epochs': 1.0,
                'batchsize': 32,
                'truncate': 4,
            }
        )
        assert valid['f1'] > 0
| StarcoderdataPython |
75052 | from .browser import *
from .graphql import *
| StarcoderdataPython |
3238962 | # Defination of Prime
# If n is prime, 1 | n and n | n
def isPrime(n):
    """Return True if ``n`` is a prime number, False otherwise.

    Fixes the original, which returned True for n < 2 (the loop never ran
    for 0 and 1), and which tested every k < n instead of stopping at
    sqrt(n).
    """
    if n < 2:
        return False
    # A composite n must have a divisor no larger than sqrt(n).
    for k in range(2, int(n ** 0.5) + 1):
        if n % k == 0:
            return False
    return True
# Scan candidates upward from 2, printing each prime with its ordinal
# position, until one million primes have been found.
found = 0
candidate = 2
while found < 1000000:
    if isPrime(candidate):
        found += 1
        print(candidate, 'is prime at', found)
    candidate += 1
| StarcoderdataPython |
5067248 | #!/usr/bin/env python
# Download and log the MTA's status updates. We only log changes.
from __future__ import print_function
import argparse
import doctest
import json
import os
import random
import string
import re
import sys
from datetime import datetime, timedelta
from filewrapper import FileWrapper
from parser import ParseMTA
from sqliter import Storage
import dicts
class Line(object):
    """ A class for managing data specific to a particular line of transit service.
        We log delays and planned work per-line. This class helps with that.
    """
    def __init__(self, line):
        """
        :param line: the line identifier, e.g. 'A' or '1'
        >>> l = Line('A')
        >>> print(l.line)
        A
        """
        self.lines = {}
        self.datetimes = []
        self.intervals = []
        self.line = line
        self.last_alert = ''
        self.cause = []
        # *** HC -- transit type is hard-coded to subway for now.
        self.transit_type = 'subway'
    @staticmethod
    def parse_dt(dt):
        """ Take a datetime such as 06/01/2017 10:31PM and turn it into
            a datetime object.
            >>> l = Line('L')
            >>> dt = '06/01/2017 10:31PM'
            >>> print(l.parse_dt(dt))
            2017-06-01 22:31:00
            """
        return datetime.strptime(dt, '%m/%d/%Y %I:%M%p')
    def build_intervals(self):
        """ Populate the self.intervals list with the time between each
            consecutive pair of service alerts in self.datetimes.
            Implemented (the original was a bare ``pass`` stub).
            >>> l = Line('L')
            >>> l.datetimes = [l.parse_dt('06/01/2017 10:31PM'), l.parse_dt('06/01/2017 11:31PM')]
            >>> l.build_intervals()
            >>> int(l.intervals[0].total_seconds())
            3600
            """
        self.intervals = [later - earlier for earlier, later
                          in zip(self.datetimes, self.datetimes[1:])]
pass
class Logger:
    """ We're logging how long it has been since each line's previous
        service alert, and to do that we need to keep track of which lines
        have active service alerts and the timestamp on that alert.
    """
    def __init__(self, *args, **kwargs):
        """
        :param args: optional argparse.Namespace as the first positional arg
        >>> args = build_parser([])
        >>> log = Logger(args)
        """
        # Get the results from the last time we ran this.
        # Fixed: was a bare ``except:`` -- narrowed to missing-file and
        # bad-JSON errors so e.g. KeyboardInterrupt is not swallowed.
        try:
            fh = open('_output/active.json', 'rb')
            self.previous = json.load(fh)
            fh.close()
        except (IOError, ValueError):
            self.previous = None
        self.args = []
        if len(args) > 0:
            self.args = args[0]
        self.db = Storage('mta')
        # NOTE(review): args[0] raises IndexError when Logger() is called
        # with no arguments, despite the guard above -- confirm callers
        # always pass an args namespace.
        self.mta = ParseMTA(args[0])
        # Parallel counters used to sanity-check parsing completeness.
        self.double_check = { 'in_text': 0, 'objects': 0 }
        # Per-line lists of causes that started/stopped during this run.
        self.new = { 'subway': {
            'starts': dict(zip(dicts.lines['subway'], ([] for i in range(len(dicts.lines['subway']))))),
            'stops': dict(zip(dicts.lines['subway'], ([] for i in range(len(dicts.lines['subway'])))))
            }
        }
        self.transit_type = 'subway'
        if hasattr(self.args, 'transit_type') and self.args.transit_type:
            self.transit_type = self.args.transit_type

    def initialize_table(self, table, dbname='mta'):
        """ Resets database table.
            >>> args = build_parser([])
            >>> log = Logger(args)
            >>> log.initialize_db('test')
            True
            >>> log.initialize_table('current')
            True
            """
        self.db = Storage(dbname)
        self.db.setup(table)
        return True

    def initialize_db(self, dbname='mta'):
        """ Resets database. Also sets the self.db value to the name of the db.
            >>> args = build_parser([])
            >>> log = Logger(args)
            >>> log.initialize_db('test')
            True
            """
        os.remove('%s.db' % dbname)
        self.db = Storage(dbname)
        self.db.setup()
        return True

    def get_files(self, files_from_args):
        """ Resolve the list of XML files to process; with no args, download
            the current status XML from the MTA.
            >>> args = build_parser([])
            >>> log = Logger(args)
            >>> log.get_files(['test.xml'])
            ['test.xml']
            """
        if files_from_args == []:
            # If we didn't pass any arguments to logger, we download the current XML.
            # The random query string busts any intermediate caches.
            rando = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
            url = 'http://web.mta.info/status/serviceStatus.txt?%s' % rando
            fh = FileWrapper('_input/mta.xml')
            fh.open()
            # Retry the download once on failure (narrowed from a bare except).
            try:
                fh.write(fh.request(url))
            except Exception:
                fh.write(fh.request(url))
            fh.close()
            files = ['mta.xml']
        else:
            files = files_from_args
            if '*' in files[0]:
                # Wildcard matching on filenames so we can process entire directories
                # put example of that here.
                pass
            if files[0][-1] == '/':
                # If the arg ends with a forward slash that means it's a dir
                files = os.listdir(files[0])
        return files

    def parse_file(self, fn, *args):
        """ Pull out the data we need from the MTA's XML.
            You'd think XML would be well structured. You'd be about half right.
            Returns a dict mapping line name -> Line object for lines with delays.
            >>> args = build_parser([])
            >>> log = Logger(args)
            >>> log.get_files(['test.xml'])
            ['test.xml']
            """
        transit_type = 'subway'
        if hasattr(self.args, 'transit_type') and self.args.transit_type:
            transit_type = self.args.transit_type
        # TODO: Make this flexible to handle the other modes of transit
        self.stop_check = { 'subway': [] }
        items, lines = [], {}
        entries = self.mta.parse_file(fn, transit_type)
        for l in entries:
            item = {
                'status': l.find('status').text,
                'status_detail': {},
                'lines': l.find('name').text,  # This is generic, the actual lines affected may be some of these, or others.
                'datetime': '%s %s' % (l.find('Date').text, l.find('Time').text),
                'text': l.find('text').text
            }
            # DOUBLE-CHECK: count delay markers present in the raw text blob.
            if item['text']:
                self.double_check['in_text'] += len(re.findall('TitleDelay', item['text']))
            if item['status']:
                # Pull out the actual lines affected if we can
                item['status_detail'] = self.mta.extract(item)
            items.append(item)
            if hasattr(self.args, 'verbose') and self.args.verbose:
                if item['status_detail'] and item['status_detail']['TitleDelay'] != {}:
                    print('NOTICE: %(status)s: %(lines)s (%(datetime)s)' % item)
        # Assemble this file's delays into its individual lines
        for item in items:
            if item['status_detail']:
                for dict_ in item['status_detail']['TitleDelay']:
                    # dict_ looks like {u'1': u'Due to signal pr...'}
                    line = dict_
                    cause = item['status_detail']['TitleDelay'][dict_]
                    if line not in lines:
                        lines[line] = Line(line)
                    lines[line].cause = cause
                    # There will only be one datetime. It's pulled from the XML,
                    # even though there are datetime(s) in the XML's text markup blob.
                    dt = lines[line].parse_dt(item['datetime'])
                    if dt not in lines[line].datetimes:
                        self.double_check['objects'] += len(cause)
                        lines[line].datetimes.append(dt)
        return lines

    def commit_starts(self, lines):
        """ If there are alerts in the XML that we don't have in the database,
            add the alert to the database.  Returns the number of new alerts.
            >>> args = build_parser([])
            >>> log = Logger(args)
            >>> log.initialize_db('test')
            True
            >>> files = log.get_files(['test.xml'])
            >>> for fn in files:
            ...     lines = log.parse_file(fn)
            >>> log.commit_starts(lines)
            0
            """
        count = 0
        # self.previous is a dict taken from the json written by the last time we ran this script.
        # The only records in it will be lines with active alerts.
        # We want to add an alert if there's an alert we have that isn't in the previous list.
        #
        # Build a list of existing causes to compare the causes we've found
        # against, so we only add new causes.
        existing_causes = []
        if self.previous:
            for prev in self.previous:
                if prev['cause'] not in existing_causes:
                    existing_causes.append(prev['cause'])
        # Loop through the lines that have alerts.
        # Fixed: .iteritems() is Python-2-only; .items() works on both.
        for line, item in lines.items():
            if line not in dicts.lines['subway']:
                continue
            if self.args.verbose:
                print("NOTICE: Checking line", line)
            for cause in item.cause:
                # Log the cause -- we use this list of causes when comparing the previous
                # version of data json against this version to see if any lines have stopped alerts.
                # NOTE(review): this extends with the whole item.cause rather
                # than appending the single missing cause -- confirm intended.
                if cause not in self.stop_check['subway']:
                    self.stop_check['subway'].extend(item.cause)
                # Make sure this is a new record
                # We only want to update the database with alerts we don't already have in there.
                if cause in existing_causes:
                    continue
                # ARCHIVE TABLE UPDATE
                self.commit_archive_start(line, lines[line], cause)
                # NOTE(review): extend() here vs append() in commit_stops --
                # if cause is a string this adds it character by character.
                self.new[item.transit_type]['starts'][line].extend(cause)
                if self.args.verbose:
                    print("NOTICE: THIS LINE HAS A NEW ALERT", line)
                # CURRENT TABLE and ACTIVE TABLE UPDATE
                # ***HC
                params = {'cause': cause, 'line': line, 'start': item.datetimes[0], 'transit_type': 'subway'}
                self.db.q.update_active(**params)
                self.db.q.update_current(**params)
                # DOUBLE-CHECK
                count += 1
        return count

    def commit_stops(self):
        """ Check the previous file to see if there are active alerts with lines
            matching a line in our stop_check file. If there are, we need to update
            the stop value of that line's record in the database, because that means
            an alert has ended.  Returns the number of stopped alerts.
            >>> args = build_parser([])
            >>> log = Logger(args)
            >>> log.initialize_db('test')
            True
            >>> files = log.get_files(['test.xml'])
            >>> for fn in files:
            ...     lines = log.parse_file(fn)
            >>> log.commit_starts(lines)
            0
            >>> log.commit_stops()
            0
            """
        count = 0
        if self.previous:
            # self.previous is a dict taken from the json written by the last time we ran this script.
            for prev in self.previous:
                # We only want to check for the stoppage of current alerts.
                # Any line with a current alert *will be* in the stop_check list of alert causes.
                # The stop_check list exists for this purpose: To check if an alert for a line has stopped.
                # ***HC
                if prev['cause'] not in self.stop_check['subway']:
                    if self.args.verbose:
                        print("NOTICE: THIS LINE'S ALERT HAS STOPPED", prev['line'])
                    # ARCHIVE TABLE UPDATE
                    # Fixed: timedelta.seconds drops whole days, so alerts
                    # longer than 24h wrapped around; use total_seconds().
                    prev['length'] = int((datetime.now() - self.db.q.convert_to_datetime(prev['start'])).total_seconds())
                    self.commit_archive_stop(prev['line'], prev)
                    self.new['subway']['stops'][prev['line']].append(prev['cause'])
                    # CURRENT TABLE and ACTIVE TABLE UPDATE
                    # ***HC
                    params = {'line': prev['line'], 'cause': prev['cause'], 'stop': datetime.now(), 'transit_type': 'subway'}
                    self.db.q.update_active(**params)
                    self.db.q.update_current(**params)
                    count += 1
        return count

    def commit_archive_start(self, line, item, cause):
        """ Insert a record into the archive table.
        """
        params = {'cause': cause, 'line': line, 'start': item.datetimes[0], 'transit_type': 'subway'}
        self.db.q.update_archive(**params)
        return True

    def commit_archive_stop(self, line, item):
        """ Update the record for this alert in the archive table.
        """
        params = {'cause': item['cause'], 'length': item['length'], 'line': line, 'stop': datetime.now(), 'transit_type': 'subway'}
        self.db.q.update_archive(**params)
        return True

    def write_json(self, table, *args, **kwargs):
        """ Write the contents of a table to a json file.
            >>> args = build_parser([])
            >>> log = Logger(args)
            >>> log.initialize_db('test')
            True
            """
        # Fixed: the file was opened in 'wb' while json.dump writes str,
        # which fails on Python 3; text mode works on both.  Using a
        # context manager also guarantees the handle is closed.
        with open('_output/%s.json' % table, 'w') as fh:
            if table == 'current':
                fields = self.db.q.get_table_fields(table)
                rows = self.db.q.select_current()
                json.dump(self.db.q.make_dict(fields, rows), fh)
            elif table == 'active':
                fields = self.db.q.get_table_fields(table)
                rows = self.db.q.select_active()
                json.dump(self.db.q.make_dict(fields, rows), fh)
            elif table == 'archive':
                fields = self.db.q.get_table_fields(table)
                rows = self.db.q.select_archive(**kwargs)
                json.dump(self.db.q.make_dict(fields, rows), fh)
            elif table == 'archive_info':
                pass
        return True

    def save_xml(self):
        """ Save the XML for later.  (Not yet implemented.)
        """
        pass
def main(args):
    """ There are two situations we run this from the command line:
    1. When building archives from previous day's service alerts and
    2. When keeping tabs on the current days's service alerts.
    Most of what we do here for each is the same, but with #2 we only
    process one file, and we have to look up stored information to ensure
    the intervals values are current.
    >>> args = build_parser([])
    >>> main(args)
    """
    log = Logger(args)
    if args.initial:
        # First run: create the database tables from scratch.
        log.initialize_db()
    if args.reset_table:
        tables = log.db.q.get_tables()
        if args.verbose:
            print("NOTICE: We are resetting the %s table (amongst %s)" % (args.reset_table, tables.__str__()))
        #if args.reset_table in tables:
        log.db.setup(args.reset_table)
    files = log.get_files(args.files)
    for fn in files:
        lines = log.parse_file(fn)
        # Record newly started alerts, then close out any that have ended.
        commit_count = log.commit_starts(lines)
        commit_count += log.commit_stops()
    log.db.conn.commit()
    # Publish JSON snapshots of the three tables for downstream consumers.
    log.write_json('current')
    log.write_json('active')
    params = { 'date': datetime.now().date().__str__() }
    log.write_json('archive', **params)
    if args.verbose:
        print("NOTICE: ", log.double_check)
        print("NOTICE: ", log.new['subway']['starts'].values())
        print("NOTICE: ", log.new['subway']['stops'].values())
    #new_len = sum(len(v) for v in log.new['subway']['starts'].itervalues()) + sum(len(v) for v in log.new['subway']['stops'].itervalues())
    if commit_count > 0 and log.double_check['in_text'] != log.double_check['objects']:
        # The two counts disagree: keep the raw XML around for debugging.
        log.save_xml()
    log.db.conn.close()
def build_parser(args):
    """ This method allows us to test the args.
    >>> args = build_parser(['--verbose'])
    >>> print(args.verbose)
    True
    """
    # NOTE: the doctest above had a stray closing paren in the original,
    # which made doctest.testmod() fail with a SyntaxError.
    parser = argparse.ArgumentParser(usage='$ python logger.py',
                                     description='Get the latest MTA alerts and add any new ones.',
                                     epilog='Example use: python logger.py')
    parser.add_argument("-i", "--initial", dest="initial", default=False, action="store_true")
    parser.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true")
    parser.add_argument("--test", dest="test", default=False, action="store_true")
    parser.add_argument("-t", "--type", dest="transit_type", default=None)
    parser.add_argument("files", nargs="*", help="Path to files to ingest manually")
    parser.add_argument("--reset_table", dest="reset_table", default=False, help="Truncate and create a table in the database")
    args = parser.parse_args(args)
    return args
if __name__ == '__main__':
    args = build_parser(sys.argv[1:])
    if args.test:
        # --test runs the module doctests before the normal pipeline.
        doctest.testmod(verbose=args.verbose)
    main(args)
| StarcoderdataPython |
11271634 | <reponame>fchapoton/sage
r"""
Subcrystals
These are the crystals that are subsets of a larger ambient crystal.
AUTHORS:
- <NAME> (2013-10-16): Initial implementation
"""
#*****************************************************************************
# Copyright (C) 2013 <NAME> <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#****************************************************************************
from sage.misc.lazy_attribute import lazy_attribute
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.structure.element_wrapper import ElementWrapper
from sage.categories.crystals import Crystals
from sage.categories.finite_crystals import FiniteCrystals
from sage.categories.supercrystals import SuperCrystals
from sage.combinat.root_system.cartan_type import CartanType
from sage.rings.integer import Integer
from sage.rings.infinity import infinity
from sage.structure.richcmp import richcmp
class Subcrystal(UniqueRepresentation, Parent):
    r"""
    A subcrystal `X` of an ambient crystal `Y` is a crystal formed by taking a
    subset of `Y` and whose crystal structure is induced by `Y`.
    INPUT:
    - ``ambient`` -- the ambient crystal
    - ``contained`` -- (optional) a set (or function) which specifies when an
      element is contained in the subcrystal; the default is everything
      possible is included
    - ``generators`` -- (optional) the generators for the subcrystal; the
      default is the generators for the ambient crystal
    - ``virtualization``, ``scaling_factors`` -- (optional)
      dictionaries whose key `i` corresponds to the sets `\sigma_i`
      and `\gamma_i` respectively used to define virtual crystals; see
      :class:`~sage.combinat.crystals.virtual_crystal.VirtualCrystal`
    - ``cartan_type`` -- (optional) the Cartan type for the subcrystal; the
      default is the Cartan type for the ambient crystal
    - ``index_set`` -- (optional) the index set for the subcrystal; the
      default is the index set for the Cartan type
    - ``category`` -- (optional) the category for the subcrystal; the
      default is the :class:`~sage.categories.crystals.Crystals` category
    .. SEEALSO::
        :meth:`~sage.categories.crystals.Crystals.ParentMethods.subcrystal`
    EXAMPLES:
    We build out a subcrystal starting from an element and only going
    to the lowest weight::
        sage: B = crystals.Tableaux(['A',3], shape=[2,1])
        sage: S = B.subcrystal(generators=[B(3,1,2)], direction='lower')
        sage: S.cardinality()
        11
    Here we build out in both directions starting from an element, but we
    also have restricted ourselves to type `A_2`::
        sage: T = B.subcrystal(index_set=[1,2], generators=[B(3,1,1)])
        sage: T.cardinality()
        8
        sage: list(T)
        [[[1, 1], [3]],
         [[1, 2], [3]],
         [[1, 1], [2]],
         [[2, 2], [3]],
         [[1, 2], [2]],
         [[2, 3], [3]],
         [[1, 3], [2]],
         [[1, 3], [3]]]
    Now we take the crystal corresponding to the intersection of
    the previous two subcrystals::
        sage: U = B.subcrystal(contained=lambda x: x in S and x in T, generators=B)
        sage: list(U)
        [[[2, 3], [3]], [[1, 2], [3]], [[2, 2], [3]]]
    .. TODO::
        Include support for subcrystals which only contains certain arrows.
    TESTS:
    Check that the subcrystal respects being in the category
    of supercrystals (:trac:`27368`)::
        sage: T = crystals.Tableaux(['A',[1,1]], [2,1])
        sage: S = T.subcrystal(max_depth=3)
        sage: S.category()
        Category of finite super crystals
    """
    @staticmethod
    def __classcall_private__(cls, ambient, contained=None, generators=None,
                              virtualization=None, scaling_factors=None,
                              cartan_type=None, index_set=None, category=None):
        """
        Normalize arguments to ensure a (relatively) unique representation.
        EXAMPLES::
            sage: B = crystals.Tableaux(['A',4], shape=[2,1])
            sage: S1 = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
            sage: S2 = B.subcrystal(generators=[B(2,1,1), B(5,2,4)], cartan_type=['A',4], index_set=(1,2))
            sage: S1 is S2
            True
        """
        if isinstance(contained, (list, tuple, set, frozenset)):
            contained = frozenset(contained)
        if cartan_type is None:
            cartan_type = ambient.cartan_type()
        else:
            cartan_type = CartanType(cartan_type)
        if index_set is None:
            # Bug fix: ``index_set`` is a *method* of CartanType; the original
            # assigned the bound method object, which breaks the later
            # ``tuple(index_set)`` call. Call it to get the actual indices.
            index_set = cartan_type.index_set()
        if generators is None:
            generators = ambient.module_generators
        category = Crystals().or_subcategory(category)
        if ambient in SuperCrystals():
            category = category & SuperCrystals()
        if ambient in FiniteCrystals() or isinstance(contained, frozenset):
            category = category.Finite()
        if virtualization is not None:
            if scaling_factors is None:
                scaling_factors = {i: 1 for i in index_set}
            from sage.combinat.crystals.virtual_crystal import VirtualCrystal
            return VirtualCrystal(ambient, virtualization, scaling_factors, contained,
                                  generators, cartan_type, index_set, category)
        if scaling_factors is not None:
            # virtualization must be None
            virtualization = {i: (i,) for i in index_set}
            from sage.combinat.crystals.virtual_crystal import VirtualCrystal
            return VirtualCrystal(ambient, virtualization, scaling_factors, contained,
                                  generators, cartan_type, index_set, category)
        # We need to give these as optional arguments so it unpickles correctly
        return super(Subcrystal, cls).__classcall__(cls, ambient, contained,
                                                    tuple(generators),
                                                    cartan_type=cartan_type,
                                                    index_set=tuple(index_set),
                                                    category=category)
    def __init__(self, ambient, contained, generators, cartan_type, index_set, category):
        """
        Initialize ``self``.
        EXAMPLES::
            sage: B = crystals.Tableaux(['A',4], shape=[2,1])
            sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
            sage: TestSuite(S).run()
        """
        self._ambient = ambient
        self._contained = contained
        self._cardinality = None  # ``None`` means currently unknown
        self._cartan_type = cartan_type
        self._index_set = tuple(index_set)
        Parent.__init__(self, category=category)
        # Only keep generators that actually belong to the subcrystal.
        self.module_generators = tuple(self.element_class(self, g) for g in generators
                                       if self._containing(g))
        if isinstance(contained, frozenset):
            self._cardinality = Integer(len(contained))
            self._list = [self.element_class(self, x) for x in contained]
    def _repr_(self):
        """
        Return a string representation of ``self``.
        EXAMPLES::
            sage: B = crystals.Tableaux(['A',4], shape=[2,1])
            sage: B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
            Subcrystal of The crystal of tableaux of type ['A', 4] and shape(s) [[2, 1]]
        """
        return "Subcrystal of {}".format(self._ambient)
    @lazy_attribute
    def _containing(self):
        """
        Check if ``x`` is contained in ``self``.
        EXAMPLES::
            sage: B = crystals.Tableaux(['A',4], shape=[2,1])
            sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
            sage: S._containing(B(5,2,4))
            True
            sage: S._containing(B(4,2,4))
            True
        """
        if self._contained is None:
            return lambda x: True
        if isinstance(self._contained, frozenset):
            return self._contained.__contains__
        return self._contained  # Otherwise it should be a function
    def __contains__(self, x):
        """
        Check if ``x`` is in ``self``.
        EXAMPLES::
            sage: B = crystals.Tableaux(['A',4], shape=[2,1])
            sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
            sage: B(5,2,4) in S
            True
            sage: mg = B.module_generators[0]
            sage: mg in S
            True
            sage: mg.f(2).f(3) in S
            False
        """
        if isinstance(x, Subcrystal.Element) and x.parent() == self:
            return True
        if x in self._ambient:
            if not self._containing(x):
                return False
            x = self.element_class(self, x)
            if self in FiniteCrystals():
                return x in self.list()
            # TODO: make this work for infinite crystals
            import warnings
            warnings.warn("Testing containment in an infinite crystal"
                          " defaults to returning True")
            return True
    def cardinality(self):
        """
        Return the cardinality of ``self``.
        EXAMPLES::
            sage: B = crystals.Tableaux(['A',4], shape=[2,1])
            sage: S = B.subcrystal(generators=[B(2,1,1)], index_set=[1,2])
            sage: S.cardinality()
            8
            sage: B = crystals.infinity.Tableaux(['A',2])
            sage: S = B.subcrystal(max_depth=4)
            sage: S.cardinality()
            22
        TESTS:
        Check that :trac:`19481` is fixed::
            sage: from sage.combinat.crystals.virtual_crystal import VirtualCrystal
            sage: A = crystals.infinity.Tableaux(['A',3])
            sage: V = VirtualCrystal(A, {1:(1,3), 2:(2,)}, {1:1, 2:2}, cartan_type=['C',2])
            sage: V.cardinality()
            Traceback (most recent call last):
            ...
            NotImplementedError: unknown cardinality
        """
        if self._cardinality is not None:
            return self._cardinality
        try:
            # ``self._list`` only exists when ``contained`` was a frozenset.
            card = Integer(len(self._list))
            self._cardinality = card
            return self._cardinality
        except AttributeError:
            if self in FiniteCrystals():
                return Integer(len(self.list()))
            try:
                card = super(Subcrystal, self).cardinality()
            except AttributeError:
                raise NotImplementedError("unknown cardinality")
            if card == infinity:
                self._cardinality = card
                return card
            self._cardinality = Integer(len(self.list()))
            return self._cardinality
    def index_set(self):
        """
        Return the index set of ``self``.
        EXAMPLES::
            sage: B = crystals.Tableaux(['A',4], shape=[2,1])
            sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
            sage: S.index_set()
            (1, 2)
        """
        return self._index_set
    class Element(ElementWrapper):
        """
        An element of a subcrystal. Wraps an element in the ambient crystal.
        """
        def _richcmp_(self, other, op):
            """
            EXAMPLES:
            For == operator::
                sage: A = crystals.KirillovReshetikhin(['C',2,1], 1,2).affinization()
                sage: S = A.subcrystal(max_depth=2)
                sage: sorted(S)
                [[[1, 1]](-1),
                 [[1, 2]](-1),
                 [](0),
                 [[1, 1]](0),
                 [[1, 2]](0),
                 [[1, -2]](0),
                 [[2, 2]](0),
                 [](1),
                 [[2, -1]](1),
                 [[-2, -1]](1),
                 [[-1, -1]](1),
                 [[-1, -1]](2)]
            For != operator::
                sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]!=S[j]]
                ....:  == [(i,j) for i in range(len(S)) for j in range(len(S)) if
                ....:  S[i].value!=S[j].value])
                True
            For < operator::
                sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]<S[j]]
                ....:  == [(i,j) for i in range(len(S)) for j in range(len(S)) if
                ....:  S[i].value<S[j].value])
                True
            For <= operator::
                sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]<=S[j]]
                ....:  == [(i,j) for i in range(len(S)) for j in range(len(S)) if
                ....:  S[i].value<=S[j].value])
                True
            For > operator::
                sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]>S[j]]
                ....:  == [(i,j) for i in range(len(S)) for j in range(len(S)) if
                ....:  S[i].value>S[j].value])
                True
            For >= operator::
                sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]>=S[j]]
                ....:  == [(i,j) for i in range(len(S)) for j in range(len(S)) if
                ....:  S[i].value>=S[j].value])
                True
            """
            # Comparisons delegate directly to the wrapped ambient elements.
            return richcmp(self.value, other.value, op)
        def e(self, i):
            """
            Return `e_i` of ``self``.
            EXAMPLES::
                sage: B = crystals.Tableaux(['A',4], shape=[2,1])
                sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
                sage: mg = S.module_generators[1]
                sage: mg.e(2)
                sage: mg.e(1)
                [[1, 4], [5]]
            """
            ret = self.value.e(i)
            # The arrow only exists if its target also lies in the subcrystal.
            if ret is None or not self.parent()._containing(ret):
                return None
            return self.__class__(self.parent(), ret)
        def f(self, i):
            """
            Return `f_i` of ``self``.
            EXAMPLES::
                sage: B = crystals.Tableaux(['A',4], shape=[2,1])
                sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
                sage: mg = S.module_generators[1]
                sage: mg.f(1)
                sage: mg.f(2)
                [[3, 4], [5]]
            """
            ret = self.value.f(i)
            if ret is None or not self.parent()._containing(ret):
                return None
            return self.__class__(self.parent(), ret)
        def epsilon(self, i):
            r"""
            Return `\varepsilon_i` of ``self``.
            EXAMPLES::
                sage: B = crystals.Tableaux(['A',4], shape=[2,1])
                sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
                sage: mg = S.module_generators[1]
                sage: mg.epsilon(1)
                1
                sage: mg.epsilon(2)
                0
            """
            return self.value.epsilon(i)
        def phi(self, i):
            r"""
            Return `\varphi_i` of ``self``.
            EXAMPLES::
                sage: B = crystals.Tableaux(['A',4], shape=[2,1])
                sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
                sage: mg = S.module_generators[1]
                sage: mg.phi(1)
                0
                sage: mg.phi(2)
                1
            """
            return self.value.phi(i)
        def weight(self):
            """
            Return the weight of ``self``.
            EXAMPLES::
                sage: B = crystals.Tableaux(['A',4], shape=[2,1])
                sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
                sage: mg = S.module_generators[1]
                sage: mg.weight()
                (0, 1, 0, 1, 1)
            """
            return self.value.weight()
| StarcoderdataPython |
4891594 |
def partition(arr: [], low: int, high: int):
    '''
    Rearrange arr[low..high] in place around a pivot (Lomuto scheme).
    The rightmost element arr[high] is chosen as the pivot; afterwards every
    element smaller than the pivot sits to its left and the rest to its right.
    :param arr: list to be partitioned
    :param low: low index of list slice (inclusive)
    :param high: high index of list slice (inclusive)
    :return: final index of the pivot element
    '''
    pivot = arr[high]
    # First slot of the region not yet known to hold a smaller-than-pivot value.
    boundary = low
    for scan in range(low, high):
        if arr[scan] < pivot:
            # Grow the "smaller" region by swapping the small value into it.
            arr[boundary], arr[scan] = arr[scan], arr[boundary]
            boundary += 1
    # Finally drop the pivot between the two regions.
    arr[boundary], arr[high] = arr[high], arr[boundary]
    return boundary
def quick_sort(arr: [], low: int, high: int):
    '''
    Sort arr[low..high] in place with recursive quicksort.
    The indices select which part of the list is being sorted; a slice of
    length <= 1 (low >= high) is already sorted and ends the recursion.
    :param arr: list to be sorted
    :param low: low index of arr (inclusive)
    :param high: high index of arr (inclusive)
    :return: None (the list is mutated in place)
    '''
    if low < high:
        # Place one element (the pivot) at its final position, then sort
        # the two remaining halves independently.
        split = partition(arr, low, high)
        quick_sort(arr, low, split - 1)
        quick_sort(arr, split + 1, high)
# let's try it out!
# Small smoke test: sort a shuffled 1..9 list in place and show both states.
arr = [9, 1, 4, 5, 2, 8, 3, 6, 7]
print("Before quicksort {}".format(arr))
quick_sort(arr, 0, len(arr) - 1)
print("After quicksort {}".format(arr))
| StarcoderdataPython |
1636121 | <filename>ros/src/tl_detector/light_classification/tl_classifier.py
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
class TLClassifier(object):
    """Traffic-light classifier backed by a frozen TensorFlow detection graph.

    Depending on ``is_sim`` it loads either the simulator or the real-site
    model and maps the model's integer class ids onto the
    ``styx_msgs.msg.TrafficLight`` colour constants.
    """

    def __init__(self, is_sim):
        self.label_map = dict()   # class id -> TrafficLight constant
        self.label_text = dict()  # class id -> human-readable colour name
        if is_sim:
            PATH_TO_GRAPH = r'light_classification/model/simulator/frozen_inference_graph.pb'
            self.label_map = {1: TrafficLight.YELLOW, 2: TrafficLight.RED, 3: TrafficLight.GREEN}
            self.label_text = {1: "Yellow", 2: "Red", 3: "Green"}
        else:
            PATH_TO_GRAPH = r'light_classification/model/site/frozen_inference_graph.pb'
            # The site model uses several class ids per colour; fold them all
            # into the two lookup tables in one pass.
            groupings = (
                ([4, 5, 7, 9, 13], TrafficLight.RED, "Red"),
                ([2, 3, 6, 8, 11, 12], TrafficLight.GREEN, "Green"),
                ([10], TrafficLight.YELLOW, "Yellow"),
                ([1], TrafficLight.UNKNOWN, "Unknown"),
            )
            for ids, colour, text in groupings:
                for class_id in ids:
                    self.label_map[class_id] = colour
                    self.label_text[class_id] = text
        self.graph = tf.Graph()
        self.threshold = .5  # minimum detection score to trust the top hit
        with self.graph.as_default():
            graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_GRAPH, 'rb') as fid:
                graph_def.ParseFromString(fid.read())
            tf.import_graph_def(graph_def, name='')
            self.image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
            self.boxes = self.graph.get_tensor_by_name('detection_boxes:0')
            self.scores = self.graph.get_tensor_by_name('detection_scores:0')
            self.classes = self.graph.get_tensor_by_name('detection_classes:0')
            self.num_detections = self.graph.get_tensor_by_name('num_detections:0')
        self.sess = tf.Session(graph=self.graph)

    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        with self.graph.as_default():
            batched = np.expand_dims(image, axis=0)
            boxes, scores, classes, num_detections = self.sess.run(
                [self.boxes, self.scores, self.classes, self.num_detections],
                feed_dict={self.image_tensor: batched})
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes).astype(np.int32)
        # Detections arrive sorted by score, so index 0 is the best hit.
        print('SCORES: ', scores[0])
        print('CLASSES: ', classes[0])
        if scores[0] > self.threshold:
            print(self.label_text[classes[0]])
            return self.label_map[classes[0]]
        return TrafficLight.UNKNOWN
5110871 | # -*- coding: utf-8 -*-
import gensim
import os
import sys
import io
class Word2VecModule():
    """Look up most-similar words for a comma-separated word list using a
    pretrained Korean word2vec model ('ko.bin'), printing the results and
    writing them to ../workd2vecFile/res_<argv[1]>.
    """

    def __init__(self):
        pass

    def run(self, line):
        """Process one comma-separated line of words.

        :param line: e.g. "word1,word2," -- the element after the last
                     comma is ignored (matches the original slicing).
        """
        model = gensim.models.Word2Vec.load('ko.bin')
        words = line.split(",")
        results = []
        for raw in words[:-1]:
            print(raw)
            word = raw.rstrip()
            try:
                entry = model.wv.most_similar(word)
            except KeyError:
                print('%s is not included Dictionary' % word)
                entry = '%s is not included Dictionary' % word
            except Exception as ex:
                entry = ex
            results.append(entry)
        print(results)
        # The output file name comes from the command line, mirroring main().
        input_string = sys.argv[1]
        filename = '../workd2vecFile/res_%s' % input_string
        # Context manager fixes the original leak: fout was never closed.
        with open(filename, 'w') as fout:
            for indexed in enumerate(results):
                a = 'index : {} value: {}'.format(*indexed)
                print(a)
                fout.write(a)
def main():
    """Read the first line of the file named by argv[1] and feed it to
    Word2VecModule.run().
    """
    # Context manager replaces the original unclosed file handle.
    with open(sys.argv[1], "r") as f:
        line = f.readline()
    wv = Word2VecModule()
    wv.run(line)
if __name__ == '__main__':
    try:
        main()
    except ValueError as err:
        # Print the caught exception instance; the original printed the
        # ValueError *class* object, hiding the actual error message.
        print(err)
| StarcoderdataPython |
5195501 | import unittest
import sshim
from . import connect
class TestFailure(unittest.TestCase):
    # Exercises sshim's failure paths: what happens when the scripted
    # server-side expectations do not match what the client actually sends.
    def test_unexpected(self):
        # Server script expects 'moose' but the client sends 'goose'; the
        # mismatch should surface on the server's exception queue.
        def echo(script):
            script.expect('moose')
            script.writeline('return')
        with sshim.Server(echo, address='127.0.0.1', port=0) as server:
            with connect(server) as fileobj:
                fileobj.write('goose\n')
                fileobj.flush()
            # Blocks until the server thread reports the failure.
            server.exceptions.get()
    def test_eof(self):
        # Closing the connection mid-script should make the next expect()
        # raise EOFError inside the server script.
        def echo(script):
            script.expect('goose')
            self.assertRaises(EOFError, script.expect, '')
        with sshim.Server(echo, address='127.0.0.1', port=0) as server:
            with connect(server) as fileobj:
                fileobj.write('goose\n')
                fileobj.flush()
                fileobj.close()
    def test_remainder(self):
        # Leftover unconsumed input ('goose') should make a subsequent
        # expect('') fail with an AssertionError.
        def echo(script):
            script.expect('moose')
            self.assertRaises(AssertionError, script.expect, '')
        with sshim.Server(echo, address='127.0.0.1', port=0) as server:
            with connect(server) as fileobj:
                fileobj.write('moose\n')
                fileobj.write('goose\n')
                fileobj.flush()
                fileobj.close()
| StarcoderdataPython |
5106996 | #!/usr/bin/env python
############################################################################
# Copyright (c) 2015-2019 Saint Petersburg State University
# Copyright (c) 2011-2014 Saint Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import os
import sys
from os.path import abspath, dirname, realpath, join, isfile
# Sub-packages of spades_pipeline that init() appends to sys.path.
source_dirs = ["", "truspades", "common", "executors", "scripts"]
# developers configuration
spades_home = abspath(dirname(realpath(__file__)))
bin_home = join(spades_home, "bin")
python_modules_home = join(spades_home, "src")
ext_python_modules_home = join(spades_home, "ext", "src", "python_libs")
spades_version = ""  # filled in by init() from the VERSION file
def init():
    """Configure the global paths and sys.path for either a developer
    checkout or an installed SPAdes tree, then read the release version.

    An installed layout is detected by the presence of "spades-core" next to
    this file; in that case all paths are rebased onto the install prefix.
    """
    global spades_home
    global bin_home
    global python_modules_home
    global spades_version
    global ext_python_modules_home
    # users configuration (spades_init.py and spades binary are in the same directory)
    if isfile(os.path.join(spades_home, "spades-core")):
        install_prefix = dirname(spades_home)
        bin_home = join(install_prefix, "bin")
        spades_home = join(install_prefix, "share", "spades")
        python_modules_home = spades_home
        ext_python_modules_home = spades_home
    # Loop variable renamed from 'dir' to stop shadowing the builtin.
    for subdir in source_dirs:
        sys.path.append(join(python_modules_home, "spades_pipeline", subdir))
    # Context manager closes the VERSION file (the original leaked the handle).
    with open(join(spades_home, "VERSION"), 'r') as version_file:
        spades_version = version_file.readline().strip()
if __name__ == "__main__":
    # This module is a library; direct invocation just points users at spades.py.
    spades_py_path = join(dirname(realpath(__file__)), "spades.py")
    sys.stderr.write("Please use " + spades_py_path + " for running SPAdes genome assembler\n")
| StarcoderdataPython |
37994 | <gh_stars>1-10
from collections import Counter
from konlpy.tag import Hannanum
import pytagcloud

# Build a word-cloud image from the 200 most common nouns in a crawled
# Korean text file.
SOURCE_PATH = 'D:\\KYH\\02.PYTHON\\crawled_data\\cbs2.txt'
OUTPUT_PATH = 'D:\\KYH\\02.PYTHON\\crawled_data\\wordcloud.jpg'

# Context manager replaces the original manual open()/close() pair, so the
# handle is released even if reading fails.
with open(SOURCE_PATH, 'r', encoding='UTF-8') as f:
    data = f.read()
nlp = Hannanum()
nouns = nlp.nouns(data)  # extract nouns with the Hannanum POS tagger
count = Counter(nouns)
tags2 = count.most_common(200)
taglist = pytagcloud.make_tags(tags2, maxsize=80)
pytagcloud.create_tag_image(taglist, OUTPUT_PATH, size=(400, 300), fontname='korean', rectangular=False)
8155630 | import json
import sys
from interface_lichess import lichess
from train import Training
def start_engine():
    """Load the configuration and dispatch to the selected game mode."""
    config = load_configuration()
    connection = lichess(config['token'], config['train']['training_file_name'], config['train']['training_file_path'])
    # Lower-case once instead of per branch.
    mode = config['mode_game'].lower()
    if mode == 'ai':
        game_ai(config['game']['num_games'], connection)
    elif mode == 'train':
        train_cfg = config['train']
        train_ai(train_cfg['training_file_name'],
                 train_cfg['training_file_path'],
                 train_cfg['training_num_lines'],
                 train_cfg['num_trainings'],
                 train_cfg['training_iterations'],
                 config['weights']['weights_file_name'],
                 config['weights']['weights_file_path'],
                 connection)
    elif mode == 'user':
        game_user(config['game']['num_games'], connection)
    else:
        print('Bad mode_game: Options: AI, User, Train')
        print(' - Edit config.json file and try again')
def load_configuration():
    """Read and return config.json from the working directory as a dict.

    The file handle is closed via the context manager (the original left
    it open).
    """
    with open('config.json') as json_file:
        return json.load(json_file)
def train_ai(training_file_name: str, training_file_path: str, training_num_lines: int, num_trainings: int, training_iterations: int, weights_file_name: str, weights_file_path: str, connection):
    """For each training round: play one game against the lichess AI (which
    generates the sample FEN file), then run the trainer the configured
    number of times over those samples."""
    for game_id in range(num_trainings):
        print_start_game(game_id)
        # 1 - Play Game (and generate Training sample Fens)
        connection.start_challenge_ai()
        connection.accept_challenge()
        # 2 - Run the training passes over the freshly generated samples.
        for _ in range(training_iterations):
            train = Training(training_num_lines, training_file_name, weights_file_name, training_file_path, weights_file_path)
            train.run()
def game_ai(num_games, connection):
    """Play ``num_games`` consecutive games against the lichess AI,
    printing a banner before each one."""
    for game_id in range(num_games):
        print_start_game(game_id)
        connection.start_challenge_ai()
        connection.accept_challenge()
def game_user(num_games, connection):
    """Accept ``num_games`` incoming human challenges, printing a banner
    before each one."""
    for game_id in range(num_games):
        print_start_game(game_id)
        connection.accept_challenge()
def print_start_game(game_id):
    """Print a banner announcing game ``game_id`` (displayed 1-based)."""
    border = "######################"
    print("\n" + border)
    print(f" Start Game - {game_id + 1}")
    print(border + "\n")
if __name__ == '__main__':
    # Entry point: everything is driven by config.json via start_engine().
    start_engine()
| StarcoderdataPython |
4985782 | """compressor.lib
High-level functions exposed as a library, that can be imported.
"""
from compressor.char_node import CharNode # pylint: disable=unused-import
from compressor.core import (create_tree_code, parse_tree_code,
process_frequencies)
from compressor.core import retrieve_compressed_file as extract_file # pylint: disable=unused-import
from compressor.core import save_compressed_file
from compressor.util import open_text_file
def compress_file(filename: str, dest_file: str = "") -> None:
    """
    Open the <filename> and compress its contents on a new one.
    :param filename: The path to the source file to compress.
    :param dest_file: The name of the target file. If not provided (None),
        a default will be used with `<filename>.comp`
    """
    with open_text_file(filename, "r") as source:
        freq_nodes = process_frequencies(source.read())
        # NOTE(review): keep the checksum before create_tree_code -- tree
        # building may mutate the frequency nodes, so the order matters.
        checksum = 0
        for node in freq_nodes:
            checksum += node.freq  # total number of bytes
        huffman_tree = create_tree_code(freq_nodes)
        code_table = parse_tree_code(huffman_tree)
        save_compressed_file(filename, code_table, checksum, dest_file)
| StarcoderdataPython |
4983215 | <filename>utils.py<gh_stars>0
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import random
import numpy as np
def move_to_device(maybe_tensor, device):
    """Recursively move tensors (including tensors nested inside dicts,
    lists and tuples) onto ``device``.

    numpy arrays are converted with ``torch.from_numpy`` and made contiguous
    after the transfer; any other object is returned unchanged.
    """
    if torch.is_tensor(maybe_tensor):
        return maybe_tensor.to(device)
    if isinstance(maybe_tensor, np.ndarray):
        # from_numpy shares memory with the array; .contiguous() guarantees a
        # densely laid-out tensor after the device transfer.
        return torch.from_numpy(maybe_tensor).to(device).contiguous()
    if isinstance(maybe_tensor, dict):
        return {key: move_to_device(val, device) for key, val in maybe_tensor.items()}
    if isinstance(maybe_tensor, list):
        return [move_to_device(item, device) for item in maybe_tensor]
    if isinstance(maybe_tensor, tuple):
        return tuple(move_to_device(item, device) for item in maybe_tensor)
    return maybe_tensor
def set_seed(seed):
    """Seed the Python, numpy and torch (all CUDA devices) RNGs so runs
    are reproducible."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def average_gradients(model):
    """All-reduce every present parameter gradient across the distributed
    world and divide by the world size, in place.

    Requires an initialized torch.distributed process group.
    """
    world_size = float(dist.get_world_size())
    for param in model.parameters():
        grad = param.grad
        if grad is None:
            continue
        dist.all_reduce(grad.data, op=dist.ReduceOp.SUM)
        grad.data /= world_size
class Statistics:
    """A dictionary of running counters.

    Always tracks a 'steps' counter (initialized to 0); arbitrary named
    accumulators can be seeded at construction and incremented later.
    """

    def __init__(self, key_value_dict=None, **kwargs):
        self.statistics = {'steps': 0}
        if key_value_dict is not None:
            self.statistics.update(key_value_dict)
        self.statistics.update(kwargs)

    def update(self, key_or_dict, value=None):
        """Add ``value`` to a single counter, or merge a dict of increments.

        Unknown keys start from 0. before being incremented.
        """
        if value is None:
            assert isinstance(key_or_dict, dict)
            increments = key_or_dict
        else:
            assert isinstance(key_or_dict, str)
            increments = {key_or_dict: value}
        for key, delta in increments.items():
            self.statistics[key] = self.statistics.get(key, 0.) + delta

    def __getitem__(self, attr):
        return self.statistics[attr]

    def step(self):
        """Advance the 'steps' counter by one."""
        self.statistics['steps'] += 1
def data_proc(data, queue):
    """Push every batch produced by ``data`` onto ``queue``, then enqueue
    the 'EPOCHDONE' sentinel string so the consumer knows the epoch is
    finished."""
    for batch in data:
        queue.put(batch)
    queue.put('EPOCHDONE')
def asynchronous_load(data_loader):
    """Yield batches from ``data_loader`` produced by a background process.

    A worker process running ``data_proc`` fills a bounded queue (size 10);
    this generator drains it until the 'EPOCHDONE' sentinel string arrives,
    then joins the worker.
    """
    queue = mp.Queue(10)
    worker = mp.Process(target=data_proc, args=(data_loader, queue))
    worker.start()
    while True:
        batch = queue.get()
        if isinstance(batch, str):
            # Only the sentinel is a string; real batches are tensors/arrays.
            break
        yield batch
    worker.join()
1987204 | <reponame>openredact/pii-identifier
from .base import Backend
from nerwhal.types import NamedEntity
from nerwhal.nlp_utils import load_stanza_nlp
# the stanza NER models have an F1 score between 74.3 and 94.8, https://stanfordnlp.github.io/stanza/performance.html
# we choose a hardcoded score in this scale
NER_SCORE = 0.8  # confidence attached to every entity this backend emits
class StanzaNerBackend(Backend):
    """This backend recognizes entities using Stanza's neural network models.
    See https://stanfordnlp.github.io/stanza/ for more information on Stanza.
    Work in process:
    - this backend does not yet support different recognizers (i.e. custom models)
    - instead it uses the default Stanza model for the provided language
    """

    def __init__(self, language):
        # The pipeline only needs tokenization (+ multi-word tokens) and NER.
        self.stanza_nlp = load_stanza_nlp(language, processors="tokenize,mwt,ner")

    def register_recognizer(self, recognizer_cls):
        # Custom recognizers are not supported by this backend yet.
        raise NotImplementedError()

    def run(self, text):
        """Return a NamedEntity for every entity Stanza finds in ``text``."""
        entities = []
        for ent in self.stanza_nlp(text).entities:
            entities.append(
                NamedEntity(ent.start_char, ent.end_char, ent.type, ent.text, NER_SCORE, self.__class__.__name__)
            )
        return entities
| StarcoderdataPython |
5140828 | <reponame>ReinholdM/play_football_with_human<gh_stars>1-10
# -*- encoding: utf-8 -*-
# -----
# Created Date: 2021/1/21
# Author: <NAME>
# -----
# Last Modified:
# Modified By:
# -----
# Copyright (c) 2020 MARL @ SJTU
# -----
import os
import time
import grpc
import multiprocessing
import traceback
import numpy as np
from signal import signal, SIGTERM
from concurrent import futures
from threading import Lock
from readerwriterlock import rwlock
from typing import Any, Dict
import tensorboardX
from malib.rpc.chunk import deserialize, recv_chunks, deserialize_image
from malib.rpc.proto import exprmanager_pb2, exprmanager_pb2_grpc
from malib.utils.convert import (
utc_to_str,
dump_dict,
grpc_struct_to_dict,
tensor_to_dict,
)
class _ConcurrentTable:
    """Thread-safe registry mapping experiment names to tensorboardX writers.

    ``table[0]`` maps name -> index and ``table[1]`` maps index ->
    (per-writer Lock, SummaryWriter). Lookups take the shared read lock;
    inserts take the exclusive write lock.
    """

    def __init__(self):
        self.lock = rwlock.RWLockFair()
        # {name: idx}, {idx: (lock, writer)}
        self.table = [{}, {}]

    def close(self):
        """Close every registered writer, each under its own lock."""
        for idx, (lock, writer) in self.table[1].items():
            with lock:
                writer.close()

    def put(self, name):
        """Return the index for ``name``, creating a writer on first use.

        Bug fix: the original only checked for ``name`` under the read lock,
        so two threads could both miss and then both insert under the write
        lock, clobbering the name's index and leaking a writer. We now
        re-check after acquiring the write lock (double-checked locking).
        """
        with self.lock.gen_rlock():
            if name in self.table[0]:
                return self.table[0][name]
        with self.lock.gen_wlock():
            # Re-check: another thread may have inserted between the locks.
            if name in self.table[0]:
                return self.table[0][name]
            idx = len(self.table[0])
            self.table[0][name] = idx
            writer = tensorboardX.SummaryWriter(name)
            self.table[1][idx] = (Lock(), writer)
            return idx

    def get(self, index):
        """Return (lock, writer) for ``index`` under the shared read lock."""
        with self.lock.gen_rlock():
            wlock, writer = self.table[1][index]
            return wlock, writer
class ExperimentManagerRPCServicer(exprmanager_pb2_grpc.ExperimentManagerRPCServicer):
    """gRPC servicer receiving experiment records (text, scalars, images,
    arbitrary objects, tensors) and forwarding them to the TensorBoard
    writers held in a shared ``_ConcurrentTable``.
    """
    def __init__(
        self,
        global_writer_table,
        logdir="./",
        flush_freq: int = -1,
        debug=False,
        verbose=False,
    ):
        # global_writer_table: shared _ConcurrentTable of SummaryWriters.
        # flush_freq: accepted but not used anywhere in this servicer.
        # debug: print every incoming request; verbose: print internal errors.
        super().__init__()
        self.root_dir = logdir
        self.table = global_writer_table
        self.debug = debug
        self.verbose = verbose
    def CreateTable(self, table_name, context):
        """Create (or reuse) a writer for ``<root>/<primary>/<secondary>``.
        Returns a TableKey whose ``key`` is the writer index, or -1 on error.
        """
        if self.debug:
            print(
                "Get CreateTable Request:\n", dump_dict(grpc_struct_to_dict(table_name))
            )
        rec_path = os.path.join(self.root_dir, table_name.primary, table_name.secondary)
        try:
            os.makedirs(rec_path)
        except Exception as e:
            # The directory may already exist; that is not fatal.
            if self.verbose:
                print("Error detected in making directory ", e)
        idx = -1
        try:
            idx = self.table.put(rec_path)
        except:
            # NOTE: broad bare except -- any failure leaves idx == -1, which
            # the client must treat as an error key.
            traceback.print_exc()
        return exprmanager_pb2.TableKey(key=idx, time=time.time())
    def SendText(self, text, context):
        """Write a text record to the writer addressed by ``text.key``."""
        if self.debug:
            print("Get SendText Request:\n", text.text)
        try:
            lock, writer = self.table.get(text.key)
            with lock:
                writer.add_text(
                    text.tag, text.text, global_step=text.step, walltime=text.time
                )
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())
    def SendScalar(self, scalar, context):
        """Write a scalar value; the concrete value field is resolved
        dynamically through the protobuf ``ScalarType`` oneof.
        """
        if self.debug:
            print("Get SendScalar Request:\n", dump_dict(grpc_struct_to_dict(scalar)))
        try:
            lock, writer = self.table.get(scalar.key)
            # WhichOneof tells us which typed field (int/float/...) was set.
            scalar_type = scalar.WhichOneof("ScalarType")
            val = getattr(scalar, scalar_type)
            with lock:
                writer.add_scalar(
                    scalar.tag, val, global_step=scalar.step, walltime=scalar.time
                )
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())
    def SendImage(self, binary_iterator, context):
        """Reassemble a chunked image payload and write it to TensorBoard."""
        if self.debug:
            print("Get SendImage Request")
        try:
            serial_img, fields = recv_chunks(binary_iterator, "blocks")
            lock, writer = self.table.get(fields["key"])
            # print(fields["tensor"])
            # fields["tensor"] distinguishes a serialized PIL-style image
            # (False) from a serialized array/tensor payload (True/absent).
            if "tensor" in fields and not fields["tensor"]:
                img = deserialize_image(serial_img)
                img = np.array(img)
            else:
                img = deserialize(serial_img)
            if self.debug:
                print(img.shape)
            # Drop any alpha channel and convert HWC -> CHW as expected by
            # tensorboardX's default add_image dataformats.
            img = np.transpose(img[:, :, 0:3], [2, 0, 1])
            with lock:
                writer.add_image(
                    tag=fields["tag"],
                    img_tensor=img,
                    global_step=fields["step"],
                    walltime=fields["time"],
                )
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print(traceback.format_exc())
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())
    def SendObj(self, binary_iterator, context):
        """Deserialize an object and log it.
        ``__Payoff__`` objects are rendered into a markdown table via
        ``add_text``; every other object is flattened into scalar entries.
        """
        if self.debug:
            print("Get SendObj Request")
            print("Receiver currently act as method: send scalars")
        try:
            serial_obj, fields = recv_chunks(binary_iterator, "blocks")
            obj = deserialize(serial_obj)
            lock, writer = self.table.get(fields["key"])
            # Received internal info, currently only payoff
            if fields["tag"] == "__Payoff__":
                # Build a markdown document: one table for the population
                # (agent -> policy id) and one for per-agent rewards.
                with lock:
                    writer.add_text(
                        "payoff_update_info",
                        "\n".join(
                            [
                                f"Update-SendTime-{utc_to_str(fields['time'])}:\n",
                                "* Population:\n",
                                "| AgentName |"
                                + "".join(
                                    [
                                        f" {agent} |"
                                        for agent, _ in obj["Population"].items()
                                    ]
                                ),
                                "| :-: |"
                                + "".join(
                                    [" :-: |" for _, _ in obj["Population"].items()]
                                ),
                                "| Policy |"
                                + "".join(
                                    [
                                        f" {pid} |"
                                        for _, pid in obj["Population"].items()
                                    ]
                                )
                                + "\n",
                                "* Reward:\n",
                                "| AgentName | "
                                + "".join(
                                    f" {agent} |" for (agent, _) in obj["Agents-Reward"]
                                ),
                                "| :-: |"
                                + "".join([" :-: |" for _, _ in obj["Agents-Reward"]]),
                                "| Reward |"
                                + "".join(
                                    f" {reward} |"
                                    for (_, reward) in obj["Agents-Reward"]
                                )
                                + "\n",
                            ]
                        ),
                        global_step=fields["step"],
                        walltime=fields["time"],
                    )
            else:
                def _flatten_obj(obj: Dict):
                    # Flatten nested dicts into "outer/inner" keys; only
                    # float leaves are supported.
                    res = {}
                    for k, v in obj.items():
                        if isinstance(v, Dict):
                            temp_dict = _flatten_obj(v)
                            for tk, tv in temp_dict.items():
                                res[f"{k}/{tk}"] = tv
                        elif isinstance(v, float):
                            res[k] = v
                        else:
                            raise NotImplementedError
                    return res
                with lock:
                    # TODO(ming): flatten obj key
                    obj = _flatten_obj(obj)
                    for k, v in obj.items():
                        writer.add_scalar(
                            f"{fields['tag']}/{k}",
                            v,
                            global_step=fields["step"],
                            walltime=fields["time"],
                        )
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())
    def SendBinaryTensor(self, binary, context):
        """
        Receive a tensor sent over rpc connection.
        In current implementation, the tensor is only printed in command shell
        since tensorboardX does not support adding tensors.
        Future: needed in sacred version experiment manager
        Parameters:
            binary: received binary rpc structs as predefined in exprmanager.proto
            context: rpc context
        """
        if self.debug:
            print("Get SendBinaryTensor Request")
        try:
            serial_tensor, key = recv_chunks(binary, "blocks")
            tensor = deserialize(serial_tensor)
            if self.debug:
                field_description = grpc_struct_to_dict(binary, skip_fields=["blocks"])
                print(field_description.update(tensor_to_dict(tensor)))
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())
class ExprManagerServer:
    """gRPC server wrapper hosting an ``ExperimentManagerRPCServicer``."""
    # Class-level placeholder; replaced per-instance in __init__.
    table = None
    def __init__(
        self, port, logdir="./", grace=5, max_workers=10, debug=False, verbose=False
    ):
        # port: address string handed to add_insecure_port -- presumably a
        #       full "host:port" binding string; confirm against callers.
        # grace: seconds granted to in-flight RPCs when stop() is called.
        self.port = port
        self.grace = grace
        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
        self.table = _ConcurrentTable()
        exprmanager_pb2_grpc.add_ExperimentManagerRPCServicer_to_server(
            ExperimentManagerRPCServicer(
                global_writer_table=self.table,
                logdir=logdir,
                debug=debug,
                verbose=verbose,
            ),
            self.server,
        )
        self.server.add_insecure_port(self.port)
    def start(self):
        # Non-blocking; use wait() to block until termination.
        self.server.start()
    def wait(self):
        self.server.wait_for_termination()
    def stop(self):
        # Stop the RPC server first, then flush/close all writers.
        self.server.stop(grace=self.grace)
        self.table.close()
def _create_logging(**kwargs):
    """Run an ``ExprManagerServer`` in the current process until terminated.
    Installs a SIGTERM handler that stops the server gracefully, then blocks
    until termination.  Intended as a ``multiprocessing.Process`` target
    (see ``start_logging_server``); kwargs are forwarded to the server.
    """
    s = ExprManagerServer(**kwargs)
    s.start()
    print("Logger server up!")
    def terminate_server(*_):
        # Graceful shutdown on SIGTERM (e.g. Process.terminate()).
        s.stop()
        print("Logging server stop")
    signal(SIGTERM, terminate_server)
    s.wait()
def start_logging_server(**kwargs):
    """Build a logging-server subprocess.
    NOTE: despite the name, the returned ``multiprocessing.Process`` is NOT
    started here -- the caller must invoke ``process.start()``.  ``kwargs``
    are forwarded to ``ExprManagerServer`` via ``_create_logging``.
    """
    process = multiprocessing.Process(target=_create_logging, kwargs=kwargs)
    return process
| StarcoderdataPython |
8159809 | # -*- coding: utf8 -*-
import sys
import os
import pybossa_lc as plugin
# Use the PYBOSSA test settings
# PYBOSSA_PATH points at a PYBOSSA checkout; defaults to the parent dir.
PB_PATH = os.environ.get('PYBOSSA_PATH', '..')
# Make PYBOSSA's test helpers (e.g. the `default` module) importable.
sys.path.append(os.path.abspath(os.path.join(PB_PATH, 'test')))
PYBOSSA_TEST_SETTINGS = os.path.join(PB_PATH, 'settings_test.py')
def setUpPackage():
    """Setup the plugin.
    Loads the PYBOSSA test settings followed by this plugin's own test
    settings into the Flask app, then initialises the pybossa_lc plugin
    from its installed directory.
    """
    from default import flask_app
    with flask_app.app_context():
        pb_settings = os.path.abspath(PYBOSSA_TEST_SETTINGS)
        pb_lc_settings = os.path.abspath('settings_test.py')
        # Plugin settings are loaded second so they override PYBOSSA's.
        flask_app.config.from_pyfile(pb_settings)
        flask_app.config.from_pyfile(pb_lc_settings)
        plugin_dir = os.path.dirname(plugin.__file__)
        plugin.PyBossaLC(plugin_dir).setup()
| StarcoderdataPython |
3535733 | <gh_stars>1000+
import torch
from colossalai.gemini.stateful_tensor import StatefulTensor
from typing import Union, Tuple
def colo_tensor_mem_usage(tensor: Union[torch.Tensor, StatefulTensor]) -> Tuple[int, int]:
    """Return the ``(cuda_bytes, cpu_bytes)`` storage footprint of *tensor*.

    StatefulTensor inputs are measured through their payload; any object
    that is neither a StatefulTensor nor a torch.Tensor counts as (0, 0).
    """
    if isinstance(tensor, StatefulTensor):
        payload = tensor.payload
    elif isinstance(tensor, torch.Tensor):
        payload = tensor
    else:
        return 0, 0
    # Size of the underlying storage, not just the visible view.
    nbytes = payload.storage().size() * payload.element_size()
    device_kind = payload.device.type
    cuda_bytes = nbytes if device_kind == 'cuda' else 0
    cpu_bytes = nbytes if device_kind == 'cpu' else 0
    return cuda_bytes, cpu_bytes
def colo_model_data_tensor_move(src_t: Union[StatefulTensor, torch.Tensor], tgt_t: Union[StatefulTensor,
                                                                                         torch.Tensor]) -> None:
    """Copy the payload of ``src_t`` into ``tgt_t`` and release the source.

    Both tensors may live on CPU or GPU.  After the call the source payload
    is dropped (StatefulTensor becomes null; a plain tensor is replaced by
    an empty tensor on its original device).

    Args:
        src_t (Union[StatefulTensor, torch.Tensor]): source tensor
        tgt_t (Union[StatefulTensor, torch.Tensor]): target tensor
    """
    src_payload = src_t.payload if isinstance(src_t, StatefulTensor) else src_t.data
    # Remember the source device before the payload is released below.
    src_dev = src_payload.device
    tgt_payload = tgt_t.payload if isinstance(tgt_t, StatefulTensor) else tgt_t.data
    tgt_payload.copy_(src_payload)
    # Release the source payload.
    if isinstance(src_t, StatefulTensor):
        src_t.set_null()
    else:
        src_t.data = torch.empty(0, device=src_dev, dtype=src_payload.dtype)
def colo_model_data_tensor_move_inline(t: Union[StatefulTensor, torch.Tensor], target_device: Union[torch.device,
                                                                                                    int]) -> None:
    """Move *t* onto *target_device* in place.

    Args:
        t (Union[StatefulTensor, torch.Tensor]): tensor to be moved
        target_device: destination device; a bare int is interpreted as a
            CUDA device index.
    """
    # Normalize a non-device target (e.g. an int index) to a CUDA device.
    if not isinstance(target_device, torch.device):
        target_device = torch.device(f'cuda:{target_device}')
    if isinstance(t, torch.Tensor):
        t.data = t.data.to(target_device)
        return
    if isinstance(t, StatefulTensor):
        t.move_to(target_device)
        return
    raise TypeError(f'colo_model_data_tensor_move_inline dose not accept type {type(t)}')
def colo_model_data_move_to_cpu(t: Union[StatefulTensor, torch.Tensor]) -> None:
    """Move a model-data tensor from GPU to CPU, in place.

    Args:
        t (Union[StatefulTensor, torch.Tensor]): tensor to relocate
    """
    # TODO() optimize the tensor moving with non-blocking
    if isinstance(t, torch.Tensor):
        t.data = t.data.cpu()
        return
    if isinstance(t, StatefulTensor):
        t.move_to(torch.device('cpu'))
        return
    raise TypeError(f'colo_model_data_move_to_cpu dose not accept type {type(t)}')
def colo_model_tensor_clone(t: Union[StatefulTensor, torch.Tensor], target_device: torch.device) -> torch.Tensor:
    """
    Clone a model data tensor
    Args:
        t (Union[StatefulTensor, torch.Tensor]): a model data tensor
        target_device (torch.device): the target device
    Returns:
        torch.Tensor: a cloned torch tensor
    """
    # TODO() rename this function
    # NOTE(review): despite the name, nothing is copied here -- `t` is moved
    # to `target_device` in place and its (possibly shared) payload is
    # returned.  Callers expecting an independent clone should verify this.
    colo_model_data_tensor_move_inline(t, target_device)
    t_payload = t.payload if isinstance(t, StatefulTensor) else t
    return t_payload
| StarcoderdataPython |
1700706 | from unittest import TestCase
from unittest.mock import MagicMock, patch, call
import tempfile
import shutil
import os
import pytest
from js9 import j
from zerorobot import config, template_collection
from zerorobot.template_uid import TemplateUID
from zerorobot.template.state import StateCheckError
def mockdecorator(func):
    """Pass-through decorator: invoke the wrapped callable unchanged.

    Used below to neutralise zerorobot's timeout/retry decorators in tests.
    """
    def _passthrough(*call_args, **call_kwargs):
        return func(*call_args, **call_kwargs)
    return _passthrough
# Replace zerorobot's timeout/retry decorator factories with a pass-through
# so scheduled actions run synchronously and without retries during tests.
patch("zerorobot.template.decorator.timeout", MagicMock(return_value=mockdecorator)).start()
patch("zerorobot.template.decorator.retry", MagicMock(return_value=mockdecorator)).start()
# Neutralise sleeps so the test-suite never blocks on real waiting.
patch("gevent.sleep", MagicMock()).start()
patch("time.sleep", MagicMock()).start()
class TestBlockCreatorTemplate(TestCase):
    """Unit tests for the tfchain blockcreator 0-robot template.

    All node/container/client interactions are mocked; only the template's
    action logic (install/start/stop/uninstall/monitor/backup...) and its
    state transitions are exercised.
    """
    @classmethod
    def setUpClass(cls):
        # Shared service data accepted by the template's validate().
        cls.valid_data = {
            'node': 'node',
            'rpcPort': 23112,
            'apiPort': 23110,
            'walletAddr': '',
            'walletSeed': '',
            'walletPassphrase': '<PASSWORD>',
            'network': 'standard',
            'parentInterface': '',
            'tfchainFlist': 'https://hub.gig.tech/tfchain/ubuntu-16.04-tfchain-latest.flist',
        }
        # Load the template class under test into a throw-away data dir.
        config.DATA_DIR = tempfile.mkdtemp(prefix='0-templates_')
        cls.type = template_collection._load_template(
            "https://github.com/threefoldtoken/0-templates",
            os.path.dirname(__file__)
        )
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(config.DATA_DIR):
            shutil.rmtree(config.DATA_DIR)
    def setUp(self):
        # Mock out all js9 clients for every test.
        self.client_get = patch('js9.j.clients', MagicMock()).start()
    def tearDown(self):
        patch.stopall()
    def test_create_with_valid_data(self):
        """
        Test create blockcreator service
        """
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.validate()
        assert bc.data == self.valid_data
    def test_create_with_custom_network(self):
        """
        Test create explorer service
        """
        valid_data = self.valid_data.copy()
        valid_data['network'] = 'testnet'
        bc = self.type(name='blockcreator', data=valid_data)
        bc.validate()
        assert bc.data == valid_data
    def test_node_sal(self):
        """
        Test node_sal property
        """
        get_node = patch('js9.j.clients.zero_os.sal.get_node', MagicMock(return_value='node_sal')).start()
        bc = self.type(name='blockcreator', data=self.valid_data)
        node_sal = bc._node_sal
        get_node.assert_called_with(bc.data['node'])
        assert node_sal == 'node_sal'
    def test_install(self):
        """
        Test node install
        """
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc._daemon_sal.start = MagicMock()
        bc.api.services.find_or_create = MagicMock()
        fs = MagicMock(path='/var/cache')
        sp = MagicMock()
        sp.get = MagicMock(return_value=fs)
        bc._node_sal.storagepools.get = MagicMock(return_value=sp)
        # A single default route -> its device becomes the macvlan parent.
        list_of_candidates = [{'gw': '1.1.1.1', 'dev': 'one'}]
        bc._node_sal.client.ip.route.list = MagicMock(return_value=list_of_candidates)
        bc.api.services.find_or_create.return_value.state.check = MagicMock(side_effect=StateCheckError)
        bc.install()
        container_data = {
            'flist': 'https://hub.gig.tech/tfchain/ubuntu-16.04-tfchain-latest.flist',
            'node': bc.data['node'],
            'nics': [{'type': 'macvlan', 'id': 'one', 'config': {'dhcp': True}, 'name': 'stoffel'}],
            'mounts': [
                {'source': '/var/cache/wallet',
                 'target': bc._DATA_DIR},
                {'source': '/var/cache/backups',
                 'target': bc._BACKUP_DIR}
            ],
        }
        # test creation of container
        bc.api.services.find_or_create.assert_called_once_with(
            'github.com/zero-os/0-templates/container/0.0.1',
            bc._container_name,
            data=container_data)
        bc.state.check('actions', 'install', 'ok')
        assert bc.api.services.find_or_create.return_value.schedule_action.call_count == 2
    def test_start_not_installed(self):
        with pytest.raises(StateCheckError,
                           message='start action should raise an error if explorer is not installed'):
            bc = self.type(name='blockcreator', data=self.valid_data)
            bc.start()
    def test_start_installed_wallet_not_inited(self):
        # First start after install: wallet must be initialised and unlocked,
        # and a wallet seed must be generated into the service data.
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('actions', 'install', 'ok')
        bc.api.services.find_or_create = MagicMock()
        bc._daemon_sal.start = MagicMock()
        bc._client_sal.wallet_init = MagicMock()
        bc._client_sal.wallet_unlock = MagicMock()
        list_of_candidates = [{'gw': '1.1.1.1', 'dev': 'one'}]
        bc._node_sal.client.ip.route.list = MagicMock(return_value=list_of_candidates)
        bc.start()
        bc.state.check('actions', 'start', 'ok')
        bc.state.check('status', 'running', 'ok')
        bc.state.check('wallet', 'init', 'ok')
        bc.state.check('wallet', 'unlock', 'ok')
        bc._daemon_sal.start.assert_called_once_with()
        bc._client_sal.wallet_init.assert_called_once_with()
        bc._client_sal.wallet_unlock.assert_called_once_with()
        assert bc.data['walletSeed'] != ''
    def test_start_installed_wallet_inited(self):
        # Subsequent starts: wallet_init must NOT be called again.
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('actions', 'install', 'ok')
        bc.state.set('wallet', 'init', 'ok')
        bc.api.services.find_or_create = MagicMock()
        bc._daemon_sal.start = MagicMock()
        bc._client_sal.wallet_init = MagicMock()
        list_of_candidates = [{'gw': '1.1.1.1', 'dev': 'one'}]
        bc._node_sal.client.ip.route.list = MagicMock(return_value=list_of_candidates)
        bc._client_sal.wallet_unlock = MagicMock()
        bc.start()
        bc.state.check('actions', 'start', 'ok')
        bc.state.check('status', 'running', 'ok')
        bc.state.check('wallet', 'init', 'ok')
        bc.state.check('wallet', 'unlock', 'ok')
        bc._daemon_sal.start.assert_called_once_with()
        bc._client_sal.wallet_init.assert_not_called()
        bc._client_sal.wallet_unlock.assert_called_with()
        assert bc._client_sal.wallet_unlock.called
    def test_uninstall(self):
        # Uninstall must stop the service, wipe its filesystem, delete the
        # container service and clear all relevant states.
        bc = self.type(name='blockcreator', data=self.valid_data)
        container = MagicMock()
        container.schedule_action = MagicMock()
        container.delete = MagicMock()
        bc.stop = MagicMock()
        bc.api.services.find_or_create = MagicMock()
        bc.api.services.get = MagicMock(return_value=container)
        fs = MagicMock()
        fs.delete = MagicMock()
        sp = MagicMock()
        sp.get = MagicMock(return_value=fs)
        bc._node_sal.storagepools.get = MagicMock(return_value=sp)
        bc.uninstall()
        with pytest.raises(StateCheckError):
            bc.state.check('actions', 'install', 'ok')
        with pytest.raises(StateCheckError):
            bc.state.check('status', 'running', 'ok')
        with pytest.raises(StateCheckError):
            bc.state.check('status', 'init', 'ok')
        with pytest.raises(StateCheckError):
            bc.state.check('wallet', 'unlock', 'ok')
        bc.stop.assert_called_once_with()
        sp.get.assert_called_once_with(bc.guid)
        fs.delete.assert_called_once_with()
        container.delete.assert_called_once_with()
    def test_uninstall_container_not_exists(self):
        # Uninstall must tolerate a missing container / storagepool.
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.stop = MagicMock(side_effect=LookupError)
        bc.api.services.find_or_create = MagicMock()
        fs = MagicMock()
        fs.delete = MagicMock()
        sp = MagicMock()
        sp.get = MagicMock(return_value=fs)
        bc._node_sal.storagepools.get = MagicMock(side_effect=ValueError)
        bc.uninstall()
        with pytest.raises(StateCheckError):
            bc.state.check('actions', 'install', 'ok')
        with pytest.raises(StateCheckError):
            bc.state.check('status', 'running', 'ok')
        bc.stop.assert_called_once_with()
        sp.get.assert_not_called()
        fs.assert_not_called()
    def test_stop(self):
        container = MagicMock()
        container.schedule_action = MagicMock()
        container.delete = MagicMock()
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('actions', 'install', 'ok')
        bc.api.services.get = MagicMock(return_value=container)
        bc._daemon_sal.stop = MagicMock()
        bc.stop()
        with pytest.raises(StateCheckError):
            bc.state.check('actions', 'start', 'ok')
        with pytest.raises(StateCheckError):
            bc.state.check('status', 'running', 'ok')
        with pytest.raises(StateCheckError):
            bc.state.check('wallet', 'unlock', 'ok')
        bc._daemon_sal.stop.assert_called_once_with()
        container.schedule_action.assert_called_once_with('stop')
        container.delete.assert_not_called()
    def test_stop_container_not_exists(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('actions', 'install', 'ok')
        container = MagicMock()
        container.schedule_action = MagicMock()
        container.delete = MagicMock()
        bc.api.services.get = MagicMock(return_value=container)
        bc._daemon_sal.stop = MagicMock(side_effect=LookupError)
        with self.assertRaises(RuntimeError):
            bc.stop()
        with pytest.raises(StateCheckError):
            bc.state.check('actions', 'start', 'ok')
        with pytest.raises(StateCheckError):
            bc.state.check('status', 'running', 'ok')
        bc._daemon_sal.stop.assert_called_once_with()
        container.schedule_action.assert_not_called()
        container.delete.assert_not_called()
    def test_upgrade_fail_no_candidates(self):
        # No default route -> cannot determine the macvlan parent interface.
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc._node_sal.storagepools.get = MagicMock()
        bc.stop = MagicMock()
        bc.start = MagicMock()
        container = MagicMock()
        container.schedule_action = MagicMock()
        container.delete = MagicMock()
        bc.api.services.get = MagicMock(return_value=container)
        bc._node_sal.client.nft.drop_port = MagicMock()
        with self.assertRaisesRegex(RuntimeError, 'Could not find interface for macvlan parent'):
            bc.upgrade()
    def test_upgrade_fail_too_many_candidates(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc._node_sal.storagepools.get = MagicMock()
        bc.stop = MagicMock()
        bc.start = MagicMock()
        container = MagicMock()
        container.schedule_action = MagicMock()
        container.delete = MagicMock()
        bc.api.services.get = MagicMock(return_value=container)
        bc._node_sal.client.nft.drop_port = MagicMock()
        list_of_candidates = [{'gw': '1.1.1.1', 'dev': 'one'}, {'gw': '1.1.1.2', 'dev':'two'}]
        bc._node_sal.client.ip.route.list = MagicMock(return_value=list_of_candidates)
        with self.assertRaisesRegex(RuntimeError, 'Found multiple eligible interfaces for macvlan parent: one, two'):
            bc.upgrade()
    def test_upgrade_success(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc._node_sal.storagepools.get = MagicMock()
        bc.stop = MagicMock()
        bc.start = MagicMock()
        container = MagicMock()
        container.schedule_action = MagicMock()
        container.delete = MagicMock()
        bc.api.services.get = MagicMock(return_value=container)
        bc._node_sal.client.nft.drop_port = MagicMock()
        list_of_candidates = [{'gw': '1.1.1.1', 'dev': 'one'}]
        bc._node_sal.client.ip.route.list = MagicMock(return_value=list_of_candidates)
        bc.upgrade()
        bc.stop.assert_called_once_with()
        bc.start.assert_called_once_with()
        # Upgrade closes the rpc port (23112) via nftables.
        bc._node_sal.client.nft.drop_port.assert_called_once_with(23112)
    def test_consensus_stat(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('status', 'running', 'ok')
        bc._client_sal.consensus_stat = MagicMock()
        bc.consensus_stat()
        bc._client_sal.consensus_stat.assert_called_once_with()
    def test_consensus_stat_not_running(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc._client_sal.consensus_stat = MagicMock()
        with pytest.raises(StateCheckError):
            bc.consensus_stat()
        bc._client_sal.consensus_stat.assert_not_called()
    def test_wallet_amount(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('status', 'running', 'ok')
        bc.state.set('wallet', 'init', 'ok')
        bc.state.set('wallet', 'unlock', 'ok')
        bc._client_sal.wallet_amount = MagicMock()
        bc.wallet_amount()
        bc._client_sal.wallet_amount.assert_called_once_with()
    def test_wallet_amount_wallet_not_unlocked(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('status', 'running', 'ok')
        bc._client_sal.wallet_amount = MagicMock()
        with pytest.raises(StateCheckError):
            bc.wallet_amount()
        bc._client_sal.wallet_amount.assert_not_called()
    def test_wallet_amount_not_running(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc._client_sal.wallet_amount = MagicMock()
        with pytest.raises(StateCheckError):
            bc.wallet_amount()
        bc._client_sal.wallet_amount.assert_not_called()
    def test_monitor_not_intalled(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        with pytest.raises(StateCheckError):
            bc._monitor()
    def test_monitor_not_started(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('actions', 'start', 'ok')
        with pytest.raises(StateCheckError):
            bc._monitor()
    def test_monitor_is_running(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('actions', 'install', 'ok')
        bc.state.set('actions', 'start', 'ok')
        bc._daemon_sal.is_running = MagicMock(return_value=True)
        bc.api.services.get = MagicMock()
        bc._monitor()
        bc.state.check('status', 'running', 'ok')
        bc.api.services.get.assert_not_called()
    def test_monitor_not_running(self):
        # If the daemon died, _monitor must recreate the container and
        # reinstall + restart the service.
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('actions', 'install', 'ok')
        bc.state.set('actions', 'start', 'ok')
        bc._daemon_sal.is_running = MagicMock(return_value=False)
        container = MagicMock()
        container.delete = MagicMock()
        bc.api.services.get = MagicMock(return_value=container)
        bc.install = MagicMock()
        def set_running_state():
            bc.state.set('status', 'running', 'ok')
        bc.start = MagicMock(side_effect=set_running_state)
        bc._monitor()
        bc.state.check('status', 'running', 'ok')
        bc.api.services.get.assert_called_with(template_uid='github.com/zero-os/0-templates/container/0.0.1', name=bc._container_name)
        container.delete.assert_called_once_with()
        bc.install.assert_called_once_with()
        bc.start.assert_called_once_with()
    def test_create_backup_success(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('status', 'running', 'ok')
        result_mock = MagicMock(state='SUCCESS')
        bc._container_sal.client.system = MagicMock(return_value=MagicMock(get=MagicMock(return_value=result_mock)))
        backup_name = 'backup.tar.gz'
        bc.create_backup(backup_name)
        bc._container_sal.client.system.assert_called_once_with(
            'tar -zcf {} {} -P'.format(os.path.join(bc._BACKUP_DIR, backup_name), bc._DATA_DIR)
        )
    def test_create_backup_call_fail(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('status', 'running', 'ok')
        result_mock = MagicMock(state='ERROR', stderr='error message', data='error data')
        bc._container_sal.client.system = MagicMock(return_value=MagicMock(get=MagicMock(return_value=result_mock)))
        with self.assertRaisesRegex(RuntimeError, 'error occurred when creating backup: error message \n '):
            bc.create_backup()
    def test_create_backup_fail_state(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        with self.assertRaises(StateCheckError):
            bc.create_backup('name')
    def test_restore_backup_success(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('status', 'running', 'ok')
        result_mock = MagicMock(state='SUCCESS')
        bc._container_sal.client.system = MagicMock(return_value=MagicMock(get=MagicMock(return_value=result_mock)))
        backup_name = 'backup.tar.gz'
        bc.restore_backup(backup_name)
        bc._container_sal.client.system.assert_called_once_with(
            'tar -zxf {} -P'.format(os.path.join(bc._BACKUP_DIR, backup_name))
        )
    def test_restore_backup_fail_call(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        bc.state.set('status', 'running', 'ok')
        result_mock = MagicMock(state='ERROR', stderr='error message', data='error data')
        bc._container_sal.client.system = MagicMock(return_value=MagicMock(get=MagicMock(return_value=result_mock)))
        with self.assertRaisesRegex(RuntimeError, 'error occurred when restoring backup: error message \n '):
            bc.restore_backup('name')
    def test_restore_backup_fail_state(self):
        bc = self.type(name='blockcreator', data=self.valid_data)
        with self.assertRaises(StateCheckError):
            bc.restore_backup('name')
4957027 | # -*- coding: utf-8 -*-
"""
A collection of generally useful mathematical (or numerical) functions.
"""
def is_power_of_two(value: int) -> bool:
    """Return True when *value* is an exact power of two.

    Zero and negative numbers can never be powers of two and yield False.

    :param value: The integer to test.
    :return: True for 1, 2, 4, 8, ...; False otherwise.
    """
    # A positive power of two has exactly one bit set, so clearing its
    # lowest set bit -- value & (value - 1) -- must leave zero.  For any
    # other positive number at least one higher bit survives the AND.
    return value > 0 and value & (value - 1) == 0
| StarcoderdataPython |
9687213 | '''Tokens class.
:copyright: 2021, <NAME> <<EMAIL>>
'''
from .elements import NamedElement, c_export, go_export, java_export
class Tokens(NamedElement):
    """Grammar element matching any one token from a space-separated list,
    e.g. ``Tokens('== != <= >=')``.
    """
    __slots__ = ('_tokens',)
    def __init__(self, tokens):
        # Sort longest-first so that e.g. '==' is tried before '=' and the
        # longest possible token wins the match.
        self._tokens = tokens.split()
        self._tokens.sort(key=len, reverse=True)
    def __repr__(self):
        return ' '.join(self._tokens)
    def _get_node_result(self, root, tree, rule, s, node):
        # Try each token (longest first) against the start of `s`.
        for token in self._tokens:
            if s.startswith(token):
                root._append_tree(tree, node, node.start + len(token))
                return True, node.end
        # No token matched: record this element as "expected" at the position.
        root._expecting.update(self, node.start)
        return False, node.start
    def _run_export_js(self, js_indent, indent, classes, cname):
        # Single quotes are escaped for the JavaScript string literal.
        return 'Tokens(\'{}\')'.format(
            ' '.join(self._tokens).replace('\'', '\\\''))
    def _run_export_py(self, py_indent, indent, classes):
        return 'Tokens(\'{}\')'.format(
            ' '.join(self._tokens).replace('\'', '\\\''))
    @c_export
    def _run_export_c(self, c_indent, indent, enums, gid):
        # Double quotes are escaped for the C string literal.
        return 'cleri_tokens({}, "{}")'.format(
            gid,
            ' '.join(self._tokens).replace('"', '\\"'))
    @go_export
    def _run_export_go(self, go_indent, indent, enums, gid):
        return 'goleri.NewTokens({}, "{}")'.format(
            gid,
            ' '.join(self._tokens).replace('"', '\\"'))
    @java_export
    def _run_export_java(self, java_indent, indent, enums, classes, gid):
        # gid is optional in the Java export; omitted when None.
        return 'new Tokens({}"{}")'.format(
            '' if gid is None else 'Ids.{}, '.format(gid),
            ' '.join(self._tokens).replace('"', '\\"'))
| StarcoderdataPython |
9664582 | import os
import click
import logging
import palettable
import subprocess
import pdb
import math
from jenks import jenks # pip install -e "git+https://github.com/perrygeo/jenks.git#egg=jenks"
import sys
from jenks import jenks
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import matplotlib.ticker as tkr
from matplotlib import rcParams, ticker
from mpl_toolkits.basemap import Basemap, maskoceans
from matplotlib.ticker import MaxNLocator, AutoLocator
from matplotlib.ticker import LogFormatter
from matplotlib.colors import LogNorm
import matplotlib.colors as colors
from simplekml import (Kml, OverlayXY, ScreenXY, Units, RotationXY, AltitudeMode, Camera)
from random import shuffle
from cycler import cycler
from tqdm import tqdm
import seaborn.apionly as sns
import constants
import pygeoutil.util as util
# Logging.
# The log file is named after this module and lives in constants.log_dir.
cur_flname = os.path.splitext(os.path.basename(__file__))[0]
LOG_FILENAME = constants.log_dir + os.sep + 'Log_' + cur_flname + '.txt'
util.make_dir_if_missing(constants.log_dir)
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO, filemode='w',
                    format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s',
                    datefmt="%m-%d %H:%M")  # Logging levels are DEBUG, INFO, WARNING, ERROR, and CRITICAL
# Output to screen
# Mirror log records to stdout in addition to the log file.
logger = logging.getLogger(cur_flname)
logger.addHandler(logging.StreamHandler(sys.stdout))
@click.group()
def plot_glm():
    # Root click command group; plotting sub-commands register against it.
    # (Deliberately no docstring: click would surface it as CLI help text.)
    pass
def set_matplotlib_params():
    """Install nicer matplotlib defaults: regular (non-italic) mathtext,
    12 pt axis labels, tick labels and legend text, and a Helvetica
    sans-serif font family.
    """
    nicer_defaults = {
        'mathtext.default': 'regular',
        'axes.labelsize': 12,
        'xtick.labelsize': 12,
        'ytick.labelsize': 12,
        'legend.fontsize': 12,
        'font.family': 'sans-serif',
        'font.serif': ['Helvetica'],
    }
    for key, value in nicer_defaults.items():
        rcParams[key] = value
def get_colors(palette='colorbrewer', cmap=False):
    """
    Get palettable colors, which are nicer
    :param palette: one of 'colorbrewer', 'tableau', 'cubehelix', 'qualitative'
    :param cmap: if True, return the palette's mpl colormap instead of a color list
    """
    # NOTE(review): an unrecognised `palette` leaves `bmap` unbound and the
    # code below raises NameError -- confirm callers only pass known names.
    if palette == 'colorbrewer':
        bmap = palettable.colorbrewer.diverging.PRGn_11.mpl_colors
        if cmap:
            bmap = palettable.colorbrewer.diverging.PRGn_11.mpl_colormap
    elif palette == 'tableau':
        bmap = palettable.tableau.Tableau_20.mpl_colors
        if cmap:
            bmap = palettable.tableau.Tableau_20.mpl_colormap
        bmap = bmap[0::2] + bmap[1::2]  # Move the even numbered colors to end of list (they are very light)
    elif palette == 'cubehelix':
        bmap = palettable.cubehelix.cubehelix2_16.mpl_colors
        if cmap:
            bmap = palettable.cubehelix.cubehelix2_16.mpl_colormap
    elif palette == 'qualitative':
        bmap = palettable.tableau.GreenOrange_12.mpl_colors
        if cmap:
            bmap = palettable.tableau.GreenOrange_12.mpl_colormap
    all_markers = ['s', 'o', '^', '*', 'v', '<', '>', 4, 5] * 100
    max_values = len(bmap) if len(bmap) < len(all_markers) else len(all_markers)
    # Make sure equal number of cycle elements for color and markers
    markers = all_markers[:max_values]
    bmap = bmap[:max_values]
    # Install a combined color+marker property cycle globally on pyplot.
    color_cycle = cycler('color', bmap)  # color cycle
    marker_cycle = cycler('marker', markers)  # marker cycle
    plt.rc('axes', prop_cycle=(color_cycle + marker_cycle))
    return bmap
def simple_axis(ax):
    """
    Remove the top and right spines and show ticks only on the bottom/left.

    :param ax: matplotlib Axes to restyle
    """
    # Bug fix: the spines were being set to visible=True, which contradicts
    # both the function name and the documented intent of removing them.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
def gridded_axis(ax):
    """
    Plot major, minor ticks as well as a grid
    :param ax: matplotlib Axes to restyle
    :return:
    """
    # Set number of major and minor ticks
    # NOTE(review): AutoLocator is also used for the *minor* locators here;
    # matplotlib's AutoMinorLocator may have been intended -- confirm.
    ax.xaxis.set_major_locator(AutoLocator())
    ax.xaxis.set_minor_locator(AutoLocator())
    ax.yaxis.set_major_locator(AutoLocator())
    ax.yaxis.set_minor_locator(AutoLocator())
    # Create nice-looking grid for ease of visualization
    ax.grid(which='minor', alpha=0.2, linestyle='--')
    ax.grid(which='major', alpha=0.5, linestyle='--')
    # For y-axis, format the numbers
    # scale == 1 leaves values unchanged; raise it (e.g. 1000) to show thousands
    scale = 1
    ticks = tkr.FuncFormatter(lambda x, pos: '{:0,d}'.format(int(x/scale)))
    ax.yaxis.set_major_formatter(ticks)
def simple_legend(ax):
    """
    Draw a frameless, three-column legend centred just below the axes.
    Calling ax.legend() replaces any legend previously drawn on the axes.

    :param ax: matplotlib Axes to attach the legend to
    :return:
    """
    leg = ax.legend(fancybox=None, prop={'size': 10}, loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=3,
                    frameon=True)
    # Frame is kept but its border is made invisible (zero line width).
    leg.get_frame().set_linewidth(0.0)
def get_cb_range(arr=np.empty([2, 2]), xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1, do_jenks=True):
    """
    Compute colorbar tick values, either via Jenks natural-breaks
    classification of the data or as a fixed arange.
    https://github.com/perrygeo/jenks
    :param arr: 2-D data array to classify (only used when do_jenks=True)
    :param xaxis_min: lower bound of the fixed range (do_jenks=False)
    :param xaxis_max: upper bound (exclusive) of the fixed range
    :param xaxis_step: step of the fixed range
    :param do_jenks: classify `arr` with Jenks breaks instead of arange
    :return: 1-D numpy array of tick values
    :raises ValueError: if `arr` is not 2-D
    """
    # Array can only have shape == 2
    if len(np.shape(arr)) != 2:
        # Bug fix: this used to call sys.exit(0), silently terminating the
        # whole process with a *success* code; raise so callers can react.
        raise ValueError('get_cb_range expects a 2-D array, got shape {}'.format(np.shape(arr)))
    if do_jenks:
        # Select 11 Jenks class edges from the unique rounded values and
        # discard the highest edge.
        arr = np.array(jenks(np.unique(np.round_(arr, decimals=1)).data, 11))[:-1]
        # return only the unique elements, sometimes jenks selects duplicate elements
        return np.unique(arr)
    else:
        return np.arange(xaxis_min, xaxis_max, xaxis_step)
def truncate_colormap(cmap, minval=0.01, maxval=1.0, n=100):
    """
    Build a new colormap from the [minval, maxval] slice of an existing one.
    http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
    :param cmap: source matplotlib colormap
    :param minval: lower bound of the slice (fraction of the colormap)
    :param maxval: upper bound of the slice (fraction of the colormap)
    :param n: number of colour samples taken from the slice
    :return: new LinearSegmentedColormap covering only the requested slice
    """
    sample_points = np.linspace(minval, maxval, n)
    sliced_name = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    return colors.LinearSegmentedColormap.from_list(sliced_name, cmap(sample_points))
def make_kml(llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, figs, colorbar=None, **kw):
    """
    Package figure image(s) into a Google-Earth KMZ ground overlay.
    https://ocefpaf.github.io/python4oceanographers/blog/2014/03/10/gearth/

    TODO: LatLon bbox, list of figs, optional colorbar figure,
    and several simplekml kw...

    :param llcrnrlon: lower-left longitude of the overlay bounding box
    :param llcrnrlat: lower-left latitude of the overlay bounding box
    :param urcrnrlon: upper-right longitude of the overlay bounding box
    :param urcrnrlat: upper-right latitude of the overlay bounding box
    :param figs: list of image paths; NOTE: all overlays share the same bbox
    :param colorbar: optional path to a colorbar image drawn as a screen overlay
    :param kw: simplekml options (altitude, roll, tilt, altitudemode,
               visibility, name, color, author, rotation, description,
               gxaltitudemode, kmzfile, ...)
    """
    kml = Kml()
    # Camera looks at the centre of the bounding box from `altitude`.
    altitude = kw.pop('altitude', 2e7)
    roll = kw.pop('roll', 0)
    tilt = kw.pop('tilt', 0)
    altitudemode = kw.pop('altitudemode', AltitudeMode.relativetoground)
    camera = Camera(latitude=np.mean([urcrnrlat, llcrnrlat]),
                    longitude=np.mean([urcrnrlon, llcrnrlon]),
                    altitude=altitude, roll=roll, tilt=tilt,
                    altitudemode=altitudemode)
    kml.document.camera = camera
    draworder = 0
    for fig in figs:  # NOTE: Overlays are limited to the same bbox.
        draworder += 1
        ground = kml.newgroundoverlay(name='GroundOverlay')
        ground.draworder = draworder
        # NOTE(review): kw.pop inside the loop means only the first overlay
        # sees user-supplied values; later overlays get the defaults -- confirm.
        ground.visibility = kw.pop('visibility', 1)
        ground.name = kw.pop('name', 'overlay')
        ground.color = kw.pop('color', '9effffff')
        ground.atomauthor = kw.pop('author', 'ocefpaf')
        ground.latlonbox.rotation = kw.pop('rotation', 0)
        ground.description = kw.pop('description', 'Matplotlib figure')
        ground.gxaltitudemode = kw.pop('gxaltitudemode',
                                       'clampToSeaFloor')
        ground.icon.href = fig
        # NOTE(review): 'east' receives the lower-left longitude and 'west'
        # the upper-right one -- looks swapped; confirm against rendered output.
        ground.latlonbox.east = llcrnrlon
        ground.latlonbox.south = llcrnrlat
        ground.latlonbox.north = urcrnrlat
        ground.latlonbox.west = urcrnrlon
    if colorbar:  # Options for colorbar are hard-coded (to avoid a big mess).
        screen = kml.newscreenoverlay(name='ScreenOverlay')
        screen.icon.href = colorbar
        # Pin the colorbar image near the bottom-left of the screen.
        screen.overlayxy = OverlayXY(x=0, y=0,
                                     xunits=Units.fraction,
                                     yunits=Units.fraction)
        screen.screenxy = ScreenXY(x=0.015, y=0.075,
                                   xunits=Units.fraction,
                                   yunits=Units.fraction)
        screen.rotationXY = RotationXY(x=0.5, y=0.5,
                                       xunits=Units.fraction,
                                       yunits=Units.fraction)
        # Size 0 keeps the image at its native pixel size.
        screen.size.x = 0
        screen.size.y = 0
        screen.size.xunits = Units.fraction
        screen.size.yunits = Units.fraction
        screen.visibility = 1
    kmzfile = kw.pop('kmzfile', 'overlay.kmz')
    kml.savekmz(kmzfile)
def gearth_fig(llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, pixels=20480):
    """
    https://ocefpaf.github.io/python4oceanographers/blog/2014/03/10/gearth/
    Return a Matplotlib `fig` and `ax` handles for a Google-Earth Image.
    :param llcrnrlon: lower-left longitude of the bounding box
    :param llcrnrlat: lower-left latitude of the bounding box
    :param urcrnrlon: upper-right longitude of the bounding box
    :param urcrnrlat: upper-right latitude of the bounding box
    :param pixels: target pixel budget; figure dpi is pixels // 10
    :return: (fig, ax) with axes spanning the bounding box edge-to-edge
    """
    # Shrink the longitude span by cos(mean latitude) so the figure aspect
    # matches the distortion Google Earth applies.
    aspect = np.cos(np.mean([llcrnrlat, urcrnrlat]) * np.pi/180.0)
    xsize = np.ptp([urcrnrlon, llcrnrlon]) * aspect
    ysize = np.ptp([urcrnrlat, llcrnrlat])
    aspect = ysize / xsize
    # Fix the longer side at 10 inches and scale the other by the aspect.
    if aspect > 1.0:
        figsize = (10.0 / aspect, 10.0)
    else:
        figsize = (10.0, 10.0 * aspect)
    # Disabled toggle kept from the upstream recipe.
    if False:
        plt.ioff()  # Make `True` to prevent the KML components from poping-up.
    fig = plt.figure(figsize=figsize,
                     frameon=False,
                     dpi=pixels//10)
    # KML friendly image. If using basemap try: `fix_aspect=False`.
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_xlim(llcrnrlon, urcrnrlon)
    ax.set_ylim(llcrnrlat, urcrnrlat)
    return fig, ax
def output_kml(trans, lon, lat, path_out, xmin, xmax, step, cmap, fname_out='out', name_legend='', label='',
               do_log_cb=False):
    """
    Render a 2D field as a Google-Earth KMZ overlay with a separate colorbar image.

    :param trans: 2D array (lat x lon) of values to plot
    :param lon: 1D array of longitudes
    :param lat: 1D array of latitudes
    :param path_out: base output directory; a 'kml' subdirectory is created
    :param xmin: colorbar minimum
    :param xmax: colorbar maximum (exclusive)
    :param step: colorbar step
    :param cmap: matplotlib colormap
    :param fname_out: base name for the kmz and intermediate png
    :param name_legend: legend title (also used for the colorbar png name)
    :param label: extra label line under the legend title
    :param do_log_cb: draw a logarithmic colorbar when the data is strictly positive
    :return: None; side-effect: writes <path_out>/kml/<fname_out>.kmz
    """
    # Return if xmin == xmax
    if xmin == xmax:
        return
    # NOTE(review): uses the root `logging` module (not the module `logger`)
    # and has no separator between the literal and fname_out -- confirm intended.
    logging.info('output_kml' + fname_out)
    dir_output = path_out + os.sep + 'kml'
    util.make_dir_if_missing(dir_output)
    # Figure sized/limited to the geographic bounding box of the data.
    fig, ax = gearth_fig(llcrnrlon=lon.min(), llcrnrlat=lat.min(), urcrnrlon=lon.max(), urcrnrlat=lat.max())
    lons, lats = np.meshgrid(lon, lat)
    m = Basemap(projection='cyl', resolution='c')
    x, y = m(lons, lats)
    mask_data = maskoceans(lons, lats, trans)
    m.etopo()
    if do_log_cb and np.nanmin(mask_data) > 0.0:
        # manually set log levels e.g. http://matplotlib.org/examples/images_contours_and_fields/contourf_log.html
        lev_exp = np.arange(np.floor(np.log10(np.nanmin(mask_data)) - 1), np.ceil(np.log10(np.nanmax(mask_data)) + 1))
        levs = np.power(10, lev_exp)
        cs = m.contourf(x, y, mask_data, levs, norm=colors.LogNorm(), cmap=cmap)
    else:
        cs = m.contourf(x, y, mask_data, np.arange(xmin, xmax, step), cmap=cmap)
    # Pick a tick-label format appropriate to the magnitude of the data range.
    # NOTE(review): the local name `format` shadows the builtin of the same name.
    if abs(xmax - xmin) > 10000.0:
        format = '%.1e'
    elif abs(xmax - xmin) > 100.0:
        format = '%.0f'
    elif abs(xmax - xmin) > 1.0:
        format = '%.1f'
    elif abs(xmax - xmin) > 0.1:
        format = '%.3f'
    else:
        format = '%.4f'
    ax.set_axis_off()
    fig.savefig(dir_output + os.sep + 'kml_' + fname_out + '.png', transparent=False, format='png', dpi=800)
    # Colorbar
    fig = plt.figure(figsize=(1.0, 4.0), facecolor=None, frameon=False)
    # Colorbar legend, numbers represent: [bottom_left_x_coord, bottom_left_y_coord, width, height]
    ax = fig.add_axes([0.02, 0.05, 0.2, 0.9])
    if do_log_cb and np.nanmin(mask_data) >= 0.0:
        cb = fig.colorbar(cs, cax=ax, spacing='uniform')
    else:
        cb = fig.colorbar(cs, cax=ax, format=format, spacing='uniform')
    cb.set_label(name_legend + '\n' + label, rotation=-90, color='k', labelpad=25, size=7)
    cb.ax.tick_params(labelsize=6)
    fig.savefig(dir_output + os.sep + name_legend + '.png', transparent=False, format='png', dpi=125)
    # Bundle map image + colorbar image into the final kmz.
    make_kml(llcrnrlon=lon.min(), llcrnrlat=lat.min(), urcrnrlon=lon.max(), urcrnrlat=lat.max(),
             figs=[dir_output + os.sep + 'kml_' + fname_out + '.png'],
             colorbar=dir_output + os.sep + name_legend + '.png',
             kmzfile=dir_output + os.sep + fname_out + '.kmz', name=fname_out)
    # Delete temp files
    # os.remove(dir_output + os.sep + 'kml_' + fname_out + '.png')
    # os.remove(dir_output + os.sep + name_legend + '.png')
    plt.close('all')
def make_movie(list_images, out_path, out_fname):
    """
    Stitch a sequence of images into a movie using ImageMagick's ``convert``.

    :param list_images: ordered list of image file paths (the frames)
    :param out_path: output directory (created if missing)
    :param out_fname: name of the output movie file
    :return: None; side-effect: writes ``out_path/out_fname``
    """
    util.make_dir_if_missing(out_path)
    # Build the argument list explicitly and avoid shell=True so that paths
    # containing spaces or shell metacharacters are passed through safely
    # (the previous string-joined command broke on such paths).
    convert_cmd = ['convert', '-delay', '50', '-loop', '1'] + list(list_images) + [out_path + os.sep + out_fname]
    subprocess.call(convert_cmd)
def plot_hist(hist, bin_edges, out_path, do_per=False, do_log=True, title='', xlabel='', ylabel=''):
    """
    Render a histogram (frequency counts per bin) as a bar chart and save it.

    :param hist: values of the histogram (per-bin counts)
    :param bin_edges: bin edges; the last edge is dropped for the x positions
    :param out_path: output path and file name
    :param do_per: show each bin as a percentage of the total count
    :param do_log: logarithmic y-axis (ignored when do_per is True)
    :param title: plot title
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :return: None; side-effect: saves the figure and closes it
    """
    logger.info('Plot histogram')
    sns.set_style('whitegrid')
    # Left bin edges, as integers, serve as the categorical x positions.
    x_bins = bin_edges[:-1].astype(int)
    if do_per:
        # Convert raw counts into percentages of the total.
        shares = hist * 100.0 / sum(hist)
        sns.barplot(x_bins, shares, color='purple', edgecolor='none')
        plt.ylim(0.0, 100.0)
    else:
        sns.barplot(x_bins, hist, color='purple', edgecolor='none', log=do_log)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.tight_layout()
    plt.savefig(out_path, dpi=constants.DPI)
    plt.close()
def plot_np_ts(ax, np_arr, xvar, out_path, vert_yr=[], title='', leg_name='', xlabel='', ylabel='', col='k',
               do_log=False):
    """
    Plot single time-series. Add a vertical line for vert_yr
    :param ax: matplotlib axes to draw into
    :param np_arr: 1D array of y-values
    :param xvar: x-axis values (e.g. years), same length as np_arr
    :param out_path: output path (includes output file name)
    :param vert_yr: x positions at which to draw dashed vertical marker lines
    :param title: plot title
    :param leg_name: legend label for the series
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param col: line colour
    :param do_log: use a logarithmic y-axis
    :return: None; side-effect: saves the figure to out_path and closes it
    """
    logger.info('Plot time-series of numpy array')
    # markevery thins markers to roughly 5 per series.
    # NOTE(review): int(len(xvar)/5.0) is 0 for series shorter than 5 points,
    # which newer matplotlib rejects -- confirm inputs are long enough.
    ax.plot(xvar, np_arr, label=leg_name, color=col, lw=1.75, markevery=int(len(xvar)/5.0), markeredgecolor='none')
    if do_log:
        ax.set_yscale('log')
    if len(vert_yr):
        # Set x-axis limit
        ax.set_xlim([min(xvar), max(xvar)])
        for yr in vert_yr:
            plt.axvline(yr, linestyle='--', color='k', lw=1.5)
            # Label each vertical line near the bottom of the axes.
            plt.annotate(str(yr), xy=(yr-5, ax.get_ylim()[0] + 0.01 * ax.get_ylim()[1]), color='k', size=8,
                         bbox=dict(edgecolor='none', fc='white', alpha=0.5))
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    gridded_axis(ax)
    # Show y-axis in scientific format
    formatter = tkr.ScalarFormatter(useMathText=True)
    formatter.set_scientific(True)
    formatter.set_powerlimits((-1, 1))
    ax.yaxis.set_major_formatter(formatter)
    simple_legend(ax)
    plt.title(title)
    plt.tight_layout()
    plt.savefig(out_path, dpi=constants.DPI, bbox_inches='tight')
    plt.close()
def plot_multiple_ts(ax, np_arr, xvar, out_path='', vert_yr=[], title='', leg_name='', xlabel='', ylabel='',
                     linestyle='-', col=None, pos='first', do_log=False, fill_between=False):
    """
    Produces a single plot showing time-series of multiple variables
    :param ax: matplotlib axes shared across successive calls
    :param np_arr: Numpy array to plot on y-axis
    :param xvar: Time series
    :param out_path: Output path (includes output file name)
    :param vert_yr: x positions at which to draw dashed vertical marker lines
    :param title: Title of plot
    :param leg_name: legend label for this series
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param linestyle: matplotlib linestyle for this series
    :param col: line colour (None lets the prop cycle choose)
    :param pos: if 'first', then set up axis, if 'last' then save image. Refers to whether current time-series is
    first or last
    :param do_log: use a logarithmic y-axis
    :param fill_between: shade the area between this series and y=0
    :return: Nothing, side-effect: save an image (only when pos == 'last'; the
             figure is left open for the caller to close)
    """
    logger.info('Plot multiple time-series')
    # NOTE(review): markevery is 0 for series shorter than 5 points, which
    # newer matplotlib rejects -- confirm inputs are long enough.
    ax.plot(xvar, np_arr, label=leg_name, color=col, lw=1.75, linestyle=linestyle,
            markevery=int(len(xvar)/5.0), markeredgecolor='none')
    if fill_between:
        # Shade between the curve (truncated to len(xvar)) and y=0.
        ax.fill_between(xvar, np_arr[:len(xvar)], y2=0)
    if do_log:
        ax.set_yscale('log')
    if pos == 'last':
        # Finalize decorations and save only after all series have been drawn.
        # Set x-axis limit
        ax.set_xlim([min(xvar), max(xvar)])
        # Annotate
        if len(vert_yr):
            for yr in vert_yr:
                ax.axvline(yr, linestyle='--', color='k', lw=1.5)
                plt.annotate(str(yr), xy=(yr-5, ax.get_ylim()[0] + 0.01 * ax.get_ylim()[1]), color='k', size=8,
                             bbox=dict(edgecolor='none', fc='white', alpha=0.5))
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        gridded_axis(ax)
        # Show y-axis in scientific format
        formatter = tkr.ScalarFormatter(useMathText=True)
        formatter.set_scientific(True)
        formatter.set_powerlimits((-1, 1))
        ax.yaxis.set_major_formatter(formatter)
        simple_legend(ax)
        plt.title(title.capitalize())
        plt.tight_layout()
        plt.savefig(out_path, dpi=constants.DPI, bbox_inches='tight')
def plot_hovmoller(nc_path, var, out_path, do_latitude=True, xlabel='', ylabel='', title='', cbar=''):
    """
    Plot a Hovmoller diagram (time vs. latitude or longitude) of a netCDF variable.
    Ref: http://scitools.org.uk/iris/docs/v0.9.1/examples/graphics/hovmoller.html
    :param nc_path: path to the netCDF file
    :param var: variable (name/constraint) to load with iris
    :param out_path: output image path
    :param do_latitude: collapse over latitude (True) or longitude (False)
    :param xlabel: label for the spatial axis
    :param ylabel: label for the time axis
    :param title: plot title
    :param cbar: colorbar label
    :return: None; side-effect: saves the figure (no-op on posix systems, see below)
    """
    logger.info('Plot hovmoller')
    # TODO: Iris install is not working on mac os x
    # NOTE(review): os.name is 'posix' on both macOS and Linux (it is never
    # 'mac' on Python 3), so this guard disables the plot on *all* unix-like
    # systems, not just macOS -- confirm intended.
    if os.name == 'mac' or os.name == 'posix':
        return
    # Imported lazily so the module loads without iris installed.
    import iris
    import iris.plot as iplt
    iris.FUTURE.netcdf_promote = True
    cubes = iris.load(nc_path, var)
    # Take the mean over latitude/longitude
    if do_latitude:
        cube = cubes[0].collapsed('latitude', iris.analysis.MEAN)
    else:
        cube = cubes[0].collapsed('longitude', iris.analysis.MEAN)
    # Create the plot contour with 20 levels
    iplt.contourf(cube, 20, cmap=palettable.colorbrewer.diverging.RdYlGn_9.mpl_colormap)
    # Axis labels depend on which dimension was collapsed.
    if not do_latitude:
        plt.ylabel(xlabel)  # Latitude
        plt.xlabel(ylabel)  # Years
    else:
        plt.ylabel(ylabel)  # Years
        plt.xlabel(xlabel)  # Longitude
    plt.title(title)
    plt.colorbar(orientation='horizontal', extend='both', drawedges=False, spacing='proportional').set_label(cbar)
    # Stop matplotlib providing clever axes range padding and do not draw gridlines
    plt.grid(b=False)
    plt.axis('tight')
    plt.tight_layout()
    plt.savefig(out_path, dpi=constants.DPI)
    plt.close()
# @plot_glm.command()
# @click.argument('path_nc')
# @click.argument('out_path')
# @click.argument('var_name')
# @click.option('--xaxis_min', default=0.0, help='')
# @click.option('--xaxis_max', default=1.1, help='')
# @click.option('--xaxis_step', default=0.1, help='')
# @click.option('--annotate_date', help='')
# @click.option('--yr', default=0, help='')
# @click.option('--date', default=-1, help='')
# @click.option('--xlabel', default='', help='')
# @click.option('--title', default='', help='')
# @click.option('--tme_name', default='time', help='')
# @click.option('--show_plot', help='')
# @click.option('--any_time_data', default=True, help='')
# @click.option('--format', default='%.2f', help='')
# @click.option('--land_bg', help='')
# @click.option('--cmap', default=plt.cm.RdBu, help='')
# @click.option('--grid', help='')
# @click.option('--fill_mask', help='')
def plot_map_from_nc(path_nc, out_path, var_name, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
                     annotate_date=False, yr=0, date=-1, xlabel='', title='', tme_name='time', show_plot=False,
                     any_time_data=True, format='%.2f', land_bg=True, cmap=plt.cm.RdBu, grid=False, fill_mask=False):
    """
    Plot var_name variable from netCDF file

    Args:
        path_nc: Name of netCDF file including path
        out_path: Output directory path + file name
        var_name: Name of variable in netCDF file to plot on map
        xaxis_min: colorbar minimum
        xaxis_max: colorbar maximum (exclusive)
        xaxis_step: colorbar step
        annotate_date: stamp `yr` onto the map
        yr: year text used when annotate_date is True
        date: year to plot; -1 selects the last time step
        xlabel: colorbar label
        title: plot title
        tme_name: name of the time dimension in the netCDF file
        show_plot: show interactively instead of saving
        any_time_data: whether the variable has a time dimension at all
        format: colorbar tick label format
            (NOTE(review): shadows the builtin `format`)
        land_bg: draw land mask in white (True) or transparent (False)
        cmap: matplotlib colormap
        grid: draw meridians/parallels
        fill_mask: replace masked values with NaN before plotting
    Returns:
        out_path (the path of the image written); side-effect: save an image
    """
    logger.info('Plotting ' + var_name + ' in ' + path_nc)
    # Read netCDF file and get time dimension
    nc = util.open_or_die(path_nc, 'r', format='NETCDF4')
    lon = nc.variables['lon'][:]
    lat = nc.variables['lat'][:]
    if any_time_data:
        ts = nc.variables[tme_name][:]  # time-series
        if date == -1:  # Plot either the last year {len(ts)-1} or whatever year the user wants
            plot_yr = len(ts) - 1
        else:
            # Convert the requested calendar year into an index into ts.
            plot_yr = date - ts[0]
    # Draw empty basemap
    m = Basemap(projection='robin', resolution='c', lat_0=0, lon_0=0)
    # m.drawcoastlines()
    # m.drawcountries()
    # Find x,y of map projection grid.
    lons, lats = np.meshgrid(lon, lat)
    x, y = m(lons, lats)
    if fill_mask:
        nc_vars = np.ma.filled(nc.variables[var_name], fill_value=np.nan)
    else:
        nc_vars = np.array(nc.variables[var_name])
    # Plot
    # Get data for the last year from the netCDF file array
    if any_time_data:
        mask_data = maskoceans(lons, lats, nc_vars[int(plot_yr), :, :])
    else:
        mask_data = maskoceans(lons, lats, nc_vars[:, :])
    m.etopo()
    if land_bg:
        m.drawlsmask(land_color='white', ocean_color='none', lakes=True)  # land_color = (0, 0, 0, 0) for transparent
    else:
        m.drawlsmask(land_color=(0, 0, 0, 0), ocean_color='none', lakes=True)
    cs = m.contourf(x, y, mask_data, np.arange(xaxis_min, xaxis_max, xaxis_step), cmap=cmap)
    if annotate_date:
        plt.annotate(str(yr), xy=(0.45, 0.1), xycoords='axes fraction', size=20)
    if grid:
        # where labels intersect = [left, right, top, bottom]
        m.drawmeridians(np.arange(-180, 180, 60), labels=[0,0,1,0], labelstyle='+/-', linewidth=0.5)
        m.drawparallels([-40, 0, 40], labels=[1, 0, 0, 0], labelstyle='+/-', linewidth=0.5)
    # Add colorbar
    cb = m.colorbar(cs, "bottom", size="3%", pad='2%', extend='both', drawedges=False, spacing='proportional',
                    format=format)
    cb.set_label(xlabel)
    plt.title(title, y=1.08)
    plt.tight_layout()
    if not show_plot:
        plt.savefig(out_path, dpi=constants.DPI)
        plt.close()
    else:
        plt.show()
    nc.close()
    return out_path
def plot_maps_ts(arr_or_nc, ts, lon, lat, out_path, var_name='', xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
                 save_name='fig', xlabel='', start_movie_yr=-1, title='', tme_name='time', land_bg=True, do_etopo=False,
                 do_log_cb=False, do_jenks=True, cmap=plt.cm.RdBu, grid=False):
    """
    Render one global map per (sub-sampled) year of a 3D (time, lat, lon) dataset.

    Args:
        arr_or_nc: Input can be numpy array or netcdf path
        ts: years to plot (sub-sampled by constants.MOVIE_SEP); ts[0] is the base year
        lon: 1D array of longitudes
        lat: 1D array of latitudes
        out_path: output directory
        var_name: netCDF variable name (used only when arr_or_nc is a path)
        xaxis_min: colorbar minimum (used when do_jenks is False)
        xaxis_max: colorbar maximum
        xaxis_step: colorbar step
        save_name: file-name prefix for the saved pngs
        xlabel: colorbar label
        start_movie_yr: NOTE(review): accepted but unused here -- apparently
            kept for API symmetry with plot_maps_ts_from_path; confirm
        title: plot title
        tme_name: NOTE(review): accepted but unused here
        land_bg: draw land mask in white (True) or transparent (False)
        do_etopo: draw the etopo relief background
        do_log_cb: Draw logarithmic colorbar (true) or not (false). Default: False
        do_jenks: use Jenks natural breaks for the colorbar range
        cmap: matplotlib colormap
        grid: draw meridians/parallels
    Returns:
        List of paths of the png images produced
    """
    logger.info('Plot time-series of maps')
    # Accept either an in-memory 3D array or a path to a netCDF file.
    if isinstance(arr_or_nc, (np.ndarray, np.generic)):
        is_nc = False
        arr = np.copy(arr_or_nc)
    elif os.path.splitext(arr_or_nc)[1] == '.nc':
        is_nc = True
    else:
        # NOTE(review): exits the process on unsupported input instead of raising.
        sys.exit(0)
    list_pngs = []
    base_yr = ts[0]
    # Draw empty basemap
    m = Basemap(projection='robin', resolution='c', lat_0=0, lon_0=0)
    # m.drawcoastlines()
    # m.drawcountries()
    # Find x,y of map projection grid.
    lons, lats = np.meshgrid(lon, lat)
    x, y = m(lons, lats)
    # Plot
    # Get data for the last year from the netCDF file array
    # Sub-sample years by MOVIE_SEP; show a progress bar only for multi-frame runs.
    for yr in tqdm(ts[::constants.MOVIE_SEP], disable=(len(ts[::constants.MOVIE_SEP]) < 2)):
        if do_etopo:
            m.etopo()
        if len(ts) > 1 and not is_nc:
            mask_data = maskoceans(lons, lats, arr[int(yr - base_yr), :, :])
        else:
            if is_nc:
                # Read just this year's 2D slice from disk.
                arr = util.get_nc_var3d(arr_or_nc, var_name, int(yr - base_yr))
            mask_data = maskoceans(lons, lats, arr[:, :])
        # Colorbar breakpoints are recomputed for each frame.
        cb_range = get_cb_range(arr, xaxis_min=xaxis_min, xaxis_max=xaxis_max, xaxis_step=xaxis_step, do_jenks=do_jenks)
        if land_bg:
            m.drawlsmask(land_color='white', ocean_color='aqua', lakes=True)  # land_color = (0, 0, 0, 0) transparent
        else:
            m.drawlsmask(land_color=(0, 0, 0, 0), ocean_color='none', lakes=True)
        if np.any(cb_range < 0.0) or not do_log_cb:
            # If any negative values exist in basemap then do not use log scale
            cs = m.contourf(x, y, mask_data, cb_range, extend='both', cmap=cmap)
        else:
            # manually set log levels e.g. http://matplotlib.org/examples/images_contours_and_fields/contourf_log.html
            lev_exp = np.arange(np.floor(np.log10(mask_data.min()) - 1), np.ceil(np.log10(mask_data.max()) + 1))
            levs = np.power(10, lev_exp)
            cs = m.contourf(x, y, mask_data, levs, norm=colors.LogNorm(), cmap=cmap)
        # Stamp the year onto the frame.
        plt.annotate(str(int(yr)), xy=(0.45, 0.1), xycoords='axes fraction', size=20)
        if grid:
            # where labels intersect = [left, right, top, bottom]
            m.drawmeridians(np.arange(-180, 180, 60), labels=[0, 0, 1, 0], labelstyle='+/-', linewidth=0.5)
            m.drawparallels([-40, 0, 40], labels=[1, 0, 0, 0], labelstyle='+/-', linewidth=0.5)
        # Add colorbar
        cb = m.colorbar(cs, "bottom", size="3%", pad='2%', extend='both', drawedges=False, spacing='uniform')
        # Add label
        cb.set_label(xlabel)
        plt.title(title, y=1.08)
        out_png_name = out_path + os.sep + save_name + '_' + str(int(yr)) + '.png'
        list_pngs.append(out_png_name)
        plt.tight_layout()
        plt.savefig(out_png_name, dpi=constants.DPI)
        plt.close()
    return list_pngs
def plot_maps_ts_from_path(path_nc, var_name, lon, lat, out_path, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
                           save_name='fig', xlabel='', start_movie_yr=-1, title='', do_jenks=True,
                           tme_name='time', land_bg=True, cmap=plt.cm.RdBu, grid=False):
    """
    Plot map for var_name variable from netCDF file
    :param path_nc: Name of netCDF file
    :param var_name: Name of variable in netCDF file to plot on map
    :param lon: List of lon's
    :param lat: List of lat's
    :param out_path: Output directory path + file name
    :param xaxis_min: colorbar minimum
    :param xaxis_max: colorbar maximum (exclusive)
    :param xaxis_step: colorbar step
    :param save_name: file-name prefix for the saved pngs
    :param start_movie_yr: first year to plot; values <= 0 plot the whole series
    :param title: plot title
    :param do_jenks: use Jenks natural breaks for the colorbar range
    :param tme_name: name of the time dimension in the netCDF file
    :param land_bg: draw land mask in white (True) or transparent (False)
    :param cmap: matplotlib colormap
    :param grid: draw meridians/parallels
    :return: List of paths of images produced, side-effect: save an image(s)
    """
    logger.info('Plotting ' + var_name + ' in ' + path_nc)
    util.make_dir_if_missing(out_path)
    # Read netCDF file and get time dimension
    nc = util.open_or_die(path_nc)
    if start_movie_yr > 0:
        ts = nc.variables[tme_name][:].astype(int)  # time-series
        # Drop years before start_movie_yr (assumes ts holds consecutive years).
        ts = ts[start_movie_yr - ts[0]:]
    else:
        ts = nc.variables[tme_name][:]  # time-series
    nc.close()
    # Delegate the per-year rendering to plot_maps_ts.
    return plot_maps_ts(path_nc, ts, lon, lat, out_path, var_name=var_name,
                        xaxis_min=xaxis_min, xaxis_max=xaxis_max, xaxis_step=xaxis_step,
                        save_name=save_name, xlabel=xlabel, do_jenks=do_jenks,
                        start_movie_yr=start_movie_yr, title=title, tme_name=tme_name, land_bg=land_bg, cmap=cmap,
                        grid=grid)
def plot_arr_to_map(path_arr, lon, lat, out_path, var_name='arr', xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
                    plot_type='sequential', annotate_date=False, yr=0, date=-1, xlabel='', title='', tme_name='time',
                    any_time_data=True, format='%.2f', land_bg=True, cmap=plt.cm.RdBu, grid=False, fill_mask=False):
    """
    Plot a 2D array as a map by writing it to a temporary netCDF file first.
    :param path_arr: array (2D) of values to plot
    :param lon: List of lon's
    :param lat: List of lat's
    :param out_path: Output directory path + file name
    :param var_name: Name of variable used inside the temporary netCDF file
    :param xaxis_min: colorbar minimum
    :param xaxis_max: colorbar maximum (exclusive)
    :param xaxis_step: colorbar step
    :param plot_type: NOTE(review): accepted but never forwarded -- confirm
    :param annotate_date: stamp `yr` onto the map
    :param yr: year text used when annotate_date is True
    :param date: year to plot; -1 selects the last time step
    :param xlabel: colorbar label
    :param title: plot title
    :param tme_name: name of the time dimension
    :param any_time_data: Is there any time dimension?
    :param format: colorbar tick label format (shadows the builtin `format`)
    :param land_bg: draw land mask in white (True) or transparent (False)
    :param cmap: matplotlib colormap
    :param grid: draw meridians/parallels
    :param fill_mask: replace masked values with NaN before plotting
    :return: Nothing, side-effect: save an image
    """
    logger.info('Plotting ' + xlabel)
    # Bail if xaxis_min == xaxis_max
    if xaxis_min == xaxis_max:
        return
    # Output diff to netCDF file
    out_nc_path = os.path.split(out_path)[0] + os.sep + 'tmp.nc'
    util.convert_arr_to_nc(path_arr, var_name, lat, lon, out_nc_path)
    # Convert netCDF file to map
    plot_map_from_nc(out_nc_path, out_path, var_name,
                     xaxis_min=xaxis_min, xaxis_max=xaxis_max, xaxis_step=xaxis_step,
                     annotate_date=annotate_date, yr=int(yr),
                     xlabel=xlabel,
                     date=date, title=title, tme_name=tme_name, show_plot=False, any_time_data=any_time_data,
                     format=format, land_bg=land_bg, cmap=cmap, grid=grid, fill_mask=fill_mask)
    # Remove temporary netcdf file
    os.remove(out_nc_path)
def plot_ascii_map(asc, out_path, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1, plot_type='sequential', map_label='',
                   append_name='', xlabel='', title='', var_name='data', skiprows=0, num_lats=constants.NUM_LATS,
                   num_lons=constants.NUM_LONS):
    """
    Convert an ascii grid to a temporary netCDF file and plot it as a map.

    :param asc: path to the ascii grid file
    :param out_path: directory in which intermediate files are written
    :param xaxis_min: colorbar minimum
    :param xaxis_max: colorbar maximum (exclusive)
    :param xaxis_step: colorbar step
    :param plot_type: unused; kept for backward compatibility with callers
    :param map_label: text stamped onto the map (forwarded as the `yr` annotation)
    :param append_name: suffix appended to intermediate/output file names
    :param xlabel: colorbar label
    :param title: plot title
    :param var_name: variable name used inside the temporary netCDF file
    :param skiprows: header rows to skip in the ascii file
    :param num_lats: number of latitude rows in the grid
    :param num_lons: number of longitude columns in the grid
    :return: path of the png map produced
    """
    logger.info('Plot ascii file as map')
    out_nc = util.convert_ascii_nc(asc, out_path + os.sep + 'file_' + append_name + '.nc', skiprows=skiprows,
                                   num_lats=num_lats, num_lons=num_lons, var_name=var_name, desc='netCDF')
    nc_file = util.open_or_die(out_nc)
    nc_file.close()
    path = os.path.dirname(out_path)
    map_path = path + os.sep + var_name + '_' + append_name + '.png'
    # Bug fix: `plot_type` used to be passed positionally into the
    # `annotate_date` slot of plot_map_from_nc (which has no `plot_type`
    # parameter), which then clashed with the explicit annotate_date=True
    # keyword and raised "got multiple values for argument 'annotate_date'".
    # It is dropped from the call; remaining options are passed by keyword.
    plot_map_from_nc(out_nc, map_path, var_name, xaxis_min=xaxis_min, xaxis_max=xaxis_max, xaxis_step=xaxis_step,
                     annotate_date=True, yr=map_label, date=-1, xlabel=xlabel, title=title, any_time_data=False,
                     land_bg=False, cmap=plt.cm.RdBu, grid=True, fill_mask=True)
    os.remove(out_nc)
    return map_path
def plot_LUstate_top_regions(df, xlabel='', ylabel='', title='', out_path='', fname='', vert_yr=[]):
    """
    Plot one time-series per dataframe column onto a shared axes and save.

    :param df: dataframe indexed by time; one column per region/series
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param title: plot title
    :param out_path: output directory
    :param fname: output file name
    :param vert_yr: x positions for dashed vertical marker lines
    :return: None; side-effect: saves out_path/fname and closes the figure
    """
    fig, ax = plt.subplots()
    xvar = df.index
    num_columns = len(df.columns.values)
    # DataFrame.iteritems() was removed in pandas 2.0; items() is the
    # long-standing equivalent. enumerate replaces the manual idx counter.
    for idx, (name_col, col) in enumerate(df.items()):
        # Only the last series triggers axis decoration and saving inside
        # plot_multiple_ts; earlier series just add their line to the axes.
        pos = 'first' if idx == 0 else 'last' if idx == (num_columns - 1) else 'mid'
        plot_multiple_ts(ax, col.values, xvar, out_path + os.sep + fname, title=title, leg_name=name_col,
                         xlabel=xlabel, ylabel=ylabel, vert_yr=vert_yr, pos=pos)
    plt.close(fig)
def plot_activity_matrix(df, cmap, normalized=False, annotate=True, out_path='', title=''):
    """
    Plot activity matrix showing area of land transitioning between land-use types
    :param df: square dataframe; rows are 'FROM' land-use types, columns are 'TO'
    :param cmap: matplotlib colormap for the heatmap
    :param normalized: treat values as fractions and display them as percentages
    :param annotate: write the value inside each heatmap cell
    :param out_path: output image path
    :param title: plot title
    :return: None; side-effect: saves the heatmap and restores matplotlib defaults
    """
    logger.info('Plot activity matrix')
    sns.set(font_scale=0.8)
    formatter = tkr.ScalarFormatter(useMathText=True)
    # normalized scale is from 0 - 100, does not need scientific scale
    if not normalized:
        formatter.set_scientific(True)
        formatter.set_powerlimits((-2, 2))
    df = df * 100.0 if normalized else df * 1.0
    # NOTE(review): vmin uses ceil of the minimum (floor would be the usual
    # choice for a colorbar lower bound) -- confirm intended.
    vmin = math.ceil(np.nanmin(df))
    vmax = math.ceil(np.nanmax(df))  # maximum value on colorbar
    ax = sns.heatmap(df, cbar_kws={'format': formatter}, cmap=cmap,
                     linewidths=.5, linecolor='lightgray', annot=annotate, fmt='.2g', annot_kws={'size': 6}, vmin=vmin,
                     vmax=vmax)
    # for annotation of heat map cells, use: annot=True, fmt='g', annot_kws={'size': 6}
    # ax.invert_yaxis()
    ax.set_ylabel('FROM')
    ax.set_xlabel('TO')
    ax.set_title(title)
    # Keep tick labels horizontal on both axes.
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=0)
    locs, labels = plt.yticks()
    plt.setp(labels, rotation=0)
    plt.savefig(out_path, dpi=constants.DPI)
    plt.close()
    # revert matplotlib params
    sns.reset_orig()
    set_matplotlib_params()
    get_colors(palette='tableau')
# Script entry point: dispatch to the click command group defined above.
if __name__ == '__main__':
    plot_glm()
| StarcoderdataPython |
5061809 | <reponame>drewrisinger/pyGSTi
from ..testutils import BaseTestCase, compare_files, temp_files
import unittest
import pygsti
import numpy as np
import pygsti.extras.rpe as rpe
import pygsti.extras.rpe.rpeconstruction as rc
from pygsti.extras.rpe.rpeconfig_GxPi2_GyPi2_UpDn import rpeconfig_GxPi2_GyPi2_UpDn
from pygsti.extras.rpe.rpeconfig_GxPi2_GyPi2_00 import rpeconfig_GxPi2_GyPi2_00
class TestRPEObjectMethods(BaseTestCase):

    def test_rpe_datasets(self):
        """Smoke-test RPE model / string-list / dataset construction for both
        rpeconfigs.

        NOTE(review): most constructed objects (rpeGS, rpeGS2, rpeGS3,
        lst1-lst3) are never asserted against; this test only verifies that
        construction does not raise.
        """
        model = pygsti.construction.build_explicit_model([('Q0',)],['Gi','Gx','Gy','Gz'],
                                                         [ "I(Q0)","X(pi/8,Q0)", "Y(pi/8,Q0)", "Z(pi/2,Q0)"])
        depol_gateset = model.depolarize(op_noise=0.1,spam_noise=0)
        #test RPE datasets
        rpeconfig_inst_list = [rpeconfig_GxPi2_GyPi2_UpDn,rpeconfig_GxPi2_GyPi2_00]
        for rpeconfig_inst in rpeconfig_inst_list:
            # Parameterized RPE models with/without the "true" flag and with a theta offset.
            rpeGS = rc.make_parameterized_rpe_gate_set(np.pi/2, np.pi/4, 0, 0.1, 0.1, True, rpeconfig_inst=rpeconfig_inst)
            rpeGS2 = rc.make_parameterized_rpe_gate_set(np.pi/2, np.pi/4, 0, 0.1, 0.1, False, rpeconfig_inst=rpeconfig_inst)
            rpeGS3 = rc.make_parameterized_rpe_gate_set(np.pi/2, np.pi/4, np.pi/4, 0.1, 0.1, False, rpeconfig_inst=rpeconfig_inst)
            kList = [0,1,2]
            # Circuit-string lists for each estimated angle.
            lst1 = rc.make_rpe_angle_str_lists(kList, "alpha", rpeconfig_inst)
            lst2 = rc.make_rpe_angle_str_lists(kList, "epsilon", rpeconfig_inst)
            lst3 = rc.make_rpe_angle_str_lists(kList, "theta", rpeconfig_inst)
            lstDict = rc.make_rpe_angle_string_list_dict(2,rpeconfig_inst)
            # Simulated dataset with binomial sampling noise (fixed seed for determinism).
            rpeDS = rc.make_rpe_data_set(depol_gateset,lstDict,1000,
                                         sampleError='binomial',seed=1234)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)
| StarcoderdataPython |
9672404 | from unittest import TestCase
from mlcube.common.utils import StandardPaths
from mlcube_ssh.ssh_metadata import (PythonInterpreter, SystemInterpreter, VirtualEnvInterpreter)
class TestPythonInterpreters(TestCase):
    """Checks the PythonInterpreter registry of available interpreter types."""

    def test_all_interpreters_present(self) -> None:
        # The registry must map exactly the two supported type names
        # ('system', 'virtualenv') to their implementing classes.
        self.assertIsInstance(PythonInterpreter._interpreters, dict)
        self.assertTrue(len(PythonInterpreter._interpreters) == 2)
        self.assertIn('system', PythonInterpreter._interpreters)
        self.assertIs(PythonInterpreter._interpreters['system'], SystemInterpreter)
        self.assertIn('virtualenv', PythonInterpreter._interpreters)
        self.assertIs(PythonInterpreter._interpreters['virtualenv'], VirtualEnvInterpreter)
class TestSystemInterpreter(TestCase):
    """Checks SystemInterpreter construction from configuration dictionaries."""

    def check_state(self, state: dict, interpreter: SystemInterpreter) -> None:
        # Verify the interpreter's fields mirror the expected state dict.
        self.assertIsInstance(interpreter, SystemInterpreter)
        self.assertEqual(state['type'], interpreter.type)
        self.assertEqual(state['python'], interpreter.python)
        self.assertEqual(state['requirements'], interpreter.requirements)

    def test_system_interpreter_default_config(self) -> None:
        # Omitted keys fall back to defaults ('python', empty requirements).
        self.check_state(
            {'type': 'system', 'python': 'python', 'requirements': ''},
            PythonInterpreter.create({'type': 'system'})
        )

    def test_system_interpreter_user_config(self) -> None:
        # Explicit user configuration is passed through unchanged.
        config = {'type': 'system', 'python': 'python3.8', 'requirements': 'click==7.1.2 mlcube==0.2.2'}
        self.check_state(
            config,
            PythonInterpreter.create(config)
        )
class TestVirtualEnvInterpreter(TestCase):
    """Checks VirtualEnvInterpreter construction from configuration dictionaries."""

    def check_state(self, state: dict, interpreter: VirtualEnvInterpreter) -> None:
        # Verify the interpreter's fields (including virtualenv-specific
        # location/name) mirror the expected state dict.
        self.assertIsInstance(interpreter, VirtualEnvInterpreter)
        self.assertEqual(state['type'], interpreter.type)
        self.assertEqual(state['python'], interpreter.python)
        self.assertEqual(state['requirements'], interpreter.requirements)
        self.assertEqual(state['location'], interpreter.location)
        self.assertEqual(state['name'], interpreter.name)

    def test_virtualenv_interpreter_default_config(self):
        # Omitted keys fall back to defaults; location defaults to the
        # standard environments path.
        self.check_state(
            {'type': 'virtualenv', 'python': 'python', 'requirements': '', 'location': StandardPaths.ENVIRONMENTS,
             'name': 'MY_NAME'},
            PythonInterpreter.create({'type': 'virtualenv', 'name': 'MY_NAME'})
        )

    def test_virtualenv_interpreter_user_config(self):
        # Explicit user configuration is passed through unchanged.
        config = {'type': 'virtualenv', 'python': 'python3.8', 'requirements': 'click==7.1.2 mlcube==0.2.2',
                  'location': '/opt/mlcube_resources/environments', 'name': 'docker_runner-0.2.2'}
        self.check_state(
            config,
            PythonInterpreter.create(config)
        )
| StarcoderdataPython |
3545598 | from __future__ import annotations
from functools import partial
from typing import Generic, Tuple
import jax.numpy as jnp
from jax.tree_util import tree_map, tree_reduce
from ..annotations import BooleanNumeric, ComplexNumeric, RealNumeric
from ..dataclasses import dataclass
from ..leaky_integral import leaky_data_weight, leaky_integrate
from ..tools import abs_square, divide_nonnegative
from .augmented import AugmentedState, State
from .combinator import Differentiand, IteratedFunctionWithCombinator
from .iterated_function import Comparand, IteratedFunction, Parameters, Trajectory
__all__ = ['StochasticState', 'StochasticIteratedFunction',
'StochasticIteratedFunctionWithCombinator']
@dataclass
class StochasticState(AugmentedState[State], Generic[State, Comparand]):
    """Augmented state that additionally tracks leaky-integrated first and
    second moments of the comparand, used for stochastic convergence detection."""
    # Leaky running mean of the comparand extracted from visited states.
    mean_state: Comparand
    # Leaky running second moment (mean of |x|^2) of the comparand.
    second_moment_state: Comparand
@dataclass
class StochasticIteratedFunction(
        IteratedFunction[Parameters, State, Comparand, Trajectory,
                         StochasticState[State, Comparand]],
        Generic[Parameters, State, Comparand, Trajectory]):
    """An iterated function whose convergence test tolerates noisy iterates:
    it leaky-integrates the first and second moments of the comparand and
    declares convergence when the implied variance is within tolerance."""

    # Decay rate of the leaky integrator that averages successive iterates.
    convergence_detection_decay: RealNumeric

    # Implemented methods --------------------------------------------------------------------------
    def initial_augmented(self, initial_state: State) -> StochasticState[State, Comparand]:
        """Wrap *initial_state* with zero-initialized moment accumulators."""
        comparand = self.extract_comparand(initial_state)
        zero_comparand = tree_map(jnp.zeros_like, comparand)
        return StochasticState(current_state=initial_state,
                               iterations=0,
                               mean_state=zero_comparand,
                               second_moment_state=zero_comparand)

    def iterate_augmented(self,
                          new_state: State,
                          augmented: StochasticState[State, Comparand]) -> (
                              StochasticState[State, Comparand]):
        """Advance to *new_state* and leaky-integrate the moment accumulators
        with the sufficient statistics of the state being left."""
        def f(value: ComplexNumeric, drift: ComplexNumeric) -> ComplexNumeric:
            # One step of leaky integration, acting as a leaky average.
            return leaky_integrate(value, 1.0, drift, self.convergence_detection_decay,
                                   leaky_average=True)
        mean_state, second_moment_state = self._sufficient_statistics(augmented.current_state)
        new_mean_state = tree_map(f, augmented.mean_state, mean_state)
        new_second_moment_state = tree_map(f, augmented.second_moment_state, second_moment_state)
        return StochasticState(current_state=new_state,
                               iterations=augmented.iterations + 1,
                               mean_state=new_mean_state,
                               second_moment_state=new_second_moment_state)

    def converged(self, augmented: StochasticState[State, Comparand]) -> BooleanNumeric:
        """True when the second moment matches the squared mean (i.e. the leaky
        variance is within tolerance) across every leaf of the comparand."""
        # Scale tolerances by the fraction of "data" accumulated so far.
        data_weight = leaky_data_weight(augmented.iterations, self.convergence_detection_decay)
        mean_squared = tree_map(abs_square, augmented.mean_state)
        return tree_reduce(jnp.logical_and,
                           tree_map(partial(jnp.allclose, rtol=self.rtol * data_weight,
                                            atol=self.atol * data_weight),
                                    augmented.second_moment_state,
                                    mean_squared),
                           True)

    def minimum_tolerances(self,
                           augmented: StochasticState[State, Comparand]) -> Tuple[RealNumeric,
                                                                                  RealNumeric]:
        """
        Returns:
            The minimum value of atol that would lead to convergence now.
            The minimum value of rtol that would lead to convergence now.
        """
        data_weight = leaky_data_weight(augmented.iterations, self.convergence_detection_decay)
        mean_squared = tree_map(abs_square, augmented.mean_state)
        # Leaky variance: E[|x|^2] - |E[x]|^2, per leaf.
        variance = tree_map(jnp.subtract, augmented.second_moment_state, mean_squared)
        scaled_variance = tree_map(divide_nonnegative, variance, mean_squared)
        # Worst (largest) tolerance over all leaves, undoing the data weighting.
        minimum_atol = divide_nonnegative(tree_reduce(jnp.maximum, tree_map(jnp.amax, variance),
                                                      0.0),
                                          data_weight)
        minimum_rtol = divide_nonnegative(tree_reduce(jnp.maximum,
                                                      tree_map(jnp.amax, scaled_variance), 0.0),
                                          data_weight)
        assert not isinstance(minimum_atol, complex)
        assert not isinstance(minimum_rtol, complex)
        return minimum_atol, minimum_rtol

    # Private methods ------------------------------------------------------------------------------
    def _sufficient_statistics(self, state: State) -> Tuple[Comparand, Comparand]:
        """Return (comparand, |comparand|^2) for *state*, leaf-wise."""
        comparand = self.extract_comparand(state)
        squared_comparand = tree_map(abs_square, comparand)
        return comparand, squared_comparand
class StochasticIteratedFunctionWithCombinator(
        IteratedFunctionWithCombinator[Parameters, State, Comparand, Differentiand, Trajectory,
                                       StochasticState[State, Comparand]],
        StochasticIteratedFunction[Parameters, State, Comparand, Trajectory],
        Generic[Parameters, State, Comparand, Differentiand, Trajectory]):
    """Stochastic iterated function that also supports differentiation through
    the fixed point via the combinator machinery; all behaviour comes from the
    two base classes."""
    pass
| StarcoderdataPython |
3373256 | from app import app_search, settings
from django.db import models
from django.db.models import ObjectDoesNotExist
from django.db.models.signals import m2m_changed, post_delete, post_init, post_save
from django.dispatch import receiver
class State(models.Model):
    """A US state; referenced by Park via a many-to-many relation."""
    # State name doubles as the primary key.
    name = models.CharField(primary_key=True, max_length=64)
class Park(models.Model):
    """A national park record, mirrored into Elastic App Search by the signal
    handlers below."""

    id = models.CharField(primary_key=True, max_length=128)
    title = models.TextField()
    description = models.TextField()
    url = models.TextField()
    latitude = models.DecimalField(decimal_places=4, max_digits=8)
    longitude = models.DecimalField(decimal_places=4, max_digits=8)
    area = models.FloatField()
    established = models.DateTimeField()
    world_heritage_site = models.BooleanField()
    visitors = models.IntegerField()
    states = models.ManyToManyField(State)

    def to_app_search(self):
        """Serialize this park into the document dict sent to App Search."""
        return {
            "id": self.id,
            "title": self.title,
            "description": self.description,
            "url": self.url,
            # Geolocation is sent as a single "lat, lon" string.
            "location": f"{self.latitude}, {self.longitude}",
            "area": self.area,
            "established": self.established,
            # Boolean is lowered to the string "true"/"false".
            "world_heritage_site": str(self.world_heritage_site).lower(),
            "visitors": self.visitors,
            "states": [state.name for state in self.states.all()],
        }
@receiver(post_save, sender=Park)
@receiver(m2m_changed, sender=Park.states.through)
def signal_park_index_document(sender, **kwargs):
    """(Re-)index the affected Park in App Search after a save or m2m change."""
    instance = kwargs["instance"]
    # Only m2m_changed carries an 'action'; skip everything but the post_* phases.
    action = kwargs.get("action")
    if action is not None and not action.startswith("post_"):
        return
    payload = instance.to_app_search()
    app_search.client.index_documents(
        engine_name=settings.APP_SEARCH_ENGINE_NAME,
        documents=[payload],
    )
@receiver(post_delete, sender=Park)
def signal_park_delete_document(sender, **kwargs):
    """Drop the deleted Park's document from App Search (missing docs are fine)."""
    park = kwargs["instance"]
    app_search.client.delete_documents(
        engine_name=settings.APP_SEARCH_ENGINE_NAME,
        document_ids=[park.id],
        ignore_status=404,
    )
| StarcoderdataPython |
5021590 | from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from sqlalchemy.types import PickleType
from fonduer.candidates.models.temporarycontext import TemporaryContext
from fonduer.parser.models.context import Context, construct_stable_id
class TemporarySpan(TemporaryContext):
    """The TemporaryContext version of Span"""

    def __init__(self, sentence, char_start, char_end, meta=None):
        """A span of characters [char_start, char_end] (both inclusive) in a sentence."""
        super(TemporarySpan, self).__init__()
        self.sentence = sentence  # The sentence Context of the Span
        self.char_end = char_end
        self.char_start = char_start
        self.meta = meta

    def __len__(self):
        # Length in characters; bounds are inclusive, hence the +1.
        return self.char_end - self.char_start + 1

    def __eq__(self, other):
        # Equal iff same sentence and identical character bounds.
        try:
            return (
                self.sentence == other.sentence
                and self.char_start == other.char_start
                and self.char_end == other.char_end
            )
        except AttributeError:
            # 'other' is not span-like.
            return False

    def __ne__(self, other):
        try:
            return (
                self.sentence != other.sentence
                or self.char_start != other.char_start
                or self.char_end != other.char_end
            )
        except AttributeError:
            return True

    def __hash__(self):
        # Consistent with __eq__: built from the sentence and both bounds.
        return hash(self.sentence) + hash(self.char_start) + hash(self.char_end)

    def get_stable_id(self):
        """Build the stable, human-readable id for this span."""
        return construct_stable_id(
            self.sentence,
            self._get_polymorphic_identity(),
            self.char_start,
            self.char_end,
        )

    def _get_table_name(self):
        return "span"

    def _get_polymorphic_identity(self):
        return "span"

    def _get_insert_query(self):
        # Raw insert used when bulk-persisting temporary contexts.
        return (
            "INSERT INTO span VALUES"
            + "(:id, :sentence_id, :char_start, :char_end, :meta)"
        )

    def _get_insert_args(self):
        return {
            "sentence_id": self.sentence.id,
            "char_start": self.char_start,
            "char_end": self.char_end,
            "meta": self.meta,
        }

    def get_word_start(self):
        """Index of the word containing the span's first character."""
        return self.char_to_word_index(self.char_start)

    def get_word_end(self):
        """Index of the word containing the span's last character."""
        return self.char_to_word_index(self.char_end)

    def get_n(self):
        """Number of words covered by the span."""
        return self.get_word_end() - self.get_word_start() + 1

    def char_to_word_index(self, ci):
        """Return the index of the **word this char is in**"""
        i = None
        for i, co in enumerate(self.sentence.char_offsets):
            if ci == co:
                return i
            elif ci < co:
                return i - 1
        # Falling through means ci lies past the last offset: last word.
        # NOTE(review): returns None when char_offsets is empty — presumably
        # sentences always contain at least one token; confirm upstream.
        return i

    def word_to_char_index(self, wi):
        """Return the character-level index (offset) of the word's start"""
        return self.sentence.char_offsets[wi]

    def get_attrib_tokens(self, a="words"):
        """Get the tokens of sentence attribute *a* covered by this span."""
        return self.sentence.__getattribute__(a)[
            self.get_word_start() : self.get_word_end() + 1
        ]

    def get_attrib_span(self, a, sep=" "):
        """Get the span of sentence attribute *a*."""
        # NOTE: Special behavior for words currently (due to correspondence
        # with char_offsets)
        if a == "words":
            return self.sentence.text[self.char_start : self.char_end + 1]
        else:
            return sep.join(self.get_attrib_tokens(a))

    def get_span(self, sep=" "):
        """The raw text covered by this span."""
        return self.get_attrib_span("words", sep)

    def __contains__(self, other_span):
        # True when other_span lies fully inside this span, in the same sentence.
        return (
            self.sentence == other_span.sentence
            and other_span.char_start >= self.char_start
            and other_span.char_end <= self.char_end
        )

    def __getitem__(self, key):
        """
        Slice operation returns a new candidate sliced according to **char index**

        Note that the slicing is w.r.t. the candidate range (not the abs.
        sentence char indexing).
        """
        if isinstance(key, slice):
            char_start = (
                self.char_start if key.start is None else self.char_start + key.start
            )
            if key.stop is None:
                char_end = self.char_end
            elif key.stop >= 0:
                # Exclusive stop converted to the inclusive char_end convention.
                char_end = self.char_start + key.stop - 1
            else:
                # Negative stop counts back from the end of the span.
                char_end = self.char_end + key.stop
            return self._get_instance(
                char_start=char_start, char_end=char_end, sentence=self.sentence
            )
        else:
            raise NotImplementedError()

    def __repr__(self):
        return '{}("{}", sentence={}, chars=[{},{}], words=[{},{}])'.format(
            self.__class__.__name__,
            self.get_span(),
            self.sentence.id,
            self.char_start,
            self.char_end,
            self.get_word_start(),
            self.get_word_end(),
        )

    def _get_instance(self, **kwargs):
        # Factory hook; Span overrides this to produce persisted instances.
        return TemporarySpan(**kwargs)
class Span(Context, TemporarySpan):
    """
    A span of chars, identified by Context ID and char-index start, end (inclusive).

    char_offsets are **relative to the Context start**
    """

    __tablename__ = "span"

    # Primary key shared with the base Context row (joined-table inheritance).
    id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
    # The sentence Context this span lives in.
    sentence_id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"))
    char_start = Column(Integer, nullable=False)
    char_end = Column(Integer, nullable=False)
    meta = Column(PickleType)

    # A span is unique within its sentence by its character bounds.
    __table_args__ = (UniqueConstraint(sentence_id, char_start, char_end),)

    __mapper_args__ = {
        "polymorphic_identity": "span",
        # Needed because two FKs point at context.id; pick the pk for inheritance.
        "inherit_condition": (id == Context.id),
    }

    sentence = relationship(
        "Context",
        backref=backref("spans", cascade="all, delete-orphan"),
        foreign_keys=sentence_id,
    )

    def _get_instance(self, **kwargs):
        return Span(**kwargs)

    # We redefine these to use default semantics, overriding the operators
    # inherited from TemporarySpan
    def __eq__(self, other):
        return self is other

    def __ne__(self, other):
        return self is not other

    def __hash__(self):
        return id(self)
| StarcoderdataPython |
311309 | <filename>TranskribusDU/graph/FeatureDefinition_PageXml_FeatSelect.py
# -*- coding: utf-8 -*-
"""
Standard PageXml features
Copyright Xerox(C) 2016 <NAME>
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import numpy as np
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import TfidfVectorizer
#from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from .Transformer_PageXml import NodeTransformerTextLen
from .Transformer_PageXml import NodeTransformerXYWH
from .Transformer_PageXml import NodeTransformerXYWH_v2
from .Transformer_PageXml import NodeTransformerNeighbors
from .Transformer_PageXml import Node1HotFeatures
from .Transformer_PageXml import NodeTransformerNeighborsAllText
from .Transformer_PageXml import NodeTransformerTextEnclosed
from .Transformer_PageXml import Edge1HotFeatures
from .Transformer_PageXml import EdgeBooleanFeatures
from .Transformer_PageXml import EdgeBooleanFeatures_v2
from .Transformer_PageXml import EdgeNumericalSelector
from .PageNumberSimpleSequenciality import PageNumberSimpleSequenciality
from .Transformer_PageXml import NodeEdgeTransformer
from .FeatureDefinition import FeatureDefinition
from .FeatureSelection import mutual_information, SelectRobinBest
#Should be able to discriminate between chi2 and mutual info, right ?
#(X, y, discrete_features='auto', n_neighbors=3, copy=True, random_state=None)[source]
#feat_selector=SelectRobinBest(mutual_information
def chi2_scores(X, y):
    """Return only the chi-squared statistics, discarding the p-values.

    Defined as a named module-level function (rather than a lambda) so that it
    can be pickled.
    """
    scores, _pvalues = chi2(X, y)
    return scores
class FeatureDefinition_PageXml_FeatSelect(FeatureDefinition):
    """Node/edge feature extraction for PageXml graphs, with optional feature
    selection applied to the node-text TF-IDF features."""

    def __init__(self, n_tfidf_node=None, t_ngrams_node=None, b_tfidf_node_lc=None
                 , n_tfidf_edge=None, t_ngrams_edge=None, b_tfidf_edge_lc=None
                 ,feat_select=None,text_neighbors=False, n_tfidf_node_neighbors=500
                 ,XYWH_v2=False, edge_features=False):
        """
        n_tfidf_node / t_ngrams_node / b_tfidf_node_lc: feature count, char n-gram
            range and lowercasing flag for the node-text TF-IDF vectorizer.
        feat_select: one of 'chi2', 'mi_rr', 'chi2_rr', 'tf' or None.
        text_neighbors: also vectorize the concatenated text of neighboring nodes.
        n_tfidf_node_neighbors: if > 0, chi2-select that many neighbor-text features.
        XYWH_v2: use the v2 geometric (x, y, w, h) node transformer.
        edge_features: aggregate edge 1-hot/boolean features onto the nodes.
        """
        FeatureDefinition.__init__(self)

        self.n_tfidf_node, self.t_ngrams_node, self.b_tfidf_node_lc = n_tfidf_node, t_ngrams_node, b_tfidf_node_lc
        self.n_tfidf_edge, self.t_ngrams_edge, self.b_tfidf_edge_lc = n_tfidf_edge, t_ngrams_edge, b_tfidf_edge_lc
        self.text_neighbors=text_neighbors
        self.n_tfidf_node_neighbors=n_tfidf_node_neighbors
        self.XYWH_v2=XYWH_v2
        self.edge_features=edge_features

        #TODO n_jobs=4
        # Pick the feature-selection strategy for the node-text TF-IDF features.
        if feat_select=='chi2':
            feat_selector=SelectKBest(chi2, k=self.n_tfidf_node)
        elif feat_select == 'mi_rr':
            print('Using Mutual Information Round Robin as Feature Selection')
            feat_selector=SelectRobinBest(mutual_information,k=self.n_tfidf_node)
            feat_selector_neigh=SelectRobinBest(mutual_information,k=self.n_tfidf_node)
        elif feat_select =='chi2_rr':
            #chi_score = lambda x,y : chi2(x,y)[0] #this can not be pickled ...
            feat_selector=SelectRobinBest(chi2_scores, k=self.n_tfidf_node)
            feat_selector_neigh=SelectRobinBest(chi2_scores, k=self.n_tfidf_node)
        elif feat_select=='tf' or feat_select is None:
            # Plain TF-IDF capped at n_tfidf_node features, no selection step.
            feat_selector=None
        else:
            raise ValueError('Invalid Feature Selection method',feat_select)

        # With a selector, vectorize up to 10000 n-grams then select down;
        # without one, cap the vectorizer itself at n_tfidf_node features.
        if feat_selector:
            tdifNodeTextVectorizer = TfidfVectorizer(lowercase=self.b_tfidf_node_lc,max_features=10000, analyzer = 'char', ngram_range=self.t_ngrams_node) #(2,6)
            text_pipeline = Pipeline([('selector', NodeTransformerTextEnclosed()),
                                      ('tf', tdifNodeTextVectorizer),
                                      ('word_selector',feat_selector),
                                      #('todense', SparseToDense()) #Here we don't need to convert to Dense anymore
                                      ])
        else:
            tdifNodeTextVectorizer = TfidfVectorizer(lowercase=self.b_tfidf_node_lc, max_features=self.n_tfidf_node
                                                     , analyzer = 'char', ngram_range=self.t_ngrams_node #(2,6)
                                                     , dtype=np.float64)
            text_pipeline= Pipeline([('selector', NodeTransformerTextEnclosed()),
                                     ('tf', tdifNodeTextVectorizer),
                                     #('todense', SparseToDense()) #Here we don't need to convert to Dense anymore
                                     ])

        # Base node features: text TF-IDF, text length, neighbor counts, 1-hot flags.
        node_transformer_ops =[("text", text_pipeline) ,
                               ("textlen", Pipeline([
                                   ('selector', NodeTransformerTextLen()),
                                   ('textlen', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                               ])
                                )
                               , ("neighbors", Pipeline([
                                   ('selector', NodeTransformerNeighbors()),
                                   ('neighbors', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                               ])
                                )
                               , ("1hot", Pipeline([
                                   ('1hot', Node1HotFeatures()) #does the 1-hot encoding directly
                               ])
                                )]

        # Geometric features: v1 or v2 bounding-box transformer.
        if self.XYWH_v2 is True:
            feat_xy=("xywh", Pipeline([('selector', NodeTransformerXYWH_v2()),
                                       ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                                       ]) )
        else:
            feat_xy=("xywh", Pipeline([('selector', NodeTransformerXYWH()),
                                       ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
                                       ]) )
        node_transformer_ops.append(feat_xy)

        # Optional TF-IDF over the concatenated text of each node's neighbors.
        if text_neighbors:
            #BY DEFAULT we use chi2
            if self.n_tfidf_node_neighbors>0:
                feat_selector_neigh=SelectKBest(chi2, k=self.n_tfidf_node_neighbors)
                neighborsTextVectorizer = TfidfVectorizer(lowercase=self.b_tfidf_node_lc,analyzer = 'char', ngram_range=self.t_ngrams_node) #(2,6)
                neighbors_text_pipeline = Pipeline([('selector', NodeTransformerNeighborsAllText()),
                                                    ('tf_neighbors', neighborsTextVectorizer),
                                                    ('feat_selector',feat_selector_neigh),
                                                    ])
            else:
                neighborsTextVectorizer = TfidfVectorizer(lowercase=self.b_tfidf_node_lc,analyzer = 'char', ngram_range=self.t_ngrams_node) #(2,6)
                neighbors_text_pipeline = Pipeline([('selector', NodeTransformerNeighborsAllText()),
                                                    ('tf_neighbors', neighborsTextVectorizer)
                                                    ])
            node_transformer_ops.append(('text_neighbors',neighbors_text_pipeline))

        print(node_transformer_ops)
        # Edge features summed onto their incident nodes (optional).
        node_aggregated_edge_features=[('1hot_edge',NodeEdgeTransformer(Edge1HotFeatures(PageNumberSimpleSequenciality()),agg_func='sum')) ]
        node_aggregated_edge_features.append(('boolean_edge',NodeEdgeTransformer(EdgeBooleanFeatures_v2(),agg_func='sum')) )
        #Aggregated Numerical Features do not make a lot of sense here ....

        if edge_features:
            node_transformer_ops.extend(node_aggregated_edge_features)

        print(node_transformer_ops)
        node_transformer = FeatureUnion(node_transformer_ops)

        #Minimal EdgeFeature Here
        lEdgeFeature = [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
            ("1hot", Pipeline([
                ('1hot', Edge1HotFeatures(PageNumberSimpleSequenciality()))
            ])
             )
            , ("boolean", Pipeline([
                ('boolean', EdgeBooleanFeatures())
            ])
               )
            , ("numerical", Pipeline([
                ('selector', EdgeNumericalSelector()),
                ('numerical', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
            ])
               )
        ]
        edge_transformer = FeatureUnion(lEdgeFeature)

        self._node_transformer = node_transformer
        self._edge_transformer = edge_transformer
        self.tfidfNodeTextVectorizer = tdifNodeTextVectorizer

    def getTransformers(self):
        """
        return (node transformer, edge transformer)
        """
        #return self._node_transformer, self._edge_transformer
        return self._node_transformer, self._edge_transformer

    def cleanTransformers(self):
        """
        the TFIDF transformers are keeping the stop words => huge pickled file!!!
        Here the fix is a bit rough. There are better ways....
        JL
        """
        #TODO Better Cleaning for feature selection
        # NOTE(review): hard-codes that 'text' is the first member of the union
        # and 'tf' its second pipeline step — fragile if the union changes.
        self._node_transformer.transformer_list[0][1].steps[1][1].stop_words_ = None #is 1st in the union...
        return self._node_transformer

    @staticmethod
    def getNodeTextSelectedFeatures(node_transformer):
        """Return the n-gram vocabulary (as a numpy array), restricted to the
        selected features when a selection step is present."""
        #I have the impression this implem is not efficient
        #as we still keep the 10,000 words from the vectorizer ...
        #TODO Combine objects CountVectorizer with features selection that update and clean the vocabulary
        text_pipeline =node_transformer.transformer_list[0][1]
        cvect=node_transformer.transformer_list[0][1].named_steps['tf']
        #Index to Word String array
        I2S_array =np.array(cvect.get_feature_names())
        #if hasattr(node_transformer,'feature_selection') and node_transformer.feature_selection is True:
        # NOTE(review): the selection step is registered as 'word_selector' in
        # __init__, yet 'word_select' is tested here, and it is then looked up on
        # node_transformer (a FeatureUnion, which exposes transformer_list, not
        # named_steps) instead of text_pipeline — this branch looks broken or
        # unreachable; confirm the intended step name and owner.
        if 'word_select' in text_pipeline.named_steps:
            fs=node_transformer.named_steps['word_select']
            selected_indices=fs.get_support(indices=True)
            return I2S_array[selected_indices]
        else:
            return I2S_array
| StarcoderdataPython |
11253329 | from __future__ import annotations # To avoid circular import.
from .ulist import read_csv as _read_csv
from typing import List, TYPE_CHECKING
if TYPE_CHECKING: # To avoid circular import.
from . import UltraFastList
def read_csv() -> List[UltraFastList]:
    """Read the CSV via the native reader and wrap each column as an UltraFastList."""
    from . import UltraFastList  # To avoid circular import.
    return list(map(UltraFastList, _read_csv()))
| StarcoderdataPython |
3376271 | <reponame>sthysel/rakali
import functools
import time
def cost(func):
    """Decorator that records the wall-clock cost of each call on the wrapper.

    After every call, ``wrapper.cost`` holds the elapsed seconds of that call
    and ``wrapper.fps`` the equivalent frames-per-second rate.
    """
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        start_time = time.perf_counter()
        value = func(*args, **kwargs)
        end_time = time.perf_counter()
        elapsed = end_time - start_time
        wrapper_timer.cost = elapsed
        # Guard against a zero reading from a coarse clock.
        wrapper_timer.fps = 1 / elapsed if elapsed > 0 else float("inf")
        return value

    # Pre-initialize both attributes so they exist even before the first call
    # (originally only .cost was initialized, so reading .fps early raised).
    wrapper_timer.cost = 0
    wrapper_timer.fps = 0
    return wrapper_timer
class FPS:
    """
    Tracks the wall-clock time spent on per-frame processing.
    """

    def __init__(self):
        self._t0 = None        # perf_counter reading at start()
        self._t1 = None        # perf_counter reading at stop()
        self._frame_count = 0  # number of completed start/stop cycles

    def start(self):
        """Begin timing; returns self so calls can be chained."""
        self._t0 = time.perf_counter()
        return self

    def stop(self):
        """End timing and count one processed frame."""
        self._t1 = time.perf_counter()
        self._frame_count += 1

    def cost(self):
        """
        Elapsed seconds between the last start() and stop().
        """
        return self._t1 - self._t0

    def cost_in_ms(self):
        """
        Elapsed time of the last interval, in milliseconds.
        """
        return 1000 * self.cost()

    def fps(self):
        """The (approximate) frames per second over the last interval."""
        return 1.0 / self.cost()
| StarcoderdataPython |
366972 | import os
import urllib.request
from PIL import Image
# This will create a page with the settings in default_site.py
from bs4 import BeautifulSoup
from django.contrib.auth.models import User
from django.core.files import File
from django.core.management.base import BaseCommand
from filer.models import Image as FilerImage
from api.models import *
from djangocms_blog.cms_appconfig import BlogConfig
from djangocms_blog.models import Post as BlogPost
def create_blog_post(self, results, src, title, search):
supported_images = [".gif", ".png", ".jpg", ".jpeg"]
path = "media/"
image = False
# Google image search has limits
google_search_images = False
# We need a valid big image
page_images = False
# Manual search
manual_search = True
# First attempted not really working
if page_images:
for url in src.split(","):
if url.find("http") != -1:
name = url.split("/")[-1]
try:
urllib.request.urlretrieve(url, path + name)
im = Image.open(path + name)
except:
os.system("rm {}".format(path + name))
continue
width, height = im.size
if width > 850 and height > 400:
with open(path + name, "rb") as f:
file_obj = File(f, name=name)
image = FilerImage.objects.create(owner=User.objects.get(id=1),
original_filename=path + name,
file=file_obj)
os.system("rm {}".format(path + name))
break
else:
os.system("rm {}".format(path + name))
# Not in use limited requests
if google_search_images:
api_key = "<KEY>"
from google_images_search import GoogleImagesSearch
# you can provide API key and CX using arguments,
# or you can set environment variables: GCS_DEVELOPER_KEY, GCS_CX
gis = GoogleImagesSearch(api_key, "ff9add50d98a394d6")
# define search params:
_search_params = {
'q': title,
'num': 1,
# 'safe': 'off', #high|medium|off
# 'fileType': 'png', #jpg|gif|png
# 'imgType': 'photo', #clipart|face|lineart|news|photo
'imgSize': 'XXLARGE', # huge|icon|large|medium|small|xlarge|xxlarge
# 'imgDominantColor': 'white', #black|blue|brown|gray|green|pink|purple|teal|white|yellow
# 'rights': '' #cc_publicdomain|cc_attribute|cc_sharealike|cc_noncommercial|cc_nonderived
}
gis.search(search_params=_search_params)
for image in gis.results():
try:
image.download('media/')
with open(image.path, "rb") as f:
file_obj = File(f, name=image.path.split('/')[1])
image = FilerImage.objects.create(owner=User.objects.get(id=1),
original_filename=image.path,
file=file_obj)
break
except:
pass
image = False | StarcoderdataPython |
1700983 | # https://oj.leetcode.com/problems/reverse-words-in-a-string/
class Solution:
    # @param s, a string
    # @return a string
    def reverseWords(self, s):
        """Return the words of *s* in reverse order, joined by single spaces.

        Leading/trailing whitespace is dropped and runs of spaces collapse,
        matching the original two-pass reversal, but using the standard
        library instead of a hand-rolled character loop (also removes the
        Python-2-only ``xrange``).
        """
        return " ".join(reversed(s.split()))
# TODO: try one-pass
# Quick manual check; print() call form works on both Python 2 and 3
# (the original print statement is a SyntaxError on Python 3).
s = Solution()
print(s.reverseWords(" the sky is blue "))
| StarcoderdataPython |
12822895 | import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
class Data:
    '''Obtains hydro data and preprocesses it.'''

    def data(self, test_len):
        """Load 'pcell.csv', engineer technical indicators, normalize, and split.

        test_len: number of trailing rows reserved for the test split.
        Returns (X_train, X_test, train, test, data) where the X arrays are the
        normalized diff/pct-change features and the others are DataFrames.
        """
        names = ['date', 'price', 'avg_p', 'bid', 'ask',
                 'o', 'h', 'l', 'c', 'avgp', 'vol', 'oms', 'num']
        # get data: first column + columns 6-7 + the remaining columns,
        # reversed into chronological order.
        df = pd.read_csv('pcell.csv', sep=';', header=1).iloc[:,:1]
        df[[1, 2]] = pd.read_csv('pcell.csv', sep=';', header=1).iloc[:,6:8]
        df = pd.concat([df, pd.read_csv('pcell.csv', sep=';', header=1).iloc[:,:-1].drop(
            columns=['Date'])], axis=1).iloc[::-1].reset_index().drop(columns='index')
        df.columns = names
        # Filter out null: forward-fill each column with the last seen value.
        for name in names:
            no_null = []
            # check if null exist in column
            if any(df[name].isnull()):
                # traverse the boolean dataframe
                for i, j in enumerate(df[name].isnull()):
                    if not j:
                        # hold a value from latest non null
                        tmp = df[name].iloc[i]
                        no_null.append(tmp)
                    else:
                        # NOTE(review): if the very first value is null, 'tmp' is
                        # referenced before assignment — presumably the data never
                        # starts with a null; confirm.
                        no_null.append(tmp)
                # put back in dataframe
                df[name] = pd.Series(no_null)
        # Get float from string: convert decimal commas to dots.
        for name in names[1:]:
            if type(df[name].iloc[1]) == str:
                df[name] = pd.Series([float(i.replace(',', '.')) for i in df[name]])
        # Moving averages (simple, window 26).
        ma_sizes = (26,)
        ma = {i: [] for i in ma_sizes}
        for size in ma_sizes:
            for i in range(len(df)):
                if i <= size:
                    # NOTE(review): warm-up uses the average of the WHOLE price
                    # column, not of the first rows — looks unintended; confirm.
                    ma[size].append(np.average(df['price']))
                else:
                    value = sum(df['price'].values[i - size: i]) / size
                    ma[size].append(value)
        # Exponential moving average (seeded with the simple mean of the window).
        smoother = 2
        em_sizes = (12, 20, 26)
        em = {i: [] for i in em_sizes}
        for size in em_sizes:
            em_t = sum(df['price'][:size]) / size
            for i in range(len(df)):
                if i <= size:
                    em[size].append(0)
                else:
                    em_t = (df['price'][i] * (
                        smoother / (1 + size)) + (em_t * (1 - (smoother / (1 + size)))))
                    em[size].append(em_t)
        # MACD: fast-slow EMA difference, its signal line, and the histogram.
        macd1 = [i - j for i, j in zip(em[12], em[26])]
        macd2 = []
        macd3 = []
        em_t = sum(macd1[:9]) / 9
        for i in range(len(macd1)):
            if i <= 9:
                macd2.append(0)
            else:
                # NOTE(review): 'size' here is the leftover loop variable (26),
                # whereas a MACD signal line conventionally uses period 9 —
                # likely a bug; confirm intent.
                em_t = (macd1[i] * (
                    smoother / (1 + size)) + (em_t * (1 - (smoother / (1 + size)))))
                macd2.append(em_t)
        macd3 = [i - j for i, j in zip(macd1, macd2)]
        tech = [ma[26], em[12], em[26], macd1, macd2, macd3]
        names_df2 = ['ma1', 'em1', 'em2', 'md1', 'md2', 'md3']
        names2 = names + names_df2  # NOTE(review): unused
        df2 = pd.DataFrame({i: j for i, j in zip(names_df2, tech)})
        # slice the first 26 rows
        df3 = pd.concat([df, df2], axis=1).iloc[27:]
        # get diff and pct change
        diff = df3[['vol', 'oms', 'num']].diff()
        pct = df3[['bid', 'ask', 'o', 'h', 'l', 'c', 'avgp'] + names_df2].pct_change()
        diff_pct = pd.concat([pct, diff], axis=1)
        # Derived columns get a trailing underscore.
        diff_pct.columns = [
            name + '_' for name in [
                'bid', 'ask', 'o', 'h', 'l', 'c', 'avgp'] + names_df2 + ['vol', 'oms', 'num']]
        df4 = pd.concat([df3, diff_pct], axis=1).iloc[1:].reset_index().drop(columns='index')
        names3 = df4.columns  # NOTE(review): unused
        # clipping outliers to +/- 3 standard deviations
        for name in diff_pct.columns.tolist():
            df4[[name]] = df4[[name]].clip(- 3 *df4[name].std(), 3 * df4[name].std())
        # Normalizing
        # NOTE(review): the scaler is fit on the FULL dataset before the
        # train/test split — test-set leakage; confirm whether intended.
        scaler = StandardScaler()
        norm = scaler.fit_transform(
            df4[list(diff_pct.columns)].values.reshape(-1, len(list(diff_pct.columns))))
        # Add avgp__ to df4 (normalized avgp_ column, index 6 of the feature block)
        df4[['avgp__']] = pd.DataFrame({None: norm[:,6:7].squeeze()})
        # split into train and test: last test_len rows are the test set
        X_train = norm[:len(df4) - test_len]
        X_test = norm[len(df4) - test_len:]
        train = df4.iloc[:len(df4) - test_len]
        test = df4.iloc[len(df4) - test_len:].reset_index().drop(columns='index')
        data = df4
        return X_train, X_test, train, test, data
| StarcoderdataPython |
11352895 | <reponame>imranq2/SparkAutoMapper.FHIR
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class VisionEyesCode(GenericTypeCode):
    """
    VisionEyes
    From: http://hl7.org/fhir/vision-eye-codes in valuesets.xml
        A coded concept listing the eye codes.
    """

    def __init__(self, value: AutoMapperTextInputType):
        """Wrap *value* as a vision-eyes code."""
        super().__init__(value=value)

    """
    http://hl7.org/fhir/vision-eye-codes
    """
    # Canonical URI of the FHIR value set this code type belongs to.
    codeset: FhirUri = "http://hl7.org/fhir/vision-eye-codes"
# Pre-built constants for each code in the value set (pattern used by the
# generator for every generated value set in this package).
class VisionEyesCodeValues:
    """
    Right Eye.
    From: http://hl7.org/fhir/vision-eye-codes in valuesets.xml
    """

    RightEye = VisionEyesCode("right")
    """
    Left Eye.
    From: http://hl7.org/fhir/vision-eye-codes in valuesets.xml
    """
    LeftEye = VisionEyesCode("left")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.