id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6682260 | <gh_stars>0
from django.test import TestCase
from stocks.models import StockTable
class ModelTests(TestCase):
    """Database-backed tests for the stocks app models."""

    def test_check_stocktable(self):
        """A StockTable row can be looked up by its scrip symbol."""
        scrip = 'RELIANCE'
        stock = StockTable.objects.get(scrip=scrip)
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(stock.scrip, scrip)
| StarcoderdataPython |
8169587 | import rp2pio
import adafruit_pioasm
import array
from digitalio import DigitalInOut
from micropython import const
from . import HX711
# PIO program for reading one HX711 sample.  {0} is gain-1 (extra clock
# pulses after the data bits select the next conversion's gain) and {1} is
# the number of data bits minus one.  8 pad bits are shifted in first so
# the 24-bit sample lands in a 32-bit word.
hx711_read_code = """
set x, {0} ; number of cycles for post-readout gain setting
mov osr, x ; put the gain into osr for safe keeping
set x, 7 ; number of pad bits, 0-start
set y, {1} ; number of data bits, 0-start
padloop: ; build front-pad bits for 32-bit Pythonic int alignment
in pins, 1
jmp x-- padloop
wait 0 pin 0 ; wait for the hx711 DAC's cycle-complete signal
mov x, osr ; set up our gain loop counter, also delays first clock edge by a full cycle
bitloop: ; read in those bits!
set pins, 1 [3]
set pins, 0 [1]
in pins, 1
jmp y-- bitloop
gainloop: ; gain set, 1 pulse for default gain
set pins, 1 [3]
set pins, 0
jmp x-- gainloop
"""
HX_DATA_BITS = const(24)        # the HX711 shifts out 24 data bits per sample
HX_INIT_DELAY = const(10)       # presumably a startup delay -- units unconfirmed, TODO verify
HX_MAX_VALUE = const(0x7FFFFF)  # largest positive 24-bit two's-complement value
PAD_MASK = const(0x00FFFFFF)    # keeps the low 24 data bits, discarding the 8 pad bits
COMPLMENT_MASK = const(0x1000000)  # 2**24, subtracted to sign-extend negatives (name is a typo for "COMPLEMENT")
class HX711_PIO(HX711):
    """HX711 load-cell ADC driver that offloads the bit-banged readout to an
    RP2040 PIO state machine.

    NOTE(review): ``gain`` here is the count of extra clock pulses issued
    after the 24 data bits (the HX711's channel/gain select), not the
    amplifier gain factor -- confirm against the HX711 base class.
    """

    def __init__(
        self,
        pin_data: DigitalInOut,
        pin_clk: DigitalInOut,
        *,
        gain: int = 1,
        offset: int = 0,
        scale: int = 1,
        tare: bool = False,
        pio_freq: int = 4000000
    ):
        # Single 32-bit word, reused for every readinto() call.
        self._buffer = array.array('I', [0])
        self._pin_data = pin_data
        self._pin_clk = pin_clk
        self._pio_freq = pio_freq
        # The state machine must exist before the base class (which may read)
        # is initialized.
        self.sm_init(gain)
        super().__init__(gain, offset, scale, tare)

    def sm_init(self, gain: int) -> None:
        """Assemble the PIO program for *gain* and start the state machine."""
        self._pioasm_read = adafruit_pioasm.assemble(
            hx711_read_code.format(gain - 1, HX_DATA_BITS - 1))
        self._sm = rp2pio.StateMachine(
            self._pioasm_read,
            frequency=self._pio_freq,
            first_in_pin=self._pin_data,
            in_pin_count=1,
            first_set_pin=self._pin_clk,
            set_pin_count=1,
            in_shift_right=False,  # shift MSB-first, matching the HX711 bit order
            push_threshold=32,     # 8 pad bits + 24 data bits per pushed word
            auto_push=True
        )

    def sm_deinit(self) -> None:
        """Release the PIO state machine and its pins."""
        self._sm.deinit()

    def read_raw(self, clear_fifo = True) -> int:
        """Return one signed 24-bit sample from the HX711.

        clear_fifo: drop any stale words first so the returned value is fresh.
        """
        if clear_fifo:
            self._sm.clear_rxfifo()
        self._sm.readinto(self._buffer)
        reading_aligned = self._buffer[0] & PAD_MASK # Mask out our pad bits
        if reading_aligned > HX_MAX_VALUE: # Handle two's compliment negative numbers
            reading_aligned -= COMPLMENT_MASK
        return reading_aligned
307493 | import pygame, random
from pygame.locals import *
def on_grid_random():
    """Return a random (x, y) position snapped to the 10-px grid.

    BUG FIX: the original drew coordinates from randint(0, 59), which (after
    snapping with //10*10) confined every spawn to the top-left 60x60 corner
    of the 600x600 board.  Sample the full pixel range instead.
    """
    x = random.randint(0, 590)
    y = random.randint(0, 590)
    return (x // 10 * 10, y // 10 * 10)
def collision(cell_a, cell_b):
    """True when the two 10-px grid cells occupy the same position."""
    return (cell_a[0], cell_a[1]) == (cell_b[0], cell_b[1])
# Movement directions; the snake moves one 10-px grid cell per frame.
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3

pygame.init()
screen = pygame.display.set_mode((600, 600))
pygame.display.set_caption('Code-Save Snake')

# The snake is a list of 10x10 grid cells; index 0 is the head.
snake = [(200, 200), (210, 200), (220, 200)]
snake_skin = pygame.Surface((10, 10))
snake_skin.fill((255, 255, 255))

apple_pos = on_grid_random()
apple = pygame.Surface((10, 10))
apple.fill((255, 0, 0))
# (the unused "pineaple" surface from the original was removed)

my_direction = LEFT
clock = pygame.time.Clock()
font = pygame.font.Font('freesansbold.ttf', 18)
score = 0
game_over = False

while not game_over:
    clock.tick(30)

    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
        if event.type == KEYDOWN:
            if event.key == K_UP:
                my_direction = UP
            if event.key == K_DOWN:
                my_direction = DOWN
            if event.key == K_LEFT:
                my_direction = LEFT
            if event.key == K_RIGHT:
                my_direction = RIGHT

    # Eat the apple: respawn it, grow the tail, score one point.
    # BUG FIX: the original duplicated this check and "incremented" the score
    # with the no-op expression "score + score + 1".
    if collision(snake[0], apple_pos):
        apple_pos = on_grid_random()
        snake.append((0, 0))
        score = score + 1

    # Shift body segments toward the head, back to front.
    for i in range(len(snake) - 1, 0, -1):
        snake[i] = (snake[i - 1][0], snake[i - 1][1])

    # Hitting a wall ends the game.
    if snake[0][0] == 600 or snake[0][1] == 600 or snake[0][0] < 0 or snake[0][1] < 0:
        game_over = True
        break

    # Hitting our own body also ends the game.
    for i in range(1, len(snake) - 1):
        if snake[0][0] == snake[i][0] and snake[0][1] == snake[i][1]:
            game_over = True

    # Advance the head one cell in the current direction.
    if my_direction == UP:
        snake[0] = (snake[0][0], snake[0][1] - 10)
    if my_direction == DOWN:
        snake[0] = (snake[0][0], snake[0][1] + 10)
    if my_direction == RIGHT:
        snake[0] = (snake[0][0] + 10, snake[0][1])
    if my_direction == LEFT:
        snake[0] = (snake[0][0] - 10, snake[0][1])

    # Draw the frame: clear first, then grid, apple, snake, score.
    # BUG FIX: the original filled the screen *after* rendering the score
    # (erasing it every frame) and drew the vertical grid lines twice --
    # the second loop used (y, 0)-(y, 600) instead of (0, y)-(600, y).
    screen.fill((0, 0, 0))
    for x in range(0, 600, 10):
        pygame.draw.line(screen, (40, 40, 40), (x, 0), (x, 600))
    for y in range(0, 600, 10):
        pygame.draw.line(screen, (40, 40, 40), (0, y), (600, y))
    screen.blit(apple, apple_pos)
    for pos in snake:
        screen.blit(snake_skin, pos)
    score_font = font.render('Score: %s' % score, True, (255, 255, 255))
    score_rect = score_font.get_rect()
    score_rect.topleft = (600 - 120, 10)
    screen.blit(score_font, score_rect)
    pygame.display.update()

# Game-over screen: flash the banner until the window is closed.
game_over_font = pygame.font.Font('freesansbold.ttf', 75)
game_over_screen = game_over_font.render('Game Over', True, (255, 0, 0))
game_over_rect = game_over_screen.get_rect()
game_over_rect.midtop = (600 / 2, 10)
while True:
    screen.blit(game_over_screen, game_over_rect)
    pygame.display.update()
    pygame.time.wait(500)
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            exit()
| StarcoderdataPython |
4891642 | <reponame>ExpressApp/pybotx
import uuid
import pytest
# Run every coroutine test in this module under pytest-asyncio.
pytestmark = pytest.mark.asyncio
async def test_internal_bot_notification(client, message):
    """The text passed to internal_bot_notification is forwarded unchanged."""
    await client.bot.internal_bot_notification(
        credentials=message.credentials,
        group_chat_id=uuid.uuid4(),
        text="ping",
        sender=None,
        recipients=None,
        opts=None,
    )
    sent = client.messages[0]
    assert sent.data.message == "ping"
| StarcoderdataPython |
3385134 | #!/usr/bin/env python
# vim: et ts=4 sw=4
from django import template
register = template.Library()
from ..column import WrappedColumn
@register.inclusion_tag("djtables/cols.html")
def table_cols(table):
    """Build the context for the djtables/cols.html inclusion template."""
    wrapped = [WrappedColumn(table, col) for col in table.columns]
    return {"columns": wrapped}
@register.inclusion_tag("djtables/head.html")
def table_head(table):
    """Build the context for the djtables/head.html inclusion template."""
    wrapped = [WrappedColumn(table, col) for col in table.columns]
    return {"columns": wrapped}
@register.inclusion_tag("djtables/body.html")
def table_body(table):
    """Build the context for the djtables/body.html inclusion template."""
    context = {"rows": table.rows}
    context["num_columns"] = len(table.columns)
    return context
@register.inclusion_tag("djtables/foot.html")
def table_foot(table):
    """Build the pagination context for the djtables/foot.html template."""
    pager = Paginator(table)
    return {
        "num_columns": len(table.columns),
        "page": pager.current(),
        "paginator": pager,
    }
class Paginator(object):
    """Thin pagination helper wrapping a table's built-in paginator."""

    def __init__(self, table):
        self.table = table

    @property
    def num_pages(self):
        """Total number of pages, delegated to the table's paginator."""
        return self.table.paginator.num_pages

    def current(self):
        """Page object for the page currently selected on the table."""
        return Page(self, self.table._meta.page)

    def first(self):
        """Page object for the first page."""
        return Page(self, 1)

    def last(self):
        """Page object for the last page."""
        return Page(self, self.num_pages)
class Page(object):
    """One page within a Paginator, aware of its neighbours and URL."""

    def __init__(self, paginator, number):
        self.paginator = paginator
        self.number = number

    @property
    def is_first(self):
        """True when this is page 1."""
        return self.number == 1

    @property
    def is_last(self):
        """True when this is the paginator's final page."""
        return self.number == self.paginator.num_pages

    def previous(self):
        """The preceding Page, or None when already on the first page."""
        if self.is_first:
            return None
        return Page(self.paginator, self.number - 1)

    def next(self):
        """The following Page, or None when already on the last page."""
        if self.is_last:
            return None
        return Page(self.paginator, self.number + 1)

    def url(self):
        """URL of this page, delegated to the owning table."""
        return self.paginator.table.get_url(page=self.number)
| StarcoderdataPython |
1865267 | <filename>main/config.py
import os
class BaseConfig(object):
    """Shared Flask configuration; environment-specific classes override flags."""

    # Project root: the directory above the one containing this file.
    basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

    # Conservative defaults; subclasses flip these per environment.
    DEBUG = False
    TESTING = False
    SQLALCHEMY_ECHO = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    # SQLALCHEMY_DATABASE_URI: separate SQLite files per bind.
    SQLALCHEMY_ENGINE = 'sqlite://'
    SQLALCHEMY_BINDS = {
        'blog': 'sqlite:///' + os.path.join(basedir,'database', 'blog.db'),
        'security': 'sqlite:///' + os.path.join(basedir,'database', 'security.db')
    }

    # flask-blogging settings; SECRET_KEY is also used by WTF-forms and login.
    SECRET_KEY = "test a secret" # for WTF-forms and login
    BLOGGING_URL_PREFIX = "/blog"
    BLOGGING_DISQUS_SITENAME = "test"
    BLOGGING_SITEURL = "http://localhost:8000"

    # flask-security -- NOTE(review): these are scrubbed "<PASSWORD>"
    # placeholders; real values must come from config.cfg overrides.
    SECURITY_PASSWORD_HASH = "<PASSWORD>"
    SECURITY_PASSWORD_SALT = "<PASSWORD>"
class DevelopmentConfig(BaseConfig):
    """Local development: verbose debugging and SQL echo enabled."""
    DEBUG = True
    TESTING = True
    SQLALCHEMY_ECHO = True
class TestingConfig(BaseConfig):
    """Test runs: testing mode on, but without debug output."""
    DEBUG = False
    TESTING = True
# Maps a configuration name (selected via environment variable in
# configure_app) to the dotted path of the config class to load.
config = {
    "development": "main.config.DevelopmentConfig",
    "testing": "main.config.TestingConfig",
    "default": "main.config.DevelopmentConfig"
}
def configure_app(app):
    """Apply the environment-selected configuration class to *app*.

    The configuration name comes from the FLASK_CONFIGURATION environment
    variable (falling back to 'default').  BUG FIX: the variable name was
    misspelled 'FLAKS_CONFIGURATION'; the misspelling is still honored for
    backward compatibility with existing deployments.
    """
    config_name = os.getenv('FLASK_CONFIGURATION',
                            os.getenv('FLAKS_CONFIGURATION', 'default'))
    print(config[config_name])
    app.config.from_object(config[config_name])
    # Optional per-instance overrides; a missing config.cfg is ignored.
    app.config.from_pyfile('config.cfg', silent=True)
    if config_name == 'default':
        # Dump the effective configuration when developing locally.
        for key, value in app.config.items():
            print(key, value)
| StarcoderdataPython |
4936991 | <reponame>Pyromanser/django-staging<gh_stars>0
import random
from django.db import models
from django import forms
from staging.generators import BaseGenerator
class Generator(BaseGenerator):
    """Staging-data generator producing random street addresses for CharFields."""
    name = 'Random address'
    slug = 'random-address'
    for_fields = [models.CharField]
    options_form = None

    def __init__(self):
        # Values handed out so far, so unique fields never repeat.
        self.generated = []
        self.streats = self._get_streats()

    def save(self, obj, field, form_data):
        """Assign a random address to *field* on *obj*, honoring uniqueness."""
        if field.unique:
            setattr(obj, field.name, self._generate_unique())
        else:
            setattr(obj, field.name, self._generate())

    def _generate(self):
        # e.g. "123 Main": house number 1..1000 plus a random street name.
        return '%s %s' % (random.randint(1, 1000), random.choice(self.streats))

    def _generate_unique(self):
        # Bounded retry loop.  NOTE(review): falls through and returns None
        # if no unused value is found within 10000 attempts.
        for _ in range(10000):
            value = self._generate()
            if value not in self.generated:
                self.generated.append(value)
                return value

    def _get_streats(self):
        # NOTE(review): split() breaks on any whitespace, so a multi-word
        # street name in _streats.txt becomes several separate entries.
        with open(self.rel_path('_streats.txt'), 'r') as f:
            return f.read().split()
| StarcoderdataPython |
1759651 | <gh_stars>1-10
from graphviz import Digraph
# Target-system OTA -- renders accepting states as double circles.
def makeOTA(data, filePath, fileName):
    """Render the one-clock timed automaton *data* with Graphviz and open it."""
    dot = Digraph()
    for state in data.states:
        if state in data.acceptStates:
            dot.node(name=str(state), label=str(state), shape='doublecircle')
        else:
            dot.node(name=str(state), label=str(state))
    for tran in data.trans:
        # Edge label: input symbol, clock guards, reset flag.
        tranLabel = " " + str(tran.input) + " " + tran.showGuards() + " " + str(tran.isReset)
        dot.edge(str(tran.source), str(tran.target), tranLabel)
    newFilePath = filePath + fileName
    dot.render(newFilePath, view=True)
# Learned (hypothesis) OTA -- accepting states only, sink state hidden.
def makeLearnedOTA(data, filePath, fileName):
    """Render the learned OTA, omitting the sink state and its transitions."""
    dot = Digraph()
    states = []
    for state in data.states:
        if state != data.sinkState:
            states.append(state)
    for s in states:
        if s in data.acceptStates:
            dot.node(name=str(s), label=str(s), shape='doublecircle')
        else:
            dot.node(name=str(s), label=str(s))
    for tran in data.trans:
        # Skip any transition touching the sink state.
        if tran.source != data.sinkState and tran.target != data.sinkState:
            tranLabel = " " + str(tran.input) + " " + tran.showGuards() + " " + str(tran.isReset)
            dot.edge(str(tran.source), str(tran.target), tranLabel)
    newFilePath = filePath + fileName
    dot.render(newFilePath, view=True)
# Completed OTA -- accepting states plus the sink state (drawn as a diamond).
def makeFullOTA(data, filePath, fileName):
    """Render the completed OTA including its sink state."""
    dot = Digraph()
    for s in data.states:
        if s in data.acceptStates:
            dot.node(name=str(s), label=str(s), shape='doublecircle')
        elif s == data.sinkState:
            dot.node(name=str(s), label=str(s), shape='diamond')
        else:
            dot.node(name=str(s), label=str(s))
    for tran in data.trans:
        tranLabel = " " + str(tran.input) + " " + tran.showGuards() + " " + str(tran.isReset)
        dot.edge(str(tran.source), str(tran.target), tranLabel)
    newFilePath = filePath + fileName
    dot.render(newFilePath, view=True)
# Intersection OTA of two automata -- accepting states plus sink.
# Unlike the plain OTA renderers above, states here are objects carrying a
# .stateName attribute.
def makeMergeOTA(data, filePath, fileName):
    """Render the product (intersection) OTA including its sink state."""
    dot = Digraph()
    for state in data.states:
        if state.stateName in data.acceptStates:
            dot.node(name=str(state.stateName), label=str(state.stateName), shape='doublecircle')
        elif state.stateName == data.sinkState:
            dot.node(name=str(state.stateName), label=str(state.stateName), shape='diamond')
        else:
            dot.node(name=str(state.stateName), label=str(state.stateName))
    for tran in data.trans:
        tranLabel = " " + str(tran.input) + " " + tran.showGuards() + " " + str(tran.isReset)
        dot.edge(str(tran.source), str(tran.target), tranLabel)
    newFilePath = filePath + fileName
    dot.render(newFilePath, view=True)
# Intersection OTA of two automata -- same as makeMergeOTA but with the
# sink state and its transitions hidden from the drawing.
def makeMergeOTA1(data, filePath, fileName):
    """Render the product (intersection) OTA without its sink state."""
    dot = Digraph()
    for state in data.states:
        if state.stateName in data.acceptStates:
            dot.node(name=str(state.stateName), label=str(state.stateName), shape='doublecircle')
        elif state.stateName == data.sinkState:
            pass
        else:
            dot.node(name=str(state.stateName), label=str(state.stateName))
    for tran in data.trans:
        if tran.source != data.sinkState and tran.target != data.sinkState:
            tranLabel = " " + str(tran.input) + " " + tran.showGuards() + " " + str(tran.isReset)
            dot.edge(str(tran.source), str(tran.target), tranLabel)
    newFilePath = filePath + fileName
    dot.render(newFilePath, view=True)
# Complement of the intersection OTA -- accepting, sink (diamond) and
# error (box) states are each drawn with a distinct shape.
def makeComplementOTA(data, filePath, fileName):
    """Render the complement OTA including sink and error states."""
    dot = Digraph()
    for state in data.states:
        if state.stateName in data.acceptStates:
            dot.node(name=str(state.stateName), label=str(state.stateName), shape='doublecircle')
        elif state.stateName == data.sinkState:
            dot.node(name=str(state.stateName), label=str(state.stateName), shape='diamond')
        elif state.stateName == data.errorState:
            dot.node(name=str(state.stateName), label=str(state.stateName), shape='box')
        else:
            dot.node(name=str(state.stateName), label=str(state.stateName))
    for tran in data.trans:
        tranLabel = " " + str(tran.input) + " " + tran.showGuards() + " " + str(tran.isReset)
        dot.edge(str(tran.source), str(tran.target), tranLabel)
    newFilePath = filePath + fileName
    dot.render(newFilePath, view=True)
# Complement of the intersection OTA -- accepting and error states shown,
# sink state and its transitions hidden.
def makeComplementOTA1(data, filePath, fileName):
    """Render the complement OTA without its sink state."""
    dot = Digraph()
    for state in data.states:
        if state.stateName in data.acceptStates:
            dot.node(name=str(state.stateName), label=str(state.stateName), shape='doublecircle')
        elif state.stateName == data.sinkState:
            pass
        elif state.stateName == data.errorState:
            dot.node(name=str(state.stateName), label=str(state.stateName), shape='box')
        else:
            dot.node(name=str(state.stateName), label=str(state.stateName))
    for tran in data.trans:
        if tran.source != data.sinkState and tran.target != data.sinkState:
            tranLabel = " " + str(tran.input) + " " + tran.showGuards() + " " + str(tran.isReset)
            dot.edge(str(tran.source), str(tran.target), tranLabel)
    newFilePath = filePath + fileName
    dot.render(newFilePath, view=True)
| StarcoderdataPython |
8037987 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>, <NAME>
# *****************************************************************************/
"""
Brief:
intelTelemetryParser.py - Generic parser definitions for parsing telemetry data object blobs
Description:
This file contains the base class and function definitions needed to build a parser
for a telemetry data object
Classes:
Enter GetHeaders("parsers\\intelTelemetryDataObject.py") to display Class listings.
"""
from __future__ import absolute_import, division, print_function, unicode_literals # , nested_scopes, generators, generator_stop, with_statement, annotations
import os
import sys
import importlib
from ctypes import *
from array import array
################################################################################################################
################################################################################################################
OBJECT_HEADER_V2_SIZE = 12
class telemetryStruct_union(Union):
    """Base union that fills its ``Bytes`` member from a file or byte buffer.

    Subclasses supply the real ``_fields_`` (including a ``Bytes`` array
    overlaying the structured view); this base class only provides the
    generic fill logic.
    """
    _pack_ = 1
    _fields_ = [
        # parser developers should create the Bytes and Fields entries
    ]

    def __init__(self, imageObject=None, maxBytes=None, ignoreSizeMismatch=False, imageBuffer=None, file=None, returnBlockSize=None):
        """Populate the union's ``Bytes`` member from *imageObject*.

        imageObject: an open binary file (read via array.fromfile) or any
            indexable byte sequence.
        maxBytes: cap on the number of bytes to copy (defaults to the full
            size of ``Bytes``).
        ignoreSizeMismatch: suppress the size-mismatch warnings.
        imageBuffer: deprecated alias for imageObject (kept for callers that
            used the old keyword).
        returnBlockSize: number of bytes to read when imageObject is a file.
        """
        Union.__init__(self)
        # Backward compatibility: some callers passed data via imageBuffer.
        if imageObject is None:
            imageObject = imageBuffer
        # BUG FIX: the original gated the fill on ``imageBuffer is not None``
        # while reading from imageObject, so data passed positionally was
        # silently ignored.
        if hasattr(self, "Bytes") and (imageObject is not None):
            if maxBytes is None:
                dataSize = len(self.Bytes)
            else:
                dataSize = min(maxBytes, len(self.Bytes))
                if (maxBytes > len(self.Bytes)) and (ignoreSizeMismatch == False):
                    print("WARNING: Input structure size is less than input data buffer size. Buffer truncated.\n")
                if (maxBytes < len(self.Bytes)) and (ignoreSizeMismatch == False):
                    print("WARNING: Input data buffer size is less than input structure size. Structure not completely filled.\n")
            # BUG FIX: the original tested ``isinstance(imageObject, file)``,
            # but ``file`` is a parameter (default None), not a type, which
            # raised TypeError.  Duck-type on a file-like ``read`` instead.
            if hasattr(imageObject, "read"):
                # Read raw bytes from the current file position.
                dataBuffer = array('B')
                count = returnBlockSize if returnBlockSize is not None else dataSize
                dataBuffer.fromfile(imageObject, count)
            else:
                # Assume the object is already an indexable byte sequence.
                dataBuffer = imageObject
            # Fill the structure byte by byte.
            for i in range(0, dataSize):
                self.Bytes[i] = dataBuffer[i]
class intelTelemetryDataObjectHeaderV2_0_struct(Structure):
    """Intel Telemetry V2.0 data-object header layout (12 packed bytes)."""
    _pack_ = 1
    _fields_ = [
        ("idNumber", c_uint32),      # Identification number of the object
        ("majorVersion", c_uint16),  # Major version number of the object
        ("minorVersion", c_uint16),  # Minor version number of the object
        ("cpuId", c_uint8),          # CPU ID number associated with the object
        ("reserved", c_uint8 * 3),   # Reserved for future expansion and data alignment
    ]

    def getIdNumber(self):
        """Return the object identification number."""
        return self.idNumber

    def getMajorVersion(self):
        """Return the major version number."""
        return self.majorVersion

    def getMinorVersion(self):
        """Return the minor version number."""
        return self.minorVersion

    def getVersionName(self, includeMinor=True):
        """Return the version as 'Vx_y', or 'Vx' when includeMinor is False."""
        if includeMinor:
            return 'V' + str(self.majorVersion) + '_' + str(self.minorVersion)
        return 'V' + str(self.majorVersion)

    def getCpuId(self):
        """Return the CPU ID associated with the object."""
        return self.cpuId

    def tostr(self):
        """Return a human-readable summary of the header fields.

        BUG FIX: the original built the string but never returned it,
        so tostr() always yielded None.
        """
        retstr = "Object ID: " + str(self.idNumber) + "\n"
        retstr += "Version : " + str(self.majorVersion) + "." + str(self.minorVersion) + "\n"
        retstr += "CPU ID : " + str(self.cpuId) + "\n"
        return retstr
class intelTelemetryDataObjectHeaderV2_0_union(telemetryStruct_union):
    """Union overlaying the V2.0 header structure with its raw bytes."""
    _pack_ = 1
    _fields_ = [
        ("header", intelTelemetryDataObjectHeaderV2_0_struct),  # structured view
        ("Bytes", c_ubyte * OBJECT_HEADER_V2_SIZE),             # raw-byte view (fill target)
    ]

    def __init__(self, imageBuffer=None):
        """Fill the union from *imageBuffer* (open binary file or byte sequence).

        BUG FIX: the original called ``self.telemetryStruct_union(...)``,
        which is not an attribute and raised AttributeError; the base-class
        initializer must be invoked explicitly.
        """
        telemetryStruct_union.__init__(
            self, imageObject=imageBuffer, maxBytes=len(self.Bytes))

    def getStruct(self):
        """Return the structured header view."""
        return self.header

    def getSize(self):
        """Return the header size in bytes."""
        return len(self.Bytes)
class DataParserV2_0(object):
    """Parse a binary telemetry file that starts with a V2.0 object header.

    Opens the file, validates the header's object ID, and optionally loads a
    version-specific parsing structure resolved by name from another module.
    """

    def _createParseStructure(self, moduleName, versionedStructName, file, objectSize, ignoreSizeMismatch):
        """Resolve and instantiate a telemetryStruct_union subclass by name.

        moduleName: module containing the class named *versionedStructName*.
        versionedStructName: name (string) of the class to create.
        file: open binary file positioned at the object payload.
        objectSize: number of bytes to load.
        ignoreSizeMismatch: suppress size-mismatch warnings during the fill.
        Returns the populated instance, or None on error.
        """
        try:
            parseModule = importlib.import_module(moduleName)
        except ImportError:
            print("Unable to find module %s\n" % moduleName)
            return None
        # BUG FIX: the original called issubclass() on the *string* class
        # name (raising TypeError) and only resolved it afterwards.  Resolve
        # the attribute first, then validate the resolved class.
        try:
            versionedStruct = getattr(parseModule, versionedStructName)
        except AttributeError:
            print("Versioned class %s does not exist in module %s\n" % (versionedStructName, moduleName))
            return None
        if issubclass(versionedStruct, telemetryStruct_union):
            return versionedStruct(file, objectSize, ignoreSizeMismatch)
        print("Class %s must be a subclass of telemetryStruct_union\n" % versionedStructName)
        return None

    def _openFile(self, filename):
        """Open *filename* for binary reading.

        Returns (file object, size in bytes), or (None, 0) on failure.
        """
        try:
            file = open(filename, "rb")
            fileSize = os.stat(filename).st_size
            return file, fileSize
        except IOError:
            print("Error: Unable to open \"%s\"." % filename)
            return None, 0

    def __init__(self, expectedId, filename, moduleName=None, baseStructName=None, ignoreSizeMismatch=False):
        """Open, read and begin parsing *filename*.

        expectedId: object identification number expected in the header.
        moduleName / baseStructName: if both given, immediately instantiate
            the versioned parsing class named baseStructName + 'Vx_y', where
            the version suffix comes from the file header.
        ignoreSizeMismatch: suppress size-mismatch warnings during the fill.
        """
        # Defaults so attributes exist even when opening/validation fails.
        self.parseStruct = None
        self.fileSize = 0
        self.union = None
        self.header = None
        self.objectSize = 0

        self.file, self.fileSize = self._openFile(filename)
        if self.fileSize > 0:
            # Read the header and remember how much payload follows it.
            self.union = intelTelemetryDataObjectHeaderV2_0_union(self.file)
            self.header = self.union.getStruct()
            self.objectSize = self.fileSize - self.union.getSize()
            # Verify the header identifies the expected object.
            if self.header.getIdNumber() != expectedId:
                print("Error: Object identifier mismatch. Expected %d, Read %d." % (expectedId, self.header.getIdNumber()))
            elif (moduleName is not None) and (baseStructName is not None):
                self.parseStruct = self._createParseStructure(
                    moduleName,
                    baseStructName + self.header.getVersionName(),
                    self.file,
                    self.objectSize,
                    ignoreSizeMismatch,
                )

    def FillDataStructure(self, moduleName, baseStructName, includeMinor=True, ignoreSizeMismatch=False):
        """Instantiate (and remember) the versioned parsing structure on demand."""
        self.parseStruct = self._createParseStructure(
            moduleName,
            baseStructName + self.header.getVersionName(includeMinor),
            self.file,
            self.objectSize,
            ignoreSizeMismatch,
        )
        return self.parseStruct

    def getFile(self):
        """Return the open file object (positioned after whatever was read)."""
        return self.file

    def getVersionName(self, includeMinor=True):
        """Return the header's version name, e.g. 'V2_0'."""
        return self.header.getVersionName(includeMinor)

    def getObjectSize(self):
        """Return the payload size (file size minus header size)."""
        return self.objectSize
| StarcoderdataPython |
class UndefinedMockBehaviorError(Exception):
    """Raised when a mock is used in a way no behavior was defined for."""
class MethodWasNotCalledError(Exception):
    """Raised when an expected method call never happened."""
| StarcoderdataPython |
8112790 | <reponame>RachidStat/PyCX<filename>ds-exponential-growth.py
from pylab import *
a = 1.1  # per-step growth factor of the exponential map x(t+1) = a * x(t)
def initialize():
    """Reset the global state x to 1.0 and start a fresh result trace."""
    global x, result
    x = 1.
    result = [x]
def observe():
    """Append the current global state x to the result trace."""
    global x, result
    result.append(x)
def update():
    """Advance the state one step: multiply x by the growth factor a."""
    global x, result
    x = a * x
# Run 30 growth steps and plot the resulting trajectory.
initialize()
for t in range(30):
    update()
    observe()
plot(result)
show()
| StarcoderdataPython |
313806 | <gh_stars>100-1000
__________________________________________________________________________________________________
sample 104 ms submission
class Solution:
    def flipgame(self, fronts: List[int], backs: List[int]) -> int:
        """LeetCode 822: return the smallest value that can face down on every
        card, or 0 when no such value exists.

        A value is unusable iff some single card carries it on both sides
        (flipping that card can never hide it).  Any other value appearing on
        some card works.  (Dead commented-out debug code removed.)
        """
        same = {x for i, x in enumerate(fronts) if x == backs[i]}
        ans = float('inf')
        for x in fronts + backs:
            if x not in same:
                ans = min(ans, x)
        return ans if ans != float('inf') else 0
# for x in itertools.chain(fronts, backs):
__________________________________________________________________________________________________
sample 112 ms submission
class Solution:
    def flipgame(self, fronts: List[int], backs: List[int]) -> int:
        """Smallest value not locked onto a double-sided card, else 0."""
        blocked = set()     # values printed on both sides of a single card
        candidates = set()  # every value seen on a card with distinct sides
        for front, back in zip(fronts, backs):
            if front == back:
                blocked.add(front)
            else:
                candidates.update((front, back))
        usable = candidates - blocked
        return min(usable) if usable else 0
__________________________________________________________________________________________________
| StarcoderdataPython |
6639555 | #Django Imports
from django.conf import settings
#Python Imports
import requests, os
#Local Imports
from .at_utils import AfricasTalkingException
#Import Afica's Talking Settings
# Africa's Talking account settings pulled from Django settings.
AFRICAS_TALKING_SETTINGS = getattr(settings,'AFRICAS_TALKING',{})
API_KEY = AFRICAS_TALKING_SETTINGS.get('API_KEY',None)
USERNAME = AFRICAS_TALKING_SETTINGS.get('USERNAME',None)
SHORTCODE = AFRICAS_TALKING_SETTINGS.get('SHORTCODE',None)
AFRICAS_TALKING_SEND = AFRICAS_TALKING_SETTINGS.get('SEND',False)  # opt-in kill switch for real sends
AFRICAS_TALKING_API_BASE = 'http://api.africastalking.com/version1'
HEADERS = {'Accept': 'application/json','apikey':API_KEY}
# Defaults merged into every outgoing send request.
PARAMS = {'username':USERNAME,'bulkSMSMode':1}
if SHORTCODE:
    PARAMS['from'] = SHORTCODE  # send from the configured shortcode when set
def send_raw(to,message):
    """POST *message* to *to* via the Africa's Talking messaging API.

    Returns the decoded JSON response.  Raises AfricasTalkingException when
    sending is disabled or credentials are missing, and
    requests.exceptions.HTTPError on a 4XX/5XX response.
    """
    if not AFRICAS_TALKING_SEND:
        raise AfricasTalkingException("Africas Talking called when send not set to True")
    if API_KEY is None:
        raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
    if USERNAME is None:
        raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
    params = {'to':to,'message':message}
    params.update(PARAMS)
    send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging')
    post = requests.post(send_url,data=params,headers=HEADERS)
    #Raise requests.exceptions.HTTPError if 4XX or 5XX
    post.raise_for_status()
    return post.json()
def send(to,message):
    """Send an SMS and summarize the API response.

    Returns (messageId, messageSuccess, extra_data) for a single recipient.
    NOTE(review): implicitly returns None when the API reports anything other
    than exactly one recipient -- callers should handle that case.
    """
    data = send_raw(to,message)
    '''
    Example of JSON Response
    {u'SMSMessageData':
        {u'Message': u'Sent to 1/1 Total Cost: USD 0.0109',
         u'Recipients': [{
            u'status': u'Success', #u'status': u'Invalid Phone Number',
            u'cost': u'KES 1.0000',
            u'number': u'+254708054321',
            u'messageId': u'ATXid_b50fada5b1af078f2277cacb58ef2447'
        }]
        }
    }
    '''
    # Return tuple (messageId, messageSuccess, extra_data)
    recipients = data['SMSMessageData']['Recipients']
    if len(recipients) == 1:
        msg_id = recipients[0]['messageId']
        msg_success = recipients[0]['status'] == 'Success'
        return msg_id, msg_success, {'status':recipients[0]['status']}
def balance():
    """Return the account balance string reported by the user endpoint.

    Raises AfricasTalkingException when credentials are missing and
    requests.exceptions.HTTPError on a 4XX/5XX response.
    """
    if API_KEY is None:
        raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
    if USERNAME is None:
        raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
    params = {'username':USERNAME}
    send_url = os.path.join(AFRICAS_TALKING_API_BASE,'user')
    post = requests.get(send_url,params=params,headers=HEADERS)
    #Raise requests.exceptions.HTTPError if 4XX or 5XX
    post.raise_for_status()
    data = post.json()
    return data['UserData']['balance']
def fetch(last_received_id=0):
    """Fetch incoming messages newer than *last_received_id*.

    NOTE(review): unlike send_raw/balance, this returns the raw requests
    Response (not JSON) and never calls raise_for_status -- verify whether
    that asymmetry is intended.
    """
    if API_KEY is None:
        raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
    if USERNAME is None:
        raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
    params = {'username':USERNAME,'lastReceivedId':last_received_id}
    send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging')
    post = requests.get(send_url,params=params,headers=HEADERS)
    return post
| StarcoderdataPython |
343019 | <filename>sps/api/v1/socket/processor.py
from format import Format
from message import MessageType
from random import randint
class Processor(object):
    """Dispatches incoming socket messages to registration / strategy handlers."""

    def __init__(self):
        pass

    def process_message(self, data):
        """Parse *data*, check the (hard-coded placeholder) certificate, and
        dispatch by message type.

        NOTE(review): ``result`` is unbound if the type is neither REG nor
        STRA, which would raise UnboundLocalError at the return.
        """
        m = Format.format(data)
        m_type = m.get_type()
        body = m.get_body()
        cert = "123456"  # placeholder; identify() currently accepts anything
        if self.identify(cert):
            if m_type == MessageType.REG:
                result = self.register(body)
            if m_type == MessageType.STRA:
                result = self.process_strategy(body)
            return result

    def identify(self, cert):
        """Certificate check stub -- always succeeds for now."""
        return True

    def register(self, body):
        """Register an agent: mint a UUID, sync CKM and the database."""
        uuid = self.create_uuid()
        agent_ip = body['sps_agent_ip']
        agent_port = body['sps_agent_port']
        cert_type = body['certificate_type']
        cert_value = body['certificate_value']
        info = (uuid, agent_ip, agent_port, cert_type, cert_value)
        self.sync_ckm(info)
        if self.sync_database(info):
            return {'return':'success', 'sps_agent_uuid':uuid}

    def create_uuid(self):
        # NOTE(review): a random int in [1, 1000] is not a UUID and can
        # collide; consider the uuid module for real deployments.
        uuid = randint(1, 1000)
        return uuid

    def sync_ckm(self, info):
        """Stub: pretend to push *info* to the CKM service."""
        print "send info %s to ckm success" % str(info)
        return True

    def sync_database(self, info):
        """Stub: pretend to persist *info*."""
        print "write %s to database success" % str(info)
        return True

    def process_strategy(self, body):
        """Build and return a strategy for the given VM.

        NOTE(review): get_info_from_nova, get_strategy_from_database,
        send_info_to_ckm and packet_strategy are not defined on this class;
        calling this method as written raises AttributeError.
        """
        vm_uuid = body['vm_uuid']
        vm_info = self.get_info_from_nova(vm_uuid)
        self.get_strategy_from_database()
        self.send_info_to_ckm()
        strategy = self.packet_strategy()
        return strategy
| StarcoderdataPython |
9772159 | <reponame>scottgigante/molecular-cross-validation
#!/usr/bin/env python
import argparse
import logging
import pathlib
import pickle
import numpy as np
import scipy.sparse
import scanpy as sc
from molecular_cross_validation.util import poisson_fit
def main():
    """CLI entry point: load an expression matrix, filter/subset it,
    optionally downsample UMIs, and pickle (true_means, true_counts, umis).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, required=True)
    parser.add_argument("--input_data", type=pathlib.Path, required=True)
    parser.add_argument("--output_dir", type=pathlib.Path, required=True)

    data_group = parser.add_argument_group("Parameters for dataset")
    data_group.add_argument("--n_cells", type=int, help="Number of cells to select")
    data_group.add_argument("--n_genes", type=int, help="Number of genes to select")
    data_group.add_argument("--min_counts", type=int, help="Minimum counts per cell")
    data_group.add_argument("--min_genes", type=int, help="Minimum genes per cell")
    data_group.add_argument("--min_cells", type=int, help="Minimum cells per gene")
    data_group.add_argument("--subsample", type=int, help="Number of UMIs to subsample")

    args = parser.parse_args()

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())

    # Derive a reproducible RNG seed from the user-provided integer.
    seed = sum(map(ord, f"biohub_{args.seed}"))
    dataset_file = args.output_dir / f"dataset_{args.seed}.pickle"

    logger.info("loading data")
    data = sc.read(args.input_data)
    data.var_names_make_unique()

    # Optional quality filters; each runs only when its flag is given.
    if args.min_counts:
        sc.pp.filter_cells(data, min_counts=args.min_counts)
    if args.min_genes:
        sc.pp.filter_cells(data, min_genes=args.min_genes)
    if args.min_cells:
        sc.pp.filter_genes(data, min_cells=args.min_cells)

    # Work on a dense integer count matrix from here on.
    if scipy.sparse.issparse(data.X):
        umis = np.asarray(data.X.astype(int).todense())
    else:
        umis = np.asarray(data.X.astype(int))

    # take top cells by umi count
    if args.n_cells and args.n_cells < umis.shape[0]:
        # count umis per cell
        cell_count = umis.sum(1)
        top_cells = cell_count >= sorted(cell_count)[-args.n_cells]
        logger.info(f"filtered to {args.n_cells} deepest cells")
        umis = umis[top_cells, :]

    # take most variable genes by poisson fit
    if args.n_genes and args.n_genes < umis.shape[1]:
        # compute deviation from poisson model
        exp_p = poisson_fit(umis)
        top_genes = exp_p < sorted(exp_p)[args.n_genes]
        logger.info(f"filtering to {args.n_genes} genes")
        umis = umis[:, top_genes]

    # calculating expected means from deep data
    true_counts = umis.sum(1, keepdims=True)
    true_means = umis / true_counts

    if args.subsample:
        logger.info(f"downsampling to {args.subsample} counts per cell")
        umis = sc.pp.downsample_counts(
            sc.AnnData(umis),
            args.subsample,
            replace=False,
            copy=True,
            random_state=seed,
        ).X.astype(int)

    logger.info(f"final umi matrix: {umis.shape}")
    with open(dataset_file, "wb") as out:
        pickle.dump((true_means, true_counts, umis), out)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3506420 | """Test for QueueGetConfigReply message."""
import unittest
from pyof.v0x01.common import queue
from pyof.v0x01.controller2switch import queue_get_config_reply
class TestQueueGetConfigReply(unittest.TestCase):
    """Test for QueueGetConfigReply message."""

    def setUp(self):
        """Build a QueueGetConfigReply carrying one queue with a min-rate property."""
        propertie01 = queue.QueuePropHeader()
        propertie01.property = queue.QueueProperties.OFPQT_MIN_RATE
        propertie01.len = 12
        packet_queue = queue.PacketQueue()
        packet_queue.queue_id = 1
        packet_queue.length = 8
        packet_queue.properties = [propertie01]
        self.message = queue_get_config_reply.QueueGetConfigReply()
        self.message.header.xid = 1
        self.message.port = 80
        self.message.queue = packet_queue

    def test_get_size(self):
        """[Controller2Switch/QueueGetConfigReply] - size 16."""
        self.assertEqual(self.message.get_size(), 16)

    @unittest.skip('Not yet implemented')
    def test_pack(self):
        """[Controller2Switch/QueueGetConfigReply] - packing."""
        # TODO
        pass

    @unittest.skip('Not yet implemented')
    def test_unpack(self):
        """[Controller2Switch/QueueGetConfigReply] - unpacking."""
        # TODO
        pass
| StarcoderdataPython |
1851443 | from rest_framework import serializers
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializer for creating users; the password is write-only and hashed."""

    class Meta:
        extra_kwargs = {
            'password': {'write_only': True}
        }
        model = User
        fields = ['id', 'username', 'password']

    def validate(self, attrs):
        """Reject duplicate usernames before running the default validation."""
        # .exists() avoids fetching the row; only a boolean is needed.
        if User.objects.filter(username=attrs['username']).exists():
            # BUG FIX: corrected the "existes" typo in the user-facing message.
            raise serializers.ValidationError(detail='User with this username already exists!')
        return super().validate(attrs)

    def create(self, validated_data):
        """Create the user, storing the password hashed via set_password."""
        user = User.objects.create(username=validated_data['username'])
        user.set_password(validated_data['password'])
        user.save()
        return user
| StarcoderdataPython |
1972755 | """Tensor Class."""
import functools
import operator
import numpy as np
# PyCUDA initialization
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
from .gpu_kernels import add, arithmetic
from .states import TensorState
ops = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}
class GPUConnectMixin:
    """Mixin providing low-level GPU (PyCUDA) memory and kernel helpers."""

    def _alloc_device_memory(self, shape):
        """Allocate a float32 device buffer for the given shape.

        The shape and dtype are attached to the allocation handle so later
        code can treat it like an array descriptor.
        """
        nbytes = int(np.prod(shape) * 4)  # float32 -> 4 bytes per element
        device_data = cuda.mem_alloc(nbytes)
        device_data.shape = tuple(shape)
        device_data.dtype = np.float32
        return device_data

    def _memory_host_to_device(self, device_data, data):
        """Copy a host numpy array into an existing device allocation."""
        cuda.memcpy_htod(device_data, data)

    @staticmethod
    def _idiv(a, b):
        """Integer division used for grid sizing; always returns a // b + 1."""
        return a // b + 1

    @staticmethod
    def get_kernel(kernel, function):
        """Fetch a compiled kernel *function* by name from a SourceModule."""
        return kernel.get_function(function)
class GradientMixin:
    """Mixin implementing reverse-mode autodiff over the tensor graph."""

    def _walk(self, node):
        """Depth-first post-order traversal; appends each node to self.nodes once."""
        self.visited.add(node)
        for child in node._child_nodes:
            if child not in self.visited:
                self._walk(child)
        self.nodes.append(node)

    def backward(self):
        """Backpropagate from this node with seed gradient 1.0.

        Collects the graph in post-order, then calls each node's _backward
        in reverse (output-to-leaves) order.  Returns this node's gradient.
        """
        self.visited = set()
        self.nodes = []
        self._walk(self)
        self.grad = 1.0
        for node in reversed(self.nodes):
            node._backward(node.grad)
        return self.grad
class Tensor(GPUConnectMixin, GradientMixin):
    """Autodiff-capable tensor living on the host (numpy) or the GPU (PyCUDA).

    Fixes in this revision (behavior preserved for all previously working
    call paths):

    - ``__mul__``/``__sub__`` now coerce plain numbers to ``Tensor`` the same
      way ``__add__`` already did.  This also repairs ``__neg__``
      (``self * -1``) and ``__truediv__`` (``self * value ** -1``), which
      previously crashed with AttributeError on the raw scalar.
    - ``__rtruediv__`` had a misspelled parameter (``vale``) and always
      raised ``NameError``; it now works.
    - ``_device()`` no longer raises ``NameError`` for a detached tensor;
      it falls back to "cpu".
    """

    BLOCKSIZE = 256

    # A per-instance __dict__ wastes a lot of RAM when thousands/millions of
    # tensors are created, so __slots__ declares a fixed attribute set.
    # NOTE(review): the mixin bases define no __slots__, so instances still
    # carry a __dict__ anyway — which is also why assigning grad,
    # _child_nodes and _backward (absent from this list) works at all.
    __slots__ = (
        "_data",
        "_name",
        "_n",
        "_dtype",
        "_shape",
        "gpu",
        "state",
        "device_name",
    )

    def __init__(self, data, name=None, dtype=None):
        """Initialize a Tensor.

        Args:
            data: list, scalar, numpy array, or a PyCUDA device allocation.
            name: optional display name.
            dtype: numpy dtype used when converting list/scalar input
                   (defaults to float32).
        ::
            Example:
            >> a = Tensor([1, 2])
            >> b = Tensor([2,3])
            >> print(a + b)
            (dp.Tensor, shape=(2,), dtype = int32, numpy:([3,5], dtype = int32)
        """
        self.state = TensorState.HOST
        if isinstance(data, (list, float, int)):
            data = np.array(data, dtype=dtype if dtype else np.float32)
        elif isinstance(data, pycuda._driver.DeviceAllocation):
            # Already-on-device buffer (carries .shape/.dtype attributes
            # attached by _alloc_device_memory).
            self.state = TensorState.DEVICE
        elif not (isinstance(data, np.ndarray) or isinstance(data, np.float32)):
            raise TypeError(f"numpy excepted but {type(data)} passed.")
        self._data = data
        self._dtype = data.dtype
        self._shape = data.shape
        self._name = name
        self.gpu = False
        self.grad = 0.0
        self._child_nodes = tuple()

        def _backward(in_grad=0.0):
            # Leaf gradient setter; ops that create this node overwrite it.
            self.grad = in_grad
            return (in_grad,)

        self._backward = _backward
        self.device_name = "cpu:0"

    def detach(self):
        """Detach from the autodiff graph; returns a new host Tensor."""
        self.state = TensorState.DETACH
        # TODO(kartik4949) : Write ME.
        return Tensor(self._data)

    @property
    def shape(self):
        return self._shape

    @property
    def name(self):
        return self._name

    @property
    def data(self):
        return self._data

    @property
    def dtype(self):
        return self._dtype

    @property
    def where(self):
        """Human-readable device location ("cpu"/"gpu") used by __repr__."""
        return self._device()

    def _device(self):
        # Default to "cpu" so DETACH (or any future state) does not raise
        # NameError, as the old if/if chain did.
        _cuda_device = "cpu"
        if self.state == TensorState.DEVICE:
            _cuda_device = "gpu"
        return _cuda_device

    def asarray(self, data: list = None, dtype: tuple = None):
        """asarray.
        convert array to DP array.
        Args:
            data (list): data
            dtype (tuple): dtype
        """
        # Depracted!
        return Tensor(np.asarray(data, dtype=dtype))

    def device(self, name: str = None):
        """Move the host data onto the device and register its location.

        Args:
            name (str): device name, must start with "cpu" or "gpu".
        """
        assert name.startswith("cpu") or name.startswith("gpu"), "Wrong Device!!"
        # set precision to float32.
        assert (
            self.dtype == np.float32
        ), "Only single precision is supported i.e float32"
        if self.state != TensorState.DEVICE:
            self.state = TensorState.DEVICE
            self.device_name = name
            data = self._alloc_device_memory(self.shape)
            self._memory_host_to_device(data, self._data)
            self._shape = self._data.shape
            self._dtype = self._data.dtype
            self._data = data
        return self

    def cpu(
        self,
    ):
        """Copy the device buffer back to the host; returns a new host Tensor."""
        _host_out_arry = np.empty(self.shape, dtype=np.float32)
        cuda.memcpy_dtoh(_host_out_arry, self._data)
        cuda.Context.synchronize()
        return Tensor(_host_out_arry)

    def sigmoid(self):
        """Sigmoid function (host-side), with gradient hook."""
        sig = 1 / (1 + np.exp(-self._data))
        ret = Tensor(sig)
        ret._child_nodes = (self,)

        def _backward(in_grad):
            # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
            self.grad += in_grad * (ret._data * (1 - ret._data))
            return self.grad

        ret._backward = _backward
        return ret

    def relu(self):
        """Relu function (host-side), with gradient hook."""
        _data = np.maximum(self._data, 0)
        out = Tensor(_data)
        out._child_nodes = (self,)

        def _backward(in_grad):
            # Gradient passes through only where the activation was positive.
            self.grad += (out._data > 0) * in_grad
            return (self.grad,)

        out._backward = _backward
        return out

    def tanh(self):
        """Tanh Function, composed from sigmoid: 2*sigmoid(2*x) - 1."""
        t2 = Tensor(
            np.zeros(self.shape, dtype=self.data.dtype) + 2,
        )
        t1 = Tensor(np.zeros(self.shape, dtype=self.data.dtype))
        return self.mul(t2).sigmoid().mul(t2) - t1  # 2*sigmoid(2*x)-1

    def add(self, tensor):
        """Element-wise addition with the given Tensor."""

        def _backward(in_grad):
            # Addition routes the incoming gradient unchanged to both inputs.
            self.grad += in_grad
            tensor.grad += in_grad
            return in_grad, in_grad

        return self.arithmetic(tensor, _backward, "+")

    def sub(self, tensor):
        """Element-wise subtraction of the given Tensor from this one."""

        def _backward(in_grad):
            self.grad += in_grad
            tensor.grad += -in_grad
            return in_grad, -in_grad

        return self.arithmetic(tensor, _backward, "-")

    def mul(self, tensor):
        """Element-wise multiplication with the given Tensor."""

        def _backward(in_grad):
            # Product rule: each input's gradient is scaled by the other input.
            self_grad = in_grad * tensor._data
            tensor_grad = in_grad * self._data
            self.grad += self_grad
            tensor.grad += tensor_grad
            return self_grad, tensor_grad

        return self.arithmetic(tensor, _backward, "*")

    def arithmetic(self, tensor, backward=None, operation: str = "+"):
        """Dispatch an element-wise binary op on host (numpy) or device (CUDA).

        Args:
            tensor: the right-hand operand (Tensor).
            backward: gradient hook installed on the host-path result.
            operation: one of "+", "-", "*", "/" (keys of the module `ops`).
        """
        if self.state != TensorState.DEVICE:
            ret = Tensor(ops[operation](self._data, tensor.data))
            ret._child_nodes = (self, tensor)
            if backward:
                ret._backward = backward
            return ret
        # NOTE(review): the device path installs no _backward/_child_nodes,
        # so GPU results do not participate in autodiff — confirm intended.
        assert isinstance(
            tensor, self.__class__
        ), f"Tensor is required but passed {type(tensor)}"
        ret = self._alloc_device_memory(self.shape)
        N = max(self.shape)
        blockDim = (self.BLOCKSIZE, 1, 1)
        gridDim = (self._idiv(N, self.BLOCKSIZE), 1, 1)
        _vec_kernel = self.get_kernel(arithmetic(operation), "device_arithmetic")
        _vec_kernel(
            ret,
            self._data,
            tensor.data,
            np.int32(N),
            block=blockDim,
            grid=gridDim,
        )
        ret = Tensor(ret)
        ret._dtype = self.dtype
        ret._shape = self.shape
        return ret

    def __pow__(self, value):
        out = Tensor(self.data ** value)
        out._child_nodes = (self,)

        def _backward(in_grad):
            # d/dx x**n = n * x**(n-1)
            self.grad += (value * self._data ** (value - 1)) * in_grad
            return (self.grad,)

        out._backward = _backward
        return out

    def __add__(self, tensor):
        tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
        return self.add(tensor)

    def __radd__(self, tensor):
        return self + tensor

    def __mul__(self, tensor):
        # Coerce scalars/lists like __add__ does; previously a raw number
        # crashed inside arithmetic() (no .data attribute).
        tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
        return self.mul(tensor)

    def __sub__(self, tensor):
        tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
        return self.sub(tensor)

    def __neg__(self):
        return self * -1

    def __rsub__(self, tensor):
        return tensor + (-self)

    def __rmul__(self, tensor):
        return self * tensor

    def __truediv__(self, value):
        return self * value ** -1

    def __rtruediv__(self, value):
        # Parameter was misspelled "vale" before, so this always raised
        # NameError on `scalar / tensor`.
        return value * self ** -1

    def __repr__(self):
        return "Tensor( %s shape: %s, numpy: (%s, dtype=%s), device: %s)" % (
            f"name: {self.name}, " if self.name else "",
            self.shape,
            self._data,
            self.dtype,
            self.where,
        )
| StarcoderdataPython |
1739222 | <gh_stars>0
from pytest import raises
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from astropy.wcs import WCS
from astropy.wcs.wcsapi.utils import deserialize_class, wcs_info_str
def test_construct():
    """Deserializing with the default construct=True builds the real object."""
    quantity = deserialize_class(('astropy.units.Quantity', (10,), {'unit': 'deg'}))
    assert_quantity_allclose(quantity, 10 * u.deg)
def test_noconstruct():
    """With construct=False the (class, args, kwargs) triple is returned as-is."""
    triple = deserialize_class(('astropy.units.Quantity', (), {'unit': 'deg'}),
                               construct=False)
    assert triple == (u.Quantity, (), {'unit': 'deg'})
def test_invalid():
    """A tuple of the wrong arity raises a descriptive ValueError."""
    bad_serialization = ('astropy.units.Quantity', (), {'unit': 'deg'}, ())
    with raises(ValueError) as exc:
        deserialize_class(bad_serialization)
    assert exc.value.args[0] == 'Expected a tuple of three values'
# Expected wcs_info_str() output for a default, empty 1-D WCS; compared
# with .strip() so leading/trailing newlines don't matter.
DEFAULT_1D_STR = """
WCS Transformation
This transformation has 1 pixel and 1 world dimensions
Array shape (Numpy order): None
Pixel Dim Data size Bounds
0 None None
World Dim Physical Type Units
0 None unknown
Correlation between pixel and world axes:
Pixel Dim
World Dim 0
0 yes
"""
def test_wcs_info_str():
    # wcs_info_str is exercised extensively in test_sliced_low_level_wcs.py;
    # here we only check the function exists and its API works as expected.
    empty_wcs = WCS(naxis=1)
    assert wcs_info_str(empty_wcs).strip() == DEFAULT_1D_STR.strip()
| StarcoderdataPython |
6529753 | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from storops.vnx.resource import VNXCliResourceList
from storops.vnx.resource import VNXCliResource
__author__ = '<NAME>'
class VNXNduList(VNXCliResourceList):
    """List of NDU (installable software package) resources on a VNX array."""

    @classmethod
    def get_resource_class(cls):
        return VNXNdu

    def _get_raw_resource(self):
        return self._cli.get_ndu(poll=self.poll)

    def _check_package(self, name):
        """Return True if the named package is present and enabled."""
        for ndu in self:
            if ndu.name == name:
                return VNXNdu.is_enabled(ndu)
        return False

    def is_dedup_enabled(self):
        return self._check_package(VNXNdu.DEDUP)

    def is_compression_enabled(self):
        return self._check_package(VNXNdu.COMPRESSION)

    def is_auto_tiering_enabled(self):
        return self._check_package(VNXNdu.AUTO_TIERING)

    def is_mirror_view_async_enabled(self):
        return self._check_package(VNXNdu.MIRROR_VIEW_ASYNC)

    def is_mirror_view_sync_enabled(self):
        return self._check_package(VNXNdu.MIRROR_VIEW_SYNC)

    def is_mirror_view_enabled(self):
        # Mirror view needs both the async and the sync package.
        return (self.is_mirror_view_async_enabled()
                and self.is_mirror_view_sync_enabled())

    def is_sancopy_enabled(self):
        return self._check_package(VNXNdu.SANCOPY)

    def is_thin_enabled(self):
        return self._check_package(VNXNdu.THIN)

    def is_snap_enabled(self):
        return self._check_package(VNXNdu.SNAP)

    def is_fast_cache_enabled(self):
        return self._check_package(VNXNdu.FAST_CACHE)
class VNXNdu(VNXCliResource):
    """A single NDU (installable software package) on a VNX array."""

    # naviseccli package names, as reported by `ndu -list`.
    DEDUP = '-Deduplication'
    COMPRESSION = '-Compression'
    AUTO_TIERING = '-FAST'
    MIRROR_VIEW_ASYNC = '-MirrorView/A'
    MIRROR_VIEW_SYNC = '-MirrorView/S'
    SANCOPY = '-SANCopy'
    THIN = '-ThinProvisioning'
    SNAP = '-VNXSnapshots'
    FAST_CACHE = '-FASTCache'

    def __init__(self, name=None, cli=None):
        super(VNXNdu, self).__init__()
        self._cli = cli
        self._name = name

    def _get_raw_resource(self):
        return self._cli.get_ndu(name=self._name, poll=self.poll)

    @staticmethod
    def is_enabled(ndu):
        """A package counts as enabled when present, active and committed."""
        return ndu.existed and ndu.active_state and not ndu.commit_required

    @classmethod
    def _check_package(cls, cli, name):
        ndu = VNXNdu(name, cli)
        ndu.with_no_poll()
        return cls.is_enabled(ndu)

    @classmethod
    def is_dedup_enabled(cls, cli):
        return cls._check_package(cli, cls.DEDUP)

    @classmethod
    def is_compression_enabled(cls, cli):
        return cls._check_package(cli, cls.COMPRESSION)

    @classmethod
    def is_auto_tiering_enabled(cls, cli):
        return cls._check_package(cli, cls.AUTO_TIERING)

    @classmethod
    def is_mirror_view_async_enabled(cls, cli):
        return cls._check_package(cli, cls.MIRROR_VIEW_ASYNC)

    @classmethod
    def is_mirror_view_sync_enabled(cls, cli):
        return cls._check_package(cli, cls.MIRROR_VIEW_SYNC)

    @classmethod
    def is_mirror_view_enabled(cls, cli):
        # Mirror view needs both the async and the sync package.
        return (cls.is_mirror_view_async_enabled(cli)
                and cls.is_mirror_view_sync_enabled(cli))

    @classmethod
    def is_sancopy_enabled(cls, cli):
        return cls._check_package(cli, cls.SANCOPY)

    @classmethod
    def is_thin_enabled(cls, cli):
        return cls._check_package(cli, cls.THIN)

    @classmethod
    def is_snap_enabled(cls, cli):
        return cls._check_package(cli, cls.SNAP)

    @classmethod
    def is_fast_cache_enabled(cls, cli):
        return cls._check_package(cli, cls.FAST_CACHE)

    @classmethod
    def get(cls, cli, name=None):
        """Return the full NDU list, or a single NDU when *name* is given."""
        if name is None:
            return VNXNduList(cli)
        return VNXNdu(name, cli)
| StarcoderdataPython |
6522890 | from __future__ import absolute_import, print_function
from sage.all import factorial
from moment_polytopes import *
import pytest
def test_hrepr_irred():
# unit square as an irredundant set of inequalities
unit_square = HRepr(ieqs=[((1, 0), 0), ((0, 1), 0), ((-1, 0), -1), ((0, -1), -1),])
assert unit_square == unit_square.irred()
assert (0, 0) in unit_square
assert (-1, 0) not in unit_square
# add a redundant inequality
unit_square_redund = unit_square & HRepr(ieqs=[((1, 1), 0)])
assert unit_square != unit_square_redund
assert unit_square == unit_square_redund.irred()
@pytest.mark.xfail
def test_irred_linearities():
line = HRepr(ieqs=[((1, -1), 0), ((-1, 1), 0),])
# irred does *NOT* convert the two inequalities into a linearity :-(
line_irred = line.irred()
assert len(line_irred.eqns) == 1
assert len(line_irred.ieqs) == 0
# line_irred = line.vrepr().hrepr().irred() <-- this works...
def test_hrepr_vrepr():
# triangle
triangle_hrepr = HRepr(ieqs=[((1, 0), 0), ((0, 1), 0), ((-1, -1), -1),])
triangle_vrepr = VRepr(vertices=[(0, 0), (1, 0), (0, 1),])
assert triangle_hrepr.vrepr() == triangle_vrepr
assert triangle_vrepr.hrepr() == triangle_hrepr
def test_hrepr_sage():
triangle_hrepr = HRepr(ieqs=[((1, 0), 0), ((0, 1), 0), ((-1, -1), -1),])
assert HRepr.from_sage(triangle_hrepr.to_sage()) == triangle_hrepr
def test_vrepr_sage():
triangle_vrepr = VRepr(vertices=[(0, 0), (1, 0), (0, 1),])
assert VRepr.from_sage(triangle_vrepr.to_sage()) == triangle_vrepr
def test_hrepr_vertices():
# fail if there are extremal rays
quadrant = HRepr(ieqs=[((1, 0), 0), ((0, 1), 0),])
with pytest.raises(ValueError):
quadrant.vertices()
def test_overflow_expectations():
# check my expectations with regards to numpy and long integers
import numpy as np
N = factorial(171)
# assert isinstance(N, long)
with pytest.raises(OverflowError):
np.array([N], dtype=np.int)
assert np.array([N]).dtype == object
def test_ambient_dim():
# usually, ambient dimensions are detected automatically
half_space = HRepr(ieqs=[((1, 0), 0)])
assert half_space.ambient_dim == 2
origin = VRepr(vertices=[(0, 0, 0)])
assert origin.ambient_dim == 3
# if no data is given, ambient dimension cannot be detected
with pytest.raises(ValueError):
HRepr()
with pytest.raises(ValueError):
VRepr()
# it works if we specify the ambient dimension manually
assert HRepr(ambient_dim=2).ambient_dim == 2
assert VRepr(ambient_dim=2).ambient_dim == 2
def test_default_hrepr():
assert not HRepr(ambient_dim=1).to_sage().is_empty()
def test_default_vrepr():
assert VRepr(ambient_dim=1).to_sage().is_empty()
| StarcoderdataPython |
6401142 | <gh_stars>0
import git_config
import os
import pager
import platform
import re
import shutil
import socket
import stat
import sys
import subprocess
import threading
from trace import Trace
def isUnix():
    """Return True on any non-Windows platform."""
    system_name = platform.system()
    return system_name != "Windows"
# fcntl is POSIX-only; import it conditionally so this module still loads
# on Windows (only file_reader.setup_fd uses it, and only on Unix).
if isUnix():
    import fcntl
def to_windows_path(path):
    """Convert forward slashes to backslashes for Windows consumption."""
    return "\\".join(path.split('/'))
def rmtree(path):
    """shutil.rmtree with the onerror hook that retries read-only entries
    (needed on Windows, where read-only files block deletion)."""
    shutil.rmtree(path, onerror=onerror)
def rename(src, dst):
    """os.rename with POSIX overwrite semantics emulated on Windows."""
    if not isUnix():
        # Windows os.rename refuses to overwrite; remove the target first.
        if os.path.exists(dst):
            os.remove(dst)
    os.rename(src, dst)
def onerror(function, path, excinfo):
    """shutil.rmtree error hook: make a read-only entry writable and retry.

    If *path* was not writable, chmod it user-writable and re-run the failing
    os function; otherwise re-raise the exception currently being handled.
    """
    if not os.access(path, os.W_OK):
        os.chmod(path, stat.S_IWUSR)
        function(path)
    else:
        raise
def input_reader(src, dest, std_name):
    """Return the platform-appropriate selectable reader wrapper for *src*."""
    reader_cls = file_reader if isUnix() else socket_reader
    return reader_cls(src, dest, std_name)
class file_reader(object):
    """Wrap a file object so select() can poll it without blocking (Unix)."""

    def __init__(self, fd, dest, std_name):
        assert std_name in ('stdout', 'stderr')
        self.fd = fd
        self.dest = dest
        self.std_name = std_name
        self.setup_fd()

    def setup_fd(self):
        # Switch the underlying descriptor to non-blocking mode.
        current_flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
        fcntl.fcntl(self.fd, fcntl.F_SETFL, current_flags | os.O_NONBLOCK)

    def fileno(self):
        return self.fd.fileno()

    def read(self, bufsize):
        return self.fd.read(bufsize)

    def close(self):
        return self.fd.close()

    def src(self):
        return self.fd
class socket_reader():
    """Selectable reader backed by a localhost UDP socket pair (Windows).

    Windows select() only works on sockets, so a background thread pumps
    data from *src* into a connected UDP socket; read() pulls it back out
    of the receiving end, whose fileno() is select()-able.
    """
    def __init__(self, src, dest, std_name=''):
        self.src = src
        self.dest = dest
        self.std_name = std_name
        self.completed = False
        self.host = "localhost"
        # Receiving end: bind to an ephemeral port, non-blocking for select().
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.server_socket.bind((self.host, 0))
        self.server_socket.setblocking(0)
        self.port = self.server_socket.getsockname()[1]
        address = (self.host, self.port)
        # Sending end, fed by the pump thread below.
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.client_socket.connect(address)
        t = threading.Thread(target=self.send_msg, args=(self.src, self.client_socket, address))
        t.start()
    def send_msg(self, src, dest, address):
        # Forward src in 4 KiB chunks; an empty datagram signals EOF.
        # NOTE(review): sends a str "" — Python 2 semantics; Python 3 would
        # require bytes here. Confirm target interpreter.
        while True:
            data = src.read(4096)
            if data:
                dest.sendto(data, address)
            else:
                break
        dest.sendto("", address)
    def read(self, bufsize):
        try:
            return self.server_socket.recv(bufsize)
        except Exception as e:
            Trace("failed to read from server socket: " + e.strerror)
            self.close()
    def close(self):
        if self.client_socket:
            self.client_socket.close()
        if self.server_socket:
            self.server_socket.close()
    def fileno(self):
        return self.server_socket.fileno()
    # NOTE(review): this method is shadowed by the instance attribute
    # `self.src` assigned in __init__, so it is unreachable via instances.
    def src(self):
        return self.src
def os_symlink(src, dst):
    """Create a symlink, delegating to the mklink-based emulation on Windows."""
    if not isUnix():
        windows_symlink(src, dst)
    else:
        os.symlink(src, dst)
def windows_symlink(src, dst):
    """Emulate os.symlink on Windows via cmd's mklink.

    When git config `portable.windowsNoSymlinks` is set, directory junctions
    (/J) and file hard links (/H) are created instead of real symlinks
    (which require elevated privileges); those need absolute source paths.
    Failures are traced and swallowed.
    """
    globalConfig = git_config.GitConfig.ForUser()
    src = to_windows_path(src)
    dst = to_windows_path(dst)
    # Resolve src relative to dst's directory to decide dir vs file link.
    is_dir = True if os.path.isdir(os.path.realpath(os.path.join(os.path.dirname(dst), src))) else False
    no_symlinks = globalConfig.GetBoolean("portable.windowsNoSymlinks")
    if no_symlinks is None or no_symlinks == False:
        # Real symlinks: /D for directories, no flag for files.
        symlink_options_dir = '/D'
        symlink_options_file = ''
    else:
        # Junction/hardlink mode requires an absolute source path.
        src = os.path.abspath(os.path.join(os.path.dirname(dst), src))
        Trace("Using no symlinks for %s from %s to %s", "dir" if is_dir else "file", src, dst)
        symlink_options_dir = '/J'
        symlink_options_file = '/H'
    if is_dir:
        cmd = ['cmd', '/c', 'mklink', symlink_options_dir, dst, src]
        cmd = filter(len, cmd)  # drop the empty option string, if any
        Trace(' '.join(cmd))
        try:
            subprocess.Popen(cmd, stdout=subprocess.PIPE).wait()
        except Exception as e:
            Trace("failed to create dir symlink: %s", e.strerror)
            pass
    else:
        cmd = ['cmd', '/c', 'mklink', symlink_options_file, dst, src]
        cmd = filter(len, cmd)
        Trace(' '.join(cmd))
        try:
            subprocess.Popen(cmd, stdout=subprocess.PIPE).wait()
        except Exception as e:
            Trace("failed to create file symlink: %s", e.strerror)
            pass
def os_path_islink(path):
    """Cross-platform islink: os.path.islink on POSIX, link probing on Windows.

    Bug fix: the POSIX branch computed ``os.path.islink(path)`` but never
    returned it, so the function always returned None (falsy) on Unix even
    for real symlinks.
    """
    if isUnix():
        return os.path.islink(path)
    # Windows: treat both reparse-point symlinks and hard links as links.
    if get_windows_symlink(path) is not None:
        return True
    if get_windows_hardlink(path) is not None:
        return True
    return False
def os_path_realpath(file_path):
    """Cross-platform realpath; resolves Windows junctions/hardlinks manually.

    Bug fix: the POSIX branch computed ``os.path.realpath(file_path)`` but
    never returned it, so the function always returned None on Unix.
    """
    if isUnix():
        return os.path.realpath(file_path)
    if not os.path.exists(file_path):
        # Nothing to resolve; hand the path back unchanged.
        return file_path
    return windows_realpath(file_path)
def windows_realpath(file_path):
    """Follow a Windows symlink chain to its end, then resolve any hardlink."""
    target = file_path
    while True:
        next_hop = get_windows_symlink(target)
        if next_hop is None:
            break
        target = next_hop
    hardlink = get_windows_hardlink(target)
    return hardlink if hardlink is not None else target
def get_windows_symlink(file_path):
    """Return the target of a Windows symlink/junction at *file_path*, or None.

    Implemented by scanning the output of ``dir /AL`` (list reparse points)
    in the parent directory and matching this entry's name.
    """
    if os.path.isdir(file_path):
        root = os.path.abspath(os.path.join(file_path, os.pardir))
        file_object = os.path.split(file_path)[1]
        if not file_object:
            # Trailing-slash path: take the last real component instead.
            file_object = os.path.split(os.path.split(file_object)[0])[1]
    else:
        root = os.path.dirname(file_path)
        file_object = os.path.split(file_path)[1]
    cmd = ['cmd', '/c', 'dir', '/AL', root]
    try:
        Trace(' '.join(cmd))
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except:
        # `dir` exits non-zero when there are no reparse points at all.
        return None
    lines = [s.strip() for s in out.split('\n')]
    if len(lines) < 6:
        # The dir header alone spans 5 lines; fewer means no entries.
        return None
    # Matches e.g. "...  <SYMLINKD>  name [target]".
    pattern = re.compile('.*<(.*)>\\s*(.*) \[(.*)\]$')
    for line in lines[5:]:
        result = pattern.match(line)
        if result:
            ftype = result.group(1)
            fname = result.group(2)
            flink = result.group(3)
            if file_object == fname:
                if ftype == 'SYMLINK' or ftype == 'SYMLINKD':
                    # Relative target: resolve against the link's directory.
                    new_path = os.path.realpath(os.path.join(os.path.dirname(file_path), flink))
                    Trace("Relative link found: %s -> %s -> %s", fname, flink, new_path)
                else:
                    # Junction (or other reparse type): target is absolute.
                    new_path = flink
                    Trace("Absolute link found: %s -> %s", fname, flink)
                return new_path
    return None
def get_windows_hardlink(file_path):
    """Return a hardlink alias for *file_path* via ``fsutil hardlink list``,
    or None for directories, errors, or files with no extra links."""
    if os.path.isdir(file_path):
        return None
    cmd = ['cmd', '/c', 'fsutil', 'hardlink', 'list', file_path]
    try:
        Trace(' '.join(cmd))
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except:
        return None
    lines = [s.strip() for s in out.split('\n')]
    if len(lines) >= 2 and len(lines[1]) > 0:
        # fsutil prints drive-less paths; prepend the original drive letter.
        hardlink = file_path[0:2] + lines[0]
        Trace("Hard link found: %s -> %s", file_path, hardlink)
        return hardlink
    else:
        return None
child_process = None
def RunPager(cmd):
    """Route output through a pager: native repo pager on POSIX, the
    subprocess-based emulation on Windows."""
    if isUnix():
        pager.RunPager(cmd.manifest.globalConfig)
    else:
        RunWindowsPager(cmd)
def RunWindowsPager(cmd):
    """Pipe our stdout/stderr into the configured pager process (Windows)."""
    executable = pager._SelectPager(cmd.manifest.globalConfig)
    redirect_all(executable)
    # Mark the pager module active so callers behave as if a pager runs.
    pager.active = True
def NoPager(cmd):
    """On Windows, still route output through a catenate program so
    redirection/encoding behaves; no-op on POSIX."""
    if not isUnix():
        RunWindowsShell(cmd)
def RunWindowsShell(cmd):
    """Pipe our stdout/stderr through the configured catenate program
    (defaults to 'cat')."""
    executable = _SelectCatenate(cmd.manifest.globalConfig)
    redirect_all(executable)
def redirect_all(executable):
    """Spawn *executable* and rebind sys.stdout/sys.stderr to its stdin.

    The child inherits our old stdout/stderr, so its output still reaches
    the console.  The old stdout is closed afterwards; WaitForProcess()
    must be called later to reap the child stored in `child_process`.
    """
    old_sysin = sys.stdin
    old_sysout = sys.stdout
    old_syserr = sys.stderr
    Trace("redirecting to %s" % executable)
    p = subprocess.Popen([executable], stdin=subprocess.PIPE, stdout=old_sysout, stderr=old_syserr)
    sys.stdout = p.stdin
    sys.stderr = p.stdin
    old_sysout.close()
    global child_process
    child_process = p
def _SelectCatenate(globalConfig):
    """Pick the catenate program, in priority order:
    $GIT_CATENATE, git config core.catenate, $CATENATE, then 'cat'."""
    if 'GIT_CATENATE' in os.environ:
        return os.environ['GIT_CATENATE']
    configured = globalConfig.GetString('core.catenate')
    if configured:
        return configured
    if 'CATENATE' in os.environ:
        return os.environ['CATENATE']
    return 'cat'
def WaitForProcess():
    """Close the pipe to the pager/catenate child (if any) and wait for it.
    Only relevant on Windows, where redirect_all() may have spawned one."""
    if not isUnix():
        global child_process
        if child_process:
            child_process.stdin.close()
            child_process.wait()
def prepare_editor_args(editor):
    """Build the (args, shell) pair for launching the user's editor."""
    if isUnix():
        # POSIX: run through the shell so "$@" expands the file arguments.
        return ([editor + ' "$@"', 'sh'], True)
    # Windows: strip quotes and split on whitespace; no shell involved.
    cleaned = re.sub('["\']', '', editor)
    return (cleaned.rsplit(), False)
def os_chmod(dest, mode):
    """chmod on POSIX only.
    NOTE(review): silently a no-op on Windows — presumably intentional since
    POSIX mode bits don't map there; confirm."""
    if isUnix():
        os.chmod(dest, mode)
| StarcoderdataPython |
6431172 | <filename>lib/hachoir/parser/program/python.py
"""
Python compiled source code parser.
Informations:
- Python 2.4.2 source code:
files Python/marshal.c and Python/import.c
Author: <NAME>
Creation: 25 march 2005
"""
from hachoir.parser import Parser
from hachoir.field import (FieldSet, UInt8,
UInt16, Int32, UInt32, Int64, ParserError, Float64,
Character, RawBytes, PascalString8, TimestampUnix32,
Bit, String)
from hachoir.core.endian import LITTLE_ENDIAN
from hachoir.core.bits import long2raw
from hachoir.core.text_handler import textHandler, hexadecimal
# Set to True to dump a disassembly of each parsed code object to stdout.
DISASSEMBLE = False
if DISASSEMBLE:
    from dis import dis
def disassembleBytecode(field):
    """Print a disassembly of the raw bytecode stored in *field*'s value."""
    dis(field.value)
# --- String and string reference ---
def parseString(parent):
    """Parse a marshal string: 32-bit length, then the payload.

    For the special "lnotab" field the payload is decoded as pairs of
    (bytecode-offset delta, line-number delta) bytes, and the running totals
    are written into each field's description.
    """
    yield UInt32(parent, "length", "Length")
    length = parent["length"].value
    if parent.name == "lnotab":
        bytecode_offset = 0
        line_number = parent['../firstlineno'].value
        for i in range(0, length, 2):
            bc_off_delta = UInt8(parent, 'bytecode_offset_delta[]')
            yield bc_off_delta
            bytecode_offset += bc_off_delta.value
            bc_off_delta._description = 'Bytecode Offset %i' % bytecode_offset
            line_number_delta = UInt8(parent, 'line_number_delta[]')
            yield line_number_delta
            line_number += line_number_delta.value
            line_number_delta._description = 'Line Number %i' % line_number
    elif 0 < length:
        yield RawBytes(parent, "text", length, "Content")
        if DISASSEMBLE and parent.name == "compiled_code":
            disassembleBytecode(parent["text"])
def parseStringRef(parent):
    """Parse a reference to a previously interned string (32-bit index, hex display)."""
    yield textHandler(UInt32(parent, "ref"), hexadecimal)
def createStringRefDesc(parent):
    """Build the display description for a string-reference object."""
    return "String ref: %s" % parent["ref"].display
# --- Integers ---
def parseInt32(parent):
    """Parse a marshal 32-bit signed integer."""
    yield Int32(parent, "value")
def parseInt64(parent):
    """Parse a marshal 64-bit signed integer."""
    yield Int64(parent, "value")
def parseLong(parent):
    """Parse an arbitrary-precision integer: signed digit count, then
    that many 16-bit digit fields (15 significant bits each)."""
    yield Int32(parent, "digit_count")
    for index in range(abs(parent["digit_count"].value)):
        yield UInt16(parent, "digit[]")
# --- Float and complex ---
def parseFloat(parent):
    """Parse a float stored as its text representation (length-prefixed string)."""
    yield PascalString8(parent, "value")
def parseBinaryFloat(parent):
    """Parse a float stored as an 8-byte binary double."""
    yield Float64(parent, "value")
def parseComplex(parent):
    """Parse a complex number stored as two text floats (real, imaginary)."""
    yield PascalString8(parent, "real")
    yield PascalString8(parent, "complex")
def parseBinaryComplex(parent):
    """Parse a complex number stored as two binary doubles (real, imaginary)."""
    yield Float64(parent, "real")
    yield Float64(parent, "complex")
# --- Tuple and list ---
def parseTuple(parent):
    """Parse a tuple/list/set: 32-bit item count, then that many objects."""
    yield UInt32(parent, "count", "Item count")
    count = parent["count"].value
    if count < 0:
        raise ParserError("Invalid tuple/list count")
    for index in range(count):
        yield Object(parent, "item[]")
def parseSmallTuple(parent):
    """Parse a small tuple: 8-bit item count, then that many objects."""
    yield UInt8(parent, "count", "Item count")
    count = parent["count"].value
    if count < 0:
        raise ParserError("Invalid tuple/list count")
    for index in range(count):
        yield Object(parent, "item[]")
def createTupleDesc(parent):
    """Describe a container object as '<TypeName>: N items'."""
    item_count = parent["count"].value
    return "%s: %s items" % (parent.code_info[2], item_count)
# --- Dict ---
def parseDict(parent):
    """
    Format is: (key1, value1, key2, value2, ..., keyn, valuen, NULL)
    where each keyi and valuei is an object.
    """
    # Side effect: parent.count ends up holding the number of key/value
    # pairs, which createDictDesc() reads for the description.
    parent.count = 0
    while True:
        key = Object(parent, "key[]")
        yield key
        if key["bytecode"].value == "0":
            # A NULL object terminates the dict.
            break
        yield Object(parent, "value[]")
        parent.count += 1
def createDictDesc(parent):
    """Describe a dict object as 'Dict: N keys' (N set by parseDict)."""
    return "Dict: %s keys" % parent.count
def parseRef(parent):
    """Parse an object reference: 32-bit index into the reference table."""
    yield UInt32(parent, "n", "Reference")
def parseShortASCII(parent):
    """Parse a short ASCII string: 8-bit length, then the ASCII payload."""
    size = UInt8(parent, "len", "Number of ASCII characters")
    yield size
    yield String(parent, "text", size.value, "String content", charset="ASCII")
# --- Code ---
def parseCode(parent):
    """Parse a marshal'ed code object; field widths depend on the Python
    version recorded in the file header (root.getVersion()).

    3.0+ adds kwonlyargcount; 2.3+ uses 32-bit counts; older use 16-bit.
    """
    if 0x3000000 <= parent.root.getVersion():
        yield UInt32(parent, "arg_count", "Argument count")
        yield UInt32(parent, "kwonlyargcount", "Keyword only argument count")
        yield UInt32(parent, "nb_locals", "Number of local variables")
        yield UInt32(parent, "stack_size", "Stack size")
        yield UInt32(parent, "flags")
    elif 0x2030000 <= parent.root.getVersion():
        yield UInt32(parent, "arg_count", "Argument count")
        yield UInt32(parent, "nb_locals", "Number of local variables")
        yield UInt32(parent, "stack_size", "Stack size")
        yield UInt32(parent, "flags")
    else:
        yield UInt16(parent, "arg_count", "Argument count")
        yield UInt16(parent, "nb_locals", "Number of local variables")
        yield UInt16(parent, "stack_size", "Stack size")
        yield UInt16(parent, "flags")
    yield Object(parent, "compiled_code")
    yield Object(parent, "consts")
    yield Object(parent, "names")
    yield Object(parent, "varnames")
    # Closure cells only exist from Python 2.0 on.
    if 0x2000000 <= parent.root.getVersion():
        yield Object(parent, "freevars")
        yield Object(parent, "cellvars")
    yield Object(parent, "filename")
    yield Object(parent, "name")
    if 0x2030000 <= parent.root.getVersion():
        yield UInt32(parent, "firstlineno", "First line number")
    else:
        yield UInt16(parent, "firstlineno", "First line number")
    yield Object(parent, "lnotab")
class Object(FieldSet):
    """A single marshal-serialized Python object.

    Layout: a 7-bit type tag ("bytecode"), a 1-bit "reference" flag, then a
    type-specific payload parsed by the handler in bytecode_info, which
    maps the tag character to a tuple of
    (field name, payload parser, description, description builder).
    """
    bytecode_info = {
        # Don't contains any data
        '0': ("null", None, "NULL", None),
        'N': ("none", None, "None", None),
        'F': ("false", None, "False", None),
        'T': ("true", None, "True", None),
        'S': ("stop_iter", None, "StopIter", None),
        '.': ("ellipsis", None, "ELLIPSIS", None),
        '?': ("unknown", None, "Unknown", None),
        'i': ("int32", parseInt32, "Int32", None),
        'I': ("int64", parseInt64, "Int64", None),
        'f': ("float", parseFloat, "Float", None),
        'g': ("bin_float", parseBinaryFloat, "Binary float", None),
        'x': ("complex", parseComplex, "Complex", None),
        'y': ("bin_complex", parseBinaryComplex, "Binary complex", None),
        'l': ("long", parseLong, "Long", None),
        's': ("string", parseString, "String", None),
        't': ("interned", parseString, "Interned", None),
        'u': ("unicode", parseString, "Unicode", None),
        'R': ("string_ref", parseStringRef, "String ref", createStringRefDesc),
        '(': ("tuple", parseTuple, "Tuple", createTupleDesc),
        ')': ("small_tuple", parseSmallTuple, "Tuple", createTupleDesc),
        '[': ("list", parseTuple, "List", createTupleDesc),
        '<': ("set", parseTuple, "Set", createTupleDesc),
        '>': ("frozenset", parseTuple, "Frozen set", createTupleDesc),
        '{': ("dict", parseDict, "Dict", createDictDesc),
        'c': ("code", parseCode, "Code", None),
        'r': ("ref", parseRef, "Reference", None),
        'z': ("short_ascii", parseShortASCII, "Short ASCII", None),
        'Z': ("short_ascii_interned", parseShortASCII, "Short ASCII interned", None),
    }
    def __init__(self, parent, name, **kw):
        FieldSet.__init__(self, parent, name, **kw)
        code = self["bytecode"].value
        if code not in self.bytecode_info:
            raise ParserError('Unknown bytecode %r at position %s'
                              % (code, self.absolute_address // 8))
        self.code_info = self.bytecode_info[code]
        if not name:
            self._name = self.code_info[0]
        # Bind type-specific createValue/createDisplay implementations
        # onto this instance, depending on the tag.
        if code == "l":
            self.createValue = self.createValueLong
        elif code in ("i", "I", "f", "g"):
            self.createValue = lambda: self["value"].value
        elif code == "T":
            self.createValue = lambda: True
        elif code == "F":
            self.createValue = lambda: False
        elif code in ("x", "y"):
            self.createValue = self.createValueComplex
        elif code in ("s", "t", "u"):
            self.createValue = self.createValueString
            self.createDisplay = self.createDisplayString
            if code == 't':
                # Interned strings are recorded so 'R' entries can refer back.
                if not hasattr(self.root, 'string_table'):
                    self.root.string_table = []
                self.root.string_table.append(self)
        elif code == 'R':
            if hasattr(self.root, 'string_table'):
                self.createValue = self.createValueStringRef
    def createValueString(self):
        # Value of a string object; empty when there is no payload.
        if "text" in self:
            return self["text"].value
        else:
            return ""
    def createDisplayString(self):
        if "text" in self:
            return self["text"].display
        else:
            return "(empty)"
    def createValueLong(self):
        # Reassemble the arbitrary-precision integer from its 15-bit digits,
        # least-significant digit first; the count's sign carries the sign.
        is_negative = self["digit_count"].value < 0
        count = abs(self["digit_count"].value)
        total = 0
        for index in range(count - 1, -1, -1):
            total <<= 15
            total += self["digit[%u]" % index].value
        if is_negative:
            total = -total
        return total
    def createValueStringRef(self):
        return self.root.string_table[self['ref'].value].value
    def createDisplayStringRef(self):
        return self.root.string_table[self['ref'].value].display
    def createValueComplex(self):
        return complex(
            float(self["real"].value),
            float(self["complex"].value))
    def createFields(self):
        yield BytecodeChar(self, "bytecode", "Bytecode")
        yield Bit(self, "flag_ref", "Is a reference?")
        parser = self.code_info[1]
        if parser:
            yield from parser(self)
    def createDescription(self):
        create = self.code_info[3]
        if create:
            return create(self)
        else:
            return self.code_info[2]
class BytecodeChar(Character):
    # The type tag occupies 7 bits; the 8th bit is the "flag_ref" Bit
    # parsed separately in Object.createFields().
    static_size = 7
class PythonCompiledFile(Parser):
    """Parser for compiled Python bytecode files (.pyc/.pyo).

    Header layout (see createFields): a 16-bit magic number, the two bytes
    "\\r\\n", a 32-bit timestamp, an optional 32-bit source-size field
    (Python >= 3.3), then one marshalled top-level code object.
    """
    PARSER_TAGS = {
        "id": "python",
        "category": "program",
        "file_ext": ("pyc", "pyo"),
        "min_size": 9 * 8,
        "description": "Compiled Python script (.pyc/.pyo files)"
    }
    endian = LITTLE_ENDIAN

    # Dictionary mapping the 16-bit pyc magic number (the first two header
    # bytes) to a (version string, version hex constant) pair.
    # This list comes from CPython source code, see MAGIC_NUMBER
    # in file Lib/importlib/_bootstrap_external.py
    MAGIC = {
        # Python 1.x
        20121: ("1.5", 0x1050000),
        50428: ("1.6", 0x1060000),
        # Python 2.x
        50823: ("2.0", 0x2000000),
        60202: ("2.1", 0x2010000),
        60717: ("2.2", 0x2020000),
        62011: ("2.3a0", 0x2030000),
        62021: ("2.3a0", 0x2030000),
        62041: ("2.4a0", 0x2040000),
        62051: ("2.4a3", 0x2040000),
        62061: ("2.4b1", 0x2040000),
        62071: ("2.5a0", 0x2050000),
        62081: ("2.5a0 (ast-branch)", 0x2050000),
        62091: ("2.5a0 (with)", 0x2050000),
        62092: ("2.5a0 (WITH_CLEANUP opcode)", 0x2050000),
        62101: ("2.5b3", 0x2050000),
        62111: ("2.5b3", 0x2050000),
        62121: ("2.5c1", 0x2050000),
        62131: ("2.5c2", 0x2050000),
        62151: ("2.6a0", 0x2070000),
        62161: ("2.6a1", 0x2070000),
        62171: ("2.7a0", 0x2070000),
        62181: ("2.7a0", 0x2070000),
        62191: ("2.7a0", 0x2070000),
        62201: ("2.7a0", 0x2070000),
        62211: ("2.7a0", 0x2070000),
        # Python 3.x
        3000: ("3.0 (3000)", 0x3000000),
        3010: ("3.0 (3010)", 0x3000000),
        3020: ("3.0 (3020)", 0x3000000),
        3030: ("3.0 (3030)", 0x3000000),
        3040: ("3.0 (3040)", 0x3000000),
        3050: ("3.0 (3050)", 0x3000000),
        3060: ("3.0 (3060)", 0x3000000),
        3061: ("3.0 (3061)", 0x3000000),
        3071: ("3.0 (3071)", 0x3000000),
        3081: ("3.0 (3081)", 0x3000000),
        3091: ("3.0 (3091)", 0x3000000),
        3101: ("3.0 (3101)", 0x3000000),
        3103: ("3.0 (3103)", 0x3000000),
        3111: ("3.0a4", 0x3000000),
        3131: ("3.0a5", 0x3000000),
        3141: ("3.1a0", 0x3010000),
        3151: ("3.1a0", 0x3010000),
        3160: ("3.2a0", 0x3020000),
        3170: ("3.2a1", 0x3020000),
        3180: ("3.2a2", 0x3020000),
        3190: ("Python 3.3a0", 0x3030000),
        3200: ("Python 3.3a0 ", 0x3030000),
        3210: ("Python 3.3a0 ", 0x3030000),
        3220: ("Python 3.3a1 ", 0x3030000),
        3230: ("Python 3.3a4 ", 0x3030000),
        3250: ("Python 3.4a1 ", 0x3040000),
        3260: ("Python 3.4a1 ", 0x3040000),
        3270: ("Python 3.4a1 ", 0x3040000),
        3280: ("Python 3.4a1 ", 0x3040000),
        3290: ("Python 3.4a4 ", 0x3040000),
        3300: ("Python 3.4a4 ", 0x3040000),
        3310: ("Python 3.4rc2", 0x3040000),
        3320: ("Python 3.5a0 ", 0x3050000),
        3330: ("Python 3.5b1 ", 0x3050000),
        3340: ("Python 3.5b2 ", 0x3050000),
        3350: ("Python 3.5b2 ", 0x3050000),
        3351: ("Python 3.5.2 ", 0x3050000),
        3360: ("Python 3.6a0 ", 0x3060000),
        3361: ("Python 3.6a0 ", 0x3060000),
        3370: ("Python 3.6a1 ", 0x3060000),
        3371: ("Python 3.6a1 ", 0x3060000),
        3372: ("Python 3.6a1 ", 0x3060000),
        3373: ("Python 3.6b1 ", 0x3060000),
        3375: ("Python 3.6b1 ", 0x3060000),
        3376: ("Python 3.6b1 ", 0x3060000),
        3377: ("Python 3.6b1 ", 0x3060000),
        3378: ("Python 3.6b2 ", 0x3060000),
        3379: ("Python 3.6rc1", 0x3060000),
        3390: ("Python 3.7a0 ", 0x3070000),
    }

    # Dictionary mapping the full 4-byte signature (magic number followed by
    # "\r\n") to a Python version string (eg. "m\xf2\r\n" => "2.4b1")
    STR_MAGIC = dict(
        (long2raw(magic | (ord('\r') << 16) |
                  (ord('\n') << 24), LITTLE_ENDIAN), value[0])
        for magic, value in MAGIC.items())

    def validate(self):
        """Sanity-check the header; return True or an error message string."""
        magic_number = self["magic_number"].value
        if magic_number not in self.MAGIC:
            return "Unknown magic number (%s)" % magic_number
        if self["magic_string"].value != "\r\n":
            return r"Wrong magic string (\r\n)"
        version = self.getVersion()
        # Python >= 3.3 headers carry an extra 32-bit source-size field,
        # pushing the first marshalled object from offset 8 to offset 12
        if version >= 0x3030000 and self['magic_number'].value >= 3200:
            offset = 12
        else:
            offset = 8
        # the first marshalled object must be a code object: 7-bit 'c' tag
        value = self.stream.readBits(offset * 8, 7, self.endian)
        if value != ord(b'c'):
            return "First object bytecode is not code"
        return True

    def getVersion(self):
        """Return the CPython version hex constant (eg. 0x3050000), cached on self."""
        if not hasattr(self, "version"):
            # the 16-bit magic number at offset 0 determines the version
            signature = self.stream.readBits(0, 16, self.endian)
            self.version = self.MAGIC[signature][1]
        return self.version

    def createFields(self):
        """Yield the header fields followed by the top-level marshalled object."""
        yield UInt16(self, "magic_number", "Magic number")
        yield String(self, "magic_string", 2, r"Magic string \r\n", charset="ASCII")
        yield TimestampUnix32(self, "timestamp", "Timestamp")
        version = self.getVersion()
        if version >= 0x3030000 and self['magic_number'].value >= 3200:
            yield UInt32(self, "filesize", "Size of the Python source file (.py) modulo 2**32")
        yield Object(self, "content")
| StarcoderdataPython |
259113 | from os import path
# Project directory layout: this module lives in <base>/src, data in <base>/data.
path_src = path.dirname(path.abspath(__file__))
path_base = path.dirname(path_src)
path_data = path.join(path_base, 'data')
# HTML parser backend name (presumably handed to an HTML-table reader such as
# BeautifulSoup/pandas.read_html — confirm at the call sites).
html_lib = 'html5lib'
# OSHA SIC-code lookup: base URL and the column names for the scraped tables.
OSHA_base_url = 'https://www.osha.gov/pls/imis/'
OSHA_columns = (
    'SIC4_cd', 'SIC4_desc', 'ind_cd', 'ind_desc',
    'maj_cd', 'maj_desc', 'div_cd', 'div_desc')
# SEC SIC-code table: source page, the raw header row expected on that page
# (note: 'A/D \xc2\xa0Office' keeps the page's mojibake non-breaking space
# on purpose so the scraped header matches), and the cleaned column names.
SEC_base_url = 'https://www.sec.gov/info/edgar/siccodes.htm'
SEC_expected_columns = ['SICCode', 'A/D \xc2\xa0Office', 'Industry Title']
SEC_columns = ['SIC4_cd', 'AD_office', 'industry_title']
| StarcoderdataPython |
1794958 | <filename>checklist/context_processors.py
# https://stackoverflow.com/a/34903331/6543250 - to pass data to "base.html"
from checklist.models import Category, Notification
def add_variable_to_context(request):
    """Template context processor: expose every Category plus the current
    user's notifications (newest first) to all templates."""
    notifications = []
    if request.user.is_authenticated:
        # only notifications addressed to the logged-in user, newest first
        notifications = Notification.objects.filter(
            toUser=request.user
        ).order_by("-date_notified")
    return {
        "category_list": Category.objects.all(),
        "notif_list": notifications,
    }
| StarcoderdataPython |
5101076 | <filename>vistautils/range.py
# really needs to be totally-ordered
from abc import ABCMeta, abstractmethod
from datetime import date
from typing import (
Any,
Container,
Generic,
Hashable,
Iterable,
Mapping,
Optional,
Sequence,
Sized,
Tuple,
TypeVar,
Union,
)
from attr import attrib, attrs, validators
from immutablecollections import ImmutableDict, ImmutableSet, immutabledict, immutableset
# Port of Guava's Range data type and associated classes
from vistautils.preconditions import check_arg, check_not_none
import deprecation
from sortedcontainers import SortedDict
# Forward declarations: replaced with the _Open/_Closed singletons once the
# bound type classes below have been declared.
# noinspection PyTypeHints
_OPEN: "BoundType" = None  # type:ignore
# noinspection PyTypeHints
_CLOSED: "BoundType" = None  # type:ignore
class BoundType:
    """
    A possible type of boundary for a range.

    A boundary is either closed, meaning it includes its endpoint, or open,
    meaning it does not.  Only the two module-level singletons are ever used;
    obtain them via :meth:`open` and :meth:`closed`.
    """

    __slots__ = ()

    @staticmethod
    def open() -> "BoundType":
        """The open-bound singleton (endpoint excluded)."""
        return _OPEN

    @staticmethod
    def closed() -> "BoundType":
        """The closed-bound singleton (endpoint included)."""
        return _CLOSED

    def flip(self):
        """Return the opposite bound type."""
        if self is _OPEN:
            return _CLOSED
        return _OPEN
class _Open(BoundType):
    # Concrete type of the _OPEN singleton; all behavior lives on BoundType.
    __slots__ = ()
class _Closed(BoundType):
    # Concrete type of the _CLOSED singleton; all behavior lives on BoundType.
    __slots__ = ()
# The two BoundType singletons promised by the forward declarations above.
# noinspection PyRedeclaration,PyTypeHints
_OPEN: BoundType = _Open()  # type: ignore
# noinspection PyRedeclaration,PyTypeHints
_CLOSED: BoundType = _Closed()  # type: ignore
# these need to be initialized after declaration of _Cut
# noinspection PyTypeHints
_BELOW_ALL: "_Cut" = None  # type:ignore
# noinspection PyTypeHints
_ABOVE_ALL: "_Cut" = None  # type:ignore
# T needs to be comparable, but Python typing seems to lack a way to specify this?
# see https://github.com/python/mypy/issues/500
# we track this with our own issue #201
T = TypeVar("T")
class _Cut(Generic[T], metaclass=ABCMeta):
    """
    Implementation detail for the internal structure of Range instances.
    Represents a unique way of "cutting" a "number line" into two sections; this can be done below
    a certain value, above a certain value, below all values or above all values.
    With this object defined in this way, an interval can always be represented by a pair of
    Cut instances.
    This is a Python port of Guava code originally written by <NAME>.
    """

    __slots__ = ()

    @property
    @abstractmethod
    def endpoint(self) -> T:
        """The concrete value this cut sits next to; the unbounded cuts raise instead."""
        raise NotImplementedError()

    @abstractmethod
    def is_less_than(self, other: T) -> bool:
        """Whether this cut falls below the value `other` on the number line."""
        raise NotImplementedError()

    @abstractmethod
    def as_upper_bound(self) -> BoundType:
        """The bound type (open/closed) this cut represents when used as an upper bound."""
        raise NotImplementedError()

    @abstractmethod
    def as_lower_bound(self) -> BoundType:
        """The bound type (open/closed) this cut represents when used as a lower bound."""
        raise NotImplementedError()

    @abstractmethod
    def describe_as_lower_bound(self) -> str:
        """Bracket-style rendering of this cut as a lower bound, e.g. "[5"."""
        raise NotImplementedError()

    @abstractmethod
    def describe_as_upper_bound(self) -> str:
        """Bracket-style rendering of this cut as an upper bound, e.g. "5)"."""
        raise NotImplementedError()

    def compare_to(self, other: "_Cut[T]") -> int:
        """Three-way comparison of cut positions (negative / zero / positive).

        At equal endpoints a _BelowValue cut sorts before an _AboveValue cut.
        """
        # overridden by BelowAll, AboveAll
        if other is _BELOW_ALL:
            return 1
        if other is _ABOVE_ALL:
            return -1
        if self.endpoint < other.endpoint:  # type: ignore
            return -1
        elif other.endpoint < self.endpoint:  # type: ignore
            return 1
        else:
            # BelowValue precedes AboveValue
            if isinstance(self, _AboveValue):
                if isinstance(other, _AboveValue):
                    return 0
                else:
                    return 1
            else:
                if isinstance(other, _AboveValue):
                    return -1
                else:
                    return 0

    # cannot use @totalordering because we want to compare across sub-classes
    # so we need to spell out all the operators
    def __lt__(self, other) -> bool:
        return self.compare_to(other) < 0

    def __le__(self, other) -> bool:
        return self.compare_to(other) <= 0

    def __gt__(self, other) -> bool:
        return self.compare_to(other) > 0

    def __ge__(self, other) -> bool:
        return self.compare_to(other) >= 0

    def __eq__(self, other) -> bool:
        return self.compare_to(other) == 0
@attrs(frozen=True, slots=True, hash=False, eq=False)
class _BelowAll(_Cut[T]):
    """The unique cut below every value: lower bound of ranges unbounded below."""
    # pylint:disable=protected-access

    @property
    def endpoint(self) -> T:
        raise ValueError("BelowAll cut lacks an endpoint")

    def is_less_than(self, other: T) -> bool:  # pylint:disable=unused-argument
        # below everything, so always True
        return True

    def as_upper_bound(self) -> BoundType:
        raise AssertionError("Should never be called")

    def as_lower_bound(self) -> BoundType:
        raise AssertionError("Should never be called")

    def describe_as_lower_bound(self) -> str:
        return "(-\u221e"  # Returns ( and a negative infinity

    def describe_as_upper_bound(self) -> str:
        raise AssertionError("Can't happen")

    def compare_to(self, other: "_Cut[T]") -> int:
        # we assume only the constant _BELOW_ALL is ever instantiated,
        # so identity comparison suffices; everything else sorts above us
        if other is self:
            return 0
        return -1

    def __hash__(self):
        # some arbitrary number
        return 233904909
@attrs(frozen=True, slots=True, hash=False, eq=False)
class _AboveAll(_Cut[T]):
    """The unique cut above every value: upper bound of ranges unbounded above."""
    # pylint:disable=protected-access

    @property
    def endpoint(self) -> T:
        raise ValueError("AboveAll cut lacks an endpoint")

    def is_less_than(self, other: T) -> bool:  # pylint:disable=unused-argument
        # above everything, so never less than any value
        return False

    def as_upper_bound(self) -> BoundType:
        raise AssertionError("Should never be called")

    def as_lower_bound(self) -> BoundType:
        raise AssertionError("Should never be called")

    def describe_as_lower_bound(self) -> str:
        raise AssertionError("Can't happen")

    def describe_as_upper_bound(self) -> str:
        return "+\u221e)"  # Returns positive infinity and )

    def compare_to(self, other: "_Cut[T]") -> int:
        # we assume only the constant _ABOVE_ALL is ever instantiated,
        # so identity comparison suffices; everything else sorts below us
        if other is self:
            return 0
        return 1

    def __hash__(self):
        # some arbitrary number
        return 9989388
# The two unbounded-cut singletons promised by the forward declarations above.
# noinspection PyRedeclaration
_BELOW_ALL = _BelowAll()
# noinspection PyRedeclaration
_ABOVE_ALL = _AboveAll()
@attrs(frozen=True, slots=True, repr=False, hash=False, eq=False)
class _BelowValue(_Cut[T]):
    """Cut immediately below a concrete endpoint: closed when used as a lower
    bound ("[v"), open when used as an upper bound ("v)")."""
    # pylint:disable=protected-access
    _endpoint = attrib()

    @property
    def endpoint(self) -> T:
        return self._endpoint

    def is_less_than(self, other: T) -> bool:
        # the cut sits just below _endpoint, so it is below any value >= _endpoint
        return self._endpoint < other or self._endpoint == other

    def as_upper_bound(self) -> BoundType:
        return _OPEN

    def as_lower_bound(self) -> BoundType:
        return _CLOSED

    def __hash__(self):
        return hash(self._endpoint)

    def __eq__(self, other):
        if isinstance(other, _BelowValue):
            return self._endpoint == other._endpoint
        return False

    def describe_as_lower_bound(self) -> str:
        return "[%s" % self._endpoint

    def describe_as_upper_bound(self) -> str:
        return "%s)" % self._endpoint

    def __repr__(self) -> str:
        return "\\\\%s/" % self._endpoint
@attrs(frozen=True, slots=True, repr=False, hash=False, eq=False)
class _AboveValue(_Cut[T]):
    """Cut immediately above a concrete endpoint: open when used as a lower
    bound ("(v"), closed when used as an upper bound ("v]")."""
    # pylint:disable=protected-access
    _endpoint = attrib()

    @property
    def endpoint(self) -> T:
        return self._endpoint

    def is_less_than(self, other: T) -> bool:
        # the cut sits just above _endpoint, so only strictly greater values are beyond it
        return self._endpoint < other

    def as_upper_bound(self) -> BoundType:
        return _CLOSED

    def as_lower_bound(self) -> BoundType:
        return _OPEN

    def __hash__(self):
        # bitwise complement to distinguish it from the corresponding _BelowValue
        return ~hash(self._endpoint)

    def __eq__(self, other):
        if isinstance(other, _AboveValue):
            return self._endpoint == other._endpoint
        return False

    def describe_as_lower_bound(self) -> str:
        return "(%s" % self._endpoint

    def describe_as_upper_bound(self) -> str:
        return "%s]" % self._endpoint

    def __repr__(self) -> str:
        return "/%s\\\\" % self._endpoint
# Forward declaration: must initialize after declaring Range (see bottom of class).
# noinspection PyTypeHints
RANGE_ALL: "Range" = None  # type: ignore
# this should have slots=True but cannot for the moment due to
# https://github.com/python-attrs/attrs/issues/313
# Pylint disable due to https://github.com/PyCQA/pylint/issues/2472
@attrs(frozen=True, repr=False, eq=False, hash=False)  # pylint: disable=inherit-non-class
class Range(Container[T], Generic[T], Hashable):
    """
    The boundaries of a contiguous span of values.
    The value must be of some type which implements `<` in a way consistent with `__eq__`.
    Note this does not provide a means of iterating over these values.
    Each end of the `Range` may be *bounded* or *unbounded*. If bounded, there is an associated
    *endpoint* value and the range is considered either *open* (does not include the endpoint
    value) or *closed* (does include the endpoint value). With three possibilities on each
    side, this yields nine basic types of ranges, enumerated belows.
    (Notation: a square bracket (`[ ]`) indicates that the range is closed on that side;
    a parenthesis (`( )`) means it is either open or unbounded. The construct `{x | statement}`
    is read "the set of all x such that statement.")
    ======== ========== ==============
    Notation Definition Factory method
    ======== ========== ==============
    `(a..b)` `{x | a < x < b}` open
    `[a..b]` `{x | a <= x <= b}` closed
    `(a..b]` `{x | a < x <= b}` open_closed
    `[a..b)` `{x | a <= x < b}` closed_open
    `(a..+∞)` `{x | x > a}` greater_than
    `[a..+∞)` `{x | x >= a}` at_least
    `(-∞..b)` `{x | x < b}` less_than
    `(-∞..b]` `{x | x <= b}` at_most
    `(-∞..+∞)` `{x}` all
    ========= =========== ==============
    When both endpoints exist, the upper endpoint may not be less than the lower. The endpoints may
    be equal only if at least one of the bounds is closed:
    * `[a..a]` : a singleton range
    * `[a..a)`; `(a..a]` : empty ranges; also valid
    * `(a..a)` : invalid; an exception will be thrown
    ========
    Warnings
    ========
    * Use immutable value types only, if at all possible. If you must use a mutable type, do not
    allow the endpoint instances to mutate after the range is created!
    =======
    Notes
    ========
    * Instances of this type are obtained using the static factory methods in this class.
    * Ranges are convex: whenever two values are contained, all values in between them must also be
    contained. More formally, for any `c1 <= c2 <= c3` of type `C`,
    `c1 in r and c3 in r` implies `c2 in r`. This means that a `Range[int]` can never
    be used to represent, say, "all prime numbers from 1 to 100."
    * Terminology note: a range `a` is said to be the maximal range having property `P` if,
    for all ranges `b` also having property `P`, `a.encloses(b)`. Likewise, `a` is minimal when
    `b.encloses(a)` for all `b` having property `P`. See, for example, the definition of
    intersection.
    This class (including the documentation) is an almost direct translation of Guava's Range,
    which was originally authored by <NAME> and <NAME>.
    """
    # pylint:disable=protected-access

    # a range is simply the pair of cuts delimiting it
    _lower_bound: _Cut[T] = attrib(validator=validators.instance_of(_Cut))
    _upper_bound: _Cut[T] = attrib(validator=validators.instance_of(_Cut))

    def __attrs_post_init__(self):
        # reject inverted bounds and the two cut combinations that cannot delimit a range
        check_arg(
            self._lower_bound <= self._upper_bound,
            "Upper bound of a range cannot be less than lower bound but got %s ",
            (self,),
        )
        check_arg(self._lower_bound is not _ABOVE_ALL)
        check_arg(self._upper_bound is not _BELOW_ALL)

    @staticmethod
    def open(lower: T, upper: T) -> "Range[T]":
        """`(lower..upper)`: excludes both endpoints."""
        return Range(_AboveValue(lower), _BelowValue(upper))

    @staticmethod
    def closed(lower: T, upper: T) -> "Range[T]":
        """`[lower..upper]`: includes both endpoints."""
        return Range(_BelowValue(lower), _AboveValue(upper))

    @staticmethod
    def closed_open(lower: T, upper: T) -> "Range[T]":
        """`[lower..upper)`: includes the lower endpoint only."""
        return Range(_BelowValue(lower), _BelowValue(upper))

    @staticmethod
    def open_closed(lower: T, upper: T) -> "Range[T]":
        """`(lower..upper]`: includes the upper endpoint only."""
        return Range(_AboveValue(lower), _AboveValue(upper))

    @staticmethod
    def less_than(upper: T) -> "Range[T]":
        """`(-∞..upper)`: everything strictly below `upper`."""
        return Range(_BELOW_ALL, _BelowValue(upper))

    @staticmethod
    def at_most(upper: T) -> "Range[T]":
        """`(-∞..upper]`: everything up to and including `upper`."""
        return Range(_BELOW_ALL, _AboveValue(upper))

    @staticmethod
    def greater_than(lower: T) -> "Range[T]":
        """`(lower..+∞)`: everything strictly above `lower`."""
        return Range(_AboveValue(lower), _ABOVE_ALL)

    @staticmethod
    def at_least(lower: T) -> "Range[T]":
        """`[lower..+∞)`: everything at or above `lower`."""
        return Range(_BelowValue(lower), _ABOVE_ALL)

    @staticmethod
    def all() -> "Range[T]":
        """`(-∞..+∞)`: the range containing every value (module singleton)."""
        return RANGE_ALL

    @staticmethod
    def create_spanning(ranges: Sequence["Range[T]"]):
        """Minimal range enclosing every range in `ranges`; raises ValueError if empty."""
        if not ranges:
            raise ValueError("Cannot create range from span of empty range collection")
        return Range(
            min(x._lower_bound for x in ranges), max(x._upper_bound for x in ranges)
        )

    def has_lower_bound(self) -> bool:
        """Whether this range is bounded below (not `(-∞..`)."""
        return self._lower_bound is not _BELOW_ALL

    def has_upper_bound(self) -> bool:
        """Whether this range is bounded above (not `..+∞)`)."""
        return self._upper_bound is not _ABOVE_ALL

    @property
    def lower_bound_type(self) -> BoundType:
        """Open/closed type of the lower bound."""
        return self._lower_bound.as_lower_bound()

    @property
    def upper_bound_type(self) -> BoundType:
        """Open/closed type of the upper bound."""
        return self._upper_bound.as_upper_bound()

    @property
    def lower_endpoint(self) -> T:
        """Lower endpoint value; raises ValueError when unbounded below."""
        return self._lower_bound.endpoint

    @property
    def upper_endpoint(self) -> T:
        """Upper endpoint value; raises ValueError when unbounded above."""
        return self._upper_bound.endpoint

    def is_empty(self) -> bool:
        """
        Determine if a range is empty.
        Returns `True` if this range is of the form `[v..v)` or `(v..v]`.
        (This does not encompass ranges of the form (v..v), because such ranges are invalid and
        can't be constructed at all.)
        Note that certain discrete ranges such as the integer range (3..4) are not considered empty,
        even though they contain no actual values.
        """
        return self._lower_bound == self._upper_bound

    # I don't know why mypy complains about narrowing the type, which seems a reasonable thing to do
    def __contains__(self, val: T) -> bool:  # type: ignore
        """Whether `val` lies within this range; `None` is rejected."""
        check_not_none(val)
        return self._lower_bound.is_less_than(val) and not self._upper_bound.is_less_than(
            val
        )

    def encloses(self, other: "Range[T]") -> bool:
        """True if the bounds of `other` lie within this range's bounds."""
        # noinspection PyChainedComparisons
        return (
            self._lower_bound.compare_to(other._lower_bound) <= 0
            and self._upper_bound.compare_to(other._upper_bound) >= 0
        )

    def is_connected(self, other: "Range[T]") -> bool:
        """
        Determine if two ranges are connected.
        Returns `True` if there exists a (possibly empty) range which is enclosed by both this
        range and `other`. For example,
        * `[2, 4)` and `[5, 7)` are not connected
        * `[2, 4)` and `[3, 5)` are connected, because both enclose `[3, 4)`
        * `[2, 4)` and `[4, 6)` are connected, because both enclose the empty range `[4, 4)`
        Note that this range and `other` have a well-defined union and intersection (as a
        single, possibly-empty range) if and only if this method returns `True`.
        The connectedness relation is both reflexive and symmetric, but does not form an
        equivalence relation as it is not transitive.
        Note that certain discrete ranges are not considered connected, even though there are
        no elements "between them." For example, `[3, 5]` is not considered connected to
        `[6, 10]`.
        """
        return (
            self._lower_bound <= other._upper_bound
            and other._lower_bound <= self._upper_bound
        )

    def span(self, other: "Range[T]") -> "Range[T]":
        """
        Get the minimal range enclosing both this range and `other`.
        For example, the span of `[ 1..3] and (5..7)` is `[1..7)`.
        If the input ranges are connected, the returned range can also be called their union.
        If they are not, note that the span might contain values that are not contained in either
        input range.
        Like intersection, this operation is commutative, associative and idempotent. Unlike it, it
        is always well-defined for any two input ranges.
        """
        lower_cmp = self._lower_bound.compare_to(other._lower_bound)
        upper_cmp = self._upper_bound.compare_to(other._upper_bound)
        # when one range already encloses the other, reuse that instance
        if lower_cmp <= 0 and upper_cmp >= 0:
            return self
        elif lower_cmp >= 0 and upper_cmp <= 0:
            return other
        else:
            return Range(
                self._lower_bound if lower_cmp <= 0 else other._lower_bound,
                self._upper_bound if upper_cmp >= 0 else other._upper_bound,
            )

    def intersection(self, connected_range: "Range[T]") -> "Range[T]":
        """
        Get the intersection of this range and `other`.
        Returns the maximal range enclosed by both this range and connectedRange, if such a
        range exists.
        For example, the intersection of `[1..5]` and `(3..7)` is `(3..5]`. The
        resulting range may be empty; for example, `[1..5)` intersected with `[5..7)`
        yields the empty range `[5..5)`.
        The intersection exists if and only if the two ranges are connected. This method throws
        a `ValueError` is `connected_range` is not in fact connected.
        The intersection operation is commutative, associative and idempotent, and its identity
        element is the `all` range.
        """
        lower_cmp = self._lower_bound.compare_to(connected_range._lower_bound)
        upper_cmp = self._upper_bound.compare_to(connected_range._upper_bound)
        # when one range encloses the other, the enclosed one is the intersection
        if lower_cmp >= 0 >= upper_cmp:
            return self
        elif lower_cmp <= 0 <= upper_cmp:
            return connected_range
        else:
            return Range(
                self._lower_bound if lower_cmp >= 0 else connected_range._lower_bound,
                self._upper_bound if upper_cmp <= 0 else connected_range._upper_bound,
            )

    def intersects(self, other_range: "Range[T]") -> bool:
        """
        Determine whether this range and `other_range` have an intersection.

        Returns `True` when either range encloses the other, or when the
        would-be intersection's bounds do not cross.  NOTE(review): adjacent
        ranges such as `[1..5)` and `[5..7)` return `True` here even though
        their intersection `[5..5)` is empty; callers needing a *non-empty*
        overlap should also check `intersection(...).is_empty()`.
        """
        lower_cmp = self._lower_bound.compare_to(other_range._lower_bound)
        upper_cmp = self._upper_bound.compare_to(other_range._upper_bound)
        if lower_cmp >= 0 >= upper_cmp:
            return True
        elif lower_cmp <= 0 <= upper_cmp:
            return True
        else:
            intersection_lb = (
                self._lower_bound if lower_cmp >= 0 else other_range._lower_bound
            )
            intersection_ub = (
                self._upper_bound if upper_cmp <= 0 else other_range._upper_bound
            )
            return intersection_lb <= intersection_ub

    def __eq__(self, other) -> bool:
        # two ranges are equal iff both cuts are equal
        if isinstance(other, Range):
            return (
                self._lower_bound == other._lower_bound
                and self._upper_bound == other._upper_bound
            )
        return False

    def __hash__(self) -> int:
        return hash(self._lower_bound) + 31 * hash(self._upper_bound)

    def __repr__(self) -> str:
        # e.g. "[1..5)" -- interval notation built from the two cuts
        return (
            self._lower_bound.describe_as_lower_bound()
            + ".."
            + self._upper_bound.describe_as_upper_bound()
        )
# The (-∞..+∞) singleton returned by Range.all().
# noinspection PyRedeclaration
RANGE_ALL = Range(_BELOW_ALL, _ABOVE_ALL)
# Pylint disable due to https://github.com/PyCQA/pylint/issues/2472
class RangeSet(
    Generic[T], Container[T], Sized, metaclass=ABCMeta
):  # pylint:disable=E0239
    """
    A set comprising zero or more nonempty, disconnected ranges of type `T`.
    Implementations that choose to support the `add(Range)` operation are required to ignore empty
    ranges and coalesce connected ranges. For example ::
    rangeSet: RangeSet[int] = TreeRangeSet();`
    rangeSet.add(Range.closed(1, 10)); // {[1, 10]}
    rangeSet.add(Range.closed_open(11, 15)); // disconnected range; {[1, 10], [11, 15)}
    rangeSet.add(Range.closed_open(15, 20)); // connected range; {[1, 10], [11, 20)}
    rangeSet.add(Range.open_closed(0, 0)); // empty range; {[1, 10], [11, 20)}
    rangeSet.remove(Range.open(5, 10)); // splits [1, 10]; {[1, 5], [10, 10], [11, 20)}
    Note that the behavior of `Range.isEmpty()` and `Range.isConnected(Range)` may not be as
    expected on discrete ranges. See the documentation of those methods for details.
    This (including the documentation) is a partial translation of Guava's RangeSet to Python.
    Guava's implementation was written by <NAME> and <NAME>.
    """

    __slots__ = ()

    @staticmethod
    def create_mutable() -> "MutableRangeSet[T]":
        """Create an empty mutable range set (sorted-dict backed)."""
        return _MutableSortedDictRangeSet.create()

    @abstractmethod
    def __contains__(self, value: T) -> bool:  # type: ignore
        """
        Determine whether any of this range set's member ranges contains `value`.
        """
        raise NotImplementedError()

    @abstractmethod
    def encloses(self, rng: Range[T]) -> bool:
        """
        Check if any member range encloses `rng`
        """
        raise NotImplementedError()

    def encloses_all(self, rngs: Union["RangeSet[T]", Iterable[Range[T]]]) -> bool:
        """
        For each input range, check if any member range encloses it.
        """
        if isinstance(rngs, RangeSet):
            return self.encloses_all(rngs.as_ranges())
        for rng in rngs:
            if not self.encloses(rng):
                return False
        return True

    @abstractmethod
    def intersects(self, rng: Range[T]) -> bool:
        """
        Get whether any ranges in this set intersects `rng`
        Returns `True` if there exists a non-empty range enclosed by both a member range in
        this range set and the specified range.
        """
        raise NotImplementedError()

    @abstractmethod
    def ranges_overlapping(self, rng: Range[T]) -> ImmutableSet[Range[T]]:
        """
        Get all ranges in this set that overlap (have an intersection) with `rng`.
        Unlike Guava's `intersectRanges`, this does not truncate partially intersecting ranges to
        just the intersecting portion.
        """
        raise NotImplementedError()

    @abstractmethod
    def range_containing(self, value: T) -> Optional[Range[T]]:
        """Return the member range containing `value`, or `None`."""
        raise NotImplementedError()

    @abstractmethod
    def range_enclosing_range(self, value: Range[T]) -> Optional[Range[T]]:
        """Return the member range that encloses the given range, or `None`."""
        raise NotImplementedError()

    @abstractmethod
    def ranges_enclosed_by(self, rng: Range[T]) -> ImmutableSet[Range[T]]:
        """Return all member ranges wholly enclosed by `rng`."""
        raise NotImplementedError()

    @abstractmethod
    def rightmost_containing_or_below(self, upper_limit: T) -> Optional[Range[T]]:
        """
        Get the rightmost range in this set whose lower bound does not exceed *upper_limit*.
        Formally, this is the range `(x, y)` with minimal `y` such that `(upper_limit, +inf)`
        does not contain `(x, y)`.
        If there is no such set, `None` is returned.
        For example::
        range_set: RangeSet[int] = immutablerangeset([
        Range.open_closed(1, 10)
        Range.open(12, 15)
        ])
        // range_set: {(1, 10], (12, 15)}
        range_set.rightmost_containing_or_below(3) // returns (1, 10]
        range_set.rightmost_containing_or_below(11) // returns (1, 10]
        range_set.rightmost_containing_or_below(12) // returns (1, 10]
        range_set.rightmost_containing_or_below(13) // returns (12, 15)
        range_set.rightmost_containing_or_below(15) // returns (12, 15)
        range_set.rightmost_containing_or_below(1) // returns None
        """

    @abstractmethod
    def leftmost_containing_or_above(self, lower_limit: T) -> Optional[Range[T]]:
        """
        Get the leftmost range in this set whose upper bound is not below *lower_limit*.
        Formally, this is the range `(x, y)` with maximal `x` such that `(-inf, lower_limit)`
        does not contain `(x, y)`.
        If there is no such set, `None` is returned.
        For example::
        range_set: RangeSet[int] = immutablerangeset([
        Range.open(1, 10)
        Range.open_closed(12, 15)
        ])
        // range_set: {(1, 10), (12, 15]}
        range_set.leftmost_containing_or_above(1) // returns (1, 10)
        range_set.leftmost_containing_or_above(3) // returns (1, 10)
        range_set.leftmost_containing_or_above(10) // returns (12, 15]
        range_set.leftmost_containing_or_above(11) // returns (12, 15]
        range_set.leftmost_containing_or_above(12) // returns (12, 15]
        range_set.leftmost_containing_or_above(13) // returns (12, 15]
        range_set.leftmost_containing_or_above(15) // returns (12, 15]
        range_set.leftmost_containing_or_above(16) // returns None
        """

    @deprecation.deprecated(
        deprecated_in="0.19.0",
        removed_in=date(2020, 8, 10),
        details="Deprecated, use rightmost_containing_or_below(upper_limit). "
        "This method may be removed in a future release.",
    )
    def maximal_containing_or_below(self, upper_limit: T) -> Optional[Range[T]]:
        # legacy alias kept for backwards compatibility
        return self.rightmost_containing_or_below(upper_limit)

    @deprecation.deprecated(
        deprecated_in="0.19.0",
        removed_in=date(2020, 8, 10),
        details="Deprecated, use leftmost_containing_or_above(upper_limit). "
        "This method may be removed in a future release.",
    )
    def minimal_containing_or_above(self, lower_limit: T) -> Optional[Range[T]]:
        # legacy alias kept for backwards compatibility
        return self.leftmost_containing_or_above(lower_limit)

    @abstractmethod
    def as_ranges(self) -> Sequence[Range[T]]:
        """Return this set's member ranges in ascending order."""
        raise NotImplementedError()

    def __eq__(self, other) -> bool:
        # two range sets are equal iff they contain the same member ranges
        if isinstance(other, RangeSet):
            return ImmutableSet.of(self.as_ranges()) == ImmutableSet.of(other.as_ranges())
        else:
            return False

    def __hash__(self) -> int:
        return hash(self.as_ranges())

    @abstractmethod
    def is_empty(self) -> bool:
        """
        Determine if this range set has no ranges.
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def span(self) -> Range[T]:
        """
        The minimal range which encloses all ranges in this range set.
        """
        raise NotImplementedError()

    def __repr__(self):
        return self.__class__.__name__ + "(" + str(self.as_ranges()) + ")"

    def __getstate__(self) -> Tuple[Range[T], ...]:
        # pickle support: a range set is fully described by its member ranges
        if self.is_empty():
            return ()
        return tuple(self.as_ranges())

    @abstractmethod
    def __setstate__(self, state: Iterable[Range[T]]) -> None:
        raise NotImplementedError()
# Second element type variable, used by nested generic classes below.
T2 = TypeVar("T2")
class ImmutableRangeSet(RangeSet[T], metaclass=ABCMeta):
    """
    A RangeSet which cannot be modified.
    If you reach into its guts and modify it, its behavior is undefined.
    """

    @staticmethod
    def builder() -> "ImmutableRangeSet.Builder[T]":
        """Create a builder for assembling an immutable range set."""
        return _ImmutableSortedDictRangeSet.Builder()

    class Builder(Generic[T2], metaclass=ABCMeta):
        """Accumulates ranges, then produces an ImmutableRangeSet via `build()`."""

        @abstractmethod
        def add(self, rng: Range[T2]) -> "ImmutableRangeSet.Builder[T2]":
            raise NotImplementedError()

        def add_all(self, ranges: Iterable[Range[T2]]) -> "ImmutableRangeSet.Builder[T2]":
            # convenience: add() each range; returns self for chaining
            for rng in ranges:
                self.add(rng)
            return self

        @abstractmethod
        def build(self) -> "ImmutableRangeSet[T2]":
            raise NotImplementedError()
class MutableRangeSet(RangeSet[T], metaclass=ABCMeta):
    """A RangeSet supporting in-place addition and removal of ranges."""

    __slots__ = ()

    def add(self, rng: Range[T]) -> "MutableRangeSet[T]":
        """
        Add the specified range to this RangeSet (optional operation).
        For equal range sets `a` and `b`, the result of `a.add(range)` is that `a` will be the
        minimal range set for which both `a.encloses_all(b)` and `a.encloses(range)`.
        Note that `range` will be coalesced with any ranges in the range set that are connected
        with it. Moreover, if range is empty, this is a no-op.
        Returns the RangeSet itself to facilitate chaining operations, especially in tests.
        """
        raise NotImplementedError()

    def add_all(
        self, rngs: Union["RangeSet[T]", Iterable[Range[T]]]
    ) -> "MutableRangeSet[T]":
        """
        Add all the specified ranges to this RangeSet (optional operation).
        Returns the RangeSet itself to facilitate chaining operations, especially in tests.
        """
        if isinstance(rngs, RangeSet):
            return self.add_all(rngs.as_ranges())
        for rng in rngs:
            self.add(rng)
        return self

    def clear(self) -> None:
        """
        Remove all ranges from this RangeSet (optional operation).
        After this operation, `c in this_range_set` will return `False` for all `c`.
        This is equivalent to `remove(Range.all())`.
        """
        raise NotImplementedError()

    def remove(self, rng: Range[T]) -> "MutableRangeSet[T]":
        """
        Remove the specified range from this RangeSet (optional operation).
        After this operation, if `rng.contains(c)`, `self.contains(c)` will return `False`.
        If `rng` is empty, this is a no-op.
        Returns the RangeSet itself to facilitate chaining operations, especially in tests.
        """
        raise NotImplementedError()

    def remove_all(
        self, rngs: Union["RangeSet[T]", Iterable[Range[T]]]
    ) -> "MutableRangeSet[T]":
        """
        Remove each specified range.
        Returns the RangeSet itself to facilitate chaining operations, especially in tests.
        """
        if isinstance(rngs, RangeSet):
            return self.remove_all(rngs.as_ranges())
        for rng in rngs:
            self.remove(rng)
        return self
# noinspection PyProtectedMember
class _SortedDictRangeSet(RangeSet[T], metaclass=ABCMeta):
# pylint:disable=protected-access
    def __init__(self, ranges_by_lower_bound: SortedDict) -> None:
        """Wrap a SortedDict mapping each member range's lower-bound cut to the range."""
        # we store the ranges as a map sorted by their lower bound
        # Note that because we enforce that there are no overlapping or connected ranges,
        # this sorts the ranges by upper bound as well
        self._ranges_by_lower_bound = ranges_by_lower_bound
    def range_containing(self, value: T) -> Optional[Range[T]]:
        """Return the member range containing `value`, or None.

        Because member ranges are disjoint and sorted by lower bound, only the
        rightmost range starting at or below the value can contain it.
        """
        # _value_at_or_below: helper defined later in this file; presumably returns
        # the mapped range with the greatest key <= the given cut — confirm there
        highest_range_beginning_at_or_below = _value_at_or_below(
            self._ranges_by_lower_bound, _BelowValue(value)
        )
        if highest_range_beginning_at_or_below:
            if value in highest_range_beginning_at_or_below:
                return highest_range_beginning_at_or_below
        return None
def range_enclosing_range(self, rng: Range[T]) -> Optional[Range[T]]:
# this implementation can be sped up
highest_range_beginning_at_or_below = _value_at_or_below(
self._ranges_by_lower_bound, rng._lower_bound
)
if (
highest_range_beginning_at_or_below
and highest_range_beginning_at_or_below.encloses(rng)
):
return highest_range_beginning_at_or_below
return None
def ranges_enclosed_by(self, query_rng: Range[T]) -> ImmutableSet[Range[T]]:
highest_range_at_or_above = _value_at_or_above(
self._ranges_by_lower_bound, query_rng._lower_bound
)
if highest_range_at_or_above:
start_idx = self._ranges_by_lower_bound.index(
highest_range_at_or_above._lower_bound
)
ret: ImmutableSet.Builder[Range[T]] = ImmutableSet.builder()
for idx in range(start_idx, len(self._ranges_by_lower_bound)):
rng_at_idx = self._ranges_by_lower_bound.values()[idx]
if query_rng.encloses(rng_at_idx):
ret.add(rng_at_idx)
else:
break
return ret.build()
else:
return immutableset()
# noinspection PyTypeHints
def __contains__(self, value: T) -> bool: # type: ignore
highest_range_beginning_at_or_below = _value_at_or_below(
self._ranges_by_lower_bound, _BelowValue(value)
)
return bool(
highest_range_beginning_at_or_below
and value in highest_range_beginning_at_or_below
)
def encloses(self, rng: Range[T]) -> bool:
highest_range_beginning_at_or_below = _value_at_or_below(
self._ranges_by_lower_bound, rng._lower_bound
)
return bool(
highest_range_beginning_at_or_below
and highest_range_beginning_at_or_below.encloses(rng)
)
def intersects(self, rng: Range[T]) -> bool:
check_not_none(rng)
ceiling_range: Optional[Range[T]] = _value_at_or_above(
self._ranges_by_lower_bound, rng._lower_bound
)
if (
ceiling_range
and ceiling_range.is_connected(rng)
and not ceiling_range.intersection(rng).is_empty()
):
return True
# check strictness of lowerEntry
lower_range: Optional[Range[T]] = _value_below(
self._ranges_by_lower_bound, rng._lower_bound
)
return bool(
lower_range
and lower_range.is_connected(rng)
and not lower_range.intersection(rng).is_empty()
)
def ranges_overlapping(self, rng: Range[T]) -> ImmutableSet[Range[T]]:
check_not_none(rng)
if self.is_empty():
return immutableset()
rlb = self._ranges_by_lower_bound
from_index = rlb.bisect(rng._lower_bound)
# If we would insert at the end (are greater than all the elements, the only range that
# could possibly overlap is the last one.
if from_index == len(rlb):
last_range: Range[T] = rlb[rlb.keys()[-1]]
if last_range.intersects(rng):
return immutableset([last_range])
return immutableset()
to_index = rlb.bisect(rng._upper_bound)
# If we would insert at the start (are smaller than all the elements, the only range that
# could possibly overlap is the first one.
if to_index == 0:
first_range: Range[T] = rlb[rlb.keys()[0]]
if first_range.intersects(rng):
return immutableset([first_range])
return immutableset()
return immutableset(
[
rlb[rlb.keys()[index]]
# The ranges at the extreme indices do not necessarily overlap,
for index in range(
max(0, from_index - 1), to_index
) # range method is not inclusive
# so this explicit check is needed.
if rlb[rlb.keys()[index]].intersects(rng)
]
)
def rightmost_containing_or_below(self, upper_limit: T) -> Optional[Range[T]]:
return _value_at_or_below(self._ranges_by_lower_bound, _BelowValue(upper_limit))
def leftmost_containing_or_above(self, lower_limit: T) -> Optional[Range[T]]:
sorted_dict = self._ranges_by_lower_bound
# an AboveValue cut corresponds to a closed upper interval, which catches containment
# as desired
# I have no idea why mypy is asking for an explicit type assignment here
limit_as_bound: _AboveValue = _AboveValue(lower_limit)
# insertion index into the sorted list of sets
idx = sorted_dict.bisect_left(limit_as_bound)
# so the index of the "latest" set with a lower bound preceding lower_limit is back one
containing_or_below_index = idx - 1
if containing_or_below_index >= 0:
# if such a set exists, we need to check if we are contained in it...
latest_beginning_before = sorted_dict[
sorted_dict.keys()[containing_or_below_index]
]
if limit_as_bound <= latest_beginning_before._upper_bound:
return latest_beginning_before
if idx < len(sorted_dict):
return sorted_dict[sorted_dict.keys()[idx]]
else:
return None
def as_ranges(self) -> Sequence[Range[T]]:
return self._ranges_by_lower_bound.values()
def is_empty(self) -> bool:
return len(self._ranges_by_lower_bound) == 0
@property
def span(self) -> Range[T]:
if self.is_empty():
raise ValueError("Can't take span of an empty RangeSet")
return Range(
self._ranges_by_lower_bound.values()[0]._lower_bound,
self._ranges_by_lower_bound.values()[-1]._upper_bound,
)
def immutable_copy(self) -> ImmutableRangeSet[T]:
return _ImmutableSortedDictRangeSet(self._ranges_by_lower_bound.copy())
def __repr__(self):
return repr(list(self.as_ranges()))
def __len__(self) -> int:
return len(self._ranges_by_lower_bound)
def __setstate__(self, state: Iterable[Range[T]]) -> None:
self._ranges_by_lower_bound = SortedDict(
[(rng._lower_bound, rng) for rng in state]
)
class _MutableSortedDictRangeSet(_SortedDictRangeSet[T], MutableRangeSet[T]):
    """
    The mutable SortedDict-backed RangeSet.  Added ranges are coalesced with any
    overlapping or connected existing ranges.
    """
    # pylint:disable=protected-access
    @staticmethod
    def create() -> "MutableRangeSet[T]":
        """Return a new, empty mutable range set."""
        return _MutableSortedDictRangeSet(SortedDict())
    def add(self, range_to_add: Range[T]) -> "MutableRangeSet[T]":
        """Add *range_to_add*, coalescing with overlapping/connected stored ranges."""
        if range_to_add.is_empty():
            return self
        # the range we actually need to add may not correspond exactly to range_to_add
        # because it may overlap or be connected to existing ranges
        # lb_to_add and ub_to_add will form the range we actually add
        lb_to_add = range_to_add._lower_bound
        ub_to_add = range_to_add._upper_bound
        range_below_lb: Optional[Range[T]] = _value_below(
            self._ranges_by_lower_bound, lb_to_add
        )
        # is there any range which begins strictly before range_to_add's lower bound?
        # if so, range_to_add's beginning might lie within that range...
        if range_below_lb and lb_to_add <= range_below_lb._upper_bound:
            # and we need to coalesce with it by extending the bounds to include
            # that range's lower bound
            lb_to_add = range_below_lb._lower_bound
            # if the upper bound exceeds ours, too, then our range to add is entirely
            # contained within range_below_lb. Since this is already in the set, we
            # have nothing to do
            if ub_to_add < range_below_lb._upper_bound:
                return self
        # now we need to check for coalescing and connectedness on the upper end
        range_below_ub: Optional[Range[T]] = _value_at_or_below(
            self._ranges_by_lower_bound, ub_to_add
        )
        if range_below_ub and ub_to_add < range_below_ub._upper_bound:
            ub_to_add = range_below_ub._upper_bound
        # any ranges which lie within the range we are getting ready to add are subsumed in it
        _clear(self._ranges_by_lower_bound, lb_to_add, ub_to_add)
        self._replace_range_with_same_lower_bound(Range(lb_to_add, ub_to_add))
        return self
    def add_all(
        self, ranges_to_add: Union["RangeSet[T]", Iterable[Range[T]]]
    ) -> "MutableRangeSet[T]":
        """Add every given range; see :meth:`add`."""
        if isinstance(ranges_to_add, RangeSet):
            return self.add_all(ranges_to_add.as_ranges())
        for rng in ranges_to_add:
            self.add(rng)
        return self
    def clear(self) -> None:
        """Remove every range from this set."""
        _clear(self._ranges_by_lower_bound, _BELOW_ALL, _ABOVE_ALL)
    def remove(self, rng: Range[T]) -> "MutableRangeSet[T]":
        raise NotImplementedError(
            "I didn't need this, so I didn't bother to implement it yet."
        )
    def _replace_range_with_same_lower_bound(self, rng: Range[T]) -> None:
        # An empty range is represented by removing the entry outright.
        if rng.is_empty():
            del self._ranges_by_lower_bound[rng._lower_bound]
        else:
            self._ranges_by_lower_bound[rng._lower_bound] = rng
class _ImmutableSortedDictRangeSet(ImmutableRangeSet[T], _SortedDictRangeSet[T]):
    """
    An implementation of ImmutableRangeSet
    This should never be directly created by the user. In particular if something maintains
    a reference to sorted_dict, then all immutability guarantees are broken.
    """
    class Builder(ImmutableRangeSet.Builder[T2]):
        # Delegates to a private mutable set so the builder gets coalescing for free.
        def __init__(self):
            self._mutable_builder = _MutableSortedDictRangeSet.create()
        def add(self, rng: Range[T2]) -> "ImmutableRangeSet.Builder[T2]":
            """Add *rng* to the set under construction; returns self for chaining."""
            self._mutable_builder.add(rng)
            return self
        def build(self) -> "ImmutableRangeSet[T2]":
            """Return the finished immutable range set."""
            return self._mutable_builder.immutable_copy()
# Type variables for RangeMap key and value types.
K = TypeVar("K")
V = TypeVar("V")
class RangeMap(Generic[K, V], metaclass=ABCMeta):
    """
    A mapping from disjoint nonempty ranges to values.
    Queries look up the value associated with the range (if any) that contains a specified key.
    In contrast to RangeSet, no "coalescing" is done of connected ranges, even if they are mapped
    to the same value.
    Note that this does not extend `Mapping` because you can't iterate over the keys.
    This (including the documentation) is a partial translation of Guava's RangeMap to Python.
    Guava's implementation was written by <NAME>.
    """
    __slots__ = ()
    @abstractmethod
    def __contains__(self, key: K) -> bool:
        """
        Determine whether any of this range set's key ranges contains `key`.
        """
        raise NotImplementedError()
    @abstractmethod
    def get_enclosed_by(self, rng: Range[K]) -> ImmutableSet[V]:
        """
        Get values mapped to by any key in `rng`.
        """
        raise NotImplementedError()
    @abstractmethod
    def get_from_rightmost_containing_or_below(self, key: K):
        """
        Get the value associated with the rightmost range in this set whose lower bound does not
        exceed *upper_limit*.
        Formally, this is the value associated with the range `(x, y)` with minimal `y` such that
        `(upper_limit, +inf)` does not contain `(x, y)`.
        If there is no such set, `None` is returned.
        For example::
            range_map: RangeMap[int, int] = immutablerangemap([
                (Range.open_closed(1, 10), 36), // (1, 10) maps to 36
                (Range.open(12, 15), 17) // (12, 15) maps to 17
            ])
            // range keys: {(1, 10], (12, 15)}
            range_map.get_from_rightmost_containing_or_below(1) // returns None
            range_map.get_from_rightmost_containing_or_below(3) // returns 36
            range_map.get_from_rightmost_containing_or_below(11) // returns 36
            range_map.get_from_rightmost_containing_or_below(12) // returns 36
            range_map.get_from_rightmost_containing_or_below(13) // returns 17
            range_map.get_from_rightmost_containing_or_below(15) // returns 17
        """
    @abstractmethod
    def get_from_leftmost_containing_or_above(self, key: K):
        """
        Get the value associated with the leftmost range in this set whose upper bound is not below
        *lower_limit*.
        Formally, this is the value associated with the range `(x, y)` with maximal `x` such that
        `(-inf, lower_limit)` does not contain `(x, y)`.
        If there is no such set, `None` is returned.
        For example::
            range_map: RangeSet[int] = immutablerangemap([
                (Range.open(1, 10), 5),
                (Range.open_closed(12, 15), 7)
            ])
            // range keys: {(1, 10), (12, 15]}
            range_map.get_from_leftmost_containing_or_above(1) // returns 5
            range_map.get_from_leftmost_containing_or_above(3) // returns 5
            range_map.get_from_leftmost_containing_or_above(10) // returns 7
            range_map.get_from_leftmost_containing_or_above(11) // returns 7
            range_map.get_from_leftmost_containing_or_above(12) // returns 7
            range_map.get_from_leftmost_containing_or_above(13) // returns 7
            range_map.get_from_leftmost_containing_or_above(15) // returns 7
            range_map.get_from_leftmost_containing_or_above(16) // returns None
        """
    @deprecation.deprecated(
        deprecated_in="0.19.0",
        removed_in=date(2020, 8, 10),
        details="Deprecated, use get_from_rightmost_containing_or_below(upper_limit). "
        "This method may be removed in a future release.",
    )
    def get_from_maximal_containing_or_below(self, key: K):
        """Deprecated alias for :meth:`get_from_rightmost_containing_or_below`."""
        return self.get_from_rightmost_containing_or_below(key)
    # NOTE(review): the details message below names
    # "get_from_leftmost_containing_or_below", but this method delegates to
    # get_from_leftmost_containing_or_above; the message text appears to be a typo.
    @deprecation.deprecated(
        deprecated_in="0.19.0",
        removed_in=date(2020, 8, 10),
        details="Deprecated, use get_from_leftmost_containing_or_below(key). "
        "This method may be removed in a future release.",
    )
    def get_from_minimal_containing_or_above(self, key: K):
        """Deprecated alias for :meth:`get_from_leftmost_containing_or_above`."""
        return self.get_from_leftmost_containing_or_above(key)
    def __eq__(self, other) -> bool:
        # Two RangeMaps are equal iff they hold the same (range, value) mappings.
        if isinstance(other, RangeMap):
            return ImmutableSet.of(self.as_dict()) == ImmutableSet.of(other.as_dict())
        else:
            return False
    @abstractmethod
    def as_dict(self) -> Mapping[Range[K], V]:
        """Return this map's contents as a mapping from range keys to values."""
        raise NotImplementedError()
    def __hash__(self) -> int:
        return hash(self.as_dict())
    @abstractmethod
    def is_empty(self) -> bool:
        """
        Determine if this range map has no mappings.
        """
        raise NotImplementedError()
    def __repr__(self):
        return self.__class__.__name__ + "(" + str(self.as_dict()) + ")"
# necessary for builder because an inner class cannot share type variables with its outer class
K2 = TypeVar("K2")
V2 = TypeVar("V2")
# this should have slots=True but cannot for the moment due to
# https://github.com/python-attrs/attrs/issues/313
@attrs(frozen=True, repr=False)
class ImmutableRangeMap(Generic[K, V], RangeMap[K, V]):
    """
    An immutable RangeMap backed by an immutable dict of range -> value, plus a
    derived ImmutableRangeSet over the keys for fast containment lookups.
    """
    # The (range, value) mappings; range keys must be disjoint and unconnected.
    rng_to_val: ImmutableDict[Range[K], V] = attrib(converter=immutabledict)
    # Derived index over the key ranges, built by _init_range_set below.
    range_set: ImmutableRangeSet[K] = attrib(init=False)
    def __attrs_post_init__(self) -> None:
        # Building the range set coalesces connected/overlapping keys, so a length
        # mismatch means the caller supplied such keys, which is unsupported.
        if len(self.rng_to_val) != len(self.range_set):
            raise ValueError(
                "Some range keys are connected or overlapping. Overlapping keys "
                "will never be supported. Support for connected keys is tracked in "
                "https://github.com/isi-vista/vistautils/issues/37"
            )
    @staticmethod
    @deprecation.deprecated(
        deprecated_in="0.19.0",
        removed_in=date(2020, 8, 10),
        details="Deprecated - prefer the module-level factory ``immutablerangemap`` with no "
        "arguments.",
    )
    def empty() -> "ImmutableRangeMap[K, V]":
        """Deprecated: return an empty ImmutableRangeMap."""
        return ImmutableRangeMap(immutabledict())
    @staticmethod
    def builder() -> "ImmutableRangeMap.Builder[K, V]":
        """Return a builder for incrementally constructing an ImmutableRangeMap."""
        return ImmutableRangeMap.Builder()
    def __contains__(self, key: K) -> bool:
        return key in self.range_set
    def get_enclosed_by(self, rng: Range[K]) -> ImmutableSet[V]:
        """Get values mapped to by any key range entirely enclosed by `rng`."""
        ret: ImmutableSet.Builder[V] = ImmutableSet.builder()
        for rng_key in self.range_set.ranges_enclosed_by(rng):
            ret.add(self.rng_to_val[rng_key])
        return ret.build()
    def is_empty(self) -> bool:
        return self.range_set.is_empty()
    def __getitem__(self, k: K) -> Optional[V]:
        # Returns None (rather than raising) when no key range contains k.
        containing_range = self.range_set.range_containing(k)
        return self.rng_to_val[containing_range] if containing_range else None
    def as_dict(self) -> Mapping[Range[K], V]:
        return self.rng_to_val
    def get_from_rightmost_containing_or_below(self, key: K) -> Optional[V]:
        probe_range = self.range_set.rightmost_containing_or_below(key)
        return self.rng_to_val[probe_range] if probe_range else None
    def get_from_leftmost_containing_or_above(self, key: K) -> Optional[V]:
        probe_range = self.range_set.leftmost_containing_or_above(key)
        return self.rng_to_val[probe_range] if probe_range else None
    def __reduce__(self):
        # __getstate__/__setstate__ cannot be used because the implementation is frozen.
        _repr = ()
        if not self.is_empty():
            _repr = tuple(self.as_dict().items())
        return (immutablerangemap, (_repr,))
    @range_set.default # type: ignore
    def _init_range_set(self) -> ImmutableRangeSet[K]:
        # attrs default factory: index the key ranges for containment queries.
        return ( # type: ignore
            ImmutableRangeSet.builder() # type: ignore
            .add_all(self.rng_to_val.keys()) # type: ignore
            .build() # type: ignore
        )
    class Builder(Generic[K2, V2]):
        def __init__(self):
            self.rng_to_val = ImmutableDict.builder()
        def put(self, key: Range[K2], val: V2) -> "ImmutableRangeMap.Builder[K2, V2]":
            """Map the key range to *val*; returns self for chaining."""
            self.rng_to_val.put(key, val)
            return self
        def build(self) -> "ImmutableRangeMap[K2, V2]":
            """Return the finished immutable range map."""
            return ImmutableRangeMap(self.rng_to_val.build())
def immutablerangemap(
    mappings: Optional[Iterable[Tuple[Range[K], V]]] = None
) -> ImmutableRangeMap[K, V]:
    """
    Create an :class:`ImmutableRangeMap` from an iterable of (range, value) pairs.

    With no argument, returns an empty map.  Also serves as the pickle factory
    for :meth:`ImmutableRangeMap.__reduce__`.
    """
    return ImmutableRangeMap(immutabledict(mappings))
# utility functions for SortedDict to give it an interface more like Java's NavigableMap
def _value_below(sorted_dict: SortedDict, key: T) -> Optional[Any]:
    """
    Get item for greatest key strictly less than the given key.

    Returns None if there is no such key.
    """
    # bisect_left returns the insertion point for key (in [0, len]), so the
    # entry one slot to the left, if any, has the greatest key strictly less
    # than `key`.  Because bisect_left(key) <= len(sorted_dict), the resulting
    # index is always valid when non-negative, so the old clamp to
    # len(sorted_dict) - 1 was unreachable dead code and has been removed.
    idx = sorted_dict.bisect_left(key) - 1
    if idx < 0:
        return None
    return sorted_dict[sorted_dict.keys()[idx]]
def _value_at_or_below(sorted_dict: SortedDict, key: T) -> Optional[Any]:
    """
    Get item for greatest key less than or equal to a given key.

    Returns None if there is no such key.
    """
    if not sorted_dict:
        return None
    insertion_idx = sorted_dict.bisect_left(key)
    keys = sorted_dict.keys()
    # Exact hit: bisect_left landed on an equal key.
    if insertion_idx < len(sorted_dict) and key == keys[insertion_idx]:
        return sorted_dict[key]
    # Otherwise fall back to the predecessor, if one exists.
    if insertion_idx == 0:
        return None
    return sorted_dict[keys[insertion_idx - 1]]
def _value_at_or_above(sorted_dict: SortedDict, key: T) -> Optional[Any]:
    """
    Get item for least key greater than or equal to the given key, or None if
    every key is smaller (or the dict is empty).
    """
    if not sorted_dict:
        return None
    ceiling_idx = sorted_dict.bisect_left(key)
    if ceiling_idx == len(sorted_dict):
        return None
    return sorted_dict[sorted_dict.keys()[ceiling_idx]]
def _clear(
    sorted_dict: SortedDict, start_key_inclusive: T, stop_key_exclusive: T
) -> None:
    """
    Delete every entry whose key lies in [start_key_inclusive, stop_key_exclusive).
    """
    # Materialize the key range first: mutating the SortedDict while iterating
    # one of its views may not be safe.
    doomed_keys = list(
        sorted_dict.irange(
            start_key_inclusive, stop_key_exclusive, inclusive=(True, False)
        )
    )
    for doomed_key in doomed_keys:
        del sorted_dict[doomed_key]
| StarcoderdataPython |
11245572 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 20:47:26 2018
@author: misskeisha
"""
import math
class Polygon(object):
    """A regular polygon with ``n`` sides, each of length ``s``."""

    def __init__(self, n, s):
        self.n = n
        self.s = s

    def parameter(self):
        """Return the perimeter (historical name kept for compatibility)."""
        return self.s * self.n

    def area(self):
        """Return the area via the standard regular-polygon formula."""
        denominator = 4 * math.tan(math.pi / self.n)
        return (self.n * self.s ** 2) / denominator
# Quick demo: a regular pentagon with side length 3.
demo_polygon = Polygon(5, 3)
print(demo_polygon.parameter())
print(demo_polygon.area())
| StarcoderdataPython |
11362624 | <reponame>tqtifnypmb/ML<filename>tensorflow_tutorial/MLP_keras.py<gh_stars>0
import tensorflow as tf
import numpy as np
from tensorflow import keras
from math import floor
from sklearn import datasets
from sklearn import preprocessing
class MLP:
    """
    A minimal multi-layer perceptron classifier wrapping a Keras Sequential
    model (one hidden ReLU layer + a softmax output layer).
    Attribute names use the sklearn-style trailing underscore convention.
    """
    def __init__(self, learning_rate = 1e-3):
        # Learning rate for the Adam optimizer.
        self.learning_rate_ = learning_rate
        # Built lazily on the first call to fit().
        self.mlp_ = None
    def _create_model(self, n_features, n_hidden, n_classes):
        # Build and compile the two-layer network.
        model = keras.Sequential()
        activation_layer = keras.layers.Dense(n_hidden,
                                              input_dim=n_features,
                                              activation='relu')
        model.add(activation_layer)
        output_layer = keras.layers.Dense(n_classes, activation='softmax')
        model.add(output_layer)
        optimizer = tf.train.AdamOptimizer(self.learning_rate_)
        # NOTE(review): hinge loss is unusual with a softmax output and one-hot
        # labels; categorical cross-entropy is the conventional pairing — confirm
        # this loss choice is intentional.
        model.compile(optimizer,
                      loss=tf.losses.hinge_loss,
                      metrics=['accuracy'])
        self.mlp_ = model
    def fit(self, X, y):
        """Train on X (n_samples, n_features) against one-hot labels y."""
        n_samples = X.shape[0]
        n_features = X.shape[1]
        # Heuristic hidden width: half the sample count, but at least 4 units.
        n_hidden = max(n_samples / 2, 4)
        n_hidden = int(floor(n_hidden))
        n_classes = y.shape[1]
        if self.mlp_ is None:
            self._create_model(n_features, n_hidden, n_classes)
        self.mlp_.fit(X, y, batch_size=80, epochs=200)
    def predict(self, X):
        """Return class-probability predictions for X."""
        return self.mlp_.predict(X)
    def evaluate(self, X, y):
        """Return (loss, accuracy) for X against one-hot labels y."""
        return self.mlp_.evaluate(X, y)
if __name__ == '__main__':
    # Demo: train and evaluate on the iris dataset.
    data = datasets.load_iris()
    X = data.data
    # One-hot encode the integer class labels for the softmax output layer.
    y = data.target.reshape(-1, 1)
    y = preprocessing.OneHotEncoder().fit_transform(y).toarray()
    mlp = MLP(1e-2)
    mlp.fit(X, y)
    # NOTE(review): evaluated on the training data itself, so the reported
    # accuracy is optimistic — no train/test split is performed.
    loss, acc = mlp.evaluate(X, y)
    print(acc)
    print(loss)
6653221 | <reponame>stanwood/traidoo-api<gh_stars>1-10
import pytest
from model_bakery import baker
from products.models import Product
pytestmark = pytest.mark.django_db
def test_fallback_to_image_url(client_admin, traidoo_region):
    """When a product only has an image URL, the API exposes it as ``image``."""
    product = baker.make(
        "products.product", image_url="foo.png", region=traidoo_region
    )
    response = client_admin.get(f"/products/{product.id}")
    assert response.data["image"] == "foo.png"
def test_get_product(
    buyer, client_buyer, traidoo_region, delivery_options, traidoo_settings
):
    """Exact-payload check of the product detail endpoint for a buyer."""
    # Fixture data: seller, category, container and the product under test.
    seller = baker.make(
        "users.user",
        business_license=None,
        city="Test City",
        company_name="Test Company",
        description="Test description of the test company",
        first_name="<NAME>",
        id=4567,
        id_photo=None,
        image=None,
        last_name="<NAME>",
    )
    category = baker.make_recipe(
        "categories.category",
        default_vat=None,
        id=123,
        name="Test category",
        ordering=None,
        parent=None,
    )
    container = baker.make(
        "containers.container",
        delivery_fee=None,
        deposit=None,
        id=345,
        image=None,
        image_url=None,
        size_class="Test size class",
        standard=None,
        volume=456.78,
    )
    product = baker.make(
        "products.product",
        amount=123.45,
        description="Test description of the test product",
        id=98765,
        image=None,
        is_gluten_free=False,
        is_gmo_free=False,
        is_grazing_animal=False,
        is_organic=False,
        is_vegan=False,
        name="Test Product",
        price=99.98,
        unit=None,
        vat=19,
        seller=seller,
        category=category,
        container_type=container,
        region=traidoo_region,
        delivery_options=delivery_options,
        sellers_product_identifier="test123",
        ean13="12345678",
        ean8="12345678",
    )
    product.tags.add("tag1")
    response = client_buyer.get(f"/products/{product.id}")
    region_settings = traidoo_region.settings.first()
    # With central logistics enabled, the "traidoo" delivery option is exposed.
    assert region_settings.central_logistics_company
    # The whole serialized payload is asserted field by field.
    assert response.json() == {
        "amount": 123.45,
        "category": {
            "defaultVat": None,
            "icon": {
                "id": category.icon.id,
                "iconUrl": category.icon.icon_url,
                "name": category.icon.name,
            },
            "id": 123,
            "name": "Test category",
            "ordering": None,
            "parent": None,
        },
        "containerType": {
            "deliveryFee": None,
            "deposit": None,
            "id": 345,
            "image": None,
            "imageUrl": None,
            "sizeClass": "Test size class",
            "standard": None,
            "volume": 456.78,
        },
        "delivery": {"logistics": 1851.38, "pickup": 0.0, "seller": 0.0},
        "deliveryCharge": 0.0,
        "deliveryOptions": [
            {"id": 0, "name": "traidoo"},
            {"id": 1, "name": "seller"},
            {"id": 2, "name": "buyer"},
        ],
        "description": "Test description of the test product",
        "id": 98765,
        "image": None,
        "isGlutenFree": False,
        "isGmoFree": False,
        "isGrazingAnimal": False,
        "isOrganic": False,
        "isVegan": False,
        "itemsAvailable": None,
        "name": "Test Product",
        "price": 99.98,
        "region": {
            "id": traidoo_region.id,
            "name": traidoo_region.name,
            "slug": traidoo_region.slug,
        },
        "regions": [],
        "seller": {
            "businessLicense": None,
            "city": "Test City",
            "companyName": "Test Company",
            "description": "Test description of the test company",
            "firstName": "<NAME>",
            "id": 4567,
            "idPhoto": None,
            "image": None,
            "lastName": "<NAME>",
        },
        "unit": None,
        "vat": 19.0,
        "tags": [
            {
                "id": product.tags.first().id,
                "name": "tag1",
                "slug": "tag1",
            }
        ],
        "sellersProductIdentifier": "test123",
        "thirdPartyDelivery": False,
        "ean13": "12345678",
        "ean8": "12345678",
    }
def test_get_product_no_central_logistic_company(
    buyer, client_buyer, traidoo_region, delivery_options, traidoo_settings
):
    """
    Without a central logistics company, the "traidoo" delivery option must be
    filtered out of the serialized product even when assigned to the product.
    """
    traidoo_settings.central_logistics_company = False
    traidoo_settings.save()
    # Fixture data mirrors test_get_product above.
    seller = baker.make(
        "users.user",
        business_license=None,
        city="Test City",
        company_name="Test Company",
        description="Test description of the test company",
        first_name="<NAME>",
        id=4567,
        id_photo=None,
        image=None,
        last_name="<NAME>",
    )
    category = baker.make_recipe(
        "categories.category",
        default_vat=None,
        id=123,
        name="Test category",
        ordering=None,
        parent=None,
    )
    container = baker.make(
        "containers.container",
        delivery_fee=None,
        deposit=None,
        id=345,
        image=None,
        image_url=None,
        size_class="Test size class",
        standard=None,
        volume=456.78,
    )
    product = baker.make(
        "products.product",
        amount=123.45,
        description="Test description of the test product",
        id=98765,
        image=None,
        is_gluten_free=False,
        is_gmo_free=False,
        is_grazing_animal=False,
        is_organic=False,
        is_vegan=False,
        name="Test Product",
        price=99.98,
        unit=None,
        vat=19,
        seller=seller,
        category=category,
        container_type=container,
        region=traidoo_region,
        delivery_options=delivery_options,
    )
    response = client_buyer.get(f"/products/{product.id}")
    assert response.status_code == 200
    region_settings = traidoo_region.settings.first()
    assert not region_settings.central_logistics_company
    # The product itself still has all three options in the database...
    product_delivery_options = product.delivery_options.order_by("id").values(
        "id", "name"
    )
    assert list(product_delivery_options) == [
        {"id": 0, "name": "traidoo"},
        {"id": 1, "name": "seller"},
        {"id": 2, "name": "buyer"},
    ]
    # ...but the API response omits the central-logistics ("traidoo") option.
    parsed_response = response.json()
    assert parsed_response["deliveryOptions"] == [
        {"id": 1, "name": "seller"},
        {"id": 2, "name": "buyer"},
    ]
def test_get_products(client_buyer, traidoo_region):
    """
    Product listing should include products available in the current region,
    ordered so that the region's own products come before foreign ones.
    """
    region_1 = baker.make("common.region", name="Test 1")
    region_2 = baker.make("common.region", name="Test 2")
    region_3 = baker.make("common.region", name="Test 3")
    # Products from regions other than current
    for _ in range(20):
        baker.make("products.product", region=region_3, regions=[traidoo_region])
        baker.make("products.product", region=region_2, regions=[traidoo_region])
        baker.make("products.product", region=region_1, regions=[traidoo_region])
        # Products form region "Test 3" but not available in traidoo region
        baker.make("products.product", region=region_1, regions=[])
    # Products from MsSwiss region
    for _ in range(100):
        baker.make("products.product", region=traidoo_region)
    assert Product.objects.count() == 180
    first_page = client_buyer.get(
        "/products",
        data={"offset": 0, "limit": 100},
        **{"HTTP_REGION": traidoo_region.slug},
    )
    assert first_page.status_code == 200
    parsed_response = first_page.json()
    # Total products
    # 180 total minus the 20 products not made available in the current region.
    assert parsed_response["count"] == 160
    # First page (100 products) should be form traidoo region
    assert all(
        [
            product["region"]["id"] == traidoo_region.id
            for product in parsed_response["results"]
        ]
    )
    second_page = client_buyer.get(
        "/products",
        data={"offset": 100, "limit": 100},
        **{"HTTP_REGION": traidoo_region.slug},
    )
    assert second_page.status_code == 200
    parsed_response = second_page.json()
    # Second page contains only the foreign-region products.
    assert all(
        [
            product["region"]["id"] != traidoo_region.id
            for product in parsed_response["results"]
        ]
    )
| StarcoderdataPython |
104449 | <reponame>kuraakhilesh8230/aries-cloudagent-python
from asynctest import TestCase as AsyncTestCase
from ..indy import V20CredExRecordIndy
class TestV20CredExRecordIndy(AsyncTestCase):
    """Equality/inequality behaviour of V20CredExRecordIndy records."""
    async def test_record(self):
        # Two records constructed with identical fields must compare equal.
        same = [
            V20CredExRecordIndy(
                cred_ex_indy_id="dummy-0",
                cred_ex_id="abc",
                cred_request_metadata={"a": 1, "b": 2},
                rev_reg_id=None,
                cred_rev_id=None,
            )
        ] * 2
        # Records differing in cred_ex_id or revocation fields must be unequal.
        diff = [
            V20CredExRecordIndy(
                cred_ex_indy_id="dummy-1",
                cred_ex_id="def",
                cred_request_metadata={"a": 1, "b": 2},
                rev_reg_id=None,
                cred_rev_id=None,
            ),
            V20CredExRecordIndy(
                cred_ex_indy_id="dummy-1",
                cred_ex_id="ghi",
                cred_request_metadata={"a": 1, "b": 2},
                rev_reg_id=None,
                cred_rev_id=None,
            ),
            V20CredExRecordIndy(
                cred_ex_indy_id="dummy-1",
                cred_ex_id="def",
                cred_request_metadata={"a": 1, "b": 2},
                rev_reg_id="rev-reg-id",
                cred_rev_id="cred-rev-id",
            ),
        ]
        # Every pair within `same` is equal (including each with itself).
        for i in range(len(same) - 1):
            for j in range(i, len(same)):
                assert same[i] == same[j]
        # Within `diff`, records equal themselves but differ pairwise.
        for i in range(len(diff) - 1):
            for j in range(i, len(diff)):
                assert diff[i] == diff[j] if i == j else diff[i] != diff[j]
        assert same[0].cred_ex_indy_id == "dummy-0"
| StarcoderdataPython |
5194779 | """
The base classes for the styling.
"""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import with_metaclass
__all__ = (
'Attrs',
'DEFAULT_ATTRS',
'ANSI_COLOR_NAMES',
'Style',
'DynamicStyle',
)
#: Style attributes.
#: A lightweight, immutable record of every visual attribute a style can set.
Attrs = namedtuple('Attrs', 'color bgcolor bold underline italic blink reverse')
"""
:param color: Hexadecimal string. E.g. '000000' or Ansi color name: e.g. 'ansiblue'
:param bgcolor: Hexadecimal string. E.g. 'ffffff' or Ansi color name: e.g. 'ansired'
:param bold: Boolean
:param underline: Boolean
:param italic: Boolean
:param blink: Boolean
:param reverse: Boolean
"""
#: The default `Attrs`: no colors, all flags off.
DEFAULT_ATTRS = Attrs(color=None, bgcolor=None, bold=False, underline=False,
                      italic=False, blink=False, reverse=False)
#: ``Attrs.bgcolor/fgcolor`` can be in either 'ffffff' format, or can be any of
#: the following in case we want to take colors from the 8/16 color palette.
#: Usually, in that case, the terminal application allows to configure the RGB
#: values for these names.
ANSI_COLOR_NAMES = [
    'ansiblack', 'ansiwhite', 'ansidefault',
    # Low intensity.
    'ansired', 'ansigreen', 'ansiyellow', 'ansiblue', 'ansifuchsia', 'ansiturquoise', 'ansilightgray',
    # High intensity. (Not supported everywhere.)
    'ansidarkgray', 'ansidarkred', 'ansidarkgreen', 'ansibrown', 'ansidarkblue',
    'ansipurple', 'ansiteal',
]
class Style(with_metaclass(ABCMeta, object)):
    """
    Abstract base class for prompt_toolkit styles.
    """
    @abstractmethod
    def get_attrs_for_token(self, token):
        """
        Return :class:`.Attrs` for the given token.
        """
    @abstractmethod
    def invalidation_hash(self):
        """
        Invalidation hash for the style. When this changes over time, the
        renderer knows that something in the style changed, and that everything
        has to be redrawn.
        """
class DynamicStyle(Style):
    """
    Style that delegates to another, dynamically chosen Style instance.

    :param get_style: Callable that returns a :class:`.Style` instance.
    """
    def __init__(self, get_style):
        self.get_style = get_style

    def get_attrs_for_token(self, token):
        delegate = self.get_style()
        assert isinstance(delegate, Style)
        return delegate.get_attrs_for_token(token)

    def invalidation_hash(self):
        delegate = self.get_style()
        return delegate.invalidation_hash()
| StarcoderdataPython |
3555804 | <reponame>nitramsivart/provenance<gh_stars>10-100
"""initial schema
Revision ID: e0317ab07ba4
Revises:
Create Date: 2017-03-13 13:33:59.644604
"""
import sqlalchemy as sa
import sqlalchemy.dialects.postgresql as pg
from alembic import op
# revision identifiers, used by Alembic.
revision = 'e0317ab07ba4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial provenance schema: runs, artifacts, and artifact sets."""
    # Join table linking artifact sets to their member artifacts.
    op.create_table(
        'artifact_set_members',
        sa.Column('set_id', sa.VARCHAR(length=40), nullable=False),
        sa.Column('artifact_id', sa.VARCHAR(length=40), nullable=False),
        sa.PrimaryKeyConstraint('set_id', 'artifact_id'),
    )
    # Named collections of artifacts.
    op.create_table(
        'artifact_sets',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('set_id', sa.VARCHAR(length=40), nullable=True),
        sa.Column('name', sa.VARCHAR(length=1000), nullable=True),
        sa.Column('created_at', pg.TIMESTAMP(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    # One row per pipeline run.
    op.create_table(
        'runs',
        sa.Column('id', sa.VARCHAR(length=40), nullable=False),
        sa.Column('hostname', sa.VARCHAR(length=256), nullable=True),
        sa.Column('info', pg.JSONB(), nullable=True),
        sa.Column('created_at', pg.TIMESTAMP(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    # Individual artifacts, each tied to the run that produced it.
    op.create_table(
        'artifacts',
        sa.Column('id', sa.VARCHAR(length=40), nullable=False),
        sa.Column('value_id', sa.VARCHAR(length=50), nullable=True),
        sa.Column('run_id', sa.VARCHAR(length=40), nullable=True),
        sa.Column('name', sa.VARCHAR(length=1000), nullable=True),
        sa.Column('version', sa.INTEGER(), nullable=True),
        sa.Column('fn_module', sa.VARCHAR(length=100), nullable=True),
        sa.Column('fn_name', sa.VARCHAR(length=100), nullable=True),
        sa.Column('composite', sa.BOOLEAN(), nullable=True),
        sa.Column('value_id_duration', sa.FLOAT(), nullable=True),
        sa.Column('compute_duration', sa.FLOAT(), nullable=True),
        sa.Column('hash_duration', sa.FLOAT(), nullable=True),
        sa.Column('computed_at', pg.TIMESTAMP(), nullable=True),
        sa.Column('added_at', pg.TIMESTAMP(), nullable=True),
        sa.Column('input_artifact_ids', pg.ARRAY(pg.VARCHAR(length=40)), nullable=True),
        sa.Column('inputs_json', pg.JSONB(), nullable=True),
        sa.Column('serializer', sa.VARCHAR(length=128), nullable=True),
        sa.Column('load_kwargs', pg.JSONB(), nullable=True),
        sa.Column('dump_kwargs', pg.JSONB(), nullable=True),
        sa.Column('custom_fields', pg.JSONB(), nullable=True),
        sa.ForeignKeyConstraint(
            ['run_id'],
            ['runs.id'],
        ),
        sa.PrimaryKeyConstraint('id'),
    )
def downgrade():
    """Drop every table created by upgrade(), children before parents."""
    # artifacts references runs via its foreign key, so it must go first.
    for table_name in ('artifacts', 'runs', 'artifact_sets', 'artifact_set_members'):
        op.drop_table(table_name)
| StarcoderdataPython |
4959882 | # This framework folder is the Discord Framework my bot use.
# You can use the code in this directory for your bot.
# I am not really planning on uploading it to PyPI though...
from inspect import getmembers, getsource, signature
from discord.ext import commands as _commands
from requests import post as _post_message
from functools import wraps as _wraps
from gc import collect as _collect
from traceback import format_exc
from datetime import datetime
from aiohttp import FormData
from io import BytesIO
from json import dumps
from os import getenv
import discord as dpy
import signal as sg
import asyncio
from .oreo import Oreo
from .parser import Parser
from .panel import CustomPanel
from .colorthief import Smart_ColorThief
from .message import embed, Paginator, ChooseEmbed, WaitForMessage
from .util import Util, error_message, SpotifyAPI
from .games import GuessTheFlag, Slot, TicTacToe, RockPaperScissors, GeographyQuiz, MathQuiz, GuessAvatar, Trivia, GuessMyNumber, Hangman
from .canvas import ServerCard, UserCard, ProfileCard, GDLevel, Blur, ImageClient
from .database import Database
from .lego import legofy
# Attachment file extensions the framework keeps (see _message_init below);
# anything else is dropped from Message.attachments.
VALID_FILENAMES = ('png', 'jpg', 'jpeg', 'gif', 'svg')
# Message fields whose discord.py parse handlers get stubbed out to a no-op,
# so the library skips work on data the bot never reads.
UNUSED_MESSAGE_HANDLERS = (
    "pinned",
    "flags",
    "application",
    "activity",
    "mention_everyone",
    "tts",
    "type",
    "attachments",
    "embeds",
    "nonce",
    "mention_roles",
    "call"
)
def get_prefix(config_file: str = "config.ini"):
    """Read the bot's command prefix from the [bot] section of *config_file*.

    Falls back to "1" when the section has no ``prefix`` key. Intended to be
    called once at startup, hence the function-local import.
    """
    from configparser import ConfigParser  # i don't recommend calling this function more than once
    parser = ConfigParser()
    parser.read(config_file)
    return parser["bot"].get("prefix", "1")
def modify_discord_py_functions():
    """
    Modify the discord.py functions and variables that are unused by the bot.
    This also changes how the bot caches stuff.
    Things changed such as:
    - How the bot starts.
    - How the bot shuts down.
    - How the bot handles messages.
    - Added several custom functions here and there.
    - Several functions now uses the lower-level functions of discord.py.
    - The bot now only caches messages made by the bot itself.
    - The bot now caches guild emojis as string.
    - The bot doesn't cache other bots.
    - The discord.py's Message object doesn't retrieve the stickers, embeds, nonce, role_mentions, etc.
    - The discord.py's Attachment object is not used. <message>.attachments will return an array of attachment URLs instead.

    NOTE(review): this pokes at discord.py private internals
    (``_HTTPClient__session``, ``_state`` attrs, handler names), so it is
    pinned to whatever discord.py version the project ships with — confirm
    before upgrading the library.
    """
    ### BEGIN MADNESS ###
    # Strip selfbot-only methods off dpy.User by sniffing their source text
    # for the "non-bot accounts" marker string.
    for name, value in getmembers(dpy.User):
        if name.startswith("__") or value.__class__.__name__ != "function" or "can only be used by non-bot accounts" not in getsource(value):
            continue
        delattr(dpy.User, name)
    # Replace parse handlers for unused message fields with a no-op.
    for handler in UNUSED_MESSAGE_HANDLERS:
        setattr(dpy.Message, f"_handle_{handler}", (lambda s, *a: None))
    ### REPLACEMENT IMPLEMENTATIONS (monkey-patched in via setattr at the bottom) ###
    # Minimal re-implementation of discord.py's command invocation wrapper:
    # passes at most the first 500 chars of the message, split into args.
    def _hooked_wrapped_callback(command, ctx, coro):
        @_wraps(coro)
        async def wrapped():
            try:
                extra_args = ctx.message.content[:500].split()[1:]
                await coro(command.cog, ctx, *(extra_args if extra_args and len(signature(coro).parameters) > 2 else tuple()))
            except asyncio.CancelledError:
                ctx.command_failed = True
                return
            except Exception as exc:
                ctx.command_failed = True
                raise _commands.CommandInvokeError(exc) from exc
            finally:
                await command.call_after_hooks(ctx)
            return None
        return wrapped
    # Stamp an embed with the "executed by" footer, timestamp, and bot colour.
    def _embed_add_useless_stuff(self, ctx, disable_color: bool = False):
        self._footer = {
            "text": "Command executed by "+str(ctx.author),
            "icon_url": str(ctx.author.avatar_url)
        }
        self.timestamp = datetime.now()
        if not disable_color:
            self.colour = ctx.me.colour
        return self
    # Download an image URL (or take a BytesIO) and send it as a file reply.
    async def _send_image(self, url, alexflipnote: bool = False, content: str = None, file_format="png"):
        try:
            if isinstance(url, BytesIO):
                return await self.bot.http.send_files(self.channel.id, content=content, files=[dpy.File(url, f"file.{file_format}")], reference=self.message)
            session = self.bot.http._HTTPClient__session if not alexflipnote else self.bot.util.alex_client if alexflipnote else None  # NOTE(review): see original line below
            session = self.bot.util.alex_client if alexflipnote else self.bot.http._HTTPClient__session
            async with session.get(url) as data:
                _bytes = await data.read()
                assert data.status < 400, "API returns a bad status code"
                assert data.headers['Content-Type'].startswith("image/"), "Content does not have an image."
                extension = "." + data.headers['Content-Type'][6:].lower()
                # memegen images get their watermark strip cropped off first.
                buffer = self.bot.util._crop_out_memegen(self, _bytes) if url.startswith("https://api.memegen.link/") else BytesIO(_bytes)
                await self.bot.http.send_files(self.channel.id, content=content, files=[dpy.File(buffer, f"file{extension}")], reference=self.message)
                del extension, _bytes, data, buffer
                _collect()
        except Exception as e:
            raise self.error_message(f"Image not found.\n{str(e)}")
    # Send a green "success" embed, optionally self-deleting after a delay.
    async def _success_embed(self, message=None, description=None, delete_after=None):
        response = await self._state.http.send_message(self.channel.id, content="", embed={
            "title": message,
            "description": description,
            "color": 3066993
        })
        if delete_after:
            await asyncio.sleep(delete_after)
            return await self._state.http.delete_message(channel_id=self.channel.id, message_id=response["id"])
    # ctx.embed(...): build the framework Embed and send it.
    async def _send_embed(self, *args, **kwargs):
        _builtin_embed = self.bot.Embed(self, *args, **kwargs)
        await _builtin_embed.send()
        del _builtin_embed
    # MESSAGE_CREATE gateway parser: ignore DMs/webhooks/system messages and
    # other bots; dispatch human messages, cache only the bot's own messages.
    def _parse_message_create(self, data):
        if (not hasattr(self, "_is_ready")) or data["type"] or (not data.get("guild_id")) or data.get("webhook_id"):
            return
        elif data["author"].get('bot') and (not data["author"]["id"] == str(self.user.id)):
            return
        channel, _ = self._get_guild_channel(data)
        if not data["author"].get("bot"): self.dispatch('message', dpy.Message(channel=channel, data=data, state=self))
        else: self._messages.append(dpy.Message(channel=channel, data=data, state=self))
        del channel, data
    # Replacement for BotBase.process_commands: cheap prefix check, manual
    # cooldown/concurrency handling, then the hooked callback above.
    async def _run_command(self, message):
        if not message.content.startswith(self.command_prefix):
            return
        ctx = _commands.Context(prefix=self.command_prefix, bot=self, message=message, args=message.content.split(" ")[1:])
        if not ctx.channel.permissions_for(ctx.me).send_messages:
            return # what's the point of running a command if the bot doesn't have the perms to send messages kekw
        try:
            command_name = message.content[len(self.command_prefix):].split()[0]
            ctx.command = self.all_commands[command_name.lower()]
            if not await ctx.command.can_run(ctx):
                return
            if ctx.command._max_concurrency:
                await ctx.command._max_concurrency.acquire(ctx)
            try:
                ctx.command._prepare_cooldowns(ctx)
            except:
                if ctx.command._max_concurrency:
                    await ctx.command._max_concurrency.release(ctx)
                raise
            await _hooked_wrapped_callback(ctx.command, ctx, ctx.command.callback)()
        except (KeyError, IndexError):
            # unknown command name or empty content after the prefix
            return
        except Exception as exc:
            self.dispatch("command_error", ctx, exc, format_exc())
        else:
            self.command_uses += 1
            del ctx, command_name
    # Drop a guild and its cached emoji strings from the state caches.
    def _remove_guild(self, guild):
        self._guilds.pop(guild.id, None)
        # emojis are cached as "<a:name:id>" strings; parse the id back out
        for emoji in map(lambda x: int(x.split(':')[2].split('>')[0]), guild.emojis):
            self._emojis.pop(emoji, None)
        del guild
        _collect()
    # Shutdown hook: announce downtime to the status channel (synchronous
    # HTTP on purpose — the loop may already be dying), persist run stats,
    # then stop the event loop. Idempotent via self._is_closed.
    def _event_on_close(self):
        print("Closing bot...")
        if hasattr(self, "_is_closed") and self._is_closed:
            print("Closing process closed because the bot is already closed.")
            return
        total_uptime = self.util.strfsecond(datetime.now().timestamp() - self.util._start)
        _post_message(f"{dpy.http.Route.BASE}/channels/{self.util.status_channel}/messages", headers={ "Authorization": f"Bot {self.http.token}", "Content-Type": "application/json" }, data=dumps({
            "embed": { "title": "Bot is down :(", "description": "The bot is down.", "color": 16711680, "fields": [
                {"name": "Commands run throughout run-time",
                "value": str(self.command_uses), "inline": True}, { "name": "Total Uptime",
                "value": total_uptime, "inline": True}
            ], "footer": { "text": "Please note that not all down-times are due to hosting problems. This could be a bug-fix or a development update." } }
        }))
        data = self.db.get("config", {"h": True})
        current_time = datetime.now()
        # keep only the 20 most recent downtime records
        if len(data["down_times"]) > 20:
            data["down_times"].pop(0)
        data["down_times"].append(f'{current_time.strftime("%Y-%m-%dT%H:%M:%SZ")}|{self.util.strfsecond(current_time.timestamp() - self.util._start)}')
        self.db.modify("config", self.db.types.CHANGE, {"h": True}, {
            "commands_run": data["commands_run"] + self.command_uses,
            "down_times": data["down_times"],
            "online": False,
            "spotify_credentials": {
                "token": self.spotify._token,
                "expiry_date": self.spotify._token_expiry_date
            }
        })
        self._is_closed = True
        del data, current_time, total_uptime
        self.loop.stop()
    # Cache a user only if it is a real (non-bot, non-deleted) account.
    def _store_user(self, data):
        user_id = int(data["id"])
        try:
            return self._users[user_id]
        except:
            user = dpy.User(state=self, data=data)
            # "0000" discriminator marks deleted/system users — don't cache those
            if (data["discriminator"] != "0000") and (not data.get("bot")):
                self._users[user_id] = user
            return user
    # Cache an emoji as a lightweight tuple and return its mention string
    # instead of a full dpy.Emoji object.
    def _store_emoji(self, guild, data):
        guild_data = (guild,) if guild else ()
        self._emojis[int(data["id"])] = (data["name"], int(data["id"]), data.get("animated", False), dpy.utils.snowflake_time(int(data["id"])), bool(guild), *guild_data)
        return f"<{'a' if data.get('animated') else ''}:{data['name']}:{data['id']}>"
    # Replacement for Client.run: installs SIGINT/SIGTERM handlers and does
    # the full loop-teardown dance (cancel tasks, close asyncgens/session).
    def _run_bot(self, *args, **kwargs):
        loop = self.loop
        try:
            loop.add_signal_handler(sg.SIGINT, self.event_on_close)
            loop.add_signal_handler(sg.SIGTERM, self.event_on_close)
        except:
            pass  # e.g. Windows event loops without signal support
        async def runner():
            try:
                await self.start(*args, **kwargs)
            finally:
                if not self.is_closed():
                    await self.close()
                    self.event_on_close()
        def stop_loop_on_completion(f):
            loop.stop()
        future = asyncio.ensure_future(runner(), loop=loop)
        future.add_done_callback(stop_loop_on_completion)
        try:
            loop.run_forever()
        finally:
            future.remove_done_callback(stop_loop_on_completion)
            self.event_on_close()
            try:
                # asyncio.Task.all_tasks was removed in newer Pythons
                try:
                    task_retriever = asyncio.Task.all_tasks
                except:
                    task_retriever = asyncio.all_tasks
                tasks = {t for t in task_retriever(loop=loop) if not t.done()}
                if not tasks:
                    return
                for task in tasks:
                    task.cancel()
                loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
                for task in tasks:
                    if task.cancelled():
                        continue
                    if task.exception() is not None:
                        loop.call_exception_handler({
                            'message': 'Unhandled exception during Client.run shutdown.',
                            'exception': task.exception(),
                            'task': task
                        })
                loop.run_until_complete(loop.shutdown_asyncgens())
                loop.run_until_complete(self.http._HTTPClient__session.close())
            finally:
                loop.close()
        if not future.cancelled():
            try:
                return future.result()
            except KeyboardInterrupt:
                return
    # GUILD_EMOJIS_UPDATE parser: refresh the guild's emoji-string cache.
    def _parse_guild_emojis_update(self, data):
        guild = self._get_guild(int(data['guild_id']))
        if not guild:
            return
        before_emojis = guild.emojis
        for emoji in map(lambda x: int(x.split(':')[2].split('>')[0]), before_emojis):
            self._emojis.pop(emoji, None)
        guild.emojis = tuple(map(lambda d: self.store_emoji(guild, d), data['emojis'])) # don't dispatch since we're not using it
    # Slimmed-down Message.__init__: only the fields the bot actually uses;
    # attachments become a plain list of image URLs (see VALID_FILENAMES).
    def _message_init(self, *, state, channel, data):
        self._state = state
        self.id = int(data['id'])
        self.attachments = [a['url'] for a in data['attachments'] if a.get('url') and (a['filename'].split('.')[-1].lower() in VALID_FILENAMES)] if data['attachments'] else []
        self.channel = channel
        self.type = dpy.MessageType.default
        self._edited_timestamp = dpy.utils.parse_time(data['edited_timestamp'])
        self.content = data['content']
        self.reactions = []
        for h in ('author', 'member', 'mentions'):
            try: getattr(self, f'_handle_{h}')(data[h])
            except: continue
    # Message.edit via the raw HTTP endpoint (content + optional embed only).
    async def _message_edit(self, **fields):
        kwargs = { "content": fields.get("content", "") }
        if (embed := fields.get("embed")):
            kwargs['embed'] = embed.to_dict()
        return await self._state.http.edit_message(self.channel.id, self.id, **kwargs)
    # Raw send_message: always suppresses pings, supports reply references.
    async def _raw_send_message(self, channel_id, **kwargs):
        r = dpy.http.Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
        kwargs['allowed_mentions'] = {'replied_user': False, 'parse': []}
        if (reference := kwargs.pop('reference', None)):
            kwargs['message_reference'] = { 'message_id': reference.id, 'guild_id': reference.guild.id }
        return await self.request(r, json=kwargs)
    # Raw send_files: multipart upload of a single file + JSON payload.
    async def _raw_send_files(self, channel_id, files, reference=None, **kwargs):
        r = dpy.http.Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
        json = { 'content': kwargs.pop('content', ''), 'allowed_mentions': {'replied_user': False, 'parse': []} }
        if reference:
            json['message_reference'] = { 'message_id': reference.id, 'guild_id': reference.guild.id }
        form = FormData()
        form.add_field('payload_json', dpy.utils.to_json(json))
        form.add_field('file', files[0].fp, filename=files[0].filename, content_type='application/octet-stream')
        return await self.request(r, data=form, files=files)
    # Messageable.send replacement built on the raw helpers above.
    async def _send_message(self, content = "", embed = None, file = None, delete_after = None, reference = None):
        channel = await self._get_channel()
        if embed and (not isinstance(embed, dict)):
            embed = embed.to_dict()
        if file:
            try: data = await self._state.http.send_files(channel.id, files=[file], content=content, embed=embed, reference=reference)
            finally: file.close()
        else:
            data = await self._state.http.send_message(channel.id, content=content, embed=embed, reference=reference)
        msg = dpy.Message(state=self._state, channel=channel, data=data)
        if delete_after:
            await msg.delete(delay=delete_after)
        return msg
    ### END MADNESS ###
    # Wire all replacements into discord.py, then drop the local references.
    delattr(dpy.Client, "fetch_user_profile") # this one is made for selfbots only
    delattr(dpy.Embed, "__len__")
    setattr(dpy.Message, "__init__", _message_init)
    setattr(dpy.Message, "edit", _message_edit)
    setattr(dpy.Message, "__repr__", lambda s: f"<Message id={s.id} channel={s.channel!r} author={s.author!r}>")
    setattr(dpy.abc.Messageable, "send", _send_message)
    setattr(dpy.Client, "run", _run_bot)
    setattr(dpy.Client, "event_on_close", _event_on_close)
    setattr(_commands.bot.BotBase, "run_command", _run_command)
    setattr(_commands.Context, "error_message", error_message)
    setattr(_commands.Context, "embed", _send_embed)
    setattr(_commands.Context, "send_image", _send_image)
    setattr(_commands.Context, "success_embed", _success_embed)
    setattr(dpy.state.ConnectionState, "parse_message_create", _parse_message_create)
    setattr(dpy.state.ConnectionState, "store_emoji", _store_emoji)
    setattr(dpy.state.ConnectionState, "store_user", _store_user)
    setattr(dpy.state.ConnectionState, "_remove_guild", _remove_guild)
    setattr(dpy.state.ConnectionState, "parse_guild_emojis_update", _parse_guild_emojis_update)
    setattr(dpy.http.HTTPClient, "send_message", _raw_send_message)
    setattr(dpy.http.HTTPClient, "send_files", _raw_send_files)
    setattr(dpy.Embed, "add_useless_stuff", _embed_add_useless_stuff)
    del _send_message, _raw_send_message, _raw_send_files, _message_edit, _message_init, _run_bot, _event_on_close, _run_command, _send_embed, _send_image, _success_embed, _parse_message_create, _store_emoji, _store_user, _remove_guild, _parse_guild_emojis_update, _embed_add_useless_stuff
    _collect()
def initiate(client, db_name: str = "username601"):  # no stop calling me yanderedev 2.0 (because i am)
    """Bolt every framework component, game, renderer and service onto *client*.

    Attribute assignment order matches the original hand-written list; the
    database and Spotify client are wired up last so their credentials can be
    read back from the ``config`` document.
    """
    # Plain class/function references exposed as client attributes.
    components = {
        "slot": Slot,
        "oreo": Oreo,
        "Blur": Blur,
        "lego": legofy,
        "Embed": embed,
        "Parser": Parser,
        "Trivia": Trivia,
        "GDLevel": GDLevel,
        "Hangman": Hangman,
        "Panel": CustomPanel,
        "MathQuiz": MathQuiz,
        "UserCard": UserCard,
        "TicTacToe": TicTacToe,
        "ServerCard": ServerCard,
        "rps": RockPaperScissors,
        "GeoQuiz": GeographyQuiz,
        "ProfileCard": ProfileCard,
        "GuessAvatar": GuessAvatar,
        "ChooseEmbed": ChooseEmbed,
        "EmbedPaginator": Paginator,
    }
    for attribute, component in components.items():
        setattr(client, attribute, component)
    # ImageClient is the one component instantiated with the client itself.
    client.Image = ImageClient(client)
    for attribute, component in {
        "GuessTheFlag": GuessTheFlag,
        "GuessMyNumber": GuessMyNumber,
        "ColorThief": Smart_ColorThief,
        "WaitForMessage": WaitForMessage,
    }.items():
        setattr(client, attribute, component)
    client.db = Database(getenv("DB_LINK"), db_name)
    # Reuse a previously persisted Spotify token when one exists.
    credentials = client.db.get('config', {'h': True}).get('spotify_credentials')
    client.spotify = SpotifyAPI(
        client,
        getenv("SPOTIFY_CREDENTIALS"),
        token=credentials['token'] if credentials else None,
        token_expiry_date=credentials['expiry_date'] if credentials else None,
    )
    Util(client)
8199047 | <gh_stars>10-100
import tensorflow as tf
import os
import sys
import data_generation
import networks
import scipy.io as sio
import param
import util
import cv2
import truncated_vgg
from keras.backend.tensorflow_backend import set_session
from keras.optimizers import Adam
from PIL import Image
import numpy as np
from html4vision import Col, imagetable
from skimage.measure import compare_ssim, compare_psnr, compare_mse
import pickle
def test(model_name, save_dir, gpu_id, vid_i, iter_num=9999, dbg=False):
    """Run posewarp inference for one test video; dump frames and metrics.

    model_name: folder under params['model_save_dir'] holding the weights.
    save_dir:   directory the generated PNGs and the score pickle go to.
    gpu_id:     CUDA device index, pinned via CUDA_VISIBLE_DEVICES.
    vid_i:      test-video index used to pick the test/train image lists.
    iter_num:   checkpoint iteration to load ("{iter_num}.h5").
    dbg:        if True, process only the first frame.
    """
    params = param.get_general_params()
    img_width = params['IMG_WIDTH']
    img_height = params['IMG_HEIGHT']
    test_feed, dir_len = data_generation.create_test_feed(params, 5, vid_i=vid_i, txtfile=f'../testset_5_v3/test_{vid_i}_img.txt', k_txtfile=f'../testset_5_v3/train_{vid_i}_img.txt')
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))
    # VGG is used only as a frozen perceptual-loss network.
    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')
    model = networks.network_posewarp(params)
    weight_path = str(os.path.join(params['model_save_dir'], os.path.join(f"{model_name}", f'{iter_num}.h5'))) # model name doesn't super work
    model.load_weights(weight_path)
    model.compile(optimizer=Adam(lr=1e-4), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
    model.summary()
    n_iters = params['n_training_iter']
    gen = np.zeros((dir_len, 3, 256, 256))
    # per-frame rows of [SSIM, PSNR, MSE]
    scores = np.zeros((dir_len, 3))
    for j in range(1 if dbg else dir_len):
        try:
            x, y, scale, pos, img_num, src = next(test_feed)
            arr_loss = model.predict_on_batch(x)
        except cv2.error as e:
            print("OpenCV Error, gonna ignore")
            continue
        i = 0
        # network outputs are in [-1, 1]; map back to [0, 256)
        generated = (arr_loss[i] + 1) * 128
        gen_resized = data_generation.reverse_center_and_scale_image(generated, img_width, img_height, pos, scale)
        target = (y[i] + 1) * 128
        target_resized = data_generation.reverse_center_and_scale_image(target, img_width, img_height, pos, scale)
        source = (x[0][i] + 1) * 128
        # resized_source = cv2.resize(source, (0, 0), fx=2, fy=2)
        # source_resized = data_generation.reverse_center_and_scale_image(source, img_width, img_height, pos, scale)
        modified_img = data_generation.add_source_to_image(gen_resized, src)
        cv2.imwrite(save_dir + f'/{img_num:08d}.png', modified_img)
        gen[j] = np.transpose(generated, (2, 0, 1))
        scores[j][0] = compare_ssim(generated, target, multichannel=True, data_range=256)
        scores[j][1] = compare_psnr(generated, target, data_range=256)
        scores[j][2] = compare_mse(generated, target)
    mean_scores = scores.mean(axis=0)
    std_scores = scores.std(axis=0)
    print(mean_scores)
    print(std_scores)
    save_dict = os.path.join(save_dir, f"saved_scores_{vid_i}.pkl")
    pickle.dump( scores, open( save_dict, "wb" ) )
def test_all(model_name, exp_name, gpu_id, iter_num=9999, dbg=False):
    """
    model_name: pass in names of folder that holds weights
    exp_name: folder name of where the visualizations will go
    gpu_id: 0,1,2,3
    iter_num: number of iterations finetuned on - specified in weight names usually
    dbg: do one run of each to see if all the videos work correctly
    """
    for vid_index in range(2, 9):
        print(vid_index)
        # Output folder for this video's generated frames.
        output_dir = f'/data/jl5/data-meta/experiments/{exp_name}/viz/{vid_index}/generated/'
        try:
            os.makedirs(output_dir, exist_ok=False)
        except FileExistsError:
            print("Files already exist")
            answer = input("Would you like to continue anyways?\n")
            if answer.lower() != "yes":
                exit()
        test(model_name, output_dir, gpu_id, vid_index, iter_num, dbg)
def test_all_scaled(gpu_id, iter_num=6999, dbg=False):
    """
    model_name: pass in names of folder that holds weights
    exp_name: folder name of where the visualizations will go
    gpu_id: 0,1,2,3
    iter_num: number of iterations finetuned on - specified in weight names usually
    dbg: do one run of each to see if all the videos work correctly
    """
    # One fine-tuned weight folder per test video (videos 1..8, in order).
    model_names = [
        '177_FT_AUTH_vid1',
        '178_FT_AUTH_2',
        '179_FT_AUTH_vid3',
        '180_FT_AUTH_vid4',
        '181_FT_AUTH_vid5',
        '182_FT_AUTH_vid6',
        '183_FT_AUTH_vid7',
        '184_FT_AUTH_vid8'
    ]
    for vid_index, weights_folder in enumerate(model_names, start=1):
        print(vid_index)
        # Output folder for this video's generated frames.
        output_dir = f'/data/jl5/data-meta/experiments/185_FT_PT_AUTH_inf/viz/{vid_index}/generated/'
        test(weights_folder, output_dir, gpu_id, vid_index, iter_num, dbg)
if __name__ == "__main__":
    # Dispatch on argument count. Fix: the 3-argument branch is now part of
    # the if/elif chain — previously it was a separate `if`, so a valid
    # 3-argument invocation also fell into the final else and printed
    # "Wrong num of arguments" after running. Unused `import pdb` removed.
    if len(sys.argv) == 3:
        test_all_scaled(sys.argv[1], int(sys.argv[2]))
    elif len(sys.argv) == 5:
        test_all(sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]))
    elif len(sys.argv) == 6:
        test_all(sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]), sys.argv[5])
    else:
        print("Wrong num of arguments")
| StarcoderdataPython |
1993615 | # -*- coding: utf-8 -*-
# author: itimor
from __future__ import print_function, unicode_literals
import json
from rest_framework.response import Response
from collections import OrderedDict
from rest_framework import viewsets
from django.utils import timezone
from rest_framework.decorators import action
from common import status
from common.dispath import JsonResponse
from common.exceptions import *
from tools.models import RequestEvent
class ModelViewSet(viewsets.ModelViewSet):
    """Base viewset that wraps the standard DRF CRUD actions so every response
    is a JsonResponse carrying an OrderedDict payload, and every mutating
    request is written to the RequestEvent audit log."""
    def __init__(self, *args, **kwargs):
        super(ModelViewSet, self).__init__(*args, **kwargs)
        # Flag consumed by FKModelViewSet.transer: becomes True once a
        # read-serializer has been produced for a write action.
        self.resultData = False
    def watch_audit_log(self, request):
        """Persist a RequestEvent row describing this request (audit trail)."""
        # Prefer the proxy-forwarded address; fall back to the direct peer.
        ip = request.META.get("HTTP_X_FORWARDED_FOR", "")
        if not ip:
            ip = request.META.get('REMOTE_ADDR', "")
        method = request._request.method
        RequestEvent.objects.create(
            url=request.path,
            method=method,
            query_string=json.dumps({
                'query_params': request.query_params,
                'json': request.data
            }),
            user=self.request.user,
            remote_ip=ip,
            create_time=timezone.now()
        )
    def create(self, request, *args, **kwargs):
        """POST: validate, save, and return the created object (or a 500 payload)."""
        self.watch_audit_log(request)
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        try:
            self.perform_create(serializer)
            headers = self.get_success_headers(serializer.data)
            return JsonResponse(OrderedDict([
                ('results', serializer.data)
            ], code=status.HTTP_200_OK), headers=headers)
        except Exception as e:
            print(e)
            return JsonResponse(OrderedDict([
                ('results', {"msg": ExceptionX.PasreRaise(e)})
            ], code=status.HTTP_500_INTERNAL_SERVER_ERROR))
    def perform_create(self, serializer):
        serializer.save()
    def list(self, request, *args, **kwargs):
        """GET list: paginated when a paginator is configured."""
        # Deliberately NOT audit-logged (list GET requests are too noisy).
        # self.watch_audit_log(request)
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        return JsonResponse(OrderedDict([
            ('results', serializer.data)
        ], code=status.HTTP_200_OK))
    def retrieve(self, request, *args, **kwargs):
        """GET detail: return a single serialized object."""
        self.watch_audit_log(request)
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        return JsonResponse(OrderedDict([
            ('results', serializer.data)
        ], code=status.HTTP_200_OK))
    def update(self, request, *args, **kwargs):
        """PUT/PATCH: validate and save changes to an existing object."""
        self.watch_audit_log(request)
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        if getattr(instance, '_prefetched_objects_cache', None):
            # Prefetched relations are now stale; clear the cache.
            instance._prefetched_objects_cache = {}
        return JsonResponse(OrderedDict([
            ('results', serializer.data)
        ], code=status.HTTP_200_OK))
    def perform_update(self, serializer):
        serializer.is_valid(raise_exception=True)
        serializer.save()
    def partial_update(self, request, *args, **kwargs):
        kwargs['partial'] = True
        return self.update(request, *args, **kwargs)
    def destroy(self, request, *args, **kwargs):
        """DELETE: remove the object; payload carries only the status code."""
        self.watch_audit_log(request)
        instance = self.get_object()
        self.perform_destroy(instance)
        return JsonResponse(OrderedDict(code=status.HTTP_200_OK))
    def perform_destroy(self, instance):
        instance.delete()
class FKModelViewSet(ModelViewSet):
    """Viewset for models with foreign keys: after a create/update it re-reads
    the saved object through the (read-oriented) serializer so the response
    includes expanded related data rather than the raw write payload."""
    def transer(self, instance=None, id=None):
        """Build a read-serializer for *instance* (or, on create, for the
        freshly created object looked up by *id*)."""
        self.resultData = True
        if self.action == "create":
            # get_object() resolves via self.kwargs['pk'], so inject the new id.
            self.kwargs = {'pk': id}
            instance = self.get_object()
        serializer = self.get_serializer(instance)
        return serializer
    def perform_create(self, serializer):
        super(FKModelViewSet, self).perform_create(serializer)
        self.readSerializer = self.transer(id=serializer.data['id'])
    def perform_update(self, serializer):
        super(FKModelViewSet, self).perform_update(serializer)
        self.readSerializer = self.transer(self.readInstance)
    def perform_destroy(self, instance):
        super(FKModelViewSet, self).perform_destroy(instance)
    def create(self, request, *args, **kwargs):
        """POST: save, then respond with the re-read (expanded) serializer."""
        self.watch_audit_log(request)
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        serializer = self.readSerializer
        return JsonResponse(OrderedDict([
            ('results', serializer.data)
        ], code=status.HTTP_200_OK), headers=headers)
    def update(self, request, *args, **kwargs):
        """PUT/PATCH: save, then respond with the re-read (expanded) serializer."""
        self.watch_audit_log(request)
        partial = kwargs.pop('partial', False)
        # Keep the instance around so perform_update can re-serialize it.
        self.readInstance = instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        serializer = self.readSerializer
        if getattr(instance, '_prefetched_objects_cache', None):
            instance._prefetched_objects_cache = {}
        return JsonResponse(OrderedDict([
            ('results', serializer.data)
        ], code=status.HTTP_200_OK))
    def destroy(self, request, *args, **kwargs):
        self.watch_audit_log(request)
        instance = self.get_object()
        self.perform_destroy(instance)
        return JsonResponse(OrderedDict(code=status.HTTP_200_OK))
# Bulk-operation mixin: bulk_create | bulk_delete | bulk_update
class BulkModelMixin(ModelViewSet):
    """Adds bulk create/delete/update endpoints. Each item is processed
    independently; failures are reported per item instead of aborting the
    whole batch."""
    # Bulk create
    @action(methods=['post'], url_path='bulk_create', detail=False)
    def bulk_create(self, request, *args, **kwargs):
        """
        /api/tool/simple/bulk_create/
        :return:
        """
        self.watch_audit_log(request)
        objs = request.data
        if not objs:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        bulk_models = []
        for obj in objs:
            # '0102' is a placeholder id, overwritten on success —
            # presumably an internal error-code convention; TODO confirm.
            req = {'id': '0102', 'msg': 'success'}
            print(obj)
            try:
                serializer = self.get_serializer(data=obj)
                serializer.is_valid(raise_exception=True)
                self.perform_create(serializer)
                req['id'] = serializer.data['id']
            except Exception as e:
                req['msg'] = ExceptionX.ToString(e)
            bulk_models.append(req)
        return JsonResponse(OrderedDict([
            ('results', bulk_models)
        ], code=status.HTTP_200_OK))
    @action(methods=['delete'], url_path='bulk_delete', detail=False)
    def bulk_delete(self, request, *args, **kwargs):
        """
        /api/tool/simple/bulk_delete/
        :return:
        """
        self.watch_audit_log(request)
        ids = request.data
        if not ids:
            return Response(status=status.HTTP_404_NOT_FOUND)
        bulk_models = []
        for id in ids:
            req = {'id': id, 'msg': 'success'}
            try:
                queryset = self.filter_queryset(self.get_queryset())
                instance = queryset.get(pk=id)
                self.perform_destroy(instance)
            except Exception as e:
                req['msg'] = ExceptionX.ToString(e)
            bulk_models.append(req)
        return JsonResponse(OrderedDict([
            ('results', bulk_models)
        ], code=status.HTTP_200_OK))
    @action(methods=['put', 'patch'], url_path='bulk_update', detail=False)
    def bulk_update(self, request, *args, **kwargs):
        """
        /api/tool/simple/bulk_update/
        :return:

        Payload: {"ids": [...], "obj": {...}} — the same partial update
        *obj* is applied to every listed id.
        """
        self.watch_audit_log(request)
        ids = request.data['ids']
        obj = request.data['obj']
        if not ids or not obj:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        bulk_models = []
        for id in ids:
            req = {'id': id, 'msg': 'success'}
            try:
                queryset = self.filter_queryset(self.get_queryset())
                instance = queryset.get(pk=id)
                serializer = self.get_serializer(instance, data=obj, partial=True)
                serializer.is_valid(raise_exception=True)
                self.perform_update(serializer)
            except Exception as e:
                req['msg'] = ExceptionX.ToString(e)
            bulk_models.append(req)
        return JsonResponse(OrderedDict([
            ('results', bulk_models)
        ], code=status.HTTP_200_OK))
| StarcoderdataPython |
4800551 | <reponame>luosolo/SuPyPlex<gh_stars>0
from supyplex.level import LevelLoader
from supyplex.commons import *
# Tile codes used in the 24x60 level map.
ZONK = 1
INFOTRON = 4
MURPHY = 3
EMPTY = 0
# Tiles a falling zonk/infotron can slide off sideways (see
# GameLogic.calculate_g_item_movement); 5/26/27/38/39 are presumably
# round-topped tiles — confirm against the tileset.
slicks = [ZONK, INFOTRON, 5, 26, 27, 38, 39]
class GameLogic(object):
    """
    This is the controller of the game.
    Implements a simple "physics" pass (gravity and sideways slipping) over
    the level map.
    """
    def __init__(self, level, base_dir):
        # The map is a matrix 24x60
        self.levelInfo = LevelLoader(base_dir).load_level(level)
        self.map = self.levelInfo.map
    def can_move(self, p: Point):
        """
        this function tells if the player murphy can move to the position in the map
        :param p: The point where the player want to move
        :return: True when the target tile is walkable
        """
        val = self.map[p.y][p.x]
        # 0 = empty, 4 = infotron; 2 is presumably the base/dirt tile — confirm.
        return val in {0, 2, 4}
    def move_complete(self, from_position: Point, to_position: Point):
        """
        this function is called every time the player completes its movement
        :param from_position: start position
        :param to_position: end position
        :return:
        """
        # TODO i need to check if there is an elegant way to implement such behaviour
        self.map[from_position.y][from_position.x] = EMPTY
        self.map[to_position.y][to_position.x] = MURPHY
    def calculate_g_item_movement(self, y, x, tp):
        """Apply one gravity step to a tile of type *tp* at (y, x):
        fall straight down into empty space, otherwise slip left/right
        off a 'slick' tile when the diagonal is free."""
        if self.map[y][x] == tp:
            if self.map[y + 1][x] == EMPTY:
                self.map[y][x] = EMPTY
                self.map[y + 1][x] = tp
            elif self.map[y + 1][x] in slicks:
                # prefer slipping right, then left
                if self.map[y][x + 1] == EMPTY and self.map[y + 1][x + 1] == EMPTY:
                    self.map[y][x] = EMPTY
                    self.map[y][x + 1] = tp
                elif self.map[y][x - 1] == EMPTY and self.map[y + 1][x - 1] == EMPTY:
                    self.map[y][x] = EMPTY
                    self.map[y][x - 1] = tp
    def calculate_next_move(self):
        """Run one gravity tick for every zonk and infotron on the map.

        NOTE(review): y + 1 is read for y == 23 (last row); this only stays
        in bounds if the bottom row can never hold a ZONK/INFOTRON (e.g. is
        always wall) — confirm against the level format.
        """
        for y in range(24):
            for x in range(60):
                self.calculate_g_item_movement(y, x, ZONK)
                self.calculate_g_item_movement(y, x, INFOTRON)
        pass
| StarcoderdataPython |
3416015 | <reponame>inamori/DeepLearningImplementations<gh_stars>1000+
import os
import sys
from tqdm import tqdm
import tensorflow as tf
import models
sys.path.append("../utils")
import losses
import data_utils as du
import training_utils as tu
import visualization_utils as vu
# TF1-style global hyper-parameter flags shared with the helper modules.
FLAGS = tf.app.flags.FLAGS
def train_model():
    """Train a BEGAN-style GAN on CelebA.

    Builds generator/discriminator(-autoencoder) graphs, the BEGAN losses
    (with the annealed k factor and the convergence measure), TensorBoard
    summaries, then runs the epoch/batch loop, periodically saving sample
    images and checkpoints.
    """
    # Setup session
    sess = tu.setup_training_session()
    ##########
    # Inputs
    ##########
    # Setup async input queue of real images
    X_real = du.read_celebA()
    # Noise
    batch_size = tf.shape(X_real)[0]
    z_noise_for_D = tf.random_uniform((batch_size, FLAGS.z_dim,), minval=-1, maxval=1, name="z_input_D")
    z_noise_for_G = tf.random_uniform((batch_size, FLAGS.z_dim,), minval=-1, maxval=1, name="z_input_G")
    # k factor (BEGAN equilibrium term, annealed during training)
    k_factor = tf.Variable(initial_value=0., trainable=False, name='anneal_factor')
    # learning rate (halved every 200 epochs, floored at 1e-6 below)
    lr = tf.Variable(initial_value=FLAGS.learning_rate, trainable=False, name='learning_rate')
    ########################
    # Instantiate models
    ########################
    G = models.Generator(nb_filters=FLAGS.nb_filters_G)
    D = models.Discriminator(h_dim=FLAGS.h_dim, nb_filters=FLAGS.nb_filters_D)
    ##########
    # Outputs
    ##########
    # D is an autoencoder: X_rec_* are reconstructions of its inputs.
    X_rec_real = D(X_real, output_name="X_rec_real")
    X_fake_for_D = G(z_noise_for_D, output_name="X_fake_for_D")
    X_rec_fake_for_D = D(X_fake_for_D, reuse=True, output_name="X_rec_fake_for_D")
    X_fake_for_G = G(z_noise_for_G, reuse=True, output_name="X_fake_for_G")
    X_rec_fake_for_G = D(X_fake_for_G, reuse=True, output_name="X_rec_fake_for_G")
    # output images for plots
    real_toplot = du.unnormalize_image(X_real, name="real_toplot")
    generated_toplot = du.unnormalize_image(X_fake_for_G, name="generated_toplot")
    real_rec_toplot = du.unnormalize_image(X_rec_real, name="rec_toplot")
    generated_rec_toplot = du.unnormalize_image(X_rec_fake_for_G, name="generated_rec_toplot")
    ###########################
    # Instantiate optimizers
    ###########################
    opt = tf.train.AdamOptimizer(learning_rate=lr, name='opt')
    ###########################
    # losses (BEGAN: L1 reconstruction errors of the D autoencoder)
    ###########################
    loss_real = losses.mae(X_real, X_rec_real)
    loss_fake_for_D = losses.mae(X_fake_for_D, X_rec_fake_for_D)
    loss_fake_for_G = losses.mae(X_fake_for_G, X_rec_fake_for_G)
    L_D = loss_real - k_factor * loss_fake_for_D
    L_G = loss_fake_for_G
    Convergence = loss_real + tf.abs(FLAGS.gamma * loss_real - loss_fake_for_G)
    ###########################
    # Compute updates ops
    ###########################
    dict_G_vars = G.get_trainable_variables()
    G_vars = [dict_G_vars[k] for k in dict_G_vars.keys()]
    dict_D_vars = D.get_trainable_variables()
    D_vars = [dict_D_vars[k] for k in dict_D_vars.keys()]
    G_gradvar = opt.compute_gradients(L_G, var_list=G_vars)
    G_update = opt.apply_gradients(G_gradvar, name='G_loss_minimize')
    D_gradvar = opt.compute_gradients(L_D, var_list=D_vars)
    D_update = opt.apply_gradients(D_gradvar, name='D_loss_minimize')
    # k is updated proportionally to the equilibrium error each batch.
    update_k_factor = tf.assign(k_factor, k_factor + FLAGS.lambdak * (FLAGS.gamma * loss_real - loss_fake_for_G))
    update_lr = tf.assign(lr, tf.maximum(1E-6, lr / 2))
    ##########################
    # Summary ops
    ##########################
    # Add summary for gradients
    tu.add_gradient_summary(G_gradvar)
    tu.add_gradient_summary(D_gradvar)
    # Add scalar summaries for G
    tf.summary.scalar("G loss", L_G)
    # Add scalar summaries for D
    tf.summary.scalar("D loss", L_D)
    # Add scalar summaries for the BEGAN bookkeeping terms
    tf.summary.scalar("k_factor", k_factor)
    tf.summary.scalar("Convergence", Convergence)
    tf.summary.scalar("learning rate", lr)
    summary_op = tf.summary.merge_all()
    ############################
    # Start training
    ############################
    # Initialize session
    saver = tu.initialize_session(sess)
    # Start queues
    coord, threads = du.manage_queues(sess)
    # Summaries
    writer = tu.manage_summaries(sess)
    # Run checks on data dimensions
    list_data = [z_noise_for_D, z_noise_for_G]
    list_data += [X_real, X_rec_real, X_fake_for_G, X_rec_fake_for_G, X_fake_for_D, X_rec_fake_for_D]
    list_data += [generated_toplot, real_toplot]
    output = sess.run(list_data)
    tu.check_data(output, list_data)
    for e in tqdm(range(FLAGS.nb_epoch), desc="Training progress"):
        # Anneal learning rate
        if (e + 1) % 200 == 0:
            sess.run([update_lr])
        t = tqdm(range(FLAGS.nb_batch_per_epoch), desc="Epoch %i" % e, mininterval=0.5)
        for batch_counter in t:
            output = sess.run([G_update, D_update, update_k_factor])
            if batch_counter % (FLAGS.nb_batch_per_epoch // (int(0.5 * FLAGS.nb_batch_per_epoch))) == 0:
                output = sess.run([summary_op])
                writer.add_summary(output[-1], e * FLAGS.nb_batch_per_epoch + batch_counter)
        # Plot some generated images
        Xf, Xr, Xrrec, Xfrec = sess.run([generated_toplot, real_toplot, real_rec_toplot, generated_rec_toplot])
        vu.save_image(Xf, Xr, title="current_batch", e=e)
        vu.save_image(Xrrec, Xfrec, title="reconstruction", e=e)
        # Save session
        saver.save(sess, os.path.join(FLAGS.model_dir, "model"), global_step=e)
        # Show data statistics
        output = sess.run(list_data)
        tu.check_data(output, list_data)
    # Stop threads
    coord.request_stop()
    coord.join(threads)
    print('Finished training!')
| StarcoderdataPython |
1913363 | from flask import send_from_directory, abort, Flask, jsonify, abort, request, render_template
import os,sys,inspect
from sklearn.externals import joblib
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Resolve this file's directory and put its parent on sys.path so sibling
# packages are importable when this script is run directly.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
app = Flask(__name__)
# Module-level holder for the most recent request payload (overwritten per request).
user_input = ""
@app.route('/auto_ml/Classification/predict', methods=['POST'])
def classifier_input():
    """Predict the class label for one feature vector posted as JSON.

    Expects a body of the form ``{"Data": [f1, f2, ...]}`` and returns the
    decoded class label as a JSON string.
    """
    global user_input
    data = request.get_json(force=True)
    user_input = data['Data']
    n_len = len(user_input)
    # The model expects a 2-D array of shape (1, n_features).
    user_input = np.array(user_input).reshape(1, n_len)
    # Build the model paths portably (the original hard-coded '\\' separators,
    # which only worked on Windows).
    model_dir = os.path.join(sys.path[0], 'Model', 'Classification')
    # NOTE(review): both artifacts are re-loaded from disk on every request;
    # consider loading them once at import time if latency matters.
    clfmodel_load = joblib.load(os.path.join(model_dir, 'saved_model.pkl'))
    clfenc_load = joblib.load(os.path.join(model_dir, 'classifier_dict.pkl'))
    y_pred_bin = clfmodel_load.predict(user_input)
    # Map the numeric prediction back to its original string label.
    y_pred = clfenc_load[y_pred_bin[0]]
    return jsonify(str(y_pred))
if __name__ == '__main__':
app.run(debug=True) | StarcoderdataPython |
11232546 | <gh_stars>1-10
import lab as B
from matrix import Dense, Kronecker
# noinspection PyUnresolvedReferences
from ..util import (
ConditionalContext,
AssertDenseWarning,
approx,
dense1_pd,
diag1_pd,
kron_pd,
)
def test_cholesky_solve_diag(diag1_pd):
chol = B.cholesky(diag1_pd)
approx(B.cholesky_solve(chol, B.eye(chol)), B.inv(diag1_pd))
def test_cholesky_solve_lt(dense1_pd):
chol = B.cholesky(dense1_pd)
with AssertDenseWarning("solving <lower-triangular> x = <diagonal>"):
approx(B.cholesky_solve(chol, B.eye(chol)), B.inv(dense1_pd))
def test_cholesky_solve_ut(dense1_pd):
chol = B.cholesky(dense1_pd)
with AssertDenseWarning(
[
"solving <upper-triangular> x = <diagonal>",
"matrix-multiplying <upper-triangular> and <lower-triangular>",
]
):
approx(
B.cholesky_solve(B.transpose(chol), B.eye(chol)),
B.inv(B.matmul(chol, chol, tr_a=True)),
)
def test_cholesky_solve_kron(kron_pd):
chol = B.cholesky(kron_pd)
with ConditionalContext(
isinstance(kron_pd.left, Dense) or isinstance(kron_pd.right, Dense),
AssertDenseWarning("solving <lower-triangular> x = <diagonal>"),
):
approx(
B.cholesky_solve(chol, Kronecker(B.eye(chol.left), B.eye(chol.right))),
B.inv(kron_pd),
)
| StarcoderdataPython |
3341107 | from museolib.backend import Backend, BackendItem
import json
class BackendJSON(Backend):
    """Backend that loads museum items and keywords from a JSON file.

    The options dict must provide a 'filename' entry pointing at a JSON
    document with top-level 'keywords' and 'items' keys.
    """

    def load_items(self):
        """Yield one :class:`BackendItem` per entry in the file's 'items' list.

        Also populates ``self.keywords`` from the file as a side effect.
        """
        filename = self.options.get('filename', None)
        # Guard BEFORE opening: the original asserted only after the file had
        # been opened and parsed, so a missing option raised TypeError instead.
        assert(filename is not None)
        with open(filename, 'r') as fd:
            data = json.load(fd)
        self.keywords = data['keywords']
        for item in data['items']:
            yield BackendItem(item)
| StarcoderdataPython |
8188312 | <filename>src/tests/scripts/CompareMafAndCheckMafCoverage.py
#!python3
import sys
def readMaf(benchMark_file, test_file):
    """Compare a test MAF alignment against a benchmark MAF alignment.

    First pass records, per reference chromosome, the benchmark's mismatched
    aligned positions (ref pos -> query pos) and gapped positions (value -1).
    Second pass scans the test file and overwrites entries that agree with the
    benchmark with the sentinel value -5.

    Returns (benchMarkalignedPosition, totalProducedLength) where
    totalProducedLength counts the test alignment's differing reference
    columns (used as the denominator for precision).

    NOTE(review): assumes MAF 's' lines come in ref/query pairs, '+' strand
    benchmark records only, and chromosome names prefixed 'col.' / 'query.'
    — confirm against the producing pipeline.
    """
    benchMarkalignedPosition = dict()
    with open(benchMark_file) as f:
        for line in f:
            elements = line.split()
            # A MAF 's' (sequence) line has exactly 7 whitespace-separated fields.
            if len(elements) == 7:
                (s, refchr, refstart, reflength, refstrand, refchrLength, refali) = elements
                refchr = refchr.replace("col.", "")
                if refchr not in benchMarkalignedPosition:
                    benchMarkalignedPosition[refchr] = dict()
                refstart = int(refstart)
                # The paired query 's' line immediately follows the ref line.
                line2 = f.readline()
                elements2 = line2.split()
                (s, querychr, querystart, querylength, querystrand, queryChrLength, queryali) = elements2
                querychr = querychr.replace("query.", "")
                if querychr != refchr:
                    print("this script does not apply for you comparsion, please do not use it")
                    print(querychr)
                    print(refchr)
                querystart = int(querystart)
                refPosition = 0
                queryPosition = 0
                if querystrand[0] == '+':
                    # Walk the alignment columns in lockstep, tracking how far
                    # into each ungapped sequence we are.
                    for i in range(len(refali)):
                        if refali[i] != '-':
                            if queryali[i] != '-' and refali[i] != queryali[i]:
                                # Mismatched aligned column: remember which query
                                # position the benchmark pairs with this ref position.
                                benchMarkalignedPosition[refchr][refstart+refPosition] = querystart + queryPosition
                            elif queryali[i] == '-':
                                benchMarkalignedPosition[refchr][refstart+refPosition] = -1 # gap alignment, give it a value of -1
                        if refali[i] != '-':
                            refPosition = refPosition + 1
                        if queryali[i] != '-':
                            queryPosition = queryPosition + 1
                else:
                    print("this script does not apply for you comparsion, please do not use it")
    totalProducedLength = 0
    with open(test_file) as f:
        for line in f:
            elements = line.split()
            if len(elements) == 7:
                (s, refchr, refstart, reflength, refstrand, refchrLength, refali) = elements
                refchr = refchr.replace("col.", "")
                if refchr in benchMarkalignedPosition:
                    refstart = int(refstart)
                    line2 = f.readline()
                    elements2 = line2.split()
                    (s, querychr, querystart, querylength, querystrand, queryChrLength, queryali) = elements2
                    if len(refali) == len(queryali):
                        querychr = querychr.replace("query.", "")
                        if querychr != refchr: ## interchrosome relocations are all wrong
                            # Inter-chromosome alignments are counted as produced
                            # columns but never marked correct.
                            for i in range(len(refali)):
                                if refali[i] != '-':
                                    totalProducedLength = totalProducedLength + 1
                        else:
                            querystart = int(querystart)
                            refPosition = 0
                            queryPosition = 0
                            if querystrand[0] == '+':
                                for i in range(len(refali)):
                                    if refali[i] != '-':
                                        # Mark positions whose pairing agrees with the benchmark.
                                        if queryali[i] != '-' and (refstart+refPosition) in benchMarkalignedPosition[refchr] and benchMarkalignedPosition[refchr][refstart+refPosition] == (querystart + queryPosition):
                                            benchMarkalignedPosition[refchr][refstart+refPosition] = -5 # if it is same with benchmark, give it a value of -5
                                        elif queryali[i] == '-' and (refstart+refPosition) in benchMarkalignedPosition[refchr] and benchMarkalignedPosition[refchr][refstart+refPosition] == -1:
                                            benchMarkalignedPosition[refchr][refstart+refPosition] = -5
                                        if refali[i] != queryali[i]:
                                            totalProducedLength = totalProducedLength + 1
                                    if refali[i] != '-':
                                        refPosition = refPosition + 1
                                    if queryali[i] != '-':
                                        queryPosition = queryPosition + 1
                            else: # here we code assume that there is no negative alignment in the benchmark alignment. All inversions are wrong If this is not true, should changed it
                                for i in range(len(refali)):
                                    if refali[i] != '-':
                                        totalProducedLength = totalProducedLength + 1
    return benchMarkalignedPosition, totalProducedLength
# Compare the produced alignment (argv[2]) against the benchmark (argv[1]).
benchMarkalignedPosition, totalProducedLength = readMaf(sys.argv[1], sys.argv[2])
# Count benchmark positions, and how many of them the test alignment
# reproduced (readMaf marks agreeing positions with the sentinel -5).
totalRefLength = 0
totalCorrected = 0
for chrom in benchMarkalignedPosition:
    for position in benchMarkalignedPosition[chrom]:
        totalRefLength = totalRefLength + 1
        if benchMarkalignedPosition[chrom][position] == -5:
            totalCorrected = totalCorrected + 1
# Write the report next to the evaluated maf file.  The ".aliEvaluatioin"
# suffix is misspelled but kept: downstream tooling may depend on it.
# NOTE(review): raises ZeroDivisionError if either total is 0 — confirm
# whether empty inputs should instead report 0 scores.
with open(sys.argv[2] + ".aliEvaluatioin", 'w') as output:
    output.write("totalRefLength:" + str(totalRefLength) + "\n")
    output.write("totalProducedLength:" + str(totalProducedLength) + "\n")
    output.write("totalCorrected:" + str(totalCorrected) + "\n")
    recall = totalCorrected/totalRefLength
    output.write("recall:" + str(recall) + "\n")
    precision = totalCorrected/totalProducedLength
    output.write("precision:" + str(precision) + "\n")
    fscore = 2 * precision * recall/(precision + recall)
    output.write("fscore:" + str(fscore) + "\n")
| StarcoderdataPython |
8042169 | """ Setup file. """
import os
from setuptools import setup, find_packages
# Project root: the directory containing this setup.py.
HERE = os.path.abspath(os.path.dirname(__file__))
# Long description is assembled from the README and changelog.
README = open(os.path.join(HERE, 'README.rst')).read()
CHANGES = open(os.path.join(HERE, 'CHANGES.rst')).read()
# Runtime dependencies.
REQUIREMENTS = [
    'dropbox',
    'psycopg2',
    'pycrypto',
    'pyramid>=1.5',
    'pyramid_beaker',
    'pyramid_duh>=0.1.2',
    'pyramid_jinja2',
    'pyramid_tm',
    'python-dateutil',
    'SQLAlchemy',
    'transaction',
    'zope.sqlalchemy',
]
# Extra packages needed only by the test suite (currently none).
TEST_REQUIREMENTS = []
if __name__ == "__main__":
    setup(
        name='stevetags',
        version="develop",
        description='Dropbox assistant',
        long_description=README + '\n\n' + CHANGES,
        classifiers=[
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Framework :: Pyramid',
            'Private :: Do Not Upload',
        ],
        author='<NAME>',
        author_email='<EMAIL>',
        url='',
        platforms='any',
        include_package_data=True,
        zip_safe=False,
        packages=find_packages(),
        entry_points={
            # Console script and Paste factories exposed by the package.
            'console_scripts': [
                'st-deploy = stevetags:deploy',
            ],
            'paste.app_factory': [
                'main = stevetags:main',
            ],
            'paste.filter_app_factory': [
                'security_headers = stevetags.security:SecurityHeaders',
            ],
        },
        install_requires=REQUIREMENTS,
        tests_require=REQUIREMENTS + TEST_REQUIREMENTS,
    )
| StarcoderdataPython |
383819 | <filename>segmentation-of-nuclei/watershed/__init__.py
# Public API of the watershed package, honored by ``from ... import *``.
__all__ = ['seg_with_watershed'];
| StarcoderdataPython |
4982442 | # Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import heapq
import logging
import shutil
import warnings
from collections import defaultdict, namedtuple
from typing import Dict, FrozenSet, Optional, Set, Tuple
import tqdm
from Bio.Phylo.NewickIO import Parser, Writer
from . import pangolin
from .external.usher import parsimony_pb2
logger = logging.getLogger(__name__)
Mutation = namedtuple("Mutation", ["position", "ref", "mut"])
NUCLEOTIDE = "ACGT"
def load_usher_clades(filename: str) -> Dict[str, Tuple[str, str]]:
    """Parse usher's clades.txt output.

    Each line is "<sequence name>\\t<lineage histogram>".  A histogram like
    ``B.1.1.161*|B.1.1(2/3),B.1.1.161(1/3)`` is split into the best lineage
    (before ``*|``) and a comma-separated list of candidate lineages with the
    counts stripped; a plain lineage serves as both.

    Returns a dict mapping sequence name -> (best lineage, candidate list).
    """
    result: Dict[str, Tuple[str, str]] = {}
    with open(filename) as handle:
        for raw_line in handle:
            name, histogram = raw_line.strip().split("\t")
            if "*|" in histogram:
                # Histogram form: best lineage, then weighted candidates.
                best, rest = histogram.split("*|")
                candidates = ",".join(
                    entry.split("(")[0] for entry in rest.split(",")
                )
            else:
                # Plain form: a single lineage with no histogram markers.
                assert "*" not in histogram
                assert "|" not in histogram
                best = candidates = histogram
            result[name] = best, candidates
    return result
def load_mutation_tree(filename: str) -> Dict[str, FrozenSet[Mutation]]:
    """
    Loads an usher lineageTree.pb annotated with mutations and pango lineages,
    and creates a mapping from lineages to their set of mutations.
    """
    with open(filename, "rb") as f:
        proto = parsimony_pb2.data.FromString(f.read())  # type: ignore
    # Extract phylogenetic tree.
    tree = next(Parser.from_string(proto.newick).parse())
    clades = list(tree.find_clades())
    # Proto metadata and mutation lists are parallel to the clade list.
    assert len(proto.metadata) == len(clades)
    assert len(proto.node_mutations) == len(clades)
    # Map lineages to clades.
    lineage_to_clade = {
        str(meta.clade): clade
        for clade, meta in zip(clades, proto.metadata)
        if meta and meta.clade
    }
    # Accumulate mutations in each clade, which are overwritten at each position.
    # Keying by position means a descendant's mutation at a site replaces an
    # ancestor's mutation at the same site.
    clade_to_muts: Dict[object, Dict[int, Mutation]] = defaultdict(dict)
    for clade, muts in zip(clades, proto.node_mutations):
        for mut in muts.mutation:
            clade_to_muts[clade][mut.position] = Mutation(
                mut.position,
                NUCLEOTIDE[mut.ref_nuc],
                "".join(NUCLEOTIDE[n] for n in mut.mut_nuc),
            )
        # Push the accumulated mutations down to children; find_clades()
        # yields parents before children, so children see the full set.
        for c in clade.clades:
            clade_to_muts[c].update(clade_to_muts[clade])
    mutations_by_lineage = {
        k: frozenset(clade_to_muts[v].values()) for k, v in lineage_to_clade.items()
    }
    return mutations_by_lineage
def refine_mutation_tree(filename_in: str, filename_out: str) -> Dict[str, str]:
    """
    Refines a mutation tree clade metadata from pango lineages like B.1.1 to
    full node addresses like fine.0.12.4.1. Among clones, only the basal clade
    with have a .clade attribute, all descendents will have metadata.clade ==
    "". The tree structure remains unchanged.

    Writes the updated proto to ``filename_out`` and returns a dict mapping
    each fine clade name to its (compressed) coarse pango lineage.
    """
    with open(filename_in, "rb") as f:
        proto = parsimony_pb2.data.FromString(f.read())  # type: ignore
    # Extract phylogenetic tree.
    tree = next(Parser.from_string(proto.newick).parse())
    clades = list(tree.find_clades())
    logger.info(f"Refining a tree with {len(clades)} nodes")
    assert len(proto.metadata) == len(clades)
    assert len(proto.node_mutations) == len(clades)
    metadata = dict(zip(clades, proto.metadata))
    mutations = dict(zip(clades, proto.node_mutations))
    # Add refined clades, collapsing clones.  A child with no mutations is a
    # clone of its parent and inherits the parent's fine id.
    num_children: Dict[str, int] = defaultdict(int)
    clade_to_fine = {clades[0]: "fine"}
    fine_to_clade = {"fine": clades[0]}
    for parent in clades:
        parent_fine = clade_to_fine[parent]
        for child in parent.clades:
            if mutations[child].mutation:
                # Create a new fine id.  The first child gets "<parent>.",
                # subsequent children "<parent>.0", "<parent>.1", ...
                n = num_children[parent_fine]
                fine = f"{parent_fine}.{n - 1}" if n else parent_fine + "."
                num_children[parent_fine] += 1
                clade_to_fine[child] = fine
                fine_to_clade[fine] = child
            else:
                # Collapse clone into parent.
                clade_to_fine[child] = parent_fine
    # Save basal fine clades and the fine -> coarse mapping.
    fine_to_coarse = {}
    for clade, meta in metadata.items():
        fine = clade_to_fine[clade]
        if meta.clade:
            fine_to_coarse[fine] = pangolin.compress(meta.clade)
            # Only the basal clade of a clone group keeps a .clade label.
            meta.clade = fine if clade is fine_to_clade[fine] else ""
    # Propagate basal clade metadata downward, so every fine id maps to the
    # nearest labeled ancestor's coarse lineage.
    for parent in clades:
        parent_coarse = fine_to_coarse[clade_to_fine[parent]]
        for child in parent.clades:
            fine_to_coarse.setdefault(clade_to_fine[child], parent_coarse)
    with open(filename_out, "wb") as f:
        f.write(proto.SerializeToString())
    logger.info(f"Found {len(clades) - len(fine_to_coarse)} clones")
    logger.info(f"Refined {len(set(fine_to_coarse.values()))} -> {len(fine_to_coarse)}")
    return fine_to_coarse
def prune_mutation_tree(
    filename_in: str,
    filename_out: str,
    max_num_nodes: int,
    weights: Optional[Dict[str, int]] = None,
) -> Set[str]:
    """
    Condenses a mutation tree by greedily pruning nodes with least value
    under the error-minimizing objective function::
        value(node) = num_mutations(node) * weights(node)
    Returns a restricted set of clade names.

    Writes the pruned proto to ``filename_out``.  ``weights``, if given, maps
    clade names to importance weights; unnamed clades get weight 0.
    """
    with open(filename_in, "rb") as f:
        proto = parsimony_pb2.data.FromString(f.read())  # type: ignore
    num_pruned = len(proto.node_mutations) - max_num_nodes
    if num_pruned < 0:
        # Already small enough: copy through unchanged.
        shutil.copyfile(filename_in, filename_out)
        # NOTE(review): this reads proto.node_metadata while the rest of the
        # function uses proto.metadata — confirm the field name is correct.
        return {m.clade for m in proto.node_metadata if m.clade}
    # Extract phylogenetic tree.
    tree = next(Parser.from_string(proto.newick).parse())
    clades = list(tree.find_clades())
    logger.info(f"Pruning {num_pruned}/{len(clades)} nodes")
    assert len(clades) == len(set(clades))
    clade_to_id = {c: i for i, c in enumerate(clades)}
    assert len(proto.metadata) == len(clades)
    assert len(proto.node_mutations) == len(clades)
    metadata = dict(zip(clades, proto.metadata))
    mutations = dict(zip(clades, proto.node_mutations))
    name_set = {m.clade for m in proto.metadata if m.clade}
    # Initialize weights and topology.
    if weights is None:
        weights = {c: 1 for c in clades}
    else:
        assert set(weights).issubset(name_set)
        # Re-key user weights from clade names to clade objects.
        old_weights = weights.copy()
        weights = {}
        for c, m in metadata.items():
            weights[c] = old_weights.pop(m.clade, 0) if m.clade else 0
        assert not old_weights, list(old_weights)
    parents = {c: parent for parent in clades for c in parent.clades}
    assert tree.root not in parents
    def get_loss(clade):
        # Cost of pruning this clade: its weight times its mutation count.
        return weights[clade] * len(mutations[clade].mutation)
    # Greedily prune nodes.  Heap entries are (loss, clade id); losses may go
    # stale when a pruned child's weight is folded into its parent.
    heap = [(get_loss(c), clade_to_id[c]) for c in clades[1:]]  # don't prune the root
    heapq.heapify(heap)
    for step in tqdm.tqdm(range(num_pruned)):
        # Find the clade with lowest loss.
        stale_loss, i = heapq.heappop(heap)
        clade = clades[i]
        loss = get_loss(clade)
        while loss != stale_loss:
            # Reinsert clades whose loss was stale.
            stale_loss, i = heapq.heappushpop(heap, (loss, i))
            clade = clades[i]
            loss = get_loss(clade)
        # Prune this clade: splice its children onto its parent and prepend
        # its mutations to each child's mutation list.
        parent = parents.pop(clade)
        weights[parent] += weights.pop(clade, 0)  # makes the parent loss stale
        parent.clades.remove(clade)
        parent.clades.extend(clade.clades)
        mutation = list(mutations.pop(clade).mutation)
        for child in clade.clades:
            parents[child] = parent
            m = mutations[child].mutation
            cat = mutation + list(m)  # order so as to be compatible with reversions
            del m[:]
            m.extend(cat)
    clades = list(tree.find_clades())
    assert len(clades) == max_num_nodes
    # Create the pruned proto: rewrite the newick string and re-emit metadata
    # and mutations in the surviving clades' order.
    proto.newick = next(iter(Writer([tree]).to_strings()))
    del proto.metadata[:]
    del proto.node_mutations[:]
    proto.metadata.extend(metadata[clade] for clade in clades)
    proto.node_mutations.extend(mutations[clade] for clade in clades)
    with open(filename_out, "wb") as f:
        f.write(proto.SerializeToString())
    return {metadata[clade].clade for clade in clades if metadata[clade].clade}
def apply_mutations(ref: str, mutations: FrozenSet[Mutation]) -> str:
    """Return *ref* with every mutation in *mutations* applied.

    Positions are 1-based.  A mutation whose ``mut`` equals its ``ref`` is a
    no-op; a mutation whose ``ref`` disagrees with the current sequence
    triggers a warning but is still applied.
    """
    chars = list(ref)
    for mutation in mutations:
        if mutation.mut == mutation.ref:
            continue  # silent no-op mutation
        idx = mutation.position - 1
        if chars[idx] != mutation.ref:
            warnings.warn(f"invalid reference: {mutation.ref} vs {chars[idx]}")
        chars[idx] = mutation.mut
    return "".join(chars)
class FineToMeso:
    """Memoized map from fine clade names to their nearest ancestor in ``meso_set``.

    E.g. with ``meso_set = {"fine", "fine.1"}``, ``"fine.1.2.3"`` maps to
    ``"fine.1"``.  Assumes every name's root ancestor ``"fine"`` is reachable.
    """

    def __init__(self, meso_set):
        self.meso_set = frozenset(meso_set)
        self._cache = {}

    def __call__(self, fine):
        try:
            return self._cache[fine]
        except KeyError:
            pass
        if fine in self.meso_set:
            meso = fine
        else:
            # Strip the last dotted component and resolve the parent.
            meso = self(fine.rsplit(".", 1)[0])
        self._cache[fine] = meso
        return meso
| StarcoderdataPython |
6557793 | <gh_stars>0
import pandas as pd
import extract
# Accumulate items from each feed into a single list, then dump to CSV.
newsList = []
# BBC top stories.
url = "http://feeds.bbci.co.uk/news/rss.xml"
extract.bbc_extract(url, newsList)
# CNN top stories.
url = "http://rss.cnn.com/rss/cnn_topstories.rss"
extract.cnn_extract(url, newsList)
# utf-8-sig adds a BOM so Excel opens the CSV with correct encoding.
df = pd.DataFrame(newsList)
df.to_csv("../data/feed.csv", encoding='utf-8-sig')
| StarcoderdataPython |
1612330 | <gh_stars>1-10
"""
This is a template for creating custom ColumnMapExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations
"""
import json
from typing import Any
import dataprofiler as dp
import numpy as np
# remove extra tf loggin
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
class ColumnValuesConfidenceForDataLabelToBeGreaterThanOrEqualToThreshold(
    ColumnMapMetricProvider
):
    """Metric provider answering, per row: is the DataProfiler model's
    confidence for ``data_label`` at least ``threshold``?"""
    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.prediction_confidence_for_data_label_greater_than_or_equal_to_threshold"
    # Runtime parameters this metric accepts in addition to the column.
    condition_value_keys = (
        "threshold",
        "data_label",
    )
    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(
        cls: Any, column: str, threshold: float, data_label: str, **kwargs: Any
    ) -> np.ndarray:
        """
        Implement the yes/no question for the expectation.

        Returns a boolean array, one entry per row of ``column``, true where
        the model's confidence for ``data_label`` meets ``threshold``.
        """
        # NOTE(review): the labeler model is instantiated on every metric
        # evaluation — confirm whether caching it would be safe.
        labeler = dp.DataLabeler(labeler_type="structured")
        # Keep raw confidences rather than collapsed predicted labels.
        labeler.postprocessor.set_params(is_pred_labels=False)
        results = labeler.predict(
            column,
            predict_options={"show_confidences": True},
        )
        if data_label.upper() in labeler.label_mapping.keys():
            data_label_ind = labeler.label_mapping[data_label.upper()]
        else:
            raise ValueError(
                """
            The only values acceptable for the data label parameter are as follows:
            ['PAD', 'UNKNOWN', 'ADDRESS', 'BAN', 'CREDIT_CARD', 'DATE', 'TIME', 'DATETIME',\
            'DRIVERS_LICENSE', 'EMAIL_ADDRESS', 'UUID', 'HASH_OR_KEY', 'IPV4', 'IPV6',\
            'MAC_ADDRESS', 'PERSON', 'PHONE_NUMBER', 'SSN', 'URL', 'US_STATE', 'INTEGER',\
            'FLOAT', 'QUANTITY', 'ORDINAL']
            """
            )
        # Column of confidences for the requested label only.
        data_label_conf = results["conf"][:, data_label_ind]
        return data_label_conf >= threshold
class ExpectColumnsValuesConfidenceForDataLabelToBeGreaterThanOrEqualtoThreshold(
    ColumnMapExpectation
):
    """
    Expect the DataProfiler model's confidence for ``data_label`` to be at
    least ``threshold`` for each value in ``column``.

    Args:
        column (str): The column name that you want to check.
        data_label (str): The data label for which you want to check confidences
            against the threshold value.
        threshold (float): The value, usually as a decimal (e.g. .32), you want
            to use to flag low confidence predictions.

    Example::

        df.expect_column_values_to_probabilistically_match_data_label(
            column,
            data_label=<>,
            threshold=float(0<=1)
        )
    """
    # Example data and test cases used by Great Expectations' gallery and
    # run_diagnostics.
    examples = [
        {
            "data": {
                "OPEID6": ["1002", "1052", "25034", "McRoomyRoom"],
                "INSTNM": [
                    "Alabama A & M University",
                    "University of Alabama at Birmingham",
                    "Amridge University",
                    "McRoomyRoom",
                ],
                "ZIP": ["35762", "35294-0110", "36117-3553", "McRoomyRoom"],
                "ACCREDAGENCY": [
                    "Southern Association of Colleges and Schools Commission on Colleges",
                    "Southern Association of Colleges and Schools Commission on Colleges",
                    "Southern Association of Colleges and Schools Commission on Colleges",
                    "McRoomyRoom",
                ],
                "INSTURL": [
                    "www.aamu.edu/",
                    "https://www.uab.edu",
                    "www.amridgeuniversity.edu",
                    "McRoomyRoom",
                ],
                "NPCURL": [
                    "www.aamu.edu/admissions-aid/tuition-fees/net-price-calculator.html",
                    "https://uab.studentaidcalculator.com/survey.aspx",
                    "www2.amridgeuniversity.edu:9091/",
                    "McRoomyRoom",
                ],
                "LATITUDE": ["34.783368", "33.505697", "32.362609", "McRoomyRoom"],
                "LONGITUDE": ["-86.568502", "-86.799345", "-86.17401", "McRoomyRoom"],
                "RELAFFIL": ["NULL", "NULL", "74", "McRoomyRoom"],
                "DEATH_YR2_RT": [
                    "PrivacySuppressed",
                    "PrivacySuppressed",
                    "PrivacySuppressed",
                    "McRoomyRoom",
                ],
                "SEARCH_STRING": [
                    "Alabama A & M University AAMU",
                    "University of Alabama at Birmingham ",
                    "Amridge University Southern Christian University Regions University",
                    "McRoomyRoom",
                ],
            },
            "tests": [
                {
                    "title": "positive_test_with_column_one",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "ZIP", "data_label": "ADDRESS", "threshold": 0.00},
                    "out": {
                        "success": True,
                    },
                },
                {
                    "title": "failing_test_with_column_one",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "ZIP", "data_label": "ADDRESS", "threshold": 1.00},
                    "out": {
                        "success": False,
                    },
                },
            ],
        }
    ]
    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = "column_values.prediction_confidence_for_data_label_greater_than_or_equal_to_threshold"
    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = (
        "threshold",
        "data_label",
        "mostly",
    )
    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {
        "threshold": None,
        "data_label": None,
        "result_format": "BASIC",
        "include_config": True,
        "catch_exceptions": False,
    }
    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "requirements": ["dataprofiler", "tensorflow", "scikit-learn", "numpy"],
        "maturity": "experimental",  # "concept_only", "experimental", "beta", or "production"
        "tags": ["dataprofiler"],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@taylorfturner",  # Don't forget to add your github handle here!
        ],
    }
if __name__ == "__main__":
    # Run Great Expectations' self-diagnostics for this custom expectation
    # and print the gallery readiness checklist.
    diagnostics_report = (
        ExpectColumnsValuesConfidenceForDataLabelToBeGreaterThanOrEqualtoThreshold().run_diagnostics()
    )
    print(diagnostics_report.generate_checklist())
| StarcoderdataPython |
4973793 | import sys
import unittest
from os.path import dirname
from HtmlTestRunner import HTMLTestRunner
# Discover and run the Fairdata integration tests, reporting either with the
# default text runner (for debugging) or an HTML report.
print('-- Starting Fairdata tests --')
loader = unittest.TestLoader()
start_dir = dirname(__file__)
# tests are only automatically searched from files whose filenames end with *_tests.py
suite = loader.discover(start_dir, pattern='*_tests.py')
# Accept the flag anywhere on the command line (the original only honored it
# when it was the LAST argument).
if '--runner=default' in sys.argv:
    # HTMLTestRunner does not seem to forward print() -clauses to stdout.
    # it may be useful to use unittest's default TextTestRunner for debugging.
    runner = unittest.TextTestRunner()
else:
    runner = HTMLTestRunner(output='reports', report_name='fairdata_integration_tests', combine_reports=True)
runner.run(suite)
| StarcoderdataPython |
8047887 | from .Link import *
from .GenericSingleLink import *
from .GenericDoubleLink import * | StarcoderdataPython |
5181405 | <gh_stars>1-10
"""
lambda_function.py
"""
# postリクエストをline notify APIに送るためにrequestsのimport
import os
import time
from datetime import datetime, timezone
import pytz
import re
import requests
from bs4 import BeautifulSoup
import json
from linebot import LineBotApi
from linebot.models import TextSendMessage
url = os.getenv("URL")
# line notify APIのトークン
line_access_token = os.getenv("LINE_ACCESS_TOKEN")
def _format_day_report(day):
    """Render one day's forecast dict (as built by forecast2dict) into the
    LINE notification text.  Extracted because today's and tomorrow's report
    blocks were identical duplicated expressions."""
    info = day["forecasts"]
    rain = info["rain_probability"]
    return (
        "=====" + day["date"] + "=====" + "\n"
        + "天 気: " + info["weather"] + "\n"
        + "最高気温: " + info["high_temp"] + info["high_temp_diff"] + "\n"
        + "最低気温: " + info["low_temp"] + info["low_temp_diff"] + "\n"
        + "降水確率: " + "\n"
        + "[00-06]: " + rain['00-06'] + "\n"
        + "[06-12]: " + rain['06-12'] + "\n"
        + "[12-18]: " + rain['12-18'] + "\n"
        + "[18-24]: " + rain['18-24'] + "\n"
        + "風 向: " + info["wind_wave"]
    )


def lambda_handler(event, context):
    """AWS Lambda entry point: scrape today's and tomorrow's forecast from
    the page at ``URL`` and broadcast a summary via the LINE bot API.

    Returns {'status_code': 200} on success.
    """
    print('event: {}'.format(event))
    print('context: {}'.format(context))
    # Current time converted to JST for the report header.
    now = datetime.now(tz=timezone.utc)
    tokyo = pytz.timezone('Asia/Tokyo')
    jst_now = tokyo.normalize(now.astimezone(tokyo))
    content0 = jst_now.strftime("%m月%d日 %H:%M現在")
    # Fetch and parse the forecast page.
    r = requests.get(url)
    html = r.text.encode(r.encoding)
    soup = BeautifulSoup(html, 'html.parser')
    report = {}  # renamed from `dict`, which shadowed the builtin
    content_text = []
    # Forecast location, extracted from the page title.
    l_pattern = r"(.+)の今日明日の天気"
    l_src = soup.title.text
    report['location'] = re.findall(l_pattern, l_src)[0]
    content00 = "●" + report['location'] + "の天気"
    print(content00)
    content_text.append(content00)
    soup_tdy = soup.select('.today-weather')[0]
    soup_tmr = soup.select('.tomorrow-weather')[0]
    # Today's forecast.
    report["today"] = forecast2dict(soup_tdy)
    content1 = _format_day_report(report["today"])
    print(content1)
    content_text.append(content1)
    # Tomorrow's forecast.
    report["tomorrow"] = forecast2dict(soup_tmr)
    content2 = _format_day_report(report["tomorrow"])
    print(content2)
    content_text.append(content2)
    notification_message = content0 + "\n" + "\n\n".join(content_text)
    # Broadcast to all followers of the LINE bot.
    line_bot_api = LineBotApi(line_access_token)
    line_bot_api.broadcast(TextSendMessage(text=notification_message))
    return {
        'status_code': 200
    }
def forecast2dict(soup):
    """Extract one day's forecast from a day-weather section of the page.

    ``soup`` is the BeautifulSoup node for '.today-weather' or
    '.tomorrow-weather'.  Returns ``{"date": "MM/DD(weekday)", "forecasts":
    {...}}`` where all forecast values are strings scraped from the page.
    """
    data = {}
    # Date handling: parse "M月D日(曜)" from the section header.
    d_pattern = r"(\d+)月(\d+)日\(([土日月火水木金])+\)"
    d_src = soup.select('.left-style')
    date = re.findall(d_pattern, d_src[0].text)[0]
    data["date"] = "%s/%s(%s)" % (date[0], date[1], date[2])
    #print("=====" + data["date"] + "=====")
    # Fetch the individual forecast elements.
    weather = soup.select('.weather-telop')[0]
    high_temp = soup.select("[class='high-temp temp']")[0]
    high_temp_diff = soup.select("[class='high-temp tempdiff']")[0]
    low_temp = soup.select("[class='low-temp temp']")[0]
    low_temp_diff = soup.select("[class='low-temp tempdiff']")[0]
    rain_probability = soup.select('.rain-probability > td')
    wind_wave = soup.select('.wind-wave > td')[0]
    # Store the stripped text values.
    forecast = {}
    forecast["weather"] = weather.text.strip()
    forecast["high_temp"] = high_temp.text.strip()
    forecast["high_temp_diff"] = high_temp_diff.text.strip()
    forecast["low_temp"] = low_temp.text.strip()
    forecast["low_temp_diff"] = low_temp_diff.text.strip()
    # Rain probability comes in four 6-hour buckets keyed "00-06" .. "18-24".
    every_6h = {}
    for i in range(4):
        time_from = 0+6*i
        time_to = 6+6*i
        itr = '{:02}-{:02}'.format(time_from, time_to)
        every_6h[itr] = rain_probability[i].text.strip()
    forecast["rain_probability"] = every_6h
    forecast["wind_wave"] = wind_wave.text.strip()
    data["forecasts"] = forecast
    return data
if __name__ == "__main__":
    # Local smoke test; on AWS, Lambda invokes lambda_handler directly.
    print(lambda_handler(event=None, context=None))
| StarcoderdataPython |
276616 | # -*- coding:utf-8 -*-
"""
@author:SiriYang
@file:SettingWidget.py
@time:2020/4/16 11:53
"""
import configparser
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QFileDialog, \
QPushButton, QScrollArea, QGridLayout, QSpinBox,QColorDialog
BTN_STYLE = """
QPushButton
{
font-family:Microsoft Yahei;
font-size:14px;
color:dimgray;
background-color:#fff;
border:1px solid #B5ADAD;
border-radius:2px;
}
QPushButton:hover
{
color:#fff;
background-color:dimgray;
}
QPushButton:pressed
{
color:#fff;
background-color:dimgray;
padding-left:3px;
padding-top:3px;
}
"""
SCROLLAREA_STYLE = """
QLabel{
font-family:Microsoft Yahei;
font-size:14px;
color:dimgray;
font-weight:bold;
}
QWidget{
background:#FBFAFA;
}
QLineEdit{
border:1px solid #B5ADAD;
font-family:Microsoft Yahei;
font-size:13px;
color:gray;
}
QScrollArea{
border:0px solid #B5ADAD;
}
QScrollBar:vertical
{
border-radius:7px;
background:#f1f1f1;
padding-top:14px;
padding-bottom:14px;
}
QScrollBar::handle:vertical
{
background:#C4CAD0;
border-radius:6px;
margin-left:2px;
margin-right:2px;
}
QScrollBar::handle:vertical:hover
{
background:gray;
border-radius:6px;
}
QScrollBar::add-line:vertical
{
height:14px;width:8px;
image:url('');
}
QScrollBar::sub-line:vertical
{
height:14px;width:8px;
image:url('');
}
QScrollBar::add-line:vertical:hover
{
height:14px;width:8px;
image:url('');
subcontrol-position:bottom;
}
QScrollBar::sub-line:vertical:hover
{
height:14px;width:8px;
image:url('');
subcontrol-position:top;
}
QScrollBar::add-page:vertical
{
background:#f1f1f1;
}
QScrollBar::sub-page:vertical
{
background:#f1f1f1;
}
QPushButton
{
font-family:Microsoft Yahei;
font-size:14px;
color:dimgray;
background-color:#fff;
border:1px solid #B5ADAD;
border-radius:2px;
}
QPushButton:hover
{
color:#fff;
background-color:dimgray;
}
QPushButton:pressed
{
color:#fff;
background-color:dimgray;
padding-left:3px;
padding-top:3px;
}
"""
class SettingWidget(QWidget):
    def __init__(self, fWind):
        """Build the settings panel; *fWind* is the owning main window."""
        super().__init__()
        self.fWindow = fWind  # back-reference to the parent window
        # Load persisted settings from init.ini (UTF-8) in the working directory.
        self.config = configparser.ConfigParser()
        self.config.read("init.ini",encoding="utf8")
        self.initUI()
        # Populate widgets from the loaded config (defined later in this class).
        self.resetData()
def initUI(self):
"""------------------图片标注设置------------------"""
self.label_Img = QLabel('图片标注设置')
self.label_Img.setFixedHeight(20)
self.label_Img.setAlignment(Qt.AlignCenter)
self.label_textformat = QLabel('标注文本模板:')
self.label_textformat.setFixedSize(200, 20)
self.label_textformat.setAlignment(Qt.AlignLeft)
self.label_textformat_info = QLabel()
self.label_textformat_info.setFixedHeight(90)
self.label_textformat_info.setAlignment(Qt.AlignLeft)
self.label_textformat_info.setStyleSheet("font-family:Microsoft Yahei;font-size:12px;color:gray;")
self.label_textformat_info.setText(" 请在模板中你想要的位置插入以下关键字,系统会自动替换为相应的数据:\n 图片创建的:\n\t年:{year}\t月:{month}\t日:{day}\n\t时:{hour}\t分:{minute}\t秒:{second}\n 图片的名称: {name}")
self.lineedit_textformat = QLineEdit()
self.lineedit_textformat.setFixedHeight(30)
self.lineedit_textformat.textChanged.connect(self.on_textformat_changed)
self.label_textformat_preview = QLabel('效果预览:')
self.label_textformat_preview.setFixedHeight(20)
self.label_textformat_preview.setAlignment(Qt.AlignLeft)
self.label_textformat_preview.setStyleSheet("font-family:Microsoft Yahei;font-size:12px;color:gray;")
layout_textformat = QVBoxLayout()
layout_textformat.setContentsMargins(0, 5, 0, 0)
layout_textformat.setSpacing(5)
layout_textformat.addWidget(self.label_textformat)
layout_textformat.addWidget(self.label_textformat_info)
layout_textformat.addWidget(self.lineedit_textformat)
layout_textformat.addWidget(self.label_textformat_preview)
self.label_textsize = QLabel('文本大小:')
self.label_textsize.setFixedSize(60, 30)
self.label_textsize.setAlignment(Qt.AlignLeft | Qt.AlignCenter)
self.spinbox_textsize = QSpinBox()
self.spinbox_textsize.setFixedSize(50, 22)
self.spinbox_textsize.setAlignment(Qt.AlignCenter)
self.spinbox_textsize.setMinimum(1)
self.spinbox_textsize.setMaximum(200)
layout_textsize = QHBoxLayout()
layout_textsize.setContentsMargins(0, 5, 0, 0)
layout_textsize.setSpacing(5)
layout_textsize.addWidget(self.label_textsize)
layout_textsize.addWidget(self.spinbox_textsize)
layout_textsize.addStretch()
self.label_textoffset = QLabel('文本垂直偏移量:')
self.label_textoffset.setFixedSize(100, 30)
self.label_textoffset.setAlignment(Qt.AlignLeft | Qt.AlignCenter)
self.spinbox_textoffset = QSpinBox()
self.spinbox_textoffset.setFixedSize(50, 22)
self.spinbox_textoffset.setAlignment(Qt.AlignCenter)
self.spinbox_textoffset.setMinimum(-20)
self.spinbox_textoffset.setMaximum(100)
layout_textoffset = QHBoxLayout()
layout_textoffset.setContentsMargins(0, 5, 0, 0)
layout_textoffset.setSpacing(5)
layout_textoffset.addWidget(self.label_textoffset)
layout_textoffset.addWidget(self.spinbox_textoffset)
layout_textoffset.addStretch()
self.label_textcolor = QLabel('文本颜色:')
self.label_textcolor.setFixedSize(60, 30)
self.label_textcolor.setAlignment(Qt.AlignLeft | Qt.AlignCenter)
self.label_textcolor_R = QLabel('R:')
self.label_textcolor_R.setFixedSize(30, 30)
self.label_textcolor_R.setAlignment(Qt.AlignLeft | Qt.AlignCenter)
self.spinbox_textcolor_R = QSpinBox()
self.spinbox_textcolor_R.setFixedSize(50, 22)
self.spinbox_textcolor_R.setAlignment(Qt.AlignCenter)
self.spinbox_textcolor_R.setMinimum(0)
self.spinbox_textcolor_R.setMaximum(255)
self.label_textcolor_G = QLabel('G:')
self.label_textcolor_G.setFixedSize(30, 30)
self.label_textcolor_G.setAlignment(Qt.AlignLeft | Qt.AlignCenter)
self.spinbox_textcolor_G = QSpinBox()
self.spinbox_textcolor_G.setFixedSize(50, 22)
self.spinbox_textcolor_G.setAlignment(Qt.AlignCenter)
self.spinbox_textcolor_G.setMinimum(0)
self.spinbox_textcolor_G.setMaximum(255)
self.label_textcolor_B = QLabel('B:')
self.label_textcolor_B.setFixedSize(30, 30)
self.label_textcolor_B.setAlignment(Qt.AlignLeft | Qt.AlignCenter)
self.spinbox_textcolor_B = QSpinBox()
self.spinbox_textcolor_B.setFixedSize(50, 22)
self.spinbox_textcolor_B.setAlignment(Qt.AlignCenter)
self.spinbox_textcolor_B.setMinimum(0)
self.spinbox_textcolor_B.setMaximum(255)
self.label_textcolor_example=QLabel()
self.label_textcolor_example.setFixedSize(100,30)
self.colorDialogBtn=QPushButton("选择颜色")
self.colorDialogBtn.setFixedSize(60,30)
self.colorDialogBtn.clicked.connect(self.on_colorDialogBtn_clicked)
layout_textcolor = QHBoxLayout()
layout_textcolor.setContentsMargins(0, 5, 0, 0)
layout_textcolor.setSpacing(5)
layout_textcolor.addWidget(self.label_textcolor)
layout_textcolor.addWidget(self.label_textcolor_R)
layout_textcolor.addWidget(self.spinbox_textcolor_R)
layout_textcolor.addWidget(self.label_textcolor_G)
layout_textcolor.addWidget(self.spinbox_textcolor_G)
layout_textcolor.addWidget(self.label_textcolor_B)
layout_textcolor.addWidget(self.spinbox_textcolor_B)
layout_textcolor.addStretch()
layout_textcolor.addWidget(self.label_textcolor_example)
layout_textcolor.addStretch()
layout_textcolor.addWidget(self.colorDialogBtn)
self.label_OutPutPath = QLabel('结果保存路径:')
self.label_OutPutPath.setFixedSize(200, 20)
self.label_OutPutPath.setAlignment(Qt.AlignLeft)
self.lineedit_OutPutPath = QLineEdit()
self.lineedit_OutPutPath.setFixedHeight(30)
self.btn_OutPutPath = QPushButton('...')
self.btn_OutPutPath.setStyleSheet(BTN_STYLE)
self.btn_OutPutPath.setFixedSize(30, 30)
self.btn_OutPutPath.clicked.connect(self.on_btn_OutPutPath_clicked)
layout_OutPutPath = QGridLayout()
layout_OutPutPath.setContentsMargins(0, 5, 0, 0)
layout_OutPutPath.setSpacing(5)
layout_OutPutPath.addWidget(self.label_OutPutPath, 0, 0)
layout_OutPutPath.addWidget(self.lineedit_OutPutPath, 1, 0)
layout_OutPutPath.addWidget(self.btn_OutPutPath, 1, 1)
layout_Img = QVBoxLayout()
layout_Img.setContentsMargins(5, 10, 5, 0)
layout_Img.setSpacing(10)
layout_Img.addWidget(self.label_Img)
layout_Img.addLayout(layout_textformat)
layout_Img.addLayout(layout_textsize)
layout_Img.addLayout(layout_textoffset)
layout_Img.addLayout(layout_textcolor)
layout_Img.addLayout(layout_OutPutPath)
"""------------------QScrollArea------------------"""
sa_contentLayout = QVBoxLayout()
sa_contentLayout.setContentsMargins(0, 0, 0, 0)
sa_contentLayout.setSpacing(10)
sa_contentLayout.addLayout(layout_Img)
sa_contentLayout.addStretch()
self.sa_contentWidget = QWidget()
self.sa_contentWidget.setFixedSize(600, 450)
self.sa_contentWidget.setLayout(sa_contentLayout)
self.sa_Settings = QScrollArea()
self.sa_Settings.setStyleSheet(SCROLLAREA_STYLE)
self.sa_Settings.setWidget(self.sa_contentWidget)
self.sa_Settings.setAlignment(Qt.AlignHCenter)
# ------------------------------------------------------
self.btn_defualt = QPushButton('默认')
self.btn_defualt.setFixedSize(60, 30)
self.btn_defualt.setStyleSheet(BTN_STYLE)
self.btn_defualt.setStatusTip('恢复默认设置')
self.btn_defualt.clicked.connect(self.on_btn_defualt_clicked)
self.btn_cancel = QPushButton('取消')
self.btn_cancel.setFixedSize(60, 30)
self.btn_cancel.setStyleSheet(BTN_STYLE)
self.btn_cancel.setStatusTip('取消未保存的修改')
self.btn_cancel.clicked.connect(self.on_btn_cancel_clicked)
self.btn_apply = QPushButton('应用')
self.btn_apply.setFixedSize(60, 30)
self.btn_apply.setStyleSheet(BTN_STYLE)
self.btn_apply.setStatusTip('保存并应用修改')
self.btn_apply.clicked.connect(self.on_btn_apply_clicked)
layout_bottom = QHBoxLayout()
layout_bottom.setContentsMargins(0, 0, 20, 0)
layout_bottom.setSpacing(5)
layout_bottom.addStretch(1)
layout_bottom.addWidget(self.btn_defualt)
layout_bottom.addWidget(self.btn_cancel)
layout_bottom.addWidget(self.btn_apply)
# -------------------------------------------------------
mainLayout = QVBoxLayout()
mainLayout.setContentsMargins(5, 0, 5, 5)
mainLayout.setSpacing(3)
mainLayout.addWidget(self.sa_Settings)
mainLayout.addLayout(layout_bottom)
self.setLayout(mainLayout)
def resetData(self):
self.lineedit_textformat.setText(self.config.get("IMG","textformat"))
self.on_textformat_changed()
self.spinbox_textsize.setValue(int(self.config.get("IMG","textsize")))
self.spinbox_textoffset.setValue(int(self.config.get("IMG", "textoffset")))
r,g,b=int(self.config.get("IMG","textcolorr")),int(self.config.get("IMG","textcolorg")),int(self.config.get("IMG","textcolorb"))
self.spinbox_textcolor_R.setValue(r)
self.spinbox_textcolor_G.setValue(g)
self.spinbox_textcolor_B.setValue(b)
self.label_textcolor_example.setStyleSheet("background-color:rgb({},{},{});border:1px solid #B5ADAD;".format(r,g,b))
self.lineedit_OutPutPath.setText(self.config.get("IMG","imgsavepath"))
def defualtData(self):
self.config.read("./defualt.ini",encoding="utf8")
self.resetData()
self.config.write(open("./init.ini", "w",encoding="utf8"))
def on_btn_OutPutPath_clicked(self):
path = QFileDialog.getExistingDirectory(self, "设置图片标注结果保存路径", ".")
if len(path) != 0:
self.lineedit_OutPutPath.setText(path)
def on_colorDialogBtn_clicked(self):
col = QColorDialog.getColor()
self.spinbox_textcolor_R.setValue(col.red())
self.spinbox_textcolor_G.setValue(col.green())
self.spinbox_textcolor_B.setValue(col.blue())
self.label_textcolor_example.setStyleSheet(
"background-color:rgb({},{},{});border:1px solid #B5ADAD;".format(col.red(), col.green(), col.blue()))
def on_btn_defualt_clicked(self):
self.defualtData()
def on_btn_cancel_clicked(self):
self.resetData()
def on_btn_apply_clicked(self):
self.saveConfig()
def on_textformat_changed(self):
format="效果预览: "+self.lineedit_textformat.text()
format =format.replace('{year}','2020')
format =format.replace('{month}', '1')
format =format.replace('{day}', '1')
format =format.replace('{hour}', '8')
format =format.replace('{minute}', '30')
format =format.replace('{second}', '12')
format =format.replace('{name}', '风景画')
self.label_textformat_preview.setText(format)
def show(self):
self.resetData()
super().show()
def saveConfig(self):
self.config.set("IMG","textformat",self.lineedit_textformat.text())
self.config.set("IMG", "textsize", str(self.spinbox_textsize.value()))
self.config.set("IMG", "textoffset", str(self.spinbox_textoffset.value()))
self.config.set("IMG", "textcolorr", str(self.spinbox_textcolor_R.value()))
self.config.set("IMG", "textcolorg", str(self.spinbox_textcolor_G.value()))
self.config.set("IMG", "textcolorb", str(self.spinbox_textcolor_B.value()))
self.config.set("IMG", "imgsavepath", str(self.lineedit_OutPutPath.text()))
self.config.write(open("./init.ini", "w",encoding="utf8")) | StarcoderdataPython |
1896628 | <reponame>douglasPinheiro/nirvaris-menu<filename>menu/urls.py
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
# URL routes for the project: only the Django admin site is wired here.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
]
3455294 | """Package with functionality used to analyze MALDI-TOF data."""
| StarcoderdataPython |
5141201 | #!/usr/bin/env python
# coding: utf-8
# ### Explore processed pan-cancer data
# In[1]:
import os
import sys
import numpy as np; np.random.seed(42)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import mpmp.config as cfg
import mpmp.utilities.data_utilities as du
# In[2]:
# which omics data type to analyse (key understood by mpmp's data utilities)
DATA_TYPE = 'mut_sigs'
# load gene/classification info and sample/cancer type info
print('Loading gene label data...', file=sys.stderr)
genes_df = du.load_vogelstein()
sample_info_df = du.load_sample_info(DATA_TYPE, verbose=True)
# load mutation info
# this returns a tuple of dataframes, unpack it below
pancancer_data = du.load_pancancer_data(verbose=True)
(sample_freeze_df,
 mutation_df,
 copy_loss_df,
 copy_gain_df,
 mut_burden_df) = pancancer_data
# In[3]:
# load relevant data
data_df = du.load_raw_data(DATA_TYPE, verbose=True)
# standardize columns of expression dataframe
if DATA_TYPE in cfg.standardize_data_types:
    print('Standardizing columns of {} data...'.format(DATA_TYPE),
          file=sys.stderr)
    data_df[data_df.columns] = StandardScaler().fit_transform(data_df[data_df.columns])
print(data_df.shape)
data_df.iloc[:5, :5]  # notebook-style peek at the first rows/columns
# First, let's look at the low-dimensional representation of the chosen data type.
#
# We'll choose a few cancer types that are similar to one another (LUSC/LUAD, LGG/GBM) and a few that should be dissimilar (BRCA, THCA).
# In[25]:
# samples must line up between the data matrix and the sample metadata
assert sample_info_df.index.equals(data_df.index)
# data_cancer_types = sorted(sample_info_df.cancer_type.unique())
# two similar pairs (LUAD/LUSC, LGG/GBM) plus two dissimilar types
data_cancer_types = ['LUAD', 'LUSC', 'THCA', 'LGG', 'GBM', 'BRCA']
data_types_df = (data_df
                 .merge(sample_info_df, left_index=True, right_index=True)
                 .query('cancer_type in @data_cancer_types')
                 .drop(columns=['sample_type', 'id_for_stratification'])
                 .reset_index()
                 )
print(data_types_df.cancer_type.unique())
data_types_df.iloc[:5, -5:]
# In[26]:
from sklearn.decomposition import PCA
from umap import UMAP
sns.set({'figure.figsize': (20, 8)})
fig, axarr = plt.subplots(1, 2)
# project onto 2 components with both linear (PCA) and nonlinear (UMAP) methods
pca = PCA(n_components=2)
X_proj_pca = pca.fit_transform(data_types_df.drop(columns=['sample_id', 'cancer_type']))
reducer = UMAP(n_components=2, random_state=42)
X_proj_umap = reducer.fit_transform(data_types_df.drop(columns=['sample_id', 'cancer_type']))
# one scatter colour per cancer type, same colour in both panels
for i, cancer_type in enumerate(data_cancer_types):
    ixs = data_types_df.index[data_types_df.cancer_type == cancer_type].tolist()
    axarr[0].scatter(X_proj_pca[ixs, 0], X_proj_pca[ixs, 1], label=cancer_type, s=5)
    axarr[1].scatter(X_proj_umap[ixs, 0], X_proj_umap[ixs, 1], label=cancer_type, s=5)
axarr[0].set_xlabel('PC1')
axarr[0].set_ylabel('PC2')
axarr[0].set_title('PCA projection of {} data, colored by cancer type'.format(DATA_TYPE))
axarr[0].legend()
axarr[1].set_xlabel('UMAP dimension 1')
axarr[1].set_ylabel('UMAP dimension 2')
axarr[1].set_title('UMAP projection of {} data, colored by cancer type'.format(DATA_TYPE))
axarr[1].legend()
# Now we want to dig a bit deeper into LGG and GBM, using expression and methylation data. It's fairly well-known that IDH1 mutation status defines distinct subtypes in both classes of brain tumors. We'll compare methylation and gene expression in IDH1-mutated vs. non-mutated samples, expecting to see a separation in our low dimensional representation.
#
# IDH1 plays a direct role in DNA methylation, so we anticipate that this separation between mutated and non-mutated samples will be slightly clearer in the methylation data.
# In[5]:
# load relevant data: expression is standardized column-wise, methylation is not
rnaseq_df = du.load_raw_data('expression', verbose=True)
print('Standardizing columns of expression data...', file=sys.stderr)
rnaseq_df[rnaseq_df.columns] = StandardScaler().fit_transform(rnaseq_df[rnaseq_df.columns])
methylation_df = du.load_raw_data('me_27k', verbose=True)
print(methylation_df.shape)
methylation_df.iloc[:5, :5]  # notebook-style peek
# In[6]:
from mpmp.utilities.tcga_utilities import process_y_matrix
def generate_labels(gene, classification):
    """Build the mutation-status label matrix for one gene.

    Uses the module-level pan-cancer dataframes (mutation_df, copy_gain_df,
    copy_loss_df, sample_freeze_df, mut_burden_df). Copy-number gains are
    folded in for oncogenes, losses for tumor suppressors; other
    classifications use mutation calls only.
    """
    y_mutation_df = mutation_df.loc[:, gene]
    # pick the copy-number signal matching the gene's classification
    if classification == "Oncogene":
        y_copy_number_df = copy_gain_df.loc[:, gene]
        include_copy = True
    elif classification == "TSG":
        y_copy_number_df = copy_loss_df.loc[:, gene]
        include_copy = True
    else:
        y_copy_number_df = pd.DataFrame()
        include_copy = False
    # construct labels from mutation/CNV information, and filter for
    # cancer types without an extreme label imbalance
    return process_y_matrix(
        y_mutation=y_mutation_df,
        y_copy=y_copy_number_df,
        include_copy=include_copy,
        gene=gene,
        sample_freeze=sample_freeze_df,
        mutation_burden=mut_burden_df,
        filter_count=1,
        filter_prop=0.01,
        output_directory=None,
        hyper_filter=5,
        test=True  # don't write filter info to file
    )
# In[7]:
# restrict the IDH1 mutation labels to the two brain-tumor cohorts
gene = 'IDH1'
cancer_types = ['LGG', 'GBM']
classification = du.get_classification(gene, genes_df)
y_df = generate_labels(gene, classification)
y_df = y_df[y_df.DISEASE.isin(cancer_types)]
print(y_df.shape)
y_df.tail()
# In[8]:
# generate UMAP 2-dimensional representations of data
shuffle = False  # set True to destroy per-sample structure as a null control

def shuffle_cols(input_df):
    """Randomly permute each row's column values (per-sample shuffle)."""
    # randomly permute genes of each sample in the rnaseq matrix
    # NOTE(review): relies on DataFrame.apply(axis=1) "reducing" the ndarray
    # return value to one object per row — this behaviour is pandas-version
    # sensitive; confirm against the pinned pandas version before reuse.
    shuf_df = input_df.apply(lambda x:
                             np.random.permutation(x.tolist()),
                             axis=1)
    # set up new dataframe with the original columns and index restored
    shuf_df = pd.DataFrame(shuf_df, columns=['col_list'])
    shuf_df = pd.DataFrame(shuf_df.col_list.values.tolist(),
                           columns=input_df.columns,
                           index=input_df.index)
    return shuf_df

# get samples that are present in all 3 datasets (expression, methylation, mutations)
ix_overlap = y_df.index.intersection(rnaseq_df.index).intersection(methylation_df.index)
y_mut_df = y_df.loc[ix_overlap, :]
rnaseq_mut_df = rnaseq_df.loc[ix_overlap, :]
me_mut_df = methylation_df.loc[ix_overlap, :]
if shuffle:
    rnaseq_mut_df = shuffle_cols(rnaseq_mut_df)
    me_mut_df = shuffle_cols(me_mut_df)
reducer = UMAP(n_components=2, random_state=42)
X_proj_rnaseq = reducer.fit_transform(rnaseq_mut_df)
X_proj_me = reducer.fit_transform(me_mut_df)
print(X_proj_rnaseq.shape)
print(X_proj_me.shape)
# In[9]:
# tidy dataframes for seaborn: one row per sample, columns = UMAP coords + labels
gene_label = '{} mutant'.format(gene)
me_proj_df = pd.DataFrame({
    'UMAP1': X_proj_me[:, 0],
    'UMAP2': X_proj_me[:, 1],
    'Cancer type': y_mut_df.DISEASE.values,
    gene_label: y_mut_df.status.values.astype('bool')
})
rnaseq_proj_df = pd.DataFrame({
    'UMAP1': X_proj_rnaseq[:, 0],
    'UMAP2': X_proj_rnaseq[:, 1],
    'Cancer type': y_mut_df.DISEASE.values,
    gene_label: y_mut_df.status.values.astype('bool')
})
me_proj_df.head()
# In[10]:
# side-by-side scatter: methylation (left) vs expression (right), coloured by
# mutation status, marker shape by cancer type
sns.set({'figure.figsize': (20, 8)})
fig, axarr = plt.subplots(1, 2)
sns.scatterplot(x='UMAP1', y='UMAP2', data=me_proj_df, hue=gene_label,
                style='Cancer type', ax=axarr[0])
axarr[0].set_xlabel('UMAP dimension 1')
axarr[0].set_ylabel('UMAP dimension 2')
axarr[0].set_title('UMAP projection of TCGA methylation data, colored by mutation status')
axarr[0].legend()
sns.scatterplot(x='UMAP1', y='UMAP2', data=rnaseq_proj_df, hue=gene_label,
                style='Cancer type', ax=axarr[1])
axarr[1].set_xlabel('UMAP dimension 1')
axarr[1].set_ylabel('UMAP dimension 2')
axarr[1].set_title('UMAP projection of TCGA gene expression data, colored by mutation status')
axarr[1].legend()
# As expected, we can see that there's a nice separation between (most) IDH1 mutants and non-mutants in the methylation data. They separate to some degree in the gene expression data, but not quite as clearly.
#
# It's likely (although I haven't checked this yet) that the non-mutated samples in the IDH1-mutant methylation cluster are actually IDH2 mutants. IDH2 is thought to phenocopy IDH1 in gliomas, having a similar effect on methylation and gene expression as IDH1 when mutated.
| StarcoderdataPython |
# fraction of documents used for training; the remainder becomes the test split
training_percentage = 0.8
import random
# English stop words mapped to 0 so membership tests are O(1) dict lookups
stop_words_dict = {'during': 0, 'has': 0, "it's": 0, 'very': 0, 'itself': 0, "why's": 0, "we'll": 0, 'hers': 0,
                   "isn't": 0, 'off': 0, 'we': 0, 'it': 0, 'the': 0, 'doing': 0, 'over': 0, 'its': 0, 'with': 0,
                   'so': 0, 'but': 0, 'they': 0, 'am': 0, 'until': 0, 'because': 0, "shouldn't": 0, "you're": 0,
                   'is': 0, "they're": 0, "you'd": 0, "mustn't": 0, 'would': 0, 'while': 0, 'should': 0, 'as': 0,
                   "i'd": 0, "we've": 0, 'when': 0, "wouldn't": 0, 'why': 0, "i'll": 0, 'theirs': 0, "aren't": 0,
                   'our': 0, 'from': 0, "we'd": 0, 'each': 0, 'only': 0, 'yourself': 0, 'been': 0, 'again': 0, 'of': 0,
                   'whom': 0, 'themselves': 0, 'or': 0, 'that': 0, 'me': 0, "how's": 0, 'those': 0, 'having': 0,
                   'was': 0, 'and': 0, 'few': 0, 'no': 0, 'any': 0, 'being': 0, 'an': 0, "let's": 0, "they'd": 0,
                   'own': 0, 'his': 0, 'herself': 0, 'before': 0, 'did': 0, 'too': 0, 'here': 0, 'were': 0, "that's": 0,
                   "what's": 0, "she'll": 0, 'i': 0, 'all': 0, 'have': 0, "weren't": 0, "you've": 0, "i'm": 0,
                   "he'd": 0, 'some': 0, 'into': 0, 'down': 0, 'this': 0, "she'd": 0, "i've": 0, 'do': 0, "can't": 0,
                   'for': 0, 'below': 0, 'through': 0, "don't": 0, 'more': 0, 'once': 0, "didn't": 0, 'same': 0,
                   "she's": 0, "they've": 0, "he'll": 0, 'not': 0, 'had': 0, 'such': 0, 'cannot': 0, 'about': 0,
                   'myself': 0, 'if': 0, "won't": 0, 'a': 0, 'how': 0, 'she': 0, 'you': 0, "we're": 0, "there's": 0,
                   'be': 0, 'yours': 0, "here's": 0, 'above': 0, 'at': 0, 'out': 0, 'does': 0, 'my': 0, 'to': 0,
                   'ought': 0, "hadn't": 0, "doesn't": 0, "couldn't": 0, 'he': 0, 'your': 0, 'ours': 0, 'up': 0,
                   'after': 0, "where's": 0, 'could': 0, 'under': 0, 'nor': 0, 'against': 0, 'further': 0, "they'll": 0,
                   'what': 0, 'then': 0, "you'll": 0, 'ourselves': 0, 'which': 0, 'between': 0, "shan't": 0, 'these': 0,
                   'in': 0, 'their': 0, "who's": 0, "he's": 0, 'yourselves': 0, 'himself': 0, 'both': 0, "wasn't": 0,
                   'him': 0, 'on': 0, 'them': 0, "when's": 0, 'there': 0, 'where': 0, 'than': 0, 'are': 0, 'her': 0,
                   "hasn't": 0, 'by': 0, 'other': 0, 'who': 0, "haven't": 0, 'most': 0}
def tokenize(sentence, stop_words=None):
    """Lowercase, split, and stop-word-filter a list of word tokens.

    Args:
        sentence: list of raw word strings (entries may carry trailing
            newlines when they come straight from a file line).
        stop_words: optional container of stop words; defaults to the
            module-level ``stop_words_dict`` for backward compatibility.

    Returns:
        List of lowercased tokens with trailing newlines stripped and
        stop words removed.
    """
    if stop_words is None:
        stop_words = stop_words_dict
    words = ' '.join(sentence).lower().split(' ')
    result = []
    for raw in words:
        # FIX: strip the newline *before* the stop-word test; the original
        # checked membership first, so a stop word at end-of-line (e.g.
        # "the\n") slipped through the filter.
        token = raw.strip('\n')
        if token not in stop_words:
            result.append(token)
    return result
# --- load the training corpus: one document per line, "<id> <words...>" ---
keys = {}    # document id -> row index into `words`
words = []   # tokenised documents, aligned with `keys`
i = 0
# FIX: use context managers; the original left both files open for the
# lifetime of the process.
with open('train-text.txt', 'r') as f:
    for each_line in f:
        temp = each_line.split(' ')
        keys[temp[0]] = i
        i += 1
        words.append(tokenize(list(temp[1:])))

# per-document class labels, looked up via the id -> index mapping
class1 = [0] * len(words)
class2 = [0] * len(words)
with open('train-labels.txt', 'r') as l:
    for each_line in l:
        temp = each_line.split(' ')
        class1[keys[temp[0]]] = temp[1].strip('\n')
        class2[keys[temp[0]]] = temp[2].strip('\n')

import collections
probs = collections.defaultdict(dict)  # reserved for per-class word probabilities

# vocabulary = set of all tokens seen anywhere in the corpus
vocab = []
for each_sentence in words:
    for each_word in each_sentence:
        vocab.append(each_word)
vocab = set(vocab)

# random train/test split over document indices
total_set = range(len(words))
# FIX: use the declared training_percentage constant instead of a duplicated
# hard-coded 0.8 (same value, single source of truth)
training_set = random.sample(total_set, int(training_percentage * len(total_set)))
testing_set = list(set(total_set) - set(training_set))
print(training_set)
print(testing_set)

# running counters for the (not yet implemented) classifier statistics
train_words = 0
train_class1 = 0
train_class2 = 0
test_words = 0
test_class1 = 0
test_class2 = 0
| StarcoderdataPython |
1758251 | # encoding: utf-8
# =========================================================================
# ©2017-2018 北京国美云服科技有限公司
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from gyun.cli.misc.utils import explode_array
from gyun.cli.iaas_client.actions.base import BaseAction
class DescribeRouterStaticEntriesAction(BaseAction):
    """CLI action that lists router static entries via the IaaS API."""

    action = 'DescribeRouterStaticEntries'
    command = 'describe-router-static-entries'
    usage = '%(prog)s [-s "router_static_entry_id, ..."] [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register the action-specific command-line options."""
        parser.add_argument(
            '-e', '--router_static_entries',
            dest='router_static_entries',
            action='store', type=str, default='',
            help='the comma separated IDs of router_static_entries you want to list. ')
        parser.add_argument(
            '-s', '--router_static',
            dest='router_static',
            action='store', type=str, default='',
            help='filter by router static. ')

    @classmethod
    def build_directive(cls, options):
        """Translate parsed options into the API request dictionary."""
        return {
            'router_static_entries': explode_array(options.router_static_entries),
            'router_static': options.router_static,
            'offset': options.offset,
            'limit': options.limit,
        }
| StarcoderdataPython |
4894644 | <filename>tests/bugs/core_0063_test.py<gh_stars>0
#coding:utf-8
#
# id: bugs.core_0063
# title: Sequence of commands crash FB server
# decription:
# tracker_id: CORE-0063
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
# no output substitutions are needed: the script prints deterministic text
substitutions_1 = []
# the database starts empty; all DDL happens inside the test script itself
init_script_1 = """"""
db_1 = db_factory(charset='WIN1252', sql_dialect=3, init=init_script_1)
# DDL/DML sequence that used to crash the Firebird server (CORE-0063)
test_script_1 = """
set bail on;
create domain d_descricao_30000_nn as varchar(30000) not null collate win_ptbr;
create table movimento( complemento d_descricao_30000_nn );
insert into movimento values ('');
insert into movimento values ('1234567890');
insert into movimento values ('');
create domain d_text_blob as blob sub_type text collate win_ptbr;
alter table movimento add complemento2 d_text_blob;
update movimento set complemento2 = complemento;
alter table movimento drop complemento, add complemento d_text_blob;
drop domain d_descricao_30000_nn;
update movimento set complemento = complemento2;
set list on;
select 'OK' as result from rdb$database;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
RESULT OK
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
    # run the script against the fixture database and compare the
    # whitespace-normalized ("clean") output with the expectation
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
| StarcoderdataPython |
# Read an integer and report whether its decimal digits form a palindrome.
number = int(input("Enter: "))
original = number
reversed_digits = 0
# peel the digits off the low end and push them onto the reversed value
while number > 0:
    number, digit = divmod(number, 10)
    reversed_digits = reversed_digits * 10 + digit
print("Palindrome" if original == reversed_digits else "not")
1901847 | """
Pascal VOC database
This class loads ground truth notations from standard Pascal VOC XML data formats
and transform them into IMDB format. Selective search is used for proposals, see roidb
function. Results are written as the Pascal VOC format. Evaluation is based on mAP
criterion.
"""
from __future__ import print_function
import cPickle
import cv2
import os
import numpy as np
from imdb import IMDB
from imagenet_eval import imagenet_eval, imagenet_eval_detailed, draw_ap, draw_map
from ds_utils import unique_boxes, filter_small_boxes
# ImageNet DET class list as WordNet synset ids; index 0 is the background
# class and the order must match imagenet_cls_names below.
imagenet_classes = np.array(['__background__',\
    'n02672831', 'n02691156', 'n02219486', 'n02419796', 'n07739125', 'n02454379',\
    'n07718747', 'n02764044', 'n02766320', 'n02769748', 'n07693725', 'n02777292',\
    'n07753592', 'n02786058', 'n02787622', 'n02799071', 'n02802426', 'n02807133',\
    'n02815834', 'n02131653', 'n02206856', 'n07720875', 'n02828884', 'n02834778',\
    'n02840245', 'n01503061', 'n02870880', 'n02879718', 'n02883205', 'n02880940',\
    'n02892767', 'n07880968', 'n02924116', 'n02274259', 'n02437136', 'n02951585',
    'n02958343', 'n02970849', 'n02402425', 'n02992211', 'n01784675', 'n03000684',\
    'n03001627', 'n03017168', 'n03062245', 'n03063338', 'n03085013', 'n03793489',\
    'n03109150', 'n03128519', 'n03134739', 'n03141823', 'n07718472', 'n03797390',\
    'n03188531', 'n03196217', 'n03207941', 'n02084071', 'n02121808', 'n02268443',\
    'n03249569', 'n03255030', 'n03271574', 'n02503517', 'n03314780', 'n07753113',\
    'n03337140', 'n03991062', 'n03372029', 'n02118333', 'n03394916', 'n01639765',\
    'n03400231', 'n02510455', 'n01443537', 'n03445777', 'n03445924', 'n07583066',\
    'n03467517', 'n03483316', 'n03476991', 'n07697100', 'n03481172', 'n02342885',\
    'n03494278', 'n03495258', 'n03124170', 'n07714571', 'n03513137', 'n02398521',\
    'n03535780', 'n02374451', 'n07697537', 'n03584254', 'n01990800', 'n01910747',\
    'n01882714', 'n03633091', 'n02165456', 'n03636649', 'n03642806', 'n07749582',\
    'n02129165', 'n03676483', 'n01674464', 'n01982650', 'n03710721', 'n03720891',\
    'n03759954', 'n03761084', 'n03764736', 'n03770439', 'n02484322', 'n03790512',\
    'n07734744', 'n03804744', 'n03814639', 'n03838899', 'n07747607', 'n02444819',\
    'n03908618', 'n03908714', 'n03916031', 'n00007846', 'n03928116', 'n07753275',\
    'n03942813', 'n03950228', 'n07873807', 'n03958227', 'n03961711', 'n07768694',\
    'n07615774', 'n02346627', 'n03995372', 'n07695742', 'n04004767', 'n04019541',\
    'n04023962', 'n04026417', 'n02324045', 'n04039381', 'n01495701', 'n02509815',\
    'n04070727', 'n04074963', 'n04116512', 'n04118538', 'n04118776', 'n04131690',\
    'n04141076', 'n01770393', 'n04154565', 'n02076196', 'n02411705', 'n04228054',\
    'n02445715', 'n01944390', 'n01726692', 'n04252077', 'n04252225', 'n04254120',\
    'n04254680', 'n04256520', 'n04270147', 'n02355227', 'n02317335', 'n04317175',\
    'n04330267', 'n04332243', 'n07745940', 'n04336792', 'n04356056', 'n04371430',\
    'n02395003', 'n04376876', 'n04379243', 'n04392985', 'n04409515', 'n01776313',\
    'n04591157', 'n02129604', 'n04442312', 'n06874185', 'n04468005', 'n04487394',\
    'n03110669', 'n01662784', 'n03211117', 'n04509417', 'n04517823', 'n04536866',\
    'n04540053', 'n04542943', 'n04554684', 'n04557648', 'n04530566', 'n02062744',\
    'n04591713', 'n02391049'])
# Human-readable names for the 200 DET classes, positionally aligned with
# the synset ids in imagenet_classes above (index 0 = background).
imagenet_cls_names = np.array(['__background__',\
    'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke',\
    'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid',\
    'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee',\
    'bell_pepper', 'bench', 'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie',\
    'bow', 'bowl', 'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener',\
    'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair', 'chime',\
    'cocktail_shaker', 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',\
    'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', 'digital_clock',\
    'dishwasher', 'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell', 'electric_fan',\
    'elephant', 'face_powder', 'fig', 'filing_cabinet', 'flower_pot', 'flute', 'fox',\
    'french_horn', 'frog', 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',\
    'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer', 'hamster',\
    'harmonica', 'harp', 'hat_with_a_wide_brim', 'head_cabbage', 'helmet', 'hippopotamus',\
    'horizontal_bar', 'horse', 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',\
    'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', 'lobster', 'maillot',\
    'maraca', 'microphone', 'microwave', 'milk_can', 'miniskirt', 'monkey', 'motorcycle',\
    'mushroom', 'nail', 'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener',\
    'perfume', 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag',\
    'plate_rack', 'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel', 'printer', 'puck',\
    'punching_bag', 'purse', 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', 'remote_control',\
    'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker', 'saxophone', 'scorpion',\
    'screwdriver', 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', 'snowplow',\
    'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel', 'starfish', 'stethoscope',\
    'stove', 'strainer', 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',\
    'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger', 'toaster',\
    'traffic_light', 'train', 'trombone', 'trumpet', 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum',\
    'violin', 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', 'whale', 'wine_bottle',\
    'zebra'])
class imagenet(IMDB):
    def __init__(self, image_set, root_path, devkit_path, result_path=None, mask_size=-1, binary_thresh=None):
        """
        fill basic information to initialize imdb
        :param image_set: DET split to load, e.g. 'train' or 'val'
        :param root_path: 'selective_search_data' and 'cache'
        :param devkit_path: data and results
        :param result_path: optional output directory (unused here; presumably consumed by IMDB — TODO confirm)
        :param mask_size: stored on the instance for downstream consumers (-1 disables)
        :param binary_thresh: stored on the instance; may be None
        :return: imdb object
        """
        # year, image_set = image_set.split('_')
        super(imagenet, self).__init__('imagenet_', image_set, root_path, devkit_path)  # set self.name
        self.root_path = root_path
        self.devkit_path = devkit_path
        self.data_path = os.path.join(devkit_path, 'DET')
        # class list: WordNet synset ids with '__background__' at index 0
        self.classes = imagenet_classes
        self.num_classes = len(self.classes)
        self.image_set_index = self.load_image_set_index()
        self.num_images = len(self.image_set_index)
        print('num_images', self.num_images)
        self.mask_size = mask_size
        self.binary_thresh = binary_thresh
        # comp_id/use_diff/min_size mirror the Pascal VOC evaluation defaults
        self.config = {'comp_id': 'comp4',
                       'use_diff': False,
                       'min_size': 2}
def load_image_set_index(self):
"""
find out which indexes correspond to given image set (train or val)
:return:
"""
image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'DET', self.image_set + '.txt')
assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)
with open(image_set_index_file) as f:
image_set_index = [x.strip().split(' ')[0] for x in f.readlines()]
return image_set_index
def image_path_from_index(self, index):
"""
given image index, find out full path
:param index: index of a specific image
:return: full path of this image
"""
image_file = os.path.join(self.data_path,'Data','DET', self.image_set, index + '.JPEG')
assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)
return image_file
    def gt_roidb(self):
        """
        return ground truth image regions database
        :return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        """
        # Annotations are cached as a pickle; parsing every XML is slow.
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
            # Report images that ended up with zero ground-truth boxes.
            for gt in roidb:
                if gt['boxes'].shape[0]==0:
                    print(gt['image'])
            return roidb
        # Cache miss: parse every annotation file and persist the result.
        gt_roidb = [self.load_imagenet_annotation(index) for index in self.image_set_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb
    def load_imagenet_annotation(self, index):
        """
        for a given index, load image and bounding boxes info from XML file
        :param index: index of a specific image
        :return: record['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        """
        import xml.etree.ElementTree as ET
        roi_rec = dict()
        roi_rec['image'] = self.image_path_from_index(index)
        # The image is read only to obtain its (height, width).
        size = cv2.imread(roi_rec['image']).shape
        roi_rec['height'] = size[0]
        roi_rec['width'] = size[1]
        filename = os.path.join(self.data_path, 'Annotations','DET',self.image_set, index + '.xml')
        tree = ET.parse(filename)
        objs = tree.findall('object')
        # if not self.config['use_diff']:
        #     non_diff_objs = [obj for obj in objs if int(obj.find('difficult').text) == 0]
        #     objs = non_diff_objs
        num_objs = len(objs)
        # NOTE: uint16 boxes assume coordinates < 65536.
        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        class_to_index = dict(zip(self.classes, range(self.num_classes)))
        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            bbox = obj.find('bndbox')
            x1 = float(bbox.find('xmin').text)
            y1 = float(bbox.find('ymin').text)
            x2 = float(bbox.find('xmax').text)
            # Clamp boxes whose max coordinate equals the image extent so
            # x2/y2 stay valid pixel indices.
            if x2 == size[1]:
                print ("label xmax reach the image width")
                x2 = x2 - 1
            y2 = float(bbox.find('ymax').text)
            if y2 == size[0]:
                print ("label ymax reach the image height")
                y2 = y2 - 1
            cls = class_to_index[obj.find('name').text.lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            # One-hot overlap: a ground-truth box overlaps its own class fully.
            overlaps[ix, cls] = 1.0
        roi_rec.update({'boxes': boxes,
                        'gt_classes': gt_classes,
                        'gt_overlaps': overlaps,
                        'max_classes': overlaps.argmax(axis=1),
                        'max_overlaps': overlaps.max(axis=1),
                        'flipped': False})
        return roi_rec
    def load_selective_search_roidb(self, gt_roidb):
        """
        turn selective search proposals into selective search roidb
        :param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        :return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        """
        import scipy.io
        matfile = os.path.join(self.root_path, 'selective_search_data', self.name + '.mat')
        assert os.path.exists(matfile), 'selective search data does not exist: {}'.format(matfile)
        raw_data = scipy.io.loadmat(matfile)['boxes'].ravel()  # original was dict ['images', 'boxes']
        box_list = []
        for i in range(raw_data.shape[0]):
            # Reorder (y1,x1,y2,x2) -> (x1,y1,x2,y2) and shift to 0-based.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1  # pascal voc dataset starts from 1.
            # Drop duplicate, then too-small, proposals.
            keep = unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)
        return self.create_roidb_from_box_list(box_list, gt_roidb)
    def selective_search_roidb(self, gt_roidb, append_gt=False):
        """
        get selective search roidb and ground truth roidb
        :param gt_roidb: ground truth roidb
        :param append_gt: append ground truth boxes to the proposal roidb
        :return: roidb of selective search
        """
        # Cached on disk because loading/filtering the .mat file is slow.
        cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print('{} ss roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        if append_gt:
            print('appending ground truth annotations')
            ss_roidb = self.load_selective_search_roidb(gt_roidb)
            roidb = IMDB.merge_roidbs(gt_roidb, ss_roidb)
        else:
            roidb = self.load_selective_search_roidb(gt_roidb)
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print('wrote ss roidb to {}'.format(cache_file))
        return roidb
def evaluate_detections(self, detections, detailed=False):
"""
top level evaluations
:param detections: result matrix, [bbox, confidence]
:return: None
"""
# make all these folders for results
result_dir = os.path.join(self.devkit_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
year_folder = os.path.join(self.devkit_path, 'results', 'ImageNet')
if not os.path.exists(year_folder):
os.mkdir(year_folder)
res_file_folder = os.path.join(self.devkit_path, 'results', 'ImageNet' , 'Main')
if not os.path.exists(res_file_folder):
os.mkdir(res_file_folder)
self.write_pascal_results(detections)
self.do_python_eval()
if detailed:
self.do_python_eval_detailed()
    def boxvoting(self, detections_list):
        # NOTE(review): unfinished method.  It allocates the all_boxes
        # structure, but the merging logic is still commented out and
        # nothing is returned (implicitly None).  evaluate_detections_merge()
        # passes this return value on to write_pascal_results(), which will
        # then fail -- TODO complete or remove.
        all_boxes = [[[] for _ in xrange(self.num_images)]
                     for _ in xrange(self.num_classes)]
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            for im_ind, index in enumerate(self.image_set_index):
                dets = []
                #for i in range(detections_list.shape[0]):
                #    dets.append() =
                #if len(dets) == 0:
                #    continue
                # the VOCdevkit expects 1-based indices
                #for k in range(dets.shape[0]):
                #    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                #            format(index, dets[k, -1],
                #                   dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1))
    def evaluate_detections_merge(self, detections_list):
        """
        top level evaluations over a stack of detection results
        :param detections_list: stack of result matrices, [bbox, confidence]
        :return: None
        """
        # NOTE(review): boxvoting() currently returns None (its body is
        # unfinished), so the multi-detector branch below cannot work yet --
        # TODO confirm before using with more than one detections matrix.
        if detections_list.shape[0] <=1:
            detections = detections_list
        else:
            detections = self.boxvoting(detections_list)
        # make all these folders for results
        result_dir = os.path.join(self.devkit_path, 'results')
        if not os.path.exists(result_dir):
            os.mkdir(result_dir)
        year_folder = os.path.join(self.devkit_path, 'results', 'ImageNet')
        if not os.path.exists(year_folder):
            os.mkdir(year_folder)
        res_file_folder = os.path.join(self.devkit_path, 'results', 'ImageNet' , 'Main')
        if not os.path.exists(res_file_folder):
            os.mkdir(res_file_folder)
        self.write_pascal_results(detections)
        self.do_python_eval()
def get_result_file_template(self):
"""
this is a template
VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
:return: a string template
"""
res_file_folder = os.path.join(self.devkit_path, 'results', 'ImageNet', 'Main')
#comp_id = self.config['comp_id']
#filename = comp_id + '_det_' + self.image_set + '_{:s}.txt'
filename = '_det_' + self.image_set + '_{:s}.txt'
path = os.path.join(res_file_folder, filename)
return path
    def write_pascal_results(self, all_boxes):
        """
        write results files in pascal devkit path
        :param all_boxes: boxes to be processed, indexed [class][image]
        :return: None
        """
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print('Writing {} VOC results file'.format(cls))
            filename = self.get_result_file_template().format(cls)
            with open(filename, 'wt') as f:
                for im_ind, index in enumerate(self.image_set_index):
                    dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in range(dets.shape[0]):
                        # One line per detection: index score x1 y1 x2 y2.
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                                format(index, dets[k, -1],
                                       dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1))
    def do_python_eval(self):
        """
        python evaluation wrapper: per-class AP over the written result
        files, plus a mean-AP plot via draw_ap()
        :return: None
        """
        annopath = os.path.join(self.data_path, 'Annotations',"DET",self.image_set, '{0!s}.xml')
        imageset_file = os.path.join(self.data_path, 'ImageSets', 'DET', self.image_set + '.txt')
        annocache = os.path.join(self.cache_path, self.name + '_annotations.pkl')
        aps = []
        ars = []
        nobs = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True  # if int(self.year) < 2010 else False
        print('VOC07 metric? ' + ('Y' if use_07_metric else 'No'))
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            filename = self.get_result_file_template().format(cls)
            # Per-class recall, precision, AP, AR and number of positives.
            rec, prec, ap, ar, npos = imagenet_eval(filename, annopath, imageset_file, cls, annocache,
                                                    ovthresh=0.5, use_07_metric=use_07_metric)
            aps += [ap]
            ars += [ar]
            nobs += [npos]
            print('AP for {} = {:.4f}'.format(cls, ap))
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        #self.ap = aps
        draw_ap(aps, ars, nobs, imagenet_cls_names[1:], range_name='all', tag='map={:.4f}'.format(np.mean(aps)))
def save_ap(self,path = "saveap.txt"):
aps=[]
with open(path,"w") as f:
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
filename = self.get_result_file_template().format(cls)
rec, prec, ap = imagenet_eval(filename, self.annopath, self.imageset_file, cls, self.annocache,
ovthresh=0.5, use_07_metric=True)
aps += [ap]
f.write('AP for {} = {:.4f}'.format(cls, ap))
f.write('Mean AP = {:.4f}'.format(np.mean(aps)))
    def do_python_eval_detailed(self):
        """
        python evaluation wrapper, broken down by object size (log area)
        and by log aspect ratio
        :return: None
        """
        annopath = os.path.join(self.data_path, 'Annotations',"DET",self.image_set, '{0!s}.xml')
        imageset_file = os.path.join(self.data_path, 'ImageSets', 'DET', self.image_set + '.txt')
        annocache = os.path.join(self.cache_path, self.name + '_annotations.pkl')
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True  # if int(self.year) < 2010 else False
        print('VOC07 metric? ' + ('Y' if use_07_metric else 'No'))
        # Fixed buckets over log(aspect ratio) and log(area).
        log_aspect_ratio_names = ['<-3', '-3~-1.5', '-1.5~-0.5', '-0.5~0.5', '0.5~1.5', '1.5~3', '>3']
        log_aspect_ratio_ranges = [[-1e5, -3], [-3, -1.5], [-1.5, -0.5], [-0.5, 0.5],
                                   [0.5, 1.5], [1.5, 3], [3, 1e5]]
        log_area_names = ['<13', '13~15', '15~17', '17~19', '>19']
        log_area_ranges = [[0, 13], [13, 15], [15, 17], [17, 19], [19, 1e5]]
        # log_aspect_ratio_ranges, log_aspect_ratio_names = self.get_ranges(start = -3, end = 3, step = 0.2)
        # log_area_ranges, log_area_names = self.get_ranges(start = 8, end = 19, step = 0.2)
        # ---- per-area breakdown ----
        log_area_map = []
        nobs_in_range = []
        for range_id, log_area_range in enumerate(log_area_ranges):
            aps = []
            ars = []
            nobs = []
            for cls_ind, cls in enumerate(self.classes):
                if cls == '__background__':
                    continue
                filename = self.get_result_file_template().format(cls)
                rec, prec, ap, ar, npos = imagenet_eval_detailed(filename, annopath, imageset_file, cls, annocache,
                                                                 ovthresh=0.5, use_07_metric=use_07_metric, tag='area',
                                                                 log_area_range=log_area_range)
                aps += [ap]
                ars += [ar]
                nobs += [npos]
                print('AP for {} = {:.4f} in log area range: [{},{}]'
                      .format(imagenet_cls_names[cls_ind], ap, log_area_range[0], log_area_range[1]))
            draw_ap(aps, ars, nobs, imagenet_cls_names[1:], log_area_names[range_id], tag='log_area')
            nobs_in_range += [np.sum(nobs)]
            # map = np.sum(np.array(aps) * np.array(nobs)) / np.maximum(np.sum(nobs), np.finfo(np.float64).eps)
            # (the local name shadows the builtin 'map' -- local use only)
            map = np.mean(aps)
            print('Mean AP = {:.4f} in log area range: [{},{}]'
                  .format(map, log_area_range[0], log_area_range[1]))
            log_area_map += [map]
        draw_map(log_area_map, log_area_names, nobs_in_range, tag='log_area')
        print('map for area all:{}, num of gt:{}'.format(log_area_map, nobs_in_range))
        # ---- per-aspect-ratio breakdown ----
        log_aspect_ratio_map = []
        nobs_in_range = []
        for range_id, log_aspect_ratio_range in enumerate(log_aspect_ratio_ranges):
            aps = []
            ars = []
            nobs = []
            for cls_ind, cls in enumerate(self.classes):
                if cls == '__background__':
                    continue
                filename = self.get_result_file_template().format(cls)
                rec, prec, ap, ar, npos = imagenet_eval_detailed(filename, annopath, imageset_file, cls, annocache,
                                                                 ovthresh=0.5, use_07_metric=use_07_metric,
                                                                 tag='aspect ratio',
                                                                 log_aspect_ratio_range=log_aspect_ratio_range)
                aps += [ap]
                ars += [ar]
                nobs += [npos]
                print('AP for {} = {:.4f} in log aspect ratio range: [{},{}]'
                      .format(imagenet_cls_names[cls_ind], ap, log_aspect_ratio_range[0], log_aspect_ratio_range[1]))
            draw_ap(aps, ars, nobs, imagenet_cls_names[1:], log_aspect_ratio_names[range_id], tag='log_aspect_ratio')
            nobs_in_range += [np.sum(nobs)]
            print('nobs in this range:{},sum:{}'.format(nobs, np.sum(nobs)))
            # map = np.sum(np.array(aps) * np.array(nobs)) / np.maximum(np.sum(nobs), np.finfo(np.float64).eps)
            map = np.mean(aps)
            print('Mean AP = {:.4f} in log aspect ratio range: [{},{}]'
                  .format(map, log_aspect_ratio_range[0], log_aspect_ratio_range[1]))
            log_aspect_ratio_map += [map]
        draw_map(log_aspect_ratio_map, log_aspect_ratio_names, nobs_in_range, tag='log_aspect_ratio')
        print('map for ratio all:{}, num of gt:{}'.format(log_aspect_ratio_map,nobs_in_range))
def get_ranges(self, start, end, step):
v = np.arange(start, end, step)
v = np.insert(v, 0, -1e5)
v = np.append(v, 1e5)
ranges = []
range_names = []
for idx in range(len(v) - 1):
range_start = v[idx]
range_end = v[idx + 1]
# if start/end is very close to zero, set it to zero
if range_start > -1e-10 and range_start < 1e-10:
range_start = 0
if range_end > -1e-10 and range_end < 1e-10:
range_end = 0
ranges.append([range_start, range_end])
# set names of first and last range
if idx == 0:
name = '<' + str(range_end)
elif idx == len(v) - 2:
name = '>' + str(range_start)
else:
name = str(range_start) + '~' + str(range_end)
range_names.append(name)
print(range_names)
print(ranges)
return ranges, range_names
| StarcoderdataPython |
5141293 | from __future__ import print_function
from __future__ import absolute_import
from past.builtins import basestring
import numpy as np
import os
from .ascii import read_columns
from . import moby_fits
class StructDB(np.ndarray):
    """
    Mini-database for storage of simple records.  For example,
    detector properties (that you want to look up by det_uid) or TOD
    properties (that you want to look up by TOD name).
    In addition to the normal interface of a numpy structured array,
    some database-like functions are supported, for finding single or
    multiple records based on selected keys.
    """
    def __new__(cls, shape, dtype=float, buffer=None, offset=0,
                strides=None, order=None):
        # ndarray subclasses allocate in __new__; __init__ is not used.
        self = super(StructDB, cls).__new__(cls, shape, dtype, buffer,
                                            offset, strides, order)
        # Map of field name -> printf-style format string, used when
        # dumping the table to ascii / FITS.
        self.formats = {}
        return self
def __array_finalize__(self, obj):
if isinstance(obj, StructDB):
self.formats = {}
if hasattr(obj, 'formats'):
for k in list(obj.formats.keys()):
if k in self.dtype.name:
self.formats[k] = obj.formats[k]
def copy(self, order='C'):
out = super(StructDB, self).copy(order=order)
out.formats = self.formats.copy()
return out
@classmethod
def from_data(cls, data, formats={}):
"""
Construct a StructDB array from numpy arrays. data can be one of:
* A dictionary of key -> value associations
* A list of tuples [(key1,value1), (key2,value2), ...]
"""
if isinstance(data, dict):
data = list(data.items())
dtype = [(d[0],np.asarray(d[1]).dtype) for d in data]
self = cls(shape=len(data[0][1]), dtype=dtype)
for d in data:
self[d[0]][:] = d[1]
if len(d) > 2:
self.formats[d[0]] = d[2]
self.formats.update(formats)
return self
def get_row(self, i):
return dict([(k,self[k][i]) for k in self.dtype.names])
    def index(self, props={}, fail=True):
        """Return the index of the single record matching ``props``.

        :param props: {field: value} pairs that must all match
        :param fail: if True, raise ValueError on no match; otherwise -1
        """
        # Wrap each scalar in a one-element list so select_inner can use it.
        # NOTE(review): assumes props is non-empty; with an empty dict,
        # idx[0] below would raise IndexError -- confirm callers.
        l_props = dict([(k,[v]) for k,v in list(props.items())])
        idx = self.select_inner(l_props)
        if idx[0] >= 0:
            return idx[0]
        if fail:
            raise ValueError("Could not match specification %s" % props)
        return -1
"""
Database-like operations.
"""
def select_outer(self, props={}, mask=False, fields_out=None):
"""
Select a set of records based on outer product of the
(key,values) pairs in the props dict. "props" is a dictionary
where each key is a field property and each value is a list of
desired values for that property. For example, if props is:
{'row': [1,2,3], 'col': [2,4]}
Then the function returns the (probably) 6 detectors that
have 'row' property in [1,2,3] and 'col' property in [2,4].
If a value is given as "None", it is ignored (i.e. no attempt
is made to match that key).
Returns the matching indices, or a boolean mask of size
len(self) if mask==True.
"""
# Create a mask to track the matches
selection = np.ones(len(self), 'bool')
# Reduce the mask with each property
for k, values in list(props.items()):
if values is None:
continue
if k in self.dtype.names:
selection *= [v in values for v in self[k]]
elif k == 'det_uid':
selection[values] *= True
else:
print("Property %s not defined for array." % k)
selection[:] = False
break
if fields_out is None:
if mask:
return selection
return selection.nonzero()[0]
if isinstance(fields_out, basestring):
return self[fields_out][selection]
else:
return StructDB.from_data([(f, self[f][selection]) for f in fields_out])
def select_inner(self, props, mask=False):
"""
Select a set of records based on inner product of the
(key,values) pairs in the props dict. "props" is a dictionary
where each key is a property and each value is a list of
desired values for that property. The value lists must all be
the same length. For example, if props is:
{'row': [1,2,3], 'col': [2,4,5]}
Then the function returns the index of 3 detectors with
(row,col) in [(1,2),(2,4),(3,5)].
If a value is given as "None", it is ignored (i.e. no attempt
is made to match that key).
When mask==False (the default), the array that is returned
gives the det_uid of the detectors matching the requested
properties, in the order that they were matched, with -1
dropped in when a match could not be made.
When mask==True, a boolean mask is returned that identifies
the position of successful matches; the ordering is lost, and
match failures are ignored.
"""
# Get tuples for items we are trying to find
keys = [k for k,v in list(props.items()) if v is not None]
props = list(zip(*[props[k] for k in props]))
# Init matched indices to -1.
matched = np.zeros(len(props), int)-1
for k in keys:
if not k in self.dtype.names:
print("Property %s not defined for array." % k)
return matched
# Find each.
for ip, p in enumerate(props):
s = np.ones(len(self), 'bool')
for k, v in zip(keys, p):
s *= self[k] == v
i = s.nonzero()[0]
if len(i) > 0:
matched[ip] = i
if mask:
mask = np.zeros(self.ndets, 'bool')
mask[matched[matched>=0]] = True
return mask
return matched
    @classmethod
    def from_column_file(cls, filename, field_map, dtypes={},
                         comments='#', skip=0):
        """
        Load data from columnated ascii file. field_map a map from
        field name to column number (0-indexed), which can be a dict
        or a list of tuples.
        dtypes, comments, and skip are all passed to ascii.read_columns.
        E.g.:
            det_data = StructDB.from_column_file(
                'clusters.txt', [('id', 0),('RA', 1), ('Dec', 2)])
        """
        # Re-key dtypes from field name to column index, as read_columns
        # expects.  NOTE(review): field_map[k] is a name lookup, so passing
        # a non-empty ``dtypes`` only works when field_map is a dict --
        # confirm before relying on the list-of-tuples form with dtypes.
        t_dtypes = {}
        for k,v in list(dtypes.items()):
            t_dtypes[field_map[k]] = v
        # Get column indices
        if isinstance(field_map, dict):
            field_map = list(field_map.items())
        names, t_columns = list(zip(*field_map))
        # Load data arrays
        data = read_columns(filename, t_columns, skip=skip, dtypes=t_dtypes,
                            comments=comments)
        # That's it
        return cls.from_data(list(zip(names, data)))
def to_column_file(self, filename, fields=None, formats={},
header=True):
if isinstance(filename, basestring):
fout = open(filename, 'w')
else: # must be file-like, then...
fout = filename
if fields is None:
fields = self.dtype.names
formats_list = []
for k in fields:
formats_list.append(formats.get(k, self.formats.get(k)))
if formats_list[-1] is None:
formats_list[-1] = moby_fits.get_fmtcode(self[k])
data_refs = [self[k] for k in fields]
#fmt_str = ' '.join([all_formats[k] for k in fields]) + '\n'
#for i in xrange(len(self)):
# fout.write(fmt_str % tuple([self[k][i] for k in fields]))
#del fout
# Write data
for row in range(len(data_refs[0])):
tokens = [fmt % d[row] for fmt,d in zip(formats_list, data_refs)]
if header:
# Pad, when possible, to keep header aligned with data
header, didx = '#', 0
for f, t in zip(fields, tokens):
didx += len(t)
header += ' ' * max(1,didx-len(header)-len(f)) + f
didx += 1
fout.write(header + '\n')
# Mark header as written
header = False
fout.write(' '.join(tokens) + '\n')
del fout
    @classmethod
    def from_fits_table(cls, filename, index=1):
        """Load a StructDB from FITS table HDU ``index`` of ``filename``,
        restoring the per-field format strings stored by to_fits_table."""
        ftr = moby_fits.MobyFitsTableReader(filename, index)
        self = cls.from_data(ftr.get_data())
        self.formats = ftr.get_formats()
        return self
    def to_fits_table(self, filename=None, fields=None, formats=None,
                      fits_formats=None, clobber=True):
        """
        Write the data to a FITS file, including printf formatting
        information for easy ascii dumping.
        'fields' is a list of fields to write; this will default to
        self.fields. 'fits_formats' and 'formats' also default to
        self.<that>, but if these formats are missing they will be
        guessed based on the data.
        This function returns the resulting fits BinTableHDU, so
        pass filename=None if the HDU is what you really want.
        """
        # NOTE(review): the fields / formats / fits_formats arguments are
        # accepted but never used -- the writer is always given self and
        # self.formats.  Confirm whether the docstring or the code is stale.
        ftw = moby_fits.MobyFitsTableWriter(self, self.formats)
        hdu = ftw.get_hdu()
        if not filename is None:
            ftw.write(filename, clobber=clobber)
        return hdu
@classmethod
def from_hdf(cls, filename=None, dataset=None, group=None):
import h5py
if isinstance(filename, basestring):
hfile = h5py.File(filename, 'r')
else:
hfile = filename
assert (group is None or dataset is None)
if group is not None:
if isinstance(group, basestring):
group = hfile[group]
node = group
items = list(group.items())
try:
dt = [(str(k),v.dtype) for (k,v) in items]
except:
raise RuntimeError(
'Could not handle %s:%s; perhaps choose a dataset from: %s' %
(hfile.filename, dataset.name, [v.name for k,v in items]))
self = cls(len(items[0][1]), dt)
for k,v in items:
self[str(k)] = v
else:
if dataset is None:
dataset = '/'
if isinstance(dataset, basestring):
dataset = hfile[dataset]
self = cls(dataset.shape, dataset.dtype)
self[:] = dataset.value
node = dataset
self.formats = dict(node.attrs.get('_formats', []))
if isinstance(filename, basestring):
hfile.close()
return self
    def to_hdf(self, filename, dataset=None, group=None, clobber=False,
               compression=None):
        """
        Write the StructDB array to the indicated HDF5 file.  If a
        dataset is named, the array is written as a single, structured
        dataset with that name.  If group is specified, the StructDB
        is written as multiple, simple datasets, with specified group
        name.  We hereby declare that the former approach should be
        taken whenever convenient.
        """
        import h5py
        if isinstance(filename, basestring):
            hfile = h5py.File(filename, 'a')
        else:
            hfile = filename
        # Exactly one of dataset / group must be given.
        assert(int(group is None) + int(dataset is None) == 1)
        dest = dataset
        if dataset is None:
            dest = group
        if dest in hfile:
            if clobber:
                del hfile[dest]
            else:
                raise RuntimeError("Location %s exists in HDF file. Pass "
                                   "clobber=True to overwrite." % dest)
        if dataset is not None:
            node = hfile.create_dataset(dataset, data=self, compression=compression)
        if group is not None:
            node = hfile.create_group(group)
            for n in self.dtype.names:
                node.create_dataset(n, data=self[n], compression=compression)
        if len(self.formats):
            # Stash the printf formats so from_hdf can round-trip them.
            node.attrs['_formats'] = list(self.formats.items())
        if isinstance(filename, basestring):
            hfile.close()
| StarcoderdataPython |
8095940 | <gh_stars>0
# NOTE(review): fragment -- `arr` is never defined in this file; presumably it
# is populated by earlier input-reading code.  TODO confirm before use.
arr.sort()
# Largest element (list is now sorted ascending).
print(arr[-1])
# Index of the last element.
print(len(arr)-1)
| StarcoderdataPython |
12835901 | """
# ****************************************************************************
#
# GOVERNMENT PURPOSE RIGHTS
#
# Contract Number: FA8750-15-2-0270 (Prime: William Marsh Rice University)
# Contractor Name: GrammaTech, Inc. (Right Holder - subaward R18683)
# Contractor Address: 531 Esty Street, Ithaca, NY 14850
# Expiration Date: 22 September 2023
#
# The Government's rights to use, modify, reproduce, release, perform,
# display, or disclose this software are restricted by DFARS 252.227-7014
# Rights in Noncommercial Computer Software and Noncommercial Computer Software
# Documentation clause contained in the above identified contract.
# No restrictions apply after the expiration date shown above.
# Any reproduction of the software or portions thereof marked with this legend
# must also reproduce the markings and any copyright.
#
# ****************************************************************************
# ****************************************************************************
#
# (c) 2014-2018 GrammaTech, Inc. All rights reserved.
#
# ****************************************************************************
"""
from __future__ import print_function
import argparse
import json
from salento.aggregators.base import Aggregator
class RawProbAggregator(Aggregator):
    """
    Sequence aggregator that queries the RNN for the raw probability of
    every call in every sequence.  The output schema is:

        { unit_index :
            { "<seq_index>--call1--call2--..." :
                { "<call_index>--call_name" : probability } } }

    Fix relative to the original: the expression building ``event_key``
    contained a ``<KEY>`` anonymisation placeholder and did not parse; it
    is reconstructed below as the sequence index joined with the call
    chain, matching the per-call key layout.  TODO confirm the original
    key layout against downstream consumers.
    """

    def __init__(self, data_file, model_dir):
        Aggregator.__init__(self, data_file, model_dir)

    def run(self):
        """
        invoke the RNN to get the probability
        return combined call and state probability values
        """
        result_data = {}
        # iterate over units
        for k, package in enumerate(self.packages()):
            result_data[str(k)] = {}
            spec = self.get_latent_specification(package)
            # iterate over sequences
            for j, sequence in enumerate(self.sequences(package)):
                events = self.events(sequence)
                seq_calls = "--".join(x['call'] for x in events)
                # NOTE(review): reconstructed from a '<KEY>' placeholder.
                event_key = str(j) + '--' + seq_calls
                event_data = {}
                # iterate over calls
                for i, event in enumerate(events):
                    call_key = (str(i) + '--' + event['call'])
                    call_prob = float(self.distribution_next_call(
                        spec, events[:i+1], call=self.call(event)))
                    # next state probability
                    dist = self.distribution_next_state(spec, events[:i+1], None)
                    # Total-probability rule over the conditional state
                    # distribution:
                    #   Pr(Call, States) = sum_s Pr(s | Call) * Pr(Call)
                    prob_value = 0
                    # accumulate over the individual state entries
                    for key, value in dist.items():
                        if '#' in key:
                            prob_value += call_prob * value
                    event_data[call_key] = prob_value
                result_data[str(k)][event_key] = event_data
        return result_data
if __name__ == '__main__':
    # Command-line entry point: run the aggregator over a data file and
    # either dump the result to a JSON file or print it to stdout.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_file', type=str, required=True,
                        help='input data file')
    parser.add_argument('--model_dir', type=str, required=True,
                        help='directory to load model from')
    parser.add_argument('--result_file', type=str, default=None,
                        help='write out result in json file')
    clargs = parser.parse_args()
    # The aggregator is a context manager (closes the model session).
    with RawProbAggregator(clargs.data_file, clargs.model_dir) as aggregator:
        result = aggregator.run()
    if clargs.result_file:
        with open(clargs.result_file, 'w') as fwrite:
            json.dump(result, fwrite)
    else:
        print(json.dumps(result))
| StarcoderdataPython |
98453 | <gh_stars>1-10
class TrieNode:
    """A single trie node: child map, end-of-word flag and the stored word.

    Fix relative to the original: ``word`` was initialised to the *type*
    ``str`` rather than a string; it is now an empty string.
    """
    def __init__(self):
        self.children = {}    # char -> TrieNode
        self.is_word = False  # True iff a dictionary word ends here
        self.word = ''        # the full word, set when is_word becomes True
class Trie:
    """Prefix tree supporting word insertion."""

    def __init__(self):
        self.root = TrieNode()

    def add_word(self, word):
        """Insert *word*, creating nodes along its path as needed."""
        cursor = self.root
        for ch in word:
            if ch not in cursor.children:
                cursor.children[ch] = TrieNode()
            cursor = cursor.children[ch]
        cursor.is_word = True
        cursor.word = word
class Solution:
    """
    @param words: a set of strings
    @param target: a target string
    @param k: An integer
    @return: output all the strings that meet the requirements
    """

    def kDistance(self, words, target, k):
        """Return all words whose edit distance to ``target`` is <= k.

        Builds a trie over the dictionary and walks it depth-first while
        maintaining a rolling edit-distance DP row, so words sharing a
        prefix share the prefix's DP work.

        Fixes relative to the original:
        * removed leftover debug ``print`` calls;
        * removed a stray non-Python token fused onto the last line;
        * the DP row variable no longer shadows the builtin ``next``.
        """
        trie = Trie()
        for word in words:
            trie.add_word(word)
        n = len(target)
        # dp[i] = edit distance between the empty prefix and target[:i].
        dp = [i for i in range(n + 1)]
        result = []
        self.find(trie.root, target, k, dp, result)
        return result

    def find(self, node, target, k, dp, result):
        """DFS helper: dp[i] = edit distance(current trie prefix, target[:i])."""
        n = len(target)
        if node.is_word and dp[n] <= k:
            result.append(node.word)
        row = [0 for _ in range(n + 1)]
        for c in node.children:
            # Extend the prefix by character c: standard Levenshtein update.
            row[0] = dp[0] + 1
            for i in range(1, n + 1):
                if target[i - 1] == c:
                    row[i] = min(dp[i - 1], dp[i] + 1, row[i - 1] + 1)
                else:
                    row[i] = min(dp[i - 1] + 1, dp[i] + 1, row[i - 1] + 1)
            self.find(node.children[c], target, k, row, result)
1622323 | from tests.system.action.base import BaseActionTestCase
class PersonalNoteDeleteActionTest(BaseActionTestCase):
    """System tests for the ``personal_note.delete`` action."""

    def test_delete_correct(self) -> None:
        # A user may delete their own note; the template-field backlinks on
        # the user ($111_ids and $_ids) must be cleared as well.
        self.set_models(
            {
                "meeting/111": {"personal_note_ids": [1]},
                "user/1": {
                    "personal_note_$111_ids": [1],
                    "personal_note_$_ids": ["111"],
                },
                "personal_note/1": {
                    "star": True,
                    "note": "blablabla",
                    "user_id": 1,
                    "meeting_id": 111,
                },
            }
        )
        response = self.request("personal_note.delete", {"id": 1})
        self.assert_status_code(response, 200)
        self.assert_model_deleted("personal_note/1")
        user = self.get_model("user/1")
        assert user.get("personal_note_$111_ids") == []
        assert user.get("personal_note_$_ids") == []

    def test_delete_wrong_user_id(self) -> None:
        # Deleting a note owned by another user (note belongs to user/2,
        # request made as user/1) must fail with 400 and leave the note.
        self.set_models(
            {
                "meeting/111": {"personal_note_ids": [1]},
                "user/2": {
                    "personal_note_$111_ids": [1],
                    "personal_note_$_ids": ["111"],
                },
                "personal_note/1": {
                    "star": True,
                    "note": "blablabla",
                    "user_id": 2,
                    "meeting_id": 111,
                },
            }
        )
        response = self.request("personal_note.delete", {"id": 1})
        self.assert_status_code(response, 400)
        self.assertIn(
            "Cannot delete not owned personal note.", response.json["message"]
        )
        self.assert_model_exists("personal_note/1")
9724387 | <filename>scidb/core/low/__init__.py
from .metadata import MetadataFileType, ObservableDict, NodeDict, Metadata, Properties
from .node import Root, Node
| StarcoderdataPython |
12815044 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Fixtures for tests"""
# Import modules
import pytest
import numpy as np
# Import from package
from pyswarms.backend.swarms import Swarm
@pytest.fixture
def swarm():
    """A contrived instance of the Swarm class at a certain timestep"""
    # Hand-picked swarm state (3 particles, 3 dimensions) so dependent tests
    # can assert exact values for position/velocity/best updates.
    attrs_at_t = {
        'position' : np.array([[5,5,5], [3,3,3], [1,1,1]]),
        'velocity' : np.array([[1,1,1], [1,1,1], [1,1,1]]),
        'current_cost' : np.array([2,2,2]),
        'pbest_cost' : np.array([1,2,3]),
        'pbest_pos' : np.array([[1,2,3], [4,5,6], [7,8,9]]),
        'best_cost' : 1,
        'best_pos' : np.array([1,1,1]),
        # cognitive (c1) / social (c2) coefficients and inertia weight (w)
        'options' : {'c1' : 0.5, 'c2': 1, 'w': 2}
    }
    return Swarm(**attrs_at_t)
| StarcoderdataPython |
1929190 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, Iterable, List, Mapping, Optional, SupportsAbs, Union
from airflow.compat.functools import cached_property
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.hooks.dbapi import DbApiHook
from airflow.models import BaseOperator, SkipMixin
def parse_boolean(val: str) -> Union[str, bool]:
    """Try to parse a string into boolean.

    Raises ValueError if the input is not a valid true- or false-like
    string value.
    """
    lowered = val.lower()
    if lowered in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if lowered in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError(f"{lowered!r} is not a boolean-like string value")
class BaseSQLOperator(BaseOperator):
    """
    This is a base class for generic SQL Operator to get a DB Hook
    The provided method is .get_db_hook(). The default behavior will try to
    retrieve the DB hook based on connection type.
    You can custom the behavior by overriding the .get_db_hook() method.
    """

    def __init__(
        self,
        *,
        conn_id: Optional[str] = None,
        database: Optional[str] = None,
        hook_params: Optional[Dict] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.conn_id = conn_id
        self.database = database
        # Extra keyword arguments forwarded to the hook constructor.
        self.hook_params = {} if hook_params is None else hook_params

    @cached_property
    def _hook(self):
        """Get DB Hook based on connection type"""
        # cached_property: the connection lookup and hook construction run
        # only once per operator instance.
        self.log.debug("Get connection for %s", self.conn_id)
        conn = BaseHook.get_connection(self.conn_id)
        hook = conn.get_hook(hook_params=self.hook_params)
        if not isinstance(hook, DbApiHook):
            raise AirflowException(
                f'The connection type is not supported by {self.__class__.__name__}. '
                f'The associated hook should be a subclass of `DbApiHook`. Got {hook.__class__.__name__}'
            )
        if self.database:
            # An explicit database overrides the schema from the connection.
            hook.schema = self.database
        return hook

    def get_db_hook(self) -> DbApiHook:
        """
        Get the database hook for the connection.
        :return: the database hook object.
        :rtype: DbApiHook
        """
        return self._hook
class SQLCheckOperator(BaseSQLOperator):
    """
    Performs checks against a database.

    The ``SQLCheckOperator`` expects a sql query that returns a single row.
    Every value on that first row is evaluated with Python ``bool`` casting;
    the check fails (and the task errors out) if any value is falsy
    (``False``, ``0``, ``""``, ``[]``, ``{}``) or if no row is returned at
    all.

    Given a query like ``SELECT COUNT(*) FROM foo``, it fails only when the
    count is ``0``. More elaborate queries can compare row counts against an
    upstream source table, compare today's partition with yesterday's, or
    assert that a set of metrics stays within a few standard deviations of a
    rolling average. Placed on the critical path of a DAG this acts as a
    data-quality gate that stops dubious data from being published; placed on
    the side it simply raises alerts without blocking progress.

    :param sql: the sql to be executed. (templated)
    :type sql: str
    :param conn_id: the connection ID used to connect to the database.
    :type conn_id: str
    :param database: name of database which overwrite the defined one in connection
    :type database: str
    """

    template_fields: Iterable[str] = ("sql",)
    template_ext: Iterable[str] = (".hql", ".sql")
    ui_color = "#fff7e6"

    def __init__(
        self, *, sql: str, conn_id: Optional[str] = None, database: Optional[str] = None, **kwargs
    ) -> None:
        super().__init__(conn_id=conn_id, database=database, **kwargs)
        self.sql = sql

    def execute(self, context=None):
        self.log.info("Executing SQL check: %s", self.sql)
        first_row = self.get_db_hook().get_first(self.sql)
        self.log.info("Record: %s", first_row)
        # Guard clauses: missing row, then any falsy cell, are both failures.
        if not first_row:
            raise AirflowException("The query returned None")
        if not all(map(bool, first_row)):
            raise AirflowException(f"Test failed.\nQuery:\n{self.sql}\nResults:\n{first_row!s}")
        self.log.info("Success.")
def _convert_to_float_if_possible(s):
"""
A small helper function to convert a string to a numeric value
if appropriate
:param s: the string to be converted
:type s: str
"""
try:
ret = float(s)
except (ValueError, TypeError):
ret = s
return ret
class SQLValueCheckOperator(BaseSQLOperator):
    """
    Performs a simple value check using sql code.

    :param sql: the sql to be executed. (templated)
    :type sql: str
    :param pass_value: the expected value; compared as string, or numerically
        when it parses as a float
    :type pass_value: Any
    :param tolerance: (optional) relative tolerance for numeric checks,
        e.g. ``0.1`` accepts results within +/- 10% of ``pass_value``
    :type tolerance: Any
    :param conn_id: the connection ID used to connect to the database.
    :type conn_id: str
    :param database: name of database which overwrite the defined one in connection
    :type database: str
    """

    __mapper_args__ = {"polymorphic_identity": "SQLValueCheckOperator"}
    template_fields = (
        "sql",
        "pass_value",
    )  # type: Iterable[str]
    template_ext = (
        ".hql",
        ".sql",
    )  # type: Iterable[str]
    ui_color = "#fff7e6"

    def __init__(
        self,
        *,
        sql: str,
        pass_value: Any,
        tolerance: Any = None,
        conn_id: Optional[str] = None,
        database: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(conn_id=conn_id, database=database, **kwargs)
        self.sql = sql
        self.pass_value = str(pass_value)
        tol = _convert_to_float_if_possible(tolerance)
        # Tolerance is only honoured when it parses as a float.
        self.tol = tol if isinstance(tol, float) else None
        self.has_tolerance = self.tol is not None

    def execute(self, context=None):
        self.log.info("Executing SQL check: %s", self.sql)
        records = self.get_db_hook().get_first(self.sql)
        if not records:
            raise AirflowException("The query returned None")
        pass_value_conv = _convert_to_float_if_possible(self.pass_value)
        is_numeric_value_check = isinstance(pass_value_conv, float)
        tolerance_pct_str = str(self.tol * 100) + "%" if self.has_tolerance else None
        error_msg = (
            "Test failed.\nPass value:{pass_value_conv}\n"
            "Tolerance:{tolerance_pct_str}\n"
            "Query:\n{sql}\nResults:\n{records!s}"
        ).format(
            pass_value_conv=pass_value_conv,
            tolerance_pct_str=tolerance_pct_str,
            sql=self.sql,
            records=records,
        )
        # Fix: the previous `if not X / elif X / else` chain had an
        # unreachable `else: tests = []` branch; a plain if/else covers all
        # cases with identical behavior.
        if is_numeric_value_check:
            try:
                numeric_records = self._to_float(records)
            except (ValueError, TypeError):
                raise AirflowException(f"Converting a result to float failed.\n{error_msg}")
            tests = self._get_numeric_matches(numeric_records, pass_value_conv)
        else:
            tests = self._get_string_matches(records, pass_value_conv)
        if not all(tests):
            raise AirflowException(error_msg)

    def _to_float(self, records):
        """Convert every record to ``float``; raises on non-numeric values."""
        return [float(record) for record in records]

    def _get_string_matches(self, records, pass_value_conv):
        """Compare each record against the pass value as strings."""
        return [str(record) == pass_value_conv for record in records]

    def _get_numeric_matches(self, numeric_records, numeric_pass_value_conv):
        """Compare each numeric record against the pass value, honouring tolerance."""
        if self.has_tolerance:
            return [
                numeric_pass_value_conv * (1 - self.tol) <= record <= numeric_pass_value_conv * (1 + self.tol)
                for record in numeric_records
            ]
        return [record == numeric_pass_value_conv for record in numeric_records]
class SQLIntervalCheckOperator(BaseSQLOperator):
    """
    Checks that the values of metrics given as SQL expressions are within
    a certain tolerance of the ones from days_back before.

    :param table: the table name
    :type table: str
    :param conn_id: the connection ID used to connect to the database.
    :type conn_id: str
    :param database: name of database which will overwrite the defined one in connection
    :type database: Optional[str]
    :param days_back: number of days between ds and the ds we want to check
        against. Defaults to 7 days
    :type days_back: Optional[int]
    :param date_filter_column: The column name for the dates to filter on. Defaults to 'ds'
    :type date_filter_column: Optional[str]
    :param ratio_formula: which formula to use to compute the ratio between
        the two metrics. Assuming cur is the metric of today and ref is
        the metric to today - days_back.
        max_over_min: computes max(cur, ref) / min(cur, ref)
        relative_diff: computes abs(cur-ref) / ref
        Default: 'max_over_min'
    :type ratio_formula: str
    :param ignore_zero: whether we should ignore zero metrics
    :type ignore_zero: bool
    :param metrics_thresholds: a dictionary of ratios indexed by metrics
    :type metrics_thresholds: dict
    """
    __mapper_args__ = {"polymorphic_identity": "SQLIntervalCheckOperator"}
    template_fields: Iterable[str] = ("sql1", "sql2")
    template_ext: Iterable[str] = (
        ".hql",
        ".sql",
    )
    template_fields_renderers = {"sql1": "sql", "sql2": "sql"}
    ui_color = "#fff7e6"
    # Supported ratio formulas; a check passes when formula(cur, ref) is
    # strictly below the metric's threshold.
    ratio_formulas = {
        "max_over_min": lambda cur, ref: float(max(cur, ref)) / min(cur, ref),
        "relative_diff": lambda cur, ref: float(abs(cur - ref)) / ref,
    }
    def __init__(
        self,
        *,
        table: str,
        metrics_thresholds: Dict[str, int],
        date_filter_column: Optional[str] = "ds",
        days_back: SupportsAbs[int] = -7,
        ratio_formula: Optional[str] = "max_over_min",
        ignore_zero: bool = True,
        conn_id: Optional[str] = None,
        database: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(conn_id=conn_id, database=database, **kwargs)
        if ratio_formula not in self.ratio_formulas:
            msg_template = "Invalid diff_method: {diff_method}. Supported diff methods are: {diff_methods}"
            raise AirflowException(
                msg_template.format(diff_method=ratio_formula, diff_methods=self.ratio_formulas)
            )
        self.ratio_formula = ratio_formula
        self.ignore_zero = ignore_zero
        self.table = table
        self.metrics_thresholds = metrics_thresholds
        # Sorted so that row values zip deterministically with metric names.
        self.metrics_sorted = sorted(metrics_thresholds.keys())
        self.date_filter_column = date_filter_column
        # Always look backwards in time regardless of the sign the user passed.
        self.days_back = -abs(days_back)
        sqlexp = ", ".join(self.metrics_sorted)
        sqlt = f"SELECT {sqlexp} FROM {table} WHERE {date_filter_column}="
        # sql1 targets the current ds; sql2 targets ds + days_back (templated).
        self.sql1 = sqlt + "'{{ ds }}'"
        self.sql2 = sqlt + "'{{ macros.ds_add(ds, " + str(self.days_back) + ") }}'"
    def execute(self, context=None):
        hook = self.get_db_hook()
        self.log.info("Using ratio formula: %s", self.ratio_formula)
        self.log.info("Executing SQL check: %s", self.sql2)
        row2 = hook.get_first(self.sql2)
        self.log.info("Executing SQL check: %s", self.sql1)
        row1 = hook.get_first(self.sql1)
        if not row2:
            raise AirflowException(f"The query {self.sql2} returned None")
        if not row1:
            raise AirflowException(f"The query {self.sql1} returned None")
        # Map each metric name to its current/reference value.
        current = dict(zip(self.metrics_sorted, row1))
        reference = dict(zip(self.metrics_sorted, row2))
        ratios = {}
        test_results = {}
        for metric in self.metrics_sorted:
            cur = current[metric]
            ref = reference[metric]
            threshold = self.metrics_thresholds[metric]
            if cur == 0 or ref == 0:
                # A zero on either side makes the ratio undefined; pass or
                # fail according to the ignore_zero flag.
                ratios[metric] = None
                test_results[metric] = self.ignore_zero
            else:
                ratios[metric] = self.ratio_formulas[self.ratio_formula](current[metric], reference[metric])
                test_results[metric] = ratios[metric] < threshold
            self.log.info(
                (
                    "Current metric for %s: %s\n"
                    "Past metric for %s: %s\n"
                    "Ratio for %s: %s\n"
                    "Threshold: %s\n"
                ),
                metric,
                cur,
                metric,
                ref,
                metric,
                ratios[metric],
                threshold,
            )
        if not all(test_results.values()):
            failed_tests = [it[0] for it in test_results.items() if not it[1]]
            self.log.warning(
                "The following %s tests out of %s failed:",
                len(failed_tests),
                len(self.metrics_sorted),
            )
            for k in failed_tests:
                self.log.warning(
                    "'%s' check failed. %s is above %s",
                    k,
                    ratios[k],
                    self.metrics_thresholds[k],
                )
            raise AirflowException(f"The following tests have failed:\n {', '.join(sorted(failed_tests))}")
        self.log.info("All tests have passed")
class SQLThresholdCheckOperator(BaseSQLOperator):
    """
    Performs a value check using sql code against a minimum threshold
    and a maximum threshold. Thresholds can be in the form of a numeric
    value OR a sql statement that results a numeric.

    :param sql: the sql to be executed. (templated)
    :type sql: str
    :param conn_id: the connection ID used to connect to the database.
    :type conn_id: str
    :param database: name of database which overwrite the defined one in connection
    :type database: str
    :param min_threshold: numerical value or min threshold sql to be executed (templated)
    :type min_threshold: numeric or str
    :param max_threshold: numerical value or max threshold sql to be executed (templated)
    :type max_threshold: numeric or str
    """

    template_fields = ("sql", "min_threshold", "max_threshold")
    template_ext = (
        ".hql",
        ".sql",
    )  # type: Iterable[str]

    def __init__(
        self,
        *,
        sql: str,
        min_threshold: Any,
        max_threshold: Any,
        conn_id: Optional[str] = None,
        database: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(conn_id=conn_id, database=database, **kwargs)
        self.sql = sql
        # Numeric-looking thresholds are used directly; anything else is kept
        # as a SQL statement to be executed at runtime.
        self.min_threshold = _convert_to_float_if_possible(min_threshold)
        self.max_threshold = _convert_to_float_if_possible(max_threshold)

    def execute(self, context=None):
        hook = self.get_db_hook()
        result = hook.get_first(self.sql)[0]
        # Fix: a NULL result would previously blow up later with a TypeError
        # on the `<=` comparison; fail with a clear message instead.
        if result is None:
            raise AirflowException(f"The following query returned no result: {self.sql}")
        if isinstance(self.min_threshold, float):
            lower_bound = self.min_threshold
        else:
            lower_bound = hook.get_first(self.min_threshold)[0]
        if isinstance(self.max_threshold, float):
            upper_bound = self.max_threshold
        else:
            upper_bound = hook.get_first(self.max_threshold)[0]
        meta_data = {
            "result": result,
            "task_id": self.task_id,
            "min_threshold": lower_bound,
            "max_threshold": upper_bound,
            "within_threshold": lower_bound <= result <= upper_bound,
        }
        self.push(meta_data)
        if not meta_data["within_threshold"]:
            # Fix: the old message printed `meta_data.get("description")`,
            # a key that is never populated (it always rendered "None").
            error_msg = (
                f'Threshold Check: "{meta_data.get("task_id")}" failed.\n'
                f'DAG: {self.dag_id}\nTask_id: {meta_data.get("task_id")}\n'
                f"SQL: {self.sql}\n"
                f'Result: {round(meta_data.get("result"), 2)} is not within thresholds '
                f'{meta_data.get("min_threshold")} and {meta_data.get("max_threshold")}'
            )
            raise AirflowException(error_msg)
        self.log.info("Test %s Successful.", self.task_id)

    def push(self, meta_data):
        """
        Optional: Send data check info and metadata to an external database.
        Default functionality will log metadata.
        """
        info = "\n".join(f"""{key}: {item}""" for key, item in meta_data.items())
        self.log.info("Log from %s:\n%s", self.dag_id, info)
class BranchSQLOperator(BaseSQLOperator, SkipMixin):
    """
    Allows a DAG to "branch" or follow a specified path based on the results of a SQL query.

    :param sql: The SQL code to be executed, should return true or false (templated)
    :type sql: Can receive a str representing a sql statement or reference to a template file.
        Template reference are recognized by str ending in '.sql'.
        Expected SQL query to return Boolean (True/False), integer (0 = False, Otherwise = 1)
        or string (true/y/yes/1/on/false/n/no/0/off).
    :param follow_task_ids_if_true: task id or task ids to follow if query returns true
    :type follow_task_ids_if_true: str or list
    :param follow_task_ids_if_false: task id or task ids to follow if query returns false
    :type follow_task_ids_if_false: str or list
    :param conn_id: the connection ID used to connect to the database.
    :type conn_id: str
    :param database: name of database which overwrite the defined one in connection
    :type database: str
    :param parameters: (optional) the parameters to render the SQL query with.
    :type parameters: mapping or iterable
    """
    template_fields = ("sql",)
    template_ext = (".sql",)
    ui_color = "#a22034"
    ui_fgcolor = "#F7F7F7"
    def __init__(
        self,
        *,
        sql: str,
        follow_task_ids_if_true: List[str],
        follow_task_ids_if_false: List[str],
        conn_id: str = "default_conn_id",
        database: Optional[str] = None,
        parameters: Optional[Union[Mapping, Iterable]] = None,
        **kwargs,
    ) -> None:
        super().__init__(conn_id=conn_id, database=database, **kwargs)
        self.sql = sql
        self.parameters = parameters
        self.follow_task_ids_if_true = follow_task_ids_if_true
        self.follow_task_ids_if_false = follow_task_ids_if_false
    def execute(self, context: Dict):
        """Run the query, interpret its first value as a boolean, and skip
        every downstream task that is not on the chosen branch."""
        self.log.info(
            "Executing: %s (with parameters %s) with connection: %s",
            self.sql,
            self.parameters,
            self.conn_id,
        )
        record = self.get_db_hook().get_first(self.sql, self.parameters)
        if not record:
            raise AirflowException(
                "No rows returned from sql query. Operator expected True or False return value."
            )
        # Unwrap the scalar from whatever container shape the hook returned
        # (nested list, flat list, tuple, or a bare scalar).
        if isinstance(record, list):
            if isinstance(record[0], list):
                query_result = record[0][0]
            else:
                query_result = record[0]
        elif isinstance(record, tuple):
            query_result = record[0]
        else:
            query_result = record
        self.log.info("Query returns %s, type '%s'", query_result, type(query_result))
        follow_branch = None
        try:
            if isinstance(query_result, bool):
                if query_result:
                    follow_branch = self.follow_task_ids_if_true
            elif isinstance(query_result, str):
                # return result is not Boolean, try to convert from String to Boolean
                if parse_boolean(query_result):
                    follow_branch = self.follow_task_ids_if_true
            elif isinstance(query_result, int):
                if bool(query_result):
                    follow_branch = self.follow_task_ids_if_true
            else:
                raise AirflowException(
                    f"Unexpected query return result '{query_result}' type '{type(query_result)}'"
                )
            # Anything that did not resolve to the "true" branch follows the
            # "false" branch.
            if follow_branch is None:
                follow_branch = self.follow_task_ids_if_false
        except ValueError:
            # parse_boolean raises ValueError for non boolean-like strings.
            raise AirflowException(
                f"Unexpected query return result '{query_result}' type '{type(query_result)}'"
            )
        self.skip_all_except(context["ti"], follow_branch)
| StarcoderdataPython |
9620729 | """
A suite of test to make sure the converted ResNet model is correct.
Annoying fact: GPU outputs pretty non-deterministic values everytime (due to
parallel implementation and floating point precision issue.)
"""
import sys
# sys.path.insert(0, '/pkgs/tensorflow-gpu-0.9.0')
sys.path.insert(0, '..')
import tensorflow as tf
import os
import numpy as np
import cv2
from synset import *
import tfplus
def checkpoint_fn(layers):
    """Return the checkpoint filename for a ResNet of the given depth."""
    return 'ResNet-L{:d}.ckpt'.format(layers)
def meta_fn(layers):
    """Return the meta-graph filename for a ResNet of the given depth."""
    return 'ResNet-L{:d}.meta'.format(layers)
def load_old_model(sess, nlayers, device='/cpu:0'):
    """Restore the reference ResNet from its exported meta-graph/checkpoint.

    :param sess: TF session to restore the variables into.
    :param nlayers: ResNet depth (50, 101 or 152); selects the file names.
    :param device: device string or device function for op placement.
    :return: (graph, input placeholder, softmax probability tensor).
    """
    with tf.device(device):
        new_saver = tf.train.import_meta_graph(meta_fn(nlayers))
        new_saver.restore(sess, checkpoint_fn(nlayers))
    graph = tf.get_default_graph()
    # Tensors are looked up by the names used in the original export.
    prob_tensor = graph.get_tensor_by_name("prob:0")
    images = graph.get_tensor_by_name("images:0")
    return graph, images, prob_tensor
def load_new_model(sess, restore_path, nlayers, device='/cpu:0'):
    """Build the converted ResNet model and restore the converted weights.

    The model is built twice on the same input so the caller can verify that
    a second pass (variable reuse) produces the same output.

    :param sess: TF session to restore the variables into.
    :param restore_path: path of the converted checkpoint file.
    :param nlayers: ResNet depth (50, 101 or 152).
    :param device: device string or device function for op placement.
    :return: (model, input dict, first output tensor, second output tensor).
    """
    from resnet_imagenet_model import ResNetImageNetModel
    with tf.device(device):
        logger = tfplus.utils.logger.get()
        with logger.verbose_level(2):
            # 'compatible': True keeps the layer arithmetic identical to the
            # original Tensorflow-ResNet export being compared against.
            resnet = ResNetImageNetModel().set_all_options({
                'inp_depth': 3,
                'layers': get_layers(nlayers),
                'strides': [1, 2, 2, 2],
                'channels': [64, 256, 512, 1024, 2048],
                'bottleneck': True,
                'shortcut': 'projection',
                'compatible': True,
                'weight_decay': 1e-4,
                'subtract_mean': True,
                'trainable': False
            })
            inp_var = resnet.build_input()
            out_var = resnet.build(inp_var)
            # Second build on the same input exercises variable reuse.
            out_var2 = resnet.build(inp_var)
            saver = tf.train.Saver(resnet.get_save_var_dict())
            saver.restore(sess, restore_path)
    return resnet, inp_var, out_var, out_var2
def load_wrapper_model(sess, restore_path, nlayers, device='/cpu:0'):
    """Build the wrapper variant of the converted model and restore weights.

    :param sess: TF session to restore the variables into.
    :param restore_path: path of the converted checkpoint file.
    :param nlayers: ResNet depth (50, 101 or 152).
    :param device: device string or device function for op placement.
    :return: (inner resnet model, input dict, 'y_out' output tensor).
    """
    from resnet_imagenet_model_wrapper import ResNetImageNetModelWrapper
    with tf.device(device):
        logger = tfplus.utils.logger.get()
        with logger.verbose_level(2):
            # NOTE: the wrapper uses 'wd' where the plain model uses
            # 'weight_decay'; both are set to the same value here.
            resnet = ResNetImageNetModelWrapper().set_all_options({
                'inp_depth': 3,
                'layers': get_layers(nlayers),
                'strides': [1, 2, 2, 2],
                'channels': [64, 256, 512, 1024, 2048],
                'bottleneck': True,
                'shortcut': 'projection',
                'compatible': True,
                'wd': 1e-4,
                'subtract_mean': True,
                'trainable': False
            })
            inp_var = resnet.build_input()
            out_var = resnet.build(inp_var)
            # Only the inner res_net owns the savable variables.
            saver = tf.train.Saver(resnet.res_net.get_save_var_dict())
            saver.restore(sess, restore_path)
    return resnet.res_net, inp_var, out_var['y_out']
def get_layers(nlayer):
    """Return the per-stage residual block counts for a standard ResNet.

    :param nlayer: total network depth; one of 50, 101 or 152.
    :return: list with the number of residual blocks in each of the 4 stages.
    :raises ValueError: if ``nlayer`` is not a supported depth.
    """
    if nlayer == 50:
        return [3, 4, 6, 3]
    elif nlayer == 101:
        return [3, 4, 23, 3]
    elif nlayer == 152:
        return [3, 8, 36, 3]
    # Fix: previously fell through and returned None silently, which only
    # surfaced later as an opaque failure in the model builder.
    raise ValueError('Unsupported number of layers: {}'.format(nlayer))
def build_convert_dict(graph, nlayers):
    """
    Build a mapping from new-model variable names to old-graph tensors.

    :param graph: the restored old (Tensorflow-ResNet) graph.
    :param nlayers: ResNet depth (50, 101 or 152).
    :return: dict mapping new variable name -> old graph tensor.

    ---------------------------------------------------------------------------
    Look up table
    ---------------------------------------------------------------------------
    Tensorflow-ResNet                  My code
    ---------------------------------------------------------------------------
    s1/weights                         conv1/w
    s1/gamma                           bn1/gamma
    s1/beta                            bn1/beta
    s1/moving_mean                     bn1/ema_mean
    s1/moving_variance                 bn1/ema_var
    ---------------------------------------------------------------------------
    s{n}/b1/shortcut/weights           stage_{n-2}/shortcut/w
    s{n}/b1/shortcut/beta              stage_{n-2}/shortcut/bn/beta
    s{n}/b1/shortcut/gamma             stage_{n-2}/shortcut/bn/gamma
    s{n}/b1/moving_mean                stage_{n-2}/shortcut/bn/ema_mean
    s{n}/b1/moving_variance            stage_{n-2}/shortcut/bn/ema_var
    ---------------------------------------------------------------------------
    s{n}/b{m}/{a,b,c}/weights          stage_{n-2}/layer_{m-1}/unit_{k}/w
    s{n}/b{m}/{a,b,c}/beta             stage_{n-2}/layer_{m-1}/unit_{k}/bn/beta
    s{n}/b{m}/{a,b,c}/gamma            stage_{n-2}/layer_{m-1}/unit_{k}/bn/gamma
    s{n}/b{m}/moving_mean              stage_{n-2}/layer_{m-1}/unit_{k}/bn/ema_mean
    s{n}/b{m}/moving_variance          stage_{n-2}/layer_{m-1}/unit_{k}/bn/ema_var
    ---------------------------------------------------------------------------
    fc/weights                         fc/w
    fc/biases                          fc/b
    ---------------------------------------------------------------------------
    """
    vd = {}
    # Stem: first convolution and its batch-norm statistics.
    vd['conv1/w'] = graph.get_tensor_by_name('scale1/weights:0')
    vd['bn1/gamma'] = graph.get_tensor_by_name('scale1/gamma:0')
    vd['bn1/beta'] = graph.get_tensor_by_name('scale1/beta:0')
    vd['bn1/ema_mean'] = graph.get_tensor_by_name('scale1/moving_mean:0')
    vd['bn1/ema_var'] = graph.get_tensor_by_name('scale1/moving_variance:0')
    layers_list = get_layers(nlayers)
    # Old scales 2-5 correspond to new stages 0-3.
    for ss in xrange(2, 6):
        vd['res_net/stage_{}/shortcut/w'.format(ss - 2)] = \
            graph.get_tensor_by_name(
                'scale{}/block1/shortcut/weights:0'.format(ss))
        vd['res_net/stage_{}/shortcut/bn/beta'.format(ss - 2)] = \
            graph.get_tensor_by_name(
                'scale{}/block1/shortcut/beta:0'.format(ss))
        vd['res_net/stage_{}/shortcut/bn/gamma'.format(ss - 2)] = \
            graph.get_tensor_by_name(
                'scale{}/block1/shortcut/gamma:0'.format(ss))
        vd['res_net/stage_{}/shortcut/bn/ema_mean'.format(ss - 2)] = \
            graph.get_tensor_by_name(
                'scale{}/block1/shortcut/moving_mean:0'.format(ss))
        vd['res_net/stage_{}/shortcut/bn/ema_var'.format(ss - 2)] = \
            graph.get_tensor_by_name(
                'scale{}/block1/shortcut/moving_variance:0'.format(ss))
        # Bottleneck units: old sub-blocks 'a'/'b'/'c' map to new unit 0/1/2.
        for ll in xrange(layers_list[ss - 2]):
            for kk, k in enumerate(['a', 'b', 'c']):
                vd['res_net/stage_{}/layer_{}/unit_{}/w'.format(
                    ss - 2, ll, kk)] = \
                    graph.get_tensor_by_name(
                        'scale{}/block{}/{}/weights:0'.format(ss, ll + 1, k))
                vd['res_net/stage_{}/layer_{}/unit_{}/bn/beta'.format(
                    ss - 2, ll, kk)] = \
                    graph.get_tensor_by_name(
                        'scale{}/block{}/{}/beta:0'.format(ss, ll + 1, k))
                vd['res_net/stage_{}/layer_{}/unit_{}/bn/gamma'.format(
                    ss - 2, ll, kk)] = \
                    graph.get_tensor_by_name(
                        'scale{}/block{}/{}/gamma:0'.format(ss, ll + 1, k))
                vd['res_net/stage_{}/layer_{}/unit_{}/bn/ema_mean'.format(
                    ss - 2, ll, kk)] = \
                    graph.get_tensor_by_name(
                        'scale{}/block{}/{}/moving_mean:0'.format(ss, ll + 1, k))
                vd['res_net/stage_{}/layer_{}/unit_{}/bn/ema_var'.format(
                    ss - 2, ll, kk)] = \
                    graph.get_tensor_by_name(
                        'scale{}/block{}/{}/moving_variance:0'.format(ss, ll + 1, k))
    # Final fully connected classifier.
    vd['fc/w'] = graph.get_tensor_by_name('fc/weights:0')
    vd['fc/b'] = graph.get_tensor_by_name('fc/biases:0')
    return vd
def save_convert_dict(sess, fname, vd):
    """Save the converted variable mapping to a new checkpoint file.

    :param sess: session holding the restored old-model variables.
    :param fname: path of the checkpoint file to write.
    :param vd: mapping from new variable names to old graph tensors.
    """
    vl = []
    for k in sorted(vd.keys()):
        vl.append(vd[k])
    # Evaluate all tensors once so any restore problem surfaces before saving.
    rr = sess.run(vl)
    # for kk, k in enumerate(sorted(vd.keys())):
    #     print k, rr[kk].shape
    tf.train.Saver(vd).save(sess, fname)
    pass
def load_image(fname):
    """Load an image, center-crop it square, resize to 224x224, and convert
    the OpenCV BGR channel order to RGB. Pixels are scaled to [0, 1] floats."""
    img = cv2.imread(fname).astype('float32') / 255
    height, width = img.shape[0], img.shape[1]
    side = min(height, width)
    top = int((height - side) / 2)
    left = int((width - side) / 2)
    cropped = img[top:top + side, left:left + side]
    resized = cv2.resize(cropped, (224, 224))
    # Reverse the channel axis: OpenCV loads images as BGR.
    return resized[:, :, [2, 1, 0]]
def test_hidden_old(sess, inp_var, out_var, img):
    """Run one image through the old graph and print the raw tensor value.

    :param sess: session with the old model restored.
    :param inp_var: the old graph's image placeholder.
    :param out_var: the tensor to evaluate and print.
    :param img: a single 224x224x3 image.
    """
    batch = img.reshape((1, 224, 224, 3))
    feed_dict = {inp_var: batch}
    out_val = sess.run(out_var, feed_dict=feed_dict)
    print out_val
    pass
def test_hidden_new(sess, inp_var, out_var, img):
    """Run one image through the converted model and print the raw value.

    :param sess: session with the converted model restored.
    :param inp_var: input dict with 'x' and 'phase_train' placeholders.
    :param out_var: the tensor to evaluate and print.
    :param img: a single 224x224x3 image.
    """
    batch = img.reshape((1, 224, 224, 3))
    # phase_train=False selects inference-mode batch norm.
    feed_dict = {inp_var['x']: batch, inp_var['phase_train']: False}
    out_val = sess.run(out_var, feed_dict=feed_dict)
    print out_val
    pass
def print_prob(prob):
    """Print the top-5 ImageNet classes for a probability vector.

    :param prob: 1-D array of per-class probabilities.
    :return: the synset label of the most likely class.
    """
    # print prob
    # Sort class indices by descending probability.
    pred = np.argsort(prob)[::-1]
    # Get top1 label
    top1 = synset[pred[0]]
    # Get top5 label
    top5 = [synset[pred[i]] for i in range(5)]
    for ii, tt in enumerate(top5):
        print '{:d} {:45s} {:.8f}'.format(ii, tt, prob[pred[ii]])
    return top1
def get_cpu_list():
    """Return the op type names that must always be placed on the CPU
    (these ops have no GPU kernels)."""
    cpu_only_ops = ['ResizeBilinear', 'ResizeBilinearGrad', 'Mod', 'CumMin',
                    'CumMinGrad', 'Hungarian', 'Reverse', 'SparseToDense',
                    'BatchMatMul', 'Gather', 'Print', 'InTopK', 'TopKV2',
                    'Print']
    return cpu_only_ops
def get_device_fn(device):
    """Return a placement function that maps each op to a device string."""
    ops_on_cpu = set(get_cpu_list())

    def _device_fn(op):
        # Ops without GPU kernels — and anything named "save*" (a hack for
        # the Saver's ops) — are pinned to the CPU.
        if op.type in ops_on_cpu or op.name.startswith('save'):
            return '/cpu:0'
        # Everything else goes to the requested device (GPU if available).
        return device

    return _device_fn
def main():
    """Compare the original Tensorflow-ResNet graph against the converted
    model (and its wrapper variant) on one image, printing top-5 predictions
    and per-activation differences for each pair of models."""
    NLAYERS = 152
    # SAVE_FOLDER = '/ais/gobi4/mren/data'
    SAVE_FOLDER = '/dev/shm/models/res152'
    WRITE_GRAPH = False
    GRAPH_DIR1 = '/u/mren/logs'
    GRAPH_DIR2 = '/u/mren/logs2'
    # DEVICE = '/gpu:0'
    DEVICE = '/cpu:0'
    image_file = 'cat.jpg'
    image_data = load_image(image_file).reshape([1, 224, 224, 3])
    # NOTE(review): '.format(NLAYERS)' is a no-op here — the string has no
    # placeholder, so the layer count is never substituted into the filename.
    # Verify the intended checkpoint name.
    weights_file = os.path.join(SAVE_FOLDER, 'resnet_imagenet.ckpt-0'.format(NLAYERS))
    # Load old model
    output_old = {}
    old_vars = set()
    with tf.Graph().as_default():
        with tf.Session() as sess:
            graph, inp_var, out_var = load_old_model(
                sess, NLAYERS,
                device=get_device_fn(DEVICE))
            # Write graph.
            if WRITE_GRAPH:
                summary_writer = tf.train.SummaryWriter(
                    GRAPH_DIR2, graph_def=sess.graph_def)
                summary_writer.close()
            for vv in tf.all_variables():
                old_vars.add(vv.name)
            feed_dict = {inp_var: image_data}
            # Snapshot the stem weights/statistics for later comparison.
            output_old['w1'] = sess.run(
                graph.get_tensor_by_name('scale1/weights:0'))
            output_old['bn1_beta'] = sess.run(
                graph.get_tensor_by_name('scale1/beta:0'))
            output_old['bn1_gamma'] = sess.run(
                graph.get_tensor_by_name('scale1/gamma:0'))
            output_old['bn1_mean'] = sess.run(
                graph.get_tensor_by_name('scale1/moving_mean:0'))
            output_old['bn1_var'] = sess.run(
                graph.get_tensor_by_name('scale1/moving_variance:0'))
            # Test output.
            print '-----------------------------------------------------------'
            print 'Old model pass 1'
            print_prob(sess.run(out_var, feed_dict=feed_dict)[0])
            print '-----------------------------------------------------------'
            # Test specific activation.
            output_old['sub'] = sess.run(
                graph.get_tensor_by_name('sub:0'), feed_dict=feed_dict)
            output_old['conv1'] = sess.run(graph.get_tensor_by_name(
                'scale1/Conv2D:0'), feed_dict=feed_dict)
            output_old['bn1'] = sess.run(graph.get_tensor_by_name(
                'scale1/batchnorm/add_1:0'), feed_dict=feed_dict)
            output_old['act2'] = sess.run(graph.get_tensor_by_name(
                'scale2/block3/Relu:0'), feed_dict=feed_dict)
            # output_old['shortcut3'] = sess.run(graph.get_tensor_by_name(
            #     'scale3/block1/shortcut/batchnorm/add_1:0'), feed_dict=feed_dict)
            for ll in xrange(8):
                output_old['act3_{}'.format(ll)] = sess.run(graph.get_tensor_by_name(
                    'scale3/block{}/Relu:0'.format(ll + 1)), feed_dict=feed_dict)
            output_old['act4'] = sess.run(graph.get_tensor_by_name(
                'scale4/block36/Relu:0'), feed_dict=feed_dict)
            output_old['act5'] = sess.run(graph.get_tensor_by_name(
                'scale5/block3/Relu:0'), feed_dict=feed_dict)
            # Check that there is no remain.
            # Every old variable should have a counterpart in the convert dict.
            vd = build_convert_dict(graph, NLAYERS)
            for vv in vd.itervalues():
                if vv.name in old_vars:
                    old_vars.remove(vv.name)
            for vv in list(old_vars):
                print 'Remaining', vv
            # save_convert_dict(sess, weights_file, vd)
            print '-----------------------------------------------------------'
            print 'Old model pass 2'
            print_prob(sess.run(out_var, feed_dict=feed_dict)[0])
            print '-----------------------------------------------------------'
    # Load new model
    output_new = {}
    with tf.Graph().as_default():
        with tf.Session() as sess:
            model, inp_var, out_var, out_var2 = load_new_model(
                sess, weights_file, NLAYERS,
                device=get_device_fn(DEVICE))
            # Write graph.
            if WRITE_GRAPH:
                summary_writer = tf.train.SummaryWriter(
                    GRAPH_DIR2, graph_def=sess.graph_def)
                summary_writer.close()
            # Input is BGR.
            feed_dict = {inp_var['x']: image_data[:, :, :, [2, 1, 0]],
                         inp_var['phase_train']: False}
            # Test output.
            print '-----------------------------------------------------------'
            print 'New model pass 1'
            print_prob(sess.run(out_var, feed_dict=feed_dict)[0])
            print '-----------------------------------------------------------'
            output_new['w1'] = sess.run(model.conv1.w)
            output_new['bn1_beta'] = sess.run(model.bn1.beta)
            output_new['bn1_gamma'] = sess.run(model.bn1.gamma)
            output_new['bn1_mean'] = sess.run(
                model.bn1.get_save_var_dict()['ema_mean'])
            output_new['bn1_var'] = sess.run(
                model.bn1.get_save_var_dict()['ema_var'])
            # Test specific activation.
            output_new['sub'] = sess.run(
                model.get_var('x_sub'), feed_dict=feed_dict)
            output_new['conv1'] = sess.run(
                model.get_var('h_conv1'), feed_dict=feed_dict)
            output_new['bn1'] = sess.run(
                model.get_var('h_bn1'), feed_dict=feed_dict)
            output_new['act2'] = sess.run(model.res_net.get_var(
                'stage_0/layer_2/relu'), feed_dict=feed_dict)
            # output_new['shortcut3'] = sess.run(
            #     model.res_net.get_var('stage_1/shortcut'), feed_dict=feed_dict)
            for ll in xrange(8):
                output_new['act3_{}'.format(ll)] = sess.run(
                    model.res_net.get_var('stage_1/layer_{}/relu'.format(ll)),
                    feed_dict=feed_dict)
            output_new['act4'] = sess.run(model.res_net.get_var(
                'stage_2/layer_35/relu'), feed_dict=feed_dict)
            output_new['act5'] = sess.run(model.res_net.get_var(
                'stage_3/layer_2/relu'), feed_dict=feed_dict)
            print '-----------------------------------------------------------'
            print 'New model pass 2'
            print_prob(sess.run(out_var2, feed_dict=feed_dict)[0])
            print '-----------------------------------------------------------'
    # Same checks once more for the wrapper variant of the model.
    output_wrapper = {}
    with tf.Graph().as_default():
        with tf.Session() as sess:
            model, inp_var, out_var = load_wrapper_model(
                sess, weights_file, NLAYERS,
                device=get_device_fn(DEVICE))
            # Write graph.
            if WRITE_GRAPH:
                summary_writer = tf.train.SummaryWriter(
                    GRAPH_DIR2, graph_def=sess.graph_def)
                summary_writer.close()
            # Input is BGR.
            feed_dict = {inp_var['x']: image_data[:, :, :, [2, 1, 0]],
                         inp_var['phase_train']: False}
            # Test output.
            print '-----------------------------------------------------------'
            print 'Wrapper model pass 1'
            print_prob(sess.run(out_var, feed_dict=feed_dict)[0])
            print '-----------------------------------------------------------'
            output_wrapper['w1'] = sess.run(model.conv1.w)
            output_wrapper['bn1_beta'] = sess.run(model.bn1.beta)
            output_wrapper['bn1_gamma'] = sess.run(model.bn1.gamma)
            output_wrapper['bn1_mean'] = sess.run(
                model.bn1.get_save_var_dict()['ema_mean'])
            output_wrapper['bn1_var'] = sess.run(
                model.bn1.get_save_var_dict()['ema_var'])
            # Test specific activation.
            output_wrapper['sub'] = sess.run(
                model.get_var('x_sub'), feed_dict=feed_dict)
            output_wrapper['conv1'] = sess.run(
                model.get_var('h_conv1'), feed_dict=feed_dict)
            output_wrapper['bn1'] = sess.run(
                model.get_var('h_bn1'), feed_dict=feed_dict)
            output_wrapper['act2'] = sess.run(model.res_net.get_var(
                'stage_0/layer_2/relu'), feed_dict=feed_dict)
            # output_wrapper['shortcut3'] = sess.run(
            #     model.res_net.get_var('stage_1/shortcut'), feed_dict=feed_dict)
            for ll in xrange(8):
                output_wrapper['act3_{}'.format(ll)] = sess.run(
                    model.res_net.get_var('stage_1/layer_{}/relu'.format(ll)),
                    feed_dict=feed_dict)
            output_wrapper['act4'] = sess.run(model.res_net.get_var(
                'stage_2/layer_35/relu'), feed_dict=feed_dict)
            output_wrapper['act5'] = sess.run(model.res_net.get_var(
                'stage_3/layer_2/relu'), feed_dict=feed_dict)
            print '-----------------------------------------------------------'
            print 'Wrapper model pass 2'
            print_prob(sess.run(out_var, feed_dict=feed_dict)[0])
            print '-----------------------------------------------------------'
    # Check all intermediate values.
    print '-----------------------------------------------------------'
    print 'Summary'
    print '{:15s}\t{:10s}\t{:10s}'.format('variable', 'diff', 'rel diff')
    print '-----------------------------------------------------------'
    for kk in sorted(output_old.keys()):
        diff = np.abs(output_old[kk] - output_new[kk]).mean()
        denom = np.abs(output_old[kk]).mean()
        print '{:15s}\t{:.8f}\t{:.8f}'.format(kk, diff, diff / denom)
    print '-----------------------------------------------------------'
    print '-----------------------------------------------------------'
    print 'Summary 2'
    print '{:15s}\t{:10s}\t{:10s}'.format('variable', 'diff', 'rel diff')
    print '-----------------------------------------------------------'
    for kk in sorted(output_old.keys()):
        diff = np.abs(output_old[kk] - output_wrapper[kk]).mean()
        denom = np.abs(output_old[kk]).mean()
        print '{:15s}\t{:.8f}\t{:.8f}'.format(kk, diff, diff / denom)
    print '-----------------------------------------------------------'
# Entry point: run the conversion consistency checks.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8116309 | from .foregroundHandler import EstimateZodi, EstimateForeground, EstimateForegrounds
from .instrumentHandler import BuildChannels, BuildInstrument, LoadPayload, PreparePayload, MergeChannelsOutput, \
GetChannelList
from .loadOptions import LoadOptions
from .loadSource import LoadSource
from .noiseHandler import EstimateNoise, EstimateNoiseInChannel
from .propagateLight import PropagateTargetLight, PropagateForegroundLight
from .targetHandler import LoadTargetList, PrepareTarget, UpdateTargetTable, EstimateMaxSignal, \
ObserveTarget, ObserveTargetlist
| StarcoderdataPython |
3332191 | <gh_stars>10-100
from pipeline import * | StarcoderdataPython |
8013107 | <reponame>CalebUAz/Stock-Trader-App
def create_user_table(c):
c.execute('CREATE TABLE IF NOT EXISTS usertable(fullname TEXT, email TEXT, username TEXT,password TEXT, cash INTEGER)')
| StarcoderdataPython |
1740464 | <gh_stars>1-10
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test():
    """Round-trip a (3, 0) array through to_buffers/from_buffers unchanged."""
    original = ak.from_numpy(np.zeros((3, 0), dtype=np.int32))
    rebuilt = ak.from_buffers(*ak.to_buffers(original))
    assert ak.to_list(rebuilt) == [[], [], []]
| StarcoderdataPython |
9651659 | <filename>VectorMessenger/server.py<gh_stars>0
from os import system as cmd, chdir, path
from sys import platform as sysplatform
from argparse import ArgumentParser
from VectorMessenger.MessengerCore.Helpers.Global import APPDICT
from VectorMessenger.MessengerCore.CoreServer import MessengerServer
args = None
def startup():
    """Set the console title (Windows only) and launch the messenger server."""
    if sysplatform == 'win32':
        cmd(f'title {APPDICT["server"]["title"]}')
    MessengerServer(is_localhost=args.localhost)
def argparser():
    """Parse the command line into the module-level ``args`` global."""
    global args
    cli = ArgumentParser(description="Server launcher")
    cli.add_argument("--localhost", help="Run server on localhost", action="store_true")
    args = cli.parse_args()
def run_source():
    """Entry point when launched from the source tree (e.g. via poetry)."""
    argparser()
    # Run relative to this module's directory, not the invoker's CWD.
    chdir(path.dirname(__file__))
    startup()
if __name__ == '__main__':
    """ Built app startup """
    # Frozen/built executable path: run from the executable's own directory.
    argparser()
    chdir(path.abspath('.'))
    startup()
| StarcoderdataPython |
3466019 | import logging
from pygame.locals import *
from src.const import *
from src.level.main_menu import MainMenu
class Menu:
    """In-game pause/main menu: a translucent option list (Play / Help / About)
    plus an optional side panel showing controls help or credits."""
    # Menu placement and size, derived from the display dimensions.
    x = DISPLAY_WIDTH / 4
    y = DISPLAY_HEIGHT / 2 - 150
    width = 180
    height = 200
    controls_text = ("WASD - move\n"
                     "Space - warp\n"
                     "R - restart\n"
                     "Esc - pause\n\n"
                     "Warp your way out of trouble!")
    about_text = ("PyWeek April 2018 entry \n"
                  "by <NAME> and <NAME>\n\n"
                  "github.com/ozcer/warpin-walter\n")
    def __init__(self, game):
        """Build the menu and info-panel surfaces; start with "Play" selected."""
        self.game = game
        self.font = pygame.font.Font('src//font//font.otf', 30)
        self.image = pygame.Surface((Menu.width, Menu.height))
        self.image.fill(D_BLUE)
        self.image.set_alpha(155)  # translucent overlay
        self.rect = self.image.get_rect()
        self.x, self.y = Menu.x, Menu.y
        self.rect.center = self.x, self.y
        self.options = ["Play", "Help", "About"]
        self.selection_index = 0
        # Info panel
        self.panel_surf = pygame.Surface((Menu.width * 2, Menu.height))
        self.panel_rect = self.panel_surf.get_rect()
        self.panel_rect.left = self.rect.right + 10
        self.panel_rect.centery = self.y
        self.panel_surf.set_alpha(155)
        self.panel_surf.fill(L_BLUE)
        self.panel_on = False
        self.panel_text = ""
        self.panel_font = pygame.font.Font('src//font//font.otf', 20)
    def update(self):
        """Re-centre the menu and react to this frame's keyboard events."""
        self.rect.center = self.x, self.y
        self._process_input()
    def draw(self):
        """Blit the menu (and the info panel, when enabled) onto the game surface."""
        self.game.surface.blit(self.image, self.rect)
        if self.panel_on:
            self.panel_surf.fill(L_BLUE)
            self._render_panel_text(self.panel_text)
            self.game.surface.blit(self.panel_surf, self.panel_rect)
        self._draw_options()
    def _draw_options(self):
        """Render each option label, highlighting the currently selected one."""
        # draw options
        dy = self.rect.h / (len(self.options) + 1)
        for index, option in enumerate(self.options):
            # Play becomes Resume if game already started
            if option == "Play" and not isinstance(self.game.level, MainMenu):
                option = "Resume"
            option_surface = self.font.render(option, True, BLACK)
            option_rect = option_surface.get_rect()
            option_rect.center = self.rect.centerx, self.rect.top + (index + 1) * dy
            # selected highlight
            if index == self.selection_index:
                selected_surf = pygame.Surface((150, 40))
                selected_rect = selected_surf.get_rect()
                selected_rect.center = self.rect.centerx, self.rect.top + (index + 1) * dy
                selected_surf.fill(L_GREY)
                selected_surf.set_alpha(155)
                self.game.surface.blit(selected_surf, selected_rect)
            self.game.surface.blit(option_surface, option_rect)
    def _process_input(self):
        """Translate key presses into selection moves or option activation."""
        for event in self.game.events:
            if event.type == KEYDOWN:
                key = event.key
                if key in (K_UP, K_w):
                    self._change_selection(-1)
                elif key in (K_DOWN, K_s):
                    self._change_selection(1)
                elif key in (K_RETURN, K_SPACE):
                    self._make_selection()
    def _change_selection(self, change):
        """Move the highlight by ``change`` entries (wraps) and hide the panel."""
        self.panel_on = False
        self.selection_index += change
        self.selection_index = self.selection_index % len(self.options)
        selection = self.options[self.selection_index]
        logging.info(f"{self} selecting {selection} at index {self.selection_index}")
    def _make_selection(self):
        """Activate the highlighted option (resume game or show a panel)."""
        # Play/Resume
        if self.selection_index == 0:
            self.game.paused = False
            if isinstance(self.game.level, MainMenu):
                self.game.build_next_level()
        elif self.selection_index == 1:
            self.panel_on = True
            self.panel_text = (Menu.controls_text)
        elif self.selection_index == 2:
            self.panel_on = True
            self.panel_text = (Menu.about_text)
    def _render_panel_text(self, text):
        """Draw ``text`` onto the info-panel surface."""
        # NOTE(review): 'left_pading' mirrors the (apparently misspelled)
        # keyword in render_text_on_surface's signature -- confirm before
        # renaming either side.
        render_text_on_surface(text, self.panel_surf, self.panel_font,
                               top_padding=10, left_pading=10)
    def __str__(self):
        return f"{self.__class__.__name__} at {self.x, self.y}"
| StarcoderdataPython |
11360142 | import numpy as np
import sys
sys.path.append(".")
from src.alpha_rank import alpha_rank
def rock_paper_scissors(graphing=False):
    """Standard RPS sanity check: the ranking must stay uniform (1/3 each)
    across the whole sweep of alpha values; optionally plot the curves."""
    payoffs = np.array([[ 0, -1, 1],
                        [ 1, 0, -1],
                        [-1, 1, 0]])
    alphas = []
    strat_probs = []
    for alpha in np.logspace(-4, 2, num=60, base=10):
        phi = alpha_rank([payoffs], alpha=alpha)
        # print("Alpha: {:.2f}".format(alpha))
        assert np.all(np.isclose(np.array([1/3, 1/3, 1/3]), np.array(phi), atol=1e-4))
        alphas.append(alpha)
        strat_probs.append(tuple(phi))
    if graphing:
        import matplotlib.pyplot as plt
        action_names = ["Rock", "Paper", "Scissors"]
        for idx, name in enumerate(action_names):
            ys = [s[idx] for s in strat_probs]
            plt.plot(alphas, ys, label=name)
        plt.legend()
        plt.xscale("log")
        plt.ylim(0,1)
        plt.xlabel(r"$\alpha$")
        plt.ylabel(r"Mass in $\pi$")
        plt.yticks([i/10 for i in range(11)])
        plt.grid(True, which="major")
        plt.savefig("graphs/alpha_rank_reprod/rps_alpha_strats.png")
        plt.close()
def biased_rock_paper_scissors_multipop(graphing=False, mutations=False):
    """Two-population biased RPS sweep; optionally repeat for several mutation
    rates and plot the mass of each joint strategy profile against alpha."""
    payoffs = np.array([
        [[ 0, -0.5, 1],
         [ 0.5, 0, -0.1],
         [ -1, 0.1, 0]],
        [[ 0, -0.5, 1],
         [ 0.5, 0, -0.1],
         [ -1, 0.1, 0]],
    ])
    mutations_list = [1,10,20,30,40,50,100] if mutations else [50]
    for m in mutations_list:
        if mutations:
            print("Mutation {}".format(m))
        alphas = []
        strat_probs = []
        for alpha in np.logspace(-4, 2, num=60, base=10):
            # Some (alpha, mutation) combinations fail inside alpha_rank;
            # those points are simply skipped.
            try:
                phi = alpha_rank(payoffs, alpha=alpha, mutation=m)
                alphas.append(alpha)
                strat_probs.append(tuple(phi))
            except ValueError:
                pass
        if graphing:
            import matplotlib.pyplot as plt
            action_names = ["{}_{}".format(a, b) for a in ["R", "P", "S"] for b in ["R", "P", "S"]]
            # action_names = ["Rock", "Paper", "Scissors"]
            for idx, name in enumerate(action_names):
                ys = [s[idx] for s in strat_probs]
                plt.plot(alphas, ys, label=name)
            plt.legend()
            plt.xscale("log")
            plt.ylim(0,1)
            plt.xlabel(r"$\alpha$")
            plt.ylabel(r"Mass in $\pi$")
            plt.yticks([i/10 for i in range(11)])
            plt.grid(True, which="major")
            plt.savefig("graphs/alpha_rank_reprod/multipop_biased_rps_alpha_strats_{}m.png".format(m))
            plt.close()
def biased_rock_paper_scissors(graphing=False, mutations=False):
    """Single-population biased RPS sweep; optionally repeat for several
    mutation rates and plot per-action mass against alpha."""
    payoffs = np.array([[ 0, -0.5, 1],
                        [ 0.5, 0, -0.1],
                        [ -1, 0.1, 0]])
    mutations_list = [1,10,20,30,40,50,100] if mutations else [50]
    for m in mutations_list:
        if mutations:
            print("Mutation {}".format(m))
        alphas = []
        strat_probs = []
        for alpha in np.logspace(-4, 2, num=60, base=10):
            phi = alpha_rank([payoffs], alpha=alpha, mutation=m)
            # print("Alpha: {:.2f}".format(alpha))
            # probs = _get_rps_strat(phi)
            alphas.append(alpha)
            strat_probs.append(tuple(phi))
        if graphing:
            import matplotlib.pyplot as plt
            # action_names = ["{}_{}".format(a, b) for a in ["R", "P", "S"] for b in ["R", "P", "S"]]
            action_names = ["Rock", "Paper", "Scissors"]
            for idx, name in enumerate(action_names):
                ys = [s[idx] for s in strat_probs]
                plt.plot(alphas, ys, label=name)
            plt.legend()
            plt.xscale("log")
            plt.ylim(0,1)
            plt.xlabel(r"$\alpha$")
            plt.ylabel(r"Mass in $\pi$")
            plt.yticks([i/10 for i in range(11)])
            plt.grid(True, which="major")
            plt.savefig("graphs/alpha_rank_reprod/biased_rps_alpha_strats_{}m.png".format(m))
            plt.close()
def bos(graphing=False):
    """Battle-of-the-Sexes sweep (two populations, 2x2 payoffs); plots the
    mass of each joint profile (OO/OM/MO/MM) against alpha."""
    payoffs = [
        np.array([[3, 0],
                  [0, 2]]),
        np.array([[2, 0],
                  [0, 3]])
    ]
    alphas = []
    strat_probs = []
    # Goes haywire after 10^(-1)
    for alpha in np.logspace(-4, -1, num=30, base=10):
        phi = alpha_rank(payoffs, alpha=alpha)
        alphas.append(alpha)
        strat_probs.append(tuple(phi))
    if graphing:
        import matplotlib.pyplot as plt
        action_names = ["OO", "OM", "MO", "MM"]
        for idx, name in enumerate(action_names):
            ys = [s[idx] for s in strat_probs]
            plt.plot(alphas, ys, label=name)
        plt.legend()
        plt.xscale("log")
        plt.ylim(0,1)
        plt.xlabel(r"$\alpha$")
        plt.ylabel(r"Mass in $\pi$")
        plt.yticks([i/10 for i in range(11)])
        plt.grid(True, which="major")
        plt.savefig("graphs/alpha_rank_reprod/bos.png")
        plt.close()
def transpose_test(graphing=False):
    """Asymmetric 2x2 game used to check payoff-matrix orientation; plots the
    mass of each joint profile (AA/AB/BA/BB) against alpha."""
    payoffs = [
        np.array([[ 0, 1],
                  [ 0, 0]]),
        np.array([[ 0, 1],
                  [ 0, 0]])
    ]
    alphas = []
    strat_probs = []
    for alpha in np.logspace(-4, 2, num=60, base=10):
        phi = alpha_rank(payoffs, alpha=alpha)
        alphas.append(alpha)
        strat_probs.append(tuple(phi))
    if graphing:
        import matplotlib.pyplot as plt
        action_names = ["AA", "AB", "BA", "BB"]
        for idx, name in enumerate(action_names):
            ys = [s[idx] for s in strat_probs]
            plt.plot(alphas, ys, label=name)
        plt.legend()
        plt.xscale("log")
        plt.ylim(-0.1,1.1)
        plt.xlabel(r"$\alpha$")
        plt.ylabel(r"Mass in $\pi$")
        plt.yticks([i/10 for i in range(11)])
        plt.grid(True, which="major")
        plt.savefig("graphs/alpha_rank_reprod/transpose.png")
        plt.close()
def prisoners_dilemma(graphing=False):
    """Prisoner's Dilemma sweep (two populations); plots the mass of each
    joint profile (CC/CD/DC/DD) against alpha."""
    payoffs = [
        np.array([[-1, -3],
                  [ 0, -2]]),
        np.array([[ -1, 0],
                  [ -3, -2]])
    ]
    alphas = []
    strat_probs = []
    for alpha in np.logspace(-4, 2, num=60, base=10):
        phi = alpha_rank(payoffs, alpha=alpha)
        alphas.append(alpha)
        strat_probs.append(tuple(phi))
    if graphing:
        import matplotlib.pyplot as plt
        action_names = ["CC", "CD", "DC", "DD"]
        for idx, name in enumerate(action_names):
            ys = [s[idx] for s in strat_probs]
            plt.plot(alphas, ys, label=name)
        plt.legend()
        plt.xscale("log")
        plt.ylim(-0.1,1.1)
        plt.xlabel(r"$\alpha$")
        plt.ylabel(r"Mass in $\pi$")
        plt.yticks([i/10 for i in range(11)])
        plt.grid(True, which="major")
        plt.savefig("graphs/alpha_rank_reprod/prisoners_dilemma.png")
        plt.close()
def bernoulli_one_pop(graphing=False):
    """Single-population sweep over a 3x3 win-probability matrix; plots the
    per-action mass (A/B/C) against alpha."""
    payoffs = [
        np.array(
            [[0.5, 0.62, 0.3 ],
             [0.26, 0.33, 0.9 ],
             [0.36, 0.01, 0.94]])
    ]
    alphas = []
    strat_probs = []
    for alpha in np.logspace(-4, 2, num=60, base=10):
        phi = alpha_rank(payoffs, alpha=alpha)
        alphas.append(alpha)
        strat_probs.append(tuple(phi))
    if graphing:
        import matplotlib.pyplot as plt
        action_names = ["A", "B", "C"]
        for idx, name in enumerate(action_names):
            ys = [s[idx] for s in strat_probs]
            plt.plot(alphas, ys, label=name)
        plt.legend()
        plt.xscale("log")
        plt.ylim(-0.1,1.1)
        plt.xlabel(r"$\alpha$")
        plt.ylabel(r"Mass in $\pi$")
        plt.yticks([i/10 for i in range(11)])
        plt.grid(True, which="major")
        plt.savefig("graphs/alpha_rank_reprod/bernoulli_one_pop.png")
        plt.close()
def infinite_alpha_rank_tests():
    """Assert known infinite-alpha rankings on small games, exercising both
    the dense and sparse code paths of alpha_rank."""
    # Rock paper scissors
    payoffs = np.array([[ 0, -1, 1],
                        [ 1, 0, -1],
                        [-1, 1, 0]])
    phi = alpha_rank([payoffs], use_inf_alpha=True, use_sparse=False)
    np.testing.assert_almost_equal(np.array([1/3,1/3,1/3]), phi)
    phi = alpha_rank([payoffs], use_inf_alpha=True, use_sparse=True)
    np.testing.assert_almost_equal(np.array([1/3,1/3,1/3]), phi)
    # Biased rock paper scissors
    payoffs = np.array([[ 0, -0.5, 1],
                        [ 0.5, 0, -0.1],
                        [ -1, 0.1, 0]])
    phi = alpha_rank([payoffs], use_inf_alpha=True, use_sparse=False)
    np.testing.assert_almost_equal(np.array([1/3,1/3,1/3]), phi)
    phi = alpha_rank([payoffs], use_inf_alpha=True, use_sparse=True)
    np.testing.assert_almost_equal(np.array([1/3,1/3,1/3]), phi)
    # Battle of sexes
    payoffs = [
        np.array([[3, 0],
                  [0, 2]]),
        np.array([[2, 0],
                  [0, 3]])
    ]
    phi = alpha_rank(payoffs, use_inf_alpha=True, inf_alpha_eps=0.0000001, use_sparse=False) # Need a small eps to ensure its close for this small task
    np.testing.assert_almost_equal(np.array([1/2, 0, 0, 1/2]), phi)
    phi = alpha_rank(payoffs, use_inf_alpha=True, inf_alpha_eps=0.0000001, use_sparse=True) # Need a small eps to ensure its close for this small task
    np.testing.assert_almost_equal(np.array([1/2, 0, 0, 1/2]), phi)
    payoffs = [
        np.array(
            [[0.5, 0.62, 0.3 ],
             [0.26, 0.33, 0.9 ],
             [0.36, 0.01, 0.94]])
    ]
    phi = alpha_rank(payoffs, use_inf_alpha=True, inf_alpha_eps=0.0000001, use_sparse=False) # Need a small eps to ensure its close for this small task
    np.testing.assert_almost_equal(np.array([1/3, 1/3, 1/3]), phi)
    phi = alpha_rank(payoffs, use_inf_alpha=True, inf_alpha_eps=0.0000001, use_sparse=True) # Need a small eps to ensure its close for this small task
    np.testing.assert_almost_equal(np.array([1/3, 1/3, 1/3]), phi)
# Testing the implementation of alpha rank
if __name__ == '__main__':
    # Finite alpha tests, mostly done by eye (comparing graphs to the alpha rank paper, https://arxiv.org/abs/1903.01373)
    graphing = True
    transpose_test(graphing=graphing)
    rock_paper_scissors(graphing=graphing)
    biased_rock_paper_scissors(graphing=graphing, mutations=False)
    biased_rock_paper_scissors_multipop(graphing=graphing, mutations=False)
    bos(graphing=graphing)
    prisoners_dilemma(graphing=graphing)
    bernoulli_one_pop(graphing=graphing)
    # Infinite alpha tests
    infinite_alpha_rank_tests()
4830208 | """
Test Composite Map
"""
import numpy as np
import pytest
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
import sunpy.data.test
import sunpy.map
from sunpy.tests.helpers import figure_test
# Root of the bundled sunpy test-data directory.
testpath = sunpy.data.test.rootdir
# Ignore missing metadata warnings
pytestmark = [pytest.mark.filterwarnings('ignore:Missing metadata for observer'),
              pytest.mark.filterwarnings(r'ignore:Unable to treat `\.meta` as a FITS header')]
@pytest.fixture
def composite_test_map(aia171_test_map, hmi_test_map):
    """Composite of the AIA and HMI test maps with their metadata reconciled
    (same observation time and observer location) so compositing is valid."""
    # The test maps have wildly different observation times, which throws off compositing
    hmi_test_map.meta['date-obs'] = aia171_test_map.meta['date-obs']
    # Also set the HMI observer location to be the same as the AIA observer location
    del hmi_test_map.meta['crln_obs']
    del hmi_test_map.meta['crlt_obs']
    hmi_test_map.meta['hgln_obs'] = aia171_test_map.observer_coordinate.lon.to_value('deg')
    hmi_test_map.meta['hglt_obs'] = aia171_test_map.observer_coordinate.lat.to_value('deg')
    return sunpy.map.Map(aia171_test_map, hmi_test_map, composite=True)
def test_type_of_arguments_composite_map(composite_test_map):
    """Passing a non-map argument must raise with an explanatory message."""
    with pytest.raises(ValueError) as excinfo:
        sunpy.map.CompositeMap(23, composite=True)
    assert str(excinfo.value) == 'CompositeMap expects pre-constructed map objects.'
@figure_test
def test_plot_composite_map(composite_test_map):
    """Figure test: default plot of the composite map."""
    composite_test_map.plot()
@figure_test
def test_plot_composite_map_contours(composite_test_map):
    """Figure test: plot with percentage contour levels on the second layer."""
    composite_test_map.set_levels(1, np.arange(-75, 76, 25) << u.percent)
    composite_test_map.plot()
@figure_test
def test_plot_composite_map_linewidths(composite_test_map):
    """Figure test: contour plot honouring a custom ``linewidths`` value."""
    composite_test_map.set_levels(1, np.arange(-75, 76, 25) << u.percent)
    composite_test_map.plot(linewidths=0.5)
def test_remove_composite_map(composite_test_map):
    """Removing a layer shrinks the composite: old index 1 becomes invalid."""
    composite_test_map.remove_map(0)
    with pytest.raises(IndexError):
        composite_test_map.get_map(1)
def test_get_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    """get_map returns the constituent maps in insertion order."""
    assert composite_test_map.get_map(0) == aia171_test_map
    assert composite_test_map.get_map(1) == hmi_test_map
def test_get_alpha_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    """get_alpha with no index returns the alphas of all layers as a list."""
    assert composite_test_map.get_alpha() == [aia171_test_map.alpha, hmi_test_map.alpha]
def test_get_alpha_with_index_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    """get_alpha with an index returns that single layer's alpha."""
    assert composite_test_map.get_alpha(0) == aia171_test_map.alpha
    assert composite_test_map.get_alpha(1) == hmi_test_map.alpha
def test_get_levels_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    """get_levels with no index returns the contour levels of all layers."""
    assert composite_test_map.get_levels() == [aia171_test_map.levels, hmi_test_map.levels]
def test_get_levels_with_index_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    """get_levels with an index returns that single layer's contour levels."""
    assert composite_test_map.get_levels(0) == aia171_test_map.levels
    assert composite_test_map.get_levels(1) == hmi_test_map.levels
@figure_test
def test_set_alpha_composite_map(composite_test_map):
    """Figure test: plot after lowering the second layer's opacity."""
    composite_test_map.set_alpha(1, 0.5)
    composite_test_map.plot()
def test_set_alpha_out_of_range_composite_map(composite_test_map):
    """set_alpha must reject values outside [0, 1] in either direction."""
    # BUG FIX: previously both calls shared one ``pytest.raises`` block, so
    # the second (negative) value was never executed once the first raised.
    # Each bad value now gets its own block and message check.
    with pytest.raises(Exception) as excinfo:
        composite_test_map.set_alpha(0, 5.0)
    assert str(excinfo.value) == 'Alpha value must be between 0 and 1.'
    with pytest.raises(Exception) as excinfo:
        composite_test_map.set_alpha(1, -3.0)
    assert str(excinfo.value) == 'Alpha value must be between 0 and 1.'
def test_set_levels_percent(composite_test_map):
    """set_levels accepts plain numbers, and ``percent=True`` attaches the
    percent unit to an otherwise unitless array."""
    numbers = np.arange(10, 100, 10)
    composite_test_map.set_levels(0, numbers)
    np.testing.assert_allclose(composite_test_map.get_levels(0), numbers)
    implicit_percentage = np.arange(10, 100, 10)
    composite_test_map.set_levels(0, implicit_percentage, percent=True)
    assert_quantity_allclose(composite_test_map.get_levels(0), implicit_percentage << u.percent)
@figure_test
def test_peek_composite_map(composite_test_map):
    """Figure test: peek() renders the composite in a standalone figure."""
    composite_test_map.peek()
| StarcoderdataPython |
3498852 | """ Large number handling
"""
from .FLOAT import FLOAT
class DOUBLE(FLOAT):
    """Double-width (64-bit) float column."""
    # MySQL column type emitted for this field.
    _v_mysql_type = "double"
class HUGEDECIMAL(FLOAT):
    """Fixed-point decimal for very large/precise values.

    MySQL DECIMAL supports at most 65 total digits; this uses 33 digits with
    32 of them fractional.
    """
    # NOTE(review): DECIMAL(33,32) leaves only ONE integer digit (33 - 32),
    # so values >= 10 cannot be stored; if trillions are expected (as the
    # original docstring implied), the precision/scale likely needs revisiting.
    _v_mysql_type = "DECIMAL(33,32)"
| StarcoderdataPython |
5166249 | from array import array
if __name__ == '__main__':
    # 'I' = unsigned int typecode; demonstrate index assignment and in-place
    # update on an array.array, then print its repr.
    a = array('I', [5, 8, 17, 54, 63, 95, 7, 14, 9])
    a[3] = 75
    a[0] += 1
    print(a)
import sys

import readBoard
#Move the tile at (i1,j1) into the position of (i2,j2) and combine the tiles
def combine(board, i1, j1, i2, j2):
    """Merge the tile at (i1, j1) into the equal tile at (i2, j2).

    Doubles the destination tile and blanks the source tile.
    """
    # BUG FIX: operate on the ``board`` argument instead of the module-level
    # ``testBoard`` the original mutated, so the helper works on any grid.
    board[i2][j2] = board[i2][j2] * 2
    board[i1][j1] = ' '
#Takes the board and the position of a tile as inputs and move the tile up by 1
def moveUp(board, i, j):
    """Shift the tile at (i, j) up one row, then keep sliding or merge."""
    board[i - 1][j] = board[i][j]
    board[i][j] = ' '
    if i == 1:
        # The tile has reached the top row; nothing above to inspect.
        return
    above = board[i - 2][j]
    if above == ' ':
        # Still empty space above: keep cascading upwards.
        moveUp(board, i - 1, j)
    elif board[i - 1][j] == above:
        # Equal neighbour above: merge the two tiles.
        combine(board, i - 1, j, i - 2, j)
def moveRight(board, i, j):
    """Shift the tile at (i, j) one column right, then keep sliding or merge."""
    board[i][j + 1] = board[i][j]
    board[i][j] = ' '
    if j == 2:
        # Tile is now in the rightmost column; nothing further to do.
        return
    neighbour = board[i][j + 2]
    if neighbour == ' ':
        moveRight(board, i, j + 1)
    elif board[i][j + 1] == neighbour:
        combine(board, i, j + 1, i, j + 2)
def moveDown(board, i, j):
    """Shift the tile at (i, j) down one row, then keep sliding or merge."""
    board[i + 1][j] = board[i][j]
    board[i][j] = ' '
    if i == 2:
        # Tile is now in the bottom row; nothing further to do.
        return
    below = board[i + 2][j]
    if below == ' ':
        moveDown(board, i + 1, j)
    elif board[i + 1][j] == below:
        combine(board, i + 1, j, i + 2, j)
def moveLeft(board, i, j):
    """Shift the tile at (i, j) one column left, cascading or merging."""
    board[i][j - 1] = board[i][j]
    board[i][j] = ' '
    if ((not j == 1) and board[i][j - 2] == ' '):
        moveLeft(board, i, j - 1)
    # BUG FIX: the merge guard used to be ``j <= 5`` (always true on a 4-wide
    # board), so for j == 1 the comparison read board[i][-1] and could wrongly
    # merge a column-0 tile with the tile in the *last* column.  ``j >= 2``
    # mirrors the bounds checks in moveUp/moveRight/moveDown.
    elif j >= 2 and board[i][j - 1] == board[i][j - 2]:
        combine(board, i, j - 1, i, j - 2)
def move(direction):
    """Apply one move to the global ``testBoard``.

    ``direction`` must be 'up', 'right', 'down' or 'left'.  Tiles are visited
    in the order required by the direction (rows/columns nearest the
    destination edge first) so each tile slides as far as possible or merges
    with an equal neighbour.
    """
    if direction == 'up':
        # Loop through each tile, except the top row which cannot move up
        for i in range(1, 4):
            for j in range(4):
                if not testBoard[i][j] == ' ':
                    # If the tile above is equal to the tile being moved, combine the 2
                    if testBoard[i][j] == testBoard[i-1][j]:
                        combine(testBoard, i, j, i-1, j)
                    # If the tile above is a blank, move the current tile up
                    elif testBoard[i-1][j] == ' ':
                        moveUp(testBoard, i, j)
    elif direction == 'right':
        for j in range(2, -1, -1):
            for i in range(4):
                if not testBoard[i][j] == ' ':
                    if testBoard[i][j] == testBoard[i][j+1]:
                        combine(testBoard, i, j, i, j+1)
                    elif testBoard[i][j+1] == ' ':
                        moveRight(testBoard, i, j)
    elif direction == 'down':
        # (stray debug print of every visited cell removed here)
        for i in range(2, -1, -1):
            for j in range(4):
                if not testBoard[i][j] == ' ':
                    if testBoard[i][j] == testBoard[i+1][j]:
                        combine(testBoard, i, j, i+1, j)
                    elif testBoard[i+1][j] == ' ':
                        moveDown(testBoard, i, j)
    elif direction == 'left':
        for j in range(1, 4):
            for i in range(4):
                if not testBoard[i][j] == ' ':
                    if testBoard[i][j] == testBoard[i][j-1]:
                        combine(testBoard, i, j, i, j-1)
                    elif testBoard[i][j-1] == ' ':
                        moveLeft(testBoard, i, j)
    else:
        # BUG FIX: ``sys`` was never imported, so this line previously raised
        # NameError instead of exiting with a message (import added at top).
        sys.exit("Invalid move direction")
| StarcoderdataPython |
1957024 | <reponame>chews0n/super-duper-octo-fiesta
import urllib.request
from datetime import date
import os
class DownloadWeatherData:
    """Bulk-download daily weather CSVs (one file per year) from the
    Environment Canada climate.weather.gc.ca bulk-data endpoint."""

    def __init__(self, station_number=27211, start_year=2010, end_year=None):
        """
        Initialize the class in order to download the weather data from the Weather Canada website.

        :param station_number: The station number provided by weather canada to extract the data at. The default is
            Calgary Intl CS (27211) for the station id
        :param start_year: The year in which you would like to start the data collection at
        :param end_year: The year you would like to end the data collection at. Defaults to the current year,
            evaluated at call time.
        """
        # BUG FIX: the default used to be ``date.today().year`` in the
        # signature, which is frozen at class-definition (import) time; a
        # long-lived process crossing a year boundary would silently use the
        # stale year.  A None sentinel defers the lookup to call time.
        if end_year is None:
            end_year = date.today().year
        self.station_number = station_number
        self.start_year = start_year
        self.end_year = end_year
        # This string downloads daily time intervals (timeframe=2).
        self.scraping_string = "https://climate.weather.gc.ca/climate_data/bulk_data_e.html?format=csv&stationID" \
                               "={}&Year={}&Month=1&Day=14&timeframe=2&submit=Download+Data"

    def download_data(self, download_location=None):
        """
        Download the daily weather data, one CSV file per year.

        :param download_location: Path template for the saved files.  Must contain a ``{}`` placeholder which is
            filled with the year (otherwise each year overwrites the previous file).  Defaults to
            ``weather-data-calgary-{}.csv`` in the current working directory.
        :return: None
        """
        # BUG FIX: the default was previously computed with os.getcwd() at
        # import time; resolve it at call time instead so os.chdir() between
        # import and call is honoured.
        if download_location is None:
            download_location = os.path.join(os.getcwd(), "weather-data-calgary-{}.csv")
        for year in range(self.start_year, self.end_year + 1):
            year_string = self.scraping_string.format(self.station_number, year)
            urllib.request.urlretrieve(year_string, filename=download_location.format(year))
| StarcoderdataPython |
6587172 | <gh_stars>100-1000
import os
import time
import datetime
import tempfile
import urllib2
import gzip
import pandas as pd
from gym import logger
from gym_cryptotrading.strings import *
class Generator:
    """Download, resample and segment Bitcoin (coinbaseUSD) trade history into
    contiguous per-minute blocks usable as trading-environment episodes.

    Written for Python 2 (``urllib2``); class-level attributes hold the shared
    on-disk dataset location.
    """
    # Shared, lazily-initialised paths (set by set_dataset_path/set_temp_dir).
    dataset_path = None
    temp_dir = None
    def __init__(self, history_length, horizon):
        # Ensure the sampled dataset exists on disk before loading it.
        Generator.load_gen()
        self.history_length = history_length
        self.horizon = horizon
        self._load_data()
    @property
    def diff_blocks(self):
        return self._diff_blocks
    @property
    def price_blocks(self):
        return self._price_blocks
    @property
    def timestamp_blocks(self):
        return self._timestamp_blocks
    def _preprocess(self):
        """Read the CSV and group rows into blocks of gap-free minutes that are
        long enough for at least one (history + horizon) episode."""
        data = pd.read_csv(Generator.dataset_path)
        message = 'Columns found in the dataset {}'.format(data.columns)
        logger.info(message)
        data = data.dropna()
        start_time_stamp = data['Timestamp'][0]
        # Minutes since the first row; subtracting the row index makes the
        # value constant within a run of consecutive minutes, so equal values
        # label one contiguous block.
        # NOTE(review): ``Series - range(...)`` relies on Python 2 semantics
        # (range returns a list); under Python 3 this would need list(range(...)).
        timestamps = data['Timestamp'].apply(lambda x: (x - start_time_stamp) / 60)
        timestamps = timestamps - range(timestamps.shape[0])
        data.insert(0, 'blocks', timestamps)
        blocks = data.groupby('blocks')
        message = 'Number of blocks of continuous prices found are {}'.format(len(blocks))
        logger.info(message)
        self._data_blocks = []
        distinct_episodes = 0
        for name, indices in blocks.indices.items():
            '''
            Length of the block should exceed the history length and horizon by 1.
            Extra 1 is required to normalize each price block by previos time stamp
            '''
            if len(indices) > (self.history_length + self.horizon + 1):
                self._data_blocks.append(blocks.get_group(name))
                # similarly, we subtract an extra 1 to calculate the number of distinct episodes
                distinct_episodes = distinct_episodes + (len(indices) - (self.history_length + self.horizon) + 1 + 1)
        data = None
        message_list = [
            'Number of usable blocks obtained from the dataset are {}'.format(len(self._data_blocks))
        ]
        message_list.append(
            'Number of distinct episodes for the current configuration are {}'.format(distinct_episodes)
        )
        # NOTE(review): under Python 3 map() is lazy and these would never be
        # logged; fine under Python 2 where map() is eager.
        map(logger.info, message_list)
    def _generate_attributes(self):
        """Derive per-block arrays: close-price diffs, prices normalised by the
        previous minute, and the matching timestamps."""
        self._diff_blocks = []
        self._price_blocks = []
        self._timestamp_blocks = []
        for data_block in self._data_blocks:
            block = data_block[['price_close', 'price_low', 'price_high', 'volume']]
            closing_prices = block['price_close']
            diff_block = closing_prices.shift(-1)[:-1].subtract(closing_prices[:-1])
            # currently normalizing the prices by previous prices of the same category
            normalized_block = block.shift(-1)[:-1].truediv(block[:-1])
            # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
            # .values / .to_numpy() is the modern equivalent.
            self._diff_blocks.append(diff_block.as_matrix())
            self._price_blocks.append(normalized_block.as_matrix())
            self._timestamp_blocks.append(data_block['DateTime_UTC'].values[1:])
        self._data_blocks = None #free memory
    def _load_data(self):
        self._preprocess()
        self._generate_attributes()
    @staticmethod
    def get_transactions():
        """Download the raw trade dump, resample it to 1-minute OHLCV bars and
        save the result to ``Generator.dataset_path``."""
        if not Generator.dataset_path:
            Generator.set_dataset_path()
        message = 'Getting latest transactions from {}.'.format(URL) + \
            '\nThis might take a few minutes depending upon your internet speed.'
        logger.info(message)
        path = os.path.join(Generator.temp_dir, 'coinbaseUSD.csv.gz')
        f = urllib2.urlopen(URL)
        # NOTE(review): gzip payload written in text mode ('w'); on Windows or
        # Python 3 this should be 'wb'.  'buffer' also shadows the py2 builtin.
        with open(path, 'w') as buffer:
            buffer.write(f.read())
        message = 'Latest transactions saved to {}'.format(path)
        logger.info(message)
        # Read the transactions into pandas dataframe
        with gzip.open(path, 'r') as f:
            d = pd.read_table(f, sep=',', header=None, index_col=0, names=['price', 'volume'])
        os.remove(path)
        d.index = d.index.map(lambda ts: datetime.datetime.fromtimestamp(int(ts)))
        d.index.names = ['DateTime_UTC']
        # Resample raw trades to 1-minute OHLC prices plus summed volume.
        p = pd.DataFrame(d['price'].resample('1Min').ohlc())
        p.columns = ['price_open', 'price_high', 'price_low', 'price_close']
        v = pd.DataFrame(d['volume'].resample('1Min').sum())
        v.columns = ['volume']
        p['volume'] = v['volume']
        unix_timestamps = p.index.map(lambda ts: int(time.mktime(ts.timetuple())))
        p.insert(0, 'Timestamp', unix_timestamps)
        p.to_csv(Generator.dataset_path, sep=',')
        message = 'Dataset sampled and saved to {}'.format(Generator.dataset_path)
        logger.info(message)
    @staticmethod
    def update_gen():
        """Delete any cached dataset and re-download it from scratch."""
        if not Generator.dataset_path:
            Generator.set_dataset_path()
        if os.path.isfile(Generator.dataset_path):
            os.remove(Generator.dataset_path)
        Generator.get_transactions()
    @staticmethod
    def load_gen():
        """Download the dataset only if it is not already cached on disk."""
        if not Generator.dataset_path:
            Generator.set_dataset_path()
        '''
        TODO: Need to do sanity check of the sampled dataset
        '''
        if not os.path.isfile(Generator.dataset_path):
            message = 'Sampled Dataset not found at {}.'.format(Generator.dataset_path) + \
                '\nSetting up the environment for first use.'
            logger.info(message)
            Generator.get_transactions()
    @staticmethod
    def set_dataset_path():
        if not Generator.temp_dir:
            Generator.set_temp_dir()
        Generator.dataset_path = os.path.join(Generator.temp_dir, 'btc.csv')
    @staticmethod
    def set_temp_dir():
        Generator.temp_dir = tempfile.gettempdir()
| StarcoderdataPython |
331921 | <gh_stars>0
from swagger_diff.errors import update_errors, _errors
def test_errors():
    """update_errors should nest slash-separated paths into the shared
    module-level ``_errors`` tree, merging common prefixes."""
    update_errors('hello/world/foo/bar', 'Some error message')
    update_errors('hello/world/foo/baz', 'Another error message')
    # Both paths share the hello/world/foo prefix, so they merge into one subtree.
    assert {'hello': {'world': {'foo': {'bar': 'Some error message', 'baz': 'Another error message'}}}} == _errors
| StarcoderdataPython |
1983680 | #!/usr/bin/env python
"""
Script that starts the SSH agent for a connection based on the domain.
If the agent is already running, it won't start another.
Put something like this in your ~/.ssh/config file:
Match exec ~/bin/start_ssh_agent_by_domain.py --domain=mydomain.com \\
--control=~/.ssh/agent-mydomain-%l.txt \\
--identities-dir=~/.ssh/identities/mydomain/ \\
--socket=/tmp/ssh-%u-mydomain/agent.sock %h
IdentityAgent /tmp/ssh-%u-mydomain/agent.sock
Every time ssh reads the client config file, it will run this script and
determine if you are trying to connect to mydomain.com or a host in that domain
(e.g. myhost.mydomain.com). If the domain matches, then this script looks for
the control file, and if it exists, reads the agent process ID from that file
and checks if the agent is still running. It will start the agent and create
the control file as necessary. This script will return with exit code 0 if the
domain matched and the agent is running, 1 otherwise.
When this script returns with exit code 0, then ssh will use the next config
line to override the SSH_AUTH_SOCK environment variable (if set) and use the
agent identified by the socket name. This allows you to have a separate agent
for that domain containing only the keys in identities-dir.
"""
from __future__ import print_function
import argparse
import os
import re
import subprocess
import sys
def _read_control_file(filename):
    """Parse an ssh-agent control file.

    Example control file contents:

        SSH_AUTH_SOCK=/tmp/ssh-test/agent.sock; export SSH_AUTH_SOCK;
        echo Agent pid 10380;

    Return {'SSH_AUTH_SOCK': <sockfile>, 'SSH_AGENT_PID': <pid>}; raise
    ValueError when either entry is missing.
    """
    patterns = (
        ('SSH_AUTH_SOCK', re.compile(r"SSH_AUTH_SOCK=(.*?);")),
        ('SSH_AGENT_PID', re.compile(r"echo Agent pid (\d+);")),
    )
    parsed = {}
    with open(filename) as handle:
        for line in handle:
            for key, pattern in patterns:
                match = pattern.match(line)
                if match:
                    parsed[key] = match.group(1)
                    break
    for key in ('SSH_AUTH_SOCK', 'SSH_AGENT_PID'):
        if key not in parsed:
            raise ValueError('Invalid control file "{}": Missing key {}'
                             .format(filename, key))
    return parsed
def _check_if_agent_running(env, desired_sock):
    """Return True iff the recorded agent is running and usable via
    ``desired_sock``.

    Raise RuntimeError when a resource conflict means we can neither use the
    recorded agent nor start a fresh one; return False when no agent is
    running and it is safe to start one.
    """
    recorded_sock = env['SSH_AUTH_SOCK']
    recorded_pid = env['SSH_AGENT_PID']
    recorded_sock_exists = os.path.exists(recorded_sock)
    # A zero exit status from ps means the recorded pid is still alive.
    with open(os.devnull, 'wb') as devnull:
        status = subprocess.call(['ps', '-p', recorded_pid],
                                 stdout=devnull, stderr=subprocess.STDOUT)
    agent_alive = (status == 0)
    desired_sock_exists = os.path.exists(desired_sock)
    if (agent_alive
            and desired_sock == recorded_sock
            and desired_sock_exists):
        return True
    if agent_alive and recorded_sock_exists:
        raise RuntimeError("Some other agent is using the control file.")
    if desired_sock_exists:
        raise RuntimeError("Some other agent is using the socket file.")
    return False
def _parse_key_fingerprint(line):
    """Parse one ``ssh-keygen -l`` / ``ssh-add -l`` output line.

    Return (key_size, fingerprint) when the line starts with an integer key
    size followed by a fingerprint field, otherwise None.
    """
    fields = line.split()
    if len(fields) < 2:
        return None
    try:
        size = int(fields[0])
    except ValueError:
        return None
    return (size, fields[1])
def _get_key_fingerprint(key_path):
    """
    Return (key_size, key_hash) for the key file at ``key_path``, or None if
    ssh-keygen fails on it.
    """
    keygen_cmd = ['ssh-keygen', '-l', '-E', 'md5', '-f', key_path]
    with open(os.devnull, 'wb') as devnull:
        try:
            output = subprocess.check_output(keygen_cmd, stderr=devnull)
        except subprocess.CalledProcessError:
            return None
    return _parse_key_fingerprint(output)
def _scan_identities_dir(identities_dir):
    """
    Scan the identity files found in identities_dir.

    Any file with a matching .pub file is assumed to be a private key.

    Return {key_path: fingerprint}
    """
    names = set(os.listdir(identities_dir))
    fingerprints = {}
    for name in sorted(names):
        if not name.endswith('.pub'):
            continue
        private_name = name[:-4]
        if private_name not in names:
            continue
        private_path = os.path.join(identities_dir, private_name)
        fingerprint = _get_key_fingerprint(private_path)
        if fingerprint:
            fingerprints[private_path] = fingerprint
    return fingerprints
def _list_agent_keys(agent_sock):
    """
    Return a list of key fingerprints currently loaded in the agent behind
    ``agent_sock`` (empty when ssh-add fails, e.g. agent not running).
    """
    env = dict(os.environ)
    env['SSH_AUTH_SOCK'] = agent_sock
    list_cmd = ['ssh-add', '-l', '-E', 'md5']
    with open(os.devnull, 'wb') as devnull:
        try:
            output = subprocess.check_output(list_cmd, stderr=devnull, env=env)
        except subprocess.CalledProcessError:
            return []
    keys = []
    for line in output.split(b'\n'):
        parsed = _parse_key_fingerprint(line)
        if parsed:
            keys.append(parsed)
    return keys
def _start_agent(args):
    """Start a new ssh-agent bound to ``args.socket`` and load the identities.

    Writes the agent's environment output to the control file, then adds any
    keys from the identities directory that the agent does not already hold.
    Raises RuntimeError when the socket path is already occupied.
    """
    if os.path.exists(args.socket):
        raise RuntimeError("Agent socket already exists: {}"
                           .format(args.socket))
    cmd = ['ssh-agent', '-s', '-a', args.socket]
    with open(args.control, 'wb') as f:
        subprocess.check_call(cmd, stdout=f)
    # If we reach this point the agent should be running.
    # Load all keys into the agent that haven't already been loaded.
    current_key_set = set(_list_agent_keys(args.socket))
    identity_map = _scan_identities_dir(args.identities_dir)
    key_paths_to_add = [i[0] for i in identity_map.items()
                        if i[1] not in current_key_set]
    if key_paths_to_add:
        cmd = ['ssh-add', '-q']
        cmd.extend(key_paths_to_add)
        env = os.environ.copy()
        # BUG FIX: this previously read the undefined name ``agent_sock``,
        # raising NameError whenever new keys needed loading; the agent we
        # just started listens on args.socket.
        env['SSH_AUTH_SOCK'] = args.socket
        subprocess.check_call(cmd, env=env)
def _ensure_agent_running_and_keys_loaded(args):
    """Start the agent (and load keys) unless a usable one is already up."""
    try:
        control_size = os.stat(args.control).st_size
    except OSError:  # assuming file not found
        control_size = 0
    if control_size > 0:
        # A control file exists: reuse its agent if it is still alive.
        env = _read_control_file(args.control)
        if not _check_if_agent_running(env, args.socket):
            _start_agent(args)
    else:
        _start_agent(args)
def start_ssh_agent_by_domain(args):
    """Ensure an agent is running when *args.hostname* lies inside *args.domain*.

    Returns 0 when the host's trailing DNS labels match the domain (after
    ensuring the agent is up), 1 otherwise.
    """
    host_labels = args.hostname.lower().split('.')
    domain_labels = args.domain.lower().split('.')
    if host_labels[-len(domain_labels):] == domain_labels:
        _ensure_agent_running_and_keys_loaded(args)
        return 0
    # Host is outside the configured domain; leave the agent alone.
    return 1
def main():
    """Command-line entry point: validate arguments and dispatch.

    Returns a process exit status: 0 on success, 1 on missing/invalid
    arguments or a non-matching domain.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--domain', help="Domain name to match")
    parser.add_argument('--control', help="Agent control file")
    parser.add_argument('--identities-dir',
                        help="Directory containing private and public keys")
    parser.add_argument('--socket', help="Agent unix-domain socket")
    parser.add_argument('hostname', metavar='<hostname>',
                        help="SSH destination host name")
    args = parser.parse_args()
    # The flags below are effectively required; they are validated by hand
    # so each failure produces its own message and a clean exit code.
    if not args.domain:
        print("Missing --domain argument", file=sys.stderr)
        return 1
    if not args.control:
        print("Missing --control argument", file=sys.stderr)
        return 1
    args.control = os.path.expanduser(args.control)
    if not args.socket:
        print("Missing --socket argument", file=sys.stderr)
        return 1
    if not args.identities_dir:
        print("Missing --identities-dir argument", file=sys.stderr)
        return 1
    args.identities_dir = os.path.expanduser(args.identities_dir)
    if not os.path.isdir(args.identities_dir):
        print("identities-dir does not exist:", args.identities_dir)
        return 1
    return start_ssh_agent_by_domain(args)
# Run as a script: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
12812139 | <gh_stars>0
"""
Problem 22
-----------
Using names.txt (right click and 'Save Link/Target As...'),
a 46K text file containing over five-thousand first names,
begin by sorting it into alphabetical order.
Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list
to obtain a name score.
For example, when the list is sorted into alphabetical order,
COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of 938 × 53 = 49714.
What is the total of all the name scores in the file?
"""
with open('data/022.txt') as f:
names = [line.strip() for line in f]
f.closed
names.sort()
total = 0
pos = 0
for name in names:
pos += 1
chars = [ord(c) - 64 for c in name]
total += pos * sum(chars)
print (total)
| StarcoderdataPython |
6586313 | import luigi
import time
import os
import subprocess
from tasks.readCleaning.cleanedReadQC import *
class GlobalParameter(luigi.Config):
    """Project-wide luigi configuration (values come from the luigi config file).

    Holds the raw-read input directory and filename suffix for each
    sequencing platform, plus global resource settings shared by all tasks.
    """
    pe_read_dir=luigi.Parameter()      # paired-end read directory
    mp_read_dir=luigi.Parameter()      # mate-pair read directory
    pac_read_dir=luigi.Parameter()     # PacBio read directory
    ont_read_dir=luigi.Parameter()     # Oxford Nanopore read directory
    pe_read_suffix=luigi.Parameter()   # e.g. file extension of PE fastq files
    mp_read_suffix=luigi.Parameter()
    pac_read_suffix=luigi.Parameter()
    ont_read_suffix=luigi.Parameter()
    projectName=luigi.Parameter()      # output tree root under the CWD
    threads = luigi.Parameter()        # CPU threads passed to external tools
    maxMemory = luigi.Parameter()      # Java heap size (GB) for BBTools
    adapter = luigi.Parameter()        # adapter file (used by other tasks)
def run_cmd(cmd):
    """Execute *cmd* through /bin/bash and return its captured stdout as text."""
    process = subprocess.Popen(cmd,
                               bufsize=-1,
                               shell=True,
                               universal_newlines=True,
                               stdout=subprocess.PIPE,
                               executable='/bin/bash')
    stdout, _ = process.communicate()
    return stdout
def createFolder(directory):
    """Create *directory* (including parents) if it does not already exist.

    Failures are reported on stdout instead of raised, preserving the
    original best-effort behaviour.
    """
    try:
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() / os.makedirs() sequence.
        os.makedirs(directory, exist_ok=True)
    except OSError:
        print ('Error: Creating directory. ' + directory)
createFolder("task_logs")
class reformat(luigi.Task):
    """Validate raw reads for one sample with BBTools ``reformat.sh``.

    Paired layouts (pe / mp) are checked with ``verifypaired=t`` and broken
    reads are dropped; long-read layouts (ont / pac) only drop broken reads.
    Verified FASTQ files are written under
    ``<cwd>/<projectName>/ReadQC/VerifiedReads/<PLATFORM>-Reads/``.
    """
    # Input locations, suffixes and resource limits come from luigi config.
    paired_end_read_dir = GlobalParameter().pe_read_dir
    mate_pair_read_dir = GlobalParameter().mp_read_dir
    nanopore_read_dir = GlobalParameter().ont_read_dir
    pacbio_read_dir = GlobalParameter().pac_read_dir
    paired_end_read_suffix = GlobalParameter().pe_read_suffix
    mate_pair_read_suffix = GlobalParameter().mp_read_suffix
    nanopore_read_suffix = GlobalParameter().ont_read_suffix
    pacbio_read_suffix = GlobalParameter().pac_read_suffix
    threads = GlobalParameter().threads
    maxMemory = GlobalParameter().maxMemory
    projectName = GlobalParameter().projectName
    sampleName = luigi.Parameter(description="name of the sample to be analyzed. (string)")
    seq_platforms = luigi.ChoiceParameter(description="Choose From['pe: paired-end','pe-mp: paired-end and mate-pair',pe-ont: paired-end and nanopore, pe-pac: paired-end and pacbio, ont: nanopore, pac: pacbio]",
                                          choices=["pe", "mp","pe-mp", "pe-ont", "pe-pac","ont","pac"], var_type=str)

    def output(self):
        """Declare the verified-read FASTQ targets for the selected platform(s)."""
        pe_verified_read_folder = os.path.join(os.getcwd(), self.projectName,"ReadQC","VerifiedReads","PE-Reads" + "/")
        mp_verified_read_folder = os.path.join(os.getcwd(), self.projectName,"ReadQC","VerifiedReads","MP-Reads" + "/")
        ont_verified_read_folder = os.path.join(os.getcwd(), self.projectName,"ReadQC","VerifiedReads","ONT-Reads" + "/")
        pac_verified_read_folder = os.path.join(os.getcwd(), self.projectName,"ReadQC","VerifiedReads","PAC-Reads" + "/")
        ###############################################################################################
        if self.seq_platforms == "pe":
            return {'out1': luigi.LocalTarget(pe_verified_read_folder + self.sampleName + "_R1.fastq"),
                    'out2': luigi.LocalTarget(pe_verified_read_folder + self.sampleName + "_R2.fastq")}
        if self.seq_platforms == "ont":
            return {'out1': luigi.LocalTarget(ont_verified_read_folder + self.sampleName + ".fastq")}
        if self.seq_platforms == "pac":
            return {'out1': luigi.LocalTarget(pac_verified_read_folder + self.sampleName + ".fastq")}
        if self.seq_platforms == "mp":
            return {'out1': luigi.LocalTarget(mp_verified_read_folder + self.sampleName + "_R1.fastq"),
                    'out2': luigi.LocalTarget(mp_verified_read_folder + self.sampleName + "_R2.fastq")}
        if self.seq_platforms == "pe-mp":
            return {'out1': luigi.LocalTarget(pe_verified_read_folder + self.sampleName + "_R1.fastq"),
                    'out2': luigi.LocalTarget(pe_verified_read_folder + self.sampleName + "_R2.fastq"),
                    'out3': luigi.LocalTarget(mp_verified_read_folder + self.sampleName + "_R1.fastq"),
                    'out4': luigi.LocalTarget(mp_verified_read_folder + self.sampleName + "_R2.fastq")}
        if self.seq_platforms == "pe-ont":
            return {'out1': luigi.LocalTarget(pe_verified_read_folder + self.sampleName + "_R1.fastq"),
                    'out2': luigi.LocalTarget(pe_verified_read_folder + self.sampleName + "_R2.fastq"),
                    'out3': luigi.LocalTarget(ont_verified_read_folder + self.sampleName + ".fastq")
                    }
        if self.seq_platforms == "pe-pac":
            return {'out1': luigi.LocalTarget(pe_verified_read_folder + self.sampleName + "_R1.fastq"),
                    'out2': luigi.LocalTarget(pe_verified_read_folder + self.sampleName + "_R2.fastq"),
                    'out3': luigi.LocalTarget(pac_verified_read_folder + self.sampleName + ".fastq")
                    }

    def run(self):
        """Build and execute the reformat.sh shell command(s) for the platform(s)."""
        pe_verified_read_folder = os.path.join(os.getcwd(),self.projectName,"ReadQC","VerifiedReads","PE-Reads" + "/")
        mp_verified_read_folder = os.path.join(os.getcwd(),self.projectName,"ReadQC","VerifiedReads","MP-Reads" + "/")
        ont_verified_read_folder = os.path.join(os.getcwd(),self.projectName,"ReadQC","VerifiedReads","ONT-Reads" + "/")
        pac_verified_read_folder = os.path.join(os.getcwd(),self.projectName,"ReadQC","VerifiedReads","PAC-Reads" + "/")
        pe_verification_log_folder = os.path.join(os.getcwd(), self.projectName,"log", "ReadQC", "VerifiedReads" ,"PE-Reads" + "/")
        mp_verification_log_folder = os.path.join(os.getcwd(), self.projectName,"log", "ReadQC", "VerifiedReads","MP-Reads" + "/")
        ont_verification_log_folder = os.path.join(os.getcwd(), self.projectName,"log", "ReadQC", "VerifiedReads" ,"ONT-Reads"+ "/")
        pac_verification_log_folder = os.path.join(os.getcwd(), self.projectName,"log", "ReadQC", "VerifiedReads" ,"PAC-Reads" + "/")
        # Paired-end: create output/log folders, then verify read pairing.
        cmd_verify_pe ="[ -d {pe_verified_read_folder} ] || mkdir -p {pe_verified_read_folder}; mkdir -p {pe_verification_log_folder}; " \
                       "reformat.sh " \
                       "-Xmx{Xmx}g " \
                       "threads={cpu} " \
                       "tossbrokenreads=t " \
                       "verifypaired=t " \
                       "in1={pe_read_dir}{sampleName}_R1.{pe_read_suffix} " \
                       "in2={pe_read_dir}{sampleName}_R2.{pe_read_suffix} " \
                       "out={pe_verified_read_folder}{sampleName}_R1.fastq " \
                       "out2={pe_verified_read_folder}{sampleName}_R2.fastq " \
                       " 2>&1 | tee {pe_verification_log_folder}{sampleName}_pe_reformat_run.log "\
            .format(Xmx=GlobalParameter().maxMemory,
                    cpu=GlobalParameter().threads,
                    pe_read_dir=GlobalParameter().pe_read_dir,
                    pe_read_suffix=GlobalParameter().pe_read_suffix,
                    sampleName=self.sampleName,
                    pe_verified_read_folder=pe_verified_read_folder,
                    pe_verification_log_folder=pe_verification_log_folder)
        ##################
        # Mate-pair: same verification as paired-end.
        # NOTE(review): the log file name below says "_pe_" for MP reads --
        # looks like a copy-paste; confirm before renaming.
        cmd_verify_mp = "[ -d {mp_verified_read_folder} ] || mkdir -p {mp_verified_read_folder}; mkdir -p {mp_verification_log_folder}; " \
                        "reformat.sh " \
                        "-Xmx{Xmx}g " \
                        "threads={cpu} " \
                        "verifypaired=t " \
                        "tossbrokenreads=t " \
                        "in1={mp_read_dir}{sampleName}_R1.{mp_read_suffix} " \
                        "in2={mp_read_dir}{sampleName}_R2.{mp_read_suffix} " \
                        "out={mp_verified_read_folder}{sampleName}_R1.fastq " \
                        "out2={mp_verified_read_folder}{sampleName}_R2.fastq " \
                        " 2>&1 | tee {mp_verification_log_folder}{sampleName}_pe_reformat_run.log " \
            .format(Xmx=GlobalParameter().maxMemory,
                    cpu=GlobalParameter().threads,
                    mp_read_dir=self.mate_pair_read_dir,
                    mp_read_suffix=self.mate_pair_read_suffix,
                    sampleName=self.sampleName,
                    mp_verified_read_folder=mp_verified_read_folder,
                    mp_verification_log_folder=mp_verification_log_folder)
        ##################
        # Nanopore: single-ended, only drop broken reads.
        cmd_verify_ont = "[ -d {ont_verified_read_folder} ] || mkdir -p {ont_verified_read_folder}; mkdir -p {ont_verification_log_folder};" \
                         "reformat.sh " \
                         "-Xmx{Xmx}g " \
                         "threads={cpu} " \
                         "tossbrokenreads=t " \
                         "in1={ont_read_dir}{sampleName}.{ont_read_suffix} " \
                         "out={ont_verified_read_folder}{sampleName}.fastq " \
                         " 2>&1 | tee {ont_verification_log_folder}{sampleName}_reformat_run.log " \
            .format(Xmx=GlobalParameter().maxMemory,
                    cpu=GlobalParameter().threads,
                    ont_read_dir=GlobalParameter().ont_read_dir,
                    ont_read_suffix=GlobalParameter().ont_read_suffix,
                    sampleName=self.sampleName,
                    ont_verified_read_folder=ont_verified_read_folder,
                    ont_verification_log_folder=ont_verification_log_folder)
        # PacBio: single-ended, only drop broken reads.
        cmd_verify_pac = "[ -d {pac_verified_read_folder} ] || mkdir -p {pac_verified_read_folder}; mkdir -p {pac_verification_log_folder};" \
                         "reformat.sh " \
                         "-Xmx{Xmx}g " \
                         "threads={cpu} " \
                         "tossbrokenreads=t " \
                         "in1={pac_read_dir}{sampleName}.{pac_read_suffix} " \
                         "out={pac_verified_read_folder}{sampleName}.fastq " \
                         " 2>&1 | tee {pac_verification_log_folder}{sampleName}_reformat_run.log " \
            .format(Xmx=GlobalParameter().maxMemory,
                    cpu=GlobalParameter().threads,
                    pac_read_dir=GlobalParameter().pac_read_dir,
                    pac_read_suffix=GlobalParameter().pac_read_suffix,
                    sampleName=self.sampleName,
                    pac_verified_read_folder=pac_verified_read_folder,
                    pac_verification_log_folder=pac_verification_log_folder)
        # Dispatch: run the command(s) matching the requested platform combo.
        if self.seq_platforms == "pe":
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_pe)
            print(run_cmd(cmd_verify_pe))
        if self.seq_platforms == "mp":
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_mp)
            print(run_cmd(cmd_verify_mp))
        if self.seq_platforms == "ont":
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_ont)
            print(run_cmd(cmd_verify_ont))
        if self.seq_platforms == "pac":
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_pac)
            print(run_cmd(cmd_verify_pac))
        if self.seq_platforms == "pe-mp":
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_pe)
            print(run_cmd(cmd_verify_pe))
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_mp)
            print(run_cmd(cmd_verify_mp))
        if self.seq_platforms == "pe-ont":
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_pe)
            print(run_cmd(cmd_verify_pe))
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_ont)
            print(run_cmd(cmd_verify_ont))
        if self.seq_platforms == "pe-pac":
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_pe)
            print(run_cmd(cmd_verify_pe))
            print("****** NOW RUNNING COMMAND ******: " + cmd_verify_pac)
            print(run_cmd(cmd_verify_pac))
class reformatReads(luigi.Task):
    """Wrapper task: fan out one :class:`reformat` task per listed sample.

    Sample names are read from ``sample_list/<platform>_samples.lst`` files
    in the working directory, one name per line.
    """
    seq_platforms = luigi.ChoiceParameter(description="Choose From['pe: paired-end','pe-mp: paired-end and mate-pair',pe-ont: paired-end and nanopore, pe-pac: paired-end and pacbio",
                                          choices=["pe", "mp", "pe-mp", "pe-ont", "pe-pac"], var_type=str)
    #seq_platforms=GlobalParameter().seq_platforms

    def requires(self):
        """Yield a reformat() dependency for every sample of every platform."""
        if self.seq_platforms == "pe":
            return [
                [reformat(seq_platforms=self.seq_platforms,sampleName=i)
                 for i in [line.strip() for line in
                           open((os.path.join(os.getcwd(), "sample_list", "pe_samples.lst")))]]
            ]
        if self.seq_platforms == "mp":
            return [
                [reformat(seq_platforms=self.seq_platforms,sampleName=i)
                 for i in [line.strip() for line in
                           open((os.path.join(os.getcwd(), "sample_list", "mp_samples.lst")))]]
            ]
        if self.seq_platforms == "pe-mp":
            return [
                [reformat(seq_platforms="pe",sampleName=i)
                 for i in [line.strip()
                           for line in
                           open((os.path.join(os.getcwd(), "sample_list","pe_samples.lst")))]],
                [reformat(seq_platforms="mp",sampleName=i)
                 for i in [line.strip()
                           for line in
                           open((os.path.join(os.getcwd(), "sample_list","mp_samples.lst")))]]
            ]
        if self.seq_platforms == "pe-ont":
            return [
                [reformat(seq_platforms="pe",sampleName=i)
                 for i in [line.strip()
                           for line in
                           open((os.path.join(os.getcwd(), "sample_list","pe_samples.lst")))]],
                [reformat(seq_platforms="ont",sampleName=i)
                 for i in [line.strip()
                           for line in
                           open((os.path.join(os.getcwd(), "sample_list","ont_samples.lst")))]]
            ]
        if self.seq_platforms == "pe-pac":
            return [
                [reformat(seq_platforms="pe",sampleName=i)
                 for i in [line.strip()
                           for line in
                           open((os.path.join(os.getcwd(), "sample_list","pe_samples.lst")))]],
                [reformat(seq_platforms="pac",sampleName=i)
                 for i in [line.strip()
                           for line in
                           open((os.path.join(os.getcwd(), "sample_list","pac_samples.lst")))]]
            ]

    def output(self):
        """A timestamped marker file; a fresh name per run, so the task always re-runs."""
        timestamp = time.strftime('%Y%m%d.%H%M%S', time.localtime())
        return luigi.LocalTarget(os.path.join(os.getcwd(),"task_logs",'task.validate.reads.complete.{t}'.format(t=timestamp)))

    def run(self):
        """Write the completion marker once all reformat() dependencies finished."""
        timestamp = time.strftime('%Y%m%d.%H%M%S', time.localtime())
        with self.output().open('w') as outfile:
            outfile.write('read validation finished at {t}'.format(t=timestamp))
import OpenURL
import Rest

# Re-export the two submodules for ``from package import *``
# (module.__name__ is simply the string "OpenURL" / "Rest").
__all__ = [OpenURL.__name__, Rest.__name__]
| StarcoderdataPython |
1611066 | from airflow import DAG
from datetime import datetime, timedelta
from airflow.providers.amazon.aws.operators.ecs import ECSOperator
# Default task arguments applied to every operator in the DAG.
default_args = {
    'owner': 'ubuntu',
    'start_date': datetime(2019, 8, 14),
    'retry_delay': timedelta(seconds=60*60)  # one hour between retry attempts
}
# Manually-triggered DAG (schedule_interval=None); catchup=False means no
# historical runs are back-filled for past start_date intervals.
with DAG('hybrid_airflow_ec2_dag', catchup=False, default_args=default_args, schedule_interval=None) as dag:
    # Run the "apache-airflow" ECS task definition on the EC2-backed cluster,
    # streaming the container's CloudWatch logs into the Airflow task log.
    cloudquery = ECSOperator(
        task_id="cloudquery",
        dag=dag,
        cluster="hybrid-airflow-cluster",
        task_definition="apache-airflow",
        overrides={ },  # no container overrides; use the task definition as-is
        launch_type="EC2",
        awslogs_group="/ecs/hybrid-airflow",
        awslogs_stream_prefix="ecs/Hybrid-ELT-TaskDef"
    )
    cloudquery
| StarcoderdataPython |
4832600 | <filename>PythonTutor/session-2/variable.py
"""
Session: 2
Topic: Variable
"""
var = 'Hello World!'
print (var)
print ('My variable value is {} '.format(var)) | StarcoderdataPython |
3245026 | <filename>gym_goal/envs/goal_env.py
"""
Robot Soccer Goal domain by <NAME> et al. [2016], Reinforcement Learning with Parameterized Actions
Based on code from https://github.com/WarwickMasson/aaai-goal
Author: <NAME>
June 2018
"""
import numpy as np
import math
import gym
import pygame
from gym import spaces, error
from gym.utils import seeding
import sys
from .config import PLAYER_CONFIG, BALL_CONFIG, GOAL_AREA_LENGTH, GOAL_AREA_WIDTH, GOAL_WIDTH, GOAL_DEPTH, KICKABLE, \
INERTIA_MOMENT, MINPOWER, MAXPOWER, PITCH_LENGTH, PITCH_WIDTH, CATCHABLE, CATCH_PROBABILITY, SHIFT_VECTOR, \
SCALE_VECTOR, LOW_VECTOR, HIGH_VECTOR
from .util import bound, bound_vector, angle_position, angle_between, angle_difference, angle_close, norm_angle, \
vector_to_tuple
# actions
KICK = "kick"
DASH = "dash"
TURN = "turn"
TO_BALL = "toball"
SHOOT_GOAL = "shootgoal"
TURN_BALL = "turnball"
DRIBBLE = "dribble"
KICK_TO = "kickto"
# Discrete action index -> high-level action primitive.
# NOTE(review): indices 1 and 2 both map to SHOOT_GOAL, each with its own
# 1-D parameter box below -- presumably intentional; confirm against paper.
ACTION_LOOKUP = {
    0: KICK_TO,
    1: SHOOT_GOAL,
    2: SHOOT_GOAL,
}
# field bounds seem to be 0, PITCH_LENGTH / 2, -PITCH_WIDTH / 2, PITCH_WIDTH / 2
# Per-action continuous parameter bounds (same index order as ACTION_LOOKUP).
PARAMETERS_MIN = [
    np.array([0, -PITCH_WIDTH / 2]),  # -15
    np.array([-GOAL_WIDTH / 2]),  # -7.01
    np.array([-GOAL_WIDTH / 2]),  # -7.01
]
PARAMETERS_MAX = [
    np.array([PITCH_LENGTH, PITCH_WIDTH / 2]),  # 40, 15
    np.array([GOAL_WIDTH / 2]),  # 7.01
    np.array([GOAL_WIDTH / 2]),  # 7.01
]
def norm(vec2d):
    """Euclidean length of a 2-D vector.

    Hand-rolled instead of numpy.linalg.norm because the vectors are always
    2-D and this is much faster; the exact sqrt(x*x + y*y) expression is kept
    so results stay bit-identical for seeded runs.
    """
    assert len(vec2d) == 2
    x, y = vec2d[0], vec2d[1]
    return math.sqrt(x * x + y * y)
class GoalEnv(gym.Env):
    """Parameterised-action Robot Soccer Goal environment (striker vs. keeper).

    Observations are (state_vector, internal_step_count) tuples; actions are
    (discrete_index, per-action parameter tuple) pairs, see ACTION_LOOKUP.
    """
    # metadata = {'render.modes': ['human', 'rgb_array']}
    metadata = {'render.modes': ['human']}  # cannot use rgb_array at the moment due to frame skip between actions

    _VISUALISER_SCALE_FACTOR = 20
    _VISUALISER_DELAY = 120  # fps

    def __init__(self):
        """ The entities are set up and added to a space. """
        self.np_random = None
        self.entities = []
        self.player = None
        self.ball = None
        self.goalie = None
        self.states = []
        self.render_states = []
        self.window = None
        self.time = 0
        self.max_time = 100
        num_actions = len(ACTION_LOOKUP)
        self.action_space = spaces.Tuple((
            spaces.Discrete(num_actions),  # actions
            spaces.Tuple(  # parameters
                tuple(spaces.Box(PARAMETERS_MIN[i], PARAMETERS_MAX[i], dtype=np.float32) for i in range(num_actions))
            )
        ))
        self.observation_space = spaces.Tuple((
            # spaces.Box(low=0., high=1., shape=self.get_state().shape, dtype=np.float32),  # scaled states
            spaces.Box(low=LOW_VECTOR, high=HIGH_VECTOR, dtype=np.float32),  # unscaled states
            spaces.Discrete(200),  # internal time steps (200 limit is an estimate)
        ))
        self.seed()  # also performs the initial reset (see seed())

    def step(self, action):
        """
        Take a full, stabilised update.

        Parameters
        ----------
        action (ndarray) :

        Returns
        -------
        ob, reward, episode_over, info : tuple
            ob (object) :
            reward (float) :
            terminal (bool) :
            info (dict) :
        """
        act_index = action[0]
        act = ACTION_LOOKUP[act_index]
        param = action[1][act_index]
        param = np.clip(param, PARAMETERS_MIN[act_index], PARAMETERS_MAX[act_index])

        steps = 0
        self.time += 1
        if self.time == self.max_time:
            # Episode timeout: penalise by remaining distance to goal.
            reward = -self.ball.goal_distance()
            end_episode = True
            state = self.get_state()
            return (state, 0), reward, end_episode, {}
        end_episode = False
        run = True
        reward = 0.
        # Keep simulating micro-steps until the chosen macro-action settles
        # (each branch below defines that action's stopping condition).
        while run:
            steps += 1
            reward, end_episode = self._update(act, param)
            run = not end_episode
            if run:
                run = not self.player.can_kick(self.ball)
                if act == DRIBBLE:
                    run = not self.ball.close_to(param) or run
                elif act == KICK_TO:
                    run = norm(self.ball.velocity) > 0.1 or run
                elif act == TURN_BALL:
                    theta = angle_between(self.player.position, self.ball.position)
                    run = not angle_close(theta, param[0]) or run
                elif act == SHOOT_GOAL:
                    run = not end_episode
                else:
                    run = False
        state = self.get_state()
        # Observation is a (state, internal-steps) pair, per observation_space.
        return (state, steps), reward, end_episode, {}

    def _update(self, act, param):
        """
        Performs a single transition with the given action,
        returns the reward and terminal status.
        """
        self.states.append([
            self.player.position.copy(),
            self.player.orientation,
            self.goalie.position.copy(),
            self.goalie.orientation,
            self.ball.position.copy()])
        self.render_states.append(self.states[-1])
        self._perform_action(act, param, self.player)
        self.goalie.move(self.ball, self.player)
        for entity in self.entities:
            entity.update()
        self._resolve_collisions()
        return self._terminal_check()

    def reset(self):
        """Place player, ball and goalie in their start configuration."""
        # TODO: implement reset for each entity to avoid creating new objects and reduce duplicate code
        initial_player = np.array((0, self.np_random.uniform(-PITCH_WIDTH / 2, PITCH_WIDTH / 2)))
        angle = angle_between(initial_player, np.array((PITCH_LENGTH / 2, 0)))
        self.player = Player(initial_player, angle)
        MACHINE_EPSILON = 1e-12  # ensure always kickable on first state
        # fixes seeded runs changing between machines due to minor precision differences,
        # specifically from angle_position due to cos and sin approximations
        initial_ball = initial_player + (KICKABLE - MACHINE_EPSILON) * angle_position(angle)
        #initial_ball = initial_player + KICKABLE * angle_position(angle)
        self.ball = Ball(initial_ball)
        initial_goalie = self._keeper_target(initial_ball)
        angle2 = angle_between(initial_goalie, initial_ball)
        self.goalie = Goalie(initial_goalie, angle2)
        self.entities = [self.player, self.goalie, self.ball]
        self._update_entity_seeds()
        self.states = []
        self.render_states = []
        self.time = 0
        self.states.append([
            self.player.position.copy(),
            self.player.orientation,
            self.goalie.position.copy(),
            self.goalie.orientation,
            self.ball.position.copy()])
        self.render_states.append(self.states[-1])
        return self.get_state(), 0

    def seed(self, seed=None):
        # NOTE: seeding also resets the environment, so all freshly created
        # entities receive the new RNG via _update_entity_seeds().
        self.np_random, seed = seeding.np_random(seed)
        self.reset()
        self._update_entity_seeds()
        return [seed]

    def _update_entity_seeds(self):
        # will be empty at initialisation, call again after creating all entities
        for entity in self.entities:
            entity.np_random = self.np_random

    @staticmethod
    def _keeper_line(ball):
        """ Finds the line the keeper wants to stay to. """
        grad = -ball[1] / (PITCH_LENGTH / 2 - ball[0])
        yint = ball[1] - grad * ball[0]
        return grad, yint

    def _keeper_target(self, ball):
        """ Target the keeper wants to move towards. """
        grad, yint = self._keeper_line(ball)
        if ball[0] < PITCH_LENGTH / 2 - GOAL_AREA_LENGTH:
            xval = ball[0]
        else:
            if ball[1] < -GOAL_AREA_WIDTH / 2:
                xval = (-GOAL_AREA_WIDTH / 2 - yint) / grad
            else:
                xval = (GOAL_AREA_WIDTH / 2 - yint) / grad
        # Clamp the target into the goal area.
        xval = bound(xval, PITCH_LENGTH / 2 - GOAL_AREA_LENGTH, PITCH_LENGTH / 2)
        yval = bound(grad * xval + yint, -GOAL_AREA_WIDTH / 2, GOAL_AREA_WIDTH / 2)
        return np.array((xval, yval))

    def get_state(self):
        """ Returns the representation of the current state. """
        # 14-dim vector: player pos/vel/orientation, goalie pos/vel/orientation,
        # ball pos/vel.
        state = np.concatenate((
            self.player.position,
            self.player.velocity,
            [self.player.orientation],
            self.goalie.position,
            self.goalie.velocity,
            [self.goalie.orientation],
            self.ball.position,
            self.ball.velocity))
        #return self.scale_state(state)
        return state

    def _load_from_state(self, state):
        """Overwrite all entity state from a flat 14-dim state vector."""
        assert len(state) == len(self.get_state())
        self.player.position[0] = state[0]
        self.player.position[1] = state[1]
        self.player.velocity[0] = state[2]
        self.player.velocity[1] = state[3]
        self.player.orientation = state[4]
        self.goalie.position[0] = state[5]
        self.goalie.position[1] = state[6]
        self.goalie.velocity[0] = state[7]
        self.goalie.velocity[1] = state[8]
        self.goalie.orientation = state[9]
        self.ball.position[0] = state[10]
        self.ball.position[1] = state[11]
        self.ball.velocity[0] = state[12]
        self.ball.velocity[1] = state[13]

    def _perform_action(self, act, parameters, agent):
        """ Applies for selected action for the given agent. """
        if act == KICK:
            agent.kick_ball(self.ball, parameters[0], parameters[1])
        elif act == DASH:
            agent.dash(parameters[0])
        elif act == TURN:
            agent.turn(parameters[0])
        elif act == TO_BALL:
            agent.to_ball(self.ball)
        elif act == SHOOT_GOAL:
            agent.shoot_goal(self.ball, parameters[0])
        elif act == TURN_BALL:
            agent.turn_ball(self.ball, parameters[0])
        elif act == DRIBBLE:
            agent.dribble(self.ball, parameters)
        elif act == KICK_TO:
            agent.kick_to(self.ball, parameters[0])
        else:
            raise error.InvalidAction("Action not recognised: ", act)

    def _resolve_collisions(self):
        """ Shift apart all colliding entities with one pass. """
        for index, entity1 in enumerate(self.entities):
            for entity2 in self.entities[index + 1:]:
                if entity1.colliding(entity2):
                    entity1.decollide(entity2)

    def _terminal_check(self):
        """ Determines if the episode is ended, and the reward. """
        if self.ball.in_net():
            end_episode = True
            reward = 50
        elif self.goalie.can_catch(self.ball) or not self.ball.in_field():
            end_episode = True
            reward = -self.ball.goal_distance()
        else:
            end_episode = False
            reward = 0
        if end_episode:
            # Record the final frame so renders include the terminal state.
            self.states.append([
                self.player.position.copy(),
                self.player.orientation,
                self.goalie.position.copy(),
                self.goalie.orientation,
                self.ball.position.copy()])
        return reward, end_episode

    def _is_stable(self):
        """ Determines whether objects have stopped moving. """
        speeds = [norm(entity.velocity) for entity in self.entities]
        return max(speeds) < 0.1

    @staticmethod
    def scale_state(state):
        """ Scale state variables between 0 and 1. """
        scaled_state = (state + SHIFT_VECTOR) / SCALE_VECTOR
        return scaled_state

    @staticmethod
    def unscale_state(scaled_state):
        """ Unscale state variables. """
        state = (scaled_state * SCALE_VECTOR) - SHIFT_VECTOR
        return state

    def __draw_internal_state(self, internal_state, fade=False):
        """ Draw the field and players. """
        player_position = internal_state[0]
        player_orientation = internal_state[1]
        goalie_position = internal_state[2]
        goalie_orientation = internal_state[3]
        ball_position = internal_state[4]
        ball_size = BALL_CONFIG['SIZE']
        self.window.blit(self.__background, (0, 0))
        # Draw goal and penalty areas
        length = self.__visualiser_scale(PITCH_LENGTH / 2)
        width = self.__visualiser_scale(PITCH_WIDTH)
        self.__draw_vertical(length, 0, width)
        self.__draw_box(GOAL_AREA_WIDTH, GOAL_AREA_LENGTH)
        # self.draw_box(PENALTY_AREA_WIDTH, PENALTY_AREA_LENGTH)
        depth = length + self.__visualiser_scale(GOAL_DEPTH)
        self.__draw_horizontal(width / 2 - self.__visualiser_scale(GOAL_WIDTH / 2), length, depth)
        self.__draw_horizontal(width / 2 + self.__visualiser_scale(GOAL_WIDTH / 2), length, depth)
        # self.draw_radius(vector(0, 0), CENTRE_CIRCLE_RADIUS)
        # Draw Players
        self.__draw_player(player_position, player_orientation, self.__white)
        if not fade:
            self.__draw_radius(player_position, KICKABLE)
        self.__draw_player(goalie_position, goalie_orientation, self.__red)
        if not fade:
            self.__draw_radius(goalie_position, CATCHABLE)
        # Draw ball
        self.__draw_entity(ball_position, ball_size, self.__black)
        pygame.display.update()

    def __visualiser_scale(self, value):
        ''' Scale up a value. '''
        return int(self._VISUALISER_SCALE_FACTOR * value)

    def __upscale(self, position):
        ''' Maps a simulator position to a field position. '''
        pos1 = self.__visualiser_scale(position[0])
        pos2 = self.__visualiser_scale(position[1] + PITCH_WIDTH / 2)
        return np.array([pos1, pos2])

    def __draw_box(self, area_width, area_length):
        """ Draw a box at the goal line. """
        lower_corner = self.__visualiser_scale(PITCH_WIDTH / 2 - area_width / 2)
        upper_corner = lower_corner + self.__visualiser_scale(area_width)
        line = self.__visualiser_scale(PITCH_LENGTH / 2 - area_length)
        self.__draw_vertical(line, lower_corner, upper_corner)
        self.__draw_horizontal(lower_corner, line, self.__visualiser_scale(PITCH_LENGTH / 2))
        self.__draw_horizontal(upper_corner, line, self.__visualiser_scale(PITCH_LENGTH / 2))

    def __draw_player(self, position, orientation, colour):
        ''' Draw a player with given position and orientation. '''
        size = PLAYER_CONFIG['SIZE']
        self.__draw_entity(position, size, colour)
        radius_end = size * angle_position(orientation)
        pos = vector_to_tuple(self.__upscale(position))
        end = vector_to_tuple(self.__upscale(position + radius_end))
        pygame.draw.line(self.window, self.__black, pos, end)

    def __draw_radius(self, position, radius):
        """ Draw an empty circle. """
        pos = vector_to_tuple(self.__upscale(position))
        radius = self.__visualiser_scale(radius)
        pygame.draw.circle(self.window, self.__white, pos, radius, 1)

    def __draw_entity(self, position, size, colour):
        """ Draws an entity as a ball. """
        pos = vector_to_tuple(self.__upscale(position))
        radius = self.__visualiser_scale(size)
        pygame.draw.circle(self.window, colour, pos, radius)

    def __draw_horizontal(self, yline, xline1, xline2):
        """ Draw a horizontal line. """
        pos1 = (xline1, yline)
        pos2 = (xline2, yline)
        pygame.draw.line(self.window, self.__white, pos1, pos2)

    def __draw_vertical(self, xline, yline1, yline2):
        """ Draw a vertical line. """
        pos1 = (xline, yline1)
        pos2 = (xline, yline2)
        pygame.draw.line(self.window, self.__white, pos1, pos2)

    def __draw_render_states(self):
        """
        Draw the internal states from the last action.
        """
        length = len(self.render_states)
        for i in range(0, length):
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.display.quit()
                    pygame.quit()
                    sys.exit()
            self.__draw_internal_state(self.render_states[i])
            self.__clock.tick(self._VISUALISER_DELAY)
        self.render_states = []  # clear states for next render

    def render(self, mode='human', close=False):
        """Replay all frames accumulated since the last render call."""
        if close:
            pygame.display.quit()
            pygame.quit()
            self.window = None
            return
        self._initialse_window()
        self.__draw_render_states()
        #img = self._get_image()
        #if mode == 'rgb_array':
        #    return img
        # elif mode == 'human':
        #     from gym.envs.classic_control import rendering
        #     if self.viewer is None:
        #         self.viewer = rendering.SimpleImageViewer(SCREEN_WIDTH, SCREEN_HEIGHT)
        #     self.viewer.imshow(img)

    def _initialse_window(self):
        # (sic) method name keeps its original misspelling; render() and
        # save_render_states() both call it by this name.
        # initialise visualiser
        if self.window is None:
            pygame.init()
            width = self.__visualiser_scale(PITCH_LENGTH / 2 + GOAL_DEPTH)
            height = self.__visualiser_scale(PITCH_WIDTH)
            self.window = pygame.display.set_mode((width, height))
            self.__clock = pygame.time.Clock()
            size = (width, height)
            self.__background = pygame.Surface(size)
            self.__white = pygame.Color(255, 255, 255, 0)
            self.__black = pygame.Color(0, 0, 0, 0)
            self.__red = pygame.Color(255, 0, 0, 0)
            self.__background.fill(pygame.Color(0, 125, 0, 0))

    def save_render_states(self, dir, prefix, index=0):
        """Dump each pending render frame as a numbered JPEG; returns next index."""
        self._initialse_window()
        import os
        for s in self.render_states:
            self.__draw_internal_state(s)
            pygame.image.save(self.window, os.path.join(dir, prefix+"_"+str("{:04d}".format(index))+".jpeg"))
            index += 1
        return index
class Entity:
    """ This is a base entity class, representing moving objects. """

    def __init__(self, config):
        # All physical constants come from the per-entity config dict.
        self.rand = config['RAND']              # noise magnitude for accelerate()
        self.accel_max = config['ACCEL_MAX']    # per-step acceleration cap
        self.speed_max = config['SPEED_MAX']    # velocity magnitude cap
        self.power_rate = config['POWER_RATE']  # power -> acceleration scaling
        self.decay = config['DECAY']            # per-step velocity decay factor
        self.size = config['SIZE']              # collision radius
        self.position = np.array([0., 0.])
        self.velocity = np.array([0., 0.])
        self.np_random = None  # overwritten by seed()

    def update(self):
        """ Update the position and velocity. """
        self.position += self.velocity
        self.velocity *= self.decay

    def accelerate(self, power, theta):
        """ Applies a power to the entity in direction theta. """
        # Both the direction and the resulting acceleration are perturbed
        # with uniform noise scaled by self.rand.
        rrand = self.np_random.uniform(-self.rand, self.rand)
        theta = (1 + rrand) * theta
        rmax = self.rand * norm(self.velocity)
        noise = self.np_random.uniform(-rmax, rmax, size=2)
        rate = float(power) * self.power_rate
        acceleration = rate * angle_position(theta) + noise
        acceleration = bound_vector(acceleration, self.accel_max)
        self.velocity += acceleration
        self.velocity = bound_vector(self.velocity, self.speed_max)

    def decollide(self, other):
        """ Shift overlapping entities apart. """
        # Move each entity half of the overlap away from the other, then
        # fully reverse both velocities (a perfectly elastic "bounce").
        overlap = (self.size + other.size - self.distance(other)) / 2
        theta1 = angle_between(self.position, other.position)
        theta2 = angle_between(other.position, self.position)
        self.position += overlap * angle_position(theta2)
        other.position += overlap * angle_position(theta1)
        self.velocity *= -1
        other.velocity *= -1

    def colliding(self, other):
        """ Check if two entities are overlapping. """
        dist = self.distance(other)
        return dist < self.size + other.size

    def distance(self, other):
        """ Computes the euclidean distance to another entity. """
        return norm(self.position - other.position)

    def in_area(self, left, right, bot, top):
        """ Checks if the entity is in the area. """
        xval, yval = self.position
        in_length = left <= xval <= right
        in_width = bot <= yval <= top
        return in_length and in_width
class Player(Entity):
""" This represents a player with a position,
velocity and an orientation. """
    def __init__(self, position, orientation):
        """ The values for this class are defined by the player constants. """
        Entity.__init__(self, PLAYER_CONFIG)
        self.position = position
        self.orientation = orientation  # facing angle in radians
def homothetic_centre(self, ball):
""" Computes the homothetic centre between the player and the ball. """
ratio = 1. / (self.size + ball.size)
position = (ball.position * self.size + self.position * ball.size)
return ratio * position
    def tangent_points(self, htc):
        """ Finds the tangent points on the player wrt to homothetic centre. """
        diff = htc - self.position
        square = sum(diff ** 2)
        if square <= self.size ** 2:
            # htc lies on/inside the player circle: no real tangent, degenerate.
            delta = 0.0
        else:
            delta = np.sqrt(square - self.size ** 2)
        xt1 = (diff[0] * self.size ** 2 + self.size * diff[1] * delta) / square
        xt2 = (diff[0] * self.size ** 2 - self.size * diff[1] * delta) / square
        yt1 = (diff[1] * self.size ** 2 + self.size * diff[0] * delta) / square
        yt2 = (diff[1] * self.size ** 2 - self.size * diff[0] * delta) / square
        # NOTE(review): the candidate points deliberately mix x/y components
        # from the two algebraic solutions; confirm against the geometric
        # derivation before changing.
        tangent1 = np.array((xt1, yt1)) + self.position
        tangent2 = np.array((xt1, yt2)) + self.position
        tangent3 = np.array((xt2, yt1)) + self.position
        tangent4 = np.array((xt2, yt2)) + self.position
        # Pick the pairing whose point actually lies on the player circle.
        if norm(tangent1 - self.position) == self.size:
            return tangent1, tangent4
        else:
            return tangent2, tangent3
def ball_angles(self, ball, angle):
""" Determines which angle to kick the ball along. """
htc = self.homothetic_centre(ball)
tangent1, tangent2 = self.tangent_points(htc)
target = self.position + self.size * angle_position(angle)
if norm(tangent1 - target) < norm(tangent2 - target):
return angle_between(htc, tangent1)
else:
return angle_between(htc, tangent2)
def kick_power(self, ball):
""" Determines the kick power weighting given ball position. """
angle = angle_between(self.position, ball.position)
dir_diff = abs(angle_difference(angle, self.orientation))
dist = self.distance(ball)
return 1 - 0.25 * dir_diff / np.pi - 0.25 * dist / KICKABLE
def facing_ball(self, ball):
""" Determines whether the player is facing the ball. """
angle = angle_between(self.position, ball.position)
return self.facing_angle(angle)
def facing_angle(self, angle):
""" Determines whether the player is facing an angle. """
return angle_close(self.orientation, angle)
def turn(self, angle):
""" Turns the player. """
moment = norm_angle(angle)
speed = norm(self.velocity)
angle = moment / (1 + INERTIA_MOMENT * speed)
self.orientation = self.orientation + angle
def dash(self, power):
""" Dash forward. """
power = bound(power, MINPOWER, MAXPOWER)
self.accelerate(power, self.orientation)
def can_kick(self, ball):
""" Determines whether the player can kick the ball. """
return self.distance(ball) <= KICKABLE
def kick_ball(self, ball, power, direction):
""" Kicks the ball. """
if self.can_kick(ball):
power = bound(power, MINPOWER, MAXPOWER)
power *= self.kick_power(ball)
ball.accelerate(power, self.orientation + direction)
def kick_towards(self, ball, power, direction):
""" Kick the ball directly to a direction. """
self.kick_ball(ball, power, direction - self.orientation)
def shoot_goal(self, ball, ypos):
""" Shoot the goal at a targeted position on the goal line. """
ypos = bound(ypos, -GOAL_WIDTH / 2, GOAL_WIDTH / 2)
target = np.array((PITCH_LENGTH / 2 + ball.size, ypos))
self.kick_to(ball, target)
def face_ball(self, ball):
""" Turn the player towards the ball. """
theta = angle_between(self.position, ball.position)
self.face_angle(theta)
def face_angle(self, angle):
""" Turn the player towards and angle. """
self.turn(angle - self.orientation)
def to_ball(self, ball):
""" Move towards the ball. """
if not self.facing_ball(ball):
self.face_ball(ball)
elif not self.can_kick(ball):
self.dash(10)
def kick_to(self, ball, target):
""" Kick the ball to a target position. """
if not self.can_kick(ball):
self.to_ball(ball)
else:
accel = (1 - ball.decay) * (target - self.position) - ball.velocity
power = norm(accel) / (self.kick_power(ball) * ball.power_rate)
theta = np.arctan2(accel[1], accel[0])
self.kick_towards(ball, power, theta)
def turn_ball(self, ball, angle):
""" Turn the ball around the player. """
if not self.can_kick(ball):
self.to_ball(ball)
elif not self.facing_angle(angle):
self.face_angle(angle)
elif self.size < self.distance(ball):
theta = self.ball_angles(ball, angle)
power = 0.1 / self.kick_power(ball)
self.kick_towards(ball, power, theta)
def dribble(self, ball, target):
""" Dribble the ball to a position. """
angle = angle_between(self.position, ball.position)
theta = angle_between(self.position, target)
if not self.can_kick(ball):
self.to_ball(ball)
elif ball.close_to(target):
pass
elif not angle_close(angle, theta):
self.turn_ball(ball, theta)
elif not self.facing_angle(theta):
self.face_angle(theta)
elif self.distance(ball) < (KICKABLE + self.size + ball.size) / 2:
self.kick_towards(ball, 1.5, theta)
else:
self.dash(10)
class Goalie(Player):
    """ This class defines a special goalie player. """
    def move(self, ball, player):
        """ This moves the goalie.
        Predicts where the ball will stop and, if that point is inside the
        (catch-padded) goal mouth, sprints to the perpendicular intercept
        on the ball's line; otherwise jogs to the stopping point. """
        # Stopping point of the ball's geometrically decaying velocity.
        ball_end = ball.position + ball.velocity / (1 - ball.decay)
        diff = ball_end - ball.position
        grad = diff[1] / diff[0] if diff[0] != 0. else 0 # avoid division by 0
        yint = ball.position[1] - grad * ball.position[0]
        # y-coordinate where the ball's line crosses the goal line.
        goal_y = grad * PITCH_LENGTH / 2 + yint
        if ball_end[0] > PITCH_LENGTH / 2 and -GOAL_WIDTH / 2 - CATCHABLE <= goal_y <= GOAL_WIDTH / 2 + CATCHABLE \
                and grad != 0:
            # Foot of the perpendicular from the goalie onto the ball's line:
            # the closest interception point.
            grad2 = -1 / grad
            yint2 = self.position[1] - grad2 * self.position[0]
            ballx = (yint2 - yint) / (grad - grad2)
            bally = grad * ballx + yint
            target = np.array((ballx, bally))
            self.move_towards(20, target)
            self.orientation = angle_between(self.position, target)
        else:
            self.orientation = angle_between(self.position, ball_end)
            self.move_towards(8, ball_end)
    def move_towards(self, power, target):
        """ Move towards target position. """
        theta = angle_between(self.position, target)
        self.accelerate(power, theta)
    def can_catch(self, ball):
        """ Determines whether the goalie can catch the ball.
        Stochastic: succeeds with CATCH_PROBABILITY when in range. """
        can_catch = self.distance(ball) < CATCHABLE
        # NOTE(review): self.np_random is not set anywhere in this class —
        # presumably attached externally by the environment's seeding; verify.
        return self.np_random.random_sample() <= CATCH_PROBABILITY and can_catch
class Ball(Entity):
    """ This class represents the ball, which has no orientation. """
    def __init__(self, position):
        """ The values for this class are defined by the ball constants. """
        Entity.__init__(self, BALL_CONFIG)
        self.position = position
    def close_to(self, position):
        """ Determines whether the ball is within 1.5 units of a position. """
        return norm(self.position - position) <= 1.5
    def goal_distance(self):
        """ Returns the distance from the goal box. """
        if self.position[0] < PITCH_LENGTH / 2:
            # In front of the goal line: distance to the nearest goal corner,
            # or straight ahead when level with the goal mouth.
            if self.position[1] < -GOAL_WIDTH / 2:
                bot_corner = np.array((PITCH_LENGTH / 2, -GOAL_WIDTH / 2))
                return norm(self.position - bot_corner)
            elif self.position[1] > GOAL_WIDTH / 2:
                top_corner = np.array((PITCH_LENGTH / 2, GOAL_WIDTH / 2))
                return norm(self.position - top_corner)
            else:
                return PITCH_LENGTH / 2 - self.position[0]
        else:
            # Behind the goal line: only the vertical offset matters.
            if self.position[1] < -GOAL_WIDTH / 2:
                # Bug fix: measure from the *bottom* post. The previous
                # ``GOAL_WIDTH / 2 - y`` measured from the top post and
                # over-reported the distance by GOAL_WIDTH.
                return -GOAL_WIDTH / 2 - self.position[1]
            elif self.position[1] > GOAL_WIDTH / 2:
                return self.position[1] - GOAL_WIDTH / 2
            else:
                return 0
    def in_field(self):
        """ Checks if the ball has left the field. """
        return self.in_area(0, PITCH_LENGTH / 2, -PITCH_WIDTH / 2, PITCH_WIDTH / 2)
    def in_net(self):
        """ Checks if the ball is in the net. """
        return self.in_area(PITCH_LENGTH / 2, PITCH_LENGTH / 2 + GOAL_DEPTH, -GOAL_WIDTH / 2, GOAL_WIDTH / 2)
    def in_goalbox(self):
        """ Checks if the ball is in the goal box. """
        # Bug fix: the upper y-bound was GOAL_AREA_WIDTH (not halved), which
        # made the box asymmetric around the pitch's centre line.
        return self.in_area(PITCH_LENGTH / 2 - GOAL_AREA_LENGTH, PITCH_LENGTH / 2, -GOAL_AREA_WIDTH / 2,
                            GOAL_AREA_WIDTH / 2)
| StarcoderdataPython |
6532944 | <gh_stars>0
# test electronvolt.py in terminal
# update README.md
# update version number in setuptools.setup
# rm -r __pycache__
# python setup.py sdist bdist_wheel
# twine upload dist/*
# rm -r build dist *.egg-info
# pip install electronvolt -U
# git commit and push
# pypi.org/project/electronvolt
# github.com/dw61/electronvolt
# test module in home directory or on mybinder.org
import setuptools
# The PyPI long description is the README, rendered as Markdown.
with open("README.md", "r") as f:
    long_description = f.read()
setuptools.setup(
    name="electronvolt",
    version="1.3.1",  # keep in sync with the release checklist above
    author_email="<EMAIL>",
    description="A physical quantity calculator.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/dw61/electronvolt",
    py_modules=["electronvolt"],  # single-module distribution: electronvolt.py
    python_requires='>=3.6',
)
| StarcoderdataPython |
9769778 | <gh_stars>1-10
# -*- coding:utf-8 -*-
"""
tcase app 相关的标签
"""
from django import template
from django.db.models import F
from tproject.models import Project
from ..models import Case, Shiwu
register = template.Library()
@register.inclusion_tag('case/project_shiwu.html')
def get_project_shiwu(pk=0, user=None):
    """
    Collect all transactions ("shiwu") of one project for rendering.

    :param pk: primary key (id) of the project
    :param user: requesting user; passed explicitly because inclusion-tag
                 templates cannot reach ``request.user`` themselves
    :return: context dict for case/project_shiwu.html
    """
    if not pk:
        pk = 0
    if user:
        # Intended to sort the requesting user's own entries first:
        # user_id - user.pk is 0 for the owner. NOTE(review): ids smaller
        # than user.pk give negative values that also sort first — verify.
        project_shiwu = Shiwu.objects.filter(project_id=pk).annotate(
            owner=(F('user_id') - user.pk)).order_by('owner')
    else:
        project_shiwu = Shiwu.objects.filter(project_id=pk)
    # Hide every entry that has already been cloned: its clone replaces it.
    clone_parent_ids = [clone.parent for clone in
                        Shiwu.objects.filter(is_clone=True, project_id=pk)]
    project_shiwu = project_shiwu.exclude(id__in=clone_parent_ids)
    return {'all_shiwu': project_shiwu, "user": user}
| StarcoderdataPython |
1964424 | <filename>utils/__init__.py
from .trainer_utils import *
from .buffer import * | StarcoderdataPython |
3436377 | <filename>examples/keras/keras_sequential_classification_model.py<gh_stars>0
"""
Keras sequential classification example
==================
An example of a sequential network used as an OpenML flow.
"""
import keras
import openml.extensions.keras
############################################################################
# Define a sequential Keras model.
# BatchNorm input layer, one wide ReLU hidden layer with dropout, and a
# 2-unit softmax head (binary classification with integer labels).
model = keras.models.Sequential([
    keras.layers.BatchNormalization(),
    keras.layers.Dense(units=1024, activation=keras.activations.relu),
    keras.layers.Dropout(rate=0.4),
    keras.layers.Dense(units=2, activation=keras.activations.softmax),
])
# We will compile using the Adam optimizer while targeting accuracy.
# sparse_categorical_crossentropy matches the integer (non-one-hot) labels.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
############################################################################
############################################################################
# Download the OpenML task for the german credit card dataset.
task = openml.tasks.get_task(31)
############################################################################
# Run the Keras model on the task (requires an API key).
run = openml.runs.run_model_on_task(model, task, avoid_duplicate_runs=False)
# Publish the experiment on OpenML (optional, requires an API key).
run.publish()
print('URL for run: %s/run/%d' % (openml.config.server, run.run_id))
| StarcoderdataPython |
3341217 | <gh_stars>1-10
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
# NOTE(review): ``patterns()`` and string view references were removed in
# Django 1.10 — this URLconf pins the project to an older Django.
urlpatterns = patterns(
    '',
    #url(r'^$', 'main.views.home', name='home'),
    url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout',),
    # mapentity and main register empty-prefix includes, so ordering matters.
    url(r'', include('mapentity.urls', namespace='mapentity', app_name='mapentity')),
    url(r'^paperclip/', include('paperclip.urls')),
    url(r'', include('main.urls', namespace='main', app_name='main')),
    url(r'^admin/', include(admin.site.urls)),
)
| StarcoderdataPython |
6427750 | from checkov.common.graph.checks_infra.enums import Operators
from checkov.terraform.checks_infra.solvers.complex_solvers.base_complex_solver import BaseComplexSolver
from functools import reduce
from operator import or_
class OrSolver(BaseComplexSolver):
    """Complex solver that passes when ANY of its sub-solvers passes."""
    operator = Operators.OR
    def __init__(self, solvers, resource_types):
        super().__init__(solvers, resource_types)
    def _get_operation(self, *args):
        """Fold all boolean args with OR (TypeError when called with none)."""
        return reduce(or_, args)
    def get_operation(self, vertex):
        """
        Return the first truthy sub-solver result for ``vertex``
        (short-circuits), or False when every sub-solver fails.
        """
        # Fixed idiom: the previous loop used enumerate() but never used the
        # index.
        for solver in self.solvers:
            pred_result = solver.get_operation(vertex)
            if pred_result:
                return pred_result
        return False
| StarcoderdataPython |
1937494 | <gh_stars>1-10
import numpy as np
from numpy.linalg import norm
import scipy.interpolate
class Sphere(object):
    """ Moving-sphere benchmark: the optimum ("centre") slides along a path
    through ``self.pts`` as the task parameter moves through [0, 1]. """
    def __init__(self, _dim=2, _seg_type='linear'):
        self.dim = _dim
        # Interpolation kind for the centre path: 'linear' or 'cubic'.
        self.seg_type = _seg_type
        if self.seg_type == 'linear':
            # Random short segment: a point plus a nearby second point.
            p0 = (np.random.rand(self.dim) - 0.5)
            p1 = p0 + 0.2 * (np.random.rand(self.dim) - 0.5)
            self.pts = [p0, p1]
        elif self.seg_type == 'cubic':
            # Fixed 2-D waypoints; cubic interpolation needs >= 4 points.
            self.pts = [np.array([-0.5, -0.5]),
                        np.array([-0.5, 0.0]),
                        np.array([0.0, 0.0]),
                        np.array([0.0, 0.5])]
        # Incremented on every simulate() call.
        self.eval_counter = 0
    def reset(self):
        """ No per-episode state to reset. """
        pass
    def center(self, task):
        """ Interpolated optimum position for task parameter ``task`` in [0, 1].

        Vectorized: one interp1d over all dimensions (axis=0) instead of
        building a separate interpolator per dimension.
        """
        x = np.linspace(0.0, 1.0, len(self.pts))
        waypoints = np.stack(self.pts)  # shape (n_pts, dim)
        f = scipy.interpolate.interp1d(x, waypoints, kind=self.seg_type, axis=0)
        return np.asarray(f(task))
    def simulate(self, sample):
        """ Identity "simulation": the candidate itself is the outcome. """
        self.eval_counter += 1
        return sample.view(np.ndarray)
    def evaluate(self, result, task):
        """ Distance of ``result`` from the task-dependent optimum (lower is
        better). """
        return norm(self.center(task) - result)
    def __str__(self):
        return "[SphereProblem (%s)]" % self.seg_type
| StarcoderdataPython |
3216347 | <gh_stars>0
from contextlib import contextmanager
from typing import Dict, Optional, Iterator
from opentelemetry import trace
from opentelemetry.trace import Span
from hedwig.instrumentation.compat import Getter, extract, inject
from hedwig.models import Message
getter = Getter()
@contextmanager
def on_receive(sns_record=None, sqs_queue_message=None, google_pubsub_message=None) -> Iterator[Span]:
    """
    Hook for instrumenting consumer after message is dequeued. If applicable, starts a new span.

    Exactly one of the three transport-specific arguments is expected; its
    attributes are flattened so the remote trace context can be extracted
    and continued in a new CONSUMER span.

    :param sns_record: raw SNS record dict (indexed for its "attributes" key)
    :param sqs_queue_message: boto3 SQS message (message_attributes used)
    :param google_pubsub_message: Google Pub/Sub message (attributes used)
    :return: yields the started span
    """
    attributes: Optional[Dict]
    # Flatten the transport-specific attribute container into a plain dict.
    if sqs_queue_message is not None:
        attributes = {k: v["StringValue"] for k, v in sqs_queue_message.message_attributes.items()}
    elif sns_record is not None:
        attributes = sns_record["attributes"]
    elif google_pubsub_message is not None:
        attributes = google_pubsub_message.attributes
    else:
        attributes = None
    # Pull the upstream trace context (if any) out of the message attributes.
    tracectx = extract(getter, attributes)  # type: ignore
    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("message_received", context=tracectx, kind=trace.SpanKind.CONSUMER) as span:
        yield span
def on_message(message: Message) -> None:
    """
    Instrumentation hook run once a message is deserialized and validated.

    Renames the current span to the message type so traces group by type.

    :param message: the validated message
    """
    trace.get_current_span().update_name(message.type)
@contextmanager
def on_publish(message: Message, headers: Dict) -> Iterator[Span]:
    """
    Instrumentation hook wrapping a message publish.

    Opens a PRODUCER span named ``publish/<message type>`` and injects the
    current trace context into ``headers`` (mutated in place) so consumers
    can continue the trace.

    :param message: the message about to be published
    :param headers: mutable mapping of transport headers
    :return: yields the active span
    """
    span_name = f"publish/{message.type}"
    with trace.get_tracer(__name__).start_as_current_span(
        span_name, kind=trace.SpanKind.PRODUCER
    ) as span:
        inject(dict.__setitem__, headers)
        yield span
| StarcoderdataPython |
221131 | from random import choice
from typing import Any, List, Tuple
from hamcrest import assert_that
from .pacing import aside, TRIVIAL
from .resolutions import Resolution
# Typehint Aliases
# (placeholders for duck-typed screenplay protocols)
Question = Any
Action = Any
Ability = Any
# Flavour text: one line is picked at random and logged whenever an Actor
# is created through Actor.named().
ENTRANCE_DIRECTIONS = [
    "{} arrives on stage!",
    "{} enters, from the vomitorium!",
    "{} enters, on a wire!",
    "{} enters, stage left!",
    "{} enters, stage right!",
    "{} enters the frame!",
    "{} gets over their stagefright!",
    "{} hears their cue!",
    "{} is ready for their close-up!",
    "{} makes their debut!",
    "The camera pans to {}!",
    "The camera jump-cuts to {}!",
]
class UnableToPerformException(Exception):
    """
    Raised when an actor does not possess the ability to perform the
    action they attempted. See Actor.ability_to.
    """
    pass
class Actor:
    """
    Represents an actor, holding their name and abilities. Actors are the
    performers of your screenplay, they represent your users as they go
    about their business on your product.
    An actor is meant to be instantiated using its static |Actor.named|
    method. A typical invocation might look like:
        perry = Actor.named("Perry")
    This will create the actor, ready to take on their first role.
    """
    @staticmethod
    def named(name: str) -> "Actor":
        """
        Names this actor, logs their entrance, and returns the instance.
        Args:
            name (str): The name of this new Actor.
        Returns:
            |Actor|
        """
        aside(choice(ENTRANCE_DIRECTIONS).format(name), gravitas=TRIVIAL)
        return Actor(name)
    def can(self, *abilities: Ability) -> "Actor":
        """
        Adds one or more abilities to this actor.
        Args:
            abilities (Ability): The abilities this actor can do.
        Returns:
            |Actor|
        """
        self.abilities.extend(abilities)
        return self
    def who_can(self, *abilities: Ability) -> "Actor":
        """Syntactic sugar for |Actor.can|."""
        return self.can(*abilities)
    def ability_to(self, ability: Ability) -> Ability:
        """
        Finds the ability referenced and returns it, if the actor is able
        to do it.
        Args:
            ability (Ability): The ability to perform.
        Returns:
            The requested ability.
        Raises:
            |UnableToPerformException|: if this actor is unable.
        """
        for a in self.abilities:
            if isinstance(a, ability):
                return a
        # No match found: the actor never gained this ability.
        # (The previous for/else was equivalent but needlessly confusing.)
        raise UnableToPerformException(
            "{} does not have the ability to {}".format(self, ability)
        )
    def uses_ability_to(self, ability: Ability) -> Ability:
        """Syntactic sugar for |Actor.ability_to|."""
        return self.ability_to(ability)
    def attempts_to(self, *actions: Action) -> None:
        """
        Performs a list of actions, one after the other.
        Args:
            actions (Action): The actions to perform, in order.
        """
        for action in actions:
            self.perform(action)
    def was_able_to(self, *actions: Action) -> None:
        """Syntactic sugar for |Actor.attempts_to|."""
        return self.attempts_to(*actions)
    def perform(self, action: Action) -> None:
        """
        Performs the given action.
        Args:
            action (Action): The single |Action| to perform.
        """
        action.perform_as(self)
    def should_see_that(self, *tests: Tuple[Question, Resolution]) -> None:
        """
        Asks a series of questions, asserting that the expected answer
        resolves.
        Args:
            tests (tuple(Question, Resolution)): Tuples of a question and
                a |Resolution|.
        Raises:
            AssertionError: If the question's actual answer does not match
                the expected answer from the |Resolution|.
        """
        for question, test in tests:
            assert_that(question.answered_by(self), test)
    def should_see_the(self, *tests: Tuple[Question, Resolution]) -> None:
        """Syntactic sugar for |Actor.should_see_that|."""
        return self.should_see_that(*tests)
    def should_see(self, *tests: Tuple[Question, Resolution]) -> None:
        """Syntactic sugar for |Actor.should_see_that|."""
        return self.should_see_that(*tests)
    def exit(self) -> None:
        """
        The actor forgets all of their abilities, ready to assume a new
        role when their next cue calls them.
        """
        # Bug fix: the previous implementation removed items from
        # self.abilities while iterating the same list, which skips every
        # other ability and leaves half of them un-forgotten. Forget them
        # all first, then clear the list in one go.
        for ability in self.abilities:
            ability.forget()
        self.abilities.clear()
    def exit_stage_right(self) -> None:
        """Syntactic sugar for |Actor.exit|."""
        aside("{} bows and exits, stage right.".format(self), gravitas=TRIVIAL)
        self.exit()
    def exit_stage_left(self) -> None:
        """Syntactic sugar for |Actor.exit|."""
        aside("{} bows and exits, stage left.".format(self), gravitas=TRIVIAL)
        self.exit()
    def __repr__(self) -> str:
        return self.name
    def __init__(self, name: str) -> None:
        self.name = name
        # Abilities granted so far; grown by can()/who_can().
        self.abilities = []
# Natural-language-enabling syntactic sugar:
# ``AnActor.named("Perry")`` reads like stage directions.
AnActor = Actor
| StarcoderdataPython |
11239569 | from .models import Tag
from django.shortcuts import render
from django.views.generic import CreateView, UpdateView, DetailView, ListView
class TagListView(ListView):
    """ Generic list view over all Tag objects. """
    model = Tag
class TagDetailView(DetailView):
    """ Detail view for a single Tag, exposing its related questions. """
    model = Tag
    def get_context_data(self, **kwargs):
        """ Add the tag's questions to the template context. """
        context = super().get_context_data(**kwargs)
        # question_set: reverse relation from the Question model to Tag
        # (presumably a FK/M2M — confirm against the Question model).
        context["questions"] = self.object.question_set.all()
        return context
| StarcoderdataPython |
4854914 | import struct
import sys
import time
import cv2
import numpy as np
import libipmq
def producer():
    """Capture webcam frames and publish them through a shared-memory queue.

    Wire format per message: a 12-byte header of three int32s
    (width, height, format code) followed by raw H*W*3 uint8 pixel data.
    Runs forever; prints achieved FPS roughly once per second.
    """
    capture = cv2.VideoCapture(0, cv2.CAP_V4L2)
    capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M', 'J', 'P', 'G'))
    capture.set(cv2.CAP_PROP_FPS, 30.0)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280.0)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720.0)
    # Grab one frame just to learn the actual frame shape the camera gives us.
    _, frame = capture.read()
    frame_data_size = np.product(list(frame.shape))
    frame_size = 12 + frame_data_size
    # Shared-memory ring sized for 3 in-flight frames.
    producer = libipmq.Producer("test.queue", "/dev/shm/test.data", 3 * frame_size)
    last_measurement = time.time()
    count = 0
    while True:
        allocation = producer.allocate(frame_size)
        # Decode directly into the shared-memory allocation (zero-copy):
        # the ndarray is a view over the allocation's bytes past the header.
        frame = np.asarray(allocation)[12:].reshape((frame.shape[0], frame.shape[1], 3))
        capture.read(frame)
        # Header: width, height, format code (16 — meaning defined by the
        # consumer protocol; presumably a BGR24-style tag, verify).
        allocation.copy_from(0, struct.pack("iii", frame.shape[1], frame.shape[0], 16))
        producer.publish("test", allocation)
        count += 1
        elapsed = (time.time() - last_measurement)
        if elapsed >= 1.0:
            print("FPS: {:.3f}".format(count / elapsed))
            last_measurement = time.time()
            count = 0
def consumer():
    """Consume frames from the shared-memory queue and display them.

    Mirrors producer()'s wire format: a 12-byte (width, height, format)
    int32 header followed by raw H*W*3 uint8 pixels.
    """
    consumer = libipmq.Consumer("test.queue")
    consumer.create_queue("test", True)
    # Bind with a catch-all routing pattern.
    consumer.bind_queue("test", ".*")
    def callback(commands, queue_id, routing_key, message_id, message):
        # Parse the header, then view the payload as an image without copying.
        width, height, img_format = struct.unpack("iii", message[:12])
        frame = np.frombuffer(message[12:], np.uint8).reshape((height, width, 3))
        cv2.imshow("Frame - {}x{}".format(width, height), frame)
        cv2.waitKey(1)
        # Ack only after the frame has been rendered.
        commands.acknowledge(queue_id, message_id)
    consumer.start_consume_queue("test", callback)
if __name__ == "__main__":
    # Dispatch on the required first CLI argument; unknown commands are a
    # silent no-op, exactly as before.
    actions = {"producer": producer, "consumer": consumer}
    selected = actions.get(sys.argv[1])
    if selected is not None:
        selected()
| StarcoderdataPython |
11361350 | <gh_stars>1-10
#Imports the tkinter module
import tkinter
#Imports the tkinter.messagebox module
import tkinter.messagebox
#Main Function
def main() :
    """Build and run the demo window: three checkbuttons plus
    'Get Selections' and 'Quit' buttons."""
    #Creates the window
    test_window = tkinter.Tk()
    #Sets the window's title
    test_window.wm_title("My Window")
    #Creates two frames that belong to test_window
    upper_frame = tkinter.Frame(test_window)
    lower_frame = tkinter.Frame(test_window)
    #Creates three global IntVar variables.
    #One for each checkbutton.
    #They are global in this program so other functions
    #can access them.
    global cbvar
    global cbvar2
    global cbvar3
    cbvar = tkinter.IntVar()
    cbvar2 = tkinter.IntVar()
    cbvar3 = tkinter.IntVar()
    #Sets each IntVar to zero (unselected)
    cbvar.set(0)
    cbvar2.set(0)
    cbvar3.set(0)
    #Creates three Checkbuttons that belong to upper_frame and
    #uses their repective IntVar variable.
    testcb = tkinter.Checkbutton(upper_frame,
                                 text="Option 1",
                                 variable=cbvar)
    testcb2 = tkinter.Checkbutton(upper_frame,
                                  text="Option 2",
                                  variable=cbvar2)
    testcb3 = tkinter.Checkbutton(upper_frame,
                                  text="Option 3",
                                  variable=cbvar3)
    #Packs the Checkbuttons onto upper_frame
    testcb.pack()
    testcb2.pack()
    testcb3.pack()
    #Creates a button that belongs to lower_frame and calls the
    #showdialog function when clicked.
    ok_button = tkinter.Button(lower_frame,
                               text="Get Selections",
                               command=showdialog)
    #Creates a button that belongs to lower_frame and calls test_window's
    #destroy function when clicked.
    quit_button = tkinter.Button(lower_frame,
                                 text="Quit",
                                 command=test_window.destroy)
    #Packs the two buttons onto lower_frame
    ok_button.pack(side="left")
    quit_button.pack(side="left")
    #Packs the frames onto the window
    upper_frame.pack()
    lower_frame.pack()
    #Enters the main loop, displaying the window
    #and waiting for events
    tkinter.mainloop()
#Function that displays a dialog box when it is called.
#It summarises which Checkbuttons are currently selected.
def showdialog():
    """Show an info dialog listing the currently checked options."""
    lines = ["You selected:\n"]
    for var, label in ((cbvar, "Option 1"),
                       (cbvar2, "Option 2"),
                       (cbvar3, "Option 3")):
        # An IntVar value of 1 means the matching Checkbutton is checked.
        if var.get() == 1:
            lines.append(label + "\n")
    # Matches the original's "nothing selected" check exactly.
    if cbvar.get() == cbvar2.get() == cbvar3.get() == 0:
        lines.append("None")
    tkinter.messagebox.showinfo("Selections", "".join(lines))
#Calls the main function/starts the program
main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.