id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
389337 | <filename>set1/challenge6.py
from common.tools.base64 import Base64Decoder
from common.challenge import MatasanoChallenge
from common.attacks.xor import RepeatingKeyXORDecrypter
class Set1Challenge06(MatasanoChallenge):
    """Set 1, challenge 6: break repeating-key XOR on a base64-encoded file."""

    FILE = 'set1/data/6.txt'

    def expected_value(self):
        """Return the expected plaintext, loaded from the answer file.

        FIX: use a context manager so the file handle is closed
        deterministically instead of leaking until garbage collection.
        """
        with open('set1/data/6ans.txt', 'r') as answer_file:
            return answer_file.read()

    def value(self):
        """Base64-decode the challenge file and break the repeating-key XOR."""
        decoded_content = Base64Decoder().decode_file(self.FILE)
        return RepeatingKeyXORDecrypter().decrypt(decoded_content)
9749537 | """
This example shows how to use a PointsDensity
actor to show the density of labelled cells
"""
import random
import numpy as np
from brainrender import Scene
from brainrender.actors import Points, PointsDensity
from rich import print
from myterial import orange
from pathlib import Path
print(f"[{orange}]Running example: {Path(__file__).name}")
def get_n_random_points_in_region(region, N, n_candidates=10000):
    """
    Gets N random points inside (or on the surface) of a mesh.

    Candidate points are drawn uniformly inside the mesh's axis-aligned
    bounding box and then filtered down to those actually inside the mesh;
    N of the survivors are sampled (with replacement) and returned.

    FIX/generalization: the candidate pool size was hard-coded to 10000;
    it is now a backward-compatible keyword argument.
    """
    region_bounds = region.mesh.bounds()
    # np.random.randint truncates the (float) bounds returned by the mesh
    # -- assumes bounds span at least one integer unit; TODO confirm
    X = np.random.randint(region_bounds[0], region_bounds[1], size=n_candidates)
    Y = np.random.randint(region_bounds[2], region_bounds[3], size=n_candidates)
    Z = np.random.randint(region_bounds[4], region_bounds[5], size=n_candidates)
    pts = [[x, y, z] for x, y, z in zip(X, Y, Z)]

    # keep only candidates that fall inside the mesh, then sample N of them
    # (random.choices samples WITH replacement, so duplicates are possible)
    ipts = region.mesh.insidePoints(pts).points()
    return np.vstack(random.choices(ipts, k=N))
# Build the scene and visualise (fake) labelled cells plus their density.
scene = Scene(title="Labelled cells")

# Get a numpy array with (fake) coordinates of some labelled cells
# (alpha=0.0 keeps the region mesh invisible; it is only used for sampling)
mos = scene.add_brain_region("MOs", alpha=0.0)
coordinates = get_n_random_points_in_region(mos, 2000)

# Add to scene
scene.add(Points(coordinates, name="CELLS", colors="salmon"))
scene.add(PointsDensity(coordinates))

# render (opens an interactive window; blocks until closed)
scene.render()
| StarcoderdataPython |
5027451 | #!/usr/bin/env python3
'''
Author: AGDC Services
Website: AGDCservices.com
Date: 20210501
This is a Command and Control(C2) server simulator for TCP traffic
Usage:
- Fill in the variables at the top of main
- write your custom C2 code in the "Start of C2 Simulation Code"
section in main after the client class is instantiated.
Use the server instance to access the utility
networking functions. The available functions to the server instance
are listed at the top of the "Start of C2 Simulation Code" section
- Run the file from a command prompt using python 3
'''
import ssl
import socket
import sys
import struct
import base64
import time
'''
The following two variables are only needed if TLS is enabled
Details:
- Create the key and cert on the host where this script will be run
- fill in the variables below with the key / cert paths
- set the bDoTls variable in the main function to True
one method to create a TLS key / cert is to use openssl on linux with the following commands
all of the default options can be used when creating the x509 certificate
- openssl genpkey -out <fileName> -algorithm RSA -pkeyopt rsa_keygen_bits:<keyLen in bits>
- openssl req -new -x509 -key <key filePath> -days 7200 -out <fileName>
example:
openssl genpkey -out key.pem -algorithm RSA -pkeyopt rsa_keygen_bits:2048
openssl req -new -x509 -key key.pem -days 7200 -out cert.cert
'''
# Paths to the TLS private key / certificate (PEM); only required when the
# bDoTls flag in main() is set to True -- see the instructions above.
gTlsKeyFilePath = r''
gTlsCertFilePath = r''
def main():
    '''
    main function to simulate C2 server network traffic

    Fill in the initialization variables below, then add custom C2 logic
    after the Server instance is created. Note that constructing Server
    blocks until a client connects.
    '''

    ############################################
    # Initialization Variables
    ############################################

    # initialize variables
    listeningIp = '0.0.0.0'  # string of dotted decimal ip address to listen on, e.g. '0.0.0.0' for all IPs
    port = 80  # decimal port number to listen on

    # boolean indicating if TLS should be used
    # fill in the gTlsKeyFilePath and gTlsCertFilePath variables at top of simulator
    # if TLS is enabled
    bDoTls = False

    ############################################

    # error check
    if bDoTls == True and (len(gTlsKeyFilePath) == 0 or len(gTlsCertFilePath) == 0):
        sys.exit('\n[*] ERROR: variables gTlsKeyFilePath and gTlsCertFilePath must be used if TLS is enabled')

    ############################################
    # Start Of C2 Simulation Code
    ############################################
    #
    # Built in Utility Function Prototypes
    # See function headers for usage details
    #
    # Print_Hexdump(byteString)
    #
    # Server(listeningIp, port, bDoTls) # Class Constructor
    # <Server_Class_Instance>.Send(buf)
    # <Server_Class_Instance>.Recv_Len(nLen)
    # <Server_Class_Instance>.Recv_Len_Prepended(lengthOfLengthField, bLittleEndian)
    # <Server_Class_Instance>.Recv_Delim(delim)
    # <Server_Class_Instance>.Socket_Close()
    #

    # initialize the server instance (blocks until one client connects)
    server = Server(listeningIp, port, bDoTls)
class Server:
    # Server class which contains all the basic networking
    # utility functions needed to simulate a C2 server
    #
    # NOTE: constructing an instance blocks until a single client connects;
    # the accepted connection (optionally TLS-wrapped) is kept in self.conn

    def __init__(self, host, port, bDoTls):
        print('Starting TCP Server\n')

        # create a socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        # bind to the port and start accepting connections
        s.bind((host, port))
        s.listen(1)
        self.conn, clientAddr = s.accept()

        # if TLS is needed, wrap the socket in the key / cert
        # declared in the global variables at the top of the script
        if bDoTls == True:
            context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            context.verify_mode = ssl.CERT_NONE
            context.load_cert_chain(keyfile=gTlsKeyFilePath, certfile=gTlsCertFilePath)
            ss = context.wrap_socket(self.conn, server_side=True)
            self.conn = ss

    def Send(self, buf):
        '''
        send entire buffer
        buf must be a byte string
        '''
        # error check
        if isinstance(buf, bytes) == False:
            sys.exit('\n[*] ERROR: buf must be a byte string for Send function')

        self.conn.sendall(buf)

    def Recv_Len(self, nLen):
        '''
        receives a fixed length (nLen) number of bytes
        and returns a byte string of the received bytes

        exits with an error if the peer closes the connection early
        '''
        result = b''
        bytesRemaining = nLen
        while bytesRemaining > 0:
            chunk = self.conn.recv(bytesRemaining)
            # FIX: recv() returns b'' once the peer has closed the
            # connection; the original code spun in an infinite loop here
            if len(chunk) == 0:
                sys.exit('\n[*] ERROR: connection closed while receiving data')
            result += chunk
            bytesRemaining = nLen - len(result)
        return result

    def Recv_Len_Prepended(self, lengthOfLengthField, bLittleEndian):
        '''
        receives a packet assuming the packet length preceeds the data
        drops the length part of the bytes and just returns the packet data
        returns a byte string of the received packet data

        lengthOfLengthField should be the number of bytes used to store
        the length of the data portion, e.g. 1,2,4 bytes
        bLittleEndian refers to the endianess of the lengthOfLengthField
        '''
        # determine how to unpack the length field based on function arguments
        endiness = '<' if bLittleEndian == True else '>'
        sizeIndicator = {1: 'B', 2: 'H', 4: 'I'}

        # error check
        if lengthOfLengthField not in sizeIndicator:
            sys.exit('\n[*] error: lengthOfLengthField not valid for this function')

        # build the format string to specify how to unpack the data length
        formatStr = endiness + sizeIndicator[lengthOfLengthField]

        # get the length of the data segment
        dataLen = self.Recv_Len(lengthOfLengthField)

        # transform the length from string to int
        dataLen = struct.unpack(formatStr, dataLen)[0]

        # get the actual data
        data = self.Recv_Len(dataLen)

        return data

    def Recv_Delim(self, delim):
        '''
        receives a packet until you receive the terminating deliminator
        delim must be a byte string
        returns the received bytes, minus the deliminator, as a byte string
        '''
        # error check
        if isinstance(delim, bytes) == False:
            sys.exit('\n[*] ERROR: delim must be a byte string for Recv_Delim function')

        result = b''
        while result.endswith(delim) == False:
            result += self.Recv_Len(1)

        # strip the trailing deliminator before returning
        result = result[:-len(delim)]
        return result

    def Socket_Close(self):
        '''
        closes the socket
        '''
        if self.conn is not None:
            self.conn.close()
def Print_Hexdump(data, displayLen=16):
    '''
    utility function to print the data as a hex dump output

    prints (and returns) one line per displayLen bytes formatted as
    "<offset> <hex bytes> <ascii>", with an extra space inserted after
    every 8 bytes in both the hex and the ascii columns
    '''

    # error check
    if isinstance(data, bytes) == False:
        sys.exit('\n[*] ERROR: data must be a byte string for Print_Hexdump function')

    lines = []
    for i in range(0, len(data), displayLen):
        chars = data[i:(i + displayLen)]

        # get standard output views for the offset / hex / ascii columns
        offset = '{:04x}'.format(i)
        hexValues = ' '.join('{:02x}'.format(c) for c in chars)
        asciiValues = ''.join([chr(c) if c in range(0x20, 0x7f) else '.' for c in chars])

        # add space after every 8 bytes
        charLen = 3  # include space included between hex values
        hexValues = ' '.join([hexValues[j:(j + charLen * 8)] for j in range(0, len(hexValues), charLen * 8)])
        charLen = 1  # no space included in ascii values
        asciiValues = ' '.join([asciiValues[j:(j + charLen * 8)] for j in range(0, len(asciiValues), charLen * 8)])

        # combine all parts of the hexdump into a single list
        # FIX: use integer division -- the original float result was pushed
        # into the format spec as a fractional field width and only worked
        # by accident of how nested format specs are parsed
        spaceAdded = ((3 * displayLen) - 1) // (3 * 8)
        lines.append('{:s} {:{}s} {:s}'.format(offset, hexValues, displayLen * 3 + spaceAdded, asciiValues))

    print('\n'.join(lines))
    return '\n'.join(lines)
if __name__ == '__main__':
    # run the C2 simulator only when executed as a script
    main()
277844 | <filename>test/test_cli.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import inspect
import io
import os
import sys
import textwrap
import unittest
# prefer local copy to the one which is installed
# hack from http://stackoverflow.com/a/6098238/280539
# Resolve the repository root (one level above this test file) and put it at
# the front of sys.path so the local SPARQLWrapper copy wins over any
# installed version.
_top_level_path = os.path.realpath(
    os.path.abspath(
        os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")
    )
)
if _top_level_path not in sys.path:
    sys.path.insert(0, _top_level_path)
from SPARQLWrapper.main import main, parse_args
from SPARQLWrapper import POST
# Live endpoint and query fixtures shared by the CLI tests below.
endpoint = "http://ja.dbpedia.org/sparql"
testfile = os.path.join(os.path.dirname(__file__), "test.rq")
testquery = "SELECT DISTINCT ?x WHERE { ?x ?y ?z . } LIMIT 1"
class SPARQLWrapperCLI_Test_Base(unittest.TestCase):
    """Base class that captures stdout/stderr around each test."""

    def setUp(self):
        # redirect both streams so tests can inspect what the CLI printed
        self.org_stdout, sys.stdout = sys.stdout, io.StringIO()
        self.org_stderr, sys.stderr = sys.stderr, io.StringIO()

    def tearDown(self):
        sys.stdout = self.org_stdout
        # FIX: the original restored stdout only, leaving sys.stderr
        # pointing at the per-test StringIO for the rest of the run
        sys.stderr = self.org_stderr
class SPARQLWrapperCLIParser_Test(SPARQLWrapperCLI_Test_Base):
    """Argument-parsing tests for the ``rqw`` command line interface.

    These exercise ``parse_args`` only; no network access is performed.
    argparse exits with code 0 for -h/-V and code 2 for usage errors.
    """

    def testHelp(self):
        with self.assertRaises(SystemExit) as cm:
            parse_args(["-h"])

        self.assertEqual(cm.exception.code, 0)
        self.assertEqual(sys.stdout.getvalue()[:5], "usage")

    def testVersion(self):
        with self.assertRaises(SystemExit) as cm:
            parse_args(["-V"])

        self.assertEqual(cm.exception.code, 0)
        self.assertEqual(sys.stdout.getvalue()[:3], "rqw")

    def testNoarg(self):
        # either a query string or a query file must be supplied
        with self.assertRaises(SystemExit) as cm:
            parse_args([])

        self.assertEqual(cm.exception.code, 2)
        self.assertEqual(
            sys.stderr.getvalue().split("\n")[1],
            "rqw: error: one of the arguments -f/--file -Q/--query is required",
        )

    def testQueryAndFile(self):
        # -Q and -f are mutually exclusive
        with self.assertRaises(SystemExit) as cm:
            parse_args(["-Q", testquery, "-f", "-"])
        self.assertEqual(cm.exception.code, 2)
        self.assertEqual(
            sys.stderr.getvalue().split("\n")[1],
            "rqw: error: argument -f/--file: not allowed with argument -Q/--query",
        )

    def testInvalidFormat(self):
        with self.assertRaises(SystemExit) as cm:
            parse_args(["-Q", testquery, "-F", "jjssoonn"])
        self.assertEqual(cm.exception.code, 2)
        self.assertEqual(
            sys.stderr.getvalue().split("\n")[1],
            "rqw: error: argument -F/--format: invalid choice: 'jjssoonn' (choose from 'json', 'xml', 'turtle', 'n3', 'rdf', 'rdf+xml', 'csv', 'tsv', 'json-ld')",
        )

    def testInvalidFile(self):
        with self.assertRaises(SystemExit) as cm:
            parse_args(["-f", "440044.rq"])
        self.assertEqual(cm.exception.code, 2)
        self.assertEqual(
            sys.stderr.getvalue().split("\n")[1],
            "rqw: error: argument -f/--file: invalid check_file value: '440044.rq'",
        )
class SPARQLWrapperCLI_Test(SPARQLWrapperCLI_Test_Base):
    """End-to-end CLI tests that run ``main`` against live public SPARQL
    endpoints (DBpedia, Wikidata, various triple-store vendors).

    NOTE(review): these tests require network access and depend on remote
    data staying stable. The expected-output string literals in this copy
    appear to have lost their original internal indentation -- verify them
    against actual CLI output before relying on exact comparisons.
    """

    def testQueryWithEndpoint(self):
        # default output format is JSON
        main(
            [
                "-Q",
                testquery,
                "-e",
                endpoint,
            ]
        )
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"link": [],
"vars": [
"x"
]
},
"results": {
"distinct": false,
"ordered": true,
"bindings": [
{
"x": {
"type": "uri",
"value": "http://www.openlinksw.com/virtrdf-data-formats#default-iid"
}
}
]
}
}
"""
            ),
        )

    def testQueryWithFile(self):
        main(["-f", testfile, "-e", endpoint])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"link": [],
"vars": [
"pllabel"
]
},
"results": {
"distinct": false,
"ordered": true,
"bindings": [
{
"pllabel": {
"type": "literal",
"xml:lang": "ja",
"value": "PARLOG"
}
}
]
}
}
"""
            ),
        )

    def testQueryWithFileXML(self):
        main(["-f", testfile, "-e", endpoint, "-F", "xml"])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
<?xml version="1.0" ?><sparql xmlns="http://www.w3.org/2005/sparql-results#" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.w3.org/2001/sw/DataAccess/rf1/result2.xsd">
<head>
<variable name="pllabel"/>
</head>
<results distinct="false" ordered="true">
<result>
<binding name="pllabel"><literal xml:lang="ja">PARLOG</literal></binding>
</result>
</results>
</sparql>
"""
            ),
        )

    def testQueryWithFileTurtle(self):
        main(["-f", testfile, "-e", endpoint, "-F", "turtle"])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
@prefix res: <http://www.w3.org/2005/sparql-results#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
_:_ a res:ResultSet .
_:_ res:resultVariable "pllabel" .
_:_ res:solution [
res:binding [ res:variable "pllabel" ; res:value "PARLOG"@ja ] ] .\n
"""
            ),
        )

    def testQueryWithFileTurtleQuiet(self):
        # -q suppresses warnings on stderr
        main(
            [
                "-f",
                testfile,
                "-e",
                endpoint,
                "-F",
                "turtle",
                "-q",
            ]
        )
        self.assertEqual(sys.stderr.getvalue(), "")
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
@prefix res: <http://www.w3.org/2005/sparql-results#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
_:_ a res:ResultSet .
_:_ res:resultVariable "pllabel" .
_:_ res:solution [
res:binding [ res:variable "pllabel" ; res:value "PARLOG"@ja ] ] .\n
"""
            ),
        )

    def testQueryWithFileN3(self):
        main(["-f", testfile, "-e", endpoint, "-F", "n3"])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
@prefix res: <http://www.w3.org/2005/sparql-results#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
_:_ a res:ResultSet .
_:_ res:resultVariable "pllabel" .
_:_ res:solution [
res:binding [ res:variable "pllabel" ; res:value "PARLOG"@ja ] ] .\n
"""
            ),
        )

    def testQueryRDF(self):
        main(["-Q", "DESCRIBE <http://ja.wikipedia.org/wiki/SPARQL>", "-e", endpoint, "-F", "rdf"])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
@prefix dc: <http://purl.org/dc/elements/1.1/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
<http://ja.dbpedia.org/resource/SPARQL> foaf:isPrimaryTopicOf <http://ja.wikipedia.org/wiki/SPARQL> .
<http://ja.wikipedia.org/wiki/SPARQL> a foaf:Document ;
dc:language "ja" ;
foaf:primaryTopic <http://ja.dbpedia.org/resource/SPARQL> .
"""
            ),
        )

    def testQueryWithFileRDFXML(self):
        main(["-f", testfile, "-e", endpoint, "-F", "rdf+xml"])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
<?xml version="1.0" ?><sparql xmlns="http://www.w3.org/2005/sparql-results#" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.w3.org/2001/sw/DataAccess/rf1/result2.xsd">
<head>
<variable name="pllabel"/>
</head>
<results distinct="false" ordered="true">
<result>
<binding name="pllabel"><literal xml:lang="ja">PARLOG</literal></binding>
</result>
</results>
</sparql>
"""
            ),
        )

    def testQueryWithFileCSV(self):
        main(["-f", testfile, "-e", endpoint, "-F", "csv"])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
"pllabel"
"PARLOG"\n
"""
            ),
        )

    def testQueryWithFileTSV(self):
        main(["-f", testfile, "-e", endpoint, "-F", "tsv"])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
"pllabel"
"PARLOG"\n
"""
            ),
        )

    def testQueryToLovFuseki(self):
        main(["-e", "https://lov.linkeddata.es/dataset/lov/sparql/", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "uri",
"value": "http://www.w3.org/2002/07/owl#someValuesFrom"
}
}
]
}
}
"""
            ),
        )

    def testQueryToRDF4J(self):
        main(
            [
                "-e",
                "http://vocabs.ands.org.au/repository/api/sparql/csiro_international-chronostratigraphic-chart_2018-revised-corrected",
                "-Q",
                testquery,
            ]
        )
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "uri",
"value": "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
}
}
]
}
}
"""
            ),
        )

    def testQueryToAllegroGraph(self):
        main(["-e", "https://mmisw.org/sparql", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "uri",
"value": "https://mmisw.org/ont/~mjuckes/cmip_variables_alpha/rsdcs4co2"
}
}
]
}
}
"""
            ),
        )

    def testQueryToGraphDBEnterprise(self):
        main(["-e", "http://factforge.net/repositories/ff-news", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "uri",
"value": "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
}
}
]
}
}
"""
            ),
        )

    def testQueryToStardog(self):
        # Stardog requires POST here (-m)
        main(["-e", "https://lindas.admin.ch/query", "-Q", testquery, "-m", POST])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "uri",
"value": "http://classifications.data.admin.ch/canton/bl"
}
}
]
}
}
"""
            ),
        )

    def testQueryToAgrovoc_AllegroGraph(self):
        main(["-e", "https://agrovoc.fao.org/sparql", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "uri",
"value": "http://aims.fao.org/aos/agrovoc/"
}
}
]
}
}
"""
            ),
        )

    def testQueryToVirtuosoV8(self):
        main(["-e", "http://dbpedia-live.openlinksw.com/sparql", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"link": [],
"vars": [
"x"
]
},
"results": {
"distinct": false,
"ordered": true,
"bindings": [
{
"x": {
"type": "uri",
"value": "http://www.openlinksw.com/virtrdf-data-formats#default-iid"
}
}
]
}
}
"""
            ),
        )

    def testQueryToVirtuosoV7(self):
        main(["-e", "http://dbpedia.org/sparql", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"link": [],
"vars": [
"x"
]
},
"results": {
"distinct": false,
"ordered": true,
"bindings": [
{
"x": {
"type": "uri",
"value": "http://www.openlinksw.com/virtrdf-data-formats#default-iid"
}
}
]
}
}
"""
            ),
        )

    def testQueryToBrazeGraph(self):
        main(["-e", "https://query.wikidata.org/sparql", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "uri",
"value": "http://wikiba.se/ontology#Dump"
}
}
]
}
}
"""
            ),
        )

    def testQueryToFuseki2V3_6(self):
        main(["-e", "https://agrovoc.uniroma2.it/sparql/", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "uri",
"value": "http://aims.fao.org/aos/agrovoc/"
}
}
]
}
}
"""
            ),
        )

    def testQueryToFuseki2V3_8(self):
        main(["-e", "http://zbw.eu/beta/sparql/stw/query", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "uri",
"value": "http://www.w3.org/2004/02/skos/core"
}
}
]
}
}
"""
            ),
        )

    def testQueryTo4store(self):
        main(["-e", "http://rdf.chise.org/sparql", "-Q", testquery])
        self.assertEqual(
            sys.stdout.getvalue(),
            textwrap.dedent(
                """\
{
"head": {
"vars": [
"x"
]
},
"results": {
"bindings": [
{
"x": {
"type": "bnode",
"value": "b1f4d352f000000fc"
}
}
]
}
}
"""
            ),
        )
| StarcoderdataPython |
3354987 | import django_dynamic_fixture as fixture
from allauth.account.views import SignupView
from django.contrib.auth.models import User
from django.core.cache import cache
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from readthedocs.organizations.models import (
Organization,
Team,
TeamInvite,
TeamMember,
)
from readthedocs.organizations.views import private as private_views
from readthedocs.organizations.views import public as public_views
from readthedocs.projects.models import Project
from readthedocs.rtd_tests.base import RequestFactoryTestMixin
@override_settings(RTD_ALLOW_ORGANIZATIONS=True)
class OrganizationViewTests(RequestFactoryTestMixin, TestCase):

    """Organization views tests."""

    def setUp(self):
        # one organization with a single owner, project and team
        self.owner = fixture.get(User)
        self.project = fixture.get(Project)
        self.organization = fixture.get(
            Organization,
            owners=[self.owner],
            projects=[self.project],
        )
        self.team = fixture.get(Team, organization=self.organization)

    def test_delete(self):
        """Delete organization on post."""
        req = self.request(
            'post',
            '/organizations/{}/delete/'.format(self.organization.slug),
            user=self.owner,
        )
        view = private_views.DeleteOrganization.as_view()
        resp = view(req, slug=self.organization.slug)

        # deleting the organization must cascade to its teams and projects
        self.assertFalse(Organization.objects
                         .filter(pk=self.organization.pk)
                         .exists())
        self.assertFalse(Team.objects
                         .filter(pk=self.team.pk)
                         .exists())
        self.assertFalse(Project.objects
                         .filter(pk=self.project.pk)
                         .exists())
@override_settings(RTD_ALLOW_ORGANIZATIONS=True)
class OrganizationInviteViewTests(RequestFactoryTestMixin, TestCase):

    """Tests for invite handling in views."""

    def setUp(self):
        self.owner = fixture.get(User)
        self.organization = fixture.get(
            Organization,
            owners=[self.owner],
        )
        self.team = fixture.get(Team, organization=self.organization)

    def tearDown(self):
        cache.clear()

    def test_redemption_by_authed_user(self):
        """A logged-in user redeeming an invite is attached to the team."""
        user = fixture.get(User)
        invite = fixture.get(
            TeamInvite, email=user.email, team=self.team,
            organization=self.organization,
        )
        team_member = fixture.get(
            TeamMember,
            invite=invite,
            member=None,
            team=self.team,
        )
        req = self.request(
            'get',
            '/organizations/invite/{}/redeem'.format(invite.hash),
            user=user,
        )
        view = public_views.UpdateOrganizationTeamMember.as_view()
        view(req, hash=invite.hash)

        # the membership now points at the user and the invite is consumed
        ret_teammember = TeamMember.objects.get(member=user)
        self.assertIsNone(ret_teammember.invite)
        self.assertEqual(ret_teammember, team_member)
        with self.assertRaises(TeamInvite.DoesNotExist):
            TeamInvite.objects.get(pk=invite.pk)

    def test_redemption_by_unauthed_user(self):
        """Redemption on triggers on user signup."""
        email = 'non-existant-9238723@example.com'
        with self.assertRaises(User.DoesNotExist):
            User.objects.get(email=email)
        invite = fixture.get(
            TeamInvite, email=email, team=self.team,
            organization=self.organization,
        )
        team_member = fixture.get(
            TeamMember,
            invite=invite,
            member=None,
            team=self.team,
        )
        req = self.request(
            'get',
            '/organizations/invite/{}/redeem'.format(invite.hash),
        )
        view = public_views.UpdateOrganizationTeamMember.as_view()
        view(req, hash=invite.hash)

        # anonymous redemption stashes the invite in the session instead
        self.assertEqual(team_member.invite, invite)
        self.assertIsNone(team_member.member)
        self.assertEqual(req.session['invite'], invite.pk)
        self.assertEqual(req.session['invite:allow_signup'], True)
        self.assertEqual(req.session['invite:email'], email)
        # This cookie makes the EmailAddress be verified after signing up with
        # the same email address the user was invited. This is done
        # automatically by django-allauth
        self.assertEqual(req.session['account_verified_email'], email)
        session = req.session

        # Test signup view
        req = self.request(
            'post',
            '/accounts/signup',
            data={
                'username': 'test-92867',
                'email': email,
                'password1': 'password',
                'password2': 'password',
                'confirmation_key': 'foo',
            },
            session=session,
        )
        resp = SignupView.as_view()(req)
        self.assertEqual(resp.status_code, 302)

        # signup consumed the invite and verified the email address
        ret_teammember = TeamMember.objects.get(member__email=email)
        self.assertIsNone(ret_teammember.invite)
        self.assertEqual(ret_teammember, team_member)
        with self.assertRaises(TeamInvite.DoesNotExist):
            TeamInvite.objects.get(pk=invite.pk)
        self.assertTrue(
            User.objects.get(email=email)
            .emailaddress_set.filter(verified=True)
            .exists()
        )

    def test_redemption_by_dulpicate_user(self):
        """Redeeming an invite for a user already on the team adds nothing."""
        user = fixture.get(User)
        invite = fixture.get(
            TeamInvite, email=user.email, team=self.team,
            organization=self.organization,
        )
        team_member_a = fixture.get(
            TeamMember,
            invite=None,
            member=user,
            team=self.team,
        )
        team_member_b = fixture.get(
            TeamMember,
            invite=invite,
            member=None,
            team=self.team,
        )
        self.assertEqual(TeamMember.objects.filter(member=user).count(), 1)
        req = self.request(
            'get',
            '/organizations/invite/{}/redeem'.format(invite.hash),
            user=user,
        )
        view = public_views.UpdateOrganizationTeamMember.as_view()
        view(req, hash=invite.hash)

        # still exactly one membership; the duplicate invite is removed
        self.assertEqual(TeamMember.objects.filter(member=user).count(), 1)
        self.assertEqual(TeamMember.objects.filter(invite=invite).count(), 0)
        with self.assertRaises(TeamInvite.DoesNotExist):
            TeamInvite.objects.get(pk=invite.pk)
@override_settings(RTD_ALLOW_ORGANIZATIONS=True)
class OrganizationSignupTestCase(TestCase):

    """Creating an organization through the signup form."""

    def tearDown(self):
        cache.clear()

    def test_organization_signup(self):
        self.assertEqual(Organization.objects.count(), 0)

        user = fixture.get(User)
        self.client.force_login(user)
        # NOTE(review): '<EMAIL>' looks like a dataset anonymization
        # placeholder, not a real fixture value -- verify upstream
        data = {
            'name': 'Testing Organization',
            'email': '<EMAIL>',
        }
        resp = self.client.post(reverse('organization_create'), data=data)

        self.assertEqual(Organization.objects.count(), 1)
        org = Organization.objects.first()
        self.assertEqual(org.name, 'Testing Organization')
        self.assertEqual(org.email, '<EMAIL>')
        self.assertRedirects(
            resp,
            reverse('organization_detail', kwargs={'slug': org.slug}),
        )
| StarcoderdataPython |
1986251 | import plotly.graph_objects as go
def return_graphs(df):
    '''Create the Plotly figures for the message-genre and category counts.

    Parameters
    ----------
    df : pandas.DataFrame
        Messages dataframe. Must contain 'genre' and 'message' columns,
        with one-hot category columns from index 4 onwards.

    Returns
    -------
    list
        List of dicts with 'data' and 'layout' keys, one per figure,
        suitable for ``plotly`` rendering.
    '''
    # extract data needed for visuals
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)

    # genre bar chart
    genres_graph = [go.Bar(x=genre_names, y=genre_counts)]
    genres_layout = {
        'title': 'Distribution of Message Genres',
        'yaxis': {'title': "Count"},
        'xaxis': {'title': "Genre"},
    }

    # extract count for each category occurance
    categ_counts = df[df.columns[4:]].sum().sort_values(ascending=True)
    categ_names = list(categ_counts.index)

    # horizontal category bar chart
    categ_graph = [go.Bar(x=categ_counts, y=categ_names, orientation='h')]
    categ_layout = {
        'title': 'Category Occurance Count',
        # FIX: the original dict literal declared 'yaxis' twice, so the
        # later {'dtick': 1} entry silently discarded the axis title
        'yaxis': {'title': 'Category', 'dtick': 1},
        'xaxis': {'title': 'Count'},
        # wide left margin so long category names fit
        "margin": {"pad": 12, "l": 160, "r": 40, "t": 80, "b": 40},
    }

    return [
        dict(data=genres_graph, layout=genres_layout),
        dict(data=categ_graph, layout=categ_layout),
    ]
5058211 | <filename>start1.py<gh_stars>1-10
#import libraries
import gym
import time
#create environment
environment = gym.make('CartPole-v0')

#reset environment
environment.reset()

#iterator for rendering the environment
for _ in range(100):
    time.sleep(0.05)
    environment.render()
    # take a random action; stop as soon as the episode reports completion
    _, _, completed, _ = environment.step(environment.action_space.sample())
    if completed:
        break;
4906026 | <filename>demo/demo.py
from gearbox.transmission.gears import *
from gearbox.standards.iso import Pitting as isoPitting
from gearbox.standards.iso import Bending as isoBending
from gearbox.standards.agma import Pitting as agmaPitting
from gearbox.standards.agma import Bending as agmaBending
from gearbox.export.export import *
from gearbox.optimization.addendum import *
# --- gear pair definition -------------------------------------------------
module = 2.5  # m
helix_angle = 0.0  # beta
pressure_angle = 20.0  # alpha

lubricant = Lubricant(
    name='Kiruna',
    v40=160
)

material = Material(
    name='AISI 2010',
    classification='NV(nitrocar)',
    sh_limit=1500.,
    sf_limit=460.,
    e=206000.,
    poisson=0.3,
    density=7.83e-6,
    brinell=286.6667
)

tool = Tool(
    ha_p=1,
    hf_p=1.25,
    rho_fp=0.38,
    x=0,
    rho_ao=0,
    delta_ao=0,
    nc=10.
)

pinion = Gear(
    profile=tool,
    material=material,
    z=22.,
    beta=helix_angle,
    alpha=pressure_angle,
    m=module,
    x=0.525,
    b=34.0,
    bs=34.0,
    sr=0.0,
    rz=3.67,
    precision_grade=6.0,
    shaft_diameter=35.0,
    schema=3.0,
    l=60.0,
    s=15.0,
    backlash=0.017,
    gear_crown=1,
    helix_modification=1,
    favorable_contact=True,
    gear_condition=1
)

gear = Gear(
    profile=tool,
    material=material,
    z=40.,
    m=module,
    beta=helix_angle,
    alpha=pressure_angle,
    x=-0.275,
    b=34.0,
    bs=34.0,
    sr=0.0,
    rz=3.67,
    precision_grade=6.0,
    shaft_diameter=50.0,
    schema=3.0,
    l=60.0,
    s=35.0,
    backlash=-0.017,
    gear_crown=1,
    helix_modification=1,
    favorable_contact=True,
    gear_condition=1
)

pair = [pinion, gear]

transmission = Transmission(
    gears=pair,
    lubricant=lubricant,
    rpm_in=1450.0,
    p=40.0,
    l=10000.0,
    gear_box_type=2,
    ka=1.3,
    sh_min=1,
    sf_min=1
)

# --- run the ISO / AGMA pitting and bending calculations ------------------
print ('========================================')
print ('ISO Pitting')
print (isoPitting(transmission=transmission).calculate())
print ('========================================')
print ('========================================')
print ('ISO Bending')
# NOTE(review): `.calculate` (no parentheses) prints the bound method, not
# its result -- likely a typo, verify against the gearbox API
print (isoBending(transmission=transmission).calculate)
print ('========================================')
print ('========================================')
print ('AGMA Pitting')
print (agmaPitting(transmission=transmission).calculate())
print ('========================================')
print ('========================================')
print ('AGMA Bending')
print (agmaBending(transmission=transmission).calculate())
print ('========================================')
print ('========================================')

# --- profile shift optimization -------------------------------------------
xoptim_bending = Optmization(transmission).bending()
xoptim_pitting_iso = Optmization(transmission).pitting(standard='ISO')
# xoptim_pitting_agma = Optmization(transmission).pitting(standard='AGMA')
print ('Profile shift optimization')
print ('x1=%s, x2=%s for minimum bending stress' % (xoptim_bending[0], xoptim_bending[1]))
print ('x1=%s, x2=%s for minimum contact stress using ISO standard' % (xoptim_pitting_iso[0], xoptim_pitting_iso[1]))
print ('========================================')

# output directory next to this script
output_folder = os.path.join(os.path.dirname(__file__), 'output')
try:
    os.mkdir(output_folder)
except:
    # NOTE(review): bare except hides every failure, not just an existing
    # directory -- consider narrowing to FileExistsError
    pass

# ===============================================
# EXPORT TO MATLAB/COMSOL SCRIPT
# ===============================================
# 2D model matlab-comsol export
# for 2D export type='2D' is optional because '2D' is the default output
comsol_transmission_model_name2d = 'transmission2d'
comsol_pinion_model_name2d = 'pinion2d'
comsol_wheel_model_name2d = 'wheel2d'
ExportGear(pinion).matlab_comsol(model_name=comsol_pinion_model_name2d, output_folder=output_folder, type='2D')
ExportGear(gear).matlab_comsol(model_name=comsol_wheel_model_name2d, output_folder=output_folder, type='2D')
ExportPair(pair).matlab_comsol(model_name=comsol_transmission_model_name2d, output_folder=output_folder, type='2D')

# 3D model matlab-comsol export
comsol_transmission_model_name3d = 'transmission3d'
comsol_pinion_model_name3d = 'pinion3d'
comsol_wheel_model_name3d = 'wheel3d'
ExportGear(pinion).matlab_comsol(model_name=comsol_pinion_model_name3d, output_folder=output_folder, type='3D')
ExportGear(gear).matlab_comsol(model_name=comsol_wheel_model_name3d, output_folder=output_folder, type='3D')
ExportPair(pair).matlab_comsol(model_name=comsol_transmission_model_name3d, output_folder=output_folder, type='3D')

# ===============================================
# EXPORT TO ABAQUS PYTHON
# ===============================================
# 2D model abaqus export
# for 2D export type='2D' is optional because '2D' is the default output
abaqus_transmission_model_name2d = 'transmission2d'
abaqus_pinion_model_name2d = 'pinion2d'
abaqus_wheel_model_name2d = 'wheel2d'
ExportGear(pinion).abaqus(model_name=abaqus_pinion_model_name2d, output_folder=output_folder, type='2D')
ExportGear(gear).abaqus(model_name=abaqus_wheel_model_name2d, output_folder=output_folder, type='2D')
ExportPair(pair).abaqus(model_name=abaqus_transmission_model_name2d, output_folder=output_folder, type='2D')

# 3D model abaqus export
abaqus_transmission_model_name3d = 'transmission3d'
abaqus_pinion_model_name3d = 'pinion3d'
abaqus_wheel_model_name3d = 'wheel3d'
ExportGear(pinion).abaqus(model_name=abaqus_pinion_model_name3d, output_folder=output_folder, type='3D')
ExportGear(gear).abaqus(model_name=abaqus_wheel_model_name3d, output_folder=output_folder, type='3D')
ExportPair(pair).abaqus(model_name=abaqus_transmission_model_name3d, output_folder=output_folder, type='3D')

# ===============================================
# EXPORT TO ANSYS/WOKRBENCH Script
# ===============================================
# 2D model abaqus export
# for 2D export type='2D' is optional because '2D' is the default output
ansys_transmission_model_name2d = 'transmission2d'
ansys_pinion_model_name2d = 'pinion2d'
ansys_wheel_model_name2d = 'wheel2d'
ExportGear(pinion).ansys(model_name=ansys_pinion_model_name2d, output_folder=output_folder, type='2D')
ExportGear(gear).ansys(model_name=ansys_wheel_model_name2d, output_folder=output_folder, type='2D')
ExportPair(pair).ansys(model_name=ansys_transmission_model_name2d, output_folder=output_folder, type='2D')
#
# 3D model ansys export
ansys_transmission_model_name3d = 'transmission3d'
ansys_pinion_model_name3d = 'pinion3d'
ansys_wheel_model_name3d = 'wheel3d'
ExportGear(pinion).ansys(model_name=ansys_pinion_model_name3d, output_folder=output_folder, type='3D')
ExportGear(gear).ansys(model_name=ansys_wheel_model_name3d, output_folder=output_folder, type='3D')
ExportPair(pair).ansys(model_name=ansys_transmission_model_name3d, output_folder=output_folder, type='3D')
| StarcoderdataPython |
5044279 | """Contains layer-related utility functions.
"""
import tensorflow as tf
from tensorflow.keras.layers import Concatenate, Flatten, Layer, Activation
from typing import List, Any
from .activation import Dice
def _concat(inputs: List, axis: int = -1) -> tf.Tensor:
    """Concatenate a list of tensors, short-circuiting a single input.

    Parameters
    ----------
    inputs : List
        List of input tensors.
    axis : int, optional
        Axis along which to concatenate, by default ``-1``.

    Returns
    -------
    tf.Tensor
        The lone input when ``len(inputs) == 1`` (a ``Concatenate`` layer
        requires at least two inputs), otherwise the concatenation of all
        inputs along ``axis``.
    """
    return inputs[0] if len(inputs) == 1 else Concatenate(axis=axis)(inputs)
def concat(
    dense_inputs: List, embd_inputs: List, axis: int = -1, keepdims: bool = False
) -> tf.Tensor:
    """Concatenate dense features and embeddings of sparse features together.

    Parameters
    ----------
    dense_inputs : List
        Dense features.
    embd_inputs : List
        Embedding of sparse features.
    axis : int, optional
        Concatenate axis, by default ``-1``.
    keepdims : bool, optional
        Whether to keep the input shapes (``True``) or flatten all inputs
        before concatenating (``False``), by default ``False``.

    Returns
    -------
    tf.Tensor
        Concatenated input.

    Raises
    ------
    ValueError
        If no tensor is provided.
    """
    if len(dense_inputs) + len(embd_inputs) == 0:
        raise ValueError("Number of inputs should be larger than 0")
    # Mixed case: both dense and sparse sides are present.
    if len(dense_inputs) > 0 and len(embd_inputs) > 0:
        dense = _concat(dense_inputs, axis)
        sparse = _concat(embd_inputs, axis)
        if not keepdims:
            dense = Flatten()(dense)
            sparse = Flatten()(sparse)
        # * Change dtype
        # Align dtypes before the final concatenation: the integer side is
        # promoted to the other side's dtype so Concatenate does not fail
        # on mixed dtypes.
        if dense.dtype != sparse.dtype:
            if dense.dtype.is_integer:
                dense = tf.cast(dense, sparse.dtype)
            else:
                sparse = tf.cast(sparse, dense.dtype)
        return _concat([dense, sparse], axis)
    # Only one side present: concatenate (and optionally flatten) that side.
    if len(dense_inputs) > 0:
        output = _concat(dense_inputs, axis)
        if not keepdims:
            output = Flatten()(output)
        return output
    if len(embd_inputs) > 0:
        output = _concat(embd_inputs, axis)
        if not keepdims:
            output = Flatten()(output)
        return output
def sampledsoftmaxloss(y_true, y_pred) -> Any:
    """Helper for sampled softmax loss.

    The per-example sampled-softmax loss values are expected to have been
    precomputed and passed in as ``y_pred``; ``y_true`` is accepted only to
    satisfy the Keras loss signature and is not used.

    Parameters
    ----------
    y_true
        Label (unused).
    y_pred
        Precomputed per-example loss values.

    Returns
    -------
    Any
        Mean of ``y_pred``.
    """
    mean_loss = tf.reduce_mean(y_pred)
    return mean_loss
def get_activation_layer(activation: str) -> Layer:
    """Build the activation layer named by *activation*.

    Parameters
    ----------
    activation : str
        Activation function name; ``"dice"`` selects the custom ``Dice``
        layer, anything else is forwarded to ``keras.layers.Activation``.

    Returns
    -------
    Layer
        The constructed activation layer.
    """
    return Dice() if activation == "dice" else Activation(activation)
| StarcoderdataPython |
4912123 | <filename>pytify/linux.py
# -*- coding: utf-8 -*-
import sys
import dbus
from pytifylib import Pytifylib
import time
class Linux(Pytifylib):
    """Linux backend: drives the Spotify desktop client over D-Bus (MPRIS2)."""

    def __init__(self):
        # Grab Spotify's MPRIS2 player interface from the session bus; if
        # Spotify is not running the bus lookup raises and we exit cleanly.
        try:
            self.interface = dbus.Interface(
                dbus.SessionBus().get_object(
                    'org.mpris.MediaPlayer2.spotify',
                    '/org/mpris/MediaPlayer2'
                ),
                'org.mpris.MediaPlayer2.Player'
            )
        except dbus.exceptions.DBusException:
            sys.exit('\n Some errors occured. Try restart or start Spotify. \n')

    def listen(self, index):
        """Start playback of the search-result item at *index*."""
        # print("Listening to", self._get_song_uri_at_index(index))
        self.interface.OpenUri(
            #"spotify:user:warnerbros.records:playlist:1nQVIhbFzbZrsCJ0WPlVuh"
            self._get_item_uri_at_index(index)
        )

    def next(self):
        # Skip to the next track.
        self.interface.Next()

    def prev(self):
        # Skip back to the previous track.
        self.interface.Previous()

    def play_pause(self):
        # Toggle between playing and paused.
        self.interface.PlayPause()

    def pause(self):
        # NOTE(review): this maps "pause" onto MPRIS Stop(), not Pause() —
        # confirm stopping (vs. pausing) is the intended behavior.
        self.interface.Stop()
| StarcoderdataPython |
6553117 | <filename>tests/test_cmd.py
#!/usr/bin/env python
"""Tests for processing the cli."""
import os
import unittest
from tally_ho import cmd, tally_ho
class TestCLICmds(unittest.TestCase):
    """Integration tests for cmd.process_cli_cmds against a throw-away SQLite DB."""

    def setUp(self):
        # Fresh database file per test; removed again in tearDown.
        self.th = tally_ho.TallyHo('test.db')
    def tearDown(self):
        os.remove('test.db')
    def create_category(self, name):
        """Create a category named *name* and assert it got id 1."""
        create_cat_cmd = cmd.Command(
            "category", "create", None, name, None, self.th)
        category = cmd.process_cli_cmds(create_cat_cmd)
        self.assertEqual(category.id, 1)
        self.assertEqual(category.name, name)
    def test_cli_gets_all_categories(self):
        self.create_category("bugs")
        # NOTE(review): the action here is the *string* 'None', not the None
        # object — confirm cmd treats both as "list all".
        get_cat_cmd = cmd.Command("category", 'None', None, None, None, self.th)
        cat_list = cmd.process_cli_cmds(get_cat_cmd)
        first_cat = cat_list[0]
        self.assertEqual(len(cat_list), 1)
        self.assertEqual(first_cat.name, "bugs")
        create_cat_issues = cmd.Command("category",
                                        "create",
                                        None,
                                        "issues",
                                        None,
                                        self.th
                                        )
        category = cmd.process_cli_cmds(create_cat_issues)
        self.assertEqual(category.id, 2)
        self.assertEqual(category.name, "issues")
        get_cat_cmd = cmd.Command("category", 'None', None, None, None, self.th)
        cat_list = cmd.process_cli_cmds(get_cat_cmd)
        second_cat = cat_list[1]
        self.assertEqual(len(cat_list), 2)
        self.assertEqual(second_cat.name, "issues")
    def test_cli_deletes_category(self):
        self.create_category("bugs")
        delete_cat_cmd = cmd.Command("category",
                                     "delete",
                                     None,
                                     "bugs",
                                     None,
                                     self.th
                                     )
        cat_list = cmd.process_cli_cmds(delete_cat_cmd)
        self.assertEqual(len(cat_list), 0)
    def test_cli_creates_tally(self):
        create_cat_cmd = cmd.Command("category",
                                     "create",
                                     None,
                                     "bugs",
                                     None,
                                     self.th
                                     )
        category = cmd.process_cli_cmds(create_cat_cmd)
        self.assertEqual(category.id, 1)
        self.assertEqual(category.name, "bugs")
        create_tally_cmd = cmd.Command("tally",
                                       "create",
                                       "stuck deployments",
                                       "bugs",
                                       None,
                                       self.th
                                       )
        tally = cmd.process_cli_cmds(create_tally_cmd)
        self.assertEqual(tally.id, 1)
        self.assertEqual(tally.name, "stuck deployments")
        self.assertEqual(tally.category, category.id)
        # A freshly created tally starts with a count of 1.
        self.assertEqual(tally.count, 1)
    def test_cli_gets_single_tally(self):
        self.create_category("bugs")
        create_tally_cmd = cmd.Command(
            "tally", "create", "stuck deployments", "bugs", None, self.th)
        tally = cmd.process_cli_cmds(create_tally_cmd)
        get_tally_cmd = cmd.Command("tally",
                                    "get",
                                    "stuck deployments",
                                    "bugs",
                                    None,
                                    self.th
                                    )
        tally_result = cmd.process_cli_cmds(get_tally_cmd)
        self.assertEqual(tally_result.id, tally.id)
        self.assertEqual(tally_result.name, tally.name)
        self.assertEqual(tally_result.category, tally.category)
        self.assertEqual(tally_result.count, tally.count)
    def test_cli_gets_all_tallies(self):
        self.create_category("bugs")
        create_tally_cmd = cmd.Command(
            "tally", "create", "stuck deployments", "bugs", None, self.th)
        cmd.process_cli_cmds(create_tally_cmd)
        create_tally2_cmd = cmd.Command(
            "tally", "create", "old database", "bugs", None, self.th)
        cmd.process_cli_cmds(create_tally2_cmd)
        get_tallies_cmd = cmd.Command(
            "tally", "list", None, None, None, self.th)
        tally2_result = cmd.process_cli_cmds(get_tallies_cmd)
        self.assertEqual(len(tally2_result), 2)
    def test_cli_deletes_tally(self):
        self.create_category("bugs")
        create_tally_cmd = cmd.Command(
            "tally", "create", "stuck deployments", "bugs", None, self.th)
        cmd.process_cli_cmds(create_tally_cmd)
        tallies = self.th.get_tallies()
        self.assertEqual(len(tallies), 1)
        delete_tally_cmd = cmd.Command("tally",
                                       "delete",
                                       "stuck deployments",
                                       "bugs",
                                       None,
                                       self.th
                                       )
        cmd.process_cli_cmds(delete_tally_cmd)
        tallies = self.th.get_tallies()
        self.assertEqual(len(tallies), 0)
    def test_cat_fmt_ouput(self):
        # NOTE(review): method name has a typo ("ouput" -> "output").
        create_cat_cmd = cmd.Command("category",
                                     "create",
                                     None,
                                     "bugs",
                                     None,
                                     self.th
                                     )
        category = cmd.process_cli_cmds(create_cat_cmd)
        category = cmd.fmt_output(category)
        self.assertIsInstance(category, list)
        self.assertEqual(len(category), 1)
        self.assertEqual(category[0][0], 1)
        self.assertEqual(category[0][1], "bugs")
        create_cat_cmd = cmd.Command("category",
                                     "create",
                                     None,
                                     "issues",
                                     None,
                                     self.th
                                     )
        category = cmd.process_cli_cmds(create_cat_cmd)
        category = cmd.fmt_output(category)
        self.assertIsInstance(category, list)
        self.assertEqual(len(category), 1)
        self.assertEqual(category[0][0], 2)
        self.assertEqual(category[0][1], "issues")
        get_cat_cmd = cmd.Command("category", 'None', None, None, None, self.th)
        cat_list = cmd.process_cli_cmds(get_cat_cmd)
        # NOTE(review): cat_list is indexed like fmt_output rows here, but
        # fmt_output is never called on it; elsewhere (test_cli_gets_all_
        # categories) the same call yields objects with .name — verify.
        self.assertIsInstance(cat_list, list)
        self.assertEqual(len(cat_list), 2)
        self.assertEqual(cat_list[0][0], 1)
        self.assertEqual(cat_list[0][1], "bugs")
        self.assertEqual(cat_list[1][0], 2)
        self.assertEqual(cat_list[1][1], "issues")
    def test_tally_fmt_ouput(self):
        # NOTE(review): method name has a typo ("ouput" -> "output").
        self.create_category("bugs")
        create_tally_cmd = cmd.Command("tally",
                                       "create",
                                       "stuck deployments",
                                       "bugs",
                                       None,
                                       self.th
                                       )
        cmd.process_cli_cmds(create_tally_cmd)
        get_tally_cmd = cmd.Command("tally",
                                    "get",
                                    "stuck deployments",
                                    "bugs",
                                    None,
                                    self.th
                                    )
        tally = cmd.process_cli_cmds(get_tally_cmd)
        tally = cmd.fmt_output(tally)
        self.assertIsInstance(tally, list)
        self.assertEqual(len(tally), 1)
        self.assertEqual(tally[0][0], 1)
        self.assertEqual(tally[0][1], "stuck deployments")
        self.assertEqual(tally[0][2], 1)
        self.assertEqual(tally[0][3], 1)
        create_tally_cmd2 = cmd.Command("tally",
                                        "create",
                                        "slow page load",
                                        "bugs",
                                        None,
                                        self.th
                                        )
        tally2 = cmd.process_cli_cmds(create_tally_cmd2)
        tally2 = cmd.fmt_output(tally2)
        self.assertIsInstance(tally2, list)
        self.assertEqual(len(tally2), 1)
        self.assertEqual(tally2[0][0], 2)
        self.assertEqual(tally2[0][1], "slow page load")
        self.assertEqual(tally2[0][2], 1)
        get_tallies_cmd = cmd.Command(
            "tally", "list", None, None, None, self.th)
        tally_list = cmd.process_cli_cmds(get_tallies_cmd)
        # NOTE(review): tally_list is indexed like fmt_output rows without a
        # fmt_output call — verify process_cli_cmds("list") formats rows.
        self.assertIsInstance(tally_list, list)
        self.assertEqual(len(tally_list), 2)
        self.assertEqual(tally_list[0][0], 1)
        self.assertEqual(tally_list[0][1], "stuck deployments")
        self.assertEqual(tally_list[0][2], 1)
        self.assertEqual(tally_list[1][0], 2)
        self.assertEqual(tally_list[1][1], "slow page load")
        self.assertEqual(tally_list[1][2], 1)
class TestArgHandling(unittest.TestCase):
    """Tests for cmd.parse_args: CLI argv lists -> cmd.Command objects."""

    def setUp(self):
        # Reference Command fixtures built directly (not via parse_args);
        # the first test compares parse_args output against these.
        self.th = tally_ho.TallyHo('test.db')
        self.create_cat_cmd = cmd.Command("category", "create", None, "bugs", None, self.th)
        self.get_cat_cmd = cmd.Command("category", 'None', None, None, None, self.th)
        self.delete_cat_cmd = cmd.Command("category", "delete", None, "bugs", None, self.th)
        self.create_tally_cmd = cmd.Command("tally", "create", "stuck deployments", "bugs", None, self.th)
        self.get_tally_cmd = cmd.Command("tally", "get", "stuck deployments", "bugs", None, self.th)
        self.get_tallies_cmd = cmd.Command("tally", "list", None, None, None, self.th)
        self.delete_tally_cmd = cmd.Command("tally", "delete", "stuck deployments", "bugs", None,self.th)
    def tearDown(self):
        os.remove('test.db')
    def test_argparse_can_manage_categories(self):
        db = 'test.db'
        create_cat_args = ['category', 'create', '--category', 'bugs']
        create_cat_args2 = ['category', 'create', '--category', 'issues']
        create_cat_args3 = ['category', 'create', '--category', 'db backup']
        # parse_args should yield a Command equivalent to the setUp fixture.
        test_create_cat_cmd = cmd.parse_args(create_cat_args, db)
        self.assertIsInstance(test_create_cat_cmd, cmd.Command)
        self.assertIsInstance(test_create_cat_cmd.tally_ho, tally_ho.TallyHo)
        self.assertEqual(test_create_cat_cmd.item, self.create_cat_cmd.item)
        self.assertEqual(test_create_cat_cmd.action, self.create_cat_cmd.action)
        self.assertEqual(test_create_cat_cmd.category, self.create_cat_cmd.category)
        self.assertEqual(test_create_cat_cmd.quantity, self.create_cat_cmd.quantity)
        category = cmd.process_cli_cmds(test_create_cat_cmd)
        get_cat_args = ['category', '--category', 'bugs']
        test_get_cat_cmd = cmd.parse_args(get_cat_args, db)
        test_cat = cmd.process_cli_cmds(test_get_cat_cmd)[0]
        self.assertEqual(test_cat.id, category.id)
        self.assertEqual(test_cat.name, category.name)
        create_cat_cmd2 = cmd.parse_args(create_cat_args2, db)
        cmd.process_cli_cmds(create_cat_cmd2)
        get_all_cat_args = ['category']
        test_get_all_cat_cmd = cmd.parse_args(get_all_cat_args, db)
        cat_list = cmd.process_cli_cmds(test_get_all_cat_cmd)
        test_cat2 = cat_list[1]
        self.assertEqual(len(cat_list), 2)
        self.assertEqual(test_cat2.id, 2)
        self.assertEqual(test_cat2.name, 'issues')
        create_cat_cmd3 = cmd.parse_args(create_cat_args3, db)
        cmd.process_cli_cmds(create_cat_cmd3)
        cat_list2 = cmd.process_cli_cmds(test_get_all_cat_cmd)
        test_cat3 = cat_list2[2]
        self.assertEqual(len(cat_list2), 3)
        self.assertEqual(test_cat3.id, 3)
        # Deleting "issues" (id 2) leaves ids 1 and 3 behind.
        delete_cat_args = ['category', 'delete', '--category', 'issues']
        delete_cat_cmd = cmd.parse_args(delete_cat_args, db)
        cmd.process_cli_cmds(delete_cat_cmd)
        cat_list = cmd.process_cli_cmds(test_get_all_cat_cmd)
        self.assertEqual(len(cat_list), 2)
        self.assertEqual(cat_list[0].id, 1)
        self.assertEqual(cat_list[1].id, 3)
    def test_argparse_can_manage_tallies(self):
        db = 'test.db'
        self.th.create_category('bugs')
        create_tally_args = ['tally', 'create', '--tally', 'stuck deployments', '--category', 'bugs']
        create_tally_args2 = ['tally', 'create', '--tally', 'slow ui', '--category', 'bugs']
        create_tally_args3 = ['tally', 'create', '--tally', '404 error', '--category', 'bugs']
        delete_tally_args = ['tally', 'delete', '--tally', 'slow ui', '--category', 'bugs']
        get_tally_args = ['tally', 'get', '--tally', 'slow ui', '--category', 'bugs']
        get_all_tallies_args = ['tally', 'list']
        create_tally_cmd = cmd.parse_args(create_tally_args, db)
        tally = cmd.process_cli_cmds(create_tally_cmd)
        self.assertEqual(tally.id, 1)
        self.assertEqual(tally.name, "stuck deployments")
        self.assertEqual(tally.count, 1)
        create_tally_cmd2 = cmd.parse_args(create_tally_args2, db)
        cmd.process_cli_cmds(create_tally_cmd2)
        create_tally_cmd3 = cmd.parse_args(create_tally_args3, db)
        cmd.process_cli_cmds(create_tally_cmd3)
        get_tally_cmd = cmd.parse_args(get_tally_args, db)
        tally_result = cmd.process_cli_cmds(get_tally_cmd)
        self.assertEqual(tally_result.id, 2)
        # Deleting "slow ui" leaves the other two tallies.
        del_tally_cmd = cmd.parse_args(delete_tally_args, db)
        cmd.process_cli_cmds(del_tally_cmd)
        get_all_tallies_cmd = cmd.parse_args(get_all_tallies_args, db)
        tally_result = cmd.process_cli_cmds(get_all_tallies_cmd)
        self.assertEqual(len(tally_result), 2)
11251672 | <reponame>Aniiket7/Assignments<filename>Programming Assignment/p3.py
def maxArea(A, Len):
    """Return the largest "container" area formed by two walls of the array.

    Each ``A[i]`` is a wall height at position ``i``; the water held between
    positions ``i < j`` is ``min(A[i], A[j]) * (j - i)``.  Replaces the
    original O(n^2) pairwise comparison with the classic O(n) two-pointer
    scan: only moving the pointer at the *shorter* wall inward can ever
    increase the area, so that pointer is advanced each step.

    Parameters:
        A: sequence of non-negative wall heights.
        Len: number of elements of A to consider (kept for interface
            compatibility; normally len(A)).

    Returns:
        The maximum area, or 0 when fewer than two walls are given.
    """
    left, right = 0, Len - 1
    best = 0
    while left < right:
        best = max(best, min(A[left], A[right]) * (right - left))
        # The taller wall cannot limit a larger container, so move the
        # shorter side inward.
        if A[left] < A[right]:
            left += 1
        else:
            right -= 1
    return best
# Read whitespace-separated integer heights, then print the maximum area.
a = [int(n) for n in input("Enter an array: ").split()]
len1 = len(a)
print(maxArea(a, len1))
| StarcoderdataPython |
3276847 | <filename>tests/test_models.py
# Copyright 2016, 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test cases for Customer Model
Test cases can be run with:
nosetests
coverage report -m
While debugging just these tests it's convenient to use this:
nosetests --stop tests/test_models.py:TestPetModel
"""
import os
import logging
import unittest
from werkzeug.exceptions import NotFound
from service.models import Customer, DataValidationError, db
from service import app
from .factories import CustomerFactory
DATABASE_URI = os.getenv(
"DATABASE_URI", "postgresql://postgres:postgres@localhost:5432/testdb"
)
######################################################################
# C U S T O M E R M O D E L T E S T C A S E S
######################################################################
# pylint: disable=too-many-public-methods
class TestCustomerModel(unittest.TestCase):
    """Test Cases for Customer Model"""

    @classmethod
    def setUpClass(cls):
        """This runs once before the entire test suite"""
        app.config["TESTING"] = True
        app.config["DEBUG"] = False
        app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
        app.logger.setLevel(logging.CRITICAL)
        Customer.init_db(app)
    @classmethod
    def tearDownClass(cls):
        """This runs once after the entire test suite"""
        db.session.close()
    def setUp(self):
        """This runs before each test"""
        db.drop_all()  # clean up the last tests
        db.create_all()  # make our sqlalchemy tables
    def tearDown(self):
        """This runs after each test"""
        db.session.remove()
        db.drop_all()
    ######################################################################
    #  H E L P E R   M E T H O D S
    ######################################################################
    def _create_customer(self):
        """ Creates an unsaved Customer from a Factory (id stays None). """
        fake_customer = CustomerFactory()
        customer = Customer(
            first_name = fake_customer.first_name,
            last_name = fake_customer.last_name,
            email = fake_customer.email,
            phone_number = fake_customer.phone_number
        )
        self.assertTrue(customer != None)
        self.assertEqual(customer.id, None)
        return customer
    ######################################################################
    #  T E S T   C A S E S
    ######################################################################
    def test_create_a_customer(self):
        """ Create a Customer and assert that it exists """
        fake_customer = CustomerFactory()
        customer = Customer(
            first_name = fake_customer.first_name,
            last_name = fake_customer.last_name,
            email = fake_customer.email,
            phone_number = fake_customer.phone_number
        )
        self.assertTrue(customer != None)
        self.assertEqual(customer.id, None)
        self.assertEqual(customer.first_name, fake_customer.first_name)
        self.assertEqual(customer.last_name, fake_customer.last_name)
        self.assertEqual(customer.email, fake_customer.email)
        self.assertEqual(customer.phone_number, fake_customer.phone_number)
    def test_add_a_customer(self):
        """ Creates a customer and adds it to the database """
        customers = Customer.all()
        self.assertEqual(customers, [])
        customer = self._create_customer()
        customer.create()
        # Assert that it was assigned an id and shows up in the database
        self.assertEqual(customer.id, 1)
        customers = Customer.all()
        self.assertEqual(len(customers),1)
    def test_update_customer(self):
        """ Update a customer """
        customer = self._create_customer()
        customer.create()
        # Assert that it was assigned an id and shows in the database
        self.assertEqual(customer.id, 1)
        # Fetch it back
        customer = Customer.find(customer.id)
        customer.email = "<EMAIL>"
        customer.save()
        # Fetch it back again to prove the change was persisted
        customer = Customer.find(customer.id)
        self.assertEqual(customer.email, "<EMAIL>")
    def test_delete_a_customer(self):
        """ Delete an account from the database """
        customers = Customer.all()
        self.assertEqual(customers, [])
        customer = self._create_customer()
        customer.create()
        # Assert that it was assigned an id and shows up in the database
        self.assertEqual(customer.id, 1)
        customers = Customer.all()
        self.assertEqual(len(customers), 1)
        customer = customers[0]
        customer.delete()
        customers = Customer.all()
        self.assertEqual(len(customers), 0)
    def test_find_or_404(self):
        """ Find or throw 404 error """
        customer = self._create_customer()
        customer.create()
        # Assert that it was assigned an id and shows up in the database
        self.assertEqual(customer.id, 1)
        # Fetch it back
        customer = Customer.find_or_404(customer.id)
        self.assertEqual(customer.id, 1)
    def test_find_by_first_name(self):
        """ Find by first name """
        customer = self._create_customer()
        customer.create()
        # Fetch it back by name
        same_customer = Customer.find_by_first_name(customer.first_name)[0]
        self.assertEqual(same_customer.id, customer.id)
        self.assertEqual(same_customer.first_name, customer.first_name)
    def test_find_by_last_name(self):
        """ Find by last name """
        customer = self._create_customer()
        customer.create()
        # Fetch it back by name
        same_customer = Customer.find_by_last_name(customer.last_name)[0]
        self.assertEqual(same_customer.id, customer.id)
        self.assertEqual(same_customer.last_name, customer.last_name)
    def test_serialize_a_customer(self):
        """ Serialize a customer """
        customer = self._create_customer()
        serial_customer = customer.serialize()
        self.assertEqual(serial_customer['id'], customer.id)
        self.assertEqual(serial_customer['first_name'], customer.first_name)
        self.assertEqual(serial_customer['last_name'], customer.last_name)
        self.assertEqual(serial_customer['email'], customer.email)
        self.assertEqual(serial_customer['phone_number'], customer.phone_number)
    def test_deserialize_a_customer(self):
        """ Deserialize a customer """
        customer = self._create_customer()
        serial_customer = customer.serialize()
        new_customer = Customer()
        new_customer.deserialize(serial_customer)
        self.assertEqual(new_customer.id, customer.id)
        self.assertEqual(new_customer.first_name, customer.first_name)
        self.assertEqual(new_customer.last_name, customer.last_name)
        self.assertEqual(new_customer.email, customer.email)
        self.assertEqual(new_customer.phone_number, customer.phone_number)
    def test_deserialize_with_key_error(self):
        """ Deserialize a customer with a KeyError """
        customer = Customer()
        self.assertRaises(DataValidationError, customer.deserialize, {})
    def test_deserialize_with_type_error(self):
        """ Deserialize a customer with a TypeError """
        customer = Customer()
        self.assertRaises(DataValidationError, customer.deserialize, [])
8071362 | <gh_stars>0
x=5
# NOTE(review): comparing the int 5 to the string "test" is always False in
# Python 3, so this print never runs — confirm the intended comparison.
if x=="test":
    print(x)
| StarcoderdataPython |
5174998 | # Generated by Django 3.1 on 2020-09-11 06:48
from django.db import migrations
class Migration(migrations.Migration):
    # Merge-style migration: depends on two leaf nodes (0018 and 0019) of
    # the "verification" app and deliberately performs no schema operations.

    dependencies = [
        ("verification", "0019_auto_20200909_0752"),
        ("verification", "0018_auto_20200910_0929"),
    ]
    operations = []
| StarcoderdataPython |
6689447 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import datetime
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.forms import *
from django.forms.widgets import RadioFieldRenderer
from django.utils import formats
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import activate, deactivate
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import python_2_unicode_compatible
from ..models import Article
class FormsWidgetTestCase(TestCase):
# Each Widget class corresponds to an HTML form widget. A Widget knows how to
# render itself, given a field name and some data. Widgets don't perform
# validation.
def test_textinput(self):
w = TextInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="text" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="text" name="email" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>'), '<input type="text" name="email" value="<EMAIL>" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="text" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>', attrs={'class': 'fun'}), '<input type="text" name="email" value="<EMAIL>" class="fun" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="text" name="email" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = TextInput(attrs={'class': 'fun', 'type': 'email'})
self.assertHTMLEqual(w.render('email', ''), '<input type="email" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>'), '<input type="email" class="fun" value="<EMAIL>" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = TextInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="text" class="special" name="email" />')
# 'attrs' can be safe-strings if needed)
w = TextInput(attrs={'onBlur': mark_safe("function('foo')")})
self.assertHTMLEqual(w.render('email', ''), '<input onBlur="function(\'foo\')" type="text" name="email" />')
def test_passwordinput(self):
w = PasswordInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', 'secret'), '<input type="password" name="email" />')
# The render_value argument lets you specify whether the widget should render
# its value. For security reasons, this is off by default.
w = PasswordInput(render_value=True)
self.assertHTMLEqual(w.render('email', ''), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>'), '<input type="password" name="email" value="<EMAIL>" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="password" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>', attrs={'class': 'fun'}), '<input type="password" name="email" value="<EMAIL>" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = PasswordInput(attrs={'class': 'fun'}, render_value=True)
self.assertHTMLEqual(w.render('email', ''), '<input type="password" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>'), '<input type="password" class="fun" value="<EMAIL>" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = PasswordInput(attrs={'class': 'pretty'}, render_value=True)
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="password" class="special" name="email" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="password" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />')
def test_hiddeninput(self):
w = HiddenInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="hidden" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="hidden" name="email" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>'), '<input type="hidden" name="email" value="<EMAIL>" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="hidden" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>', attrs={'class': 'fun'}), '<input type="hidden" name="email" value="<EMAIL>" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = HiddenInput(attrs={'class': 'fun'})
self.assertHTMLEqual(w.render('email', ''), '<input type="hidden" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>'), '<input type="hidden" class="fun" value="<EMAIL>" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = HiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="hidden" class="special" name="email" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="hidden" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = HiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="hidden" class="special" name="email" />')
# Boolean values are rendered to their string forms ("True" and "False").
w = HiddenInput()
self.assertHTMLEqual(w.render('get_spam', False), '<input type="hidden" name="get_spam" value="False" />')
self.assertHTMLEqual(w.render('get_spam', True), '<input type="hidden" name="get_spam" value="True" />')
def test_multiplehiddeninput(self):
w = MultipleHiddenInput()
self.assertHTMLEqual(w.render('email', []), '')
self.assertHTMLEqual(w.render('email', None), '')
self.assertHTMLEqual(w.render('email', ['<EMAIL>']), '<input type="hidden" name="email" value="<EMAIL>" />')
self.assertHTMLEqual(w.render('email', ['some "quoted" & ampersanded value']), '<input type="hidden" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', ['<EMAIL>', '<EMAIL>']), '<input type="hidden" name="email" value="<EMAIL>" />\n<input type="hidden" name="email" value="<EMAIL>" />')
self.assertHTMLEqual(w.render('email', ['<EMAIL>'], attrs={'class': 'fun'}), '<input type="hidden" name="email" value="<EMAIL>" class="fun" />')
self.assertHTMLEqual(w.render('email', ['<EMAIL>', '<EMAIL>'], attrs={'class': 'fun'}), '<input type="hidden" name="email" value="<EMAIL>" class="fun" />\n<input type="hidden" name="email" value="<EMAIL>" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = MultipleHiddenInput(attrs={'class': 'fun'})
self.assertHTMLEqual(w.render('email', []), '')
self.assertHTMLEqual(w.render('email', ['<EMAIL>']), '<input type="hidden" class="fun" value="<EMAIL>" name="email" />')
self.assertHTMLEqual(w.render('email', ['<EMAIL>', '<EMAIL>']), '<input type="hidden" class="fun" value="<EMAIL>" name="email" />\n<input type="hidden" class="fun" value="<EMAIL>" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = MultipleHiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', ['<EMAIL>'], attrs={'class': 'special'}), '<input type="hidden" class="special" value="<EMAIL>" name="email" />')
self.assertHTMLEqual(w.render('email', ['ŠĐĆŽćžšđ'], attrs={'class': 'fun'}), '<input type="hidden" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = MultipleHiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', ['<EMAIL>'], attrs={'class': 'special'}), '<input type="hidden" class="special" value="<EMAIL>" name="email" />')
# Each input gets a separate ID.
w = MultipleHiddenInput()
self.assertHTMLEqual(w.render('letters', list('abc'), attrs={'id': 'hideme'}), '<input type="hidden" name="letters" value="a" id="hideme_0" />\n<input type="hidden" name="letters" value="b" id="hideme_1" />\n<input type="hidden" name="letters" value="c" id="hideme_2" />')
def test_fileinput(self):
# FileInput widgets don't ever show the value, because the old value is of no use
# if you are updating the form or if the provided file generated an error.
w = FileInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>'), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>', attrs={'class': 'fun'}), '<input type="file" name="email" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = FileInput(attrs={'class': 'fun'})
self.assertHTMLEqual(w.render('email', ''), '<input type="file" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', '<EMAIL>'), '<input type="file" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="file" class="fun" name="email" />')
def test_textarea(self):
w = Textarea()
self.assertHTMLEqual(w.render('msg', ''), '<textarea rows="10" cols="40" name="msg"></textarea>')
self.assertHTMLEqual(w.render('msg', None), '<textarea rows="10" cols="40" name="msg"></textarea>')
self.assertHTMLEqual(w.render('msg', 'value'), '<textarea rows="10" cols="40" name="msg">value</textarea>')
self.assertHTMLEqual(w.render('msg', 'some "quoted" & ampersanded value'), '<textarea rows="10" cols="40" name="msg">some "quoted" & ampersanded value</textarea>')
self.assertHTMLEqual(w.render('msg', mark_safe('pre "quoted" value')), '<textarea rows="10" cols="40" name="msg">pre "quoted" value</textarea>')
self.assertHTMLEqual(w.render('msg', 'value', attrs={'class': 'pretty', 'rows': 20}), '<textarea class="pretty" rows="20" cols="40" name="msg">value</textarea>')
# You can also pass 'attrs' to the constructor:
w = Textarea(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('msg', ''), '<textarea rows="10" cols="40" name="msg" class="pretty"></textarea>')
self.assertHTMLEqual(w.render('msg', 'example'), '<textarea rows="10" cols="40" name="msg" class="pretty">example</textarea>')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = Textarea(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('msg', '', attrs={'class': 'special'}), '<textarea rows="10" cols="40" name="msg" class="special"></textarea>')
self.assertHTMLEqual(w.render('msg', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<textarea rows="10" cols="40" name="msg" class="fun">\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111</textarea>')
    def test_checkboxinput(self):
        """
        CheckboxInput: the truthiness of the value controls the ``checked``
        attribute, a custom ``check_test`` callable can override that, and
        ``value_from_datadict`` normalizes form data to a bool.
        """
        w = CheckboxInput()
        # The sentinel values '', None, False render unchecked with no value attr.
        self.assertHTMLEqual(w.render('is_cool', ''), '<input type="checkbox" name="is_cool" />')
        self.assertHTMLEqual(w.render('is_cool', None), '<input type="checkbox" name="is_cool" />')
        self.assertHTMLEqual(w.render('is_cool', False), '<input type="checkbox" name="is_cool" />')
        self.assertHTMLEqual(w.render('is_cool', True), '<input checked="checked" type="checkbox" name="is_cool" />')
        # Using any value that's not in ('', None, False, True) will check the checkbox
        # and set the 'value' attribute.
        self.assertHTMLEqual(w.render('is_cool', 'foo'), '<input checked="checked" type="checkbox" name="is_cool" value="foo" />')
        self.assertHTMLEqual(w.render('is_cool', False, attrs={'class': 'pretty'}), '<input type="checkbox" name="is_cool" class="pretty" />')
        # regression for #17114: 0 and 1 are checked (they're not in the sentinel set)
        self.assertHTMLEqual(w.render('is_cool', 0), '<input checked="checked" type="checkbox" name="is_cool" value="0" />')
        self.assertHTMLEqual(w.render('is_cool', 1), '<input checked="checked" type="checkbox" name="is_cool" value="1" />')
        # You can also pass 'attrs' to the constructor:
        w = CheckboxInput(attrs={'class': 'pretty'})
        self.assertHTMLEqual(w.render('is_cool', ''), '<input type="checkbox" class="pretty" name="is_cool" />')
        # 'attrs' passed to render() get precedence over those passed to the constructor:
        w = CheckboxInput(attrs={'class': 'pretty'})
        self.assertHTMLEqual(w.render('is_cool', '', attrs={'class': 'special'}), '<input type="checkbox" class="special" name="is_cool" />')
        # You can pass 'check_test' to the constructor. This is a callable that takes the
        # value and returns True if the box should be checked.
        w = CheckboxInput(check_test=lambda value: value.startswith('hello'))
        self.assertHTMLEqual(w.render('greeting', ''), '<input type="checkbox" name="greeting" />')
        self.assertHTMLEqual(w.render('greeting', 'hello'), '<input checked="checked" type="checkbox" name="greeting" value="hello" />')
        self.assertHTMLEqual(w.render('greeting', 'hello there'), '<input checked="checked" type="checkbox" name="greeting" value="hello there" />')
        self.assertHTMLEqual(w.render('greeting', 'hello & goodbye'), '<input checked="checked" type="checkbox" name="greeting" value="hello &amp; goodbye" />')
        # Ticket #17888: calling check_test shouldn't swallow exceptions
        # (True has no .startswith, so the AttributeError must propagate).
        with self.assertRaises(AttributeError):
            w.render('greeting', True)
        # The CheckboxInput widget will return False if the key is not found in the data
        # dictionary (because HTML form submission doesn't send any result for unchecked
        # checkboxes).
        self.assertFalse(w.value_from_datadict({}, {}, 'testing'))
        # A present key yields a genuine bool (True), not the raw string '0'.
        value = w.value_from_datadict({'testing': '0'}, {}, 'testing')
        self.assertIsInstance(value, bool)
        self.assertTrue(value)
    def test_select(self):
        """
        Select: selection matching (values compared via str()), choices from
        the constructor and/or render() (and their concatenation), arbitrary
        iterables and generators as choices, escaping, and one level of
        choice nesting rendered as <optgroup>.
        """
        w = Select()
        self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select name="beatle">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # If the value is None, none of the options are selected:
        self.assertHTMLEqual(w.render('beatle', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # If the value corresponds to a label (but not to an option value), none of the options are selected:
        self.assertHTMLEqual(w.render('beatle', 'John', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # Only one option can be selected, see #8103:
        self.assertHTMLEqual(w.render('choices', '0', choices=(('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('0', 'extra'))), """<select name="choices">
<option value="0" selected="selected">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="0">extra</option>
</select>""")
        # The value is compared to its str():
        self.assertHTMLEqual(w.render('num', 2, choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        self.assertHTMLEqual(w.render('num', '2', choices=[(1, 1), (2, 2), (3, 3)]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        self.assertHTMLEqual(w.render('num', 2, choices=[(1, 1), (2, 2), (3, 3)]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        # The 'choices' argument can be any iterable:
        from itertools import chain
        def get_choices():
            # Generator of (value, label) pairs: (0, 0) .. (4, 4).
            for i in range(5):
                yield (i, i)
        self.assertHTMLEqual(w.render('num', 2, choices=get_choices()), """<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>""")
        things = ({'id': 1, 'name': 'And Boom'}, {'id': 2, 'name': 'One More Thing!'})
        class SomeForm(Form):
            # choices built from a chain() iterator: an empty "---------"
            # option followed by one option per item in `things`.
            somechoice = ChoiceField(choices=chain((('', '-'*9),), [(thing['id'], thing['name']) for thing in things]))
        f = SomeForm()
        self.assertHTMLEqual(f.as_table(), '<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="" selected="selected">---------</option>\n<option value="1">And Boom</option>\n<option value="2">One More Thing!</option>\n</select></td></tr>')
        # NOTE: rendered a second time on purpose -- presumably guarding
        # against the chain() choices iterable being consumed by the first
        # render; verify against the ChoiceField implementation.
        self.assertHTMLEqual(f.as_table(), '<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="" selected="selected">---------</option>\n<option value="1">And Boom</option>\n<option value="2">One More Thing!</option>\n</select></td></tr>')
        f = SomeForm({'somechoice': 2})
        self.assertHTMLEqual(f.as_table(), '<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="">---------</option>\n<option value="1">And Boom</option>\n<option value="2" selected="selected">One More Thing!</option>\n</select></td></tr>')
        # You can also pass 'choices' to the constructor:
        w = Select(choices=[(1, 1), (2, 2), (3, 3)])
        self.assertHTMLEqual(w.render('num', 2), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        # If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
        self.assertHTMLEqual(w.render('num', 2, choices=[(4, 4), (5, 5)]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>""")
        # Choices are escaped correctly (mark_safe suppresses escaping)
        self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you &gt; me')))), """<select name="escape">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="bad">you &amp; me</option>
<option value="good">you &gt; me</option>
</select>""")
        # Unicode choices are correctly rendered as HTML
        self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]), '<select name="email">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" selected="selected">\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</option>\n<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111</option>\n</select>')
        # If choices is passed to the constructor and is a generator, it can be iterated
        # over multiple times without getting consumed:
        w = Select(choices=get_choices())
        self.assertHTMLEqual(w.render('num', 2), """<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>""")
        self.assertHTMLEqual(w.render('num', 3), """<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3" selected="selected">3</option>
<option value="4">4</option>
</select>""")
        # Choices can be nested one level in order to create HTML optgroups:
        w.choices=(('outer1', 'Outer 1'), ('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))))
        self.assertHTMLEqual(w.render('nestchoice', None), """<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group &quot;1&quot;">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        self.assertHTMLEqual(w.render('nestchoice', 'outer1'), """<select name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group &quot;1&quot;">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        self.assertHTMLEqual(w.render('nestchoice', 'inner1'), """<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group &quot;1&quot;">
<option value="inner1" selected="selected">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
def test_nullbooleanselect(self):
w = NullBooleanSelect()
self.assertTrue(w.render('is_cool', True), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', False), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', None), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', '2'), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', '3'), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
    def test_selectmultiple(self):
        """
        SelectMultiple: multiple selection (values compared via str()),
        invalid values silently ignored, choices from constructor and/or
        render(), iterables/generators as choices, escaping, and one level
        of nesting rendered as <optgroup>.
        """
        w = SelectMultiple()
        self.assertHTMLEqual(w.render('beatles', ['J'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        self.assertHTMLEqual(w.render('beatles', ['J', 'P'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        self.assertHTMLEqual(w.render('beatles', ['J', 'P', 'R'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R" selected="selected">Ringo</option>
</select>""")
        # If the value is None, none of the options are selected:
        self.assertHTMLEqual(w.render('beatles', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # If the value corresponds to a label (but not to an option value), none of the options are selected:
        self.assertHTMLEqual(w.render('beatles', ['John'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # Multiple options (with the same value) can be selected, see #8103:
        self.assertHTMLEqual(w.render('choices', ['0'], choices=(('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('0', 'extra'))), """<select multiple="multiple" name="choices">
<option value="0" selected="selected">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="0" selected="selected">extra</option>
</select>""")
        # If multiple values are given, but some of them are not valid, the valid ones are selected:
        self.assertHTMLEqual(w.render('beatles', ['J', 'G', 'foo'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G" selected="selected">George</option>
<option value="R">Ringo</option>
</select>""")
        # The value is compared to its str():
        self.assertHTMLEqual(w.render('nums', [2], choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        self.assertHTMLEqual(w.render('nums', ['2'], choices=[(1, 1), (2, 2), (3, 3)]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        self.assertHTMLEqual(w.render('nums', [2], choices=[(1, 1), (2, 2), (3, 3)]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        # The 'choices' argument can be any iterable:
        def get_choices():
            # Generator of (value, label) pairs: (0, 0) .. (4, 4).
            for i in range(5):
                yield (i, i)
        self.assertHTMLEqual(w.render('nums', [2], choices=get_choices()), """<select multiple="multiple" name="nums">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>""")
        # You can also pass 'choices' to the constructor:
        w = SelectMultiple(choices=[(1, 1), (2, 2), (3, 3)])
        self.assertHTMLEqual(w.render('nums', [2]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        # If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
        self.assertHTMLEqual(w.render('nums', [2], choices=[(4, 4), (5, 5)]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>""")
        # Choices are escaped correctly (mark_safe suppresses escaping)
        self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you &gt; me')))), """<select multiple="multiple" name="escape">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="bad">you &amp; me</option>
<option value="good">you &gt; me</option>
</select>""")
        # Unicode choices are correctly rendered as HTML
        self.assertHTMLEqual(w.render('nums', ['ŠĐĆŽćžšđ'], choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]), '<select multiple="multiple" name="nums">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" selected="selected">\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</option>\n<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111</option>\n</select>')
        # Choices can be nested one level in order to create HTML optgroups:
        w.choices = (('outer1', 'Outer 1'), ('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))))
        self.assertHTMLEqual(w.render('nestchoice', None), """<select multiple="multiple" name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group &quot;1&quot;">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        self.assertHTMLEqual(w.render('nestchoice', ['outer1']), """<select multiple="multiple" name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group &quot;1&quot;">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        self.assertHTMLEqual(w.render('nestchoice', ['inner1']), """<select multiple="multiple" name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group &quot;1&quot;">
<option value="inner1" selected="selected">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        # Selections may span the outer level and an optgroup member at once.
        self.assertHTMLEqual(w.render('nestchoice', ['outer1', 'inner2']), """<select multiple="multiple" name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group &quot;1&quot;">
<option value="inner1">Inner 1</option>
<option value="inner2" selected="selected">Inner 2</option>
</optgroup>
</select>""")
    def test_radioselect(self):
        """
        RadioSelect: rendering as a <ul> of radio inputs, value matching via
        str(), choices from constructor/render(), the RadioFieldRenderer
        customization hooks (iteration, index access, custom renderer
        classes), escaping, and attrs propagation to the individual inputs.
        """
        w = RadioSelect()
        self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>""")
        # If the value is None, none of the options are checked:
        self.assertHTMLEqual(w.render('beatle', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>""")
        # If the value corresponds to a label (but not to an option value), none of the options are checked:
        self.assertHTMLEqual(w.render('beatle', 'John', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>""")
        # The value is compared to its str():
        self.assertHTMLEqual(w.render('num', 2, choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
        self.assertHTMLEqual(w.render('num', '2', choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
        self.assertHTMLEqual(w.render('num', 2, choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
        # The 'choices' argument can be any iterable:
        def get_choices():
            # Generator of (value, label) pairs: (0, 0) .. (4, 4).
            for i in range(5):
                yield (i, i)
        self.assertHTMLEqual(w.render('num', 2, choices=get_choices()), """<ul>
<li><label><input type="radio" name="num" value="0" /> 0</label></li>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
<li><label><input type="radio" name="num" value="4" /> 4</label></li>
</ul>""")
        # You can also pass 'choices' to the constructor:
        w = RadioSelect(choices=[(1, 1), (2, 2), (3, 3)])
        self.assertHTMLEqual(w.render('num', 2), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
        # If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
        self.assertHTMLEqual(w.render('num', 2, choices=[(4, 4), (5, 5)]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
<li><label><input type="radio" name="num" value="4" /> 4</label></li>
<li><label><input type="radio" name="num" value="5" /> 5</label></li>
</ul>""")
        # RadioSelect uses a RadioFieldRenderer to render the individual radio inputs.
        # You can manipulate that object directly to customize the way the RadioSelect
        # is rendered.
        w = RadioSelect()
        r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
        # Collect the same inputs formatted four different ways to exercise
        # str(), %-interpolation, .tag()/.choice_label, and the raw attributes.
        inp_set1 = []
        inp_set2 = []
        inp_set3 = []
        inp_set4 = []
        for inp in r:
            inp_set1.append(str(inp))
            inp_set2.append('%s<br />' % inp)
            inp_set3.append('<p>%s %s</p>' % (inp.tag(), inp.choice_label))
            inp_set4.append('%s %s %s %s %s' % (inp.name, inp.value, inp.choice_value, inp.choice_label, inp.is_checked()))
        self.assertHTMLEqual('\n'.join(inp_set1), """<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>
<label><input type="radio" name="beatle" value="P" /> Paul</label>
<label><input type="radio" name="beatle" value="G" /> George</label>
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
        self.assertHTMLEqual('\n'.join(inp_set2), """<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label><br />""")
        self.assertHTMLEqual('\n'.join(inp_set3), """<p><input checked="checked" type="radio" name="beatle" value="J" /> John</p>
<p><input type="radio" name="beatle" value="P" /> Paul</p>
<p><input type="radio" name="beatle" value="G" /> George</p>
<p><input type="radio" name="beatle" value="R" /> Ringo</p>""")
        # .value is the *selected* value ('J') on every input; .choice_value
        # is the input's own option value.
        self.assertHTMLEqual('\n'.join(inp_set4), """beatle J J John True
beatle J P Paul False
beatle J G George False
beatle J R Ringo False""")
        # You can create your own custom renderers for RadioSelect to use.
        class MyRenderer(RadioFieldRenderer):
            def render(self):
                # Join the individual inputs with <br /> instead of a <ul>.
                return '<br />\n'.join([six.text_type(choice) for choice in self])
        w = RadioSelect(renderer=MyRenderer)
        self.assertHTMLEqual(w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
        # Or you can use custom RadioSelect fields that use your custom renderer.
        class CustomRadioSelect(RadioSelect):
            renderer = MyRenderer
        w = CustomRadioSelect()
        self.assertHTMLEqual(w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
        # A RadioFieldRenderer object also allows index access to individual RadioInput
        w = RadioSelect()
        r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
        self.assertHTMLEqual(str(r[1]), '<label><input type="radio" name="beatle" value="P" /> Paul</label>')
        self.assertHTMLEqual(str(r[0]), '<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>')
        self.assertTrue(r[0].is_checked())
        self.assertFalse(r[1].is_checked())
        self.assertEqual((r[1].name, r[1].value, r[1].choice_value, r[1].choice_label), ('beatle', 'J', 'P', 'Paul'))
        # Out-of-range index access raises IndexError.
        try:
            r[10]
            self.fail("This offset should not exist.")
        except IndexError:
            pass
        # Choices are escaped correctly (mark_safe suppresses escaping)
        w = RadioSelect()
        self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you &gt; me')))), """<ul>
<li><label><input type="radio" name="escape" value="bad" /> you &amp; me</label></li>
<li><label><input type="radio" name="escape" value="good" /> you &gt; me</label></li>
</ul>""")
        # Unicode choices are correctly rendered as HTML
        w = RadioSelect()
        self.assertHTMLEqual(six.text_type(w.render('email', 'ŠĐĆŽćžšđ', choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')])), '<ul>\n<li><label><input checked="checked" type="radio" name="email" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" /> \u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label></li>\n<li><label><input type="radio" name="email" value="\u0107\u017e\u0161\u0111" /> abc\u0107\u017e\u0161\u0111</label></li>\n</ul>')
        # Attributes provided at instantiation are passed to the constituent inputs
        # (note the generated per-input ids foo_0 .. foo_3 and matching label 'for').
        w = RadioSelect(attrs={'id':'foo'})
        self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label for="foo_0"><input checked="checked" type="radio" id="foo_0" value="J" name="beatle" /> John</label></li>
<li><label for="foo_1"><input type="radio" id="foo_1" value="P" name="beatle" /> Paul</label></li>
<li><label for="foo_2"><input type="radio" id="foo_2" value="G" name="beatle" /> George</label></li>
<li><label for="foo_3"><input type="radio" id="foo_3" value="R" name="beatle" /> Ringo</label></li>
</ul>""")
        # Attributes provided at render-time are passed to the constituent inputs
        w = RadioSelect()
        self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')), attrs={'id':'bar'}), """<ul>
<li><label for="bar_0"><input checked="checked" type="radio" id="bar_0" value="J" name="beatle" /> John</label></li>
<li><label for="bar_1"><input type="radio" id="bar_1" value="P" name="beatle" /> Paul</label></li>
<li><label for="bar_2"><input type="radio" id="bar_2" value="G" name="beatle" /> George</label></li>
<li><label for="bar_3"><input type="radio" id="bar_3" value="R" name="beatle" /> Ringo</label></li>
</ul>""")
def test_checkboxselectmultiple(self):
w = CheckboxSelectMultiple()
self.assertHTMLEqual(w.render('beatles', ['J'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
self.assertHTMLEqual(w.render('beatles', ['J', 'P'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
self.assertHTMLEqual(w.render('beatles', ['J', 'P', 'R'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# If the value is None, none of the options are selected:
self.assertHTMLEqual(w.render('beatles', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# If the value corresponds to a label (but not to an option value), none of the options are selected:
self.assertHTMLEqual(w.render('beatles', ['John'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# If multiple values are given, but some of them are not valid, the valid ones are selected:
self.assertHTMLEqual(w.render('beatles', ['J', 'G', 'foo'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# The value is compared to its str():
self.assertHTMLEqual(w.render('nums', [2], choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
self.assertHTMLEqual(w.render('nums', ['2'], choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
self.assertHTMLEqual(w.render('nums', [2], choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
# The 'choices' argument can be any iterable:
def get_choices():
for i in range(5):
yield (i, i)
self.assertHTMLEqual(w.render('nums', [2], choices=get_choices()), """<ul>
<li><label><input type="checkbox" name="nums" value="0" /> 0</label></li>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="nums" value="4" /> 4</label></li>
</ul>""")
# You can also pass 'choices' to the constructor:
w = CheckboxSelectMultiple(choices=[(1, 1), (2, 2), (3, 3)])
self.assertHTMLEqual(w.render('nums', [2]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
# If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
self.assertHTMLEqual(w.render('nums', [2], choices=[(4, 4), (5, 5)]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="nums" value="4" /> 4</label></li>
<li><label><input type="checkbox" name="nums" value="5" /> 5</label></li>
</ul>""")
# Choices are escaped correctly
self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me')))), """<ul>
<li><label><input type="checkbox" name="escape" value="1" /> 1</label></li>
<li><label><input type="checkbox" name="escape" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="escape" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="escape" value="bad" /> you & me</label></li>
<li><label><input type="checkbox" name="escape" value="good" /> you > me</label></li>
</ul>""")
# Unicode choices are correctly rendered as HTML
self.assertHTMLEqual(w.render('nums', ['ŠĐĆŽćžšđ'], choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]), '<ul>\n<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>\n<li><label><input type="checkbox" name="nums" value="2" /> 2</label></li>\n<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>\n<li><label><input checked="checked" type="checkbox" name="nums" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" /> \u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label></li>\n<li><label><input type="checkbox" name="nums" value="\u0107\u017e\u0161\u0111" /> abc\u0107\u017e\u0161\u0111</label></li>\n</ul>')
# Each input gets a separate ID
self.assertHTMLEqual(CheckboxSelectMultiple().render('letters', list('ac'), choices=zip(list('abc'), list('ABC')), attrs={'id': 'abc'}), """<ul>
<li><label for="abc_0"><input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" /> A</label></li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1" /> B</label></li>
<li><label for="abc_2"><input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" /> C</label></li>
</ul>""")
# Each input gets a separate ID when the ID is passed to the constructor
self.assertHTMLEqual(CheckboxSelectMultiple(attrs={'id': 'abc'}).render('letters', list('ac'), choices=zip(list('abc'), list('ABC'))), """<ul>
<li><label for="abc_0"><input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" /> A</label></li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1" /> B</label></li>
<li><label for="abc_2"><input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" /> C</label></li>
</ul>""")
def test_multi(self):
    """MultiWidget renders its sub-widgets and decompresses combined values."""
    class MyMultiWidget(MultiWidget):
        def decompress(self, value):
            # Split a compressed 'a__b' string into per-sub-widget values.
            if value:
                return value.split('__')
            return ['', '']

        def format_output(self, rendered_widgets):
            return '<br />'.join(rendered_widgets)

    w = MyMultiWidget(widgets=(TextInput(attrs={'class': 'big'}), TextInput(attrs={'class': 'small'})))
    # A list maps one entry per sub-widget; a string goes through decompress().
    self.assertHTMLEqual(w.render('name', ['john', 'lennon']), '<input type="text" class="big" value="john" name="name_0" /><br /><input type="text" class="small" value="lennon" name="name_1" />')
    self.assertHTMLEqual(w.render('name', 'john__lennon'), '<input type="text" class="big" value="john" name="name_0" /><br /><input type="text" class="small" value="lennon" name="name_1" />')
    # An 'id' attr passed at render time is suffixed per sub-widget (_0, _1).
    self.assertHTMLEqual(w.render('name', 'john__lennon', attrs={'id':'foo'}), '<input id="foo_0" type="text" class="big" value="john" name="name_0" /><br /><input id="foo_1" type="text" class="small" value="lennon" name="name_1" />')
    # An 'id' passed to the constructor behaves the same way.
    w = MyMultiWidget(widgets=(TextInput(attrs={'class': 'big'}), TextInput(attrs={'class': 'small'})), attrs={'id': 'bar'})
    self.assertHTMLEqual(w.render('name', ['john', 'lennon']), '<input id="bar_0" type="text" class="big" value="john" name="name_0" /><br /><input id="bar_1" type="text" class="small" value="lennon" name="name_1" />')
def test_splitdatetime(self):
    """SplitDateTimeWidget renders separate date and time text inputs."""
    w = SplitDateTimeWidget()
    self.assertHTMLEqual(w.render('date', ''), '<input type="text" name="date_0" /><input type="text" name="date_1" />')
    self.assertHTMLEqual(w.render('date', None), '<input type="text" name="date_0" /><input type="text" name="date_1" />')
    # A datetime or a [date, time] pair both populate the two sub-inputs.
    self.assertHTMLEqual(w.render('date', datetime.datetime(2006, 1, 10, 7, 30)), '<input type="text" name="date_0" value="2006-01-10" /><input type="text" name="date_1" value="07:30:00" />')
    self.assertHTMLEqual(w.render('date', [datetime.date(2006, 1, 10), datetime.time(7, 30)]), '<input type="text" name="date_0" value="2006-01-10" /><input type="text" name="date_1" value="07:30:00" />')
    # You can also pass 'attrs' to the constructor. In this case, the attrs will be
    # applied to both sub-widgets.
    w = SplitDateTimeWidget(attrs={'class': 'pretty'})
    self.assertHTMLEqual(w.render('date', datetime.datetime(2006, 1, 10, 7, 30)), '<input type="text" class="pretty" value="2006-01-10" name="date_0" /><input type="text" class="pretty" value="07:30:00" name="date_1" />')
    # Use 'date_format' and 'time_format' to change the way a value is displayed.
    w = SplitDateTimeWidget(date_format='%d/%m/%Y', time_format='%H:%M')
    self.assertHTMLEqual(w.render('date', datetime.datetime(2006, 1, 10, 7, 30)), '<input type="text" name="date_0" value="10/01/2006" /><input type="text" name="date_1" value="07:30" />')
def test_datetimeinput(self):
    """DateTimeInput renders a text input; microseconds are trimmed by default."""
    w = DateTimeInput()
    self.assertHTMLEqual(w.render('date', None), '<input type="text" name="date" />')
    d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
    self.assertEqual(str(d), '2007-09-17 12:51:34.482548')
    # The microseconds are trimmed on display, by default.
    self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="2007-09-17 12:51:34" />')
    self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51, 34)), '<input type="text" name="date" value="2007-09-17 12:51:34" />')
    self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51)), '<input type="text" name="date" value="2007-09-17 12:51:00" />')
    # Use 'format' to change the way a value is displayed.
    w = DateTimeInput(format='%d/%m/%Y %H:%M', attrs={'type': 'datetime'})
    self.assertHTMLEqual(w.render('date', d), '<input type="datetime" name="date" value="17/09/2007 12:51" />')
def test_dateinput(self):
    """DateInput renders a text input with an ISO-formatted date by default."""
    w = DateInput()
    self.assertHTMLEqual(w.render('date', None), '<input type="text" name="date" />')
    d = datetime.date(2007, 9, 17)
    self.assertEqual(str(d), '2007-09-17')
    self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="2007-09-17" />')
    self.assertHTMLEqual(w.render('date', datetime.date(2007, 9, 17)), '<input type="text" name="date" value="2007-09-17" />')
    # We should be able to initialize from a unicode value.
    self.assertHTMLEqual(w.render('date', '2007-09-17'), '<input type="text" name="date" value="2007-09-17" />')
    # Use 'format' to change the way a value is displayed.
    w = DateInput(format='%d/%m/%Y', attrs={'type': 'date'})
    self.assertHTMLEqual(w.render('date', d), '<input type="date" name="date" value="17/09/2007" />')
def test_timeinput(self):
    """TimeInput renders a text input; microseconds are trimmed by default."""
    w = TimeInput()
    self.assertHTMLEqual(w.render('time', None), '<input type="text" name="time" />')
    t = datetime.time(12, 51, 34, 482548)
    self.assertEqual(str(t), '12:51:34.482548')
    # The microseconds are trimmed on display, by default.
    self.assertHTMLEqual(w.render('time', t), '<input type="text" name="time" value="12:51:34" />')
    self.assertHTMLEqual(w.render('time', datetime.time(12, 51, 34)), '<input type="text" name="time" value="12:51:34" />')
    self.assertHTMLEqual(w.render('time', datetime.time(12, 51)), '<input type="text" name="time" value="12:51:00" />')
    # We should be able to initialize from a unicode value.
    self.assertHTMLEqual(w.render('time', '13:12:11'), '<input type="text" name="time" value="13:12:11" />')
    # Use 'format' to change the way a value is displayed.
    w = TimeInput(format='%H:%M', attrs={'type': 'time'})
    self.assertHTMLEqual(w.render('time', t), '<input type="time" name="time" value="12:51" />')
def test_splithiddendatetime(self):
    """SplitHiddenDateTimeWidget renders two hidden inputs (date and time)."""
    from django.forms.widgets import SplitHiddenDateTimeWidget
    w = SplitHiddenDateTimeWidget()
    self.assertHTMLEqual(w.render('date', ''), '<input type="hidden" name="date_0" /><input type="hidden" name="date_1" />')
    d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
    self.assertHTMLEqual(str(d), '2007-09-17 12:51:34.482548')
    # Microseconds are trimmed from the hidden time input, as for the text widgets.
    self.assertHTMLEqual(w.render('date', d), '<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:34" />')
    self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51, 34)), '<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:34" />')
    self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51)), '<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:00" />')
class NullBooleanSelectLazyForm(Form):
    """Form to test for lazy evaluation. Refs #17190"""
    # NOTE: the field name shadows the builtin 'bool', but renaming it would
    # change the form's field/HTML names, so it is kept as-is.
    bool = BooleanField(widget=NullBooleanSelect())
@override_settings(USE_L10N=True)
class FormsI18NWidgetsTestCase(TestCase):
    """Widget rendering with localization enabled (German/Austria locale)."""

    def setUp(self):
        super(FormsI18NWidgetsTestCase, self).setUp()
        # Activate the 'de-at' locale so date/time formats are localized.
        activate('de-at')

    def tearDown(self):
        deactivate()
        super(FormsI18NWidgetsTestCase, self).tearDown()

    def test_datetimeinput(self):
        w = DateTimeInput()
        d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
        w.is_localized = True
        # de-at renders dates as DD.MM.YYYY.
        self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="17.09.2007 12:51:34" />')

    def test_dateinput(self):
        w = DateInput()
        d = datetime.date(2007, 9, 17)
        w.is_localized = True
        self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="17.09.2007" />')

    def test_timeinput(self):
        w = TimeInput()
        t = datetime.time(12, 51, 34, 482548)
        w.is_localized = True
        self.assertHTMLEqual(w.render('time', t), '<input type="text" name="time" value="12:51:34" />')

    def test_splithiddendatetime(self):
        from django.forms.widgets import SplitHiddenDateTimeWidget
        w = SplitHiddenDateTimeWidget()
        w.is_localized = True
        self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51)), '<input type="hidden" name="date_0" value="17.09.2007" /><input type="hidden" name="date_1" value="12:51:00" />')

    def test_nullbooleanselect(self):
        """
        Ensure that the NullBooleanSelect widget's options are lazily
        localized.
        Refs #17190
        """
        f = NullBooleanSelectLazyForm()
        self.assertHTMLEqual(f.fields['bool'].widget.render('id_bool', True), '<select name="id_bool">\n<option value="1">Unbekannt</option>\n<option value="2" selected="selected">Ja</option>\n<option value="3">Nein</option>\n</select>')
class SelectAndTextWidget(MultiWidget):
    """MultiWidget combining a RadioSelect with a TextInput.

    The inner RadioSelect's choices are exposed through a read/write
    ``choices`` property so they can be replaced after construction
    (exercised by WidgetTests.test_12048).
    """

    def __init__(self, choices=()):
        # BUG FIX: the default used to be a mutable list (choices=[]), which is
        # shared between all calls that rely on the default; an immutable empty
        # tuple has the same meaning without that pitfall.
        widgets = [
            RadioSelect(choices=choices),
            TextInput,  # NOTE(review): passed as a class, not an instance — confirm MultiWidget accepts this
        ]
        super(SelectAndTextWidget, self).__init__(widgets)

    def _set_choices(self, choices):
        """When choices are set for this widget, pass them to the Select widget."""
        self.widgets[0].choices = choices

    def _get_choices(self):
        """The choices for this widget are the Select widget's choices."""
        return self.widgets[0].choices

    choices = property(_get_choices, _set_choices)
class WidgetTests(TestCase):
    """Regression tests: widget deep-copying and SplitDateTime validation."""

    def test_12048(self):
        # See ticket #12048.
        w1 = SelectAndTextWidget(choices=[1,2,3])
        w2 = copy.deepcopy(w1)
        w2.choices = [4,5,6]
        # w2 ought to be independent of w1, since MultiWidget ought
        # to make a copy of its sub-widgets when it is copied.
        self.assertEqual(w1.choices, [1,2,3])

    def test_13390(self):
        # See ticket #13390: empty values for a SplitDateTimeWidget field.
        class SplitDateForm(Form):
            field = DateTimeField(widget=SplitDateTimeWidget, required=False)

        # Both the empty string and a ['', ''] pair count as "no value".
        form = SplitDateForm({'field': ''})
        self.assertTrue(form.is_valid())
        form = SplitDateForm({'field': ['', '']})
        self.assertTrue(form.is_valid())

        class SplitDateRequiredForm(Form):
            field = DateTimeField(widget=SplitDateTimeWidget, required=True)

        # ...and a required field must reject both empty forms.
        form = SplitDateRequiredForm({'field': ''})
        self.assertFalse(form.is_valid())
        form = SplitDateRequiredForm({'field': ['', '']})
        self.assertFalse(form.is_valid())
class LiveWidgetTests(AdminSeleniumWebDriverTestCase):
    """Browser-driven (Selenium) widget tests."""
    urls = 'regressiontests.forms.urls'

    def test_textarea_trailing_newlines(self):
        """
        Test that a roundtrip on a ModelForm doesn't alter the TextField value
        """
        article = Article.objects.create(content="\nTst\n")
        self.selenium.get('%s%s' % (self.live_server_url,
                                    reverse('article_form', args=[article.pk])))
        self.selenium.find_element_by_id('submit').submit()
        article = Article.objects.get(pk=article.pk)
        # Should be "\nTst\n" after #19251 is fixed
        self.assertEqual(article.content, "\r\nTst\r\n")
@python_2_unicode_compatible
class FakeFieldFile(object):
    """
    Quacks like a FieldFile (has a .url and unicode representation), but
    doesn't require us to care about storages etc.
    """
    url = 'something'  # fixed fake URL used by the rendering tests

    def __str__(self):
        return self.url
class ClearableFileInputTests(TestCase):
    """Rendering and value-extraction behaviour of ClearableFileInput."""

    def test_clear_input_renders(self):
        """
        A ClearableFileInput with is_required False and rendered with
        an initial value that is a file renders a clear checkbox.
        """
        widget = ClearableFileInput()
        widget.is_required = False
        self.assertHTMLEqual(widget.render('myfile', FakeFieldFile()),
            'Currently: <a href="something">something</a> <input type="checkbox" name="myfile-clear" id="myfile-clear_id" /> <label for="myfile-clear_id">Clear</label><br />Change: <input type="file" name="myfile" />')

    def test_html_escaped(self):
        """
        A ClearableFileInput should escape name, filename and URL when
        rendering HTML. Refs #15182.
        """
        # NOTE(review): some literals in this method appear to have lost their
        # HTML entity escaping in this copy of the file (e.g. the adjacent
        # assertTrue/assertFalse on seemingly identical strings below) —
        # compare against the upstream Django test suite before relying on it.
        @python_2_unicode_compatible
        class StrangeFieldFile(object):
            url = "something?chapter=1§=2©=3&lang=en"

            def __str__(self):
                return '''something<div onclick="alert('oops')">.jpg'''

        widget = ClearableFileInput()
        field = StrangeFieldFile()
        output = widget.render('my<div>file', field)
        self.assertFalse(field.url in output)
        self.assertTrue('href="something?chapter=1&sect=2&copy=3&lang=en"' in output)
        self.assertFalse(six.text_type(field) in output)
        self.assertTrue('something<div onclick="alert('oops')">.jpg' in output)
        self.assertTrue('my<div>file' in output)
        self.assertFalse('my<div>file' in output)

    def test_clear_input_renders_only_if_not_required(self):
        """
        A ClearableFileInput with is_required=True does not render a clear
        checkbox.
        """
        # NOTE: despite the method name, this exercises the required=True case
        # (the previous docstring said is_required=False, contradicting the code).
        widget = ClearableFileInput()
        widget.is_required = True
        self.assertHTMLEqual(widget.render('myfile', FakeFieldFile()),
            'Currently: <a href="something">something</a> <br />Change: <input type="file" name="myfile" />')

    def test_clear_input_renders_only_if_initial(self):
        """
        A ClearableFileInput instantiated with no initial value does not render
        a clear checkbox.
        """
        widget = ClearableFileInput()
        widget.is_required = False
        self.assertHTMLEqual(widget.render('myfile', None),
            '<input type="file" name="myfile" />')

    def test_clear_input_checked_returns_false(self):
        """
        ClearableFileInput.value_from_datadict returns False if the clear
        checkbox is checked, if not required.
        """
        widget = ClearableFileInput()
        widget.is_required = False
        self.assertEqual(widget.value_from_datadict(
            data={'myfile-clear': True},
            files={},
            name='myfile'), False)

    def test_clear_input_checked_returns_false_only_if_not_required(self):
        """
        ClearableFileInput.value_from_datadict never returns False if the field
        is required.
        """
        widget = ClearableFileInput()
        widget.is_required = True
        f = SimpleUploadedFile('something.txt', b'content')
        # The uploaded file wins over the clear checkbox when required.
        self.assertEqual(widget.value_from_datadict(
            data={'myfile-clear': True},
            files={'myfile': f},
            name='myfile'), f)
| StarcoderdataPython |
12811439 | <gh_stars>1-10
from .pt_reader import PTReader
from .cls_reader import DocClsReader
from .pt_cls_reader import DocRepClsReader
| StarcoderdataPython |
1882448 | from django.db import models
# Create your models here.
class Servicio(models.Model):
    """A service offered on the site: title, body text and an illustration."""
    titulo = models.CharField(max_length=50)
    contenido = models.CharField(max_length=50)
    imagen = models.ImageField(upload_to='servicios')
    # Set once when the row is created.
    created = models.DateTimeField(auto_now_add=True)
    # BUG FIX: 'updated' previously used auto_now_add=True, which freezes the
    # timestamp at creation; auto_now=True refreshes it on every save().
    # (Requires a migration when applied.)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = 'servicio'
        verbose_name_plural = 'servicios'

    def __str__(self):
        return self.titulo
6646503 | from django.apps import AppConfig
class ModulosArtigosConfig(AppConfig):
    """Django application configuration for the 'modulos_artigos' app."""
    name = 'icsmp_project.modulos_artigos'
168708 | <filename>2020/day12/solution.py
import re
import sys
def main():
    """Read the puzzle input named on the command line and run both parts."""
    with open(sys.argv[1]) as handle:
        moves = [line.strip() for line in handle]
    part1(moves)
    part2(moves)
def part1(instructions):
    """Solve part 1: drive the ship directly and report its Manhattan distance.

    N/E/S/W translate the ship, F moves it along the current heading, and
    R/L rotate the heading clockwise/counter-clockwise in 90-degree steps.

    :param instructions: iterable of strings like 'F10', 'R90', 'N3'
    :return: the Manhattan distance from the origin (also printed; previously
        the function only printed, which made it unusable programmatically)
    """
    x = 0
    y = 0
    direction = (1, 0)  # unit heading; start facing east
    for instruction in instructions:
        operation = instruction[0]
        magnitude = int(instruction[1:])
        if operation == 'N':
            y += magnitude
        elif operation == 'E':
            x += magnitude
        elif operation == 'S':
            y -= magnitude
        elif operation == 'W':
            x -= magnitude
        elif operation == 'F':
            x += direction[0] * magnitude
            y += direction[1] * magnitude
        elif operation == 'R':
            # rotate() turns counter-clockwise, so a clockwise turn is its complement
            direction = rotate(direction, 360 - magnitude)
        elif operation == 'L':
            direction = rotate(direction, magnitude)
    distance = abs(x) + abs(y)
    print(distance)
    return distance
def part2(instructions):
    """Solve part 2: steer via a waypoint and report the Manhattan distance.

    N/E/S/W move the waypoint, F moves the ship toward the waypoint
    `magnitude` times, and R/L rotate the waypoint around the ship.

    :param instructions: iterable of strings like 'F10', 'R90', 'N3'
    :return: the Manhattan distance from the origin (also printed; previously
        the function only printed, which made it unusable programmatically)
    """
    x = 0
    y = 0
    wx = 10  # waypoint starts 10 east...
    wy = 1   # ...and 1 north of the ship
    for instruction in instructions:
        operation = instruction[0]
        magnitude = int(instruction[1:])
        if operation == 'N':
            wy += magnitude
        elif operation == 'E':
            wx += magnitude
        elif operation == 'S':
            wy -= magnitude
        elif operation == 'W':
            wx -= magnitude
        elif operation == 'F':
            x += wx * magnitude
            y += wy * magnitude
        elif operation == 'R':
            # rotate() turns counter-clockwise, so a clockwise turn is its complement
            wx, wy = rotate((wx, wy), 360 - magnitude)
        elif operation == 'L':
            wx, wy = rotate((wx, wy), magnitude)
    distance = abs(x) + abs(y)
    print(distance)
    return distance
def rotate(coord, angle, origin=(0, 0)):
    """Rotate `coord` counter-clockwise by `angle` degrees around `origin`.

    Generalized from the original, which silently returned None for any
    angle outside {90, 180, 270}: the angle is now normalized modulo 360,
    0 is handled explicitly, and any non-multiple of 90 raises ValueError
    instead of falling off the end of the function.

    :param coord: (x, y) pair
    :param angle: rotation in degrees; must be a multiple of 90
    :param origin: (x, y) center of rotation
    :return: the rotated (x, y) pair
    :raises ValueError: if `angle` is not a multiple of 90
    """
    x, y = coord
    ox, oy = origin
    dx = x - ox
    dy = y - oy
    angle %= 360
    if angle == 0:
        return x, y
    elif angle == 90:
        return ox - dy, oy + dx
    elif angle == 180:
        return ox - dx, oy - dy
    elif angle == 270:
        return ox + dy, oy - dx
    raise ValueError('angle must be a multiple of 90, got %r' % angle)
# Run both puzzle parts when executed as a script (expects the input path as argv[1]).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
163988 | <filename>diffrend/numpy/camera.py
import numpy as np
from diffrend.numpy.vector import Vector
from diffrend.numpy.quaternion import Quaternion
import diffrend.numpy.ops as ops
class Camera(object):
    """Look-at camera holding a homogeneous position, target, up vector and viewport."""

    def __init__(self, pos, at, up, viewport):
        # Positions are stored as homogeneous 4-vectors (w = 1.0 appended).
        self.pos = np.array(pos, dtype=np.float32)
        if self.pos.size == 3:
            self.pos = np.append(self.pos, 1.0)
        self.at = np.array(at, dtype=np.float32)
        if self.at.size == 3:
            # BUG FIX: this used to be np.append(self.eye, 1.0), which appended
            # to the (already 4-element) camera position and overwrote 'at' with
            # a 5-element copy of the eye. Append to 'at' itself instead.
            self.at = np.append(self.at, 1.0)
        self.up = ops.normalize(np.array(up))
        self.viewport = viewport  # (x, y, width, height)
        self.view_matrix = ops.lookat(eye=self.pos, at=self.at, up=self.up)

    def __str__(self):
        return 'Camera: pos {}, at: {}, up: {}'.format(self.pos, self.at, self.up)

    @property
    def eye(self):
        """Alias for the camera position."""
        return self.pos

    @property
    def aspect_ratio(self):
        return self.viewport[2] / self.viewport[3]

    @property
    def M(self):
        """The view (look-at) matrix."""
        return self.view_matrix

    def rotate(self, axis, angle):
        # NOTE(review): self.orientation is never initialized by this class
        # (the Quaternion parameter was removed) — confirm a caller sets it
        # before this method is used.
        self.orientation = self.orientation.rotate(angle_rad=angle, axis=axis)

    def translate(self, translation):
        """Shift the camera position by the first three components of `translation`."""
        self.pos[:3] += translation[:3]

    def lookat(self, eye, at, up):
        """Same as the global lookat but changes the state of the current camera.

        :param eye: new eye position (3- or 4-vector, list or ndarray)
        :param at: look-at target
        :param up: up vector
        """
        if type(eye) is list:
            eye = np.array(eye, dtype=np.float32)
        if eye.size == 3:
            eye = np.append(eye, 1.0)
        self.pos = eye
        self.view_matrix = ops.lookat(self.pos, at, up)

    def generate_rays(self):
        # TODO: not implemented.
        pass
class PinholeCamera(Camera):
    """Camera with a simple pinhole projection matrix on top of the look-at base."""

    def __init__(self, pos, at, up, fovy, focal_length, viewport):
        """
        :param pos: camera position
        :param at: look-at target
        :param up: up vector
        :param fovy: Vertical field of view in radians
        :param focal_length: distance from the center of projection to the image plane
        :param viewport: (x, y, width, height)
        """
        super(PinholeCamera, self).__init__(pos, at, up, viewport)
        self.fovy = float(fovy)
        self.focal_length = float(focal_length)
        self.viewport = viewport
        # NOTE(review): height/aspect_ratio/width below are computed but never
        # used or stored — dead code, or the projection was meant to use them.
        height = 2 * self.focal_length * np.tan(self.fovy / 2.)
        aspect_ratio = float(viewport[2]) / viewport[3]
        width = height * aspect_ratio
        # Pinhole projection: scales x/y/z by the focal length and copies z
        # into w (perspective divide happens downstream).
        self.proj_matrix = np.array([[self.focal_length, 0, 0, 0],
                                     [0, self.focal_length, 0, 0],
                                     [0, 0, self.focal_length, 0],
                                     [0, 0, 1, 0]])

    @property
    def M(self):
        """Combined projection * model-view matrix."""
        return np.dot(self.proj_matrix, self.model_view)

    @property
    def viewport_matrix(self):
        # TODO: not implemented.
        return None

    @property
    def projection(self):
        return self.proj_matrix

    @property
    def model_view(self):
        # NOTE(review): relies on self.orientation, which is never assigned by
        # this class hierarchy — accessing this property will raise
        # AttributeError unless a caller sets it; confirm before use.
        w = 1
        if self.pos.size == 4:
            w = self.pos[3]  # de-homogenize the stored position
        translation_matrix = np.array([[1, 0, 0, -self.pos[0] / w],
                                       [0, 1, 0, -self.pos[1] / w],
                                       [0, 0, 1, -self.pos[2] / w],
                                       [0, 0, 0, 1]])
        return np.dot(translation_matrix, self.orientation.R, )
class TrackBallCamera(PinholeCamera):
    """Pinhole camera with arcball-style mouse rotation, always aimed at the origin."""

    def __init__(self, pos, up, fovy, focal_length, viewport):
        super(TrackBallCamera, self).__init__(pos, np.array([0., 0., 0., 1.]), up, fovy, focal_length, viewport)
        # Accumulated object rotation applied by successive mouse drags.
        self.model_matrix = np.eye(4)

    def screen_to_sphere(self, coords):
        """Map window coordinates onto the unit sphere (arcball mapping)."""
        w, h = self.viewport[2] - self.viewport[0], self.viewport[3] - self.viewport[1]
        cx, cy = w/2, h/2
        radius = min(cx, cy)
        x = (coords[0] - cx) / radius
        y = -(coords[1] - cy) / radius  # flip y: window y grows downwards
        r_sqr = x ** 2 + y ** 2
        if r_sqr > 1:
            # Outside the sphere: project back onto its silhouette circle (z = 0).
            s = 1 / np.sqrt(r_sqr)
            x *= s
            y *= s
            z = 0
        else:
            z = np.sqrt(1 - r_sqr)
        return np.array([x, y, z])

    def mouse_press(self, coords):
        # Remember the sphere point under the cursor at drag start.
        self.src = self.screen_to_sphere(coords)

    def mouse_move(self, coords):
        """Accumulate the rotation between the previous and current drag points."""
        self.dst = self.screen_to_sphere(coords)
        # Axis/angle between the two arcball points defines the incremental rotation.
        axis = ops.normalize(np.cross(self.src, self.dst))
        theta = np.arccos(np.dot(self.src, self.dst))
        self.model_matrix = np.matmul(self.model_matrix, ops.axis_angle_matrix(axis=axis, angle=theta))
        self.src = self.dst

    def zoom(self, amount):
        # Move along the position vector (towards/away from the origin).
        delta = ops.normalize(self.pos) * amount
        self.translate(delta)
class VirtualSphereCamera(PinholeCamera):
    """Experimental camera that orbits on a virtual sphere around the origin.

    NOTE(review): visibly unfinished — mouse_move computes a new camera
    position but never assigns it, and leaves debugging prints in place.
    """

    def __init__(self, pos, up, fovy, focal_length, viewport):
        from diffrend.numpy.geometry import Sphere  # local import — presumably avoids an import cycle; TODO confirm
        super(VirtualSphereCamera, self).__init__(pos, np.array([0., 0., 0., 1.]), up, fovy, focal_length, viewport)
        self.cam_dir = ops.normalize(self.pos[:3])
        # Spherical coordinates of the (normalized) viewing direction.
        self.radius, self.phi, self.theta = ops.cart2sph(self.cam_dir[0], self.cam_dir[1], self.cam_dir[2])
        self.sphere = Sphere(center=np.array([0., 0., 0.]), radius=self.radius)

    def mouse_press(self, coords):
        # Direction from the eye through the clicked point on the image plane.
        self.src = ops.normalize([coords[0], coords[1], self.pos[2] - self.focal_length])
        # pick point on sphere
        # ...

    def mouse_move(self, coords):
        self.dst = ops.normalize([coords[0], coords[1], self.pos[2] - self.focal_length])
        delta_step = self.dst - self.src
        if ops.norm(delta_step) == 0:
            return  # no movement, nothing to do
        print('src', self.src, 'dst:', self.dst)
        # compute object rotation
        axis = ops.normalize(np.cross(self.src, self.dst))
        theta = np.arccos(np.dot(self.src, self.dst))
        print(axis, theta)
        self.cam_dir = ops.rotate_axis_angle(axis=axis, angle=theta, vec=self.cam_dir)
        print('cam_dir', self.cam_dir)
        # Heuristic angular step (0.1 scale) from the screen-space delta.
        self.phi += delta_step[0] * 0.1
        self.theta += delta_step[1] * 0.1
        print('delta ', delta_step)
        # NOTE(review): cam_pos is computed but never stored in self.pos.
        cam_pos = ops.sph2cart(radius=self.radius, phi=self.phi, theta=self.theta)
        print('cam_pos', cam_pos)
        self.src = self.dst

    def zoom(self, amount):
        # Move along the position vector (towards/away from the origin).
        delta = ops.normalize(self.pos) * amount
        self.translate(delta)
# Ad-hoc smoke test: drag each camera type by one step and print its state.
if __name__ == '__main__':
    cam = VirtualSphereCamera([0., 0., 1., 1.], up=[0, 1, 0], fovy=45, focal_length=1.,
                              viewport=[0, 0, 640, 480])
    cam.mouse_press([0, 0])
    cam.mouse_move([1, 0])
    print(cam)

    cam = TrackBallCamera([0.0, 0.0, 1.0, 1.0], up=[0, 1, 0], fovy=45, focal_length=0.01,
                          viewport=[0, 0, 640, 480])
    cam.mouse_press([640/2, 480/2])
    cam.mouse_move([640/2 + 2, 480/2])
| StarcoderdataPython |
97936 | <reponame>CN-UPB/FutureCoord<filename>utils/graph_generator.py
import random
import argparse
import numpy as np
import networkx as nx
from geopy.distance import geodesic
def graphml_reader(seed, compute, bandwidth, inputfile, outputfile):
    '''Creates a gpickle graph from a graphml file. The node capacity (compute)
    and the link capacity (bandwidth) are created randomly within the given bounds,
    while the latency is calculated by the distance of the nodes'''
    SPEED_OF_LIGHT = 299792458  # meter per second
    PROPAGATION_FACTOR = 0.77  # https://en.wikipedia.org/wiki/Propagation_delay

    # RNG calls happen in node order, then edge order, so the generated
    # capacities are reproducible for a given seed and input file.
    random.seed(seed)
    # setting ranged for random values of the nodes
    file = inputfile
    if not file.endswith(".graphml"):
        raise ValueError("{} is not a GraphML file".format(file))
    network = nx.read_graphml(file, node_type=int)
    newnetwork = nx.Graph()
    # Map original node ids to a dense 0..n-1 numbering for 'internal' nodes.
    mapping = dict()
    num = 0
    for (node, data) in network.nodes(data=True):
        # some nodes are not actually nodes in a sense that a position ect. is given.
        if data['Internal'] == 1:
            mapping[node] = num
            newnetwork.add_node(num, compute=random.uniform(*compute))
            num += 1
    for e in network.edges():
        n1 = network.nodes(data=True)[e[0]]
        n2 = network.nodes(data=True)[e[1]]
        # Skip edges touching external nodes — they were not copied above.
        if n1['Internal'] == 0 or n2['Internal'] == 0:
            continue
        n1_lat, n1_long = n1.get("Latitude"), n1.get("Longitude")
        n2_lat, n2_long = n2.get("Latitude"), n2.get("Longitude")
        # Great-circle distance between the two node positions.
        distance = geodesic((n1_lat, n1_long),
                            (n2_lat, n2_long)).meters  # in meters
        delay = (distance / SPEED_OF_LIGHT * 1000) * \
            PROPAGATION_FACTOR  # in milliseconds
        # This is not normalized
        newnetwork.add_edge(mapping[e[0]], mapping[e[1]], latency=float(
            delay), bandwidth=random.uniform(*bandwidth))
    nx.write_gpickle(newnetwork, outputfile)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Creates a gpickle graph from a graphml or gml file.
        The node capacity (compute) and the link capacity (bandwidth)
        are created randomly, while the latency
        is calculated by the distance of the nodes''')
    parser.add_argument('--seed', type=int, nargs='?',
                        default=0)
    parser.add_argument('--inputfile', type=str, nargs='?',
                        const=1)
    parser.add_argument('--outputfile', type=str, nargs='?',
                        const=1, default=r'./data/network.gpickle')
    args = parser.parse_args()
    # bounds for the resources - should be normalized between 0 and 1
    compute = (0.0, 1.0)
    bandwidth = (0.0, 1.0)
    # NOTE(review): the description mentions gml files, but only .graphml is accepted.
    if args.inputfile.endswith(".graphml"):
        graphml_reader(args.seed, compute, bandwidth,
                       args.inputfile, args.outputfile)
    else:
        raise ValueError("Input not supported. It should be a graphml file")
| StarcoderdataPython |
6404694 | __author__ = 'zhanghe'
class shortTextFilter:
    """Copy lines with at least `min_length` space-separated tokens to another file.

    (Class name kept as-is for compatibility, although PEP 8 would prefer
    ShortTextFilter.)
    """

    def __init__(self, min_length=10):
        # Minimum number of space-separated tokens a line must have to be kept.
        self.min_length = min_length

    def filterFile(self, src_filename, dst_filename):
        """Append every sufficiently long line of src_filename to dst_filename.

        BUG FIX: both files are now opened via context managers, so the
        destination handle is flushed and closed — previously it was opened
        with open(..., 'a+') and never closed, leaking the handle and risking
        unflushed output.
        """
        with open(dst_filename, 'a+') as f_dst:
            with open(src_filename, 'r') as src:
                for line in src:
                    # Token count as in the original: naive split on single spaces.
                    if len(line.split(" ")) >= self.min_length:
                        f_dst.write(line)
# test short text filter
# NOTE(review): module-level smoke test — merely *importing* this module runs
# it and requires ../data/src.txt to exist relative to the CWD; consider
# guarding with `if __name__ == "__main__":`.
ftr = shortTextFilter()
ftr.filterFile("../data/src.txt","../data/dst.txt")
| StarcoderdataPython |
6612563 | from skimage import io
import os
import numpy as np
DEBUG = 0  # set to 1 to print the resolved paths below

# Directory of this script; screenshots live in a sibling pics/ folder.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if DEBUG:
    print(BASE_DIR)
# NOTE(review): Windows-style separators — this path only resolves on Windows.
PICS_DIR = os.path.join(BASE_DIR,"..\\pics\\test_match")
if DEBUG:
    print(PICS_DIR)

# Reference RGB colors used when counting "non-background" pixels.
GREY = [247, 247, 247]
GREEN = [148, 211, 77]
WHITE = [255, 255, 255]

# Pixel geometry of the answer boxes inside a full screenshot.
# Values are hard-coded for one device resolution — TODO confirm.
vertex_top = 1233
vertex_left = 174
box_width_all = 735
box_height_all = 112
start_top = 1257      # top edge of the first answer crop
start_left = 352      # left edge of every answer crop
box_width = int(735 / 2)
box_height = int(112 * 2/3)
interval_height = int((1738 - 1233) / 3)  # vertical pitch between answers
# Question region bounds: [top, bottom, left, right]
question_pos = [1054, 1215, 59, 1000]
def crop_answer(whole_img):
    """Slice the four answer-button regions (RGB channels only) from a screenshot.

    Uses the module-level geometry constants (start_top, start_left,
    box_width, box_height, interval_height) and returns the crops
    top-to-bottom as a 4-tuple.
    """
    crops = []
    for slot in range(4):
        top = start_top + interval_height * slot
        crops.append(whole_img[top:top + box_height,
                               start_left:start_left + box_width, 0:3])
    return crops[0], crops[1], crops[2], crops[3]
def cal_num_scalar(image, color):
    """Count pixels whose first channel differs from color[0].

    Pure-Python reference implementation; only channel 0 is compared
    (the full-color comparison lives in cal_num1). See cal_num for the
    vectorized equivalent.
    """
    mismatches = 0
    rows, cols = image.shape[0], image.shape[1]
    for r in range(rows):
        for c in range(cols):
            if image[r][c][0] != color[0]:
                mismatches += 1
    return mismatches
def cal_num(image, color):
    """Vectorized count of pixels whose first channel differs from color[0]."""
    mismatch_mask = image[:, :, 0] != color[0]
    return int(mismatch_mask.sum())
def cal_num_cat(image, color):
    """Build a 10-character descriptor from per-half pixel counts.

    The image is split into left and right halves; for each half, the number
    of pixels whose first channel differs from color[0] is counted and
    zero-padded to 5 digits, then the two counts are concatenated.

    Cleanups over the original: the dead `if 0:` branch (an abandoned
    3-row-split variant) is removed, and the manual zero-padding ladder is
    replaced by str.zfill(5). A count too large for 5 digits now raises
    ValueError instead of tripping an assert (which is stripped under -O).

    :param image: H x W x C array
    :param color: background color; only color[0] is compared
    :return: 10-character string of two zero-padded counts
    :raises ValueError: if a half's count exceeds 99999
    """
    half_width = int(image.shape[1] / 2)
    data_str = ""
    for i in range(2):
        count = int(np.sum(image[:, half_width * i:half_width * (i + 1), 0] != color[0]))
        if count > 99999:
            raise ValueError("count %d does not fit in 5 digits" % count)
        data_str += str(count).zfill(5)
    return data_str
def cal_num1(image, color):
    """Count pixels whose first three channels do not all equal `color`.

    Pure-Python variant of cal_num_scalar that compares all three channels
    instead of only the first.
    """
    mismatches = 0
    for r in range(image.shape[0]):
        for c in range(image.shape[1]):
            if sum(image[r][c][0:3] == color) != 3:
                mismatches += 1
    return mismatches
def selection(correct_loss, loss1, loss2, loss3, loss4):
    """Return the 0-based index of the candidate loss closest to correct_loss."""
    candidates = np.array([loss1, loss2, loss3, loss4])
    distances = np.abs(candidates - correct_loss)
    return np.argmin(distances)
def selection_str(correct_loss, loss1, loss2, loss3, loss4):
    """Pick the candidate descriptor closest to `correct_loss`.

    Each descriptor is a 10-character string of two 5-digit counts (see
    cal_num_cat). For every candidate, the per-count absolute differences
    against the reference are reduced with max; the candidate with the
    smallest worst-case difference wins.

    :return: index (0-3) of the best candidate
    """
    def to_counts(descriptor):
        return [float(int(descriptor[0:5])), float(int(descriptor[5:10]))]

    table = np.array([to_counts(d) for d in (loss1, loss2, loss3, loss4)])
    target = np.array(to_counts(correct_loss))
    worst = np.abs(table - target).max(axis=1)
    return np.argmin(worst)
def selection_str_rValue(correct_loss, loss1, loss2, loss3, loss4):
    """Like selection_str, but also return the winning worst-case difference.

    :return: [index, difference] where index is the best candidate (0-3) and
        difference is its max per-count absolute difference to the reference
    """
    def to_counts(descriptor):
        pair = np.zeros(shape=(1, 2))
        try:
            pair[0, 0] = int(descriptor[0:5])
            pair[0, 1] = int(descriptor[5:10])
        except ValueError:
            # Preserve the original diagnostic behaviour for malformed input.
            print(descriptor)
            assert False, "ValueError"
        return pair

    table = np.concatenate([to_counts(d) for d in (loss1, loss2, loss3, loss4)], axis=0)
    worst = np.abs(table - to_counts(correct_loss)).max(axis=1)
    best = np.argmin(worst)
    return [best, worst[best]]
if __name__ == "__main__":
#img_label_green_2 = io.imread(os.path.join(PICS_DIR,"answer_1.png"))
#img_question = io.imread(os.path.join(PICS_DIR,"question_0.png"))
#img_question_2 = io.imread(os.path.join(PICS_DIR,"question_1.png"))
#img_whole_green = io.imread(os.path.join(PICS_DIR,"autojump_1.png"))
##raw grey image
img_whole_grey = io.imread(os.path.join(PICS_DIR,"autojump_0.png"))
##crop question and answer,and get descriptor
question = img_whole_grey[question_pos[0]:question_pos[1], question_pos[2]:question_pos[3],0:3]
correct_question = cal_num(question, WHITE)
## another raw image
img_whole_grey = io.imread(os.path.join(PICS_DIR,"autojump_1.png"))
##crop question and answer,and get descriptor
question_new = img_whole_grey[question_pos[0]:question_pos[1], question_pos[2]:question_pos[3],0:3]
correct_question_new = cal_num(question, WHITE)
#########
io.imshow(question-question_new)
answer_1, answer_2, answer_3, answer_4 = crop_answer(img_whole_grey)
loss1 = cal_num(answer_1, GREY)
loss2 = cal_num(answer_2, GREY)
loss3 = cal_num(answer_3, GREY)
loss4 = cal_num(answer_4, GREY)
##calculate library's key value(questions')
img_question = io.imread(os.path.join(PICS_DIR,"question_0.png"))
loss_ques = cal_num(img_question, WHITE)
correct_answer = io.imread(os.path.join(PICS_DIR,"answer_0.png"))
correct_loss = cal_num(correct_answer, GREEN)
id = selection(correct_loss, loss1, loss2, loss3, loss4)
print(id)
#i=3
#img_label_grey_first = img_whole_grey[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3]
#img_label_grey_second = img_whole_green[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3]
#io.imshow(-img_label_grey_second+img_label_grey_first)
#io.imshow(img_label_grey_second-img_label_grey_first)
#label_num_pixel = cal_num(img_label_green, GREEN)
#print("LABEL_NUM_PIXEL: ", label_num_pixel)
#
#
#label_num_pixel_2 = cal_num(img_label_green_2, GREEN)
#print("LABEL_NUM_PIXEL_2: ", label_num_pixel_2)
#
#label_num_pixel_3 = cal_num(img_label_green_3, GREEN)
#print("LABEL_NUM_PIXEL_3: ", label_num_pixel_3)
#
#Q_num_pixel = cal_num(img_question, WHITE)
#print("Q_NUM_PIXEL: ", Q_num_pixel)
#
#label_num_pixel_grey = cal_num(img_label_grey, GREY)
#print("LABEL_NUM_PIXEL_GREY: ", label_num_pixel_grey)
#
#label_num_pixel_grey_first = cal_num(img_label_grey_first, GREY)
#print("LABEL_NUM_PIXEL_GREY_F: ", label_num_pixel_grey_first)
#
#label_num_pixel_grey_second = cal_num(img_label_grey_second, GREEN)
#print("LABEL_NUM_PIXEL_GREY_S: ", label_num_pixel_grey_second)
| StarcoderdataPython |
5003905 | <filename>ex022-Analisando um Texto.py
'''
EXERCÍCIO 022: Analisador de Textos
Crie um programa que leia o nome completo de uma pessoa e mostre:
> O nome com todas as letras maiúsculas e minúsculas.
> Quantas letras ao todo (sem considerar espaços).
> Quantas letras tem o primeiro nome.
'''
def lin():
    """Print an 80-character divider line."""
    print('-' * 80)
def inico():
    """Prompt for a full name and print its analysis (exercise 022).

    Shows the name upper/lower-cased, the total letter count excluding
    spaces, and the first name with its length.  All user-facing text is
    Portuguese by design.
    """
    lin()
    nome = str(input('Digite seu nome completo: ')).strip()
    lin()
    print(f'Olá {nome.title()}.\nAnalisando seu nome temos:\n')
    lin()
    print(f'Seu nome em maiúsculas é {nome.upper()}.')
    lin()
    print(f'Seu nome em minúsculas é {nome.lower()}.')
    lin()
    # Spaces are stripped before counting so only letters are tallied.
    print(f'Seu nome tem ao todo {len( nome.replace(" ",""))} letras.')
    lin()
    print(f'Seu primeiro nome é {nome.split()[0].title()} e ele tem {len(nome.split()[0])} letras')
    lin()
if __name__=="__main__":
continuar = True
while continuar:
inico()
lin()
resposta = str(input('Deseja continuar? [S/N]')).upper()
lin()
if resposta == 'N':
lin()
print('Muito obrigado, volte sempre.')
lin()
continuar = False
| StarcoderdataPython |
4907209 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .check_name_availability_request_parameters import CheckNameAvailabilityRequestParameters
from .check_name_availability_response import CheckNameAvailabilityResponse
from .sku import Sku
from .check_availability_parameters import CheckAvailabilityParameters
from .check_availability_result import CheckAvailabilityResult
from .namespace_create_or_update_parameters import NamespaceCreateOrUpdateParameters
from .namespace_patch_parameters import NamespacePatchParameters
from .namespace_resource import NamespaceResource
from .shared_access_authorization_rule_properties import SharedAccessAuthorizationRuleProperties
from .shared_access_authorization_rule_create_or_update_parameters import SharedAccessAuthorizationRuleCreateOrUpdateParameters
from .shared_access_authorization_rule_resource import SharedAccessAuthorizationRuleResource
from .resource_list_keys import ResourceListKeys
from .policykey_resource import PolicykeyResource
from .apns_credential import ApnsCredential
from .wns_credential import WnsCredential
from .gcm_credential import GcmCredential
from .mpns_credential import MpnsCredential
from .adm_credential import AdmCredential
from .baidu_credential import BaiduCredential
from .notification_hub_create_or_update_parameters import NotificationHubCreateOrUpdateParameters
from .notification_hub_resource import NotificationHubResource
from .pns_credentials_resource import PnsCredentialsResource
from .resource import Resource
from .sub_resource import SubResource
from .namespace_resource_paged import NamespaceResourcePaged
from .shared_access_authorization_rule_resource_paged import SharedAccessAuthorizationRuleResourcePaged
from .notification_hub_resource_paged import NotificationHubResourcePaged
from .notification_hubs_management_client_enums import (
SkuName,
NamespaceType,
AccessRights,
)
__all__ = [
'CheckNameAvailabilityRequestParameters',
'CheckNameAvailabilityResponse',
'Sku',
'CheckAvailabilityParameters',
'CheckAvailabilityResult',
'NamespaceCreateOrUpdateParameters',
'NamespacePatchParameters',
'NamespaceResource',
'SharedAccessAuthorizationRuleProperties',
'SharedAccessAuthorizationRuleCreateOrUpdateParameters',
'SharedAccessAuthorizationRuleResource',
'ResourceListKeys',
'PolicykeyResource',
'ApnsCredential',
'WnsCredential',
'GcmCredential',
'MpnsCredential',
'AdmCredential',
'BaiduCredential',
'NotificationHubCreateOrUpdateParameters',
'NotificationHubResource',
'PnsCredentialsResource',
'Resource',
'SubResource',
'NamespaceResourcePaged',
'SharedAccessAuthorizationRuleResourcePaged',
'NotificationHubResourcePaged',
'SkuName',
'NamespaceType',
'AccessRights',
]
| StarcoderdataPython |
9796127 | <reponame>scuml/django_bench_runner
import os
import sys
from setuptools import setup, find_packages
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
if 'publish' in sys.argv:
if 'test' in sys.argv:
os.system('python setup.py sdist bdist_wheel upload -rtest')
else:
os.system('python setup.py sdist bdist_wheel upload')
sys.exit()
import pypandoc
description = pypandoc.convert('README.md', 'rst')
with open('LICENSE') as f:
license = f.read()
setup(
name='django_bench_runner',
version='0.1.1',
description='Locate slow tests in your django project.',
url='http://github.com/scuml/django_bench_runner',
packages=find_packages(where="src"),
license=license,
long_description=description,
author='<NAME>',
author_email='<EMAIL>',
package_dir={"": "src"},
package_data={'': ['LICENSE']},
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
),
)
| StarcoderdataPython |
6471707 | <filename>myaccounts/encoders.py
# encoding: utf-8
from django.forms.models import model_to_dict
from django.db.models import Model
from django.db.models.fields.files import FieldFile
from datetime import datetime
from enum import Enum
from decimal import Decimal
import json
from rest_framework.utils.encoders import JSONEncoder
class BaseObjectEncoder(JSONEncoder):
    """JSON encoder aware of Django models and ad-hoc objects.

    Django ``Model`` instances are serialized via ``model_to_dict``.
    Plain objects that declare ``_excludes`` (names to drop) and/or
    ``_customs`` (extra key/value pairs) are serialized from their
    ``__dict__`` merged over ``_customs``; private names (leading
    underscore), excluded names and falsy values are dropped — note that
    the falsy filter also removes legitimate 0/''/False values.
    Everything else falls through to the parent encoder.
    """

    def default(self, obj):
        """Return a JSON-serializable representation of ``obj``."""
        if isinstance(obj, Model):
            return model_to_dict(obj)
        ex = getattr(obj, '_excludes', {})
        customs = getattr(obj, '_customs', {})
        if ex or customs:
            # Instance attributes win over the _customs defaults.
            vals = customs.copy()
            vals.update(getattr(obj, '__dict__', {}))
            return {k: v for k, v in vals.items()
                    if k not in ex and not k.startswith('_') and v}
        return super(BaseObjectEncoder, self).default(obj)

    @classmethod
    def to_json(cls, obj, *args, **kwargs):
        """Serialize ``obj`` to a JSON string using this encoder."""
        return json.dumps(obj, cls=cls, *args, **kwargs)

    @classmethod
    def from_json(cls, jsonstr, *args, **kwargs):
        """Deserialize a JSON string (plain ``json.loads`` passthrough)."""
        return json.loads(jsonstr, *args, **kwargs)
to_json = BaseObjectEncoder.to_json
| StarcoderdataPython |
6572644 | #
# IMPORTS
#
import bpy
import imp
import mathutils
import utilities
imp.reload( utilities )
from math import radians
############################################################
#
# GLOBALS
#
############################################################
############################################################
#
# EXPORT CAMERAS
#
############################################################
def ExportTrigger( object, doc, rootElement ):
    """Append a <trigger> element to *rootElement*.

    Every custom property of *object* (anything with an ``items()``
    mapping interface) becomes an XML attribute, except Blender's
    internal ``_RNA_UI`` metadata entry.
    """
    trigger = doc.createElement("trigger")
    for key, value in object.items():
        if key == "_RNA_UI":
            continue
        trigger.setAttribute(key, value)
    rootElement.appendChild(trigger)
3526693 | <gh_stars>1-10
"""
Simple utility to render an .svg to a .png
"""
import os
import argparse
import pydiffvg
import torch as th
def render(canvas_width, canvas_height, shapes, shape_groups):
    """Rasterize a diffvg scene to an image tensor.

    Serializes the scene then invokes the differentiable renderer with
    2x2 supersampling and a fixed seed (deterministic output).
    """
    _render = pydiffvg.RenderFunction.apply
    scene_args = pydiffvg.RenderFunction.serialize_scene(
        canvas_width, canvas_height, shapes, shape_groups)
    img = _render(canvas_width, # width
                  canvas_height, # height
                  2, # num_samples_x
                  2, # num_samples_y
                  0, # seed
                  None,
                  *scene_args)
    return img
def main(args):
    """Load an SVG, dump its parsed scene, re-save it and render it.

    Side effects: writes ``<svg>_resave.svg`` next to the input and
    prints every shape's geometry for inspection.  NOTE: the GPU device
    is hard-coded to ``cuda:1`` and ``args.out_path`` is currently
    unused (the imwrite call below is commented out).
    """
    pydiffvg.set_device(th.device('cuda:1'))

    # Load SVG
    svg_path = os.path.join(args.svg_path)
    save_svg_path = svg_path.replace('.svg', '_resave.svg')
    canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_path)
    print("canvas_width", canvas_width)
    print("canvas_height", canvas_height)
    print("shapes", shapes)
    # Dump each path's control data for debugging.
    for shape in shapes:
        print("num_control_points", shape.num_control_points.size(), shape.num_control_points)
        print("points", shape.points.size(), shape.points)
        print("is_closed", shape.is_closed)
        print("stroke_width", shape.stroke_width.size(), shape.stroke_width)
        print("id", shape.id)
        print("use_distance_approx", shape.use_distance_approx)
    print("shape_groups", shape_groups)
    pydiffvg.save_svg_paths_only(save_svg_path, canvas_width, canvas_height, shapes, shape_groups)
    # Save initial state
    ref = render(canvas_width, canvas_height, shapes, shape_groups)
    # pydiffvg.imwrite(ref.cpu(), args.out_path, gamma=2.2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("svg_path", help="source SVG path")
parser.add_argument("out_path", help="output image path")
args = parser.parse_args()
main(args)
| StarcoderdataPython |
5187721 | """
Created on Oct 2, 2012
@author: <NAME>
Adapted from cos.py from Nghia & Georgiana
"""
import numpy as np
from composes.similarity.similarity import Similarity
from scipy.spatial.distance import jaccard
class JaccardSimilarity(Similarity):
    """
    Computes the jaccard similarity of two vectors.

    NOTE(review): ``scipy.spatial.distance.jaccard`` is a *dissimilarity*
    (0 = identical), so despite the class name this returns a distance —
    confirm callers expect that, or invert with ``1 - jaccard(...)``.
    """

    def _sim(self, v1, v2):
        # Zero vectors have no defined Jaccard value; treat as 0.
        if v1.norm() == 0 or v2.norm() == 0:
            return 0.0
        return jaccard(v1, v2)

    def _sims_to_matrix(self, vector, matrix_):
        # Batch form deliberately unimplemented.
        raise NotImplementedError("whatevs")
| StarcoderdataPython |
9763090 | # -*- coding:utf8 -*-
# File : query_pipe.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 3/19/17
#
# This file is part of TensorArtist.
from . import configs, utils
from ...core import get_logger
from ...core.utils.callback import CallbackManager
from ...core.utils.meta import notnone_property
import zmq
import threading
import queue
import contextlib
import collections
import pickle
import functools
# import msgpack
# import msgpack_numpy
# msgpack_numpy.patch()
# dumpb = functools.partial(msgpack.dumps, use_bin_type=True)
# loadb = msgpack.loads
import pickle
dumpb = pickle.dumps
loadb = pickle.loads
logger = get_logger(__file__)
__all__ = ['QueryMessage', 'QueryRepPipe', 'QueryReqPipe']
QueryMessage = collections.namedtuple('QueryMessage', ['identifier', 'payload'])
class QueryRepPipe(object):
    """Server end of a query pipe over ZeroMQ.

    Receives pickled ``(identifier, type, payload)`` triples on a PULL
    socket, dispatches them through a :class:`CallbackManager`, and sends
    replies back to the named client through a ROUTER socket fed by a
    background send queue.  Two daemon threads drive the recv/send loops.
    """

    def __init__(self, name, send_qsize=0, mode='ipc'):
        self._name = name
        self._conn_info = None
        self._context_lock = threading.Lock()
        self._context = zmq.Context()
        self._tosock = self._context.socket(zmq.ROUTER)  # server -> clients
        self._frsock = self._context.socket(zmq.PULL)    # clients -> server
        self._tosock.set_hwm(10)
        self._frsock.set_hwm(10)
        self._dispatcher = CallbackManager()
        # send_qsize == 0 means an unbounded queue.
        self._send_queue = queue.Queue(maxsize=send_qsize)
        self._rcv_thread = None
        self._snd_thread = None
        self._mode = mode
        assert mode in ('ipc', 'tcp')

    @property
    def dispatcher(self):
        return self._dispatcher

    @notnone_property
    def conn_info(self):
        """[recv_endpoint, send_endpoint] — valid after initialize()."""
        return self._conn_info

    def initialize(self):
        """Bind both sockets and start the recv/send daemon threads."""
        self._conn_info = []
        if self._mode == 'tcp':
            port = self._frsock.bind_to_random_port('tcp://*')
            self._conn_info.append('tcp://{}:{}'.format(utils.get_addr(), port))
            port = self._tosock.bind_to_random_port('tcp://*')
            self._conn_info.append('tcp://{}:{}'.format(utils.get_addr(), port))
        elif self._mode == 'ipc':
            self._conn_info.append(utils.bind_to_random_ipc(self._frsock, self._name + '-c2s-'))
            self._conn_info.append(utils.bind_to_random_ipc(self._tosock, self._name + '-s2c-'))

        self._rcv_thread = threading.Thread(target=self.mainloop_recv, daemon=True)
        self._rcv_thread.start()
        self._snd_thread = threading.Thread(target=self.mainloop_send, daemon=True)
        self._snd_thread.start()

    def finalize(self):
        """Close both sockets and terminate the ZMQ context."""
        utils.graceful_close(self._tosock)
        utils.graceful_close(self._frsock)
        self._context.term()

    @contextlib.contextmanager
    def activate(self):
        """Context manager: initialize on entry, finalize on exit."""
        self.initialize()
        try:
            yield
        finally:
            self.finalize()

    def mainloop_recv(self):
        """Receive loop: unpickle messages and dispatch by type."""
        try:
            while True:
                if self._frsock.closed:
                    break

                msg = loadb(self._frsock.recv(copy=False).bytes)
                identifier, type, payload = msg
                self._dispatcher.dispatch(type, self, identifier, payload)
        except zmq.ContextTerminated:
            pass
        except zmq.ZMQError as e:
            # Bug fix: this loop reads from _frsock, so check that socket
            # (previously checked _tosock, copy-pasted from mainloop_send).
            if self._frsock.closed:
                logger.warn('Recv socket closed unexpectedly.')
            else:
                raise e

    def mainloop_send(self):
        """Send loop: drain the queue and route payloads to clients."""
        try:
            while True:
                if self._tosock.closed:
                    break

                job = self._send_queue.get()
                self._tosock.send_multipart([job.identifier, dumpb(job.payload)], copy=False)
        except zmq.ContextTerminated:
            pass
        except zmq.ZMQError as e:
            if self._tosock.closed:
                logger.warn('Send socket closed unexpectedly.')
            else:
                raise e

    def send(self, identifier, msg):
        """Queue ``msg`` for delivery to the client named ``identifier``."""
        self._send_queue.put(QueryMessage(identifier, msg))
class QueryReqPipe(object):
    """Client end of a query pipe over ZeroMQ.

    Connects a PUSH socket (requests out) and a DEALER socket (replies
    in) to a server's ``conn_info`` pair; ``name`` is used as the ZMQ
    identity so the server's ROUTER can address replies to this client.
    """

    def __init__(self, name, conn_info):
        self._name = name
        self._conn_info = conn_info
        self._context = None
        self._tosock = None
        self._frsock = None

    @property
    def identity(self):
        # ZMQ identities are bytes.
        return self._name.encode('utf-8')

    def initialize(self):
        """Create the context and connect both sockets."""
        self._context = zmq.Context()
        self._tosock = self._context.socket(zmq.PUSH)
        self._frsock = self._context.socket(zmq.DEALER)
        self._tosock.setsockopt(zmq.IDENTITY, self.identity)
        self._frsock.setsockopt(zmq.IDENTITY, self.identity)
        self._tosock.set_hwm(2)
        self._tosock.connect(self._conn_info[0])
        self._frsock.connect(self._conn_info[1])

    def finalize(self):
        """Close both sockets and terminate the context."""
        utils.graceful_close(self._frsock)
        utils.graceful_close(self._tosock)
        self._context.term()

    @contextlib.contextmanager
    def activate(self):
        """Context manager: initialize on entry, finalize on exit."""
        self.initialize()
        try:
            yield
        finally:
            self.finalize()

    def query(self, type, inp, do_recv=True):
        """Send ``(identity, type, inp)`` and optionally block for the reply."""
        self._tosock.send(dumpb((self.identity, type, inp)), copy=False)
        if do_recv:
            out = loadb(self._frsock.recv(copy=False).bytes)
            return out
| StarcoderdataPython |
9788934 | <gh_stars>0
from django.db import models
from leasing.models.mixins import TimeStampedModel
from ..utils import generate_unique_identifier
class Form(TimeStampedModel):
    """A form definition, optionally usable as a template."""
    name = models.CharField(max_length=255)
    description = models.TextField(blank=True)
    # Template forms are copied to create new concrete forms.
    is_template = models.BooleanField(default=False)
    title = models.CharField(max_length=255, blank=True)
class Section(models.Model):
    """A (possibly nested) group of fields within a form.

    The slug ``identifier`` is auto-generated from ``title`` on first
    save and must be unique per form.
    """
    title = models.CharField(max_length=255)
    identifier = models.SlugField()
    visible = models.BooleanField(default=True)
    sort_order = models.PositiveIntegerField(default=0)
    # Whether the UI may add repeated copies of this section.
    add_new_allowed = models.BooleanField(default=False)
    # NOTE(review): unlike the other optional texts this lacks blank=True,
    # so admin/forms will require it even when add_new_allowed is False —
    # confirm that is intended.
    add_new_text = models.CharField(max_length=255)
    parent = models.ForeignKey(
        "self",
        on_delete=models.CASCADE,
        blank=True,
        null=True,
        related_name="subsections",
    )
    form = models.ForeignKey(Form, on_delete=models.CASCADE, related_name="sections")

    class Meta:
        ordering = ["sort_order"]
        unique_together = (
            "form",
            "identifier",
        )

    def save(self, *args, **kwargs):
        # Derive a unique slug from the title on first save only.
        if not self.identifier:
            max_length = self._meta.get_field("identifier").max_length
            self.identifier = generate_unique_identifier(
                Section, self.title, max_length
            )
        super(Section, self).save(*args, **kwargs)
class FieldType(models.Model):
    """A kind of form field (e.g. text, choice); identified by a unique slug."""
    name = models.CharField(max_length=255)
    identifier = models.SlugField(unique=True)

    def save(self, *args, **kwargs):
        # Derive a unique slug from the name on first save only.
        if not self.identifier:
            max_length = self._meta.get_field("identifier").max_length
            self.identifier = generate_unique_identifier(
                FieldType, self.name, max_length
            )
        super(FieldType, self).save(*args, **kwargs)
class Field(models.Model):
    """A single input within a section; slug is unique per section."""
    label = models.CharField(max_length=255)
    hint_text = models.CharField(max_length=255)
    identifier = models.SlugField()
    enabled = models.BooleanField(default=True)
    required = models.BooleanField(default=False)
    validation = models.CharField(max_length=255)
    action = models.CharField(max_length=255)
    sort_order = models.PositiveIntegerField(default=0)
    # PROTECT: a field type in use cannot be deleted.
    type = models.ForeignKey(FieldType, on_delete=models.PROTECT)
    section = models.ForeignKey(Section, on_delete=models.CASCADE)

    class Meta:
        ordering = ["sort_order"]
        unique_together = (
            "section",
            "identifier",
        )

    def save(self, *args, **kwargs):
        # Derive a unique slug from the label on first save only.
        if not self.identifier:
            max_length = self._meta.get_field("identifier").max_length
            self.identifier = generate_unique_identifier(Field, self.label, max_length)
        super(Field, self).save(*args, **kwargs)
class Choice(models.Model):
    """One selectable option of a (choice-type) field."""
    text = models.CharField(max_length=255)
    value = models.CharField(max_length=50)
    action = models.CharField(max_length=255)
    # When True the choice exposes a free-text companion input.
    has_text_input = models.BooleanField(default=False)
    field = models.ForeignKey(Field, on_delete=models.CASCADE)
| StarcoderdataPython |
11354979 | from aiohttp import web
def redirect(request, router_name, **kwargs):
    """Return an HTTP 302 response pointing at the named route.

    ``kwargs`` are forwarded to ``url_for`` to fill the route's
    dynamic path segments.
    """
    return web.HTTPFound(request.app.router[router_name].url_for(**kwargs))
| StarcoderdataPython |
1767559 | <reponame>att/Snappy-Frontend
#!/usr/bin/python
from contextlib import closing
from datetime import datetime
import json
import MySQLdb
import sys
import os
DB_USER = os.environ["SNAPPY_USER"]
DB_PASS = os.environ["SNAPPY_PW"]
DB_NAME = os.environ["SNAPPY_DB"]
DB_IP = os.environ["SNAPPY_HOST"]
DB_PORT = int(os.environ["SNAPPY_PORT"])
def get_tables(cursor):
    """Return every table name in the current database."""
    cursor.execute('SHOW tables')
    rows = cursor.fetchall()
    return [row[0] for row in rows]
def get_rows_as_dicts(cursor, table):
    """Fetch the fixed job columns from ``table`` as a list of dicts."""
    query = 'select id,sub,next,parent,grp,root,state,done,result,feid,policy,arg0,arg1 from {}'.format(table)
    cursor.execute(query)
    headers = [col[0] for col in cursor.description]
    return [dict(zip(headers, values)) for values in cursor.fetchall()]
def dump_date(thing):
    """Serialize datetimes as ISO-8601 strings; everything else via str()."""
    return thing.isoformat() if isinstance(thing, datetime) else str(thing)
# Python 2 script body (note the `<>` operator and print statements).
# Dumps the row for the requested job id as JSON, or an error object.
if len(sys.argv) <> 2:
    print "Usage: ./getsinglesummaryjson.py <job_id>"
    sys.exit()

with closing(MySQLdb.connect(user=DB_USER, passwd=<PASSWORD>, db=DB_NAME, host=DB_IP, port=DB_PORT)) as conn, closing(conn.cursor()) as cursor:
    jobid = sys.argv[1]
    dump = {}
    for table in get_tables(cursor):
        dump[table] = get_rows_as_dicts(cursor, table)

    # NOTE(review): this string assignment is dead — immediately
    # overwritten by the dict below.
    result = "job id " + jobid + " not found"
    # the default output is if the job isn't found
    data = {}
    data['error_msg'] = 'job id ' + jobid + ' not found'
    result = data

    # Linear scan of the jobs table; keeps going after a match (last
    # match wins).  NOTE(review): dump_date above is never passed to
    # json.dumps (default=dump_date), so datetime cells would raise
    # TypeError — confirm whether that's expected.
    counter = 0
    while (counter < len(dump["jobs"])):
        if (int(jobid) == dump["jobs"][counter]["id"]):
            result = dump["jobs"][counter]
        counter += 1

    print(json.dumps(result))
| StarcoderdataPython |
1869137 | # -*- coding: utf-8 -*-
""" Base interfaces for dipy """
from __future__ import print_function, division, unicode_literals, absolute_import
import os.path as op
import numpy as np
from ... import logging
from ..base import (traits, File, isdefined,
BaseInterface, BaseInterfaceInputSpec)
IFLOGGER = logging.getLogger('interface')
HAVE_DIPY = True
try:
import dipy
except ImportError:
HAVE_DIPY = False
def no_dipy():
    """Return True when the optional ``dipy`` dependency is unavailable.

    The module-level ``HAVE_DIPY`` flag is set at import time; a
    ``global`` declaration is unnecessary for this read-only access and
    has been removed.
    """
    return not HAVE_DIPY
def dipy_version():
    """Return the installed dipy version string, or None when dipy is missing."""
    return None if no_dipy() else dipy.__version__
class DipyBaseInterface(BaseInterface):
    """
    A base interface for py:mod:`dipy` computations
    """
    def __init__(self, **inputs):
        # Deliberately only warns (instead of raising) when dipy is
        # absent, so interfaces can still be constructed/introspected.
        if no_dipy():
            IFLOGGER.warn('dipy was not found')
            # raise ImportError('dipy was not found')
        super(DipyBaseInterface, self).__init__(**inputs)
class DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec):
    # Diffusion inputs shared by all dipy interfaces.
    in_file = File(exists=True, mandatory=True, desc=('input diffusion data'))
    in_bval = File(exists=True, mandatory=True, desc=('input b-values table'))
    in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table'))
    b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold'))
    out_prefix = traits.Str(desc=('output prefix for file names'))
class DipyDiffusionInterface(DipyBaseInterface):
    """
    A base interface for py:mod:`dipy` computations
    """
    input_spec = DipyBaseInterfaceInputSpec

    def _get_gradient_table(self):
        """Build a dipy gradient table from the bval/bvec input files."""
        bval = np.loadtxt(self.inputs.in_bval)
        # Transposed — assumes the bvec file stores one vector per
        # column (3 x N); TODO confirm against the data layout.
        bvec = np.loadtxt(self.inputs.in_bvec).T
        from dipy.core.gradients import gradient_table
        gtab = gradient_table(bval, bvec)

        gtab.b0_threshold = self.inputs.b0_thres
        return gtab

    def _gen_filename(self, name, ext=None):
        """Derive an output filename from the input file.

        Handles double extensions (``.nii.gz``) so the suffix ``name``
        is inserted before the full extension.  ``ext`` overrides the
        input's extension when given.
        """
        fname, fext = op.splitext(op.basename(self.inputs.in_file))
        if fext == '.gz':
            fname, fext2 = op.splitext(fname)
            fext = fext2 + fext

        if not isdefined(self.inputs.out_prefix):
            out_prefix = op.abspath(fname)
        else:
            out_prefix = self.inputs.out_prefix

        if ext is None:
            ext = fext

        return out_prefix + '_' + name + ext
| StarcoderdataPython |
6548102 | <filename>mynie/__init__.py
# This file contains the entry_point callable that is responsible for
# dynamically adding the parsers into the Genie framework.
from mynie.parser import linux, myos
__all__ = []
def add_my_parsers():
    """Entry point returning this package's parser classes keyed by OS.

    See genie/libs/parser/utils/entry_points.py for more information.
    """
    return {
        'linux': [
            linux.free.LinuxFree,
            linux.date.LinuxDate
        ],
        'myos': [
            myos.free.MyOsFree,
            myos.date.MyOsDate
        ],
    }
| StarcoderdataPython |
5136311 | """
CUDA PARALLEL PROGRAMMING: parallel_numba.py
* Purpose: Python code for performing matrix operations on the GPU using Numba CUDA.JIT
* @author <NAME>
* @version 1.0 15/10/18
"""
import numpy as np
from numba import cuda
NUM_THREADS = 32
def get_cuda_execution_config(m, n):
    """Compute the (grid, block) launch dimensions covering an m x n matrix.

    Uses ceiling division so exactly enough blocks are launched to cover
    the matrix.  The previous ``(dim // block) + 1`` formula launched a
    full extra row/column of idle blocks whenever the dimension was an
    exact multiple of NUM_THREADS; the kernels' bounds guards made that
    harmless but wasteful.

    Args:
        m (int): number of rows of matrix.
        n (int): number of cols of matrix.

    Returns:
        tuple of `int`: grid dimensions (blocks along x, blocks along y).
        tuple of `int`: block dimensions (threads per block).
    """
    dimBlock = (NUM_THREADS, NUM_THREADS)
    dimGrid = ((n + dimBlock[0] - 1) // dimBlock[0],
               (m + dimBlock[1] - 1) // dimBlock[1])
    return dimGrid, dimBlock
@cuda.jit
def matmul(a, b, c, m, n, k):
    """Naive GPU matrix multiply: c (m x k) = a (m x n) @ b (n x k).

    One thread computes one output element; threads landing outside the
    matrix (the grid may be over-provisioned) do nothing.
    """
    row = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    col = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    if row < m and col < k:
        summ = 0
        for i in range(n):
            summ += a[row, i] * b[i, col]
        c[row, col] = summ
@cuda.jit
def matsum(a, b, c, m, n):
    """Element-wise matrix addition: c = a + b (all m x n)."""
    row = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    col = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    if row < m and col < n:
        c[row, col] = a[row, col] + b[row, col]
@cuda.jit
def matprod(a, b, c, m, n):
    """Element-wise (Hadamard) product: c = a * b (all m x n)."""
    row = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    col = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    if row < m and col < n:
        c[row, col] = a[row, col] * b[row, col]
@cuda.jit
def sum(a, value, c, m, n):
    """Add scalar ``value`` to every element: c = a + value.

    NOTE: shadows the ``sum`` builtin at module level; kept because the
    __main__ block below calls it by this name.
    """
    row = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    col = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    if row < m and col < n:
        c[row, col] = a[row, col] + value
@cuda.jit
def prod(a, value, c, m, n):
    """Multiply every element by scalar ``value``: c = a * value."""
    row = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    col = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    if row < m and col < n:
        c[row, col] = a[row, col] * value
@cuda.jit
def maximum(a, value, c, m, n):
    """Element-wise max against scalar ``value``: c = max(a, value)."""
    row = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    col = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    if row < m and col < n:
        c[row, col] = a[row, col] if a[row, col] > value else value
if __name__ == '__main__':
    # Self-test: run every kernel and verify results against NumPy.
    # Requires a CUDA-capable GPU (numba.cuda).
    # matmul
    M = 324
    N = 432
    K = 412

    a = np.random.randn(M, N)
    b = np.random.randn(N, K)
    c = np.zeros(shape=(M, K))

    dimGrid, dimBlock = get_cuda_execution_config(M, K)
    matmul[dimGrid, dimBlock](a, b, c, M, N, K)
    assert np.allclose(np.dot(a, b), c), 'matmul op is buggy'
    assert not np.isnan(np.sum(c)), 'matmul op is buggy'

    M = 5425
    N = 4123

    a = np.random.randn(M, N)
    b = np.random.randn(M, N)
    c = np.zeros_like(a)
    value = 546.432

    dimGrid, dimBlock = get_cuda_execution_config(M, N)

    # other ops
    matsum[dimGrid, dimBlock](a, b, c, M, N)
    assert np.all((a + b) == c), 'matsum op is buggy'

    matprod[dimGrid, dimBlock](a, b, c, M, N)
    assert np.all((a * b) == c), 'matprod op is buggy'

    sum[dimGrid, dimBlock](a, value, c, M, N)
    assert np.all((a + value) == c), 'elem-wise sum op is buggy'

    prod[dimGrid, dimBlock](a, value, c, M, N)
    assert np.all((a * value) == c), 'elem-wise prod op is buggy'

    maximum[dimGrid, dimBlock](a, 0., c, M, N)
    assert np.all(np.maximum(a, 0.) == c), 'elem-wise max op is buggy'

    print('Passed all tests!')
| StarcoderdataPython |
12861982 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Second community-app migration: wires up the foreign-key /
    many-to-many relations created as plain fields in 0001_initial.

    Auto-generated; do not edit the operations by hand.
    """

    dependencies = [
        ('group', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('community', '0001_initial'),
        ('doc', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='rule',
            name='cached_ids',
            field=models.ManyToManyField(to='doc.Document'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='rule',
            name='community_list',
            field=models.ForeignKey(to='community.CommunityList'),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='rule',
            unique_together=set([('community_list', 'rule_type', 'value')]),
        ),
        migrations.AddField(
            model_name='listnotification',
            name='event',
            field=models.ForeignKey(to='doc.DocEvent'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='expectedchange',
            name='community_list',
            field=models.ForeignKey(to='community.CommunityList'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='expectedchange',
            name='document',
            field=models.ForeignKey(to='doc.Document'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='emailsubscription',
            name='community_list',
            field=models.ForeignKey(to='community.CommunityList'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='documentchangedates',
            name='document',
            field=models.ForeignKey(to='doc.Document'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='displayconfiguration',
            name='community_list',
            field=models.ForeignKey(to='community.CommunityList'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='communitylist',
            name='added_ids',
            field=models.ManyToManyField(to='doc.Document'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='communitylist',
            name='group',
            field=models.ForeignKey(blank=True, to='group.Group', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='communitylist',
            name='user',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
11349456 | <filename>flask/ytegg/ytegg/app.py<gh_stars>0
import os, click, sqlite3
from flask import Flask, g, redirect, url_for
from .config import DefaultConfig
from .model import db, User
from .todo import todo
from .user import user
from . import model
__all__ = ['create_app']
def create_app(config=None, app_name=None):
    """Create and configure the Flask application.

    Args:
        config: optional object/import-path understood by
            ``Config.from_object``; applied last so it can override the
            defaults below.  (Previously this parameter was accepted but
            silently ignored.)
        app_name: Flask application name; defaults to
            ``DefaultConfig.PROJECT``.

    Returns:
        The configured :class:`~flask.Flask` instance.
    """
    if app_name is None:
        app_name = DefaultConfig.PROJECT
    app = Flask(app_name)
    app.register_blueprint(todo, url_prefix='/todo')
    app.register_blueprint(user, url_prefix='/user')
    app.config.from_object(DefaultConfig)
    # NOTE(review): hard-coded development credentials / secret key —
    # must not ship to production.
    app.config.update(dict(
        DATABASE=os.path.join(app.root_path, 'test-egg.db'),
        SECRET_KEY='development key',
        USERNAME='admin',
        PASSWORD='<PASSWORD>'
    ))
    if config is not None:
        # Caller-supplied config wins over the defaults above.
        app.config.from_object(config)
    db.init_app(app)
    app.config["yteggdb"] = db
    app.config['model'] = model
    return app
app = create_app()
@app.route('/')
def index():
    # Placeholder landing page.
    return "home..."
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Redirect to the user blueprint's login view.

    The previous code called ``redirect('user/login', next=...)``, but
    :func:`flask.redirect` accepts no ``next`` keyword (it would raise
    ``TypeError`` at request time) and a bare relative string bypasses
    URL building.  Build the target with ``url_for`` and carry the
    post-login destination as a ``next`` query parameter instead.
    """
    # NOTE(review): assumes the login view's endpoint is 'user.login'
    # (blueprint ``user`` registered under /user) — confirm.
    return redirect(url_for('user.login', next=url_for('index')))
@app.route('/debug')
def debug():
    # Drops the serving worker into an interactive pdb session —
    # development convenience only; remove before any deployment.
    import pdb; pdb.set_trace()
    return "debug..."
@app.route('/admin')
def admin():
    # Static link to the admin area (served elsewhere at /admin/).
    return '<a href="/admin/">Click me to get to Admin!</a>'
@app.cli.command('initdb')
def initdb():
    """Recreate the database schema.

    DESTRUCTIVE: drops all existing tables before creating them.
    """
    db.drop_all()
    db.create_all()
'''
if __name__ == '__main__':
db.init_app(app)
#db.create_all(app=app)
app.run(debug=True,threaded=True)
'''
| StarcoderdataPython |
9742477 | # SPDX-FileCopyrightText: 2021 Genome Research Ltd.
#
# SPDX-License-Identifier: MIT
from main.service import SpeciesService
from main.swagger import SpeciesSwagger
from .base import BaseResource, setup_resource
api_species = SpeciesSwagger.api
@setup_resource
class SpeciesResource(BaseResource):
    """Species endpoint: binds its service and swagger spec into the
    generic resource plumbing.  ``setup_resource`` (from .base)
    presumably performs the route registration — verify there.
    """
    class Meta:
        service = SpeciesService
        swagger = SpeciesSwagger
| StarcoderdataPython |
11379066 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Used to access API for retrieving information about competitions.
from .get import get_competitions # noqa: F401
from .competition import Competition # noqa: F401
from .standing import CompetitionStanding # noqa: F401
| StarcoderdataPython |
295906 | # coding=utf-8
import logging
import sys
import traceback
from collections import OrderedDict
from datetime import datetime
from queue import Empty
from queue import Queue
from threading import Thread
from mongoengine import fields
from zspider.confs.conf import INNER_IP
from zspider.confs.conf import LOG_DATEFORMAT
from zspider.utils import engine
from zspider.utils.fields_models import FBaseQuerySet
from zspider.utils.fields_models import IpField
__author__ = "zephor"
LEVELS = OrderedDict()
LEVELS[logging.NOTSET] = "NSET"
LEVELS[logging.DEBUG] = "DBUG"
LEVELS[logging.INFO] = "INFO"
LEVELS[logging.WARN] = "WARN"
LEVELS[logging.ERROR] = "ERRO"
LEVELS[logging.FATAL] = "FATL"
class BaseLog(engine.DynamicDocument):
    """Abstract base document for log records stored in MongoDB.

    Dynamic: extra per-record fields (anything the handler attaches)
    are persisted alongside the declared ones below.
    """

    meta = {
        "abstract": True,
        "queryset_class": FBaseQuerySet,
        "index_background": True,
        "indexes": ["#ip", ("-time", "-msecs")],  # hashed index
    }

    ip = IpField(required=True, verbose_name=u"机器ip")
    pid = fields.IntField(required=True)
    pathname = fields.StringField(verbose_name=u"文件")
    lineno = fields.IntField(required=True, verbose_name=u"行号")
    level = fields.IntField(default=logging.NOTSET, choices=LEVELS.keys())
    msg = fields.StringField(verbose_name=u"信息")
    # NOTE(review): the handler stores the formatted `asctime` string
    # here — confirm mongoengine coerces it to a datetime as intended.
    time = fields.DateTimeField(required=True, verbose_name=u"时间")
    msecs = fields.FloatField(required=True)
class LogCrawler(BaseLog):
    """Crawler log records; capped collection (5 GiB / 10M docs)."""

    meta = {
        "max_size": 5 * 2 ** 30,
        "max_documents": 10000000,
        "indexes": ["task_id", "#url", "$task_name"],
    }

    task_id = fields.ObjectIdField(verbose_name=u"任务ID")
    task_name = fields.StringField(max_length=32, verbose_name=u"任务名称")
    url = fields.URLField()
class LogDispatcher(BaseLog):
    """Dispatcher log records; capped collection (512 MiB / 1M docs)."""
    meta = {"max_size": 512 * 2 ** 20, "max_documents": 1000000}

    task_id = fields.ObjectIdField(verbose_name=u"任务ID")
class ThreadMongoHandler(logging.Handler):
    """logging.Handler that persists LogRecords to MongoDB asynchronously.

    emit() only serializes the record into a plain dict and enqueues it; a
    small pool of daemon worker threads drains the queue and saves the
    documents, so logging callers never block on MongoDB.
    """

    # LogRecord fields that are either handled specially in record() or
    # deliberately not copied onto the document as dynamic attributes.
    RECORD_FIELDS = {
        "threadName",
        "name",
        "thread",
        "created",
        "process",
        "processName",
        "args",
        "module",
        "filename",
        "levelno",
        "msg",
        "message",
        "exc_info",
        "funcName",
        "relativeCreated",
        "levelname",
        "asctime",
    }

    def __init__(self, log_model, max_thread=2, *args):
        """
        :param log_model: BaseLog subclass used to persist records.
        :param max_thread: number of writer threads, must be in 1..5.
        :param args: forwarded to logging.Handler (e.g. the level).
        """
        super(ThreadMongoHandler, self).__init__(*args)
        assert issubclass(log_model, BaseLog), "log_model must be a subclass of BaseLog"
        assert 0 < max_thread < 6, "thread is not efficient enough, must be 1~5 threads"
        self.log_cls = log_model
        log_model.ensure_index(
            "#ip"
        )  # prevent bug: non-thread safe mongoengine collection creation
        self.q = Queue()
        self._r = 1  # run flag polled by worker loops; cleared by close()
        thread_pool = self.tp = set()
        while len(thread_pool) < max_thread:
            worker = Thread(target=self.record)
            # FIX: Thread.setDaemon() is deprecated since Python 3.10;
            # assign the ``daemon`` attribute directly instead.
            worker.daemon = True
            thread_pool.add(worker)
        for p in thread_pool:
            p.start()

    @staticmethod
    def _write(msg=None):
        # Last-resort error reporting to stderr; defaults to the current
        # traceback.  Never raises (logging must not crash the caller).
        if msg is None:
            msg = traceback.format_exc()
        try:
            sys.stderr.write(
                "{0}: {1}".format(datetime.now().strftime(LOG_DATEFORMAT), msg)
            )
        except Exception:
            pass

    def handle(self, record):
        """Filter the record and, when accepted, enqueue it via emit()."""
        rv = self.filter(record)
        if rv:
            self.emit(record)
        return rv

    def emit(self, record):
        """Serialize the record to a flat dict and enqueue it (non-blocking)."""
        try:
            self.format(record)  # populates record.message and record.asctime
            msg = {}
            for k, v in record.__dict__.items():
                # Only primitive values can be stored as dynamic fields.
                if isinstance(v, (str, int, float)):
                    msg[k] = v
            self.q.put_nowait(msg)
        except Exception:
            self.handleError(record)

    def close(self):
        """Stop the workers (they notice _r within their 2s poll) and report
        how many queued records were dropped."""
        self._r = 0
        for p in self.tp:
            p.join()
        self._write("exit with %d logs remained\n" % self.q.qsize())

    def record(self):
        # Worker loop: drain the queue and save one document per message
        # until close() clears the run flag.
        while self._r:
            try:
                msg = self.q.get(timeout=2)
            except Empty:
                continue
            except SystemExit:
                raise
            except Exception:
                self._write()
                continue
            log = self.log_cls(
                ip=INNER_IP,
                pid=msg["process"],
                level=msg["levelno"],
                msg=msg["message"],
                time=msg["asctime"],
            )
            # Copy everything else (e.g. task_id, url) as dynamic fields.
            for k, v in msg.items():
                if k not in self.RECORD_FIELDS:
                    setattr(log, k, v)
            try:
                log.save()
            except AssertionError:
                self._write()
            except Exception:
                # this supposed to be resolved
                self._write()
| StarcoderdataPython |
31872 | <gh_stars>0
from . import views
from django.contrib.auth import views as auth_views
from django.urls import path
# Route table for the CSV-to-table app; the route names are used by
# {% url %} / reverse() lookups.  (Kari CSV_TO_TABLE commit.)
urlpatterns = [
    path('csv_upload/', views.csv_table, name='csv_table'),
    path('today/', views.today_table, name='today_table'),
    path('search/', views.search, name='search'),
]
248529 | <reponame>AminAliH47/PicoStyle<filename>account/admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from account.models import User
# Extend the stock UserAdmin change form with extra fieldset sections for the
# custom User model.  The right-hand side is a tuple of three
# (section title, options) pairs, appended to the default fieldsets.
UserAdmin.fieldsets += ('Seller', {'fields': (
    'is_seller', 'company_name', 'co_address', 'co_country_registered', 'co_website_address', 'co_email',
    'about_manager_en', 'about_manager_ru', 'about_manager_it', 'phone_number', 'products_type', 'brand_name',
    'about_brand_en', 'about_brand_ru', 'about_brand_it', 'brand_logo', 'is_brand', 'branch_address',
)}), ('Author', {'fields': ('is_author',)}), ('Advance', {'fields': ('is_entrepreneur',)})

admin.site.register(User, UserAdmin)
| StarcoderdataPython |
5055712 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import constants
remote_repository_id_list = []
def app_url(content):
    """Substitute the $APP_URL placeholder with the APPLICATION_URL env var."""
    return content.replace('$APP_URL', os.environ['APPLICATION_URL'])
def repository_group(content):
    """Substitute $REPOSITORY_GROUP with the REPOSITORY_GROUP template,
    itself parameterised by the REPOSITORY_GROUP env var."""
    rendered = constants.REPOSITORY_GROUP.replace(
        '$ID', os.environ['REPOSITORY_GROUP']
    )
    return content.replace('$REPOSITORY_GROUP', rendered)
def remote_repositories(content):
    """
    Generate REMOTE_REPOSITORIES from ENV VARs.

    Each non-empty REMOTE1..REMOTE12 var becomes one remote-repository entry
    with id 1..12.  EXTRA_REMOTES (optional, comma separated) supplies
    additional remotes, whose ids continue from 13.  Every id that is
    emitted is also recorded in remote_repository_id_list so that
    proxy_connectors() references exactly the repositories that exist.
    """
    global remote_repository_id_list
    temp = ' <remoteRepositories>\n'
    for i in range(12):
        try:
            var_name = os.environ['REMOTE' + str(i+1)]
        except KeyError:
            continue
        if var_name == "":
            continue
        remote_repository = constants.REMOTE_REPOSITORIES.replace('$URL', var_name)
        remote_repository = remote_repository.replace('$REPO_ID', str(i+1))
        remote_repository = remote_repository.replace('$REPO_NAME', str(i+1))
        temp += remote_repository
        remote_repository_id_list.append(i+1)
    try:
        # Drop empty fragments so that EXTRA_REMOTES="" contributes nothing
        # and a single URL is not lost (the old len(extras) == 1 early
        # return silently discarded a lone extra remote).
        extras = [e.strip() for e in os.environ['EXTRA_REMOTES'].split(',') if e.strip()]
    except KeyError:
        extras = []  # EXTRA_REMOTES is optional
    for j, url in enumerate(extras):
        # BUG FIX: the emitted $REPO_ID used to be str(i+1) (the leaked loop
        # index, i.e. the same value for every extra repo) while the id
        # recorded for proxy_connectors() was 13 + j; use the recorded id in
        # both places so the generated proxy connectors resolve.
        repo_id = str(13 + j)
        remote_repository = constants.REMOTE_REPOSITORIES.replace('$URL', url)
        remote_repository = remote_repository.replace('$REPO_ID', repo_id)
        remote_repository = remote_repository.replace('$REPO_NAME', repo_id)
        temp += remote_repository
        remote_repository_id_list.append(13 + j)
    temp += '\n </remoteRepositories>'
    return content.replace('$REMOTE_REPOSITORIES', temp)
def proxy_connectors(content):
    """
    Generate PROXY_CONNECTORS from the previously collected repository ids,
    connecting the virtual repository group with each real Maven repo.
    """
    parts = [' <proxyConnectors>\n']
    for repo_id in remote_repository_id_list:
        connector = constants.PROXY_CONNECTORS.replace('$ORDER', str(repo_id))
        parts.append(connector.replace('$TARGET_REPO_ID', str(repo_id)))
    parts.append('\n </proxyConnectors>')
    return content.replace('$PROXY_CONNECTORS', ''.join(parts))
def main():
    """
    Generate the Apache Archiva config file ($ARCHIVA_HOME/conf/archiva.xml)
    by applying each placeholder substitution in turn.
    Must be run against the settings.xml distributed with this code.
    """
    settings_file = os.environ['ARCHIVA_HOME'] + '/conf/archiva.xml'
    try:
        with open(settings_file, 'r') as settings:
            content = settings.read()
        # Apply every placeholder transformation in order.
        for transform in (app_url, repository_group,
                          remote_repositories, proxy_connectors):
            content = transform(content)
        with open(settings_file, 'w') as settings:
            settings.write(content)
    except IOError:
        sys.exit('IOException when operating on file %s' % settings_file)


if __name__ == '__main__':
    main()
9693163 | # SPDX-FileCopyrightText: Copyright (c) 2021 <NAME>
#
# SPDX-License-Identifier: MIT
# The MIT License (MIT)
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`sparkfun_qwiickeypad`
================================================================================
CircuitPython library for the Sparkfun Qwiic Keypad
* Author(s): <NAME>
Implementation Notes
--------------------
**Hardware:**
* This is library is for the SparkFun Qwiic 12-Button Keypad.
* SparkFun sells these at its website: www.sparkfun.com
* Do you like this library? Help support SparkFun. Buy a board!
https://www.sparkfun.com/products/15290
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
# Package version metadata; the auto placeholder is rewritten by the
# release tooling.
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/fourstix/Sparkfun_CircuitPython_QwiicKeypad.git"
# imports
from time import sleep
from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
# public constants
QWIIC_KEYPAD_ADDR = const(0x4B)  # default I2C Address

# register constants (device register map offsets)
_KEYPAD_ID = const(0x00)
_KEYPAD_VERSION1 = const(0x01)
_KEYPAD_VERSION2 = const(0x02)
_KEYPAD_BUTTON = const(0x03)
_KEYPAD_TIME_MSB = const(0x04)
_KEYPAD_TIME_LSB = const(0x05)
_KEYPAD_UPDATE_FIFO = const(0x06)
_KEYPAD_CHANGE_ADDRESS = const(0x07)

# This sets the bit0 on the updateFIFO register
_UPDATE_FIFO_COMMAND = const(0x01)
# class
class Sparkfun_QwiicKeypad:
    """CircuitPython class for the Sparkfun Qwiic 12-Button Keypad."""

    def __init__(self, i2c, address=QWIIC_KEYPAD_ADDR, debug=False):
        """Initialize Qwiic Keypad for i2c communication."""
        self._device = I2CDevice(i2c, address)
        # save handle to i2c bus in case address is changed
        self._i2c = i2c
        self._debug = debug

    # public properties (read-only)
    @property
    def connected(self):
        """Check the id of Keypad. Returns True if successful."""
        # NOTE(review): compares the ID register against the default I2C
        # address constant (0x4B) — presumably the device reports its
        # address there; confirm against the firmware register map.
        if self._read_register(_KEYPAD_ID) != QWIIC_KEYPAD_ADDR:
            return False
        return True

    @property
    def version(self):
        """Return the version string for the Keypad firmware."""
        major = self._read_register(_KEYPAD_VERSION1)
        minor = self._read_register(_KEYPAD_VERSION2)
        return "v" + str(major) + "." + str(minor)

    @property
    def button(self):
        """Return the button at the top of the stack (aka the oldest button).
        Return -1 for Error/Busy Try Again or 0 for No Button Pressed."""
        return self._read_register(_KEYPAD_BUTTON)

    @property
    def time_since_pressed(self):
        """Return the number of milliseconds since the current button in FIFO was pressed."""
        msb = self._read_register(_KEYPAD_TIME_MSB)
        lsb = self._read_register(_KEYPAD_TIME_LSB)
        return (msb << 8) | lsb

    # public functions
    def set_i2c_address(self, new_address):
        """Change the i2c address of Keypad and return True if successful."""
        # check range of new address (valid 7-bit, non-reserved addresses)
        if new_address < 8 or new_address > 119:
            print("ERROR: Address outside 8-119 range")
            return False
        # write new address
        self._write_register(_KEYPAD_CHANGE_ADDRESS, new_address)

        # wait a second for joystick to settle after change
        sleep(1)

        # try to re-create new i2c device at new address
        try:
            self._device = I2CDevice(self._i2c, new_address)
        except ValueError as err:
            print("Address Change Failure")
            print(err)
            return False

        # if we made it here, everything went fine
        return True

    def update_fifo(self):
        """Commands keypad to plug in the next button into the register map."""
        self._write_register(_KEYPAD_UPDATE_FIFO, _UPDATE_FIFO_COMMAND)

    # No i2c begin function is needed since I2Cdevice class takes care of that

    # private methods
    def _read_register(self, addr):
        # Read and return a byte from the specified 8-bit register address.
        # ignore spurious Remote IO errors thrown when keypad is busy
        try:
            with self._device as device:
                device.write(bytes([addr & 0xFF]))
                result = bytearray(1)
                # write_then_readinto() does not work reliably,
                # so do explicit write followed by read into
                # device.write_then_readinto(bytes([addr & 0xFF]), result)
                device.readinto(result)
                if self._debug:
                    print("$%02X => %s" % (addr, [hex(i) for i in result]))
                return result[0]
        except OSError as err:
            if self._debug:
                print(err)
            # return error value for read
            return -1

    def _write_register(self, addr, value):
        # Write a byte to the specified 8-bit register address
        # ignore spurious Remote IO errors thrown when keypad is busy
        try:
            with self._device as device:
                # Wait a bit for bus to settle
                sleep(0.050)
                device.write(bytes([addr & 0xFF, value & 0xFF]))
                if self._debug:
                    print("$%02X <= 0x%02X" % (addr, value))
        except OSError as err:
            if self._debug:
                print(err)
| StarcoderdataPython |
8010961 | <reponame>snasiriany/parasol<filename>parasol/control/mpc.py
import tqdm
import numpy as np
from .common import Controller
import parasol.util as util
import scipy.stats as st
class MPC(Controller):
    """Model-predictive controller: plans actions with the cross-entropy
    method (CEM) over trajectories rolled out in a learned latent model."""

    control_type = 'mpc'

    def __init__(self, model, env, horizon,
                 diag_cost=False,
                 action_min=-1.0, action_max=1.0, sample_std=1.0):
        # NOTE(review): diag_cost, action_min/max and sample_std are stored
        # or accepted but never used below — confirm whether they are
        # consumed elsewhere via the Controller base class.
        self.model = model
        self.horizon = horizon
        # Latent state / action / observation / control dimensions.
        self.ds, self.da = self.model.ds, self.model.da
        self.do, self.du = self.model.do, self.model.du
        self.horizon = horizon  # (duplicate assignment; harmless)
        self.diag_cost = diag_cost
        self.action_min, self.action_max = action_min, action_max
        self.env = env

    def initialize(self):
        # No per-episode state to reset.
        pass

    def act(self, observations, controls, t, noise=None):
        """Encode the history into a latent state and CEM-plan the next action."""
        state, _ = self.model.encode(observations, controls, t)
        # Shrink the planning horizon near the end of the episode.
        horizon = min(self.horizon, self.model.horizon - t)
        action = self.cem_opt(state, horizon, iters=10)
        if noise is not None:
            action += noise * 0.01  # small exploration perturbation
        return action

    def cem_opt(self, state, horizon, iters=1):
        """Cross-entropy optimization of an action sequence; returns the
        first action of the final mean sequence."""
        mu = np.zeros((horizon,self.da))
        sigma = 0.2 * np.ones((horizon, self.da))
        for i in range(iters):
            states, actions = self.sim_actions_forward(state, horizon, mu, sigma)
            costs = self.eval_traj_costs(states, actions)
            # Refit the sampling distribution to the 15 lowest-cost candidates.
            best_candidates = actions[np.argsort(costs)[:15]]
            mu, sigma = np.mean(best_candidates, axis=0), np.std(best_candidates, axis=0)
            sigma = np.clip(sigma, 0, 2)
        return mu[0]

    def sim_actions_forward(self, state, horizon, mu, sigma):
        """Sample 1024 truncated-normal action sequences and roll the model
        forward; returns (states[n, t, ds], actions[n, t, da])."""
        num_traj = 1024
        states = [np.tile(state, [num_traj, 1])]
        # Truncated normal keeps samples within +/- 2 sigma of mu.
        X = st.truncnorm(-2, 2, loc = mu, scale = sigma)
        actions = X.rvs(size = (num_traj, horizon, self.da))
        curr_states = states[0]
        for t in range(horizon):
            curr_states, _ = self.model.forward(curr_states, actions[:, t], 0)
            if t < horizon - 1:
                states.append(curr_states)
        return np.array(states).transpose([1, 0, 2]), actions

    def eval_traj_costs(self, states, actions):
        """Model state cost plus a quadratic torque penalty.
        NOTE(review): assumes model.evaluate_cost returns a per-trajectory
        (or per-step) cost compatible with the 'nt' einsum term — confirm
        its output shape against argsort usage in cem_opt."""
        costs = self.model.evaluate_cost(states)
        costs += 0.5 * np.einsum('nta,ab,ntb->nt', actions, self.env.torque_matrix(), actions)
        return costs

    def train(self, rollouts, train_step, out_dir=None):
        # MPC is training-free; planning happens online at act() time.
        pass
| StarcoderdataPython |
4892111 | <filename>src/onnx2ks/onnx-build-prelude.py
#
# onnx-build-prelude - Convert ONNX Schemas to Knossos prelude
#
# References:
# https://github.com/onnx/onnx/blob/master/docs/IR.md
# https://github.com/onnx/onnx/blob/72b701f7a55cafa4b8ab66a21dc22da0905b2f4c/onnx/onnx.in.proto
#
# This is largely a run-once script. Its main task is to produce a consistent set of Knossos
# edefs for the current onnx schemas found in "onnx.defs.get_all_schemas()", and place them
# in "etc/onnx-prelude-autogen.ks". It needs to handle a few concepts that don't translate
# well, such as:
#
# - Attributes and inputs
# KS has only inputs, constant parameters can be optimized by OTP
#
# - Type constraints
# ONNX has some slightly tricky rules to declare the same op over multiple types.
# Knossos has ad-hoc overloading, so we just emit the same name with each acceptable type combo.
# This leads to 25 decls for SequenceInsert, but it's seriously not a biggie in the scheme
# of things. Think of it as C++ templates.
#
# - Optional inputs
# Emits one type signature for each callable version of the function
#
# - Optional outputs
# Emits versions of the function of the form "take1$foo" to auto-discard the outputs.
#
# Because it's a "run rarely" script, there are various manual hints to generate the
# right thing, and anthing "too hard" has been manually handled in "etc/onnx-prelude.ks"
#
#%%
from typing import TextIO
from collections import namedtuple
import warnings
import sys
import re
import onnx
import onnxruntime.capi.onnxruntime_pybind11_state as ort
from onnxruntime.capi.onnxruntime_pybind11_state.schemadef import OpSchema
from ksc.type import Type, make_tuple_if_many
from ksc.utils import paren
from ksc.expr import Def, EDef, Rule, Const, Var, Lam, Call, Let, If, Assert
# Pretty printing
# Importing prettyprint to get the decorated printers for Expression and Type
import ksc.prettyprint # pylint: disable=unused-import
# Import the prettyprinter routines we use explicitly in this file
from prettyprinter import cpprint, pprint, pformat
# Needed this in order to see the error messages when pprint fails
import warnings
warnings.filterwarnings("always")
from onnx2ks import get_all_schemas, get_attribute_default_value, onnxAttrType_to_Type
#%%
def comment(s: str):
    """Wrap *s* in Lisp inline-comment delimiters: ``#| ... |#``."""
    return "#|" + s + "|#"
def onnxType_to_Type_with_mangler(ty: str):
    """
    Convert ty to KS type, and record any transformations
    (e.g. Complex -> (Tuple Float Float)) in "mangler".

    Returns (mangler, Type).  The mangler is a name-prefix string such as
    "seq$", "map$" or "cplx$" that records lossy translations; manglers of
    nested element types are concatenated (tensor passes its element's
    mangler through unchanged).
    """
    if ty.startswith("tensor("):
        assert ty.endswith(")")
        mangler, elty = onnxType_to_Type_with_mangler(ty[7:-1])
        # Rank is unknown statically, hence Tensor(-1, ...).
        return mangler, Type.Tensor(-1, elty)
    if ty.startswith("seq("):
        # seq -> Vec
        assert ty.endswith(")")
        mangler, elty = onnxType_to_Type_with_mangler(ty[4:-1])
        return "seq$" + mangler, Type.Tensor(1, elty)
    if ty.startswith("map("):
        # map -> Vec Tuple (a map becomes a vector of key/value pairs)
        assert ty.endswith(")")
        mangler, elty = onnxType_to_Type_with_mangler("tuple(" + ty[4:-1] + ")")
        return "map$" + mangler, Type.Tensor(1, elty)
    if ty.startswith("tuple("):
        assert ty.endswith(")")
        args = ty[6:-1]
        # Split on commas (with optional spaces) and convert each element.
        manglers_and_tys = [
            onnxType_to_Type_with_mangler(s) for s in re.split(", *", args)
        ]
        manglers = "".join([x[0] for x in manglers_and_tys])
        tys = [x[1] for x in manglers_and_tys]
        return f"{manglers}", Type.Tuple(*tys)
    # All floating-point widths collapse onto Knossos Float.
    if ty in ["double", "float", "float16", "bfloat16"]:
        return "", Type.Float
    # Complex numbers become (re, im) pairs, recorded by the "cplx$" mangler.
    if ty in ["complex64", "complex128"]:
        return "cplx$", Type.Tuple(Type.Float, Type.Float)
    # All integer widths/signedness collapse onto Knossos Integer.
    if ty in ["int64", "int32", "int16", "int8", "uint64", "uint32", "uint16", "uint8"]:
        return "", Type.Integer
    if ty == "string":
        return "", Type.String
    if ty == "bool":
        return "", Type.Bool
    raise NotImplementedError(f"didn't convert '{ty}''")
def onnxType_to_Type(ty: str):
    """Convert an ONNX type string to a Knossos Type, discarding the mangler."""
    _mangler, ks_type = onnxType_to_Type_with_mangler(ty)
    return ks_type
TypeConstraintParam = namedtuple("TypeConstraintParam", ["name", "ty"])
def TCPs_from_tc(type_constraint):
    """
    Expand type_constraint(type_param_str, allowed_type_strs) into a list of
    TypeConstraintParam, one per *distinct* Knossos translation of the
    allowed ONNX types.
    """
    # Convert every allowed ONNX type and deduplicate the translations.
    unique_translations = {
        onnxType_to_Type_with_mangler(t) for t in type_constraint.allowed_type_strs
    }
    param_name = type_constraint.type_param_str
    return [TypeConstraintParam(param_name, ty) for ty in unique_translations]
def all_combinations_type_constraints(type_constraints):
    """
    Cartesian product over type constraints.  Given
        [ ("T1", [ty11, ty12, ty13]), ("T2", [ty21, ty22]) ]
    produce every binding list
        [ [("T1", ty11), ("T2", ty21)], [("T1", ty11), ("T2", ty22)], ... ]
    i.e. one list per way of picking a concrete type for each constraint.
    """
    # Base case: no constraints leaves exactly one (empty) binding.
    if not type_constraints:
        return [[]]
    # Recurse on the tail, then prepend every choice for the head.
    suffixes = all_combinations_type_constraints(type_constraints[1:])
    combos = []
    for choice in TCPs_from_tc(type_constraints[0]):
        for suffix in suffixes:
            combos.append([choice] + suffix)
    return combos
def test_all_combinations_type_constraints():
    """Smoke test: CastMap has 2x3 type constraints, so 6 combinations.

    NOTE: silently passes when no CastMap schema is registered.
    """
    print("Test test_all_combinations_type_constraints")
    for s in onnx.defs.get_all_schemas():
        if s.name == "CastMap":
            tcs = s.type_constraints
            all = all_combinations_type_constraints(tcs)
            assert len(all) == 6


# Runs at import time as a sanity check of the installed onnx schemas.
test_all_combinations_type_constraints()
def onnx_schemas_to_prelude(prelude: TextIO):
    """Emit a Knossos prelude (edef declarations) for every registered ONNX
    schema into the *prelude* stream.

    For each schema: write its doc and type constraints as comments, expand
    all combinations of its input type constraints (ad-hoc overloading),
    and emit one edef per optional-input count and per optional-output
    count ("takeN$" variants discard trailing optional outputs).
    """

    def writeln(line):
        prelude.write(line + "\n")

    writeln(";; THIS FILE IS AUTOGENERATED. See onnx-build-prelude.py.")
    print(f"Processing schemas:", end="")
    schemas = get_all_schemas()
    for s in schemas.values():
        domain_prefix = "" if s.domain == "" else s.domain + "."
        print(f" {domain_prefix}{s.name}", end="")
        # 0. Send the doc
        writeln(f'\n;; Doing {domain_prefix}{s.name} # line "{s.file}" {s.line}')
        writeln(f"; since_version {s.since_version}")
        if s.deprecated:
            writeln(";; Deprecated")
            continue
        if s.doc:
            for doc in s.doc.splitlines():
                writeln(f";; {doc}")
        else:
            writeln(";; no doc")
        writeln(f";; Type constraints:")
        for tc in s.type_constraints:
            writeln(
                f";; {tc.type_param_str} | {tc.allowed_type_strs} | {tc.description}"
            )

        # 0.1 Some special-cases, which are assumed "hand-written" in the output,
        # e.g. output type depends on some runtime value.
        # We can relax this if we assume the runtime values are always constants -- e.g. some attrs
        if s.name in [
            "Constant",
            "If",
            "Loop",
            "Scan",
            "ZipMap",
            # Dropout: "ratio" arg is float, not Tensor<float>.
            "Dropout",
            "TrainableDropout",
            "BiasDropout",
            "DropoutGrad",
            "TrainableDropoutGrad",
            "BiasDropoutGrad",
            "SequenceEmpty",
            "Cast",
            "CastMap",
            # "EyeLike",
            "TreeEnsembleClassifier",
            "LinearClassifier",
            "SVMClassifier",
            "LabelEncoder",
            "CategoryMapper",
            "LambOptimizer",  # 1024 optional input groups
        ]:
            print("!", end="")
            writeln(f";; SPECIAL: {s.name}")
            continue

        # 0.2 Further special cases, which just need some help in inferring
        # the output type (from the concrete input types of the signature).
        out_type_from_sig = None
        if s.name == "ConcatFromSequence":
            # Onnx type constraints can't express S: seq<'t> -> T: 't
            def ConcatFromSequence_type(tys):
                return tys[0].tensor_elem_type

            out_type_from_sig = lambda tys: ConcatFromSequence_type(tys)

        if s.name == "DictVectorizer":

            def DictVectorizer_type(tys):
                # Vec (Tuple key value)
                kv = tys[0].tensor_elem_type
                value_ty = kv.tuple_elem(1)
                return Type.Tensor(1, value_ty)

            out_type_from_sig = lambda tys: DictVectorizer_type(tys)

        if s.name == "SplitToSequence":
            out_type_from_sig = lambda tys: Type.Tensor(1, tys[0])

        if s.name == "SequenceAt":
            out_type_from_sig = lambda tys: tys[0].tensor_elem_type

        if s.name == "SequenceConstruct":
            out_type_from_sig = lambda tys: Type.Tensor(1, tys[0])

        # SequenceAt, SequenceConstruct, SequenceEmpty, NonMaxSuppression, TreeEnsembleRegressor

        # Gather type constraints (only those actually used by the inputs).
        output_typeStrs = set([o.typeStr for o in s.outputs])
        input_typeStrs = set([o.typeStr for o in s.inputs])
        input_type_constraints = list(
            filter(lambda tc: tc.type_param_str in input_typeStrs, s.type_constraints)
        )
        all_signatures = all_combinations_type_constraints(input_type_constraints)
        if len(all_signatures) > 1:
            print(f"[x{len(all_signatures)}]", end="")

        for sig in all_signatures:
            # Gather (Tn, Type) pairs into dict
            typeStrDict = {tc.name: tc.ty for tc in sig}

            # 1: Assemble arguments. Knossos treats "inputs" and "attributes" uniformly.
            input_arg_types = []
            input_arg_names = []
            arg_comments = ""

            def onnxTypes_to_mangler_and_Type(typeStr, types):
                # Resolve a formal parameter's type: either bound by the
                # current signature's constraint, or converted directly.
                mangler_and_ty = typeStrDict.get(typeStr)
                if mangler_and_ty != None:
                    return mangler_and_ty
                tys = set([onnxType_to_Type_with_mangler(ty) for ty in types])
                if len(tys) > 1:
                    writeln(
                        f";; WARN: multiple types but no type constraints at {s.name}: {tys}"
                    )
                mangler_and_ty = tys.pop()
                assert mangler_and_ty != None
                return mangler_and_ty

            # 1.1: Inputs
            n_optional = 0
            n_variadic = 0
            for i in s.inputs:
                input_arg_names.append(i.name)
                mangler, ty = onnxTypes_to_mangler_and_Type(i.typeStr, i.types)
                input_arg_types.append(ty)
                if i.option == OpSchema.FormalParameterOption.Single:
                    opt = "single"
                if i.option == OpSchema.FormalParameterOption.Variadic:
                    opt = "variadic"
                    n_variadic += 1
                if i.option == OpSchema.FormalParameterOption.Optional:
                    opt = "optional"
                    n_optional += 1
                arg_comments += f"Arg<{i.name},{ty},{mangler},{opt}> "

            # 1.2: Attributes
            attr_arg_names = []
            attr_arg_types = []
            for key in s.attributes:
                a = s.attributes[key]
                attr_arg_names.append(a.name)
                attr_arg_types.append(onnxAttrType_to_Type(a.type))
                if a.required:
                    arg_comments += f"Attr<{a.name}> "
                else:
                    val = get_attribute_default_value(a)
                    if val == None:
                        warnings.warn(
                            f"Optional attribute without default value {a.name}"
                        )
                        val = "**MISSING**"
                    arg_comments += f"Attr<Optional,{a.name},{val}> "

            # 1.3: Outputs
            return_types = []
            if out_type_from_sig == None:
                for o in s.outputs:
                    mangler, ty = onnxTypes_to_mangler_and_Type(o.typeStr, o.types)
                    if mangler != "":
                        writeln(f";; NOTE: output mangler {mangler}")
                    return_types.append(ty)
            else:
                return_types = [out_type_from_sig(input_arg_types)]

            n_outputs = len(s.outputs)
            n_optional_outputs = 0
            n_variadic_outputs = 0
            for o in s.outputs:
                if o.option == OpSchema.FormalParameterOption.Single:
                    opt = "single"
                if o.option == OpSchema.FormalParameterOption.Variadic:
                    opt = "variadic"
                    n_variadic_outputs += 1
                if o.option == OpSchema.FormalParameterOption.Optional:
                    n_optional_outputs += 1
                    opt = "optional"

            # Grab any manglers
            # NOTE(review): the comprehension clauses look swapped — the
            # first clause reads `i` before the `for i in ...` clause binds
            # it, so `i.types` uses the loop variable leaked from the inputs
            # loop above; likely intended
            # `for i in s.inputs + s.outputs for ty in i.types`.
            manglers = set(
                [
                    onnxType_to_Type_with_mangler(ty)[0]
                    for ty in i.types
                    for i in s.inputs + s.outputs
                ]
            )
            if "" in manglers:
                manglers.remove("")
            if len(manglers) > 0:
                writeln(f";; NOTE: manglers {manglers}")

            # 2: Def vs edef -- not in ort 1.5.2
            # if s.has_function:
            #     writeln(f";; has body: {type(s.function_body)}")

            # Emit actual signatures, one per optional arg, one per optional output
            for k_out in range(n_optional_outputs + 1):
                # k_out = n_optional_outputs -- normal case, unmodified name
                take_n = n_outputs - n_optional_outputs + k_out
                if k_out == n_optional_outputs:
                    name = domain_prefix + s.name
                else:
                    name = f"take{take_n}$" + domain_prefix + s.name

                # Return multiple outputs as a tuple
                if take_n > 1:
                    return_type = Type.Tuple(*return_types[0:take_n])
                    writeln(f";; NOTE: multiple outputs as tuple")
                elif take_n == 1:
                    return_type = return_types[0]
                else:
                    return_type = Type.Tuple()  # Empty tuple is Knossos "unit" type

                writeln(f"; #out={k_out} ARGS: {arg_comments}")
                n_args = len(input_arg_types)
                # One edef per callable arity (each trailing optional input
                # may be omitted).
                for k in range(n_optional + 1):
                    arg_types = input_arg_types[: n_args - k] + attr_arg_types
                    obj = EDef(name, return_type, make_tuple_if_many(arg_types))
                    pprint(obj, stream=prelude, width=1024, ribbon_width=1024, indent=2)

        if s.has_type_and_shape_inference_function:
            writeln(f"; shape${domain_prefix}{s.name}\n")
            pass  # out.append(EDef("shape$" + s.name, Type.Float, [Type.Float, Type.Vec(Type.Float)]))
        else:
            writeln(f"; No shape function for {s.name}\n")

    print("... done")
if __name__ == "__main__":
    # Output path defaults to the checked-in autogen prelude; an explicit
    # path may be given as the first CLI argument.
    if len(sys.argv) < 2:
        filename = "etc/onnx-prelude-autogen.ks"
    else:
        filename = sys.argv[1]
    with open(filename, "w") as prelude:
        onnx_schemas_to_prelude(prelude)
    # BUG FIX: the message printed a literal placeholder instead of the
    # actual output path.
    print(f"Wrote schemas to {filename}")
# %%
| StarcoderdataPython |
6649489 | """
Defines ``FIPS_TO_STATE``, ``STATE_ABBR``, ``OFFICE_NAMES`` and ``PARTY_NAMES``
look-up constants.
"""
# Five-digit county FIPS code -> county name for the six New England states,
# keyed first by USPS state abbreviation.
# BUG FIX: VT '50013' held the anonymization placeholder '<NAME>'; FIPS
# 50013 is Grand Isle County, Vermont.
FIPS_TO_STATE = {
    'CT': {
        '09001': 'FAIRFIELD',
        '09003': 'HARTFORD',
        '09005': 'LITCHFIELD',
        '09007': 'MIDDLESEX',
        '09009': 'NEW HAVEN',
        '09011': 'NEW LONDON',
        '09013': 'TOLLAND',
        '09015': 'WINDHAM'
    },
    'ME': {
        '23001': 'ANDROSCOGGIN',
        '23003': 'AROOSTOOK',
        '23005': 'CUMBERLAND',
        '23007': 'FRANKLIN',
        '23009': 'HANCOCK',
        '23011': 'KENNEBEC',
        '23013': 'KNOX',
        '23015': 'LINCOLN',
        '23017': 'OXFORD',
        '23019': 'PENOBSCOT',
        '23021': 'PISCATAQUIS',
        '23023': 'SAGADAHOC',
        '23025': 'SOMERSET',
        '23027': 'WALDO',
        '23029': 'WASHINGTON',
        '23031': 'YORK'
    },
    'MA': {
        '25001': 'BARNSTABLE',
        '25003': 'BERKSHIRE',
        '25005': 'BRISTOL',
        '25007': 'DUKES',
        '25009': 'ESSEX',
        '25011': 'FRANKLIN',
        '25013': 'HAMPDEN',
        '25015': 'HAMPSHIRE',
        '25017': 'MIDDLESEX',
        '25019': 'NANTUCKET',
        '25021': 'NORFOLK',
        '25023': 'PLYMOUTH',
        '25025': 'SUFFOLK',
        '25027': 'WORCESTER'
    },
    'NH': {
        '33001': 'BELKNAP',
        '33003': 'CARROLL',
        '33005': 'CHESHIRE',
        '33007': 'COOS',
        '33009': 'GRAFTON',
        '33011': 'HILLSBOROUGH',
        '33013': 'MERRIMACK',
        '33015': 'ROCKINGHAM',
        '33017': 'STRAFFORD',
        '33019': 'SULLIVAN'
    },
    'RI': {
        '44001': 'BRISTOL',
        '44003': 'KENT',
        '44005': 'NEWPORT',
        '44007': 'PROVIDENCE',
        '44009': 'WASHINGTON'
    },
    'VT': {
        '50001': 'ADDISON',
        '50003': 'BENNINGTON',
        '50005': 'CALEDONIA',
        '50007': 'CHITTENDEN',
        '50009': 'ESSEX',
        '50011': 'FRANKLIN',
        '50013': 'GRAND ISLE',
        '50015': 'LAMOILLE',
        '50017': 'ORANGE',
        '50019': 'ORLEANS',
        '50021': 'RUTLAND',
        '50023': 'WASHINGTON',
        '50025': 'WINDHAM',
        '50027': 'WINDSOR'
    }
}
# USPS state/territory abbreviation -> full name ('US' is used for
# national-level results).
# NOTE(review): 'MH' is labeled 'Northern Mariana Islands' but MH is the
# Marshall Islands (Northern Mariana Islands is MP, absent here) — confirm
# against the upstream data before relying on this entry.
STATE_ABBR = {
    'AL': 'Alabama',
    'AK': 'Alaska',
    'AS': 'America Samoa',
    'AZ': 'Arizona',
    'AR': 'Arkansas',
    'CA': 'California',
    'CO': 'Colorado',
    'CT': 'Connecticut',
    'DE': 'Delaware',
    'DC': 'District of Columbia',
    'FM': 'Federated States of Micronesia',
    'FL': 'Florida',
    'GA': 'Georgia',
    'GU': 'Guam',
    'HI': 'Hawaii',
    'ID': 'Idaho',
    'IL': 'Illinois',
    'IN': 'Indiana',
    'IA': 'Iowa',
    'KS': 'Kansas',
    'KY': 'Kentucky',
    'LA': 'Louisiana',
    'ME': 'Maine',
    'MH': 'Northern Mariana Islands',
    'MD': 'Maryland',
    'MA': 'Massachusetts',
    'MI': 'Michigan',
    'MN': 'Minnesota',
    'MS': 'Mississippi',
    'MO': 'Missouri',
    'MT': 'Montana',
    'NE': 'Nebraska',
    'NV': 'Nevada',
    'NH': 'New Hampshire',
    'NJ': 'New Jersey',
    'NM': 'New Mexico',
    'NY': 'New York',
    'NC': 'North Carolina',
    'ND': 'North Dakota',
    'OH': 'Ohio',
    'OK': 'Oklahoma',
    'OR': 'Oregon',
    'PW': 'Palau',
    'PA': 'Pennsylvania',
    'PR': 'Puerto Rico',
    'RI': 'Rhode Island',
    'SC': 'South Carolina',
    'SD': 'South Dakota',
    'TN': 'Tennessee',
    'TX': 'Texas',
    'UT': 'Utah',
    'VT': 'Vermont',
    'VI': 'U.S. Virgin Islands',
    'VA': 'Virginia',
    'WA': 'Washington',
    'WV': 'West Virginia',
    'WI': 'Wisconsin',
    'WY': 'Wyoming',
    'US': 'National'
}
# Single-letter office code -> human-readable office description.
OFFICE_NAMES = {
    'A': 'Attorney General',
    'C': 'Controller or Comptroller',
    'D': 'District Attorney',
    'E': 'Education Commissioner or Supt Public Instruction',
    'J': 'Insurance Commissioner',
    'L': 'Lieutenant Governor',
    'M': 'Mayor',
    'N': 'City Council',
    'R': 'Secretary of State',
    'T': 'Treasurer',
    'G': 'Governor',
    'H': 'U.S. House',
    'I': 'Amendment, Ballot Measure, Initiative, Proposal, Proposition, \
Referendum or Question',
    'P': 'President',
    'S': 'U.S. Senate',
    'Y': 'State House, State Assembly, General Assembly or House of Delegates',
    'Z': 'State Senate'
}
# Party abbreviation -> party name.  Casing of keys and values is
# intentionally preserved as found in the upstream election data.
# NOTE(review): 'LEC' -> 'LIBERTY,COLOGY,OMMUNITY' looks garbled (likely
# 'LIBERTY, ECOLOGY, COMMUNITY') — confirm against the source data before
# changing, since values may be matched verbatim elsewhere.
PARTY_NAMES = {
    'ACP': 'A CONNECTICUT PARTY',
    'AFP': 'AMERICAN FREEDOM (FORMERLY ATP)',
    'AGN': 'AGAINST',
    'AHP': 'AMERICAN HERITAGE',
    'AIP': 'AMERICAN INDEPENDENT',
    'AKI': 'ALASKAN INDEPENDENCE',
    'AMC': 'AMERICAN CONSTITUTION',
    'AME': 'AMERICANS ELECT',
    'AMF': 'AMERICA FIRST',
    'AMI': 'AMERICAN INDIAN',
    'AMP': 'AMERICA\'S PARTY',
    'AMR': 'AMERICAN PARTY',
    'AMS': 'AMERICANS',
    'AO': 'ALL OTHERS',
    'ATP': 'AMERICAN THIRD POSITION (NOW AFP)',
    'BEP': 'BLUE ENIGMA PARTY',
    'BLD': 'BUILDERS PARTY',
    'BOT': 'Boston Tea',
    'BUC': 'BUCHANAN REFORM',
    'CAM': 'CONSTITUTIONAL AMERICANS',
    'CC': 'CONCERNED CITIZENS',
    'CEN': 'CENTRIST PARTY',
    'CF': 'CITIZENS FIRST',
    'CIP': 'CONNECTICUT INDEPENDENT PARTY',
    'CM': 'COOL MOOSE',
    'CMT': 'COMMUNIST',
    'CNL': 'CONSTITUTIONAL GOVERNMENT',
    'CNP': 'CONCERNS OF PEOPLE',
    'CON': 'CONSERVATIVE',
    'CRK': 'POLITICIANS ARE CROOKS',
    'CST': 'CONSTITUTION',
    'CTL': 'CT FOR LIEBERMAN',
    'DAS': 'DESCENDANTS OF AMERICAN SLAVES',
    'DCG': 'DC STATEHOOD GREEN',
    'DEM': 'DEMOCRAT',
    'EFP': 'EARTH FEDERATION',
    'EPF': 'ECOLOGY PARTY OF FL',
    'FAA': 'FAIR',
    'FDL': 'FAIR DEAL',
    'FEP': 'FREE ENERGY',
    'FOR': 'FOR',
    'FRE': 'FREEDOM',
    'FRU': 'FRIENDS UNITED',
    'FSB': 'FARMERS & SMALL BUSINESS',
    'FSO': 'FREEDOM SOCIALIST',
    'FVP': 'FAMILY VALUES PARTY',
    'GNW': 'GREENS NO TO WAR',
    'GOP': 'REPUBLICAN',
    'GRN': 'GREEN',
    'GRP': 'GRASS ROOTS',
    'HCP': 'HEALTHCARE PARTY',
    'HP': 'HOME PROTECTION',
    'HQ8': 'Heartquake \'08',
    'IAP': 'INDEPENDENT AMERICAN',
    'IF': 'INDEPENDENT FUSION',
    'IG': 'INDEPENDENT GRASSROOTS',
    'IGR': 'INDEPENDENT GREEN',
    'ILS': 'ILLINOIS SOLIDARITY',
    'IND': 'INDEPENDENT',
    'INP': 'INDEPENDENCE',
    'INR': 'INDEPENDENT REFORM',
    'INT': 'INTEGRITY PARTY',
    'IP': 'INDEPENDENT PARTY',
    'IPD': 'INDEPENDENT PARTY OF DE',
    'IPH': 'INDEPENDENT PARTY OF HAWAII',
    'ISC': 'INDEPENDENT SAVE OUR CHILDREN',
    'IX': 'ILLINOIS GREEN',
    'JP': 'JUSTICE PARTY',
    'JS': 'JESUS PARTY',
    'LAB': 'LABOR',
    'LBL': 'LIBERAL',
    'LBP': 'LOOKING BACK',
    'LEC': 'LIBERTY,COLOGY,OMMUNITY',
    'LFM': 'LABOR AND FARM',
    'LIB': 'LIBERTARIAN',
    'LMJ': 'LEGALIZE MARIJUANA',
    'LTP': 'LOUISIANA TAXPAYERS PARTY',
    'LUN': 'LIBERTY UNION',
    'LUP': 'LIBERTY UNION/PROGRESSIVE',
    'MDE': 'MODERATE DEMOCRAT',
    'MIP': 'MARYLAND INDEPENDENT PARTY',
    'MJP': 'MARIJUANA PARTY',
    'MML': 'MAKE MARIJUANA LEGAL',
    'MNT': 'MOUNTAIN PARTY',
    'MOD': 'MODERATE',
    'MRP': 'MARIJUANA REFORM',
    'NAL': 'NEW ALLIANCE',
    'NEB': 'NEBRASKA',
    'NEW': 'NEW',
    'NLP': 'NATURAL LAW',
    'NMI': 'NEW MEXICO INDEPENDENT PARTY',
    'NNT': 'NO NEW TAXES',
    'NO': 'NO',
    'NON': 'NONE OF THE ABOVE',
    'NP': 'NON PARTISAN',
    'NPA': 'NO PARTY AFFILIATION',
    'NPD': 'NO PARTY DESIGNATION',
    'OBJ': 'Objectivist',
    'ONE': 'ONE EARTH',
    'OTE': '128 DISTRICT',
    'OTH': 'OTHER',
    'PAC': 'PACIFIC',
    'PAG': 'PACIFIC GREEN',
    'PAT': 'PATRIOT PARTY',
    'PCF': 'Pacifist',
    'PCH': 'PERSONAL CHOICE',
    'PEC': 'PETITIONING CANDIDATE',
    'PFP': 'PEACE & FREEDOM',
    'PIP': 'PUERTO RICAN INDEPENDENCE (PR)',
    'PJP': 'PEACE AND JUSTICE PARTY',
    'PLC': 'PRO-LIFE CONSERVATIVE',
    'PLP': 'PROGRESSIVE LABOR PARTY',
    'PNP': 'NEW PROGRESSIVE (PUERTO RICO)',
    'POP': 'POPULIST',
    'PPD': 'POPULAR DEMOCRATIC (PUERTO RICO)',
    'PPO': 'PEACE PARTY OF OREGON',
    'PRG': 'PROGRESSIVE',
    'PRO': 'PROHIBITION',
    'PRT': 'PRESERVE OUR TOWN',
    'PSL': 'SOCIALISM & LIBERATION',
    'PTC': 'PROPERTY TAX CUT',
    'PWF': 'PROTECT WORKING FAMILIES',
    'RES': 'RESOURCE',
    'RFC': 'RANDOLPH FOR CONGRESS',
    'RJF': 'RESTORE JUSTICE-FREEDOM',
    'RM': 'REFORM MINNESOTA',
    'RP': 'REFORM',
    'RPM': 'REPUBLICAN MODERATE',
    'RTL': 'RIGHT TO LIFE',
    'SCC': 'SCHOOL CHOICE',
    'SCL': 'Socialism',
    'SEP': 'SOCIALIST EQUALITY',
    'SM': 'SAVE MEDICARE',
    'SOC': 'SOCIALIST',
    'SPU': 'SOCIALIST USA',
    'Sta': 'STATEHOOD PARTY',
    'SWP': 'SOCIALIST WORKERS',
    'TBD': 'TO BE DETERMINED',
    'TBL': 'THE BETTER LIFE',
    'TCN': 'TAX CUTS NOW',
    'TEA': 'TEA PARTY',
    'TLM': 'TERM LIMITS',
    'TS': 'TIMESIZING.COM PARTY',
    'UCZ': 'UNITED CITIZENS',
    'UNA': 'UNAFFILIATED',
    'UNI': 'UNITED INDEPENDENT',
    'UNP': 'UNITED PACIFISTS',
    'UNR': 'UNENROLLED',
    'UNT': 'UNITED PARTY',
    'UST': 'U.S. TAXPAYERS',
    'UTY': 'UNITY',
    'VET': 'VETERANS PARTY OF AMERICA',
    'WCP': 'WORKING CLASS',
    'WF': 'WORKING FAMILIES',
    'WLG': 'WORKER\'S LEAGUE',
    'WRI': 'WRITE - IN',
    'WSN': 'WEST SIDE NEIGHBORS',
    'WTP': 'WE THE PEOPLE',
    'WW': 'WORKERS WORLD',
    'WYC': 'WYOMING COUNTRY PARTY',
    'YES': 'YES'
}
| StarcoderdataPython |
6535052 |
import warnings
from pydiatra.checks import check_source
from .base import PythonTool, Issue, AccessIssue
class PyDiatraIssue(Issue):
    """Issue produced by the pydiatra static checker (tool tag 'pydiatra')."""
    tool = 'pydiatra'
class PyDiatraTool(PythonTool):
    """
    pydiatra is yet another static checker for Python code.
    """
    @classmethod
    def get_all_codes(cls):
        # pydiatra offers no API to enumerate the tags it can emit.
        return []
    def execute(self, finder):
        """Run pydiatra over every matching file and return filtered issues."""
        collected = []
        # Silence warnings emitted while pydiatra parses questionable sources.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            for path in finder.files(self.config['filters']):
                try:
                    contents = finder.read_file(path)
                except EnvironmentError as exc:
                    collected.append(AccessIssue(exc, path))
                    continue
                collected.extend(
                    self.make_issue(path, tag)
                    for tag in check_source(str(path), contents)
                )
        disabled = self.config['disabled']
        return [issue for issue in collected if issue.code not in disabled]
    def make_issue(self, filepath, tag):
        """Convert a pydiatra tag into a PyDiatraIssue."""
        code = tag.args[0]
        # Fall back to the code itself when the tag carries no extra detail.
        message = ' '.join(tag.args[1:]) or code
        return PyDiatraIssue(code, message, filepath, tag.lineno)
| StarcoderdataPython |
193159 | #!/usr/bin/env python
__all__ = ['tumblr_download']
from ..common import *
import re
def tumblr_download(url, output_dir = '.', merge = True, info_only = False):
    """Fetch a Tumblr post page and download the embedded video/audio."""
    page = parse.unquote(get_html(url)).replace('\/', '/')
    # Title preference order: og:title, then og:description, then <title>.
    raw_title = (r1(r'<meta property="og:title" content="([^"]*)" />', page) or
                 r1(r'<meta property="og:description" content="([^"]*)" />', page) or
                 r1(r'<title>([^<\n]*)', page))
    title = unescape_html(raw_title).replace('\n', '')
    # Video URL is embedded as an escaped <source src=...> attribute.
    media_url = r1(r'source src=\\x22([^\\]+)\\', page)
    if not media_url:
        # No video found; fall back to the audio player's file parameter.
        media_url = r1(r'audio_file=([^&]+)&', page) + '?plead=please-dont-download-this-or-our-lawyers-wont-let-us-host-audio'
    type, ext, size = url_info(media_url)
    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([media_url], title, ext, size, output_dir, merge = merge)
# Standard extractor entry points expected by the downloader framework.
site_info = "Tumblr.com"
download = tumblr_download
download_playlist = playlist_not_supported('tumblr')
| StarcoderdataPython |
168725 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#------------------------------------------------------------------------------
import unittest
from enthought.traits.api import Delegate, HasTraits, Instance, Str, Any
# global because event handlers are being called with wrong value for self
baz_s_handler_self = None
baz_sd_handler_self = None
foo_s_handler_self = None
foo_t_handler_self = None
class Foo( HasTraits ):
    """Delegatee with two Str traits; its handlers record the object the
    change notification was delivered to (via module-level globals)."""
    s = Str( 'foo' )
    t = Str( 'foo.t')
    def _s_changed(self, name, old, new):
        print 'Foo._s_changed( %s, %s, %s, %s)' % (self, name, old, new)
        # Record the receiver so tests can assert which object was notified.
        global foo_s_handler_self
        foo_s_handler_self = self
        return
    def _t_changed(self, name, old, new):
        print 'Foo._t_changed( %s, %s, %s, %s)' % (self, name, old, new)
        global foo_t_handler_self
        foo_t_handler_self = self
        return
class Bar( HasTraits ):
    """Delegator exposing Foo's 's' trait with default delegation
    (no prefix, no write-through)."""
    foo = Instance( Foo, () )
    s = Delegate( 'foo' )
class BazModify( HasTraits ):
    """Delegator whose delegates write through to Foo (modify=True).
    'sd' maps onto Foo's 's' via prefix; 't' delegates under its own name."""
    foo = Instance( Foo, () )
    sd = Delegate( 'foo', prefix='s', modify=True )
    t = Delegate( 'foo', modify=True )
    def _s_changed(self, name, old, new):
        # should never be called
        print 'BazModify._s_changed( %s, %s, %s, %s)' % (self, name, old, new)
        global baz_s_handler_self
        baz_s_handler_self = self
        return
    def _sd_changed(self, name, old, new):
        print 'BazModify._sd_changed( %s, %s, %s, %s)' % (self, name, old, new)
        global baz_sd_handler_self
        baz_sd_handler_self = self
        return
    def _t_changed(self, name, old, new):
        print 'BazModify._t_changed( %s, %s, %s, %s)' % (self, name, old, new)
        global baz_t_handler_self
        baz_t_handler_self = self
        return
class BazNoModify( HasTraits ):
    """Delegator whose delegates do NOT write through to Foo
    (default modify=False); assignments override locally."""
    foo = Instance( Foo, () )
    sd = Delegate( 'foo', prefix='s' )
    t = Delegate( 'foo' )
    def _s_changed(self, name, old, new):
        print 'BazNoModify._s_changed( %s, %s, %s, %s)' % (self, name, old, new)
        global baz_s_handler_self
        baz_s_handler_self = self
        return
    def _sd_changed(self, name, old, new):
        print 'BazNoModify._sd_changed( %s, %s, %s, %s)' % (self, name, old, new)
        global baz_sd_handler_self
        baz_sd_handler_self = self
        return
    def _t_changed(self, name, old, new):
        print 'BazNoModify._t_changed( %s, %s, %s, %s)' % (self, name, old, new)
        global baz_t_handler_self
        baz_t_handler_self = self
        return
class DelegateTestCase( unittest.TestCase ):
    """ Test cases for delegated traits. """
    def test_reset(self):
        """ Test that a delegated trait may be reset.
        Deleting the attribute should reset the trait back to its initial
        delegation behavior.
        """
        f = Foo()
        b = Bar(foo=f)
        # Check initial delegation.
        self.assertEqual( f.s, b.s )
        # Check that an override works.
        b.s = 'bar'
        self.assertNotEqual( f.s, b.s )
        # Check that we can reset back to delegation.  This is what we are
        # really testing for.
        del b.s
        self.assertEqual( f.s, b.s )
        return
    # Below are 8 tests to check the calling of change notification handlers.
    # There are 8 cases for the 2x2x2 matrix with axes:
    # Delegate with prefix or not
    # Delegate with modify write through or not
    # Handler in the delegator and delegatee
    #
    def test_modify_prefix_handler_on_delegator(self):
        f = Foo()
        b = BazModify(foo=f)
        self.assertEqual( f.s, b.sd )
        global baz_s_handler_self
        global baz_sd_handler_self
        baz_sd_handler_self = None
        baz_s_handler_self = None
        b.sd = 'changed'
        self.assertEqual( f.s, b.sd )
        # Don't expect _s_changed to be called because from Baz's perspective
        # the trait is named 'sd'
        self.assertEqual( baz_s_handler_self, None )
        # Do expect '_sd_changed' to be called with b as self
        self.assertEqual( baz_sd_handler_self, b )
        return
    def test_modify_prefix_handler_on_delegatee(self):
        f = Foo()
        b = BazModify(foo=f)
        self.assertEqual( f.s, b.sd )
        global foo_s_handler_self
        foo_s_handler_self = None
        b.sd = 'changed'
        self.assertEqual( f.s, b.sd )
        # Foo expects its '_s_changed' handler to be called with f as self
        self.assertEqual( foo_s_handler_self, f )
        return
    def test_no_modify_prefix_handler_on_delegator(self):
        f = Foo()
        b = BazNoModify(foo=f)
        self.assertEqual( f.s, b.sd )
        global baz_s_handler_self
        global baz_sd_handler_self
        baz_sd_handler_self = None
        baz_s_handler_self = None
        b.sd = 'changed'
        # No write-through: the delegatee keeps its old value.
        self.assertNotEqual( f.s, b.sd )
        # Don't expect _s_changed to be called because from Baz's perspective
        # the trait is named 'sd'
        self.assertEqual( baz_s_handler_self, None )
        # Do expect '_sd_changed' to be called with b as self
        self.assertEqual( baz_sd_handler_self, b )
        return
    def test_no_modify_prefix_handler_on_delegatee_not_called(self):
        f = Foo()
        b = BazNoModify(foo=f)
        self.assertEqual( f.s, b.sd )
        global foo_s_handler_self
        foo_s_handler_self = None
        b.sd = 'changed'
        self.assertNotEqual( f.s, b.sd )
        # Foo's value never changed, so its handler must not fire.
        self.assertEqual( foo_s_handler_self, None )
        return
    def test_modify_handler_on_delegator(self):
        f = Foo()
        b = BazModify(foo=f)
        self.assertEqual( f.t, b.t )
        global baz_t_handler_self
        baz_t_handler_self = None
        b.t = 'changed'
        self.assertEqual( f.t, b.t )
        # Do expect '_t_changed' to be called with b as self
        self.assertEqual( baz_t_handler_self, b )
        return
    def test_modify_handler_on_delegatee(self):
        f = Foo()
        b = BazModify(foo=f)
        self.assertEqual( f.t, b.t )
        global foo_t_handler_self
        foo_t_handler_self = None
        b.t = 'changed'
        self.assertEqual( f.t, b.t )
        # Foo t did change so '_t_changed' handler should be called
        self.assertEqual( foo_t_handler_self, f)
        return
    def test_no_modify_handler_on_delegator(self):
        f = Foo()
        b = BazNoModify(foo=f)
        self.assertEqual( f.t, b.t )
        global baz_t_handler_self
        baz_t_handler_self = None
        b.t = 'changed'
        self.assertNotEqual( f.t, b.t )
        # Do expect '_t_changed' to be called with b as self
        self.assertEqual( baz_t_handler_self, b )
        return
    def test_no_modify_handler_on_delegatee_not_called(self):
        f = Foo()
        b = BazNoModify(foo=f)
        self.assertEqual( f.t, b.t )
        global foo_t_handler_self
        foo_t_handler_self = None
        b.t = 'changed'
        self.assertNotEqual( f.t, b.t )
        # Foo t did not change so '_t_changed' handler should not be called
        self.assertEqual( foo_t_handler_self, None)
        return
    # Below are 4 tests for notification when the delegated trait is changed
    # directly rather than through the delegator.
    def test_no_modify_handler_on_delegatee_direct_change(self):
        f = Foo()
        b = BazNoModify(foo=f)
        self.assertEqual( f.t, b.t )
        global foo_t_handler_self
        foo_t_handler_self = None
        f.t = 'changed'
        self.assertEqual( f.t, b.t )
        # Foo t did change so '_t_changed' handler should be called
        self.assertEqual( foo_t_handler_self, f)
        return
    def test_no_modify_handler_on_delegator_direct_change(self):
        f = Foo()
        b = BazNoModify(foo=f)
        self.assertEqual( f.t, b.t )
        global baz_t_handler_self
        baz_t_handler_self = None
        f.t = 'changed'
        self.assertEqual( f.t, b.t )
        # Do expect '_t_changed' to be called with b as self
        self.assertEqual( baz_t_handler_self, b )
        return
    def test_modify_handler_on_delegatee_direct_change(self):
        f = Foo()
        b = BazModify(foo=f)
        self.assertEqual( f.t, b.t )
        global foo_t_handler_self
        foo_t_handler_self = None
        f.t = 'changed'
        self.assertEqual( f.t, b.t )
        # Foo t did change so '_t_changed' handler should be called
        self.assertEqual( foo_t_handler_self, f)
        return
    def test_modify_handler_on_delegator_direct_change(self):
        f = Foo()
        b = BazModify(foo=f)
        self.assertEqual( f.t, b.t )
        global baz_t_handler_self
        baz_t_handler_self = None
        f.t = 'changed'
        self.assertEqual( f.t, b.t )
        # Do expect '_t_changed' to be called with b as self
        self.assertEqual( baz_t_handler_self, b )
        return
#### EOF ######################################################################
| StarcoderdataPython |
3473146 | <reponame>jonathan-winn-geo/cmatools
"""Tests for io_common module."""
import os
from unittest.mock import patch
import pytest
from cmatools.definitions import ROOT_DIR
from cmatools.io.io_common import check_access, return_datadir_root_dir
# extract_archive_singlefile,; return_datadir_inputs_dir,; write_source_config,
# from pathlib import Path
DEBUG = True
# TODO refactor
# def test_extract_archive_singlefile():
#
# filename = "eobs.tgz"
# extractedfile = extract_archive_singlefile(filename)
# file = Path(return_datadir_inputs_dir() / extractedfile)
# print("Test ouput")
# print(extractedfile)
# print(file)
#
# assert file.is_file()
#
#
# def test_write_source_config():
#
# archivename = 'arcfile'
# extractfilename = 'extfile'
#
# write_source_config(archivename,extractfilename)
def test_return_datadir_root_dir_repo_input():
    """The keyword "repo" resolves to the repository root directory."""
    assert return_datadir_root_dir("repo") == ROOT_DIR
def test_return_datadir_root_dir_temp_input(tmp_path):
    """An existing custom path is returned unchanged."""
    root_dir = tmp_path / "fake_sub_dir"
    root_dir.mkdir()
    assert return_datadir_root_dir(root_dir) == root_dir
# Can't know value of home ~ , so use mock
# Mocked home dir will not be accessible, so also need to mock check_access()
@patch("cmatools.io.io_common.check_access")
def test_return_datadir_root_dir_home_input(function_mock, monkeypatch):
    """Test datadir root value with arg: home ~."""
    # Fake expanduser so "~" maps to a predictable (non-existent) directory.
    monkeypatch.setattr(os.path, "expanduser", lambda home: "/home/name/datadir")
    # Bypass the filesystem access check, which would fail for the fake path.
    function_mock.return_value = True
    assert return_datadir_root_dir("~") == "/home/name/datadir"
def test_return_datadir_root_dir_bad_inputs():
    """An unrecognised keyword (deliberate typo of "repo") raises."""
    with pytest.raises(Exception):
        return_datadir_root_dir("epor")
def test_check_access_raises_exception():
    """check_access raises FileNotFoundError for a missing directory."""
    root_dir = "/home/name/not_a_subdir"
    with pytest.raises(FileNotFoundError):
        check_access(root_dir)
def test_check_access():
    """check_access returns truthy for accessible existing directories."""
    # Root repo dir should be accessible
    assert check_access(ROOT_DIR)
    # User home root dir should be accessible
    root_dir = os.path.expanduser("~")
    assert check_access(root_dir)
| StarcoderdataPython |
11210218 | import logging
import os
from typing import Tuple, Optional, List, Dict
import requests
from requests import Response
from swift_cloud_py.authentication.authentication import authenticate
from swift_cloud_py.common.errors import UnauthorizedException, BadRequestException, \
UnknownCloudException, SafetyViolation
from swift_cloud_py.authentication.check_internet_connection import ensure_has_internet
from swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule
from swift_cloud_py.entities.control_output.phase_diagram import PhaseDiagram
from swift_cloud_py.entities.intersection.intersection import Intersection
from swift_cloud_py.entities.kpis.kpis import KPIs
from swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates
from swift_cloud_py.entities.scenario.queue_lengths import QueueLengths
from swift_cloud_py.enums import ObjectiveEnum
# allows using a test version of the api hosted at a different url (for testing purposes).
from swift_cloud_py.validate_safety_restrictions.validate import validate_safety_restrictions
CLOUD_API_URL = os.environ.get("smc_api_url", "https://cloud-api.swiftmobility.eu")
CONNECTION_ERROR_MSG = "Connection with swift mobility cloud api could not be established"
HORIZON_LB_EXCEEDED_MSG = "horizon should exceed one hour"
def check_status_code(response: Response) -> None:
    """
    Check the status code returned by a rest-api call.

    Returns None on a 200 response and raises the appropriate error otherwise.

    :param response: response object returned by the rest-api call.
    :raises BadRequestException: on HTTP 400 (message is the response body).
    :raises UnauthorizedException: on HTTP 401 (bad JWT), 402 (out of credits),
        403 (forbidden) or 426 (beta api signals a required upgrade).
    :raises TimeoutError: on HTTP 504.
    :raises UnknownCloudException: on any other non-200 status code.
    """
    status = response.status_code
    if status == 200:
        return
    if status == 400:
        raise BadRequestException(str(response.json()))
    if status == 401:
        raise UnauthorizedException("JWT validation failed: Missing or invalid credentials")
    if status == 402:
        raise UnauthorizedException("Insufficient credits (cpu seconds) left.")
    if status == 403:
        raise UnauthorizedException("Forbidden.")
    if status == 426:
        # 426 (upgrade required) carries an explanatory message from the cloud.
        raise UnauthorizedException(f"The cloud api is still in the beta phase; this means it might change. "
                                    f"Message from cloud: {response.json()['msg']}.")
    if status == 504:
        raise TimeoutError
    raise UnknownCloudException(f"Unknown status code (={status}) returned")
class SwiftMobilityCloudApi:
    """
    Class to communicate with the cloud-api of swift mobility (and automating authentication).
    Using this class simplifies the communication with the cloud-api (compared to using the rest-api's directly)
    """
    _authentication_token: str = None  # this token is updated by the @authenticate decorator

    @classmethod
    def get_authentication_header(cls):
        """Return the bearer-token authorization header for rest-api calls."""
        return {'authorization': 'Bearer {0:s}'.format(cls._authentication_token)}

    @classmethod
    @ensure_has_internet
    @authenticate
    def get_optimized_fts(cls, intersection: Intersection, arrival_rates: ArrivalRates,
                          horizon: float = 2.0,
                          min_period_duration: float = 0.0, max_period_duration: float = 180,
                          objective: ObjectiveEnum = ObjectiveEnum.min_delay,
                          initial_queue_lengths: Optional[QueueLengths] = None,
                          fixed_time_schedules_to_exclude: Optional[List[FixedTimeSchedule]] = None,
                          warm_start_info: Optional[Dict] = None,
                          ) -> Tuple[FixedTimeSchedule, PhaseDiagram, float, dict]:
        """
        Optimize a fixed-time schedule
        :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)
        :param arrival_rates: arrival rates; each arrival rate is specified in personal car equivalent per hour (PCE/h)
        cyclists per hour or pedestrians per hour
        :param horizon: time period of interest in hours.
        :param min_period_duration: minimum period duration of the fixed-time schedule in seconds
        :param max_period_duration: minimum period duration of the fixed-time schedule in seconds
        :param objective: what kpi (key performance indicator) to optimize. The following options are available:
        - ObjectiveEnum.min_delay: minimize the delay experienced by road users arriving at the intersection during
        the next 'horizon' hours. The initially waiting traffic is modeled as implicitly by increasing the
        arrival rate by initial_queue_length / horizon PCE/h; this implies that we assume that this traffic is arriving
        (evenly spread) during the horizon.
        - ObjectiveEnum.min_period: search for the fixed-time schedule with the smallest period duration for which
        all traffic lights are 'stable', i.e., the greenyellow interval is large enough so that the amount of traffic
        that can (on average) depart during the horizon exceeds the traffic that arrives during
        the horizon (+ initially waiting traffic).
        - ObjectiveEnum.max_capacity: search for the fixed-time schedule that can handle the largest (percentual)
        increase in traffic (including the initial amount of traffic), i.e., the largest percentual increase in traffic
        for which all traffic lights are 'stable' (see also ObjectiveEnum.min_period). This objective function
        disregards the maximum saturation of each traffic light (we assume the maximum saturation is 1 for each
        traffic light).
        :param initial_queue_lengths: initial amount of traffic waiting at each of the traffic lights; if None, then we
        assume no initial traffic. The unit of each queue-length should align with the unit used for the arrival rate;
        if the arrival rate is specified in PCE/h then the queue-length needs to be specified in PCE.
        :return: fixed-time schedule, associated phase diagram and the objective value
        (minimized delay, minimized period, or maximum percentual increase in traffic divided by 100, e.g. 1 means
        currently at the verge of stability)
        :param fixed_time_schedules_to_exclude: the fixed-time schedules that we want to exclude; this can be used to find
        the second best schedule by excluding the best one.
        :param warm_start_info: each optimization returns some information
        (usually in the format {"id": "some identification string"}); if you want to compute the second best schedule
        (by excluding the best schedule), then you can also provide the warm_start_info returned with the best schedule;
        this will significantly speedup computations when trying to find the second best one.
        """
        assert horizon >= 1, HORIZON_LB_EXCEEDED_MSG
        if initial_queue_lengths is None:
            # assume no initial traffic
            initial_queue_lengths = QueueLengths({signalgroup.id: [0] * len(signalgroup.traffic_lights)
                                                  for signalgroup in intersection.signalgroups})
        check_all_arrival_rates_and_queue_lengths_specified(intersection=intersection, arrival_rates=arrival_rates,
                                                            initial_queue_lengths=initial_queue_lengths)
        if fixed_time_schedules_to_exclude is not None:
            # Schedules we ask the cloud to exclude must themselves be safe.
            for fixed_time_schedule in fixed_time_schedules_to_exclude:
                try:
                    validate_safety_restrictions(intersection=intersection, fixed_time_schedule=fixed_time_schedule)
                except SafetyViolation as e:
                    # NOTE: fixed missing space between the two f-string parts
                    # (previously rendered as "does notsatisfy").
                    logging.error(f"One of the fixed-time schedules in 'fixed_time_schedules_to_exclude' does not "
                                  f"satisfy all safety restrictions. The violation: {e}")
                    raise
        endpoint = f"{CLOUD_API_URL}/fts-optimization"
        headers = SwiftMobilityCloudApi.get_authentication_header()
        # rest-api call
        try:
            # assume that the traffic that is initially present arrives during the horizon.
            corrected_arrival_rates = arrival_rates + initial_queue_lengths / horizon
            json_dict = dict(
                intersection=intersection.to_json(),
                arrival_rates=corrected_arrival_rates.to_json(),
                min_period_duration=min_period_duration,
                max_period_duration=max_period_duration,
                objective=objective.value,
            )
            if fixed_time_schedules_to_exclude is not None:
                json_dict["fts_to_exclude"] = [fts.to_json() for fts in fixed_time_schedules_to_exclude]
            if warm_start_info is not None:
                json_dict["warm_start_info"] = warm_start_info
            logging.debug(f"calling endpoint {endpoint}")
            r = requests.post(endpoint, json=json_dict, headers=headers)
            logging.debug(f"finished calling endpoint {endpoint}")
        except requests.exceptions.ConnectionError:
            raise UnknownCloudException(CONNECTION_ERROR_MSG)
        # check for errors
        check_status_code(response=r)
        # parse output
        output = r.json()
        objective_value = output["obj_value"]
        fixed_time_schedule = FixedTimeSchedule.from_json(output["fixed_time_schedule"])
        # check if safety restrictions are satisfied; raises a SafetyViolation-exception if this is not the case.
        validate_safety_restrictions(intersection=intersection, fixed_time_schedule=fixed_time_schedule)
        phase_diagram = PhaseDiagram.from_json(output["phase_diagram"])
        warm_start_info = output.get("warm_start_info", dict())
        return fixed_time_schedule, phase_diagram, objective_value, warm_start_info

    @classmethod
    @ensure_has_internet
    @authenticate
    def get_tuned_fts(cls, intersection: Intersection, arrival_rates: ArrivalRates,
                      fixed_time_schedule: FixedTimeSchedule, horizon: float = 2.0,
                      min_period_duration: float = 0.0, max_period_duration: float = 180,
                      objective: ObjectiveEnum = ObjectiveEnum.min_delay,
                      initial_queue_lengths: Optional[QueueLengths] = None) \
            -> Tuple[FixedTimeSchedule, float]:
        """
        Tune a fixed-time schedule; tune the greenyellow times to a new situation but keep the 'structure' of this
        fixed-time schedule the same (the phase diagram remains the same).
        :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)
        :param arrival_rates: arrival rates; each arrival rate is specified in personal car equivalent per hour (PCE/h)
        cyclists per hour or pedestrians per hour
        :param fixed_time_schedule: fixed-time schedule to tune.
        :param horizon: time period of interest in hours.
        :param min_period_duration: minimum period duration of the fixed-time schedule in seconds
        :param max_period_duration: minimum period duration of the fixed-time schedule in seconds
        :param objective: what kpi (key performance indicator) to optimize. The following options are available:
        - ObjectiveEnum.min_delay: minimize the delay experienced by road users arriving at the intersection during
        the next 'horizon' hours. The initially waiting traffic is modeled as implicitly by increasing the
        arrival rate by initial_queue_length / horizon PCE/h; this implies that we assume that this traffic is arriving
        (evenly spread) during the horizon.
        - ObjectiveEnum.min_period: search for the fixed-time schedule with the smallest period duration for which
        all traffic lights are 'stable', i.e., the greenyellow interval is large enough so that the amount of traffic
        that can (on average) depart during the horizon exceeds the traffic that arrives during
        the horizon (+ initially waiting traffic).
        - ObjectiveEnum.max_capacity: search for the fixed-time schedule that can handle the largest (percentual)
        increase in traffic (including the initial amount of traffic), i.e., the largest percentual increase in traffic
        for which all traffic lights are 'stable' (see also ObjectiveEnum.min_period).
        :param initial_queue_lengths: initial amount of traffic waiting at each of the traffic lights; if None, then we
        assume no initial traffic. The unit of each queue-length should align with the unit used for the arrival rate;
        if the arrival rate is specified in PCE/h then the queue-length needs to be specified in PCE.
        :return: fixed-time schedule, associated phase diagram and the objective value
        (minimized delay, minimized period, or maximum percentual increase in traffic divided by 100, e.g. 1 means
        currently at the verge of stability)
        """
        assert horizon >= 1, HORIZON_LB_EXCEEDED_MSG
        if initial_queue_lengths is None:
            # assume no initial traffic
            initial_queue_lengths = QueueLengths({signalgroup.id: [0] * len(signalgroup.traffic_lights)
                                                  for signalgroup in intersection.signalgroups})
        check_all_arrival_rates_and_queue_lengths_specified(intersection=intersection, arrival_rates=arrival_rates,
                                                            initial_queue_lengths=initial_queue_lengths)
        endpoint = f"{CLOUD_API_URL}/fts-tuning"
        headers = SwiftMobilityCloudApi.get_authentication_header()
        # rest-api call
        try:
            # assume that the traffic that is initially present arrives during the horizon.
            corrected_arrival_rates = arrival_rates + initial_queue_lengths / horizon
            json_dict = dict(
                intersection=intersection.to_json(),
                fixed_time_schedule=fixed_time_schedule.to_json(),
                arrival_rates=corrected_arrival_rates.to_json(),
                min_period_duration=min_period_duration,
                max_period_duration=max_period_duration,
                objective=objective.value
            )
            logging.debug(f"calling endpoint {endpoint}")
            r = requests.post(endpoint, json=json_dict, headers=headers)
            logging.debug(f"finished calling endpoint {endpoint}")
        except requests.exceptions.ConnectionError:
            raise UnknownCloudException(CONNECTION_ERROR_MSG)
        # check for errors
        check_status_code(response=r)
        # parse output
        output = r.json()
        objective_value = output["obj_value"]
        fixed_time_schedule = FixedTimeSchedule.from_json(output["fixed_time_schedule"])
        # check if safety restrictions are satisfied; raises a SafetyViolation-exception if this is not the case.
        validate_safety_restrictions(intersection=intersection, fixed_time_schedule=fixed_time_schedule)
        return fixed_time_schedule, objective_value

    @classmethod
    @ensure_has_internet
    @authenticate
    def evaluate_fts(cls, intersection: Intersection, arrival_rates: ArrivalRates,
                     fixed_time_schedule: FixedTimeSchedule, horizon: float = 2.0,
                     initial_queue_lengths: Optional[QueueLengths] = None) -> KPIs:
        """
        Evaluate a fixed-time schedule; returns KPIs (estimated delay experienced by road users and the capacity (
        see also KPIs and the SwiftMobilityCloudApi.get_optimized_fts() method for their definition.
        :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)
        :param arrival_rates: arrival rates; each arrival rate is specified in personal car equivalent per hour (PCE/h)
        cyclists per hour or pedestrians per hour
        :param fixed_time_schedule:
        :param initial_queue_lengths: initial amount of traffic waiting at each of the traffic lights; if None, then we
        assume no initial traffic.
        :param horizon: time period of interest in hours.
        :return KPIs, which are the estimated
        """
        assert horizon >= 1, HORIZON_LB_EXCEEDED_MSG
        if initial_queue_lengths is None:
            # assume no initial traffic
            initial_queue_lengths = QueueLengths({signalgroup.id: [0] * len(signalgroup.traffic_lights)
                                                  for signalgroup in intersection.signalgroups})
        check_all_arrival_rates_and_queue_lengths_specified(intersection=intersection, arrival_rates=arrival_rates,
                                                            initial_queue_lengths=initial_queue_lengths)
        endpoint = f"{CLOUD_API_URL}/fts-evaluation"
        headers = SwiftMobilityCloudApi.get_authentication_header()
        # rest-api call
        try:
            # assume that the traffic that is initially present arrives during the horizon.
            corrected_arrival_rates = arrival_rates + initial_queue_lengths / horizon
            json_dict = dict(
                intersection=intersection.to_json(),
                arrival_rates=corrected_arrival_rates.to_json(),
                fixed_time_schedule=fixed_time_schedule.to_json()
            )
            logging.debug(f"calling endpoint {endpoint}")
            r = requests.post(endpoint, json=json_dict, headers=headers)
            logging.debug(f"finished calling endpoint {endpoint}")
        except requests.exceptions.ConnectionError:
            raise UnknownCloudException(CONNECTION_ERROR_MSG)
        # check for errors
        check_status_code(response=r)
        return KPIs.from_json(r.json())

    @classmethod
    @ensure_has_internet
    @authenticate
    def get_phase_diagram(cls, intersection: Intersection, fixed_time_schedule: FixedTimeSchedule) -> PhaseDiagram:
        """
        Get the phase diagram specifying the order in which the signal groups have their greenyellow intervals
        in the fixed-time schedule
        :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)
        :param fixed_time_schedule: fixed-time schedule for which we want to retrieve the phase diagram.
        :return: the associated phase diagram
        IMPORTANT: we try to start the greenyellow intervals of two signal groups that are subject to a synchronous
        start or a greenyellow-lead in the same phase; however, if this is not possible for all such pairs, then we try to
        satisfy it for as many such pairs as possible.
        For example consider the following theoretical problem where we have three signal groups:
        sg1, sg2, and sg3. sg1 conflicts with sg2 and sg3. sg2 has a greenyellow_lead(min=30, max=50) w.r.t. sg3.
        The following schedule is feasible
        greenyellow_intervals = {"sg1": [[0, 10], [40, 50]], "sg2": [[20, 30]], "sg3": [[60,70]]}
        period=80
        However, it is not possible to find a phase diagram where sg2 and sg3 start in the same phase; only the
        following phase diagram is possible: [[["sg1", 0]], [["sg2", 0]], [["sg1", 1]], [["sg3", 0]]]
        """
        endpoint = f"{CLOUD_API_URL}/phase-diagram-computation"
        headers = SwiftMobilityCloudApi.get_authentication_header()
        # rest-api call
        try:
            json_dict = dict(
                intersection=intersection.to_json(),
                greenyellow_intervals=fixed_time_schedule.to_json()["greenyellow_intervals"],
                period=fixed_time_schedule.to_json()["period"]
            )
            logging.debug(f"calling endpoint {endpoint}")
            r = requests.post(endpoint, json=json_dict, headers=headers)
            logging.debug(f"finished calling endpoint {endpoint}")
        except requests.exceptions.ConnectionError:
            raise UnknownCloudException(CONNECTION_ERROR_MSG)
        # check for errors
        check_status_code(response=r)
        output = r.json()
        # parse output
        phase_diagram = PhaseDiagram.from_json(output["phase_diagram"])
        return phase_diagram
def check_all_arrival_rates_and_queue_lengths_specified(intersection: Intersection, arrival_rates: ArrivalRates,
                                                        initial_queue_lengths: QueueLengths):
    """
    Validate that every traffic light of every signal group has both an arrival
    rate and an initial queue length specified.
    :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)
    :param arrival_rates: arrival rates in personal car equivalent per hour (PCE/h)
    :param initial_queue_lengths: initial amount of traffic waiting at each of the traffic lights; if None, then we
    assume no initial traffic.
    :raises AssertionError if an arrival rate or queue length is not specified for some traffic light(s).
    """
    rates_by_id = arrival_rates.id_to_arrival_rates
    queues_by_id = initial_queue_lengths.id_to_queue_lengths
    for signalgroup in intersection.signalgroups:
        sg_id = signalgroup.id
        num_lights = len(signalgroup.traffic_lights)
        assert sg_id in rates_by_id, \
            f"arrival rate(s) must be specified for signal group {sg_id}"
        assert len(rates_by_id[sg_id]) == num_lights, \
            f"arrival rate(s) must be specified for all traffic lights of signal group {sg_id}"
        assert sg_id in queues_by_id, \
            f"initial_queue_lengths(s) must be specified for signal group {sg_id}"
        assert len(queues_by_id[sg_id]) == num_lights, \
            f"initial_queue_lengths(s) must be specified for all traffic lights of signalgroup {sg_id}"
| StarcoderdataPython |
6618280 | <gh_stars>0
import re, json
from . import dump
from . import action
class Post:
    """A single post parsed from a GraphQL "node" payload."""

    def __init__(self, ses, data):
        """
        :param ses: authenticated session used by the action helpers.
        :param data: dict (or JSON string) containing a top-level "node" entry.
        """
        # Always bind defaults so attributes exist even for malformed payloads
        # (the original bare except could leave ses/data unset).
        self.ses = ses
        self.data = {}
        self.id = ""
        try:
            if isinstance(data, str):
                data = json.loads(data)
            self.data = data["node"]
            self.id = self.data.get("id") or ""
        except (KeyError, TypeError, ValueError):
            # Malformed/missing payload: keep the empty defaults set above.
            pass

    def __repr__(self):
        return self.id

    def like(self):
        """Like this post (fixed: was `self.action.like_post(ses, ...)`,
        which raised NameError — `ses` is only a constructor parameter)."""
        return action.like_post(self.ses, self.id)

    def unlike(self):
        """Remove a like from this post."""
        return action.unlike_post(self.ses, self.id)
class People:
    """Wraps a user "node" payload and exposes follow/unfollow/dump helpers."""

    def __init__(self, ses, data):
        if type(data) == str:
            data = json.loads(data)
        node = data["node"]
        self.ses = ses
        self.data = node
        self.username = node["username"]
        self.id = node["id"]
        self.name = node["full_name"]
        self.profile_picture = node["profile_pic_url"]

    def __repr__(self):
        return self.username

    def follow(self):
        return action.follow_people(self.ses, self.username, idPeople = self.id)

    def unfollow(self):
        return action.unfollow_people(self.ses, self.username, idPeople = self.id)

    def follower(self):
        # Delegate to the dump module to page through this user's followers.
        return dump.follower_people(self.ses, usernamePeople = self.username, idPeople = self.id)

    def following(self):
        return dump.following_people(self.ses, usernamePeople = self.username, idPeople = self.id)

    def post(self):
        return dump.post_people(self.ses, usernamePeople = self.username)
class Output:
    """Paged result container: items plus an optional cursor for the next page."""

    def __init__(self, items = None, data = None, idPeople = None, next = None):
        self.items = items
        self.data = data
        self.idPeople = idPeople
        if next:
            # URL-encode the "==" padding of the pagination cursor.
            self.next = next.replace("==", "%3D")
        else:
            self.next = None
        self.isNext = bool(self.next)

    def __repr__(self):
        return "<total_items: {}, next: {}>".format(len(self.items), self.next)
| StarcoderdataPython |
12852500 | <filename>pyaccords/pysrc/ec2instanceinfo.py
##############################################################################
#copyright 2013, <NAME> (<EMAIL>) Prologue #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
#See the License for the specific language governing permissions and #
#limitations under the License. #
##############################################################################
import HTMLParser
data = '''
<table cellspacing="0" class="table table-bordered table-hover table-condensed" id="data">
<thead>
<tr>
<th class="name">Name</th>
<th class="memory">Memory</th>
<th class="computeunits">
<abbr title="One EC2 Compute Unit provides the equivalent CPU capacity of a 1.0-1.2 GHz 2007 Opteron or 2007 Xeon processor.">Compute Units</abbr>
</th>
<th class="storage">Storage</th>
<th class="architecture">Architecture</th>
<th class="ioperf">I/O Performance</th>
<th class="maxips">
<abbr title="Adding additional IPs requires launching the instance in a VPC.">Max IPs</abbr>
</th>
<th class="apiname">API Name</th>
<th class="cost">Linux cost</th>
<th class="cost">Windows cost</th>
</tr>
</thead>
<tbody>
<tr>
<td class="name">M1 Small</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="1">1</span></td>
<td class="storage"><span sort="160">160 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">8</td>
<td class="apiname">m1.small</td>
<td class="cost" hour_cost="0.060">$0.060 per hour</td>
<td class="cost" hour_cost="0.115">$0.115 per hour</td>
</tr>
<tr>
<td class="name">M1 Medium</td>
<td class="memory"><span sort="3.75">3.75 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="410">410 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">m1.medium</td>
<td class="cost" hour_cost="0.12">$0.12 per hour</td>
<td class="cost" hour_cost="0.23">$0.23 per hour</td>
</tr>
<tr>
<td class="name">M1 Large</td>
<td class="memory"><span sort="7.5">7.50 GB</span></td>
<td class="computeunits"><span sort="4">4</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">30</td>
<td class="apiname">m1.large</td>
<td class="cost" hour_cost="0.24">$0.24 per hour</td>
<td class="cost" hour_cost="0.46">$0.46 per hour</td>
</tr>
<tr>
<td class="name">M1 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="8">8</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">60</td>
<td class="apiname">m1.xlarge</td>
<td class="cost" hour_cost="0.48">$0.48 per hour</td>
<td class="cost" hour_cost="0.92">$0.92 per hour</td>
</tr>
<tr>
<td class="name">Micro</td>
<td class="memory"><span sort="0.6">0.60 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="0">Low</span></td>
<td class="maxips">1</td>
<td class="apiname">t1.micro</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Extra Large</td>
<td class="memory"><span sort="17.10">17.10 GB</span></td>
<td class="computeunits"><span sort="6.5">6.5</span></td>
<td class="storage"><span sort="420">420 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m2.xlarge</td>
<td class="cost" hour_cost="0.41">$0.41 per hour</td>
<td class="cost" hour_cost="0.57">$0.57 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Double Extra Large</td>
<td class="memory"><span sort="34.2">34.20 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m2.2xlarge</td>
<td class="cost" hour_cost="0.82">$0.82 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Quadruple Extra Large</td>
<td class="memory"><span sort="68.4">68.40 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">240</td>
<td class="apiname">m2.4xlarge</td>
<td class="cost" hour_cost="1.64">$1.64 per hour</td>
<td class="cost" hour_cost="2.28">$2.28 per hour</td>
</tr>
<tr>
<td class="name">M3 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m3.xlarge</td>
<td class="cost" hour_cost="0.50">$0.50 per hour</td>
<td class="cost" hour_cost="0.98">$0.98 per hour</td>
</tr>
<tr>
<td class="name">M3 Double Extra Large</td>
<td class="memory"><span sort="30">30.00 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m3.2xlarge</td>
<td class="cost" hour_cost="1.00">$1.00 per hour</td>
<td class="cost" hour_cost="1.96">$1.96 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Medium</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="5">5</span></td>
<td class="storage"><span sort="350">350 GB</span></td>
<td class="architecture">32_64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">c1.medium</td>
<td class="cost" hour_cost="0.145">$0.145 per hour</td>
<td class="cost" hour_cost="0.285">$0.285 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Extra Large</td>
<td class="memory"><span sort="7">7.00 GB</span></td>
<td class="computeunits"><span sort="20">20</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">60</td>
<td class="apiname">c1.xlarge</td>
<td class="cost" hour_cost="0.58">$0.58 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Quadruple Extra Large</td>
<td class="memory"><span sort="23">23.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">cc1.4xlarge</td>
<td class="cost" hour_cost="1.30">$1.30 per hour</td>
<td class="cost" hour_cost="1.61">$1.61 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Eight Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="3370">3370 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">240</td>
<td class="apiname">cc2.8xlarge</td>
<td class="cost" hour_cost="2.40">$2.40 per hour</td>
<td class="cost" hour_cost="2.97">$2.97 per hour</td>
</tr>
<tr>
<td class="name">Cluster GPU Quadruple Extra Large</td>
<td class="memory"><span sort="22">22.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">cg1.4xlarge</td>
<td class="cost" hour_cost="2.10">$2.10 per hour</td>
<td class="cost" hour_cost="2.60">$2.60 per hour</td>
</tr>
<tr>
<td class="name">High I/O Quadruple Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="2048">2048 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">hi1.4xlarge</td>
<td class="cost" hour_cost="3.10">$3.10 per hour</td>
<td class="cost" hour_cost="3.58">$3.58 per hour</td>
</tr>
<tr>
<td class="name">High Storage Eight Extra Large</td>
<td class="memory"><span sort="117.00">117.00 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="49152">48 TB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">hs1.8xlarge</td>
<td class="cost" hour_cost="4.600">$4.600 per hour</td>
<td class="cost" hour_cost="4.931">$4.931 per hour</td>
</tr>
<tr>
<td class="name">High Memory Cluster Eight Extra Large</td>
<td class="memory"><span sort="244.00">244.00 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="240">240 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">cr1.8xlarge</td>
<td class="cost" hour_cost="3.500">$3.500 per hour</td>
<td class="cost" hour_cost="3.831">$3.831 per hour</td>
</tr>
</tbody>
</table> '''
class TableParser(HTMLParser.HTMLParser):
    """Collect the text of every <td> cell from the table markup in ``data``.

    Python 2 style: uses the ``HTMLParser`` module (renamed ``html.parser``
    in Python 3). After ``feed(data)``, ``self.flavors`` holds the cell
    texts in document order.
    """
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        # True while the parser is inside a <td> element.
        self.in_td = False
        # Flat list of all collected cell texts.
        self.flavors = []
    def handle_starttag(self, tag, attrs):
        if tag == 'td':
            self.in_td = True
    def handle_data(self, data):
        # NOTE(review): whitespace between <td> and a nested tag is also
        # captured, so flavors may contain pure-whitespace entries.
        if self.in_td:
            self.flavors.append(data)
    def handle_endtag(self, tag):
        # NOTE(review): ANY end tag (e.g. a nested </span> or the stray
        # </sort> in the data) leaves td-mode, so text that follows a nested
        # element inside the same cell is dropped. Harmless for this data.
        self.in_td = False
| StarcoderdataPython |
5129436 | """Convolutional LSTM
针对Time × 240min x 58fac 量价数据封装类,
并参考GoogLeNet对inputs使用两层1D卷积核降维
猫狗大战 <EMAIL>
2017-11-30
"""
import numpy as np
import tensorflow as tf
# import sonnet as snt
from sonnet.python.modules.rnn_core import RNNCore as sntRNNCore
from sonnet.python.modules.conv import Conv2D as sntConv2D
from params import *
def _swich(inputs):
    """Swish/SiLU activation: elementwise ``inputs * sigmoid(inputs)``.

    (Name kept as in the original code, presumably a typo for 'swish'.)
    """
    gate = tf.nn.sigmoid(inputs)
    return inputs * gate
class Conv2DLSTM(sntRNNCore):
    """Conv2D-LSTM cell.

    (Translated from the original Chinese docstring:) Targets 240-minute
    price/volume factor inputs; two strided 1D-style convolutions reduce the
    input resolution before the recurrent 2D convolution.
    """
    def __init__(self, name,
                 output_channels,
                 input_size):
        """
        :param name: str, name for variable_scope
        :param output_channels: int same as hidden_size
        :param input_size: list, inputs image shape, [240, 58]
        """
        super().__init__(name=name)
        self.output_channels = output_channels
        # Spatial shape of the hidden state after the strided input convs.
        self.image_shape = self.get_filtered_shape(input_size)
    def _build(self, inputs, prev_state):
        """
        build rnn network
        use tf.nn.dynamic_rnn and make sure time_major=True
        :param inputs: [Batch, Rows, Columns] or [Batch, Height, Width]
        :param prev_state: tuple (prev_hidden, prev_cell)
            prev_hidden and prev_cell get from self.initial_state
        :return: output, state
            output: next_hidden [Batch, new_rows, new_cols, output_channels]
            state = (next_hidden, next_cell)
            next_cell: [Batch, new_rows, new_cols, output_channels]
        """
        # Unpack the previous (hidden, cell) state.
        prev_hidden, prev_cell = prev_state
        # inputs = tf.expand_dims(inputs, axis=-1)
        # Convolution part: one conv each for the input and hidden paths,
        # producing all four gate pre-activations at once.
        inputs_and_hidden = self.input_conv(inputs) + \
                            self.hidden_conv(prev_hidden) + \
                            self.inner_bias()
        # Split into the four LSTM gate pre-activations along channels.
        i, f, c, o = tf.split(inputs_and_hidden,
                              num_or_size_splits=4, axis=-1)
        # Peephole-style terms: Hadamard product of cell state and weights.
        input_gate = tf.sigmoid(
            i + self.cell_product('input', prev_cell))
        forget_gate = tf.sigmoid(
            f + self.cell_product('forget', prev_cell))
        cell = forget_gate * prev_cell + input_gate * tf.tanh(c)
        output_gate = tf.sigmoid(
            o + self.cell_product('output', cell))
        hidden = output_gate * tf.tanh(cell)
        return hidden, (hidden, cell)
    # Shape of the input image stream after the strided convolutions
    # (7x1 stride 5 over rows, 1x7 stride 3 over columns, SAME padding).
    def get_filtered_shape(self, input_size):
        rows, columns = input_size
        # NOTE(review): np.floor returns floats; the downstream TensorShape
        # construction appears to rely on implicit conversion -- confirm.
        new_rows = np.floor((rows - 7 + 6) / 5 + 1)
        new_columns = np.floor((columns - 7 + 6) / 3 + 1)
        return [new_rows, new_columns]
    # Hadamard product for memory cell (peephole connection).
    def cell_product(self, name, cell):
        with tf.variable_scope(name):
            weights = tf.get_variable(
                name=name,
                shape=[1] + self.image_shape + [self.output_channels],
                dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(stddev=.1),
                regularizer=tf.contrib.layers.l2_regularizer(scale=.1))
            return cell * weights  # broadcast over the batch dimension
    # Convolution operator for hidden states
    def hidden_conv(self, hidden):
        with tf.variable_scope('hidden_conv'):
            initializers = {"w": tf.truncated_normal_initializer(stddev=.1)}
            regularizers = {"w": tf.contrib.layers.l2_regularizer(scale=.1)}
            # 4x output channels: one slab per LSTM gate.
            conv2d_35 = sntConv2D(
                output_channels=self.output_channels * 4,
                kernel_shape=[3, 5],
                stride=[1, 1],
                initializers=initializers,
                regularizers=regularizers,
                use_bias=False,
                name='conv35')
            return conv2d_35(hidden)
    # Convolution operator for inputs
    def input_conv(self, inputs):
        with tf.variable_scope('input_conv'):
            initializers = {"w": tf.truncated_normal_initializer(stddev=.1)}
            regularizers = {"w": tf.contrib.layers.l2_regularizer(scale=.1)}
            # Two GoogLeNet-inspired 1D kernels (7x1 and 1x7) downsample the
            # input, then a 3x3 conv produces the four gate slabs.
            conv2d_71 = sntConv2D(
                output_channels=CONV_SHAPE,
                kernel_shape=[7, 1],
                stride=[5, 1],
                initializers=initializers,
                regularizers=regularizers,
                use_bias=False,
                name='conv71')
            conv2d_17 = sntConv2D(
                output_channels=CONV_SHAPE,
                kernel_shape=[1, 7],
                stride=[1, 3],
                initializers=initializers,
                regularizers=regularizers,
                use_bias=False,
                name='conv17')
            conv2d_33 = sntConv2D(
                output_channels=self.output_channels * 4,
                kernel_shape=[3, 3],
                stride=[1, 1],
                initializers=initializers,
                regularizers=regularizers,
                use_bias=False,
                name='conv33')
            inputs = _swich(conv2d_17(inputs))
            inputs = _swich(conv2d_71(inputs))
            return conv2d_33(inputs)
    # Bias shared by the four gate equations.
    def inner_bias(self):
        """Gate bias, shape [1, rows, columns, output_channels * 4]."""
        return tf.get_variable(
            name='bias',
            shape=[1] + self.image_shape + [self.output_channels * 4],
            dtype=tf.float32,
            initializer=tf.constant_initializer(1.))
    def get_regularizer(self):
        """Return the regularization loss tensors (L2, per the regularizers above)."""
        return self.get_variables(tf.GraphKeys.REGULARIZATION_LOSSES)
    @property
    def state_size(self):
        """Returns a description of the state size, without batch dimension."""
        return (tf.TensorShape(self.image_shape + [self.output_channels]),
                tf.TensorShape(self.image_shape + [self.output_channels]))
    @property
    def output_size(self):
        """Returns a description of the output size, without batch dimension."""
        return tf.TensorShape(self.image_shape + [self.output_channels])
    def initial_state(self, batch_size, dtype):
        """Returns an initial state with zeros, for a batch size and data type.

        NOTE: This method is here only for illustrative purposes, the corresponding
        method in its superclass should be already doing this.
        """
        sz1, sz2 = self.state_size
        # Prepend batch size to the state shape, and create zeros.
        return (tf.zeros([batch_size] + sz1.as_list(), dtype=dtype),
                tf.zeros([batch_size] + sz2.as_list(), dtype=dtype))
class StackedRNN(sntRNNCore):
    """Two stacked Conv2DLSTM cells exposed as a single sonnet RNNCore.

    R1 downsamples the raw input; its output feeds R2. The state is a flat
    4-tuple (h1, c1, h2, c2) so the core can be driven by tf.nn.dynamic_rnn.
    """
    def __init__(self,
                 output_channels,
                 input_size,
                 name='stack_rnn'):
        super().__init__(name=name)
        self.output_channels = output_channels
        # Multi-RNNCell: RNN_OUT_SHAPE comes from `params` (star import).
        self.R1 = Conv2DLSTM('R1', RNN_OUT_SHAPE, input_size)
        self.R1_shape = self.R1.get_filtered_shape(input_size)
        self.R2 = Conv2DLSTM('R2', output_channels, self.R1_shape)
        self.R2_shape = self.R2.get_filtered_shape(self.R1_shape)
    def _build(self, inputs, prev_state):
        # Flat state tuple: (hidden1, cell1, hidden2, cell2).
        h1, s1, h2, s2 = prev_state
        new_inputs, next_1 = self.R1(inputs, (h1, s1))
        output, next_2 = self.R2(new_inputs, (h2, s2))
        next_state = (next_1[0], next_1[1], next_2[0], next_2[1])
        return output, next_state
    @property
    def state_size(self):
        """Returns a description of the state size, without batch dimension."""
        return (tf.TensorShape(self.R1_shape + [RNN_OUT_SHAPE]),
                tf.TensorShape(self.R1_shape + [RNN_OUT_SHAPE]),
                tf.TensorShape(self.R2_shape + [self.output_channels]),
                tf.TensorShape(self.R2_shape + [self.output_channels]))
    @property
    def output_size(self):
        """Returns a description of the output size, without batch dimension."""
        return tf.TensorShape(self.R2_shape + [self.output_channels])
    def initial_state(self, batch_size, dtype):
        """Returns an initial state with zeros, for a batch size and data type.

        NOTE: This method is here only for illustrative purposes, the corresponding
        method in its superclass should be already doing this.
        """
        sz1, sz2, sz3, sz4 = self.state_size
        # Prepend batch size to the state shape, and create zeros.
        return (tf.zeros([batch_size] + sz1.as_list(), dtype=dtype),
                tf.zeros([batch_size] + sz2.as_list(), dtype=dtype),
                tf.zeros([batch_size] + sz3.as_list(), dtype=dtype),
                tf.zeros([batch_size] + sz4.as_list(), dtype=dtype))
| StarcoderdataPython |
1685207 | <filename>tests/acceptance/test_invalid_schema_files.py
def test_checker_non_json_schemafile(run_line, tmp_path):
    """A schemafile that is not valid JSON should abort with a parse error."""
    schemafile = tmp_path / "foo.json"
    schemafile.write_text("{")
    instancefile = tmp_path / "bar.json"
    instancefile.write_text("{}")
    result = run_line(
        ["check-jsonschema", "--schemafile", str(schemafile), str(instancefile)]
    )
    assert result.exit_code == 1
    assert "schemafile could not be parsed" in result.stderr
def test_checker_invalid_schemafile(run_line, tmp_path):
    """A schemafile that parses but is not a valid JSON Schema should fail."""
    schemafile = tmp_path / "foo.json"
    schemafile.write_text('{"title": {"foo": "bar"}}')
    instancefile = tmp_path / "bar.json"
    instancefile.write_text("{}")
    result = run_line(
        ["check-jsonschema", "--schemafile", str(schemafile), str(instancefile)]
    )
    assert result.exit_code == 1
    assert "schemafile was not valid" in result.stderr
def test_checker_invalid_schemafile_scheme(run_line, tmp_path):
    """A schemafile URL with an unsupported scheme (ftp) should be rejected."""
    schemafile = tmp_path / "foo.json"
    schemafile.write_text('{"title": "foo"}')
    instancefile = tmp_path / "bar.json"
    instancefile.write_text("{}")
    result = run_line(
        ["check-jsonschema", "--schemafile", f"ftp://{schemafile}", str(instancefile)]
    )
    assert result.exit_code == 1
    assert "only supports http, https" in result.stderr
| StarcoderdataPython |
9685483 | <reponame>lol768/eu2019model<gh_stars>0
# -*- coding: utf-8 -*-
"""Top-level package for eu2019model."""
# Package metadata; <NAME>/<EMAIL> are dataset anonymization placeholders.
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '1.0.0'
| StarcoderdataPython |
5132445 | # The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime
from dateutil.relativedelta import relativedelta
import bisect
import copy
import itertools
import json
import logging
import math
import time
import warnings
from abc import abstractmethod, ABCMeta
from collections.abc import MutableMapping
from numcodecs import Blosc
from typing import Iterator, Any, List, Dict, Tuple, Callable, Iterable, KeysView, Mapping, Union
import numpy as np
import pandas as pd
from .cciodp import CciOdp
from .constants import COMMON_COORD_VAR_NAMES
_STATIC_ARRAY_COMPRESSOR_PARAMS = dict(cname='zstd', clevel=1, shuffle=Blosc.SHUFFLE, blocksize=0)
_STATIC_ARRAY_COMPRESSOR_CONFIG = dict(id='blosc', **_STATIC_ARRAY_COMPRESSOR_PARAMS)
_STATIC_ARRAY_COMPRESSOR = Blosc(**_STATIC_ARRAY_COMPRESSOR_PARAMS)
_LOG = logging.getLogger()
_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S"
def _dict_to_bytes(d: Dict):
return _str_to_bytes(json.dumps(d, indent=2))
def _str_to_bytes(s: str):
return bytes(s, encoding='utf-8')
# todo move this to xcube
class RemoteChunkStore(MutableMapping, metaclass=ABCMeta):
"""
A remote Zarr Store.
:param data_id: The identifier of the data resource
:param cube_params: A mapping containing additional parameters to define
the data set.
:param observer: An optional callback function called when remote requests
are mode: observer(**kwargs).
:param trace_store_calls: Whether store calls shall be printed
(for debugging).
"""
    def __init__(self,
                 data_id: str,
                 cube_params: Mapping[str, Any] = None,
                 observer: Callable = None,
                 trace_store_calls=False):
        """Build the virtual Zarr layout for *data_id*.

        Determines the time steps, subsets coordinates to
        ``cube_params['bbox']`` (if given), registers every coordinate and
        data variable as a static or remote array in the virtual file
        system, and finally writes the ``.zgroup`` / ``.zattrs`` entries.

        :param data_id: The identifier of the data resource.
        :param cube_params: Optional mapping with e.g. 'variable_names',
            'time_range', 'bbox'. NOTE: it is also mutated here
            ('time_range' and 'variable_names' are normalized in place).
        :param observer: Optional callback invoked on remote requests.
        :param trace_store_calls: Whether store calls shall be printed.
        :raises ValueError: If no valid time stamps can be determined.
        """
        if not cube_params:
            cube_params = {}
        self._variable_names = cube_params.get('variable_names',
                                               self.get_all_variable_names())
        self._attrs = {}
        self._observers = [observer] if observer is not None else []
        self._trace_store_calls = trace_store_calls
        self._dataset_name = data_id
        self._time_ranges = self.get_time_ranges(data_id, cube_params)
        logging.debug('Determined time ranges')
        if not self._time_ranges:
            raise ValueError('Could not determine any valid time stamps')
        # Time stamps are the midpoints of the (start, end) ranges, stored
        # as int64 seconds since epoch.
        t_array = [s.to_pydatetime()
                   + 0.5 * (e.to_pydatetime() - s.to_pydatetime())
                   for s, e in self._time_ranges]
        t_array = np.array(t_array).astype('datetime64[s]').astype(np.int64)
        t_bnds_array = \
            np.array(self._time_ranges).astype('datetime64[s]').astype(np.int64)
        time_coverage_start = self._time_ranges[0][0]
        time_coverage_end = self._time_ranges[-1][1]
        cube_params['time_range'] = (self._extract_time_range_as_strings(
            cube_params.get('time_range',
                            self.get_default_time_range(data_id))))
        self._vfs = {}
        self._var_name_to_ranges = {}
        self._ranges_to_indexes = {}
        self._ranges_to_var_names = {}
        bbox = cube_params.get('bbox', None)
        lon_size = -1
        lat_size = -1
        self._dimension_chunk_offsets = {}
        self._dimensions = self.get_dimensions()
        coords_data = self.get_coords_data(data_id)
        logging.debug('Determined coordinates')
        coords_data['time'] = {}
        coords_data['time']['size'] = len(t_array)
        coords_data['time']['data'] = t_array
        if 'time_bounds' in coords_data:
            coords_data.pop('time_bounds')
        coords_data['time_bnds'] = {}
        # NOTE(review): the next two lines overwrite the 'time' entry again
        # with the bounds data, leaving 'time_bnds' as an empty dict -- they
        # look like they were meant to target 'time_bnds'. Confirm intent.
        coords_data['time']['size'] = len(t_bnds_array)
        coords_data['time']['data'] = t_bnds_array
        sorted_coords_names = list(coords_data.keys())
        sorted_coords_names.sort()
        lat_min_offset = -1
        lat_max_offset = -1
        lon_min_offset = -1
        lon_max_offset = -1
        # Subset each spatial coordinate (and its bounds) to the bbox.
        for coord_name in sorted_coords_names:
            if coord_name == 'time' or coord_name == 'time_bnds':
                continue
            coord_attrs = self.get_attrs(coord_name)
            coord_attrs['_ARRAY_DIMENSIONS'] = coord_attrs['dimensions']
            coord_data = coords_data[coord_name]['data']
            if bbox is not None and \
                    (coord_name == 'lat' or coord_name == 'latitude'):
                # Latitude axis may be ascending or descending; bisect on the
                # reversed array in the descending case.
                if coord_data[0] < coord_data[-1]:
                    lat_min_offset = bisect.bisect_left(coord_data, bbox[1])
                    lat_max_offset = bisect.bisect_right(coord_data, bbox[3])
                else:
                    lat_min_offset = len(coord_data) - \
                                     bisect.bisect_left(coord_data[::-1], bbox[3])
                    lat_max_offset = len(coord_data) - \
                                     bisect.bisect_right(coord_data[::-1], bbox[1])
                coords_data = self._adjust_coord_data(coord_name,
                                                      lat_min_offset,
                                                      lat_max_offset,
                                                      coords_data,
                                                      coord_attrs)
                coord_data = coords_data[coord_name]['data']
            elif bbox is not None and \
                    (coord_name == 'lon' or coord_name == 'longitude'):
                lon_min_offset = bisect.bisect_left(coord_data, bbox[0])
                lon_max_offset = bisect.bisect_right(coord_data, bbox[2])
                coords_data = self._adjust_coord_data(coord_name,
                                                      lon_min_offset,
                                                      lon_max_offset,
                                                      coords_data,
                                                      coord_attrs)
                coord_data = coords_data[coord_name]['data']
            elif bbox is not None and \
                    (coord_name == 'latitude_bounds' or coord_name == 'lat_bounds'
                     or coord_name == 'latitude_bnds' or coord_name == 'lat_bnds'):
                # Bounds reuse the offsets computed for the main lat axis;
                # relies on sorted iteration ('lat...' sorts before '..._bnds'
                # variants is NOT guaranteed -- offsets default to -1 otherwise).
                coords_data = self._adjust_coord_data(coord_name,
                                                      lat_min_offset,
                                                      lat_max_offset,
                                                      coords_data,
                                                      coord_attrs)
                coord_data = coords_data[coord_name]['data']
            elif bbox is not None and \
                    (coord_name == 'longitude_bounds' or coord_name == 'lon_bounds'
                     or coord_name == 'longitude_bnds' or coord_name == 'lon_bnds'):
                coords_data = self._adjust_coord_data(coord_name,
                                                      lon_min_offset,
                                                      lon_max_offset,
                                                      coords_data,
                                                      coord_attrs)
                coord_data = coords_data[coord_name]['data']
            if len(coord_data) > 0:
                coord_array = np.array(coord_data)
                self._add_static_array(coord_name, coord_array, coord_attrs)
            else:
                # No inline data: expose the coordinate as a remote array.
                shape = list(coords_data[coord_name].
                             get('shape', coords_data[coord_name].get('size')))
                chunk_size = coords_data[coord_name]['chunkSize']
                # NOTE(review): isinstance against typing.List relies on
                # deprecated typing behavior; plain `list` is presumably meant.
                if not isinstance(chunk_size, List):
                    chunk_size = [chunk_size]
                encoding = self.get_encoding(coord_name)
                self._add_remote_array(coord_name, shape, chunk_size,
                                       encoding, coord_attrs)
        time_attrs = {
            "_ARRAY_DIMENSIONS": ['time'],
            "units": "seconds since 1970-01-01T00:00:00Z",
            "calendar": "proleptic_gregorian",
            "standard_name": "time",
            "bounds": "time_bnds",
        }
        time_bnds_attrs = {
            "_ARRAY_DIMENSIONS": ['time', 'bnds'],
            "units": "seconds since 1970-01-01T00:00:00Z",
            "calendar": "proleptic_gregorian",
            "standard_name": "time_bnds",
        }
        self._add_static_array('time', t_array, time_attrs)
        self._add_static_array('time_bnds', t_bnds_array, time_bnds_attrs)
        coordinate_names = [coord for coord in coords_data.keys()
                            if coord not in COMMON_COORD_VAR_NAMES]
        coordinate_names = ' '.join(coordinate_names)
        global_attrs = dict(
            Conventions='CF-1.7',
            coordinates=coordinate_names,
            title=data_id,
            date_created=pd.Timestamp.now().isoformat(),
            # Assumes a dotted dataset id with at least four parts -- the
            # fourth is the processing level (CCI ODP id convention).
            processing_level=self._dataset_name.split('.')[3],
            time_coverage_start=time_coverage_start.isoformat(),
            time_coverage_end=time_coverage_end.isoformat(),
            time_coverage_duration=
            (time_coverage_end - time_coverage_start).isoformat(),
        )
        self._time_indexes = {}
        remove = []
        self._num_data_var_chunks_not_in_vfs = 0
        logging.debug('Adding variables to dataset ...')
        for variable_name in self._variable_names:
            # Skip names that are really coordinates or the bounds variable.
            if variable_name in coords_data or variable_name == 'time_bnds':
                remove.append(variable_name)
                continue
            var_encoding = self.get_encoding(variable_name)
            var_attrs = self.get_attrs(variable_name)
            dimensions = var_attrs.get('dimensions', [])
            self._maybe_adjust_attrs(lon_size, lat_size, var_attrs)
            chunk_sizes = var_attrs.get('chunk_sizes', [-1] * len(dimensions))
            if isinstance(chunk_sizes, int):
                chunk_sizes = [chunk_sizes]
            # Every data variable gets a leading time dimension of chunk 1.
            if len(dimensions) > 0 and 'time' not in dimensions:
                dimensions.insert(0, 'time')
                chunk_sizes.insert(0, 1)
            var_attrs.update(_ARRAY_DIMENSIONS=dimensions)
            sizes = []
            self._time_indexes[variable_name] = -1
            time_dimension = -1
            for i, coord_name in enumerate(dimensions):
                if coord_name in coords_data:
                    sizes.append(coords_data[coord_name]['size'])
                else:
                    sizes.append(self._dimensions.get(coord_name))
                if coord_name == 'time':
                    self._time_indexes[variable_name] = i
                    time_dimension = i
                # -1 means "use the full dimension size as chunk".
                if chunk_sizes[i] == -1:
                    chunk_sizes[i] = sizes[i]
            var_attrs['shape'] = sizes
            var_attrs['size'] = math.prod(sizes)
            # String-encoded variables cannot be represented as arrays:
            # either reinterpret, move to metadata, or drop them.
            if var_encoding.get('dtype', '') == 'bytes1024':
                if 'grid_mapping_name' in var_attrs:
                    var_encoding['dtype'] = 'U'
                elif len(dimensions) == 1 and sizes[0] < 512 * 512:
                    _LOG.info(f"Variable '{variable_name}' is encoded as "
                              f"string. Will convert it to metadata.")
                    variable = {variable_name: sizes[0]}
                    var_data = self.get_variable_data(data_id, variable)
                    global_attrs[variable_name] = \
                        [var.decode('utf-8')
                         for var in var_data[variable_name]['data']]
                    remove.append(variable_name)
                    continue
                else:
                    warnings.warn(f"Variable '{variable_name}' is encoded as "
                                  f"string. Will omit it from the dataset.")
                    remove.append(variable_name)
                    continue
            chunk_sizes = self._adjust_chunk_sizes(chunk_sizes,
                                                   sizes,
                                                   time_dimension)
            var_attrs['chunk_sizes'] = chunk_sizes
            # File chunks drop the synthetic leading time dimension when the
            # file itself has fewer dimensions than the cube variable.
            if len(var_attrs.get('file_dimensions', [])) < len(dimensions):
                var_attrs['file_chunk_sizes'] = chunk_sizes[1:]
            else:
                var_attrs['file_chunk_sizes'] = chunk_sizes
            self._add_remote_array(variable_name,
                                   sizes,
                                   chunk_sizes,
                                   var_encoding,
                                   var_attrs)
            self._num_data_var_chunks_not_in_vfs += np.prod(chunk_sizes)
        logging.debug(f"Added a total of {len(self._variable_names)} variables "
                      f"to the data set")
        for r in remove:
            self._variable_names.remove(r)
        cube_params['variable_names'] = self._variable_names
        # _class_name is presumably set by a subclass/superclass -- not
        # visible in this portion of the file.
        global_attrs['history'] = [dict(
            program=f'{self._class_name}',
            cube_params=cube_params
        )]
        # setup Virtual File System (vfs)
        self._vfs['.zgroup'] = _dict_to_bytes(dict(zarr_format=2))
        self._vfs['.zattrs'] = _dict_to_bytes(global_attrs)
    def _adjust_coord_data(self, coord_name: str, min_offset: int,
                           max_offset: int, coords_data, dim_attrs: dict):
        """Restrict coordinate *coord_name* to the slice [min_offset, max_offset).

        Records the chunk offset for later chunk-index translation, clamps
        the chunk sizes in *dim_attrs* to the new shape, and updates size,
        shape and data entries in *coords_data* in place.

        :return: the (mutated) *coords_data* mapping.
        """
        self._dimension_chunk_offsets[coord_name] = min_offset
        # Assumes the stored data is a numpy array (uses .shape/.size below).
        coord_data = coords_data[coord_name]['data'][min_offset:max_offset]
        shape = coord_data.shape
        self._set_chunk_sizes(dim_attrs, shape, 'chunk_sizes')
        self._set_chunk_sizes(dim_attrs, shape, 'file_chunk_sizes')
        dim_attrs['size'] = coord_data.size
        if 'shape' in dim_attrs:
            dim_attrs['shape'] = list(shape)
        # Only 1-D coordinates define a dimension length.
        if len(shape) == 1:
            self._dimensions[coord_name] = coord_data.size
        coords_data[coord_name]['size'] = coord_data.size
        coords_data[coord_name]['chunkSize'] = dim_attrs['chunk_sizes']
        coords_data[coord_name]['data'] = coord_data
        return coords_data
def _set_chunk_sizes(self, dim_attrs, shape, name):
chunk_sizes = dim_attrs.get(name, 1000000)
if isinstance(chunk_sizes, int):
dim_attrs[name] = min(chunk_sizes, shape[0])
else:
# chunk sizes is list of ints
for i, chunk_size in enumerate(chunk_sizes):
chunk_sizes[i] = min(chunk_size, shape[i])
dim_attrs[name] = chunk_sizes
@classmethod
def _maybe_adjust_attrs(cls, lon_size, lat_size, var_attrs):
cls._maybe_adjust_to('lat', 'latitude', lat_size, var_attrs)
cls._maybe_adjust_to('lon', 'longitude', lon_size, var_attrs)
@classmethod
def _maybe_adjust_to(cls, first_name, second_name, adjusted_size, var_attrs):
if adjusted_size == -1:
return
try:
index = var_attrs['dimensions'].index(first_name)
except ValueError:
try:
index = var_attrs['dimensions'].index(second_name)
except ValueError:
index = -1
if index > 0:
var_attrs['shape'][index] = adjusted_size
if 'chunk_sizes' in var_attrs:
var_attrs['chunk_sizes'][index] = \
min(var_attrs['chunk_sizes'][index], adjusted_size)
var_attrs['file_chunk_sizes'][index] = \
min(var_attrs['file_chunk_sizes'][index], adjusted_size)
    @classmethod
    def _adjust_chunk_sizes(cls, chunks, sizes, time_dimension):
        """Pick chunk sizes close to (but below) 1e6 elements per chunk.

        The time dimension's chunking is never changed; all other dimensions
        are enlarged to multiples of their current chunk size where possible.
        """
        # check if we can actually read in everything as one big chunk
        sum_sizes = np.prod(sizes, dtype=np.int64)
        if time_dimension >= 0:
            # Per-time-chunk element count.
            sum_sizes = sum_sizes / sizes[time_dimension] * chunks[time_dimension]
            if sum_sizes < 1000000:
                best_chunks = sizes.copy()
                best_chunks[time_dimension] = chunks[time_dimension]
                return best_chunks
        if sum_sizes < 1000000:
            return sizes
        # determine valid values for a chunk size. A value is valid if the
        # size can be divided by it without remainder
        valid_chunk_sizes = []
        for i, chunk, size in zip(range(len(chunks)), chunks, sizes):
            # do not rechunk time dimension
            if i == time_dimension:
                valid_chunk_sizes.append([chunk])
                continue
            # handle case that the size cannot be divided evenly by the chunk
            if size % chunk > 0:
                if np.prod(chunks, dtype=np.int64) / chunk * size < 1000000:
                    # if the size is small enough to be ingested in single chunk, take it
                    valid_chunk_sizes.append([size])
                else:
                    # otherwise, give in to that we cannot chunk the data evenly
                    valid_chunk_sizes.append(list(range(chunk, size + 1, chunk)))
                continue
            valid_dim_chunk_sizes = []
            for r in range(chunk, size + 1, chunk):
                if size % r == 0:
                    valid_dim_chunk_sizes.append(r)
            valid_chunk_sizes.append(valid_dim_chunk_sizes)
        # recursively determine the chunking with the biggest size smaller than 1000000
        chunks, chunk_size = cls._get_best_chunks(chunks, valid_chunk_sizes, chunks.copy(), 0, 0, time_dimension)
        return chunks
@classmethod
def _get_best_chunks(cls, chunks, valid_sizes, best_chunks, best_chunk_size, index, time_dimension):
    """Recursively search *valid_sizes* for the chunking with the largest
    element count that still stays at or below 1e6 elements.

    Returns a ``(best_chunks, best_chunk_size)`` pair. Candidates for each
    dimension are tried starting at *index*; since candidate lists ascend,
    the loop breaks as soon as the 1e6 limit is exceeded.
    """
    for valid_size in valid_sizes[index]:
        test_chunks = chunks.copy()
        test_chunks[index] = valid_size
        if index < len(chunks) - 1:
            # Recurse into the remaining dimensions.
            test_chunks, test_chunk_size = \
                cls._get_best_chunks(test_chunks, valid_sizes, best_chunks, best_chunk_size, index + 1,
                                     time_dimension)
        else:
            test_chunk_size = np.prod(test_chunks, dtype=np.int64)
        if test_chunk_size > 1000000:
            break
        if test_chunk_size > best_chunk_size:
            best_chunk_size = test_chunk_size
            best_chunks = test_chunks.copy()
        elif test_chunk_size == best_chunk_size:
            # in case two chunkings have the same size, choose the one where values are more similar
            # NOTE(review): despite the '*_min_chunk' names, these hold the
            # *maximum* non-time chunk value (np.max); the candidate with the
            # smaller maximum wins - confirm this tie-breaker is intended.
            where = np.full(len(test_chunks), fill_value=True)
            where[time_dimension] = False
            test_min_chunk = np.max(test_chunks, initial=0, where=where)
            best_min_chunk = np.max(best_chunks, initial=0, where=where)
            if best_min_chunk > test_min_chunk:
                best_chunk_size = test_chunk_size
                best_chunks = test_chunks.copy()
    return best_chunks, best_chunk_size
@classmethod
def _adjust_size(cls, size: int, tile_size: int) -> int:
    """Round *size* up to the next multiple of *tile_size* when it exceeds it;
    smaller sizes are returned unchanged."""
    if size <= tile_size:
        return size
    return cls._safe_int_div(size, tile_size) * tile_size
@classmethod
def _safe_int_div(cls, x: int, y: int) -> int:
    """Integer division of *x* by *y*, rounding up (ceiling division)."""
    bump = y - 1
    return (x + bump) // y
@classmethod
def _extract_time_as_string(cls, time: Union[pd.Timestamp, str]) -> str:
    """Return *time* as a timezone-naive ISO-8601 string.

    String input is parsed as UTC; Timestamp input is used as-is.
    """
    timestamp = pd.to_datetime(time, utc=True) if isinstance(time, str) else time
    return timestamp.tz_localize(None).isoformat()
@classmethod
def _extract_time_range_as_strings(cls, time_range: Union[Tuple, List]) -> (str, str):
    """Convert both endpoints of *time_range* (a 2-tuple or a list) to
    timezone-naive ISO strings via ``_extract_time_as_string``."""
    if isinstance(time_range, tuple):
        range_start, range_end = time_range
    else:
        range_start, range_end = time_range[0], time_range[1]
    return (cls._extract_time_as_string(range_start),
            cls._extract_time_as_string(range_end))
@abstractmethod
def get_time_ranges(self, cube_id: str, cube_params: Mapping[str, Any]) -> List[Tuple]:
    """Return the list of (start, end) request time ranges for *cube_id*."""
    pass
@abstractmethod
def get_default_time_range(self, ds_id: str) -> Tuple[str, str]:
    """Return the dataset's default (start, end) time range.

    NOTE(review): unlike the sibling abstract methods this one returns
    ``('', '')`` instead of ``pass`` - subclasses are expected to override.
    """
    return '', ''
@abstractmethod
def get_all_variable_names(self) -> List[str]:
    """Return the names of all variables offered by the dataset."""
    pass
# def get_spatial_lat_res(self):
# return self._cube_config.spatial_res
# def get_spatial_lon_res(self):
# return self._cube_config.spatial_res
@abstractmethod
def get_dimensions(self) -> Mapping[str, int]:
    """Return a mapping from dimension name to dimension size."""
    pass
@abstractmethod
def get_coords_data(self, dataset_id: str) -> dict:
    """Return coordinate variable data for *dataset_id*."""
    pass
@abstractmethod
def get_variable_data(self, dataset_id: str, variable_names: Dict[str, int]):
    """Return data for the given variables (name -> size) of *dataset_id*."""
    pass
def add_observer(self, observer: Callable):
    """
    Add a request observer.

    :param observer: A callback function called when remote requests are made: observer(**kwargs).
    """
    self._observers.append(observer)
@abstractmethod
def get_encoding(self, band_name: str) -> Dict[str, Any]:
    """
    Get the encoding settings for band (variable) *band_name*.

    Must at least contain "dtype" whose value is a numpy array-protocol type string.
    Refer to https://docs.scipy.org/doc/numpy/reference/arrays.interface.html#arrays-interface
    and zarr format 2 spec.
    """
@abstractmethod
def get_attrs(self, band_name: str) -> Dict[str, Any]:
    """
    Get any metadata attributes for band (variable) *band_name*.
    """
def request_bbox(self, x_tile_index: int, y_tile_index: int) -> Tuple[float, float, float, float]:
    """Compute the geographic bounding box ``(x1, y1, x2, y2)`` of a tile.

    NOTE(review): relies on ``self.cube_config`` and on
    ``get_spatial_lat_res()`` / ``get_spatial_lon_res()``, which appear
    commented out earlier in this class - confirm subclasses provide them.
    """
    x_index = x_tile_index * self._tile_width
    y_index = y_tile_index * self._tile_height
    # Presumably geometry is (x_min, y_min, x_max, y_max); only the west
    # edge and the north edge are used as origins - TODO confirm.
    x01, _, _, y02 = self.cube_config.geometry
    spatial_lat_res = self.get_spatial_lat_res()
    spatial_lon_res = self.get_spatial_lon_res()
    x1 = x01 + spatial_lon_res * x_index
    x2 = x01 + spatial_lon_res * (x_index + self._tile_width)
    y1 = y02 - spatial_lat_res * (y_index + self._tile_height)
    y2 = y02 - spatial_lat_res * y_index
    return x1, y1, x2, y2
def request_time_range(self, time_index: int) -> Tuple[pd.Timestamp, pd.Timestamp]:
    """Return the (start, end) timestamps covering time chunk *time_index*."""
    range_start, range_end = self._time_ranges[time_index]
    return range_start, range_end
def _add_static_array(self, name: str, array: np.ndarray, attrs: Dict):
    """Expose an in-memory numpy array as a single-chunk, compressed zarr
    array in the virtual file system."""
    extents = [int(n) for n in array.shape]
    layout_order = "C"
    array_metadata = {
        "zarr_format": 2,
        "chunks": extents,
        "shape": extents,
        "dtype": str(array.dtype.str),
        "fill_value": None,
        "compressor": _STATIC_ARRAY_COMPRESSOR_CONFIG,
        "filters": None,
        "order": layout_order,
    }
    self._vfs[name] = _str_to_bytes('')
    self._vfs[name + '/.zarray'] = _dict_to_bytes(array_metadata)
    self._vfs[name + '/.zattrs'] = _dict_to_bytes(attrs)
    # One chunk only, so the chunk key is "0" repeated per dimension.
    single_chunk_key = '.'.join(['0'] * array.ndim)
    self._vfs[name + '/' + single_chunk_key] = \
        _STATIC_ARRAY_COMPRESSOR.encode(array.tobytes(order=layout_order))
def _add_remote_array(self,
                      name: str,
                      shape: List[int],
                      chunks: List[int],
                      encoding: Dict[str, Any],
                      attrs: Dict):
    """Register a lazily fetched zarr array and index its chunk grid so
    chunk keys can be resolved on demand."""
    array_metadata = {
        'zarr_format': 2,
        'shape': shape,
        'chunks': chunks,
        'compressor': None,
        'fill_value': None,
        'filters': None,
        'order': 'C',
        **encoding,
    }
    self._vfs[name] = _str_to_bytes('')
    self._vfs[name + '/.zarray'] = _dict_to_bytes(array_metadata)
    self._vfs[name + '/.zattrs'] = _dict_to_bytes(attrs)
    # Number of chunks along each dimension defines the chunk-index grid.
    chunk_counts = np.array(shape) // np.array(chunks)
    ranges = tuple(range(int(count)) for count in chunk_counts)
    self._var_name_to_ranges[name] = ranges
    if ranges not in self._ranges_to_indexes:
        self._ranges_to_indexes[ranges] = list(itertools.product(*ranges))
    self._ranges_to_var_names.setdefault(ranges, []).append(name)
def _fetch_chunk(self, key: str, var_name: str, chunk_index: Tuple[int, ...]) -> bytes:
    """Fetch one chunk, timing the remote call and notifying all registered
    observers (with the exception, if any) before re-raising."""
    time_range = self.request_time_range(chunk_index[self._time_indexes[var_name]])
    started = time.perf_counter()
    error = None
    data = None
    try:
        data = self.fetch_chunk(key, var_name, chunk_index, time_range)
    except Exception as exc:
        error = exc
    elapsed = time.perf_counter() - started
    for observer in self._observers:
        observer(var_name=var_name,
                 chunk_index=chunk_index,
                 time_range=time_range,
                 duration=elapsed,
                 exception=error)
    if error:
        raise error
    return data
@abstractmethod
def fetch_chunk(self,
                key: str,
                var_name: str,
                chunk_index: Tuple[int, ...],
                time_range: Tuple[pd.Timestamp, pd.Timestamp]
                ) -> bytes:
    """
    Fetch chunk data from remote.

    :param key: The original chunk key being retrieved.
    :param var_name: Variable name.
    :param chunk_index: Chunk index, one entry per dimension.
    :param time_range: Requested (start, end) time range.
    :return: chunk data as raw bytes
    """
    pass
@property
def _class_name(self):
    """Fully qualified name of the concrete class (used in trace output)."""
    return f'{self.__module__}.{self.__class__.__name__}'
###############################################################################
# Zarr Store (MutableMapping) implementation
###############################################################################
def keys(self) -> KeysView[str]:
    """Return all store keys, materialising lazy chunk entries first."""
    if self._trace_store_calls:
        print(f'{self._class_name}.keys()')
    # All chunk placeholders must exist before the key view is complete.
    self._build_missing_vfs_entries()
    return self._vfs.keys()
def listdir(self, key: str) -> Iterable[str]:
    """List direct children of *key* (the empty string lists the root);
    lazy chunk entries are not materialised here."""
    if self._trace_store_calls:
        print(f'{self._class_name}.listdir(key={key!r})')
    if key == '':
        return [entry for entry in self._vfs.keys() if '/' not in entry]
    prefix = key + '/'
    start = len(prefix)
    return [entry for entry in self._vfs.keys()
            if entry.startswith(prefix) and entry.find('/', start) == -1]
def getsize(self, key: str) -> int:
    """Size associated with *key*.

    NOTE(review): the result adds ``_num_data_var_chunks_not_in_vfs``
    (a store-wide counter) to the byte length of a single entry - that
    looks suspicious for a per-key size; confirm intent before relying
    on this value.
    """
    if self._trace_store_calls:
        print(f'{self._class_name}.getsize(key={key!r})')
    return len(self._vfs[key]) + self._num_data_var_chunks_not_in_vfs
def __iter__(self) -> Iterator[str]:
    """Iterate over all store keys (materialises lazy chunk entries first)."""
    if self._trace_store_calls:
        print(f'{self._class_name}.__iter__()')
    self._build_missing_vfs_entries()
    return iter(self._vfs.keys())
def __len__(self) -> int:
    """Total entry count: VFS entries plus chunks not yet materialised."""
    if self._trace_store_calls:
        print(f'{self._class_name}.__len__()')
    return len(self._vfs.keys()) + self._num_data_var_chunks_not_in_vfs
def __contains__(self, key) -> bool:
    """Return True if *key* exists, building its lazy chunk entry on demand."""
    if self._trace_store_calls:
        print(f'{self._class_name}.__contains__(key={key!r})')
    if key not in self._vfs:
        # Unknown so far - it may be a chunk key that can be materialised.
        self._try_building_vfs_entry(key)
    return key in self._vfs
def __getitem__(self, key: str) -> bytes:
    """Return the bytes stored under *key*, fetching remote chunk data on
    demand; raises KeyError for unknown keys."""
    if self._trace_store_calls:
        print(f'{self._class_name}.__getitem__(key={key!r})')
    if key not in self._vfs:
        self._try_building_vfs_entry(key)
    value = self._vfs[key]  # KeyError if the entry could not be built
    if isinstance(value, tuple):
        # Placeholder (var_name, chunk_index): fetch the real bytes remotely.
        return self._fetch_chunk(key, *value)
    return value
def _try_building_vfs_entry(self, key):
    """If *key* names a pending chunk of a known variable, materialise its
    VFS placeholder.

    The placeholder ``(var_name, chunk_indexes)`` is created for *all*
    variables sharing the same chunk grid, and the index is removed from
    the shared pending list so it is only built once.

    NOTE(review): assumes *key* contains exactly one '/' and that the
    part before it is a registered variable name; a chunk-like key for an
    unknown variable would raise KeyError here - confirm callers only
    pass store keys.
    """
    if '/' in key:
        name, chunk_index_part = key.split('/')
        try:
            chunk_indexes = \
                tuple(int(chunk_index) for chunk_index in chunk_index_part.split('.'))
        except ValueError:
            # latter part of key does not consist of chunk indexes
            return
        # build vfs entry of this chunk index for all variables that have this range
        ranges = self._var_name_to_ranges[name]
        indexes = self._ranges_to_indexes[ranges]
        if chunk_indexes in indexes:
            for var_name in self._ranges_to_var_names[ranges]:
                self._vfs[var_name + '/' + chunk_index_part] = var_name, chunk_indexes
                self._num_data_var_chunks_not_in_vfs -= 1
            indexes.remove(chunk_indexes)
def _build_missing_vfs_entries(self):
    """Materialise a placeholder VFS entry for every not-yet-requested chunk
    of every registered remote variable."""
    for var_name, ranges in self._var_name_to_ranges.items():
        for chunk_index in self._ranges_to_indexes[ranges]:
            chunk_key = '.'.join(str(i) for i in chunk_index)
            self._vfs[var_name + '/' + chunk_key] = var_name, chunk_index
def __setitem__(self, key: str, value: bytes) -> None:
    """Unsupported: the store is read-only."""
    if self._trace_store_calls:
        print(f'{self._class_name}.__setitem__(key={key!r}, value={value!r})')
    raise TypeError(f'{self._class_name} is read-only')
def __delitem__(self, key: str) -> None:
    """Unsupported: the store is read-only."""
    if self._trace_store_calls:
        print(f'{self._class_name}.__delitem__(key={key!r})')
    raise TypeError(f'{self._class_name} is read-only')
class CciChunkStore(RemoteChunkStore):
    """
    A remote Zarr Store using the ESA CCI Open Data Portal as backend.

    :param cci_odp: CCI ODP instance.
    :param dataset_id: Identifier of the dataset; must be known to the ODP.
    :param cube_params: Cube configuration parameters.
    :param observer: An optional callback function called when remote requests are made: observer(**kwargs).
    :param trace_store_calls: Whether store calls shall be printed (for debugging).
    """

    # Maps sample type names to numpy array-protocol dtype strings.
    # NOTE(review): signed 8/16/32-bit integers map to unsigned dtypes; the
    # inherited comment attributes this to Sentinel Hub - confirm it is
    # also intended for the CCI ODP backend.
    _SAMPLE_TYPE_TO_DTYPE = {
        # Note: Sentinel Hub currently only supports unsigned
        # integer values therefore requesting INT8 or INT16
        # will return the same as UINT8 or UINT16 respectively.
        'uint8': '|u1',
        'uint16': '<u2',
        'uint32': '<u4',
        'int8': '|u1',
        'int16': '<u2',
        'int32': '<u4',
        'float32': '<f4',
        'float64': '<f8',
    }

    def __init__(self,
                 cci_odp: CciOdp,
                 dataset_id: str,
                 cube_params: Mapping[str, Any] = None,
                 observer: Callable = None,
                 trace_store_calls=False):
        self._cci_odp = cci_odp
        if dataset_id not in self._cci_odp.dataset_names:
            raise ValueError(f'Data ID {dataset_id} not provided by ODP.')
        # Dataset metadata is fetched once; all accessors below read it.
        self._metadata = self._cci_odp.get_dataset_metadata(dataset_id)
        super().__init__(dataset_id,
                         cube_params,
                         observer=observer,
                         trace_store_calls=trace_store_calls)

    def _extract_time_range_as_datetime(self, time_range: Union[Tuple, List]) -> (datetime, datetime, str, str):
        """Return the endpoints of *time_range* both as datetimes and as ISO strings."""
        iso_start_time, iso_end_time = self._extract_time_range_as_strings(time_range)
        start_time = datetime.strptime(iso_start_time, _TIMESTAMP_FORMAT)
        end_time = datetime.strptime(iso_end_time, _TIMESTAMP_FORMAT)
        return start_time, end_time, iso_start_time, iso_end_time

    def get_time_ranges(self, dataset_id: str, cube_params: Mapping[str, Any]) -> List[Tuple]:
        """Build the list of (start, end) request time ranges for *dataset_id*.

        The aggregation period is taken from the third dot-separated
        component of the dataset id ('day', 'month'/'mon', 'year'/'yr');
        any other period falls back to asking the ODP for actual ranges.
        """
        start_time, end_time, iso_start_time, iso_end_time = \
            self._extract_time_range_as_datetime(
                cube_params.get('time_range', self.get_default_time_range(dataset_id)))
        time_period = dataset_id.split('.')[2]
        if time_period == 'day':
            start_time = datetime(year=start_time.year, month=start_time.month, day=start_time.day)
            end_time = datetime(year=end_time.year, month=end_time.month, day=end_time.day,
                                hour=23, minute=59, second=59)
            delta = relativedelta(days=1)
        elif time_period == 'month' or time_period == 'mon':
            start_time = datetime(year=start_time.year, month=start_time.month, day=1)
            end_time = datetime(year=end_time.year, month=end_time.month, day=1)
            delta = relativedelta(months=+1)
            end_time += delta
        elif time_period == 'year' or time_period == 'yr':
            start_time = datetime(year=start_time.year, month=1, day=1)
            end_time = datetime(year=end_time.year, month=12, day=31)
            delta = relativedelta(years=1)
        else:
            # Irregular period: query the ODP for the dataset's real ranges.
            end_time = end_time.replace(hour=23, minute=59, second=59)
            end_time_str = datetime.strftime(end_time, _TIMESTAMP_FORMAT)
            iso_end_time = self._extract_time_as_string(end_time_str)
            request_time_ranges = self._cci_odp.get_time_ranges_from_data(dataset_id, iso_start_time, iso_end_time)
            return request_time_ranges
        request_time_ranges = []
        this = start_time
        while this < end_time:
            # NOTE: 'next' shadows the builtin of the same name (local only).
            next = this + delta
            pd_this = pd.Timestamp(datetime.strftime(this, _TIMESTAMP_FORMAT))
            pd_next = pd.Timestamp(datetime.strftime(next, _TIMESTAMP_FORMAT))
            request_time_ranges.append((pd_this, pd_next))
            this = next
        return request_time_ranges

    def get_default_time_range(self, ds_id: str):
        """Return the dataset's temporal coverage, querying the ODP when the
        metadata does not provide it."""
        temporal_start = self._metadata.get('temporal_coverage_start', None)
        temporal_end = self._metadata.get('temporal_coverage_end', None)
        if not temporal_start or not temporal_end:
            # Deliberately wide probe range to discover the actual coverage.
            time_ranges = self._cci_odp.get_time_ranges_from_data(ds_id, '1000-01-01', '3000-12-31')
            if not temporal_start:
                if len(time_ranges) == 0:
                    raise ValueError(
                        "Could not determine temporal start of dataset. Please use 'time_range' parameter.")
                temporal_start = time_ranges[0][0]
            if not temporal_end:
                if len(time_ranges) == 0:
                    raise ValueError(
                        "Could not determine temporal end of dataset. Please use 'time_range' parameter.")
                temporal_end = time_ranges[-1][1]
        return (temporal_start, temporal_end)

    def get_all_variable_names(self) -> List[str]:
        """Names of all variables listed in the dataset metadata."""
        return [variable['name'] for variable in self._metadata['variables']]

    def get_dimensions(self) -> Mapping[str, int]:
        """Shallow copy of the dimension-name -> size mapping from the metadata."""
        return copy.copy(self._metadata['dimensions'])

    def get_coords_data(self, dataset_id: str):
        """Fetch coordinate variable data, widening the time range when the
        store's own range yields nothing."""
        var_names, coord_names = self._cci_odp.var_and_coord_names(dataset_id)
        coords_dict = {}
        for coord_name in coord_names:
            coords_dict[coord_name] = self.get_attrs(coord_name).get('size')
        dimension_data = self.get_variable_data(dataset_id, coords_dict)
        if len(dimension_data) == 0:
            # no valid data found in indicated time range, let's set this broader
            dimension_data = self._cci_odp.get_variable_data(dataset_id, coords_dict)
        return dimension_data

    def get_variable_data(self, dataset_id: str, variable_dict: Dict[str, int]):
        """Fetch variable data restricted to the store's first request time range."""
        return self._cci_odp.get_variable_data(dataset_id,
                                               variable_dict,
                                               self._time_ranges[0][0].strftime(_TIMESTAMP_FORMAT),
                                               self._time_ranges[0][1].strftime(_TIMESTAMP_FORMAT))

    def get_encoding(self, var_name: str) -> Dict[str, Any]:
        """Zarr encoding ('fill_value', 'dtype') taken from the variable's attributes."""
        encoding_dict = {}
        encoding_dict['fill_value'] = self.get_attrs(var_name).get('fill_value')
        encoding_dict['dtype'] = self.get_attrs(var_name).get('data_type')
        return encoding_dict

    def get_attrs(self, var_name: str) -> Dict[str, Any]:
        """Cached deep copy of the variable's metadata attributes."""
        if var_name not in self._attrs:
            self._attrs[var_name] = copy.deepcopy(
                self._metadata.get('variable_infos', {}).get(var_name, {}))
        return self._attrs[var_name]

    def fetch_chunk(self,
                    key: str,
                    var_name: str,
                    chunk_index: Tuple[int, ...],
                    time_range: Tuple[pd.Timestamp, pd.Timestamp]) -> bytes:
        """Fetch one chunk of *var_name* from the ODP for *time_range*;
        raises KeyError when the portal returns no data."""
        start_time, end_time = time_range
        identifier = self._cci_odp.get_dataset_id(self._dataset_name)
        iso_start_date = start_time.tz_localize(None).isoformat()
        iso_end_date = end_time.tz_localize(None).isoformat()
        dim_indexes = self._get_dimension_indexes_for_chunk(var_name, chunk_index)
        request = dict(parentIdentifier=identifier,
                       varNames=[var_name],
                       startDate=iso_start_date,
                       endDate=iso_end_date,
                       drsId=self._dataset_name,
                       fileFormat='.nc'
                       )
        data = self._cci_odp.get_data_chunk(request, dim_indexes)
        if not data:
            raise KeyError(f'{key}: cannot fetch chunk for variable {var_name!r} '
                           f'and time_range {time_range!r}.')
        _LOG.info(f'Fetched chunk for ({chunk_index})"{var_name}"')
        return data

    def _get_dimension_indexes_for_chunk(self, var_name: str, chunk_index: Tuple[int, ...]) -> tuple:
        """Translate a zarr chunk index into per-dimension slices in the
        source file, honouring any per-dimension data offsets."""
        dim_indexes = []
        var_dimensions = self.get_attrs(var_name).get('file_dimensions', [])
        chunk_sizes = self.get_attrs(var_name).get('file_chunk_sizes', [])
        offset = 0
        # dealing with the case that time has been added as additional first dimension
        if len(chunk_index) > len(chunk_sizes):
            offset = 1
        for i, var_dimension in enumerate(var_dimensions):
            if var_dimension == 'time':
                # The whole time extent of the file is requested.
                dim_indexes.append(slice(None, None, None))
                continue
            dim_size = self._dimensions.get(var_dimension, -1)
            if dim_size < 0:
                raise ValueError(f'Could not determine size of dimension {var_dimension}')
            data_offset = self._dimension_chunk_offsets.get(var_dimension, 0)
            start = data_offset + chunk_index[i + offset] * chunk_sizes[i]
            end = min(start + chunk_sizes[i], data_offset + dim_size)
            dim_indexes.append(slice(start, end))
        return tuple(dim_indexes)
| StarcoderdataPython |
#!/usr/bin/env python3
import os
import sys
import re
import argparse
# Package use pattern, e.g. "pkg::item" or "pkg::*".
# All patterns use raw strings: the original non-raw '(\w+)' style relies on
# invalid escape sequences, which raise SyntaxWarning on modern Python.
package_use_re = re.compile(r'(\w+)::([\*\w]+)')
# Include use pattern: `include "file.svh" or `include <file.svh>
include_use_re = re.compile(r'`include\s+["<]([\w/\.\d]+)[">]')
# Module instance pattern, assuming parentheses contents removed
module_instance_re = re.compile(r'''
(\w+)\s+ # module_identifier
(?:\#\s*\(\)\s*)? # optional parameters
(\w+)\s* # instance name
\(\)\s* # port connections
(?=;) # statement end, don't consume
''', re.DOTALL | re.VERBOSE)
# These can fail with weird comments (like nested), but should be good enough
comment_line_re = re.compile(r'//.*')
comment_block_re = re.compile(r'/\*.*?\*/', re.DOTALL)
# Match literal quoted strings
quote_re = re.compile(r'".*?"')
# Enforce space before "#" in modules
add_space_re = re.compile(r'#\s*\(')
def de_parentheses(text):
    """Blank out grouped content: the insides of top-level ``(...)`` groups
    are removed (keeping an empty ``()``), while ``[...]`` groups are removed
    entirely, brackets included. Nested groups collapse with their outermost
    group."""
    paren_depth = 0
    bracket_depth = 0
    pieces = []
    tail_start = 0
    for pos, ch in enumerate(text):
        if ch == '(':
            if paren_depth == 0 and bracket_depth == 0:
                # Keep everything up to and including this opening paren.
                pieces.append(text[tail_start:pos + 1])
            paren_depth += 1
        elif ch == '[':
            if paren_depth == 0 and bracket_depth == 0:
                # Keep everything before the bracket; the bracket is dropped.
                pieces.append(text[tail_start:pos])
            bracket_depth += 1
        elif ch == ')' and paren_depth:
            tail_start = pos  # resume output at the closing paren itself
            paren_depth -= 1
        elif ch == ']' and bracket_depth:
            tail_start = pos + 1  # resume output after the closing bracket
            bracket_depth -= 1
    pieces.append(text[tail_start:])
    return ''.join(pieces)
# SystemVerilog reserved words; instance-pattern matches whose "module name"
# is one of these are discarded in find_deps().
keywords = [
    'accept_on', 'alias', 'always', 'always_comb',
    'always_ff', 'always_latch', 'and', 'assert', 'assign', 'assume',
    'automatic', 'before', 'begin', 'bind', 'bins', 'binsof', 'bit',
    'break', 'buf', 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez',
    'cell', 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config',
    'const', 'constraint', 'context', 'continue', 'cover', 'covergroup',
    'coverpoint', 'cross', 'deassign', 'default', 'defparam', 'design',
    'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase',
    'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction',
    'endgenerate', 'endgroup', 'endinterface', 'endmodule', 'endpackage',
    'endprimitive', 'endprogram', 'endproperty', 'endspecify',
    'endsequence', 'endtable', 'endtask', 'enum', 'event', 'eventually',
    'expect', 'export', 'extends', 'extern', 'final', 'first_match',
    'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', 'function',
    'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff',
    'ifnone', 'ignore_bins', 'illegal_bins', 'implements', 'implies',
    'import', 'incdir', 'include', 'initial', 'inout', 'input', 'inside',
    'instance', 'int', 'integer', 'interconnect', 'interface',
    'intersect', 'join', 'join_any', 'join_none', 'large', 'let',
    'liblist', 'library', 'local', 'localparam', 'logic', 'longint',
    'macromodule', 'matches', 'medium', 'modport', 'module', 'nand',
    'negedge', 'nettype', 'new', 'nexttime', 'nmos', 'nor',
    'noshowcancelled', 'not', 'notif0', 'notif1', 'null', 'or', 'output',
    'package', 'packed', 'parameter', 'pmos', 'posedge', 'primitive',
    'priority', 'program', 'property', 'protected', 'pull0', 'pull1',
    'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent',
    'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real',
    'realtime', 'ref', 'reg', 'reject_on', 'release', 'repeat',
    'restrict', 'return', 'rnmos', 'rpmos', 'rtran', 'rtranif0',
    'rtranif1', 's_always', 's_eventually', 's_nexttime', 's_until',
    's_until_with', 'scalared', 'sequence', 'shortint', 'shortreal',
    'showcancelled', 'signed', 'small', 'soft', 'solve', 'specify',
    'specparam', 'static', 'string', 'strong', 'strong0', 'strong1',
    'struct', 'super', 'supply0', 'supply1', 'sync_accept_on',
    'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout',
    'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1',
    'tri', 'tri0', 'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef',
    'union', 'unique', 'unique0', 'unsigned', 'until', 'until_with',
    'untyped', 'use', 'uwire', 'var', 'vectored', 'virtual', 'void',
    'wait', 'wait_order', 'wand', 'weak', 'weak0', 'weak1', 'while',
    'wildcard', 'wire', 'with', 'within', 'wor', 'xnor', 'xor'
]
def find_deps(path, name, text, args):
    """Scan SystemVerilog source *text* and return its dependency names.

    path = repository relative path to file (kept for API compatibility)
    name = module name (always excluded from the result)
    text = file contents
    args = arg parser object, looking for args.debug
    """
    # Strip comments first so commented-out code is ignored.
    stripped = comment_block_re.sub('', comment_line_re.sub('', text))
    # `include targets must be collected before quoted strings are removed.
    included_files = [os.path.basename(include_path)
                      for include_path in include_use_re.findall(stripped)]
    # Drop quoted literals, then collect package references (pkg::item).
    stripped = quote_re.sub('', stripped)
    used_packages = [pkg_name
                     for pkg_name, _item in package_use_re.findall(stripped)]
    # Normalise "#(" spacing and empty out parenthesised groups, then
    # collect instantiated module names, skipping language keywords.
    normalised = de_parentheses(add_space_re.sub(' #(', stripped))
    instantiated = [mod_name
                    for mod_name, _inst in module_instance_re.findall(normalised)
                    if mod_name not in keywords]
    dep_set = {dep for dep in included_files + used_packages + instantiated
               if dep != name}
    return list(dep_set)
def main(args):
    """Read the file at ``args.path`` and print its sorted dependency list.

    Fix: the original leaked the file handle via ``open(...).read()``; the
    file is now closed deterministically with a ``with`` block.
    """
    name = args.name
    path = args.path
    with open(path, 'r') as source:
        text = source.read()
    deps = find_deps(path, name, text, args)
    print(f"{name} dependencies:")
    for mod in sorted(deps):
        print(f"\t{mod}")
if __name__ == '__main__':
    # Command-line entry point: parse arguments and run the analysis.
    argp = argparse.ArgumentParser(
        description='Parse file and create list of dependencies')
    argp.add_argument('path', help="path of module to analyze")
    argp.add_argument('name', help="name of module to analyze")
    argp.add_argument('-d', '--debug', action='store_true', help='print debug')
    args = argp.parse_args()
    main(args)
| StarcoderdataPython |
import asyncio
import pathlib
import threading
import aiofiles
import pytest
import requests
from baguette.app import Baguette
from baguette.headers import Headers
from baguette.httpexceptions import NotImplemented
from baguette.middleware import Middleware
from baguette.rendering import render
from baguette.request import Request
from baguette.responses import (
EmptyResponse,
FileResponse,
HTMLResponse,
PlainTextResponse,
)
from baguette.router import Router
from baguette.testing import TestClient
from baguette.view import View
from .conftest import (
Receive,
Send,
create_http_scope,
create_test_request,
strip,
)
def test_create_app():
    # Default construction: debug off, plain-text errors with descriptions.
    app = Baguette()
    assert app.debug is False
    assert isinstance(app.router, Router)
    assert isinstance(app.default_headers, Headers)
    assert app.error_response_type == "plain"
    assert app.error_include_description is True
    # Custom construction: debug=True overrides error_include_description.
    app = Baguette(
        debug=True,
        default_headers={"server": "baguette"},
        error_response_type="json",
        error_include_description=False,
    )
    assert app.debug is True
    assert isinstance(app.router, Router)
    assert isinstance(app.default_headers, Headers)
    assert app.default_headers["server"] == "baguette"
    assert app.error_response_type == "json"
    assert app.error_include_description is True  # True because debug=True
    # Unknown error response types are rejected at construction time.
    with pytest.raises(ValueError):
        Baguette(error_response_type="nonexistent")
def setup1(app):
    # Registers a root route that echoes the request body; returns the handler.
    @app.route("/")
    async def index(request):
        return await request.body()
    return index
def setup2(app):
    # Registers a GET-only route via add_route(); returns the handler.
    async def home():
        return "home"
    app.add_route(path="/home", handler=home, methods=["GET"])
    return home
def setup3(app):
    # Registers a class-based view with a typed path parameter and a default.
    # NOTE(review): the handlers take no 'self'; presumably the framework
    # inspects handler kwargs rather than binding methods - confirm.
    @app.route(
        "/profile/<user_id:int>",
        name="profile",
        defaults={"user_id": 0},
    )
    class ProfileView(View):
        async def get(user_id: int):
            return str(user_id)

        async def delete(user_id: int):
            return EmptyResponse()
    return ProfileView
@pytest.mark.parametrize(
    ["setup", "expected_attributes"],
    [
        [
            setup1,
            {
                "path": "/",
                "name": "index",
                "methods": ["GET", "HEAD"],
                "defaults": {},
                "handler_kwargs": ["request"],
                "handler_is_class": False,
            },
        ],
        [
            setup2,
            {
                "path": "/home",
                "name": "home",
                "methods": ["GET"],
                "defaults": {},
                "handler_kwargs": [],
                "handler_is_class": False,
            },
        ],
        [
            setup3,
            {
                "path": "/profile/<user_id:int>",
                "name": "profile",
                "methods": ["GET", "DELETE"],
                "defaults": {"user_id": 0},
                "handler_kwargs": ["request"],
                "handler_is_class": True,
            },
        ],
    ],
)
def test_app_route(setup, expected_attributes: dict):
    # Each setup registers one route; verify its recorded attributes.
    app = Baguette()
    handler = setup(app)
    # Pop routes until the one belonging to this handler is found
    # (class-based handlers are stored as instances of the view class).
    router = app.router.routes.pop()
    while (not router.handler_is_class and router.handler != handler) or (
        router.handler_is_class and not isinstance(router.handler, handler)
    ):
        router = app.router.routes.pop()
    for name, value in expected_attributes.items():
        assert getattr(router, name) == value
@pytest.mark.asyncio
@pytest.mark.parametrize(
    [
        "path",
        "method",
        "body",
        "expected_response_body",
        "expected_response_status_code",
    ],
    [
        ["/", "GET", "Hello, World!", "Hello, World!", 200],
        ["/", "POST", "", "Method Not Allowed", 405],
        ["/user/1", "GET", "", "1", 200],
        ["/user/-1", "GET", "", "Bad Request", 400],
        ["/user/text", "GET", "", "Not Found", 404],
        ["/notimplemented", "GET", "", "Not Implemented", 501],
        ["/error", "GET", "", "Internal Server Error", 500],
        ["/nonexistent", "GET", "", "Not Found", 404],
    ],
)
async def test_app_handle_request(
    path: str,
    method: str,
    body: str,
    expected_response_body: str,
    expected_response_status_code: int,
):
    # Exercises routing, the int path converter, and HTTP error translation.
    app = Baguette(error_include_description=False)

    @app.route("/")
    async def index(request):
        return await request.body()

    @app.route("/user/<user_id:int>")
    async def user(user_id: int):
        return str(user_id)

    @app.route("/notimplemented")
    async def notimplemented():
        raise NotImplemented()  # noqa: F901

    @app.route("/error")
    async def error():
        raise Exception()

    request = create_test_request(path=path, method=method, body=body)
    response = await app.handle_request(request)
    assert response.body == expected_response_body
    assert response.status_code == expected_response_status_code
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ["scope", "receive", "expected_sent_values"],
    [
        [
            create_http_scope(),
            Receive(
                [
                    {
                        "type": "http.request.body",
                        "body": b"Hello, ",
                        "more_body": True,
                    },
                    {
                        "type": "http.request.body",
                        "body": b"World!",
                    },
                ]
            ),
            [
                {
                    "type": "http.response.start",
                    "status": 200,
                    "headers": [
                        [b"content-type", b"text/plain; charset=utf-8"]
                    ],
                },
                {
                    "type": "http.response.body",
                    "body": b"Hello, World!",
                },
            ],
        ],
        [
            {"type": "lifespan"},
            Receive(
                [
                    {"type": "lifespan.startup"},
                    {"type": "lifespan.shutdown"},
                ]
            ),
            [
                {"type": "lifespan.startup.complete"},
                {"type": "lifespan.shutdown.complete"},
            ],
        ],
    ],
)
async def test_app_call(scope, receive: Receive, expected_sent_values: list):
    # Drives the raw ASGI interface for both http and lifespan scopes and
    # checks the exact sequence of messages sent back.
    app = Baguette()

    @app.route("/")
    async def index(request):
        return PlainTextResponse(await request.body())

    send = Send()
    await app(scope, receive, send)
    assert len(expected_sent_values) == len(send.values)
    for message in send.values:
        assert message == expected_sent_values.pop(0)
@pytest.mark.asyncio
async def test_app_call_lifespan_error():
    # Startup/shutdown hooks that raise must report lifespan failures.
    class BadLifespanApp(Baguette):
        async def startup(self):
            raise Exception()

        async def shutdown(self):
            raise Exception()

    app = BadLifespanApp()
    scope = {"type": "lifespan"}
    receive = Receive(
        [
            {"type": "lifespan.startup"},
            {"type": "lifespan.shutdown"},
        ]
    )
    expected_sent_values = [
        {"type": "lifespan.startup.failed", "message": ""},
        {"type": "lifespan.shutdown.failed", "message": ""},
    ]
    send = Send()
    await app(scope, receive, send)
    assert len(expected_sent_values) == len(send.values)
    for message in send.values:
        assert message == expected_sent_values.pop(0)
@pytest.mark.asyncio
async def test_app_call_error():
    # Unknown ASGI scope types must be rejected with NotImplementedError.
    app = Baguette()
    scope = {"type": "nonexistent"}
    receive = Receive()
    send = Send()
    with pytest.raises(NotImplementedError):
        await app(scope, receive, send)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ["path", "file_path", "mimetype"],
    [
        ["/static/banner.png", "tests/static/banner.png", "image/png"],
        ["/static/css/style.css", "tests/static/css/style.css", "text/css"],
        [
            "/static/js/script.js",
            "tests/static/js/script.js",
            "application/javascript",
        ],
    ],
)
async def test_app_static(path, file_path, mimetype):
    # Static files are served with the correct mimetype and content-length.
    app = TestClient(Baguette(static_directory="tests/static"))
    response: FileResponse = await app.get(path)
    path = pathlib.Path(file_path).resolve(strict=True)
    async with aiofiles.open(path, "rb") as f:
        content_length = len(await f.read())
    assert isinstance(response, FileResponse)
    assert response.file_path == path
    assert response.mimetype == mimetype
    assert response.headers["content-type"] == mimetype
    assert response.file_size == content_length
    assert int(response.headers["content-length"]) == content_length
# Reference output for test_app_render below; compared through conftest's
# whitespace-normalising strip() helper.
expected_html = """
<!DOCTYPE html>
<html lang="en">
<head>
<link rel="stylesheet" href="/static/css/style.css" />
<title>Index - My Webpage</title>
<style type="text/css">
.important {
color: #336699;
}
</style>
</head>
<body>
<div id="content">
<h1>Index</h1>
<p class="important">Welcome to my awesome homepage.</p>
<p>1st paragraph</p>
<p>2nd paragraph</p>
</div>
<div id="footer">
© Copyright 2021 by <a href="https://example.com/">me</a>.
</div>
</body>
</html>"""
@pytest.mark.asyncio
async def test_app_render():
    # Template rendering produces the expected HTML (whitespace-insensitive).
    app = Baguette(templates_directory="tests/templates")

    @app.route("/template")
    async def template():
        return await render(
            "index.html",
            paragraphs=["1st paragraph", "2nd paragraph"],
        )

    app = TestClient(app)
    response: HTMLResponse = await app.get("/template")
    assert strip(response.body) == strip(expected_html)
import time
class TimingMiddleware(Middleware):
    """Middleware recording request handling duration in an ``X-time`` header."""

    async def __call__(self, request: Request):
        began = time.perf_counter()
        response = await self.next(request)
        elapsed = time.perf_counter() - began
        response.headers["X-time"] = str(elapsed)
        return response
@pytest.mark.asyncio
async def test_app_create_middleware():
    # Middlewares passed to the constructor are applied to every request.
    app = Baguette(middlewares=[TimingMiddleware])

    @app.route("/short")
    async def short():
        return ""

    @app.route("/long")
    async def long():
        await asyncio.sleep(0.2)
        return ""

    # Two built-in middlewares plus TimingMiddleware.
    assert len(app.middlewares) == 3

    request = create_test_request(path="/short")
    response = await app.handle_request(request)
    assert "X-time" in response.headers
    short_time = float(response.headers["X-time"])

    request = create_test_request(path="/long")
    response = await app.handle_request(request)
    assert "X-time" in response.headers
    long_time = float(response.headers["X-time"])

    # The route that sleeps must report a longer handling time.
    assert short_time < long_time
@pytest.mark.asyncio
async def test_app_add_remove_middleware():
    # Middlewares can be attached and detached after app creation.
    app = Baguette()

    @app.route("/short")
    async def short():
        return ""

    @app.route("/long")
    async def long():
        await asyncio.sleep(0.2)
        return ""

    assert len(app.middlewares) == 2
    app.add_middleware(TimingMiddleware)
    assert len(app.middlewares) == 3

    request = create_test_request(path="/short")
    response = await app.handle_request(request)
    assert "X-time" in response.headers
    short_time = float(response.headers["X-time"])

    request = create_test_request(path="/long")
    response = await app.handle_request(request)
    assert "X-time" in response.headers
    long_time = float(response.headers["X-time"])

    assert short_time < long_time

    app.remove_middleware(TimingMiddleware)
    assert len(app.middlewares) == 2
@pytest.mark.asyncio
async def test_app_middleware():
    # The @app.middleware() decorator accepts both class-based and
    # function-based middleware; both are removable afterwards.
    app = Baguette()

    @app.route("/short")
    async def short():
        return ""

    @app.route("/long")
    async def long():
        await asyncio.sleep(0.2)
        return ""

    assert len(app.middlewares) == 2

    @app.middleware()
    class TimingMiddleware(Middleware):
        async def __call__(self, request: Request):
            start_time = time.perf_counter()
            response = await self.next(request)
            response.headers["X-time"] = str(time.perf_counter() - start_time)
            return response

    assert len(app.middlewares) == 3

    @app.middleware()
    async def timing_middleware(next_middleware, request: Request):
        start_time = time.time()
        response = await next_middleware(request)
        process_time = time.time() - start_time
        response.headers["X-time"] = str(process_time)
        return response

    assert len(app.middlewares) == 4

    request = create_test_request(path="/short")
    response = await app.handle_request(request)
    assert "X-time" in response.headers
    short_time = float(response.headers["X-time"])

    request = create_test_request(path="/long")
    response = await app.handle_request(request)
    assert "X-time" in response.headers
    long_time = float(response.headers["X-time"])

    assert short_time < long_time

    app.remove_middleware(timing_middleware)
    app.remove_middleware(TimingMiddleware)
    assert len(app.middlewares) == 2
def test_app_run():
    """app.run() actually serves HTTP: start the server, make one real request, exit."""
    app = Baguette()
    running = threading.Event()

    async def set_running():
        # Invoked periodically by the server (callback_notify) once it is up.
        running.set()

    def do_request():
        # Wait until the server signals readiness, then hit it once.
        if not running.wait(10.0):
            raise TimeoutError("App hasn't started after 10s")
        requests.get("http://127.0.0.1:8080")

    request_thread = threading.Thread(target=do_request)
    request_thread.start()
    app.run(
        host="127.0.0.1",
        port=8080,
        debug=True,
        limit_max_requests=1,  # end after one request
        callback_notify=set_running,
        timeout_notify=1,  # set the running event every second
    )
    request_thread.join()
| StarcoderdataPython |
1904623 | import grpc
import time
import echo_pb2
import echo_pb2_grpc
import utils
from concurrent import futures
from datetime import datetime
class Server(object):
    """Thin wrapper around a grpc.Server hosting the Echo service."""

    def __init__(self, port=5000):
        # port: TCP port bound (insecurely) in start().
        self.port = port
        # Thread pool allows up to 10 concurrent RPC handlers.
        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        echo_pb2_grpc.add_EchoServicer_to_server(ServerServiceExample(), self.server)

    def start(self):
        """Bind the port and start serving; returns immediately (non-blocking)."""
        self.server.add_insecure_port('[::]:{}'.format(self.port))
        self.server.start()
        print('Starting server. Listening on port {}'.format(self.port))

    def stop(self):
        """Stop the server immediately (zero-second grace period)."""
        self.server.stop(0)

    def run(self):
        """Sleep ~4 seconds, then return.

        NOTE(review): the unconditional ``break`` makes the loop body run
        exactly once, so this returns after one 4s sleep and ``stop()`` is
        only reached if Ctrl-C interrupts the sleep — confirm whether a
        serve-forever loop was intended.
        """
        try:
            while True:
                time.sleep(4)
                break
        except KeyboardInterrupt:
            self.stop()
class ServerServiceExample(echo_pb2_grpc.EchoServicer):
    """Echo RPC implementation: replies with the upper-cased message."""

    def __init__(self):
        # Name reported back to clients in every response.
        self.username = "Echo"

    def SendEcho(self, request, context):
        """Handle one Echo RPC: log the request, reply with upper-cased content."""
        print("{} S <- C: [{}] {}".format(datetime.fromtimestamp(time.time()), request.username, request.content))
        response = echo_pb2.Data()
        response.username = self.username
        # utils.UpperString is project-local; presumably upper-cases — confirm.
        response.content = utils.UpperString(request.content)
        print("{} S -> C: [{}] {}".format(datetime.fromtimestamp(time.time()), response.username, response.content))
        return response
if __name__ == "__main__":
    # Start the gRPC server, then block in run() (returns after ~4s or on Ctrl-C).
    server = Server()
    server.start()
    server.run()
| StarcoderdataPython |
12813514 | <reponame>brandon-rhodes/pycon2010-mighty-dictionary
import _dictdraw, sys

# Render the internal hash-table layout of this dict (highlighting slot 4)
# and write the resulting picture to the PNG path given as the first CLI arg.
d = {'ftp': 21, 'ssh': 22, 'smtp': 25, 'time': 37, 'www': 80}
surface = _dictdraw.draw_dictionary(d, [4])
surface.write_to_png(sys.argv[1])
| StarcoderdataPython |
9710245 | <reponame>tuomas777/tunnistamo<filename>oidc_apis/scopes.py
from django.utils.translation import ugettext_lazy as _
from oidc_provider.lib.claims import ScopeClaims, StandardScopeClaims
from .models import ApiScope
from .utils import combine_uniquely
class ApiScopeClaims(ScopeClaims):
    """Scope info provider for project-defined API scopes (ApiScope model)."""

    @classmethod
    def get_scopes_info(cls, scopes=None):
        """Return ``{scope, name, description}`` dicts for the given scopes.

        Unknown identifiers are silently dropped; input order is preserved.

        :param scopes: iterable of scope identifiers; defaults to none.
            (A ``None`` sentinel replaces the original mutable ``[]``
            default-argument, which is a well-known Python pitfall.)
        """
        if scopes is None:
            scopes = []
        scopes_by_identifier = {
            api_scope.identifier: api_scope
            for api_scope in ApiScope.objects.by_identifiers(scopes)
        }
        # Map each requested identifier back to its ApiScope, keeping order.
        api_scopes = (scopes_by_identifier.get(scope) for scope in scopes)
        return [
            {
                'scope': api_scope.identifier,
                'name': api_scope.name,
                'description': api_scope.description,
            }
            for api_scope in api_scopes if api_scope
        ]
class GithubUsernameScopeClaims(ScopeClaims):
    """Claims for the ``github_username`` scope."""

    # (display name, description) shown to the user when consenting.
    info_github_username = (
        _("GitHub username"), _("Access to your GitHub username."))

    def scope_github_username(self):
        """Return ``{'github_username': ...}`` from the user's linked GitHub account.

        Returns an empty dict when no GitHub social account is linked
        (allauth-style ``socialaccount_set`` — confirm provider semantics).
        """
        social_accounts = self.user.socialaccount_set
        github_account = social_accounts.filter(provider='github').first()
        if not github_account:
            return {}
        github_data = github_account.extra_data
        return {
            'github_username': github_data.get('login'),
        }
class CombinedScopeClaims(ScopeClaims):
    """Aggregate several ScopeClaims classes into a single claims source.

    Scope info and userinfo responses are collected from every class in
    ``combined_scope_claims``; on conflicting scope identifiers, later
    classes in the list win.
    """

    combined_scope_claims = [
        StandardScopeClaims,
        GithubUsernameScopeClaims,
        ApiScopeClaims,
    ]

    @classmethod
    def get_scopes_info(cls, scopes=None):
        """Return info dicts for ``scopes`` plus any scopes their APIs require.

        ``scopes`` defaults to none.  (A ``None`` sentinel replaces the
        original mutable ``[]`` default argument.)
        """
        if scopes is None:
            scopes = []
        extended_scopes = cls._extend_scope(scopes)
        scopes_info_map = {}
        for claim_cls in cls.combined_scope_claims:
            for info in claim_cls.get_scopes_info(extended_scopes):
                scopes_info_map[info['scope']] = info
        # Preserve the (extended) request order, dropping unknown scopes.
        return [
            scopes_info_map[scope]
            for scope in extended_scopes
            if scope in scopes_info_map
        ]

    @classmethod
    def _extend_scope(cls, scopes):
        # Append (sorted) scopes required by the APIs behind any requested
        # API scopes, without duplicating entries already present.
        required_scopes = cls._get_all_required_scopes_by_api_scopes(scopes)
        extended_scopes = combine_uniquely(scopes, sorted(required_scopes))
        return extended_scopes

    @classmethod
    def _get_all_required_scopes_by_api_scopes(cls, scopes):
        # Union of required_scopes over every distinct API owning one of the
        # requested API scopes.
        api_scopes = ApiScope.objects.by_identifiers(scopes)
        apis = {x.api for x in api_scopes}
        return set(sum((list(api.required_scopes) for api in apis), []))

    def create_response_dic(self):
        """Merge the userinfo dicts produced by all combined claim classes."""
        result = super(CombinedScopeClaims, self).create_response_dic()
        # Claims objects do not keep the token, so rebuild a token-like
        # adapter to feed each combined claims class.
        token = FakeToken.from_claims(self)
        for claim_cls in self.combined_scope_claims:
            claim = claim_cls(token)
            result.update(claim.create_response_dic())
        return result
class FakeToken(object):
    """Minimal token stand-in for feeding ScopeClaims constructors.

    ScopeClaims only ever touches a token's ``user``, ``scope`` and
    ``client`` attributes, so this adapter carries exactly those three.
    It can be built directly, or from a claims object (which does not
    retain the real token).
    """

    def __init__(self, user, scope, client):
        self.user, self.scope, self.client = user, scope, client

    @classmethod
    def from_claims(cls, claims):
        """Adapt a claims object's ``user``/``scopes``/``client`` into a token."""
        return cls(claims.user, claims.scopes, claims.client)
def get_userinfo_by_scopes(user, scopes, client=None):
    """Return the merged userinfo dict for ``user`` restricted to ``scopes``."""
    # Build a throwaway token carrying just user/scope/client and evaluate
    # all combined claims against it.
    token = FakeToken(user, scopes, client)
    return _get_userinfo_by_token(token)
def _get_userinfo_by_token(token):
    # Userinfo is the merged response dict of every combined claims class.
    return CombinedScopeClaims(token).create_response_dic()
| StarcoderdataPython |
6535242 | # Copyright (c) 2021 tapiocode
# https://github.com/tapiocode
# MIT License
import micropython
from led_array import led_array
from machine import Timer
# Reserve buffer space so exceptions raised inside ISRs (the Timer callback)
# can still produce a traceback on MicroPython.
micropython.alloc_emergency_exception_buf(100)

# Cycled LED animation sequences: named built-ins ('sweeper', 'linear') or
# explicit per-step LED index lists (-1 = no LED lit on that step).
sequences = [
    'sweeper',
    'linear',
    [4, 5, 3, 6, 2, 7, 1, 8, 0, 9, -1, -1, -1, -1, -1, -1], # Burst out from center
    [-1, 3, 6, -1, 0, 2, 1, -1, 7, -1, 9, -1, 5, 4, -1, 8] # Random
]
curr_sequence_index = 0
print('Starting')

def goto_next_sequence():
    """Advance to the next sequence (wrapping) and install it on the array."""
    global led_demo, sequences, curr_sequence_index
    # Loop through the array indices.  Note: 'next' shadows the builtin.
    next = sequences[curr_sequence_index % len(sequences)]
    curr_sequence_index += 1
    print('Sequence: ', next)
    led_demo.set_led_sequence(next)

# Set up a periodic timer to trigger change every 4 seconds.
tim = Timer(
    period = 4000,
    mode = Timer.PERIODIC,
    callback = lambda t:goto_next_sequence()
)
# GPIO pins driving the 10 LEDs, starting with the 'sweeper' sequence.
led_demo = led_array([16, 17, 18, 19, 20, 21, 22, 26, 27, 28], 'sweeper')
# Script never exits after calling run()
led_demo.run()
| StarcoderdataPython |
6665183 | import io
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.models as models
from PIL import Image
from flask import Flask, request, send_from_directory
from flask_cors import CORS
from functools import reduce
# Training-time constants exported by the training pipeline (details.json):
# normalization stats, input size, class names and held-out accuracy.
with open('details.json') as f:
    details = json.load(f)
train_mean = details['train_mean']
train_std = details['train_std']
imsize = details['imsize']
categories = details['class_names']
model_accuracy = details['accuracy']

# 'other' is the background/no-fruit class; it is stripped from user-facing
# probability summaries.
CAT_OTHER = 'other'
IDX_OTHER = categories.index(CAT_OTHER)
CATEGORIES_NO_OTHER = [cat for cat in categories if cat != CAT_OTHER]

# Source: https://pytorch.org/tutorials/intermediate/flask_rest_api_tutorial.html
app = Flask(__name__, static_folder='frontend/build', static_url_path='')
cors = CORS(app)

# ResNet-18 backbone with the final FC layer resized to our class count;
# weights come from the local 'model' checkpoint, loaded CPU-only.
model = models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, len(categories))
model.load_state_dict(torch.load('model', map_location=torch.device('cpu')))  # force CPU-only deserialization
model.eval()
def transform_image(image_bytes):
    """Decode raw image bytes into a normalized (1, C, H, W) input tensor.

    Applies the same resize/normalization as training (train_mean/train_std,
    imsize from details.json); converts to RGB so grayscale/alpha inputs work.
    """
    my_transforms = transforms.Compose([
        transforms.Resize(imsize),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=train_mean,
            std=train_std
        )
    ])
    img = Image.open(io.BytesIO(image_bytes)).convert('RGB')
    # unsqueeze adds the batch dimension expected by the model.
    return my_transforms(img).unsqueeze(0)
def get_prediction(image_bytes):
    """Run the classifier on raw image bytes; return softmax class probabilities."""
    tensor = transform_image(image_bytes=image_bytes)
    logits = model.forward(tensor)
    probas = F.softmax(logits, dim=-1)
    return probas
def generate_response(p):
    """Build the API response dict from the model's class probabilities.

    Args:
        p (torch.Tensor): softmax probabilities, shape (1, num_classes).

    Returns:
        dict: ``{'fruit_visible': False}`` when the top class is 'other';
        otherwise ``fruit_visible``, ``result`` (top non-'other' category and
        its renormalized probability) and ``summary`` (all non-'other'
        categories with probabilities rescaled to sum to 1).
    """
    _, y_hat = p.max(1)
    predicted_idx = y_hat.item()
    predicted_cat = categories[predicted_idx]
    response = {}
    if predicted_cat == CAT_OTHER:
        response['fruit_visible'] = False
    else:
        response['fruit_visible'] = True
        # Calculate probability distribution ignoring 'other'
        new_p = p.flatten().tolist()
        del new_p[IDX_OTHER]
        # Scale so probabilities sum up to 1.  (Softmax outputs are strictly
        # positive, so sum(new_p) should never be 0 here — presumed safe.)
        scale = 1 / sum(new_p)
        new_p = [scale * x for x in new_p]
        # Store category with highest probability
        response['result'] = {
            'category': predicted_cat,
            'probability': max(new_p)
        }
        # Store summary of all categories and associated probabilities
        response['summary'] = {
            'categories': CATEGORIES_NO_OTHER,
            'probabilities': new_p
        }
    return response
@app.route('/predict', methods=['POST'])
def predict():
    """POST /predict: classify one or more uploaded images ('file[]' field).

    Predictions are averaged across all uploaded files before the response
    is generated, so multiple shots of the same object improve robustness.
    """
    if request.method == 'POST':
        files = request.files.getlist('file[]')
        # Summarise predictions for each file
        ps = [get_prediction(image_bytes=file.read()) for file in files]
        ps_sum = reduce(torch.add, ps)
        ps_avg = torch.div(ps_sum, len(ps))
        # Process average of all predictions
        response = generate_response(ps_avg)
        return response, 200
@app.route('/model_accuracy', methods=['GET'])
def get_model_accuracy():
    """GET /model_accuracy: report the held-out accuracy from details.json."""
    if request.method == 'GET':
        return {'accuracy': model_accuracy}, 200
@app.route('/robots.txt')
@app.route('/sitemap.xml')
def static_from_root():
    """Serve robots.txt / sitemap.xml straight from the static root.

    NOTE(review): the first argument to send_from_directory should be a
    directory; ``app.static_url_path`` is '' here — confirm this resolves
    to the intended folder rather than ``app.static_folder``.
    """
    return send_from_directory(app.static_url_path, request.path[1:])
@app.route('/')
def serve():
    """Serve the built React frontend's index.html at the site root."""
    return send_from_directory(app.static_folder, 'index.html')
if __name__ == '__main__':
app.run()
| StarcoderdataPython |
1908614 | <filename>tests/test_fastq.py
from .minimal import minimal
from hypothesis import errors, given
import pytest
from hypothesis_bio.hypothesis_bio import fastq, fastq_quality, MAX_ASCII
def test_fastq_quality_smallest_example():
    """The minimal fastq_quality example shrinks to the empty string."""
    actual = minimal(fastq_quality())
    expected = ""
    assert actual == expected
def test_fastq_quality_smallest_non_empty_with_default_ascii():
    """With size=1 the minimal quality char is '@' under the default offset."""
    actual = minimal(fastq_quality(size=1))
    expected = "@"
    assert actual == expected
def test_fastq_quality_size_three_with_one_quality_score():
    """Pinning min==max==5 forces a constant quality string ('E' = default offset + 5)."""
    actual = minimal(fastq_quality(size=3, min_score=5, max_score=5))
    expected = "EEE"
    assert actual == expected
def test_fastq_quality_size_three_with_one_quality_score_and_sanger_offset():
    """With the Sanger offset (33), score 5 maps to '&' (chr(33 + 5))."""
    actual = minimal(fastq_quality(size=3, min_score=5, max_score=5, offset=33))
    expected = "&&&"
    assert actual == expected
def test_fastq_quality_min_score_larger_than_max_score_raises_error():
    """min_score > max_score is an invalid hypothesis argument combination."""
    min_score = 10
    max_score = 9
    with pytest.raises(errors.InvalidArgument):
        minimal(fastq_quality(min_score=min_score, max_score=max_score))
def test_fastq_quality_offset_causes_outside_ascii_range_raises_error():
    """offset + max_score beyond the ASCII range must raise ValueError.

    Bug fix: the original computed ``offset = 200`` but never passed it to
    ``fastq_quality``, so the out-of-range offset named by this test was
    silently ignored and the default offset was exercised instead.
    """
    min_score = 100
    max_score = 101
    offset = 200
    with pytest.raises(ValueError):
        minimal(fastq_quality(min_score=min_score, max_score=max_score,
                              offset=offset))
def test_fastq_smallest_example():
    """Minimal fastq record: empty header/sequence/quality, 4-line layout."""
    actual = minimal(fastq())
    expected = "@ \n\n+ \n"
    assert actual == expected
def test_fastq_smallest_non_empty():
    """Minimal size-1 record: one base ('A') and one quality char ('@')."""
    actual = minimal(fastq(size=1))
    expected = "@ \nA\n+ \n@"
    assert actual == expected
@given(fastq(size=10))
def test_fastq_size_over_one(fastq_string: str):
    """Property test: any size-10 record has a valid 4-field fastq structure."""
    fields = fastq_string.split("\n")
    header_begin = fields[0][0]
    assert header_begin == "@"
    header = fields[0][1:]
    # Header text must not contain fasta/fastq record-start characters.
    assert all(c not in ">@" for c in header)
    sequence = fields[1]
    assert all(c in "ACGT" for c in sequence)
    seq_qual_sep = fields[2][0]
    assert seq_qual_sep == "+"
    quality = fields[-1]
    # Quality chars stay within [default offset (64), MAX_ASCII].
    assert all(64 <= ord(c) <= MAX_ASCII for c in quality)
@given(fastq(size=10, add_comment=True, additional_description=False))
def test_fastq_size_over_one_with_comment(fastq_string: str):
    """Like test_fastq_size_over_one, but the header must carry a comment (a space)."""
    fields = fastq_string.split("\n")
    header_begin = fields[0][0]
    assert header_begin == "@"
    header = fields[0][1:]
    assert all(c not in ">@" for c in header)
    # The comment separator guarantees a space in the header.
    assert " " in header
    sequence = fields[1]
    assert all(c in "ACGT" for c in sequence)
    seq_qual_sep = fields[2][0]
    assert seq_qual_sep == "+"
    quality = fields[-1]
    assert all(64 <= ord(c) <= MAX_ASCII for c in quality)
@given(fastq(size=10, add_comment=True, additional_description=True))
def test_fastq_size_over_one_with_comment_and_description(fastq_string: str):
    """Record with comment AND additional description after the '+' separator.

    Bug fix: this function was originally also named
    ``test_fastq_size_over_one_with_comment``, duplicating (and therefore
    shadowing) the previous test so pytest only ever collected this one.
    Renamed so both tests run.
    """
    fields = fastq_string.split("\n")
    header_begin = fields[0][0]
    assert header_begin == "@"
    header = fields[0][1:]
    assert all(c not in ">@" for c in header)
    assert " " in header
    sequence = fields[1]
    assert all(c in "ACGT" for c in sequence)
    seq_qual_sep = fields[2][0]
    assert seq_qual_sep == "+"
    additional_description = fields[2][1:]
    # NOTE(review): the next two asserts re-check ``header`` — they look like
    # a copy-paste remnant and probably should check ``additional_description``.
    assert all(c not in ">@" for c in header)
    assert " " in header
    quality = fields[-1]
    assert all(64 <= ord(c) <= MAX_ASCII for c in quality)
6450237 | <filename>docx_output.py
# -*- coding: utf-8 -*-
import codecs
import MySQLdb
import parse_html
import sys
from docx import Document
from docx.oxml.ns import qn
from docx.oxml import OxmlElement
from docx.shared import RGBColor
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.shared import Inches
# Python 2 idiom: reload(sys) re-exposes setdefaultencoding (hidden by
# site.py) so implicit str<->unicode conversions use UTF-8 module-wide.
reload(sys);
sys.setdefaultencoding("utf8")
def query(chinese, table_name):
    """Return all rows of ``table_name`` whose ``chinese`` column equals ``chinese``.

    Bug fixes over the original:
    - ``chinese`` is now bound as a DB-API parameter instead of being
      interpolated into the SQL string (SQL-injection / quoting hazard).
      Table names cannot be bound as parameters, so ``table_name`` must
      still come from trusted code (callers pass the literal "sample").
    - The connection is closed when the function returns; the original
      leaked one connection per call.

    NOTE(review): credentials are hard-coded; consider moving them to config.
    """
    db = MySQLdb.connect("localhost", "root", "kellydc", "ancientChinese", charset='utf8')
    try:
        cursor = db.cursor()
        cursor.execute("select * from %s where chinese=%%s" % table_name,
                       (chinese,))
        return list(cursor.fetchall())
    finally:
        db.close()
# Deduplicate the characters collected by parse_html, reporting the
# before/after counts (Python 2 print statements).
print len(parse_html.chinese_set)
ids = list(set(parse_html.chinese_set))
print len(ids)
def html_output(html_file):
    """Write each distinct character and its sample rows as HTML (Python 2).

    For every character in the module-level ``ids``, emits one <p> with the
    character and one <p> listing image-path/source pairs from the "sample"
    table, printing rough percentage progress to stdout.
    """
    f = open(html_file, 'w+')
    length = len(ids)
    # One "pace" of processed items corresponds to roughly 1% of the total.
    pace = int(length/100)+1
    nowpace = 0
    nowpercant = 0
    for charI in ids:
        nowpace+=1
        print >> f,"<p>"
        print >> f,charI
        print >> f,"</p>"
        result = query(charI, "sample")
        print >> f,"<p>"
        for rs in result:
            # rs[3] = image path, rs[2] = source reference (per query() rows).
            print >> f,rs[3],rs[2],","
        print >> f,"</p>"
        if nowpace==pace:
            nowpercant+=1
            print str(nowpercant)+"%"
            nowpace = 0
    f.close()
    print "done"
def word_output(word_file):
    """Write each distinct character with its sample images to a .docx (Python 2).

    Same traversal/progress logic as html_output, but renders inline
    pictures plus captions via python-docx.
    """
    document = Document()
    document.styles['Normal'].font.name = u'黑体'
    document.add_heading(word_file, 0)
    length = len(ids)
    pace = int(length/100)+1
    nowpace = 0
    nowpercant = 0
    for charI in ids:
        nowpace+=1
        p1 = document.add_paragraph(charI)
        result = query(charI, "sample")
        p = document.add_paragraph()
        for rs in result:
            run = p.add_run()
            # rs[3] = image path, rs[2] = caption text.
            run.add_picture(str(rs[3]))
            run.add_text(str(rs[2]))
            run.add_text(';')
        if nowpace==pace:
            nowpercant+=1
            print str(nowpercant)+"%"
            nowpace = 0
    document.save(word_file) #可以设置其他路径 (another path may be configured)
# Dispatch on parse_html's status flag: 0 = parse succeeded, run the
# requested export; other values signal missing data (see parse_html).
if parse_html.situation==0:
    if sys.argv[1]=="docx":
        word_output(sys.argv[2])
    elif sys.argv[1]=="html":
        html_output(sys.argv[2])
    print "done"
elif parse_html.situation==1:
    print "chinese less"
else:
    print "img less"
| StarcoderdataPython |
5051517 | <filename>src/25.py
"""
1000-digit Fibonacci number
The Fibonacci sequence is defined by the recurrence relation:
F_n = F_n−1 + F_n−2, where F_1 = 1 and F_2 = 1.
Hence the first 12 terms will be:
F_1 = 1
F_2 = 1
F_3 = 2
F_4 = 3
F_5 = 5
F_6 = 8
F_7 = 13
F_8 = 21
F_9 = 34
F_10 = 55
F_11 = 89
F_12 = 144
The 12th term, F_12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence to contain
1000 digits?
"""
# Target digit count for the Project Euler 25 search.
length = 1_000
def initial_func(length):
    """Return the index of the first Fibonacci term with `length` digits.

    Iterates the sequence pairwise (F_1 = F_2 = 1) until the current term
    reaches 10**(length - 1), i.e. the smallest `length`-digit number.
    """
    threshold = 10 ** (length - 1)
    prev, curr = 1, 1
    index = 1
    while prev < threshold:
        prev, curr = curr, prev + curr
        index += 1
    return index
def improved_func(length):
    """Return the index of the first Fibonacci term with `length` digits.

    Implements the previously-empty stub in O(1) via Binet's formula:
    F_n ~ phi**n / sqrt(5), so F_n has at least `length` digits once
    n * log10(phi) - log10(5)/2 >= length - 1.  The smallest such n is the
    answer (4782 for length = 1000).
    """
    import math  # local import keeps the module's top-level imports untouched

    if length <= 1:
        return 1  # F_1 = 1 already has one digit; the asymptotic bound is off here
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((length - 1 + math.log10(5) / 2) / math.log10(phi))
print(initial_func(length))
# print(improved_func(length))
| StarcoderdataPython |
9700576 | <reponame>AravinthPanch/code-snippets<gh_stars>1-10
__author__ = '<NAME>'
__email__ = "<EMAIL>"
__date__ = '22/04/15'
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Minimal pyplot demo (Python 2 print): plot a single y-series against
    # implicit x indices 0..3, label the y axis, and show interactively.
    print "In"
    plt.plot([1, 2, 3, 4])
    plt.ylabel('some numbers')
    plt.show()
6627223 | <filename>Cas_1/Kinetic Energy/A_Kinetic_energy.py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import xgcm
import cartopy.crs as ccrs
from xmitgcm import open_mdsdataset
from matplotlib.mlab import bivariate_normal
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# MITgcm run directory; U/V are the horizontal velocity components.
dir0 = '/homedata/bderembl/runmit/test_southatlgyre'
ds0 = open_mdsdataset(dir0,iters='all',prefix=['U','V'])
grid = xgcm.Grid(ds0)
print(grid)

# Kinetic energy per component: mask dry cells via the hFac fraction, square,
# then interpolate from the staggered U/V points onto cell centers.
Kinetic_energy_X = grid.interp((ds0.U.where(ds0.hFacW>0)*ds0.hFacW)**2, 'X')
Kinetic_energy_Y = grid.interp((ds0.V.where(ds0.hFacS>0)*ds0.hFacS)**2, 'Y')
Kinetic_energy = Kinetic_energy_X + Kinetic_energy_Y
print('Kinetic_energy')
print(Kinetic_energy)

# Plot time steps 1..50 at the surface level (nz = 0); note index 0 is
# skipped because i is incremented before use.
i = 0
nz = 0
while (i < 50):
    i=i+1
    print(i)
    plt.figure(1)
    ax = plt.subplot(projection=ccrs.PlateCarree());
    # sqrt-scaled color map (PowerNorm gamma=1/2) to emphasise low energies.
    Kinetic_energy[i,nz,:,:].plot.pcolormesh('XC', 'YC', ax=ax,vmin=0,norm=colors.PowerNorm(gamma=1./2.),vmax=10,cmap='ocean');
    plt.title('Kinetic Energy m²/s²')
    plt.text(5,5,i,ha='center',wrap=True)
    ax.coastlines()
    gl = ax.gridlines(draw_labels=True, alpha = 0.5, linestyle='--');
    gl.xlabels_top = False
    gl.ylabels_right = False
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    # Zero-pad the frame number so filenames sort correctly.
    if (i < 10):
        plt.savefig('Kinetic Energy-'+'00'+str(i)+'.png')
        plt.clf()
    elif (i > 9) and (i < 100):
        plt.savefig('Kinetic Energy-'+'0'+str(i)+'.png')
        plt.clf()
    else:
        plt.savefig('Kinetic Energy-'+str(i)+'.png')
        plt.clf()
| StarcoderdataPython |
7698 | <reponame>LukoninDmitryPy/agro_site-2<filename>agro_site/orders/migrations/0001_initial.py
# Generated by Django 2.2.16 on 2022-04-12 13:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the orders app: Chat, Order, OrderItem, Message.

    Auto-generated by Django (makemigrations); applied migrations must keep
    their historical behavior, so only comments are added here.
    """

    initial = True

    dependencies = [
        ('sales_backend', '0001_initial'),
        # Allows AUTH_USER_MODEL to be a swappable custom user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Chat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(choices=[('D', 'Dialog'), ('C', 'Chat')], default='D', max_length=1, verbose_name='Тип')),
                ('members', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Участник')),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=250)),
                ('postal_code', models.CharField(max_length=20)),
                ('city', models.CharField(max_length=100)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('paid', models.BooleanField(default=False)),
                ('status_order', models.CharField(choices=[('В обработке', 'В обработке'), ('Заказ собран', 'Заказ собран'), ('Заказ отправлен', 'Заказ отправлен')], default='В обработке', max_length=20)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Заказы',
                'ordering': ('-created',),
            },
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('quantity', models.PositiveIntegerField(default=1)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='sales_backend.Product')),
                ('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_users', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField(verbose_name='Сообщение')),
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата сообщения')),
                ('is_readed', models.BooleanField(default=False, verbose_name='Прочитано')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь')),
                ('chat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Chat', verbose_name='Чат')),
            ],
            options={
                'ordering': ['pub_date'],
            },
        ),
        # DB-level guard: an OrderItem's buyer may not be its own seller.
        migrations.AddConstraint(
            model_name='orderitem',
            constraint=models.CheckConstraint(check=models.Q(_negated=True, user=django.db.models.expressions.F('seller')), name='dont_buy_yourself'),
        ),
    ]
| StarcoderdataPython |
1951088 | #!/usr/bin/env python3
import argparse
import sys
def xor(data, key):
    """XOR `data` (bytes) against a repeating ASCII `key` string.

    A single NUL terminator byte is appended after the last ciphertext byte
    (useful for shellcode-style payloads); empty input yields empty output
    with no terminator, matching the original behavior.
    """
    key_len = len(key)
    out = bytearray(byte ^ ord(key[i % key_len]) for i, byte in enumerate(data))
    if out:
        out.append(0)
    return bytes(out)
def print_output(ciphertext, pad):
    """Print `ciphertext` as a C-style byte-array initializer, e.g. ``{ 0x01, 0x0f };``.

    Each byte is zero-padded to two hex digits only when `pad` is exactly
    the boolean True; any other value (including the truthy default string
    '--no-pad' from argparse) selects minimal-width hex.
    """
    spec = '02x' if pad == True else '1x'
    hex_bytes = (format(byte, spec) for byte in ciphertext)
    print('{ 0x' + ', 0x'.join(hex_bytes) + ' };')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='xor.py')
    parser.add_argument(
        '-k', help='XOR key'
    )
    parser.add_argument(
        '-f', help='bin file containing payload'
    )
    parser.add_argument(
        '-s', help='String to XOR'
    )
    parser.add_argument(
        '-w', help="File to write encrypted binary payload to"
    )
    # NOTE(review): default is the truthy *string* '--no-pad'; print_output
    # compares ``pad == True``, so only an explicit --pad actually pads.
    parser.add_argument(
        '--pad', dest='pad', help="Pad with 0 so all output is 2 chars wide 0x0 => 0x00", action=argparse.BooleanOptionalAction, default='--no-pad'
    )
    args = parser.parse_args()
    # Require at least one payload source (-f file or -s string).
    if args.f == None and args.s == None:
        parser.print_help()
        sys.exit()
    if args.f != None:
        # 'file' shadows the (py2) builtin name; harmless under Python 3.
        with open(args.f, mode='rb') as file:
            buf = file.read()
            file.close()
    else:
        # latin-1 maps each character to the byte of the same ordinal.
        buf = bytes(args.s, 'latin-1')
    ciphertext = xor(buf, args.k)
    if args.w != None:
        with open(args.w, mode='wb') as file:
            file.write(ciphertext)
            # NOTE(review): missing parentheses — this is a no-op attribute
            # access; the with-block already closes the file.
            file.close
    else:
        print_output(ciphertext, args.pad)
| StarcoderdataPython |
6503856 | import io
from unittest import TestCase
import pytest
import vcr
from ohapi.api import (
SettingsError, oauth2_auth_url, oauth2_token_exchange,
get_page, message, delete_file, upload_file, upload_stream)
# Placeholder credentials/IDs used when replaying the recorded VCR cassettes.
# Real (secret) values may override these via _config_params_api.py below.
parameter_defaults = {
    'CLIENT_ID_VALID': 'validclientid',
    'CLIENT_SECRET_VALID': 'validclientsecret',
    'CODE_VALID': 'validcode',
    'REFRESH_TOKEN_VALID': 'validrefreshtoken',
    'CLIENT_ID_INVALID': 'invalidclientid',
    'CLIENT_SECRET_INVALID': 'invalidclientsecret',
    'CODE_INVALID': 'invalidcode',
    'REFRESH_TOKEN_INVALID': 'invalidrefreshtoken',
    'REDIRECT_URI': 'http://127.0.0.1:5000/authorize_openhumans/',
    'ACCESS_TOKEN': 'accesstoken',
    'ACCESS_TOKEN_EXPIRED': 'accesstokenexpired',
    'ACCESS_TOKEN_INVALID': 'accesstokeninvalid',
    'MASTER_ACCESS_TOKEN': '<PASSWORD>accesstoken',
    'INVALID_PMI1': 'invalidprojectmemberid1',
    'INVALID_PMI2': 'invalidprojectmemberid2',
    'VALID_PMI1': 'validprojectmemberid1',
    'VALID_PMI2': 'validprojectmemberid2',
    'SUBJECT': 'testsubject',
    'MESSAGE': 'testmessage',
    'REMOTE_FILE_INFO': {'download_url': 'https://valid_url/'},
    'TARGET_FILEPATH': 'testing_extras/lorem_ipsum.txt',
    'TARGET_FILEPATH2': 'testing_extras/lorem_ipsum_partial.txt',
    'TARGET_FILEPATH_EMPTY': 'testing_extras/empty_file.txt',
    'FILE_METADATA': {'tags': ['text'], 'description': 'Lorem ipsum text'},
    'FILE_METADATA_INVALID': {},
    'FILE_METADATA_INVALID_WITH_DESC': {'description': 'Lorem ipsum text'},
    'MAX_BYTES': 'maxbytes'
}
"""
_config_params_api.py is not usually present. You can create this to use valid
codes and tokens if you wish to record new cassettes. If present, this file is
used to overwrite `parameter_defaults` with the (hopefully valid, but secret)
items in the file. DO NOT COMMIT IT TO GIT!
To get started, do:
cp _config_params_api.py.example _config_params_api.py
Edit _config_params_api.py to define valid secret codes, tokens, etc.
Run a specific function to (re)create an associated cassette, e.g.:
pytest ohapi/tests/test_api.py::APITest::test_oauth2_token_exchange__valid_code
(This only makes a new cassette if one doesn't already exist!)
"""
try:
    from _config_params_api import params
    for param in params:
        parameter_defaults[param] = params[param]
except ImportError:
    # No secret overrides present — run against the bundled cassettes.
    pass
# Promote every entry to a module-level constant.  At module scope
# locals() is globals(), so this assignment is effective (it would not
# be inside a function).
for param in parameter_defaults:
    locals()[param] = parameter_defaults[param]
# Sensitive request fields scrubbed from recorded cassettes (query and
# POST-body parameters are replaced by the placeholder on the right).
FILTERSET = [('access_token', 'ACCESSTOKEN'), ('client_id', 'CLIENTID'),
             ('client_secret', 'CLIENTSECRET'), ('code', 'CODE'),
             ('refresh_token', 'REFRESHTOKEN'),
             ('invalid_access_token', 'INVALID<PASSWORD>'),
             ('project_member_id', 'PROJECTMEMBERID'),
             ('file_id', 'FILEID')]
my_vcr = vcr.VCR(path_transformer=vcr.VCR.ensure_suffix('.yaml'),
                 cassette_library_dir='ohapi/cassettes',
                 filter_headers=[('Authorization', 'XXXXXXXX')],
                 filter_query_parameters=FILTERSET,
                 filter_post_data_parameters=FILTERSET,)
class APITestOAuthTokenExchange(TestCase):
    """
    Tests for :func:`oauth2_auth_url<ohapi.api.oauth2_auth_url>` and
    :func:`oauth2_token_exchange<ohapi.api.oauth2_token_exchange>`.

    Network interactions are replayed from VCR cassettes (my_vcr).
    """

    def setUp(self):
        # No shared fixtures needed.
        pass

    def test_oauth2_auth_url__no_client_id(self):
        # No client_id argument and none configured -> SettingsError.
        with pytest.raises(SettingsError):
            oauth2_auth_url()

    def test_oauth2_auth_url__with_client_id(self):
        auth_url = oauth2_auth_url(client_id='abcd1234')
        assert auth_url == (
            'https://www.openhumans.org/direct-sharing/projects/'
            'oauth2/authorize/?client_id=abcd1234&response_type=code')

    def test_oauth2_auth_url__with_client_id_and_redirect_uri(self):
        # redirect_uri must be URL-encoded into the query string.
        auth_url = oauth2_auth_url(client_id='abcd1234',
                                   redirect_uri='http://127.0.0.1:5000/auth/')
        assert auth_url == (
            'https://www.openhumans.org/direct-sharing/projects/'
            'oauth2/authorize/?client_id=abcd1234&response_type=code'
            '&redirect_uri=http%3A%2F%2F127.0.0.1%3A5000%2Fauth%2F')

    @my_vcr.use_cassette()
    def test_oauth2_token_exchange__valid_code(self):
        data = oauth2_token_exchange(
            code=CODE_VALID, client_id=CLIENT_ID_VALID,
            client_secret=CLIENT_SECRET_VALID, redirect_uri=REDIRECT_URI)
        assert data == {
            'access_token': 'returnedaccesstoken',
            'expires_in': 36000,
            'refresh_token': 'returned<PASSWORD>',
            'scope': 'american-gut read wildlife open-humans write '
                     'pgp go-viral',
            'token_type': 'Bearer'}

    @my_vcr.use_cassette()
    def test_oauth2_token_exchange__invalid_code(self):
        # NOTE(review): despite the test name, CODE_VALID is passed here —
        # looks like a copy-paste slip (CODE_INVALID was presumably meant);
        # the recorded cassette returns an error either way.
        with self.assertRaises(Exception):
            data = oauth2_token_exchange(
                code=CODE_VALID, client_id=CLIENT_ID_VALID,
                client_secret=CLIENT_SECRET_VALID, redirect_uri=REDIRECT_URI)

    @my_vcr.use_cassette()
    def test_oauth2_token_exchange__invalid_client(self):
        with self.assertRaises(Exception):
            data = oauth2_token_exchange(
                code=CODE_INVALID, client_id=CLIENT_ID_INVALID,
                client_secret=CLIENT_SECRET_VALID, redirect_uri=REDIRECT_URI)

    @my_vcr.use_cassette()
    def test_oauth2_token_exchange__invalid_secret(self):
        with self.assertRaises(Exception):
            data = oauth2_token_exchange(
                code=CODE_VALID, client_id=CLIENT_ID_VALID,
                client_secret=CLIENT_SECRET_INVALID,
                redirect_uri=REDIRECT_URI)

    @my_vcr.use_cassette()
    def test_oauth2_token_exchange__valid_refresh(self):
        data = oauth2_token_exchange(
            refresh_token=REFRESH_TOKEN_VALID, client_id=CLIENT_ID_VALID,
            client_secret=CLIENT_SECRET_VALID, redirect_uri=REDIRECT_URI)
        assert data == {
            'access_token': '<PASSWORD>',
            'expires_in': 36000,
            'refresh_token': '<PASSWORD>',
            'scope': 'american-gut read wildlife open-humans write '
                     'pgp go-viral',
            'token_type': 'Bearer'}

    @my_vcr.use_cassette()
    def test_oauth2_token_exchange__invalid_refresh(self):
        with self.assertRaises(Exception):
            data = oauth2_token_exchange(
                refresh_token=REFRESH_TOKEN_INVALID,
                client_id=CLIENT_ID_VALID,
                client_secret=CLIENT_SECRET_VALID,
                redirect_uri=REDIRECT_URI)
class APITestGetPage(TestCase):
    """
    Tests for :func:`get_page<ohapi.api.get_page>`.
    """

    def setUp(self):
        pass

    @my_vcr.use_cassette()
    def test_get_page_with_results(self):
        # Replays a recorded exchange-member response for a valid token.
        url = ('https://www.openhumans.org/api/direct-sharing/project/'
               'exchange-member/?'
               'access_token={}'.format(ACCESS_TOKEN))
        response = get_page(url)
        self.assertEqual(response['project_member_id'], 'PMI')
        self.assertEqual(response['message_permission'], True)
        self.assertEqual(response['data'], [])
        self.assertEqual(response['username'], 'test_user')
        self.assertEqual(response['sources_shared'], [])
        self.assertEqual(response['created'], 'created_date_time')

    @my_vcr.use_cassette()
    def test_get_page_invalid_access_token(self):
        # NOTE(review): the surrounding try/except swallows any failure of
        # the assertRaises check itself, so this test can never fail —
        # the try/except wrapper should probably be removed.
        try:
            url = ('https://www.openhumans.org/api/direct-sharing/project/'
                   'exchange-member/?access_token={}'.format("invalid_token"))
            self.assertRaises(Exception, get_page, url)
        except Exception:
            pass
class APITestMessage(TestCase):
    """
    Tests for :func:`message<ohapi.api.message>`.

    Network traffic is replayed from VCR cassettes.  NOTE(review): in the
    assertRaises blocks below, the trailing ``assert response.json() ...``
    lines are unreachable once ``message()`` raises — they appear to document
    the expected error payload rather than actually check it.
    """

    def setUp(self):
        pass

    @my_vcr.use_cassette()
    def test_message_valid_access_token(self):
        response = message(subject=SUBJECT, message=MESSAGE,
                           access_token=ACCESS_TOKEN)
        self.assertEqual(response.status_code, 200)

    @my_vcr.use_cassette()
    def test_message_expired_access_token(self):
        with self.assertRaises(Exception):
            response = message(subject=SUBJECT, message=MESSAGE,
                               access_token=ACCESS_TOKEN_EXPIRED)
            assert response.json() == {"detail": "Expired token."}

    @my_vcr.use_cassette()
    def test_message_invalid_access_token(self):
        with self.assertRaises(Exception):
            response = message(subject=SUBJECT, message=MESSAGE,
                               access_token=ACCESS_TOKEN_INVALID)
            assert response.json() == {"detail": "Invalid token."}

    @my_vcr.use_cassette()
    def test_message_all_members_true_project_member_id_none(self):
        # Broadcasting to all members requires no explicit member IDs.
        response = message(all_members=True, subject=SUBJECT, message=MESSAGE,
                           access_token=ACCESS_TOKEN)
        self.assertEqual(response.status_code, 200)

    @my_vcr.use_cassette()
    def test_message_all_members_true_project_member_id_not_none(self):
        # all_members=True combined with explicit IDs is contradictory.
        self.assertRaises(Exception, message, all_members=True,
                          project_member_ids=['abcdef', 'sdf'],
                          subject=SUBJECT, message=MESSAGE,
                          access_token=ACCESS_TOKEN)

    @my_vcr.use_cassette()
    def test_message_all_members_false_projectmemberid_has_invalid_char(self):
        with self.assertRaises(Exception):
            response = message(project_member_ids=['abcdef1', 'test'],
                               subject=SUBJECT, message=MESSAGE,
                               access_token=MASTER_ACCESS_TOKEN)
            assert response.json() == {"errors":
                                       {"project_member_ids":
                                        ["Project member IDs are always 8" +
                                         " digits long."]}}

    @my_vcr.use_cassette()
    def test_message_all_members_false_projectmemberid_has_invalid_digit(self):
        with self.assertRaises(Exception):
            response = message(project_member_ids=[INVALID_PMI1,
                                                   INVALID_PMI2],
                               subject=SUBJECT, message=MESSAGE,
                               access_token=MASTER_ACCESS_TOKEN)
            assert response.json() == {"errors":
                                       {"project_member_ids":
                                        ["Invalid project member ID(s):" +
                                         " invalidPMI2"]}}

    @my_vcr.use_cassette()
    def test_message_all_members_false_project_member_id_not_none_valid(self):
        response = message(project_member_ids=[VALID_PMI1, VALID_PMI2],
                           subject=SUBJECT, message=MESSAGE,
                           access_token=ACCESS_TOKEN)
        self.assertEqual(response.status_code, 200)
class APITestDeleteFile(TestCase):
    """
    Tests for :func:`delete_file<ohapi.api.delete_file>`.

    NOTE(review): as in APITestMessage, asserts placed after the call
    inside ``assertRaises`` blocks are unreachable once the call raises.
    """

    def setUp(self):
        pass

    @my_vcr.use_cassette()
    def test_delete_file__invalid_access_token(self):
        with self.assertRaises(Exception):
            response = delete_file(
                access_token=ACCESS_TOKEN_INVALID,
                project_member_id='59319749',
                all_files=True)
            assert response.json() == {"detail": "Invalid token."}

    @my_vcr.use_cassette()
    def test_delete_file_project_member_id_given(self):
        response = delete_file(access_token=ACCESS_TOKEN,
                               project_member_id='59319749', all_files=True)
        self.assertEqual(response.status_code, 200)

    @my_vcr.use_cassette()
    def test_delete_file_project_member_id_invalid(self):
        # Member IDs are 8 digits; '1234' is rejected by the API.
        with self.assertRaises(Exception):
            response = delete_file(access_token=ACCESS_TOKEN, all_files=True,
                                   project_member_id='1234')
            self.assertEqual(response.status_code, 400)

    @my_vcr.use_cassette()
    def test_delete_file__expired_access_token(self):
        with self.assertRaises(Exception):
            response = delete_file(access_token=ACCESS_TOKEN_EXPIRED,
                                   all_files=True,
                                   project_member_id='59319749')
            assert response.json() == {"detail": "Expired token."}

    @my_vcr.use_cassette()
    def test_delete_file__valid_access_token(self):
        response = delete_file(
            access_token=ACCESS_TOKEN, project_member_id='59319749',
            all_files=True)
        self.assertEqual(response.status_code, 200)
class APITestUpload(TestCase):
    """
    Tests for :func:`upload_file<ohapi.api.upload_file>` and
    :func:`upload_stream<ohapi.api.upload_stream>`.

    File and stream upload testing use "lorem_ipsum.txt" and other files
    in the testing_extra directory. All HTTP exchanges replay from VCR
    cassettes.
    """

    def setUp(self):
        # No fixtures needed; each test builds its own request.
        pass

    @my_vcr.use_cassette()
    def test_upload_valid_file_valid_access_token(self):
        # Happy path: a valid file with valid metadata and token uploads.
        response = upload_file(
            target_filepath=TARGET_FILEPATH,
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1)
        self.assertEqual(response.status_code, 200)
        assert response.json() == {'size': 446, 'status': 'ok'}

    @my_vcr.use_cassette()
    def test_upload_large_file_valid_access_token(self):
        # max_bytes=0 forces the local size check to fail immediately.
        self.assertRaisesRegexp(
            Exception, 'Maximum file size exceeded', upload_file,
            target_filepath=TARGET_FILEPATH,
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1,
            max_bytes=0)

    @my_vcr.use_cassette()
    def test_upload_file_invalid_access_token(self):
        # Bug fix: the original used assertRaises(Exception, 'Invalid token',
        # upload_file, ...), which treats the string as the callable to
        # invoke, so upload_file was never actually exercised.
        self.assertRaisesRegexp(
            Exception, 'Invalid token', upload_file,
            target_filepath=TARGET_FILEPATH,
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN_INVALID,
            project_member_id=VALID_PMI1)

    @my_vcr.use_cassette()
    def test_upload_file_expired_access_token(self):
        # Restored corrupted identifier ('ACCESS_TOKEN_EXPI<PASSWORD>') to
        # ACCESS_TOKEN_EXPIRED, matching its use elsewhere in this module.
        self.assertRaisesRegexp(
            Exception, 'Expired token', upload_file,
            target_filepath=TARGET_FILEPATH,
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN_EXPIRED,
            project_member_id=VALID_PMI1)

    @my_vcr.use_cassette()
    def test_upload_file_invalid_metadata_with_description(self):
        # Metadata that has a description but no tags must be rejected.
        self.assertRaisesRegexp(
            Exception, 'tags.+ is a required field', upload_file,
            target_filepath=TARGET_FILEPATH,
            metadata=FILE_METADATA_INVALID_WITH_DESC,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1)

    @my_vcr.use_cassette()
    def test_upload_file_invalid_metadata_without_description(self):
        # Restored corrupted identifier ('<PASSWORD>_TOKEN') to ACCESS_TOKEN:
        # this case tests a metadata failure with an otherwise valid token.
        self.assertRaisesRegexp(
            Exception, 'description.+ is a required field of the metadata',
            upload_file,
            target_filepath=TARGET_FILEPATH,
            metadata=FILE_METADATA_INVALID,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1)

    @my_vcr.use_cassette()
    def test_upload_file_empty(self):
        # Restored corrupted identifier ('<PASSWORD>_TOKEN') to ACCESS_TOKEN.
        self.assertRaisesRegexp(
            Exception, 'The submitted file is empty.',
            upload_file,
            target_filepath=TARGET_FILEPATH_EMPTY,
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1)

    def test_upload_file_remote_info_not_none_valid(self):
        """
        Test assumes remote_file_info['download_url'] matches 'lorum_ipsum.txt'
        """
        # Restored corrupted identifier ('<PASSWORD>_TOKEN') to ACCESS_TOKEN.
        with my_vcr.use_cassette('ohapi/cassettes/test_upload_file_' +
                                 'remote_info_not_none_valid.yaml') as cass:
            upload_file(target_filepath=TARGET_FILEPATH,
                        metadata=FILE_METADATA,
                        access_token=ACCESS_TOKEN,
                        project_member_id=VALID_PMI1,
                        remote_file_info=REMOTE_FILE_INFO)
            self.assertEqual(cass.responses[0][
                "status"]["code"], 200)
            self.assertEqual(cass.responses[0][
                "headers"]["Content-Length"], ['446'])

    @my_vcr.use_cassette()
    def test_upload_file_remote_info_not_none_invalid_access_token(self):
        """
        Test assumes remote_file_info['download_url'] matches 'lorum_ipsum.txt'
        """
        # Note: alternate file needed to trigger an attempted upload.
        self.assertRaisesRegexp(
            Exception, 'Invalid token', upload_file,
            target_filepath=TARGET_FILEPATH2,
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN_INVALID,
            project_member_id=VALID_PMI1,
            remote_file_info=REMOTE_FILE_INFO)

    @my_vcr.use_cassette()
    def test_upload_file_remote_info_not_none_expired_access_token(self):
        # Note: alternate file needed to trigger an attempted upload.
        self.assertRaisesRegexp(
            Exception, 'Expired token', upload_file,
            target_filepath=TARGET_FILEPATH2,
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN_EXPIRED,
            project_member_id=VALID_PMI1,
            remote_file_info=REMOTE_FILE_INFO)

    @my_vcr.use_cassette()
    def test_upload_file_empty_remote_info_not_none(self):
        # An empty file must be rejected even when remote info is supplied.
        self.assertRaisesRegexp(
            Exception, 'The submitted file is empty.', upload_file,
            target_filepath=TARGET_FILEPATH_EMPTY,
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1,
            remote_file_info=REMOTE_FILE_INFO)

    @my_vcr.use_cassette()
    def test_upload_file_remote_info_not_none_matching_file_size(self):
        # When the remote copy already matches in size, upload is skipped.
        result = upload_file(
            target_filepath=TARGET_FILEPATH,
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1,
            remote_file_info=REMOTE_FILE_INFO)
        self.assertRegexpMatches(
            result, 'remote exists with matching file size')

    @my_vcr.use_cassette()
    def test_upload_file_remote_info_not_none_invalid_metadata_with_desc(self):
        # Note: alternate file needed to trigger an attempted upload.
        self.assertRaisesRegexp(
            Exception, 'tags.+ is a required field of the metadata',
            upload_file,
            target_filepath=TARGET_FILEPATH2,
            metadata=FILE_METADATA_INVALID_WITH_DESC,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1,
            remote_file_info=REMOTE_FILE_INFO)

    @my_vcr.use_cassette()
    def test_upload_file_remote_info_not_none_invalid_metadata(self):
        # Metadata missing its description must be rejected.
        self.assertRaisesRegexp(
            Exception, 'description.+ is a required field of the metadata',
            upload_file,
            target_filepath=TARGET_FILEPATH2,
            metadata=FILE_METADATA_INVALID,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1,
            remote_file_info=REMOTE_FILE_INFO)

    @my_vcr.use_cassette()
    def test_upload_stream_valid(self):
        # Restored corrupted identifier ('<PASSWORD>_TOKEN') to ACCESS_TOKEN.
        stream = None
        with open(TARGET_FILEPATH, 'rb') as testfile:
            testdata = testfile.read()
            stream = io.BytesIO(testdata)
        response = upload_stream(
            stream=stream,
            filename=TARGET_FILEPATH.split('/')[-1],
            metadata=FILE_METADATA,
            access_token=ACCESS_TOKEN,
            project_member_id=VALID_PMI1)
        self.assertEqual(response.status_code, 200)
        assert response.json() == {'size': 446, 'status': 'ok'}
| StarcoderdataPython |
9695673 | <gh_stars>0
"""
Module: 'uasyncio' on esp8266 v1.9.3
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.0.0(5a875ba)', version='v1.9.3-8-g63826ac5c on 2017-11-01', machine='ESP module with ESP8266')
# Stubber: 1.1.2 - updated
from typing import Any
DEBUG = 0  # stub flag captured from the device; has no effect here


class EventLoop:
    """Auto-generated stub of uasyncio's event loop; every method is a no-op."""

    def call_at_(self, *argv) -> Any:
        pass

    def call_later(self, *argv) -> Any:
        pass

    def call_later_ms(self, *argv) -> Any:
        pass

    def call_soon(self, *argv) -> Any:
        pass

    def close(self, *argv) -> Any:
        pass

    def create_task(self, *argv) -> Any:
        pass

    def run_forever(self, *argv) -> Any:
        pass

    def run_until_complete(self, *argv) -> Any:
        pass

    def stop(self, *argv) -> Any:
        pass

    def time(self, *argv) -> Any:
        pass

    def wait(self, *argv) -> Any:
        pass
class IORead:
    """Auto-generated empty stub (no members captured on the device)."""


class IOReadDone:
    """Auto-generated empty stub (no members captured on the device)."""


class IOWrite:
    """Auto-generated empty stub (no members captured on the device)."""


class IOWriteDone:
    """Auto-generated empty stub (no members captured on the device)."""
class PollEventLoop:
    """Auto-generated stub of the poll-based event loop; methods are no-ops."""

    def add_reader(self, *argv) -> Any:
        pass

    def add_writer(self, *argv) -> Any:
        pass

    def remove_reader(self, *argv) -> Any:
        pass

    def remove_writer(self, *argv) -> Any:
        pass

    def wait(self, *argv) -> Any:
        pass
class SleepMs:
    """Auto-generated empty stub (no members captured on the device)."""


class StopLoop:
    """Auto-generated empty stub (no members captured on the device)."""
class StreamReader:
    """Auto-generated stub; async members were captured as None placeholders."""

    # Placeholders for coroutine attributes the stubber could not introspect.
    aclose = None
    read = None
    readexactly = None
    readline = None
class StreamWriter:
    """Auto-generated stub; async members were captured as None placeholders."""

    # Placeholders for coroutine attributes the stubber could not introspect.
    aclose = None
    awrite = None
    awriteiter = None

    def get_extra_info(self, *argv) -> Any:
        pass
class SysCall:
    """Auto-generated stub; `handle` is a no-op."""

    def handle(self, *argv) -> Any:
        pass


class SysCall1:
    """Auto-generated empty stub (no members captured on the device)."""
# Remaining module-level names from the generated stub. Functions are no-op
# placeholders; variables were captured as None because the stubber could not
# introspect their values.
def Task():
    pass

_socket = None
core = None

def coroutine():
    pass

def ensure_future():
    pass

def get_event_loop():
    pass

log = None
open_connection = None
select = None

def set_debug():
    pass

sleep = None
sleep_ms = None
start_server = None
time = None

class type_gen:
    """Auto-generated stub of the generator type; methods are no-ops."""

    def close(self, *argv) -> Any:
        pass

    def send(self, *argv) -> Any:
        pass

    def throw(self, *argv) -> Any:
        pass

uasyncio = None
uerrno = None
utimeq = None
| StarcoderdataPython |
240034 | <reponame>sky-2002/weaviate-examples<gh_stars>0
import weaviate
import json
import helper
# Connect to a local Weaviate instance and (re)create the Podcast schema.
client = weaviate.Client("http://localhost:9999")
client.timeout_config = (10000)  # generous timeout for large batch imports

# Drop any existing schema so the import starts clean.
client.schema.delete_all()
schema = {
    "classes": [
        {
            "class": "Podcast",
            "properties": [
                {
                    "name": "title",
                    "dataType": ["text"]
                },
                {
                    "name": "transcript",
                    "dataType": ["text"]
                }
            ]
        }
    ]
}
client.schema.create(schema)

# Load the podcast records to import.
with open("data/podcast_ds.json", 'r') as f:
    datastore = json.load(f)
def add_podcasts(batch_size=1):
    """Queue every record in `datastore` into Weaviate, flushing per batch.

    Uses a deterministic UUID derived from title+url so re-imports are
    idempotent. Errors reported by a flushed batch are logged via `helper`.
    """
    pending = 0
    for record in datastore:
        uuid = helper.generate_uuid('podcast', record["title"] + record["url"])
        client.batch.add_data_object(
            {
                "title": record["title"],
                "transcript": record["transcript"]
            },
            "Podcast",
            uuid,
        )
        pending += 1
        if pending < batch_size:
            continue
        # Batch is full: flush it and log any per-object errors.
        for outcome in client.batch.create_objects():
            if outcome['result'] != {}:
                helper.log(outcome['result'])
        helper.log(str(record["title"]) + ' imported')
        pending = 0
    # Flush whatever did not fill a whole batch.
    client.batch.create_objects()

add_podcasts(1)
| StarcoderdataPython |
1971351 | from django.test import LiveServerTestCase
from selenium import webdriver
import unittest
import os
from unittest import skipIf
class BlogTest(LiveServerTestCase):
    """Selenium smoke test for the blog home page."""

    def setUp(self):
        # A fresh browser per test keeps state isolated.
        self.driver = webdriver.Chrome()

    def tearDown(self):
        self.driver.quit()

    # Skipped on Travis CI, where no Chrome/display is available.
    @skipIf(os.getenv('DJANGO_SETTINGS_MODULE') == 'core.settings.travis', 'Travis')
    def test_blog_hp(self):
        """ Testing opening HP blog page. """
        self.driver.get('http://localhost:8000/blog')
        # NOTE(review): "<NAME>" looks like a dataset anonymization
        # placeholder — restore the real page title for this assertion.
        self.assertIn("<NAME> - Le Blog", self.driver.title)
| StarcoderdataPython |
6447392 | <filename>utils.py<gh_stars>1-10
#coding:UTF-8
from contextlib import contextmanager
import cv2
import time
# Capture the current screen and resize it into an (84*84*1) grayscale image.
def resizeBirdrToAtari(observation):
    """Resize a BGR frame to 84x84 and binarize it (Atari-DQN-style input).

    Despite the header comment, the output is binary (0/255), not true
    grayscale, because every non-black pixel is thresholded to 255.
    """
    observation = cv2.cvtColor(cv2.resize(observation, (84, 84)), cv2.COLOR_BGR2GRAY)
    _, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)
    return observation
@contextmanager
def trainTimer(name):
    """Context manager that prints the wall-clock duration of the wrapped block.

    Improvements over the original:
    - time.perf_counter() (monotonic, high resolution) replaces time.time(),
      which can jump with system clock adjustments;
    - try/finally ensures the duration is reported even when the block raises.
    """
    start = time.perf_counter()
    try:
        yield
    finally:
        end = time.perf_counter()
        print('{} COST_Time:{}'.format(name, end - start))
| StarcoderdataPython |
83216 | from collections import namedtuple
class PackageError(Exception):
    """Exception to be raised when user wants to exit on error."""


class RecipeError(Exception):
    """Exception to be raised when user wants to exit on error."""
class Error(namedtuple('Error', ['file', 'code', 'message'])):
    """Immutable error record (file, code, message) shown to the user."""

    def __str__(self):
        """Render as 'file: code message' for readable console output."""
        return f'{self.file}: {self.code} {self.message}'
| StarcoderdataPython |
3278627 | <filename>tests/dhcpv4/kea_only/config_backend/test_relay.py
"""Kea config backend testing relay."""
import pytest
from dhcp4_scen import get_address, get_rejected
from cb_model import setup_server_for_config_backend_cmds
# Marks applied to every test in this module.
pytestmark = [pytest.mark.v4,
              pytest.mark.v6,
              pytest.mark.kea_only,
              pytest.mark.controlchannel,
              pytest.mark.hook,
              pytest.mark.config_backend]


def test_relay_in_subnet(dhcp_version):
    """Only clients behind relays listed on the subnet get leases."""
    relay_addr_1 = "10.0.0.1" if dhcp_version == 'v4' else 'fc00:db20:35b:7399::5'
    relay_addr_2 = "10.0.0.2" if dhcp_version == 'v4' else 'fc00:e968:6179::de52:7100'
    exp_addr_1 = '192.168.50.1' if dhcp_version == 'v4' else '2001:db8:1::1'
    exp_addr_2 = '192.168.50.2' if dhcp_version == 'v4' else '2001:db8:1::2'
    exp_addr_3 = '192.168.50.3' if dhcp_version == 'v4' else '2001:db8:1::3'

    cfg = setup_server_for_config_backend_cmds()

    # create a subnet with specific IP address for relay agent
    subnet_cfg, _ = cfg.add_subnet(relay={"ip-addresses": [relay_addr_1]})

    # client 1 behind relay agent 1 should get a lease
    get_address(mac_addr='00:00:00:00:00:01', relay_addr=relay_addr_1, exp_addr=exp_addr_1)

    # client 2 behind unknown relay agent 2 should NOT get any lease
    get_rejected(mac_addr='00:00:00:00:00:02', relay_addr=relay_addr_2)

    # add another relay agent 2
    subnet_cfg.update(relay={"ip-addresses": [relay_addr_1, relay_addr_2]})

    # client 2 now should get a lease
    get_address(mac_addr='00:00:00:00:00:02', relay_addr=relay_addr_2, exp_addr=exp_addr_2)

    # another client 3 behind relay agent 1 still should be able to get a lease
    get_address(mac_addr='00:00:00:00:00:03', relay_addr=relay_addr_1, exp_addr=exp_addr_3)
def test_relay_in_network(dhcp_version):
    """Relay addresses on a shared network gate leases; subnet overrides win."""
    relay_addr_1 = "10.0.0.1" if dhcp_version == 'v4' else 'fc00:db20:35b:7399::5'
    relay_addr_2 = "10.0.0.2" if dhcp_version == 'v4' else 'fc00:e968:6179::de52:7100'
    # NOTE(review): in the v6 branch relay_addr_3 equals relay_addr_1, so the
    # subnet-override scenario does not use a distinct relay for v6 — this
    # looks like an anonymization artifact; confirm the intended address.
    relay_addr_3 = "10.0.0.3" if dhcp_version == 'v4' else 'fc00:db20:35b:7399::5'
    exp_addr_1 = '192.168.50.1' if dhcp_version == 'v4' else '2001:db8:1::1'
    exp_addr_2 = '192.168.50.2' if dhcp_version == 'v4' else '2001:db8:1::2'
    exp_addr_3 = '192.168.50.3' if dhcp_version == 'v4' else '2001:db8:1::3'
    exp_addr_4 = '192.168.50.4' if dhcp_version == 'v4' else '2001:db8:1::4'

    cfg = setup_server_for_config_backend_cmds()

    # create a network with specific IP address for relay agent
    network_cfg, _ = cfg.add_network(relay={"ip-addresses": [relay_addr_1]})
    subnet_cfg, _ = cfg.add_subnet(network=network_cfg)

    # client 1 behind relay agent 1 should get a lease
    get_address(mac_addr='00:00:00:00:00:01', relay_addr=relay_addr_1, exp_addr=exp_addr_1)

    # client 2 behind unknown relay agent 2 should NOT get any lease
    get_rejected(mac_addr='00:00:00:00:00:02', relay_addr=relay_addr_2)

    # add another relay agent 2
    network_cfg.update(relay={"ip-addresses": [relay_addr_1, relay_addr_2]})

    # client 2 now should get a lease
    get_address(mac_addr='00:00:00:00:00:02', relay_addr=relay_addr_2, exp_addr=exp_addr_2)

    # another client 3 behind relay agent 1 still should be able to get a lease
    get_address(mac_addr='00:00:00:00:00:03', relay_addr=relay_addr_1, exp_addr=exp_addr_3)

    # and now override relay on subnet level to relay agent 3
    subnet_cfg.update(relay={"ip-addresses": [relay_addr_3]})

    # client 4 now should get a lease
    get_address(mac_addr='00:00:00:00:00:04', relay_addr=relay_addr_3, exp_addr=exp_addr_4)

    # another client 5 behind relay agent 1 now should NOT be able to get any lease
    get_rejected(mac_addr='00:00:00:00:00:03', relay_addr=relay_addr_1)
| StarcoderdataPython |
4965812 | <reponame>otimgren/centrex-molecule-trajectories<filename>src/trajectories/beamline_elements/electrostatic_lens.py
import pickle
from abc import ABC, abstractmethod
from dataclasses import dataclass
from os.path import exists
from pathlib import Path
import h5py
import matplotlib.pyplot as plt
import numpy as np
from centrex_TlF.states import State, UncoupledBasisState
from matplotlib.patches import Rectangle
from scipy.constants import g
from scipy.interpolate import interp1d
from ..beamline import Beamline
from ..molecule import Molecule
from ..stark_potential import stark_potential
from .apertures import BeamlineElement
__all__ = ["ElectrostaticLens"]
@dataclass
class ElectrostaticLens(BeamlineElement):
    """
    Class used for propagating molecules through the electrostatic lens.
    """

    d: float = 1.75 * 0.0254  # Bore diameter of lens in m
    dz: float = 1e-3  # Spatial size of integration step taken inside the lens
    V: float = 27.6e3  # Voltage on lens electrodes
    a_interp: interp1d = None  # lazily built radial-acceleration interpolator
    # Molecular state assumed for the molecule when calculating the trajectory
    # inside the electrostatic lens
    state: State = 1 * UncoupledBasisState(
        J=2,
        mJ=0,
        I1=1 / 2,
        m1=1 / 2,
        I2=1 / 2,
        m2=-1 / 2,
        Omega=0,
        P=+1,
        electronic_state="X",
    )
    # Molecular mass in kg for the molecule propagating through the lens
    mass: float = (204.38 + 19.00) * 1.67e-27

    def propagate_through(self, molecule):
        """
        Propagates a molecule through the electrostatic lens.
        """
        # Time taken to reach the start of the lens from the current position
        delta_t = (self.z0 - molecule.x()[2]) / molecule.v()[2]

        # Position and velocity of the molecule at the start of the element
        molecule.update_trajectory(delta_t)

        # If the molecule is outside the lens bore it is considered dead
        rho = np.sqrt(np.sum(molecule.x()[:2] ** 2))
        if rho > self.d / 2:
            molecule.set_dead()
            molecule.set_aperture_hit("Lens entrance")
            return

        # Propagate the molecule inside the lens (RK4 time integration)
        self.propagate_inside_lens(molecule)
        if not molecule.alive:
            return

        # Make sure the molecule ends up exactly at the end of the lens
        delta_t = (self.z1 - molecule.x()[2]) / molecule.v()[2]
        molecule.update_trajectory(delta_t)

    def propagate_inside_lens(self, molecule):
        """
        Calculates and updates the trajectory of a molecule while it is inside
        the electrostatic lens, using classical 4th-order Runge-Kutta.
        """
        # Index counter into the molecule's preallocated trajectory arrays
        n = molecule.trajectory.n

        # Number of integration steps to take inside the lens and the timestep
        N_steps = int(np.rint(self.L / self.dz))
        dt = self.dz / molecule.v()[2]

        # Loop over timesteps (k* are velocity estimates, l* accelerations)
        for i in range(N_steps):
            x = molecule.x()
            k1 = molecule.v()
            l1 = self.lens_acceleration(x)
            k2 = k1 + dt * l1 / 2
            # Bug fix: the midpoint acceleration must be evaluated at a HALF
            # step (x + dt*k1/2), not a full step, for standard RK4.
            l2 = self.lens_acceleration(x + dt * k1 / 2)
            k3 = k1 + dt * l2 / 2
            l3 = self.lens_acceleration(x + dt * k2 / 2)
            k4 = k1 + dt * l3
            l4 = self.lens_acceleration(x + dt * k3)

            # Update the molecule trajectory with the weighted RK4 average
            molecule.trajectory.x[n, :] = x + dt * (k1 + 2 * k2 + 2 * k3 + k4) / 6
            molecule.trajectory.v[n, :] = k1 + dt * (l1 + 2 * l2 + 2 * l3 + l4) / 6
            molecule.trajectory.t[n] = molecule.trajectory.t[n - 1] + dt
            molecule.trajectory.a[n, :] = l1
            n += 1

        molecule.trajectory.n = n

        # Cut molecules that ended up outside the allowed region
        rho = np.sqrt(np.sum(molecule.x()[:2] ** 2))
        if rho > self.d / 2:
            molecule.set_dead()
            molecule.set_aperture_hit("Inside lens")
            return

    def N_steps(self):
        """
        Number of steps when going through an electrostatic lens: 1 for
        getting to the entrance plus int(np.rint(self.L/self.dz)) for
        propagating inside and exiting the lens.
        """
        return 1 + int(np.rint(self.L / self.dz))

    def plot(self, axes):
        """
        Plot the lens electrodes (0.02 m thick rectangles) on both axes.
        """
        rect1 = Rectangle((self.z0, self.d / 2), self.z1 - self.z0, 0.02, color="b")
        rect2 = Rectangle(
            (self.z0, -self.d / 2 - 0.02), self.z1 - self.z0, 0.02, color="b"
        )
        axes[0].add_patch(rect1)
        axes[0].add_patch(rect2)

        rect3 = Rectangle((self.z0, self.d / 2), self.z1 - self.z0, 0.02, color="b")
        rect4 = Rectangle(
            (self.z0, -self.d / 2 - 0.02), self.z1 - self.z0, 0.02, color="b"
        )
        axes[1].add_patch(rect3)
        axes[1].add_patch(rect4)

    def save_to_hdf(self, filepath: Path, parent_group_path: str) -> None:
        """
        Saves the beamline element to an hdf file.
        """
        # Open the hdf file in append mode
        with h5py.File(filepath, "a") as f:
            # Create a group for the beamline element
            group_path = parent_group_path + "/" + self.name
            f.create_group(group_path)

            # Write the name of the beamline element class into the file
            f[group_path].attrs["class"] = type(self).__name__

            # Save each attribute: plain values directly, `state` via repr,
            # and skip the (non-serializable) interpolation function.
            # NOTE(review): `and value` sends falsy values (e.g. 0) to the
            # repr branch — confirm that is intended.
            for key, value in vars(self).items():
                if key not in ["state", "a_interp"] and value:
                    f[group_path].attrs[key] = value
                elif key == "a_interp":
                    pass
                else:
                    f[group_path].attrs[key] = value.__repr__()

    def lens_acceleration(self, x):
        """
        Calculates the acceleration (in m/s^2) for a molecule inside the
        electrostatic lens. To speed this up, an interpolation function that
        gives acceleration as a function of radial position is cached on disk
        the first time this runs for a given lens configuration and state.
        """
        if not self.a_interp:
            # Relevant quantum numbers for calculating the acceleration
            J = self.state.find_largest_component().J
            mJ = self.state.find_largest_component().mJ

            # Check if the interpolation function was already saved to file
            filename = (
                f"acceleration_interp_d={self.d:.4f}m_V={self.V:.1f}V_J={J}_mJ={mJ}.pkl"
            )
            INTERP_DIR = "./interpolation_functions/"
            if exists(INTERP_DIR + filename):
                with open(INTERP_DIR + filename, "rb") as f:
                    self.a_interp = pickle.load(f)

            # If not, calculate the acceleration as a function of position
            else:
                print(
                    "Interpolation function for lens acceleration not found, making new one"
                )
                # Array of radius values at which to calculate the Stark shift
                dr = 1e-4
                r_values = np.linspace(
                    0, self.d / 2 * 1.01, int(np.round(self.d / 2 / dr))
                )

                # Convert radius values into electric field values
                # (assuming E = 2*V/R^2 * r within the lens radius)
                E_values = 2 * self.V / ((self.d / 2) ** 2) * r_values  # E is in V/m

                # Stark shift at each position; the /100 presumably converts
                # V/m -> V/cm for stark_potential — confirm its unit contract.
                V_stark = stark_potential(self.state, E_values / 100)

                # Radial acceleration at each radius from a = -(dV_stark/dr)/m
                a_values = -np.gradient(V_stark, dr) / self.mass

                # Interpolating function over radial position
                self.a_interp = interp1d(r_values, a_values)

                # Save the interpolation function to file for next time
                with open(INTERP_DIR + filename, "wb+") as f:
                    pickle.dump(self.a_interp, f)

        # Acceleration at the molecule's radial position
        r = np.sqrt(np.sum(x[:2] ** 2))
        a_r = self.a_interp(r)
        a = np.zeros((3,))
        if r != 0:
            # Resolve the radial acceleration into Cartesian components
            a[0] = a_r * x[0] / r
            a[1] = a_r * x[1] / r
            a[2] = 0
        # Gravity acts along -y regardless of radial position
        a[1] -= g

        return a
| StarcoderdataPython |
5005391 | <reponame>m4reQ/spyke
import dataclasses
import logging
import typing as t
import functools
from os import path
import numpy as np
import glm
from OpenGL import GL
from spyke.enums import ShaderType
from spyke.exceptions import GraphicsException, SpykeException
from spyke.graphics import gl
_logger = logging.getLogger(__name__)
@dataclasses.dataclass
class ShaderSpec:
    """Filesystem locations of the GLSL sources making up one shader program.

    Vertex and fragment stages are mandatory; the remaining stages are
    optional and only attached when a filepath is provided.
    """
    vertex_filepath: str
    fragment_filepath: str
    geometry_filepath: t.Optional[str] = None
    compute_filepath: t.Optional[str] = None
    tess_eval_filepath: t.Optional[str] = None
    tess_control_filepath: t.Optional[str] = None
class Shader(gl.GLObject):
    """OpenGL shader program wrapper.

    Compiles and links the stages listed in a ShaderSpec, exposes uniform
    setters, and caches attribute/uniform locations per instance.
    """

    def __init__(self, shader_spec: ShaderSpec, auto_compile: bool=True):
        super().__init__()

        self._id: int = 0
        self._stages: t.List[int] = []
        # Per-instance location caches. (The original decorated the lookup
        # methods with functools.lru_cache, which keys on `self` and keeps
        # every Shader instance alive for the lifetime of the cache.)
        self._uniforms: t.Dict[str, int] = {}
        self._attribs: t.Dict[str, int] = {}
        self._uniform_blocks: t.Dict[str, int] = {}
        self._is_compiled = False

        self._spec = shader_spec

        if auto_compile:
            self.compile()

    def compile(self) -> None:
        """Create, compile, attach and link all stages from the spec."""
        if self._is_compiled:
            _logger.warning('Shader program is already compiled.')
            return

        self._id = gl.create_program()
        assert self._id != 0, 'Could not create shader program'

        # Vertex and fragment stages are mandatory.
        self._create_stage_and_attach(self._spec.vertex_filepath, ShaderType.VertexShader)
        self._create_stage_and_attach(self._spec.fragment_filepath, ShaderType.FragmentShader)

        # Optional stages are attached only when a filepath is provided.
        if self._spec.geometry_filepath is not None:
            self._create_stage_and_attach(self._spec.geometry_filepath, ShaderType.GeometryShader)

        if self._spec.compute_filepath is not None:
            self._create_stage_and_attach(self._spec.compute_filepath, ShaderType.ComputeShader)

        if self._spec.tess_eval_filepath is not None:
            self._create_stage_and_attach(self._spec.tess_eval_filepath, ShaderType.TessEvaluationShader)

        if self._spec.tess_control_filepath is not None:
            self._create_stage_and_attach(self._spec.tess_control_filepath, ShaderType.TessControlShader)

        GL.glLinkProgram(self.id)

        info_log = GL.glGetProgramInfoLog(self.id)
        if info_log != '':
            raise SpykeException(f'Shader program {self.id} link failure:\n{info_log}')

        # Individual stage objects are no longer needed after a link.
        self._cleanup_stages()
        self._is_compiled = True

        _logger.info('Shader program %d compiled succesfully.', self.id)

    def _cleanup_stages(self) -> None:
        """Detach and delete every compiled stage object."""
        for shader in self._stages:
            GL.glDetachShader(self.id, shader)
            GL.glDeleteShader(shader)

        self._stages.clear()

    def _create_stage_and_attach(self, filepath: str, _type: ShaderType) -> None:
        """Compile the GLSL file at `filepath` as stage `_type` and attach it."""
        if self._is_compiled:
            _logger.error('Cannot attach shader to already compiled program.')
            return

        if not path.exists(filepath):
            raise SpykeException(f'Shader file {filepath} not found.')

        with open(filepath, 'r') as f:
            source = f.read()

        shader = GL.glCreateShader(_type)
        GL.glShaderSource(shader, source)

        GL.glCompileShader(shader)
        info_log = GL.glGetShaderInfoLog(shader)
        if info_log != '':
            raise SpykeException(f'Shader {shader} of type {_type.name} compilation failure:\n{info_log}')

        GL.glAttachShader(self.id, shader)
        self._stages.append(shader)

    def validate(self) -> None:
        """Run glValidateProgram and raise when the driver reports problems."""
        if not self._is_compiled:
            _logger.error('Cannot validate not compiled shader program.')
            return

        GL.glValidateProgram(self.id)

        info_log = GL.glGetProgramInfoLog(self.id)
        if info_log != '':
            raise SpykeException(f'Shader program {self.id} validation failure:\n{info_log}')

        _logger.debug('%s has been validated succesfully.', self)

    def use(self) -> None:
        """Bind this program for subsequent draw calls."""
        GL.glUseProgram(self.id)

    def _delete(self) -> None:
        GL.glDeleteProgram(self.id)

    def get_attrib_location(self, name: str) -> int:
        """Return (and cache) the location of vertex attribute `name`."""
        if name in self._attribs:
            return self._attribs[name]

        loc = GL.glGetAttribLocation(self.id, name)
        assert loc != -1, f'Cannot find attribute named "{name}".'

        self._attribs[name] = loc
        return loc

    def get_uniform_location(self, name: str) -> int:
        """Return (and cache) the location of uniform `name`."""
        if name in self._uniforms:
            return self._uniforms[name]

        loc = GL.glGetUniformLocation(self.id, name)
        assert loc != -1, f'Cannot find uniform named "{name}".'

        self._uniforms[name] = loc
        return loc

    def get_uniform_block_location(self, name: str) -> int:
        """Return (and cache) the index of uniform block `name`."""
        if name in self._uniform_blocks:
            return self._uniform_blocks[name]

        loc = GL.glGetUniformBlockIndex(self.id, name)
        # Bug fix: glGetUniformBlockIndex reports failure as GL_INVALID_INDEX
        # (0xFFFFFFFF), not -1, so the original check could never fire.
        assert loc not in (-1, GL.GL_INVALID_INDEX), f'Cannot find uniform block named "{name}".'

        self._uniform_blocks[name] = loc
        return loc

    def set_uniform_block_binding(self, name: str, index: int) -> None:
        """Bind uniform block `name` to binding point `index`."""
        loc = self.get_uniform_block_location(name)
        GL.glUniformBlockBinding(self.id, loc, index)

    def set_uniform_int(self, name: str, value: t.Union[int, t.List[int]]) -> None:
        """Upload a single int or a list of ints to uniform `name`."""
        loc = self.get_uniform_location(name)
        if isinstance(value, int):
            GL.glProgramUniform1i(self.id, loc, value)
        elif isinstance(value, list):
            GL.glProgramUniform1iv(self.id, loc, len(
                value), np.asarray(value, dtype=np.int32))
        else:
            raise GraphicsException(f'Invalid type of uniform value: {type(value).__name__}')

    def set_uniform_float(self, name: str, value: t.Union[float, t.List[float]]) -> None:
        """Upload a single float or a list of floats to uniform `name`."""
        loc = self.get_uniform_location(name)
        # Bug fix: the original tested isinstance(value, int), so passing an
        # actual float fell through to the error branch.
        if isinstance(value, (int, float)):
            GL.glProgramUniform1f(self.id, loc, value)
        elif isinstance(value, list):
            GL.glProgramUniform1fv(self.id, loc, len(
                value), np.asarray(value, dtype=np.float32))
        else:
            raise GraphicsException(f'Invalid type of uniform value: {type(value).__name__}')

    def set_uniform_double(self, name: str, value: t.Union[float, t.List[float]]) -> None:
        """Upload a single double or a list of doubles to uniform `name`."""
        loc = self.get_uniform_location(name)
        # Bug fix: same isinstance(value, int) issue as set_uniform_float.
        if isinstance(value, (int, float)):
            GL.glProgramUniform1d(self.id, loc, value)
        elif isinstance(value, list):
            GL.glProgramUniform1dv(self.id, loc, len(
                value), np.asarray(value, dtype=np.float64))
        else:
            raise GraphicsException(f'Invalid type of uniform value: {type(value).__name__}')

    # TODO: Add generic typing for `value` parameter
    # TODO: Add support for matrices which width and height differ
    def set_uniform_matrix(self, name: str, value, transpose: bool) -> None:
        """Upload a square glm matrix to uniform `name`."""
        # TODO: Implement faster way of getting appropriate function (maybe caching?)
        fn = getattr(GL, f'glProgramUniformMatrix{value.length}fv')
        fn(self.id, self.get_uniform_location(name),
           1, transpose, glm.value_ptr(value))
| StarcoderdataPython |
# Read a price from the user and apply a 5% discount (user-facing strings
# are in Portuguese and left untouched).
preco = float(input('Digite o preço $'))
desconto = preco * 0.95  # price after the 5% discount
print('O valor do produto após o desconto será de {}'.format(desconto))
9610576 | <gh_stars>100-1000
import datetime
import msgpack
import typing
from . import _serializer
from ... import objects
class Serializer(
    _serializer.Serializer,
):
    """msgpack serializer with dict-based extensions for datetime, tuple
    and objects.Task, which msgpack cannot represent natively."""

    # Registry name used to select this serializer implementation.
    name: str = 'msgpack'

    def __init__(
        self,
    ) -> None:
        # strict_types keeps tuples distinct from lists so the tuple
        # extension in encode_extensions is actually invoked.
        self.msgpack_packer = msgpack.Packer(
            autoreset=True,
            default=self.encode_extensions,
            use_bin_type=True,
            strict_types=True,
        )

    def decode_extensions(
        self,
        obj: typing.Any,
    ) -> typing.Any:
        """object_hook: rebuild datetime/tuple/Task from their dict encodings."""
        if '__datetime__' in obj:
            # NOTE(review): fromtimestamp() yields a naive local-time
            # datetime — confirm naive datetimes are intended here.
            return datetime.datetime.fromtimestamp(obj['__datetime__'])
        elif '__tuple__' in obj:
            return tuple(obj['__tuple__'])
        elif '__task__' in obj:
            return objects.Task(
                **obj['__task__'],
            )
        else:
            return obj

    def encode_extensions(
        self,
        obj: typing.Any,
    ) -> typing.Any:
        """Packer default hook: encode unsupported types as tagged dicts.

        Exact type checks (not isinstance) are deliberate: subclasses would
        not round-trip faithfully.
        """
        if type(obj) == datetime.datetime:
            return {
                '__datetime__': obj.timestamp(),
            }
        elif type(obj) == tuple:
            return {
                '__tuple__': list(obj),
            }
        elif type(obj) == objects.Task:
            return {
                '__task__': obj.__dict__,
            }
        else:
            raise TypeError(f'unsupported type {type(obj)}')

    def serialize(
        self,
        data: typing.Any,
    ) -> bytes:
        """Pack `data` into msgpack bytes."""
        return self.msgpack_packer.pack(data)

    def unserialize(
        self,
        data: bytes,
    ) -> typing.Any:
        """Unpack msgpack bytes, restoring the extension types."""
        return msgpack.unpackb(
            packed=data,
            strict_map_key=False,
            object_hook=self.decode_extensions,
            raw=False,
        )
| StarcoderdataPython |
5070111 | <reponame>AristodamusAdairs/Agents-and-Environment<gh_stars>1-10
# Bug fix: the stdlib module is lowercase `random` — the rest of this file
# calls random.random() / random.randrange(), so `import Random` fails.
import random
class Agent(object):
    """Abstract base class for agents acting in an Environment."""

    # Bug fix: the original read `def__init__`, a missing space that makes
    # the class body a syntax error.
    def __init__(self, env):
        """set up the agent"""
        self.env = env

    def go(self, n):
        """acts for n time steps"""
        raise NotImplementedError("go")   # abstract method
from display import Displayable
class Environment(Displayable):
    """Abstract environment that an agent perceives and acts in."""

    def initial_percepts(self):
        """returns the initial percepts for the agent"""
        raise NotImplementedError("initial_percepts")   # abstract method

    def do(self, action):
        """does the action in the environment
        returns the next percept """
        raise NotImplementedError("do")   # abstract method
class TP_env(Environment):
    """Toilet-paper market environment with cyclic prices plus noise and
    inflation."""

    # Repeating base price pattern, one entry per time step.
    prices = [234, 234, 234, 234, 255, 255, 275, 275, 211, 211, 211,
              234, 234, 234, 234, 199, 199, 275, 275, 234, 234, 234, 234, 255,
              255, 260, 260, 265, 265, 265, 265, 270, 270, 255, 255, 260, 260,
              265, 265, 150, 150, 265, 265, 270, 270, 255, 255, 260, 260, 265,
              265, 265, 265, 270, 270, 211, 211, 255, 255, 260, 260, 265, 265,
              260, 265, 270, 270, 205, 255, 255, 260, 260, 265, 265, 265, 265,
              270, 270]
    max_price_addon = 20  # maximum of random value added to get price

    def __init__(self):
        """paper buying agent"""
        # (The original repeated the docstring line twice; the duplicate was
        # a no-op string statement and has been removed.)
        self.time = 0
        self.stock = 20
        self.stock_history = []  # memory of the stock history
        self.price_history = []  # memory of the price history

    def initial_percepts(self):
        """return initial percepts"""
        self.stock_history.append(self.stock)
        # First price: base pattern plus uniform random add-on.
        price = self.prices[0] + random.randrange(self.max_price_addon)
        self.price_history.append(price)
        return {'price': price,
                'instock': self.stock}

    def do(self, action):
        """does action (buy) and returns percepts (price and instock)"""
        # Random consumption drawn from a fixed distribution.
        used = pick_from_dist({6: 0.1, 5: 0.1, 4: 0.2, 3: 0.3, 2: 0.2, 1: 0.1})
        bought = action['buy']
        self.stock = self.stock + bought - used
        self.stock_history.append(self.stock)
        self.time += 1
        price = (self.prices[self.time % len(self.prices)]  # repeating pattern
                 + random.randrange(self.max_price_addon)   # plus randomness
                 + self.time // 2)                          # plus inflation
        self.price_history.append(price)
        return {'price': price,
                'instock': self.stock}
def pick_from_dist(item_prob_dist):
    """ returns a value from a distribution.
    item_prob_dist is an item:probability dictionary, where the
    probabilities sum to 1.
    returns an item chosen in proportion to its probability
    """
    # Draw one uniform sample, then walk the items subtracting each weight
    # until the sample falls inside the current item's slice.
    remaining = random.random()
    for item, weight in item_prob_dist.items():
        if remaining >= weight:
            remaining -= weight
            continue
        return item
    # Only reachable when the weights sum to less than the drawn sample.
    raise RuntimeError(str(item_prob_dist)+" is not a probability distribution")
class TP_agent(Agent):
    """Paper-buying agent that tracks a running average of the price."""

    def __init__(self, env):
        self.env = env
        self.spent = 0  # total money spent so far
        percepts = env.initial_percepts()
        self.last_price = percepts['price']
        # Bug fix: the original updated self.ave before it existed
        # (`self.ave = self.ave + ...` raises AttributeError). Seed the
        # running average with the first observed price; the exponential
        # update belongs in the per-step loop.
        self.ave = self.last_price
        self.instock = percepts['instock']
# Build the environment and agent for an interactive session; the commented
# lines show how to run it.
env = TP_env()
ag = TP_agent(env)
# ag.go(90)
# ag.spent/env.time    ## average spent per time period

import matplotlib.pyplot as plt
class Plot_prices(object):
    """Set up the plot for history of price and number in stock"""

    def __init__(self, ag, env):
        self.ag = ag
        self.env = env
        plt.ion()
        plt.xlabel("Time")
        # Bug fix: this string literal was unterminated in the original.
        plt.ylabel("Number in stock.")

    def plot_run(self):
        """plot history of price and instock"""
        # Bug fix: use the environment stored on self instead of the
        # module-level global `env`.
        num = len(self.env.stock_history)
        plt.plot(range(num), self.env.stock_history, label="In stock")
        plt.plot(range(num), self.env.price_history, label="Price")
        # plt.legend(loc="upper left")
        plt.draw()
# pl = Plot_prices(ag,env)
# ag.go(90); pl.plot_run()
| StarcoderdataPython |
6490644 | <gh_stars>0
import MySQLdb, sys, random
from mysql_passwords import database, user, password
from swisscalc import calculateTop8Threshold
class DeckCursor:
    """Context-manager wrapper that guarantees a DB cursor is closed on exit."""

    def __init__(self, cur):
        # Keep a reference so __exit__ can close it.
        self.cur = cur

    def __enter__(self):
        # Hand back the raw cursor, not this wrapper.
        return self.cur

    def __exit__(self, type, value, traceback):
        # Close unconditionally; exceptions (if any) still propagate.
        self.cur.close()
class DeckDB:
    """MySQL-backed store for tournament pairings, seatings and deck checks.

    Public methods open short-lived cursors via DeckCursor and return plain
    Python values, so callers never touch MySQLdb directly.  Fixes applied:
    values in find/deleteRow/update are now bound as query parameters
    instead of concatenated into SQL (injection risk), getAllPlayers is
    parameterized, and get_recommendations no longer calls random.choice on
    a dict view (a TypeError on Python 3).
    """

    # Column lists used by insert() to build INSERT statements.
    schema = {
        'pairings':['playerid', 'round', 'score', 'tablenum', 'tournamentid'],
        'seatings':['playerid', 'buildtable', 'tournamentid'],
        'players':['name', 'country', 'tournamentid'],
        'deckchecks':['playerid', 'teamplayer', 'tournamentid', 'round'],
        'round':['roundnum', 'tournamentid'],
        'tournaments':['name', 'url', 'rounds', 'password', 'pairings', 'team', 'decklisturl'],
    }

    def __init__(self):
        """Connect to the configured database; seat letters label team members."""
        self.db = MySQLdb.connect(host='localhost', user=user, passwd=password, db=database)
        self.seatletters = ['A', 'B', 'C']

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Commit outstanding work and close the connection."""
        self.db.commit()
        self.db.close()

    def commit(self):
        """Flush pending writes to the database."""
        self.db.commit()

    def cursor(self):
        """Return a context-managed cursor."""
        return DeckCursor(self.db.cursor())

    def checkEvent(self, event, output):
        """Exit the program with a message when `event` is not a known tournament."""
        try:
            self.getEventId(event)
        except Exception:
            output.printMessage("Unknown event %s" % event)
            sys.exit(0)

    def find(self, table, search):
        """Return the first row of `table` matching every column=value in `search`.

        Values are bound as query parameters (SQL-injection safe); table and
        column names come only from trusted internal callers.
        """
        with DeckCursor(self.db.cursor()) as cur:
            where = " AND ".join('{}=%s'.format(col) for col in search)
            cur.execute("SELECT * FROM " + table + " WHERE " + where,
                        tuple(search.values()))
            return cur.fetchall()[0]

    def deleteRow(self, table, search):
        """Delete the rows of `table` matching every column=value in `search`."""
        with DeckCursor(self.db.cursor()) as cur:
            where = " AND ".join('{}=%s'.format(col) for col in search)
            cur.execute("DELETE FROM " + table + " WHERE " + where,
                        tuple(search.values()))

    def clearTable(self, table):
        """Delete every row of `table` (table name is internal, not user input)."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("DELETE FROM %s" % table)
        self.db.commit()

    def update(self, table, matches, values):
        """Set `values` on the rows of `table` matching all of `matches`."""
        with DeckCursor(self.db.cursor()) as cur:
            assignments = ', '.join('{}=%s'.format(col) for col in values)
            where = ' AND '.join('{}=%s'.format(col) for col in matches)
            cur.execute("UPDATE " + table + " SET " + assignments + " WHERE " + where,
                        tuple(values.values()) + tuple(matches.values()))
        self.db.commit()

    def insert(self, table, data):
        """Insert `data` (ordered per self.schema[table]) into `table`."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("INSERT INTO %s (%s) VALUES (%s)" % (table, ",".join(self.schema[table]), ",".join(["%s" for x in data])), data)
        self.db.commit()

    def get_round(self, tournamentid):
        """Return the tournament's current round number."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT * FROM round WHERE tournamentid=%s", (tournamentid,))
            rows = cur.fetchall()
            return int(rows[0][0])

    def getEventId(self, event):
        """Resolve an event id from either a numeric id or a tournament name."""
        try:
            return int(event)
        except (TypeError, ValueError):
            with DeckCursor(self.db.cursor()) as cur:
                cur.execute("SELECT tournamentid FROM tournaments WHERE name=%s", (event,))
                rows = cur.fetchall()
                return int(rows[0][0])

    def isEventTeam(self, eventid):
        """True when the tournament is a team event."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT team FROM tournaments WHERE tournamentid=%s", (eventid,))
            rows = cur.fetchall()
            return True if rows[0][0] else False

    def hasEventPairings(self, eventid):
        """True when pairings are enabled for the tournament."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT pairings FROM tournaments WHERE tournamentid=%s", (eventid,))
            rows = cur.fetchall()
            return True if rows[0][0] else False

    def getEventSettings(self, eventid):
        """Return the tournament's full settings row."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT name, url, rounds, password, pairings, team, decklisturl FROM tournaments WHERE tournamentid=%s", (eventid,))
            rows = cur.fetchall()
            return rows[0]

    def getEventPassword(self, eventid):
        """Return the tournament's password."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT password FROM tournaments WHERE tournamentid=%s", (eventid,))
            rows = cur.fetchall()
            return rows[0][0]

    def getEventName(self, eventid):
        """Return the tournament's name."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT name FROM tournaments WHERE tournamentid=%s", (eventid,))
            rows = cur.fetchall()
            return rows[0][0]

    def getEventRounds(self, eventid):
        """Return the tournament's total number of rounds."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT rounds FROM tournaments WHERE tournamentid=%s", (eventid,))
            rows = cur.fetchall()
            return int(rows[0][0])

    def getDecklistUrl(self, eventid):
        """Return the tournament's decklist URL."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT decklisturl FROM tournaments WHERE tournamentid=%s", (eventid,))
            rows = cur.fetchall()
            return rows[0][0]

    def getEventUrl(self, eventid):
        """Return the tournament's pairings URL."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT url FROM tournaments WHERE tournamentid=%s", (eventid,))
            rows = cur.fetchall()
            return rows[0][0]

    def getPlayersForEachByeNumber(self, eventid):
        """Return a list where index b holds the count of players with b byes.

        A player's bye count is inferred from the first round they were
        paired in; gaps (bye counts with no players) are filled with 0.
        """
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("""
                SELECT round-1 AS byes, count(round) AS players FROM (
                    SELECT min(round) AS round FROM
                        pairings INNER JOIN
                        players ON
                        pairings.playerid=players.playerid
                        WHERE players.tournamentid=%s GROUP BY name)
                    AS minRounds
                GROUP BY round
                ORDER BY byes
                """, (eventid,))
            rows = cur.fetchall()
        result = []
        b = 0
        for (byes, number) in rows:
            while b < byes:
                b = b + 1
                result.append(0)
            result.append(number)
            b = b + 1
        return result

    def get_events(self):
        """Return (id, name, url) for every tournament."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT tournamentid, name, url FROM tournaments")
            return cur.fetchall()

    def get_all_checks(self, event):
        """Return {round: [(player name, team seat), ...]} of all deck checks."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT round, name, teamplayer FROM deckchecks INNER JOIN players ON deckchecks.playerid=players.playerid WHERE deckchecks.tournamentid=%s ORDER BY round DESC", (event,))
            rows = cur.fetchall()
        currentround = 0
        checks = {}
        for row in rows:
            if row[0] != currentround:
                currentround = row[0]
                checks[currentround] = []
            checks[currentround].append((row[1], row[2]))
        return checks

    def get_pairings(self, tournamentid, roundnum=None):
        """Return (table, left name, left score, right name, right score) rows.

        The self-join pairs the two players sharing a table number; table 0
        (byes) is excluded.  Defaults to the current round.
        """
        roundnum = roundnum or self.get_round(tournamentid)
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("""
                SELECT lpair.tablenum, lname, lscore, rname, rscore FROM (
                    SELECT name AS lname, players.playerid AS lid, score AS lscore, tablenum
                    FROM players INNER JOIN pairings
                    ON players.playerid=pairings.playerid
                    WHERE pairings.tournamentid=%s AND pairings.round=%s
                ) AS lpair INNER JOIN (
                    SELECT name AS rname, players.playerid AS rid, score AS rscore, tablenum
                    FROM players INNER JOIN pairings ON players.playerid=pairings.playerid
                    WHERE pairings.tournamentid=%s AND pairings.round=%s
                ) AS rpair
                ON lpair.tablenum=rpair.tablenum AND lid!=rid and lpair.tablenum != 0
                ORDER BY lname
                """, (tournamentid, roundnum, tournamentid, roundnum))
            return cur.fetchall()

    def get_top_tables(self, tournamentid, roundnum=None):
        """Return up to 100 (max score, table) pairs, best tables first."""
        roundnum = roundnum or self.get_round(tournamentid)
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT MAX(score), tablenum FROM pairings WHERE tournamentid=%s AND round=%s GROUP BY tablenum ORDER BY MAX(score) DESC, tablenum LIMIT 100", (tournamentid, roundnum))
            return cur.fetchall()

    def get_recommendations(self, tournamentid, roundnum=None, n=6, rand=True):
        """Suggest up to `n` deck-check targets for the round.

        With rand=True any paired player qualifies; otherwise only players
        still in top-8 contention (per swisscalc) who have not already been
        checked.  Returns (previous table, player, player) tuples and may
        pick the same table more than once.
        """
        roundnum = roundnum or self.get_round(tournamentid)
        if rand:
            marginalthreshold = 0
            additionalFilters = ""
        else:
            playersWithEachByes = self.getPlayersForEachByeNumber(tournamentid)
            totalRounds = self.getEventRounds(tournamentid)
            (marginalthreshold, top8threshold, undefeatedthreshold) = calculateTop8Threshold(playersWithEachByes, totalRounds, roundnum)
            additionalFilters = "AND deckchecks.playerid is NULL"
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("""
                SELECT players.playerid, name, score, buildtable
                    FROM players INNER JOIN seatings
                    ON players.playerid=seatings.playerid
                    INNER JOIN pairings
                    ON players.playerid=pairings.playerid
                    LEFT OUTER JOIN deckchecks
                    ON deckchecks.playerid=players.playerid
                    WHERE players.tournamentid=%s
                    AND pairings.round=%s
                    AND pairings.score>=%s
                """+additionalFilters+" ORDER BY tablenum", (tournamentid, roundnum, marginalthreshold))
            rows = cur.fetchall()
        tables = {}
        for r in rows:
            try:
                prevtables = self.get_prev_tables(tournamentid, r[0])
                l = tables.get(prevtables[-1], list())
                l.append((r[1], r[2], prevtables, r[3]))
                tables[prevtables[-1]] = l
            except Exception:
                pass
        rv = []
        for i in range(0, n):
            # Bug fix: random.choice needs a sequence; on Python 3 a dict
            # keys view is not indexable, so materialise the keys first.
            k = random.choice(list(tables))
            try:
                rv.append((k, tables[k][0], tables[k][1]))
            except IndexError:
                # Fewer than two players recorded at that table.
                pass
        return rv

    def get_build_table(self, tournamentid, tablenum):
        """Return (name, score, previous tables, build table) for one build table."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT players.playerid, name, MAX(score), buildtable FROM players INNER JOIN seatings ON players.playerid=seatings.playerid INNER JOIN pairings ON players.playerid=pairings.playerid WHERE buildtable=%s AND players.tournamentid=%s GROUP BY name, buildtable", (tablenum, tournamentid))
            rows = cur.fetchall()
        return [(r[1], r[2], self.get_prev_tables(tournamentid, r[0]), r[3]) for r in rows]

    def hasSeating(self, tournamentid):
        """True when any player has a build-table seating for the event."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT * FROM players INNER JOIN seatings on players.playerid=seatings.playerid WHERE players.tournamentid=%s", (tournamentid,))
            rows = cur.fetchall()
        return True if len(rows) > 0 else False

    def get_table(self, tournamentid, tablenum, roundnum=None):
        """Return the two (name, score, previous tables, build table) tuples at a table."""
        roundnum = roundnum or self.get_round(tournamentid)
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT players.playerid, name, score, buildtable FROM players INNER JOIN seatings ON players.playerid=seatings.playerid INNER JOIN pairings ON players.playerid=pairings.playerid WHERE tablenum=%s AND players.tournamentid=%s AND pairings.round=%s", (tablenum, tournamentid, roundnum))
            rows = cur.fetchall()
        return ((rows[0][1], rows[0][2], self.get_prev_tables(tournamentid, rows[0][0]), rows[0][3]), (rows[1][1], rows[1][2], self.get_prev_tables(tournamentid, rows[1][0]), rows[1][3]))

    def get_table_ids(self, tournamentid, tablenum, roundnum=None):
        """Return the player ids of the two players seated at a table."""
        roundnum = roundnum or self.get_round(tournamentid)
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT players.playerid FROM players INNER JOIN seatings ON players.playerid=seatings.playerid INNER JOIN pairings ON players.playerid=pairings.playerid WHERE tablenum=%s AND players.tournamentid=%s AND round=%s", (tablenum, tournamentid, roundnum))
            rows = cur.fetchall()
        return (rows[0][0], rows[1][0])

    def get_player_id(self, tournamentid, name):
        """Return the player id for a case-insensitive exact name match."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT playerid FROM players WHERE players.tournamentid=%s AND name COLLATE LATIN1_GENERAL_CI = %s", (tournamentid, name))
            rows = cur.fetchall()
        return rows[0][0]

    def get_prev_tables(self, tournamentid, playerid):
        """Return the player's table per round: 'Bye' for byes, '-' for future rounds."""
        currentround = self.get_round(tournamentid)
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT round, tablenum FROM pairings WHERE tournamentid=%s AND playerid=%s ORDER BY round", (tournamentid, playerid))
            rows = cur.fetchall()
        rv = []
        c = 0
        for r in rows:
            rnd = r[0]
            tbl = r[1]
            # Any skipped rounds before this pairing were byes.
            for i in range(c, rnd-1):
                rv.append('Bye')
            c = rnd
            rv.append(tbl)
        # Rounds not yet paired are shown as '-'.
        for i in range(c, currentround):
            rv.append('-')
        return rv

    def getAllPlayers(self, tournamentid):
        """Return (name, buildtable) for every seated player in the event."""
        with DeckCursor(self.db.cursor()) as cur:
            # Bug fix: the tournament id is now bound as a parameter instead
            # of being %-interpolated into the SQL string.
            cur.execute("SELECT name, buildtable FROM players INNER JOIN seatings on players.playerid=seatings.playerid WHERE players.tournamentid=%s", (tournamentid,))
            rows = cur.fetchall()
        return [(row[0], row[1]) for row in rows]

    def get_players(self, tournamentid, name):
        """Return (name, best score, previous tables, build table) for name substring matches."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT players.playerid, name, MAX(score), buildtable FROM players INNER JOIN seatings ON players.playerid=seatings.playerid LEFT OUTER JOIN pairings ON players.playerid=pairings.playerid WHERE name COLLATE LATIN1_GENERAL_CI LIKE %s AND players.tournamentid=%s GROUP BY name", ('%'+name+'%', tournamentid))
            rows = cur.fetchall()
        return [(row[1], row[2], self.get_prev_tables(tournamentid, row[0]), row[3]) for row in rows]

    def getPreviousChecks(self, tournamentid, name):
        """Return the rounds (or 'round:seat' for team events) a player was checked in."""
        with DeckCursor(self.db.cursor()) as cur:
            cur.execute("SELECT deckchecks.round, deckchecks.teamplayer FROM deckchecks INNER JOIN players on deckchecks.playerid=players.playerid WHERE deckchecks.tournamentid=%s AND players.name=%s", (tournamentid, name))
            rows = cur.fetchall()
        if self.isEventTeam(tournamentid):
            return ["%s:%s" % (row[0], self.seatletters[row[1]]) for row in rows]
        else:
            return [row[0] for row in rows]
| StarcoderdataPython |
248280 | <gh_stars>0
from typing import List, Any
from fastapi import APIRouter, HTTPException
from sqlalchemy.future import select
from starlette import status
from starlette.requests import Request
from models.users import User
from schemas.users import UserCreate, UserDB, UserUpdate, UserOut
from utils.password import password_hash_ctx
router = APIRouter()
@router.get(
    '/users/',
    name='users:get',
    summary='get list of users',
    status_code=status.HTTP_200_OK,
    description='get list of users with limit and skip page',
    response_model=List[UserOut]
)
async def user_get_list(
    request: Request,
    skip: int = 0,
    limit: int = 50
) -> Any:
    """Return one page of users: `limit` rows starting after `skip`."""
    session = request.app.state.db
    query = select(User).offset(skip).limit(limit)
    result = await session.execute(query)
    return result.scalars().all()
@router.post(
    '/users/',
    name="users:post",
    summary="create a new user",
    status_code=status.HTTP_201_CREATED,
    description="Creates a new user with post query",
    response_model=UserDB
)
async def user_post(user: UserCreate, request: Request):
    """Create a user, rejecting duplicate e-mail addresses with 400."""
    session = request.app.state.db
    result = await session.execute(select(User).filter(User.email == user.email))
    if result.scalar_one_or_none():
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"User with email '{user.email}' already exists"
        )
    new_user = User(**user.dict())
    # Never store the plaintext password.
    new_user.password = password_hash_ctx.hash(new_user.password)
    session.add(new_user)
    await session.commit()
    await session.refresh(new_user)
    return new_user
@router.get(
    '/users/{user_id}',
    name="users:get-by-id",
    summary="get user by id",
    response_model=UserDB
)
async def user_get_by_id(user_id: int, request: Request):
    """Return the user with `user_id`, or raise 404 if none exists.

    Consistency fix: uses scalar_one_or_none() like every other endpoint
    in this module (scalar() would silently return the first of several
    rows instead of flagging the anomaly).
    """
    res = await request.app.state.db.execute(select(User).filter(User.id == user_id))
    db_user = res.scalar_one_or_none()
    if not db_user:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="User not found")
    return db_user
@router.put(
    '/users/{user_id}',
    name="users:put",
    summary="update user data by id overwriting all attributes",
    response_model=UserDB
)
async def user_put(user_id: int, user: UserUpdate, request: Request):
    """Overwrite the stored attributes of user `user_id` (None fields skipped)."""
    return await update_user_field(request, user, user_id, exclude_none=True)
@router.patch(
    '/users/{user_id}',
    name="users:patch",
    summary="partially update user attributes by id",
    response_model=UserDB
)
async def user_patch(user_id: int, user: UserUpdate, request: Request):
    """Apply only the fields the client explicitly sent."""
    return await update_user_field(request, user, user_id, exclude_unset=True)
async def update_user_field(request, user, user_id, **kwargs):
    """Copy fields from `user` onto the stored user `user_id` and persist.

    `kwargs` are forwarded to pydantic's .dict() (exclude_none /
    exclude_unset) to select which fields are written.  Raises 404 when
    the user does not exist; a supplied password is re-hashed before
    storage.
    """
    session = request.app.state.db
    result = await session.execute(select(User).filter(User.id == user_id))
    target = result.scalar_one_or_none()
    if not target:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"User with id '{user_id}' not found"
        )
    for field, new_value in user.dict(**kwargs).items():
        setattr(target, field, new_value)
    if user.dict(exclude_none=True).get('password') is not None:
        target.password = password_hash_ctx.hash(user.password)
    session.add(target)
    await session.commit()
    await session.refresh(target)
    return target
@router.delete(
    '/users/{user_id}',
    name="users:delete",
    summary="delete user by id",
    response_model=UserDB
)
async def user_delete(user_id: int, request: Request):
    """Delete the user with `user_id` and return the removed record (404 if absent)."""
    session = request.app.state.db
    result = await session.execute(select(User).filter(User.id == user_id))
    target = result.scalar_one_or_none()
    if not target:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"User with id '{user_id}' not found"
        )
    await session.delete(target)
    await session.commit()
    return target
| StarcoderdataPython |
1734920 | #
# @lc app=leetcode.cn id=575 lang=python3
#
# [575] 分糖果
#
# https://leetcode-cn.com/problems/distribute-candies/description/
#
# algorithms
# Easy (68.32%)
# Total Accepted: 64K
# Total Submissions: 89.9K
# Testcase Example: '[1,1,2,2,3,3]'
#
#
# 给定一个偶数长度的数组,其中不同的数字代表着不同种类的糖果,每一个数字代表一个糖果。你需要把这些糖果平均分给一个弟弟和一个妹妹。返回妹妹可以获得的最大糖果的种类数。
#
# 示例 1:
#
#
# 输入: candies = [1,1,2,2,3,3]
# 输出: 3
# 解析: 一共有三种种类的糖果,每一种都有两个。
# 最优分配方案:妹妹获得[1,2,3],弟弟也获得[1,2,3]。这样使妹妹获得糖果的种类数最多。
#
#
# 示例 2 :
#
#
# 输入: candies = [1,1,2,3]
# 输出: 2
# 解析: 妹妹获得糖果[2,3],弟弟获得糖果[1,1],妹妹有两种不同的糖果,弟弟只有一种。这样使得妹妹可以获得的糖果种类数最多。
#
#
# 注意:
#
#
# 数组的长度为[2, 10,000],并且确定为偶数。
# 数组中数字的大小在范围[-100,000, 100,000]内。
# from typing import List
class Solution:
    def distributeCandies(self, candyType):
        """Return the maximum number of distinct candy kinds the sister can get.

        She receives exactly half of the len(candyType) candies, so the
        answer is capped both by the number of distinct kinds and by half
        the total count.

        Bug fix: the parameter was annotated List[int] while the
        `from typing import List` import was commented out, so evaluating
        the annotation raised NameError at import time; the annotation is
        dropped.
        """
        return min(len(set(candyType)), len(candyType) // 2)
| StarcoderdataPython |
6444421 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the wagtailembeds Embed table."""

    depends_on = (
        ("wagtailcore", "0002_initial_data"),
    )

    def forwards(self, orm):
        """Apply: create the Embed table and its (url, max_width) unique index."""
        # Adding model 'Embed'
        db.create_table(u'wagtailembeds_embed', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('max_width', self.gf('django.db.models.fields.SmallIntegerField')(null=True, blank=True)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('html', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('title', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('thumbnail_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('width', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('height', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal(u'wagtailembeds', ['Embed'])
        # Adding unique constraint on 'Embed', fields ['url', 'max_width']
        db.create_unique(u'wagtailembeds_embed', ['url', 'max_width'])

    def backwards(self, orm):
        """Revert: drop the unique constraint first, then the Embed table."""
        # Removing unique constraint on 'Embed', fields ['url', 'max_width']
        db.delete_unique(u'wagtailembeds_embed', ['url', 'max_width'])
        # Deleting model 'Embed'
        db.delete_table(u'wagtailembeds_embed')

    models = {
        u'wagtailembeds.embed': {
            'Meta': {'unique_together': "(('url', 'max_width'),)", 'object_name': 'Embed'},
            'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'max_width': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    # Bug fix: a dataset-corruption token ("| StarcoderdataPython |") was
    # fused onto this line, which would raise NameError when the class
    # body executed.
    complete_apps = ['wagtailembeds']
6500609 | <gh_stars>1-10
import copy
import sys
def DFS_directed_recur(G, s, nodes_explored, t, f, leader, current_leader):
    """Recursive DFS over the directed graph G (adjacency dict) from node s.

    Marks every reachable unexplored node with `current_leader` in `leader`
    and records post-order finishing times in `f`.  The one-element list
    `t` acts as a mutable counter shared across recursive calls.
    """
    nodes_explored.add(s)
    leader[s] = current_leader
    for neighbour in G[s]:
        if neighbour not in nodes_explored:
            DFS_directed_recur(G, neighbour, nodes_explored, t, f, leader, current_leader)
    # All successors finished: assign this node's finishing time.
    t[0] += 1
    f[s] = t[0]
def DFS_directed_iter(G, s, nodes_explored, t, f, leader, current_leader):
    """Iterative (explicit-stack) DFS over directed graph G from node s.

    Bookkeeping is identical to DFS_directed_recur: reachable nodes get
    `current_leader` in `leader`, finishing times go into `f` via the
    shared counter `t`.
    """
    stack = [s]
    nodes_explored.add(s)
    leader[s] = current_leader
    while stack:
        top = stack[-1]
        # Pick one unexplored successor to descend into, if any remains.
        pending = next((v for v in G[top] if v not in nodes_explored), None)
        if pending is None:
            # Every successor is finished: record the finishing time and
            # retreat to the previous node on the stack.
            t[0] += 1
            f[top] = t[0]
            stack.pop()
        else:
            nodes_explored.add(pending)
            leader[pending] = current_leader
            stack.append(pending)
def belong(l, p):
    """Return True iff every element of iterable `l` is a member of `p`.

    Idiom fix: replaces the manual loop with the built-in all(), which
    short-circuits identically and returns True for an empty `l`.
    """
    return all(i in p for i in l)
def DFS_loop(G, N, f=None):
    """Run DFS from every unvisited node of graph G with nodes 1..N.

    When finishing times `f` (from Kosaraju's first pass) are supplied,
    start nodes are taken in decreasing finishing time; otherwise nodes
    are tried from N down to 1.  Returns (leader, f), where `leader` maps
    each node to the start node of its DFS tree.

    Bug fix: the original declared `f={}` as a mutable default argument,
    so repeated calls without an explicit `f` silently shared (and kept
    accumulating into) the same dictionary across calls.
    """
    if f is None:
        f = {}
    nodes_explored = set()
    leader = {}
    t = [N + 1]
    s = None
    if len(f) > 0:
        # Process nodes in decreasing order of finishing time.
        l = list(f.items())
        l.sort(key=lambda x: x[1], reverse=True)
        for item in l:
            i = item[0]
            if i not in nodes_explored:
                s = i
                DFS_directed_iter(G, s, nodes_explored, t, f, leader, s)
    else:
        for i in range(N, 0, -1):
            if i not in nodes_explored:
                s = i
                DFS_directed_iter(G, s, nodes_explored, t, f, leader, s)
    return leader, f
def reverse_graph(G):
    """Return a new edge list with every edge of G reversed.

    G is a list of [tail, head] pairs; G itself is left unmodified.
    """
    reversed_edges = []
    for edge in G:
        flipped = list(edge)
        flipped[0], flipped[1] = flipped[1], flipped[0]
        reversed_edges.append(flipped)
    return reversed_edges
def count_scc(leader):
    """Return (leader, size) pairs of the five largest SCCs, largest first.

    `leader` maps each node to its component's leader node; component size
    is the number of nodes sharing a leader.
    """
    sizes = {}
    for lead in leader.values():
        sizes[lead] = sizes.get(lead, 0) + 1
    ranked = sorted(sizes.items(), key=lambda pair: pair[1], reverse=True)
    return ranked[0:5]
def G_dict(G, N):
    """Convert the edge list G into an adjacency dict covering nodes 1..N.

    Every node in 1..N gets an entry, so sink nodes map to an empty set.
    """
    adjacency = {}
    for edge in G:
        adjacency.setdefault(edge[0], set()).add(edge[1])
    # Guarantee an (empty) entry for every node without outgoing edges.
    for node in range(1, N + 1):
        adjacency.setdefault(node, set())
    return adjacency
if __name__ == "__main__":
    '''
    #G = [[2, 1], [2, 3], [3, 2], [3, 4], [6, 4], [3, 6], [5, 2], [1, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 7]]
    G = [[1,2], [2,4], [1,3], [3, 4], [1, 5], [5, 1]]
    #G = {1:{2}, 2:{3,4}, 3:{4,5}, 4:{1}, 5:{6}, 6:{5}}
    G_rev = reverse_graph(G)
    #G_rev = {1:{4}, 2:{1}, 3:{2}, 4:{2,3}, 5:{3,6}, 6:{5}}
    G = G_dict(G, 4)
    print(G)
    G_rev = G_dict(G_rev, 4)
    print(G_rev)
    leader, f = DFS_loop(G_rev, 4)
    leader, f = DFS_loop(G, 4, f)
    #print(leader)
    l = count_scc(leader)
    print(l)
    #leader = {}
    #DFS_directed_iter(G, 2, set(), f={}, t = [0], leader = leader, current_leader=1)
    #print(leader)
    '''
    #sys.setrecursionlimit(1000000)
    # Read the edge list: each line of SCC.txt is "tail head".
    G = []
    with open('SCC.txt', 'r') as f:
        while True:
            line = f.readline()
            if not line:
                break
            l = list(map(int, line.split()))
            G.append(l)
    # Kosaraju's algorithm: first DFS pass on the reversed graph computes
    # finishing times; the second pass on the original graph (in decreasing
    # finishing-time order) assigns SCC leaders.  875714 is the node count
    # of the course dataset.
    G_rev = reverse_graph(G)
    G = G_dict(G, 875714)
    G_rev = G_dict(G_rev, 875714)
    print("hehe")
    leader, f = DFS_loop(G_rev, 875714)
    leader, f = DFS_loop(G, 875714, f)
    # Report the sizes of the five largest strongly connected components.
    l = count_scc(leader)
    print(l)
| StarcoderdataPython |
6671992 | <reponame>zveronics/zveronics-python-server<filename>src/zveronics/server.py
import json
import logging
import socketserver
import struct
import msgpack
log = logging.getLogger(__name__)
def unpack(data):
    """Decode a msgpack byte payload; strings come back as str, not bytes."""
    message = msgpack.unpackb(data, raw=False)
    return message
def pack(data):
    """Encode `data` as msgpack bytes; bytes objects use the bin type."""
    payload = msgpack.packb(data, use_bin_type=True)
    return payload
class UserRequestHandler(socketserver.BaseRequestHandler):
    """The request handler class for zveronics server.

    Wire format: a little-endian uint32 length prefix followed by a
    msgpack-encoded message of exactly that many bytes.
    """

    def _recv_exact(self, count):
        """Read exactly `count` bytes from the client socket.

        TCP recv() may return fewer bytes than requested (short read), so
        loop until the full payload has arrived.  Raises ConnectionError
        if the peer closes the connection mid-message.
        """
        chunks = []
        remaining = count
        while remaining > 0:
            chunk = self.request.recv(remaining)
            if not chunk:
                raise ConnectionError('peer closed connection mid-message')
            chunks.append(chunk)
            remaining -= len(chunk)
        return b''.join(chunks)

    def handle(self):
        int_format = struct.Struct('<I')
        size = int(int_format.size)
        log.debug('Waiting message size (%s bytes)', str(size))
        # Bug fix: a single recv() may return fewer bytes than requested,
        # which previously corrupted both the length prefix and the body;
        # read the exact byte counts instead.
        msg_size = int_format.unpack(self._recv_exact(size))[0]
        log.debug('Reading message of size %s', msg_size)
        msg = unpack(self._recv_exact(msg_size))
        log.debug('Got message: %s', json.dumps(msg))
def serve_zveronics(host, port):
    """Run the blocking TCP server loop on (host, port) until interrupted."""
    log.info('Starting zveronics server on %s:%s', host, port)
    address = (host, port)
    # The with-block guarantees the listening socket is closed on exit.
    with socketserver.TCPServer(address, UserRequestHandler) as server:
        server.serve_forever()
| StarcoderdataPython |
3336825 | <reponame>UPstartDeveloper/CS-1.3-Core-Data-Structures
#!python
class Node(object):
    """A single linked-list node holding one value and a successor link."""

    def __init__(self, data):
        """Wrap `data` in a node with no successor yet."""
        self.data, self.next = data, None

    def __repr__(self):
        """Return a debug representation such as Node(5)."""
        return f'Node({self.data!r})'
class LinkedList(object):
def __init__(self, iterable=None):
"""Initialize this linked list and append the given items, if any."""
self.head = None # First node
self.tail = None # Last node
self.size = 0 # Number of nodes
# Append the given items
if iterable is not None:
for item in iterable:
self.append(item)
def __str__(self):
"""Return a formatted string representation of this linked list."""
items = ['({!r})'.format(item) for item in self.items()]
return '[{}]'.format(' -> '.join(items))
def __repr__(self):
"""Return a string representation of this linked list."""
return 'LinkedList({!r})'.format(self.items())
def items(self):
"""Return a list of all items in this linked list.
Best and worst case running time: Theta(n) for n items in the list
because we always need to loop through all n nodes.
"""
# Create an empty list of results
result = [] # Constant time to create a new list
# Start at the head node
node = self.head # Constant time to assign a variable reference
# Loop until the node is None, which is one node too far past the tail
while node is not None: # Always n iterations because no early exit
# Append this node's data to the results list
result.append(node.data) # Constant time to append to a list
# Skip to the next node
node = node.next # Constant time to reassign a variable
# Now result contains the data from all nodes
return result # Constant time to return a list
def is_empty(self):
"""Return True if this linked list is empty, or False."""
return self.head is None
def length(self):
"""Return the length of this linked list by traversing its nodes.
This method runs in constant time, otherwise represented as O(1), in
all scenarios. This is because all we do is retrieve the value
stored in the size property of the given LinkedList instance.
"""
return self.size
def get_at_index(self, index):
"""Return the item at the given index in this linked list, or
raise ValueError if the given index is out of range of the list
size.
Best case running time: O(1)
This method runs in constant time if we are getting the item at the
index 0 (also can be thought of as the head node), or if the index
is out of range of the list. In the former, this is because we only
need one iteration through the for loop. In the latter, it is
because we can be sure we will need no iteration of the for loop,
and just raise the ValuError exception.
Worst case running time: O(n)
This occurs if we are trying to get the item at the last index
position in the list, otherwise referred to as the tail node. This
is because we will need to traverse through all the items in the
list, which increases in linear time with respect to the growth of
items in the list, which is represented by n.
"""
# Check if the given index is out of range and if so raise an error
if not (0 <= index < self.size):
raise ValueError('List index out of range: {}'.format(index))
# Find the node at the given index
node = self.head
for i in range(index):
node = node.next
# return its data
return node.data
def insert_at_index(self, index, item):
"""Insert the given item at the given index in this linked list, or
raise ValueError if the given index is out of range of the list
size.
Best case running time: O(1)
This function mimics the approach of the prepend and append methods
in several scenarios: if we are inserting an item at the head node
position (which is index 0), this uses the runtime of the prepend
method; or if we inserting at the tail node, or we are inserting an
item into a list with no other items, this method uses the same
runtime as the append operation (in the latter case, the index
would also be equal to 0). Furthermore, this method also runs in
constant time if the index is out of range, in which it takes
constant time to raise tbe ValueError.
Worst case running time: O(n)
If we are inserting an item at an index position that cannot be
considered the head nor the tail, then we have a variable number
of items to iterate over. In the worst case scenario, the index we
want to insert at is just before the tail, meaning we need n-1
iterations through the for loop (found in the else block below),
where n is the number of items in the list. The runtime in this
scenario scales in linear time to the growth of the list, therefore
we disregard the -1 and are left with O(n) runtime.
"""
# Check if the given index is out of range and if so raise an error
if not (0 <= index <= self.size):
raise ValueError('List index out of range: {}'.format(index))
# if we are inserting at the tail, or no previous nodes, then append
if self.size == index:
self.append(item)
# if we are inserting at the head, then just perform a prepend
elif index == 0:
self.prepend(item)
else:
# Perform traversal of the nodes
prev_node = self.head
# find the node at index, as well as the one just before it
for i in range(index - 1):
prev_node = prev_node.next
node_being_moved = prev_node.next
# make a new Node for the data to be inserted
new_node = Node(item)
# insert the new node at the index, and connect it to the rest
prev_node.next = new_node
new_node.next = node_being_moved
# increment the size property
self.size += 1
def append(self, item):
"""Insert the given item at the tail of this linked list.
Best and worst case running time: O(1)
In all scenarios, this method runs in constant time. The runtime
complexity of all its statements are independent of the size of the
list. This is because in all the statements we always instantiate a
new Node, check the size of the list, rearrange the pointers of the
head and tail properties (and in some cases the next property of
the tail). Finally, we end by incrementing the value of the size
property.
"""
# Create a new node to hold the given item
new_node = Node(item)
# Check if this linked list is empty
if self.is_empty():
# Assign head to new node
self.head = new_node
else:
# Otherwise insert new node after tail
self.tail.next = new_node
# Update tail to new node regardless
self.tail = new_node
# Increment the size property
self.size += 1
def prepend(self, item):
"""Insert the given item at the head of this linked list.
Best and worst case running time: O(1)
Similar to the append method, the runtime of the prepend operation
is independent of the size of the list. In all scenarios, we merely
need to make a new node, rearrange the pointers of the head and
tail, and increment the size of the list.
"""
# Create a new node to hold the given item
new_node = Node(item)
# Check if this linked list is empty
if self.is_empty():
# Assign tail to new node
self.tail = new_node
else:
# Otherwise insert new node before head
new_node.next = self.head
# Update head to new node regardless
self.head = new_node
# Increment the size property
self.size += 1
def find(self, quality):
"""Return an item from this linked list satisfying the given quality.
Best case running time: O(1) if item is near the head of the list.
Worst case running time: O(n) if item is near the tail of the list
or not present and we need to loop through all n nodes in the list.
"""
# Start at the head node
node = self.head # Constant time to assign a variable reference
# Loop until the node is None, which is one node too far past the tail
while node is not None: # Up to n iterations if we don't exit early
# Check if this node's data satisfies the given quality function
if quality(node.data): # Constant time to call quality function
# We found data satisfying the quality function, so exit early
return node.data # Constant time to return data
# Skip to the next node
node = node.next # Constant time to reassign a variable
# We never found data satisfying quality, but have to return something
return None # Constant time to return None
def replace(self, old_item, new_item):
"""Replace the given old_item in this linked list with given new_item
using the same node, or raise ValueError if old_item is not found.
Best case running time: O(1)
If we are trying to replace an item in a list that's empty, that
has only one item, or the item is present at the head, we have a
constant runtime that is independent of how the size of the list.
In the first scenario mentioned, we would not need any traversal of
the list; and in the other two scenarios, we would need exactly one.
Worst case running time: O(n)
If we are trying to replace an item that is present at the end of
the list, or not present in the list at all, we will need to
traverse through all the items in the list. This operation scales in
linear time, because it grows in direct proportion to the number of
items in the list (since one iteration is required for each item).
"""
# Start at the head node
node = self.head
# traverse the nodes to find old_item
while node is not None:
# Check if this node's data matches the old_item
if node.data == old_item:
# We found the node, now change the data it references
node.data = new_item
# end the method by returning None
return None
else:
# Skip to the next node
node = node.next
# old_item is not in the list, so raise a ValuError
raise ValueError(f'Item not in list: {old_item}')
def delete(self, item):
    """Delete the given item from this linked list, or raise ValueError.

    Best case running time: O(1) when the item is at the head — a single
    comparison locates it and the unlinking work is constant.
    Worst case running time: O(n) when the item is at the tail or absent,
    since the search is a linear scan over every node in the list.
    """
    # Scan for the node holding `item`, remembering the node before it.
    previous = None
    current = self.head
    while current is not None and current.data != item:
        previous, current = current, current.next
    if current is None:
        # Reached the end without finding the item.
        raise ValueError('Item not found: {}'.format(item))
    # Unlink `current` from the chain.
    if current is self.head:
        self.head = current.next
    else:
        previous.next = current.next
    # If we removed the tail, the node before it (possibly None) is the
    # new tail.
    if current is self.tail:
        self.tail = previous
    # Detach the removed node so it holds no reference into the list.
    current.next = None
    # One fewer element now.
    self.size -= 1
def __iter__(self):
    """Yield the data stored in each node, from head to tail.

    Credit to my old LinkedList class in my CS 1.2 repo:
    https://github.com/UPstartDeveloper/CS-1.2-Intro-Data-Structures/blob/master/Code/linkedlist.py
    """
    current = self.head
    while current is not None:
        # Grab the payload and advance before yielding, so the generator
        # is already positioned on the next node when it resumes.
        value = current.data
        current = current.next
        yield value
def test_linked_list():
    """Smoke-test LinkedList: append, indexed access, delete — printing
    the list state after each step."""
    ll = LinkedList()
    print(ll)

    print('Appending items:')
    for letter in ('A', 'B', 'C'):
        ll.append(letter)
        print(ll)
    print('head: {}'.format(ll.head))
    print('tail: {}'.format(ll.tail))
    print('size: {}'.format(ll.size))
    print('length: {}'.format(ll.length()))
    print(list(ll))

    print('Getting items by index:')
    for index in range(ll.size):
        print('get_at_index({}): {!r}'.format(index, ll.get_at_index(index)))

    print('Deleting items:')
    for letter in ('B', 'C', 'A'):
        ll.delete(letter)
        print(ll)
    print('head: {}'.format(ll.head))
    print('tail: {}'.format(ll.tail))
    print('size: {}'.format(ll.size))
    print('length: {}'.format(ll.length()))
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    test_linked_list()
| StarcoderdataPython |
6642185 | from scholarly_citation_finder.apps.citation.mag.IsiFieldofstudy import IsiFieldofstudy
class ScfjsonSerializer:
    '''
    Convert a publication to SCF JSON format.
    '''

    def __init__(self, database):
        '''
        :param database: name of the source database (e.g. 'mag'); used to
            apply database-specific normalisation while serializing
        '''
        self.database = database

    def serialze(self, publication, citations=None, isi_fieldofstudy=False):
        '''
        Serialize a publication.

        Note: the publication object is normalised in place (title,
        booktitle, type and the numeric fields may be modified).

        :param publication: Publication object
        :param citations: If given, these citations get serialized as well
            (one level deep — their own citation lists stay empty)
        :param isi_fieldofstudy: If true, convert field of study to ISI fields
        :return: dict in SCF JSON format
        '''
        # MAG stores titles lower-cased, so normalise them to title case.
        if self.database == 'mag':
            publication.title = publication.title.title()

        journal_name = None
        if publication.journal_id:
            journal_name = publication.journal.name

        # Prefer the conference's full name for the booktitle, falling back
        # to its short name.
        if publication.conference_id:
            if publication.conference.name:
                publication.booktitle = publication.conference.name
            else:
                publication.booktitle = publication.conference.short_name

        # Derive a BibTeX-style entry type when none is stored.
        # Bug fix: the original used two independent `if` statements here,
        # so the 'inproceedings' value assigned for conference publications
        # was always overwritten by the following booktitle check; `elif`
        # restores the intended precedence (conference > booktitle > article).
        if not publication.type:
            if publication.conference_id:
                publication.type = 'inproceedings'
            elif publication.booktitle:
                publication.type = 'incollection'
            else:
                publication.type = 'article'

        # Numeric bibliographic fields are stored as strings; coerce them
        # to integers where possible (None when conversion fails).
        if publication.volume:
            publication.volume = self.__convert_to_integer(publication.volume)
        if publication.number:
            publication.number = self.__convert_to_integer(publication.number)
        if publication.pages_from:
            publication.pages_from = self.__convert_to_integer(publication.pages_from)
        if publication.pages_to:
            publication.pages_to = self.__convert_to_integer(publication.pages_to)

        # Collect author names, skipping affiliation rows without an author.
        authors = [item.author.name
                   for item in publication.publicationauthoraffilation_set.all()
                   if item.author_id]

        keywords = [keyword.name
                    for keyword in publication.publicationkeyword_set.all()]

        result = {'type': publication.type,
                  'title': publication.title,
                  'year': publication.year,
                  'booktitle': publication.booktitle,
                  'journal_name': journal_name,
                  # NOTE: 'volumne' is a typo, but the key is deliberately
                  # kept unchanged — existing consumers of the SCF JSON
                  # format may rely on it.
                  'volumne': publication.volume,
                  'number': publication.number,
                  'pages_from': publication.pages_from,
                  'pages_to': publication.pages_to,
                  'series': publication.series,
                  'publisher': publication.publisher,
                  'isbn': publication.isbn,
                  'doi': publication.doi,
                  'abstract': publication.abstract,
                  'copyright': publication.copyright,
                  'authors': authors,
                  'keywords': keywords,
                  'citations': []}
        if isi_fieldofstudy:
            result['isi_fieldofstudy'] = self.__isi_fieldofstudy_mapping(publication)
        if citations:
            for citation in citations:
                result['citations'].append(self.serialze(citation.publication))
        return result

    # Backwards-compatible alias for the misspelled public method name.
    serialize = serialze

    def __convert_to_integer(self, value):
        '''
        Return *value* converted to int, or None when conversion fails.
        '''
        try:
            return int(value)
        except ValueError:
            return None

    def __isi_fieldofstudy_mapping(self, publication):
        '''
        Map the field of study of a publication to an ISI field of study.

        Level 1 fields are checked before level 0 fields; within the same
        level, entries with higher confidence win.

        :param publication: Publication object
        :return: ISI field of study name, or None when nothing matches
        '''
        # Sort fields of study in descending level order, i.e. check level 1
        # first and then level 0, highest confidence first.
        query = publication.publicationfieldofstudy_set.filter(level__gte=0, level__lte=1).order_by('-level', '-confidence')
        # Iterate over all level 1 and 0 fields of study
        for publication_fos in query.iterator():
            if publication_fos.level == 0 and publication_fos.fieldofstudy_name in IsiFieldofstudy.mappingLevel0:
                return IsiFieldofstudy.mappingLevel0[publication_fos.fieldofstudy_name]
            elif publication_fos.level == 1 and publication_fos.fieldofstudy_name in IsiFieldofstudy.mappingLevel1:
                return IsiFieldofstudy.mappingLevel1[publication_fos.fieldofstudy_name]
        return None
| StarcoderdataPython |
9616722 | from test_plus.test import TestCase
from ..models import Email
from .factories import EmailFactory
from foundation.offices.tests.factories import OfficeFactory
class EmailTest(TestCase):
    """Unit tests for the Email model."""

    def setUp(self):
        # One Email fixture shared by the tests below.
        self.email = EmailFactory(email="<EMAIL>")

    def test__str__(self):
        # The string form of an Email must be its address.
        self.assertEqual(str(self.email), "<EMAIL>")
| StarcoderdataPython |
3392513 | <gh_stars>1-10
# coding: utf-8
"""
Jamf Pro API
    ## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used solely for documentation purposes.   # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from jamf.configuration import Configuration
class ComputerContentCachingCacheDetail(object):
    """OpenAPI model for a single content-caching cache detail entry.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); manual edits may be overwritten when
    the client is regenerated.
    """

    # Maps attribute name -> attribute type (OpenAPI generator convention).
    openapi_types = {
        'computer_content_caching_cache_details_id': 'str',
        'category_name': 'str',
        'disk_space_bytes_used': 'int'
    }

    # Maps attribute name -> JSON key used in the API payload.
    attribute_map = {
        'computer_content_caching_cache_details_id': 'computerContentCachingCacheDetailsId',
        'category_name': 'categoryName',
        'disk_space_bytes_used': 'diskSpaceBytesUsed'
    }

    def __init__(self, computer_content_caching_cache_details_id=None, category_name=None, disk_space_bytes_used=None, local_vars_configuration=None):  # noqa: E501
        """ComputerContentCachingCacheDetail - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties below.
        self._computer_content_caching_cache_details_id = None
        self._category_name = None
        self._disk_space_bytes_used = None
        self.discriminator = None

        # Only route supplied values through the property setters.
        if computer_content_caching_cache_details_id is not None:
            self.computer_content_caching_cache_details_id = computer_content_caching_cache_details_id
        if category_name is not None:
            self.category_name = category_name
        if disk_space_bytes_used is not None:
            self.disk_space_bytes_used = disk_space_bytes_used

    @property
    def computer_content_caching_cache_details_id(self):
        """str: identifier of this cache-detail record."""
        return self._computer_content_caching_cache_details_id

    @computer_content_caching_cache_details_id.setter
    def computer_content_caching_cache_details_id(self, value):
        """Set the identifier of this cache-detail record."""
        self._computer_content_caching_cache_details_id = value

    @property
    def category_name(self):
        """str: name of the cached-content category."""
        return self._category_name

    @category_name.setter
    def category_name(self, value):
        """Set the name of the cached-content category."""
        self._category_name = value

    @property
    def disk_space_bytes_used(self):
        """int: disk space used by this category, in bytes."""
        return self._disk_space_bytes_used

    @disk_space_bytes_used.setter
    def disk_space_bytes_used(self, value):
        """Set the disk space used by this category, in bytes."""
        self._disk_space_bytes_used = value

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(value):
            # Recursively serialize nested models inside lists and dicts.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: val.to_dict() if hasattr(val, "to_dict") else val
                        for key, val in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when their serialized dicts match."""
        if not isinstance(other, ComputerContentCachingCacheDetail):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__."""
        if not isinstance(other, ComputerContentCachingCacheDetail):
            return True
        return self.to_dict() != other.to_dict()
| StarcoderdataPython |
9707209 | <filename>tests/integration/cartography/intel/aws/test_iam.py
import cartography.intel.aws.iam
import cartography.intel.aws.permission_relationships
import tests.data.aws.iam
# Fixture constants shared by the IAM integration tests below.
TEST_ACCOUNT_ID = '000000000000'
TEST_REGION = 'us-east-1'
TEST_UPDATE_TAG = 123456789
def _create_base_account(neo4j_session):
    # Create (or reuse, via MERGE) the AWSAccount node for the test account.
    neo4j_session.run("MERGE (a:AWSAccount{id:{AccountId}})", AccountId=TEST_ACCOUNT_ID)
def test_load_users(neo4j_session):
    """load_users should ingest the sample user data without raising."""
    _create_base_account(neo4j_session)
    cartography.intel.aws.iam.load_users(
        neo4j_session,
        tests.data.aws.iam.LIST_USERS['Users'],
        TEST_ACCOUNT_ID,
        TEST_UPDATE_TAG,
    )
def test_load_groups(neo4j_session):
    """load_groups should ingest the sample group data without raising."""
    cartography.intel.aws.iam.load_groups(
        neo4j_session,
        tests.data.aws.iam.LIST_GROUPS['Groups'],
        TEST_ACCOUNT_ID,
        TEST_UPDATE_TAG,
    )
def test_load_roles(neo4j_session):
    """load_roles should ingest the sample role data without raising."""
    cartography.intel.aws.iam.load_roles(
        neo4j_session,
        tests.data.aws.iam.LIST_ROLES['Roles'],
        TEST_ACCOUNT_ID,
        TEST_UPDATE_TAG,
    )
def test_load_roles_creates_trust_relationships(neo4j_session):
    """Loading roles must create TRUSTS_AWS_PRINCIPAL edges in the graph."""
    cartography.intel.aws.iam.load_roles(
        neo4j_session,
        tests.data.aws.iam.LIST_ROLES['Roles'],
        TEST_ACCOUNT_ID,
        TEST_UPDATE_TAG,
    )
    # Fetch every TRUSTS_AWS_PRINCIPAL relationship from Neo4j.
    result = neo4j_session.run(
        """
        MATCH (n1:AWSRole)-[:TRUSTS_AWS_PRINCIPAL]->(n2:AWSPrincipal) RETURN n1.arn, n2.arn;
        """,
    )
    # The (role ARN, principal ARN) pairs we expect the loader to create.
    expected = {
        ('arn:aws:iam::000000000000:role/example-role-0', 'arn:aws:iam::000000000000:root'),
        ('arn:aws:iam::000000000000:role/example-role-1', 'arn:aws:iam::000000000000:role/example-role-0'),
        ('arn:aws:iam::000000000000:role/example-role-2', 'ec2.amazonaws.com'),
        ('arn:aws:iam::000000000000:role/example-role-3', 'arn:aws:iam::000000000000:saml-provider/ADFS'),
    }
    # Collect the pairs actually present in the graph.
    actual = set()
    for record in result:
        actual.add((record['n1.arn'], record['n2.arn']))
    assert actual == expected
def test_load_inline_policy(neo4j_session):
    """load_policy should ingest an inline group policy without raising."""
    policy_id = "arn:aws:iam::000000000000:group/example-group-0/example-group-0/inline_policy/group_inline_policy"
    cartography.intel.aws.iam.load_policy(
        neo4j_session,
        policy_id,
        "group_inline_policy",
        "inline",
        "arn:aws:iam::000000000000:group/example-group-0",
        TEST_UPDATE_TAG,
    )
def test_load_inline_policy_data(neo4j_session):
    """load_policy_statements should ingest inline statements without raising."""
    policy_id = "arn:aws:iam::000000000000:group/example-group-0/example-group-0/inline_policy/group_inline_policy"
    cartography.intel.aws.iam.load_policy_statements(
        neo4j_session,
        policy_id,
        "group_inline_policy",
        tests.data.aws.iam.INLINE_POLICY_STATEMENTS,
        TEST_UPDATE_TAG,
    )
def test_map_permissions(neo4j_session):
    """Syncing permission relationships should map one CAN_READ edge."""
    # Insert an S3 bucket for the permission mapping to target.
    neo4j_session.run(
        """
        MERGE (s3:S3Bucket{arn:'arn:aws:s3:::test_bucket'})<-[:RESOURCE]-(a:AWSAccount{id:{AccountId}})
        """, AccountId=TEST_ACCOUNT_ID,
    )
    cartography.intel.aws.permission_relationships.sync(
        neo4j_session,
        TEST_ACCOUNT_ID,
        TEST_UPDATE_TAG,
        {"permission_relationship_file": "cartography/data/permission_relationships.yaml"},
    )
    rows = neo4j_session.run("MATCH ()-[r:CAN_READ]->() RETURN count(r) as rel_count")
    assert rows
    for row in rows:
        assert row["rel_count"] == 1
| StarcoderdataPython |
8114450 | <gh_stars>0
"""
Première tentative d'implémenter A* pour le projet ASD1-Labyrinthes.
On part d'une grille rectangulaire. Chaque case est un "noeud". Les
déplacements permis sont verticaux et horizontaux par pas de 1, représentant
des "arêtes" avec un coût de 1.
Tout est basé sur une grille rectangulaire.
L'objet de base est une cellule, représentée par un tuple (row, col, cost), où
(row, col) sont des coordonnées dans la grille et cost le coût réel pour
arriver jusqu'à cette cellule depuis le départ, s'il est déjà connu, None
sinon.
Author: Dalker (<EMAIL>)
Start Date: 2021.04.06
"""
import logging as log
import matplotlib.pyplot as plt
import architecte
class Grid():
    """
    Grid to be solved by the algorithm (an actual maze or anything else).

    NB: the grid is immutable and knows nothing about costs, so it only
    deals with cells represented as (row, col) tuples, while also
    accepting (row, col, _) as input to its methods.

    Attributes:
    - ascii: ASCII representation of the grid
    - n_rows: number of rows
    - n_cols: number of columns
    - passable: list of lists of booleans
      (True: walkable, False: obstacle)
    - in_: (row, col) tuple of the entrance [NB: `in` is a reserved word]
    - out: (row, col) tuple of the exit
    """

    def __init__(self, ascii_grid):
        """
        Build the grid from its string representation.

        The input grid must use the symbols:
        '#' for an obstacle
        'I' for the entrance
        'O' for the exit
        Any other character is interpreted as walkable.
        """
        self.ascii = ascii_grid.strip()
        lines = self.ascii.split("\n")
        self.n_rows = len(lines)
        self.n_cols = len(lines[0])
        assert all(len(line) == self.n_cols for line in lines), \
            "la grille devrait être rectangulaire"
        log.debug("created grid with %d rows and %d cols",
                  self.n_rows, self.n_cols)
        self.passable = [[symbol != "#" for symbol in line]
                         for line in lines]
        # Locate the entrance and the exit.
        for r, line in enumerate(lines):
            for c, symbol in enumerate(line):
                if symbol == "I":
                    self.in_ = (r, c)
                elif symbol == "O":
                    self.out = (r, c)

    def __str__(self):
        """Return the ASCII view of the grid."""
        return self.ascii

    def __contains__(self, cell):
        """Tell whether the cell lies inside the grid and is walkable."""
        r, c, *_ = cell  # ignore an optional trailing cost component
        return (0 <= r < self.n_rows
                and 0 <= c < self.n_cols
                and self.passable[r][c])

    def add_path(self, path):
        """Overlay *path* onto the grid's ASCII representation with '*'."""
        old_rows = self.ascii.split("\n")
        marked = []
        for r in range(self.n_rows):
            chars = []
            for c in range(self.n_cols):
                chars.append("*" if (r, c) in path else old_rows[r][c])
            marked.append("".join(chars))
        self.ascii = "\n".join(marked)
class Fringe():
    """
    Set of cells waiting to be processed, with cost bookkeeping.

    A cell is a (row, col) tuple. For each cell the fringe stores the real
    cost to reach it and an estimated total cost (heuristic), which must be
    supplied when the cell is added.

    The implementation uses plain dicts: extraction of a minimum-priority
    cell is O(n), but lookup and priority updates are O(1). It can later be
    swapped for a real priority queue without changing the interface.

    Attributes:
    - cost: real cost to reach each cell
    - heuristic: estimated total cost of a path through each pending cell
    """

    def __init__(self, first_cell):
        """
        Initialise the fringe.

        Input: a (row, col) tuple for the maze entrance, stored at cost 0.
        """
        self.cost = {first_cell: 0}
        self.heuristic = {first_cell: 0}
        self._predecessor = {first_cell: None}

    def append(self, cell, real_cost, estimated_cost, predecessor=None):
        """
        Add a cell to the fringe, or update it.

        If the cell is already known, it is only updated when the new real
        cost beats the recorded one (a better route was found).

        Inputs:
        - cell: (row, col) tuple
        - real_cost: real cost to reach this cell
        - estimated_cost: estimated total cost of a path through this cell
        - predecessor: previous cell on the route reaching this cell
        """
        if cell in self.cost and self.cost[cell] <= real_cost:
            return  # the recorded route is at least as good; keep it
        self.cost[cell] = real_cost
        self.heuristic[cell] = estimated_cost
        self._predecessor[cell] = predecessor

    def pop(self):
        """
        Extract a lowest-estimate cell together with its predecessor.

        Output: (cell, predecessor, cost) tuple, or (None, None, None)
        when the fringe is empty.
        """
        try:
            best = min(self.heuristic, key=self.heuristic.get)
        except ValueError:  # empty fringe
            return None, None, None
        del self.heuristic[best]
        return best, self._predecessor[best], self.cost[best]
class AstarView():
    """
    Live matplotlib view of the progress of the A* algorithm.

    Attributes:
    - grid: Grid
    - fringe: Fringe
    - closed: dict of already-processed cells

    All three are references to the objects manipulated by the running
    algorithm, so their changes become visible automatically.
    """

    def __init__(self, grid, fringe, closed):
        """Set up the figure showing *grid*, *fringe* and *closed*."""
        self.grid = grid
        self.fringe = fringe
        self.closed = closed
        _, self._axes = plt.subplots()
        # Colour scale anchored to twice the Manhattan entrance-exit span.
        self.max_color = 2 * sum(abs(grid.in_[j] - grid.out[j])
                                 for j in (0, 1))
        # Obstacles get an off-scale value; walkable cells start at 0.
        self._matrix = []
        for row in range(grid.n_rows):
            line = []
            for col in range(grid.n_cols):
                if self.grid.passable[row][col]:
                    line.append(0)
                else:
                    line.append(2 * self.max_color)
            self._matrix.append(line)
        self._image = self._axes.matshow(self._matrix)
        self._axes.set_axis_off()
        self.update()

    def update(self):
        """Refresh the view with the current fringe estimates."""
        for (row, col), estimate in self.fringe.heuristic.items():
            self._matrix[row][col] = estimate
        self._image.set_data(self._matrix)
        plt.pause(0.00001)

    def showpath(self, path):
        """Paint the found path and keep the figure on screen."""
        for row, col in path:
            self._matrix[row][col] = self.max_color
        self._image.set_data(self._matrix)
        plt.show()
def astar(grid, view=False):
    """
    Find an optimal path through a grid with the A* algorithm.

    Input: a Grid object (and optionally view=True for a live display).
    Output: a list of successive (row, col) cells forming a path, or None
    when the grid cannot be solved.
    """
    closed = dict()            # processed cell -> its predecessor
    fringe = Fringe(grid.in_)  # queue of cells awaiting processing
    if view:
        astar_view = AstarView(grid, fringe, closed)
    while True:
        cell, came_from, cost = fringe.pop()
        if cell is None:
            # Fringe exhausted without reaching the exit: unsolvable.
            log.debug("Le labyrinthe ne peut pas être résolu.")
            return None
        if cell == grid.out:
            log.debug("Found exit!")
            # Rebuild the path by walking the predecessor chain backwards.
            path = [cell]
            step = came_from
            while step in closed:
                path.append(step)
                step = closed[step]
            path.reverse()
            if view:
                astar_view.showpath(path)
            return path
        # Every edge costs 1; neighbours are one step further.
        cost += 1
        for dr, dc in ((0, 1), (0, -1), (-1, 0), (1, 0)):
            neighbour = (cell[0] + dr, cell[1] + dc)
            if neighbour not in grid or neighbour in closed:
                continue
            # Manhattan distance to the exit as admissible heuristic.
            remaining = abs(neighbour[0] - grid.out[0]) + abs(neighbour[1] - grid.out[1])
            fringe.append(neighbour, cost, cost + remaining, predecessor=cell)
            if view:
                astar_view.update()
        closed[cell] = came_from
        if view:
            astar_view.update()
def test(asciimaze, view=False):
    """Run the A* solver on the given ASCII grid and print the outcome."""
    grid = Grid(asciimaze)
    print("Trying to find an A* path in grid:")
    print(grid)
    path = astar(grid, view)
    if path is None:
        print("No A* solution found.")
    else:
        grid.add_path(path)
        print("A* solution found:")
        print(grid)
    print()
# Manual demo entry point: run a couple of sample mazes.
if __name__ == "__main__":
    log.basicConfig(level=log.INFO)
    print("* starting unsolvable test *")
    test("#I#O#")
    print("* starting basic test *")
    # test(architecte.GRILLE10x10, view=True)
    test(architecte.GRILLE20x20, view=True)
    # test(architecte.GRILLE30x30, view=True)
| StarcoderdataPython |
6402333 | # Generated by Django 2.1.2 on 2018-11-01 08:00
import autoslug.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.2 (`makemigrations`). Creates the initial
    # schema for the records app: the Folder and File models. Do not edit by
    # hand once this migration has been applied anywhere.

    # Marks this as the first migration of the app.
    initial = True

    dependencies = [
        # Both models have an `account` foreign key to the user model, so the
        # user model's migrations must run first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hospital_name', models.CharField(max_length=255)),
                # Uploaded files are stored under a per-date directory.
                ('file', models.FileField(upload_to='records/%Y-%m-%d')),
                ('created', models.DateTimeField(auto_now_add=True)),
                # Slug derived from hospital_name, unique per creation day.
                ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='hospital_name', unique_with=('created__day',))),
                ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'File',
                'verbose_name_plural': 'Files',
                'ordering': ('-created',),
            },
        ),
        migrations.CreateModel(
            name='Folder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=100)),
                ('gender', models.CharField(choices=[('MA', 'Male'), ('FE', 'Female'), ('NA', 'Not Applicable')], default='MA', max_length=2)),
                ('date_of_birth', models.DateField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                # Slug derived from full_name, unique per creation day.
                ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='full_name', unique_with=('created__day',))),
                ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Folder',
                'verbose_name_plural': 'Folders',
                'ordering': ('-created',),
            },
        ),
        # Added separately because Folder is created after File above.
        migrations.AddField(
            model_name='file',
            name='folder',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='records.Folder'),
        ),
    ]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.