max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
awxkit/test/cli/test_client.py | vrevelas/awx | 0 | 17500 | <reponame>vrevelas/awx<filename>awxkit/test/cli/test_client.py<gh_stars>0
from io import StringIO
import pytest
from requests.exceptions import ConnectionError
from awxkit.cli import run, CLI
class MockedCLI(CLI):
    """CLI stand-in that never performs a network round-trip to AWX."""

    def fetch_version_root(self):
        """No-op: a mocked client has no API root to fetch."""
        pass

    @property
    def v2(self):
        """Stand in for the real v2 endpoint with another mocked client."""
        return MockedCLI()

    @property
    def json(self):
        """Minimal payload advertising a single (empty) resource listing."""
        return {'users': None}
@pytest.mark.parametrize('help_param', ['-h', '--help'])
def test_help(capfd, help_param):
    """Either help flag should exit and print a usage summary."""
    with pytest.raises(SystemExit):
        run(['awx {}'.format(help_param)])
    out, err = capfd.readouterr()
    expected_fragments = (
        'usage:',
        '--conf.host https://example.awx.org]',
        '-v, --verbose',
    )
    for fragment in expected_fragments:
        assert fragment in out
def test_connection_error(capfd):
    """Connecting without a reachable host raises ConnectionError."""
    client = CLI()
    client.parse_args(['awx'])
    with pytest.raises(ConnectionError):
        client.connect()
@pytest.mark.parametrize('resource', ['', 'invalid'])
def test_list_resources(capfd, resource):
    # if a valid resource isn't specified, the CLI should print --help
    client = MockedCLI()
    client.parse_args(['awx {}'.format(resource)])
    client.connect()
    client.parse_resource()
    out, err = capfd.readouterr()
    required = (
        'usage:',
        '--conf.host https://example.awx.org]',
        '-v, --verbose',
    )
    for fragment in required:
        assert fragment in out
| 2.3125 | 2 |
tests/python/pants_test/tasks/test_what_changed.py | areitz/pants | 0 | 17501 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.from_target import FromTarget
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.what_changed import WhatChanged
from pants.backend.core.wrapped_globs import RGlobs
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.source_root import SourceRoot
from pants.goal.workspace import Workspace
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseWhatChangedTest(ConsoleTaskTestBase):
    """Shared fixture for WhatChanged console-task tests.

    Registers the BUILD-file aliases the synthetic repos rely on and
    provides helpers to run the task against a mocked SCM workspace.
    """

    @property
    def alias_groups(self):
        """BUILD-file symbols made available to the test BUILD files."""
        return BuildFileAliases.create(
            targets={
                'java_library': JavaLibrary,
                'python_library': PythonLibrary,
                'jar_library': JarLibrary,
                'unpacked_jars': UnpackedJars,
                'resources': Resources,
                'java_thrift_library': JavaThriftLibrary,
                'java_protobuf_library': JavaProtobufLibrary,
                'python_thrift_library': PythonThriftLibrary,
            },
            context_aware_object_factories={
                'source_root': SourceRoot.factory,
                'rglobs': RGlobs,
                'from_target': FromTarget,
            },
            objects={
                'jar': JarDependency,
            }
        )

    @classmethod
    def task_type(cls):
        """The task under test."""
        return WhatChanged

    def assert_console_output(self, *output, **kwargs):
        # Inject defaults for options every invocation needs so individual
        # tests only have to spell out what they actually care about.
        options = {'spec_excludes': [], 'exclude_target_regexp': []}
        if 'options' in kwargs:
            options.update(kwargs['options'])
        kwargs['options'] = options
        super(BaseWhatChangedTest, self).assert_console_output(*output, **kwargs)

    def workspace(self, files=None, parent=None, diffspec=None, diff_files=None):
        """Return a mock Workspace reporting the given touched/diffed files.

        The mock also asserts that the task queried the expected parent
        revision and diffspec.
        """
        # NOTE: `_` plays the role of the mock methods' own `self`; the
        # closure deliberately captures the outer test instance as `self`.
        class MockWorkspace(Workspace):
            def touched_files(_, p):
                self.assertEqual(parent or 'HEAD', p)
                return files or []
            def changes_in(_, ds):
                self.assertEqual(diffspec, ds)
                return diff_files or []
        return MockWorkspace()
class WhatChangedTestBasic(BaseWhatChangedTest):
    """Smoke tests that need no BUILD files on disk."""

    def test_nochanges(self):
        """An untouched workspace produces no output."""
        self.assert_console_output(workspace=self.workspace())

    def test_parent(self):
        """The --changes-since revision is forwarded to the workspace."""
        opts = {'changes_since': '42'}
        self.assert_console_output(options=opts,
                                   workspace=self.workspace(parent='42'))

    def test_files(self):
        """With --files, the raw paths are echoed rather than targets."""
        touched = ['a/b/c', 'd', 'e/f']
        self.assert_console_output(
            'a/b/c',
            'd',
            'e/f',
            options={'files': True},
            workspace=self.workspace(files=touched)
        )
class WhatChangedTest(BaseWhatChangedTest):
    """Exercises WhatChanged against a small synthetic repository layout."""

    def setUp(self):
        # Lay out a build graph covering the target types and edge cases the
        # individual tests below depend on.
        super(WhatChangedTest, self).setUp()
        self.add_to_build_file('root', dedent("""
        source_root('src/py', python_library, resources)
        source_root('resources/a1', resources)
        """))
        self.add_to_build_file('root/src/py/a', dedent("""
        python_library(
          name='alpha',
          sources=['b/c', 'd'],
          resources=['test.resources']
        )
        jar_library(
          name='beta',
          jars=[
            jar(org='gamma', name='ray', rev='1.137.bruce_banner')
          ]
        )
        """))
        self.add_to_build_file('root/src/py/1', dedent("""
        python_library(
          name='numeric',
          sources=['2']
        )
        """))
        # A three-deep dependency chain a <- b <- c for the dependee tests.
        self.add_to_build_file('root/src/py/dependency_tree/a', dedent("""
        python_library(
          name='a',
          sources=['a.py'],
        )
        """))
        self.add_to_build_file('root/src/py/dependency_tree/b', dedent("""
        python_library(
          name='b',
          sources=['b.py'],
          dependencies=['root/src/py/dependency_tree/a']
        )
        """))
        self.add_to_build_file('root/src/py/dependency_tree/c', dedent("""
        python_library(
          name='c',
          sources=['c.py'],
          dependencies=['root/src/py/dependency_tree/b']
        )
        """))
        # Two targets deliberately sharing the same source file.
        self.add_to_build_file('root/src/thrift', dedent("""
        java_thrift_library(
          name='thrift',
          sources=['a.thrift']
        )
        python_thrift_library(
          name='py-thrift',
          sources=['a.thrift']
        )
        """))
        self.add_to_build_file('root/resources/a', dedent("""
        resources(
          name='a_resources',
          sources=['a.resources']
        )
        """))
        self.add_to_build_file('root/src/java/a', dedent("""
        java_library(
          name='a_java',
          sources=rglobs("*.java"),
        )
        """))
        self.add_to_build_file('root/3rdparty/BUILD.twitter', dedent("""
        jar_library(
          name='dummy',
          jars=[
            jar(org='foo', name='ray', rev='1.45')
          ])
        """))
        self.add_to_build_file('root/3rdparty/BUILD', dedent("""
        jar_library(
          name='dummy1',
          jars=[
            jar(org='foo1', name='ray', rev='1.45')
          ])
        """))
        # This is a directory that might confuse case insensitive file systems (on macs for example).
        # It should not be treated as a BUILD file.
        self.create_dir('root/scripts/a/build')
        self.add_to_build_file('root/scripts/BUILD', dedent("""
        java_library(
          name='scripts',
          sources=['a/build/scripts.java'],
        )
        """))

    def test_spec_excludes(self):
        """Targets under an excluded spec are not reported."""
        self.assert_console_output(
            'root/src/py/a:alpha',
            options={'spec_excludes': 'root/src/py/1'},
            workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d'])
        )

    def test_owned(self):
        """Each changed file maps to the target owning it."""
        self.assert_console_output(
            'root/src/py/a:alpha',
            'root/src/py/1:numeric',
            workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'])
        )

    def test_multiply_owned(self):
        """A file owned by two targets reports both of them."""
        self.assert_console_output(
            'root/src/thrift:thrift',
            'root/src/thrift:py-thrift',
            workspace=self.workspace(files=['root/src/thrift/a.thrift'])
        )

    def test_build(self):
        """Touching a BUILD file flags every target defined in it."""
        self.assert_console_output(
            'root/src/py/a:alpha',
            'root/src/py/a:beta',
            workspace=self.workspace(files=['root/src/py/a/BUILD'])
        )

    def test_resource_changed(self):
        self.assert_console_output(
            'root/src/py/a:alpha',
            workspace=self.workspace(files=['root/src/py/a/test.resources'])
        )

    def test_resource_changed_for_java_lib(self):
        self.assert_console_output(
            'root/resources/a:a_resources',
            workspace=self.workspace(files=['root/resources/a/a.resources'])
        )

    def test_build_sibling(self):
        """BUILD.<suffix> files are recognized as build files too."""
        self.assert_console_output(
            'root/3rdparty:dummy',
            workspace=self.workspace(files=['root/3rdparty/BUILD.twitter'])
        )

    def test_resource_type_error(self):
        # A malformed target (non-string resource entry) should surface as an
        # exception rather than being silently skipped.
        self.add_to_build_file('root/resources/a1', dedent("""
        java_library(
          name='a1',
          sources=['a1.test'],
          resources=[1]
        )
        """))
        self.assert_console_raises(
            Exception,
            workspace=self.workspace(files=['root/resources/a1/a1.test'])
        )

    def test_build_directory(self):
        # This should ensure that a directory named the same as build files does not cause an exception.
        self.assert_console_output(
            'root/scripts:scripts',
            workspace=self.workspace(files=['root/scripts/a/build', 'root/scripts/a/build/scripts.java'])
        )

    def test_fast(self):
        self.assert_console_output(
            'root/src/py/a:alpha',
            'root/src/py/1:numeric',
            options={'fast': True},
            workspace=self.workspace(
                files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
            ),
        )

    def test_diffspec(self):
        """--diffspec consults changes_in rather than touched_files."""
        self.assert_console_output(
            'root/src/py/a:alpha',
            'root/src/py/1:numeric',
            options={'diffspec': '42'},
            workspace=self.workspace(
                diffspec='42',
                diff_files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
            ),
        )

    def test_diffspec_removed_files(self):
        # The changed file no longer exists on disk; ownership is resolved via
        # the rglobs of a_java.
        self.assert_console_output(
            'root/src/java/a:a_java',
            options={'diffspec': '42'},
            workspace=self.workspace(
                diffspec='42',
                diff_files=['root/src/java/a/b/c/Foo.java'],
            ),
        )

    def test_include_dependees(self):
        # Default: only the directly owning target.
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )
        # 'direct' adds immediate dependees.
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            'root/src/py/dependency_tree/b:b',
            options={'include_dependees': 'direct'},
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )
        # 'transitive' walks the whole dependee closure.
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            'root/src/py/dependency_tree/b:b',
            'root/src/py/dependency_tree/c:c',
            options={'include_dependees': 'transitive'},
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )

    def test_exclude(self):
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            'root/src/py/dependency_tree/b:b',
            'root/src/py/dependency_tree/c:c',
            options={'include_dependees': 'transitive'},
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )
        # --exclude-target-regexp filters targets out of the report.
        self.assert_console_output(
            'root/src/py/dependency_tree/a:a',
            'root/src/py/dependency_tree/c:c',
            options={'include_dependees': 'transitive', 'exclude_target_regexp': [':b']},
            workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
        )

    def test_deferred_sources(self):
        # Targets whose sources come from_target() another target should be
        # reported along with the whole from_target chain.
        self.add_to_build_file('root/proto', dedent("""
        java_protobuf_library(name='unpacked_jars',
          sources=from_target(':external-source'),
        )
        unpacked_jars(name='external-source',
          libraries=[':external-source-jars'],
          include_patterns=[
            'com/squareup/testing/**/*.proto',
          ],
        )
        jar_library(name='external-source-jars',
          jars=[
            jar(org='com.squareup.testing.protolib', name='protolib-external-test', rev='0.0.2'),
          ],
        )
        """))
        self.assert_console_output(
            'root/proto:unpacked_jars',
            'root/proto:external-source',
            'root/proto:external-source-jars',
            workspace=self.workspace(files=['root/proto/BUILD'])
        )
| 1.632813 | 2 |
utils/image_utils.py | novicasarenac/car-racing-rl | 10 | 17502 | import PIL
import numpy as np
def to_grayscale(img):
    """Convert an RGB image array to grayscale luminance.

    Uses the ITU-R BT.601 luma weights (0.299, 0.587, 0.114).  The blue
    coefficient was previously 0.144 — a transposition typo: the three
    weights must sum to exactly 1.0.

    :param img: array of shape (..., 3) with channels ordered R, G, B.
    :return: array with the trailing channel axis contracted away.
    """
    return np.dot(img, [0.299, 0.587, 0.114])
def zero_center(img):
    """Shift 8-bit pixel values so they are roughly centered on zero."""
    midpoint = 127.0
    return img - midpoint
def crop(img, bottom=12, left=6, right=6):
    """Trim the given margins off a 2-D image and return the cropped view."""
    rows, cols = img.shape
    return img[:rows - bottom, left:cols - right]
def save(img, path):
    """Write *img* (a numpy array) to *path* via Pillow.

    Bug fix: the module only does ``import PIL``, which does not load the
    ``PIL.Image`` submodule, so ``PIL.Image.fromarray`` could raise
    AttributeError.  Import the submodule explicitly here instead.
    """
    from PIL import Image
    pil_img = Image.fromarray(img)
    pil_img.save(path)
| 3.015625 | 3 |
sharing_groups/apps.py | sthagen/misp-hub | 2 | 17503 | <gh_stars>1-10
from django.apps import AppConfig
class SharingGroupsConfig(AppConfig):
    """Django application configuration for the ``sharing_groups`` app."""
    # Dotted module path Django uses to locate this application.
    name = 'sharing_groups'
| 1.179688 | 1 |
GoogleCloud/backend.py | ryanjsfx2424/HowToNFTs | 0 | 17504 | <filename>GoogleCloud/backend.py
## backend.py
"""
The purpose of this script is to continuously monitor the blockchain to
1) determine if a holder aquires or loses an NFT:
2) if they do, generate a new image/movie for the tokens they hold,
3) upload the new image/movie to the hosting service
4) update the metadata file
Repeat :)
(The above ordering matters!)
"""
## use python3!!!
import os
import io
import json
from web3 import Web3
## PARAMETERS
DEPLOYER_ADDRESS = "0x01656d41e041b50fc7c1eb270f7d891021937436"
INFURA_URL = "https://rinkeby.infura.io/v3/37de3193ccf345fe810932c3d0f103d8"
EXT_IMG = ".mp4"        # NOTE(review): not referenced below — confirm before removing
EXT_METADATA = ".json"  # NOTE(review): not referenced below — confirm before removing
ADDRESS = "0xb552E0dDd94EA72DBc089619115c81529cd8CA70" # address for deployed smart contract
## web3 stuff
w3 = Web3(Web3.HTTPProvider(INFURA_URL))
with open("../contract/abi_v020.json", "r") as fid:
    rl = "".join(fid.readlines())
    abi = json.loads(rl)
# end with open
## goal is to update token URI based on how many are held
## by that owner (but deployer doesn't count!)
contract = w3.eth.contract(address=ADDRESS, abi=abi)
totalSupply = contract.functions.totalSupply().call()
print("total supply: ", totalSupply)
for ii in range(totalSupply):
    token = contract.functions.tokenByIndex(ii).call()
    owner = contract.functions.ownerOf(token).call()
    tokenList = contract.functions.walletOfOwner(owner).call()
    ## string comparison fails for some mysterious reason
    # Tokens still held by the deployer are treated as single-token wallets.
    if int(owner,16) == int(DEPLOYER_ADDRESS,16):
        tokenList = [ii+1]
    # end if
    print("token: ", token)
    print("owner: ", owner)
    print("tokenList: ", tokenList)
    # Combined name like "3_7_12": this token first, then the owner's others.
    newTokenName = str(token)
    for jj in range(len(tokenList)):
        if tokenList[jj] != token:
            newTokenName += "_" + str(tokenList[jj])
        # end if
    # end for jj
    print("newTokenName: ", newTokenName)
    ## first, check if metadata on hosting service has newTokenName.
    ## if so, we're good! If not, update it!
    # A marker file foo<name>.txt in the bucket records the last-published
    # combined name for this token.
    old_foos = []
    metadata_correct = False
    os.system("gsutil ls gs://how-to-nfts-metadata/foo" + str(token) + ".txt"
              + " > foo_file0.txt")
    os.system("gsutil ls gs://how-to-nfts-metadata/foo" + str(token) + "_*.txt"
              + " > foo_file1.txt")
    for jj in range(2):
        with open("foo_file" + str(jj) + ".txt", "r") as fid:
            for line in fid:
                old_foos.append(line)
                if "foo" + newTokenName + ".txt" in line:
                    metadata_correct = True
                # end if
            # end for
        # end with
        os.system("rm foo_file" + str(jj) + ".txt")
    # end for jj
    print("old_foos: ", old_foos)
    if metadata_correct:
        print("metadata correct (supposedly) so skipping")
        continue
    # end if
    if len(old_foos) > 1:
        print("error! only expected one old foo file.")
        raise  # NOTE(review): bare `raise` outside a handler -> RuntimeError, not a real error type
    # end if
    old_foo = old_foos[0][:-1] # strip trailing newline character
    old_foo = old_foo.split("metadata/")[1]
    print("old_foo: ", old_foo)
    ## evidently metadata is not correct...
    ## first, we generate a new movie (if needed) and rsync with
    ## the GCP bucket.
    ## then, we'll update the metadata file, remove the old foo
    ## file and touch a new one
    ## then we'll rsync the metadata folder with the bucket.
    target = "../nftmp4s/HowToKarate" + str(token) + ".mp4"
    destination = "../nftmp4s/HowToKarate" + newTokenName + ".mp4"
    if not os.path.exists(destination):
        os.system("cp " + target + " " + destination)
        for jj in range(len(tokenList)):
            if tokenList[jj] != token:
                print("destination: ", destination)
                print("tokenList[jj]: ", tokenList[jj])
                # NOTE(review): 'nftmp4s/...' here lacks the '../' prefix used
                # everywhere else — verify which working directory is intended.
                os.system('ffmpeg -y -i ' + destination + ' -i nftmp4s/HowToKarate' + str(tokenList[jj]) + '.mp4' + \
                          ' -filter_complex "[0:v] [1:v]' + \
                          ' concat=n=2:v=1 [v]"' + \
                          ' -map "[v]" ' + "concat.mp4")
                os.system("mv concat.mp4 " + destination)
            # end if
        # end for jj
        ## note, can rsync in parallel via rsync -m...
        os.system("gsutil rsync ../nftmp4s/ gs://how-to-nfts-data/")
    # end if
    ## next, we'll update the metadata file, remove the old foo
    ## file and touch a new one
    ## then we'll rsync the metadata folder with the bucket.
    os.system("cp ../metadata/" + str(token) + ".json temp.json")
    with open("../metadata/" + str(token) + ".json", "w") as fid_write:
        with open("temp.json", "r") as fid_read:
            for line in fid_read:
                # Rewrite only the "image" field to point at the new movie.
                if '"image":' in line:
                    line = line.split("HowToKarate")[0] + "HowToKarate" + \
                           str(newTokenName) + '.mp4",\n'
                # end if
                fid_write.write(line)
            # end for line
        # end with open read
    # end with open write
    os.system("rm temp.json")
    os.system("touch ../metadata/foo" + str(newTokenName) + ".txt")
    os.system("rm ../metadata/" + old_foo)
    ## last, we need to update the _metadata file and then rsync.
    # NOTE(review): the collection size 24 (range(1,25)) is hard-coded here
    # even though totalSupply is queried above — confirm they always agree.
    with open("../metadata/_metadata.json", "w") as fid_write:
        fid_write.write("{\n")
        for jj in range(1,25):
            with open("../metadata/" + str(jj) + ".json", "r") as fid_read:
                for line in fid_read:
                    # Turn each file's closing "}" line into "}," except for
                    # the last file, so the concatenation stays valid JSON.
                    if "}" in line and len(line) == 2 and jj != 24:
                        line = "},\n"
                    # end if
                    fid_write.write(line)
                # end for
            # end with open
        fid_write.write("}")
    # end with open
    os.system("gsutil rsync -d ../metadata/ gs://how-to-nfts-metadata/")
# end for ii
## end backend.py
| 2.609375 | 3 |
dependencies/pyffi/formats/tga/__init__.py | korri123/fnv-blender-niftools-addon | 4 | 17505 | """
:mod:`pyffi.formats.tga` --- Targa (.tga)
=========================================
Implementation
--------------
.. autoclass:: TgaFormat
:show-inheritance:
:members:
Regression tests
----------------
Read a TGA file
^^^^^^^^^^^^^^^
>>> # check and read tga file
>>> import os
>>> from os.path import dirname
>>> dirpath = __file__
>>> for i in range(4): #recurse up to root repo dir
... dirpath = dirname(dirpath)
>>> repo_root = dirpath
>>> format_root = os.path.join(repo_root, 'tests', 'formats', 'tga')
>>> file = os.path.join(format_root, 'test.tga').replace("\\\\", "/")
>>> stream = open(file, 'rb')
>>> data = TgaFormat.Data()
>>> data.inspect(stream)
>>> data.read(stream)
>>> stream.close()
>>> data.header.width
60
>>> data.header.height
20
Parse all TGA files in a directory tree
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> for stream, data in TgaFormat.walkData(format_root):
... try:
... # the replace call makes the doctest also pass on windows
... os_path = stream.name
... split = (os_path.split(os.sep))[-4:]
... rejoin = os.path.join(*split).replace("\\\\", "/")
... print("reading %s" % rejoin)
... except Exception:
... print(
... "Warning: read failed due corrupt file,"
... " corrupt format description, or bug.") # doctest: +REPORT_NDIFF
reading tests/formats/tga/test.tga
reading tests/formats/tga/test_footer.tga
Create a TGA file from scratch and write to file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> data = TgaFormat.Data()
>>> from tempfile import TemporaryFile
>>> stream = TemporaryFile()
>>> data.write(stream)
>>> stream.close()
"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, Python File Format Interface
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Python File Format Interface
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import struct, os, re
import pyffi.object_models.xml
import pyffi.object_models.common
import pyffi.object_models.xml.basic
import pyffi.object_models.xml.struct_
import pyffi.object_models
import pyffi.utils.graph
from pyffi.utils.graph import EdgeFilter
class TgaFormat(pyffi.object_models.xml.FileFormat):
    """This class implements the TGA format."""
    xml_file_name = 'tga.xml'
    # where to look for tga.xml and in what order:
    # TGAXMLPATH env var, or TgaFormat module directory
    xml_file_path = [os.getenv('TGAXMLPATH'), os.path.dirname(__file__)]
    # filter for recognizing tga files by extension
    RE_FILENAME = re.compile(r'^.*\.tga$', re.IGNORECASE)

    # basic types, mapped onto the generic pyffi implementations
    int = pyffi.object_models.common.Int
    uint = pyffi.object_models.common.UInt
    byte = pyffi.object_models.common.Byte
    ubyte = pyffi.object_models.common.UByte
    char = pyffi.object_models.common.Char
    short = pyffi.object_models.common.Short
    ushort = pyffi.object_models.common.UShort
    float = pyffi.object_models.common.Float
    PixelData = pyffi.object_models.common.UndecodedData

    class FooterString(pyffi.object_models.xml.basic.BasicBase):
        """The Targa footer signature (fixed 18-byte magic string)."""

        def __str__(self):
            return 'TRUEVISION-XFILE.\x00'

        def read(self, stream, data):
            """Read signature from stream.

            :param stream: The stream to read from.
            :type stream: file
            """
            signat = stream.read(18)
            if signat != self.__str__().encode("ascii"):
                raise ValueError(
                    "invalid Targa signature: expected '%s' but got '%s'"
                    %(self.__str__(), signat))

        def write(self, stream, data):
            """Write signature to stream.

            :param stream: The stream to write to.
            :type stream: file
            """
            stream.write(self.__str__().encode("ascii"))

        def get_value(self):
            """Get signature.

            :return: The signature.
            """
            return self.__str__()

        def set_value(self, value):
            """Set signature.

            :param value: The value to assign.
            :type value: str
            """
            if value != self.__str__():
                raise ValueError(
                    "invalid Targa signature: expected '%s' but got '%s'"
                    %(self.__str__(), value))

        def get_size(self, data=None):
            """Return number of bytes that the signature occupies in a file.

            :return: Number of bytes.
            """
            return 18

        def get_hash(self, data=None):
            """Return a hash value for the signature.

            :return: An immutable object that can be used as a hash.
            """
            return self.__str__()

    class Image(pyffi.utils.graph.GlobalNode):
        """The pixel payload of a TGA file: raw pixels or RLE packets."""

        def __init__(self):
            # children are either individual pixels, or RLE packets
            self.children = []

        def read(self, stream, data):
            """Read all pixel data for the dimensions given in data.header."""
            data = data  # NOTE(review): no-op assignment — likely refactoring leftover
            if data.header.image_type in (TgaFormat.ImageType.INDEXED,
                                          TgaFormat.ImageType.RGB,
                                          TgaFormat.ImageType.GREY):
                # uncompressed: exactly width * height individual pixels
                self.children = [
                    TgaFormat.Pixel(argument=data.header.pixel_size)
                    for i in range(data.header.width
                                   * data.header.height)]
                for pixel in self.children:
                    pixel.read(stream, data)
            else:
                # run-length encoded: read packets until the whole image
                # area is covered (each packet covers count + 1 pixels)
                self.children = []
                count = 0
                while count < data.header.width * data.header.height:
                    pixel = TgaFormat.RLEPixels(
                        argument=data.header.pixel_size)
                    pixel.read(stream, data)
                    self.children.append(pixel)
                    count += pixel.header.count + 1

        def write(self, stream, data):
            """Write all pixel data back to the stream."""
            data = data  # NOTE(review): no-op assignment — likely refactoring leftover
            for child in self.children:
                child.arg = data.header.pixel_size
                child.write(stream, data)

        # NOTE(review): EdgeFilter() as a default argument is a shared mutable
        # default; harmless here only if EdgeFilter instances are never mutated.
        def get_detail_child_nodes(self, edge_filter=EdgeFilter()):
            for child in self.children:
                yield child

        def get_detail_child_names(self, edge_filter=EdgeFilter()):
            for i in range(len(self.children)):
                yield str(i)

    class Data(pyffi.object_models.FileFormat.Data):
        """A whole TGA file: header, image data, and optional footer."""

        def __init__(self):
            self.header = TgaFormat.Header()
            self.image = TgaFormat.Image()
            self.footer = None # TgaFormat.Footer() is optional

        def inspect(self, stream):
            """Quick heuristic check if stream contains Targa data,
            by looking at the first 18 bytes.

            :param stream: The stream to inspect.
            :type stream: file
            """
            # XXX todo: set some of the actual fields of the header
            pos = stream.tell()
            # read header
            try:
                id_length, colormap_type, image_type, \
                colormap_index, colormap_length, colormap_size, \
                x_origin, y_origin, width, height, \
                pixel_size, flags = struct.unpack("<BBBHHBHHHHBB",
                                                  stream.read(18))
            except struct.error:
                # could not read 18 bytes
                # not a TGA file
                raise ValueError("Not a Targa file.")
            finally:
                # always restore the stream position: inspect must not consume
                stream.seek(pos)
            # check if tga type is valid
            # check pixel size
            # check width and height
            if not(image_type in (1, 2, 3, 9, 10, 11)
                   and pixel_size in (8, 24, 32)
                   and width <= 100000
                   and height <= 100000):
                raise ValueError("Not a Targa file.")
            # this looks like a tga file!

        def read(self, stream):
            """Read a tga file.

            :param stream: The stream from which to read.
            :type stream: ``file``
            """
            # read the file
            self.inspect(stream) # quick check
            # header
            self.header.read(stream, self)
            # image
            self.image.read(stream, self)
            # check if we are at the end of the file
            if not stream.read(1):
                self.footer = None
                return
            # footer: fixed 26 bytes at the very end of the file
            stream.seek(-26, os.SEEK_END)
            self.footer = TgaFormat.Footer()
            self.footer.read(stream, self)

        def write(self, stream):
            """Write a tga file.

            :param stream: The stream to write to.
            :type stream: ``file``
            """
            self.header.write(stream, self)
            self.image.write(stream, self)
            if self.footer:
                self.footer.write(stream, self)

        def get_global_child_nodes(self, edge_filter=EdgeFilter()):
            yield self.header
            yield self.image
            if self.footer:
                yield self.footer

        def get_global_child_names(self, edge_filter=EdgeFilter()):
            yield "Header"
            yield "Image"
            if self.footer:
                yield "Footer"
if __name__ == '__main__':
    # Run the doctests embedded in the module docstring.
    import doctest
    doctest.testmod()
| 2.546875 | 3 |
python/push.py | swallowstalker/postopush | 1 | 17506 | import telegram
import os
def main():
    """Send MESSAGE to CHAT_ID through a Telegram bot authorized by TOKEN.

    All three values come from environment variables.  TOKEN and CHAT_ID
    are required; fail fast with a clear error instead of letting the
    telegram client raise a cryptic exception when they are missing.
    """
    token = os.getenv("TOKEN", None)
    message = os.getenv("MESSAGE", "No message, please set MESSAGE env")
    chat_id = os.getenv("CHAT_ID", None)
    if not token:
        raise SystemExit("TOKEN env var is required")
    if not chat_id:
        raise SystemExit("CHAT_ID env var is required")
    bot = telegram.Bot(token=token)
    bot.send_message(chat_id=chat_id, text=message, parse_mode=telegram.ParseMode.HTML)


if __name__ == "__main__":
    main()
advent-of-code-2018/day 13/main.py | gikf/advent-of-code | 0 | 17507 | <gh_stars>0
"""Advent of Code 2018 Day 13."""
from copy import deepcopy
CARTS = '<>^v'       # glyphs that mark a cart on the map
INTERSECTION = '+'
CURVES = '\\/'       # the two curve track pieces

# Cart glyph -> heading in degrees (0 = east/right, counter-clockwise).
cart_to_direction = {
    '<': 180,
    '^': 90,
    '>': 0,
    'v': 270,
}
# Heading -> (row delta, col delta) for a single step on the grid.
direction_to_move = {
    0: (0, 1),
    90: (-1, 0),
    180: (0, -1),
    270: (1, 0),
}
# Heading -> cart glyph (inverse of cart_to_direction).
direction_to_cart = {
    0: '>',
    90: '^',
    180: '<',
    270: 'v',
}
# Intersection turn cycle: left (+90), straight (0), right (-90).
turns = {
    0: 90,
    1: 0,
    2: -90,
}
# Heading -> {curve glyph -> new heading} when entering '\\' or '/'.
next_direction = {
    0: {
        '\\': 270,
        '/': 90,
    },
    90: {
        '\\': 180,
        '/': 0,
    },
    180: {
        '\\': 90,
        '/': 270,
    },
    270: {
        '\\': 0,
        '/': 180,
    },
}
def main(file_input='input.txt'):
    """Solve both parts of day 13 for the given input file."""
    grid = [list(raw.strip('\n')) for raw in get_file_contents(file_input)]
    carts = find_carts(grid)
    tracks = remove_carts(grid)

    def as_xy(position):
        # Answers are reported x,y while positions are tracked as row,col.
        return ','.join(str(coord) for coord in reversed(position))

    first_crash = follow_tracks(tracks, deepcopy(carts))
    print('First collision:', as_xy(first_crash))
    survivor = follow_tracks(tracks, deepcopy(carts), True)
    print('Last cart position after all crashes:', as_xy(survivor))
def follow_tracks(tracks, carts, prevent_collision=False):
    """Run the simulation until a single cart remains.

    When prevent_collision is False, return the first crash position.
    Otherwise keep removing crashed carts and return the position of the
    last surviving cart.
    """
    while len(carts) > 1:
        carts, crash_sites = move_carts(tracks, carts)
        if crash_sites and not prevent_collision:
            return crash_sites[0]
    return carts[0][0]
def find_repeated_position(carts):
    """Return positions occupied by more than one cart (collision points)."""
    seen = set()
    collisions = []
    for cart in carts:
        location = cart[0]
        key = tuple(location)
        if key in seen:
            collisions.append(location)
        else:
            seen.add(key)
    return collisions
def move_carts(tracks, carts):
    """Move carts by one on tracks.

    Returns the (possibly reduced) cart list and the positions of any
    collisions that happened during this tick.
    """
    collisions = []
    # Carts act in reading order (top-to-bottom, left-to-right); sorting the
    # [position, direction, turn] lists achieves exactly that ordering.
    for cart in sorted(carts):
        position, direction, turn = cart
        move = direction_to_move[direction]
        next_position = [pos + change for pos, change in zip(position, move)]
        next_square = get_square(tracks, next_position)
        if next_square == INTERSECTION:
            # cycle left / straight / right at every intersection
            next_direction, next_turn = turn_cart(direction, turn)
            cart[1] = next_direction
            cart[2] = next_turn
        elif is_curve(next_square):
            next_direction = curve_cart(direction, next_square)
            cart[1] = next_direction
        cart[0] = next_position
        # Collisions are checked after every individual cart move, as the
        # puzzle requires (otherwise two carts could swap places unnoticed).
        repeated_position = find_repeated_position(carts)
        if repeated_position:
            collisions.extend(repeated_position)
            # NOTE(review): `carts` is rebound here, but the loop keeps
            # iterating the originally sorted snapshot, so a cart removed by
            # a crash may still take its move this tick; it is no longer in
            # `carts`, so the bookkeeping stays consistent.
            carts = remove_collided_carts(carts, repeated_position)
    return carts, collisions
def remove_collided_carts(carts, repeated_position):
    """Drop every cart standing on one of the crash positions."""
    surviving = []
    for cart in carts:
        if cart[0] not in repeated_position:
            surviving.append(cart)
    return surviving
def curve_cart(direction, curve):
    """Return the heading a cart facing *direction* takes after *curve*."""
    outcomes = next_direction[direction]
    return outcomes[curve]
def turn_cart(direction, turn):
    """Apply the intersection turn cycle.

    Returns the new heading and the index of the next turn to take.
    """
    new_heading = (direction + turns[turn]) % 360
    return new_heading, (turn + 1) % len(turns)
def is_curve(square):
    """Return True when *square* is one of the curve pieces ('\\' or '/')."""
    return square in CURVES
def get_square(tracks, position):
    """Return the track character at *position* (row, col)."""
    return tracks[position[0]][position[1]]
def remove_carts(lines):
    """Replace cart glyphs with the track piece underneath them (in place)."""
    underlying_track = {'<': '-', '>': '-', 'v': '|', '^': '|'}
    for r, row in enumerate(lines):
        for c, square in enumerate(row):
            if square in underlying_track:
                lines[r][c] = underlying_track[square]
    return lines
def find_carts(lines):
    """Locate carts on the grid.

    Each entry is [[row, col], heading_in_degrees, turn_index].
    """
    return [
        [[row_no, col_no], cart_to_direction[square], 0]
        for row_no, row in enumerate(lines)
        for col_no, square in enumerate(row)
        if square in CARTS
    ]
def get_file_contents(file):
    """Return every line of *file* as a list of strings (newlines kept)."""
    with open(file) as handle:
        return list(handle)
if __name__ == '__main__':
    # Run the solver only when executed as a script.
    main()
| 3.390625 | 3 |
goopylib/applications/custom_ease.py | YuvrajThorat/goopylib | 0 | 17508 | <reponame>YuvrajThorat/goopylib<filename>goopylib/applications/custom_ease.py
from goopylib.imports import *
from pathlib import Path as pathlib_Path
# I kinda wanted to scrap this, it wasn't that good.
def create_custom_ease():
window = Window(title="goopylib: Create Custom Ease", width=get_screen_size()[1] * 0.7,
height=get_screen_size()[1] * 0.7, autoflush=False, bk_colour=DARKER_GREY)
window.set_coords(0, 0, 1000, 1000)
path = f"{pathlib_Path(__file__).parent.absolute()}/textures/"
Image(Point(500, 500), f"{path}background.png").draw(window)
add_button = Button(Image(Point(882, 219), f"{path}AddButton.png"),
Image(Point(882, 219), f"{path}AddButton.png").resize_factor(1.03),
Image(Point(882, 219), f"{path}AddButton.png").resize_factor(1.07),
Image(Point(882, 219), f"{path}AddButton.png").convert_greyscale()).draw(window)
clear_button = Button(Image(Point(882, 280), f"{path}ClearButton.png"),
Image(Point(882, 280), f"{path}ClearButton.png").resize_factor(1.03),
Image(Point(882, 280), f"{path}ClearButton.png").resize_factor(1.07)).draw(window)
play_button = Button(Image(Point(256, 805), f"{path}PlayButton.png"),
Image(Point(256, 805), f"{path}PlayButton.png").resize_factor(1.03),
Image(Point(256, 805), f"{path}PlayButton.png").resize_factor(1.07)).draw(window)
shape_button = CycleButton(0,
Button(Image(Point(99, 805), f"{path}RectangleButton.png"),
Image(Point(99, 805), f"{path}RectangleButton.png").resize_factor(1.03),
Image(Point(99, 805), f"{path}RectangleButton.png").resize_factor(1.07)),
Button(Image(Point(99, 805), f"{path}CircleButton.png"),
Image(Point(99, 805), f"{path}CircleButton.png").resize_factor(1.03),
Image(Point(99, 805), f"{path}CircleButton.png").resize_factor(1.07))) \
.draw(window)
interpolation_button = CycleButton(0,
Button(Image(Point(882, 109), f"{path}BezierButton.png"),
Image(Point(882, 109), f"{path}BezierButton.png").resize_factor(1.03),
Image(Point(882, 109), f"{path}BezierButton.png").resize_factor(1.07)),
Button(Image(Point(882, 109), f"{path}CubicButton.png"),
Image(Point(882, 109), f"{path}CubicButton.png").resize_factor(1.03),
Image(Point(882, 109), f"{path}CubicButton.png").resize_factor(1.07)),
Button(Image(Point(882, 109), f"{path}LinearButton.png"),
Image(Point(882, 109), f"{path}LinearButton.png").resize_factor(1.03),
Image(Point(882, 109), f"{path}LinearButton.png").resize_factor(1.07))) \
.draw(window)
template_button = CycleButton(0,
Button(Image(Point(882, 411), f"{path}LinearTemplate.png"),
Image(Point(882, 411), f"{path}LinearTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}LinearTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}BackTemplate.png"),
Image(Point(882, 411), f"{path}BackTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}BackTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}ExponentialTemplate.png"),
Image(Point(882, 411), f"{path}ExponentialTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}ExponentialTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}PolynomialTemplate.png"),
Image(Point(882, 411), f"{path}PolynomialTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}PolynomialTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}SineTemplate.png"),
Image(Point(882, 411), f"{path}SineTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}SineTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}CircleTemplate.png"),
Image(Point(882, 411), f"{path}CircleTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}CircleTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}ElasticTemplate.png"),
Image(Point(882, 411), f"{path}ElasticTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}ElasticTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}BounceTemplate.png"),
Image(Point(882, 411), f"{path}BounceTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}BounceTemplate.png").resize_factor(1.07)),
disabled_graphic=Image(Point(882, 428), f"{path}CustomTemplate.png")).draw(window)
save_button = Button(Image(Point(882, 647), f"{path}SaveButton.png"),
Image(Point(882, 647), f"{path}SaveButton.png").resize_factor(1.03),
Image(Point(882, 647), f"{path}SaveButton.png").resize_factor(1.07)).draw(window)
open_button = Button(Image(Point(882, 708), f"{path}OpenButton.png"),
Image(Point(882, 708), f"{path}OpenButton.png").resize_factor(1.03),
Image(Point(882, 708), f"{path}OpenButton.png").resize_factor(1.07)).draw(window)
simulation_graphic1 = CycleButton(0, Rectangle(Point(50, 875), Point(125, 950), fill=LIGHTER_BLUE, outline_width=0),
Circle(Point(88, 913), 38, fill=LIGHTER_BLUE, outline_width=0),
autoflush=False).draw(window)
simulation_graphic2 = CycleButton(0,
Rectangle(Point(845, 845), Point(920, 920), fill=LIGHTER_BLUE, outline_width=0),
Circle(Point(883, 883), 37.5, fill=LIGHTER_BLUE, outline_width=0),
autoflush=False).draw(window)
graph = Image(Point(500, 500), f"{path}Graph.png").draw(window)
delete_dropdown = Button(Image(Point(0, 0), f"{path}DeleteDropdown.png"),
Image(Point(0, 0), f"{path}DeleteDropdownHover.png"))
colour_grad = colour_gradient(LIGHTER_VIOLET, DARKEST_VIOLET, 100)
resolution = 2
control_points = [Circle(p, radius=10, fill=VIOLET, outline_width=0).draw(window) for p in
[Point(110, 673), Point(668, 118)]]
last_control_points = control_points.copy()
points = []
for obj in control_points:
points.append(obj.anchor)
curve = []
for t in range(int(10 ** resolution)):
t /= 10 ** resolution
curve.append(py_bezier_curve(t, points))
circle_objects = []
for i in range(int(10 ** resolution)):
circle_objects.append(Circle(curve[i], radius=2, fill=colour_grad[i], outline_width=0).draw(window))
selected_point = None
selected_curve_point = None
    def ease(time):
        """Easing function handed to the glide/rotate animations.

        Maps animation progress *time* (0..1) to an eased value by sampling
        the user-edited curve and normalising the y coordinate back into
        0..1 (the editable graph area spans y = 118..673, height 555).

        NOTE(review): the parameter shadows the imported ``time`` module
        used by the enclosing scope - rename if module access is needed here.
        """
        # Collect the current anchors of the draggable control handles.
        points = []
        for obj in control_points:
            points.append(obj.anchor)
        if interpolation_button.get_state() == 0:
            # State 0 is the Bezier mode of the interpolation cycle button.
            return 1 - (py_bezier_curve(time, points).y - 118) / 555
        else:
            # All other states fall back to linear interpolation here.
            return 1 - (LinearInterpolation(time, points).y - 118) / 555
    def update_curve():
        """Resample the curve from the control handles and move the dots.

        Recomputes ``10 ** resolution`` sample points and repositions the
        existing ``circle_objects`` (y only) instead of redrawing them.

        NOTE(review): the ``nonlocal`` statement also rebinds the enclosing
        scope's loop variables ``point``/``t``/``i``; only ``move_to_y`` is
        called, so x positions are assumed fixed - confirm intended.
        """
        nonlocal circle_objects, curve, point, last_control_points, t, i
        points = []
        for obj in control_points:
            points.append(obj.anchor)
        curve = []
        for t in range(int(10 ** resolution)):
            t /= 10 ** resolution
            if interpolation_button.get_state() == 0:
                # Bezier mode.
                curve.append(py_bezier_curve(t, points))
            else:
                # Linear fallback for the other interpolation states.
                curve.append(LinearInterpolation(t, points))
        for i in range(int(10 ** resolution)):
            circle_objects[i].move_to_y(curve[i].y)
        # Remember the handles this curve was computed from, so the main
        # loop can detect when a recompute is needed.
        last_control_points = control_points.copy()
while True:
t = time.time()
mouse_pos = window.check_left_mouse_click()
if mouse_pos is not None:
if open_button.is_clicked(mouse_pos):
filename = openfilebrowser()
elif save_button.is_clicked(mouse_pos):
filename = openfilebrowser()
elif interpolation_button.is_clicked(mouse_pos):
update_curve()
elif play_button.is_clicked(mouse_pos):
play_button.disable()
"""
x = []
for t in range(int(10 ** resolution)):
t /= 10 ** resolution
x.append(ease(t))
plt.plot(x)
plt.show()"""
simulation_graphic1.glide_x(500, time=2, easing=ease)
simulation_graphic2.animate_rotate(360, time=2, easing=ease)
elif shape_button.is_clicked(mouse_pos):
simulation_graphic1.set_state(shape_button.get_state())
simulation_graphic2.set_state(shape_button.get_state())
elif clear_button.is_clicked(mouse_pos):
for point in control_points[1:-1]:
point.undraw()
control_points.remove(point)
elif add_button.is_clicked(mouse_pos):
add_button.disable()
if selected_curve_point is None:
i = int((10 ** resolution / 2) * 2 ** (2 - len(control_points)))
control_points.insert(1, Circle(curve[i], radius=10, fill=colour_grad[i], outline_width=0).draw(
window))
control_points[1].set_draggable(callback=update_curve)
else:
i = max(
math.ceil(circle_objects.index(selected_curve_point) / (100 / (len(control_points) - 1))) - 1,
1)
control_points.insert(i, Circle(selected_curve_point.get_anchor(), radius=10,
fill=colour_grad[circle_objects.index(selected_curve_point)],
outline_width=0).draw(window))
selected_curve_point = None
control_points[i].set_draggable(callback=update_curve)
add_button.enable()
template_button.disable()
elif delete_dropdown.is_clicked(mouse_pos):
delete_dropdown.undraw()
selected_point.undraw()
control_points.remove(selected_point)
if len(control_points) == 2:
template_button.enable()
else:
for point in circle_objects:
if point.is_clicked(mouse_pos):
for i, p in enumerate(circle_objects):
p.set_fill(colour_grad[i])
point.set_fill(WHITE)
selected_curve_point = point
break
if last_control_points != control_points:
update_curve()
if play_button.is_disabled and not simulation_graphic1.is_gliding:
play_button.enable()
mouse_pos = window.check_right_mouse_click()
for point in control_points[1:-1]:
if point.is_clicked(mouse_pos):
delete_dropdown.draw(window).move_to_point(point.anchor, align="topleft")
selected_point = point
window.update()
def create_custom_ease2():
    """Open a second (currently empty) custom-ease editor window.

    Shows the window on a 1000x1000 coordinate system and pumps updates
    until the user closes it. The texture-path computation of the original
    was unused dead code and has been removed.
    """
    window = Window(title="goopylib_b: Create Custom Ease", width=get_screen_size()[1] * 0.7,
                    height=get_screen_size()[1] * 0.7, autoflush=False, bk_colour=DARKER_GREY)
    window.set_coords(0, 0, 1000, 1000)

    # Pump the event loop until the window is closed by the user.
    while not window.is_closed():
        window.update()
    window.close()
| 2.375 | 2 |
app/lib/duplication_check/train.py | WHUT-XGP/ASoulCnki | 0 | 17509 | # -*- encoding: utf-8 -*-
"""
Filename :train.py
Description :获取小作文摘要
Time :2021/06/22 15:21:08
Author :hwa
Version :1.0
"""
from app.lib.duplication_check.reply_database import ReplyDatabase
import time
def train_data():
    """Load the reply corpus from JSON and dump it as a binary image.

    Reads ``data/bilibili_cnki_reply.json``, serialises the database to
    ``database.dat`` and prints the elapsed wall time.
    """
    # perf_counter is the recommended clock for measuring elapsed time
    # (monotonic, highest available resolution).
    start_time = time.perf_counter()
    db = ReplyDatabase.load_from_json("data/bilibili_cnki_reply.json")
    db.dump_to_image("database.dat")
    end_time = time.perf_counter()
    print(f"train cost {end_time - start_time} s")
if __name__ == "__main__":
train_data()
| 2.328125 | 2 |
buildscripts/task_generation/evg_config_builder.py | benety/mongo | 0 | 17510 | """Builder for generating evergreen configuration."""
from threading import Lock
from typing import Set, List, Dict
import inject
from shrub.v2 import ShrubProject, BuildVariant, ExistingTask, Task
from buildscripts.patch_builds.task_generation import validate_task_generation_limit
from buildscripts.task_generation.constants import ACTIVATE_ARCHIVE_DIST_TEST_DEBUG_TASK
from buildscripts.task_generation.gen_task_service import GenTaskService, \
GenTaskOptions, ResmokeGenTaskParams, FuzzerGenTaskParams
from buildscripts.task_generation.generated_config import GeneratedFile, GeneratedConfiguration
from buildscripts.task_generation.resmoke_proxy import ResmokeProxyService
from buildscripts.task_generation.suite_split import SuiteSplitService, GeneratedSuite, \
SuiteSplitParameters
from buildscripts.task_generation.task_types.fuzzer_tasks import FuzzerTask
# pylint: disable=too-many-instance-attributes
class EvgConfigBuilder:
    """A builder class for building evergreen configuration."""

    @inject.autoparams()
    def __init__(
            self,
            resmoke_proxy: ResmokeProxyService,
            suite_split_service: SuiteSplitService,
            evg_config_gen_service: GenTaskService,
            gen_options: GenTaskOptions,
    ) -> None:
        """
        Initialize a new builder.

        :param resmoke_proxy: Proxy to access resmoke data.
        :param suite_split_service: Service to split suites into sub-suites.
        :param evg_config_gen_service: Service to generate evergreen configuration.
        :param gen_options: Global options for generating evergreen configuration.
        """
        self.resmoke_proxy = resmoke_proxy
        self.suite_split_service = suite_split_service
        self.evg_config_gen_service = evg_config_gen_service
        self.gen_options = gen_options

        self.shrub_config = ShrubProject.empty()
        self.build_variants: Dict[str, BuildVariant] = {}
        self.generated_files: List[GeneratedFile] = []
        # Guards all mutable builder state; the generate_* methods may be
        # invoked from multiple worker threads.
        self.lock = Lock()

    def get_build_variant(self, build_variant: str) -> BuildVariant:
        """
        Get the build variant object, creating it if it doesn't exist.

        NOTE: The `lock` should be held by any functions calling this one.

        :param build_variant: Name of build variant.
        :return: BuildVariant object being created.
        """
        if build_variant not in self.build_variants:
            self.build_variants[build_variant] = BuildVariant(build_variant, activate=False)
        return self.build_variants[build_variant]

    def generate_suite(self, split_params: SuiteSplitParameters,
                       gen_params: ResmokeGenTaskParams) -> None:
        """
        Add configuration to generate a split version of the specified resmoke suite.

        :param split_params: Parameters of how resmoke suite should be split.
        :param gen_params: Parameters of how evergreen configuration should be generated.
        """
        generated_suite = self.suite_split_service.split_suite(split_params)
        with self.lock:
            build_variant = self.get_build_variant(generated_suite.build_variant)
            resmoke_tasks = self.evg_config_gen_service.generate_task(generated_suite,
                                                                      build_variant, gen_params)
            self.generated_files.extend(self.resmoke_proxy.render_suite_files(resmoke_tasks))

    def generate_fuzzer(self, fuzzer_params: FuzzerGenTaskParams) -> FuzzerTask:
        """
        Add configuration to generate the specified fuzzer task.

        :param fuzzer_params: Parameters of how the fuzzer suite should generated.
        :return: Definition of the generated fuzzer task.
        """
        with self.lock:
            build_variant = self.get_build_variant(fuzzer_params.variant)
            return self.evg_config_gen_service.generate_fuzzer_task(fuzzer_params, build_variant)

    def add_display_task(self, display_task_name: str, execution_task_names: Set[str],
                         build_variant: str) -> None:
        """
        Add configuration to generate the specified display task.

        :param display_task_name: Name of display task to create.
        :param execution_task_names: Name of execution tasks to include in display task.
        :param build_variant: Name of build variant to add to.
        """
        execution_tasks = {ExistingTask(task_name) for task_name in execution_task_names}
        with self.lock:
            # Use a distinct local name: the original rebound the `build_variant`
            # parameter to a BuildVariant object, contradicting its `str` annotation.
            variant = self.get_build_variant(build_variant)
            variant.display_task(display_task_name, execution_existing_tasks=execution_tasks)

    def generate_archive_dist_test_debug_activator_task(self, variant: str):
        """
        Generate dummy task to activate the task that archives debug symbols.

        We can't activate it directly as it's not generated.
        """
        with self.lock:
            build_variant = self.get_build_variant(variant)
            build_variant.add_existing_task(ExistingTask(ACTIVATE_ARCHIVE_DIST_TEST_DEBUG_TASK))

    def build(self, config_file_name: str) -> GeneratedConfiguration:
        """
        Build the specified configuration and return the files needed to create it.

        :param config_file_name: Filename to use for evergreen configuration.
        :return: Dictionary of files and contents that are needed to create configuration.
        """
        for build_variant in self.build_variants.values():
            self.shrub_config.add_build_variant(build_variant)
        if not validate_task_generation_limit(self.shrub_config):
            raise ValueError("Attempting to generate more than max tasks in single generator")

        self.generated_files.append(GeneratedFile(config_file_name, self.shrub_config.json()))
        return GeneratedConfiguration(self.generated_files)
| 1.90625 | 2 |
test/unit/vint/ast/plugin/scope_plugin/stub_node.py | mosheavni/vint | 538 | 17511 | <reponame>mosheavni/vint
from vint.ast.node_type import NodeType
from vint.ast.plugin.scope_plugin.identifier_attribute import (
IDENTIFIER_ATTRIBUTE,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG,
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT,
)
def create_id(id_value, is_declarative=True, is_function=False, is_autoload=False,
              is_declarative_parameter=False, is_on_str_expr_context=False):
    """Build a stub IDENTIFIER node carrying the given identifier attributes."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: is_function,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: is_autoload,
        IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: is_declarative_parameter,
        IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: is_on_str_expr_context,
    }
    return {
        'type': NodeType.IDENTIFIER.value,
        'value': id_value,
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_env(env_value):
    """Build a stub ENV node; only the declaration flag is set."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: True,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
        IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
    }
    return {
        'type': NodeType.ENV.value,
        'value': env_value,
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_option(opt_value):
    """Build a stub OPTION node; only the declaration flag is set."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: True,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
        IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
    }
    return {
        'type': NodeType.OPTION.value,
        'value': opt_value,
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_reg(reg_value):
    """Build a stub REG (register) node; only the declaration flag is set."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: True,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
        IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
    }
    return {
        'type': NodeType.REG.value,
        'value': reg_value,
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_curlyname(is_declarative=True):
    """Create a node as a `my_{'var'}`"""
    name_part = {
        'type': NodeType.CURLYNAMEPART.value,
        'value': 'my_',
    }
    expr_part = {
        'type': NodeType.CURLYNAMEEXPR.value,
        'value': {
            'type': NodeType.CURLYNAMEEXPR.value,
            'value': 'var',
        },
    }
    attributes = {
        IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: True,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
        IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
    }
    return {
        'type': NodeType.CURLYNAME.value,
        'value': [name_part, expr_part],
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_subscript_member(is_declarative=True):
    """Build a stub IDENTIFIER node for a subscript member access."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: True,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
        IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
    }
    return {
        'type': NodeType.IDENTIFIER.value,
        'value': 'member',
        IDENTIFIER_ATTRIBUTE: attributes,
    }
| 2.125 | 2 |
core/analyser.py | hryu/cpu_usage_analyser | 0 | 17512 | <filename>core/analyser.py
class Analyser:
    """Dispatch trace events to registered callbacks.

    ``callbacks`` maps event names (plus the special keys ``syscall_entry``
    and ``syscall_exit``) to handlers taking the event. ``notifiers`` maps
    notification ids to callables invoked via :meth:`notify`. ``state`` is
    stored untouched for use by handlers/subclasses.
    """

    def __init__(self, callbacks, notifiers, state):
        self.cbs = callbacks
        self.state = state
        self.notifiers = notifiers

    def on_begin_analyse(self, timestamp):
        """Hook invoked before analysis starts; default is a no-op."""
        pass

    def on_end_analyse(self, timestamp):
        """Hook invoked after analysis ends; default is a no-op."""
        pass

    def analyse(self, event):
        """Route *event* to the handler registered for its (short) name.

        Names of the form ``provider:event`` (as emitted by the 'perf'
        tool) are reduced to the stripped part after the first colon.
        """
        parts = event.name.split(':')
        name = parts[1].strip() if len(parts) > 1 else parts[0]

        # Exact match wins over the generic syscall entry/exit handlers.
        if name in self.cbs:
            self.cbs[name](event)
            return
        if name.startswith(('sys_enter', 'syscall_entry_')) and \
                'syscall_entry' in self.cbs:
            self.cbs['syscall_entry'](event)
        elif name.startswith(('sys_exit', 'syscall_exit_')) and \
                'syscall_exit' in self.cbs:
            self.cbs['syscall_exit'](event)

    def notify(self, notification_id, **kwargs):
        """Invoke the notifier registered under *notification_id*, if any."""
        if notification_id in self.notifiers:
            self.notifiers[notification_id](**kwargs)
| 2.625 | 3 |
grpr2-ch/maci/policies/__init__.py | saarcohen30/GrPR2-CH | 0 | 17513 | <filename>grpr2-ch/maci/policies/__init__.py
from .nn_policy import NNPolicy
# from .gmm import GMMPolicy
# from .latent_space_policy import LatentSpacePolicy
from .uniform_policy import UniformPolicy
# from .gaussian_policy import GaussianPolicy
from .stochastic_policy import StochasticNNPolicy, StochasticNNConditionalPolicy
from .deterministic_policy import DeterministicNNPolicy
| 1.234375 | 1 |
sugarpidisplay/sugarpiconfig/views.py | szpaku80/SugarPiDisplay | 1 | 17514 | """
Routes and views for the flask application.
"""
import os
import json
from flask import Flask, redirect, request, render_template, flash
from pathlib import Path
from flask_wtf import FlaskForm
from wtforms import StringField,SelectField,PasswordField,BooleanField
from wtforms.validators import InputRequired,ValidationError
from . import app
# Values of the data-source selector in MyForm / the saved config file.
source_dexcom = 'dexcom'
source_nightscout = 'nightscout'
LOG_FILENAME="sugarpidisplay.log"
folder_name = '.sugarpidisplay'
config_file = 'config.json'
# Per-user data directory (~/.sugarpidisplay); created at import time so the
# save/load handlers can assume it exists.
pi_sugar_path = os.path.join(str(Path.home()), folder_name)
Path(pi_sugar_path).mkdir(exist_ok=True)
def dexcom_field_check(form, field):
    """WTForms validator: the field is required when Dexcom is selected."""
    if form.data_source.data == source_dexcom and not field.data:
        raise ValidationError('Field cannot be empty')
def nightscout_field_check(form, field):
    """WTForms validator: the field is required when Nightscout is selected."""
    if form.data_source.data == source_nightscout and not field.data:
        raise ValidationError('Field cannot be empty')
class MyForm(FlaskForm):
    """Device configuration form: data source plus source-specific fields.

    The Dexcom fields are required only when the Dexcom source is selected,
    and likewise for the Nightscout fields (see the *_field_check validators).
    """

    class Meta:
        # Disable CSRF protection for this form.
        csrf = False

    data_source = SelectField(
        'Data Source',
        choices=[(source_dexcom, 'Dexcom'), (source_nightscout, 'Nightscout')]
    )
    use_animation = BooleanField('Use Animation')
    dexcom_user = StringField('Dexcom UserName', validators=[dexcom_field_check])
    # NOTE(review): the label below looks like an anonymisation placeholder -
    # confirm the intended user-facing label.
    dexcom_pass = PasswordField('<PASSWORD>', validators=[dexcom_field_check])
    ns_url = StringField('Nightscout URL', validators=[nightscout_field_check])
    ns_token = StringField('Nightscout Access Token', validators=[nightscout_field_check])
@app.route('/hello')
def hello_world():
    """Return a static greeting (simple smoke-test route)."""
    return 'Hello, World!'
@app.route('/success')
def success():
    """Confirmation page shown after the configuration was saved."""
    return 'Your device is configured. Now cycle the power and it will use the new settings'
@app.route('/', methods=('GET', 'POST'))
def setup():
    """Render the configuration form; persist it on a valid POST.

    GET pre-populates the form from any previously saved config; a POST
    that fails validation re-renders the form with a flash message.
    """
    form = MyForm()
    if request.method == 'POST':
        if not form.validate():
            flash('Fields are missing.')
            return render_template('setup.html', form=form)
        handle_submit(form)
        return redirect('/success')
    # GET: load previously saved settings into the form, if any.
    loadData(form)
    return render_template('setup.html', form=form)
def handle_submit(form):
    """Persist the submitted form values to the JSON config file."""
    config = {
        'data_source': form.data_source.data,
        'use_animation': form.use_animation.data,
    }
    if form.data_source.data == source_dexcom:
        config['dexcom_username'] = form.dexcom_user.data
        config['dexcom_password'] = form.dexcom_pass.data
    else:
        config['nightscout_url'] = form.ns_url.data
        config['nightscout_access_token'] = form.ns_token.data

    # Context manager guarantees the handle is closed even if the dump fails.
    with open(os.path.join(pi_sugar_path, config_file), "w") as f:
        json.dump(config, f, indent=4)
def loadData(form):
    """Pre-populate *form* from the saved JSON config file, if one exists.

    A missing, unreadable or corrupt config leaves the form untouched;
    individual missing keys are simply skipped.
    """
    config_full_path = os.path.join(pi_sugar_path, config_file)
    if not Path(config_full_path).exists():
        return
    try:
        # Context manager closes the handle even if parsing fails.
        with open(config_full_path, "r") as f:
            config = json.load(f)
    except (OSError, ValueError):
        # Narrowed from a bare `except`: unreadable or invalid JSON is
        # treated as "no saved settings" (json.JSONDecodeError is a ValueError).
        return

    if 'data_source' in config:
        form.data_source.data = config['data_source']
        if config['data_source'] == source_dexcom:
            if 'dexcom_username' in config:
                form.dexcom_user.data = config['dexcom_username']
            if 'dexcom_password' in config:
                form.dexcom_pass.data = config['dexcom_password']
        if config['data_source'] == source_nightscout:
            if 'nightscout_url' in config:
                form.ns_url.data = config['nightscout_url']
            if 'nightscout_access_token' in config:
                form.ns_token.data = config['nightscout_access_token']
    if 'use_animation' in config:
        form.use_animation.data = config['use_animation']
| 2.515625 | 3 |
progressao_aritmeticav3.py | eduardobaltazarmarfim/PythonC | 0 | 17515 | def retorno():
resp=input('Deseja executar o programa novamente?[s/n] ')
if(resp=='S' or resp=='s'):
verificar()
else:
print('Processo finalizado com sucesso!')
pass
def cabecalho(titulo):
    """Print *titulo* centred between two 30-dash rules.

    The original used fixed padding (9 + 15 spaces), which misaligned any
    title and overflowed the 30-column rules for longer ones; str.center
    keeps the header consistent for any title length.
    """
    print('-' * 30)
    print(titulo.center(30))
    print('-' * 30)
def mensagem_erro():
    """Print the standard invalid-input error message."""
    print('Dados inseridos são invalidos!')
def verificar():
    """Read the first term and common difference of an arithmetic
    progression (PA) and interactively print its terms in batches.

    The first batch always shows 10 terms; afterwards the user chooses how
    many further terms to show, until entering 0 (or less).

    NOTE(review): input errors are swallowed by a bare ``except`` and the
    program restarts itself through ``retorno()`` -> ``verificar()``
    recursion; the two while-branches are near-duplicates differing only
    in the batch size (10 vs ``loop``).
    """
    try:
        cabecalho('Progressão PA')
        num=int(input('Digite o primeiro termo: '))
        numPA=int(input('Digite sua razão PA: '))
    except:
        mensagem_erro()
        retorno()
    else:
        # cont: position inside the current batch; rept: batch counter;
        # contagem: total terms printed so far.
        cont=1
        loop=1
        rept=1
        contagem=0
        while loop!=0:
            if(rept==1):
                # First batch: always 10 terms, last one marked PAUSA.
                while cont<=10:
                    if(cont>=10):
                        print('{} -> PAUSA\n'.format(num),end='')
                    else:
                        print('{} -> '.format(num),end='')
                    cont+=1
                    num+=numPA
                    contagem+=1
                rept+=1
                loop=int(input('Quantos termos deseja mostrar a mais? '))
                if(loop<=0):
                    print('Progressão finalizada com {} termos mostrados'.format(contagem))
                    break
            else:
                # Subsequent batches: size chosen by the user (`loop`).
                cont=1
                while cont<=loop:
                    if(cont>=loop):
                        print('{} -> PAUSA\n'.format(num),end='')
                    else:
                        print('{} -> '.format(num),end='')
                    cont+=1
                    num+=numPA
                    contagem+=1
                rept+=1
                loop=int(input('Quantos termos deseja mostrar a mais? '))
                if(loop<=0):
                    print('Progressão finalizada com {} termos mostrados'.format(contagem))
                    break
    retorno()
    pass
verificar() | 4 | 4 |
policies/plc_migrate_default.py | PaloAltoNetworks/pcs-migration-management | 1 | 17516 | <reponame>PaloAltoNetworks/pcs-migration-management
from policies import plc_get, plc_add, plc_update
from sdk.color_print import c_print
from tqdm import tqdm
def migrate_builtin_policies(tenant_sessions: list, logger):
    '''
    Updates the default/built in policies of all clone tenants so they are the same as the
    source tenant. Default policies can not be added or deleted.

    Returns a list with, per clone tenant, how many default policies were updated.
    '''
    tenant_updated_policies = []
    # Pull the default policies from every tenant; index 0 is the source
    # tenant, the remaining sessions are clones to be synced.
    tenant_default_policies = []
    for tenant_session in tenant_sessions:
        tenant_default_policies.append(plc_get.api_get_default(tenant_session, logger))
    original_tenant = tenant_default_policies[0]
    clone_tenant_default_policies = tenant_default_policies[1:]
    for index, tenant in enumerate(clone_tenant_default_policies):
        added = 0
        for plc in tqdm(tenant, desc='Syncing Default Policies', leave=False):
            # Match clone policies to source policies by name.
            for old_plc in original_tenant:
                if plc['name'] == old_plc['name']:
                    #Compliance metadata is not apart of every policy so it has to be compared situationally
                    complianceMetadata = []
                    if 'complianceMetadata' in plc:
                        complianceMetadata = plc['complianceMetadata']
                    old_complianceMetadata = []
                    if 'complianceMetadata' in old_plc:
                        old_complianceMetadata = old_plc['complianceMetadata']
                    # compFlag becomes True when some source compliance field is
                    # missing from the clone's metadata.
                    # NOTE(review): each field (standardName/requirementId/sectionId)
                    # is matched against *all* clone entries, not entry-by-entry -
                    # confirm this looser comparison is intended.
                    compFlag = False
                    for el in old_complianceMetadata:
                        name = el['standardName']
                        if name not in [cmp['standardName'] for cmp in complianceMetadata]:
                            compFlag = True
                            break
                        req_id = el['requirementId']
                        if req_id not in [cmp['requirementId'] for cmp in complianceMetadata]:
                            compFlag = True
                            break
                        sec_id = el['sectionId']
                        if sec_id not in [cmp['sectionId'] for cmp in complianceMetadata]:
                            compFlag = True
                            break
                    #Sort Labels (in place) so ordering differences do not count as drift
                    labels = plc['labels']
                    o_labels = old_plc['labels']
                    labels.sort()
                    o_labels.sort()
                    #If there is a difference between the source tenant policy and the destination tenant policy, then update the policy
                    # if plc['severity'] != old_plc['severity'] or plc['labels'] != old_plc['labels'] or plc['rule'] != old_plc['rule'] or compFlag:
                    if plc['severity'] != old_plc['severity'] or labels != o_labels or plc['rule'] != old_plc['rule'] or compFlag:
                        res = plc_add.update_default_policy(tenant_sessions[index + 1], old_plc, logger)
                        if res != 'BAD':
                            added += 1
        tenant_updated_policies.append(added)
    logger.info('Finished migrating Default Policies')
    return tenant_updated_policies
if __name__ == '__main__':
from sdk.load_config import load_config_create_sessions
tenant_sessions = load_config_create_sessions()
migrate_builtin_policies(tenant_sessions)
| 2.140625 | 2 |
MAIN VERSION 2.py | HorridHanu/Notepad-Python | 1 | 17517 | <reponame>HorridHanu/Notepad-Python
########################################################################################
########################################################################################
## # CODE LANGUAGE IS PYHTON! ## ## ##
## # DATE: 1-JULY-2021 ## ## ######## ## ## ## ##
## # CODE BY HANU! ########## ## ######### ## ## ##
## # ONLY FOR EDUCATIONAL PURPOSE! ########## ####### ## ## ## ## ##
## # NOTEPAD COPY MAIN! ## ## ## ## ## ## ## ## ##
## # ITS ONLY DEMO! ## ## ####### ## ## ######## ##
########################################################################################
########################################################################################
#Define Functions For Cammand!
def fun():
    """Placeholder handler for menu items that are not implemented yet."""
    message = "yes work! \nPLEASE CHECK NEXT VERSION ON ->Github.com/HorridHanu<- ."
    print(message)
# Define function for Files!
# Define function for Newfile!
import os.path
import os
def newfile():
    """Start a fresh, unsaved document: clear the editor and reset state."""
    global file
    text.delete(1.0, END)
    file = None
    root.title("Untitled - Notepad")
# function for openfile!
from tkinter.filedialog import askopenfilename, asksaveasfilename
def openfile():
    """Ask the user for a file, load its contents and update the title."""
    global file
    # NOTE(review): the " *.txt" pattern has a leading space - confirm intended.
    file = askopenfilename(defaultextension=".txt", filetypes=[("All Files", "*.*"),
                                                               ("Text Documents",
                                                                " *.txt")])
    if file == "":
        # Dialog was cancelled.
        file = None
    else:
        root.title(os.path.basename(file) + " - Notepad")
        text.delete(1.0, END)
        # Context manager guarantees the handle is closed even if read fails.
        with open(file, "r") as f:
            text.insert(1.0, f.read())
# function for savefile!
def savefile():
    """Save the text area to the current file, asking for a name if needed.

    If no file is associated yet, prompt with a save dialog; cancelling the
    dialog leaves the document unsaved. The duplicated write logic of the
    original is collapsed into a single context-managed write path.
    """
    global file
    if file is None:
        file = asksaveasfilename(initialfile='Untitled.txt', defaultextension='.txt',
                                 filetypes=[("All Files", ".txt"),
                                            ("Text Documents", ".txt")])
        if file == "":
            # Dialog cancelled: nothing to save.
            file = None
            return
        root.title(os.path.basename(file) + " - Notepad")
    # Single write path for both "save as" and plain save.
    with open(file, "w") as f:
        f.write(text.get(1.0, END))
# Define function for Edits!
# function for cut!
def cut():
    """Move the current selection to the clipboard (virtual <<Cut>> event)."""
    text.event_generate("<<Cut>>")
# function for copy!
def copy():
    """Copy the current selection to the clipboard (virtual <<Copy>> event)."""
    text.event_generate("<<Copy>>")
# function for paste!
def paste():
    """Insert clipboard contents at the cursor (virtual <<Paste>> event)."""
    text.event_generate("<<Paste>>")
# function for delete!
def delete():
    """Clear the entire text area (does not touch the clipboard)."""
    text.delete(1.0, END)
# Define functions for ABOUT!
# import the message box as tmsg
import tkinter.messagebox as tmsg
# function for help!
def help():
    """Show the support/contact dialog (wired to Help > View Help).

    NOTE(review): shadows the builtin ``help``; renaming would require
    updating the menu wiring below.
    """
    # print("I will help you!")
    # showinfo help to show a messsage !
    tmsg.showinfo("Help", "Tell Us Whats happen?\nContact Us On ->Github.com/HorridHanu<-")
    # print(a) return value (ok)
# function for rate!
def rate():
    """Ask the user to rate the app and react to the yes/no answer."""
    answer = tmsg.askquestion("Rate us!", " Was Your Experince Good?")
    if answer == 'yes':
        msg = "Thanks Sir Please Rate Us On Appstore!"
    else:
        msg = "Tell Us Whats happen?\nContact Us On ->Github.com/HorridHanu<-"
    tmsg.showinfo("Experince..", msg)
# function for joining!
def join_us():
    """Invite the user to join on GitHub and show a follow-up warning."""
    answer = tmsg.askquestion("Join", "Would You Join Us On Github")
    msg = ("Without Joining You Cann't Get Next Update!" if answer == "no"
           else "Go To ->Github.com/HorridHanu<- \n For More Update And Versions....")
    tmsg.showwarning("Warning", msg)
# define function for about!
def about():
    """Show the About dialog with version and copyright text."""
    # NOTE(review): uses showerror (error icon) for an informational
    # dialog - confirm that is intended.
    tmsg.showerror("About", "Notepad By Hanu.. \nVersion 2.0.."
                   "\nCopy Right 2021 Hanu Corporation. "
                   "All Right Reserved!"
                   " For All OS {Windows}, {Linux}, {MacOS}"
                   " User Interface Are Protected By Trademark"
                   " And Other Pendings"
                   " Or Existing Intellecutal Property Right In "
                   " United State And Other Countries.")
#BASIC TKINTER SETUP!
from tkinter import *
root = Tk()
root.geometry("700x390")
# Title matches the one used by newfile() ("Notpad" typo fixed).
root.title("Untitled - Notepad")
root.bell()  # used to bell on opening!
# root.iconphoto("1.ICON.png")

# STATUS BAR!
statusbar = StringVar()
statusbar.set(" Be Happy....")
# Keep the widget reference: the original chained `.pack()`, which returns
# None and left `sbar` bound to None instead of the Label.
sbar = Label(root, textvariable=statusbar, relief=SUNKEN, anchor="w")
sbar.pack(fill=X, side=BOTTOM)
# DEFINE FUNCTION FOR STATUS BAR!
def status_bar():
    """Refresh the status bar message (wired to View > Status Bar)."""
    # NOTE(review): the text widget below uses "lucida 17"; the "Size 19"
    # wording here looks stale - confirm.
    statusbar.set(" Font Lucida, Size 19 And You Are Working Be Happy.....")
# define function for font!
def font():
    """Show the current font information in the status bar (Format > font..)."""
    statusbar.set(" Font Is Lucida And Size Is 17......")
# define function for time!
# IMPORT Datetime MODULE!
from datetime import datetime
# Snapshot of the launch time, taken once at import (never refreshed).
now = datetime.now()
Time = now.strftime("%H:%M")
Date = now.strftime("%D")
def time_now():
    """Show the current time and date in the status bar (Edit > Time/Date).

    Reads the clock at call time: the module-level Time/Date values were
    captured once at import, so the original always displayed the stale
    launch timestamp.
    """
    now = datetime.now()
    statusbar.set(f"{now.strftime('%H:%M')} {now.strftime('%D')}")
# SCROLLBAR AND TEXT AREA!
# scrollbar using Scroll widget!
sb = Scrollbar(root)
sb.pack(fill=Y, side=RIGHT)
# Text area using text widget and connect with scroll bar!
text = Text(root, font="lucida 17", yscrollcommand=sb.set)
# for taking the full geometry
text.pack(fill=BOTH, expand=True)
# Path of the currently open file; None means an unsaved "Untitled" buffer.
file = None
# Let the scrollbar drive the text view (two-way binding with yscrollcommand).
sb.config(command=text.yview)
#Main Menu!
mainmenu=Menu(root)
# Submenu File!
m1 = Menu(mainmenu, tearoff=0)
m1.add_separator()
# to new file
m1.add_command(label="New Ctrl+N", command=newfile)
# m1.add_separator()
# to open existing file
m1.add_command(label="Open.. Ctrl+O", command=openfile)
# m1.add_separator()
# to save current file
m1.add_command(label="save Ctrl+s", command=savefile)
m1.add_separator()
# to print (placeholder handler)
m1.add_command(label="Print Ctrl+P", command=fun)
# to Exit!
m1.add_separator()
m1.add_command(label="Exit", command=exit) #exit has pre-function to exit!
mainmenu.add_cascade(label="File", menu=m1)
# file menu END
#Submenu Edit!
m2 = Menu(mainmenu, tearoff = 0)
m2.add_separator()
# to cut
m2.add_command(label="Cut Ctrl+X", command=cut)
# to copy
m2.add_command(label="Copy Ctrl+C", command=copy)
# to paste
m2.add_command(label="Paste Ctrl+V", command=paste)
m2.add_separator()
# to delete
m2.add_command(label="Delete Del", command=delete)
m2.add_separator()
# select-all is a placeholder handler for now
m2.add_command(label="Select Ctrl+A",command=fun)
# to time
m2.add_command(label="Time/Date F5",command=time_now)
mainmenu.add_cascade(label="Edit", menu=m2)
# edit menu END
#Submenu Format
m3 = Menu(mainmenu, tearoff = 0)
m3.add_separator()
# word-wrap is a placeholder handler for now
m3.add_command(label="WordWrap", command=fun)
# to font
m3.add_command(label="font..", command=font)
mainmenu.add_cascade(label="Format", menu=m3)
#Submenu View!
m4 = Menu(mainmenu, tearoff=0)
m4.add_separator()
# to view statusbar
m4.add_command(label="Status Bar", command=status_bar)
mainmenu.add_cascade(label="View", menu=m4)
#Submenu View Help
m5=Menu(mainmenu, tearoff = 0)
m5.add_separator()
# to view help
m5.add_command(label="View Help", command=help)
m5.add_separator()
# m5.add_separator()
# m5.add_separator()
# to rate
m5.add_command(label="Rate us!", command=rate)
# m5.add_separator()
# to join
m5.add_command(label="Join us!", command=join_us)
m5.add_separator()
m5.add_separator()
# about
m5.add_command(label="About Notepad", command=about)
mainmenu.add_cascade(label="Help", menu=m5)
# View help menu END
root.config(menu=mainmenu) #configure the mainmenu as menu
root.mainloop()
########################################################################################
######################################################################################## | 2.484375 | 2 |
unfollow_parfum.py | AntonPukhonin/InstaPy | 0 | 17518 | <filename>unfollow_parfum.py
from instapy import InstaPy
#insta_username = 'antonpuhonin'
#insta_password = '<PASSWORD>'
insta_username = 'tonparfums'
insta_password = '<PASSWORD>'
# Run the unfollow session. `session` may never be created if InstaPy()
# itself raises, so bind it to None first and guard the cleanup instead of
# assuming it exists (the original `finally: session.end()` raised a
# NameError in that case, masking the real error).
session = None
try:
    session = InstaPy(username=insta_username,
                      password=insta_password,
                      headless_browser=True,
                      multi_logs=True)
    session.login()
    # Unfollow up to 200 accounts that InstaPy itself followed, oldest
    # first (FIFO), but only those followed more than 6 days ago.
    session.unfollow_users(amount=200, onlyInstapyFollowed=True,
                           onlyInstapyMethod='FIFO',
                           unfollow_after=6 * 24 * 60 * 60)
finally:
    if session is not None:
        session.end()
| 1.960938 | 2 |
portal/grading/serializers.py | LDSSA/portal | 2 | 17519 | <gh_stars>1-10
from rest_framework import serializers
from portal.academy import models
from portal.applications.models import Submission, Challenge
class GradeSerializer(serializers.ModelSerializer):
    """Serialize a Grade, exposing its stored feedback file as ``notebook``."""
    # Rename the model's `feedback` FileField to `notebook` in the API payload.
    notebook = serializers.FileField(source="feedback")
    class Meta:
        model = models.Grade
        fields = (
            "score",
            "status",
            "message",
            "notebook",
        )
class ChecksumSerializer(serializers.ModelSerializer):
    """Serialize/update a Unit's checksum, invalidating stale grades on change."""
    # Expose the model's `code` field under the name `unit`.
    unit = serializers.SlugField(source="code")
    class Meta:
        model = models.Unit
        fields = (
            "unit",
            "checksum",
        )
    def update(self, instance, validated_data):
        """Apply the update; if the checksum changed, mark every previously
        "graded" Grade of this unit as "out-of-date" so it can be re-graded.
        """
        old_checksum = instance.checksum
        instance = super().update(instance, validated_data)
        if old_checksum != instance.checksum:
            # Per-row save() rather than a bulk queryset .update() —
            # presumably to run per-instance save logic/signals; confirm
            # before optimizing into a single UPDATE.
            for grade in models.Grade.objects.filter(
                unit=instance, status="graded"
            ):
                grade.status = "out-of-date"
                grade.save()
        return instance
class AdmissionsGradeSerializer(serializers.ModelSerializer):
    """Same payload shape as GradeSerializer, backed by an admissions Submission."""
    notebook = serializers.FileField(source="feedback")
    class Meta(GradeSerializer.Meta):
        # Inherit the field list from GradeSerializer.Meta; only the model differs.
        model = Submission
class AdmissionsChecksumSerializer(serializers.ModelSerializer):
    """Checksum serializer for admissions Challenges.
    NOTE(review): unlike ChecksumSerializer, this defines no update() hook to
    invalidate previously graded submissions when the checksum changes —
    confirm whether that is intentional.
    """
    unit = serializers.SlugField(source="code")
    class Meta:
        model = Challenge
        fields = (
            "unit",
            "checksum",
        )
| 2.25 | 2 |
Funcoes/ex106-sistemaInterativoAjuda.py | ascaniopy/python | 0 | 17520 | <filename>Funcoes/ex106-sistemaInterativoAjuda.py
from time import sleep
# ANSI escape sequences used to color terminal output (index -> color).
c = ('\033[m', # 0 - reset (no colors)
'\033[0;30;41m', # 1 - black text on red background
'\033[0;30;42m', # 2 - black text on green background
'\033[0;30;43m', # 3 - black text on yellow background
'\033[0;30;44m', # 4 - black text on blue background
'\033[0;30;45m', # 5 - black text on purple background
'\033[0;30m' # 6 - original comment said "white" ("Branco"); \033[0;30m is actually black foreground
)
# Main program
| 1.992188 | 2 |
deeptrack/extras/__init__.py | Margon01/DeepTrack-2.0_old | 65 | 17521 | <reponame>Margon01/DeepTrack-2.0_old
from . import datasets, radialcenter | 0.71875 | 1 |
pyvisdk/enums/virtual_machine_ht_sharing.py | Infinidat/pyvisdk | 0 | 17522 |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
# Hyperthread-sharing policy values for a virtual machine; presumably mirrors
# the vSphere API's VirtualMachineHtSharing enum (file is auto-generated).
VirtualMachineHtSharing = Enum(
    'any',
    'internal',
    'none',
)
| 1.617188 | 2 |
reo/migrations/0118_auto_20210715_2148.py | NREL/REopt_API | 7 | 17523 | <reponame>NREL/REopt_API
# Generated by Django 3.1.12 on 2021-07-15 21:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add nullable lifetime-emissions result fields to SiteModel: health cost
    plus NOx/PM/SO2 mass, each with a business-as-usual (_bau) counterpart.
    (Auto-generated by Django makemigrations.)
    """
    dependencies = [
        ('reo', '0117_auto_20210715_2122'),
    ]
    operations = [
        migrations.AddField(
            model_name='sitemodel',
            name='lifetime_emissions_cost_Health',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='sitemodel',
            name='lifetime_emissions_cost_Health_bau',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='sitemodel',
            name='lifetime_emissions_lb_NOx',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='sitemodel',
            name='lifetime_emissions_lb_NOx_bau',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='sitemodel',
            name='lifetime_emissions_lb_PM',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='sitemodel',
            name='lifetime_emissions_lb_PM_bau',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='sitemodel',
            name='lifetime_emissions_lb_SO2',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='sitemodel',
            name='lifetime_emissions_lb_SO2_bau',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
bpy_lambda/2.78/scripts/addons_contrib/io_scene_cod/__init__.py | resultant-gamedev/bpy_lambda | 0 | 17524 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""
Blender-CoD: Blender Add-On for Call of Duty modding
Version: alpha 3
Copyright (c) 2011 CoDEmanX, Flybynyt -- <EMAIL>
http://code.google.com/p/blender-cod/
TODO
- UI for xmodel and xanim import (planned for alpha 4/5)
"""
# Add-on metadata dictionary read by Blender's add-on manager.
bl_info = {
    "name": "Blender-CoD - Add-On for Call of Duty modding (alpha 3)",
    "author": "CoDEmanX, Flybynyt",
    "version": (0, 3, 5),
    "blender": (2, 62, 0),
    "location": "File > Import | File > Export",
    "description": "Export models to *.XMODEL_EXPORT and animations to *.XANIM_EXPORT",
    "warning": "Alpha version, please report any bugs!",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
                "Scripts/Import-Export/Call_of_Duty_IO",
    "tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
    "support": "TESTING",
    "category": "Import-Export"
}
# To support reload properly, try to access a package var; if it's there,
# reload everything. Blender re-executes this module on script reload, so the
# already-imported submodules must be refreshed explicitly. `imp` matches the
# Python versions bundled with the Blender releases this add-on targets.
if "bpy" in locals():
    import imp
    if "import_xmodel" in locals():
        imp.reload(import_xmodel)
    if "export_xmodel" in locals():
        imp.reload(export_xmodel)
    if "import_xanim" in locals():
        imp.reload(import_xanim)
    if "export_xanim" in locals():
        imp.reload(export_xanim)
import bpy
from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty
import bpy_extras.io_utils
from bpy_extras.io_utils import ExportHelper, ImportHelper
import time
# Planned for alpha 4/5
# Operator: import a CoD xmodel file. (The class docstring doubles as the
# Blender UI tooltip, so it is left unchanged.)
class ImportXmodel(bpy.types.Operator, ImportHelper):
    """Load a CoD XMODEL_EXPORT File"""
    bl_idname = "import_scene.xmodel"
    bl_label = "Import XMODEL_EXPORT"
    bl_options = {'PRESET'}
    filename_ext = ".XMODEL_EXPORT"
    filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
    #use_meshes = BoolProperty(name="Meshes", description="Import meshes", default=True)
    #use_armature = BoolProperty(name="Armature", description="Import Armature", default=True)
    #use_bind_armature = BoolProperty(name="Bind Meshes to Armature", description="Parent imported meshes to armature", default=True)
    #use_split_objects = BoolProperty(name="Object", description="Import OBJ Objects into Blender Objects", default=True)
    #use_split_groups = BoolProperty(name="Group", description="Import OBJ Groups into Blender Objects", default=True)
    #use_image_search = BoolProperty(name="Image Search", description="Search subdirs for any associated images (Warning, may be slow)", default=True)
    def execute(self, context):
        # Delegate to the import_xmodel submodule; load() returns None on
        # success or an error string on failure.
        from . import import_xmodel
        # NOTE(review): time.clock() was removed in Python 3.8; fine for the
        # old Blender/Python this add-on targets, needs perf_counter() on
        # modern builds.
        start_time = time.clock()
        result = import_xmodel.load(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
        if not result:
            self.report({'INFO'}, "Import finished in %.4f sec." % (time.clock() - start_time))
            return {'FINISHED'}
        else:
            self.report({'ERROR'}, result)
            return {'CANCELLED'}
    """
    def draw(self, context):
        layout = self.layout
        col = layout.column()
        col.prop(self, "use_meshes")
        col.prop(self, "use_armature")
        row = layout.row()
        row.active = self.use_meshes and self.use_armature
        row.prop(self, "use_bind_armature")
    """
    @classmethod
    def poll(self, context):
        # (first argument is the class; named `self` in the original)
        return (context.scene is not None)
# Operator: import a CoD xanim file. (Class docstring is the UI tooltip.)
class ImportXanim(bpy.types.Operator, ImportHelper):
    """Load a CoD XANIM_EXPORT File"""
    bl_idname = "import_scene.xanim"
    bl_label = "Import XANIM_EXPORT"
    bl_options = {'PRESET'}
    filename_ext = ".XANIM_EXPORT"
    filter_glob = StringProperty(default="*.XANIM_EXPORT;*.NT_EXPORT", options={'HIDDEN'})
    def execute(self, context):
        # print("Selected: " + context.active_object.name)
        # Delegate directly to the import_xanim submodule.
        from . import import_xanim
        return import_xanim.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
# Operator: export meshes (and optionally bones / pose animation) to a CoD
# XMODEL_EXPORT file. (Class docstring is the UI tooltip.)
class ExportXmodel(bpy.types.Operator, ExportHelper):
    """Save a CoD XMODEL_EXPORT File"""
    bl_idname = "export_scene.xmodel"
    bl_label = 'Export XMODEL_EXPORT'
    bl_options = {'PRESET'}
    filename_ext = ".XMODEL_EXPORT"
    filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.
    use_version = EnumProperty(
        name="Format Version",
        description="XMODEL_EXPORT format version for export",
        items=(('5', "Version 5", "vCoD, CoD:UO"),
               ('6', "Version 6", "CoD2, CoD4, CoD5, CoD7")),
        default='6',
    )
    use_selection = BoolProperty(
        name="Selection only",
        description="Export selected meshes only (object or weight paint mode)",
        default=False
    )
    use_vertex_colors = BoolProperty(
        name="Vertex colors",
        description="Export vertex colors (if disabled, white color will be used)",
        default=True
    )
    use_vertex_colors_alpha = BoolProperty(
        name="As alpha",
        description="Turn RGB vertex colors into grayscale (average value) and use it as alpha transparency. White is 1 (opaque), black 0 (invisible)",
        default=False
    )
    use_apply_modifiers = BoolProperty(
        name="Apply Modifiers",
        description="Apply all mesh modifiers except Armature (preview resolution)",
        default=True
    )
    use_armature = BoolProperty(
        name="Armature",
        description="Export bones (if disabled, only a 'tag_origin' bone will be written)",
        default=True
    )
    use_vertex_cleanup = BoolProperty(
        name="Clean up vertices",
        description="Try this if you have problems converting to xmodel. Skips vertices which aren't used by any face and updates references.",
        default=False
    )
    use_armature_pose = BoolProperty(
        name="Pose animation to models",
        description="Export meshes with Armature modifier applied as a series of XMODEL_EXPORT files",
        default=False
    )
    use_frame_start = IntProperty(
        name="Start",
        description="First frame to export",
        default=1,
        min=0
    )
    use_frame_end = IntProperty(
        name="End",
        description="Last frame to export",
        default=250,
        min=0
    )
    use_weight_min = BoolProperty(
        name="Minimum bone weight",
        description="Try this if you get 'too small weight' errors when converting",
        default=False,
    )
    use_weight_min_threshold = FloatProperty(
        name="Threshold",
        description="Smallest allowed weight (minimum value)",
        default=0.010097,
        min=0.0,
        max=1.0,
        precision=6
    )
    def execute(self, context):
        # Delegate to export_xmodel.save(); it returns None on success or an
        # error string on failure.
        from . import export_xmodel
        # NOTE(review): time.clock() removed in Python 3.8 — see ImportXmodel.
        start_time = time.clock()
        result = export_xmodel.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
        if not result:
            self.report({'INFO'}, "Export finished in %.4f sec." % (time.clock() - start_time))
            return {'FINISHED'}
        else:
            self.report({'ERROR'}, result)
            return {'CANCELLED'}
    # Extend ExportHelper invoke function to support dynamic default values
    def invoke(self, context, event):
        # Default the frame range to the current frame (single-frame export).
        #self.use_frame_start = context.scene.frame_start
        self.use_frame_start = context.scene.frame_current
        #self.use_frame_end = context.scene.frame_end
        self.use_frame_end = context.scene.frame_current
        return super().invoke(context, event)
    def draw(self, context):
        # Custom file-browser side panel for the export options.
        layout = self.layout
        row = layout.row(align=True)
        row.prop(self, "use_version", expand=True)
        # Calculate number of selected mesh objects
        if context.mode in {'OBJECT', 'PAINT_WEIGHT'}:
            meshes_selected = len([m for m in bpy.data.objects if m.type == 'MESH' and m.select])
        else:
            meshes_selected = 0
        col = layout.column(align=True)
        col.prop(self, "use_selection", "Selection only (%i meshes)" % meshes_selected)
        col.enabled = bool(meshes_selected)
        col = layout.column(align=True)
        col.prop(self, "use_apply_modifiers")
        col = layout.column(align=True)
        col.enabled = not self.use_armature_pose
        if self.use_armature and self.use_armature_pose:
            col.prop(self, "use_armature", "Armature (disabled)")
        else:
            col.prop(self, "use_armature")
        if self.use_version == '6':
            # Vertex colors only exist in format version 6.
            row = layout.row(align=True)
            row.prop(self, "use_vertex_colors")
            sub = row.split()
            sub.active = self.use_vertex_colors
            sub.prop(self, "use_vertex_colors_alpha")
        col = layout.column(align=True)
        col.label("Advanced:")
        col = layout.column(align=True)
        col.prop(self, "use_vertex_cleanup")
        box = layout.box()
        col = box.column(align=True)
        col.prop(self, "use_armature_pose")
        sub = box.column()
        sub.active = self.use_armature_pose
        sub.label(text="Frame range: (%i frames)" % (abs(self.use_frame_end - self.use_frame_start) + 1))
        row = sub.row(align=True)
        row.prop(self, "use_frame_start")
        row.prop(self, "use_frame_end")
        box = layout.box()
        col = box.column(align=True)
        col.prop(self, "use_weight_min")
        sub = box.column()
        sub.enabled = self.use_weight_min
        sub.prop(self, "use_weight_min_threshold")
    @classmethod
    def poll(self, context):
        # (first argument is the class; named `self` in the original)
        return (context.scene is not None)
# Operator: export an armature animation (plus optional notetrack markers)
# to a CoD XANIM_EXPORT file. (Class docstring is the UI tooltip.)
class ExportXanim(bpy.types.Operator, ExportHelper):
    """Save a XMODEL_XANIM File"""
    bl_idname = "export_scene.xanim"
    bl_label = 'Export XANIM_EXPORT'
    bl_options = {'PRESET'}
    filename_ext = ".XANIM_EXPORT"
    filter_glob = StringProperty(default="*.XANIM_EXPORT", options={'HIDDEN'})
    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.
    use_selection = BoolProperty(
        name="Selection only",
        description="Export selected bones only (pose mode)",
        default=False
    )
    use_framerate = IntProperty(
        name="Framerate",
        description="Set frames per second for export, 30 fps is commonly used.",
        default=24,
        min=1,
        max=100
    )
    use_frame_start = IntProperty(
        name="Start",
        description="First frame to export",
        default=1,
        min=0
    )
    use_frame_end = IntProperty(
        name="End",
        description="Last frame to export",
        default=250,
        min=0
    )
    use_notetrack = BoolProperty(
        name="Notetrack",
        description="Export timeline markers as notetrack nodes",
        default=True
    )
    use_notetrack_format = EnumProperty(
        name="Notetrack format",
        description="Notetrack format to use. Always set 'CoD 7' for Black Ops, even if not using notetrack!",
        items=(('5', "CoD 5", "Separate NT_EXPORT notetrack file for 'World at War'"),
               ('7', "CoD 7", "Separate NT_EXPORT notetrack file for 'Black Ops'"),
               ('1', "all other", "Inline notetrack data for all CoD versions except WaW and BO")),
        default='1',
    )
    def execute(self, context):
        # Delegate to export_xanim.save(); returns None on success or an
        # error string on failure.
        from . import export_xanim
        # NOTE(review): time.clock() removed in Python 3.8 — see ImportXmodel.
        start_time = time.clock()
        result = export_xanim.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
        if not result:
            self.report({'INFO'}, "Export finished in %.4f sec." % (time.clock() - start_time))
            return {'FINISHED'}
        else:
            self.report({'ERROR'}, result)
            return {'CANCELLED'}
    # Extend ExportHelper invoke function to support dynamic default values
    def invoke(self, context, event):
        # Default to the scene's frame range and render framerate.
        self.use_frame_start = context.scene.frame_start
        self.use_frame_end = context.scene.frame_end
        self.use_framerate = round(context.scene.render.fps / context.scene.render.fps_base)
        return super().invoke(context, event)
    def draw(self, context):
        # Custom file-browser side panel for the export options.
        layout = self.layout
        bones_selected = 0
        armature = None
        # Take the first armature
        for ob in bpy.data.objects:
            if ob.type == 'ARMATURE' and len(ob.data.bones) > 0:
                armature = ob.data
                # Calculate number of selected bones if in pose-mode
                if context.mode == 'POSE':
                    bones_selected = len([b for b in armature.bones if b.select])
                # Prepare info string
                armature_info = "%s (%i bones)" % (ob.name, len(armature.bones))
                break
        else:
            armature_info = "Not found!"
        if armature:
            icon = 'NONE'
        else:
            icon = 'ERROR'
        col = layout.column(align=True)
        col.label("Armature: %s" % armature_info, icon)
        col = layout.column(align=True)
        col.prop(self, "use_selection", "Selection only (%i bones)" % bones_selected)
        col.enabled = bool(bones_selected)
        layout.label(text="Frame range: (%i frames)" % (abs(self.use_frame_end - self.use_frame_start) + 1))
        row = layout.row(align=True)
        row.prop(self, "use_frame_start")
        row.prop(self, "use_frame_end")
        col = layout.column(align=True)
        col.prop(self, "use_framerate")
        # Calculate number of markers in export range
        frame_min = min(self.use_frame_start, self.use_frame_end)
        frame_max = max(self.use_frame_start, self.use_frame_end)
        num_markers = len([m for m in context.scene.timeline_markers if frame_max >= m.frame >= frame_min])
        col = layout.column(align=True)
        col.prop(self, "use_notetrack", text="Notetrack (%i nodes)" % num_markers)
        col = layout.column(align=True)
        col.prop(self, "use_notetrack_format", expand=True)
    @classmethod
    def poll(self, context):
        # (first argument is the class; named `self` in the original)
        return (context.scene is not None)
def menu_func_xmodel_import(self, context):
    # File > Import menu entry for xmodel files.
    self.layout.operator(ImportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")
"""
def menu_func_xanim_import(self, context):
self.layout.operator(ImportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
"""
def menu_func_xmodel_export(self, context):
    # File > Export menu entry for xmodel files.
    self.layout.operator(ExportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")
def menu_func_xanim_export(self, context):
    # File > Export menu entry for xanim files.
    self.layout.operator(ExportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
def register():
    """Register this module's operator classes and hook the File menus."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func_xmodel_import)
    #bpy.types.INFO_MT_file_import.append(menu_func_xanim_import)
    bpy.types.INFO_MT_file_export.append(menu_func_xmodel_export)
    bpy.types.INFO_MT_file_export.append(menu_func_xanim_export)
def unregister():
    """Undo register(): unregister classes and remove the File menu entries."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func_xmodel_import)
    #bpy.types.INFO_MT_file_import.remove(menu_func_xanim_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_xmodel_export)
    bpy.types.INFO_MT_file_export.remove(menu_func_xanim_export)
if __name__ == "__main__":
register()
| 1.703125 | 2 |
pynpact/tests/steps/test_extract.py | NProfileAnalysisComputationalTool/npact | 2 | 17525 | <gh_stars>1-10
import os.path
import pytest
import py
from pynpact.steps import extract
def test_binfile_exists():
    """The extraction binary must be configured and present on disk."""
    binary = extract.BIN
    assert binary
    assert os.path.exists(binary)
def test_plan(gbkconfig, executor):
    """Synchronous plan() should produce a gene-extract file on disk."""
    extract.plan(gbkconfig, executor)
    out_name = gbkconfig[extract.OUTPUTKEY]
    assert out_name
    out_file = py.path.local(out_name)
    assert out_file.exists()
    # the test GenBank file contains exactly 3 genes
    assert len(out_file.readlines()) == 3
def test_plan_async(gbkconfig, async_executor):
    """Asynchronous plan() should produce the same extract file once awaited."""
    extract.plan(gbkconfig, async_executor)
    out_name = gbkconfig[extract.OUTPUTKEY]
    assert out_name
    async_executor.result(out_name, 1)
    out_file = py.path.local(out_name)
    assert out_file.exists()
    # the test GenBank file contains exactly 3 genes
    assert len(out_file.readlines()) == 3
| 2.25 | 2 |
dl_training/core.py | Duplums/SMLvsDL | 0 | 17526 | <reponame>Duplums/SMLvsDL<gh_stars>0
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Core classes.
"""
# System import
import os
import pickle
from copy import deepcopy
import subprocess
# Third party import
import torch
import torch.nn.functional as func
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
# Package import
from dl_training.utils import checkpoint
from dl_training.history import History
import dl_training.metrics as mmetrics
import logging
class Base(object):
""" Class to perform classification.
"""
    def __init__(self, optimizer_name="Adam", learning_rate=1e-3,
                 loss_name="NLLLoss", metrics=None, use_cuda=False,
                 pretrained=None, load_optimizer=True, use_multi_gpu=True,
                 **kwargs):
        """ Class instantiation.
        Observers will be notified, allowed signals are:
        - 'before_epoch'
        - 'after_epoch'
        Parameters
        ----------
        optimizer_name: str, default 'Adam'
            the name of the optimizer: see 'torch.optim' for a description
            of available optimizer.
        learning_rate: float, default 1e-3
            the optimizer learning rate.
        loss_name: str, default 'NLLLoss'
            the name of the loss: see 'torch.nn' for a description
            of available loss.
        metrics: list of str
            a list of extra metrics that will be computed.
        use_cuda: bool, default False
            whether to use GPU or CPU.
        pretrained: path, default None
            path to the pretrained model or weights.
        load_optimizer: boolean, default True
            if pretrained is set, whether to also load the optimizer's weights or not
        use_multi_gpu: boolean, default True
            if several GPUs are available, use them during forward/backward pass
        kwargs: dict
            specify directly a custom 'model', 'optimizer' or 'loss'. Can also
            be used to set specific optimizer parameters.
        """
        # A custom optimizer/loss may be passed directly through kwargs;
        # otherwise they are built by name below.
        self.optimizer = kwargs.get("optimizer")
        self.logger = logging.getLogger("SMLvsDL")
        self.loss = kwargs.get("loss")
        self.device = torch.device("cuda" if use_cuda else "cpu")
        # Remove the consumed keys so remaining kwargs can be forwarded to
        # the optimizer constructor.
        for name in ("optimizer", "loss"):
            if name in kwargs:
                kwargs.pop(name)
        if "model" in kwargs:
            self.model = kwargs.pop("model")
        if self.optimizer is None:
            # Build the optimizer by name from torch.optim, forwarding any
            # leftover kwargs as optimizer options.
            if optimizer_name in dir(torch.optim):
                self.optimizer = getattr(torch.optim, optimizer_name)(
                    self.model.parameters(),
                    lr=learning_rate,
                    **kwargs)
            else:
                raise ValueError("Optimizer '{0}' uknown: check available "
                                 "optimizer in 'pytorch.optim'.")
        if self.loss is None:
            # Build the loss by name from torch.nn.
            if loss_name not in dir(torch.nn):
                raise ValueError("Loss '{0}' uknown: check available loss in "
                                 "'pytorch.nn'.")
            self.loss = getattr(torch.nn, loss_name)()
        # Resolve extra metric names through the project metric factory.
        self.metrics = {}
        for name in (metrics or []):
            if name not in mmetrics.METRICS:
                raise ValueError("Metric '{0}' not yet supported: you can try "
                                 "to fill the 'METRICS' factory, or ask for "
                                 "some help!".format(name))
            self.metrics[name] = mmetrics.METRICS[name]
        if use_cuda and not torch.cuda.is_available():
            raise ValueError("No GPU found: unset 'use_cuda' parameter.")
        if pretrained is not None:
            # Restore model (and optionally optimizer) weights from disk.
            # The checkpoint may be: a full model object, a dict holding
            # "model"/"optimizer" state dicts, or a bare state dict.
            checkpoint = None
            try:
                # map_location keeps tensors on CPU; they are moved below.
                checkpoint = torch.load(pretrained, map_location=lambda storage, loc: storage)
            except BaseException as e:
                self.logger.error('Impossible to load the checkpoint: %s' % str(e))
            if checkpoint is not None:
                if hasattr(checkpoint, "state_dict"):
                    self.model.load_state_dict(checkpoint.state_dict())
                elif isinstance(checkpoint, dict):
                    if "model" in checkpoint:
                        try:
                            # Strip DataParallel's 'module.' prefix so weights
                            # load into a plain (non-wrapped) model.
                            for key in list(checkpoint['model'].keys()):
                                if key.replace('module.', '') != key:
                                    checkpoint['model'][key.replace('module.', '')] = checkpoint['model'][key]
                                    del(checkpoint['model'][key])
                            #####
                            unexpected= self.model.load_state_dict(checkpoint["model"], strict=False)
                            self.logger.info('Model loading info: {}'.format(unexpected))
                            self.logger.info('Model loaded')
                        except BaseException as e:
                            self.logger.error('Error while loading the model\'s weights: %s' % str(e))
                            raise ValueError("")
                    if "optimizer" in checkpoint:
                        if load_optimizer:
                            try:
                                self.optimizer.load_state_dict(checkpoint["optimizer"])
                                # Move optimizer state tensors to the target device.
                                for state in self.optimizer.state.values():
                                    for k, v in state.items():
                                        if torch.is_tensor(v):
                                            state[k] = v.to(self.device)
                            except BaseException as e:
                                self.logger.error('Error while loading the optimizer\'s weights: %s' % str(e))
                        else:
                            self.logger.warning("The optimizer's weights are not restored ! ")
                else:
                    # Bare state dict.
                    self.model.load_state_dict(checkpoint)
        if use_multi_gpu and torch.cuda.device_count() > 1:
            self.model = DataParallel(self.model)
        self.model = self.model.to(self.device)
    def training(self, manager, nb_epochs: int, checkpointdir=None,
                 fold_index=None, scheduler=None, with_validation=True,
                 nb_epochs_per_saving=1, exp_name=None, **kwargs_train):
        """ Train the model over one or several cross-validation folds.
        Model/optimizer/scheduler states are snapshotted before the first
        fold and restored at the start of every fold so folds are independent.
        Parameters
        ----------
        manager: a dl_training DataManager
            a manager containing the train and validation data.
        nb_epochs: int, default 100
            the number of epochs.
        checkpointdir: str, default None
            a destination folder where intermediate models/histories will be
            saved.
        fold_index: int or [int] default None
            the index(es) of the fold(s) to use for the training, default use all the
            available folds.
        scheduler: torch.optim.lr_scheduler, default None
            a scheduler used to reduce the learning rate.
        with_validation: bool, default True
            if set use the validation dataset.
        nb_epochs_per_saving: int, default 1,
            the number of epochs after which the model+optimizer's parameters are saved
        exp_name: str, default None
            the experience name that will be launched
        Returns
        -------
        train_history, valid_history: History
            the train/validation history.
        """
        train_history = History(name="Train_%s"%(exp_name or ""))
        # NOTE(review): `with_validation` is a bool, so `is not None` is always
        # true and valid_history is always created; probably `if with_validation:`
        # was intended — confirm.
        if with_validation is not None:
            valid_history = History(name="Validation_%s"%(exp_name or ""))
        else:
            valid_history = None
        print(self.loss)
        print(self.optimizer)
        # Default: iterate over every available fold.
        folds = range(manager.get_nb_folds())
        if fold_index is not None:
            if isinstance(fold_index, int):
                folds = [fold_index]
            elif isinstance(fold_index, list):
                folds = fold_index
        # Snapshot initial states so each fold starts from the same point.
        init_optim_state = deepcopy(self.optimizer.state_dict())
        init_model_state = deepcopy(self.model.state_dict())
        if scheduler is not None:
            init_scheduler_state = deepcopy(scheduler.state_dict())
        for fold in folds:
            # Initialize everything before optimizing on a new fold
            self.optimizer.load_state_dict(init_optim_state)
            self.model.load_state_dict(init_model_state)
            if scheduler is not None:
                scheduler.load_state_dict(init_scheduler_state)
            loader = manager.get_dataloader(
                train=True,
                validation=True,
                fold_index=fold)
            for epoch in range(nb_epochs):
                loss, values = self.train(loader.train, fold, epoch, **kwargs_train)
                train_history.log((fold, epoch), loss=loss, **values)
                train_history.summary()
                if scheduler is not None:
                    scheduler.step()
                    # NOTE(review): scheduler.get_lr() is deprecated in newer
                    # torch (use get_last_lr()) — confirm the pinned version.
                    print('Scheduler lr: {}'.format(scheduler.get_lr()), flush=True)
                    print('Optimizer lr: %f'%self.optimizer.param_groups[0]['lr'], flush=True)
                # Periodic checkpointing (skips epoch 0).
                if checkpointdir is not None and (epoch % nb_epochs_per_saving == 0 or epoch == nb_epochs-1) \
                        and epoch > 0:
                    if not os.path.isdir(checkpointdir):
                        subprocess.check_call(['mkdir', '-p', checkpointdir])
                        self.logger.info("Directory %s created."%checkpointdir)
                    checkpoint(
                        model=self.model,
                        epoch=epoch,
                        fold=fold,
                        outdir=checkpointdir,
                        name=exp_name,
                        optimizer=self.optimizer)
                    train_history.save(
                        outdir=checkpointdir,
                        epoch=epoch,
                        fold=fold)
                if with_validation:
                    # NOTE(review): test(self, loader) accepts no extra kwargs,
                    # so this raises TypeError whenever kwargs_train is
                    # non-empty — confirm intended signature.
                    _, _, _, loss, values = self.test(loader.validation, **kwargs_train)
                    valid_history.log((fold, epoch), validation_loss=loss, **values)
                    valid_history.summary()
                    if checkpointdir is not None and (epoch % nb_epochs_per_saving == 0 or epoch == nb_epochs-1) \
                            and epoch > 0:
                        valid_history.save(
                            outdir=checkpointdir,
                            epoch=epoch,
                            fold=fold)
        return train_history, valid_history
    def train(self, loader,fold=None, epoch=None, **kwargs):
        """ Train the model for one epoch over the given loader.
        Parameters
        ----------
        loader: a pytorch Dataloader
            yields items exposing `.inputs`, `.outputs` and `.labels`.
        fold, epoch: optional ints, currently unused here (accepted for
            interface symmetry with `training`).
        Returns
        -------
        loss: float
            the mean batch loss over the epoch.
        values: dict
            auxiliary losses (averaged over batches) and the configured
            metrics computed on the whole-epoch predictions.
        """
        self.model.train()
        nb_batch = len(loader)
        pbar = tqdm(total=nb_batch, desc="Mini-Batch")
        values = {}
        # NOTE(review): `iteration` is incremented but never read.
        iteration = 0
        losses = []
        y_pred = []
        y_true = []
        for dataitem in loader:
            pbar.update()
            inputs = dataitem.inputs
            if isinstance(inputs, torch.Tensor):
                inputs = inputs.to(self.device)
            # Collect the available targets (outputs and/or labels); a single
            # target is unwrapped from its list.
            list_targets = []
            _targets = []
            for item in (dataitem.outputs, dataitem.labels):
                if item is not None:
                    _targets.append(item.to(self.device))
            if len(_targets) == 1:
                _targets = _targets[0]
            list_targets.append(_targets)
            # Standard optimization step.
            self.optimizer.zero_grad()
            outputs = self.model(inputs)
            batch_loss = self.loss(outputs, *list_targets)
            batch_loss.backward()
            self.optimizer.step()
            losses.append(float(batch_loss))
            # Accumulate epoch-level predictions/targets for the metrics.
            y_pred.extend(outputs.detach().cpu().numpy())
            y_true.extend(list_targets[0].detach().cpu().numpy())
            # Optional auxiliary losses exposed by the model and/or the loss.
            aux_losses = (self.model.get_aux_losses() if hasattr(self.model, 'get_aux_losses') else dict())
            aux_losses.update(self.loss.get_aux_losses() if hasattr(self.loss, 'get_aux_losses') else dict())
            for name, aux_loss in aux_losses.items():
                if name not in values:
                    values[name] = 0
                values[name] += float(aux_loss) / nb_batch
            iteration += 1
        loss = np.mean(losses)
        # Compute the configured metrics on the full-epoch tensors.
        for name, metric in self.metrics.items():
            if name not in values:
                values[name] = 0
            values[name] = float(metric(torch.tensor(y_pred), torch.tensor(y_true)))
        pbar.close()
        return loss, values
    def testing(self, loader: DataLoader, saving_dir=None, exp_name=None, **kwargs):
        """ Evaluate the model and optionally pickle the results to disk.
        Parameters
        ----------
        loader: a pytorch DataLoader
        saving_dir: str path to the saving directory (created if missing)
        exp_name: str, name of the experiments that is used to derive the output file name of testing results.
        Returns
        -------
        y: array-like
            the predicted data.
        X: array-like
            the input data.
        y_true: array-like
            the true data if available.
        loss: float
            the value of the loss function if true data available.
        values: dict
            the values of the metrics if true data available.
        """
        y, y_true, X, loss, values = self.test(loader)
        if saving_dir is not None:
            if not os.path.isdir(saving_dir):
                subprocess.check_call(['mkdir', '-p', saving_dir])
                self.logger.info("Directory %s created."%saving_dir)
            # Pickle predictions/targets/loss/metrics as "<exp_name>.pkl"
            # (falls back to "test.pkl").
            with open(os.path.join(saving_dir, (exp_name or 'test')+'.pkl'), 'wb') as f:
                pickle.dump({'y_pred': y, 'y_true': y_true, 'loss': loss, 'metrics': values}, f)
        # NOTE: return order differs from self.test(): (y, X, y_true, ...).
        return y, X, y_true, loss, values
    def test(self, loader):
        """ Evaluate the model on the tests or validation data (no gradients).
        Parameter
        ---------
        loader: a pytorch Dataset
            the data loader.
        Returns
        -------
        y: array-like
            the predicted data.
        y_true: array-like
            the true data
        X: array_like
            the input data
        loss: float
            the mean batch loss (0 when no targets are available).
        values: dict
            auxiliary losses and metrics, keyed with an " on validation set"
            suffix.
        """
        self.model.eval()
        nb_batch = len(loader)
        pbar = tqdm(total=nb_batch, desc="Mini-Batch")
        loss = 0
        values = {}
        # NOTE(review): `visuals` is never used.
        visuals = []
        with torch.no_grad():
            y, y_true, X = [], [], []
            for dataitem in loader:
                pbar.update()
                inputs = dataitem.inputs
                if isinstance(inputs, torch.Tensor):
                    inputs = inputs.to(self.device)
                # Gather targets (outputs and/or labels) when present.
                list_targets = []
                targets = []
                for item in (dataitem.outputs, dataitem.labels):
                    if item is not None:
                        targets.append(item.to(self.device))
                        y_true.extend(item.cpu().detach().numpy())
                if len(targets) == 1:
                    targets = targets[0]
                elif len(targets) == 0:
                    targets = None
                if targets is not None:
                    list_targets.append(targets)
                outputs = self.model(inputs)
                # Only accumulate the loss when targets exist.
                if len(list_targets) > 0:
                    batch_loss = self.loss(outputs, *list_targets)
                    loss += float(batch_loss) / nb_batch
                y.extend(outputs.cpu().detach().numpy())
                if isinstance(inputs, torch.Tensor):
                    X.extend(inputs.cpu().detach().numpy())
                # Optional auxiliary losses from the model and/or the loss.
                aux_losses = (self.model.get_aux_losses() if hasattr(self.model, 'get_aux_losses') else dict())
                aux_losses.update(self.loss.get_aux_losses() if hasattr(self.loss, 'get_aux_losses') else dict())
                for name, aux_loss in aux_losses.items():
                    name += " on validation set"
                    if name not in values:
                        values[name] = 0
                    values[name] += aux_loss / nb_batch
            # Now computes the metrics with (y, y_true)
            for name, metric in self.metrics.items():
                name += " on validation set"
                values[name] = metric(torch.tensor(y), torch.tensor(y_true))
        pbar.close()
        return y, y_true, X, loss, values
python/two_pointers/1004_max_consecutive_ones_iii.py | linshaoyong/leetcode | 6 | 17527 | from collections import deque
class Solution(object):
    def longestOnes(self, A, K):
        """Return the length of the longest contiguous run of 1s obtainable
        from the binary list `A` by flipping at most `K` zeros.

        Sliding window: zero indices inside the current window are queued;
        once the flip budget is spent, each new zero closes the window at the
        oldest queued zero.
        """
        window_start = 0
        best = 0
        zero_positions = deque()
        for idx, value in enumerate(A):
            if value == 0:
                zero_positions.append(idx)
                if K > 0:
                    # Still have budget: flip this zero.
                    K -= 1
                else:
                    # Budget exhausted: record the window ending just before
                    # idx, then slide past the oldest zero in the window.
                    best = max(best, idx - window_start)
                    window_start = zero_positions.popleft() + 1
        # Account for the window that reaches the end of the array.
        return max(best, len(A) - window_start)
def test_long_ones():
    """Spot-check longestOnes against known LeetCode examples."""
    solver = Solution()
    assert solver.longestOnes([1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0], 2) == 6
    assert solver.longestOnes(
        [0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1], 3) == 10
    assert solver.longestOnes([0, 0, 1, 1, 1, 0, 0], 0) == 3
| 3.234375 | 3 |
student_files/lap_times_db.py | jstucken/DET-Python-Anki-Overdrive-v1-1 | 0 | 17528 | #
# This script allows the user to control an Anki car using Python
# To control multiple cars at once, open a seperate Command Line Window for each car
# and call this script with the approriate car mac address.
# This script attempts to save lap times into local mysql db running on the pi
# Author: jstucken
# Created: 23-2-2021
#
# Human-readable title for this script (used for identification only).
SCRIPT_TITLE="Lap timer saving to Mysql"
# import required modules
import loader.bootstrapper
import time
from overdrive import Overdrive
from php_communicator import PhpCommunicator
from network import Network
# Setup our car
car = Overdrive(12) # init overdrive object
car.enableLocationData()
# get car mac address from our class object
# NOTE(review): car_mac and username are fetched but never used below.
car_mac = car.getMacAddress()
car_id = car.getCarId()
username = car.getUsername()
student_id = car.getStudentId()
# count number of laps completed
lap_count = 0
# start the car off
# usage: car.changeSpeed(speed, accel)
car.changeSpeed(400, 800)
# Sentinels used to detect when the lap time / lap count reported by the
# car changes between polls, so each new value is handled exactly once.
last_lap_time = 0
last_lap_count = -1
# race 3 laps and time each one
while lap_count !=3:
    # Poll roughly ten times per second.
    time.sleep(0.1)
    # lap count is incremented when cars pass over the finish line
    lap_count = car.getLapCount()
    # count laps done
    if last_lap_count != lap_count:
        last_lap_count = lap_count
        print()
        print("lap_count: "+str(lap_count))
    # get lap time
    prev_lap_time = car.getLapTime()
    # Only act when the car reports a *new* lap time.
    if last_lap_time != prev_lap_time:
        print()
        print("prev_lap_time: "+str(prev_lap_time))
        # if car has completed at least 1 lap
        if lap_count > 0:
            # Save last_lap_time time to database now
            # get cars current location and speed
            # NOTE(review): `location` is fetched but never included in `data`.
            location = car.getLocation()
            speed = car.getSpeed()
            # data to be sent to API
            data = {
                'student_id':student_id,
                'car_id':car_id,
                'lap_time':prev_lap_time,
                'lap_count':lap_count,
                'speed':speed
            }
            # get the local IP address of the server machine
            local_ip_address = Network.getLocalIPAddress()
            # build our PHP script URL where data will be sent to be saved
            # eg "http://192.168.0.10/lap_times_save.php"
            url = "http://"+local_ip_address+"/python_communicator/lap_times_save.php"
            # Send data to PHP to save to database
            php = PhpCommunicator()
            return_text = php.getResponse(url, data) # get the response from PHP
            # extracting response text
            print("Response from PHP script: %s"%return_text)
        # end if
        print()
        print("*****")
        # Remember the handled lap time so it is not re-saved next poll.
        last_lap_time = prev_lap_time
# stop the car
car.stopCarFast()
print("Stopping as car has done the required number of laps")
car.disconnect()
quit() | 3.25 | 3 |
flatsat/opensatkit/cfs/apps/adcs_io/adcs-drivers/cubewheel-driver/test/code.py | cromulencellc/hackasat-final-2021 | 4 | 17529 | import board
from i2cperipheral import I2CPeripheral
from analogio import AnalogOut
from digitalio import DigitalInOut, Direction, Pull
import struct
import math
import time
# --- CubeWheel emulator state (module globals, mutated by the I2C loop) ------
regs = [0] * 16  # scratch register bank (not referenced by the loop below)
index = 0  # scratch index (not referenced by the loop below)
i2c_addr = 0x68  # 7-bit I2C address this peripheral answers on
frame_id = 0  # last frame id byte received from the master
motor_control_mode = 0  # set by telecommand 10, echoed in TLM 130
backup_mode = 0  # set by telecommand 12 (NOTE(review): stored as bytes there)
motor_switch_state = 0  # set by telecommand 7 (NOTE(review): stored as bytes there)
hall_switch_state = 0  # intended target of the (unreachable) second frame-8 branch
encoder_switch_state = 0  # set by telecommand 8
error_flag = 0  # reported in TLM 130, bit 3 of the status byte
unused = 0  # reserved low bits ORed into the TLM 130 status byte
# Individual error flags packed into TLM 145; cleared by telecommand 20.
invalidTelemetryFlag = 0
invalidTelecommandFlag = 0
encoderError = 0
uartError = 0
i2cError = 0
canError = 0
configurationError = 0
speedError = 0
reference_speed = 0  # commanded speed (raw units), set by telecommand 2
wheel_current = 290 # mA
wheel_speed = math.floor(100/2) #rpm
wheel_duty = 5  # PWM duty cycle, set by telecommand 3
wheel_speed_backup = wheel_speed  # backup-estimator speed reported in TLM 138
def send_tlm_identification():
    """Build the TLM 128 (identification) reply as a list of byte values.

    Layout: four fixed id bytes followed by two unsigned shorts
    (serial numbers 1111 and 8888) in native byte order.
    """
    frame = bytearray([8, 0, 9, 8])
    frame += struct.pack("H", 1111)
    frame += struct.pack("H", 8888)
    return list(frame)
def send_tlm_identification_ext():
    """Build the TLM 129 (extended identification) reply as a list of byte values.

    Layout: one unsigned short (1234, native byte order) followed by the
    two literal bytes 68 and 0xFF.
    """
    return list(struct.pack("H", 1234)) + [68, 0xFF]
def send_tlm_status(motor_control_mode, backup_mode, motor_switch_state, hall_switch_state, encoder_switch_state, error_flag):
    """Build the TLM 130 (status) reply.

    Layout: serial numbers 1111 and 8888 as unsigned shorts, two reserved
    zero bytes, the motor control mode, and a packed status byte:
    bit7=backup_mode, bit6=motor, bit5=hall, bit4=encoder, bit3=error_flag,
    low bits = module-global `unused` (reserved).
    """
    packed_flags = (
        ((backup_mode & 0x1) << 7)
        | ((motor_switch_state & 0x1) << 6)
        | ((hall_switch_state & 0x1) << 5)
        | ((encoder_switch_state & 0x1) << 4)
        | ((error_flag & 0x1) << 3)
        | unused
    )
    header = struct.pack("H", 1111) + struct.pack("H", 8888)
    return header + bytearray([0, 0, motor_control_mode, packed_flags])
def send_tlm_wheel_data_full(wheel_speed, wheel_reference_speed, wheel_current):
    """Build the TLM 137 reply: speed, reference speed and current, each
    packed as a native signed 16-bit value, returned as a list of byte values."""
    fields = (wheel_speed, wheel_reference_speed, wheel_current)
    packed = b"".join(struct.pack("h", field) for field in fields)
    return list(packed)
def send_tlm_wheel_data_additional(wheel_duty, wheel_speed_backup):
    """Build the TLM 138 (additional wheel data) reply.

    Packs the PWM duty cycle and the backup (estimated) wheel speed as
    native signed 16-bit values and returns them as a list of byte values.

    Bug fix: the second field previously re-packed ``wheel_duty``, so the
    ``wheel_speed_backup`` argument was silently ignored and the master
    could never see the backup speed.
    """
    output = []
    output += struct.pack("h", wheel_duty) + struct.pack("h", wheel_speed_backup)
    return output
def send_tlm_wheel_status_flags(invalidTelemetryFlag=0, invalidTelecommandFlag=0, encoderError=0, uartError=0, i2cError=0, canError=0, configurationError=0, speedError=0):
    """Pack the eight error/status flags into one byte (TLM 145).

    Bit positions follow argument order: invalidTelemetryFlag is bit 0,
    speedError is bit 7. Only the least-significant bit of each flag is used.
    """
    flags = (
        invalidTelemetryFlag,
        invalidTelecommandFlag,
        encoderError,
        uartError,
        i2cError,
        canError,
        configurationError,
        speedError,
    )
    packed = 0
    for bit, flag in enumerate(flags):
        packed |= (flag & 0x01) << bit
    return bytearray([packed])
def voltage_to_dac(voltage):
    """Convert a voltage (3.3 V full scale) to a 16-bit AnalogOut value.

    Maps the 0..3.3 V range onto 0..65536 (10-bit DAC code scaled by 64),
    truncating toward negative infinity.
    """
    dac_code = (voltage * 1024) / 3.3
    return math.floor(dac_code * 64)
# --- One-off hardware setup ---------------------------------------------
# Drive a fixed analog voltage on A0 so an attached measurement input can
# be checked against a known value while testing.
vout = 0.95
dac_value = voltage_to_dac(vout)
print("Set analog output for testing: {0:f} ({1:d}) V".format(vout, dac_value))
analog_out = AnalogOut(board.A0)
analog_out.value = dac_value
# D8 acts as the "wheel enable" input; the emulator idles until it goes high.
enable_pin = DigitalInOut(board.D8)
enable_pin.direction = Direction.INPUT
# enable_pin.pull = Pull.DOWN
print("Waiting for wheel enable")
while enable_pin.value == False:
    time.sleep(0.1)
print("Starting I2C response")
# --- Main loop: emulate a CubeWheel as an I2C peripheral -----------------
# Telecommands (frame id < 40) arrive as plain writes: [frame_id, payload...].
# Telemetry requests (frame id >= 128) are combined write+restart+read
# transfers: the write supplies the frame id, the restarted read is answered.
with I2CPeripheral(board.SCL, board.SDA, (i2c_addr,)) as device:
    while True:
        r = device.request()
        if not r:
            # Maybe do some housekeeping
            continue
        with r: # Closes the transfer if necessary by sending a NACK or feeding dummy bytes
            # print("Process request")
            # print("I2C Addr: 0x{0:02X}, Is Read {1:d}, Is Restart {2:d}".format(r.address, r.is_read, r.is_restart))
            if r.address == i2c_addr:
                if not r.is_read: # Main write which is Selected read
                    # First byte of every transfer selects the frame.
                    # print("Get Frame Id Byte")
                    b = r.read(1)
                    if b:
                        frame_id = struct.unpack("B", b)[0]
                        print("Recieved frame ID: " + str(frame_id))
                        if frame_id < 40:
                            # print("Telecommand Recieved")
                            if frame_id == 1:
                                reset_id = struct.unpack("B", r.read(1))[0]
                                # print("Reset telecommand recieved: {0:d}".format(reset_id))
                            elif frame_id == 2:
                                reference_speed = struct.unpack("h", r.read(2))[0]
                                reference_speed_rpm = float(reference_speed/2.0)
                                # Simulate the wheel tracking the command with a small offset.
                                wheel_speed = reference_speed + 5
                                # print("Reference speed telecommand recieved. Speed: {0:d}:{1:f}".format(reference_speed, reference_speed_rpm))
                            elif frame_id == 3:
                                wheel_duty = struct.unpack("h", r.read(2))[0]
                                # print("Duty cycle command recieved. Duty Cycle: {0:d}".format(wheel_duty))
                            elif frame_id == 7:
                                # NOTE(review): r.read(1) returns bytes, but send_tlm_status()
                                # later masks this value with `& 0x1`; a subsequent frame-130
                                # request would raise TypeError. An unpack looks missing here
                                # (same for frames 8 and 12 below) — confirm against frame 10.
                                motor_switch_state = r.read(1)
                                # print("Recieved motor power state command. State: {}".format(motor_switch_state))
                            elif frame_id == 8:
                                encoder_switch_state = r.read(1)
                                # print("Recieved encoder power state command. State: {}".format(encoder_switch_state))
                            elif frame_id == 8:
                                # NOTE(review): duplicate `elif frame_id == 8` — this hall
                                # branch is unreachable; presumably a different frame id
                                # (e.g. 9) was intended. Confirm against the CubeWheel protocol.
                                hall_switch_state = r.read(1)
                                # print("Recieved hall power state command. State: {}".format(encoder_switch_state))
                            elif frame_id == 10:
                                motor_control_mode = struct.unpack("B", r.read(1))[0]
                                # print("Control mode telecommand recieved. Mode: {0:d}".format(motor_control_mode))
                            elif frame_id == 12:
                                backup_mode = r.read(1)
                                # print("Recieved back-up mode state command. State: {}".format(backup_mode))
                            elif frame_id == 20:
                                clear_errors = r.read(1)
                                # NOTE(review): clear_errors is bytes, so `== 85` compares
                                # bytes to int and is always False — the error flags are
                                # never actually cleared. Likely meant b'\x55' or an unpack.
                                if clear_errors == 85:
                                    invalidTelemetryFlag = 0
                                    invalidTelecommandFlag = 0
                                    encoderError = 0
                                    uartError = 0
                                    i2cError = 0
                                    canError = 0
                                    configurationError = 0
                                    speedError = 0
                            elif frame_id == 31:
                                new_i2c_addr = r.read(1)
                                # print("Recieved set I2C addr command. I2C: {}".format(new_i2c_addr))
                            elif frame_id == 33:
                                new_can_mask = r.read(1)
                                # print("Recieved set CAN mask command. CAN Mask: {}".format(new_can_mask))
                            elif frame_id == 33:
                                # NOTE(review): duplicate `elif frame_id == 33` — this
                                # PWM-gain branch is unreachable; one of the two ids is
                                # wrong (CAN mask vs PWM gain). Confirm against the protocol.
                                b = r.read(3)
                                # print("Recieved PWM Gain Command: {0:s}".format(str(b)))
                            elif frame_id == 34:
                                b = r.read(6)
                                # print("Recieved Main Speed Controller Gain Command: {0:s}".format(str(b)))
                            elif frame_id == 35:
                                b = r.read(6)
                                # print("Recieved Backup Speed Controller Gain Command: {0:s}".format(str(b)))
                            else:
                                # Unknown telecommand: latch the error flag for TLM 145.
                                invalidTelecommandFlag = 1
                    else:
                        # print("No data to read")
                        continue
                elif r.is_restart: # Combined transfer: This is the Main read message
                    # Answer the telemetry request selected by the preceding write.
                    # print("Recieved Telemetry Request")
                    n = 0
                    if frame_id == 128:
                        n = r.write(bytes(send_tlm_identification()))
                    elif frame_id == 129:
                        n = r.write(bytes(send_tlm_identification_ext()))
                    elif frame_id == 130:
                        n = r.write(bytes(send_tlm_status(motor_control_mode, backup_mode, motor_switch_state, hall_switch_state, encoder_switch_state, error_flag)))
                    elif frame_id == 133:
                        # bytes(2) is two zero bytes — placeholder telemetry.
                        n = r.write(bytes(2))
                    elif frame_id == 134:
                        n = r.write(bytes(2))
                    elif frame_id == 135:
                        n = r.write(bytes(2))
                    elif frame_id == 137:
                        n = r.write(bytes(send_tlm_wheel_data_full(wheel_speed, reference_speed, wheel_current)))
                    elif frame_id == 138:
                        n = r.write(bytes(send_tlm_wheel_data_additional(wheel_duty, wheel_speed_backup)))
                    elif frame_id == 139:
                        n = r.write(bytearray([9,8,7]))
                    elif frame_id == 140:
                        n = r.write(bytearray([1,2,3,4,5,6]))
                    elif frame_id == 141:
                        n = r.write(bytearray([10, 11, 12, 13, 14, 15]))
                    elif frame_id == 145:
                        n = r.write(bytes(send_tlm_wheel_status_flags(invalidTelemetryFlag, invalidTelecommandFlag, encoderError, uartError, i2cError, canError, configurationError, speedError)))
                    else:
                        # Unknown telemetry frame: latch the error flag for TLM 145.
                        invalidTelemetryFlag = 1
                    # print("Wrote " + str(n) + " bytes to master")
| 2.53125 | 3 |
constants.py | tooreht/airstripmap | 0 | 17530 | <reponame>tooreht/airstripmap<filename>constants.py
# Madagascar government airports mapped to a coarse size category
# ("big" / "medium" / "small"), presumably used for map-marker sizing —
# confirm against the consuming code.
# NOTE(review): "<NAME>" appears twice below (an anonymisation placeholder);
# duplicate dict keys are silently deduplicated, so the "medium" entry is
# overwritten by the later "small" one. Restore the real airport names.
GOV_AIRPORTS = {
    "Antananarivo/Ivato": "big",
    "Antsiranana/Diego": "small",
    "Fianarantsoa": "small",
    "Tolagnaro/Ft. Dauphin": "small",
    "Mahajanga": "medium",
    "Mananjary": "small",
    "<NAME>": "medium",
    "Morondava": "small",
    "<NAME>": "small",
    "Sambava": "small",
    "Toamasina": "small",
    "Toliary": "small",
}
| 1.429688 | 1 |
practical_0/fibonacci.py | BarracudaPff/code-golf-data-pythpn | 0 | 17531 | <filename>practical_0/fibonacci.py
def fibonacci(n):
    """Compute the first 10 Fibonacci numbers and their n-th powers.

    Prints the Fibonacci sequence, the powered sequence, and the two stacked
    as a 2x10 array, then writes the 20 concatenated values (Fibonacci
    numbers followed by their powers) to ``myfibonaccis.txt``, one unsigned
    integer per line.

    Args:
        n: the exponent; may be an int or a numeric string (e.g. straight
           from ``sys.argv``).

    Fixes over the previous version: the local array no longer shadows the
    function name, ``n`` is coerced once up front, and object dtype (Python
    ints) is used so powers no longer overflow the previous int32 storage
    (e.g. 34**7 already exceeds 2**31 - 1).
    """
    n = int(n)
    fib = np.zeros(10, dtype=object)
    fib[0] = 0
    fib[1] = 1
    for i in range(2, 10):
        fib[i] = fib[i - 1] + fib[i - 2]
    print(fib)
    # Exact arbitrary-precision powers via Python ints.
    fib_pow = np.array([int(v) ** n for v in fib], dtype=object)
    print(fib_pow)
    print(np.vstack((fib, fib_pow)))
    np.savetxt("myfibonaccis.txt", np.hstack((fib, fib_pow)), fmt="%u")
def main(n):
    """Entry point: run the Fibonacci/power computation with exponent *n*
    (an int, or a numeric string taken straight from the command line)."""
    fibonacci(n)
# Script entry point: the exponent is taken from the first CLI argument.
if __name__ == "__main__":
    INPUT = sys.argv[1]  # NOTE(review): this is a str; downstream code coerces it with int()
    print(INPUT)
main(INPUT) | 3.6875 | 4 |
UW_System/UW_System/UW_System/spiders/uw_system.py | Nouldine/MyCrawlerSystem | 0 | 17532 | <filename>UW_System/UW_System/UW_System/spiders/uw_system.py
from scrapy import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.linkextractors import LinkExtractor
import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from w3lib.html import remove_tags
from UW_System.items import UwSystemItem
class uw_system( scrapy.Spider ):
    """Scrape UW System transfer-wizard course-equivalency reports.

    Each entry in ``start_urls`` (declared below) requests one department's
    equivalency table; successful responses are handled by ``parse_httpbin``
    and failures by ``errback_httpbin``.
    """
    name = 'uw_system'  # spider id used by `scrapy crawl`
    allowed_domains = ['wisconsin.edu']  # requests outside this domain are filtered
start_urls = [
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0701&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0502&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1001&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2211&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2212&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2202&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1003&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1002&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1911&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0517&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0401&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1905&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2213&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0863&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0829&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0877&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1220&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1506&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1008&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4957&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0823&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2204&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0862&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0821&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0801&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0802&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1501&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4955&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1010&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0504&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1512&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1102&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4931&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2206&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1914&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1103&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0837&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2205&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4913&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2210&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0838&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0601&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1801&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0855&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4901&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0506&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0509&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1701&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0702&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1005&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0870&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0876&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1509&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1902&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2207&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2001&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2222&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2103&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1510&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2208&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2104&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1105&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0808&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0865&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1007&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4949&submitButton=Match+All+Courses",
]
def start_requests( self ):
for u in self.start_urls:
yield scrapy.Request( u, callback = self.parse_httpbin,
errback = self.errback_httpbin,
dont_filter = True )
def parse_httpbin( self, response ):
self.logger.info("Got successful response {}".format(response.url) )
#items = UwSystemItem()
#course = response.css('#reportTable > tbody > tr > td.::text').extract()
#course = response.css('tbody > tr > td::text').extract()
#course = response.css('.campus-one-list::text').extract()[0];
course_1 = response.xpath('////tr/td[1][@class="campus-one-list"]/text()').extract()
title_1 = response.xpath('////tr/td[2][@class="campus-one-list"]/text()').extract()
course_2 = response.xpath('////tr/td[3][@class="campus-two-list"]/text()').extract()
title_2 = response.xpath('////tr/td[4][@class="campus-two-list"]/text()').extract()
credits = response.xpath('////tr/td[5][@class="campus-two-list"]/text()').extract()
gen_ed = response.xpath('////tr/td[6][@class="campus-two-list"]').extract()
level = response.xpath('////tr/td[7][@class="campus-two-list"]').extract()
special = response.xpath('////tr/td[8][@class="special-list"]').extract()
final_course_1 = []
final_title_1 = []
final_course_2 = []
final_title_2 = []
final_credits = []
final_gen_ed = []
final_level = []
final_special = []
for course_set1 in course_1:
if course_set1 == '\n' or course_set1 == ' ':
continue
final_course_1.append(remove_tags(course_set1))
for title1 in title_1:
if title1 == '\n' or title1 == ' ':
continue
final_title_1.append(remove_tags(title1))
for course_set2 in course_2:
if course_set2 == '\n' or course_set2 == ' ':
continue
final_course_2.append(remove_tags(course_set2))
for title2 in title_2:
if title2 == '\n' or title2 == ' ':
continue
final_title_2.append(remove_tags(title2))
for creditset in credits:
if creditset == '\n' or creditset == ' ':
continue
final_credits.append(remove_tags(creditset))
for gen in gen_ed:
if gen == '\n':
continue
final_gen_ed.append(remove_tags(gen))
for lev in level:
if lev == '\n' or lev == ' ':
continue
final_level.append(remove_tags(lev))
for specia in special:
if specia == '\n\n ':
continue
final_special.append(remove_tags(specia))
item = []
track_index = 0
course_size = len(final_course_1)
while track_index < course_size:
items = UwSystemItem()
items['course_1'] = final_course_1[ track_index ]
items['title_1'] = final_title_1[ track_index ]
items['course_2'] = final_course_2[ track_index ]
items['title_2'] = final_title_2[ track_index ]
items['credits'] = final_credits[ track_index ]
try:
items['gen_ed'] = final_gen_ed[ track_index ]
except IndexError:
items['gen_ed'] = 'None'
try:
items['level'] = final_level[ track_index ]
except IndexError:
items['level'] = 'None'
try:
items['special'] = final_special[ track_index ]
except IndexError:
items['special'] = 'None'
item.append(items)
track_index += 1
return item
    def errback_httpbin(self, failure):
        """Log request failures, with specific messages for HTTP, DNS and
        timeout errors (standard scrapy errback pattern)."""
        # log all failures
        self.logger.error(repr(failure))
        # in case you want to do something special for some errors,
        # you may need the failure's type:
        if failure.check(HttpError):
            # These exception come from HttpError spider middleware
            # you can get the non-200 response
            response = failure.value.response
            self.logger.error("HttpError on %s", response.url )
        elif failure.check(DNSLookupError):
            # This is the original request
            request = failure.request
            self.logger.error('DNSLookupError on %s', request.url )
        elif failure.check(TimeoutError, TCPTimeOutError ):
            # NOTE(review): twisted names this error TCPTimedOutError — confirm
            # the import at the top of the file matches this spelling.
            request = failure.request
            self.logger.error('TimeoutError on %s', request.url)
| 2.046875 | 2 |
src/lib/GL/glutbindings/glutbind.py | kokizzu/v8cgi | 4 | 17533 | <reponame>kokizzu/v8cgi
import sys
import re
PATH_GLUT = 'glut.h'        # input: GLUT header to scan for constants/functions
FILE_GLUT = 'glutbind.cpp'  # output: generated V8 binding source
# Functions whose wrappers are too irregular to auto-generate; their code is
# read from hand-written "<name>.template" files instead.
TEMPLATES = ['glutInit', 'glutTimerFunc']
def main():
    """
    Entry point: generate the GLUT V8 binding source (glutbind.cpp).

    Still some things have to be hand-made, like
    changing argv pargc values in the glutInit method definition
    Also change the TimerFunc method with some magic.
    """
    make_glut()
constants = []
functions = []
void_stars = []
constant = re.compile(".+define[\s]+GLUT_([^\s]+).*")
function = re.compile("[\s]*extern[\s]+([^\s]+)[\s]+APIENTRY[\s]+glut([A-Za-z0-9]+)\((.*)\);")
text_out = []
fin = open(PATH_GLUT, 'r')
for l in fin:
mat = re.match(constant, l)
if mat and not mat.group(1) in constants:
name = mat.group(1)
constants.append(name)
text_out.append(make_constant("GLUT", name))
if name.find("STROKE") != -1 or name.find("BITMAP") != -1:
void_stars.append(name)
#print "GLUT_" + mat.group(1) + "\n"
else:
mat = re.match(function, l)
if mat:
prefix = "glut"
return_val = mat.group(1)
name = mat.group(2)
params = mat.group(3)
functions.append(name)
#if has template then take the template code
if (prefix + name) in TEMPLATES:
t = open(prefix + name + '.template', 'r')
text_out.append(t.read())
t.close()
else:
has_lambda, count, params_list = get_param_list(params)
if has_lambda is True and count == 1:
text_out.append(make_function_with_callback(prefix, name, params_list, return_val))
else:
text_out.append(make_function(prefix, name, params_list, count, return_val))
#print return_val + " " + name + " " + params
fin.close()
fout = open(FILE_GLUT, 'w')
fout.write("""
#include "glutbind.h"
int* pargc_;
char** argv_;
map<const char*, void*> font_;
Persistent<Context> GlutFactory::glut_persistent_context;
""" + '\n'.join(text_out) + make_main_glut_function(constants, functions, void_stars))
fout.close()
def make_main_glut_function(constants, functions, void_stars):
    """Assemble the C++ GlutFactory::createGlut(...) factory definition.

    The factory creates the Glut ObjectTemplate, fills the font_ map with
    the void* constants, and registers every constant accessor and every
    function callback generated earlier.
    """
    text_out_begin = """
Handle<ObjectTemplate> GlutFactory::createGlut(int* pargc, char** argv) {
  pargc_ = pargc;
  argv_ = argv;

  HandleScope handle_scope;
  Handle<ObjectTemplate> Glut = ObjectTemplate::New();

  Glut->SetInternalFieldCount(1);

"""
    text_out_end = """
  // Again, return the result through the current handle scope.
  return handle_scope.Close(Glut);
}
"""
    fnt = [bind_font(name) for name in void_stars]
    cts = [bind_accessor("Glut", name) for name in constants]
    fts = [bind_function("Glut", name) for name in functions]
    return text_out_begin + '\n'.join(fnt) + '\n'.join(cts) + '\n'.join(fts) + text_out_end
def make_constant(prefix, name):
    """Emit a C++ accessor GetGLUT_<name> for the constant GLUT_<name>.

    Font constants (BITMAP/STROKE) are void* pointers in GLUT, so they are
    exposed as their *name* string (resolved via font_ at call time);
    everything else is returned as a Uint32.
    NOTE(review): `prefix` is accepted but unused — "GLUT" is hard-coded in
    the template below.
    """
    if name.find("BITMAP") != -1 or name.find("STROKE") != -1:
        return_val = "return String::New(\""+ name +"\");\n"
    else:
        return_val = "return Uint32::New(GLUT_"+ name +");"
    text_out = """
Handle<Value> GetGLUT_%%(Local<String> property,
                      const AccessorInfo &info) {
  ##
}
"""
    return multiple_replace({
        '%%': name,
        '##': return_val
    }, text_out)
def make_function(prefix, name, params_list, count, return_val):
    """Emit the V8 wrapper GLUT<name>Callback for a plain GLUT function.

    The wrapper checks the argument count, converts each JS argument to the
    matching C type (make_args) and performs the call (make_call).
    NOTE(review): `return_val` is unused — every wrapper returns undefined.
    """
    text_out = """
Handle<Value> GLUT<name>Callback(const Arguments& args) {
  //if less that nbr of formal parameters then do nothing
  if (args.Length() < <len_params>) return v8::Undefined();
  //define handle scope
  HandleScope scope;
  //get arguments
<args>
  //make call
  <call>
  return v8::Undefined();
}
"""
    return multiple_replace({
        '<name>': name,
        '<len_params>': str(count),
        '<args>': make_args(params_list, count),
        '<call>': make_call(prefix + name, params_list, count)
    }, text_out)
def make_function_with_callback(prefix, name, params_list, return_val):
    """Emit the wrapper for a GLUT call that registers a callback
    (e.g. glutDisplayFunc).

    Generates (a) a persistent handle storing the JS function, (b) a C
    trampoline func<name> that marshals the C arguments back into V8 and
    invokes the stored JS function, and (c) the GLUT<name>Callback wrapper
    that stores the JS function and passes the trampoline to GLUT.
    NOTE(review): `return_val` is unused.
    """
    text_out = """
Persistent<Function> persistent<name>;

<prototype> {
  //define handle scope
  HandleScope scope;

  Handle<Value> valueArr[<nformalparams>];
<formalparamassignment>

  TryCatch try_catch;

  Handle<Value> result = persistent<name>->Call(GlutFactory::glut_persistent_context->Global(), <nformalparams>, valueArr);

  if (result.IsEmpty()) {
    String::Utf8Value error(try_catch.Exception());
    fprintf(stderr, "Exception in <name>: %s\\n", *error);
  }
}

Handle<Value> GLUT<name>Callback(const Arguments& args) {
  //if less that nbr of formal parameters then do nothing
  if (args.Length() < 1 || !args[0]->IsFunction()) return v8::Undefined();
  //get arguments
  //delete previous assigned function
  persistent<name>.Dispose();
  Handle<Function> value0 = Handle<Function>::Cast(args[0]);
  persistent<name> = Persistent<Function>::New(value0);
  //make call
  glut<name>((<signature>) func<name>);
  return v8::Undefined();
}
"""
    nformalparams, prototype = make_prototype(name, params_list[0])
    signature = params_list[0].replace('func', '')
    formalparamassignment = formal_param_assignment(signature)
    return multiple_replace({
        '<name>': name,
        '<nformalparams>': str(nformalparams),
        '<prototype>': prototype,
        '<formalparamassignment>': formalparamassignment,
        '<signature>': signature
    }, text_out)
def make_prototype(name, signature):
    """Build the C prototype of the trampoline function func<name>.

    *signature* is a function-pointer type like "void (*func)(int, int)".
    Returns (number_of_formal_params, prototype_string).
    """
    print 'prev ' + signature
    signature = signature.replace('(*func)', 'func' + name)
    ht = signature.split('(')
    hd, tail = ht[0], ht[1].replace(')', '')
    # Each non-void parameter becomes "<type> argN".
    ans = [get_type(''.join(val), False) + ' arg' + str(i) for i, val in enumerate(tail.split(',')) if val.find('void') == -1]
    #.strip().split(' ')[:-1]
    print 'end ' + hd + ' ( ' + ','.join(ans) + ')'
    return len(ans), hd + ' ( ' + ','.join(ans) + ')'
def formal_param_assignment(signature):
    """Generate the C++ statements that wrap each formal parameter of a GLUT
    callback into a V8 Handle<Value> ("valueArr[i] = ...").

    signature -- a C function-pointer type such as "void (*)(int, int)".
    Returns the generated lines joined by newlines, or '' when *signature*
    does not look like a function pointer.

    Fix: the py2-only ``print x`` statements were converted to the
    parenthesized form, which prints identically under Python 2 (single
    argument) and is valid Python 3.
    """
    print("signature")
    print(signature)
    pat = re.compile('[\s]*[a-zA-Z0-9\*]+[\s]*\(\*[\s]*\)\((.*)\)')
    pars = re.match(pat, signature)
    if pars:
        pars = pars.group(1).split(',')
        ans = []
        for i, val in enumerate(pars):
            # Map each C parameter type to the matching V8 constructor.
            if val.find('int') != -1 or val.find('unsigned char') != -1:
                ans.append(" valueArr[" + str(i) + "] = Integer::New(arg" + str(i) + ");")
            elif val.find('float') != -1 or val.find('double') != -1:
                ans.append(" valueArr[" + str(i) + "] = Number::New(arg" + str(i) + ");")
            elif val.find('char*') != -1:
                ans.append(" valueArr[" + str(i) + "] = String::New(arg" + str(i) + ");")
        return '\n'.join(ans)
    else:
        return ''
def get_param_list(params):
    """Split a C parameter string on commas, re-joining fragments whose
    parentheses are unbalanced (function-pointer parameters contain commas).

    Returns a tuple ``(has_callback, arg_count, fragments)`` where
    ``has_callback`` is True when a 'func' placeholder appears anywhere,
    and ``arg_count`` is 0 for a bare single-word parameter list ("void").
    """
    merged = []
    for fragment in params.split(','):
        # A trailing unbalanced '(' means the previous fragment was split
        # inside a function-pointer type: glue this piece back on.
        if merged and merged[-1].count('(') != merged[-1].count(')'):
            merged[-1] += ',' + fragment
        else:
            merged.append(fragment)

    total = len(merged)
    only_keyword = (total == 1
                    and merged[0].find('func') == -1
                    and len(merged[0].strip().split(' ')) == 1)
    arg_count = 0 if only_keyword else total

    has_callback = 'func' in ' '.join(merged)
    return has_callback, arg_count, merged
def make_args(params_list, count):
    """Generate the C++ statements that convert the first *count* JS
    arguments (args[i]) into local C variables argN, one per parameter type
    in *params_list*.

    NOTE(review): the local name `type` shadows the builtin; harmless here
    but worth renaming on a future pass.
    """
    ans = []
    for i in range(count):
        el = params_list[i]
        type = get_type(el)
        #is function
        if type.find('(*') != -1:
            ans.append("  Handle<Function> value" + str(i) + " = Handle<Function>::Cast(args[" + str(i) + "]);\n  void* arg" + str(i) + " = *value" + str(i) + ";\n")
            #print "function " + type
        #is string
        elif type.find('char*') != -1:
            ans.append("  String::Utf8Value value"+ str(i) +"(args["+ str(i) +"]);\n  char* arg" + str(i) + " = *value"+ str(i) +";\n")
            #print "string " + type
        #is void* — font constants are passed by name and resolved in font_
        elif type.find('void*') != -1:
            ans.append("  String::Utf8Value value"+ str(i) +"(args["+ str(i) +"]);\n  char* key" + str(i) + " = *value"+ str(i) +";\n  void* arg" + str(i) + " = font_[key"+ str(i) +"];\n")
            #print "void " + type
        #is array
        elif type.find('*') != -1:
            ans.append("  Handle<Array> arg" + str(i) + " = Array::Cast(args[" + str(i) + "]);\n")
            #print "array " + type
        #is unsigned integer
        elif type.find('unsigned int') != -1:
            ans.append("  unsigned int arg" + str(i) + " = args["+ str(i) +"]->Uint32Value();\n")
            #print "unsigned int " + type
        #is integer
        elif type.find('int') != -1 or type.find('enum') != -1:
            ans.append("  int arg" + str(i) + " = args["+ str(i) +"]->IntegerValue();\n")
            #print "integer " + type
        #is double, float
        elif type.find('double') != -1 or type.find('float') != -1:
            ans.append("  double arg" + str(i) + " = args["+ str(i) +"]->NumberValue();\n")
            #print "double " + type
        else:
            # Unknown parameter type: warn on stdout so the generated file
            # can be fixed by hand.
            print "don't know what this is "
            print type
    return ''.join(ans)
def make_call(name, params_list, nb):
    """Render the generated C++ call: "<name>((cast0)arg0, (cast1)arg1, ...);"."""
    cast_args = []
    for index in range(nb):
        cast_args.append(get_type(params_list[index]) + "arg" + str(index))
    return name + "(" + ", ".join(cast_args) + ");"
def bind_accessor(prefix, name):
    """Emit the C++ line registering GetGLUT_<name> as an accessor on <prefix>."""
    template = ' {0}->SetAccessor(String::NewSymbol("{1}"), GetGLUT_{1});\n'
    return template.format(prefix, name)
def bind_function(prefix, name):
    """Emit the C++ line registering GLUT<name>Callback as a method on <prefix>."""
    template = ' {0}->Set(String::NewSymbol("{1}"), FunctionTemplate::New(GLUT{1}Callback));\n'
    return template.format(prefix, name)
def bind_font(name):
    """Emit the C++ line mapping the font name string to its GLUT_* pointer."""
    return ' font_["{0}"] = GLUT_{0};\n'.format(name)
def get_type(t, parens=True):
    """Extract the C type from a declaration fragment like "int x" or
    "void (*func)(int)".

    Function-pointer fragments just lose their 'func' placeholder; plain
    declarations drop the variable name, keeping any '*' that was attached
    to it.  With ``parens`` the result is wrapped for use as a C cast.
    """
    if '(*' in t or 'func' in t:
        extracted = t.replace('func', '')
    else:
        pieces = t.strip().split(' ')
        # Stars glued to the variable name ("int *p") belong to the type.
        extracted = ' '.join(pieces[:-1]) + '*' * pieces[-1].count('*')
    if parens:
        return '(' + extracted + ')'
    return extracted
def multiple_replace(dict, text):
    """Replace, in a single pass, every occurrence in *text* of any key of
    *dict* by its corresponding value; returns the new string."""
    # Build one alternation pattern over all (escaped) keys ...
    pattern = re.compile("(%s)" % "|".join(re.escape(key) for key in dict.keys()))
    # ... and resolve each hit through the dictionary.
    return pattern.sub(lambda match: dict[match.group(0)], text)
main()
| 2.5 | 2 |
clase_caballo.py | DorianAlbertoIbanezNanguelu/concurrencia-caballos | 0 | 17534 | import threading
import time
import random
from multiprocessing.pool import ThreadPool
from PyQt5 import QtCore, QtGui, QtWidgets
bandera = False  # shared finish flag: set True by the first horse to cross the line
val1 = ""        # appears unused beyond the `global` declaration in caballo.__init__
msg = 'Caballo ganador es: {}'  # winner announcement template (runtime string, unchanged)
# Horse class: each instance is a thread that advances one on-screen Qt
# button until it crosses the finish line (600 px); the module-level
# `bandera` flag ends the race for every horse.
class caballo(threading.Thread):
    def __init__(self, num, b1,resultado):
        """num: horse number; b1: Qt button representing the horse;
        resultado: accepted but unused — self.resultado is reset below."""
        global val1,bandera
        threading.Thread.__init__(self)
        bandera = False
        self.resultado = 20.0  # elapsed-time placeholder, overwritten at the finish
        self.tiempo_inicio = time.time()  # race start timestamp
        self.tiempo_final = ""
        self.tiempo_total = ""
        self.num = num
        self.valor = 0  # accumulated horizontal distance in pixels
        self.boton = b1
        self.eleccion= ""

    # Picks a random step size: 10, 20, 30 or 40 pixels.
    def aleatorio(self):
        mylist = ["10","20","30","40"]
        self.eleccion = random.choice(mylist)

    # Horse movement: shift the button right by the chosen step, then pause.
    def movimiento(self):
        self.p = self.boton.pos()
        self.p += QtCore.QPoint(int(self.eleccion), 0)
        self.valor += int(self.eleccion)
        self.boton.move(self.p)
        time.sleep(0.75)

    def retorno(self):
        # NOTE(review): bare expression — returns None; presumably meant
        # `return self.resultado`. Confirm before relying on this method.
        self.resultado

    # Thread body: keep stepping until this horse finishes or another one
    # sets the shared `bandera` flag.
    def run(self):
        global bandera
        while(True):
            if bandera == True:
                break
            else:
                self.aleccion = None if False else None  # (no-op removed)
                self.aleatorio()
                self.movimiento()
                if self.valor >= 600:
                    # Finish line reached: record the elapsed time and stop the race.
                    self.tiempo_final = time.time()
                    self.resultado = self.tiempo_final-self.tiempo_inicio
                    print("\nEl caballo: " + str(self.num)+" cruzó la meta!!, Tiempo: "+str(self.resultado))
                    bandera=True
                    break
| 3.0625 | 3 |
vwo/api/track.py | wingify/vwo-python-sdk | 14 | 17535 | <gh_stars>10-100
# Copyright 2019-2021 Wingify Software Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..helpers import impression_util
from ..constants import constants
from ..constants.constants import API_METHODS
from ..helpers import campaign_util, validate_util
from ..enums.log_message_enum import LogMessageEnum
from ..enums.file_name_enum import FileNameEnum
from ..enums.log_level_enum import LogLevelEnum
FILE = FileNameEnum.Api.Track
def _track(vwo_instance, campaign_specifier, user_id, goal_identifier, **kwargs):
    """
    This API method: Marks the conversion of the campaign(s) for a particular goal

    1. validates the arguments being passed
    2. retrieves the campaigns having the same global goal
    3. calls track_campaign_goal for all the goals

    Args:
        campaign_specifier (None, list, string): Campaign key(s), it can be None in case
            of all campaigns, list in case of given campaigns and string in case of particular
            campaign should to be tracked.
        user_id (string): ID assigned to a user
        goal_identifier (string): campaign(s)'s unique goal identifier

    Keyword Args:
        revenue_value (int|float|string): Provide it through **kwargs.
            It is the revenue generated on triggering the goal
        custom_variables (dict): Custom variables required for segmentation
        variation_targeting_variables (dict): Whitelisting variables to target users

    Returns:
        dict|None: None if called for single campaign and no goal tracked or
        called for all campaigns and no goal tracked.
        Dict otherwise of campaign_key with True/False showing whether the goal
        has been tracked for the campaign or not
    """

    vwo_instance.logger.set_api(API_METHODS.TRACK)
    # Retrieve revenue value and segmentation/whitelisting variables
    revenue_value = kwargs.get("revenue_value")
    custom_variables = kwargs.get("custom_variables")
    variation_targeting_variables = kwargs.get("variation_targeting_variables")

    valid_params = True
    # Check for valid args
    if (
        not validate_util.is_valid_string(user_id)
        or not validate_util.is_valid_string(goal_identifier)
        or (custom_variables is not None and not validate_util.is_valid_dict(custom_variables))
        or (
            variation_targeting_variables is not None and not validate_util.is_valid_dict(variation_targeting_variables)
        )
        or (revenue_value is not None and not validate_util.is_valid_basic_data_type(revenue_value))
    ):
        valid_params = False

    # Per-call goal type overrides the instance-wide setting when given.
    goal_type_to_track = kwargs.get("goal_type_to_track")
    if goal_type_to_track is None:
        goal_type_to_track = vwo_instance.goal_type_to_track
    elif not validate_util.is_valid_goal_type(goal_type_to_track):
        valid_params = False

    if not valid_params:
        vwo_instance.logger.log(
            LogLevelEnum.ERROR, LogMessageEnum.ERROR_MESSAGES.TRACK_API_INVALID_PARAMS.format(file=FILE)
        )
        return None

    campaigns_without_goal = []
    no_campaign_found = False
    # campaign_specifier selects the campaigns to consider:
    #   str  -> one campaign key, list -> the given keys, None -> all campaigns.
    if type(campaign_specifier) is str:
        campaign = campaign_util.get_campaign(vwo_instance.settings_file, campaign_specifier)
        goal = campaign_util.get_campaign_goal(campaign, goal_identifier)
        if not goal:
            no_campaign_found = True
        else:
            campaign_goal_list = [(campaign, goal)]
    elif type(campaign_specifier) is list:
        campaigns = campaign_util.get_campaigns(vwo_instance.settings_file, campaign_specifier).values()
        (campaign_goal_list, campaigns_without_goal) = campaign_util.get_campaigns_with_goal_id(
            campaigns, goal_identifier
        )
        for campaign in campaigns_without_goal:
            vwo_instance.logger.log(
                LogLevelEnum.ERROR,
                LogMessageEnum.ERROR_MESSAGES.TRACK_API_GOAL_NOT_FOUND.format(
                    file=FILE, goal_identifier=goal_identifier, user_id=user_id, campaign_key=campaign.get("key")
                ),
            )
    elif campaign_specifier is None:
        campaigns = vwo_instance.settings_file.get("campaigns")
        campaign_goal_list = campaign_util.get_campaigns_with_goal_id(campaigns, goal_identifier)[0]
        if not campaign_goal_list:
            no_campaign_found = True
    else:
        vwo_instance.logger.log(
            # Specific log for campaign_specifier type
            LogLevelEnum.ERROR,
            LogMessageEnum.ERROR_MESSAGES.TRACK_API_INVALID_PARAMS.format(file=FILE),
        )
        return None

    if no_campaign_found:
        vwo_instance.logger.log(
            LogLevelEnum.ERROR,
            LogMessageEnum.ERROR_MESSAGES.NO_CAMPAIGN_FOUND.format(file=FILE, goal_identifier=goal_identifier),
        )
        return None

    ret_value = {}
    # Accumulates (campaign_id, goal_id, revenueProp) tuples for the single
    # batched event-architecture impression sent below.
    campaign_goal_revenue_prop_list = []
    for campaign, goal in campaign_goal_list:
        result = track_campaign_goal(
            vwo_instance,
            campaign,
            user_id,
            goal,
            revenue_value,
            custom_variables,
            variation_targeting_variables,
            goal_type_to_track,
            campaign_goal_revenue_prop_list,
        )
        ret_value[campaign.get("key")] = result
    for campaign in campaigns_without_goal:
        ret_value[campaign.get("key")] = False

    # With event architecture (and no batching) one combined impression covers
    # every campaign whose goal was tracked above.
    if len(campaign_goal_revenue_prop_list) != 0 and (
        not vwo_instance.is_event_batching_enabled and vwo_instance.is_event_arch_enabled is True
    ):
        params = impression_util.get_events_params(vwo_instance.settings_file, goal_identifier)
        impression = impression_util.create_track_goal_events_impression(
            vwo_instance.settings_file, user_id, goal_identifier, campaign_goal_revenue_prop_list, revenue=revenue_value
        )
        vwo_instance.event_dispatcher.dispatch_events(params=params, impression=impression)

    return ret_value
def track_campaign_goal(
    vwo_instance,
    campaign,
    user_id,
    goal,
    revenue_value,
    custom_variables,
    variation_targeting_variables,
    goal_type_to_track,
    campaign_goal_revenue_prop_list,
):
    """
    It marks the conversion of given goal for the given campaign

    1. Checks if user is eligible to get bucketed into the campaign,
    2. Gets the assigned deterministic variation to the
        user(based on userId), if user becomes part of campaign
    3. Sends an impression call to VWO server to track goal data if event arch
        is not enabled

    Args:
        campaign (dict): Campaign object
        user_id (string): ID assigned to a user
        goal (dict): Goal object
        revenue_value (int|float|string): It is the revenue generated on triggering the goal
        custom_variables (dict): Custom variables required for segmentation
        variation_targeting_variables (dict): Whitelisting variables to target users
        goal_type_to_track (vwo.GOAL_TYPES): Goal type that should be tracked in case of mixed
            global goal identifier
        campaign_goal_revenue_prop_list (list): list of campaign_id, goal_id & goal's revenueProp
            (if revenue goal else None) to build event arch impression

    Returns:
        bool: True if goal successfully tracked else False
    """
    # Feature-rollout campaigns have no variations/goals to track.
    campaign_type = campaign.get("type")
    if campaign_type == constants.CAMPAIGN_TYPES.FEATURE_ROLLOUT:
        vwo_instance.logger.log(
            LogLevelEnum.ERROR,
            LogMessageEnum.ERROR_MESSAGES.INVALID_API.format(
                file=FILE, user_id=user_id, campaign_key=campaign.get("key"), campaign_type=campaign_type
            ),
        )
        return False

    # Skip when the caller asked to track only the other goal type.
    goal_type = goal.get("type")
    if (goal_type_to_track == constants.GOAL_TYPES.CUSTOM and goal_type == constants.GOAL_TYPES.REVENUE) or (
        goal_type_to_track == constants.GOAL_TYPES.REVENUE and goal_type == constants.GOAL_TYPES.CUSTOM
    ):
        # We can log goal type didn't match in debug mode
        return False

    if goal_type == constants.GOAL_TYPES.REVENUE and not validate_util.is_valid_value(revenue_value):
        vwo_instance.logger.log(
            LogLevelEnum.ERROR,
            LogMessageEnum.ERROR_MESSAGES.TRACK_API_REVENUE_NOT_PASSED_FOR_REVENUE_GOAL.format(
                file=FILE, user_id=user_id, goal_identifier=goal.get("identifier"), campaign_key=campaign.get("key")
            ),
        )
        return False

    # Custom-conversion goals never carry a revenue value.
    if goal_type == constants.GOAL_TYPES.CUSTOM:
        revenue_value = None

    variation, _ = vwo_instance.variation_decider.get_variation(
        user_id,
        campaign,
        custom_variables=custom_variables,
        variation_targeting_variables=variation_targeting_variables,
        goal_data={"identifier": goal.get("identifier")},
        api_method=constants.API_METHODS.TRACK,
    )

    if variation:
        # Legacy tracking call; with event architecture the caller sends one
        # combined impression instead (see campaign_goal_revenue_prop_list).
        if not vwo_instance.is_event_arch_enabled or vwo_instance.is_event_batching_enabled is True:
            impression = impression_util.create_impression(
                vwo_instance.settings_file,
                campaign.get("id"),
                variation.get("id"),
                user_id,
                goal.get("id"),
                revenue_value,
            )

            vwo_instance.event_dispatcher.dispatch(impression)
            vwo_instance.logger.log(
                LogLevelEnum.INFO,
                LogMessageEnum.INFO_MESSAGES.MAIN_KEYS_FOR_IMPRESSION.format(
                    file=FILE,
                    campaign_id=impression.get("experiment_id"),
                    account_id=impression.get("account_id"),
                    variation_id=impression.get("combination"),
                ),
            )
        else:
            campaign_goal_revenue_prop_list.append((campaign.get("id"), goal.get("id"), goal.get("revenueProp")))
        return True
    return False
| 2.3125 | 2 |
iMessSpam.py | fabiopigi/iMessageSpam | 0 | 17536 | # -*- coding: utf-8 -*-
#import some dope
import sys
import os
import re
import time
from random import randrange
from itertools import repeat
# Address book: display name -> phone number (international format).
numbers = {
    'adam' :"+41111111111",
    'bob' :"+41222222222",
    'chris' :"+41333333333",
    'dave' :"+41444444444",
}

# List the known recipients (user-facing strings are German, left unchanged).
print "Gespeicherte Empfänger: "
for name in numbers:
    print "%10s - %s"%(name,numbers[name])

# Prompt until the user enters one of the known recipient names.
number = ""
while number == "":
    numberID = raw_input("\nEmpfänger eingeben: ")
    if numberID in numbers:
        number = numbers[numberID]

# Delay between messages, in seconds.
pause = int(raw_input("\nIntervall in Sekunden: "))

print """
Verfügbare Optionen:
[1] Zeitansagen im Format 'Es ist 17:34:22'
[2] Zufällige '<NAME>' Jokes
[3] Satz für Satz aus einem Buch (Twilight)
[4] Fifty Shades of HEX
[5] Fröhliches Flaggen raten
"""
option = int(raw_input("Option auswählen: "))

# Per-option setup: each branch fixes message count and start offset.
if option == 1:
    anzahl = int(raw_input("\nAnzahl Nachrichten: "))
    start = 0
elif option == 2:
    anzahl = int(raw_input("\nAnzahl Nachrichten: "))
    start = 0
    replaceName = raw_input("\n'<NAME>' durch Namen ersetzen: ")
    if replaceName == "":
        replaceName = "<NAME>"
elif option == 3:
    # Split the book into sentences with a crude capital-letter regex.
    p = open('content/twilight.txt')
    book = p.read()
    pat = re.compile(r'([A-Z][^\.!?]*[\.!?])', re.M)
    sentences = pat.findall(book)
    anzahl = int(raw_input("\nAnzahl Nachrichten: "))
    start = int(raw_input("\nBei n. Satz anfangen: "))-1
    anzahl = anzahl + (start)
elif option == 4:
    anzahl = 50
    start = 0
elif option == 5:
    anzahl = 50
    start = 0
    import Countries
else:
    anzahl = 0
    start = 0

print "\n\nSenden beginnt...\n\n"

# NOTE(review): original marker "tunay bei 207" — meaning unclear, kept for history.
for i in range(start,anzahl,1):
    if option == 1:
        # Time announcement via the shell `date` command.
        cmdCode = "date +'%H:%M:%S'"
        message = "Es ist jetzt " + os.popen(cmdCode).read()
    elif option == 2:
        # Random joke from icndb.com, name substituted with sed.
        curlCode = "curl 'http://api.icndb.com/jokes/random' -s | sed -e 's/.*joke\\\": \\\"//' -e 's/\\\", \\\".*//' -e 's/<NAME>/" + replaceName + "/g' -e 's/&quot;/\"/g'"
        message = os.popen(curlCode).read()
    elif option == 3:
        message = sentences[i]
    elif option == 4:
        # Random 6-digit hex color (same byte repeated three times).
        message = "#%s" % "".join(list(repeat(hex(randrange(16, 255))[2:],3))).upper()
    elif option == 5:
        # Pick a random flag image and ask the recipient to guess the country.
        flags = os.listdir("content/flags")
        country = Countries.iso[flags[randrange(1,len(flags))][:2]]
        message = "Dies ist die Flagge von '%s'."%(country["Name"])
        filePath = os.path.abspath("content/flags/%s.png"%country["ISO"])
        osaCode = "osascript sendImage.scpt \"%s\" \"%s\""%(number,filePath)
        osaReturn = os.popen(osaCode).read()
        print message
    # Escape double quotes before embedding the text in the AppleScript call.
    message = message.replace('"', r'\"')
    osaCode = "osascript sendText.scpt \"%s\" \"%s\""%(number,message)
    print "%3d > %s"%((i+1),message)
    osaReturn = os.popen(osaCode).read()
    time.sleep(pause)
| 3.546875 | 4 |
mesh_to_tet.py | NVlabs/deformable_object_grasping | 30 | 17537 | <reponame>NVlabs/deformable_object_grasping
# Copyright (c) 2020 NVIDIA Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Convert a .mesh file (fTetWild format) to .tet (IsaacGym format)."""
def convert_mesh_to_tet(mesh_file_path, tet_output_path):
    """Convert a .mesh file (fTetWild/Medit ASCII format) to a .tet file
    (IsaacGym format).

    Reads the 'Vertices' and 'Tetrahedra' sections, writes vertices
    unchanged and tetrahedra with their four 1-based vertex indices
    converted to 0-based, dropping the trailing region/reference tag.

    Fixes vs. the original:
    * both files are closed via ``with`` (they previously leaked);
    * the region tag is removed with ``split()[:4]`` instead of
      ``split(' 0')[0]``, which corrupted any line whose indices contain a
      " 0" substring (e.g. "1 20 3 4 0" became "1 2").
    """
    with open(mesh_file_path, "r") as mesh_file:
        mesh_lines = [line.strip('\n') for line in mesh_file]

    vertices_start = mesh_lines.index('Vertices')
    num_vertices = mesh_lines[vertices_start + 1]
    vertices = mesh_lines[vertices_start + 2:vertices_start + 2
                          + int(num_vertices)]

    tetrahedra_start = mesh_lines.index('Tetrahedra')
    num_tetrahedra = mesh_lines[tetrahedra_start + 1]
    tetrahedra = mesh_lines[tetrahedra_start + 2:tetrahedra_start + 2
                            + int(num_tetrahedra)]
    print("# Vertices, # Tetrahedra:", num_vertices, num_tetrahedra)

    # Write to tet output
    with open(tet_output_path, "w") as tet_output:
        tet_output.write("# Tetrahedral mesh generated using\n\n")
        tet_output.write("# " + num_vertices + " vertices\n")
        for v in vertices:
            tet_output.write("v " + v + "\n")
        tet_output.write("\n")
        tet_output.write("# " + num_tetrahedra + " tetrahedra\n")
        for t in tetrahedra:
            # First four whitespace-separated fields are the vertex indices;
            # convert from the format's 1-based indexing to 0-based.
            indices = [str(int(k) - 1) for k in t.split()[:4]]
            tet_output.write("t " + ' '.join(indices) + "\n")
if __name__ == "__main__":
    # Example invocation; replace with real input/output paths before running.
    convert_mesh_to_tet(
        "path/to/mesh",
        "path/to/tet")
| 1.835938 | 2 |
tests/policies_tests/test_deterministic_policy.py | xinyuewang1/chainerrl | 2 | 17538 | <gh_stars>1-10
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import unittest
import chainer
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
import numpy as np
import chainerrl
# Cartesian product of configurations for the three deterministic policy
# classes; each class gets its own model_kwargs grid.
@testing.parameterize(*(
    testing.product({
        'n_input_channels': [1, 5],
        'action_size': [1, 2],
        'bound_action': [True, False],
        'nonlinearity': ['relu', 'elu'],
        'model_class': [chainerrl.policies.FCDeterministicPolicy],
        'model_kwargs': testing.product({
            'n_hidden_layers': [0, 1, 2],
            'n_hidden_channels': [1, 2],
            'last_wscale': [1, 1e-3],
        }),
    }) +
    testing.product({
        'n_input_channels': [1, 5],
        'action_size': [1, 2],
        'bound_action': [True, False],
        'nonlinearity': ['relu', 'elu'],
        'model_class': [chainerrl.policies.FCBNDeterministicPolicy],
        'model_kwargs': testing.product({
            'n_hidden_layers': [0, 1, 2],
            'n_hidden_channels': [1, 2],
            'normalize_input': [True, False],
            'last_wscale': [1, 1e-3],
        }),
    }) +
    testing.product({
        'n_input_channels': [1, 5],
        'action_size': [1, 2],
        'bound_action': [True, False],
        'nonlinearity': ['relu', 'elu'],
        'model_class': [chainerrl.policies.FCLSTMDeterministicPolicy],
        'model_kwargs': testing.product({
            'n_hidden_layers': [0, 1, 2],
            'n_hidden_channels': [1, 2],
            'last_wscale': [1, 1e-3],
        }),
    })
))
class TestDeterministicPolicy(unittest.TestCase):
    """Smoke-tests deterministic policy models on CPU and GPU: random input
    in, correctly-shaped (and optionally bounded) action distribution out."""

    def _make_model(self, **kwargs):
        # Merge the parameterized model_kwargs into the common kwargs.
        kwargs.update(self.model_kwargs)
        return self.model_class(**kwargs)

    def _test_call(self, gpu):
        # This method only check if a given model can receive random input
        # data and return output data with the correct interface.
        nonlinearity = getattr(F, self.nonlinearity)
        min_action = np.full((self.action_size,), -0.01, dtype=np.float32)
        max_action = np.full((self.action_size,), 0.01, dtype=np.float32)
        model = self._make_model(
            n_input_channels=self.n_input_channels,
            action_size=self.action_size,
            bound_action=self.bound_action,
            min_action=min_action,
            max_action=max_action,
            nonlinearity=nonlinearity,
        )
        batch_size = 7
        x = np.random.rand(
            batch_size, self.n_input_channels).astype(np.float32)
        if gpu >= 0:
            model.to_gpu(gpu)
            x = chainer.cuda.to_gpu(x)
            min_action = chainer.cuda.to_gpu(min_action)
            max_action = chainer.cuda.to_gpu(max_action)
        y = model(x)
        self.assertTrue(isinstance(
            y, chainerrl.distribution.ContinuousDeterministicDistribution))
        a = y.sample()
        self.assertTrue(isinstance(a, chainer.Variable))
        self.assertEqual(a.shape, (batch_size, self.action_size))
        # Output must live on the same device (numpy/cupy) as the input.
        self.assertEqual(chainer.cuda.get_array_module(a),
                         chainer.cuda.get_array_module(x))
        if self.bound_action:
            self.assertTrue((a.array <= max_action).all())
            self.assertTrue((a.array >= min_action).all())

    def test_call_cpu(self):
        self._test_call(gpu=-1)

    @attr.gpu
    def test_call_gpu(self):
        self._test_call(gpu=0)
| 1.8125 | 2 |
freshmaker/handlers/botas/botas_shipped_advisory.py | mulaievaRH/freshmaker | 5 | 17539 | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import json
from datetime import datetime
import re
import koji
from kobo.rpmlib import parse_nvr
import semver
from freshmaker import db, conf, log
from freshmaker.handlers import ContainerBuildHandler
from freshmaker.events import BotasErrataShippedEvent, ManualBundleRebuild
from freshmaker.lightblue import ContainerImage
from freshmaker.models import ArtifactBuild, ArtifactType, Event
from freshmaker.types import EventState, ArtifactBuildState, RebuildReason
from freshmaker.pyxis import Pyxis
from freshmaker.kojiservice import KojiService
from freshmaker.errata import Errata
class HandleBotasAdvisory(ContainerBuildHandler):
    """
    Handles event that was created by transition of an advisory filed by
    BOTAS to SHIPPED_LIVE state
    """
    name = "HandleBotasAdvisory"
    # This prefix should be added to event reason, when skipping the event.
    # Because Release Driver checks event's reason for certain prefixes,
    # to determine if there is an error in bundles processing.
    _no_bundle_prefix = "No bundles to rebuild: "
    def __init__(self, pyxis=None):
        """Initialize the handler.

        :param pyxis: optional pre-built Pyxis client; when omitted one is
            created from ``conf.pyxis_server_url``.
        :raises ValueError: if required configuration values are missing or
            ``FRESHMAKER_ROOT_URL`` does not look like a URL.
        """
        super().__init__()
        if pyxis:
            self._pyxis = pyxis
        else:
            if not conf.pyxis_server_url:
                raise ValueError("'PYXIS_SERVER_URL' parameter should be set")
            self._pyxis = Pyxis(conf.pyxis_server_url)
        # The root URL is embedded into build args later (CSV modifications
        # URL), so it must contain a scheme separator.
        if not conf.freshmaker_root_url or "://" not in conf.freshmaker_root_url:
            raise ValueError("'FRESHMAKER_ROOT_URL' parameter should be set to "
                             "a valid URL")
        # Currently processed event
        self.event = None
def can_handle(self, event):
if (isinstance(event, BotasErrataShippedEvent) and
'docker' in event.advisory.content_types):
return True
# This handler can handle manual bundle rebuilds too
if isinstance(event, ManualBundleRebuild):
return True
return False
    def handle(self, event):
        """Plan and submit bundle image rebuilds for *event*.

        Always returns an empty list (no follow-up events are generated).
        """
        if event.dry_run:
            self.force_dry_run()
        self.event = event
        db_event = Event.get_or_create_from_event(db.session, event)
        self.set_context(db_event)
        # Check if event is allowed by internal policies
        if not self.event.is_allowed(self):
            msg = ("This image rebuild is not allowed by internal policy. "
                   f"message_id: {event.msg_id}")
            db_event.transition(EventState.SKIPPED, msg)
            self.log_info(msg)
            return []
        # Release Driver rebuilds carry an explicit 'bundle_images' list;
        # everything else goes through the regular bundle-rebuild path.
        if isinstance(event, ManualBundleRebuild) and \
                hasattr(event, 'bundle_images'):
            bundles_to_rebuild = self._handle_release_driver_rebuild(db_event)
        # automatic rebuild and manual bundle rebuild(triggered by post request)
        else:
            bundles_to_rebuild = self._handle_bundle_rebuild(db_event)
        if not bundles_to_rebuild:
            return []
        builds = self._prepare_builds(db_event, bundles_to_rebuild)
        # Reset context to db_event.
        self.set_context(db_event)
        self.start_to_build_images(builds)
        if all([b.state == ArtifactBuildState.FAILED.value for b in builds]):
            db_event.transition(EventState.FAILED, "All bundle rebuilds failed")
        else:
            msg = f"Advisory {db_event.search_key}: Rebuilding " \
                  f"{len(db_event.builds.all())} bundle images."
            db_event.transition(EventState.BUILDING, msg)
        return []
def _handle_bundle_rebuild(self, db_event):
"""
Handle auto rebuild for an advisory created by Botas
OR manually triggered rebuild
:param db_event: database event that represent rebuild event
:rtype: list
:return: list of advisories that should be rebuilt
"""
# Mapping of operators' original build nvrs to rebuilt nvrs in advisory
nvrs_mapping = self._create_original_to_rebuilt_nvrs_map()
original_nvrs = nvrs_mapping.keys()
self.log_info(
"Orignial nvrs of build in the advisory #{0} are: {1}".format(
self.event.advisory.errata_id, " ".join(original_nvrs)))
# Get image manifest_list_digest for all original images, manifest_list_digest is used
# in pullspecs in bundle's related images
original_digests_by_nvr = {}
original_nvrs_by_digest = {}
for nvr in original_nvrs:
digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr)
if digest:
original_digests_by_nvr[nvr] = digest
original_nvrs_by_digest[digest] = nvr
else:
log.warning(
f"Image manifest_list_digest not found for original image {nvr} in Pyxis, "
"skip this image"
)
if not original_digests_by_nvr:
msg = f"None of the original images have digests in Pyxis: {','.join(original_nvrs)}"
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
# Get image manifest_list_digest for all rebuilt images, manifest_list_digest is used
# in pullspecs of bundle's related images
rebuilt_digests_by_nvr = {}
rebuilt_nvrs = nvrs_mapping.values()
for nvr in rebuilt_nvrs:
# Don't require that the manifest list digest be published in this case because
# there's a delay from after an advisory is shipped and when the published repositories
# entry is populated
digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr, must_be_published=False)
if digest:
rebuilt_digests_by_nvr[nvr] = digest
else:
log.warning(
f"Image manifest_list_digest not found for rebuilt image {nvr} in Pyxis, "
"skip this image"
)
if not rebuilt_digests_by_nvr:
msg = f"None of the rebuilt images have digests in Pyxis: {','.join(rebuilt_nvrs)}"
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
index_images = self._pyxis.get_operator_indices()
# get latest bundle images per channel per index image filtered
# by the highest semantic version
all_bundles = self._pyxis.get_latest_bundles(index_images)
self.log_debug(
"There are %d bundles that are latest in a channel in the found index images",
len(all_bundles),
)
# A mapping of digests to bundle metadata. This metadata is used to
# for the CSV metadata updates.
bundle_mds_by_digest = {}
# get bundle digests for original images
bundle_digests_by_related_nvr = {}
for image_nvr, image_digest in original_digests_by_nvr.items():
bundles = self._pyxis.get_bundles_by_related_image_digest(
image_digest, all_bundles
)
if not bundles:
log.info(f"No latest bundle image with the related image of {image_nvr}")
continue
for bundle in bundles:
bundle_digest = bundle['bundle_path_digest']
bundle_mds_by_digest[bundle_digest] = bundle
bundle_digests_by_related_nvr.setdefault(image_nvr, []).append(bundle_digest)
if not bundle_digests_by_related_nvr:
msg = "None of the original images have related bundles, skip."
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
self.log_info(
"Found %d bundles with relevant related images", len(bundle_digests_by_related_nvr)
)
# Mapping of bundle digest to bundle data
# {
# digest: {
# "images": [image_amd64, image_aarch64],
# "nvr": NVR,
# "auto_rebuild": True/False,
# "osbs_pinning": True/False,
# "pullspecs": [...],
# }
# }
bundles_by_digest = {}
default_bundle_data = {
'images': [],
'nvr': None,
'auto_rebuild': False,
'osbs_pinning': False,
# CSV modifications for the rebuilt bundle image
'pullspec_replacements': [],
'update': {},
}
# Get images for each bundle digest, a bundle digest can have multiple images
# with different arches.
for digest in bundle_mds_by_digest:
bundles = self._pyxis.get_images_by_digest(digest)
# If no bundle image found, just skip this bundle digest
if not bundles:
self.log_warn('The bundle digest %r was not found in Pyxis. Skipping.', digest)
continue
bundle_nvr = bundles[0]['brew']['build']
# If specific container images where requested to rebuild, process only them
if (isinstance(self.event, ManualBundleRebuild)
and self.event.container_images # noqa: W503
and bundle_nvr not in self.event.container_images): # noqa: W503
self.log_debug("Ignoring '%s', because it's not in requested rebuilds"
" (container_images in request)", bundle_nvr)
continue
# Filter out builds from dependent event that were rebuilt recently
done_build = db_event.get_artifact_build_from_event_dependencies(
bundle_nvr)
if done_build:
self.log_debug("Ignoring '%s' bundle, because it was already rebuilt"
" in dependent event", bundle_nvr)
continue
bundles_by_digest.setdefault(digest, copy.deepcopy(default_bundle_data))
bundles_by_digest[digest]['nvr'] = bundle_nvr
bundles_by_digest[digest]['images'] = bundles
# Unauthenticated koji session to fetch build info of bundles
koji_api = KojiService(conf.koji_profile)
# For each bundle, check whether it should be rebuilt by comparing the
# auto_rebuild_tags of repository and bundle's tags
for digest, bundle_data in bundles_by_digest.items():
bundle_nvr = bundle_data['nvr']
# Images are for different arches, just check against the first image
image = bundle_data['images'][0]
if self.image_has_auto_rebuild_tag(image):
bundle_data['auto_rebuild'] = True
# Fetch buildinfo
buildinfo = koji_api.get_build(bundle_nvr)
related_images = (
buildinfo.get('extra', {})
.get('image', {})
.get('operator_manifests', {})
.get('related_images', {})
)
bundle_data['osbs_pinning'] = related_images.get('created_by_osbs', False)
# Save the original pullspecs
bundle_data['pullspec_replacements'] = related_images.get('pullspecs', [])
# Digests of bundles to be rebuilt
to_rebuild_digests = set()
# Now for each bundle, replace the original digest with rebuilt
# digest (override pullspecs)
for digest, bundle_data in bundles_by_digest.items():
# Override pullspecs only when auto_rebuild is enabled and OSBS-pinning
# mechanism is used.
if not (bundle_data['auto_rebuild'] and bundle_data['osbs_pinning']):
self.log_info(
'The bundle %r does not have auto-rebuild tags (%r) and/or OSBS pinning (%r)',
bundle_data['nvr'],
bundle_data['auto_rebuild'],
bundle_data['osbs_pinning'],
)
continue
csv_name = bundle_mds_by_digest[digest]['csv_name']
version = bundle_mds_by_digest[digest]['version_original']
bundle_data.update(self._get_csv_updates(csv_name, version))
for pullspec in bundle_data['pullspec_replacements']:
# A pullspec item example:
# {
# 'new': 'registry.exampe.io/repo/example-operator@sha256:<sha256-value>',
# 'original': 'registry.example.io/repo/example-operator:v2.2.0',
# 'pinned': True,
# # value used for internal purpose during manual rebuilds, it's an old pullspec that was replaced
# '_old': 'registry.exampe.io/repo/example-operator@sha256:<previous-sha256-value>,
# }
# A pullspec path is in format of "registry/repository@digest"
pullspec_elems = pullspec.get('new').split('@')
old_digest = pullspec_elems[1]
if old_digest not in original_nvrs_by_digest:
# This related image is not one of the original images
continue
# This related image is one of our original images
old_nvr = original_nvrs_by_digest[old_digest]
new_nvr = nvrs_mapping[old_nvr]
new_digest = rebuilt_digests_by_nvr[new_nvr]
# save pullspec that image had before rebuild
pullspec['_old'] = pullspec.get('new')
# Replace the old digest with new digest
pullspec_elems[1] = new_digest
new_pullspec = '@'.join(pullspec_elems)
pullspec['new'] = new_pullspec
# Always set pinned to True when it was replaced by Freshmaker
# since it indicates that the pullspec was modified from the
# original pullspec
pullspec['pinned'] = True
# Once a pullspec in this bundle has been overrided, add this bundle
# to rebuild list
self.log_info(
'Changing pullspec %r to %r in the bundle %r',
pullspec['_old'],
pullspec['new'],
bundle_data['nvr'],
)
to_rebuild_digests.add(digest)
if not to_rebuild_digests:
msg = self._no_bundle_prefix + "No bundle images to rebuild for " \
f"advisory {self.event.advisory.name}"
self.log_info(msg)
db_event.transition(EventState.SKIPPED, msg)
db.session.commit()
return []
bundles_to_rebuild = list(map(lambda x: bundles_by_digest[x],
to_rebuild_digests))
return bundles_to_rebuild
    def _handle_release_driver_rebuild(self, db_event):
        """
        Handle manual rebuild submitted by Release Driver for an advisory created by Botas

        :param db_event: database event that represents a rebuild event
        :rtype: list
        :return: list of bundle-data dicts that should be rebuilt
        """
        old_to_new_pullspec_map = self._get_pullspecs_mapping()
        if not old_to_new_pullspec_map:
            msg = self._no_bundle_prefix + 'None of the bundle images have ' \
                                           'applicable pullspecs to replace'
            log.warning(msg)
            db_event.transition(EventState.SKIPPED, msg)
            return []
        # Unauthenticated koji session to fetch build info of bundles
        koji_api = KojiService(conf.koji_profile)
        rebuild_nvr_to_pullspecs_map = dict()
        # compare replaced pullspecs with pullspecs in 'container_images' and
        # create map for bundles that should be rebuilt with their nvrs
        for container_image_nvr in self.event.container_images:
            artifact_build = db.session.query(ArtifactBuild).filter(
                ArtifactBuild.rebuilt_nvr == container_image_nvr,
                ArtifactBuild.type == ArtifactType.IMAGE.value,
            ).one_or_none()
            pullspecs = []
            # Try to find build in FM database, if it's not there check in Brew
            if artifact_build:
                self.log_info(
                    "%s in the container_images list was found in the database", container_image_nvr
                )
                pullspecs = artifact_build.bundle_pullspec_overrides["pullspec_replacements"]
            else:
                self.log_info(
                    "%s in the container_images list is not in the database. Searching in Brew "
                    "instead.",
                    container_image_nvr,
                )
                # Fetch buildinfo from Koji
                buildinfo = koji_api.get_build(container_image_nvr)
                # Get the original pullspecs
                pullspecs = (
                    buildinfo.get('extra', {})
                    .get('image', {})
                    .get('operator_manifests', {})
                    .get('related_images', {})
                    .get('pullspecs', [])
                )
            for pullspec in pullspecs:
                if pullspec.get('new') not in old_to_new_pullspec_map:
                    self.log_debug("The pullspec %s is not getting replaced", pullspec.get('new'))
                    continue
                # use newer pullspecs in the image
                self.log_info(
                    "Replacing the pullspec %s with %s on %s",
                    pullspec['new'],
                    old_to_new_pullspec_map[pullspec['new']],
                    container_image_nvr,
                )
                pullspec['new'] = old_to_new_pullspec_map[pullspec['new']]
                # At least one pullspec was replaced, so this image is rebuilt.
                rebuild_nvr_to_pullspecs_map[container_image_nvr] = pullspecs
        if not rebuild_nvr_to_pullspecs_map:
            msg = self._no_bundle_prefix + 'None of the container images have ' \
                                           'applicable pullspecs from the input bundle images'
            log.info(msg)
            db_event.transition(EventState.SKIPPED, msg)
            return []
        # list with metadata about every bundle to do rebuild
        to_rebuild_bundles = []
        # fill 'append' and 'update' fields for bundles to rebuild
        for nvr, pullspecs in rebuild_nvr_to_pullspecs_map.items():
            self.log_debug("Getting the manifest list digest for %s", nvr)
            bundle_digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr)
            if bundle_digest is not None:
                self.log_debug("The manifest list digest for %s is %s", nvr, bundle_digest)
                bundles = self._pyxis.get_bundles_by_digest(bundle_digest)
                if not bundles:
                    self.log_error(
                        "The manifest_list_digest %s is not available on the bundles API endpoint",
                        bundle_digest,
                    )
                    continue
                temp_bundle = bundles[0]
                csv_updates = (self._get_csv_updates(temp_bundle['csv_name'],
                                                     temp_bundle['version_original']))
                to_rebuild_bundles.append({
                    'nvr': nvr,
                    'update': csv_updates['update'],
                    'pullspec_replacements': pullspecs,
                })
            else:
                log.warning('Can\'t find manifest_list_digest for bundle '
                            f'"{nvr}" in Pyxis')
        if not to_rebuild_bundles:
            msg = 'Can\'t find digests for any of the bundles to rebuild'
            log.warning(msg)
            db_event.transition(EventState.FAILED, msg)
            return []
        return to_rebuild_bundles
def _get_pullspecs_mapping(self):
"""
Get map of all replaced pullspecs from 'bundle_images' provided in an event.
:rtype: dict
:return: map of all '_old' pullspecs that was replaced by 'new'
pullspecs in previous Freshmaker rebuilds
"""
old_to_new_pullspec_map = dict()
for bundle_nvr in self.event.bundle_images:
artifact_build = db.session.query(ArtifactBuild).filter(
ArtifactBuild.rebuilt_nvr == bundle_nvr,
ArtifactBuild.type == ArtifactType.IMAGE.value,
).one_or_none()
if artifact_build is None:
log.warning(
f'Can\'t find build for a bundle image "{bundle_nvr}"')
continue
pullspec_overrides = artifact_build.bundle_pullspec_overrides
for pullspec in pullspec_overrides['pullspec_replacements']:
old_pullspec = pullspec.get('_old', None)
if old_pullspec is None:
continue
old_to_new_pullspec_map[old_pullspec] = pullspec['new']
return old_to_new_pullspec_map
@classmethod
def _get_csv_updates(cls, csv_name, version):
"""
Determine the CSV updates required for the bundle image.
:param str csv_name: the name field in the bundle's ClusterServiceVersion file
:param str version: the version of the bundle image being rebuilt
:return: a dictionary of the CSV updates needed
:rtype: dict
"""
csv_modifications = {}
new_version, fm_suffix = cls._get_rebuild_bundle_version(version)
new_csv_name = cls._get_csv_name(csv_name, version, new_version, fm_suffix)
csv_modifications['update'] = {
'metadata': {
# Update the name of the CSV to something uniquely identify the rebuild
'name': new_csv_name,
# Declare that this rebuild is a substitute of the bundle being rebuilt
'annotations': {'olm.substitutesFor': csv_name}
},
'spec': {
# Update the version of the rebuild to be unique and a newer version than the
# the version of the bundle being rebuilt
'version': new_version,
}
}
return csv_modifications
    @classmethod
    def _get_rebuild_bundle_version(cls, version):
        """
        Get a bundle version for the Freshmaker rebuild of the bundle image.

        Examples:
            1.2.3 => 1.2.3+0.$timestamp.p (no build ID and not a rebuild)
            1.2.3+48273 => 1.2.3+48273.0.$timestamp.p (build ID and not a rebuild)
            1.2.3+48273.0.1616457250.p => 1.2.3+48273.0.$timestamp.p (build ID and a rebuild)

        :param str version: the version of the bundle image being rebuilt
        :return: a tuple of the bundle version of the Freshmaker rebuild of the bundle image and
            the suffix that was added by Freshmaker
        :rtype: tuple(str, str)
        """
        parsed_version = semver.VersionInfo.parse(version)
        # Strip off the microseconds of the timestamp
        timestamp = int(datetime.utcnow().timestamp())
        new_fm_suffix = f'0.{timestamp}.p'
        if parsed_version.build:
            # Check if the bundle was a Freshmaker rebuild. Include .patched
            # for backwards compatibility with the old suffix.
            fm_suffix_search = re.search(
                r'(?P<fm_suffix>0\.\d+\.(?:p|patched))$', parsed_version.build
            )
            if fm_suffix_search:
                fm_suffix = fm_suffix_search.groupdict()['fm_suffix']
                # Get the build without the Freshmaker suffix. This may include a build ID
                # from the original build before Freshmaker rebuilt it or be empty.
                # Note: the trailing '.' separator (if any) is kept on purpose.
                build_wo_fm_suffix = parsed_version.build[:- len(fm_suffix)]
                new_build = f"{build_wo_fm_suffix}{new_fm_suffix}"
            else:
                # This was not previously rebuilt by Freshmaker so just append the suffix
                # to the existing build ID with '.' separating it.
                new_build = f"{parsed_version.build}.{new_fm_suffix}"
        else:
            # There is no existing build ID, so the Freshmaker suffix becomes
            # the whole build ID.
            new_build = new_fm_suffix
        # Don't use the replace method in order to support semver 2.8.1
        new_version_dict = parsed_version._asdict()
        new_version_dict["build"] = new_build
        new_version = str(semver.VersionInfo(**new_version_dict))
        return new_version, new_fm_suffix
@staticmethod
def _get_csv_name(csv_name, version, rebuild_version, fm_suffix):
"""
Get a bundle CSV name for the Freshmaker rebuild of the bundle image.
:param str csv_name: the name of the ClusterServiceVersion (CSV) file of the bundle image
:param str version: the version of the bundle image being rebuilt
:param str rebuild_version: the new version being assigned by Freshmaker for the rebuild
:param str fm_suffix: the portion of rebuild_version that was generated by Freshmaker
:return: the bundle ClusterServiceVersion (CSV) name of the Freshmaker rebuild of the bundle
image
:rtype: str
"""
# The CSV name must be in the format of a valid DNS name, which means the + from the
# build ID must be replaced. In the event this was a previous Freshmaker rebuild, version
# may have a build ID that would be the DNS safe version in the CSV name.
dns_safe_version = version.replace('+', '-')
if dns_safe_version in csv_name:
dns_safe_rebuild_version = rebuild_version.replace('+', '-')
return csv_name.replace(dns_safe_version, dns_safe_rebuild_version)
else:
return f'{csv_name}.{fm_suffix}'
    def get_published_original_nvr(self, rebuilt_nvr):
        """
        Search for an original build, that has been built and published to a
        repository, and get original_nvr from it

        :param str rebuilt_nvr: rebuilt NVR to look build by
        :rtype: str or None
        :return: original NVR from the first published FM build for given NVR
        """
        original_nvr = None
        # artifact build should be only one in database, or raise an error
        artifact_build = db.session.query(ArtifactBuild).filter(
            ArtifactBuild.rebuilt_nvr == rebuilt_nvr,
            ArtifactBuild.type == ArtifactType.IMAGE.value,
        ).one_or_none()
        # recursively search for original artifact build
        if artifact_build is not None:
            original_nvr = artifact_build.original_nvr
            # check if image is published
            request_params = {'include': 'data.repositories',
                              'page_size': 1}
            # NOTE(review): uses the private Pyxis._pagination API directly —
            # consider exposing a public helper on the Pyxis client.
            images = self._pyxis._pagination(f'images/nvr/{original_nvr}',
                                             request_params)
            if not images:
                return None
            # stop recursion if the image is published in some repo
            if any(repo['published'] for repo in images[0].get('repositories')):
                return original_nvr
            # Otherwise walk further back along the rebuild chain.
            next_nvr = self.get_published_original_nvr(original_nvr)
            if next_nvr is not None:
                original_nvr = next_nvr
        return original_nvr
    def image_has_auto_rebuild_tag(self, image):
        """ Check if image has a tag enabled for auto rebuild.

        :param dict image: Dict representation of an image entity in Pyxis.
        :rtype: bool
        :return: True if image has a tag enabled for auto rebuild in repository, otherwise False.
        """
        for repo in image['repositories']:
            # Skip unpublished repository
            if not repo['published']:
                continue
            auto_rebuild_tags = self._pyxis.get_auto_rebuild_tags(
                repo['registry'], repo['repository']
            )
            tags = [t['name'] for t in repo.get('tags', [])]
            # Any overlap between the repo's auto-rebuild tags and the
            # image's tags enables the rebuild.
            if set(auto_rebuild_tags) & set(tags):
                return True
        # It'd be more efficient to do this check first, but the exceptions are edge cases
        # (e.g. testing) and it's best to not use it unless absolutely necessary
        nvr = image['brew']['build']
        parsed_nvr = parse_nvr(nvr)
        nv = f'{parsed_nvr["name"]}-{parsed_nvr["version"]}'
        if nv in conf.bundle_autorebuild_tag_exceptions:
            self.log_info(
                'The bundle %r has an exception for being tagged with an auto-rebuild tag', nvr
            )
            return True
        return False
    def _create_original_to_rebuilt_nvrs_map(self):
        """
        Creates mapping of original operator build NVRs to rebuilt NVRs in advisory.
        Including NVRs of the builds from the blocking advisories

        :rtype: dict
        :return: map of the original NVRs as keys and rebuilt NVRs as values
        """
        nvrs_mapping = {}
        # Get builds from all blocking advisories
        blocking_advisories_builds = \
            Errata().get_blocking_advisories_builds(self.event.advisory.errata_id)
        # Get builds NVRs from the advisory attached to the message/event and
        # then get original NVR for every build
        for product_info in self.event.advisory.builds.values():
            for build in product_info['builds']:
                # Each build is a one key/value pair, and key is the build NVR
                build_nvr = next(iter(build))
                # Search for the first build that triggered the chain of rebuilds
                # for every shipped NVR to get original NVR from it
                original_nvr = self.get_published_original_nvr(build_nvr)
                if original_nvr is None:
                    continue
                nvrs_mapping[original_nvr] = build_nvr
                parsed_build_nvr = parse_nvr(build_nvr)
                # Check builds from blocking advisories and add to the mapping
                # all of them, that have overlapping package names
                for block_build in blocking_advisories_builds:
                    block_build_nvr = parse_nvr(block_build)
                    # Match on name AND version so only the same package
                    # stream maps to this rebuilt NVR.
                    if (block_build_nvr['name'] == parsed_build_nvr['name']
                            and block_build_nvr['version'] == parsed_build_nvr['version']):  # noqa: W503
                        nvrs_mapping[block_build] = build_nvr
        return nvrs_mapping
    def _prepare_builds(self, db_event, to_rebuild_bundles):
        """
        Prepare models.ArtifactBuild instance for every bundle that will be
        rebuilt

        :param models.Event db_event: database event that will contain builds
        :param list to_rebuild_bundles: bundles to rebuild
        :return: builds that already in database and ready to be submitted to brew
        :rtype: list
        """
        builds = []
        # Each rebuilt build gets its own URL serving the CSV modifications.
        csv_mod_url = conf.freshmaker_root_url + "/api/2/pullspec_overrides/{}"
        for bundle in to_rebuild_bundles:
            # Reset context to db_event for each iteration before
            # the ArtifactBuild is created.
            self.set_context(db_event)
            rebuild_reason = RebuildReason.DIRECTLY_AFFECTED.value
            bundle_name = koji.parse_NVR(bundle["nvr"])["name"]
            build = self.record_build(
                db_event, bundle_name, ArtifactType.IMAGE,
                state=ArtifactBuildState.PLANNED.value,
                original_nvr=bundle["nvr"],
                rebuild_reason=rebuild_reason)
            # Set context to particular build so logging shows this build
            # in case of error.
            self.set_context(build)
            build.transition(ArtifactBuildState.PLANNED.value, "")
            additional_data = ContainerImage.get_additional_data_from_koji(bundle["nvr"])
            build.build_args = json.dumps({
                "repository": additional_data["repository"],
                "commit": additional_data["commit"],
                "target": additional_data["target"],
                "branch": additional_data["git_branch"],
                "arches": additional_data["arches"],
                # The build system always enforces that bundle images build from
                # "scratch", so there is no parent image. See:
                # https://osbs.readthedocs.io/en/latest/users.html?#operator-manifest-bundle-builds
                "original_parent": None,
                "operator_csv_modifications_url": csv_mod_url.format(build.id),
            })
            build.bundle_pullspec_overrides = {
                "pullspec_replacements": bundle["pullspec_replacements"],
                "update": bundle["update"],
            }
            db.session.commit()
            builds.append(build)
        return builds
| 1.039063 | 1 |
src/masonite/contracts/AuthContract.py | holic-cl/masonite | 95 | 17540 | from abc import ABC as Contract, abstractmethod
class AuthContract(Contract):
    """Contract (abstract base class) that Masonite authentication drivers
    must implement.

    Method names suggest user lookup, persistence and removal of auth
    state — confirm exact semantics against the concrete drivers.
    """
    @abstractmethod
    def user(self):
        """Abstract hook: resolve the authenticated user (driver-specific)."""
        pass
    @abstractmethod
    def save(self):
        """Abstract hook: persist authentication state (driver-specific)."""
        pass
    @abstractmethod
    def delete(self):
        """Abstract hook: remove authentication state (driver-specific)."""
        pass
| 2.984375 | 3 |
netblow/bin/netblow_cli.py | viniciusarcanjo/netblow | 8 | 17541 | <filename>netblow/bin/netblow_cli.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""netblow_cli module."""
import argparse
from netblow.netblow import NetBlow
from netblow.version import __version__
def main():
    """Entry function: parse CLI arguments and run NetBlow."""
    parser = argparse.ArgumentParser(
        description="netblow. Vendor agnostic network testing framework to stress network failures."  # noqa
    )
    # to add required args.
    # Pop the default "optional arguments" group so the custom
    # "required arguments" group is listed first; re-appended below.
    optional = parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    # dryrun / concheck / once are mutually exclusive run modes.
    m_group = optional.add_mutually_exclusive_group()
    m_group.add_argument(
        '-d',
        '--dryrun',
        default=False,
        action='store_true',
        help="show tests calls, won't connect to any devices")
    m_group.add_argument(
        '-c',
        '--concheck',
        default=False,
        action='store_true',
        help='check connectivity with all devices in the topology')
    m_group.add_argument(
        '-1',
        '--once',
        default=False,
        action='store_true',
        help="iterates only once and perfom napalm diffs")
    parser.add_argument(
        '-l',
        '--level',
        choices=['info', 'debug'],
        default='info',
        help='logging verbosity level (default: info)')
    parser.add_argument(
        '-v',
        '--version',
        action='version',
        version='{}'.format(__version__),
        help='show version')
    required.add_argument(
        '-f', '--topology', help='topology yml file')
    required.add_argument(
        '-t', '--tests', help='tests yml file')
    parser._action_groups.append(optional)
    args = parser.parse_args()
    if not args.topology:
        parser.error('You have to specify the topology yml file with -f')
    if not args.tests:
        # NOTE(review): 'and' binds tighter than 'or', so this reads as
        # args.once or (not dryrun and not concheck): the tests file is
        # required unless only dry-running or connectivity-checking.
        if args.once or not args.dryrun and not args.concheck:
            parser.error('You have to specify the tests yml file with -t')
    NetBlow(
        topo_file=args.topology,
        test_file=args.tests,
        dry_run=args.dryrun,
        enable_salt=False,
        iter_once=args.once,
        auto_open=True,
        auto_test=True,
        con_check=args.concheck,
        level=args.level)
if __name__ == "__main__":
    main()
| 2.5 | 2 |
src/06_tool/regular_expression.py | edgardeng/python-advance-interview | 1 | 17542 | '''
' Python Regular Expression 正则表达式
'
'''
import re
def test_match():
    """Basic re.match usage and the match-object API.

    The original passed ``re.L`` with a str pattern, which raises
    ``ValueError`` on Python 3 (LOCALE only works with bytes patterns);
    ``re.IGNORECASE`` is used instead to demonstrate the ``flags``
    parameter.
    """
    s = 'hello python Hello'
    p = 'hello'
    o = re.match(p, s)
    print(o)
    print(dir(o))
    print(o.group())  # the matched substring
    print(o.span())  # (start, end) positions of the match
    print(o.start())  # start position
    print('*' * 30, 'flags参数的使用')
    o2 = re.match(p, s, re.IGNORECASE)
    print(o2.group())  # the matched substring
# 常用字符的使用
def test_match_character():
    """Demonstrate the common character classes (., \\d, \\D, \\s, \\S, \\w, \\W, [...]).

    Patterns use raw strings so the backslashes reach the regex engine
    without triggering invalid-escape-sequence warnings.
    """
    print('-' * 30, ' . 匹配任意一个字符')
    print(re.match(r'.', 'abv'))
    print(re.match(r'.', '12'))
    print(re.match(r'.', '\n'))  # '.' does not match a newline by default
    print('-' * 30, r' \d 匹配数字 0-9')
    print(re.match(r'\d', 'abc456'))
    print(re.match(r'\d', '234svd'))
    print('-' * 30, r' \D 匹配非数字 0-9')
    print(re.match(r'\D', 'abc456'))
    print(re.match(r'\D', '234svd'))
    print('-' * 30, r' \s 匹配空白字符')
    print(re.match(r'\s', '\n12\t'))  # subject strings keep their real escapes
    print(re.match(r'\s', '\t'))
    print(re.match(r'\s', 'addd'))
    print('-' * 30, r' \S 匹配非空白字符')
    print(re.match(r'\S', '\n12\t'))
    print(re.match(r'\S', '\t'))
    print(re.match(r'\S', 'addd'))
    print('-' * 30, r' \w 匹配字母、数字')
    print(re.match(r'\w', 'AB'))
    print(re.match(r'\w', 'ab'))
    print(re.match(r'\w', '12'))
    print(re.match(r'\w', '__'))
    print(re.match(r'\w', '##'))
    print('-' * 30, r' \W 匹配非 字母、数字')
    print(re.match(r'\W', 'AB'))
    print(re.match(r'\W', 'ab'))
    print(re.match(r'\W', '12'))
    print(re.match(r'\W', '__'))
    print(re.match(r'\W', '##'))
    print('-' * 30, r' \[] 匹配列表中的字符')
    print(re.match('[2468]', '22'))
    print(re.match('[2468]', '33'))
    print(re.match('[2468]', '83'))
    print(re.match('[2468]', '38'))
def test_match_phone():
print('-' * 30, ' 匹配手机号')
patten = '\d\d\d\d\d\d\d\d\d\d\d'
print(re.match(patten, '13466669999'))
print(re.match('1[345789]\d\d\d\d\d\d\d\d\d', '13466669999'))
# 限定符
def test_match_qualifier():
    """Demonstrate the repetition qualifiers *, +, ?, {m}, {m,n}, {m,}.

    Patterns containing backslashes are raw strings to avoid
    invalid-escape-sequence warnings.
    """
    print('-' * 30, ' * 匹配零次或多次')
    print(re.match(r'\d*', '123abc'))  # leading digits (possibly empty match)
    print(re.match(r'\d*', 'abc'))
    print('-' * 30, ' + |匹配一次或多次')
    print(re.match(r'\d+', '123abc'))  # leading digits (at least one)
    print(re.match(r'\d+', 'abc'))
    print('-' * 30, ' ? |匹配一次或零次')
    print(re.match(r'\d?', '1abc'))
    print(re.match(r'\d?', '123abc'))  # at most one leading digit
    print(re.match(r'\d?', 'abc'))
    print('-' * 30, ' {m} |重复m次')
    print(re.match(r'\d{2}', '123abc'))  # exactly two leading digits
    print(re.match(r'\d{2}', '12abc'))
    print(re.match(r'\d{2}', '1abc'))
    print(re.match(r'\d{2}', 'abc'))
    print('-' * 30, '{m,n}|重复m到n次')
    print(re.match(r'\d{1,3}', '1234abc'))  # one to three leading digits
    print(re.match(r'\d{1,3}', '123abc'))
    print(re.match(r'\d{1,3}', '12abc'))
    print(re.match(r'\d{1,3}', '1abc'))
    print(re.match(r'\d{1,3}', 'abc'))
    print('-' * 30, '{m,}|至少m次')
    print(re.match(r'\d{2,}', '1234abc'))  # two or more leading digits
    print(re.match(r'\d{2,}', '123abc'))
    print(re.match(r'\d{2,}', '12abc'))
    print(re.match(r'\d{2,}', '1abc'))
    print(re.match(r'\d{2,}', 'abc'))
    print('-' * 30, '案例1 首字母为大写字符,其他小写字符')
    print(re.match('[A-Z][a-z]*', 'abc'))
    print(re.match('[A-Z][a-z]*', 'ABC'))
    print(re.match('[A-Z][a-z]*', 'Abc'))
    print(re.match('[A-Z][a-z]*', 'AbC'))
    print('-' * 30, '案例2 有效变量名 字母数字下划线,数字不开头')
    print(re.match('[a-zA-Z_][a-zA-Z0-9_]*', 'abc'))
    print(re.match(r'[a-zA-Z_]\w*', 'abc'))
    print(re.match('[a-zA-Z_][a-zA-Z0-9_]*', 'abc123'))
    print(re.match(r'[a-zA-Z_]\w*', '123abc'))
    print(re.match(r'[a-zA-Z_]\w*', '_123abc'))
    print('-' * 30, '案例2 1-99的数字')
    print(re.match(r'[1-9]\d?', '23abc'))
    print(re.match(r'[1-9]\d?', '100'))
    print(re.match(r'[1-9]\d?', '11'))
    print(re.match(r'[1-9]\d?', '1'))
    print(re.match(r'[1-9]\d?', '0'))
    print(re.match(r'[1-9]\d?', '09'))
    print('-' * 30, '案例2 8-20随机密码 大写,小写,下划线,数字')
    print(re.match(r'\w{8,20}', '1234567'))
    print(re.match(r'\w{8,20}', '1234567$$'))
    print(re.match(r'\w{8,20}', '1234567abc_'))
    print(re.match(r'\w{8,20}', '1234567abc#'))
    print(re.match(r'\w{8,20}', '12345678901234567890zx'))
# 转义字符 原生字符
def escape_character():
    """Show how backslashes behave in the three string-literal forms."""
    print('C:\t\d\e')  # '\t' becomes a tab; '\d' and '\e' are unrecognized escapes kept literally
    print('C:\\t\\d\\e')  # doubled backslashes print as single literal backslashes
    print(r'C:\t\d\e')  # raw string: every backslash is literal
# 边界字符
def boundary():
    """Demonstrate boundary anchors: '$' (end), '^' (start) and word
    boundary.

    NOTE(review): the email examples below were mangled — literal
    '<EMAIL>' placeholders replaced parts of the patterns and the test
    addresses. Restore the original qq.com addresses/patterns before
    relying on this demo.
    """
    print('-' * 30, '$ 匹配字符串结尾')
    print(re.match('[1-9]\d{4,<EMAIL>', '<EMAIL>'))
    print(re.match('[1-9]\d{4,9}@qq.<EMAIL>', '<EMAIL>'))
    print(re.match(r'[1-9]\d{4,9}@qq.<EMAIL>$', '<EMAIL>'))
    print(re.match(r'[1-9]\d{<EMAIL>$', '<EMAIL>'))
    print('-' * 30, ' ^ 匹配字符串开头')
    print(re.match(r'^hello.*', 'hello abc'))
    print(re.match(r'^hello.*', 'abc hello abc'))
    print('-' * 30, ' \b 匹配单词的边界')
    print(re.match(r'.*\bab', '123 aabc'))  # word starting with "ab"
    print(re.match(r'.*\bab', '123 abcd'))
    print(re.match(r'.*\bab', '123 aaa'))
    print(re.match(r'.*\bab', '123 abcd cdab'))
    print(re.match(r'.*ab\b', '123 abc'))  # word ending with "ab"
    print(re.match(r'.*ab\b', '123 aaa'))
    print(re.match(r'.*ab\b', '123 ab'))
    print(re.match(r'.*ab\b', '123 cdab'))
    print(re.match(r'.*ab\b', '123 abcd cdab'))
def test_search():
    """Contrast re.match (anchored at the start) with re.search (scans
    anywhere in the string)."""
    greeting = r'hello'
    print(re.match(greeting, 'hello python'))
    print(re.search(greeting, 'hello python'))
    print(re.match(greeting, 'python hello'))
    print(re.search(greeting, 'python hello '))
    alternatives = 'aa|bb|cc'
    for probe in ('aa', 'bbb', 'ccc', 'a bb ccc'):
        print(re.match(alternatives, probe))
    print(re.search(alternatives, 'a bb ccc'))
# 多个字符
def test_multi_character():
    """Demonstrate alternation ('|') combined with classes and anchors.

    Patterns containing backslashes are raw strings to avoid
    invalid-escape-sequence warnings.
    """
    print('-' * 30, '案例 0-100之间的数字: 0-99 | 100')
    print(re.match(r'[1-9]?\d|100', '1'))
    print(re.match(r'[1-9]?\d|100', '11'))
    print(re.match(r'[1-9]?\d|100', '100'))
    print(re.match(r'[1-9]?\d$|100$', '100'))  # '$' anchors each alternative
    print(re.match(r'[1-9]?\d$|100$', '1000'))
    print('-' * 30, '案例 ')
    print(re.match('[ab][cd]', 'ab'))
    print(re.match('[ab][cd]', 'ac'))
    print(re.match('[ab][cd]', 'ad'))
    print(re.match('ab|cd', 'abc'))
    print(re.match('ab|cd', 'ac'))
# Capture groups
def test_group():
    r"""Demonstrate capture groups, backreferences (\1, \2) and named groups (?P<name>)."""
    print('-' * 30, '座机号码 区号{3,4} 号码{5,8} 010-0000 0791-222222')
    print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '010-10086'))
    print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '010-88888888'))
    print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '1111-10086'))
    print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '1111-88888888'))
    print('-' * 30, ' 匹配分组')
    o = re.match(r'(\d{3,4})-([1-9]\d{4,7})', '1111-88888888')
    print(o)
    # group(0) is the whole match; group(1), group(2) are the captures
    print(o.group(0), o.group(1), o.group(2))
    print(o.groups(), o.groups()[0], o.groups()[1])
    print('-' * 30, 'html 标签')
    print(re.match(r'<.+><.+>.+</.+></.+>', '<html><a>abc</a></html>'))
    print(re.match(r'<.+><.+>.+</.+></.+>', '<html><a>abc</b></html>'))
    print(re.match(r'<(.*)><(.*)>.*</\2></\1>', '<html><a>abc</b></html>'))
    print(re.match(r'<(.*)><(.*)>.*</\2></\1>', '<html><d>abc</d></html>'))
    print('-' * 30, 'html 标签 - 别名')
    print(re.match(r'<(?P<k_html>.+)><(?P<k_head>.+)>.*</(?P=k_head)></(?P=k_html)>', '<html><d>abc</d></html>'))
## Search and replace
def test_sub():
    """Demonstrate re.sub and re.subn (subn also returns the replacement count)."""
    print('-' * 30, ' 替换')
    print(re.sub(r'#.*$', '', '2004-222-23322 # 这是个什么'))  # strip the '#'-prefixed tail
    print(re.sub(r'#\D*', '', '2004-222-23322 # 这是个什么'))
    print('-' * 30, ' 替换 subn')
    print(re.subn(r'#\D*', '', '2004-222-23322 # 这是个什么'))
    print(re.subn(r'#.*$', '', '2004-222-23322 # 这是个什么'))
def test_compile():
    """Pre-compile a pattern with re.compile and reuse the pattern object."""
    print('-' * 30, ' compile的使用')
    word_re = re.compile(r'\w+')  # one or more word characters (letters, digits, _)
    print(word_re.match('1223dfdf'))
    print(word_re.match('##1223dfdf'))
def test_findall():
    """Demonstrate re.findall (list of strings) and re.finditer (iterator of Match objects)."""
    print('-' * 30, ' findall 返回数组')
    print(re.findall(r'\w', '##1223dfdf'))  # each word character separately
    print(re.findall(r'\w+', '## 1223 df df 1'))
    print('-' * 30, ' finditer 返回迭代器')
    print(re.finditer(r'\w+', '## 1223 df df 1'))
    for i in re.finditer(r'\w+', '## 1223 df df 1'):
        print(i, i.group())
def test_split():
    """Demonstrate re.split, including the optional maxsplit argument."""
    print('-' * 30, ' split 返回数组')
    print(re.split(r'\d+', '123abc123abc'))
    print(re.split(r'\d+', '123 abc 123 abc'))
    print(re.split(r'\d+', 'abc123 abc 123 abc'))
    print(re.split(r'\d+', 'abc 123 abc 123 abc',1))  # maxsplit=1: split only on the first digit run
def greedy_mode():
    r"""Contrast greedy quantifiers (.+, \d+) with their non-greedy forms (.+?, \d+?)."""
    print('-' * 30, ' 贪婪模式')
    result = re.match(r'(.+)(\d+-\d+-\d+)', 'this is my tel: 122-1244-1242')
    print(result.group(1))
    print(result.group(2))
    print('-' * 30, ' 非贪婪模式 尽可能少的匹配')
    result = re.match(r'(.+?)(\d+-\d+-\d+)', 'this is my tel: 122-1244-1242')
    print(result.group(1))
    print(result.group(2))
    print('-' * 30, ' 贪婪模式')
    print(re.match(r'abc(\d+)', 'abc123456'))
    print(re.match(r'abc(\d+?)', 'abc123456'))
if __name__ == '__main__':
    # Uncomment individual demos to run them:
    # test_match()
    # test_match_character()
    # test_match_phone()
    # test_match_qualifier()
    # escape_character()
    # boundary()
    # test_search()
    # test_multi_character()
    # test_group()
    # test_sub()
    # test_compile()
    # test_findall()
    # test_split()
    # greedy_mode()
    # Cache-busting demo: rewrite the "?t=..." query suffix on CSS links.
    s = '<link href="../assets/css/app.css?t=20112455" type="text/css" rel="stylesheet">'
    # Fix: variable was misspelled "mathched".
    matched = re.findall(r'\S+assets/css/\S+.css\S+"', s)
    for m in matched:
        css_at = m.index('.css')  # hoisted: previously computed twice per match
        print(m, css_at)
        s = s.replace(m, m[:css_at] + '.css?t=00000"')
    print(s)
| 3.828125 | 4 |
dot_dotfiles/mail/dot_offlineimap.py | TheRealOne78/dots | 758 | 17543 | #! /usr/bin/env python2
# -*- coding: utf8 -*-
from subprocess import check_output
def get_pass():
    r"""Return the Gmail password from the `pass` password store.

    NOTE(review): this file targets Python 2 (see shebang). Under Python 3,
    check_output returns bytes and .strip("\n") would raise TypeError.
    """
    return check_output("pass gmail/me", shell=True).strip("\n")
| 2.09375 | 2 |
tests/encoding-utils/test_big_endian_integer.py | carver/ethereum-utils | 0 | 17544 | <gh_stars>0
from __future__ import unicode_literals
import pytest
from hypothesis import (
strategies as st,
given,
)
from eth_utils.encoding import (
int_to_big_endian,
big_endian_to_int,
)
@pytest.mark.parametrize(
    'as_int,as_big_endian',
    (
        (0, b'\x00'),
        (1, b'\x01'),
        (7, b'\x07'),
        (8, b'\x08'),
        (9, b'\x09'),
        (256, b'\x01\x00'),
        (2**256 - 1, b'\xff' * 32),
    ),
)
def test_big_endian_conversions(as_int, as_big_endian):
    """Each fixed pair must convert to its counterpart in both directions."""
    assert int_to_big_endian(as_int) == as_big_endian
    assert big_endian_to_int(as_big_endian) == as_int
@given(value=st.integers(min_value=0, max_value=2**256 - 1))
def test_big_endian_round_trip_from_int(value):
    """Encoding then decoding any 256-bit unsigned int is the identity."""
    assert big_endian_to_int(int_to_big_endian(value)) == value
@given(
    value=st.binary(min_size=1, max_size=32).map(
        lambda raw: raw.lstrip(b'\x00') or b'\x00'
    )
)
def test_big_endian_round_trip_from_big_endian(value):
    """Decoding then re-encoding canonical (no leading zeros) bytes is the identity."""
    assert int_to_big_endian(big_endian_to_int(value)) == value
| 2.421875 | 2 |
src/data/normalization.py | poly-ai/fluid-surface-estimation | 2 | 17545 | <gh_stars>1-10
import numpy as np
# Normalize each sequence in the dataset to span [lower_lim, upper_lim]
# (defaults: min 0.0, max 1.0). Operates in place on `dataset`.
def normalize(dataset, lower_lim=0.0, upper_lim=1.0):
    """Linearly rescale every sequence of `dataset` to [lower_lim, upper_lim].

    Args:
        dataset: array of shape (n_sequences, d1, d2, d3); modified in place.
        lower_lim: value the per-sequence minimum maps to (default 0.0).
        upper_lim: value the per-sequence maximum maps to (default 1.0).

    Returns:
        The same array, rescaled per sequence.

    Note:
        A sequence with constant values divides by zero, as in the original
        implementation; callers must ensure each sequence has spread.
    """
    seq_mins = dataset.min(axis=(1, 2, 3))
    seq_maxes = dataset.max(axis=(1, 2, 3))
    dataset -= seq_mins.reshape((-1, 1, 1, 1))
    dataset /= (seq_maxes - seq_mins).reshape((-1, 1, 1, 1))
    # Fix: lower_lim/upper_lim were accepted but silently ignored. Map the
    # [0, 1] result onto [lower_lim, upper_lim]; with the default arguments
    # this branch is skipped, so existing callers see identical behaviour.
    if lower_lim != 0.0 or upper_lim != 1.0:
        dataset *= (upper_lim - lower_lim)
        dataset += lower_lim
    return dataset
# Normalize only the sequences whose values fall outside [lower_lim, upper_lim];
# those sequences are rescaled to min 0.0 / max 1.0, all others are untouched.
def normalize_only_outliers(dataset, lower_lim=0.0, upper_lim=1.0):
    per_seq_min = dataset.min(axis=(1, 2, 3))
    per_seq_max = dataset.max(axis=(1, 2, 3))
    # A sequence is an outlier when it dips below lower_lim or exceeds upper_lim.
    outliers = (per_seq_min < lower_lim) | (per_seq_max > upper_lim)
    shift = per_seq_min[outliers].reshape((-1, 1, 1, 1))
    scale = (per_seq_max - per_seq_min)[outliers].reshape((-1, 1, 1, 1))
    dataset[outliers] -= shift
    dataset[outliers] /= scale
    return dataset
| 2.859375 | 3 |
setup.py | fwitte/PyPSA | 0 | 17546 | <reponame>fwitte/PyPSA
from __future__ import absolute_import
from setuptools import setup, find_packages
from codecs import open
# Long description shown on PyPI, read verbatim from the README.
with open('README.rst', encoding='utf-8') as f:
    long_description = f.read()
# setuptools packaging metadata for the PyPSA distribution.
setup(
    name='pypsa',
    version='0.19.1',
    author='PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html',
    author_email='<EMAIL>',
    description='Python for Power Systems Analysis',
    long_description=long_description,
    long_description_content_type='text/x-rst',
    url='https://github.com/PyPSA/PyPSA',
    license='MIT',
    packages=find_packages(exclude=['doc', 'test']),
    include_package_data=True,
    python_requires='>=3.6',
    install_requires=[
        'numpy',
        'scipy',
        'pandas>=0.24.0',
        'xarray',
        'netcdf4',
        'tables',
        'pyomo>=5.7',
        'matplotlib',
        'networkx>=1.10',
        'deprecation'
    ],
    # Optional feature sets, installable e.g. as `pip install pypsa[dev]`.
    extras_require = {
        "dev": ["pytest", "pypower", "pandapower", "scikit-learn"],
        "cartopy": ['cartopy>=0.16'],
        "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme", "nbsphinx", "nbsphinx-link", "black"],
        'gurobipy':['gurobipy']
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
    ])
| 1.4375 | 1 |
quiz/urls.py | Hysham/Quiz-Hoster | 1 | 17547 | <reponame>Hysham/Quiz-Hoster<filename>quiz/urls.py
from django.urls import path
from . import views
# URL routes for the quiz app; each route is addressable by its `name`.
urlpatterns = [
    path('', views.quiz_home, name='quiz-home'),
    path('page/<int:page_no>/', views.quiz_page, name='quiz-page' ),
    path('about/', views.quiz_about, name='quiz-about'),
    path('submit/', views.quiz_submit, name='quiz-submit'),
    ## after quiz end
    path('view_result/<int:page_no>/', views.quiz_view_result, name='quiz-view_result'),
    path('leaderboard/', views.quiz_leaderboard, name='quiz-leaderboard'),
    path('feedback/', views.quiz_feedback, name='quiz-feedback'),
]
jamf/setconfig.py | pythoninthegrass/python-jamf | 25 | 17548 | <filename>jamf/setconfig.py<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Jamf Config
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (c) 2020 University of Utah, Marriott Library"
__license__ = "MIT"
__version__ = "1.0.4"
import argparse
import getpass
import jamf
import logging
import platform
import pprint
import sys
from os import path
class Parser:
    """Builds and wraps the argparse command-line interface for setconfig."""

    def __init__(self):
        # Pick the platform-appropriate default preferences path.
        # NOTE(review): on platforms other than Darwin/Linux the default is
        # left unbound and the add() call below would raise -- confirm.
        system = platform.system()
        if system == "Darwin":
            prefs_default = jamf.config.MACOS_PREFS_TILDA
        elif system == "Linux":
            prefs_default = jamf.config.LINUX_PREFS_TILDA
        self.parser = argparse.ArgumentParser()
        add = self.parser.add_argument  # hoisted for brevity
        add("-H", "--hostname", help="specify hostname (default: prompt)")
        add("-u", "--user", help="specify username (default: prompt)")
        add("-p", "--passwd", help="specify password (default: prompt)")
        add(
            "-C",
            "--config",
            dest="path",
            metavar="PATH",
            default=prefs_default,
            help=f"specify config file (default {prefs_default})",
        )
        add(
            "-P",
            "--print",
            action="store_true",
            help="print existing config profile (except password!)",
        )
        add(
            "-t",
            "--test",
            action="store_true",
            help="Connect to the Jamf server using the config file",
        )

    def parse(self, argv):
        """
        :param argv: list of arguments to parse
        :returns: argparse.NameSpace object
        """
        return self.parser.parse_args(argv)
def setconfig(argv):
    """Create, print, or test a jamf config file according to *argv*.

    Modes (mutually exclusive, chosen from the parsed flags):
      --test   connect with the existing config and dump the accounts list
      --print  show hostname/username and whether a password is stored
      default  prompt for (or take from flags) hostname/user/password and save
    """
    logger = logging.getLogger(__name__)
    args = Parser().parse(argv)
    logger.debug(f"args: {args!r}")
    if args.path:
        config_path = args.path
    else:
        # No explicit path: fall back to the platform default (mirrors
        # Parser.__init__; unbound on platforms other than Darwin/Linux).
        myplatform = platform.system()
        if myplatform == "Darwin":
            default_pref = jamf.config.MACOS_PREFS_TILDA
        elif myplatform == "Linux":
            default_pref = jamf.config.LINUX_PREFS_TILDA
        config_path = default_pref
    if config_path[0] == "~":
        config_path = path.expanduser(config_path)
    if args.test:
        api = jamf.API(config_path=config_path)
        pprint.pprint(api.get("accounts"))
    elif args.print:
        conf = jamf.config.Config(prompt=False, explain=True, config_path=config_path)
        print(conf.hostname)
        print(conf.username)
        if conf.password:
            print("Password is set")
        else:
            print("Password is not set")
    else:
        # Fix: this branch contained corrupted "<PASSWORD>" placeholder tokens
        # (dataset anonymization residue) that made the file syntactically
        # invalid; restored the obvious intent using the imported getpass.
        if args.hostname:
            hostname = args.hostname
        else:
            hostname = jamf.config.prompt_hostname()
        if args.user:
            user = args.user
        else:
            user = input("username: ")
        if args.passwd:
            passwd = args.passwd
        else:
            passwd = getpass.getpass()
        conf = jamf.config.Config(
            hostname=hostname, username=user, password=passwd, prompt=False
        )
        conf.save(config_path=config_path)
def main():
    """Configure INFO-level logging and run setconfig on the CLI arguments."""
    log_format = "%(asctime)s: %(levelname)8s: %(name)s - %(funcName)s(): %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_format)
    setconfig(sys.argv[1:])
if __name__ == "__main__":
    # Script entry point.
    main()
| 2.546875 | 3 |
src/menuResponse/migrations/0001_initial.py | miguelaav/dev | 0 | 17549 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-12 17:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations` (Django 1.11); applied migrations
    # should not be edited by hand.
    initial = True
    dependencies = [
        ('menuCreate', '0001_initial'),
        ('menu', '0002_remove_menu_slug'),
    ]
    operations = [
        # Creates MenuResponseModel: free-text comments, an auto-set date,
        # and cascading foreign keys to MenuCreateModel and Menu.
        migrations.CreateModel(
            name='MenuResponseModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comments', models.CharField(max_length=200)),
                ('date', models.DateField(auto_now_add=True)),
                ('MenuID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menuCreate.MenuCreateModel')),
                ('option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.Menu')),
            ],
        ),
    ]
| 1.71875 | 2 |
datacube/drivers/s3/storage/s3aio/s3aio.py | Zac-HD/datacube-core | 2 | 17550 | """
S3AIO Class
Array access to a single S3 object
"""
from __future__ import absolute_import
import SharedArray as sa
import zstd
from itertools import repeat, product
import numpy as np
from pathos.multiprocessing import ProcessingPool
from six.moves import zip
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from .s3io import S3IO, generate_array_name
class S3AIO(object):
    """Array-style access to a single S3 object: point, slice and bounding-box reads."""
    def __init__(self, enable_compression=True, enable_s3=True, file_path=None, num_workers=30):
        """Initialise the S3 array IO interface.
        :param bool enable_compression: Whether stored objects are zstd-compressed.
        :param bool enable_s3: Flag to store objects in s3 or disk.
            True: store in S3
            False: store on disk (for testing purposes)
        :param str file_path: The root directory for the emulated s3 buckets when enable_s3 is set to False.
        :param int num_workers: The number of workers for parallel IO.
        """
        self.s3io = S3IO(enable_s3, file_path, num_workers)
        self.pool = ProcessingPool(num_workers)
        self.enable_compression = enable_compression
    def to_1d(self, index, shape):
        """Converts nD index to 1D index.
        :param tuple index: N-D Index to be converted.
        :param tuple shape: Shape to be used for conversion.
        :return: Returns the 1D index.
        """
        return np.ravel_multi_index(index, shape)
    def to_nd(self, index, shape):
        """Converts 1D index to nD index.
        :param tuple index: 1D Index to be converted.
        :param tuple shape: Shape to be used for conversion.
        :return: Returns the ND index.
        """
        return np.unravel_index(index, shape)
    def get_point(self, index_point, shape, dtype, s3_bucket, s3_key):
        """Gets a point in the nd array stored in S3.
        Without compression only the needed byte range is fetched; with
        compression the whole object is downloaded and decompressed first.
        :param tuple index_point: Index of the point to be retrieved.
        :param tuple shape: Shape of the stored data.
        :param numpy.dtype dtype: dtype of the stored data.
        :param str s3_bucket: S3 bucket name
        :param str s3_key: S3 key name
        :return: Returns the point data.
        """
        item_size = np.dtype(dtype).itemsize
        idx = self.to_1d(index_point, shape) * item_size
        if self.enable_compression:
            b = self.s3io.get_bytes(s3_bucket, s3_key)
            # despite the 'cctx' name this is a *decompression* context
            cctx = zstd.ZstdDecompressor()
            b = cctx.decompress(b)[idx:idx + item_size]
        else:
            b = self.s3io.get_byte_range(s3_bucket, s3_key, idx, idx + item_size)
        a = np.frombuffer(b, dtype=dtype, count=-1, offset=0)
        return a
    def cdims(self, slices, shape):
        # Per dimension: True when the slice covers the full extent with step 1
        # (i.e. that dimension of the selection is stored contiguously).
        return [sl.start == 0 and sl.stop == sh and (sl.step is None or sl.step == 1)
                for sl, sh in zip(slices, shape)]
    def get_slice(self, array_slice, shape, dtype, s3_bucket, s3_key):  # pylint: disable=too-many-locals
        """Gets a slice of the nd array stored in S3.
        With compression enabled this delegates to get_slice_by_bbox;
        otherwise one byte range is fetched per maximal contiguous block.
        :param tuple array_slice: tuple of slices to retrieve.
        :param tuple shape: Shape of the stored data.
        :param numpy.dtype dtype: dtype of the stored data.
        :param str s3_bucket: S3 bucket name
        :param str s3_key: S3 key name
        :return: Returns the data slice.
        """
        # convert array_slice into into sub-slices of maximum contiguous blocks
        # Todo:
        #   - parallelise reads and writes
        #     - option 1. get memory rows in parallel and merge
        #     - option 2. smarter byte range subsets depending on:
        #       - data size
        #       - data contiguity
        if self.enable_compression:
            return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)
        # truncate array_slice to shape
        # array_slice = [slice(max(0, s.start) - min(sh, s.stop)) for s, sh in zip(array_sliced, shape)]
        array_slice = [slice(max(0, s.start), min(sh, s.stop)) for s, sh in zip(array_slice, shape)]
        cdim = self.cdims(array_slice, shape)
        # `end` = number of trailing dimensions that are fully contiguous
        try:
            end = cdim[::-1].index(False) + 1
        except ValueError:
            end = len(shape)
        start = len(shape) - end
        outer = array_slice[:-end]
        outer_ranges = [range(s.start, s.stop) for s in outer]
        outer_cells = list(product(*outer_ranges))
        blocks = list(zip(outer_cells, repeat(array_slice[start:])))
        item_size = np.dtype(dtype).itemsize
        results = []
        for cell, sub_range in blocks:
            # print(cell, sub_range)
            s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
            s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
            # print(s3_start, s3_end)
            data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)
            results.append((cell, sub_range, data))
        result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)
        offset = [s.start for s in array_slice]
        for cell, sub_range, data in results:
            t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
                 zip(cell + tuple(sub_range), offset)]
            if data.dtype != dtype:
                data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
            result[t] = data.reshape([s.stop - s.start for s in sub_range])
        return result
    def get_slice_mp(self, array_slice, shape, dtype, s3_bucket, s3_key):  # pylint: disable=too-many-locals
        """Gets a slice of the nd array stored in S3 in parallel.
        With compression enabled this delegates to get_slice_by_bbox;
        otherwise each contiguous block is fetched by a pool worker which
        writes it directly into a shared-memory array.
        :param tuple array_slice: tuple of slices to retrieve.
        :param tuple shape: Shape of the stored data.
        :param numpy.dtype dtype: dtype of the stored data.
        :param str s3_bucket: S3 bucket name
        :param str s3_key: S3 key name
        :return: Returns the data slice.
        """
        # pylint: disable=too-many-locals
        def work_get_slice(block, array_name, offset, s3_bucket, s3_key, shape, dtype):
            # Worker: fetch one contiguous byte range and write it into the
            # shared array (re-attached by name inside the worker process).
            result = sa.attach(array_name)
            cell, sub_range = block
            item_size = np.dtype(dtype).itemsize
            s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
            s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
            data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)
            t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
                 zip(cell + tuple(sub_range), offset)]
            if data.dtype != dtype:
                data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
            # data = data.reshape([s.stop - s.start for s in sub_range])
            result[t] = data.reshape([s.stop - s.start for s in sub_range])
        if self.enable_compression:
            return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)
        cdim = self.cdims(array_slice, shape)
        try:
            end = cdim[::-1].index(False) + 1
        except ValueError:
            end = len(shape)
        start = len(shape) - end
        outer = array_slice[:-end]
        outer_ranges = [range(s.start, s.stop) for s in outer]
        outer_cells = list(product(*outer_ranges))
        blocks = list(zip(outer_cells, repeat(array_slice[start:])))
        offset = [s.start for s in array_slice]
        array_name = generate_array_name('S3AIO')
        sa.create(array_name, shape=[s.stop - s.start for s in array_slice], dtype=dtype)
        shared_array = sa.attach(array_name)
        self.pool.map(work_get_slice, blocks, repeat(array_name), repeat(offset), repeat(s3_bucket),
                      repeat(s3_key), repeat(shape), repeat(dtype))
        sa.delete(array_name)
        return shared_array
    def get_slice_by_bbox(self, array_slice, shape, dtype, s3_bucket, s3_key):  # pylint: disable=too-many-locals
        """Gets a slice of the nd array stored in S3 by bounding box.
        :param tuple array_slice: tuple of slices to retrieve.
        :param tuple shape: Shape of the stored data.
        :param numpy.dtype dtype: dtype of the stored data.
        :param str s3_bucket: S3 bucket name
        :param str s3_key: S3 key name
        :return: Returns the data slice.
        """
        # Todo:
        #   - parallelise reads and writes
        #     - option 1. use get_byte_range_mp
        #     - option 2. smarter byte range subsets depending on:
        #       - data size
        #       - data contiguity
        item_size = np.dtype(dtype).itemsize
        s3_begin = (np.ravel_multi_index(tuple([s.start for s in array_slice]), shape)) * item_size
        s3_end = (np.ravel_multi_index(tuple([s.stop - 1 for s in array_slice]), shape) + 1) * item_size
        # if s3_end-s3_begin <= 5*1024*1024:
        #     d = self.s3io.get_byte_range(s3_bucket, s3_key, s3_begin, s3_end)
        # else:
        #     d = self.s3io.get_byte_range_mp(s3_bucket, s3_key, s3_begin, s3_end, 5*1024*1024)
        # NOTE: the entire object is fetched (and decompressed if enabled),
        # then trimmed to the bounding box in memory.
        d = self.s3io.get_bytes(s3_bucket, s3_key)
        if self.enable_compression:
            # despite the 'cctx' name this is a *decompression* context
            cctx = zstd.ZstdDecompressor()
            d = cctx.decompress(d)
        d = np.frombuffer(d, dtype=np.uint8, count=-1, offset=0)
        d = d[s3_begin:s3_end]
        cdim = self.cdims(array_slice, shape)
        try:
            end = cdim[::-1].index(False) + 1
        except ValueError:
            end = len(shape)
        start = len(shape) - end
        outer = array_slice[:-end]
        outer_ranges = [range(s.start, s.stop) for s in outer]
        outer_cells = list(product(*outer_ranges))
        blocks = list(zip(outer_cells, repeat(array_slice[start:])))
        item_size = np.dtype(dtype).itemsize
        results = []
        for cell, sub_range in blocks:
            s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
            s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
            data = d[s3_start - s3_begin:s3_end - s3_begin]
            results.append((cell, sub_range, data))
        result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)
        offset = [s.start for s in array_slice]
        for cell, sub_range, data in results:
            t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
                 zip(cell + tuple(sub_range), offset)]
            if data.dtype != dtype:
                data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
            result[t] = data.reshape([s.stop - s.start for s in sub_range])
        return result
| 2.3125 | 2 |
make_snapshot.py | trquinn/ICgen | 1 | 17551 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 21 15:11:31 2014
@author: ibackus
"""
__version__ = "$Revision: 1 $"
# $Source$
import pynbody
SimArray = pynbody.array.SimArray
import numpy as np
import gc
import os
import isaac
import calc_velocity
import ICgen_utils
import ICglobal_settings
global_settings = ICglobal_settings.global_settings
def snapshot_gen(ICobj):
    """
    Generates a tipsy snapshot from the initial conditions object ICobj.
    Returns snapshot, param, director
        snapshot: tipsy snapshot
        param: dictionary containing info for a .param file
        director: dictionary containing info for a .director file
    """
    print 'Generating snapshot...'
    # Constants
    G = SimArray(1.0,'G')
    # ------------------------------------
    # Load in things from ICobj
    # ------------------------------------
    print 'Accessing data from ICs'
    settings = ICobj.settings
    # filenames
    snapshotName = settings.filenames.snapshotName
    paramName = settings.filenames.paramName
    # particle positions
    r = ICobj.pos.r
    xyz = ICobj.pos.xyz
    # Number of particles
    nParticles = ICobj.pos.nParticles
    # molecular mass
    m = settings.physical.m
    # star mass
    m_star = settings.physical.M.copy()
    # disk mass
    m_disk = ICobj.sigma.m_disk.copy()
    m_disk = isaac.match_units(m_disk, m_star)[0]
    # mass of the gas particles
    m_particles = m_disk / float(nParticles)
    # re-scale the particles (allows making of lo-mass disk)
    m_particles *= settings.snapshot.mScale
    # -------------------------------------------------
    # Assign output
    # -------------------------------------------------
    print 'Assigning data to snapshot'
    # Get units all set up
    m_unit = m_star.units
    pos_unit = r.units
    if xyz.units != r.units:
        xyz.convert_units(pos_unit)
    # time units are sqrt(L^3/GM)
    t_unit = np.sqrt((pos_unit**3)*np.power((G*m_unit), -1)).units
    # velocity units are L/t
    v_unit = (pos_unit/t_unit).ratio('km s**-1')
    # Make it a unit
    v_unit = pynbody.units.Unit('{0} km s**-1'.format(v_unit))
    # Other settings
    metals = settings.snapshot.metals
    star_metals = metals
    # -------------------------------------------------
    # Initialize snapshot
    # -------------------------------------------------
    # Note that empty pos, vel, and mass arrays are created in the snapshot
    snapshot = pynbody.new(star=1,gas=nParticles)
    snapshot['vel'].units = v_unit
    # initial softening length (eps) of 0.01 length-units for every particle
    snapshot['eps'] = 0.01*SimArray(np.ones(nParticles+1, dtype=np.float32), pos_unit)
    snapshot['metals'] = SimArray(np.zeros(nParticles+1, dtype=np.float32))
    snapshot['rho'] = SimArray(np.zeros(nParticles+1, dtype=np.float32))
    snapshot.gas['pos'] = xyz
    snapshot.gas['temp'] = ICobj.T(r)
    snapshot.gas['mass'] = m_particles
    snapshot.gas['metals'] = metals
    snapshot.star['pos'] = SimArray([[ 0., 0., 0.]],pos_unit)
    snapshot.star['vel'] = SimArray([[ 0., 0., 0.]], v_unit)
    snapshot.star['mass'] = m_star
    snapshot.star['metals'] = SimArray(star_metals)
    # Estimate the star's softening length as the closest particle distance
    snapshot.star['eps'] = r.min()
    # Make param file
    param = isaac.make_param(snapshot, snapshotName)
    param['dMeanMolWeight'] = m
    gc.collect()
    # -------------------------------------------------
    # CALCULATE VELOCITY USING calc_velocity.py.  This also estimates the
    # gravitational softening length eps
    # -------------------------------------------------
    print 'Calculating circular velocity'
    preset = settings.changa_run.preset
    max_particles = global_settings['misc']['max_particles']
    calc_velocity.v_xy(snapshot, param, changa_preset=preset, max_particles=max_particles)
    gc.collect()
    # -------------------------------------------------
    # Estimate time step for changa to use
    # -------------------------------------------------
    # Save param file
    isaac.configsave(param, paramName, 'param')
    # Save snapshot
    snapshot.write(filename=snapshotName, fmt=pynbody.tipsy.TipsySnap)
    # est dDelta
    dDelta = ICgen_utils.est_time_step(paramName, preset)
    param['dDelta'] = dDelta
    # -------------------------------------------------
    # Create director file
    # -------------------------------------------------
    # largest radius to plot (90% of the outermost particle)
    r_director = float(0.9 * r.max())
    # surface density at the plot radius
    sigma_min = float(ICobj.sigma(r_director))
    # maximum surface density of the input profile
    sigma_max = float(ICobj.sigma.input_dict['sigma'].max())
    # Create director dict
    director = isaac.make_director(sigma_min, sigma_max, r_director, filename=param['achOutName'])
    ## Save .director file
    #isaac.configsave(director, directorName, 'director')
    # -------------------------------------------------
    # Wrap up
    # -------------------------------------------------
    print 'Wrapping up'
    # Now set the star particle's tform to a negative number.  This allows
    # UW ChaNGa treat it as a sink particle.
    snapshot.star['tform'] = -1.0
    # Update params: sink-particle radii/mass thresholds derived from the
    # innermost gas particle and the star mass
    r_sink = isaac.strip_units(r.min())
    param['dSinkBoundOrbitRadius'] = r_sink
    param['dSinkRadius'] = r_sink
    param['dSinkMassMin'] = 0.9 * isaac.strip_units(m_star)
    param['bDoSinks'] = 1
    return snapshot, param, director
diag_rank_update.py | IPA-HD/ldaf_classification | 0 | 17552 | <reponame>IPA-HD/ldaf_classification<gh_stars>0
"""
Diagonal Matrix with rank-1 updates.
"""
import itertools
import torch
from torch.functional import Tensor
class DiagRankUpdate(object):
    """Diagonal matrix with rank-1 updates.

    Represents A = diag(d) + sum_i u_i v_i^T.  ``rankUpdates`` has shape
    (n_updates, 2, n); along dim 1, index 0 holds the column vectors u_i
    and index 1 holds the row vectors v_i (see ``tensor``).
    """
    def __init__(self, diag, rankUpdates):
        super(DiagRankUpdate, self).__init__()
        self.diag = diag
        self.rankUpdates = rankUpdates
        # shape/device/dtype consistency checks
        assert rankUpdates.ndim == 3
        assert rankUpdates.shape[1] == 2
        assert rankUpdates.shape[2] == diag.shape[0]
        assert rankUpdates.device == diag.device
        assert rankUpdates.dtype == diag.dtype
    @property
    def dtype(self):
        return self.diag.dtype
    @property
    def ndim(self):
        return 2
    def __repr__(self) -> str:
        return "{0}×{0} DiagonalMatrix with {1} Rank-1 Update".format(
            self.diag.size()[0],
            len(self.rankUpdates)
        ) + ("s" if len(self.rankUpdates)!=1 else "")
    def tensor(self):
        # Materialise the dense matrix: diag(d) + U0^T @ U1 = diag(d) + sum_i u_i v_i^T
        return torch.diag(self.diag) + torch.matmul(self.rankUpdates[:,0,:].t(), self.rankUpdates[:,1,:])
    def device(self):
        return self.diag.device
    def dim(self):
        return 2
    def size(self):
        return torch.Size([
            self.diag.size()[0],
            self.diag.size()[0]
        ])
    def t(self):
        # Transpose: swap the roles of u_i and v_i in every update.
        return DiagRankUpdate(self.diag.clone(), torch.flip(self.rankUpdates, (1,)))
    def add(self, other):
        if type(other) != DiagRankUpdate:
            # Fallback: densify and add, returning a plain tensor.
            return torch.add(self.tensor(), other)
        # Structured sum: add diagonals and concatenate the update lists.
        return DiagRankUpdate(
            self.diag + other.diag,
            torch.cat((self.rankUpdates, other.rankUpdates))
        )
    def __add__(self, other):
        return self.add(other)
    def __radd__(self, other):
        return other.add(self)
    def negative(self):
        # NOTE(review): torch.tensor([[-1, 1]]) has shape (1, 2), which
        # broadcasts against the trailing (2, n) dims of rankUpdates -- this
        # only works when n == 2.  The intent (negate the u_i vectors only)
        # suggests a (2, 1) multiplier instead; confirm.  The constant is
        # also created on the default device/dtype.
        return DiagRankUpdate(
            -self.diag,
            self.rankUpdates * torch.tensor([[-1, 1]])
        )
    def __sub__(self, other):
        return self.add(other.negative())
    def __rsub__(self, other):
        return other.add(self.negative())
    def matmul(self, other):
        if type(other) != DiagRankUpdate:
            # NOTE(review): torch.mul is *elementwise*, not a matrix product;
            # possibly torch.matmul was intended here -- confirm with callers.
            return torch.mul(self.tensor(), other)
        # Structured product: (D1 + sum a b^T)(D2 + sum c d^T) expands into
        # D1 D2 plus three families of rank-1 updates:
        #   (D1 c) d^T,  a (b^T D2),  and  a (b.c) d^T  (all pairs).
        return DiagRankUpdate(
            self.diag * other.diag,
            torch.cat((
                torch.cat(
                    (
                        self.diag[None, None, :] * other.rankUpdates[:, (0,), :],
                        other.rankUpdates[:, (1,), :]
                    ),
                    dim = 1
                ),
                torch.cat(
                    (
                        self.rankUpdates[:, (0,), :],
                        other.diag[None, None, :] * self.rankUpdates[:, (1,), :]
                    ),
                    dim=1
                ),
                torch.stack([
                    torch.stack((s[1].dot(o[0]) * s[0], o[1])) for s, o in itertools.product(
                        self.rankUpdates,
                        other.rankUpdates
                    )]
                )
            ))
        )
    def batchDot(self, v):
        """
        Batched multiplication self @ v
        with batch of matrices v (batch_size, n, k)
        """
        assert v.ndim == 3
        assert v.shape[1] == self.rankUpdates.shape[2]
        n = v.shape[1]
        diag_bmm = self.diag.reshape((1, n, 1))*v
        inner_prod = torch.matmul(self.rankUpdates[:,1,:].unsqueeze(0), v)
        # inner_prod now has shape (batch_size, n_updates, k)
        outer_prod = torch.matmul(
            self.rankUpdates[:,0,:].t().unsqueeze(0),
            inner_prod
        )
        # outer_prod now has shape (batch_size, n, k)
        return diag_bmm + outer_prod
    def batchDotTransposed(self, v):
        """
        Batched multiplication self.t() @ v
        with batch of matrices v (batch_size, n, k)
        """
        assert v.ndim == 3
        assert v.shape[1] == self.rankUpdates.shape[2]
        n = v.shape[1]
        diag_bmm = self.diag.reshape((1, n, 1))*v
        inner_prod = torch.matmul(self.rankUpdates[:,0,:].unsqueeze(0), v)
        # inner_prod now has shape (batch_size, n_updates, k)
        outer_prod = torch.matmul(
            self.rankUpdates[:,1,:].t().unsqueeze(0),
            inner_prod
        )
        # outer_prod now has shape (batch_size, n, k)
        return diag_bmm + outer_prod
    def dotRight(self, other):
        """
        Multiply self @ other
        """
        # A x = d*x + sum_i u_i (v_i . x)
        return self.diag * other + torch.matmul(
            torch.matmul( self.rankUpdates[:,1,:] , other ),
            self.rankUpdates[:,0,:]
        )
    def dotLeft(self, other):
        """
        Multiply other @ self
        """
        # x^T A = d*x + sum_i (x . u_i) v_i
        return self.diag * other + torch.matmul(
            torch.matmul( self.rankUpdates[:,0,:] , other ),
            self.rankUpdates[:,1,:]
        )
    def dotBoth(self, v, w):
        """
        Let A be self and v, w ∈ ℝⁿ. Then `dotBoth` computes
            vᵀ A w
        """
        return (self.diag * v * w).sum() + torch.dot(
            torch.matmul(self.rankUpdates[:, 0, :], v),
            torch.matmul(self.rankUpdates[:, 1, :], w)
        )
    def trace(self):
        # tr(A) = tr(diag(d)) + sum_i u_i . v_i
        return self.diag.sum() + sum([torch.dot(r[0], r[1]) for r in self.rankUpdates])
    def appendUpdate(self, other):
        # Return a copy with one extra rank-1 update (a (2, n) pair).
        return DiagRankUpdate(
            self.diag.clone(),
            torch.cat((self.rankUpdates, other[None, :, :]))
        )
    def inverse(self):
        # Recursive Sherman-Morrison: peel off one rank-1 update at a time.
        # (A + v w^T)^{-1} = A^{-1} - (A^{-1} v)(w^T A^{-1}) / (1 + w^T A^{-1} v)
        if self.rankUpdates.shape[0] == 0:
            return DiagRankUpdate(
                1 / self.diag,
                torch.empty((0,2,self.size()[0]), device=self.device())
            )
        else:
            inv = DiagRankUpdate(self.diag, self.rankUpdates[0:-1, :, :]).inverse()
            v = self.rankUpdates[-1,0,:]
            w = self.rankUpdates[-1,1,:]
            return inv.appendUpdate(
                torch.stack((
                    inv.dotRight(v).negative(),
                    inv.dotLeft(w) / (
                        1 + inv.dotBoth(w, v)
                    )
                ))
            )
    def det(self):
        # Matrix determinant lemma: det(A + v w^T) = (1 + w^T A^{-1} v) det(A),
        # applied recursively down to the plain diagonal base case.
        if self.rankUpdates.shape[0] == 0:
            return self.diag.prod()
        else:
            reduced = DiagRankUpdate(
                self.diag,
                self.rankUpdates[0:-1, :, :]
            )
            v = self.rankUpdates[-1, 0, :]
            w = self.rankUpdates[-1, 1, :]
            return (1 + reduced.inverse().dotBoth(w, v)) * reduced.det()
    def log_det(self):
        # Log-space version of det() via the same determinant lemma.
        if self.rankUpdates.shape[0] == 0:
            return self.diag.log().sum()
        else:
            reduced = DiagRankUpdate(
                self.diag,
                self.rankUpdates[0:-1, :, :]
            )
            v = self.rankUpdates[-1, 0, :]
            w = self.rankUpdates[-1, 1, :]
            return torch.log(1 + reduced.inverse().dotBoth(w, v)) + reduced.log_det()
    def kl_divergence(self, other, mu0=None, mu1=None):
        # Closed-form KL( N(mu0, self) || N(mu1, other) ):
        #   0.5 * ( tr(S1^-1 S0) + (mu1-mu0)^T S1^-1 (mu1-mu0) - n
        #           + log det S1 - log det S0 )
        # When the means are omitted the quadratic term is dropped.
        inv = other.inverse()
        if not mu0 is None:
            mu1mu0 = mu1 - mu0
            return (
                inv.matmul(self).trace()
                +
                inv.dotBoth(mu1mu0, mu1mu0)
                -
                self.size()[0]
                +
                other.log_det() - self.log_det()
            ) / 2
        kl = (
            inv.matmul(self).trace()
            -
            self.size()[0]
            +
            other.log_det() - self.log_det()
        ) / 2
        if kl < 0:
            print("Warning, KL was < 0.", kl)
        return kl
    def projectionBoth(self):
        # Return P A P with P = I - (1/n) 1 1^T (centering projection),
        # expressed as A plus three rank-1 corrections:
        #   -(1/n) 1 (1^T A),  -(1/n) (A 1) 1^T,  +(1^T A 1 / n^2) 1 1^T.
        # NOTE(review): torch.ones(n) is created on the default device/dtype;
        # may mismatch CUDA/double tensors -- confirm.
        n = self.size()[0]
        ones = -torch.ones(n) / n
        a = self.rankUpdates[:,0,:]
        b = self.rankUpdates[:,1,:]
        a_sum = a.sum(dim=1)
        b_sum = b.sum(dim=1)
        return self.appendUpdate(
            torch.stack((
                ones,
                self.diag + a_sum @ b
            ))
        ).appendUpdate(
            torch.stack((
                self.diag + b_sum @ a,
                ones
            ))
        ).appendUpdate(
            torch.stack((
                (self.diag.sum() + (a_sum * b_sum).sum()) * ones,
                ones,
            ))
        )
tests/garage/tf/policies/test_gaussian_mlp_policy_with_model.py | XavierJingfeng/starter | 0 | 17553 | import pickle
from unittest import mock
from nose2.tools.params import params
import numpy as np
import tensorflow as tf
from garage.tf.envs import TfEnv
from garage.tf.policies import GaussianMLPPolicyWithModel
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.models import SimpleGaussianMLPModel
class TestGaussianMLPPolicyWithModel(TfGraphTestCase):
    """Tests GaussianMLPPolicyWithModel with the model class patched to the
    deterministic SimpleGaussianMLPModel, across several obs/action shapes."""
    @params(
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    )
    def test_get_action(self, obs_dim, action_dim):
        # get_action / get_actions should return in-space actions plus the
        # distribution info produced by the (deterministic) mocked model.
        env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                         'gaussian_mlp_policy_with_model.GaussianMLPModel'),
                        new=SimpleGaussianMLPModel):
            policy = GaussianMLPPolicyWithModel(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        action, prob = policy.get_action(obs)
        # Exact values can be asserted because the mock is deterministic.
        expected_action = np.full(action_dim, 0.75)
        expected_mean = np.full(action_dim, 0.5)
        expected_log_std = np.full(action_dim, 0.5)
        assert env.action_space.contains(action)
        assert np.array_equal(action, expected_action)
        assert np.array_equal(prob['mean'], expected_mean)
        assert np.array_equal(prob['log_std'], expected_log_std)
        actions, probs = policy.get_actions([obs, obs, obs])
        for action, mean, log_std in zip(actions, probs['mean'],
                                         probs['log_std']):
            # NOTE(review): the `mean`/`log_std` loop variables are unused;
            # the asserts below re-check `prob` from get_action above --
            # confirm whether per-row values were intended here.
            assert env.action_space.contains(action)
            assert np.array_equal(action, expected_action)
            assert np.array_equal(prob['mean'], expected_mean)
            assert np.array_equal(prob['log_std'], expected_log_std)
    @params(
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    )
    def test_dist_info_sym(self, obs_dim, action_dim):
        # dist_info_sym should build a symbolic graph that reproduces the
        # mocked model's mean/log_std when fed a flattened observation.
        env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                         'gaussian_mlp_policy_with_model.GaussianMLPModel'),
                        new=SimpleGaussianMLPModel):
            policy = GaussianMLPPolicyWithModel(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        obs_dim = env.spec.observation_space.flat_dim
        obs_ph = tf.placeholder(tf.float32, shape=(None, obs_dim))
        dist1_sym = policy.dist_info_sym(obs_ph, name='p1_sym')
        expected_mean = np.full(action_dim, 0.5)
        expected_log_std = np.full(action_dim, 0.5)
        prob = self.sess.run(dist1_sym, feed_dict={obs_ph: [obs.flatten()]})
        assert np.array_equal(prob['mean'], expected_mean)
        assert np.array_equal(prob['log_std'], expected_log_std)
    @params(
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    )
    def test_is_pickleable(self, obs_dim, action_dim):
        # A pickled-and-restored policy (in a fresh graph) must produce the
        # same action and distribution info as the original.
        env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                         'gaussian_mlp_policy_with_model.GaussianMLPModel'),
                        new=SimpleGaussianMLPModel):
            policy = GaussianMLPPolicyWithModel(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        obs_dim = env.spec.observation_space.flat_dim
        action1, prob1 = policy.get_action(obs)
        p = pickle.dumps(policy)
        with tf.Session(graph=tf.Graph()):
            policy_pickled = pickle.loads(p)
            action2, prob2 = policy_pickled.get_action(obs)
            assert env.action_space.contains(action1)
            assert np.array_equal(action1, action2)
            assert np.array_equal(prob1['mean'], prob2['mean'])
            assert np.array_equal(prob1['log_std'], prob2['log_std'])
assert np.array_equal(prob1['log_std'], prob2['log_std'])
| 2.21875 | 2 |
cloudify_gcp/monitoring/stackdriver_uptimecheck.py | cloudify-cosmo/cloudify-gcp-plugin | 4 | 17554 | <reponame>cloudify-cosmo/cloudify-gcp-plugin<filename>cloudify_gcp/monitoring/stackdriver_uptimecheck.py<gh_stars>1-10
# #######
# Copyright (c) 2018-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
from cloudify_gcp.gcp import check_response
from .. import utils
from .. import constants
from ..monitoring import MonitoringBase
class StackDriverUpTimeCheckConfig(MonitoringBase):
    """Thin wrapper over the Stackdriver uptimeCheckConfigs REST resource."""

    def __init__(self, config, logger,
                 project_id=None, uptime_check_config=None, name=None):
        """
        :param config: GCP configuration passed through to MonitoringBase
        :param logger: cloudify logger
        :param project_id: GCP project that owns the uptime check
        :param uptime_check_config: request body used by create()/update()
        :param name: full resource name, required by delete()/update()
        """
        super(StackDriverUpTimeCheckConfig, self).__init__(
            config,
            logger,
            project_id,
            None)
        self.project_id = project_id
        self.uptime_check_config = uptime_check_config
        self.name = name

    @check_response
    def create(self):
        # POST the config under the project parent resource.
        return self.discovery_uptime_check.create(
            parent='projects/{}'.format(self.project_id),
            body=self.uptime_check_config).execute()

    @check_response
    def delete(self):
        # Deletes by full resource name (set via __init__).
        return self.discovery_uptime_check.delete(name=self.name).execute()

    @check_response
    def update(self):
        # Replaces the existing config identified by self.name.
        return self.discovery_uptime_check.update(
            name=self.name,
            body=self.uptime_check_config).execute()
@operation(resumable=True)
@utils.throw_cloudify_exceptions
def create(project_id, uptime_check_config, **kwargs):
    """Create a Stackdriver uptime check and record its resource name."""
    if utils.resource_created(ctx, constants.NAME):
        return
    gcp_config = utils.get_gcp_config()
    check = StackDriverUpTimeCheckConfig(
        gcp_config, ctx.logger,
        project_id=project_id, uptime_check_config=uptime_check_config)
    created = utils.create(check)
    # Remember the server-assigned name so delete/update can find it later.
    ctx.instance.runtime_properties[constants.NAME] = created[constants.NAME]
@operation(resumable=True)
@utils.retry_on_failure('Retrying deleting stackdriver group')
@utils.throw_cloudify_exceptions
def delete(**kwargs):
    """Delete the uptime check recorded in runtime properties, if any."""
    gcp_config = utils.get_gcp_config()
    recorded_name = ctx.instance.runtime_properties.get(constants.NAME)
    if recorded_name:
        check = StackDriverUpTimeCheckConfig(
            gcp_config, ctx.logger, name=recorded_name)
        utils.delete_if_not_external(check)
@operation(resumable=True)
@utils.throw_cloudify_exceptions
def update(project_id, uptime_check_config, **kwargs):
    """Push an updated uptime check config to Stackdriver."""
    gcp_config = utils.get_gcp_config()
    recorded_name = ctx.instance.runtime_properties[constants.NAME]
    check = StackDriverUpTimeCheckConfig(
        gcp_config, ctx.logger, project_id, uptime_check_config,
        name=recorded_name)
    check.update()
| 1.929688 | 2 |
tests/test_database.py | penggan666/index_selection_evaluation | 37 | 17555 | <filename>tests/test_database.py
import unittest
from selection.dbms.postgres_dbms import PostgresDatabaseConnector
from selection.index import Index
from selection.table_generator import TableGenerator
from selection.workload import Column, Query, Table
class TestDatabase(unittest.TestCase):
    """Integration tests for PostgresDatabaseConnector on a tiny TPC-H DB."""

    @classmethod
    def setUpClass(cls):
        # Generate a 0.1% scale-factor TPC-H database once for all tests.
        cls.db_name = "tpch_test_db_database"
        setup_connector = PostgresDatabaseConnector(None, autocommit=True)
        TableGenerator("tpch", 0.001, setup_connector,
                       explicit_database_name=cls.db_name)
        setup_connector.close()

    @classmethod
    def tearDownClass(cls):
        cleanup_connector = PostgresDatabaseConnector(None, autocommit=True)
        if cleanup_connector.database_exists(cls.db_name):
            cleanup_connector.drop_database(cls.db_name)

    def test_postgres_index_simulation(self):
        connector = PostgresDatabaseConnector(self.db_name, "postgres")
        self.assertTrue(connector.supports_index_simulation())
        connector.close()

    def test_simple_statement(self):
        connector = PostgresDatabaseConnector(self.db_name, "postgres")
        # TPC-H always has exactly 25 nations, regardless of scale factor.
        row = connector.exec_fetch("select count(*) from nation")
        self.assertEqual(row[0], 25)
        connector.close()

    def test_runtime_data_logging(self):
        connector = PostgresDatabaseConnector(self.db_name, "postgres")

        query = Query(17, "SELECT count(*) FROM nation;")
        connector.get_cost(query)
        self.assertEqual(connector.cost_estimations, 1)
        self.assertGreater(connector.cost_estimation_duration, 0)

        n_name_column = Column("n_name")
        nation_table = Table("nation")
        nation_table.add_column(n_name_column)
        index_oid = connector.simulate_index(Index([n_name_column]))[0]
        self.assertGreater(connector.index_simulation_duration, 0)
        self.assertEqual(connector.simulated_indexes, 1)

        duration_before_drop = connector.index_simulation_duration
        connector.drop_simulated_index(index_oid)
        self.assertGreater(connector.index_simulation_duration,
                           duration_before_drop)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 2.796875 | 3 |
aljson/__init__.py | hrzp/aljson | 1 | 17556 | from sqlalchemy.orm.collections import InstrumentedList
class BaseMixin:
    """Mixin that serializes SQLAlchemy model instances to plain dicts.

    Relationship cycles are avoided by tracking the lower-cased class
    names currently on the serialization path in ``caller_stack``.
    """

    # Class names currently being serialized; rebound per call in to_json().
    caller_stack = list()

    def extract_relations(self):
        """Return the names of the mapped relationships."""
        return self.__mapper__.relationships.keys()

    def extract_columns(self):
        """Return the names of the mapped columns."""
        return self.__mapper__.columns.keys()

    def get_columns(self):
        """Return a dict with the model's 'columns' and 'relationships' names."""
        result = dict()
        result['relationships'] = self.extract_relations()
        result['columns'] = self.extract_columns()
        return result

    def convert_columns_to_dict(self, columns):
        """Map each column name in `columns` to its value on this instance."""
        result = dict()
        for item in columns:
            result[item] = getattr(self, item)
        return result

    def convert_instrumented_list(self, items):
        """Serialize every element of a relationship collection."""
        result = list()
        for item in items:
            result.append(item.to_json(self.caller_stack))
        return result

    def detect_class_name(self, item):
        """Return the lower-cased model class name of `item`.

        For a relationship collection (InstrumentedList) the first
        element's class is used; an empty collection yields None since
        there is no element to inspect.
        """
        if item.__class__.__name__ == 'InstrumentedList':
            # BUG FIX: an empty collection previously raised IndexError
            # on item[0]; return None so the caller falls through to
            # serializing it as an empty list.
            if not item:
                return None
            return item[0].__class__.__name__.lower()
        return item.__class__.__name__.lower()

    def convert_relations_to_dict(self, relations):
        """Serialize each relationship, skipping classes already on the path."""
        result = dict()
        me = self.__class__.__name__.lower()
        self.caller_stack.append(me)
        for relation in relations:
            obj = getattr(self, relation)
            if self.detect_class_name(obj) in self.caller_stack:
                continue
            if type(obj) == InstrumentedList:
                result[relation] = self.convert_instrumented_list(obj)
                continue
            result[relation] = obj.to_json(self.caller_stack)
        return result

    def to_json(self, caller_stack=None):
        '''
        Convert a SqlAlchemy query object to a dict(json)
        '''
        self.caller_stack = [] if not caller_stack else caller_stack
        final_obj = dict()
        columns = self.get_columns()
        final_obj.update(self.convert_columns_to_dict(columns['columns']))
        final_obj.update(self.convert_relations_to_dict(
            columns['relationships']))
        return final_obj
| 2.53125 | 3 |
src/robotide/context/coreplugins.py | veryl-technologies/t24-tests-ide | 1 | 17557 | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_core_plugins():
    """Import and return the list of built-in robotide plugin classes.

    Imports happen inside the function, presumably to defer heavy or
    circular imports until plugins are actually requested -- confirm
    before hoisting them to module level.
    """
    from robotide.run import RunAnything
    from robotide.recentfiles import RecentFilesPlugin
    from robotide.ui.preview import PreviewPlugin
    from robotide.ui.keywordsearch import KeywordSearch
    from robotide.editor import EditorPlugin
    from robotide.editor.texteditor import TextEditorPlugin
    from robotide.log import LogPlugin
    from robotide.searchtests.searchtests import TestSearchPlugin
    from robotide.spec.specimporter import SpecImporterPlugin
    return [RunAnything, RecentFilesPlugin, PreviewPlugin, SpecImporterPlugin,
            EditorPlugin, TextEditorPlugin, KeywordSearch, LogPlugin, TestSearchPlugin]
| 1.539063 | 2 |
backend/apps/api/system/v1/serializers/groups.py | offurface/smsta | 0 | 17558 | from rest_framework import serializers
from ... import models
class DepartmentSerializers(serializers.ModelSerializer):
    """
    Department serializer (short and full names).
    """
    class Meta:
        model = models.Department
        fields = ["short_name", "full_name"]
class StudentSerializers(serializers.ModelSerializer):
    """
    Student serializer (basic identity fields).
    """
    class Meta:
        model = models.Student
        fields = ["pk", "name", "surname", "patronymic", "gender"]
class AcademicGroupsDetailSerializers(serializers.ModelSerializer):
    """
    Academic group detail serializer with nested department and students.
    """
    department = DepartmentSerializers()
    # Read-only nested list of the group's students.
    students = StudentSerializers(many=True, read_only=True)

    class Meta:
        model = models.AcademicGroup
        fields = [
            "pk",
            "start_date",
            "department",
            "name",
            "students",
            "course",
        ]
| 2.515625 | 3 |
cinebot_mini/web_utils/blender_client.py | cheng-chi/cinebot_mini | 0 | 17559 | from cinebot_mini import SERVERS
import requests
import numpy as np
import json
def base_url():
    """Return the root URL of the configured blender server."""
    cfg = SERVERS["blender"]
    return "http://{}:{}".format(cfg["host"], cfg["port"])
def handshake():
    """Ping the blender server, retrying up to five times.

    Returns True on a successful round trip, False otherwise.
    """
    ping_url = base_url() + "/api/ping"
    attempts_left = 5
    while attempts_left > 0:
        attempts_left -= 1
        try:
            response = requests.get(ping_url, timeout=1.0)
            payload = response.json()
            assert (payload["url"] == "/api/ping")
            return True
        except Exception:
            continue
    return False
def create_object(name, type="CAMERA"):
    """Create one object of `type` named `name` on the blender server.

    Returns the server-assigned object name, or None on failure.
    """
    url = base_url() + "/api/create"
    data = {
        "type": type,
        "name": name
    }
    r = requests.put(url, data=json.dumps(data))
    r_data = r.json()
    obj_dict = r_data['result']
    if "name" in obj_dict:
        return obj_dict["name"]
    else:
        # BUG FIX: the failure branch referenced an undefined `obj_name`
        # (NameError) and passed it as a second print argument, so the
        # "{}" placeholder was never substituted.
        print("Creating {} failed!".format(name))
def create_objects(type="CAMERA", num=4, base_name="screen_camera_"):
    """Create `num` objects named `base_name`0..`base_name`(num-1).

    Returns the list of server-assigned names for the objects that were
    created successfully; failures are reported and skipped.
    """
    url = base_url() + "/api/create"
    obj_names = []
    for i in range(num):
        obj_name = base_name + str(i)
        data = {
            "type": type,
            "name": obj_name
        }
        r = requests.put(url, data=json.dumps(data))
        r_data = r.json()
        obj_dict = r_data['result']
        if "name" in obj_dict:
            obj_names.append(obj_dict["name"])
        else:
            # BUG FIX: print was called with obj_name as a second argument,
            # so the "{}" placeholder was never substituted.
            print("Creating {} failed!".format(obj_name))
    return obj_names
def set_transform_euler(obj_name, loc, rot, degree=True):
    """Set an object's world location and euler rotation.

    `rot` is interpreted in degrees when `degree` is True and converted
    to radians before being sent.
    """
    url = base_url() + "/api/object/" + obj_name + "/property"
    if degree:
        rotation = (np.array(rot) / 180.0 * np.pi).tolist()
    else:
        rotation = list(rot)
    body = {
        "properties": {
            "location": list(loc),
            "rotation_euler": list(rotation)
        }
    }
    response = requests.put(url, data=json.dumps(body))
    return response.json()["result"]
def set_transform_matrix(obj_name, matrix):
    """Set an object's 4x4 world transform matrix.

    NOTE: this function was defined twice verbatim in the original
    module; the redundant duplicate has been removed.
    """
    url = base_url() + "/api/object/" + obj_name + "/property"
    data = {
        "properties": {
            "matrix_world": matrix.tolist()
        }
    }
    r = requests.put(url, data=json.dumps(data))
    r_data = r.json()
    return r_data["result"]
def set_property(obj_name, key, val, prop_type="properties"):
    """Set a single property `key` to `val` on the named object.

    `prop_type` selects the property group (e.g. "properties" or
    "data_properties").
    """
    url = base_url() + "/api/object/" + obj_name + "/property"
    body = {prop_type: {key: val}}
    response = requests.put(url, data=json.dumps(body))
    return response.json()["result"]
def get_property(obj_name):
    """Fetch all properties of the named object."""
    response = requests.get(base_url() + "/api/object/" + obj_name + "/property")
    return response.json()["result"]
def test_object_exist(obj_name):
    """Return True when the named object exists on the server."""
    url = base_url() + "/api/object/" + obj_name + "/property"
    response = requests.get(url, data=json.dumps({}))
    # The server answers 404 for unknown objects.
    return response.status_code != 404
def set_animation_euler(obj_name, locs, rots, degree=True):
    """Keyframe an object with per-frame locations and euler rotations.

    `rots` is converted from degrees to radians when `degree` is True.
    """
    url = base_url() + "/api/object/" + obj_name + "/animation"
    rotations = rots / 180.0 * np.pi if degree else rots
    transforms = [
        {
            "frame_number": t,
            "location": locs[t].tolist(),
            "rotation_euler": rotations[t].tolist(),
        }
        for t in range(len(locs))
    ]
    response = requests.put(url, data=json.dumps({"transforms": transforms}))
    return response.json()["result"]
def set_animation_matrix(obj_name, matrices):
    """Keyframe an object with one 4x4 world matrix per frame."""
    url = base_url() + "/api/object/" + obj_name + "/animation"
    transforms = [
        {
            "frame_number": t,
            "matrix_world": matrices[t].tolist(),
        }
        for t in range(len(matrices))
    ]
    response = requests.put(url, data=json.dumps({"transforms": transforms}))
    return response.json()["result"]
def get_animation_dict(obj_name):
    """Return {frame_number: 4x4 world matrix} for the object's animation."""
    url = base_url() + "/api/object/" + obj_name + "/animation"
    frames = requests.get(url).json()["result"]
    return {
        frame["frame_number"]: np.array(frame["matrix_world"])
        for frame in frames
    }
def get_animation(obj_name):
    """Return the object's animation as a list of 4x4 world matrices."""
    url = base_url() + "/api/object/" + obj_name + "/animation"
    frames = requests.get(url).json()["result"]
    return [np.array(frame["matrix_world"]) for frame in frames]
def delete_animation(obj_name):
    """Remove all animation data from the named object."""
    response = requests.delete(base_url() + "/api/object/" + obj_name + "/animation")
    return response.json()["result"]
def delete_object(obj_name):
    """Delete the named object from the scene."""
    response = requests.delete(base_url() + "/api/object/" + obj_name)
    return response.json()["result"]
def render_animation(file_name, frame_start, frame_end):
    """Render frames [frame_start, frame_end] to `file_name`."""
    body = {
        "output_file_path": file_name,
        "frame_start": frame_start,
        "frame_end": frame_end
    }
    response = requests.put(base_url() + "/api/render/animation",
                            data=json.dumps(body))
    return response.json()["result"]
def set_render_resolution(pixel_dim):
    """Set the render resolution to (x, y) pixels; True on success."""
    width, height = pixel_dim
    body = {
        "properties": {
            "resolution_x": width,
            "resolution_y": height
        }
    }
    response = requests.put(base_url() + "/api/render/property",
                            data=json.dumps(body))
    return response.json()["result"] == "SUCCESS"
def set_camera_properties(cam_name, focal_length_m, sensor_dims_m):
    """Configure a camera's lens and sensor size.

    Inputs are in meters and converted to millimeters for the server.
    Returns True on success.
    """
    url = base_url() + "/api/object/" + cam_name + "/property"
    width_mm, height_mm = np.array(sensor_dims_m) * 1000
    body = {
        "data_properties": {
            "lens": focal_length_m * 1000,
            "sensor_width": width_mm,
            "sensor_height": height_mm
        }
    }
    response = requests.put(url, data=json.dumps(body))
    return response.json()["result"] == "SUCCESS"
def set_active_camera(cam_name):
    """Make `cam_name` the scene's active render camera; True on success."""
    response = requests.put(base_url() + "/api/render/active_camera",
                            data=json.dumps({"name": cam_name}))
    return response.json()["result"] == "SUCCESS"
| 2.96875 | 3 |
gem5-configs/configs-microbench-tests/run_controlbenchmarks.py | TCHERNET/parsec-tests2 | 5 | 17560 | # -*- coding: utf-8 -*-
# Copyright (c) 2018 The Regents of the University of California
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
from __future__ import print_function
import argparse
import m5
from m5.objects import TimingSimpleCPU, DerivO3CPU
from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory
from m5.objects import Root
from m5.objects import *
from system import BaseTestSystem
from system import InfMemory, SingleCycleMemory, SlowMemory
# Branch predictor params
# If indirect Predictor is disabled use BTB with these params
btbEntries = 512
btbTagSize = 19
class IndirectPred(SimpleIndirectPredictor):
    # Tuned indirect-target predictor configuration.
    indirectSets = 256  # Cache sets for indirect predictor
    indirectWays = 2  # Ways for indirect predictor
    indirectTagSize = 16  # Indirect target cache tag bits
    indirectPathLength = 3  # Previous indirect targets to use for path history
    indirectGHRBits = 13  # Indirect GHR number of bits
# NOTE(review): this instantiates the *base* SimpleIndirectPredictor, so the
# tuned parameters on IndirectPred above are never used. If the tuned values
# were intended, this should be `ipred = IndirectPred()` -- confirm.
ipred = SimpleIndirectPredictor()
#CPU Configs
class Simple_LocalBP(TimingSimpleCPU):
    # TimingSimpleCPU with a 2-bit local branch predictor.
    branchPred = LocalBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred  # set this to null to disable indirect predictor
    branchPred.localPredictorSize = 2048
    branchPred.localCtrBits = 2
class DefaultO3_LocalBP(DerivO3CPU):
    # Out-of-order CPU with a 2-bit local branch predictor.
    branchPred = LocalBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred  # set this to null to disable indirect predictor
    branchPred.localPredictorSize = 2048
    branchPred.localCtrBits = 2
class Simple_BiModeBP(TimingSimpleCPU):
    # TimingSimpleCPU with a bi-mode branch predictor.
    branchPred = BiModeBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred  # set this to null to disable indirect predictor
    branchPred.globalPredictorSize = 8192
    branchPred.globalCtrBits = 2
    branchPred.choicePredictorSize = 8192
    branchPred.choiceCtrBits = 2
class DefaultO3_BiModeBP(DerivO3CPU):
    # Out-of-order CPU with a bi-mode branch predictor.
    branchPred = BiModeBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred  # set this to null to disable indirect predictor
    branchPred.globalPredictorSize = 8192
    branchPred.globalCtrBits = 2
    branchPred.choicePredictorSize = 8192
    branchPred.choiceCtrBits = 2
class Simple_TournamentBP(TimingSimpleCPU):
    # TimingSimpleCPU with a tournament (local + global + chooser) predictor.
    branchPred = TournamentBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred  # set this to null to disable indirect predictor
    branchPred.localPredictorSize = 2048
    branchPred.localCtrBits = 2
    branchPred.localHistoryTableSize = 2048
    branchPred.globalPredictorSize = 8192
    branchPred.globalCtrBits = 2
    branchPred.choicePredictorSize = 8192
    branchPred.choiceCtrBits = 2
class DefaultO3_TournamentBP(DerivO3CPU):
    # Out-of-order CPU with a tournament (local + global + chooser) predictor.
    branchPred = TournamentBP()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred  # set this to null to disable indirect predictor
    branchPred.localPredictorSize = 2048
    branchPred.localCtrBits = 2
    branchPred.localHistoryTableSize = 2048
    branchPred.globalPredictorSize = 8192
    branchPred.globalCtrBits = 2
    branchPred.choicePredictorSize = 8192
    branchPred.choiceCtrBits = 2
class Simple_LTAGEBP(TimingSimpleCPU):
    # TimingSimpleCPU with an L-TAGE predictor (gem5 default LTAGE params).
    branchPred = LTAGE()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred  # set this to null to disable indirect predictor
class DefaultO3_LTAGEBP(DerivO3CPU):
    # Out-of-order CPU with an L-TAGE predictor (gem5 default LTAGE params).
    branchPred = LTAGE()
    branchPred.BTBEntries = btbEntries
    branchPred.BTBTagSize = btbTagSize
    branchPred.indirectBranchPred = ipred  # set this to null to disable indirect predictor
# Add more CPUs Configs under test before this
valid_configs = [Simple_LocalBP, Simple_BiModeBP, Simple_TournamentBP, Simple_LTAGEBP, DefaultO3_LocalBP, DefaultO3_BiModeBP, DefaultO3_TournamentBP, DefaultO3_LTAGEBP]
# Map CLI choice name -> class; [:-2] strips the trailing "BP" from the class name.
valid_configs = {cls.__name__[:-2]:cls for cls in valid_configs}
# Add more Memories under test before this
valid_memories = [InfMemory, SingleCycleMemory, SlowMemory]
# Map CLI choice name -> class; [:-6] strips the trailing "Memory" suffix.
valid_memories = {cls.__name__[:-6]:cls for cls in valid_memories}
# Command line: CPU config, memory model, and the workload binary to run.
parser = argparse.ArgumentParser()
parser.add_argument('config', choices = valid_configs.keys())
parser.add_argument('memory_model', choices = valid_memories.keys())
parser.add_argument('binary', type = str, help = "Path to binary to run")
args = parser.parse_args()
class MySystem(BaseTestSystem):
    # Test system wired with the CPU and memory models chosen on the CLI.
    _CPUModel = valid_configs[args.config]
    _MemoryModel = valid_memories[args.memory_model]
# Instantiate the system, attach the workload, and run to completion.
system = MySystem()
system.setTestBinary(args.binary)
root = Root(full_system = False, system = system)
m5.instantiate()

exit_event = m5.simulate()

# A clean run ends when the last active thread context exits.
if exit_event.getCause() != 'exiting with last active thread context':
    print("Benchmark failed with bad exit cause.")
    print(exit_event.getCause())
    exit(1)
if exit_event.getCode() != 0:
    print("Benchmark failed with bad exit code.")
    print("Exit code {}".format(exit_event.getCode()))
    exit(1)

# Report simulated time; assumes the default 1 ps tick (1e9 ticks per ms) -- confirm.
print("{} ms".format(m5.curTick()/1e9))
| 1.195313 | 1 |
poezio/args.py | hrnciar/poezio | 0 | 17561 | <filename>poezio/args.py
"""
Module related to the argument parsing
The historical fallback to the deprecated optparse module has been removed; argparse is required.
"""
from pathlib import Path
from argparse import ArgumentParser, SUPPRESS
from poezio.version import __version__
def parse_args(CONFIG_PATH: Path):
    """Build the poezio command-line parser and return the parsed options."""
    argparser = ArgumentParser('poezio')
    argparser.add_argument(
        "-c", "--check-config",
        dest="check_config", action='store_true',
        help='Check the config file')
    argparser.add_argument(
        "-d", "--debug",
        dest="debug", metavar="DEBUG_FILE",
        help="The file where debug will be written")
    argparser.add_argument(
        "-f", "--file",
        dest="filename", metavar="CONFIG_FILE",
        type=Path, default=CONFIG_PATH / 'poezio.cfg',
        help="The config file you want to use")
    argparser.add_argument(
        '-v', '--version',
        action='version', version='Poezio v%s' % __version__)
    # Hidden option used to override the reported version string.
    argparser.add_argument(
        "--custom-version",
        dest="custom_version", metavar="VERSION",
        default=__version__, help=SUPPRESS)
    return argparser.parse_args()
| 3.21875 | 3 |
src/mist/api/poller/schedulers.py | vladimir-ilyashenko/mist.api | 0 | 17562 | from celerybeatmongo.schedulers import MongoScheduler
from mist.api.sharding.mixins import ShardManagerMixin
from mist.api.poller.models import PollingSchedule
from mist.api.poller.models import OwnerPollingSchedule
from mist.api.poller.models import CloudPollingSchedule
from mist.api.poller.models import MachinePollingSchedule
import datetime
class PollingScheduler(MongoScheduler):
    # Celery-beat scheduler over all PollingSchedule documents.
    Model = PollingSchedule
    UPDATE_INTERVAL = datetime.timedelta(seconds=20)  # presumably how often schedules are re-read from Mongo -- confirm
class OwnerPollingScheduler(MongoScheduler):
    # Scheduler restricted to owner-level polling schedules.
    Model = OwnerPollingSchedule
    UPDATE_INTERVAL = datetime.timedelta(seconds=20)
class CloudPollingScheduler(MongoScheduler):
    # Scheduler restricted to cloud-level polling schedules.
    Model = CloudPollingSchedule
    UPDATE_INTERVAL = datetime.timedelta(seconds=20)
class MachinePollingScheduler(MongoScheduler):
    # Scheduler restricted to machine-level polling schedules.
    Model = MachinePollingSchedule
    UPDATE_INTERVAL = datetime.timedelta(seconds=20)
class ShardedOwnerScheduler(ShardManagerMixin, OwnerPollingScheduler):
    # Owner scheduler with sharding behavior mixed in.
    pass
class ShardedCloudScheduler(ShardManagerMixin, CloudPollingScheduler):
    # Cloud scheduler with sharding behavior mixed in.
    pass
class ShardedMachineScheduler(ShardManagerMixin, MachinePollingScheduler):
    # Machine scheduler with sharding behavior mixed in.
    pass
| 2.1875 | 2 |
datasets/hdd_classif.py | valeoai/BEEF | 4 | 17563 | from collections import Counter
import json
from pathlib import Path
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from bootstrap.lib.logger import Logger
from bootstrap.datasets import transforms as bootstrap_tf
try:
from .hdd import HDD
except:
from hdd import HDD
class HDDClassif(HDD):
def __init__(self,
dir_data,
split,
win_size,
im_size,
layer, # "goal" or "cause"
frame_position,
traintest_mode,
fps=10,
horizon=2, # in seconds
extract_mode=False,
batch_size=2,
debug=False,
shuffle=False,
pin_memory=False,
nb_threads=0):
self.win_size = win_size
self.frame_position = frame_position
super(HDDClassif, self).__init__(dir_data,
split,
im_size,
fps,
horizon, # in seconds
batch_size,
debug,
shuffle,
pin_memory,
nb_threads)
self.layer = layer
if self.layer == "cause":
self.layer_id = '1'
self.classid_to_ix = [-1, 16, 17, 18, 19, 20, 22]
elif self.layer == "goal":
self.layer_id = '0'
self.classid_to_ix = [-1, 0, 1, 2, 3, 4, 5, 7, 8, 10, 11, 12]
else:
raise ValueError(self.layer)
# The classid 0 is the background class
self.ix_to_classid = dict((ix, classid) for classid, ix in enumerate(self.classid_to_ix))
self.class_freq = self.get_class_freq()
self.collate_fn = bootstrap_tf.Compose([
bootstrap_tf.ListDictsToDictLists(),
bootstrap_tf.StackTensors()
])
self.dir_navig_features = self.dir_processed_annot
self.im_transform = transforms.Compose([transforms.Resize((self.im_h, self.im_w)),
transforms.ToTensor(),
transforms.Normalize(mean = [0.43216, 0.394666, 0.37645],
std = [0.22803, 0.22145, 0.216989])])
self.traintest_mode = traintest_mode
if self.traintest_mode:
self.make_batch_loader = self._make_batch_loader_traintest
else:
self.make_batch_loader = self._make_batch_loader
def classid_to_classname(self, classid):
ix = self.classid_to_ix[classid]
if ix == -1:
return '__background__'
else:
return self.ix_to_event[ix]
def _make_batch_loader(self, batch_size=None, shuffle=None, num_samples=200000):
nb_threads = self.nb_threads
batch_size = self.batch_size if batch_size is None else batch_size
shuffle = self.shuffle if shuffle is None else shuffle
if shuffle:
sampler = data.RandomSampler(self, replacement=True, num_samples=min(num_samples, len(self)))
shuffle = None
else:
sampler = None
batch_loader = data.DataLoader(
dataset=self,
batch_size=batch_size,
shuffle=shuffle,
pin_memory=self.pin_memory,
num_workers=nb_threads,
collate_fn=self.collate_fn,
sampler=sampler)
return batch_loader
def _make_batch_loader_traintest(self, batch_size=None, shuffle=None):
nb_threads = self.nb_threads
batch_size = self.batch_size if batch_size is None else batch_size
num_samples = batch_size*70000
shuffle = self.shuffle if shuffle is None else shuffle
if shuffle:
sampler = data.RandomSampler(self, replacement=True, num_samples=num_samples)
shuffle = None
else:
sampler = None
batch_loader = data.DataLoader(
dataset=self,
batch_size=batch_size,
shuffle=shuffle,
pin_memory=self.pin_memory,
num_workers=nb_threads,
collate_fn=self.collate_fn,
sampler=sampler)
return batch_loader
def build_index(self):
Logger()('Building index for %s split...' % self.split)
split_file = self.dir_data.joinpath(self.split+'.txt')
index = []
session_template = "{0}-{1}-{2}-{3}-{4}"
self.vid_to_index = []
self.vidname_to_vidid = {}
for idx, session_id in enumerate(open(split_file)):
name = session_template.format(session_id[:4],
session_id[4:6],
session_id[6:8],
session_id[8:10],
session_id[10:12])
annot_paths = list(filter(lambda x:name in x.as_posix(),
self.dir_processed_annot.iterdir()))
if len(annot_paths) == 0:
continue
assert len(annot_paths) == 1
annot_path = annot_paths[0]
if annot_path.exists():
frame_annots = sorted(annot_path.iterdir())
frame_annots = [None]*self.frame_position + frame_annots + [None]*(self.win_size-self.frame_position-1) # Zero-padding of the full video, such that each frame can get a context
L = [frame_annots[i:i+self.win_size] for i in range(0, len(frame_annots)-self.win_size+1)]
self.vid_to_index.append((len(index), len(index)+len(L)))
self.vidname_to_vidid[annot_path.name] = len(index)
index += L
# if self.debug:
# index += frame_annots[5000:7000]
# break
# else:
# index += frame_annots
if self.debug and idx==1:
break
Logger()('Done')
return index
def get_class_freq(self):
class_freq_path = self.dir_processed_annot.joinpath('%s_class_freq.json' % self.layer)
if class_freq_path.exists():
Logger()('Loading class frequency')
class_freq = json.load(open(class_freq_path))
Logger()('Loaded class frequency')
else:
Logger()('Computing class frequency')
if self.split != "train":
raise NotImplementedError('Extract class weigths on train set first')
class_freq = self.compute_class_freq()
with open(class_freq_path, 'w') as F:
F.write(json.dumps(class_freq))
return class_freq
def compute_class_freq(self):
class_freq = Counter()
S = 0
for paths in self.index:
annot_path = paths[-1]
if annot_path is None:
continue
annot = json.load(open(annot_path))
event = annot['labels'][self.layer_id]
classid = self.ix_to_classid.get(event, 0)
class_freq[classid] += 1
S += 1
for classid in class_freq:
class_freq[classid] = class_freq[classid] / S
return class_freq
def get_navig(self, annot):
item = {}
if len(annot['prev_xy']) == self.length:
prev_xy = torch.Tensor(annot['prev_xy'])
r_prev_xy = torch.Tensor(annot['r_prev_xy'])
else:
# should be padded before
n = len(annot['prev_xy'])
prev_xy = torch.Tensor(self.length,2).zero_()
r_prev_xy = torch.Tensor(self.length,2).zero_()
if n>0:
prev_xy[self.length - n:] = torch.Tensor(annot['prev_xy'])
r_prev_xy[self.length - n:] = torch.Tensor(annot['r_prev_xy'])
item['prev_xy'] = prev_xy
item['r_prev_xy'] = r_prev_xy
if len(annot['next_xy']) == self.length:
next_xy = torch.Tensor(annot['next_xy'])
r_next_xy = torch.Tensor(annot['r_next_xy'])
else:
# should be padded after
n = len(annot['next_xy'])
next_xy = torch.Tensor(self.length,2).zero_()
r_next_xy = torch.Tensor(self.length,2).zero_()
if n>0:
next_xy[:n] = torch.Tensor(annot['next_xy'])
r_next_xy[:n] = torch.Tensor(annot['r_next_xy'])
item['next_xy'] = next_xy
item['r_next_xy'] = r_next_xy
item['blinkers'] = torch.LongTensor([self.blinkers_to_ix[annot['blinkers']]])
return item
def get_navig_path(self, annot_path):
# Sometimes, due to sampling considerations, the navig annotation doesn't exist.
# We simply take the navig annotation for the closest existing sample
annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
annot_path.name)
if not annot_navig_path.exists():
annot_num = int(annot_path.stem)
annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
f"{annot_num-1:06d}.json")
if not annot_navig_path.exists():
annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
f"{annot_num+1:06d}.json")
if not annot_navig_path.exists():
annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
f"{annot_num-2:06d}.json")
return annot_navig_path
def __getitem__(self, idx):
    """Return one temporal window: frames, per-frame labels and navig data."""
    paths = self.index[idx]
    # Per-frame class labels; -1 marks padded slots (annot_path is None).
    y_true = torch.LongTensor(self.win_size).zero_() - 1
    frames = None
    navig = None
    item = {}
    for frame_id, annot_path in enumerate(paths):
        if annot_path is None:
            continue
        # Annotation stems are 0-based; frame images are 1-based.
        frame_number = int(annot_path.stem) + 1
        frames_folder = self.dir_processed_img.joinpath(annot_path.parent.name)
        frame_path = frames_folder.joinpath(f"{frame_number:06d}.jpg")
        im = Image.open(frame_path)
        im = self.im_transform(im)
        if frames is None:
            # Lazily allocate the (win_size, 3, H, W) frame buffer.
            frames = torch.Tensor(self.win_size, 3, self.im_h, self.im_w).zero_()
        frames[frame_id] = im
        annot = json.load(open(annot_path))
        event = annot['labels'][self.layer_id]
        # Unknown events fall back to class 0.
        y_true[frame_id] = self.ix_to_classid.get(event, 0)
        if navig is None:
            # -1 marks padded entries in every navigation tensor.
            navig = {'prev_xy': torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
                     'next_xy': torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
                     'r_prev_xy': torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
                     'r_next_xy': torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
                     'xy_polynom': torch.Tensor(self.win_size, 5, 2).zero_() - 1,
                     'blinkers': torch.LongTensor(self.win_size).zero_() - 1}
        annot_navig_path = self.get_navig_path(annot_path)
        annot_navig = json.load(open(annot_navig_path))
        _navig = self.get_navig(annot_navig)
        for k in _navig:
            navig[k][frame_id] = _navig[k]
    # NOTE(review): assumes at least one non-None path per window, otherwise
    # navig is still None here -- confirm against the index builder.
    item.update(navig)
    item['frames'] = frames
    item['idx'] = idx
    item['paths'] = paths
    item['frame_path'] = paths[self.frame_position]
    item['y_true_all'] = y_true
    item['y_true'] = y_true[self.frame_position]
    # Keep both the whole-window tensors ('*_all') and the centre-frame view.
    for k in navig:
        item[k + '_all'] = item[k]
        item[k] = item[k + '_all'][self.frame_position]
    item['frame_position'] = torch.LongTensor([self.frame_position])
    return item
if __name__ == "__main__":
    # Smoke test: build a validation dataset and iterate the sequential
    # batch sampler, printing each batch of window indices.
    split = "val"
    fps = 3
    dir_data = Path("/datasets_local/HDD")
    nb_threads = 0
    horizon = 2
    win_size = 21
    layer = "goal"
    batch_size = 12
    use_navig = False
    im_size = "small"
    dataset = HDDClassif(dir_data,
                         split,
                         win_size,
                         im_size,
                         layer,  # "goal" or "cause"
                         use_navig=use_navig,
                         fps=fps,
                         horizon=horizon,  # in seconds
                         batch_size=batch_size,
                         debug=False,
                         shuffle=False,
                         pin_memory=False,
                         nb_threads=0)

    # Group window indices by source video so batches never mix videos.
    vidname_to_index = {}
    for idx, sequence in enumerate(dataset.index):
        vid_name = sequence[0].parent.name
        if vid_name not in vidname_to_index:
            vidname_to_index[vid_name] = []
        vidname_to_index[vid_name].append(idx)

    batch_sampler = SequentialBatchSampler(vidname_to_index, batch_size)
    N = 0  # number of batches yielded
    for batch in batch_sampler:
        print(batch)
        N += 1

    # item = dataset[5]
    # loader = dataset.make_batch_loader(batch_size,
    #                                    shuffle=False)
    # for idx, batch in enumerate(loader):
    #     break
1W/6/3.py | allenalvin333/Hackerrank_Prep | 2 | 17564 | # https://www.hackerrank.com/challenges/one-week-preparation-kit-jesse-and-cookies/problem
#!/bin/python3
import math
import os
import random
import re
import sys
import heapq
#
# Complete the 'cookies' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER k
# 2. INTEGER_ARRAY A
#
def cookies(k, A, z=0):
    """Return the minimum number of mix operations so every cookie in *A*
    has sweetness >= *k*, or -1 if that is impossible.

    Each operation pops the two least-sweet cookies a <= b and pushes a
    new cookie of sweetness a + 2*b.  *A* is heapified in place.
    """
    heapq.heapify(A)
    ops = z
    while True:
        least = heapq.heappop(A)
        if least >= k:
            return ops
        if not A:
            # Only one cookie left and it is still too weak.
            return -1
        second = heapq.heappop(A)
        heapq.heappush(A, least + 2 * second)
        ops += 1
if __name__ == '__main__':
    # HackerRank harness: read k and the array from stdin, write the answer
    # to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    first_multiple_input = input().rstrip().split()

    n = int(first_multiple_input[0])  # declared array length (not used below)
    k = int(first_multiple_input[1])  # required minimum sweetness

    A = list(map(int, input().rstrip().split()))

    result = cookies(k, A)

    fptr.write(str(result) + '\n')

    fptr.close()
toughio/capillarity/_base.py | keurfonluu/toughio | 21 | 17565 | <filename>toughio/capillarity/_base.py
from abc import ABCMeta, abstractmethod, abstractproperty
import numpy
__all__ = [
    "BaseCapillarity",
]

# Python 2/3-compatible ABC base class.
# See <https://stackoverflow.com/questions/35673474/using-abc-abcmeta-in-a-way-it-is-compatible-both-with-python-2-7-and-python-3-5>
ABC = ABCMeta("ABC", (object,), {"__slots__": ()})
class BaseCapillarity(ABC):
    """Abstract base class for TOUGH capillary-pressure models.

    Subclasses must set ``_id`` (TOUGH ICP integer) and ``_name``, and
    implement the ``parameters`` property and the ``_eval`` method.
    """

    _id = None   # TOUGH ICP model number, set by subclasses
    _name = ""   # human-readable model name, set by subclasses

    def __init__(self, *args):
        """
        Base class for capillarity models.

        Do not use.

        """
        pass

    def __repr__(self):
        """Display capillarity model informations."""
        out = ["{} capillarity model (ICP = {}):".format(self._name, self._id)]
        out += [
            " CP({}) = {}".format(i + 1, parameter)
            for i, parameter in enumerate(self.parameters)
        ]
        return "\n".join(out)

    def __call__(self, sl):
        """Calculate capillary pressure given liquid saturation.

        *sl* may be a scalar or array_like; every value must lie in [0, 1].
        """
        if numpy.ndim(sl) == 0:
            if not (0.0 <= sl <= 1.0):
                raise ValueError()
            return self._eval(sl, *self.parameters)
        else:
            sl = numpy.asarray(sl)
            if not numpy.logical_and((sl >= 0.0).all(), (sl <= 1.0).all()):
                raise ValueError()
            return numpy.array([self._eval(sat, *self.parameters) for sat in sl])

    @abstractmethod
    def _eval(self, sl, *args):
        # Subclasses compute capillary pressure for one saturation value.
        raise NotImplementedError()

    def plot(self, n=100, ax=None, figsize=(10, 8), plt_kws=None):
        """
        Plot capillary pressure curve.

        Parameters
        ----------
        n : int, optional, default 100
            Number of saturation points.
        ax : matplotlib.pyplot.Axes or None, optional, default None
            Matplotlib axes. If `None`, a new figure and axe is created.
        figsize : array_like or None, optional, default None
            New figure size if `ax` is `None`.
        plt_kws : dict or None, optional, default None
            Additional keywords passed to :func:`matplotlib.pyplot.semilogy`.

        """
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            raise ImportError(
                "Plotting capillary pressure curve requires matplotlib to be installed."
            )

        if not (isinstance(n, int) and n > 1):
            raise ValueError()
        if not (ax is None or isinstance(ax, plt.Axes)):
            raise TypeError()
        if not (figsize is None or isinstance(figsize, (tuple, list, numpy.ndarray))):
            raise TypeError()
        # BUG FIX: previously `len(figsize)` was evaluated even when figsize
        # was None, raising TypeError for a documented-valid argument.
        if figsize is not None and len(figsize) != 2:
            raise ValueError()
        if not (plt_kws is None or isinstance(plt_kws, dict)):
            raise TypeError()

        # Plot parameters
        plt_kws = plt_kws if plt_kws is not None else {}
        _kwargs = {"linestyle": "-", "linewidth": 2}
        _kwargs.update(plt_kws)

        # Initialize figure
        if ax:
            ax1 = ax
        else:
            # NOTE(review): this fallback (8, 5) differs from the signature
            # default (10, 8); kept as-is for backward compatibility.
            figsize = figsize if figsize else (8, 5)
            fig = plt.figure(figsize=figsize, facecolor="white")
            ax1 = fig.add_subplot(1, 1, 1)

        # Calculate capillary pressure over the full saturation range.
        sl = numpy.linspace(0.0, 1.0, n)
        pcap = self(sl)

        # Plot |pcap| on a log axis.
        ax1.semilogy(sl, numpy.abs(pcap), **_kwargs)
        ax1.set_xlim(0.0, 1.0)
        ax1.set_xlabel("Saturation (liquid)")
        ax1.set_ylabel("Capillary pressure (Pa)")
        ax1.grid(True, linestyle=":")

        plt.draw()
        plt.show()
        return ax1

    @property
    def id(self):
        """Return capillarity model ID in TOUGH."""
        return self._id

    @property
    def name(self):
        """Return capillarity model name."""
        return self._name

    @abstractproperty
    def parameters(self):
        # Subclasses expose their CP(i) parameter list here.
        raise NotImplementedError()

    @parameters.setter
    def parameters(self, value):
        raise NotImplementedError()
| 2.90625 | 3 |
gui.py | NejcHirci/material-addon | 4 | 17566 | import bpy
import glob
from bpy.types import Panel, Operator
from bpy.app.handlers import persistent
import os
import threading
from queue import Queue
from pathlib import Path
from . mix_ops import *
from . matgan_ops import *
from . neural_ops import *
cache_path = os.path.join(Path(__file__).parent.resolve(), '.cache')
# Redraw all function
def redraw_all(context):
    """Tag every node-editor area in the current screen for a redraw."""
    for area in context.screen.areas:
        if area.type == 'NODE_EDITOR':
            area.tag_redraw()
# Thread function for reading output
def enqueue_output(out, queue):
    """Drain byte stream *out* line by line into *queue* (decoded, stripped).

    Intended as a daemon-thread target reading a subprocess's stdout; the
    stream is closed once it is exhausted.
    """
    while True:
        raw = out.readline()
        if not raw:
            break
        queue.put(raw.decode('utf-8').strip())
    out.close()
@persistent
def on_addon_save(dummy):
    """Pre-save handler: refresh every generated material from its workflow's
    'out' directory so saved files reference up-to-date textures."""
    workflows = (
        ("matgan", "MaterialGAN_Path", update_matgan),
        ("neural", "Neural_Path", update_neural),
        ("mix", "Algorithmic_Path", update_mix),
    )
    for mat in bpy.data.materials:
        for suffix, path_key, refresh in workflows:
            if suffix not in mat.name:
                continue
            # Material names look like "<object>_<suffix>_mat"; recover the
            # owning object's name.
            match = re.match(f".+?(?=_{suffix}_mat)", mat.name)
            obj_name = match[0] if match else ""
            if obj_name in bpy.data.objects:
                obj = bpy.data.objects[obj_name]
                refresh(obj, os.path.join(obj[path_key], 'out'))
            break  # mirror the original if/elif chain: first match only
@persistent
def on_addon_load(dummy):
    """Post-load handler: reset worker state, import bundled assets, clear cache."""
    # Forget subprocess handles left over from a previous Blender session.
    MAT_OT_MATGAN_GetInterpolations._popen = None
    MAT_OT_MATGAN_Generator._popen = None
    MAT_OT_MATGAN_InputFromFlashImage._popen = None
    MAT_OT_MATGAN_SuperResolution._popen = None

    # Append the base materials and the required node groups from the
    # bundled final.blend library (copied, not linked).
    blender_path = os.path.join(Path(__file__).parent.resolve(), 'final.blend')
    with bpy.data.libraries.load(blender_path, link=False) as (data_from, data_to):
        data_to.materials = [mat for mat in data_from.materials]
        group_list = ['photo_to_pbr', 'Aluminium', 'Wood', 'Plastic', 'Plaster', 'Leather', 'Silk', 'Concrete', 'Marble']
        data_to.node_groups = [n for n in data_from.node_groups if n in group_list]

    # Start each session with an empty on-disk cache directory.
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
    else:
        for root, dirs, files in os.walk(cache_path):
            for f in files:
                os.unlink(os.path.join(root, f))
            for d in dirs:
                shutil.rmtree(os.path.join(root, d))

    # Load the preview images used by the algorithmic (mix) workflow gallery.
    names = ['Aluminium', 'Wood', 'Plastic', 'Plaster', 'Leather', 'Silk', 'Concrete', 'Marble']
    for i in names:
        img = bpy.data.images.load(os.path.join(Path(__file__).parent.resolve(), f'algorithmic/{i}.png'))
        img.name = i
        img.preview_ensure()
def update_active_mat(self, context):
    """EnumProperty update callback: attach the selected workflow's material
    to the active object (copying the base material on first use) and sync
    the workflow's directory property from the object's stored path."""
    active_obj = bpy.context.active_object
    if not active_obj:
        return
    workflow = context.scene.SelectWorkflow
    if workflow == 'MatGAN':
        base_name = "matgan_mat"
    elif workflow == 'NeuralMAT':
        base_name = "neural_mat"
    elif workflow == 'MixMAT':
        base_name = "mix_mat"
    name = f"{active_obj.name}_{base_name}"
    if name in bpy.data.materials:
        mat = bpy.data.materials[name]
    else:
        # First time this object uses the workflow: clone the base material.
        mat = bpy.data.materials[base_name].copy()
        mat.name = name
    active_obj.active_material = mat
    if workflow == 'MatGAN' and 'MaterialGAN_Path' in active_obj:
        bpy.context.scene.matgan_properties.directory = active_obj['MaterialGAN_Path']
    elif workflow == 'NeuralMAT' and 'Neural_Path' in active_obj:
        bpy.context.scene.neural_properties.directory = active_obj['Neural_Path']
    elif workflow == 'MixMAT' and 'Algorithmic_Path' in active_obj:
        bpy.context.scene.mixmat_properties.directory = active_obj['Algorithmic_Path']
# Copy files to .cache folder
def copy_to_cache(src_path, name):
    """Copy texture/checkpoint files (.png/.pt/.ckpt) from *src_path* into
    the add-on cache subfolder *name*, creating it if needed."""
    dst_path = os.path.join(cache_path, name)
    if not os.path.exists(dst_path):
        os.makedirs(dst_path)
    if not os.path.isdir(src_path):
        return
    wanted = (".png", ".pt", ".ckpt")
    for entry in os.listdir(os.fsencode(src_path)):
        fname = os.fsdecode(entry)
        if fname.endswith(wanted):
            shutil.copyfile(os.path.join(src_path, fname),
                            os.path.join(dst_path, fname))
def register():
    """Register load/save handlers and the workflow-selection scene property."""
    if on_addon_load not in bpy.app.handlers.load_post:
        bpy.app.handlers.load_post.append(on_addon_load)
    if on_addon_save not in bpy.app.handlers.save_pre:
        bpy.app.handlers.save_pre.append(on_addon_save)

    bpy.types.Scene.SelectWorkflow = bpy.props.EnumProperty(
        name='Material System Select',
        description='Selected Material System for editing and generation.',
        # BUG FIX: `items` was a set literal, whose iteration order is
        # arbitrary, making the enum/dropdown order nondeterministic.
        # A list keeps the item order stable. (Also fixed the
        # "generatiog" typo in the NeuralMAT description.)
        items=[
            ('MatGAN', 'MaterialGAN + LIIF', 'Using MaterialGAN for generation and LIIF model for upscaling. ' \
                + 'Editing implemented as vector space exploration.'),
            ('NeuralMAT', 'Neural Material', 'Using Neural Material model for generating. ' \
                + 'Editing implemented as material interpolations.'),
            ('MixMAT', 'Algorithmic generation', 'Using a Blender shader nodes approach for ' \
                + 'generating textures from albedo with mix blender shader nodes for editing.')
        ],
        default='MatGAN',
        update=update_active_mat
    )
def unregister():
    """Remove the application handlers installed by register()."""
    handlers = bpy.app.handlers
    if on_addon_load in handlers.load_post:
        handlers.load_post.remove(on_addon_load)
    if on_addon_save in handlers.save_pre:
        handlers.save_pre.remove(on_addon_save)
class MAT_PT_GeneratorPanel(Panel):
    """Node-editor side panel hosting the three material-generation workflows."""
    bl_space_type = "NODE_EDITOR"
    bl_region_type = "UI"
    bl_label = "Modifier operations"
    bl_category = "MaterialGenerator Util"

    thumb_scale = 8.0        # gallery thumbnail scale
    check_existing = False
    mix_preview = None

    def draw_matgan(self, context):
        """Draw the MaterialGAN workflow: generation, LIIF upscale, gallery."""
        layout = self.layout
        matgan = bpy.context.scene.matgan_properties
        # ================================================
        # Draw MaterialGAN props and operators
        # ================================================
        row = layout.row()
        row.prop(matgan, "progress", emboss=False, text="Status")
        row = layout.row()
        col = row.column()
        col.prop(matgan, "num_rend", text="Num of images")
        col = row.column()
        col.prop(matgan, "epochs", text="Epochs")
        row = layout.row()
        row.prop(matgan, "directory", text="Directory")
        row.operator("matgan.file_browser", icon="FILE_FOLDER", text="")
        row = layout.row()
        col = row.column()
        col.operator("matgan.input_from_images", text="Format flash images")
        row = layout.row()
        col = row.column()
        col.operator("matgan.mat_from_images", text="Generate Material")
        col = row.column()
        col.operator("matgan.stop_generator", text="", icon="PAUSE")
        layout.separator()

        # ================================================
        # Draw Upscale LIIF
        # ================================================
        row = layout.row()
        col = row.column()
        col.prop(matgan, "h_res", text="Height resolution")
        col = row.column()
        col.prop(matgan, "w_res", text="Width resolution")
        row = layout.row()
        row.operator("matgan.super_res", text="Upscale material")
        layout.separator()
        row = layout.row()
        row.operator("matgan.get_interpolations", text="Get interpolations")
        layout.separator()

        # ================================================
        # Draw Gallery view (only while no worker subprocess is running)
        # ================================================
        if MAT_OT_MATGAN_GetInterpolations._popen is None and MAT_OT_MATGAN_Generator._popen is None:
            row = layout.row()
            row.operator("matgan.revert_material", text="Revert material to previous")
            self.draw_gallery(context, matgan, "matgan")

    def draw_gallery(self, context, gan, mode):
        """Draw the 3x3 interpolation gallery for *mode* ('matgan'/'neural').

        The current material thumbnail is inserted in the centre (index 4)
        of the grid of semantic-edit previews.
        """
        x = MAT_OT_GalleryDirection.direction
        interp_dir = os.path.join(gan.directory, 'interps')
        out_dir = os.path.join(gan.directory, 'out')  # NOTE: currently unused
        rname = f"{bpy.context.active_object.name}_{mode}" if bpy.context.active_object else mode
        # Only draw once all 8 direction previews and the current render exist.
        if f'7_{x}_render.png' in bpy.data.images and f"{rname}_render.png" in bpy.data.images:
            layout = self.layout
            row = layout.row()
            sign = '+' if MAT_OT_GalleryDirection.direction == 1 else '-'
            row.operator("wm.edit_direction_toggle", text="Toggle direction")
            box = layout.box()
            cols = box.column_flow(columns=3)
            # Get images
            dir_list = sorted(glob.glob(interp_dir + f'/*_{x}_render.png'))
            id = 0  # NOTE: shadows the builtin `id`
            for dir in dir_list:
                if id == 4:
                    # Centre cell: the material as it currently is.
                    in_box = cols.box()
                    col = in_box.column()
                    img = bpy.data.images[f'{rname}_render.png']
                    img.preview_ensure()
                    col.template_icon(icon_value=img.preview.icon_id, scale=10)
                    col.label(text="Current material")
                name = os.path.split(dir)[1]
                img = bpy.data.images[name]
                img.preview_ensure()
                in_box = cols.box()
                col = in_box.column()
                col.template_icon(icon_value=img.preview.icon_id, scale=10)
                # First character of the filename encodes the semantic axis.
                operator = col.operator(f'{mode}.edit_move', text=f"Semantic {sign}{name[0]}")
                operator.direction = name[0]
                id += 1

    def draw_neural(self, context):
        """Draw the NeuralMaterial workflow: generation, reseed, gallery."""
        layout = self.layout
        neural = bpy.context.scene.neural_properties
        # ================================================
        # Draw NeuralMaterial props and operators
        # ================================================
        row = layout.row()
        row.prop(neural, "progress", emboss=False, text="Status")
        row = layout.row()
        col = row.column()
        col.prop(neural, "num_rend", text="Images")
        col = row.column()
        col.prop(neural, "epochs", text="Epochs")
        col = row.column()
        col.prop(neural, "seed", text="Seed")
        row = layout.row()
        col = row.column()
        col.prop(neural, "h_res", text="Height resolution")
        col = row.column()
        col.prop(neural, "w_res", text="Width resolution")
        row = layout.row()
        row.prop(neural, "directory", text="Directory")
        row.operator("neural.file_browser", icon="FILE_FOLDER", text="")
        row = layout.row()
        col = row.column()
        col.operator("neural.generator", text="Generate Material")
        col = row.column()
        col.operator("neural.stop_generator", text="", icon="PAUSE")
        row = layout.row()
        col = row.column()
        col.operator("neural.reseed", text="Upscale Material")
        layout.separator()

        # ================================================
        # Draw NeuralMaterial interpolations operator
        # ================================================
        row = layout.row()
        row.operator("neural.get_interpolations", text="Get interpolations")
        layout.separator()

        # ================================================
        # Draw Gallery view (only while no worker subprocess is running)
        # ================================================
        if MAT_OT_NEURAL_GetInterpolations._popen is None and MAT_OT_NEURAL_Generator._popen is None:
            row = layout.row()
            row.operator("neural.revert_material", text="Revert material to previous")
            self.draw_gallery(context, neural, "neural")

    def draw_mixmat(self, context):
        """Draw the algorithmic (shader-node mix) workflow."""
        layout = self.layout
        mix = bpy.context.scene.mixmat_properties
        # ================================================
        # Draw Mix Materials generator operator
        # ================================================
        row = layout.row()
        row.prop(mix, "progress", emboss=False, text="Status")
        row = layout.row()
        row.prop(mix, "directory", text="Directory")
        row.operator("mixmat.file_browser", icon="FILE_FOLDER", text="")
        row = layout.row()
        row.operator("mixmat.generator", text="Generate")
        layout.separator()

        # ================================================
        # Draw Mix material interpolations operator
        # ================================================
        row = layout.row()
        row.prop(mix, "material", text="Select")
        if 'Material' in mix.progress:
            # A material has been generated: expose the mix-level slider.
            row.prop(mix, "value", text="Mix level")
        layout.separator()
        row = layout.row()
        img = bpy.data.images[mix.material]
        row.template_icon(icon_value=img.preview.icon_id, scale=10)

    def draw(self, context):
        """Dispatch to the sub-panel of the currently selected workflow."""
        self.layout.prop(context.scene, 'SelectWorkflow')
        if context.scene.SelectWorkflow == 'MatGAN':
            self.draw_matgan(context)
        elif context.scene.SelectWorkflow == 'NeuralMAT':
            self.draw_neural(context)
        elif context.scene.SelectWorkflow == 'MixMAT':
            self.draw_mixmat(context)
class MAT_OT_StatusUpdater(Operator):
    """Modal operator that polls the active worker subprocess from a timer,
    streaming its stdout into the UI progress field and finalizing the
    material when the process exits."""
    bl_idname = "wm.modal_status_updater"
    bl_label = "Modal Status Updater"

    _sTime = 0        # wall-clock time when the operator started
    _timer = None     # window-manager timer driving the modal events
    _thread = None    # daemon thread draining the subprocess stdout
    _q = Queue()      # decoded stdout lines from the reader thread

    def modal(self, context, event):
        """Handle one timer tick: exactly one worker `_popen` is expected to
        be active; stream its output, or finish and cancel on exit."""
        gan = bpy.context.scene.matgan_properties
        if event.type == 'TIMER':
            # --- MaterialGAN: generation -------------------------------
            if MAT_OT_MATGAN_Generator._popen:
                if MAT_OT_MATGAN_Generator._popen.poll() is None:
                    # Still running: surface the latest stdout line, if any.
                    try:
                        line = self._q.get_nowait()
                        print(line)
                        update_matgan(bpy.context.active_object, os.path.join(gan.directory, 'out'))
                        gan.progress = line
                        gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                        redraw_all(context)
                    except:
                        # Queue empty (or transient UI error): retry next tick.
                        pass
                else:
                    # Process finished: cache the results and refresh.
                    name = f"{bpy.context.active_object.name}_matgan" if bpy.context.active_object else "matgan"
                    copy_to_cache(os.path.join(gan.directory, 'out'), name)
                    update_matgan(bpy.context.active_object, os.path.join(cache_path, name))
                    gan.progress = "Material generated."
                    redraw_all(context)
                    MAT_OT_MATGAN_Generator._popen = None
                    self.cancel(context)
                    gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                    return {'CANCELLED'}
            # --- MaterialGAN: flash-image formatting -------------------
            elif MAT_OT_MATGAN_InputFromFlashImage._popen:
                if MAT_OT_MATGAN_InputFromFlashImage._popen.poll() is None:
                    try:
                        line = self._q.get_nowait()
                        print(line)
                        gan.progress = line
                        gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                        redraw_all(context)
                    except:
                        pass
                else:
                    gan.progress = "Input ready."
                    gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                    redraw_all(context)
                    MAT_OT_MATGAN_InputFromFlashImage._popen = None
                    self.cancel(context)
                    return {'CANCELLED'}
            # --- MaterialGAN: LIIF super-resolution --------------------
            elif MAT_OT_MATGAN_SuperResolution._popen:
                if MAT_OT_MATGAN_SuperResolution._popen.poll() is not None:
                    gan.progress = "Material upscaled."
                    name = f"{bpy.context.active_object.name}_matgan" if bpy.context.active_object else "matgan"
                    copy_to_cache(os.path.join(gan.directory, 'out'), name)
                    update_matgan(bpy.context.active_object, os.path.join(cache_path, name))
                    redraw_all(context)
                    MAT_OT_MATGAN_SuperResolution._popen = None
                    self._thread = None
                    self.cancel(context)
                    gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                    return {'CANCELLED'}
            # --- MaterialGAN: interpolation previews -------------------
            elif MAT_OT_MATGAN_GetInterpolations._popen:
                if MAT_OT_MATGAN_GetInterpolations._popen.poll() is None:
                    try:
                        line = self._q.get_nowait()
                        print(line)
                        gan.progress = line
                        gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                        redraw_all(context)
                    except:
                        pass
                else:
                    # Load the current render and every interpolation preview
                    # into bpy.data.images for the gallery.
                    name = f"{bpy.context.active_object.name}_matgan" if bpy.context.active_object else "matgan"
                    check_remove_img(f'{name}_render.png')
                    img = bpy.data.images.load(os.path.join(gan.directory, 'out') + '/render.png')
                    img.name = f'{name}_render.png'
                    interp_path = os.path.join(gan.directory, 'interps')
                    dir_list = sorted(glob.glob(interp_path + '/*_*_render.png'))
                    for dir in dir_list:
                        check_remove_img(os.path.split(dir)[1])
                        img = bpy.data.images.load(dir)
                        img.name = os.path.split(dir)[1]
                    gan.progress = "Material interpolations generated."
                    gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                    redraw_all(context)
                    MAT_OT_MATGAN_GetInterpolations._popen = None
                    self.cancel(context)
                    return {'CANCELLED'}
            # --- NeuralMaterial: generation ----------------------------
            elif MAT_OT_NEURAL_Generator._popen:
                gan = bpy.context.scene.neural_properties
                if MAT_OT_NEURAL_Generator._popen.poll() is None:
                    try:
                        line = self._q.get_nowait()
                        print(line)
                        update_neural(bpy.context.active_object, os.path.join(gan.directory, 'out'))
                        gan.progress = line
                        gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                        redraw_all(context)
                    except:
                        pass
                else:
                    name = f"{bpy.context.active_object.name}_neural" if bpy.context.active_object else "neural"
                    copy_to_cache(os.path.join(gan.directory, 'out'), name)
                    update_neural(bpy.context.active_object, os.path.join(cache_path, name))
                    gan.progress = "Material generated."
                    gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                    redraw_all(context)
                    MAT_OT_NEURAL_Generator._popen = None
                    self.cancel(context)
                    return {'CANCELLED'}
            # --- NeuralMaterial: interpolation previews ----------------
            elif MAT_OT_NEURAL_GetInterpolations._popen:
                gan = bpy.context.scene.neural_properties
                if MAT_OT_NEURAL_GetInterpolations._popen.poll() is None:
                    try:
                        line = self._q.get_nowait()
                        print(line)
                        gan.progress = line
                        gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                        redraw_all(context)
                    except:
                        pass
                else:
                    name = f"{bpy.context.active_object.name}_neural" if bpy.context.active_object else "neural"
                    check_remove_img(f'{name}_render.png')
                    img = bpy.data.images.load(os.path.join(gan.directory, 'out') + '/render.png')
                    img.name = f'{name}_render.png'
                    interp_path = os.path.join(gan.directory, 'interps')
                    dir_list = sorted(glob.glob(interp_path + '/*_*_render.png'))
                    for dir in dir_list:
                        check_remove_img(os.path.split(dir)[1])
                        img = bpy.data.images.load(dir)
                        img.name = os.path.split(dir)[1]
                    gan.progress = "Material interpolations generated."
                    gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
                    copy_to_cache(os.path.join(gan.directory, 'out'), name)
                    update_neural(bpy.context.active_object, os.path.join(cache_path, name))
                    redraw_all(context)
                    MAT_OT_NEURAL_GetInterpolations._popen = None
                    self.cancel(context)
                    return {'CANCELLED'}
            else:
                # No worker is active any more: stop the timer.
                self.cancel(context)
                return {'CANCELLED'}
        return {'PASS_THROUGH'}

    def execute(self, context):
        """Start the 0.1 s timer and spawn the stdout reader thread for
        whichever worker subprocess is currently active."""
        self._sTime = time.time()
        wm = context.window_manager
        self._timer = wm.event_timer_add(0.1, window=context.window)
        wm.modal_handler_add(self)
        if MAT_OT_MATGAN_Generator._popen:
            self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_Generator._popen.stdout, self._q), daemon=True)
        elif MAT_OT_MATGAN_InputFromFlashImage._popen:
            self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_InputFromFlashImage._popen.stdout, self._q), daemon=True)
        elif MAT_OT_MATGAN_GetInterpolations._popen:
            self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_GetInterpolations._popen.stdout, self._q), daemon=True)
        elif MAT_OT_MATGAN_SuperResolution._popen:
            self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_SuperResolution._popen.stdout, self._q), daemon=True)
        elif MAT_OT_NEURAL_Generator._popen:
            self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_NEURAL_Generator._popen.stdout, self._q), daemon=True)
        elif MAT_OT_NEURAL_GetInterpolations._popen:
            self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_NEURAL_GetInterpolations._popen.stdout, self._q), daemon=True)
        self._thread.start()
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        """Remove the window-manager timer installed by execute()."""
        wm = context.window_manager
        wm.event_timer_remove(self._timer)
class MAT_OT_GalleryDirection(Operator):
    """Toggle which of the two semantic edit directions the gallery shows."""
    bl_idname = "wm.edit_direction_toggle"
    bl_label = "Direction switch operator"

    direction = 1  # 1 or 2; mirrored into the matgan/neural property groups

    def execute(self, context):
        new_direction = 2 if MAT_OT_GalleryDirection.direction == 1 else 1
        MAT_OT_GalleryDirection.direction = new_direction
        bpy.context.scene.matgan_properties.direction = new_direction
        bpy.context.scene.neural_properties.direction = new_direction
        return {'FINISHED'}
run.py | kbeyer/RPi-LED-SpectrumAnalyzer | 14 | 17567 | """ Main entry point for running the demo. """
# Standard library
import time
import sys
# Third party library
import alsaaudio as aa
# Local library
from char import show_text
from hs_logo import draw_logo
from leds import ColumnedLEDStrip
from music import calculate_levels, read_musicfile_in_chunks, calculate_column_frequency
from shairplay import initialize_shairplay, shutdown_shairplay, RaopCallbacks
COLUMNS = 12          # number of spectrum bands / LED columns
GAP_LEDS = 0          # unlit LEDs between columns
TOTAL_LEDS = 100      # total LEDs on the strip
SKIP_LEDS = 4         # LEDs skipped at the start of the strip
SAMPLE_RATE = 44100   # capture rate in Hz
NUM_CHANNELS = 2      # stereo capture
# NOTE(review): get_audio_input() actually requests PCM_FORMAT_S16_BE, not
# this constant -- confirm which byte order is intended.
FORMAT = aa.PCM_FORMAT_S16_LE
PERIOD_SIZE = 2048    # ALSA period size in frames

# Per-column (min, max) frequency bands used by the analyzer.
frequency_limits = calculate_column_frequency(200, 10000, COLUMNS)
def analyze_airplay_input(led_strip):
    """Visualize AirPlay audio on *led_strip* until interrupted with Ctrl-C.

    Audio is processed entirely by the shairplay callbacks; this thread only
    has to stay alive.
    """
    from os.path import join
    lib_path = join(sys.prefix, 'lib')
    initialize_shairplay(lib_path, get_shairplay_callback_class(led_strip))
    # BUG FIX: the original `while True: try: pass except KeyboardInterrupt`
    # busy-waited at 100% CPU, and the interrupt could fire at the `while`
    # test (outside the try), escaping the handler and skipping shutdown.
    # Sleeping inside a single try block fixes both.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        shutdown_shairplay()
def analyze_audio_file(led_strip, path):
    """Play the audio file at *path* while visualizing it on *led_strip*."""
    chunks = read_musicfile_in_chunks(path, play_audio=True)
    for chunk, sample_rate in chunks:
        levels = calculate_levels(chunk, sample_rate, frequency_limits)
        led_strip.display_data(levels)
def analyze_line_in(led_strip, hacker_school=True):
    """Visualize the ALSA line-in on *led_strip*; optionally interleave the
    Hacker School animation every two minutes."""
    # BUG FIX: the module-level name `input` was never assigned, so reads hit
    # the `input` builtin (which has no .read()). Open the capture device here.
    audio_input = get_audio_input()
    start_time = time.time()
    while True:
        if hacker_school and time.time() - start_time > 60 * 2:
            # BUG FIX: was called without the required led_strip argument.
            hacker_school_display(led_strip)
            start_time = time.time()
        size, chunk = audio_input.read()
        if size > 0:
            # Truncate to whole 16-bit frames (// keeps this integer math
            # correct on both Python 2 and 3).
            usable = len(chunk) // 2 * 2
            chunk = chunk[:usable]
            data = calculate_levels(chunk, SAMPLE_RATE, frequency_limits)
            # Columns are mirrored onto the strip.
            led_strip.display_data(data[::-1])
def get_audio_input():
    # Open the default ALSA capture device in non-blocking mode so reads
    # return immediately even when a full period is not yet available.
    input = aa.PCM(aa.PCM_CAPTURE, aa.PCM_NONBLOCK)
    input.setchannels(NUM_CHANNELS)
    # NOTE(review): this requests big-endian S16 samples even though the
    # module-level FORMAT constant is PCM_FORMAT_S16_LE -- confirm which
    # byte order calculate_levels() actually expects.
    input.setformat(aa.PCM_FORMAT_S16_BE)
    input.setrate(SAMPLE_RATE)
    input.setperiodsize(PERIOD_SIZE)
    return input
def get_led_strip():
    """Construct the columned LED strip and start with every LED off."""
    strip = ColumnedLEDStrip(
        leds=TOTAL_LEDS, columns=COLUMNS, gap_leds=GAP_LEDS, skip_leds=SKIP_LEDS
    )
    strip.all_off()
    return strip
def get_shairplay_callback_class(led_strip):
    # Factory: binds *led_strip* into a RaopCallbacks subclass that the
    # shairplay library invokes with decoded AirPlay audio.
    class SampleCallbacks(RaopCallbacks):
        def audio_init(self, bits, channels, samplerate):
            # Remember the stream format and derive the per-column bands.
            print "Initializing", bits, channels, samplerate
            self.bits = bits
            self.channels = channels
            self.samplerate = samplerate
            min_frequency = 500
            max_frequency = samplerate / 30 * 10  # Abusing integer division
            self.frequency_limits = calculate_column_frequency(
                min_frequency, max_frequency, COLUMNS
            )
            self.buffer = ''

        def audio_process(self, session, buffer):
            # Map the raw PCM buffer to per-column levels; columns are
            # mirrored onto the strip.
            data = calculate_levels(buffer, self.samplerate, self.frequency_limits, self.channels, self.bits)
            led_strip.display_data(data[::-1])

        def audio_destroy(self, session):
            print "Destroying"

        def audio_set_volume(self, session, volume):
            print "Set volume to", volume

        def audio_set_metadata(self, session, metadata):
            print "Got", len(metadata), "bytes of metadata"

        def audio_set_coverart(self, session, coverart):
            print "Got", len(coverart), "bytes of coverart"

    return SampleCallbacks
def hacker_school_display(led_strip):
    # Interstitial animation: draw the Hacker School logo, pause briefly,
    # then scroll the motto across the grid.
    draw_logo(led_strip)
    time.sleep(1)
    show_text(led_strip, 'NEVER GRADUATE!', x_offset=3, y_offset=1, sleep=0.5)
if __name__ == '__main__':
    # CLI entry point: pick an input source and run the visualizer.
    from textwrap import dedent

    input_types = ('local', 'linein', 'airplay')
    usage = dedent("""\
    Usage: %s <input-type> [additional arguments]

    input-type: should be one of %s

    To play a local file, you can pass the path to the file as an additional
    argument.

    """) % (sys.argv[0], input_types)

    if len(sys.argv) == 1:
        print usage
        sys.exit(1)

    input_type = sys.argv[1]
    led_strip = get_led_strip()

    if input_type == 'local':
        # Optional second argument: path to the audio file.
        path = sys.argv[2] if len(sys.argv) > 2 else 'sample.mp3'
        analyze_audio_file(led_strip, path)
    elif input_type == 'airplay':
        analyze_airplay_input(led_strip)
    elif input_type == 'linein':
        analyze_line_in(led_strip)
    else:
        print usage
        sys.exit(1)
| 2.46875 | 2 |
2020/07/solution.py | dglmoore/advent-of-code | 0 | 17568 | import re
def part1(lines, yourbag="shiny gold"):
    """Count the bag colours that can (transitively) contain *yourbag*.

    Builds a reverse adjacency map (inner bag -> outer bags that hold it),
    then walks outward from *yourbag* with an explicit stack, collecting
    every reachable container.  The start bag itself is excluded from the
    count.
    """
    # The first capture on each line is the outer bag; the rest are inners.
    bag_re = re.compile(r"(?:\d*)\s*(.*?)\s*bags?[.,]?(?: contain)?\s*")

    containers = {}
    for rule in lines:
        outer, *inners = bag_re.findall(rule)
        for inner in inners:
            if inner == 'no other':
                continue
            containers.setdefault(inner, []).append(outer)

    # Iterative reachability over the reverse graph.
    seen = set()
    frontier = [yourbag]
    while frontier:
        current = frontier.pop()
        if current in seen:
            continue
        seen.add(current)
        frontier.extend(containers.get(current, []))

    return len(seen) - 1
def part2(lines, yourbag="shiny gold"):
    """Count how many individual bags *yourbag* must contain in total.

    Builds a forward map (outer bag -> [(inner bag, count), ...]) and
    expands it iteratively, multiplying counts along the way.  The start
    bag itself is excluded from the total.
    """
    # Like part1's pattern, but also captures each inner bag's multiplicity.
    rule_re = re.compile(r"(\d*)\s*(.*?)\s*bags?[.,]?(?: contain)?\s*")

    contents = {}
    for rule in lines:
        (_, outer), *inners = rule_re.findall(rule)
        for count, inner in inners:
            if inner == 'no other':
                continue
            contents.setdefault(outer, []).append((inner, int(count)))

    total = 0
    pending = [(yourbag, 1)]
    while pending:
        bag, multiplier = pending.pop()
        total += multiplier
        for inner, quantity in contents.get(bag, []):
            pending.append((inner, multiplier * quantity))

    return total - 1
if __name__ == '__main__':
    # NOTE(review): reads "test.txt" (the sample input); point this at the
    # real puzzle input to produce submission answers.
    with open("test.txt") as handle:
        lines = handle.readlines()

    print("Part I: ", part1(lines))
    print("Part II:", part2(lines))
| 3.875 | 4 |
src/m6_your_turtles.py | polsteaj/01-IntroductionToPython | 0 | 17569 | <gh_stars>0
"""
Your chance to explore Loops and Turtles!
Authors: <NAME>, <NAME>, <NAME>, <NAME>,
their colleagues and <NAME>.
"""
import rosegraphics as rg
###############################################################################
# DONE: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
###############################################################################
###############################################################################
# DONE: 2.
# You should have RUN the m5e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
###############################################################################
# Draw two nested spirals of shrinking squares with two turtles.
window = rg.TurtleWindow()
# Blue turtle with a thick pen draws the outer squares.
my_turtle = rg.SimpleTurtle('turtle')
my_turtle.pen = rg.Pen('blue', 10)
my_turtle.speed = 10
# Red turtle with a thinner pen draws the inner squares, offset slightly.
your_turtle = rg.SimpleTurtle()
your_turtle.pen = rg.Pen('red', 5)
your_turtle.speed = 10
your_turtle.pen_up()
your_turtle.forward(3)
your_turtle.pen_down()
size = 300
# Each iteration: draw a square, step diagonally (pen up so the step is
# invisible), then shrink the square for the next pass.
for k in range(15):
    my_turtle.draw_square(size)
    my_turtle.pen_up()
    my_turtle.right(45)
    my_turtle.forward(10)
    my_turtle.left(45)
    my_turtle.pen_down()
    # Inner square is always 100 units smaller than the outer one.
    your_turtle.draw_square(size-100)
    your_turtle.pen_up()
    your_turtle.right(45)
    your_turtle.forward(10)
    your_turtle.left(45)
    your_turtle.pen_down()
    size = size - 20
# Keep the window open until the user clicks it.
window.close_on_mouse_click()
| 3.609375 | 4 |
docs/ResearchSession/manage.py | VoIlAlex/pytorchresearch | 1 | 17570 | from backbone import entry_point
# Script entry point: delegate to the backbone CLI entry point when run directly.
if __name__ == '__main__':
    entry_point.main()
| 1.078125 | 1 |
base.py | chenzhangyu/WeiboOAuth | 1 | 17571 | # encoding=utf-8
__author__ = 'lance'
import tornado.web
class BaseHandler(tornado.web.RequestHandler):
    """Common base class for this application's request handlers.

    Currently adds no behaviour beyond tornado's RequestHandler; shared
    helpers (auth, rendering, error handling) belong here.
    """
    pass
| 1.421875 | 1 |
paddlers/custom_models/cd/cdnet.py | huilin16/PaddleRS | 40 | 17572 | <filename>paddlers/custom_models/cd/cdnet.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
class CDNet(nn.Layer):
    """Change-detection network with early fusion.

    The two input images are concatenated along the channel axis and run
    through a symmetric conv/max-pool encoder followed by an
    unpool/conv decoder; the pooling indices from the encoder are reused
    for exact unpooling in the decoder.

    Args:
        in_channels: channels of the fused input (both images combined);
            the default 6 corresponds to two RGB images.
        num_classes: number of output classes (default 2).
    """
    def __init__(self, in_channels=6, num_classes=2):
        super(CDNet, self).__init__()
        # Encoder: four conv + pool stages. return_mask=True makes each
        # pool also return the argmax indices needed by MaxUnPool2D below.
        self.conv1 = Conv7x7(in_channels, 64, norm=True, act=True)
        self.pool1 = nn.MaxPool2D(2, 2, return_mask=True)
        self.conv2 = Conv7x7(64, 64, norm=True, act=True)
        self.pool2 = nn.MaxPool2D(2, 2, return_mask=True)
        self.conv3 = Conv7x7(64, 64, norm=True, act=True)
        self.pool3 = nn.MaxPool2D(2, 2, return_mask=True)
        self.conv4 = Conv7x7(64, 64, norm=True, act=True)
        self.pool4 = nn.MaxPool2D(2, 2, return_mask=True)
        # Decoder: mirror of the encoder, unpooling with the saved indices.
        self.conv5 = Conv7x7(64, 64, norm=True, act=True)
        self.upool4 = nn.MaxUnPool2D(2, 2)
        self.conv6 = Conv7x7(64, 64, norm=True, act=True)
        self.upool3 = nn.MaxUnPool2D(2, 2)
        self.conv7 = Conv7x7(64, 64, norm=True, act=True)
        self.upool2 = nn.MaxUnPool2D(2, 2)
        self.conv8 = Conv7x7(64, 64, norm=True, act=True)
        self.upool1 = nn.MaxUnPool2D(2, 2)
        # Final classifier conv; no norm/activation (raw logits).
        self.conv_out = Conv7x7(64, num_classes, norm=False, act=False)
    def forward(self, t1, t2):
        """Fuse the two image tensors and return a one-element list of logits."""
        x = paddle.concat([t1, t2], axis=1)
        # Encoder path, keeping each stage's pooling indices.
        x, ind1 = self.pool1(self.conv1(x))
        x, ind2 = self.pool2(self.conv2(x))
        x, ind3 = self.pool3(self.conv3(x))
        x, ind4 = self.pool4(self.conv4(x))
        # Decoder path, unpooling in reverse order of the encoder.
        x = self.conv5(self.upool4(x, ind4))
        x = self.conv6(self.upool3(x, ind3))
        x = self.conv7(self.upool2(x, ind2))
        x = self.conv8(self.upool1(x, ind1))
        return [self.conv_out(x)]
class Conv7x7(nn.Layer):
    """7x7 convolution with 3-pixel padding and optional BatchNorm/ReLU.

    Args:
        in_ch: input channel count.
        out_ch: output channel count.
        norm: append BatchNorm2D (and drop the conv bias, which BN makes
            redundant) when True.
        act: append ReLU when True.
    """
    def __init__(self, in_ch, out_ch, norm=False, act=False):
        super(Conv7x7, self).__init__()
        layers = [
            nn.Pad2D(3), nn.Conv2D(
                in_ch, out_ch, 7, bias_attr=(False if norm else None))
        ]
        if norm:
            layers.append(nn.BatchNorm2D(out_ch))
        if act:
            layers.append(nn.ReLU())
        self.layers = nn.Sequential(*layers)
    def forward(self, x):
        return self.layers(x)
if __name__ == "__main__":
t1 = paddle.randn((1, 3, 512, 512), dtype="float32")
t2 = paddle.randn((1, 3, 512, 512), dtype="float32")
model = CDNet(6, 2)
pred = model(t1, t2)[0]
print(pred.shape)
| 2.140625 | 2 |
contrib/make-leap-seconds.py | dmgerman/ntpsec | 0 | 17573 | <filename>contrib/make-leap-seconds.py
#!/usr/bin/env python
"""\
make-leap-seconds.py - make leap second file for testing
Optional args are date of leap second: YYYY-MM-DD
and expiration date of file.
Defaults are start of tomorrow (UTC), and 28 days after the leap.
"Start of tomorow" is as soon as possible for testing.
"""
# SPDX-License-Identifier: BSD-2-Clause
from __future__ import print_function, division
import datetime
import sha
import sys
import time
JAN_1970 = 2208988800 # convert Unix/POSIX epoch to NTP epoch
epoch = datetime.datetime.utcfromtimestamp(0)
args = sys.argv[1:]
leap = time.time()
days = int(leap/86400)
leap = (days+1)*86400
if len(args) > 0:
leapdate = datetime.datetime.strptime(args[0], "%Y-%m-%d")
leap = (leapdate - epoch).total_seconds()
leap = int(leap)
args = args[1:]
expire = leap + 28*86400
if len(args) > 0:
expiredate = datetime.datetime.strptime(args[0], "%Y-%m-%d")
expire = (expiredate - epoch).total_seconds()
expire = int(expire)
args = args[1:]
leap_txt = time.asctime(time.gmtime(leap))
leap = str(leap+JAN_1970)
expire_txt = time.asctime(time.gmtime(expire))
expire = str(expire+JAN_1970)
update = int(time.time())
update_txt = time.asctime(time.gmtime(update))
update = str(update+JAN_1970)
tai = "40" # hardwired
# File format
#
# # is comment
# #$ xxx Update Date
# #@ xxx Expiration Date
# #h SHA1 hash of payload
#
# #$ 3676924800
# #@ 3707596800
# 2272060800 10 # 1 Jan 1972
# #h dacf2c42 2c4765d6 3c797af8 2cf630eb 699c8c67
#
# All dates use NTP epoch of 1900-01-01
sha1 = sha.new()
print("%s %s # %s" % (leap, tai, leap_txt))
sha1.update(leap)
sha1.update(tai)
print("#@ %s # %s" % (expire, expire_txt))
sha1.update(expire)
print("#$ %s # %s" % (update, update_txt))
sha1.update(update)
digest = sha1.hexdigest()
print("#h %s %s %s %s %s" %
(digest[0:8], digest[8:16], digest[16:24], digest[24:32], digest[32:40]))
# end
| 3.40625 | 3 |
tests/profiling/test_scheduler.py | uniq10/dd-trace-py | 1 | 17574 | <reponame>uniq10/dd-trace-py
# -*- encoding: utf-8 -*-
from ddtrace.profiling import event
from ddtrace.profiling import exporter
from ddtrace.profiling import recorder
from ddtrace.profiling import scheduler
class _FailExporter(exporter.Exporter):
    """Test double: an exporter whose export() always raises."""
    @staticmethod
    def export(events):
        # Deliberately blow up so the scheduler's error handling is exercised.
        raise Exception("BOO!")
def test_exporter_failure():
    """Scheduler.flush() must survive an exporter that raises."""
    r = recorder.Recorder()
    exp = _FailExporter()
    s = scheduler.Scheduler(r, [exp])
    r.push_events([event.Event()] * 10)
    # No assertion needed: the test passes if flush() does not propagate
    # the exporter's exception.
    s.flush()
def test_thread_name():
    """The scheduler's worker thread must carry its qualified name."""
    r = recorder.Recorder()
    exp = exporter.NullExporter()
    s = scheduler.Scheduler(r, [exp])
    s.start()
    assert s._worker.name == "ddtrace.profiling.scheduler:Scheduler"
    # Stop the background worker so the test does not leak a thread.
    s.stop()
| 2.078125 | 2 |
scrape_reviews/scrape_reviews/spiders/imdb_spider.py | eshwarkoka/sentiment_analysis_on_movie_reviews | 0 | 17575 | import scrapy,json,re,time,os,glob
from scrapy.exceptions import CloseSpider
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
#get all the imdb xpaths from xpaths.json file
with open('./locators/xpaths.json') as f:
xpaths = json.load(f)
imdb = xpaths["imdb"][0]
#define all the required variables
movie_name = ''
project_path = r'/Users/eshwar/Documents/projects/sentiment_analysis_on_movie_reviews/'
scraped_reviews_path = project_path + "data/scraped_reviews/"
predicted_reviews_path = project_path + "data/predicted_reviews/"
chrome_driver_path = project_path+"scrape_reviews/chrome_driver/chromedriver"
class IMDBSpider(scrapy.Spider):
    """Scrape all user reviews for a movie from IMDB.

    The spider searches IMDB for the title passed via the `ip` argument,
    takes the first search result, then drives a headless Chrome through
    the reviews page (clicking "load more" until exhausted) and writes the
    collected reviews to a timestamped JSON file.  A cached result younger
    than one week short-circuits the scrape.
    """
    name = 'imdb_spider'
    allowed_domains = ["imdb.com"]
    start_urls = [
        'https://www.imdb.com/find?ref_=nv_sr_fn&q='
    ]
    def start_requests(self):
        """Issue the IMDB title-search request for the query in self.ip."""
        for url in self.start_urls:
            yield scrapy.Request(url+self.ip+"&s=tt" , dont_filter=True)
    def parse(self, response):
        """Parse the search results and scrape reviews for the first hit."""
        #get all the globally defined variables
        global movie_name, project_path, scraped_reviews_path, chrome_driver_path
        #get first title
        first_title = response.xpath(imdb["first_title"]).extract()
        #extract title id (the "tt..." path segment) from first title
        for each_split in first_title[0].split("/"):
            if each_split.startswith("tt"):
                title_id = each_split
        #extract movie name from the first title's anchor text
        movie_name = str(re.search(r'">(.+?)</a>', str(first_title[0])).group(1)).replace(" ","_")
        temp_movie_name = movie_name
        #append a "$#$<epoch>" timestamp so cache age can be checked later
        epoch = time.time()
        movie_name+="$#$"+str(epoch)
        # create temp file to store movie name temporarily
        with open(scraped_reviews_path + "temp.txt", 'w') as f:
            f.write(movie_name)
        #check timestamp of any previous scrape of the same movie
        current_dir = os.getcwd()
        change_dir = scraped_reviews_path
        os.chdir(change_dir)
        temp = temp_movie_name+"$#$"
        old_file_name = glob.glob(temp+"*")
        diff = 0
        #flag determines if searched movie is already searched within a week or not
        #flag = 0 (file available)
        #flag = 1 (new search)
        flag = 1
        if len(old_file_name) > 0:
            old_file_name = old_file_name[0]
            # [:-5] strips the ".json" suffix to recover the epoch string.
            old_timestamp = old_file_name.split("$#$")[1][:-5]
            diff = epoch - float(old_timestamp)
            if diff < 604800:
                # Cached result is less than a week (604800 s) old: record
                # the flag for the caller and abort the crawl.
                flag = 0
                with open(project_path+"flag.txt", "w") as f:
                    f.write(str(flag))
                raise CloseSpider('file available')
            else:
                # Stale cache: drop the old scraped and predicted files.
                os.remove(scraped_reviews_path+old_file_name)
                os.remove(predicted_reviews_path+old_file_name)
        os.chdir(current_dir)
        #form imdb reviews link
        reviews_link = imdb["urv_link_part_1"] + title_id + imdb["urv_link_part_2"]
        #start a headless Chrome via the bundled chromedriver
        options = Options()
        options.headless = True
        chrome_driver = webdriver.Chrome(chrome_driver_path, chrome_options=options)
        #go to reviews link
        chrome_driver.get(reviews_link)
        #click load more button until the button exists
        while True:
            try:
                WebDriverWait(chrome_driver, 10).until(EC.element_to_be_clickable((By.XPATH, imdb["load_more_button"]))).click()
            except TimeoutException as ex:
                # No clickable "load more" within 10 s: all reviews loaded.
                break
        #get the number of reviews
        num_of_reviews = chrome_driver.find_element_by_xpath(imdb["number_of_reviews"]).text
        reviews_no = num_of_reviews.split()[0]
        print(reviews_no)
        #open all the spoiler-hidden reviews so their text is extractable
        spoiler_click = chrome_driver.find_elements_by_xpath(imdb["spoiler_open"])
        for i in range(0, len(spoiler_click)):
            if spoiler_click[i].is_displayed():
                spoiler_click[i].click()
        #get all the reviews
        reviews = chrome_driver.find_elements_by_xpath(imdb["reviews"])
        #convert reviews to list (newlines flattened to spaces)
        reviews_list = [str(review.text).replace("\n"," ") for review in reviews]
        #get all the authors
        authors = chrome_driver.find_elements_by_xpath(imdb["authors"])
        #convert authors to list
        authors_list = [a.text for a in authors]
        #get all the review dates
        review_dates = chrome_driver.find_elements_by_xpath(imdb["review_dates"])
        #convert review dates to list
        review_dates_list = [rd.text for rd in review_dates]
        #get all the titles
        titles = chrome_driver.find_elements_by_xpath(imdb["titles"])
        #convert titles to list
        titles_list = [str(t.text).replace("\n", " ") for t in titles]
        #create json_data variable with authors, review dates, titles and reviews
        # NOTE(review): the loop variable `re` below shadows the `re` module
        # inside this comprehension — harmless here but worth renaming.
        json_data = [
            {
                "author" : a,
                "review_date" : rd,
                "title" : t,
                "review" : re
            } for a, rd, t, re in zip(authors_list, review_dates_list, titles_list, reviews_list)
        ]
        output_filename = scraped_reviews_path + movie_name + ".json"
        with open(output_filename, 'w') as f:
            json.dump(json_data, f, ensure_ascii=False, indent=4)
        #close the chrome driver
        chrome_driver.close()
| 2.625 | 3 |
backend/notifications/admin.py | ProgrammingLanguageLeader/TutorsApp | 3 | 17576 | from django.contrib import admin
from notifications.models import Notification
@admin.register(Notification)
class NotificationAdmin(admin.ModelAdmin):
    """Django admin configuration for Notification objects."""
    # Columns shown in the changelist view.
    list_display = (
        'sender',
        'recipient',
        'creation_time',
        'verb',
        'unread',
    )
    # Sidebar filters available in the changelist.
    list_filter = (
        'sender',
        'recipient',
        'unread',
        'verb',
    )
    # Fields searched by the admin search box.
    search_fields = (
        'verb',
    )
| 1.6875 | 2 |
logistic-regression/code.py | kalpeshsnaik09/ga-learner-dsmp-repo | 0 | 17577 | # --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
# NOTE(review): `path` is not defined in this file — it is injected by the
# hosting platform (points at the insurance CSV). Confirm before running
# standalone.
df=pd.read_csv(path)
print(df.head())
# Target is the binary `insuranceclaim` column; everything else is a feature.
X=df.drop(columns='insuranceclaim')
y=df['insuranceclaim']
# 80/20 train/test split with a fixed seed for reproducibility.
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=6)
# Code ends here
# --------------
import matplotlib.pyplot as plt
# Code starts here
# Outlier inspection: boxplot of BMI and its 95th percentile.
plt.boxplot(X_train['bmi'])
plt.show()
q_value=X_train['bmi'].quantile(0.95)
print(y_train.value_counts())
# Code ends here
# --------------
import seaborn as sns
# Code starts here
# Pairwise feature correlations plus a scatter-matrix visualisation.
relation=X_train.corr()
print(relation)
sns.pairplot(X_train)
plt.show()
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
# Count plots of four categorical features, split by the claim label,
# laid out on a 2x2 grid.
cols=['children','sex','region','smoker']
fig,axes=plt.subplots(2,2)
for i in range(2):
    for j in range(2):
        col=cols[i*2+j]
        sns.countplot(X_train[col],hue=y_train,ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
# parameters for grid search
from sklearn.metrics import accuracy_score
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
# Grid-search the regularisation strength C of a logistic regression.
lr=LogisticRegression(random_state=9)
grid=GridSearchCV(estimator=lr,param_grid=parameters)
grid.fit(X_train,y_train)
y_pred=grid.predict(X_test)
accuracy=accuracy_score(y_test,y_pred)
print(accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Code starts here
# ROC analysis: AUC from hard predictions vs. from class probabilities,
# and the ROC curve itself.
score=roc_auc_score(y_test,y_pred)
y_pred_proba=grid.predict_proba(X_test)[:,1]
fpr,tpr,_=metrics.roc_curve(y_test,y_pred)
roc_auc=roc_auc_score(y_test,y_pred_proba)
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
# Code ends here
| 2.796875 | 3 |
macro_benchmark/SSD_Tensorflow/caffe_to_tensorflow.py | songhappy/ai-matrix | 180 | 17578 | """Convert a Caffe model file to TensorFlow checkpoint format.
Assume that the network built is a equivalent (or a sub-) to the Caffe
definition.
"""
import tensorflow as tf
from nets import caffe_scope
from nets import nets_factory
slim = tf.contrib.slim
# =========================================================================== #
# Main flags.
# =========================================================================== #
tf.app.flags.DEFINE_string(
    'model_name', 'ssd_300_vgg', 'Name of the model to convert.')
# NOTE(review): declared as a string flag but given an int default; main()
# compensates with int(FLAGS.num_classes) — DEFINE_integer would be cleaner.
tf.app.flags.DEFINE_string(
    'num_classes', 21, 'Number of classes in the dataset.')
tf.app.flags.DEFINE_string(
    'caffemodel_path', None,
    'The path to the Caffe model file to convert.')
FLAGS = tf.app.flags.FLAGS
# =========================================================================== #
# Main converting routine.
# =========================================================================== #
def main(_):
    """Build the SSD graph with Caffe weights and save a TF checkpoint.

    Reads FLAGS.caffemodel_path, instantiates the network named by
    FLAGS.model_name with FLAGS.num_classes classes, initialises all
    variables from the Caffe weights via the model's Caffe arg scope, and
    writes <caffemodel_path with .ckpt suffix>.
    """
    # Caffe scope: loads the .caffemodel weights for later injection.
    caffemodel = caffe_scope.CaffeScope()
    caffemodel.load(FLAGS.caffemodel_path)
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        global_step = slim.create_global_step()
        num_classes = int(FLAGS.num_classes)
        # Select the network and override its class count.
        ssd_class = nets_factory.get_network(FLAGS.model_name)
        ssd_params = ssd_class.default_params._replace(num_classes=num_classes)
        ssd_net = ssd_class(ssd_params)
        ssd_shape = ssd_net.params.img_shape
        # Image placeholder (batch of 1) and model construction.
        shape = (1, ssd_shape[0], ssd_shape[1], 3)
        img_input = tf.placeholder(shape=shape, dtype=tf.float32)
        # Create model: the Caffe arg scope wires initialisers to the
        # loaded Caffe weights.
        with slim.arg_scope(ssd_net.arg_scope_caffe(caffemodel)):
            ssd_net.net(img_input, is_training=False)
        init_op = tf.global_variables_initializer()
        with tf.Session() as session:
            # Run the init operation (this copies the Caffe weights in).
            session.run(init_op)
            # Save model in checkpoint next to the source .caffemodel.
            saver = tf.train.Saver()
            ckpt_path = FLAGS.caffemodel_path.replace('.caffemodel', '.ckpt')
            saver.save(session, ckpt_path, write_meta_graph=False)
if __name__ == '__main__':
    tf.app.run()
| 3 | 3 |
setup.py | danihodovic/django-toolshed | 3 | 17579 | <gh_stars>1-10
#!/usr/bin/env python
import os
import re
from setuptools import find_packages, setup
def get_version(*file_paths):
    """Extract the ``__version__`` string from a file next to this script.

    Args:
        *file_paths: path components joined onto this file's directory.

    Returns:
        The version string, e.g. "1.2.3".

    Raises:
        RuntimeError: if the file contains no ``__version__ = "..."`` line.
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection.
    with open(filename) as version_file:
        contents = version_file.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
# Package metadata: version is read from the package, the long description
# from the README.
version = get_version("django_toolshed", "__init__.py")
# Read the README with a context manager (the original leaked the handle)
# and an explicit encoding so the build is platform-independent.
with open("README.md", encoding="utf-8") as readme_file:
    readme = readme_file.read()
setup(
    name="django-toolshed",
    version=version,
    description="""Your project description goes here""",
    long_description=readme,
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/danihodovic/django-toolshed",
    packages=find_packages(),
    include_package_data=True,
    install_requires=[],
    license="MIT",
    keywords="django,app",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Framework :: Django :: 2.0",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
    ],
)
| 1.890625 | 2 |
detection/models/roi_extractors/roi_align.py | waiiinta/object_detection_lab | 13 | 17580 | import tensorflow as tf
from detection.utils.misc import *
class PyramidROIAlign(tf.keras.layers.Layer):
    def __init__(self, pool_shape, **kwargs):
        '''
        Implements ROI Pooling on multiple levels of the feature pyramid.

        Attributes
        ---
            pool_shape: (height, width) of the output pooled regions.
                Example: (7, 7)
        '''
        super(PyramidROIAlign, self).__init__(**kwargs)
        self.pool_shape = tuple(pool_shape)
    def call(self, inputs, training=True):
        '''
        Assigns each ROI to a pyramid level based on its area (FPN paper,
        Eq. 1), crop-and-resizes it from that level's feature map, then
        restores the original ROI order.

        Args
        ---
            rois_list: list of [num_rois, (y1, x1, y2, x2)] in normalized coordinates.
            feature_map_list: List of [batch, height, width, channels].
                feature maps from different levels of the pyramid.
            img_metas: [batch_size, 11]

        Returns
        ---
            pooled_rois_list: list of [num_rois, pooled_height, pooled_width, channels].
                The width and height are those specified in the pool_shape in the
                layer constructor.
        '''
        rois_list, feature_map_list, img_metas = inputs # [2000 ,4], list:[P2, P3, P4, P5]
        pad_shapes = calc_pad_shapes(img_metas)
        pad_areas = pad_shapes[:, 0] * pad_shapes[:, 1] # padded image area, e.g. 1216*1216
        num_rois_list = [rois.shape.as_list()[0] for rois in rois_list] # e.g. [2000]
        # One batch index per ROI (crop_and_resize needs per-box image indices).
        roi_indices = tf.constant(
            [i for i in range(len(rois_list)) for _ in range(rois_list[i].shape.as_list()[0])],
            dtype=tf.int32
        ) # [0.....], shape:[2000]
        # Per-ROI padded-image area, repeated to align with the flat ROI list.
        areas = tf.constant(
            [pad_areas[i] for i in range(pad_areas.shape[0]) for _ in range(num_rois_list[i])],
            dtype=tf.float32
        ) # [1216*1216, 1216*1216,...], shape:[2000]
        rois = tf.concat(rois_list, axis=0) # [2000, 4]
        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(rois, 4, axis=1) # 4 of [2000, 1]
        h = y2 - y1 # [2000, 1]
        w = x2 - x1 # [2000, 1]
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        roi_level = tf.math.log( # [2000]
            tf.sqrt(tf.squeeze(h * w, 1))
            / tf.cast((224.0 / tf.sqrt(areas * 1.0)), tf.float32)
        ) / tf.math.log(2.0)
        roi_level = tf.minimum(5, tf.maximum( # [2000], clamp to [2-5]
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        # roi_level will indicates which level of feature to use
        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled_rois = []
        roi_to_level = []
        for i, level in enumerate(range(2, 6)): # 2,3,4,5
            ix = tf.where(tf.equal(roi_level, level)) # indices of ROIs assigned to this level
            level_rois = tf.gather_nd(rois, ix) # boxes to crop from this level
            # ROI indices for crop_and_resize.
            level_roi_indices = tf.gather_nd(roi_indices, ix)
            # Keep track of which roi is mapped to which level
            roi_to_level.append(ix)
            # Stop gradient propogation to ROI proposals
            level_rois = tf.stop_gradient(level_rois)
            level_roi_indices = tf.stop_gradient(level_roi_indices)
            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_rois, pool_height, pool_width, channels]
            pooled_rois.append(tf.image.crop_and_resize(
                feature_map_list[i], level_rois, level_roi_indices, self.pool_shape,
                method="bilinear"))
        # Pack pooled features into one tensor (grouped by level, so out of
        # the original ROI order).
        pooled_rois = tf.concat(pooled_rois, axis=0)
        # Pack roi_to_level mapping into one array and add another
        # column representing the order of pooled rois
        roi_to_level = tf.concat(roi_to_level, axis=0)
        roi_range = tf.expand_dims(tf.range(tf.shape(roi_to_level)[0]), 1)
        roi_to_level = tf.concat([tf.cast(roi_to_level, tf.int32), roi_range],
                                 axis=1) # [2000, 2], (original index, pooled order)
        # Rearrange pooled features to match the order of the original rois
        # Sort roi_to_level by batch then roi index
        # TF doesn't have a way to sort by two columns, so merge them and sort.
        sorting_tensor = roi_to_level[:, 0] * 100000 + roi_to_level[:, 1]
        ix = tf.nn.top_k(sorting_tensor, k=tf.shape( # top_k sorts descending,
            roi_to_level)[0]).indices[::-1] # so reverse to get ascending order
        ix = tf.gather(roi_to_level[:, 1], ix) # [2000]
        pooled_rois = tf.gather(pooled_rois, ix) # back in original ROI order
        # Split back into the per-image list structure of the input.
        pooled_rois_list = tf.split(pooled_rois, num_rois_list, axis=0)
        return pooled_rois_list
| 2.625 | 3 |
examples/simple_regex/routes/__init__.py | nekonoshiri/tiny-router | 0 | 17581 | from ..router import Router
from . import create_user, get_user
# Top-level router that aggregates the per-endpoint sub-routers.
router = Router()
router.include(get_user.router)
router.include(create_user.router)
| 1.578125 | 2 |
poetry/packages/constraints/any_constraint.py | vanyakosmos/poetry | 2 | 17582 | <reponame>vanyakosmos/poetry<filename>poetry/packages/constraints/any_constraint.py
from .base_constraint import BaseConstraint
from .empty_constraint import EmptyConstraint
class AnyConstraint(BaseConstraint):
    """The wildcard constraint ("*"): it is satisfied by everything."""

    def allows(self, other):
        return True

    def allows_all(self, other):
        return True

    def allows_any(self, other):
        return True

    def difference(self, other):
        # Removing "any" from "any" leaves nothing; removing anything
        # narrower leaves exactly that narrower constraint.
        return EmptyConstraint() if other.is_any() else other

    def intersect(self, other):
        # "any" is the identity element for intersection.
        return other

    def union(self, other):
        # The union with "any" is always "any".
        return AnyConstraint()

    def is_any(self):
        return True

    def is_empty(self):
        return False

    def __str__(self):
        return "*"

    def __eq__(self, other):
        # Two constraints are equal here exactly when the other one is
        # also the wildcard.
        return other.is_any()
| 2.59375 | 3 |
lib/rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py | brianv0/rucio | 0 | 17583 | <reponame>brianv0/rucio
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <<EMAIL>>, 2015
"""Create sources table
Revision ID: 22d887e4ec0a
Revises: 1a80adff031a
Create Date: 2015-03-30 11:37:20.737582
"""
from alembic import context, op
import sqlalchemy as sa
from rucio.db.sqla.types import GUID
# revision identifiers, used by Alembic.
# (down_revision is the migration this one builds on.)
revision = '22d887e4ec0a'
down_revision = '1a80adff031a'
def upgrade():
    """Create the `sources` table plus its keys, constraints and index.

    Constraint/index DDL is skipped on SQLite, which does not support
    adding them via ALTER after table creation.
    """
    op.create_table('sources',
                    sa.Column('request_id', GUID()),
                    sa.Column('scope', sa.String(25)),
                    sa.Column('name', sa.String(255)),
                    sa.Column('rse_id', GUID()),
                    sa.Column('dest_rse_id', GUID()),
                    sa.Column('url', sa.String(2048)),
                    sa.Column('ranking', sa.Integer),
                    sa.Column('bytes', sa.BigInteger),
                    sa.Column('updated_at', sa.DateTime),
                    sa.Column('created_at', sa.DateTime))
    if context.get_context().dialect.name != 'sqlite':
        op.create_primary_key('SOURCES_PK', 'sources', ['request_id', 'rse_id', 'scope', 'name'])
        op.create_foreign_key('SOURCES_REQ_ID_FK', 'sources', 'requests', ['request_id'], ['id'])
        op.create_foreign_key('SOURCES_REPLICAS_FK', 'sources', 'replicas', ['scope', 'name', 'rse_id'], ['scope', 'name', 'rse_id'])
        op.create_foreign_key('SOURCES_RSES_FK', 'sources', 'rses', ['rse_id'], ['id'])
        op.create_foreign_key('SOURCES_DST_RSES_FK', 'sources', 'rses', ['dest_rse_id'], ['id'])
        op.create_check_constraint('SOURCES_CREATED_NN', 'sources', 'created_at is not null')
        op.create_check_constraint('SOURCES_UPDATED_NN', 'sources', 'updated_at is not null')
        op.create_index('SOURCES_SRC_DST_IDX', 'sources', ['rse_id', 'dest_rse_id'])
def downgrade():
    """Revert the migration by dropping the `sources` table."""
    op.drop_table('sources')
| 1.890625 | 2 |
algorithms/FdGars/FdGars_main.py | ss1004124654/DGFraud-TF2 | 51 | 17584 | <reponame>ss1004124654/DGFraud-TF2
"""
This code is attributed to <NAME> (@YingtongDou) and UIC BDSC Lab
DGFraud (A Deep Graph-based Toolbox for Fraud Detection in TensorFlow 2.X)
https://github.com/safe-graph/DGFraud-TF2
"""
import argparse
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras import optimizers
from algorithms.FdGars.FdGars import FdGars
from utils.data_loader import load_data_dblp
from utils.utils import preprocess_adj, preprocess_feature, sample_mask
# init the common args, expect the model specific args
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=123, help='random seed')
parser.add_argument('--epochs', type=int, default=200,
                    help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=512,
                    help='batch size')
parser.add_argument('--train_size', type=float, default=0.2,
                    help='training set percentage')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout rate')
parser.add_argument('--weight_decay', type=float, default=0.001,
                    help='weight decay')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--nhid', type=int, default=64,
                    help='number of hidden units in GCN')
args = parser.parse_args()
# set seed on both numpy and tensorflow for reproducible runs
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
def FdGars_main(support: list,
                features: tf.SparseTensor,
                label: tf.Tensor, masks: list,
                args: argparse.ArgumentParser().parse_args()) -> None:
    """
    Main function to train, val and test the model

    :param support: a list of the sparse adjacency matrices
    :param features: node feature tuple for all nodes {coords, values, shape}
    :param label: the label tensor for all nodes
    :param masks: a list of mask tensors to obtain the train, val, test data
    :param args: additional parameters
    """
    model = FdGars(args.input_dim, args.nhid, args.output_dim, args)
    optimizer = optimizers.Adam(lr=args.lr)
    # train: full-batch gradient step per epoch
    for epoch in tqdm(range(args.epochs)):
        # Only the forward pass runs inside the tape; the gradient is taken
        # on the training loss w.r.t. all trainable variables.
        with tf.GradientTape() as tape:
            train_loss, train_acc = model([support, features, label, masks[0]])
        grads = tape.gradient(train_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # Validation pass (no dropout / no gradient tracking).
        val_loss, val_acc = model([support, features, label, masks[1]],
                                  training=False)
        # Log progress every 10 epochs.
        if epoch % 10 == 0:
            print(
                f"train_loss: {train_loss:.4f}, "
                f"train_acc: {train_acc:.4f},"
                f"val_loss: {val_loss:.4f},"
                f"val_acc: {val_acc:.4f}")
    # test on the held-out mask
    _, test_acc = model([support, features, label, masks[2]], training=False)
    print(f"Test acc: {test_acc:.4f}")
if __name__ == "__main__":
# load the data
adj_list, features, [idx_train, _, idx_val, _, idx_test, _], y = \
load_data_dblp(meta=False, train_size=args.train_size)
# convert to dense tensors
train_mask = tf.convert_to_tensor(sample_mask(idx_train, y.shape[0]))
val_mask = tf.convert_to_tensor(sample_mask(idx_val, y.shape[0]))
test_mask = tf.convert_to_tensor(sample_mask(idx_test, y.shape[0]))
label = tf.convert_to_tensor(y, dtype=tf.float32)
# normalize the adj matrix and feature matrix
features = preprocess_feature(features)
support = preprocess_adj(adj_list[0])
# initialize the model parameters
args.input_dim = features[2][1]
args.output_dim = y.shape[1]
args.train_size = len(idx_train)
args.num_features_nonzero = features[1].shape
# cast sparse matrix tuples to sparse tensors
features = tf.cast(tf.SparseTensor(*features), dtype=tf.float32)
support = [tf.cast(tf.SparseTensor(*support), dtype=tf.float32)]
FdGars_main(support, features, label,
[train_mask, val_mask, test_mask], args)
| 2.375 | 2 |
problem3a.py | mvignoul/phys218_example | 0 | 17585 | """ find the Schwarzschild radius of the Sun in m using pint"""
import pint
class Sun:
    """Model a star whose mass is given in solar masses."""

    def __init__(self, mass):
        """Attach a pint unit registry and store the mass in Msolar units."""
        self.ureg = pint.UnitRegistry()
        self.ureg.define("Msolar = 1.98855*10**30 * kilogram")
        self.mass = mass * self.ureg.Msolar

    def schwarz(self):
        """Return this star's Schwarzschild radius, r = 2GM/c^2, in SI base units."""
        gravitational_constant = self.ureg.newtonian_constant_of_gravitation
        speed_of_light = self.ureg.speed_of_light
        radius = 2 * gravitational_constant * self.mass / speed_of_light**2
        return radius.to_base_units()
def schwarz_rad(mass):
    """Return the Schwarzschild radius of a star of `mass` solar masses."""
    return Sun(mass).schwarz()
if __name__ == "__main__":
MASS = 1.0
RAD = schwarz_rad(MASS)
print(RAD)
| 3.65625 | 4 |
morpfw/authn/pas/user/rulesprovider.py | morpframework/morpfw | 8 | 17586 | from ....crud.rulesprovider.base import RulesProvider
from .. import exc
from ..app import App
from ..utils import has_role
from .model import UserCollection, UserModel
class UserRulesProvider(RulesProvider):
    """Business rules for UserModel: password change and validation."""
    context: UserModel
    def change_password(self, password: str, new_password: str, secure: bool = True):
        """Change the user's password.

        Unless the caller is an administrator (or `secure` is False), the
        current `password` must validate first; otherwise
        InvalidPasswordError is raised.  State checks are skipped during
        that validation so e.g. inactive users can still be updated.
        """
        context = self.context
        if secure and not has_role(self.request, "administrator"):
            if not context.validate(password, check_state=False):
                raise exc.InvalidPasswordError(context.userid)
        # NOTE(review): this uses context.identity.userid while validate()
        # below uses context.userid — confirm both refer to the same user.
        context.storage.change_password(context, context.identity.userid, new_password)
    def validate(self, password: str, check_state=True) -> bool:
        """Return True when `password` matches; optionally require the
        account state to be "active" (check_state=True)."""
        context = self.context
        if check_state and context.data["state"] != "active":
            return False
        return context.storage.validate(context, context.userid, password)
# Register UserRulesProvider as the rules provider for UserModel.
@App.rulesprovider(model=UserModel)
def get_user_rulesprovider(context):
    return UserRulesProvider(context)
| 2.125 | 2 |
dsa_extras/library/codec_code/huffman.py | palette-swapped-serra/dsa-extras | 1 | 17587 | from dsa.parsing.line_parsing import line_parser
from dsa.parsing.token_parsing import make_parser
# Line parser for Huffman table entries: each line holds an integer bit
# pattern and a hexdump of the bytes it decodes to.
_parser = line_parser(
    'Huffman table entry',
    make_parser(
        'Huffman table entry data',
        ('integer', 'encoded bit sequence'),
        ('hexdump', 'decoded bytes')
    )
)
class HuffmanTable:
    """Bidirectional Huffman code table.

    `decode` maps an encoded bit pattern (stored as an int whose leading
    1 bit is a sentinel marking the code's length) to the decoded bytes;
    `encode` is the reverse mapping.
    """

    def __init__(self, decode, encode):
        self._decode = decode
        self._encode = encode

    def _decode_gen(self, stream):
        """Yield decoded byte chunks until one ending in a NUL appears.

        Bits are consumed from `stream` LSB-first within each byte.
        """
        current_byte = stream.read(1)[0]
        position = 0  # index (LSB-first) of the next bit to take
        code = 1      # accumulated code; the leading 1 marks its length
        while True:
            if code in self._decode:
                decoded = self._decode[code]
                yield decoded
                if decoded[-1] == 0:
                    return  # a NUL-terminated chunk ends the stream
                code = 1  # restart accumulation for the next code
            # Shift the next input bit into the accumulated code.
            code = (code << 1) | ((current_byte >> position) & 1)
            position += 1
            if position == 8:
                position = 0
                current_byte = stream.read(1)[0]

    def decode(self, stream):
        """Decode the binary file-like `stream` into a bytes object."""
        return b''.join(self._decode_gen(stream))
class Loader:
    """Accumulates Huffman table entries line by line, then builds the table."""

    def __init__(self):
        self._decode, self._encode = {}, {}

    def line(self, tokens):
        """Parse one table line and record both directions of the mapping."""
        entry = _parser(tokens)[0]
        compressed, uncompressed = entry
        self._decode[compressed] = uncompressed
        self._encode[uncompressed] = compressed

    def result(self):
        """Return the finished bidirectional HuffmanTable."""
        return HuffmanTable(self._decode, self._encode)
| 2.953125 | 3 |
def split_in_three(data_real, data_fake):
    """Bucket both datasets into thirds of their combined value range.

    Returns (fake, real): for each dataset, the fraction of values in
    the top, middle and bottom third (matching split_data's ordering).
    """
    low = min(data_fake.min(), data_real.min())
    high = max(data_fake.max(), data_real.max())
    third_span = (high - low) / 3
    # Thresholds at one third and two thirds of the range.
    lower_cut = low + third_span
    upper_cut = high - third_span

    def proportions(values):
        top, middle, bottom = split_data(lower_cut, upper_cut, values)
        total = len(values)
        return [top / total, middle / total, bottom / total]

    return proportions(data_fake), proportions(data_real)
def split_data(th_one, th_two, data):
    """Count values of *data* relative to the two thresholds.

    Returns (high, mid, low): values >= th_two, values strictly between
    the thresholds, and values <= th_one, in that order.
    """
    low = mid = high = 0
    for value in data:
        if value <= th_one:
            low += 1
        elif value >= th_two:
            high += 1
        else:
            mid += 1
    return high, mid, low
evap/evaluation/tests/test_auth.py | Sohn123/EvaP | 0 | 17589 | <gh_stars>0
from unittest.mock import patch
import urllib
from django.urls import reverse
from django.core import mail
from django.conf import settings
from django.test import override_settings
from model_bakery import baker
from evap.evaluation import auth
from evap.evaluation.models import Contribution, Evaluation, UserProfile
from evap.evaluation.tests.tools import WebTest
class LoginTests(WebTest):
    """End-to-end tests for one-time login-URL authentication and OIDC login."""
    csrf_checks = False
    @classmethod
    def setUpTestData(cls):
        # One active and one inactive external user, both with a valid
        # login key and an editor contribution on a published evaluation.
        cls.external_user = baker.make(UserProfile, email="<EMAIL>")
        cls.external_user.ensure_valid_login_key()
        cls.inactive_external_user = baker.make(UserProfile, email="<EMAIL>", is_active=False)
        cls.inactive_external_user.ensure_valid_login_key()
        evaluation = baker.make(Evaluation, state='published')
        baker.make(
            Contribution,
            evaluation=evaluation,
            contributor=cls.external_user,
            role=Contribution.Role.EDITOR,
            textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
        )
        baker.make(
            Contribution,
            evaluation=evaluation,
            contributor=cls.inactive_external_user,
            role=Contribution.Role.EDITOR,
            textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
        )
    @override_settings(PAGE_URL='https://example.com')
    def test_login_url_generation(self):
        # login_url must match the URL obtained by reversing the view name.
        generated_url = self.external_user.login_url
        self.assertEqual(generated_url, 'https://example.com/key/{}'.format(self.external_user.login_key))
        reversed_url = reverse('evaluation:login_key_authentication', args=[self.external_user.login_key])
        self.assertEqual(reversed_url, '/key/{}'.format(self.external_user.login_key))
    def test_login_url_works(self):
        # Unauthenticated access redirects to login with ?next=.
        self.assertRedirects(self.app.get(reverse("contributor:index")), "/?next=/contributor/")
        url_with_key = reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])
        old_login_key = self.external_user.login_key
        old_login_key_valid_until = self.external_user.login_key_valid_until
        # GET shows a confirmation page and must NOT consume the key.
        page = self.app.get(url_with_key)
        self.external_user.refresh_from_db()
        self.assertEqual(old_login_key, self.external_user.login_key)
        self.assertEqual(old_login_key_valid_until, self.external_user.login_key_valid_until)
        self.assertContains(page, 'Login')
        self.assertContains(page, self.external_user.full_name)
        # POST actually performs the login.
        page = self.app.post(url_with_key).follow().follow()
        self.assertContains(page, 'Logout')
        self.assertContains(page, self.external_user.full_name)
    def test_login_key_valid_only_once(self):
        page = self.app.get(reverse("evaluation:login_key_authentication", args=[self.external_user.login_key]))
        self.assertContains(page, self.external_user.full_name)
        url_with_key = reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])
        page = self.app.post(url_with_key).follow().follow()
        self.assertContains(page, 'Logout')
        # Log out, then try to reuse the same key: it must be rejected
        # and a fresh key must be mailed out.
        page = self.app.get(reverse("django-auth-logout")).follow()
        self.assertNotContains(page, 'Logout')
        page = self.app.get(url_with_key).follow()
        self.assertContains(page, 'The login URL is not valid anymore.')
        self.assertEqual(len(mail.outbox), 1)  # a new login key was sent
        new_key = UserProfile.objects.get(id=self.external_user.id).login_key
        page = self.app.post(reverse("evaluation:login_key_authentication", args=[new_key])).follow().follow()
        self.assertContains(page, self.external_user.full_name)
    def test_inactive_external_users_can_not_login(self):
        page = self.app.get(reverse("evaluation:login_key_authentication", args=[self.inactive_external_user.login_key])).follow()
        self.assertContains(page, "Inactive users are not allowed to login")
        self.assertNotContains(page, "Logout")
    def test_login_key_resend_if_still_valid(self):
        # Requesting a new key while the old one is valid re-sends the
        # same key instead of generating a new one.
        old_key = self.external_user.login_key
        page = self.app.post("/", params={"submit_type": "new_key", "email": self.external_user.email}).follow()
        new_key = UserProfile.objects.get(id=self.external_user.id).login_key
        self.assertEqual(old_key, new_key)
        self.assertEqual(len(mail.outbox), 1)  # a login key was sent
        self.assertContains(page, "We sent you an email with a one-time login URL. Please check your inbox.")
    @override_settings(
        OIDC_OP_AUTHORIZATION_ENDPOINT='https://oidc.example.com/auth',
        ACTIVATE_OPEN_ID_LOGIN=True,
    )
    def test_oidc_login(self):
        # This should send them to /oidc/authenticate
        page = self.app.get("/").click("Login")
        # which should then redirect them to OIDC_OP_AUTHORIZTATION_ENDPOINT
        location = page.headers['location']
        self.assertIn(settings.OIDC_OP_AUTHORIZATION_ENDPOINT, location)
        parse_result = urllib.parse.urlparse(location)
        parsed_query = urllib.parse.parse_qs(parse_result.query)
        self.assertIn("email", parsed_query["scope"][0].split(" "))
        self.assertIn("/oidc/callback/", parsed_query["redirect_uri"][0])
        state = parsed_query["state"][0]
        user = baker.make(UserProfile)
        # usually, the browser would now open that page and login. Then, they'd be redirected to /oidc/callback
        with patch.object(auth.OIDCAuthenticationBackend, 'authenticate', return_value=user, __name__='authenticate'):
            page = self.app.get(f"/oidc/callback/?code=secret-code&state={state}")
        # The oidc module will now send a request to the oidc provider, asking whether the code is valid.
        # We've mocked the method that does that and will just return a UserProfile.
        # Thus, at this point, the user should be logged in and be redirected back to the start page.
        location = page.headers['location']
        parse_result = urllib.parse.urlparse(location)
        self.assertEqual(parse_result.path, "/")
        page = self.app.get(location)
        # A GET here should then redirect to the users real start page.
        # This should be a 403 since the user is external and has no course participation
        page = page.follow(expect_errors=True)
        # user should see the Logout button then.
        self.assertIn('Logout', page.body.decode())
| 2.078125 | 2 |
gwd/converters/spike2kaggle.py | kazakh-shai/kaggle-global-wheat-detection | 136 | 17590 | import argparse
import os.path as osp
from glob import glob
import cv2
import pandas as pd
from tqdm import tqdm
from gwd.converters import kaggle2coco
def parse_args():
    """Parse command-line options for the SPIKE-to-Kaggle converter.

    All options take string values and have dataset-layout defaults.
    """
    parser = argparse.ArgumentParser()
    defaults = {
        "--image-pattern": "/data/SPIKE_images/*jpg",
        "--annotation-root": "/data/SPIKE_annotations",
        "--kaggle_output_path": "/data/spike.csv",
        "--coco_output_path": "/data/coco_spike.json",
    }
    for flag, default in defaults.items():
        parser.add_argument(flag, default=default)
    return parser.parse_args()
def main():
    """Convert SPIKE wheat-head annotations to Kaggle CSV and COCO JSON."""
    args = parse_args()
    img_paths = glob(args.image_pattern)
    annotations = []
    for img_path in tqdm(img_paths):
        # Annotation TSV lives next to the image, named <img>.bboxes.tsv.
        # NOTE(review): str.replace hits every "jpg" occurrence in the
        # basename, not just the extension — confirm filenames are safe.
        ann_path = osp.join(args.annotation_root, (osp.basename(img_path.replace("jpg", "bboxes.tsv"))))
        ann = pd.read_csv(ann_path, sep="\t", names=["x_min", "y_min", "x_max", "y_max"])
        h, w = cv2.imread(img_path).shape[:2]
        # Clip boxes to the image bounds, then drop degenerate boxes.
        ann[["x_min", "x_max"]] = ann[["x_min", "x_max"]].clip(0, w)
        ann[["y_min", "y_max"]] = ann[["y_min", "y_max"]].clip(0, h)
        ann["height"] = h
        ann["width"] = w
        ann["bbox_width"] = ann["x_max"] - ann["x_min"]
        ann["bbox_height"] = ann["y_max"] - ann["y_min"]
        ann = ann[(ann["bbox_width"] > 0) & (ann["bbox_height"] > 0)].copy()
        # Kaggle format stores [x, y, width, height] lists.
        ann["bbox"] = ann[["x_min", "y_min", "bbox_width", "bbox_height"]].values.tolist()
        ann["image_id"] = osp.basename(img_path).split(".")[0]
        annotations.append(ann)
    annotations = pd.concat(annotations)
    annotations["source"] = "spike"
    print(annotations.head())
    annotations[["image_id", "source", "width", "height", "bbox"]].to_csv(args.kaggle_output_path, index=False)
    # Second pass: derive the COCO-format JSON from the Kaggle CSV.
    kaggle2coco.main(args.kaggle_output_path, args.coco_output_path)
if __name__ == "__main__":
    main()
| 2.515625 | 3 |
shuttl/tests/test_views/test_organization.py | shuttl-io/shuttl-cms | 2 | 17591 | <gh_stars>1-10
import json
from shuttl import app
from shuttl.tests import testbase
from shuttl.Models.Reseller import Reseller
from shuttl.Models.organization import Organization, OrganizationDoesNotExistException
class OrganizationViewTest(testbase.BaseTest):
    """CRUD tests for the /organization/ endpoints."""
    def _setUp(self):
        pass
    def test_index(self):
        # rv = self.app.get('/')
        # assert 'Shuttl' in rv.data.decode('utf-8')
        pass
    def test_login(self):
        rv = self.login('test')
        pass
    def login(self, organization):
        # Helper: POST the login form for the given organization name.
        return self.app.post('/login', data=dict(
            organization=organization
        ), follow_redirects=True)
    def test_creation(self):
        # Creating a new organization returns 201; a duplicate name 409.
        results = self.app.post("/organization/", data = dict(name="testOrg"))
        self.assertEqual(results.status_code, 201)
        results2 = self.app.post("/organization/", data = dict(name="testOrg"))
        self.assertEqual(results2.status_code, 409)
        results = json.loads(results.data.decode())
        # Expected serialized form, including the default reseller.
        expected = {
            'reseller': {
                'directory': '',
                'name': 'shuttl',
                '_url': 'shuttl.com',
                'subdir': '',
                'id': 1,
                'admins': [],
                'organizations': [],
                '_price': 10.0
            },
            'id': 1,
            'name': 'testOrg',
            'websites': [],
            'users': []
        }
        self.assertEqual(len(Organization.query.all()), 1)
        self.assertEqual(len(list(self.reseller.organizations.all())), 1)
        self.assertEqual(results, expected)
        pass
    def test_getAll(self):
        # Listing returns every created organization, serialized.
        self.app.post("/organization/", data = dict(name="testOrg"))
        self.app.post("/organization/", data = dict(name="testOrg2"))
        self.app.post("/organization/", data = dict(name="testOrg3"))
        expected = [
            Organization.query.filter(Organization.name == "testOrg").first().serialize(),
            Organization.query.filter(Organization.name == "testOrg2").first().serialize(),
            Organization.query.filter(Organization.name == "testOrg3").first().serialize()
        ]
        results = self.app.get("/organization/")
        results = json.loads(results.data.decode())
        self.assertEqual(len(results), 3)
        self.assertEqual(expected, results)
        pass
    def test_get(self):
        # Fetch by id returns the created record; unknown id is 404.
        results = self.app.post("/organization/", data = dict(name="testOrg"))
        results_dict = json.loads(results.data.decode())
        id = results_dict["id"]
        results = self.app.get("/organization/{0}".format(id))
        self.assertEqual(results.status_code, 200)
        results = json.loads(results.data.decode())
        self.assertEqual(results_dict, results)
        results = self.app.get("/organization/1234")
        self.assertEqual(results.status_code, 404)
        pass
    def test_patch(self):
        # Renaming and re-assigning the vendor both work; the old
        # (name, vendor) lookup must then fail.
        results = self.app.post("/organization/", data = dict(name="testOrg"))
        reseller = Reseller.Create(name="test3", _url="shuttl2.com")
        results_dict = json.loads(results.data.decode())
        results = self.app.patch("/organization/{0}".format(results_dict["id"]), data=dict(name="testOrg4"))
        self.assertEqual(results.status_code, 200)
        self.assertRaises(OrganizationDoesNotExistException, Organization.Get, name="testOrg", vendor=self.reseller)
        results = json.loads(results.data.decode())
        self.assertEqual(results["name"], "testOrg4")
        org = Organization.Get(name="testOrg4", vendor=self.reseller)
        self.assertEqual(org.serialize(), results)
        results = self.app.patch("/organization/{0}".format(results_dict["id"]), data=dict(vendor=reseller.id))
        self.assertRaises(OrganizationDoesNotExistException, Organization.Get, name="testOrg4", vendor=self.reseller)
        results = json.loads(results.data.decode())
        org = Organization.Get(name="testOrg4", vendor=reseller)
        self.assertEqual(org.serialize(), results)
        self.assertEqual(len(list(self.reseller.organizations.all())), 0)
        self.assertEqual(len(list(reseller.organizations.all())), 1)
        # Unknown id -> 404; PATCH on the collection -> 405.
        results = self.app.patch("/organization/1234", data=dict(vendor=reseller.id))
        self.assertEqual(results.status_code, 404)
        results = self.app.patch("/organization/", data=dict(vendor=reseller.id))
        self.assertEqual(results.status_code, 405)
        pass
    def test_delete(self):
        # Delete removes only the targeted organization; bad routes 405/404.
        results = self.app.post("/organization/", data = dict(name="testOrg"))
        results2 = self.app.post("/organization/", data = dict(name="testOrg2"))
        results = json.loads(results.data.decode())
        res3 = self.app.delete("/organization/{0}".format(results["id"]))
        self.assertEqual(res3.status_code, 200)
        self.assertEqual(len(list(self.reseller.organizations.all())), 1)
        res3 = self.app.delete("/organization/")
        self.assertEqual(res3.status_code, 405)
        res3 = self.app.delete("/organization/1234")
        self.assertEqual(res3.status_code, 404)
        pass
| 2.296875 | 2 |
emissary/controllers/load.py | LukeB42/Emissary | 193 | 17592 | <gh_stars>100-1000
# This file contains functions designed for
# loading cron tables and storing new feeds.
from emissary import db
from sqlalchemy import and_
from emissary.controllers.utils import spaceparse
from emissary.controllers.cron import parse_timings
from emissary.models import APIKey, Feed, FeedGroup
def create_feed(log, db, key, group, feed):
    """
    Takes a key object, a group name and a dictionary
    describing a feed ({name:,url:,schedule:,active:})
    and reliably attaches a newly created feed to the key
    and group.

    Note: this module is Python 2 code (``except Exception, e`` syntax).
    """
    # Reject anything that is not a dict with all four required fields.
    if not type(feed) == dict:
        log('Unexpected type when creating feed for API key "%s"' % key.name)
        return
    for i in ['name', 'schedule', 'active', 'url']:
        if not i in feed.keys():
            log('%s: Error creating feed. Missing "%s" field from feed definition.' % (key.name, i))
            return
    # Look up an existing feed with this name on the key, and the group.
    f = Feed.query.filter(and_(Feed.key == key, Feed.name == feed['name'])).first()
    fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == group)).first()
    if f:
        if f.group:
            # Feed already belongs to some group: refuse to move it.
            log('%s: Error creating feed "%s" in group "%s", feed already exists in group "%s".' % \
                (key.name, feed['name'], group, f.group.name))
            return
        elif fg:
            # Orphan feed and the group exists: just attach it.
            log('%s: %s: Adding feed "%s"' % (key.name, fg.name, f.name))
            fg.append(f)
            db.session.add(fg)
            db.session.add(f)
            db.session.commit()
            return
    if not fg:
        # Group does not exist yet: create it on this key.
        log('%s: Creating feed group %s.' % (key.name, group))
        fg = FeedGroup(name=group)
        key.feedgroups.append(fg)
    try:
        parse_timings(feed['schedule'])
    except Exception, e:
        # NOTE(review): the error is logged but execution continues, so a
        # feed with an invalid schedule is still created — confirm intended.
        log('%s: %s: Error creating "%s": %s' % \
            (key.name, fg.name, feed['name'], e.message))
    log('%s: %s: Creating feed "%s"' % (key.name, fg.name, feed['name']))
    f = Feed(
        name=feed['name'],
        url=feed['url'],
        active=feed['active'],
        schedule=feed['schedule']
    )
    # Attach the new feed to both the group and the key, then persist.
    fg.feeds.append(f)
    key.feeds.append(f)
    db.session.add(key)
    db.session.add(fg)
    db.session.add(f)
    db.session.commit()
def parse_crontab(filename):
    """
    Get a file descriptor on filename and
    create feeds and groups for API keys therein.

    Lines starting with "apikey:" select the active key; lines starting
    with "http" define a feed: URL, quoted name and group, then a
    five-field cron schedule. (Python 2 code.)
    """
    def log(message):
        print message
    # read filename into a string named crontab
    try:
        fd = open(filename, "r")
    except OSError:
        print "Error opening %s" % filename
        raise SystemExit
    crontab = fd.read()
    fd.close()
    # keep a resident api key on hand
    key = None
    for i, line in enumerate(crontab.split('\n')):
        # Set the APIKey we're working with when we find a line starting
        # with apikey:
        if line.startswith("apikey:"):
            if ' ' in line:
                key_str = line.split()[1]
                key = APIKey.query.filter(APIKey.key == key_str).first()
                if not key:
                    print 'Malformed or unknown API key at line %i in %s: %s' % (i+1, filename, line)
                    raise SystemExit
                else:
                    print 'Using API key "%s".' % key.name
        if line.startswith("http"):
            feed = {'active': True}
            # Grab the URL and set the string to the remainder
            feed['url'] = line.split().pop(0)
            line = ' '.join(line.split()[1:])
            # Grab names and groups
            names = spaceparse(line)
            if not names:
                print "Error parsing feed or group name at line %i in %s: %s" % (i+1, filename, line)
                continue
            feed['name'], group = names[:2]
            # The schedule should be the last five items
            schedule = line.split()[-5:]
            try:
                parse_timings(schedule)
            except Exception, e:
                print "Error parsing schedule at line %i in %s: %s" % (i+1, filename, e.message)
                continue
            feed['schedule'] = ' '.join(schedule)
            create_feed(log, db, key, group, feed)
| 2.640625 | 3 |
vodgen/main.py | Oveof/Vodgen | 0 | 17593 | <reponame>Oveof/Vodgen
"""Vodgen app"""
from msilib.schema import Directory
import sys
import json
import re
from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox,
QFileDialog, QLabel, QLineEdit, QMainWindow, QPlainTextEdit, QPushButton, QVBoxLayout, QWidget)
from videocutter import create_video
from thumbnail import Thumbnail, Player, Config, ImageInfo, MatchInfo
import sys
from os.path import exists
#sys.stdout = open("vodgen.log", "w")
import logging
import os
# Use the logging.WARNING *constant* here: the original passed the
# logging.warning function, which is not a valid level and makes
# basicConfig raise TypeError at import time.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.WARNING, filename="./vodgen.log")
# Both data files are required at startup; abort with a logged error if missing.
if not exists("./characterinfo.json"):
    logging.error("characterinfo.json could not be found!")
    exit()
if not exists("./config.json"):
    logging.error("config.json could not be found!")
    exit()
class Error(Exception):
    """Base class for VOD title parsing errors."""
    pass
class InvalidRoundName(Error):
    """Raised when the title's round is not Winners/Losers/Grand Finals."""
    pass
class MissingPlayer1Character(Error):
    """Raised when no "(Character)" group is found for team/player 1."""
    pass
class MissingPlayer2Character(Error):
    """Raised when no "(Character)" group is found for team/player 2."""
    pass
def formatTitle(title):
    """Parse a VOD title into (player_list, tournament_round, game_name).

    Expected shape (from the splits below): "...: <Game> <Round> -
    <P1> (<Char>) vs <P2> (<Char>)", where teams may join players and
    characters with " + ".

    Raises InvalidRoundName / MissingPlayer1Character /
    MissingPlayer2Character when the title does not match.
    """
    # Everything between ": " and " - " describes the game and round.
    game_info = title.split(": ")[1].split(" - ")[0]
    tournament_round = ' '.join(game_info.split(' ')[-2:])
    #gameRound = gameInfo.split(' ', 2)
    game_name = game_info.split(' ')[0]
    if "Winners" in game_info:
        game_name = game_info.split(' Winners')[0]
    elif "Losers" in game_info:
        game_name = game_info.split(' Losers')[0]
    elif "Grand Finals" in game_info:
        game_name = game_info.split(' Grand')[0]
    else:
        raise InvalidRoundName()
    # NOTE(review): splits on every "-", so hyphens in player names
    # would break this — confirm titles never contain extra hyphens.
    player_info = title.split("-")[1]
    team1 = player_info.split("vs")[0].strip()
    team1_players = team1.split("(")[0].split(" + ")
    # Character names are the parenthesised group; only the part before
    # the first ", " is kept.
    team1_characters_search = re.search(r"\(([A-Za-z0-9_, .é+]+)\)", team1)
    if team1_characters_search == None:
        raise MissingPlayer1Character()
    team1_characters = team1_characters_search.group(1).split(", ")[0].split(" + ")
    team2 = player_info.split("vs")[1].strip()
    team2_players = team2.split("(")[0].split(" + ")
    team2_characters_search = re.search(r"\(([A-Za-z0-9_, .é+]+)\)", team2)
    if team2_characters_search == None:
        raise MissingPlayer2Character
    team2_characters = team2_characters_search.group(1).split(", ")[0].split(" + ")
    player_names = team1_players + team2_players
    player_characters = team1_characters + team2_characters
    player_list = []
    for x in range(len(player_names)):
        # First half of the combined list is team 0, second half team 1.
        if len(player_names) / 2 > x:
            team_num = 0
        else:
            team_num = 1
        player_list.append(Player(player_names[x], player_characters[x], team_num, x+1))
    return player_list, tournament_round, game_name
class MainWindow(QMainWindow):
    """Main UI window"""
    def __init__(self):
        # Build the whole UI: file pickers, region/game dropdowns,
        # a text box for "<start> <end> <title>" lines, and options.
        super().__init__()
        self.setWindowTitle("Vodgen")
        layout = QVBoxLayout()
        self.choose_stream = QPushButton("Choose stream file")
        self.choose_stream.clicked.connect(self.choose_video_file)
        self.choose_region = QComboBox()
        self.choose_game = QComboBox()
        self.choose_banner = QPushButton("Choose Banner")
        self.choose_banner.clicked.connect(self.choose_banner_file)
        #Adds regions form config to dropdown menu
        with open('config.json', encoding="utf-8") as file:
            config = json.load(file)
            for attribute, _ in config["tournament"].items():
                self.choose_region.addItem(attribute)
            for attribute, _ in config["game"].items():
                self.choose_game.addItem(attribute)
        self.only_thumbnails = QCheckBox("Only thumbnails")
        self.create_videos_button = QPushButton("Generate VoDs and thumbnails")
        self.create_videos_button.clicked.connect(self.create_all)
        self.textbox = QPlainTextEdit()
        self.textbox.resize(280,40)
        self.choose_codec = QComboBox()
        self.choose_codec.addItem("")
        self.choose_codec.addItem("h264_nvenc")
        self.choose_codec.addItem("AMF")
        self.choose_stream_label = QLabel("")
        self.choose_banner_label = QLabel("")
        layout.addWidget(self.choose_region)
        layout.addWidget(self.choose_game)
        layout.addWidget(self.choose_stream)
        layout.addWidget(self.choose_stream_label)
        layout.addWidget(self.choose_banner)
        layout.addWidget(self.choose_banner_label)
        layout.addWidget(self.textbox)
        layout.addWidget(self.only_thumbnails)
        layout.addWidget(self.choose_codec)
        layout.addWidget(self.create_videos_button)
        widget = QWidget()
        widget.setLayout(layout)
        self.setCentralWidget(widget)
    def choose_video_file(self):
        """Choose file helper method"""
        # (path, filter) tuple; the path itself is element [0].
        self.video_path = QFileDialog.getOpenFileName(self, "Select File", filter="MP4 (*.mp4)")
        self.choose_stream_label.setText(self.video_path[0])
    def choose_banner_file(self):
        """Choose file helper method"""
        self.banner_path = QFileDialog.getOpenFileName(self, "Select File", filter="PNG (*.png)")
        self.choose_banner_label.setText(self.banner_path[0])
    def choose_dir(self):
        """Choose directory helper method"""
        return QFileDialog.getExistingDirectory(self, "Select Directory")
    def create_all(self):
        # One thumbnail + one cut video per input line of the form
        # "<start> <end> <title>".
        # NOTE(review): self.video_path / self.banner_path only exist
        # after the user picked files — confirm UI enforces that.
        if self.textbox.toPlainText() == "":
            logging.warning("Input is empty")
            return
        user_input = self.textbox.toPlainText().split("\n")
        for line in user_input:
            logging.info(f"Started work on line: {line}")
            try:
                title = line.split(" ", 2)[2]
                start_time = line.split(" ")[0]
                end_time = line.split(" ")[1]
            except IndexError:
                logging.warning(f"Invalid line: {line}")
                return 0
            try:
                player_list, tournament_round, game_name, = formatTitle(title)
            except InvalidRoundName:
                logging.warning("Invalid tournament round name on line: " + line )
                return 0
            except MissingPlayer1Character:
                logging.warning("Missing player 1 character name on line: " + line)
                return 0
            except MissingPlayer2Character:
                logging.warning("Missing player 2 character name on line: " + line)
                return 0
            match = MatchInfo(str(self.choose_game.currentText()), tournament_round)
            image_info = ImageInfo()
            config = Config(str(self.choose_game.currentText()), str(self.choose_region.currentText()))
            # Replace characters Windows filenames can't contain.
            windows_title = title.replace("|", "¤")
            windows_title = windows_title.replace(":", "#")
            new_thumbnail = Thumbnail(player_list, match, image_info, config, windows_title)
            new_thumbnail.create_thumbnail(self.banner_path[0])
            logging.info(f"Thumbnail created for line: {line}")
            results_directory = ""
            with open('config.json', encoding="utf-8") as file:
                config = json.load(file)
                results_directory = config["tournament"][str(self.choose_region.currentText())]["output_dir"]
            if not exists(results_directory):
                logging.warning("Output directory could not be found in filesystem")
                logging.info("Creating output directory...")
                os.mkdir(results_directory)
            create_video(self.video_path[0], start_time, end_time, f"{results_directory}/" + windows_title + ".mp4", self.choose_region.currentText())
# Application entry point: build the Qt app, show the window, run the loop.
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec()
#sys.stdout.close()
| 2.609375 | 3 |
bin/plpproject.py | stefanct/pulp-tools | 2 | 17594 | <gh_stars>1-10
#
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import plptools as plp
class PkgDep(plp.PkgDep):
    # Thin project-level alias of plp.PkgDep; behavior unchanged.
    def __init__(self, *kargs, **kwargs):
        super(PkgDep, self).__init__(*kargs, **kwargs)
class Package(plp.Package):
    # Thin project-level alias of plp.Package; behavior unchanged.
    def __init__(self, *kargs, **kwargs):
        super(Package, self).__init__(*kargs, **kwargs)
class ArtifactoryServer(object):
    """Connection settings for an Artifactory server.

    SSL certificate verification is on unless *ssl_verify* is False.
    """
    def __init__(self, name, url, ssl_verify=True):
        self.name, self.url, self.ssl_verify = name, url, ssl_verify
class Module(plp.Module):
    # Thin project-level alias of plp.Module; behavior unchanged.
    def __init__(self, *kargs, **kwargs):
        super(Module, self).__init__(*kargs, **kwargs)
class BuildStep(object):
    """A named build step wrapping a shell command string."""
    def __init__(self, name, command):
        self.name, self.command = name, command
class Group(plp.Group):
    # Thin project-level alias of plp.Group; behavior unchanged.
    def __init__(self, *kargs, **kwargs):
        super(Group, self).__init__(*kargs, **kwargs)
class BuildStepMap(object):
    """A named list of build steps."""
    def __init__(self, name, stepList):
        self.name, self.stepList = name, stepList
class BuildSteps(object):
    """Ordered collection of step maps with by-name lookup."""
    def __init__(self, stepList):
        self.stepList = stepList
        # Index by name; a later duplicate name overwrites an earlier one.
        self.steps = {step.name: step for step in stepList}
    def get(self, name):
        # Returns the named entry's stepList; raises AttributeError on a
        # missing name (dict.get yields None), matching the original.
        return self.steps.get(name).stepList
| 2.15625 | 2 |
leetcode/268_missing_number/268_missing_number.py | ryangillard/misc | 0 | 17595 | <filename>leetcode/268_missing_number/268_missing_number.py<gh_stars>0
class Solution(object):
    def missingNumber(self, nums):
        """
        Return the one value missing from nums, which holds n distinct
        numbers drawn from the range [0, n].

        Uses the arithmetic-series sum, so it runs in O(n) time with
        O(1) extra space (the original set-based scan used O(n) space).

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        # Sum of 0..n minus the sum present equals the missing value.
        return n * (n + 1) // 2 - sum(nums)
tests/shell/test_console.py | svidoso/ipopo | 65 | 17596 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the shell console
:author: <NAME>
"""
# Pelix
from pelix.utilities import to_str, to_bytes
# Standard library
import random
import string
import sys
import threading
import time
# Tests
try:
import unittest2 as unittest
except ImportError:
import unittest
# ------------------------------------------------------------------------------
# Module version, kept as a tuple for easy comparison; __version__ is
# its dotted-string form.
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
try:
import subprocess
except ImportError:
# Can't run the test if we can't start another process
pass
else:
    class ShellStandaloneTest(unittest.TestCase):
        """
        Tests the console shell when started as a script
        """
        @staticmethod
        def random_str():
            """
            Generates a random string
            :return: A random string
            """
            data = list(string.ascii_letters)
            random.shuffle(data)
            return ''.join(data)
        def test_echo(self):
            """
            Tests the console shell 'echo' method
            """
            # Get shell PS1 (static method)
            import pelix.shell.core
            ps1 = pelix.shell.core._ShellService.get_ps1()
            # Start the shell process
            process = subprocess.Popen(
                [sys.executable, '-m', 'pelix.shell'],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            # Avoid being blocked...
            timer = threading.Timer(5, process.terminate)
            timer.start()
            # Wait for prompt
            got = ""
            while ps1 not in got:
                char = to_str(process.stdout.read(1))
                if not char:
                    if sys.version_info[0] == 2:
                        self.skipTest("Shell console test doesn't work on "
                                      "Python 2.7 with Travis")
                    else:
                        if process.poll():
                            output = to_str(process.stdout.read())
                        else:
                            output = "<no output>"
                        self.fail("Can't read from stdout (rc={})\n{}"
                                  .format(process.returncode, output))
                else:
                    got += char
            # We should be good
            timer.cancel()
            try:
                # Try echoing
                data = self.random_str()
                # Write command
                process.stdin.write(to_bytes("echo {}\n".format(data)))
                process.stdin.flush()
                # Read result
                last_line = to_str(process.stdout.readline()).rstrip()
                self.assertEqual(last_line, data, "Wrong output")
                # Stop the process
                process.stdin.write(to_bytes("exit\n"))
                process.stdin.flush()
                # Wait for the process to stop (1 second max)
                delta = 0
                start = time.time()
                while delta <= 1:
                    delta = time.time() - start
                    if process.poll() is not None:
                        break
                    time.sleep(.1)
                else:
                    self.fail("Process took too long to stop")
            finally:
                try:
                    # Kill it in any case
                    process.terminate()
                except OSError:
                    # Process was already stopped
                    pass
        def test_properties(self):
            """
            Tests the console shell properties parameter
            """
            # Prepare some properties
            key1 = self.random_str()[:5]
            key2 = self.random_str()[:5]
            val1 = self.random_str()
            val2 = self.random_str()
            # Start the shell process
            process = subprocess.Popen(
                [sys.executable, '-m', 'pelix.shell',
                 '-D', '{}={}'.format(key1, val1), '{}={}'.format(key2, val2)],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            try:
                # List properties, stop and get output
                output = to_str(process.communicate(to_bytes("properties"))[0])
                # Each property given with -D must appear with its value.
                found = 0
                for line in output.splitlines(False):
                    if key1 in line:
                        self.assertIn(val1, line)
                        found += 1
                    elif key2 in line:
                        self.assertIn(val2, line)
                        found += 1
                self.assertEqual(found, 2, "Wrong number of properties")
            finally:
                try:
                    # Kill it in any case
                    process.terminate()
                except OSError:
                    # Process was already stopped
                    pass
pass
| 2.4375 | 2 |
src/util/utils.py | 5agado/intro-ai | 3 | 17597 | <filename>src/util/utils.py<gh_stars>1-10
import os
import math
def dotProduct(v1, v2):
    """Return the dot product of two equal-length numeric sequences."""
    total = 0
    for a, b in zip(v1, v2):
        total += a * b
    return total
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^-x)."""
    negative_exp = math.exp(-x)
    return 1.0 / (1.0 + negative_exp)
def getResourcesPath():
    """Absolute path of the 'resources' directory one level above this module."""
    parent = os.path.join(os.path.dirname(__file__), os.pardir)
    return os.path.abspath(os.path.join(parent, 'resources'))
def readTrainModel(filePath, numOutputs=1):
    """Read a space-separated training file into (inputs, outputs) pairs.

    Each line holds floats; the last *numOutputs* values of a line are
    its outputs, the rest its inputs.

    :param filePath: path of the text file to read.
    :param numOutputs: how many trailing values per line are outputs.
    :return: list of (inputs, outputs) tuples of float lists.
    """
    # "with" guarantees the file is closed (the original leaked the handle).
    with open(filePath, 'r') as f:
        res = []
        for line in f:
            sLine = list(map(float, line.strip().split(" ")))
            res.append((sLine[:-numOutputs], sLine[-numOutputs:]))
        return res
def readMatrix(filePath):
    """Read a space-separated text file as a matrix of floats.

    :param filePath: path of the text file to read.
    :return: list of rows, each a list of floats (one row per line).
    """
    # "with" guarantees the file is closed (the original leaked the handle).
    with open(filePath, 'r') as f:
        return [list(map(float, line.strip().split(" "))) for line in f]
scripts/models/xgboost/test-xgboost_tuning3.py | jmquintana79/utilsDS | 0 | 17598 | <gh_stars>0
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2018-09-26 10:01:02
# @Last Modified by: <NAME>
# @Last Modified time: 2018-09-26 16:04:24
"""
XGBOOST Regressor with Bayesian tuning: OPTION 3
In this case it will be used hyperopt-sklearn and his native algorithm
"xgboost_regression".
NOTE: scikit-learn tools is not working for this estimator.
Reference: https://github.com/hyperopt/hyperopt-sklearn
"""
import warnings
# Silence library deprecation noise for this experiment script.
warnings.filterwarnings('ignore')
import numpy as np
import sys
sys.path.append('../../')
from datasets import solar
from tools.reader import get_dcol
from preprocessing.scalers.normalization import Scaler
from models.metrics import metrics_regression
from tools.timer import *
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, KFold
import time
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import xgboost as xgb
from sklearn.metrics import r2_score, mean_absolute_error
import os
# Limit OpenMP threads used by xgboost/numpy backends.
os.environ['OMP_NUM_THREADS'] = str(2)
def main():
    """Tune an XGBoost regressor on solar data with hyperopt-sklearn."""
    # init timer
    t = Timer()
    t.add('test')
    """ DATA PREPARATION """
    # load data
    data, dcol = solar.load()
    # select data
    ly = ['y']
    lx = ['doy', 'hour', 'LCDC267', 'MCDC267', 'HCDC267', 'TCDC267', 'logAPCP267', 'RH267', 'TMP267', 'DSWRF267']
    data = data[lx + ly]
    dcol = get_dcol(data, ltarget=ly)
    # select one hour data
    hour = 11
    idata = data[data.hour == hour]
    idata.drop('hour', axis=1, inplace=True)
    idcol = get_dcol(idata, ltarget=['y'])
    # clean
    del(data)
    del(dcol)
    # filtering outliers (ghi vs power)
    from preprocessing.outliers import median2D
    isoutlier = median2D.launch(idata['DSWRF267'].values, idata.y.values, percent=20.)
    idata['isoutlier'] = isoutlier
    idata = idata[idata.isoutlier == False]
    idata.drop('isoutlier', axis=1, inplace=True)
    # prepare data: features as-is, target normalized to [0, 1]-style scale
    X = idata[idcol['lx']].values
    scaler = Scaler()
    y = scaler.fit_transform(idata[idcol['ly']].values).ravel()
    print('Prepared data: X: %s y: %s' % (X.shape, y.shape))
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
    print('Prepared data: X_train: %s y_train: %s' % (X_train.shape, y_train.shape))
    print('Prepared data: X_test: %s y_test: %s' % (X_test.shape, y_test.shape))
    # replace training dataset
    X = X_train
    y = y_train
    """ ESTIMATOR WITH BAYESIAN TUNING """
    from hpsklearn import HyperoptEstimator, xgboost_regression
    from hyperopt import tpe
    # Instantiate a HyperoptEstimator with the search space and number of evaluations
    clf = HyperoptEstimator(regressor=xgboost_regression('my_clf'),
                            preprocessing=[],
                            algo=tpe.suggest,
                            max_evals=250,
                            trial_timeout=300)
    clf.fit(X, y)
    print(clf.best_model())
    # Evaluate on the held-out test split.
    y_hat = clf.predict(X_test)
    dscores = metrics_regression(y_test, y_hat, X.shape[1])
    tf = t.since('test')
    print('\nBayesian tuning -test: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' %
          (dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
    # training
    y_hat = clf.predict(X)
    dscores = metrics_regression(y, y_hat, X.shape[1])
    print('Bayesian tuning - train: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' %
          (dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
if __name__ == '__main__':
    main()
| 2.578125 | 3 |
EstruturaDeRepeticao/exercicio32.py | Nicolas-Wursthorn/exercicios-python-brasil | 0 | 17599 | <filename>EstruturaDeRepeticao/exercicio32.py
# The State Department of Meteorology hired you to write a program that reads
# an arbitrary number of temperatures and, at the end, reports the lowest and
# highest temperatures entered as well as their average.

temperaturas = []
while True:
    graus = float(input("Digite a temperatura em graus (tecle 0 para parar): "))
    if graus == 0:
        # 0 is the stop sentinel; it must not be counted as a reading.
        # (The original appended it before checking, so the average
        # wrongly included the sentinel, and entering 0 first crashed
        # on max()/min() of an empty list.)
        break
    temperaturas.append(graus)

if temperaturas:
    media = sum(temperaturas) / len(temperaturas)
    print("A maior temperatura registrada: {}°C".format(max(temperaturas)))
    print("A menor temperatura registrada: {}°C".format(min(temperaturas)))
    print("A temperatura média registrada: {}°C".format(media))