**Schema** (113 columns, column name → dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
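Nothing in this extract specifies how the split is stored or loaded, so the file path and the filter thresholds below are hypothetical; this is only a minimal sketch of how the schema above can be consumed with pandas.

```python
import pandas as pd

# Hypothetical local path; substitute the actual Parquet shard for this split.
df = pd.read_parquet("data/train-00000-of-00001.parquet")

# Keep Python files that parse (cate_ast == 1) and are not dominated
# by duplicated 10-grams. Column names are taken from the schema above.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "alphanum_fraction"]].head())
```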
**Row 1 of 5 — `zulip/finbot` · `exampleinc.py`**

| field | value |
|---|---|
| hexsha | 7ca783f400a2f552b7d64e3767e01fb3717ef036 |
| size | 582 |
| ext | py |
| lang | Python |
| max_stars_repo_path | exampleinc.py |
| max_stars_repo_name | zulip/finbot |
| max_stars_repo_head_hexsha | dcb6bfe54a674f4ff98370677a648b6cc1706e16 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 7 |
| max_stars_repo_stars_event_min_datetime | 2017-02-19T16:35:24.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-09T20:05:49.000Z |
| max_issues_repo_path | exampleinc.py |
| max_issues_repo_name | zulip/finbot |
| max_issues_repo_head_hexsha | dcb6bfe54a674f4ff98370677a648b6cc1706e16 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | exampleinc.py |
| max_forks_repo_name | zulip/finbot |
| max_forks_repo_head_hexsha | dcb6bfe54a674f4ff98370677a648b6cc1706e16 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2020-02-13T18:06:46.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-06-10T19:56:30.000Z |

`content`:
```python
#!/usr/bin/python
from money import *

c = Company("Example Inc")
c.add_flow(FixedCost("Initial Cash", -500000))
c.add_flow(FixedCost("Incorporation", 500))
c.add_flow(ConstantCost("Office", 50000))
c.add_flow(PeriodicCost("Subscription", 4000, "2012-01-05", 14))
c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", 50000)))
c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", 50000)))
c.add_flow(SemiMonthlyCost("Payroll", 4000, "2012-01-01"))
c.add_flow(SemiMonthlyWages("Payroll", 6000, "2012-01-01"))
print(c)
c.cash_monthly_summary("2012-01-01", "2013-07-01")
```
Quality signals (`qsc_*` names abbreviate the corresponding `*_quality_signal` columns):

| signal | value |
|---|---|
| avg_line_length | 36.375 |
| max_line_length | 68 |
| alphanum_fraction | 0.730241 |
| qsc_code_num_words | 89 |
| qsc_code_num_chars | 582 |
| qsc_code_mean_word_length | 4.662921 |
| qsc_code_frac_words_unique | 0.449438 |
| qsc_code_frac_chars_top_2grams | 0.077108 |
| qsc_code_frac_chars_top_3grams | 0.154217 |
| qsc_code_frac_chars_top_4grams | 0.093976 |
| qsc_code_frac_chars_dupe_5grams | 0.260241 |
| qsc_code_frac_chars_dupe_6grams | 0.260241 |
| qsc_code_frac_chars_dupe_7grams | 0.13012 |
| qsc_code_frac_chars_dupe_8grams | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.172477 |
| qsc_code_frac_chars_whitespace | 0.063574 |
| qsc_code_size_file_byte | 582 |
| qsc_code_num_lines | 15 |
| qsc_code_num_chars_line_max | 69 |
| qsc_code_num_chars_line_mean | 38.8 |
| qsc_code_frac_chars_alphabet | 0.588991 |
| qsc_code_frac_chars_comments | 0.027491 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.270796 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0.083333 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.083333 |
| qsc_codepython_frac_lines_print | 0.083333 |

The unsuffixed mirror columns (`qsc_code_*` / `qsc_codepython_*` without `_quality_signal`) are all 0 for this row, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null; `effective` is 1 and `hits` is 0.
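Several of the surface-level signals in the row above are cheap to recompute from `content`. The exact tokenization and line-splitting rules behind the stored values are not documented here, so the definitions below are assumptions and small discrepancies are expected (for instance, whether newline characters are counted):

```python
def surface_signals(content: str) -> dict:
    """Recompute a few signals under assumed definitions (see note above)."""
    lines = content.splitlines()
    words = content.split()
    return {
        # stored avg_line_length for row 1: 36.375
        "avg_line_length": len(content) / max(len(lines), 1),
        # stored max_line_length: 68 (qsc_code_num_chars_line_max is 69,
        # suggesting one of the two counts a newline character)
        "max_line_length": max((len(line) for line in lines), default=0),
        # stored alphanum_fraction: 0.730241
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
        # stored qsc_code_frac_words_unique_quality_signal: 0.449438
        "frac_words_unique": len(set(words)) / max(len(words), 1),
        # stored qsc_code_frac_chars_whitespace_quality_signal: 0.063574
        "frac_chars_whitespace": sum(c.isspace() for c in content) / max(len(content), 1),
    }
```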
**Row 2 of 5 — `dawid1stanek/guardian` · `guardian/validators.py`**

| field | value |
|---|---|
| hexsha | 7ca7926bc8bb9c6d96d0fde91ed69d0cb52091a0 |
| size | 847 |
| ext | py |
| lang | Python |
| max_stars_repo_path | guardian/validators.py |
| max_stars_repo_name | dawid1stanek/guardian |
| max_stars_repo_head_hexsha | 89359c93d5f36c8b458428e147000352fa7ad01d |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | guardian/validators.py |
| max_issues_repo_name | dawid1stanek/guardian |
| max_issues_repo_head_hexsha | 89359c93d5f36c8b458428e147000352fa7ad01d |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | guardian/validators.py |
| max_forks_repo_name | dawid1stanek/guardian |
| max_forks_repo_head_hexsha | 89359c93d5f36c8b458428e147000352fa7ad01d |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

`content` (indentation restored; the extraction flattened it):
```python
#!/usr/bin/env python

import os
import socket
import subprocess
import argparse
import logging

LOGGER = logging.getLogger(__name__)


class ValidatorError(Exception):
    pass


def ping(address):
    try:
        subprocess.check_call(('ping', '-c 1', '-W 1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        LOGGER.info('Ping server %s - OK', address)
    except subprocess.CalledProcessError as e:
        LOGGER.error('Ping server %s - Failed', address)
        raise ValidatorError(e)
ping.short_name = 'PING'


def port(address, port):
    s = socket.socket()
    try:
        s.connect((address, port))
        LOGGER.info('Checking port %s:%d - OK', address, port)
    except socket.error as e:
        LOGGER.error('Checking port %s:%d - Failed', address, port)
        raise ValidatorError(e)
port.short_name = 'PORT'
```
Quality signals:

| signal | value |
|---|---|
| avg_line_length | 24.2 |
| max_line_length | 112 |
| alphanum_fraction | 0.663518 |
| qsc_code_num_words | 109 |
| qsc_code_num_chars | 847 |
| qsc_code_mean_word_length | 5.091743 |
| qsc_code_frac_words_unique | 0.412844 |
| qsc_code_frac_chars_top_2grams | 0.079279 |
| qsc_code_frac_chars_top_3grams | 0.03964 |
| qsc_code_frac_chars_top_4grams | 0.05045 |
| qsc_code_frac_chars_dupe_5grams | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.00299 |
| qsc_code_frac_chars_whitespace | 0.210153 |
| qsc_code_size_file_byte | 847 |
| qsc_code_num_lines | 34 |
| qsc_code_num_chars_line_max | 113 |
| qsc_code_num_chars_line_mean | 24.911765 |
| qsc_code_frac_chars_alphabet | 0.826607 |
| qsc_code_frac_chars_comments | 0.023613 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0.16 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.138015 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0.08 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0.04 |
| qsc_codepython_frac_lines_import | 0.2 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.32 |
| qsc_codepython_frac_lines_print | 0 |

The unsuffixed mirror columns are all 0 for this row, except the two null columns noted above; `effective` is 1 and `hits` is 0.
**Row 3 of 5 — `schwendp/hoomd-blue` · `hoomd/mpcd/test-py/stream_slit_test.py`**

| field | value |
|---|---|
| hexsha | 7caad7d95f67042bb7aad81b10bf684a91160170 |
| size | 9603 |
| ext | py |
| lang | Python |
| max_stars_repo_path | hoomd/mpcd/test-py/stream_slit_test.py |
| max_stars_repo_name | schwendp/hoomd-blue |
| max_stars_repo_head_hexsha | df7970121b19bc4f8674348ab3241055ac87153b |
| max_stars_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | 2 |
| max_stars_repo_stars_event_min_datetime | 2020-03-30T14:38:50.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-06-02T05:53:41.000Z |
| max_issues_repo_path | hoomd/mpcd/test-py/stream_slit_test.py |
| max_issues_repo_name | schwendp/hoomd-blue |
| max_issues_repo_head_hexsha | df7970121b19bc4f8674348ab3241055ac87153b |
| max_issues_repo_licenses | ["BSD-3-Clause"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | hoomd/mpcd/test-py/stream_slit_test.py |
| max_forks_repo_name | schwendp/hoomd-blue |
| max_forks_repo_head_hexsha | df7970121b19bc4f8674348ab3241055ac87153b |
| max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2020-05-20T07:00:08.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-05-20T07:00:08.000Z |

`content` (indentation restored; the extraction flattened it):
```python
# Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

# Maintainer: mphoward

import unittest
import numpy as np
import hoomd
from hoomd import md
from hoomd import mpcd

# unit tests for mpcd slit streaming geometry
class mpcd_stream_slit_test(unittest.TestCase):
    def setUp(self):
        # establish the simulation context
        hoomd.context.initialize()

        # set the decomposition in z for mpi builds
        if hoomd.comm.get_num_ranks() > 1:
            hoomd.comm.decomposition(nz=2)

        # default testing configuration
        hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.)))

        # initialize the system from the starting snapshot
        snap = mpcd.data.make_snapshot(N=2)
        snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]]
        snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]]
        self.s = mpcd.init.read_snapshot(snap)

        mpcd.integrator(dt=0.1)

    # test creation can happen (with all parameters set)
    def test_create(self):
        mpcd.stream.slit(H=4., V=0.1, boundary="no_slip", period=2)

    # test for setting parameters
    def test_set_params(self):
        slit = mpcd.stream.slit(H=4.)
        self.assertAlmostEqual(slit.H, 4.)
        self.assertAlmostEqual(slit.V, 0.)
        self.assertEqual(slit.boundary, "no_slip")
        self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.)
        self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.)
        self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip)

        # change H and also ensure other parameters stay the same
        slit.set_params(H=2.)
        self.assertAlmostEqual(slit.H, 2.)
        self.assertAlmostEqual(slit.V, 0.)
        self.assertEqual(slit.boundary, "no_slip")
        self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.)
        self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.)
        self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip)

        # change V
        slit.set_params(V=0.1)
        self.assertAlmostEqual(slit.V, 0.1)
        self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1)

        # change BCs
        slit.set_params(boundary="slip")
        self.assertEqual(slit.boundary, "slip")
        self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip)

    # test for invalid boundary conditions being set
    def test_bad_boundary(self):
        slit = mpcd.stream.slit(H=4.)
        slit.set_params(boundary="no_slip")
        slit.set_params(boundary="slip")

        with self.assertRaises(ValueError):
            slit.set_params(boundary="invalid")

    # test basic stepping behavior with no slip boundary conditions
    def test_step_noslip(self):
        mpcd.stream.slit(H=4.)

        # take one step
        hoomd.run(1)
        snap = self.s.take_snapshot()
        if hoomd.comm.get_rank() == 0:
            np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95])
            np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.])
            np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9])
            np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])

        # take another step where one particle will now hit the wall
        hoomd.run(1)
        snap = self.s.take_snapshot()
        if hoomd.comm.get_rank() == 0:
            np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95])
            np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.])
            np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0])
            np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])

        # take another step, wrapping the second particle through the boundary
        hoomd.run(1)
        snap = self.s.take_snapshot()
        if hoomd.comm.get_rank() == 0:
            np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85])
            np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.])
            np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9])
            np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.])

    def test_step_moving_wall(self):
        mpcd.stream.slit(H=4., boundary="no_slip", V=1.0, period=3)

        # change velocity of lower particle so it is translating relative to wall
        snap = self.s.take_snapshot()
        if hoomd.comm.get_rank() == 0:
            snap.particles.velocity[1] = [-2.,-1.,-1.]
        self.s.restore_snapshot(snap)

        # run one step and check bounce back of particles
        hoomd.run(1)
        snap = self.s.take_snapshot()
        if hoomd.comm.get_rank() == 0:
            # the first particle is matched exactly to the wall speed, and so it will translate at
            # same velocity along +x for 3 steps. It will bounce back in y and z to where it started.
            # (vx stays the same, and vy and vz flip.)
            np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85])
            np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.])

            # the second particle has y and z velocities flip again, and since it started closer,
            # it moves relative to original position.
            np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9])
            np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.])

    # test basic stepping behavior with slip boundary conditions
    def test_step_slip(self):
        mpcd.stream.slit(H=4., boundary="slip")

        # take one step
        hoomd.run(1)
        snap = self.s.take_snapshot()
        if hoomd.comm.get_rank() == 0:
            np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95])
            np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.])
            np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9])
            np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])

        # take another step where one particle will now hit the wall
        hoomd.run(1)
        snap = self.s.take_snapshot()
        if hoomd.comm.get_rank() == 0:
            np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95])
            np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.])
            np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0])
            np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])

        # take another step, wrapping the second particle through the boundary
        hoomd.run(1)
        snap = self.s.take_snapshot()
        if hoomd.comm.get_rank() == 0:
            np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85])
            np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.])
            np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9])
            np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.])

    # test that setting the slit size too large raises an error
    def test_validate_box(self):
        # initial configuration is invalid
        slit = mpcd.stream.slit(H=10.)
        with self.assertRaises(RuntimeError):
            hoomd.run(1)

        # now it should be valid
        slit.set_params(H=4.)
        hoomd.run(2)

        # make sure we can invalidate it again
        slit.set_params(H=10.)
        with self.assertRaises(RuntimeError):
            hoomd.run(1)

    # test that particles out of bounds can be caught
    def test_out_of_bounds(self):
        slit = mpcd.stream.slit(H=3.8)
        with self.assertRaises(RuntimeError):
            hoomd.run(1)

        slit.set_params(H=3.85)
        hoomd.run(1)

    # test that virtual particle filler can be attached, removed, and updated
    def test_filler(self):
        # initialization of a filler
        slit = mpcd.stream.slit(H=4.)
        slit.set_filler(density=5., kT=1.0, seed=42, type='A')
        self.assertTrue(slit._filler is not None)

        # run should be able to setup the filler, although this all happens silently
        hoomd.run(1)

        # changing the geometry should still be OK with a run
        slit.set_params(V=1.0)
        hoomd.run(1)

        # changing filler should be allowed
        slit.set_filler(density=10., kT=1.5, seed=7)
        self.assertTrue(slit._filler is not None)
        hoomd.run(1)

        # assert an error is raised if we set a bad particle type
        with self.assertRaises(RuntimeError):
            slit.set_filler(density=5., kT=1.0, seed=42, type='B')

        # assert an error is raised if we set a bad density
        with self.assertRaises(RuntimeError):
            slit.set_filler(density=-1.0, kT=1.0, seed=42)

        # removing the filler should still allow a run
        slit.remove_filler()
        self.assertTrue(slit._filler is None)
        hoomd.run(1)

    def tearDown(self):
        del self.s

if __name__ == '__main__':
    unittest.main(argv = ['test.py', '-v'])
```
Quality signals:

| signal | value |
|---|---|
| avg_line_length | 43.06278 |
| max_line_length | 101 |
| alphanum_fraction | 0.64157 |
| qsc_code_num_words | 1390 |
| qsc_code_num_chars | 9603 |
| qsc_code_mean_word_length | 4.308633 |
| qsc_code_frac_words_unique | 0.171942 |
| qsc_code_frac_chars_top_2grams | 0.013024 |
| qsc_code_frac_chars_top_3grams | 0.070129 |
| qsc_code_frac_chars_top_4grams | 0.093505 |
| qsc_code_frac_chars_dupe_5grams | 0.636834 |
| qsc_code_frac_chars_dupe_6grams | 0.595258 |
| qsc_code_frac_chars_dupe_7grams | 0.562531 |
| qsc_code_frac_chars_dupe_8grams | 0.51845 |
| qsc_code_frac_chars_dupe_9grams | 0.496577 |
| qsc_code_frac_chars_dupe_10grams | 0.470863 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.040615 |
| qsc_code_frac_chars_whitespace | 0.228262 |
| qsc_code_size_file_byte | 9603 |
| qsc_code_num_lines | 222 |
| qsc_code_num_chars_line_max | 102 |
| qsc_code_num_chars_line_mean | 43.256757 |
| qsc_code_frac_chars_alphabet | 0.767508 |
| qsc_code_frac_chars_comments | 0.212954 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0.492857 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.010246 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0.378571 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0.078571 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0.035714 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.121429 |
| qsc_codepython_frac_lines_print | 0 |

The unsuffixed mirror columns are all 0 for this row, except the two null columns noted above; `effective` is 1 and `hits` is 0.
**Row 4 of 5 — `haoyuchen1992/CourseBuilder` · `tests/functional/model_models.py`**

| field | value |
|---|---|
| hexsha | 7caae2b0b77242e98f5f62bea314586497fa86a7 |
| size | 7261 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/functional/model_models.py |
| max_stars_repo_name | haoyuchen1992/CourseBuilder |
| max_stars_repo_head_hexsha | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2015-04-15T08:38:08.000Z |
| max_stars_repo_stars_event_max_datetime | 2015-04-15T08:38:08.000Z |
| max_issues_repo_path | tests/functional/model_models.py |
| max_issues_repo_name | haoyuchen1992/CourseBuilder |
| max_issues_repo_head_hexsha | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2021-06-08T09:49:12.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-06-08T09:49:12.000Z |
| max_forks_repo_path | tests/functional/model_models.py |
| max_forks_repo_name | haoyuchen1992/CourseBuilder |
| max_forks_repo_head_hexsha | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2015-10-25T12:39:07.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-06-08T09:47:34.000Z |

`content` (indentation restored; the extraction flattened it):
```python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Functional tests for models.models."""

__author__ = [
    'johncox@google.com (John Cox)',
]

import datetime

from models import models
from tests.functional import actions

# Disable complaints about docstrings for self-documenting tests.
# pylint: disable-msg=g-missing-docstring


class EventEntityTestCase(actions.ExportTestBase):

    def test_for_export_transforms_correctly(self):
        event = models.EventEntity(source='source', user_id='1')
        key = event.put()
        exported = event.for_export(self.transform)

        self.assert_blacklisted_properties_removed(event, exported)
        self.assertEqual('source', event.source)
        self.assertEqual('transformed_1', exported.user_id)
        self.assertEqual(key, models.EventEntity.safe_key(key, self.transform))


class PersonalProfileTestCase(actions.ExportTestBase):

    def test_for_export_transforms_correctly_and_sets_safe_key(self):
        date_of_birth = datetime.date.today()
        email = 'test@example.com'
        legal_name = 'legal_name'
        nick_name = 'nick_name'
        user_id = '1'
        profile = models.PersonalProfile(
            date_of_birth=date_of_birth, email=email, key_name=user_id,
            legal_name=legal_name, nick_name=nick_name)
        profile.put()
        exported = profile.for_export(self.transform)

        self.assert_blacklisted_properties_removed(profile, exported)
        self.assertEqual(
            self.transform(user_id), exported.safe_key.name())


class QuestionDAOTestCase(actions.TestBase):
    """Functional tests for QuestionDAO."""

    # Name determined by parent. pylint: disable-msg=g-bad-name
    def setUp(self):
        """Sets up datastore contents."""
        super(QuestionDAOTestCase, self).setUp()

        self.used_twice_question_id = 1
        self.used_twice_question_dto = models.QuestionDTO(
            self.used_twice_question_id, {})

        self.used_once_question_id = 2
        self.used_once_question_dto = models.QuestionDTO(
            self.used_once_question_id, {})

        self.unused_question_id = 3
        self.unused_question_dto = models.QuestionDTO(
            self.unused_question_id, {})
        models.QuestionDAO.save_all([
            self.used_twice_question_dto, self.used_once_question_dto,
            self.unused_question_dto])

        # Handcoding the dicts. This is dangerous because they're handcoded
        # elsewhere, the implementations could fall out of sync, and these tests
        # may then pass erroneously.
        self.first_question_group_description = 'first_question_group'
        self.first_question_group_id = 4
        self.first_question_group_dto = models.QuestionGroupDTO(
            self.first_question_group_id,
            {'description': self.first_question_group_description,
             'items': [{'question': str(self.used_once_question_id)}]})

        self.second_question_group_description = 'second_question_group'
        self.second_question_group_id = 5
        self.second_question_group_dto = models.QuestionGroupDTO(
            self.second_question_group_id,
            {'description': self.second_question_group_description,
             'items': [{'question': str(self.used_twice_question_id)}]})

        self.third_question_group_description = 'third_question_group'
        self.third_question_group_id = 6
        self.third_question_group_dto = models.QuestionGroupDTO(
            self.third_question_group_id,
            {'description': self.third_question_group_description,
             'items': [{'question': str(self.used_twice_question_id)}]})

        models.QuestionGroupDAO.save_all([
            self.first_question_group_dto, self.second_question_group_dto,
            self.third_question_group_dto])

    def test_used_by_returns_description_of_single_question_group(self):
        self.assertEqual(
            [self.first_question_group_description],
            models.QuestionDAO.used_by(self.used_once_question_id))

    def test_used_by_returns_descriptions_of_multiple_question_groups(self):
        self.assertEqual(
            [self.second_question_group_description,
             self.third_question_group_description],
            models.QuestionDAO.used_by(self.used_twice_question_id))

    def test_used_by_returns_empty_list_for_unused_question(self):
        not_found_id = 7
        self.assertFalse(models.QuestionDAO.load(not_found_id))
        self.assertEqual([], models.QuestionDAO.used_by(not_found_id))


class StudentTestCase(actions.ExportTestBase):

    def test_for_export_transforms_correctly(self):
        user_id = '1'
        student = models.Student(key_name='name', user_id='1', is_enrolled=True)
        key = student.put()
        exported = student.for_export(self.transform)

        self.assert_blacklisted_properties_removed(student, exported)
        self.assertTrue(exported.is_enrolled)
        self.assertEqual('transformed_1', exported.user_id)
        self.assertEqual(
            'transformed_' + user_id, exported.key_by_user_id.name())
        self.assertEqual(
            models.Student.safe_key(key, self.transform), exported.safe_key)

    def test_get_key_does_not_transform_by_default(self):
        user_id = 'user_id'
        student = models.Student(key_name='name', user_id=user_id)
        student.put()
        self.assertEqual(user_id, student.get_key().name())

    def test_safe_key_transforms_name(self):
        key = models.Student(key_name='name').put()
        self.assertEqual(
            'transformed_name',
            models.Student.safe_key(key, self.transform).name())


class StudentAnswersEntityTestCase(actions.ExportTestBase):

    def test_safe_key_transforms_name(self):
        student_key = models.Student(key_name='name').put()
        answers = models.StudentAnswersEntity(key_name=student_key.name())
        answers_key = answers.put()
        self.assertEqual(
            'transformed_name',
            models.StudentAnswersEntity.safe_key(
                answers_key, self.transform).name())


class StudentPropertyEntityTestCase(actions.ExportTestBase):

    def test_safe_key_transforms_user_id_component(self):
        user_id = 'user_id'
        student = models.Student(key_name='email@example.com', user_id=user_id)
        student.put()
        property_name = 'property-name'
        student_property_key = models.StudentPropertyEntity.create(
            student, property_name).put()
        self.assertEqual(
            'transformed_%s-%s' % (user_id, property_name),
            models.StudentPropertyEntity.safe_key(
                student_property_key, self.transform).name())
```
Quality signals:

| signal | value |
|---|---|
| avg_line_length | 39.677596 |
| max_line_length | 80 |
| alphanum_fraction | 0.700454 |
| qsc_code_num_words | 868 |
| qsc_code_num_chars | 7261 |
| qsc_code_mean_word_length | 5.543779 |
| qsc_code_frac_words_unique | 0.233871 |
| qsc_code_frac_chars_top_2grams | 0.067539 |
| qsc_code_frac_chars_top_3grams | 0.044888 |
| qsc_code_frac_chars_top_4grams | 0.030549 |
| qsc_code_frac_chars_dupe_5grams | 0.487531 |
| qsc_code_frac_chars_dupe_6grams | 0.348919 |
| qsc_code_frac_chars_dupe_7grams | 0.264963 |
| qsc_code_frac_chars_dupe_8grams | 0.198047 |
| qsc_code_frac_chars_dupe_9grams | 0.152535 |
| qsc_code_frac_chars_dupe_10grams | 0.044057 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.003653 |
| qsc_code_frac_chars_whitespace | 0.208374 |
| qsc_code_size_file_byte | 7261 |
| qsc_code_num_lines | 182 |
| qsc_code_num_chars_line_max | 81 |
| qsc_code_num_chars_line_mean | 39.895604 |
| qsc_code_frac_chars_alphabet | 0.833507 |
| qsc_code_frac_chars_comments | 0.137033 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0.195122 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.057766 |
| qsc_code_frac_chars_long_word_length | 0.00337 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0.154472 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0.089431 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0.02439 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.162602 |
| qsc_codepython_frac_lines_print | 0 |

The unsuffixed mirror columns are all 0 for this row, except the two null columns noted above; `effective` is 1 and `hits` is 0.
**Row 5 of 5 — `iseessel/audio` · `torchaudio/functional/functional.py`**

| field | value |
|---|---|
| hexsha | 7cab92b7f1c079530e07c1c01ef7a728efea9d02 |
| size | 43187 |
| ext | py |
| lang | Python |
| max_stars_repo_path | torchaudio/functional/functional.py |
| max_stars_repo_name | iseessel/audio |
| max_stars_repo_head_hexsha | 64551a69186d28db1f499ba373f1b19c6a7ed894 |
| max_stars_repo_licenses | ["BSD-2-Clause"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | torchaudio/functional/functional.py |
| max_issues_repo_name | iseessel/audio |
| max_issues_repo_head_hexsha | 64551a69186d28db1f499ba373f1b19c6a7ed894 |
| max_issues_repo_licenses | ["BSD-2-Clause"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | torchaudio/functional/functional.py |
| max_forks_repo_name | iseessel/audio |
| max_forks_repo_head_hexsha | 64551a69186d28db1f499ba373f1b19c6a7ed894 |
| max_forks_repo_licenses | ["BSD-2-Clause"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

`content` (indentation restored; the extraction flattened it, and the record is cut off mid-file where the extract ends):
# -*- coding: utf-8 -*-
import io
import math
import warnings
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mod_utils
import torchaudio
__all__ = [
"spectrogram",
"griffinlim",
"amplitude_to_DB",
"DB_to_amplitude",
"compute_deltas",
"compute_kaldi_pitch",
"create_fb_matrix",
"create_dct",
"compute_deltas",
"detect_pitch_frequency",
"DB_to_amplitude",
"mu_law_encoding",
"mu_law_decoding",
"complex_norm",
"angle",
"magphase",
"phase_vocoder",
'mask_along_axis',
'mask_along_axis_iid',
'sliding_window_cmn',
"spectral_centroid",
"apply_codec",
]
def spectrogram(
waveform: Tensor,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: Optional[float],
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True
) -> Tensor:
r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
The spectrogram can be either magnitude-only or complex.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
power (float or None): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead.
normalized (bool): Whether to normalize by magnitude after stft
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
Returns:
Tensor: Dimension (..., freq, time), freq is
``n_fft // 2 + 1`` and ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
if pad > 0:
# TODO add "with torch.no_grad():" back when JIT supports it
waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
spec_f = torch.stft(
input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=False,
onesided=onesided,
return_complex=True,
)
# unpack batch
spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
if normalized:
spec_f /= window.pow(2.).sum().sqrt()
if power is not None:
if power == 1.0:
return spec_f.abs()
return spec_f.abs().pow(power)
return torch.view_as_real(spec_f)
def griffinlim(
specgram: Tensor,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: float,
normalized: bool,
n_iter: int,
momentum: float,
length: Optional[int],
rand_init: bool
) -> Tensor:
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from `librosa`.
* [1] McFee, Brian, Colin Raffel, Dawen Liang, Daniel PW Ellis, Matt McVicar, Eric Battenberg, and Oriol Nieto.
"librosa: Audio and music signal analysis in python."
In Proceedings of the 14th python in science conference, pp. 18-25. 2015.
* [2] Perraudin, N., Balazs, P., & Søndergaard, P. L.
"A fast Griffin-Lim algorithm,"
IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4),
Oct. 2013.
* [3] D. W. Griffin and J. S. Lim,
"Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984.
Args:
specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames)
where freq is ``n_fft // 2 + 1``.
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins
hop_length (int): Length of hop between STFT windows. (
Default: ``win_length // 2``)
win_length (int): Window size. (Default: ``n_fft``)
power (float): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
normalized (bool): Whether to normalize by magnitude after stft.
n_iter (int): Number of iteration for phase recovery process.
momentum (float): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge.
length (int or None): Array length of the expected output.
rand_init (bool): Initializes phase randomly if True, to zero otherwise.
Returns:
torch.Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
"""
assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
if normalized:
warnings.warn(
"The argument normalized is not used in Griffin-Lim, "
"and will be removed in v0.9.0 release. To suppress this warning, "
"please use `normalized=False`.")
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
specgram = specgram.pow(1 / power)
# randomly initialize the phase
batch, freq, frames = specgram.size()
if rand_init:
angles = 2 * math.pi * torch.rand(batch, freq, frames)
else:
angles = torch.zeros(batch, freq, frames)
angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \
.to(dtype=specgram.dtype, device=specgram.device)
specgram = specgram.unsqueeze(-1).expand_as(angles)
# And initialize the previous iterate to 0
rebuilt = torch.tensor(0.)
for _ in range(n_iter):
# Store the previous iterate
tprev = rebuilt
# Invert with our current estimate of the phases
inverse = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length).float()
# Rebuild the spectrogram
rebuilt = torch.view_as_real(
torch.stft(
input=inverse,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=True,
pad_mode='reflect',
normalized=False,
onesided=True,
return_complex=True,
)
)
# Update our phase estimates
angles = rebuilt
if momentum:
angles = angles - tprev.mul_(momentum / (1 + momentum))
angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles))
# Return the final phase estimates
waveform = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length)
# unpack batch
waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
return waveform
def amplitude_to_DB(
x: Tensor,
multiplier: float,
amin: float,
db_multiplier: float,
top_db: Optional[float] = None
) -> Tensor:
r"""Turn a spectrogram from the power/amplitude scale to the decibel scale.
The output of each tensor in a batch depends on the maximum value of that tensor,
and so may return different values for an audio clip split into snippets vs. a full clip.
Args:
x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take
the form `(..., freq, time)`. Batched inputs should include a channel dimension and
have the form `(batch, channel, freq, time)`.
multiplier (float): Use 10. for power and 20. for amplitude
amin (float): Number to clamp ``x``
db_multiplier (float): Log10(max(reference value and amin))
top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
Returns:
Tensor: Output tensor in decibel scale
"""
x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
x_db -= multiplier * db_multiplier
if top_db is not None:
# Expand batch
shape = x_db.size()
packed_channels = shape[-3] if x_db.dim() > 2 else 1
x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])
x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))
# Repack batch
x_db = x_db.reshape(shape)
return x_db
def DB_to_amplitude(
x: Tensor,
ref: float,
power: float
) -> Tensor:
r"""Turn a tensor from the decibel scale to the power/amplitude scale.
Args:
x (Tensor): Input tensor before being converted to power/amplitude scale.
ref (float): Reference which the output will be scaled by.
power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude.
Returns:
Tensor: Output tensor in power/amplitude scale.
"""
return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)
def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float:
r"""Convert Hz to Mels.
Args:
freqs (float): Frequencies in Hz
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
mels (float): Frequency in Mels
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 2595.0 * math.log10(1.0 + (freq / 700.0))
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (freq - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
if freq >= min_log_hz:
mels = min_log_mel + math.log(freq / min_log_hz) / logstep
return mels
def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor:
"""Convert mel bin numbers to frequencies.
Args:
mels (Tensor): Mel frequencies
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
freqs (Tensor): Mels converted in Hz
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel))
return freqs
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None,
mel_scale: str = "htk",
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
m_min = _hz_to_mel(f_min, mel_scale=mel_scale)
m_max = _hz_to_mel(f_max, mel_scale=mel_scale)
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values. "
f"The value for `n_mels` ({n_mels}) may be set too high. "
f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
)
return fb
def create_dct(
n_mfcc: int,
n_mels: int,
norm: Optional[str]
) -> Tensor:
r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
normalized depending on norm.
Args:
n_mfcc (int): Number of mfc coefficients to retain
n_mels (int): Number of mel filterbanks
norm (str or None): Norm to use (either 'ortho' or None)
Returns:
Tensor: The transformation matrix, to be right-multiplied to
row-wise data of size (``n_mels``, ``n_mfcc``).
"""
# http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
n = torch.arange(float(n_mels))
k = torch.arange(float(n_mfcc)).unsqueeze(1)
dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels)
if norm is None:
dct *= 2.0
else:
assert norm == "ortho"
dct[0] *= 1.0 / math.sqrt(2.0)
dct *= math.sqrt(2.0 / float(n_mels))
return dct.t()
def mu_law_encoding(
x: Tensor,
quantization_channels: int
) -> Tensor:
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1.
Args:
x (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law encoding
"""
mu = quantization_channels - 1.0
if not x.is_floating_point():
x = x.to(torch.float)
mu = torch.tensor(mu, dtype=x.dtype)
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
return x_mu
def mu_law_decoding(
x_mu: Tensor,
quantization_channels: int
) -> Tensor:
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
x_mu (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law decoding
"""
mu = quantization_channels - 1.0
if not x_mu.is_floating_point():
x_mu = x_mu.to(torch.float)
mu = torch.tensor(mu, dtype=x_mu.dtype)
x = ((x_mu) / mu) * 2 - 1.0
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
return x
def complex_norm(
complex_tensor: Tensor,
power: float = 1.0
) -> Tensor:
r"""Compute the norm of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`).
Returns:
Tensor: Power of the normed input tensor. Shape of `(..., )`
"""
# Replace by torch.norm once issue is fixed
# https://github.com/pytorch/pytorch/issues/34279
return complex_tensor.pow(2.).sum(-1).pow(0.5 * power)
def angle(
complex_tensor: Tensor
) -> Tensor:
r"""Compute the angle of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
Return:
Tensor: Angle of a complex tensor. Shape of `(..., )`
"""
return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])
def magphase(
complex_tensor: Tensor,
power: float = 1.0
) -> Tuple[Tensor, Tensor]:
r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`)
Returns:
(Tensor, Tensor): The magnitude and phase of the complex tensor
"""
mag = complex_norm(complex_tensor, power)
phase = angle(complex_tensor)
return mag, phase
def phase_vocoder(
complex_specgrams: Tensor,
rate: float,
phase_advance: Tensor
) -> Tensor:
r"""Given a STFT tensor, speed up in time without modifying pitch by a
factor of ``rate``.
Args:
complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)`
rate (float): Speed-up factor
phase_advance (Tensor): Expected phase advance in each bin. Dimension of (freq, 1)
Returns:
Tensor: Complex Specgrams Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)`
Example
>>> freq, hop_length = 1025, 512
>>> # (channel, freq, time, complex=2)
>>> complex_specgrams = torch.randn(2, freq, 300, 2)
>>> rate = 1.3 # Speed up by 30%
>>> phase_advance = torch.linspace(
>>> 0, math.pi * hop_length, freq)[..., None]
>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
>>> x.shape # with 231 == ceil(300 / 1.3)
torch.Size([2, 1025, 231, 2])
"""
# pack batch
shape = complex_specgrams.size()
complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:]))
time_steps = torch.arange(0,
complex_specgrams.size(-2),
rate,
device=complex_specgrams.device,
dtype=complex_specgrams.dtype)
alphas = time_steps % 1.0
phase_0 = angle(complex_specgrams[..., :1, :])
# Time Padding
complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2])
# (new_bins, freq, 2)
complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long())
complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long())
angle_0 = angle(complex_specgrams_0)
angle_1 = angle(complex_specgrams_1)
norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1)
norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1)
phase = angle_1 - angle_0 - phase_advance
phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))
# Compute Phase Accum
phase = phase + phase_advance
phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
phase_acc = torch.cumsum(phase, -1)
mag = alphas * norm_1 + (1 - alphas) * norm_0
real_stretch = mag * torch.cos(phase_acc)
imag_stretch = mag * torch.sin(phase_acc)
complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1)
# unpack batch
complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:])
return complex_specgrams_stretch
def mask_along_axis_iid(
specgrams: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
Args:
specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)
Returns:
Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
"""
if axis != 2 and axis != 3:
raise ValueError('Only Frequency and Time masking are supported')
device = specgrams.device
dtype = specgrams.dtype
value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param
min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)
# Create broadcastable mask
mask_start = min_value[..., None, None]
mask_end = (min_value + value)[..., None, None]
mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
def mask_along_axis(
specgram: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
All examples will have the same mask interval.
Args:
specgram (Tensor): Real spectrogram (channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)
Returns:
Tensor: Masked spectrogram of dimensions (channel, freq, time)
"""
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
value = torch.rand(1) * mask_param
min_value = torch.rand(1) * (specgram.size(axis) - value)
mask_start = (min_value.long()).squeeze()
mask_end = (min_value.long() + value.long()).squeeze()
assert mask_end - mask_start < mask_param
if axis == 1:
specgram[:, mask_start:mask_end] = mask_value
elif axis == 2:
specgram[:, :, mask_start:mask_end] = mask_value
else:
raise ValueError('Only Frequency and Time masking are supported')
# unpack batch
specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])
return specgram
def compute_deltas(
specgram: Tensor,
win_length: int = 5,
mode: str = "replicate"
) -> Tensor:
r"""Compute delta coefficients of a tensor, usually a spectrogram:
.. math::
d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}
where :math:`d_t` is the deltas at time :math:`t`,
:math:`c_t` is the spectrogram coeffcients at time :math:`t`,
:math:`N` is ``(win_length-1)//2``.
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time)
win_length (int, optional): The window length used for computing delta (Default: ``5``)
mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``)
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time)
Example
>>> specgram = torch.randn(1, 40, 1000)
>>> delta = compute_deltas(specgram)
>>> delta2 = compute_deltas(delta)
"""
device = specgram.device
dtype = specgram.dtype
# pack batch
shape = specgram.size()
specgram = specgram.reshape(1, -1, shape[-1])
assert win_length >= 3
n = (win_length - 1) // 2
# twice sum of integer squared
denom = n * (n + 1) * (2 * n + 1) / 3
specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)
kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)
output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom
# unpack batch
output = output.reshape(shape)
return output
def _compute_nccf(
waveform: Tensor,
sample_rate: int,
frame_time: float,
freq_low: int
) -> Tensor:
r"""
Compute Normalized Cross-Correlation Function (NCCF).
.. math::
\phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},
where
:math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
:math:`w` is the waveform,
:math:`N` is the length of a frame,
:math:`b_i` is the beginning of frame :math:`i`,
:math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
"""
EPSILON = 10 ** (-9)
# Number of lags to check
lags = int(math.ceil(sample_rate / freq_low))
frame_size = int(math.ceil(sample_rate * frame_time))
waveform_length = waveform.size()[-1]
num_of_frames = int(math.ceil(waveform_length / frame_size))
p = lags + num_of_frames * frame_size - waveform_length
waveform = torch.nn.functional.pad(waveform, (0, p))
# Compute lags
output_lag = []
for lag in range(1, lags + 1):
s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
output_frames = (
(s1 * s2).sum(-1)
/ (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)
/ (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)
)
output_lag.append(output_frames.unsqueeze(-1))
nccf = torch.cat(output_lag, -1)
return nccf
def _combine_max(
a: Tuple[Tensor, Tensor],
b: Tuple[Tensor, Tensor],
thresh: float = 0.99
) -> Tuple[Tensor, Tensor]:
"""
Take value from first if bigger than a multiplicative factor of the second, elementwise.
"""
mask = (a[0] > thresh * b[0])
values = mask * a[0] + ~mask * b[0]
indices = mask * a[1] + ~mask * b[1]
return values, indices
def _find_max_per_frame(
nccf: Tensor,
sample_rate: int,
freq_high: int
) -> Tensor:
r"""
For each frame, take the highest value of NCCF,
apply centered median smoothing, and convert to frequency.
Note: If the max among all the lags is very close
to the first half of lags, then the latter is taken.
"""
lag_min = int(math.ceil(sample_rate / freq_high))
# Find near enough max that is smallest
best = torch.max(nccf[..., lag_min:], -1)
half_size = nccf.shape[-1] // 2
half = torch.max(nccf[..., lag_min:half_size], -1)
best = _combine_max(half, best)
indices = best[1]
# Add back minimal lag
indices += lag_min
# Add 1 empirical calibration offset
indices += 1
return indices
def _median_smoothing(
indices: Tensor,
win_length: int
) -> Tensor:
r"""
Apply median smoothing to the 1D tensor over the given window.
"""
# Centered windowed
pad_length = (win_length - 1) // 2
# "replicate" padding in any dimension
indices = torch.nn.functional.pad(
indices, (pad_length, 0), mode="constant", value=0.
)
indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
roll = indices.unfold(-1, win_length, 1)
values, _ = torch.median(roll, -1)
return values
def detect_pitch_frequency(
waveform: Tensor,
sample_rate: int,
frame_time: float = 10 ** (-2),
win_length: int = 30,
freq_low: int = 85,
freq_high: int = 3400,
) -> Tensor:
r"""Detect pitch frequency.
It is implemented using normalized cross-correlation function and median smoothing.
Args:
waveform (Tensor): Tensor of audio of dimension (..., freq, time)
sample_rate (int): The sample rate of the waveform (Hz)
frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``).
win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).
freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).
freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).
Returns:
Tensor: Tensor of freq of dimension (..., frame)
"""
# pack batch
shape = list(waveform.size())
waveform = waveform.reshape([-1] + shape[-1:])
nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
indices = _find_max_per_frame(nccf, sample_rate, freq_high)
indices = _median_smoothing(indices, win_length)
# Convert indices to frequency
EPSILON = 10 ** (-9)
freq = sample_rate / (EPSILON + indices.to(torch.float))
# unpack batch
freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))
return freq
def sliding_window_cmn(
waveform: Tensor,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False,
) -> Tensor:
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
waveform (Tensor): Tensor of audio of dimension (..., freq, time)
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
Returns:
Tensor: Tensor of freq of dimension (..., frame)
"""
input_shape = waveform.shape
num_frames, num_feats = input_shape[-2:]
waveform = waveform.view(-1, num_frames, num_feats)
num_channels = waveform.shape[0]
dtype = waveform.dtype
device = waveform.device
last_window_start = last_window_end = -1
cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cmn_waveform = torch.zeros(
num_channels, num_frames, num_feats, dtype=dtype, device=device)
for t in range(num_frames):
window_start = 0
window_end = 0
if center:
window_start = t - cmn_window // 2
window_end = window_start + cmn_window
else:
window_start = t - cmn_window
window_end = t + 1
if window_start < 0:
window_end -= window_start
window_start = 0
if not center:
if window_end > t:
window_end = max(t + 1, min_cmn_window)
if window_end > num_frames:
window_start -= (window_end - num_frames)
window_end = num_frames
if window_start < 0:
window_start = 0
if last_window_start == -1:
input_part = waveform[:, window_start: window_end - window_start, :]
cur_sum += torch.sum(input_part, 1)
if norm_vars:
cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :]
else:
if window_start > last_window_start:
frame_to_remove = waveform[:, last_window_start, :]
cur_sum -= frame_to_remove
if norm_vars:
cur_sumsq -= (frame_to_remove ** 2)
if window_end > last_window_end:
frame_to_add = waveform[:, last_window_end, :]
cur_sum += frame_to_add
if norm_vars:
cur_sumsq += (frame_to_add ** 2)
window_frames = window_end - window_start
last_window_start = window_start
last_window_end = window_end
cmn_waveform[:, t, :] = waveform[:, t, :] - cur_sum / window_frames
if norm_vars:
if window_frames == 1:
cmn_waveform[:, t, :] = torch.zeros(
num_channels, num_feats, dtype=dtype, device=device)
else:
variance = cur_sumsq
variance = variance / window_frames
variance -= ((cur_sum ** 2) / (window_frames ** 2))
variance = torch.pow(variance, -0.5)
cmn_waveform[:, t, :] *= variance
cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats))
if len(input_shape) == 2:
cmn_waveform = cmn_waveform.squeeze(0)
return cmn_waveform
def spectral_centroid(
waveform: Tensor,
sample_rate: int,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
) -> Tensor:
r"""
Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
sample_rate (int): Sample rate of the audio waveform
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
Returns:
Tensor: Dimension (..., time)
"""
specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, power=1., normalized=False)
freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2,
device=specgram.device).reshape((-1, 1))
freq_dim = -2
return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)
@_mod_utils.requires_sox()
def apply_codec(
waveform: Tensor,
sample_rate: int,
format: str,
channels_first: bool = True,
compression: Optional[float] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
) -> Tensor:
r"""
Apply codecs as a form of augmentation.
Args:
waveform (Tensor): Audio data. Must be 2 dimensional. See also ```channels_first```.
sample_rate (int): Sample rate of the audio waveform.
format (str): File format.
channels_first (bool):
When True, both the input and output Tensor have dimension ``[channel, time]``.
Otherwise, they have dimension ``[time, channel]``.
compression (float): Used for formats other than WAV.
For mor details see :py:func:`torchaudio.backend.sox_io_backend.save`.
encoding (str, optional): Changes the encoding for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
bits_per_sample (int, optional): Changes the bit depth for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
Returns:
torch.Tensor: Resulting Tensor.
If ``channels_first=True``, it has ``[channel, time]`` else ``[time, channel]``.
"""
bytes = io.BytesIO()
torchaudio.backend.sox_io_backend.save(bytes,
waveform,
sample_rate,
channels_first,
compression,
format,
encoding,
bits_per_sample
)
bytes.seek(0)
augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file(
bytes, effects=[["rate", f"{sample_rate}"]], channels_first=channels_first, format=format)
return augmented
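# --- Usage sketch (illustrative; requires the sox backend). "speech.wav" is a
# hypothetical input file.
#
#   waveform, sample_rate = torchaudio.load("speech.wav")
#   mp3ish = apply_codec(waveform, sample_rate, format="mp3")
#   # round-tripping through a lossy codec is a common data-augmentation step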
@_mod_utils.requires_kaldi()
def compute_kaldi_pitch(
waveform: torch.Tensor,
sample_rate: float,
frame_length: float = 25.0,
frame_shift: float = 10.0,
min_f0: float = 50,
max_f0: float = 400,
soft_min_f0: float = 10.0,
penalty_factor: float = 0.1,
lowpass_cutoff: float = 1000,
resample_frequency: float = 4000,
delta_pitch: float = 0.005,
nccf_ballast: float = 7000,
lowpass_filter_width: int = 1,
upsample_filter_width: int = 5,
max_frames_latency: int = 0,
frames_per_chunk: int = 0,
simulate_first_pass_online: bool = False,
recompute_frame: int = 500,
snip_edges: bool = True,
) -> torch.Tensor:
"""Extract pitch based on method described in [1].
This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi.
Args:
waveform (Tensor):
The input waveform of shape `(..., time)`.
sample_rate (float):
Sample rate of `waveform`.
frame_length (float, optional):
Frame length in milliseconds. (default: 25.0)
frame_shift (float, optional):
Frame shift in milliseconds. (default: 10.0)
min_f0 (float, optional):
Minimum F0 to search for (Hz) (default: 50.0)
max_f0 (float, optional):
Maximum F0 to search for (Hz) (default: 400.0)
soft_min_f0 (float, optional):
Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0)
penalty_factor (float, optional):
            Cost factor for F0 change. (default: 0.1)
lowpass_cutoff (float, optional):
Cutoff frequency for LowPass filter (Hz) (default: 1000)
resample_frequency (float, optional):
Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff.
(default: 4000)
        delta_pitch (float, optional):
Smallest relative change in pitch that our algorithm measures. (default: 0.005)
nccf_ballast (float, optional):
Increasing this factor reduces NCCF for quiet frames (default: 7000)
lowpass_filter_width (int, optional):
Integer that determines filter width of lowpass filter, more gives sharper filter.
(default: 1)
upsample_filter_width (int, optional):
Integer that determines filter width when upsampling NCCF. (default: 5)
max_frames_latency (int, optional):
Maximum number of frames of latency that we allow pitch tracking to introduce into
the feature processing (affects output only if ``frames_per_chunk > 0`` and
``simulate_first_pass_online=True``) (default: 0)
frames_per_chunk (int, optional):
The number of frames used for energy normalization. (default: 0)
simulate_first_pass_online (bool, optional):
If true, the function will output features that correspond to what an online decoder
would see in the first pass of decoding -- not the final version of the features,
which is the default. (default: False)
Relevant if ``frames_per_chunk > 0``.
recompute_frame (int, optional):
Only relevant for compatibility with online pitch extraction.
A non-critical parameter; the frame at which we recompute some of the forward pointers,
after revising our estimate of the signal energy.
Relevant if ``frames_per_chunk > 0``. (default: 500)
snip_edges (bool, optional):
If this is set to false, the incomplete frames near the ending edge won't be snipped,
so that the number of frames is the file size divided by the frame-shift.
This makes different types of features give the same number of frames. (default: True)
Returns:
        Tensor: Pitch feature. Shape: ``(..., frames, 2)`` where the last dimension
corresponds to pitch and NCCF.
Reference:
- A pitch extraction algorithm tuned for automatic speech recognition
P. Ghahremani, B. BabaAli, D. Povey, K. Riedhammer, J. Trmal and S. Khudanpur
2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),
Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049.
"""
shape = waveform.shape
waveform = waveform.reshape(-1, shape[-1])
result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch(
waveform, sample_rate, frame_length, frame_shift,
min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff,
resample_frequency, delta_pitch, nccf_ballast,
lowpass_filter_width, upsample_filter_width, max_frames_latency,
frames_per_chunk, simulate_first_pass_online, recompute_frame,
snip_edges,
)
result = result.reshape(shape[:-1] + result.shape[-2:])
return result
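# --- Usage sketch (illustrative; requires torchaudio built with Kaldi support).
# "utterance.wav" is a hypothetical input file.
#
#   waveform, sample_rate = torchaudio.load("utterance.wav")
#   pitch_feats = compute_kaldi_pitch(waveform, sample_rate)   # (..., frames, 2)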
hexsha: 7cabc4e8d6c4275c91322768679e9a68335e86e0 | size: 12964 | ext: py | lang: Python
path: src/status_node.py | repo: Faust-Wang/vswarm | head: d18ce643218c18ef1e762f40562104b2a0926ad7 | licenses: [MIT]
stars: 21 (2021-03-03 .. 2022-03-28) | issues: 2 (2021-07-21 .. 2022-03-17) | forks: 8 via hvourtsis/vswarm (2021-02-27 .. 2022-01-05)
#!/usr/bin/env python3
from __future__ import absolute_import, division, print_function
import curses
import sys
from collections import deque
from datetime import datetime
import numpy as np
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import ExtendedState, PositionTarget, State # StatusText
from scipy.spatial.transform import Rotation as R
from sensor_msgs.msg import BatteryState, Image, NavSatFix
GPS_FIX_DICT = {
0: ('No GPS', curses.COLOR_RED),
1: ('No fix', curses.COLOR_RED),
2: ('2D lock', curses.COLOR_BLUE),
3: ('3D lock', curses.COLOR_BLUE),
4: ('DGPS', curses.COLOR_MAGENTA),
5: ('RTK float', curses.COLOR_YELLOW),
6: ('RTK fix', curses.COLOR_GREEN)
}
def get_color(color):
return curses.color_pair(color)
def frequency_from_messages(messages):
durations = []
for i in range(len(messages) - 1):
duration = messages[i + 1].header.stamp - messages[i].header.stamp
durations.append(duration.to_sec())
frequency = 1 / np.mean(durations)
if np.isnan(frequency):
return 0
return frequency
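# Worked example (hypothetical stamps): messages stamped at 0.0 s, 0.1 s and 0.2 s
# give durations [0.1, 0.1], so the estimated rate is 1 / 0.1 = 10 Hz. With fewer
# than two messages the mean is NaN and the function reports 0 instead.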
class StatusNode:
def __init__(self, screen):
rospy.init_node('status_node', argv=sys.argv)
self.rate = rospy.get_param('~rate', default=1.0)
# Curses setup
self.screen = curses.initscr()
self.rows, self.cols = self.screen.getmaxyx()
height_status = 15
self.status = curses.newwin(height_status, self.cols, 1, 2)
# self.console = curses.newwin(self.rows - height_status, self.cols, 12, 2)
self.lines = 0
self.text = ''
self.screen.keypad(True)
curses.curs_set(False) # Hide cursor
colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN,
curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED,
curses.COLOR_WHITE, curses.COLOR_YELLOW]
# Curses color setup
curses.use_default_colors()
for color in colors:
curses.init_pair(color, color, -1)
# Default variables
self.status_battery_perc = None
self.state = State()
self.state_sub = rospy.Subscriber('mavros/state', State,
callback=self.state_callback,
queue_size=1)
self.battery = BatteryState()
self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState,
callback=self.battery_callback,
queue_size=1)
self.extended = ExtendedState()
self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState,
callback=self.extended_callback,
queue_size=1)
# self.statustext = StatusText()
# self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText,
# callback=self.statustext_callback,
# queue_size=1)
self.gps = NavSatFix()
self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix,
callback=self.gps_callback,
queue_size=1)
self.local_pose = PoseStamped()
self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped,
callback=self.local_pose_callback,
queue_size=1)
self.global_pose = PoseStamped()
self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped,
callback=self.global_pose_callback,
queue_size=1)
self.diagnostics = DiagnosticArray()
self.diagnostic_gps = DiagnosticStatus()
self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray,
callback=self.diagnostics_callback,
queue_size=1)
self.setpoint = PositionTarget()
self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget,
callback=self.setpoint_callback,
queue_size=1)
self.cameras = ['front', 'right', 'back', 'left']
self.image_subscribers = []
self.images = {c: deque(maxlen=10) for c in self.cameras}
for camera in self.cameras:
topic = f'camera_{camera}/image_raw'
subscriber = rospy.Subscriber(topic, Image, callback=self.image_callback,
callback_args=camera, queue_size=1,
buff_size=2 ** 24)
self.image_subscribers.append(subscriber)
def battery_callback(self, battery_msg):
if battery_msg.location == 'id0':
self.battery = battery_msg
def state_callback(self, state_msg):
self.state = state_msg
def extended_callback(self, extended_msg):
self.extended = extended_msg
def diagnostics_callback(self, diagnostics_msg):
for status in diagnostics_msg.status:
if 'GPS' in status.name:
self.diagnostic_gps = status
def gps_callback(self, gps_msg):
self.gps = gps_msg
def local_pose_callback(self, pose_msg):
self.local_pose = pose_msg
def global_pose_callback(self, pose_msg):
self.global_pose = pose_msg
def setpoint_callback(self, setpoint_msg):
self.setpoint = setpoint_msg
def image_callback(self, image_msg, camera):
self.images[camera].append(image_msg)
    def statustext_callback(self, statustext_msg):
        # NOTE: relies on self.console, whose creation is commented out in __init__;
        # the StatusText subscriber that would trigger this callback is also commented out.
        screen = self.console
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# time_str = datetime.datetime.fromtimestamp(unix_time)
text = statustext_msg.text
severity = statustext_msg.severity
msg = statustext_msg
severity_red = [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR]
severity_yellow = [msg.WARNING, msg.NOTICE]
severity_neutral = [msg.INFO, msg.DEBUG]
color = curses.COLOR_CYAN
if severity in severity_red:
color = curses.COLOR_RED
elif severity in severity_yellow:
color = curses.COLOR_YELLOW
elif severity in severity_neutral:
color = curses.COLOR_WHITE
self.text = f'{time_str}: {text} ({color})'
# screen.addstr(self.lines, 0, log, get_color(color))
self.lines += 1
screen.refresh()
def print_status(self):
screen = self.status
screen.clear()
# rospy.loginfo(status)
# print(status)
x_tab = 0
x_indent = 14
row = 0
# Battery
battery_percentage = int(self.battery.percentage * 100)
color = curses.COLOR_CYAN
if battery_percentage > 50:
color = curses.COLOR_GREEN
elif battery_percentage > 25:
color = curses.COLOR_YELLOW
elif battery_percentage > 0:
color = curses.COLOR_RED
status_battery = str(battery_percentage) + '%'
screen.addstr(row, x_tab, 'Battery: ')
screen.addstr(row, x_indent, status_battery, get_color(color))
row += 1
# Armed
if self.state.armed:
color = curses.COLOR_RED
status_armed = 'Yes'
else:
color = curses.COLOR_GREEN
status_armed = 'No'
screen.addstr(row, x_tab, 'Armed: ')
screen.addstr(row, x_indent, status_armed, get_color(color))
row += 1
# Mode
color = curses.COLOR_CYAN
mode = self.state.mode
if mode.startswith('AUTO'):
mode = mode.split('.')[-1]
mode = mode.capitalize()
if mode == 'Offboard':
color = curses.COLOR_RED
else:
color = curses.COLOR_BLUE
if mode == '':
mode = 'None'
elif mode == 'Posctl':
mode = 'Position'
elif mode == 'Rtl':
mode = 'Return'
status_mode = '{}'.format(mode)
screen.addstr(row, x_tab, 'Mode: ')
screen.addstr(row, x_indent, status_mode, get_color(color))
row += 1
# Extended status
if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR:
status_extended = 'Air'
color = curses.COLOR_RED
elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING:
            status_extended = 'Landing'
color = curses.COLOR_GREEN
elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND:
status_extended = 'Ground'
color = curses.COLOR_GREEN
elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF:
status_extended = 'Takeoff'
color = curses.COLOR_RED
elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED:
status_extended = 'Undefined'
color = curses.COLOR_CYAN
screen.addstr(row, x_tab, 'State: ')
screen.addstr(row, x_indent, status_extended, get_color(color))
row += 1
# GPS info
satellites = 0
fix_type, color = GPS_FIX_DICT[0]
for value in self.diagnostic_gps.values:
if value.key == 'Satellites visible':
satellites = value.value
elif value.key == 'Fix type':
fix_type, color = GPS_FIX_DICT[int(value.value)]
screen.addstr(row, x_tab, 'GPS info: ')
screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color))
row += 2
# GPS pos
latitude = self.gps.latitude
longitude = self.gps.longitude
altitude = round(self.gps.altitude, 2)
status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)'
screen.addstr(row, x_tab, 'GPS pos: ')
screen.addstr(row, x_indent, status_gps)
row += 1
# Local pose
p = self.local_pose.pose.position
q = self.local_pose.pose.orientation
quaternion = [q.x, q.y, q.z, q.w]
try:
rot = R.from_quat(quaternion)
except ValueError:
rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
yaw, pitch, roll = int(yaw), int(pitch), int(roll)
screen.addstr(row, x_tab, 'Local pos: ')
screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
row += 1
# Global pose
p = self.global_pose.pose.position
q = self.global_pose.pose.orientation
quaternion = [q.x, q.y, q.z, q.w]
try:
rot = R.from_quat(quaternion)
except ValueError:
rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
yaw, pitch, roll = int(yaw), int(pitch), int(roll)
screen.addstr(row, x_tab, 'Global pos: ')
screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
row += 1
# Setpoint
v = self.setpoint.velocity
vx, vy, vz = round(v.x, 2), round(v.y, 2), round(v.z, 2)
yaw = int(np.rad2deg(self.setpoint.yaw))
screen.addstr(row, x_tab, 'Setpoint: ')
screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)')
row += 1
# Cameras
freqs = {c: 0 for c in self.cameras}
for cam, messages in self.images.items():
freqs[cam] = frequency_from_messages(messages)
ff, fr, fb, fl = [int(round(v)) for k, v in freqs.items()]
screen.addstr(row, x_tab, 'Cameras: ')
screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl} (front right back left [Hz])')
row += 1
screen.refresh()
self.screen.refresh()
def run(self):
rate = rospy.Rate(self.rate)
try:
while not rospy.is_shutdown():
self.print_status()
rate.sleep()
except rospy.ROSInterruptException:
curses.nocbreak()
self.screen.keypad(False)
curses.echo()
def curses_main(screen):
StatusNode(screen).run()
def main():
try:
curses.wrapper(curses_main)
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
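# --- Usage sketch (illustrative): as a ROS node, this script is typically launched
# with rosrun, e.g. `rosrun <package_name> status_node.py`, from a terminal large
# enough for the curses status window; <package_name> is a placeholder.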
hexsha: 7cabe7391066e68e59a6eee1bcca21b689be0897 | size: 5010 | ext: py | lang: Python
path: bin/boxplot_param.py | repo: mo-schmid/MIALab | head: 8a7e183df7007993e8a28513a73dca20bfd60737 | licenses: [Apache-2.0]
stars: null | issues: null | forks: null
import argparse
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
from typing import List
class ResultParam():
"""Result Parameter"""
def __init__(self, path: Path, param_str: str):
"""Initializes a new instance of the Result Parameter
Args:
path (Path): path to the desired result file
param_str (str): string containing the parameters used in the postprocessing
"""
self.path = path
self.param_str = param_str
def set_box_format(bp, color):
plt.setp(bp['boxes'], color=color)
plt.setp(bp['whiskers'], color=color)
plt.setp(bp['caps'], color=color)
plt.setp(bp['caps'], linewidth=1)
plt.setp(bp['medians'], color='red')
plt.setp(bp['medians'], linewidth=1.5)
plt.setp(bp['fliers'], marker='.')
plt.setp(bp['fliers'], markerfacecolor='black')
plt.setp(bp['fliers'], alpha=1)
def boxplot(file_path: str, data: list, title: str, x_label: str, y_label: str, x_ticks: tuple,
min_: float = None, max_: float = None):
if len(data) != len(x_ticks):
raise ValueError('arguments data and x_ticks need to have same length')
    fig = plt.figure(
        figsize=(2 * 1.5, 5 * 1.5))  # figsize defaults to (width, height) = (6.4, 4.8);
    # for box plots we want the ratio inverted (taller than wide)
    ax = fig.add_subplot(111)  # create an axes instance (nrows=ncols=index)
    bp = ax.boxplot(data, widths=0.6)
    set_box_format(bp, 'black')  # '000' is not a valid matplotlib color spec; use 'black'
    # set and format title, labels, and ticks
ax.set_title(title, fontweight='bold', fontsize=20)
ax.set_ylabel(y_label, fontweight='bold', fontsize=18)
# ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) # we don't use the x-label since it should be clear from the x-ticks
ax.yaxis.set_tick_params(labelsize=12)
ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight': 'bold'}, rotation=45)
# remove frame
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# thicken frame
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
# adjust min and max if provided
if min_ is not None or max_ is not None:
min_original, max_original = ax.get_ylim()
min_ = min_ if min_ is not None and min_ < min_original else min_original
max_ = max_ if max_ is not None and max_ > max_original else max_original
ax.set_ylim(min_, max_)
plt.savefig(file_path, bbox_inches="tight")
plt.close()
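# --- Usage sketch (illustrative data, not project results):
#
#   import numpy as np
#   demo = [np.random.rand(20), np.random.rand(20)]
#   boxplot('demo.png', demo, title='Demo', x_label='Method', y_label='Score',
#           x_ticks=('A', 'B'), min_=0.0, max_=1.0)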
def format_data(data, label: str, metric: str):
return data[data['LABEL'] == label][metric].values
def metric_to_readable_text(metric: str):
if metric == 'DICE':
return 'Dice coefficient'
elif metric == 'HDRFDST':
return 'Hausdorff distance (mm)'
else:
raise ValueError('Metric "{}" unknown'.format(metric))
def main(results: List[ResultParam], plot_dir: Path):
    """generates box plots comparing two or more result sets for all labels
    Args:
        results (List[ResultParam]): a list of result parameters (path and description)
        plot_dir: path to the desired result folder to store the box plots
    """
metrics = ('DICE', 'HDRFDST') # the metrics we want to plot the results for
metrics_yaxis_limits = ((0.0, 1.0), (0.0, 18)) # tuples of y-axis limits (min, max) for each metric. Use None if unknown
    labels = ('WhiteMatter', 'GreyMatter', 'Hippocampus', 'Amygdala', 'Thalamus')  # the brain structures/tissues you are interested in
# load the CSVs. We usually want to compare different methods (e.g. a set of different features), therefore,
# we load two CSV (for simplicity, it is the same here)
# todo: adapt to your needs to compare different methods (e.g. load different CSVs)
dfs = []
methods = []
for res in results:
dfs.append(pd.read_csv(res.path, sep=';'))
methods.append(res.param_str)
    # todo: read parameter values from text file, use them to plot the information about the parameter
# some parameters to improve the plot's readability
title = '{}'
for label in labels:
for metric, (min_, max_) in zip(metrics, metrics_yaxis_limits):
boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)),
[format_data(df, label, metric) for df in dfs],
title.format(label),
'Method', metric_to_readable_text(metric),
methods,
min_, max_
)
if __name__ == '__main__':
results = []
    results.append(ResultParam(Path(Path.cwd() / "mia-result/gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv"),
"no pp"))
results.append(ResultParam(Path(Path.cwd() /"mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv"),
"with pp"))
main(results, Path(Path.cwd() / 'mia-result/plot_results'))
hexsha: 7cae45b970c1385083dad6bbec98b3cd495bf626 | size: 3948 | ext: py | lang: Python
path: EMeRGE/dssmetrics/constants.py | repo: NREL/EMeRGE | head: 573e86ca8e62080c664998e8cc79e9231e7ad502 | licenses: [BSD-3-Clause]
stars: 6 (2020-04-11 .. 2022-01-23) | issues: null | forks: 3 (2020-06-11 .. 2021-08-10)
""" Default values : DO NOT CHANGE !!!"""
LOG_FORMAT = "%(asctime)s: %(levelname)s: %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
MAXITERATIONS = 100
LIFE_PARAMETERS = {"theta_i":30,"theta_fl":36,"theta_gfl":28.6,
"R":4.87,"n":1,"tau":3.5,"m":1,"A":-13.391,
"B":6972.15,"num_of_iteration":4,}
DEFAULT_TEMP = 25
MAX_TRANS_LOADING = 1.5
DEFAULT_CONFIGURATION = {
"dss_filepath": "",
"dss_filename":"",
"extra_data_path": ".",
"export_folder":"",
"start_time":"2018-1-1 0:0:0",
"end_time":"2018-2-1 0:0:0",
"simulation_time_step (minute)": 15,
"frequency": 50,
"upper_voltage": 1.1,
"lower_voltage":0.9,
"record_every": 96,
"export_voltages": False,
"export_lineloadings": False,
"export_transloadings":False,
"export_start_date": "",
"export_end_date": "",
"volt_var": {
"enabled": False,
"yarray": [0.44,0.44,0,0,-0.44,-0.44],
"xarray": [0.7,0.90,0.95,1.05,1.10,1.3]
},
"log_settings": {
"save_in_file": False,
"log_folder": ".",
"log_filename":"logs.log",
"clear_old_log_file": True
}
}
DEFAULT_ADVANCED_CONFIGURATION = {
"project_path": "C:\\Users\\KDUWADI\\Desktop\\NREL_Projects\\CIFF-TANGEDCO\\TANGEDCO\\EMERGE\\Projects",
"active_project":"GR_PALAYAM",
"active_scenario": "FullYear",
"dss_filename":"gr_palayam.dss",
"start_time":"2018-1-1 0:0:0",
"end_time":"2018-1-2 0:0:0",
"simulation_time_step (minute)": 60,
"frequency": 50,
"upper_voltage": 1.1,
"lower_voltage":0.9,
"record_every": 4,
"parallel_simulation":True,
"parallel_process": 1,
"export_voltages": False,
"export_lineloadings": False,
"export_transloadings":False,
"export_start_date": "",
"export_end_date": "",
"volt_var": {
"enabled": True,
"yarray": [0.44,0.44,0,0,-0.44,-0.44],
"xarray": [0.7,0.90,0.95,1.05,1.10,1.3]
},
"log_settings": {
"save_in_file": False,
"log_filename":"",
"clear_old_log_file": True
}
}
VALID_SETTINGS = {
"project_path":{'type':str},
"active_project":{'type':str},
"active_scenario":{'type':str},
"dss_filepath": {'type': str},
"dss_filename":{'type':str},
"export_folder":{'type':str},
"start_time":{'type':str},
"end_time":{'type':str},
"simulation_time_step (minute)":{'type':int},
"frequency": {'type':int,'options':[50,60]},
"upper_voltage": {'type':float,'range':[1,1.5]},
"lower_voltage":{'type':float,'range':[0.8,1]},
"record_every": {'type':int},
"extra_data_path":{'type':str},
"parallel_simulation":{'type':bool},
"parallel_process": {'type':int,'range':[1,4]},
"export_voltages": {'type':bool},
"export_lineloadings": {'type':bool},
"export_transloadings":{'type':bool},
"export_start_date": {'type':str},
"export_end_date": {'type':str},
"volt_var": {
"enabled": {'type':bool},
"yarray": {'type':list},
"xarray": {'type':list}
},
"log_settings": {
"save_in_file": {'type':bool},
"log_folder": {'type':str},
"log_filename":{'type':str},
"clear_old_log_file": {'type':bool}
}
}
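# --- Hypothetical helper (not part of this module): one way a caller could check a
# user configuration against VALID_SETTINGS. Only the 'type', 'options', and 'range'
# keys of the schema above are honored; nested sections recurse.
def validate_settings(config, schema=None):
    schema = VALID_SETTINGS if schema is None else schema
    errors = []
    for key, rule in schema.items():
        if key not in config:
            continue  # missing keys are assumed to fall back to defaults
        value = config[key]
        if 'type' not in rule:  # nested section such as "volt_var" or "log_settings"
            errors.extend(validate_settings(value, rule))
        elif not isinstance(value, rule['type']):
            errors.append("%s: expected %s" % (key, rule['type'].__name__))
        elif 'options' in rule and value not in rule['options']:
            errors.append("%s: must be one of %s" % (key, rule['options']))
        elif 'range' in rule and not (rule['range'][0] <= value <= rule['range'][1]):
            errors.append("%s: outside range %s" % (key, rule['range']))
    return errors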
hexsha: 7cae7acd2ab857e48bf48cfdcc2ed083e6292337 | size: 12669 | ext: py | lang: Python
path: minesweeper/game.py | repo: MathisFederico/Minesweeper | head: b66b41066e325813b24497d2caca0a11c048e18b | licenses: [MIT]
stars: 1 (2020-12-23 .. 2020-12-23) | issues: null | forks: null
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import images
from gym import Env, spaces
from time import time
import numpy as np
from copy import copy
import colorsys
import pygame
from pygame.transform import scale
class MinesweeperEnv(Env):
def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False):
self.grid_shape = grid_shape
self.grid_size = np.prod(grid_shape)
self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs
self.n_bombs = min(self.grid_size - 1, self.n_bombs)
self.flaged_bombs = 0
self.flaged_empty = 0
self.max_time = max_time
if impact_size % 2 == 0:
raise ValueError('Impact_size must be an odd number !')
self.impact_size = impact_size
# Define constants
self.HIDDEN = 0
self.REVEAL = 1
self.FLAG = 2
self.BOMB = self.impact_size ** 2
# Setting up gym Env conventions
nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape)
self.observation_space = spaces.MultiDiscrete(nvec_observation)
nvec_action = np.array(self.grid_shape + (2,))
self.action_space = spaces.MultiDiscrete(nvec_action)
        # Initialize state
self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8)
## Setup bombs places
idx = np.indices(self.grid_shape).reshape(2, -1)
bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False)
self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids]
## Place numbers
self.semi_impact_size = (self.impact_size-1)//2
bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8)
for bombs_id in bombs_ids:
bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id]
x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0)
y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1)
bomb_region = self.state[x_min:x_max, y_min:y_max, 0]
bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max]
## Place bombs
self.state[self.bombs_positions + (0,)] = self.BOMB
self.start_time = time()
self.time_left = int(time() - self.start_time)
# Setup rendering
self.pygame_is_init = False
self.chicken = chicken
self.done = False
self.score = 0
def get_observation(self):
observation = copy(self.state[:, :, 1])
revealed = observation == 1
flaged = observation == 2
observation += self.impact_size ** 2 + 1
observation[revealed] = copy(self.state[:, :, 0][revealed])
observation[flaged] -= 1
return observation
def reveal_around(self, coords, reward, done, without_loss=False):
if not done:
x_min, x_max, _, _ = self.clip_index(coords[0], 0)
y_min, y_max, _, _ = self.clip_index(coords[1], 1)
region = self.state[x_min:x_max, y_min:y_max, :]
unseen_around = np.sum(region[..., 1] == 0)
if unseen_around == 0:
if not without_loss:
reward -= 0.001
return
flags_around = np.sum(region[..., 1] == 2)
if flags_around == self.state[coords + (0,)]:
unrevealed_zeros_around = np.logical_and(region[..., 0] == 0, region[..., 1] == self.HIDDEN)
if np.any(unrevealed_zeros_around):
zeros_coords = np.argwhere(unrevealed_zeros_around)
for zero in zeros_coords:
coord = (x_min + zero[0], y_min + zero[1])
self.state[coord + (1,)] = 1
self.reveal_around(coord, reward, done, without_loss=True)
self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1
unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG)
if np.any(unflagged_bombs_around):
self.done = True
reward, done = -1, True
else:
if not without_loss:
reward -= 0.001
def clip_index(self, x, axis):
max_idx = self.grid_shape[axis]
x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1)
dx_min, dx_max = x_min - (x - self.semi_impact_size), x_max - (x + self.semi_impact_size + 1) + self.impact_size
return x_min, x_max, dx_min, dx_max
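    # Worked example (impact_size=3, so semi_impact_size=1): for a bomb at x=0 on an
    # axis of length 10, clip_index returns x_min=0, x_max=2 and dx_min=1, dx_max=3,
    # i.e. only the lower two rows of the 3x3 impact kernel overlap the grid.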
def step(self, action):
coords = action[:2]
action_type = action[2] + 1 # 0 -> 1 = reveal; 1 -> 2 = toggle_flag
case_state = self.state[coords + (1,)]
case_content = self.state[coords + (0,)]
NO_BOMBS_AROUND = 0
reward, done = 0, False
self.time_left = self.max_time - time() + self.start_time
if self.time_left <= 0:
score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs
reward, done = score, True
return self.get_observation(), reward, done, {'passed':False}
if action_type == self.REVEAL:
if case_state == self.HIDDEN:
self.state[coords + (1,)] = action_type
if case_content == self.BOMB:
if self.pygame_is_init: self.done = True
reward, done = -1, True
return self.get_observation(), reward, done, {'passed':False}
elif case_content == NO_BOMBS_AROUND:
self.reveal_around(coords, reward, done)
elif case_state == self.REVEAL:
self.reveal_around(coords, reward, done)
reward -= 0.01
else:
reward -= 0.001
self.score += reward
return self.get_observation(), reward, done, {'passed':True}
elif action_type == self.FLAG:
if case_state == self.REVEAL:
reward -= 0.001
else:
flaging = 1
if case_state == self.FLAG:
flaging = -1
self.state[coords + (1,)] = self.HIDDEN
else:
self.state[coords + (1,)] = self.FLAG
if case_content == self.BOMB:
self.flaged_bombs += flaging
else:
self.flaged_empty += flaging
if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0:
reward, done = 2 + self.time_left/self.max_time, True
        if np.any(np.logical_and(self.state[..., 0] == self.BOMB, self.state[..., 1] == 1)) or self.done:  # literal 9 only matched BOMB for impact_size=3
reward, done = -1 + self.time_left/self.max_time + (self.flaged_bombs - self.flaged_empty)/self.n_bombs, True
self.score += reward
return self.get_observation(), reward, done, {'passed':False}
def reset(self):
self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken)
return self.get_observation()
def render(self):
if not self.pygame_is_init:
self._init_pygame()
self.pygame_is_init = True
for event in pygame.event.get():
if event.type == pygame.QUIT: # pylint: disable=E1101
pygame.quit() # pylint: disable=E1101
# Plot background
pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width))
# Plot grid
for index, state in np.ndenumerate(self.state[..., 1]):
self._plot_block(index, state)
# Plot infos
## Score
score_text = self.score_font.render("SCORE", 1, (255, 10, 10))
score = self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10))
self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width))
self.window.blit(score, (0.1*self.header_size, 0.8*self.width))
## Time left
time_text = self.num_font.render("TIME", 1, (255, 10, 10))
self.time_left = self.max_time - time() + self.start_time
time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10))
self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width))
self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width))
## Bombs left
bombs_text = self.num_font.render("BOMBS", 1, (255, 255, 10))
left_text = self.num_font.render("LEFT", 1, (255, 255, 10))
potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty
potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10))
self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width))
self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width))
self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width))
pygame.display.flip()
pygame.time.wait(10)
if self.done:
pygame.time.wait(3000)
@staticmethod
def _get_color(n, max_n):
BLUE_HUE = 0.6
RED_HUE = 0.0
HUE = RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n - n) / max_n)**3
color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7))
return color
def _plot_block(self, index, state):
position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0])))
label = None
if state == self.HIDDEN and not self.done:
img_key = 'hidden'
elif state == self.FLAG:
if not self.done:
img_key = 'flag'
else:
content = self.state[index][0]
if content == self.BOMB:
img_key = 'disabled_mine' if not self.chicken else 'disabled_chicken'
else:
img_key = 'misplaced_flag'
else:
content = self.state[index][0]
if content == self.BOMB:
if state == self.HIDDEN:
img_key = 'mine' if not self.chicken else 'chicken'
else:
img_key = 'exploded_mine' if not self.chicken else 'exploded_chicken'
else:
img_key = 'revealed'
label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB))
self.window.blit(self.images[img_key], position)
if label: self.window.blit(label, position + self.font_offset - (content > 9) * self.decimal_font_offset)
def _init_pygame(self):
pygame.init() # pylint: disable=E1101
# Open Pygame window
self.scale_factor = 2 * min(12 / self.grid_shape[0], 25 / self.grid_shape[1])
self.BLOCK_SIZE = 32
self.header_size = self.scale_factor * 100
self.origin = np.array([self.header_size, 0])
self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0])
self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] + self.header_size)
self.window = pygame.display.set_mode((self.height, self.width))
# Setup font for numbers
num_font_size = 20
self.num_font = pygame.font.SysFont("monospace", int(self.scale_factor * num_font_size))
self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325, 0.15])
self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.225, 0])
self.score_font = pygame.font.SysFont("monospace", int(self.scale_factor * 12))
# Load images
def scale_image(img, scale_factor=self.scale_factor):
return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height())))
images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag']
if self.chicken:
images_names += ['chicken', 'exploded_chicken', 'disabled_chicken']
else:
images_names += ['mine', 'exploded_mine', 'disabled_mine']
self.images = {}
for img_name in images_names:
with pkg_resources.path(images, img_name + '.png') as path:
img = pygame.image.load(str(path)).convert()
self.images[img_name] = scale_image(img)
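# --- Usage sketch (illustrative): driving the environment with random actions in the
# usual gym loop. This is an assumption about typical usage, not project code.
#
#   env = MinesweeperEnv(grid_shape=(10, 15))
#   obs = env.reset()
#   done = False
#   while not done:
#       action = tuple(env.action_space.sample())   # (x, y, reveal/flag)
#       obs, reward, done, info = env.step(action)
#       env.render()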
hexsha: 7caf8da8a5f682874ecef410bafcd6662e5de11b | size: 3440 | ext: py | lang: Python
path: models/layers/mesh_conv.py | repo: CallumMcMahon/MeshCNN | head: 343950a8d69807ed4afa13f1843edb37c4cd042c | licenses: [MIT]
stars: 2 (2022-01-05 .. 2022-03-24) | issues: null | forks: 1 (2022-03-24 .. 2022-03-24)
import torch
import torch.nn as nn
import torch.nn.functional as F
class MeshConv(nn.Module):
""" Computes convolution between edges and 4 incident (1-ring) edge neighbors
in the forward pass takes:
x: edge features (Batch x Features x Edges)
mesh: list of mesh data-structure (len(mesh) == Batch)
and applies convolution
"""
def __init__(self, in_channels, out_channels, k=5, bias=True):
super(MeshConv, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias)
self.k = k
def forward(self, x, mesh):
x = x.squeeze(-1)
# pad gemm
G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in mesh], 0)
# build 'neighborhood image' and apply convolution
G = self.create_GeMM(x, G)
x = self.conv(G)
return x
def flatten_gemm_inds(self, Gi):
        (b, ne, nb) = Gi.shape  # nb = neighbors per edge; renamed from nn to avoid shadowing torch.nn
        ne += 1
        batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b, ne)
        add_fac = batch_n * ne
        add_fac = add_fac.view(b, ne, 1)
        add_fac = add_fac.repeat(1, 1, nb)
# flatten Gi
Gi = Gi.float() + add_fac[:, 1:, :]
return Gi
def create_GeMM(self, x, Gi):
""" gathers the edge features (x) with from the 1-ring indices (Gi)
applys symmetric functions to handle order invariance
returns a 'fake image' which can use 2d convolution on
output dimensions: Batch x Channels x Edges x 5
"""
Gishape = Gi.shape
# pad the first row of every sample in batch with zeros
padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device)
# add zero feature vector then shift all indices. border edges now reference zero vector
x = torch.cat((padding, x), dim=2)
Gi = Gi + 1 #shift
# first flatten indices
Gi_flat = self.flatten_gemm_inds(Gi)
Gi_flat = Gi_flat.view(-1).long()
#
odim = x.shape
x = x.permute(0, 2, 1).contiguous()
x = x.view(odim[0] * odim[2], odim[1])
# indices of gemm never reference padded section of x so padded section never used
f = torch.index_select(x, dim=0, index=Gi_flat)
f = f.view(Gishape[0], Gishape[1], Gishape[2], -1)
f = f.permute(0, 3, 1, 2)
# apply the symmetric functions for an equivariant convolution
x_1 = f[:, :, :, 1] + f[:, :, :, 3]
x_2 = f[:, :, :, 2] + f[:, :, :, 4]
x_3 = torch.abs(f[:, :, :, 1] - f[:, :, :, 3])
x_4 = torch.abs(f[:, :, :, 2] - f[:, :, :, 4])
f = torch.stack([f[:, :, :, 0], x_1, x_2, x_3, x_4], dim=3)
return f
def pad_gemm(self, m, xsz, device):
""" extracts one-ring neighbors (4x) -> m.gemm_edges
which is of size #edges x 4
add the edge_id itself to make #edges x 5
then pad to desired size e.g., xsz x 5
"""
padded_gemm = torch.tensor(m.gemm_edges, device=device).float()
padded_gemm = padded_gemm.requires_grad_()
padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1)
# pad using F
padded_gemm = F.pad(padded_gemm, (0, 0, 0, xsz - m.edges_count), "constant", 0)
padded_gemm = padded_gemm.unsqueeze(0)
return padded_gemm
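# --- Usage sketch (hypothetical, not part of MeshCNN): MeshConv only needs a mesh
# object exposing `gemm_edges` (edges x 4 neighbor indices, -1 for borders) and
# `edges_count`. The stand-in class below is an assumption for demonstration.
if __name__ == '__main__':
    import numpy as np

    class _FakeMesh:
        def __init__(self, n_edges):
            self.edges_count = n_edges
            self.gemm_edges = np.random.randint(-1, n_edges, size=(n_edges, 4))

    conv = MeshConv(in_channels=5, out_channels=16)
    x = torch.rand(1, 5, 20, 1)          # Batch x Features x Edges x 1
    out = conv(x, [_FakeMesh(20)])       # -> torch.Size([1, 16, 20, 1])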
hexsha: 7cb23f9d984ca01ba8f682afe13184f98d4f5e92 | size: 389 | ext: py | lang: Python
path: qtask/utils/testing.py | repo: LinkTsang/qtask-legacy-python | head: 9b264b8e33313e4d3615472d59a2a39948eeeaa1 | licenses: [MIT]
stars: null | issues: null | forks: null
import asyncio
import traceback
import unittest
def async_test(f):
def wrapper(test_case: unittest.TestCase, *args, **kwargs):
loop = asyncio.get_event_loop()
task = loop.create_task(f(test_case, *args, **kwargs))
try:
loop.run_until_complete(task)
except Exception:
traceback.print_exc()
raise
return wrapper
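# --- Usage sketch (illustrative): decorating a coroutine test method so a plain
# unittest runner can execute it.
#
#   class ExampleTest(unittest.TestCase):
#       @async_test
#       async def test_sleep(self):
#           await asyncio.sleep(0)
#           self.assertTrue(True)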
hexsha: 7cb54fa0b7a5c349c3088529c91a97ac9de21c8e | size: 2684 | ext: py | lang: Python
path: plugin.video.yatp/libs/client/commands.py | repo: mesabib/kodi.yatp | head: d874df43047b5b58f84cb3760fc891d9a133a69f | licenses: [CNRI-Python]
stars: 54 (2015-08-01 .. 2022-02-06) | issues: 57 (2015-08-31 .. 2018-08-30) | forks: 16 (2016-01-17 .. 2021-12-12)
# coding: utf-8
# Module: commands
# Created on: 28.07.2015
# Author: Roman Miroshnychenko aka Roman V.M. (romanvm@yandex.ua)
# Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html
"""
Context menu commands
"""
import sys
import xbmc
import xbmcgui
import json_requests as jsonrq
from simpleplugin import Addon
addon = Addon('plugin.video.yatp')
_ = addon.initialize_gettext()
def show_torrent_info(info_hash):
"""
Display current torrent info
:param info_hash:
:return:
"""
torr_info = jsonrq.get_torrent_info(info_hash)
info_dialog = xbmcgui.DialogProgress()
info_dialog.create(torr_info['name'])
while not info_dialog.iscanceled():
info_dialog.update(torr_info['progress'],
_('state: {0}; seeds: {1}; peers: {2}').format(
torr_info['state'],
torr_info['num_seeds'],
torr_info['num_peers']
),
_('size: {0}MB; DL speed: {1}KB/s; UL speed: {2}KB/s').format(
torr_info['size'],
torr_info['dl_speed'],
torr_info['ul_speed']
),
_('total DL: {0}MB; total UL: {1}MB').format(
torr_info['total_download'],
torr_info['total_upload'])
)
xbmc.sleep(1000)
torr_info = jsonrq.get_torrent_info(info_hash)
if __name__ == '__main__':
if sys.argv[1] == 'pause':
jsonrq.pause_torrent(sys.argv[2])
elif sys.argv[1] == 'resume':
jsonrq.resume_torrent(sys.argv[2])
elif sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno(
_('Confirm delete'),
_('Do you really want to delete the torrent?')):
jsonrq.remove_torrent(sys.argv[2], False)
    elif sys.argv[1] == 'delete_with_files' and xbmcgui.Dialog().yesno(
_('Confirm delete'),
_('Do you really want to delete the torrent with files?'),
_('Warning: The files will be deleted permanently!')):
jsonrq.remove_torrent(sys.argv[2], True)
elif sys.argv[1] == 'pause_all':
jsonrq.pause_all()
elif sys.argv[1] == 'resume_all':
jsonrq.resume_all()
elif sys.argv[1] == 'show_info':
show_torrent_info(sys.argv[2])
elif sys.argv[1] == 'restore_finished':
jsonrq.restore_finished(sys.argv[2])
else:
addon.log_debug('Command cancelled or invalid command: {0}'.format(sys.argv[1]))
xbmc.executebuiltin('Container.Refresh')
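# --- Usage sketch (illustrative): Kodi invokes this module as a context-menu script,
# e.g. RunScript(.../plugin.video.yatp/libs/client/commands.py, pause, <info_hash>),
# which arrives here as sys.argv == ['<script path>', 'pause', '<info_hash>'].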
hexsha: 7cb5d1b6022bb826ecb887e64d632c52c31ffdb9 | size: 5563 | ext: py | lang: Python
path: pipeline/scripts/package.py | repo: deplatformr/open-images | head: 3726c9802bda1d7ecbbbd9920d5566daaecc9faa | licenses: [MIT]
stars: 2 (2020-10-12 .. 2020-10-14) | issues: null | forks: null
import os
import shutil
import sqlite3
import tarfile
from datetime import datetime
import bagit
def create_package(images, batch_dir):
    package_threshold = 838860800  # 800 MiB; the next power of two above this is 1 GiB
print("Package threshold: " + get_human_readable_file_size(package_threshold))
abs_path = os.getcwd()
try:
package_size = 0
for image in images:
package_size += image[1]
print("Total batch size: " + get_human_readable_file_size(package_size))
if package_size < package_threshold:
print("Not enough images yet to make a package from this batch.")
return()
else:
try:
# create new batch directory
split = os.path.split(batch_dir)
new_dir_number = int(split[1]) + 1
new_batch_dir = os.path.join(split[0], str(new_dir_number))
os.makedirs(new_batch_dir)
# move all related files for the last image that's getting removed from batch to keep within threshold
last_image = images[-1]
path, dirs, files = next(os.walk(batch_dir))
for file in files:
if file.find(last_image[0]) != -1:
filepath = os.path.join(path, file)
shutil.move(filepath, os.path.join(
new_batch_dir, file))
# drop the last image from the list (convert tuple) to get the package size back under threshold
images.pop(-1)
except Exception as e:
print("Unable to separate batch to make a package.")
print(e)
return()
# Convert batch directory into a Bagit directory
external_identifier = "deplatformr-open-images-" + split[1]
bagit.make_bag(batch_dir,
{'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package contains a subset of the Google Open Images dataset used for machine learning training. The image files have been downloaded from their Flickr server source, verified for fixity, had EXIF metadata extracted, and are now bundled here with their annotation data, segmentation files and newly generated sha512 checksums. This content and context is described in a sidecar metadata files using schema.org/ImageObject and JSON-LD format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=["sha512"])
print("Created a Bagit directory.")
try:
# Create the tar package
packages_dir = os.path.join(
os.getcwd(), "source_data/packages/")
tarball_name = external_identifier + ".tar"
tarball = tarfile.open(os.path.join(
packages_dir, tarball_name), "w")
tarball.add(batch_dir, arcname=external_identifier)
tarball.close()
print("Created tarball " + tarball_name + ".")
except Exception as e:
print("Unable to create a tarball package from batch.")
print(e)
return()
try:
shutil.rmtree(batch_dir)
print("Deleted the batch source directory.")
except OSError as e:
print("Unable to delete the source directory.")
print(e)
# record the tarball package name for each image
db_path = os.path.join(
abs_path, "source_data/deplatformr_open_images_v6.sqlite")
images_db = sqlite3.connect(db_path)
cursor = images_db.cursor()
for image in images:
cursor.execute("UPDATE open_images SET package_name = ? WHERE ImageID = ?",
(tarball_name, image[0],),)
images_db.commit()
images_db.close()
# add tarball name, size, and timestamp to the workflow dbase
utctime = datetime.utcnow()
tarball_size = os.path.getsize(
os.path.join(packages_dir, tarball_name))
print("Tarball size is: " + get_human_readable_file_size(tarball_size))
db_path = os.path.join(
abs_path, "deplatformr_open_images_workflow.sqlite")
workflow_db = sqlite3.connect(db_path)
cursor = workflow_db.cursor()
for image in images:
print("Linking image " +
image[0] + " to " + tarball_name + " in SQLite.")
cursor.execute(
"UPDATE images SET package_name = ? WHERE image_id = ?", (tarball_name, image[0],),)
cursor.execute("INSERT INTO packages (name, size, timestamp) VALUES (?,?,?)",
(tarball_name, tarball_size, utctime,),)
workflow_db.commit()
workflow_db.close()
except Exception as e:
print("Unable to create a package for batch directory " + batch_dir)
print(e)
def get_human_readable_file_size(size, precision=2):
suffixes = ["B", "KiB", "MiB", "GiB", "TiB"]
suffixIndex = 0
while size > 1024 and suffixIndex < 4:
suffixIndex += 1 # increment the index of the suffix
size = size / 1024.0 # apply the division
return "%.*f %s" % (precision, size, suffixes[suffixIndex])
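# --- Usage sketch (illustrative):
#
#   get_human_readable_file_size(838860800)   # -> '800.00 MiB'
#   get_human_readable_file_size(1536, 1)     # -> '1.5 KiB'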
hexsha: 7cb6f4beed1a08b09244a31819b47421774b7914 | size: 6486 | ext: py | lang: Python
path: eval/util/metrics.py | repo: fau-is/grm | head: 78b1559ea0dda1b817283adecd58da50ca232223 | licenses: [MIT]
stars: 5 (2020-09-15 .. 2021-12-13) | issues: null | forks: 1 (2020-09-10 .. 2020-09-10)
import sklearn.metrics
import sklearn.preprocessing
import sklearn.utils.multiclass  # `import sklearn` alone does not expose these submodules
import pandas
import seaborn as sns
import matplotlib.pyplot as pyplot
from functools import reduce
# import numpy as np
def metrics_from_prediction_and_label(labels, predictions, verbose=False):
measures = {
"accuracy": sklearn.metrics.accuracy_score(labels, predictions),
"balanced_accuracy": sklearn.metrics.balanced_accuracy_score(labels, predictions),
"precision_micro": sklearn.metrics.precision_score(labels, predictions, average='micro'),
"precision_macro": sklearn.metrics.precision_score(labels, predictions, average='macro'),
"precision_weighted": sklearn.metrics.precision_score(labels, predictions, average='weighted'),
"recall_micro": sklearn.metrics.recall_score(labels, predictions, average='micro'),
"recall_macro": sklearn.metrics.recall_score(labels, predictions, average='macro'),
"recall_weighted": sklearn.metrics.recall_score(labels, predictions, average='weighted'),
"f1_score_micro": sklearn.metrics.f1_score(labels, predictions, average='micro'),
"f1_score_macro": sklearn.metrics.f1_score(labels, predictions, average='macro'),
"f1_score_weighted": sklearn.metrics.f1_score(labels, predictions, average='weighted')
}
try:
measures["roc_auc_weighted"] = multi_class_roc_auc_score(labels, predictions, 'weighted')
measures["roc_auc_macro"] = multi_class_roc_auc_score(labels, predictions, 'macro')
measures["roc_auc_micro"] = multi_class_roc_auc_score(labels, predictions, 'micro')
except ValueError:
print("Warning: Roc auc score can not be calculated ...")
try:
# note we use the average precision at different threshold values as the auc of the pr-curve
# and not the auc-pr-curve with the trapezoidal rule / linear interpolation because it could be too optimistic
measures["auc_prc_weighted"] = multi_class_prc_auc_score(labels, predictions, 'weighted')
measures["auc_prc_macro"] = multi_class_prc_auc_score(labels, predictions, 'macro')
measures["auc_prc_micro"] = multi_class_prc_auc_score(labels, predictions, 'micro')
except ValueError:
print("Warning: Auc prc score can not be calculated ...")
save_confusion_matrix(labels, predictions)
report = save_classification_report(labels, predictions)
classes = list(sorted(set(labels)))
for pos_class in classes:
measures[str(pos_class) + "_precision"] = report[str(pos_class)]['precision']
measures[str(pos_class) + "_recall"] = report[str(pos_class)]['recall']
measures[str(pos_class) + "_f1-score"] = report[str(pos_class)]['f1-score']
measures[str(pos_class) + "_support"] = report[str(pos_class)]['support']
if pos_class == 1:
neg_class = 0
else:
neg_class = 1
tp, fp, tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class)
measures[str(pos_class) + "_tp"] = tp
measures[str(pos_class) + "_fp"] = fp
measures[str(pos_class) + "_tn"] = tn
measures[str(pos_class) + "_fn"] = fn
if tn + fp == 0:
pass
else:
# Specificity or true negative rate
measures[str(pos_class) + "_tnr"] = tn / (tn + fp)
# Fall out or false positive rate
measures[str(pos_class) + "_fpr"] = fp / (fp + tn)
if tn + fn == 0:
pass
else:
# Negative predictive value
measures[str(pos_class) + "_npv"] = tn / (tn + fn)
if tp + fn == 0:
pass
else:
# False negative rate
measures[str(pos_class) + "_fnr"] = fn / (tp + fn)
if tp + fp == 0:
pass
else:
# False discovery rate
measures[str(pos_class) + "_fdr"] = fp / (tp + fp)
return measures
def calculate_cm_states(labels, predictions, pos_class, neg_class):
tp = 0
fp = 0
tn = 0
fn = 0
for i in range(len(predictions)):
if labels[i] == predictions[i] == pos_class:
tp += 1
if predictions[i] == pos_class and labels[i] != predictions[i]:
fp += 1
if labels[i] == predictions[i] == neg_class:
tn += 1
if predictions[i] == neg_class and labels[i] != predictions[i]:
fn += 1
return tp, fp, tn, fn
def save_classification_report(labels, predictions):
return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True)
def multi_class_roc_auc_score(label, predict, average):
label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(label)
label = label_binarizer.transform(label)
predict = label_binarizer.transform(predict)
return sklearn.metrics.roc_auc_score(label, predict, average=average)
def multi_class_prc_auc_score(label, predict, average):
label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(label)
label = label_binarizer.transform(label)
predict = label_binarizer.transform(predict)
return sklearn.metrics.average_precision_score(label, predict, average=average)
def label_binarizer(labels):
for index in range(0, len(labels)):
if labels[index] >= 0.5:
labels[index] = 1.0
else:
labels[index] = 0.0
return labels
def save_confusion_matrix(labels, predictions, path="../../../results/cm.pdf"):
classes = sklearn.utils.multiclass.unique_labels(labels, predictions)
cms = []
cm = sklearn.metrics.confusion_matrix(labels, predictions)
cm_df = pandas.DataFrame(cm, index=classes, columns=classes)
cms.append(cm_df)
def prettify(n):
"""
if n > 1000000:
return str(np.round(n / 1000000, 1)) + 'M'
elif n > 1000:
return str(np.round(n / 1000, 1)) + 'K'
else:
return str(n)
"""
return str(n)
cm = reduce(lambda x, y: x.add(y, fill_value=0), cms)
annot = cm.applymap(prettify)
cm = (cm.T / cm.sum(axis=1)).T
fig, g = pyplot.subplots(figsize=(7, 4.5))
g = sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1)
_ = g.set(ylabel='Actual', xlabel='Prediction')
for _, spine in g.spines.items():
spine.set_visible(True)
pyplot.xticks(rotation=45)
fig.tight_layout()
fig.savefig(path)
pyplot.close()
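# --- Usage sketch (illustrative labels/predictions; save_confusion_matrix writes to
# the hard-coded results path, so that directory must exist):
#
#   y_true = [0, 0, 1, 1, 1]
#   y_pred = [0, 1, 1, 1, 0]
#   scores = metrics_from_prediction_and_label(y_true, y_pred)
#   print(scores['accuracy'])   # 0.6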
hexsha: 7cb7c886108da63565062eb8d192b4df3da78f64 | size: 3566 | ext: py | lang: Python
path: dpgs_sandbox/tests/test_bug_migrations_in_base_models.py | repo: gabrielpiassetta/django-pgschemas | head: 1e76db4cef31c7534bf4ba109961e835a1dd3c96 | licenses: [MIT]
stars: null | issues: null | forks: null
import warnings
from unittest.mock import patch
from django.apps import apps
from django.core import management
from django.core.management.base import CommandError
from django.db import models
from django.db.utils import ProgrammingError
from django.test import TransactionTestCase, tag
from django_pgschemas.checks import check_schema_names
from django_pgschemas.models import TenantMixin
from django_pgschemas.utils import get_tenant_model
TenantModel = get_tenant_model()
def patched_get_tenant_model(*args, **kwargs):
class TenantModel(TenantMixin):
dummy = models.TextField()
class Meta:
app_label = get_tenant_model()._meta.app_label
return TenantModel
@tag("bug")
class MigrationZeroRoundTripTestCase(TransactionTestCase):
"""
Provoke a handled ProgrammingError by migrating models from empty database.
"""
def test_database_checks_with_zero_migrations(self):
management.call_command("migrate", "shared_public", "zero", verbosity=0)
# The goal is that the next line doesn't raise ProgrammingError
check_schema_names(apps.get_app_config("django_pgschemas"))
management.call_command("migrate", verbosity=0)
@tag("bug")
class UnappliedMigrationTestCase(TransactionTestCase):
"""
Provoke a handled ProgrammingError by running tenant command with pending model changes.
"""
@classmethod
def setUpClass(cls):
tenant1 = TenantModel(schema_name="tenant1")
tenant1.save(verbosity=0)
@classmethod
def tearDownClass(cls):
for tenant in TenantModel.objects.all():
tenant.delete(force_drop=True)
@patch("django_pgschemas.management.commands.get_tenant_model", patched_get_tenant_model)
def test_whowill_with_pending_migrations(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Avoid warnings about model being registered twice
with self.assertRaises(CommandError) as ctx:
management.call_command("whowill", all_schemas=True, verbosity=0)
self.assertEqual(
str(ctx.exception),
"Error while attempting to retrieve dynamic schemas. "
"Perhaps you need to migrate the 'public' schema first?",
)
@tag("bug")
class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase):
@classmethod
def setUpClass(cls):
tenant1 = TenantModel(schema_name="tenant1")
tenant1.save(verbosity=0)
@classmethod
def tearDownClass(cls):
for tenant in TenantModel.objects.all():
tenant.delete(force_drop=True)
def test_migrate_with_exclusions(self):
# We first unapply a migration with fake so we can reapply it without fake
# This should work without errors
management.call_command("migrate", "app_tenants", "0001_initial", fake=True, schemas=["tenant1"], verbosity=0)
# We then migrate on all schemas except for tenant1, THIS IS THE CASE WE WANT TO TEST
# This should work without errors
management.call_command("migrate", all_schemas=True, excluded_schemas=["tenant1"], verbosity=0)
# If we try to global migrate now, we should get a ProgrammingError
with self.assertRaises(ProgrammingError):
management.call_command("migrate", all_schemas=True, verbosity=0)
# We finally apply the migration again with fake
# This should work without errors
management.call_command("migrate", fake=True, all_schemas=True, verbosity=0)
| 37.536842
| 118
| 0.714526
| 419
| 3,566
| 5.937947
| 0.350835
| 0.036174
| 0.059084
| 0.067524
| 0.29381
| 0.270498
| 0.228698
| 0.20619
| 0.20619
| 0.184084
| 0
| 0.007768
| 0.205833
| 3,566
| 94
| 119
| 37.93617
| 0.870763
| 0.180314
| 0
| 0.311475
| 0
| 0
| 0.106449
| 0.018377
| 0
| 0
| 0
| 0
| 0.04918
| 1
| 0.131148
| false
| 0
| 0.180328
| 0
| 0.42623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cb900078da95ed33cbe2fdf9bd9a465b5e9a56e
| 6,330
|
py
|
Python
|
tfx/components/transform/component.py
|
pingsutw/tfx
|
bf0d1d74e3f6ea429989fc7b80b82bea08077857
|
[
"Apache-2.0"
] | null | null | null |
tfx/components/transform/component.py
|
pingsutw/tfx
|
bf0d1d74e3f6ea429989fc7b80b82bea08077857
|
[
"Apache-2.0"
] | null | null | null |
tfx/components/transform/component.py
|
pingsutw/tfx
|
bf0d1d74e3f6ea429989fc7b80b82bea08077857
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Transform component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text, Union
import absl
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.transform import executor
from tfx.orchestration import data_types
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TransformSpec
class Transform(base_component.BaseComponent):
"""A TFX component to transform the input examples.
The Transform component wraps TensorFlow Transform (tf.Transform) to
preprocess data in a TFX pipeline. This component will load the
preprocessing_fn from the input module file, preprocess both 'train' and 'eval'
splits of the input examples, generate the `tf.Transform` output, and save both
the transform function and the transformed examples to the locations desired by
the orchestrator.
## Providing a preprocessing function
The Transform executor will use the user-provided code in the `module_file`,
looking specifically for the `preprocessing_fn()` function within that file.
An example of `preprocessing_fn()` can be found in the [user-supplied
code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
of the TFX Chicago Taxi pipeline example.
## Example
```
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=infer_schema.outputs['schema'],
module_file=module_file)
```
Please see https://www.tensorflow.org/tfx/transform for more details.
"""
SPEC_CLASS = TransformSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
examples: types.Channel = None,
schema: types.Channel = None,
module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None,
preprocessing_fn: Optional[Union[Text,
data_types.RuntimeParameter]] = None,
transform_graph: Optional[types.Channel] = None,
transformed_examples: Optional[types.Channel] = None,
input_data: Optional[types.Channel] = None,
instance_name: Optional[Text] = None,
enable_cache: Optional[bool] = None):
"""Construct a Transform component.
Args:
examples: A Channel of type `standard_artifacts.Examples` (required).
This should contain the two splits 'train' and 'eval'.
schema: A Channel of type `standard_artifacts.Schema`. This should
contain a single schema artifact.
module_file: The file path to a python module file, from which the
'preprocessing_fn' function will be loaded. The function must have the
following signature.
def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
...
where the values of input and returned Dict are either tf.Tensor or
tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
must be supplied.
preprocessing_fn: The path to python function that implements a
'preprocessing_fn'. See 'module_file' for expected signature of the
function. Exactly one of 'module_file' or 'preprocessing_fn' must be
supplied.
transform_graph: Optional output 'TransformPath' channel for output of
'tf.Transform', which includes an exported Tensorflow graph suitable for
both training and serving.
transformed_examples: Optional output 'ExamplesPath' channel for
materialized transformed examples, which includes both 'train' and
'eval' splits.
input_data: Backwards compatibility alias for the 'examples' argument.
instance_name: Optional unique instance name. Necessary iff multiple
transform components are declared in the same pipeline.
enable_cache: Optional boolean to indicate if cache is enabled for the
Transform component. If not specified, defaults to the value
specified for pipeline's enable_cache parameter.
Raises:
ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
is supplied.
"""
if input_data:
absl.logging.warning(
'The "input_data" argument to the Transform component has '
'been renamed to "examples" and is deprecated. Please update your '
'usage as support for this argument will be removed soon.')
examples = input_data
if bool(module_file) == bool(preprocessing_fn):
raise ValueError(
"Exactly one of 'module_file' or 'preprocessing_fn' must be supplied."
)
transform_graph = transform_graph or types.Channel(
type=standard_artifacts.TransformGraph,
artifacts=[standard_artifacts.TransformGraph()])
if not transformed_examples:
example_artifact = standard_artifacts.Examples()
example_artifact.split_names = artifact_utils.encode_split_names(
artifact.DEFAULT_EXAMPLE_SPLITS)
transformed_examples = types.Channel(
type=standard_artifacts.Examples, artifacts=[example_artifact])
spec = TransformSpec(
examples=examples,
schema=schema,
module_file=module_file,
preprocessing_fn=preprocessing_fn,
transform_graph=transform_graph,
transformed_examples=transformed_examples)
super(Transform, self).__init__(
spec=spec, instance_name=instance_name, enable_cache=enable_cache)
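# Hedged sketch (editorial addition, not part of this module): a minimal
# module_file body matching the `preprocessing_fn` contract documented above.
# The feature names 'x' / 'x_xf' are assumptions; `tft.scale_to_z_score` is a
# standard tensorflow_transform helper.
#
# import tensorflow_transform as tft
#
# def preprocessing_fn(inputs):
#   """Scales the assumed dense feature 'x' to z-scores."""
#   return {'x_xf': tft.scale_to_z_score(inputs['x'])}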
| 43.356164
| 105
| 0.733017
| 799
| 6,330
| 5.670839
| 0.309136
| 0.049658
| 0.017656
| 0.011918
| 0.134407
| 0.075259
| 0.061576
| 0.041271
| 0.041271
| 0.041271
| 0
| 0.001977
| 0.20079
| 6,330
| 145
| 106
| 43.655172
| 0.893655
| 0.562717
| 0
| 0
| 0
| 0
| 0.096056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017544
| false
| 0
| 0.245614
| 0
| 0.315789
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cb9aea67a579bf1b09555b59098bc7f2315e25f
| 959
|
py
|
Python
|
objects/GitIndexEntry.py
|
anderslatif/alg
|
d5902a05a4cb249e554f65a7e8016d7d050b6da9
|
[
"MIT"
] | null | null | null |
objects/GitIndexEntry.py
|
anderslatif/alg
|
d5902a05a4cb249e554f65a7e8016d7d050b6da9
|
[
"MIT"
] | null | null | null |
objects/GitIndexEntry.py
|
anderslatif/alg
|
d5902a05a4cb249e554f65a7e8016d7d050b6da9
|
[
"MIT"
] | null | null | null |
# https://github.com/git/git/blob/master/Documentation/technical/index-format.txt
class GitIndexEntry(object):
# The last time a file's metadata changed. This is a tuple (seconds, nanoseconds)
ctime = None
# The last time a file's data changed. This is a tuple (seconds, nanoseconds)
mtime = None
# The ID of the device containing this file
dev = None
# The file's inode number
ino = None
# The object type, either b1000 (regular), b1010 (symlink), b1110 (gitlink)
mode_type = None
# The object permissions as an integer
mode_permissions = None
# User ID of owner
uid = None
# Group ID of owner
gid = None
# Size of this object in bytes
size = None
# The object's hash as a hex string
object = None
flag_assume_valid = None
flag_extended = None
flag_stage = None
# Length of the name if < 0xFFF, -1 otherwise
flag_name_length = None
name = None
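# Hedged sketch (editorial addition): how a single on-disk entry could be
# unpacked into the fields above, following the index-format document linked
# at the top. The version-2 layout offsets are this sketch's assumption.
import struct

def parse_index_entry(data, offset=0):
    # ten network-order uint32s: ctime (s, ns), mtime (s, ns), dev, ino,
    # mode, uid, gid, size
    fields = struct.unpack_from('>10I', data, offset)
    entry = GitIndexEntry()
    entry.ctime = (fields[0], fields[1])
    entry.mtime = (fields[2], fields[3])
    entry.dev, entry.ino = fields[4], fields[5]
    entry.mode_type = fields[6] >> 12            # object type bits
    entry.mode_permissions = fields[6] & 0o777   # permission bits
    entry.uid, entry.gid, entry.size = fields[7], fields[8], fields[9]
    entry.object = data[offset + 40:offset + 60].hex()  # 20-byte SHA-1
    flags = struct.unpack_from('>H', data, offset + 60)[0]
    entry.flag_assume_valid = bool(flags & 0x8000)
    entry.flag_extended = bool(flags & 0x4000)
    entry.flag_stage = (flags >> 12) & 0x3
    entry.flag_name_length = flags & 0x0FFF
    return entry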
| 22.302326
| 85
| 0.657977
| 139
| 959
| 4.482014
| 0.532374
| 0.067416
| 0.0626
| 0.038523
| 0.173355
| 0.173355
| 0.11878
| 0
| 0
| 0
| 0
| 0.018625
| 0.272159
| 959
| 42
| 86
| 22.833333
| 0.873926
| 0.573514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cbb90e215684507ec88ead7205a67d14728eaf9
| 809
|
py
|
Python
|
chainer/_version.py
|
yumetov/chainer
|
522e017a18008ee00e39f4ae4b30f4f9db3824b2
|
[
"MIT"
] | 3,705
|
2017-06-01T07:36:12.000Z
|
2022-03-30T10:46:15.000Z
|
chainer/_version.py
|
yumetov/chainer
|
522e017a18008ee00e39f4ae4b30f4f9db3824b2
|
[
"MIT"
] | 5,998
|
2017-06-01T06:40:17.000Z
|
2022-03-08T01:42:44.000Z
|
chainer/_version.py
|
yumetov/chainer
|
522e017a18008ee00e39f4ae4b30f4f9db3824b2
|
[
"MIT"
] | 1,150
|
2017-06-02T03:39:46.000Z
|
2022-03-29T02:29:32.000Z
|
__version__ = '7.8.0'
_optional_dependencies = [
{
'name': 'CuPy',
'packages': [
'cupy-cuda120',
'cupy-cuda114',
'cupy-cuda113',
'cupy-cuda112',
'cupy-cuda111',
'cupy-cuda110',
'cupy-cuda102',
'cupy-cuda101',
'cupy-cuda100',
'cupy-cuda92',
'cupy-cuda91',
'cupy-cuda90',
'cupy-cuda80',
'cupy',
],
'specifier': '>=7.7.0,<8.0.0',
'help': 'https://docs.cupy.dev/en/latest/install.html',
},
{
'name': 'iDeep',
'packages': [
'ideep4py',
],
'specifier': '>=2.0.0.post3, <2.1',
'help': 'https://docs.chainer.org/en/latest/tips.html',
},
]
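# Hedged sketch (editorial addition): one way the table above could be
# consumed to report missing optional dependencies. The pkg_resources lookup
# is this sketch's assumption, not something this module ships.
def _check_optional_dependencies():
    import pkg_resources  # local import keeps module import lightweight
    for dep in _optional_dependencies:
        installed = [p for p in dep['packages']
                     if pkg_resources.working_set.by_key.get(p)]
        if not installed:
            print('{} is not installed; see {}'.format(dep['name'], dep['help']))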
| 23.114286
| 63
| 0.410383
| 72
| 809
| 4.527778
| 0.541667
| 0.01227
| 0.079755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104938
| 0.399258
| 809
| 34
| 64
| 23.794118
| 0.565844
| 0
| 0
| 0.125
| 0
| 0
| 0.431397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cbc5cd567a3861d37ece4294dbac699b11bc6a2
| 10,435
|
py
|
Python
|
image_aug.py
|
qwerasdf887/image_augmentation
|
7d465eba4d6af5d9a4cd79bf1981c8ef206ffe42
|
[
"MIT"
] | null | null | null |
image_aug.py
|
qwerasdf887/image_augmentation
|
7d465eba4d6af5d9a4cd79bf1981c8ef206ffe42
|
[
"MIT"
] | null | null | null |
image_aug.py
|
qwerasdf887/image_augmentation
|
7d465eba4d6af5d9a4cd79bf1981c8ef206ffe42
|
[
"MIT"
] | null | null | null |
# coding=UTF-8
# This Python file uses the following encoding: utf-8
import cv2
import numpy as np
import xml.etree.cElementTree as ET
from random import sample
#default args:
default_args = {'noise_prob': 0.1,
'gasuss_mean': 0,
'gasuss_var': 0.001,
'rand_hue': 30,
'rand_saturation': 30,
'rand_light': 30,
'rot_angle': 15,
'bordervalue': (127, 127, 127),
'zoom_out_value': 0.7,
'output_shape': (416, 416),
'take_value' : 5
}
# add black-pixel (pepper) noise
def sp_noise(image, box_loc=None, **kwargs):
h, w = image.shape[0:2]
noise = np.random.rand(h,w)
out_img = image.copy()
out_img[noise < kwargs['noise_prob']] = 0
if box_loc is None:
return out_img
else:
return out_img, box_loc
# Gaussian noise
def gasuss_noise(image, box_loc=None, **kwargs):
out_img = (image / 255.) - 0.5
noise = np.random.normal(kwargs['gasuss_mean'], kwargs['gasuss_var']** 0.5, image.shape)
out_img = out_img + noise + 0.5
out_img[out_img < 0] = 0
out_img[out_img > 1] = 1
out_img = (out_img * 255).astype(np.uint8)
if box_loc is None:
return out_img
else:
return out_img, box_loc
# adjust hue (add a random value in -N~N to the hue channel)
def mod_hue(image, box_loc=None, **kwargs):
out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
out_img[:,:,0] += np.random.randint(-kwargs['rand_hue'], kwargs['rand_hue'])
out_img = cv2.cvtColor(np.clip(out_img, 0, 180).astype(np.uint8), cv2.COLOR_HSV2BGR)
if box_loc is None:
return out_img
else:
return out_img, box_loc
# adjust saturation (add a random value in -N~N to the saturation channel)
def mod_saturation(image, box_loc=None, **kwargs):
out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
out_img[:,:,1] += np.random.randint(-kwargs['rand_saturation'], kwargs['rand_saturation'])
out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)
if box_loc is None:
return out_img
else:
return out_img, box_loc
# adjust brightness (add a random value in -N~N to the value channel)
def mod_light(image, box_loc=None, **kwargs):
out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
out_img[:,:,2] += np.random.randint(-kwargs['rand_light'], kwargs['rand_light'])
out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)
if box_loc is None:
return out_img
else:
return out_img, box_loc
# horizontal flip
def horizontal_flip(image, box_loc=None, **kwargs):
'''
Args:
box_loc: bounding box location(x_min, y_min, x_max, y_max)
'''
if box_loc is None:
return cv2.flip(image, 1)
else:
w = image.shape[1]
for i in box_loc:
if i[2] == 0:
break
else:
x_min, x_max = i[0], i[2]
i[0] = w - x_max
i[2] = w - x_min
return cv2.flip(image, 1), box_loc
# vertical flip
def vertical_flip(image, box_loc=None, **kwargs):
'''
Args:
box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max, label))
'''
if box_loc is None:
return cv2.flip(image, 0)
else:
h = image.shape[0]
for i in box_loc:
if i[3] == 0:
break
else:
y_min, y_max = i[1], i[3]
i[1] = h - y_max
i[3] = h - y_min
return cv2.flip(image, 0), box_loc
# rotate by a random angle in -n~n degrees
def rot_image(image, box_loc=None, **kwargs):
'''
Args:
box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max, label))
rot_angle: range of rotation angles in degrees
bordervalue: fill value for the blank border
'''
h, w, _ = image.shape
center = ( w // 2, h // 2)
angle = np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle'])
M = cv2.getRotationMatrix2D(center, angle, 1)
out_img = cv2.warpAffine(image, M, (w, h), borderValue = kwargs['bordervalue'])
if box_loc is None:
return out_img
else:
loc = box_loc[:,0:4].copy()
loc = np.append(loc, loc[:, 0:1], axis=-1)
loc = np.append(loc, loc[:, 3:4], axis=-1)
loc = np.append(loc, loc[:, 2:3], axis=-1)
loc = np.append(loc, loc[:, 1:2], axis=-1)
loc = loc.reshape(-1, 4, 2)
loc = loc - np.array(center)
rot_loc = loc.dot(np.transpose(M[:,0:2]))
rot_loc = rot_loc + np.array(center)
rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2), box_loc[:, 4:5]])
rot_box = np.floor(rot_box)
rot_box[...,0:4] = np.clip(rot_box[...,0:4], [0,0,0,0], [w-1, h-1, w-1, h-1])
return out_img, rot_box
# resize the image while keeping the aspect ratio
def resize_img(image, box_loc=None, **kwargs):
h, w, _ = image.shape
max_edge = max(kwargs['output_shape'][0], kwargs['output_shape'][1])
scale = min( max_edge / h, max_edge / w)
h = int(h * scale)
w = int(w * scale)
if box_loc is None:
return cv2.resize(image, (w, h))
else:
box_loc[:,0] = box_loc[:,0] * scale
box_loc[:,1] = box_loc[:,1] * scale
box_loc[:,2] = box_loc[:,2] * scale
box_loc[:,3] = box_loc[:,3] * scale
return cv2.resize(image, (w, h)), box_loc.astype(np.int32)
# pad the image to the target size
def padding_img(image, box_loc=None, **kwargs):
h, w, _ = image.shape
dx = int((kwargs['output_shape'][1] - w) / 2)
dy = int((kwargs['output_shape'][0] - h) / 2)
out_img = np.ones((kwargs['output_shape'][0], kwargs['output_shape'][1], 3), np.uint8) * kwargs['bordervalue'][0]
out_img[dy: dy + h, dx: dx + w] = cv2.resize(image, (w, h))
if box_loc is None:
return out_img
else:
box_loc[:,0] = box_loc[:,0] + dx
box_loc[:,1] = box_loc[:,1] + dy
box_loc[:,2] = box_loc[:,2] + dx
box_loc[:,3] = box_loc[:,3] + dy
return out_img, box_loc.astype(np.int32)
# randomly zoom out by a factor in value~1
def random_zoom_out(image, box_loc=None, **kwargs):
h, w, _ = image.shape
scale = np.random.uniform(kwargs['zoom_out_value'], 1)
h = int(h * scale)
w = int(w * scale)
dx = int((image.shape[1] - w) / 2)
dy = int((image.shape[0] - h) / 2)
out_img = np.ones(image.shape, np.uint8) * kwargs['bordervalue'][0]
out_img[dy: dy + h, dx: dx + w] = cv2.resize(image, (w, h))
if box_loc is None:
return out_img
else:
box_loc[:,0] = box_loc[:,0] * scale + dx
box_loc[:,1] = box_loc[:,1] * scale + dy
box_loc[:,2] = box_loc[:,2] * scale + dx
box_loc[:,3] = box_loc[:,3] * scale + dy
return out_img, box_loc.astype(np.int32)
# load bounding boxes from a VOC-style XML annotation file
def load_csv(xml_path, max_boxes=4):
tree = ET.parse(xml_path)
root = tree.getroot()
#location list
loc_list = np.zeros((0, 5))
box_count = 0
for obj in root.iter('object'):
if box_count >= max_boxes:
break
'''
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult) == 1:
continue
cls_id = classes.index(cls)
'''
loc = obj.find('bndbox')
x_min = int(loc.find('xmin').text)
y_min = int(loc.find('ymin').text)
x_max = int(loc.find('xmax').text)
y_max = int(loc.find('ymax').text)
loc_list = np.vstack([loc_list, np.array([x_min, y_min, x_max, y_max, 0])])
box_count += 1
return loc_list.astype(np.float32)
#draw rectangle
def draw_rect(image, box_loc):
for i in box_loc:
cv2.rectangle(image, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])), (0, 255, 0), 4)
def print_args(**kwargs):
for key, value in kwargs.items():
print('key name: {}\nvalue:{}\n'.format(key, value))
# randomly pick 0~N image augmentation functions
def rand_aug_image(image, box_loc=None, **kwargs):
if box_loc is None:
out_img = resize_img(image, **kwargs)
else:
out_img, box_loc = resize_img(image, box_loc, **kwargs)
#total augmentation function
func_list = [sp_noise, gasuss_noise, mod_hue, mod_saturation, mod_light,
horizontal_flip, vertical_flip, rot_image, random_zoom_out]
#rand take function
take_func = sample(func_list, np.random.randint(kwargs['take_value']))
for func in take_func:
if box_loc is None:
out_img = func(out_img, **kwargs)
else:
out_img, box_loc = func(out_img, box_loc, **kwargs)
if box_loc is None:
out_img = padding_img(out_img, **kwargs)
return out_img
else:
out_img, box_loc = padding_img(out_img, box_loc, **kwargs)
return out_img, box_loc
if __name__ == "__main__":
img = cv2.imread('./00002.jpg')
bbox = load_csv('./00002.xml')
# black-pixel noise
#aug_img = sp_noise(img, **default_args)
#aug_img, bbox = sp_noise(img, bbox, **default_args)
#gasuss_noise
#aug_img = gasuss_noise(img, **default_args)
#aug_img, bbox = gasuss_noise(img, bbox, **default_args)
# adjust hue
#aug_img = mod_hue(img, **default_args)
#aug_img, bbox = mod_hue(img, bbox, **default_args)
# adjust saturation
#aug_img = mod_saturation(img, **default_args)
#aug_img, bbox = mod_saturation(img, bbox, **default_args)
# adjust brightness
#aug_img = mod_light(img, **default_args)
#aug_img, bbox = mod_light(img, bbox, **default_args)
# horizontal flip
#aug_img = horizontal_flip(img, **default_args)
#aug_img, bbox = horizontal_flip(img, bbox, **default_args)
# vertical flip
#aug_img = vertical_flip(img, **default_args)
#aug_img, bbox = vertical_flip(img, bbox, **default_args)
# rotate by a random angle
#aug_img = rot_image(img, **default_args)
#aug_img, bbox = rot_image(img, bbox, **default_args)
# proportional resize to the target size
#aug_img = resize_img(img, **default_args)
#aug_img, bbox = resize_img(img, bbox, **default_args)
# pad the shape to the target size
#aug_img = padding_img(aug_img, **default_args)
#aug_img, bbox = padding_img(aug_img, bbox, **default_args)
# random zoom out by a factor in N~1
#aug_img = random_zoom_out(img, **default_args)
#aug_img, bbox = random_zoom_out(img, bbox, **default_args)
# randomly pick augmentation functions
aug_img = rand_aug_image(img, **default_args)
#aug_img, bbox = rand_aug_image(img, bbox, **default_args)
print(bbox)
draw_rect(aug_img, bbox)
cv2.imshow('img', img)
cv2.imshow('aug img', aug_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 31.430723
| 117
| 0.58965
| 1,603
| 10,435
| 3.617592
| 0.127261
| 0.078634
| 0.037248
| 0.024142
| 0.494051
| 0.419383
| 0.382825
| 0.2816
| 0.236075
| 0.19193
| 0
| 0.032525
| 0.257499
| 10,435
| 332
| 118
| 31.430723
| 0.715927
| 0.174796
| 0
| 0.333333
| 0
| 0
| 0.055059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073529
| false
| 0
| 0.019608
| 0
| 0.215686
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cbd6ca4479663e9722341b796b7cdd0073b6b18
| 1,507
|
py
|
Python
|
03_picnic/picnic.py
|
intimanipuchi/tiny_python_projects
|
5e419620ae07b0bcf8df073ba3f6c6c3d7d1a93c
|
[
"MIT"
] | null | null | null |
03_picnic/picnic.py
|
intimanipuchi/tiny_python_projects
|
5e419620ae07b0bcf8df073ba3f6c6c3d7d1a93c
|
[
"MIT"
] | null | null | null |
03_picnic/picnic.py
|
intimanipuchi/tiny_python_projects
|
5e419620ae07b0bcf8df073ba3f6c6c3d7d1a93c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Author : Roman Koziy <koziyroman@gmail.com>
Date : 2021-12-15
Purpose: Working with lists
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="Working with lists",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("items",
type=str,
nargs="+",
metavar="str",
help="item(s) to bring")
parser.add_argument("-s",
"--sorted",
help="a boolean flag",
action="store_true")
return parser.parse_args()
# --------------------------------------------------
def main():
"""The main function: formatting and printing the output"""
args = get_args()
sort_flag = args.sorted
items = args.items
if sort_flag:
items = sorted(items)
if len(items) == 1:
print(f"You are bringing {items[0]}.")
elif len(items) < 3:
items.insert(-1, "and")
print(f"You are bringing {' '.join(items)}.")
else:
# print(items)
last = items[-1]
and_last = "and " + last
items[-1] = and_last
# print(items)
print(f"You are bringing {', '.join(items)}.")
# --------------------------------------------------
if __name__ == "__main__":
main()
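# Hedged usage examples (editorial addition; output derived from main() above):
#   $ ./picnic.py chips
#   You are bringing chips.
#   $ ./picnic.py chips soda
#   You are bringing chips and soda.
#   $ ./picnic.py soda chips "potato salad" --sorted
#   You are bringing chips, potato salad, and soda.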
| 25.116667
| 63
| 0.472462
| 147
| 1,507
| 4.714286
| 0.52381
| 0.030303
| 0.038961
| 0.051948
| 0.161616
| 0.083694
| 0.083694
| 0
| 0
| 0
| 0
| 0.014327
| 0.305242
| 1,507
| 59
| 64
| 25.542373
| 0.647564
| 0.247512
| 0
| 0
| 0
| 0
| 0.171917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.029412
| 0
| 0.117647
| 0.088235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cbdd46842ad893e844a14b8fc15ffc18db30ecc
| 2,832
|
py
|
Python
|
Volume Estimation/volume.py
|
JessieRamaux/Food-Volume-Estimation
|
260b0e78a3b6a7b8bbe9daf98956502beea92552
|
[
"MIT"
] | 10
|
2021-02-19T09:31:43.000Z
|
2022-02-09T08:29:02.000Z
|
Volume Estimation/volume.py
|
JessieRamaux/Food-Volume-Estimation
|
260b0e78a3b6a7b8bbe9daf98956502beea92552
|
[
"MIT"
] | null | null | null |
Volume Estimation/volume.py
|
JessieRamaux/Food-Volume-Estimation
|
260b0e78a3b6a7b8bbe9daf98956502beea92552
|
[
"MIT"
] | 3
|
2021-02-16T00:05:32.000Z
|
2021-06-11T13:37:10.000Z
|
import numpy as np
import cv2
import os
import json
import glob
from PIL import Image, ImageDraw
plate_diameter = 25 #cm
plate_depth = 1.5 #cm
plate_thickness = 0.2 #cm
def Max(x, y):
if (x >= y):
return x
else:
return y
def polygons_to_mask(img_shape, polygons):
mask = np.zeros(img_shape, dtype=np.uint8)
mask = Image.fromarray(mask)
xy = list(map(tuple, polygons))
ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
mask = np.array(mask, dtype=bool)
return mask
def mask2box(mask):
index = np.argwhere(mask == 1)
rows = index[:, 0]
cols = index[:, 1]
left_top_r = np.min(rows)
left_top_c = np.min(cols)
right_bottom_r = np.max(rows)
right_bottom_c = np.max(cols)
return [left_top_c, left_top_r, right_bottom_c, right_bottom_r]
def get_bbox(points, h, w):
polygons = points
mask = polygons_to_mask([h,w], polygons)
return mask2box(mask)
def get_scale(points, img, lowest):
bbox = get_bbox(points, img.shape[0], img.shape[1])
diameter = (bbox[2]-bbox[0]+1+bbox[3]-bbox[1]+1)/2
len_per_pix = plate_diameter/float(diameter)
avg = 0
k = 0
for point in points:
avg += img[point[1]][point[0]]
k += 1
avg = avg/float(k)
depth = lowest - avg
depth_per_pix = plate_depth/depth
return len_per_pix, depth_per_pix
def cal_volume(points, img, len_per_pix, depth_per_pix, lowest):
volume = 0.0
bbox = get_bbox(points, img.shape[0], img.shape[1])
points = np.array(points)
shape = points.shape
points = points.reshape(shape[0], 1, shape[1])
for i in range(bbox[0], bbox[2]+1):
for j in range(bbox[1], bbox[3]+1):
if (cv2.pointPolygonTest(points, (i,j), False) >= 0):
volume += Max(0, (lowest - img[j][i]) * depth_per_pix - plate_thickness) * len_per_pix * len_per_pix
return volume
def get_volume(img, json_path):
lowest = np.max(img)
vol_dict = {}
#print(lowest)
len_per_pix = 0.0
depth_per_pix = 0.0
with open(json_path, 'r') as json_file:
data = json.load(json_file)
for shape in data['shapes']:
if (shape['label'] == "plate"):
len_per_pix, depth_per_pix = get_scale(shape['points'], img, lowest)
#print(len_per_pix, depth_per_pix)
break
for shape in data['shapes']:
label = shape['label']
if (label == "plate"):
continue
points = shape['points']
volume = cal_volume(points, img, len_per_pix, depth_per_pix, lowest)
if (label in vol_dict):
vol_dict[label] += volume
else:
vol_dict[label] = volume
return vol_dict
img = cv2.imread("out.png",0)
print(get_volume(img,"test.json"))
| 28.039604
| 116
| 0.604167
| 433
| 2,832
| 3.766744
| 0.235566
| 0.062538
| 0.049663
| 0.042918
| 0.158185
| 0.13366
| 0.096873
| 0.096873
| 0.096873
| 0.096873
| 0
| 0.024507
| 0.265184
| 2,832
| 101
| 117
| 28.039604
| 0.75925
| 0.018362
| 0
| 0.073171
| 0
| 0
| 0.021974
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085366
| false
| 0
| 0.073171
| 0
| 0.256098
| 0.012195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cbf3fcf677b8e93a5ef2be1bcf1c650636a93f5
| 2,003
|
py
|
Python
|
core/domain/role_services_test.py
|
Mohitbalwani26/oppia
|
a3d1de8b428b8216bb61ba70315583fe077f5b8a
|
[
"Apache-2.0"
] | null | null | null |
core/domain/role_services_test.py
|
Mohitbalwani26/oppia
|
a3d1de8b428b8216bb61ba70315583fe077f5b8a
|
[
"Apache-2.0"
] | null | null | null |
core/domain/role_services_test.py
|
Mohitbalwani26/oppia
|
a3d1de8b428b8216bb61ba70315583fe077f5b8a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test functions relating to roles and actions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import role_services
from core.tests import test_utils
import feconf
import python_utils
class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase):
"""Tests for roles and actions."""
def test_get_role_actions_return_value_in_correct_schema(self):
role_actions = role_services.get_role_actions()
self.assertTrue(isinstance(role_actions, dict))
for role_name, allotted_actions in role_actions.items():
self.assertTrue(isinstance(role_name, python_utils.UNICODE))
self.assertTrue(isinstance(allotted_actions, list))
self.assertEqual(len(set(allotted_actions)), len(allotted_actions))
for action_name in allotted_actions:
self.assertTrue(
isinstance(action_name, python_utils.UNICODE))
def test_get_all_actions(self):
with self.assertRaisesRegexp(
Exception, 'Role TEST_ROLE does not exist.'):
role_services.get_all_actions('TEST_ROLE')
self.assertEqual(
role_services.get_all_actions(feconf.ROLE_ID_GUEST),
[role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY])
| 39.27451
| 79
| 0.734898
| 262
| 2,003
| 5.400763
| 0.465649
| 0.042403
| 0.067845
| 0.022615
| 0.083392
| 0.048057
| 0
| 0
| 0
| 0
| 0
| 0.005549
| 0.190215
| 2,003
| 50
| 80
| 40.06
| 0.866831
| 0.368947
| 0
| 0
| 0
| 0
| 0.031528
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cbf5d867e83bab7776ed420c8f1d228f4f2244d
| 82,473
|
py
|
Python
|
deep_learning/keras/keras/backend/cntk_backend.py
|
xpennec/applications
|
50aefdf14de308fc3c132784ebba9d329e47b087
|
[
"MIT"
] | 21
|
2019-01-12T17:59:41.000Z
|
2022-03-08T17:42:56.000Z
|
deep_learning/keras/keras/backend/cntk_backend.py
|
farrell236/applications
|
0e1ab139ade2a0b3ba6f04f6fd93822b1dd5ae2f
|
[
"MIT"
] | 7
|
2019-01-24T11:44:58.000Z
|
2020-04-21T21:13:37.000Z
|
deep_learning/keras/keras/backend/cntk_backend.py
|
farrell236/applications
|
0e1ab139ade2a0b3ba6f04f6fd93822b1dd5ae2f
|
[
"MIT"
] | 8
|
2019-01-24T11:36:05.000Z
|
2021-06-15T20:59:50.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cntk as C
import numpy as np
from .common import floatx, epsilon, image_dim_ordering, image_data_format
from collections import defaultdict
from contextlib import contextmanager
import warnings
C.set_global_option('align_axis', 1)
b_any = any
dev = C.device.use_default_device()
if dev.type() == 0:
warnings.warn(
'CNTK backend warning: GPU is not detected. '
'CNTK\'s CPU version is not fully optimized, '
'please run with GPU to get better performance.')
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
# LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase
_LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')
# static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor.
_LEARNING_PHASE = -1
_UID_PREFIXES = defaultdict(int)
# cntk doesn't support gradient as a symbolic op; to hook up with the keras
# model we will create the gradient as a constant placeholder, and use this
# global map to keep the mapping from grad placeholder to parameter
grad_parameter_dict = {}
NAME_SCOPE_STACK = []
@contextmanager
def name_scope(name):
global NAME_SCOPE_STACK
NAME_SCOPE_STACK.append(name)
yield
NAME_SCOPE_STACK.pop()
def get_uid(prefix=''):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def learning_phase():
# If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor
return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER
def set_learning_phase(value):
global _LEARNING_PHASE
if value not in {0, 1}:
raise ValueError('CNTK Backend: Set learning phase '
'with value %s is not supported, '
'expected 0 or 1.' % value)
_LEARNING_PHASE = value
def clear_session():
"""Reset learning phase flag for cntk backend.
"""
global _LEARNING_PHASE
global _LEARNING_PHASE_PLACEHOLDER
_LEARNING_PHASE = -1
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0)
def in_train_phase(x, alt, training=None):
global _LEARNING_PHASE
if training is None:
training = learning_phase()
uses_learning_phase = True
else:
uses_learning_phase = False
# CNTK currently doesn't support a cond op, so here we use the
# element_select approach as a workaround. It may have a perf
# issue; we will resolve it later with the cntk cond op.
if callable(x) and isinstance(x, C.cntk_py.Function) is False:
x = x()
if callable(alt) and isinstance(alt, C.cntk_py.Function) is False:
alt = alt()
if training is True:
x._uses_learning_phase = uses_learning_phase
return x
else:
# if _LEARNING_PHASE is static
if isinstance(training, int) or isinstance(training, bool):
result = x if training == 1 or training is True else alt
else:
result = C.element_select(training, x, alt)
result._uses_learning_phase = uses_learning_phase
return result
def in_test_phase(x, alt, training=None):
return in_train_phase(alt, x, training=training)
def _convert_string_dtype(dtype):
# cntk only supports float32 and float64
if dtype == 'float32':
return np.float32
elif dtype == 'float64':
return np.float64
else:
# cntk only runs with float,
# so try casting to float to run the model
return np.float32
def _convert_dtype_string(dtype):
if dtype == np.float32:
return 'float32'
elif dtype == np.float64:
return 'float64'
else:
raise ValueError('CNTK Backend: Unsupported dtype: %s. '
'CNTK only supports float32 and '
'float64.' % dtype)
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
# Arguments
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
# Returns
A variable instance (with Keras metadata included).
"""
if dtype is None:
dtype = floatx()
if name is None:
name = ''
if isinstance(
value,
C.variables.Constant) or isinstance(
value,
C.variables.Parameter):
value = value.value
# we don't support an init parameter with a symbolic op, so eval it
# first as a workaround
if isinstance(value, C.cntk_py.Function):
value = eval(value)
shape = value.shape if hasattr(value, 'shape') else ()
if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0:
value = value.astype(dtype)
# TODO: remove the conversion when cntk supports int32, int64
# https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter
dtype = 'float32' if 'int' in str(dtype) else dtype
v = C.parameter(shape=shape,
init=value,
dtype=dtype,
name=_prepare_name(name, 'variable'))
v._keras_shape = v.shape
v._uses_learning_phase = False
v.constraint = constraint
return v
def bias_add(x, bias, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
dims = len(x.shape)
if dims > 0 and x.shape[0] == C.InferredDimension:
dims -= 1
bias_dims = len(bias.shape)
if bias_dims != 1 and bias_dims != dims:
raise ValueError('Unexpected bias dimensions %d, '
'expected 1 or %d dimensions' % (bias_dims, dims))
if dims == 4:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1, 1)
else:
shape = (bias.shape[3],) + bias.shape[:3]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 3:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1)
else:
shape = (bias.shape[2],) + bias.shape[:2]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 2:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1)
else:
shape = (bias.shape[1],) + bias.shape[:1]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, bias.shape[0])
else:
shape = bias.shape
else:
shape = bias.shape
return x + reshape(bias, shape)
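# Illustrative shape note (editorial addition, mirroring the branches above):
# for a 4D channels_first input with a 1D bias of shape (C,), the bias is
# reshaped to (C, 1, 1, 1) so it broadcasts over the spatial axes; under
# channels_last the same bias becomes (1, 1, 1, C).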
def eval(x):
if isinstance(x, C.cntk_py.Function):
return x.eval()
elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter):
return x.value
else:
raise ValueError('CNTK Backend: `eval` method on '
'`%s` type is not supported. '
'CNTK only supports `eval` with '
'`Function`, `Constant` or '
'`Parameter`.' % type(x))
def placeholder(
shape=None,
ndim=None,
dtype=None,
sparse=False,
name=None,
dynamic_axis_num=1):
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension
cntk_shape = [dynamic_dimension if s is None else s for s in shape]
cntk_shape = tuple(cntk_shape)
if dynamic_axis_num > len(cntk_shape):
raise ValueError('CNTK backend: creating placeholder with '
'%d dimension is not supported, at least '
'%d dimensions are needed.'
% (len(cntk_shape), dynamic_axis_num))
if name is None:
name = ''
cntk_shape = cntk_shape[dynamic_axis_num:]
x = C.input(
shape=cntk_shape,
dtype=_convert_string_dtype(dtype),
is_sparse=sparse,
name=name)
x._keras_shape = shape
x._uses_learning_phase = False
x._cntk_placeholder = True
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
# Arguments
x: A candidate placeholder.
# Returns
Boolean.
"""
return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder
def is_keras_tensor(x):
if not is_tensor(x):
raise ValueError('Unexpectedly found an instance of type `' +
str(type(x)) + '`. '
'Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
def is_tensor(x):
return isinstance(x, (C.variables.Constant,
C.variables.Variable,
C.variables.Parameter,
C.ops.functions.Function))
def shape(x):
shape = list(int_shape(x))
num_dynamic = _get_dynamic_axis_num(x)
non_dyn_shape = []
for i in range(len(x.shape)):
if shape[i + num_dynamic] is None:
non_dyn_shape.append(x.shape[i])
else:
non_dyn_shape.append(shape[i + num_dynamic])
return shape[:num_dynamic] + non_dyn_shape
def is_sparse(tensor):
return tensor.is_sparse
def int_shape(x):
if hasattr(x, '_keras_shape'):
return x._keras_shape
shape = x.shape
if hasattr(x, 'dynamic_axes'):
dynamic_shape = [None for a in x.dynamic_axes]
shape = tuple(dynamic_shape) + shape
return shape
def ndim(x):
shape = int_shape(x)
return len(shape)
def _prepare_name(name, default):
prefix = '_'.join(NAME_SCOPE_STACK)
if name is None or name == '':
return prefix + '/' + default
return prefix + '/' + name
def constant(value, dtype=None, shape=None, name=None):
if dtype is None:
dtype = floatx()
if shape is None:
shape = ()
np_value = value * np.ones(shape)
const = C.constant(np_value,
dtype=dtype,
name=_prepare_name(name, 'constant'))
const._keras_shape = const.shape
const._uses_learning_phase = False
return const
def random_binomial(shape, p=0.0, dtype=None, seed=None):
# use a numpy-based workaround for now
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
np.random.seed(seed)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
size = 1
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
size *= _
binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
return variable(value=binomial, dtype=dtype)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
return random_uniform_variable(shape, minval, maxval, dtype, seed)
def random_uniform_variable(shape, low, high,
dtype=None, name=None, seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e3)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
scale = (high - low) / 2
p = C.parameter(
shape,
init=C.initializer.uniform(
scale,
seed=seed),
dtype=dtype,
name=name)
return variable(value=p.value + low + scale)
def random_normal_variable(
shape,
mean,
scale,
dtype=None,
name=None,
seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
return C.parameter(
shape=shape,
init=C.initializer.normal(
scale=scale,
seed=seed),
dtype=dtype,
name=name)
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if dtype is None:
dtype = floatx()
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
# TODO: mean is not applied (cntk's normal initializer has no mean
# argument); pass stddev through as the initializer scale
return random_normal_variable(shape=shape, mean=mean, scale=stddev, seed=seed)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if seed is None:
seed = np.random.randint(1, 10e6)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
return C.parameter(
shape, init=C.initializer.truncated_normal(
stddev, seed=seed), dtype=dtype)
def dtype(x):
return _convert_dtype_string(x.dtype)
def zeros(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name)
def ones(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.ones(shape, ctype), dtype=dtype, name=name)
def eye(size, dtype=None, name=None):
if dtype is None:
dtype = floatx()
return variable(np.eye(size), dtype, name)
def zeros_like(x, dtype=None, name=None):
return x * 0
def ones_like(x, dtype=None, name=None):
return zeros_like(x) + 1
def count_params(x):
for _ in x.shape:
if _ == C.InferredDimension or _ == C.FreeDimension:
raise ValueError('CNTK backend: `count_params` with dynamic '
'shape is not supported. Please provide '
'fixed dimension instead of `None`.')
return np.prod(int_shape(x))
def cast(x, dtype):
# cntk calculates everything in float, so no cast from bool / int is needed
return x
def dot(x, y):
if len(x.shape) > 2 or len(y.shape) > 2:
y_shape = int_shape(y)
if len(y_shape) > 2:
permutation = [len(y_shape) - 2]
permutation += list(range(len(y_shape) - 2))
permutation += [len(y_shape) - 1]
y = C.transpose(y, perm=permutation)
return C.times(x, y, len(y_shape) - 1)
else:
return C.times(x, y)
def batch_dot(x, y, axes=None):
x_shape = int_shape(x)
y_shape = int_shape(y)
if isinstance(axes, int):
axes = (axes, axes)
if axes is None:
# behaves like tf.batch_matmul by default
axes = [len(x_shape) - 1, len(y_shape) - 2]
if b_any([isinstance(a, (list, tuple)) for a in axes]):
raise ValueError('Multiple target dimensions are not supported. ' +
'Expected: None, int, (int, int), ' +
'Provided: ' + str(axes))
if len(x_shape) == 2 and len(y_shape) == 2:
if axes[0] == axes[1]:
result = sum(x * y, axis=axes[0], keepdims=True)
return result if axes[0] == 1 else transpose(result)
else:
return sum(x * transpose(y), axis=axes[0], keepdims=True)
else:
if len(y_shape) == 2:
y = expand_dims(y)
normalized_axis = []
normalized_axis.append(_normalize_axis(axes[0], x)[0])
normalized_axis.append(_normalize_axis(axes[1], y)[0])
# transpose
i = normalized_axis[0]
while i < len(x.shape) - 1:
x = C.swapaxes(x, i, i + 1)
i += 1
i = normalized_axis[1]
while i > 0:
y = C.swapaxes(y, i, i - 1)
i -= 1
result = C.times(x, y, output_rank=(len(y.shape) - 1)
if len(y.shape) > 1 else 1)
if len(y_shape) == 2:
result = squeeze(result, -1)
return result
def transpose(x):
return C.swapaxes(x, 0, 1)
def gather(reference, indices):
# There is a bug in the cntk gather op which may cause a crash.
# We have made a fix, but it was not included in the CNTK 2.1 release.
# Will update to the gather op in the next release.
if _get_cntk_version() >= 2.2:
return C.ops.gather(reference, indices)
else:
num_classes = reference.shape[0]
one_hot_matrix = C.ops.one_hot(indices, num_classes)
return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1)
def _remove_dims(x, axis, keepdims=False):
if keepdims is False and isinstance(axis, list):
# the sequence axis is removed by default, so no reshape is needed on it
reduce_axes = []
for a in axis:
if isinstance(a, C.Axis) is False:
reduce_axes.append(a)
return _reshape_dummy_dim(x, reduce_axes)
else:
if isinstance(axis, list):
has_seq = False
for a in axis:
if isinstance(a, C.Axis):
has_seq = True
break
if has_seq:
nones = _get_dynamic_axis_num(x)
x = expand_dims(x, nones)
return x
def max(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_max')
return _remove_dims(output, axis, keepdims)
def min(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_min')
return _remove_dims(output, axis, keepdims)
def sum(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_sum')
return _remove_dims(output, axis, keepdims)
def prod(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_prod')
return _remove_dims(output, axis, keepdims)
def logsumexp(x, axis=None, keepdims=False):
return log(sum(exp(x), axis=axis, keepdims=keepdims))
def var(x, axis=None, keepdims=False):
m = mean(x, axis, keepdims=True)
devs_squared = C.square(x - m)
return mean(devs_squared, axis=axis, keepdims=keepdims)
def std(x, axis=None, keepdims=False):
return C.sqrt(var(x, axis=axis, keepdims=keepdims))
def expand_dims(x, axis=-1):
shape = list(int_shape(x))
nones = _get_dynamic_axis_num(x)
index = axis if axis >= 0 else len(shape) + 1
shape.insert(index, 1)
new_shape = shape[nones:]
new_shape = tuple(
[C.InferredDimension if _ is None else _ for _ in new_shape])
result = C.reshape(x, new_shape)
if index < nones:
result._keras_shape = shape
return result
def squeeze(x, axis):
if isinstance(axis, tuple):
axis = list(axis)
if not isinstance(axis, list):
axis = [axis]
shape = list(int_shape(x))
_axis = []
for _ in axis:
if isinstance(_, int):
_axis.append(_ if _ >= 0 else _ + len(shape))
if len(_axis) == 0:
return x
nones = _get_dynamic_axis_num(x)
for _ in sorted(_axis, reverse=True):
del shape[_]
new_shape = shape[nones:]
new_shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in new_shape])
return C.reshape(x, new_shape)
def tile(x, n):
if isinstance(n, int):
n = (n,)
elif isinstance(n, list):
n = tuple(n)
shape = int_shape(x)
num_dynamic_axis = _get_dynamic_axis_num(x)
# Padding the axis
if len(n) < len(shape):
n = tuple([1 for _ in range(len(shape) - len(n))]) + n
if len(n) != len(shape):
raise NotImplementedError
i = num_dynamic_axis
for i, rep in enumerate(n):
if i >= num_dynamic_axis and shape[i] is not None:
tmp = [x] * rep
x = C.splice(*tmp, axis=i - num_dynamic_axis)
i += 1
return x
def _normalize_axis(axis, x):
shape = int_shape(x)
ndim = len(shape)
nones = _get_dynamic_axis_num(x)
if nones > ndim:
raise ValueError('CNTK Backend: tensor with keras shape: `%s` has '
'%d cntk dynamic axis, this is not expected, please '
'double check the keras shape history.' % (str(shape), nones))
# Current cntk does not support a shape like (1, batch), so we use this
# workaround to map to the correct axis. We will remove this trick after
# support is added to the native cntk op.
cntk_axis = []
dynamic_axis_index = 0
for i in range(ndim):
if shape[i] is None and dynamic_axis_index < nones:
cntk_axis.append(x.dynamic_axes[dynamic_axis_index])
dynamic_axis_index += 1
else:
cntk_axis.append(i - dynamic_axis_index)
if dynamic_axis_index < nones:
i = 0
while dynamic_axis_index < nones:
cntk_axis[i] = x.dynamic_axes[dynamic_axis_index]
i += 1
dynamic_axis_index += 1
while i < len(cntk_axis):
cntk_axis[i] -= nones
i += 1
if isinstance(axis, tuple):
_axis = list(axis)
elif isinstance(axis, int):
_axis = [axis]
elif isinstance(axis, list):
_axis = list(axis)
else:
_axis = axis
if isinstance(_axis, list):
for i, a in enumerate(_axis):
if a is not None and a < 0:
_axis[i] = (a % ndim)
if _axis[i] is not None:
_axis[i] = cntk_axis[_axis[i]]
else:
if _axis is None:
_axis = C.Axis.all_axes()
return _axis
def _reshape_dummy_dim(x, axis):
shape = list(x.shape)
_axis = [_ + len(shape) if _ < 0 else _ for _ in axis]
if shape.count(C.InferredDimension) > 1 or shape.count(C.FreeDimension) > 1:
result = x
for index in sorted(_axis, reverse=True):
result = C.reshape(result,
shape=(),
begin_axis=index,
end_axis=index + 1)
return result
else:
for index in sorted(_axis, reverse=True):
del shape[index]
shape = [C.InferredDimension if _ == C.FreeDimension else _ for _ in shape]
return C.reshape(x, shape)
def mean(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_mean')
return _remove_dims(output, axis, keepdims)
def any(x, axis=None, keepdims=False):
reduce_result = sum(x, axis, keepdims=keepdims)
any_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(any_matrix)
else:
return any_matrix
def all(x, axis=None, keepdims=False):
reduce_result = prod(x, axis, keepdims=keepdims)
all_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(all_matrix)
else:
return all_matrix
def classification_error(target, output, axis=-1):
return C.ops.reduce_mean(
C.equal(
argmax(
output,
axis=-1),
argmax(
target,
axis=-1)),
axis=C.Axis.all_axes())
def argmax(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmax(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def argmin(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmin(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def square(x):
return C.square(x)
def abs(x):
return C.abs(x)
def sqrt(x):
return C.sqrt(x)
def exp(x):
return C.exp(x)
def log(x):
return C.log(x)
def round(x):
return C.round(x)
def sigmoid(x):
return C.sigmoid(x)
def sign(x):
return x / C.abs(x)
def pow(x, a):
return C.pow(x, a)
def clip(x, min_value, max_value):
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
if min_value is None:
min_value = -np.inf
return C.clip(x, min_value, max_value)
def binary_crossentropy(target, output, from_logits=False):
if from_logits:
output = C.sigmoid(output)
output = C.clip(output, epsilon(), 1.0 - epsilon())
output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output)
return output
def get_variable_shape(x):
return int_shape(x)
def update(x, new_x):
return C.assign(x, new_x)
def moving_average_update(variable, value, momentum):
return C.assign(variable, variable * momentum + value * (1. - momentum))
def update_add(x, increment):
result = x + increment
return C.assign(x, result)
def gradients(loss, variables):
# cntk does not support gradients as symbolic op,
# to hook up with keras model
# we will return a constant as place holder, the cntk learner will apply
# the gradient during training.
global grad_parameter_dict
if isinstance(variables, list) is False:
variables = [variables]
grads = []
for v in variables:
g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
grads.append(g)
grad_parameter_dict[g] = v
return grads
def equal(x, y):
return C.equal(x, y)
def not_equal(x, y):
return C.not_equal(x, y)
def greater(x, y):
return C.greater(x, y)
def greater_equal(x, y):
return C.greater_equal(x, y)
def less(x, y):
return C.less(x, y)
def less_equal(x, y):
return C.less_equal(x, y)
def maximum(x, y):
return C.element_max(x, y)
def minimum(x, y):
return C.element_min(x, y)
def sin(x):
return C.sin(x)
def cos(x):
return C.cos(x)
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=1e-3):
if gamma is None:
if beta is None:
gamma = ones_like(x)
else:
gamma = ones_like(beta)
if beta is None:
if gamma is None:
beta = zeros_like(x)
else:
beta = zeros_like(gamma)
mean, variant = _moments(x, _normalize_axis(reduction_axes, x))
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
normalized = batch_normalization(
x, mean, variant, beta, gamma, epsilon)
else:
# need broadcasting
target_shape = []
x_shape = int_shape(x)
# skip the batch axis
for axis in range(1, ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
if ndim(gamma) > axis:
gamma = C.reduce_mean(gamma, axis - 1)
beta = C.reduce_mean(beta, axis - 1)
else:
target_shape.append(x_shape[axis])
broadcast_mean = C.reshape(mean, target_shape)
broadcast_var = C.reshape(variant, target_shape)
broadcast_gamma = C.reshape(gamma, target_shape)
broadcast_beta = C.reshape(beta, target_shape)
normalized = batch_normalization(
x,
broadcast_mean,
broadcast_var,
broadcast_beta,
broadcast_gamma,
epsilon)
return normalized, mean, variant
def _moments(x, axes=None, shift=None, keep_dims=False):
_axes = tuple(axes)
if shift is None:
shift = x
# Compute true mean while keeping the dims for proper broadcasting.
for axis in _axes:
shift = C.reduce_mean(shift, axis=axis)
shift = C.stop_gradient(shift)
shifted_mean = C.minus(x, shift)
for axis in _axes:
shifted_mean = C.reduce_mean(shifted_mean, axis=axis)
variance_mean = C.square(C.minus(x, shift))
for axis in _axes:
variance_mean = C.reduce_mean(variance_mean, axis=axis)
variance = C.minus(variance_mean, C.square(shifted_mean))
mean = C.plus(shifted_mean, shift)
if not keep_dims:
mean = squeeze(mean, _axes)
variance = squeeze(variance, _axes)
return mean, variance
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
# The mean / var / beta / gamma may have been processed by broadcast,
# so they may carry an extra batch axis of size 1. It is not needed in
# cntk, so remove those dummy axes.
if ndim(mean) == ndim(x) and shape(mean)[0] == 1:
mean = _reshape_dummy_dim(mean, [0])
if ndim(var) == ndim(x) and shape(var)[0] == 1:
var = _reshape_dummy_dim(var, [0])
if gamma is None:
gamma = ones_like(var)
elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1:
gamma = _reshape_dummy_dim(gamma, [0])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) == ndim(x) and shape(beta)[0] == 1:
beta = _reshape_dummy_dim(beta, [0])
return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta
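# Numeric sanity check for the expression above (illustrative only):
# with x = 5, mean = 3, var = 4, gamma = 2, beta = 1 and epsilon ~ 0,
#     (5 - 3) / (sqrt(4) + 0) * 2 + 1 = 3.
# Note the epsilon is added to sqrt(var) here, not to var before the
# square root as some other backends do.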
def concatenate(tensors, axis=-1):
if len(tensors) == 0:
return None
axis = [axis]
axis = _normalize_axis(axis, tensors[0])
return C.splice(*tensors, axis=axis[0])
def flatten(x):
return reshape(x, (-1,))
def reshape(x, shape):
shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in shape])
if isinstance(x, C.variables.Parameter):
return C.reshape(x, shape)
else:
num_dynamic_axis = _get_dynamic_axis_num(x)
if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1:
# collapse axis with batch axis
if b_any(_ == C.InferredDimension for _ in x.shape) or b_any(
_ == C.FreeDimension for _ in x.shape):
warnings.warn(
'Warning: CNTK backend does not support '
'collapse of batch axis with inferred dimension. '
'The reshape did not take place.')
return x
return _reshape_batch(x, shape)
else:
            # no collapse; first pad the shape
if num_dynamic_axis >= len(shape):
i = 0
while i < len(shape):
if shape[i] is None or shape[i] == -1:
i += 1
else:
break
shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape
new_shape = list(shape)
new_shape = new_shape[num_dynamic_axis:]
new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape]
return C.reshape(x, new_shape)
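# Illustrative sketch (assumed shapes, not original code): for x with
# one dynamic batch axis of size B and static shape (2, 3),
#     reshape(x, (None, 3, 2))  keeps the batch axis: the leading None
#                               is stripped and x is reshaped to the
#                               static shape (3, 2);
#     reshape(x, (-1, 3))       collapses into the batch axis through
#                               `_reshape_batch`, so the batch grows
#                               from B to 2 * B with static shape (3,).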
def permute_dimensions(x, pattern):
dims = len(int_shape(x))
num_dynamic_axis = _get_dynamic_axis_num(x)
if isinstance(pattern, list):
current_layout = [i for i in range(dims)]
else:
current_layout = tuple([i for i in range(dims)])
if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]:
raise ValueError('CNTK backend: the permute pattern %s '
'requested permute on dynamic axis, '
'which is not supported. Please do permute '
'on static axis.' % pattern)
axis = list(pattern)
axis = axis[num_dynamic_axis:]
axis = _normalize_axis(axis, x)
return C.transpose(x, axis)
def resize_images(x, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, height_factor, axis=1)
output = repeat_elements(output, width_factor, axis=2)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def repeat_elements(x, rep, axis):
axis = _normalize_axis(axis, x)
axis = axis[0]
slices = []
shape = x.shape
i = 0
while i < shape[axis]:
tmp = C.ops.slice(x, axis, i, i + 1)
for _ in range(rep):
slices.append(tmp)
i += 1
return C.splice(*slices, axis=axis)
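# Illustrative example (not original code): for x with static shape
# (3,) and values [a, b, c],
#     repeat_elements(x, rep=2, axis=0)
# slices element-wise and splices [a, a, b, b, c, c], shape (6,).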
def repeat(x, n):
    # This is a workaround for the recurrent layer: if n is an inferred
    # dimension, there is currently no way to repeat it in CNTK, so we
    # return x unchanged and rely on CNTK's broadcasting to make the
    # recurrent layer work. To be fixed in a GA release.
if n is C.InferredDimension or n is C.FreeDimension:
return x
index = 1 - _get_dynamic_axis_num(x)
if index < 0 or index > 1:
raise NotImplementedError
new_shape = list(x.shape)
new_shape.insert(index, 1)
new_shape = tuple(new_shape)
x = C.reshape(x, new_shape)
temp = [x] * n
return C.splice(*temp, axis=index)
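# Illustrative example (assumed shapes): for x with one dynamic batch
# axis and static shape (2,), repeat(x, 3) inserts a new static axis at
# position 0 (index = 1 - 1 = 0) and splices three copies along it,
# producing static shape (3, 2) per batch entry.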
def tanh(x):
return C.tanh(x)
def _static_rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
uses_learning_phase = False
if dims < 3:
raise ValueError('Input should be at least 3D.')
# if the second axis is static axis, CNTK will do unroll by default
if shape[1] is None:
raise ValueError('CNTK Backend: the input of static rnn '
'has shape `%s`, the second axis '
'is not static. If you want to run '
'rnn with non-static axis, please try '
'dynamic rnn with sequence axis.' % shape)
if constants is None:
constants = []
if mask is not None:
mask_shape = int_shape(mask)
if len(mask_shape) == dims - 1:
mask = expand_dims(mask)
nones = _get_dynamic_axis_num(inputs)
states = tuple(initial_states)
outputs = []
time_axis = 1 - nones if nones > 0 else 1
if go_backwards:
i = shape[1] - 1
while i >= 0:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, time_axis)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, time_axis)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states
i -= 1
else:
i = 0
while i < shape[1]:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, 1)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, 1)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states[:len(states)]
i += 1
i = 1
# add the time_step axis back
final_output = expand_dims(outputs[0], 1)
last_output = outputs[0]
while i < len(outputs):
# add the time_step axis back
output_slice = expand_dims(outputs[i], 1)
final_output = C.splice(final_output, output_slice, axis=time_axis)
last_output = outputs[i]
i += 1
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, states
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
global uses_learning_phase
uses_learning_phase = False
if dims < 3:
        raise ValueError('CNTK Backend: the input of rnn has only rank %d. '
                         'Need at least rank 3 to run RNN.' % dims)
if _get_dynamic_axis_num(inputs) == 0 or unroll:
return _static_rnn(
step_function,
inputs,
initial_states,
go_backwards,
mask,
constants,
unroll,
input_length)
if constants is None:
constants = []
num_time_step = shape[1]
if num_time_step is None and not has_seq_axis(inputs):
num_time_step = inputs.shape[0]
initial = []
for s in initial_states:
if _get_dynamic_axis_num(s) == 0:
if hasattr(C, 'to_batch'):
initial.append(C.to_batch(s))
else:
initial.append(C.user_function(ConvertToBatch(s)))
else:
initial.append(s)
need_convert = not has_seq_axis(inputs)
if go_backwards and need_convert is False:
raise NotImplementedError('CNTK Backend: `go_backwards` is not supported with '
'variable-length sequences. Please specify a '
'static length for your sequences.')
rnn_inputs = inputs
if need_convert:
if go_backwards:
rnn_inputs = reverse(rnn_inputs, 1)
rnn_inputs = C.to_sequence(rnn_inputs)
rnn_constants = []
for constant in constants:
if isinstance(constant, list):
new_c = []
for c in constant:
if _get_dynamic_axis_num(c) == 1:
new_c.append(C.sequence.broadcast_as(c, rnn_inputs))
else:
new_c.append(c)
rnn_constants.append(new_c)
else:
if _get_dynamic_axis_num(constant) == 1:
rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs))
else:
rnn_constants.append(constant)
else:
rnn_constants = constants
if mask is not None and not has_seq_axis(mask):
if go_backwards:
mask = reverse(mask, 1)
if len(int_shape(mask)) == 2:
mask = expand_dims(mask)
mask = C.to_sequence_like(mask, rnn_inputs)
states = tuple(initial)
with C.default_options(axis_offset=1):
def _recurrence(x, states, m):
# create place holder
place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states]
past_values = []
for s, p in zip(states, place_holders):
past_values.append(C.sequence.past_value(p, s))
new_output, new_states = step_function(
x, tuple(past_values) + tuple(rnn_constants))
if getattr(new_output, '_uses_learning_phase', False):
global uses_learning_phase
uses_learning_phase = True
if m is not None:
new_states = [C.element_select(m, n, s) for n, s in zip(new_states, past_values)]
n_s = []
for o, p in zip(new_states, place_holders):
n_s.append(o.replace_placeholders({p: o.output}))
if len(n_s) > 0:
new_output = n_s[0]
return new_output, n_s
final_output, final_states = _recurrence(rnn_inputs, states, mask)
last_output = C.sequence.last(final_output)
last_states = [C.sequence.last(s) for s in final_states]
if need_convert:
final_output = C.sequence.unpack(final_output, 0, no_mask_output=True)
if num_time_step is not None and num_time_step is not C.FreeDimension:
final_output = _reshape_sequence(final_output, num_time_step)
f_stats = []
for l_s, i_s in zip(last_states, initial_states):
if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1:
if hasattr(C, 'unpack_batch'):
f_stats.append(C.unpack_batch(l_s))
else:
f_stats.append(C.user_function(ConvertToStatic(l_s, batch_size=i_s.shape[0])))
else:
f_stats.append(l_s)
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, f_stats
def has_seq_axis(x):
return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1
def l2_normalize(x, axis=None):
axis = [axis]
axis = _normalize_axis(axis, x)
norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0]))
return x / norm
def hard_sigmoid(x):
x = (0.2 * x) + 0.5
x = C.clip(x, 0.0, 1.0)
return x
def conv1d(x, kernel, strides=1, padding='valid',
data_format=None, dilation_rate=1):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel.shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
kernel = C.swapaxes(kernel, 0, 2)
padding = _preprocess_border_mode(padding)
strides = [strides]
x = C.convolution(
kernel,
x,
strides=tuple(strides),
auto_padding=[
False,
padding])
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
return x
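# Worked padding example for the causal branch above (illustrative):
# with kernel length 3 and dilation_rate 2, left_pad = 2 * (3 - 1) = 4,
# so four zero timesteps are prepended and the convolution runs with
# 'valid' padding, guaranteeing that output t depends only on inputs
# at positions <= t.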
def conv2d(x, kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding])
else:
assert dilation_rate[0] == dilation_rate[1]
assert strides == (1, 1), 'Invalid strides for dilated convolution'
x = C.convolution(
kernel,
x,
strides=dilation_rate[0],
auto_padding=[
False,
padding,
padding])
return _postprocess_conv2d_output(x, data_format)
def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1,
padding='valid', data_format=None, dilation_rate=1):
raise NotImplementedError
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
padding='valid', data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
return _postprocess_conv2d_output(x, data_format)
def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding],
groups=x.shape[0])
return _postprocess_conv2d_output(x, data_format)
def conv3d(x, kernel, strides=(1, 1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = strides + (strides[0],)
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding])
return _postprocess_conv3d_output(x, data_format)
def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
    # In Keras 2 the output shape comes in a different format and needs conversion.
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[3]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
shape[3] = output_shape[2]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv3d_output(x, data_format)
def pool2d(x, pool_size, strides=(1, 1),
padding='valid', data_format=None,
pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
x = _preprocess_conv2d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv2d_output(x, data_format)
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid',
data_format=None, pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
x = _preprocess_conv3d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv3d_output(x, data_format)
def relu(x, alpha=0., max_value=None):
if alpha != 0.:
negative_part = C.relu(-x)
x = C.relu(x)
if max_value is not None:
x = C.clip(x, 0.0, max_value)
if alpha != 0.:
x -= alpha * negative_part
return x
def dropout(x, level, noise_shape=None, seed=None):
    if level < 0. or level >= 1:
        raise ValueError('CNTK Backend: Invalid dropout level %s, '
                         'must be in interval [0, 1).' % level)
return C.dropout(x, level)
def batch_flatten(x):
    # CNTK's batch axis is not part of `shape`, so just flatten all the
    # dims in x.shape into one.
dim = np.prod(x.shape)
x = C.reshape(x, (-1,))
x._keras_shape = (None, dim)
return x
def softmax(x, axis=-1):
return C.softmax(x, axis=axis)
def softplus(x):
return C.softplus(x)
def softsign(x):
return x / (1 + C.abs(x))
def categorical_crossentropy(target, output, from_logits=False):
if from_logits:
result = C.cross_entropy_with_softmax(output, target)
        # CNTK's result shape is (batch, 1), while Keras expects (batch,)
return C.reshape(result, ())
else:
# scale preds so that the class probas of each sample sum to 1
output /= C.reduce_sum(output, axis=-1)
# avoid numerical instability with epsilon clipping
output = C.clip(output, epsilon(), 1.0 - epsilon())
return -sum(target * C.log(output), axis=-1)
def sparse_categorical_crossentropy(target, output, from_logits=False):
target = C.one_hot(target, output.shape[-1])
target = C.reshape(target, output.shape)
return categorical_crossentropy(target, output, from_logits)
class Function(object):
def __init__(self, inputs, outputs, updates=[], **kwargs):
self.placeholders = inputs
self.trainer = None
self.unrelated_updates = None
self.updates = updates
if len(updates) > 0:
assert len(outputs) > 0
self.loss = outputs[0]
# need group update by gradient place holder
u_ops = []
unrelated_updates = []
for update in updates:
if isinstance(update, tuple):
if len(update) != 2:
raise NotImplementedError
else:
u = C.assign(update[0], update[1])
else:
u = update
if len(u.arguments) == 0:
u_ops.append(u)
else:
unrelated_updates.append(u)
update_func = C.combine([u.output for u in u_ops])
grads = update_func.find_all_with_name('keras_grad_placeholder')
u_list = []
p_list = []
for g in grads:
if g in grad_parameter_dict:
p_list.append(grad_parameter_dict[g])
u_list.append(g)
else:
raise ValueError(
'CNTK backend: when constructing trainer, '
'found gradient node `%s` which is not '
'related to any parameters in the model. '
'Please double check how the gradient node '
'is constructed.' % g)
if len(u_list) > 0:
learner = C.cntk_py.universal_learner(p_list, u_list, update_func)
criterion = (
outputs[0],
outputs[1]) if len(outputs) > 1 else (
outputs[0],
)
self.trainer = C.trainer.Trainer(
outputs[0], criterion, [learner])
self.trainer_output = tuple([f.output for f in criterion])
elif len(u_ops) > 0:
unrelated_updates.extend(u_ops)
if len(unrelated_updates) > 0:
self.unrelated_updates = C.combine([_.output for _ in unrelated_updates])
if self.trainer is None:
self.metrics_outputs = [f.output for f in outputs]
self.metrics_func = C.combine(self.metrics_outputs)
        # CNTK's trainer can only handle a loss plus one metric;
        # any additional metrics have to be evaluated manually.
elif len(outputs) > 2:
self.metrics_outputs = [f.output for f in outputs[2:]]
self.metrics_func = C.combine(self.metrics_outputs)
else:
self.metrics_func = None
@staticmethod
def _is_input_shape_compatible(input, placeholder):
if hasattr(input, 'shape') and hasattr(placeholder, 'shape'):
num_dynamic = get_num_dynamic_axis(placeholder)
input_shape = input.shape[num_dynamic:]
placeholder_shape = placeholder.shape
for i, p in zip(input_shape, placeholder_shape):
if i != p and p != C.InferredDimension and p != C.FreeDimension:
return False
return True
def __call__(self, inputs):
global _LEARNING_PHASE_PLACEHOLDER
global _LEARNING_PHASE
assert isinstance(inputs, (list, tuple))
feed_dict = {}
for tensor, value in zip(self.placeholders, inputs):
            # CNTK can only compute on floats, so auto-cast here.
if (hasattr(value, 'dtype') and
value.dtype != np.float32 and
value.dtype != np.float64):
value = value.astype(np.float32)
if tensor == _LEARNING_PHASE_PLACEHOLDER:
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value)
else:
                # The current CNTK version cannot handle variable-length
                # inputs; support is planned for a later release.
if not self._is_input_shape_compatible(value, tensor):
raise ValueError('CNTK backend: The placeholder has been resolved '
'to shape `%s`, but input shape is `%s`. Currently '
'CNTK can not take variable length inputs. Please '
'pass inputs that have a static shape.'
% (str(tensor.shape), str(value.shape)))
feed_dict[tensor] = value
updated = []
if self.trainer is not None:
input_dict = {}
for argument in self.loss.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: argument %s is not found in inputs. '
'Please double check the model and inputs in '
'`train_function`.' % argument.name)
result = self.trainer.train_minibatch(
input_dict, self.trainer_output)
            assert len(result) == 2
outputs = result[1]
for o in self.trainer_output:
updated.append(outputs[o])
if self.metrics_func is not None:
input_dict = {}
for argument in self.metrics_func.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError('CNTK backend: metrics argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
            # Some ops (like dropout) are not applied during "eval" in CNTK;
            # they are only evaluated in the training phase. To make them
            # work, call the "forward" method so CNTK knows we want to
            # evaluate them. Assign ops, however, are not executed in this
            # mode, which is why we need this check.
if (self.unrelated_updates is None and
(_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE == 1)):
_, output_values = self.metrics_func.forward(
input_dict,
self.metrics_func.outputs,
(self.metrics_func.outputs[0],),
as_numpy=False)
else:
output_values = self.metrics_func.eval(input_dict, as_numpy=False)
if isinstance(output_values, dict):
for o in self.metrics_outputs:
value = output_values[o]
v = value.asarray()
updated.append(v)
else:
v = output_values.asarray()
for o in self.metrics_outputs:
updated.append(v)
if self.unrelated_updates is not None:
input_dict = {}
for argument in self.unrelated_updates.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: assign ops argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
self.unrelated_updates.eval(input_dict, as_numpy=False)
return updated
def function(inputs, outputs, updates=[], **kwargs):
return Function(inputs, outputs, updates=updates, **kwargs)
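# Minimal usage sketch (the tensors here are hypothetical and mirror
# the Keras backend API, not code from this file):
#
#     x = placeholder(shape=(2,))
#     y = x * 2
#     f = function([x], [y])
#     f([np.array([[1., 2.]])])   # -> [array([[2., 4.]])]
#
# With a non-empty `updates` list the Function instead builds a CNTK
# trainer around outputs[0] as the loss (see `Function.__init__`).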
def temporal_padding(x, padding=(1, 1)):
assert len(padding) == 2
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if num_dynamic_axis > 0:
assert len(base_shape) == 2
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[padding, (0, 0)])
else:
x = _padding(x, padding, 0)
else:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[(0, 0), padding, (0, 0)])
else:
x = _padding(x, padding, 1)
return x
def _padding(x, pattern, axis):
base_shape = x.shape
if b_any([dim < 0 for dim in base_shape]):
raise ValueError('CNTK Backend: padding input tensor with '
'shape `%s` contains non-specified dimension, '
'which is not supported. Please give fixed '
'dimension to enable padding.' % base_shape)
if pattern[0] > 0:
prefix_shape = list(base_shape)
prefix_shape[axis] = pattern[0]
prefix_shape = tuple(prefix_shape)
x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
base_shape = x.shape
if pattern[1] > 0:
postfix_shape = list(base_shape)
postfix_shape[axis] = pattern[1]
postfix_shape = tuple(postfix_shape)
x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
return x
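# Illustrative example (not original code): for x of static shape
# (3, 4), _padding(x, (1, 2), axis=0) splices a (1, 4) zero block in
# front and a (2, 4) zero block behind, giving shape (6, 4).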
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
return x
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
x = _padding(x, padding[2], 4)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
x = _padding(x, padding[2], 2)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
return x
def one_hot(indices, num_classes):
return C.one_hot(indices, num_classes)
def get_value(x):
    if isinstance(x, (C.variables.Parameter, C.variables.Constant)):
        return x.value
    else:
        return eval(x)
def batch_get_value(xs):
result = []
for x in xs:
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
result.append(x.value)
else:
result.append(eval(x))
return result
def set_value(x, value):
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
if isinstance(value, (float, int)):
value = np.full(x.shape, value, dtype=floatx())
x.value = value
else:
raise NotImplementedError
def print_tensor(x, message=''):
return C.user_function(
LambdaFunc(x,
when=lambda x: True,
execute=lambda x: print(message)))
def batch_set_value(tuples):
for t in tuples:
x = t[0]
value = t[1]
        if not isinstance(value, np.ndarray):
            value = np.asarray(value)
if isinstance(x, C.variables.Parameter):
x.value = value
else:
raise NotImplementedError
def stop_gradient(variables):
    if isinstance(variables, (list, tuple)):
        # return a list rather than a lazy map object so the result
        # can be indexed and reused
        return [C.stop_gradient(v) for v in variables]
else:
return C.stop_gradient(variables)
def switch(condition, then_expression, else_expression):
ndim_cond = ndim(condition)
ndim_expr = ndim(then_expression)
if ndim_cond > ndim_expr:
raise ValueError('Rank of condition should be less'
' than or equal to rank of then and'
' else expressions. ndim(condition)=' +
str(ndim_cond) + ', ndim(then_expression)'
'=' + str(ndim_expr))
elif ndim_cond < ndim_expr:
shape_expr = int_shape(then_expression)
ndim_diff = ndim_expr - ndim_cond
for i in range(ndim_diff):
condition = expand_dims(condition)
condition = tile(condition, shape_expr[ndim_cond + i])
return C.element_select(condition,
then_expression,
else_expression)
def elu(x, alpha=1.):
res = C.elu(x)
if alpha == 1:
return res
else:
return C.element_select(C.greater(x, 0), res, alpha * res)
def in_top_k(predictions, targets, k):
_targets = C.one_hot(targets, predictions.shape[-1])
result = C.classification_error(predictions, _targets, topN=k)
return 1 - C.reshape(result, shape=())
def conv2d_transpose(x, kernel, output_shape, strides=(1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
    # In Keras 2 the output shape comes in a different format and needs conversion.
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[2]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv2d_output(x, data_format)
def identity(x, name=None):
if name is None:
name = '%s_alias' % x.name
return C.alias(x, name=name)
def _preprocess_conv2d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = C.transpose(x, (2, 0, 1))
return x
def _preprocess_conv2d_kernel(kernel, data_format):
# As of Keras 2.0.0, all kernels are normalized
# on the format `(rows, cols, input_depth, depth)`,
# independently of `data_format`.
# CNTK expects `(depth, input_depth, rows, cols)`.
kernel = C.transpose(kernel, (3, 2, 0, 1))
return kernel
def _preprocess_border_mode(padding):
if padding == 'same':
padding = True
elif padding == 'valid':
padding = False
else:
raise ValueError('Invalid border mode: ' + str(padding))
return padding
def _postprocess_conv2d_output(x, data_format):
if data_format == 'channels_last':
x = C.transpose(x, (1, 2, 0))
return x
def _preprocess_conv3d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
# TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3,
# input_depth)
x = C.transpose(x, (3, 0, 1, 2))
return x
def _preprocess_conv3d_kernel(kernel, dim_ordering):
kernel = C.transpose(kernel, (4, 3, 0, 1, 2))
return kernel
def _postprocess_conv3d_output(x, dim_ordering):
if dim_ordering == 'channels_last':
x = C.transpose(x, (1, 2, 3, 0))
return x
def _get_dynamic_axis_num(x):
if hasattr(x, 'dynamic_axes'):
return len(x.dynamic_axes)
else:
return 0
def _contain_seqence_axis(x):
if _get_dynamic_axis_num(x) > 1:
return x.dynamic_axes[1] == C.Axis.default_dynamic_axis()
else:
return False
def get_num_dynamic_axis(x):
return _get_dynamic_axis_num(x)
def _reduce_on_axis(x, axis, reduce_fun_name):
if isinstance(axis, list):
for a in axis:
if isinstance(a, C.Axis) \
and a != C.Axis.default_batch_axis() \
and hasattr(C.sequence, reduce_fun_name):
x = getattr(C.sequence, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, axis)
return x
def _reshape_sequence(x, time_step):
tmp_shape = list(int_shape(x))
tmp_shape[1] = time_step
return reshape(x, tmp_shape)
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride = strides[0]
kernel_shape = int_shape(kernel)
output_length, feature_dim, filters = kernel_shape
xs = []
for i in range(output_length):
slice_length = slice(i * stride,
i * stride + kernel_size[0])
xs.append(reshape(inputs[:, slice_length, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to output_filters first, to apply broadcast
weight = permute_dimensions(kernel, (2, 0, 1))
# Shape: (batch, filters, output_length, input_length * kernel_size)
output = x_aggregate * weight
# Shape: (batch, filters, output_length)
output = sum(output, axis=3)
# Shape: (batch, output_length, filters)
return permute_dimensions(output, (0, 2, 1))
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride_row, stride_col = strides
output_row, output_col = output_shape
kernel_shape = int_shape(kernel)
_, feature_dim, filters = kernel_shape
xs = []
for i in range(output_row):
for j in range(output_col):
slice_row = slice(i * stride_row,
i * stride_row + kernel_size[0])
slice_col = slice(j * stride_col,
j * stride_col + kernel_size[1])
if data_format == 'channels_first':
xs.append(reshape(inputs[:, :, slice_row, slice_col],
(-1, 1, feature_dim)))
else:
xs.append(reshape(inputs[:, slice_row, slice_col, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to put filters first
weight = permute_dimensions(kernel, (2, 0, 1))
# shape: batch, filters, output_length, input_length * kernel_size
output = x_aggregate * weight
# shape: batch, filters, output_length
output = sum(output, axis=3)
# shape: batch, filters, row, col
output = reshape(output,
(-1, filters, output_row, output_col))
if data_format == 'channels_last':
# shape: batch, row, col, filters
output = permute_dimensions(output, (0, 2, 3, 1))
return output
def reverse(x, axes):
if isinstance(axes, int):
axes = [axes]
cntk_axes = _normalize_axis(axes, x)
begin_index = [0 for _ in cntk_axes]
end_index = [0 for _ in cntk_axes]
strides = [-1 for _ in cntk_axes]
return C.slice(x, cntk_axes, begin_index, end_index, strides)
def _reshape_batch(x, shape):
# there is a bug in cntk 2.1's unpack_batch implementation
if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2:
const_a = C.unpack_batch(x)
const_a = C.reshape(const_a, shape)
return C.to_batch(const_a)
else:
return C.user_function(ReshapeBatch(x, shape[1:]))
def _get_cntk_version():
version = C.__version__
if version.endswith('+'):
version = version[:-1]
    # For hotfix-style versions, ignore every '.' except the first one.
    if len(version) > 2 and version[1] == '.':
        version = version[:2] + version[2:].replace('.', '')
    try:
        return float(version)
    except ValueError:
        warnings.warn(
            'CNTK backend warning: CNTK version not detected. '
            'Will use CNTK 2.0 GA as the default.')
        return float(2.0)
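# Worked example of the parsing above (illustrative): version '2.3.1+'
# drops the trailing '+', keeps only the first dot ('2.31') and parses
# to the float 2.31, so the >= 2.2 check in `_reshape_batch` passes.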
class ReshapeBatch(C.ops.functions.UserFunction):
def __init__(self, input, shape, name='reshape_with_batch'):
super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name)
self.from_shape = input.shape
self.target_shape = shape
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape))
num_static_element = np.prod(np.asarray(self.target_shape))
num_batch = int(num_element / num_static_element)
result = arguments.data().as_shape((num_batch,) + self.target_shape)
return None, C.cntk_py.Value(result)
def backward(self, state, root_gradients):
grad_array_view = root_gradients.data()
num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape))
num_static_element = np.prod(np.asarray(self.from_shape))
num_old_batch = int(num_element / num_static_element)
return C.cntk_py.Value(
grad_array_view.as_shape(
(num_old_batch,) + self.from_shape))
class ConvertToBatch(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK batch axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk variable (parameter/constant)
name: name of this node
"""
def __init__(self, input, name='convert_to_batch'):
super(ConvertToBatch, self).__init__([input], as_numpy=False, name=name)
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.inputs[0].shape[1:],
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
class ConvertToStatic(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK static axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk tensor which has batch axis
batch_size: size of batch axis.
name: name of this node.
"""
def __init__(self, input, batch_size, name='convert_to_static'):
super(ConvertToStatic, self).__init__([input], as_numpy=False, name=name)
self.target_shape = (batch_size,) + input.shape
def infer_outputs(self):
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
class LambdaFunc(C.ops.functions.UserFunction):
def __init__(self,
arg,
when=lambda arg: True,
execute=lambda arg: print(arg),
name=''):
self.when = when
self.execute = execute
super(LambdaFunc, self).__init__([arg], name=name)
def infer_outputs(self):
return [
C.output_variable(
self.inputs[0].shape,
self.inputs[0].dtype,
self.inputs[0].dynamic_axes)]
def forward(self, argument, device=None, outputs_to_retain=None):
if self.when(argument):
self.execute(argument)
return None, argument
def backward(self, state, root_gradients):
return root_gradients
# ======================================================================
# Next record: Project Files/Prebuilt tools/twitter/Twitter/pylib/oauthlib/oauth1/rfc5849/endpoints/resource.py
# Repo: nVoid/Yale-TouchDesigner-April2016 (MIT), 6,946 bytes
# ======================================================================
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the resource protection provider logic of
OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.common import log
from .base import BaseEndpoint
from .. import errors
class ResourceEndpoint(BaseEndpoint):
"""An endpoint responsible for protecting resources.
Typical use is to instantiate with a request validator and invoke the
``validate_protected_resource_request`` in a decorator around a view
function. If the request is valid, invoke and return the response of the
view. If invalid create and return an error response directly from the
decorator.
See :doc:`/oauth1/validator` for details on which validator methods to implement
for this endpoint.
An example decorator::
from functools import wraps
from your_validator import your_validator
from oauthlib.oauth1 import ResourceEndpoint
endpoint = ResourceEndpoint(your_validator)
def require_oauth(realms=None):
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwargs):
v, r = provider.validate_protected_resource_request(
request.url,
http_method=request.method,
body=request.data,
headers=request.headers,
realms=realms or [])
if v:
return f(*args, **kwargs)
else:
return abort(403)
"""
def validate_protected_resource_request(self, uri, http_method='GET',
body=None, headers=None, realms=None):
"""Create a request token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error:
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error:
return False, request
if not request.resource_owner_key:
return False, request
if not self.request_validator.check_access_token(
request.resource_owner_key):
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
access_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_access_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_access_token
        # Note that `realm`_ is only used in authorization headers and how
        # it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
        # flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_realms(request.client_key,
request.resource_owner_key, request, uri=request.uri,
realms=realms)
valid_signature = self._check_signature(request)
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid token: %s", valid_resource_owner)
log.info("Valid realm: %s", valid_realm)
log.info("Valid signature: %s", valid_signature)
return v, request
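# Minimal usage sketch (names such as `your_validator` are assumptions,
# as in the class docstring above, not part of this module):
#
#     endpoint = ResourceEndpoint(your_validator)
#     valid, request = endpoint.validate_protected_resource_request(
#         'https://api.example.com/photos', http_method='GET',
#         headers={'Authorization': 'OAuth ...'}, realms=['photos'])
#
# `valid` is only True once client, token, realm and signature all
# check out; failures still run the full near-constant-time
# verification path described in the comments above.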
# ======================================================================
# Next record: python/ex_1.py
# Repo: AymenSe/Geometric-operations-DIP (MIT), 1,493 bytes
# ======================================================================
####################################################
#
# @ Authors : SEKHRI Aymen
# MOHAMMED HACENE Tarek
#
# @ Hint: you have to install all requirements
# from requirements.txt
#
####################################################
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# load the image
onion_img = cv.imread("onion.png")
# Store height and width and channels of the image
row, col, chs = onion_img.shape
# Store the spectral resolution
dtype_img = onion_img.dtype # This will give you: uint8
def translation(img, trans):
"""
args:
- img: absolute path to the image
- trans: must be a tuple (row_trans, col_trans)
"""
# read the image
image = cv.imread(img)
# retrieve the height and the width
height, width = image.shape[:2]
# retrieve the params of translation
row_trans, col_trans = trans
# Create the translation matrix
T = np.float32([[1, 0, col_trans], [0, 1, row_trans]])
# Apply the T matrix: T*M
img_translation = cv.warpAffine(image, T, (width, height))
# show the images
cv.imshow("Original Image", image)
cv.imshow('Translation Image', img_translation)
    # Keep the windows open until the user presses a key
cv.waitKey()
cv.destroyAllWindows()
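# The matrix T above implements the affine map (an explanatory note,
# not part of the original script):
#     [x']   [1 0 col_trans]   [x]
#     [y'] = [0 1 row_trans] * [y]
#                              [1]
# i.e. x' = x + col_trans (shift right) and y' = y + row_trans (shift
# down, since image rows grow downwards).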
# translation 20 pixel to the right
translation("onion.png", (0, 20))
# translation 50 lines and 100 cols to the right
translation("onion.png", (50, 100))
# remove the pepper from the image using translations
translation("onion.png", (40, 40))
# ======================================================================
# Next record: utils/hit_rate_utils.py
# Repo: h-zcc/ref-nms (MIT), 3,999 bytes
# ======================================================================
from utils.misc import calculate_iou, xywh_to_xyxy
__all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator']
class NewHitRateEvaluator:
def __init__(self, refer, top_N=None, threshold=0.5):
"""Evaluate refexp-based hit rate.
Args:
refdb: `refdb` dict.
split: Dataset split to evaluate on.
top_N: Select top-N scoring proposals to evaluate. `None` means no selection. Default `None`.
"""
self.refer = refer
self.top_N = top_N
self.threshold = threshold
def eval_hit_rate(self, split, proposal_dict, image_as_key=False):
"""Evaluate refexp-based hit rate.
Args:
proposal_dict: {exp_id or image_id: [{box: [4,], score: float}]}.
image_as_key: Use image_id instead of exp_id as key, default `False`.
Returns:
proposal_per_ref: Number of proposals per refexp.
hit_rate: Refexp-based hit rate of proposals.
"""
# Initialize counters
num_hit = 0
num_proposal = 0
num_ref = 0 # NOTE: this is the number of refexp, not ref
for ref_id in self.refer.getRefIds(split=split):
ref = self.refer.Refs[ref_id]
image_id = ref['image_id']
ann_id = ref['ann_id']
ann = self.refer.Anns[ann_id]
gt_box = xywh_to_xyxy(ann['bbox'])
for exp_id in ref['sent_ids']:
# Get proposals
if image_as_key:
proposals = proposal_dict[image_id]
else:
proposals = proposal_dict[exp_id]
# Rank and select proposals
ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N]
for proposal in ranked_proposals:
if calculate_iou(gt_box, proposal['box']) > self.threshold:
num_hit += 1
break
num_proposal += len(ranked_proposals)
num_ref += 1
proposal_per_ref = num_proposal / num_ref
hit_rate = num_hit / num_ref
return proposal_per_ref, hit_rate
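# Minimal usage sketch (the `refer` object and the proposal dict shape
# are assumptions based on the docstrings, not code from this module):
#
#     evaluator = NewHitRateEvaluator(refer, top_N=10, threshold=0.5)
#     proposals_per_ref, hit_rate = evaluator.eval_hit_rate(
#         'val', proposal_dict)
#     # proposal_dict: {exp_id: [{'box': [x1, y1, x2, y2], 'score': s}]}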
class CtxHitRateEvaluator:
def __init__(self, refer, ctxdb, top_N=None, threshold=0.5):
self.refer = refer
self.ctxdb = ctxdb
self.top_N = top_N
self.threshold = threshold
def eval_hit_rate(self, split, proposal_dict, image_as_key=False):
"""Evaluate refexp-based hit rate.
Args:
proposal_dict: {exp_id or image_id: [{box: [4,], score: float}]}.
image_as_key: Use image_id instead of exp_id as key, default `False`.
Returns:
proposal_per_ref: Number of proposals per refexp.
hit_rate: Refexp-based hit rate of proposals.
"""
# Initialize counters
recall_list = []
avg_num_list = []
for exp_id, ctx in self.ctxdb[split].items():
exp_id = int(exp_id)
if len(ctx['ctx']) == 0:
continue
# Get proposals
if image_as_key:
image_id = self.refer.sentToRef[exp_id]['image_id']
proposals = proposal_dict[image_id]
else:
proposals = proposal_dict[exp_id]
# Rank and select proposals
ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N]
hit_num, ctx_num = 0, 0
for ctx_item in ctx['ctx']:
ctx_num += 1
ctx_box = ctx_item['box']
for proposal in ranked_proposals:
if calculate_iou(ctx_box, proposal['box']) > self.threshold:
hit_num += 1
break
recall_list.append(hit_num / ctx_num)
avg_num_list.append(len(ranked_proposals))
return sum(avg_num_list) / len(avg_num_list), sum(recall_list) / len(recall_list)
# ======================================================================
# Next record: LeetCode_ReorderDataLogFiles.py
# Repo: amukher3/Problem_solutions (Apache-2.0), 905 bytes
# ======================================================================
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 22 19:07:30 2020
@author: Abhishek Mukherjee
"""
from typing import List  # needed for the List[str] annotations below


class Solution:
def reorderLogFiles(self, logs: List[str]) -> List[str]:
letLog=[]
digLog=[]
for i in range(len(logs)):
temp=[]
temp=logs[i].split(' ')
if temp[1].isdigit() is True:
digLog.append(logs[i])
else:
letLog.append(logs[i])
tempLetLog=[]
for i in letLog:
tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]]))
tempLetLog=sorted(tempLetLog)
letLog=[]
for i in tempLetLog:
tempPrime=i.split(' ')[:-1]
temp=i.split(' ')[-1]
letLog.append(' '.join([temp]+tempPrime))
return letLog+digLog
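# Worked example (the LeetCode 937 sample, shown for illustration):
#     logs = ["dig1 8 1 5 1", "let1 art can", "dig2 3 6",
#             "let2 own kit dig", "let3 art zero"]
#     Solution().reorderLogFiles(logs)
#     # -> ["let1 art can", "let3 art zero", "let2 own kit dig",
#     #     "dig1 8 1 5 1", "dig2 3 6"]
# Letter-logs are sorted by content (identifier as tie-break) and
# digit-logs keep their relative order, which is what the two passes
# above implement.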
# ======================================================================
# Next record: saleor/core/transactions.py
# Repo: fairhopeweb/saleor (CC-BY-4.0), 475 bytes
# ======================================================================
from contextlib import contextmanager
from django.db import DatabaseError
from ..core.tracing import traced_atomic_transaction
@contextmanager
def transaction_with_commit_on_errors():
"""Perform transaction and raise an error in any occurred."""
error = None
with traced_atomic_transaction():
try:
yield
except DatabaseError:
raise
except Exception as e:
error = e
if error:
raise error
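# Minimal usage sketch (model and error names are hypothetical, not
# from saleor):
#
#     with transaction_with_commit_on_errors():
#         order.status = OrderStatus.FAILED
#         order.save()
#         raise PaymentError('gateway timeout')
#
# Unlike a plain atomic block, the non-database error above is
# re-raised only after the transaction commits, so the status change
# persists; a DatabaseError still propagates immediately and rolls
# the transaction back.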
# ======================================================================
# Next record: src/command_modules/azure-cli-policyinsights/azure/cli/command_modules/policyinsights/tests/latest/test_policyinsights_scenario.py
# Repo: diberry/azure-cli (MIT), 3,314 bytes
# ======================================================================
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, record_only
@record_only()
class PolicyInsightsTests(ScenarioTest):
def test_policy_insights(self):
top_clause = '--top 2'
filter_clause = '--filter "isCompliant eq false"'
apply_clause = '--apply "groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))"'
select_clause = '--select "policyAssignmentId, resourceId, numRecords"'
order_by_clause = '--order-by "numRecords desc"'
from_clause = '--from "2018-04-04T00:00:00"'
to_clause = '--to "2018-05-22T00:00:00"'
scopes = [
'-m "azgovtest4"',
'',
'-g "defaultresourcegroup-eus"',
'--resource "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba"',
'--resource "omssecuritydevkeyvalut" --namespace "microsoft.keyvault" --resource-type "vaults" -g "omssecurityintresourcegroup"',
'--resource "default" --namespace "microsoft.network" --resource-type "subnets" --parent "virtualnetworks/mms-wcus-vnet" -g "mms-wcus"',
'-s "335cefd2-ab16-430f-b364-974a170eb1d5"',
'-d "25bf1e2a-6004-47ad-9bd1-2a40dd6de016"',
'-a "96e22f7846e94bb186ae3a01"',
'-a "bc916e4f3ab54030822a11b3" -g "tipkeyvaultresourcegroup" '
]
for scope in scopes:
events = self.cmd('az policy event list {} {} {} {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
apply_clause,
select_clause,
order_by_clause,
top_clause)).get_output_in_json()
assert len(events) >= 0
states = self.cmd('az policy state list {} {} {} {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
apply_clause,
select_clause,
order_by_clause,
top_clause)).get_output_in_json()
assert len(states) >= 0
summary = self.cmd('az policy state summarize {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
top_clause)).get_output_in_json()
assert summary["results"] is not None
assert len(summary["policyAssignments"]) >= 0
if len(summary["policyAssignments"]) > 0:
assert summary["policyAssignments"][0]["results"] is not None
assert len(summary["policyAssignments"][0]["policyDefinitions"]) >= 0
if len(summary["policyAssignments"][0]["policyDefinitions"]) > 0:
assert summary["policyAssignments"][0]["policyDefinitions"][0]["results"] is not None
| 48.028986
| 194
| 0.545866
| 293
| 3,314
| 6.03413
| 0.453925
| 0.081448
| 0.084842
| 0.063348
| 0.3569
| 0.279977
| 0.225679
| 0.225679
| 0.18552
| 0.128959
| 0
| 0.060352
| 0.280024
| 3,314
| 68
| 195
| 48.735294
| 0.680637
| 0.101388
| 0
| 0.368421
| 0
| 0.052632
| 0.407669
| 0.169189
| 0
| 0
| 0
| 0
| 0.122807
| 1
| 0.017544
| false
| 0
| 0.017544
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cc70ea72109b3602fc21ef2eb53e2e3c1469770
| 1,461
|
py
|
Python
|
grr/server/grr_response_server/databases/db_yara_test_lib.py
|
khanhgithead/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
[
"Apache-2.0"
] | 4,238
|
2015-01-01T15:34:50.000Z
|
2022-03-31T08:18:05.000Z
|
grr/server/grr_response_server/databases/db_yara_test_lib.py
|
khanhgithead/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
[
"Apache-2.0"
] | 787
|
2015-01-02T21:34:24.000Z
|
2022-03-02T13:26:38.000Z
|
grr/server/grr_response_server/databases/db_yara_test_lib.py
|
khanhgithead/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
[
"Apache-2.0"
] | 856
|
2015-01-02T02:50:11.000Z
|
2022-03-31T11:11:53.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""A module with test cases for the YARA database method."""
import os
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
class DatabaseTestYaraMixin(object):
"""A mixin class for testing YARA methods of database implementations."""
def testWriteYaraSignatureReferenceIncorrectUsername(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
with self.assertRaises(db.UnknownGRRUserError) as context:
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="quux")
self.assertEqual(context.exception.username, "quux")
def testWriteYaraSignatureReferenceDuplicated(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
# Writing duplicated signatures is possible, it should not raise.
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
def testVerifyYaraSignatureReferenceSimple(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id))
def testVerifyYaraSignatureReferenceIncorrect(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
self.assertFalse(self.db.VerifyYaraSignatureReference(blob_id))
| 33.976744
| 75
| 0.776181
| 173
| 1,461
| 6.421965
| 0.404624
| 0.075608
| 0.032403
| 0.057606
| 0.449145
| 0.377138
| 0.377138
| 0.377138
| 0.329433
| 0.212421
| 0
| 0.007004
| 0.120465
| 1,461
| 42
| 76
| 34.785714
| 0.857588
| 0.158111
| 0
| 0.409091
| 0
| 0
| 0.018899
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.181818
| false
| 0
| 0.136364
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cc7a37d4874c578241d8fb555c025d8c962058b
| 4,912
|
py
|
Python
|
gpytorch/kernels/inducing_point_kernel.py
|
4aHxKzD/gpytorch
|
7193545f88820ea04588b983f1d7ed603a59a27c
|
[
"MIT"
] | 1
|
2021-03-05T07:20:58.000Z
|
2021-03-05T07:20:58.000Z
|
gpytorch/kernels/inducing_point_kernel.py
|
4aHxKzD/gpytorch
|
7193545f88820ea04588b983f1d7ed603a59a27c
|
[
"MIT"
] | 1
|
2021-02-24T14:01:43.000Z
|
2021-02-24T14:01:43.000Z
|
gpytorch/kernels/inducing_point_kernel.py
|
syncrostone/gpytorch
|
4d33fbf64594aab2dd6e0cfcb3242510231b3e0e
|
[
"MIT"
] | 1
|
2021-03-15T12:32:24.000Z
|
2021-03-15T12:32:24.000Z
|
#!/usr/bin/env python3
import copy
import math
import torch
from ..distributions import MultivariateNormal
from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify
from ..mlls import InducingPointKernelAddedLossTerm
from ..models import exact_prediction_strategies
from ..utils.cholesky import psd_safe_cholesky
from .kernel import Kernel
class InducingPointKernel(Kernel):
def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None):
super(InducingPointKernel, self).__init__(active_dims=active_dims)
self.base_kernel = base_kernel
self.likelihood = likelihood
if inducing_points.ndimension() == 1:
inducing_points = inducing_points.unsqueeze(-1)
self.register_parameter(name="inducing_points", parameter=torch.nn.Parameter(inducing_points))
self.register_added_loss_term("inducing_point_loss_term")
def _clear_cache(self):
if hasattr(self, "_cached_kernel_mat"):
del self._cached_kernel_mat
@property
def _inducing_mat(self):
if not self.training and hasattr(self, "_cached_kernel_mat"):
return self._cached_kernel_mat
else:
res = delazify(self.base_kernel(self.inducing_points, self.inducing_points))
if not self.training:
self._cached_kernel_mat = res
return res
@property
def _inducing_inv_root(self):
if not self.training and hasattr(self, "_cached_kernel_inv_root"):
return self._cached_kernel_inv_root
else:
chol = psd_safe_cholesky(self._inducing_mat, upper=True)
eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype)
inv_root = torch.triangular_solve(eye, chol)[0]
res = inv_root
if not self.training:
self._cached_kernel_inv_root = res
return res
def _get_covariance(self, x1, x2):
k_ux1 = delazify(self.base_kernel(x1, self.inducing_points))
if torch.equal(x1, x2):
covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root))
# Diagonal correction for predictive posterior
if not self.training:
correction = (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf)
covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction))
else:
k_ux2 = delazify(self.base_kernel(x2, self.inducing_points))
covar = MatmulLazyTensor(
k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2)
)
return covar
def _covar_diag(self, inputs):
if inputs.ndimension() == 1:
inputs = inputs.unsqueeze(1)
# Get diagonal of covar
covar_diag = delazify(self.base_kernel(inputs, diag=True))
return DiagLazyTensor(covar_diag)
def forward(self, x1, x2, diag=False, **kwargs):
covar = self._get_covariance(x1, x2)
if self.training:
if not torch.equal(x1, x2):
raise RuntimeError("x1 should equal x2 in training mode")
zero_mean = torch.zeros_like(x1.select(-1, 0))
new_added_loss_term = InducingPointKernelAddedLossTerm(
MultivariateNormal(zero_mean, self._covar_diag(x1)),
MultivariateNormal(zero_mean, covar),
self.likelihood,
)
self.update_added_loss_term("inducing_point_loss_term", new_added_loss_term)
if diag:
return covar.diag()
else:
return covar
def num_outputs_per_input(self, x1, x2):
return self.base_kernel.num_outputs_per_input(x1, x2)
def __deepcopy__(self, memo):
replace_inv_root = False
replace_kernel_mat = False
if hasattr(self, "_cached_kernel_inv_root"):
replace_inv_root = True
kernel_inv_root = self._cached_kernel_inv_root
if hasattr(self, "_cached_kernel_mat"):
replace_kernel_mat = True
kernel_mat = self._cached_kernel_mat
cp = self.__class__(
base_kernel=copy.deepcopy(self.base_kernel),
inducing_points=copy.deepcopy(self.inducing_points),
likelihood=self.likelihood,
active_dims=self.active_dims,
)
if replace_inv_root:
cp._cached_kernel_inv_root = kernel_inv_root
if replace_kernel_mat:
cp._cached_kernel_mat = kernel_mat
return cp
def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood):
# Allow for fast variances
return exact_prediction_strategies.SGPRPredictionStrategy(
train_inputs, train_prior_dist, train_labels, likelihood
)
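# A minimal usage sketch (the RBF base kernel, Gaussian likelihood, and the
# every-10th-point inducing grid are illustrative choices, not part of this
# module):
import torch
import gpytorch

train_x = torch.linspace(0, 1, 500).unsqueeze(-1)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
base_kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
kernel = InducingPointKernel(
    base_kernel, inducing_points=train_x[::10].clone(), likelihood=likelihood)
covar = kernel(train_x, train_x)  # lazily evaluated low-rank covariance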
| 36.656716
| 116
| 0.659202
| 574
| 4,912
| 5.30662
| 0.229965
| 0.039068
| 0.063033
| 0.043664
| 0.200263
| 0.154629
| 0.124097
| 0.061064
| 0.03086
| 0.03086
| 0
| 0.010738
| 0.260586
| 4,912
| 133
| 117
| 36.932331
| 0.827919
| 0.023005
| 0
| 0.147059
| 0
| 0
| 0.041293
| 0.019604
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098039
| false
| 0
| 0.088235
| 0.019608
| 0.303922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cc9967d7946d0cff670ce2e551feabb3ef304ce
| 891
|
py
|
Python
|
app/__init__.py
|
Jotasenpai/DigitalMediaStoreRESTfull
|
bb776d398e1756b1ff2fd4f392b80479ae29847d
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
Jotasenpai/DigitalMediaStoreRESTfull
|
bb776d398e1756b1ff2fd4f392b80479ae29847d
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
Jotasenpai/DigitalMediaStoreRESTfull
|
bb776d398e1756b1ff2fd4f392b80479ae29847d
|
[
"MIT"
] | null | null | null |
import logging
import os
from flask import Flask
from flask_cors import CORS
from app.extensions import api
from app.extensions.database import db
from app.extensions.schema import ma
from app.views import albums, artists, hello, tracks
def create_app(config, **kwargs):
logging.basicConfig(level=logging.INFO)
app = Flask(__name__, **kwargs)
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config.from_object(config)
# app.url_map.strict_slashes = False
with app.app_context():
api.init_app(app)
db.init_app(app)
db.create_all()
ma.init_app(app)
api.register_blueprint(hello.blp)
api.register_blueprint(artists.blp)
api.register_blueprint(albums.blp)
api.register_blueprint(tracks.blp)
try:
os.makedirs(app.instance_path)
except OSError:
pass
return app
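# A minimal sketch of booting the factory (the Config class and its settings
# are assumptions; any object accepted by Flask's config.from_object works,
# and the API_* keys assume a flask-smorest style Api extension):
class Config:
    API_TITLE = "DigitalMediaStore REST API"
    API_VERSION = "v1"
    OPENAPI_VERSION = "3.0.2"
    SQLALCHEMY_DATABASE_URI = "sqlite:///app.db"
    SQLALCHEMY_TRACK_MODIFICATIONS = False

app = create_app(Config)
if __name__ == "__main__":
    app.run(debug=True)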
| 21.214286
| 54
| 0.679012
| 119
| 891
| 4.92437
| 0.420168
| 0.047782
| 0.136519
| 0.117747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217733
| 891
| 41
| 55
| 21.731707
| 0.840746
| 0.038159
| 0
| 0
| 0
| 0
| 0.016374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0.037037
| 0.296296
| 0
| 0.37037
| 0.148148
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cc9e6223af3f0ca91fd050679827da65d115102
| 18,053
|
py
|
Python
|
app.py
|
SASHA-PAIS/A-Flask-web-app-for-inventory-management
|
e6ed1b0d1d06ba04f9930f7653ce0504ecf81dd3
|
[
"MIT"
] | null | null | null |
app.py
|
SASHA-PAIS/A-Flask-web-app-for-inventory-management
|
e6ed1b0d1d06ba04f9930f7653ce0504ecf81dd3
|
[
"MIT"
] | null | null | null |
app.py
|
SASHA-PAIS/A-Flask-web-app-for-inventory-management
|
e6ed1b0d1d06ba04f9930f7653ce0504ecf81dd3
|
[
"MIT"
] | null | null | null |
from flask import Flask, url_for, request, redirect
from flask import render_template as render
from flask_mysqldb import MySQL
import yaml
import json
import MySQLdb
import decimal
class Encoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, decimal.Decimal):
            return str(obj)
        # Defer to the base class so unsupported types still raise TypeError.
        return super(Encoder, self).default(obj)
# Setting up the flask instance
app = Flask(__name__)
# Configure the database
db = yaml.safe_load(open('db.yaml'))  # safe_load: yaml.load without a Loader is unsafe and deprecated
app.config['MYSQL_HOST'] = db['mysql_host']
app.config['MYSQL_USER'] = db['mysql_user']
app.config['MYSQL_PASSWORD'] = db['mysql_password']
app.config['MYSQL_DB'] = db['mysql_db']
mysql = MySQL(app)
link = {x:x for x in ["location", "product", "movement"]}
link["index"] = '/'
def init_database():
cursor = mysql.connection.cursor()
# Initialise all tables
cursor.execute("""
CREATE TABLE IF NOT EXISTS products(prod_id integer primary key auto_increment,
prod_name varchar(20) UNIQUE NOT NULL,
prod_quantity integer not null,
unallocated_quantity integer);
""")
# Might have to create a trigger, let's see!
cursor.execute("""
CREATE TABLE IF NOT EXISTS location(loc_id integer primary key auto_increment,
loc_name varchar(20) unique not null);
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS logistics(trans_id integer primary key auto_increment,
prod_id INTEGER NOT NULL,
from_loc_id INTEGER NULL,
to_loc_id INTEGER NULL,
prod_quantity INTEGER NOT NULL,
trans_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(prod_id) REFERENCES products(prod_id),
FOREIGN KEY(from_loc_id) REFERENCES location(loc_id),
FOREIGN KEY(to_loc_id) REFERENCES location(loc_id));
""")
mysql.connection.commit()
cursor.close()
@app.route('/')
def summary():
init_database()
msg = None
q_data, warehouse, products = None, None, None
cursor = mysql.connection.cursor()
try:
cursor.execute("Select * from location")
warehouse = cursor.fetchall()
cursor.execute("Select * from products")
products = cursor.fetchall()
cursor.execute("""
SELECT prod_name, unallocated_quantity, prod_quantity FROM products
""")
q_data = cursor.fetchall()
    except (MySQLdb.Error, MySQLdb.Warning) as e:
        msg = f"An error occurred: {e}"
print(msg)
cursor.close()
return render('index.html',link=link, title = "Summary", warehouses = warehouse, products = products, database = q_data)
@app.route('/location.html', methods=['POST', 'GET'])
def location():
init_database()
msg=None
cursor = mysql.connection.cursor()
cursor.execute("SELECT * FROM location ORDER BY loc_id")
warehouse_data = cursor.fetchall()
cursor.execute("SELECT loc_name FROM location")
loc_names = cursor.fetchall()
loc_new = []
for i in range(len(loc_names)):
loc_new.append(loc_names[i][0])
if request.method == 'POST':
warehouse_name = request.form['warehouse_name']
warehouse_name = warehouse_name.capitalize()
transaction_allowed = False
if warehouse_name not in ['', ' ', None] and warehouse_name not in loc_new:
transaction_allowed=True
if transaction_allowed:
try:
cursor.execute("INSERT INTO location(loc_name) VALUES(%s)", (warehouse_name,))
mysql.connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
                msg = f"An error occurred: {e}"
            else:
                msg = f"{warehouse_name} added successfully"
if msg:
print(msg)
cursor.close()
return redirect(url_for('location'))
return render('location.html', link=link, warehouses=warehouse_data, transaction_message=msg, title = "Warehouse Locations")
@app.route('/product.html', methods=['POST', 'GET'])
def product():
init_database()
msg=None
cursor = mysql.connection.cursor()
cursor.execute("SELECT * from products")
products = cursor.fetchall()
cursor.execute("SELECT prod_name FROM products")
prod_names = cursor.fetchall()
prod_new = []
for i in range(len(prod_names)):
prod_new.append(prod_names[i][0])
if request.method == 'POST':
prod_name = request.form['prod_name']
quantity = request.form['prod_quantity']
prod_name = prod_name.capitalize()
transaction_allowed = False
if prod_name not in ['', ' ', None] and prod_name not in prod_new:
if quantity not in ['', ' ', None]:
                transaction_allowed = True
if transaction_allowed:
try:
cursor.execute("INSERT INTO products(prod_name, prod_quantity, unallocated_quantity) VALUES (%s, %s, %s)", (prod_name, quantity, quantity))
mysql.connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
                msg = f"An error occurred: {e}"
            else:
                msg = f"{prod_name} added successfully"
if msg:
print(msg)
cursor.close()
return redirect(url_for('product'))
return render('product.html', link=link, products = products, transaction_message=msg, title="Products Log")
@app.route('/movement.html', methods=['POST', 'GET'])
def movement():
init_database()
msg=None
cursor = mysql.connection.cursor()
cursor.execute("SELECT * FROM logistics")
logistics_data = cursor.fetchall()
cursor.execute("SELECT prod_id, prod_name, unallocated_quantity FROM products")
products = cursor.fetchall()
cursor.execute("SELECT loc_id, loc_name FROM location")
locations = cursor.fetchall()
# products - ((1, 'Piano', 250), (2, 'Iphone xr', 600), (6, 'Washing machine', 100), (7, 'Microwave', 50))
# x in product - (1, 'Piano', 250)
# x[0] = 1
# for p_id in [x[0] for x in products]:
# print(p_id)
# 1
# 2
# 6
# 7
# print(locations)
# for l_id in [x[0] for x in locations]:
# print(l_id)
# ((20, 'Andaman'), (19, 'Assam'), (26, 'Jodhpur'), (17, 'Puducherry'))
# 20
# 19
# 26
# 17
log_summary = []
for p_id in [x[0] for x in products]:
cursor.execute("SELECT prod_name FROM products WHERE prod_id = %s", str(p_id,))
temp_prod_name = cursor.fetchone()
#print(temp_prod_name) ('Piano',)
for l_id in [x[0] for x in locations]:
cursor.execute("SELECT loc_name FROM location WHERE loc_id = %s", (l_id,)) #str(l_id,) giving an error
temp_loc_name = cursor.fetchone()
# print(temp_loc_name) - (Andaman,)
#e.g. prod_id = 1 = piano, loc_id = 1 = andaman
cursor.execute("""
SELECT SUM(log.prod_quantity)
FROM logistics log
WHERE log.prod_id = %s AND log.to_loc_id = %s
""", (p_id, l_id))
sum_to_loc = cursor.fetchone() # No.of pianos that enter andaman
cursor.execute("""
SELECT SUM(log.prod_quantity)
FROM logistics log
WHERE log.prod_id = %s AND log.from_loc_id = %s
""", (p_id, l_id))
sum_from_loc = cursor.fetchone() # No. of pianos that leave andaman
# print(sum_from_loc)
if sum_from_loc[0] is None: #e.g. (None,) --> (0,) --> No pianos leave andaman
sum_from_loc = (0,)
if sum_to_loc[0] is None: #No pianos enter andaman
sum_to_loc = (0,)
#how much enters andaman - how much leaves andaman = how much remains (allocated) in andaman
# log_summary += [(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],) )] ORRRRRRRRRRR
log_summary.append(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],)) # (Piano,) + (Andaman,), (0,) = ('Piano', 'Andaman', 0)
#print(log_summary)
# [('Piano', 'Andaman', 0), ('Piano', 'Assam', 0), ('Piano', 'Jodhpur', 0), ('Piano', 'Puducherry', 0),
# ('Iphone xr', 'Andaman', 0), ('Iphone xr', 'Assam', 0), ('Iphone xr', 'Jodhpur', 0), ('Iphone xr', 'Puducherry', 0),
# ('Washing machine', 'Andaman', 0), ('Washing machine', 'Assam', 0), ('Washing machine', 'Jodhpur', 0), ('Washing machine', 'Puducherry', 0),
# ('Microwave', 'Andaman', 0), ('Microwave', 'Assam', 0), ('Microwave', 'Jodhpur', 0), ('Microwave', 'Puducherry', 0)]
alloc_json = {}
for row in log_summary:
try:
if row[1] in alloc_json[row[0]].keys(): #Check if Andaman exists in Piano ka keys, Check if Assam, exists in Piano ka keys, etc.
alloc_json[row[0]][row[1]] += row[2] #If yes, the add the quantity to the previous quantity
else:
alloc_json[row[0]][row[1]] = row[2] #If no, add it as a new quantity
except (KeyError, TypeError):
alloc_json[row[0]] = {} #Make the value of piano empty
alloc_json[row[0]][row[1]] = row[2] #Add Andaman with quantity as a new value in the dictionary
#print(alloc_json)
# {'Piano': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0},
# 'Iphone xr': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0},
# 'Washing machine': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0},
# 'Microwave': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}}
alloc_json = json.dumps(alloc_json, cls = Encoder)
# print(alloc_json)
# {"Piano": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0},
# "Iphone xr": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0},
# "Washing machine": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0},
# "Microwave": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0}}
if request.method == 'POST':
# transaction times are stored in UTC
prod_name = request.form['prod_name']
from_loc = request.form['from_loc']
to_loc = request.form['to_loc']
quantity = request.form['quantity']
# if no 'from loc' is given, that means the product is being shipped to a warehouse (init condition)
if from_loc in [None, '', ' ']:
try:
cursor.execute("""
INSERT INTO logistics(prod_id, to_loc_id, prod_quantity)
SELECT products.prod_id, location.loc_id, %s
FROM products, location
WHERE products.prod_name = %s AND location.loc_name = %s
""", (quantity, prod_name, to_loc))
# IMPORTANT to maintain consistency
cursor.execute("""
UPDATE products
SET unallocated_quantity = unallocated_quantity - %s
WHERE prod_name = %s
""", (quantity, prod_name))
mysql.connection.commit()
except (MySQLdb.Error, MySQLdb.Warning) as e:
msg = f"An error occured: {e}"
else:
msg = "Transaction added successfully"
elif to_loc in [None, '', ' ']:
print("To Location wasn't specified, will be unallocated")
try:
cursor.execute("""
INSERT INTO logistics(prod_id, from_loc_id, prod_quantity)
SELECT products.prod_id, location.loc_id, %s
FROM products, location
WHERE products.prod_name = %s AND location.loc_name = %s
""", (quantity, prod_name, from_loc))
#Important to maintain consistency
cursor.execute("""
UPDATE products
SET unallocated_quantity = unallocated_quantity + %s
WHERE prod_name = %s
""", (quantity, prod_name))
mysql.connection.commit()
except(MySQLdb.Error, MySQLdb.Warning) as e:
msg=f"An error occurred: {e}"
else:
msg = "Transaction added successfully"
# if 'from loc' and 'to_loc' given the product is being shipped between warehouses
else:
try:
cursor.execute("SELECT loc_id FROM location WHERE loc_name = %s", (from_loc,))
from_loc = ''.join([str(x[0]) for x in cursor.fetchall()])
# cursor.fetchall -> ((1,)), x -> (1,) x[0] -> 1 join converts 1 into a string
cursor.execute("SELECT loc_id FROM location WHERE loc_name = %s", (to_loc,))
to_loc = ''.join([str(x[0]) for x in cursor.fetchall() ])
cursor.execute("SELECT prod_id FROM products WHERE prod_name = %s", (prod_name,))
prod_id = ''.join([str(x[0]) for x in cursor.fetchall() ])
cursor.execute("""
INSERT INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity)
VALUES(%s, %s, %s, %s)
""", (prod_id, from_loc, to_loc, quantity))
mysql.connection.commit()
except(MySQLdb.Error, MySQLdb.Warning) as e:
msg=f"An error occurred: {e}"
else:
msg = "Transaction added successfully"
#Print a transaction message if exists!
if msg:
print(msg)
cursor.close()
return redirect(url_for('movement'))
return render('movement.html', title = "Product Movement", link=link, trans_message=msg, products=products, locations=locations, allocated = alloc_json, logs = logistics_data, database = log_summary)
@app.route('/delete')
def delete():
# Make sure that the queries are working properly....I'm having some doubts about the datatypes
type_ = request.args.get('type')
cursor = mysql.connection.cursor()
if type_ == 'location':
id_ = request.args.get('loc_id')
cursor.execute("SELECT prod_id, SUM(prod_quantity) FROM logistics where to_loc_id = %s GROUP BY prod_id", (id_,))
in_place = cursor.fetchall()
cursor.execute("SELECT prod_id, SUM(prod_quantity) FROM logistics where from_loc_id = %s GROUP BY prod_id", (id_,))
out_place = cursor.fetchall()
#Convert list of tuples to dict
in_place = dict(in_place)
out_place = dict(out_place)
all_place = {}
#Inplace = {1:20, 3:2000} - keys - prod_id - toloc = mumbai
#out_place = {3:100} - keys - prod_id - fromloc = mumbai
for x in in_place.keys(): #calculator entered mumbai
if x in out_place.keys(): #calculator left mumbai
all_place[x] = in_place[x] - out_place[x] #2000 fridges came to mumbai from kolkata, 100 fridges were sent to daman diu, therefore, 1900 remains in mumbai which will be unallocated if mumbai is deleted
else:
all_place[x] = in_place[x]
for products_ in all_place.keys():
cursor.execute("""
UPDATE products SET unallocated_quantity = unallocated_quantity + %s WHERE prod_id = %s
""", (all_place[products_], products_))
cursor.execute("DELETE FROM location where loc_id = %s", (id_,))
mysql.connection.commit()
cursor.close()
return redirect(url_for('location'))
elif type_ == 'product':
id_ = request.args.get('prod_id')
cursor.execute("DELETE FROM products WHERE prod_id = %s", (id_,))
mysql.connection.commit()
cursor.close()
return redirect(url_for('product'))
@app.route('/edit', methods=['POST', 'GET'])
def edit():
# Try capitalize()
type_ = request.args.get('type')
cursor = mysql.connection.cursor()
cursor.execute("SELECT loc_name FROM location")
loc_names = cursor.fetchall()
loc_new = []
for i in range(len(loc_names)):
loc_new.append(loc_names[i][0])
cursor.execute("SELECT prod_name FROM products")
prod_names = cursor.fetchall()
prod_new = []
for i in range(len(prod_names)):
prod_new.append(prod_names[i][0])
if type_ == 'location' and request.method == 'POST':
loc_id = request.form['loc_id']
loc_name = request.form['loc_name']
loc_name = loc_name.capitalize()
if loc_name not in ['', ' ', None] and loc_name not in loc_new:
cursor.execute("UPDATE location SET loc_name = %s WHERE loc_id = %s", (loc_name, loc_id))
mysql.connection.commit()
cursor.close()
return redirect(url_for('location'))
elif type_ == 'product' and request.method == 'POST':
prod_id = request.form['product_id']
prod_name = request.form['prod_name']
prod_quantity = request.form['prod_quantity']
prod_name = prod_name.capitalize()
if prod_name not in ['', ' ', None] and prod_name not in prod_new:
cursor.execute("UPDATE products SET prod_name = %s WHERE prod_id = %s", (prod_name, str(prod_id)))
if prod_quantity not in ['', ' ', None] and prod_name not in prod_new:
cursor.execute("SELECT prod_quantity FROM products WHERE prod_id = %s", (prod_id,))
old_prod_quantity = cursor.fetchone()[0]
cursor.execute("""
UPDATE products SET prod_quantity = %s, unallocated_quantity = unallocated_quantity + %s - %s
WHERE prod_id = %s
""", (prod_quantity, prod_quantity, old_prod_quantity, str(prod_id)))
mysql.connection.commit()
cursor.close()
return redirect(url_for('product'))
return render(url_for(type_))
if __name__ == '__main__':
app.run(debug=True)
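# A tiny standalone illustration of the reconciliation performed in delete():
# stock shipped into a warehouse minus stock shipped out is what must be
# returned to unallocated when the warehouse is removed (numbers mirror the
# fridge example in the comments above):
in_place = {1: 20, 3: 2000}   # prod_id -> qty received by this warehouse
out_place = {3: 100}          # prod_id -> qty sent out of this warehouse
all_place = {x: in_place[x] - out_place.get(x, 0) for x in in_place}
print(all_place)  # {1: 20, 3: 1900}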
| 37.146091
| 218
| 0.585831
| 2,237
| 18,053
| 4.561913
| 0.122933
| 0.031357
| 0.04096
| 0.023812
| 0.5805
| 0.531994
| 0.465458
| 0.417932
| 0.399314
| 0.382166
| 0
| 0.012281
| 0.287376
| 18,053
| 486
| 219
| 37.146091
| 0.780956
| 0.182574
| 0
| 0.493631
| 0
| 0
| 0.312479
| 0.005855
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025478
| false
| 0.003185
| 0.022293
| 0
| 0.092357
| 0.015924
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ccc10fc8c636712784281edcf93b9e16ef2ae97
| 2,202
|
py
|
Python
|
configs/vinbig/detectors_resnext.py
|
SeHwanJoo/mmdetection_vinbig
|
9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3
|
[
"Apache-2.0"
] | 2
|
2021-04-01T08:17:08.000Z
|
2021-07-12T11:53:53.000Z
|
configs/vinbig/detectors_resnext.py
|
SeHwanJoo/mmdetection_vinbig
|
9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3
|
[
"Apache-2.0"
] | null | null | null |
configs/vinbig/detectors_resnext.py
|
SeHwanJoo/mmdetection_vinbig
|
9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/models/cascade_rcnn_r50_fpn.py',
'./dataset_base.py',
'./scheduler_base.py',
'../_base_/default_runtime.py'
]
model = dict(
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='DetectoRS_ResNeXt',
pretrained='open-mmlab://resnext101_32x4d',
depth=101,
groups=32,
base_width=4,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True,
plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
in_channels=512,
position='after_conv2')
]
),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
pretrained='open-mmlab://resnext101_32x4d',
style='pytorch')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
num_classes=14
),
dict(
type='Shared2FCBBoxHead',
num_classes=14
),
dict(
type='Shared2FCBBoxHead',
num_classes=14
)
]
),
test_cfg=dict(
rpn=dict(
nms_thr=0.7
),
rcnn=dict(
score_thr=0.0,
nms=dict(type='nms', iou_threshold=0.4)
)
)
)
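# A hedged sketch of consuming this config programmatically (mmcv's
# Config.fromfile is the usual entry point for mmdetection of this era; the
# relative path assumes the repo layout above):
from mmcv import Config
cfg = Config.fromfile('configs/vinbig/detectors_resnext.py')
print(cfg.model.backbone.type)  # 'DetectoRS_ResNeXt'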
| 27.525
| 57
| 0.475931
| 221
| 2,202
| 4.493213
| 0.429864
| 0.096677
| 0.04431
| 0.087613
| 0.423968
| 0.321249
| 0.321249
| 0.268882
| 0.268882
| 0.268882
| 0
| 0.056231
| 0.402361
| 2,202
| 79
| 58
| 27.873418
| 0.698328
| 0
| 0
| 0.410256
| 0
| 0
| 0.15713
| 0.070391
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cccbb5b10c9e4406bbef811b8c0c86a34ddfd24
| 26,701
|
py
|
Python
|
skbio/draw/tests/test_distributions.py
|
johnchase/scikit-bio
|
340e6153b6c93053d923d344e63481860e03731e
|
[
"BSD-3-Clause"
] | null | null | null |
skbio/draw/tests/test_distributions.py
|
johnchase/scikit-bio
|
340e6153b6c93053d923d344e63481860e03731e
|
[
"BSD-3-Clause"
] | null | null | null |
skbio/draw/tests/test_distributions.py
|
johnchase/scikit-bio
|
340e6153b6c93053d923d344e63481860e03731e
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
from skbio.draw import boxplots, grouped_distributions
from skbio.draw._distributions import (
_calc_data_point_locations, _calc_data_point_ticks, _color_box_plot,
_create_legend, _get_distribution_markers, _is_single_matplotlib_color,
_plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options,
_set_figure_size, _validate_input, _validate_x_values)
class DistributionsTests(TestCase):
def setUp(self):
# Test null data list.
self.Null = None
# Test empty data list.
self.Empty = []
# Test nested empty data list.
self.EmptyNested = [[]]
# Test nested empty data list (for bar/scatter plots).
self.EmptyDeeplyNested = [[[]]]
# Test invalid number of samples in data list (for bar/scatter plots).
self.InvalidNumSamples = [[[1, 2, 3, 4, 5]],
[[4, 5, 6, 7, 8], [2, 3, 2]],
[[4, 7, 10, 33, 32, 6, 7, 8]]]
# Test valid data with three samples and four data points
# (for bar/scatter plots).
self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3, 5, 6], [2, 3, 8]],
[[4, 7, 8], [8, 9, 10, 11], [9.0, 4, 1, 1]],
[[4, 33, 32, 6, 8], [5, 4, 8, 13], [1, 1, 2]],
[[2, 2, 2, 2], [3, 9, 8], [2, 1, 6, 7, 4, 5]]]
# Test valid data with one sample (for bar/scatter plots).
self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]],
[[4, 5, 6, 7, 8]],
[[4, 7, 10, 33, 32, 6, 7, 8]]]
# Test typical data to be plotted by the boxplot function.
self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2, 99.99],
[2.3, 4, 5, 88, 9, 10, 11, 1, 0, 3, -8],
[2, 9, 7, 5, 6]]
def tearDown(self):
# We get a warning from mpl if we don't clean up our figures.
plt.close('all')
def test_validate_input_null(self):
with npt.assert_raises(ValueError):
_validate_input(self.Null, None, None, None)
def test_validate_input_empty(self):
with npt.assert_raises(ValueError):
_validate_input(self.Empty, None, None, None)
def test_validate_input_empty_nested(self):
with npt.assert_raises(ValueError):
_validate_input(self.EmptyNested, None, None, None)
def test_validate_input_empty_deeply_nested(self):
num_points, num_samples = _validate_input(self.EmptyDeeplyNested,
None, None, None)
self.assertEqual(num_points, 1)
self.assertEqual(num_samples, 1)
def test_validate_input_empty_point(self):
with npt.assert_raises(ValueError):
_validate_input([[[1, 2, 3], [4, 5]], []], None, None, None)
def test_validate_input_invalid_num_samples(self):
with npt.assert_raises(ValueError):
_validate_input(self.InvalidNumSamples, None, None, None)
def test_validate_input_invalid_data_point_names(self):
with npt.assert_raises(ValueError):
_validate_input(self.ValidSingleSampleData, None, ["T0", "T1"],
None)
def test_validate_input_invalid_sample_names(self):
with npt.assert_raises(ValueError):
_validate_input(self.ValidSingleSampleData, None, None,
["Men", "Women"])
def test_validate_input_all_valid_input(self):
self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8],
["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"]),
(4, 3))
def test_validate_x_values_invalid_x_values(self):
with npt.assert_raises(ValueError):
_validate_x_values([1, 2, 3, 4], ["T0", "T1", "T2"],
len(self.ValidSingleSampleData))
def test_validate_x_values_invalid_x_tick_labels(self):
with npt.assert_raises(ValueError):
_validate_x_values(None, ["T0"], len(self.ValidSingleSampleData))
def test_validate_x_values_nonnumber_x_values(self):
with npt.assert_raises(ValueError):
_validate_x_values(["foo", 2, 3], None,
len(self.ValidSingleSampleData))
def test_validate_x_values_valid_x_values(self):
_validate_x_values([1, 2.0, 3], None, 3)
def test_get_distribution_markers_null_marker_list(self):
self.assertEqual(_get_distribution_markers('colors', None, 5),
['b', 'g', 'r', 'c', 'm'])
def test_get_distribution_markers_empty_marker_list(self):
self.assertEqual(_get_distribution_markers('colors', None, 4),
['b', 'g', 'r', 'c'])
def test_get_distribution_markers_insufficient_markers(self):
self.assertEqual(npt.assert_warns(RuntimeWarning,
_get_distribution_markers,
'colors', None, 10),
['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r'])
self.assertEqual(npt.assert_warns(RuntimeWarning,
_get_distribution_markers,
'symbols', ['^', '>', '<'], 5),
['^', '>', '<', '^', '>'])
def test_get_distribution_markers_bad_marker_type(self):
with npt.assert_raises(ValueError):
_get_distribution_markers('shapes', [], 3)
def test_get_distribution_markers_zero_markers(self):
self.assertEqual(_get_distribution_markers('symbols', None, 0), [])
self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), [])
def test_get_distribution_markers_negative_num_markers(self):
with npt.assert_raises(ValueError):
_get_distribution_markers('symbols', [], -1)
def test_plot_bar_data(self):
fig, ax = plt.subplots()
result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv')
self.assertEqual(result[0].__class__.__name__, "Rectangle")
self.assertEqual(len(result), 1)
self.assertAlmostEqual(result[0].get_width(), 0.5)
self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
self.assertAlmostEqual(result[0].get_height(), 2.0)
fig, ax = plt.subplots()
result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'sem')
self.assertEqual(result[0].__class__.__name__, "Rectangle")
self.assertEqual(len(result), 1)
self.assertAlmostEqual(result[0].get_width(), 0.5)
self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
self.assertAlmostEqual(result[0].get_height(), 2.0)
def test_plot_bar_data_bad_error_bar_type(self):
fig, ax = plt.subplots()
with npt.assert_raises(ValueError):
_plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var')
def test_plot_bar_data_empty(self):
fig, ax = plt.subplots()
result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv')
self.assertTrue(result is None)
fig, ax = plt.subplots()
result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem')
self.assertTrue(result is None)
def test_plot_scatter_data(self):
fig, ax = plt.subplots()
result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv')
self.assertEqual(result.get_sizes(), 20)
def test_plot_scatter_data_empty(self):
fig, ax = plt.subplots()
result = _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv')
self.assertTrue(result is None)
def test_plot_box_data(self):
fig, ax = plt.subplots()
result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55,
1.5, 'stdv')
self.assertEqual(result.__class__.__name__, "dict")
self.assertEqual(len(result['boxes']), 1)
self.assertEqual(len(result['medians']), 1)
self.assertEqual(len(result['whiskers']), 2)
# mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0 creates one,
# though the resulting plot looks identical between the two versions.
# see:
# https://github.com/pydata/pandas/issues/8382#issuecomment-56840974
# https://github.com/matplotlib/matplotlib/issues/3544
self.assertTrue(len(result['fliers']) == 1 or
len(result['fliers']) == 2)
self.assertEqual(len(result['caps']), 2)
def test_plot_box_data_empty(self):
fig, ax = plt.subplots()
result = _plot_box_data(ax, [], 'blue', 0.33, 55, 1.5, 'stdv')
self.assertTrue(result is None)
def test_calc_data_point_locations_invalid_x_values(self):
with npt.assert_raises(ValueError):
_calc_data_point_locations(3, [1, 10.5])
def test_calc_data_point_locations_default_spacing(self):
locs = _calc_data_point_locations(4)
np.testing.assert_allclose(locs, [1, 2, 3, 4])
def test_calc_data_point_locations_custom_spacing(self):
# Scaling down from 3..12 to 1..4.
locs = _calc_data_point_locations(4, [3, 4, 10, 12])
np.testing.assert_allclose(locs,
np.array([1, 1.33333333, 3.33333333, 4]))
# Sorted order shouldn't affect scaling.
locs = _calc_data_point_locations(4, [4, 3, 12, 10])
np.testing.assert_allclose(locs,
np.array([1.33333333, 1, 4, 3.33333333]))
# Scaling up from 0.001..0.87 to 1..3.
locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87])
np.testing.assert_allclose(locs,
np.array([1, 1.58296893, 3]))
def test_calc_data_point_ticks(self):
ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5, False)
np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25])
ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False)
np.testing.assert_allclose(ticks, [0.75])
def test_set_axes_options(self):
fig, ax = plt.subplots()
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_tick_labels=["T0", "T1"])
self.assertEqual(ax.get_title(), "Plot Title")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
def test_set_axes_options_ylim(self):
fig, ax = plt.subplots()
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_tick_labels=["T0", "T1", "T2"], y_min=0, y_max=1)
self.assertEqual(ax.get_title(), "Plot Title")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
self.assertEqual(ax.get_ylim(), (0.0, 1.0))
def test_set_axes_options_x_values_as_tick_labels(self):
fig, ax = plt.subplots()
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_values=[42, 45, 800])
self.assertEqual(ax.get_title(), "Plot Title")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(ax.get_xticklabels()[0].get_text(), '42')
self.assertEqual(ax.get_xticklabels()[1].get_text(), '45')
self.assertEqual(ax.get_xticklabels()[2].get_text(), '800')
def test_set_axes_options_bad_ylim(self):
fig, ax = plt.subplots()
with npt.assert_raises(ValueError):
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_tick_labels=["T0", "T1", "T2"], y_min='car',
y_max=30)
def test_set_axes_options_invalid_x_tick_labels_orientation(self):
fig, ax = plt.subplots()
with npt.assert_raises(ValueError):
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_tick_labels=["T0", "T1"],
x_tick_labels_orientation='brofist')
def test_create_legend(self):
fig, ax = plt.subplots()
_create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors')
self.assertEqual(len(ax.get_legend().get_texts()), 2)
fig, ax = plt.subplots()
_create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
'symbols')
self.assertEqual(len(ax.get_legend().get_texts()), 3)
def test_create_legend_invalid_input(self):
fig, ax = plt.subplots()
with npt.assert_raises(ValueError):
_create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols')
with npt.assert_raises(ValueError):
_create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
'foo')
def test_grouped_distributions_bar(self):
fig = grouped_distributions('bar', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"],
['b', 'r', 'g'], "x-axis label",
"y-axis label", "Test")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 4)
np.testing.assert_allclose(ax.get_xticks(),
[1.1125, 2.0125, 3.8125, 4.1125])
def test_grouped_distributions_insufficient_colors(self):
args = ('bar', self.ValidTypicalData, [1, 4, 10, 11],
["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
['b', 'r'], "x-axis label", "y-axis label", "Test")
npt.assert_warns(RuntimeWarning,
grouped_distributions,
*args)
def test_grouped_distributions_scatter(self):
fig = grouped_distributions('scatter', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"],
['^', '>', '<'], "x-axis label",
"y-axis label", "Test")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 4)
np.testing.assert_allclose(ax.get_xticks(),
[1.075, 1.975, 3.775, 4.075])
def test_grouped_distributions_insufficient_symbols(self):
args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11],
["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
['^'], "x-axis label", "y-axis label", "Test")
npt.assert_warns(RuntimeWarning, grouped_distributions, *args)
def test_grouped_distributions_empty_marker_list(self):
grouped_distributions('scatter', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"], [],
"x-axis label", "y-axis label", "Test")
def test_grouped_distributions_box(self):
fig = grouped_distributions('box', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"],
['b', 'g', 'y'], "x-axis label",
"y-axis label", "Test")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 4)
np.testing.assert_allclose(ax.get_xticks(),
[1.075, 1.975, 3.775, 4.075])
def test_grouped_distributions_error(self):
with npt.assert_raises(ValueError):
grouped_distributions('pie', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"],
['b', 'g', 'y'],
"x-axis label", "y-axis label", "Test")
def test_grouped_distributions_negative_distribution_width(self):
args = ('box', self.ValidTypicalData, [1, 4, 10, 11],
["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
['b', 'g', 'y'], "x-axis label", "y-axis label", "Test")
with self.assertRaises(ValueError):
grouped_distributions(*args, distribution_width=0)
with self.assertRaises(ValueError):
grouped_distributions(*args, distribution_width=-42)
def test_boxplots(self):
fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10],
["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
"y-axis label",
legend=(('blue', 'red'), ('foo', 'bar')))
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 3)
self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
def test_boxplots_empty_distributions(self):
fig = boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10],
["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
"y-axis label")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 3)
self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
# second distribution (empty) should have nans since it is hidden.
# boxplots in mpl < 1.4.0 have 8 lines per boxplot, while mpl 1.4.0 has
# 7. in either case, the line at index 8 should have a nan for its y
# value
lines = ax.get_lines()
self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
# line in first distribution should *not* have nan for its y value
self.assertFalse(np.isnan(lines[0].get_xydata()[0][1]))
# All distributions are empty.
fig = boxplots([[], [], []], [1, 4, 10],
["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
"y-axis label")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 3)
self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
lines = ax.get_lines()
self.assertTrue(np.isnan(lines[0].get_xydata()[0][1]))
self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
self.assertTrue(np.isnan(lines[16].get_xydata()[0][1]))
def test_boxplots_box_colors(self):
# Coloring works with all empty distributions.
fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow'])
ax = fig.get_axes()[0]
self.assertEqual(len(ax.get_xticklabels()), 3)
# patch colors should match what we specified
self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
# patch location should include at least one nan since the distribution
# is empty, and thus hidden
for patch in ax.patches:
self.assertTrue(np.isnan(patch.xy[0][1]))
fig = boxplots([[], [], []], box_colors='pink')
ax = fig.get_axes()[0]
self.assertEqual(len(ax.get_xticklabels()), 3)
for patch in ax.patches:
npt.assert_almost_equal(
patch.get_facecolor(),
(1.0, 0.7529411764705882, 0.796078431372549, 1.0))
self.assertTrue(np.isnan(patch.xy[0][1]))
# Coloring works with some empty distributions.
fig = boxplots([[], [1, 2, 3.5], []],
box_colors=['blue', 'red', 'yellow'])
ax = fig.get_axes()[0]
self.assertEqual(len(ax.get_xticklabels()), 3)
self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
self.assertTrue(np.isnan(ax.patches[0].xy[0][1]))
self.assertFalse(np.isnan(ax.patches[1].xy[0][1]))
self.assertTrue(np.isnan(ax.patches[2].xy[0][1]))
def test_boxplots_invalid_input(self):
# Non-numeric entries in distribution.
with npt.assert_raises(ValueError):
boxplots([[1, 'foo', 3]])
# Number of colors doesn't match number of distributions.
with npt.assert_raises(ValueError):
boxplots([[1, 2, 3], [], [4, 5, 6]], box_colors=['blue', 'red'])
# Invalid legend.
with npt.assert_raises(ValueError):
boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz'))
def test_color_box_plot(self):
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
_color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)])
# Some colors are None.
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
_color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)])
# All colors are None.
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
_color_box_plot(ax, box_plot, [None, None, None])
def test_color_box_plot_invalid_input(self):
# Invalid color.
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
with npt.assert_raises(ValueError):
_color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue'])
# Wrong number of colors.
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
with npt.assert_raises(ValueError):
_color_box_plot(ax, box_plot, ['blue', (1, 1, 0.9)])
def test_is_single_matplotlib_color(self):
self.assertTrue(_is_single_matplotlib_color('w'))
self.assertTrue(_is_single_matplotlib_color('white'))
self.assertTrue(_is_single_matplotlib_color([1, 1, 1]))
self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1]))
self.assertTrue(_is_single_matplotlib_color((1, 1, 1)))
self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1)))
self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0)))
self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0)))
self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0)))
self.assertFalse(_is_single_matplotlib_color(['w', 'r']))
self.assertFalse(_is_single_matplotlib_color(['w']))
self.assertFalse(_is_single_matplotlib_color(('w',)))
self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),)))
self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),
(0.9, 0.9))))
def test_set_figure_size(self):
fig, ax = plt.subplots()
_set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
x_tick_labels=['foofoofoo', 'barbarbar'],
x_tick_labels_orientation='vertical')
_set_figure_size(fig, 3, 4)
self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4)))
def test_set_figure_size_defaults(self):
fig, ax = plt.subplots()
_set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
x_tick_labels=['foofoofoo', 'barbarbar'],
x_tick_labels_orientation='vertical')
orig_fig_size = fig.get_size_inches()
_set_figure_size(fig)
self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))
def test_set_figure_size_invalid(self):
fig, ax = plt.subplots()
_set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
x_tick_labels=['foofoofoo', 'barbarbar'],
x_tick_labels_orientation='vertical')
orig_fig_size = fig.get_size_inches()
_set_figure_size(fig, -1, 0)
self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))
def test_set_figure_size_long_labels(self):
fig, ax = plt.subplots()
_set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
x_tick_labels=['foofoofooooooooooooooooooooooooo'
'oooooooooooooooooooooooooooooooo'
'oooooooooooooooooooooooooooooooo'
'oooo', 'barbarbar'],
x_tick_labels_orientation='vertical')
npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3)
npt.assert_array_equal(fig.get_size_inches(), (3, 3))
if __name__ == '__main__':
main()
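# A minimal sketch of the public API exercised by these tests, following the
# positional call pattern used above (the data values are illustrative):
from skbio.draw import boxplots
fig = boxplots([[2, 2, 1, 3.5], [0.5, 1, 2]], [1, 2], ["a", "b"],
               "demo", "group", "value")
fig.savefig("boxplots.png")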
| 46.275563
| 79
| 0.562638
| 3,341
| 26,701
| 4.2607
| 0.102365
| 0.071654
| 0.045381
| 0.04496
| 0.736424
| 0.665472
| 0.622129
| 0.595293
| 0.537127
| 0.460555
| 0
| 0.047997
| 0.283697
| 26,701
| 576
| 80
| 46.355903
| 0.696277
| 0.070447
| 0
| 0.448513
| 0
| 0
| 0.071806
| 0.003875
| 0
| 0
| 0
| 0
| 0.340961
| 1
| 0.132723
| false
| 0
| 0.016018
| 0
| 0.15103
| 0.002288
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cced65964aa995783474d3ea16a3fdb37a88182
| 3,570
|
py
|
Python
|
tensorflow_probability/python/bijectors/invert_test.py
|
matthieucoquet/probability
|
2426f4fc4743ceedc1a638a03d19ce6654ebff76
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/bijectors/invert_test.py
|
matthieucoquet/probability
|
2426f4fc4743ceedc1a638a03d19ce6654ebff76
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/bijectors/invert_test.py
|
matthieucoquet/probability
|
2426f4fc4743ceedc1a638a03d19ce6654ebff76
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class InvertBijectorTest(tf.test.TestCase):
"""Tests the correctness of the Y = Invert(bij) transformation."""
def testBijector(self):
for fwd in [
tfb.Identity(),
tfb.Exp(),
tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
tfb.Softplus(),
tfb.SoftmaxCentered(),
]:
rev = tfb.Invert(fwd)
self.assertStartsWith(rev.name, "_".join(["invert", fwd.name]))
x = [[[1., 2.],
[2., 3.]]]
self.assertAllClose(
self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x)))
self.assertAllClose(
self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x)))
self.assertAllClose(
self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)),
self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)),
self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1)))
def testScalarCongruency(self):
bijector = tfb.Invert(tfb.Exp())
bijector_test_util.assert_scalar_congruency(
bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05)
def testShapeGetters(self):
bijector = tfb.Invert(
tfb.SoftmaxCentered(validate_args=True))
x = tf.TensorShape([2])
y = tf.TensorShape([1])
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(
tensorshape_util.as_list(y),
self.evaluate(
bijector.forward_event_shape_tensor(tensorshape_util.as_list(x))))
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(
tensorshape_util.as_list(x),
self.evaluate(
bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y))))
def testDocstringExample(self):
exp_gamma_distribution = (
tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=1., rate=2.),
bijector=tfb.Invert(tfb.Exp())))
self.assertAllEqual(
[],
self.evaluate(
tf.shape(
exp_gamma_distribution.sample(seed=tfp_test_util.test_seed()))))
if __name__ == "__main__":
tf.test.main()
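# --- Hedged usage sketch (not exercised by the suite above) ---
# Shows the identity these tests verify: tfb.Invert swaps a bijector's
# forward and inverse passes, so Invert(Exp) behaves like a Log bijector.
def _invert_demo():
  log_bijector = tfb.Invert(tfb.Exp())
  y = log_bijector.forward([1., 10.])  # same values as tfb.Exp().inverse
  x = log_bijector.inverse(y)          # round-trips back to [1., 10.]
  return y, x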
| 38.387097
| 95
| 0.693557
| 453
| 3,570
| 5.262693
| 0.357616
| 0.060403
| 0.052433
| 0.065017
| 0.302013
| 0.239094
| 0.173238
| 0.069631
| 0.052852
| 0.034396
| 0
| 0.010929
| 0.179832
| 3,570
| 92
| 96
| 38.804348
| 0.803279
| 0.216807
| 0
| 0.153846
| 0
| 0
| 0.005413
| 0
| 0
| 0
| 0
| 0
| 0.169231
| 1
| 0.061538
| false
| 0
| 0.153846
| 0
| 0.230769
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ccf2b0c1cc9f5a9318ca8b0e302ba7e965fbb1e
| 4,394
|
py
|
Python
|
dayu_widgets/alert.py
|
ZSD-tim/dayu_widgets
|
31c2530bdc4161d9311574d9850c2e9471e53072
|
[
"MIT"
] | null | null | null |
dayu_widgets/alert.py
|
ZSD-tim/dayu_widgets
|
31c2530bdc4161d9311574d9850c2e9471e53072
|
[
"MIT"
] | null | null | null |
dayu_widgets/alert.py
|
ZSD-tim/dayu_widgets
|
31c2530bdc4161d9311574d9850c2e9471e53072
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : muyanru345@163.com
###################################################################
"""
MAlert class.
"""
import six
import functools
from dayu_widgets.avatar import MAvatar
from dayu_widgets.label import MLabel
from dayu_widgets import dayu_theme
from dayu_widgets.tool_button import MToolButton
from dayu_widgets.mixin import property_mixin
from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property
@property_mixin
class MAlert(QWidget):
"""
Alert component for feedback.
Property:
    dayu_type: The feedback type; each type uses a differently colored container.
    dayu_text: The feedback string shown in the container.
"""
InfoType = 'info'
SuccessType = 'success'
WarningType = 'warning'
ErrorType = 'error'
def __init__(self, text='', parent=None, flags=Qt.Widget):
super(MAlert, self).__init__(parent, flags)
self.setAttribute(Qt.WA_StyledBackground)
self._icon_label = MAvatar()
self._icon_label.set_dayu_size(dayu_theme.tiny)
self._content_label = MLabel().secondary()
self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only()
self._close_button.clicked.connect(functools.partial(self.setVisible, False))
self._main_lay = QHBoxLayout()
self._main_lay.setContentsMargins(8, 8, 8, 8)
self._main_lay.addWidget(self._icon_label)
self._main_lay.addWidget(self._content_label)
self._main_lay.addStretch()
self._main_lay.addWidget(self._close_button)
self.setLayout(self._main_lay)
self.set_show_icon(True)
self.set_closeable(False)
self._dayu_type = None
self._dayu_text = None
self.set_dayu_type(MAlert.InfoType)
self.set_dayu_text(text)
def set_closeable(self, closeable):
"""Display the close icon button or not."""
self._close_button.setVisible(closeable)
def set_show_icon(self, show_icon):
"""Display the information type icon or not."""
self._icon_label.setVisible(show_icon)
def _set_dayu_text(self):
self._content_label.setText(self._dayu_text)
self.setVisible(bool(self._dayu_text))
def set_dayu_text(self, value):
"""Set the feedback content."""
if isinstance(value, six.string_types):
self._dayu_text = value
else:
raise TypeError("Input argument 'value' should be string type, "
"but get {}".format(type(value)))
self._set_dayu_text()
def _set_dayu_type(self):
self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type),
vars(dayu_theme).get(self._dayu_type + '_color')))
self.style().polish(self)
def set_dayu_type(self, value):
"""Set feedback type."""
if value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]:
self._dayu_type = value
else:
raise ValueError("Input argument 'value' should be one of "
"info/success/warning/error string.")
self._set_dayu_type()
def get_dayu_type(self):
"""
Get MAlert feedback type.
:return: str
"""
return self._dayu_type
def get_dayu_text(self):
"""
Get MAlert feedback message.
:return: six.string_types
"""
return self._dayu_text
dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text)
dayu_type = Property(str, get_dayu_type, set_dayu_type)
def info(self):
"""Set MAlert to InfoType"""
self.set_dayu_type(MAlert.InfoType)
return self
def success(self):
"""Set MAlert to SuccessType"""
self.set_dayu_type(MAlert.SuccessType)
return self
def warning(self):
"""Set MAlert to WarningType"""
self.set_dayu_type(MAlert.WarningType)
return self
def error(self):
"""Set MAlert to ErrorType"""
self.set_dayu_type(MAlert.ErrorType)
return self
def closable(self):
"""Set MAlert closebale is True"""
self.set_closeable(True)
return self
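# Hedged usage sketch (assumes a QApplication already exists, as Qt widgets
# require one before construction); illustrative only, never called here.
def _alert_demo():
    alert = MAlert(text='Saved successfully').success().closable()
    alert.show()
    return alert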
| 32.072993
| 98
| 0.62244
| 525
| 4,394
| 4.937143
| 0.251429
| 0.055556
| 0.038194
| 0.034722
| 0.157793
| 0.022377
| 0
| 0
| 0
| 0
| 0
| 0.004834
| 0.2467
| 4,394
| 136
| 99
| 32.308824
| 0.778248
| 0.142239
| 0
| 0.111111
| 0
| 0
| 0.052737
| 0.007452
| 0
| 0
| 0
| 0
| 0
| 1
| 0.17284
| false
| 0
| 0.098765
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cd14d3a6d6b9088b4271089222cd9080f058243
| 5,664
|
py
|
Python
|
jobs/SCH/JB_SALES_HIERARCHY_FLAG_N_SR.py
|
bibinvasudev/EBI_Project
|
df2560139e463d68a37e67e0bb683c06fa9ef91b
|
[
"CNRI-Python"
] | null | null | null |
jobs/SCH/JB_SALES_HIERARCHY_FLAG_N_SR.py
|
bibinvasudev/EBI_Project
|
df2560139e463d68a37e67e0bb683c06fa9ef91b
|
[
"CNRI-Python"
] | null | null | null |
jobs/SCH/JB_SALES_HIERARCHY_FLAG_N_SR.py
|
bibinvasudev/EBI_Project
|
df2560139e463d68a37e67e0bb683c06fa9ef91b
|
[
"CNRI-Python"
] | null | null | null |
# SCH1101.sh --> JB_SALES_HIERARCHY_FLAG_N_SR.py
#**************************************************************************************************************
#
# Created by : bibin
# Version : 1.0
#
# Description :
# 1. This script will load the data into 'SALES_HIERARCHY' table based on stream lookups.
#
#
# Initial Creation:
#
# Date (YYYY-MM-DD) Change Description
# ----------------- ------------------
# 2018-11-02 Initial creation
#
#**************************************************************************************************************
# Import required libraries
from dependencies.spark import start_spark
from dependencies.EbiReadWrite import EbiReadWrite
import logging
import sys
from time import gmtime, strftime
import cx_Oracle
import py4j
import pyspark
# Spark logging
logger = logging.getLogger(__name__)
# Date Formats
start_date = "'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
log_date =strftime("%Y%m%d", gmtime())
# Job Naming Details
script_name = "SCH1101.SH"
app_name = "JB_SALES_HIERARCHY_FLAG_N_SR"
log_filename = app_name + '_' + log_date + '.log'
# Query for loading the SALES_HIERARCHY table
def query_data(db_schema):
query = """INSERT INTO """+ db_schema +""".SALES_HIERARCHY
(SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID,
SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION,
GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE)
SELECT
B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY,
B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA,
B.AREA_DESCRIPTION AS SALES_AREA,
B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION,
SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION,
SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT,
SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM,
A.EMPLOYEE_ID,
A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER,
SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID,
SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME,
A.ORGANIZATION_NAME AS SALES_REP_ORG,
A.COMP_PLAN_TYPE_CODE,
A.COMP_PLAN_TITLE,
A.COMP_PLAN_CATEGORY_CODE,
A.COMP_PLAN_DESCRIPTION,
NULL AS GOAL_CURR_CODE ,
A.START_DATE,
A.END_DATE,
A.STATUS_CODE,
A.PARTICIPANT_LEVEL_CODE,
SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE,
A.CURRENT_RECORD_FLAG,
C.RECENT_HIRE_DATE AS LAST_HIRE_DATE
FROM
(
SELECT a.*,ROW_NUMBER() over (partition by BK_SALES_REP_NUMBER ORDER BY END_DATE desc) as RANK
FROM DIMS.SALES_PARTICIPANT a
WHERE
BK_SALES_REP_NUMBER NOT IN (SELECT DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG = 'Y')
AND PARTICIPANT_LEVEL_CODE = 'SR'
ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY
) A
INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY = A.TERRITORY_KEY
LEFT OUTER JOIN
(SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND = 1 ) C
ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID
WHERE RANK = 1"""
return query
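# Hedged sketch: the schema qualifier is the only dynamic part of the
# statement built above ("EBI_STG" is a hypothetical schema name).
def _query_data_demo():
    q = query_data("EBI_STG")
    assert q.startswith("INSERT INTO EBI_STG.SALES_HIERARCHY")
    return q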
# Main method
def main():
try:
src_count = '0'
dest_count = '0'
# start Spark application and get Spark session, logger and config
spark, config = start_spark(
app_name=app_name)
# Create class Object
Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger)
# DB prop Key of Source DB
db_prop_key_load = config['DB_PROP_KEY_LOAD']
db_prop_key_extract = config['DB_PROP_KEY_EXTRACT']
db_schema = config['DB_SCHEMA']
log_file = config['LOG_DIR_NAME'] + "/" + log_filename
#SQL Query
query = query_data(db_schema)
# Calling Job Class method --> get_target_data_update()
Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load)
end_date="'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
data_format = "JOB START DT : "+start_date+" | SCRIPT NAME : "+script_name+" | JOB : "+app_name+" | SRC COUNT : "+src_count+" | TGT COUNT : "+dest_count+" | JOB END DT : "+end_date+" | STATUS : %(message)s"
Ebi_read_write_obj.create_log(data_format,log_file,logger)
logger.info("Success")
Ebi_read_write_obj.job_debugger_print(" \n __main__ " + app_name +" --> Job "+app_name+" Succeed \n")
except Exception as err:
        # Write the exception to the Spark log or console
end_date="'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
data_format = "JOB START DT : "+start_date+" | SCRIPT NAME : "+script_name+" | JOB : "+app_name+" | SRC COUNT : "+src_count+" | TGT COUNT : "+dest_count+" | JOB END DT : "+end_date+" | STATUS : %(message)s"
Ebi_read_write_obj.create_log(data_format,log_file,logger)
logger.info("[Error] Failed")
Ebi_read_write_obj.job_debugger_print(" \n Job "+app_name+" Failed\n")
logger.error("\n __main__ "+ app_name +" --> Exception-Traceback :: " + str(err))
raise
# Entry point for script
if __name__ == "__main__":
# Calling main() method
main()
| 38.794521
| 218
| 0.625883
| 757
| 5,664
| 4.318362
| 0.265522
| 0.036708
| 0.029979
| 0.027531
| 0.201285
| 0.166412
| 0.15234
| 0.15234
| 0.132762
| 0.125421
| 0
| 0.009677
| 0.233757
| 5,664
| 145
| 219
| 39.062069
| 0.743548
| 0.169138
| 0
| 0.068966
| 0
| 0.011494
| 0.585616
| 0.149829
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022989
| false
| 0
| 0.091954
| 0
| 0.126437
| 0.022989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cd1e3bcc66dd50fd9167cfd73166db8b21f6910
| 670
|
py
|
Python
|
myth/util.py
|
amanbhandari2002/mythproto
|
b03764485dad5178127307a3b3e4ddc508158143
|
[
"BSD-3-Clause"
] | 1
|
2020-10-01T09:17:00.000Z
|
2020-10-01T09:17:00.000Z
|
myth/util.py
|
amanbhandari2002/mythproto
|
b03764485dad5178127307a3b3e4ddc508158143
|
[
"BSD-3-Clause"
] | null | null | null |
myth/util.py
|
amanbhandari2002/mythproto
|
b03764485dad5178127307a3b3e4ddc508158143
|
[
"BSD-3-Clause"
] | 2
|
2020-09-30T19:53:40.000Z
|
2020-10-01T09:13:08.000Z
|
def decodeLongLong(lst):
high = int(lst[0]) << 32
low = int(lst[1])
if low < 0:
low += 4294967296
if high < 0:
high += 4294967296
return high + low
def encodeLongLong(i):
    high = i // 4294967296
    low = i - high * 4294967296
return high, low
def parseOk(str):
if str == 'ok':
return True
else:
return False
def printList(lst):
#for i in range(len(lst)):
# print i, '\t', repr(lst[i])
pass
# t is a nine item tuple returned by the time module. This method converts it to
# MythTV's standard representation used on filenames
def encodeTime(t):
ret = ''
for i in t[:-3]:
si = str(i)
if len(si) < 2:
ret += si.zfill(2)
else:
ret += si
return ret
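# Hedged sketch: encodeTime() keeps the first six struct_time fields
# (year through second), zero-padding each to at least two digits, which
# yields MythTV's YYYYMMDDHHMMSS filename stamp.
def _encodeTimeDemo():
    import time
    t = time.strptime('2018-11-02 07:05:09', '%Y-%m-%d %H:%M:%S')
    assert encodeTime(t) == '20181102070509'
    return encodeTime(t)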
| 18.108108
| 80
| 0.631343
| 113
| 670
| 3.743363
| 0.504425
| 0.033097
| 0.061466
| 0.07565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075728
| 0.231343
| 670
| 36
| 81
| 18.611111
| 0.745631
| 0.273134
| 0
| 0.071429
| 0
| 0
| 0.004141
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0.035714
| 0
| 0
| 0.357143
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cd283a215a5ab2f5c601f954e24742216c659e4
| 14,208
|
py
|
Python
|
scripts/tator_tracker.py
|
openem-team/openem
|
45222c9c77084eacab278da25a8734ae7d43f677
|
[
"MIT"
] | 10
|
2019-01-23T23:58:01.000Z
|
2021-08-30T19:42:35.000Z
|
scripts/tator_tracker.py
|
openem-team/openem
|
45222c9c77084eacab278da25a8734ae7d43f677
|
[
"MIT"
] | 3
|
2020-03-20T15:21:41.000Z
|
2020-09-18T18:49:38.000Z
|
scripts/tator_tracker.py
|
openem-team/openem
|
45222c9c77084eacab278da25a8734ae7d43f677
|
[
"MIT"
] | 2
|
2020-05-08T17:39:12.000Z
|
2020-10-09T01:27:17.000Z
|
#!/usr/bin/env python3
import argparse
import openem
import os
import cv2
import numpy as np
from openem.tracking import *
import json
import sys
import datetime
import tator
from pprint import pprint
from collections import defaultdict
import yaml
import math
import subprocess
import sys
def crop_localization(frame_bgr, localization):
img_width = frame_bgr.shape[1]
img_height = frame_bgr.shape[0]
box_x = round(localization['x'] * img_width)
box_y = round(localization['y'] * img_height)
box_width = round(localization['width'] * img_width)
box_height = round(localization['height'] * img_height)
img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:]
return img_crop
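# Hedged sketch: localization coordinates are normalized [0, 1] fractions
# of the frame, so a 0.5-wide box on a 200-px-wide frame crops 100 px.
def _crop_localization_demo():
    frame = np.zeros((100, 200, 3), dtype=np.uint8)
    loc = {'x': 0.25, 'y': 0.1, 'width': 0.5, 'height': 0.5}
    crop = crop_localization(frame, loc)
    assert crop.shape == (50, 100, 3)
    return crop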
def join_up_iteration(detections, track_ids):
tracklets = defaultdict(list)
num_tracklets = np.max(track_ids) + 1
assert(len(detections) == len(track_ids))
for d,tid in zip(detections, track_ids):
tracklets[tid].append(d)
return tracklets
def extend_tracklets(tracklets, length):
for track_id,track in tracklets.items():
if len(track) <= 16:
continue
ext_length = min(length,len(track))
sum_h=0.0
sum_w=0.0
track.sort(key=lambda x:x['frame'])
def restore_det(det):
det['x'] = det.get('orig_x',det['x'])
det['y'] = det.get('orig_y',det['y'])
det['width'] = det.get('orig_w',det['width'])
det['height'] = det.get('orig_h',det['height'])
det['orig_x'] = det['x']
det['orig_y'] = det['y']
det['orig_w'] = det['width']
det['orig_h'] = det['height']
restore_det(track[0])
restore_det(track[-1])
for d in track:
sum_h += d['height']
sum_w += d['width']
angle,vel,comps = track_vel(track)
vel_x = comps[0]
vel_y = comps[1]
avg_h = sum_h / len(track)
avg_w = sum_w / len(track)
new_x = min(1,max(0,track[-1]['x']+(vel_x*ext_length)))
new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length)))
old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length)))
old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length)))
min_x = min(track[-1]['x'],new_x)
min_y = min(track[-1]['y'],new_y)
if min_x > 0 and min_y > 0:
track[-1]['x'] = min_x
track[-1]['y'] = min_y
track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1)
            track[-1]['height'] = min(max(0,abs(new_y-track[-1]['y'])+avg_h),1)
else:
track[-1]['width'] = 0
track[-1]['height'] = 0
min_x = min(track[0]['x'],old_x)
min_y = min(track[0]['y'],old_y)
if min_x > 0 and min_y > 0:
track[0]['x'] = min(max(0,min_x),1)
track[0]['y'] = min(max(0,min_y),1)
track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1)
            track[0]['height'] = min(max(abs(old_y-track[0]['y'])+avg_h,0),1)
else:
track[0]['width'] = 0
track[0]['height'] = 0
return tracklets
def split_tracklets(tracklets):
track_ids=[]
detections=[]
for track_id,track in tracklets.items():
for d in track:
track_ids.append(track_id)
detections.append(d)
return detections,track_ids
def trim_tracklets(detections, track_ids, max_length):
tracklets = join_up_iteration(detections, track_ids)
next_track_id = 1
new_tracklets = {}
for track_id,detections in tracklets.items():
new_track_count=math.ceil(len(detections)/max_length)
for i in range(new_track_count):
start=max_length*i
end=max_length+(max_length*i)
new_tracklets[next_track_id] = detections[start:end]
next_track_id += 1
detections, track_ids = split_tracklets(new_tracklets)
track_ids = renumber_track_ids(track_ids)
return detections, track_ids
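# Hedged sketch: trimming splits an over-long tracklet into consecutive
# chunks, so five detections with max_length=2 yield ceil(5/2) = 3 tracklets
# (relies on renumber_track_ids from openem.tracking, imported above).
def _trim_tracklets_demo():
    dets = [{'frame': i} for i in range(5)]
    ids = [7] * 5
    new_dets, new_ids = trim_tracklets(dets, ids, max_length=2)
    return new_dets, sorted(set(new_ids))  # expect three distinct ids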
if __name__=="__main__":
parser = argparse.ArgumentParser(description=__doc__)
tator.get_parser(parser)
parser.add_argument("--detection-type-id", type=int, required=True)
parser.add_argument("--tracklet-type-id", type=int, required=True)
parser.add_argument("--version-id", type=int)
parser.add_argument("--input-version-id", type=int)
parser.add_argument("--strategy-config", type=str)
parser.add_argument("--dry-run", action='store_true')
parser.add_argument('media_files', type=str, nargs='*')
args = parser.parse_args()
# Weight methods
methods = ['hybrid', 'iou', 'iou-motion', 'iou-global-motion']
# Weight methods that require the video
visual_methods = ['hybrid', 'iou-global-motion']
api = tator.get_api(args.host, args.token)
detection_type = api.get_localization_type(args.detection_type_id)
project = detection_type.project
version_id = args.version_id
default_strategy = {"method": "hybrid",
"frame-diffs": [1,2,4,8,16,32,64,128,256],
"args": {},
"extension": {'method' : None},
"max-length": {},
"min-length": 0}
if args.strategy_config:
strategy = {**default_strategy}
with open(args.strategy_config, "r") as strategy_file:
strategy.update(yaml.load(strategy_file))
else:
strategy = default_strategy
if strategy['method'] == 'hybrid':
model_file = strategy['args']['model_file']
batch_size = strategy['args'].get('batch_size', 4)
comparator=FeaturesComparator(model_file)
#extractor=FeaturesExtractor(args.model_file)
class_method = strategy.get('class-method',None)
classify_function = None
classify_args = {}
if class_method:
pip_package=class_method.get('pip',None)
if pip_package:
p = subprocess.run([sys.executable,
"-m",
"pip",
"install",
pip_package])
print("Finished process.", flush=True)
function_name = class_method.get('function',None)
classify_args = class_method.get('args',None)
names = function_name.split('.')
module = __import__(names[0])
for name in names[1:-1]:
module = getattr(module,name)
classify_function = getattr(module,names[-1])
print("Strategy: ", flush=True)
pprint(strategy)
print(args.media_files, flush=True)
optional_fetch_args = {}
if args.input_version_id:
optional_fetch_args['version'] = [args.input_version_id]
for media_file in args.media_files:
comps=os.path.splitext(os.path.basename(media_file))[0]
media_id=comps.split('_')[0]
media = api.get_media(media_id)
if media.attributes.get("Tracklet Generator Processed") != "No":
print(f"Skipping media ID {media.id}, name {media.name} due to "
f"'Tracklet Generator Processed' attribute being set to "
f"something other than 'No'!")
continue
media_shape = (media.height, media.width)
fps = media.fps
localizations_by_frame = {}
localizations = api.get_localization_list(project,
type=args.detection_type_id,
media_id=[media_id],
**optional_fetch_args)
localizations = [l.to_dict() for l in localizations]
if len(localizations) == 0:
print(f"No localizations present in media {media_file}", flush=True)
continue
print(f"Processing {len(localizations)} detections", flush=True)
# Group by localizations by frame
for lid, local in enumerate(localizations):
frame = local['frame']
if frame in localizations_by_frame:
localizations_by_frame[frame].append(local)
else:
localizations_by_frame[frame] = [local]
detections=[]
track_ids=[]
track_id=1
# If media does not exist, download it.
if strategy['method'] == 'iou-global-motion':
if not os.path.exists(media_file):
temp_path = f'/tmp/{os.path.basename(media_file)}'
for progress in tator.util.download_media(api, media, temp_path):
print(f"Downloading {media_file}, {progress}%...")
print("Download finished!")
# Unfrag the file
subprocess.run(["ffmpeg", '-i', temp_path, '-c:v', 'copy', media_file])
os.remove(temp_path)
if strategy['method'] == 'hybrid': # Not all visual methods need detection images
vid=cv2.VideoCapture(media_file)
ok=True
frame = 0
while ok:
ok,frame_bgr = vid.read()
if frame in localizations_by_frame:
for l in localizations_by_frame[frame]:
l['bgr'] = crop_localization(frame_bgr, l)
if l['attributes']['Confidence'] < 0.50:
continue
detections.append(l)
track_ids.append(track_id)
track_id += 1
frame+=1
else:
        # The method operates analytically on the detection coordinates
        # and does not require processing the video
for frame,frame_detections in localizations_by_frame.items():
for det in frame_detections:
detections.append(det)
track_ids.append(track_id)
track_id += 1
print("Loaded all detections", flush=True)
track_ids = renumber_track_ids(track_ids)
if strategy['method'] == 'hybrid':
weights_strategy = HybridWeights(comparator,
None,
None,
media_shape,
fps,
0.0,
batch_size)
elif strategy['method'] == 'iou':
weights_strategy = IoUWeights(media_shape, **strategy['args'])
elif strategy['method'] == 'iou-motion':
weights_strategy = IoUMotionWeights(media_shape, **strategy['args'])
elif strategy['method'] == 'iou-global-motion':
weights_strategy = IoUGlobalMotionWeights(media_shape, media_file, **strategy['args'])
# Generate localization bgr based on grouped localizations
for x in strategy['frame-diffs']:
print(f"Started {x}", flush=True)
detections, track_ids, pairs, weights, is_cut, constraints = join_tracklets(
detections,
track_ids,
x,
weights_strategy)
if x in strategy['max-length']:
trim_to = strategy['max-length'][x]
print(f"Trimming track to max length of {trim_to}")
detections, track_ids = trim_tracklets(detections, track_ids, trim_to)
_,det_counts_per_track=np.unique(track_ids,return_counts=True)
print(f"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}", flush=True)
if x > 1 and strategy['extension']['method'] == 'linear-motion':
ext_frames=x
print(f"Extending by linear motion, {ext_frames}")
tracklets = join_up_iteration(detections,track_ids)
tracklets = extend_tracklets(tracklets, ext_frames)
detections, track_ids = split_tracklets(tracklets)
# Now we make new track objects based on the result
# from the graph solver
# [ detection, detection, detection, ...]
# [ track#, track#, track#,...]
# [ 133, 33, 13, 133,]
# [ 0,0,1,1]
# TODO: Handle is_cut?
def join_up_final(detections, track_ids):
tracklets = defaultdict(list)
num_tracklets = np.max(track_ids) + 1
assert(len(detections) == len(track_ids))
for d,tid in zip(detections, track_ids):
tracklets[tid].append(d)
return tracklets
def make_object(track):
track.sort(key=lambda x:x['frame'])
if classify_function:
valid,attrs = classify_function(media.to_dict(),
track,
**classify_args)
elif len(track) >= strategy['min-length']:
valid = True
attrs = {}
else:
valid = False
attrs = {}
if valid:
obj={"type": args.tracklet_type_id,
"media_ids": [int(media_id)],
"localization_ids": [x['id'] for x in track],
**attrs,
"version": version_id}
return obj
else:
return None
tracklets = join_up_final(detections, track_ids)
new_objs=[make_object(tracklet) for tracklet in tracklets.values()]
new_objs=[x for x in new_objs if x is not None]
print(f"New objects = {len(new_objs)}")
with open(f"/work/{media_id}.json", "w") as f:
json.dump(new_objs,f)
if not args.dry_run:
for response in tator.util.chunked_create(api.create_state_list,project,
state_spec=new_objs):
pass
try:
api.update_media(int(media_id), {"attributes":{"Tracklet Generator Processed": str(datetime.datetime.now())}})
        except Exception:
print("WARNING: Unable to set 'Tracklet Generator Processed' attribute")
| 39.248619
| 126
| 0.551943
| 1,686
| 14,208
| 4.451957
| 0.175563
| 0.034106
| 0.040767
| 0.017986
| 0.216893
| 0.158007
| 0.136957
| 0.085265
| 0.060751
| 0.04956
| 0
| 0.012664
| 0.327492
| 14,208
| 361
| 127
| 39.357341
| 0.772894
| 0.041878
| 0
| 0.193333
| 0
| 0
| 0.108446
| 0.006107
| 0
| 0
| 0
| 0.00277
| 0.006667
| 1
| 0.026667
| false
| 0.003333
| 0.056667
| 0
| 0.11
| 0.056667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cd30f449f940b3e03ca41f6babd9a375fe19ebf
| 1,167
|
py
|
Python
|
hypergan/losses/multi_loss.py
|
Darkar25/HyperGAN
|
76ef7e0c20569ceece88dc76396d92c77050692b
|
[
"MIT"
] | 1
|
2019-05-29T14:24:04.000Z
|
2019-05-29T14:24:04.000Z
|
hypergan/losses/multi_loss.py
|
KonradLinkowski/HyperGAN
|
3153daee838dbb8e8d8926b1e81419682a24f2fe
|
[
"MIT"
] | 218
|
2021-05-25T01:46:15.000Z
|
2022-02-11T01:08:52.000Z
|
hypergan/losses/multi_loss.py
|
KonradLinkowski/HyperGAN
|
3153daee838dbb8e8d8926b1e81419682a24f2fe
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import hyperchamber as hc
from hypergan.losses.base_loss import BaseLoss
from hypergan.multi_component import MultiComponent
TINY=1e-8
class MultiLoss(BaseLoss):
"""Takes multiple distributions and does an additional approximator"""
def _create(self, d_real, d_fake):
gan = self.gan
config = self.config
losses = []
split = self.split
for d in gan.discriminator.children:
if config.swapped:
d_swap = d_real
d_real = d_fake
d_fake = d_swap
ds = self.split_batch(d.sample, split)
d_real = ds[0]
d_fake = tf.add_n(ds[1:])/(len(ds)-1)
loss_object = self.config['loss_class'](gan, self.config, d_real=d_real, d_fake=d_fake)
losses.append(loss_object)
#relational layer?
combine = MultiComponent(combine='concat', components=losses)
g_loss = combine.g_loss_features
d_loss = combine.d_loss_features
self.d_loss = d_loss
self.g_loss = g_loss
self.losses = losses
return [d_loss, g_loss]
| 26.522727
| 99
| 0.61868
| 157
| 1,167
| 4.382166
| 0.407643
| 0.043605
| 0.043605
| 0.043605
| 0.05814
| 0.05814
| 0.05814
| 0.05814
| 0
| 0
| 0
| 0.006098
| 0.297344
| 1,167
| 43
| 100
| 27.139535
| 0.832927
| 0.070266
| 0
| 0
| 0
| 0
| 0.014856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.172414
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cd33c8d7f17b4aa6fc5f6d3f2701686f2ce01a4
| 13,643
|
py
|
Python
|
src/fidesops/api/v1/endpoints/policy_endpoints.py
|
mohan-pogala/fidesops
|
5c686362d4fb3b85253dd7e2898be1131a5071ab
|
[
"Apache-2.0"
] | null | null | null |
src/fidesops/api/v1/endpoints/policy_endpoints.py
|
mohan-pogala/fidesops
|
5c686362d4fb3b85253dd7e2898be1131a5071ab
|
[
"Apache-2.0"
] | null | null | null |
src/fidesops/api/v1/endpoints/policy_endpoints.py
|
mohan-pogala/fidesops
|
5c686362d4fb3b85253dd7e2898be1131a5071ab
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import Any, Dict, List
from fastapi import APIRouter, Body, Depends, Security
from fastapi_pagination import (
Page,
Params,
)
from fastapi_pagination.bases import AbstractPage
from fastapi_pagination.ext.sqlalchemy import paginate
from fidesops.schemas.shared_schemas import FidesOpsKey
from pydantic import conlist
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from starlette.exceptions import HTTPException
from starlette.status import HTTP_404_NOT_FOUND
from fidesops.api import deps
from fidesops.api.v1 import scope_registry as scopes
from fidesops.api.v1 import urn_registry as urls
from fidesops.common_exceptions import (
DataCategoryNotSupported,
PolicyValidationError,
RuleValidationError,
RuleTargetValidationError,
KeyOrNameAlreadyExists,
)
from fidesops.models.client import ClientDetail
from fidesops.models.policy import (
ActionType,
Policy,
Rule,
RuleTarget,
)
from fidesops.models.storage import StorageConfig
from fidesops.schemas import policy as schemas
from fidesops.schemas.api import BulkUpdateFailed
from fidesops.util.oauth_util import verify_oauth_client
router = APIRouter(tags=["Policy"], prefix=urls.V1_URL_PREFIX)
logger = logging.getLogger(__name__)
@router.get(
urls.POLICY_LIST,
status_code=200,
response_model=Page[schemas.PolicyResponse],
dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],
)
def get_policy_list(
*,
db: Session = Depends(deps.get_db),
params: Params = Depends(),
) -> AbstractPage[Policy]:
"""
Return a paginated list of all Policy records in this system
"""
logger.info(f"Finding all policies with pagination params '{params}'")
policies = Policy.query(db=db)
return paginate(policies, params=params)
def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy:
"""Helper method to load Policy or throw a 404"""
logger.info(f"Finding policy with key '{policy_key}'")
policy = Policy.get_by(db=db, field="key", value=policy_key)
if not policy:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Policy found for key {policy_key}.",
)
return policy
@router.get(
urls.POLICY_DETAIL,
status_code=200,
response_model=schemas.PolicyResponse,
dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],
)
def get_policy(
*,
policy_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
) -> schemas.PolicyResponse:
"""
Return a single Policy
"""
return get_policy_or_error(db, policy_key)
@router.patch(
urls.POLICY_LIST,
status_code=200,
response_model=schemas.BulkPutPolicyResponse,
)
def create_or_update_policies(
*,
client: ClientDetail = Security(
verify_oauth_client,
scopes=[scopes.POLICY_CREATE_OR_UPDATE],
),
db: Session = Depends(deps.get_db),
data: conlist(schemas.Policy, max_items=50) = Body(...), # type: ignore
) -> schemas.BulkPutPolicyResponse:
"""
Given a list of policy data elements, create or update corresponding Policy objects
or report failure
"""
created_or_updated: List[Policy] = []
failed: List[BulkUpdateFailed] = []
logger.info(f"Starting bulk upsert for {len(data)} policies")
for policy_schema in data:
policy_data: Dict[str, Any] = dict(policy_schema)
try:
policy = Policy.create_or_update(
db=db,
data={
"name": policy_data["name"],
"key": policy_data.get("key"),
"client_id": client.id,
},
)
except KeyOrNameAlreadyExists as exc:
logger.warning("Create/update failed for policy: %s", exc)
failure = {
"message": exc.args[0],
"data": policy_data,
}
failed.append(BulkUpdateFailed(**failure))
continue
except PolicyValidationError as exc:
logger.warning("Create/update failed for policy: %s", exc)
failure = {
"message": "This record could not be added because the data provided was invalid.",
"data": policy_data,
}
failed.append(BulkUpdateFailed(**failure))
continue
else:
created_or_updated.append(policy)
return schemas.BulkPutPolicyResponse(
succeeded=created_or_updated,
failed=failed,
)
@router.patch(
urls.RULE_LIST,
status_code=200,
response_model=schemas.BulkPutRuleResponse,
)
def create_or_update_rules(
*,
client: ClientDetail = Security(
verify_oauth_client,
scopes=[scopes.RULE_CREATE_OR_UPDATE],
),
policy_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...), # type: ignore
) -> schemas.BulkPutRuleResponse:
"""
Given a list of Rule data elements, create or update corresponding Rule objects
or report failure
"""
logger.info(f"Finding policy with key '{policy_key}'")
policy = get_policy_or_error(db, policy_key)
created_or_updated: List[Rule] = []
failed: List[BulkUpdateFailed] = []
logger.info(
f"Starting bulk upsert for {len(input_data)} rules on policy {policy_key}"
)
for schema in input_data:
# Validate all FKs in the input data exist
associated_storage_config_id = None
if schema.action_type == ActionType.access.value:
# Only validate the associated StorageConfig on access rules
storage_destination_key = schema.storage_destination_key
associated_storage_config: StorageConfig = StorageConfig.get_by(
db=db,
field="key",
value=storage_destination_key,
)
if not associated_storage_config:
logger.warning(
f"No storage config found with key {storage_destination_key}"
)
failure = {
"message": f"A StorageConfig with key {storage_destination_key} does not exist",
"data": dict(
schema
), # Be sure to pass the schema out the same way it came in
}
failed.append(BulkUpdateFailed(**failure))
continue
else:
associated_storage_config_id = associated_storage_config.id
masking_strategy_data = None
if schema.masking_strategy:
masking_strategy_data = schema.masking_strategy.dict()
try:
rule = Rule.create_or_update(
db=db,
data={
"action_type": schema.action_type,
"client_id": client.id,
"key": schema.key,
"name": schema.name,
"policy_id": policy.id,
"storage_destination_id": associated_storage_config_id,
"masking_strategy": masking_strategy_data,
},
)
except KeyOrNameAlreadyExists as exc:
logger.warning(
f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except RuleValidationError as exc:
logger.warning(
f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except ValueError as exc:
logger.warning(
f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
else:
created_or_updated.append(rule)
return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed)
@router.delete(
urls.RULE_DETAIL,
status_code=204,
dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],
)
def delete_rule(
*,
policy_key: FidesOpsKey,
rule_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
) -> None:
"""
Delete a policy rule.
"""
policy = get_policy_or_error(db, policy_key)
logger.info(f"Finding rule with key '{rule_key}'")
rule = Rule.filter(
        db=db, conditions=((Rule.key == rule_key) & (Rule.policy_id == policy.id))
).first()
if not rule:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
)
logger.info(f"Deleting rule with key '{rule_key}'")
rule.delete(db=db)
@router.patch(
urls.RULE_TARGET_LIST,
status_code=200,
response_model=schemas.BulkPutRuleTargetResponse,
)
def create_or_update_rule_targets(
*,
client: ClientDetail = Security(
verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE]
),
policy_key: FidesOpsKey,
rule_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
input_data: conlist(schemas.RuleTarget, max_items=50) = Body(...), # type: ignore
) -> schemas.BulkPutRuleTargetResponse:
"""
Given a list of Rule data elements, create corresponding Rule objects
or report failure
"""
policy = get_policy_or_error(db, policy_key)
logger.info(f"Finding rule with key '{rule_key}'")
rule = Rule.filter(
        db=db, conditions=((Rule.key == rule_key) & (Rule.policy_id == policy.id))
).first()
if not rule:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
)
created_or_updated = []
failed = []
logger.info(
f"Starting bulk upsert for {len(input_data)} rule targets on rule {rule_key}"
)
for schema in input_data:
try:
target = RuleTarget.create_or_update(
db=db,
data={
"name": schema.name,
"key": schema.key,
"data_category": schema.data_category,
"rule_id": rule.id,
"client_id": client.id,
},
)
except KeyOrNameAlreadyExists as exc:
logger.warning(
f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except (
DataCategoryNotSupported,
PolicyValidationError,
RuleTargetValidationError,
) as exc:
logger.warning(
f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except IntegrityError as exc:
logger.warning(
f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
)
failure = {
"message": f"DataCategory {schema.data_category} is already specified on Rule with ID {rule.id}",
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
else:
created_or_updated.append(target)
return schemas.BulkPutRuleTargetResponse(
succeeded=created_or_updated,
failed=failed,
)
@router.delete(
urls.RULE_TARGET_DETAIL,
status_code=204,
dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],
)
def delete_rule_target(
*,
policy_key: FidesOpsKey,
rule_key: FidesOpsKey,
rule_target_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
) -> None:
"""
Delete the rule target.
"""
policy = get_policy_or_error(db, policy_key)
logger.info(f"Finding rule with key '{rule_key}'")
rule = Rule.filter(
        db=db, conditions=((Rule.key == rule_key) & (Rule.policy_id == policy.id))
).first()
if not rule:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
)
logger.info(f"Finding rule target with key '{rule_target_key}'")
target = RuleTarget.filter(
db=db,
conditions=(
            (RuleTarget.key == rule_target_key) & (RuleTarget.rule_id == rule.id)
),
).first()
if not target:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}.",
)
logger.info(f"Deleting rule target with key '{rule_target_key}'")
target.delete(db=db)
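# --- Hedged client-side sketch (not part of this module) ---
# Illustrates exercising the bulk policy upsert endpoint defined above. The
# concrete "/policy" path and bearer token are assumptions; the real path
# comes from urls.POLICY_LIST, which is defined elsewhere in fidesops.
def _bulk_upsert_policies_demo(base_url: str, token: str):
    import httpx  # assumption: httpx is available in the environment

    return httpx.patch(
        f"{base_url}{urls.V1_URL_PREFIX}/policy",  # hypothetical concrete path
        headers={"Authorization": f"Bearer {token}"},
        json=[{"name": "Example Policy", "key": "example_policy"}],
    )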
| 31.95082
| 113
| 0.609543
| 1,503
| 13,643
| 5.351963
| 0.135063
| 0.025733
| 0.01641
| 0.03916
| 0.599453
| 0.56825
| 0.521258
| 0.459846
| 0.413973
| 0.408379
| 0
| 0.005926
| 0.29495
| 13,643
| 426
| 114
| 32.025822
| 0.830336
| 0.048083
| 0
| 0.518207
| 0
| 0
| 0.146357
| 0.00731
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022409
| false
| 0
| 0.061625
| 0
| 0.10084
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cd398bd2b3034834a61ea93e2ee16c8e1011acb
| 7,425
|
py
|
Python
|
engage-analytics/sentiment_analysis/src/report/interface_report.py
|
oliveriopt/mood-analytics
|
c98eb8c483a05af938a2f6f49d8ea803f5711572
|
[
"Apache-2.0"
] | null | null | null |
engage-analytics/sentiment_analysis/src/report/interface_report.py
|
oliveriopt/mood-analytics
|
c98eb8c483a05af938a2f6f49d8ea803f5711572
|
[
"Apache-2.0"
] | 2
|
2020-03-27T19:14:44.000Z
|
2020-03-27T19:14:44.000Z
|
engage-analytics/sentiment_analysis/src/report/interface_report.py
|
oliveriopt/mood-analytics
|
c98eb8c483a05af938a2f6f49d8ea803f5711572
|
[
"Apache-2.0"
] | null | null | null |
import emoji
import sentiment_analysis.src.report.cons_report as cons
import sentiment_analysis.src.constants as global_cons
from utils.data_connection.api_data_manager import APISourcesFetcher
from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question
from sentiment_analysis.src.word_cloud import words_clouds
from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment
from nested_lookup import nested_lookup
class InterFaceReport:
def __init__(self, topics: dict, surveys: dict, company_id: str, weeks: list,
g_client: ClientsLanguageSentiment,
api_source_manager: APISourcesFetcher):
self.topics = topics
self.surveys = surveys
self.company_id = company_id
self.weeks = weeks
self.g_client = g_client
self.api_source_manager = api_source_manager
self.thresholds = ()
self.table_surveys_replies = []
self.table_topics = []
self.table_topic_comment = []
self.counter_text_sr = None
self.counter_text_topics = None
self.info_file = read_json_file("en_US.json")
self.image_base64_sr = None
self.image_base64_topics = None
def sort_by_dimension_sentiment_table(self) -> None:
"""
Sort by dimension and by sentiment
:return:
"""
temp_table = []
for dimension in cons.dimensions:
temp = [d for d in self.table_surveys_replies if d['dimension'] == dimension]
temp = sorted(temp, key=lambda k: k['sentiment'], reverse=True)
temp_table.extend(temp)
self.table_surveys_replies = temp_table
def insert_to_list_surveys_replies(self, features: list, company_week: int) -> None:
"""
        Build the list of dictionaries for the interface
:param features: list of features to extract
:param company_week: company week of the company
:return:
"""
for item_analyze in features:
question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week)
dimension = extract_dimension(self.info_file, dimension=item_analyze[0])
comment = item_analyze[1]
sentiment = item_analyze[2]
temp = {}
temp.update(dimension=dimension)
temp.update(question=question)
temp.update(comment=emoji.emojize(comment, use_aliases=True))
temp.update(sentiment=sentiment)
self.table_surveys_replies.append(temp)
self.sort_by_dimension_sentiment_table()
def insert_to_list_topics(self, features: list) -> None:
"""
        Build the list of dictionaries for the interface - topic headlines
:param features: list of features to extract
:return:
"""
for item_analyze in features:
topic_id = item_analyze[0]
comment = item_analyze[1]
sentiment = item_analyze[2]
temp = {}
temp.update(id=topic_id)
temp.update(comment=emoji.emojize(comment, use_aliases=True))
temp.update(sentiment=sentiment)
self.table_topics.append(temp)
self.table_topics = sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True)
def insert_to_list_topic_comments(self, features: list) -> None:
"""
        Build the list of dictionaries for the interface - topic comments
:param features: list of features to extract
:return:
"""
for item_analyze in features:
topic_id_comment_id = item_analyze[0]
comment = item_analyze[1]
sentiment = item_analyze[2]
temp = {}
temp.update(id=topic_id_comment_id)
temp.update(comment=emoji.emojize(comment, use_aliases=True))
temp.update(sentiment=sentiment)
self.table_topic_comment.append(temp)
self.table_topic_comment = sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True)
def word_cloud(self):
"""
Create wordcloud of the main words
:return:
"""
self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc)
self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc)
@staticmethod
def __count_filter_keys(entities: list) -> object:
"""
Count and filter keys
:param entities: list of entities text
:return:
"""
entities = ClientsLanguageSentiment.count_entities(entities=entities)
entities = ClientsLanguageSentiment.filter_black_list(entities=entities)
return entities
def __process_sr(self) -> None:
"""
Process the surveys replies
:return:
"""
for company_id, periods in self.surveys.items():
for period in self.weeks:
period_parts = period.split(CUSTOM_YEAR_WEEK_AGG)
translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0],
year=period_parts[1],
company_id=self.company_id)
sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods)
sr_content = nested_lookup(global_cons.SR_CONTENT, periods)
sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods)
sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods)
sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment))
self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week)
self.counter_text_sr = self.__count_filter_keys(entities=sr_entities)
def __process_topics(self) -> None:
"""
Process the topics
:return:
"""
for company_id, topics in self.topics.items():
# heading
topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics)
topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics)
topic_ids = list(topics.keys())
topic_w_sentiments = list(zip(topic_ids, topic_headings, topic_headings_sentiments))
self.insert_to_list_topics(topic_w_sentiments)
# comments
for topic_id, topic in topics.items():
topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic)
topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic)
topic_list_ids = [topic_id] * len(topic_comments)
topic_w_scores = list(zip(topic_list_ids, topic_comments, topic_comments_scores))
self.insert_to_list_topic_comments(topic_w_scores)
entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics)
self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities)
def process_interface(self) -> None:
"""
Take the info needed to write into report_pdf
:return:
"""
self.__process_sr()
self.__process_topics()
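# Hedged sketch of the ordering used by sort_by_dimension_sentiment_table:
# group rows by dimension in a fixed dimension order, then sort each group
# by sentiment, descending. Plain literals stand in for cons.dimensions.
def _sort_pattern_demo():
    rows = [
        {'dimension': 'b', 'sentiment': 0.1},
        {'dimension': 'a', 'sentiment': 0.9},
        {'dimension': 'a', 'sentiment': 0.2},
    ]
    ordered = []
    for dim in ('a', 'b'):
        group = [r for r in rows if r['dimension'] == dim]
        ordered.extend(sorted(group, key=lambda r: r['sentiment'], reverse=True))
    return ordered  # a/0.9, a/0.2, b/0.1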
| 40.135135
| 116
| 0.644175
| 851
| 7,425
| 5.304348
| 0.169213
| 0.031679
| 0.035888
| 0.043864
| 0.344041
| 0.246345
| 0.224634
| 0.197829
| 0.173017
| 0.173017
| 0
| 0.00374
| 0.279731
| 7,425
| 184
| 117
| 40.353261
| 0.840314
| 0.095623
| 0
| 0.165138
| 0
| 0
| 0.007194
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091743
| false
| 0
| 0.073395
| 0
| 0.183486
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cd59f2bf170f30d86846848bf3c6c4bf7b96d9c
| 2,491
|
py
|
Python
|
lino/modlib/gfks/mixins.py
|
NewRGB/lino
|
43799e42107169ff173d3b8bc0324d5773471499
|
[
"BSD-2-Clause"
] | 1
|
2019-11-13T19:38:50.000Z
|
2019-11-13T19:38:50.000Z
|
lino/modlib/gfks/mixins.py
|
NewRGB/lino
|
43799e42107169ff173d3b8bc0324d5773471499
|
[
"BSD-2-Clause"
] | null | null | null |
lino/modlib/gfks/mixins.py
|
NewRGB/lino
|
43799e42107169ff173d3b8bc0324d5773471499
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: UTF-8 -*-
# Copyright 2010-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from builtins import object
from django.contrib.contenttypes.models import *
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.text import format_lazy
from lino.api import dd
from lino.core.gfks import gfk2lookup
from .fields import GenericForeignKey, GenericForeignKeyIdField
class Controllable(dd.Model):
# Translators: will also be concatenated with '(type)' '(object)'
owner_label = _('Controlled by')
controller_is_optional = True
class Meta(object):
abstract = True
owner_type = dd.ForeignKey(
ContentType,
editable=True,
blank=True, null=True,
verbose_name=format_lazy(u"{} {}", owner_label, _('(type)')))
owner_id = GenericForeignKeyIdField(
owner_type,
editable=True,
blank=True, null=True,
verbose_name=format_lazy(u"{} {}", owner_label, _('(object)')))
owner = GenericForeignKey(
'owner_type', 'owner_id',
verbose_name=owner_label)
@classmethod
def update_controller_field(cls, verbose_name=None, **kwargs):
if verbose_name is not None:
dd.update_field(cls, 'owner', verbose_name=verbose_name)
kwargs.update(
verbose_name=format_lazy(u"{} {}",
verbose_name, _('(object)')))
dd.update_field(cls, 'owner_id', **kwargs)
if verbose_name is not None:
kwargs.update(
verbose_name=format_lazy(u"{} {}",
verbose_name, _('(type)')))
dd.update_field(cls, 'owner_type', **kwargs)
def update_owned_instance(self, controllable):
if self.owner:
self.owner.update_owned_instance(controllable)
super(Controllable, self).update_owned_instance(controllable)
def save(self, *args, **kw):
if settings.SITE.loading_from_dump:
super(Controllable, self).save(*args, **kw)
else:
if self.owner:
self.owner.update_owned_instance(self)
super(Controllable, self).save(*args, **kw)
if self.owner:
self.owner.after_update_owned_instance(self)
def controlled_rows(self, model, **kwargs):
gfk = self._meta.get_field('owner')
kwargs = gfk2lookup(gfk, self, **kwargs)
return model.objects.filter(**kwargs)
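# Hedged sketch (hypothetical model, kept commented out because defining a
# real model requires a configured Django app registry): a model becomes
# attachable to an arbitrary owner simply by mixing in Controllable.
#
# class Note(Controllable):
#     class Meta(object):
#         app_label = 'notes'  # hypothetical app label
#     subject = models.CharField(max_length=200)  # assumes django.db.models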
| 30.753086
| 71
| 0.635488
| 287
| 2,491
| 5.313589
| 0.341463
| 0.086557
| 0.062295
| 0.055082
| 0.321967
| 0.267541
| 0.226885
| 0.190164
| 0.139016
| 0.08
| 0
| 0.005895
| 0.250903
| 2,491
| 80
| 72
| 31.1375
| 0.811361
| 0.065837
| 0
| 0.267857
| 0
| 0
| 0.046101
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cd5ad9a803e1cac21f7d6ba2961e58bea3c98da
| 21,926
|
py
|
Python
|
optical_form_reader/main.py
|
1enes/optical_form_reader
|
fab99f2403c25f84fcb5bdac50148ab248432516
|
[
"MIT"
] | null | null | null |
optical_form_reader/main.py
|
1enes/optical_form_reader
|
fab99f2403c25f84fcb5bdac50148ab248432516
|
[
"MIT"
] | null | null | null |
optical_form_reader/main.py
|
1enes/optical_form_reader
|
fab99f2403c25f84fcb5bdac50148ab248432516
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import matplotlib.pyplot as plt
import imutils
from imutils import contours
from imutils.perspective import four_point_transform, order_points
cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4} #,
alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'}
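# NOTE: this first cevap_islemleri is shadowed by the later definition of the
# same name below; only the second definition is actually used at runtime.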
def cevap_islemleri(isim,coords):
a=0
thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),20)):
cevap=None
cnt=contours.sort_contours(coords[i:i+30])[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
a+=1
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
def cevap_contour_bul(isim,isim_gri):
coord=[]
thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
x_coords=[(0,0)]
sayac=0
contour=imutils.grab_contours(contour)
contour=contours.sort_contours(contour,method="top-to-bottom")[0]
for c in contour:
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
area=cv2.contourArea(approx)
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if area<1500 and area>250 and ar>=0.9 and ar<=1.1:
box=cv2.minAreaRect(approx)
box=cv2.boxPoints(box)
box=np.array(box,dtype=np.int)
M=cv2.moments(box)
x=int(M['m10']/M['m00'])
y=int(M['m01']/M['m00'])
res=tekrar_bul(x_coords,x)
if res is False and abs(x_coords[-1][1]-y)<35:
coord.append(approx)
x_coords.append((x,y))
sayac+=1
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
elif abs(x_coords[-1][1]-y)>=35:
coord.append(approx)
x_coords=[(0,0)]
sayac+=1
x_coords.append((x,y))
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
else:
continue
return coord
def ters_bul(kagit,areas):
ret=False
#print(areas[0][0])
if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000:
kagit=imutils.rotate(kagit,angle=180)
print("Kağıdı ters koymuşsunuz,çevrildi")
ret=True
return ret,kagit
else:
return ret,kagit
def kagit_bul(image,gray):
thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1]
contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
contour=imutils.grab_contours(contour)
contour=sorted(contour,key=cv2.contourArea,reverse=True)
for c in contour:
approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True)
if len(approx)==4:
#cv2.drawContours(image,[approx],0,(0,255,0),thickness=3)
break
warp=four_point_transform(image,approx.reshape(4,2))
warp_gri=four_point_transform(gray,approx.reshape(4,2))
return warp,warp_gri
def soru_grup_contour_bul(resim,gri):
thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1]
can=cv2.Canny(thr2,50,100)
can=cv2.dilate(can,None,iterations=3)
coords=[]
cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cont=imutils.grab_contours(cont)
for c in cont:
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
area=cv2.contourArea(approx)
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1:
box=cv2.minAreaRect(approx)
box=cv2.boxPoints(box)
box=np.array(box,dtype=np.int)
if cv2.contourArea(box)>150:
coords.append(approx)
cv2.drawContours(resim,[box],0,(0,0,255),thickness=3)
if len(coords)==5:
return coords
else:
return 0
def tekrar_bul(array,koordinat):
for c in array:
if koordinat==c[0] or abs(koordinat-c[0])<15:
            return True  # duplicate found
else:
pass
return False
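# Hedged sketch: tekrar_bul reports a duplicate when the new x-coordinate
# lies within 15 px of any previously recorded bubble centre.
def _tekrar_bul_demo():
    seen = [(0, 0), (100, 40)]
    assert tekrar_bul(seen, 108) is True   # within 15 px of x=100
    assert tekrar_bul(seen, 140) is False  # far from every recorded centre
    return True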
def contour_bul(isim,isim_gri,karmasiklik=0):
coord=[]
thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
#thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
ar_value=200
#if karmasiklik==1:
# ar_value=800
cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
x_coords=[(0,0)]
sayac=0
cont=imutils.grab_contours(cont)
cont=contours.sort_contours(cont,method="top-to-bottom")[0]
for c in cont:
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
area=cv2.contourArea(approx)
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if area<1300 and area>300 and ar>=0.9 and ar<=1.1:
box=cv2.minAreaRect(approx)
box=cv2.boxPoints(box)
box=np.array(box,dtype=np.int)
M=cv2.moments(box)
x=int(M['m10']/M['m00'])
y=int(M['m01']/M['m00'])
# print(x,y)
res=tekrar_bul(x_coords,x)
if res is False and abs(x_coords[-1][1]-y)<35:
coord.append(approx)
x_coords.append((x,y))
sayac+=1
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
elif abs(x_coords[-1][1]-y)>=35:
coord.append(approx)
x_coords=[(0,0)]
sayac+=1
x_coords.append((x,y))
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
else:
continue
return coord,thr6
def contour_cizdir(resim,cont,isim="default"):
for c in cont:
cv2.drawContours(resim,[c],0,(0,255,0),thickness=4)
#print(f"Bulunan contour sayısı: {len(cont)}")
def bolge_bul(resim,gri):
bolgeler={}
thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
areas=[]
cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cont=imutils.grab_contours(cont)
temp=[]
cont=contours.sort_contours(cont,"top-to-bottom")[0]
a=0
for c in cont:
approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True)
if cv2.contourArea(approx)>10050 and len(approx)==4:
a+=1
M=cv2.moments(approx)
x=int(M['m10']/M['m00'])
y=int(M['m01']/M['m00'])
#areas.append([a,cv2.contourArea(approx)])
#cv2.putText(resim,"{}".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3)
temp.append(approx.reshape(4,2))
areas.append([a,cv2.contourArea(approx)])
#cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3)
#cv2.imshow("resim_olge",imutils.resize(resim,height=650))
    if len(temp)>=6:
bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]}
areas=sorted(areas,key=lambda x:x[1],reverse=True)
return bolgeler,areas
def cevap_islemleri(cevap,coords,col_no=1):
iki_cevap=0
bos=0
dogru=0
q_no=0
yanlıs=0
if col_no==1:
pass
elif col_no==2:
q_no=30
elif col_no==3:
q_no=60
elif col_no==4:
q_no=90
yanit=[]
#cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY)
thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),5)):
cevap=None
cnt=contours.sort_contours(coords[i:i+5])[0]
toplam_beyaz=None
say=0
for (j,c) in enumerate(cnt):
if len(cevap_anahtar)<=q_no+s:
return (dogru,yanlıs,bos,iki_cevap)
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
plt.imshow(maske,cmap='gray')
#plt.show()
toplam_beyaz=cv2.countNonZero(maske)
#print(toplam_beyaz,j)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,q_no+s)
if toplam_beyaz>800:
say+=1
        if say>1: # more than one bubble marked for this question
            iki_cevap+=1
            continue
        elif cevap[0]<800: # question left blank
            bos+=1
            continue
else:
if cevap_anahtar[q_no+s]== cevap[1]:
#print(cevap_anahtar[q_no+s],cevap[1])
dogru+=1
else:
yanlıs+=1
    '''
    RETURNS THE NUMBER OF CORRECT, WRONG, BLANK, AND DOUBLE-MARKED ANSWERS
    '''
return(dogru,yanlıs,bos,iki_cevap)
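# A hedged sketch (assumption: cevap_anahtar is a module-level answer key
# defined earlier in the file). cevap_islemleri compares the index j of the
# most heavily filled bubble against cevap_anahtar[q_no+s], so the key maps
# question index to bubble index, e.g. for three questions answered A, C, B:
#   cevap_anahtar = [0, 2, 1]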
def isim_islemleri(isim,coords,thresh):
a=0
yanit=[]
ad_str=""
coords=contours.sort_contours(coords,method="left-to-right")[0]
for (s,i) in enumerate(np.arange(0,len(coords),32)):
cevap=None
cnt=contours.sort_contours(coords[i:i+32],method="top-to-bottom")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
#plt.imshow(maske,cmap='gray')
#plt.show()
#a+=1
toplam_beyaz=cv2.countNonZero(maske)
#print(toplam_beyaz,j)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
# print("cevap",cevap)
        if cevap[0]>500:
            yanit.append(alfabe[cevap[1]])
        else: # 500 white pixels or fewer: treat the column as blank
            yanit.append(" ")
for s in yanit:
ad_str+=s
return ad_str
def cevap_kolon(cevap):
pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)])
pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)])
pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)])
pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)])
col1=four_point_transform(cevap,pts1)
col2=four_point_transform(cevap,pts2)
col3=four_point_transform(cevap,pts3)
col4=four_point_transform(cevap,pts4)
return col1,col2,col3,col4
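# Note (assumption, not stated in the original): the corner points above are
# hard-coded for a warped answer block of roughly 1200x1545 px, sliced into
# four 300 px wide columns by four_point_transform. A sheet scanned at a
# different resolution would need the points rescaled, e.g.:
#   scale = cevap.shape[1] / 1200.0
#   pts1 = (pts1 * scale).astype(int)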
def cevap_gri(col1,col2,col3,col4):
    '''
    CONVERT THE ANSWER COLUMNS TO GRAYSCALE (KEPT HERE SO main() STAYS SHORT)
    '''
col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY)
col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY)
col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY)
col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY)
return col1_gri,col2_gri,col3_gri,col4_gri
def cevap_contour(col1,col2,col3,col4):
col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4)
col1_coord=cevap_contour_bul(col1,col1_gri)
    col2_coord=cevap_contour_bul(col2,col2_gri) # was col1_gri: each column must use its own grayscale image
    col3_coord=cevap_contour_bul(col3,col3_gri)
    col4_coord=cevap_contour_bul(col4,col4_gri)
return col1_coord,col2_coord,col3_coord,col4_coord
def ogrno_islemleri(ogrno,ogrno_gri,coords):
yanit=""
thresh=cv2.threshold(ogrno_gri,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="left-to-right")[0]
for (s,i) in enumerate(np.arange(0,len(coords),10)):
cevap=None
cnt=contours.sort_contours(coords[i:i+10],method="top-to-bottom")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
            # plt.imshow(maske,cmap='gray') # debug only
            # plt.show()
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
if cevap[0]>500:
yanit+=str(cevap[1])
print("Okul Numarası:",yanit)
def sinav_islemleri(sinav,sinav_gri,coords):
yanit=["QUİZ","ARA","FİNAL","BÜTÜNLEME"]
thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),10)):
cevap=None
cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
            # plt.imshow(maske,cmap='gray') # debug only
            # plt.show()
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
return yanit[cevap[1]]
def sorugrup_islemleri(soru,soru_gri,coords):
yanit=["A","B","C","D","E"]
sayac=0
thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),10)):
cevap=None
cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
            # plt.imshow(maske,cmap='gray') # debug only
            # plt.show()
sayac+=1
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
if sayac==5:
break
print(cevap)
if cevap[0]>500:
return yanit[cevap[1]]
#print("tespit edilemedi")
return "Tespit edilemedi"
####################################################################
def main_starter(bos_kagit,dolu_kagit):
image=cv2.imread(bos_kagit)
gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
kagit,kagit_gri=kagit_bul(image,gray)
bolgeler,areas=bolge_bul(kagit,kagit_gri)
'''
FIND SCHOOL NUMBER PART
'''
ogrno_bos=four_point_transform(kagit,bolgeler['ogrno'])
ogrno_bos_gri=four_point_transform(kagit_gri,bolgeler['ogrno'])
ogrno_coord,ogrno_thresh=contour_bul(ogrno_bos,ogrno_bos_gri)
contour_cizdir(ogrno_bos_gri,ogrno_coord,"ogrenci numarası")
    #cv2.imshow("ogrno",imutils.resize(ogrno_bos,height=400))
'''
DIVIDE ANSWER PART INTO 4 SLICES AND FIND ONE BY ONE
'''
cevap_bos=four_point_transform(kagit,bolgeler['cevaplar'])
cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar'])
col1,col2,col3,col4=cevap_kolon(cevap_bos)
col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4)
col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4)
#contour_cizdir(col1,col1_coord)
#cevap_islemleri(col2_gri,coord_cevap)
'''
EXAM TYPE FIND PART
'''
sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu'])
sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu'])
sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri)
sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord)
#cv2.imshow("sınav türü",sinav_bos_gri)
'''
OTHER PARTS THAT ON PAPER
'''
sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu'])
sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu'])
sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1)
coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri)
soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1)
###############################
ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay'])
ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay'])
ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1)
'''
NAME FIND PART.
'''
isim_bos=four_point_transform(kagit,bolgeler['isim'])
isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY)
coord_isim, thres=contour_bul(isim_bos, isim_bos_gri)
#contour_cizdir(isim_bos,coord,"isim_bos")
#cevap_islemleri(cevap_bos_gri,coord)
##############################################
resim=cv2.imread(dolu_kagit)
resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY)
warp2,warp2_gri=kagit_bul(resim,resim_gri)
bolgeler2,areas2=bolge_bul(warp2,warp2_gri)
ret,warp2=ters_bul(warp2,areas2)
    '''
    IF THE SHEET IS UPSIDE DOWN, FIND THE REGIONS AGAIN
    '''
    if ret:
        warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY)
        bolgeler2,areas2=bolge_bul(warp2,warp2_gri)
isim_dolu=four_point_transform(warp2,bolgeler2['isim'])
isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY)
contour_cizdir(isim_dolu,coord_isim,"dolu_kagit_contourlu")
    '''
    INSTRUCTOR APPROVAL, FILLED SHEET
    '''
ogretim_dolu=four_point_transform(warp2,bolgeler2['ogretim_onay'])
ogretim_dolu_gri=cv2.cvtColor(ogretim_dolu,cv2.COLOR_BGR2GRAY)
ogret_onay=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogret_cont)
print("Öğretim Onayı:",ogret_onay)
#cv2.drawContours(ogretim_dolu,ogret_cont,-1,(255,0,0),thickness=3)
#cv2.imshow("ogretc",ogretim_dolu)
#ogretim_onayı=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogretimonay_coord)
sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu'])
sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY)
soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont)
print("Soru Grubu",soru_tur)
thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1]
isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu)
print(isim_str)
sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu'])
sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY)
sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord)
print("Sınav Türü: ",sinav_turu)
ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno'])
ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY)
ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord)
cevap_dolu=four_point_transform(warp2,bolgeler2['cevaplar'])
cevap_dolu_gri=cv2.cvtColor(cevap_dolu,cv2.COLOR_BGR2GRAY)
col1_dolu,col2_dolu,col3_dolu,col4_dolu=cevap_kolon(cevap_dolu)
col1_gri_dolu,col2_gri_dolu,col3_gri_dolu,col4_gri_dolu=cevap_gri(col1_dolu,col2_dolu,col3_dolu,col4_dolu)
#contour_cizdir(col1_dolu,col1_coord,"colon1 dolu")
if len(cevap_anahtar)<=30:
basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1)
elif len(cevap_anahtar)<=60:
basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3])
#print(basarim)
    elif len(cevap_anahtar)<=90:
        basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
        basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
        basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)
        # sum the (correct, wrong, blank, double-marked) counters element-wise;
        # plain tuple addition would concatenate the tuples instead of adding them
        basarim=tuple(map(sum,zip(basarim1,basarim2,basarim3)))
    elif len(cevap_anahtar)<=120:
        basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
        basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
        basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)
        basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4)
        basarim=tuple(map(sum,zip(basarim1,basarim2,basarim3,basarim4)))
print(f"Doğru cevap sayısı:{basarim[0]}\nYanlış cevap sayısı:{basarim[1]}\nBoş sayısı:{basarim[2]}\nİki cevap işaret:{basarim[3]}")
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == '__main__':
bos_kagit="optic_empty.jpg"
dolu_kagit="optic_marked.jpg"
main_starter(bos_kagit,dolu_kagit)
| avg_line_length 31.100709 | max_line_length 223 | alphanum_fraction 0.622457 | (remaining numeric quality-signal columns omitted) |
| hexsha 7cd8fbdd89ede56684cdd39b8bd8583e3ed86ea6 | size 16528 | ext py | lang Python | path test/testMatrix.py | repo turkeydonkey/nzmath3 | head a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | licenses ["BSD-3-Clause"] | stars 1 (2021-05-26) |
import unittest
from nzmath.matrix import *
import nzmath.vector as vector
import nzmath.rational as rational
import nzmath.poly.uniutil as uniutil
Ra = rational.Rational
Poly = uniutil.polynomial
Int = rational.theIntegerRing
# sub test
try:
    from test.testMatrixFiniteField import *
except ImportError:
    try:
        from nzmath.test.testMatrixFiniteField import *
    except ImportError:
        from .testMatrixFiniteField import *
## for RingMatrix
a1 = createMatrix(1, 2, [3, 2])
a2 = Matrix(1, 2, [5, -6])
a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10])
a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0, 0])
a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)])
## for RingSquareMatrix
b1 = createMatrix(2, 2, [1, 2]+[3, 4])
b2 = Matrix(2, 2, [0, -1]+[1, -2])
b3 = createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9, 8])
b4 = Matrix(3, 3, [1, 2, 3]+[0, 5, -2]+[7, 1, 9])
b5 = createMatrix(3, 3, [1, 3, 2, 4, 6, 5, 6, 8, 9])
b6 = createMatrix(3, 3, [1, 2, 4, 0, 3, 5, 0, 0, 0])
b7 = createMatrix(3, 3, [1, 0, 0, 9, 1, 0, 5, 6, 1])
b8 = Matrix(3, 3, [3, 15, 12]+[2,7,5]+[1,-4,-2])
## for FieldMatrix
c1 = createMatrix(1, 2, [Ra(3), Ra(2)])
c2 = createMatrix(4, 5, \
[Ra(0), 0, 1, 2, -1]+[0, 0, 5, 12, -2]+[0, 0, 1, 3, -1]+[0, 0, 1, 2, 0])
c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6, 7])
## for FieldSquareMatrix
d1 = createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)])
d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7, 9])
d3 = Matrix(3, 3, \
[Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1, 9])
d4 = createMatrix(6, 6, \
[Ra(4), 2, 5, 0, 2, 1]+[5, 1, 2, 5, 1, 1]+[90, 7, 54, 8, 4, 6]+\
[7, 5, 0, 8, 2, 5]+[8, 2, 6, 5, -4, 2]+[4, 1, 5, 6, 3, 1])
d5 = createMatrix(4, 4, \
[Ra(2), -1, 0, 0]+[-1, 2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1, 2])
d6 = createMatrix(4, 4, \
[Ra(1), 2, 3, 4]+[2, 3, 4, 5]+[3, 4, 5, 6]+[4, 5, 6, 7])
d7 = Matrix(3, 3, \
[Ra(1, 2), Ra(2, 3), Ra(1, 5)]+[Ra(3, 2), Ra(1, 3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3, 5)])
## other objects
v1 = vector.Vector([1, 4])
v2 = vector.Vector([8])
v3 = vector.Vector([0, 0, 1])
class MatrixTest(unittest.TestCase):
def testInit(self):
lst_lst = Matrix(3, 2, [[21, -12], [1, -1], [0, 0]])
self.assertEqual(a4, lst_lst)
lst_tuple = Matrix(3, 2, [(21, 1, 0), (-12, -1, 0)])
self.assertEqual(a4, lst_tuple)
lst_vect = Matrix(3, 2, [vector.Vector([21, 1, 0]), vector.Vector([-12, -1, 0])])
self.assertEqual(a4, lst_vect)
def testGetitem(self):
self.assertEqual(2, a1[1, 2])
self.assertEqual(-2, b2[2, 2])
self.assertRaises(IndexError, a1.__getitem__, "wrong")
self.assertEqual(vector.Vector([21, 1, 0]), a4[1])
def testEqual(self):
self.assertTrue(a1 == Matrix(1, 2, [3, 2]))
self.assertTrue(isinstance(a1 == a1, bool))
def testNonZero(self):
self.assertTrue(not zeroMatrix(2, 3))
def testContains(self):
self.assertTrue(5 in a2)
def testCall(self):
call = createMatrix(1, 2, [13, 4])
self.assertEqual(call, a5(2))
def testMap(self):
pow_two = createMatrix(1, 2, [9, 4])
self.assertEqual(pow_two, a1.map(lambda n : n ** 2))
def testReduce(self):
self.assertEqual(-2, a3.reduce(min))
def testGetRow(self):
row1 = vector.Vector([3, -2])
self.assertEqual(row1, a3.getRow(2))
row2 = vector.Vector([1, 2])
self.assertEqual(row2, b1.getRow(1))
def testGetColumn(self):
col1 = vector.Vector([-12, -1, 0])
self.assertEqual(col1, a4.getColumn(2))
col2 = vector.Vector([1, 3])
self.assertEqual(col2, b1.getColumn(1))
def testTranspose(self):
trans = createMatrix(2, 3, [7, 3, 0]+[8, -2, 10])
self.assertEqual(trans, a3.transpose())
def testGetBlock(self):
block = Matrix(2, 3, [4, 6, 5, 6, 8, 9])
self.assertEqual(block, b5.getBlock(2, 1, 2, 3))
def testSubMatrix(self):
sub1 = createMatrix(2, 1, [-12, 0])
self.assertEqual(sub1, a4.subMatrix(2, 1))
sub2 = createMatrix(2, 2, [4, 5, 6, 9])
self.assertEqual(sub2, b5.subMatrix([2, 3], [1, 3]))
class SquareMatrixTest(unittest.TestCase):
def testIsUpperTriangularMatrix(self):
UT = createMatrix(4, 4, \
[1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 0, 1])
notUT = createMatrix(4, 4, \
[1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 1, 1])
assert UT.isUpperTriangularMatrix()
assert not notUT.isUpperTriangularMatrix()
def testIsLowerTriangularMatrix(self):
LT = createMatrix(4, 4, \
[1, 0, 0, 0]+[2, 3, 0, 0]+[4, 5, 6, 0]+[7, 8, 9, 10])
notLT = createMatrix(4, 4, \
[1, 0, 0, 0]+[2, 3, 1, 0]+[4, 5, 6, 0]+[7, 8, 9, 10])
assert LT.isLowerTriangularMatrix()
assert not notLT.isLowerTriangularMatrix()
def testIsDiagonalMatrix(self):
diag = createMatrix(2, 2, [-3, 0, 0, 5])
assert diag.isDiagonalMatrix()
    def testIsScalarMatrix(self):
        scalar = createMatrix(2, 2, [10, 0, 0, 10])
        assert scalar.isScalarMatrix()
def testIsSymmetricMatrix(self):
symmetric = createMatrix(2, 2, [2, 3, 3, 5])
assert symmetric.isSymmetricMatrix()
class RingMatrixTest(unittest.TestCase):
def testAdd(self):
sum1 = createMatrix(1, 2, [8, -4])
self.assertEqual(sum1, a1 + a2)
sum2 = createMatrix(2, 2, [1, 1, 4, 2])
self.assertEqual(sum2, b1 + b2)
def testSub(self):
sub1 = createMatrix(1, 2, [-2, 8])
self.assertEqual(sub1, a1 - a2)
sub2 = createMatrix(2, 2, [1, 3, 2, 6])
self.assertEqual(sub2, b1 - b2)
def testMul(self):
mul1 = createMatrix(1, 2, [2, -7])
self.assertEqual(mul1, a1 * b2)
mul2 = createMatrix(3, 2, [-15, -6]+[-2, -2]+[0, 0])
self.assertEqual(mul2, a4 * b1)
mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93])
self.assertEqual(mul3, b3 * a4)
def testScalarMul(self):
mul = createMatrix(1, 2, [15, 10])
self.assertEqual(mul, 5 * a1)
def testVectorMul(self):
mul = vector.Vector([9, 19])
self.assertEqual(mul, b1 * v1)
def testMod(self):
mod1 = createMatrix(3, 2, [1, 2]+[0, 1]+[0, 1])
self.assertEqual(mod1, a3 % 3)
def testNeg(self):
neg = createMatrix(2, 2, [0, 1, -1, 2])
self.assertEqual(neg, -b2)
def testHermiteNormalForm(self):
already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
h = already.hermiteNormalForm()
self.assertEqual(h, already)
lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
h = lessrank.hermiteNormalForm()
self.assertEqual(h.row, lessrank.row)
self.assertEqual(h.column, lessrank.column)
zerovec = vector.Vector([0, 0])
self.assertEqual(zerovec, h.getColumn(1))
square = createMatrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, 1])
h = square.hermiteNormalForm()
self.assertEqual(h.row, square.row)
self.assertEqual(h.column, square.column)
        hermite = createMatrix(3, 3, [0, 1, 0, 0, 0, 1, 0, 0, 1])
self.assertEqual(hermite, h)
def testExtHermiteNormalForm(self):
already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
U_1, h_1 = already.exthermiteNormalForm()
self.assertEqual(h_1, already)
self.assertEqual(already * U_1, h_1)
lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
U_2, h_2 = lessrank.exthermiteNormalForm()
self.assertEqual(h_2.row, lessrank.row)
self.assertEqual(h_2.column, lessrank.column)
self.assertEqual(lessrank * U_2, h_2)
def testKernelAsModule(self):
ker_1 = a1.kernelAsModule()
self.assertEqual(a1 * ker_1[1], vector.Vector([0]))
#zero test
ker_2 = b1.kernelAsModule()
self.assertEqual(ker_2, None)
class RingSquareMatrixTest(unittest.TestCase):
def testPow(self):
pow1 = createMatrix(2, 2, [7, 10, 15, 22])
self.assertEqual(pow1, b1 ** 2)
pow2 = createMatrix(2, 2, [1, 0, 0, 1])
self.assertEqual(pow2, b2 ** 0)
def testIsOrthogonalMatrix(self):
orthogonal = createMatrix(2, 2, [Ra(3, 5), Ra(4, 5), Ra(-4, 5), Ra(3, 5)])
assert orthogonal.isOrthogonalMatrix()
def testIsAlternatingMatrix(self):
alternate1 = createMatrix(2, 2, [0, 2, -2, 0])
assert alternate1.isAlternatingMatrix()
alternate2 = createMatrix(2, [1, 2, -2, 0])
assert not alternate2.isAntisymmetricMatrix()
def testIsSingular(self):
assert b6.isSingular()
def testTrace(self):
self.assertEqual(15, b4.trace())
def testDeterminant(self):
self.assertEqual(-2, b1.determinant())
#sf.bug #1914349
self.assertTrue(isinstance(b3.determinant(), int))
self.assertEqual(36, b3.determinant())
def testCofactor(self):
self.assertEqual(-6, b5.cofactor(1, 2))
def testCommutator(self):
commutator = createMatrix(2, 2, [5, -1, 9, -5])
self.assertEqual(commutator, b1.commutator(b2))
def testCharacteristicMatrix(self):
charMat = createMatrix(2, 2, \
[Poly({0:-1,1:1}, Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)])
self.assertEqual(charMat, b1.characteristicMatrix())
def testCharacteristicPolynomial(self):
assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant()
def testAdjugateMatrix(self):
adjugate = createMatrix(3, 3, [47, -15, -19, -14, -12, 2, -35, 13, 5])
self.assertEqual(adjugate, b4.adjugateMatrix())
assert d1 * d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row)
def testCofactorMatrix(self):
cofact = d5.cofactorMatrix()
self.assertEqual(d5.cofactor(2, 3), cofact[2, 3])
def testSmithNormalForm(self):
self.assertEqual([12, 1, 1], b5.smithNormalForm())
self.assertRaises(ValueError, b6.smithNormalForm)
self.assertEqual([1, 1, 1], b7.smithNormalForm())
self.assertEqual([9, 3, 1], b8.smithNormalForm())
def testExtSmithNormalForm(self):
smith1 = Matrix(3, 3, [12, 0, 0, 0, 1, 0, 0, 0, 1])
U_1, V_1, M_1 = b5.extsmithNormalForm()
self.assertEqual(smith1, M_1)
self.assertEqual(M_1, U_1 * b5 * V_1)
smith2 = Matrix(3, 3, [9, 0, 0, 0, 3, 0, 0, 0, 1])
U_2, V_2, M_2 = b8.extsmithNormalForm()
self.assertEqual(smith2, M_2)
self.assertEqual(M_2, U_2 * b8 * V_2)
class FieldMatrixTest(unittest.TestCase):
def testDiv(self):
div = createMatrix(1, 2, [1, Ra(2, 3)])
self.assertEqual(div, c1 / 3)
def testKernel(self):
ker = c2.kernel()
self.assertTrue(not c2 * ker)
def testImage(self):
img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0])
self.assertEqual(img, c2.image())
def testRank(self):
self.assertEqual(3, c2.rank())
self.assertEqual(3, d3.rank())
def testInverseImage(self):
self.assertEqual(d6, d5 * d5.inverseImage(d6))
self.assertRaises(NoInverseImage, d2.inverseImage, unitMatrix(3))
def testSolve(self):
for i in range(1, d6.column+1):
self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0])
sol1 = c1.solve(v2)
for i in range(len(sol1[1])):
self.assertEqual(v2, c1 * (sol1[0]+sol1[1][i]))
self.assertRaises(NoInverseImage, c3.solve, v3)
def testColumnEchelonForm(self):
echelon = createMatrix(4, 5,\
[Ra(0), 0, 1, 0, 0]+[0, 0, 0, 2, 3]+[0, 0, 0, 1, 0]+[0, 0, 0, 0, 1])
self.assertEqual(echelon, c2.columnEchelonForm())
class FieldSquareMatrixTest(unittest.TestCase):
def testPow(self):
pow3 = createMatrix(2, 2, [Ra(11, 2), Ra(-5, 2), Ra(-15, 4), Ra(7, 4)])
self.assertEqual(pow3, d1 ** (-2))
def testTriangulate(self):
triangle = createMatrix(3, 3, \
[Ra(1, 1), 2, 3]+[0, 5, -2]+[0, 0, Ra(-86, 5)])
self.assertEqual(triangle, d3.triangulate())
def testDeterminant(self):
self.assertEqual(Ra(-7, 15), d7.determinant())
def testInverse(self):
cinverse = createMatrix(3, 3)
cinverse.set([Ra(-47, 86), Ra(15, 86), Ra(19, 86)]+\
[Ra(7, 43), Ra(6, 43), Ra(-1, 43)]+[Ra(35, 86), Ra(-13, 86), Ra(-5, 86)])
self.assertEqual(cinverse, d3.inverse())
self.assertRaises(NoInverse, d2.inverse)
self.assertEqual(d3.inverse() * c3, d3.inverse(c3))
def testInverseNoChange(self):
# sf bug#1849220
M1 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)])
M1.inverse()
M2 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)])
self.assertEqual(M2, M1)
def testHessenbergForm(self):
pass
def testLUDecomposition(self):
L, U = d4.LUDecomposition()
assert L * U == d4
assert L.isLowerTriangularMatrix()
assert U.isUpperTriangularMatrix()
class MatrixRingTest (unittest.TestCase):
def setUp(self):
self.m2z = MatrixRing.getInstance(2, Int)
def testZero(self):
z = self.m2z.zero
self.assertEqual(0, z[1, 1])
self.assertEqual(0, z[1, 2])
self.assertEqual(0, z[2, 1])
self.assertEqual(0, z[2, 2])
def testOne(self):
o = self.m2z.one
self.assertEqual(1, o[1, 1])
self.assertEqual(0, o[1, 2])
self.assertEqual(0, o[2, 1])
self.assertEqual(1, o[2, 2])
def testUnitMatrix(self):
"""
unitMatrix() is an alias of one.
"""
self.assertEqual(self.m2z.one, self.m2z.unitMatrix())
def testRingAPI(self):
m3z = MatrixRing.getInstance(3, Int)
m2q = MatrixRing.getInstance(2, rational.theRationalField)
# issubring
self.assertFalse(self.m2z.issubring(Int))
self.assertTrue(self.m2z.issubring(self.m2z))
self.assertTrue(self.m2z.issubring(m2q))
self.assertFalse(self.m2z.issubring(m3z))
# issuperring
self.assertFalse(self.m2z.issuperring(Int))
self.assertTrue(self.m2z.issuperring(self.m2z))
self.assertFalse(self.m2z.issuperring(m2q))
self.assertFalse(self.m2z.issuperring(m3z))
# getCommonSuperring
self.assertRaises(TypeError, self.m2z.getCommonSuperring, Int)
class SubspaceTest(unittest.TestCase):
def testSupplementBasis(self):
ba = Subspace(3, 2, [1, 2, 3, 4, 5, 7])
supbase = createMatrix(3, 3, [1, 2, 0, 3, 4, 0, 5, 7, 1])
self.assertEqual(supbase, ba.supplementBasis())
def testSumOfSubspaces(self):
unit1 = Subspace(3, 1, [1, 0, 0])
unit2 = Subspace(3, 2, [0, 0]+[1, 0]+[0, 1])
self.assertEqual(unitMatrix(3), unit1.sumOfSubspaces(unit2))
def testIntersectionOfSubspace(self):
unit1 = Subspace(3, 2, [1, 0]+[0, 1]+[0, 0])
unit2 = unitMatrix(3)
unit2.toSubspace()
intersect = Subspace(3, 2, [-1, 0]+[0, -1]+[0, 0])
self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2))
class FunctionTest(unittest.TestCase):
def testCreateMatrix(self):
Q = rational.theRationalField
mat1 = createMatrix(2, 3, [[2,3,4], [5,6,7]])
self.assertEqual(mat1.coeff_ring, Int)
mat2 = createMatrix(2, 3, [[2,3,4], [5,6,7]], Q)
self.assertEqual(mat2.coeff_ring, Q)
mat3 = createMatrix(3, [(1, 2, 3), (4, 5, 6), (7, 8, 9)], Q)
self.assertTrue(mat3.row == mat3.column)
        self.assertEqual(mat3.__class__, FieldSquareMatrix) # assertTrue(a, b) treats b as a message and passes for any truthy a
mat4 = createMatrix(2, [vector.Vector([1, 4]), vector.Vector([6, 8])])
self.assertEqual(mat4.coeff_ring, Int)
mat5 = createMatrix(5, 6, Int)
self.assertTrue(mat5 == 0)
mat6 = createMatrix(1, 4)
self.assertTrue(mat6 == 0)
mat7 = createMatrix(3, Q)
self.assertTrue(mat7.row == mat7.column)
self.assertTrue(mat7 == 0)
self.assertEqual(mat7.coeff_ring, Q)
mat8 = createMatrix(7)
self.assertTrue(mat8 == 0)
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| avg_line_length 35.165957 | max_line_length 94 | alphanum_fraction 0.575811 | (remaining numeric quality-signal columns omitted) |
| hexsha 7cdcd3e9c2d7e86acfb845c8d72f0dc9c0f23f7d | size 2290 | ext py | lang Python | path api/routers/dashboard.py | repo xming521/coco_API | head 51d7ac3141e58f1d6a5438af135fba3ea101bd53 | licenses ["MIT"] |
import time
import psutil
import pymysql
from fastapi import APIRouter
from api.utils import response_code
router = APIRouter()
@router.get('/dashboard/getinfo')
def getinfo():
from init_global import g
res = {}
db = g.db_pool.connection()
    cur = db.cursor()
    cur.execute('select count(app_name) from app_list')
    res['app_count'] = cur.fetchall()[0][0]
    cur.execute('select count(app_name) from app_list where status="running"')
    res['app_run_count'] = cur.fetchall()[0][0]
    res['image_count'] = len(g.dc.images.list())
    res['networks_count'] = len(g.dc.networks.list())
    cur = db.cursor(cursor=pymysql.cursors.DictCursor)
    cur.execute('select * from app_list order by start_time desc limit 10')
    res['recent_event'] = cur.fetchall()
db.close()
return response_code.resp_200(data={"res": res})
def get_performance():
    res = {}
    # CPU
    cpuCount = psutil.cpu_count(logical=False)  # physical CPU cores
    cpuPercent = psutil.cpu_percent(0.5)  # CPU usage (%)
    cpufree = round(100 - cpuPercent, 2)  # idle CPU (%)
    # memory
    m = psutil.virtual_memory()  # memory info
    memoryTotal = round(m.total / (1024.0 * 1024.0 * 1024.0), 2)  # total memory (GB)
    memoryUsed = round(m.used / (1024.0 * 1024.0 * 1024.0), 2)  # used memory (GB)
    memoryFree = round(memoryTotal - memoryUsed, 2)  # free memory (GB)
    # disks
    io = psutil.disk_partitions()
    diskCount = len(io)
    diskTotal = 0  # total disk space (GB)
    diskUsed = 0  # used (GB)
    diskFree = 0  # free (GB)
    for i in io:
        try:
            o = psutil.disk_usage(i.mountpoint)
            diskTotal += int(o.total / (1024.0 * 1024.0 * 1024.0))
            diskUsed += int(o.used / (1024.0 * 1024.0 * 1024.0))
            diskFree += int(o.free / (1024.0 * 1024.0 * 1024.0))
        except OSError:  # some mountpoints (e.g. empty optical drives) are unreadable
            pass
    res['cpu'] = cpuPercent
    res['mem'] = m.percent
    # aggregate usage across all partitions; the original read o.percent, which
    # reflects only the last partition and is unbound if every mountpoint failed
    res['disk'] = round(diskUsed / diskTotal * 100, 2) if diskTotal else 0
    res['memoryTotal'] = memoryTotal
    res['memoryUsed'] = memoryUsed
    res['diskTotal'] = diskTotal
    res['diskUsed'] = diskUsed
    return res
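# A minimal usage sketch (illustrative, not part of the original router):
# get_performance() returns a plain JSON-serialisable dict, e.g.
#   {'cpu': 12.5, 'mem': 48.3, 'disk': 37.1, 'memoryTotal': 15.6,
#    'memoryUsed': 7.5, 'diskTotal': 476, 'diskUsed': 177}
# which push_realinfo() below emits over the socket every 3 seconds.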
def push_realinfo():
from init_global import g
from main import socket_manager as sm
print(g.person_online)
while g.person_online:
res = get_performance()
# print(res)
g.push_loop.run_until_complete(sm.emit('dashboard', {'data': res}))
time.sleep(3)
| avg_line_length 30.533333 | max_line_length 79 | alphanum_fraction 0.620524 | (remaining numeric quality-signal columns omitted) |
| hexsha 7cdd5ff32b1c2238dcf9d02a8e0c07b84239dfc5 | size 13145 | ext py | lang Python | path tests/operators/test_hive_operator.py | repo Ryan-Miao/airflow | head a2aca8714fac014ed7da97229d7877f1bc6e5a59 | licenses ["Apache-2.0"] | forks 1 (2020-09-29) |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
import unittest
from unittest import mock
import nose
from airflow import DAG, configuration, operators
from airflow.models import TaskInstance
from airflow.operators.hive_operator import HiveOperator
from airflow.utils import timezone
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
class TestHiveEnvironment(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.hql = """
USE airflow;
DROP TABLE IF EXISTS static_babynames_partitioned;
CREATE TABLE IF NOT EXISTS static_babynames_partitioned (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY (ds string);
INSERT OVERWRITE TABLE static_babynames_partitioned
PARTITION(ds='{{ ds }}')
SELECT state, year, name, gender, num FROM static_babynames;
"""
class TestHiveCli(unittest.TestCase):
def setUp(self):
self.nondefault_schema = "nondefault"
os.environ["AIRFLOW__CORE__SECURITY"] = "kerberos"
def tearDown(self):
del os.environ["AIRFLOW__CORE__SECURITY"]
def test_get_proxy_user_value(self):
from airflow.hooks.hive_hooks import HiveCliHook
hook = HiveCliHook()
returner = mock.MagicMock()
returner.extra_dejson = {'proxy_user': 'a_user_proxy'}
hook.use_beeline = True
hook.conn = returner
# Run
result = hook._prepare_cli_cmd()
# Verify
self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2])
class HiveOperatorConfigTest(TestHiveEnvironment):
def test_hive_airflow_default_config_queue(self):
t = HiveOperator(
task_id='test_default_config_queue',
hql=self.hql,
mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_default_config_queue',
dag=self.dag)
# just check that the correct default value in test_default.cfg is used
test_config_hive_mapred_queue = configuration.conf.get(
'hive',
'default_hive_mapred_queue'
)
self.assertEqual(t.get_hook().mapred_queue, test_config_hive_mapred_queue)
def test_hive_airflow_default_config_queue_override(self):
specific_mapred_queue = 'default'
t = HiveOperator(
task_id='test_default_config_queue',
hql=self.hql,
mapred_queue=specific_mapred_queue,
mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_default_config_queue',
dag=self.dag)
self.assertEqual(t.get_hook().mapred_queue, specific_mapred_queue)
class HiveOperatorTest(TestHiveEnvironment):
def test_hiveconf_jinja_translate(self):
hql = "SELECT ${num_col} FROM ${hiveconf:table};"
t = HiveOperator(
hiveconf_jinja_translate=True,
task_id='dry_run_basic_hql', hql=hql, dag=self.dag)
t.prepare_template()
self.assertEqual(t.hql, "SELECT {{ num_col }} FROM {{ table }};")
def test_hiveconf(self):
hql = "SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});"
t = HiveOperator(
hiveconfs={'table': 'static_babynames', 'day': '{{ ds }}'},
task_id='dry_run_basic_hql', hql=hql, dag=self.dag)
t.prepare_template()
self.assertEqual(
t.hql,
"SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});")
@mock.patch('airflow.operators.hive_operator.HiveOperator.get_hook')
def test_mapred_job_name(self, mock_get_hook):
mock_hook = mock.MagicMock()
mock_get_hook.return_value = mock_hook
t = HiveOperator(
task_id='test_mapred_job_name',
hql=self.hql,
dag=self.dag)
fake_execution_date = timezone.datetime(2018, 6, 19)
fake_ti = TaskInstance(task=t, execution_date=fake_execution_date)
fake_ti.hostname = 'fake_hostname'
fake_context = {'ti': fake_ti}
t.execute(fake_context)
self.assertEqual(
"Airflow HiveOperator task for {}.{}.{}.{}"
.format(fake_ti.hostname,
self.dag.dag_id, t.task_id,
fake_execution_date.isoformat()), mock_hook.mapred_job_name)
if 'AIRFLOW_RUNALL_TESTS' in os.environ:
import airflow.hooks.hive_hooks
import airflow.operators.presto_to_mysql
class TestHivePresto(TestHiveEnvironment):
def test_hive(self):
t = HiveOperator(
task_id='basic_hql', hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_queues(self):
t = HiveOperator(
task_id='test_hive_queues', hql=self.hql,
mapred_queue='default', mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_hive_queues',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_dryrun(self):
t = HiveOperator(
task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag)
t.dry_run()
def test_beeline(self):
t = HiveOperator(
task_id='beeline_hql', hive_cli_conn_id='hive_cli_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto(self):
sql = """
SELECT count(1) FROM airflow.static_babynames_partitioned;
"""
t = operators.presto_check_operator.PrestoCheckOperator(
task_id='presto_check', sql=sql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto_to_mysql(self):
t = operators.presto_to_mysql.PrestoToMySqlTransfer(
task_id='presto_to_mysql_check',
sql="""
SELECT name, count(*) as ccount
FROM airflow.static_babynames
GROUP BY name
""",
mysql_table='test_static_babynames',
mysql_preoperator='TRUNCATE TABLE test_static_babynames;',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hdfs_sensor(self):
t = operators.sensors.HdfsSensor(
task_id='hdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_webhdfs_sensor(self):
t = operators.sensors.WebHdfsSensor(
task_id='webhdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
timeout=120,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_sql_sensor(self):
t = operators.sensors.SqlSensor(
task_id='hdfs_sensor_check',
conn_id='presto_default',
sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_stats(self):
t = operators.hive_stats_operator.HiveStatsCollectionOperator(
task_id='hive_stats_check',
table="airflow.static_babynames_partitioned",
partition={'ds': DEFAULT_DATE_DS},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
        def test_named_hive_partition_sensor_parses_partitions_with_periods(self):
            t = operators.sensors.NamedHivePartitionSensor.parse_partition_name(
                partition="schema.table/part1=this.can.be.an.issue/part2=ok")
            self.assertEqual(t[0], "schema")
            self.assertEqual(t[1], "table")
            self.assertEqual(t[2], "part1=this.can.be.an.issue/part2=ok")
@nose.tools.raises(airflow.exceptions.AirflowSensorTimeout)
def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds=nonexistent"
],
poke_interval=0.1,
timeout=1,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_partition_sensor(self):
t = operators.sensors.HivePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_metastore_sql_sensor(self):
t = operators.sensors.MetastorePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
partition_name='ds={}'.format(DEFAULT_DATE_DS),
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive2samba(self):
t = operators.hive_to_samba_operator.Hive2SambaOperator(
task_id='hive2samba_check',
samba_conn_id='tableau_samba',
hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
destination_filepath='test_airflow.csv',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_to_mysql(self):
t = operators.hive_to_mysql.HiveToMySqlTransfer(
mysql_conn_id='airflow_db',
task_id='hive_to_mysql_check',
create=True,
sql="""
SELECT name
FROM airflow.static_babynames
LIMIT 100
""",
mysql_table='test_static_babynames',
mysql_preoperator=[
'DROP TABLE IF EXISTS test_static_babynames;',
'CREATE TABLE test_static_babynames (name VARCHAR(500))',
],
dag=self.dag)
t.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
| avg_line_length 39.005935 | max_line_length 88 | alphanum_fraction 0.617117 | (remaining numeric quality-signal columns omitted) |
| hexsha 7cdde129ca347d44c4fc15ae483831b97628b1d6 | size 4553 | ext py | lang Python | path main.py | repo OrionDark7/Alakajam12 | head 4f9f8f87a05feb718baddb12aa8cbbed3e36a071 | licenses ["MIT"] |
import pygame, math
from game import map, ui
window = pygame.display.set_mode([800, 600])
ui.window = window
screen = "game"
s = {"fullscreen": False}
running = True
gamedata = {"level": 0, "coal": 0, "iron": 1, "copper":0}
tiles = pygame.sprite.Group()
rails = pygame.sprite.Group()
carts = pygame.sprite.Group()
interactables = pygame.sprite.Group()
listmap = []
clock = pygame.time.Clock()
selected = pygame.image.load("./resources/images/selected.png")
selected2 = pygame.image.load("./resources/images/selected2.png")
box = pygame.image.load("./resources/images/box.png")
uibox = pygame.image.load("./resources/images/ui box.png")
class Mouse(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.surface.Surface([1, 1])
self.rect = self.image.get_rect()
self.rect.topleft = [0, 0]
self.clickedcart = None
self.hoveritem = None
self.tl = self.rect.topleft
self.mode = "select"
def pos(self, position):
self.rect.topleft = position
self.tl = self.rect.topleft
m = Mouse()
def snaptogrid(pos):
return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))]
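# Illustrative only: snaptogrid maps pixel coordinates onto the 40 px tile
# grid, e.g. snaptogrid([85, 130]) -> [2, 3].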
def loadlevel(number):
global tiles, rails, carts, gamedata, listmap, interactables
tiles, rails, interactables, listmap = map.loadmap(int(number))
carts.empty()
gamedata["level"] = number
gamedata["coal"] = 0
gamedata["iron"] = 1
gamedata["copper"] = 0
loadlevel(0)
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
m.pos(pygame.mouse.get_pos())
if screen == "game":
if pygame.sprite.spritecollide(m, carts, False) and m.mode == "select":
carts.update("select", m, listmap)
if m.clickedcart != None:
m.mode = "action"
elif m.mode == "action" and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:
m.clickedcart.pathfind(listmap, snaptogrid(m.tl))
m.clickedcart = None
m.mode = "select"
elif event.type == pygame.MOUSEMOTION:
m.pos(pygame.mouse.get_pos())
if screen == "game":
m.hoveritem = None
if len(pygame.sprite.spritecollide(m, carts, False)) > 0:
m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0]
elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0:
m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0]
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
carts.add(map.Cart(snaptogrid(m.tl), "miner"))
if screen == "game":
window.fill([100, 100, 100])
tiles.draw(window)
carts.draw(window)
carts.update("update", m, listmap)
        if m.hoveritem is not None and m.mode != "action":
window.blit(box, [m.rect.left+10, m.rect.top+10])
ui.Resize(30)
ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])
if m.hoveritem.type.startswith("mine") and m.hoveritem not in carts:
ui.Resize(18)
ui.Text("Carts Inside: " + str(m.hoveritem.data["carts"]), [m.rect.left+27, m.rect.top+47])
ui.Text("Max Carts: " + str(m.hoveritem.data["max"]), [m.rect.left+27, m.rect.top+60])
        if m.clickedcart is not None:
window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2])
if m.mode == "action":
window.blit(box, [m.rect.left+10, m.rect.top+10])
ui.Resize(30)
            try:
                ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])
            except AttributeError: # hoveritem may be None while in action mode
                ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25])
if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:
ui.Resize(22)
ui.Text("Click to move", [m.rect.left+27, m.rect.top+45])
ui.Text("Cart Here", [m.rect.left+27, m.rect.top+60])
window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2])
window.blit(uibox, [555, 475])
pygame.display.flip()
clock.tick(60)
fps = clock.get_fps()
pygame.quit()
| avg_line_length 40.292035 | max_line_length 124 | alphanum_fraction 0.573468 | (remaining numeric quality-signal columns omitted) |
| hexsha 7cde5911adb7d9da7046ae21614759503f243fc8 | size 51158 | ext py | lang Python | path sympy/integrals/prde.py | repo Abhi58/sympy | head 5ca228b17a7d44ef08a268ba1fa959d5763634af | licenses ["BSD-3-Clause"] | stars 2 (2019-06-12 to 2019-10-06) | forks 1 (2019-10-02) |
"""
Algorithms for solving Parametric Risch Differential Equations.
The methods used for solving Parametric Risch Differential Equations parallel
those for solving Risch Differential Equations. See the outline in the
docstring of rde.py for more information.
The Parametric Risch Differential Equation problem is, given f, g1, ..., gm in
K(t), to determine if there exist y in K(t) and c1, ..., cm in Const(K) such
that Dy + f*y == Sum(ci*gi, (i, 1, m)), and to find such y and ci if they exist.
For the algorithms here G is a list of tuples of fractions of the terms on the
right hand side of the equation (i.e., gi in k(t)), and Q is a list of terms on
the right hand side of the equation (i.e., qi in k[t]). See the docstring of
each function for more information.
"""
from __future__ import print_function, division
from sympy.core import Dummy, ilcm, Add, Mul, Pow, S
from sympy.core.compatibility import reduce, range
from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer,
bound_degree)
from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation,
residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel,
recognize_log_derivative)
from sympy.matrices import zeros, eye
from sympy.polys import Poly, lcm, cancel, sqf_list
from sympy.polys.polymatrix import PolyMatrix as Matrix
from sympy.solvers import solve
def prde_normal_denom(fa, fd, G, DE):
"""
Parametric Risch Differential Equation - Normal part of the denominator.
Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly
normalized with respect to t, return the tuple (a, b, G, h) such that
a, h in k[t], b in k<t>, G = [g1, ..., gm] in k(t)^m, and for any solution
c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)),
q == y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)).
"""
dn, ds = splitfactor(fd, DE)
Gas, Gds = list(zip(*G))
gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t))
en, es = splitfactor(gd, DE)
p = dn.gcd(en)
h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t)))
a = dn*h
c = a*h
ba = a*fa - dn*derivation(h, DE)*fd
ba, bd = ba.cancel(fd, include=True)
G = [(c*A).cancel(D, include=True) for A, D in G]
return (a, (ba, bd), G, h)
def real_imag(ba, bd, gen):
"""
Helper function, to get the real and imaginary part of a rational function
evaluated at sqrt(-1) without actually evaluating it at sqrt(-1)
    Separates the even and odd power terms by checking the degree of each term
    mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is the real part
    of the numerator, ba[1] is the imaginary part, and bd is the denominator
of the rational function.
"""
bd = bd.as_poly(gen).as_dict()
ba = ba.as_poly(gen).as_dict()
denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()]
denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()]
bd_real = sum(r for r in denom_real)
bd_imag = sum(r for r in denom_imag)
num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()]
num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()]
ba_real = sum(r for r in num_real)
ba_imag = sum(r for r in num_imag)
ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen))
bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen)
return (ba[0], ba[1], bd)
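# A hedged example (not in the original file): evaluate (t + 1)/t at
# t = sqrt(-1) without substituting. The numerator contributes real part 1
# (its degree-0 term) and imaginary part 1 (its degree-1 term), while the
# denominator t is purely imaginary, so
#   real_imag(Poly(t + 1, t), Poly(t, t), t)
# returns polynomials representing (1, -1, 1), i.e. the value 1 - sqrt(-1).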
def prde_special_denom(a, ba, bd, G, DE, case='auto'):
"""
Parametric Risch Differential Equation - Special part of the denominator.
case is one of {'exp', 'tan', 'primitive'} for the hyperexponential,
hypertangent, and primitive cases, respectively. For the hyperexponential
(resp. hypertangent) case, given a derivation D on k[t] and a in k[t],
b in k<t>, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in
k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp.
gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in
k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in
Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in
k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)).
For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this
case.
"""
# TODO: Merge this with the very similar special_denom() in rde.py
if case == 'auto':
case = DE.case
if case == 'exp':
p = Poly(DE.t, DE.t)
elif case == 'tan':
p = Poly(DE.t**2 + 1, DE.t)
elif case in ['primitive', 'base']:
B = ba.quo(bd)
return (a, B, G, Poly(1, DE.t))
else:
raise ValueError("case must be one of {'exp', 'tan', 'primitive', "
"'base'}, not %s." % case)
nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t)
nc = min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G])
n = min(0, nc - min(0, nb))
if not nb:
# Possible cancellation.
if case == 'exp':
dcoeff = DE.d.quo(Poly(DE.t, DE.t))
with DecrementLevel(DE): # We are guaranteed to not have problems,
# because case != 'base'.
alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t)
etaa, etad = frac_in(dcoeff, DE.t)
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
if A is not None:
Q, m, z = A
if Q == 1:
n = min(n, m)
elif case == 'tan':
dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t))
with DecrementLevel(DE): # We are guaranteed to not have problems,
# because case != 'base'.
betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t)
betad = alphad
etaa, etad = frac_in(dcoeff, DE.t)
if recognize_log_derivative(2*betaa, betad, DE):
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
B = parametric_log_deriv(betaa, betad, etaa, etad, DE)
if A is not None and B is not None:
Q, s, z = A
# TODO: Add test
if Q == 1:
n = min(n, s/2)
N = max(0, -nb)
pN = p**N
pn = p**-n # This is 1/h
A = a*pN
B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN
G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G]
h = pn
# (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n)
return (A, B, G, h)
def prde_linear_constraints(a, b, G, DE):
"""
Parametric Risch Differential Equation - Generate linear constraints on the constants.
Given a derivation D on k[t], a, b, in k[t] with gcd(a, b) == 1, and
G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a
matrix M with entries in k(t) such that for any solution c1, ..., cm in
Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)),
(c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy
a*Dp + b*p == Sum(ci*qi, (i, 1, m)).
Because M has entries in k(t), and because Matrix doesn't play well with
Poly, M will be a Matrix of Basic expressions.
"""
m = len(G)
Gns, Gds = list(zip(*G))
d = reduce(lambda i, j: i.lcm(j), Gds)
d = Poly(d, field=True)
Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G]
if not all([ri.is_zero for _, ri in Q]):
N = max([ri.degree(DE.t) for _, ri in Q])
M = Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i))
else:
M = Matrix(0, m, []) # No constraints, return the empty matrix.
qs, _ = list(zip(*Q))
return (qs, M)
def poly_linear_constraints(p, d):
"""
Given p = [p1, ..., pm] in k[t]^m and d in k[t], return
q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such
that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible
by d if and only if (c1, ..., cm) is a solution of Mx = 0, in
which case the quotient is Sum(ci*qi, (i, 1, m)).
"""
m = len(p)
q, r = zip(*[pi.div(d) for pi in p])
if not all([ri.is_zero for ri in r]):
n = max([ri.degree() for ri in r])
M = Matrix(n + 1, m, lambda i, j: r[j].nth(i))
else:
M = Matrix(0, m, []) # No constraints.
return q, M
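# A small sketch (illustrative): with p = [t**2, t] and d = t every remainder
# is zero, so no constraints are generated and the quotients come back directly:
#   q, M = poly_linear_constraints([Poly(t**2, t), Poly(t, t)], Poly(t, t))
#   # q == (Poly(t, t), Poly(1, t)), M == Matrix(0, 2, [])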
def constant_system(A, u, DE):
"""
Generate a system for the constant solutions.
Given a differential field (K, D) with constant field C = Const(K), a Matrix
A, and a vector (Matrix) u with coefficients in K, returns the tuple
(B, v, s), where B is a Matrix with coefficients in C and v is a vector
(Matrix) such that either v has coefficients in C, in which case s is True
and the solutions in C of Ax == u are exactly all the solutions of Bx == v,
    or v has a non-constant coefficient, in which case s is False and Ax == u has no
constant solution.
This algorithm is used both in solving parametric problems and in
determining if an element a of K is a derivative of an element of K or the
logarithmic derivative of a K-radical using the structure theorem approach.
Because Poly does not play well with Matrix yet, this algorithm assumes that
all matrix entries are Basic expressions.
"""
if not A:
return A, u
Au = A.row_join(u)
Au = Au.rref(simplify=cancel, normalize_last=False)[0]
# Warning: This will NOT return correct results if cancel() cannot reduce
# an identically zero expression to 0. The danger is that we might
# incorrectly prove that an integral is nonelementary (such as
# risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x).
# But this is a limitation in computer algebra in general, and implicit
# in the correctness of the Risch Algorithm is the computability of the
# constant field (actually, this same correctness problem exists in any
# algorithm that uses rref()).
#
# We therefore limit ourselves to constant fields that are computable
# via the cancel() function, in order to prevent a speed bottleneck from
# calling some more complex simplification function (rational function
# coefficients will fall into this class). Furthermore, (I believe) this
# problem will only crop up if the integral explicitly contains an
# expression in the constant field that is identically zero, but cannot
# be reduced to such by cancel(). Therefore, a careful user can avoid this
# problem entirely by being careful with the sorts of expressions that
# appear in his integrand in the variables other than the integration
# variable (the structure theorems should be able to completely decide these
# problems in the integration variable).
Au = Au.applyfunc(cancel)
A, u = Au[:, :-1], Au[:, -1]
for j in range(A.cols):
for i in range(A.rows):
if A[i, j].has(*DE.T):
# This assumes that const(F(t0, ..., tn) == const(K) == F
Ri = A[i, :]
# Rm+1; m = A.rows
Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True)/
derivation(A[i, j], DE, basic=True))
Rm1 = Rm1.applyfunc(cancel)
um1 = cancel(derivation(u[i], DE, basic=True)/
derivation(A[i, j], DE, basic=True))
for s in range(A.rows):
# A[s, :] = A[s, :] - A[s, i]*A[:, m+1]
Asj = A[s, j]
A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj]))
                    # u[s] = u[s] - A[s, j]*u[m+1]
u.row_op(s, lambda r, jj: cancel(r - Asj*um1))
A = A.col_join(Rm1)
u = u.col_join(Matrix([um1]))
return (A, u)
def prde_spde(a, b, Q, n, DE):
"""
Special Polynomial Differential Equation algorithm: Parametric Version.
Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t]
with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Q, R, n1), with
Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution
c1, ..., cm in Const(k) and q in k[t] of degree at most n of
a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has
degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m))
"""
R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q]))
A = a
B = b + derivation(a, DE)
Qq = [zi - derivation(ri, DE) for ri, zi in zip(R, Z)]
R = list(R)
n1 = n - a.degree(DE.t)
return (A, B, Qq, R, n1)
def prde_no_cancel_b_large(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
db = b.degree(DE.t)
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, -1, -1): # [n, ..., 0]
for i in range(m):
si = Q[i].nth(N + db)/b.LC()
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if all(qi.is_zero for qi in Q):
dc = -1
M = zeros(0, 2)
else:
dc = max([qi.degree(DE.t) for qi in Q])
M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return (H, A)
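# Hypothetical worked example (values assumed; not part of the original
# source): for D = d/dt, b = Poly(1, t), Q = [Poly(t, t)] and n = 1, the
# loop peels off leading terms: N = 1 gives s = 1, H = [t], Q = [-1];
# N = 0 gives s = -1, H = [t - 1], Q = [0].  The residuals vanish, so the
# only constraint row is [1, -1], i.e. d1 == c1, matching the exact
# solution q = c1*(t - 1) of Dq + q == c1*t.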
def prde_no_cancel_b_small(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) small enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j, 1, r)) where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, 0, -1): # [n, ..., 1]
for i in range(m):
si = Q[i].nth(N + DE.d.degree(DE.t) - 1)/(N*DE.d.LC())
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if b.degree(DE.t) > 0:
for i in range(m):
si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t)
H[i] = H[i] + si
Q[i] = Q[i] - derivation(si, DE) - b*si
if all(qi.is_zero for qi in Q):
dc = -1
M = Matrix()
else:
dc = max([qi.degree(DE.t) for qi in Q])
M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return (H, A)
# else: b is in k, deg(qi) < deg(Dt)
t = DE.t
if DE.case != 'base':
with DecrementLevel(DE):
t0 = DE.t # k = k0(t0)
ba, bd = frac_in(b, t0, field=True)
Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q]
f, B = param_rischDE(ba, bd, Q0, DE)
# f = [f1, ..., fr] in k^r and B is a matrix with
# m + r columns and entries in Const(k) = Const(k0)
# such that Dy0 + b*y0 = Sum(ci*qi, (i, 1, m)) has
# a solution y0 in k with c1, ..., cm in Const(k)
# if and only if y0 = Sum(dj*fj, (j, 1, r)) where
# d1, ..., dr are in Const(k) and
# B*Matrix([c1, ..., cm, d1, ..., dr]) == 0.
# Transform fractions (fa, fd) in f into constant
# polynomials fa/fd in k[t].
# (Is there a better way?)
f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True)
for fa, fd in f]
else:
# Base case. Dy == 0 for all y in k and b == 0.
# Dy + b*y = Sum(ci*qi) is solvable if and only if
# Sum(ci*qi) == 0 in which case the solutions are
# y = d1*f1 for f1 = 1 and any d1 in Const(k) = k.
f = [Poly(1, t, field=True)] # r = 1
B = Matrix([[qi.TC() for qi in Q] + [S(0)]])
# The condition for solvability is
# B*Matrix([c1, ..., cm, d1]) == 0
# There are no constraints on d1.
# Coefficients of t^j (j > 0) in Sum(ci*qi) must be zero.
d = max([qi.degree(DE.t) for qi in Q])
if d > 0:
M = Matrix(d, m, lambda i, j: Q[j].nth(i + 1))
A, _ = constant_system(M, zeros(d, 1), DE)
else:
# No constraints on the hj.
A = Matrix(0, m, [])
# Solutions of the original equation are
# y = Sum(dj*fj, (j, 1, r)) + Sum(ei*hi, (i, 1, m)),
# where ei == ci (i = 1, ..., m), when
# A*Matrix([c1, ..., cm]) == 0 and
# B*Matrix([c1, ..., cm, d1, ..., dr]) == 0
# Build combined constraint matrix with m + r + m columns.
r = len(f)
I = eye(m)
A = A.row_join(zeros(A.rows, r + m))
B = B.row_join(zeros(B.rows, m))
C = I.row_join(zeros(m, r)).row_join(-I)
return f + H, A.col_join(B).col_join(C)
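# Hypothetical worked example (values assumed; not part of the original
# source): for D = d/dt with b == 0, Q = [Poly(1, t)] and n = 1, the
# leading-term loop gives H = [t] and empties Q; the base branch then
# contributes f = [1], so q = d1*1 + e1*t subject to e1 == c1, i.e. the
# familiar antiderivative c1*t + d1 of the constant c1.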
def prde_cancel_liouvillian(b, Q, n, DE):
"""
Pg. 237 of Bronstein's book.
"""
H = []
# DecrementLevel is used because we assume that we can solve
# such problems over 'k' (not k[t])
if DE.case == 'primitive':
with DecrementLevel(DE):
ba, bd = frac_in(b, DE.t, field=True)
for i in range(n, -1, -1):
if DE.case == 'exp': # this re-checking can be avoided
with DecrementLevel(DE):
ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t,
DE.t, field=True)
with DecrementLevel(DE):
Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q]
fi, Ai = param_rischDE(ba, bd, Qy, DE)
fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True)
for fa, fd in fi]
ri = len(fi)
if i == n:
M = Ai
else:
M = Ai.col_join(M.row_join(zeros(M.rows, ri)))
Fi, hi = [None]*ri, [None]*ri
# from eq. on top of p.238 (unnumbered)
for j in range(ri):
hji = fi[j]*DE.t**i
hi[j] = hji
# building up Sum(djn*(D(fjn*t^n) - b*fjnt^n))
Fi[j] = -(derivation(hji, DE) - b*hji)
H += hi
# on the next iteration of the loop, Q + Fi
# takes the place of Q
Q = Q + Fi
return (H, M)
def param_poly_rischDE(a, b, q, n, DE):
"""Polynomial solutions of a parametric Risch differential equation.
Given a derivation D in k[t], a, b in k[t] relatively prime, and q
= [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and
a matrix A with m + r columns and entries in Const(k) such that
a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n
in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
"""
m = len(q)
if n < 0:
# Only the trivial zero solution is possible.
# Find relations between the qi.
if all([qi.is_zero for qi in q]):
return [], zeros(1, m) # No constraints.
N = max([qi.degree(DE.t) for qi in q])
M = Matrix(N + 1, m, lambda i, j: q[j].nth(i))
A, _ = constant_system(M, zeros(M.rows, 1), DE)
return [], A
if a.is_ground:
# Normalization: a = 1.
a = a.LC()
b, q = b.quo_ground(a), [qi.quo_ground(a) for qi in q]
if not b.is_zero and (DE.case == 'base' or
b.degree() > max(0, DE.d.degree() - 1)):
return prde_no_cancel_b_large(b, q, n, DE)
elif ((b.is_zero or b.degree() < DE.d.degree() - 1)
and (DE.case == 'base' or DE.d.degree() >= 2)):
return prde_no_cancel_b_small(b, q, n, DE)
elif (DE.d.degree() >= 2 and
b.degree() == DE.d.degree() - 1 and
n > -b.as_poly().LC()/DE.d.as_poly().LC()):
raise NotImplementedError("prde_no_cancel_b_equal() is "
"not yet implemented.")
else:
# Liouvillian cases
if DE.case == 'primitive' or DE.case == 'exp':
return prde_cancel_liouvillian(b, q, n, DE)
else:
raise NotImplementedError("non-linear and hypertangent "
"cases have not yet been implemented")
# else: deg(a) > 0
# Iterate SPDE as long as possible, accumulating coefficients
# and terms for the recovery of the original solutions.
alpha, beta = 1, [0]*m
while n >= 0: # and a, b relatively prime
a, b, q, r, n = prde_spde(a, b, q, n, DE)
beta = [betai + alpha*ri for betai, ri in zip(beta, r)]
alpha *= a
# Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to
# solutions alpha*p + Sum(ci*betai) of the initial equation.
d = a.gcd(b)
if not d.is_ground:
break
# a*Dp + b*p = Sum(ci*qi) may have a polynomial solution
# only if the sum is divisible by d.
qq, M = poly_linear_constraints(q, d)
# qq = [qq1, ..., qqm] where qqi = qi.quo(d).
# M is a matrix with m columns and entries in k.
# Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements of k, is
# divisible by d if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the quotient is Sum(fi*qqi).
A, _ = constant_system(M, zeros(M.rows, 1), DE)
# A is a matrix with m columns and entries in Const(k).
# Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero
# for c1, ..., cm in Const(k) if and only if
# A*Matrix([c1, ...,cm]) == 0.
V = A.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi).
# Sum(ci*qi) is divisible by d if and only if ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case, solutions of
# a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi))
# are the same as those of
# (a/d)*Dp + (b/d)*p = Sum(dj*rj)
# where rj = Sum(aji*qqi).
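# Hypothetical illustration (values assumed; not part of the original
# source): if d = t, q1 = t and q2 = 1, then M forces f2 == 0, the
# nullspace is spanned by v1 = (1, 0), and the reduced right-hand side
# is r1 = qq1 = 1.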
if not V: # No non-trivial solution.
return [], eye(m) # Could return A, but this has
# the minimum number of rows.
Mqq = Matrix([qq]) # A single row.
r = [(Mqq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to
# solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial
# equation. These are equal to alpha*p + Sum(dj*fj) where
# fj = Sum(aji*betai).
Mbeta = Matrix([beta])
f = [(Mbeta*vj)[0] for vj in V] # [f1, ..., fu]
#
# Solve the reduced equation recursively.
#
g, B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE)
# g = [g1, ..., gv] in k[t]^v and B is a matrix with u + v
# columns and entries in Const(k) such that
# (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*gk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation are then
# Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)).
# Collect solution components.
h = f + [alpha*gk for gk in g]
# Build combined relation matrix.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(g)))
A = A.col_join(zeros(B.rows, m).row_join(B))
return h, A
def param_rischDE(fa, fd, G, DE):
"""
Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).
Given a derivation D in k(t), f in k(t), and G
= [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and
a matrix A with m + r columns and entries in Const(k) such that
Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y
in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
Elements of k(t) are tuples (a, d) with a and d in k[t].
"""
m = len(G)
q, (fa, fd) = weak_normalizer(fa, fd, DE)
# Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi)
# correspond to solutions y = z/q of the original equation.
gamma = q
G = [(q*ga).cancel(gd, include=True) for ga, gd in G]
a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE)
# Solutions q in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond
# to solutions z = q/hn of the weakly normalized equation.
gamma *= hn
A, B, G, hs = prde_special_denom(a, ba, bd, G, DE)
# Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond
# to solutions q = p/hs of the previous equation.
gamma *= hs
g = A.gcd(B)
a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for
gia, gid in G]
# a*Dp + b*p = Sum(ci*gi) may have a polynomial solution
# only if the sum is in k[t].
q, M = prde_linear_constraints(a, b, g, DE)
# q = [q1, ..., qm] where qi in k[t] is the polynomial component
# of the partial fraction expansion of gi.
# M is a matrix with m columns and entries in k.
# Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k,
# is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the sum is equal to Sum(fi*qi).
M, _ = constant_system(M, zeros(M.rows, 1), DE)
# M is a matrix with m columns and entries in Const(k).
# Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k)
# if and only if M*Matrix([c1, ..., cm]) == 0,
# in which case the sum is Sum(ci*qi).
## Reduce number of constants at this point
V = M.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u).
# Sum(ci*gi) is in k[t] if and only if ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case,
# Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj)
# where rj = Sum(aji*qi) (j = 1, ..., u) in k[t].
if not V: # No non-trivial solution
return [], eye(m)
Mq = Matrix([q]) # A single row.
r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions
# y = p/gamma of the initial equation with ci = Sum(dj*aji).
try:
n = bound_degree(a, b, r, DE, parametric=True)
except NotImplementedError:
# A temporary bound n = 5 is set when bound_degree() is not yet
# implemented for this case; eventually it will be removed. At least
# for prde_spde, the iteration terminates no matter what n is, but the
# currently added test case already takes a long time with n = 5, and
# much longer with larger n's.
n = 5
h, B = param_poly_rischDE(a, b, r, n, DE)
# h = [h1, ..., hv] in k[t]^v and B is a matrix with u + v
# columns and entries in Const(k) such that
# a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation for ci = Sum(dj*aji)
# (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma.
## Build combined relation matrix with m + u + v columns.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(h)))
A = A.col_join(zeros(B.rows, m).row_join(B))
## Eliminate d1, ..., du.
W = A.nullspace()
# W = [w1, ..., wt] where each wl is a column matrix with
# entries blk (k = 1, ..., m + u + v) in Const(k).
# The vectors (bl1, ..., blm) generate the space of those
# constant families (c1, ..., cm) for which a solution of
# the equation Dy + f*y == Sum(ci*Gi) exists. They generate
# the space and form a basis except possibly when Dy + f*y == 0
# is solvable in k(t). The corresponding solutions are
# y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u.
v = len(h)
M = Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's.
N = M.nullspace()
# N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column
# vectors generating the space of linear relations between
# c1, ..., cm, e1, ..., ev.
C = Matrix([ni[:] for ni in N]) # rows n1, ..., ns.
return [hk.cancel(gamma, include=True) for hk in h], C
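# Hypothetical usage sketch (assumes SymPy's DifferentialExtension helper
# and illustrative values; not part of the original source):
#
#     from sympy import Poly, exp
#     from sympy.abc import x
#     from sympy.integrals.risch import DifferentialExtension
#     DE = DifferentialExtension(exp(x), x)      # t = exp(x), Dt = t
#     fa, fd = Poly(0, DE.t), Poly(1, DE.t)      # f == 0
#     G = [(Poly(DE.t, DE.t), Poly(1, DE.t))]    # G1 = t
#     h, A = param_rischDE(fa, fd, G, DE)
#     # Dy == c1*t has solutions y = c1*t, so h should contain t and A
#     # should encode the relation d1 == c1.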
def limited_integrate_reduce(fa, fd, G, DE):
"""
Simpler version of step 1 & 2 for the limited integration problem.
Given a derivation D on k(t) and f, g1, ..., gn in k(t), return
(a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer,
g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t),
c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>, and
p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore,
if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian
over k, then deg(p) <= N.
So that the special part is always computed, this function calls the more
general prde_special_denom() automatically if it cannot determine that
S1irr == Sirr. Furthermore, it will automatically call bound_degree() when
# t is linear and non-Liouvillian, which, for the transcendental case,
# implies that Dt == a*t + b for some a, b in k*.
"""
dn, ds = splitfactor(fd, DE)
E = [splitfactor(gd, DE) for _, gd in G]
En, Es = list(zip(*E))
c = reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm)
hn = c.gcd(c.diff(DE.t))
a = hn
b = -derivation(hn, DE)
N = 0
# These are the cases where we know that S1irr = Sirr, but there could be
# others, and this algorithm will need to be extended to handle them.
if DE.case in ['base', 'primitive', 'exp', 'tan']:
hs = reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1, ..., esm)
a = hn*hs
b -= (hn*derivation(hs, DE)).quo(hs)
mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for
ga, gd in G]))
# So far, all the above are also nonlinear or Liouvillian, but if this
# changes, then this will need to be updated to call bound_degree()
# as per the docstring of this function (DE.case == 'other_linear').
N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) - mu)
else:
# TODO: implement this
raise NotImplementedError
V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G]
return (a, b, a, N, (a*hn*fa).cancel(fd, include=True), V)
def limited_integrate(fa, fd, G, DE):
"""
Solves the limited integration problem: f = Dv + Sum(ci*wi, (i, 1, n))
"""
fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic()
# interpreting the limited integration problem as a
# parametric Risch DE problem
Fa = Poly(0, DE.t)
Fd = Poly(1, DE.t)
G = [(fa, fd)] + G
h, A = param_rischDE(Fa, Fd, G, DE)
V = A.nullspace()
V = [v for v in V if v[0] != 0]
if not V:
return None
else:
# we can take any vector from V, we take V[0]
c0 = V[0][0]
# v = [-1, c1, ..., cm, d1, ..., dr]
v = V[0]/(-c0)
r = len(h)
m = len(v) - r - 1
C = list(v[1: m + 1])
y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \
for i in range(r)])
y_num, y_den = y.as_numer_denom()
Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t)
Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic()
return Y, C
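# Hypothetical example (values assumed; not part of the original source):
# for f = 1 + 1/x with w1 = 1/x over QQ(x), a solution of
# f == Dv + c1*w1 is v = x, c1 = 1, since Dx == 1.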
def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None):
"""
Parametric logarithmic derivative heuristic.
Given a derivation D on k[t], f in k(t), and a hyperexponential monomial
theta over k(t), this either raises NotImplementedError (the heuristic
failed), returns None (it has proven that no solution exists), or returns
a solution (n, m, v) of the equation
n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0.
If this heuristic fails, the structure theorem approach will need to be
used.
The argument w == Dtheta/theta
"""
# TODO: finish writing this and write tests
c1 = c1 or Dummy('c1')
p, a = fa.div(fd)
q, b = wa.div(wd)
B = max(0, derivation(DE.t, DE).degree(DE.t) - 1)
C = max(p.degree(DE.t), q.degree(DE.t))
if q.degree(DE.t) > B:
eqs = [p.nth(i) - c1*q.nth(i) for i in range(B + 1, C + 1)]
s = solve(eqs, c1)
if not s or not s[c1].is_Rational:
# deg(q) > B, no solution for c.
return None
M, N = s[c1].as_numer_denom()
nfmwa = N*fa*wd - M*wa*fd
nfmwd = fd*wd
Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd, DE,
'auto')
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return None
Q, v = Qv
if Q.is_zero or v.is_zero:
return None
return (Q*N, Q*M, v)
if p.degree(DE.t) > B:
return None
c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC())
l = fd.monic().lcm(wd.monic())*Poly(c, DE.t)
ln, ls = splitfactor(l, DE)
z = ls*ln.gcd(ln.diff(DE.t))
if not z.has(DE.t):
# TODO: We treat this as 'no solution', until the structure
# theorem version of parametric_log_deriv is implemented.
return None
u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z)
u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z)
eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))]
s = solve(eqs, c1)
if not s or not s[c1].is_Rational:
# deg(q) <= B, no solution for c.
return None
M, N = s[c1].as_numer_denom()
nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd
nfmwd = fd*wd
Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE)
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return None
Q, v = Qv
if Q.is_zero or v.is_zero:
return None
return (Q*N, Q*M, v)
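# Hypothetical example (values assumed; not part of the original source):
# with theta = exp(x), w = Dtheta/theta = 1, and f = 2 + 1/x, a solution
# of n*f == Dv/v + m*w is (n, m, v) = (1, 2, x), since Dx/x == 1/x.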
def parametric_log_deriv(fa, fd, wa, wd, DE):
# TODO: Write the full algorithm using the structure theorems.
# try:
A = parametric_log_deriv_heu(fa, fd, wa, wd, DE)
# except NotImplementedError:
# Heuristic failed, we have to use the full method.
# TODO: This could be implemented more efficiently.
# It isn't too worrisome, because the heuristic handles most difficult
# cases.
return A
def is_deriv_k(fa, fd, DE):
r"""
Checks if Df/f is the derivative of an element of k(t).
a in k(t) is the derivative of an element of k(t) if there exists b in k(t)
such that a = Db. Either returns (ans, u), such that Df/f == Du, or None,
which means that Df/f is not the derivative of an element of k(t). ans is
a list of tuples such that Add(*[i*j for i, j in ans]) == u. This is useful
for seeing exactly which elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
f in K, Df/f is the derivative of an element of K if and only if there are ri
in QQ such that::
Sum(ri*Dt_i, i in L_K/C(x)) + Sum(ri*(Dt_i/t_i), i in E_K/C(x)) == Df/f.
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). E_args are the arguments of the
hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] ==
exp(E_args[i])). This is needed to compute the final answer u such that
Df/f == Du.
log(f) will be the same as u up to an additive constant. This is because
they will both behave the same as monomials. For example, both log(x) and
log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant.
Therefore, the term const is returned. const is such that
log(const) + f == u. This is calculated by dividing the arguments of one
logarithm from the other. Therefore, it is necessary to pass the arguments
of the logarithmic terms in L_args.
To handle the case where we are given Df/f, not f, use is_deriv_k_in_field().
See also
========
is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical
"""
# Compute Df/f
dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa
dfa, dfd = dfa.cancel(dfd, include=True)
# Our assumption here is that each monomial is recursively transcendental
if len(DE.exts) != len(DE.D):
if [i for i in DE.cases if i == 'tan'] or \
(set([i for i in DE.cases if i == 'primitive']) -
set(DE.indices('log'))):
raise NotImplementedError("Real version of the structure "
"theorems with hypertangent support is not yet implemented.")
# TODO: What should really be done in this case?
raise NotImplementedError("Nonelementary extensions not supported "
"in the structure theorems.")
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
# If the elements of u are not all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
return None
else:
if not all(i.is_Rational for i in u):
raise NotImplementedError("Cannot work with non-rational "
"coefficients in this case.")
else:
terms = ([DE.extargs[i] for i in DE.indices('exp')] +
[DE.T[i] for i in DE.indices('log')])
ans = list(zip(terms, u))
result = Add(*[Mul(i, j) for i, j in ans])
argterms = ([DE.T[i] for i in DE.indices('exp')] +
[DE.extargs[i] for i in DE.indices('log')])
l = []
ld = []
for i, j in zip(argterms, u):
# We need to get around things like sqrt(x**2) != x
# and also sqrt(x**2 + 2*x + 1) != x + 1
# Issue 10798: i need not be a polynomial
i, d = i.as_numer_denom()
icoeff, iterms = sqf_list(i)
l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms])))
dcoeff, dterms = sqf_list(d)
ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e in dterms])))
const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld))
return (ans, result, const)
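# Hypothetical example (values assumed; not part of the original source):
# for f = exp(x) in an extension with t = exp(x), Df/f == 1 == Dx, so a
# structure-theorem answer is u = x with ans = [(x, 1)].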
def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
r"""
Checks if Df is the logarithmic derivative of a k(t)-radical.
b in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u.
Either returns (ans, u, n, const) or None, which means that Df cannot be
written as the logarithmic derivative of a k(t)-radical. ans is a list of
tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for
seeing exactly what elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
f in K, Df is the logarithmic derivative of a K-radical if and only if there
are ri in QQ such that::
Sum(ri*Dt_i, i in L_K/C(x)) + Sum(ri*(Dt_i/t_i), i in E_K/C(x)) == Df.
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). L_args are the arguments of the logarithms
indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is
needed to compute the final answer u such that n*f == Du/u.
exp(f) will be the same as u up to a multiplicative constant. This is
because they will both behave the same as monomials. For example, both
exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const
is returned. const is such that exp(const)*f == u. This is calculated by
subtracting the arguments of one exponential from the other. Therefore, it
is necessary to pass the arguments of the exponential terms in E_args.
To handle the case where we are given Df, not f, use
is_log_deriv_k_t_radical_in_field().
See also
========
is_log_deriv_k_t_radical_in_field, is_deriv_k
"""
H = []
if Df:
dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2,
include=True)
else:
dfa, dfd = fa, fd
# Our assumption here is that each monomial is recursively transcendental
if len(DE.exts) != len(DE.D):
if [i for i in DE.cases if i == 'tan'] or \
(set([i for i in DE.cases if i == 'primitive']) -
set(DE.indices('log'))):
raise NotImplementedError("Real version of the structure "
"theorems with hypertangent support is not yet implemented.")
# TODO: What should really be done in this case?
raise NotImplementedError("Nonelementary extensions not supported "
"in the structure theorems.")
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
# If the elements of u are not all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
return None
else:
if not all(i.is_Rational for i in u):
# TODO: But maybe we can tell if they're not rational, like
# log(2)/log(3). Also, there should be an option to continue
# anyway, even if the result might potentially be wrong.
raise NotImplementedError("Cannot work with non-rational "
"coefficients in this case.")
else:
n = reduce(ilcm, [i.as_numer_denom()[1] for i in u])
u *= n
terms = ([DE.T[i] for i in DE.indices('exp')] +
[DE.extargs[i] for i in DE.indices('log')])
ans = list(zip(terms, u))
result = Mul(*[Pow(i, j) for i, j in ans])
# exp(f) will be the same as result up to a multiplicative
# constant. We now find the log of that constant.
argterms = ([DE.extargs[i] for i in DE.indices('exp')] +
[DE.T[i] for i in DE.indices('log')])
const = cancel(fa.as_expr()/fd.as_expr() -
Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))
return (ans, result, n, const)
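# Hypothetical example (values assumed; not part of the original source):
# for f = log(x) in an extension with t = log(x), Df == 1/x == Dx/x, so
# n = 1, u = x solves n*Df == Du/u and ans = [(x, 1)].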
def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None):
"""
Checks if f can be written as the logarithmic derivative of a k(t)-radical.
It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False)
for any given fa, fd, DE in that it finds the solution in the
given field, not in some (possibly unspecified) extension; the
"in_field" suffix in the function name is used to indicate that.
f in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u.
Either returns (n, u) or None, which means that f cannot be written as the
logarithmic derivative of a k(t)-radical.
case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive,
hyperexponential, and hypertangent cases, respectively. If case is 'auto',
it will attempt to determine the type of the derivation automatically.
See also
========
is_log_deriv_k_t_radical, is_deriv_k
"""
fa, fd = fa.cancel(fd, include=True)
# f must be simple
n, s = splitfactor(fd, DE)
if not s.is_one:
pass
z = z or Dummy('z')
H, b = residue_reduce(fa, fd, DE, z=z)
if not b:
# I will have to verify, but I believe that the answer should be
# None in this case. This should never happen for the
# functions given when solving the parametric logarithmic
# derivative problem when integrating elementary functions (see
# Bronstein's book, page 255), so most likely this indicates a bug.
return None
roots = [(i, i.real_roots()) for i, _ in H]
if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for
i, j in roots):
# If f is the logarithmic derivative of a k(t)-radical, then all the
# roots of the resultant must be rational numbers.
return None
# [(a, i), ...], where i*log(a) is a term in the log-part of the integral
# of f
respolys, residues = list(zip(*roots)) or [[], []]
# Note: this might be empty, but everything below should work fine in that
# case (it should be the same as if it were [[1, 1]])
residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for
i in residues[j]]
# TODO: finish writing this and write tests
p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z))
p = p.as_poly(DE.t)
if p is None:
# f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical
return None
if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)):
return None
if case == 'auto':
case = DE.case
if case == 'exp':
wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True)
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t, cancel=True)
wa, wd = frac_in((wa, wd), DE.t)
A = parametric_log_deriv(pa, pd, wa, wd, DE)
if A is None:
return None
n, e, u = A
u *= DE.t**e
elif case == 'primitive':
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t)
A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto')
if A is None:
return None
n, u = A
elif case == 'base':
# TODO: we can use more efficient residue reduction from ratint()
if not fd.is_sqf or fa.degree() >= fd.degree():
# f is the logarithmic derivative in the base case if and only if
# f = fa/fd, fd is square-free, deg(fa) < deg(fd), and
# gcd(fa, fd) == 1. The last condition is handled by cancel() above.
return None
# Note: if residueterms = [], returns (1, 1)
# f had better be 0 in that case.
n = reduce(ilcm, [i.as_numer_denom()[1] for _, i in residueterms], S(1))
u = Mul(*[Pow(i, j*n) for i, j in residueterms])
return (n, u)
elif case == 'tan':
raise NotImplementedError("The hypertangent case is "
"not yet implemented for is_log_deriv_k_t_radical_in_field()")
elif case in ['other_linear', 'other_nonlinear']:
# XXX: If these are supported by the structure theorems, change to NotImplementedError.
raise ValueError("The %s case is not supported in this function." % case)
else:
raise ValueError("case must be one of {'primitive', 'exp', 'tan', "
"'base', 'auto'}, not %s" % case)
common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _, j in
residueterms]] + [n], S(1))
residueterms = [(i, j*common_denom) for i, j in residueterms]
m = common_denom//n
if common_denom != n*m: # Verify exact division
raise ValueError("Inexact division")
u = cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms]))
return (common_denom, u)
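# Hypothetical example (values assumed; not part of the original source):
# in the base case k = QQ(x) with f = 1/x, fd = x is square-free, the
# single residue at the root of x is 1, and the function returns
# (n, u) = (1, x), i.e. 1*f == Dx/x.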
| 7ce07b8311ef90c93682c15fc681abf9e95c0bb7 | 1,076 | py | Python | ssh_telnet/netmiko/ex07_netmiko_command_mult_prompts.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | ["MIT"] | 11 | 2021-04-05T09:30:23.000Z | 2022-03-09T13:27:56.000Z |
from pprint import pprint
import yaml
import netmiko
import paramiko
def send_cmd_with_prompt(device, command, *, wait_for, confirmation):
if isinstance(wait_for, str):
wait_for = [wait_for]
if isinstance(confirmation, str):
confirmation = [confirmation]
with netmiko.Netmiko(**device) as ssh:
ssh.enable()
result = ssh.send_command_timing(
command, strip_prompt=False, strip_command=False
)
for wait, confirm in zip(wait_for, confirmation):
if wait in result:
result += ssh.send_command_timing(
confirm, strip_prompt=False, strip_command=False
)
return result
if __name__ == "__main__":
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
r1 = devices[0]
out = send_cmd_with_prompt(
r1, "copy run start", wait_for="Destination filename", confirmation="\n"
)
print(out)
"""
R1#copy run start
Destination filename [startup-config]?
Building configuration...
[OK]
R1#
"""
| 7ce146b894402021fe89e46e79f310a76ff9ef08 | 2,479 | py | Python | LightTestLoop.py | Growing-Beyond-Earth/GettingStarted | 04c2fd5fa36224ac25a6c6c62c4d6e558b27e700 | ["Apache-2.0"] | null | null | null |
# GROWING BEYOND EARTH CONTROL BOX Training
# RASPBERRY PI PICO / MICROPYTHON
# FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18, 2021
# The Growing Beyond Earth (GBE) control box is a device that controls
# the LED lights and fan in a GBE growth chamber. It can also control
# accessories including a 12v water pump and environmental sensors.
# The device is based on a Raspberry Pi Pico microcontroller running
# Micropython.
# Lesson written by @MarioTheMaker
from sys import stdin, stdout, exit
import machine
import time
#Set the brightness for each color
red_brightness = 100
green_brightness = 100
blue_brightness = 100
white_brightness = 100
# Pulse width modulation (PWM) is a way to get an artificial analog output on a digital pin.
# It achieves this by rapidly toggling the pin from low to high. There are two parameters
# associated with this: the frequency of the toggling, and the duty cycle.
# The duty cycle is defined to be how long the pin is high compared with the length of a
# single period (low plus high time). Maximum duty cycle is when the pin is high all of the
# time, and minimum is when it is low all of the time.
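# For example (illustrative): a 50% duty cycle keeps the pin high for half
# of each period, so duty_u16(32768) requests roughly half of the maximum
# 65535 output.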
# https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#:
# control I/O pins
# machine.Pin(id, mode=- 1, pull=- 1, *, value, drive, alt)
# Access the pin peripheral (GPIO pin) associated with the given id.
# If additional arguments are given in the constructor then they are used to initialise
# the pin. Any settings that are not specified will remain in their previous state.
# More info https://docs.micropython.org/en/latest/library/machine.Pin.html
r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel
g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel
b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel
w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel
# More info https://docs.micropython.org/en/latest/library/machine.PWM.html
# Start a loop and change the brightness multiplier "n"
# PWM.duty_u16([value]) Get the current duty cycle of the PWM output,
# as an unsigned 16-bit value in the range 0 to 65535 inclusive.
n = 100
while n > 0:
print("Power Level ",n)
r.duty_u16(int(red_brightness)*n)
g.duty_u16(int(green_brightness)*n)
b.duty_u16(int(blue_brightness)*n)
w.duty_u16(int(white_brightness)*n)
time.sleep(.3)
n = n - 5
#Turn all the lights off
time.sleep(3)
r.duty_u16(0)
g.duty_u16(0)
b.duty_u16(0)
w.duty_u16(0)
| 7ce15c82ddc26277baddffb09d13b58c226ab5d6 | 3,409 | py | Python | core/known_bugs_utils.py | nicolasbock/hotsos | 6a0d650a8d76b5a5f85f4ddc8c0a9f8939e1de7a | ["Apache-2.0"] | null | null | null |
import os
import yaml
from core import plugintools
from core import constants
from core.searchtools import SearchDef
from core.issues.issue_utils import IssueEntry
LAUNCHPAD = "launchpad"
MASTER_YAML_KNOWN_BUGS_KEY = "bugs-detected"
KNOWN_BUGS = {MASTER_YAML_KNOWN_BUGS_KEY: []}
class BugSearchDef(SearchDef):
def __init__(self, pattern, bug_id, hint, reason,
reason_format_result_groups=None):
"""
@param reason: string reason describing the issue and why it has been
flagged. This string can be a template i.e. containing {} fields that
can be rendered using results.
@param reason_format_result_groups: if the reason string is a template,
this is a list of indexes in the results that can be extracted for
inclusion in the reason.
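Illustrative sketch (hypothetical values): with reason="port {} is down"
and reason_format_result_groups=[1], rendered_reason() substitutes
search_result.get(1) into the template.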
"""
super().__init__(pattern, tag=bug_id, hint=hint)
self._reason = reason
if reason is None:
self._reason = ""
self.reason_format_result_groups = reason_format_result_groups
@property
def reason(self):
return self._reason
def rendered_reason(self, search_result):
if self._reason and self.reason_format_result_groups:
values = []
for idx in self.reason_format_result_groups:
values.append(search_result.get(idx))
return self._reason.format(*values)
return self._reason
def _get_known_bugs():
"""
Fetch the current plugin known_bugs.yaml if it exists and return its
contents or None if it doesn't exist yet.
"""
if not os.path.isdir(constants.PLUGIN_TMP_DIR):
raise Exception("plugin tmp dir '{}' not found".
format(constants.PLUGIN_TMP_DIR))
known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, "known_bugs.yaml")
if not os.path.exists(known_bugs_yaml):
return {}
bugs = yaml.safe_load(open(known_bugs_yaml))
if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY):
return bugs
return {}
def add_known_bug(bug_id, description=None, type=LAUNCHPAD):
"""
Fetch the current plugin known_bugs.yaml if it exists and add new bug with
description of the bug.
"""
if not os.path.isdir(constants.PLUGIN_TMP_DIR):
raise Exception("plugin tmp dir '{}' not found".
format(constants.PLUGIN_TMP_DIR))
if type == LAUNCHPAD:
new_bug = "https://bugs.launchpad.net/bugs/{}".format(bug_id)
if description is None:
description = "no description provided"
entry = IssueEntry(new_bug, description, key="id")
current = _get_known_bugs()
if current and current.get(MASTER_YAML_KNOWN_BUGS_KEY):
current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data)
else:
current = {MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]}
known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, "known_bugs.yaml")
with open(known_bugs_yaml, 'w') as fd:
fd.write(yaml.dump(current))
def add_known_bugs_to_master_plugin():
"""
Fetch the current plugin known_bugs.yaml and add it to the master yaml.
Note that this can only be called once per plugin and is typically
performed as a final part after all others have executed.
"""
bugs = _get_known_bugs()
if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY):
plugintools.save_part(bugs, priority=99)
| 7ce1993cbdc65a6d053c9478a3f9b9475d29bb5c | 7,083 | py | Python | tf_pose/slim/nets/mobilenet/mobilenet_v2_test.py | gpspelle/pose-estimation | b817dcc120092002984d8a41431046f323bc02c8 | ["Apache-2.0"] | 862 | 2019-12-11T18:40:48.000Z | 2022-03-29T15:23:58.000Z |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
slim = tf.contrib.slim
def find_ops(optype):
"""Find ops of a given type in graphdef or a graph.
Args:
optype: operation type (e.g. Conv2D)
Returns:
List of operations.
"""
gd = tf.get_default_graph()
return [var for var in gd.get_operations() if var.type == optype]
class MobilenetV2Test(tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
def testCreation(self):
spec = dict(mobilenet_v2.V2_DEF)
_, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
# This is mostly a sanity test. No deep reason for these particular
# constants.
#
# All but first 2 and last one have two convolutions, and there is one
# extra conv that is not in the spec. (logits)
self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)
# Check that depthwise are exposed.
for i in range(2, 17):
self.assertIn('layer_%d/depthwise_output' % i, ep)
def testCreationNoClasses(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
net, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec,
num_classes=None)
self.assertIs(net, ep['global_pool'])
def testImageSizes(self):
for input_size, output_size in [(224, 7), (192, 6), (160, 5),
(128, 4), (96, 3)]:
tf.reset_default_graph()
_, ep = mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, input_size, input_size, 3)))
self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],
[output_size] * 2)
def testWithSplits(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
spec['overrides'] = {
(ops.expanded_conv,): dict(split_expansion=2),
}
_, _ = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
# All but 3 ops have 3 conv operators, the remaining 3 have one,
# and there is one unaccounted for.
self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)
def testWithOutputStride8(self):
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testDivisibleBy(self):
tf.reset_default_graph()
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
divisible_by=16,
min_depth=32)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,
1001], s)
def testDivisibleByWithArgScope(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements(s, [32, 192, 128, 1001])
def testFineGrained(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,
finegrain_classification_mode=True)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
# All convolutions will be 8->48, except for the last one.
self.assertSameElements(s, [8, 48, 1001, 1280])
def testMobilenetBase(self):
tf.reset_default_graph()
# Verifies that mobilenet_base returns pre-pooling layer.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
net, _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])
def testWithOutputStride16(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testWithOutputStride8AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testWithOutputStride16AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16,
use_explicit_padding=True)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
sc = mobilenet.training_scope(is_training=None)
self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
slim.batch_norm)])
def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
sc = mobilenet.training_scope(is_training=False)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope(is_training=True)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope()
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
if __name__ == '__main__':
tf.test.main()
| 7ce2ce6e7522a59e86a553aeb0f5ee90bd00e269 | 1,402 | py | Python | firebase-gist.py | darwin/firebase-gist | 5aa4eb89e82fbf2971d7afca07471e1f51ff6e51 | ["MIT"] | 1 | 2017-08-15T15:37:21.000Z | 2017-08-15T15:37:21.000Z |
from firebase import firebase
import os
import datetime
import json
import logging
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from github3 import login
firebase_url = os.environ['FIREBASE_DB']
firebase_secret = os.environ['FIREBASE_SECRET']
firebase_path = os.environ['FIREBASE_PATH']
firebase_username = os.environ['FIREBASE_USERNAME'] # not checked ATM
gh_token = os.environ['GH_TOKEN']
gh_gist = os.environ['GH_GIST']
gh_fname = os.environ['GH_FNAME']
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def connect_firebase():
f = firebase.FirebaseApplication(firebase_url, None)
f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True)
return f
logger.info('==================================')
logger.info('Fetching firebase data')
f = connect_firebase()
data = f.get(firebase_path, None)
new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True)
logger.info('Reading existing gist')
gh = login(token=gh_token)
gist = gh.gist(gh_gist)
old_content = ""
for f in gist.iter_files():
if f.filename == gh_fname:
old_content = f.content
break
if old_content == new_content:
logger.info('No changes detected')
else:
logger.info('Updating gist with new content')
gist.edit(files={
gh_fname: {
"content": new_content
}
})
logger.info('Done.')
| 7ce34380af3cdc654ec22dc00486fd1079b00edb | 25,614 | py | Python | synapse/notifier.py | rkfg/synapse | 0b3112123da5fae4964db784e3bab0c4d83d9d62 | ["Apache-2.0"] | 1 | 2021-09-09T08:50:13.000Z | 2021-09-09T08:50:13.000Z |
# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
from typing import (
Awaitable,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
import attr
from prometheus_client import Counter
from twisted.internet import defer
import synapse.server
from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.api.errors import AuthError
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import log_kv, start_active_span
from synapse.logging.utils import log_function
from synapse.metrics import LaterGauge
from synapse.streams.config import PaginationConfig
from synapse.types import (
Collection,
PersistedEventPosition,
RoomStreamToken,
StreamToken,
UserID,
)
from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
from synapse.util.metrics import Measure
from synapse.visibility import filter_events_for_client
logger = logging.getLogger(__name__)
notified_events_counter = Counter("synapse_notifier_notified_events", "")
users_woken_by_stream_counter = Counter(
"synapse_notifier_users_woken_by_stream", "", ["stream"]
)
T = TypeVar("T")
# TODO(paul): Should be shared somewhere
def count(func: Callable[[T], bool], it: Iterable[T]) -> int:
"""Return the number of items in it for which func returns true."""
n = 0
for x in it:
if func(x):
n += 1
return n
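# Illustrative usage: count(lambda n: n % 2 == 0, range(10)) == 5.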
class _NotificationListener:
"""This represents a single client connection to the events stream.
The events stream handler will have yielded to the deferred, so to
notify the handler it is sufficient to resolve the deferred.
"""
__slots__ = ["deferred"]
def __init__(self, deferred):
self.deferred = deferred
class _NotifierUserStream:
"""This represents a user connected to the event stream.
It tracks the most recent stream token for that user.
At a given point a user may have a number of streams listening for
events.
This listener will also keep track of which rooms it is listening in
so that it can remove itself from the indexes in the Notifier class.
"""
def __init__(
self,
user_id: str,
rooms: Collection[str],
current_token: StreamToken,
time_now_ms: int,
):
self.user_id = user_id
self.rooms = set(rooms)
self.current_token = current_token
# The last token for which we should wake up any streams that have a
# token that comes before it. This gets updated every time we get poked.
# We start it at the current token since if we get any streams
# that have a token from before we have no idea whether they should be
# woken up or not, so lets just wake them up.
self.last_notified_token = current_token
self.last_notified_ms = time_now_ms
with PreserveLoggingContext():
self.notify_deferred = ObservableDeferred(defer.Deferred())
def notify(
self,
stream_key: str,
stream_id: Union[int, RoomStreamToken],
time_now_ms: int,
):
"""Notify any listeners for this user of a new event from an
event source.
Args:
stream_key: The stream the event came from.
stream_id: The new id for the stream the event came from.
time_now_ms: The current time in milliseconds.
"""
self.current_token = self.current_token.copy_and_advance(stream_key, stream_id)
self.last_notified_token = self.current_token
self.last_notified_ms = time_now_ms
prev_notify_deferred = self.notify_deferred
log_kv(
{
"notify": self.user_id,
"stream": stream_key,
"stream_id": stream_id,
"listeners": self.count_listeners(),
}
)
users_woken_by_stream_counter.labels(stream_key).inc()
with PreserveLoggingContext():
self.notify_deferred = ObservableDeferred(defer.Deferred())
prev_notify_deferred.callback(self.current_token)
def remove(self, notifier: "Notifier"):
"""Remove this listener from all the indexes in the Notifier
it knows about.
"""
for room in self.rooms:
lst = notifier.room_to_user_streams.get(room, set())
lst.discard(self)
notifier.user_to_user_stream.pop(self.user_id)
def count_listeners(self) -> int:
return len(self.notify_deferred.observers())
def new_listener(self, token: StreamToken) -> _NotificationListener:
"""Returns a deferred that is resolved when there is a new token
greater than the given token.
Args:
            token: The token from which we are streaming, i.e. we shouldn't
                notify for things that happened before this.
"""
        # Immediately wake up the stream if something has already happened
        # since its last token.
if self.last_notified_token != token:
return _NotificationListener(defer.succeed(self.current_token))
else:
return _NotificationListener(self.notify_deferred.observe())
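    # Usage sketch (hypothetical caller, names assumed): a long-poll handler
    # parks on the listener's deferred and is woken by notify():
    #     listener = user_stream.new_listener(prev_token)
    #     await listener.deferred  # resolves with the new StreamToken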
class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))):
def __bool__(self):
return bool(self.events)
@attr.s(slots=True, frozen=True)
class _PendingRoomEventEntry:
event_pos = attr.ib(type=PersistedEventPosition)
extra_users = attr.ib(type=Collection[UserID])
room_id = attr.ib(type=str)
type = attr.ib(type=str)
state_key = attr.ib(type=Optional[str])
membership = attr.ib(type=Optional[str])
class Notifier:
"""This class is responsible for notifying any listeners when there are
new events available for it.
Primarily used from the /events stream.
"""
UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000
def __init__(self, hs: "synapse.server.HomeServer"):
self.user_to_user_stream = {} # type: Dict[str, _NotifierUserStream]
self.room_to_user_streams = {} # type: Dict[str, Set[_NotifierUserStream]]
self.hs = hs
self.storage = hs.get_storage()
self.event_sources = hs.get_event_sources()
self.store = hs.get_datastore()
self.pending_new_room_events = [] # type: List[_PendingRoomEventEntry]
# Called when there are new things to stream over replication
self.replication_callbacks = [] # type: List[Callable[[], None]]
# Called when remote servers have come back online after having been
# down.
self.remote_server_up_callbacks = [] # type: List[Callable[[str], None]]
self.clock = hs.get_clock()
self.appservice_handler = hs.get_application_service_handler()
self._pusher_pool = hs.get_pusherpool()
self.federation_sender = None
if hs.should_send_federation():
self.federation_sender = hs.get_federation_sender()
self.state_handler = hs.get_state_handler()
self.clock.looping_call(
self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
)
# This is not a very cheap test to perform, but it's only executed
# when rendering the metrics page, which is likely once per minute at
# most when scraping it.
def count_listeners():
all_user_streams = set() # type: Set[_NotifierUserStream]
for streams in list(self.room_to_user_streams.values()):
all_user_streams |= streams
for stream in list(self.user_to_user_stream.values()):
all_user_streams.add(stream)
return sum(stream.count_listeners() for stream in all_user_streams)
LaterGauge("synapse_notifier_listeners", "", [], count_listeners)
LaterGauge(
"synapse_notifier_rooms",
"",
[],
lambda: count(bool, list(self.room_to_user_streams.values())),
)
LaterGauge(
"synapse_notifier_users", "", [], lambda: len(self.user_to_user_stream)
)
def add_replication_callback(self, cb: Callable[[], None]):
"""Add a callback that will be called when some new data is available.
Callback is not given any arguments. It should *not* return a Deferred - if
it needs to do any asynchronous work, a background thread should be started and
wrapped with run_as_background_process.
"""
self.replication_callbacks.append(cb)
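        # Example callback shape (hypothetical names), per the contract above:
        #     def _on_replication_data():
        #         run_as_background_process("replication_poke", _do_async_work)
        #     notifier.add_replication_callback(_on_replication_data)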
def on_new_room_event(
self,
event: EventBase,
event_pos: PersistedEventPosition,
max_room_stream_token: RoomStreamToken,
extra_users: Optional[Collection[UserID]] = None,
):
"""Unwraps event and calls `on_new_room_event_args`."""
self.on_new_room_event_args(
event_pos=event_pos,
room_id=event.room_id,
event_type=event.type,
state_key=event.get("state_key"),
membership=event.content.get("membership"),
max_room_stream_token=max_room_stream_token,
extra_users=extra_users or [],
)
def on_new_room_event_args(
self,
room_id: str,
event_type: str,
state_key: Optional[str],
membership: Optional[str],
event_pos: PersistedEventPosition,
max_room_stream_token: RoomStreamToken,
extra_users: Optional[Collection[UserID]] = None,
):
"""Used by handlers to inform the notifier something has happened
in the room, room event wise.
This triggers the notifier to wake up any listeners that are
listening to the room, and any listeners for the users in the
`extra_users` param.
The events can be peristed out of order. The notifier will wait
until all previous events have been persisted before notifying
the client streams.
"""
self.pending_new_room_events.append(
_PendingRoomEventEntry(
event_pos=event_pos,
extra_users=extra_users or [],
room_id=room_id,
type=event_type,
state_key=state_key,
membership=membership,
)
)
self._notify_pending_new_room_events(max_room_stream_token)
self.notify_replication()
def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken):
"""Notify for the room events that were queued waiting for a previous
event to be persisted.
Args:
max_room_stream_token: The highest stream_id below which all
events have been persisted.
"""
pending = self.pending_new_room_events
self.pending_new_room_events = []
users = set() # type: Set[UserID]
rooms = set() # type: Set[str]
for entry in pending:
if entry.event_pos.persisted_after(max_room_stream_token):
self.pending_new_room_events.append(entry)
else:
if (
entry.type == EventTypes.Member
and entry.membership == Membership.JOIN
and entry.state_key
):
self._user_joined_room(entry.state_key, entry.room_id)
users.update(entry.extra_users)
rooms.add(entry.room_id)
if users or rooms:
self.on_new_event(
"room_key",
max_room_stream_token,
users=users,
rooms=rooms,
)
self._on_updated_room_token(max_room_stream_token)
def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken):
"""Poke services that might care that the room position has been
updated.
"""
# poke any interested application service.
self._notify_app_services(max_room_stream_token)
self._notify_pusher_pool(max_room_stream_token)
if self.federation_sender:
self.federation_sender.notify_new_events(max_room_stream_token)
def _notify_app_services(self, max_room_stream_token: RoomStreamToken):
try:
self.appservice_handler.notify_interested_services(max_room_stream_token)
except Exception:
logger.exception("Error notifying application services of event")
def _notify_app_services_ephemeral(
self,
stream_key: str,
new_token: Union[int, RoomStreamToken],
users: Optional[Collection[Union[str, UserID]]] = None,
):
try:
stream_token = None
if isinstance(new_token, int):
stream_token = new_token
self.appservice_handler.notify_interested_services_ephemeral(
stream_key, stream_token, users or []
)
except Exception:
logger.exception("Error notifying application services of event")
def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken):
try:
self._pusher_pool.on_new_notifications(max_room_stream_token)
except Exception:
logger.exception("Error pusher pool of event")
def on_new_event(
self,
stream_key: str,
new_token: Union[int, RoomStreamToken],
users: Optional[Collection[Union[str, UserID]]] = None,
rooms: Optional[Collection[str]] = None,
):
"""Used to inform listeners that something has happened event wise.
Will wake up all listeners for the given users and rooms.
"""
users = users or []
rooms = rooms or []
with Measure(self.clock, "on_new_event"):
user_streams = set()
log_kv(
{
"waking_up_explicit_users": len(users),
"waking_up_explicit_rooms": len(rooms),
}
)
for user in users:
user_stream = self.user_to_user_stream.get(str(user))
if user_stream is not None:
user_streams.add(user_stream)
for room in rooms:
user_streams |= self.room_to_user_streams.get(room, set())
time_now_ms = self.clock.time_msec()
for user_stream in user_streams:
try:
user_stream.notify(stream_key, new_token, time_now_ms)
except Exception:
logger.exception("Failed to notify listener")
self.notify_replication()
# Notify appservices
self._notify_app_services_ephemeral(
stream_key,
new_token,
users,
)
def on_new_replication_data(self) -> None:
"""Used to inform replication listeners that something has happened
without waking up any of the normal user event streams"""
self.notify_replication()
async def wait_for_events(
self,
user_id: str,
timeout: int,
callback: Callable[[StreamToken, StreamToken], Awaitable[T]],
room_ids=None,
from_token=StreamToken.START,
) -> T:
"""Wait until the callback returns a non empty response or the
timeout fires.
"""
user_stream = self.user_to_user_stream.get(user_id)
if user_stream is None:
current_token = self.event_sources.get_current_token()
if room_ids is None:
room_ids = await self.store.get_rooms_for_user(user_id)
user_stream = _NotifierUserStream(
user_id=user_id,
rooms=room_ids,
current_token=current_token,
time_now_ms=self.clock.time_msec(),
)
self._register_with_keys(user_stream)
result = None
prev_token = from_token
if timeout:
end_time = self.clock.time_msec() + timeout
while not result:
try:
now = self.clock.time_msec()
if end_time <= now:
break
# Now we wait for the _NotifierUserStream to be told there
# is a new token.
listener = user_stream.new_listener(prev_token)
listener.deferred = timeout_deferred(
listener.deferred,
(end_time - now) / 1000.0,
self.hs.get_reactor(),
)
with start_active_span("wait_for_events.deferred"):
log_kv(
{
"wait_for_events": "sleep",
"token": prev_token,
}
)
with PreserveLoggingContext():
await listener.deferred
log_kv(
{
"wait_for_events": "woken",
"token": user_stream.current_token,
}
)
current_token = user_stream.current_token
result = await callback(prev_token, current_token)
log_kv(
{
"wait_for_events": "result",
"result": bool(result),
}
)
if result:
break
# Update the prev_token to the current_token since nothing
# has happened between the old prev_token and the current_token
prev_token = current_token
except defer.TimeoutError:
log_kv({"wait_for_events": "timeout"})
break
except defer.CancelledError:
log_kv({"wait_for_events": "cancelled"})
break
if result is None:
            # This happens if there was no timeout or if the timeout had
            # already expired.
current_token = user_stream.current_token
result = await callback(prev_token, current_token)
return result
async def get_events_for(
self,
user: UserID,
pagination_config: PaginationConfig,
timeout: int,
is_guest: bool = False,
explicit_room_id: Optional[str] = None,
) -> EventStreamResult:
"""For the given user and rooms, return any new events for them. If
there are no new events wait for up to `timeout` milliseconds for any
new events to happen before returning.
If explicit_room_id is not set, the user's joined rooms will be polled
for events.
If explicit_room_id is set, that room will be polled for events only if
it is world readable or the user has joined the room.
"""
if pagination_config.from_token:
from_token = pagination_config.from_token
else:
from_token = self.event_sources.get_current_token()
limit = pagination_config.limit
room_ids, is_joined = await self._get_room_ids(user, explicit_room_id)
is_peeking = not is_joined
async def check_for_updates(
before_token: StreamToken, after_token: StreamToken
) -> EventStreamResult:
if after_token == before_token:
return EventStreamResult([], (from_token, from_token))
events = [] # type: List[EventBase]
end_token = from_token
for name, source in self.event_sources.sources.items():
keyname = "%s_key" % name
before_id = getattr(before_token, keyname)
after_id = getattr(after_token, keyname)
if before_id == after_id:
continue
new_events, new_key = await source.get_new_events(
user=user,
from_key=getattr(from_token, keyname),
limit=limit,
is_guest=is_peeking,
room_ids=room_ids,
explicit_room_id=explicit_room_id,
)
if name == "room":
new_events = await filter_events_for_client(
self.storage,
user.to_string(),
new_events,
is_peeking=is_peeking,
)
elif name == "presence":
now = self.clock.time_msec()
new_events[:] = [
{
"type": "m.presence",
"content": format_user_presence_state(event, now),
}
for event in new_events
]
events.extend(new_events)
end_token = end_token.copy_and_replace(keyname, new_key)
return EventStreamResult(events, (from_token, end_token))
user_id_for_stream = user.to_string()
if is_peeking:
# Internally, the notifier keeps an event stream per user_id.
# This is used by both /sync and /events.
# We want /events to be used for peeking independently of /sync,
# without polluting its contents. So we invent an illegal user ID
# (which thus cannot clash with any real users) for keying peeking
# over /events.
#
# I am sorry for what I have done.
user_id_for_stream = "_PEEKING_%s_%s" % (
explicit_room_id,
user_id_for_stream,
)
result = await self.wait_for_events(
user_id_for_stream,
timeout,
check_for_updates,
room_ids=room_ids,
from_token=from_token,
)
return result
async def _get_room_ids(
self, user: UserID, explicit_room_id: Optional[str]
) -> Tuple[Collection[str], bool]:
joined_room_ids = await self.store.get_rooms_for_user(user.to_string())
if explicit_room_id:
if explicit_room_id in joined_room_ids:
return [explicit_room_id], True
if await self._is_world_readable(explicit_room_id):
return [explicit_room_id], False
raise AuthError(403, "Non-joined access not allowed")
return joined_room_ids, True
async def _is_world_readable(self, room_id: str) -> bool:
state = await self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if state and "history_visibility" in state.content:
return (
state.content["history_visibility"] == HistoryVisibility.WORLD_READABLE
)
else:
return False
@log_function
def remove_expired_streams(self) -> None:
time_now_ms = self.clock.time_msec()
expired_streams = []
expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS
for stream in self.user_to_user_stream.values():
if stream.count_listeners():
continue
if stream.last_notified_ms < expire_before_ts:
expired_streams.append(stream)
for expired_stream in expired_streams:
expired_stream.remove(self)
@log_function
def _register_with_keys(self, user_stream: _NotifierUserStream):
self.user_to_user_stream[user_stream.user_id] = user_stream
for room in user_stream.rooms:
s = self.room_to_user_streams.setdefault(room, set())
s.add(user_stream)
def _user_joined_room(self, user_id: str, room_id: str):
new_user_stream = self.user_to_user_stream.get(user_id)
if new_user_stream is not None:
room_streams = self.room_to_user_streams.setdefault(room_id, set())
room_streams.add(new_user_stream)
new_user_stream.rooms.add(room_id)
def notify_replication(self) -> None:
"""Notify the any replication listeners that there's a new event"""
for cb in self.replication_callbacks:
cb()
def notify_remote_server_up(self, server: str):
"""Notify any replication that a remote server has come back up"""
# We call federation_sender directly rather than registering as a
# callback as a) we already have a reference to it and b) it introduces
# circular dependencies.
if self.federation_sender:
self.federation_sender.wake_destination(server)
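# Usage sketch (hypothetical stream/token values): handlers poke the notifier
# when something changes, e.g.
#     notifier.on_new_event("typing_key", new_token, rooms=[room_id])
# which wakes every request currently parked in wait_for_events() for that room.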
| 36.025316
| 87
| 0.605216
| 2,981
| 25,614
| 4.94901
| 0.161355
| 0.021013
| 0.015861
| 0.021962
| 0.224836
| 0.154341
| 0.114282
| 0.085813
| 0.067444
| 0.057344
| 0
| 0.001791
| 0.324198
| 25,614
| 710
| 88
| 36.076056
| 0.850491
| 0.194854
| 0
| 0.206316
| 0
| 0
| 0.037745
| 0.012089
| 0
| 0
| 0
| 0.001408
| 0
| 1
| 0.052632
| false
| 0
| 0.042105
| 0.004211
| 0.153684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ce4682d4472c3403cd709b201e4107d5de073fb
| 20,891
|
py
|
Python
|
pytorch3dunet/unet3d/predictor.py
|
searobbersduck/pytorch-3dunet
|
5bb8ed2b6966b2cd06b1dc676b62d1ad98329305
|
[
"MIT"
] | null | null | null |
pytorch3dunet/unet3d/predictor.py
|
searobbersduck/pytorch-3dunet
|
5bb8ed2b6966b2cd06b1dc676b62d1ad98329305
|
[
"MIT"
] | null | null | null |
pytorch3dunet/unet3d/predictor.py
|
searobbersduck/pytorch-3dunet
|
5bb8ed2b6966b2cd06b1dc676b62d1ad98329305
|
[
"MIT"
] | null | null | null |
import time
import h5py
import hdbscan
import numpy as np
import torch
from sklearn.cluster import MeanShift
from pytorch3dunet.datasets.hdf5 import SliceBuilder
from pytorch3dunet.unet3d.utils import get_logger
from pytorch3dunet.unet3d.utils import unpad
logger = get_logger('UNet3DPredictor')
class _AbstractPredictor:
def __init__(self, model, loader, output_file, config, **kwargs):
self.model = model
self.loader = loader
self.output_file = output_file
self.config = config
self.predictor_config = kwargs
@staticmethod
def _volume_shape(dataset):
# TODO: support multiple internal datasets
raw = dataset.raws[0]
if raw.ndim == 3:
return raw.shape
else:
return raw.shape[1:]
@staticmethod
def _get_output_dataset_names(number_of_datasets, prefix='predictions'):
if number_of_datasets == 1:
return [prefix]
else:
return [f'{prefix}{i}' for i in range(number_of_datasets)]
def predict(self):
raise NotImplementedError
class StandardPredictor(_AbstractPredictor):
"""
    Applies the model to the given dataset and saves the result in the `output_file` in the H5 format.
    Predictions from the network are kept in memory. If the results from the network don't fit into RAM,
    use `LazyPredictor` instead.
    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config, 'predictions{n}' is used as the default dataset name, where `n` denotes the index
    of the output head of the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
def __init__(self, model, loader, output_file, config, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
def predict(self):
out_channels = self.config['model'].get('out_channels')
if out_channels is None:
out_channels = self.config['model']['dt_out_channels']
prediction_channel = self.config.get('prediction_channel', None)
if prediction_channel is not None:
logger.info(f"Using only channel '{prediction_channel}' from the network output")
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} batches...')
        # dimensionality of the output predictions
volume_shape = self._volume_shape(self.loader.dataset)
if prediction_channel is None:
prediction_maps_shape = (out_channels,) + volume_shape
else:
# single channel prediction map
prediction_maps_shape = (1,) + volume_shape
logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')
avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True)
logger.info(f'Avoid block artifacts: {avoid_block_artifacts}')
# create destination H5 file
h5_output_file = h5py.File(self.output_file, 'w')
# allocate prediction and normalization arrays
logger.info('Allocating prediction and normalization arrays...')
prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape,
output_heads, h5_output_file)
# Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)
self.model.eval()
# Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied!
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# send batch to device
batch = batch.to(device)
# forward pass
predictions = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
predictions = [predictions]
# for each output head
for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
normalization_masks):
# convert to numpy array
prediction = prediction.cpu().numpy()
# for each batch sample
for pred, index in zip(prediction, indices):
# save patch index: (C,D,H,W)
if prediction_channel is None:
channel_slice = slice(0, out_channels)
else:
channel_slice = slice(0, 1)
index = (channel_slice,) + index
if prediction_channel is not None:
# use only the 'prediction_channel'
logger.info(f"Using channel '{prediction_channel}'...")
pred = np.expand_dims(pred[prediction_channel], axis=0)
logger.info(f'Saving predictions for slice:{index}...')
if avoid_block_artifacts:
# unpad in order to avoid block artifacts in the output probability maps
u_prediction, u_index = unpad(pred, index, volume_shape)
# accumulate probabilities into the output prediction array
prediction_map[u_index] += u_prediction
# count voxel visits for normalization
normalization_mask[u_index] += 1
else:
# accumulate probabilities into the output prediction array
prediction_map[index] += pred
# count voxel visits for normalization
normalization_mask[index] += 1
        # save results to the output file
self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset)
# close the output H5 file
h5_output_file.close()
def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
# initialize the output prediction arrays
prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)]
# initialize normalization mask in order to average out probabilities of overlapping patches
normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)]
return prediction_maps, normalization_masks
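    # Note: overlapping patches are averaged later (see _save_results), which
    # divides the accumulated probabilities by the visit counts: a voxel visited
    # twice with predictions 0.6 and 0.8 accumulates 1.4 and normalizes to 0.7.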
def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
# save probability maps
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,
prediction_datasets):
prediction_map = prediction_map / normalization_mask
if dataset.mirror_padding:
pad_width = dataset.pad_width
logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...')
prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width]
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip")
class LazyPredictor(StandardPredictor):
"""
    Applies the model to the given dataset and saves the result in the `output_file` in the H5 format.
    Predicted patches are saved directly into the H5 and are not kept in memory. Since this predictor
    is slower than the `StandardPredictor`, it should only be used when the predicted volume does not fit into RAM.
    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config, 'predictions{n}' is used as the default dataset name, where `n` denotes the index
    of the output head of the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
def __init__(self, model, loader, output_file, config, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
# allocate datasets for probability maps
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
prediction_maps = [
output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True,
compression='gzip')
for dataset_name in prediction_datasets]
# allocate datasets for normalization masks
normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
normalization_masks = [
output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True,
compression='gzip')
for dataset_name in normalization_datasets]
return prediction_maps, normalization_masks
def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
if dataset.mirror_padding:
            logger.warning(
f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}')
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
# normalize the prediction_maps inside the H5
for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps,
normalization_masks,
prediction_datasets,
normalization_datasets):
# split the volume into 4 parts and load each into the memory separately
logger.info(f'Normalizing {prediction_dataset}...')
z, y, x = prediction_map.shape[1:]
# take slices which are 1/27 of the original volume
patch_shape = (z // 3, y // 3, x // 3)
for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape):
logger.info(f'Normalizing slice: {index}')
prediction_map[index] /= normalization_mask[index]
# make sure to reset the slice that has been visited already in order to avoid 'double' normalization
# when the patches overlap with each other
normalization_mask[index] = 1
logger.info(f'Deleting {normalization_dataset}...')
del output_file[normalization_dataset]
class EmbeddingsPredictor(_AbstractPredictor):
"""
    Applies the embedding model to the given dataset and saves the result in the `output_file` in the H5 format.
    The resulting volume is the segmentation itself (not the embedding vectors), obtained by clustering the embeddings
    with the HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together.
"""
def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
self.iou_threshold = iou_threshold
self.noise_label = noise_label
self.clustering = clustering
assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported'
logger.info(f'IoU threshold: {iou_threshold}')
self.clustering_name = clustering
self.clustering = self._get_clustering(clustering, kwargs)
def predict(self):
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} patches...')
        # dimensionality of the output segmentation
volume_shape = self._volume_shape(self.loader.dataset)
logger.info(f'The shape of the output segmentation (DHW): {volume_shape}')
logger.info('Allocating segmentation array...')
# initialize the output prediction arrays
output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)]
# initialize visited_voxels arrays
visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)]
# Sets the module in evaluation mode explicitly
self.model.eval()
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# logger.info(f'Predicting embeddings for slice:{index}')
# send batch to device
batch = batch.to(device)
# forward pass
embeddings = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
embeddings = [embeddings]
for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations,
visited_voxels_arrays):
# convert to numpy array
prediction = prediction.cpu().numpy()
# iterate sequentially because of the current simple stitching that we're using
for pred, index in zip(prediction, indices):
# convert embeddings to segmentation with hdbscan clustering
segmentation = self._embeddings_to_segmentation(pred)
# stitch patches
self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array)
# save results
with h5py.File(self.output_file, 'w') as output_file:
prediction_datasets = self._get_output_dataset_names(output_heads,
prefix=f'segmentation/{self.clustering_name}')
for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets):
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip")
def _embeddings_to_segmentation(self, embeddings):
"""
        Cluster embedding vectors with the configured clustering algorithm and return the segmented volume.
Args:
embeddings (ndarray): 4D (CDHW) embeddings tensor
Returns:
3D (DHW) segmentation
"""
# shape of the output segmentation
output_shape = embeddings.shape[1:]
# reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C)
flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()
logger.info('Clustering embeddings...')
# perform clustering and reshape in order to get the segmentation volume
start = time.time()
clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)
logger.info(
f'Number of clusters found by {self.clustering}: {np.max(clusters)}. Duration: {time.time() - start} sec.')
return clusters
def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array):
"""
        Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array of visited
        voxels, merge the segmented patch (`segmentation`) into the `output_segmentation`.
Args:
segmentation (ndarray): segmented patch
index (tuple): position of the patch inside `output_segmentation` volume
output_segmentation (ndarray): current state of the output segmentation
visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`); visited
voxels will be marked by a number greater than 0
"""
index = tuple(index)
# get new unassigned label
max_label = np.max(output_segmentation) + 1
# make sure there are no clashes between current segmentation patch and the output_segmentation
# but keep the noise label
noise_mask = segmentation == self.noise_label
segmentation += int(max_label)
segmentation[noise_mask] = self.noise_label
# get the overlap mask in the current patch
overlap_mask = visited_voxels_array[index] > 0
# get the new labels inside the overlap_mask
new_labels = np.unique(segmentation[overlap_mask])
merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation)
# relabel new segmentation with the merged labels
for current_label, new_label in merged_labels:
segmentation[segmentation == new_label] = current_label
# update the output_segmentation
output_segmentation[index] = segmentation
# visit the patch
visited_voxels_array[index] += 1
def _merge_labels(self, current_segmentation, new_labels, new_segmentation):
def _most_frequent_label(labels):
unique, counts = np.unique(labels, return_counts=True)
ind = np.argmax(counts)
return unique[ind]
result = []
# iterate over new_labels and merge regions if the IoU exceeds a given threshold
for new_label in new_labels:
# skip 'noise' label assigned by hdbscan
if new_label == self.noise_label:
continue
new_label_mask = new_segmentation == new_label
# get only the most frequent overlapping label
most_frequent_label = _most_frequent_label(current_segmentation[new_label_mask])
# skip 'noise' label
if most_frequent_label == self.noise_label:
continue
current_label_mask = current_segmentation == most_frequent_label
# compute Jaccard index
iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask,
current_label_mask).sum()
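            # e.g. masks sharing 30 voxels out of a 40-voxel union give
            # IoU = 30 / 40 = 0.75, which exceeds the default threshold of 0.7
            # and therefore merges the labels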
if iou > self.iou_threshold:
# merge labels
result.append((most_frequent_label, new_label))
return result
def _get_clustering(self, clustering_alg, kwargs):
logger.info(f'Using {clustering_alg} for clustering')
if clustering_alg == 'hdbscan':
min_cluster_size = kwargs.get('min_cluster_size', 50)
            min_samples = kwargs.get('min_samples', None)
metric = kwargs.get('metric', 'euclidean')
cluster_selection_method = kwargs.get('cluster_selection_method', 'eom')
logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}')
return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,
cluster_selection_method=cluster_selection_method)
else:
bandwidth = kwargs['bandwidth']
logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True')
# use fast MeanShift with bin seeding
return MeanShift(bandwidth=bandwidth, bin_seeding=True)
| 49.387707
| 134
| 0.631372
| 2,343
| 20,891
| 5.42723
| 0.160478
| 0.025165
| 0.017301
| 0.022649
| 0.421595
| 0.357896
| 0.341617
| 0.301274
| 0.267301
| 0.240249
| 0
| 0.005181
| 0.297784
| 20,891
| 422
| 135
| 49.504739
| 0.861622
| 0.255756
| 0
| 0.269565
| 0
| 0.004348
| 0.111748
| 0.019515
| 0
| 0
| 0
| 0.00237
| 0.004348
| 1
| 0.078261
| false
| 0
| 0.03913
| 0
| 0.182609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ce65373a75b86fe5dcecdc0e2146bc5ea3033e1
| 3,593
|
py
|
Python
|
extra/convertBAMtoPILFER.py
|
MartaLoBalastegui/XICRA
|
74a7e74379c7e1b3fc1360d2c609994e884ee37a
|
[
"MIT"
] | 3
|
2021-05-16T21:13:22.000Z
|
2022-01-23T08:47:48.000Z
|
extra/convertBAMtoPILFER.py
|
MartaLoBalastegui/XICRA
|
74a7e74379c7e1b3fc1360d2c609994e884ee37a
|
[
"MIT"
] | 16
|
2021-03-11T10:51:25.000Z
|
2022-03-12T01:02:00.000Z
|
extra/convertBAMtoPILFER.py
|
MartaLoBalastegui/XICRA
|
74a7e74379c7e1b3fc1360d2c609994e884ee37a
|
[
"MIT"
] | 3
|
2021-03-05T10:07:38.000Z
|
2022-01-23T08:48:06.000Z
|
#!/usr/bin/env python
## useful imports
import time
import io
import os
import re
import sys
from sys import argv
import subprocess
## ARGV
if len(sys.argv) < 6:
print ("\nUsage:")
print ("python3 %s bam_file folder bedtools_bin samtools_bin logfile\n" %os.path.realpath(__file__))
exit()
bam_file = os.path.abspath(argv[1])
folder = argv[2]
bedtools_exe = argv[3]
samtools_exe = argv[4]
logFile = argv[5]
# start
output_file = open(logFile, 'a')
output_file.write("\nConvert BAM to Pilfer Input file:\n")
## Variables
dirname_name = os.path.dirname(bam_file)
split_name = os.path.splitext( os.path.basename(bam_file) )
bed_file = folder + '/' + split_name[0] + '.bed'
sam_file = folder + '/' + split_name[0] + '.sam'
pilfer_tmp = folder + '/' + split_name[0] + '.tmp.pilfer.bed'
pilfer_file = folder + '/' + split_name[0] + '.pilfer.bed'
## START
print ("\n+ Converting BAM file into PILFER input file")
## generate bed file with bedtools bamtobed -i bam_file
if (os.path.isfile(bed_file)):
print ("\t+ File %s already exists" %bed_file)
else:
cmd_bedtools = "%s bamtobed -i %s > %s" %(bedtools_exe, bam_file, bed_file)
output_file.write(cmd_bedtools)
output_file.write("\n")
try:
subprocess.check_output(cmd_bedtools, shell = True)
except Exception as exc:
print ('***ERROR:')
print (cmd_bedtools)
print('bedtools command generated an exception: %s' %exc)
exit()
## generate samtools
if (os.path.isfile(sam_file)):
print ("\t+ File %s already exists" %sam_file)
else:
cmd_samtools = "%s view %s > %s" %(samtools_exe, bam_file, sam_file)
output_file.write(cmd_samtools)
output_file.write("\n")
try:
subprocess.check_output(cmd_samtools, shell = True)
except Exception as exc:
print ('***ERROR:')
print (cmd_samtools)
print('samtools view command generated an exception: %s' %exc)
exit()
## generate paste filter tmp file
if (os.path.isfile(pilfer_tmp)):
print ("\t+ File %s already exists" %pilfer_tmp)
else:
## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v "OFS=\t" '{print $1, $2, $3, $16, $6}'
cmd_paste = "paste %s %s | awk -v \"OFS=\t\" \'{print $1, $2, $3, $16, $6}\' > %s" %(bed_file, sam_file, pilfer_tmp)
output_file.write(cmd_paste)
output_file.write("\n")
try:
subprocess.check_output(cmd_paste, shell = True)
except Exception as exc:
print ('***ERROR:')
print (cmd_paste)
print('paste bed sam command generated an exception: %s' %exc)
exit()
## parse pilfer tmp file
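## Each tmp line holds: chrom, start, end, seq::PU tag, strand. The loop below
## collapses runs of identical lines into a single record with an occurrence
## count, written as: chrom, start, end, seq::PI, count, strand.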
counter = 1
previous_line = ()
# Open file OUT
output_file = open(pilfer_file, 'w')
# Open file IN
fileHandler = open (pilfer_tmp, "r")
while True:
# Get next line from file
line = fileHandler.readline().strip()
# If line is empty then end of file reached
    if not line:
        break
seq = line.split('\t')[3]
real_seq = seq.split('::PU')
seq_len = len(str(real_seq[0]))
## Discard smaller
if (previous_line):
if (previous_line == line):
line = previous_line
counter += 1
else:
line_split = previous_line.split('\t')
output_file.write('%s\t%s\t%s\t%s::PI\t%s\t%s\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4]))
#counter += 1
while True:
#get next line
next_line = fileHandler.readline().strip()
if (next_line == line):
counter += 1
else:
line_split = line.split('\t')
output_file.write('%s\t%s\t%s\t%s::PI\t%s\t%s\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4]))
previous_line = next_line
counter = 1
                    break
## close and finish
fileHandler.close()
output_file.close()
| 27.219697
| 138
| 0.680768
| 568
| 3,593
| 4.146127
| 0.216549
| 0.057325
| 0.057325
| 0.010191
| 0.420807
| 0.318896
| 0.287473
| 0.248832
| 0.212314
| 0.157537
| 0
| 0.013236
| 0.15892
| 3,593
| 131
| 139
| 27.427481
| 0.766049
| 0.125244
| 0
| 0.329787
| 0
| 0.021277
| 0.194668
| 0.017989
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074468
| 0
| 0.074468
| 0.170213
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ce89a8d2a94f66d0921f4dfd7dff6f5d544c025
| 2,727
|
py
|
Python
|
app/reader.py
|
lcarnevale/proxy-mqtt2influx
|
89b3cd354b465d7451556a2d2ec49ac8688b4f17
|
[
"MIT"
] | null | null | null |
app/reader.py
|
lcarnevale/proxy-mqtt2influx
|
89b3cd354b465d7451556a2d2ec49ac8688b4f17
|
[
"MIT"
] | null | null | null |
app/reader.py
|
lcarnevale/proxy-mqtt2influx
|
89b3cd354b465d7451556a2d2ec49ac8688b4f17
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""Writer class based on InfluxDB
This implementation does its best to follow the Robert Martin's Clean code guidelines.
The comments follows the Google Python Style Guide:
https://github.com/google/styleguide/blob/gh-pages/pyguide.md
"""
__copyright__ = 'Copyright 2021, FCRlab at University of Messina'
__author__ = 'Lorenzo Carnevale <lcarnevale@unime.it>'
__credits__ = ''
__description__ = 'Reader class based on InfluxDB'
import time
import logging
import threading
import persistqueue
from datetime import datetime
from influxdb_client.client.write_api import SYNCHRONOUS
from influxdb_client import InfluxDBClient, Point, WritePrecision
class Reader:
def __init__(self, host, port, token, organization, bucket, mutex, verbosity):
self.__url = "http://%s:%s" % (host, port)
self.__token = token
self.__organization = organization
self.__bucket = bucket
self.__mutex = mutex
self.__reader = None
self.__setup_logging(verbosity)
def __setup_logging(self, verbosity):
format = "%(asctime)s %(filename)s:%(lineno)d %(levelname)s - %(message)s"
filename='log/mqtt2influx.log'
datefmt = "%d/%m/%Y %H:%M:%S"
level = logging.INFO
if (verbosity):
level = logging.DEBUG
logging.basicConfig(filename=filename, filemode='a', format=format, level=level, datefmt=datefmt)
def setup(self):
self.__reader = threading.Thread(
target = self.__reader_job,
args = (self.__url, self.__token, self.__organization, self.__bucket)
)
def __reader_job(self, url, token, organization, bucket):
self.__mutex.acquire()
q = persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True)
self.__mutex.release()
client = InfluxDBClient(url=url, token=token)
write_api = client.write_api(write_options=SYNCHRONOUS)
try:
while (True):
raw_data = q.get()
logging.debug("Just got new data")
logging.debug("Parsing data points")
data = [
{
"measurement": raw_data['measurement'],
"tags": raw_data['tags'],
"fields": raw_data['fields'],
"time": raw_data['time']
}
]
write_api.write(bucket, organization, data)
logging.info("Data into InfluxDB")
time.sleep(0.3)
except KeyboardInterrupt:
pass
def start(self):
self.__reader.start()
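# Usage sketch (hypothetical connection values):
#     import threading
#     reader = Reader('localhost', 8086, 'my-token', 'my-org', 'my-bucket',
#                     threading.Lock(), verbosity=False)
#     reader.setup()
#     reader.start()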
| 32.464286
| 105
| 0.604694
| 293
| 2,727
| 5.37884
| 0.47099
| 0.022208
| 0.020305
| 0.022843
| 0.032995
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004128
| 0.289329
| 2,727
| 84
| 106
| 32.464286
| 0.809082
| 0.10231
| 0
| 0
| 0
| 0.016949
| 0.137649
| 0.018025
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084746
| false
| 0.016949
| 0.118644
| 0
| 0.220339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ce89c46f636fde71ee0a887ac7403a640c90ce5
| 1,781
|
py
|
Python
|
example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py
|
DottaPaperella/TALight
|
580322c3121c9acde9827f996fd4e39e31d93a6f
|
[
"MIT"
] | null | null | null |
example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py
|
DottaPaperella/TALight
|
580322c3121c9acde9827f996fd4e39e31d93a6f
|
[
"MIT"
] | null | null | null |
example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py
|
DottaPaperella/TALight
|
580322c3121c9acde9827f996fd4e39e31d93a6f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from sys import stderr, exit, argv
from random import randrange
#from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
# METADATA OF THIS TAL_SERVICE:
problem="tiling_mxn-boards_with_1x2-boards"
service="is_tilable"
args_list = [
('m',int),
('n',int),
('my_conjecture',str),
('h',int),
('k',int),
('lang',str),
('ISATTY',bool),
]
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TAc.print(LANG.opening_msg, "green")
# START CODING YOUR SERVICE:
assert ENV['h']==1
assert ENV['k']==2
print()
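# A 1x2 board covers exactly two cells, so an m x n grid can be tiled only if
# m*n is even; the branches below encode this parity argument.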
if (ENV['m'] * ENV['n']) % 2 == 1:
if ENV['my_conjecture'] == "yes":
TAc.NO()
print(LANG.render_feedback("FALSE-is-not-tilable", f"Contrary to what you have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable. If you are not convinced you can submit a tiling of that grid to the service 'check_my_tiling'."))
if ENV['my_conjecture'] == "no":
TAc.OK()
print(LANG.render_feedback("TRUE-is-not-tilable", f"You are perfecty right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable."))
if (ENV['m'] * ENV['n']) % 2 == 0:
if ENV['my_conjecture'] == "yes":
TAc.OK()
print(LANG.render_feedback("TRUE-is-tilable", f"We agree on the fact that the {ENV['m']}x{ENV['n']}-grid is tilable. If you want to exhibit us a tiling for this grid you can submit it to the service 'check_my_tiling'."))
if ENV['my_conjecture'] == "no":
TAc.NO()
print(LANG.render_feedback("FALSE-is-tilable", f"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If you can not believe a tiling of the {ENV['m']}x{ENV['n']}-grid exists try the service 'gimme_hints_on_a_tiling'."))
exit(0)
| 35.62
| 242
| 0.64009
| 291
| 1,781
| 3.831615
| 0.340206
| 0.025112
| 0.03139
| 0.035874
| 0.378475
| 0.378475
| 0.324664
| 0.310314
| 0.191928
| 0.191928
| 0
| 0.006845
| 0.179674
| 1,781
| 49
| 243
| 36.346939
| 0.756331
| 0.06064
| 0
| 0.222222
| 0
| 0.111111
| 0.482014
| 0.113309
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cee12e1d9ee7123f1ed98d591b1f1a9ee9c89f2
| 10,845
|
py
|
Python
|
sgains/tool.py
|
KrasnitzLab/sgains
|
501c42bfdad4542725f00ca8199983eccf8c0b3f
|
[
"MIT"
] | 1
|
2017-09-08T05:09:59.000Z
|
2017-09-08T05:09:59.000Z
|
sgains/tool.py
|
KrasnitzLab/sgains
|
501c42bfdad4542725f00ca8199983eccf8c0b3f
|
[
"MIT"
] | 35
|
2017-07-31T04:13:40.000Z
|
2019-09-06T13:32:17.000Z
|
sgains/tool.py
|
KrasnitzLab/sgains
|
501c42bfdad4542725f00ca8199983eccf8c0b3f
|
[
"MIT"
] | 3
|
2017-09-08T05:10:34.000Z
|
2019-06-11T09:06:41.000Z
|
import os
import sys
from copy import deepcopy
import traceback
import functools
from collections import defaultdict
import yaml
from argparse import ArgumentParser,\
RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter
from sgains.configuration.parser import SgainsValidator, Config
from sgains.configuration.schema import sgains_schema
from sgains.executor import Executor
from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline
from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline
from sgains.pipelines.bins_pipeline import BinsPipeline
from sgains.pipelines.mapping_pipeline import MappingPipeline
from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline
from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline
from sgains.pipelines.varbin_pipeline import VarbinPipeline
from sgains.pipelines.r_pipeline import Rpipeline
from sgains.pipelines.composite_pipeline import CompositePipeline
SGAINS_COMMANDS = {
"genomeindex": {
"config_groups": ["aligner", "genome"],
"help": "builds appropriate hisat2 or bowtie index for the "
"reference genome",
},
"mappable_regions": {
"config_groups": ["aligner", "genome", "mappable_regions", "sge"],
"help": "finds all mappable regions in specified genome",
},
"bins": {
"config_groups": ["genome", "mappable_regions", "bins", "sge"],
"help": "calculates all bins boundaries for specified bins count "
"and read length",
},
"prepare": {
"config_groups": [
"aligner", "genome", "mappable_regions", "bins", "sge"],
"help": "combines all preparation steps ('genome', 'mappable-regions' "
"and 'bins') into single command",
},
"mapping": {
"config_groups": ["aligner", "genome", "reads", "mapping", "sge"],
"help": "performs mapping of cells reads to the reference genome",
},
"extract_10x": {
"config_groups": [
"data_10x", "reads", "sge"],
"help": "extracts cells reads from 10x Genomics datasets",
},
"varbin": {
"config_groups": ["bins", "mapping", "varbin", "sge"],
"help": "applies varbin algorithm to count read mappings in each bin",
},
"varbin_10x": {
"config_groups": [
"data_10x", "bins", "varbin", "sge"],
"help": "applies varbin algorithm to count read mappings in each bin "
"to 10x Genomics datasets without realigning",
},
"scclust": {
"config_groups": ["bins", "varbin", "scclust"],
"help": "segmentation and clustering based bin counts and "
"preparation of the SCGV input data"
},
"process": {
"config_groups": [
"aligner", "genome", "reads", "mapping", "bins", "varbin",
"scclust", "sge"],
"help": "combines all process steps ('mapping', 'varbin' "
"and 'scclust') into single command"
},
}
def build_common_options(parser):
parser.add_argument(
"-v", "--verbose",
dest="verbose",
action="count",
help="set verbosity level [default: %(default)s]",
default=0
)
parser.add_argument(
"-c", "--config",
dest="config",
help="configuration file",
metavar="path"
)
parser.add_argument(
"-n", "--dry-run",
dest="dry_run",
action="store_true",
help="perform a trial run with no changes made",
default=False
)
parser.add_argument(
"--force", "-F",
dest="force",
action="store_true",
help="allows overwriting nonempty results directory",
default=False
)
parser.add_argument(
"--parallel", "-p",
dest="parallel",
help="number of task to run in parallel",
type=int,
default=1
)
parser.add_argument(
"--sge",
dest="sge",
action="store_true",
help="parallelilizes commands using SGE cluster manager",
default=False
)
def _get_config_value(config, group_name, name):
if config is None:
return None
group = config.config.get(group_name)
if group is None:
return None
result = getattr(group, name)
return result
def build_cli_options(argparser, command=None, config=None, sge_flag=False):
work_dirname = os.getcwd()
if config is not None:
work_dirname = config.work_dirname
validator = SgainsValidator(
deepcopy(sgains_schema), work_dirname=work_dirname)
if command is None:
config_groups = list(validator.schema.keys())
else:
assert command in SGAINS_COMMANDS
command = SGAINS_COMMANDS[command]
config_groups = command["config_groups"]
for group_name in config_groups:
if group_name == "sge" and not sge_flag:
continue
group = validator.schema.get(group_name)
group_parser = argparser.add_argument_group(f"{group_name} group:")
assert group["type"] == "dict", (group_name, group)
group_schema = group["schema"]
for arg_name, arg_spec in group_schema.items():
name = f"--{arg_name.replace('_', '-')}"
            arg_type = arg_spec.get("type", "string")
if arg_type == "string":
arg_type = str
elif arg_type == "integer":
arg_type = int
elif arg_type == "float":
arg_type = float
elif arg_type == "list":
arg_type = list
else:
raise ValueError(f"wrong argument type {arg_type}")
help_data = None
meta_data = arg_spec.get("meta")
if meta_data is not None:
help_data = meta_data.get("help")
arg_default = _get_config_value(config, group_name, arg_name)
if arg_default is None:
arg_default = arg_spec.get("default")
group_parser.add_argument(
name,
help=help_data,
dest=arg_name,
type=arg_type,
default=arg_default)
return argparser
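# Sketch of the mapping performed above (hypothetical schema entry): a group
# "bins" containing {"bins_count": {"type": "integer", "default": 10000}}
# yields an argparse option "--bins-count" with type=int and default 10000.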
def parse_cli_options(args):
config_dict = defaultdict(dict)
work_dirname = os.getcwd()
if args.config is not None:
assert os.path.exists(args.config), args.config
with open(args.config, "r") as infile:
config_dict = yaml.safe_load(infile)
work_dirname = os.path.dirname(args.config)
validator = SgainsValidator(
deepcopy(sgains_schema), work_dirname=work_dirname)
result = defaultdict(dict)
config_groups = list(validator.schema.keys())
for group_name in config_groups:
if group_name == "sge" and not args.sge:
continue
group = validator.schema.get(group_name)
group_schema = group.get("schema")
if group_schema is None:
continue
group_result = {}
for arg_name in group_schema.keys():
arg_value = getattr(args, arg_name, None)
if arg_value is not None:
group_result[arg_name] = arg_value
else:
config_value = config_dict.get(group_name, None)
if config_value is not None:
config_value = config_value.get(arg_name, None)
if config_value is not None:
group_result[arg_name] = config_value
if group_result:
result[group_name] = group_result
config = Config.from_dict(result, work_dirname)
config.verbose = args.verbose
config.config_file = args.config
config.dry_run = args.dry_run
config.force = args.force
config.parallel = args.parallel
config.sge = args.sge
return config
def main(argv=sys.argv[1:]):
program_name = os.path.basename(sys.argv[0])
program_shortdesc = \
'sgains - sparse genomic analysis of individual nuclei by ' \
'sequencing pipeline'
program_description = '''%s
USAGE
''' % (program_shortdesc, )
try:
config = Config.parse_argv(argv)
sge_flag = Config.check_sge_argv(argv)
argparser = ArgumentParser(
description=program_description,
formatter_class=ArgumentDefaultsHelpFormatter)
build_common_options(argparser)
subparsers = argparser.add_subparsers(
title="sGAINS subcommands"
)
for command in SGAINS_COMMANDS:
command_name = command.replace("_", "-")
command_help = SGAINS_COMMANDS[command].get("help", "")
subparser = subparsers.add_parser(
name=command_name,
help=command_help,
formatter_class=ArgumentDefaultsHelpFormatter
)
build_cli_options(subparser, command, config, sge_flag=sge_flag)
subparser.set_defaults(func=functools.partial(execute, command))
args = argparser.parse_args(argv)
args.func(args)
except KeyboardInterrupt:
traceback.print_exc()
return 0
except Exception as e:
traceback.print_exc()
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
sys.stderr.write('\n')
return 2
def create_pipeline(command, config):
if command == "genomeindex":
return GenomeIndexPipeline(config)
elif command == "mappable_regions":
return MappableRegionsPipeline(config)
elif command == "bins":
return BinsPipeline(config)
elif command == "mapping":
return MappingPipeline(config)
elif command == "varbin":
return VarbinPipeline(config)
elif command == "scclust":
return Rpipeline(config)
elif command == "extract_10x":
return Extract10xPipeline(config)
elif command == "varbin_10x":
return Varbin10xPipeline(config)
elif command == "prepare":
pipelines = [
GenomeIndexPipeline(config),
MappableRegionsPipeline(config),
BinsPipeline(config),
]
return CompositePipeline(config, pipelines)
elif command == "process":
pipelines = [
MappingPipeline(config),
VarbinPipeline(config),
Rpipeline(config),
]
return CompositePipeline(config, pipelines)
raise ValueError(f"Unexpected command: {command}")
def execute(command, args):
config = parse_cli_options(args)
pipeline = create_pipeline(command, config)
assert pipeline is not None, command
executor = Executor(config)
executor.run_pipeline(pipeline)
if __name__ == "__main__":
sys.exit(main())
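# Invocation sketch (hypothetical config path and arguments):
#     python -m sgains.tool -c sgains.yml --parallel 4 process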
| 30.635593
| 79
| 0.616136
| 1,161
| 10,845
| 5.579673
| 0.204996
| 0.029639
| 0.026397
| 0.019296
| 0.180148
| 0.135073
| 0.084903
| 0.084903
| 0.054029
| 0.033652
| 0
| 0.004485
| 0.280406
| 10,845
| 353
| 80
| 30.72238
| 0.825602
| 0
| 0
| 0.158076
| 0
| 0
| 0.184786
| 0.002213
| 0
| 0
| 0
| 0
| 0.013746
| 1
| 0.024055
| false
| 0
| 0.068729
| 0
| 0.151203
| 0.006873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cee8f95a77e8d2ded7b9467b41b6c25c5fb7cdf
| 3,135
|
py
|
Python
|
lib/modeling/VGG16.py
|
rsumner31/Detectron
|
021685d42f7e8ac097e2bcf79fecb645f211378e
|
[
"Apache-2.0"
] | 429
|
2018-04-28T00:01:57.000Z
|
2021-12-18T12:53:22.000Z
|
lib/modeling/VGG16.py
|
absorbguo/Detectron
|
2f8161edc3092b0382cab535c977a180a8b3cc4d
|
[
"Apache-2.0"
] | 54
|
2018-12-26T13:04:32.000Z
|
2020-04-24T04:09:30.000Z
|
lib/modeling/VGG16.py
|
absorbguo/Detectron
|
2f8161edc3092b0382cab535c977a180a8b3cc4d
|
[
"Apache-2.0"
] | 96
|
2018-12-24T05:12:36.000Z
|
2021-04-23T15:51:21.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""VGG16 from https://arxiv.org/abs/1409.1556."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from core.config import cfg
def add_VGG16_conv5_body(model):
model.Conv('data', 'conv1_1', 3, 64, 3, pad=1, stride=1)
model.Relu('conv1_1', 'conv1_1')
model.Conv('conv1_1', 'conv1_2', 64, 64, 3, pad=1, stride=1)
model.Relu('conv1_2', 'conv1_2')
model.MaxPool('conv1_2', 'pool1', kernel=2, pad=0, stride=2)
model.Conv('pool1', 'conv2_1', 64, 128, 3, pad=1, stride=1)
model.Relu('conv2_1', 'conv2_1')
model.Conv('conv2_1', 'conv2_2', 128, 128, 3, pad=1, stride=1)
model.Relu('conv2_2', 'conv2_2')
model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2)
model.StopGradient('pool2', 'pool2')
model.Conv('pool2', 'conv3_1', 128, 256, 3, pad=1, stride=1)
model.Relu('conv3_1', 'conv3_1')
model.Conv('conv3_1', 'conv3_2', 256, 256, 3, pad=1, stride=1)
model.Relu('conv3_2', 'conv3_2')
model.Conv('conv3_2', 'conv3_3', 256, 256, 3, pad=1, stride=1)
model.Relu('conv3_3', 'conv3_3')
model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2)
model.Conv('pool3', 'conv4_1', 256, 512, 3, pad=1, stride=1)
model.Relu('conv4_1', 'conv4_1')
model.Conv('conv4_1', 'conv4_2', 512, 512, 3, pad=1, stride=1)
model.Relu('conv4_2', 'conv4_2')
model.Conv('conv4_2', 'conv4_3', 512, 512, 3, pad=1, stride=1)
model.Relu('conv4_3', 'conv4_3')
model.MaxPool('conv4_3', 'pool4', kernel=2, pad=0, stride=2)
model.Conv('pool4', 'conv5_1', 512, 512, 3, pad=1, stride=1)
model.Relu('conv5_1', 'conv5_1')
model.Conv('conv5_1', 'conv5_2', 512, 512, 3, pad=1, stride=1)
model.Relu('conv5_2', 'conv5_2')
model.Conv('conv5_2', 'conv5_3', 512, 512, 3, pad=1, stride=1)
blob_out = model.Relu('conv5_3', 'conv5_3')
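    # Four stride-2 max-pools have each halved the spatial resolution, hence
    # the 1/16 spatial scale returned below alongside the 512 output channels.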
return blob_out, 512, 1. / 16.
def add_VGG16_roi_fc_head(model, blob_in, dim_in, spatial_scale):
model.RoIFeatureTransform(
blob_in,
'pool5',
blob_rois='rois',
method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
resolution=7,
sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
model.FC('pool5', 'fc6', dim_in * 7 * 7, 4096)
model.Relu('fc6', 'fc6')
model.FC('fc6', 'fc7', 4096, 4096)
blob_out = model.Relu('fc7', 'fc7')
return blob_out, 4096
| 41.25
| 78
| 0.648166
| 500
| 3,135
| 3.87
| 0.268
| 0.052713
| 0.033592
| 0.073902
| 0.272868
| 0.25323
| 0.25323
| 0.241344
| 0.189147
| 0.100258
| 0
| 0.111963
| 0.168102
| 3,135
| 75
| 79
| 41.8
| 0.629985
| 0.194258
| 0
| 0
| 0
| 0
| 0.183539
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.09434
| 0
| 0.169811
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ceed4646921c1456f0b28f435da564f3dae7896
| 2,913
|
py
|
Python
|
setup.py
|
yangjing1127/xmind2testcase
|
49a581159a0d8e028f89939777399493662df111
|
[
"MIT"
] | 537
|
2018-12-26T03:02:54.000Z
|
2022-03-30T17:41:53.000Z
|
setup.py
|
yangjing1127/xmind2testcase
|
49a581159a0d8e028f89939777399493662df111
|
[
"MIT"
] | 49
|
2019-01-08T09:59:15.000Z
|
2022-03-30T00:58:47.000Z
|
setup.py
|
yangjing1127/xmind2testcase
|
49a581159a0d8e028f89939777399493662df111
|
[
"MIT"
] | 190
|
2018-12-29T07:09:48.000Z
|
2022-03-31T01:55:02.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import setup, find_packages, Command
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as f: # custom
exec(f.read(), about)
with io.open('README.md', encoding='utf-8') as f:
long_description = f.read()
install_requires = [ # custom
"xmind",
"flask",
"arrow",
]
class PyPiCommand(Command):
""" Build and publish this package and make a tag.
Support: python setup.py pypi
Copied from requests_html
"""
user_options = []
@staticmethod
def status(s):
"""Prints things in green color."""
print('\033[0;32m{0}\033[0m'.format(s))
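# Note: '\033[0;32m' is the ANSI escape sequence for green text and '\033[0m'
# resets the color, so the whole message is printed in green.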
def initialize_options(self):
""" override
"""
pass
def finalize_options(self):
""" override
"""
pass
def run(self):
self.status('Building Source and Wheel (universal) distribution...')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine...')
os.system('twine upload dist/*')
self.status('Publishing git tags...')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
try:
self.status('Removing current build artifacts...')
rmtree(os.path.join(here, 'dist'))
rmtree(os.path.join(here, 'build'))
rmtree(os.path.join(here, 'xmind2testcase.egg-info')) # custom
except OSError:
pass
self.status('Congratulations! Uploaded to PyPI and published the git tag successfully...')
sys.exit()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=long_description,
long_description_content_type='text/markdown',
keywords=about['__keywords__'],
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
license=about['__license__'],
packages=find_packages(exclude=['tests', 'test.*', 'docs']), # custom
package_data={ # custom
'': ['README.md'],
'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'],
},
install_requires=install_requires,
extras_require={},
python_requires='>=3.0, <4', # custom
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={ # custom
'console_scripts': [
'xmind2testcase=xmind2testcase.cli:cli_main',
]
},
cmdclass={
# python3 setup.py pypi
'pypi': PyPiCommand
}
)
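The script above pulls all package metadata from `xmind2testcase/__about__.py` via exec(). The key names follow directly from the about[...] lookups in setup(); a hypothetical sketch of that file, with illustrative values only:

# Hypothetical xmind2testcase/__about__.py (values are placeholders):
__title__ = 'xmind2testcase'
__version__ = '0.0.0'
__description__ = 'Convert XMind files into test cases'
__keywords__ = 'xmind testcase'
__author__ = 'someone'
__author_email__ = 'someone@example.com'
__url__ = 'https://example.com/xmind2testcase'
__license__ = 'MIT'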
| 28.281553
| 100
| 0.604875
| 327
| 2,913
| 5.180428
| 0.486239
| 0.021251
| 0.023613
| 0.033058
| 0.141677
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011312
| 0.241332
| 2,913
| 102
| 101
| 28.558824
| 0.755204
| 0.096807
| 0
| 0.04
| 0
| 0
| 0.30811
| 0.025223
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0.04
| 0.066667
| 0
| 0.146667
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cef0095a10852826052b744b28e1db78c985b8d
| 2,670
|
py
|
Python
|
skultrafast/styles.py
|
Tillsten/skultrafast
|
778eaf1539b6d85f21ac53b011472605673ef7e8
|
[
"BSD-3-Clause"
] | 10
|
2019-02-17T15:57:51.000Z
|
2021-11-15T02:00:33.000Z
|
skultrafast/styles.py
|
cZahn/skultrafast
|
23572ba9ea32238f34a8a15390fb572ecd8bc6fa
|
[
"BSD-3-Clause"
] | 1
|
2019-01-17T11:56:38.000Z
|
2019-07-11T15:30:58.000Z
|
skultrafast/styles.py
|
cZahn/skultrafast
|
23572ba9ea32238f34a8a15390fb572ecd8bc6fa
|
[
"BSD-3-Clause"
] | 6
|
2018-11-08T14:11:06.000Z
|
2021-09-01T14:53:02.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 21:33:24 2015
@author: Tillsten
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
tableau20 = [(r/255., g/255., b/255.) for r, g, b in tableau20]
#plt.rcParams['savefig.dpi'] = 110
#plt.rcParams['font.family'] = 'Vera Sans'
out_ticks = {'xtick.direction': 'out',
'xtick.major.width': 1.5,
'xtick.minor.width': 1,
'xtick.major.size': 6,
'xtick.minor.size': 3,
'xtick.minor.visible': True,
'ytick.direction': 'out',
'ytick.major.width': 1.5,
'ytick.minor.width': 1,
'ytick.major.size': 6,
'ytick.minor.size': 3,
'ytick.minor.visible': True,
'axes.spines.top': False,
'axes.spines.right': False,
'text.hinting': True,
'axes.titlesize': 'xx-large',
'axes.titleweight': 'semibold',
}
plt.figure(figsize=(6,4))
with plt.style.context(out_ticks):
ax = plt.subplot(111)
x = np.linspace(0, 7, 1000)
y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))#*np.cos(x/0.05*(2*np.pi))
l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey')
l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey')
l, = plt.plot(x, y, lw=1.1)
#l.set_clip_on(0)
plt.tick_params(which='both', top=False, right=False)
plt.margins(0.01)
ax.text(7, 1, r'$y(t)=\exp\left(-t/1.5\right)\cos(\omega_1t)\cos(\omega_2t)$',
fontsize=18, va='top', ha='right')
#plt.title("Hallo")
plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude')
ax = plt.axes([0.57, 0.25, 0.3, .2])
#ax.plot(np.fft.fftfreq(x.size)[:y.size/2], abs(np.fft.fft(y))[:y.size/2])
ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size/2],
abs(np.fft.fft(y))[:y.size/2], alpha=0.2, color='r')
ax.set_xlim(0, 10)
ax.set_xlabel("Frequency")
ax.xaxis.labelpad = 1
plt.locator_params(nbins=4)
plt.tick_params(which='both', top=False, right=False)
plt.tick_params(which='minor', bottom=False, left=False)
#plt.grid(1, axis='y', linestyle='-', alpha=0.3, lw=.5)
plt.show()
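Since out_ticks is a plain rcParams mapping, the same style can be reused for any figure, not just the demo above. A minimal sketch (the plotted data are illustrative):

import matplotlib.pyplot as plt

with plt.style.context(out_ticks):   # outward ticks, no top/right spines
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    plt.show()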
| 37.083333
| 83
| 0.522846
| 416
| 2,670
| 3.324519
| 0.430288
| 0.008677
| 0.017354
| 0.015184
| 0.177151
| 0.146782
| 0.146782
| 0.146782
| 0.146782
| 0.146782
| 0
| 0.140759
| 0.2603
| 2,670
| 71
| 84
| 37.605634
| 0.559494
| 0.126217
| 0
| 0.04
| 0
| 0.02
| 0.183356
| 0.026702
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.06
| 0
| 0.06
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cef6acdfa1f2191c94118bdb071a657a3a738d4
| 3,634
|
py
|
Python
|
src/oci/devops/models/github_build_run_source.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2020-09-10T22:09:45.000Z
|
2021-12-24T17:00:07.000Z
|
src/oci/devops/models/github_build_run_source.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/devops/models/github_build_run_source.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .build_run_source import BuildRunSource
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class GithubBuildRunSource(BuildRunSource):
"""
Specifies details of a build run through GitHub.
"""
def __init__(self, **kwargs):
"""
Initializes a new GithubBuildRunSource object with values from keyword arguments. The default value of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute
of this class is ``GITHUB`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param source_type:
The value to assign to the source_type property of this GithubBuildRunSource.
Allowed values for this property are: "MANUAL", "GITHUB", "GITLAB", "DEVOPS_CODE_REPOSITORY"
:type source_type: str
:param trigger_id:
The value to assign to the trigger_id property of this GithubBuildRunSource.
:type trigger_id: str
:param trigger_info:
The value to assign to the trigger_info property of this GithubBuildRunSource.
:type trigger_info: oci.devops.models.TriggerInfo
"""
self.swagger_types = {
'source_type': 'str',
'trigger_id': 'str',
'trigger_info': 'TriggerInfo'
}
self.attribute_map = {
'source_type': 'sourceType',
'trigger_id': 'triggerId',
'trigger_info': 'triggerInfo'
}
self._source_type = None
self._trigger_id = None
self._trigger_info = None
self._source_type = 'GITHUB'
@property
def trigger_id(self):
"""
**[Required]** Gets the trigger_id of this GithubBuildRunSource.
The trigger that invoked the build run.
:return: The trigger_id of this GithubBuildRunSource.
:rtype: str
"""
return self._trigger_id
@trigger_id.setter
def trigger_id(self, trigger_id):
"""
Sets the trigger_id of this GithubBuildRunSource.
The trigger that invoked the build run.
:param trigger_id: The trigger_id of this GithubBuildRunSource.
:type: str
"""
self._trigger_id = trigger_id
@property
def trigger_info(self):
"""
**[Required]** Gets the trigger_info of this GithubBuildRunSource.
:return: The trigger_info of this GithubBuildRunSource.
:rtype: oci.devops.models.TriggerInfo
"""
return self._trigger_info
@trigger_info.setter
def trigger_info(self, trigger_info):
"""
Sets the trigger_info of this GithubBuildRunSource.
:param trigger_info: The trigger_info of this GithubBuildRunSource.
:type: oci.devops.models.TriggerInfo
"""
self._trigger_info = trigger_info
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
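Thanks to the @init_model_state_from_kwargs decorator, instances are normally built from keyword arguments matching swagger_types. A minimal, hypothetical usage sketch (the trigger OCID is a placeholder):

source = GithubBuildRunSource(trigger_id='ocid1.trigger.oc1..example')
print(source.trigger_id)   # 'ocid1.trigger.oc1..example'
print(repr(source))        # formatted_flat_dict rendering of the model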
| 33.648148
| 245
| 0.660704
| 439
| 3,634
| 5.241458
| 0.305239
| 0.070404
| 0.124294
| 0.052151
| 0.33681
| 0.235984
| 0.08518
| 0.060843
| 0.060843
| 0.060843
| 0
| 0.006711
| 0.26197
| 3,634
| 107
| 246
| 33.962617
| 0.85123
| 0.520914
| 0
| 0.05
| 0
| 0
| 0.083217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.075
| 0.05
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cf24c69a41740779ba55ee7c2d11c15c8feec7e
| 12,133
|
py
|
Python
|
aiida_fleur/tests/tools/test_common_fleur_wf.py
|
anoopkcn/aiida-fleur
|
5d4cc2092b7c3ce5402f1d4b89787eae53b2e60f
|
[
"MIT"
] | null | null | null |
aiida_fleur/tests/tools/test_common_fleur_wf.py
|
anoopkcn/aiida-fleur
|
5d4cc2092b7c3ce5402f1d4b89787eae53b2e60f
|
[
"MIT"
] | null | null | null |
aiida_fleur/tests/tools/test_common_fleur_wf.py
|
anoopkcn/aiida-fleur
|
5d4cc2092b7c3ce5402f1d4b89787eae53b2e60f
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import pytest
import os
# is_code
def test_is_code_interface(fixture_code):
from aiida_fleur.tools.common_fleur_wf import is_code
assert is_code('random_string') is None
assert is_code('fleur.inpGUT') is None
assert is_code(99999) is None
code = fixture_code('fleur.inpgen')
code.store()
assert is_code(code.uuid)
assert is_code(code.pk)
assert is_code('@'.join([code.label, code.get_computer_name()]))
assert is_code(code)
def test_get_inputs_fleur():
'''
Tests if get_inputs_fleur assembles inputs correctly.
Note that it is the job of FleurCalculation
to check whether the input types are correct, i.e. that 'code' is a Fleur code, etc.
'''
from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur
from aiida.orm import Dict
inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp',
'options': {'custom_scheduler_commands': 'test_command'}, 'label': 'label',
'description': 'description', 'settings': {'test': 1}, 'serial': False}
results = get_inputs_fleur(**inputs)
out_options = results['options'].get_dict()
out_settings = results['settings'].get_dict()
assert results['code'] == 'code'
assert results['fleurinpdata'] == 'fleurinp'
assert results['parent_folder'] == 'remote'
assert results['description'] == 'description'
assert results['label'] == 'label'
assert out_options == {'custom_scheduler_commands': 'test_command',
'withmpi': True}
assert out_settings == {'test': 1}
inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp',
'options': {'custom_scheduler_commands': 'test_command'}, 'serial': True}
results = get_inputs_fleur(**inputs)
out_options = results['options'].get_dict()
assert results['description'] == ''
assert results['label'] == ''
assert out_options == {'custom_scheduler_commands': 'test_command',
'withmpi': False, 'resources': {"num_machines": 1}}
def test_get_inputs_inpgen(fixture_code, generate_structure):
'''
Tests if get_inputs_inpgen assembles inputs correctly.
Note that it is the job of FleurinputgenCalculation
to check whether the input types are correct, i.e. that 'code' is a Fleur code, etc.
'''
from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen
from aiida.orm import Dict
code = fixture_code('fleur.inpgen')
structure = generate_structure()
params = Dict(dict={'test': 1})
inputs = {'structure': structure, 'inpgencode': code, 'options': {},
'label': 'label', 'description': 'description',
'params': params}
returns = {'metadata': {
'options': {'withmpi': False, 'resources': {'num_machines': 1}},
'description': 'description', 'label': 'label'},
'code': code, 'parameters': params, 'structure': structure
}
assert get_inputs_inpgen(**inputs) == returns
# repeat without a label and description
inputs = {'structure': structure, 'inpgencode': code, 'options': {},
'params': params}
returns = {'metadata': {
'options': {'withmpi': False, 'resources': {'num_machines': 1}},
'description': '', 'label': ''},
'code': code, 'parameters': params, 'structure': structure}
assert get_inputs_inpgen(**inputs) == returns
@pytest.mark.skip(reason="Test is not implemented")
def test_get_scheduler_extras():
from aiida_fleur.tools.common_fleur_wf import get_scheduler_extras
# test_and_get_codenode
def test_test_and_get_codenode_inpgen(fixture_code):
from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode
from aiida.orm import Code
from aiida.common.exceptions import NotExistent
# install code setup code
code = fixture_code('fleur.inpgen')
code_fleur = fixture_code('fleur.fleur')
code_fleur.label = 'fleur_test'
code_fleur.store()
expected = 'fleur.inpgen'
nonexpected = 'fleur.fleur'
not_existing = 'fleur.not_existing'
assert isinstance(test_and_get_codenode(code, expected), Code)
with pytest.raises(ValueError) as msg:
test_and_get_codenode(code, nonexpected, use_exceptions=True)
assert str(msg.value) == ("Given Code node is not of expected code type.\n"
"Valid labels for a fleur.fleur executable are:\n"
"* fleur_test@localhost-test")
with pytest.raises(ValueError) as msg:
test_and_get_codenode(code, not_existing, use_exceptions=True)
assert str(msg.value) == ("Code not valid, and no valid codes for fleur.not_existing.\n"
"Configure at least one first using\n"
" verdi code setup")
def test_get_kpoints_mesh_from_kdensity(generate_structure):
from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity
from aiida.orm import KpointsData
a, b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1)
assert a == ([21, 21, 21], [0.0, 0.0, 0.0])
assert isinstance(b, KpointsData)
@pytest.mark.skip(reason="Test is not implemented")
def test_determine_favorable_reaction():
from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction
# @pytest.mark.skip(reason="There seems to be now way to add outputs to CalcJobNode")
def test_performance_extract_calcs(fixture_localhost,
generate_calc_job_node):
from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs
from aiida.common.links import LinkType
from aiida.orm import Dict
out = Dict(dict={'title': 'A Fleur input generator calculation with aiida',
'energy': -138529.7052157,
'bandgap': 6.0662e-06,
'end_date': {'date': '2019/11/12', 'time': '16:12:08'},
'unparsed': [],
'walltime': 43,
'warnings': {'info': {}, 'debug': {}, 'error': {}, 'warning': {}},
'start_date': {'date': '2019/11/12', 'time': '16:11:25'},
'parser_info': 'AiiDA Fleur Parser v0.2beta',
'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711',
'creator_name': 'fleur 30',
'energy_units': 'eV',
'kmax': 4.2,
'fermi_energy': 0.0605833326,
'spin_density': 0.0792504665,
'bandgap_units': 'eV',
'force_largest': 0.0,
'energy_hartree': -5090.8728101494,
'walltime_units': 'seconds',
'charge_density1': 0.0577674505,
'charge_density2': 0.0461840944,
'number_of_atoms': 4,
'parser_warnings': [],
'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162],
'number_of_kpoints': 8,
'number_of_species': 1,
'fermi_energy_units': 'Htr',
'sum_of_eigenvalues': -2973.4129786677,
'output_file_version': '0.27',
'energy_hartree_units': 'Htr',
'number_of_atom_types': 4,
'number_of_iterations': 11,
'number_of_symmetries': 8,
'energy_core_electrons': -2901.8120489845,
'magnetic_moment_units': 'muBohr',
'overall_charge_density': 0.0682602474,
'creator_target_structure': ' ',
'energy_valence_electrons': -71.6009296831,
'magnetic_spin_up_charges': [9.1494766577,
9.1494806151,
9.1494806833,
9.1494806834],
'orbital_magnetic_moments': [],
'density_convergence_units': 'me/bohr^3',
'number_of_spin_components': 2,
'charge_den_xc_den_integral': -223.295208608,
'magnetic_spin_down_charges': [5.777470284,
5.7775460208,
5.7775477657,
5.7775477672],
'number_of_iterations_total': 11,
'creator_target_architecture': 'GEN',
'orbital_magnetic_moment_units': 'muBohr',
'orbital_magnetic_spin_up_charges': [],
'orbital_magnetic_spin_down_charges': []})
out.store()
node = generate_calc_job_node('fleur.fleur', fixture_localhost)
node.store()
out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters')
result = performance_extract_calcs([node.pk])
assert result == {'n_symmetries': [8], 'n_spin_components': [2], 'n_kpoints': [8],
'n_iterations': [11], 'walltime_sec': [43],
'walltime_sec_per_it': [3.909090909090909],
'n_iterations_total': [11], 'density_distance': [0.0682602474],
'computer': ['localhost-test'],
'n_atoms': [4], 'kmax': [4.2], 'cost': [75866.11200000001],
'costkonstant': [147.02734883720933], 'walltime_sec_cor': [43],
'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326],
'bandgap': [6.0662e-06], 'energy': [-138529.7052157],
'force_largest': [0.0],
'ncores': [12], 'pk': [node.pk], 'uuid': [node.uuid],
'serial': [False],
'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]}
inputs_optimize = [(4, 8, 3, True, 0.5, None, 720),
(4, 8, 3, True, 2, None, 720),
(4, 8, 3, True, 100, None, 720),
(4, 8, 3, True, 100, None, 720, 0.5),
(4, 8, 3, False, 0.5, None, 720)]
results_optimize = [
(4, 3, 8, 'Computational setup is perfect! Nodes: 4, MPIs per node 3, OMP per MPI 8. Number of k-points is 720'),
(4, 6, 4, 'Computational setup is perfect! Nodes: 4, MPIs per node 6, OMP per MPI 4. Number of k-points is 720'),
(4, 12, 2, 'Computational setup is perfect! Nodes: 4, MPIs per node 12, OMP per MPI 2. Number of k-points is 720'),
(3, 24, 1, 'WARNING: Changed the number of nodes from 4 to 3'),
(4, 20, 1, 'WARNING: Changed the number of MPIs per node from 8 to 20 an OMP from 3 to 1. Changed the number of nodes from 4 to 4. Number of k-points is 720.')]
@pytest.mark.parametrize('input,result_correct', zip(inputs_optimize, results_optimize))
def test_optimize_calc_options(input, result_correct):
from aiida_fleur.tools.common_fleur_wf import optimize_calc_options
result = optimize_calc_options(*input)
assert result == result_correct
def test_find_last_in_restart(fixture_localhost,
generate_calc_job_node, generate_work_chain_node):
from aiida_fleur.tools.common_fleur_wf import find_last_in_restart
from aiida.common.links import LinkType
node1 = generate_calc_job_node('fleur.fleur', fixture_localhost)
node2 = generate_calc_job_node('fleur.fleur', fixture_localhost)
node3 = generate_calc_job_node('fleur.fleur', fixture_localhost)
node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost)
node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL')
node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL')
node3.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL')
node1.store()
node2.store()
node3.store()
result = find_last_in_restart(node_main)
assert result == node3.uuid
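For readability, the first parametrized case above pairs up as follows (taken verbatim from inputs_optimize and results_optimize):

# optimize_calc_options(4, 8, 3, True, 0.5, None, 720) is expected to return
# (4, 3, 8, 'Computational setup is perfect! Nodes: 4, MPIs per node 3, '
#           'OMP per MPI 8. Number of k-points is 720')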
| 43.332143
| 164
| 0.597461
| 1,399
| 12,133
| 4.932809
| 0.231594
| 0.023475
| 0.020287
| 0.027532
| 0.437038
| 0.407332
| 0.352558
| 0.314592
| 0.281843
| 0.218519
| 0
| 0.069417
| 0.282865
| 12,133
| 279
| 165
| 43.487455
| 0.72371
| 0.042117
| 0
| 0.146341
| 0
| 0.019512
| 0.268343
| 0.053237
| 0
| 0
| 0
| 0
| 0.131707
| 1
| 0.04878
| false
| 0
| 0.102439
| 0
| 0.15122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cf2ae07e37425db960a133be2b5c330c6ba9916
| 36,957
|
py
|
Python
|
src/probnum/random_variables/_random_variable.py
|
admdev8/probnum
|
792b6299bac247cf8b1b5056756f0f078855d83a
|
[
"MIT"
] | null | null | null |
src/probnum/random_variables/_random_variable.py
|
admdev8/probnum
|
792b6299bac247cf8b1b5056756f0f078855d83a
|
[
"MIT"
] | 2
|
2020-12-28T19:37:16.000Z
|
2020-12-28T19:37:31.000Z
|
src/probnum/random_variables/_random_variable.py
|
admdev8/probnum
|
792b6299bac247cf8b1b5056756f0f078855d83a
|
[
"MIT"
] | null | null | null |
"""
Random Variables.
This module implements random variables. Random variables are the main inputs
and outputs of probabilistic numerical methods.
"""
from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union
import numpy as np
from probnum import utils as _utils
from probnum.type import (
ArrayLikeGetitemArgType,
DTypeArgType,
FloatArgType,
RandomStateArgType,
RandomStateType,
ShapeArgType,
ShapeType,
)
try:
# functools.cached_property is only available in Python >=3.8
from functools import cached_property
except ImportError:
from cached_property import cached_property
_ValueType = TypeVar("ValueType")
class RandomVariable(Generic[_ValueType]):
"""
Random variables are the main objects used by probabilistic numerical methods.
Every probabilistic numerical method takes a random variable encoding the prior
distribution as input and outputs a random variable whose distribution encodes the
uncertainty arising from finite computation. The generic signature of a
probabilistic numerical method is:
``output_rv = probnum_method(input_rv, method_params)``
In practice, most random variables used by methods in ProbNum have Dirac or Gaussian
measure.
Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays and
linear operators. This may change their ``distribution`` and not necessarily all
previously available methods are retained.
The internals of :class:`RandomVariable` objects are assumed to be constant over
their whole lifecycle. This is due to the caches used to make certain computations
more efficient. As a consequence, altering the internal state of a
:class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.) will result in
undefined behavior. In particular, this should be kept in mind when subclassing
:class:`RandomVariable` or any of its descendants.
Parameters
----------
shape :
Shape of realizations of this random variable.
dtype :
Data type of realizations of this random variable. If ``object`` will be
converted to ``numpy.dtype``.
as_value_type :
Function which can be used to transform user-supplied arguments, interpreted as
realizations of this random variable, to an easy-to-process, normalized format.
Will be called internally to transform the argument of functions like
``in_support``, ``cdf`` and ``logcdf``, ``pmf`` and ``logpmf`` (in
:class:`DiscreteRandomVariable`), ``pdf`` and ``logpdf`` (in
:class:`ContinuousRandomVariable`), and potentially by similar functions in
subclasses.
For instance, this method is useful if (``log``)``cdf`` and (``log``)``pdf``
both only work on :class:`np.float_` arguments, but we still want the user to be
able to pass Python :class:`float`. Then ``as_value_type`` should be set to
something like ``lambda x: np.float64(x)``.
See Also
--------
asrandvar : Transform into a :class:`RandomVariable`.
Examples
--------
"""
# pylint: disable=too-many-instance-attributes,too-many-public-methods
def __init__(
self,
shape: ShapeArgType,
dtype: DTypeArgType,
random_state: RandomStateArgType = None,
parameters: Optional[Dict[str, Any]] = None,
sample: Optional[Callable[[ShapeType], _ValueType]] = None,
in_support: Optional[Callable[[_ValueType], bool]] = None,
cdf: Optional[Callable[[_ValueType], np.float_]] = None,
logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
mode: Optional[Callable[[], _ValueType]] = None,
median: Optional[Callable[[], _ValueType]] = None,
mean: Optional[Callable[[], _ValueType]] = None,
cov: Optional[Callable[[], _ValueType]] = None,
var: Optional[Callable[[], _ValueType]] = None,
std: Optional[Callable[[], _ValueType]] = None,
entropy: Optional[Callable[[], np.float_]] = None,
as_value_type: Optional[Callable[[Any], _ValueType]] = None,
):
# pylint: disable=too-many-arguments,too-many-locals
"""Create a new random variable."""
self.__shape = _utils.as_shape(shape)
# Data Types
self.__dtype = np.dtype(dtype)
self.__median_dtype = RandomVariable.infer_median_dtype(self.__dtype)
self.__moment_dtype = RandomVariable.infer_moment_dtype(self.__dtype)
self._random_state = _utils.as_random_state(random_state)
# Probability distribution of the random variable
self.__parameters = parameters.copy() if parameters is not None else {}
self.__sample = sample
self.__in_support = in_support
self.__cdf = cdf
self.__logcdf = logcdf
self.__quantile = quantile
# Properties of the random variable
self.__mode = mode
self.__median = median
self.__mean = mean
self.__cov = cov
self.__var = var
self.__std = std
self.__entropy = entropy
# Utilities
self.__as_value_type = as_value_type
def __repr__(self) -> str:
return f"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>"
@property
def shape(self) -> ShapeType:
"""Shape of realizations of the random variable."""
return self.__shape
@cached_property
def ndim(self) -> int:
return len(self.__shape)
@cached_property
def size(self) -> int:
return int(np.prod(self.__shape))
@property
def dtype(self) -> np.dtype:
"""Data type of (elements of) a realization of this random variable."""
return self.__dtype
@property
def median_dtype(self) -> np.dtype:
"""The dtype of the :attr:`median`. It will be set to the dtype arising from
the multiplication of values with dtypes :attr:`dtype` and :class:`np.float_`.
This is motivated by the fact that, even for discrete random variables, e.g.
integer-valued random variables, the :attr:`median` might lie in between two
values in which case these values are averaged. For example, a uniform random
variable on :math:`\\{ 1, 2, 3, 4 \\}` will have a median of :math:`2.5`.
"""
return self.__median_dtype
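# For example, RandomVariable.infer_median_dtype(np.int64) promotes to
# dtype('float64'), so a median of 2.5 is representable for an integer-valued
# random variable.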
@property
def moment_dtype(self) -> np.dtype:
"""The dtype of any (function of a) moment of the random variable, e.g. its
:attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be set to the
dtype arising from the multiplication of values with dtypes :attr:`dtype`
and :class:`np.float_`. This is motivated by the mathematical definition of a
moment as a sum or an integral over products of probabilities and values of the
random variable, which are represented as using the dtypes :class:`np.float_`
and :attr:`dtype`, respectively.
"""
return self.__moment_dtype
@property
def random_state(self) -> RandomStateType:
"""Random state of the random variable.
This attribute defines the RandomState object to use for drawing
realizations from this random variable.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local :class:`~numpy.random.RandomState`
instance.
"""
return self._random_state
@random_state.setter
def random_state(self, seed: RandomStateArgType):
"""Get or set the RandomState object of the underlying distribution.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
self._random_state = _utils.as_random_state(seed)
@property
def parameters(self) -> Dict[str, Any]:
"""
Parameters of the probability distribution.
The parameters of the distribution such as mean, variance, et cetera stored in a
``dict``.
"""
return self.__parameters.copy()
@cached_property
def mode(self) -> _ValueType:
"""
Mode of the random variable.
Returns
-------
mode : float
The mode of the random variable.
"""
if self.__mode is None:
raise NotImplementedError
mode = self.__mode()
RandomVariable._check_property_value(
"mode",
mode,
shape=self.__shape,
dtype=self.__dtype,
)
# Make immutable
if isinstance(mode, np.ndarray):
mode.setflags(write=False)
return mode
@cached_property
def median(self) -> _ValueType:
"""
Median of the random variable.
To learn about the dtype of the median, see :attr:`median_dtype`.
Returns
-------
median : float
The median of the distribution.
"""
if self.__shape != ():
raise NotImplementedError(
"The median is only defined for scalar random variables."
)
median = self.__median()
RandomVariable._check_property_value(
"median",
median,
shape=self.__shape,
dtype=self.__median_dtype,
)
# Make immutable
if isinstance(median, np.ndarray):
median.setflags(write=False)
return median
@cached_property
def mean(self) -> _ValueType:
"""
Mean :math:`\\mathbb{E}(X)` of the distribution.
To learn about the dtype of the mean, see :attr:`moment_dtype`.
Returns
-------
mean : array-like
The mean of the distribution.
"""
if self.__mean is None:
raise NotImplementedError
mean = self.__mean()
RandomVariable._check_property_value(
"mean",
mean,
shape=self.__shape,
dtype=self.__moment_dtype,
)
# Make immutable
if isinstance(mean, np.ndarray):
mean.setflags(write=False)
return mean
@cached_property
def cov(self) -> _ValueType:
"""
Covariance :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)`
of the random variable.
To learn about the dtype of the covariance, see :attr:`moment_dtype`.
Returns
-------
cov : array-like
The covariance of the random variable.
""" # pylint: disable=line-too-long
if self.__cov is None:
raise NotImplementedError
cov = self.__cov()
RandomVariable._check_property_value(
"covariance",
cov,
shape=(self.size, self.size) if self.ndim > 0 else (),
dtype=self.__moment_dtype,
)
# Make immutable
if isinstance(cov, np.ndarray):
cov.setflags(write=False)
return cov
@cached_property
def var(self) -> _ValueType:
"""
Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)` of
the distribution.
To learn about the dtype of the variance, see :attr:`moment_dtype`.
Returns
-------
var : array-like
The variance of the distribution.
"""
if self.__var is None:
try:
var = np.diag(self.cov).reshape(self.__shape).copy()
except NotImplementedError as exc:
raise NotImplementedError from exc
else:
var = self.__var()
RandomVariable._check_property_value(
"variance",
var,
shape=self.__shape,
dtype=self.__moment_dtype,
)
# Make immutable
if isinstance(var, np.ndarray):
var.setflags(write=False)
return var
@cached_property
def std(self) -> _ValueType:
"""
Standard deviation of the distribution.
To learn about the dtype of the standard deviation, see :attr:`moment_dtype`.
Returns
-------
std : array-like
The standard deviation of the distribution.
"""
if self.__std is None:
try:
std = np.sqrt(self.var)
except NotImplementedError as exc:
raise NotImplementedError from exc
else:
std = self.__std()
RandomVariable._check_property_value(
"standard deviation",
std,
shape=self.__shape,
dtype=self.__moment_dtype,
)
# Make immutable
if isinstance(std, np.ndarray):
std.setflags(write=False)
return std
@cached_property
def entropy(self) -> np.float_:
if self.__entropy is None:
raise NotImplementedError
entropy = self.__entropy()
entropy = RandomVariable._ensure_numpy_float(
"entropy", entropy, force_scalar=True
)
return entropy
def in_support(self, x: _ValueType) -> bool:
if self.__in_support is None:
raise NotImplementedError
in_support = self.__in_support(self._as_value_type(x))
if not isinstance(in_support, bool):
raise ValueError(
f"The function `in_support` must return a `bool`, but its return value "
f"is of type `{type(x)}`."
)
return in_support
def sample(self, size: ShapeArgType = ()) -> _ValueType:
"""
Draw realizations from a random variable.
Parameters
----------
size : tuple
Size of the drawn sample of realizations.
Returns
-------
sample : array-like
Sample of realizations with the given ``size`` and the inherent ``shape``.
"""
if self.__sample is None:
raise NotImplementedError("No sampling method provided.")
return self.__sample(size=_utils.as_shape(size))
def cdf(self, x: _ValueType) -> np.float_:
"""
Cumulative distribution function.
Parameters
----------
x : array-like
Evaluation points of the cumulative distribution function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The cdf evaluation will be broadcast over all additional dimensions.
Returns
-------
q : array-like
Value of the cumulative density function at the given points.
"""
if self.__cdf is not None:
return RandomVariable._ensure_numpy_float(
"cdf", self.__cdf(self._as_value_type(x))
)
elif self.__logcdf is not None:
cdf = np.exp(self.logcdf(self._as_value_type(x)))
assert isinstance(cdf, np.float_)
return cdf
else:
raise NotImplementedError(
f"Neither the `cdf` nor the `logcdf` of the random variable object "
f"with type `{type(self).__name__}` is implemented."
)
def logcdf(self, x: _ValueType) -> np.float_:
"""
Log-cumulative distribution function.
Parameters
----------
x : array-like
Evaluation points of the cumulative distribution function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The logcdf evaluation will be broadcast over all additional dimensions.
Returns
-------
q : array-like
Value of the log-cumulative density function at the given points.
"""
if self.__logcdf is not None:
return RandomVariable._ensure_numpy_float(
"logcdf", self.__logcdf(self._as_value_type(x))
)
elif self.__cdf is not None:
logcdf = np.log(self.__cdf(x))
assert isinstance(logcdf, np.float_)
return logcdf
else:
raise NotImplementedError(
f"Neither the `logcdf` nor the `cdf` of the random variable object "
f"with type `{type(self).__name__}` is implemented."
)
def quantile(self, p: FloatArgType) -> _ValueType:
"""Quantile function.
The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random
variable :math:`X` is defined as
:math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where
:math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the random
variable. From the definition it follows that the quantile function always
returns values of the same dtype as the random variable. For instance, for a
discrete distribution over the integers, the returned quantiles will also be
integers. This means that, in general, :math:`Q(0.5)` is not equal to the
:attr:`median` as it is defined in this class. See
https://en.wikipedia.org/wiki/Quantile_function for more details and examples.
"""
if self.__shape != ():
raise NotImplementedError(
"The quantile function is only defined for scalar random variables."
)
if self.__quantile is None:
raise NotImplementedError
try:
p = _utils.as_numpy_scalar(p, dtype=np.floating)
except TypeError as exc:
raise TypeError(
"The given argument `p` can not be cast to a `np.floating` object."
) from exc
quantile = self.__quantile(p)
if quantile.shape != self.__shape:
raise ValueError(
f"The quantile function should return values of the same shape as the "
f"random variable, i.e. {self.__shape}, but it returned a value with "
f"{quantile.shape}."
)
if quantile.dtype != self.__dtype:
raise ValueError(
f"The quantile function should return values of the same dtype as the "
f"random variable, i.e. `{self.__dtype.name}`, but it returned a value "
f"with dtype `{quantile.dtype.name}`."
)
return quantile
def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable":
return RandomVariable(
shape=np.empty(shape=self.shape)[key].shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: self.sample(size)[key],
mode=lambda: self.mode[key],
mean=lambda: self.mean[key],
var=lambda: self.var[key],
std=lambda: self.std[key],
entropy=lambda: self.entropy,
as_value_type=self.__as_value_type,
)
def reshape(self, newshape: ShapeArgType) -> "RandomVariable":
"""
Give a new shape to a random variable.
Parameters
----------
newshape : int or tuple of ints
New shape for the random variable. It must be compatible with the original
shape.
Returns
-------
reshaped_rv : ``self`` with the new dimensions of ``shape``.
"""
newshape = _utils.as_shape(newshape)
return RandomVariable(
shape=newshape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: self.sample(size).reshape(size + newshape),
mode=lambda: self.mode.reshape(newshape),
median=lambda: self.median.reshape(newshape),
mean=lambda: self.mean.reshape(newshape),
cov=lambda: self.cov,
var=lambda: self.var.reshape(newshape),
std=lambda: self.std.reshape(newshape),
entropy=lambda: self.entropy,
as_value_type=self.__as_value_type,
)
def transpose(self, *axes: int) -> "RandomVariable":
"""
Transpose the random variable.
Parameters
----------
axes : None, tuple of ints, or n ints
See documentation of numpy.ndarray.transpose.
Returns
-------
transposed_rv : The transposed random variable.
"""
return RandomVariable(
shape=np.empty(shape=self.shape).transpose(*axes).shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: self.sample(size).transpose(*axes),
mode=lambda: self.mode.transpose(*axes),
median=lambda: self.median.transpose(*axes),
mean=lambda: self.mean.transpose(*axes),
cov=lambda: self.cov,
var=lambda: self.var.transpose(*axes),
std=lambda: self.std.transpose(*axes),
entropy=lambda: self.entropy,
as_value_type=self.__as_value_type,
)
T = property(transpose)
# Unary arithmetic operations
def __neg__(self) -> "RandomVariable":
return RandomVariable(
shape=self.shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: -self.sample(size=size),
in_support=lambda x: self.in_support(-x),
mode=lambda: -self.mode,
median=lambda: -self.median,
mean=lambda: -self.mean,
cov=lambda: self.cov,
var=lambda: self.var,
std=lambda: self.std,
as_value_type=self.__as_value_type,
)
def __pos__(self) -> "RandomVariable":
return RandomVariable(
shape=self.shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: +self.sample(size=size),
in_support=lambda x: self.in_support(+x),
mode=lambda: +self.mode,
median=lambda: +self.median,
mean=lambda: +self.mean,
cov=lambda: self.cov,
var=lambda: self.var,
std=lambda: self.std,
as_value_type=self.__as_value_type,
)
def __abs__(self) -> "RandomVariable":
return RandomVariable(
shape=self.shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: abs(self.sample(size=size)),
)
# Binary arithmetic operations
__array_ufunc__ = None
"""
Setting this attribute to None prevents numpy from applying its elementwise
arithmetic operations to expressions like y = np.array([1, 1]) + RV; such
expressions dispatch to the arithmetic operations defined by RandomVariable
instead. Thus a RandomVariable with the correct shape is returned, rather
than an array of RandomVariables.
"""
def __add__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import add
return add(self, other)
def __radd__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import add
return add(other, self)
def __sub__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import sub
return sub(self, other)
def __rsub__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import sub
return sub(other, self)
def __mul__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import mul
return mul(self, other)
def __rmul__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import mul
return mul(other, self)
def __matmul__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import matmul
return matmul(self, other)
def __rmatmul__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import matmul
return matmul(other, self)
def __truediv__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import truediv
return truediv(self, other)
def __rtruediv__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import truediv
return truediv(other, self)
def __floordiv__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import floordiv
return floordiv(self, other)
def __rfloordiv__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import floordiv
return floordiv(other, self)
def __mod__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import mod
return mod(self, other)
def __rmod__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import mod
return mod(other, self)
def __divmod__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import divmod_
return divmod_(self, other)
def __rdivmod__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import divmod_
return divmod_(other, self)
def __pow__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import pow_
return pow_(self, other)
def __rpow__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import pow_
return pow_(other, self)
@staticmethod
def infer_median_dtype(value_dtype: DTypeArgType) -> np.dtype:
return RandomVariable.infer_moment_dtype(value_dtype)
@staticmethod
def infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype:
return np.promote_types(value_dtype, np.float_)
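# For example, np.promote_types(np.int32, np.float_) evaluates to
# dtype('float64'), so integer-valued random variables get floating-point
# moments.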
def _as_value_type(self, x: Any) -> _ValueType:
if self.__as_value_type is not None:
return self.__as_value_type(x)
return x
@staticmethod
def _check_property_value(
name: str,
value: Any,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[np.dtype] = None,
):
if shape is not None:
if value.shape != shape:
raise ValueError(
f"The {name} of the random variable does not have the correct "
f"shape. Expected {shape} but got {value.shape}."
)
if dtype is not None:
if not np.issubdtype(value.dtype, dtype):
raise ValueError(
f"The {name} of the random variable does not have the correct "
f"dtype. Expected {dtype.name} but got {value.dtype.name}."
)
@classmethod
def _ensure_numpy_float(
cls, name: str, value: Any, force_scalar: bool = False
) -> Union[np.float_, np.ndarray]:
if np.isscalar(value):
if not isinstance(value, np.float_):
try:
value = _utils.as_numpy_scalar(value, dtype=np.float_)
except TypeError as err:
raise TypeError(
f"The function `{name}` specified via the constructor of "
f"`{cls.__name__}` must return a scalar value that can be "
f"converted to a `np.float_`, which is not possible for "
f"{value} of type {type(value)}."
) from err
elif not force_scalar:
try:
value = np.asarray(value, dtype=np.float_)
except TypeError as err:
raise TypeError(
f"The function `{name}` specified via the constructor of "
f"`{cls.__name__}` must return a value that can be converted "
f"to a `np.ndarray` of type `np.float_`, which is not possible "
f"for {value} of type {type(value)}."
) from err
else:
raise TypeError(
f"The function `{name}` specified via the constructor of "
f"`{cls.__name__}` must return a scalar value, but {value} of type "
f"{type(value)} is not scalar."
)
assert isinstance(value, (np.float_, np.ndarray))
return value
class DiscreteRandomVariable(RandomVariable[_ValueType]):
def __init__(
self,
shape: ShapeArgType,
dtype: DTypeArgType,
random_state: Optional[RandomStateType] = None,
parameters: Optional[Dict[str, Any]] = None,
sample: Optional[Callable[[ShapeArgType], _ValueType]] = None,
in_support: Optional[Callable[[_ValueType], bool]] = None,
pmf: Optional[Callable[[_ValueType], np.float_]] = None,
logpmf: Optional[Callable[[_ValueType], np.float_]] = None,
cdf: Optional[Callable[[_ValueType], np.float_]] = None,
logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
mode: Optional[Callable[[], _ValueType]] = None,
median: Optional[Callable[[], _ValueType]] = None,
mean: Optional[Callable[[], _ValueType]] = None,
cov: Optional[Callable[[], _ValueType]] = None,
var: Optional[Callable[[], _ValueType]] = None,
std: Optional[Callable[[], _ValueType]] = None,
entropy: Optional[Callable[[], np.float_]] = None,
):
# Probability mass function
self.__pmf = pmf
self.__logpmf = logpmf
super().__init__(
shape=shape,
dtype=dtype,
random_state=random_state,
parameters=parameters,
sample=sample,
in_support=in_support,
cdf=cdf,
logcdf=logcdf,
quantile=quantile,
mode=mode,
median=median,
mean=mean,
cov=cov,
var=var,
std=std,
entropy=entropy,
)
def pmf(self, x: _ValueType) -> np.float_:
if self.__pmf is not None:
return DiscreteRandomVariable._ensure_numpy_float("pmf", self.__pmf(x))
elif self.__logpmf is not None:
pmf = np.exp(self.__logpmf(x))
assert isinstance(pmf, np.float_)
return pmf
else:
raise NotImplementedError(
f"Neither the `pmf` nor the `logpmf` of the discrete random variable "
f"object with type `{type(self).__name__}` is implemented."
)
def logpmf(self, x: _ValueType) -> np.float_:
if self.__logpmf is not None:
return DiscreteRandomVariable._ensure_numpy_float(
"logpmf", self.__logpmf(self._as_value_type(x))
)
elif self.__pmf is not None:
logpmf = np.log(self.__pmf(self._as_value_type(x)))
assert isinstance(logpmf, np.float_)
return logpmf
else:
raise NotImplementedError(
f"Neither the `logpmf` nor the `pmf` of the discrete random variable "
f"object with type `{type(self).__name__}` is implemented."
)
class ContinuousRandomVariable(RandomVariable[_ValueType]):
def __init__(
self,
shape: ShapeArgType,
dtype: DTypeArgType,
random_state: Optional[RandomStateType] = None,
parameters: Optional[Dict[str, Any]] = None,
sample: Optional[Callable[[ShapeArgType], _ValueType]] = None,
in_support: Optional[Callable[[_ValueType], bool]] = None,
pdf: Optional[Callable[[_ValueType], np.float_]] = None,
logpdf: Optional[Callable[[_ValueType], np.float_]] = None,
cdf: Optional[Callable[[_ValueType], np.float_]] = None,
logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
mode: Optional[Callable[[], _ValueType]] = None,
median: Optional[Callable[[], _ValueType]] = None,
mean: Optional[Callable[[], _ValueType]] = None,
cov: Optional[Callable[[], _ValueType]] = None,
var: Optional[Callable[[], _ValueType]] = None,
std: Optional[Callable[[], _ValueType]] = None,
entropy: Optional[Callable[[], np.float_]] = None,
):
# Probability density function
self.__pdf = pdf
self.__logpdf = logpdf
super().__init__(
shape=shape,
dtype=dtype,
random_state=random_state,
parameters=parameters,
sample=sample,
in_support=in_support,
cdf=cdf,
logcdf=logcdf,
quantile=quantile,
mode=mode,
median=median,
mean=mean,
cov=cov,
var=var,
std=std,
entropy=entropy,
)
def pdf(self, x: _ValueType) -> np.float_:
"""
Probability density or mass function.
Following the predominant convention in mathematics, we express pdfs with
respect to the Lebesgue measure unless stated otherwise.
Parameters
----------
x : array-like
Evaluation points of the probability density / mass function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The pdf evaluation will be broadcast over all additional dimensions.
Returns
-------
p : array-like
Value of the probability density / mass function at the given points.
"""
if self.__pdf is not None:
return ContinuousRandomVariable._ensure_numpy_float(
"pdf", self.__pdf(self._as_value_type(x))
)
if self.__logpdf is not None:
pdf = np.exp(self.__logpdf(self._as_value_type(x)))
assert isinstance(pdf, np.float_)
return pdf
raise NotImplementedError(
f"Neither the `pdf` nor the `logpdf` of the continuous random variable "
f"object with type `{type(self).__name__}` is implemented."
)
def logpdf(self, x: _ValueType) -> np.float_:
"""
Natural logarithm of the probability density function.
Parameters
----------
x : array-like
Evaluation points of the log-probability density/mass function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The logpdf evaluation will be broadcast over all additional dimensions.
Returns
-------
logp : array-like
Value of the log-probability density / mass function at the given points.
"""
if self.__logpdf is not None:
return ContinuousRandomVariable._ensure_numpy_float(
"logpdf", self.__logpdf(self._as_value_type(x))
)
elif self.__pdf is not None:
logpdf = np.log(self.__pdf(self._as_value_type(x)))
assert isinstance(logpdf, np.float_)
return logpdf
else:
raise NotImplementedError(
f"Neither the `logpdf` nor the `pdf` of the continuous random variable "
f"object with type `{type(self).__name__}` is implemented."
)
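All three classes are driven entirely by the callables handed to the constructor. A minimal sketch of a scalar standard normal built on ContinuousRandomVariable; the numpy lambdas are illustrative stand-ins for real distribution code:

import numpy as np

standard_normal = ContinuousRandomVariable(
    shape=(),
    dtype=np.float64,
    sample=lambda size: np.random.standard_normal(size),
    mean=lambda: np.float64(0.0),
    var=lambda: np.float64(1.0),
    pdf=lambda x: np.float_(np.exp(-0.5 * x ** 2) / np.sqrt(2.0 * np.pi)),
)

draws = standard_normal.sample(5)                  # ndarray of shape (5,)
print(standard_normal.mean, standard_normal.std)   # 0.0 1.0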
| 34.668856
| 107
| 0.596152
| 4,133
| 36,957
| 5.153158
| 0.113477
| 0.012442
| 0.036388
| 0.017842
| 0.516387
| 0.473378
| 0.447272
| 0.412104
| 0.383933
| 0.36055
| 0
| 0.001089
| 0.304489
| 36,957
| 1,065
| 108
| 34.701408
| 0.827498
| 0.277376
| 0
| 0.407407
| 0
| 0
| 0.106221
| 0.00847
| 0
| 0
| 0
| 0
| 0.011785
| 1
| 0.097643
| false
| 0
| 0.042088
| 0.015152
| 0.249158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cf5ba1b8968aa69e6a1b87247368728da9bf55b
| 11,847
|
py
|
Python
|
tools/wasm-sourcemap.py
|
ngzhian/emscripten
|
94b1555a09f869d65354a2033da724ce77a43106
|
[
"MIT"
] | 1
|
2019-08-16T23:42:09.000Z
|
2019-08-16T23:42:09.000Z
|
tools/wasm-sourcemap.py
|
ngzhian/emscripten
|
94b1555a09f869d65354a2033da724ce77a43106
|
[
"MIT"
] | null | null | null |
tools/wasm-sourcemap.py
|
ngzhian/emscripten
|
94b1555a09f869d65354a2033da724ce77a43106
|
[
"MIT"
] | 1
|
2019-09-26T20:05:46.000Z
|
2019-09-26T20:05:46.000Z
|
#!/usr/bin/env python
# Copyright 2018 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Utility tools that extracts DWARF information encoded in a wasm output
produced by the LLVM tools, and encodes it as a wasm source map. Additionally,
it can collect original sources, change files prefixes, and strip debug
sections from a wasm file.
"""
import argparse
from collections import OrderedDict, namedtuple
import json
import logging
from math import floor, log
import os
import re
from subprocess import Popen, PIPE
import sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from tools.shared import asstr
logger = logging.getLogger('wasm-sourcemap')
def parse_args():
parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__)
parser.add_argument('wasm', help='wasm file')
parser.add_argument('-o', '--output', help='output source map')
parser.add_argument('-p', '--prefix', nargs='*', help='replace source debug filename prefix for source map', default=[])
parser.add_argument('-s', '--sources', action='store_true', help='read and embed source files from file system into source map')
parser.add_argument('-l', '--load-prefix', nargs='*', help='replace source debug filename prefix for reading sources from file system (see also --sources)', default=[])
parser.add_argument('-w', nargs='?', help='set output wasm file')
parser.add_argument('-x', '--strip', action='store_true', help='removes debug and linking sections')
parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies sourceMappingURL section content')
parser.add_argument('--dwarfdump', help="path to llvm-dwarfdump executable")
parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS)
return parser.parse_args()
class Prefixes:
def __init__(self, args):
prefixes = []
for p in args:
if '=' in p:
prefix, replacement = p.split('=')
prefixes.append({'prefix': prefix, 'replacement': replacement})
else:
prefixes.append({'prefix': p, 'replacement': None})
self.prefixes = prefixes
self.cache = {}
def resolve(self, name):
if name in self.cache:
return self.cache[name]
result = name
for p in self.prefixes:
if name.startswith(p['prefix']):
if p['replacement'] is None:
result = name[len(p['prefix'])::]
else:
result = p['replacement'] + name[len(p['prefix'])::]
break
self.cache[name] = result
return result
# SourceMapPrefixes contains resolvers for file names:
# - "sources" is for names that are output to the source map JSON
# - "load" is for paths that are used to load the source text
SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load')
def encode_vlq(n):
VLQ_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
x = (n << 1) if n >= 0 else ((-n << 1) + 1)
result = ""
while x > 31:
result = result + VLQ_CHARS[32 + (x & 31)]
x = x >> 5
return result + VLQ_CHARS[x]
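# Sanity checks for the VLQ base64 encoding above, verifiable by hand:
# encode_vlq(0) == 'A', encode_vlq(1) == 'C', encode_vlq(-1) == 'D',
# encode_vlq(16) == 'gB'.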
def read_var_uint(wasm, pos):
n = 0
shift = 0
b = ord(wasm[pos:pos + 1])
pos = pos + 1
while b >= 128:
n = n | ((b - 128) << shift)
b = ord(wasm[pos:pos + 1])
pos = pos + 1
shift += 7
return n + (b << shift), pos
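# Example: the classic unsigned-LEB128 test vector b'\xe5\x8e\x26' decodes as
# read_var_uint(b'\xe5\x8e\x26', 0) == (624485, 3).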
def strip_debug_sections(wasm):
logger.debug('Strip debug sections')
pos = 8
stripped = wasm[:pos]
while pos < len(wasm):
section_start = pos
section_id, pos_ = read_var_uint(wasm, pos)
section_size, section_body = read_var_uint(wasm, pos_)
pos = section_body + section_size
if section_id == 0:
name_len, name_pos = read_var_uint(wasm, section_body)
name_end = name_pos + name_len
name = wasm[name_pos:name_end]
if name == "linking" or name == "sourceMappingURL" or name.startswith("reloc..debug_") or name.startswith(".debug_"):
continue # skip debug related sections
stripped = stripped + wasm[section_start:pos]
return stripped
def encode_uint_var(n):
result = bytearray()
while n > 127:
result.append(128 | (n & 127))
n = n >> 7
result.append(n)
return bytes(result)
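# Example: encode_uint_var(624485) == b'\xe5\x8e\x26', i.e. the inverse of the
# read_var_uint example above.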
def append_source_mapping(wasm, url):
logger.debug('Append sourceMappingURL section')
section_name = "sourceMappingURL"
section_content = encode_uint_var(len(section_name)) + section_name + encode_uint_var(len(url)) + url
return wasm + encode_uint_var(0) + encode_uint_var(len(section_content)) + section_content
def get_code_section_offset(wasm):
logger.debug('Read sections index')
pos = 8
while pos < len(wasm):
section_id, pos_ = read_var_uint(wasm, pos)
section_size, pos = read_var_uint(wasm, pos_)
if section_id == 10:
return pos
pos = pos + section_size
def remove_dead_entries(entries):
# Remove entries for dead functions. This is a heuristic: data is ignored when
# a function's starting address is near 0, i.e. no larger than the length of
# its own LEB-encoded size field.
block_start = 0
cur_entry = 0
while cur_entry < len(entries):
if not entries[cur_entry]['eos']:
cur_entry += 1
continue
fn_start = entries[block_start]['address']
# Calculate the LEB encoded function size (including size field)
fn_size_length = floor(log(entries[cur_entry]['address'] - fn_start + 1, 128)) + 1
min_live_offset = 1 + fn_size_length # 1 byte is for code section entries
if fn_start < min_live_offset:
# Remove dead code debug info block.
del entries[block_start:cur_entry + 1]
cur_entry = block_start
continue
cur_entry += 1
block_start = cur_entry
def read_dwarf_entries(wasm, options):
if options.dwarfdump_output:
with open(options.dwarfdump_output, 'r') as dump_file:
    output = dump_file.read()
elif options.dwarfdump:
logger.debug('Reading DWARF information from %s' % wasm)
if not os.path.exists(options.dwarfdump):
logger.error('llvm-dwarfdump not found: ' + options.dwarfdump)
sys.exit(1)
process = Popen([options.dwarfdump, "-debug-info", "-debug-line", wasm], stdout=PIPE)
output, err = process.communicate()
exit_code = process.wait()
if exit_code != 0:
logger.error('Error during llvm-dwarfdump execution (%s)' % exit_code)
sys.exit(1)
else:
logger.error('Please specify either --dwarfdump or --dwarfdump-output')
sys.exit(1)
entries = []
debug_line_chunks = re.split(r"debug_line\[(0x[0-9a-f]*)\]", asstr(output))
maybe_debug_info_content = debug_line_chunks[0]
for i in range(1, len(debug_line_chunks), 2):
stmt_list = debug_line_chunks[i]
comp_dir_match = re.search(r"DW_AT_stmt_list\s+\(" + stmt_list + r"\)\s+" +
r"DW_AT_comp_dir\s+\(\"([^\"]+)", maybe_debug_info_content)
comp_dir = comp_dir_match.group(1) if comp_dir_match is not None else ""
line_chunk = debug_line_chunks[i + 1]
# include_directories[ 1] = "/Users/yury/Work/junk/sqlite-playground/src"
# file_names[ 1]:
# name: "playground.c"
# dir_index: 1
# mod_time: 0x00000000
# length: 0x00000000
#
# Address Line Column File ISA Discriminator Flags
# ------------------ ------ ------ ------ --- ------------- -------------
# 0x0000000000000006 22 0 1 0 0 is_stmt
# 0x0000000000000007 23 10 1 0 0 is_stmt prologue_end
# 0x000000000000000f 23 3 1 0 0
# 0x0000000000000010 23 3 1 0 0 end_sequence
# 0x0000000000000011 28 0 1 0 0 is_stmt
include_directories = {'0': comp_dir}
for dir in re.finditer(r"include_directories\[\s*(\d+)\] = \"([^\"]*)", line_chunk):
include_directories[dir.group(1)] = dir.group(2)
files = {}
for file in re.finditer(r"file_names\[\s*(\d+)\]:\s+name: \"([^\"]*)\"\s+dir_index: (\d+)", line_chunk):
dir = include_directories[file.group(3)]
file_path = (dir + '/' if file.group(2)[0] != '/' else '') + file.group(2)
files[file.group(1)] = file_path
for line in re.finditer(r"\n0x([0-9a-f]+)\s+(\d+)\s+(\d+)\s+(\d+)(.*?end_sequence)?", line_chunk):
entry = {'address': int(line.group(1), 16), 'line': int(line.group(2)), 'column': int(line.group(3)), 'file': files[line.group(4)], 'eos': line.group(5) is not None}
if not entry['eos']:
entries.append(entry)
else:
# move end of function to the last END operator
entry['address'] -= 1
if entries[-1]['address'] == entry['address']:
# the last entry has the same address; reuse it
entries[-1]['eos'] = True
else:
entries.append(entry)
remove_dead_entries(entries)
# return entries sorted by the address field
return sorted(entries, key=lambda entry: entry['address'])
def build_sourcemap(entries, code_section_offset, prefixes, collect_sources):
sources = []
sources_content = [] if collect_sources else None
mappings = []
sources_map = {}
last_address = 0
last_source_id = 0
last_line = 1
last_column = 1
for entry in entries:
line = entry['line']
column = entry['column']
# ignore entries with line 0
if line == 0:
continue
# start at least at column 1
if column == 0:
column = 1
address = entry['address'] + code_section_offset
file_name = entry['file']
source_name = prefixes.sources.resolve(file_name)
if source_name not in sources_map:
source_id = len(sources)
sources_map[source_name] = source_id
sources.append(source_name)
if collect_sources:
load_name = prefixes.load.resolve(file_name)
try:
with open(load_name, 'r') as infile:
source_content = infile.read()
sources_content.append(source_content)
except IOError:
print('Failed to read source: %s' % load_name)
sources_content.append(None)
else:
source_id = sources_map[source_name]
address_delta = address - last_address
source_id_delta = source_id - last_source_id
line_delta = line - last_line
column_delta = column - last_column
mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) + encode_vlq(line_delta) + encode_vlq(column_delta))
last_address = address
last_source_id = source_id
last_line = line
last_column = column
return OrderedDict([('version', 3),
('names', []),
('sources', sources),
('sourcesContent', sources_content),
('mappings', ','.join(mappings))])
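# A usage sketch with hypothetical entries (not data from a real module):
# build_sourcemap turns the sorted DWARF line entries into a version-3 source
# map, one comma-separated VLQ group per entry.
def _build_sourcemap_example():
    entries = [{'address': 5, 'line': 10, 'column': 1, 'file': 'a.c'}]
    prefixes = SourceMapPrefixes(sources=Prefixes([]), load=Prefixes([]))
    m = build_sourcemap(entries, 0, prefixes, False)
    assert m['version'] == 3 and m['sources'] == ['a.c']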
def main():
options = parse_args()
wasm_input = options.wasm
with open(wasm_input, 'rb') as infile:
wasm = infile.read()
entries = read_dwarf_entries(wasm_input, options)
code_section_offset = get_code_section_offset(wasm)
prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix))
logger.debug('Saving to %s' % options.output)
map = build_sourcemap(entries, code_section_offset, prefixes, options.sources)
with open(options.output, 'w') as outfile:
json.dump(map, outfile, separators=(',', ':'))
if options.strip:
wasm = strip_debug_sections(wasm)
if options.source_map_url:
wasm = append_source_mapping(wasm, options.source_map_url)
if options.w:
logger.debug('Saving wasm to %s' % options.w)
with open(options.w, 'wb') as outfile:
outfile.write(wasm)
logger.debug('Done')
return 0
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG if os.environ.get('EMCC_DEBUG') else logging.INFO)
sys.exit(main())
| 35.258929
| 171
| 0.653668
| 1,604
| 11,847
| 4.655237
| 0.2101
| 0.012053
| 0.022767
| 0.012053
| 0.093612
| 0.048078
| 0.042587
| 0.030267
| 0.030267
| 0.010982
| 0
| 0.026727
| 0.216764
| 11,847
| 335
| 172
| 35.364179
| 0.777993
| 0.156495
| 0
| 0.11157
| 0
| 0.004132
| 0.144063
| 0.021097
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053719
| false
| 0
| 0.041322
| 0
| 0.14876
| 0.004132
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cf7d0d22a5ee01c1d25faa33b9b8f99ef2f0210
| 3,300
|
py
|
Python
|
Unsupervised/pix2pixHD/extract_frames.py
|
Kebniss/AutoDetect
|
44ca4d6930ef5fbf044ebeed5c9fd925f04bc1a8
|
[
"MIT"
] | 1
|
2019-07-25T02:16:32.000Z
|
2019-07-25T02:16:32.000Z
|
Unsupervised/pix2pixHD/extract_frames.py
|
Kebniss/AutoDetect
|
44ca4d6930ef5fbf044ebeed5c9fd925f04bc1a8
|
[
"MIT"
] | null | null | null |
Unsupervised/pix2pixHD/extract_frames.py
|
Kebniss/AutoDetect
|
44ca4d6930ef5fbf044ebeed5c9fd925f04bc1a8
|
[
"MIT"
] | null | null | null |
import os
import cv2
import argparse
from utils import *
from tqdm import tqdm
from glob import glob
from pathlib import Path
def _extract_frames(video_path, dest_folder, start=0, sampling_f=1):
    vidcap = cv2.VideoCapture(video_path)
    success, image = vidcap.read()
count = -1
saved = 0
print(f'Processing: {video_path}')
while success:
count += 1
if count % 300 == 0:
print('Processing frame: ', count)
if count % sampling_f == 0:
# sampling
cv2.imwrite(''.join([dest_folder, f"/{count + start}.jpg"]), image)
saved += 1
success, image = vidcap.read() # read next
print(f'Successfully saved {saved} frames to {dest_folder}')
return count + start
parser = argparse.ArgumentParser(
description='build a "frame dataset" from a given video')
parser.add_argument('-input', dest="input", required=True,
help='''Path to a single video or a folder. If path to folder the algorithm
will extract frames from all files with extension defined in
--extension and save them under separate folders under dest_folder.
The frames from each video will be saved under a folder with its name.
''')
parser.add_argument('--dest-folder', dest="dest_folder", default='./dataset/',
help='''Path where to store frames. NB all files in this folder will be
removed before adding the new frames''')
parser.add_argument('--same-folder', dest="same_folder", default=False,
                    type=lambda s: s.lower() == 'true',  # parse the flag so that passing "False" is actually falsy
                    help='''Set it to True if you want to save the frames of all videos to the
same folder in ascending order going from the first frame of the first video
to the last frame of the last video. If True frames will be saved in
dest_folder/frames.''')
parser.add_argument('--sampling', help='save one frame every N frames', default='3')
parser.add_argument('--run-type', help='train or test', default='train')
parser.add_argument('--extension', help='avi, mp4, mov...', default='mp4')
parser.add_argument('-width', help='output width', default=640, type=int)
parser.add_argument('-height', help='output height', default=480, type=int)
args = parser.parse_args()
mkdir(args.dest_folder)
if (args.width % 32 != 0) or (args.height % 32 != 0):
raise Exception("Please use width and height that are divisible by 32")
if os.path.isdir(args.input):
inp = str(Path(args.input) / f'*.{args.extension}')
videos = glob(inp)
if not videos:
raise Exception(f'No {args.extension} files in input directory {args.input}')
elif os.path.isfile(args.input):
_, ext = get_filename_extension(args.input)
if ext != args.extension:
raise ValueError(f'Correct inputs: folder or path to {args.extension} file only')
videos = [args.input]
else:
raise ValueError(f'Correct inputs: folder or path to {args.extension} file only')
if args.same_folder:
start = 0
dest_folder = str(Path(args.dest_folder) / f'{args.run_type}_frames')
mkdir(dest_folder)
for v in tqdm(videos):
if not args.same_folder:
start = 0
name, _ = get_filename_extension(v)
dest_folder = str(Path(args.dest_folder) / name)
mkdir(dest_folder)
start = _extract_frames(v, dest_folder, start, sampling_f=int(args.sampling))
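# Example invocation (a sketch; the paths below are hypothetical):
#   python extract_frames.py -input ./videos --dest-folder ./dataset \
#       --extension mp4 --sampling 3 --run-type train --same-folder True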
| 39.285714
| 89
| 0.677879
| 484
| 3,300
| 4.533058
| 0.301653
| 0.06381
| 0.061987
| 0.020055
| 0.104831
| 0.0866
| 0.0866
| 0.058341
| 0.058341
| 0.058341
| 0
| 0.012552
| 0.203333
| 3,300
| 83
| 90
| 39.759036
| 0.821986
| 0.005455
| 0
| 0.084507
| 0
| 0
| 0.386703
| 0.006709
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014085
| false
| 0
| 0.098592
| 0
| 0.126761
| 0.042254
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cf8865345a71c46f4e1edec308e018d877fedb9
| 11,128
|
py
|
Python
|
AppServer/google/appengine/tools/devappserver2/login.py
|
loftwah/appscale
|
586fc1347ebc743d7a632de698f4dbfb09ae38d6
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/google/appengine/tools/devappserver2/login.py
|
loftwah/appscale
|
586fc1347ebc743d7a632de698f4dbfb09ae38d6
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/google/appengine/tools/devappserver2/login.py
|
loftwah/appscale
|
586fc1347ebc743d7a632de698f4dbfb09ae38d6
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handles login/logout pages and dealing with user cookies.
Includes a WSGI application that serves the login page and handles login and
logout HTTP requests. It accepts these GET query parameters:
continue: URL to redirect to after a login or logout has completed.
email: Email address to set for the client.
admin: If 'True', the client should be logged in as an admin.
action: What action to take ('Login' or 'Logout').
To view the current user information and a form for logging in and out,
supply no parameters.
"""
import cgi
import Cookie
import hashlib
import logging
import os
import sha
import sys
import urllib
import uuid
import webapp2
app_dashboard_lib = '/../../../../../AppDashboard/lib'
sys.path.append(os.path.dirname(__file__) + app_dashboard_lib)
from app_dashboard_helper import AppDashboardHelper
# URL of the login page within the dev appserver.
LOGIN_URL_RELATIVE = '_ah/login'
# CGI parameter constants.
CONTINUE_PARAM = 'continue'
_EMAIL_PARAM = 'email'
_ADMIN_PARAM = 'admin'
ACTION_PARAM = 'action'
# Values for the action parameter.
LOGOUT_ACTION = 'logout'
LOGIN_ACTION = 'login'
# Name of the cookie that stores the user info.
_COOKIE_NAME = 'dev_appserver_login'
# Indicates that the user has admin access to all applications.
CLOUD_ADMIN_MARKER = 'CLOUD_ADMIN'
# The port that the AppDashboard serves HTTPS traffic on.
DASHBOARD_HTTPS_PORT = "1443"
def get_user_info(http_cookie, cookie_name=_COOKIE_NAME):
"""Gets the requestor's user info from an HTTP Cookie header.
Args:
http_cookie: The value of the 'Cookie' HTTP request header.
cookie_name: The name of the cookie that stores the user info.
Returns:
A tuple (email, admin, user_id) where:
email: The user's email address, if any.
admin: True if the user is an admin; False otherwise.
user_id: The user ID, if any.
"""
try:
cookie = Cookie.SimpleCookie(http_cookie)
except Cookie.CookieError:
return '', False, ''
cookie_dict = dict((k, v.value) for k, v in cookie.iteritems())
return _get_user_info_from_dict(cookie_dict, cookie_name)
def _get_user_info_from_dict(cookie_dict, cookie_name=_COOKIE_NAME):
"""Gets the requestor's user info from a cookie dictionary.
Args:
cookie_dict: A dictionary mapping cookie names onto values.
cookie_name: The name of the cookie that stores the user info.
Returns:
A tuple (email, admin, user_id) where:
email: The user's email address, if any.
admin: True if the user is an admin; False otherwise.
user_id: The user ID, if any.
"""
cookie_secret = os.environ['COOKIE_SECRET']
cookie_value = cookie_dict.get(cookie_name, '')
cookie_value = cookie_value.replace("%3A",":")
cookie_value = cookie_value.replace("%40",'@')
cookie_value = cookie_value.replace("%2C",",")
email, nickname, admin, hsh = (cookie_value.split(':') + ['', '', '', ''])[:4]
if email == '':
nickname = ''
admin = ''
return '', False, ''
else:
vhsh = sha.new(email+nickname+admin+cookie_secret).hexdigest()
if hsh != vhsh:
logging.info("{0} has an invalid cookie, so ignoring it.".format(email))
return '', False, ''
admin_apps = admin.split(',')
current_app = os.environ['APPLICATION_ID']
is_admin = current_app in admin_apps or CLOUD_ADMIN_MARKER in admin_apps
return email, is_admin, nickname
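# A round-trip sketch with hypothetical values. It assumes COOKIE_SECRET and
# APPLICATION_ID are present in the environment, as they are under dev_appserver;
# the cookie payload layout is 'email:nickname:admin_apps:sha1_hash'.
def _cookie_roundtrip_example():
    os.environ.setdefault('COOKIE_SECRET', 'secret')
    os.environ.setdefault('APPLICATION_ID', 'guestbook')
    payload = 'a@b.com:a@b.com:guestbook:' + sha.new(
        'a@b.com' + 'a@b.com' + 'guestbook' + os.environ['COOKIE_SECRET']).hexdigest()
    email, is_admin, _ = _get_user_info_from_dict({_COOKIE_NAME: payload})
    assert email == 'a@b.com' and is_admin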
def _create_cookie_data(email, admin):
"""Creates cookie payload data.
Args:
email: The user's email address.
admin: True if the user is an admin; False otherwise.
Returns:
A string containing the cookie payload.
"""
if email:
user_id_digest = hashlib.md5(email.lower()).digest()
user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
else:
user_id = ''
return '%s:%s:%s' % (email, admin, user_id)
def _set_user_info_cookie(email, admin, cookie_name=_COOKIE_NAME):
"""Creates a cookie to set the user information for the requestor.
Args:
email: The email to set for the user.
admin: True if the user should be admin; False otherwise.
cookie_name: The name of the cookie that stores the user info.
Returns:
Set-Cookie value for setting the user info of the requestor.
"""
cookie_value = _create_cookie_data(email, admin)
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = cookie_value
cookie[cookie_name]['path'] = '/'
return cookie[cookie_name].OutputString()
def _clear_user_info_cookie(cookie_name=_COOKIE_NAME):
"""Clears the user info cookie from the requestor, logging them out.
Args:
cookie_name: The name of the cookie that stores the user info.
Returns:
A Set-Cookie value for clearing the user info of the requestor.
"""
cookie = Cookie.SimpleCookie()
cookie[cookie_name] = ''
cookie[cookie_name]['path'] = '/'
cookie[cookie_name]['max-age'] = '0'
if AppDashboardHelper.USE_SHIBBOLETH:
cookie[cookie_name]['domain'] = AppDashboardHelper.\
SHIBBOLETH_COOKIE_DOMAIN
return cookie[cookie_name].OutputString()
_LOGIN_TEMPLATE = """<html>
<head>
<title>Login</title>
</head>
<body>
<form method="get" action="%(login_url)s"
style="text-align:center; font: 13px sans-serif">
<div style="width: 20em; margin: 1em auto;
text-align:left;
padding: 0 2em 1.25em 2em;
background-color: #d6e9f8;
border: 2px solid #67a7e3">
<h3>%(login_message)s</h3>
<p style="padding: 0; margin: 0">
<label for="email" style="width: 3em">Email:</label>
<input name="email" type="email" value="%(email)s" id="email"/>
</p>
<p style="margin: .5em 0 0 3em; font-size:12px">
<input name="admin" type="checkbox" value="True"
%(admin_checked)s id="admin"/>
<label for="admin">Sign in as Administrator</label>
</p>
<p style="margin-left: 3em">
<input name="action" value="Login" type="submit"
id="submit-login" />
<input name="action" value="Logout" type="submit"
id="submit-logout" />
</p>
</div>
<input name="continue" type="hidden" value="%(continue_url)s"/>
</form>
</body>
</html>
"""
def _render_login_template(login_url, continue_url, email, admin):
"""Renders the login page.
Args:
login_url: The parameter to _login_response.
continue_url: The parameter to _login_response.
email: The email address of the current user, if any.
admin: True if the user is currently an admin; False otherwise.
Returns:
A string containing the contents of the login page.
"""
if email:
login_message = 'Logged in'
else:
login_message = 'Not logged in'
email = 'test\x40example.com'
admin_checked = 'checked' if admin else ''
template_dict = {
'email': cgi.escape(email, quote=True),
'admin_checked': admin_checked,
'login_message': login_message,
'login_url': cgi.escape(login_url, quote=True),
'continue_url': cgi.escape(continue_url, quote=True),
}
return _LOGIN_TEMPLATE % template_dict
def login_redirect(application_url, continue_url, start_response):
"""Writes a login redirection URL to a user.
This redirects to login_url with a continue parameter to return to
continue_url. The login_url should be on the canonical front-end server,
regardless of the host:port the user connected to.
Args:
application_url: The URL of the dev appserver domain
(e.g., 'http://localhost:8080').
continue_url: The URL to continue to after the user logs in.
start_response: A WSGI start_response function.
Returns:
An (empty) iterable over strings containing the body of the HTTP response.
"""
if AppDashboardHelper.USE_SHIBBOLETH:
redirect_url = '{0}:{1}/login?{2}={3}'.format(
AppDashboardHelper.SHIBBOLETH_CONNECTOR,
AppDashboardHelper.SHIBBOLETH_CONNECTOR_PORT,
CONTINUE_PARAM,
urllib.quote(continue_url)
)
else:
hostname = os.environ['NGINX_HOST']
redirect_url = 'https://{0}:{1}/login?{2}={3}'.format(
hostname,
DASHBOARD_HTTPS_PORT,
CONTINUE_PARAM,
urllib.quote(continue_url))
start_response('302 Requires login',
[('Location', redirect_url)])
return []
def fake_admin():
""" Generate the fake admin login secret
Returns:
A string containing the fake login secret
"""
return hashlib.sha1('{}/{}'.format(
os.environ.get('APPNAME', str(uuid.uuid4())),
os.environ.get('COOKIE_SECRET', str(uuid.uuid4())))).hexdigest()
class Handler(webapp2.RequestHandler):
"""The request handler for the login and logout pages."""
def get(self):
action = self.request.get(ACTION_PARAM)
set_email = self.request.get(_EMAIL_PARAM)
set_admin = self.request.get(_ADMIN_PARAM).lower() == 'true'
continue_url = self.request.get(CONTINUE_PARAM)
login_url = self.request.path_url
if action:
redirect_url = continue_url or login_url
# Perform the action.
if action.lower() == LOGOUT_ACTION.lower():
self.response.headers['Set-Cookie'] = _clear_user_info_cookie()
if AppDashboardHelper.USE_SHIBBOLETH:
redirect_url = AppDashboardHelper.SHIBBOLETH_LOGOUT_URL
elif action.lower() == LOGIN_ACTION.lower() and set_email:
self.response.headers['Set-Cookie'] = _set_user_info_cookie(set_email,
set_admin)
# URLs should be ASCII-only byte strings.
if isinstance(redirect_url, unicode):
redirect_url = redirect_url.encode('ascii')
# Redirect the user after performing the action.
self.response.status = 302
self.response.status_message = 'Redirecting to continue URL'
self.response.headers['Location'] = redirect_url
else:
# Send the user to the AppDashboard to log in before letting them view the
# specified URL.
if AppDashboardHelper.USE_SHIBBOLETH:
appscale_login_url = "{0}:{1}/login".format(
AppDashboardHelper.SHIBBOLETH_CONNECTOR, DASHBOARD_HTTPS_PORT)
else:
appscale_login_url = "https://{0}:{1}/login".format(
os.environ['NGINX_HOST'], DASHBOARD_HTTPS_PORT)
redirect_url = '{0}?{1}={2}'.format(appscale_login_url, CONTINUE_PARAM,
continue_url)
self.response.status = 302
self.response.status_message = 'Redirecting to login service URL'
self.response.headers['Location'] = redirect_url
application = webapp2.WSGIApplication([('/.*', Handler)], debug=True)
| 31.885387
| 80
| 0.689162
| 1,531
| 11,128
| 4.851731
| 0.224037
| 0.02356
| 0.02154
| 0.010097
| 0.244615
| 0.192919
| 0.16559
| 0.124125
| 0.120759
| 0.096661
| 0
| 0.010325
| 0.199317
| 11,128
| 348
| 81
| 31.977011
| 0.823345
| 0.360532
| 0
| 0.169492
| 0
| 0
| 0.245624
| 0.025025
| 0.00565
| 0
| 0
| 0
| 0
| 1
| 0.050847
| false
| 0
| 0.062147
| 0
| 0.180791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cf8d4321937161cb10d000e0dbd87e721b04ad3
| 7,060
|
py
|
Python
|
sdks/python/apache_beam/runners/portability/job_server.py
|
noah-goodrich/beam
|
5a851b734f53206c20efe08d93d15760bbc15b0c
|
[
"Apache-2.0"
] | 1
|
2019-12-05T04:36:46.000Z
|
2019-12-05T04:36:46.000Z
|
sdks/python/apache_beam/runners/portability/job_server.py
|
noah-goodrich/beam
|
5a851b734f53206c20efe08d93d15760bbc15b0c
|
[
"Apache-2.0"
] | 14
|
2020-02-12T22:20:41.000Z
|
2021-11-09T19:41:23.000Z
|
sdks/python/apache_beam/runners/portability/job_server.py
|
violalyu/beam
|
dd605e568d70b1a6ebea60c15b2aec3e240f3914
|
[
"Apache-2.0"
] | 1
|
2021-03-21T23:28:23.000Z
|
2021-03-21T23:28:23.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import atexit
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import grpc
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.runners.portability import local_job_service
from apache_beam.utils import subprocess_server
from apache_beam.version import __version__ as beam_version
class JobServer(object):
def start(self):
"""Starts this JobServer, returning a grpc service to which to submit jobs.
"""
raise NotImplementedError(type(self))
def stop(self):
"""Stops this job server."""
raise NotImplementedError(type(self))
class ExternalJobServer(JobServer):
def __init__(self, endpoint, timeout=None):
self._endpoint = endpoint
self._timeout = timeout
def start(self):
channel = grpc.insecure_channel(self._endpoint)
grpc.channel_ready_future(channel).result(timeout=self._timeout)
return beam_job_api_pb2_grpc.JobServiceStub(channel)
def stop(self):
pass
class EmbeddedJobServer(JobServer):
def start(self):
return local_job_service.LocalJobServicer()
def stop(self):
pass
class StopOnExitJobServer(JobServer):
"""Wraps a JobServer such that its stop will automatically be called on exit.
"""
def __init__(self, job_server):
self._lock = threading.Lock()
self._job_server = job_server
self._started = False
def start(self):
with self._lock:
if not self._started:
self._endpoint = self._job_server.start()
self._started = True
atexit.register(self.stop)
signal.signal(signal.SIGINT, self.stop)
return self._endpoint
def stop(self):
with self._lock:
if self._started:
self._job_server.stop()
self._started = False
class SubprocessJobServer(JobServer):
"""An abstract base class for JobServers run as an external process."""
def __init__(self):
self._local_temp_root = None
self._server = None
def subprocess_cmd_and_endpoint(self):
raise NotImplementedError(type(self))
def start(self):
if self._server is None:
self._local_temp_root = tempfile.mkdtemp(prefix='beam-temp')
cmd, endpoint = self.subprocess_cmd_and_endpoint()
port = int(endpoint.split(':')[-1])
self._server = subprocess_server.SubprocessServer(
beam_job_api_pb2_grpc.JobServiceStub, cmd, port=port)
return self._server.start()
def stop(self):
if self._local_temp_root:
shutil.rmtree(self._local_temp_root)
self._local_temp_root = None
return self._server.stop()
def local_temp_dir(self, **kwargs):
return tempfile.mkdtemp(dir=self._local_temp_root, **kwargs)
class JavaJarJobServer(SubprocessJobServer):
MAVEN_REPOSITORY = 'https://repo.maven.apache.org/maven2/org/apache/beam'
JAR_CACHE = os.path.expanduser("~/.apache_beam/cache")
def java_arguments(self, job_port, artifacts_dir):
raise NotImplementedError(type(self))
def path_to_jar(self):
raise NotImplementedError(type(self))
@staticmethod
def path_to_beam_jar(gradle_target):
return subprocess_server.JavaJarServer.path_to_beam_jar(gradle_target)
@staticmethod
def local_jar(url):
return subprocess_server.JavaJarServer.local_jar(url)
def subprocess_cmd_and_endpoint(self):
jar_path = self.local_jar(self.path_to_jar())
artifacts_dir = self.local_temp_dir(prefix='artifacts')
job_port, = subprocess_server.pick_port(None)
return (
['java', '-jar', jar_path] + list(
self.java_arguments(job_port, artifacts_dir)),
'localhost:%s' % job_port)
class DockerizedJobServer(SubprocessJobServer):
"""
Spins up the JobServer in a docker container for local execution.
"""
def __init__(self, job_host="localhost",
job_port=None,
artifact_port=None,
expansion_port=None,
harness_port_range=(8100, 8200),
max_connection_retries=5):
super(DockerizedJobServer, self).__init__()
self.job_host = job_host
self.job_port = job_port
self.expansion_port = expansion_port
self.artifact_port = artifact_port
self.harness_port_range = harness_port_range
self.max_connection_retries = max_connection_retries
def subprocess_cmd_and_endpoint(self):
# TODO This is hardcoded to Flink at the moment but should be changed
job_server_image_name = os.environ['USER'] + \
"-docker-apache.bintray.io/beam/flink-job-server:latest"
docker_path = subprocess.check_output(
['which', 'docker']).strip().decode('utf-8')
cmd = ["docker", "run",
# We mount the docker binary and socket to be able to spin up
# "sibling" containers for the SDK harness.
"-v", ':'.join([docker_path, "/bin/docker"]),
"-v", "/var/run/docker.sock:/var/run/docker.sock"]
self.job_port, self.artifact_port, self.expansion_port = (
subprocess_server.pick_port(
self.job_port, self.artifact_port, self.expansion_port))
args = ['--job-host', self.job_host,
'--job-port', str(self.job_port),
'--artifact-port', str(self.artifact_port),
'--expansion-port', str(self.expansion_port)]
if sys.platform == "darwin":
# Docker-for-Mac doesn't support host networking, so we need to explicitly
# publish ports from the Docker container to be able to connect to it.
# Also, all other containers need to be aware that they run Docker-on-Mac
# to connect against the internal Docker-for-Mac address.
cmd += ["-e", "DOCKER_MAC_CONTAINER=1"]
cmd += ["-p", "{}:{}".format(self.job_port, self.job_port)]
cmd += ["-p", "{}:{}".format(self.artifact_port, self.artifact_port)]
cmd += ["-p", "{}:{}".format(self.expansion_port, self.expansion_port)]
cmd += ["-p", "{0}-{1}:{0}-{1}".format(
self.harness_port_range[0], self.harness_port_range[1])]
else:
# This shouldn't be set for MacOS because it destroys port forwarding,
# even though host networking is not supported on MacOS.
cmd.append("--network=host")
cmd.append(job_server_image_name)
return cmd + args, '%s:%s' % (self.job_host, self.job_port)
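# A sketch of a concrete JavaJarJobServer: a subclass only has to name the
# job-server jar and the flags it takes. The gradle target and flag names
# below are hypothetical, not taken from a real runner.
class _ExampleJavaJobServer(JavaJarJobServer):
    def path_to_jar(self):
        return self.path_to_beam_jar(':runners:flink:1.10:job-server:shadowJar')
    def java_arguments(self, job_port, artifacts_dir):
        return ['--job-port', job_port, '--artifacts-dir', artifacts_dir]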
| 33.619048
| 79
| 0.703258
| 944
| 7,060
| 5.039195
| 0.291314
| 0.023544
| 0.018499
| 0.021442
| 0.14568
| 0.061593
| 0.018499
| 0.018499
| 0.018499
| 0
| 0
| 0.004555
| 0.191501
| 7,060
| 209
| 80
| 33.779904
| 0.828837
| 0.230312
| 0
| 0.208955
| 0
| 0
| 0.074005
| 0.021755
| 0
| 0
| 0
| 0.004785
| 0
| 1
| 0.164179
| false
| 0.014925
| 0.104478
| 0.029851
| 0.410448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cf950bf294a91d7beefe8c59885eaed2c328e0e
| 14,856
|
py
|
Python
|
sympy/printing/pycode.py
|
tachycline/sympy
|
abf6fec12012852c7e6fae38461da9723cadc8b9
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/printing/pycode.py
|
tachycline/sympy
|
abf6fec12012852c7e6fae38461da9723cadc8b9
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/printing/pycode.py
|
tachycline/sympy
|
abf6fec12012852c7e6fae38461da9723cadc8b9
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import defaultdict
from functools import wraps
from itertools import chain
from sympy.core import sympify
from .precedence import precedence
from .codeprinter import CodePrinter
_kw_py2and3 = {
'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif',
'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while',
'with', 'yield', 'None' # 'None' is actually not in Python 2's keyword.kwlist
}
_kw_only_py2 = {'exec', 'print'}
_kw_only_py3 = {'False', 'nonlocal', 'True'}
_known_functions = {
'Abs': 'abs',
}
_known_functions_math = {
'acos': 'acos',
'acosh': 'acosh',
'asin': 'asin',
'asinh': 'asinh',
'atan': 'atan',
'atan2': 'atan2',
'atanh': 'atanh',
'ceiling': 'ceil',
'cos': 'cos',
'cosh': 'cosh',
'erf': 'erf',
'erfc': 'erfc',
'exp': 'exp',
'expm1': 'expm1',
'factorial': 'factorial',
'floor': 'floor',
'gamma': 'gamma',
'hypot': 'hypot',
'loggamma': 'lgamma',
'log': 'log',
'log10': 'log10',
'log1p': 'log1p',
'log2': 'log2',
'sin': 'sin',
'sinh': 'sinh',
'Sqrt': 'sqrt',
'tan': 'tan',
'tanh': 'tanh'
} # Not used from ``math``: [copysign isclose isfinite isinf isnan ldexp frexp pow modf
# radians trunc fmod fsum gcd degrees fabs]
_known_constants_math = {
'Exp1': 'e',
'Pi': 'pi',
# Only in python >= 3.5:
# 'Infinity': 'inf',
# 'NaN': 'nan'
}
def _print_known_func(self, expr):
known = self.known_functions[expr.__class__.__name__]
return '{name}({args})'.format(name=self._module_format(known),
args=', '.join(map(self._print, expr.args)))
def _print_known_const(self, expr):
known = self.known_constants[expr.__class__.__name__]
return self._module_format(known)
class PythonCodePrinter(CodePrinter):
printmethod = "_pythoncode"
language = "Python"
standard = "python3"
reserved_words = _kw_py2and3.union(_kw_only_py3)
modules = None # initialized to a set in __init__
tab = ' '
_kf = dict(chain(
_known_functions.items(),
[(k, 'math.' + v) for k, v in _known_functions_math.items()]
))
_kc = {k: 'math.'+v for k, v in _known_constants_math.items()}
_operators = {'and': 'and', 'or': 'or', 'not': 'not'}
_default_settings = dict(
CodePrinter._default_settings,
user_functions={},
precision=17,
inline=True,
fully_qualified_modules=True
)
def __init__(self, settings=None):
super(PythonCodePrinter, self).__init__(settings)
self.module_imports = defaultdict(set)
self.known_functions = dict(self._kf, **(settings or {}).get(
'user_functions', {}))
self.known_constants = dict(self._kc, **(settings or {}).get(
'user_constants', {}))
def _declare_number_const(self, name, value):
return "%s = %s" % (name, value)
def _module_format(self, fqn, register=True):
parts = fqn.split('.')
if register and len(parts) > 1:
self.module_imports['.'.join(parts[:-1])].add(parts[-1])
if self._settings['fully_qualified_modules']:
return fqn
else:
return fqn.split('(')[0].split('[')[0].split('.')[-1]
def _format_code(self, lines):
return lines
def _get_comment(self, text):
return " # {0}".format(text)
def _print_NaN(self, expr):
return "float('nan')"
def _print_Infinity(self, expr):
return "float('inf')"
def _print_Mod(self, expr):
PREC = precedence(expr)
return ('{0} % {1}'.format(*map(lambda x: self.parenthesize(x, PREC), expr.args)))
def _print_Piecewise(self, expr):
result = []
i = 0
for arg in expr.args:
e = arg.expr
c = arg.cond
result.append('((')
result.append(self._print(e))
result.append(') if (')
result.append(self._print(c))
result.append(') else (')
i += 1
result = result[:-1]
result.append(') else None)')
result.append(')'*(2*i - 2))
return ''.join(result)
def _print_ITE(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
return self._print(expr.rewrite(Piecewise))
def _print_Sum(self, expr):
loops = (
'for {i} in range({a}, {b}+1)'.format(
i=self._print(i),
a=self._print(a),
b=self._print(b))
for i, a, b in expr.limits)
return '(builtins.sum({function} {loops}))'.format(
function=self._print(expr.function),
loops=' '.join(loops))
def _print_ImaginaryUnit(self, expr):
return '1j'
def _print_MatrixBase(self, expr):
name = expr.__class__.__name__
func = self.known_functions.get(name, name)
return "%s(%s)" % (func, self._print(expr.tolist()))
_print_SparseMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
lambda self, expr: self._print_MatrixBase(expr)
for k in PythonCodePrinter._kf:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_math:
setattr(PythonCodePrinter, '_print_%s' % k, _print_known_const)
def pycode(expr, **settings):
return PythonCodePrinter(settings).doprint(expr)
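# Example (sketch): with the math-backed defaults registered above,
#   >>> from sympy import sin, Symbol
#   >>> pycode(sin(Symbol('x')))
#   'math.sin(x)'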
_not_in_mpmath = 'log1p log2'.split()
_in_mpmath = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_mpmath]
_known_functions_mpmath = dict(_in_mpmath)
_known_constants_mpmath = {
'Pi': 'pi'
}
class MpmathPrinter(PythonCodePrinter):
"""
Lambda printer for mpmath which maintains precision for floats
"""
printmethod = "_mpmathcode"
_kf = dict(chain(
_known_functions.items(),
[(k, 'mpmath.' + v) for k, v in _known_functions_mpmath.items()]
))
def _print_Integer(self, e):
return '%s(%d)' % (self._module_format('mpmath.mpf'), e)
def _print_Float(self, e):
# XXX: This does not handle setting mpmath.mp.dps. It is assumed that
# the caller of the lambdified function will have set it to sufficient
# precision to match the Floats in the expression.
# Remove 'mpz' if gmpy is installed.
args = str(tuple(map(int, e._mpf_)))
return '{func}({args})'.format(func=self._module_format('mpmath.mpf'), args=args)
def _print_uppergamma(self, e):  # printer for the uppergamma function
    return "{0}({1}, {2}, {3})".format(
        self._module_format('mpmath.gammainc'), self._print(e.args[0]), self._print(e.args[1]),
        self._module_format('mpmath.inf'))
def _print_lowergamma(self, e):  # printer for the lowergamma function
    return "{0}({1}, 0, {2})".format(
        self._module_format('mpmath.gammainc'), self._print(e.args[0]), self._print(e.args[1]))
def _print_log2(self, e):
return '{0}({1})/{0}(2)'.format(
self._module_format('mpmath.log'), self._print(e.args[0]))
def _print_log1p(self, e):
return '{0}({1}+1)'.format(
self._module_format('mpmath.log'), self._print(e.args[0]))
for k in MpmathPrinter._kf:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_mpmath:
setattr(MpmathPrinter, '_print_%s' % k, _print_known_const)
_not_in_numpy = 'erf erfc factorial gamma lgamma'.split()
_in_numpy = [(k, v) for k, v in _known_functions_math.items() if k not in _not_in_numpy]
_known_functions_numpy = dict(_in_numpy, **{
'acos': 'arccos',
'acosh': 'arccosh',
'asin': 'arcsin',
'asinh': 'arcsinh',
'atan': 'arctan',
'atan2': 'arctan2',
'atanh': 'arctanh',
'exp2': 'exp2',
})
class NumPyPrinter(PythonCodePrinter):
"""
Numpy printer which handles vectorized piecewise functions,
logical operators, etc.
"""
printmethod = "_numpycode"
_kf = dict(chain(
PythonCodePrinter._kf.items(),
[(k, 'numpy.' + v) for k, v in _known_functions_numpy.items()]
))
_kc = {k: 'numpy.'+v for k, v in _known_constants_math.items()}
def _print_seq(self, seq, delimiter=', '):
"General sequence printer: converts to tuple"
# Print tuples here instead of lists because numba supports
# tuples in nopython mode.
return '({},)'.format(delimiter.join(self._print(item) for item in seq))
def _print_MatMul(self, expr):
"Matrix multiplication printer"
return '({0})'.format(').dot('.join(self._print(i) for i in expr.args))
def _print_DotProduct(self, expr):
# DotProduct allows any shape order, but numpy.dot does matrix
# multiplication, so we have to make sure it gets 1 x n by n x 1.
arg1, arg2 = expr.args
if arg1.shape[0] != 1:
arg1 = arg1.T
if arg2.shape[1] != 1:
arg2 = arg2.T
return "%s(%s, %s)" % (self._module_format('numpy.dot'), self._print(arg1), self._print(arg2))
def _print_Piecewise(self, expr):
"Piecewise function printer"
exprs = '[{0}]'.format(','.join(self._print(arg.expr) for arg in expr.args))
conds = '[{0}]'.format(','.join(self._print(arg.cond) for arg in expr.args))
# If [default_value, True] is a (expr, cond) sequence in a Piecewise object
# it will behave the same as passing the 'default' kwarg to select()
# *as long as* it is the last element in expr.args.
# If this is not the case, it may be triggered prematurely.
return '{0}({1}, {2}, default=numpy.nan)'.format(self._module_format('numpy.select'), conds, exprs)
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '{op}({lhs}, {rhs})'.format(op=self._module_format('numpy.'+op[expr.rel_op]),
lhs=lhs, rhs=rhs)
return super(NumPyPrinter, self)._print_Relational(expr)
def _print_And(self, expr):
"Logical And printer"
# We have to override LambdaPrinter because it uses Python 'and' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_and' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format(self._module_format('numpy.logical_and'), ','.join(self._print(i) for i in expr.args))
def _print_Or(self, expr):
"Logical Or printer"
# We have to override LambdaPrinter because it uses Python 'or' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_or' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format(self._module_format('numpy.logical_or'), ','.join(self._print(i) for i in expr.args))
def _print_Not(self, expr):
"Logical Not printer"
# We have to override LambdaPrinter because it uses Python 'not' keyword.
# If LambdaPrinter didn't define it, we would still have to define our
# own because StrPrinter doesn't define it.
return '{0}({1})'.format(self._module_format('numpy.logical_not'), ','.join(self._print(i) for i in expr.args))
def _print_Min(self, expr):
return '{0}(({1}))'.format(self._module_format('numpy.amin'), ','.join(self._print(i) for i in expr.args))
def _print_Max(self, expr):
return '{0}(({1}))'.format(self._module_format('numpy.amax'), ','.join(self._print(i) for i in expr.args))
def _print_Pow(self, expr):
if expr.exp == 0.5:
return '{0}({1})'.format(self._module_format('numpy.sqrt'), self._print(expr.base))
else:
return super(NumPyPrinter, self)._print_Pow(expr)
def _print_arg(self, expr):
return "%s(%s)" % (self._module_format('numpy.angle'), self._print(expr.args[0]))
def _print_im(self, expr):
return "%s(%s)" % (self._module_format('numpy.imag', self._print(expr.args[0])))
def _print_Mod(self, expr):
return "%s(%s)" % (self._module_format('numpy.mod'), ', '.join(map(self._print, expr.args)))
def _print_re(self, expr):
return "%s(%s)" % (self._module_format('numpy.real'), self._print(expr.args[0]))
def _print_MatrixBase(self, expr):
func = self.known_functions.get(expr.__class__.__name__, None)
if func is None:
func = self._module_format('numpy.array')
return "%s(%s)" % (func, self._print(expr.tolist()))
for k in NumPyPrinter._kf:
setattr(NumPyPrinter, '_print_%s' % k, _print_known_func)
for k in NumPyPrinter._kc:
setattr(NumPyPrinter, '_print_%s' % k, _print_known_const)
_known_functions_scipy_special = {
'erf': 'erf',
'erfc': 'erfc',
'gamma': 'gamma',
'loggamma': 'gammaln'
}
_known_constants_scipy_constants = {
'GoldenRatio': 'golden_ratio'
}
class SciPyPrinter(NumPyPrinter):
_kf = dict(chain(
NumPyPrinter._kf.items(),
[(k, 'scipy.special.' + v) for k, v in _known_functions_scipy_special.items()]
))
_kc = {k: 'scipy.constants.' + v for k, v in _known_constants_scipy_constants.items()}
def _print_SparseMatrix(self, expr):
i, j, data = [], [], []
for (r, c), v in expr._smat.items():
i.append(r)
j.append(c)
data.append(v)
return "{name}({data}, ({i}, {j}), shape={shape})".format(
name=self._module_format('scipy.sparse.coo_matrix'),
data=data, i=i, j=j, shape=expr.shape
)
_print_ImmutableSparseMatrix = _print_SparseMatrix
for k in SciPyPrinter._kf:
setattr(SciPyPrinter, '_print_%s' % k, _print_known_func)
for k in SciPyPrinter._kc:
setattr(SciPyPrinter, '_print_%s' % k, _print_known_const)
class SymPyPrinter(PythonCodePrinter):
_kf = dict([(k, 'sympy.' + v) for k, v in chain(
_known_functions.items(),
_known_functions_math.items()
)])
def _print_Function(self, expr):
mod = expr.func.__module__ or ''
return '%s(%s)' % (self._module_format(mod + ('.' if mod else '') + expr.func.__name__),
', '.join(map(self._print, expr.args)))
| 34.388889
| 119
| 0.601642
| 1,879
| 14,856
| 4.519957
| 0.195849
| 0.040268
| 0.047098
| 0.034617
| 0.33769
| 0.27505
| 0.258919
| 0.206405
| 0.178971
| 0.139291
| 0
| 0.009795
| 0.244076
| 14,856
| 431
| 120
| 34.468677
| 0.746483
| 0.132606
| 0
| 0.091483
| 0
| 0
| 0.142835
| 0.005373
| 0
| 0
| 0
| 0
| 0.003155
| 1
| 0.126183
| false
| 0.003155
| 0.031546
| 0.056782
| 0.391167
| 0.312303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cfb56c23b97ce934940b9509f58841e0ebbb0fe
| 3,493
|
py
|
Python
|
model_building/svr_experiment_configuration.py
|
eubr-atmosphere/a-MLLibrary
|
b6ba472baacea6d793ab4f03275cdfa874e83bc3
|
[
"Apache-2.0"
] | 3
|
2021-09-19T17:06:31.000Z
|
2021-12-10T23:21:21.000Z
|
model_building/svr_experiment_configuration.py
|
eubr-atmosphere/a-MLLibrary
|
b6ba472baacea6d793ab4f03275cdfa874e83bc3
|
[
"Apache-2.0"
] | null | null | null |
model_building/svr_experiment_configuration.py
|
eubr-atmosphere/a-MLLibrary
|
b6ba472baacea6d793ab4f03275cdfa874e83bc3
|
[
"Apache-2.0"
] | 1
|
2021-09-27T13:54:12.000Z
|
2021-09-27T13:54:12.000Z
|
"""
Copyright 2019 Marco Lattuada
Copyright 2019 Danilo Ardagna
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sklearn.svm as svm
import model_building.experiment_configuration as ec
class SVRExperimentConfiguration(ec.ExperimentConfiguration):
"""
Class representing a single experiment configuration for linear regression
Attributes
----------
_linear_regression : LinearRegression
The actual scikt object which performs the linear regression
Methods
-------
_train()
Performs the actual building of the linear model
compute_estimations()
Compute the estimated values for a give set of data
"""
def __init__(self, campaign_configuration, hyperparameters, regression_inputs, prefix):
"""
campaign_configuration: dict of dict:
The set of options specified by the user though command line and campaign configuration files
hyperparameters: dictionary
The set of hyperparameters of this experiment configuration
regression_inputs: RegressionInputs
The input of the regression problem to be solved
"""
super().__init__(campaign_configuration, hyperparameters, regression_inputs, prefix)
self.technique = ec.Technique.SVR
self._regressor = svm.SVR(C=self._hyperparameters['C'], epsilon=self._hyperparameters['epsilon'],
gamma=self._hyperparameters['gamma'], kernel=self._hyperparameters['kernel'],
degree=self._hyperparameters['degree'])
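# A sketch of the hyperparameters dict this class expects (values illustrative):
#   {'C': 1.0, 'epsilon': 0.1, 'gamma': 'scale', 'kernel': 'rbf', 'degree': 3}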
def _compute_signature(self, prefix):
"""
Compute the signature associated with this experiment configuration
"""
signature = prefix.copy()
signature.append("C_" + str(self._hyperparameters['C']))
signature.append("epsilon_" + str(self._hyperparameters['epsilon']))
signature.append("gamma_" + str(self._hyperparameters['gamma']))
signature.append("kernel_" + str(self._hyperparameters['kernel']))
signature.append("degree_" + str(self._hyperparameters['degree']))
return signature
def _train(self):
"""
Build the model with the experiment configuration represented by this object
"""
self._logger.debug("Building model for %s", self._signature)
assert self._regression_inputs
xdata, ydata = self._regression_inputs.get_xy_data(self._regression_inputs.inputs_split["training"])
self._regressor.fit(xdata, ydata)
self._logger.debug("Model built")
# for idx, col_name in enumerate(self._regression_inputs.x_columns):
# self._logger.debug("The coefficient for %s is %f", col_name, self._linear_regression.coef_[idx])
def compute_estimations(self, rows):
"""
Compute the estimations and the MAPE for runs in rows
"""
xdata, _ = self._regression_inputs.get_xy_data(rows)
return self._regressor.predict(xdata)
| 38.811111
| 111
| 0.692242
| 401
| 3,493
| 5.860349
| 0.394015
| 0.080851
| 0.046809
| 0.013617
| 0.074043
| 0.074043
| 0
| 0
| 0
| 0
| 0
| 0.004423
| 0.223304
| 3,493
| 89
| 112
| 39.247191
| 0.861777
| 0.466648
| 0
| 0
| 0
| 0
| 0.071986
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.346154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cfd01e468c618706379749c3f05781c60e2fe7b
| 1,883
|
py
|
Python
|
papermill/tests/test_adl.py
|
dmartinpro/papermill
|
fbb0a60c97cde70e3b278f778cbd366cf54f83f0
|
[
"BSD-3-Clause"
] | null | null | null |
papermill/tests/test_adl.py
|
dmartinpro/papermill
|
fbb0a60c97cde70e3b278f778cbd366cf54f83f0
|
[
"BSD-3-Clause"
] | null | null | null |
papermill/tests/test_adl.py
|
dmartinpro/papermill
|
fbb0a60c97cde70e3b278f778cbd366cf54f83f0
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from ..adl import ADL
import six
if six.PY3:
from unittest.mock import Mock, MagicMock
else:
from mock import Mock, MagicMock
class ADLTest(unittest.TestCase):
"""
Tests for `ADL`
"""
def setUp(self):
self.ls = Mock(return_value=["foo", "bar", "baz"])
self.fakeFile = MagicMock()
self.fakeFile.__iter__.return_value = [b"a", b"b", b"c"]
self.fakeFile.__enter__.return_value = self.fakeFile
self.open = Mock(return_value=self.fakeFile)
self.fakeAdapter = Mock(open=self.open, ls=self.ls)
self.adl = ADL()
self.adl._create_adapter = Mock(return_value=self.fakeAdapter)
def test_split_url_raises_exception_on_invalid_url(self):
with self.assertRaises(Exception) as context:
ADL._split_url("this_is_not_a_valid_url")
self.assertTrue("Invalid ADL url 'this_is_not_a_valid_url'" in str(context.exception))
def test_split_url_splits_valid_url(self):
(store_name, path) = ADL._split_url("adl://foo.azuredatalakestore.net/bar/baz")
self.assertEqual(store_name, "foo")
self.assertEqual(path, "bar/baz")
def test_listdir_calls_ls_on_adl_adapter(self):
self.assertEqual(
self.adl.listdir("adl://foo_store.azuredatalakestore.net/path/to/file"),
["foo", "bar", "baz"],
)
self.ls.assert_called_once_with("path/to/file")
def test_read_opens_and_reads_file(self):
self.assertEqual(
self.adl.read("adl://foo_store.azuredatalakestore.net/path/to/file"), ["a", "b", "c"]
)
self.fakeFile.__iter__.assert_called_once_with()
def test_write_opens_file_and_writes_to_it(self):
self.adl.write("hello world", "adl://foo_store.azuredatalakestore.net/path/to/file")
self.fakeFile.write.assert_called_once_with(b"hello world")
| 36.211538
| 97
| 0.670738
| 258
| 1,883
| 4.596899
| 0.294574
| 0.070826
| 0.033727
| 0.073356
| 0.187184
| 0.141653
| 0.141653
| 0.106239
| 0
| 0
| 0
| 0.000664
| 0.199681
| 1,883
| 51
| 98
| 36.921569
| 0.78633
| 0.007966
| 0
| 0
| 0
| 0
| 0.175486
| 0.13013
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.153846
| false
| 0
| 0.128205
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cfea92f95c14fb4efaa051120fc4e6f1facdf01
| 2,858
|
py
|
Python
|
stanza/models/common/dropout.py
|
rasimuvaikas/stanza
|
21793519a531b0e9d7151e42d180d97785c9a5b8
|
[
"Apache-2.0"
] | 3,633
|
2016-01-21T17:29:13.000Z
|
2022-03-31T13:36:47.000Z
|
stanza/models/common/dropout.py
|
rasimuvaikas/stanza
|
21793519a531b0e9d7151e42d180d97785c9a5b8
|
[
"Apache-2.0"
] | 593
|
2016-01-19T07:16:05.000Z
|
2022-03-31T20:23:58.000Z
|
stanza/models/common/dropout.py
|
rasimuvaikas/stanza
|
21793519a531b0e9d7151e42d180d97785c9a5b8
|
[
"Apache-2.0"
] | 525
|
2016-01-20T03:22:19.000Z
|
2022-03-24T05:51:56.000Z
|
import torch
import torch.nn as nn
class WordDropout(nn.Module):
""" A word dropout layer that's designed for embedded inputs (e.g., any inputs to an LSTM layer).
Given a batch of embedded inputs, this layer randomly set some of them to be a replacement state.
Note that this layer assumes the last dimension of the input to be the hidden dimension of a unit.
"""
def __init__(self, dropprob):
super().__init__()
self.dropprob = dropprob
def forward(self, x, replacement=None):
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
masksize[-1] = 1
dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
res = x.masked_fill(dropmask, 0)
if replacement is not None:
res = res + dropmask.float() * replacement
return res
def extra_repr(self):
return 'p={}'.format(self.dropprob)
class LockedDropout(nn.Module):
"""
A variant of dropout layer that consistently drops out the same parameters over time. Also known as the variational dropout.
This implementation was modified from the LockedDropout implementation in the flair library (https://github.com/zalandoresearch/flair).
"""
def __init__(self, dropprob, batch_first=True):
super().__init__()
self.dropprob = dropprob
self.batch_first = batch_first
def forward(self, x):
if not self.training or self.dropprob == 0:
return x
if not self.batch_first:
m = x.new_empty(1, x.size(1), x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)
else:
m = x.new_empty(x.size(0), 1, x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)
mask = m.div(1 - self.dropprob).expand_as(x)
return mask * x
def extra_repr(self):
return 'p={}'.format(self.dropprob)
class SequenceUnitDropout(nn.Module):
""" A unit dropout layer that's designed for input of sequence units (e.g., word sequence, char sequence, etc.).
Given a sequence of unit indices, this layer randomly set some of them to be a replacement id (usually set to be <UNK>).
"""
def __init__(self, dropprob, replacement_id):
super().__init__()
self.dropprob = dropprob
self.replacement_id = replacement_id
def forward(self, x):
""" :param: x must be a LongTensor of unit indices. """
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
res = x.masked_fill(dropmask, self.replacement_id)
return res
def extra_repr(self):
return 'p={}, replacement_id={}'.format(self.dropprob, self.replacement_id)
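# A usage sketch (shapes are illustrative): WordDropout zeroes whole hidden
# vectors at random positions during training and is the identity in eval mode.
def _word_dropout_example():
    drop = WordDropout(0.2)
    emb = torch.randn(8, 15, 100)  # batch x seq x hidden
    drop.train()
    out = drop(emb)
    assert out.shape == emb.shape
    drop.eval()
    assert torch.equal(drop(emb), emb)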
| 37.605263
| 139
| 0.642407
| 397
| 2,858
| 4.508816
| 0.304786
| 0.113966
| 0.053631
| 0.031844
| 0.429609
| 0.413408
| 0.345251
| 0.345251
| 0.322346
| 0.322346
| 0
| 0.007046
| 0.255073
| 2,858
| 75
| 140
| 38.106667
| 0.833725
| 0.290413
| 0
| 0.520833
| 0
| 0
| 0.015768
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.041667
| 0.0625
| 0.479167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cff5cb6c2fef0ecc0f5ac6be8e4bd36f4fe013c
| 365
|
py
|
Python
|
Day01-15/code/Day15/pdf2.py
|
bdfd/Python_Zero2Hero_DS
|
9dafe90b8112fdc3d07e1aa02e41ed3f019f733c
|
[
"MIT"
] | 3
|
2022-01-15T19:06:19.000Z
|
2022-01-18T16:47:27.000Z
|
Day01-15/code/Day15/pdf2.py
|
bdfd/4.5_Data-Science-Python-Zero2Hero-
|
9dafe90b8112fdc3d07e1aa02e41ed3f019f733c
|
[
"MIT"
] | null | null | null |
Day01-15/code/Day15/pdf2.py
|
bdfd/4.5_Data-Science-Python-Zero2Hero-
|
9dafe90b8112fdc3d07e1aa02e41ed3f019f733c
|
[
"MIT"
] | 1
|
2022-01-09T00:18:49.000Z
|
2022-01-09T00:18:49.000Z
|
"""
Read a PDF file
Version: 0.1
Author: BDFD
Date: 2018-03-26
"""
from PyPDF2 import PdfFileReader
with open('./res/Python课程大纲.pdf', 'rb') as f:
reader = PdfFileReader(f, strict=False)
print(reader.numPages)
if reader.isEncrypted:
reader.decrypt('')
current_page = reader.getPage(5)
print(current_page)
print(current_page.extractText())
| 19.210526
| 45
| 0.682192
| 47
| 365
| 5.234043
| 0.744681
| 0.134146
| 0.130081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040134
| 0.180822
| 365
| 18
| 46
| 20.277778
| 0.782609
| 0.139726
| 0
| 0
| 0
| 0
| 0.071895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7cff626ae151f2363fd9919cb12cd92f5b8974de
| 2,335
|
py
|
Python
|
qt__pyqt__pyside__pyqode/qt__class_tree__parse_and_print__recursively__from__doc_qt_io/gui.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | null | null | null |
qt__pyqt__pyside__pyqode/qt__class_tree__parse_and_print__recursively__from__doc_qt_io/gui.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | null | null | null |
qt__pyqt__pyside__pyqode/qt__class_tree__parse_and_print__recursively__from__doc_qt_io/gui.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'

from PyQt5 import QtWidgets as qtw
from PyQt5.QtTest import QTest

import time

import requests
from bs4 import BeautifulSoup

from console import get_inherited_children, ROOT_URL


class MainWindow(qtw.QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle('qt__class_tree__parse_and_print__recursively__from__doc_qt_io')
        self.tree = qtw.QTreeWidget()
        self.tree.setAlternatingRowColors(True)
        self.tree.setHeaderLabel('NAME')
        self.setCentralWidget(self.tree)
        self.number_total_class = 0

    def _fill_root(self, node: qtw.QTreeWidgetItem, url: str, global_number: int, indent_level=0):
        # Stop once the requested number of classes has been collected.
        if global_number > 0 and self.number_total_class >= global_number:
            return

        # Be polite to doc.qt.io: wait a second between requests.
        QTest.qWait(1000)

        indent = ' ' * indent_level
        rs = requests.get(url)
        root = BeautifulSoup(rs.content, 'html.parser')
        name_class = root.select_one('.context > .title').text.split()[0]
        inherited_children = get_inherited_children(url, root)
        number_inherited_children = len(inherited_children)
        if number_inherited_children > 0:
            name_class = '{} ({})'.format(name_class, number_inherited_children)
            print(indent + name_class + ':')
        else:
            print(indent + name_class)

        item = qtw.QTreeWidgetItem([name_class])
        if not node:
            self.tree.addTopLevelItem(item)
        else:
            node.addChild(item)
            node.setExpanded(True)

        self.number_total_class += 1

        for name, url in inherited_children:
            self._fill_root(item, url, global_number, indent_level + 1)

    def fill_tree(self, global_number=-1):
        self.number_total_class = 0
        self.tree.clear()

        # time.clock() was removed in Python 3.8; use a monotonic counter instead.
        t = time.perf_counter()
        self._fill_root(None, ROOT_URL, global_number)
        qtw.QMessageBox.information(
            self,
            'Complete!',
            'Items: {}.\nElapsed: {:.3f} sec'.format(self.number_total_class, time.perf_counter() - t)
        )

    def closeEvent(self, e):
        quit()


if __name__ == '__main__':
    app = qtw.QApplication([])
    w = MainWindow()
    w.resize(500, 500)
    w.show()
    w.fill_tree()
    app.exec()
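The parent/child bookkeeping in _fill_root is the standard QTreeWidget pattern. A minimal offline sketch of the same top-level-vs-child branching (assumes only PyQt5, no network access; all names here are illustrative):

from PyQt5 import QtWidgets as qtw

def add_node(tree, parent, name, children=()):
    item = qtw.QTreeWidgetItem([name])
    if parent is None:
        tree.addTopLevelItem(item)  # root of the hierarchy
    else:
        parent.addChild(item)
        parent.setExpanded(True)
    for child_name, grandchildren in children:
        add_node(tree, item, child_name, grandchildren)
    return item

if __name__ == '__main__':
    app = qtw.QApplication([])
    tree = qtw.QTreeWidget()
    add_node(tree, None, 'QObject', [('QWidget', [('QFrame', ())])])
    tree.show()
    app.exec()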
| 25.107527
| 98
| 0.628266
| 274
| 2,335
| 5.047445
| 0.390511
| 0.098337
| 0.05423
| 0.072307
| 0.030369
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014518
| 0.262527
| 2,335
| 92
| 99
| 25.380435
| 0.788618
| 0.018415
| 0
| 0.067797
| 0
| 0
| 0.069432
| 0.026638
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.101695
| 0
| 0.20339
| 0.050847
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b00e8ebc8e80cec62f2565854961c322350a073
| 4,676
|
py
|
Python
|
virt/ansible-latest/lib/python2.7/site-packages/ansible/plugins/lookup/template.py
|
lakhlaifi/RedHat-Ansible
|
27c5077cced9d416081fcd5d69ea44bca0317fa4
|
[
"Apache-2.0"
] | 1
|
2020-03-22T01:04:39.000Z
|
2020-03-22T01:04:39.000Z
|
ansible/ansible/plugins/lookup/template.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 7
|
2020-09-07T17:27:56.000Z
|
2022-03-02T06:25:46.000Z
|
ansible/ansible/plugins/lookup/template.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 1
|
2020-03-22T01:04:48.000Z
|
2020-03-22T01:04:48.000Z
|
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2012-17, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
    lookup: template
    author: Michael DeHaan <michael.dehaan@gmail.com>
    version_added: "0.9"
    short_description: retrieve contents of file after templating with Jinja2
    description:
      - Returns a list of strings; for each template in the list of templates you pass in, returns a string containing the results of processing that template.
    options:
      _terms:
        description: list of files to template
      convert_data:
        type: bool
        description: whether to convert YAML into data. If False, strings that are YAML will be left untouched.
      variable_start_string:
        description: The string marking the beginning of a print statement.
        default: '{{'
        version_added: '2.8'
        type: str
      variable_end_string:
        description: The string marking the end of a print statement.
        default: '}}'
        version_added: '2.8'
        type: str
"""

EXAMPLES = """
- name: show templating results
  debug:
    msg: "{{ lookup('template', './some_template.j2') }}"

- name: show templating results with different variable start and end string
  debug:
    msg: "{{ lookup('template', './some_template.j2', variable_start_string='[%', variable_end_string='%]') }}"
"""

RETURN = """
_raw:
   description: file(s) content after templating
"""

import os

from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
from ansible.template import generate_ansible_template_vars
from ansible.utils.display import Display

display = Display()


class LookupModule(LookupBase):

    def run(self, terms, variables, **kwargs):
        convert_data_p = kwargs.get('convert_data', True)
        lookup_template_vars = kwargs.get('template_vars', {})
        ret = []
        variable_start_string = kwargs.get('variable_start_string', None)
        variable_end_string = kwargs.get('variable_end_string', None)

        for term in terms:
            display.debug("File lookup term: %s" % term)

            lookupfile = self.find_file_in_search_path(variables, 'templates', term)
            display.vvvv("File lookup using %s as file" % lookupfile)
            if lookupfile:
                b_template_data, show_data = self._loader._get_file_contents(lookupfile)
                template_data = to_text(b_template_data, errors='surrogate_or_strict')

                # set jinja2 internal search path for includes
                searchpath = variables.get('ansible_search_path', [])
                if searchpath:
                    # our search paths aren't actually the proper ones for jinja includes.
                    # We want to search into the 'templates' subdir of each search path in
                    # addition to our original search paths.
                    newsearchpath = []
                    for p in searchpath:
                        newsearchpath.append(os.path.join(p, 'templates'))
                        newsearchpath.append(p)
                    searchpath = newsearchpath
                searchpath.insert(0, os.path.dirname(lookupfile))

                self._templar.environment.loader.searchpath = searchpath
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string

                # The template will have access to all existing variables,
                # plus some added by ansible (e.g., template_{path,mtime}),
                # plus anything passed to the lookup with the template_vars=
                # argument.
                vars = variables.copy()
                vars.update(generate_ansible_template_vars(lookupfile))
                vars.update(lookup_template_vars)
                self._templar.set_available_variables(vars)

                # do the templating
                res = self._templar.template(template_data, preserve_trailing_newlines=True,
                                             convert_data=convert_data_p, escape_backslashes=False)
                ret.append(res)
            else:
                raise AnsibleError("the template file %s could not be found for the lookup" % term)

        return ret
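The search-path juggling above is the subtle part: every Ansible search path is consulted via its templates/ subdirectory before the path itself, and the template's own directory is searched first of all. A standalone sketch of that ordering (the paths here are made up):

import os

def jinja_search_path(ansible_search_path, lookupfile):
    searchpath = []
    for p in ansible_search_path:
        searchpath.append(os.path.join(p, 'templates'))
        searchpath.append(p)
    searchpath.insert(0, os.path.dirname(lookupfile))
    return searchpath

print(jinja_search_path(['/srv/playbook'], '/srv/playbook/templates/motd.j2'))
# ['/srv/playbook/templates', '/srv/playbook/templates', '/srv/playbook']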
| 40.66087
| 159
| 0.64136
| 543
| 4,676
| 5.338858
| 0.360958
| 0.035874
| 0.045878
| 0.017937
| 0.135219
| 0.135219
| 0.086927
| 0.06209
| 0.031045
| 0.031045
| 0
| 0.007414
| 0.278871
| 4,676
| 114
| 160
| 41.017544
| 0.852313
| 0.13195
| 0
| 0.109756
| 0
| 0.02439
| 0.370489
| 0.040287
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012195
| false
| 0.012195
| 0.085366
| 0
| 0.121951
| 0.036585
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b032c5cf849bb1b6a9241eb068c04ff780d5adc
| 1,957
|
py
|
Python
|
2018/Round 1A/A.py
|
elvisyjlin/google-code-jam
|
7fe8244c5ae07a9896acf9c48f3a06b306b393b1
|
[
"MIT"
] | null | null | null |
2018/Round 1A/A.py
|
elvisyjlin/google-code-jam
|
7fe8244c5ae07a9896acf9c48f3a06b306b393b1
|
[
"MIT"
] | null | null | null |
2018/Round 1A/A.py
|
elvisyjlin/google-code-jam
|
7fe8244c5ae07a9896acf9c48f3a06b306b393b1
|
[
"MIT"
] | null | null | null |
def solve():
    # Read input
    R, C, H, V = map(int, input().split())
    choco = []
    for _ in range(R):
        choco.append([0] * C)
    choco_row, choco_col = [0]*R, [0]*C
    num_choco = 0
    for i in range(R):
        row = input()
        for j in range(C):
            if row[j] == '@':
                choco_col[j] += 1
                choco[i][j] = 1
        choco_row[i] = row.count('@')
        num_choco += choco_row[i]

    # Find H and V cuts
    if num_choco == 0:
        return 'POSSIBLE'
    H_idx, V_idx = [], []
    flag = True
    if num_choco%(H+1)==0 and num_choco%(V+1)==0:
        num_choco_h = num_choco/(H+1)
        num_choco_v = num_choco/(V+1)
        accum = 0
        for i, r in enumerate(choco_row):
            accum += r
            if accum == num_choco_h:
                accum = 0
                H_idx.append(i)
            elif accum > num_choco_h:
                flag = False
                break
        if not flag:
            return 'IMPOSSIBLE'
        accum = 0
        for i, c in enumerate(choco_col):
            accum += c
            if accum == num_choco_v:
                accum = 0
                V_idx.append(i)
            elif accum > num_choco_v:
                flag = False
                break
        if not flag:
            return 'IMPOSSIBLE'
    else:
        return 'IMPOSSIBLE'

    # Check each piece
    r_from = 0
    num_prev = None
    for r in H_idx:
        c_from = 0
        for c in V_idx:
            num = 0
            for i in range(r_from, r+1):
                for j in range(c_from, c+1):
                    num += choco[i][j]
            if num_prev is None:
                num_prev = num
            elif num_prev != num:
                return 'IMPOSSIBLE'
            c_from = c+1
        r_from = r+1
    return 'POSSIBLE'


if __name__ == '__main__':
    T = int(input())
    for t in range(T):
        print('Case #{}: {}'.format(t+1, solve()))
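A quick hand-checked run (the grid below is made up, not a judge case): 8 chips, one horizontal and one vertical cut, and each of the four resulting pieces holds exactly 2 chips, so the script prints POSSIBLE.

import io, sys

sys.stdin = io.StringIO('1\n3 6 1 1\n.@@..@\n.....@\n@.@@.@\n')
T = int(input())
for t in range(T):
    print('Case #{}: {}'.format(t + 1, solve()))  # Case #1: POSSIBLE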
| 27.56338
| 50
| 0.444558
| 266
| 1,957
| 3.078947
| 0.203008
| 0.136752
| 0.054945
| 0.017094
| 0.222222
| 0.192918
| 0.161172
| 0.095238
| 0
| 0
| 0
| 0.022873
| 0.441492
| 1,957
| 71
| 51
| 27.56338
| 0.726441
| 0.022994
| 0
| 0.242424
| 0
| 0
| 0.040859
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015152
| false
| 0
| 0
| 0
| 0.106061
| 0.015152
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b041831b70999f5552fbde4cf4fd10965b426d5
| 9,798
|
py
|
Python
|
desktop/libs/liboozie/src/liboozie/submittion_tests.py
|
vinaymundada27/Hue
|
7bffb33bbe7cfa34d340241c4ba3b19476211b2a
|
[
"Apache-2.0"
] | 1
|
2018-08-01T05:10:26.000Z
|
2018-08-01T05:10:26.000Z
|
desktop/libs/liboozie/src/liboozie/submittion_tests.py
|
vinaymundada27/Hue
|
7bffb33bbe7cfa34d340241c4ba3b19476211b2a
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/liboozie/src/liboozie/submittion_tests.py
|
vinaymundada27/Hue
|
7bffb33bbe7cfa34d340241c4ba3b19476211b2a
|
[
"Apache-2.0"
] | 1
|
2019-07-23T12:36:09.000Z
|
2019-07-23T12:36:09.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from django.contrib.auth.models import User
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_true, assert_not_equal

from hadoop import cluster, pseudo_hdfs4
from hadoop.conf import HDFS_CLUSTERS, MR_CLUSTERS, YARN_CLUSTERS
from liboozie.submittion import Submission

from oozie.tests import OozieMockBase
from desktop.lib.test_utils import clear_sys_caches
from desktop.lib.django_test_util import make_logged_in_client


LOG = logging.getLogger(__name__)


@attr('requires_hadoop')
def test_copy_files():
  cluster = pseudo_hdfs4.shared_cluster()

  try:
    c = make_logged_in_client()
    user = User.objects.get(username='test')

    prefix = '/tmp/test_copy_files'

    if cluster.fs.exists(prefix):
      cluster.fs.rmtree(prefix)

    # Jars in various locations
    deployment_dir = '%s/workspace' % prefix
    external_deployment_dir = '%s/deployment' % prefix
    jar_1 = '%s/udf1.jar' % prefix
    jar_2 = '%s/lib/udf2.jar' % prefix
    jar_3 = '%s/udf3.jar' % deployment_dir
    jar_4 = '%s/lib/udf4.jar' % deployment_dir  # Never move

    cluster.fs.mkdir(prefix)
    cluster.fs.create(jar_1)
    cluster.fs.create(jar_2)
    cluster.fs.create(jar_3)
    cluster.fs.create(jar_4)

    class MockNode():
      def __init__(self, jar_path):
        self.jar_path = jar_path

    class MockJob():
      def __init__(self):
        self.node_list = [
          MockNode(jar_1),
          MockNode(jar_2),
          MockNode(jar_3),
          MockNode(jar_4),
        ]

      def get_application_filename(self):
        return 'workflow.xml'

    submission = Submission(user, job=MockJob(), fs=cluster.fs, jt=cluster.jt)

    submission._copy_files(deployment_dir, "<xml>My XML</xml>")
    submission._copy_files(external_deployment_dir, "<xml>My XML</xml>")

    # All sources still there
    assert_true(cluster.fs.exists(jar_1))
    assert_true(cluster.fs.exists(jar_2))
    assert_true(cluster.fs.exists(jar_3))
    assert_true(cluster.fs.exists(jar_4))

    deployment_dir = deployment_dir + '/lib'
    external_deployment_dir = external_deployment_dir + '/lib'

    list_dir_workspace = cluster.fs.listdir(deployment_dir)
    list_dir_deployement = cluster.fs.listdir(external_deployment_dir)

    # All destinations there
    assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'), list_dir_workspace)
    assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'), list_dir_workspace)
    assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'), list_dir_workspace)
    assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'), list_dir_workspace)
    assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'), list_dir_deployement)
    assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'), list_dir_deployement)
    assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'), list_dir_deployement)
    assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'), list_dir_deployement)

    stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
    stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
    stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
    stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')

    submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>")

    assert_not_equal(stats_udf1['fileId'], cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
    assert_not_equal(stats_udf2['fileId'], cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
    assert_not_equal(stats_udf3['fileId'], cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
    assert_equal(stats_udf4['fileId'], cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])
  finally:
    try:
      cluster.fs.rmtree(prefix)
    except:
      LOG.exception('failed to remove %s' % prefix)


class MockFs():
  def __init__(self, logical_name=None):
    self.fs_defaultfs = 'hdfs://curacao:8020'
    self.logical_name = logical_name if logical_name else ''


class MockJt():
  def __init__(self, logical_name=None):
    self.logical_name = logical_name if logical_name else ''


class TestSubmission(OozieMockBase):

  def test_get_properties(self):
    submission = Submission(self.user, fs=MockFs())

    assert_equal({}, submission.properties)

    submission._update_properties('curacao:8032', '/deployment_dir')

    assert_equal({
        'jobTracker': 'curacao:8032',
        'nameNode': 'hdfs://curacao:8020'
    }, submission.properties)

  def test_get_logical_properties(self):
    submission = Submission(self.user, fs=MockFs(logical_name='fsname'), jt=MockJt(logical_name='jtname'))

    assert_equal({}, submission.properties)

    submission._update_properties('curacao:8032', '/deployment_dir')

    assert_equal({
        'jobTracker': 'jtname',
        'nameNode': 'fsname'
    }, submission.properties)

  def test_update_properties(self):
    finish = []
    finish.append(MR_CLUSTERS.set_for_testing({'default': {}}))
    finish.append(MR_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
    finish.append(YARN_CLUSTERS.set_for_testing({'default': {}}))
    finish.append(YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
    try:
      properties = {
        'user.name': 'hue',
        'test.1': 'http://localhost/test?test1=test&test2=test',
        'nameNode': 'hdfs://curacao:8020',
        'jobTracker': 'jtaddress'
      }

      final_properties = properties.copy()
      submission = Submission(None, properties=properties, oozie_id='test', fs=MockFs())
      assert_equal(properties, submission.properties)
      submission._update_properties('jtaddress', 'deployment-directory')
      assert_equal(final_properties, submission.properties)

      clear_sys_caches()
      fs = cluster.get_hdfs()
      jt = cluster.get_next_ha_mrcluster()[1]
      final_properties = properties.copy()
      final_properties.update({
        'jobTracker': 'jtaddress',
        'nameNode': fs.fs_defaultfs
      })
      submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
      assert_equal(properties, submission.properties)
      submission._update_properties('jtaddress', 'deployment-directory')
      assert_equal(final_properties, submission.properties)

      finish.append(HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode'))
      finish.append(MR_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('jobtracker'))
      clear_sys_caches()
      fs = cluster.get_hdfs()
      jt = cluster.get_next_ha_mrcluster()[1]
      final_properties = properties.copy()
      final_properties.update({
        'jobTracker': 'jobtracker',
        'nameNode': 'namenode'
      })
      submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
      assert_equal(properties, submission.properties)
      submission._update_properties('jtaddress', 'deployment-directory')
      assert_equal(final_properties, submission.properties)
    finally:
      clear_sys_caches()
      for reset in finish:
        reset()

  def test_get_external_parameters(self):
    xml = """
<workflow-app name="Pig" xmlns="uri:oozie:workflow:0.4">
    <start to="Pig"/>
    <action name="Pig">
        <pig>
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <prepare>
                <delete path="${output}"/>
            </prepare>
            <script>aggregate.pig</script>
            <argument>-param</argument>
            <argument>INPUT=${input}</argument>
            <argument>-param</argument>
            <argument>OUTPUT=${output}</argument>
            <configuration>
              <property>
                <name>mapred.input.format.class</name>
                <value>org.apache.hadoop.examples.SleepJob$SleepInputFormat</value>
              </property>
            </configuration>
        </pig>
        <ok to="end"/>
        <error to="kill"/>
    </action>
    <kill name="kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <end name="end"/>
</workflow-app>
"""
    properties = """
#
# Licensed to the Hue
#
nameNode=hdfs://localhost:8020
jobTracker=localhost:8021
queueName=default
examplesRoot=examples
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/pig
"""
    parameters = Submission(self.user)._get_external_parameters(xml, properties)

    assert_equal({'oozie.use.system.libpath': 'true',
                  'input': '',
                  'jobTracker': 'localhost:8021',
                  'oozie.wf.application.path': '${nameNode}/user/${user.name}/${examplesRoot}/apps/pig',
                  'examplesRoot': 'examples',
                  'output': '',
                  'nameNode': 'hdfs://localhost:8020',
                  'queueName': 'default'
                  },
                 parameters)
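MockFs and MockJt above are plain constructor-injected test doubles: Submission only reads fs_defaultfs and logical_name, so the tests never need a live cluster. The contract they pin down, illustrated in isolation (these names are illustrative, not from Hue):

class FakeFs:
    fs_defaultfs = 'hdfs://example:8020'
    logical_name = ''

def name_node_for(fs):
    # The logical (HA) name wins when configured; otherwise the physical default FS.
    return fs.logical_name or fs.fs_defaultfs

assert name_node_for(FakeFs()) == 'hdfs://example:8020'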
| 35.11828
| 106
| 0.677689
| 1,179
| 9,798
| 5.417303
| 0.217981
| 0.043682
| 0.030531
| 0.035698
| 0.431658
| 0.421325
| 0.376546
| 0.294504
| 0.263034
| 0.247221
| 0
| 0.012255
| 0.192182
| 9,798
| 278
| 107
| 35.244604
| 0.794694
| 0.086038
| 0
| 0.236453
| 0
| 0.004926
| 0.267264
| 0.081925
| 0
| 0
| 0
| 0
| 0.137931
| 1
| 0.049261
| false
| 0
| 0.049261
| 0.004926
| 0.128079
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b04b9ebe40e4d32dbf9b4d850ad1eefd373d8ea
| 12,721
|
py
|
Python
|
Training/train_baseHD.py
|
Wenyuan-Vincent-Li/SSL_Seg_GAN
|
8f6c45fd000ea12468dccf211b376fadbf4759c6
|
[
"Apache-2.0"
] | 1
|
2022-03-09T11:51:22.000Z
|
2022-03-09T11:51:22.000Z
|
Training/train_baseHD.py
|
Wenyuan-Vincent-Li/SSL_Seg_GAN
|
8f6c45fd000ea12468dccf211b376fadbf4759c6
|
[
"Apache-2.0"
] | null | null | null |
Training/train_baseHD.py
|
Wenyuan-Vincent-Li/SSL_Seg_GAN
|
8f6c45fd000ea12468dccf211b376fadbf4759c6
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data

from Training import functions
from Training.imresize import imresize
import matplotlib.pyplot as plt
from Models.pix2pixHD_base import GANLoss, VGGLoss
from Models.pix2pixHD2 import mask2onehot


class Losses():
    def __init__(self, opt):
        self.criterionGAN = GANLoss(not opt.no_lsgan)
        self.criterionFeat = nn.L1Loss()
        if opt.contour:
            self.crossEntropy = nn.BCEWithLogitsLoss()
        else:
            self.crossEntropy = nn.CrossEntropyLoss()
        if not opt.no_vgg_loss:
            self.criterionVGG = VGGLoss()


def train_single_scale(dataloader, netD, netG, netS, reals, Gs, Ss, in_s, in_s_S, NoiseAmp, NoiseAmpS, opt):
    '''
    :param netD: currD
    :param netG: currG
    :param netS: currS
    :param reals: a list of image pyramid  ## TODO: you can just pass image shape here
    :param Gs: list of prev netG
    :param Ss: list of prev netS
    :param in_s: 0 -> all zero [1, 3, 26, 26]
    :param NoiseAmp: [] -> [1]
    :param opt: config
    :return:
    '''
    loss = Losses(opt)
    real = reals[opt.scale_num]  # find the current level image xn
    opt.nzx = real[0]
    opt.nzy = real[1]
    # z_opt = 0  ## dummy z_opt
    alpha = opt.alpha

    # setup optimizers and schedulers for D, G and S
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))
    optimizerS = optim.Adam(netS.parameters(), lr=opt.lr_s, betas=(opt.beta1, 0.999))
    schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[opt.niter * 0.8], gamma=opt.gamma)
    schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[opt.niter * 0.8], gamma=opt.gamma)
    schedulerS = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerS, milestones=[opt.niter * 0.8],
                                                      gamma=opt.gamma)

    errD2plot = []
    errG2plot = []
    D_real2plot = []
    D_fake2plot = []

    for epoch in range(opt.niter):  # niter = 2000
        if Gs == [] and Ss == []:
            noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize)  # [None, 1, 32, 32]
            noise_ = noise_.expand(opt.batchSize, 3, opt.nzx, opt.nzy)
            ## noise_: for generating fake samples through the generator
        else:
            noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize)

        for j, data in enumerate(dataloader):
            data['image'] = data['image'].to(opt.device)
            data['label'] = data['label'].long().to(opt.device)

            ############################
            # (1) Update D network: maximize D(x) + D(G(z))
            ###########################
            # train with real
            netD.zero_grad()
            pred_real = netD(data['image'], data['label'][:, 0:1, ...])
            loss_D_real = loss.criterionGAN(pred_real, True)
            D_x = loss_D_real.item()

            # train with fake
            if (j == 0) & (epoch == 0):  # first iteration of training at this level
                if Gs == [] and Ss == []:
                    prev = torch.full([opt.batchSize, opt.nc_z, opt.nzx, opt.nzy], 0, device=opt.device)
                    in_s = prev  # full of 0 [None, 3, 32, 32]
                    prev_S = torch.full([opt.batchSize, opt.label_nc, opt.nzx, opt.nzy], 0, device=opt.device)
                    in_s_S = prev_S  # full of 0 [None, 4, 32, 32]
                    mask = data['label'][:, 0:1, ...]
                    opt.noise_amp = opt.noise_amp_init
                    opt.noise_amp_S = opt.noise_amp_init
                else:
                    prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt)
                    ## given a new noise, prev is an image generated by the previous generator with bilinear upsampling [1, 3, 33, 33]
                    criterion = nn.MSELoss()
                    RMSE = torch.sqrt(criterion(data['image'], prev))
                    opt.noise_amp = opt.noise_amp_init * RMSE
                    prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt)  ## prob with [None, 4, 32, 32]
                    onehot_label = mask2onehot(data['label'][:, 0:1, ...], opt.label_nc)
                    RMSE_S = torch.sqrt(criterion(onehot_label, prev_S))
                    # RMSE_S = 0
                    opt.noise_amp_S = opt.noise_amp_init * RMSE_S
                    mask = data['label'][:, 0:1, ...]
            else:
                prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt)
                prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt)
                mask = data['label'][:, 0:1, ...]

            if Gs == []:
                noise = noise_  ## Gaussian noise for generating the image [None, 3, 42, 42]
            else:
                noise = opt.noise_amp * noise_ + prev  ## [None, 3, 43, 43] the new noise equals the previously generated image plus the Gaussian noise

            fake = netG(noise.detach(), prev, mask)  # [None, 3, 32, 32], the same size as the input image
            # detach() makes sure that the gradients don't go to the noise.
            # prev: [None, 3, 42, 42] -> [None, 3, 43, 43]; at the first step prev = 0, afterwards prev = an image generated by the previous generator with bilinear upsampling
            pred_fake = netD(fake.detach(), data['label'][:, 0:1, ...])  # output shape [1, 1, 16, 16] -> [1, 1, 23, 23]
            # print(len(pred_fake), len(pred_fake[0]))
            loss_D_fake = loss.criterionGAN(pred_fake, False)
            D_G_z = loss_D_fake.item()

            # segment_logit, segment_mask = netS(data['image'], mask2onehot(prev_S, opt.label_nc))
            # print(data['image'].shape, onehot.shape)
            # print(epoch, j)
            segment_logit, segment_prob, segment_mask = netS(data['image'], prev_S.detach())
            pred_fake_S = netD(data['image'], segment_prob.detach())
            loss_D_fake_S = loss.criterionGAN(pred_fake_S, False)
            D_S_z = loss_D_fake_S.item()

            errD = (loss_D_real + 0.5 * loss_D_fake + 0.5 * loss_D_fake_S)  ## TODO: figure out a proper coefficient
            errD.backward()
            optimizerD.step()
            errD2plot.append(errD.detach())  ## errD for each iteration

            ############################
            # (2) Update G network: maximize D(G(z))
            ###########################
            netG.zero_grad()
            pred_fake = netD(fake, data['label'][:, 0:1, ...])
            loss_G_GAN = 0.5 * loss.criterionGAN(pred_fake, True)

            # GAN feature matching loss
            loss_G_GAN_Feat = 0
            if not opt.no_ganFeat_loss:
                feat_weights = 4.0 / (opt.n_layers_D + 1)
                D_weights = 1.0 / opt.num_D
                for i in range(opt.num_D):
                    for j in range(len(pred_fake[i]) - 1):
                        loss_G_GAN_Feat += D_weights * feat_weights * \
                            loss.criterionFeat(pred_fake[i][j],
                                               pred_real[i][j].detach()) * opt.lambda_feat

            # VGG feature matching loss
            loss_G_VGG = 0
            if not opt.no_vgg_loss:
                loss_G_VGG = loss.criterionVGG(fake, data['image']) * opt.lambda_feat

            ## reconstruction loss
            if alpha != 0:  ## alpha = 10, calculate the reconstruction loss
                Recloss = nn.MSELoss()
                rec_loss = alpha * Recloss(fake, data['image'])
            else:
                rec_loss = 0

            errG = loss_G_GAN + loss_G_GAN_Feat + loss_G_VGG + rec_loss
            errG.backward()
            optimizerG.step()

            ############################
            # (3) Update S network: maximize D(S(z))
            ###########################
            netS.zero_grad()
            pred_fake_S = netD(data['image'], segment_prob)
            loss_G_GAN_S = 0.03 * loss.criterionGAN(pred_fake_S, True)

            # Segmentation loss
            if opt.contour:
                loss_G_Seg = loss.crossEntropy(segment_logit, data['label'].float())
            else:
                loss_G_Seg = loss.crossEntropy(segment_prob, torch.squeeze(data['label'][:, 0:1, ...], dim=1))

            # GAN feature matching loss
            loss_G_GAN_Feat_S = 0
            if not opt.no_ganFeat_loss:
                feat_weights = 4.0 / (opt.n_layers_D + 1)
                D_weights = 1.0 / opt.num_D
                for i in range(opt.num_D):
                    for j in range(len(pred_fake_S[i]) - 1):
                        loss_G_GAN_Feat_S += D_weights * feat_weights * \
                            loss.criterionFeat(pred_fake_S[i][j],
                                               pred_real[i][j].detach()) * opt.lambda_feat

            errS = loss_G_GAN_S + loss_G_GAN_Feat_S + loss_G_Seg
            errS.backward()
            optimizerS.step()

        ## for every epoch, do the following:
        errG2plot.append(errG.detach())  ## errG for each iteration
        D_real2plot.append(D_x)  ## discriminator loss on real
        D_fake2plot.append(D_G_z + D_S_z)  ## discriminator loss on fake

        if epoch % 25 == 0 or epoch == (opt.niter - 1):
            print('scale %d:[%d/%d]' % (opt.scale_num, epoch, opt.niter))

        if epoch % 25 == 0 or epoch == (opt.niter - 1):
            plt.imsave('%s/fake_sample_%d.png' % (opt.outf, epoch),
                       functions.convert_image_np(fake.detach()), vmin=0, vmax=1)
            plt.imsave('%s/fake_sample_real_%d.png' % (opt.outf, epoch),
                       functions.convert_image_np(data['image']), vmin=0, vmax=1)
            plt.imsave('%s/fake_sample_mask_%d.png' % (opt.outf, epoch),
                       functions.convert_mask_np(data['label'][:, 0:1, ...], num_classes=opt.label_nc))
            plt.imsave('%s/segmentation_mask_%d.png' % (opt.outf, epoch),
                       functions.convert_mask_np(segment_mask.detach(), num_classes=opt.label_nc))

        schedulerD.step()
        schedulerG.step()
        schedulerS.step()

    functions.save_networks(netG, netD, netS, opt)  ## save netG, netD, z_opt; opt is used to parse the output path
    return in_s, in_s_S, netG, netS


def draw_concat(Gs, masks, reals, NoiseAmp, in_s, mode, opt):
    '''
    :param Gs: [G0]
    :param masks: [down scaled mask]
    :param reals: [image pyramid] only used to represent the image shape
    :param NoiseAmp: [1]
    :param in_s: all zeros [1, 3, 26, 26]
    :param mode: 'rand'
    :param opt:
    :return:
    '''
    G_z = in_s[:opt.batchSize, :, :, :]  # [None, 3, 26, 26] all zeros, image input for the coarsest level
    if len(Gs) > 0:
        if mode == 'generator':
            count = 0
            for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp):
                if count == 0:
                    z = functions.generate_noise([1, real_curr[0], real_curr[1]],
                                                 opt.batchSize)
                    z = z.expand(opt.batchSize, G_z.shape[1], z.shape[2], z.shape[3])
                else:
                    z = functions.generate_noise(
                        [opt.nc_z, real_curr[0], real_curr[1]], opt.batchSize)
                G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]]  ## G_z [None, 3, 32, 32]
                z_in = noise_amp * z + G_z
                G_z = G(z_in.detach(), G_z, mask)  ## [1, 3, 26, 26] output of the previous generator
                G_z = imresize(G_z, real_next[1] / real_curr[1], opt)
                G_z = G_z[:, :, 0:real_next[0],
                          0:real_next[1]]  ## resize the image to be compatible with the current G [1, 3, 33, 33]
                count += 1
        elif mode == 'segment':
            count = 0
            for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp):
                G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]]  ## G_z [None, 3, 32, 32]
                _, G_z, _ = G(mask, G_z)  ## [1, 3, 26, 26] output of the previous segmenter
                if opt.contour:
                    G_z = torch.cat((G_z, 1 - G_z), 1)
                G_z = imresize(G_z, real_next[1] / real_curr[1], opt)
                G_z = G_z[:, :, 0:real_next[0],
                          0:real_next[1]]  ## resize to be compatible with the current G [1, 3, 33, 33]
                count += 1
    return G_z
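The core recurrence shared by train_single_scale and draw_concat is SinGAN-style noise mixing: the generator input at each scale is amp * noise + upsampled previous output, with amp rescaled by the RMSE between the real image and that output. A minimal sketch of just that step (assumes only torch; the shapes are illustrative):

import torch

prev = torch.zeros(1, 3, 32, 32)   # upsampled output of the coarser generator
real = torch.rand(1, 3, 32, 32)    # real image at this scale
noise = torch.randn(1, 3, 32, 32)

noise_amp_init = 0.1
noise_amp = noise_amp_init * torch.sqrt(torch.mean((real - prev) ** 2))  # RMSE-scaled
z_in = noise_amp * noise + prev    # what netG consumes at this scale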
| 48.003774
| 162
| 0.537929
| 1,693
| 12,721
| 3.855877
| 0.163024
| 0.009191
| 0.012255
| 0.015165
| 0.418352
| 0.350184
| 0.309283
| 0.289675
| 0.213542
| 0.184436
| 0
| 0.031803
| 0.32521
| 12,721
| 264
| 163
| 48.185606
| 0.728681
| 0.190001
| 0
| 0.277778
| 0
| 0
| 0.033881
| 0.010024
| 0
| 0
| 0
| 0.007576
| 0
| 1
| 0.016667
| false
| 0
| 0.044444
| 0
| 0.077778
| 0.005556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b0543a7aff4c6ab6b022a2d8e6d154ed4873777
| 1,528
|
py
|
Python
|
trabantsim/prototypes/space_invaders.py
|
highfestiva/life
|
b05b592502d72980ab55e13e84330b74a966f377
|
[
"BSD-3-Clause"
] | 9
|
2019-09-03T18:33:31.000Z
|
2022-02-04T04:00:02.000Z
|
trabantsim/prototypes/space_invaders.py
|
highfestiva/life
|
b05b592502d72980ab55e13e84330b74a966f377
|
[
"BSD-3-Clause"
] | null | null | null |
trabantsim/prototypes/space_invaders.py
|
highfestiva/life
|
b05b592502d72980ab55e13e84330b74a966f377
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Space Invadersishkebab.

from trabant import *

# ASCII geometries (sprite alignment approximate; the original leading spaces were lost).
shipascii = r'''
    /\
/XXXXXXXX\
v        v
'''

invader = r'''
 /XXXXXX\
/XXXXXXXX\
XXXXXXXXXX
XX  XX  XX
\XXXXXXXX/
  /XX XX\
/X/ \/ \X\
X/      \X
'''

cam(distance=250)
gravity((0,0,0))

ship = create_ascii_object(shipascii, pos=(0,0,-100), col='#070')
shots = []
invaderspeeds,isi = [(25,0,0), (0,0,-10), (-25,0,0), (0,0,-10)],0
invaders = set()
for y in range(2):
    for x in range(8):
        invaders.add(create_ascii_object(invader, pos=(x*25-130,0,100-y*20), col=rndvec().abs(), physmesh=True))
for invader in invaders:
    invader.vel(invaderspeeds[0])

while loop():
    # Steering.
    vel = keydir()*50 + tapdir(ship.pos())*4
    ship.vel((vel.x,0,0))  # Only move in X.

    # Shooting.
    is_tap_close = taps() and tapdir(ship.pos()).x < 3
    is_shooting = 'Space' in keys() or 'LCtrl' in keys() or is_tap_close
    if is_shooting and timeout(0.7, first_hit=True):
        shots += [create_sphere(ship.pos()+vec3(0,0,10), vel=(0,0,200), col='#fff')]
        sound(sound_bang, shots[-1].pos())

    # Run invaders.
    if timeout(3, timer='invaders'):
        isi = (isi+1)%len(invaderspeeds)
        [i.vel(invaderspeeds[isi]) for i in invaders]

    # Check collisions, make explosions.
    for o in collided_objects():
        if o in invaders:
            invaders.remove(o)
            explode(o.pos(),o.vel(),5)
        elif o == ship:
            while loop():
                pass
        o.release()
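timeout() above comes from the trabant API. Assuming its semantics are "return True at most once per period, and immediately on the first call when first_hit=True", a standard-library equivalent would look roughly like this sketch:

import time

_last_fire = {}

def timeout(period, timer='default', first_hit=False):
    now = time.monotonic()
    prev = _last_fire.get(timer)
    if prev is None:
        _last_fire[timer] = now
        return first_hit
    if now - prev >= period:
        _last_fire[timer] = now
        return True
    return False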
| 24.645161
| 112
| 0.581806
| 229
| 1,528
| 3.820961
| 0.441048
| 0.027429
| 0.017143
| 0.011429
| 0.018286
| 0.018286
| 0
| 0
| 0
| 0
| 0
| 0.058369
| 0.237565
| 1,528
| 61
| 113
| 25.04918
| 0.692704
| 0.096859
| 0
| 0.130435
| 0
| 0
| 0.110058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.021739
| 0.021739
| 0
| 0.021739
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b05df704fde4ca413cc3974d404975347c287a5
| 11,550
|
py
|
Python
|
model/backbone/xception.py
|
Shang-XH/BAFTT
|
62392325342f48b8a89f0c2bf71e48026dd90629
|
[
"MIT"
] | 4
|
2021-09-07T03:29:38.000Z
|
2021-09-07T04:24:31.000Z
|
model/backbone/xception.py
|
Shang-XH/BAFTT
|
62392325342f48b8a89f0c2bf71e48026dd90629
|
[
"MIT"
] | null | null | null |
model/backbone/xception.py
|
Shang-XH/BAFTT
|
62392325342f48b8a89f0c2bf71e48026dd90629
|
[
"MIT"
] | null | null | null |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo

from model.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d


def fixed_padding(inputs, kernel_size, dilation):
    kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
    return padded_inputs


class SeparableConv2d(nn.Module):
    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, BatchNorm=None):
        super(SeparableConv2d, self).__init__()

        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,
                               groups=inplanes, bias=bias)
        self.bn = BatchNorm(inplanes)
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
        x = self.conv1(x)
        x = self.bn(x)
        x = self.pointwise(x)
        return x


class Block(nn.Module):
    def __init__(self, inplanes, planes, reps, stride=1, dilation=1, BatchNorm=None,
                 start_with_relu=True, grow_first=True, is_last=False):
        super(Block, self).__init__()

        if planes != inplanes or stride != 1:
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
            self.skipbn = BatchNorm(planes)
        else:
            self.skip = None

        self.relu = nn.ReLU(inplace=True)
        rep = []

        filters = inplanes
        if grow_first:
            rep.append(self.relu)
            rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))
            filters = planes

        for i in range(reps - 1):
            rep.append(self.relu)
            rep.append(SeparableConv2d(filters, filters, 3, 1, dilation, BatchNorm=BatchNorm))
            rep.append(BatchNorm(filters))

        if not grow_first:
            rep.append(self.relu)
            rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))

        if stride != 1:
            rep.append(self.relu)
            rep.append(SeparableConv2d(planes, planes, 3, 2, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))

        if stride == 1 and is_last:
            rep.append(self.relu)
            rep.append(SeparableConv2d(planes, planes, 3, 1, BatchNorm=BatchNorm))
            rep.append(BatchNorm(planes))

        if not start_with_relu:
            rep = rep[1:]

        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        x = self.rep(inp)

        if self.skip is not None:
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp

        x = x + skip
        return x


class AlignedXception(nn.Module):
    """
    Modified Aligned Xception
    """
    def __init__(self, output_stride, BatchNorm,
                 pretrained=True):
        super(AlignedXception, self).__init__()

        if output_stride == 16:
            entry_block3_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif output_stride == 8:
            entry_block3_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        else:
            raise NotImplementedError

        # Entry flow
        self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False)
        self.bn1 = BatchNorm(32)
        self.relu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.bn2 = BatchNorm(64)

        self.block1 = Block(64, 128, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False)
        self.block2 = Block(128, 256, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False,
                            grow_first=True)
        self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, BatchNorm=BatchNorm,
                            start_with_relu=True, grow_first=True, is_last=True)

        # Middle flow
        self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                            BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                            BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                            BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                            BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                            BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                            BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
        self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)

        # Exit flow
        self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0],
                             BatchNorm=BatchNorm, start_with_relu=True, grow_first=False, is_last=True)

        self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
        self.bn3 = BatchNorm(1536)

        self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
        self.bn4 = BatchNorm(1536)

        self.conv5 = SeparableConv2d(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
        self.bn5 = BatchNorm(2048)

        # Init weights
        self._init_weight()

        # Load pretrained model
        if pretrained:
            self._load_pretrained_model()

    def forward(self, x):
        # Entry flow
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.block1(x)
        # add relu here
        x = self.relu(x)
        low_level_feat = x
        x = self.block2(x)
        x = self.block3(x)

        # Middle flow
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.block13(x)
        x = self.block14(x)
        x = self.block15(x)
        x = self.block16(x)
        x = self.block17(x)
        x = self.block18(x)
        x = self.block19(x)

        # Exit flow
        x = self.block20(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)

        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)

        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)

        return x, low_level_feat

    def _init_weight(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _load_pretrained_model(self):
        pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
        model_dict = {}
        state_dict = self.state_dict()

        for k, v in pretrain_dict.items():
            if k in model_dict:
                if 'pointwise' in k:
                    v = v.unsqueeze(-1).unsqueeze(-1)
                if k.startswith('block11'):
                    model_dict[k] = v
                    model_dict[k.replace('block11', 'block12')] = v
                    model_dict[k.replace('block11', 'block13')] = v
                    model_dict[k.replace('block11', 'block14')] = v
                    model_dict[k.replace('block11', 'block15')] = v
                    model_dict[k.replace('block11', 'block16')] = v
                    model_dict[k.replace('block11', 'block17')] = v
                    model_dict[k.replace('block11', 'block18')] = v
                    model_dict[k.replace('block11', 'block19')] = v
                elif k.startswith('block12'):
                    model_dict[k.replace('block12', 'block20')] = v
                elif k.startswith('bn3'):
                    model_dict[k] = v
                    model_dict[k.replace('bn3', 'bn4')] = v
                elif k.startswith('conv4'):
                    model_dict[k.replace('conv4', 'conv5')] = v
                elif k.startswith('bn4'):
                    model_dict[k.replace('bn4', 'bn5')] = v
                else:
                    model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)


if __name__ == "__main__":
    import torch
    model = AlignedXception(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=16)
    input = torch.rand(1, 3, 512, 512)
    output, low_level_feat = model(input)
    print(output.size())
    print(low_level_feat.size())
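fixed_padding above exists so that stride-1 separable convolutions keep their spatial size even under dilation: the effective kernel is k + (k-1)(d-1), and the total pad is that minus one. A quick check of the arithmetic (assumes only torch):

import torch
import torch.nn.functional as F

kernel_size, dilation = 3, 2
effective = kernel_size + (kernel_size - 1) * (dilation - 1)  # 5
pad_total = effective - 1                                     # 4
x = torch.rand(1, 3, 32, 32)
padded = F.pad(x, (pad_total // 2, pad_total - pad_total // 2) * 2)
print(padded.shape)  # torch.Size([1, 3, 36, 36])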
| 40.104167
| 116
| 0.583117
| 1,439
| 11,550
| 4.512856
| 0.122307
| 0.032338
| 0.032338
| 0.049276
| 0.496612
| 0.481367
| 0.435633
| 0.406375
| 0.377425
| 0.345396
| 0
| 0.056061
| 0.305022
| 11,550
| 288
| 117
| 40.104167
| 0.752959
| 0.012208
| 0
| 0.242291
| 0
| 0
| 0.022396
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039648
| false
| 0
| 0.030837
| 0
| 0.101322
| 0.008811
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b096c429c0f219b1a8f9aeb011545c4774f439d
| 1,430
|
py
|
Python
|
Backend/autonomus/utils/mail.py
|
IrinaMBejan/Autonom
|
4a97da1b26ed22e3ec8bb939359148765392b692
|
[
"MIT"
] | 2
|
2019-03-08T10:04:35.000Z
|
2020-03-14T15:24:56.000Z
|
Backend/autonomus/utils/mail.py
|
IrinaMBejan/Autonom
|
4a97da1b26ed22e3ec8bb939359148765392b692
|
[
"MIT"
] | null | null | null |
Backend/autonomus/utils/mail.py
|
IrinaMBejan/Autonom
|
4a97da1b26ed22e3ec8bb939359148765392b692
|
[
"MIT"
] | 2
|
2019-03-16T14:47:36.000Z
|
2020-04-28T14:09:45.000Z
|
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail, Substitution

API_KEY = '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'
API_KEY_ID = 'egd1yywWRbeVF2gcGhTH2Q'
ENCODING = "utf-8"
DEFAULT_MAIL = "irinam.bejan@gmail.com"


def link(urlsafe):
    return "https://develop-dot-autonomus.appspot.com/events/details?event_id=" + urlsafe


def send_newsletter(users, event1, event2):
    for user in users:
        send_mail(DEFAULT_MAIL, user.username, user.email, event1, event2)


def send_mail(from_mail, username, to_mails, event1, event2):
    message = Mail(
        from_email=from_mail,
        to_emails=to_mails
    )

    message.dynamic_template_data = {
        'name': username,
        'title1': event1.title,
        'src1': link(event1.urlsafe),
        'loc1': event1.location,
        'date1': event1.date.strftime('%d-%m-%Y %H:%M'),
        'title2': event2.title,
        'src2': link(event2.urlsafe),
        'loc2': event2.location,
        'date2': event2.date.strftime('%d-%m-%Y %H:%M')
    }
    print('before')
    message.template_id = 'd-6607926b2aba4f8fba984dccdaa9ece6'

    client = SendGridAPIClient(API_KEY)
    response = client.send(message)
    code = response.status_code
    print('after')
    was_successful = lambda ret_code: ret_code // 100 in (2, 3)
    if not was_successful(code):
        raise Exception("Couldn't send e-mail: {} {}".format(code, response.body))
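The was_successful lambda treats any 2xx or 3xx status as success. SendGrid's v3 mail-send endpoint normally answers 202 Accepted when the message is queued, so a stricter predicate is common (a sketch, not part of the project above):

def was_successful(ret_code):
    # SendGrid returns 202 when the message has been queued for delivery.
    return ret_code == 202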
| 29.791667
| 89
| 0.68951
| 176
| 1,430
| 5.465909
| 0.517045
| 0.018711
| 0.027027
| 0.029106
| 0.035343
| 0.035343
| 0.035343
| 0
| 0
| 0
| 0
| 0.04932
| 0.177622
| 1,430
| 47
| 90
| 30.425532
| 0.768707
| 0
| 0
| 0
| 0
| 0
| 0.228132
| 0.102869
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.055556
| 0.027778
| 0.166667
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b09dfca59db461ba56fcce8bea683cfe5b5f132
| 22,696
|
py
|
Python
|
yellowbrick/features/pca.py
|
percygautam/yellowbrick
|
1ba6774a257bc85768a990293790caf4c14a5653
|
[
"Apache-2.0"
] | 1
|
2020-04-30T08:50:11.000Z
|
2020-04-30T08:50:11.000Z
|
yellowbrick/features/pca.py
|
percygautam/yellowbrick
|
1ba6774a257bc85768a990293790caf4c14a5653
|
[
"Apache-2.0"
] | null | null | null |
yellowbrick/features/pca.py
|
percygautam/yellowbrick
|
1ba6774a257bc85768a990293790caf4c14a5653
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# yellowbrick.features.pca
# Decomposition based feature visualization with PCA.
#
# Author: Carlo Morales
# Author: Raúl Peralta Lozada
# Author: Benjamin Bengfort
# Created: Tue May 23 18:34:27 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: pca.py [] cmorales@pacificmetrics.com $
"""
Decomposition based feature visualization with PCA.
"""
##########################################################################
## Imports
##########################################################################
# NOTE: must import mplot3d to load the 3D projection
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from yellowbrick.style import palettes
from yellowbrick.features.projection import ProjectionVisualizer
from yellowbrick.exceptions import YellowbrickValueError, NotFitted
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA as PCATransformer
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import NotFittedError
##########################################################################
# 2D and 3D PCA Visualizer
##########################################################################
class PCA(ProjectionVisualizer):
"""
Produce a two or three dimensional principal component plot of a data array
projected onto its largest sequential principal components. It is common
practice to scale the data array ``X`` before applying a PC decomposition.
Variable scaling can be controlled using the ``scale`` argument.
Parameters
----------
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
features : list, default: None
The names of the features specified by the columns of the input dataset.
This length of this list must match the number of columns in X, otherwise
an exception will be raised on ``fit()``.
classes : list, default: None
The class labels for each class in y, ordered by sorted class index. These
names act as a label encoder for the legend, identifying integer classes
or renaming string labels. If omitted, the class labels will be taken from
the unique values in y.
Note that the length of this list must match the number of unique values in
y, otherwise an exception is raised. This parameter is only used in the
discrete target type case and is ignored otherwise.
scale : bool, default: True
Boolean that indicates if user wants to scale data.
projection : int or string, default: 2
The number of axes to project into, either 2d or 3d. To plot 3d plots
with matplotlib, please ensure a 3d axes is passed to the visualizer,
otherwise one will be created using the current figure.
proj_features : bool, default: False
Boolean that indicates if the user wants to project the features
in the projected space. If True the plot will be similar to a biplot.
colors : list or tuple, default: None
A single color to plot all instances as or a list of colors to color each
instance according to its class in the discrete case or as an ordered
colormap in the sequential case. If not enough colors per class are
specified then the colors are treated as a cycle.
colormap : string or cmap, default: None
The colormap used to create the individual colors. In the discrete case
it is used to compute the number of colors needed for each class and
in the continuous case it is used to create a sequential color map based
on the range of the target.
alpha : float, default: 0.75
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
random_state : int, RandomState instance or None, optional (default None)
This parameter sets the random state on this solver. If the input X is
larger than 500x500 and the number of components to extract is lower
than 80% of the smallest dimension of the data, then the more efficient
`randomized` solver is enabled.
colorbar : bool, default: True
If the target_type is "continous" draw a colorbar to the right of the
scatter plot. The colobar axes is accessible using the cax property.
heatmap : bool, default: False
Add a heatmap showing contribution of each feature in the principal components.
Also draws a colorbar for readability purpose. The heatmap is accessible
using lax property and colorbar using uax property.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
pca_components_ : ndarray, shape (n_features, n_components)
This tells about the magnitude of each feature in the pricipal components.
This is primarily used to draw the biplots.
classes_ : ndarray, shape (n_classes,)
The class labels that define the discrete values in the target. Only
available if the target type is discrete. This is guaranteed to be
strings even if the classes are a different type.
features_ : ndarray, shape (n_features,)
The names of the features discovered or used in the visualizer that
can be used as an index to access or modify data in X. If a user passes
feature names in, those features are used. Otherwise the columns of a
DataFrame are used or just simply the indices of the data array.
range_ : (min y, max y)
A tuple that describes the minimum and maximum values in the target.
Only available if the target type is continuous.
Examples
--------
>>> from sklearn import datasets
>>> iris = datasets.load_iris()
>>> X = iris.data
>>> y = iris.target
>>> visualizer = PCA()
>>> visualizer.fit_transform(X, y)
>>> visualizer.show()
"""
def __init__(
self,
ax=None,
features=None,
classes=None,
scale=True,
projection=2,
proj_features=False,
colors=None,
colormap=None,
alpha=0.75,
random_state=None,
colorbar=True,
heatmap=False,
**kwargs
):
super(PCA, self).__init__(
ax=ax,
features=features,
classes=classes,
colors=colors,
colormap=colormap,
projection=projection,
alpha=alpha,
colorbar=colorbar,
**kwargs
)
# Data Parameters
self.scale = scale
self.proj_features = proj_features
# Create the PCA transformer
self.pca_transformer = Pipeline(
[
("scale", StandardScaler(with_std=self.scale)),
("pca", PCATransformer(self.projection, random_state=random_state)),
]
)
self.alpha = alpha
# Visual Parameters
self.heatmap = heatmap
self._uax, self._lax = None, None
# No heatmap can be drawn with 3d plots as they do not have permit axes
# division.
if self.projection == 3 and self.heatmap:
raise YellowbrickValueError(
"heatmap and colorbar are not compatible with 3d projections"
)
@property
def uax(self):
"""
The axes of the colorbar, bottom of scatter plot. This is the colorbar
for heatmap and not for the scatter plot.
"""
if self._uax is None:
raise AttributeError("This visualizer does not have an axes for colorbar")
return self._uax
@property
def lax(self):
"""
The axes of the heatmap below scatter plot.
"""
if self._lax is None:
raise AttributeError("This visualizer does not have an axes for heatmap")
return self._lax
def layout(self, divider=None):
"""
Creates the layout for colorbar and heatmap, adding new axes for the heatmap
if necessary and modifying the aspect ratio. Does not modify the axes or the
layout if ``self.heatmap`` is ``False`` or ``None``.
Parameters
----------
divider: AxesDivider
An AxesDivider to be passed among all layout calls.
"""
# Ensure matplotlib version compatibility
if make_axes_locatable is None:
raise YellowbrickValueError(
(
"heatmap requires matplotlib 2.0.2 or greater "
"please upgrade matplotlib or set heatmap=False on the visualizer"
)
)
# Create the new axes for the colorbar and heatmap
if divider is None:
divider = make_axes_locatable(self.ax)
# Call to super class ensures that a colorbar is drawn when target is
# continuous.
super(PCA, self).layout(divider)
if self.heatmap:
# Axes for colorbar(for heatmap).
if self._uax is None:
self._uax = divider.append_axes("bottom", size="10%", pad=0.7)
# Axes for heatmap
if self._lax is None:
self._lax = divider.append_axes("bottom", size="15%", pad=0.5)
def fit(self, X, y=None, **kwargs):
"""
Fits the PCA transformer, transforms the data in X, then draws the
decomposition in either 2D or 3D space as a scatter plot.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
Returns
-------
self : visualizer
Returns self for use in Pipelines.
"""
# Call super fit to compute features, classes, colors, etc.
super(PCA, self).fit(X=X, y=y, **kwargs)
self.pca_transformer.fit(X)
self.pca_components_ = self.pca_transformer.named_steps["pca"].components_
return self
def transform(self, X, y=None, **kwargs):
"""
Calls the internal `transform` method of the scikit-learn PCA transformer, which
performs a dimensionality reduction on the input features ``X``. Next calls the
``draw`` method of the Yellowbrick visualizer, finally returning a new array of
transformed features of shape ``(len(X), projection)``.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
Returns
-------
Xp : ndarray or DataFrame of shape n x m
Returns a new array-like object of transformed features of shape
``(len(X), projection)``.
"""
try:
Xp = self.pca_transformer.transform(X)
self.draw(Xp, y)
return Xp
except NotFittedError:
raise NotFitted.from_estimator(self, "transform")
def draw(self, Xp, y):
"""
Plots a scatterplot of points that represented the decomposition,
`pca_features_`, of the original features, `X`, projected into either 2 or
3 dimensions.
If 2 dimensions are selected, a colorbar and heatmap can also be optionally
included to show the magnitude of each feature value to the component.
Parameters
----------
Xp : array-like of shape (n, 2) or (n, 3)
The matrix produced by the ``transform()`` method.
y : array-like of shape (n,), optional
The target, used to specify the colors of the points.
Returns
-------
self.ax : matplotlib Axes object
Returns the axes that the scatter plot was drawn on.
"""
# Call to super draw which draws the scatter plot.
super(PCA, self).draw(Xp, y)
if self.proj_features:
# Draws projection features in transformed space.
self._draw_projection_features(Xp, y)
if self.projection == 2:
if self.heatmap:
if not self.colormap:
self.colormap = palettes.DEFAULT_SEQUENCE
# TODO: change to pcolormesh instead of imshow per #615 spec
im = self.lax.imshow(
self.pca_components_,
interpolation="none",
cmap=self.colormap,
aspect="auto",
)
plt.colorbar(
im,
cax=self.uax,
orientation="horizontal",
ticks=[self.pca_components_.min(), 0, self.pca_components_.max()],
)
return self.ax
def _draw_projection_features(self, Xp, y):
"""
Draw the projection of features in the transformed space.
Parameters
----------
Xp : array-like of shape (n, 2) or (n, 3)
The matrix produced by the ``transform()`` method.
y : array-like of shape (n,), optional
The target, used to specify the colors of the points.
Returns
-------
self.ax : matplotlib Axes object
Returns the axes that the scatter plot was drawn on.
"""
x_vector = self.pca_components_[0]
y_vector = self.pca_components_[1]
max_x = max(Xp[:, 0])
max_y = max(Xp[:, 1])
if self.projection == 2:
for i in range(self.pca_components_.shape[1]):
self.ax.arrow(
x=0,
y=0,
dx=x_vector[i] * max_x,
dy=y_vector[i] * max_y,
color="r",
head_width=0.05,
width=0.005,
)
self.ax.text(
x_vector[i] * max_x * 1.05,
y_vector[i] * max_y * 1.05,
self.features_[i],
color="r",
)
elif self.projection == 3:
z_vector = self.pca_components_[2]
max_z = max(Xp[:, 1])
for i in range(self.pca_components_.shape[1]):
self.ax.plot(
[0, x_vector[i] * max_x],
[0, y_vector[i] * max_y],
[0, z_vector[i] * max_z],
color="r",
)
self.ax.text(
x_vector[i] * max_x * 1.05,
y_vector[i] * max_y * 1.05,
z_vector[i] * max_z * 1.05,
self.features_[i],
color="r",
)
else:
raise YellowbrickValueError("Projection dimensions must be either 2 or 3")
return self.ax
def finalize(self, **kwargs):
"""
Draws the title, labels, legends, heatmap, and colorbar as specified by the
keyword arguments.
"""
super(PCA, self).finalize()
self.ax.set_title("Principal Component Plot")
self.ax.set_xlabel("$PC_1$")
self.ax.set_ylabel("$PC_2$")
if self.projection == 3:
self.ax.set_zlabel("$PC_3$")
if self.heatmap == True:
self.lax.set_xticks(np.arange(-0.5, len(self.features_)))
self.lax.set_xticklabels([])
# Makes the labels centered.
self.lax.set_xticks(np.arange(0, len(self.features_)), minor=True)
self.lax.set_xticklabels(
self.features_, rotation=90, fontsize=12, minor=True
)
self.lax.set_yticks(np.arange(0.5, 2))
self.lax.set_yticklabels(["$PC_1$", "$PC_2$"], va="bottom", fontsize=10)
self.fig.tight_layout()
##########################################################################
## Quick Method
##########################################################################
def pca_decomposition(
X,
y=None,
ax=None,
features=None,
classes=None,
scale=True,
projection=2,
proj_features=False,
colors=None,
colormap=None,
alpha=0.75,
random_state=None,
colorbar=True,
heatmap=False,
show=True,
**kwargs
):
"""
Produce a two or three dimensional principal component plot of the data array ``X``
projected onto its largest sequential principal components. It is common practice
to scale the data array ``X`` before applying a PC decomposition. Variable scaling
can be controlled using the ``scale`` argument.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
features : list, default: None
The names of the features specified by the columns of the input dataset.
        The length of this list must match the number of columns in X, otherwise
an exception will be raised on ``fit()``.
classes : list, default: None
The class labels for each class in y, ordered by sorted class index. These
names act as a label encoder for the legend, identifying integer classes
or renaming string labels. If omitted, the class labels will be taken from
the unique values in y.
Note that the length of this list must match the number of unique values in
y, otherwise an exception is raised. This parameter is only used in the
discrete target type case and is ignored otherwise.
scale : bool, default: True
        Boolean that indicates if the user wants to scale the data.
projection : int or string, default: 2
The number of axes to project into, either 2d or 3d. To plot 3d plots
with matplotlib, please ensure a 3d axes is passed to the visualizer,
otherwise one will be created using the current figure.
proj_features : bool, default: False
Boolean that indicates if the user wants to project the features
in the projected space. If True the plot will be similar to a biplot.
colors : list or tuple, default: None
A single color to plot all instances as or a list of colors to color each
instance according to its class in the discrete case or as an ordered
colormap in the sequential case. If not enough colors per class are
specified then the colors are treated as a cycle.
colormap : string or cmap, default: None
The colormap used to create the individual colors. In the discrete case
it is used to compute the number of colors needed for each class and
in the continuous case it is used to create a sequential color map based
on the range of the target.
alpha : float, default: 0.75
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
random_state : int, RandomState instance or None, optional (default None)
This parameter sets the random state on this solver. If the input X is
larger than 500x500 and the number of components to extract is lower
than 80% of the smallest dimension of the data, then the more efficient
`randomized` solver is enabled.
colorbar : bool, default: True
        If the target_type is "continuous" draw a colorbar to the right of the
        scatter plot. The colorbar axes is accessible using the cax property.
heatmap : bool, default: False
Add a heatmap showing contribution of each feature in the principal components.
Also draws a colorbar for readability purpose. The heatmap is accessible
using lax property and colorbar using uax property.
show : bool, default: True
        If True, calls ``show()``, which in turn calls ``plt.show()``; however, you
        cannot call ``plt.savefig`` from this signature, nor ``clear_figure``. If
        False, simply calls ``finalize()``.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
pca_components_ : ndarray, shape (n_features, n_components)
        Describes the magnitude of each feature in the principal components.
This is primarily used to draw the biplots.
classes_ : ndarray, shape (n_classes,)
The class labels that define the discrete values in the target. Only
available if the target type is discrete. This is guaranteed to be
strings even if the classes are a different type.
features_ : ndarray, shape (n_features,)
The names of the features discovered or used in the visualizer that
can be used as an index to access or modify data in X. If a user passes
feature names in, those features are used. Otherwise the columns of a
        DataFrame are used, or simply the indices of the data array.
range_ : (min y, max y)
A tuple that describes the minimum and maximum values in the target.
Only available if the target type is continuous.
Examples
--------
>>> from sklearn import datasets
>>> iris = datasets.load_iris()
>>> X = iris.data
>>> y = iris.target
>>> pca_decomposition(X, y, colors=['r', 'g', 'b'], projection=3)
"""
# Instantiate the visualizer
visualizer = PCA(
ax=ax,
features=features,
scale=scale,
projection=projection,
proj_features=proj_features,
colors=colors,
colormap=colormap,
alpha=alpha,
random_state=random_state,
colorbar=colorbar,
heatmap=heatmap,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y)
visualizer.transform(X, y)
if show:
visualizer.show()
else:
visualizer.finalize()
# Returns the visualizer object.
return visualizer
# Alias for PCA
PCADecomposition = PCA
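# --- Illustrative usage sketch (not part of the original module). It drives
# the quick method above with the iris dataset to draw a 2D biplot with the
# component heatmap enabled; assumes scikit-learn and a matplotlib backend are
# available.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    data = load_iris()
    pca_decomposition(
        data.data,
        data.target,
        features=list(data.feature_names),
        proj_features=True,   # draw the feature vectors (biplot)
        heatmap=True,         # show the component-magnitude heatmap
        projection=2,
    )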
| 36.547504
| 88
| 0.606847
| 2,918
| 22,696
| 4.667238
| 0.160041
| 0.009178
| 0.008077
| 0.006608
| 0.633674
| 0.611572
| 0.604964
| 0.598282
| 0.590058
| 0.590058
| 0
| 0.009904
| 0.305957
| 22,696
| 620
| 89
| 36.606452
| 0.854685
| 0.587725
| 0
| 0.350711
| 0
| 0
| 0.05731
| 0
| 0
| 0
| 0
| 0.001613
| 0
| 1
| 0.047393
| false
| 0
| 0.047393
| 0
| 0.132701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b0a89ea28d57009a70965dacb867faddce3f86e
| 28,086
|
py
|
Python
|
shiSock-0.2.0/test_two/PySock/server.py
|
AnanyaRamanA/shiSock
|
51efb0eba17eb106b9480598d278536ddd7732c3
|
[
"MIT"
] | null | null | null |
shiSock-0.2.0/test_two/PySock/server.py
|
AnanyaRamanA/shiSock
|
51efb0eba17eb106b9480598d278536ddd7732c3
|
[
"MIT"
] | null | null | null |
shiSock-0.2.0/test_two/PySock/server.py
|
AnanyaRamanA/shiSock
|
51efb0eba17eb106b9480598d278536ddd7732c3
|
[
"MIT"
] | 1
|
2021-10-31T13:47:42.000Z
|
2021-10-31T13:47:42.000Z
|
from re import S
import select
import socket
import queue
import threading
import sys
import pickle
import base64
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.serialization import load_ssh_public_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.backends import default_backend
import hashlib
import yaml
import random
import time
class IPNC():
def __init__(self):
pass
def _read_yml(self,file = None):
with open(file) as file:
documents = yaml.full_load(file)
return documents
def _write_yml(self,file = None, dict_data = None,mode = "a+"):
with open(file, mode) as file:
yaml.dump(dict_data, file)
def _add_node(self,file = None, node = None):
try:
read = self._read_yml(file)
if read != None:
read[node[0]]
self._change_node_value(file,node)
else:
raise KeyError
except KeyError:
node_dict = {
node[0] : node[1]
}
self._write_yml(file, node_dict)
def _change_node_value(self,file = None, node = None):
r_yml = self._read_yml(file)
r_yml[node[0]] = node[1]
self._write_yml(file = file, dict_data = r_yml, mode = "w")
def _get_node(self,file = None, key = None, wait = True):
if key == None:
return self._read_yml(file)
if wait:
while True:
r_yml = self._read_yml(file)
try:
value = r_yml[key]
return value
except KeyError:
pass
except TypeError:
pass
else:
r_yml = self._read_yml(file)
try:
value = r_yml[key]
return value
except KeyError:
return None
except TypeError:
pass
def _remove_node(self,file,node):
try:
r_yml = self._read_yml(file = file)
r_yml[node]
r_yml.pop(node)
self._write_yml(file = file, dict_data = r_yml, mode = "w")
except KeyError:
return False
except:
pass
def _name_generator(self,_len_ = 16, onlyText = False):
lower_case = list("abcdefghijklmnopqrstuvwxyz")
upper_case = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
special = list("!@#$%&*?")
number = list("0123456789")
if onlyText:
_all_ = lower_case + upper_case
else:
_all_ = lower_case + upper_case + special + number
random.shuffle(_all_)
return "".join(random.sample(_all_,_len_))
class DSP():
def __init__(
self,
msg : str = None,
DSP_type : str = None,
device_id : int = None,
universalAesKey : bytes = None,
nonce : bytes = None,
aad : str = None,
):
        # Both branches assigned the same value; collapse to one assignment.
        self.msg = msg
self.DSP_type = DSP_type
self.device_id = device_id
if universalAesKey is not None:
self.UNIVERSAL_AES_KEY = universalAesKey
else:
self.UNIVERSAL_AES_KEY = b't\x89\xcc\x87\xcca\xe8\xfb\x06\xed\xcf+\x0eVB\xd2\xd3\xbeMk\xfa\xd1J\xa7\xc8@\xf8\x05\x0f\xfc\x18\x00'
if nonce is not None:
self.NONCE = nonce
else:
self.NONCE = b'\xfe\x1e1\xc0\xfc`s\xbc6\x9fQ\xb2'
if aad is not None:
self.AAD = aad
else:
self.AAD = b"au$tica&tedbut@u32nencr#cdscypteddatafdrj"
def _messanger(self,MSG = None):
if MSG is not None:
self.msg = MSG
data = f'DSP("{self.msg}","{self.DSP_type}")'
data = pickle.dumps(data)
pickled_data = data
encrypted_data = [self.device_id, self.__encrypt(pickled_data)]
p_e_d = pickle.dumps(encrypted_data)
ret = base64.b64encode(p_e_d)
return ret
def __repr__(self):
return "_main.DSP._"
def __encrypt(self,data):
aesgcm = AESGCM(self.UNIVERSAL_AES_KEY,)
ct = aesgcm.encrypt(
self.NONCE,
data,
self.AAD
)
return ct
def _convert_to_class(self,OBJECT : bytes = None,secure : bool = True, secure_dict : list = None):
try:
OBJECT = base64.b64decode(OBJECT)
OBJECT = pickle.loads(OBJECT)
if secure == True:
if secure_dict is None:
raise TypeError(
"convert_to_class() missing 1 required positional argument: 'secure_lst'")
else:
secure_dict = pickle.loads(base64.b64decode(secure_dict))
aesgcm = AESGCM(secure_dict["aes_key"])
ct = aesgcm.decrypt(
secure_dict["nonce"], OBJECT[-1], secure_dict["aad"])
ct = pickle.loads(ct)
return eval(ct)
else:
aesgcm = AESGCM(self.UNIVERSAL_AES_KEY)
ct = aesgcm.decrypt(self.NONCE, OBJECT[-1], self.AAD)
ct = pickle.loads(ct)
return eval(ct)
except TypeError:
sys.exit()
except ValueError:
print("sender has not done the handshake")
class MAIN(IPNC):
def __init__(self,secure : bool = True,file = None):
"""async_server initializer class that will create the a asyncronouse tcp server.
"""
IPNC.__init__(self)
self.__secure = secure
self.__file_location = file
self.READABLE = []
self.WRITABLE = []
self.INPUTS = []
self.OUTPUTS = []
self.MESSAGE_QUEUES = {}
self.REQUEST_LIST = []
self.REQUEST_RESPONSE_LIST = []
self.MESSAGE_LIST = []
self.__VARIFIED_DEVICES = []
self.__CLIENT_KEYS = {}
self.__CUSTOM_CHANNEL = []
self.__CUSTOM_CHANNEL_MSG_REC = []
self.__CUSTOM_CHANNEL_MSG_SEND = []
self.__VARIFIER_LIST = []
self.__CALLBACK_LOOP = []
self.__RECEIVING_MSG = []
get = self._get_node(file = self.__file_location,key = hashlib.sha256(bytes("key", "utf-8")).digest(), wait = False)
if get is not None:
self.__CLIENT_KEYS = get
self.__VARIFIED_DEVICES.extend(list(get.keys()))
def SERVER(self,address : str = None, port : int = None, listeners : int = None):
self.address = address
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
self.sock.setblocking(0)
self.sock.bind((self.address,self.port))
self.sock.listen(listeners)
print("[SERVER IS ACTIVATED | LISTENING]")
self.INPUTS.append(self.sock)
thread1 = threading.Thread(
target = self.receive_func,
args = (
self.__RECEIVING_MSG,
self.__VARIFIED_DEVICES,
self.__VARIFIER_LIST,
self.__CLIENT_KEYS,
self.OUTPUTS,
self.REQUEST_LIST,
self.REQUEST_RESPONSE_LIST,
self.MESSAGE_LIST,
self.__CUSTOM_CHANNEL_MSG_REC,
)
)
thread2 = threading.Thread(
target = self.send_func,
args = (
self.WRITABLE,
self.MESSAGE_QUEUES,
self.MESSAGE_LIST,
self.REQUEST_LIST,
self.REQUEST_RESPONSE_LIST,
self.__VARIFIER_LIST,
self.__CUSTOM_CHANNEL_MSG_SEND
)
)
thread3 = threading.Thread(
target = self.__callback_loop,
args = (
self.__CALLBACK_LOOP,
)
)
# thread1.daemon = True
thread1.start()
# thread2.daemon = True
thread2.start()
# thread3.daemon = True
thread3.start()
thread = threading.Thread(target = self.__server)
# thread.daemon = True
thread.start()
def __server(self):
data_recv_len = []
while True:
readable, writable, exceptions = select.select(self.INPUTS, self.OUTPUTS, self.INPUTS)
# handling the inputs
for r in readable:
if r is self.sock:
connection,addr = r.accept()
connection.setblocking(0)
self.INPUTS.append(connection)
self.MESSAGE_QUEUES[connection] = queue.Queue()
else:
ini = list(zip(*data_recv_len))
if len(ini) == 0 or r not in ini[0]:
try:
data_len = pickle.loads(base64.b64decode(r.recv(32).decode().strip("0").encode("utf-8")))
except ConnectionResetError:
print("Client Disconnected")
if r in self.OUTPUTS:
self.OUTPUTS.remove(r)
if r in self.WRITABLE:
self.WRITABLE.remove(r)
self.INPUTS.remove(r)
r.close()
del self.MESSAGE_QUEUES[r]
continue
except Exception as e:
pass
if data_len:
if type(data_len) == type([]):
data_recv_len.append(
[
r,
data_len[0]
]
)
else:
print("User Disconnected")
if r in self.OUTPUTS:
self.OUTPUTS.remove(r)
self.INPUTS.remove(r)
if r in self.WRITABLE:
self.WRITABLE.remove(r)
r.close()
del self.MESSAGE_QUEUES[r]
continue
else:
qwe = list(zip(*data_recv_len))
INDEX = qwe[0].index(r)
try:
recv_len = data_recv_len.pop(INDEX)[1]
data = r.recv(recv_len)
try:
data = data.decode().strip("0").encode("utf-8")
except:
print("Error in decoding")
self.__RECEIVING_MSG.append(data)
self.MESSAGE_QUEUES[r].put(pickle.loads(base64.b64decode(data))[0])
if r not in self.OUTPUTS:
self.OUTPUTS.append(r)
except Exception as e:
print("User Disconnected")
readable.remove(r)
self.INPUTS.remove(r)
writable.remove(r)
self.OUTPUTS.remove(r)
if r in self.WRITABLE:
self.WRITABLE.remove(r)
del self.MESSAGE_QUEUES[r]
continue
# handling the outputs
for w in writable:
if w not in self.WRITABLE:
self.WRITABLE.append(w)
# handling the errors
for e in exceptions:
self.INPUTS.remove(e)
if e in self.OUTPUTS:
self.OUTPUTS.remove(e)
e.close()
del self.MESSAGE_QUEUES[e]
def receive_func(self, __receiving_msg,__varified_devices, __varifier_lst, __client_keys, __outputs, __request_lst, __request_res_lst, __message_lst, __custom_c_m_r):
# __receiving_msg = self.__RECEIVING_MSG,
# __varified_devices = self.__VARIFIED_DEVICES,
# __varifier_lst = self.__VARIFIER_LIST,
# __client_keys = self.__CLIENT_KEYS,
# __outputs = self.OUTPUTS,
# __request_lst = self.REQUEST_LIST
# __request_res_lst = self.REQUEST_RESPONSE_LIST
# __message_lst = self.MESSAGE_LIS
# __custom_c_m_r = self.__CUSTOM_CHANNEL_MSG_REC
while True:
try:
for INDEX,_data_ in enumerate(__receiving_msg):
data = pickle.loads(base64.b64decode(_data_))
# print(f"data[0] : {data[0]}")
# print(f"__varified_devices : {__varified_devices}")
if data[0] not in __varified_devices:
_recv_ = DSP()._convert_to_class(_data_, secure = False)
if _recv_.DSP_type == "username_secure":
resolved_data = eval(_recv_.msg)
aes_key = AESGCM.generate_key(256)
nonce = os.urandom(32)
aad = bytes(self._name_generator(),"utf-8")
qw = {
"aes_key" : aes_key,
"nonce" : nonce,
"aad" : aad,
}
pickle_qw = pickle.dumps(qw)
b64_aes_key_pack = base64.b64encode(pickle_qw)
key = load_ssh_public_key(
bytes(
resolved_data["data"],
"utf-8"
),
backend=default_backend()
)
ciphertext = key.encrypt(
b64_aes_key_pack,
padding.OAEP(
mgf = padding.MGF1(algorithm = hashes.SHA256()),
algorithm = hashes.SHA256(),
label = None
)
)
ciphertext = base64.b64encode(ciphertext)
prepare_data = {"key" : ciphertext}
dsp_data = DSP(
DSP_type="username_secure_response"
)._messanger(
MSG = prepare_data
)
dsp_data = [resolved_data["username"],dsp_data]
__varifier_lst.append(dsp_data)
__varified_devices.append(resolved_data["username"])
__client_keys[resolved_data["username"]] = b64_aes_key_pack
get = self._get_node(
file = self.__file_location,
key = hashlib.sha256(bytes("key","utf-8")).digest(),
wait = False
)
if get is not None:
get[resolved_data["username"]] = b64_aes_key_pack
self._add_node(
file = self.__file_location,
node = [
hashlib.sha256(bytes("key","utf-8")).digest(),
get
]
)
else:
self._add_node(
file = self.__file_location,
node = [
hashlib.sha256(bytes("key","utf-8")).digest(),
{
resolved_data["username"] : b64_aes_key_pack
}
]
)
__receiving_msg.pop(INDEX)
else:
aes_key_pack = __client_keys[data[0]]
_recv_ = DSP()._convert_to_class(
OBJECT = _data_,
secure = True,
secure_dict = aes_key_pack
)
if _recv_.DSP_type == "DSP_REQ":
try:
resolved_data = eval(_recv_.msg)
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__request_lst.append(
[
resolved_data["target_name"],
_recv_.msg
]
)
__receiving_msg.remove(_data_)
except:
pass
elif _recv_.DSP_type == "DSP_REQ_RES":
try:
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__request_res_lst.append(
[
resolved_data["target_name"],
_recv_.msg
]
)
__receiving_msg.remove(_data_)
except:
pass
elif _recv_.DSP_type == "DSP_MSG":
try:
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__message_lst.append(
[
resolved_data['target_name'],
_recv_.msg
]
)
__receiving_msg.remove(_data_)
except:
pass
elif _recv_.DSP_type in self.__CUSTOM_CHANNEL:
try:
resolved_data = pickle.loads(base64.b64decode(eval(_recv_.msg)))
__custom_c_m_r.append(resolved_data)
__receiving_msg.remove(_data_)
except:
pass
except:
pass
def send_func(self,Writable,message_q,message_list,requestList,requestResList,varifierList,customChannelMessageSend):
while True:
# print(f"Writable : {Writable}")
# time.sleep(2)
for s in Writable:
                if s._closed and s.fileno() == -1:
Writable.remove(s)
# try:
try:
username = message_q[s].get_nowait()
message_q[s].put(username)
msg_lst = list(list(zip(*message_list)))
req_lst = list(list(zip(*requestList)))
req_res_lst = list(list(zip(*requestResList)))
vari_lst = list(list(zip(*varifierList)))
send_c_msg = list(zip(*customChannelMessageSend))
except KeyError:
pass
if len(msg_lst) > 0:
if username in msg_lst[0]:
INDEX = msg_lst[0].index(username)
aes_key_pack = self.__CLIENT_KEYS[username]
aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
dsp_data = DSP(
DSP_type = "DSP_MSG",
universalAesKey = aes_key_pack["aes_key"],
nonce = aes_key_pack["nonce"],
aad = aes_key_pack["aad"]
)._messanger(
MSG = f"{msg_lst[1][INDEX]}"
).decode().center(len(msg_lst[1][INDEX]) + 100, "|").encode("utf-8")
try:
s.send(bytes(f"{len(dsp_data)}".center(16,"|"),"utf-8"))
s.send(
dsp_data
)
message_list.pop(INDEX)
except OSError:
pass
if len(req_lst) > 0:
if username in req_lst[0]:
INDEX = req_lst[0].index(username)
try:
aes_key_pack = self.__CLIENT_KEYS[username]
except KeyError:
continue
aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
dsp_data = DSP(
DSP_type = "DSP_handshake_request",
universalAesKey = aes_key_pack["aes_key"],
nonce = aes_key_pack["nonce"],
aad = aes_key_pack["aad"]
)._messanger(
MSG = f"{req_lst[1][INDEX]}"
).decode().center(len(req_lst[1][INDEX]) + 100, "|").encode("utf-8")
s.send(bytes(f"{len(dsp_data)+100}".center(16,"|"),"utf-8"))
s.send(
dsp_data
)
requestList.pop(INDEX)
if len(req_res_lst) > 0:
if username in req_res_lst[0]:
INDEX = req_res_lst[0].index(username)
aes_key_pack = self.__CLIENT_KEYS[username]
aes_key_pack = pickle.loads(base64.b64decode(aes_key_pack))
dsp_data = DSP(
DSP_type = "DSP_handshake_request_res",
universalAesKey = aes_key_pack["aes_key"],
nonce = aes_key_pack["nonce"],
aad = aes_key_pack["aad"]
)._messanger(
MSG = f"{req_res_lst[1][INDEX]}"
).decode().center(len(req_res_lst[1][INDEX]) + 100, "|").encode("utf-8")
s.send(bytes(f"{len(dsp_data)+100}".center(16,"|"),"utf-8"))
s.send(
dsp_data
)
requestResList.pop(INDEX)
if len(vari_lst) > 0:
if username in vari_lst[0]:
INDEX = vari_lst[0].index(username)
s.send(bytes(f"{len(vari_lst[1][INDEX])}".center(16,"|"),"utf-8"))
s.send(
vari_lst[1][INDEX]
)
varifierList.pop(INDEX)
if len(send_c_msg) > 0:
if username in send_c_msg[0]:
INDEX = send_c_msg[0].index(username)
s.send(bytes(f"{len(send_c_msg[1][INDEX])}".center(16,"|"),"utf-8"))
s.send(send_c_msg[1][INDEX])
customChannelMessageSend.pop(INDEX)
# except:
# pass
def CREATE_CHANNEL(self,channel_name = None, multiple : bool = False):
if multiple:
if type(channel_name) == type([]):
for channel in channel_name:
if channel not in self.__CUSTOM_CHANNEL:
self.__CUSTOM_CHANNEL.append(channel)
else:
print(f"Channel : {channel} already exists.")
else:
raise TypeError("When 'mutliple' is to True then channel_name should be a list of multiple channel names")
else:
if channel_name not in self.__CUSTOM_CHANNEL:
self.__CUSTOM_CHANNEL.append(channel_name)
def LISTEN(self,channel : str = None,function : object = None,args = None):
if channel is not None:
found = False
index = None
if channel in self.__CUSTOM_CHANNEL:
for i,d in enumerate(self.__CUSTOM_CHANNEL_MSG_REC):
if d["channel"] == channel:
found = True
index = i
break
if found:
if args is None:
p_data = self.__CUSTOM_CHANNEL_MSG_REC.pop(index)
self.__CALLBACK_LOOP.append([function,[p_data]])
else:
p_data = self.__CUSTOM_CHANNEL_MSG_REC.pop(index)
args = list(args)
args.insert(0,p_data)
self.__CALLBACK_LOOP.append([function,args])
else:
raise TypeError("'channel' should not be None")
def __callback_loop(self,__callback_loop):
while True:
for index,func in enumerate(__callback_loop):
__callback_loop.pop(index)
func[0](*func[1])
def SEND(self,channel_name,target_name,data):
if channel_name in self.__CUSTOM_CHANNEL:
key_pack = self.__CLIENT_KEYS[target_name]
key_pack = pickle.loads(base64.b64decode(key_pack))
dsp_data = DSP(
DSP_type = channel_name,
universalAesKey=key_pack["aes_key"],
nonce = key_pack["nonce"],
aad= key_pack["aad"]
)._messanger(
MSG = base64.b64encode(pickle.dumps(data))
)
self.__CUSTOM_CHANNEL_MSG_SEND.append(
[
target_name,
dsp_data
]
)
class server():
def __init__(self, file = None, debug : bool = False, MTCL : bool = True, MPCL : bool = False, safeMode : bool = True):
"""
This class allows user to create multi-client server.
args:
secure : bool = True -> this should set to the default value True,
file : str = None -> here user need to pass a yaml file which saves all the keys and configurations.
if not specified, will raise an TypeError
"""
if not file:
raise TypeError("asyncServer() missing 1 required positional argument: 'file'")
        # MAIN only accepts (secure, file); the original passed all five
        # arguments positionally, which raises a TypeError at runtime.
        __parent = MAIN(file=file)
self.SERVER = __parent.SERVER
self.CREATE_CHANNEL = __parent.CREATE_CHANNEL
self.LISTEN = __parent.LISTEN
self.SEND = __parent.SEND
| 38.73931
| 170
| 0.44054
| 2,565
| 28,086
| 4.524756
| 0.132164
| 0.019128
| 0.021541
| 0.026883
| 0.37877
| 0.292952
| 0.266931
| 0.231777
| 0.202309
| 0.19128
| 0
| 0.01709
| 0.47707
| 28,086
| 724
| 171
| 38.792818
| 0.773133
| 0.036424
| 0
| 0.317487
| 0
| 0.001698
| 0.04722
| 0.015085
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039049
| false
| 0.022071
| 0.032258
| 0.001698
| 0.098472
| 0.011885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b0b0bbc4a2a5899aadcf7804e822911158b0d28
| 9,304
|
py
|
Python
|
server/www/packages/packages-windows/x86/ldap3/utils/asn1.py
|
zhoulhb/teleport
|
54da194697898ef77537cfe7032d774555dc1335
|
[
"Apache-2.0"
] | 640
|
2018-09-12T03:14:13.000Z
|
2022-03-30T04:38:09.000Z
|
server/www/packages/packages-windows/x86/ldap3/utils/asn1.py
|
zhoulhb/teleport
|
54da194697898ef77537cfe7032d774555dc1335
|
[
"Apache-2.0"
] | 175
|
2018-09-10T19:52:20.000Z
|
2022-03-30T04:37:30.000Z
|
server/www/packages/packages-windows/x86/ldap3/utils/asn1.py
|
zhoulhb/teleport
|
54da194697898ef77537cfe7032d774555dc1335
|
[
"Apache-2.0"
] | 230
|
2018-09-13T02:40:49.000Z
|
2022-03-29T11:53:58.000Z
|
"""
"""
# Created on 2015.08.19
#
# Author: Giovanni Cannata
#
# Copyright 2015 - 2018 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from pyasn1 import __version__ as pyasn1_version
from pyasn1.codec.ber import decoder # for usage in other modules
from pyasn1.codec.ber.encoder import Encoder # for monkeypatching of boolean value
from ..core.results import RESULT_CODES
from ..utils.conv import to_unicode
from ..protocol.convert import referrals_to_list
CLASSES = {(False, False): 0, # Universal
(False, True): 1, # Application
(True, False): 2, # Context
(True, True): 3} # Private
# Monkeypatching of pyasn1 for encoding Boolean with the value 0xFF for TRUE
# THIS IS NOT PART OF THE FAST BER DECODER
if pyasn1_version == 'xxx0.2.3':
from pyasn1.codec.ber.encoder import tagMap, BooleanEncoder, encode
from pyasn1.type.univ import Boolean
from pyasn1.compat.octets import ints2octs
class BooleanCEREncoder(BooleanEncoder):
_true = ints2octs((255,))
tagMap[Boolean.tagSet] = BooleanCEREncoder()
else:
from pyasn1.codec.ber.encoder import tagMap, typeMap, AbstractItemEncoder
from pyasn1.type.univ import Boolean
from copy import deepcopy
class LDAPBooleanEncoder(AbstractItemEncoder):
supportIndefLenMode = False
if pyasn1_version <= '0.2.3':
from pyasn1.compat.octets import ints2octs
_true = ints2octs((255,))
_false = ints2octs((0,))
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return value and self._true or self._false, 0
elif pyasn1_version <= '0.3.1':
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return value and (255,) or (0,), False, False
elif pyasn1_version <= '0.3.4':
def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
return value and (255,) or (0,), False, False
elif pyasn1_version <= '0.3.7':
def encodeValue(self, value, encodeFun, **options):
return value and (255,) or (0,), False, False
else:
def encodeValue(self, value, asn1Spec, encodeFun, **options):
return value and (255,) or (0,), False, False
customTagMap = deepcopy(tagMap)
customTypeMap = deepcopy(typeMap)
customTagMap[Boolean.tagSet] = LDAPBooleanEncoder()
customTypeMap[Boolean.typeId] = LDAPBooleanEncoder()
encode = Encoder(customTagMap, customTypeMap)
# end of monkey patching
# a fast BER decoder for LDAP responses only
def compute_ber_size(data):
"""
Compute size according to BER definite length rules
Returns size of value and value offset
"""
if data[1] <= 127: # BER definite length - short form. Highest bit of byte 1 is 0, message length is in the last 7 bits - Value can be up to 127 bytes long
return data[1], 2
else: # BER definite length - long form. Highest bit of byte 1 is 1, last 7 bits counts the number of following octets containing the value length
bytes_length = data[1] - 128
value_length = 0
cont = bytes_length
for byte in data[2: 2 + bytes_length]:
cont -= 1
value_length += byte * (256 ** cont)
return value_length, bytes_length + 2
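# Worked examples of the rules above (illustrative, not part of the original
# module):
#   short form: data = b'\x30\x05...' -> data[1] = 0x05 <= 127, so the result
#     is (5, 2): five value bytes after a two-byte header.
#   long form:  data = b'\x30\x82\x01\xf4...' -> data[1] = 0x82, so
#     bytes_length = 0x82 - 128 = 2 and the length is read from the next two
#     octets: 0x01 * 256 + 0xf4 = 500, giving (500, 4).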
def decode_message_fast(message):
ber_len, ber_value_offset = compute_ber_size(get_bytes(message[:10])) # get start of sequence, at maximum 3 bytes for length
decoded = decode_sequence(message, ber_value_offset, ber_len + ber_value_offset, LDAP_MESSAGE_CONTEXT)
return {
'messageID': decoded[0][3],
'protocolOp': decoded[1][2],
'payload': decoded[1][3],
'controls': decoded[2][3] if len(decoded) == 3 else None
}
def decode_sequence(message, start, stop, context_decoders=None):
decoded = []
while start < stop:
octet = get_byte(message[start])
ber_class = CLASSES[(bool(octet & 0b10000000), bool(octet & 0b01000000))]
ber_constructed = bool(octet & 0b00100000)
ber_type = octet & 0b00011111
ber_decoder = DECODERS[(ber_class, octet & 0b00011111)] if ber_class < 2 else None
ber_len, ber_value_offset = compute_ber_size(get_bytes(message[start: start + 10]))
start += ber_value_offset
if ber_decoder:
value = ber_decoder(message, start, start + ber_len, context_decoders) # call value decode function
else:
# try:
value = context_decoders[ber_type](message, start, start + ber_len) # call value decode function for context class
# except KeyError:
# if ber_type == 3: # Referral in result
# value = decode_sequence(message, start, start + ber_len)
# else:
# raise # re-raise, should never happen
decoded.append((ber_class, ber_constructed, ber_type, value))
start += ber_len
return decoded
def decode_integer(message, start, stop, context_decoders=None):
first = message[start]
value = -1 if get_byte(first) & 0x80 else 0
for octet in message[start: stop]:
value = value << 8 | get_byte(octet)
return value
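# Example (sketch): the octets 0xFF 0x7F decode as a negative integer. value
# starts at -1 because the sign bit of 0xFF is set; then -1 << 8 | 0xFF == -1
# and -1 << 8 | 0x7F == -129, i.e. 0xFF7F read as a signed 16-bit integer.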
def decode_octet_string(message, start, stop, context_decoders=None):
return message[start: stop]
def decode_boolean(message, start, stop, context_decoders=None):
return False if message[start: stop] == 0 else True
def decode_bind_response(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, BIND_RESPONSE_CONTEXT)
def decode_extended_response(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, EXTENDED_RESPONSE_CONTEXT)
def decode_intermediate_response(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, INTERMEDIATE_RESPONSE_CONTEXT)
def decode_controls(message, start, stop, context_decoders=None):
return decode_sequence(message, start, stop, CONTROLS_CONTEXT)
def ldap_result_to_dict_fast(response):
response_dict = dict()
response_dict['result'] = int(response[0][3]) # resultCode
response_dict['description'] = RESULT_CODES[response_dict['result']]
response_dict['dn'] = to_unicode(response[1][3], from_server=True) # matchedDN
response_dict['message'] = to_unicode(response[2][3], from_server=True) # diagnosticMessage
if len(response) == 4:
response_dict['referrals'] = referrals_to_list([to_unicode(referral[3], from_server=True) for referral in response[3][3]]) # referrals
else:
response_dict['referrals'] = None
return response_dict
######
if str is not bytes: # Python 3
def get_byte(x):
return x
def get_bytes(x):
return x
else: # Python 2
def get_byte(x):
return ord(x)
def get_bytes(x):
return bytearray(x)
DECODERS = {
# Universal
(0, 1): decode_boolean, # Boolean
(0, 2): decode_integer, # Integer
(0, 4): decode_octet_string, # Octet String
(0, 10): decode_integer, # Enumerated
(0, 16): decode_sequence, # Sequence
(0, 17): decode_sequence, # Set
# Application
(1, 1): decode_bind_response, # Bind response
(1, 4): decode_sequence, # Search result entry
(1, 5): decode_sequence, # Search result done
(1, 7): decode_sequence, # Modify response
(1, 9): decode_sequence, # Add response
(1, 11): decode_sequence, # Delete response
(1, 13): decode_sequence, # ModifyDN response
(1, 15): decode_sequence, # Compare response
(1, 19): decode_sequence, # Search result reference
(1, 24): decode_extended_response, # Extended response
(1, 25): decode_intermediate_response, # intermediate response
(2, 3): decode_octet_string #
}
BIND_RESPONSE_CONTEXT = {
7: decode_octet_string # SaslCredentials
}
EXTENDED_RESPONSE_CONTEXT = {
10: decode_octet_string, # ResponseName
11: decode_octet_string # Response Value
}
INTERMEDIATE_RESPONSE_CONTEXT = {
0: decode_octet_string, # IntermediateResponseName
1: decode_octet_string # IntermediateResponseValue
}
LDAP_MESSAGE_CONTEXT = {
0: decode_controls, # Controls
3: decode_sequence # Referral
}
CONTROLS_CONTEXT = {
0: decode_sequence # Control
}
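# Usage sketch (an assumption about how ldap3 drives this module, shown here
# only for orientation): given the raw bytes of a single LDAP response read
# from the socket,
#
#   decoded = decode_message_fast(raw_bytes)
#   result = ldap_result_to_dict_fast(decoded['payload'])
#
# would yield a dict with 'result', 'description', 'dn', 'message' and
# 'referrals' keys.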
| 37.821138
| 161
| 0.661651
| 1,166
| 9,304
| 5.127787
| 0.228988
| 0.042148
| 0.04014
| 0.030774
| 0.250878
| 0.224787
| 0.184479
| 0.119083
| 0.119083
| 0.119083
| 0
| 0.035526
| 0.246668
| 9,304
| 245
| 162
| 37.97551
| 0.81752
| 0.234523
| 0
| 0.15
| 0
| 0
| 0.016551
| 0
| 0
| 0
| 0.000591
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.08125
| 0.09375
| 0.3625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b0c63a3de849494bdcf25b7c5c83e9a868cfc9f
| 2,351
|
py
|
Python
|
lib/utils/arg_scope.py
|
SimeonZhang/detectron2_tensorflow
|
ca03f633111d540ea91b3de75dbfa1da813647be
|
[
"Apache-2.0"
] | 3
|
2021-06-07T10:48:51.000Z
|
2022-03-01T11:43:40.000Z
|
lib/utils/arg_scope.py
|
SimeonZhang/detectron2_tensorflow
|
ca03f633111d540ea91b3de75dbfa1da813647be
|
[
"Apache-2.0"
] | null | null | null |
lib/utils/arg_scope.py
|
SimeonZhang/detectron2_tensorflow
|
ca03f633111d540ea91b3de75dbfa1da813647be
|
[
"Apache-2.0"
] | null | null | null |
import copy
from contextlib import contextmanager
from functools import wraps
from collections import defaultdict
import tensorflow as tf
_ArgScopeStack = []
@contextmanager
def arg_scope(layers, **kwargs):
"""
Args:
layers (list or layer): layer or list of layers to apply the arguments.
Returns:
a context where all appearance of these layer will by default have the
arguments specified by kwargs.
Example:
.. code-block:: python
with arg_scope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
x = Conv2D('conv0', x)
x = Conv2D('conv1', x)
x = Conv2D('conv2', x, out_channel=64) # override argscope
"""
if not isinstance(layers, list):
layers = [layers]
for l in layers:
assert hasattr(l, '__arg_scope_enabled__'), "Argscope not supported for {}".format(l)
    # need to deepcopy so that changes to new_scope do not affect the outer scope
new_scope = copy.deepcopy(get_arg_scope())
for l in layers:
new_scope[l.__name__].update(kwargs)
_ArgScopeStack.append(new_scope)
yield
del _ArgScopeStack[-1]
def get_arg_scope():
"""
Returns:
dict: the current argscope.
An argscope is a dict of dict: ``dict[layername] = {arg: val}``
"""
if len(_ArgScopeStack) > 0:
return _ArgScopeStack[-1]
else:
return defaultdict(dict)
def add_arg_scope(cls):
"""Decorator for function to support argscope
Example:
.. code-block:: python
from mylib import MyClass
myfunc = add_arg_scope(MyClass)
Args:
func: A function mapping one or multiple tensors to one or multiple
tensors.
Remarks:
If the function ``func`` returns multiple input or output tensors,
only the first input/output tensor shape is displayed during logging.
Returns:
The decorated function.
"""
original_init = cls.__init__
@wraps(original_init)
def wrapped_init(self, *args, **kwargs):
actual_args = copy.copy(get_arg_scope()[cls.__name__])
actual_args.update(kwargs)
instance = original_init(self, *args, **actual_args)
return instance
cls.__arg_scope_enabled__ = True
cls.__init__ = wrapped_init
return cls
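# Minimal usage sketch (the Dense class below is hypothetical, not part of this
# module): add_arg_scope enables a class for argscope, and arguments given to
# arg_scope become defaults for every construction inside the context.
if __name__ == "__main__":
    @add_arg_scope
    class Dense:
        def __init__(self, name, units=None, activation=None):
            self.name, self.units, self.activation = name, units, activation

    with arg_scope([Dense], units=64, activation="relu"):
        layer = Dense("fc0")  # picks up units/activation from the scope
    assert layer.units == 64 and layer.activation == "relu"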
| 26.41573
| 93
| 0.64228
| 295
| 2,351
| 4.915254
| 0.433898
| 0.049655
| 0.022759
| 0.030345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008757
| 0.271374
| 2,351
| 88
| 94
| 26.715909
| 0.837712
| 0.461506
| 0
| 0.058824
| 0
| 0
| 0.044366
| 0.018634
| 0
| 0
| 0
| 0
| 0.029412
| 1
| 0.117647
| false
| 0
| 0.147059
| 0
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b0cebe762170956488a4d3cddc7f97ae057f2da
| 754
|
py
|
Python
|
CORN-TEST/textfsm_parse.py
|
AnkitDeshwal89/NETMIKO
|
81c164e9cff46d11b56612f6adc343b6bcdfe87a
|
[
"Apache-2.0"
] | null | null | null |
CORN-TEST/textfsm_parse.py
|
AnkitDeshwal89/NETMIKO
|
81c164e9cff46d11b56612f6adc343b6bcdfe87a
|
[
"Apache-2.0"
] | null | null | null |
CORN-TEST/textfsm_parse.py
|
AnkitDeshwal89/NETMIKO
|
81c164e9cff46d11b56612f6adc343b6bcdfe87a
|
[
"Apache-2.0"
] | null | null | null |
import textfsm
import subprocess
import random
res = subprocess.run('ifconfig',stdout=subprocess.PIPE)
intstatus = res.stdout.decode('ascii')
with open("datafile","w+") as a:
a.write(intstatus)
a.close()
template_file= "ifconfig-template.template"
template = open(template_file)
with open("datafile") as f:
raw_data = f.read()
re_table = textfsm.TextFSM(template)
data = re_table.ParseText(raw_data)
print(data)
NL = []
for x in data:
NLD = {
'Interface' : x[0].split(':')[0],
'TX' : int(x[1])+int(random.randint(1,100))
}
NL.append(NLD)
print(NL)
import json
print('#'*12)
print(json.dumps(NL))
#Enter template FileName :ifconfig-template.template
#Input Data file : ifconfig_output.txt
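# Illustrative template sketch (hypothetical contents for
# ifconfig-template.template; the real template is not shown in this snippet).
# A TextFSM template that would yield the [Interface, TX] rows the loop above
# expects might look like:
#
#   Value Interface (\S+)
#   Value TX (\d+)
#
#   Start
#     ^${Interface}: flags -> Continue
#     ^\s+TX packets ${TX} -> Record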
| 18.390244
| 55
| 0.667109
| 105
| 754
| 4.72381
| 0.495238
| 0.096774
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01454
| 0.179045
| 754
| 40
| 56
| 18.85
| 0.786753
| 0.116711
| 0
| 0
| 0
| 0
| 0.105422
| 0.039157
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b0d7e26713e21d118eb39e3b4c51db758d9a74a
| 18,151
|
py
|
Python
|
installSynApps/data_model/install_config.py
|
NSLS-II/installSynApps
|
0f8e978939715bbba1a064ead3044fa36215cb09
|
[
"BSD-3-Clause"
] | null | null | null |
installSynApps/data_model/install_config.py
|
NSLS-II/installSynApps
|
0f8e978939715bbba1a064ead3044fa36215cb09
|
[
"BSD-3-Clause"
] | 2
|
2021-01-06T19:57:19.000Z
|
2021-03-11T20:48:42.000Z
|
installSynApps/data_model/install_config.py
|
NSLS-II/installSynApps
|
0f8e978939715bbba1a064ead3044fa36215cb09
|
[
"BSD-3-Clause"
] | 1
|
2020-12-14T20:35:20.000Z
|
2020-12-14T20:35:20.000Z
|
"""A file containing representations of install configurations.
The core Data representation for installSynApps. An InstallConfiguration object
is parsed from a configuration, and is then used throughout the build process.
InjectorFile objects are used for representing text that needs to be injected
into configuration files prior to builds.
"""
import os
import installSynApps
from installSynApps.data_model.install_module import InstallModule as IM
class InstallConfiguration:
"""
Class that represents an Install Configuration for installSynApps
It stores the top level install_location, the path to the configuration files,
any OS specific configurations, and the actual list of modules that will be
installed.
Attributes
----------
install_location : str
path to top level install location
path_to_configure : str
path to configure folder of installSynApps
    modules : list of InstallModule
list of InstallModule objects representing the modules that will be installed
base_path : str
abs path to install location of EPICS base
support_path : str
abs path to install location of EPICS support modules
ad_path : str
abs path to install location of EPICS area detector
motor_path : str
abs path to install location of EPICS motor
module_map : dict of str -> int
Dictionary storing relation of module names to build index
injector_files : list of InjectorFile
list of injector files loaded by install configuration
build_flags : list of list of str
list of macro-value pairs enforced at build time
"""
def __init__(self, install_location, path_to_configure):
"""Constructor for the InstallConfiguration object
"""
# Paths to configure and output locations
self.path_to_configure = path_to_configure
self.install_location = os.path.abspath(install_location)
# Modules loaded into install config
self.modules = []
# Dict that maps module name to index in module list for easier searching.
self.module_map = {}
self.injector_files = []
self.build_flags = []
# Paths to the three install location paths used for relative path correction
self.base_path = None
self.support_path = None
self.ad_path = None
self.motor_path = None
self.extensions_path = None
def is_install_valid(self):
"""Function that checks if given install location is valid
Parameters
----------
self : InstallConfiguration
Self object
Returns
-------
bool
True if install location is valid, false otherwise
str
Error message if applicable, None otherwise
"""
valid = True
message = None
target = self.install_location
if not os.path.exists(target):
target = os.path.dirname(self.install_location)
if not os.path.exists(target):
valid = False
message = 'Install location and parent directory do not exist'
elif not os.access(target, os.W_OK | os.X_OK):
valid = False
message = 'Permission Error: {}'.format(target)
return valid, message
def add_module(self, module):
"""Function that adds a module to the InstallConfiguration module list
First checks if parameter is a valid InstallModule, then sets the config, and abs path,
then if it is one of the three key modules to track, sets the appropriate variables. Also,
add the module to the map of modules which will keep track of which position each module is
in in the list/build order
Parameters
----------
module : InstallModule
new installation module being added.
"""
if isinstance(module, IM):
# Updates the abs path
module.abs_path = self.convert_path_abs(module.rel_path)
# Key paths to track
if module.name == "EPICS_BASE":
self.base_path = module.abs_path
elif module.name == "SUPPORT":
self.support_path = module.abs_path
elif module.name == "AREA_DETECTOR":
self.ad_path = module.abs_path
elif module.name == "MOTOR":
self.motor_path = module.abs_path
elif module.name == "EXTENSIONS":
self.extensions_path = module.abs_path
self.module_map[module.name] = len(self.modules)
self.modules.append(module)
def add_injector_file(self, name, contents, target):
"""Function that adds a new injector file to the install_config object
Parameters
----------
name : str
name of the file
contents : str
The contents of the file
target : str
The target location file into which contents will be injected.
"""
new_injector = InjectorFile(self.path_to_configure, name, contents, target)
self.injector_files.append(new_injector)
def add_macros(self, macro_list):
"""Function that adds macro-value pairs to a list of macros
Parameters
----------
macro_list : list of [str, str]
list of new macros to append
"""
self.build_flags = self.build_flags + macro_list
def get_module_list(self):
"""Function that gets the list of modules in the configuration
Returns
-------
List
self.modules - list of modules to install in this install configuration
"""
return self.modules
def get_module_by_name(self, name):
"""Function that returns install module object given module name
Uses module name as a key in a dictionary to return reference to given module object.
Parameters
----------
name : str
Module name
Returns
-------
obj - InstallModule
Return matching module, or None if not found.
"""
if name in self.module_map.keys():
return self.modules[self.module_map[name]]
else:
return None
def get_module_build_index(self, name):
"""Function that returns the index in the build order for the module
Used for ensuring dependencies are built before lower level packages.
Parameters
----------
name : str
Module name
Returns
-------
int
Index of module in build order if found, otherwise -1
"""
if name in self.module_map.keys():
return self.module_map[name]
else:
return -1
def get_core_version(self):
"""Funciton that returns selected version of ADCore
"""
return self.get_module_by_name('ADCORE').version
def swap_module_positions(self, module_A, module_B):
"""Swaps build order of modules
Used to ensure dependencies are built before lower level packages
Parameters
----------
module_A : str
Name of first module
module_B : str
Name of second module
"""
index_A = self.get_module_build_index(module_A)
index_B = self.get_module_build_index(module_B)
if index_A >= 0 and index_B >= 0:
temp_A = self.get_module_by_name(module_B)
temp_B = self.get_module_by_name(module_A)
self.modules[index_A] = temp_A
self.modules[index_B] = temp_B
self.module_map[module_A] = index_B
self.module_map[module_B] = index_A
def convert_path_abs(self, rel_path):
"""Function that converts a given modules relative path to an absolute path
If the macro name can be found in the list of accounted for modules, replace it with that module's absolute path
Parameters
----------
rel_path : str
The relative installation path for the given module
Returns
-------
str
The absolute installation path for the module. (Macros are replaced)
"""
temp = rel_path.split('/', 1)[-1]
if "$(INSTALL)" in rel_path and self.install_location != None:
return installSynApps.join_path(self.install_location, temp)
elif "$(EPICS_BASE)" in rel_path and self.base_path != None:
return installSynApps.join_path(self.base_path, temp)
elif "$(SUPPORT)" in rel_path and self.support_path != None:
return installSynApps.join_path(self.support_path, temp)
elif "$(AREA_DETECTOR)" in rel_path and self.ad_path != None:
return installSynApps.join_path(self.ad_path, temp)
elif "$(MOTOR)" in rel_path and self.motor_path != None:
return installSynApps.join_path(self.motor_path, temp)
elif "$(EXTENSIONS)" in rel_path and self.extensions_path != None:
return installSynApps.join_path(self.extensions_path, temp)
elif "$(" in rel_path:
macro_part = rel_path.split(')')[0]
rel_to = macro_part.split('(')[1]
rel_to_module = self.get_module_by_name(rel_to)
if rel_to_module is not None:
return installSynApps.join_path(rel_to_module.abs_path, temp)
return rel_path
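    # Example (sketch): with install_location '/epics', the relative path
    # '$(INSTALL)/support' resolves to '/epics/support'; a module-name macro
    # such as '$(ASYN)/db' falls through to the final branch and resolves
    # against that module's abs_path once ASYN has been added to the config.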
def print_installation_info(self, fp = None):
"""Function that prints installation info
Prints list of all modules including clone/build/package information
Parameters
----------
fp = None : file pointer
Optional pointer to an external log file
"""
if fp == None:
print(self.get_printable_string().strip())
else:
fp.write(self.get_printable_string())
def get_printable_string(self):
"""Function that gets a toString for an InstallConfigurations
Returns
-------
str
A string representing the install configuration
"""
out = "--------------------------------\n"
out = out + "Install Location = {}\n".format(self.install_location)
out = out + "This Install Config is saved at {}\n".format(self.path_to_configure)
for module in self.modules:
if module.clone == 'YES':
out = out + module.get_printable_string()
return out
def get_module_names_list(self):
"""Function that gets list of modules being built
Returns
-------
list of str
list of module names that are set to build
"""
out = []
for module in self.modules:
if module.build == 'YES':
out.append(module.name)
return out
class InjectorFile:
"""Class that represents an injector file and stores its name, contents, and target
Injector file classes are used to represent data that needs to be appended to target files
at build time. Used to add to commonPlugins, commonPlugin_settings, etc.
    TODO: This class can probably be abstracted into a simpler data structure (since it's used as a struct anyway)
Attributes
----------
path_to_configure : str
path to the configure dir that houses this injector file
name : str
name of the file
contents : str
The contents of the file
target : str
The target location file into which contents will be injected.
"""
def __init__(self, path_to_configure, name, contents, target):
"""Constructor of InjectorFile class
"""
self.path_to_configure = path_to_configure
self.name = name
self.contents = contents
self.target = target
def generate_default_install_config(target_install_loc='/epics', update_versions=False, with_pva=True):
config = InstallConfiguration(target_install_loc, None)
y = 'YES'
n = 'NO'
gu = 'GIT_URL'
wu = 'WGET_URL'
base_org = 'https://github.com/epics-base/'
syn_org = 'https://github.com/EPICS-synApps/'
mod_org = 'https://github.com/epics-modules/'
ad_org = 'https://github.com/areaDetector/'
seq_rel = 'http://www-csr.bessy.de/control/SoftDist/sequencer/releases/'
psi_org = 'https://github.com/paulscherrerinstitute/'
# Add core modules that will generally always be built
config.add_module(IM("EPICS_BASE", "R7.0.3", "$(INSTALL)/base", gu, base_org, "epics-base", y, y, y))
config.add_module(IM("SUPPORT", "R6-1", "$(INSTALL)/support", gu, syn_org, "support", y, y, n))
config.add_module(IM("CONFIGURE", "R6-1", "$(SUPPORT)/configure", gu, syn_org, "configure", y, y, n))
config.add_module(IM("UTILS", "R6-1", "$(SUPPORT)/utils", gu, syn_org, "utils", y, y, n))
config.add_module(IM("SNCSEQ", "2.2.8", "$(SUPPORT)/seq", wu, seq_rel, "seq-2.2.8.tar.gz", y, y, y))
config.add_module(IM("IPAC", "2.15", "$(SUPPORT)/ipac", gu, mod_org, "ipac", y, y, y))
config.add_module(IM("ASYN", "R4-37", "$(SUPPORT)/asyn", gu, mod_org, "asyn", y, y, y))
config.add_module(IM("AUTOSAVE", "R5-10", "$(SUPPORT)/autosave", gu, mod_org, "autosave", y, y, y))
config.add_module(IM("BUSY", "R1-7-2", "$(SUPPORT)/busy", gu, mod_org, "busy", y, y, y))
config.add_module(IM("CALC", "R3-7-3", "$(SUPPORT)/calc", gu, mod_org, "calc", y, y, y))
config.add_module(IM("DEVIOCSTATS", "master", "$(SUPPORT)/iocStats", gu, mod_org, "iocStats", y, y, y))
config.add_module(IM("SSCAN", "R2-11-3", "$(SUPPORT)/sscan", gu, mod_org, "sscan", y, y, y))
config.add_module(IM("IPUNIDIG", "R2-11", "$(SUPPORT)/ipUnidig", gu, mod_org, "ipUnidig", y, y, y))
# Some modules that are commonly needed
config.add_module(IM("XSPRESS3", "master", "$(SUPPORT)/xspress3", gu, mod_org, "xspress3", y, y, y))
config.add_module(IM("MOTOR", "R7-1", "$(SUPPORT)/motor", gu, mod_org, "motor", y, y, y))
config.add_module(IM("QUADEM", "R9-3", "$(SUPPORT)/quadEM", gu, mod_org, "quadEM", y, y, y))
config.add_module(IM("STREAM", "2.8.10", "$(SUPPORT)/stream", gu, psi_org, "StreamDevice", y, y, y))
# AreaDetector and commonly used drivers
config.add_module(IM("AREA_DETECTOR", "R3-8", "$(SUPPORT)/areaDetector", gu, ad_org, "areaDetector", y, y, n))
config.add_module(IM("ADSUPPORT", "R1-9", "$(AREA_DETECTOR)/ADSupport", gu, ad_org, "ADSupport", y, y, y))
config.add_module(IM("ADCORE", "R3-8", "$(AREA_DETECTOR)/ADCore", gu, ad_org, "ADCore", y, y, y))
config.add_module(IM("ADPERKINELMER", "master", "$(AREA_DETECTOR)/ADPerkinElmer", gu, ad_org, "ADPerkinElmer", n, n, n))
config.add_module(IM("ADGENICAM", "master", "$(AREA_DETECTOR)/ADGenICam", gu, ad_org, "ADGenICam", n, n, n))
config.add_module(IM("ADANDOR3", "master", "$(AREA_DETECTOR)/ADAndor3", gu, ad_org, "ADAndor3", n, n, n))
config.add_module(IM("ADPROSILICA", "R2-5", "$(AREA_DETECTOR)/ADProsilica", gu, ad_org, "ADProsilica", n, n, n))
config.add_module(IM("ADSIMDETECTOR", "master", "$(AREA_DETECTOR)/ADSimDetector", gu, ad_org, "ADSimDetector", n, n, n))
config.add_module(IM("ADPILATUS", "R2-8", "$(AREA_DETECTOR)/ADPilatus", gu, ad_org, "ADPilatus", n, n, n))
config.add_module(IM("ADMERLIN", "master", "$(AREA_DETECTOR)/ADMerlin", gu, ad_org, "ADMerlin", n, n, n))
config.add_module(IM("ADARAVIS", "master", "$(AREA_DETECTOR)/ADAravis", gu, ad_org, "ADAravis", n, n, n))
config.add_module(IM("ADEIGER", "R2-6", "$(AREA_DETECTOR)/ADEiger", gu, ad_org, "ADEiger", n, n, n))
config.add_module(IM("ADVIMBA", "master", "$(AREA_DETECTOR)/ADVimba", gu, ad_org, "ADVimba", n, n, n))
config.add_module(IM("ADPOINTGREY", "master", "$(AREA_DETECTOR)/ADPointGrey", gu, ad_org, "ADPointGrey", n, n, n))
config.add_module(IM("ADANDOR", "R2-8", "$(AREA_DETECTOR)/ADAndor", gu, ad_org, "ADAndor", n, n, n))
config.add_module(IM("ADDEXELA", "R2-3", "$(AREA_DETECTOR)/ADDexela", gu, ad_org, "ADDexela", n, n, n))
config.add_module(IM("ADMYTHEN", "master", "$(AREA_DETECTOR)/ADMythen", gu, ad_org, "ADMythen", n, n, n))
config.add_module(IM("ADURL", "master", "$(AREA_DETECTOR)/ADURL", gu, ad_org, "ADURL", n, n, n))
common_plugins_str = 'dbLoadRecords("$(DEVIOCSTATS)/db/iocAdminSoft.db", "IOC=$(PREFIX)")\n'
autosave_str = 'file "sseqRecord_settings.req", P=$(P), S=AcquireSequence\n'
if with_pva:
autosave_str += 'file "NDPva_settings.req", P=$(P), R=Pva1:\n'
        common_plugins_str += 'NDPvaConfigure("PVA1", $(QSIZE), 0, "$(PORT)", 0, $(PREFIX)Pva1:Image, 0, 0, 0)\n' \
            'dbLoadRecords("NDPva.template", "P=$(PREFIX),R=Pva1:, PORT=PVA1,ADDR=0,TIMEOUT=1,NDARRAY_PORT=$(PORT)")\n' \
            '# Must start PVA server if this is enabled\n' \
            'startPVAServer\n'
config.add_injector_file('PLUGIN_CONFIG', common_plugins_str, '$(AREA_DETECTOR)/ADCore/iocBoot/EXAMPLE_commonPlugins.cmd')
config.add_injector_file('AUTOSAVE_CONFIG', autosave_str, '$(AREA_DETECTOR)/ADCore/iocBoot/EXAMPLE_commonPlugin_settings.req')
if update_versions:
installSynApps.sync_all_module_tags(config)
return config
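# Usage sketch (illustrative; the install location is hypothetical):
if __name__ == "__main__":
    cfg = generate_default_install_config(target_install_loc="/epics")
    cfg.print_installation_info()
    print(cfg.get_module_names_list())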
| 39.804825
| 130
| 0.598204
| 2,274
| 18,151
| 4.62577
| 0.166667
| 0.031657
| 0.04991
| 0.056564
| 0.260291
| 0.207434
| 0.17226
| 0.067877
| 0.050385
| 0.021295
| 0
| 0.006742
| 0.289075
| 18,151
| 455
| 131
| 39.892308
| 0.808431
| 0.29822
| 0
| 0.083799
| 0
| 0.011173
| 0.222765
| 0.071741
| 0
| 0
| 0
| 0.002198
| 0
| 1
| 0.089385
| false
| 0
| 0.01676
| 0
| 0.217877
| 0.027933
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b0ed79dd0939a74afbcf7db38081382144c0b6e
| 3,587
|
py
|
Python
|
apps/accounts/views.py
|
tarvitz/icu
|
9a7cdac9d26ea224539f68f678b90bf70084374d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T23:44:21.000Z
|
2022-03-12T23:44:21.000Z
|
apps/accounts/views.py
|
tarvitz/icu
|
9a7cdac9d26ea224539f68f678b90bf70084374d
|
[
"BSD-3-Clause"
] | null | null | null |
apps/accounts/views.py
|
tarvitz/icu
|
9a7cdac9d26ea224539f68f678b90bf70084374d
|
[
"BSD-3-Clause"
] | null | null | null |
# Create your views here.
# -*- coding: utf-8 -*-
from apps.core.helpers import render_to, ajax_response, get_object_or_None
from apps.core.decorators import lock, login_required_json
from apps.accounts.models import Invite
from apps.accounts.decorators import check_invite
from apps.accounts.forms import (
LoginForm, AccountRegisterForm, SendInviteForm, InviteRegisterForm
)
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
@render_to('accounts/login.html')
def login(request):
form = LoginForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
user = form.cleaned_data['user']
auth.login(request, user)
return {'redirect': 'core:index'}
return {
'form': form
}
@render_to('index.html')
def logout(request):
auth.logout(request)
return {}
@render_to('accounts/profile.html')
def profile(request):
return {}
@login_required_json
@ajax_response
def generate_new_api_key(request):
if request.method == 'POST':
request.user.api_key.key = request.user.api_key.generate_key()
request.user.api_key.save()
key = request.user.api_key.key
return {'success': True, 'key': key}
return {'success': False}
@lock("REGISTER_ALLOWED")
@render_to('accounts/register.html')
def register(request):
form = AccountRegisterForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
user = form.save(commit=False)
user.set_password(form.cleaned_data['password'])
user.save()
return {'redirect': 'core:index'}
return {
'form': form
}
@login_required
@render_to('accounts/invite.html')
def invite(request):
form = SendInviteForm(request.POST or None, request=request)
if request.method == 'POST':
if form.is_valid():
form.save(commit=False)
invite = form.instance
email = form.cleaned_data['email']
msg = settings.INVITE_MESSAGE % {
'user': request.user.username,
'link': "http://b3ban.blacklibrary.ru%s" % reverse('accounts:invite-register', args=(invite.sid, ))
}
            # no mail is sent, no money :)
send_mail(
subject=unicode(_('You have been invited to b3ban service')),
message=unicode(msg),
from_email=settings.EMAIL_FROM,
recipient_list=[email]
)
invite.save()
return {'redirect': 'accounts:invite-success'}
return {
'form': form
}
# Check for the possibility to register
@transaction.commit_on_success
@check_invite(sid='sid')
@render_to('accounts/invite_register.html')
def invite_register(request, sid):
invite = get_object_or_None(Invite, sid=sid)
if not invite:
return {'redirect': 'core:ufo'}
form = InviteRegisterForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
invite.is_verified = True
invite.save()
user = form.save(commit=False)
user.email = invite.email
user.set_password(form.cleaned_data['password'])
user.save()
return {'redirect': 'accounts:invite-register-success'}
return {'form': form, 'sid': sid}
| 30.922414
| 115
| 0.642598
| 428
| 3,587
| 5.254673
| 0.257009
| 0.0249
| 0.035571
| 0.042241
| 0.256114
| 0.185416
| 0.166741
| 0.140062
| 0.125834
| 0.125834
| 0
| 0.0011
| 0.239755
| 3,587
| 115
| 116
| 31.191304
| 0.823616
| 0.028994
| 0
| 0.270833
| 0
| 0
| 0.12766
| 0.043416
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072917
| false
| 0.020833
| 0.125
| 0.010417
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b1048e91d3158720f5949f6fb7c7ea76df6e7a1
| 14,435
|
py
|
Python
|
testproject/testproject/settings.py
|
jackvz/mezzanine-cartridge-api
|
c956afa672fcf1035ab60cd5eb6589a06ccaafa0
|
[
"MIT"
] | 1
|
2019-04-18T23:28:03.000Z
|
2019-04-18T23:28:03.000Z
|
testproject/testproject/settings.py
|
jackvz/mezzanine-cartridge-api
|
c956afa672fcf1035ab60cd5eb6589a06ccaafa0
|
[
"MIT"
] | 1
|
2020-06-05T20:27:04.000Z
|
2020-06-05T20:27:04.000Z
|
testproject/testproject/settings.py
|
jackvz/mezzanine-cartridge-api
|
c956afa672fcf1035ab60cd5eb6589a06ccaafa0
|
[
"MIT"
] | 1
|
2020-12-13T15:55:53.000Z
|
2020-12-13T15:55:53.000Z
|
from __future__ import absolute_import, unicode_literals
import os
from django import VERSION as DJANGO_VERSION
from django.utils.translation import ugettext_lazy as _
SECRET_KEY = '%29hnw7d-dy4n)!@1yi#ov#^@x0b=o*2o8^31oe!+(xw!!oc9a'
######################
# CARTRIDGE SETTINGS #
######################
# The following settings are already defined in cartridge.shop.defaults
# with default values, but are common enough to be put here, commented
# out, for conveniently overriding. Please consult the settings
# documentation for a full list of settings Cartridge implements:
# http://cartridge.jupo.org/configuration.html#default-settings
# Sequence of available credit card types for payment.
# SHOP_CARD_TYPES = ("Mastercard", "Visa", "Diners", "Amex")
# Setting to turn on featured images for shop categories. Defaults to False.
# SHOP_CATEGORY_USE_FEATURED_IMAGE = True
# If True, the checkout process is split into separate
# billing/shipping and payment steps.
# SHOP_CHECKOUT_STEPS_SPLIT = True
# If True, the checkout process has a final confirmation step before
# completion.
# SHOP_CHECKOUT_STEPS_CONFIRMATION = True
# Controls the formatting of monetary values according to the locale
# module in the python standard library. If an empty string is
# used, will fall back to the system's locale.
SHOP_CURRENCY_LOCALE = "en_GB.UTF-8"
# Dotted package path and name of the function that
# is called on submit of the billing/shipping checkout step. This
# is where shipping calculation can be performed and set using the
# function ``cartridge.shop.utils.set_shipping``.
# SHOP_HANDLER_BILLING_SHIPPING = \
# "cartridge.shop.checkout.default_billship_handler"
# Dotted package path and name of the function that
# is called once an order is successful and all of the order
# object's data has been created. This is where any custom order
# processing should be implemented.
# SHOP_HANDLER_ORDER = "cartridge.shop.checkout.default_order_handler"
# Dotted package path and name of the function that
# is called on submit of the payment checkout step. This is where
# integration with a payment gateway should be implemented.
# SHOP_HANDLER_PAYMENT = "cartridge.shop.checkout.default_payment_handler"
# Sequence of value/name pairs for order statuses.
# SHOP_ORDER_STATUS_CHOICES = (
# (1, "Unprocessed"),
# (2, "Processed"),
# )
# Sequence of value/name pairs for types of product options,
# eg Size, Colour. NOTE: Increasing the number of these will
# require database migrations!
# SHOP_OPTION_TYPE_CHOICES = (
# (1, "Size"),
# (2, "Colour"),
# )
# Sequence of indexes from the SHOP_OPTION_TYPE_CHOICES setting that
# control how the options should be ordered in the admin,
# eg for "Colour" then "Size" given the above:
# SHOP_OPTION_ADMIN_ORDER = (2, 1)
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "media-library"),)),
# (_("Shop"), ("shop.Product", "shop.ProductOption", "shop.DiscountCode",
# "shop.Sale", "shop.Order")),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = True
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.dev',
}
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_ROOT, "templates")
],
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.static",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
],
"builtins": [
"mezzanine.template.loader_tags",
],
"loaders": [
"mezzanine.template.loaders.host_themes.Loader",
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
},
]
if DJANGO_VERSION < (1, 9):
del TEMPLATES[0]["OPTIONS"]["builtins"]
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"cartridge.shop",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
# "mezzanine.accounts",
'corsheaders',
'rest_framework',
'rest_framework_api_key',
'drf_yasg',
# 'oauth2_provider',
# 'rest_framework.authtoken',
'mezzanine_cartridge_api',
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"cartridge.shop.middleware.ShopMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
'corsheaders.middleware.CorsMiddleware',
)
if DJANGO_VERSION < (1, 10):
MIDDLEWARE_CLASSES = MIDDLEWARE
del MIDDLEWARE
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
import sys
import imp
module_name = "%s.local_settings" % PROJECT_APP
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
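# For reference, a minimal ``local_settings.py`` consumed by the exec block
# above might look like this (hypothetical per-machine values):
#
# DEBUG = False
# ALLOWED_HOSTS = ["shop.example.com"]
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.postgresql_psycopg2",
#         "NAME": "shop",
#     }
# }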
| 34.783133
| 79
| 0.699619
| 1,858
| 14,435
| 5.329925
| 0.297094
| 0.018378
| 0.012723
| 0.018782
| 0.120065
| 0.096738
| 0.070383
| 0.044128
| 0.044128
| 0.044128
| 0
| 0.003458
| 0.178594
| 14,435
| 414
| 80
| 34.86715
| 0.831745
| 0.614202
| 0
| 0.029197
| 0
| 0
| 0.436816
| 0.337512
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.007299
| 0.058394
| 0
| 0.058394
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b1163fcd99a8abc3b5c62d0ed18bd3324cc7b0a
| 959
|
py
|
Python
|
wordgen/data_gen.py
|
ishaanbakhle/wordgen.us
|
45c5247ce04b13badd2e1b3164cedc9176a805c7
|
[
"MIT"
] | null | null | null |
wordgen/data_gen.py
|
ishaanbakhle/wordgen.us
|
45c5247ce04b13badd2e1b3164cedc9176a805c7
|
[
"MIT"
] | null | null | null |
wordgen/data_gen.py
|
ishaanbakhle/wordgen.us
|
45c5247ce04b13badd2e1b3164cedc9176a805c7
|
[
"MIT"
] | null | null | null |
from wordgen import consts
import numpy as np
from sklearn import preprocessing
def fill_matrix(dataset):
    assert isinstance(dataset, str)
    assert len(dataset) > 0, "Dataset must not be empty"
matrix = []
for i in consts.rang:
matrix.append([])
for o in consts.rang:
matrix[i].append(0)
dataset = dataset.lower()
    accepted = list("abcdefghijklmnopqrstuvwxyz") + ['\n']
for i in range(len(dataset)-1):
# if (dataset[i+1] in accepted and dataset[i] in accepted):
if dataset[i] in accepted:
val2 = i+1
            while val2 < len(dataset) and dataset[val2] not in accepted:
                val2 += 1
            if val2 == len(dataset):
                # No accepted character follows; stop to avoid an index error.
                break
            ind1 = consts.get_ord(dataset[i])
ind2 = consts.get_ord(dataset[val2])
matrix[ind2][ind1] += 1
matrix = preprocessing.normalize(matrix, norm='l1')
return matrix
if __name__ == '__main__':
print(fill_matrix("james as"))
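# Hypothetical usage sketch: drive simple generation from the matrix.
# Assumes consts.get_ord maps 'a'..'z' and '\n' to indices 0..26 in this
# order (not verified here), and that matrix[next][current] holds the
# transition weights.
_ALPHABET = "abcdefghijklmnopqrstuvwxyz\n"

def sample_next_char(matrix, current_char):
    # fill_matrix() L1-normalizes rows, so renormalize the column for the
    # current character before sampling the next one.
    col = np.asarray(matrix)[:, _ALPHABET.index(current_char)]
    if col.sum() == 0:
        return "\n"
    return _ALPHABET[np.random.choice(len(_ALPHABET), p=col / col.sum())]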
| 25.236842
| 76
| 0.594369
| 123
| 959
| 4.536585
| 0.406504
| 0.021505
| 0.021505
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026201
| 0.283629
| 959
| 37
| 77
| 25.918919
| 0.786026
| 0.059437
| 0
| 0
| 0
| 0
| 0.073333
| 0.03
| 0
| 0
| 0
| 0
| 0.08
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.2
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b1167f333bc4ee9231e98ecd5d13fbcbf6bc62d
| 30,725
|
py
|
Python
|
arcade/gl/context.py
|
Cleptomania/arcade
|
abb7f0a0229b7f3a7843856d4b0812a3a2b80468
|
[
"MIT"
] | null | null | null |
arcade/gl/context.py
|
Cleptomania/arcade
|
abb7f0a0229b7f3a7843856d4b0812a3a2b80468
|
[
"MIT"
] | null | null | null |
arcade/gl/context.py
|
Cleptomania/arcade
|
abb7f0a0229b7f3a7843856d4b0812a3a2b80468
|
[
"MIT"
] | null | null | null |
from ctypes import c_int, c_char_p, cast, c_float
from collections import deque
import logging
import weakref
from typing import Any, Dict, List, Tuple, Union, Sequence, Set
import pyglet
from pyglet.window import Window
from pyglet import gl
from .buffer import Buffer
from .program import Program
from .vertex_array import Geometry, VertexArray
from .framebuffer import Framebuffer, DefaultFrameBuffer
from typing import Optional
from .texture import Texture
from .query import Query
from .glsl import ShaderSource
from .types import BufferDescription
LOG = logging.getLogger(__name__)
class Context:
"""
Represents an OpenGL context. This context belongs to a ``pyglet.Window``
normally accessed through ``window.ctx``.
    The Context class contains methods for creating resources, tracks
    global state, and exposes commonly used enums. All enums also exist
in the ``gl`` module. (``ctx.BLEND`` or ``arcade.gl.BLEND``).
"""
#: The active context
active: Optional["Context"] = None
# --- Store the most commonly used OpenGL constants
# Texture
#: Texture interpolation: Nearest pixel
NEAREST = 0x2600
#: Texture interpolation: Linear interpolate
LINEAR = 0x2601
#: Texture interpolation: Minification filter for mipmaps
NEAREST_MIPMAP_NEAREST = 0x2700
#: Texture interpolation: Minification filter for mipmaps
LINEAR_MIPMAP_NEAREST = 0x2701
#: Texture interpolation: Minification filter for mipmaps
NEAREST_MIPMAP_LINEAR = 0x2702
#: Texture interpolation: Minification filter for mipmaps
LINEAR_MIPMAP_LINEAR = 0x2703
#: Texture wrap mode: Repeat
REPEAT = gl.GL_REPEAT
    #: Texture wrap mode: Clamp to edge pixel
    CLAMP_TO_EDGE = gl.GL_CLAMP_TO_EDGE
    #: Texture wrap mode: Clamp to border color
    CLAMP_TO_BORDER = gl.GL_CLAMP_TO_BORDER
    #: Texture wrap mode: Repeat mirrored
MIRRORED_REPEAT = gl.GL_MIRRORED_REPEAT
# Flags
#: Context flag: Blending
BLEND = gl.GL_BLEND
#: Context flag: Depth testing
DEPTH_TEST = gl.GL_DEPTH_TEST
#: Context flag: Face culling
CULL_FACE = gl.GL_CULL_FACE
#: Context flag: Enable ``gl_PointSize`` in shaders.
PROGRAM_POINT_SIZE = gl.GL_PROGRAM_POINT_SIZE
# Blend functions
#: Blend function
ZERO = 0x0000
#: Blend function
ONE = 0x0001
#: Blend function
SRC_COLOR = 0x0300
#: Blend function
ONE_MINUS_SRC_COLOR = 0x0301
#: Blend function
SRC_ALPHA = 0x0302
#: Blend function
ONE_MINUS_SRC_ALPHA = 0x0303
#: Blend function
DST_ALPHA = 0x0304
#: Blend function
ONE_MINUS_DST_ALPHA = 0x0305
#: Blend function
DST_COLOR = 0x0306
#: Blend function
ONE_MINUS_DST_COLOR = 0x0307
# Blend equations
    #: Blend equations: source + destination
FUNC_ADD = 0x8006
#: Blend equations: source - destination
FUNC_SUBTRACT = 0x800A
#: Blend equations: destination - source
FUNC_REVERSE_SUBTRACT = 0x800B
#: Blend equations: Minimum of source and destination
MIN = 0x8007
#: Blend equations: Maximum of source and destination
MAX = 0x8008
# Blend mode shortcuts
#: Blend mode shortcut for default blend mode: ``SRC_ALPHA, ONE_MINUS_SRC_ALPHA``
BLEND_DEFAULT = 0x0302, 0x0303
#: Blend mode shortcut for additive blending: ``ONE, ONE``
BLEND_ADDITIVE = 0x0001, 0x0001
    #: Blend mode shortcut for premultiplied alpha: ``SRC_ALPHA, ONE``
BLEND_PREMULTIPLIED_ALPHA = 0x0302, 0x0001
# VertexArray: Primitives
#: Primitive mode
POINTS = gl.GL_POINTS # 0
#: Primitive mode
LINES = gl.GL_LINES # 1
#: Primitive mode
LINE_STRIP = gl.GL_LINE_STRIP # 3
#: Primitive mode
TRIANGLES = gl.GL_TRIANGLES # 4
#: Primitive mode
TRIANGLE_STRIP = gl.GL_TRIANGLE_STRIP # 5
#: Primitive mode
TRIANGLE_FAN = gl.GL_TRIANGLE_FAN # 6
#: Primitive mode
LINES_ADJACENCY = gl.GL_LINES_ADJACENCY # 10
#: Primitive mode
LINE_STRIP_ADJACENCY = gl.GL_LINE_STRIP_ADJACENCY # 11
#: Primitive mode
TRIANGLES_ADJACENCY = gl.GL_TRIANGLES_ADJACENCY # 12
#: Primitive mode
TRIANGLE_STRIP_ADJACENCY = gl.GL_TRIANGLE_STRIP_ADJACENCY # 13
#: Patch mode (tessellation)
PATCHES = gl.GL_PATCHES
# The most common error enums
_errors = {
gl.GL_INVALID_ENUM: "GL_INVALID_ENUM",
gl.GL_INVALID_VALUE: "GL_INVALID_VALUE",
gl.GL_INVALID_OPERATION: "GL_INVALID_OPERATION",
gl.GL_INVALID_FRAMEBUFFER_OPERATION: "GL_INVALID_FRAMEBUFFER_OPERATION",
gl.GL_OUT_OF_MEMORY: "GL_OUT_OF_MEMORY",
gl.GL_STACK_UNDERFLOW: "GL_STACK_UNDERFLOW",
gl.GL_STACK_OVERFLOW: "GL_STACK_OVERFLOW",
}
def __init__(self, window: pyglet.window.Window, gc_mode: str = "auto"):
self._window_ref = weakref.ref(window)
self.limits = Limits(self)
self._gl_version = (self.limits.MAJOR_VERSION, self.limits.MINOR_VERSION)
Context.activate(self)
# Texture unit we use when doing operations on textures to avoid
# affecting currently bound textures in the first units
self.default_texture_unit = self.limits.MAX_TEXTURE_IMAGE_UNITS - 1
# Detect the default framebuffer
self._screen = DefaultFrameBuffer(self)
# Tracking active program
self.active_program: Optional[Program] = None
# Tracking active framebuffer. On context creation the window is the default render target
self.active_framebuffer: Framebuffer = self._screen
self.stats: ContextStats = ContextStats(warn_threshold=1000)
# Hardcoded states
# This should always be enabled
gl.glEnable(gl.GL_TEXTURE_CUBE_MAP_SEAMLESS)
# Set primitive restart index to -1 by default
gl.glEnable(gl.GL_PRIMITIVE_RESTART)
self._primitive_restart_index = -1
self.primitive_restart_index = self._primitive_restart_index
# We enable scissor testing by default.
# This is always set to the same value as the viewport
# to avoid background color affecting areas outside the viewport
gl.glEnable(gl.GL_SCISSOR_TEST)
# States
self._blend_func = self.BLEND_DEFAULT
self._point_size = 1.0
self._flags: Set[int] = set()
# Normal garbage collection as default (what we expect in python)
self._gc_mode = "auto"
self.gc_mode = gc_mode
#: Collected objects to gc when gc_mode is "context_gc"
self.objects = deque()
@property
def window(self) -> Window:
"""
The window this context belongs to.
:type: ``pyglet.Window``
"""
return self._window_ref()
@property
def screen(self) -> Framebuffer:
"""
The framebuffer for the window.
:type: :py:class:`~arcade.Framebuffer`
"""
return self._screen
@property
def fbo(self) -> Framebuffer:
"""
Get the currently active framebuffer.
This property is read-only
:type: :py:class:`arcade.gl.Framebuffer`
"""
return self.active_framebuffer
@property
def gl_version(self) -> Tuple[int, int]:
"""
The OpenGL version as a 2 component tuple
:type: tuple (major, minor) version
"""
return self._gl_version
def gc(self):
"""
Run garbage collection of OpenGL objects for this context.
This is only needed when ``gc_mode`` is ``context_gc``.
"""
# Loop the array until all objects are gone.
        # Deleting one object might add new ones, so keep looping until empty.
while len(self.objects):
obj = self.objects.pop()
obj.delete()
@property
def gc_mode(self) -> str:
"""
Set the garbage collection mode for OpenGL resources.
Supported modes are:
# default: Auto
ctx.gc_mode = "auto"
"""
return self._gc_mode
@gc_mode.setter
def gc_mode(self, value: str):
modes = ["auto", "context_gc"]
if value not in modes:
raise ValueError("Unsupported gc_mode. Supported modes are:", modes)
self._gc_mode = value
@property
def error(self) -> Union[str, None]:
"""Check OpenGL error
        Returns a string representation of the occurring error
        or ``None`` if no error has occurred.
Example::
err = ctx.error
if err:
                raise RuntimeError(f"OpenGL error: {err}")
:type: str
"""
err = gl.glGetError()
if err == gl.GL_NO_ERROR:
return None
return self._errors.get(err, "GL_UNKNOWN_ERROR")
@classmethod
def activate(cls, ctx: "Context"):
"""Mark a context as the currently active one"""
cls.active = ctx
def enable(self, *args):
"""
Enables one or more context flags::
# Single flag
ctx.enable(ctx.BLEND)
# Multiple flags
ctx.enable(ctx.DEPTH_TEST, ctx.CULL_FACE)
"""
self._flags.update(args)
for flag in args:
gl.glEnable(flag)
def enable_only(self, *args):
"""
Enable only some flags. This will disable all other flags.
This is a simple way to ensure that context flag states
are not lingering from other sections of your code base::
# Ensure all flags are disabled (enable no flags)
ctx.enable_only()
# Make sure only blending is enabled
ctx.enable_only(ctx.BLEND)
# Make sure only depth test and culling is enabled
ctx.enable_only(ctx.DEPTH_TEST, ctx.CULL_FACE)
"""
self._flags = set(args)
if self.BLEND in self._flags:
gl.glEnable(self.BLEND)
else:
gl.glDisable(self.BLEND)
if self.DEPTH_TEST in self._flags:
gl.glEnable(self.DEPTH_TEST)
else:
gl.glDisable(self.DEPTH_TEST)
if self.CULL_FACE in self._flags:
gl.glEnable(self.CULL_FACE)
else:
gl.glDisable(self.CULL_FACE)
if self.PROGRAM_POINT_SIZE in self._flags:
gl.glEnable(self.PROGRAM_POINT_SIZE)
else:
gl.glDisable(self.PROGRAM_POINT_SIZE)
def disable(self, *args):
"""
Disable one or more context flags::
# Single flag
ctx.disable(ctx.BLEND)
# Multiple flags
ctx.disable(ctx.DEPTH_TEST, ctx.CULL_FACE)
"""
self._flags -= set(args)
for flag in args:
gl.glDisable(flag)
def is_enabled(self, flag) -> bool:
"""
Check if a context flag is enabled
:type: bool
"""
return flag in self._flags
@property
def viewport(self) -> Tuple[int, int, int, int]:
"""
Get or set the viewport for the currently active framebuffer.
The viewport simply describes what pixels of the screen
OpenGL should render to. Normally it would be the size of
the window's framebuffer::
# 4:3 screen
ctx.viewport = 0, 0, 800, 600
# 1080p
ctx.viewport = 0, 0, 1920, 1080
# Using the current framebuffer size
ctx.viewport = 0, 0, *ctx.screen.size
:type: tuple (x, y, width, height)
"""
return self.active_framebuffer.viewport
@viewport.setter
def viewport(self, value: Tuple[int, int, int, int]):
self.active_framebuffer.viewport = value
@property
def blend_func(self) -> Tuple[int, int]:
"""
        Get or set the blend function::
ctx.blend_func = ctx.ONE, ctx.ONE
:type: tuple (src, dst)
"""
return self._blend_func
@blend_func.setter
def blend_func(self, value: Tuple[int, int]):
self._blend_func = value
gl.glBlendFunc(value[0], value[1])
# def blend_equation(self)
# def front_face(self)
# def cull_face(self)
@property
def patch_vertices(self) -> int:
"""
Get or set number of vertices that will be used to make up a single patch primitive.
Patch primitives are consumed by the tessellation control shader (if present) and subsequently used for tessellation.
:type: int
"""
value = c_int()
gl.glGetIntegerv(gl.GL_PATCH_VERTICES, value)
return value.value
@patch_vertices.setter
def patch_vertices(self, value: int):
if not isinstance(value, int):
raise TypeError("patch_vertices must be an integer")
gl.glPatchParameteri(gl.GL_PATCH_VERTICES, value)
@property
def point_size(self) -> float:
"""float: Get or set the point size."""
return self._point_size
@point_size.setter
def point_size(self, value: float):
        self._point_size = value
        gl.glPointSize(self._point_size)
@property
def primitive_restart_index(self) -> int:
"""Get or set the primitive restart index. Default is -1"""
return self._primitive_restart_index
@primitive_restart_index.setter
def primitive_restart_index(self, value: int):
self._primitive_restart_index = value
gl.glPrimitiveRestartIndex(value)
def finish(self) -> None:
"""Wait until all OpenGL rendering commands are completed"""
gl.glFinish()
# --- Resource methods ---
def buffer(
self, *, data: Optional[Any] = None, reserve: int = 0, usage: str = "static"
) -> Buffer:
"""Create a new OpenGL Buffer object.
        :param Any data: The buffer data. This can be ``bytes`` or an object supporting the buffer protocol.
        :param int reserve: The number of bytes to reserve
:param str usage: Buffer usage. 'static', 'dynamic' or 'stream'
:rtype: :py:class:`~arcade.gl.Buffer`
"""
# create_with_size
return Buffer(self, data, reserve=reserve, usage=usage)
def framebuffer(
self,
*,
color_attachments: Union[Texture, List[Texture]] = None,
depth_attachment: Texture = None
) -> Framebuffer:
"""Create a Framebuffer.
:param List[arcade.gl.Texture] color_attachments: List of textures we want to render into
:param arcade.gl.Texture depth_attachment: Depth texture
:rtype: :py:class:`~arcade.gl.Framebuffer`
"""
return Framebuffer(
self, color_attachments=color_attachments, depth_attachment=depth_attachment
)
def texture(
self,
size: Tuple[int, int],
*,
components: int = 4,
dtype: str = "f1",
data: Any = None,
wrap_x: gl.GLenum = None,
wrap_y: gl.GLenum = None,
filter: Tuple[gl.GLenum, gl.GLenum] = None
) -> Texture:
"""Create a 2D Texture.
Wrap modes: ``GL_REPEAT``, ``GL_MIRRORED_REPEAT``, ``GL_CLAMP_TO_EDGE``, ``GL_CLAMP_TO_BORDER``
Minifying filters: ``GL_NEAREST``, ``GL_LINEAR``, ``GL_NEAREST_MIPMAP_NEAREST``, ``GL_LINEAR_MIPMAP_NEAREST``
``GL_NEAREST_MIPMAP_LINEAR``, ``GL_LINEAR_MIPMAP_LINEAR``
Magnifying filters: ``GL_NEAREST``, ``GL_LINEAR``
:param Tuple[int, int] size: The size of the texture
:param int components: Number of components (1: R, 2: RG, 3: RGB, 4: RGBA)
:param str dtype: The data type of each component: f1, f2, f4 / i1, i2, i4 / u1, u2, u4
:param Any data: The texture data (optional). Can be bytes or an object supporting the buffer protocol.
:param GLenum wrap_x: How the texture wraps in x direction
:param GLenum wrap_y: How the texture wraps in y direction
:param Tuple[GLenum,GLenum] filter: Minification and magnification filter
"""
return Texture(
self,
size,
components=components,
data=data,
dtype=dtype,
wrap_x=wrap_x,
wrap_y=wrap_y,
filter=filter,
)
def depth_texture(self, size: Tuple[int, int], *, data=None) -> Texture:
"""Create a 2D depth texture
:param Tuple[int, int] size: The size of the texture
:param Any data: The texture data (optional). Can be bytes or an object supporting the buffer protocol.
"""
return Texture(self, size, data=data, depth=True)
def geometry(
self,
content: Optional[Sequence[BufferDescription]] = None,
index_buffer: Buffer = None,
mode: int = None,
index_element_size: int = 4,
):
"""
        Create a Geometry instance.
:param list content: List of :py:class:`~arcade.gl.BufferDescription` (optional)
:param Buffer index_buffer: Index/element buffer (optional)
        :param int mode: The default draw mode (optional)
:param int index_element_size: Byte size of the index buffer type. Can be 1, 2 or 4 (8, 16 or 32 bit unsigned integer)
"""
return Geometry(self, content, index_buffer=index_buffer, mode=mode, index_element_size=index_element_size)
def program(
self,
*,
vertex_shader: str,
fragment_shader: str = None,
geometry_shader: str = None,
tess_control_shader: str = None,
tess_evaluation_shader: str = None,
defines: Dict[str, str] = None
) -> Program:
"""Create a :py:class:`~arcade.gl.Program` given the vertex, fragment and geometry shader.
:param str vertex_shader: vertex shader source
:param str fragment_shader: fragment shader source (optional)
:param str geometry_shader: geometry shader source (optional)
:param str tess_control_shader: tessellation control shader source (optional)
:param str tess_evaluation_shader: tessellation evaluation shader source (optional)
:param dict defines: Substitute #defines values in the source (optional)
:rtype: :py:class:`~arcade.gl.Program`
"""
source_vs = ShaderSource(vertex_shader, gl.GL_VERTEX_SHADER)
source_fs = (
ShaderSource(fragment_shader, gl.GL_FRAGMENT_SHADER)
if fragment_shader
else None
)
source_geo = (
ShaderSource(geometry_shader, gl.GL_GEOMETRY_SHADER)
if geometry_shader
else None
)
source_tc = (
ShaderSource(tess_control_shader, gl.GL_TESS_CONTROL_SHADER)
if tess_control_shader
else None
)
source_te = (
ShaderSource(tess_evaluation_shader, gl.GL_TESS_EVALUATION_SHADER)
if tess_evaluation_shader
else None
)
# If we don't have a fragment shader we are doing transform feedback.
# When a geometry shader is present the out attributes will be located there
out_attributes = [] # type: List[str]
if not source_fs:
if source_geo:
out_attributes = source_geo.out_attributes
else:
out_attributes = source_vs.out_attributes
return Program(
self,
vertex_shader=source_vs.get_source(defines=defines),
fragment_shader=source_fs.get_source(defines=defines)
if source_fs
else None,
geometry_shader=source_geo.get_source(defines=defines)
if source_geo
else None,
tess_control_shader=source_tc.get_source(defines=defines)
if source_tc
else None,
tess_evaluation_shader=source_te.get_source(defines=defines)
if source_te
else None,
out_attributes=out_attributes,
)
def query(self):
"""
Create a query object for measuring rendering calls in opengl.
:rtype: :py:class:`~arcade.gl.Query`
"""
return Query(self)
class ContextStats:
def __init__(self, warn_threshold=100):
self.warn_threshold = warn_threshold
# (created, freed)
self.texture = (0, 0)
self.framebuffer = (0, 0)
self.buffer = (0, 0)
self.program = (0, 0)
self.vertex_array = (0, 0)
self.geometry = (0, 0)
def incr(self, key):
created, freed = getattr(self, key)
setattr(self, key, (created + 1, freed))
if created % self.warn_threshold == 0 and created > 0:
LOG.debug(
"%s allocations passed threshold (%s) [created = %s] [freed = %s] [active = %s]",
key,
self.warn_threshold,
created,
freed,
created - freed,
)
def decr(self, key):
created, freed = getattr(self, key)
setattr(self, key, (created, freed + 1))
class Limits:
"""OpenGL Limitations"""
def __init__(self, ctx):
self._ctx = ctx
#: Minor version number of the OpenGL API supported by the current context
self.MINOR_VERSION = self.get(gl.GL_MINOR_VERSION)
#: Major version number of the OpenGL API supported by the current context.
self.MAJOR_VERSION = self.get(gl.GL_MAJOR_VERSION)
self.VENDOR = self.get_str(gl.GL_VENDOR)
self.RENDERER = self.get_str(gl.GL_RENDERER)
#: Value indicating the number of sample buffers associated with the framebuffer
self.SAMPLE_BUFFERS = self.get(gl.GL_SAMPLE_BUFFERS)
#: An estimate of the number of bits of subpixel resolution
#: that are used to position rasterized geometry in window coordinates
self.SUBPIXEL_BITS = self.get(gl.GL_SUBPIXEL_BITS)
#: A mask value indicating what context profile is used (core, compat etc.)
self.CONTEXT_PROFILE_MASK = self.get(gl.GL_CONTEXT_PROFILE_MASK)
#: Minimum required alignment for uniform buffer sizes and offset
self.UNIFORM_BUFFER_OFFSET_ALIGNMENT = self.get(
gl.GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT
)
#: Value indicates the maximum number of layers allowed in an array texture, and must be at least 256
self.MAX_ARRAY_TEXTURE_LAYERS = self.get(gl.GL_MAX_ARRAY_TEXTURE_LAYERS)
#: A rough estimate of the largest 3D texture that the GL can handle. The value must be at least 64
self.MAX_3D_TEXTURE_SIZE = self.get(gl.GL_MAX_3D_TEXTURE_SIZE)
#: Maximum number of color attachments in a framebuffer
self.MAX_COLOR_ATTACHMENTS = self.get(gl.GL_MAX_COLOR_ATTACHMENTS)
#: Maximum number of samples in a color multisample texture
self.MAX_COLOR_TEXTURE_SAMPLES = self.get(gl.GL_MAX_COLOR_TEXTURE_SAMPLES)
#: the number of words for fragment shader uniform variables in all uniform blocks
self.MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS
)
#: Number of words for geometry shader uniform variables in all uniform blocks
self.MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS
)
#: Maximum supported texture image units that can be used to access texture maps from the vertex shader
self.MAX_COMBINED_TEXTURE_IMAGE_UNITS = self.get(
gl.GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS
)
#: Maximum number of uniform blocks per program
self.MAX_COMBINED_UNIFORM_BLOCKS = self.get(gl.GL_MAX_COMBINED_UNIFORM_BLOCKS)
#: Number of words for vertex shader uniform variables in all uniform blocks
self.MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS
)
#: A rough estimate of the largest cube-map texture that the GL can handle
self.MAX_CUBE_MAP_TEXTURE_SIZE = self.get(gl.GL_MAX_CUBE_MAP_TEXTURE_SIZE)
#: Maximum number of samples in a multisample depth or depth-stencil texture
self.MAX_DEPTH_TEXTURE_SAMPLES = self.get(gl.GL_MAX_DEPTH_TEXTURE_SAMPLES)
#: Maximum number of simultaneous outputs that may be written in a fragment shader
self.MAX_DRAW_BUFFERS = self.get(gl.GL_MAX_DRAW_BUFFERS)
#: Maximum number of active draw buffers when using dual-source blending
self.MAX_DUAL_SOURCE_DRAW_BUFFERS = self.get(gl.GL_MAX_DUAL_SOURCE_DRAW_BUFFERS)
#: Recommended maximum number of vertex array indices
self.MAX_ELEMENTS_INDICES = self.get(gl.GL_MAX_ELEMENTS_INDICES)
#: Recommended maximum number of vertex array vertices
self.MAX_ELEMENTS_VERTICES = self.get(gl.GL_MAX_ELEMENTS_VERTICES)
#: Maximum number of components of the inputs read by the fragment shader
self.MAX_FRAGMENT_INPUT_COMPONENTS = self.get(
gl.GL_MAX_FRAGMENT_INPUT_COMPONENTS
)
#: Maximum number of individual floating-point, integer, or boolean values that can be
#: held in uniform variable storage for a fragment shader
self.MAX_FRAGMENT_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_FRAGMENT_UNIFORM_COMPONENTS
)
#: maximum number of individual 4-vectors of floating-point, integer,
#: or boolean values that can be held in uniform variable storage for a fragment shader
self.MAX_FRAGMENT_UNIFORM_VECTORS = self.get(gl.GL_MAX_FRAGMENT_UNIFORM_VECTORS)
#: Maximum number of uniform blocks per fragment shader.
self.MAX_FRAGMENT_UNIFORM_BLOCKS = self.get(gl.GL_MAX_FRAGMENT_UNIFORM_BLOCKS)
#: Maximum number of components of inputs read by a geometry shader
self.MAX_GEOMETRY_INPUT_COMPONENTS = self.get(
gl.GL_MAX_GEOMETRY_INPUT_COMPONENTS
)
#: Maximum number of components of outputs written by a geometry shader
self.MAX_GEOMETRY_OUTPUT_COMPONENTS = self.get(
gl.GL_MAX_GEOMETRY_OUTPUT_COMPONENTS
)
#: Maximum supported texture image units that can be used to access texture maps from the geometry shader
self.MAX_GEOMETRY_TEXTURE_IMAGE_UNITS = self.get(
gl.GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS
)
#: Maximum number of uniform blocks per geometry shader
self.MAX_GEOMETRY_UNIFORM_BLOCKS = self.get(gl.GL_MAX_GEOMETRY_UNIFORM_BLOCKS)
#: Maximum number of individual floating-point, integer, or boolean values that can
#: be held in uniform variable storage for a geometry shader
self.MAX_GEOMETRY_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_GEOMETRY_UNIFORM_COMPONENTS
)
#: Maximum number of samples supported in integer format multisample buffers
self.MAX_INTEGER_SAMPLES = self.get(gl.GL_MAX_INTEGER_SAMPLES)
#: Maximum samples for a framebuffer
self.MAX_SAMPLES = self.get(gl.GL_MAX_SAMPLES)
#: A rough estimate of the largest rectangular texture that the GL can handle
self.MAX_RECTANGLE_TEXTURE_SIZE = self.get(gl.GL_MAX_RECTANGLE_TEXTURE_SIZE)
#: Maximum supported size for renderbuffers
self.MAX_RENDERBUFFER_SIZE = self.get(gl.GL_MAX_RENDERBUFFER_SIZE)
#: Maximum number of sample mask words
self.MAX_SAMPLE_MASK_WORDS = self.get(gl.GL_MAX_SAMPLE_MASK_WORDS)
#: Maximum number of texels allowed in the texel array of a texture buffer object
self.MAX_TEXTURE_BUFFER_SIZE = self.get(gl.GL_MAX_TEXTURE_BUFFER_SIZE)
#: Maximum number of uniform buffer binding points on the context
self.MAX_UNIFORM_BUFFER_BINDINGS = self.get(gl.GL_MAX_UNIFORM_BUFFER_BINDINGS)
        #: The value gives a rough estimate of the largest texture that the GL can handle
        self.MAX_TEXTURE_SIZE = self.get(gl.GL_MAX_TEXTURE_SIZE)
#: Maximum size in basic machine units of a uniform block
self.MAX_UNIFORM_BLOCK_SIZE = self.get(gl.GL_MAX_UNIFORM_BLOCK_SIZE)
        #: The number of 4-vectors for varying variables
self.MAX_VARYING_VECTORS = self.get(gl.GL_MAX_VARYING_VECTORS)
#: Maximum number of 4-component generic vertex attributes accessible to a vertex shader.
self.MAX_VERTEX_ATTRIBS = self.get(gl.GL_MAX_VERTEX_ATTRIBS)
#: Maximum supported texture image units that can be used to access texture maps from the vertex shader.
self.MAX_VERTEX_TEXTURE_IMAGE_UNITS = self.get(
gl.GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS
)
#: Maximum number of individual floating-point, integer, or boolean values that
#: can be held in uniform variable storage for a vertex shader
self.MAX_VERTEX_UNIFORM_COMPONENTS = self.get(
gl.GL_MAX_VERTEX_UNIFORM_COMPONENTS
)
#: Maximum number of 4-vectors that may be held in uniform variable storage for the vertex shader
self.MAX_VERTEX_UNIFORM_VECTORS = self.get(gl.GL_MAX_VERTEX_UNIFORM_VECTORS)
#: Maximum number of components of output written by a vertex shader
self.MAX_VERTEX_OUTPUT_COMPONENTS = self.get(gl.GL_MAX_VERTEX_OUTPUT_COMPONENTS)
#: Maximum number of uniform blocks per vertex shader.
self.MAX_VERTEX_UNIFORM_BLOCKS = self.get(gl.GL_MAX_VERTEX_UNIFORM_BLOCKS)
# self.MAX_VERTEX_ATTRIB_RELATIVE_OFFSET = self.get(gl.GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET)
# self.MAX_VERTEX_ATTRIB_BINDINGS = self.get(gl.GL_MAX_VERTEX_ATTRIB_BINDINGS)
self.MAX_TEXTURE_IMAGE_UNITS = self.get(gl.GL_MAX_TEXTURE_IMAGE_UNITS)
# TODO: Missing in pyglet
# self.MAX_TEXTURE_MAX_ANISOTROPY = self.get_float(gl.GL_MAX_TEXTURE_MAX_ANISOTROPY)
err = self._ctx.error
if err:
from warnings import warn
warn("Error happened while querying of limits. Moving on ..")
def get(self, enum: gl.GLenum) -> int:
"""Get an integer limit"""
value = c_int()
gl.glGetIntegerv(enum, value)
return value.value
def get_float(self, enum) -> float:
"""Get a float limit"""
value = c_float()
gl.glGetFloatv(enum, value)
return value.value
def get_str(self, enum: gl.GLenum) -> str:
"""Get a string limit"""
return cast(gl.glGetString(enum), c_char_p).value.decode() # type: ignore
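# A short usage sketch built only from the properties and methods documented
# above (hypothetical helper; assumes ``ctx`` comes from an arcade window):
def _example_frame_setup(ctx: "Context") -> None:
    ctx.enable_only(ctx.BLEND)             # blending on, all other flags off
    ctx.blend_func = ctx.BLEND_ADDITIVE    # additive blending (ONE, ONE)
    ctx.viewport = 0, 0, *ctx.screen.size  # cover the default framebuffer
    err = ctx.error                        # poll for OpenGL errors
    if err:
        raise RuntimeError(f"OpenGL error: {err}")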
| 38.310474
| 126
| 0.652823
| 3,919
| 30,725
| 4.918347
| 0.143404
| 0.018885
| 0.023813
| 0.029105
| 0.342049
| 0.26594
| 0.192789
| 0.147756
| 0.1062
| 0.101219
| 0
| 0.010897
| 0.271245
| 30,725
| 801
| 127
| 38.358302
| 0.84994
| 0.389162
| 0
| 0.109223
| 0
| 0.002427
| 0.022843
| 0.001832
| 0
| 0
| 0.009275
| 0.001248
| 0
| 1
| 0.09466
| false
| 0.002427
| 0.043689
| 0
| 0.31068
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b12470d00652efed9a53779a3b55749c6b298e3
| 9,350
|
py
|
Python
|
datatableview/tests/test_helpers.py
|
gregneagle/sal
|
74c583fb1c1b33d3201b308b147376b3dcaca33f
|
[
"Apache-2.0"
] | 2
|
2019-11-01T20:50:35.000Z
|
2021-01-13T22:02:55.000Z
|
datatableview/tests/test_helpers.py
|
gregneagle/sal
|
74c583fb1c1b33d3201b308b147376b3dcaca33f
|
[
"Apache-2.0"
] | null | null | null |
datatableview/tests/test_helpers.py
|
gregneagle/sal
|
74c583fb1c1b33d3201b308b147376b3dcaca33f
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
from datetime import datetime
from functools import partial
from django import VERSION as DJANGO_VERSION
from datatableview import helpers
import six
from .testcase import DatatableViewTestCase
from .test_app.models import ExampleModel, RelatedM2MModel
if DJANGO_VERSION < (1, 7):
test_data_fixture = 'test_data_legacy.json'
else:
test_data_fixture = 'test_data.json'
class HelpersTests(DatatableViewTestCase):
fixtures = [test_data_fixture]
def test_link_to_model(self):
""" Verifies that link_to_model works. """
helper = helpers.link_to_model
# Verify that a model without get_absolute_url() raises a complaint
related = RelatedM2MModel.objects.get(pk=1)
with self.assertRaises(AttributeError) as cm:
helper(related)
self.assertEqual(str(cm.exception), "'RelatedM2MModel' object has no attribute 'get_absolute_url'")
# Verify simple use
instance = ExampleModel.objects.get(pk=1)
output = helper(instance)
self.assertEqual(output, '<a href="#1">ExampleModel 1</a>')
# Verify text override
output = helper(instance, text="Special text")
self.assertEqual(output, '<a href="#1">Special text</a>')
# Verify ``key`` access to transition an instance to a related field
instance = ExampleModel.objects.get(pk=2)
secondary_helper = helper(key=lambda o: o.related)
output = secondary_helper(instance)
self.assertEqual(output, '<a href="#1">RelatedModel object</a>')
# Verify ``key`` access version of custom text
output = secondary_helper(instance, text="Special text")
self.assertEqual(output, '<a href="#1">Special text</a>')
def test_make_boolean_checkmark(self):
""" Verifies that make_boolean_checkmark works. """
helper = helpers.make_boolean_checkmark
# Verify simple use
output = helper("True-ish value")
self.assertEqual(output, '✔')
output = helper("")
self.assertEqual(output, '✘')
# Verify custom values
output = helper("True-ish value", true_value="Yes", false_value="No")
self.assertEqual(output, 'Yes')
output = helper("", true_value="Yes", false_value="No")
self.assertEqual(output, 'No')
def test_format_date(self):
""" Verifies that format_date works. """
helper = helpers.format_date
# Verify simple use
data = datetime.now()
secondary_helper = helper("%m/%d/%Y")
output = secondary_helper(data)
self.assertEqual(output, data.strftime("%m/%d/%Y"))
# Verify that None objects get swallowed without complaint.
# This helps promise that the helper won't blow up for models.DateTimeField that are allowed
# to be null.
output = secondary_helper(None)
self.assertEqual(output, "")
def test_format(self):
""" Verifies that format works. """
helper = helpers.format
# Verify simple use
data = 1234567890
secondary_helper = helper("{0:,}")
output = secondary_helper(data)
self.assertEqual(output, "{0:,}".format(data))
# Verify ``cast`` argument
data = "1234.56789"
secondary_helper = helper("{0:.2f}", cast=float)
output = secondary_helper(data)
self.assertEqual(output, "{0:.2f}".format(float(data)))
def test_through_filter(self):
""" Verifies that through_filter works. """
helper = helpers.through_filter
target_function = lambda data, arg=None: (data, arg)
# Verify simple use
data = "Data string"
secondary_helper = helper(target_function)
output = secondary_helper(data)
self.assertEqual(output, (data, None))
# Verify ``arg`` argument
secondary_helper = helper(target_function, arg="Arg data")
output = secondary_helper(data)
self.assertEqual(output, (data, "Arg data"))
def test_itemgetter(self):
""" Verifies that itemgetter works. """
helper = helpers.itemgetter
# Verify simple index access
data = list(range(5))
secondary_helper = helper(-1)
output = secondary_helper(data)
self.assertEqual(output, data[-1])
# Verify slicing access
secondary_helper = helper(slice(1, 3))
output = secondary_helper(data)
self.assertEqual(output, data[1:3])
# Verify ellipsis works for strings
data = str(range(10))
secondary_helper = helper(slice(0, 5), ellipsis=True)
output = secondary_helper(data)
self.assertEqual(output, data[:5] + "...")
# Verify ellipsis can be customized
secondary_helper = helper(slice(0, 5), ellipsis="custom")
output = secondary_helper(data)
self.assertEqual(output, data[:5] + "custom")
# Verify ellipsis does nothing for non-string data types
data = range(10)
output = secondary_helper(data)
self.assertEqual(output, data[:5])
def test_attrgetter(self):
""" Verifies that attrgetter works. """
helper = helpers.attrgetter
# Verify simple attr lookup
data = ExampleModel.objects.get(pk=1)
secondary_helper = helper('pk')
output = secondary_helper(data)
self.assertEqual(output, data.pk)
        # Verify bad attribute lookup
data = ExampleModel.objects.get(pk=1)
secondary_helper = helper('bad field name')
with self.assertRaises(AttributeError) as cm:
output = secondary_helper(data)
self.assertEqual(str(cm.exception), "'ExampleModel' object has no attribute 'bad field name'")
def test_make_xeditable(self):
""" Verifies that make_xeditable works. """
helper = helpers.make_xeditable
# Items that the helper normally expects in a callback context
internals = {'field_name': 'name'}
# Verify chain calls don't trigger rendering
secondary_helper = helper()
tertiary_helper = secondary_helper()
self.assertEqual(type(secondary_helper), partial)
self.assertEqual(type(tertiary_helper), partial)
# Verify chain ends with provision of a value
data = ExampleModel.objects.get(pk=1)
# This needs a "url" arg because we want to test successful use
output = tertiary_helper(data, url="/", **internals)
self.assertTrue(isinstance(output, six.string_types))
# Verify that no "view" kwarg means the url is required from the call
with self.assertRaises(ValueError) as cm:
tertiary_helper(data, **internals)
self.assertEqual(str(cm.exception), "'make_xeditable' cannot determine a value for 'url'.")
# Verify kwargs accumulate
kwargs1 = { 'type': 'textarea' }
kwargs2 = { 'other_arg': True }
secondary_helper = helper(**kwargs1)
expected_kwargs = dict(kwargs1, extra_attrs=[])
self.assertEqual(secondary_helper.keywords, expected_kwargs)
tertiary_helper = secondary_helper(**kwargs2)
expected_kwargs = dict(kwargs1, **dict(kwargs2, extra_attrs=[]))
self.assertEqual(tertiary_helper.keywords, expected_kwargs)
# Verify default kwarg names end up as attributes
data = ExampleModel.objects.get(pk=1)
kwargs = {
'pk': "PK DATA",
'type': "TYPE DATA",
'url': "URL DATA",
'source': "SOURCE DATA",
'title': "TITLE DATA",
'placeholder': "PLACEHOLDER DATA",
            # Extra stuff not anticipated to appear in the rendered string
'special': "SPECIAL DATA",
'data_custom': "DATA-CUSTOM DATA",
}
secondary_helper = helper(**kwargs)
output = secondary_helper(data, **internals)
expected_output = """
<a href="#" data-name="name"
data-pk="PK DATA"
data-placeholder="PLACEHOLDER DATA"
data-source="SOURCE DATA"
data-title="TITLE DATA"
data-type="TYPE DATA"
data-url="URL DATA"
data-value="1"
data-xeditable="xeditable">
ExampleModel 1
</a>
"""
self.assertHTMLEqual(output, expected_output)
# Verify that explicit additions via ``extra_attrs`` allows kwargs to appear in HTML as
# "data-*" attributes.
secondary_helper = helper(extra_attrs=['special', 'data_custom', 'fake'], **kwargs)
output = secondary_helper(data, **internals)
expected_output = """
<a href="#" data-name="name"
data-pk="PK DATA"
data-placeholder="PLACEHOLDER DATA"
data-source="SOURCE DATA"
data-title="TITLE DATA"
data-type="TYPE DATA"
data-url="URL DATA"
data-value="1"
data-special="SPECIAL DATA"
data-custom="DATA-CUSTOM DATA"
data-xeditable="xeditable">
ExampleModel 1
</a>
"""
self.assertHTMLEqual(output, expected_output)
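# The helpers exercised above are curried: configuring a helper returns a
# secondary callable that is later applied to cell data. A minimal sketch of
# that shape (hypothetical; not the library's actual implementation):
def _format_date_sketch(fmt):
    def formatter(value, **kwargs):
        # Swallow None so nullable DateTimeFields render as empty strings.
        return value.strftime(fmt) if value is not None else ""
    return formatter
# e.g. _format_date_sketch("%m/%d/%Y")(datetime.now())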
| 37.250996
| 107
| 0.610695
| 1,030
| 9,350
| 5.433981
| 0.21165
| 0.09916
| 0.07504
| 0.062534
| 0.381633
| 0.327854
| 0.296766
| 0.283902
| 0.201715
| 0.140075
| 0
| 0.012164
| 0.279037
| 9,350
| 250
| 108
| 37.4
| 0.818128
| 0.168235
| 0
| 0.304878
| 0
| 0
| 0.219075
| 0.028846
| 0
| 0
| 0
| 0
| 0.20122
| 1
| 0.04878
| false
| 0
| 0.042683
| 0
| 0.103659
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b136297b7f7ffe43bf97fc683bc6c2f3794e562
| 3,518
|
py
|
Python
|
discordbot.py
|
naari3/seibaribot
|
3686206ed0b28b318a4032753350be8d9f2223fd
|
[
"MIT"
] | null | null | null |
discordbot.py
|
naari3/seibaribot
|
3686206ed0b28b318a4032753350be8d9f2223fd
|
[
"MIT"
] | null | null | null |
discordbot.py
|
naari3/seibaribot
|
3686206ed0b28b318a4032753350be8d9f2223fd
|
[
"MIT"
] | 1
|
2022-02-09T16:45:40.000Z
|
2022-02-09T16:45:40.000Z
|
import traceback
from os import getenv
import discord
from discord import Message
from discord.ext import commands
from discord.ext.commands import Context
from asyncio import sleep
import asyncio
client = discord.Client()
# Set the bot's command prefix to '!'
bot = commands.Bot(command_prefix='!')
# Channel ID of the Giratina channel
GIRATINA_CHANNEL_ID = 940610524415144036
WIP_CHANNEL_ID = 940966825087361025
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, 'original', error)
error_msg = ''.join(
traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
# Sends the startup greeting message
async def ready_greet():
channel = bot.get_channel(GIRATINA_CHANNEL_ID)
await channel.send('ギラティナ、オォン!')
# Runs when the bot has started up
@bot.event
async def on_ready():
await ready_greet()
# Ping-pong
@bot.command()
async def ping(ctx):
await ctx.send('pong')
@bot.event
async def on_message(message):
    # Ignore messages sent by bots
if message.author.bot:
return
    # Donald word-hunt - https://qiita.com/sizumita/items/9d44ae7d1ce007391699
    # If the message body contains 'ドナルド' (Donald)
    if 'ドナルド' in str(message.content):
        # Send the reply to the channel the message came from
await message.channel.send('https://tenor.com/view/ronald-mcdonald-insanity-ronald-mcdonald-gif-21974293')
    # If the message carries attachments in the WIP channel
if message.attachments and message.channel.id == WIP_CHANNEL_ID:
for attachment in message.attachments:
            # If the attachment is an audio file (e.g. mp3 or wav)
            # https://discordpy.readthedocs.io/ja/latest/api.html#attachment
if attachment.content_type and "audio" in attachment.content_type:
await attachment.save("input.mp3")
command = "ffmpeg -y -loop 1 -i input.jpg -i input.mp3 -vcodec libx264 -vb 50k -acodec aac -strict experimental -ab 128k -ac 2 -ar 48000 -pix_fmt yuv420p -shortest output.mp4"
proc = await asyncio.create_subprocess_exec(
*command.split(" "),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
await message.channel.send(file=discord.File("output.mp4"))
await bot.process_commands(message)
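# Note: the WIP-channel branch above pairs each audio attachment with a
# static input.jpg and shells out to ffmpeg to produce output.mp4; the
# -shortest flag ends the clip when the audio track ends.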
# Chiiba-kun's Nanohana exercise video
@bot.command()
async def chiibakun(ctx):
await ctx.send('https://www.youtube.com/watch?v=dC0eie-WQss')
# Send a Kaosu-chan image
@bot.command()
async def kaosu(ctx):
await ctx.send('https://pbs.twimg.com/media/E512yaSVIAQxfNn?format=jpg&name=large')
# Inm-meme reply
@bot.command()
async def inm(ctx):
await ctx.send('聖バリ「イキスギィイクイク!!!ンアッー!!!マクラがデカすぎる!!!」\n\n'
f'{ctx.author.name}「聖なるバリア -ミラーフォース-、淫夢はもうやめてよ!淫夢ごっこは恥ずかしいよ!」\n\n聖バリ「{ctx.author.name}'
'、おっ大丈夫か大丈夫か〜???バッチェ冷えてるぞ〜淫夢が大好きだってはっきりわかんだね」')
# Send a Giratina image
@bot.command()
async def giratina(ctx):
await ctx.send('https://img.gamewith.jp/article/thumbnail/rectangle/36417.png')
# Play bokuseku.mp3 and bail - https://qiita.com/sizumita/items/cafd00fe3e114d834ce3
@bot.command()
async def bokuseku(ctx):
if ctx.author.voice is None:
await ctx.channel.send('望月くん・・・ボイスチャンネルに来なさい')
return
    # Connect to the caller's voice channel
await ctx.author.voice.channel.connect()
    # Play the audio
ctx.guild.voice_client.play(discord.FFmpegPCMAudio('bokuseku.mp3'))
    # Wait while the audio is still playing
while ctx.guild.voice_client.is_playing():
await sleep(1)
    # Disconnect from the voice channel
await ctx.guild.voice_client.disconnect()
token = getenv('DISCORD_BOT_TOKEN')
bot.run(token)
| 29.07438
| 191
| 0.689312
| 434
| 3,518
| 5.523041
| 0.456221
| 0.033375
| 0.030038
| 0.045056
| 0.069253
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036268
| 0.192723
| 3,518
| 120
| 192
| 29.316667
| 0.805986
| 0.130756
| 0
| 0.15493
| 0
| 0.042254
| 0.223138
| 0.055043
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.112676
| 0
| 0.140845
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b13e68ee45340f613741a1e02396fe2503dcda1
| 6,831
|
py
|
Python
|
test/cpp/naming/utils/dns_server.py
|
arghyadip01/grpc
|
9e10bfc8a096ef91a327e22f84f10c0fabff4417
|
[
"Apache-2.0"
] | 9
|
2020-12-04T07:34:08.000Z
|
2022-03-07T21:10:35.000Z
|
test/cpp/naming/utils/dns_server.py
|
arghyadip01/grpc
|
9e10bfc8a096ef91a327e22f84f10c0fabff4417
|
[
"Apache-2.0"
] | 62
|
2020-02-27T00:53:36.000Z
|
2021-02-05T06:10:53.000Z
|
test/cpp/naming/utils/dns_server.py
|
arghyadip01/grpc
|
9e10bfc8a096ef91a327e22f84f10c0fabff4417
|
[
"Apache-2.0"
] | 12
|
2020-07-14T23:59:57.000Z
|
2022-03-22T09:59:18.000Z
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts a local DNS server for use in tests"""
import argparse
import sys
import yaml
import signal
import os
import threading
import time
import twisted
import twisted.internet
import twisted.internet.reactor
import twisted.internet.threads
import twisted.internet.defer
import twisted.internet.protocol
import twisted.names
import twisted.names.client
import twisted.names.dns
import twisted.names.server
from twisted.names import client, server, common, authority, dns
import platform
_SERVER_HEALTH_CHECK_RECORD_NAME = 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp' # missing end '.' for twisted syntax
_SERVER_HEALTH_CHECK_RECORD_DATA = '123.123.123.123'
class NoFileAuthority(authority.FileAuthority):
def __init__(self, soa, records):
# skip FileAuthority
common.ResolverBase.__init__(self)
self.soa = soa
self.records = records
def start_local_dns_server(args):
all_records = {}
def _push_record(name, r):
print('pushing record: |%s|' % name)
if all_records.get(name) is not None:
all_records[name].append(r)
return
all_records[name] = [r]
def _maybe_split_up_txt_data(name, txt_data, r_ttl):
start = 0
txt_data_list = []
while len(txt_data[start:]) > 0:
next_read = len(txt_data[start:])
if next_read > 255:
next_read = 255
txt_data_list.append(txt_data[start:start + next_read])
start += next_read
_push_record(name, dns.Record_TXT(*txt_data_list, ttl=r_ttl))
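# Illustration (comments only, not executed): for txt_data of length 600 the
# loop above appends chunks of lengths [255, 255, 90], since DNS limits each
# TXT character-string to 255 bytes; dns.Record_TXT then carries them as
# separate strings of a single record.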
with open(args.records_config_path) as config:
test_records_config = yaml.safe_load(config)
common_zone_name = test_records_config['resolver_tests_common_zone_name']
for group in test_records_config['resolver_component_tests']:
for name in group['records'].keys():
for record in group['records'][name]:
r_type = record['type']
r_data = record['data']
r_ttl = int(record['TTL'])
record_full_name = '%s.%s' % (name, common_zone_name)
assert record_full_name[-1] == '.'
record_full_name = record_full_name[:-1]
if r_type == 'A':
_push_record(record_full_name,
dns.Record_A(r_data, ttl=r_ttl))
if r_type == 'AAAA':
_push_record(record_full_name,
dns.Record_AAAA(r_data, ttl=r_ttl))
if r_type == 'SRV':
p, w, port, target = r_data.split(' ')
p = int(p)
w = int(w)
port = int(port)
target_full_name = '%s.%s' % (target, common_zone_name)
r_data = '%s %s %s %s' % (p, w, port, target_full_name)
_push_record(
record_full_name,
dns.Record_SRV(p, w, port, target_full_name, ttl=r_ttl))
if r_type == 'TXT':
_maybe_split_up_txt_data(record_full_name, r_data, r_ttl)
# Add an optional IPv4 record if one was specified
if args.add_a_record:
extra_host, extra_host_ipv4 = args.add_a_record.split(':')
_push_record(extra_host, dns.Record_A(extra_host_ipv4, ttl=0))
# Server health check record
_push_record(_SERVER_HEALTH_CHECK_RECORD_NAME,
dns.Record_A(_SERVER_HEALTH_CHECK_RECORD_DATA, ttl=0))
soa_record = dns.Record_SOA(mname=common_zone_name)
test_domain_com = NoFileAuthority(
soa=(common_zone_name, soa_record),
records=all_records,
)
server = twisted.names.server.DNSServerFactory(
authorities=[test_domain_com], verbose=2)
server.noisy = 2
twisted.internet.reactor.listenTCP(args.port, server)
dns_proto = twisted.names.dns.DNSDatagramProtocol(server)
dns_proto.noisy = 2
twisted.internet.reactor.listenUDP(args.port, dns_proto)
print('starting local dns server on 127.0.0.1:%s' % args.port)
print('starting twisted.internet.reactor')
twisted.internet.reactor.suggestThreadPoolSize(1)
twisted.internet.reactor.run()
def _quit_on_signal(signum, _frame):
print('Received SIGNAL %d. Quitting with exit code 0' % signum)
twisted.internet.reactor.stop()
sys.stdout.flush()
sys.exit(0)
def flush_stdout_loop():
num_timeouts_so_far = 0
sleep_time = 1
# Prevent zombies. Tests that use this server are short-lived.
max_timeouts = 60 * 10
while num_timeouts_so_far < max_timeouts:
sys.stdout.flush()
time.sleep(sleep_time)
num_timeouts_so_far += 1
print('Process timeout reached, or cancelled. Exiting 0.')
os.kill(os.getpid(), signal.SIGTERM)
def main():
argp = argparse.ArgumentParser(
description='Local DNS Server for resolver tests')
argp.add_argument('-p',
'--port',
default=None,
type=int,
help='Port for DNS server to listen on for TCP and UDP.')
argp.add_argument(
'-r',
'--records_config_path',
default=None,
type=str,
help=('Path to the resolver_test_record_groups.yaml file. '
'Defaults to path needed when the test is invoked as part '
'of run_tests.py.'))
argp.add_argument(
'--add_a_record',
default=None,
type=str,
help=('Add an A record via the command line. Useful for when we '
'need to serve a one-off A record that is under a '
'different domain than the rest of the records configured in '
'--records_config_path (which all need to be under the '
'same domain). Format: <name>:<ipv4 address>'))
args = argp.parse_args()
signal.signal(signal.SIGTERM, _quit_on_signal)
signal.signal(signal.SIGINT, _quit_on_signal)
output_flush_thread = threading.Thread(target=flush_stdout_loop)
output_flush_thread.setDaemon(True)
output_flush_thread.start()
start_local_dns_server(args)
if __name__ == '__main__':
main()
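# Example invocation (the arguments here are illustrative assumptions, not
# taken from run_tests.py):
#   python dns_server.py -p 15353 -r resolver_test_record_groups.yaml
# The server then answers the configured A/AAAA/SRV/TXT queries on
# 127.0.0.1:15353 over both TCP and UDP, as wired up in start_local_dns_server.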
| 37.125 | 143 | 0.639731 | 903 | 6,831 | 4.59247 | 0.284607 | 0.039788 | 0.027007 | 0.027731 | 0.12081 | 0.046057 | 0.033036 | 0.009163 | 0 | 0 | 0 | 0.011556 | 0.265261 | 6,831 | 183 | 144 | 37.327869 | 0.814704 | 0.117113 | 0 | 0.089655 | 0 | 0 | 0.151323 | 0.03679 | 0 | 0 | 0 | 0 | 0.006897 | 1 | 0.048276 | false | 0 | 0.137931 | 0 | 0.2 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b13e6f6469b20dda5e5b5da9f0367c1ee7833b5 | 726 | py | Python | colour/examples/models/examples_ictcp.py | BPearlstine/colour | 40f0281295496774d2a19eee017d50fd0c265bd8 | ["Cube", "BSD-3-Clause"] | 2 | 2020-05-03T20:15:42.000Z | 2021-04-09T18:19:06.000Z | colour/examples/models/examples_ictcp.py | BPearlstine/colour | 40f0281295496774d2a19eee017d50fd0c265bd8 | ["Cube", "BSD-3-Clause"] | null | null | null | colour/examples/models/examples_ictcp.py | BPearlstine/colour | 40f0281295496774d2a19eee017d50fd0c265bd8 | ["Cube", "BSD-3-Clause"] | 1 | 2019-12-11T19:48:27.000Z | 2019-12-11T19:48:27.000Z |
# -*- coding: utf-8 -*-
"""
Showcases *ICTCP* *colour encoding* computations.
"""
import numpy as np
import colour
from colour.utilities import message_box
message_box('"ICTCP" Colour Encoding Computations')
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
message_box(('Converting from "ITU-R BT.2020" colourspace to "ICTCP" colour '
'encoding given "RGB" values:\n'
'\n\t{0}'.format(RGB)))
print(colour.RGB_to_ICTCP(RGB))
print('\n')
ICTCP = np.array([0.07351364, 0.00475253, 0.09351596])
message_box(('Converting from "ICTCP" colour encoding to "ITU-R BT.2020" '
'colourspace given "ICTCP" values:\n'
'\n\t{0}'.format(ICTCP)))
print(colour.ICTCP_to_RGB(ICTCP))
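# A small extra check (an assumption, not part of the original showcase): the
# two conversions are mutual inverses, so a round trip should recover the
# input "RGB" values up to floating-point error.
print('\n')
message_box('Round-trip "RGB" -> "ICTCP" -> "RGB" residual:')
print(colour.ICTCP_to_RGB(colour.RGB_to_ICTCP(RGB)) - RGB)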
| 27.923077 | 77 | 0.665289 | 102 | 726 | 4.656863 | 0.372549 | 0.092632 | 0.16 | 0.130526 | 0.155789 | 0.067368 | 0 | 0 | 0 | 0 | 0 | 0.108153 | 0.172176 | 726 | 25 | 78 | 29.04 | 0.682196 | 0.099174 | 0 | 0 | 0 | 0 | 0.368421 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b15e8a8bf1abf0fd58cab05c52fa68b6927df9e | 3,732 | py | Python | example_scripts/transect_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 4d3d57c9afb61a92063b665626c1828dd2998d2b | ["MIT"] | 8 | 2020-09-10T13:40:07.000Z | 2022-03-10T22:52:44.000Z | example_scripts/transect_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 4d3d57c9afb61a92063b665626c1828dd2998d2b | ["MIT"] | 294 | 2020-05-11T12:17:17.000Z | 2022-03-31T22:07:52.000Z | example_scripts/transect_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 4d3d57c9afb61a92063b665626c1828dd2998d2b | ["MIT"] | 4 | 2020-05-28T10:43:56.000Z | 2021-09-07T10:40:09.000Z |
"""
This is a demonstration script for using the Transect class in the COAsT
package. This object has strict data formatting requirements, which are
outlined in transect.py.
Transect subsetting (a vertical slice of data between two coordinates): Creating them and performing some custom diagnostics with them.
---
In this tutorial we take a look at subsetting the model data along a transect (a custom straight line) and creating some bespoke diagnostics along it. We look at:
1. Creating a TRANSECT object, defined between two points.
2. Plotting data along a transect.
3. Calculating flow normal to the transect
"""
## Create a transect subset of the example dataset
# Load packages and define some file paths
import coast
import xarray as xr
import matplotlib.pyplot as plt
fn_nemo_dat_t = "./example_files/nemo_data_T_grid.nc"
fn_nemo_dat_u = "./example_files/nemo_data_U_grid.nc"
fn_nemo_dat_v = "./example_files/nemo_data_V_grid.nc"
fn_nemo_dom = "./example_files/COAsT_example_NEMO_domain.nc"
# Configuration files describing the data files
fn_config_t_grid = "./config/example_nemo_grid_t.json"
fn_config_f_grid = "./config/example_nemo_grid_f.json"
fn_config_u_grid = "./config/example_nemo_grid_u.json"
fn_config_v_grid = "./config/example_nemo_grid_v.json"
# %% Load data variables that are on the NEMO t-grid
nemo_t = coast.Gridded(fn_data=fn_nemo_dat_t, fn_domain=fn_nemo_dom, config=fn_config_t_grid)
# Now create a transect between the points (54 N 15 W) and (56 N, 12 W) using the `coast.TransectT` object. This needs to be passed the corresponding NEMO object and transect end points. The model points closest to these coordinates will be selected as the transect end points.
tran_t = coast.TransectT(nemo_t, (54, -15), (56, -12))
# Inspect the data
tran_t.data
# where `r_dim` is the dimension along the transect.
# %% Plot the data
# It is simple to plot a scalar such as temperature along the transect:
temp_mean = tran_t.data.temperature.mean(dim="t_dim")
plt.figure()
temp_mean.plot.pcolormesh(y="depth_0", yincrease=False)
plt.show()
# %% Flow across the transect
# With NEMO’s staggered grid, the first step is to define the transect on the f-grid so that the velocity components are between f-points. We do not need any model data on the f-grid, just the grid information, so create a nemo f-grid object
nemo_f = coast.Gridded(fn_domain=fn_nemo_dom, config=fn_config_f_grid)
# and a transect on the f-grid
tran_f = coast.TransectF(nemo_f, (54, -15), (56, -12))
tran_f.data
# We also need the i- and j-components of velocity so (lazy) load the model data on the u- and v-grid grids
nemo_u = coast.Gridded(fn_data=fn_nemo_dat_u, fn_domain=fn_nemo_dom, config=fn_config_u_grid)
nemo_v = coast.Gridded(fn_data=fn_nemo_dat_v, fn_domain=fn_nemo_dom, config=fn_config_v_grid)
# Now we can calculate the flow across the transect with the method
tran_f.calc_flow_across_transect(nemo_u, nemo_v)
# The flow across the transect is stored in a new dataset where the variables are all defined at the points between f-points.
tran_f.data_cross_tran_flow
# For example, to plot the time averaged velocity across the transect, we can plot the ‘normal_velocities’ variable
cross_velocity_mean = tran_f.data_cross_tran_flow.normal_velocities.mean(dim="t_dim")
plt.figure()
cross_velocity_mean.rolling(r_dim=2).mean().plot.pcolormesh(yincrease=False, y="depth_0", cbar_kwargs={"label": "m/s"})
plt.show()
# or the volume transport across the transect, we can plot the ‘normal_transports’ variable
plt.figure()
cross_transport_mean = tran_f.data_cross_tran_flow.normal_transports.mean(dim="t_dim")
cross_transport_mean.rolling(r_dim=2).mean().plot()
plt.ylabel("Sv")
plt.show()
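# %% A follow-on sketch (an assumption using plain xarray reductions, not part
# of the original tutorial): sum the transports along the transect to get a
# net volume transport time series, then average it in time.
net_transport = tran_f.data_cross_tran_flow.normal_transports.sum(dim="r_dim")
print(float(net_transport.mean(dim="t_dim")))  # net transport in Sv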
| 38.081633 | 277 | 0.780279 | 657 | 3,732 | 4.2207 | 0.269406 | 0.043635 | 0.019473 | 0.030292 | 0.249549 | 0.161919 | 0.13956 | 0.09304 | 0 | 0 | 0 | 0.009612 | 0.135852 | 3,732 | 97 | 278 | 38.474227 | 0.850233 | 0.546892 | 0 | 0.176471 | 0 | 0 | 0.192077 | 0.168667 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088235 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b16a33b1ae4cc31b9c80ce44c59e17df1095980 | 44,917 | py | Python | diofant/logic/boolalg.py | skirpichev/diofant | 16e280fdd6053be10c3b60fbb66fc26b52ede27a | ["BSD-3-Clause"] | null | null | null | diofant/logic/boolalg.py | skirpichev/diofant | 16e280fdd6053be10c3b60fbb66fc26b52ede27a | ["BSD-3-Clause"] | 1 | 2021-06-23T08:27:17.000Z | 2021-06-23T08:27:17.000Z | diofant/logic/boolalg.py | skirpichev/diofant | 16e280fdd6053be10c3b60fbb66fc26b52ede27a | ["BSD-3-Clause"] | 1 | 2021-06-23T07:58:58.000Z | 2021-06-23T07:58:58.000Z |
"""
Boolean algebra module for Diofant.
"""
from collections import defaultdict
from itertools import combinations, product
from ..core import Atom, cacheit
from ..core.expr import Expr
from ..core.function import Application
from ..core.numbers import Number
from ..core.operations import LatticeOp
from ..core.singleton import S
from ..core.singleton import SingletonWithManagedProperties as Singleton
from ..core.sympify import converter, sympify
from ..utilities import ordered
class Boolean(Expr):
"""A boolean object is an object for which logic operations make sense."""
def __and__(self, other):
"""Overloading for & operator."""
return And(self, other)
__rand__ = __and__
def __or__(self, other):
"""Overloading for | operator."""
return Or(self, other)
__ror__ = __or__
def __invert__(self):
"""Overloading for ~ operator."""
return Not(self)
def __rshift__(self, other):
"""Overloading for >> operator."""
return Implies(self, other)
def __lshift__(self, other):
"""Overloading for << operator."""
return Implies(other, self)
__rrshift__ = __lshift__
__rlshift__ = __rshift__
def __xor__(self, other):
return Xor(self, other)
__rxor__ = __xor__
def equals(self, other, failing_expression=False):
"""
Returns True if the given formulas have the same truth table.
For two formulas to be equal they must have the same literals.
Examples
========
>>> (a >> b).equals(~b >> ~a)
True
>>> Not(And(a, b, c)).equals(And(Not(a), Not(b), Not(c)))
False
>>> Not(And(a, Not(a))).equals(Or(b, Not(b)))
False
"""
from ..core.relational import Relational
from .inference import satisfiable
other = sympify(other)
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
not satisfiable(Not(Equivalent(self, other)))
class BooleanAtom(Atom, Boolean):
"""Base class of BooleanTrue and BooleanFalse."""
is_Boolean = True
@property
def canonical(self):
return self
def __int__(self):
return int(bool(self))
class BooleanTrue(BooleanAtom, metaclass=Singleton):
"""Diofant version of True, a singleton that can be accessed via ``true``.
This is the Diofant version of True, for use in the logic module. The
primary advantage of using true instead of True is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
True they act bitwise on 1. Functions in the logic module will return this
class when they evaluate to true.
Notes
=====
There is liable to be some confusion as to when ``True`` should
be used and when ``true`` should be used in various contexts
throughout Diofant. An important thing to remember is that
``sympify(True)`` returns ``true``. This means that for the most
part, you can just use ``True`` and it will automatically be converted
to ``true`` when necessary, similar to how you can generally use 1
instead of ``Integer(1)``.
The rule of thumb is:
"If the boolean in question can be replaced by an arbitrary symbolic
``Boolean``, like ``Or(x, y)`` or ``x > 1``, use ``true``.
Otherwise, use ``True``".
In other words, use ``true`` only on those contexts where the
boolean is being used as a symbolic representation of truth.
For example, if the object ends up in the ``.args`` of any expression,
then it must necessarily be ``true`` instead of ``True``, as
elements of ``.args`` must be ``Basic``. On the other hand,
``==`` is not a symbolic operation in Diofant, since it always returns
``True`` or ``False``, and does so in terms of structural equality
rather than mathematical, so it should return ``True``. The assumptions
system should use ``True`` and ``False``. Aside from not satisfying
the above rule of thumb, the
assumptions system uses a three-valued logic (``True``, ``False``, ``None``),
whereas ``true`` and ``false`` represent a two-valued logic. When in
doubt, use ``True``.
"``true == True is True``."
While "``true is True``" is ``False``, "``true == True``"
is ``True``, so if there is any doubt over whether a function or
expression will return ``true`` or ``True``, just use ``==``
instead of ``is`` to do the comparison, and it will work in either
case. Finally, for boolean flags, it's better to just use ``if x``
instead of ``if x is True``. To quote PEP 8:
Don't compare boolean values to ``True`` or ``False``
using ``==``.
* Yes: ``if greeting:``
* No: ``if greeting == True:``
* Worse: ``if greeting is True:``
Examples
========
>>> sympify(True)
true
>>> ~true
false
>>> ~True
-2
>>> Or(True, False)
true
See Also
========
BooleanFalse
"""
def __bool__(self):
return True
def __hash__(self):
return hash(True)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> true.as_set()
UniversalSet()
"""
return S.UniversalSet
class BooleanFalse(BooleanAtom, metaclass=Singleton):
"""Diofant version of False, a singleton that can be accessed via ``false``.
This is the Diofant version of False, for use in the logic module. The
primary advantage of using false instead of False is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
False they act bitwise on 0. Functions in the logic module will return this
class when they evaluate to false.
Notes
=====
See note in :py:class:`~diofant.logic.boolalg.BooleanTrue`.
Examples
========
>>> sympify(False)
false
>>> false >> false
true
>>> False >> False
0
>>> Or(True, False)
true
See Also
========
BooleanTrue
"""
def __bool__(self):
return False
def __hash__(self):
return hash(False)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> false.as_set()
EmptySet()
"""
from ..sets import EmptySet
return EmptySet()
true = BooleanTrue()
false: BooleanFalse = BooleanFalse()
# We want S.true and S.false to work, rather than S.BooleanTrue and
# S.BooleanFalse, but making the class and instance names the same causes some
# major issues (like the inability to import the class directly from this
# file).
S.true = true
S.false = false
converter[bool] = lambda x: true if x else false
class BooleanFunction(Application, Boolean):
"""Boolean function is a function that lives in a boolean space.
This is used as base class for And, Or, Not, etc.
"""
is_Boolean = True
def _eval_simplify(self, ratio, measure):
return simplify_logic(self)
def to_nnf(self, simplify=True):
return self._to_nnf(*self.args, simplify=simplify)
@classmethod
def _to_nnf(cls, *args, **kwargs):
simplify = kwargs.get('simplify', True)
argset = set()
for arg in args:
if not is_literal(arg):
arg = arg.to_nnf(simplify)
if simplify:
if isinstance(arg, cls):
arg = arg.args
else:
arg = arg,
for a in arg:
if Not(a) in argset:
return cls.zero
argset.add(a)
else:
argset.add(arg)
return cls(*argset)
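# Illustration (comments only): with simplify=True, And._to_nnf(a, ~a) trips
# the complementary-pair check above (Not(a) is already in argset) and
# returns And.zero, i.e. false.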
class And(LatticeOp, BooleanFunction):
"""
Logical AND function.
It evaluates its arguments in order, giving False immediately
if any of them are False, and True if they are all True.
Examples
========
>>> x & y
x & y
Notes
=====
The ``&`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
and. Hence, ``And(a, b)`` and ``a & b`` will return different things if
``a`` and ``b`` are integers.
>>> And(x, y).subs({x: 1})
y
"""
zero = false
identity = true
nargs = None
@classmethod
def _new_args_filter(cls, args):
newargs = []
rel = []
for x in reversed(list(args)):
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
continue
if x.is_Relational:
c = x.canonical
if c in rel:
continue
nc = (~c).canonical
if any(r == nc for r in rel):
return [false]
rel.append(c)
newargs.append(x)
return LatticeOp._new_args_filter(newargs, And)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> And(x < 2, x > -2).as_set()
(-2, 2)
"""
from ..sets import Intersection
if len(self.free_symbols) == 1:
return Intersection(*[arg.as_set() for arg in self.args])
else:
raise NotImplementedError('Sorry, And.as_set has not yet been'
' implemented for multivariate'
' expressions')
class Or(LatticeOp, BooleanFunction):
"""
Logical OR function
It evaluates its arguments in order, giving True immediately
if any of them are True, and False if they are all False.
Examples
========
>>> x | y
x | y
Notes
=====
The ``|`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
or. Hence, ``Or(a, b)`` and ``a | b`` will return different things if
``a`` and ``b`` are integers.
>>> Or(x, y).subs({x: 0})
y
"""
zero = true
identity = false
@classmethod
def _new_args_filter(cls, args):
newargs = []
rel = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
continue
if x.is_Relational:
c = x.canonical
if c in rel:
continue
nc = (~c).canonical
if any(r == nc for r in rel):
return [true]
rel.append(c)
newargs.append(x)
return LatticeOp._new_args_filter(newargs, Or)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> Or(x > 2, x < -2).as_set()
[-oo, -2) U (2, oo]
"""
from ..sets import Union
if len(self.free_symbols) == 1:
return Union(*[arg.as_set() for arg in self.args])
else:
raise NotImplementedError('Sorry, Or.as_set has not yet been'
' implemented for multivariate'
' expressions')
class Not(BooleanFunction):
"""
Logical Not function (negation).
Returns True if the statement is False.
Returns False if the statement is True.
Examples
========
>>> Not(True)
false
>>> Not(False)
true
>>> Not(And(True, False))
true
>>> Not(Or(True, False))
false
>>> Not(And(And(True, x), Or(x, False)))
~x
>>> ~x
~x
>>> Not(And(Or(x, y), Or(~x, ~y)))
~((x | y) & (~x | ~y))
Notes
=====
The ``~`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
not. In particular, ``~a`` and ``Not(a)`` will be different if ``a`` is
an integer. Furthermore, since bools in Python subclass from ``int``,
``~True`` is the same as ``~1`` which is ``-2``, which has a boolean
value of True. To avoid this issue, use the Diofant boolean types
``true`` and ``false``.
>>> ~True
-2
>>> ~true
false
"""
is_Not = True
@classmethod
def eval(cls, arg):
from ..core import (Equality, GreaterThan, LessThan, StrictGreaterThan,
StrictLessThan, Unequality)
if isinstance(arg, Number) or arg in (True, False):
return false if arg else true
if arg.is_Not:
return arg.args[0]
# Simplify Relational objects.
if isinstance(arg, Equality):
return Unequality(*arg.args)
if isinstance(arg, Unequality):
return Equality(*arg.args)
if isinstance(arg, StrictLessThan):
return GreaterThan(*arg.args)
if isinstance(arg, StrictGreaterThan):
return LessThan(*arg.args)
if isinstance(arg, LessThan):
return StrictGreaterThan(*arg.args)
if isinstance(arg, GreaterThan):
return StrictLessThan(*arg.args)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> Not(x > 0, evaluate=False).as_set()
(-oo, 0]
"""
if len(self.free_symbols) == 1:
return self.args[0].as_set().complement(S.Reals)
else:
raise NotImplementedError('Sorry, Not.as_set has not yet been'
' implemented for mutivariate'
' expressions')
def to_nnf(self, simplify=True):
if is_literal(self):
return self
expr = self.args[0]
func, args = expr.func, expr.args
if func == And:
return Or._to_nnf(*[~arg for arg in args], simplify=simplify)
if func == Or:
return And._to_nnf(*[~arg for arg in args], simplify=simplify)
if func == Implies:
a, b = args
return And._to_nnf(a, ~b, simplify=simplify)
if func == Equivalent:
return And._to_nnf(Or(*args), Or(*[~arg for arg in args]), simplify=simplify)
if func == Xor:
result = []
for i in range(1, len(args)+1, 2):
for neg in combinations(args, i):
clause = [~s if s in neg else s for s in args]
result.append(Or(*clause))
return And._to_nnf(*result, simplify=simplify)
if func == ITE:
a, b, c = args
return And._to_nnf(Or(a, ~c), Or(~a, ~b), simplify=simplify)
raise ValueError(f'Illegal operator {func} in expression')
class Xor(BooleanFunction):
"""
Logical XOR (exclusive OR) function.
Returns True if an odd number of the arguments are True and the rest are
False.
Returns False if an even number of the arguments are True and the rest are
False.
Examples
========
>>> Xor(True, False)
true
>>> Xor(True, True)
false
>>> Xor(True, False, True, True, False)
true
>>> Xor(True, False, True, False)
false
>>> x ^ y
Xor(x, y)
Notes
=====
The ``^`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise xor. In
particular, ``a ^ b`` and ``Xor(a, b)`` will be different if ``a`` and
``b`` are integers.
>>> Xor(x, y).subs({y: 0})
x
"""
def __new__(cls, *args, **kwargs):
argset = set()
obj = super().__new__(cls, *args, **kwargs)
for arg in super(Xor, obj).args:
if isinstance(arg, Number) or arg in (True, False):
if not arg:
continue
else:
arg = true
if isinstance(arg, Xor):
for a in arg.args:
argset.remove(a) if a in argset else argset.add(a)
elif arg in argset:
argset.remove(arg)
else:
argset.add(arg)
rel = [(r, r.canonical, (~r).canonical) for r in argset if r.is_Relational]
odd = False  # is number of complementary pairs odd? start 0 -> False
remove = []
for i, (r, c, nc) in enumerate(rel):
for j in range(i + 1, len(rel)):
rj, cj = rel[j][:2]
if cj == nc:
odd = ~odd
break
elif cj == c:
break
else:
continue
remove.append((r, rj))
if odd:
argset.remove(true) if true in argset else argset.add(true)
for a, b in remove:
argset.remove(a)
argset.remove(b)
if len(argset) == 0:
return false
elif len(argset) == 1:
return argset.pop()
elif True in argset:
argset.remove(True)
return Not(Xor(*argset))
else:
obj._args = tuple(ordered(argset))
obj._argset = frozenset(argset)
return obj
@property # type: ignore[misc]
@cacheit
def args(self):
return tuple(ordered(self._argset))
def to_nnf(self, simplify=True):
args = []
for i in range(0, len(self.args)+1, 2):
for neg in combinations(self.args, i):
clause = [~s if s in neg else s for s in self.args]
args.append(Or(*clause))
return And._to_nnf(*args, simplify=simplify)
class Nand(BooleanFunction):
"""
Logical NAND function.
It evaluates its arguments in order, giving True immediately if any
of them are False, and False if they are all True.
Returns True if any of the arguments are False.
Returns False if all arguments are True.
Examples
========
>>> Nand(False, True)
true
>>> Nand(True, True)
false
>>> Nand(x, y)
~(x & y)
"""
@classmethod
def eval(cls, *args):
return Not(And(*args))
class Nor(BooleanFunction):
"""
Logical NOR function.
It evaluates its arguments in order, giving False immediately if any
of them are True, and True if they are all False.
Returns False if any argument is True.
Returns True if all arguments are False.
Examples
========
>>> Nor(True, False)
false
>>> Nor(True, True)
false
>>> Nor(False, True)
false
>>> Nor(False, False)
true
>>> Nor(x, y)
~(x | y)
"""
@classmethod
def eval(cls, *args):
return Not(Or(*args))
class Implies(BooleanFunction):
"""
Logical implication.
A implies B is equivalent to !A v B
Accepts two Boolean arguments; A and B.
Returns False if A is True and B is False.
Returns True otherwise.
Examples
========
>>> Implies(True, False)
false
>>> Implies(False, False)
true
>>> Implies(True, True)
true
>>> Implies(False, True)
true
>>> x >> y
Implies(x, y)
>>> y << x
Implies(x, y)
Notes
=====
The ``>>`` and ``<<`` operators are provided as a convenience, but note
that their use here is different from their normal use in Python, which is
bit shifts. Hence, ``Implies(a, b)`` and ``a >> b`` will return different
things if ``a`` and ``b`` are integers. In particular, since Python
considers ``True`` and ``False`` to be integers, ``True >> True`` will be
the same as ``1 >> 1``, i.e., 0, which has a truth value of False. To
avoid this issue, use the Diofant objects ``true`` and ``false``.
>>> True >> False
1
>>> true >> false
false
"""
@classmethod
def eval(cls, *args):
try:
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
else:
newargs.append(x)
A, B = newargs
except ValueError:
raise ValueError(f'{len(args)} operand(s) used for an Implies '
f'(pairs are required): {args!s}')
if A == true or A == false or B == true or B == false:
return Or(Not(A), B)
elif A == B:
return true
elif A.is_Relational and B.is_Relational:
if A.canonical == B.canonical:
return true
elif (~A).canonical == B.canonical:
return B
else:
return Expr.__new__(cls, *args)
def to_nnf(self, simplify=True):
a, b = self.args
return Or._to_nnf(~a, b, simplify=simplify)
class Equivalent(BooleanFunction):
"""
Equivalence relation.
Equivalent(A, B) is True iff A and B are both True or both False.
Returns True if all of the arguments are logically equivalent.
Returns False otherwise.
Examples
========
>>> Equivalent(False, False, False)
true
>>> Equivalent(True, False, False)
false
>>> Equivalent(x, And(x, True))
true
"""
def __new__(cls, *args, **options):
from ..core.relational import Relational
args = [sympify(arg, strict=True) for arg in args]
argset = set(args)
for x in args:
if isinstance(x, Number) or x in [True, False]: # Includes 0, 1
argset.discard(x)
argset.add(True if x else False)
rel = []
for r in argset:
if isinstance(r, Relational):
rel.append((r, r.canonical, (~r).canonical))
remove = []
for i, (r, c, nc) in enumerate(rel):
for j in range(i + 1, len(rel)):
rj, cj = rel[j][:2]
if cj == nc:
return false
elif cj == c:
remove.append((r, rj))
break
for a, b in remove:
argset.remove(a)
argset.remove(b)
argset.add(True)
if len(argset) <= 1:
return true
if True in argset:
argset.discard(True)
return And(*argset)
if False in argset:
argset.discard(False)
return And(*[~arg for arg in argset])
_args = frozenset(argset)
obj = super().__new__(cls, _args)
obj._argset = _args
return obj
@property # type: ignore[misc]
@cacheit
def args(self):
return tuple(ordered(self._argset))
def to_nnf(self, simplify=True):
args = []
for a, b in zip(self.args, self.args[1:]):
args.append(Or(~a, b))
args.append(Or(~self.args[-1], self.args[0]))
return And._to_nnf(*args, simplify=simplify)
class ITE(BooleanFunction):
"""
If then else clause.
ITE(A, B, C) evaluates and returns the result of B if A is true
else it returns the result of C.
Examples
========
>>> ITE(True, False, True)
false
>>> ITE(Or(True, False), And(True, True), Xor(True, True))
true
>>> ITE(x, y, z)
ITE(x, y, z)
>>> ITE(True, x, y)
x
>>> ITE(False, x, y)
y
>>> ITE(x, y, y)
y
"""
@classmethod
def eval(cls, *args):
try:
a, b, c = args
except ValueError:
raise ValueError('ITE expects exactly 3 arguments')
if a == true:
return b
elif a == false:
return c
elif b == c:
return b
elif b == true and c == false:
return a
elif b == false and c == true:
return Not(a)
def to_nnf(self, simplify=True):
a, b, c = self.args
return And._to_nnf(Or(~a, b), Or(a, c), simplify=simplify)
def _eval_derivative(self, x):
return self.func(self.args[0], *[a.diff(x) for a in self.args[1:]])
# end class definitions. Some useful methods
def conjuncts(expr):
"""Return a list of the conjuncts in the expr s.
Examples
========
>>> conjuncts(a & b) == frozenset([a, b])
True
>>> conjuncts(a | b) == frozenset([Or(a, b)])
True
"""
return And.make_args(expr)
def disjuncts(expr):
"""Return a list of the disjuncts in the sentence s.
Examples
========
>>> disjuncts(a | b) == frozenset([a, b])
True
>>> disjuncts(a & b) == frozenset([And(a, b)])
True
"""
return Or.make_args(expr)
def distribute_and_over_or(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
Examples
========
>>> distribute_and_over_or(Or(a, And(Not(b), Not(c))))
(a | ~b) & (a | ~c)
"""
return _distribute((expr, And, Or))
def distribute_or_over_and(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in DNF.
Note that the output is NOT simplified.
Examples
========
>>> distribute_or_over_and(And(Or(Not(a), b), c))
(b & c) | (c & ~a)
"""
return _distribute((expr, Or, And))
def _distribute(info):
"""Distributes info[1] over info[2] with respect to info[0]."""
if isinstance(info[0], info[2]):
for arg in info[0].args:
if isinstance(arg, info[1]):
conj = arg
break
else:
return info[0]
rest = info[2](*[a for a in info[0].args if a is not conj])
return info[1](*list(map(_distribute,
((info[2](c, rest), info[1], info[2]) for c in conj.args))))
elif isinstance(info[0], info[1]):
return info[1](*list(map(_distribute,
((x, info[1], info[2]) for x in info[0].args))))
else:
return info[0]
def to_nnf(expr, simplify=True):
"""
Converts expr to Negation Normal Form.
A logical expression is in Negation Normal Form (NNF) if it
contains only And, Or and Not, and Not is applied only to literals.
If simplify is True, the result contains no redundant clauses.
Examples
========
>>> to_nnf(Not((~a & ~b) | (c & d)))
(a | b) & (~c | ~d)
>>> to_nnf(Equivalent(a >> b, b >> a))
(a | ~b | (a & ~b)) & (b | ~a | (b & ~a))
"""
expr = sympify(expr)
if is_nnf(expr, simplify):
return expr
return expr.to_nnf(simplify)
def to_cnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to conjunctive normal form.
That is, of the form ((A | ~B | ...) & (B | C | ...) & ...).
If simplify is True, the expr is evaluated to its simplest CNF form.
Examples
========
>>> to_cnf(~(a | b) | c)
(c | ~a) & (c | ~b)
>>> to_cnf((a | b) & (a | ~a), True)
a | b
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'cnf', True)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_and_over_or(expr)
def to_dnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to disjunctive normal form.
That is, of the form ((A & ~B & ...) | (B & C & ...) | ...).
If simplify is True, the expr is evaluated to its simplest DNF form.
Examples
========
>>> to_dnf(b & (a | c))
(a & b) | (b & c)
>>> to_dnf((a & b) | (a & ~b) | (b & c) | (~b & c), True)
a | c
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'dnf', True)
# Don't convert unless we have to
if is_dnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_or_over_and(expr)
def is_nnf(expr, simplified=True):
"""
Checks if expr is in Negation Normal Form.
A logical expression is in Negation Normal Form (NNF) if it
contains only And, Or and Not, and Not is applied only to literals.
If simplified is True, checks if result contains no redundant clauses.
Examples
========
>>> is_nnf(a & b | ~c)
True
>>> is_nnf((a | ~a) & (b | c))
False
>>> is_nnf((a | ~a) & (b | c), False)
True
>>> is_nnf(Not(a & b) | c)
False
>>> is_nnf((a >> b) & (b >> a))
False
"""
expr = sympify(expr)
if is_literal(expr):
return True
stack = [expr]
while stack:
expr = stack.pop()
if expr.func in (And, Or):
if simplified:
args = expr.args
for arg in args:
if Not(arg) in args:
return False
stack.extend(expr.args)
elif not is_literal(expr):
return False
return True
def is_cnf(expr):
"""
Test whether or not an expression is in conjunctive normal form.
Examples
========
>>> is_cnf(a | b | c)
True
>>> is_cnf(a & b & c)
True
>>> is_cnf((a & b) | c)
False
"""
return _is_form(expr, And, Or)
def is_dnf(expr):
"""
Test whether or not an expression is in disjunctive normal form.
Examples
========
>>> is_dnf(a | b | c)
True
>>> is_dnf(a & b & c)
True
>>> is_dnf((a & b) | c)
True
>>> is_dnf(a & (b | c))
False
"""
return _is_form(expr, Or, And)
def _is_form(expr, function1, function2):
"""Test whether or not an expression is of the required form."""
expr = sympify(expr)
# Special case of an Atom
if expr.is_Atom:
return True
# Special case of a single expression of function2
if isinstance(expr, function2):
for lit in expr.args:
if isinstance(lit, Not):
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
# Special case of a single negation
if isinstance(expr, Not):
if not expr.args[0].is_Atom:
return False
if not isinstance(expr, function1):
return False
for cls in expr.args:
if cls.is_Atom:
continue
if isinstance(cls, Not):
if not cls.args[0].is_Atom:
return False
elif not isinstance(cls, function2):
return False
for lit in cls.args:
if isinstance(lit, Not):
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
def eliminate_implications(expr):
"""
Change >>, <<, and Equivalent into &, |, and ~. That is, return an
expression that is equivalent to s, but has only &, |, and ~ as logical
operators.
Examples
========
>>> eliminate_implications(Implies(a, b))
b | ~a
>>> eliminate_implications(Equivalent(a, b))
(a | ~b) & (b | ~a)
>>> eliminate_implications(Equivalent(a, b, c))
(a | ~c) & (b | ~a) & (c | ~b)
"""
return to_nnf(expr)
def is_literal(expr):
"""
Returns True if expr is a literal, else False.
Examples
========
>>> is_literal(a)
True
>>> is_literal(~a)
True
>>> is_literal(a + b)
True
>>> is_literal(Or(a, b))
False
"""
if isinstance(expr, Not):
return not isinstance(expr.args[0], BooleanFunction)
else:
return not isinstance(expr, BooleanFunction)
def to_int_repr(clauses, symbols):
"""
Takes clauses in CNF format and puts them into an integer representation.
Examples
========
>>> to_int_repr([x | y, y], [x, y])
[{1, 2}, {2}]
"""
symbols = dict(zip(symbols, range(1, len(symbols) + 1)))
def append_symbol(arg, symbols):
if isinstance(arg, Not):
return -symbols[arg.args[0]]
else:
return symbols[arg]
return [{append_symbol(arg, symbols) for arg in Or.make_args(c)}
for c in clauses]
def _check_pair(minterm1, minterm2):
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
index, else returns -1.
"""
index = -1
for x, (i, j) in enumerate(zip(minterm1, minterm2)):
if i != j:
if index == -1:
index = x
else:
return -1
return index
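# Illustration (comments only): minterms [0, 1, 0, 1] and [0, 1, 1, 1] differ
# only at index 2, so _check_pair returns 2; terms differing in more than one
# position return -1 and are not merged by the Quine-McCluskey step.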
def _convert_to_varsSOP(minterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for SOP).
"""
temp = []
for i, m in enumerate(minterm):
if m == 0:
temp.append(Not(variables[i]))
elif m == 1:
temp.append(variables[i])
return And(*temp)
def _convert_to_varsPOS(maxterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for POS).
"""
temp = []
for i, m in enumerate(maxterm):
if m == 1:
temp.append(Not(variables[i]))
elif m == 0:
temp.append(variables[i])
return Or(*temp)
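# Illustration (comments only): with variables [x, y, z], the merged term
# [1, 0, 3] (3 marks a don't-care position) becomes x & ~y under
# _convert_to_varsSOP, while the same pattern becomes ~x | y under
# _convert_to_varsPOS.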
def _simplified_pairs(terms):
"""
Reduces a set of minterms, if possible, to a simplified set of minterms
with one less variable in the terms using QM method.
"""
simplified_terms = []
todo = list(range(len(terms)))
for i, ti in enumerate(terms[:-1]):
for j_i, tj in enumerate(terms[(i + 1):]):
index = _check_pair(ti, tj)
if index != -1:
todo[i] = todo[j_i + i + 1] = None
newterm = ti[:]
newterm[index] = 3
if newterm not in simplified_terms:
simplified_terms.append(newterm)
simplified_terms.extend(
[terms[i] for i in [_ for _ in todo if _ is not None]])
return simplified_terms
def _compare_term(minterm, term):
"""
Return True if a binary term is satisfied by the given term. Used
for recognizing prime implicants.
"""
for i, x in enumerate(term):
if x not in (3, minterm[i]):
return False
return True
def _rem_redundancy(l1, terms):
"""
After the truth table has been sufficiently simplified, use the prime
implicant table method to recognize and eliminate redundant pairs,
and return the essential arguments.
"""
essential = []
for x in terms:
temporary = []
for y in l1:
if _compare_term(x, y):
temporary.append(y)
if len(temporary) == 1:
if temporary[0] not in essential:
essential.append(temporary[0])
for x in terms:
for y in essential:
if _compare_term(x, y):
break
else:
for z in l1: # pragma: no branch
if _compare_term(x, z):
assert z not in essential
essential.append(z)
break
return essential
def SOPform(variables, minterms, dontcares=None):
"""
The SOPform function uses simplified_pairs and a redundant group-
eliminating algorithm to convert the list of all input combos that
generate '1' (the minterms) into the smallest Sum of Products form.
The variables must be given as the first argument.
Return a logical Or function (i.e., the "sum of products" or "SOP"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> SOPform([t, x, y, z], minterms, dontcares)
(y & z) | (z & ~t)
References
==========
* https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError(f'{d} in minterms is also in dontcares')
old = None
new = minterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, minterms)
return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
"""
The POSform function uses simplified_pairs and a redundant-group
eliminating algorithm to convert the list of all input combinations
that generate '1' (the minterms) into the smallest Product of Sums form.
The variables must be given as the first argument.
Return a logical And function (i.e., the "product of sums" or "POS"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
... [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> POSform([t, x, y, z], minterms, dontcares)
z & (y | ~t)
References
==========
* https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError(f'{d} in minterms is also in dontcares')
maxterms = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if (t not in minterms) and (t not in dontcares):
maxterms.append(t)
old = None
new = maxterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, maxterms)
return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def _find_predicates(expr):
"""Helper to find logical predicates in BooleanFunctions.
A logical predicate is defined here as anything within a BooleanFunction
that is not a BooleanFunction itself.
"""
if not isinstance(expr, BooleanFunction):
return {expr}
return set().union(*(_find_predicates(i) for i in expr.args))
def simplify_logic(expr, form=None, deep=True):
"""
This function simplifies a boolean function to its simplified version
in SOP or POS form. The return type is an Or or And object in Diofant.
Parameters
==========
expr : string or boolean expression
form : string ('cnf' or 'dnf') or None (default).
If 'cnf' or 'dnf', the simplest expression in the corresponding
normal form is returned; if None, the answer is returned
according to the form with fewest args (in CNF by default).
deep : boolean (default True)
indicates whether to recursively simplify any
non-boolean functions contained within the input.
Examples
========
>>> b = (~x & ~y & ~z) | (~x & ~y & z)
>>> simplify_logic(b)
~x & ~y
>>> sympify(b)
(z & ~x & ~y) | (~x & ~y & ~z)
>>> simplify_logic(_)
~x & ~y
"""
if form == 'cnf' or form == 'dnf' or form is None:
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
variables = _find_predicates(expr)
truthtable = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if expr.xreplace(dict(zip(variables, t))):
truthtable.append(t)
if deep:
from ..simplify import simplify
variables = [simplify(v) for v in variables]
if form == 'dnf' or \
(form is None and len(truthtable) >= (2 ** (len(variables) - 1))):
return SOPform(variables, truthtable)
elif form == 'cnf' or form is None: # pragma: no branch
return POSform(variables, truthtable)
else:
raise ValueError('form can be cnf or dnf only')
def _finger(eq):
"""
Assign a 5-item fingerprint to each symbol in the equation:
[
# of times it appeared as a Symbol,
# of times it appeared as a Not(symbol),
# of times it appeared as a Symbol in an And or Or,
# of times it appeared as a Not(Symbol) in an And or Or,
sum of the number of arguments with which it appeared,
counting Symbol as 1 and Not(Symbol) as 2
]
>>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
>>> dict(_finger(eq))
{(0, 0, 1, 0, 2): [x],
(0, 0, 1, 0, 3): [a, b],
(0, 0, 1, 2, 8): [y]}
So y and x have unique fingerprints, but a and b do not.
"""
f = eq.free_symbols
d = {fi: [0] * 5 for fi in f}
for a in eq.args:
if a.is_Symbol:
d[a][0] += 1
elif a.is_Not:
d[a.args[0]][1] += 1
else:
o = len(a.args) + sum(isinstance(ai, Not) for ai in a.args)
for ai in a.args:
if ai.is_Symbol:
d[ai][2] += 1
d[ai][-1] += o
else:
d[ai.args[0]][3] += 1
d[ai.args[0]][-1] += o
inv = defaultdict(list)
for k, v in ordered(d.items()):
inv[tuple(v)].append(k)
return inv
def bool_map(bool1, bool2):
"""
Return the simplified version of bool1, and the mapping of variables
that makes the two expressions bool1 and bool2 represent the same
logical behaviour for some correspondence between the variables
of each.
If more than one mappings of this sort exist, one of them
is returned.
For example, And(x, y) is logically equivalent to And(a, b) for
the mapping {x: a, y:b} or {x: b, y:a}.
If no such mapping exists, return False.
Examples
========
>>> function1 = SOPform([x, z, y], [[1, 0, 1], [0, 0, 1]])
>>> function2 = SOPform([a, b, c], [[1, 0, 1], [1, 0, 0]])
>>> bool_map(function1, function2)
(y & ~z, {y: a, z: b})
The results are not necessarily unique, but they are canonical. Here,
``(t, z)`` could be ``(a, d)`` or ``(d, a)``:
>>> eq1 = Or(And(Not(y), t), And(Not(y), z), And(x, y))
>>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c))
>>> bool_map(eq1, eq2)
((x & y) | (t & ~y) | (z & ~y), {t: a, x: b, y: c, z: d})
>>> eq = And(Xor(a, b), c, And(c, d))
>>> bool_map(eq, eq.subs({c: x}))
(c & d & (a | b) & (~a | ~b), {a: a, b: b, c: d, d: x})
"""
def match(function1, function2):
"""Return the mapping that equates variables between two
simplified boolean expressions if possible.
By "simplified" we mean that a function has been denested
and is either an And (or an Or) whose arguments are either
symbols (x), negated symbols (Not(x)), or Or (or an And) whose
arguments are only symbols or negated symbols. For example,
And(x, Not(y), Or(w, Not(z))).
Basic.match is not robust enough (see issue sympy/sympy#4835) so this is
a workaround that is valid for simplified boolean expressions.
"""
# do some quick checks
if function1.__class__ != function2.__class__:
return
if len(function1.args) != len(function2.args):
return
if function1.is_Symbol:
return {function1: function2}
# get the fingerprint dictionaries
f1 = _finger(function1)
f2 = _finger(function2)
# more quick checks
if len(f1) != len(f2):
return
# assemble the match dictionary if possible
matchdict = {}
for k in f1:
if k not in f2 or len(f1[k]) != len(f2[k]):
return
for i, x in enumerate(f1[k]):
matchdict[x] = f2[k][i]
return matchdict if matchdict else None
a = simplify_logic(bool1)
b = simplify_logic(bool2)
m = match(a, b)
if m:
return a, m
return m is not None
| 27.042143 | 93 | 0.552107 | 5,962 | 44,917 | 4.098457 | 0.102986 | 0.006384 | 0.002824 | 0.004584 | 0.404625 | 0.352282 | 0.311807 | 0.278085 | 0.262943 | 0.247596 | 0 | 0.009302 | 0.327471 | 44,917 | 1,660 | 94 | 27.058434 | 0.79959 | 0.414787 | 0 | 0.390909 | 0 | 0 | 0.021899 | 0 | 0 | 0 | 0 | 0 | 0.001515 | 1 | 0.101515 | false | 0 | 0.028788 | 0.021212 | 0.377273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |