hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
41da78eeb82897161300840746eef404c63146f0 | 7,485 | py | Python | test_farbfeld.py | jmp/farbfeld | 08cb56957624cfe3a1fe872390aa3252226e57cd | [
"MIT"
] | 2 | 2020-02-01T14:59:15.000Z | 2021-11-03T12:39:10.000Z | test_farbfeld.py | jmp/farbfeld | 08cb56957624cfe3a1fe872390aa3252226e57cd | [
"MIT"
] | 5 | 2019-01-26T11:58:58.000Z | 2019-08-31T08:16:59.000Z | test_farbfeld.py | jmp/farbfeld | 08cb56957624cfe3a1fe872390aa3252226e57cd | [
"MIT"
] | null | null | null | # pylint: disable=missing-docstring
import io
import unittest
import farbfeld
class ReadTest(unittest.TestCase):
    """Exercise farbfeld.read() against valid, truncated, and corrupt input."""

    def _decode(self, raw):
        """Run farbfeld.read on the given raw byte string."""
        return farbfeld.read(io.BytesIO(raw))

    def _expect_invalid(self, raw):
        """Assert that decoding *raw* raises farbfeld.InvalidFormat."""
        with self.assertRaises(farbfeld.InvalidFormat):
            self._decode(raw)

    def test_read_empty_data(self):
        self._expect_invalid(b'')

    def test_read_header_only(self):
        self._expect_invalid(b'farbfeld')

    def test_read_wrong_header_no_data(self):
        self._expect_invalid(b'dlefbraf')

    def test_read_correct_data_wrong_header(self):
        self._expect_invalid(
            b'dlefbraf'                          # bad magic
            b'\x00\x00\x00\x01'                  # width
            b'\x00\x00\x00\x01'                  # height
            b'\x01\x02\x03\x04\x05\x06\x07\x08'  # RGBA
        )

    def test_read_valid_but_no_pixels(self):
        result = self._decode(
            b'farbfeld'          # magic
            b'\x00\x00\x00\x00'  # width
            b'\x00\x00\x00\x00'  # height
        )
        self.assertListEqual([], result)

    def test_read_valid_but_too_few_pixels(self):
        self._expect_invalid(
            b'farbfeld'                          # magic
            b'\x00\x00\x00\x01'                  # width 1
            b'\x00\x00\x00\x02'                  # height 2
            b'\xff\xff\xff\xff\xff\xff\xff\xff'  # only one RGBA pixel
        )

    def test_read_valid_but_too_many_pixels(self):
        self._expect_invalid(
            b'farbfeld'                          # magic
            b'\x00\x00\x00\x01'                  # width 1
            b'\x00\x00\x00\x01'                  # height 1
            b'\xff\xff\xff\xff\xff\xff\xff\xff'  # RGBA
            b'\xff\xff\xff\xff\xff\xff\xff\xff'  # surplus RGBA
        )

    def test_read_zero_width(self):
        result = self._decode(
            b'farbfeld'          # magic
            b'\x00\x00\x00\x00'  # width 0
            b'\x00\x00\x00\x01'  # height 1
        )
        self.assertListEqual([], result)

    def test_read_zero_height(self):
        result = self._decode(
            b'farbfeld'          # magic
            b'\x00\x00\x00\x01'  # width 1
            b'\x00\x00\x00\x00'  # height 0
        )
        self.assertListEqual([], result)

    def test_read_incomplete_pixel(self):
        self._expect_invalid(
            b'farbfeld'                      # magic
            b'\x00\x00\x00\x01'              # width 1
            b'\x00\x00\x00\x01'              # height 1
            b'\x00\x20\x00\x40\x00\x80\x00'  # 7 of 8 pixel bytes
        )

    def test_read_single_pixel(self):
        result = self._decode(
            b'farbfeld'                          # magic
            b'\x00\x00\x00\x01'                  # width
            b'\x00\x00\x00\x01'                  # height
            b'\x00\x20\x00\x40\x00\x80\x00\xff'  # RGBA
        )
        self.assertListEqual([[[32, 64, 128, 255]]], result)

    def test_read_two_by_two(self):
        result = self._decode(
            b'farbfeld'                          # magic
            b'\x00\x00\x00\x02'                  # width
            b'\x00\x00\x00\x02'                  # height
            b'\x00\x01\x00\x02\x00\x03\x00\x04'  # RGBA
            b'\x00\x05\x00\x06\x00\x07\x00\x08'  # RGBA
            b'\x00\x09\x00\x0a\x00\x0b\x00\x0c'  # RGBA
            b'\x00\x0d\x00\x0e\x00\x0f\x00\x10'  # RGBA
        )
        self.assertListEqual([
            [[1, 2, 3, 4], [5, 6, 7, 8]],
            [[9, 10, 11, 12], [13, 14, 15, 16]],
        ], result)
class WriteTest(unittest.TestCase):
    """Exercise farbfeld.write() with valid and invalid pixel data."""

    def _encode(self, pixels):
        """Write *pixels* through farbfeld.write and return the raw bytes."""
        buf = io.BytesIO()
        farbfeld.write(buf, pixels)
        buf.seek(0)
        return buf.read()

    def _expect_value_error(self, pixels):
        """Assert that farbfeld.write rejects the given pixel structure."""
        with self.assertRaises(ValueError):
            farbfeld.write(io.BytesIO(), pixels)

    def test_write_invalid_data(self):
        self._expect_value_error(None)

    def test_write_zero_height(self):
        self.assertEqual(
            self._encode([]),
            b'farbfeld'          # magic
            b'\x00\x00\x00\x00'  # width
            b'\x00\x00\x00\x00'  # height
        )

    def test_write_zero_width(self):
        self.assertEqual(
            self._encode([[]]),
            b'farbfeld'          # magic
            b'\x00\x00\x00\x00'  # width
            b'\x00\x00\x00\x01'  # height
        )

    def test_write_incomplete_pixels(self):
        self._expect_value_error([[[]]])

    def test_write_too_few_components(self):
        self._expect_value_error([[[1, 2, 3]]])

    def test_write_too_many_components(self):
        self._expect_value_error([[[1, 2, 3, 4, 5]]])

    def test_write_component_out_of_range(self):
        # Components are 16-bit unsigned: valid range is 0..65535.
        self._expect_value_error([[[0, 0, 0, -1]]])
        self._expect_value_error([[[0, 0, 0, 65536]]])

    def test_write_component_within_range(self):
        # Boundary and midpoint values must be accepted.
        try:
            farbfeld.write(io.BytesIO(), [[[0, 0, 0, 0]]])
            farbfeld.write(io.BytesIO(), [[[32767, 32767, 32767, 32767]]])
            farbfeld.write(io.BytesIO(), [[[65535, 65535, 65535, 65535]]])
        except ValueError:
            self.fail('ValueError raised unexpectedly')

    def test_write_invalid_component(self):
        # Non-integer component types must be rejected.
        self._expect_value_error([[[0, 0, 0, 0.5]]])
        self._expect_value_error([[[0, 0, 0, '1']]])
        self._expect_value_error([[[0, 0, 0, None]]])

    def test_write_inconsistent_width(self):
        self._expect_value_error([
            [[0, 0, 0, 0], [0, 0, 0, 0]],  # first row, two pixels
            [[0, 0, 0, 0]],                # second row, only one pixel
        ])

    def test_write_single_pixel(self):
        self.assertEqual(
            self._encode([[[32, 64, 128, 255]]]),
            b'farbfeld'                          # magic
            b'\x00\x00\x00\x01'                  # width
            b'\x00\x00\x00\x01'                  # height
            b'\x00\x20\x00\x40\x00\x80\x00\xff'  # RGBA
        )

    def test_write_two_by_two(self):
        self.assertEqual(
            self._encode([
                [[1, 2, 3, 4], [5, 6, 7, 8]],
                [[9, 10, 11, 12], [13, 14, 15, 16]],
            ]),
            b'farbfeld'                          # magic
            b'\x00\x00\x00\x02'                  # width
            b'\x00\x00\x00\x02'                  # height
            b'\x00\x01\x00\x02\x00\x03\x00\x04'  # RGBA
            b'\x00\x05\x00\x06\x00\x07\x00\x08'  # RGBA
            b'\x00\x09\x00\x0a\x00\x0b\x00\x0c'  # RGBA
            b'\x00\x0d\x00\x0e\x00\x0f\x00\x10'  # RGBA
        )
| 30.303644 | 76 | 0.501536 | 871 | 7,485 | 4.205511 | 0.129736 | 0.096642 | 0.081081 | 0.07098 | 0.754573 | 0.754027 | 0.741742 | 0.712258 | 0.675949 | 0.675949 | 0 | 0.112829 | 0.360588 | 7,485 | 246 | 77 | 30.426829 | 0.652528 | 0.053975 | 0 | 0.62844 | 0 | 0 | 0.148512 | 0.067777 | 0 | 0 | 0 | 0 | 0.119266 | 1 | 0.110092 | false | 0 | 0.013761 | 0 | 0.133028 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
41dae6ca6afa36e98373f82982a75e2c50d8cc74 | 571 | py | Python | DigitRecognition/go-test.py | shifuture/kaggle-join | 8cc8fb6042982cba1d9a0eced1488c5a13557e80 | [
"MIT"
] | null | null | null | DigitRecognition/go-test.py | shifuture/kaggle-join | 8cc8fb6042982cba1d9a0eced1488c5a13557e80 | [
"MIT"
] | null | null | null | DigitRecognition/go-test.py | shifuture/kaggle-join | 8cc8fb6042982cba1d9a0eced1488c5a13557e80 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
import csv
import numpy as np
def loadTestData(path='./data/test.csv'):
    """Load and binarize the Kaggle digit-recognition test set.

    Each CSV cell is mapped to 0 if it is exactly '0' and to 1 otherwise,
    turning every grayscale image into a black/white bitmap.

    Parameters
    ----------
    path : str, optional
        CSV file to read; defaults to the original './data/test.csv'.

    Returns
    -------
    numpy.ndarray
        Integer array of shape (n_rows, n_columns), one flattened image
        per row (784 columns = 28x28 for the Kaggle data).
    """
    rows = []
    with open(path) as file:
        lines = csv.reader(file)
        # Skip the CSV header up front instead of binarizing it and then
        # removing it with list.remove (an O(n) equality scan).
        next(lines, None)
        for line in lines:
            # Use 0/1 ints consistently (the original mixed '0' strings
            # and int 1; np.array coerced them, but this is explicit).
            rows.append([0 if e == '0' else 1 for e in line])
    # Build the array from the actual row count rather than the previous
    # hard-coded reshape(28000, 784), which crashed on any other file size.
    return np.array(rows, int)
# Export every binarized test image as a text file, one 28x28 block per file.
tests = loadTestData()
# enumerate() replaces the range(len(...)) index loop: we need both the
# row index (for the file name) and the row itself.
for i, image in enumerate(tests):
    with open('./extract_tdata/line_%d.txt' % i, 'w') as file:
        file.write("\n".join(str(e) for e in image.reshape(28, 28)))
| 24.826087 | 70 | 0.595447 | 96 | 571 | 3.520833 | 0.5625 | 0.047337 | 0.035503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036446 | 0.231173 | 571 | 22 | 71 | 25.954545 | 0.733485 | 0.103328 | 0 | 0 | 0 | 0 | 0.090373 | 0.053045 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41dc7c7b8b8afe1b3ff6333c9f01816fc91e0652 | 54,070 | py | Python | src/funcFit/TutorialExampleSanity.py | mirofedurco/PyAstronomy | b0e5806a18bde647654e6c9de323327803722864 | [
"MIT"
] | 98 | 2015-01-01T12:46:05.000Z | 2022-02-13T14:17:36.000Z | src/funcFit/TutorialExampleSanity.py | mirofedurco/PyAstronomy | b0e5806a18bde647654e6c9de323327803722864 | [
"MIT"
] | 46 | 2015-02-10T19:53:38.000Z | 2022-01-11T17:26:05.000Z | src/funcFit/TutorialExampleSanity.py | mirofedurco/PyAstronomy | b0e5806a18bde647654e6c9de323327803722864 | [
"MIT"
] | 38 | 2015-01-08T17:00:34.000Z | 2022-03-04T05:15:22.000Z | from __future__ import print_function, division
import unittest
import os
class ExampleSanity(unittest.TestCase):
def setUp(self):
    """No fixtures required; every example is self-contained."""
    pass
def tearDown(self):
    """Nothing to clean up; the examples leave no persistent state."""
    pass
def sanity_firstExample(self):
    """Tutorial sanity check: fit a noisy Gaussian with funcFit.GaussFit1d.

    Mirrors the 'first example' of the funcFit tutorial verbatim; it must
    run through without raising. Plot windows stay suppressed (plt.show()
    is commented out) so the test suite stays non-interactive.
    """
    # Import numpy and matplotlib
    from numpy import arange, sqrt, exp, pi, random, ones
    import matplotlib.pylab as plt
    # ... and now the funcFit package
    from PyAstronomy import funcFit as fuf
    # Before we can start fitting, we need something to fit.
    # So let us create some data...
    # Creating a Gaussian with some noise
    # Choose some parameters...
    gPar = {"A": -5.0, "sig": 10.0, "mu": 10.0, "off": 1.0, "lin": 0.0}
    # Calculate profile
    x = arange(100) - 50.0
    y = gPar["off"] + gPar["A"] / sqrt(2*pi*gPar["sig"]**2) \
        * exp(-(x-gPar["mu"])**2/(2*gPar["sig"]**2))
    # Add some noise
    y += random.normal(0.0, 0.01, x.size)
    # Let us see what we have done...
    plt.plot(x, y, 'bp')
    # Now we can start exploiting the funcFit functionality to
    # fit a Gaussian to our data. In the following lines, we
    # create a fitting object representing a Gaussian and set guess parameters.
    # Now let us come to the fitting
    # First, we create the Gauss1d fit object
    gf = fuf.GaussFit1d()
    # See what parameters are available
    print("List of available parameters: ", gf.availableParameters())
    # Set guess values for the parameters
    gf["A"] = -10.0
    gf["sig"] = 15.77
    gf["off"] = 0.87
    gf["mu"] = 7.5
    # Let us see whether the assignment worked
    print("Parameters and guess values: ")
    print("  A   : ", gf["A"])
    print("  sig : ", gf["sig"])
    print("  off : ", gf["off"])
    print("  mu  : ", gf["mu"])
    print("")
    # Now some of the strengths of funcFit are demonstrated; namely, the
    # ability to consider some parameters as free and others as fixed.
    # By default, all parameters of the GaussFit1d are frozen.
    # Show values and names of frozen parameters
    print("Names and values of FROZEN parameters: ", gf.frozenParameters())
    # Which parameters shall be variable during the fit?
    # 'Thaw' those (the order is irrelevant)
    gf.thaw(["A", "sig", "off", "mu"])
    # Let us assume that we know that the amplitude is negative, i.e.,
    # no lower boundary (None) and 0.0 as upper limit.
    gf.setRestriction({"A": [None, 0.0]})
    # Now start the fit
    gf.fit(x, y, yerr=ones(x.size)*0.01)
    # Write the result to the screen and plot the best fit model
    gf.parameterSummary()
    plt.plot(x, gf.model, 'r--')
    # Show the data and the best fit model
    # plt.show()
def sanity_CustomModel(self):
    """Tutorial sanity check: define a custom OneDFit model (straight line)
    and fit it to noisy data. Mirrors the tutorial example verbatim.
    """
    # Import numpy and matplotlib
    from numpy import arange, random
    import matplotlib.pylab as plt
    # ... and now the funcFit package
    from PyAstronomy import funcFit as fuf

    class StraightLine(fuf.OneDFit):
        """
        Implements a straight line of the form y = "off" + x * "lin".
        """

        def __init__(self):
            fuf.OneDFit.__init__(self, ["off", "lin"])

        def evaluate(self, x):
            """
            Calculates and returns the model according to the
            current parameter values.

            Parameters:
              - `x` - Array specifying the positions at
                      which to evaluate the model.
            """
            y = self["off"] + (self["lin"] * x)
            return y

    # Generate some data and add noise
    x = arange(100)
    y = 10.0 + 2.0 * x + random.normal(0.0, 5.0, 100)
    # Create fitting class instance and set initial guess
    # Note that all parameters are frozen by default
    lf = StraightLine()
    lf["off"] = 20.0
    lf["lin"] = 1.0
    # Thaw parameters
    lf.thaw(["off", "lin"])
    # Start fitting
    lf.fit(x, y)
    # Investigate the result
    lf.parameterSummary()
    plt.plot(x, y, 'bp')
    plt.plot(x, lf.model, 'r--')
    # plt.show()
def sanity_Relations(self):
    """Tutorial sanity check: tie two model parameters together via
    OneDFit.relate() using a closure. Mirrors the tutorial verbatim.
    """
    # import numpy and matplotlib
    from numpy import arange, random
    import matplotlib.pylab as plt
    # ... and now the funcFit package
    from PyAstronomy import funcFit as fuf

    class StraightLine(fuf.OneDFit):
        """
        Implements a straight line of the form y = "off" + x * "lin".
        """

        def __init__(self):
            fuf.OneDFit.__init__(self, ["off", "lin"])

        def evaluate(self, x):
            """
            Calculates and returns the model according to the current
            parameter values.

            Parameters:
              - x - Array specifying the positions at which to evaluate the model.
            """
            y = self["off"] + (self["lin"] * x)
            return y

    # Create a function, which defines the relation.
    def getLinearRelation(factor):
        def linOffRel(off):
            """
            Function used to relate parameters "lin" and "off".
            """
            return factor * off
        return linOffRel
    # Note, above we used a nested function (a closure) to define
    # the relation. This approach is very flexible. If we were already
    # sure about the value of ``factor'' (e.g., 10.0), we could
    # simply have used:
    #
    # def linOffRel(off):
    #   return 10.0 * off

    # Generate some data with noise
    x = arange(100)
    y = 100.0 + 2.0 * x + random.normal(0.0, 5.0, 100)
    # Create fitting class instance and set initial guess
    lf = StraightLine()
    lf["off"] = 20.0
    lf["lin"] = 1.0
    # Thaw parameters
    lf.thaw(["off", "lin"])
    # Assume we know about a relation between 'lin' and 'off'
    # In particular, lin = 9.0 * off. We use the function getLinearRelation
    # to obtain a function object defining the relation.
    lf.relate("lin", ["off"], getLinearRelation(9))
    # Start fitting
    lf.fit(x, y)
    # Investigate the result
    lf.parameterSummary()
    plt.plot(x, y, 'bp')
    plt.plot(x, lf.model, 'r--')
    # plt.show()
def sanity_CombiningModels(self):
    """Tutorial sanity check: add two GaussFit1d instances to form a
    combined model and fit the sum. Mirrors the tutorial verbatim;
    demonstrates the renamed parameters of combined models.
    """
    # Import numpy and matplotlib
    from numpy import arange, sqrt, exp, pi, random, ones
    import matplotlib.pylab as plt
    # ... and now the funcFit package
    from PyAstronomy import funcFit as fuf

    # Creating Gaussians with some noise
    # Choose some parameters...
    gPar1 = {"A": -5.0, "sig": 10.0, "mu": 20.0, "off": 1.0, "lin": 0.0}
    gPar2 = {"A": +10.0, "sig": 10.0, "mu": -20.0, "off": 0.0, "lin": 0.0}
    # Calculate profile
    x = arange(100) - 50.0
    y = gPar1["off"] + gPar1["A"] / sqrt(2*pi*gPar1["sig"]**2) \
        * exp(-(x-gPar1["mu"])**2/(2*gPar1["sig"]**2))
    y -= gPar2["off"] + gPar2["A"] / sqrt(2*pi*gPar2["sig"]**2) \
        * exp(-(x-gPar2["mu"])**2/(2*gPar2["sig"]**2))
    # Add some noise
    y += random.normal(0.0, 0.01, x.size)
    # Let us see what we have done...
    plt.plot(x, y, 'bp')

    # Now let us come to the fitting
    # First, we create two Gauss1d fit objects
    gf1 = fuf.GaussFit1d()
    gf2 = fuf.GaussFit1d()
    # Assign guess values for the parameters
    gf1["A"] = -0.3
    gf1["sig"] = 3.0
    gf1["off"] = 0.0
    gf1["mu"] = +5.0
    gf2["A"] = 3.0
    gf2["sig"] = 15.0
    gf2["off"] = 1.0
    gf2["mu"] = -10.0
    # Which parameters shall be variable during the fit?
    # 'Thaw' those (the order is irrelevant)
    gf1.thaw(["A", "sig", "mu"])
    gf2.thaw(["sig", "mu", "off"])
    # Our actual model is the sum of both Gaussians
    twoG = gf1 + gf2
    # Show a description of the model depending on the
    # names of the individual components
    print()
    print("Description of the model: ", twoG.description())
    print()
    # Note that now the parameter names changed!
    # Each parameter is now named using the "property"
    # (e.g., 'A' or 'sig') as the first part, the component
    # "root name" (in this case 'Gaussian') and a component
    # number in parenthesis.
    print("New parameter names and values: ")
    twoG.parameterSummary()
    # We forgot to thaw the amplitude of the second Gaussian, but
    # we can still do it, but we have to refer to the correct name:
    # either by using the (new) variable name:
    twoG.thaw("A_Gaussian(2)")
    # or by specifying property name, root name, and component number
    # separately (note that a tuple is used to encapsulate them):
    twoG.thaw(("A", "Gaussian", 2))
    # We decide to rather freeze the offset of the second
    # Gaussian (we could have used a tuple here, too).
    twoG.freeze("off_Gaussian(2)")
    # Start fit as usual
    twoG.fit(x, y, yerr=ones(x.size)*0.01)
    # Write the result to the screen and plot the best fit model
    print()
    print("--------------------------------")
    print("Parameters for the combined fit:")
    print("--------------------------------")
    twoG.parameterSummary()
    # Show the data and the best fit model
    plt.plot(x, twoG.model, 'r--')
    # plt.show()
def sanity_CustomObjectiveFunctions(self):
    """Tutorial sanity check: replace chi-square with a custom objective
    function via the fuf.MiniFunc decorator. Mirrors the tutorial verbatim.
    """
    # Import numpy and matplotlib
    # (note: numpy's sum/abs intentionally shadow the builtins here)
    from numpy import arange, exp, random, ones, sum, abs
    import matplotlib.pylab as plt
    # Import funcFit
    from PyAstronomy import funcFit as fuf

    # Define parameters of faked data
    A = 1.0
    tau = 10.
    off = 0.2
    t0 = 40.
    # Calculate fake data set
    x = arange(100)
    y = A*exp(-(x-t0)/tau) * (x > t0) + off
    y += random.normal(0., 0.1, 100)
    yerr = ones(100)*0.01
    # Exponential decay model
    edf = fuf.ExpDecayFit1d()
    # Define free quantities
    edf.thaw(["A", "tau", "off", "t0"])
    # Let the amplitude be positive
    edf.setRestriction({"A": [0.0, None]})
    # Define initial guess
    edf.assignValue({"A": 1.0, "tau": 15., "off": 0.2, "t0": 50.})
    # Do not use chi square, but the linear deviation from model
    # to evaluate quality of fit.
    # Use the "MiniFunc" decorator to define your custom objective
    # function. This decorator takes the fitting object as an
    # argument. The function has to accept two arguments: the
    # fitting object and the list of free parameters.
    @fuf.MiniFunc(edf)
    def mini(edf, P):
        m = sum(abs(edf.model - edf.y)/edf.yerr)
        print("mini - current parameters: ", P, ", value is: ", m)
        return m
    # Carry out fit WITH SELF-DEFINED OBJECTIVE FUNCTION
    edf.fit(x, y, yerr=yerr, miniFunc=mini)
    # Show parameter values and plot best-fit model.
    edf.parameterSummary()
    plt.errorbar(x, y, yerr)
    plt.plot(x, edf.model, 'r-')
    # plt.show()
def sanity_Overbinning(self):
    """Tutorial sanity check: evaluate a model on a finer grid than the
    data (overbinning/rebinning) via fuf.turnIntoRebin. Mirrors the
    tutorial verbatim.
    """
    # Import numpy and matplotlib
    from numpy import arange, sqrt, exp, pi, random, ones
    import matplotlib.pylab as plt
    # ... and now the funcFit package
    from PyAstronomy import funcFit as fuf

    # Creating a Gaussian with some noise
    # Choose some parameters...
    gPar = {"A": -5.0, "sig": 10.0, "mu": 10.0, "off": 1.0, "lin": 0.0}
    # Calculate profile
    x = arange(20)/20.0 * 100.0 - 50.0
    y = gPar["off"] + gPar["A"] / sqrt(2*pi*gPar["sig"]**2) \
        * exp(-(x-gPar["mu"])**2/(2*gPar["sig"]**2))
    # Add some noise
    y += random.normal(0.0, 0.01, x.size)
    # Let us see what we have done...
    plt.plot(x, y, 'bp')

    # First, we create a "GaussFit1d_Rebin" class object (note that the
    # class object has still to be instantiated, the name is arbitrary).
    GaussFit1d_Rebin = fuf.turnIntoRebin(fuf.GaussFit1d)
    # Do the instantiation and specify how the overbinning should be
    # carried out.
    gf = GaussFit1d_Rebin()
    gf.setRebinArray_Ndt(x, 10, x[1]-x[0])
    # See what parameters are available
    print("List of available parameters: ", gf.availableParameters())
    # Set guess values for the parameters
    gf["A"] = -10.0
    gf["sig"] = 15.77
    gf["off"] = 0.87
    gf["mu"] = 7.5
    # Let us see whether the assignment worked
    print("Parameters and guess values: ")
    print("  A   : ", gf["A"])
    print("  sig : ", gf["sig"])
    print("  off : ", gf["off"])
    print("  mu  : ", gf["mu"])
    print("")
    # Now some of the strengths of funcFit are demonstrated; namely, the
    # ability to consider some parameters as free and others as fixed.
    # By default, all parameters of the GaussFit1d are frozen.
    # Show values and names of frozen parameters
    print("Names and values if FROZEN parameters: ", gf.frozenParameters())
    # Which parameters shall be variable during the fit?
    # 'Thaw' those (the order is irrelevant)
    gf.thaw(["A", "sig", "off", "mu"])
    # Let us assume that we know that the amplitude is negative, i.e.,
    # no lower boundary (None) and 0.0 as upper limit.
    gf.setRestriction({"A": [None, 0.0]})
    # Now start the fit
    gf.fit(x, y, yerr=ones(x.size)*0.01)
    # Write the result to the screen and plot the best fit model
    gf.parameterSummary()
    # Plot the final best-fit model
    plt.plot(x, gf.model, 'rp--')
    # Show the overbinned (=unbinned) model, indicate by color
    # which point are averaged to obtain a point in the binned
    # model.
    for k, v in gf.rebinIdent.items():
        c = "y"
        if k % 2 == 0:
            c = "k"
        plt.plot(gf.rebinTimes[v], gf.unbinnedModel[v], c+'.')
    # Show the data and the best fit model
    # plt.show()
def sanity_simultaneousFit(self):
    """Tutorial sanity check: fit two data sets simultaneously with
    fuf.SyncFitContainer, sharing selected parameters via treatAsEqual.
    Mirrors the tutorial verbatim.
    """
    from PyAstronomy import funcFit as fuf
    import numpy
    import matplotlib.pylab as plt

    # Set up two different x axes.
    x1 = numpy.arange(100.)/100. - 0.5
    x2 = numpy.arange(150.)/150. - 0.25
    # Getting the models ...
    gauss = fuf.GaussFit1d()
    calor = fuf.CauchyLorentz1d()
    # and assign parameters.
    gauss.assignValue({"A": 0.02, "sig": 0.1, "mu": 0.0, "off": 1.0, "lin": 0.0})
    calor.assignValue({"A": 0.07, "g": 0.1, "mu": 0.2, "off": 1.0, "lin": 0.0})
    # Create noisy data.
    y1 = gauss.evaluate(x1) + numpy.random.normal(0., 0.01, 100)
    y2 = calor.evaluate(x2) + numpy.random.normal(0., 0.01, 150)
    # Plot the noisy data.
    plt.subplot(2, 1, 1)
    plt.errorbar(x1, y1, yerr=numpy.ones(100)*0.01)
    plt.subplot(2, 1, 2)
    plt.errorbar(x2, y2, yerr=numpy.ones(150)*0.01)

    # Now, get ready two fit the data sets simultaneously.
    sf = fuf.SyncFitContainer()
    # Tell the class about the two components and save the
    # component numbers assigned to them:
    gaussCno = sf.addComponent(gauss)
    calorCno = sf.addComponent(calor)
    print("Component numbers in the syncFit container:")
    print("  Gauss: ", gaussCno, ",  Cauchy-Lorentz: ", calorCno)
    print()
    # See what happened to the parameters in the
    # simultaneous fitting class.
    # The variable names have changed.
    sf.parameterSummary()
    # Thaw all parameters (for later fit) ...
    sf.thaw(list(sf.parameters()))
    # but not the linear term.
    sf.freeze(["lin_Gaussian[s1]", "lin_CauLor[s2]"])
    # Tell the class about the identity of parameters,
    # either by using the "property name" of the parameter:
    sf.treatAsEqual("off")
    # or by specifying the names explicitly.
    sf.treatAsEqual(["g_CauLor[s2]", "sig_Gaussian[s1]"])
    # See what happened to the parameters in the
    # simultaneous fitting class.
    print()
    print("Parameters after 'treatAsEqual' has been applied:")
    sf.parameterSummary()
    # Randomize starting values.
    for fp in sf.freeParamNames():
        sf[fp] = sf[fp] + numpy.random.normal(0., 0.05)
    # Set up the data appropriately.
    data = {gaussCno: [x1, y1], calorCno: [x2, y2]}
    yerr = {gaussCno: numpy.ones(100)*0.01,
            calorCno: numpy.ones(150)*0.01}
    # Start the fit.
    sf.fit(data, yerr=yerr)
    # Show the best-fit values.
    print()
    print("Best-fit parameters:")
    sf.parameterSummary()
    # Plot the best-fit model(s).
    plt.subplot(2, 1, 1)
    plt.plot(x1, sf.models[gaussCno], 'r--')
    plt.subplot(2, 1, 2)
    plt.plot(x2, sf.models[calorCno], 'r--')
    # plt.show()
def sanity_2dCircularFit(self):
    """Tutorial sanity check: fit a 2d circular orbit model (fuf.Circle2d)
    to noisy positional data. Mirrors the tutorial verbatim.
    """
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf

    # Get the circular model and assign
    # parameter values
    c = fuf.Circle2d()
    c["r"] = 1.0
    c["t0"] = 0.0
    c["per"] = 3.0
    # Evaluate the model at a number of
    # time stamps
    t = np.linspace(0.0, 10.0, 20)
    pos = c.evaluate(t)
    # Add some error to the "measurement"
    pos += np.reshape(np.random.normal(0.0, 0.2, pos.size), pos.shape)
    err = np.reshape(np.ones(pos.size), pos.shape) * 0.2
    # Define free parameters and fit the model
    c.thaw(["r", "t0", "per"])
    c.fit(t, pos, yerr=err)
    c.parameterSummary()
    # Evaluate the model at a larger number of
    # points for plotting
    tt = np.linspace(0.0, 10.0, 200)
    model = c.evaluate(tt)
    # Plot the result
    plt.errorbar(pos[::, 0], pos[::, 1], yerr=err[::, 1],
                 xerr=err[::, 0], fmt='bp')
    plt.plot(model[::, 0], model[::, 1], 'r--')
    # plt.show()
def sanity_2dGaussFit(self):
    """Tutorial sanity check: fit a 2d Gaussian to image data using the
    coordinateGrid index->coordinate mapping. Mirrors the tutorial verbatim.
    """
    from PyAstronomy import funcFit as fuf
    import numpy as np
    import matplotlib.pylab as plt

    # Constructing the individual coordinate axes
    x = np.linspace(-2., 2., 50)
    y = np.linspace(-2., 2., 50)
    # Applying funcFit's "coordinateGrid" helper function
    # to built appropriate array-index -> coordinate mapping
    # needed for nD fitting.
    g = fuf.coordinateGrid(x, y)
    # Create the 2d-Gaussian model and assign
    # some model parameters.
    gf = fuf.GaussFit2d()
    gf["sigx"] = 0.75
    gf["sigy"] = 0.4
    gf["A"] = 1.0
    gf["rho"] = 0.4
    # Get the "data" by evaluating the model
    # and adding some noise. Note that the coordinate
    # mapping (array g) is passed to evaluate here.
    im = gf.evaluate(g)
    im += np.reshape(np.random.normal(0.0, 0.1, 2500), (50, 50))
    err = np.ones((50, 50))*0.1
    # Thaw parameters and fit
    gf.thaw(["A", "rho"])
    gf.fit(g, im, yerr=err)
    # Show the resulting parameter values ...
    gf.parameterSummary()
    # ... and plot the result.
    plt.title("Image data")
    plt.imshow(np.transpose(im), origin="lower")
    # plt.show()
    plt.title("Residuals")
    plt.imshow(np.transpose(im - gf.evaluate(g)), origin="lower")
    # plt.show()
def sanity_2gGaussFitTupleExample(self):
    """Tutorial sanity check: fit a 2d Gaussian using GaussFit2dTuple,
    which takes the coordinate axes as an (x, y) tuple instead of a
    coordinate grid. Mirrors the tutorial verbatim.
    """
    from PyAstronomy import funcFit as fuf
    import numpy as np
    import matplotlib.pylab as plt

    # Constructing the individual coordinate axes
    x = np.linspace(-2., 2., 50)
    y = np.linspace(-2., 2., 50)
    # Create the 2d-Gaussian model and assign
    # some model parameters.
    gf = fuf.GaussFit2dTuple()
    gf["sigx"] = 0.75
    gf["sigy"] = 0.4
    gf["A"] = 1.0
    gf["rho"] = 0.4
    # Get the "data" by evaluating the model
    # and adding some noise. Note that the coordinate
    # mapping (array g) is passed to evaluate here.
    im = gf.evaluate((x, y))
    im += np.reshape(np.random.normal(0.0, 0.1, 2500), (50, 50))
    err = np.ones((50, 50))*0.1
    # Thaw parameters and fit
    gf.thaw(["A", "rho"])
    gf.fit((x, y), im, yerr=err)
    # Show the resulting parameter values ...
    gf.parameterSummary()
    # ... and plot the result.
    plt.title("Image data")
    plt.imshow(np.transpose(im), origin="lower")
    # plt.show()
    plt.title("Residuals")
    plt.imshow(np.transpose(im - gf.evaluate((x, y))), origin="lower")
    # plt.show()
def sanity_coordinateGridExample(self):
    """Tutorial sanity check: demonstrate fuf.coordinateGrid, which maps
    array indices to (x, y) coordinates for nD fitting.
    """
    from PyAstronomy import funcFit as fuf
    import numpy as np

    # Constructing the two individual coordinate axes
    x = np.linspace(-2., 2., 50)
    y = np.linspace(-2., 2., 50)
    # Applying funcFit's "coordinateGrid" helper function
    # to built appropriate array-index -> coordinate mapping
    # needed for nD fitting.
    g = fuf.coordinateGrid(x, y)
    print("(x, y) coordinates at index (11, 28): ", g[11, 28])
def sanity_CashStatisticsExample(self):
    """Tutorial sanity check: fit Poisson-distributed count data using the
    Cash (1979) statistic via miniFunc="cash79". Mirrors the tutorial verbatim.
    """
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf

    # Get a Gaussian fitting object and
    # set some parameters
    g = fuf.GaussFit1d()
    g["A"] = 5.1
    g["sig"] = 0.5
    g["mu"] = 3.94
    # Generate some data with Poisson statistics
    x = np.linspace(0.0, 7., 50)
    y = np.zeros(len(x))
    for i in range(len(x)):
        y[i] = np.random.poisson(g.evaluate(x[i]))
    # Choose free parameters and "disturb" the
    # starting parameters for the fit a little.
    g.thaw(["A", "sig", "mu"])
    for par in g.freeParamNames():
        g[par] += np.random.normal(0.0, g[par]*0.1)
    # Fit using Cash statistic and print out
    # result.
    g.fit(x, y, miniFunc="cash79")
    g.parameterSummary()
    # Plot the result
    plt.plot(x, y, 'bp')
    plt.plot(x, g.evaluate(x), 'r--')
    # plt.show()
def sanity_steppar1(self):
    """Tutorial sanity check: step one parameter (A) through a range with
    steppar() and trace chi-square, plotting 68%/90% confidence levels.
    Mirrors the tutorial verbatim.
    """
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf

    # Set up a Gaussian model
    # and create some "data"
    x = np.linspace(0, 2, 100)
    gf = fuf.GaussFit1d()
    gf["A"] = 0.87
    gf["mu"] = 1.0
    gf["sig"] = 0.2
    y = gf.evaluate(x)
    y += np.random.normal(0.0, 0.1, len(x))
    # Thaw parameters, which are to be fitted. Note
    # that those parameters will also be fitted during
    # the stepping; no further parameters will be thawed.
    gf.thaw(["A", "mu", "sig"])
    # ... and "disturb" starting values a little.
    gf["A"] = gf["A"] + np.random.normal(0.0, 0.1)
    gf["mu"] = gf["mu"] + np.random.normal(0.0, 0.1)
    gf["sig"] = gf["sig"] + np.random.normal(0.0, 0.03)
    # Find the best fit solution
    gf.fit(x, y, yerr=np.ones(len(x))*0.1)
    # Step the amplitude (area of the Gaussian) through
    # the range 0.8 to 0.95 in 20 steps. Note that the
    # last part of `ranges` ('lin') is optional. You may
    # also use `log`; in this case, the stepping would be
    # equidistant in the logarithm.
    # In each step of `A`, "mu" and "sig" will be fitted,
    # because they had been thawed earlier.
    sp = gf.steppar("A", ranges={"A": [0.8, 0.95, 20, 'lin']})
    # Extract the values for the Gaussian normalization
    # (amplitude) ...
    As = list(map(lambda x: x[0], sp))
    # ... and chi square.
    chis = list(map(lambda x: x[1], sp))
    # Find minimum chi square
    cmin = min(chis)
    # Plot A vs. chi square
    plt.title('A vs. $\chi^2$ with 68% and 90% confidence levels')
    plt.xlabel("A")
    plt.ylabel("$\chi^2$")
    plt.plot(As, chis, 'bp-')
    plt.plot(As, [cmin+1.0]*len(As), 'k--')
    plt.plot(As, [cmin+2.706]*len(As), 'k:')
    # plt.show()
def sanity_steppar2(self):
    """Check the two-parameter steppar example (confidence contours)."""
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf
    # Gaussian model used to create mock "data"
    x = np.linspace(0, 2, 100)
    gf = fuf.GaussFit1d()
    gf["A"] = 0.87
    gf["mu"] = 1.0
    gf["sig"] = 0.2
    y = gf.evaluate(x)
    y += np.random.normal(0.0, 0.1, len(x))
    # Free parameters for the fit ...
    gf.thaw(["A", "mu", "sig"])
    # ... slightly offset from the true values
    gf["A"] += np.random.normal(0.0, 0.1)
    gf["mu"] += np.random.normal(0.0, 0.1)
    gf["sig"] += np.random.normal(0.0, 0.03)
    # Best-fit solution as the starting point for the stepping
    gf.fit(x, y, yerr=np.ones(len(x))*0.1)
    # Step the amplitude (area of the Gaussian) and the center
    # ("mu") of the Gaussian through the given ranges.
    result = gf.steppar(["A", "mu"], ranges={"A": [0.8, 0.95, 20],
                                             "mu": [0.96, 1.05, 15]})
    # Values of `A`, `mu`, and chi-square from the steppar output
    As = [entry[0] for entry in result]
    mus = [entry[1] for entry in result]
    chis = [entry[2] for entry in result]
    # Arrange chi-square values on the (A, mu) grid using the
    # index tuples contained in the output.
    z = np.zeros((20, 15))
    for entry in result:
        z[entry[3]] = entry[2]
    # Minimum chi-square and levels for the 68%, 90%, and 99%
    # confidence intervals.
    cm = min(chis)
    levels = [cm+2.3, cm+4.61, cm+9.21]
    # Contour plot to explore the confidence interval and correlation
    plt.xlabel("mu")
    plt.ylabel("A")
    plt.contour(np.sort(np.unique(mus)), np.sort(np.unique(As)), z,
                levels=levels)
    # Mark the input value
    plt.plot([1.0], [0.87], 'k+', markersize=20)
    # plt.show()
def sanity_errorConfInterval(self):
    """
    Checking example of errorConfInterval.

    Fix: the matplotlib title/label strings contain LaTeX ``\\chi``;
    raw strings are used to avoid the invalid ``\\c`` escape sequence
    (SyntaxWarning on modern Python).
    """
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf
    # Set up a Gaussian model and create some "data"
    x = np.linspace(0, 2, 100)
    gf = fuf.GaussFit1d()
    gf["A"] = 0.87
    gf["mu"] = 1.0
    gf["sig"] = 0.2
    y = gf.evaluate(x)
    y += np.random.normal(0.0, 0.1, len(x))
    # Thaw parameters, which are to be fitted. Note that those
    # parameters will also be fitted during the stepping; no further
    # parameters will be thawed.
    gf.thaw(["A", "mu", "sig"])
    # ... and "disturb" starting values a little.
    gf["A"] = gf["A"] + np.random.normal(0.0, 0.1)
    gf["mu"] = gf["mu"] + np.random.normal(0.0, 0.1)
    gf["sig"] = gf["sig"] + np.random.normal(0.0, 0.03)
    # Find the best fit solution
    gf.fit(x, y, yerr=np.ones(len(x))*0.1)
    # Step the amplitude through 0.8 to 0.95 in 20 steps ('lin' is
    # optional; 'log' would step equidistantly in the logarithm).
    # In each step of `A`, "mu" and "sig" will be fitted, because
    # they had been thawed earlier.
    sp = gf.steppar("A", ranges={"A": [0.8, 0.95, 20, 'lin']})
    # Extract the values for the Gaussian normalization (amplitude) ...
    As = [p[0] for p in sp]
    # ... and chi square.
    chis = [p[1] for p in sp]
    # Calculate the confidence interval automatically
    cfi90 = gf.errorConfInterval("A", dstat=2.706)
    print("90% Confidence interval: ", cfi90["limits"])
    print(" corresponding objective function values: ", cfi90["OFVals"])
    print(" number of iterations needed: ", cfi90["iters"])
    cfi68 = gf.errorConfInterval("A", dstat=1.0)
    print("68% Confidence interval: ", cfi68["limits"])
    print(" corresponding objective function values: ", cfi68["OFVals"])
    print(" number of iterations needed: ", cfi68["iters"])
    # Plot A vs. chi square (raw strings avoid the invalid '\c' escape)
    plt.title(r'A vs. $\chi^2$ 90% (black) and 68% (blue) confidence intervals')
    plt.xlabel("A")
    plt.ylabel(r"$\chi^2$")
    plt.plot(As, chis, 'bp-')
    # Indicate confidence levels by horizontal lines
    plt.plot(As, [cfi90["OFMin"]+1.0]*len(As), 'g:')
    plt.plot(As, [cfi90["OFMin"]+2.706]*len(As), 'g:')
    # Plot vertical lines to indicate the confidence intervals
    plt.plot([cfi90["limits"][0]]*2, [min(chis), max(chis)], 'k--')
    plt.plot([cfi90["limits"][1]]*2, [min(chis), max(chis)], 'k--')
    plt.plot([cfi68["limits"][0]]*2, [min(chis), max(chis)], 'b--')
    plt.plot([cfi68["limits"][1]]*2, [min(chis), max(chis)], 'b--')
    # plt.show()
def sanity_conditionalRestrictions(self):
    """
    Check the conditional restriction example.
    """
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf
    # Gaussian fitting object ...
    g = fuf.GaussFit1d()
    # ... with defined parameters
    g["A"] = 0.97
    g["mu"] = 0.1
    g["sig"] = 0.06
    # Mock "data" with noise included
    x = np.linspace(-1.0, 1.0, 200)
    y = g.evaluate(x) + np.random.normal(0.0, 0.1, len(x))
    yerr = np.ones(len(x)) * 0.1

    def myRestriction(A, sig):
        """
        A conditional restriction.

        Returns
        -------
        Penalty : float
            A large value if the condition is violated
            and zero otherwise.
        """
        if A <= 10.0*sig:
            return 0.0
        return np.abs(A - 10.0*sig + 1.0)*1e20
    # Attach the conditional restriction to the model; the returned
    # unique ID can be used to refer to that restriction later.
    uid = g.addConditionalRestriction(["A", "sig"], myRestriction)
    print("Conditional restriction has been assigned the ID: ", uid)
    print()
    # Verify that the restriction is really in place
    g.showConditionalRestrictions()
    # Free parameters ...
    g.thaw(["A", "mu", "sig"])
    # ... and fit the model (restriction included)
    g.fit(x, y, yerr=yerr)
    # Keep the resulting best-fit model
    restrictedModel = g.model.copy()
    # Drop the conditional restriction and re-fit
    g.removeConditionalRestriction(uid)
    g.fit(x, y, yerr=yerr)
    # Keep the new model
    unrestrictedModel = g.model.copy()
    # Plot the result
    # plt.errorbar(x, y, yerr=yerr, fmt='b.')
    # plt.plot(x, restrictedModel, 'r--', label="Restricted")
    # plt.plot(x, unrestrictedModel, 'g--', label="Unrestricted")
    # plt.legend()
    # plt.show()
class MCMCExampleSanity(unittest.TestCase):
    """Sanity checks for the funcFit MCMC/emcee documentation examples."""
    def setUp(self):
        # No common fixtures required; output files are removed in tearDown.
        pass
def tearDown(self):
try:
os.remove("mcmcExample.tmp")
except:
print("Could not remove file: mcmcExample.tmp")
try:
os.remove("mcmcTA.tmp")
except:
print("Could not remove file: mcmcTA.tmp")
try:
os.remove("mcmcSample.tmp")
except:
print("Could not remove file: mcmcSample.tmp")
try:
os.remove("chain.emcee")
except:
pass
try:
os.remove("gauss.emcee")
except:
print("Could not remove file: gauss.emcee")
try:
os.remove("musig.emcee")
except:
print("Could not remove file: musig.emcee")
def sanity_MCMCSampler(self):
    """Check the pymc-based fitMCMC example with a custom PyMC variable for 'A'."""
    # Import some required modules
    from numpy import arange, sqrt, exp, pi, random, ones
    import matplotlib.pylab as plt
    import pymc
    # ... and now the funcFit package
    from PyAstronomy import funcFit as fuf
    # Creating a Gaussian with some noise
    # Choose some parameters...
    gPar = {"A":-5.0, "sig":10.0, "mu":10.0, "off":1.0, "lin":0.0}
    # Calculate profile
    x = arange(100) - 50.0
    y = gPar["off"] + gPar["A"] / sqrt(2*pi*gPar["sig"]**2) \
        * exp(-(x-gPar["mu"])**2/(2*gPar["sig"]**2))
    # Add some noise
    y += random.normal(0.0, 0.01, x.size)
    # Now let us come to the fitting.
    # First, we create the Gauss1d fit object
    gf = fuf.GaussFit1d()
    # See what parameters are available
    print("List of available parameters: ", gf.availableParameters())
    # Set guess values for the parameters
    gf["A"] = -10.0
    gf["sig"] = 15.77
    gf["off"] = 0.87
    gf["mu"] = 7.5
    # Let us see whether the assignment worked
    print("Parameters and guess values: ", gf.parameters())
    # Which parameters shall be variable during the fit?
    # 'Thaw' those (the order is irrelevant)
    gf.thaw(["A", "sig", "off", "mu"])
    # Now start a simplex fit
    gf.fit(x,y,yerr=ones(x.size)*0.01)
    # Obtain the best-fit values derived by the simplex fit.
    # They are to be used as start values for the MCMC sampling.
    # Note that 'A' is missing - we will introduce this later.
    X0 = {"sig":gf["sig"], "off":gf["off"], "mu":gf["mu"]}
    # Now we specify the limits within which the individual parameters
    # can be varied (for those parameters listed in the 'X0' dictionary).
    Lims = {"sig":[-20.,20.], "off":[0.,2.], "mu":[5.,15.]}
    # For the parameters contained in 'X0', define the step widths, which
    # are to be used by the MCMC sampler. The steps are specified using
    # the same scale/units as the actual parameters.
    steps = {"A":0.01, "sig":0.1, "off":0.1, "mu":0.1}
    # In this example, we wish to define our own PyMC variable for the
    # parameter 'A'. This can be useful if nonstandard behavior is
    # desired. This is optional; 'A' could simply be included in the
    # framework of X0, Lims, and steps.
    ppa = {}
    ppa["A"] = pymc.Uniform("A", value=gf["A"], lower=-20., \
        upper=10.0, doc="Amplitude")
    # Start the sampling. The resulting Markov chain will be written
    # to the file 'mcmcExample.tmp'. In default configuration, pickle
    # is used to write that file.
    # To save the chain to a compressed 'hdf5'
    # file, you have to specify the dbArgs keyword; e.g., use:
    # dbArgs = {"db":"hdf5", "dbname":"mcmcExample.hdf5"}
    gf.fitMCMC(x, y, X0, Lims, steps, yerr=ones(x.size)*0.01, \
        pymcPars=ppa, iter=2500, burn=0, thin=1, \
        dbfile="mcmcExample.tmp")
    # Reload the database (here, this is actually not required, but it
    # is if the Markov chain is to be analyzed later).
    db = pymc.database.pickle.load('mcmcExample.tmp')
    # Plot the trace of the amplitude, 'A'.
    plt.hist(db.trace("A", 0)[:])
    # plt.show()
def sanity_MCMCPriorExample(self):
    """Check the fitMCMC example using explicit pymc priors (normal and uniform)."""
    from PyAstronomy import funcFit as fuf
    import numpy as np
    import matplotlib.pylab as plt
    import pymc
    # Create a Gauss-fit object
    gf = fuf.GaussFit1d()
    # Choose some parameters
    gf["A"] = -0.65
    gf["mu"] = 1.0
    gf["lin"] = 0.0
    gf["off"] = 1.1
    gf["sig"] = 0.2
    # Simulate data with noise
    x = np.linspace(0., 2., 100)
    y = gf.evaluate(x)
    y += np.random.normal(0, 0.05, len(x))
    gf.thaw(["A", "off", "mu", "sig"])
    # Set up a normal prior for the offset parameter.
    # Note!---The name (first parameter) must correspond to that
    # of the parameter.
    # The expectation value is set to 0.9 while the width is given
    # as 0.01 (tau = 1/sigma**2). The starting value is specified
    # as 1.0.
    offPar = pymc.Normal("off", mu=0.9, tau=(1./0.01)**2, value=1.0)
    # Use a uniform prior for mu.
    muPar = pymc.Uniform("mu", lower=0.95, upper=0.97, value=0.96)
    # Collect the "extra"-variables in a dictionary using
    # their names as keys
    pymcPars = {"mu":muPar, "off":offPar}
    # Specify starting values, X0, and limits, lims, for
    # those parameter distributions not given specifically.
    X0 = {"A":gf["A"], "sig":gf["sig"]}
    lims = {"A":[-1.0,0.0], "sig":[0., 1.0]}
    # Still, the steps dictionary has to contain all
    # parameter distributions.
    steps = {"A":0.02, "sig":0.02, "mu":0.01, "off":0.01}
    # Carry out the MCMC sampling
    gf.fitMCMC(x, y, X0, lims, steps, yerr=np.ones(len(x))*0.05, \
        pymcPars=pymcPars, burn=1000, iter=3000)
    # Set the free parameters to their posterior mean values
    for p in gf.freeParameters():
        gf[p] = gf.MCMC.trace(p)[:].mean()
    # Show the "data" and model in the upper panel
    plt.subplot(2,1,1)
    plt.title("Data and model")
    plt.errorbar(x, y, yerr=np.ones(len(x))*0.05, fmt="bp")
    # Plot lowest deviance solution
    plt.plot(x, gf.evaluate(x), 'r--')
    # Show the residuals in the lower panel
    plt.subplot(2,1,2)
    plt.title("Residuals")
    plt.errorbar(x, y-gf.evaluate(x), yerr=np.ones(len(x))*0.05, fmt="bp")
    plt.plot([min(x), max(x)], [0.0,0.0], 'r-')
    #plt.show()
def sanity_autoMCMCExample1(self):
    """Check the MCMCautoParameters example (explicit X0/lims/steps)."""
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf
    xvals = np.linspace(0, 30, 1000)
    model = fuf.GaussFit1d()
    model["A"] = 1
    model["mu"] = 23.
    model["sig"] = 0.5
    # Mock "data": model plus Gaussian noise
    noise = np.random.normal(0., 0.05, len(xvals))
    data = model.evaluate(xvals) + noise
    # Free parameters A, mu, and sig
    model.thaw(["A", "mu", "sig"])
    # Ranges used to construct the uniform priors and step sizes.
    # A single number (here for "mu") produces limits of the form
    # [m0-1.5, m0+1.5], where m0 is the parameter's current value
    # (23. in this case).
    ranges = {"A": [0, 10], "mu": 3, "sig": [0.1, 1.0]}
    # Auto-generate default input for X0, lims, and steps
    X0, lims, steps = model.MCMCautoParameters(ranges)
    # Report what was generated
    print()
    print("Auto-generated input parameters:")
    print("X0: ", X0)
    print("lims: ", lims)
    print("steps: ", steps)
    print()
    # Run the usual sampler
    model.fitMCMC(xvals, data, X0, lims, steps, yerr=noise, iter=1000)
    # ... and plot the outcome
    plt.plot(xvals, data, 'k+')
    plt.plot(xvals, model.evaluate(xvals), 'r--')
    # plt.show()
def sanity_autoMCMCExample2(self):
    """Check the autoFitMCMC example (auto-thawing via picky=False)."""
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf
    xvals = np.linspace(0, 30, 1000)
    model = fuf.GaussFit1d()
    model["A"] = 1
    model["mu"] = 23.
    model["sig"] = 0.5
    # Mock "data": model plus Gaussian noise
    noise = np.random.normal(0., 0.05, len(xvals))
    data = model.evaluate(xvals) + noise
    # Ranges used to construct the uniform priors and step sizes.
    # A single number (here for "mu") produces limits of the form
    # [m0-1.5, m0+1.5], where m0 is the parameter's current value
    # (23. in this case).
    ranges = {"A": [0, 10], "mu": 3, "sig": [0.1, 1.0]}
    # Run the auto-sampler. With picky=False, the parameters listed
    # in `ranges` are thawed automatically; all others are frozen.
    model.autoFitMCMC(xvals, data, ranges, yerr=noise, picky=False, iter=1000)
    # ... and plot the outcome
    plt.plot(xvals, data, 'k+')
    plt.plot(xvals, model.evaluate(xvals), 'r--')
    # plt.show()
def sanity_TAtut_createTrace(self):
    """
    TA tutorial, all examples.

    Fix: the 'Set of median values' line previously printed `means`
    instead of `medians`.
    """
    import numpy as np
    import matplotlib.pylab as plt
    # ... and now the funcFit package
    from PyAstronomy import funcFit as fuf
    # Starting with a Voigt profile
    vp = fuf.Voigt1d()
    # Set some values to create a model
    vp["A"] = -0.4
    vp["al"] = 0.7
    vp["mu"] = 5500.
    vp["ad"] = 0.3
    vp["off"] = 1.0
    x = np.linspace(5490., 5510., 200)
    # Create our data with some noise
    yerr = np.ones(len(x))*0.01
    y = vp.evaluate(x) + np.random.normal(0.0, 0.01, len(x))
    # Say, we have a guess of the parameters, which is, however,
    # not entirely correct
    vp["A"] = -0.376
    vp["al"] = 0.9
    vp["mu"] = 5499.7
    vp["ad"] = 0.4
    vp["off"] = 1.0
    # Plot the data and our guess
    plt.errorbar(x, y, yerr=yerr, fmt='b.-')
    plt.plot(x, vp.evaluate(x), 'r--')
    # plt.show()
    # Thaw the parameters, which we wish to vary during the sampling
    vp.thaw(["A", "al", "mu", "ad"])
    # Use current parameters as starting point for the sampling
    X0 = vp.freeParameters()
    print("Starting point for sampling: ", X0)
    # Limits within which the individual parameters can be varied;
    # effectively, the limits of the uniform priors.
    lims = {"A": [-1.0, 0.0], "al": [0.0, 3.], "ad": [0.0, 3.0], "mu": [5495., 5505.]}
    # Guess for the proposal step widths; try to capture the scale
    # of the problem in the individual parameters.
    steps = {"A": 0.02, "al": 0.01, "ad": 0.01, "mu": 0.05}
    # Start the sampling. The resulting Markov chain will be written
    # to the file 'mcmcTA.tmp' (pickle format by default). To save
    # the chain to a compressed 'hdf5' file, specify the dbArgs
    # keyword; e.g., use:
    # dbArgs = {"db":"hdf5", "dbname":"mcmcExample.hdf5"}
    vp.fitMCMC(x, y, X0, lims, steps, yerr=yerr,
               iter=2500, burn=0, thin=1,
               dbfile="mcmcTA.tmp")
    ######## Second example
    from PyAstronomy import funcFit as fuf
    # Create an instance of TraceAnalysis telling it which file to use
    ta = fuf.TraceAnalysis("mcmcTA.tmp")
    # Have a look at the deviance to check if and when
    # the chains reached equilibrium.
    ta.plotTrace("deviance")
    # ta.show()
    # Say, we are sure that after 500 iterations, the chain reached
    # equilibrium. We use this as the burn-in phase.
    ta.setBurn(500)
    # Have a second look at the deviance, this time considering the
    # burn-in. Note that the first 500 iterations are not removed
    # from the chain; they are just not considered any more.
    ta.plotTrace("deviance")
    # ta.show()
    ######## Third example
    from PyAstronomy import funcFit as fuf
    # Create an instance of TraceAnalysis telling it which file to use
    ta = fuf.TraceAnalysis("mcmcTA.tmp")
    # Use the burn-in from the previous example
    ta.setBurn(500)
    # See which model parameters have been sampled
    print("Available parameters: ", ta.availableParameters())
    # Access the traces of these parameters
    print("Trace for A: ", ta["A"])
    # Calculate mean, median, standard deviation, and
    # credibility interval for the available parameters
    for p in ta.availableParameters():
        hpd = ta.hpd(p, cred=0.95)
        print("Parameter %5s, mean = % g, median = % g, std = % g, 95%% HPD = % g - % g"
              % (p, ta.mean(p), ta.median(p), ta.std(p), hpd[0], hpd[1]))
    ######## Fourth example
    from PyAstronomy import funcFit as fuf
    # Create an instance of TraceAnalysis telling it which file to use
    ta = fuf.TraceAnalysis("mcmcTA.tmp")
    # Use the burn-in from the previous example
    ta.setBurn(500)
    # Have a look at the parameter correlations
    ta.correlationTable()
    # Calculate Pearson's and Spearman's r-coefficients
    print("Pearson: ", ta.pearsonr("ad", "al"))
    print("Spearman: ", ta.spearmanr("ad", "al"))
    # Show a plot of the correlation. Note that the plotCorrEnh
    # method can also be used, which is useful in the case of
    # long chains.
    ta.plotCorr(parsList=["ad", "al"])
    # ta.plotCorrEnh(parsList=["ad", "al"])
    # ta.show()
    ######## Fifth example
    from PyAstronomy import funcFit as fuf
    import matplotlib.pylab as plt
    import numpy as np
    # Create an instance of TraceAnalysis telling it which file to use
    ta = fuf.TraceAnalysis("mcmcTA.tmp")
    # Use the burn-in from the previous example
    ta.setBurn(500)
    # Find sets of parameters
    # First, the lowest deviance set
    lds, index = ta.parameterSet(prescription="lowestDev")
    print("Lowest deviance set: ", lds)
    print(" at chain index: ", index)
    means = ta.parameterSet(prescription="mean")
    print("Set of mean values: ", means)
    medians = ta.parameterSet(prescription="median")
    # BUGFIX: the original printed `means` here instead of `medians`
    print("Set of median values: ", medians)
    # Create Voigt model and plot the models belonging to the lowest
    # deviance, mean, and median parameter set.
    vp = fuf.Voigt1d()
    # Generate the model wavelength axis
    x = np.linspace(5490., 5510., 200)
    # Calculate and plot the models
    vp.assignValues(lds)
    plt.plot(x, vp.evaluate(x), 'b.-')
    vp.assignValues(means)
    plt.plot(x, vp.evaluate(x), 'r.-')
    vp.assignValues(medians)
    plt.plot(x, vp.evaluate(x), 'g.-')
    # plt.show()
    ######## Sixth example
    from PyAstronomy import funcFit as fuf
    # Create an instance of TraceAnalysis telling it which file to use
    ta = fuf.TraceAnalysis("mcmcTA.tmp")
    # Use the burn-in from the previous example
    ta.setBurn(500)
    # Investigate a trace ...
    ta.plotTrace("mu")
    # ta.show()
    # ... and its distribution.
    ta.plotHist("mu")
    # ta.show()
    # Combine trace and distribution
    ta.plotTraceHist("mu")
    # ta.show()
    # Plot correlations
    ta.plotCorr(parsList=["mu", "ad", "al"])
    # ta.show()
def sanity_MCMCautoParameters(self):
    """
    Checking sanity of MCMCautoParameters
    """
    import numpy as np
    import matplotlib.pylab as plt
    from PyAstronomy import funcFit as fuf
    xvals = np.linspace(0, 30, 1000)
    model = fuf.GaussFit1d()
    model["A"] = 1
    model["mu"] = 23.
    model["sig"] = 0.5
    # Mock "data": model plus Gaussian noise
    noise = np.random.normal(0., 0.05, len(xvals))
    data = model.evaluate(xvals) + noise
    # Thawing is not necessary if <picky>=False in MCMCautoParameters.
    model.thaw(["A", "mu", "sig"])
    # Auto-generate sampler input from the given ranges
    X0, lims, steps = model.MCMCautoParameters({"A": [0, 10], "mu": 3, "sig": [0.1, 1.0]})
    model.fitMCMC(xvals, data, X0, lims, steps, yerr=noise, iter=1000)
    # plt.plot(xvals, data, 'k+')
    # plt.plot(xvals, model.evaluate(xvals), 'r--')
    # plt.show()
def sanity_EMCEEfirstexample(self):
    """Check the first fitEMCEE example (Gaussian model, default priors)."""
    # Import numpy and matplotlib
    from numpy import arange, sqrt, exp, pi, random, ones
    import matplotlib.pylab as plt
    # ... and now the funcFit package
    from PyAstronomy import funcFit as fuf
    # Before we can start fitting, we need something to fit.
    # So let us create some data...
    # Choose some signal-to-noise ratio
    snr = 25.0
    # Creating a Gaussian with some noise
    # Choose some parameters...
    gf = fuf.GaussFit1d()
    gf.assignValues({"A":-5.0, "sig":2.5, "mu":10.0, "off":1.0, "lin":0.0})
    # Calculate profile
    x = arange(100) - 50.0
    y = gf.evaluate(x)
    # Add some noise
    y += random.normal(0.0, 1.0/snr, x.size)
    # Define the free parameters
    gf.thaw(["A", "sig", "mu", "off"])
    # Start a fit (quite dispensable here)
    gf.fit(x, y, yerr=ones(x.size)/snr)
    # Say, we want 200 burn-in iterations and, thereafter,
    # 1000 further iterations (per walker).
    sampleArgs = {"iters":1000, "burn":200}
    # Start the sampling (ps could be used to continue the sampling)
    ps = gf.fitEMCEE(x, y, yerr=ones(x.size)/snr, sampleArgs=sampleArgs)
    # Plot the distributions of the chains
    # NOTE: the order of the parameters in the chain object is the same
    # as the order of the parameters returned by freeParamNames()
    for i, p in enumerate(gf.freeParamNames()):
        plt.subplot(len(gf.freeParamNames()), 1, i+1)
        plt.hist(gf.emceeSampler.flatchain[::,i], label=p)
        plt.legend()
    # plt.show()
def sanity_EMCEEpriorexample(self):
    """Check the fitEMCEE prior example (constant model, Gaussian prior).

    Fix: matplotlib removed the ``normed`` keyword of ``hist``
    (gone since matplotlib 3.1); ``density=True`` is the equivalent
    replacement.
    """
    # Import numpy and matplotlib
    from numpy import arange, sqrt, exp, pi, random, ones
    import matplotlib.pylab as plt
    # ... and now the funcFit package
    from PyAstronomy import funcFit as fuf
    import numpy as np
    # Before we can start fitting, we need something to fit.
    # So let us create some data...
    # Choose some signal-to-noise ratio
    snr = 25.0
    # Choosing an arbitrary constant and ...
    c = 10.0
    # ... an equally arbitrary number of data points
    npoint = 10
    # Define 'data'
    x = arange(npoint)
    y = np.ones(len(x)) * c
    # Add some noise
    y += random.normal(0.0, 1.0/snr, x.size)
    # A funcFit object representing a constant
    pf = fuf.PolyFit1d(0)
    pf["c0"] = c
    # The only parameter shall be free
    pf.thaw("c0")
    # Say, we want 200 burn-in iterations and, thereafter,
    # 2500 further iterations (per walker).
    sampleArgs = {"iters":2500, "burn":200}
    # Start the sampling (ps could be used to continue the sampling)
    ps = pf.fitEMCEE(x, y, yerr=ones(x.size)/snr, sampleArgs=sampleArgs)
    print()
    # Plot the distributions of the chains.
    # NOTE: the order of the parameters in the chain object is the same
    # as the order of the parameters returned by freeParamNames().
    # 'density' replaces the removed 'normed' keyword.
    h = plt.hist(pf.emceeSampler.flatchain[::,0], label="c0", density=True)
    # Construct "data points" in the middle of the bins
    xhist = (h[1][1:] + h[1][0:-1]) / 2.0
    yhist = h[0]
    # Fit the histogram using a Gaussian
    gf = fuf.GaussFit1d()
    gf.assignValues({"A":1.0, "mu":c, "sig":1.0/snr/np.sqrt(npoint)})
    # First fitting only "mu" is simply quite stable
    gf.thaw("mu")
    gf.fit(xhist, yhist)
    gf.thaw(["A", "sig"])
    gf.fit(xhist, yhist)
    print()
    print(" --- Sampling results ---")
    print("Posterior estimate of constant: ", np.mean(pf.emceeSampler.flatchain[::,0]))
    print("Nominal error of the mean: ", 1.0/snr/np.sqrt(npoint))
    print("Estimate from Markov chain: ", np.std(pf.emceeSampler.flatchain[::,0]), end=' ')
    print(" and from Gaussian fit to distribution: ", gf["sig"])
    # Evaluate best-fit model ...
    xmodel = np.linspace(c - 10.0/snr, c + 10.0/snr, 250)
    ymodel = gf.evaluate(xmodel)
    # ... and plot
    plt.plot(xhist, yhist, 'rp')
    plt.plot(xmodel, ymodel, 'r--')
    plt.legend()
    # plt.show()
    # Defining a prior on c0. Prior knowledge tells us that its value
    # is around 7. Let us choose the standard deviation of the prior so
    # that the estimate will lie in the middle between 7 and 10. Here we
    # exploit symmetry and make the prior information as strong as the
    # information contained in the likelihood function.
    priors = {"c0":fuf.FuFPrior("gaussian", sig=1.0/snr/np.sqrt(npoint), mu=7.0)}
    # Start the sampling (ps could be used to continue the sampling)
    ps = pf.fitEMCEE(x, y, yerr=ones(x.size)/snr, sampleArgs=sampleArgs, priors=priors)
    print()
    print(" --- Sampling results with strong prior information ---")
    print("Posterior estimate of constant: ", np.mean(pf.emceeSampler.flatchain[::,0]), end=' ')
    print(" +/-", np.std(pf.emceeSampler.flatchain[::,0]))
    plt.hist(pf.emceeSampler.flatchain[::,0], label="c0", density=True)
    # plt.show()
def sanity_InstatiatePrior(self):
    """Check instantiation and evaluation of a Gaussian FuFPrior."""
    from PyAstronomy import funcFit as fuf
    # A Gaussian prior with mean 1.0 and standard deviation 0.1
    prior = fuf.FuFPrior("gaussian", sig=0.1, mu=1.0)
    # Arbitrary current parameter values
    current = {"a": 1.4, "b": 0.86, "c": 1.1}
    # Evaluate log(prior) for parameter "b"
    print(prior(current, "b"))
def sanity_sampleEMCEE_sampleFromGaussian(self):
    """
    Checking first sampleEMCEE example (sample from Gaussian distribution)
    """
    import numpy as np
    from PyAstronomy import funcFit as fuf
    import matplotlib.pylab as plt

    def lfGauss(v, sigma, mu):
        """
        Gaussian density.

        Parameters
        ----------
        v : dictionary
            Holds the current value of "x"
        mu, sigma : float
            Mean and standard deviation of the Gaussian. Specified via
            the `largs` argument.

        Returns
        -------
        lp : float
            Natural logarithm of the density.
        """
        result = 0.0
        # Log(density) of the Gaussian evaluated at v["x"]
        result += -0.5*np.log(2.*np.pi*sigma**2) - (v["x"] - mu)**2/(2.*sigma**2)
        return result
    # Sampling arguments
    # burn: Number of burn-in steps per walker
    # iters: Number of iterations per walker
    sa = {"burn":1000, "iters":5000}
    # Starting values
    fv0 = {"x":0.5}
    # Specify standard deviation and mean of Gaussian
    la = {"mu":0.5, "sigma":0.25}
    # Sample from distribution
    ps = fuf.sampleEMCEE(["x"], fv0, lfGauss, largs=la, sampleArgs=sa, nwalker=4, dbfile="gauss.emcee")
    print()
    # Use TraceAnalysis to look at chains
    ta = fuf.TraceAnalysis("gauss.emcee")
    print("Available chains: ", ta.availableParameters())
    print("Mean and STD of chain: ", np.mean(ta["x"]), np.std(ta["x"]))
    # Check distribution of chain
    # Plot histogram of chain (note: the 'normed' keyword was removed
    # from matplotlib; use density=True if re-enabling this line)
    # plt.hist(ta["x"], 60, density=True)
    # Overplot Gaussian model
    xx = np.linspace(la["mu"]-6*la["sigma"], la["mu"]+6*la["sigma"], 1000)
    yy = 1./np.sqrt(2.*np.pi*la["sigma"]**2) * np.exp(-(xx - la["mu"])**2/(2.*la["sigma"]**2))
    # plt.plot(xx, yy, 'r--')
    # plt.show()
def sanity_sampleEMCEE_estimateMuSig(self):
    """
    Checking sampleEMCEE example (estimate mu and sigma)
    """
    import numpy as np
    from PyAstronomy import funcFit as fuf

    def lfGaussMS(v, x=None):
        """
        Gaussian posterior with 1/sigma prior on sigma.

        Parameters
        ----------
        v : dictionary
            Holds current values of "sigma" and "mu"
        x : array
            The 'data' observed. Will be specified by the `largs` keyword.

        Returns
        -------
        lp : float
            Natural logarithm of the density.
        """
        if v["sigma"] < 0.:
            # Penalize negative standard deviations
            return -1e20*abs(v["sigma"])
        result = 0.0
        # Apply prior on sigma (log of 1/sigma)
        result -= np.log(v["sigma"])
        # Add log(likelihood) summed over all data points
        result += np.sum(-0.5*np.log(2.*np.pi*v["sigma"]**2) - (x - v["mu"])**2/(2.*v["sigma"]**2))
        return result
    # Sampling arguments
    # burn: Number of burn-in steps per walker
    # iters: Number of iterations per walker
    sa = {"burn":1000, "iters":5000}
    # Starting values
    fv0 = {"sigma":1., "mu":1.}
    # 'Observed' data
    la = {"x":np.random.normal(0.,1.,1000)}
    print("Mean of 'data': ", np.mean(la["x"]))
    print("Standard deviation of 'data': ", np.std(la["x"]))
    # Scale width for distributing the walkers
    s = {"mu":0.01, "sigma":0.5}
    ps = fuf.sampleEMCEE(["mu", "sigma"], fv0, lfGaussMS, largs=la, sampleArgs=sa, nwalker=4, \
        scales=s, dbfile="musig.emcee")
    print()
    # Use TraceAnalysis to look at chains
    ta = fuf.TraceAnalysis("musig.emcee")
    print("Available chains: ", ta.availableParameters())
    # ta.plotTraceHist('mu')
    # ta.show()
    #
    # ta.plotTraceHist('sigma')
    # ta.show()
| 32.435513 | 103 | 0.597263 | 7,861 | 54,070 | 4.100369 | 0.111691 | 0.005522 | 0.014116 | 0.014767 | 0.546552 | 0.516148 | 0.481029 | 0.459901 | 0.448795 | 0.434275 | 0 | 0.036962 | 0.261457 | 54,070 | 1,666 | 104 | 32.454982 | 0.770215 | 0.389643 | 0 | 0.498018 | 0 | 0.002642 | 0.10606 | 0.002009 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054161 | false | 0.005284 | 0.11889 | 0 | 0.191546 | 0.110964 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41de05126656eb0665e6b6dd493d706236d85602 | 2,905 | py | Python | virtual_machines/update-matching-table.py | AmoVanB/chameleon-end-host | 573e1dccdaf4ca2bebedc96a7b902e622c50acab | [
"Apache-2.0"
] | null | null | null | virtual_machines/update-matching-table.py | AmoVanB/chameleon-end-host | 573e1dccdaf4ca2bebedc96a7b902e622c50acab | [
"Apache-2.0"
] | null | null | null | virtual_machines/update-matching-table.py | AmoVanB/chameleon-end-host | 573e1dccdaf4ca2bebedc96a7b902e622c50acab | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
"""
This script, to be used by VM 0, sends a configuration
message to the virtual switch to create a particular
tagging and shaping rule.
Author: Amaury Van Bemten <amaury.van-bemten@tum.de>
"""
# Scapy provides Ether, Raw, and sendp used below
from scapy.all import *
import sys
# Import the scapy runtime configuration object
from scapy.all import conf as scapyconf
# Disable scapy promiscuous mode since the interface is already in this mode
scapyconf.sniff_promisc = 0
def update_matching_rule(kni_id, rule_id, protocol, source_ip, destination_ip, source_port, destination_port, tags, rate_bps, burst_bits):
    """
    Send a configuration frame to the virtual switch installing a
    tagging and shaping rule.

    The rule is serialized into a custom payload carried by an
    Ethernet frame with EtherType 0xbebe and sent out on "eth1".
    Exits the program if the IP arrays do not have four elements.

    Parameters
    ----------
    kni_id, rule_id, protocol : int
        Rule identification fields (1 byte each on the wire).
    source_ip, destination_ip : sequence of 4 ints
        IPv4 addresses, one integer per octet.
    source_port, destination_port : int
        Transport-layer ports (2 bytes each, big endian).
    tags : sequence of int
        VLAN tags; each is serialized as 0x8100 (the 802.1Q TPID)
        followed by the tag value.
    rate_bps : int
        Shaping rate in bits per second (8 bytes, little endian).
    burst_bits : int
        Burst allowance in bits (8 bytes, little endian).
    """
    # Header: kni_id, rule_id, protocol (1 byte each) plus 3 zero
    # padding bytes
    payload = list(kni_id.to_bytes(1, byteorder = 'big'))
    payload += list(rule_id.to_bytes(1, byteorder = 'big'))
    payload += list(protocol.to_bytes(1, byteorder = 'big'))
    payload += list(int(0).to_bytes(3, byteorder = 'big'))
    if len(source_ip) != 4 or len(destination_ip) != 4:
        print("Source and destination IPs should be arrays of size 4")
        sys.exit(-1)
    # IPv4 addresses, one octet at a time
    for ip_elem in list(source_ip) + list(destination_ip):
        payload += list(ip_elem.to_bytes(1, byteorder = 'big'))
    # Transport ports, big endian
    payload += list(source_port.to_bytes(2, byteorder = 'big'))
    payload += list(destination_port.to_bytes(2, byteorder = 'big'))
    # Shaper fields; NOTE(review): these are little endian, presumably
    # matching the switch host's native byte order — confirm against
    # the switch implementation.
    payload += list(rate_bps.to_bytes(8, byteorder = 'little')) # rate, in bits per second
    payload += list(burst_bits.to_bytes(8, byteorder = 'little')) # burst, in bits
    n_tokens = burst_bits
    rte_timestamp = int(10000)
    payload += list(n_tokens.to_bytes(8, byteorder = 'little')) # n_tokens, should be initially the same as burst, but later on it is converted to burst*cpu_freq
    payload += list(rte_timestamp.to_bytes(8, byteorder = 'little')) # timestamp, it will be overwritten anyway
    # Number of tags, then one (TPID 0x8100, tag) pair per tag
    payload += list(len(tags).to_bytes(2, byteorder = 'little'))
    for tag in tags:
        payload += list(0x8100.to_bytes(2, byteorder = 'big'))
        payload += list(tag.to_bytes(2, byteorder = 'big'))
    # Wrap in an Ethernet frame with custom EtherType 0xbebe, dump it
    # for debugging, and send it on eth1
    frame = Ether(type=0xbebe) / Raw(payload)
    frame.show()
    sendp(frame, iface="eth1")
def clean_table():
    """Reset every (kni, rule) slot on the virtual switch to an empty rule.

    Bug fix: the original call omitted the rate_bps and burst_bits
    arguments, so update_matching_rule() raised a TypeError before a single
    rule could be cleared; zeroed values are now passed explicitly.
    """
    for kni_id in range(0, 20):
        for rule_id in range(0, 5):
            update_matching_rule(kni_id, rule_id, 0, [0, 0, 0, 0], [0, 0, 0, 0],
                                 0, 0, [0, 0, 0, 0, 0], 0, 0)
# --- command-line entry point ---------------------------------------------
# Usage: kni_id rule_id protocol source_ip destination_ip source_port
#        destination_port tags(comma-separated) rate_bps burst_bits
if len(sys.argv) < 11:
    print("Need at least 10 parameters")
    sys.exit(-1)
kni_id = int(sys.argv[1])
rule_id = int(sys.argv[2])
protocol = int(sys.argv[3])
# IPs are given in dotted notation and split into 4 octets
source_ip = [int(elem) for elem in sys.argv[4].split(".")]
destination_ip = [int(elem) for elem in sys.argv[5].split(".")]
source_port = int(sys.argv[6])
destination_port = int(sys.argv[7])
# VLAN tags are given comma-separated
tags = [int(elem) for elem in sys.argv[8].split(",")]
rate_bps = int(sys.argv[9])
burst_bits = int(sys.argv[10])
# the switch-side rule struct reserves room for at most 10 tags
if(len(tags) > 10):
    print("At most 10 tags are allowed in the current implementation")
    sys.exit(-1)
update_matching_rule(kni_id, rule_id, protocol, source_ip, destination_ip, source_port, destination_port, tags, rate_bps, burst_bits)
| 40.915493 | 161 | 0.683649 | 469 | 2,905 | 4.08742 | 0.294243 | 0.015649 | 0.021909 | 0.027126 | 0.346896 | 0.288472 | 0.288472 | 0.212833 | 0.115806 | 0.115806 | 0 | 0.031733 | 0.175559 | 2,905 | 70 | 162 | 41.5 | 0.768685 | 0.161102 | 0 | 0.06 | 0 | 0 | 0.082955 | 0 | 0 | 0 | 0.004953 | 0 | 0 | 1 | 0.04 | false | 0 | 0.06 | 0 | 0.1 | 0.06 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41df73d109c0b036f4dc5a0fd33804ce5856c662 | 1,351 | py | Python | radSeqAmp/_versioninfo.py | msettles/radSeqAmp | a89d1aa12601dcd7aba0e83b2ae28fc3ff76989f | [
"Apache-2.0"
] | null | null | null | radSeqAmp/_versioninfo.py | msettles/radSeqAmp | a89d1aa12601dcd7aba0e83b2ae28fc3ff76989f | [
"Apache-2.0"
] | null | null | null | radSeqAmp/_versioninfo.py | msettles/radSeqAmp | a89d1aa12601dcd7aba0e83b2ae28fc3ff76989f | [
"Apache-2.0"
] | null | null | null | # _versioninfo.py
#
# gets the version number from the package info
# checks it agains the github version
import sys
from pkg_resources import get_distribution, parse_version
try:
_dist = get_distribution('radSeqAmp')
version_num = _dist.version
except:
version_num = 'Please install this project with setup.py'
version_master = "https://raw.githubusercontent.com/msettles/radSeqAmp/master/VERSION"
repo_master = "https://github.com/msettles/radSeqAmp"
version_develop = "https://raw.githubusercontent.com/msettles/radSeqAmp/develop/VERSION"
repo_develop = "https://github.com/msettles/radSeqAmp/tree/develop"
try:
import urllib2
github_version_num = urllib2.urlopen(version_master).readline().strip()
if parse_version(github_version_num) > _dist.parsed_version:
sys.stderr.write("A newer version (%s) of radSeqAmp is available at %s\n" % (github_version_num, repo_master))
elif parse_version(github_version_num) < _dist.parsed_version:
github_version_num = urllib2.urlopen(version_develop).readline().strip()
if parse_version(github_version_num) > _dist.parsed_version:
sys.stderr.write("A newer version (%s) of radSeqAmp is available at %s\n" % (github_version_num, repo_develop))
except:
sys.stderr.write("Error retrieving github version_number\n")
__version__ = version_num
| 40.939394 | 123 | 0.763138 | 180 | 1,351 | 5.472222 | 0.327778 | 0.101523 | 0.113706 | 0.093401 | 0.547208 | 0.484264 | 0.317767 | 0.317767 | 0.272081 | 0.272081 | 0 | 0.002566 | 0.134715 | 1,351 | 32 | 124 | 42.21875 | 0.840034 | 0.071799 | 0 | 0.26087 | 0 | 0 | 0.336269 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41df8366d44990c149549ad5a6aecb5e9bc2fcdb | 5,835 | py | Python | weekly_degradation.py | rajeevratan84/LTE-KPI-Anomaly-Detection | b5d3ce261f75b94956867645fd3479c0b2eb0cd8 | [
"MIT"
] | null | null | null | weekly_degradation.py | rajeevratan84/LTE-KPI-Anomaly-Detection | b5d3ce261f75b94956867645fd3479c0b2eb0cd8 | [
"MIT"
] | null | null | null | weekly_degradation.py | rajeevratan84/LTE-KPI-Anomaly-Detection | b5d3ce261f75b94956867645fd3479c0b2eb0cd8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from configuration.settings import Conf
from database.sql_connect import SQLDatabase
from KPIForecaster.forecaster import KPIForecaster
from datetime import datetime
import pandas as pd
import numpy as np
import time
import sys
import os.path
def findDegradation(df, weeks=3):
    """Mark the start of a sustained degradation streak.

    A row gets FLAG = 1 when it begins a run of consecutive degraded weeks
    (DEGRADED == 1) at least 3 weeks long (4 when `weeks` != 3) and the
    previous week was not degraded.

    Args:
        df: DataFrame with a numeric 0/1 'DEGRADED' column, ordered by week.
        weeks: streak length required; 3 means 3 weeks, any other value 4.

    Returns:
        The same DataFrame with an integer 'FLAG' column added.
    """
    prev_wk = df.shift(1)['DEGRADED']
    next_wk = df.shift(-1)['DEGRADED']
    next_wk2 = df.shift(-2)['DEGRADED']
    next_wk3 = df.shift(-3)['DEGRADED']

    # Bug fix: initialize FLAG up front so the column always exists — the
    # original only created it via .loc, raising KeyError at astype() when
    # no streak matched. This also avoids fillna(0) over the whole frame,
    # which silently clobbered unrelated NaNs.
    df['FLAG'] = 0

    streak = (prev_wk != 1) & (df['DEGRADED'] == 1) & (next_wk == 1) & (next_wk2 == 1)
    if weeks != 3:
        streak &= (next_wk3 == 1)
    df.loc[streak, 'FLAG'] = 1

    df['FLAG'] = df['FLAG'].fillna(0).astype(int)
    return df
def getConsecutiveSequencesWeekly(df):
    """Extend each degradation start flag across its consecutive streak.

    For every row where FLAG == 1, up to 6 following rows also get FLAG = 1
    for as long as they remain degraded.

    NOTE: columns are addressed positionally — this assumes column 5 is
    'DEGRADED' and column 6 is 'FLAG', as produced by the report pipeline.

    Bug fix: the scan now stops at the end of the frame instead of raising
    IndexError when a streak starts within the last 6 rows.

    Args:
        df: report DataFrame with DEGRADED at position 5 and FLAG at 6.

    Returns:
        The same DataFrame with FLAG extended in place.
    """
    DEGRADED_COL = 5
    FLAG_COL = 6

    # Positions of streak-start rows, captured before any FLAG is extended
    # so newly-set flags are not reprocessed.
    flagged_positions = [pos for pos, (_, row) in enumerate(df.iterrows())
                         if row['FLAG'] == 1]
    last = len(df)
    for start in flagged_positions:
        for offset in range(1, 7):
            pos = start + offset
            if pos >= last:
                break  # bug fix: do not index past the end of the frame
            if df.iloc[pos, DEGRADED_COL] == 1:
                df.iloc[pos, FLAG_COL] = 1
            else:
                break
    return df
def getSummaryReport(df):
    """Average throughput stats per cell over the three most recent weeks.

    Keeps only rows flagged as sustained degradation (FLAG == 1) whose
    START_DATE falls within the three most recent distinct dates, then
    averages the throughput columns per cell.

    Bug fix: mean(numeric_only=True) — pandas >= 2.0 raises a TypeError
    when non-numeric columns (e.g. START_DATE) reach GroupBy.mean(); older
    pandas silently dropped them, so behavior is unchanged there.

    Args:
        df: weekly degradation report with CELL_NAME, START_DATE, FLAG and
            the two DL_USER_THROUGHPUT_MBPS_* columns.

    Returns:
        DataFrame with CELL_NAME and the averaged throughput columns.
    """
    dates = pd.to_datetime(df['START_DATE'].unique()).sort_values()[-3:]
    recent = [d.strftime('%Y-%m-%d') for d in dates]

    flagged = df[df['FLAG'] == 1]
    recent_df = flagged[flagged['START_DATE'].isin(recent)]
    recent_df = recent_df.groupby(['CELL_NAME']).mean(numeric_only=True).reset_index()
    return recent_df[['CELL_NAME', 'DL_USER_THROUGHPUT_MBPS_AVERAGE',
                      'DL_USER_THROUGHPUT_MBPS_PCT_CHANGE']]
# ---------------------------------------------------------------------------
# Script entry: pull hourly KPI data, compute weekly degradation flags per
# cell, and upload the results to the data warehouse.
# ---------------------------------------------------------------------------

# Directory this script was launched from (used to locate config.json).
path = sys.argv[0].rsplit("/", 1)[0]

# Create configuration and Database connection and our KPI Forecaster Object
try:
    conf = Conf(os.path.join(path,"config.json"))
except:
    conf = Conf("config.json")

sql = SQLDatabase(conf)

# Creating out KPI Forecaster Object
KPIForecaster = KPIForecaster(conf)

#df_train = pd.read_csv('FT_CELL_NOV.csv')

# Starting Timer for benchmarking
T_START = time.time()

#df_train = sql.getHourlyKPIReportDegradation()
df_train = sql.getHourlyKPIReportXDays()

t0 = time.time()
completion_time = t0-T_START
print(f'[INFO] Total Time to Download Report: {completion_time}')
print("[INFO] Report Loaded")

# Replace UTC string from time
df_train['START_TIME'] = df_train['START_TIME'].str.replace('\(UTC-04:00\)', '')

# Set KPI here
KPI = 'DL_USER_THROUGHPUT_MBPS'

cell_names = df_train.CELL_NAME.unique()

# Derive week keys (YEAR_WEEK) and calendar dates from the timestamp.
df_train['START_TIME'] = pd.to_datetime(df_train['START_TIME'])
df_train['Week_Number'] = df_train['START_TIME'].dt.isocalendar().week
df_train['Year'] = df_train['START_TIME'].dt.year
df_train['YEAR_WEEK'] = df_train['Year'].astype(str) + "_" + df_train['Week_Number'].astype(str)
df_train['START_DATE'] = df_train['START_TIME'].dt.date
df_train['START_DATE'] = df_train['START_DATE'].astype(str)

# First calendar date seen for each week, re-attached after grouping.
start_dates = df_train[['START_DATE', 'YEAR_WEEK']].copy()
start_dates = start_dates.drop_duplicates(subset=['YEAR_WEEK'], keep='first').reset_index()
del start_dates['index']

df = pd.DataFrame()
appended_data = []
number_of_cells = len(cell_names)

# Per cell: weekly mean throughput, week-over-week percent change, and a
# DEGRADED marker for drops of 5% or more; then flag 3-week streak starts.
for (i,cell_name) in enumerate(cell_names):
    df = df_train[df_train["CELL_NAME"] == cell_name]
    df2 = df.groupby(['CELL_NAME','YEAR_WEEK']).mean().pct_change().reset_index()
    df2['KEY'] = df2['CELL_NAME'] + df2['YEAR_WEEK']
    df3 = df.groupby(['CELL_NAME','YEAR_WEEK']).mean().reset_index()
    df3['KEY'] = df3['CELL_NAME'] + df3['YEAR_WEEK']
    df3 = df3[['DL_USER_THROUGHPUT_MBPS', 'KEY']].copy()
    # merge: _x holds the pct_change series, _y the weekly average
    df4 = pd.merge(df2, df3, on='KEY')
    df2 = df4.rename({"DL_USER_THROUGHPUT_MBPS_x": "DL_USER_THROUGHPUT_MBPS_PCT_CHANGE",
                      "DL_USER_THROUGHPUT_MBPS_y": "DL_USER_THROUGHPUT_MBPS_AVERAGE"
                      }, axis='columns')
    df2 = df2[['CELL_NAME','YEAR_WEEK', 'DL_USER_THROUGHPUT_MBPS_PCT_CHANGE',
               'DL_USER_THROUGHPUT_MBPS_AVERAGE']]
    df2 = df2.fillna(0)
    df2['DEGRADED'] = df2['DL_USER_THROUGHPUT_MBPS_PCT_CHANGE'].apply(lambda x: 1 if x <= -0.05 else 0)
    df2 = findDegradation(df2, 3)
    appended_data.append(df2)
    print(f'[INFO] {i+1} of {number_of_cells} completed.')
    #if i == 100:
    #    break

appended_data = pd.concat(appended_data, axis=0)

name = KPI + "_PCT_CHANGE"
appended_data = appended_data.rename({KPI: name,}, axis='columns')

# Attach the first date of each week and order the report columns.
result = pd.merge(appended_data, start_dates, on='YEAR_WEEK')
result = result.sort_values(['CELL_NAME','YEAR_WEEK'])
result = result[['CELL_NAME', 'YEAR_WEEK','START_DATE','DL_USER_THROUGHPUT_MBPS_AVERAGE',
                 'DL_USER_THROUGHPUT_MBPS_PCT_CHANGE','DEGRADED','FLAG']]

# Adding Flag Sequences
result = getConsecutiveSequencesWeekly(result)
result = result.fillna(0)

# Saving and Uploading to DWH
#path = "./Reports/DEGRADATION/"
#KPIForecaster.makeDir(path)
#date = datetime.today().strftime('%Y_%m_%d')
#file_name = path + "WEEKLY_DEGRADATION_REPORT_" + KPI + "_" + str(date) + ".csv"
#result.to_csv(file_name)

print("[INFO] Uploading Report to DWH.")
# Percent change can be inf when the previous week's mean was 0.
result['DL_USER_THROUGHPUT_MBPS_PCT_CHANGE'].replace(np.inf, 0, inplace=True)
sql.dumpToDWH(result, "KPI_DEGRADATION_WEEKLY", if_exists = 'append')

# Refresh the 3-week summary table from scratch.
summary = getSummaryReport(result)
sql.deleteTable("KPI_DEGRADATION_WEEKLY_SUMMARY")
sql.dumpToDWH(summary, "KPI_DEGRADATION_WEEKLY_SUMMARY")
41e2290afea8242f9b3ad627b8896fc8296f5d30 | 110 | py | Python | basis/cli/commands/logout.py | kvh/basis | 8d109ff5ccf2c30b1a11406827d2c1620691ad95 | [
"BSD-3-Clause"
] | 11 | 2020-05-29T20:56:48.000Z | 2021-09-22T15:44:42.000Z | basis/cli/commands/logout.py | kvh/basis | 8d109ff5ccf2c30b1a11406827d2c1620691ad95 | [
"BSD-3-Clause"
] | null | null | null | basis/cli/commands/logout.py | kvh/basis | 8d109ff5ccf2c30b1a11406827d2c1620691ad95 | [
"BSD-3-Clause"
] | null | null | null | from basis.cli.services import auth
def logout():
    """Log out of your Basis account.

    Delegates to the auth service; no value is returned.
    """
    auth.logout()
| 15.714286 | 39 | 0.663636 | 16 | 110 | 4.5625 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.209091 | 110 | 6 | 40 | 18.333333 | 0.83908 | 0.263636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
41e4a272d03b82c913d673af1e5e15ff825ef623 | 2,074 | py | Python | blockade/komand_blockade/actions/add_user/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | blockade/komand_blockade/actions/add_user/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | blockade/komand_blockade/actions/add_user/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    """Input parameter names for the Add User action (Komand SDK generated)."""
    USER_EMAIL = "user_email"
    USER_NAME = "user_name"
    USER_ROLE = "user_role"
class Output:
    """Output field names for the Add User action (Komand SDK generated)."""
    API_KEY = "api_key"
    EMAIL = "email"
    MESSAGE = "message"
    NAME = "name"
    ROLE = "role"
    SUCCESS = "success"
class AddUserInput(komand.Input):
    """JSON-schema wrapper for the Add User action's input.

    NOTE: this class is generated by the Komand SDK ("DO NOT EDIT" header);
    the JSON schema below must stay in sync with the plugin spec.
    """
    schema = json.loads("""
{
  "type": "object",
  "title": "Variables",
  "properties": {
    "user_email": {
      "type": "string",
      "title": "Email",
      "description": "Email of new user",
      "order": 1
    },
    "user_name": {
      "type": "string",
      "title": "Username",
      "description": "Name of new user",
      "order": 2
    },
    "user_role": {
      "type": "string",
      "title": "Role",
      "description": "Role of new user",
      "enum": [
        "analyst",
        "admin"
      ],
      "order": 3
    }
  },
  "required": [
    "user_email",
    "user_name",
    "user_role"
  ]
}
""")

    def __init__(self):
        # NOTE(review): super(self.__class__, ...) misbehaves under further
        # subclassing (infinite recursion); left as-is because the file is
        # SDK-generated.
        super(self.__class__, self).__init__(self.schema)
class AddUserOutput(komand.Output):
    """JSON-schema wrapper for the Add User action's output.

    NOTE: this class is generated by the Komand SDK ("DO NOT EDIT" header);
    the JSON schema below must stay in sync with the plugin spec.
    """
    schema = json.loads("""
{
  "type": "object",
  "title": "Variables",
  "properties": {
    "api_key": {
      "type": "string",
      "title": "API Key",
      "description": "API key",
      "order": 1
    },
    "email": {
      "type": "string",
      "title": "Email",
      "description": "Email",
      "order": 2
    },
    "message": {
      "type": "string",
      "title": "Message",
      "description": "Message",
      "order": 6
    },
    "name": {
      "type": "string",
      "title": "Name",
      "description": "Name",
      "order": 3
    },
    "role": {
      "type": "string",
      "title": "Role",
      "description": "Role",
      "order": 4
    },
    "success": {
      "type": "boolean",
      "title": "Success",
      "description": "Success",
      "order": 5
    }
  },
  "required": [
    "success",
    "message"
  ]
}
""")

    def __init__(self):
        # NOTE(review): super(self.__class__, ...) misbehaves under further
        # subclassing (infinite recursion); left as-is because the file is
        # SDK-generated.
        super(self.__class__, self).__init__(self.schema)
| 18.192982 | 57 | 0.486982 | 194 | 2,074 | 5.005155 | 0.257732 | 0.082389 | 0.123584 | 0.035015 | 0.395469 | 0.352214 | 0.352214 | 0.189495 | 0.088568 | 0.088568 | 0 | 0.006401 | 0.322083 | 2,074 | 113 | 58 | 18.353982 | 0.684211 | 0.01784 | 0 | 0.333333 | 1 | 0 | 0.743489 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.019608 | 0 | 0.186275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
41e5525e7a720e9de54e83ab3802a2d8a16f8134 | 7,937 | py | Python | starthinker/tool/example.py | Ressmann/starthinker | 301c5cf17e382afee346871974ca2f4ae905a94a | [
"Apache-2.0"
] | 138 | 2018-11-28T21:42:44.000Z | 2022-03-30T17:26:35.000Z | starthinker/tool/example.py | Ressmann/starthinker | 301c5cf17e382afee346871974ca2f4ae905a94a | [
"Apache-2.0"
] | 36 | 2019-02-19T18:33:20.000Z | 2022-01-24T18:02:44.000Z | starthinker/tool/example.py | Ressmann/starthinker | 301c5cf17e382afee346871974ca2f4ae905a94a | [
"Apache-2.0"
] | 54 | 2018-12-06T05:47:32.000Z | 2022-02-21T22:01:01.000Z | ###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
"""StarThinker generator for python examples.
Includes both the command line and libraries used by UI.
See main for usage description.
"""
import argparse
import textwrap
from starthinker.util.configuration import commandline_parser
from starthinker.util.recipe import dict_to_python
from starthinker.util.recipe import get_recipe
from starthinker.util.recipe import json_get_fields
from starthinker.util.recipe import json_expand_queries
# License/provenance banner prepended verbatim to every generated example file.
DISCLAIMER = '''###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
'''
def parameters_to_argparse(description, instructions, parameters):
    """Render the argparse boilerplate for a generated example script.

    Emits parser construction code with the standard StarThinker credential
    flags plus one flag per recipe field, and returns it as Python source.

    Args:
        description: recipe description placed in the parser help text.
        instructions: manual setup steps, numbered in the help text.
        parameters: list of field dicts ('name', 'description', 'default').

    Returns:
        (string) Python source for the argparse section.
    """
    lines = [
        '  parser = argparse.ArgumentParser(\n',
        '    formatter_class=argparse.RawDescriptionHelpFormatter,\n',
        '    description=textwrap.dedent("""\n',
    ]

    if description:
        lines.append('      %s\n' % description)

    if instructions:
        lines.append('\n')
        lines.extend(
            '        %d. %s\n' % (number, step)
            for number, step in enumerate(instructions, 1)
        )

    lines.append('  """))\n\n')

    # the fixed credential/identity flags every generated script accepts
    fixed_flags = (
        ('-project', 'Cloud ID of Google Cloud Project.'),
        ('-key', 'API Key of Google Cloud Project.'),
        ('-client', 'Path to CLIENT credentials json file.'),
        ('-user', 'Path to USER credentials json file.'),
        ('-service', 'Path to SERVICE credentials json file.'),
    )
    for flag, help_text in fixed_flags:
        lines.append('  parser.add_argument("%s", help="%s", default=None)\n' % (flag, help_text))
    lines.append('  parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")\n')
    lines.append('\n')

    # one flag per recipe field, with its default rendered via repr()
    for parameter in parameters:
        lines.append('  parser.add_argument("-%s", help="%s", default=%s)\n' % (
            parameter['name'],
            parameter.get('description', ''),
            repr(parameter.get('default')),
        ))
    lines.append('\n')

    return ''.join(lines)
def recipe_to_python(name, description, instructions, tasks, parameters={}, project=None, client_credentials=None, user_credentials=None, service_credentials=None):
    """ Converts a JSON recipe into a python stand alone example.

    Sets up multiple steps to execute recipe:
      1. Install starthinker from repository
      2. Get Cloud Project ID.
      3. Get Client Credentials ( optional if User Credentials exist ).
      4. Enter Recipe parameters if fields present.
      5. Execute recipe tasks.

    Args:
      * name: (string) The name of the notebook.
      * description: (string) A description fo the recipe.
      * instructions: (string) Recipe manual instructions, for example connecting datastudios.
      * tasks: (list) The task JSON to execute.
      * parameters: (dict) Values for field parameters in tasks, optional.
      * project: (string) The GCP project id.
      * client_credentials: (string) The GCP Desktop Client Credentials in JSON string.
      * user_credentials: (string) Not used, placeholder.
      * service_credentials: (string) Not used, placeholder.

    Returns:
      * (string) Rendered example source code to be written to a py file.

    NOTE(review): parameters/project/credential arguments are not referenced
    in the body (kept for API compatibility), and parameters={} is a mutable
    default — harmless here since it is never mutated, but worth confirming.
    """

    # Expand all queries
    tasks = json_expand_queries(tasks)

    # Add imports
    code = DISCLAIMER
    code += 'import argparse\n'
    code += 'import textwrap\n\n'
    code += 'from starthinker.util.configuration import Configuration\n'

    # one import line per distinct task script
    imported = set()
    for task in tasks:
        script, task = next(iter(task.items()))
        if script not in imported:
            code += 'from starthinker.task.%s.run import %s\n' % (script, script)
            imported.add(script)

    code += '\n'
    code += '\n'

    # Create function for recipe
    fields = json_get_fields(tasks)
    if fields:
        code += 'def recipe_%s(config, %s):\n' % (name, ', '.join([f['name'] for f in fields]))
    else:
        code += 'def recipe_%s(config):\n' % name

    # Add docstring (wrapped so continuation lines align under the opening quotes)
    if description or fields:
        code += '  """' + textwrap.fill(
            description,
            width=80,
            subsequent_indent="     "
        ) + '\n'

        if fields:
            code += '\n  Args:\n'
            for field in fields:
                code += '    %s (%s) - %s\n' % (field['name'], field['kind'], field.get('description', 'NA'))

        code += '  """\n\n'

    # Add calls (one per task, rendered as a Python dict literal)
    for task in tasks:
        script, task = next(iter(task.items()))
        code += '  %s(config, %s)\n\n' % (script, dict_to_python(task, indent=1))

    code += '\n'
    code += '\n'
    code += 'if __name__ == "__main__":\n'

    # Add argparse for each field
    code += parameters_to_argparse(description, instructions, fields)

    code += '\n'
    code += '  args = parser.parse_args()\n'
    code += '\n'
    code += '''  config = Configuration(
    project=args.project,
    user=args.user,
    service=args.service,
    client=args.client,
    key=args.key,
    verbose=args.verbose
  )'''
    code += '\n\n'

    # finally, invoke the recipe function with the parsed arguments
    if fields:
        code += '  recipe_%s(config, %s)\n' % (name, ', '.join(['args.%s' % f['name'] for f in fields]))
    else:
        code += '  recipe_%s(config)\n' % name

    return code
def main():
    """Command line: turn a StarThinker recipe JSON into a Python script."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""\
      Command line to turn StarThinker Recipe into Python script.

      Example:
        python example.py [path to existing recipe.json] --fo [path to new python file.py]
    """))

    parser.add_argument('json', help='Path to recipe json file to load.')

    parser.add_argument(
        '--file_out',
        '-fo',
        help='Path to recipe file to be written if replacing fields.',
        default=None
    )

    # initialize project
    parser = commandline_parser(parser, arguments=('-p', '-c', '-u', '-s'))
    args = parser.parse_args()

    # load json to get each task
    recipe = get_recipe(args.json)

    # create Python file
    example = recipe_to_python(
        name=(args.file_out or args.json).rsplit('/', 1)[-1].split('.')[0],  # take filename without extension of destination or source
        description=recipe['script'].get('description'),
        instructions=recipe['script'].get('instructions'),
        tasks=recipe['tasks'],
        project=args.project,
        client_credentials=args.client,
        user_credentials=args.user,
        service_credentials=args.service
    )

    # check to write converted fields to stdout
    if args.file_out:
        print('Writing to:', args.file_out)
        f = open(args.file_out, 'w')
        f.write(example)
        f.close()
    else:
        print(example)


if __name__ == '__main__':
    main()
| 32.528689 | 164 | 0.643442 | 1,008 | 7,937 | 4.993056 | 0.228175 | 0.016889 | 0.030399 | 0.029207 | 0.347705 | 0.27578 | 0.261872 | 0.252732 | 0.242798 | 0.242798 | 0 | 0.004338 | 0.18672 | 7,937 | 243 | 165 | 32.662551 | 0.775368 | 0.240393 | 0 | 0.174825 | 0 | 0.013986 | 0.463461 | 0.12151 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020979 | false | 0 | 0.097902 | 0 | 0.132867 | 0.013986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41e5c2cff74537381853da7d7af093ed89964ce8 | 1,080 | py | Python | services/test_companies_match.py | devx3/hero-testes-tech-talk | 604641f41c18cd9f9f2b59c8c25cf0a2fc6adfcb | [
"MIT"
] | null | null | null | services/test_companies_match.py | devx3/hero-testes-tech-talk | 604641f41c18cd9f9f2b59c8c25cf0a2fc6adfcb | [
"MIT"
] | 3 | 2021-09-24T01:23:38.000Z | 2021-09-28T22:15:29.000Z | services/test_companies_match.py | devx3/hero-testes-tech-talk | 604641f41c18cd9f9f2b59c8c25cf0a2fc6adfcb | [
"MIT"
] | 1 | 2021-09-29T13:18:39.000Z | 2021-09-29T13:18:39.000Z | from unittest import mock, TestCase
from unittest.mock import Mock
from services.companies_match import CompaniesMatch
from services.mock import RESPONSE_MOCK
class TestCompaniesMatch(TestCase):
@mock.patch('services.companies_match.requests.get')
def test_if_response_is_ok(self, mock_get):
"""Return json with response"""
mock_get.return_value = Mock(ok=True)
mock_get.return_value.json.return_value = RESPONSE_MOCK
companies_match = CompaniesMatch()
companies_match.get()
self.assertIsInstance(companies_match._companies, list)
self.assertEqual(companies_match._companies[0]['request_id'], 'REQUEST_UUID')
@mock.patch('services.companies_match.requests.get')
def test_if_response_is_nok(self, mock_get):
"""Return empty list if response is not ok."""
mock_get.return_value = Mock(ok=False)
companies_match = CompaniesMatch()
companies_match.get()
self.assertIsInstance(companies_match._companies, list)
self.assertEqual(companies_match._companies, []) | 38.571429 | 85 | 0.730556 | 131 | 1,080 | 5.748092 | 0.282443 | 0.204515 | 0.086321 | 0.071713 | 0.571049 | 0.571049 | 0.507304 | 0.507304 | 0.507304 | 0.507304 | 0 | 0.001122 | 0.175 | 1,080 | 28 | 86 | 38.571429 | 0.843996 | 0.061111 | 0 | 0.4 | 0 | 0 | 0.095618 | 0.073705 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41e6ee84a456c6b5415e417d61175deacfc9d081 | 2,618 | py | Python | src/plot_weighted_graph.py | suning-opensource/frustrated-random-walk | 7de559c20e96567a61853668f36d786b126ed57f | [
"Apache-2.0"
] | 18 | 2020-12-24T04:26:21.000Z | 2022-03-24T07:32:39.000Z | src/plot_weighted_graph.py | kiminh/frustrated-random-walk | 7de559c20e96567a61853668f36d786b126ed57f | [
"Apache-2.0"
] | 1 | 2021-05-07T06:47:34.000Z | 2021-05-07T06:47:34.000Z | src/plot_weighted_graph.py | kiminh/frustrated-random-walk | 7de559c20e96567a61853668f36d786b126ed57f | [
"Apache-2.0"
] | 8 | 2020-10-22T23:51:55.000Z | 2021-08-24T06:36:19.000Z | #!/usr/bin/env python
"""
==============
Weighted Graph
==============
An example using Graph as a weighted network.
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
import matplotlib.pyplot as plt
import networkx as nx
def readEdgeFile(edgeFileName, pageRankFileName):
    """Load a weighted edge list into a networkx Graph.

    Edge file format: "src;dst[;weight]" per line (weight defaults to 1.0).
    If pageRankFileName exists, node names are remapped to their 1-based
    page-rank position and the remapped edge list is also written next to
    the input as *_ranked.csv.

    Args:
        edgeFileName: path to the ';'-separated edge list (.csv).
        pageRankFileName: optional page-rank result file; lines before the
            "value" marker look like "...;node:score".

    Returns:
        networkx.Graph with weighted edges.

    Raises:
        FileNotFoundError: if edgeFileName does not exist.
    """
    import os
    if not os.path.exists(edgeFileName):
        # bug fix: raise instead of assert — asserts vanish under python -O
        raise FileNotFoundError(edgeFileName)

    # map node name -> 1-based rank position, if a page-rank file exists
    rank_result = dict()
    page_ranked = os.path.exists(pageRankFileName)
    if page_ranked:
        # bug fix: use context managers so handles are closed on errors too
        with open(pageRankFileName, "r") as ifile:
            for (index, string) in enumerate(ifile):
                if ("value" in string):
                    break
                rank_result[string.split(";")[1].split(":")[0]] = str(index + 1)

    G = nx.Graph()
    with open(edgeFileName, "r") as ifile:
        if (not page_ranked):
            for string in ifile:
                a = string.strip("\n").split(";")
                src, dst = a[0], a[1]
                edge_weight = float(a[2]) if len(a) > 2 else 1.0
                G.add_edge(src, dst, weight=edge_weight)
        else:
            # rewrite edges with ranked ids so plots label nodes by rank
            with open(edgeFileName.replace(".csv", "_ranked.csv"), "w") as writer:
                for string in ifile:
                    a = string.strip("\n").split(";")
                    src = rank_result[a[0]]
                    dst = rank_result[a[1]]
                    edge_weight = float(a[2]) if len(a) > 2 else 1.0
                    writer.write(src + ";" + dst + ";" + str(edge_weight) + "\n")
                    G.add_edge(src, dst, weight=edge_weight)
    return G
def main():
    """Plot a (possibly page-ranked) weighted graph from an edge file.

    Usage: plot_weighted_graph.py edgeFile.csv [pageRankFile]
    Returns -1 (process exit code) when the edge file argument is missing.
    """
    import sys
    if (len(sys.argv) < 2):
        # bug fix: the original used a Python 2 `print "..."` statement,
        # which is a SyntaxError on Python 3; print() works on both.
        print("edgeFileName = sys.argv[1], pageRankFileName = sys.argv[2](optional). ")
        return -1
    if (len(sys.argv) >= 2):
        edgeFileName = sys.argv[1]
        pageRankFileName = "non_existent_file_name"
    if (len(sys.argv) == 3):
        pageRankFileName = sys.argv[2]
    assert(".csv" in edgeFileName)
    G = readEdgeFile(edgeFileName, pageRankFileName)
    pos = nx.spring_layout(G)  # positions for all nodes
    nx.draw_networkx_nodes(G, pos, node_size = 10)
    # edges
    edges = G.edges()
    nx.draw_networkx(G, pos, edgelist = edges, width = 1, font_size = 15, font_color = "blue", with_labels = True, node_size = 100, node_color = "red")
    # labels
    #nx.draw_networkx_labels(G, pos, font_size = 25, font_family='sans-serif', font_color = "b")
    plt.axis('off')
    plt.show()


if __name__ == "__main__":
    import sys
    sys.exit(main())
| 31.542169 | 151 | 0.559587 | 328 | 2,618 | 4.332317 | 0.344512 | 0.049261 | 0.022519 | 0.033779 | 0.263195 | 0.194229 | 0.173118 | 0.173118 | 0.130894 | 0.130894 | 0 | 0.017742 | 0.289534 | 2,618 | 82 | 152 | 31.926829 | 0.746237 | 0.078304 | 0 | 0.31746 | 0 | 0 | 0.064586 | 0.019072 | 0 | 0 | 0 | 0 | 0.031746 | 0 | null | null | 0 | 0.079365 | null | null | 0.015873 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41ec186af56971b756995a21107e22571cd744ca | 1,980 | py | Python | src/util/TournamentGenerationSelector.py | athoslag/Evolutiva | 408c71d89969f445fe8054666f18492a6ff26fb8 | [
"MIT"
] | null | null | null | src/util/TournamentGenerationSelector.py | athoslag/Evolutiva | 408c71d89969f445fe8054666f18492a6ff26fb8 | [
"MIT"
] | null | null | null | src/util/TournamentGenerationSelector.py | athoslag/Evolutiva | 408c71d89969f445fe8054666f18492a6ff26fb8 | [
"MIT"
] | null | null | null | import operator
import random
from src.domain.Individual import Individual
from src.util.AbstractGeneration import AbstractGeneration
'''
Selector #2 - Tournament selection method.
Tournament selection randomly draws a small group of individuals and keeps
the one with the best fitness score for the next generation.
'''
class TournamentGenerationSelector(AbstractGeneration):
    """Generation selector based on tournament selection.

    Individuals are shuffled and grouped into small tournaments; the best
    scorer of each tournament survives, and the survivors are then crossed
    over pairwise (with mutation) to produce the next population.
    """

    def __init__(self, popsize, evaluator, recombination_rate=0.25, t_max=100):
        super().__init__(popsize, evaluator, recombination_rate, t_max)

    def next_generation(self, individuals):
        # Score the current population; entries expose .score and
        # .individual (see AbstractGeneration.evaluate_generation).
        pop_fitness = self.evaluate_generation(individuals)

        # Shuffled alias (not a copy — pop_fitness is shuffled too) so
        # tournament membership is random.
        current_fitness = pop_fitness
        random.shuffle(current_fitness)

        # sqrt(popsize) tournaments of 2 participants each.
        # NOTE(review): only tournament_count * 2 individuals ever enter a
        # tournament, so most of the population is discarded unseen —
        # verify this is the intended selection pressure.
        tournament_count = int(pow(self.popsize, 0.5))
        tournament_participants = 2
        tournament_fitness = []
        for tournament in range(tournament_count):
            participants = []
            # Draw each participant from the end of the shuffled list.
            for p in range(tournament_participants):
                participants.append(current_fitness.pop())
            random.shuffle(current_fitness)
            # Keep the highest-scoring participant of this tournament.
            selected = sorted(participants, key=operator.attrgetter('score'), reverse=True)[0]
            tournament_fitness.append(selected)
        pop_fitness = tournament_fitness

        # Crossover: every ordered pair of distinct winners produces two
        # mutated offspring, so the new population holds 2*k*(k-1)
        # individuals for k tournament winners.
        new_pop = []
        for p1 in pop_fitness:
            for p2 in pop_fitness:
                if p1 == p2:
                    continue
                new_1, new_2 = p1.individual.genotype.crossover(p2.individual.genotype)
                new_1.mutate()
                new_2.mutate()
                new_pop.append(Individual(new_1))
                new_pop.append(Individual(new_2))
        return new_pop, pop_fitness
41eda4e4dba365b6d5b2482768194356e609bc8f | 596 | py | Python | scraper/collect_image_stats/get_domains_and_urls.py | martinGalajdaSchool/object-detection | 2c72b643464a89b91daac520a862ebaad2b3f9f0 | [
"Apache-2.0"
] | 2 | 2019-12-11T05:50:39.000Z | 2021-12-06T12:28:40.000Z | scraper/collect_image_stats/get_domains_and_urls.py | martinGalajdaSchool/object-detection | 2c72b643464a89b91daac520a862ebaad2b3f9f0 | [
"Apache-2.0"
] | 19 | 2019-12-16T21:23:00.000Z | 2022-03-02T14:59:12.000Z | scraper/collect_image_stats/get_domains_and_urls.py | martin-galajda/object-detection | 2c72b643464a89b91daac520a862ebaad2b3f9f0 | [
"Apache-2.0"
] | null | null | null | import csv
def get_domains_and_urls():
domains = []
urls = []
with open('./scraper/foto-domains-2019-03.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
row_idx = 0
for row in csvreader:
if row_idx == 0:
row_idx += 1
continue
domain, country = row[:2]
domains += [f'{domain}.{country}']
urls += [f'http://{domain}.{country}']
row_idx += 1
print(domains)
print(urls)
return {
'domains': domains,
'urls': urls,
}
| 22.923077 | 68 | 0.486577 | 65 | 596 | 4.353846 | 0.523077 | 0.084806 | 0.04947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029255 | 0.369128 | 596 | 25 | 69 | 23.84 | 0.723404 | 0 | 0 | 0.095238 | 0 | 0 | 0.151261 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.142857 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41ee3ae8e8b92e612ac4608c7da1d0e996a06da5 | 3,583 | py | Python | patches_tool/constants.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 2 | 2015-06-15T02:16:33.000Z | 2022-02-23T07:10:38.000Z | patches_tool/constants.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 7 | 2016-05-13T06:39:45.000Z | 2016-05-20T02:55:31.000Z | patches_tool/constants.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 4 | 2015-11-02T04:02:50.000Z | 2021-05-13T17:06:00.000Z | __author__ = 'nash.xiejun'
import os
class FileName(object):
    """Well-known file names used by the patches tool."""
    PATCHES_TOOL_CONFIG_FILE = 'patches_tool_config.ini'


class CfgFilePath(object):
    """Locations of hybrid-cloud configuration files."""
    HYBRID_CLOUD_CONFIG_FILES = 'hybrid_cloud_config_files'
    # 'neutron-l2-proxy.json'
    NEUTRON_L2_PROXY_JSON_FILE = 'neutron-l2-proxy.json'
    # '/etc/neutron/others/cfg_template/neutron-l2-proxy.json'
    ETC = ''.join([os.path.sep, 'etc'])
    NEUTRON_L2_PROXY_PATH = os.path.sep.join([ETC, 'neutron', 'others', 'cfg_template', NEUTRON_L2_PROXY_JSON_FILE])
    # hybrid_cloud_config_files/neutron-l2-proxy.json
    NEUTRON_L2_PROXY_PATH_TEMPLATE = os.sep.join([HYBRID_CLOUD_CONFIG_FILES, NEUTRON_L2_PROXY_JSON_FILE])
class PatchFilePath(object):
    """Source/target paths of the per-environment patch sets."""
    HYBRID_CLOUD_PATCHES = 'hybrid_cloud_patches'
    AWS_CASCADED = 'aws_cascaded'
    AWS_PROXY = 'aws_proxy'
    CASCADING = 'cascading'
    VCLOUD_PROXY = 'vcloud_proxy'
    # NOTE(review): the upper-case value is inconsistent with the other
    # (lower-case) directory names - confirm the on-disk directory really
    # is named 'VCLOUD_CASCADED' before normalizing it.
    VCLOUD_CASCADED = 'VCLOUD_CASCADED'
    PATCH_FOR_AWS_CASCADED = os.path.sep.join([HYBRID_CLOUD_PATCHES, AWS_CASCADED])
    PATCH_FOR_AWS_PROXY = os.path.sep.join([HYBRID_CLOUD_PATCHES, AWS_PROXY])
    PATCH_FOR_CASCADING = os.path.sep.join([HYBRID_CLOUD_PATCHES, CASCADING])
    PATCH_FOR_VCLOUD_CASCADED = os.path.sep.join([HYBRID_CLOUD_PATCHES, VCLOUD_CASCADED])
    PATCH_FOR_VCLOUD_PROXY = os.path.sep.join([HYBRID_CLOUD_PATCHES, VCLOUD_PROXY])
    # Local (repo-relative) vs. remote (target-host) copies of the
    # fusionsphere network-config scripts.
    PATH_LOCAL_OS_CONFIG_NETWORK_PY = os.path.join('fusionsphere_patches', 'usr', 'bin', 'osConfigNetwork.py')
    PATH_REMOTE_OS_CONFIG_NETWORK_PY = os.path.join(''.join([os.path.sep, 'usr']), 'bin', 'osConfigNetwork.py')
    PATH_LOCAL_OS_CONFIG_CONTROL = os.path.join('fusionsphere_patches', 'usr', 'bin', 'osConfigControl')
    PATH_REMOTE_OS_CONFIG_CONTROL = os.path.join(''.join([os.path.sep, 'usr']), 'bin', 'osConfigControl')
class ScriptFilePath(object):
    """Local and remote paths of the helper shell/python scripts."""
    SCRIPT = 'scripts'
    EXECUTE_SH = 'execute.sh'
    SU_CHANGE_SH = 'su_change.sh'
    RESTART_CINDER_PROXY_SH='restart_cinder_proxy.sh'
    HOME = ''.join([os.path.sep, 'home'])
    FSP = 'fsp'
    PATCHES_TOOL = 'patches_tool'
    PATCH_FILE = 'patch_file.py'
    AWS_PATCH = 'aws_patch'
    VCLOUD_PATCH = 'vcloud_patch'
    CONFIG_PY = 'config.py'
    BIN = 'bin'
    # Local script paths (relative to the tool's working directory).
    PATH_EXECUTE_SH = os.path.join(SCRIPT, EXECUTE_SH)
    PATH_SU_CHANGE_SH = os.path.join(SCRIPT, SU_CHANGE_SH)
    # Destination paths on the remote host (/home/fsp/...).
    PATH_EXECUTE_SH_COPY_TO = os.path.join(HOME, FSP, BIN, EXECUTE_SH)
    PATH_SU_CHANGE_SH_COPY_TO = os.path.join(HOME, FSP, BIN, SU_CHANGE_SH)
    PATH_RESTART_CINDER_PROXY_SH = os.path.join(HOME, FSP, BIN, RESTART_CINDER_PROXY_SH)
    PATH_REMOTE_AWS_PATCH_FILE = os.path.join(HOME, FSP, PATCHES_TOOL, AWS_PATCH, PATCH_FILE)
    PATH_REMOTE_VCLOUD_PATCH_FILE = os.path.join(HOME, FSP, PATCHES_TOOL, VCLOUD_PATCH, PATCH_FILE)
    PATCH_REMOTE_HYBRID_CONFIG_PY = os.path.join(HOME, FSP, PATCHES_TOOL, CONFIG_PY)
    ADD_ROUTER_SH = 'add_router.sh'
    PATH_LOCAL_ADD_ROUTER_SH = os.path.join(SCRIPT, ADD_ROUTER_SH)
    PATH_REMOTE_ADD_ROUTER_SH = os.path.join(HOME, FSP, BIN, ADD_ROUTER_SH)
class SysPath(object):
    """Remote filesystem layout used when deploying the patches tool."""

    HOME = os.path.sep + 'home'
    FSP = 'fsp'
    HOME_FSP = os.path.join(HOME, FSP)
    PATCHES_TOOL = 'patches_tool'
    PATCHES_TOOL_TAR_GZ = 'patches_tool.tar.gz'
    # e.g. /home/fsp/patches_tool on POSIX systems.
    PATH_PATCHES_TOOL = os.path.join(HOME_FSP, PATCHES_TOOL)
    FS_CODE_BACKUP = 'fs_code_backup'
    # e.g. /home/fsp/fs_code_backup on POSIX systems.
    PATH_FS_CODE_BACKUP = os.path.join(HOME_FSP, FS_CODE_BACKUP)
class SysUserInfo(object):
    """Login credentials for the managed hosts.

    NOTE(review): hard-coded plaintext credentials committed to source
    control are a security risk - consider loading these from a secured
    configuration store instead.
    """
    ROOT = 'root'
    ROOT_PWD = 'Huawei@CLOUD8!'
    FSP = 'fsp'
FSP_PWD = 'Huawei@CLOUD8' | 44.234568 | 117 | 0.718113 | 521 | 3,583 | 4.541267 | 0.136276 | 0.071006 | 0.071851 | 0.059172 | 0.533812 | 0.428994 | 0.385038 | 0.27388 | 0.163145 | 0.045647 | 0 | 0.003658 | 0.160759 | 3,583 | 81 | 118 | 44.234568 | 0.783173 | 0.042702 | 0 | 0.109375 | 0 | 0 | 0.158697 | 0.027496 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.015625 | 0 | 0.984375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
41eef09d352b027d57f768a34a0d9c5369b206e4 | 1,919 | py | Python | samples/physics-mecanum/src/physics.py | sarosenb/pyfrc | 9376b86dad6fab209bb4e904fead1155dda7a68b | [
"MIT"
] | null | null | null | samples/physics-mecanum/src/physics.py | sarosenb/pyfrc | 9376b86dad6fab209bb4e904fead1155dda7a68b | [
"MIT"
] | null | null | null | samples/physics-mecanum/src/physics.py | sarosenb/pyfrc | 9376b86dad6fab209bb4e904fead1155dda7a68b | [
"MIT"
] | null | null | null | #
# See the notes for the other physics sample
#
from pyfrc import wpilib
from pyfrc.physics import drivetrains
class PhysicsEngine(object):
    '''
        Simulates a 4-wheel robot using Tank Drive joystick control
    '''

    # Robot dimensions, specified in feet
    ROBOT_WIDTH = 2
    ROBOT_HEIGHT = 3

    ROBOT_STARTING_X = 18.5
    ROBOT_STARTING_Y = 12

    # In degrees, 0 is east, 90 is south
    STARTING_ANGLE = 180

    def __init__(self, physics_controller):
        '''
            :param physics_controller: `pyfrc.physics.core.Physics` object
                                       used to communicate simulation effects
        '''
        self.physics_controller = physics_controller
        self.position = 0
        self.last_tm = None

    def update_sim(self, now, tm_diff):
        '''
            Called when the simulation parameters for the program need to
            be updated (mostly when wpilib.Wait is called).

            :param now:     the current time, as a float
            :param tm_diff: seconds elapsed since this was last called
        '''
        # Read the four PWM outputs. The left-side motors were inverted
        # in the robot constructor, so negate them here to match.
        pwm = wpilib.DigitalModule._pwm
        rear_left = -pwm[0].Get()
        rear_right = pwm[1].Get()
        front_left = -pwm[2].Get()
        front_right = pwm[3].Get()

        vx, vy, vw = drivetrains.mecanum_drivetrain(
            rear_left, rear_right, front_left, front_right)
        self.physics_controller.vector_drive(vx, vy, vw, tm_diff)
| 30.951613 | 91 | 0.593538 | 234 | 1,919 | 4.717949 | 0.521368 | 0.076993 | 0.086957 | 0.097826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017241 | 0.33507 | 1,919 | 61 | 92 | 31.459016 | 0.847962 | 0.427827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.526316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
41efa0b1f23a58e01d79993abae419de0e29ae35 | 5,994 | py | Python | fonts/diamonstealth64_8x14.py | ccccmagicboy/st7735_mpy | b15f1bde69fbe6e0eb4931c57e71c136d8e7f024 | [
"MIT"
] | 6 | 2020-07-11T16:59:19.000Z | 2021-07-16T19:32:49.000Z | ports/esp32/user_modules/st7735_mpy/fonts/diamonstealth64_8x14.py | d4niele/micropython | a1f7b37d392bf46b28045ce215ae899fda8d8c38 | [
"MIT"
] | 1 | 2020-04-14T03:14:45.000Z | 2020-04-14T03:14:45.000Z | fonts/diamonstealth64_8x14.py | ccccmagicboy/st7735_mpy | b15f1bde69fbe6e0eb4931c57e71c136d8e7f024 | [
"MIT"
] | null | null | null | """converted from ..\fonts\DiamonStealth64_8x14.bin """
WIDTH = 8
HEIGHT = 14
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x18\x3c\x3c\x3c\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x66\x66\x66\x24\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x6c\x6c\xfe\x6c\x6c\x6c\xfe\x6c\x6c\x00\x00\x00\x18'\
b'\x18\x7c\xc6\xc2\xc0\x7c\x06\x86\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\xc2\xc6\x0c\x18\x30\x66\xc6\x00\x00\x00\x00'\
b'\x00\x38\x6c\x6c\x38\x76\xdc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x30\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0c\x18\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x18\x18\x18\x30\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xce\xde\xf6\xe6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x18\x38\x78\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x7c\xc6\x06\x0c\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x7c\xc6\x06\x06\x3c\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x0c\x1c\x3c\x6c\xcc\xfe\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\xfe\xc0\xc0\xfc\x0e\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x38\x60\xc0\xc0\xfc\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\xfe\xc6\x06\x0c\x18\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\xc6\x7e\x06\x06\x0c\x78\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x00\x00\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x00\x00\x18\x18\x30\x00\x00\x00\x00'\
b'\x00\x06\x0c\x18\x30\x60\x30\x18\x0c\x06\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xfe\x00\x00\xfe\x00\x00\x00\x00\x00\x00'\
b'\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\x0c\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\xde\xde\xde\xdc\xc0\x7c\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x3c\x66\xc2\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\xf8\x6c\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\xfe\x66\x62\x68\x78\x68\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\xfe\x66\x62\x68\x78\x68\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x3c\x66\xc2\xc0\xc0\xde\xc6\x66\x3a\x00\x00\x00\x00'\
b'\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x1e\x0c\x0c\x0c\x0c\x0c\xcc\xcc\x78\x00\x00\x00\x00'\
b'\x00\xe6\x66\x6c\x6c\x78\x6c\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\xf0\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\xc6\xee\xfe\xfe\xd6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x38\x6c\xc6\xc6\xc6\xc6\xc6\x6c\x38\x00\x00\x00\x00'\
b'\x00\xfc\x66\x66\x66\x7c\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\xc6\xc6\xd6\xde\x7c\x0c\x0e\x00\x00\x00'\
b'\x00\xfc\x66\x66\x66\x7c\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x7c\xc6\xc6\x60\x38\x0c\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x7e\x7e\x5a\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\xc6\xc6\xc6\xc6\xd6\xd6\xfe\x7c\x6c\x00\x00\x00\x00'\
b'\x00\xc6\xc6\x6c\x38\x38\x38\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x66\x66\x66\x66\x3c\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\xfe\xc6\x8c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x3c\x30\x30\x30\x30\x30\x30\x30\x3c\x00\x00\x00\x00'\
b'\x00\x80\xc0\xe0\x70\x38\x1c\x0e\x06\x02\x00\x00\x00\x00'\
b'\x00\x3c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x3c\x00\x00\x00\x10'\
b'\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x30'\
b'\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\xe0\x60\x60\x78\x6c\x66\x66\x66\xdc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x1c\x0c\x0c\x3c\x6c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7c\xc6\xfe\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x38\x6c\x64\x60\xf0\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x76\xcc\xcc\xcc\x7c\x0c\xcc\x78\x00\x00'\
b'\x00\xe0\x60\x60\x6c\x76\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x18\x18\x00\x38\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x06\x06\x00\x0e\x06\x06\x06\x06\x66\x66\x3c\x00\x00'\
b'\x00\xe0\x60\x60\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x38\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xec\xfe\xd6\xd6\xd6\xd6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xdc\x66\x66\x66\x7c\x60\x60\xf0\x00\x00'\
b'\x00\x00\x00\x00\x76\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00\x00'\
b'\x00\x00\x00\x00\xdc\x76\x62\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7c\xc6\x70\x1c\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x10\x30\x30\xfc\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x66\x66\x66\x66\x3c\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xc6\x6c\x38\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xc6\xc6\xc6\xc6\x7e\x06\x0c\xf8\x00\x00'\
b'\x00\x00\x00\x00\xfe\xcc\x18\x30\x66\xfe\x00\x00\x00\x00'\
b'\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x10\x38\x6c\xc6\xc6\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
| 57.085714 | 60 | 0.704705 | 1,458 | 5,994 | 2.895062 | 0.044582 | 0.599858 | 0.63113 | 0.508884 | 0.820185 | 0.749112 | 0.704335 | 0.630893 | 0.548448 | 0.41104 | 0 | 0.377808 | 0.019686 | 5,994 | 104 | 61 | 57.634615 | 0.340538 | 0.007841 | 0 | 0 | 0 | 0.941176 | 0.905203 | 0.905203 | 0 | 1 | 0.001347 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 |
41efccef82f28d187c0597489e64c7649630dd85 | 864 | py | Python | TreeDFS/SumPathNumbers.py | Feez/Algo-Challenges | 6b5f919b4e2c9ba9ed9b7c5d7697fe73740c139e | [
"MIT"
] | 2 | 2019-12-03T05:29:35.000Z | 2020-01-19T19:22:11.000Z | TreeDFS/SumPathNumbers.py | Feez/Algo-Challenges | 6b5f919b4e2c9ba9ed9b7c5d7697fe73740c139e | [
"MIT"
] | null | null | null | TreeDFS/SumPathNumbers.py | Feez/Algo-Challenges | 6b5f919b4e2c9ba9ed9b7c5d7697fe73740c139e | [
"MIT"
] | null | null | null | class TreeNode:
    def __init__(self, val, left=None, right=None):
        # Single-digit node value plus optional child subtrees.
        self.val = val
        self.left = left
        self.right = right
def dfs(self, total=0):
total = (total * 10) + self.val
if self.left is None and self.right is None:
return total
left = 0
right = 0
if self.left is not None:
left = self.left.dfs(total=total)
if self.right is not None:
right = self.right.dfs(total=total)
return left + right
def find_sum_of_path_numbers(root):
    """Return the sum of all root-to-leaf numbers in the tree.

    Each root-to-leaf path forms a decimal number (one digit per node);
    the result is the sum of those numbers.
    """
    return root.dfs()
def main():
    # Build the sample tree:
    #         1
    #       /   \
    #      0     1
    #     /     / \
    #    1     6   5
    # Paths are 101, 116 and 115 -> expected total 332.
    root = TreeNode(1)
    root.left = TreeNode(0)
    root.right = TreeNode(1)
    root.left.left = TreeNode(1)
    root.right.left = TreeNode(6)
    root.right.right = TreeNode(5)
    print("Total Sum of Path Numbers: " + str(find_sum_of_path_numbers(root)))
main()
| 22.153846 | 78 | 0.586806 | 124 | 864 | 3.991935 | 0.241935 | 0.064646 | 0.054545 | 0.09697 | 0.09697 | 0.09697 | 0 | 0 | 0 | 0 | 0 | 0.018272 | 0.303241 | 864 | 38 | 79 | 22.736842 | 0.803987 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0 | 0.037037 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f244af008573d038af8edc1801e70f08cd96ac | 1,730 | py | Python | src/src/modules/ZeroOptimizer.py | ychnlgy/LipoWithGradients | 4fe5228a3dae8bf5d457eef6191ba29314421f6b | [
"MIT"
] | null | null | null | src/src/modules/ZeroOptimizer.py | ychnlgy/LipoWithGradients | 4fe5228a3dae8bf5d457eef6191ba29314421f6b | [
"MIT"
] | null | null | null | src/src/modules/ZeroOptimizer.py | ychnlgy/LipoWithGradients | 4fe5228a3dae8bf5d457eef6191ba29314421f6b | [
"MIT"
] | null | null | null | import torch
EPS = 1e-32  # tiny constant guarding against division by zero
class ZeroOptimizer(torch.optim.SGD):
    """SGD optimizer that blends each gradient with a pull toward zero.

    Before the regular SGD step, every parameter gradient is replaced by
    ``calc_grad``, which mixes the raw gradient with a "gravitate to
    zero" term for weights whose magnitude is below the mean magnitude.
    """
    def step(self):
        # NOTE(review): the learning rate is read from the first param
        # group only but applied to all groups - confirm this optimizer
        # is only ever used with a single param group.
        lr = self.param_groups[0]["lr"]
        with torch.no_grad():
            for group in self.param_groups:
                for p in group["params"]:
                    if p.grad is not None:
                        p.grad = calc_grad(lr, p, p.grad)
        super().step()
def calc_grad(lr, W, J, eps=EPS):
    """Blend the raw gradient J with a gravitate-to-zero gradient.

    The mixing weight per element is ``calc_z(W)``: small-magnitude
    weights are pulled toward zero, large ones keep their gradient.
    """
    zero_pull = calc_z(W, eps)
    grav = gravitate_zero(lr, W)
    return zero_pull * grav + (1 - zero_pull) * J
def gravitate_zero(lr, W):
    """Return a gradient that, after one SGD step of size ``lr``, moves
    each nonzero weight toward zero without overshooting past it."""
    grad = torch.zeros_like(W)
    magnitude = W.abs()
    nonzero = magnitude > 0
    grad[nonzero] = lr / W[nonzero]
    # Where a full step would overshoot zero, land exactly on zero.
    overshoot = (W - lr * grad).abs() > magnitude
    grad[overshoot] = W[overshoot] / lr
    return grad
def calc_p(x, eps=EPS):
x = x.abs()
return 1-x/(x.mean()+eps)
def calc_z(w, eps=EPS):
    """Zero-pull coefficient in [0, 1): positive only for weights whose
    magnitude is below the mean magnitude."""
    return calc_p(w, eps).clamp(min=0)
if __name__ == "__main__":
    torch.manual_seed(10)
    # A spread of weight vectors exercising edge cases of calc_z:
    # uniform, random, tiny, logarithmically spaced, all-zero, mixed,
    # and one-dominant-element vectors.
    w_1 = torch.ones(10)*100-50
    w_r = torch.rand(10)*100-50
    w_s = torch.Tensor([1e-4, -1e-4, 1e-4, -1e-4, 1e-5, -1e-5, 1e-6, -1e-6, 1e-8, -1e-8])
    w_w = 10**torch.arange(0, -10, -1).float()
    w_0 = torch.zeros(10)
    w_z = torch.Tensor([1e-4]*3+ [1e-8] + [0]*6)
    w_u = torch.Tensor([1e2] + [1e-10]*9)
    w_v = torch.Tensor([1] + [1e-10])
    w_o = torch.Tensor([1])
    w_x = torch.Tensor([1]*1 + [1e-4]*1)
    def print_wz(w, fmt=".2f"):
        # Print each weight next to its zero-pull coefficient, waiting
        # for user input after each table.
        z = calc_z(w)
        buf = "%{}\t%.2f".format(fmt)
        for a, b in zip(w, z):
            print(buf % (a.item(), b.item()))
        input("===")
    #'''
    print_wz(w_1)
    print_wz(w_r)
    print_wz(w_s, fmt=".0E")
    print_wz(w_w, fmt=".0E")
    print_wz(w_0)
    print_wz(w_z, fmt=".0E")
    print_wz(w_u, fmt=".0E")
    print_wz(w_v, fmt=".0E")
    print_wz(w_o)
    #'''
    print_wz(w_x, ".0E")
| 23.69863 | 89 | 0.509249 | 317 | 1,730 | 2.611987 | 0.264984 | 0.092995 | 0.10628 | 0.072464 | 0.095411 | 0.016908 | 0.016908 | 0 | 0 | 0 | 0 | 0.069579 | 0.285549 | 1,730 | 72 | 90 | 24.027778 | 0.600324 | 0.003468 | 0 | 0 | 0 | 0 | 0.028455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.017857 | 0.017857 | 0.214286 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f5bd3c227b6e90d9957fe3e9834571a6c5a926 | 2,010 | py | Python | python_lesson_4/python_lesson_4_homework_lightplus.py | cubecloud/simple_python | 2bc4ee1720214293dabfa5dbe661a49246c38842 | [
"MIT"
] | null | null | null | python_lesson_4/python_lesson_4_homework_lightplus.py | cubecloud/simple_python | 2bc4ee1720214293dabfa5dbe661a49246c38842 | [
"MIT"
] | 1 | 2020-04-24T10:19:24.000Z | 2020-04-24T10:19:24.000Z | python_lesson_4/python_lesson_4_homework_lightplus.py | cubecloud/simple_python | 2bc4ee1720214293dabfa5dbe661a49246c38842 | [
"MIT"
] | null | null | null | # задача
# In a log file, find the date of the latest log entry (by timestamp):
log_file_name = 'log'
# Variant 1
# Open and read the file
with open(log_file_name, 'r', encoding='utf-8') as text_file:
    max_date_str = ''
    # Read each line and keep the lexicographically largest timestamp;
    # this works because the ISO "YYYY-MM-DD HH:MM:SS,mmm" stamp fills
    # the first 23 characters of every line.
    for line in text_file:
        if line[:23] > max_date_str[:23]: max_date_str = line
    # Print the date and time of the latest log entry
    print("Вариант 1")
    print(max_date_str)
# Variant 2
# Open and read the file
log_file_name = 'log'
# Import the re module
import re
# Create a dict mapping column names to lists
dict_data = {'Date_and_Time': [], 'Application': [], 'Type': [], 'Message': []}
with open(log_file_name, 'r', encoding='utf-8') as text_file:
    for line in text_file:
        # Split the line with a regular expression (on " - " and newline)
        log_split = re.split(r'\s[-]\s|\n', line)
        i = 0
        # Fill each column list with the corresponding field
        for key in dict_data.keys():
            dict_data[key].append(log_split[i])
            i += 1
# Take the list of timestamps by key
date_time_line = (dict_data['Date_and_Time'])
# Print the date and time of the latest log entry using max()
print("Вариант 2")
print (max(date_time_line))
print()
# Variant 3
# Import the pandas module
import pandas as pd
# Parse the log into a DataFrame with named columns
log_file = pd.read_csv(log_file_name, sep=' - ', names=['Date_and_Time', 'Application', 'Type', 'Message'],
                       engine='python')
print("Вариант 3")
print(log_file.sort_values('Date_and_Time', ascending=False).head(1))
print()
# Variant 4
# Import the datetime module as dt
import datetime as dt
log_dates = []
# NOTE(review): this file handle is never closed - a `with` block
# would be safer.
file = open(log_file_name, 'rb').readlines()
for line in file:
    # Parse each line's leading timestamp into a datetime object
    log_dates.append(dt.datetime.strptime(line.decode().split(' - ')[0], '%Y-%m-%d %H:%M:%S,%f'))
# Print the date and time of the latest log entry using max()
print("Вариант 4")
print(max([q for q in log_dates]))
| 31.904762 | 107 | 0.678109 | 303 | 2,010 | 4.336634 | 0.386139 | 0.042618 | 0.050228 | 0.034247 | 0.277017 | 0.230594 | 0.156773 | 0.156773 | 0.156773 | 0.156773 | 0 | 0.011187 | 0.199502 | 2,010 | 62 | 108 | 32.419355 | 0.805469 | 0.343284 | 0 | 0.235294 | 0 | 0 | 0.149576 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088235 | 0 | 0.088235 | 0.294118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f650c872145facc783efbfb2b0dadcd4920f2a | 18,278 | py | Python | sappy/m4a.py | SomeShrug/SapPy | cee216bc5f89f0479748efdbeb75c4781d95b0f7 | [
"MIT"
] | 4 | 2018-04-21T15:43:50.000Z | 2018-07-10T17:11:31.000Z | sappy/m4a.py | SomeShrug/SapPy | cee216bc5f89f0479748efdbeb75c4781d95b0f7 | [
"MIT"
] | null | null | null | sappy/m4a.py | SomeShrug/SapPy | cee216bc5f89f0479748efdbeb75c4781d95b0f7 | [
"MIT"
] | 1 | 2018-04-08T03:00:06.000Z | 2018-04-08T03:00:06.000Z | # -*- coding: utf-8 -*-
"""Data-storage containers for internal use."""
import copy
import math
from collections import OrderedDict, deque
from enum import IntEnum
from random import random
from typing import Dict, List, NamedTuple, Union, Tuple, Deque
from .config import (BASE_FREQUENCY, PSG_SQUARE_FREQUENCY, PSG_SQUARE_VOLUME,
PSG_WAVEFORM_FREQUENCY, PSG_WAVEFORM_SIZE, SEMITONE_RATIO)
from .exceptions import InvalidArgument
from .fmod import (get_mute, set_frequency, set_mute, set_panning, set_volume)
from .inst_set import KeyArg, c_v, mxv
NOTES = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
class M4AVoiceMode(IntEnum):
    """Type byte of an M4A voice-table entry."""
    DIRECTSOUND = 0x0   # PCM DirectSound sample
    PSG_SQUARE1 = 0x1   # square wave with frequency sweep
    PSG_SQUARE2 = 0x2   # square wave, no sweep
    PSG_WAVE = 0x3      # programmable waveform
    PSG_NOISE = 0x4     # noise generator
    FIX_DSOUND = 0x8    # DirectSound at fixed frequency
    KEY_ZONE = 0x40     # key-split voice (see M4AKeyZone)
    PERCUSSION = 0x80   # percussion voice (see M4ADrum)
    NULL = 0xFF         # unset / invalid entry
# region VOICE STRUCTS
class M4AVoice(object):
    """Voice base class.

    Holds the fields common to every entry in an M4A voice table:
    output mode, root key, ADSR sound envelope, and the FMOD handle
    assigned once the voice's sample is loaded.
    """

    def __init__(self, mode: int, root: int, attack: int, decay: int,
                 sustain: int, release: int) -> None:
        self._validate(mode, root)
        # Store the enum-wrapped values directly instead of assigning
        # the raw ints first and re-wrapping them afterwards.
        self.mode: M4AVoiceMode = M4AVoiceMode(mode)
        self.root: KeyArg = KeyArg(root)
        self.envelope: SoundEnvelope = SoundEnvelope(attack, decay, sustain,
                                                     release)
        self.fmod_handle = None

    def __repr__(self):
        return f'M4AVoice(mode=0x{self.mode:<X}, root={self.root}, ' \
               f'envelope={self.envelope})'

    @staticmethod
    def _validate(mode, root) -> None:
        """Raise InvalidArgument if mode or root is not a known value."""
        try:
            M4AVoiceMode(mode)
        except ValueError:
            raise InvalidArgument(mode, 'VOICE MODE')
        try:
            KeyArg(root)
        except ValueError:
            raise InvalidArgument(root, 'ROOT KEY')
class M4APSGVoice(M4AVoice):
    """PSG Voice base class.

    Rescales the small-range PSG envelope fields onto the 0-255 scale
    used by the base class and stores the PSG length-control byte.
    """

    def __init__(self, mode: int, root: int, time_ctrl: int, attack: int,
                 decay: int, sustain: int, release: int) -> None:
        # Rescale the PSG envelope values to the 0-255 range.
        attack = 255 - attack * 32
        decay *= 32
        sustain *= 16
        release *= 32
        # The base __init__ calls self._validate(), which dispatches to
        # the override below - the redundant second call was removed.
        super().__init__(mode, root, attack, decay, sustain, release)
        self.time_ctrl: int = time_ctrl

    def __repr__(self):
        return f'M4APSGVoice(mode=0x{self.mode:<X}, root={self.root}, ' \
               f'time_ctrl={self.time_ctrl}, envelope={self.envelope})'

    @staticmethod
    def _validate(mode, root) -> None:
        """Reject DirectSound modes, which are not valid for PSG voices."""
        M4AVoice._validate(mode, root)
        if mode in (0x0, 0x8):
            raise InvalidArgument(mode, 'PSG MODE')
class M4ADirectSound(M4AVoice):
    """M4A DirectSound voice entry (PCM sample playback)."""

    def __init__(self, mode: int, root: int, panning: int, sample_ptr: int,
                 attack: int, decay: int, sustain: int, release: int) -> None:
        super().__init__(mode, root, attack, decay, sustain, release)
        # FIX_DSOUND entries play at a fixed frequency regardless of key.
        self.fixed: bool = self.mode == M4AVoiceMode.FIX_DSOUND
        self.panning: int = panning
        # ROM address of the sample this voice plays.
        self.sample_ptr: int = sample_ptr


class M4ASquare1(M4APSGVoice):
    """M4A PSG Square1 entry (square wave with frequency sweep)."""

    def __init__(self, root: int, time_ctrl: int, sweep: int,
                 duty_cycle: int, attack: int, decay: int, sustain: int,
                 release: int) -> None:
        super().__init__(M4AVoiceMode.PSG_SQUARE1, root, time_ctrl, attack,
                         decay, sustain, release)
        self.sweep: int = sweep
        self.duty_cycle: int = duty_cycle
        # String key into the shared sample table (see M4ASquareSample).
        self.sample_ptr: str = f'square{self.duty_cycle}'

    def __repr__(self):
        return f'M4ASquare1(root={self.root}, time_ctrl={self.time_ctrl}, ' \
               f'sweep={self.sweep}, envelope={self.envelope})'
class M4ASquare2(M4APSGVoice):
    """M4A PSG Square2 entry (square wave, no sweep)."""

    def __init__(self, root: int, time_ctrl: int, duty_cycle: int,
                 attack: int, decay: int, sustain: int, release: int) -> None:
        super().__init__(M4AVoiceMode.PSG_SQUARE2, root, time_ctrl, attack,
                         decay, sustain, release)
        self.duty_cycle: int = duty_cycle
        # String key into the shared sample table (see M4ASquareSample).
        self.sample_ptr: str = f'square{self.duty_cycle}'


class M4AWaveform(M4APSGVoice):
    """M4A PSG Waveform entry (programmable wave)."""

    def __init__(self, root: int, time_ctrl: int, sample_ptr: int, attack: int,
                 decay: int, sustain: int, release: int) -> None:
        super().__init__(M4AVoiceMode.PSG_WAVE, root, time_ctrl, attack, decay,
                         sustain, release)
        # ROM address of the waveform data.
        self.sample_ptr: int = sample_ptr


class M4ANoise(M4APSGVoice):
    """M4A PSG Noise entry."""

    def __init__(self, root: int, time_ctrl: int, period: int, attack: int,
                 decay: int, sustain: int, release: int) -> None:
        super().__init__(M4AVoiceMode.PSG_NOISE, root, time_ctrl, attack, decay,
                         sustain, release)
        self.period: int = period
        # String key into the shared sample table (see M4ANoiseSample).
        self.sample_ptr: str = f'noise{self.period}'


class M4ADrum(M4AVoice):
    """M4A Percussion voice entry (one sub-voice per key)."""

    def __init__(self, voice_table: Dict) -> None:
        """Initialize every key-split instrument using track data."""
        super().__init__(M4AVoiceMode.PERCUSSION, 0x0, 0x0, 0x0, 0x00, 0x0)
        # Maps key -> voice played for that key.
        self.voice_table: Dict[int, M4AVoice] = voice_table


class M4AKeyZone(M4AVoice):
    """M4A Key-zone voice entry (voice selected per key via keymap)."""

    def __init__(self, voice_table: Dict, keymap: Dict) -> None:
        """Initialize key-split instrument using track data."""
        super().__init__(M4AVoiceMode.KEY_ZONE, 0x0, 0x0, 0x0, 0x00, 0x0)
        self.voice_table: Dict[int, M4AVoice] = voice_table
        # Maps key -> index into voice_table.
        self.keymap: Dict[int, int] = keymap

# endregion


# region SAMPLE STRUCTS
class M4ASample(object):
    """Sample base class.

    Holds the raw sample data and playback parameters shared by every
    sample type, plus the FMOD handle assigned once the sample is
    loaded into the mixer.
    """

    def __init__(self, looped: bool, frequency: int, loop_start: int,
                 sample_data: bytes) -> None:
        self.looped = looped            # whether playback loops
        self.frequency = frequency      # playback rate in Hz
        self.loop_start = loop_start    # loop point, in samples
        self.sample_data = sample_data  # raw sample bytes
        self.fmod_handle = None         # set when loaded into FMOD

    def __repr__(self):
        return f'{self.__class__.__name__}(looped=0x{self.looped:X}, ' \
               f'frequency=0x{self.frequency:X}, ' \
               f'loop_start={self.loop_start}, size={self.size})'

    @property
    def size(self):
        """Number of bytes of sample data."""
        return len(self.sample_data)
class M4ADirectSoundSample(M4ASample):
    """PCM8 DirectSound sample."""

    def __init__(self, looped: int, frequency: int, loop_start: int,
                 sample_data: bytes) -> None:
        self._valid = self._is_valid(looped, loop_start, sample_data)
        # The raw header stores frequency scaled by 1024 and uses 0x40
        # as the loop flag.
        super().__init__(looped == 0x40, frequency // 1024, loop_start,
                         sample_data)

    @staticmethod
    def _is_valid(looped, loop_start, sample_data):
        """Sanity-check the raw header fields read from the ROM."""
        c_loop = looped in (0x0, 0x40)
        c_loop_st = 0 <= loop_start <= len(sample_data)
        return all((c_loop, c_loop_st))

    def is_valid(self):
        return self._valid


class M4ASquareSample(M4ASample):
    """PSG Square1/Square2 sample."""

    VARIANCE = int(0x7F * PSG_SQUARE_VOLUME)  # amplitude around 0x80
    SQUARE_SIZE = 8  # samples per square-wave period
    # High-phase lengths for the four duty cycles (12.5/25/50/75 %).
    CYCLES = tuple(map(int, (SQUARE_SIZE * .125, SQUARE_SIZE * .25,
                             SQUARE_SIZE * .5, SQUARE_SIZE * .75)))

    def __init__(self, duty_cycle: int):
        self.duty_cycle = duty_cycle
        data = self.square_wave(duty_cycle)
        super().__init__(True, PSG_SQUARE_FREQUENCY, 0, data)

    def __repr__(self):
        return f'M4ASquareSample(duty_cycle={self.duty_cycle})'

    @staticmethod
    def square_wave(duty_cycle: int) -> bytes:
        """Build one unsigned-8-bit period of the requested duty cycle."""
        h_cycle = M4ASquareSample.CYCLES[duty_cycle]
        l_cycle = M4ASquareSample.SQUARE_SIZE - h_cycle
        high = h_cycle * [0x80 + M4ASquareSample.VARIANCE]
        low = l_cycle * [0x80 - M4ASquareSample.VARIANCE]
        wave = (high + low)
        return bytes(wave)
class M4AWaveformSample(M4ASample):
    """PSG Programmable Waveform sample."""

    def __init__(self, sample_data: bytes) -> None:
        super().__init__(True, PSG_WAVEFORM_FREQUENCY, 0, sample_data)

    @property
    def is_looped(self) -> bool:
        # Programmable waveforms always loop.
        return True

    @property
    def size(self) -> int:
        # Fixed size; overrides the data-length-based base property.
        return PSG_WAVEFORM_SIZE


class M4ANoiseSample(M4ASample):
    """PSG Noise sample."""

    # NOTE(review): this constant is unused - noise() below reads
    # M4ASquareSample.VARIANCE (same value) instead. Confirm intent.
    VARIANCE = int(0x7F * PSG_SQUARE_VOLUME)

    def __init__(self, period: int):
        self.validate(period)
        data = self.noise(period)
        super().__init__(True, 7040, 0, data)

    @staticmethod
    def validate(period: int) -> None:
        # Only periods 0 and 1 are valid.
        if not 0 <= period <= 1:
            raise InvalidArgument(period, 'NOISE PERIOD')

    @staticmethod
    def noise(period: int) -> bytes:
        """Generate noise sample.

        Period 0 yields a long (32767-sample) pattern, period 1 a short
        (127-sample) one. Uses the module-level random(), so the output
        is nondeterministic and depends on the global RNG state.
        """
        if period == 0:
            samples = 32767
        elif period == 1:
            samples = 127
        else:
            raise InvalidArgument(period, 'NOISE PERIOD')
        high = 0x80 + M4ASquareSample.VARIANCE
        low = 0x80 - M4ASquareSample.VARIANCE
        noise_data = [high if random() > .5 else low for _ in range(samples)]
        return bytes(noise_data)

# endregion
class SoundDriverMode(NamedTuple):
    """GBA SoundDriverMode call."""
    reverb: int = 0               # reverb amount
    reverb_enabled: bool = False
    polyphony: int = 8            # simultaneous channels
    volume_ind: int = 15          # master volume index (0-15)
    freq_ind: int = 4             # index into _FREQUENCY_TABLE
    dac_ind: int = 9              # index into _DAC_TABLE

    # Default raw SoundDriverMode word.
    _DEFAULT = 0x0094F800

    # Mixing sample rates (Hz) selectable by freq_ind.
    _FREQUENCY_TABLE = {
        1: 5734,
        2: 7884,
        3: 10512,
        4: 13379,
        5: 15768,
        6: 18157,
        7: 21024,
        8: 26758,
        9: 31536,
        10: 36314,
        11: 40137,
        12: 42048
    }

    # D/A converter bit depth by dac_ind.
    _DAC_TABLE = {
        8: 9,
        9: 8,
        10: 7,
        11: 6
    }

    @property
    def volume(self):
        """Return volume scaled to 0-255 (index * 17)."""
        return self.volume_ind * 17

    @property
    def frequency(self):
        """Return sample rate."""
        return self._FREQUENCY_TABLE[self.freq_ind]

    @property
    def dac(self):
        """Return D/A converter bits."""
        return self._DAC_TABLE[self.dac_ind]
class SoundEnvelope(object):
    """M4A ADSR sound envelope.

    Tracks the envelope level ``env_pos`` (0-255) through the attack,
    decay, sustain and release phases. Note that ``_rate`` is an
    *additive* step during ATTACK but a *multiplicative* per-update
    factor (value / 256) during DECAY and RELEASE.
    """

    # Envelope phases, in order of progression.
    ATTACK = 0
    DECAY = 1
    SUSTAIN = 2
    RELEASE = 3
    NOTE_OFF = 4

    def __init__(self, attack: int, decay: int, sustain: int,
                 release: int) -> None:
        """Initialize envelope to M4AVoice ADSR settings."""
        self.phase = self.ATTACK
        self.attack = attack
        self.decay = decay
        self.sustain = sustain
        self.release = release
        self._rate = self.attack  # additive step while attacking
        self.env_pos = 0          # current envelope level (0-255)

    def __repr__(self):
        return f'SoundEnvelope({self.attack}, {self.decay}, {self.sustain}, ' \
               f'{self.release})'

    def note_off(self) -> None:
        """Switch to RELEASE phase on note-off."""
        if self.phase >= self.RELEASE:
            return  # already releasing or finished
        self.phase = self.RELEASE
        self._rate = self.release / 256  # now a per-update decay factor

    def update(self) -> int:
        """Advance one tick; return the new level, or -1 once released."""
        if self.phase == self.ATTACK:
            self.env_pos += self._rate
            if self.env_pos >= 255:
                # Attack complete: clamp and switch to exponential decay.
                self.phase = self.DECAY
                self.env_pos = 255
                self._rate = self.decay / 256
        if self.phase == self.DECAY:
            self.env_pos = int(self.env_pos * self._rate)
            if self.env_pos <= self.sustain:
                self.phase = self.SUSTAIN
                self.env_pos = self.sustain
        if self.phase == self.SUSTAIN:
            pass  # hold at the sustain level until note_off()
        if self.phase == self.RELEASE:
            self.env_pos = int(self.env_pos * self._rate)
            if self.env_pos <= 0:
                self.phase = self.NOTE_OFF
        if self.phase == self.NOTE_OFF:
            return -1
        return self.env_pos
class MetaData(NamedTuple):
    """ROM/Track metadata."""

    # Region letter (4th character of the game code) -> region name.
    REGION = {
        'J': 'JPN',
        'E': 'USA',
        'P': 'PAL',
        'D': 'DEU',
        'F': 'FRA',
        'I': 'ITA',
        'S': 'ESP'
    }

    rom_name: str = ...
    rom_code: str = ...
    tracks: int = ...
    reverb: int = ...
    priority: int = ...
    main_ptr: int = ...
    voice_ptr: int = ...
    song_ptr: int = ...
    unknown: int = ...

    @property
    def echo_enabled(self) -> bool:
        """Track reverb flag.

        NOTE(review): ``bin(x)[2:][0]`` tests the *most significant*
        set bit, so this is True for any nonzero ``reverb``. If the
        intent is to test bit 7 (0x80), this should be
        ``bool(self.reverb & 0x80)`` - confirm the flag semantics.
        """
        return bin(self.reverb)[2:][0] == '1'

    @property
    def code(self) -> str:
        """ROM production code."""
        return f'AGB-{self.rom_code}-{self.region}'

    @property
    def region(self) -> str:
        """ROM region code ('UNK' when the letter is not recognized)."""
        return self.REGION.get(self.rom_code[3], 'UNK')
class FMODNote(object):
    """FMOD note.

    One playing note: its track data (key, velocity, remaining ticks,
    voice index), its private copy of the voice's sound envelope, and
    the FMOD channel handle it is bound to.
    """

    def __init__(self, ticks: int, midi_note: int, velocity: int,
                 voice: int) -> None:
        """Initialize note from track data."""
        self.note_off: bool = False         # True once note-off was seen
        self.voice: int = voice             # voice-table index
        self.midi_note: int = midi_note
        self.velocity: int = velocity       # 0-127
        self.ticks: int = ticks             # remaining duration in ticks
        self.lfo_pos: float = 0.0
        self.frequency: int = 0
        self.envelope: SoundEnvelope = ...  # installed by reset_mixer()
        self.fmod_handle: int = 0           # FMOD channel handle

    def __repr__(self):
        return f'Note({self.midi_note}, {self.velocity}, {self.ticks}, ' \
               f'{self.voice})'

    __str__ = __repr__

    # region PROPERTIES

    @property
    def volume(self) -> float:
        """Return note volume: velocity scaled by the envelope level."""
        return self.velocity / 0x7F * self.envelope.env_pos / 0xFF

    @property
    def muted(self) -> bool:
        """Return mute state in FMOD."""
        return get_mute(self.fmod_handle)

    # endregion

    def reset_mixer(self, voice: M4AVoice) -> None:
        """Install new voice envelope (copied so this note owns it)."""
        self.envelope = copy.copy(voice.envelope)

    def release(self) -> None:
        """Change note state to note-off."""
        self.envelope.note_off()
        self.note_off = True

    def update(self) -> None:
        """Update note state; release once the tick countdown hits 0."""
        if self.ticks > 0:
            self.ticks -= 1
            if self.ticks == 0:
                self.release()

    def update_envelope(self) -> None:
        """Update sound envelope; mute the channel once fully released."""
        pos = self.envelope.update()
        if pos == -1:
            self.set_mute(True)

    # region FMOD FUNCTIONS

    def set_panning(self, panning: int) -> None:
        set_panning(self.fmod_handle, panning)

    def set_volume(self, volume: int) -> None:
        set_volume(self.fmod_handle, volume)

    def set_frequency(self, frequency: int) -> None:
        set_frequency(self.fmod_handle, frequency)

    def set_mute(self, state: bool) -> None:
        set_mute(self.fmod_handle, state)

    # endregion
class M4ASong(NamedTuple):
    """M4A song: tracks plus the voice/sample tables they reference.

    NOTE(review): the ``[]``/``{}`` defaults are evaluated once at class
    creation, so every default-constructed M4ASong shares the *same* list and
    dict objects — mutating one mutates them all. Confirm callers always pass
    these fields explicitly, or switch to ``None`` defaults.
    """
    tracks: List['M4ATrack'] = []
    voices: Dict[int, M4AVoice] = {}
    samples: Dict[Union[int, str], M4ASample] = {}
    meta_data: 'MetaData' = MetaData()
    sdm: SoundDriverMode = None
class M4ATrack(object):
    """One sequenced M4A track: a command stream plus its playback state."""
    NO_VOICE = -1
    TEMPO = 75
    KEY_SHIFT = 0
    def __init__(self, track_data: OrderedDict):
        """Set up a fresh track around the parsed command stream."""
        # Command stream (address -> callable) and playback enable flag.
        self.enabled: bool = True
        self.track_data: OrderedDict = track_data
        self.cmd_addresses: Tuple[int] = tuple(track_data.keys())
        self.commands: Tuple = tuple(track_data.values())
        # Active voice/note bookkeeping.
        self.voices: Tuple[int] = ()
        self.notes: List[FMODNote] = []
        self.note_queue: Deque[FMODNote] = deque()
        self.call_stack: Deque[int] = deque(maxlen=3)
        self.type: M4AVoiceMode = M4AVoiceMode.NULL
        self.voice: int = M4ATrack.NO_VOICE
        # Pitch / volume / pan controller state.
        self.key_shift: int = 0
        self._volume: int = mxv
        self._panning: int = c_v
        self.pitch_bend: int = c_v
        self.pitch_range: int = 2
        self.mod: int = 0
        self.lfo_speed: int = 0
        self.lfo_pos: int = 0
        # Interpreter counters.
        self.ticks: int = 0
        self.program_ctr: int = 0
        self.return_ctr: int = 0
        self.base_ctr: int = 0
        self.in_patt: bool = False
        self.out_vol: int = 0
    # region PROPERTIES
    @property
    def volume(self) -> float:
        """Track volume as a fraction of the 7-bit maximum (0.0-1.0)."""
        return self._volume / 127
    @volume.setter
    def volume(self, volume: int) -> None:
        self._volume = volume
    @property
    def panning(self) -> int:
        """Raw panning value scaled to the doubled output range."""
        return 2 * self._panning
    @panning.setter
    def panning(self, panning: int) -> None:
        self._panning = panning
    @property
    def frequency(self) -> float:
        """Frequency multiplier derived from the current pitch-bend state."""
        bend_semitones = (self.pitch_bend - c_v) / c_v * self.pitch_range
        return math.pow(SEMITONE_RATIO, bend_semitones)
    # endregion
    def update(self) -> None:
        """Execute M4A track commands and decrement wait counter."""
        if not self.enabled:
            return
        if self.ticks > 0:
            self.ticks -= 1
        if self.ticks == 0:
            # The wait ran out: remember where this batch of commands starts,
            # then run commands until one of them schedules a new wait
            # (ticks > 0) or disables the track.
            self.base_ctr = self.program_ctr
            while self.ticks == 0 and self.enabled:
                self.commands[self.program_ctr](self)
        for note in self.notes:
            note.update()
    def update_envelope(self):
        """Advance each note's envelope and forward the mixed volumes."""
        self.out_vol = 0
        for note in tuple(self.notes):
            note.update_envelope()
            if note.muted:
                continue
            mixed = round(self.volume * note.volume * 255)
            if self.type in {M4AVoiceMode.PSG_SQUARE1, M4AVoiceMode.PSG_SQUARE2,
                             M4AVoiceMode.PSG_NOISE}:
                # PSG channel volumes are quantized to multiples of 15.
                mixed = round(mixed / 15) * 15
            self.out_vol = mixed
            note.set_volume(mixed)
def note_name(midi_note: int) -> str:
    """Return the display name of a MIDI note number.

    Octaves are shifted down by two; negative octaves are rendered with an
    'M' (minus) prefix before the absolute octave digit.
    """
    octave_index, pitch_class = divmod(midi_note, 12)
    octave_index -= 2
    minus_prefix = 'M' if octave_index < 0 else ''
    return '{}{}{}'.format(NOTES[pitch_class], minus_prefix, abs(octave_index))
def resample(midi_note: int, relative_c_freq: int = -1) -> int:
    """Retrieve the sound frequency in Hz of a MIDI note relative to C3.

    Parameters
    ----------
    midi_note : int
        MIDI key number; offset against ``KeyArg.Cn3`` so the result is
        expressed relative to C3.
    relative_c_freq : int
        Frequency of the reference C. Any negative value (default -1)
        derives the reference from ``BASE_FREQUENCY`` instead. NOTE(review):
        the derivation divides BASE_FREQUENCY by ``abs(relative_c_freq)``,
        so negative values other than -1 scale the base down — confirm
        that is intentional.

    Returns
    -------
    int
        Truncated frequency in Hz.
    """
    note = midi_note - KeyArg.Cn3
    if relative_c_freq < 0:
        # Derive the reference C from the base frequency, three semitones up.
        base_freq = BASE_FREQUENCY // abs(relative_c_freq)
        relative_c_freq = base_freq * math.pow(SEMITONE_RATIO, 3)
    # (A redundant ``else: relative_c_freq = relative_c_freq`` no-op was
    # removed; non-negative values are used as given.)
    freq = relative_c_freq * math.pow(SEMITONE_RATIO, note)
    return int(freq)
| 28.875197 | 80 | 0.585239 | 2,210 | 18,278 | 4.638462 | 0.149774 | 0.011609 | 0.018242 | 0.013267 | 0.298605 | 0.230124 | 0.217052 | 0.185933 | 0.154131 | 0.099893 | 0 | 0.029489 | 0.294999 | 18,278 | 632 | 81 | 28.920886 | 0.766025 | 0.078236 | 0 | 0.185615 | 0 | 0 | 0.052055 | 0.030511 | 0 | 0 | 0.007522 | 0 | 0 | 1 | 0.141531 | false | 0.00232 | 0.023202 | 0.030162 | 0.392111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f7aa5d337d6a6a04c73fadceef0f5775c6ce5a | 4,076 | py | Python | examples/manifold/plot_swissroll.py | jlopezNEU/scikit-learn | 593495eebc3c2f2ffdb244036adf57fab707a47d | [
"BSD-3-Clause"
] | 50,961 | 2015-01-01T06:06:31.000Z | 2022-03-31T23:40:12.000Z | examples/manifold/plot_swissroll.py | ashutoshpatelofficial/scikit-learn | 2fc9187879424556726d9345a6656884fa9fbc20 | [
"BSD-3-Clause"
] | 17,065 | 2015-01-01T02:01:58.000Z | 2022-03-31T23:48:34.000Z | examples/manifold/plot_swissroll.py | ashutoshpatelofficial/scikit-learn | 2fc9187879424556726d9345a6656884fa9fbc20 | [
"BSD-3-Clause"
] | 26,886 | 2015-01-01T00:59:27.000Z | 2022-03-31T18:03:23.000Z | """
===================================
Swiss Roll And Swiss-Hole Reduction
===================================
This notebook seeks to compare two popular non-linear dimensionality
reduction techniques, T-distributed Stochastic Neighbor Embedding (t-SNE) and
Locally Linear Embedding (LLE), on the classic Swiss Roll dataset.
Then, we will explore how they both deal with the addition of a hole
in the data.
"""
# %%
# Swiss Roll
# ---------------------------------------------------
#
# We start by generating the Swiss Roll dataset.
import matplotlib.pyplot as plt
from sklearn import manifold, datasets
sr_points, sr_color = datasets.make_swiss_roll(n_samples=1500, random_state=0)
# %%
# Now, let's take a look at our data:
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
fig.add_axes(ax)
ax.scatter(
    sr_points[:, 0], sr_points[:, 1], sr_points[:, 2], c=sr_color, s=50, alpha=0.8
)
ax.set_title("Swiss Roll in Ambient Space")
ax.view_init(azim=-66, elev=12)
_ = ax.text2D(0.8, 0.05, s="n_samples=1500", transform=ax.transAxes)
# %%
# Computing the LLE and t-SNE embeddings, we find that LLE seems to unroll the
# Swiss Roll pretty effectively. t-SNE on the other hand, is able
# to preserve the general structure of the data, but, poorly represents the
# continuous nature of our original data. Instead, it seems to unnecessarily
# clump sections of points together.
sr_lle, sr_err = manifold.locally_linear_embedding(
    sr_points, n_neighbors=12, n_components=2
)
sr_tsne = manifold.TSNE(
    n_components=2, learning_rate="auto", perplexity=40, init="pca", random_state=0
).fit_transform(sr_points)
fig, axs = plt.subplots(figsize=(8, 8), nrows=2)
axs[0].scatter(sr_lle[:, 0], sr_lle[:, 1], c=sr_color)
axs[0].set_title("LLE Embedding of Swiss Roll")
axs[1].scatter(sr_tsne[:, 0], sr_tsne[:, 1], c=sr_color)
_ = axs[1].set_title("t-SNE Embedding of Swiss Roll")
# %%
# .. note::
#
#     LLE seems to be stretching the points from the center (purple)
#     of the swiss roll. However, we observe that this is simply a byproduct
#     of how the data was generated. There is a higher density of points near the
#     center of the roll, which ultimately affects how LLE reconstructs the
#     data in a lower dimension.
# %%
# Swiss-Hole
# ---------------------------------------------------
#
# Now let's take a look at how both algorithms deal with us adding a hole to
# the data. First, we generate the Swiss-Hole dataset and plot it:
sh_points, sh_color = datasets.make_swiss_roll(
    n_samples=1500, hole=True, random_state=0
)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
fig.add_axes(ax)
ax.scatter(
    sh_points[:, 0], sh_points[:, 1], sh_points[:, 2], c=sh_color, s=50, alpha=0.8
)
ax.set_title("Swiss-Hole in Ambient Space")
ax.view_init(azim=-66, elev=12)
_ = ax.text2D(0.8, 0.05, s="n_samples=1500", transform=ax.transAxes)
# %%
# Computing the LLE and t-SNE embeddings, we obtain similar results to the
# Swiss Roll. LLE very capably unrolls the data and even preserves
# the hole. t-SNE again seems to clump sections of points together, but we
# note that it preserves the general topology of the original data.
sh_lle, sh_err = manifold.locally_linear_embedding(
    sh_points, n_neighbors=12, n_components=2
)
sh_tsne = manifold.TSNE(
    n_components=2, learning_rate="auto", perplexity=40, init="random", random_state=0
).fit_transform(sh_points)
fig, axs = plt.subplots(figsize=(8, 8), nrows=2)
axs[0].scatter(sh_lle[:, 0], sh_lle[:, 1], c=sh_color)
axs[0].set_title("LLE Embedding of Swiss-Hole")
axs[1].scatter(sh_tsne[:, 0], sh_tsne[:, 1], c=sh_color)
_ = axs[1].set_title("t-SNE Embedding of Swiss-Hole")
# %%
#
# Concluding remarks
# ------------------
#
# We note that t-SNE benefits from testing more combinations of parameters.
# Better results could probably have been obtained by better tuning these
# parameters.
#
# We observe that, as seen in the "Manifold learning on
# handwritten digits" example, t-SNE generally performs better than LLE
# on real world data.
| 33.966667 | 86 | 0.692345 | 658 | 4,076 | 4.173252 | 0.31459 | 0.03933 | 0.01748 | 0.016023 | 0.422433 | 0.353969 | 0.353969 | 0.319009 | 0.291333 | 0.265113 | 0 | 0.02876 | 0.146958 | 4,076 | 119 | 87 | 34.252101 | 0.761001 | 0.501472 | 0 | 0.304348 | 0 | 0 | 0.108531 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41f7f839e3be35c24720ab38c662be95d99e2886 | 2,044 | py | Python | INF101/TP/TP9/2.9.3.py | Marshellson/UGA_IMF | eb293deabcc5ef6e45617d8c5bb6268b63b34f21 | [
"MIT"
] | 1 | 2021-09-21T21:53:17.000Z | 2021-09-21T21:53:17.000Z | INF101/TP/TP9/2.9.3.py | Marshellson/UGA_INF | eb293deabcc5ef6e45617d8c5bb6268b63b34f21 | [
"MIT"
] | null | null | null | INF101/TP/TP9/2.9.3.py | Marshellson/UGA_INF | eb293deabcc5ef6e45617d8c5bb6268b63b34f21 | [
"MIT"
] | null | null | null | '''
Author: JIANG Yilun
Date: 2021-12-01 13:01:29
LastEditTime: 2021-12-01 13:30:06
LastEditors: JIANG Yilun
Description:
FilePath: /INF_101/INF101/TP/TP9/2.9.3.py
'''
import random
def initiale() -> dict:
    """Prompt for the number of players, then one name per player.

    Returns a dict mapping each entered name to a starting score of 0.
    """
    nombre_de_personnes = int(input("Entrez le nombre de personnes: "))
    return {input("Entrez le nom de la personne: "): 0
            for _ in range(nombre_de_personnes)}
def traduire(dictio: dict, mot: str) -> str:
    """Return the translation of *mot* from *dictio*.

    Looks *mot* up as a key and returns the mapped value, or ``None`` when
    the word is absent (same result as the original linear scan, without
    iterating the whole dict).
    """
    return dictio.get(mot)
def jouerUnMot(dictio: dict) -> bool:
    """Ask the player for the English translation of a random French word.

    Returns True when the typed answer matches ``dictio``'s value for the
    randomly drawn key, False otherwise.

    NOTE(review): the drawn French word (``hasard_choisir``) is never shown
    to the player before prompting, so the player cannot know which word to
    translate — presumably a ``print`` of the word is missing; confirm.
    """
    list_mot_francais = []
    for k, v in dictio.items():
        list_mot_francais.append(k)
    # Pick the French word to quiz at random.
    hasard_choisir = random.choice(list_mot_francais)
    mot_anglais_saissir = input("Entrez un mot anglais: ")
    if dictio[hasard_choisir] == mot_anglais_saissir:
        print("Bravo!")
        return True
    else:
        print("Dommage!")
        return False
# Word list and interactive game loop.
dictionnaire = {'pomme': 'apple', 'orange': 'orange', 'banane': 'banana'}
nombre_round = int(input("Entrez le nombre de round: "))
dict_personnes = initiale()
# Each round, every player attempts one word; a correct answer scores 1.
for i in range(nombre_round):
    for nom in dict_personnes:
        print("cest le %s round du %s" % (i+1, nom))
        if jouerUnMot(dictionnaire):
            dict_personnes[nom] += 1
# Convert raw scores to percentages and collect parallel ranking lists.
dict_pourcentage = {}
list_nom_ranking = []
list_score_ranking = []
for nom, score in dict_personnes.items():
    dict_pourcentage[nom] = (score / nombre_round) * 100
    list_nom_ranking.append(nom)
    list_score_ranking.append(score)
# Sort both parallel lists in descending score order (swap-based sort).
for i in range(len(list_nom_ranking)):
    for j in range(i+1, len(list_nom_ranking)):
        if list_score_ranking[i] < list_score_ranking[j]:
            list_score_ranking[i], list_score_ranking[j] = list_score_ranking[j], list_score_ranking[i]
            list_nom_ranking[i], list_nom_ranking[j] = list_nom_ranking[j], list_nom_ranking[i]
# Print the ranking. Bug fix: the original applied the ``%`` operator to a
# "{}"-style template ("{}: {}" % (...)), which raises TypeError at runtime;
# str.format is the matching call for "{}" placeholders.
for i in range(len(list_nom_ranking)):
    print("{}: {}".format(list_nom_ranking[i], dict_pourcentage[list_nom_ranking[i]]), "%")
41f845926aa3ec217a14d9100d2e6f115eb277d1 | 322 | py | Python | HLTriggerOffline/Exotica/python/analyses/hltExoticaLowPtTrimuon_cff.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTriggerOffline/Exotica/python/analyses/hltExoticaLowPtTrimuon_cff.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTriggerOffline/Exotica/python/analyses/hltExoticaLowPtTrimuon_cff.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-23T09:25:45.000Z | 2021-11-23T09:25:45.000Z | import FWCore.ParameterSet.Config as cms
# Parameter set steering the low-pT trimuon exotica HLT validation analysis.
LowPtTrimuonPSet = cms.PSet(
    # HLT paths to monitor; empty here — presumably populated elsewhere or
    # intentionally left blank. TODO confirm.
    hltPathsToCheck = cms.vstring(
    ),
    # Reconstructed muon collection used as the offline reference.
    recMuonLabel = cms.InputTag("muons"),
    # -- Analysis specific cuts
    minCandidates = cms.uint32(3),
    # -- Analysis specific binnings
    parametersDxy = cms.vdouble(50, -2.500, 2.500),
)
| 26.833333 | 56 | 0.65528 | 34 | 322 | 6.205882 | 0.735294 | 0.151659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052 | 0.223602 | 322 | 11 | 57 | 29.272727 | 0.792 | 0.170807 | 0 | 0 | 0 | 0 | 0.018939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41fa41d8007e097936f791c465cb628fb82b64ed | 3,021 | py | Python | see/test/hooks_manager_test.py | nethunterslabs/see | da9387950d5db7c30ad8a5d1ba12e884afe8b1bb | [
"Apache-2.0"
] | null | null | null | see/test/hooks_manager_test.py | nethunterslabs/see | da9387950d5db7c30ad8a5d1ba12e884afe8b1bb | [
"Apache-2.0"
] | null | null | null | see/test/hooks_manager_test.py | nethunterslabs/see | da9387950d5db7c30ad8a5d1ba12e884afe8b1bb | [
"Apache-2.0"
] | null | null | null | import copy
import mock
import unittest
from see import Hook
from see import hooks
# Hook configuration shared by every test below: a generic "configuration"
# section plus two hook entries (one with hook-specific configuration, one
# without).
CONFIG = {
    "configuration": {"key": "value"},
    "hooks": [
        {
            "name": "see.test.hooks_manager_test.TestHook",
            "configuration": {"foo": "bar"},
        },
        {"name": "see.test.hooks_manager_test.TestHookCleanup"},
    ],
}
class TestHook(Hook):
    """Minimal hook without a cleanup() method; used as a load target."""
    def __init__(self, parameters):
        super(TestHook, self).__init__(parameters)
        self.cleaned = False  # never flipped: this hook defines no cleanup()
class TestHookCleanup(Hook):
    """Hook that records whether its cleanup() method was invoked."""
    def __init__(self, parameters):
        super(TestHookCleanup, self).__init__(parameters)
        # Flipped to True by cleanup().
        self.cleaned = False
    def cleanup(self):
        self.cleaned = True
class HookManagerLoadTest(unittest.TestCase):
    """Loading behaviour of HookManager driven by the module-level CONFIG."""
    def setUp(self):
        self.hook_manager = hooks.HookManager("foo", copy.deepcopy(CONFIG))
    def test_load_hooks(self):
        """TestHook is loaded into HookManager."""
        self.hook_manager.load_hooks(mock.MagicMock())
        loaded = self.hook_manager.hooks[0]
        self.assertEqual(type(loaded).__name__, "TestHook")
    def test_load_hooks_configuration(self):
        """Generic configuration is available in TestHook."""
        self.hook_manager.load_hooks(mock.MagicMock())
        self.assertIn("key", self.hook_manager.hooks[0].configuration)
    def test_load_hooks_specific_configuration(self):
        """Hook-specific configuration is available in TestHook."""
        self.hook_manager.load_hooks(mock.MagicMock())
        self.assertIn("foo", self.hook_manager.hooks[0].configuration)
    def test_load_non_existing_hook(self):
        """Hooks with unresolvable names are not loaded."""
        config = copy.deepcopy(CONFIG)
        config["hooks"][0]["name"] = "foo"
        config["hooks"][1]["name"] = "bar"
        manager = hooks.HookManager("foo", config)
        manager.load_hooks(mock.MagicMock())
        self.assertEqual(len(manager.hooks), 0)
    def test_load_missing_name(self):
        """Hook entries without a name are skipped."""
        config = copy.deepcopy(CONFIG)
        del config["hooks"][0]["name"]
        manager = hooks.HookManager("foo", config)
        manager.load_hooks(mock.MagicMock())
        self.assertEqual(len(manager.hooks), 1)
class HooksManagerCleanupTest(unittest.TestCase):
    """Cleanup dispatch behaviour of HookManager."""
    def setUp(self):
        self.hook_manager = hooks.HookManager("foo", copy.deepcopy(CONFIG))
    def test_cleanup(self):
        """Cleanup is performed if specified."""
        self.hook_manager.load_hooks(mock.MagicMock())
        target = self.hook_manager.hooks[1]
        self.hook_manager.cleanup()
        self.assertTrue(target.cleaned)
    def test_no_cleanup(self):
        """Cleanup is not performed if not specified."""
        self.hook_manager.load_hooks(mock.MagicMock())
        target = self.hook_manager.hooks[0]
        self.hook_manager.cleanup()
        self.assertFalse(target.cleaned)
| 31.14433 | 83 | 0.64482 | 345 | 3,021 | 5.449275 | 0.182609 | 0.059574 | 0.111702 | 0.074468 | 0.658511 | 0.613298 | 0.516489 | 0.516489 | 0.516489 | 0.516489 | 0 | 0.004297 | 0.229725 | 3,021 | 96 | 84 | 31.46875 | 0.80361 | 0.089043 | 0 | 0.4 | 0 | 0 | 0.070324 | 0.029087 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.171429 | false | 0 | 0.071429 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41fa5f1eb0cd72600e9f03de9938096cc411d598 | 538 | py | Python | api/works/migrations/0004_auto_20181201_2130.py | urbrob/hackathon-2018 | 3261b176304c7fb1a4ed97a3e7a1ac10b1d0608f | [
"MIT"
] | null | null | null | api/works/migrations/0004_auto_20181201_2130.py | urbrob/hackathon-2018 | 3261b176304c7fb1a4ed97a3e7a1ac10b1d0608f | [
"MIT"
] | 12 | 2018-12-01T12:41:36.000Z | 2022-03-11T23:35:55.000Z | api/works/migrations/0004_auto_20181201_2130.py | urbrob/hackathon-2018 | 3261b176304c7fb1a4ed97a3e7a1ac10b1d0608f | [
"MIT"
] | 1 | 2018-12-01T16:41:13.000Z | 2018-12-01T16:41:13.000Z | # Generated by Django 2.0.2 on 2018-12-01 21:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Move the upload ``file`` field from the ``test`` model to ``report``."""
    dependencies = [
        ('works', '0003_auto_20181201_2104'),
    ]
    operations = [
        # Drop the field from Test ...
        migrations.RemoveField(
            model_name='test',
            name='file',
        ),
        # ... and add it to Report. Per Django semantics, ``default='test'``
        # only backfills rows existing when the migration runs;
        # ``preserve_default=False`` removes that default from model state
        # afterwards.
        migrations.AddField(
            model_name='report',
            name='file',
            field=models.FileField(default='test', upload_to='reports/'),
            preserve_default=False,
        ),
    ]
| 22.416667 | 73 | 0.565056 | 54 | 538 | 5.5 | 0.740741 | 0.060606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.084011 | 0.314126 | 538 | 23 | 74 | 23.391304 | 0.720867 | 0.083643 | 0 | 0.235294 | 1 | 0 | 0.118126 | 0.046843 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
41fab0cdfc218549b2a3694e2626d8da1755a58e | 10,467 | py | Python | massloss_glacier2latlongrid.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | [
"MIT"
] | 25 | 2019-06-12T21:08:24.000Z | 2022-03-01T08:05:14.000Z | massloss_glacier2latlongrid.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | [
"MIT"
] | 2 | 2020-04-23T14:08:00.000Z | 2020-06-04T13:52:44.000Z | massloss_glacier2latlongrid.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | [
"MIT"
] | 24 | 2019-06-12T19:48:40.000Z | 2022-02-16T03:42:53.000Z | """ Analyze MCMC output - chain length, etc. """
# Built-in libraries
from collections import OrderedDict
import datetime
import glob
import os
import pickle
# External libraries
import cartopy
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.pyplot import MaxNLocator
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import EngFormatter
from matplotlib.ticker import StrMethodFormatter
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
import numpy as np
import pandas as pd
from scipy.stats import linregress
from scipy.ndimage import uniform_filter
import scipy
#from scipy import stats
#from scipy.stats.kde import gaussian_kde
#from scipy.stats import norm
#from scipy.stats import truncnorm
#from scipy.stats import uniform
#from scipy.stats import linregress
#from scipy.stats import lognorm
#from scipy.optimize import minimize
import xarray as xr
# Local libraries
import class_climate
import class_mbdata
import pygem.pygem_input as pygem_prms
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_massbalance as massbalance
import pygemfxns_modelsetup as modelsetup
import run_calibration as calibration
# Switch enabling the degree-binned mass time-series analysis at the bottom
# of this script (1 = run).
option_mass_bydeg = 1
#%% ===== Input data =====
# Filepath of the multimodel CMIP5 simulation NetCDF output.
netcdf_fp_cmip5 = '/Volumes/LaCie/HMA_PyGEM/2019_0914/'
# RGI order-1 region numbers (13-15, presumably High Mountain Asia — confirm).
regions = [13, 14, 15]
# GCMs and RCP scenarios
#gcm_names = ['bcc-csm1-1', 'CanESM2', 'CESM1-CAM5', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'FGOALS-g2', 'GFDL-CM3',
#             'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-R', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'IPSL-CM5A-MR', 'MIROC-ESM',
#             'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M', 'NorESM1-ME']
rcps = ['rcp26', 'rcp45', 'rcp60', 'rcp85']
# Grouping
grouping = 'degree'
# Size of each lat/lon grouping cell, in degrees.
degree_size = 0.1
#%% ===== FUNCTIONS =====
def pickle_data(fn, data):
    """Serialize *data* to disk with :mod:`pickle`.

    Parameters
    ----------
    fn : str
        Destination path (including directory) of the ``.pkl`` file.
    data : object
        Any picklable object.

    Returns
    -------
    None
        The pickled data is written to *fn* as a side effect.
    """
    with open(fn, 'wb') as outfile:
        pickle.dump(data, outfile)
def select_groups(grouping, main_glac_rgi_all):
    """Return the sorted group labels and grouping column for a scheme.

    Parameters
    ----------
    grouping : str
        Grouping scheme; only 'degree' (lat/lon degree cells) is supported.
    main_glac_rgi_all : pd.DataFrame
        Glacier table containing a 'deg_id' column.

    Returns
    -------
    groups : list
        Sorted unique group identifiers.
    group_cn : str
        Name of the dataframe column holding the group id.

    Raises
    ------
    ValueError
        If *grouping* is not a supported scheme (previously this path
        crashed with an opaque NameError on the unbound ``groups``).
    """
    if grouping != 'degree':
        raise ValueError('Unsupported grouping scheme: {!r}'.format(grouping))
    groups = main_glac_rgi_all.deg_id.unique().tolist()
    group_cn = 'deg_id'
    try:
        # Case-insensitive ordering for string ids; str.lower raises
        # TypeError on non-string ids, in which case fall back to the
        # natural sort order.
        groups = sorted(groups, key=str.lower)
    except TypeError:
        groups = sorted(groups)
    return groups, group_cn
def load_glacier_data(glac_no=None, rgi_regionsO1=None, rgi_regionsO2='all', rgi_glac_number='all',
                      load_caldata=0, startyear=2000, endyear=2018, option_wateryear=3):
    """
    Load glacier data (main_glac_rgi, hyps, and ice thickness)

    Returns the RGI glacier table, hypsometry table [km**2] and ice
    thickness table [m]; when ``load_caldata == 1`` a calibration-data
    dataframe is appended as a fourth return value.
    """
    # Load glaciers
    main_glac_rgi_all = modelsetup.selectglaciersrgitable(
            rgi_regionsO1=rgi_regionsO1, rgi_regionsO2 =rgi_regionsO2, rgi_glac_number=rgi_glac_number,
            glac_no=glac_no)
    # Glacier hypsometry [km**2], total area
    main_glac_hyps_all = modelsetup.import_Husstable(main_glac_rgi_all, pygem_prms.hyps_filepath, pygem_prms.hyps_filedict,
                                                     pygem_prms.hyps_colsdrop)
    # Ice thickness [m], average
    main_glac_icethickness_all = modelsetup.import_Husstable(main_glac_rgi_all, pygem_prms.thickness_filepath,
                                                             pygem_prms.thickness_filedict, pygem_prms.thickness_colsdrop)
    # Additional processing: zero out hypsometry bins with no modelled ice
    # thickness, then replace NaNs with zeros in both tables.
    main_glac_hyps_all[main_glac_icethickness_all == 0] = 0
    main_glac_hyps_all = main_glac_hyps_all.fillna(0)
    main_glac_icethickness_all = main_glac_icethickness_all.fillna(0)
    # Add degree groups to main_glac_rgi_all
    # Degrees: floor each glacier's center lon/lat to a degree_size cell
    # (module-level global), then assign a sequential deg_id per occupied cell.
    main_glac_rgi_all['CenLon_round'] = np.floor(main_glac_rgi_all.CenLon.values/degree_size) * degree_size
    main_glac_rgi_all['CenLat_round'] = np.floor(main_glac_rgi_all.CenLat.values/degree_size) * degree_size
    deg_groups = main_glac_rgi_all.groupby(['CenLon_round', 'CenLat_round']).size().index.values.tolist()
    deg_dict = dict(zip(deg_groups, np.arange(0,len(deg_groups))))
    main_glac_rgi_all.reset_index(drop=True, inplace=True)
    cenlon_cenlat = [(main_glac_rgi_all.loc[x,'CenLon_round'], main_glac_rgi_all.loc[x,'CenLat_round'])
                     for x in range(len(main_glac_rgi_all))]
    main_glac_rgi_all['CenLon_CenLat'] = cenlon_cenlat
    main_glac_rgi_all['deg_id'] = main_glac_rgi_all.CenLon_CenLat.map(deg_dict)
    if load_caldata == 1:
        cal_datasets = ['shean']
        # NOTE(review): this overrides the caller-supplied *startyear* for the
        # calibration dates table — confirm this is intentional.
        startyear=2000
        dates_table = modelsetup.datesmodelrun(startyear=startyear, endyear=endyear, spinupyears=0,
                                               option_wateryear=option_wateryear)
        # Calibration data: concatenate every configured dataset, then order
        # rows by glacier number and first time index.
        cal_data_all = pd.DataFrame()
        for dataset in cal_datasets:
            cal_subset = class_mbdata.MBData(name=dataset)
            cal_subset_data = cal_subset.retrieve_mb(main_glac_rgi_all, main_glac_hyps_all, dates_table)
            cal_data_all = cal_data_all.append(cal_subset_data, ignore_index=True)
        cal_data_all = cal_data_all.sort_values(['glacno', 't1_idx'])
        cal_data_all.reset_index(drop=True, inplace=True)
    if load_caldata == 0:
        return main_glac_rgi_all, main_glac_hyps_all, main_glac_icethickness_all
    else:
        return main_glac_rgi_all, main_glac_hyps_all, main_glac_icethickness_all, cal_data_all
#%%
# ===== Time series of glacier mass grouped by degree ======
# Builds monthly glacier-mass [Gt] time series per RCP scenario, sums them per
# lat/lon degree cell, and writes one CSV per scenario.
if option_mass_bydeg == 1:
    startyear = 2000
    endyear = 2100
    # Load glaciers
    main_glac_rgi, main_glac_hyps, main_glac_icethickness = load_glacier_data(rgi_regionsO1=regions)
    # Groups
    groups, group_cn = select_groups(grouping, main_glac_rgi)
#%%
    # Glacier and grouped annual specific mass balance and mass change
    ds_multi = {}
    for rcp in rcps:
#    for rcp in ['rcp85']:
        for region in regions:
            # Load datasets
            ds_fn = 'R' + str(region) + '_multimodel_' + rcp + '_c2_ba1_100sets_2000_2100.nc'
            print(ds_fn)
            ds = xr.open_dataset(netcdf_fp_cmip5 + ds_fn)
            df = pd.DataFrame(ds.glacier_table.values, columns=ds.glac_attrs)
            df['RGIId'] = ['RGI60-' + str(int(df.O1Region.values[x])) + '.' +
                           str(int(df.glacno.values[x])).zfill(5) for x in df.index.values]
            # Extract time variable
            # NOTE(review): time_values_annual is never used below.
            time_values_annual = ds.coords['year_plus1'].values
            time_values_monthly = ds.coords['time'].values
            # Convert mass balance to monthly mass change
            mb_monthly = ds['massbaltotal_glac_monthly'].values[:,:,0]
            area_annual = ds.area_glac_annual[:,:,0].values
            area_monthly = area_annual[:,0:-1].repeat(12,axis=1)
            masschg_monthly_Gt_raw = mb_monthly / 1000 * area_monthly
            masschg_annual_Gt_raw = (masschg_monthly_Gt_raw.reshape(-1,12).sum(1)
                                     .reshape(masschg_monthly_Gt_raw.shape[0], int(masschg_monthly_Gt_raw.shape[1]/12)))
            vol_annual_Gt = ds['volume_glac_annual'].values[:,:,0] * pygem_prms.density_ice / pygem_prms.density_water
            volchg_annual_Gt = vol_annual_Gt[:,1:] - vol_annual_Gt[:,0:-1]
            # NOTE(review): masschg_adjustment is computed but never used below.
            masschg_adjustment = masschg_annual_Gt_raw[:,0] / volchg_annual_Gt[:,0]
            # Correction factor to ensure propagation of mean mass balance * area doesn't cause different annual volume
            # change compared to the mean annual volume change
            correction_factor_annual = np.zeros(volchg_annual_Gt.shape)
            correction_factor_annual[np.nonzero(volchg_annual_Gt)] = (
                    volchg_annual_Gt[np.nonzero(volchg_annual_Gt)] / masschg_annual_Gt_raw[np.nonzero(volchg_annual_Gt)]
                    )
            correction_factor_monthly = correction_factor_annual.repeat(12,axis=1)
            masschg_monthly_Gt = masschg_monthly_Gt_raw * correction_factor_monthly
            masschg_monthly_Gt_cumsum = np.cumsum(masschg_monthly_Gt, axis=1)
            # Running mass = initial volume plus cumulative change; clip
            # negative (unphysical) masses to zero.
            mass_monthly_Gt = vol_annual_Gt[:,0][:,np.newaxis] + masschg_monthly_Gt_cumsum
            mass_monthly_Gt[mass_monthly_Gt < 0] = 0
            if region == regions[0]:
                ds_multi[rcp] = mass_monthly_Gt
                df_all = df
            else:
                ds_multi[rcp] = np.concatenate((ds_multi[rcp], mass_monthly_Gt), axis=0)
                df_all = pd.concat([df_all, df], axis=0)
            ds.close()
    # Remove RGIIds from main_glac_rgi that are not in the model runs
    rgiid_df = list(df_all.RGIId.values)
    rgiid_all = list(main_glac_rgi.RGIId.values)
    rgi_idx = [rgiid_all.index(x) for x in rgiid_df]
    main_glac_rgi = main_glac_rgi.loc[rgi_idx,:]
    main_glac_rgi.reset_index(inplace=True, drop=True)
    deg_dict = dict(zip(main_glac_rgi['deg_id'].values, main_glac_rgi['CenLon_CenLat']))
    # NOTE(review): ds_deg is populated with empty dicts but never used below.
    ds_deg = {}
    for rcp in rcps:
#    for rcp in ['rcp85']:
        ds_deg[rcp] = {}
        deg_groups_ordered = []
        mass_deg_output = pd.DataFrame(np.zeros((len(deg_dict), mass_monthly_Gt.shape[1])),
                                       columns=time_values_monthly)
        for ngroup, group in enumerate(groups):
            deg_group_rounded = (np.round(deg_dict[group][0],1), np.round(deg_dict[group][1],1))
            deg_groups_ordered.append(deg_group_rounded)
            if ngroup%500 == 0:
                print(group, deg_group_rounded)
            # Sum volume change for group
            group_glac_indices = main_glac_rgi.loc[main_glac_rgi[group_cn] == group].index.values.tolist()
            vn_group = ds_multi[rcp][group_glac_indices,:].sum(axis=0)
            mass_deg_output.loc[ngroup, :] = vn_group
        mass_deg_output.index = deg_groups_ordered
        # CSV name embeds the scenario and cell size ('.' becomes 'p').
        mass_deg_output_fn = (('mass_Gt_monthly_' + rcp + '_' + str(np.round(degree_size,2)) + 'deg').replace('.','p')
                              + '.csv')
        mass_deg_output.to_csv(pygem_prms.output_filepath + mass_deg_output_fn)
| 41.868 | 123 | 0.651572 | 1,384 | 10,467 | 4.612717 | 0.24711 | 0.06015 | 0.056861 | 0.046053 | 0.218358 | 0.160244 | 0.091949 | 0.052475 | 0.034148 | 0.034148 | 0 | 0.020214 | 0.248495 | 10,467 | 249 | 124 | 42.036145 | 0.791381 | 0.166619 | 0 | 0.040541 | 0 | 0 | 0.040051 | 0.010246 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02027 | false | 0 | 0.202703 | 0 | 0.243243 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
41fc333b33ca8b6f1db8177902d0d972f89bd8ec | 3,233 | py | Python | corehq/apps/auditcare/migrations/0003_truncatechars.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/auditcare/migrations/0003_truncatechars.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/auditcare/migrations/0003_truncatechars.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | # Generated by Django 2.2.16 on 2021-04-01 20:23
import corehq.util.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch audit char columns to TruncatingCharField and rename indexes.

    The RunSQL below only *renames* existing indexes to the identifiers
    Django would auto-generate for the new index definitions, so no index is
    rebuilt; the SeparateDatabaseAndState block (empty database_operations)
    then updates Django's model state to match without touching the database.
    """
    dependencies = [
        ('auditcare', '0002_uniques'),
    ]
    operations = [
        migrations.AlterField(
            model_name='accessaudit',
            name='path',
            field=corehq.util.models.TruncatingCharField(blank=True, default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='navigationeventaudit',
            name='params',
            field=corehq.util.models.TruncatingCharField(blank=True, default='', max_length=4096),
        ),
        migrations.AlterField(
            model_name='navigationeventaudit',
            name='path',
            field=corehq.util.models.TruncatingCharField(blank=True, default='', max_length=255),
        ),
        # Rename the old auto-generated index names to the new ones (and back
        # on reverse) instead of dropping/recreating the indexes.
        migrations.RunSQL("""
            ALTER INDEX auditcare_accessaudit_user_event_date_c06ba8d2_idx
                RENAME TO auditcare_a_user_58e671_idx;
            ALTER INDEX auditcare_accessaudit_domain_event_date_81983947_idx
                RENAME TO auditcare_a_domain_5a6b57_idx;
            ALTER INDEX auditcare_navigationeventaudit_user_event_date_6dff2cf3_idx
                RENAME TO auditcare_n_user_d3ff7f_idx;
            ALTER INDEX auditcare_navigationeventaudit_domain_event_date_0808ba14_idx
                RENAME TO auditcare_n_domain_c90cfb_idx;
        """, reverse_sql="""
            ALTER INDEX auditcare_a_user_58e671_idx
                RENAME TO auditcare_accessaudit_user_event_date_c06ba8d2_idx;
            ALTER INDEX auditcare_a_domain_5a6b57_idx
                RENAME TO auditcare_accessaudit_domain_event_date_81983947_idx;
            ALTER INDEX auditcare_n_user_d3ff7f_idx
                RENAME TO auditcare_navigationeventaudit_user_event_date_6dff2cf3_idx;
            ALTER INDEX auditcare_n_domain_c90cfb_idx
                RENAME TO auditcare_navigationeventaudit_domain_event_date_0808ba14_idx;
        """),
        # State-only change: swap index_together for explicit Index objects
        # matching the names produced by the RunSQL renames above.
        migrations.SeparateDatabaseAndState(
            database_operations=[],
            state_operations=[
                migrations.AlterIndexTogether(name='accessaudit', index_together=set()),
                migrations.AlterIndexTogether(name='navigationeventaudit', index_together=set()),
                migrations.AddIndex(
                    model_name='accessaudit',
                    index=models.Index(fields=['user', 'event_date'], name="auditcare_a_user_58e671_idx"),
                ),
                migrations.AddIndex(
                    model_name='accessaudit',
                    index=models.Index(fields=['domain', 'event_date'], name="auditcare_a_domain_5a6b57_idx"),
                ),
                migrations.AddIndex(
                    model_name='navigationeventaudit',
                    index=models.Index(fields=['user', 'event_date'], name="auditcare_n_user_d3ff7f_idx"),
                ),
                migrations.AddIndex(
                    model_name='navigationeventaudit',
                    index=models.Index(fields=['domain', 'event_date'], name="auditcare_n_domain_c90cfb_idx"),
                ),
            ],
        ),
    ]
| 44.902778 | 110 | 0.62759 | 309 | 3,233 | 6.190939 | 0.229773 | 0.056456 | 0.079456 | 0.083638 | 0.756404 | 0.573445 | 0.525353 | 0.318348 | 0.318348 | 0.199164 | 0 | 0.048366 | 0.290133 | 3,233 | 71 | 111 | 45.535211 | 0.785185 | 0.014228 | 0 | 0.4 | 1 | 0 | 0.458713 | 0.24741 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.030769 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5100323735b98828e50f67b34742d19a272551ac | 6,781 | py | Python | tests/infrastructures/repositories/test_user_repository.py | harokki/fastapi-authenticated | baca823d3489739843b4df68f57fa785da7bf50d | [
"MIT"
] | null | null | null | tests/infrastructures/repositories/test_user_repository.py | harokki/fastapi-authenticated | baca823d3489739843b4df68f57fa785da7bf50d | [
"MIT"
] | null | null | null | tests/infrastructures/repositories/test_user_repository.py | harokki/fastapi-authenticated | baca823d3489739843b4df68f57fa785da7bf50d | [
"MIT"
] | null | null | null | from app.domains.entities.role import Role
from app.domains.entities.user import User
from app.domains.entities.user_role import UserRole
from app.schemas.user import UserCreateSchema
from tests.conftest import DbFixtureType
# NOTE(review): "SQLALCEMY" looks like a typo for "SQLALCHEMY", and this
# constant appears unused within this module — confirm no external importer
# relies on the current name before renaming or removing it.
SQLALCEMY_DATABASE_URL = "sqlite:///./test.db"
def test_find_by_user_id(db: DbFixtureType):
    """Persist one user, then fetch it back through the repository by username.

    NOTE(review): despite the name, this exercises ``find_by_username``.
    """
    repositories, session_factory = db
    repo = repositories.user_repository
    john = User(
        username="john",
        email="john@example.com",
        account_name="ジョン",
        hashed_password="aaaaa",
        created_by="john",
    )
    with session_factory() as session:
        session.add(john)
        session.commit()
        session.refresh(john)

        found = repo.find_by_username("john")

        # Every persisted column round-trips, and audit fields are populated.
        assert found.username == "john"
        assert found.email == "john@example.com"
        assert found.account_name == "ジョン"
        assert found.is_active is True
        assert found.hashed_password == "aaaaa"
        assert found.created_by == "john"
        assert found.created_at
        assert found.updated_by == "john"
        assert found.updated_at
def test_find_by_email(db: DbFixtureType):
    """Persist one user, then fetch it back through the repository by email."""
    repositories, session_factory = db
    repo = repositories.user_repository
    john = User(
        username="john",
        email="john@example.com",
        account_name="ジョン",
        hashed_password="aaaaa",
        created_by="john",
    )
    with session_factory() as session:
        session.add(john)
        session.commit()
        session.refresh(john)

        found = repo.find_by_email("john@example.com")

        # Every persisted column round-trips, and audit fields are populated.
        assert found.username == "john"
        assert found.email == "john@example.com"
        assert found.account_name == "ジョン"
        assert found.is_active is True
        assert found.hashed_password == "aaaaa"
        assert found.created_by == "john"
        assert found.created_at
        assert found.updated_by == "john"
        assert found.updated_at
def test_get_user_role(db: DbFixtureType):
    """A user linked to two roles reports both through get_role_names()."""
    repositories, session_factory = db
    repo = repositories.user_repository
    john = User(
        username="john",
        email="john@example.com",
        account_name="ジョン",
        hashed_password="aaaaa",
        created_by="john",
    )
    admin = Role("Admin", "")
    super_admin = Role("Super Admin", "")
    records = [
        john,
        admin,
        super_admin,
        UserRole(john.username, admin.id),
        UserRole(john.username, super_admin.id),
    ]
    with session_factory() as session:
        session.add_all(records)
        session.commit()
        for record in records:
            session.refresh(record)

        found = repo.find_by_username("john")

        assert len(found.user_role) == 2
        # Order of role rows is not guaranteed, so compare sorted lists.
        assert sorted(found.get_role_names()) == sorted(["Admin", "Super Admin"])
def test_create_user(db: DbFixtureType):
    """create_user persists the schema data and hashes the plaintext password."""
    payload = UserCreateSchema(
        username="emma",
        email="emma@example.com",
        account_name="エマ",
        is_active="True",
        password="plainplain",
        created_by="john",
    )
    repositories, _ = db
    repo = repositories.user_repository
    repo.create_user(payload)

    created = repo.find_by_username("emma")

    assert created.username == "emma"
    assert created.email == "emma@example.com"
    assert created.account_name == "エマ"
    assert created.is_active is True
    # The stored password must be a hash, never the plaintext.
    assert created.hashed_password != "plainplain"
    assert created.created_by == "john"
    assert created.created_at
    assert created.updated_by == "john"
    assert created.updated_at
def test_get_users(db: DbFixtureType):
    """get_users returns every stored user, preserving insertion order."""
    repositories, session_factory = db
    repo = repositories.user_repository
    seeds = [
        User(
            username="john",
            email="john@example.com",
            account_name="ジョン",
            hashed_password="plainplain",
            created_by="john",
        ),
        User(
            username="anny",
            email="anny@example.com",
            account_name="アニー",
            hashed_password="plainplain",
            created_by="john",
        ),
    ]
    with session_factory() as session:
        session.add_all(seeds)
        session.commit()
        for seed in seeds:
            session.refresh(seed)

        users = repo.get_users()

        assert len(users) == 2
        # Shared columns first, then the per-user fields, table-driven.
        expected = [
            ("john", "john@example.com", "ジョン"),
            ("anny", "anny@example.com", "アニー"),
        ]
        for got, (username, email, account_name) in zip(users, expected):
            assert got.username == username
            assert got.email == email
            assert got.account_name == account_name
            assert got.is_active is True
            assert got.hashed_password == "plainplain"
            assert got.created_by == "john"
            assert got.created_at
            assert got.updated_by == "john"
            assert got.updated_at
def test_get_users_with_skip_limit(db: DbFixtureType):
    """skip/limit paging: skipping the first user and limiting to one
    returns exactly the second inserted user."""
    repositories, session_factory = db
    repo = repositories.user_repository
    seeds = [
        User(
            username="john",
            email="john@example.com",
            account_name="ジョン",
            hashed_password="plainplain",
            created_by="john",
        ),
        User(
            username="anny",
            email="anny@example.com",
            account_name="アニー",
            hashed_password="plainplain",
            created_by="john",
        ),
        User(
            username="James",
            email="james@example.com",
            account_name="ジェームズ",
            hashed_password="plainplain",
            created_by="john",
        ),
    ]
    with session_factory() as session:
        session.add_all(seeds)
        session.commit()
        for seed in seeds:
            session.refresh(seed)

        page = repo.get_users(skip=1, limit=1)

        assert len(page) == 1
        only = page[0]
        assert only.username == "anny"
        assert only.email == "anny@example.com"
        assert only.account_name == "アニー"
        assert only.is_active is True
        assert only.hashed_password == "plainplain"
        assert only.created_by == "john"
        assert only.created_at
        assert only.updated_by == "john"
        assert only.updated_at
| 29.354978 | 80 | 0.663766 | 846 | 6,781 | 5.062648 | 0.088652 | 0.113472 | 0.081952 | 0.042027 | 0.782162 | 0.688303 | 0.65188 | 0.612421 | 0.581135 | 0.556619 | 0 | 0.010697 | 0.22799 | 6,781 | 230 | 81 | 29.482609 | 0.80745 | 0 | 0 | 0.605128 | 0 | 0 | 0.101902 | 0 | 0 | 0 | 0 | 0 | 0.297436 | 1 | 0.030769 | false | 0.076923 | 0.025641 | 0 | 0.05641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
510071411b69aed8738cceefd50874bfee413c3c | 146 | py | Python | python/reverse_string.py | jendama/training-kit | 738b9743e1f3948bc867f420f75efa47d0971217 | [
"MIT"
] | null | null | null | python/reverse_string.py | jendama/training-kit | 738b9743e1f3948bc867f420f75efa47d0971217 | [
"MIT"
] | null | null | null | python/reverse_string.py | jendama/training-kit | 738b9743e1f3948bc867f420f75efa47d0971217 | [
"MIT"
] | null | null | null | a = "HACKTOBERFEST"
# Print the string reversed character-by-character via a negative-step slice.
print(a[::-1])
# Print the words of a sentence in reverse order.
b = "I Love Fosti"
b = b.split(" ")
b.reverse()
print(" ".join(b))
5100b17784b04cdb6c2f4076aaca2a9b3a839c93 | 56,140 | py | Python | entry/main.py | way864/BattleTracker | 7204d613165b1c461ee301e5078cd4e2b7a072c4 | [
"MIT"
] | null | null | null | entry/main.py | way864/BattleTracker | 7204d613165b1c461ee301e5078cd4e2b7a072c4 | [
"MIT"
] | null | null | null | entry/main.py | way864/BattleTracker | 7204d613165b1c461ee301e5078cd4e2b7a072c4 | [
"MIT"
] | null | null | null | import math
import random
import json
import copy
from tkinter.constants import COMMAND
from zipfile import ZipFile
import PIL.Image
from PIL import ImageTk
import tkinter as tk
from tkinter import ttk, font, messagebox
from ttkthemes import ThemedStyle
from tooltip import *
from event_manager import EventManager
from calc import Calculator
from stat_collector import StatCollector
from quotes import Quote
from target import Target
from condition_info import InfoClass
from dice import DiceRoller
from player_window import PlayerWin
from starter import *
from undo_redo import ActionStack
from light_menu import *
from object_builder import ObjectBuilder
# Hidden, borderless Tk root: created eagerly so a root window exists before
# any other Toplevels/images are built, then withdrawn so it never shows.
# NOTE(review): presumably passed to BattleMap further down the file as the
# application root — confirm at the call site (not visible here).
map_win = tk.Tk()
map_win.overrideredirect(1)
map_win.withdraw()
class BattleMap():
def __init__(self, root):
    """Store shared fonts and wire the start window's buttons and close box."""
    self.root = root
    # Fonts reused throughout the UI.
    self.reg_font = ('Papyrus', '14')
    self.small_font = ("Papyrus", "9")
    self.big_font = ("Papyrus", "16")
    self.start_win = StartWindow(self.root)
    # Hook each start-window button to its handler.
    handlers = {
        self.start_win.btn_new_file: lambda: self.start_up_seq('new'),
        self.start_win.btn_open_existing: lambda: self.start_up_seq('open'),
    }
    for button, handler in handlers.items():
        button.config(command=handler)
    # Closing the start window quits the whole application.
    self.start_win.win_start.protocol('WM_DELETE_WINDOW', lambda: self.root.destroy())
def start_up_seq(self, opt):
    """Dispatch the start window's 'new' / 'open' choice.

    'new' opens the new-game dialog and wires its buttons; 'open' loads an
    existing save and, on success, tears down the start window and builds
    the main UI.
    """
    if opt == 'new':
        self.start_win.new_file()
        self.start_win.btn_start_game.config(command=lambda: self.new_game_btns('start'))
        self.start_win.btn_cancel.config(command=lambda: self.new_game_btns('cancel'))
        return
    if opt == 'open' and self.start_win.open_file():
        self.start_win.win_start.destroy()
        self.main_window()
def new_game_btns(self, btn):
    """React to the new-game dialog: 'start' begins the battle, 'cancel'
    dismisses the dialog."""
    if btn == 'cancel':
        self.start_win.game_start_win.destroy()
        return
    # Only proceed to the main UI if the new battle was actually created.
    if btn == 'start' and self.start_win.start_new_battle():
        self.start_win.win_start.destroy()
        self.main_window()
def _load_image(self, path, size):
    """Open an image file and return it resized as a Tk-compatible PhotoImage.

    Helper extracted to remove ~25 duplicated
    ``ImageTk.PhotoImage(image=PIL.Image.open(...).resize(...))`` pairs.
    """
    return ImageTk.PhotoImage(image=PIL.Image.open(path).resize(size))

def main_window(self):
    """Build the entire main battle-map UI.

    Reads map size / round / turn from the save archive, then constructs the
    top toolbar, scrollable map grid, round/initiative sidebar, tool buttons
    and the bottom controller pane, and finally places all saved tokens.
    """
    self.root.overrideredirect(0)
    game_title = self.root.game_name
    if len(game_title) > 32:
        # Keep long campaign names from blowing out the title bar.
        game_title = game_title[0:32] + "..."
    self.root.title(f"Battle Map | {game_title}")
    style = ThemedStyle(self.root)
    style.theme_use("equilux")
    self.root.configure(bg=style.lookup('TLabel', 'background'))
    # Window definition: battle metadata comes from the save archive.
    with ZipFile(self.root.filename, 'r') as savefile:
        battle_bytes = savefile.read('battle_info.json')
        battle_obj = json.loads(battle_bytes.decode('utf-8'))
    self.map_size = battle_obj['map_size']
    self.round = battle_obj['round']
    self.turn = battle_obj['turn']
    self.top_frame = ttk.Frame(master=self.root, borderwidth=2, relief='ridge')
    self.top_frame.pack(side='top', fill='both')
    self.top_frame.columnconfigure(1, weight=1)
    self.top_frame.rowconfigure(0, minsize=100, weight=1)
    self.quote_frame = ttk.Frame(master=self.root)
    self.quote_frame.pack(side='top', fill='x')
    self.quote_frame.columnconfigure(0, minsize=20)
    self.bottom_frame = ttk.Frame(master=self.root, borderwidth=2, relief='ridge')
    self.bottom_frame.pack(side='top', fill='both', expand=True)
    self.bottom_frame.columnconfigure(0, minsize=100)
    self.bottom_frame.columnconfigure(1, weight=1, minsize=500)
    self.bottom_frame.columnconfigure(2, minsize=150)
    self.bottom_frame.columnconfigure(3, minsize=50)
    self.bottom_frame.rowconfigure(0, weight=1, minsize=350)
    self.controller_frame = ttk.Frame(master=self.root)
    self.controller_frame.pack(side='top', fill='x')
    # Supporting subsystems (events, math, quotes, targeting, dice, etc.).
    self.em = EventManager(self.root)
    self.calculator = Calculator(self.root)
    self.quoter = Quote()
    self.count_quotes = 0
    self.target = Target(self.root)
    self.info = InfoClass(self.root)
    self.dice_roll = DiceRoller(self.root)
    self.copy_win = PlayerWin(self.root, self.map_size, game_title)
    self.go_back = ActionStack(self.root)
    # Board setup: title plus the row of top-level action buttons.
    lbl_map = ttk.Label(master=self.top_frame, text=game_title, font=('Papyrus', '16'))
    lbl_map.grid(row=0, column=1)
    btn_player_win = ttk.Button(master=self.top_frame, command=self.open_for_players, text="Player Window")
    btn_player_win.grid(row=0, column=2, sticky='se')
    btn_save = ttk.Button(master=self.top_frame, command=self.save_game, text="Save")
    btn_save.grid(row=0, column=3, sticky='se')
    btn_clear = ttk.Button(master=self.top_frame, command=self.clear_map, text="Clear Map")
    btn_clear.grid(row=0, column=4, sticky='se')
    btn_input = ttk.Button(master=self.top_frame, command=self.input_creature_window, text="New Creature")
    btn_input.grid(row=0, column=5, sticky='se')
    btn_obj = ttk.Button(master=self.top_frame, command=self.input_object_window, text="New Object")
    btn_obj.grid(row=0, column=6, sticky='se')
    btn_reset = ttk.Button(master=self.top_frame, command=lambda: self.refresh_map(reset=True), text="Reset Map")
    btn_reset.grid(row=0, column=7, sticky='se')
    btn_restart = ttk.Button(master=self.top_frame, command=self.full_reset, text="Reset Battle")
    btn_restart.grid(row=0, column=8, sticky='se')
    btn_close_all = ttk.Button(master=self.top_frame, command=self.root.destroy, text="Close All")
    btn_close_all.grid(row=0, column=9, sticky='se')
    self.lbl_quote = ttk.Label(master=self.quote_frame, text="", font=self.reg_font)
    self.lbl_quote.grid(row=0, column=0, sticky='w', pady=5)
    self.find_quote()
    self.side_board = ttk.Frame(master=self.bottom_frame)
    self.side_board.grid(row=0, column=0, padx=5, pady=10, sticky="nw")
    self.side_count = 0
    # Scrollable canvas that hosts the map grid frame.
    canvas_frame = ttk.Frame(master=self.bottom_frame, borderwidth=2, relief='ridge')
    self.grid_canvas = tk.Canvas(master=canvas_frame, bg='gray28', bd=0, highlightthickness=0)
    grid_scroll_vert = ttk.Scrollbar(master=canvas_frame, command=self.grid_canvas.yview)
    grid_scroll_horz = ttk.Scrollbar(master=self.bottom_frame, orient='horizontal', command=self.grid_canvas.xview)
    self.grid_frame = ttk.Frame(master=self.grid_canvas)
    canvas_frame.grid(row=0, column=1, sticky="nsew")
    self.grid_canvas.pack(side='left', fill='both', expand=True)
    self.grid_canvas.config(yscrollcommand=grid_scroll_vert.set, xscrollcommand=grid_scroll_horz.set)
    grid_scroll_vert.pack(side='right', fill='y')
    grid_scroll_horz.grid(row=1, column=1, sticky='ew')
    self.grid_canvas.create_window((4,4), window=self.grid_frame, anchor='nw', tags='self.grid_frame')
    self.grid_frame.bind("<Configure>", self._on_config)
    self.grid_canvas.bind('<Enter>', self._on_enter_canvas)
    self.grid_canvas.bind('<Leave>', self._on_leave_canvas)
    self.grid_frame.lower()
    self.round_bar = ttk.Frame(master=self.bottom_frame)
    self.tool_bar = ttk.Frame(master=self.bottom_frame)
    self.round_bar.grid(row=0, column=2, padx=5, pady=10, sticky="nw")
    self.tool_bar.grid(row=0, column=3, padx=5, pady=10, sticky="nw")
    # Icon/token images (references kept on widgets or self so Tk does not
    # garbage-collect them).
    undo_icon = self._load_image("entry\\bin\\red_undo.png", (20, 20))
    redo_icon = self._load_image("entry\\bin\\red_redo.png", (20, 20))
    move_icon = self._load_image("entry\\bin\\red_icons8-circled-down-left-32.png", (20, 20))
    trig_icon = self._load_image("entry\\bin\\red_trig.png", (20, 20))
    target_icon = self._load_image("entry\\bin\\red_target.png", (20, 20))
    cond_info_icon = self._load_image("entry\\bin\\red_page_icon.png", (20, 20))
    self.turn_icon = self._load_image("entry\\bin\\swords.png", (20, 20))
    d20_icon = self._load_image("entry\\bin\\red_role-playing.png", (20, 20))
    highlight_img = self._load_image("entry\\bin\\highlight.png", (20, 20))
    self.ally_img = self._load_image("entry\\bin\\ally_token.png", (27, 27))
    self.enemy_img = self._load_image("entry\\bin\\enemy_token.png", (27, 27))
    self.bystander_img = self._load_image("entry\\bin\\bystander_token.png", (27, 27))
    self.dead_img = self._load_image("entry\\bin\\dead_token.png", (27, 27))
    self.up_btn_img = self._load_image("entry\\bin\\up_button.png", (40, 40))
    self.down_btn_img = self._load_image("entry\\bin\\down_button.png", (40, 40))
    self.left_btn_img = self._load_image("entry\\bin\\left_button.png", (40, 40))
    self.right_btn_img = self._load_image("entry\\bin\\right_button.png", (40, 40))
    self.nw_btn_img = self._load_image("entry\\bin\\nw_button.png", (40, 40))
    self.ne_btn_img = self._load_image("entry\\bin\\ne_button.png", (40, 40))
    self.sw_btn_img = self._load_image("entry\\bin\\sw_button.png", (40, 40))
    self.se_btn_img = self._load_image("entry\\bin\\se_button.png", (40, 40))
    self.z_up_btn_img = self._load_image("entry\\bin\\z_up.png", (40, 40))
    self.undo_move_img = self._load_image("entry\\bin\\undo_move.png", (40, 40))
    self.z_down_btn_img = self._load_image("entry\\bin\\z_down.png", (40, 40))
    self.map_frames = []
    self.root.token_list = []
    self.root.obj_list = []
    self.root.copy_win_open = False
    self.root.light_params = {}
    # Grid labels (1-based coordinates along the top and left edges).
    for col_spot in range(self.map_size[1]):
        lbl_grid_top = ttk.Label(master=self.grid_frame, text=col_spot+1, font=self.small_font)
        lbl_grid_top.grid(row=0, column=col_spot+1)
        self.grid_frame.columnconfigure(col_spot+1, minsize=33)
    for row_spot in range(self.map_size[0]):
        lbl_grid_side = ttk.Label(master=self.grid_frame, text=row_spot+1, font=self.small_font)
        lbl_grid_side.grid(row=row_spot+1, column=0)
        self.grid_frame.rowconfigure(row_spot+1, minsize=33)
    self.grid_frame.columnconfigure(0, minsize=33)
    self.grid_frame.rowconfigure(0, minsize=33)
    # Space frames: one tk.Frame per map cell, plus a parallel token-label grid.
    self.token_labels = []
    for i in range(self.map_size[0]):
        self.map_frames.append([])
        self.token_labels.append([])
        for j in range(self.map_size[1]):
            self.space = tk.Frame(master=self.grid_frame, relief=tk.RAISED, borderwidth=1, bg='gray28')
            self.space.grid(row=i+1, column=j+1, sticky='nsew')
            self.space.coord = (j, i)
            CreateToolTip(self.space, text=f"{i+1}, {j+1}")
            self.map_frames[i].append(self.space)
            self.token_labels[i].append(None)
    self.initialize()
    # Undo/redo buttons (disabled until the action stack has entries).
    go_back_frame = ttk.Frame(master=self.top_frame)
    go_back_frame.grid(row=0, column=0, sticky='nw')
    self.btn_undo = tk.Button(master=go_back_frame, command=lambda: self.time_travel(True), image=undo_icon, bd=0, bg='gray28', activebackground='gray28')
    self.btn_undo.grid(row=0, column=0, padx=5, pady=5, sticky='nw')
    self.btn_undo.image = undo_icon
    self.btn_undo['state'] = 'disabled'
    self.btn_redo = tk.Button(master=go_back_frame, command=lambda: self.time_travel(False), image=redo_icon, bd=0, bg='gray28', activebackground='gray28')
    self.btn_redo.grid(row=0, column=1, padx=5, pady=5, sticky='nw')
    self.btn_redo.image = redo_icon
    self.btn_redo['state'] = 'disabled'
    # Round bar ("S" marks the pre-battle surprise/setup round).
    lbl_round_title = ttk.Label(master=self.round_bar, text="Round: ", font=self.big_font)
    lbl_round_title.grid(row=0, column=0, sticky='e')
    tmp_round = "S" if self.round == 0 else self.round
    self.lbl_round = ttk.Label(master=self.round_bar, text=tmp_round, font=self.big_font, borderwidth=1, relief=tk.RAISED, width=3, anchor=tk.CENTER)
    self.lbl_round.grid(row=0, column=1, sticky='w')
    self.initiative_frame = ttk.Frame(master=self.round_bar)
    self.initiative_frame.grid(row=1, column=0, columnspan=2, sticky='ew')
    self.initiative_frame.columnconfigure([0,1], weight=1)
    btn_next_turn = ttk.Button(master=self.round_bar, text="Turn Complete", command=self.next_turn, width=18)
    btn_next_turn.grid(row=2, column=0, columnspan=2)
    btn_next_round = ttk.Button(master=self.round_bar, text="Round Complete", command=self.next_round, width=18)
    btn_next_round.grid(row=3, column=0, columnspan=2)
    btn_reset_rounds = ttk.Button(master=self.round_bar, text="Reset Rounds", command=self.reset_round, width=18)
    btn_reset_rounds.grid(row=4, column=0, columnspan=2)
    # Tool bar buttons.
    self.btn_move = ttk.Button(master=self.tool_bar, command=self.move_token, image=move_icon)
    self.btn_move.grid(row=0, column=0, sticky="n")
    self.btn_move.image = move_icon
    CreateToolTip(self.btn_move, text="Move Token", left_disp=True)
    self.btn_trig = ttk.Button(master=self.tool_bar, command=self.open_trig, image=trig_icon)
    self.btn_trig.grid(row=1, column=0, sticky='n')
    self.btn_trig.image = trig_icon
    CreateToolTip(self.btn_trig, text="Distance", left_disp=True)
    self.btn_target = ttk.Button(master=self.tool_bar, command=self.target_item,image=target_icon)
    self.btn_target.grid(row=2, column=0, sticky='n')
    self.btn_target.image = target_icon
    CreateToolTip(self.btn_target, text="Target", left_disp=True)
    self.btn_cond_info = ttk.Button(master=self.tool_bar, command=self.show_cond_info, image=cond_info_icon)
    self.btn_cond_info.grid(row=3, column=0, sticky='n')
    self.btn_cond_info.image = cond_info_icon
    CreateToolTip(self.btn_cond_info, text="Condition Info", left_disp=True)
    self.btn_dice_roller = ttk.Button(master=self.tool_bar, command=self.open_dice_roller, image=d20_icon)
    self.btn_dice_roller.grid(row=4, column=0, sticky='n')
    self.btn_dice_roller.image = d20_icon
    CreateToolTip(self.btn_dice_roller, text="Dice Roller", left_disp=True)
    self.btn_field_light = ttk.Button(master=self.tool_bar, command=self.field_light, image=highlight_img)
    self.btn_field_light.grid(row=5, column=0, sticky='n')
    self.btn_field_light.image = highlight_img
    CreateToolTip(self.btn_field_light, text="Field Highlight", left_disp=True)
    # Controller pane: d-pad for token movement.
    self.controller_frame.columnconfigure(0, weight=1)
    dpad_frame = ttk.Frame(master=self.controller_frame)
    dpad_frame.grid(row=0, column=0, rowspan=4, sticky='w', padx=100)
    # (direction passed to dpad_move, widget .name tag, image, grid row, grid col)
    # NOTE: the up button is historically tagged 'up' while its direction is 'n'.
    dpad_specs = [
        ('nw', 'nw', self.nw_btn_img, 0, 0),
        ('n', 'up', self.up_btn_img, 0, 1),
        ('ne', 'ne', self.ne_btn_img, 0, 2),
        ('w', 'w', self.left_btn_img, 1, 0),
        ('e', 'e', self.right_btn_img, 1, 2),
        ('sw', 'sw', self.sw_btn_img, 2, 0),
        ('s', 's', self.down_btn_img, 2, 1),
        ('se', 'se', self.se_btn_img, 2, 2),
    ]
    for direction, tag, img, row, col in dpad_specs:
        btn = tk.Button(master=dpad_frame, image=img, bg='gray28', bd=0, activebackground='gray28', command=lambda d=direction: self.dpad_move(d))
        btn.grid(row=row, column=col)
        btn.image = img
        btn.name = tag
    btn_z_up = tk.Button(master=dpad_frame, image=self.z_up_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.zpad('+'))
    btn_z_up.grid(row=0, column=3, padx=10)
    btn_z_up.image = self.z_up_btn_img
    btn_undo_move = tk.Button(master=dpad_frame, image=self.undo_move_img, bg='gray28', bd=0, activebackground='gray28', command=self.undo_move)
    btn_undo_move.grid(row=1, column=3, padx=10)
    btn_undo_move.image = self.undo_move_img
    btn_undo_move.name = 'undom'
    btn_z_down = tk.Button(master=dpad_frame, image=self.z_down_btn_img, bg='gray28', bd=0, activebackground='gray28', command=lambda: self.zpad('-'))
    btn_z_down.grid(row=2, column=3, padx=10)
    btn_z_down.image = self.z_down_btn_img
    # Center cell doubles as the pending-z-step indicator (see _on_numpad_keys).
    self.z_frame = tk.Frame(master=dpad_frame, bg='gray28')
    self.z_frame.grid(row=1, column=1, sticky='nsew')
    self.z_frame.name = 'zf'
    # Target selection / heal-damage controls.
    cont_btn_frame = ttk.Frame(master=self.controller_frame)
    cont_btn_frame.grid(row=0, column=1, rowspan=4, sticky='e', padx=20)
    btn_turn_complete = ttk.Button(master=cont_btn_frame, text="Turn Complete", command=self.next_turn, width=19)
    btn_turn_complete.grid(row=0, column=0, columnspan=2)
    self.cont_targets = ttk.Combobox(master=cont_btn_frame, width=18, state='readonly')
    self.cont_targets.grid(row=1, column=0, columnspan=2)
    self.cont_targets.bind("<<ComboboxSelected>>", self._on_select_target)
    self.target_names = []
    self.ent_target_delta = ttk.Entry(master=cont_btn_frame, width=20)
    self.ent_target_delta.grid(row=2, column=0, columnspan=2, pady=5)
    self.ent_target_delta.insert(0, '0')
    self.ent_target_delta.bind("<FocusIn>", lambda e: self._on_delta_focus(event=e, typ='in'))
    self.ent_target_delta.bind("<FocusOut>", lambda e: self._on_delta_focus(event=e, typ='out'))
    btn_heal = ttk.Button(master=cont_btn_frame, text='Heal', command=lambda: self.target_hp('heal'), width=8)
    btn_heal.grid(row=3, column=0, padx=5, pady=5)
    btn_dmg = ttk.Button(master=cont_btn_frame, text='Damage', command=lambda: self.target_hp('dmg'), width=8)
    btn_dmg.grid(row=3, column=1, padx=5, pady=5)
    lbl_ac = ttk.Label(master=cont_btn_frame, text="AC: ", font=self.reg_font)
    lbl_ac.grid(row=0, column=2, sticky='w', pady=5)
    lbl_max_hp = ttk.Label(master=cont_btn_frame, text="Max HP: ", font=self.reg_font)
    lbl_max_hp.grid(row=1, column=2, sticky='w', pady=5)
    lbl_curr_hp = ttk.Label(master=cont_btn_frame, text="Current HP: ", font=self.reg_font)
    lbl_curr_hp.grid(row=2, column=2, sticky='w', pady=5)
    lbl_temp_hp = ttk.Label(master=cont_btn_frame, text="Temp HP: ", font=self.reg_font)
    lbl_temp_hp.grid(row=3, column=2, sticky='w', pady=5)
    self.lbl_target_ac = ttk.Label(master=cont_btn_frame, text="", font=self.reg_font)
    self.lbl_target_ac.grid(row=0, column=3, sticky='w', padx=5, pady=5)
    self.lbl_target_max_hp = ttk.Label(master=cont_btn_frame, text="", font=self.reg_font)
    self.lbl_target_max_hp.grid(row=1, column=3, sticky='w', padx=5, pady=5)
    self.lbl_target_hp = ttk.Label(master=cont_btn_frame, text="", font=self.reg_font)
    self.lbl_target_hp.grid(row=2, column=3, sticky='w', padx=5, pady=5)
    self.lbl_target_temp_hp = ttk.Label(master=cont_btn_frame, text="", font=self.reg_font)
    self.lbl_target_temp_hp.grid(row=3, column=3, sticky='w', padx=5, pady=5)
    # Current-turn / position / movement read-outs.
    lbl_title_turn = ttk.Label(master=self.controller_frame, text="Current Turn", font=self.big_font)
    lbl_title_turn.grid(row=0, column=2, sticky='e', padx=20)
    self.lbl_current_turn = tk.Label(master=self.controller_frame, text="", font=self.reg_font, bg='gray28')
    self.lbl_current_turn.grid(row=1, column=2, sticky='e', padx=20)
    lbl_title_pos = ttk.Label(master=self.controller_frame, text="Position", font=self.big_font)
    lbl_title_pos.grid(row=2, column=2, sticky='e', padx=20)
    self.lbl_position = ttk.Label(master=self.controller_frame, text="", font=self.reg_font)
    self.lbl_position.grid(row=3, column=2, sticky='e', padx=20)
    lbl_max_move_title = ttk.Label(master=self.controller_frame, text="Movement Speed", font=self.big_font)
    lbl_max_move_title.grid(row=0, column=3, sticky='e', padx=20)
    self.lbl_max_move = tk.Label(master=self.controller_frame, text="", font=self.reg_font, bg='gray28', fg='gray70')
    self.lbl_max_move.grid(row=1, column=3, sticky='e', padx=20)
    lbl_amount_move_title = ttk.Label(master=self.controller_frame, text="Feet Moved", font=self.big_font)
    lbl_amount_move_title.grid(row=2, column=3, sticky='e', padx=20)
    self.lbl_amount_moved = tk.Label(master=self.controller_frame, text="", font=self.reg_font, bg='gray28', fg='gray70')
    self.lbl_amount_moved.grid(row=3, column=3, sticky='e', padx=20)
    self.z_delta = 0
    self.root.bind("<Key>", self._on_numpad_keys)
    self.controller_frame.bind("<Button-1>", self._on_defocus)
    self.place_tokens()
    self.root.deiconify()
def _on_config(self, event):
    """Keep the canvas scroll region in sync with the inner frame's bounds."""
    bounds = self.grid_canvas.bbox('all')
    self.grid_canvas.configure(scrollregion=bounds)
def _on_enter_canvas(self, event):
    """Enable mouse-wheel scrolling while the pointer is over the map canvas."""
    wheel_bindings = {
        '<MouseWheel>': self._on_mousewheel,
        '<Shift-MouseWheel>': self._on_shift_mousewheel,
    }
    for sequence, handler in wheel_bindings.items():
        self.grid_canvas.bind_all(sequence, handler)
def _on_leave_canvas(self, event):
    """Disable mouse-wheel scrolling once the pointer leaves the map canvas."""
    for sequence in ('<MouseWheel>', '<Shift-MouseWheel>'):
        self.grid_canvas.unbind_all(sequence)
def _on_mousewheel(self, event):
    """Scroll the map vertically.

    event.delta is a multiple of 120 per wheel notch on Windows — TODO confirm
    behaviour on other platforms.
    """
    units = int(-1 * (event.delta / 120))
    self.grid_canvas.yview_scroll(units, 'units')
def _on_shift_mousewheel(self, event):
    """Scroll the map horizontally (Shift held while wheeling)."""
    units = int(-1 * (event.delta / 120))
    self.grid_canvas.xview_scroll(units, 'units')
def _on_select_target(self, event):
    """Show AC/HP details for the creature picked in the target combobox.

    Fix: previously ``sel_obj`` was only assigned when a token matched the
    combobox value, so a non-matching selection raised NameError on the
    first ``.config`` call; now the handler returns early instead.
    """
    selected_name = self.cont_targets.get()
    sel_obj = next(
        (being for being in self.root.token_list if being['name'] == selected_name),
        None,
    )
    if sel_obj is None:
        return
    self.lbl_target_ac.config(text=sel_obj['ac'])
    self.lbl_target_max_hp.config(text=sel_obj['max_HP'])
    self.lbl_target_hp.config(text=sel_obj['current_HP'])
    self.lbl_target_temp_hp.config(text=sel_obj['temp_HP'])
def _on_numpad_keys(self, event):
    """Global key handler: numpad/arrow keys drive the movement controller.

    Digits 1-9 (and their non-numlock equivalents End/Down/Next/Left/Clear/
    Right/Home/Up/Prior) map to compass directions; 0/Insert undoes the last
    move; +/- queue a vertical (z-axis) step; Return ends the turn.
    """
    # Controller movements
    if event.keysym == '0' or event.keysym == 'Insert':
        self.undo_move()
    elif event.keysym == '1' or event.keysym == 'End':
        self.dpad_move('sw')
    elif event.keysym == '2' or event.keysym == 'Down':
        self.dpad_move('s')
    elif event.keysym == '3' or event.keysym == 'Next':
        self.dpad_move('se')
    elif event.keysym == '4' or event.keysym == 'Left':
        self.dpad_move('w')
    elif event.keysym == '5' or event.keysym == 'Clear':
        # Numpad-5 applies the pending z step (queued via +/-), if any.
        if self.z_delta != 0:
            if self.z_delta == 1:
                self.dpad_move('+')
            elif self.z_delta == -1:
                self.dpad_move('-')
    elif event.keysym == '6' or event.keysym == 'Right':
        self.dpad_move('e')
    elif event.keysym == '7' or event.keysym == 'Home':
        self.dpad_move('nw')
    elif event.keysym == '8' or event.keysym == 'Up':
        self.dpad_move('n')
    elif event.keysym == '9' or event.keysym == 'Prior':
        self.dpad_move('ne')
    elif event.keysym == 'minus':
        self.zpad('-')
    elif event.keysym == 'plus':
        self.zpad('+')
    elif event.keysym == 'Return':
        self.next_turn()
    # Runs after every keypress: clear the z indicator when no z step is pending.
    if self.z_delta == 0:
        self.z_frame.config(bg='gray28')
        self.root.unbind_all("<Button-1>")
def _on_delta_focus(self, event, typ):
    """Suspend the global key handler while the HP-delta entry has focus,
    and restore it on focus-out (so typing digits doesn't move tokens)."""
    if typ == 'in':
        self.root.unbind("<Key>")
        return
    if typ == 'out':
        self.root.bind("<Key>", self._on_numpad_keys)
def _on_defocus(self, event):
    """Move keyboard focus to the clicked widget (defocuses text entries)."""
    clicked = event.widget
    clicked.focus_set()
def initialize(self):
    """Reload the creature and object token lists from the save archive."""
    with ZipFile(self.root.filename, "r") as savefile:
        creatures = json.loads(savefile.read('creatures.json').decode('utf-8'))
        objects = json.loads(savefile.read('objects.json').decode('utf-8'))
    # The JSON files are keyed dicts; only their values are tracked here.
    self.root.token_list = list(creatures.values())
    self.root.obj_list = list(objects.values())
def place_tokens(self):
    """Draw every object and creature token onto the battle map.

    Rebuilds self.initiative_holder (beings that rolled initiative),
    self.target_names (targeting combo-box values) and the occupied-space
    list, then refreshes the initiative panel.
    """
    self.initiative_holder = {}
    spaces_taken = []  # (row, col, elevation) triples already occupied
    self.target_names = []
    # --- Map objects (terrain/props) first.
    for item in self.root.obj_list:
        occupied = False
        if item["coordinate"][0] != "" and item["coordinate"][1] != "":
            # coordinate is stored as [col, row, elevation] strings.
            row_pos = int(item["coordinate"][1])
            col_pos = int(item["coordinate"][0])
            self.target_names.append(item['name'])
            for space_tuple in spaces_taken:
                if space_tuple[0] == row_pos and space_tuple[1] == col_pos and space_tuple[2] == int(item["coordinate"][2]):
                    occupied = True
            if occupied == False:
                spaces_taken.append((row_pos, col_pos, int(item["coordinate"][2])))
                # Round the object's footprint up to whole 5 ft squares (min 5).
                o_length = item["length"]
                o_width = item["width"]
                f_len = 5 * round(o_length / 5)
                if f_len < 5:
                    f_len = 5
                f_wid = 5 * round(o_width / 5)
                if f_wid < 5:
                    f_wid = 5
                o_col = int(f_wid / 5)
                o_row = int(f_len / 5)
                for x in range(o_col):
                    col_pos = int(item["coordinate"][0]) + x
                    for y in range(o_row):
                        row_pos = int(item["coordinate"][1]) + y
                        # NOTE(review): the image file is re-read and resized for
                        # every covered square; could be loaded once per object.
                        obj_img = ImageTk.PhotoImage(image=PIL.Image.open(item["img_ref"]).resize((30,30)))
                        lbl_unit = tk.Label(master=self.map_frames[col_pos][row_pos], image=obj_img, bg="gray28", borderwidth=0)
                        lbl_unit.image = obj_img  # keep a reference so Tk does not GC the image
                        lbl_unit.coord = (row_pos, col_pos)
                        lbl_unit.pack(fill='both', expand=True, padx=2, pady=2)
                        CreateToolTip(lbl_unit, text=f"{item['name']}: {row_pos}, {col_pos}", left_disp=True)
    # --- Creature tokens.
    for being in self.root.token_list:
        token_type = being["type"]
        if token_type == "ally":
            token_img = self.ally_img
        elif token_type == "enemy":
            token_img = self.enemy_img
        elif token_type == "bystander":
            token_img = self.bystander_img
        elif token_type == "dead":
            token_img = self.dead_img
        else:
            raise NameError("Token type not specified.")
        occupied = False
        if being["coordinate"][0] != "" and being["coordinate"][1] != "":
            row_pos = int(being["coordinate"][1])
            col_pos = int(being["coordinate"][0])
            self.target_names.append(being['name'])
            for space_tuple in spaces_taken:
                if space_tuple[0] == row_pos and space_tuple[1] == col_pos and space_tuple[2] == int(being["coordinate"][2]):
                    occupied = True
            # A being whose square is already taken is silently not drawn.
            if occupied == False:
                spaces_taken.append((row_pos, col_pos, int(being["coordinate"][2])))
                lbl_unit = tk.Label(master=self.map_frames[col_pos][row_pos], image=token_img, bg="gray28", borderwidth=0)
                lbl_unit.image = token_img
                lbl_unit.coord = (row_pos, col_pos)
                lbl_unit.pack(fill='both', expand=True, padx=2, pady=2)
                self.token_labels[col_pos][row_pos] = lbl_unit
                CreateToolTip(lbl_unit, text="{0}, {1}".format(being["name"], being["coordinate"][2]), left_disp=True)
                if being['initiative'] != math.inf:
                    self.initiative_holder[being['name']] = being
                # Large+ creatures cover a square block of additional cells.
                if being["size"] == "large" or being["size"] == "huge" or being["size"] == "gargantuan":
                    if being["size"] == "large":
                        space_need = 4
                    elif being["size"] == "huge":
                        space_need = 9
                    else:
                        space_need = 16
                    row_offset = 0
                    col_offset = 0
                    go_to_next_row = math.sqrt(space_need)
                    for i in range(1, space_need):
                        if i < space_need:
                            col_offset += 1
                            if col_offset == go_to_next_row:
                                col_offset = 0
                                row_offset += 1
                            row_pos = int(being["coordinate"][1]) + row_offset
                            col_pos = int(being["coordinate"][0]) + col_offset
                            lbl_unit = tk.Label(master=self.map_frames[col_pos][row_pos], image=token_img, bg="gray28", borderwidth=0)
                            lbl_unit.image = token_img
                            lbl_unit.coord = (row_pos, col_pos)
                            lbl_unit.pack(fill='both', expand=True)
                            CreateToolTip(lbl_unit, text="{0}, {1}".format(being["name"], being["coordinate"][2]), left_disp=True)
                        else:
                            # Unreachable: range(1, space_need) guarantees i < space_need.
                            messagebox.showerror("Internal Error", "Restart program\nError 0x006")
                            return
        else:
            # No coordinate yet: park the token on the side board.
            self.unused_tokens(being, token_img)
    self.cont_targets.config(values=self.target_names)
    self.refresh_initiatives()
def unused_tokens(self, creature, token_img):
    """Place a token that has no map coordinate onto the two-column side board."""
    next_row, next_col = divmod(self.side_count, 2)
    lbl_side_unit = tk.Label(master=self.side_board, image=token_img, bg="gray28", borderwidth=0)
    lbl_side_unit.grid(row=next_row, column=next_col, padx=5, pady=5, sticky="ne")
    lbl_side_unit.image = token_img  # keep a reference so Tk does not GC the image
    CreateToolTip(lbl_side_unit, text=creature["name"])
    self.side_count += 1
def post_initiatives(self):
    """Rebuild the initiative panel and highlight whose turn it is.

    Sorts initiative_holder descending by initiative, lays out name/value
    labels, resets self.move_path to the active being's current position,
    colours its map cell and updates the turn/status labels.
    """
    init_dict_in_order = {k:v for k, v in sorted(self.initiative_holder.items(), key= lambda item: item[1]['initiative'], reverse=True)}
    order_count = 0
    # Marker icon placed in the row of the active turn.
    lbl_turn_img = tk.Label(master=self.initiative_frame, image=self.turn_icon, bg="gray28", borderwidth=0)
    lbl_turn_img.grid(row=self.turn, column=0, sticky='w')
    lbl_turn_img.image = self.turn_icon
    self.move_path = []
    for next_up in init_dict_in_order.items():
        if next_up[1]['initiative'] != math.inf and next_up[1]['type'] != 'dead':
            lbl_your_turn = ttk.Label(master=self.initiative_frame, text=f"{next_up[0]}: ", font=self.small_font)
            lbl_your_turn.grid(row=order_count, column=1, sticky='w')
            lbl_your_init = ttk.Label(master=self.initiative_frame, text=next_up[1]['initiative'], font=self.small_font)
            lbl_your_init.grid(row=order_count, column=2, sticky='e')
            if order_count == self.turn:
                # This being acts now: seed the movement path and status labels.
                self.turn_obj = next_up[1]
                curr_pos = (int(self.turn_obj['coordinate'][0]), int(self.turn_obj['coordinate'][1]), int(self.turn_obj['coordinate'][2]))
                self.move_path.append(curr_pos)
                self.lbl_current_turn.config(text=self.turn_obj['name'])
                self.lbl_max_move.config(text=self.turn_obj['speed'])
                self.lbl_position.config(text=f"{curr_pos[0]+1}: {curr_pos[1]+1}: {curr_pos[2]}")
                self.lbl_amount_moved.config(text="0")
                self.map_frames[curr_pos[0]][curr_pos[1]].config(bg='orange3')
                # Colour-code the current-turn label by combatant status.
                if self.turn_obj['status'] == 'PC':
                    self.lbl_current_turn.config(fg='green3')
                elif self.turn_obj['status'] == 'Monster':
                    self.lbl_current_turn.config(fg='orange3')
                else:
                    self.lbl_current_turn.config(fg='DodgerBlue2')
                if self.root.copy_win_open:
                    # Hide non-PC names from the player-facing window.
                    if self.turn_obj['status'] != 'PC':
                        self.copy_win.set_turn_lbl("X")
                    else:
                        self.copy_win.set_turn_lbl(self.turn_obj['name'])
            order_count += 1
def refresh_initiatives(self):
    """Clear the initiative panel and map highlights, then rebuild them."""
    for widget in self.initiative_frame.grid_slaves():
        widget.destroy()
    for column in self.map_frames:
        for frm in column:
            frm.config(bg='gray28')
    self.post_initiatives()
def next_turn(self, not_from_redo=True):
    """Advance the initiative order by one combatant.

    Persists the mover's final path position back onto its token and
    redraws; wraps to next_round() when the last combatant has acted.

    :param bool not_from_redo: False when replayed by the redo machinery,
        which suppresses logging a fresh undo entry.
    """
    self.lbl_amount_moved.config(bg='gray28')
    if not_from_redo:
        self.log_action('turn button')
    on_board_inits = self.initiative_holder  # alias: deletions affect the real dict
    # Purge combatants that never rolled initiative. The original compared
    # each being dict (and the dict's *keys*) against math.inf inside a
    # bounded while loop, so nothing was ever removed; compare the
    # 'initiative' field instead. (place_tokens already filters these out,
    # so this remains a defensive cleanup.)
    for key in [k for k, v in on_board_inits.items() if v['initiative'] == math.inf]:
        del on_board_inits[key]
    self.turn += 1
    if self.turn > len(self.initiative_holder) - 1:
        self.next_round()
    else:
        # Write the final position of the previous mover back to its token.
        for being in self.root.token_list:
            if being['name'] == self.turn_obj['name']:
                being['coordinate'] = [str(self.move_path[-1][0]), str(self.move_path[-1][1]), str(self.move_path[-1][2])]
        if self.root.copy_win_open:
            self.copy_win.gray_map()
        self.refresh_map()
def next_round(self, not_from_redo=True):
    """Begin a new round: bump the counter, reset the turn, persist position.

    :param bool not_from_redo: False when replayed by the redo machinery.
    """
    if not_from_redo:
        self.log_action('round button', {'turn': self.turn})
    self.round += 1
    self.lbl_round.config(text=self.round)
    self.turn = 0
    for being in self.root.token_list:
        if being['name'] == self.turn_obj['name']:
            being['coordinate'] = [str(coord) for coord in self.move_path[-1]]
    if self.root.copy_win_open:
        self.copy_win.gray_map()
    self.refresh_map()
def reset_round(self, not_from_redo=True):
    """Return the battle to its starting state ('S' round label, turn 0).

    :param bool not_from_redo: False when replayed by the redo machinery.
    """
    if not_from_redo:
        self.log_action('reset round', {'round': self.round, 'turn': self.turn})
    self.round = 0
    self.lbl_round.config(text="S")
    self.turn = 0
    self.refresh_map()
def refresh_map(self, reset=False):
    """Destroy every placed token (board and side panel) and redraw the map.

    :param bool reset: reload the token/object lists from disk first.
    """
    for column in self.map_frames:
        for cell in column:
            for token in cell.pack_slaves():
                token.destroy()
    for side_token in self.side_board.grid_slaves():
        side_token.destroy()
    self.side_count = 0
    if reset:
        self.initialize()
    self.place_tokens()
    if self.root.copy_win_open:
        self.copy_win.update_players()
    self.refresh_initiatives()
def open_for_players(self):
    """Open the player-facing mirror window, then redraw so it is populated."""
    self.copy_win.start_win()
    self.refresh_map()
def save_game(self):
    """Serialize tokens, objects and battle state into the zipped save file.

    Rewrites the archive at self.root.filename with battle_info.json,
    creatures.json and objects.json (keyed by name), then clears the
    undo/redo history.
    """
    new_token_dict = {being["name"]: being for being in self.root.token_list}
    new_object_dict = {obj["name"]: obj for obj in self.root.obj_list}
    battle_dict = {
        "map_size": self.map_size,
        "round": self.round,
        "turn": self.turn,
    }
    with ZipFile(self.root.filename, "w") as savefile:
        savefile.writestr('battle_info.json', json.dumps(battle_dict, indent=4))
        savefile.writestr('creatures.json', json.dumps(new_token_dict, indent=4))
        savefile.writestr('objects.json', json.dumps(new_object_dict, indent=4))
    self.go_back.clear_all()
def clear_map(self):
    """Log an undo snapshot, blank every token's coordinate, and redraw."""
    self.log_action('list', copy.deepcopy(self.root.token_list))
    for being in self.root.token_list:
        being["coordinate"] = ['', '', '']
    self.refresh_map()
def dpad_move(self, dir):
    """Move the active token one square in compass direction `dir`.

    '+'/'-' apply only the pending elevation change. Updates the move
    path, map highlighting, position label and distance-moved counter.
    """
    last_pos = copy.deepcopy(self.move_path[-1])
    # New position (row, col, elevation); elevation absorbs any armed z_delta.
    if dir == 'n':
        curr_pos = (last_pos[0] - 1, last_pos[1], last_pos[2] + self.z_delta)
    elif dir == 's':
        curr_pos = (last_pos[0] + 1, last_pos[1], last_pos[2] + self.z_delta)
    elif dir == 'w':
        curr_pos = (last_pos[0], last_pos[1] - 1, last_pos[2] + self.z_delta)
    elif dir == 'e':
        curr_pos = (last_pos[0], last_pos[1] + 1, last_pos[2] + self.z_delta)
    elif dir == 'ne':
        curr_pos = (last_pos[0] - 1, last_pos[1] + 1, last_pos[2] + self.z_delta)
    elif dir == 'se':
        curr_pos = (last_pos[0] + 1, last_pos[1] + 1, last_pos[2] + self.z_delta)
    elif dir == 'sw':
        curr_pos = (last_pos[0] + 1, last_pos[1] - 1, last_pos[2] + self.z_delta)
    elif dir == 'nw':
        curr_pos = (last_pos[0] - 1, last_pos[1] - 1, last_pos[2] + self.z_delta)
    else:
        # '+'/'-' (pure elevation change): the x/y position stays put.
        curr_pos = (last_pos[0], last_pos[1], last_pos[2] + self.z_delta)
    if curr_pos[0] < 0 or curr_pos[0] > self.map_size[0] - 1 or curr_pos[1] < 0 or curr_pos[1] > self.map_size[1] - 1:
        messagebox.showwarning("BattleTracker", "Cannot move off map.")
        return
    self.z_delta = 0  # the armed elevation change is consumed
    # Footprint of the active token in 5 ft squares.
    if self.turn_obj['size'] == 'large':
        space_need = 4
    elif self.turn_obj['size'] == 'huge':
        space_need = 9
    elif self.turn_obj['size'] == 'gargantuan':
        space_need = 16
    else:
        space_need = 1
    next_row_num = math.sqrt(space_need)
    row_offset = 0
    col_offset = 0
    if dir == '+':
        # Ascending in place: tint the squares light orange.
        for i in range(space_need):
            self.map_frames[curr_pos[0] + col_offset][curr_pos[1] + row_offset].config(bg='orange1')
            col_offset += 1
            if col_offset == next_row_num:
                col_offset = 0
                row_offset += 1
    elif dir == '-':
        # Descending in place: tint the squares dark orange.
        for i in range(space_need):
            self.map_frames[curr_pos[0] + col_offset][curr_pos[1] + row_offset].config(bg='DarkOrange4')
            col_offset += 1
            if col_offset == next_row_num:
                col_offset = 0
                row_offset += 1
    else:
        # Lateral move: leave an 'orange4' trail on the vacated squares...
        for i in range(space_need):
            self.map_frames[last_pos[0] + col_offset][last_pos[1] + row_offset].config(bg='orange4')
            col_offset += 1
            if col_offset == next_row_num:
                col_offset = 0
                row_offset += 1
        # ...then highlight the destination squares.
        # NOTE(review): reconstructed indentation places this repaint inside
        # the lateral-move branch; confirm against the original file.
        col_offset = 0
        row_offset = 0
        for i in range(space_need):
            self.map_frames[curr_pos[0] + col_offset][curr_pos[1] + row_offset].config(bg='orange3')
            col_offset += 1
            if col_offset == next_row_num:
                col_offset = 0
                row_offset += 1
    self.move_path.append(curr_pos)
    if self.root.copy_win_open:
        self.copy_win.track_moves(self.move_path)
    # Each square moved counts as 5 ft; highlight red when speed is exceeded.
    feet_moved = int(self.lbl_amount_moved.cget('text'))
    feet_moved += 5
    self.lbl_amount_moved.config(text=feet_moved)
    self.lbl_position.config(text=f"{curr_pos[0]+1}: {curr_pos[1]+1}: {curr_pos[2]}")
    if feet_moved > int(self.lbl_max_move.cget('text')):
        self.lbl_amount_moved.config(bg='red4')
    else:
        self.lbl_amount_moved.config(bg='gray28')
def zpad(self, dir):
    """Arm a pending elevation change; highlight the z frame until applied."""
    self.z_delta = 1 if dir == '+' else -1
    self.z_frame.config(bg='green3')
    self.root.bind_all("<Button-1>", self.green_handle)
def green_handle(self, event):
    """Apply the armed z move when the z frame itself is clicked, then disarm.

    Bound to all <Button-1> clicks while an elevation change is armed; any
    click (on the z frame or not) disarms the highlight and the binding.
    """
    # Most widgets carry no custom .name attribute; the original swallowed
    # *every* exception here, which could also hide errors from dpad_move.
    # Catch only the expected AttributeError on the lookup itself.
    name = getattr(event.widget, 'name', None)
    if name == 'zf':
        if self.z_delta == 1:
            self.dpad_move('+')
        elif self.z_delta == -1:
            self.dpad_move('-')
    self.z_frame.config(bg='gray28')
    self.root.unbind_all("<Button-1>")
def target_hp(self, type):
    """Apply a damage ('dmg') or other HP change to the selected target.

    Reads the target name and HP delta from the control widgets; damage is
    soaked by temp_HP first. Dropping to 0 or below marks the token 'dead'.
    """
    sel_target = self.cont_targets.get()
    tgt_delta = self.ent_target_delta.get()
    try:
        tgt_delta = int(tgt_delta)
        if type == 'dmg':
            tgt_delta *= -1  # damage is applied as a negative delta
    except ValueError:
        messagebox.showwarning("BattleTracker", "HP difference must be a whole number.")
        return
    for being in self.root.token_list:
        if being['name'] == sel_target:
            if type == 'dmg' and abs(tgt_delta) > being['temp_HP']:
                # Damage exceeds temp HP: temp soaks its share, the
                # remainder falls through to current_HP below.
                tgt_delta += being['temp_HP']
                being['temp_HP'] = 0
            else:
                # Delta routed entirely to temp HP; current_HP untouched.
                # NOTE(review): reconstructed indentation — non-damage deltas
                # appear to land on temp_HP here; confirm against the original.
                being['temp_HP'] += tgt_delta
                break
            being['current_HP'] += tgt_delta
            if being['current_HP'] > being['max_HP']:
                being['current_HP'] = being['max_HP']
            elif being['current_HP'] <= 0:
                being['type'] = 'dead'
    self._on_select_target(None)
def input_creature_window(self):
    """Open the creature stat-entry window; submit routes through change_token_list."""
    self.in_win = StatCollector(self.root, self.map_size, self.round, self.turn)
    self.in_win.btn_submit.configure(command=lambda arg=['in_win', 'submit']: self.change_token_list(arg))
def input_object_window(self):
    """Open the object-builder window and wire its submit button."""
    self.obj_win = ObjectBuilder(self.root, self.map_size)
    try:
        self.obj_win.btn_submit.configure(command=lambda: self.change_obj_list())
    except AttributeError:
        # NOTE(review): tearing down the whole app when the builder has no
        # submit button looks drastic — confirm this is intended.
        self.root.destroy()
def change_obj_list(self):
    """Commit the object-builder form; close it and redraw on success."""
    if self.obj_win.submit():
        self.obj_win.obj_win.destroy()
        self.refresh_map()
def change_token_list(self, arg):
    """Dispatch a token-editing action from one of the child windows.

    :param arg: two-item sequence ``(origin, select_btn)`` identifying the
        originating window ('move_win'/'target_win'/'in_win') and button.

    Every branch followed the same pattern (snapshot for undo, run the
    edit, destroy the window and redraw on success), so the common part is
    factored into _commit_token_change.
    """
    origin = arg[0]
    select_btn = arg[1]
    if origin == 'move_win':
        if select_btn == 'set':
            self._commit_token_change(self.em.set_new_coord, lambda: self.em.move_win)
        elif select_btn == 'remove':
            self._commit_token_change(self.em.remove_token, lambda: self.em.move_win)
    elif origin == 'target_win':
        if select_btn == 'submit':
            self._commit_token_change(self.target.on_submit, lambda: self.target.target_win)
        elif select_btn == 'delete':
            self._commit_token_change(self.target.delete_token, lambda: self.target.target_win)
    elif origin == 'in_win':
        if select_btn == 'submit':
            self._commit_token_change(self.in_win.submit, lambda: self.in_win.range_win)

def _commit_token_change(self, action, get_window):
    """Snapshot the token list for undo, run `action`, and on a truthy
    result destroy the window returned by `get_window` and redraw."""
    self.log_action('list', copy.deepcopy(self.root.token_list))
    if action():
        get_window().destroy()
        self.refresh_map()
def move_token(self):
    """Open the token-move window and wire its buttons into change_token_list."""
    self.em.move_token(self.map_size)
    self.em.btn_set.configure(command=lambda arg=['move_win', 'set']: self.change_token_list(arg))
    self.em.btn_remove.configure(command=lambda arg=['move_win', 'remove']: self.change_token_list(arg))
    #self.wait_destroy_move_win()
def open_trig(self):
    """Open the trigonometry helper window of the calculator."""
    self.calculator.trig_win()
def target_item(self):
    """Open the targeting window and wire its buttons into change_token_list."""
    self.target.target_window()
    self.target.btn_submit.configure(command=lambda arg=['target_win', 'submit']: self.change_token_list(arg))
    self.target.btn_delete_target.configure(command=lambda arg=['target_win', 'delete']: self.change_token_list(arg))
    #self.target.target_win.protocol("WM_DELETE_WINDOW", lambda stuff=(self.target.token_list): self.refresh_map(tokens=stuff, origWin='target'))
def open_dice_roller(self):
    """Open the dice-roller pane."""
    self.dice_roll.dice_pane()
def field_light(self):
    """Open the light-template window, closing any previous instance first."""
    try:
        # self.lighter does not exist before the first call.
        self.lighter.escape()
    except AttributeError:
        pass
    self.lighter = GenLightWin(self.root, self.btn_field_light)
    self.lighter.open_light_win()
    self.lighter.btn_confirm.config(command=self.get_offsets)
def get_offsets(self):
    """Collect the configured light template and arm click-to-light on every cell."""
    self.light_list, self.light_shape = self.lighter.collect()
    self.lighter.escape()
    if not self.light_list:
        return
    for column in self.map_frames:
        for cell in column:
            cell.bind("<Button-1>", self.on_light)
            for piece in cell.pack_slaves():
                piece.bind("<Button-1>", self.on_light)
def on_light(self, event):
    """Paint the configured light template starting at the clicked cell."""
    start = list(event.widget.coord)  # (row, col) of the clicked square
    if self.light_shape == 'Square':
        self.map_frames[start[1]][start[0]].config(bg='SpringGreen3')
    # NOTE: curr_pos aliases `start` (no copy) — the per-step offsets below
    # accumulate, tracing the template outward from the origin.
    curr_pos = start
    for i in range(len(self.light_list)):
        curr_pos[0] += self.light_list[i][0]
        curr_pos[1] += self.light_list[i][1]
        if curr_pos[0] < self.map_size[1] and curr_pos[1] < self.map_size[0] and curr_pos[0] >= 0 and curr_pos[1] >= 0:
            self.map_frames[curr_pos[1]][curr_pos[0]].config(bg='SpringGreen3')
    start = list(event.widget.coord)  # re-fetch: the earlier list was mutated
    if self.light_shape == 'Line':
        # For line templates the origin square itself is not lit.
        self.map_frames[start[1]][start[0]].config(bg='gray28')
    self.root.bind_all("<Escape>", self.clear_light)
    self.root.bind_all("<Button-3>", self.clear_light)
def clear_light(self, event):
    """Remove light highlighting and click bindings from the whole board."""
    for column in self.map_frames:
        for cell in column:
            cell.config(bg='gray28')
            cell.unbind("<Button-1>")
            for piece in cell.pack_slaves():
                piece.unbind("<Button-1>")
    self.root.unbind_all("<Escape>")
    self.root.unbind_all("<Button-3>")
def full_reset(self):
    """After confirmation, wipe the save file to an empty battle and reload.

    Unlike clear_map, this rewrites the archive on disk and cannot be
    undone, so the undo/redo history is cleared afterwards.
    """
    make_sure = messagebox.askokcancel(
        "Warning",
        "Confirm request to delete ALL tokens and FULL RESET MAP.\nIf confirmed, this action cannot be undone.")
    if not make_sure:
        return
    battle_dict = {
        "map_size": self.map_size,
        "round": 0,
        "turn": 0
    }
    empty_json = json.dumps({})
    with ZipFile(self.root.filename, "w") as savefile:
        savefile.writestr('battle_info.json', json.dumps(battle_dict, indent=4))
        savefile.writestr('creatures.json', empty_json)
        savefile.writestr('objects.json', empty_json)
    self.refresh_map(reset=True)
    self.go_back.clear_all()
def find_quote(self):
    """Show a randomly chosen quote in the quote label."""
    # randrange(len) draws from the same range as randint(0, len - 1).
    rand_index = random.randrange(len(self.quoter.quote_list))
    self.lbl_quote.config(text=self.quoter.get_quote(rand_index))
def show_cond_info(self):
    """Open the condition-explanation reference window."""
    self.info.explain_conditions()
def time_travel(self, do_undo):
    """Apply one undo (do_undo=True) or redo step from the history stack.

    Pops the action from self.go_back, toggles the undo/redo button
    states, then reverses (or replays) the recorded action by kind.
    """
    if do_undo:
        hist_action = self.go_back.undo()
        if self.go_back.undo_empty():
            self.btn_undo['state'] = 'disabled'
        self.btn_redo['state'] = 'normal'
    else:
        hist_action = self.go_back.redo()
        if self.go_back.redo_empty():
            self.btn_redo['state'] = 'disabled'
        self.btn_undo['state'] = 'normal'
    action_name = hist_action['origin']
    if action_name == 'turn button':
        if do_undo:
            self.turn -= 1
            if self.turn < 0:
                # Wrapped past the first combatant: step back a round too.
                self.turn = len(self.initiative_holder) - 1
                self.round -= 1
                if self.round <= 0:
                    self.round = 0
                    self.lbl_round.config(text="S")  # "S" marks the pre-battle start state
                else:
                    self.lbl_round.config(text=self.round)
            self.refresh_initiatives()
        else:
            # Redo: replay the turn advance without logging a new action.
            self.next_turn(False)
    elif action_name == 'round button':
        if do_undo:
            self.round -= 1
            if self.round <= 0:
                self.round = 0
                self.lbl_round.config(text="S")
            else:
                self.lbl_round.config(text=self.round)
            self.turn = hist_action['restore']['turn']
            self.refresh_initiatives()
        else:
            self.next_round(False)
    elif action_name == 'reset round':
        if do_undo:
            self.round = hist_action['restore']['round']
            self.turn = hist_action['restore']['turn']
            if self.round <= 0:
                self.round = 0
                self.lbl_round.config(text="S")
            else:
                self.lbl_round.config(text=self.round)
            self.refresh_initiatives()
        else:
            self.reset_round(False)
    elif action_name == 'list':
        # Token-list snapshots are restored wholesale.
        self.root.token_list = copy.deepcopy(hist_action['restore'])
        self.refresh_map()
def undo_move(self):
    """Step the active token back along its movement path by one square."""
    if len(self.move_path) > 1:
        last_move = self.move_path.pop()
    else:
        return  # only the starting position is recorded: nothing to undo
    # Footprint of the active token in 5 ft squares, as in dpad_move().
    if self.turn_obj['size'] == 'large':
        space_need = 4
    elif self.turn_obj['size'] == 'huge':
        space_need = 9
    elif self.turn_obj['size'] == 'gargantuan':
        space_need = 16
    else:
        space_need = 1
    next_row_num = math.sqrt(space_need)
    row_offset = 0
    col_offset = 0
    # Clear the squares of the position being undone.
    for i in range(space_need):
        self.map_frames[last_move[0] + col_offset][last_move[1] + row_offset].config(bg='gray28')
        col_offset += 1
        if col_offset == next_row_num:
            col_offset = 0
            row_offset += 1
    # Roll back the distance counter (5 ft per square).
    feet_moved = int(self.lbl_amount_moved.cget('text'))
    feet_moved -= 5
    self.lbl_amount_moved.config(text=feet_moved)
    if feet_moved > int(self.lbl_max_move.cget('text')):
        self.lbl_amount_moved.config(bg='red4')
    else:
        self.lbl_amount_moved.config(bg='gray28')
    new_curr_move = self.move_path[-1]
    self.lbl_position.config(text=f"{new_curr_move[0]+1}: {new_curr_move[1]+1}: {new_curr_move[2]}")
    row_offset = 0
    col_offset = 0
    # Re-highlight the now-current position.
    for i in range(space_need):
        self.map_frames[new_curr_move[0] + col_offset][new_curr_move[1] + row_offset].config(bg='orange3')
        col_offset += 1
        if col_offset == next_row_num:
            col_offset = 0
            row_offset += 1
    if self.root.copy_win_open:
        self.copy_win.gray_map()
        self.copy_win.track_moves(self.move_path)
def log_action(self, origin, restore_data=None):
    """Push an undoable action and invalidate any pending redo history.

    :param str origin: action identifier consumed by time_travel.
    :param restore_data: optional snapshot needed to reverse the action.
    """
    if self.btn_undo['state'] == 'disabled':
        self.btn_undo['state'] = 'normal'
    self.go_back.add_undo(origin, restore_data)
    if not self.go_back.redo_empty():
        # A brand-new action forks history: the redo stack is now stale.
        self.go_back.clear_redo()
        self.btn_redo['state'] = 'disabled'
# Build the battle UI and enter the Tk event loop when run as a script.
battle = BattleMap(map_win)
if __name__ == '__main__':
    map_win.mainloop()
51033cdbbaedcb29f8ed65dc37e4cdc367f17763 | 1,411 | py | Python | qrtt/technical/rsi.py | leopoldsw/qrtt | 271f23888847f9a0a9a7da360be22c5000b058ab | [
"MIT"
] | null | null | null | qrtt/technical/rsi.py | leopoldsw/qrtt | 271f23888847f9a0a9a7da360be22c5000b058ab | [
"MIT"
] | null | null | null | qrtt/technical/rsi.py | leopoldsw/qrtt | 271f23888847f9a0a9a7da360be22c5000b058ab | [
"MIT"
] | null | null | null | """
RSI CALCULATION
The very first calculations for average gain and average loss are simple n-period averages:
First Average Gain = Sum of Gains over the past n periods / n.
First Average Loss = Sum of Losses over the past n periods / n
The second, and subsequent, calculations are based on the prior averages and the current gain or loss:
Average Gain = [(previous Average Gain) x (n-1) + current Gain] / n.
Average Loss = [(previous Average Loss) x (n-1) + current Loss] / n.
RS = Average Gain / Average Loss
RSI = 100 - (100 / (1 + RS))
"""
import numpy as np
def rsi(ohlcv, period=14, ohlcv_series="close"):
    """Compute the Relative Strength Index of ``ohlcv[ohlcv_series]``.

    Price changes are split into gains and losses and smoothed with an
    exponentially weighted mean (alpha = 1/period, min_periods = period),
    then combined as RSI = 100 - 100 / (1 + RS). Returns a pandas Series;
    the warm-up window is NaN. The input frame is not modified.
    """
    frame = ohlcv[[ohlcv_series]].copy(deep=True)
    change = frame[ohlcv_series].diff(periods=1)
    frame["diff"] = change
    frame["diff_up"] = np.where(change >= 0, change, 0)
    frame["diff_down"] = np.where(change < 0, change, 0)
    # Smoothed average gain / average loss.
    frame[["rsi_u", "rsi_d"]] = frame[["diff_up", "diff_down"]].ewm(alpha=1 / period, min_periods=period).mean()
    frame["rs"] = frame["rsi_u"].abs() / frame["rsi_d"].abs()
    return 100 - (100 / (1 + frame["rs"]))
def ADD_RSIs(ohlcv, periods=(10, 20, 30), ohlcv_series="close"):
    """Append one RSI column per period to ``ohlcv`` (modified in place).

    Columns are named ``rsi_<period>_<first letter of series>``, e.g.
    ``rsi_10_c`` for the close series. Returns the same frame.

    The default is a tuple rather than a list to avoid the shared
    mutable-default-argument pitfall; callers may still pass a list.
    """
    for p in periods:
        indicator_name = f'rsi_{p}_{ohlcv_series[0]}'
        ohlcv[indicator_name] = rsi(ohlcv, p, ohlcv_series)
    return ohlcv
| 34.414634 | 114 | 0.66832 | 215 | 1,411 | 4.213953 | 0.330233 | 0.07947 | 0.04415 | 0.049669 | 0.168874 | 0.103753 | 0.059603 | 0.059603 | 0 | 0 | 0 | 0.02698 | 0.185684 | 1,411 | 41 | 115 | 34.414634 | 0.761532 | 0.421687 | 0 | 0 | 0 | 0 | 0.137376 | 0.030941 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51040493dd07307ebedc78ed9f4e2afac76edbf4 | 1,394 | py | Python | ext/numdoc.py | kawaken/typescript-guide | e3f6731e370fb834f4f292ff5806556e203c9233 | [
"CC0-1.0"
] | 456 | 2019-06-11T15:48:52.000Z | 2022-03-28T06:44:10.000Z | ext/numdoc.py | kawaken/typescript-guide | e3f6731e370fb834f4f292ff5806556e203c9233 | [
"CC0-1.0"
] | 18 | 2019-06-13T19:33:24.000Z | 2022-02-27T21:31:26.000Z | ext/numdoc.py | kawaken/typescript-guide | e3f6731e370fb834f4f292ff5806556e203c9233 | [
"CC0-1.0"
] | 62 | 2019-06-12T10:11:14.000Z | 2022-03-29T07:31:09.000Z | # coding:utf-8
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
from sphinx import addnodes
from sphinx.writers.latex import LaTeXTranslator
from six import u
def numdoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Role producing an inline xref/doc node for a LaTeX chapter reference.

    Follows the standard docutils role contract and returns
    ``([nodes], [system_messages])``. The mutable defaults mirror the
    docutils role signature convention and are never mutated here.
    """
    # The original also fetched inliner.document.settings.env, but never
    # used it; the lookup has been removed.
    text = utils.unescape(text)
    _has_explicit, title, target = split_explicit_title(text)
    pnode = nodes.inline(rawtext, title, classes=['xref', 'doc'])
    pnode['reftarget'] = target
    return [pnode], []
def visit_inline(self, node):
    # type: (nodes.Node) -> None
    """LaTeX translator override: render xref/doc inlines as chapter refs.

    Non-title xref/doc inlines become ``第\\DUrole{...}{...}章`` ("Chapter N")
    instead of the stock Sphinx rendering; other classes follow the default
    behaviour of pushing a closing brace onto the context stack.
    """
    classes = node.get('classes', [])
    if classes in [['menuselection'], ['guilabel']]:
        self.body.append(r'\sphinxmenuselection{')
        self.context.append('}')
    elif classes in [['accelerator']]:
        self.body.append(r'\sphinxaccelerator{')
        self.context.append('}')
    elif classes in [['xref', 'doc']] and not self.in_title:
        # BUGFIX: the original used a ``ur'...'`` literal, which is a
        # SyntaxError on Python 3; escape the backslash explicitly instead
        # (same text on both Python 2 and 3).
        self.body.append(u'第\\DUrole{%s}{' % ','.join(classes))
        self.context.append(u'}章')
    elif classes and not self.in_title:
        self.body.append(r'\DUrole{%s}{' % ','.join(classes))
        self.context.append('}')
    else:
        self.context.append('')
def setup(app):
    """Sphinx extension entry point: patch the LaTeX writer and register the role."""
    LaTeXTranslator.visit_inline = visit_inline
    app.add_role('numdoc', numdoc_role)
| 33.190476 | 78 | 0.652798 | 177 | 1,394 | 5.067797 | 0.440678 | 0.061316 | 0.09476 | 0.050167 | 0.214047 | 0.214047 | 0.147157 | 0.069119 | 0 | 0 | 0 | 0.000887 | 0.191535 | 1,394 | 41 | 79 | 34 | 0.795031 | 0.027977 | 0 | 0.096774 | 0 | 0 | 0.107444 | 0.016117 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.16129 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
51047009ea571a8e1f957cb448ebd1d72b071fc4 | 2,679 | py | Python | build/lib/bandwidth/voice/bxml/verbs/redirect.py | Spaced-Out/python-bandwidth-sdk | 5332f29d1c093003444384f63a9d4a00843c954f | [
"MIT"
] | 5 | 2020-11-04T14:29:37.000Z | 2022-02-23T20:33:07.000Z | build/lib/bandwidth/voice/bxml/verbs/redirect.py | Spaced-Out/python-bandwidth-sdk | 5332f29d1c093003444384f63a9d4a00843c954f | [
"MIT"
] | 3 | 2021-07-23T18:48:48.000Z | 2022-03-15T14:59:07.000Z | build/lib/bandwidth/voice/bxml/verbs/redirect.py | Spaced-Out/python-bandwidth-sdk | 5332f29d1c093003444384f63a9d4a00843c954f | [
"MIT"
] | 8 | 2020-04-14T09:22:53.000Z | 2022-03-11T10:46:06.000Z | """
redirect.py
Representation of Bandwidth's redirect BXML verb
@copyright Bandwidth INC
"""
from lxml import etree
from .base_verb import AbstractBxmlVerb
# XML element name used when serializing the Redirect verb.
REDIRECT_TAG = "Redirect"
class Redirect(AbstractBxmlVerb):
    """Bandwidth BXML <Redirect> verb.

    Transfers call control to a new BXML document fetched from a URL,
    with optional HTTP basic auth and fallback settings.
    """

    def __init__(self, redirect_url=None, redirect_method=None, tag=None, username=None, password=None,
                 redirect_fallback_url=None, redirect_fallback_method=None,
                 fallback_username=None, fallback_password=None):
        """
        Build a Redirect verb.

        :param str redirect_url: URL from which the next BXML is fetched
        :param str redirect_method: HTTP method used for that request
        :param str tag: optional custom tag echoed back in callbacks
        :param str username: basic-auth username for the redirect URL
        :param str password: basic-auth password for the redirect URL
        :param str redirect_fallback_url: URL used for fallback events
        :param str redirect_fallback_method: HTTP method for fallback events
        :param str fallback_username: basic-auth username for fallback events
        :param str fallback_password: basic-auth password for fallback events
        """
        self.redirect_url = redirect_url
        self.redirect_method = redirect_method
        self.tag = tag
        self.username = username
        self.password = password
        self.redirect_fallback_url = redirect_fallback_url
        self.redirect_fallback_method = redirect_fallback_method
        self.fallback_username = fallback_username
        self.fallback_password = fallback_password

    def to_bxml(self):
        """Serialize this verb to its BXML (XML) string representation."""
        root = etree.Element(REDIRECT_TAG)
        # Only attributes that were provided are emitted; the tuple below
        # preserves the original serialization order.
        attribute_map = (
            ("redirectUrl", self.redirect_url),
            ("redirectMethod", self.redirect_method),
            ("tag", self.tag),
            ("username", self.username),
            ("password", self.password),
            ("redirectFallbackUrl", self.redirect_fallback_url),
            ("redirectFallbackMethod", self.redirect_fallback_method),
            ("fallbackUsername", self.fallback_username),
            ("fallbackPassword", self.fallback_password),
        )
        for attr_name, attr_value in attribute_map:
            if attr_value is not None:
                root.set(attr_name, attr_value)
        return etree.tostring(root).decode()
| 41.215385 | 103 | 0.695035 | 331 | 2,679 | 5.456193 | 0.193353 | 0.086379 | 0.044851 | 0.064784 | 0.207641 | 0.184939 | 0.049834 | 0.049834 | 0.049834 | 0 | 0 | 0 | 0.240388 | 2,679 | 64 | 104 | 41.859375 | 0.887469 | 0.281075 | 0 | 0 | 0 | 0 | 0.068568 | 0.012068 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0.216216 | 0.054054 | 0 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5105155b27194145a1c2533733e5d9490dd7a885 | 132 | py | Python | numba_stream/__init__.py | jackd/numba-stream | 79a12616a4a5b3107d8e9c17dc98cdeb79b2430a | [
"Apache-2.0"
] | null | null | null | numba_stream/__init__.py | jackd/numba-stream | 79a12616a4a5b3107d8e9c17dc98cdeb79b2430a | [
"Apache-2.0"
] | null | null | null | numba_stream/__init__.py | jackd/numba-stream | 79a12616a4a5b3107d8e9c17dc98cdeb79b2430a | [
"Apache-2.0"
] | null | null | null | from . import grid, lif, neighbors, ragged, utils
__all__ = [
"grid",
"utils",
"ragged",
"neighbors",
"lif",
]
| 13.2 | 49 | 0.537879 | 13 | 132 | 5.153846 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.287879 | 132 | 9 | 50 | 14.666667 | 0.712766 | 0 | 0 | 0 | 0 | 0 | 0.204545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
510589bac3c231ff9dfff00d74d2b2a73850d0b7 | 934 | py | Python | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotTemporaryStatesEnum.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotTemporaryStatesEnum.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotTemporaryStatesEnum.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class dotTemporaryStatesEnum(Enum):
""" enum dotTemporaryStatesEnum,values: DOT_TEMPORARY_STATE_ACCEPTED (8),DOT_TEMPORARY_STATE_ACTIVE (6),DOT_TEMPORARY_STATE_DELETED (3),DOT_TEMPORARY_STATE_DM_ONGOING (4),DOT_TEMPORARY_STATE_MODIFIED (2),DOT_TEMPORARY_STATE_NEW (1),DOT_TEMPORARY_STATE_ORIGINAL (7),DOT_TEMPORARY_STATE_REJECTED (9),DOT_TEMPORARY_STATE_UNCHANGED (5),DOT_TEMPORARY_STATE_UNKNOWN (0),DOT_TEMPORARY_STATE_USE_EXISTING_REPRESENTATION (10) """
DOT_TEMPORARY_STATE_ACCEPTED = None
DOT_TEMPORARY_STATE_ACTIVE = None
DOT_TEMPORARY_STATE_DELETED = None
DOT_TEMPORARY_STATE_DM_ONGOING = None
DOT_TEMPORARY_STATE_MODIFIED = None
DOT_TEMPORARY_STATE_NEW = None
DOT_TEMPORARY_STATE_ORIGINAL = None
DOT_TEMPORARY_STATE_REJECTED = None
DOT_TEMPORARY_STATE_UNCHANGED = None
DOT_TEMPORARY_STATE_UNKNOWN = None
DOT_TEMPORARY_STATE_USE_EXISTING_REPRESENTATION = None
value__ = None
| 58.375 | 424 | 0.826552 | 124 | 934 | 5.629032 | 0.274194 | 0.378224 | 0.535817 | 0.30086 | 0.194842 | 0.120344 | 0 | 0 | 0 | 0 | 0 | 0.01451 | 0.114561 | 934 | 15 | 425 | 62.266667 | 0.829504 | 0.441113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 4 |
5106aa713d6626d3d954ada527f0fad7a1c15261 | 1,872 | py | Python | modules/aerodyn/ad_EllipticalWingInf_OLAF/Main_PostPro.py | OpenFAST/openfast-regression | 7892739f47f312ce014711192fd70253ea40c8e8 | [
"Apache-2.0"
] | null | null | null | modules/aerodyn/ad_EllipticalWingInf_OLAF/Main_PostPro.py | OpenFAST/openfast-regression | 7892739f47f312ce014711192fd70253ea40c8e8 | [
"Apache-2.0"
] | null | null | null | modules/aerodyn/ad_EllipticalWingInf_OLAF/Main_PostPro.py | OpenFAST/openfast-regression | 7892739f47f312ce014711192fd70253ea40c8e8 | [
"Apache-2.0"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Local
import weio
import welib.fast.fastlib as fastlib

# --- Reference simulations OmniVor / AWSM (spanwise Cl at three resolutions)
ref20 = weio.read('AnalyticalResults/Elliptic_NumReference20.csv').toDataFrame()
ref40 = weio.read('AnalyticalResults/Elliptic_NumReference40.csv').toDataFrame()
ref80 = weio.read('AnalyticalResults/Elliptic_NumReference80.csv').toDataFrame()

# --- OLAF (spanwise post-pro, time-averaged over the last 10% of the run)
# _,sim20 = fastlib.spanwisePostPro('Main_EllipticalWing20.fst',avgMethod='constantwindow',avgParam=0.1,out_ext='.outb')
_,sim40,_,_ = fastlib.spanwisePostPro('Main_EllipticalWingInf_OLAF.dvr',avgMethod='constantwindow',avgParam=0.1,out_ext='.outb')
# _,sim80,_,_ = fastlib.spanwisePostPro('Main_EllipticalWing.fst',avgMethod='constantwindow',avgParam=0.1,out_ext='.outb')

# --- Theory: lifting-line Cl for an elliptical wing
b = 5        # span
c0 = 1.0     # root chord
V = [1,0.1]  # inflow velocity components
U0 = np.sqrt(V[0]**2 + V[1]**2)  # NOTE(review): computed but unused below
alpha_rad = np.arctan2(V[1],V[0])
AR = b*b/(np.pi*b*c0/4.)  # aspect ratio: b^2 over the elliptical planform area
CL_th = 2.*np.pi*(alpha_rad)/(1.+2./AR);

# --- Plot spanwise Cl: theory vs reference vs OLAF
fig,ax = plt.subplots(1, 1, sharey=False, figsize=(6.4,4.8)) # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.90, top=0.88, bottom=0.11, hspace=0.20, wspace=0.20)
ax.plot([-1,1], [CL_th, CL_th], 'k-', label ='Theory', lw=2)
# ax.plot((ref20['r/R_[-]']-0.5)*2 , ref20['Cl_[-]'] , '-' , label ='n=20')
ax.plot((ref40['r/R_[-]']-0.5)*2 , ref40['Cl_[-]'] , '-' , label ='n=40 (ref)')
# ax.plot((ref80['r/R_[-]']-0.5)*2 , ref80['Cl_[-]'] , '-' , label ='n=80 (ref)')
# ax.plot((sim20['r/R_[-]']-0.5)*2 , sim20['B1Cl_[-]'].values, 'k:', label='OLAF')
ax.plot((sim40['r/R_[-]']-0.5)*2 , sim40['B1Cl_[-]'].values, 'k:')
# ax.plot((sim80['r/R_[-]']-0.5)*2 , sim80['B1Cl_[-]'].values, 'k:')
ax.set_xlabel('y/b [-]')
ax.set_ylabel(r'$C_l$ [-]')
ax.set_ylim([0.47,0.48])
# ax.set_xlim([-1,1])
ax.legend()
ax.tick_params(direction='in')
plt.show()
| 39 | 128 | 0.631944 | 302 | 1,872 | 3.781457 | 0.36755 | 0.036778 | 0.015762 | 0.021016 | 0.144483 | 0.118214 | 0.118214 | 0.118214 | 0.08056 | 0 | 0 | 0.081682 | 0.110577 | 1,872 | 47 | 129 | 39.829787 | 0.604204 | 0.347222 | 0 | 0 | 0 | 0 | 0.209611 | 0.137531 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.185185 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
510acc236531ba47fd115389b56d91a1e8269505 | 15,135 | py | Python | venv/lib/python2.7/site-packages/plotnine/data/__init__.py | nuriale207/preprocesspack | cc06a9cb79c5e3b392371fcd8d1ccf7185e71821 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/plotnine/data/__init__.py | nuriale207/preprocesspack | cc06a9cb79c5e3b392371fcd8d1ccf7185e71821 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/plotnine/data/__init__.py | nuriale207/preprocesspack | cc06a9cb79c5e3b392371fcd8d1ccf7185e71821 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import pandas as pd
from pandas.api.types import CategoricalDtype
# Public dataset names exported by ``from plotnine.data import *``.
__all__ = ['diamonds', 'economics', 'economics_long',
           'midwest', 'mpg', 'msleep', 'presidential',
           'seals', 'txhousing', 'luv_colours',
           'faithful', 'faithfuld',
           # extras for backward compatibility!
           'huron', 'meat', 'mtcars', 'pageviews']
# ``unicode_literals`` makes the names above unicode on Python 2, but
# ``__all__`` entries must be native ``str`` there, hence the round-trip.
__all__ = [str(u) for u in __all__]
# Directory holding the bundled CSV files (same directory as this module).
_ROOT = os.path.abspath(os.path.dirname(__file__))
# Eagerly load each dataset at import time; date-like columns are parsed
# via ``parse_dates`` (given as column indices).
mtcars = pd.read_csv(os.path.join(_ROOT, 'mtcars.csv'))
meat = pd.read_csv(os.path.join(_ROOT, 'meat.csv'), parse_dates=[0])
pageviews = pd.read_csv(os.path.join(_ROOT, 'pageviews.csv'), parse_dates=[0])
huron = pd.read_csv(os.path.join(_ROOT, 'huron.csv'))
seals = pd.read_csv(os.path.join(_ROOT, 'seals.csv'))
economics = pd.read_csv(os.path.join(_ROOT, 'economics.csv'), parse_dates=[0])
economics_long = pd.read_csv(os.path.join(_ROOT, 'economics_long.csv'),
                             parse_dates=[0])
# presidential: term start (col 1) and end (col 2) are dates.
presidential = pd.read_csv(os.path.join(_ROOT, 'presidential.csv'),
                           parse_dates=[1, 2])
txhousing = pd.read_csv(os.path.join(_ROOT, 'txhousing.csv'))
luv_colours = pd.read_csv(os.path.join(_ROOT, 'luv_colours.csv'))
faithfuld = pd.read_csv(os.path.join(_ROOT, 'faithfuld.csv'))
faithful = pd.read_csv(os.path.join(_ROOT, 'faithful.csv'))
# add factors
def _ordered_categories(df, categories):
    """Cast columns of *df* to ordered categorical dtype, in place.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose columns are converted (mutated and also returned).
    categories : dict
        Maps a column name to the ordered list of its category values.

    Returns
    -------
    pandas.DataFrame
        The same frame, with each listed column cast to an ordered
        ``CategoricalDtype``.
    """
    for name, ordering in categories.items():
        dtype = CategoricalDtype(ordering, ordered=True)
        df[name] = df[name].astype(dtype)
    return df
def _unordered_categories(df, columns):
    """Cast each named column of *df* to an unordered categorical dtype,
    mutating and returning the frame."""
    unordered = CategoricalDtype(ordered=False)
    for name in columns:
        df[name] = df[name].astype(unordered)
    return df
# diamonds: cut/clarity/color are *ordered* categoricals (worst -> best).
diamonds = pd.read_csv(os.path.join(_ROOT, 'diamonds.csv'))
categories = {
    'cut': ['Fair', 'Good', 'Very Good', 'Premium', 'Ideal'],
    'clarity': ['I1', 'SI2', 'SI1', 'VS2', 'VS1', 'VVS2', 'VVS1', 'IF'],
    'color': ['D', 'E', 'F', 'G', 'H', 'I', 'J']}
diamonds = _ordered_categories(diamonds, categories)
# midwest / mpg / msleep: label-like columns become *unordered* categoricals.
midwest = pd.read_csv(os.path.join(_ROOT, 'midwest.csv'))
midwest = _unordered_categories(midwest, ['category'])
mpg = pd.read_csv(os.path.join(_ROOT, 'mpg.csv'))
columns = ['manufacturer', 'model', 'trans', 'fl', 'drv', 'class']
mpg = _unordered_categories(mpg, columns)
msleep = pd.read_csv(os.path.join(_ROOT, 'msleep.csv'))
msleep = _unordered_categories(msleep, ['vore', 'conservation'])
# Documentation
mtcars.__doc__ = """
Motor Trend Car Road Tests
.. rubric:: Description
The data was extracted from the 1974 *Motor Trend* US magazine,
and comprises fuel consumption and 10 aspects of automobile
design and performance for 32 automobiles (1973–74 models).
.. rubric:: Format
A data frame with 32 observations on 11 variables.
====== =========================================
Column Description
====== =========================================
mpg Miles/(US) gallon
cyl Number of cylinders
disp Displacement (cu.in.)
hp Gross horsepower
drat Rear axle ratio
wt Weight (1000 lbs)
qsec 1/4 mile time
vs V/S
am Transmission (0 = automatic, 1 = manual)
gear Number of forward gears
carb Number of carburetors
====== =========================================
.. rubric:: Source
Henderson and Velleman (1981), Building multiple regression
models interactively. *Biometrics*, **37**, 391–411.
"""
meat.__doc__ = """
"""
pageviews.__doc__ = """
"""
huron.__doc__ = """
Level of Lake Huron 1875–1972
.. rubric:: Description
Annual measurements of the level, in feet, of Lake Huron 1875–1972.
.. rubric:: Format
========= ==============
Column Description
========= ==============
year Year
level Water level
decade Decade
========= ==============
.. rubric:: Source
Brockwell, P. J. and Davis, R. A. (1991). Time Series and Forecasting Methods.
Second edition. Springer, New York. Series A, page 555.
Brockwell, P. J. and Davis, R. A. (1996). Introduction to Time Series and
Forecasting. Springer, New York. Sections 5.1 and 7.6.
"""
seals.__doc__ = """
Vector field of seal movements.
.. rubric:: Description
This vector field was produced from the data described in Brillinger,
D.R., Preisler, H.K., Ager, A.A. and Kie, J.G.
"An exploratory data analysis (EDA) of the paths of moving animals". J.
Statistical Planning and Inference 122 (2004), 43-63, using the methods
of Brillinger, D.R., "Learning a potential function from a trajectory",
Signal Processing Letters. December (2007).
.. rubric:: Format
A data frame with 1155 rows and 4 variables
=========== ===================
Column Description
=========== ===================
lat Latitude
long Longitude
delta_long Change in Longitude
delta_lat Change in Latitude
=========== ===================
.. rubric:: References
http://www.stat.berkeley.edu/~brill/Papers/jspifinal.pdf
"""
economics.__doc__ = """
US economic time series.
.. rubric:: Description
This dataset was produced from US economic time series data available
from http://research.stlouisfed.org/fred2.
`economics` is in "wide" format, `economics_long` is in "long" format.
.. rubric:: Format
A data frame with 478 rows and 6 variables
========= ==========================================================
Column Description
========= ==========================================================
date Month of data collection
psavert personal savings rate [1_]
pce personal consumption expenditures, in billions of dollars [2_]
unemploy number of unemployed in thousands, [3_]
uempmed median duration of unemployment, in week [4_]
pop total population, in thousands [5_]
========= ==========================================================
.. _1: http://research.stlouisfed.org/fred2/series/PSAVERT/
.. _2: http://research.stlouisfed.org/fred2/series/PCE
.. _3: http://research.stlouisfed.org/fred2/series/UNEMPLOY
.. _4: http://research.stlouisfed.org/fred2/series/UEMPMED
.. _5: http://research.stlouisfed.org/fred2/series/POP
"""
economics_long.__doc__ = economics.__doc__
presidential.__doc__ = """
Terms of 11 presidents from Eisenhower to Obama.
.. rubric:: Description
The names of each president, the start and end date
of their term, and their party of 11 US presidents
from Eisenhower to Obama.
========== ===========================
Column Description
========== ===========================
name Name of president
start Start of presidential term
end End of presidential term
party Political Party
========== ===========================
.. rubric:: Format
A data frame with 11 rows and 4 variables
"""
txhousing.__doc__ = """
Housing sales in TX.
.. rubric:: Description
Information about the housing market in Texas provided
by the TAMU real estate center, http://recenter.tamu.edu/.
.. rubric:: Format
A data frame with 8602 observations and 9 variables:
========= ===============================================
Column Description
========= ===============================================
city Name of MLS area
year Year
month Month
sales Number of sales
volume Total value of sales
median Median sale price
listings Total active listings
inventory "Months inventory": amount of time it would \n
take to sell all current listings at current \n
pace of sales.
date Date
========= ===============================================
"""
luv_colours.__doc__ = """
colors in Luv space.
.. rubric:: Description
Named colors translated into Luv colour space.
luv_colours
.. rubric:: Format
A data frame with 657 observations and 4 variables:
====== ============================
Column Description
====== ============================
L L position in Luv colour space
u u position in Luv colour space
v v position in Luv colour space
col Colour name
====== ============================
"""
faithful.__doc__ = """
Old Faithful Geyser Data
.. rubric:: Description
Waiting time between eruptions and the duration of the
eruption for the Old Faithful geyser in Yellowstone
National Park, Wyoming, USA.
.. rubric:: Format
A data frame with 272 observations on 2 variables.
========== ========================================
Column Description
========== ========================================
eruptions Eruption time in mins
waiting Waiting time to next eruption (in mins)
========== ========================================
.. rubric:: Details
A closer look at `faithful.eruptions` reveals that these are
heavily rounded times originally in seconds, where multiples
of 5 are more frequent than expected under non-human measurement.
For a better version of the eruption times, see the example below.
There are many versions of this dataset around:
Azzalini and Bowman (1990) use a more complete version.
.. rubric:: Source
W. Härdle.
.. rubric:: References
Härdle, W. (1991) *Smoothing Techniques with Implementation in S*.
New York: Springer.
Azzalini, A. and Bowman, A. W. (1990). A look at some data
on the Old Faithful geyser. **Applied Statistics** *39*, 357–365.
"""
faithfuld.__doc__ = """
Old Faithful Geyser Data
.. rubric:: Description
Waiting time between eruptions and the duration of the
eruption for the Old Faithful geyser in Yellowstone
National Park, Wyoming, USA.
.. rubric:: Format
A data frame with *grid data* for 272 observations on 2
variables and the density at those locations.
========== ========================================
Column Description
========== ========================================
eruptions Eruption time in mins
waiting Waiting time to next eruption (in mins)
density Density Estimate
========== ========================================
.. rubric:: Details
A closer look at `faithful.eruptions` reveals that these are
heavily rounded times originally in seconds, where multiples
of 5 are more frequent than expected under non-human measurement.
For a better version of the eruption times, see the example below.
There are many versions of this dataset around:
Azzalini and Bowman (1990) use a more complete version.
.. rubric:: Source
W. Härdle.
.. rubric:: References
Härdle, W. (1991) *Smoothing Techniques with Implementation in S*.
New York: Springer.
Azzalini, A. and Bowman, A. W. (1990). A look at some data
on the Old Faithful geyser. **Applied Statistics** *39*, 357–365.
"""
diamonds.__doc__ = """
Prices of 50,000 round cut diamonds
.. rubric:: Description
A dataset containing the prices and other attributes
of almost 54,000 diamonds. The variables are as follows:
.. rubric:: Format
A data frame with 53940 rows and 10 variables:
======== ==================================
Column Description
======== ==================================
price price in US dollars ($326–$18,823)
carat weight of the diamond (0.2–5.01)
cut quality of the cut (Fair, Good, Very Good, Premium, Ideal)
color diamond colour, from J (worst) to D (best)
clarity a measurement of how clear the diamond is \n
(I1 (worst), SI1, SI2, VS1, VS2, VVS1, VVS2, IF (best))
x length in mm (0–10.74)
y width in mm (0–58.9)
z depth in mm (0–31.8)
depth total depth percentage = z / mean(x, y) = 2 * z / (x + y) (43–79)
table width of top of diamond relative to widest point (43–95)
======== ==================================
"""
midwest.__doc__ = """
Midwest demographics.
.. rubric:: Description
Demographic information of midwest counties
.. rubric:: Format
A data frame with 437 rows and 28 variables
===================== ============================
Column Description
===================== ============================
PID
county
state
area
poptotal Total population
popdensity Population density
popwhite Number of whites
popblack Number of blacks
popamerindian Number of American Indians
popasian Number of Asians
popother Number of other races
percwhite Percent white
percblack Percent black
percamerindan Percent American Indian
percasian Percent Asian
percother Percent other races
popadults Number of adults
perchsd
percollege Percent college educated
percprof Percent profession
poppovertyknown
percpovertyknown
percbelowpoverty
percchildbelowpovert
percadultpoverty
percelderlypoverty
inmetro In a metro area
category
===================== ============================
"""
mpg.__doc__ = """
Fuel economy data from 1999 and 2008 for 38 popular models of car
.. rubric:: Description
This dataset contains a subset of the fuel economy data that
the EPA makes available on http://fueleconomy.gov.
It contains only models which had a new release every year
between 1999 and 2008 - this was used as a proxy for
the popularity of the car.
.. rubric:: Format
A data frame with 234 rows and 11 variables
============ ====================================================
Column Description
============ ====================================================
manufacturer
model
displ engine displacement, in litres
year
cyl number of cylinders
trans type of transmission
drv f = front-wheel drive, r = rear wheel drive, 4 = 4wd
cty city miles per gallon
hwy highway miles per gallon
fl
class
============ ====================================================
"""
msleep.__doc__ = """
An updated and expanded version of the mammals sleep dataset.
.. rubric:: Description
This is an updated and expanded version of the mammals
sleep dataset. Updated sleep times and weights were taken
from V. M. Savage and G. B. West. A quantitative, theoretical
framework for understanding mammalian sleep. Proceedings of
the National Academy of Sciences, 104 (3):1051-1056, 2007.
.. rubric:: Format
A data frame with 83 rows and 11 variables
============= =====================================
Column Description
============= =====================================
name common name
genus
vore carnivore, omnivore or herbivore?
order
conservation the conservation status of the animal
sleep_total total amount of sleep, in hours
sleep_rem rem sleep, in hours
sleep_cycle length of sleep cycle, in hours
awake amount of time spent awake, in hours
brainwt brain weight in kilograms
bodywt body weight in kilograms
============= =====================================
.. rubric:: Details
Additional variables order, conservation status and
vore were added from wikipedia.
"""
| 29.331395 | 78 | 0.611034 | 1,856 | 15,135 | 4.900862 | 0.324892 | 0.011873 | 0.015831 | 0.019349 | 0.337291 | 0.306728 | 0.242964 | 0.183817 | 0.176781 | 0.176781 | 0 | 0.024362 | 0.191807 | 15,135 | 515 | 79 | 29.38835 | 0.718198 | 0.020152 | 0 | 0.376 | 0 | 0.013333 | 0.838249 | 0.091936 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005333 | false | 0 | 0.010667 | 0 | 0.021333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
510be31bb9c422b20d34df7cba9b6a88cf0c6ddf | 235 | py | Python | accounts/forms.py | medfiras/Bazinga | 2f77b70a3fe627410ddf0a5be0f074de5e0dccdd | [
"Apache-2.0"
] | null | null | null | accounts/forms.py | medfiras/Bazinga | 2f77b70a3fe627410ddf0a5be0f074de5e0dccdd | [
"Apache-2.0"
] | 1 | 2015-05-31T10:42:36.000Z | 2015-11-03T17:52:06.000Z | accounts/forms.py | medfiras/Bazinga | 2f77b70a3fe627410ddf0a5be0f074de5e0dccdd | [
"Apache-2.0"
] | null | null | null | from userena.forms import EditProfileForm
from userena import views as userena_views
class CustomEditProfileForm(userena_views.EditProfileForm):
    """Userena profile-edit form that additionally hides the ``privacy``
    field from the user."""

    class Meta(EditProfileForm.Meta):
        # Extend (do not replace) the fields excluded by the parent form.
        exclude = [*EditProfileForm.Meta.exclude, 'privacy']
510ca732ab0febae2e491cf6527efb03bb28960e | 2,400 | py | Python | Pension & Commutation Calculator/SignUp.py | Abhijit-007a/Pension_And_Commutation_Calculator | 787467af25286fa26f50a2c025711992eb445c8f | [
"Apache-2.0"
] | 1 | 2021-09-19T11:47:44.000Z | 2021-09-19T11:47:44.000Z | Pension & Commutation Calculator/SignUp.py | Abhijit-007a/Pension_And_Commutation_Calculator | 787467af25286fa26f50a2c025711992eb445c8f | [
"Apache-2.0"
] | null | null | null | Pension & Commutation Calculator/SignUp.py | Abhijit-007a/Pension_And_Commutation_Calculator | 787467af25286fa26f50a2c025711992eb445c8f | [
"Apache-2.0"
# Importing the GUI (tkinter), image (PIL) and database (MySQL) modules.
from tkinter import *
from tkinter import messagebox
from PIL import Image, ImageTk
import mysql.connector as sql
# Root window for the sign-up dialog.
win = Tk()
# Defining basic geometry and characteristics of the window.
win.geometry("1000x700")
# Background image; ``photo_image`` must stay referenced at module level
# or tkinter's image would be garbage-collected and disappear.
image = Image.open('SignUpImage.png')
photo_image = ImageTk.PhotoImage(image)
label = Label(win, image = photo_image)
label.pack()
# Defining the title of the window.
win.title("Pension And Commutation Calculator -- SignUp")
# login() below creates the account in the database from the entry widgets.
def login():
    """Create a new account from the sign-up form.

    Ensures the ``loginbase`` database and its ``main`` table exist, then
    inserts the username/PIN pair read from the module-level entry widgets
    ``user1`` and ``passwd1`` and confirms via a message box.

    Side effects: writes to MySQL, shows a tkinter messagebox.
    Raises: ``mysql.connector.Error`` on connection/query failure.
    """
    # NOTE(security): credentials are hard-coded; move them to a config
    # file or environment variables before deploying.
    db = sql.connect(host="localhost", user="root", passwd="Password@123")
    try:
        cur = db.cursor()
        # "IF NOT EXISTS" replaces the original try/except dance around
        # "create database"/"create table".  This also fixes two bugs:
        # the first CREATE TABLE statement had a stray comma (SQL syntax
        # error), and on a *fresh* database the table was never created
        # at all, so the INSERT below would fail.
        cur.execute("CREATE DATABASE IF NOT EXISTS loginbase")
        cur.execute("USE loginbase")
        cur.execute("CREATE TABLE IF NOT EXISTS main("
                    "username varchar(50) NOT NULL, "
                    "password int NOT NULL)")
        user = user1.get()
        passwd = passwd1.get()
        # Parameterized query: the original built the statement with
        # str.format, which is an SQL-injection vector and broke on any
        # quote character in the username.
        cur.execute("insert into main values(%s, %s)", (user, passwd))
        db.commit()
        messagebox.showinfo("Status", "Account created! Now Please Login")
        cur.close()
    finally:
        # Always release the connection, even if a query failed
        # (the original leaked it on any mid-function error).
        db.close()
# Setting the label and entry box characteristics.
userlvl = Label(win, text="Username :")
passwdlvl = Label(win, text="PIN :")
user1 = Entry(win, textvariable=StringVar(),width=25)
# NOTE(review): ``IntVar().set("")`` returns None, so this Entry ends up
# with no backing variable -- presumably unintended; verify before
# relying on ``textvariable`` here.
passwd1 = Entry(win, textvariable=IntVar().set(""),width= 25)
# SignUp button triggers login() (defined above) on click.
enter = Button(win, text="SignUp",width= 9,command=lambda: login(), bd=0)
enter.configure(bg="black",fg= "white")
# Using place() to pin the labels, entries and button at fixed coordinates.
user1.place(x=750, y=450)
passwd1.place(x=750, y=490)
userlvl.place(x=650, y=450)
passwdlvl.place(x=650, y=490)
enter.place(x=890, y=540)
# Start the Tk event loop (blocks until the window is closed).
win.mainloop()
510ef8a7510ae1de9af9073513790cd07023ac0b | 26,324 | py | Python | wikidata_research/dictionary/dictionary_evaluation.py | sjuenger/WikiMETA | 13ed293b4bda8ff0fc10b532907ca35c24a12616 | [
"MIT"
] | null | null | null | wikidata_research/dictionary/dictionary_evaluation.py | sjuenger/WikiMETA | 13ed293b4bda8ff0fc10b532907ca35c24a12616 | [
"MIT"
] | null | null | null | wikidata_research/dictionary/dictionary_evaluation.py | sjuenger/WikiMETA | 13ed293b4bda8ff0fc10b532907ca35c24a12616 | [
"MIT"
] | null | null | null | # module to evaluate the property dictionary
import json
# global variable for the path to the dictionary
path_to_json_dictionary = "data/property_dictionary.json"
# overload method
#
# recommended == true
# query only those references or qualifiers, that are intended by Wikidata
# .. for References: these are properties, which are a facet of "Wikipedia:Citing sources"
# .. for Qualifiers: these are properties, which are a facet of "restrictive qualifier"
# ,"non-restrictive qualifier"
# ,"Wikidata property used as \"depicts\" (P180) qualifier on Commons"
# ,"Wikidata qualifier"
#
# recommended == false
# query only those references or qualifiers, that are NOT intended by Wikidata
# i.e., who do not fulfil the above mentioned requirements
# BUT are min. 1x times used as a reference / qualifier in Wikidata
#
# recommended == None
# query every property available to the mode
def get_top_x_metadata(x, mode, recommended = None):
    """Rank properties by how often they are used in the given metadata role
    and dump the top *x* of them to a JSON file.

    :param x: number of top properties to keep
    :param mode: "reference" or "qualifier" -- selects which usage counter
                 ("<mode>_no") of the property dictionary is ranked
    :param recommended:
        True  -> only properties Wikidata recommends for this mode,
        False -> only properties used at least once in this mode although
                 NOT recommended,
        None  -> both groups combined
    Writes data/statistical_information/wikidata_research/<mode>/<group>/
    properties/top_<x>.json as a side effect; returns nothing.
    """
    with open(path_to_json_dictionary, "r") as dict_data:
        property_dictionary = json.load(dict_data)
    result_dictionary = {}
    result_dictionary["properties"] = {}
    result_dictionary["total_usages_of_" + mode] = 0
    result_dictionary["total_unique_properties"] = 0
    for PID in property_dictionary:
        # Decide whether this property belongs to the requested group
        # (recommended / non-recommended / all) for the given mode.
        recommended_bool = False
        if recommended == True:
            # Recommended: flagged as a reference property, or carrying at
            # least one qualifier class.
            if mode == "reference":
                recommended_bool = bool(property_dictionary[PID]["is_reference"])
            elif mode == "qualifier":
                recommended_bool = property_dictionary[PID]["qualifier_class"] != []
            else:
                recommended_bool = False
        elif recommended == False:
            # Non-recommended: used at least once as a reference, but not
            # flagged as a reference property ...
            if mode == "reference" and int(property_dictionary[PID][mode + "_no"]) > 0\
                    and not bool(property_dictionary[PID]["is_reference"]):
                recommended_bool = True
            # ... or used at least once as a qualifier without any
            # qualifier class.
            elif mode == "qualifier" and int(property_dictionary[PID][mode + "_no"]) > 0\
                    and property_dictionary[PID]["qualifier_class"] == []:
                recommended_bool = True
            else:
                recommended_bool = False
        elif recommended is None:
            # All: exclude only properties that are neither recommended
            # for the mode nor ever used in it.
            if (mode == "reference" and (int(property_dictionary[PID][mode + "_no"]) > 0\
                    or bool(property_dictionary[PID]["is_reference"]))):
                recommended_bool = True
            elif (mode == "qualifier" and (int(property_dictionary[PID][mode + "_no"]) > 0\
                    or property_dictionary[PID]["qualifier_class"] != [])):
                recommended_bool = True
            else:
                recommended_bool = False
        if recommended_bool:
            result_dictionary["total_usages_of_" + mode] += \
                int(property_dictionary[PID][mode + "_no"])
            result_dictionary["total_unique_properties"] += 1
            # Top-x selection: fill the result dict up to x entries, then
            # replace the current minimum whenever a larger count appears.
            # NOTE: this is an O(x) scan per property (O(n*x) overall) --
            # acceptable for property-sized data, but a heap would be O(n log x).
            if len(result_dictionary["properties"]) < x:
                result_dictionary["properties"][PID] = property_dictionary[PID]
            else:
                # Only selected properties can be in the result dict, so no
                # group re-check is needed here.
                for result_PID in result_dictionary["properties"]:
                    if PID != result_PID \
                            and (int(property_dictionary[PID][mode + "_no"]) >
                                 int(result_dictionary["properties"][result_PID][mode + "_no"])):
                        # PID beats at least one entry: evict the smallest
                        # entry and insert PID.  The immediate break below
                        # keeps the pop/insert safe during iteration.
                        smallest_PID = ""
                        for test_PID in result_dictionary["properties"]:
                            if smallest_PID == "" or \
                                    int(result_dictionary["properties"][test_PID][mode + "_no"]) \
                                    < int(result_dictionary["properties"][smallest_PID][mode + "_no"]):
                                smallest_PID = test_PID
                        result_dictionary["properties"].pop(smallest_PID)
                        result_dictionary["properties"][PID] = property_dictionary[PID]
                        break
    # Once all the top x entries are collected, store them in a .json file
    # under the sub-directory named after the requested group.
    if recommended:
        tmp_string = "recommended"
    elif recommended is not None:
        tmp_string = "non_recommended"
    else:
        tmp_string = "all"
    with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
              "/properties/top_" + str(x) + ".json", "w") \
        as result_json:
        json.dump(result_dictionary, result_json)
# query the top facets (properties have) from qualifier / reference
# .. of recommended / non-recommended / overall properties
#
# if a property has no facet -> count it "as" itself
#
def get_top_x_facets_by_metadata(x, mode, recommended = None):
    """Count the facets ("facet_of" values) of the selected properties and
    dump both the full facet tally and the top *x* facets to JSON files.

    A property without any facet is counted under its own PID instead.

    :param x: number of top facets to keep
    :param mode: "reference" or "qualifier"
    :param recommended: True / False / None -- same group selection as in
                        get_top_x_metadata()
    Writes .../<mode>/<group>/facets/facets.json (full tally) and
    .../<mode>/<group>/facets/top_<x>.json as side effects; returns nothing.
    """
    with open(path_to_json_dictionary, "r") as dict_data:
        property_dictionary = json.load(dict_data)
    facets_dictionary = {}
    facets_dictionary["facets"] = {}
    # Counters for the total amount of facets and properties.
    facets_dictionary["total_facets"] = 0
    facets_dictionary["total_properties_without_facets"] = 0
    facets_dictionary["total_properties"] = 0
    for PID in property_dictionary:
        # Group selection (recommended / non-recommended / all); identical
        # to get_top_x_metadata() -- TODO: factor into a shared helper.
        # (Initialized True here, but every branch below overwrites it.)
        recommended_bool = True
        if recommended == True:
            if mode == "reference":
                recommended_bool = bool(property_dictionary[PID]["is_reference"])
            elif mode == "qualifier":
                recommended_bool = property_dictionary[PID]["qualifier_class"] != []
            else:
                recommended_bool = False
        elif recommended == False:
            # Used at least once as a reference, but not recommended ...
            if mode == "reference" and int(property_dictionary[PID][mode + "_no"]) > 0\
                    and not bool(property_dictionary[PID]["is_reference"]):
                recommended_bool = True
            # ... or used at least once as a qualifier without a class.
            elif mode == "qualifier" and int(property_dictionary[PID][mode + "_no"]) > 0\
                    and property_dictionary[PID]["qualifier_class"] == []:
                recommended_bool = True
            else:
                recommended_bool = False
        elif recommended is None:
            # All: exclude only properties neither recommended for the
            # mode nor ever used in it.
            if mode == "reference" and (int(property_dictionary[PID][mode + "_no"]) > 0\
                    or bool(property_dictionary[PID]["is_reference"])):
                recommended_bool = True
            elif mode == "qualifier" and (int(property_dictionary[PID][mode + "_no"]) > 0\
                    or property_dictionary[PID]["qualifier_class"] != []):
                recommended_bool = True
            else:
                recommended_bool = False
        if recommended_bool:
            facets_dictionary["total_properties"] += 1
            current_facet_list = property_dictionary[PID]["facet_of"]
            # A property without a facet is tallied under its own PID
            # (PIDs are unique, so no increment needed); it does not
            # contribute to "total_facets".
            if len(current_facet_list) == 0:
                facets_dictionary["total_properties_without_facets"] += 1
                facets_dictionary["facets"][PID] = 1
            for facet in current_facet_list:
                facets_dictionary["total_facets"] += 1
                # Tally the facet, creating its entry on first sight.
                if facet not in facets_dictionary["facets"]:
                    facets_dictionary["facets"][facet] = 1
                else:
                    facets_dictionary["facets"][facet] += 1
    # Store the full facet tally under the group's sub-directory.
    if recommended:
        tmp_string = "recommended"
    elif recommended is not None:
        tmp_string = "non_recommended"
    else:
        tmp_string = "all"
    with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
              "/facets/facets.json", "w") \
        as result_json:
        json.dump(facets_dictionary, result_json)
    # Extract the top x facets by usage count, carrying the totals over.
    result_facets_dictionary = {"facets" : {}}
    result_facets_dictionary["total_facets"] = facets_dictionary["total_facets"]
    result_facets_dictionary["total_properties"] = facets_dictionary["total_properties"]
    result_facets_dictionary["total_properties_without_facet"] =\
        facets_dictionary["total_properties_without_facets"]
    result_facets_dictionary["total_unique_facets"] = len(facets_dictionary["facets"])
    for facet in facets_dictionary["facets"]:
        if len(result_facets_dictionary["facets"]) < x:
            result_facets_dictionary["facets"][facet] = facets_dictionary["facets"][facet]
        else:
            # Find the current minimum in the result set; replace it only
            # if this facet's count is strictly greater.
            smallest_ID = ""
            for facet_ID in result_facets_dictionary["facets"]:
                if smallest_ID == "" or \
                        int(result_facets_dictionary["facets"][facet_ID]) \
                        < int(result_facets_dictionary["facets"][smallest_ID]):
                    smallest_ID = facet_ID
            if facets_dictionary["facets"][facet] > facets_dictionary["facets"][smallest_ID]:
                result_facets_dictionary["facets"].pop(smallest_ID)
                result_facets_dictionary["facets"][facet] = facets_dictionary["facets"][facet]
    # Once all the top x entries are created, store them in a .json file.
    if recommended:
        tmp_string = "recommended"
    elif recommended is not None:
        tmp_string = "non_recommended"
    else:
        tmp_string = "all"
    with open("data/statistical_information/wikidata_research/" + mode + "/" + tmp_string +
              "/facets/top_" + str(x) + ".json", "w") \
        as result_json:
        json.dump(result_facets_dictionary, result_json)
# get the used datatypes for every metadata
# -> a datatype can e.g. be String, WikibaseItem, etc.
#
def get_datatypes_by_metadata(mode, recommended = None):
    """Count the datatype of every selected property and dump the counts.

    Args:
        mode: "reference" or "qualifier"; selects which usage is inspected.
        recommended: True -> only properties recommended by Wikidata for
            ``mode``; False -> properties used as ``mode`` at least once but
            not recommended; None -> the union of both groups.

    Writes data/statistical_information/wikidata_research/<mode>/<group>/
    datatypes/datatypes.json containing per-datatype counts plus totals.
    """
    with open(path_to_json_dictionary, "r") as dict_data:
        property_dictionary = json.load(dict_data)

    def _selected(props):
        # ``==`` (not ``is``) kept deliberately so numeric 1/0 values of
        # ``recommended`` behave exactly like the original comparisons.
        if recommended == True:
            if mode == "reference":
                return bool(props["is_reference"])
            if mode == "qualifier":
                return props["qualifier_class"] != []
            return False
        if recommended == False:
            # used at least once as reference/qualifier, but not recommended
            if mode == "reference":
                return int(props[mode + "_no"]) > 0 and not bool(props["is_reference"])
            if mode == "qualifier":
                return int(props[mode + "_no"]) > 0 and props["qualifier_class"] == []
            return False
        if recommended is None:
            # recommended OR used at least once as reference/qualifier
            if mode == "reference":
                return int(props[mode + "_no"]) > 0 or bool(props["is_reference"])
            if mode == "qualifier":
                return int(props[mode + "_no"]) > 0 or props["qualifier_class"] != []
            return False
        # any other value of ``recommended`` selects everything (legacy default)
        return True

    datatypes_dictionary = {"datatypes": {}, "total_properties": 0}
    for PID in property_dictionary:
        if not _selected(property_dictionary[PID]):
            continue
        datatypes_dictionary["total_properties"] += 1
        current_datatype = property_dictionary[PID]["datatype"]
        counts = datatypes_dictionary["datatypes"]
        counts[current_datatype] = counts.get(current_datatype, 0) + 1
    datatypes_dictionary["total_unique_datatypes"] = len(datatypes_dictionary["datatypes"])

    # pick the output sub-directory that matches the selection mode
    group = ("recommended" if recommended
             else "all" if recommended is None
             else "non_recommended")
    out_path = ("data/statistical_information/wikidata_research/"
                + mode + "/" + group + "/datatypes/datatypes.json")
    with open(out_path, "w") as result_json:
        json.dump(datatypes_dictionary, result_json)
# get the accumulated facets by occurences of a (recommended) property in Wikidata
# so, e.g. if "Series Ordinal" occures as a reference 5Miox times in Wikidata, count all of his facets 5Miox times
#
# if a property has no facet -> count it "as" itself
#
def get_top_x_facets_by_accumulated_properties(x, mode, recommended = None):
    """Accumulate facet counts weighted by property usage and keep the top x.

    Each facet of a selected property is counted once per usage of that
    property as ``mode`` in Wikidata (so a property used 5M times adds 5M to
    each of its facets). A property without any facet is counted under its
    own PID instead. Two JSON files are written: the full accumulated counts
    and the top ``x`` facets.
    """
    with open(path_to_json_dictionary, "r") as dict_data:
        property_dictionary = json.load(dict_data)

    def _selected(props):
        # ``==`` (not ``is``) kept deliberately so numeric 1/0 values of
        # ``recommended`` behave exactly like the original comparisons.
        if recommended == True:
            if mode == "reference":
                return bool(props["is_reference"])
            if mode == "qualifier":
                return props["qualifier_class"] != []
            return False
        if recommended == False:
            # used at least once as reference/qualifier, but not recommended
            if mode == "reference":
                return int(props[mode + "_no"]) > 0 and not bool(props["is_reference"])
            if mode == "qualifier":
                return int(props[mode + "_no"]) > 0 and props["qualifier_class"] == []
            return False
        if recommended is None:
            # recommended OR used at least once as reference/qualifier
            if mode == "reference":
                return int(props[mode + "_no"]) > 0 or bool(props["is_reference"])
            if mode == "qualifier":
                return int(props[mode + "_no"]) > 0 or props["qualifier_class"] != []
            return False
        # any other value of ``recommended`` selects everything (legacy default)
        return True

    facets_dictionary = {
        "facets": {},
        "total_accumulated_facets": 0,
        "total_accumulated_properties_without_facets": 0,
        "total_accumulated_properties": 0,
    }
    for PID in property_dictionary:
        props = property_dictionary[PID]
        if not _selected(props):
            continue
        usages = int(props[mode + "_no"])
        facets_dictionary["total_accumulated_properties"] += usages
        facet_list = props["facet_of"]
        facets_dictionary["total_accumulated_facets"] += len(facet_list) * usages
        counts = facets_dictionary["facets"]
        if not facet_list:
            # no facet known -> count the property under its own PID
            facets_dictionary["total_accumulated_properties_without_facets"] += usages
            counts[PID] = usages
        for facet in facet_list:
            counts[facet] = counts.get(facet, 0) + usages

    result_facets_dictionary = {"facets": {}}
    result_facets_dictionary["total_accumulated_facets"] = \
        facets_dictionary["total_accumulated_facets"]
    result_facets_dictionary["total_accumulated_properties"] = \
        facets_dictionary["total_accumulated_properties"]
    result_facets_dictionary["total_accumulated_properties_without_facets"] = \
        facets_dictionary["total_accumulated_properties_without_facets"]

    group = ("recommended" if recommended
             else "all" if recommended is None
             else "non_recommended")
    base = "data/statistical_information/wikidata_research/" + mode + "/" + group
    # store the full dictionary of accumulated facets
    with open(base + "/accumulated_facets/accumulated_facets.json", "w") as result_json:
        json.dump(facets_dictionary, result_json)

    # keep only the top ``x`` facets: fill up to x entries, then replace the
    # current minimum whenever a strictly larger count comes along (first
    # minimal key wins on ties, matching the original scan)
    top = result_facets_dictionary["facets"]
    for facet, count in facets_dictionary["facets"].items():
        if len(top) < x:
            top[facet] = count
            continue
        smallest_ID = min(top, key=lambda fid: int(top[fid]))
        if count > facets_dictionary["facets"][smallest_ID]:
            top.pop(smallest_ID)
            top[facet] = count

    with open(base + "/accumulated_facets/top_" + str(x) + ".json", "w") as result_json:
        json.dump(result_facets_dictionary, result_json)
# get the accumulated datatypes by occurences of a (recommended) property in Wikidata
# so, e.g. if "Series Ordinal" occures as a reference 5Miox times in Wikidata, count his datatype 5Miox times
def get_datatypes_by_accumulated_properties(mode, recommended = None):
    """Count datatypes weighted by how often each selected property is used.

    If a property occurs N times as ``mode`` in Wikidata, its datatype is
    counted N times. The result is written to
    .../<mode>/<group>/accumulated_datatypes/accumulated_datatypes.json.
    """
    with open(path_to_json_dictionary, "r") as dict_data:
        property_dictionary = json.load(dict_data)

    def _selected(props):
        # ``==`` (not ``is``) kept deliberately so numeric 1/0 values of
        # ``recommended`` behave exactly like the original comparisons.
        if recommended == True:
            if mode == "reference":
                return bool(props["is_reference"])
            if mode == "qualifier":
                return props["qualifier_class"] != []
            return False
        if recommended == False:
            # used at least once as reference/qualifier, but not recommended
            if mode == "reference":
                return int(props[mode + "_no"]) > 0 and not bool(props["is_reference"])
            if mode == "qualifier":
                return int(props[mode + "_no"]) > 0 and props["qualifier_class"] == []
            return False
        if recommended is None:
            # recommended OR used at least once as reference/qualifier
            if mode == "reference":
                return int(props[mode + "_no"]) > 0 or bool(props["is_reference"])
            if mode == "qualifier":
                return int(props[mode + "_no"]) > 0 or props["qualifier_class"] != []
            return False
        # any other value of ``recommended`` selects everything (legacy default)
        return True

    datatypes_dictionary = {
        "datatypes": {},
        "total_properties": 0,
        "total_accumulated_datatypes": 0,
    }
    for PID in property_dictionary:
        props = property_dictionary[PID]
        if not _selected(props):
            continue
        usages = int(props[mode + "_no"])
        datatypes_dictionary["total_properties"] += 1
        datatypes_dictionary["total_accumulated_datatypes"] += usages
        counts = datatypes_dictionary["datatypes"]
        counts[props["datatype"]] = counts.get(props["datatype"], 0) + usages
    datatypes_dictionary["total_unique_datatypes"] = len(datatypes_dictionary["datatypes"])

    group = ("recommended" if recommended
             else "all" if recommended is None
             else "non_recommended")
    out_path = ("data/statistical_information/wikidata_research/" + mode + "/" + group
                + "/accumulated_datatypes/accumulated_datatypes.json")
    with open(out_path, "w") as result_json:
        json.dump(datatypes_dictionary, result_json)
# get the top x metadata properties recommended for a given facet (stub below;
# the previous comment was a stale copy of the accumulated-facets description)
def get_top_x_metadata_recommended_by_facet(x, mode):
    """Placeholder: intended to return the top ``x`` metadata properties
    recommended for a given facet.

    Not implemented yet -- currently a no-op returning ``None``.
    """
    return
# get all datatypes, that are available inside the property dictionary
def get_all_datatypes_from_property_dictionary():
    """Collect every distinct datatype occurring in the property dictionary.

    Returns:
        dict: maps each distinct datatype name to 0; the value is only a
        placeholder, callers care about the set of keys.
    """
    # The ``with`` block closes the file on exit, so the explicit
    # ``dict_data.close()`` of the original was redundant and is removed,
    # as is the unused counter ``i``.
    with open(path_to_json_dictionary, "r") as dict_data:
        property_dictionary = json.load(dict_data)
    result_dict = {}
    for PID in property_dictionary:
        tmp_datatype = property_dictionary[PID]["datatype"]
        if tmp_datatype not in result_dict:
            result_dict[tmp_datatype] = 0
    return result_dict
| 48.83859 | 128 | 0.584334 | 2,761 | 26,324 | 5.352409 | 0.063021 | 0.102314 | 0.09663 | 0.050345 | 0.875694 | 0.839153 | 0.803762 | 0.766816 | 0.74435 | 0.737989 | 0 | 0.003615 | 0.327534 | 26,324 | 538 | 129 | 48.929368 | 0.831206 | 0.201793 | 0 | 0.769663 | 0 | 0 | 0.137989 | 0.054775 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019663 | false | 0 | 0.002809 | 0.002809 | 0.02809 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
510f0704162b83a55e2da583192211cbda73f8f2 | 2,987 | py | Python | Test13_talking_robot/Test13_preprocess.py | hooloong/My_TensorFlow | ef115989035b9ae14938dca47c0814b0d16dd6ba | [
"MIT"
] | 3 | 2018-07-29T17:31:58.000Z | 2019-06-27T10:36:34.000Z | Test13_talking_robot/Test13_preprocess.py | hooloong/My_TensorFlow | ef115989035b9ae14938dca47c0814b0d16dd6ba | [
"MIT"
] | null | null | null | Test13_talking_robot/Test13_preprocess.py | hooloong/My_TensorFlow | ef115989035b9ae14938dca47c0814b0d16dd6ba | [
"MIT"
] | 1 | 2019-02-18T02:27:39.000Z | 2019-02-18T02:27:39.000Z | # coding=utf-8
import os
import random
import sys
# Path to the raw dialogue corpus (dgk_shooter); abort early if it is missing.
conv_path = 'dgk_shooter_min.conv'
if not os.path.exists(conv_path):
    print('数据集不存在')
    exit()
# 数据集格式
"""
E
M 畹/华/吾/侄/
M 你/接/到/这/封/信/的/时/候/
M 不/知/道/大/伯/还/在/不/在/人/世/了/
E
M 咱/们/梅/家/从/你/爷/爷/起/
M 就/一/直/小/心/翼/翼/地/唱/戏/
M 侍/奉/宫/廷/侍/奉/百/姓/
M 从/来/不/曾/遭/此/大/祸/
M 太/后/的/万/寿/节/谁/敢/不/穿/红/
M 就/你/胆/儿/大/
M 唉/这/我/舅/母/出/殡/
M 我/不/敢/穿/红/啊/
M 唉/呦/唉/呦/爷/
M 您/打/得/好/我/该/打/
M 就/因/为/没/穿/红/让/人/赏/咱/一/纸/枷/锁/
M 爷/您/别/给/我/戴/这/纸/枷/锁/呀/
E
M 您/多/打/我/几/下/不/就/得/了/吗/
M 走/
M 这/是/哪/一/出/啊/…/ / /这/是/
M 撕/破/一/点/就/弄/死/你/
M 唉/
M 记/着/唱/戏/的/再/红/
M 还/是/让/人/瞧/不/起/
M 大/伯/不/想/让/你/挨/了/打/
M 还/得/跟/人/家/说/打/得/好/
M 大/伯/不/想/让/你/再/戴/上/那/纸/枷/锁/
M 畹/华/开/开/门/哪/
E
...
"""
# The corpus file was converted to UTF-8 beforehand (originally with the
# Sublime Text editor), which avoids a lot of decoding trouble here.
convs = []  # collection of conversations
with open(conv_path, "r", encoding='utf-8',) as f:
    one_conv = []  # one complete conversation
    cnt = 0
    for line in f:
        cnt += 1
        # print(line)
        # print(cnt)
        # 'E' lines separate conversations; 'M' lines carry '/'-joined characters.
        line = line.strip('\n').replace('/', '')
        if line == '':
            continue
        if line[0] == 'E':
            if one_conv:
                convs.append(one_conv)
            one_conv = []
        elif line[0] == 'M':
            # keep only the utterance text that follows the 'M ' prefix
            one_conv.append(line.split(' ')[1])
"""
print(convs[:3]) # 个人感觉对白数据集有点不给力啊
[ ['畹华吾侄', '你接到这封信的时候', '不知道大伯还在不在人世了'],
['咱们梅家从你爷爷起', '就一直小心翼翼地唱戏', '侍奉宫廷侍奉百姓', '从来不曾遭此大祸', '太后的万寿节谁敢不穿红', '就你胆儿大', '唉这我舅母出殡', '我不敢穿红啊', '唉呦唉呦爷', '您打得好我该打', '就因为没穿红让人赏咱一纸枷锁', '爷您别给我戴这纸枷锁呀'],
['您多打我几下不就得了吗', '走', '这是哪一出啊 ', '撕破一点就弄死你', '唉', '记着唱戏的再红', '还是让人瞧不起', '大伯不想让你挨了打', '还得跟人家说打得好', '大伯不想让你再戴上那纸枷锁', '畹华开开门哪'], ....]
"""
# Split each conversation into alternating questions and answers
ask = []  # questions (even-indexed utterances)
response = []  # answers (odd-indexed utterances)
for conv in convs:
    if len(conv) == 1:
        # a single utterance has no reply; skip it
        continue
    if len(conv) % 2 != 0:  # odd number of utterances -> drop the last to pair them up
        conv = conv[:-1]
    for i in range(len(conv)):
        if i % 2 == 0:
            ask.append(conv[i])
        else:
            response.append(conv[i])
"""
print(len(ask), len(response))
print(ask[:3])
print(response[:3])
['畹华吾侄', '咱们梅家从你爷爷起', '侍奉宫廷侍奉百姓']
['你接到这封信的时候', '就一直小心翼翼地唱戏', '从来不曾遭此大祸']
"""
def convert_seq2seq_files(questions, answers, TESTSET_SIZE=8000):
    """Write parallel question/answer files for seq2seq training.

    ``TESTSET_SIZE`` randomly chosen pairs go to test.enc/test.dec; the
    remaining pairs go to train.enc/train.dec. The *.enc files hold the
    questions, the *.dec files the corresponding answers, one per line.
    """
    # A set makes the per-index membership test O(1); the original used a
    # list, which is O(TESTSET_SIZE) per lookup.
    test_index = set(random.sample(range(len(questions)), TESTSET_SIZE))
    # Context managers guarantee the four files are closed even if a write
    # fails (the original opened them without any close-on-error protection).
    with open('train.enc', 'w', encoding="utf-8") as train_enc, \
         open('train.dec', 'w', encoding="utf-8") as train_dec, \
         open('test.enc', 'w', encoding="utf-8") as test_enc, \
         open('test.dec', 'w', encoding="utf-8") as test_dec:
        for i in range(len(questions)):
            if i in test_index:
                test_enc.write(questions[i] + '\n')
                test_dec.write(answers[i] + '\n')
            else:
                train_enc.write(questions[i] + '\n')
                train_dec.write(answers[i] + '\n')
            if i % 1000 == 0:
                # progress indicator (len(range(len(q))) simplified to len(q))
                print(len(questions), '处理进度:', i)
# Run the train/test split on the corpus parsed above.
convert_seq2seq_files(ask, response)
# The generated *.enc files store the questions
# 生成的*.dec文件保存了回答 | 24.08871 | 153 | 0.546368 | 549 | 2,987 | 2.919854 | 0.40255 | 0.014972 | 0.03743 | 0.032439 | 0.1335 | 0.07985 | 0.008734 | 0 | 0 | 0 | 0 | 0.016024 | 0.226984 | 2,987 | 124 | 154 | 24.08871 | 0.676916 | 0.067292 | 0 | 0.111111 | 0 | 0 | 0.062897 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.055556 | 0 | 0.074074 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
510f5b8577eceaa2d67a2069c8c286b003c41202 | 2,371 | py | Python | rescape_region/models/project.py | calocan/rescape-region | 8cb0c4d5e2f4c92939eb9a30473808decdb4bef2 | [
"MIT"
] | 1 | 2021-05-08T12:04:23.000Z | 2021-05-08T12:04:23.000Z | rescape_region/models/project.py | rescapes/rescape-region | b247aa277928d126bcf020c8204994b00ae4c18d | [
"MIT"
] | 15 | 2021-04-06T18:05:04.000Z | 2022-03-12T00:22:35.000Z | rescape_region/models/project.py | rescapes/rescape-region | b247aa277928d126bcf020c8204994b00ae4c18d | [
"MIT"
] | null | null | null | import reversion
from django.contrib.auth import get_user_model
from django.contrib.gis.db.models import SET_NULL, CASCADE, Q
from django.db.models import (
CharField,
ForeignKey, ManyToManyField, UniqueConstraint)
from django.db.models import JSONField
from safedelete.models import SafeDeleteModel
from rescape_region.model_helpers import feature_collection_default, project_data_default
from rescape_region.models.revision_mixin import RevisionModelMixin
@reversion.register()
class Project(SafeDeleteModel, RevisionModelMixin):
    """
    Models a geospatial project

    Inherits soft deletion from SafeDeleteModel, which is why the uniqueness
    constraints below distinguish deleted from non-deleted rows, and revision
    tracking via reversion/RevisionModelMixin.
    """
    # Unique human readable identifier for URLs, etc
    key = CharField(max_length=50, null=False)
    # Display name of the project
    name = CharField(max_length=50, null=False)
    # TODO probably unneeded. Locations have geojson
    geojson = JSONField(null=False, default=feature_collection_default)
    # Free-form project data; defaults come from project_data_default
    data = JSONField(null=False, default=project_data_default)
    # The optional Region of the Project.
    # Don't create a related name. It leads Graphene to register classes by following the reverse relationship.
    # We don't want this because we might use Region but have our own Project class. This prevents Graphene from
    # reaching Project from Region
    region = ForeignKey('Region', null=True, on_delete=SET_NULL, related_name='+', )
    # Locations in the project. It might be better in some cases to leave this empty and specify locations by queries
    locations = ManyToManyField('Location', blank=True, related_name='projects')
    # Projects must be owned by someone
    user = ForeignKey(get_user_model(), on_delete=CASCADE, related_name='+', )

    class Meta:
        app_label = "rescape_region"
        constraints = [
            # https://stackoverflow.com/questions/33307892/django-unique-together-with-nullable-foreignkey
            # This says that for deleted locations, user and key and deleted date must be unique
            UniqueConstraint(fields=['user', 'deleted', 'key'],
                             name='unique_project_with_deleted'),
            # This says that for non-deleted locations, user and key must be unique
            UniqueConstraint(fields=['user', 'key'],
                             condition=Q(deleted=None),
                             name='unique_project_without_deleted'),
        ]

    def __str__(self):
        """Human-readable representation: the project's name."""
        return self.name
| 45.596154 | 117 | 0.710671 | 291 | 2,371 | 5.66323 | 0.439863 | 0.024272 | 0.025485 | 0.021845 | 0.14199 | 0.081311 | 0 | 0 | 0 | 0 | 0 | 0.006424 | 0.212147 | 2,371 | 51 | 118 | 46.490196 | 0.875803 | 0.334036 | 0 | 0 | 0 | 0 | 0.074887 | 0.036798 | 0 | 0 | 0 | 0.019608 | 0 | 1 | 0.033333 | false | 0 | 0.266667 | 0.033333 | 0.633333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
510fb73823084b9ff3de955296518a2eb7c922e4 | 540 | py | Python | wiktts/__init__.py | pettarin/wiktts | 37f9a865ec01604c36a3ab15325f62d8c26e4484 | [
"MIT"
] | 5 | 2016-06-02T04:52:11.000Z | 2018-08-01T20:05:37.000Z | wiktts/__init__.py | pettarin/wiktts | 37f9a865ec01604c36a3ab15325f62d8c26e4484 | [
"MIT"
] | null | null | null | wiktts/__init__.py | pettarin/wiktts | 37f9a865ec01604c36a3ab15325f62d8c26e4484 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
"""
TBW
"""
from __future__ import absolute_import
from __future__ import print_function
import io
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2016, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__email__ = "alberto@albertopettarin.it"
__version__ = "0.1.0"
__status__ = "Development"
def write_file(formatted_data, output_file_path):
    """Write the given lines to ``output_file_path`` as UTF-8 text.

    The entries of ``formatted_data`` are joined with newlines; no trailing
    newline is appended after the final entry.
    """
    content = u"\n".join(formatted_data)
    with io.open(output_file_path, "w", encoding="utf-8") as handle:
        handle.write(content)
| 20.769231 | 75 | 0.748148 | 72 | 540 | 5.013889 | 0.638889 | 0.110803 | 0.088643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019149 | 0.12963 | 540 | 25 | 76 | 21.6 | 0.748936 | 0.068519 | 0 | 0 | 0 | 0 | 0.256619 | 0.101833 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.333333 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
510fcdad15a6c8447153e32692aae7694f685210 | 2,456 | py | Python | oxe-api/test/resource/user/test_update_user.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/test/resource/user/test_update_user.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/test/resource/user/test_update_user.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | from test.BaseCase import BaseCase
class TestUpdateUser(BaseCase):
    """Integration tests for the POST /user/update_user endpoint."""

    @BaseCase.login
    @BaseCase.grant_access("/user/update_user")
    def test_ok(self, token):
        """A granted caller can update another user's is_admin flag."""
        self.db.insert({
            "id": 14,
            "email": "myemail@test.lu",
            "password": "MySecret2!",
            "is_admin": 0,
        }, self.db.tables["User"])

        payload = {
            "id": 14,
            "is_admin": True
        }

        response = self.application.post('/user/update_user',
                                         headers=self.get_standard_post_header(token),
                                         json=payload)

        users = self.db.get(self.db.tables["User"], {"id": 14})
        self.assertEqual(200, response.status_code)
        self.assertEqual(len(users), 1)
        # the boolean True payload is persisted as integer 1
        self.assertEqual(users[0].is_admin, 1)

    @BaseCase.login
    @BaseCase.grant_access("/user/update_user")
    def test_ko_password_param(self, token):
        """A 'password' field in the payload is rejected with 422 and
        no change is persisted."""
        self.db.insert({
            "id": 2,
            "email": "myemail@test.lu",
            "password": "MySecret2!",
            "is_admin": 0,
        }, self.db.tables["User"])

        payload = {
            "id": 2,
            "is_admin": True,
            "password": "new pass"
        }

        response = self.application.post('/user/update_user',
                                         headers=self.get_standard_post_header(token),
                                         json=payload)

        users = self.db.get(self.db.tables["User"], {"id": 2})
        self.assertEqual("422 UNPROCESSABLE ENTITY", response.status)
        # is_admin must remain untouched because the request was rejected
        self.assertEqual(users[0].is_admin, 0)

    @BaseCase.login
    @BaseCase.grant_access("/user/update_user")
    def test_ko_email_param(self, token):
        """An 'email' field in the payload is rejected with 422 and
        no change is persisted."""
        self.db.insert({
            "id": 2,
            "email": "myemail@test.lu",
            "password": "MySecret2!",
            "is_admin": 0,
        }, self.db.tables["User"])

        payload = {
            "id": 2,
            "is_admin": True,
            "email": "myemail@test.lu"
        }

        response = self.application.post('/user/update_user',
                                         headers=self.get_standard_post_header(token),
                                         json=payload)

        users = self.db.get(self.db.tables["User"], {"id": 2})
        self.assertEqual("422 UNPROCESSABLE ENTITY", response.status)
        # is_admin must remain untouched because the request was rejected
        self.assertEqual(users[0].is_admin, 0)
| 30.7 | 86 | 0.506107 | 254 | 2,456 | 4.755906 | 0.208661 | 0.059603 | 0.069536 | 0.07947 | 0.84851 | 0.84851 | 0.806291 | 0.806291 | 0.806291 | 0.806291 | 0 | 0.021438 | 0.354235 | 2,456 | 79 | 87 | 31.088608 | 0.740227 | 0 | 0 | 0.758065 | 0 | 0 | 0.158795 | 0 | 0 | 0 | 0 | 0 | 0.112903 | 1 | 0.048387 | false | 0.080645 | 0.016129 | 0 | 0.080645 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
51106aa7bc640784a651d3a06d5663b7d7680ea4 | 2,834 | py | Python | addons/mixins.py | kilinger/marathon-rocketchat-hubot | 682454b90265eb2c66ea222cf0c970370816a9e1 | [
"BSD-3-Clause"
] | 1 | 2018-07-10T07:03:12.000Z | 2018-07-10T07:03:12.000Z | addons/mixins.py | kilinger/marathon-rocketchat-hubot | 682454b90265eb2c66ea222cf0c970370816a9e1 | [
"BSD-3-Clause"
] | null | null | null | addons/mixins.py | kilinger/marathon-rocketchat-hubot | 682454b90265eb2c66ea222cf0c970370816a9e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2015 by the xxxxx Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from hubot.utils.mesos import clean_container_path
class AddonsMixin(object):
    """Common behaviour shared by add-on models.

    Concrete add-ons are expected to override :meth:`get_addon_slug`,
    :meth:`get_addon_id` and :meth:`get_color`.
    """

    addon_name = "addon"
    addon_depend = None
    addon_container_image = "library/busybox"
    addon_default_version = "latest"
    addon_config_vars = []

    def get_addon_slug(self):
        # to be overridden by the concrete add-on class
        return NotImplemented

    def get_addon_id(self):
        # to be overridden by the concrete add-on class
        return NotImplemented

    def get_color(self):
        # to be overridden by the concrete add-on class
        return NotImplemented

    def get_host(self, suffix=True):
        """Return the add-on hostname, optionally with the weave domain."""
        host = "%s" % self.get_addon_id()
        if suffix:
            host = host + ".weave.local"
        return host

    def get_plugin_volume_name(self, path):
        """Volume name derived from the add-on slug and a container path."""
        return "%s-%s" % (self.get_addon_slug(), clean_container_path(path))

    def get_plugin_volumes(self):
        """'<volume-name>:<container-path>' entries for each declared path."""
        mounts = []
        for path in getattr(self, 'addon_container_paths', []):
            mounts.append("%s:%s" % (self.get_plugin_volume_name(path), path))
        return mounts

    def get_docker_parameters(self):
        """Docker parameters: weave hostname label plus volume settings."""
        parameters = [
            dict(key="label",
                 value="weave_hostname=%s" % self.get_host(suffix=False)),
        ]
        volumes = self.get_plugin_volumes()
        if volumes:
            parameters.append(dict(key="volume-driver", value="rexray"))
            for volume in volumes:
                parameters.append(dict(key="volume", value=volume))
        return parameters

    def get_config_vars(self):
        return self.addon_config_vars

    def get_config(self, primary=True, alias=None):
        """Build the config dict from get_config_<var>() getters.

        With ``primary=False`` the deployment color is inserted before the
        last name part (e.g. DATABASE_URL -> DATABASE_<COLOR>_URL); ``alias``
        overrides the key entirely.
        """
        config = dict()
        for raw_var in self.get_config_vars():
            var = raw_var.upper()
            if primary:
                key = var
            else:
                parts = var.split('_')
                parts.insert(-1, self.get_color().upper())
                key = '_'.join(parts)
            if alias:
                key = alias.upper()
            getter = getattr(self, "get_config_%s" % var.lower(), None)
            if getter:
                config[key] = getter()
        return config

    def has_snapshot_support(self):
        # snapshots only make sense when the add-on mounts volumes
        return bool(self.get_plugin_volumes())

    def create_snapshot(self, description=None):
        """Create and return an AddonSnapshot for this add-on."""
        from addons.models import AddonSnapshot
        snapshot = AddonSnapshot.objects.create(addon=self,
                                                description=description or '')
        snapshot.create(description=description)
        return snapshot

    def destroy_snapshot(self, snapshot_short_id):
        """Destroy the snapshot with the given short id, if it exists."""
        from addons.models import AddonSnapshot
        try:
            snapshot = AddonSnapshot.objects.get(addon=self,
                                                 short_id=snapshot_short_id)
        except AddonSnapshot.DoesNotExist:
            # nothing to destroy; silently ignore, like the original
            pass
        else:
            snapshot.destroy()
| 30.473118 | 104 | 0.623853 | 334 | 2,834 | 5.086826 | 0.320359 | 0.041201 | 0.038258 | 0.047675 | 0.202472 | 0.042378 | 0 | 0 | 0 | 0 | 0 | 0.006747 | 0.267819 | 2,834 | 92 | 105 | 30.804348 | 0.812048 | 0.048342 | 0 | 0.123077 | 0 | 0 | 0.05318 | 0.00781 | 0 | 0 | 0 | 0 | 0 | 1 | 0.184615 | false | 0.015385 | 0.061538 | 0.092308 | 0.523077 | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
511493f52cb1eeb1e8430922732560c6965e53e7 | 5,069 | py | Python | flexmatcher/classify/nGramClassifier.py | austinkwillis/flexmatcher | c771cea696014f62bf919ecf678835d8c655d04f | [
"Apache-2.0"
] | 28 | 2017-07-19T19:02:56.000Z | 2022-01-11T10:40:06.000Z | flexmatcher/classify/nGramClassifier.py | austinkwillis/flexmatcher | c771cea696014f62bf919ecf678835d8c655d04f | [
"Apache-2.0"
] | 253 | 2018-02-10T22:22:16.000Z | 2022-03-27T18:43:17.000Z | flexmatcher/classify/nGramClassifier.py | austinkwillis/flexmatcher | c771cea696014f62bf919ecf678835d8c655d04f | [
"Apache-2.0"
] | 10 | 2018-02-21T06:41:30.000Z | 2022-02-20T12:18:46.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn import linear_model
from flexmatcher.classify import Classifier
import numpy as np
class NGramClassifier(Classifier):
    """Classify data-points using counts of n-gram sequence of words or chars.

    The NGramClassifier uses n-grams of words or characters (based on user
    preference) and extracts count features or binary features (based on user
    preference) to train a classifier. It uses a LogisticRegression
    classifier as its training model.

    Attributes:
        labels (ndarray): Vector storing the labels of each data-point.
        features (ndarray): Matrix storing the extracted features.
        vectorizer (object): Vectorizer for transforming text to features. It
            will be either of type CountVectorizer or HashingVectorizer.
        lrm (LogisticRegression): The classifier instance (the original
            docstring documented this as ``clf``, but the attribute set in
            ``fit`` is ``lrm``).
        num_classes (int): Number of classes/columns to match to
        all_classes (ndarray): Sorted array of all possible classes
    """

    def __init__(self, ngram_range=(1, 1), analyzer='word', count=True,
                 n_features=200):
        """Initializes the classifier.

        Args:
            ngram_range (tuple): Pair of ints specifying the range of ngrams.
            analyzer (string): Determines what type of analyzer to be used.
            Setting it to 'word' will consider each word as a unit of language
            and 'char' will consider each character as a unit of language.
            count (boolean): Determines if features are counts of n-grams
            versus a binary value encoding if the n-gram is present or not.
            n_features (int): Maximum number of features used.
        """
        # checking what type of vectorizer to create
        if count:
            self.vectorizer = CountVectorizer(analyzer=analyzer,
                                              ngram_range=ngram_range,
                                              max_features=n_features)
        else:
            self.vectorizer = HashingVectorizer(analyzer=analyzer,
                                                ngram_range=ngram_range,
                                                n_features=n_features)

    def fit(self, data):
        """Extract features from the training data and fit the model.

        Args:
            data (dataframe): Training data (values and their correct column).
        """
        self.labels = np.array(data['class'])
        self.num_classes = len(data['class'].unique())
        self.all_classes = np.sort(np.unique(self.labels))
        values = list(data['value'])
        self.features = self.vectorizer.fit_transform(values).toarray()
        # training the classifier
        self.lrm = linear_model.LogisticRegression(class_weight='balanced')
        self.lrm.fit(self.features, self.labels)

    def predict_training(self, folds=5):
        """Do cross-validation and return probabilities for each data-point.

        Args:
            folds (int): Number of folds used for prediction on training data.
        """
        partial_clf = linear_model.LogisticRegression(class_weight='balanced')
        prediction = np.zeros((len(self.features), self.num_classes))
        skf = StratifiedKFold(n_splits=folds)
        for train_index, test_index in skf.split(self.features, self.labels):
            # prepare the training and test data
            training_features = self.features[train_index]
            test_features = self.features[test_index]
            training_labels = self.labels[train_index]
            # fitting the model and predicting
            partial_clf.fit(training_features, training_labels)
            curr_pred = partial_clf.predict_proba(test_features)
            prediction[test_index] = \
                self.predict_proba_ordered(curr_pred, partial_clf.classes_)
        return prediction

    def predict_proba_ordered(self, probs, classes):
        """Fills out the probability matrix with classes that were missing.

        Args:
            probs (list): list of probabilities, output of predict_proba
            classes (ndarray): list of classes from clf.classes_
        """
        # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin float (== np.float64 here) is the documented replacement.
        proba_ordered = np.zeros((probs.shape[0], self.all_classes.size),
                                 dtype=float)
        sorter = np.argsort(self.all_classes)
        idx = sorter[np.searchsorted(self.all_classes, classes, sorter=sorter)]
        proba_ordered[:, idx] = probs
        return proba_ordered

    def predict(self, data):
        """Predict the class for a new given data.

        Args:
            data (dataframe): Dataframe of values to predict the column for.
        """
        values = list(data['value'])
        features = self.vectorizer.transform(values).toarray()
        return self.lrm.predict_proba(features)
| 44.858407 | 79 | 0.64924 | 595 | 5,069 | 5.394958 | 0.310924 | 0.026168 | 0.017445 | 0.017445 | 0.086604 | 0.076012 | 0 | 0 | 0 | 0 | 0 | 0.001912 | 0.27757 | 5,069 | 112 | 80 | 45.258929 | 0.874659 | 0.395147 | 0 | 0.076923 | 0 | 0 | 0.014342 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096154 | false | 0 | 0.173077 | 0 | 0.346154 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5116e871b3a1ab4846b46b9fec5ed8c06b14c048 | 3,001 | py | Python | experiment/test/nngen.py | seonglae/commit-autosuggestions | 49c0ab65f20bda835b7537e042ffc9d338a0d482 | [
"Apache-2.0"
] | 303 | 2020-08-27T06:59:55.000Z | 2022-03-18T17:50:16.000Z | experiment/test/nngen.py | seonglae/commit-autosuggestions | 49c0ab65f20bda835b7537e042ffc9d338a0d482 | [
"Apache-2.0"
] | 4 | 2020-12-01T15:06:46.000Z | 2021-11-10T17:38:19.000Z | experiment/test/nngen.py | seonglae/commit-autosuggestions | 49c0ab65f20bda835b7537e042ffc9d338a0d482 | [
"Apache-2.0"
] | 11 | 2020-11-08T01:52:30.000Z | 2021-10-03T18:45:45.000Z | # encoding=utf-8
import os
import time
import fire
from typing import List
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.translate.bleu_score import sentence_bleu
def load_data(path):
    """Read a file and return its newline-separated entries, each stripped
    of surrounding whitespace. The trailing empty entry produced by a final
    newline is discarded."""
    with open(path, 'r') as handle:
        entries = handle.read().split('\n')[0:-1]
    return [entry.strip() for entry in entries]
def find_mixed_nn(simi, diffs, test_diff, bleu_thre :int =5) -> int:
    """Pick the nearest neighbor by BLEU score among the top cosine candidates.

    The `bleu_thre` training diffs most cosine-similar to `test_diff` are
    re-ranked with sentence-level BLEU; the index of the best is returned
    (0 if every candidate scores 0).
    """
    best_idx, best_score = 0, 0
    test_tokens = test_diff.split()
    # Indices of the top-`bleu_thre` similarities, most similar first.
    for candidate in simi.argsort()[-bleu_thre:][::-1]:
        bleu = sentence_bleu([diffs[candidate].split()], test_tokens)
        if bleu > best_score:
            best_score, best_idx = bleu, candidate
    return best_idx
def find_nn(simi) -> int:
    """Return the index of the highest-similarity entry."""
    ranked = simi.argsort()
    return ranked[-1]
def nngen(train_diffs :List[str], train_msgs :List[str], test_diffs :List[str],
          type :"'mixed': cosine + bleu, 'cos': cosine only" ='mixed',
          bleu_thre :"how many candidates to consider before calculating bleu_score" =5) -> List[str]:
    """NNGen: for each test diff, retrieve the commit message of its nearest
    training diff (bag-of-words cosine similarity, optionally re-ranked by
    BLEU score).

    NOTE: currently, we haven't optimized for large datasets. You may need to
    split the large training set into several chunks and then calculate the
    similarities between train set and test set to speed up the algorithm.
    You may also leverage GPU through pytorch or other libraries.

    Raises:
        ValueError: if `type` is neither 'mixed' nor 'cos'.
    """
    if type not in ("mixed", "cos"):
        # Fixed typo in the error message ('tyoe' -> 'type').
        raise ValueError('Wrong type for nngen.')
    counter = CountVectorizer()
    train_matrix = counter.fit_transform(train_diffs)
    test_matrix = counter.transform(test_diffs)
    # similarities[i, j] = cosine similarity of test diff i vs. train diff j.
    similarities = cosine_similarity(test_matrix, train_matrix)
    test_msgs = []
    for idx, test_simi in enumerate(similarities):
        if (idx + 1) % 100 == 0:
            print(idx + 1)  # progress indicator
        if type == 'mixed':
            max_idx = find_mixed_nn(test_simi, train_diffs, test_diffs[idx], bleu_thre)
        else:
            max_idx = find_nn(test_simi)
        test_msgs.append(train_msgs[max_idx])
    return test_msgs
def main(train_diff_file :str, train_msg_file :str, test_diff_file :str):
    """Run NNGen with the given dataset using default settings.

    Writes the retrieved messages, one per line, to ./nngen.<test basename
    with .diff replaced by .msg> in the current working directory.
    """
    start_time = time.time()
    # (removed unused `test_dirname` local: output always goes to cwd)
    test_basename = os.path.basename(test_diff_file)
    out_file = "./nngen." + test_basename.replace('.diff', '.msg')
    train_diffs = load_data(train_diff_file)
    train_msgs = load_data(train_msg_file)
    test_diffs = load_data(test_diff_file)
    out_msgs = nngen(train_diffs, train_msgs, test_diffs)
    with open(out_file, 'w') as out_f:
        out_f.write("\n".join(out_msgs) + "\n")
    time_cost = time.time() - start_time
    print("Done, cost {}s".format(time_cost))
# Script entry point: expose main() as a command-line command via python-fire.
if __name__ == "__main__":
    fire.Fire({
        'main':main
    })
| 35.305882 | 96 | 0.671776 | 434 | 3,001 | 4.421659 | 0.33871 | 0.025013 | 0.025013 | 0.017718 | 0.026055 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006364 | 0.214595 | 3,001 | 84 | 97 | 35.72619 | 0.807807 | 0.167611 | 0 | 0.032787 | 0 | 0 | 0.078776 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0 | 0.114754 | 0 | 0.262295 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51183bc6bdbf292c7756f837673e942a502dd193 | 237 | py | Python | torchtraps/__init__.py | winzurk/torchtraps | 9a15a2f723a4cddea7efc187f84956b27eb36f2f | [
"MIT"
] | null | null | null | torchtraps/__init__.py | winzurk/torchtraps | 9a15a2f723a4cddea7efc187f84956b27eb36f2f | [
"MIT"
] | 3 | 2021-09-08T01:49:02.000Z | 2022-03-12T00:21:18.000Z | torchtraps/__init__.py | winzurk/torchtraps | 9a15a2f723a4cddea7efc187f84956b27eb36f2f | [
"MIT"
] | null | null | null | """Top-level package for Torch Traps."""
# Package metadata for the torchtraps distribution.
__author__ = """Zac Winzurk"""
__email__ = 'zwinzurk@asu.edu'
__version__ = '0.1.0'

# Submodule re-exports, currently disabled -- presumably to avoid importing
# heavy dependencies at package import time (TODO confirm before re-enabling).
# from .coco_camera_traps_loader import *
# from .imagenet import imagenet_classes
# from .lightning import *
| 23.7 | 41 | 0.725738 | 31 | 237 | 5.032258 | 0.774194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014634 | 0.135021 | 237 | 9 | 42 | 26.333333 | 0.746341 | 0.586498 | 0 | 0 | 0 | 0 | 0.355556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
511bd43634ea3f540136626b6213102cf02c3ef9 | 7,711 | py | Python | odrive/Firmware/fibre/python/fibre/utils.py | kirmani/doggo | f5aadba2a5b664f2d383bca0b35155d65363c498 | [
"MIT"
] | null | null | null | odrive/Firmware/fibre/python/fibre/utils.py | kirmani/doggo | f5aadba2a5b664f2d383bca0b35155d65363c498 | [
"MIT"
] | 3 | 2020-02-26T00:07:53.000Z | 2022-02-26T05:18:31.000Z | odrive/Firmware/fibre/python/fibre/utils.py | kirmani/doggo | f5aadba2a5b664f2d383bca0b35155d65363c498 | [
"MIT"
] | null | null | null |
import sys
import time
import threading
import platform
import subprocess
import os
# On Windows only, set up console color support: win32console is used for the
# scroll/insert tricks in Logger below, and colorama translates the VT100
# escape sequences this module emits. Import failure is non-fatal -- terminal
# features are simply unavailable.
try:
    if platform.system() == 'Windows':
        import win32console
        # TODO: we should win32console anyway so we could just omit colorama
        import colorama
        colorama.init()
except ModuleNotFoundError:
    print("Could not init terminal features.")
    sys.stdout.flush()
    pass
def get_serial_number_str(device):
    """Return the device's serial number formatted as an uppercase hex
    string, or a placeholder when the device exposes no serial number."""
    if not hasattr(device, 'serial_number'):
        return "[unknown serial number]"
    return format(device.serial_number, 'x').upper()
## Threading utils ##
class Event():
    """
    Alternative to threading.Event(), enhanced by the subscribe() function
    that the original fails to provide.

    @param trigger: if supplied, the newly created event will be triggered
    as soon as the trigger event becomes set
    """
    def __init__(self, trigger=None):
        self._evt = threading.Event()
        self._subscribers = []
        self._mutex = threading.Lock()
        if trigger is not None:
            trigger.subscribe(lambda: self.set())

    def is_set(self):
        return self._evt.is_set()

    def set(self):
        """
        Sets the event and invokes all subscribers if the event was
        not already set
        """
        with self._mutex:
            if not self._evt.is_set():
                self._evt.set()
                for s in self._subscribers:
                    s()

    def subscribe(self, handler):
        """
        Invokes the specified handler exactly once as soon as the
        specified event is set. If the event is already set, the
        handler is invoked immediately.
        Returns a function that can be invoked to unsubscribe.
        """
        if handler is None:
            raise TypeError
        with self._mutex:
            self._subscribers.append(handler)
            if self._evt.is_set():
                handler()
        return handler

    def unsubscribe(self, handler):
        with self._mutex:
            self._subscribers.pop(self._subscribers.index(handler))

    def wait(self, timeout=None):
        """Blocks until the event is set; raises TimeoutError on timeout."""
        if not self._evt.wait(timeout=timeout):
            raise TimeoutError()

    def trigger_after(self, timeout):
        """
        Triggers the event after the specified timeout, unless it was set
        in the meantime. This function returns immediately.
        """
        def delayed_trigger():
            # Bug fix: wait() returns None on success and *raises*
            # TimeoutError on timeout, so the original
            # `if not self.wait(...)` could never trigger the event -- the
            # timeout path died with an uncaught exception in this thread.
            try:
                self.wait(timeout=timeout)
            except TimeoutError:
                self.set()
        threading.Thread(target=delayed_trigger, daemon=True).start()
def wait_any(timeout=None, *events):
    """
    Blocks until any of the specified events is triggered and returns the
    index of the event that fired; raises TimeoutError otherwise.

    Param timeout: A timeout in seconds
    """
    combined = threading.Event()
    # Subscribe a setter to every event, remembering each handler so we can
    # unsubscribe afterwards.
    handlers = [(evt, evt.subscribe(lambda: combined.set())) for evt in events]
    combined.wait(timeout=timeout)
    for evt, handler in handlers:
        evt.unsubscribe(handler)
    for index, evt in enumerate(events):
        if evt.is_set():
            return index
    raise TimeoutError()
## Log utils ##
class Logger():
    """
    Logs messages to stdout, with optional ANSI/Win32 colors and an
    indentation prefix per logger instance.
    """
    COLOR_DEFAULT = 0
    COLOR_GREEN = 1
    COLOR_CYAN = 2
    COLOR_YELLOW = 3
    COLOR_RED = 4

    # VT100 escape sequences, interpreted by most terminals (colorama
    # translates them on Windows).
    _VT100Colors = {
        COLOR_GREEN: '\x1b[92;1m',
        COLOR_CYAN: '\x1b[96;1m',
        COLOR_YELLOW: '\x1b[93;1m',
        COLOR_RED: '\x1b[91;1m',
        COLOR_DEFAULT: '\x1b[0m'
    }

    # Win32 console character attributes for the native-API code path.
    _Win32Colors = {
        COLOR_GREEN: 0x0A,
        COLOR_CYAN: 0x0B,
        COLOR_YELLOW: 0x0E,
        COLOR_RED: 0x0C,
        COLOR_DEFAULT: 0x07
    }

    def __init__(self, verbose=True):
        self._prefix = ''
        self._skip_bottom_line = False  # If true, messages are printed one line above the cursor
        self._verbose = verbose
        self._print_lock = threading.Lock()
        if platform.system() == 'Windows':
            self._stdout_buf = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)

    def indent(self, prefix='  '):
        """Return a child logger that prepends an additional prefix.

        Bug fix: the child now inherits this logger's verbosity and
        bottom-line mode; previously it silently reset both to the defaults,
        so debug() on an indented logger printed even when the parent was
        created with verbose=False.
        """
        indented_logger = Logger(verbose=self._verbose)
        indented_logger._prefix = self._prefix + prefix
        indented_logger._skip_bottom_line = self._skip_bottom_line
        return indented_logger

    def print_on_second_last_line(self, text, color):
        """
        Prints a text on the second last line.
        This can be used to print a message above the command
        prompt. If the command prompt spans multiple lines
        there will be glitches.
        If the printed text spans multiple lines there will also
        be glitches (though this could be fixed).
        """
        if platform.system() == 'Windows':
            # Windows <10 doesn't understand VT100 escape codes and the colorama
            # also doesn't support the specific escape codes we need so we use the
            # native Win32 API.
            info = self._stdout_buf.GetConsoleScreenBufferInfo()
            cursor_pos = info['CursorPosition']
            scroll_rect = win32console.PySMALL_RECTType(
                Left=0, Top=1,
                Right=info['Window'].Right,
                Bottom=cursor_pos.Y-1)
            scroll_dest = win32console.PyCOORDType(scroll_rect.Left, scroll_rect.Top-1)
            self._stdout_buf.ScrollConsoleScreenBuffer(
                scroll_rect, scroll_rect, scroll_dest,  # clipping rect is same as scroll rect
                u' ', Logger._Win32Colors[color])  # fill with empty cells with the desired color attributes
            line_start = win32console.PyCOORDType(0, cursor_pos.Y-1)
            self._stdout_buf.WriteConsoleOutputCharacter(text, line_start)
        else:
            # Assume we're in a terminal that interprets VT100 escape codes.
            # TODO: test on macOS
            # Escape character sequence:
            #   ESC 7: store cursor position
            #   ESC 1A: move cursor up by one
            #   ESC 1S: scroll entire viewport by one
            #   ESC 1L: insert 1 line at cursor position
            #   (print text)
            #   ESC 8: restore old cursor position
            with self._print_lock:
                sys.stdout.write('\x1b7\x1b[1A\x1b[1S\x1b[1L')
                sys.stdout.write(Logger._VT100Colors[color] + text + Logger._VT100Colors[Logger.COLOR_DEFAULT])
                sys.stdout.write('\x1b8')
                sys.stdout.flush()

    def print_colored(self, text, color):
        """Print a whole colored line (or delegate to the bottom-line mode)."""
        if self._skip_bottom_line:
            self.print_on_second_last_line(text, color)
        else:
            # On Windows, colorama does the job of interpreting the VT100 escape sequences
            with self._print_lock:
                sys.stdout.write(Logger._VT100Colors[color] + text + Logger._VT100Colors[Logger.COLOR_DEFAULT] + '\n')
                sys.stdout.flush()

    def debug(self, text):
        # Only printed when the logger was created with verbose=True.
        if self._verbose:
            self.print_colored(self._prefix + text, Logger.COLOR_DEFAULT)

    def success(self, text):
        self.print_colored(self._prefix + text, Logger.COLOR_GREEN)

    def info(self, text):
        self.print_colored(self._prefix + text, Logger.COLOR_DEFAULT)

    def notify(self, text):
        self.print_colored(self._prefix + text, Logger.COLOR_CYAN)

    def warn(self, text):
        self.print_colored(self._prefix + text, Logger.COLOR_YELLOW)

    def error(self, text):
        # TODO: write to stderr
        self.print_colored(self._prefix + text, Logger.COLOR_RED)
| 33.672489 | 114 | 0.611983 | 921 | 7,711 | 4.970684 | 0.296417 | 0.023591 | 0.024465 | 0.026212 | 0.181083 | 0.140673 | 0.125819 | 0.114024 | 0.088903 | 0.088903 | 0 | 0.018878 | 0.299313 | 7,711 | 228 | 115 | 33.820175 | 0.828429 | 0.240954 | 0 | 0.202703 | 0 | 0 | 0.034736 | 0.004655 | 0 | 0 | 0.003581 | 0.008772 | 0 | 1 | 0.135135 | false | 0.006757 | 0.054054 | 0.006757 | 0.290541 | 0.101351 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51201b8f267ec7a2dece0fc4da0d42b14f47ffba | 2,720 | py | Python | 2021/day09/main.py | ingjrs01/adventofcode | c5e4f0158dac0efc2dbfc10167f2700693b41fea | [
"Apache-2.0"
] | null | null | null | 2021/day09/main.py | ingjrs01/adventofcode | c5e4f0158dac0efc2dbfc10167f2700693b41fea | [
"Apache-2.0"
] | null | null | null | 2021/day09/main.py | ingjrs01/adventofcode | c5e4f0158dac0efc2dbfc10167f2700693b41fea | [
"Apache-2.0"
] | null | null | null |
def search_low(matrix):
    """Return the (row, col) coordinates of every cell that is strictly
    lower than all of its 4-neighbours."""
    lows = []
    num_rows = len(matrix)
    for r in range(num_rows):
        num_cols = len(matrix[r])
        for c in range(num_cols):
            height = matrix[r][c]
            neighbours = []
            if c > 0:
                neighbours.append(matrix[r][c - 1])
            if c < num_cols - 1:
                neighbours.append(matrix[r][c + 1])
            if r > 0:
                neighbours.append(matrix[r - 1][c])
            if r < num_rows - 1:
                neighbours.append(matrix[r + 1][c])
            if all(height < n for n in neighbours):
                lows.append((r, c))
    return lows
def f_in(p, m):
    """Return True iff some entry of m matches p on its first two
    components (coordinate-pair membership test)."""
    return any(e[0] == p[0] and e[1] == p[1] for e in m)
def calc_basin(matrix, coords):
    """Return the size of the basin containing `coords`.

    A basin is the set of cells connected to `coords` through 4-neighbour
    moves, bounded by cells of height 9. `coords` itself is assumed not to
    be a 9 (it is a low point).

    Bug fixes vs. the original:
      * the down-neighbour membership test read `actual[0+1]` (i.e. the
        column) instead of `actual[0]+1`, so visited-state tracking for that
        direction was wrong;
      * the O(n) list-membership scans (`f_in`) are replaced with sets.
    """
    seen = set()
    pending = [tuple(coords)]
    while pending:
        r, c = pending.pop()
        if (r, c) in seen:
            continue
        seen.add((r, c))
        # Explore the four orthogonal neighbours that are inside the grid
        # and not walls (height 9).
        for nr, nc in ((r, c - 1), (r, c + 1), (r - 1, c), (r + 1, c)):
            if 0 <= nr < len(matrix) and 0 <= nc < len(matrix[nr]):
                if matrix[nr][nc] != 9 and (nr, nc) not in seen:
                    pending.append((nr, nc))
    return len(seen)
# --- Advent of Code 2021, day 9 driver ---
# Parse the input file 'real': one row of single-digit heights per line.
matrix = []
lines = open('real', 'r').readlines()
for line in lines:
    row = []
    l = line.strip()
    for i in range(len(l)):
        row.append(int(l[i]))
    matrix.append(row)

# Part 1: sum of (height + 1) over every low point.
positions = search_low(matrix)
total = 0
for p in positions:
    valor = matrix[p[0]][p[1]] + 1
    total += valor
print(total)

# Part 2: product of the sizes of the three largest basins.
tams = []
print("Segunda parte")
for p in positions:
    tams.append(calc_basin(matrix, p))
la = sorted(tams)
t = len(la) - 1
resultado = la[t] * la[t - 1] * la[t - 2]
print(resultado)
| 30.561798 | 124 | 0.516176 | 386 | 2,720 | 3.601036 | 0.163212 | 0.095683 | 0.058273 | 0.080576 | 0.499281 | 0.463309 | 0.463309 | 0.460432 | 0.460432 | 0.421583 | 0 | 0.043864 | 0.295956 | 2,720 | 88 | 125 | 30.909091 | 0.681984 | 0.020221 | 0 | 0.085714 | 0 | 0 | 0.006762 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0 | 0 | 0.1 | 0.042857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51219e777435d891fa05113f0da030e18ce7d68a | 4,364 | py | Python | handwrite/sheettopng.py | sakshamarora1/handwrite | 628c53f9fbca0bf9731e0ebc7d6c8ca2525f1b29 | [
"MIT"
] | null | null | null | handwrite/sheettopng.py | sakshamarora1/handwrite | 628c53f9fbca0bf9731e0ebc7d6c8ca2525f1b29 | [
"MIT"
] | null | null | null | handwrite/sheettopng.py | sakshamarora1/handwrite | 628c53f9fbca0bf9731e0ebc7d6c8ca2525f1b29 | [
"MIT"
] | null | null | null | import os
import sys
import itertools
import cv2
# Seq: A-Z, a-z, 0-9, SPECIAL_CHARS
# Unicode code points of every character expected on a sheet, in the order
# the grid cells are read: uppercase letters, lowercase letters, digits,
# then the punctuation characters listed below.
ALL_CHARS = list(
    itertools.chain(
        range(65, 91),
        range(97, 123),
        range(48, 58),
        [ord(i) for i in ".,;:!?\"'-+=/%&()[]"],
    )
)
class SheetToPNG:
    """Splits a scanned handwriting sheet into one PNG image per character.

    The sheet is assumed to be a `rows` x `cols` grid of boxes, each holding
    one handwritten character, laid out in the order of ALL_CHARS.
    """

    def __init__(self):
        pass

    def convert(self, sheet, characters_dir, cols=8, rows=10, threshold_value=200):
        """Detect all character cells in `sheet` and write them under
        `characters_dir` (one subdirectory per character ordinal).

        Args:
            sheet: path to the sheet image file (read via cv2.imread).
            characters_dir: output directory root.
            cols, rows: grid dimensions of the sheet.
            threshold_value: grayscale threshold used for contour detection.
        """
        # TODO If directory given instead of image file, read all images and write the images
        # (example) 0.png, 1.png, 2.png inside every character folder in characters/
        # sheet_images = []
        # for s in os.listdir(sheet_dir):
        #     sheet_images.append(cv2.imread(sheet_dir + "/" + s))

        characters = self.detectCharacters(sheet, threshold_value, cols=cols, rows=rows)
        self.createCharacterDirectory(characters, characters_dir)

    def detectCharacters(self, sheet_image, threshold_value, cols=8, rows=10):
        """Locate the rows*cols character boxes on the sheet.

        Returns a list of [roi, cx, cy] entries in reading order
        (left-to-right within each row, rows top-to-bottom), where roi is the
        cropped cell image and (cx, cy) its center.
        """
        # TODO Raise errors and suggest where the problem might be

        # Read the image and convert to grayscale
        image = cv2.imread(sheet_image)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Threshold and filter the image for better contour detection
        ret, thresh = cv2.threshold(gray, threshold_value, 255, 1)
        close_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, close_kernel, iterations=2)

        # Search for contours.
        contours, h = cv2.findContours(
            close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )

        # Filter contours based on number of sides and then reverse sort by area.
        # NOTE(review): approxPolyDP with a 1%-of-arc-length tolerance keeps
        # only roughly quadrilateral contours -- presumably the printed grid
        # boxes; confirm against the sheet template.
        contours = sorted(
            filter(
                lambda cnt: len(
                    cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
                )
                == 4,
                contours,
            ),
            key=cv2.contourArea,
            reverse=True,
        )

        # Calculate the bounding of the first contour and approximate the height
        # and width for final cropping.
        x, y, w, h = cv2.boundingRect(contours[0])
        space_h, space_w = 7 * h // 16, 7 * w // 16

        # Since amongst all the contours, the expected case is that the 4 sided contours
        # containing the characters should have the maximum area, so we loop through the first
        # rows*colums contours and add them to final list after cropping.
        characters = []
        for i in range(rows * cols):
            x, y, w, h = cv2.boundingRect(contours[i])
            cx, cy = x + w // 2, y + h // 2
            roi = image[cy - space_h : cy + space_h, cx - space_w : cx + space_w]
            characters.append([roi, cx, cy])

        # Now we have the characters but since they are all mixed up we need to position them.
        # Sort characters based on 'y' coordinate and group them by number of rows at a time. Then
        # sort each group based on the 'x' coordinate.
        characters.sort(key=lambda x: x[2])
        sorted_characters = []
        for k in range(rows):
            sorted_characters.extend(
                sorted(characters[cols * k : cols * (k + 1)], key=lambda x: x[1])
            )
        return sorted_characters

    def createCharacterDirectory(self, characters, characters_dir):
        """Write each cropped character to characters_dir/<ord>/<ord>.png,
        creating directories as needed."""
        if not os.path.exists(characters_dir):
            os.mkdir(characters_dir)

        # Create directory for each character and save the png for the characters
        # Structure: UserProvidedDir/ord(character)/ord(character).png
        for k, images in enumerate(characters):
            character = os.path.join(characters_dir, str(ALL_CHARS[k]))
            if not os.path.exists(character):
                os.mkdir(character)
            cv2.imwrite(
                os.path.join(character, str(ALL_CHARS[k]) + ".png"), images[0],
            )
def main():
    """CLI entry point: convert a filled character sheet into per-character
    PNGs.

    Usage: sheettopng SHEET_PATH CHARACTER_DIRECTORY_PATH [THRESHOLD_VALUE]
    (threshold defaults to 200).
    """
    if len(sys.argv) > 1:
        # Use a default threshold instead of mutating sys.argv (the original
        # appended an int to argv as a default-argument hack).
        threshold = int(sys.argv[3]) if len(sys.argv) > 3 else 200
        SheetToPNG().convert(
            sheet=sys.argv[1],
            characters_dir=sys.argv[2],
            cols=8,
            rows=10,
            threshold_value=threshold,
        )
    else:
        print(
            "Usage: sheettopng [SHEET_PATH] [CHARACTER_DIRECTORY_PATH] [THRESHOLD_VALUE (Default: 200)]"
        )
| 36.366667 | 104 | 0.592117 | 558 | 4,364 | 4.546595 | 0.354839 | 0.035869 | 0.010642 | 0.013007 | 0.054395 | 0.040993 | 0.021285 | 0 | 0 | 0 | 0 | 0.028458 | 0.307516 | 4,364 | 119 | 105 | 36.672269 | 0.811052 | 0.281852 | 0 | 0 | 0 | 0.025641 | 0.032455 | 0.008355 | 0 | 0 | 0 | 0.008403 | 0 | 1 | 0.064103 | false | 0.012821 | 0.051282 | 0 | 0.141026 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5121b7e379746aac2a670977c3fda4d01dade4a4 | 1,261 | py | Python | 2021/Day 7/solution.py | theleteron/advent-of-code | 45900a8c14a966e4ecbe699e6423072254d09d95 | [
"MIT"
] | 1 | 2021-12-02T18:28:28.000Z | 2021-12-02T18:28:28.000Z | 2021/Day 7/solution.py | theleteron/advent-of-code | 45900a8c14a966e4ecbe699e6423072254d09d95 | [
"MIT"
] | null | null | null | 2021/Day 7/solution.py | theleteron/advent-of-code | 45900a8c14a966e4ecbe699e6423072254d09d95 | [
"MIT"
] | null | null | null | class Day():
def __init__(self, data_path):
with open(data_path, "r") as file:
for line in file:
self.positions = [(int(position)) for position in line.strip().split(',')]
def part1(self):
fuel_cost = -1
for target in range(min(self.positions), max(self.positions)+1):
current_cost = 0
for number in self.positions:
current_cost += target - number if target > number else number - target
if current_cost < fuel_cost or fuel_cost == -1:
fuel_cost = current_cost
return fuel_cost
def cost(self, x):
return x * (x + 1) // 2
def part2(self):
fuel_cost = -1
for target in range(min(self.positions), max(self.positions)+1):
current_cost = 0
for number in self.positions:
current_cost += self.cost(target - number) if target > number else self.cost(number - target)
if current_cost < fuel_cost or fuel_cost == -1:
fuel_cost = current_cost
return fuel_cost
# Script entry point: solve both parts for the bundled input file.
if __name__ == "__main__":
    DATA_INPUT_LOCATION = "data.in"
    day = Day(DATA_INPUT_LOCATION)
    print(day.part1())
    print(day.part2())
5121cc545c9a55cdbb8d25e3c6ef3ab3548b3342 | 849 | py | Python | dataflows/processors/deduplicate.py | cschloer/dataflows | 78a683b5d202512c06021ff6be8ac7f60ef1cd9b | [
"MIT"
] | 160 | 2018-06-13T23:16:26.000Z | 2022-03-11T21:26:44.000Z | dataflows/processors/deduplicate.py | cschloer/dataflows | 78a683b5d202512c06021ff6be8ac7f60ef1cd9b | [
"MIT"
] | 164 | 2018-07-08T13:05:30.000Z | 2021-09-30T08:54:59.000Z | dataflows/processors/deduplicate.py | cschloer/dataflows | 78a683b5d202512c06021ff6be8ac7f60ef1cd9b | [
"MIT"
] | 41 | 2018-08-07T08:05:30.000Z | 2021-12-18T04:34:06.000Z | from dataflows import PackageWrapper, ResourceWrapper
from ..helpers.resource_matcher import ResourceMatcher
def deduper(rows: ResourceWrapper):
pk = rows.res.descriptor['schema'].get('primaryKey', [])
if len(pk) == 0:
yield from rows
else:
keys = set()
for row in rows:
key = tuple(row[k] for k in pk)
if key in keys:
continue
keys.add(key)
yield row
def deduplicate(resources=None):
def func(package: PackageWrapper):
resource_matcher = ResourceMatcher(resources, package)
yield package.pkg
resource: ResourceWrapper
for resource in package:
if resource_matcher.match(resource.res.name):
yield deduper(resource)
else:
yield resource
return func
| 26.53125 | 62 | 0.599529 | 91 | 849 | 5.56044 | 0.461538 | 0.088933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001739 | 0.322733 | 849 | 31 | 63 | 27.387097 | 0.878261 | 0 | 0 | 0.08 | 0 | 0 | 0.018846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.08 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
512428cd6c69268176ef05fa7a8650131e018089 | 563 | py | Python | vendor/mo_logs/log_usingNothing.py | klahnakoski/auth0-api | eda9c2554c641da76687f64445b8d35543d012d9 | [
"MIT"
] | null | null | null | vendor/mo_logs/log_usingNothing.py | klahnakoski/auth0-api | eda9c2554c641da76687f64445b8d35543d012d9 | [
"MIT"
] | null | null | null | vendor/mo_logs/log_usingNothing.py | klahnakoski/auth0-api | eda9c2554c641da76687f64445b8d35543d012d9 | [
"MIT"
] | null | null | null | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from mo_future import is_text, is_binary
class StructuredLogger(object):
"""
ABSTRACT BASE CLASS FOR JSON LOGGING
"""
def write(self, template, params):
pass
def stop(self):
pass
| 20.107143 | 75 | 0.694494 | 84 | 563 | 4.547619 | 0.761905 | 0.026178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011338 | 0.216696 | 563 | 27 | 76 | 20.851852 | 0.854875 | 0.51865 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0.285714 | 0.285714 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 3 |
51277cbf373e68b6520295ae14d8965cded5385d | 158 | py | Python | basic_system/routing.py | JagerCox/basic-project-django2-channels2-gunicorn-dahpne | 5329a06d251706f6f02cf202dff04959982d8214 | [
"MIT"
] | 3 | 2019-04-08T12:51:57.000Z | 2021-07-28T16:54:41.000Z | basic_system/routing.py | JagerCox/basic-project-django2-channels2-gunicorn-dahpne | 5329a06d251706f6f02cf202dff04959982d8214 | [
"MIT"
] | null | null | null | basic_system/routing.py | JagerCox/basic-project-django2-channels2-gunicorn-dahpne | 5329a06d251706f6f02cf202dff04959982d8214 | [
"MIT"
] | 1 | 2019-04-08T12:52:00.000Z | 2019-04-08T12:52:00.000Z | from django.urls import path
from .consumers import ChatConsumer
websocket_urlpatterns = [
path('ws/<str:room_name>/', ChatConsumer, name='consumer'),
]
| 22.571429 | 63 | 0.746835 | 19 | 158 | 6.105263 | 0.736842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.126582 | 158 | 6 | 64 | 26.333333 | 0.84058 | 0 | 0 | 0 | 0 | 0 | 0.170886 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
5127e82ffefa06ac56296824a5c55b26831611d6 | 3,679 | py | Python | examples/development/simulate_policy.py | iclavera/cassie | f2e253bf29fa0f872974188aed1fdfbe06efc37e | [
"MIT"
] | null | null | null | examples/development/simulate_policy.py | iclavera/cassie | f2e253bf29fa0f872974188aed1fdfbe06efc37e | [
"MIT"
] | 11 | 2020-01-28T22:32:20.000Z | 2022-03-11T23:37:57.000Z | examples/development/simulate_policy.py | iclavera/cassie | f2e253bf29fa0f872974188aed1fdfbe06efc37e | [
"MIT"
] | null | null | null | import argparse
from distutils.util import strtobool
import json
import os
import pickle
import tensorflow as tf
import numpy as np
from softlearning.policies.utils import get_policy_from_variant
from softlearning.samplers import rollouts
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('checkpoint_path',
type=str,
help='Path to the checkpoint.')
parser.add_argument('--max-path-length', '-l', type=int, default=1000)
parser.add_argument('--num-rollouts', '-n', type=int, default=10)
parser.add_argument('--render-mode', '-r',
type=str,
default='human',
choices=('human', 'rgb_array', None),
help="Mode to render the rollouts in.")
parser.add_argument('--deterministic', '-d',
type=strtobool,
nargs='?',
const=True,
default=True,
help="Evaluate policy deterministically.")
args = parser.parse_args()
return args
def simulate_policy(args):
session = tf.keras.backend.get_session()
checkpoint_path = args.checkpoint_path.rstrip('/')
experiment_path = os.path.dirname(checkpoint_path)
variant_path = os.path.join(experiment_path, 'params.json')
with open(variant_path, 'r') as f:
variant = json.load(f)
with session.as_default():
pickle_path = os.path.join(checkpoint_path, 'checkpoint.pkl')
with open(pickle_path, 'rb') as f:
pickleable = pickle.load(f)
env = pickleable['env']
policy = (
get_policy_from_variant(variant, env, Qs=[None]))
policy.set_weights(pickleable['policy_weights'])
if True: #hard coded
import numpy as np
import scipy.io as sio
ws = policy.get_weights()
w0, b0, w1, b1, w2, b2 = ws[0], ws[1], ws[2], ws[3], ws[4], ws[5]
savematpath = '/home/parsa/projects/cassie/cassie_ignasi3/policy_weights.mat' #hard coded
sio.savemat(savematpath, {'w0':w0, 'b0':b0, 'w1':w1, 'b1':b1, 'w2':w2, 'b2':b2})
# env.unwrapped.vis.start_recording()
with policy.set_deterministic(args.deterministic):
paths = rollouts(env,
policy,
path_length=args.max_path_length,
n_paths=args.num_rollouts,
render_mode=args.render_mode)
import matplotlib.pyplot as plt
real = [path['observations'][:, 0] for path in paths][0]
filtered = [path['observations'][:, 1] for path in paths][0]
fig, axarr = plt.subplots(2, 1)
axarr[0].plot(range(len(real)), real)
axarr[1].plot(range(len(filtered)), filtered)
# velocities_pelvis_filtered = [path['observations'][:, :3] for path in paths]
# velocities_pelvis = [path['observations'][:, -3:] for path in paths]
# fig, axarr = plt.subplots(3, 2)
# for i in range(3):
# for vel_path in velocities_pelvis:
# axarr[i, 0].plot(range(len(vel_path)), vel_path[:,i])
# for vel_path in velocities_pelvis_filtered:
# axarr[i, 1].plot(range(len(vel_path)), np.cumsum(vel_path[:,i]) * 10)
# plt.show()
if args.render_mode != 'human':
from pprint import pprint; import pdb; pdb.set_trace()
pass
# env.unwrapped.vis.stop_recording('./test_vid.mp4', speedup=1, frame_skip=20, timestep=env.unwrapped.dt)
return paths
if __name__ == '__main__':
args = parse_args()
simulate_policy(args)
| 34.383178 | 109 | 0.589562 | 452 | 3,679 | 4.646018 | 0.329646 | 0.017143 | 0.040476 | 0.026667 | 0.088571 | 0.05619 | 0.029524 | 0 | 0 | 0 | 0 | 0.019667 | 0.281326 | 3,679 | 106 | 110 | 34.707547 | 0.774584 | 0.162544 | 0 | 0.057143 | 0 | 0 | 0.112777 | 0.019883 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0.014286 | 0.185714 | 0 | 0.242857 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5128aaf3cd26118d6b07e1e07b47d69c770a4e85 | 79 | py | Python | pathfinder/__init__.py | dsp/aa-pathfinder | 1fdb43948921922d60b8b489b53b33b8ea929d31 | [
"MIT"
] | null | null | null | pathfinder/__init__.py | dsp/aa-pathfinder | 1fdb43948921922d60b8b489b53b33b8ea929d31 | [
"MIT"
] | null | null | null | pathfinder/__init__.py | dsp/aa-pathfinder | 1fdb43948921922d60b8b489b53b33b8ea929d31 | [
"MIT"
] | null | null | null | default_app_config = "pathfinder.apps.PathfinderConfig"
__version__ = "0.1.0"
| 19.75 | 55 | 0.78481 | 10 | 79 | 5.6 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041667 | 0.088608 | 79 | 3 | 56 | 26.333333 | 0.736111 | 0 | 0 | 0 | 0 | 0 | 0.468354 | 0.405063 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
51297e9b178ac6da2a46669f12b122f74df2ecf7 | 415 | py | Python | settings/live.py | mhfowler/abridgedmaps | d0802bd6955714d174d208bea809191bff4615b3 | [
"MIT"
] | null | null | null | settings/live.py | mhfowler/abridgedmaps | d0802bd6955714d174d208bea809191bff4615b3 | [
"MIT"
] | null | null | null | settings/live.py | mhfowler/abridgedmaps | d0802bd6955714d174d208bea809191bff4615b3 | [
"MIT"
] | null | null | null | from settings.common import *
DEBUG=True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'mydatabase',
}
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
| 18.863636 | 62 | 0.684337 | 49 | 415 | 5.591837 | 0.836735 | 0.072993 | 0.109489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002924 | 0.175904 | 415 | 21 | 63 | 19.761905 | 0.798246 | 0.26506 | 0 | 0 | 0 | 0 | 0.333333 | 0.16 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
512a41bacb1cddefe094cb3758285c75286428d1 | 680 | py | Python | labelgame/migrations/0011_auto_20170822_1902.py | capriciash/civicu_app | 102a40fb771a9d9b37878da02ea66a0920006d8c | [
"MIT"
] | null | null | null | labelgame/migrations/0011_auto_20170822_1902.py | capriciash/civicu_app | 102a40fb771a9d9b37878da02ea66a0920006d8c | [
"MIT"
] | null | null | null | labelgame/migrations/0011_auto_20170822_1902.py | capriciash/civicu_app | 102a40fb771a9d9b37878da02ea66a0920006d8c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-23 02:02
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration (Django 1.11.3); edit only if you
# understand the migration graph state.
class Migration(migrations.Migration):

    # Must be applied after 0010_auto_20170822_1857 in the labelgame app.
    dependencies = [
        ('labelgame', '0010_auto_20170822_1857'),
    ]

    operations = [
        # Replace the field with a fixed choice set; default=None and
        # null=True keep existing rows valid without a data migration.
        migrations.AlterField(
            model_name='userlabel',
            name='sel_animal',
            field=models.CharField(choices=[('Wolf', 'Wolf'), ('Bear', 'Bear'), ('Monkey', 'Monkey'), ('Not an Animal', 'NotAnimal'), ('Skipped', 'Skipped')], default=None, max_length=20, null=True),
        ),
        # Drop the Animals model — presumably superseded by the inline
        # choices above; confirm against 0010 and the current models.
        migrations.DeleteModel(
            name='Animals',
        ),
    ]
| 28.333333 | 199 | 0.598529 | 72 | 680 | 5.5 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067961 | 0.242647 | 680 | 23 | 200 | 29.565217 | 0.700971 | 0.1 | 0 | 0.125 | 1 | 0 | 0.200328 | 0.037767 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
512cb58d8316e571507a93e75a64d19559f26b6b | 2,762 | py | Python | models/tag.py | noahkw/botw-bot | 8d8c9515a177c52270093fb64abf34d111535d16 | [
"MIT"
] | 1 | 2020-11-29T23:00:27.000Z | 2020-11-29T23:00:27.000Z | models/tag.py | noahkw/botw-bot | 8d8c9515a177c52270093fb64abf34d111535d16 | [
"MIT"
] | 18 | 2020-08-05T11:59:31.000Z | 2022-03-15T03:48:40.000Z | models/tag.py | noahkw/botw-bot | 8d8c9515a177c52270093fb64abf34d111535d16 | [
"MIT"
] | null | null | null | import re
import discord
from sqlalchemy import (
Column,
String,
BigInteger,
Integer,
Boolean,
update,
delete,
)
from sqlalchemy.ext.hybrid import hybrid_property
from models.base import Base, PendulumDateTime
from util import safe_mention
IMAGE_URL_REGEX = r"https?:\/\/.*\.(jpe?g|png|gif)"
class Tag(Base):
    """A guild-scoped trigger/reaction pair stored in the ``tags`` table."""

    __tablename__ = "tags"

    # Attributes that may be modified through the update path.
    EDITABLE = frozenset(["trigger", "reaction", "in_msg"])

    tag_id = Column(Integer, primary_key=True)
    trigger = Column(String, nullable=False)
    reaction = Column(String, nullable=False)
    in_msg = Column(Boolean, default=False)
    _creator = Column(BigInteger, nullable=False)
    _guild = Column(BigInteger, nullable=False)
    use_count = Column(Integer, default=0)
    # BUGFIX: pass the callable itself so the timestamp is evaluated per
    # INSERT. The original ``default=PendulumDateTime.now()`` was evaluated
    # once at import time, stamping every new row with the process start
    # time. (Assumes PendulumDateTime.now returns a current timestamp —
    # confirm in models.base.)
    date = Column(PendulumDateTime, default=PendulumDateTime.now)

    @hybrid_property
    def creator(self):
        """Resolve the stored creator id via the injected bot (may be None)."""
        return self.bot.get_user(self._creator)

    @hybrid_property
    def guild(self):
        """Resolve the stored guild id via the injected bot (may be None)."""
        return self.bot.get_guild(self._guild)

    def __eq__(self, other):
        # Tags are equal when trigger and reaction match case-insensitively
        # within the same guild.
        if not isinstance(other, Tag):
            return NotImplemented
        return (
            str.lower(self.trigger) == str.lower(other.trigger)
            and str.lower(self.reaction) == str.lower(other.reaction)
            and self._guild == other._guild
        )

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None (unhashable instances);
        # restore hashability consistently with __eq__.
        return hash((self.trigger.lower(), self.reaction.lower(), self._guild))

    def to_list_element(self, index):
        """Render one numbered line for a tag listing."""
        return f"*{index + 1}*. `{self.tag_id}`: *{self.trigger}* by {self.creator}"

    def info_embed(self):
        """Build the detailed info embed; attaches the reaction as an image
        when it matches the image-URL pattern."""
        embed = (
            discord.Embed(title=f"Tag `{self.tag_id}`", timestamp=self.date)
            .add_field(name="Trigger", value=self.trigger)
            .add_field(name="Reaction", value=self.reaction)
            .add_field(name="Creator", value=safe_mention(self.creator))
            .add_field(name="Triggers in message", value=str(self.in_msg))
            .add_field(name="Use Count", value=str(self.use_count))
            .set_footer(text="Created")
        )
        if re.search(IMAGE_URL_REGEX, self.reaction):
            embed.set_image(url=self.reaction)
        return embed

    async def increment_use_count(self, session):
        """Bump the in-memory counter and persist it for this row."""
        self.use_count += 1
        statement = (
            update(Tag)
            .where(Tag.tag_id == self.tag_id)
            .values(use_count=self.use_count)
        )
        await session.execute(statement)

    async def delete(self, session):
        """Delete this tag's row from the database."""
        statement = delete(Tag).where(Tag.tag_id == self.tag_id)
        await session.execute(statement)

    async def update(self, session, key, value):
        """Set *key* to *value* on the instance and persist the change."""
        setattr(self, key, value)
        statement = update(Tag).where(Tag.tag_id == self.tag_id).values({key: value})
        await session.execute(statement)

    @classmethod
    def inject_bot(cls, bot):
        """Attach the running bot so creator/guild properties can resolve ids."""
        cls.bot = bot
| 30.351648 | 85 | 0.633599 | 340 | 2,762 | 4.982353 | 0.302941 | 0.026564 | 0.026564 | 0.024793 | 0.135183 | 0.11157 | 0.069067 | 0.069067 | 0.054309 | 0.054309 | 0 | 0.001436 | 0.243664 | 2,762 | 90 | 86 | 30.688889 | 0.809478 | 0 | 0 | 0.068493 | 0 | 0.013699 | 0.071325 | 0.010862 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082192 | false | 0 | 0.082192 | 0.041096 | 0.39726 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
512d55659640ebb6a556347baa4a333ef9d98192 | 1,478 | py | Python | tests/highlevel/highlink-schema-tf.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 15 | 2017-06-07T12:49:12.000Z | 2020-07-25T18:06:04.000Z | tests/highlevel/highlink-schema-tf.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 110 | 2016-06-21T23:20:44.000Z | 2022-02-24T16:15:22.000Z | tests/highlevel/highlink-schema-tf.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 6 | 2016-06-21T11:19:22.000Z | 2019-01-21T13:45:39.000Z | from seamless.highlevel import Context, Cell, Transformer
# Smoke test: link a plain cell to a transformer's input/result schema in a
# seamless Context and observe how value/schema changes propagate.
ctx = Context()
ctx.v = lambda a: 42  # transformer with a single input pin ``a``
ctx.v.a = "test"
ctx.v_schema = Cell()
ctx.v_schema.celltype = "plain"
ctx.translate()
# Mirror the transformer's input schema into the plain cell.
ctx.link(ctx.v.schema, ctx.v_schema)
ctx.translate()
# Declare ``a`` as an integer; any validation problem with the current
# string value should surface in ctx.v.inp.exception below.
ctx.v_schema.set({'type': 'object', 'properties': {'a': {'type': 'integer'}}})
ctx.compute()
print(ctx.v.schema)
print("*" * 50)
print(ctx.v.inp.exception)
print("*" * 50)
# Clear the schema again.
ctx.v.schema.set({})
ctx.compute() # this is needed, else the 1.2 below might take effect first,
# and then be overwritten by this. Seamless is async!!
print(ctx.v.schema)
print(ctx.v_schema.value)
# Set an example value for ``a`` (a float) and inspect the resulting state.
ctx.v.inp.example.a = 1.2
ctx.compute()
print("value:", ctx.v.inp.value)
print("data:", ctx.v.inp.data)
print("buffered:", ctx.v.inp.buffered)
print(ctx.v_schema.value)
print("*" * 50)
print(ctx.v.inp.exception)
print("*" * 50)
# Constrain ``a`` to string and inspect again.
ctx.v_schema.set({'type': 'object', 'properties': {'a': {'type': 'string'}}})
ctx.compute()
print(ctx.v_schema.value)
print(ctx.v.schema)
print("value:", ctx.v.inp.value)
print()
# Re-link the shared cell to the *result* schema instead of the input schema.
ctx.unlink(ctx.v.schema, ctx.v_schema)
ctx.link(ctx.v.result.schema, ctx.v_schema)
ctx.compute()
print("result value:", ctx.v.result.value)
print("result data:", ctx.v.result.data)
print("result buffered:", ctx.v.result.buffered)
print(ctx.v_schema.value)
print("*" * 50)
print(ctx.v.result.exception)
print("*" * 50)
# Require an integer result and inspect the final state.
ctx.v_schema.set({'type': 'integer'})
ctx.compute()
print(ctx.v.result.schema)
print(ctx.v_schema.value)
print(ctx.v.result.value)
512e099f596e3955243cf492631352af559a94af | 246 | wsgi | Python | pastr.wsgi | hossainalhaidari/pastr | 49417cbe6ef64c763cda41ada6d42ef8d643d081 | [
"MIT"
] | 3 | 2019-01-16T14:01:20.000Z | 2019-09-29T14:16:26.000Z | pastr.wsgi | hossainalhaidari/pastr | 49417cbe6ef64c763cda41ada6d42ef8d643d081 | [
"MIT"
] | null | null | null | pastr.wsgi | hossainalhaidari/pastr | 49417cbe6ef64c763cda41ada6d42ef8d643d081 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# WSGI entry point: load environment configuration and expose the Flask app.
import sys
import os

from dotenv import load_dotenv

# Pull PASTR_PATH / SECRET_KEY from a .env file into the environment.
load_dotenv()

# Make the application package importable from its install location.
# NOTE(review): 'App Path' is a placeholder fallback — set PASTR_PATH.
sys.path.insert(0, os.getenv('PASTR_PATH', 'App Path'))

from pastr import app as application

# NOTE(review): the 'Secret Key' fallback is insecure — ensure SECRET_KEY
# is set in the environment.
application.secret_key = os.getenv('SECRET_KEY', 'Secret Key')
512ea83053328520e513cb86e1b1a689945a30dc | 1,174 | py | Python | check_data_integrity/fix/fixer/fixer.py | mpmuc84/helper-scripts | 573353827d94f65efe763a93770ba75bcda90fff | [
"BSD-2-Clause"
] | 4 | 2018-10-11T09:48:39.000Z | 2019-11-12T06:17:17.000Z | check_data_integrity/fix/fixer/fixer.py | mpmuc84/helper-scripts | 573353827d94f65efe763a93770ba75bcda90fff | [
"BSD-2-Clause"
] | 26 | 2017-11-10T15:46:03.000Z | 2021-08-13T12:02:27.000Z | check_data_integrity/fix/fixer/fixer.py | mpmuc84/helper-scripts | 573353827d94f65efe763a93770ba75bcda90fff | [
"BSD-2-Clause"
] | 21 | 2017-10-12T11:47:37.000Z | 2021-05-05T13:07:59.000Z | from abc import ABCMeta, abstractmethod
class Fixer(metaclass=ABCMeta):
    """
    Abstract class defining the methods every subclass has to implement for fixing inconsistent data.
    """
    # BUGFIX: ``__metaclass__ = ABCMeta`` is Python-2-only syntax and is
    # ignored on Python 3, so @abstractmethod was never enforced and
    # incomplete subclasses could be instantiated. Declaring the metaclass
    # in the class header restores enforcement.

    @abstractmethod
    def fix(self, opencast_url, digest_login, event_id):
        """
        Fix the given event.

        :param opencast_url: URL to opencast instance
        :type opencast_url: str
        :param digest_login: User and password for digest authentication
        :type digest_login: DigestLogin
        :param event_id: ID of event to be fixed
        :type event_id: str
        """
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def get_errors():
        """
        Return which errors this fixer can fix.

        :return: A list of errors this fixer can fix
        :rtype: list
        """
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def get_fix_description():
        """
        Return a description of what this fixer does to fix inconsistent description.

        :return: Description of what this fixer does.
        :rtype: str
        """
        raise NotImplementedError
| 26.088889 | 101 | 0.636286 | 130 | 1,174 | 5.623077 | 0.446154 | 0.049248 | 0.073871 | 0.136799 | 0.29275 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 0.30494 | 1,174 | 44 | 102 | 26.681818 | 0.895833 | 0.496593 | 0 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.071429 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5130a25d5ed3634e87ef47dcbe79178f60a2783b | 4,454 | py | Python | Desktop-App/UI-Based App/Result_MLP.py | EddieKaleb/CricAI | 2ce714c7dfe1495b9f579d87490ab40914670f02 | [
"MIT"
] | 9 | 2018-09-25T06:19:52.000Z | 2021-10-06T11:06:01.000Z | Desktop-App/UI-Based App/Result_MLP.py | govind-bisht03/CricAI | fa497197627ec769d9a9be6c9f1ddeab3c826f7e | [
"MIT"
] | 21 | 2017-11-23T19:44:29.000Z | 2020-10-17T11:26:46.000Z | Desktop-App/UI-Based App/Result_MLP.py | govind-bisht03/CricAI | fa497197627ec769d9a9be6c9f1ddeab3c826f7e | [
"MIT"
] | 29 | 2017-12-11T10:24:22.000Z | 2020-10-16T13:04:13.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Result_MLP.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated UI (pyuic5, from Result_MLP.ui): result screen showing
    two team images with predicted win percentages plus Home/Exit buttons.

    Regenerate from the .ui file rather than editing by hand — manual
    changes will be lost (see the generator warning in the file header).
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow* (800x600)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Title label (bold, underlined, 28pt).
        self.labelTitle = QtWidgets.QLabel(self.centralwidget)
        self.labelTitle.setGeometry(QtCore.QRect(180, 40, 451, 51))
        font = QtGui.QFont()
        font.setFamily("Ubuntu")
        font.setPointSize(28)
        font.setBold(True)
        font.setUnderline(True)
        font.setWeight(75)
        self.labelTitle.setFont(font)
        self.labelTitle.setObjectName("labelTitle")
        # Team 1 (left column): image label and percentage label.
        self.labelT1Name = QtWidgets.QLabel(self.centralwidget)
        self.labelT1Name.setGeometry(QtCore.QRect(130, 170, 201, 101))
        font = QtGui.QFont()
        font.setPointSize(20)
        self.labelT1Name.setFont(font)
        self.labelT1Name.setAlignment(QtCore.Qt.AlignCenter)
        self.labelT1Name.setObjectName("labelT1Name")
        self.labelT1Percent = QtWidgets.QLabel(self.centralwidget)
        self.labelT1Percent.setGeometry(QtCore.QRect(130, 300, 201, 41))
        font = QtGui.QFont()
        font.setPointSize(36)
        font.setBold(True)
        font.setWeight(75)
        self.labelT1Percent.setFont(font)
        self.labelT1Percent.setAlignment(QtCore.Qt.AlignCenter)
        self.labelT1Percent.setObjectName("labelT1Percent")
        # Team 2 (right column): image label and percentage label.
        self.labelT2Name = QtWidgets.QLabel(self.centralwidget)
        self.labelT2Name.setGeometry(QtCore.QRect(480, 170, 201, 101))
        font = QtGui.QFont()
        font.setPointSize(20)
        self.labelT2Name.setFont(font)
        self.labelT2Name.setAlignment(QtCore.Qt.AlignCenter)
        self.labelT2Name.setObjectName("labelT2Name")
        self.labelT2Percent = QtWidgets.QLabel(self.centralwidget)
        self.labelT2Percent.setGeometry(QtCore.QRect(480, 300, 201, 41))
        font = QtGui.QFont()
        font.setPointSize(36)
        font.setBold(True)
        font.setWeight(75)
        self.labelT2Percent.setFont(font)
        self.labelT2Percent.setAlignment(QtCore.Qt.AlignCenter)
        self.labelT2Percent.setObjectName("labelT2Percent")
        # Navigation buttons.
        self.pushButtonHome = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonHome.setGeometry(QtCore.QRect(370, 450, 85, 27))
        self.pushButtonHome.setObjectName("pushButtonHome")
        self.pushButtonExit = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonExit.setGeometry(QtCore.QRect(370, 490, 85, 27))
        self.pushButtonExit.setObjectName("pushButtonExit")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 20))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (placeholder flags and percentages)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.labelTitle.setText(_translate("MainWindow", "Multi Level Perceptron Classifier"))
        self.labelT1Name.setText(_translate("MainWindow", "<html><head/><body><p><img src=\":/flags/images/flags/India.jpg\"/></p></body></html>"))
        self.labelT1Percent.setText(_translate("MainWindow", "60 %"))
        self.labelT2Name.setText(_translate("MainWindow", "<html><head/><body><p><img src=\":/flags/images/flags/Pakistan.jpg\"/></p></body></html>"))
        self.labelT2Percent.setText(_translate("MainWindow", "40 %"))
        self.pushButtonHome.setText(_translate("MainWindow", "Home"))
        self.pushButtonExit.setText(_translate("MainWindow", "Exit"))
import CricAI_rc  # Qt resource module (flag images) — imported for side effects.

if __name__ == "__main__":
    # Standalone preview: create the Qt application, attach the generated UI
    # to a main window and run the event loop until the window closes.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| 44.989899 | 150 | 0.691513 | 451 | 4,454 | 6.780488 | 0.2949 | 0.055592 | 0.054938 | 0.052322 | 0.267168 | 0.117724 | 0.117724 | 0.117724 | 0.117724 | 0.117724 | 0 | 0.039394 | 0.185002 | 4,454 | 98 | 151 | 45.44898 | 0.80303 | 0.041087 | 0 | 0.178571 | 1 | 0 | 0.090077 | 0.012198 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.035714 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
51311102d9e729c7834446c830b3c543962cbf40 | 5,283 | py | Python | xray/train.py | kibernetika-ai/image_captioning | e0248758d293d7dabc0cfdbed4568de06a20d048 | [
"MIT"
] | null | null | null | xray/train.py | kibernetika-ai/image_captioning | e0248758d293d7dabc0cfdbed4568de06a20d048 | [
"MIT"
] | null | null | null | xray/train.py | kibernetika-ai/image_captioning | e0248758d293d7dabc0cfdbed4568de06a20d048 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import argparse
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
from xray import model
# Module-level shorthands; route TensorFlow logging at INFO level via ``log``.
slim = tf.contrib.slim
tf.logging.set_verbosity(tf.logging.INFO)
log = tf.logging
def calc_max_length(tensor):
    """Return the length of the longest element in *tensor*."""
    lengths = [len(item) for item in tensor]
    return max(lengths)
def plot_attention(image, result, attention_plot):
    """Overlay per-token 8x8 attention maps on *image*, one subplot per token."""
    base_img = np.array(Image.open(image))
    fig = plt.figure(figsize=(10, 10))
    n_tokens = len(result)
    for idx in range(n_tokens):
        att = np.resize(attention_plot[idx], (8, 8))
        axis = fig.add_subplot(n_tokens, n_tokens, idx + 1)
        axis.set_title(result[idx])
        shown = axis.imshow(base_img)
        axis.imshow(att, cmap='gray', alpha=0.6, extent=shown.get_extent())

    plt.tight_layout()
    plt.show()
# captions on the validation set
# rid = np.random.randint(0, len(img_name_val))
# image = img_name_val[rid]
# real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
# ids, result, attention_plot = evaluate(image)
# print(cap_val[rid])
# print('Real Caption:', real_caption)
# for real in real_caption.split()[1:-1]:
# print(' %s' % label_map[real])
#
# print(ids)
# print('Pred Caption: ', ' '.join(result))
# for pred in result[:-1]:
# print(' %s' % label_map[pred])
#
# plot_attention(image, result, attention_plot)
# opening the image
def parse_args():
    """Build and parse the command-line options for training/eval/export."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--annotations', type=str, default='./annotations.json', help='Annotations file path')
    add('--data-dir', type=str, default='./data', help='Dataset directory')
    add('--train-dir', type=str, default='training', help='Training dir')
    add('--inception-path', type=str, default='./inception_v3.ckpt', help='Inception checkpoint')
    add('--steps', type=int, default=1000, help='Training steps count')
    add('--learning-rate', type=float, default=0.0001, help='Learning rate')
    add('--log-step-count-steps', type=int, default=5, help='Log every N step')
    add('--batch-size', type=int, default=8, help='Batch size')
    add('--mode', default='train', choices=['train', 'export', 'eval'], help='Mode')
    add('--eval', default=False, action='store_true', help='Run evaluation during train')
    add('--export', default=False, action='store_true', help='Changes mode to export')
    return parser.parse_args()
def export(xray, train_dir, params):
    """Export the estimator as a SavedModel and copy the label map beside it."""
    batch = params['batch_size']
    placeholders = {
        'images': tf.placeholder(tf.float32, [batch, 299, 299, 3], name='images'),
    }
    input_receiver = tf.estimator.export.build_raw_serving_input_receiver_fn(
        placeholders,
        default_batch_size=batch,
    )
    saved_path = xray.export_savedmodel(train_dir, input_receiver).decode("utf-8")
    log.info('Exported to %s.' % saved_path)
    # Ship the label map next to the SavedModel so consumers can decode ids.
    shutil.copy(
        os.path.join(params['data_dir'], 'label_map.json'),
        os.path.join(saved_path, 'label_map.json'),
    )
def main():
    """CLI entry point: assemble hyper-parameters, build the estimator and
    dispatch to train / eval / export according to --mode / --export."""
    args = parse_args()
    params = {
        'batch_size': args.batch_size,
        'buffer_size': 1000,
        'embedding_size': 256,
        'units': 512,
        'limit_length': 10,
        'grad_clip': 1.0,
        'learning_rate': args.learning_rate,
        'data_dir': args.data_dir,
        'inception_path': args.inception_path,
        'vocab_size': 0,  # placeholder; filled in from the word index below
        'attention_features_shape': 64,
        'features_shape': 2048,
        'log_step_count_steps': args.log_step_count_steps,
        'keep_checkpoint_max': 5,
    }
    # Vocabulary comes from the dataset's word index; caption length is
    # capped at limit_length tokens.
    params['word_index'] = model.get_word_index(params)
    params['max_length'] = params['limit_length']
    vocab_size = len(params['word_index'])
    params['vocab_size'] = vocab_size
    conf = tf.estimator.RunConfig(
        model_dir=args.train_dir,
        save_summary_steps=100,
        save_checkpoints_secs=120,
        save_checkpoints_steps=None,
        keep_checkpoint_max=params['keep_checkpoint_max'],
        log_step_count_steps=params['log_step_count_steps'],
    )
    xray = model.Model(
        params=params,
        model_dir=args.train_dir,
        config=conf,
    )
    # The --export flag forces export mode regardless of --mode.
    mode = args.mode
    if args.export:
        mode = 'export'
    if mode == 'train':
        input_fn = model.input_fn(params, True)
        if args.eval:
            # Interleave evaluation with training via train_and_evaluate.
            eval_input_fn = model.input_fn(params, False)
            train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=args.steps)
            eval_spec = tf.estimator.EvalSpec(
                input_fn=eval_input_fn, steps=1, start_delay_secs=10, throttle_secs=10
            )
            tf.estimator.train_and_evaluate(xray, train_spec, eval_spec)
        else:
            xray.train(input_fn=input_fn, steps=args.steps)
    elif mode == 'eval':
        eval_input_fn = model.input_fn(params, False)
        xray.evaluate(eval_input_fn, steps=1)
    elif mode == 'export':
        # export
        export(xray, args.train_dir, params)


if __name__ == '__main__':
    main()
| 33.226415 | 113 | 0.658906 | 715 | 5,283 | 4.634965 | 0.282517 | 0.027459 | 0.056427 | 0.025649 | 0.102897 | 0.071515 | 0.022933 | 0.022933 | 0.022933 | 0 | 0 | 0.016619 | 0.202726 | 5,283 | 158 | 114 | 33.436709 | 0.77018 | 0.106379 | 0 | 0.035088 | 0 | 0 | 0.168864 | 0.009783 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04386 | false | 0 | 0.078947 | 0.008772 | 0.140351 | 0.008772 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5133c8da0c788e7f385523bfdad1962d8bd3df18 | 280 | py | Python | testCreateDb.py | chaya7282/flask-shop-master | 9d1aedd6e39d4cac2b3c5585257415eee2558b71 | [
"BSD-3-Clause"
] | 1 | 2021-08-17T08:14:47.000Z | 2021-08-17T08:14:47.000Z | testCreateDb.py | chaya7282/flask-shop-master | 9d1aedd6e39d4cac2b3c5585257415eee2558b71 | [
"BSD-3-Clause"
] | null | null | null | testCreateDb.py | chaya7282/flask-shop-master | 9d1aedd6e39d4cac2b3c5585257415eee2558b71 | [
"BSD-3-Clause"
] | 1 | 2021-08-17T08:14:48.000Z | 2021-08-17T08:14:48.000Z | # -*- coding: utf-8 -*-
"""Click commands."""
from flask_script import Manager
from flaskshop import random_data
import flaskshop
import flaskshop.commands as commands
app = flaskshop.create_app()
manager = Manager(app)

if __name__ == "__main__":
    # BUGFIX: removed the dangling ``manager.se`` expression that followed —
    # a truncated statement that would raise AttributeError when evaluated.
    manager.run()
513455e1e49a2cc7f2af7ad92dbc3662b0e294bf | 761 | py | Python | day-4.py | shadowfool/advent-of-code-2017 | 9f2312c2cef9891c3bdb7c970eccc4eb48f714df | [
"MIT"
] | null | null | null | day-4.py | shadowfool/advent-of-code-2017 | 9f2312c2cef9891c3bdb7c970eccc4eb48f714df | [
"MIT"
] | null | null | null | day-4.py | shadowfool/advent-of-code-2017 | 9f2312c2cef9891c3bdb7c970eccc4eb48f714df | [
"MIT"
] | null | null | null |
# --- Part 1: count passphrases containing no duplicate word ---
# NOTE: the name ``input`` shadows the builtin but is kept because the
# part-2 loop below reads it.
input = [line.rstrip() for line in open('./inputs/day4.txt')]

badWords = 0
for line in input:
    dictionary = {}
    words = line.split(' ')
    for word in words:
        # BUGFIX: removed a stray debug ``print(words)`` here that dumped
        # the whole word list once per word.
        if word in dictionary:
            # A repeated word invalidates the passphrase; stop scanning it.
            badWords = badWords + 1
            break
        dictionary[word] = 1

# Number of valid passphrases.
print(len(input) - badWords)
# ---- CHALLENGE 2 ------
badCount = 0
def letterMap(word=''):
    """Map each character of *word* to its occurrence count.

    Keys are inserted in sorted character order, so ``tuple(result.items())``
    is a canonical form — the anagram comparison below relies on this.
    """
    counts = {}
    for letter in sorted(word):
        counts[letter] = counts.get(letter, 0) + 1
    return counts
# Part 2: a passphrase is invalid if any two of its words are anagrams.
for line in input:
    words = line.split(' ')
    words = [letterMap(word) for word in words]
    # letterMap inserts keys in sorted order, so tuple(d.items()) is a
    # canonical, hashable form; anagrams collapse to the same tuple in the
    # set, making the deduplicated list shorter than the original.
    if len(words) != len([dict(t) for t in set([tuple(d.items()) for d in words])]):
        badCount = badCount + 1

# Number of valid passphrases under the anagram rule.
print(len(input) - badCount)
513b7781461e774c4edea5cc00a442a4bd33ae7d | 51 | py | Python | gym-voilier-v2-discrete/gym_voilier/envs/__init__.py | pfontana96/smart-sailboat | 25b2a524b2601b3f8e72092d7a34beb849b617db | [
"MIT"
] | null | null | null | gym-voilier-v2-discrete/gym_voilier/envs/__init__.py | pfontana96/smart-sailboat | 25b2a524b2601b3f8e72092d7a34beb849b617db | [
"MIT"
] | null | null | null | gym-voilier-v2-discrete/gym_voilier/envs/__init__.py | pfontana96/smart-sailboat | 25b2a524b2601b3f8e72092d7a34beb849b617db | [
"MIT"
] | null | null | null | from gym_voilier.envs.voilier_env import VoilierEnv | 51 | 51 | 0.901961 | 8 | 51 | 5.5 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 51 | 1 | 51 | 51 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
513c933b5d0724d79a4413fb53c8512d831f68a7 | 7,464 | py | Python | prd_score_classifier.py | DrLSimon/precision-recall-distributions-icml19 | 364188eaa26ac1bf39ebf038136c79aeee97da3a | [
"Apache-2.0"
] | null | null | null | prd_score_classifier.py | DrLSimon/precision-recall-distributions-icml19 | 364188eaa26ac1bf39ebf038136c79aeee97da3a | [
"Apache-2.0"
] | null | null | null | prd_score_classifier.py | DrLSimon/precision-recall-distributions-icml19 | 364188eaa26ac1bf39ebf038136c79aeee97da3a | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from prdataset import *
from torch.utils.data import DataLoader
from torchvision import transforms
import tqdm
from models import *
from inception_torch import InceptionV3
# Pick the compute device once at import time; enable cuDNN autotuning when
# a GPU is available.
cuda = torch.cuda.is_available()
if cuda:
    device = torch.device('cuda:0')
    cudnn.benchmark = True
else:
    device = torch.device('cpu')
def progressbar(iterable, leave=False):
    """Wrap *iterable* in a tqdm bar; by default the bar is cleared when done."""
    bar = tqdm.tqdm(iterable, leave=leave)
    return bar
def createTrainTestSets(source_folder, target_folder, noise=False):
    """Build a SourceTargetDataset.

    The training transform converts to tensor and, when *noise* is set,
    adds Gaussian noise with sigma 0.1; the test transform is a plain
    ToTensor.
    """
    pipeline = [transforms.ToTensor()]
    if noise:
        def addGaussianNoise(tensor):
            return tensor + torch.randn(tensor.shape) * 0.1
        pipeline.append(transforms.Lambda(addGaussianNoise))
    return SourceTargetDataset(
        source_folder,
        target_folder,
        transform_train=transforms.Compose(pipeline),
        transform_test=transforms.ToTensor(),
    )
class ClassifierTrainer:
    """Trains a binary linear head on top of frozen pretrained features to
    separate source samples from target samples."""

    def __init__(self, dataset, description):
        self.dataset = dataset
        self.totalLoss = np.inf
        # One of 'alex', 'vgg' or 'inception' — selects the feature extractor.
        self.description = description
        self.__load()

    def __load(self):
        """Instantiate the frozen feature extractor and precompute features."""
        if self.description == 'alex':
            self.features = AlexDiscriminator().eval().to(device)
            self.feat_size = 4096
        elif self.description == 'vgg':
            # BUGFIX: the original assigned to a local ``features`` variable,
            # so self.features was never set for the 'vgg' option and any
            # later use of it raised AttributeError.
            self.features = VGGDiscriminator().eval().to(device)
            self.feat_size = 4096
        elif self.description == "inception":
            self.feat_size = dims = 2048
            block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
            features = InceptionV3([block_idx], normalize_input=True)
            self.features = features.eval().to(device)
        else:
            raise ValueError('Unknown classifier')
        self.batch_size = 64
        self.dataset.precomputeFeatures(self.features, self.batch_size, device)

    def initClassifier(self):
        """(Re)create the linear classification head in training mode."""
        # (removed the unused local ``nh = 128``)
        self.classifier = nn.Sequential(
            nn.Linear(self.feat_size, 1, bias=False),
        )
        self.classifier.to(device).train()

    def train(self):
        """Run one optimization epoch over the training loader."""
        self.totalLoss = 0
        for batch_num, (samples, flips) in enumerate(progressbar(self.train_loader)):
            def closure():
                # Closure form so optimizers that re-evaluate (LBFGS-style)
                # also work with this loop.
                self.optimizer.zero_grad()
                predictions = self.classifier(samples.to(device))
                loss = self.log_loss(predictions.squeeze(), flips.to(device))
                loss.backward()
                self.totalLoss += float(loss)
                return loss
            self.optimizer.step(closure)

    def test(self):
        """Compute balanced type-I/type-II error on the eval split, feed it
        to the LR scheduler and return the early-stopper's stop decision."""
        self.classifier.eval()
        self.dataset.eval()
        error_I = 0
        error_II = 0
        cnt_I = 0
        cnt_II = 0
        for batch_num, (samples, flips) in enumerate(progressbar(self.train_loader)):
            predictions = self.classifier(samples.to(device))
            predictions = (predictions > 0)
            flips = (flips > 0)
            cnt_I += int((flips.to(device) == 0).sum())
            cnt_II += int((flips.to(device) == 1).sum())
            # type I: predicted 1 on a 0-labelled sample; type II: the converse.
            typeI = (predictions.squeeze() == 1) & (flips.to(device) == 0)
            typeII = (predictions.squeeze() == 0) & (flips.to(device) == 1)
            error_I += int(typeI.sum())
            error_II += int(typeII.sum())
        error_I = float(error_I) / float(cnt_I)
        error_II = float(error_II) / float(cnt_II)
        # Restore training mode for the next epoch.
        self.classifier.train()
        self.dataset.train()
        error = 0.5 * (error_I + error_II)
        self.scheduler.step(error)
        self.pbar.set_postfix(loss=self.totalLoss, error=f'({error_I:.2}+{error_II:.2})/2={error:.2}', lr=self.optimizer.param_groups[0]['lr'])
        return self.stopper.step(error)

    def run(self, num_epochs, patience):
        """Train a fresh classifier head for up to *num_epochs* epochs.

        With patience >= 1, evaluates before each epoch and stops early once
        the balanced error stops improving. Returns the trained head.
        """
        early_stopping = (patience >= 1)
        if early_stopping:
            from early_stopping import EarlyStopping
            self.stopper = EarlyStopping(patience=patience)
        self.initClassifier()
        self.dataset.train()
        self.train_loader = DataLoader(self.dataset, self.batch_size, shuffle=True, num_workers=0)
        self.optimizer = optim.Adam(self.classifier.parameters(), lr=1e-3, weight_decay=1e-1, amsgrad=False)
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min', patience=2, cooldown=3, factor=0.5)
        self.log_loss = torch.nn.BCEWithLogitsLoss()
        self.pbar = progressbar(range(num_epochs))
        for ep in self.pbar:
            if early_stopping:
                with torch.no_grad():
                    shouldStop = self.test()
                if shouldStop:
                    self.pbar.close()
                    break
            self.train()
        return self.classifier
def estimatePRD(classifier, dataset, num_angles, epsilon=1e-10):
    """Estimate a precision-recall (PRD) curve from a trained binary
    classifier scoring each sample of *dataset*.

    :param classifier: callable returning a scalar score per sample; a
        sample is treated as "predicted 1" when its score exceeds a threshold.
    :param dataset: dataset yielding (sample, label) pairs with labels 0/1;
        must support ``.eval()``.
    :param num_angles: number of angles in [0, pi/2] to evaluate (3..1e6).
    :param epsilon: offset keeping the extreme angles away from 0 and pi/2.
    :return: (precision, recall) arrays of length num_angles, clipped to [0, 1].
    """
    if not (num_angles >= 3 and num_angles <= 1e6):
        raise ValueError('num_angles must be in [3, 1e6] but is %d.' % num_angles)
    dataset.eval()
    classifier.eval()
    test_loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    # Compute slopes for linearly spaced angles between [0, pi/2]
    angles = np.linspace(epsilon, np.pi/2 - epsilon, num=num_angles)
    slopes = np.tan(angles)
    # NOTE(review): ``toTorch`` is unused below — presumably leftover.
    toTorch = lambda z: torch.from_numpy(z).unsqueeze(0).to(device)
    with torch.no_grad():
        # Score every sample once; U is its 0/1 label.
        fValsAndUs = [(float(classifier(Z.to(device))), int(U)) for Z, U in progressbar(test_loader)]
    fVals = [val for val, U in fValsAndUs]
    # Add sentinel thresholds below the minimum and above the maximum score
    # so the sweep includes the all-positive and all-negative classifiers.
    fVals = [np.min(fVals)-1] + fVals + [np.max(fVals)+1]
    errorRates = []
    for t in fVals:
        # False-positive / false-negative rates of "predict 1 iff f(Z) >= t".
        fpr=sum([(fOfZ>=t) and U==0 for fOfZ,U in fValsAndUs]) / float(sum([U==0 for fOfZ,U in fValsAndUs]))
        fnr=sum([(fOfZ<t) and U==1 for fOfZ,U in fValsAndUs]) / float(sum([U==1 for fOfZ,U in fValsAndUs]))
        errorRates.append((float(fpr), float(fnr)))
    precision = []
    recall = []
    for slope in slopes:
        # Minimum over all thresholds gives the curve value at this angle.
        prec = min([slope*fnr+fpr for fpr,fnr in errorRates])
        precision.append(prec)
        rec = min([fnr+fpr/slope for fpr,fnr in errorRates])
        recall.append(rec)
    # handle numerical instabilities leading to precision/recall just above 1
    max_val = max(np.max(precision), np.max(recall))
    if max_val > 1.001:
        print(max_val)
        raise ValueError('Detected value > 1.001, this should not happen.')
    precision = np.clip(precision, 0, 1)
    recall = np.clip(recall, 0, 1)
    return precision, recall
class EnsembleClassifier(nn.Module):
    """Median-voting ensemble over independently trained classifier heads."""

    def __init__(self):
        super().__init__()
        # BUGFIX: use nn.ModuleList instead of a plain Python list so that
        # appended networks are registered as submodules — their parameters
        # appear in .parameters()/state_dict() and they follow .to(device)/
        # .eval() calls on the ensemble.
        self.networks = nn.ModuleList()

    def append(self, net):
        """Add a trained classifier to the ensemble."""
        self.networks.append(net)

    def forward(self, x):
        """Return the element-wise median of the member predictions."""
        preds = [net(x) for net in self.networks]
        return torch.median(torch.stack(preds), dim=0)[0]
def computePRD(source_folder, target_folder, num_angles=1001, num_runs=10, num_epochs=10, patience=0):
    """Compute a PRD curve between two image folders by training *num_runs*
    classifier heads and evaluating the median ensemble over them.

    :return: (precision, recall) arrays of length num_angles.
    """
    # NOTE(review): ``precisions``/``recalls`` are never used — presumably
    # leftover from a per-run variant of this function.
    precisions = []
    recalls = []
    ensemble = EnsembleClassifier()
    dataset = createTrainTestSets(source_folder, target_folder)
    trainer = ClassifierTrainer(dataset, 'inception')
    for k in progressbar(range(num_runs)):
        # Each run trains a fresh head; the ensemble medians over all runs.
        classifier = trainer.run(num_epochs, patience)
        ensemble.append(classifier)
    precision, recall = estimatePRD(ensemble, trainer.dataset, num_angles)
    return precision, recall
| 36.950495 | 143 | 0.624732 | 913 | 7,464 | 4.992333 | 0.250821 | 0.022817 | 0.014261 | 0.021062 | 0.120667 | 0.087758 | 0.070206 | 0.060114 | 0.047389 | 0.047389 | 0 | 0.017867 | 0.257637 | 7,464 | 201 | 144 | 37.134328 | 0.804728 | 0.017417 | 0 | 0.095238 | 0 | 0 | 0.025372 | 0.005593 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.071429 | 0.005952 | 0.214286 | 0.005952 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5141a9a0670c91a2b930b40b597fedf7c7054b49 | 8,906 | py | Python | lambdaproject/settings/base.py | dragetd/LambdaCast | a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7 | [
"BSD-2-Clause"
] | 6 | 2015-04-05T01:28:23.000Z | 2022-02-06T17:29:47.000Z | lambdaproject/settings/base.py | dragetd/LambdaCast | a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7 | [
"BSD-2-Clause"
] | 2 | 2022-01-05T23:07:10.000Z | 2022-03-30T17:52:45.000Z | lambdaproject/settings/base.py | dragetd/LambdaCast | a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7 | [
"BSD-2-Clause"
] | 2 | 2022-02-06T17:29:53.000Z | 2022-02-26T17:23:09.000Z | import os
# Path to your LambdaCast instance (no / behind the path)
try:
    from local import ABSOLUTE_PATH
except ImportError:
    # Default: the repository root, two directories above this settings file.
    ABSOLUTE_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../.."
# Domain your instance should use, for example: 'http://example.com' (no / behind the path)
try:
    from local import DOMAIN
except ImportError:
    DOMAIN = 'http://localhost:8000'
# NOTE(review): '*' accepts any Host header; restrict this for production.
ALLOWED_HOSTS = ['*',]
# Domain of your website, for example: 'http://example.com' (no / behind the path)
WEBSITE_URL = 'http://example.com'
# Name of your website, will be displayed in title, header and opengraph
SITE_NAME = 'LambdaCast'
# Name of the author of the rss feed
AUTHOR_NAME = 'Author Name'
# E-mail address for the contact link in the sidebar on index page
CONTACT_EMAIL = 'root@example.com'
# URL or path to your logo that will be displayed above the right sidebar
LOGO_URL = DOMAIN + '/static/logo.png'
# Django settings for lambdaproject.project
# NOTE(review): DEBUG should be set to False for production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# If you use a virtualenv (you should) enter it here
VIRTUALENV = ABSOLUTE_PATH + '/.venv/lib/python2.7/site-packages'
# The guys who will get an email if something is wrong
ADMINS = (
    ('name', 'root@localhost'),
)
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your database settings, sqlite is good for development and testing, not for deployment
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'test.sql', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'de-de'
# Language code for the OpenGraph implementation.
OG_LANGUAGE_CODE = 'de_DE'
LOCALE_PATHS = (
    ABSOLUTE_PATH + '/locale',
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
try:
    from local import MEDIA_ROOT
except ImportError:
    MEDIA_ROOT = ABSOLUTE_PATH + '/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = DOMAIN + '/media/'
# Where do you want your upload cache to live (there should be some space left)
FILE_UPLOAD_TEMP_DIR = MEDIA_ROOT + '/upload'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ABSOLUTE_PATH + '/static_files/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    ABSOLUTE_PATH + '/lambdaproject/static/',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this default MUST be overridden for any real deployment.
SECRET_KEY = 'ThisOneIsNotUniqeSoPleaseChange'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'lambdaproject.middleware.SettingsMiddleware',
    'pages.middleware.PagesMiddleware',
    'portal.middleware.SubmittalMiddleware',
)
ROOT_URLCONF = 'lambdaproject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'lambdaproject.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    ABSOLUTE_PATH + '/templates',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_admin_bootstrapped.bootstrap3',
    'django_admin_bootstrapped',
    'django.contrib.admin',
    #'django.contrib.admindocs',
    'taggit',
    'portal',
    'livestream',
    'pages',
    'djangotasks',
    'taggit_templatetags',
    'simple_open_graph',
    'captcha',
)
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = 'django.contrib.auth.views.login'
LOGOUT_URL = 'django.contrib.auth.views.logout'
# Formerly "portal/appsettings.py"
ENCODING_OUTPUT_DIR = MEDIA_ROOT + '/encoded/'
# How can we reach these files (public access is needed)
ENCODED_BASE_URL = DOMAIN + '/media/encoded/'
THUMBNAILS_DIR = MEDIA_ROOT + '/thumbnails/'
THUMBNAILS_BASE_URL = DOMAIN + '/media/thumbnails/'
ENABLE_LIVESTREAMS = False
ENABLE_AUDIO = True
ENABLE_VIDEO = True
# Host and port for the mail server to send mails for new comments
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
USE_BITTORRENT = False
# example: "udp://tracker.example.com:80"
BITTORRENT_TRACKER_ANNOUNCE_URL = ''
# example: "udp://tracker.example1.com:80,udp://tracker.example2.com:80,udp://tracker.example3.com:80"
BITTORRENT_TRACKER_BACKUP = ''
BITTORRENT_FILES_DIR = MEDIA_ROOT + '/torrents/'
# Where does transmission expect the original files? (This directory must be writeable for both transmission and LambdaCast!)
BITTORRENT_DOWNLOADS_DIR = ''
# What is the URL of the BITTORRENT_FILES_DIR?
BITTORRENT_FILES_BASE_URL = DOMAIN + '/media/torrents/'
# Host and port Transmission is listening on (probably localhost)
TRANSMISSION_HOST = '127.0.0.1'
TRANSMISSION_PORT = 9091
# Base directory for hotfolders, example: "/opt/hotfolder/"
HOTFOLDER_BASE_DIR = ''
HOTFOLDER_MOVE_TO_DIR = MEDIA_ROOT + '/raw/'
# django-simple-captcha
CAPTCHA_LETTER_ROTATION = None
CAPTCHA_CHALLENGE_FUNCT = 'captcha.helpers.math_challenge'
CAPTCHA_NOISE_FUNCTIONS = None
CAPTCHA_FILTER_FUNCTIONS = None
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| 33.355805 | 126 | 0.717045 | 1,144 | 8,906 | 5.473776 | 0.357517 | 0.035292 | 0.009582 | 0.011498 | 0.123283 | 0.0939 | 0.075695 | 0.066113 | 0.053338 | 0.040882 | 0 | 0.006688 | 0.177296 | 8,906 | 266 | 127 | 33.481203 | 0.84796 | 0.484505 | 0 | 0.054054 | 0 | 0 | 0.40062 | 0.254598 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.006757 | 0.047297 | 0 | 0.047297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514411ea3d0032d20f78be6935784f8081b90d34 | 2,072 | py | Python | sentiment_classifier/process.py | dang-trung/stocktwits-sentiment-classifier | 5b6a75abce3a6b701da81f616a0e5b63e9c0dba6 | [
"MIT"
] | null | null | null | sentiment_classifier/process.py | dang-trung/stocktwits-sentiment-classifier | 5b6a75abce3a6b701da81f616a0e5b63e9c0dba6 | [
"MIT"
] | 1 | 2020-11-18T19:15:50.000Z | 2020-11-24T02:21:33.000Z | sentiment_classifier/process.py | dang-trung/stocktwits-sentiment-classifier | 5b6a75abce3a6b701da81f616a0e5b63e9c0dba6 | [
"MIT"
] | null | null | null | """Text Pre-processing.
This module process text messages based on Chen et al. (2019).
Added some steps (such as escaping HTML symbols, or having a more detailed list
of stop and negative words).
"""
import html
import re
import string
import pandas as pd
# Repeated chars more than 3 times (collapsed to exactly three by pre_process)
repeat_regex = r'(\w)\1{2,}'
# Cashtag: $<word><opt .-><opt word>
cashtag_regex = r'\$\w+[.-]?\w?'
# Money: <$ or €><digits><opt word>
moneytag_regex = r'[\$€]\d+\w?'
# Numbers: <1 or more nums>
numbertag_regex = r'\d+[\.,]?\d?\w?'
# Hyperlinks: http<opt s>://<opt www.><words><. words><opt />
linktag_regex = r'https?://(www\.)?(\w+)(\.\w+)/?'
# Users: @<opt words>
usertag_regex = r'@\w+'
# Remove stopwords
# NOTE: the CSVs are read at import time, so importing this module requires
# the data files to be present relative to the working directory.
# NOTE(review): the words are joined into an alternation without re.escape;
# any regex metacharacter inside a stopword would change the pattern — verify
# the CSV content is plain words.
stops = pd.read_csv('data/00_external/stopwords.csv', header=None)[0].to_list()
stop_set = '|'.join(stops)
stop_regex = rf"\b({stop_set})\s"
# Negative words: a negation followed by the first letter of the next word
# (captured so pre_process can rewrite it as ' negtag_<letter>').
negs = pd.read_csv('data/00_external/negative.csv', header=None)[0].to_list()
neg_set = '|'.join(negs)
negtag_regex = rf"({neg_set})\s(\w?)"
# Remove punctuations, keeping '!', '?' and the apostrophe (handled later)
punctuation = string.punctuation
punctuation = punctuation.replace('!', '')
punctuation = punctuation.replace('?', '')
punctuation = punctuation.replace("'", '')
punc_regex = rf"[{punctuation}]"
def pre_process(text):
    """
    Text-process to remove all words unnecessary for classifying sentiment.

    Parameters
    ----------
    text : str
        Text to be processed

    Returns
    -------
    str
        Processed text
    """
    # Lowercase first, then turn HTML entities back into plain characters.
    text = html.unescape(text.lower())
    # Ordered substitution table: (pattern, replacement). Order matters —
    # e.g. cashtags must be tagged before plain numbers, and punctuation is
    # stripped before the negation rewrite.
    substitutions = (
        (repeat_regex, r'\1\1\1'),
        (cashtag_regex, 'cashtag'),
        (moneytag_regex, 'moneytag'),
        (numbertag_regex, 'numbertag'),
        (linktag_regex, 'linktag'),
        (usertag_regex, 'usertag'),
        (stop_regex, ''),
        (punc_regex, ''),
        (negtag_regex, r' negtag_\2'),
        (r"'", ''),
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text
| 29.183099 | 79 | 0.642857 | 295 | 2,072 | 4.420339 | 0.372881 | 0.07362 | 0.069018 | 0.089724 | 0.160276 | 0.132669 | 0 | 0 | 0 | 0 | 0 | 0.010514 | 0.173745 | 2,072 | 70 | 80 | 29.6 | 0.75 | 0.326255 | 0 | 0 | 0 | 0 | 0.189189 | 0.067568 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.114286 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514491bebe24982f7e39bca4c4425c0e236edb60 | 2,062 | py | Python | fredo/editor/brush_dialog.py | yasiupl/FreDo | 73bdc380dd82df171fe63998f0affa092e30759a | [
"BSD-3-Clause"
] | 6 | 2015-08-21T08:43:25.000Z | 2021-12-29T16:16:59.000Z | fredo/editor/brush_dialog.py | yasiupl/FreDo | 73bdc380dd82df171fe63998f0affa092e30759a | [
"BSD-3-Clause"
] | 2 | 2019-03-25T10:16:18.000Z | 2022-01-11T19:14:01.000Z | fredo/editor/brush_dialog.py | yasiupl/FreDo | 73bdc380dd82df171fe63998f0affa092e30759a | [
"BSD-3-Clause"
] | 2 | 2020-10-29T06:15:03.000Z | 2021-12-29T16:42:28.000Z | from PySide.QtGui import QDialog
from ..gui.brush_dialog import Ui_BrushDialog
from PySide.QtGui import QPixmap
from PySide.QtCore import Qt
from ..brushes import SquareBrush
import math
class BrushDialog(QDialog):
    """Dialog for configuring a brush: type, size, angle and magnitude."""

    def __init__(self, parent=None, brush=None):
        super(BrushDialog, self).__init__(parent)
        self.ui = Ui_BrushDialog()
        self.ui.setupUi(self)
        # Wire the widgets to their handlers before initializing values so
        # the preview updates as the defaults are applied below.
        self.ui.size_slider.valueChanged.connect(self.size_changed)
        self.ui.brush_combo_box.currentIndexChanged.connect(self.brush_changed)
        self.ui.brush_done_btn.clicked.connect(self.select_brush)
        self.ui.brush_combo_box.setCurrentIndex(0)
        # Called explicitly: setCurrentIndex(0) emits no signal when the
        # index is already 0, so create the initial brush by hand.
        self.brush_changed(0)
        self.selected_brush = brush
        self.ui.size_slider.setSliderPosition(10)
        if brush:
            # An existing brush was passed in: mirror its parameters in the UI.
            self.ui.size_slider.setSliderPosition(brush.size)
            # Sliders work in degrees while the brush stores radians.
            degrees = brush.angle*180/(math.pi)
            self.ui.angle_slider.setSliderPosition(degrees)
            self.ui.magnitude_box.setValue(brush.magnitude)

    def size_changed(self, value):
        "Handle the slider drag event: redraw the brush preview at the new size."
        size = self.ui.brush_demo_label.size()
        # NOTE(review): the pixmap is fixed at 100x100 but the marker is
        # centered on the label's size — confirm the label is 100x100 too.
        pixmap = QPixmap(100, 100)
        pixmap.fill(Qt.white)
        cx, cy = int(size.width()/2), int(size.height()/2)
        self.current_brush.set_size(value)
        self.current_brush.draw_marker(cx, cy, pixmap, 1)
        self.ui.brush_demo_label.setPixmap(pixmap)

    def brush_changed(self, index):
        "Handle the brush type change: index 0 selects the square brush."
        if index == 0:
            self.current_brush = SquareBrush(size=self.ui.size_slider.value())

    def get_brush(self):
        " Get the selected brush or `None` if dialog was closed. "
        return self.selected_brush

    def select_brush(self):
        " Select the currently configured brush params and close the dialog. "
        self.selected_brush = self.current_brush
        self.selected_brush.set_magnitude(self.ui.magnitude_box.value())
        # Convert the slider's degrees back to radians for the brush.
        radians = self.ui.angle_slider.value()*math.pi/180.0
        self.selected_brush.set_angle(radians)
        self.close()
| 34.366667 | 79 | 0.679922 | 271 | 2,062 | 4.98893 | 0.309963 | 0.066568 | 0.04068 | 0.047337 | 0.113905 | 0.056213 | 0 | 0 | 0 | 0 | 0 | 0.013052 | 0.21969 | 2,062 | 59 | 80 | 34.949153 | 0.827222 | 0.079049 | 0 | 0 | 0 | 0 | 0.078565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.130435 | 0 | 0.282609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5144c4d72df0f27c405d9c186efa2f5400ee41ce | 332 | py | Python | Tarea1b_SpaceWars/hitmarker.py | salistito/Computer-Graphics | f56b453609bbe8496f504f438770fde992af68fe | [
"MIT"
] | null | null | null | Tarea1b_SpaceWars/hitmarker.py | salistito/Computer-Graphics | f56b453609bbe8496f504f438770fde992af68fe | [
"MIT"
] | null | null | null | Tarea1b_SpaceWars/hitmarker.py | salistito/Computer-Graphics | f56b453609bbe8496f504f438770fde992af68fe | [
"MIT"
] | null | null | null | """
Sebastián Salinas, CC3501, 6/05/20
Tarea1b
"""
# Importamos algunos modulos
import math
# Function to evaluate the collision between 2 objects
def hitmarker(x1, y1, x2, y2, radius=0.075):
    """Return True when the two objects collide, i.e. when the Euclidean
    distance between (x1, y1) and (x2, y2) is strictly less than `radius`.

    The collision threshold, previously hard-coded as 0.075, is now a
    keyword parameter with the same default, so existing callers are
    unaffected while other radii become possible.
    """
    # math.hypot computes sqrt(dx**2 + dy**2) accurately.
    return math.hypot(x1 - x2, y1 - y2) < radius
51457008bd685f3f5dea47108bc2573ac5535321 | 1,510 | py | Python | src/lib/osta.py | anroots/osta-exporter | 14b05bb905b9df59f9e62e72b33c64a890eb973b | [
"Apache-2.0"
] | null | null | null | src/lib/osta.py | anroots/osta-exporter | 14b05bb905b9df59f9e62e72b33c64a890eb973b | [
"Apache-2.0"
] | null | null | null | src/lib/osta.py | anroots/osta-exporter | 14b05bb905b9df59f9e62e72b33c64a890eb973b | [
"Apache-2.0"
] | null | null | null | from json import JSONDecodeError
import requests
import sys
class Osta:
    """Thin client for the osta.ee HTTP API.

    On any transport or parsing failure the process is terminated with
    exit code 1 (this class is written for a one-shot collector, not for
    use as a library).
    """

    def __init__(self, logger, api_url):
        # Base URL of the API, e.g. 'https://api.osta.ee'; endpoint paths
        # are appended to it in make_request.
        self.api_url = api_url
        self.logger = logger

    def get_user_items(self, user_id):
        """Return the active items of the given osta.ee user."""
        self.logger.debug('Starting collection of osta.ee meters')
        query_params = {
            'userId': [user_id]
        }
        items = self.make_request('/items/active', query_params)
        self.logger.debug('Received {} items from osta.ee'.format(len(items)))
        return items

    @staticmethod
    def get_request_headers():
        """HTTP headers sent with every API request."""
        return {
            'Accept': 'application/json',
            'Accept-Language': 'en',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
        }

    def make_request(self, uri, query_params):
        """GET `api_url + uri` and return the decoded JSON body.

        Exits the process (status 1) on any request error or invalid JSON.
        """
        self.logger.debug('Sending request to Osta API')
        uri = self.api_url + uri
        try:
            # timeout added so a hung server cannot block the collector
            # forever; a Timeout is a RequestException and is handled below.
            r = requests.get(url=uri, params=query_params,
                             headers=self.get_request_headers(), timeout=30)
        except requests.exceptions.RequestException as e:
            self.logger.fatal(e)
            self.logger.fatal('Received error from HTTP request, exiting')
            sys.exit(1)
        try:
            response = r.json()
        except JSONDecodeError:
            # the raw body is logged instead of the exception object
            self.logger.fatal('Osta HTTP endpoint returned invalid JSON, can not parse it')
            self.logger.fatal(r.text)
            sys.exit(1)
        return response
| 32.826087 | 141 | 0.609272 | 190 | 1,510 | 4.726316 | 0.447368 | 0.100223 | 0.066815 | 0.053452 | 0.097996 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026827 | 0.284106 | 1,510 | 45 | 142 | 33.555556 | 0.803885 | 0 | 0 | 0.105263 | 0 | 0.026316 | 0.247682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.078947 | 0.026316 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5146d58c84bb7104d16ee9a9d1a76b8231e89082 | 224 | py | Python | nyaggle/experiment/__init__.py | harupy/nyaggle | 132a93079e364d60b5598de77ab636a603ec06a4 | [
"MIT"
] | null | null | null | nyaggle/experiment/__init__.py | harupy/nyaggle | 132a93079e364d60b5598de77ab636a603ec06a4 | [
"MIT"
] | null | null | null | nyaggle/experiment/__init__.py | harupy/nyaggle | 132a93079e364d60b5598de77ab636a603ec06a4 | [
"MIT"
] | null | null | null | from nyaggle.experiment.experiment import Experiment, add_leaderboard_score
from nyaggle.experiment.averaging import average_results
from nyaggle.experiment.run import autoprep_gbdt, run_experiment, find_best_lgbm_parameter
| 56 | 90 | 0.892857 | 29 | 224 | 6.62069 | 0.586207 | 0.171875 | 0.328125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066964 | 224 | 3 | 91 | 74.666667 | 0.91866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
5146da738f04b9b9d8f97c34d071f17da9198bde | 794 | py | Python | configs/fdf/deep_privacy_v1.py | skoskjei/DP-ATT | eb7380099f5c7e533fd0d247456b4a418529d62b | [
"MIT"
] | 1,128 | 2019-09-11T01:38:09.000Z | 2022-03-31T17:06:56.000Z | configs/fdf/deep_privacy_v1.py | skoskjei/DP-ATT | eb7380099f5c7e533fd0d247456b4a418529d62b | [
"MIT"
] | 45 | 2019-09-11T05:39:53.000Z | 2021-12-05T17:52:07.000Z | configs/fdf/deep_privacy_v1.py | skoskjei/DP-ATT | eb7380099f5c7e533fd0d247456b4a418529d62b | [
"MIT"
] | 185 | 2019-09-11T02:15:56.000Z | 2022-03-23T16:12:41.000Z |
# Inherit every option not overridden here from the base configuration.
_base_config_ = "base.py"
# Base channel count used for the convolution blocks below.
model_size = 512
# Pretrained checkpoint published for this configuration.
model_url = "http://folk.ntnu.no/haakohu/checkpoints/step_42000000.ckpt"
models = dict(
    scalar_pose_input=False,
    max_imsize=128,
    # Channels per spatial resolution: full width up to 32px, then halved
    # at each doubling of resolution.
    conv_size={
        4: model_size,
        8: model_size,
        16: model_size,
        32: model_size,
        64: model_size//2,
        128: model_size//4,
        256: model_size//8,
        512: model_size//16
    },
    generator=dict(
        conv2d_config=dict(
            conv=dict(
                # gain sqrt(2), as used for equalized learning-rate conv init
                gain=2**0.5
            )
        ),
        type="DeepPrivacyV1"),
)
trainer = dict(
    # Progressive growing disabled for this (V1) configuration.
    progressive=dict(
        enabled=False,
        lazy_regularization=True
    ),
    # Batch size by image size.
    batch_size_schedule={
        128: 32,
        256: 32
    },
    optimizer=dict(
        learning_rate=0.0015
    )
)
| 18.465116 | 72 | 0.540302 | 92 | 794 | 4.413043 | 0.565217 | 0.199507 | 0.049261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107692 | 0.345088 | 794 | 42 | 73 | 18.904762 | 0.673077 | 0 | 0 | 0.054054 | 0 | 0 | 0.098361 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514737538b6050cbe92637918e942f1823b10292 | 1,699 | py | Python | server/weather/RestWeatherProvider.py | EveryOtherUsernameWasAlreadyTaken/BIS | e132ce42dcc74e634231398dfecb08834d478cba | [
"MIT"
] | 3 | 2019-07-09T08:51:20.000Z | 2019-09-16T17:27:54.000Z | server/weather/RestWeatherProvider.py | thomasw-mitutoyo-ctl/BIS | 08525cc12164902dfe968ae41beb6de0cd5bc411 | [
"MIT"
] | 24 | 2019-06-17T12:33:35.000Z | 2020-03-27T08:17:35.000Z | server/weather/RestWeatherProvider.py | EveryOtherUsernameWasAlreadyTaken/BIS | e132ce42dcc74e634231398dfecb08834d478cba | [
"MIT"
] | 1 | 2020-03-24T17:54:07.000Z | 2020-03-24T17:54:07.000Z | import json
import logging
import threading
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
log = logging.getLogger(__name__)
class RestWeatherProvider(threading.Thread):
    """
    Background thread that exposes the collected weather data through a
    minimal HTTP server; clients fetch it with a plain HTTP GET request.
    """

    def __init__(self, repository, address, port):
        super(RestWeatherProvider, self).__init__()
        self.repository = repository
        self.address = address
        self.port = port

    def run(self):
        try:
            log.info("Starting WeatherProvider")
            # Create and start the http server
            endpoint = (self.address, self.port)
            httpd = HTTPServer(endpoint, self.request_handler)
            httpd.serve_forever()
        except Exception as e:
            log.exception("WeatherProvider threw an exception: " + str(e))

    def request_handler(self, *args):
        # Factory passed to HTTPServer; injects the repository into each handler.
        HTTPRequestHandler(self.repository, *args)
class HTTPRequestHandler(BaseHTTPRequestHandler):
    """
    HTTPRequestHandler for the RestWeatherProvider
    """

    def __init__(self, repository, *args):
        # The repository MUST be assigned before calling the base __init__:
        # BaseHTTPRequestHandler handles the request (and thus may invoke
        # do_GET, which reads self.repository) from inside its constructor.
        self.repository = repository
        BaseHTTPRequestHandler.__init__(self, *args)

    # noinspection PyPep8Naming
    def do_GET(self):
        """
        Handles the GET request and returns the weather in json format
        """
        self.send_response(200)
        self.send_header('Content-type', 'application/json;charset=utf-8')
        # Allow cross-origin reads so pages served from other hosts can fetch.
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        data = self.repository.get_all_data()
        # NOTE(review): str(...) around json.dumps is a no-op on Python 2
        # (which the BaseHTTPServer import implies); on Python 3 this would
        # need to be encoded to bytes — confirm target interpreter.
        self.wfile.write(str(json.dumps(data)))
| 30.339286 | 114 | 0.669806 | 183 | 1,699 | 6.054645 | 0.469945 | 0.075812 | 0.048736 | 0.037906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00387 | 0.239553 | 1,699 | 55 | 115 | 30.890909 | 0.853715 | 0.1907 | 0 | 0.064516 | 0 | 0 | 0.098784 | 0.043313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.129032 | 0 | 0.354839 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5147aa629fe9091066db83d5ac4d5067803255bc | 359 | py | Python | src/dev.py | Inigoperez/Proyecto_Interfaces | 4a2b98cd499fabf3789301e9eb488297bebfcf2a | [
"MIT"
] | null | null | null | src/dev.py | Inigoperez/Proyecto_Interfaces | 4a2b98cd499fabf3789301e9eb488297bebfcf2a | [
"MIT"
] | null | null | null | src/dev.py | Inigoperez/Proyecto_Interfaces | 4a2b98cd499fabf3789301e9eb488297bebfcf2a | [
"MIT"
] | null | null | null |
# Application bootstrap: import style depends on how this file is loaded.
if __name__ == '__main__':
    # Executed directly: load environment variables from a .env file and
    # use absolute imports.
    from dotenv import load_dotenv
    load_dotenv()
    from config import StageConfig
    from util import route
    from util.commands import commands
else:
    # Imported as part of a package: use package-relative imports
    # (the environment is assumed to be configured by the host process).
    from .config import StageConfig
    from .util import route
    from .util.commands import commands

# Build the application object via the routing helper and register the
# project's commands on it (presumably CLI commands — verify in util.commands).
app = route(config=StageConfig, name=__name__)
commands(app=app)
| 22.4375 | 46 | 0.732591 | 46 | 359 | 5.413043 | 0.304348 | 0.128514 | 0.128514 | 0.216867 | 0.610442 | 0.610442 | 0.610442 | 0.610442 | 0.610442 | 0.610442 | 0 | 0 | 0.206128 | 359 | 15 | 47 | 23.933333 | 0.873684 | 0 | 0 | 0 | 0 | 0 | 0.022346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.583333 | 0 | 0.583333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 4 |
51493c53717606761bb6b13240fa86406df2fac3 | 985 | py | Python | cms/tests.py | royaleagle-dev/djangoblog | 624a17972772d54fc44d5a480fe42011c9b272fa | [
"Apache-2.0"
] | null | null | null | cms/tests.py | royaleagle-dev/djangoblog | 624a17972772d54fc44d5a480fe42011c9b272fa | [
"Apache-2.0"
] | null | null | null | cms/tests.py | royaleagle-dev/djangoblog | 624a17972772d54fc44d5a480fe42011c9b272fa | [
"Apache-2.0"
] | null | null | null | from django.test import TestCase
from . models import Post, Tag, Category
def createCategory(title):
    # Persist and return a Category with the given title.
    return Category.objects.create(title = title)
def createTag(name):
    # Persist and return a Tag with the given name.
    return Tag.objects.create(name = name)
def createPost(title, body, author, postState):
    """Create and return a Post in a fresh category "New", tagged 'NewT'."""
    # Creation order matters for auto-assigned ids: tag first, then the
    # category (inside the create call), then the post itself.
    new_tag = Tag.objects.create(name='NewT')
    new_post = Post.objects.create(
        title=title,
        body=body,
        category=createCategory("New"),
        author=author,
        postState=postState,
    )
    new_post.tags.add(new_tag)
    return new_post
class PostTestCase(TestCase):
    """Verifies the string representation of Post."""

    def setUp(self):
        # One published post created per test.
        createPost('New Post', 'This is a new Post', 'Ayotunde', 'p')

    def testPostStr(self):
        # The first (and only) post gets id 1 in the per-test database.
        first_post = Post.objects.get(id=1)
        self.assertEqual(str(first_post), 'New Post')
class TagTestCase(TestCase):
    """Verifies the string representation of Tag."""

    def testTagStr(self):
        new_tag = Tag.objects.create(name='New Tag')
        self.assertEqual(str(new_tag), 'New Tag')
class CategortTestCase(TestCase):
    """Verifies the string representation of Category.

    NOTE(review): the class name has a typo ('Categort'); kept as-is so the
    test id stays stable for any tooling that references it.
    """

    def testCategoryStr(self):
        new_category = Category.objects.create(title="New Cat")
        self.assertEqual(str(new_category), "New Cat")
| 29.848485 | 119 | 0.73401 | 131 | 985 | 5.427481 | 0.343511 | 0.109705 | 0.075949 | 0.084388 | 0.064698 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00116 | 0.124873 | 985 | 32 | 120 | 30.78125 | 0.823666 | 0 | 0 | 0 | 0 | 0 | 0.079188 | 0 | 0 | 0 | 0 | 0 | 0.12 | 1 | 0.28 | false | 0 | 0.08 | 0.08 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
514976db1636eff4f1c2e435911894cc18620e2c | 1,531 | py | Python | test/test_solver.py | akiFQC/pyqubo | 6a8033365562756328577eda42e255853e760488 | [
"Apache-2.0"
] | 1 | 2019-03-17T11:26:36.000Z | 2019-03-17T11:26:36.000Z | test/test_solver.py | akiFQC/pyqubo | 6a8033365562756328577eda42e255853e760488 | [
"Apache-2.0"
] | null | null | null | test/test_solver.py | akiFQC/pyqubo | 6a8033365562756328577eda42e255853e760488 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Recruit Communications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pyqubo import solve_qubo, solve_ising, Spin
class TestSolver(unittest.TestCase):
    """Tests for solve_qubo / solve_ising on a small number-partition model."""

    @staticmethod
    def create_number_partition_model():
        """Compile H = (2*s1 + 4*s2 + 6*s3)**2; its ground states balance
        {2, 4, 6} into two equal-sum groups ({2, 4} vs {6})."""
        s1, s2, s3 = Spin("s1"), Spin("s2"), Spin("s3")
        H = (2 * s1 + 4 * s2 + 6 * s3) ** 2
        return H.compile()

    def test_solve_qubo(self):
        model = TestSolver.create_number_partition_model()
        qubo, _offset = model.to_qubo()
        solution = solve_qubo(qubo, num_reads=1, sweeps=10)
        # BUG FIX: the original `assertTrue(solution == A or B)` parses as
        # `(solution == A) or B`, and a non-empty dict B is always truthy,
        # so the assertion could never fail. assertIn checks both solutions.
        self.assertIn(solution, [{'s1': 0, 's2': 0, 's3': 1},
                                 {'s1': 1, 's2': 1, 's3': 0}])

    def test_solve_ising(self):
        model = TestSolver.create_number_partition_model()
        linear, quad, _offset = model.to_ising()
        solution = solve_ising(linear, quad, num_reads=1, sweeps=10)
        # Same fix as above, for the Ising formulation.
        self.assertIn(solution, [{'s1': -1, 's2': -1, 's3': 1},
                                 {'s1': 1, 's2': 1, 's3': -1}])
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| 36.452381 | 98 | 0.665578 | 221 | 1,531 | 4.479638 | 0.475113 | 0.060606 | 0.063636 | 0.078788 | 0.205051 | 0.205051 | 0.2 | 0.109091 | 0.082828 | 0 | 0 | 0.042079 | 0.208361 | 1,531 | 41 | 99 | 37.341463 | 0.774752 | 0.371653 | 0 | 0.1 | 0 | 0 | 0.040084 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514a79000a503d03dd773bda6a965fb3b20a7db7 | 737 | py | Python | solution/viz/clean_words.py | GabrielMissael/solution | aff33732d04efedb60c1ebc70fd5108ae5cc558e | [
"MIT"
] | null | null | null | solution/viz/clean_words.py | GabrielMissael/solution | aff33732d04efedb60c1ebc70fd5108ae5cc558e | [
"MIT"
] | null | null | null | solution/viz/clean_words.py | GabrielMissael/solution | aff33732d04efedb60c1ebc70fd5108ae5cc558e | [
"MIT"
] | null | null | null | import re
import unicodedata
import nltk
from nltk.corpus import stopwords
import pandas as pd
def clean_words(text_DataFrame: pd.DataFrame):
    """
    A simple function to clean up the data. All the words that
    are not designated as a stop word is then lemmatized after
    encoding and basic regex parsing are performed.
    """
    # Flatten the 'text' column into one big string (''.join of a single
    # string is a no-op, kept for parity with the original behavior).
    raw = ''.join(str(text_DataFrame['text'].tolist()))
    lemmatizer = nltk.stem.WordNetLemmatizer()
    # Local name deliberately different from the module-level `stopwords`
    # import to avoid shadowing it.
    spanish_stopwords = nltk.corpus.stopwords.words('spanish')
    # Decompose accents, drop non-ASCII bytes, lowercase — step by step.
    decomposed = unicodedata.normalize('NFKD', raw)
    ascii_text = decomposed.encode('ascii', 'ignore').decode('utf-8', 'ignore')
    lowered = ascii_text.lower()
    # Strip punctuation, split on whitespace, then lemmatize the keepers.
    tokens = re.sub(r'[^\w\s]', '', lowered).split()
    return [lemmatizer.lemmatize(token) for token in tokens
            if token not in spanish_stopwords]
| 33.5 | 75 | 0.683853 | 103 | 737 | 4.864078 | 0.631068 | 0.03992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001672 | 0.188602 | 737 | 21 | 76 | 35.095238 | 0.83612 | 0.223881 | 0 | 0 | 0 | 0 | 0.080292 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.333333 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
514a92812de5f63e1e53a1ba432f40d277ebdbba | 780 | py | Python | imagemodel/common/predictor.py | tenkeyless/imagemodel | 360c672117b5ccb1bfb3d6771b0720fa1a1f513c | [
"MIT"
] | null | null | null | imagemodel/common/predictor.py | tenkeyless/imagemodel | 360c672117b5ccb1bfb3d6771b0720fa1a1f513c | [
"MIT"
] | null | null | null | imagemodel/common/predictor.py | tenkeyless/imagemodel | 360c672117b5ccb1bfb3d6771b0720fa1a1f513c | [
"MIT"
] | null | null | null | from typing import Optional
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.python.distribute.tpu_strategy import TPUStrategy
class Predictor:
    """Holds everything required to run model inference.

    Bundles a Keras model, the dataset to predict on, a human-readable
    description of that dataset, the batch size, and an optional TPU
    distribution strategy. ``predict`` itself is a placeholder.
    """

    def __init__(
            self,
            model: Model,
            predict_dataset: tf.data.Dataset,
            predict_dataset_description: str,
            predict_batch_size: int,
            strategy_optional: Optional[TPUStrategy] = None):
        # Store the configuration verbatim; no validation is performed here.
        self.model = model
        self.predict_dataset = predict_dataset
        self.predict_dataset_description = predict_dataset_description
        self.predict_batch_size = predict_batch_size
        self.strategy_optional = strategy_optional

    def predict(self):
        # Placeholder entry point — not implemented yet.
        pass
| 32.5 | 75 | 0.694872 | 86 | 780 | 6.034884 | 0.348837 | 0.16185 | 0.144509 | 0.077071 | 0.265896 | 0.157996 | 0.157996 | 0 | 0 | 0 | 0 | 0 | 0.246154 | 780 | 23 | 76 | 33.913043 | 0.882653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0.052632 | 0.210526 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
514b2ccf532fafc08943123f355c409988b89713 | 6,778 | py | Python | reporter.py | Danielto1404/ssat-msp-make-transfer | 36731ab79ba517d6c66516054ebd6179674a953e | [
"MIT"
] | null | null | null | reporter.py | Danielto1404/ssat-msp-make-transfer | 36731ab79ba517d6c66516054ebd6179674a953e | [
"MIT"
] | null | null | null | reporter.py | Danielto1404/ssat-msp-make-transfer | 36731ab79ba517d6c66516054ebd6179674a953e | [
"MIT"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Reporter class."""
import logging
import os
import time
from datetime import datetime
import mindspore.ops as ops
from mindspore.train.serialization import save_checkpoint
from tools import save_image
class Reporter(logging.Logger):
    """
    This class includes several functions that can save images/checkpoints and print/save logging information.

    Args:
        args (class): Option class.
    """

    def __init__(self, args):
        super(Reporter, self).__init__("SSAT")
        # Per-run output directories: text logs, sample images, checkpoints.
        self.log_dir = os.path.join(args.outputs_dir, 'log')
        self.imgs_dir = os.path.join(args.outputs_dir, "imgs")
        self.ckpts_dir = os.path.join(args.outputs_dir, "ckpt")
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir, exist_ok=True)
        if not os.path.exists(self.imgs_dir):
            os.makedirs(self.imgs_dir, exist_ok=True)
        if not os.path.exists(self.ckpts_dir):
            os.makedirs(self.ckpts_dir, exist_ok=True)
        self.save_checkpoint_epochs = args.save_checkpoint_epochs
        self.save_imgs = args.save_imgs
        # console handler
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        self.addHandler(console)
        # file handler
        # Log file name is timestamped so consecutive runs never collide.
        log_name = datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S') + '_.log'
        self.log_fn = os.path.join(self.log_dir, log_name)
        fh = logging.FileHandler(self.log_fn)
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        self.addHandler(fh)
        self.save_args(args)
        # Training progress counters and per-epoch loss accumulators.
        self.step = 0
        self.epoch = 0
        # Each device sees only its shard of the dataset.
        self.dataset_size = args.dataset_size // args.device_num
        self.device_num = args.device_num
        self.print_iter = args.print_iter
        self.G_loss = []
        self.D_loss = []

    def info(self, msg, *args, **kwargs):
        """Log *msg* at INFO level through both console and file handlers."""
        if self.isEnabledFor(logging.INFO):
            self._log(logging.INFO, msg, args, **kwargs)

    def save_args(self, args):
        """Dump every command-line option (one '--> key: value' line each) to the log."""
        self.info('Args:')
        args_dict = vars(args)
        for key in args_dict.keys():
            self.info('--> %s: %s', key, args_dict[key])
        self.info('')

    def epoch_start(self):
        """Reset timers, the step counter and loss buffers at the start of an epoch."""
        # NOTE: step_start_time/epoch_start_time are first created here, not in
        # __init__ -- step_end/epoch_end must not be called before epoch_start.
        self.step_start_time = time.time()
        self.epoch_start_time = time.time()
        self.step = 0
        self.epoch += 1
        self.G_loss = []
        self.D_loss = []

    def step_end(self, res_G, res_D):
        """print log when step end."""
        self.step += 1
        loss_D = float(res_D.asnumpy())
        loss_G = float(res_G.asnumpy())
        self.G_loss.append(loss_G)
        self.D_loss.append(loss_D)
        if self.step % self.print_iter == 0:
            # Average wall-clock cost per step (ms) since the last report.
            step_cost = (time.time() - self.step_start_time) * 1000 / self.print_iter
            losses = "G_loss: {:.2f}, D_loss:{:.2f}".format(loss_G, loss_D)
            self.info("Epoch[{}] [{}/{}] step cost: {:.2f} ms, {}".format(
                self.epoch, self.step, self.dataset_size, step_cost, losses))
            self.step_start_time = time.time()

    def epoch_end(self, net):
        """print log and save checkpoints when epoch end."""
        epoch_cost = (time.time() - self.epoch_start_time) * 1000
        per_step_time = epoch_cost / self.dataset_size
        # Mean losses over the epoch (sums were accumulated in step_end).
        mean_loss_G = sum(self.G_loss) / self.dataset_size
        mean_loss_D = sum(self.D_loss) / self.dataset_size
        self.info("Epoch [{}] total cost: {:.2f} ms, per step: {:.2f} ms, G_loss: {:.2f}, D_loss: {:.2f}".format(
            self.epoch, epoch_cost, per_step_time, mean_loss_G, mean_loss_D))
        if self.epoch % self.save_checkpoint_epochs == 0:
            # Only the generator is checkpointed; discriminator saves are disabled.
            save_checkpoint(net.G.gen, os.path.join(self.ckpts_dir, f"SSAT_G_{self.epoch}.ckpt"))
            # save_checkpoint(net.G.dis_non_makeup, os.path.join(self.ckpts_dir, f"SSAT_D_non_makeup_{self.epoch}.ckpt"))
            # save_checkpoint(net.G.dis_makeup, os.path.join(self.ckpts_dir, f"SSAT_D_makeup_{self.epoch}.ckpt"))

    def visualizer(self, non_makeup, makeup, mapX, mapY, z_transfer, z_removal, transfer_g, removal_g,
                   z_rec_non_makeup, z_rec_makeup, z_cycle_non_makeup, z_cycle_makeup):
        """Save a 2-row comparison grid of intermediate results once per epoch.

        Assumes all image tensors are NCHW -- TODO confirm against the caller.
        mapX/mapY appear to be attention/warp matrices applied to the 1/4-scale
        images via batched matmul; verify their exact shapes upstream.
        """
        if self.save_imgs and self.step % self.dataset_size == 0:
            _, C, H, W = non_makeup.shape
            concat_2 = ops.Concat(axis=2)
            concat_3 = ops.Concat(axis=3)
            bmm = ops.BatchMatMul()
            nearest_256 = ops.ResizeNearestNeighbor((H, W))
            nearest_64 = ops.ResizeNearestNeighbor((H // 4, W // 4))
            # Warp the non-makeup image at 1/4 resolution with mapY, then upsample back.
            non_makeup_down = nearest_64(non_makeup)
            n, c, h, w = non_makeup_down.shape
            non_makeup_down_warp = bmm(non_makeup_down.reshape(n, c, h * w), mapY)  # n*HW*1
            non_makeup_down_warp = non_makeup_down_warp.reshape(n, c, h, w)
            non_makeup_warp = nearest_256(non_makeup_down_warp)
            # Same warp for the makeup image, using mapX.
            makeup_down = nearest_64(makeup)
            n, c, h, w = makeup_down.shape
            makeup_down_warp = bmm(makeup_down.reshape(n, c, h * w), mapX)  # n*HW*1
            makeup_down_warp = makeup_down_warp.reshape(n, c, h, w)
            makeup_warp = nearest_256(makeup_down_warp)
            # Row 1: transfer pipeline; row 2: removal pipeline; stacked vertically.
            row_1 = concat_3((non_makeup, makeup_warp, transfer_g, z_transfer, z_rec_non_makeup, z_cycle_non_makeup))
            row_2 = concat_3((makeup, non_makeup_warp, removal_g, z_removal, z_rec_makeup, z_cycle_makeup))
            result = concat_2((row_1, row_2))
            save_image(result, os.path.join(self.imgs_dir, f"{self.epoch}_result.jpg"))

    def start_predict(self, direction):
        """Start the prediction timer and log the direction banner."""
        self.predict_start_time = time.time()
        self.direction = direction
        self.info('==========start predict %s===============', self.direction)

    def end_predict(self):
        """Log total and per-image prediction cost, then close the banner."""
        cost = (time.time() - self.predict_start_time) * 1000
        per_step_cost = cost / self.dataset_size
        self.info('total {} imgs cost {:.2f} ms, per img cost {:.2f}'.format(self.dataset_size, cost, per_step_cost))
        self.info('==========end predict %s===============\n', self.direction)
| 43.729032 | 121 | 0.627176 | 961 | 6,778 | 4.182102 | 0.213319 | 0.042548 | 0.019905 | 0.005972 | 0.253546 | 0.155511 | 0.121423 | 0.070416 | 0.034337 | 0.034337 | 0 | 0.012932 | 0.235615 | 6,778 | 154 | 122 | 44.012987 | 0.762787 | 0.165683 | 0 | 0.073395 | 0 | 0.009174 | 0.071824 | 0.012328 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082569 | false | 0 | 0.06422 | 0 | 0.155963 | 0.027523 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514d5df2cbf6b921babea277704186e66c61af98 | 3,700 | py | Python | Algo_Ds_Notes-master/Algo_Ds_Notes-master/Dijkstra_Algorithm/Dijkstra_Algorithm.py | rajatenzyme/Coding-Journey- | 65a0570153b7e3393d78352e78fb2111223049f3 | [
"MIT"
] | null | null | null | Algo_Ds_Notes-master/Algo_Ds_Notes-master/Dijkstra_Algorithm/Dijkstra_Algorithm.py | rajatenzyme/Coding-Journey- | 65a0570153b7e3393d78352e78fb2111223049f3 | [
"MIT"
] | null | null | null | Algo_Ds_Notes-master/Algo_Ds_Notes-master/Dijkstra_Algorithm/Dijkstra_Algorithm.py | rajatenzyme/Coding-Journey- | 65a0570153b7e3393d78352e78fb2111223049f3 | [
"MIT"
] | null | null | null | '''
Dijkstra's algorithm for weighted undirected graph
'''
from collections import deque
class Dijkstra:
    """Dijkstra's single-source shortest paths on a weighted undirected graph.

    The graph is an adjacency mapping: graph[u][v] = weight of edge (u, v).
    `minDistance(source)` returns a dict mapping every vertex to its shortest
    distance from `source` (float('inf') for unreachable vertices).

    BUG FIXES vs. the original:
      * The relaxation step pushed the raw edge weight (`adjCost`) into the
        priority queue and distance table instead of the cumulative path cost,
        producing wrong answers (e.g. source 'E' gave C=17 although E-F-C=11).
      * `min_queue` was never reset between `minDistance` calls, so a second
        query on the same instance processed stale entries.
      * Python-2-only constructs (`iteritems`, integer `/` indexing) made the
        class unusable under Python 3; `items()` and heapq work on both.
    The hand-rolled 1-indexed heap is replaced by the standard `heapq` module;
    queue entries are (cost, vertex) tuples so the heap orders by cost.
    """

    def __init__(self, graph):
        self.vertex_visited = list()
        self.distance = {}
        self.graph = graph
        self.source = None
        self.queue_size = 0
        self.min_queue = []

    def initialise(self):
        """Reset all per-query state for a fresh run from `self.source`."""
        self.vertex_visited = list()
        # Source costs 0; every other vertex starts at infinity.
        self.distance = {vertex: (0 if vertex == self.source else float('inf'))
                         for vertex in self.graph}
        # Seed the (freshly emptied) queue with the source itself.
        self.min_queue = [(0, self.source)]
        self.queue_size = 1

    def priorityQueue(self, weight):
        """Push a (cost, vertex) entry onto the min-priority queue."""
        import heapq
        heapq.heappush(self.min_queue, weight)
        self.queue_size = len(self.min_queue)

    def del_min(self):
        """Pop and return the (cost, vertex) entry with the smallest cost."""
        import heapq
        popped = heapq.heappop(self.min_queue)
        self.queue_size = len(self.min_queue)
        return popped

    def minDistance(self, source):
        """Return the shortest-distance dict from `source` to every vertex."""
        self.source = source
        self.initialise()
        while self.min_queue:
            cost, vertex = self.del_min()
            # Stale entries (vertex already settled with a smaller cost) are skipped.
            if vertex in self.vertex_visited:
                continue
            self.vertex_visited.append(vertex)
            for adjVertex, adjCost in self.graph[vertex].items():
                new_cost = self.distance[vertex] + adjCost
                if new_cost < self.distance[adjVertex]:
                    self.distance[adjVertex] = new_cost
                    self.priorityQueue((new_cost, adjVertex))
        return self.distance
# Graph stored as an adjacency list: g[u][v] = weight of undirected edge (u, v).
g = {'A': {'C': 9, 'B': 7, 'F': 14},
     'B': {'A': 7, 'C': 10, 'D': 15},
     'C': {'A': 9, 'B': 10, 'D': 11, 'F': 2},
     'D': {'E': 6, 'B': 15, 'C': 11},
     'E': {'F': 9, 'D': 6},
     'F': {'C': 2, 'A': 14, 'E': 9}
     }

dijkstra = Dijkstra(g)
# BUG FIX: Python-2 'print x' statements are syntax errors under Python 3;
# the call form below is valid under both Python 2 and 3.
print(dijkstra.minDistance('A'))
print(dijkstra.minDistance('E'))

'''
Output
-------
{'A': 0, 'C': 9, 'B': 7, 'E': 20, 'D': 20, 'F': 11}
{'A': 26, 'C': 17, 'B': 21, 'E': 0, 'D': 6, 'F': 9}
'''
# NOTE(review): the second output line above looks wrong for a correct
# Dijkstra run on this graph ('C' should be 11 via E-F-C, 'A' 20) -- it
# appears to record output from a buggy implementation; verify.
| 33.333333 | 104 | 0.526216 | 481 | 3,700 | 3.954262 | 0.216216 | 0.084648 | 0.138801 | 0.061514 | 0.267087 | 0.213985 | 0.179285 | 0.138276 | 0.050473 | 0.050473 | 0 | 0.028386 | 0.333514 | 3,700 | 110 | 105 | 33.636364 | 0.742903 | 0.077027 | 0 | 0.125 | 0 | 0 | 0.009006 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.0125 | null | null | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
514d67f48ef86730369a1f20030702bb95bc30ac | 33,053 | py | Python | ServerFiles/FrontFollowingNetwork.py | Forence1999/SmartWalker | 635410bf44234eead9fd1e2fe226eb8eafa9d27d | [
"MIT"
] | 2 | 2021-11-13T14:16:06.000Z | 2022-01-12T06:07:32.000Z | ServerFiles/FrontFollowingNetwork.py | Forence1999/SmartWalker | 635410bf44234eead9fd1e2fe226eb8eafa9d27d | [
"MIT"
] | null | null | null | ServerFiles/FrontFollowingNetwork.py | Forence1999/SmartWalker | 635410bf44234eead9fd1e2fe226eb8eafa9d27d | [
"MIT"
] | 3 | 2021-08-30T04:40:39.000Z | 2022-01-09T11:34:04.000Z | #-*- coding: UTF-8 -*-
import sys,os
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import os
from typing import Tuple
import random
from Network import resnet
class Conv_part(keras.Model):
    """Three conv->batch-norm stages followed by max pooling.

    Each stage is a 3x3 same-padded ReLU Conv2D with `filter_unit` filters,
    followed by batch normalization; the block ends with a 3x3/stride-2
    max-pool that halves the spatial resolution.

    Args:
        filter_unit: number of filters in every convolution layer.
    """

    def __init__(self, filter_unit: int = 100):
        super().__init__()
        self.layer1 = keras.layers.Conv2D(filters=filter_unit, kernel_size=3, strides=1, activation="relu",
                                          padding="SAME")
        self.layer1_bn = keras.layers.BatchNormalization()
        self.layer2 = keras.layers.Conv2D(filters=filter_unit, kernel_size=3, strides=1, activation="relu",
                                          padding="SAME")
        self.layer2_bn = keras.layers.BatchNormalization()
        self.layer3 = keras.layers.Conv2D(filters=filter_unit, kernel_size=3, strides=1, activation="relu",
                                          padding="SAME")
        # BUG FIX: layer3_bn was assigned a second Conv2D instead of a
        # BatchNormalization (copy-paste error -- the name and the sibling
        # layer1_bn/layer2_bn show the intent). NOTE: this changes the layer
        # graph, so checkpoints trained with the old architecture won't load.
        self.layer3_bn = keras.layers.BatchNormalization()
        self.layer4 = keras.layers.MaxPool2D(pool_size=3, strides=2)

    def call(self, inputs):
        """Apply the three conv-BN stages and the final max-pool to `inputs`."""
        y = self.layer1(inputs)
        y = self.layer1_bn(y)
        y = self.layer2(y)
        y = self.layer2_bn(y)
        y = self.layer3(y)
        y = self.layer3_bn(y)
        y = self.layer4(y)
        return y
class Skin_part(keras.Model):
    """Two ReLU dense layers applied to a reshaped soft-skin vector.

    Args:
        input_shape: shape of the incoming soft-skin tensor.
        dense_unit: width of the two dense layers. BUG FIX: the original
            read `self.dense_unit` without ever assigning it, so constructing
            Skin_part always raised AttributeError. The default of 10 matches
            `FrontFollowing_Model.dense_unit`; the new parameter keeps the
            old one-argument call signature working.
    """

    def __init__(self, input_shape, dense_unit: int = 10):
        super().__init__()
        self.softskin_width = 128
        self.dense_unit = dense_unit
        self.layer_r = keras.layers.Reshape(input_shape=(input_shape), target_shape=(1, self.softskin_width))
        self.layer_1 = keras.layers.Dense(self.dense_unit, activation="relu")
        self.layer_2 = keras.layers.Dense(self.dense_unit, activation="relu")

    def call(self, input):
        """Reshape `input` to (1, softskin_width) and run both dense layers."""
        y = self.layer_r(input)
        y = self.layer_1(y)
        y = self.layer_2(y)
        return y
class FrontFollowing_Model(object):
    """Builds the three Keras networks used for front-following prediction.

    Constructs (at __init__ time) three models over IR-camera frames
    (768 values/frame, reshaped to 32x24), optional soft-skin sensor data
    (32 values/frame) and leg data (4 values/frame):
      * tendency_net -- per-frame CNN features over win_width-1 frames fed
        into an LSTM + dense head (6-way softmax, or actor/critic pair).
      * current_net  -- CNN + dense head over a single IR frame.
      * combine_net  -- weighted sum (Lambda = 0.1 current, 0.9 tendency)
        of both branches over win_width frames.

    NOTE(review): tendency_CNN_unit / current_CNN_unit parameters are
    accepted but never used -- the hard-coded CNN_filter_unit_* values (20)
    are used instead; verify which was intended.
    """

    def __init__(self, win_width: int = 10, tendency_CNN_unit:int = 10, current_CNN_unit:int = 10,
                 is_skin_input: bool = False, is_multiple_output: bool = False,show:bool=False):
        """Store the configuration and immediately build all three networks.

        Args:
            win_width: number of frames in the sliding window.
            tendency_CNN_unit: unused (see class NOTE).
            current_CNN_unit: unused (see class NOTE).
            is_skin_input: include soft-skin data in the tendency input.
            is_multiple_output: emit [actor, critic] heads instead of softmax.
            show: print model.summary() for each built network.
        """
        super().__init__()
        """data shape part"""
        self.win_width = win_width
        self.ir_data_width = 768
        self.softskin_width = 32
        self.leg_width = 4
        """network parameter"""
        self.dense_unit = 10
        self.CNN_filter_unit_tendency = 20
        self.CNN_filter_unit_current = 20
        self.show_summary = show
        self.is_multiple_output = is_multiple_output
        self.is_skin_input = is_skin_input
        """network building"""
        self.tendency_ir_part = Conv_part(self.CNN_filter_unit_tendency)
        self.current_ir_part = Conv_part(self.CNN_filter_unit_current)
        # self.tendency_ir_part = resnet.get_model("resnet34")
        # self.current_ir_part = resnet.get_model("resnet34")
        # self.skin_part = Skin_part()
        self.tendency_net = self.create_tendency_net()
        self.current_net = self.create_current_net()
        self.combine_net = self.creat_combine_net()

    def call(self, inputs: np.ndarray) -> tf.Tensor:
        # NOTE(review): self.model is never assigned anywhere in this class,
        # so calling this raises AttributeError -- probably meant one of
        # tendency_net/current_net/combine_net; verify before use.
        return self.model(inputs)

    def skin_dense_layers(self, inputs: tf.Tensor, input_shape: Tuple) -> tf.Tensor:
        """Reshape one soft-skin frame to (1, softskin_width) and apply two dense layers."""
        y = keras.layers.Reshape(input_shape=(input_shape), target_shape=(1, self.softskin_width))(inputs)
        y = keras.layers.Dense(self.dense_unit, activation="relu")(y)
        y = keras.layers.Dense(self.dense_unit, activation="relu")(y)
        return y

    def feature_abstraction(self, ir_data: tf.Tensor, skin_data: tf.Tensor, leg_data: tf.Tensor) -> tf.Tensor:
        """Slice the flat multi-frame buffers into frames and concatenate per-frame features.

        ir_data holds N consecutive 768-wide IR frames along axis 1; each is
        reshaped to 32x24, run through the shared tendency CNN and flattened.
        When is_skin_input is set, skin frames get the dense sub-network too.
        Leg data is flattened and appended unchanged ("shortcut").
        Returns one flat feature vector covering all frames.
        """
        if self.is_skin_input:
            """skin data as part of input"""
            data_num = int(ir_data.shape[1]/self.ir_data_width)
            for i in range(data_num):
                # split the tensor buffer into frames
                [ir_one_frame, ir_data] = tf.split(ir_data,
                                                   [self.ir_data_width, self.ir_data_width * (data_num - 1 - i)],
                                                   axis=1)
                [skin_one_frame, skin_data] = tf.split(skin_data, [self.softskin_width,
                                                                   self.softskin_width * (data_num - 1 - i)],
                                                       axis=1)
                # ir image feature abstraction
                output_ir = keras.layers.Reshape(input_shape=(self.ir_data_width, 1), target_shape=(32, 24, 1))(
                    ir_one_frame)
                output_ir = self.tendency_ir_part(output_ir)
                output_ir = keras.layers.Flatten()(output_ir)
                # soft skin feature abstraction
                skin_shape = skin_one_frame.shape
                output_skin = keras.layers.Flatten()(self.skin_dense_layers(skin_one_frame, skin_shape))
                output_leg = keras.layers.Flatten()(leg_data)
                # feature vector concatenate
                if i == 0:
                    output_feature = keras.layers.concatenate([output_ir, output_skin, output_leg])
                else:
                    output_feature = keras.layers.concatenate([output_feature, output_ir, output_skin, output_leg])
            return output_feature
        else:
            """skin data is not included in the input"""
            ir_data_num = int(ir_data.shape[1]/self.ir_data_width)
            for i in range(ir_data_num):
                # split the tensor buffer into frames
                [ir_one_frame, ir_data] = tf.split(ir_data,
                                                   [self.ir_data_width, self.ir_data_width * (ir_data_num - 1 - i)],
                                                   axis=1)
                # ir image feature abstraction
                output_ir = keras.layers.Reshape(input_shape=(self.ir_data_width, 1), target_shape=(32, 24, 1))(
                    ir_one_frame)
                output_ir = self.tendency_ir_part(output_ir)
                output_ir = keras.layers.Flatten()(output_ir)
                # leg feature just shortcut
                output_leg = keras.layers.Flatten()(leg_data)
                if i == 0:
                    output_feature = keras.layers.concatenate([output_ir, output_leg])
                else:
                    output_feature = keras.layers.concatenate([output_feature, output_ir, output_leg])
            return output_feature

    def create_tendency_net(self) -> tf.keras.Model:
        """Build the history branch: per-frame features over win_width-1 frames -> LSTM -> dense head.

        Input is one flat vector of (ir [+skin] + leg) * (win_width-1) values.
        Returns a softmax(6) model, or an uncompiled... actually a compiled
        RMSprop [actor, critic] model when is_multiple_output is set.
        """
        win_width = self.win_width - 1
        if self.is_skin_input:
            input_all = keras.Input(shape=((self.ir_data_width + self.softskin_width + self.leg_width) * win_width, 1))
            """Split the input data into two parts:ir data and softskin data"""
            [input_ir, input_softskin, input_leg] = tf.split(input_all, [self.ir_data_width * win_width,
                                                                         self.softskin_width * win_width,
                                                                         self.leg_width * win_width],
                                                             axis=1)
            output_combine = self.feature_abstraction(ir_data=input_ir, skin_data=input_softskin, leg_data=input_leg)
        else:
            input_all = keras.Input(shape=((self.ir_data_width + self.leg_width) * win_width, 1))
            [input_ir, input_leg] = tf.split(input_all, [self.ir_data_width * win_width,
                                                         self.leg_width * win_width],
                                             axis=1)
            # skin_data is unused in this branch; leg tensor passed as a placeholder.
            output_combine = self.feature_abstraction(ir_data=input_ir, leg_data=input_leg, skin_data=input_leg)
        # Regroup the flat feature vector into (time step, per-frame features) for the LSTM.
        output_reshape = keras.layers.Reshape(input_shape=(output_combine.shape),
                                              target_shape=(win_width, int(output_combine.shape[1] / win_width)))(
            output_combine)
        # LSTM part
        output_tendency = keras.layers.LSTM(64, activation='tanh',kernel_regularizer=keras.regularizers.l2(0.001))(output_reshape)
        output_tendency = keras.layers.Dense(128, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_tendency)
        output_tendency = keras.layers.Dropout(0.5)(output_tendency)
        output_tendency = keras.layers.Dense(256, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_tendency)
        output_tendency = keras.layers.Dropout(0.5)(output_tendency)
        output_tendency = keras.layers.Dense(64, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_tendency)
        output_final = keras.layers.Dropout(0.5)(output_tendency)
        if not self.is_multiple_output:
            output_final = keras.layers.Dense(6, activation='softmax')(output_final)
            model = keras.Model(inputs=input_all, outputs=output_final)
            if self.show_summary:
                model.summary()
            return model
        else:
            # Actor-critic heads for RL-style training.
            actor = keras.layers.Dense(6, activation='relu')(output_final)
            critic = keras.layers.Dense(1)(output_final)
            model = keras.Model(inputs=input_all, outputs=[actor, critic])
            if self.show_summary:
                model.summary()
            model.compile(optimizer='RMSprop',
                          loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                          metrics=['accuracy'])
            return model

    def create_current_net(self) -> tf.keras.Model:
        """Build the single-frame branch: one IR frame -> CNN -> dense head."""
        input_figure = keras.Input(shape=(self.ir_data_width, 1))
        output_ir = keras.layers.Reshape(input_shape=(self.ir_data_width, 1), target_shape=(32, 24, 1))(input_figure)
        output_ir = self.current_ir_part(output_ir)
        output_ir = keras.layers.Flatten()(output_ir)
        output_ir = keras.layers.Dense(128, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_ir)
        output_ir = keras.layers.Dropout(0.5)(output_ir)
        output_ir = keras.layers.Dense(256, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_ir)
        output_ir = keras.layers.Dropout(0.5)(output_ir)
        output_ir = keras.layers.Dense(64, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_ir)
        output_ir = keras.layers.Dropout(0.5)(output_ir)
        if not self.is_multiple_output:
            output_final = keras.layers.Dense(6, activation='softmax')(output_ir)
            model = keras.Model(inputs=input_figure, outputs=output_final)
            if self.show_summary:
                model.summary()
            return model
        else:
            # Actor-critic heads for RL-style training.
            actor = keras.layers.Dense(6, activation='relu')(output_ir)
            critic = keras.layers.Dense(1)(output_ir)
            model = keras.Model(inputs=input_figure, outputs=[actor, critic])
            if self.show_summary:
                model.summary()
            model.compile(optimizer='RMSprop',
                          loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                          metrics=['accuracy'])
            return model

    def creat_combine_net(self) -> tf.keras.Model:
        """Build the combined model: tendency branch (first win_width-1 frames) plus
        current branch (last frame), merged as 0.9*tendency + 0.1*current before
        the final softmax(6).

        NOTE(review): method name is misspelled ('creat') but kept -- callers
        below depend on it.
        """
        input_all = keras.Input(shape=((self.ir_data_width + self.leg_width) * self.win_width, 1))
        [input_ir, input_leg] = tf.split(input_all, [self.ir_data_width * self.win_width,
                                                     self.leg_width * self.win_width],
                                         axis=1)
        # Last frame feeds the "current" branch; the rest feed the "tendency" branch.
        [input_ir_tendency, input_ir_current] = tf.split(input_ir,
                                                         [self.ir_data_width * (self.win_width - 1),
                                                          self.ir_data_width],
                                                         axis=1)
        [input_leg_tendency, input_leg_current] = tf.split(input_leg,
                                                           [self.leg_width * (self.win_width - 1),
                                                            self.leg_width],
                                                           axis=1)
        # tendency part
        tendency_feture_combine = self.feature_abstraction(ir_data=input_ir_tendency, leg_data=input_leg_tendency,
                                                           skin_data=input_leg_tendency)
        tendency_feature = keras.layers.Reshape(input_shape=(tendency_feture_combine.shape),
                                                target_shape=(self.win_width-1,
                                                              int(tendency_feture_combine.shape[1] / (self.win_width-1))))(
            tendency_feture_combine)
        output_tendency = keras.layers.LSTM(64, activation='tanh',kernel_regularizer=keras.regularizers.l2(0.001))(tendency_feature)
        output_tendency = keras.layers.Dense(128, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_tendency)
        output_tendency = keras.layers.Dropout(0.5)(output_tendency)
        output_tendency = keras.layers.Dense(256, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_tendency)
        output_tendency = keras.layers.Dropout(0.5)(output_tendency)
        output_tendency = keras.layers.Dense(64, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_tendency)
        output_tendency = keras.layers.Dropout(0.5)(output_tendency)
        # current part
        output_current = keras.layers.Reshape(input_shape=(self.ir_data_width, 1), target_shape=(32, 24, 1))(
            input_ir_current)
        output_current = self.current_ir_part(output_current)
        output_current = keras.layers.Flatten()(output_current)
        output_current = keras.layers.Dense(128, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_current)
        output_current = keras.layers.Dropout(0.5)(output_current)
        output_current = keras.layers.Dense(256, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_current)
        output_current = keras.layers.Dropout(0.5)(output_current)
        output_current = keras.layers.Dense(64, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001))(output_current)
        output_current = keras.layers.Dropout(0.5)(output_current)
        # print(output_tendency.shape,output_current.shape)
        # Fixed mixing weight: 10% current frame, 90% history.
        Lambda = 0.1
        output_current = tf.math.multiply(output_current,Lambda)
        output_tendency = tf.math.multiply(output_tendency,1-Lambda)
        output_final = tf.add(output_current, output_tendency)
        output_final = keras.layers.Dense(6, activation='softmax')(output_final)
        model = keras.Model(inputs=input_all, outputs=output_final)
        if self.show_summary:
            model.summary()
        return model
def setup_seed(seed):
    """Seed every RNG used in this module for reproducible runs.

    Args:
        seed (int): value applied to Python's, NumPy's and TensorFlow's
            global random generators.
    """
    random.seed(seed)       # seed Python's built-in RNG
    np.random.seed(seed)    # seed NumPy's RNG
    # seed TensorFlow's graph-level RNG (the original called this twice;
    # the duplicate call was redundant and has been removed)
    tf.random.set_seed(seed)
if __name__ == "__main__":
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
FFL_Model = FrontFollowing_Model(win_width=10)
train_current = False
setup_seed(20)
def training(train_net_name:str="t", max_epochs:int=1000):
if train_net_name == "c":
"""data loading"""
current_os_data_path = "/data/cyzhao/os_data.txt"
current_os_data = np.loadtxt(current_os_data_path)
current_os_label_path = "/data/cyzhao/os_label.txt"
current_os_label = np.loadtxt(current_os_label_path)
current_os_label = current_os_label.reshape((current_os_label.shape[0],1))
current_s_data_path = "/data/cyzhao/s_data.txt"
current_s_data = np.loadtxt(current_s_data_path)
current_s_label_path = "/data/cyzhao/s_label.txt"
current_s_label = np.loadtxt(current_s_label_path)
current_s_label = current_s_label.reshape((current_s_label.shape[0],1))
test_data_path = "/data/cyzhao/test_o_data.txt"
test_data = np.loadtxt(test_data_path)
test_label_path = "/data/cyzhao/test_o_label.txt"
test_label = np.loadtxt(test_label_path)
test_label = test_label.reshape((test_label.shape[0],1))
optimizer = tf.keras.optimizers.Adam(learning_rate=0.000001)
FFL_Model.current_net.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
FFL_Model.current_net.fit(current_s_data,current_s_label,batch_size=128,epochs=20,verbose=1)
FFL_Model.current_net.save_weights('./checkpoints_s_current/Current')
FFL_Model.current_net.evaluate(test_data,test_label,verbose=1)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0000001)
FFL_Model.current_net.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
epochs_num = 0
max_test_acc = 0
max_acc_epoch = 0
file_curve_path = "./current_curve.txt"
file_curve = open(file_curve_path,'w')
while True:
current_os_dataset = np.concatenate([current_os_label, current_os_data], axis=1)
np.random.shuffle(current_os_dataset)
current_os_label = current_os_dataset[:, 0]
current_os_data = current_os_dataset[:, 1:current_os_dataset.shape[1]]
current_os_label = current_os_label.reshape((current_os_label.shape[0], 1))
print("epoch now: %d" % epochs_num)
if epochs_num >= max_epochs:
break
else:
history = FFL_Model.current_net.fit(current_os_data, current_os_label, batch_size=64, epochs=1, validation_data=(test_data,test_label), verbose=1)
epochs_num += 1
test_loss = history.history['val_loss'][0]
test_acc = history.history['val_accuracy'][0]
train_loss = history.history['loss'][0]
train_acc = history.history['accuracy'][0]
file_curve.write(str([train_loss, train_acc, test_loss, test_acc]) + "\n")
file_curve.flush()
if test_acc >= max_test_acc:
max_test_acc = test_acc
max_acc_epoch = epochs_num
FFL_Model.current_net.save_weights('./checkpoints_os_current/Current')
if test_acc > 0.8:
break
print("The maximum test accuracy is:%.3f, at epochs:%d"%(max_test_acc,max_acc_epoch))
file_curve.close()
elif train_net_name == "t":
tendency_data_path = "/data/cyzhao/t_data.txt"
tendency_data = np.loadtxt(tendency_data_path)
frames = int(tendency_data.shape[1]/(768+4))
ir_data = tendency_data[:,0:int((frames-1)*768)]
leg_data = tendency_data[:,int(frames*768):int(frames*768+(frames-1)*4)]
tendency_data = np.concatenate([ir_data,leg_data],axis=1)
tendency_label_path = "/data/cyzhao/t_label.txt"
tendency_label = np.loadtxt(tendency_label_path)
tendency_label = tendency_label.reshape((tendency_label.shape[0],1))
"""train data and test data are from different dataset"""
test_data_path = "/data/cyzhao/test_t_data.txt"
test_data = np.loadtxt(test_data_path)
test_label_path = "/data/cyzhao/test_t_label.txt"
test_label = np.loadtxt(test_label_path)
ir_data = test_data[:,0:int((frames-1)*768)]
leg_data = test_data[:,int(frames*768):int(frames*768+(frames-1)*4)]
test_data = np.concatenate([ir_data, leg_data], axis=1)
test_label = test_label.reshape((test_label.shape[0], 1))
test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], 1))
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
FFL_Model.tendency_net.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
epochs_num = 0
max_test_acc = 0
max_acc_epoch = 0
file_curve_path = "./tendency_curve.txt"
file_curve = open(file_curve_path,'w')
while True:
tendency_dataset = np.concatenate([tendency_label, tendency_data], axis=1)
np.random.shuffle(tendency_dataset)
tendency_label = tendency_dataset[:, 0]
tendency_data = tendency_dataset[:, 1:tendency_dataset.shape[1]]
tendency_label = tendency_label.reshape((tendency_label.shape[0], 1))
if epochs_num >= max_epochs:
break
print("epoch now: %d" % epochs_num)
# FFL_Model.tendency_net.fit(train_data, train_label, batch_size=128, epochs=1,validation_data=(validation_data,validation_label),verbose=1)
history = FFL_Model.tendency_net.fit(tendency_data, tendency_label, validation_data=(test_data,test_label), batch_size=64, epochs=1, verbose=1)
test_loss = history.history['val_loss'][0]
test_acc = history.history['val_accuracy'][0]
train_loss = history.history['loss'][0]
train_acc = history.history['accuracy'][0]
file_curve.write(str([train_loss,train_acc,test_loss,test_acc])+"\n")
file_curve.flush()
epochs_num += 1
if test_acc >= max_test_acc:
FFL_Model.tendency_net.save_weights('./checkpoints_tendency/Tendency')
max_test_acc = test_acc
max_acc_epoch = epochs_num
if test_acc >= 0.88:
break
print("The maximum test accuracy is:%.3f, at epochs:%d" % (max_test_acc, max_acc_epoch))
file_curve.close()
elif train_net_name == "a":
all_data_path = "/data/cyzhao/t_data.txt"
all_data = np.loadtxt(all_data_path)
all_label_path = "/data/cyzhao/t_label.txt"
all_label = np.loadtxt(all_label_path)
all_label = all_label.reshape((all_label.shape[0], 1))
print(all_data.shape)
"""train data and test data are from different dataset"""
test_data_path = "/data/cyzhao/test_t_data.txt"
test_data = np.loadtxt(test_data_path)
test_label_path = "/data/cyzhao/test_t_label.txt"
test_label = np.loadtxt(test_label_path)
test_label = test_label.reshape((test_label.shape[0], 1))
test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], 1))
print(test_data.shape)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005)
FFL_Model.combine_net.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
epochs_num = 0
max_test_acc = 0
max_acc_epoch = 0
file_curve_path = "./combine_curve.txt"
file_curve = open(file_curve_path, 'w')
while True:
all_dataset = np.concatenate([all_label, all_data], axis=1)
np.random.shuffle(all_dataset)
all_label = all_dataset[:, 0]
all_data = all_dataset[:, 1:all_dataset.shape[1]]
all_label = all_label.reshape((all_label.shape[0], 1))
if epochs_num >= max_epochs:
break
print("epoch now: %d" % epochs_num)
# FFL_Model.tendency_net.fit(train_data, train_label, batch_size=128, epochs=1,validation_data=(validation_data,validation_label),verbose=1)
history = FFL_Model.combine_net.fit(all_data, all_label,
validation_data=(test_data, test_label), batch_size=100, epochs=1,
verbose=1)
test_loss = history.history['val_loss'][0]
test_acc = history.history['val_accuracy'][0]
train_loss = history.history['loss'][0]
train_acc = history.history['accuracy'][0]
file_curve.write(str([train_loss, train_acc, test_loss, test_acc]) + "\n")
file_curve.flush()
epochs_num += 1
if test_acc >= max_test_acc:
FFL_Model.combine_net.save_weights('./checkpoints_combine/Combine')
max_test_acc = test_acc
max_acc_epoch = epochs_num
if test_acc >= 0.88:
break
print("The maximum test accuracy is:%.3f, at epochs:%d" % (max_test_acc, max_acc_epoch))
if epochs_num == 30:
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
FFL_Model.combine_net.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
file_curve.close()
elif train_net_name == "three":
all_data_path = "/data/cyzhao/t_data.txt"
all_data = np.loadtxt(all_data_path)
all_label_path = "/data/cyzhao/t_label.txt"
all_label = np.loadtxt(all_label_path)
all_label = all_label.reshape((all_label.shape[0], 1))
# tendency part
frames = int(all_data.shape[1] / (768 + 4))
ir_data = all_data[:, 0:int((frames - 1) * 768)]
leg_data = all_data[:, int(frames * 768):int(frames * 768 + (frames - 1) * 4)]
tendency_data = np.concatenate([ir_data, leg_data], axis=1)
# current part
ir_data = all_data[:, int((frames - 1) * 768):int(frames*768)]
# leg_data = all_data[:, int(frames * 768 + (frames - 1) * 4):int(frames*(768+4))]
current_data = ir_data
# label the same
all_label_path = "/data/cyzhao/t_label.txt"
all_label = np.loadtxt(all_label_path)
all_label = all_label.reshape((all_label.shape[0], 1))
"""train data and test data are from different dataset"""
test_data_path = "/data/cyzhao/test_t_data.txt"
test_data = np.loadtxt(test_data_path)
ir_data = test_data[:, 0:int((frames - 1) * 768)]
leg_data = test_data[:, int(frames * 768):int(frames * 768 + (frames - 1) * 4)]
test_tendency_data = np.concatenate([ir_data, leg_data], axis=1)
ir_data = test_data[:, int((frames - 1) * 768):int(frames*768)]
# leg_data = test_data[:, int(frames * 768 + (frames - 1) * 4):int(frames*(768+4))]
test_current_data = ir_data
test_label_path = "/data/cyzhao/test_t_label.txt"
test_label = np.loadtxt(test_label_path)
test_label = test_label.reshape((test_label.shape[0], 1))
test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], 1))
test_tendency_data = np.reshape(test_tendency_data, (test_tendency_data.shape[0], test_tendency_data.shape[1], 1))
test_current_data = np.reshape(test_current_data, (test_current_data.shape[0], test_current_data.shape[1], 1))
print(test_data.shape)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
FFL_Model.combine_net.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
FFL_Model.current_net.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00002)
FFL_Model.tendency_net.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
epochs_num = 0
max_all_acc = 0
max_all_epoch = max_tendency_epoch = max_current_epoch = 0
max_tendency_acc = 0
max_current_acc = 0
file_curve_path = "./combine_curve.txt"
file_curve = open(file_curve_path, 'w')
file_tendency_path = "./tendency_curve.txt"
file_tendency= open(file_tendency_path, 'w')
file_current_path = "./current_curve.txt"
file_current = open(file_current_path, 'w')
while True:
if epochs_num >= max_epochs:
break
print("epoch now: %d" % epochs_num)
# FFL_Model.tendency_net.fit(train_data, train_label, batch_size=128, epochs=1,validation_data=(validation_data,validation_label),verbose=1)
history = FFL_Model.combine_net.fit(all_data, all_label,
validation_data=(test_data, test_label), batch_size=100, epochs=1,
verbose=1)
history_t = FFL_Model.tendency_net.fit(tendency_data, all_label,
validation_data=(test_tendency_data, test_label), batch_size=100, epochs=1,
verbose=1)
history_c = FFL_Model.current_net.fit(current_data, all_label,
validation_data=(test_current_data, test_label), batch_size=100, epochs=1,
verbose=1)
test_loss = history.history['val_loss'][0]
test_acc = history.history['val_accuracy'][0]
train_loss = history.history['loss'][0]
train_acc = history.history['accuracy'][0]
file_curve.write(str([train_loss, train_acc, test_loss, test_acc]) + "\n")
file_curve.flush()
if test_acc >= max_all_acc:
FFL_Model.combine_net.save_weights('./checkpoints_combine/Combine')
max_all_acc = test_acc
max_all_epoch = epochs_num
test_loss = history_t.history['val_loss'][0]
test_acc = history_t.history['val_accuracy'][0]
train_loss = history_t.history['loss'][0]
train_acc = history_t.history['accuracy'][0]
file_tendency.write(str([train_loss, train_acc, test_loss, test_acc]) + "\n")
file_tendency.flush()
if test_acc >= max_tendency_acc:
FFL_Model.tendency_net.save_weights('./checkpoints_tendency/Tendency')
max_tendency_acc = test_acc
max_tendency_epoch = epochs_num
test_loss = history_c.history['val_loss'][0]
test_acc = history_c.history['val_accuracy'][0]
train_loss = history_c.history['loss'][0]
train_acc = history_c.history['accuracy'][0]
file_current.write(str([train_loss, train_acc, test_loss, test_acc]) + "\n")
file_current.flush()
if test_acc >= max_current_acc:
FFL_Model.current_net.save_weights('./checkpoints_os_current/Current')
max_current_acc = test_acc
max_current_epoch = epochs_num
epochs_num += 1
print("A:%.3f acc,%d epoch, T:%.3f acc,%d epoch, C:%.3f acc,%d epoch"%(max_all_acc,max_all_epoch,max_tendency_acc,max_tendency_epoch,max_current_acc,max_current_epoch))
file_curve.close()
file_tendency.flush()
file_current.flush()
training("three",max_epochs=1000)
| 55.365159 | 184 | 0.587571 | 3,975 | 33,053 | 4.581384 | 0.064151 | 0.038054 | 0.020208 | 0.016474 | 0.75092 | 0.708912 | 0.670035 | 0.630498 | 0.614079 | 0.598649 | 0 | 0.024984 | 0.308535 | 33,053 | 596 | 185 | 55.458054 | 0.771822 | 0.033885 | 0 | 0.530815 | 0 | 0.001988 | 0.049478 | 0.022343 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025845 | false | 0 | 0.015905 | 0.001988 | 0.069583 | 0.021869 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
514d779997818ca67945865e73aa82b847c739ae | 3,302 | py | Python | docs/_build/html/_downloads/152c7b8f9bc6f2cd3750f0cb8ddc0be4/lesson_2_a.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 12 | 2018-06-28T13:40:53.000Z | 2022-01-07T12:46:15.000Z | docs/_build/html/_downloads/152c7b8f9bc6f2cd3750f0cb8ddc0be4/lesson_2_a.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 6 | 2019-04-29T16:55:38.000Z | 2022-03-04T17:00:15.000Z | docs/_build/html/_downloads/152c7b8f9bc6f2cd3750f0cb8ddc0be4/lesson_2_a.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 5 | 2019-04-21T15:42:55.000Z | 2021-08-16T10:53:30.000Z | #<hide>
"""
filtergraph:
Streaming part | Decoding part
|
(LiveThread:livethread) -->> (AVThread:avthread) --> {InfoFrameFilter:info_filter}
"""
#</hide>
#<hide>
import time
from valkka.core import *
#</hide>
"""<rtf>
Let's consider the following filtergraph:
::
Streaming part | Decoding part
|
(LiveThread:livethread) -->> (AVThread:avthread) --> {InfoFrameFilter:info_filter}
Like in the previous lessons, we are reading frames from an IP camera. Instead of churning them through a series of filters, we pass them to another, independently running thread that performs decoding (AVThread).
Let's list all the symbols used until now and the corresponding objects:
====== ============ ==================================
Symbol Base class Explanation
====== ============ ==================================
() Thread An independently running thread
>> Crossover between two threads
{} FrameFilter A framefilter
====== ============ ==================================
That's all you need to create complex filtergraphs with Valkka.
We start as usual, by constructing the filterchain from end-to-beginning:
<rtf>"""
# decoding part
info_filter =InfoFrameFilter("info_filter")
avthread =AVThread("avthread",info_filter)
"""<rtf>
We need a framefilter to feed the frames into AVThread. This framefilter is requested from the AVThread itself:
<rtf>"""
# streaming part
av_in_filter =avthread.getFrameFilter()
livethread =LiveThread("livethread")
"""<rtf>
Finally, proceed as before: pass *av_in_filter* as a parameter to the connection context, start threads, etc.
<rtf>"""
ctx =LiveConnectionContext(LiveConnectionType_rtsp, "rtsp://admin:nordic12345@192.168.1.41", 1, av_in_filter)
"""<rtf>
Start threads. Starting the threads should be done in end-to-beginning order (in the same order we constructed the filterchain).
<rtf>"""
avthread.startCall()
livethread.startCall()
# start decoding
avthread.decodingOnCall()
livethread.registerStreamCall(ctx)
livethread.playStreamCall(ctx)
time.sleep(5)
# stop decoding
# avthread.decodingOffCall()
"""<rtf>
Stop threads. Stop threads in beginning-to-end order (i.e., following the filtergraph from left to right).
<rtf>"""
livethread.stopCall()
avthread.stopCall()
print("bye")
"""<rtf>
You will see output like this:
::
InfoFrameFilter: info_filter start dump>>
InfoFrameFilter: FRAME : <AVBitmapFrame: timestamp=1525870759898 subsession_index=0 slot=1 / h=1080; w=1920; l=(1920,960,960); f=12>
InfoFrameFilter: PAYLOAD : [47 47 47 47 47 47 47 47 47 47 ]
InfoFrameFilter: timediff: -22
InfoFrameFilter: info_filter <<end dump
InfoFrameFilter: info_filter start dump>>
InfoFrameFilter: FRAME : <AVBitmapFrame: timestamp=1525870759938 subsession_index=0 slot=1 / h=1080; w=1920; l=(1920,960,960); f=12>
InfoFrameFilter: PAYLOAD : [47 47 47 47 47 47 47 47 47 47 ]
InfoFrameFilter: timediff: -11
InfoFrameFilter: info_filter <<end dump
...
...
So, instead of H264 packets, we have decoded bitmap frames here.
In the next lesson, we'll dump them on the screen.
<rtf>"""
| 31.447619 | 214 | 0.657783 | 394 | 3,302 | 5.467005 | 0.418782 | 0.033426 | 0.044568 | 0.051996 | 0.292479 | 0.262767 | 0.262767 | 0.262767 | 0.262767 | 0.192201 | 0 | 0.050208 | 0.197759 | 3,302 | 104 | 215 | 31.75 | 0.762929 | 0.086008 | 0 | 0 | 0 | 0 | 0.113487 | 0.060855 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514e969fdf154b0e8e5327483cdde2b37efd808d | 44,964 | py | Python | cheshire3/normalizer.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 3 | 2015-08-02T09:03:28.000Z | 2017-12-06T09:26:14.000Z | cheshire3/normalizer.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 5 | 2015-08-17T01:16:35.000Z | 2015-09-16T21:51:27.000Z | cheshire3/normalizer.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
] | 6 | 2015-05-17T15:32:20.000Z | 2020-04-22T08:43:16.000Z | # -*- coding: utf-8 -ü-
import os
import re
import types
try:
from zopyx.txng3.ext import stemmer as Stemmer
except ImportError:
Stemmer = None
from cheshire3.baseObjects import Normalizer
from cheshire3.exceptions import (
ConfigFileException,
MissingDependencyException
)
class SimpleNormalizer(Normalizer):
    """Abstract Base Class for Normalizer.

    Simply returns the data and should never be used as it will simply waste
    CPU time.
    """

    def __init__(self, session, config, parent):
        Normalizer.__init__(self, session, config, parent)

    def process_string(self, session, data):
        """Process a string into an alternative form."""
        return data

    def process_hash(self, session, data):
        """Process a hash of values into alternative forms.

        Each value is a dict with at least a 'text' key; 'text' may be a
        string or a list of strings.  Normalized terms that collide are
        merged: 'occurences' are summed and 'positions'/'proxLoc' lists are
        concatenated where present.
        """
        kw = {}
        if not data:
            return kw
        # Hoist the bound method lookup out of the loop
        process = self.process_string
        #items = data.items()
        #prox = items[0][1].has_key('positions')
        for (k, d) in data.iteritems():
            dv = d['text']
            if type(dv) == list:
                new = []
                # Process list backwards so as not to munge character offsets
                for x in range(len(dv) - 1, -1, -1):
                    dvi = dv[x]
                    ndvi = process(session, dvi)
                    if ndvi:
                        new.append(ndvi)
                    else:
                        # Item normalized away: drop its charOffset too
                        try:
                            d['charOffsets'].pop(x)
                        except KeyError:
                            pass
                new.reverse()
                nd = d.copy()
                nd['text'] = new
                kw[k] = nd
                continue
            else:
                new = process(session, d['text'])
                if not new:
                    continue
            if isinstance(new, dict):
                # From string to hash
                for nv in new.itervalues():
                    txt = nv['text']
                    if txt in kw:
                        kw[txt]['occurences'] += nv['occurences']
                        try:
                            kw[txt]['positions'].extend(nv['positions'])
                        except:
                            pass
                        try:
                            kw[txt]['proxLoc'].extend(nv['proxLoc'])
                        except:
                            pass
                    else:
                        kw[txt] = nv
            else:
                if new is not None:
                    try:
                        # Existing entry for the normalized term: merge
                        kw[new]['occurences'] += d['occurences']
                        try:
                            kw[new]['positions'].extend(d['positions'])
                        except:
                            pass
                        try:
                            kw[new]['proxLoc'].extend(d['proxLoc'])
                        except:
                            pass
                    except KeyError:
                        # First occurrence: copy so the source hash (and its
                        # mutable lists) are not aliased by the result
                        d = d.copy()
                        try:
                            d['positions'] = d['positions'][:]
                        except:
                            pass
                        try:
                            d['proxLoc'] = d['proxLoc'][:]
                        except:
                            pass
                        d['text'] = new
                        kw[new] = d
        return kw
class DataExistsNormalizer(SimpleNormalizer):
    """Return '1' if any data exists, otherwise '0'."""

    def process_string(self, session, data):
        """Return "1" when ``data`` is truthy, "0" otherwise."""
        return "1" if data else "0"
class TermExistsNormalizer(SimpleNormalizer):
    """Un-stoplist anonymizing normalizer. Eg for use with data mining"""
    _possibleSettings = {
        'termlist': {
            'docs': ("'splitChar' (defaulting to space) separated list of "
                     "terms. For each term, if it exists in this list, the "
                     "normalizer returns '1', otherwise '0'"),
            'required': True
        },
        'splitChar': {
            'docs': "Override for the character to split on"
        },
        'frequency': {
            'docs': ("if 1, accumulate total occurences, otherwise add one "
                     "per term"),
            'type': int,
            'options': "0|1"
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        tlstr = self.get_setting(session, 'termlist', '')
        splitter = self.get_setting(session, 'splitChar', ' ')
        self.termlist = tlstr.split(splitter)
        self.frequency = self.get_setting(session, 'frequency', 0)

    def process_string(self, session, data):
        """Return "1" if the term is in the configured list, else "0"."""
        if data in self.termlist:
            return "1"
        else:
            return "0"

    def process_hash(self, session, data):
        """Return a count of listed terms found, as a string.

        NOTE(review): unlike other process_hash implementations this returns
        a string total rather than a hash -- downstream consumers must
        expect that.
        """
        kw = {}
        vals = data.values()
        if not vals:
            return kw
        process = self.process_string
        total = 0
        for d in vals:
            new = process(session, d['text'])
            if new == "1":
                if self.frequency:
                    # Weight by how often the term actually occurred
                    total += d['occurences']
                else:
                    total += 1
        return str(total)
class UndocumentNormalizer(SimpleNormalizer):
    """Take a document as if it were a string and turn into a string."""

    def process_string(self, session, data):
        """Return the raw content of the Document-like object ``data``."""
        raw = data.get_raw(session)
        return raw
class CaseNormalizer(SimpleNormalizer):
    """Reduce text to lower case."""

    def process_string(self, session, data):
        """Return ``data`` folded to lower case."""
        lowered = data.lower()
        return lowered
class ReverseNormalizer(SimpleNormalizer):
    """Reverse the string (e.g. to support left truncation searches)."""

    def process_string(self, session, data):
        """Return ``data`` with its characters in reverse order."""
        reversed_form = data[::-1]
        return reversed_form
class SpaceNormalizer(SimpleNormalizer):
    """Reduce multiple whitespace to a single space character."""

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        # All strings should be treated as unicode internally; this is the
        # default for lxml, the primary Record implementation.
        self.whitespace = re.compile("\s+", re.UNICODE)

    def process_string(self, session, data):
        """Strip the ends and collapse internal whitespace runs to spaces."""
        stripped = data.strip()
        collapsed = self.whitespace.sub(' ', stripped)
        return collapsed
class ArticleNormalizer(SimpleNormalizer):
    """Remove leading english articles (the, a, an)"""

    def process_string(self, session, data):
        """Strip one leading article (case-insensitive) from ``data``."""
        lowered = data.lower()
        # The prefixes are mutually exclusive, so order does not matter
        for article in ("the ", "a ", "an "):
            if lowered.startswith(article):
                return data[len(article):]
        return data
class NumericEntityNormalizer(SimpleNormalizer):
    """Replace characters matching a regular expression with the
    equivalent numeric XML character entity."""

    _possibleSettings = {
        'regexp': {
            'docs': ("Regular expression of that matches characters to turn "
                     "into XML numeric character entities")
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        regex = self.get_setting(session,
                                 'regexp',
                                 '([\x0e-\x1f]|[\x7b-\xff])')
        self.regexp = re.compile(regex)

        # Named function instead of a lambda: turn a match into &#NNN;
        def _to_numeric_entity(match):
            return "&#%s;" % ord(match.group(1))

        self.function = _to_numeric_entity

    def process_string(self, session, data):
        """Substitute every match in ``data`` with its numeric entity."""
        return self.regexp.sub(self.function, data)
# Non printable characters (Printable)
# self.asciiRe = re.compile('([\x0e-\x1f]|[\x7b-\xff])')
# Non useful characters (Stripper)
# self.asciiRe = re.compile('["%#@~!*{}]')
class PointlessCharacterNormalizer(SimpleNormalizer):
    """Replace typographic ligatures and smart punctuation with ASCII
    equivalents."""

    def process_string(self, session, data):
        t = data.replace(u'\ufb00', 'ff')  # LATIN SMALL LIGATURE FF
        t = t.replace(u'\ufb01', 'fi')     # LATIN SMALL LIGATURE FI
        # NOTE(review): u'\xe6' is ae/ash, not an fi ligature -- mapping it
        # to 'fi' looks like a typo for 'ae' (cf. DiacriticNormalizer which
        # maps U+00E6 to "ae"); confirm intent before changing.
        t = t.replace(u'\xe6', 'fi')
        t = t.replace(u'\ufb02', 'fl')     # LATIN SMALL LIGATURE FL
        t = t.replace(u'\u201c', '"')      # left double quotation mark
        t = t.replace(u'\u201d', '"')      # right double quotation mark
        t = t.replace(u'\u2019', "'")      # right single quotation mark
        t = t.replace(u'\u2026', " ")      # horizontal ellipsis
        return t
class RegexpNormalizer(SimpleNormalizer):
    """Strip, replace or keep data matching a regular expression."""
    _possibleSettings = {
        'char': {
            'docs': ("Character(s) to replace matches in the regular "
                     "expression with. Defaults to empty string (eg strip "
                     "matches)")
        },
        'regexp': {
            'docs': "Regular expression to match in the data.",
            'required': True
        },
        'keep': {
            'docs': ("Should instead keep only the matches. Boolean, defaults "
                     "to False"),
            'type': int,
            'options': "0|1"
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        self.char = self.get_setting(session, 'char', '')
        self.keep = self.get_setting(session, 'keep', 0)
        regex = self.get_setting(session, 'regexp')
        if regex:
            self.regexp = re.compile(regex)
        else:
            raise ConfigFileException("Missing regexp setting for "
                                      "%s." % (self.id))

    def process_string(self, session, data):
        """Apply the configured regexp to ``data``.

        With keep=1, returns the matches joined by the 'char' setting;
        otherwise returns ``data`` with matches replaced by 'char'.
        """
        try:
            return self._apply(data)
        except UnicodeDecodeError:
            # Byte string the pattern could not handle: retry as unicode.
            # (Previously this retry was duplicated in both branches, with a
            # redundant nested "try/except: raise" around the second call.)
            return self._apply(data.decode('utf-8'))

    def _apply(self, data):
        # Single place that runs the regexp, for both keep modes.
        if self.keep:
            return self.char.join(self.regexp.findall(data))
        return self.regexp.sub(self.char, data)
class NamedRegexpNormalizer(RegexpNormalizer):
    """A RegexpNormalizer with templating for named groups.

    As RegexpNormalizer, but allow named groups and reconstruction of token
    using a template and those groups.
    """
    _possibleSettings = {
        'template': {
            'docs': ("Template using group names for replacement, as per % "
                     "substitution. Eg regexp = (?P<word>.+)/(?P<pos>.+) and "
                     "template = --%(pos)s--, cat/NN would generate --NN--")
        }
    }

    def __init__(self, session, config, parent):
        RegexpNormalizer.__init__(self, session, config, parent)
        self.template = self.get_setting(session, 'template', '')

    def process_string(self, session, data):
        """Match ``data`` against the regexp and fill the template.

        Returns the empty string when the data does not match, or when the
        template substitution fails (e.g. references a group the regexp
        did not capture).
        """
        m = self.regexp.match(data)
        if m:
            try:
                return self.template % m.groupdict()
            except:
                # Bad template or missing group: treat as no match
                return ""
        else:
            return ""
class RegexpFilterNormalizer(SimpleNormalizer):
    """Filter data with a regular expression.

    When the 'keep' setting is true, data that does NOT match 'regexp' is
    filtered out; when false, data that DOES match is filtered out.
    """
    _possibleSettings = {
        'regexp': {
            'docs': "Regular expression to match in the data."
        },
        'keep': {
            'docs': ("Should keep only data matching the regexp. Boolean "
                     "setting, defaults to True"),
            'type': int
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        regex = self.get_setting(session,
                                 'regexp',
                                 '^[a-zA-Z\'][a-zA-Z\'.-]+[?!,;:]?$')
        self.re = re.compile(regex)
        self.keep = self.get_setting(session, 'keep', 1)

    def process_string(self, session, data):
        """Return ``data`` if it passes the filter, otherwise None."""
        matched = self.re.match(data) is not None
        if matched == bool(self.keep):
            return data
        return None

    def process_hash(self, session, data):
        data = SimpleNormalizer.process_hash(self, session, data)
        # Filtered-out terms would map to a None key; drop it if present.
        data.pop(None, None)
        return data
class PossessiveNormalizer(SimpleNormalizer):
    """Remove trailing 's or s' from words."""

    def process_string(self, session, data):
        # Not totally correct, e.g. "it's" == "it is", not possessive.
        tail = data[-2:]
        if tail == "s'":
            return data[:-1]
        if tail == "'s":
            return data[:-2]
        return data
class IntNormalizer(SimpleNormalizer):
    """Turn a string into an integer."""

    def process_string(self, session, data):
        """Return ``data`` as a long integer, or None if unparseable.

        The previous bare ``except`` swallowed every exception, including
        KeyboardInterrupt; only conversion failures should yield None.
        """
        try:
            return long(data)
        except (TypeError, ValueError):
            return None
class StringIntNormalizer(SimpleNormalizer):
    """Turn an integer into a 0 padded string, 12 chars long."""

    def process_string(self, session, data):
        """Return ``data`` zero-padded to 12 digits, or None if unparseable.

        Only the conversion can fail; the previous bare ``except`` swallowed
        every exception, including KeyboardInterrupt.
        """
        try:
            d = long(data)
        except (TypeError, ValueError):
            return None
        return "%012d" % (d)
class FileAssistedNormalizer(SimpleNormalizer):
    """Base Class for Normalizers configured with an additional file.

    Abstract class for Normalizers that can be configured using an additional
    file e.g. for specifying a stoplist, or a list of acronyms and their
    expansions.
    """

    def _processPath(self, session, path):
        """Return the list of lines from the file configured at ``path``.

        Relative paths are resolved against the object's defaultPath.
        Raises ConfigFileException when the path is unconfigured or the
        file cannot be read.
        """
        fp = self.get_path(session, path)
        if fp is None:
            raise ConfigFileException("No {0} file specified for object with "
                                      "id '{1}'.".format(path, self.id))
        if (not os.path.isabs(fp)):
            dfp = self.get_path(session, "defaultPath")
            fp = os.path.join(dfp, fp)
        try:
            # 'with' guarantees the handle is closed even if readlines()
            # fails part-way (the previous open/readlines/close leaked the
            # handle on error).
            with open(fp, 'r') as fh:
                return fh.readlines()
        except IOError as e:
            raise ConfigFileException("{0} for object with id '{1}'."
                                      "".format(str(e), self.id))
class StoplistNormalizer(FileAssistedNormalizer):
    """Normalizer to remove words that occur in a stopword list."""
    stoplist = {}

    _possiblePaths = {
        'stoplist': {
            'docs': ("Path to file containing set of stop terms, one term "
                     "per line."),
            'required': True
        }
    }

    def __init__(self, session, config, parent):
        FileAssistedNormalizer.__init__(self, session, config, parent)
        lines = self._processPath(session, 'stoplist')
        # Dict used purely for O(1) membership tests, one entry per term
        self.stoplist = dict.fromkeys((sw.strip() for sw in lines), 1)

    def process_string(self, session, data):
        """Return None for stoplisted terms, the term itself otherwise."""
        return None if data in self.stoplist else data
class TokenExpansionNormalizer(FileAssistedNormalizer):
    """ Expand acronyms or compound words.

    Only works with tokens NOT exact strings.
    """
    expansions = {}

    _possiblePaths = {
        'expansions': {
            'docs': ("Path to file containing set of expansions, one "
                     "expansion per line. First token in line is taken to be "
                     "the thing to be expanded, remaining tokens are what "
                     "occurences should be replaced with."),
            'required': True
        }
    }

    _possibleSettings = {
        'keepOriginal': {
            'docs': ("Should the original token be kept as well as its "
                     "expansion (e.g. potentialy useful when browsing). "
                     "Defaults to False."),
            'type': int,
            'options': "0|1"
        }
    }

    def __init__(self, session, config, parent):
        FileAssistedNormalizer.__init__(self, session, config, parent)
        self.expansions = {}
        self.keepOriginal = self.get_setting(session, 'keepOriginal', 0)
        lines = self._processPath(session, 'expansions')
        for exp in lines:
            # First token is the term to expand; the rest is its expansion
            bits = unicode(exp).split()
            self.expansions[bits[0]] = bits[1:]

    def process_string(self, session, data):
        """Return the expansion joined by spaces, or ``data`` unchanged."""
        try:
            return ' '.join(self.expansions[data])
        except KeyError:
            return data

    def process_hash(self, session, data):
        """Replace each expandable token with its expansion tokens.

        Raises NotImplementedError for input carrying positions/charOffsets,
        as expansion would invalidate them.
        """
        kw = {}
        if not len(data):
            return kw
        keep = self.keepOriginal
        process = self.process_string
        map = self.expansions
        for d in data.itervalues():
            if 'positions' in d or 'charOffsets' in d:
                raise NotImplementedError
            t = d['text']
            if (t in map):
                dlist = map[t]
                for new in dlist:
                    if (new in kw):
                        kw[new]['occurences'] += 1
                    else:
                        nd = d.copy()
                        nd['text'] = new
                        kw[new] = nd
                if keep:
                    # Keep the unexpanded token alongside its expansion
                    kw[t] = d
            else:
                kw[t] = d
        return kw
class StemNormalizer(SimpleNormalizer):
    """Use a Snowball stemmer to stem the terms."""
    stemmer = None

    _possibleSettings = {
        'language': {
            'docs': ("Language to create a stemmer for, defaults to "
                     "english."),
            'options': ("danish|dutch|english|finnish|french|german|"
                        "italian|norwegian|porter|portuguese|russian|"
                        "spanish|swedish")
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        if Stemmer is None:
            raise MissingDependencyException(self.objectType,
                                             "zopyx.txng3.ext"
                                             )
        lang = self.get_setting(session, 'language', 'english')
        try:
            self.stemmer = Stemmer.Stemmer(lang)
        except:
            # The C extension's failure type is unspecified; report the
            # unsupported language as a configuration error.
            raise ConfigFileException("Unknown stemmer language: "
                                      "%s" % (lang))

    def process_string(self, session, data):
        """Return the stemmed form of ``data``, decoding bytes as UTF-8.

        Uses isinstance rather than an exact type comparison, so unicode
        subclasses are not wrongly re-decoded (which would raise TypeError).
        """
        if not isinstance(data, unicode):
            data = unicode(data, 'utf-8')
        return self.stemmer.stem([data])[0]
class PhraseStemNormalizer(SimpleNormalizer):
    """Use a Snowball stemmer to stem multiple words in a phrase.

    Deprecated: Should instead use normalizer after tokenizer and before
    tokenMerger.
    """
    stemmer = None

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        if Stemmer is None:
            raise MissingDependencyException(self.objectType,
                                             "zopyx.txng3.ext"
                                             )
        lang = self.get_setting(session, 'language', 'english')
        self.punctuationRe = re.compile(
            "((?<!s)'|[-.,]((?=\s)|$)|(^|(?<=\s))[-.,']|"
            "[~`!@+=\#\&\^*()\[\]{}\\\|\":;<>?/])"
        )
        try:
            self.stemmer = Stemmer.Stemmer(lang)
        except:
            raise ConfigFileException("Unknown stemmer language: %s" %
                                      (lang))

    def process_string(self, session, data):
        """Strip punctuation, stem each word, and rejoin with spaces."""
        if not isinstance(data, unicode):
            data = unicode(data, 'utf-8')
        s = self.punctuationRe.sub(' ', data)
        # BUG FIX: previously the punctuation-stripped string ``s`` was
        # computed and then discarded -- the raw ``data`` was split instead.
        wds = s.split()
        stemmed = self.stemmer.stem(wds)
        return ' '.join(stemmed)
class PhoneticNormalizer(SimpleNormalizer):
    u"""Carries out phonetic normalization.

    Currently fairly simple normalization after "Introduction to Information
    Retrieval" by Christopher D. Manning, Prabhakar Raghavan & Hinrich Schütze
    except that length of final term is configurable (not hard-coded to 4
    characters.)"""

    _possibleSettings = {
        'termSize': {
            'docs': ("Number of characters to reduce/pad the phonetically "
                     "normalized term to. If not a positive integer no "
                     "reduction/padding applied (default)."),
            'type': int
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        self.nChars = self.get_setting(session, 'termSize', 0)
        # One compiled pattern per soundex-style character class; re0 covers
        # the letters that are dropped (step 2 in process_string), re1-re6
        # the classes mapped to digits 1-6 (step 3).
        self.re0 = re.compile('[aeiouhwy]+', re.IGNORECASE | re.UNICODE)
        self.re1 = re.compile('[bfpv]+', re.IGNORECASE | re.UNICODE)
        self.re2 = re.compile('[cgjkqsxz]+', re.IGNORECASE | re.UNICODE)
        self.re3 = re.compile('[dt]+', re.IGNORECASE | re.UNICODE)
        self.re4 = re.compile('[l]+', re.IGNORECASE | re.UNICODE)
        self.re5 = re.compile('[mn]+', re.IGNORECASE | re.UNICODE)
        self.re6 = re.compile('[r]+', re.IGNORECASE | re.UNICODE)

    def process_string(self, session, data):
        """Return the soundex-style phonetic key for ``data``."""
        # 0. Prepare by stripping leading/trailing whitespace
        data = data.strip()
        # 1. Retain the first letter of the term.
        # 2. Change all occurrences of the following letters to '0' (zero):
        #    'A', E', 'I', 'O', 'U', 'H', 'W', 'Y'.
        # 3. Change letters to digits as follows:
        #    B, F, P, V to 1. C, G, J, K, Q, S, X, Z -> 2
        #    D,T to 3. L -> 4
        #    M, N -> 5
        #    R -> 6.
        # 4. Repeatedly remove one out of each pair of consecutive identical
        #    digits.
        # (The '+' in each pattern collapses runs, achieving steps 2-4 in a
        # single substitution per class.)
        tail = data[1:]
        for i, regex in enumerate([self.re0, self.re1, self.re2, self.re3,
                                   self.re4, self.re5, self.re6]):
            tail = regex.sub(str(i), tail)
        # 5. Remove all zeros from the resulting string.
        tail = tail.replace('0', '')
        result = data[0] + tail
        if self.nChars:
            # Pad the resulting string with trailing zeros and return the first
            # self.nChars positions
            result = '{0:0<{1}}'.format(result[:self.nChars], self.nChars)
        if type(data) == unicode:
            return unicode(result)
        else:
            return result
class DateStringNormalizer(SimpleNormalizer):
    """Turn a Date object into ISO8601 format."""

    def process_string(self, session, data):
        """Return the str() form of ``data`` (ISO8601 for date objects)."""
        iso = str(data)
        return iso
class DateYearNormalizer(SimpleNormalizer):
    """Normalize a date in ISO8601 format to simply a year.

    Very crude implementation, simply returns first 4 characters.
    """

    def process_string(self, session, data):
        """Return the first four characters of ``data`` (the year)."""
        year = data[:4]
        return year
class IdToFilenameNormalizer(SimpleNormalizer):
    """Turn an id into a filename with an appropriate extension.

    The extension is a configurable setting, defaulting to .xml
    """
    _possibleSettings = {
        'extension': {
            'docs': ("File extension (including leading period / stop) to "
                     "append to given id to produce and appropriate "
                     "filename."),
            'type': str,
            'default': '.xml'
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        self.ext = self.get_setting(session, 'extension', '.xml')

    def process_string(self, session, data):
        """Return the id with the configured extension appended."""
        filename = str(data) + self.ext
        return filename
class FilenameToIdNormalizer(SimpleNormalizer):
    """ Turn a filename into an id by stripping off the filename extension.

    Only strips off the final extension, including the period / stop.
    """

    def process_string(self, session, data):
        """Return the filename with its final extension removed."""
        # Renamed locals so the builtin ``id`` is not shadowed
        ident, _ext = os.path.splitext(data)
        return ident
class RangeNormalizer(SimpleNormalizer):
    """ XXX: This is actually a job for a TokenMerger. Deprecated"""

    def process_hash(self, session, data):
        """Pair up consecutive terms into "low high" range strings.

        NOTE(review): deprecated; pairs are formed by sorted position keys
        when proximity info is present, otherwise input order is trusted.
        """
        # Need to step through positions in order
        kw = {}
        vals = data.values()
        if not vals:
            return kw
        prox = 'positions' in vals[0]
        if not prox:
            # Bad. Assume low -> high order
            tmplist = [(d['text'], d) for d in vals]
        else:
            # Need to duplicate across occs, as all in same hash from record
            tmplist = []
            for d in vals:
                for x in range(0, len(d['positions']), 2):
                    tmplist.append(("%s-%s" %
                                    (d['positions'][x], d['positions'][x + 1]),
                                    d))
            tmplist.sort()
        for t in range(0, len(tmplist), 2):
            base = tmplist[t][1]
            try:
                text = base['text'] + " " + tmplist[t + 1][1]['text']
            except:
                # Odd number of entries: pair the final term with itself
                text = base['text'] + " " + base['text']
            base['text'] = text
            try:
                # Positions no longer meaningful for the merged range term
                del base['positions']
            except:
                pass
            kw[text] = base
        return kw
class UnicodeCollationNormalizer(SimpleNormalizer):
    """ Use pyuca to create sort key for string

    Only, but Very, useful for sorting
    """

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        keyPath = self.get_path(session, 'keyFile', 'allkeys.txt')
        # This is handy -- means if no pyuca, no problem
        # (import deferred so the module loads without pyuca installed)
        from pyuca import Collator
        self.collator = Collator(keyPath)

    def process_string(self, session, data):
        """Return a sortable string of zero-padded collation elements."""
        # fix eszett sorting
        data = data.replace(u'\u00DF', 'ss')
        ints = self.collator.sort_key(data)
        exp = ["%04d" % i for i in ints]
        return ''.join(exp)
class DiacriticNormalizer(SimpleNormalizer):
    """Normalizer to turn accented characters into their closest ASCII
    approximation.

    Slow implementation of Unicode 4.0 character decomposition.
    Eg that &eacute; -> e
    """

    # Decomposition as per Unicode 4.0 Data file.
    # Built once at class definition time and shared read-only by all
    # instances, instead of being rebuilt inside every __init__ call.
    map = {
        u"\u00A7": u"Section",
        u"\u00A9": u"(c)",
        # Exhaustive accented alphabetical, diacritics and ligatures
        u"\u00C0": u"\u0041",
        u"\u00C1": u"\u0041",
        u"\u00C2": u"\u0041",
        u"\u00C3": u"\u0041",
        u"\u00C4": u"\u0041",
        u"\u00C5": u"\u0041",
        u"\u00C6": u"AE",
        u"\u00C7": u"\u0043",
        u"\u00C8": u"\u0045",
        u"\u00C9": u"\u0045",
        u"\u00CA": u"\u0045",
        u"\u00CB": u"\u0045",
        u"\u00CC": u"\u0049",
        u"\u00CD": u"\u0049",
        u"\u00CE": u"\u0049",
        u"\u00CF": u"\u0049",
        u"\u00D0": u"\u0044",
        u"\u00D1": u"\u004E",
        u"\u00D2": u"\u004F",
        u"\u00D3": u"\u004F",
        u"\u00D4": u"\u004F",
        u"\u00D5": u"\u004F",
        u"\u00D6": u"\u004F",
        u"\u00D7": u"x",
        u"\u00D8": u"O",
        u"\u00D9": u"\u0055",
        u"\u00DA": u"\u0055",
        u"\u00DB": u"\u0055",
        u"\u00DC": u"\u0055",
        u"\u00DD": u"\u0059",
        u"\u00DE": u"TH",
        u"\u00DF": u"ss",
        u"\u00E0": u"\u0061",
        u"\u00E1": u"\u0061",
        u"\u00E2": u"\u0061",
        u"\u00E3": u"\u0061",
        u"\u00E4": u"\u0061",
        u"\u00E5": u"\u0061",
        u"\u00E6": u"\u0061\u0065",
        u"\u00E7": u"\u0063",
        u"\u00E8": u"\u0065",
        u"\u00E9": u"\u0065",
        u"\u00EA": u"\u0065",
        u"\u00EB": u"\u0065",
        u"\u00EC": u"\u0069",
        u"\u00ED": u"\u0069",
        u"\u00EE": u"\u0069",
        u"\u00EF": u"\u0069",
        u"\u00F0": u"\u0064",
        u"\u00F1": u"\u006E",
        u"\u00F2": u"\u006F",
        u"\u00F3": u"\u006F",
        u"\u00F4": u"\u006F",
        u"\u00F5": u"\u006F",
        u"\u00F6": u"\u006F",
        u"\u00F7": u"/",
        u"\u00F8": u"\u006F",
        u"\u00F9": u"\u0075",
        u"\u00FA": u"\u0075",
        u"\u00FB": u"\u0075",
        u"\u00FC": u"\u0075",
        u"\u00FD": u"\u0079",
        u"\u00FE": u"th",
        u"\u00FF": u"\u0079",
        u"\u0100": u"\u0041",
        u"\u0101": u"\u0061",
        u"\u0102": u"\u0041",
        u"\u0103": u"\u0061",
        u"\u0104": u"\u0041",
        u"\u0105": u"\u0061",
        u"\u0106": u"\u0043",
        u"\u0107": u"\u0063",
        u"\u0108": u"\u0043",
        u"\u0109": u"\u0063",
        u"\u010A": u"\u0043",
        u"\u010B": u"\u0063",
        u"\u010C": u"\u0043",
        u"\u010D": u"\u0063",
        u"\u010E": u"\u0044",
        u"\u010F": u"\u0064",
        u"\u0110": u"D",
        u"\u0111": u"d",
        u"\u0112": u"\u0045",
        u"\u0113": u"\u0065",
        u"\u0114": u"\u0045",
        u"\u0115": u"\u0065",
        u"\u0116": u"\u0045",
        u"\u0117": u"\u0065",
        u"\u0118": u"\u0045",
        u"\u0119": u"\u0065",
        u"\u011A": u"\u0045",
        u"\u011B": u"\u0065",
        u"\u011C": u"\u0047",
        u"\u011D": u"\u0067",
        u"\u011E": u"\u0047",
        u"\u011F": u"\u0067",
        u"\u0120": u"\u0047",
        u"\u0121": u"\u0067",
        u"\u0122": u"\u0047",
        u"\u0123": u"\u0067",
        u"\u0124": u"\u0048",
        u"\u0125": u"\u0068",
        u"\u0126": u"H",
        u"\u0127": u"h",
        u"\u0128": u"\u0049",
        u"\u0129": u"\u0069",
        u"\u012A": u"\u0049",
        u"\u012B": u"\u0069",
        u"\u012C": u"\u0049",
        u"\u012D": u"\u0069",
        u"\u012E": u"\u0049",
        u"\u012F": u"\u0069",
        u"\u0130": u"\u0049",
        u"\u0131": u"i",
        u"\u0132": u"\u0049",
        u"\u0133": u"\u0069",
        u"\u0134": u"\u004A",
        u"\u0135": u"\u006A",
        u"\u0136": u"\u004B",
        u"\u0137": u"\u006B",
        u"\u0138": u"k",
        u"\u0139": u"\u004C",
        u"\u013A": u"\u006C",
        u"\u013B": u"\u004C",
        u"\u013C": u"\u006C",
        u"\u013D": u"\u004C",
        u"\u013E": u"\u006C",
        u"\u013F": u"\u004C",
        u"\u0140": u"\u006C",
        u"\u0141": u"L",
        u"\u0142": u"l",
        u"\u0143": u"\u004E",
        u"\u0144": u"\u006E",
        u"\u0145": u"\u004E",
        u"\u0146": u"\u006E",
        u"\u0147": u"\u004E",
        u"\u0148": u"\u006E",
        u"\u0149": u"\u02BC",
        u"\u014A": u"N",
        u"\u014B": u"n",
        u"\u014C": u"\u004F",
        u"\u014D": u"\u006F",
        u"\u014E": u"\u004F",
        u"\u014F": u"\u006F",
        u"\u0150": u"\u004F",
        u"\u0151": u"\u006F",
        u"\u0152": u"OE",
        u"\u0153": u"oe",
        u"\u0154": u"\u0052",
        u"\u0155": u"\u0072",
        u"\u0156": u"\u0052",
        u"\u0157": u"\u0072",
        u"\u0158": u"\u0052",
        u"\u0159": u"\u0072",
        u"\u015A": u"\u0053",
        u"\u015B": u"\u0073",
        u"\u015C": u"\u0053",
        u"\u015D": u"\u0073",
        u"\u015E": u"\u0053",
        u"\u015F": u"\u0073",
        u"\u0160": u"\u0053",
        u"\u0161": u"\u0073",
        u"\u0162": u"\u0054",
        u"\u0163": u"\u0074",
        u"\u0164": u"\u0054",
        u"\u0165": u"\u0074",
        u"\u0166": u"T",
        u"\u0167": u"t",
        u"\u0168": u"\u0055",
        u"\u0169": u"\u0075",
        u"\u016A": u"\u0055",
        u"\u016B": u"\u0075",
        u"\u016C": u"\u0055",
        u"\u016D": u"\u0075",
        u"\u016E": u"\u0055",
        u"\u016F": u"\u0075",
        u"\u0170": u"\u0055",
        u"\u0171": u"\u0075",
        u"\u0172": u"\u0055",
        u"\u0173": u"\u0075",
        u"\u0174": u"\u0057",
        u"\u0175": u"\u0077",
        u"\u0176": u"\u0059",
        u"\u0177": u"\u0079",
        u"\u0178": u"\u0059",
        u"\u0179": u"\u005A",
        u"\u017A": u"\u007A",
        u"\u017B": u"\u005A",
        u"\u017C": u"\u007A",
        u"\u017D": u"\u005A",
        u"\u017E": u"\u007A",
        u"\u017F": u"s",
        # Big Gap, and scattered from here
        u"\u01A0": u"\u004F",
        u"\u01A1": u"\u006F",
        u"\u01AF": u"\u0055",
        u"\u01B0": u"\u0075",
        u"\u01C4": u"\u0044",
        u"\u01C5": u"\u0044",
        u"\u01C6": u"\u0064",
        u"\u01C7": u"\u004C",
        u"\u01C8": u"\u004C",
        u"\u01C9": u"\u006C",
        u"\u01CA": u"\u004E",
        u"\u01CB": u"\u004E",
        u"\u01CC": u"\u006E",
        u"\u01CD": u"\u0041",
        u"\u01CE": u"\u0061",
        u"\u01CF": u"\u0049",
        u"\u01D0": u"\u0069",
        u"\u01D1": u"\u004F",
        u"\u01D2": u"\u006F",
        u"\u01D3": u"\u0055",
        u"\u01D4": u"\u0075",
        u"\u01D5": u"\u0055",
        u"\u01D6": u"\u0075",
        u"\u01D7": u"\u0055",
        u"\u01D8": u"\u0075",
        u"\u01D9": u"\u0055",
        u"\u01DA": u"\u0075",
        u"\u01DB": u"\u0055",
        u"\u01DC": u"\u0075",
        u"\u01DE": u"\u0041",
        u"\u01DF": u"\u0061",
        u"\u01E0": u"\u0226",
        u"\u01E1": u"\u0227",
        u"\u01E2": u"\u00C6",
        u"\u01E3": u"\u00E6",
        u"\u01E6": u"\u0047",
        u"\u01E7": u"\u0067",
        u"\u01E8": u"\u004B",
        u"\u01E9": u"\u006B",
        u"\u01EA": u"\u004F",
        u"\u01EB": u"\u006F",
        u"\u01EC": u"\u004F",
        u"\u01ED": u"\u006F",
        u"\u01EE": u"\u01B7",
        u"\u01EF": u"\u0292",
        u"\u01F0": u"\u006A",
        u"\u01F1": u"\u0044",
        u"\u01F2": u"\u0044",
        u"\u01F3": u"\u0064",
        u"\u01F4": u"\u0047",
        u"\u01F5": u"\u0067",
        u"\u01F8": u"\u004E",
        u"\u01F9": u"\u006E",
        u"\u01FA": u"\u0041",
        u"\u01FB": u"\u0061",
        u"\u01FC": u"\u00C6",
        u"\u01FD": u"\u00E6",
        u"\u01FE": u"\u00D8",
        u"\u01FF": u"\u00F8",
        u"\u0200": u"\u0041",
        u"\u0201": u"\u0061",
        u"\u0202": u"\u0041",
        u"\u0203": u"\u0061",
        u"\u0204": u"\u0045",
        u"\u0205": u"\u0065",
        u"\u0206": u"\u0045",
        u"\u0207": u"\u0065",
        u"\u0208": u"\u0049",
        u"\u0209": u"\u0069",
        u"\u020A": u"\u0049",
        u"\u020B": u"\u0069",
        u"\u020C": u"\u004F",
        u"\u020D": u"\u006F",
        u"\u020E": u"\u004F",
        u"\u020F": u"\u006F",
        u"\u0210": u"\u0052",
        u"\u0211": u"\u0072",
        u"\u0212": u"\u0052",
        u"\u0213": u"\u0072",
        u"\u0214": u"\u0055",
        u"\u0215": u"\u0075",
        u"\u0216": u"\u0055",
        u"\u0217": u"\u0075",
        u"\u0218": u"\u0053",
        u"\u0219": u"\u0073",
        u"\u021A": u"\u0054",
        u"\u021B": u"\u0074",
        u"\u021E": u"\u0048",
        u"\u021F": u"\u0068",
        u"\u0226": u"\u0041",
        u"\u0227": u"\u0061",
        u"\u0228": u"\u0045",
        u"\u0229": u"\u0065",
        u"\u022A": u"\u004F",
        u"\u022B": u"\u006F",
        u"\u022C": u"\u004F",
        u"\u022D": u"\u006F",
        u"\u022E": u"\u004F",
        u"\u022F": u"\u006F",
        u"\u0230": u"\u004F",
        u"\u0231": u"\u006F",
        u"\u0232": u"\u0059",
        u"\u0233": u"\u0079",
        u"\u1E00": u"\u0041",
        u"\u1E01": u"\u0061",
        u"\u1E02": u"\u0042",
        u"\u1E03": u"\u0062",
        u"\u1E04": u"\u0042",
        u"\u1E05": u"\u0062",
        u"\u1E06": u"\u0042",
        u"\u1E07": u"\u0062",
        u"\u1E08": u"\u0043",
        u"\u1E09": u"\u0063",
        u"\u1E0A": u"\u0044",
        u"\u1E0B": u"\u0064",
        u"\u1E0C": u"\u0044",
        u"\u1E0D": u"\u0064",
        u"\u1E0E": u"\u0044",
        u"\u1E0F": u"\u0064",
        u"\u1E10": u"\u0044",
        u"\u1E11": u"\u0064",
        u"\u1E12": u"\u0044",
        u"\u1E13": u"\u0064",
        u"\u1E14": u"\u0045",
        u"\u1E15": u"\u0065",
        u"\u1E16": u"\u0045",
        u"\u1E17": u"\u0065",
        u"\u1E18": u"\u0045",
        u"\u1E19": u"\u0065",
        u"\u1E1A": u"\u0045",
        u"\u1E1B": u"\u0065",
        u"\u1E1C": u"\u0045",
        u"\u1E1D": u"\u0065",
        u"\u1E1E": u"\u0046",
        u"\u1E1F": u"\u0066",
        u"\u1E20": u"\u0047",
        u"\u1E21": u"\u0067",
        u"\u1E22": u"\u0048",
        u"\u1E23": u"\u0068",
        u"\u1E24": u"\u0048",
        u"\u1E25": u"\u0068",
        u"\u1E26": u"\u0048",
        u"\u1E27": u"\u0068",
        u"\u1E28": u"\u0048",
        u"\u1E29": u"\u0068",
        u"\u1E2A": u"\u0048",
        u"\u1E2B": u"\u0068",
        u"\u1E2C": u"\u0049",
        u"\u1E2D": u"\u0069",
        u"\u1E2E": u"\u0049",
        u"\u1E2F": u"\u0069",
        u"\u1E30": u"\u004B",
        u"\u1E31": u"\u006B",
        u"\u1E32": u"\u004B",
        u"\u1E33": u"\u006B",
        u"\u1E34": u"\u004B",
        u"\u1E35": u"\u006B",
        u"\u1E36": u"\u004C",
        u"\u1E37": u"\u006C",
        u"\u1E38": u"\u004C",
        u"\u1E39": u"\u006C",
        u"\u1E3A": u"\u004C",
        u"\u1E3B": u"\u006C",
        u"\u1E3C": u"\u004C",
        u"\u1E3D": u"\u006C",
        u"\u1E3E": u"\u004D",
        u"\u1E3F": u"\u006D",
        u"\u1E40": u"\u004D",
        u"\u1E41": u"\u006D",
        u"\u1E42": u"\u004D",
        u"\u1E43": u"\u006D",
        u"\u1E44": u"\u004E",
        u"\u1E45": u"\u006E",
        u"\u1E46": u"\u004E",
        u"\u1E47": u"\u006E",
        u"\u1E48": u"\u004E",
        u"\u1E49": u"\u006E",
        u"\u1E4A": u"\u004E",
        u"\u1E4B": u"\u006E",
        u"\u1E4C": u"\u004F",
        u"\u1E4D": u"\u006F",
        u"\u1E4E": u"\u004F",
        u"\u1E4F": u"\u006F",
        u"\u1E50": u"\u004F",
        u"\u1E51": u"\u006F",
        u"\u1E52": u"\u004F",
        u"\u1E53": u"\u006F",
        u"\u1E54": u"\u0050",
        u"\u1E55": u"\u0070",
        u"\u1E56": u"\u0050",
        u"\u1E57": u"\u0070",
        u"\u1E58": u"\u0052",
        u"\u1E59": u"\u0072",
        u"\u1E5A": u"\u0052",
        u"\u1E5B": u"\u0072",
        u"\u1E5C": u"\u0052",
        u"\u1E5D": u"\u0072",
        u"\u1E5E": u"\u0052",
        u"\u1E5F": u"\u0072",
        u"\u1E60": u"\u0053",
        u"\u1E61": u"\u0073",
        u"\u1E62": u"\u0053",
        u"\u1E63": u"\u0073",
        u"\u1E64": u"\u0053",
        u"\u1E65": u"\u0073",
        u"\u1E66": u"\u0053",
        u"\u1E67": u"\u0073",
        u"\u1E68": u"\u0053",
        u"\u1E69": u"\u0073",
        u"\u1E6A": u"\u0054",
        u"\u1E6B": u"\u0074",
        u"\u1E6C": u"\u0054",
        u"\u1E6D": u"\u0074",
        u"\u1E6E": u"\u0054",
        u"\u1E6F": u"\u0074",
        u"\u1E70": u"\u0054",
        u"\u1E71": u"\u0074",
        u"\u1E72": u"\u0055",
        u"\u1E73": u"\u0075",
        u"\u1E74": u"\u0055",
        u"\u1E75": u"\u0075",
        u"\u1E76": u"\u0055",
        u"\u1E77": u"\u0075",
        u"\u1E78": u"\u0055",
        u"\u1E79": u"\u0075",
        u"\u1E7A": u"\u0055",
        u"\u1E7B": u"\u0075",
        u"\u1E7C": u"\u0056",
        u"\u1E7D": u"\u0076",
        u"\u1E7E": u"\u0056",
        u"\u1E7F": u"\u0076",
        u"\u1E80": u"\u0057",
        u"\u1E81": u"\u0077",
        u"\u1E82": u"\u0057",
        u"\u1E83": u"\u0077",
        u"\u1E84": u"\u0057",
        u"\u1E85": u"\u0077",
        u"\u1E86": u"\u0057",
        u"\u1E87": u"\u0077",
        u"\u1E88": u"\u0057",
        u"\u1E89": u"\u0077",
        u"\u1E8A": u"\u0058",
        u"\u1E8B": u"\u0078",
        u"\u1E8C": u"\u0058",
        u"\u1E8D": u"\u0078",
        u"\u1E8E": u"\u0059",
        u"\u1E8F": u"\u0079",
        u"\u1E90": u"\u005A",
        u"\u1E91": u"\u007A",
        u"\u1E92": u"\u005A",
        u"\u1E93": u"\u007A",
        u"\u1E94": u"\u005A",
        u"\u1E95": u"\u007A",
        u"\u1E96": u"\u0068",
        u"\u1E97": u"\u0074",
        u"\u1E98": u"\u0077",
        u"\u1E99": u"\u0079",
        u"\u1E9A": u"\u0061",
        u"\u1E9B": u"\u017F",
        u"\u1EA0": u"\u0041",
        u"\u1EA1": u"\u0061",
        u"\u1EA2": u"\u0041",
        u"\u1EA3": u"\u0061",
        u"\u1EA4": u"\u0041",
        u"\u1EA5": u"\u0061",
        u"\u1EA6": u"\u0041",
        u"\u1EA7": u"\u0061",
        u"\u1EA8": u"\u0041",
        u"\u1EA9": u"\u0061",
        u"\u1EAA": u"\u0041",
        u"\u1EAB": u"\u0061",
        u"\u1EAC": u"\u0041",
        u"\u1EAD": u"\u0061",
        u"\u1EAE": u"\u0041",
        u"\u1EAF": u"\u0061",
        u"\u1EB0": u"\u0041",
        u"\u1EB1": u"\u0061",
        u"\u1EB2": u"\u0041",
        u"\u1EB3": u"\u0061",
        u"\u1EB4": u"\u0041",
        u"\u1EB5": u"\u0061",
        u"\u1EB6": u"\u0041",
        u"\u1EB7": u"\u0061",
        u"\u1EB8": u"\u0045",
        u"\u1EB9": u"\u0065",
        u"\u1EBA": u"\u0045",
        u"\u1EBB": u"\u0065",
        u"\u1EBC": u"\u0045",
        u"\u1EBD": u"\u0065",
        u"\u1EBE": u"\u0045",
        u"\u1EBF": u"\u0065",
        u"\u1EC0": u"\u0045",
        u"\u1EC1": u"\u0065",
        u"\u1EC2": u"\u0045",
        u"\u1EC3": u"\u0065",
        u"\u1EC4": u"\u0045",
        u"\u1EC5": u"\u0065",
        u"\u1EC6": u"\u0045",
        u"\u1EC7": u"\u0065",
        u"\u1EC8": u"\u0049",
        u"\u1EC9": u"\u0069",
        u"\u1ECA": u"\u0049",
        u"\u1ECB": u"\u0069",
        u"\u1ECC": u"\u004F",
        u"\u1ECD": u"\u006F",
        u"\u1ECE": u"\u004F",
        u"\u1ECF": u"\u006F",
        u"\u1ED0": u"\u004F",
        u"\u1ED1": u"\u006F",
        u"\u1ED2": u"\u004F",
        u"\u1ED3": u"\u006F",
        u"\u1ED4": u"\u004F",
        u"\u1ED5": u"\u006F",
        u"\u1ED6": u"\u004F",
        u"\u1ED7": u"\u006F",
        u"\u1ED8": u"\u004F",
        u"\u1ED9": u"\u006F",
        u"\u1EDA": u"\u004F",
        u"\u1EDB": u"\u006F",
        u"\u1EDC": u"\u004F",
        u"\u1EDD": u"\u006F",
        u"\u1EDE": u"\u004F",
        u"\u1EDF": u"\u006F",
        u"\u1EE0": u"\u004F",
        u"\u1EE1": u"\u006F",
        u"\u1EE2": u"\u004F",
        u"\u1EE3": u"\u006F",
        u"\u1EE4": u"\u0055",
        u"\u1EE5": u"\u0075",
        u"\u1EE6": u"\u0055",
        u"\u1EE7": u"\u0075",
        u"\u1EE8": u"\u0055",
        u"\u1EE9": u"\u0075",
        u"\u1EEA": u"\u0055",
        u"\u1EEB": u"\u0075",
        u"\u1EEC": u"\u0055",
        u"\u1EED": u"\u0075",
        u"\u1EEE": u"\u0055",
        u"\u1EEF": u"\u0075",
        u"\u1EF0": u"\u0055",
        u"\u1EF1": u"\u0075",
        u"\u1EF2": u"\u0059",
        u"\u1EF3": u"\u0079",
        u"\u1EF4": u"\u0059",
        u"\u1EF5": u"\u0079",
        u"\u1EF6": u"\u0059",
        u"\u1EF7": u"\u0079",
        u"\u1EF8": u"\u0059",
        u"\u1EF9": u"\u0079"
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)

    def process_string(self, session, data):
        """Replace each mapped character in `data` by its ASCII approximation.

        Unmapped characters pass through unchanged.  Returns None when
        `data` is empty/falsy (preserved historic behaviour).
        """
        if not data:
            return None
        m = self.map
        # With scarcity of diacritics, this is faster than try/except
        return ''.join([m[c] if c in m else c for c in data])
| 33.134856 | 79 | 0.455231 | 4,943 | 44,964 | 4.102367 | 0.233461 | 0.034717 | 0.012082 | 0.031068 | 0.207318 | 0.176793 | 0.151297 | 0.127626 | 0.10001 | 0.086646 | 0 | 0.129748 | 0.382929 | 44,964 | 1,356 | 80 | 33.159292 | 0.601096 | 0.095121 | 0 | 0.212938 | 0 | 0.001797 | 0.229401 | 0.005109 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043127 | false | 0.008086 | 0.007188 | 0.006289 | 0.140162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514e96ff8378de7f403d3d5b87b39b9cacaa544f | 1,976 | py | Python | tests/tests_auth.py | wolfram74/flask_exploration | 6c83eee93830792969b8c6b4dbbbf6708c08ef9d | [
"MIT"
] | null | null | null | tests/tests_auth.py | wolfram74/flask_exploration | 6c83eee93830792969b8c6b4dbbbf6708c08ef9d | [
"MIT"
] | null | null | null | tests/tests_auth.py | wolfram74/flask_exploration | 6c83eee93830792969b8c6b4dbbbf6708c08ef9d | [
"MIT"
] | null | null | null | import unittest
from flask.ext.testing import TestCase
from project import app, db
from project.models import User, BlogPost
from base import BaseTestCase
class FlaskTestCase(BaseTestCase):
# def setUp(self):
# self.tester = app.test_client(self)
# self.good_cred = dict(username='admin', password='admin')
# self.bad_cred = dict(username='buttts', password='farts')
def test_login_good_auth(self):
response = self.client.post(
'/login', data=self.good_cred,
follow_redirects= True)
self.assertIn(b'log in successful', response.data)
self.assertEqual(response.status_code, 200)
def test_login_bad_auth(self):
response = self.client.post('/login', data=self.bad_cred, follow_redirects= True)
self.assertIn(b'Invalid credentials', response.data)
self.assertEqual(response.status_code, 200)
def test_logout_valid(self):
self.client.post(
'/login', data=self.good_cred,
follow_redirects= True)
response = self.client.get('/logout', content_type = 'html/text',follow_redirects= True)
self.assertEqual(response.status_code, 200)
self.assertIn('successful', response.data)
def test_logout_protected(self):
response = self.client.get('/logout', content_type = 'html/text',follow_redirects= True)
self.assertEqual(response.status_code, 200)
self.assertIn('Please log in', response.data)
def test_home_valid_if_authed(self):
self.client.post(
'/login', data=self.good_cred,
follow_redirects= True)
response = self.client.get('/', content_type = 'html/text')
self.assertEqual(response.status_code, 200)
def test_home_protected(self):
response = self.client.get('/', content_type = 'html/text', follow_redirects=True)
self.assertIn('Please log in', response.data)
if __name__== '__main__':
unittest.main()
| 37.283019 | 96 | 0.671053 | 243 | 1,976 | 5.263374 | 0.26749 | 0.062549 | 0.103987 | 0.089914 | 0.656763 | 0.656763 | 0.636435 | 0.555903 | 0.45817 | 0.412041 | 0 | 0.009603 | 0.209514 | 1,976 | 52 | 97 | 38 | 0.809219 | 0.091093 | 0 | 0.447368 | 0 | 0 | 0.087102 | 0 | 0 | 0 | 0 | 0 | 0.263158 | 1 | 0.157895 | false | 0 | 0.131579 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
514fd7037382f0059f2a8f6642dd4dde72119752 | 1,024 | py | Python | calendar_manager/accounts/views/signin.py | greyhub/calendar_manager | 837a0d938fdd58684279fb32f8a50805bb306fe1 | [
"MIT"
] | 6 | 2021-11-23T19:52:55.000Z | 2022-03-30T13:45:05.000Z | calendar_manager/accounts/views/signin.py | greyhub/calendar_manager | 837a0d938fdd58684279fb32f8a50805bb306fe1 | [
"MIT"
] | null | null | null | calendar_manager/accounts/views/signin.py | greyhub/calendar_manager | 837a0d938fdd58684279fb32f8a50805bb306fe1 | [
"MIT"
] | 1 | 2021-11-23T10:14:54.000Z | 2021-11-23T10:14:54.000Z | from django.views.generic import View
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from accounts.forms import SignInForm
class SignInView(View):
""" User registration view """
template_name = 'accounts/signin.html'
form_class = SignInForm
def get(self, request, *args, **kwargs):
forms = self.form_class()
context = {
'form': forms
}
return render(request, self.template_name, context)
def post(self, request, *args, **kwargs):
forms = self.form_class(request.POST)
if forms.is_valid():
email = forms.cleaned_data['email']
password = forms.cleaned_data['password']
user = authenticate(email=email, password=password)
if user:
login(request, user)
return redirect('events_calendar:calendar')
context = {
'form': forms
}
return render(request, self.template_name, context) | 32 | 63 | 0.621094 | 110 | 1,024 | 5.690909 | 0.390909 | 0.047923 | 0.047923 | 0.067093 | 0.309904 | 0.309904 | 0.309904 | 0.309904 | 0.185304 | 0.185304 | 0 | 0 | 0.27832 | 1,024 | 32 | 64 | 32 | 0.847091 | 0.021484 | 0 | 0.230769 | 0 | 0 | 0.065327 | 0.024121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0.076923 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5150d680a6e92b183496e1677c8274249125d1ee | 3,794 | py | Python | magmap/settings/logs.py | kaparna126/magellanmapper | 6a50e82b3bcdbbb4706f749f366b055f0c6f13f2 | [
"BSD-3-Clause"
] | null | null | null | magmap/settings/logs.py | kaparna126/magellanmapper | 6a50e82b3bcdbbb4706f749f366b055f0c6f13f2 | [
"BSD-3-Clause"
] | null | null | null | magmap/settings/logs.py | kaparna126/magellanmapper | 6a50e82b3bcdbbb4706f749f366b055f0c6f13f2 | [
"BSD-3-Clause"
] | null | null | null | # MagellanMapper logging
"""Logging utilities."""
import logging
from logging import handlers
import pathlib
class LogWriter:
"""File-like object to write standard output to logging functions.
Attributes:
fn_logging (func): Logging function
buffer (list[str]): String buffer.
"""
def __init__(self, fn_logging):
"""Create a writer for a logging function."""
self.fn_logger = fn_logging
self.buffer = []
def write(self, msg):
"""Write to logging function with buffering.
Args:
msg (str): Line to write, from which trailing newlines will be
removed.
"""
if msg.endswith("\n"):
# remove trailing newlines in buffer and pass to logging function
self.buffer.append(msg.rstrip("\n"))
self.fn_logger("".join(self.buffer))
self.buffer = []
else:
self.buffer.append(msg)
def flush(self):
"""Empty function, deferring to logging handler's flush."""
pass
def setup_logger():
"""Set up a basic root logger with a stream handler.
Returns:
:class:`logging.Logger`: Root logger for the application.
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# set up handler for console
handler_stream = logging.StreamHandler()
handler_stream.setLevel(logging.INFO)
handler_stream.setFormatter(logging.Formatter(
"%(name)s - %(levelname)s - %(message)s"))
logger.addHandler(handler_stream)
return logger
def update_log_level(logger, level):
"""Update the logging level.
Args:
logger (:class:`logging.Logger`): Logger to update.
level (Union[str, int]): Level given either as a string corresponding
to ``Logger`` levels, or their corresponding integers, ranging
from 0 (``NOTSET``) to 50 (``CRITICAL``). For convenience,
values can be given from 0-5, which will be multiplied by 10.
Returns:
:class:`logging.Logger`: The logger for chained calls.
"""
if isinstance(level, str):
# specify level by level name
level = level.upper()
elif isinstance(level, int):
# specify by level integer (0-50)
if level < 10:
# for convenience, assume values under 10 are 10-fold
level *= 10
else:
return
try:
# set level for the logger and all its handlers
logger.setLevel(level)
for handler in logger.handlers:
handler.setLevel(level)
except (TypeError, ValueError) as e:
logger.error(e, exc_info=True)
return logger
def add_file_handler(logger, path, backups=5):
"""Add a rotating log file handler with a new log file.
Args:
logger (:class:`logging.Logger`): Logger to update.
path (str): Path to log.
backups (int): Number of backups to maintain; defaults to 5.
Returns:
:class:`logging.Logger`: The logger for chained calls.
"""
# check if log file already exists
pathl = pathlib.Path(path)
roll = pathl.is_file()
# create a rotations file handler to manage number of backups while
# manually managing rollover based on file presence rather than size
pathl.parent.mkdir(parents=True, exist_ok=True)
handler_file = handlers.RotatingFileHandler(path, backupCount=backups)
handler_file.setLevel(logger.level)
handler_file.setFormatter(logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(handler_file)
if roll:
# create a new log file if exists, backing up the old one
handler_file.doRollover()
return logger
| 29.874016 | 77 | 0.624671 | 463 | 3,794 | 5.062635 | 0.358531 | 0.021331 | 0.038396 | 0.031997 | 0.116894 | 0.116894 | 0.116894 | 0.116894 | 0.081058 | 0 | 0 | 0.007305 | 0.278334 | 3,794 | 126 | 78 | 30.111111 | 0.848795 | 0.457828 | 0 | 0.134615 | 0 | 0 | 0.050976 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0.019231 | 0.057692 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5151eb6104a2efda27edf90bc5d40eacc7c63499 | 3,322 | py | Python | src/training/har_train.py | sanglee/MC-ATON | 8393cdb20957bf2fe11633c062aa7979ca389cc4 | [
"Apache-2.0"
] | null | null | null | src/training/har_train.py | sanglee/MC-ATON | 8393cdb20957bf2fe11633c062aa7979ca389cc4 | [
"Apache-2.0"
] | null | null | null | src/training/har_train.py | sanglee/MC-ATON | 8393cdb20957bf2fe11633c062aa7979ca389cc4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Created : 2021/10/28 17:12
# @Author : Junhyung Kwon
# @Site :
# @File : har_train.py
# @Software : PyCharm
import os
import torch
import torch.nn.functional as F
from torch import nn, optim
from torch.optim.lr_scheduler import MultiStepLR
from tqdm.auto import tqdm
from attacker import LinfPGDAttack
from data import uci_har
from models import HARClassifier
from training import structured_har
from utils import AverageMeter
def train_iter(model, optimizer, criterion, data_loader, device, mode=0, comp_ratio=0.):
model.train()
iteration_loss = AverageMeter()
for i, (X, y) in enumerate(data_loader):
X, y = X.to(device), y.to(device)
output = model(X)
loss = criterion(output, y.long())
iteration_loss.update(loss.item(), X.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if comp_ratio > 0:
idxs, lams = structured_har(model, comp_ratio)
if comp_ratio > 0:
idxs, lams = structured_har(model, comp_ratio)
return iteration_loss.avg
def test_iter(model, data_loader, device, check_adv=False, epsilon=0.3, alpha=0.0073, k=7):
model.eval()
normal_acc = AverageMeter()
if check_adv:
adv = LinfPGDAttack(model, epsilon=epsilon, alpha=alpha, k=k)
off_acc = AverageMeter()
for i, (X, y) in enumerate(data_loader):
X, y = X.to(device), y.to(device)
output = model(X)
out = F.softmax(output, dim=1)
_, predicted = out.max(1)
idx = predicted.eq(y)
acc = idx.sum().item() / X.size(0)
normal_acc.update(acc)
if check_adv:
adv_x = adv.perturb(X, y.long())
out = model(adv_x)
out = F.softmax(out, dim=1)
_, predicted = out.max(1)
idx = predicted.eq(y)
acc = idx.sum().item() / X.size(0)
off_acc.update(acc)
if check_adv:
return normal_acc.avg, off_acc.avg
else:
return normal_acc.avg
def har_train(cuda_num=4, EPOCH=100, save_interval=5, resume=True, comp_ratio=0., model_dir='./simulation/HAR_UCI/'):
if not os.path.exists(model_dir):
os.mkdir(model_dir)
train_loader, test_loader = uci_har('/workspace/Dataset/TSData/uci_data/np/')
device = 'cuda:%d' % cuda_num
model = HARClassifier()
model.to(device)
if resume:
model.load_state_dict(torch.load(os.path.join(model_dir, 'comp0_0-model-epoch{}.pt'.format(99))))
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.91)
scheduler = MultiStepLR(optimizer, milestones=[55, 75, 90], gamma=0.1)
criterion = nn.CrossEntropyLoss()
for epoch in tqdm(range(EPOCH)):
train_loss = train_iter(model, optimizer, criterion, train_loader, device, comp_ratio)
val_acc = test_iter(model, test_loader, device)
scheduler.step()
print('Epoch {}\tTrain loss: {:.4f}\tValidation accuracy: {:.4f}'.format(epoch + 1, train_loss, val_acc))
if (epoch + 1) % save_interval == 0:
torch.save(model.state_dict(),
os.path.join(model_dir,
'comp{}-model-epoch{}.pt'.format(str(comp_ratio * 100).replace('.', '_'), epoch)))
| 30.477064 | 118 | 0.621915 | 460 | 3,322 | 4.345652 | 0.336957 | 0.036018 | 0.02001 | 0.015008 | 0.244122 | 0.194097 | 0.172086 | 0.172086 | 0.172086 | 0.172086 | 0 | 0.025549 | 0.245936 | 3,322 | 108 | 119 | 30.759259 | 0.772455 | 0.048164 | 0 | 0.256757 | 0 | 0 | 0.054517 | 0.033597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.148649 | 0 | 0.22973 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51523224ed1784558c61cf16ca3f354c9e20fd86 | 1,741 | py | Python | brainTeasers/numberOfLines/unitTests.py | christopher-henderson/Experiments | d970249a4895424cb5ff5b557948f3a4bda4879c | [
"MIT"
] | null | null | null | brainTeasers/numberOfLines/unitTests.py | christopher-henderson/Experiments | d970249a4895424cb5ff5b557948f3a4bda4879c | [
"MIT"
] | null | null | null | brainTeasers/numberOfLines/unitTests.py | christopher-henderson/Experiments | d970249a4895424cb5ff5b557948f3a4bda4879c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from functools import wraps
from lines import getNumLines
assertion = 0
exception = 0
passed = 0
def Test(function):
@wraps(function)
def wrapper(*args, **kwargs):
global assertion
global exception
global passed
try:
function(*args, **kwargs)
except Exception as e:
if isinstance(e, AssertionError):
assertion += 1
print ("ASSERTION: {FUNC}: {ERROR}".format(FUNC=function.__name__, ERROR=e))
else:
exception += 1
print ("EXCEPTION: {FUNC}: {ERROR}".format(FUNC=function.__name__, ERROR=e))
else:
print ("PASSED: {FUNC}".format(FUNC=function.__name__))
return wrapper
@Test
def empty():
assert getNumLines(()) is 0
@Test
def one():
assert getNumLines(((1,1),)) is 1
@Test
def axis():
assert getNumLines(((0,1),)) is 1
assert getNumLines(((0,1), (0,-1))) is 1
assert getNumLines(((1,0),)) is 1
assert getNumLines(((1,0), (-1,0))) is 1
@Test
def origin():
assert getNumLines(((0,0),)) is 1
assert getNumLines(((0,0), (0,0), (0,0), (0,0))) is 1
assert getNumLines(((0,0), (1,1))) is 1
assert getNumLines(((0,0), (1,1), (1,2))) is 2
@Test
def counterQuadrants():
assert getNumLines(((1,1), (-1,-1), (2,2), (-2,-2))) is 1
assert getNumLines(((-1,1), (1,-1))) is 1
assert getNumLines(((2,2), (-2,-1))) is 2
assert getNumLines(((-2,2), (2,-1))) is 2
@Test
def scientificInts():
assert getNumLines(((1+10**10000, 10**10000), (1, 1))) is 2
def main():
empty()
one()
axis()
origin()
counterQuadrants()
scientificInts()
main()
| 24.521127 | 92 | 0.570936 | 233 | 1,741 | 4.193133 | 0.2103 | 0.261003 | 0.073695 | 0.163767 | 0.332651 | 0.32958 | 0.212897 | 0.208802 | 0.08393 | 0 | 0 | 0.069839 | 0.25158 | 1,741 | 70 | 93 | 24.871429 | 0.679969 | 0.011488 | 0 | 0.135593 | 0 | 0 | 0.038372 | 0 | 0 | 0 | 0 | 0 | 0.338983 | 1 | 0.152542 | false | 0.050847 | 0.050847 | 0 | 0.220339 | 0.067797 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
515285c60dcab2bc55d78dce23f2882c85fc837c | 279 | py | Python | print-var.py | christoga/python | 1395b3177e7baf46677a7a7a4ae89d2488c6f0fa | [
"MIT"
] | 5 | 2015-11-15T19:08:31.000Z | 2015-11-27T02:34:28.000Z | print-var.py | christoga/python | 1395b3177e7baf46677a7a7a4ae89d2488c6f0fa | [
"MIT"
] | null | null | null | print-var.py | christoga/python | 1395b3177e7baf46677a7a7a4ae89d2488c6f0fa | [
"MIT"
] | null | null | null | my_name = 'Andre Christoga'
my_age = 11 # Really, this is my age
# my_height = 150 # Centimeter
# my_weight = 32 # Kilogram
# my_eyes = "Brown"
# my_teeth = "White"
print "Oh, Hello there."
print "My name is " + my_name
print "My age is 11"
print "I know, too young to code?"
| 23.25 | 36 | 0.670251 | 48 | 279 | 3.75 | 0.604167 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040909 | 0.21147 | 279 | 11 | 37 | 25.363636 | 0.777273 | 0.401434 | 0 | 0 | 0 | 0 | 0.503145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.666667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 2 |
5153b4798f2410a6aa6709c5a6328570a85d91d6 | 3,052 | py | Python | src/blip_sdk/extensions/artificial_intelligence/ai_model/ai_model_extension.py | mirlarof/blip-sdk-python | f958149b2524d4340eeafad8739a33db71df45ed | [
"MIT"
] | 2 | 2021-07-02T20:10:48.000Z | 2021-07-13T20:51:18.000Z | src/blip_sdk/extensions/artificial_intelligence/ai_model/ai_model_extension.py | mirlarof/blip-sdk-python | f958149b2524d4340eeafad8739a33db71df45ed | [
"MIT"
] | 3 | 2021-06-24T13:27:21.000Z | 2021-07-30T15:37:43.000Z | src/blip_sdk/extensions/artificial_intelligence/ai_model/ai_model_extension.py | mirlarof/blip-sdk-python | f958149b2524d4340eeafad8739a33db71df45ed | [
"MIT"
] | 3 | 2021-06-23T19:53:20.000Z | 2022-01-04T17:50:44.000Z | from lime_python import Command
from ...extension_base import ExtensionBase
from .content_type import ContentType
from .uri_templates import UriTemplates
class AIModelExtension(ExtensionBase):
    """Extension to handle Blip Analytics Services."""

    async def get_models_async(
        self,
        skip: int = 0,
        take: int = 100,
        ascending: bool = False,
        **kwargs
    ) -> Command:
        """Search in all trained and/or published models.

        Args:
            skip (int): number of models to skip over.
            take (int): maximum number of models to return.
            ascending (bool): whether to sort in ascending alphabetical order.
            kwargs: any other optional parameter not covered by the method.

        Returns:
            Command: Command response
        """
        query_params = {
            '$skip': skip,
            '$take': take,
            '$ascending': ascending,
            **kwargs
        }
        uri = self.build_resource_query(UriTemplates.MODELS, query_params)
        return await self.process_command_async(self.create_get_command(uri))

    async def get_model_async(self, id: str) -> Command:
        """Get a specific AI model.

        Args:
            id (str): Model id

        Returns:
            Command: Command response
        """
        model_uri = self.build_uri(UriTemplates.MODEL, id)
        command = self.create_get_command(model_uri)
        return await self.process_command_async(command)

    async def get_model_summary_async(self) -> Command:
        """Get the model summary.

        Returns:
            Command: Command response
        """
        command = self.create_get_command(UriTemplates.MODELS_SUMMARY)
        return await self.process_command_async(command)

    async def get_last_trained_or_published_model_async(self) -> Command:
        """Get the last trained or published model.

        Returns:
            Command: Command response
        """
        command = self.create_get_command(
            UriTemplates.LAST_TRAINED_OR_PUBLISH_MODEL
        )
        return await self.process_command_async(command)

    async def train_model_async(self) -> Command:
        """Train the model.

        Returns:
            Command: Command response
        """
        return await self.process_command_async(
            self.create_set_command(
                UriTemplates.MODELS,
                {},
                ContentType.MODEL_TRAINING
            )
        )

    async def publish_model_async(self, id: str) -> Command:
        """Publish an existing artificial intelligence model.

        Args:
            id (str): model id

        Returns:
            Command: Command response
        """
        publish_command = self.create_set_command(
            UriTemplates.MODELS,
            {'id': id},
            ContentType.MODEL_PUBLISHING
        )
        return await self.process_command_async(publish_command)
| 27.495495 | 79 | 0.564548 | 298 | 3,052 | 5.57047 | 0.281879 | 0.059639 | 0.075904 | 0.104819 | 0.414458 | 0.393976 | 0.266265 | 0.266265 | 0.266265 | 0.266265 | 0 | 0.002056 | 0.362385 | 3,052 | 110 | 80 | 27.745455 | 0.850976 | 0.014417 | 0 | 0.210526 | 0 | 0 | 0.0108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.070175 | 0 | 0.192982 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5155a6295dbb373ee9af2abbcd7551a82f2a7146 | 1,683 | py | Python | src/datajunction/console.py | DataJunction/datajunction | d2293255bb7df0e5144c7e448a0ca2b590b6c20f | [
"MIT"
] | null | null | null | src/datajunction/console.py | DataJunction/datajunction | d2293255bb7df0e5144c7e448a0ca2b590b6c20f | [
"MIT"
] | null | null | null | src/datajunction/console.py | DataJunction/datajunction | d2293255bb7df0e5144c7e448a0ca2b590b6c20f | [
"MIT"
] | null | null | null | """
DataJunction (DJ) is a metric repository.
Usage:
dj compile [REPOSITORY] [-f] [--loglevel=INFO] [--reload]
Actions:
compile Compile repository
Options:
-f, --force Force indexing. [default: false]
--loglevel=LEVEL Level for logging. [default: INFO]
--reload Watch for changes. [default: false]
Released under the MIT license.
(c) 2018 Beto Dealmeida <roberto@dealmeida.net>
"""
import asyncio
import logging
from pathlib import Path
from docopt import docopt
from datajunction import __version__
from datajunction.cli import compile as compile_
from datajunction.errors import DJException
from datajunction.utils import get_settings, setup_logging
_logger = logging.getLogger(__name__)
async def main() -> None:
    """
    Dispatch command.

    Parses the CLI arguments via docopt, configures logging, resolves the
    repository path (from settings when no REPOSITORY argument was given),
    and runs the requested action.
    """
    args = docopt(__doc__, version=__version__)
    setup_logging(args["--loglevel"])

    repo_arg = args["REPOSITORY"]
    repository = get_settings().repository if repo_arg is None else Path(repo_arg)

    try:
        if args["compile"]:
            try:
                await compile_.run(
                    repository,
                    args["--force"],
                    args["--reload"],
                )
            except DJException as exc:
                # Domain errors are reported, not propagated, so the CLI
                # exits cleanly.
                _logger.error(exc)
    except asyncio.CancelledError:
        _logger.info("Canceled")
def run() -> None:
    """
    Run the DJ CLI.

    Synchronous entry point: drives the async ``main`` coroutine and
    shuts down quietly on Ctrl-C.
    """
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        _logger.info("Stopping DJ")
# Allow direct execution of this module as a script.
if __name__ == "__main__":
    run()
| 23.054795 | 63 | 0.608437 | 168 | 1,683 | 5.892857 | 0.428571 | 0.064646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003344 | 0.289364 | 1,683 | 72 | 64 | 23.375 | 0.824415 | 0.276887 | 0 | 0.083333 | 0 | 0 | 0.068045 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.222222 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5156148de28f4156b72965f292cfcca3cbc248dc | 3,617 | py | Python | main.py | yappy2000d/MoodBot | b62fc96fb15aa6f7bc95696f7a1614f1f50614dd | [
"MIT"
] | null | null | null | main.py | yappy2000d/MoodBot | b62fc96fb15aa6f7bc95696f7a1614f1f50614dd | [
"MIT"
] | null | null | null | main.py | yappy2000d/MoodBot | b62fc96fb15aa6f7bc95696f7a1614f1f50614dd | [
"MIT"
] | null | null | null | import os
from ai import model, process, vectorizer
from youtube import comment, getid, video
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import MessageEvent, TextMessage, TextSendMessage
# LINE channel credentials are read from the environment;
# a missing variable raises KeyError at import time.
SECRET = os.environ['SECRET']
TOKEN = os.environ['TOKEN']
from flask import Flask, request, abort
import numpy as np
# Module-level singletons: the Flask web app plus the LINE bot API
# client and webhook handler built from the credentials above.
app = Flask('')
bot = LineBotApi(TOKEN)
handler = WebhookHandler(SECRET)
@app.route('/')
def home():
    """Root endpoint: return a static liveness string."""
    status = "I'm alive"
    return status
@app.route('/discord')
def discord():
    """``/discord`` endpoint: return a static greeting string."""
    greeting = 'Hello, World'
    return greeting
# Webhook endpoint registered with the LINE platform.
@app.route("/callback", methods=['POST'])
def callback():
    """Verify the LINE signature and dispatch the webhook events."""
    body = request.get_data(as_text=True)
    signature = request.headers['X-Line-Signature']
    app.logger.info("Request body: " + body)
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        # Reject requests whose signature does not match our channel secret.
        abort(400)
    return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle(event):
    """Handle an incoming LINE text message and dispatch bot commands.

    Supported commands:
        ``!comment <YouTube URL or id>`` -- classify every top-level comment
            of the video and reply with a sentiment report.
        ``!check <text>`` -- classify a single piece of text.
        ``!help`` / ``!intro`` -- static help / introduction replies.
    Any other message gets a hint to use ``!help``.
    """
    msg = event.message.text
    print(f"{event.source.user_id}, 說了{msg}")
    if msg.startswith("!comment"):
        cmts = []
        # Renamed from `id` to avoid shadowing the builtin.
        video_id = getid(msg.split(" ")[1])
        next_page = ''
        # Total number of top-level comments per the video's statistics.
        count = int(video(video_id)["items"][0]["statistics"]['commentCount'])
        while count > 0:
            items, next_page = comment(video_id, next_page)
            item = items["items"]
            # Defensive stop: an empty page would otherwise loop forever.
            if not item:
                break
            for cmt in item:
                cmts.append(
                    cmt["snippet"]["topLevelComment"]["snippet"]["textOriginal"])
            # BUGFIX: subtract the number of comments actually fetched.
            # The original `count -= 1` decremented once per *page*, so the
            # loop ran `commentCount` times, re-requesting pages and
            # duplicating comments once pagination wrapped around.
            count -= len(item)
        print(cmts)
        result = {0: 0, 1: 0}  # 0 = negative, 1 = positive (see `mood` below)
        for cmt_text in cmts:
            vec = vectorizer.transform([process(cmt_text)])
            pre = model.predict(vec)
            if pre[0] == np.int64(0):
                result[0] += 1
            elif pre[0] == np.int64(1):
                result[1] += 1
            else:
                # BUGFIX: a bare `raise` with no active exception produced an
                # opaque RuntimeError; raise a descriptive error instead.
                raise ValueError(f"unexpected prediction label: {pre[0]}")
        info = video(video_id)["items"][0]
        snippet = info["snippet"]
        name = snippet["title"]
        upload_time = snippet["publishedAt"]
        channel = snippet["channelTitle"]
        statistics = info["statistics"]
        view = statistics["viewCount"]
        like = statistics.get("likeCount", 0)
        dislike = statistics.get("dislikeCount", 0)
        # NOTE(review): len(cmts) == 0 would raise ZeroDivisionError below;
        # only reachable if commentCount > 0 but no comments were returned —
        # confirm against the youtube helper's behavior.
        text = "由:{}\n標題:\n{}\n上傳於:{}\n觀看次數:{}\n喜歡:{} 不喜歡:{}\n負面評價:{}則,{:.2f}%\n正面評價:{}則,{:.2f}%".format(
            channel, name,
            upload_time.split("T")[0], view, like, dislike, result[0],
            result[0] / len(cmts) * 100, result[1],
            result[1] / len(cmts) * 100)
        bot.reply_message(event.reply_token, TextSendMessage(text=text))
    elif msg.startswith("!check"):
        message = msg.split(" ")[1]
        vec = vectorizer.transform([process(message)])
        pre = model.predict(vec)
        mood = {0: "負面", 1: "正面"}
        text = mood[pre[0]]
        bot.reply_message(event.reply_token, TextSendMessage(text=text))
    elif msg == "!help":
        text = "!help |取得幫助\n!intro |介紹\n!comment {YT影片網址或ID} |影片評價\n!check {一段文字} |此文情緒\n輸入指令時,記得把\"{}\"去掉"
        bot.reply_message(event.reply_token, TextSendMessage(text=text))
    elif msg == "!intro":
        # BUGFIX: the original literal was missing its closing quote — a
        # syntax error that prevented the module from loading.
        text = "MoodBot 使用新浪微博之 weibo_senti_100k 資料庫,廣泛蒐集了正負向評論約各 5 萬條,並且搭載了 Multinomial Naive Bayes 機率模型做作為機器學習演算法,是一種監督式的分類演算法,對文字具有不錯的準確度(雖然說這隻得準確率 accuracy 只來到了 70%)。"
        bot.reply_message(event.reply_token, TextSendMessage(text=text))
    else:
        text = "輸入 !help 來取得幫助"
        bot.reply_message(event.reply_token, TextSendMessage(text=text))
# Start the development server, listening on all interfaces, port 8080.
app.run(host='0.0.0.0', port=8080)
| 31.72807 | 170 | 0.592756 | 423 | 3,617 | 5.021277 | 0.408983 | 0.018832 | 0.035311 | 0.047081 | 0.177966 | 0.177966 | 0.177966 | 0.177966 | 0.134652 | 0.084746 | 0 | 0.021277 | 0.259331 | 3,617 | 113 | 171 | 32.00885 | 0.771557 | 0.002212 | 0 | 0.123596 | 0 | 0.033708 | 0.148836 | 0.028271 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.089888 | null | null | 0.022472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5159402d19245aba61bb579f188067d4ee6bc977 | 274 | py | Python | vivit/extensions/secondorder/sqrt_ggn/dropout.py | PwLo3K46/vivit | 937642975be2ade122632d4eaef273461992d7ab | [
"MIT"
] | 1 | 2021-06-07T05:15:22.000Z | 2021-06-07T05:15:22.000Z | vivit/extensions/secondorder/sqrt_ggn/dropout.py | PwLo3K46/vivit | 937642975be2ade122632d4eaef273461992d7ab | [
"MIT"
] | 2 | 2021-08-10T12:45:37.000Z | 2021-08-10T12:49:51.000Z | vivit/extensions/secondorder/sqrt_ggn/dropout.py | PwLo3K46/vivit | 937642975be2ade122632d4eaef273461992d7ab | [
"MIT"
] | null | null | null | from backpack.core.derivatives.dropout import DropoutDerivatives
from vivit.extensions.secondorder.sqrt_ggn.sqrt_ggn_base import SqrtGGNBaseModule
class SqrtGGNDropout(SqrtGGNBaseModule):
def __init__(self):
super().__init__(derivatives=DropoutDerivatives())
| 30.444444 | 81 | 0.821168 | 28 | 274 | 7.642857 | 0.714286 | 0.065421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10219 | 274 | 8 | 82 | 34.25 | 0.869919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.4 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 4 |
5159b6cde26f7df203594b171f65dbf811705a8e | 1,286 | py | Python | phonon/registry.py | akellehe/phonon | 4b61fd6042af1bec7bc949bcc713a0dd0fcfcefb | [
"MIT"
] | 4 | 2015-03-30T22:46:35.000Z | 2020-09-08T02:03:53.000Z | phonon/registry.py | akellehe/phonon | 4b61fd6042af1bec7bc949bcc713a0dd0fcfcefb | [
"MIT"
] | 21 | 2015-02-03T23:12:36.000Z | 2017-09-15T21:03:24.000Z | phonon/registry.py | akellehe/phonon | 4b61fd6042af1bec7bc949bcc713a0dd0fcfcefb | [
"MIT"
] | 2 | 2016-08-14T20:18:52.000Z | 2019-09-30T16:02:22.000Z | import sys
import collections
import tornado
class Registry(object):
def __init__(self, max_entries=10000, ioloop=None):
self.models = collections.OrderedDict()
self.timeouts = {}
self.ioloop = ioloop or tornado.ioloop.IOLoop.current()
self.max_entries = max_entries
def register(self, model, *args, **kwargs):
if model.registry_key() in self.models:
self.models[model.registry_key()].merge(model)
self.ioloop.remove_timeout(self.timeouts[model.registry_key()])
else:
self.models[model.registry_key()] = model
self.timeouts[model.registry_key()] = self.ioloop.add_timeout(
model.TTL, self.on_expire, model, *args, **kwargs
)
def on_expire(self, model, *args, **kwargs):
del self.models[model.registry_key()]
del self.timeouts[model.registry_key()]
if not model.reference.dereference(callback=model.on_complete,
args=args,
kwargs=kwargs):
model.cache()
registry = Registry()
def configure(max_entries=10000):
global registry
registry = Registry(max_entries=max_entries)
def register(model):
registry.register(model)
| 28.577778 | 75 | 0.618974 | 144 | 1,286 | 5.375 | 0.305556 | 0.134367 | 0.144703 | 0.089147 | 0.289406 | 0.080103 | 0 | 0 | 0 | 0 | 0 | 0.010672 | 0.271384 | 1,286 | 44 | 76 | 29.227273 | 0.815368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.096774 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
515bc2e00165e4793cb2c05d11188ceed1d51545 | 1,672 | py | Python | project_template/account/permission.py | AdityaBhalsod/django-rest-api-template | ae530c9c246d074707e26d9c4d6c2f15177bd1f7 | [
"Apache-2.0"
] | 3 | 2020-11-04T19:34:47.000Z | 2021-06-30T04:13:55.000Z | project_template/account/permission.py | AdityaBhalsod/django-rest-api-template | ae530c9c246d074707e26d9c4d6c2f15177bd1f7 | [
"Apache-2.0"
] | null | null | null | project_template/account/permission.py | AdityaBhalsod/django-rest-api-template | ae530c9c246d074707e26d9c4d6c2f15177bd1f7 | [
"Apache-2.0"
] | 1 | 2021-01-31T19:30:59.000Z | 2021-01-31T19:30:59.000Z | # -*- coding: utf-8 -*-
from rest_framework import permissions
from account.models import BlackList
class BlacklistPermission(permissions.BasePermission):
"""
Global permission check for blacklisted IPs.
"""
def has_permission(self, request, view):
ip_address = request.META["REMOTE_ADDR"]
blacklisted = BlackList.objects.filter(ip_address=ip_address).exists()
return not blacklisted
class BaseModelPermissions(permissions.DjangoModelPermissions):
perms_map = {
'GET': ['%(app_label)s.view_%(model_name)s'],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
def has_object_permission(self, request, view, obj):
has_permission = super().has_permission(request, view)
if has_permission and view.action == 'retrieve':
return self._queryset(view).viewable().filter(pk=obj.pk).exists()
if has_permission and view.action == 'list':
return self._queryset(view).viewable().filter(pk=obj.pk).exists()
if has_permission and view.action == 'update':
return self._queryset(view).editable().filter(pk=obj.pk).exists()
if has_permission and view.action == 'partial_update':
return self._queryset(view).editable().filter(pk=obj.pk).exists()
if has_permission and view.action == 'destroy':
return self._queryset(view).deletable().filter(pk=obj.pk).exists()
return False | 35.574468 | 78 | 0.641746 | 198 | 1,672 | 5.232323 | 0.353535 | 0.100386 | 0.043436 | 0.086873 | 0.40251 | 0.38417 | 0.357143 | 0.30888 | 0.30888 | 0.30888 | 0 | 0.000757 | 0.209928 | 1,672 | 47 | 79 | 35.574468 | 0.783497 | 0.040072 | 0 | 0.133333 | 0 | 0 | 0.158491 | 0.106918 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
515c32b2748b2c0d803dfa8b97d5d1d27008566b | 1,498 | py | Python | 006_multiples.py | mkduer/code-nibbles | 3482b5159bc0fdc18079bf2de27a47a77ae4753a | [
"Apache-2.0"
] | null | null | null | 006_multiples.py | mkduer/code-nibbles | 3482b5159bc0fdc18079bf2de27a47a77ae4753a | [
"Apache-2.0"
] | null | null | null | 006_multiples.py | mkduer/code-nibbles | 3482b5159bc0fdc18079bf2de27a47a77ae4753a | [
"Apache-2.0"
] | null | null | null | from helpers import Helpers
import numpy as np
def multiples(numbers: [int]) -> [int]:
"""
Multiplies all of the values in the list excepting the value at the current index
e.g. original numbers = [4, 1, 6] returns the multiples = [6, 24, 4] where the
first value is the product of 1 * 6 and does not include 4.
:param numbers: the original list of numbers
:return: the list of multiplied values
"""
nonzero_indices = np.flatnonzero(numbers)
total_numbers = len(numbers)
total_non_zeros = nonzero_indices.size
# more than one zero
if total_numbers - total_non_zeros > 1:
return np.zeros(total_numbers, dtype=np.int16)
# if there are no zeros, divide each index from the total product
if total_numbers == total_non_zeros:
total_product = np.prod(numbers)
return (total_product / numbers).astype(int)
# one zero
total_product = np.zeros(total_numbers, dtype=np.int16)
np_numbers = np.asarray(numbers)
zero_index = np.where(np_numbers == 0)[0][0]
np_numbers[zero_index] = 1
multiplied_total = np.prod(np_numbers)
total_product[zero_index] = multiplied_total
return total_product
def main():
helper = Helpers()
randlist = helper.create_random_numbers_with_choice(6, 9)
print(f'original list:')
helper.multiline_print(randlist)
multiplied_values = multiples(randlist)
print(f'\nmultiplied values: {multiplied_values}')
if __name__ == '__main__':
main()
| 31.208333 | 85 | 0.695594 | 212 | 1,498 | 4.716981 | 0.367925 | 0.072 | 0.045 | 0.06 | 0.116 | 0.116 | 0.062 | 0 | 0 | 0 | 0 | 0.017812 | 0.212951 | 1,498 | 47 | 86 | 31.87234 | 0.830365 | 0.26502 | 0 | 0 | 0 | 0 | 0.058107 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.259259 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |