blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9f26cdfe8c9414fad6874292ee6a42386645ef4f | 75716fadd5592ac172188d3c9f04fff05ca9c453 | /csky/csky-elfabiv2/lib/big/ck807/libstdc++.a-gdb.py | fdc2c3c14d69a7ca6b35294ccacbf16109ad9857 | [] | no_license | yz-wulala/iServer01Bkup | 3f98983c98349f7d49d6408853d992dc82eaad1c | 1be06709fab95ca37d7de5911169a8aee2e05d34 | refs/heads/master | 2022-11-20T12:23:20.578411 | 2020-07-26T17:30:04 | 2020-07-26T17:30:04 | 270,225,610 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | # -*- python -*-
# Copyright (C) 2009-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
# Install-time paths baked in by the toolchain build; used only to recover
# the *relative* layout between the pretty-printer package and this library.
pythondir = '/ldhome/software/toolsbuild/slave/workspace/tools3_build_64_elf_v2/install/share/gcc-6.3.0/python'
libdir = '/ldhome/software/toolsbuild/slave/workspace/tools3_build_64_elf_v2/install/csky-elfabiv2/lib/big/ck807'

# This file might be loaded when there is no current objfile.  This
# can happen if the user loads it manually.  In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path.  We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.

    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])

    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.  (endswith also tolerates an empty
    # common prefix, where prefix[-1] would raise IndexError.)
    if not prefix.endswith('/'):
        prefix = os.path.dirname (prefix) + '/'

    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]

    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))

    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)

    if dir_ not in sys.path:
        sys.path.insert(0, dir_)

    # Call a function as a plain import would not execute body of the included file
    # on repeated reloads of this object file.
    from libstdcxx.v6 import register_libstdcxx_printers
    register_libstdcxx_printers(gdb.current_objfile())
| [
"yzjiang18@fudan.edu.cn"
] | yzjiang18@fudan.edu.cn |
d3a1c9a2af6d1dffcac8c7788fcb38022aba262c | 27b2a74627b185ea754f1145db0003cb2ab3344d | /06/solution.py | b6d3325272a2065724452a59a5367f467330c543 | [
"MIT"
] | permissive | lbenedix/aoc2020 | b39de1f61deb834b78beaa118c82953fd9120013 | fcb001bb6648c6db78cff0b3ef4e56baeef4decd | refs/heads/main | 2023-01-29T10:24:57.920779 | 2020-12-07T07:45:58 | 2020-12-07T07:45:58 | 317,469,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | from pathlib import Path
# Advent of Code 2020 day 6: groups of answer lines separated by blank lines.
# Part 1: count letters answered by ANYONE in each group.
# Part 2: count letters answered by EVERYONE in each group.
lines = Path('input').open().readlines()

part_1 = 0
group = []
all_groups = []
for line in lines:
    line = line.strip()
    if len(line) > 0:
        group.append(line)
    else:
        all_groups.append(group)
        distinct_letters = set(''.join(group))
        part_1 += len(distinct_letters)
        group = []
# BUG FIX: the last group is terminated by end-of-file, not a blank line,
# so it was previously dropped from both parts.  Flush it here.
if group:
    all_groups.append(group)
    part_1 += len(set(''.join(group)))

print('part1', part_1)

total = 0
for group in all_groups:
    # Tally how many people in the group gave each answer.
    counts = dict()
    for person in group:
        for a in person:
            counts[a] = counts.get(a, 0) + 1
    # A letter counts only if every member of the group answered it.
    for answer, count in counts.items():
        if count == len(group):
            total += 1

print('part2', total)
| [
"lbenedix@googlemail.com"
] | lbenedix@googlemail.com |
7c522e09e37bfa9cd52933f4b3a202340868c5d4 | 8c95e2185100db97f74d948407f9f6ac563905e5 | /metronotation/routemap.py | 8a6691a352602ddc2fcb031cd4e836d9009a1748 | [
"MIT"
] | permissive | kitao/metro-notation | c5fec21fccba4ef2a21c3294575fd29498ff8ebc | 34a9d2ca9fe17452c8eb5426636484f7cc29c605 | refs/heads/main | 2023-08-20T15:02:04.631092 | 2021-10-30T04:28:17 | 2021-10-30T04:28:17 | 321,700,124 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,037 | py | LAYER_TP = 0
# Cube layer identifiers (LAYER_TP defined just above): single slices
# (TP/MD/BT), double layers (TM/BM) and the whole cube (AL).
LAYER_MD = 1
LAYER_BT = 2
LAYER_TM = 3
LAYER_BM = 4
LAYER_AL = 5

# Unit steps on the drawing grid; y grows downwards (screen coordinates).
DIR_UP = (0, -1)
DIR_DN = (0, 1)
DIR_LT = (-1, 0)
DIR_RT = (1, 0)
DIR_LU = (-1, -1)
DIR_RD = (1, 1)

# Move-notation token -> (layer, direction, distance).  Distance 2 marks
# double turns ("R2"); a trailing apostrophe flips the drawing direction.
LETTER_TABLE = [
    ("R", (LAYER_TP, DIR_UP, 1)),
    ("M", (LAYER_MD, DIR_DN, 1)),
    ("L", (LAYER_BT, DIR_DN, 1)),
    ("U", (LAYER_TP, DIR_RT, 1)),
    ("E", (LAYER_MD, DIR_LT, 1)),
    ("D", (LAYER_BT, DIR_LT, 1)),
    ("F", (LAYER_TP, DIR_RD, 1)),
    ("S", (LAYER_MD, DIR_RD, 1)),
    ("B", (LAYER_BT, DIR_LU, 1)),
    #
    ("R2", (LAYER_TP, DIR_UP, 2)),
    ("M2", (LAYER_MD, DIR_DN, 2)),
    ("L2", (LAYER_BT, DIR_DN, 2)),
    ("U2", (LAYER_TP, DIR_RT, 2)),
    ("E2", (LAYER_MD, DIR_LT, 2)),
    ("D2", (LAYER_BT, DIR_LT, 2)),
    ("F2", (LAYER_TP, DIR_RD, 2)),
    ("S2", (LAYER_MD, DIR_RD, 2)),
    ("B2", (LAYER_BT, DIR_LU, 2)),
    #
    ("R'", (LAYER_TP, DIR_DN, 1)),
    ("M'", (LAYER_MD, DIR_UP, 1)),
    ("L'", (LAYER_BT, DIR_UP, 1)),
    ("U'", (LAYER_TP, DIR_LT, 1)),
    ("E'", (LAYER_MD, DIR_RT, 1)),
    ("D'", (LAYER_BT, DIR_RT, 1)),
    ("F'", (LAYER_TP, DIR_LU, 1)),
    ("S'", (LAYER_MD, DIR_LU, 1)),
    ("B'", (LAYER_BT, DIR_RD, 1)),
    #
    ("R2'", (LAYER_TP, DIR_DN, 2)),
    ("M2'", (LAYER_MD, DIR_UP, 2)),
    ("L2'", (LAYER_BT, DIR_UP, 2)),
    ("U2'", (LAYER_TP, DIR_LT, 2)),
    ("E2'", (LAYER_MD, DIR_RT, 2)),
    ("D2'", (LAYER_BT, DIR_RT, 2)),
    ("F2'", (LAYER_TP, DIR_LU, 2)),
    ("S2'", (LAYER_MD, DIR_LU, 2)),
    ("B2'", (LAYER_BT, DIR_RD, 2)),
    #
    ("Rw", (LAYER_TM, DIR_UP, 1)),
    ("Lw", (LAYER_BM, DIR_DN, 1)),
    ("Uw", (LAYER_TM, DIR_RT, 1)),
    ("Dw", (LAYER_BM, DIR_LT, 1)),
    ("Fw", (LAYER_TM, DIR_RD, 1)),
    ("Bw", (LAYER_BM, DIR_LU, 1)),
    #
    ("Rw2", (LAYER_TM, DIR_UP, 2)),
    ("Lw2", (LAYER_BM, DIR_DN, 2)),
    ("Uw2", (LAYER_TM, DIR_RT, 2)),
    ("Dw2", (LAYER_BM, DIR_LT, 2)),
    ("Fw2", (LAYER_TM, DIR_RD, 2)),
    ("Bw2", (LAYER_BM, DIR_LU, 2)),
    #
    ("Rw'", (LAYER_TM, DIR_DN, 1)),
    ("Lw'", (LAYER_BM, DIR_UP, 1)),
    ("Uw'", (LAYER_TM, DIR_LT, 1)),
    ("Dw'", (LAYER_BM, DIR_RT, 1)),
    ("Fw'", (LAYER_TM, DIR_LU, 1)),
    ("Bw'", (LAYER_BM, DIR_RD, 1)),
    #
    ("Rw2'", (LAYER_TM, DIR_DN, 2)),
    ("Lw2'", (LAYER_BM, DIR_UP, 2)),
    ("Uw2'", (LAYER_TM, DIR_LT, 2)),
    ("Dw2'", (LAYER_BM, DIR_RT, 2)),
    ("Fw2'", (LAYER_TM, DIR_LU, 2)),
    ("Bw2'", (LAYER_BM, DIR_RD, 2)),
    #
    ("x", (LAYER_AL, DIR_UP, 1)),
    ("x'", (LAYER_AL, DIR_DN, 1)),
    ("y", (LAYER_AL, DIR_RT, 1)),
    ("y'", (LAYER_AL, DIR_LT, 1)),
    ("z", (LAYER_AL, DIR_RD, 1)),
    ("z'", (LAYER_AL, DIR_LU, 1)),
]
# Longest tokens first so that prefix matching in Node.from_letters is
# greedy ("R2'" must win over "R2" and "R").
LETTER_TABLE.sort(key=lambda x: len(x[0]), reverse=True)

# Sticker codes; *F / *B presumably mean foreground/background variants of
# the six colours (R/O/B/G/W/Y) -- TODO confirm against the renderer.
CUBE_RF = 0
CUBE_OF = 1
CUBE_BF = 2
CUBE_GF = 3
CUBE_WF = 4
CUBE_YF = 5
CUBE_RB = 6
CUBE_OB = 7
CUBE_BB = 8
CUBE_GB = 9
CUBE_WB = 10
CUBE_YB = 11

# Letter -> sticker code; uppercase maps to *F, lowercase to *B.
CUBE_TABLE = {
    "R": CUBE_RF,
    "O": CUBE_OF,
    "B": CUBE_BF,
    "G": CUBE_GF,
    "W": CUBE_WF,
    "Y": CUBE_YF,
    "r": CUBE_RB,
    "o": CUBE_OB,
    "b": CUBE_BB,
    "g": CUBE_GB,
    "w": CUBE_WB,
    "y": CUBE_YB,
}
class Node:
    """One move of a route: a notation token plus its drawing parameters."""

    def __init__(self, letters, layer, direction, distance):
        self.letters = letters      # original token, e.g. "R2'"
        self.layer = layer          # a LAYER_* constant
        self.direction = direction  # (dx, dy) unit step on the grid
        self.distance = distance    # number of grid steps (1 or 2)
        # Filled in by Route.__init__ once the whole path is known.
        self.is_start_hit = False
        self.is_end_hit = False

    # FIX: was a plain function inside the class body; it only worked when
    # accessed through the class (Node.from_letters).  @staticmethod keeps
    # that call style working and also makes instance access safe.
    @staticmethod
    def from_letters(letters):
        """Parse the longest matching token from the head of *letters*.

        Returns (node, remainder); raises ValueError when no table entry
        matches (LETTER_TABLE is sorted longest-first, so matching is greedy).
        """
        for token, args in LETTER_TABLE:
            if letters.startswith(token):
                return Node(token, *args), letters[len(token):]
        raise ValueError
class Route:
    """A polyline built from move Nodes, validated so it never retraces
    or repeatedly crosses itself.

    Attributes set by __init__: nodes, width/height (bounding box),
    start_x/start_y (offset of the origin inside the bounding box).
    """

    def __init__(self, nodes):
        x = y = 0                      # current grid position
        min_x = min_y = 0
        max_x = max_y = 0
        route_count = {(0, 0): 1}      # visits per grid cell
        last_direction = (0, 0)
        last_layer = -1
        for node in nodes:
            # Reject (a) a repeated move on the same layer in the same
            # direction and (b) an immediate reversal (direction components
            # of consecutive moves summing to zero).
            if (
                node.direction == last_direction
                and node.layer == last_layer
                or node.direction[0] + last_direction[0] == 0
                and node.direction[1] + last_direction[1] == 0
            ):
                raise ValueError
            last_direction = node.direction
            last_layer = node.layer
            # Walk the move one grid step at a time, tracking the bounding
            # box and how often each cell is visited.
            for i in range(node.distance):
                x += node.direction[0]
                y += node.direction[1]
                min_x = min(x, min_x)
                min_y = min(y, min_y)
                max_x = max(x, max_x)
                max_y = max(y, max_y)
                if (x, y) in route_count:
                    route_count[(x, y)] += 1
                else:
                    route_count[(x, y)] = 1
        # A cell visited 3+ times, or 2+ times when it is neither the start
        # (0, 0) nor the final position (x, y), invalidates the route.
        for pos, count in route_count.items():
            if count >= 3 or count >= 2 and pos != (0, 0) and pos != (x, y):
                raise ValueError
        self.nodes = nodes
        self.width = max_x - min_x
        self.height = max_y - min_y
        self.start_x = -min_x
        self.start_y = -min_y
        # Mark whether the path re-visits its own start/end cell.
        nodes[0].is_start_hit = route_count[(0, 0)] > 1
        nodes[-1].is_end_hit = route_count[(x, y)] > 1

    # NOTE: defined without @staticmethod; only call as Route.from_letters(...).
    def from_letters(letters):
        """Parse *letters* into a validated Route.

        Re-raises any parse/validation failure as ValueError(letters) so the
        offending input string is visible to the caller.
        """
        try:
            nodes = []
            rest = letters
            while rest:
                node, rest = Node.from_letters(rest)
                nodes.append(node)
            route = Route(nodes)
        except ValueError:
            raise ValueError(letters)
        return route
class RouteMap:
    """A named collection of Routes laid out side by side, plus the cube
    sticker colours that accompany them."""

    def __init__(self, name, cube, routes):
        self.name = name
        self.cube = cube      # list of CUBE_* sticker codes
        self.routes = routes
        # Overall canvas: routes placed left-to-right, each vertically
        # centred within the tallest route's height.
        self.width = sum([route.width for route in routes])
        self.height = max([route.height for route in routes])
        for route in routes:
            # NOTE(review): true division can yield a half-step offset when
            # the height difference is odd -- confirm this is intended.
            route.start_y += (self.height - route.height) / 2

    # NOTE: defined without @staticmethod; only call as RouteMap.from_letters(...).
    def from_letters(name, cube, letters):
        """Build a RouteMap from an optional 21-char cube-colour string and
        whitespace-separated route letter groups.

        Raises ValueError (carrying the bad value) for a wrong-length cube
        string, an unknown colour letter, or an unparsable route.
        """
        if not cube:
            cube = "w" * 21   # default: all-white stickers
        elif len(cube) != 21:
            raise ValueError(cube)
        try:
            cube = [CUBE_TABLE[c] for c in cube]
        except KeyError:
            raise ValueError(cube)
        name = name or "no name"
        routes = [Route.from_letters(l) for l in letters.split()]
        return RouteMap(name, cube, routes)
| [
"takashi.kitao@gmail.com"
] | takashi.kitao@gmail.com |
49ec6ca3b4c19afae7c996dfc69df388f79a6d93 | d813d2fd748f534f75c55f0a174bb72bf872e47d | /tests/test_chrome_gdoc_pageend.sikuli/test_chrome_gdoc_pageend.py | 1d6fccab2f0400abfbdc8b5dec4498136e118c68 | [] | no_license | MikeLien/Hasal | 7889bf50f8b80281607f20723ca9baa75e67f26f | 8049a67ee514bc49809326c7d1d2e6da1f9dac52 | refs/heads/master | 2020-12-25T22:58:25.064393 | 2016-04-21T06:35:40 | 2016-04-21T06:35:40 | 55,055,080 | 0 | 0 | null | 2016-03-30T10:43:33 | 2016-03-30T10:43:33 | null | UTF-8 | Python | false | false | 338 | py | sys.path.append(sys.argv[2])
import browser
import common
import gdoc
# Sikuli test: open a Google Doc in Chrome and jump to the end of the page.
# `wait`, `type` and `Key` are Sikuli runtime builtins, not Python's --
# this script only runs inside the Sikuli IDE/runner.
com = common.General()
chrome = browser.Chrome()
gd = gdoc.gDoc()
chrome.clickBar()
chrome.enterLink("https://docs.google.com/document/d/1EpYUniwtLvBbZ4ECgT_vwGUfTHKnqSWi7vgNJQBemFk/edit?hl=en")
chrome.focus()
gd.wait_for_loaded()
wait(3)  # fixed settle time after the document reports loaded
type(Key.END, Key.CTRL)  # Ctrl+End: move caret to the end of the document
| [
"wachen@mozilla.com"
] | wachen@mozilla.com |
04daea66a40d45b05d5e069646a555609e285927 | caaa87102b710e1973f4a434d1ecf2c80a972395 | /newsletter/pandas/pandas/tests/indexing/test_coercion.py | 23600e1f4241c143f5b266935a18a452b0513844 | [
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause"
] | permissive | SKNIRBHAY/TechWise-1 | e0df8868af17133a32da8e2341fa30aa0349af10 | 027895cb91aada1d8eecc1ddbb79e41fa66cdc75 | refs/heads/master | 2020-05-24T21:22:31.458963 | 2017-03-14T02:11:05 | 2017-03-14T02:11:05 | 84,881,955 | 1 | 0 | null | 2017-03-13T22:48:51 | 2017-03-13T22:48:51 | null | UTF-8 | Python | false | false | 20,644 | py | # -*- coding: utf-8 -*-
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class TestIndexCoercion(tm.TestCase):
    """Index dtype coercion when setting or inserting new labels."""

    _multiprocess_can_split_ = True

    def test_setitem_index_numeric_coercion_int(self):
        """Assigning a new label to an int64 index: int keeps int64,
        float upcasts the index to float64."""
        # tests setitem with non-existing numeric key
        s = pd.Series([1, 2, 3, 4])
        self.assertEqual(s.index.dtype, np.int64)

        # int + int -> int
        temp = s.copy()
        temp[5] = 5
        tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
                                               index=[0, 1, 2, 3, 5]))
        self.assertEqual(temp.index.dtype, np.int64)

        # int + float -> float
        temp = s.copy()
        temp[1.1] = 5
        tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
                                               index=[0, 1, 2, 3, 1.1]))
        self.assertEqual(temp.index.dtype, np.float64)

    def test_setitem_index_numeric_coercion_float(self):
        """Assigning a new label to a float64 index; the int-key case is a
        known gap (raises instead of coercing, see GH12747)."""
        # tests setitem with non-existing numeric key
        s = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
        self.assertEqual(s.index.dtype, np.float64)

        # float + int -> float (currently raises IndexError instead)
        temp = s.copy()
        # TODO_GH12747 The result must be float
        with tm.assertRaises(IndexError):
            temp[5] = 5

        # float + float -> float
        temp = s.copy()
        temp[5.1] = 5
        exp = pd.Series([1, 2, 3, 4, 5], index=[1.1, 2.1, 3.1, 4.1, 5.1])
        tm.assert_series_equal(temp, exp)
        self.assertEqual(temp.index.dtype, np.float64)

    def test_insert_numeric_coercion_int(self):
        """Index.insert into Int64Index: float upcasts, bool is coerced to int."""
        idx = pd.Int64Index([1, 2, 3, 4])
        self.assertEqual(idx.dtype, np.int64)

        # int + int -> int
        res = idx.insert(1, 1)
        tm.assert_index_equal(res, pd.Index([1, 1, 2, 3, 4]))
        self.assertEqual(res.dtype, np.int64)

        # int + float -> float
        res = idx.insert(1, 1.1)
        tm.assert_index_equal(res, pd.Index([1, 1.1, 2, 3, 4]))
        self.assertEqual(res.dtype, np.float64)

        # int + bool -> int
        res = idx.insert(1, False)
        tm.assert_index_equal(res, pd.Index([1, 0, 2, 3, 4]))
        self.assertEqual(res.dtype, np.int64)

    def test_insert_numeric_coercion_float(self):
        """Index.insert into Float64Index always stays float64."""
        idx = pd.Float64Index([1, 2, 3, 4])
        self.assertEqual(idx.dtype, np.float64)

        # float + int -> float
        res = idx.insert(1, 1)
        tm.assert_index_equal(res, pd.Index([1., 1., 2., 3., 4.]))
        self.assertEqual(res.dtype, np.float64)

        # float + float -> float
        res = idx.insert(1, 1.1)
        tm.assert_index_equal(res, pd.Index([1., 1.1, 2., 3., 4.]))
        self.assertEqual(res.dtype, np.float64)

        # float + bool -> float
        res = idx.insert(1, False)
        tm.assert_index_equal(res, pd.Index([1., 0., 2., 3., 4.]))
        self.assertEqual(res.dtype, np.float64)
class TestSeriesCoercion(tm.TestCase):
    """Series dtype coercion for setitem / where / fillna / replace."""

    _multiprocess_can_split_ = True

    def setUp(self):
        # Replacement fixtures keyed by dtype name; used by the
        # _assert_replace_conversion helper and the replace tests.
        self.rep = {}
        self.rep['object'] = ['a', 'b']
        self.rep['int64'] = [4, 5]
        self.rep['float64'] = [1.1, 2.2]
        self.rep['complex128'] = [1 + 1j, 2 + 2j]
        self.rep['bool'] = [True, False]
    def test_setitem_numeric_coercion_int(self):
        """Setting one element of an int64 Series: complex upcasts; the
        float case is a known gap (GH12747) and currently truncates."""
        s = pd.Series([1, 2, 3, 4])
        self.assertEqual(s.dtype, np.int64)

        # int + int -> int
        temp = s.copy()
        temp[1] = 1
        tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
        self.assertEqual(temp.dtype, np.int64)

        # int + float -> float
        # TODO_GH12747 The result must be float
        temp = s.copy()
        temp[1] = 1.1
        # tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
        # self.assertEqual(temp.dtype, np.float64)
        tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
        self.assertEqual(temp.dtype, np.int64)

        # int + complex -> complex
        temp = s.copy()
        temp[1] = 1 + 1j
        tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 3, 4]))
        self.assertEqual(temp.dtype, np.complex128)

        # int + bool -> int
        temp = s.copy()
        temp[1] = True
        tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
        self.assertEqual(temp.dtype, np.int64)
    def test_setitem_numeric_coercion_float(self):
        """Setting one element of a float64 Series: only complex upcasts;
        int and bool values are stored as floats."""
        s = pd.Series([1.1, 2.2, 3.3, 4.4])
        self.assertEqual(s.dtype, np.float64)

        # float + int -> float
        temp = s.copy()
        temp[1] = 1
        tm.assert_series_equal(temp, pd.Series([1.1, 1.0, 3.3, 4.4]))
        self.assertEqual(temp.dtype, np.float64)

        # float + float -> float
        temp = s.copy()
        temp[1] = 1.1
        tm.assert_series_equal(temp, pd.Series([1.1, 1.1, 3.3, 4.4]))
        self.assertEqual(temp.dtype, np.float64)

        # float + complex -> complex
        temp = s.copy()
        temp[1] = 1 + 1j
        tm.assert_series_equal(temp, pd.Series([1.1, 1 + 1j, 3.3, 4.4]))
        self.assertEqual(temp.dtype, np.complex128)

        # float + bool -> float
        temp = s.copy()
        temp[1] = True
        tm.assert_series_equal(temp, pd.Series([1.1, 1.0, 3.3, 4.4]))
        self.assertEqual(temp.dtype, np.float64)
    def test_setitem_numeric_coercion_complex(self):
        """Setting one element of a complex128 Series: dtype never changes."""
        s = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
        self.assertEqual(s.dtype, np.complex128)

        # complex + int -> complex
        temp = s.copy()
        temp[1] = 1
        tm.assert_series_equal(temp, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
        self.assertEqual(temp.dtype, np.complex128)

        # complex + float -> complex
        temp = s.copy()
        temp[1] = 1.1
        tm.assert_series_equal(temp, pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j]))
        self.assertEqual(temp.dtype, np.complex128)

        # complex + complex -> complex
        temp = s.copy()
        temp[1] = 1 + 1j
        tm.assert_series_equal(temp,
                               pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j]))
        self.assertEqual(temp.dtype, np.complex128)

        # complex + bool -> complex
        temp = s.copy()
        temp[1] = True
        tm.assert_series_equal(temp, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
        self.assertEqual(temp.dtype, np.complex128)
    def test_setitem_numeric_coercion_bool(self):
        """Setting one element of a bool Series: all numeric assignments
        currently stay bool; the TODOs (GH12747) record the intended
        upcasting behaviour."""
        s = pd.Series([True, False, True, False])
        self.assertEqual(s.dtype, np.bool)

        # bool + int -> int
        # TODO_GH12747 The result must be int
        temp = s.copy()
        temp[1] = 1
        # tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
        # self.assertEqual(temp.dtype, np.int64)
        tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
        self.assertEqual(temp.dtype, np.bool)

        # TODO_GH12747 The result must be int
        temp = s.copy()
        temp[1] = 3  # greater than bool
        # tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
        # self.assertEqual(temp.dtype, np.int64)
        tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
        self.assertEqual(temp.dtype, np.bool)

        # bool + float -> float
        # TODO_GH12747 The result must be float
        temp = s.copy()
        temp[1] = 1.1
        # tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
        # self.assertEqual(temp.dtype, np.float64)
        tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
        self.assertEqual(temp.dtype, np.bool)

        # bool + complex -> complex (buggy, results in bool)
        # TODO_GH12747 The result must be complex
        temp = s.copy()
        temp[1] = 1 + 1j
        # tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
        # self.assertEqual(temp.dtype, np.complex128)
        tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
        self.assertEqual(temp.dtype, np.bool)

        # bool + bool -> bool
        temp = s.copy()
        temp[1] = True
        tm.assert_series_equal(temp, pd.Series([True, True, True, False]))
        self.assertEqual(temp.dtype, np.bool)
    def test_where_numeric_coercion_int(self):
        """Series.where on int64 with scalar and Series fillers: float and
        complex fillers upcast; bool fillers are coerced to int."""
        s = pd.Series([1, 2, 3, 4])
        self.assertEqual(s.dtype, np.int64)
        cond = pd.Series([True, False, True, False])

        # int + int -> int
        res = s.where(cond, 1)
        tm.assert_series_equal(res, pd.Series([1, 1, 3, 1]))
        self.assertEqual(res.dtype, np.int64)
        res = s.where(cond, pd.Series([5, 6, 7, 8]))
        tm.assert_series_equal(res, pd.Series([1, 6, 3, 8]))
        self.assertEqual(res.dtype, np.int64)

        # int + float -> float
        res = s.where(cond, 1.1)
        tm.assert_series_equal(res, pd.Series([1, 1.1, 3, 1.1]))
        self.assertEqual(res.dtype, np.float64)
        res = s.where(cond, pd.Series([5.5, 6.6, 7.7, 8.8]))
        tm.assert_series_equal(res, pd.Series([1, 6.6, 3, 8.8]))
        self.assertEqual(res.dtype, np.float64)

        # int + complex -> complex
        res = s.where(cond, 1 + 1j)
        tm.assert_series_equal(res, pd.Series([1, 1 + 1j, 3, 1 + 1j]))
        self.assertEqual(res.dtype, np.complex128)
        res = s.where(cond, pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]))
        tm.assert_series_equal(res, pd.Series([1, 6 + 6j, 3, 8 + 8j]))
        self.assertEqual(res.dtype, np.complex128)

        # int + bool -> int
        res = s.where(cond, True)
        tm.assert_series_equal(res, pd.Series([1, 1, 3, 1]))
        self.assertEqual(res.dtype, np.int64)
        res = s.where(cond, pd.Series([True, False, True, True]))
        tm.assert_series_equal(res, pd.Series([1, 0, 3, 1]))
        self.assertEqual(res.dtype, np.int64)
    def test_where_numeric_coercion_float(self):
        """Series.where on float64: only complex fillers upcast; int and
        bool fillers are stored as floats."""
        s = pd.Series([1.1, 2.2, 3.3, 4.4])
        self.assertEqual(s.dtype, np.float64)
        cond = pd.Series([True, False, True, False])

        # float + int -> float
        res = s.where(cond, 1)
        tm.assert_series_equal(res, pd.Series([1.1, 1.0, 3.3, 1.0]))
        self.assertEqual(res.dtype, np.float64)
        res = s.where(cond, pd.Series([5, 6, 7, 8]))
        tm.assert_series_equal(res, pd.Series([1.1, 6.0, 3.3, 8.0]))
        self.assertEqual(res.dtype, np.float64)

        # float + float -> float
        res = s.where(cond, 1.1)
        tm.assert_series_equal(res, pd.Series([1.1, 1.1, 3.3, 1.1]))
        self.assertEqual(res.dtype, np.float64)
        res = s.where(cond, pd.Series([5.5, 6.6, 7.7, 8.8]))
        tm.assert_series_equal(res, pd.Series([1.1, 6.6, 3.3, 8.8]))
        self.assertEqual(res.dtype, np.float64)

        # float + complex -> complex
        res = s.where(cond, 1 + 1j)
        tm.assert_series_equal(res, pd.Series([1.1, 1 + 1j, 3.3, 1 + 1j]))
        self.assertEqual(res.dtype, np.complex128)
        res = s.where(cond, pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]))
        tm.assert_series_equal(res, pd.Series([1.1, 6 + 6j, 3.3, 8 + 8j]))
        self.assertEqual(res.dtype, np.complex128)

        # float + bool -> float
        res = s.where(cond, True)
        tm.assert_series_equal(res, pd.Series([1.1, 1.0, 3.3, 1.0]))
        self.assertEqual(res.dtype, np.float64)
        res = s.where(cond, pd.Series([True, False, True, True]))
        tm.assert_series_equal(res, pd.Series([1.1, 0.0, 3.3, 1.0]))
        self.assertEqual(res.dtype, np.float64)
    def test_where_numeric_coercion_complex(self):
        """Series.where on complex128: result dtype is always complex128."""
        s = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
        self.assertEqual(s.dtype, np.complex128)
        cond = pd.Series([True, False, True, False])

        # complex + int -> complex
        res = s.where(cond, 1)
        tm.assert_series_equal(res, pd.Series([1 + 1j, 1, 3 + 3j, 1]))
        self.assertEqual(res.dtype, np.complex128)
        res = s.where(cond, pd.Series([5, 6, 7, 8]))
        tm.assert_series_equal(res, pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0]))
        self.assertEqual(res.dtype, np.complex128)

        # complex + float -> complex
        res = s.where(cond, 1.1)
        tm.assert_series_equal(res, pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1]))
        self.assertEqual(res.dtype, np.complex128)
        res = s.where(cond, pd.Series([5.5, 6.6, 7.7, 8.8]))
        tm.assert_series_equal(res, pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8]))
        self.assertEqual(res.dtype, np.complex128)

        # complex + complex -> complex
        res = s.where(cond, 1 + 1j)
        tm.assert_series_equal(res,
                               pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j]))
        self.assertEqual(res.dtype, np.complex128)
        res = s.where(cond, pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]))
        tm.assert_series_equal(res,
                               pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j]))
        self.assertEqual(res.dtype, np.complex128)

        # complex + bool -> complex
        res = s.where(cond, True)
        tm.assert_series_equal(res, pd.Series([1 + 1j, 1, 3 + 3j, 1]))
        self.assertEqual(res.dtype, np.complex128)
        res = s.where(cond, pd.Series([True, False, True, True]))
        tm.assert_series_equal(res, pd.Series([1 + 1j, 0, 3 + 3j, 1]))
        self.assertEqual(res.dtype, np.complex128)
    def test_where_numeric_coercion_bool(self):
        """Series.where on bool: unlike setitem, where DOES upcast to the
        filler's dtype (int/float/complex)."""
        s = pd.Series([True, False, True, False])
        self.assertEqual(s.dtype, np.bool)
        cond = pd.Series([True, False, True, False])

        # bool + int -> int
        res = s.where(cond, 1)
        tm.assert_series_equal(res, pd.Series([1, 1, 1, 1]))
        self.assertEqual(res.dtype, np.int64)
        res = s.where(cond, pd.Series([5, 6, 7, 8]))
        tm.assert_series_equal(res, pd.Series([1, 6, 1, 8]))
        self.assertEqual(res.dtype, np.int64)

        # bool + float -> float
        res = s.where(cond, 1.1)
        tm.assert_series_equal(res, pd.Series([1.0, 1.1, 1.0, 1.1]))
        self.assertEqual(res.dtype, np.float64)
        res = s.where(cond, pd.Series([5.5, 6.6, 7.7, 8.8]))
        tm.assert_series_equal(res, pd.Series([1.0, 6.6, 1.0, 8.8]))
        self.assertEqual(res.dtype, np.float64)

        # bool + complex -> complex
        res = s.where(cond, 1 + 1j)
        tm.assert_series_equal(res, pd.Series([1, 1 + 1j, 1, 1 + 1j]))
        self.assertEqual(res.dtype, np.complex128)
        res = s.where(cond, pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]))
        tm.assert_series_equal(res, pd.Series([1, 6 + 6j, 1, 8 + 8j]))
        self.assertEqual(res.dtype, np.complex128)

        # bool + bool -> bool
        res = s.where(cond, True)
        tm.assert_series_equal(res, pd.Series([True, True, True, True]))
        self.assertEqual(res.dtype, np.bool)
        res = s.where(cond, pd.Series([True, False, True, True]))
        tm.assert_series_equal(res, pd.Series([True, False, True, True]))
        self.assertEqual(res.dtype, np.bool)
    # not indexing, but placed here for consistency
    def test_fillna_numeric_coercion_int(self):
        """No-op: an int64 Series can't hold NaN, so there is nothing to fill."""
        # int can't hold NaN
        pass

    def test_fillna_numeric_coercion_float(self):
        """fillna on float64: only a complex filler upcasts the dtype."""
        s = pd.Series([1.1, np.nan, 3.3, 4.4])
        self.assertEqual(s.dtype, np.float64)

        # float + int -> float
        res = s.fillna(1)
        tm.assert_series_equal(res, pd.Series([1.1, 1.0, 3.3, 4.4]))
        self.assertEqual(res.dtype, np.float64)

        # float + float -> float
        res = s.fillna(1.1)
        tm.assert_series_equal(res, pd.Series([1.1, 1.1, 3.3, 4.4]))
        self.assertEqual(res.dtype, np.float64)

        # float + complex -> complex
        res = s.fillna(1 + 1j)
        tm.assert_series_equal(res, pd.Series([1.1, 1 + 1j, 3.3, 4.4]))
        self.assertEqual(res.dtype, np.complex128)

        # float + bool -> float
        res = s.fillna(True)
        tm.assert_series_equal(res, pd.Series([1.1, 1.0, 3.3, 4.4]))
        self.assertEqual(res.dtype, np.float64)

    def test_fillna_numeric_coercion_complex(self):
        """fillna on complex128: dtype always stays complex128."""
        s = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
        self.assertEqual(s.dtype, np.complex128)

        # complex + int -> complex
        res = s.fillna(1)
        tm.assert_series_equal(res, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
        self.assertEqual(res.dtype, np.complex128)

        # complex + float -> complex
        res = s.fillna(1.1)
        tm.assert_series_equal(res, pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j]))
        self.assertEqual(res.dtype, np.complex128)

        # complex + complex -> complex
        res = s.fillna(1 + 1j)
        tm.assert_series_equal(res,
                               pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j]))
        self.assertEqual(res.dtype, np.complex128)

        # complex + bool -> complex
        res = s.fillna(True)
        tm.assert_series_equal(res, pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]))
        self.assertEqual(res.dtype, np.complex128)

    def test_fillna_numeric_coercion_bool(self):
        """No-op: a bool Series can't hold NaN, so there is nothing to fill."""
        # bool can't hold NaN
        pass
    def _assert_replace_conversion(self, from_key, to_key, how):
        """Replace every value of a *from_key*-dtype Series with *to_key*
        values (via a dict or a Series replacer) and check the result dtype.

        For the listed dtype pairs pandas deliberately does NOT downcast,
        so the expected Series keeps the original dtype.
        """
        index = pd.Index([3, 4], name='xxx')
        s = pd.Series(self.rep[from_key], index=index, name='yyy')
        self.assertEqual(s.dtype, from_key)
        if how == 'dict':
            replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
        elif how == 'series':
            replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
        else:
            raise ValueError
        result = s.replace(replacer)

        if ((from_key == 'float64' and
             to_key in ('bool', 'int64')) or

            (from_key == 'complex128' and
             to_key in ('bool', 'int64', 'float64')) or

            (from_key == 'int64' and
             to_key in ('bool')) or

            # TODO_GH12747 The result must be int?
            (from_key == 'bool' and to_key in ('int64'))):

            # buggy on 32-bit
            if tm.is_platform_32bit():
                raise nose.SkipTest("32-bit platform buggy: {0} -> {1}".format
                                    (from_key, to_key))

            # Expected: do not downcast by replacement
            exp = pd.Series(self.rep[to_key], index=index,
                            name='yyy', dtype=from_key)
        else:
            exp = pd.Series(self.rep[to_key], index=index, name='yyy')
            self.assertEqual(exp.dtype, to_key)

        tm.assert_series_equal(result, exp)
    # One test per source dtype, each sweeping all target dtypes through
    # _assert_replace_conversion with a dict replacer ...
    def test_replace_conversion_dict_from_object(self):
        from_key = 'object'
        for to_key in self.rep:
            self._assert_replace_conversion(from_key, to_key, how='dict')

    def test_replace_conversion_dict_from_int(self):
        from_key = 'int64'
        for to_key in self.rep:
            self._assert_replace_conversion(from_key, to_key, how='dict')

    def test_replace_conversion_dict_from_float(self):
        from_key = 'float64'
        for to_key in self.rep:
            self._assert_replace_conversion(from_key, to_key, how='dict')

    def test_replace_conversion_dict_from_complex(self):
        from_key = 'complex128'
        for to_key in self.rep:
            self._assert_replace_conversion(from_key, to_key, how='dict')

    def test_replace_conversion_dict_from_bool(self):
        from_key = 'bool'
        for to_key in self.rep:
            self._assert_replace_conversion(from_key, to_key, how='dict')

    # ... and the same sweeps with a Series replacer.
    def test_replace_conversion_series_from_object(self):
        from_key = 'object'
        for to_key in self.rep:
            self._assert_replace_conversion(from_key, to_key, how='series')

    def test_replace_conversion_series_from_int(self):
        from_key = 'int64'
        for to_key in self.rep:
            self._assert_replace_conversion(from_key, to_key, how='series')

    def test_replace_conversion_series_from_float(self):
        from_key = 'float64'
        for to_key in self.rep:
            self._assert_replace_conversion(from_key, to_key, how='series')

    def test_replace_conversion_series_from_complex(self):
        from_key = 'complex128'
        for to_key in self.rep:
            self._assert_replace_conversion(from_key, to_key, how='series')

    def test_replace_conversion_series_from_bool(self):
        from_key = 'bool'
        for to_key in self.rep:
            if compat.PY3:
                # doesn't work in PY3, though ...dict_from_bool works fine
                raise nose.SkipTest("doesn't work as in PY3")
            self._assert_replace_conversion(from_key, to_key, how='series')
| [
"ubuntu@ubuntu.ubuntu-domain"
] | ubuntu@ubuntu.ubuntu-domain |
12dfb84448a69a860cb2cc5e01f180a55448328f | 7b38b2cd39340474a042789c03cadf90da279dc8 | /webapp/python/venv/bin/snakeviz | 78098a6757b8d9c9720f6ec13b90784a16d7ed5c | [
"MIT"
] | permissive | koucs/isucon8q | 7dba9d07d962e6db3ba48244fafda0bced23dc5d | f273a7c072e70c1bbb41d491ee6ff6de9a812a04 | refs/heads/master | 2022-12-04T08:52:24.235762 | 2020-08-29T18:08:29 | 2020-08-29T18:08:29 | 289,645,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/home/isucon/torb/webapp/python/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from snakeviz.cli import main
if __name__ == '__main__':
    # Strip the setuptools script suffix (-script.py / .exe) from argv[0]
    # before delegating to snakeviz's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"kou.cshu@gmail.com"
] | kou.cshu@gmail.com | |
b043ed9fa8b36f548c4387b356e9a3489a921a96 | 0a27ebb2d590d08685f0b63a4fd10023ce03dfee | /selection/migrations/0003_alter_floor_floor_levels.py | 124ba53bb7ca799a0be461f974a53deca3911666 | [] | no_license | RebecaDumitrascuta/office-booking-app | 66eedd4d14ea5820451e36d7e6349a986a6934fb | 6214a47f174ab6ae1c173744a51aea02c84caca0 | refs/heads/master | 2023-09-05T01:13:46.763058 | 2021-11-25T15:53:07 | 2021-11-25T15:53:07 | 406,746,316 | 0 | 1 | null | 2021-11-25T15:53:08 | 2021-09-15T12:04:10 | JavaScript | UTF-8 | Python | false | false | 451 | py | # Generated by Django 3.2.7 on 2021-10-25 20:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: restrict Floor.floor_levels to the
    F1-F4 choice set (do not edit by hand)."""

    dependencies = [
        ('selection', '0002_office_zone'),
    ]

    operations = [
        migrations.AlterField(
            model_name='floor',
            name='floor_levels',
            field=models.CharField(choices=[('F1', 'F1'), ('F2', 'F2'), ('F3', 'F3'), ('F4', 'F4')], max_length=2),
        ),
    ]
| [
"rebecadumitrascuta02@gmail.com"
] | rebecadumitrascuta02@gmail.com |
5224d8389d28f53149bb9a84556ad05b34511670 | 32711a21edff968fdbf9fa9baf0e0f8373d0e131 | /authapp/forms.py | fa7e7e88283332593f25f77dbbd8f2f33b5d24c6 | [] | no_license | acid-n/GeekShop | ca836a4daeb97754fafd44d36e705f0e160c8d4d | 9749debe92e6ded46ed01082fbdb497a5f8485fa | refs/heads/master | 2023-01-15T15:29:18.172547 | 2020-11-25T18:34:39 | 2020-11-25T18:34:39 | 296,569,582 | 0 | 0 | null | 2020-10-04T18:39:29 | 2020-09-18T09:02:03 | JavaScript | UTF-8 | Python | false | false | 2,620 | py | import hashlib
import random
from django import forms
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm, UserChangeForm
from authapp.models import ShopUser, ShopUserProfile
class ShopUserLoginForm(AuthenticationForm):
    """Login form: the standard AuthenticationForm with Bootstrap styling
    applied to every field and help texts suppressed."""

    class Meta:
        model = ShopUser
        fields = ('username', 'password')

    def __init__(self, *args, **kwargs):
        super(ShopUserLoginForm, self).__init__(*args, **kwargs)
        # Only the field objects are needed, so iterate .values()
        # (the previous .items() loop discarded the key).
        for field in self.fields.values():
            field.widget.attrs['class'] = "form-control"
            field.help_text = ''
class ShopUserRegisterForm(UserCreationForm):
    """Registration form: new users are created inactive with a random
    activation key for e-mail confirmation."""

    class Meta:
        model = ShopUser
        fields = ('username', 'first_name', 'password1', 'password2', 'email', 'avatar', 'age')

    def __init__(self, *args, **kwargs):
        super(ShopUserRegisterForm, self).__init__(*args, **kwargs)
        # Only the field objects are needed, so iterate .values()
        # (the previous .items() loop discarded the key).
        for field in self.fields.values():
            field.widget.attrs['class'] = "form-control"
            field.help_text = ''

    def clean_age(self):
        """Reject registrants younger than 18."""
        data = self.cleaned_data['age']
        if data < 18:
            raise forms.ValidationError("Вы слишком молоды")
        return data

    def save(self, **kwargs):
        """Save the user inactive and attach an activation key.

        NOTE(review): `random` + sha1 is not cryptographically strong; for a
        security-sensitive token prefer `secrets.token_hex()`.  `kwargs` is
        accepted but deliberately not forwarded to super().save() -- the
        user is always persisted here via user.save().
        """
        user = super(ShopUserRegisterForm, self).save()
        user.is_active = False
        salt = hashlib.sha1(str(random.random()).encode('utf8')).hexdigest()[:6]
        user.activation_key = hashlib.sha1((user.email + salt).encode('utf8')).hexdigest()
        user.save()
        return user
class ShopUserEditForm(UserChangeForm):
    """User-edit form: Bootstrap styling, with the password field hidden."""

    class Meta:
        model = ShopUser
        fields = ('username', 'first_name', 'email', 'avatar', 'age')

    def __init__(self, *args, **kwargs):
        super(ShopUserEditForm, self).__init__(*args, **kwargs)
        for name, field in self.fields.items():
            field.widget.attrs['class'] = "form-control"
            field.help_text = ''
        # UserChangeForm exposes a password field; replace its widget so it
        # is not rendered on the edit page.
        if 'password' in self.fields:
            self.fields['password'].widget = forms.HiddenInput()

    def clean_age(self):
        """Reject ages below 18."""
        data = self.cleaned_data['age']
        if data < 18:
            raise forms.ValidationError("Вы слишком молоды")
        return data
class ShopUserProfileEditForm(forms.ModelForm):
    """Profile-edit form with Bootstrap-styled widgets."""

    class Meta:
        model = ShopUserProfile
        fields = ('tagline', 'about_me', 'gender')

    def __init__(self, *args, **kwargs):
        super(ShopUserProfileEditForm, self).__init__(*args, **kwargs)
        # Style every widget as a Bootstrap form control and drop help texts.
        for field in self.fields.values():
            field.widget.attrs['class'] = "form-control"
            field.help_text = ''
"acid_n@mail.ru"
] | acid_n@mail.ru |
3fb870fae8110b6233d35ad50a8c39fce9fb7372 | 99c41b3d58247e457d2c14c8a7200c8a72cbe258 | /Quetion5.py | accc1b39cb0e5b654ed6bd3ec689e14c4863730b | [] | no_license | atharvspathak/CloudCounselageLp3 | 75d3f701256fca5d143672626c7d873c90fdb8af | 87ba91876e960273571409c0b42e20fb069f916f | refs/heads/master | 2022-10-23T02:14:15.006245 | 2020-06-12T08:30:52 | 2020-06-12T08:30:52 | 262,038,140 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | '''
Program: Remove duplicate characters from a string
Input: String
Output: String without duplicate characters
'''
def remove_duplicates(text):
    """Return *text* with repeated characters removed, keeping first occurrences.

    Fix: the original fixed-size lookup table (``li = [0] * 122``) raised
    IndexError for any character with ``ord(c) >= 122`` (e.g. ``'z'`` or any
    non-ASCII input). A set supports every character. The ``input()`` call is
    also moved under a ``__main__`` guard so importing this module no longer
    blocks on stdin.
    """
    seen = set()
    result = []
    for ch in text:
        if ch not in seen:
            seen.add(ch)
            result.append(ch)
    return ''.join(result)


if __name__ == "__main__":
    print(remove_duplicates(input()))
| [
"noreply@github.com"
] | noreply@github.com |
4f5f6cf6b975bc75e55183392098c5035bdaf30d | a742bd051641865d2e5b5d299c6bc14ddad47f22 | /algorithm/牛客网/55-链表中环的入口节点.py | cb9f7c566cc7b629c3e7d7a7aef88c03f3a1a921 | [] | no_license | lxconfig/UbuntuCode_bak | fb8f9fae7c42cf6d984bf8231604ccec309fb604 | 3508e1ce089131b19603c3206aab4cf43023bb19 | refs/heads/master | 2023-02-03T19:10:32.001740 | 2020-12-19T07:27:57 | 2020-12-19T07:27:57 | 321,351,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,604 | py |
"""
给一个链表,若其中包含环,请找出该链表的环的入口结点,否则,输出null。
思路:
双指针法
快指针先走两步,慢指针走一步
当两个指针又相遇了,此时指向的节点可能是环的入口节点
再次让慢指针回到链表头,然后和快指针一起走,再次相遇时,就是环的入口节点
否则,快指针不存在时,表示没有环
或:
先让快指针走n步,n=链表的长度
之后再让快指针和慢指针一起走,直到相遇,此时就是环的入口节点
"""
class ListNode:
    """Singly linked list node: a value plus a pointer to the next node."""

    def __init__(self, x):
        self.val, self.next = x, None
class Solution:
    """Floyd's cycle detection: locate the entry of a loop in a linked list."""

    def EntryNodeOfLoop(self, pHead):
        """Return the value at the cycle's entry node, or None if acyclic.

        Phase 1: a 2x runner and a 1x walker meet inside the cycle (if any).
        Phase 2: restart the walker from the head; advancing both one step at
        a time, they meet exactly at the cycle's entry.
        """
        if not pHead:
            return None
        runner = walker = pHead
        while runner and runner.next:
            runner = runner.next.next
            walker = walker.next
            if runner == walker:
                break
        # Runner fell off the end: the list has no cycle.
        if not runner or not runner.next:
            return None
        walker = pHead
        while runner != walker:
            runner = runner.next
            walker = walker.next
        return runner.val
if __name__ == "__main__":
    # Build 1 -> 2 -> 3 -> 4 -> 5 with node 5 linking back to node 3,
    # so the cycle entry value is 3 (a sixth node stays unattached).
    nodes = [ListNode(value) for value in range(1, 7)]
    for left, right in zip(nodes, nodes[1:5]):
        left.next = right
    nodes[4].next = nodes[2]
    print(Solution().EntryNodeOfLoop(nodes[0]))
"525868229@qq.com"
] | 525868229@qq.com |
36947ff60d9483b71bca94ec94dc5aea17a9862c | 75d9be48e266ea4802edc5c61461ef96769517f9 | /src/code/mf_imp.py | ab0780fa05fd340b7bca85f8465fbd461e20996d | [] | no_license | PV-LN/Movie_rec_rwr | 74d413e4101ceaba1043e042371e5b5d5a302849 | 66d544adf51fb59d44d4652a585f96d8fdabb4b1 | refs/heads/master | 2020-08-17T20:03:01.993751 | 2019-10-17T06:31:26 | 2019-10-17T06:31:26 | 215,705,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,805 | py | """""""""""""""""""""""""""""""""""""""""""""""""""""""""
MFRWR
- A comparative study of matrix factorization and
random walk with restart in recommender system
Authors
- Haekyu Park (hkpark627@snu.ac.kr)
- Jinhong Jung (jinhongjung@snu.ac.kr)
- U Kang (ukang@snu.ac.kr)
from Data Mining Lab., Seoul National University
(https://datalab.snu.ac.kr)
File
- mf_imp.py
: matrix factorization when implicit ratings are given
This software is free of charge under research purposes.
For commercial purposes, please contact the authors.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Import packages
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
from entire_helper import *
from mf_helper import *
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# MF_imp
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def mf_imp(args, fold):
    """Train implicit-feedback matrix factorization (ALS-style SGD) for one fold.

    Args:
        args: parsed arguments providing lr, lamb, method, dim, dataset,
            alpha, input_path and result_path.
        fold: fold index selecting the train/test split.

    Returns:
        (train_rmse, test_rmse, test_mae) of the final model.

    Fix: the original ``return rmse, testRMSE, testMAE`` referenced names
    that were never assigned (NameError at runtime); the held-out metrics
    are now computed before returning. Output files are also written via
    ``with`` so handles are always closed.
    """
    # 0. Basic settings
    lr = args.lr
    lamb = args.lamb
    mtd = args.method
    dim = args.dim
    dataset = args.dataset
    alpha = args.alpha

    result_path = args.result_path

    # 1. Read ratings
    train, test = read_rating(args.input_path, fold)

    # 2. Mapping entities
    u2th, th2u, _, num_u = map_entity(train.u, dict(), dict(), 0, 0)
    i2th, th2i, _, num_i = map_entity(train.i, dict(), dict(), 0, 0)

    # 3. Initialize vectors of users and items
    X = (np.random.random((num_u, dim)))
    Y = (np.random.random((num_i, dim)))

    # 4. Learn
    prev_rmse = 10
    num_iter = 0
    max_iter = 30
    while num_iter < max_iter:
        num_iter += 1
        # Update latent features (note: Y's update deliberately uses the
        # freshly updated X[user], preserving the original update order).
        for u, i, r in zip(train.u, train.i, train.r):
            user = u2th[int(u)]
            item = i2th[int(i)]
            hat_r_ui = np.matmul(X[user], Y[item])
            c_ui = 1 + alpha * r
            err = 1 - hat_r_ui
            X[user] += lr * (c_ui * err * Y[item] - lamb * X[user])
            Y[item] += lr * (c_ui * err * X[user] - lamb * Y[item])

        # Check train rmse with train examples
        rmse, mae = mf_imp_accuracy(train, X, Y, u2th, i2th, 1)

        # Check convergence: stop when train RMSE rises or plateaus.
        if prev_rmse < rmse:
            break
        if np.absolute(np.subtract(prev_rmse, rmse)) < 0.001:
            break
        prev_rmse = rmse

    # 5. Save results
    paras = '%s_%s_%s_%s_%s' % (dim, lr, lamb, alpha, fold)
    # Save vectors
    np.savetxt(result_path + '%s_%s_X_%s.txt' % (mtd, dataset, paras), X, fmt='%.5f', delimiter='\t')
    np.savetxt(result_path + '%s_%s_Y_%s.txt' % (mtd, dataset, paras), Y, fmt='%.5f', delimiter='\t')
    # Save mapping of users
    with open(result_path + '%s_%s_user_%s.txt' % (mtd, dataset, paras), 'w') as f:
        for u_th in range(num_u):
            f.write('%d\n' % th2u[u_th])
    # Save mapping of items
    with open(result_path + '%s_%s_item_%s.txt' % (mtd, dataset, paras), 'w') as f:
        for i_th in range(num_i):
            f.write('%d\n' % th2i[i_th])

    # Evaluate on the held-out fold before returning.
    # NOTE(review): assumes mf_imp_accuracy tolerates test users/items that
    # were unseen during training -- confirm against mf_helper.
    testRMSE, testMAE = mf_imp_accuracy(test, X, Y, u2th, i2th, 1)
    return rmse, testRMSE, testMAE
| [
"noreply@github.com"
] | noreply@github.com |
0c602c4d5aba8185e74b266e1050df2cd0ec026c | 111082d7fd02a5f64cd1784b923a109cc95dc557 | /dj_rulitool/wsgi.py | 7ac1fe5349618d888f4dc721c11e7dfd58b406e3 | [] | no_license | 270466585/dj_rulitool | ba65a6ef1bc44b599f19ac1172d86e8d4b2a12af | 0d2e97454c66d30537780d81b2a0b4b2f953b2ed | refs/heads/master | 2020-04-14T20:25:36.592762 | 2019-01-04T10:28:18 | 2019-01-04T10:28:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """
WSGI config for dj_rulitool project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# The settings module must be configured before the application object is
# created: get_wsgi_application() initializes Django from this variable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_rulitool.settings")
application = get_wsgi_application()
| [
"27066585@qq.com"
] | 27066585@qq.com |
c0a76cd341cf357e4792a24b8c7dab2578e28b9e | ddc41e2a37ff82eb65404b9fd434a47a33ceb743 | /api.py | 8f3c3acf232085551caeb5c4d8b866ae002533d2 | [] | no_license | abdullah-algumar/ArabicMorphologicAnalysis | 30028ab808c3334a43a136b87e9e6796034ef296 | f695bed7045b4330d42713bf96bcd7d9418e748e | refs/heads/master | 2023-02-03T08:19:35.066362 | 2020-12-19T16:26:08 | 2020-12-19T16:26:08 | 322,889,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | from flask import Flask, make_response, jsonify
from nlp_naftawayh import nlp_naf
app = Flask(__name__)
# Emit JSON as raw UTF-8 instead of \u-escaped ASCII (responses contain
# Arabic text).
app.config['JSON_AS_ASCII'] = False
# Preserve insertion order of keys in JSON responses.
app.config['JSON_SORT_KEYS'] = False
@app.route('/')
@app.route('/morfolojik/<string:sentence>', methods=['GET', 'POST'])
def find_stem(sentence=None):
    """Run morphological analysis on `sentence` and return it as JSON.

    Returns HTTP 200 with {'message': 'SUCCESS', 'result': ...} on success,
    HTTP 404 with {'message': 'FAILED'} otherwise.

    Fixes: the bare '/' route used to invoke this view with no `sentence`
    argument, raising TypeError (HTTP 500) -- `sentence` now defaults to
    None and such requests get the regular FAILED response. The bare
    `except:` was narrowed to `except Exception` so SystemExit and
    KeyboardInterrupt are no longer swallowed.
    """
    if sentence is None:
        return make_response(jsonify({'message': 'FAILED'}), 404)
    try:
        nlp_result = nlp_naf(sentence)
        result = {'message': 'SUCCESS', 'result': nlp_result}
        return make_response(jsonify(result), 200)
    except Exception:
        print('CALISMADI')
        result = {'message': 'FAILED'}
        return make_response(jsonify(result), 404)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger and reloader;
    # make sure this is disabled in any production deployment.
    app.run(debug=True)
"54884686+abdullah-algumar@users.noreply.github.com"
] | 54884686+abdullah-algumar@users.noreply.github.com |
6b8caec8ea96cb715c6ece897689627a1d8ec789 | eb5c7ddab43fbb24b91c29a4560749a2386490fb | /tensorflow/contrib/learn/python/learn/estimators/head_test.py | 99bdcc7d8fe281c928257d70ff31d4db8ac7f386 | [
"Apache-2.0"
] | permissive | ivankreso/tensorflow | 8fb3f599564a6afe8c054bf05ea5a63aa8f4ec3d | 38d8238edb041b9fbf3b2762b09df450937a5b40 | refs/heads/master | 2021-01-21T16:27:58.859552 | 2017-01-31T10:33:22 | 2017-01-31T10:33:22 | 80,117,846 | 0 | 1 | null | 2017-01-26T13:42:27 | 2017-01-26T13:42:27 | null | UTF-8 | Python | false | false | 41,317 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
# pylint: disable=g-bad-todo,g-import-not-at-top
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
  import ctypes
  # Load subsequent shared libraries with RTLD_GLOBAL so their symbols are
  # visible process-wide (works around the dlopen() crash from issue #6568).
  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import six
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-bad-todo,g-import-not-at-top
def _assert_variables(test_case,
                      expected_global=None,
                      expected_model=None,
                      expected_trainable=None):
  """Asserts the graph's global/model/trainable variable names match.

  Each `expected_*` argument is an iterable of variable names; None is
  treated as the empty collection.
  """
  checks = (
      (expected_global, variables.global_variables()),
      (expected_model, variables.model_variables()),
      (expected_trainable, variables.trainable_variables()),
  )
  # Checked in the same order as the original: global, model, trainable.
  for expected, actual_vars in checks:
    test_case.assertItemsEqual(
        tuple(expected) if expected is not None else (),
        tuple(v.name for v in actual_vars))
def _assert_no_variables(test_case):
  """Asserts that the default graph contains no variables at all."""
  _assert_variables(test_case, None, None, None)
# This must be called from within a tf.Session.
def _assert_metrics(test_case, expected_loss, expected_eval_metrics,
                    model_fn_ops):
  """Asserts `model_fn_ops` produces the expected loss and eval metrics.

  Must be called from within a tf.Session (the `eval()`/`run()` calls below
  rely on a default session being active).
  """
  # Loss is compared to 4 decimal places.
  test_case.assertAlmostEqual(expected_loss, model_fn_ops.loss.eval(), places=4)
  # Every expected metric key must exist before any of them is evaluated.
  for k in six.iterkeys(expected_eval_metrics):
    test_case.assertIn(k, six.iterkeys(model_fn_ops.eval_metric_ops))
  # Metric state lives in local variables; initialize them first.
  variables.initialize_local_variables().run()
  for key, expected_value in six.iteritems(expected_eval_metrics):
    value_tensor, update_tensor = model_fn_ops.eval_metric_ops[key]
    # The update op must run before the value tensor is read.
    update = update_tensor.eval()
    test_case.assertAlmostEqual(
        expected_value,
        update,
        places=4,
        msg="%s: update, expected %s, got %s." % (key, expected_value, update))
    value = value_tensor.eval()
    test_case.assertAlmostEqual(
        expected_value,
        value,
        places=4,
        msg="%s: value, expected %s, got %s." % (key, expected_value, value))
# This must be called from within a tf.Session.
def _assert_summary_tags(test_case, expected_tags=None):
  """Asserts the graph's SUMMARIES collection carries exactly `expected_tags`.

  Must be called from within a tf.Session (summary ops are eval()'d).
  """
  observed = []
  for summary_op in ops.get_collection(ops.GraphKeys.SUMMARIES):
    proto = summary_pb2.Summary()
    proto.ParseFromString(summary_op.eval())
    observed.append(proto.value[0].tag)
  test_case.assertItemsEqual(expected_tags or [], observed)
def _sigmoid(x):
return 1. / (1. + math.exp(-1 * x))
class RegressionModelHeadTest(test.TestCase):
  """Tests for the regression head created by head_lib._regression_head()."""

  # TODO(zakaria): test multilabel regression.
  def testRegressionWithLogits(self):
    head = head_lib._regression_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits=((1.,), (1.,), (3.,)))
      _assert_summary_tags(self, ["loss"])
      _assert_no_variables(self)
      # Squared-error loss: (1 + 0 + 4) / 3.
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionWithInvalidLogits(self):
    head = head_lib._regression_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {},
            labels=((0.,), (1.,), (1.,)),
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=_noop_train_op,
            logits=((1., 1.), (1., 1.), (3., 1.)))

  def testRegressionWithLogitsInput(self):
    head = head_lib._regression_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits_input=((0., 0.), (0., 0.), (0., 0.)))
      # logits_input makes the head create its own logits layer.
      w = ("logits/weights:0", "logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 2. / 3, {"loss": 2. / 3}, model_fn_ops)

  def testRegressionWithLogitsAndLogitsInput(self):
    head = head_lib._regression_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {},
            labels=((0.,), (1.,), (1.,)),
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=_noop_train_op,
            logits_input=((0., 0.), (0., 0.), (0., 0.)),
            logits=((1.,), (1.,), (3.,)))

  def testRegressionEvalMode(self):
    head = head_lib._regression_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((1.,), (1.,), (3.,)),
          mode=model_fn.ModeKeys.EVAL,
          train_op_fn=_noop_train_op,
          logits=((0.,), (1.,), (1.,)))
      # No training op is created in EVAL mode.
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionWithLabelName(self):
    label_name = "my_label"
    head = head_lib._regression_head(label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels={label_name: ((0.,), (1.,), (1.,))},
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits=((1.,), (1.,), (3.,)))
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionWithWeights(self):
    head = head_lib._regression_head(weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      weights = ((2.,), (5.,), (0.,))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits=((1.,), (1.,), (3.,)))
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Training loss averages over examples; eval loss normalizes by the
      # total weight.
      _assert_metrics(self, 2. / len(weights), {"loss": 2. / np.sum(weights)},
                      model_fn_ops)

  def testRegressionWithCenteredBias(self):
    head = head_lib._regression_head(enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits=((1.,), (1.,), (3.,)))
      # Centered bias adds a trainable bias variable and its Adagrad slot.
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionErrorInSparseTensorLabels(self):
    head = head_lib._regression_head()
    with ops.Graph().as_default():
      labels = sparse_tensor.SparseTensorValue(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0., 1., 1.),
          dense_shape=(3, 1))
      with self.assertRaisesRegexp(ValueError,
                                   "SparseTensor is not supported as labels."):
        head.create_model_fn_ops(
            {},
            labels=labels,
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=_noop_train_op,
            logits=((1.,), (1.,), (3.,)))
class MultiLabelModelHeadTest(test.TestCase):
  """Tests for the multi-label head created by head_lib._multi_label_head()."""

  def setUp(self):
    # One example with 3 classes; class 2 is the only positive label.
    self._logits = ((1., 0., 0.),)
    self._labels = ((0, 0, 1),)

  def _expected_eval_metrics(self, expected_loss):
    """Expected metric values for the fixed logits/labels from setUp."""
    return {
        "accuracy": 1. / 3,
        "auc": 1. / 4,
        "loss": expected_loss,
        "auc/class0": 1.,
        "auc/class1": 1.,
        "auc/class2": 0.,
        "labels/actual_label_mean/class0": self._labels[0][0],
        "labels/actual_label_mean/class1": self._labels[0][1],
        "labels/actual_label_mean/class2": self._labels[0][2],
        "labels/logits_mean/class0": self._logits[0][0],
        "labels/logits_mean/class1": self._logits[0][1],
        "labels/logits_mean/class2": self._logits[0][2],
        "labels/prediction_mean/class0": self._logits[0][0],
        "labels/prediction_mean/class1": self._logits[0][1],
        "labels/prediction_mean/class2": self._logits[0][2],
        "labels/probability_mean/class0": _sigmoid(self._logits[0][0]),
        "labels/probability_mean/class1": _sigmoid(self._logits[0][1]),
        "labels/probability_mean/class2": _sigmoid(self._logits[0][2]),
    }

  def testMultiLabelWithLogits(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
          logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiLabelWithInvalidLogits(self):
    head = head_lib._multi_label_head(n_classes=len(self._labels[0]) + 1)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
            logits=self._logits)

  def testMultiLabelWithLogitsInput(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
          logits_input=((0., 0.),))
      # logits_input makes the head create its own logits layer.
      w = ("logits/weights:0", "logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      expected_loss = .69314718
      _assert_metrics(self, expected_loss, {
          "accuracy": 2. / 3,
          "auc": 2. / 4,
          "loss": expected_loss,
          "auc/class0": 1.,
          "auc/class1": 1.,
          "auc/class2": 0.,
          "labels/actual_label_mean/class0": self._labels[0][0],
          "labels/actual_label_mean/class1": self._labels[0][1],
          "labels/actual_label_mean/class2": self._labels[0][2],
          "labels/logits_mean/class0": 0.,
          "labels/logits_mean/class1": 0.,
          "labels/logits_mean/class2": 0.,
          "labels/prediction_mean/class0": 0.,
          "labels/prediction_mean/class1": 0.,
          "labels/prediction_mean/class2": 0.,
          "labels/probability_mean/class0": .5,
          "labels/probability_mean/class1": .5,
          "labels/probability_mean/class2": .5,
      }, model_fn_ops)

  def testMultiLabelWithLogitsAndLogitsInput(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
            logits_input=((0., 0.),), logits=self._logits)

  def testMultiLabelEvalMode(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.EVAL, _noop_train_op,
          logits=self._logits)
      # No training op is created in EVAL mode.
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiLabelWithLabelName(self):
    n_classes = 3
    label_name = "my_label"
    head = head_lib._multi_label_head(
        n_classes=n_classes,
        label_name=label_name,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, {label_name: self._labels}, model_fn.ModeKeys.TRAIN,
          _noop_train_op, logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiLabelWithWeight(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": .1},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Weighted loss is .1 * unweighted; eval metrics use the rescaled loss.
      _assert_metrics(self, .089985214,
                      self._expected_eval_metrics(2.69956), model_fn_ops)

  def testMultiLabelWithCenteredBias(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes,
        enable_centered_bias=True,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
          logits=self._logits)
      # Centered bias adds a trainable bias variable and its Adagrad slot.
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      # One bias summary per class.
      _assert_summary_tags(self, (
          "loss", "centered_bias/bias_0", "centered_bias/bias_1",
          "centered_bias/bias_2"
      ))
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
class BinaryClassificationModelHeadTest(test.TestCase):
  """Tests for the binary head (head_lib._multi_class_head with n_classes=2)."""

  def setUp(self):
    # Two examples with identical logits; labels differ, so one is correct.
    self._logits = ((1.,), (1.,))
    self._labels = ((1.,), (0.,))

  def _expected_eval_metrics(self, expected_loss):
    """Expected metric values for the fixed logits/labels from setUp."""
    label_mean = np.mean(self._labels)
    return {
        "accuracy": 1. / 2,
        "accuracy/baseline_label_mean": label_mean,
        "accuracy/threshold_0.500000_mean": 1. / 2,
        "auc": 1. / 2,
        "labels/actual_label_mean": label_mean,
        "labels/prediction_mean": .731059,  # softmax
        "loss": expected_loss,
        "precision/positive_threshold_0.500000_mean": 1. / 2,
        "recall/positive_threshold_0.500000_mean": 1. / 1,
    }

  def testBinaryClassificationWithLogits(self):
    n_classes = 2
    head = head_lib._multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
          logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testBinaryClassificationWithInvalidLogits(self):
    head = head_lib._multi_class_head(n_classes=len(self._labels) + 1)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
            logits=self._logits)

  def testBinaryClassificationWithLogitsInput(self):
    n_classes = 2
    head = head_lib._multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
          logits_input=((0., 0.), (0., 0.)))
      # logits_input makes the head create its own logits layer.
      w = ("logits/weights:0", "logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      expected_loss = .69314718
      label_mean = np.mean(self._labels)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1. / 2,
          "accuracy/baseline_label_mean": label_mean,
          "accuracy/threshold_0.500000_mean": 1. / 2,
          "auc": 1. / 2,
          "labels/actual_label_mean": label_mean,
          "labels/prediction_mean": .5,  # softmax
          "loss": expected_loss,
          "precision/positive_threshold_0.500000_mean": 0. / 2,
          "recall/positive_threshold_0.500000_mean": 0. / 1,
      }, model_fn_ops)

  def testBinaryClassificationWithLogitsAndLogitsInput(self):
    head = head_lib._multi_class_head(n_classes=2)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
            logits_input=((0., 0.), (0., 0.)), logits=self._logits)

  def testBinaryClassificationEvalMode(self):
    n_classes = 2
    head = head_lib._multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.EVAL, _noop_train_op,
          logits=self._logits)
      # No training op is created in EVAL mode.
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testBinaryClassificationInferMode(self):
    n_classes = 2
    head = head_lib._multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.INFER, _noop_train_op,
          logits=self._logits)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      # INFER mode exposes a logistic-regression output alternative.
      self.assertEquals(1, len(model_fn_ops.output_alternatives))
      self.assertEquals(constants.ProblemType.LOGISTIC_REGRESSION,
                        model_fn_ops.output_alternatives[None][0])

  def testErrorInSparseTensorLabels(self):
    n_classes = 2
    head = head_lib._multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default():
      labels = sparse_tensor.SparseTensorValue(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 1),
          dense_shape=(3, 1))
      with self.assertRaisesRegexp(ValueError,
                                   "SparseTensor is not supported as labels."):
        head.create_model_fn_ops(
            {},
            labels,
            model_fn.ModeKeys.TRAIN,
            _noop_train_op,
            logits=((1.,), (1.,), (3.,)))

  def testBinaryClassificationWithLabelName(self):
    label_name = "my_label"
    head = head_lib._multi_class_head(n_classes=2, label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels={label_name: self._labels},
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testBinaryClassificationWithWeights(self):
    n_classes = 2
    head = head_lib._multi_class_head(
        n_classes=n_classes, weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      # The second example is zero-weighted, so only the first contributes.
      weights = ((1.,), (0.,))
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_total_loss = .31326166
      _assert_metrics(
          self,
          expected_total_loss / len(weights),
          {
              "accuracy": 1. / 1,
              "accuracy/baseline_label_mean": 1. / 1,
              "accuracy/threshold_0.500000_mean": 1. / 1,
              "auc": 0. / 1,
              "labels/actual_label_mean": 1. / 1,
              "labels/prediction_mean": .731059,  # softmax
              # TODO(ptucker): Is this the correct eval loss, sum not average?
              "loss": expected_total_loss,
              "precision/positive_threshold_0.500000_mean": 1. / 1,
              "recall/positive_threshold_0.500000_mean": 1. / 1,
          },
          model_fn_ops)

  def testBinaryClassificationWithCenteredBias(self):
    head = head_lib._multi_class_head(n_classes=2, enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
          logits=self._logits)
      # Centered bias adds a trainable bias variable and its Adagrad slot.
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
class MultiClassModelHeadTest(test.TestCase):
  """Tests for head_lib._multi_class_head (softmax multi-class head).

  Fixture: a single example whose logits favour class 0 while the true
  label is class 2, so accuracy is 0 and the loss is well above log(3).
  """

  def setUp(self):
    # One example, three classes: logits favour class 0, label is class 2.
    self._logits = ((1., 0., 0.),)
    self._labels = (2,)

  def _expected_eval_metrics(self, expected_loss):
    """Metric dict expected for the setUp fixture at the given loss."""
    return {
        "accuracy": 0.,
        "auc": 1. / 4,
        "loss": expected_loss,
        "auc/class0": 1.,
        "auc/class1": 1.,
        "auc/class2": 0.,
        "labels/actual_label_mean/class0": 0. / 1,
        "labels/actual_label_mean/class1": 0. / 1,
        "labels/actual_label_mean/class2": 1. / 1,
        "labels/logits_mean/class0": self._logits[0][0],
        "labels/logits_mean/class1": self._logits[0][1],
        "labels/logits_mean/class2": self._logits[0][2],
        "labels/prediction_mean/class0": self._logits[0][0],
        "labels/prediction_mean/class1": self._logits[0][1],
        "labels/prediction_mean/class2": self._logits[0][2],
        "labels/probability_mean/class0": 0.576117,  # softmax
        "labels/probability_mean/class1": 0.211942,  # softmax
        "labels/probability_mean/class2": 0.211942,  # softmax
    }

  def testMultiClassWithLogits(self):
    # Training with explicit logits creates no variables of its own.
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
          logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514446
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassWithInvalidLogits(self):
    # Logits with one column too many must be rejected at graph-build time.
    head = head_lib._multi_class_head(n_classes=len(self._logits[0]) + 1)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
            logits=self._logits)

  def testMultiClassWithLogitsInput(self):
    # logits_input makes the head build its own logits layer, so weight and
    # bias variables must appear; with zero inputs the loss is log(3).
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
          logits_input=((0., 0.),))
      w = ("logits/weights:0", "logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.0986123
      _assert_metrics(self, expected_loss, {
          "accuracy": 0.,
          "auc": 2. / 4,
          "loss": expected_loss,
          "auc/class0": 1.,
          "auc/class1": 1.,
          "auc/class2": 0.,
          "labels/actual_label_mean/class0": 0. / 1,
          "labels/actual_label_mean/class1": 0. / 1,
          "labels/actual_label_mean/class2": 1. / 1,
          "labels/logits_mean/class0": 0.,
          "labels/logits_mean/class1": 0.,
          "labels/logits_mean/class2": 0.,
          "labels/prediction_mean/class0": 1.,
          "labels/prediction_mean/class1": 0.,
          "labels/prediction_mean/class2": 0.,
          "labels/probability_mean/class0": 0.333333,  # softmax
          "labels/probability_mean/class1": 0.333333,  # softmax
          "labels/probability_mean/class2": 0.333333,  # softmax
      }, model_fn_ops)

  def testMultiClassWithLogitsAndLogitsInput(self):
    # Supplying both logits and logits_input is ambiguous and must fail.
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
            logits_input=((0., 0.),), logits=self._logits)

  def testMultiClassEvalMode(self):
    # EVAL mode: same loss/metrics as TRAIN, but no train_op is produced.
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, self._labels, model_fn.ModeKeys.EVAL, _noop_train_op,
          logits=self._logits)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514446
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassWithWeight(self):
    # A per-example weight scales the reported loss but not the metrics,
    # which are computed from the unweighted per-example loss.
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      weight = .1
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weight},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514446
      _assert_metrics(self, expected_loss * weight,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testInvalidNClasses(self):
    # Multi-class heads require at least two classes.
    for n_classes in (None, -1, 0, 1):
      with self.assertRaisesRegexp(ValueError, "n_classes must be > 1"):
        head_lib._multi_class_head(n_classes=n_classes)
class BinarySvmModelHeadTest(test.TestCase):
  """Tests for head_lib._binary_svm_head (hinge-loss binary classifier)."""

  def setUp(self):
    # Prediction for first example is in the right side of the hyperplane
    # (i.e., < 0) but it is within the [-1,1] margin. There is a 0.5 loss
    # incurred by this example. The 2nd prediction is outside the margin so it
    # incurs no loss at all.
    self._predictions = ((-.5,), (1.2,))
    self._labels = (0, 1)
    self._expected_losses = (.5, 0.)

  def testBinarySVMWithLogits(self):
    # Plain hinge loss on explicit logits; no variables are created.
    head = head_lib._binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          self._labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=self._predictions)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithInvalidLogits(self):
    # Two-column logits are incompatible with a binary SVM head.
    head = head_lib._binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
            logits=np.ones((2, 2)))

  def testBinarySVMWithLogitsInput(self):
    # logits_input makes the head build its own logits layer (weights+bias);
    # zero inputs yield zero logits, hence hinge loss 1 for both examples.
    head = head_lib._binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          self._labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits_input=((0., 0.), (0., 0.)))
      w = ("logits/weights:0", "logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.
      _assert_metrics(self, expected_loss, {
          "accuracy": .5,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithLogitsAndLogitsInput(self):
    # Supplying both logits and logits_input is ambiguous and must fail.
    head = head_lib._binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {},
            self._labels,
            model_fn.ModeKeys.TRAIN,
            _noop_train_op,
            logits_input=((0., 0.), (0., 0.)),
            logits=self._predictions)

  def testBinarySVMEvalMode(self):
    # EVAL mode: same loss/metrics, but no train_op.
    head = head_lib._binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          self._labels,
          model_fn.ModeKeys.EVAL,
          _noop_train_op,
          logits=self._predictions)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithLabelName(self):
    # Labels may arrive in a dict keyed by a custom label_name.
    label_name = "my_label"
    head = head_lib._binary_svm_head(label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          {label_name: self._labels},
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=self._predictions)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithWeights(self):
    # Weighted loss: reported loss is weighted-sum / num-examples, while the
    # "loss" metric is weighted-sum / sum-of-weights.
    head = head_lib._binary_svm_head(weight_column_name="weights")
    with ops.Graph().as_default(), session.Session():
      weights = (7., 11.)
      model_fn_ops = head.create_model_fn_ops(
          features={"weights": weights},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=_noop_train_op,
          logits=self._predictions)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_weighted_sum = np.sum(
          np.multiply(weights, self._expected_losses))
      _assert_metrics(self, expected_weighted_sum / len(weights), {
          "accuracy": 1.,
          "loss": expected_weighted_sum / np.sum(weights),
      }, model_fn_ops)

  def testBinarySVMWithCenteredBias(self):
    # enable_centered_bias adds a trainable bias variable (plus its Adagrad
    # slot) and an extra summary tag for the bias value.
    head = head_lib._binary_svm_head(enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          self._labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=self._predictions)
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)
class MultiHeadTest(test.TestCase):
  """Tests for head_lib._multi_head combining two multi-class heads.

  The combined head consumes 3 + 4 = 7 logits columns; the first head takes
  the first 3, the second head the remaining 4.
  """

  def testTrain_withNoHeadWeights(self):
    # Without head weights the combined loss is the plain sum of both losses.
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    # TRAIN mode exposes loss and train_op only.
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)

  def testTrain_withHeadWeights(self):
    # Head weights (1, .5) scale the per-head losses before summing.
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(1.531, sess.run(model_fn_ops.loss), places=3)

  def testInfer(self):
    # INFER mode produces per-head predictions and output alternatives, but
    # no loss, train_op, or eval metrics.
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.INFER,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertEquals(2, len(model_fn_ops.output_alternatives))

    # Tests predictions keys: predictions are keyed by (head_name, key).
    pred_keys = model_fn_ops.predictions.keys()
    self.assertIn(
        ("head1", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
    self.assertIn(
        ("head1", prediction_key.PredictionKey.CLASSES), pred_keys)
    self.assertIn(
        ("head2", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
    self.assertIn(
        ("head2", prediction_key.PredictionKey.CLASSES), pred_keys)

    # Tests output alternative: one (problem_type, outputs) pair per head.
    out_alts = model_fn_ops.output_alternatives
    self.assertEquals(constants.ProblemType.CLASSIFICATION,
                      out_alts["head1"][0])
    self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                  out_alts["head1"][1].keys())
    self.assertIn(
        prediction_key.PredictionKey.CLASSES, out_alts["head1"][1].keys())
    self.assertEquals(constants.ProblemType.CLASSIFICATION,
                      out_alts["head2"][0])
    self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                  out_alts["head2"][1].keys())
    self.assertIn(
        prediction_key.PredictionKey.CLASSES, out_alts["head2"][1].keys())

  def testEval(self):
    # EVAL mode produces predictions, loss and per-head metrics, no train_op.
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.EVAL,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertIsNotNone(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)

    metric_ops = model_fn_ops.eval_metric_ops

    # Tests eval keys: metric names are suffixed with the head name.
    self.assertIn("accuracy/head1", metric_ops.keys())
    self.assertIn("accuracy/head2", metric_ops.keys())
def _noop_train_op(unused_loss):
  """Train-op stub for tests that do not exercise an optimizer."""
  return control_flow_ops.no_op()


if __name__ == "__main__":
  test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
737cbcf0fa9ca6c5ac71c112d5726ac3704e5da5 | ffb7e32769cf5928fcca5abf3a60f86e456bb79a | /base_folder/type_folder/type_.py | ea021ebcb24a63aee9d5be8892f8a7673a82a70f | [
"Apache-2.0"
] | permissive | relax-space/python-learning | 92f8173af26d05beae89e928b1df4aa907b5701b | 45457fc6c3a6583cb28bd14161439ec557c4ce2b | refs/heads/master | 2022-05-03T13:33:45.646838 | 2021-11-30T21:38:06 | 2022-03-19T15:34:22 | 250,942,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from typing import Dict, List
class Dto:
    """Minimal data-transfer object that carries a single ``name`` value."""

    def __init__(self, name):
        # Keep the supplied value as-is; no validation is performed.
        self.name = name
# NOTE(review): the annotation says List[str] but an int is appended —
# annotations are not enforced at runtime, so this still runs.
err_list: List[str] = []
err_list.append(12121)
print(err_list)
# Rebind (and re-annotate) the same name as a list of Dto objects.
err_list: List[Dto] = []
err_list.append(Dto(name='1'))
print(err_list[0].__dict__)
print('1212{0},1212{1}'.format(5, 6))
from string import Formatter
# Formatter().parse() yields (literal, field_name, format_spec, conversion)
# tuples; collect the replacement-field names found in the template.
yourstring = "{{path/to/{self.category}/{self.name}}}"
fieldnames = [fname for _, fname, _, _ in Formatter().parse(yourstring) if fname]
print(fieldnames)
# '{{' and '}}' are escaped braces, so this template contains no fields.
yourstring2 = "{{path/to/}}"
fieldnames2 = [fname for _, fname, _,
               _ in Formatter().parse(yourstring2) if fname]
print(fieldnames2)
| [
"xiaoxm_001@163.com"
] | xiaoxm_001@163.com |
e9742b2d398fb6e4bd6b6e5b85cc647b302f8db7 | 2e1dde592cb2076dd42ce300319a816a905b3b31 | /src/examples/ConquestService/testNetwork.py | 87eee1a62167483dff50532c0b1a9c7f943ce7b2 | [
"MIT"
] | permissive | mikesneider/YAFS | 601ec4fe224b408770988cb129fb14ae3b0a996e | 1b805cb0dc5ceb438ab335c347750e1cb1cdd34a | refs/heads/master | 2021-01-07T14:54:33.985507 | 2019-10-17T13:19:54 | 2019-10-17T13:19:54 | 241,733,345 | 1 | 0 | MIT | 2020-02-19T21:47:04 | 2020-02-19T21:47:03 | null | UTF-8 | Python | false | false | 1,116 | py |
import networkx as nx
import json
# Python 2 script: build an undirected weighted graph from a JSON network
# definition, then explore shortest and bounded-length simple paths.
# NOTE(review): the file handle from open() is never closed.
data = json.load(open('exp1/networkDefinition.json'))
G = nx.Graph()
for edge in data["link"]:
    # Each link carries bandwidth (BW) and propagation delay (PR) attributes.
    G.add_edge(edge["s"], edge["d"], BW=edge["BW"], PR=edge["PR"])
# ok
print len(G.nodes)
minPath = nx.shortest_path(G, source=0, target=3)
print "Min path %s" % minPath
# Enumerate simple paths whose length is bounded by the shortest path's size.
for path in nx.all_simple_paths(G, source=0, target=9, cutoff=len(minPath)):
    print(path)
# 0 4 7 9
for path in nx.all_simple_paths(G, source=0, target=3, cutoff=len(minPath)):
    print(path)
# 0 4 7 3
print G[0][15]
from itertools import islice
def k_shortest_paths(G, source, target, k, weight=None):
    """Return the first k simple paths from source to target, generated from
    lowest to highest total *weight* (hop count when weight is None)."""
    return list(islice(nx.shortest_simple_paths(G, source, target, weight=weight), k))
# (translated) Nice: shortest_simple_paths orders results from lowest to
# highest total weight, so these paths come back sorted by "BW".
for path in k_shortest_paths(G, 0, 9, 10, "BW"):
    bw = 0
    pr = 0
    # Accumulate BW and PR over each consecutive edge of the path.
    for i in range(len(path)-1):
        # print path[i],path[i+1]
        # print G[path[i]][path[i+1]]
        bw += G[path[i]][path[i+1]]["BW"]
        pr += G[path[i]][path[i+1]]["PR"]
    print path, " BW:%i PR:%i" % (bw, pr)
    # it = iter(path)
    # for x in it:
# print x,next(it) | [
"isaac.lera@gmail.com"
] | isaac.lera@gmail.com |
26316ce49a825944d441fc9e9660c411724a665c | e99b52816d4c89fd248f524db1875afb02e34b9d | /tareas/Tarea 7/Tarea7.py | f0a895aec92a5fef2f4d535bba5ba89fa63fce6c | [
"MIT"
] | permissive | mdmirabal/uip-prog3 | 6a96165651ac6aa6c2c4a052aa3fdbd01d525cb3 | e093bea7ee0f6146293719621ec0beaa26343516 | refs/heads/master | 2016-08-03T22:37:59.684547 | 2015-12-13T00:26:26 | 2015-12-13T00:26:26 | 42,831,348 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from kivy.app import App
# Shared attendance registry: guardar() appends to it, exportar() dumps it.
lista = []


def guardar(name):
    """Record *name* in the attendance list and echo the updated list."""
    lista.append(name)
    print(lista)
def exportar():
    """Write the current attendance list to 'asistencia.txt'.

    Uses a context manager so the file handle is always closed — the
    original open()/close() pair leaked the handle if write() raised.
    Overwrites any previous contents of the file.
    """
    with open('asistencia.txt', 'wt') as archivo:
        archivo.write(str(lista))
class AsistenciaApp(App):
    """Minimal Kivy application shell.

    build() returns nothing, so the UI presumably comes from a matching
    .kv file loaded by Kivy's naming convention — TODO confirm.
    """

    def build (self):
        pass
if __name__ == '__main__':
    # Start the Kivy event loop only when run as a script.
    AsistenciaApp().run()
| [
"vmdaac@mail.com"
] | vmdaac@mail.com |
b4e490e5958a77b59810b424835b255f12306e78 | 498912dfdbf6d30d2ecf48831a77d810b0c6feaf | /case/migrations/0001_initial.py | a71cd7fc289bb8cbf5fb4a002950bffc4ad982e1 | [] | no_license | sout001/case | 23ad7049e94496e300f592a28898628b9a77fc1c | 37dc8ae0f3b545516f0f5feee4c3c54f4d25a3bc | refs/heads/master | 2023-02-16T17:17:46.662422 | 2021-01-14T09:07:53 | 2021-01-14T09:07:53 | 326,878,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | # Generated by Django 2.0 on 2021-01-05 02:26
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the FileUpload model backed
    # by the "resource_center" table.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='FileUpload',
            fields=[
                # Auto-increment integer primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Creation/update timestamps default to "now" at save time.
                ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='修改时间')),
                # Nullable — presumably a soft-delete marker (NULL = live row);
                # TODO confirm against application code.
                ('delete_time', models.DateTimeField(blank=True, null=True, verbose_name='删除时间')),
                ('file_name', models.CharField(max_length=100, verbose_name='文件名称')),
                ('file_cover', models.ImageField(upload_to='', verbose_name='文件封面')),
                ('file_uid', models.UUIDField(blank=True, null=True, verbose_name='文件uid')),
                ('file', models.FileField(upload_to='', verbose_name='文件')),
                # Stored as text, not bytes.
                ('file_size', models.CharField(max_length=100, verbose_name='文件大小')),
            ],
            options={
                'verbose_name': '资源中心',
                'verbose_name_plural': '资源中心',
                'db_table': 'resource_center',
            },
        ),
    ]
| [
"chenshaoqiang@huanyin.onaliyun.com"
] | chenshaoqiang@huanyin.onaliyun.com |
fab63b0499414f2346a1e35802c0ee084f7e31f4 | 3ec57d7c860b048a9978d769c8dd99732d437b66 | /firstapp/loginsys/views.py | b58654195a75fc60eb845521258bb832e21d44fc | [] | no_license | WolfgangGrimmer/Test | 69c19fa1c07fd63a369900bbea8510afb9e613bf | f403275c6f314115b3f66afde1abbfae47b90bd2 | refs/heads/master | 2020-04-09T19:21:47.909605 | 2018-12-05T16:05:08 | 2018-12-05T16:05:08 | 160,541,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | # -*- coding: utf-8 -*-
from django.shortcuts import render_to_response, redirect
from django.contrib import auth
from django.contrib.auth.forms import UserCreationForm
from django.core.context_processors import csrf
def login(request):
    """Authenticate a user from POSTed credentials and start a session.

    GET renders the empty login form; POST validates the credentials and
    redirects to the site root on success, or re-renders the form with an
    error message otherwise.

    Fix: removed a leftover `print "test"` debug statement that wrote to
    stdout on every request.
    """
    args = {}
    args.update(csrf(request))  # CSRF token for the template's form tag.
    if request.POST:
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('/')
        else:
            # User-facing message (Ukrainian): "user does not exist".
            args['login_error'] = "Користувача не існує"
            return render_to_response('login.html', args)
    else:
        return render_to_response('login.html', args)
def logout(request):
    """End the current session and send the user back to the site root."""
    auth.logout(request)
    return redirect("/")
def register(request):
    """Create a new user account via Django's UserCreationForm.

    GET renders an empty registration form; POST validates it, saves the
    user, logs the new account in and redirects to the site root.  On
    validation errors the bound form (carrying the messages) is re-rendered.
    """
    args = {}
    args.update(csrf(request))  # CSRF token for the template's form tag.
    args['form'] = UserCreationForm()
    if request.POST:
        newuser_form = UserCreationForm(request.POST)
        if newuser_form.is_valid():
            newuser_form.save()
            # Re-authenticate with the just-chosen credentials so login()
            # receives a user object with an auth backend attached.
            newuser = auth.authenticate(username=newuser_form.cleaned_data['username'],
                                        password=newuser_form.cleaned_data['password2'])
            auth.login(request, newuser)
            return redirect('/')
        else:
            args['form'] = newuser_form  # bound form carries the errors
    return render_to_response('register.html', args)
| [
"noreply@github.com"
] | noreply@github.com |
2175fe3f2ecea3f112a13f9afcf4c4e90e474152 | a8dd7eb5b7901178e24d51cf768c6d40e61b3fde | /dijkstras_algorithm.py | 8d9628a2fcc6c48d8929117ad38ae9326594b2f7 | [] | no_license | Pasha-lt/Algorithm_and_small_program | 06462f1fbfc648c92edea72453aa20976183ac27 | 09c6697f0755b75056d00ac293f651b7cc7e0838 | refs/heads/master | 2023-08-25T03:11:22.028375 | 2021-10-24T06:59:19 | 2021-10-24T06:59:19 | 215,867,752 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,781 | py | raph = {}
# Adjacency map of the weighted graph: graph[u][v] = cost of edge u -> v.
# Fix: the `graph = {}` initialization was missing before the first
# assignment, which raised NameError.
graph = {}
graph['start'] = {}
graph['start']['a'] = 6
graph['start']['b'] = 2
# print(graph['start'].keys())  # lists all neighbours of the start node.
# Add the remaining nodes and their neighbours:
graph['a'] = {}
graph['a']['fin'] = 1
graph['b'] = {}
graph['b']['a'] = 3
graph['b']['fin'] = 5
graph['fin'] = {}  # The finish node has no neighbours.
# Cost table: cheapest known cost from 'start' to each node so far.
infinity = float('inf')  # "unknown / unreachable" marker
costs = {}
costs['a'] = 6
costs['b'] = 2
costs['fin'] = infinity
# Parents table: predecessor of each node on the cheapest known path.
parents = {}
parents['a'] = 'start'
parents['b'] = 'start'
parents['fin'] = None
# Nodes that are already finalised; a node must never be processed twice.
processed = []


def find_lowest_cost_node(costs):
    """Return the unprocessed node with the smallest finite cost, or None.

    Nodes whose cost is still infinity are treated as unreachable and are
    never selected; when no candidate remains the search is finished.
    """
    pending = [node for node in costs
               if node not in processed and costs[node] < float('inf')]
    if not pending:
        return None
    return min(pending, key=lambda node: costs[node])
# Run the algorithm.
node = find_lowest_cost_node(costs)  # cheapest unprocessed node, if any
while node is not None:  # loop ends once every reachable node is processed
    cost = costs[node]
    neighbors = graph[node]
    for n in neighbors.keys():  # relax every edge leaving this node
        new_cost = cost + neighbors[n]
        if costs[n] > new_cost:  # found a cheaper route to the neighbour
            costs[n] = new_cost  # record the improved cost
            parents[n] = node  # this node becomes the neighbour's parent
    processed.append(node)  # mark the node as processed
    node = find_lowest_cost_node(costs)  # pick the next node and repeat
print("Cost from the start to each node:")
print(costs)
| [
"noreply@github.com"
] | noreply@github.com |
70b83b4e8d4e43fd66482974cc9405558e7acd9d | 45681de2f46bb8712ab65588fc0f8dece7ef97b2 | /bipolar/open_dns_relay_ip_from_es.py | 4635dbd1401c166be050d3c323e95d64a625a113 | [] | no_license | cleverhandle1/bipolar | 3db51315c574367df383d27d24f07d1c645236ca | 36b66a7b299ebbc875c8d9263e98a70548b1a1fe | refs/heads/master | 2020-04-05T18:14:38.197680 | 2019-02-12T06:23:51 | 2019-02-12T06:23:51 | 157,093,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | #!/usr/bin/env python
import json
import sys
from elasticsearch_dsl import Search
from elasticsearch_dsl.query import QueryString
from elasticsearch import Elasticsearch
# Python 2 script: query an Elasticsearch index for scan results within a
# time window and print one IP per matching document.
# Lucene query string; presumably selects hosts that answered recursive DNS
# queries (open resolvers, per the script's name) — TODO confirm.
query = "recursion AND true"
es = Elasticsearch('192.168.0.69')
# Time window from the command line: <start> <end> (any range expression
# Elasticsearch accepts for @timestamp).
start = sys.argv[1]
end = sys.argv[2]
s = Search(using=es, index="celery")
s = s.query("query_string", query=query, analyze_wildcard=True)
s = s.filter('range', ** {'@timestamp': {'gte': start, 'lt': end}})
# scan() streams every matching document instead of a single result page.
response = s.scan()
for hit in response:
    # hit.result holds a JSON string; extract the scanned host's IP.
    print json.loads(hit.result)['result']['ip']
| [
"cleverhandle1@protonmail.com"
] | cleverhandle1@protonmail.com |
a4536f0fe2f8a612a01725277078ce3b79778683 | 5707a6e95d6388a320416d7c06c275daf61e3406 | /Unidad2/ej1.py | 54e6a486f22b1ad338b6cbaf6b579ebbc1bebc68 | [] | no_license | hectorrdz98/lenguajes-y-automatas-1 | ed3406e8a7b7eaad489c530146cddac5a972bc81 | 0004c4696a92cdd33a86a24f82d0f9b7e01e455c | refs/heads/master | 2022-01-21T21:24:21.819330 | 2019-05-29T12:42:42 | 2019-05-29T12:42:42 | 167,203,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py |
"""
Autor: Hector Rodriguez
"""
"""
Este codigo lee el archivo doc.txt (debe estar al mismo nivel de carpeta que este archivo)
y muestra en consola para cada linea del documento que tipo de elemento o a que categoría pertenece.
Estas son mis condiciones:
1.- Entero: Números de 0-9
2.- Flotante: Números de 0-9 seguido de un . y más números de 0-9
3.- Variable: Conjunto de letras de la A-Z mayúsculas y minúsculas, _ y dígitos de 0-9 que no debe iniciar con 0-9
4.- String: Cadena de carateres que inicia y cierra con "
5.- Aritmética: Expresión con entero, flotante o variable seguida de un * + - / ^ y luego otro entero, flotante o variable
no pueden haber dos * + - / ^ juntos o terminar la expresión con * + - / ^
6.- Relacional: Expresión con entero, flotante o variable seguida de un < > y un posible = o un != o == y luego otro entero, flotante o variable
no pueden haber dos < > y un posible = o un != o == juntos o terminar la expresión con < > y un posible = o un != o ==
"""
import re
# Classification patterns, checked in insertion order; first match wins.
# Fix: the sign class was written [\-|\+], which also matched a literal
# '|' (so "|5" classified as an integer); it is now [-+].
RegexPatterns = {
    # Optional sign followed by one or more digits.
    'entero': r'^[-+]?\d+$',
    # Optional sign, digits, a dot, and more digits.
    'flotante': r'^[-+]?\d+\.\d+$',
    # Identifier: letter/underscore start, at most 30 word chars in total.
    'variable': r'^[a-zA-Z_]\w{0,29}$',
    # Double-quoted string with no embedded quotes.
    'string': r'^\"[^\"]*\"$',
    # Operands (int/float/identifier) joined by * / + - ^ operators.
    'aritmetica': r'^(\d+|\d+\.\d+|[a-zA-Z_]\w{0,29})([\*\/\+\-\^](\d+|\d+\.\d+|[a-zA-Z_]\w{0,29}))+$',
    # Operands joined by relational operators: <, >, <=, >=, != or ==.
    'relacional': r'^(\d+|\d+\.\d+|[a-zA-Z_]\w{0,29})(([\<\>]\=?|[\!\=]=)(\d+|\d+\.\d+|[a-zA-Z_]\w{0,29}))+$'
}
try:
    with open('doc.txt', encoding='utf-8') as file:
        for line in file:
            # Strip only the trailing newline for display.  The original
            # sliced line[0:len(line)-1], which chopped a real character
            # off a final line that has no terminating '\n'.
            texto = line.rstrip('\n')
            flag = False
            # Try each category in order; report the first pattern that
            # matches the whole line.
            for regexName, regex in RegexPatterns.items():
                foundRegex = re.findall(regex, line)
                if line != '\n':  # skip blank lines
                    if foundRegex != []:
                        flag = True
                        print('{}: es {}'.format(texto, regexName))
                        break
            if not flag and line != '\n':
                print('{}: no lo conozco'.format(texto))
except Exception as e:
    print('Error al abrir el archivo: {}'.format(e))
"="
] | = |
8ada0d71b51aba2305aa55b8d1b6763a39766144 | 76b49bab52dc088828816bf00605471af4536dbc | /server/aplicacionesweb/settings.py | a28d868116ac3be06e430bde862039a2e9266b1b | [] | no_license | mboscovich/Kerberus-Control-Parental | a5352d53c588854805f124febf578345f3033ed2 | 5025078af9ebdcf7b3feb4b0d5efa6cbde6e6a37 | refs/heads/master | 2020-12-24T16:58:51.288519 | 2015-05-25T15:18:25 | 2015-05-25T15:18:25 | 23,494,363 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,693 | py | # Django settings for aplicacionesweb project.
# NOTE(review): DEBUG must be False in production — it leaks stack traces
# and settings to visitors on errors.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Maximiliano Boscovich', 'maximiliano@kerberus.com.ar'),
)

MANAGERS = ADMINS

# NOTE(review): database credentials are hard-coded and committed; prefer
# environment variables or a non-committed local settings module.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'kerberus_webapps',           # Or path to database file if using sqlite3.
        'USER': 'kerberus_appsweb',           # Not used with sqlite3.
        'PASSWORD': 'p3r1c0',                 # Not used with sqlite3.
        'HOST': '',                           # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                           # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Argentina/Buenos_Aires'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es-ar'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): SECRET_KEY is committed to version control; rotate it and
# load it from the environment instead.
SECRET_KEY = '0=&pkp2ww5pr0*-d#w0tg94f%c-%y&q(1*!m56&flax^^ca+zi'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'aplicacionesweb.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # NOTE(review): machine-specific absolute path; breaks on other hosts.
    "/home/mboscovich/proyectos/control_parental/server/aplicacionesweb/templates",
    #os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    #'django.contrib.admindocs',
    'inicio',
    'books',
)
| [
"maximiliano@boscovich.com.ar"
] | maximiliano@boscovich.com.ar |
8ae5df63ca9e208144fb5f3e5afdb6e50e2d4245 | 9657a5bc5deb8125001463b15e6003fb525d6d87 | /{{cookiecutter.repo_name}}/features/environment.py | bca9d99b15a02c3412cfe9969ec4edd40dff9db9 | [] | no_license | ajaxon/cookiecutter-django | d6ded5febb902a032de23b331a9e68b870928a77 | 2c07704d39130b1ae033e264724710406c28eec8 | refs/heads/master | 2020-12-26T03:33:04.226196 | 2016-03-26T14:14:56 | 2016-03-26T14:14:56 | 49,108,599 | 0 | 0 | null | 2016-01-06T03:06:49 | 2016-01-06T03:06:49 | null | UTF-8 | Python | false | false | 890 | py |
from behave import *
from selenium.webdriver.firefox.webdriver import WebDriver
from django.core import management
def before_all(context):
    """Behave hook: runs once before the whole test run starts."""
    # NOTE(review): the original comment said PhantomJS, but this actually
    # launches Firefox via Selenium's WebDriver.
    context.browser = WebDriver()
def before_scenario(context, scenario):
    """Behave hook: runs before every scenario."""
    # Reset the database before each scenario.
    # This means we can create, delete and edit objects within an
    # individual scenario without these changes affecting our
    # other scenarios.
    management.call_command('flush', verbosity=0, interactive=False)

    # At this stage we can (optionally) mock additional data to set up in the
    # database. For example, if we know that all of our tests require a
    # 'SiteConfig' object, we could create it here.
def after_all(context):
    """Behave hook: runs once after the whole test run finishes."""
    # Quit our browser once we're done, and drop the stale reference.
    context.browser.quit()
    context.browser = None
| [
"jaxon77@gmail.com"
] | jaxon77@gmail.com |
09997d079fdba85719df5fe4ccf2d3f6d5988d74 | 0e9789668dcfeeedacf78aa9917bb95ec9a5f763 | /preprocessing/load_data.py | 5ff6f999bcc4fb4aae3d0baad46dc27ccc9be878 | [] | no_license | mma1979/Simple-Sentence-Similarity | 76151619bcdfd39054f8b6cbe1e26af99d0f6a37 | dfacb34c325df771056f34f85c7927148d69691c | refs/heads/master | 2022-04-11T00:15:07.415752 | 2020-01-28T13:06:42 | 2020-01-28T13:06:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | import os
import pandas as pd
import requests
import tensorflow as tf
def load_sts_dataset(filename):
    """
    Loads a subset of the STS dataset into a DataFrame.
    In particular both sentences and their human rated similarity score.
    :param filename: path to an STS benchmark tab-separated file
    :return: DataFrame with columns sent_1, sent_2 and sim (float)
    """
    sent_pairs = []
    with tf.gfile.GFile(filename, "r") as f:
        for line in f:
            ts = line.strip().split("\t")
            # Columns 5 and 6 hold the sentence pair; column 4 holds the
            # human-rated similarity score.
            sent_pairs.append((ts[5], ts[6], float(ts[4])))
    return pd.DataFrame(sent_pairs, columns=["sent_1", "sent_2", "sim"])
def download_and_load_sts_data():
    """Download and extract the STS benchmark archive (cached by Keras),
    then load its dev and test splits.

    :return: (sts_dev, sts_test) DataFrames
    """
    sts_dataset = tf.keras.utils.get_file(
        fname="Stsbenchmark.tar.gz",
        origin="http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz",
        extract=True)
    # get_file returns the archive path; the extracted files sit next to it.
    sts_dev = load_sts_dataset(os.path.join(os.path.dirname(sts_dataset), "stsbenchmark", "sts-dev.csv"))
    sts_test = load_sts_dataset(os.path.join(os.path.dirname(sts_dataset), "stsbenchmark", "sts-test.csv"))
    return sts_dev, sts_test
def download_sick_dataset(url):
    """Fetch one SICK split over HTTP and parse it into a DataFrame.

    :param url: URL of a tab-separated SICK file with a header row
    :return: DataFrame with columns idx, sent_1, sent_2, sim (numeric), label
    """
    response = requests.get(url).text
    lines = response.split("\n")[1:]  # drop the header row
    lines = [l.split("\t") for l in lines if len(l) > 0]
    lines = [l for l in lines if len(l) == 5]  # keep only well-formed rows
    df = pd.DataFrame(lines, columns=["idx", "sent_1", "sent_2", "sim", "label"])
    df['sim'] = pd.to_numeric(df['sim'])
    return df
def download_and_load_sick_dataset():
    """Download the three SICK splits and return (all, train, test, dev).

    The combined frame concatenates train + test + dev in that order,
    matching the original ``DataFrame.append`` chain (``append`` was
    deprecated in pandas 1.4 and removed in 2.0, hence ``pd.concat``).
    """
    sick_train = download_sick_dataset(
        "https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_train.txt")
    sick_dev = download_sick_dataset(
        "https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_trial.txt")
    sick_test = download_sick_dataset(
        "https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_test_annotated.txt")
    # Same row order and (non-reset) indices as the old append() chain.
    sick_all = pd.concat([sick_train, sick_test, sick_dev])
    return sick_all, sick_train, sick_test, sick_dev
| [
"rhtdranasinghe@gmail.com"
] | rhtdranasinghe@gmail.com |
6ac6c0894bfa4d2f46e20bd466eb57471523bfb5 | ed78041a12c60e46bb0c4d347c47536e84307a96 | /app/__init__.py | ea59f07f74501ccfe1fe861e921ef187326337da | [] | no_license | Garfield247/news_nlp | 4875842af4249def6ffdc65a6e5896b02610dd8d | e18d178824ea9bf11d3895c58037a211f4b21cb6 | refs/heads/master | 2022-12-11T21:00:36.967826 | 2019-03-15T02:32:46 | 2019-03-15T02:32:46 | 161,159,779 | 0 | 0 | null | 2022-12-08T04:52:15 | 2018-12-10T10:48:41 | JavaScript | UTF-8 | Python | false | false | 945 | py | # -*- coding: utf-8 -*-
from flask import Flask, render_template
from app.config import config
from app.extensions import config_extensions
from app.views import config_blueprint
# Application factory: builds and wires up a single Flask instance.
def create_app(config_name):  # e.g. 'development'
    """Create a Flask app configured for *config_name*."""
    app = Flask(__name__)
    # Load the configuration object, falling back to the default profile.
    app.config.from_object(config.get(config_name) or config['default'])
    # Let the chosen config run its own initialisation hook.
    config[config_name].init_app(app)
    # Wire up extensions, blueprints and custom error pages.
    config_extensions(app)
    config_blueprint(app)
    config_errorhandler(app)
    return app
def config_errorhandler(app):
    """Register application-wide error pages.

    Registered with ``app.errorhandler`` (not on a blueprint) so the handler
    applies to the whole application, not just one blueprint.
    """
    @app.errorhandler(404)
    def page_not_found(e):
        # Return the explicit 404 status code; without it Flask would send
        # the error page with a misleading 200 OK.
        return render_template('errors/404.html'), 404
| [
"Garfield_lv@163.com"
] | Garfield_lv@163.com |
362b027f6cef1cd2e40667e05c2c92ed58d5fc91 | cd1364be99f4e7b439fd2f6decc6e2b215f17507 | /networks.py | 8777825c5c4443fbba07db3c7a489dd5332d6ea5 | [] | no_license | kargenk/ECO_scene_classifier | 44f4d8d28f961fdbc0d7d36f5612e4cda2e5ed30 | 8fee70276a055640e0f7ab8809df88cc2ae78896 | refs/heads/master | 2022-11-24T05:10:49.074319 | 2020-07-20T18:14:44 | 2020-07-20T18:14:44 | 280,678,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,175 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class Conv2D(nn.Module):
    """Standard 2-D convolution block: conv -> batchnorm -> relu."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(Conv2D, self).__init__()
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply conv + BN + ReLU to *x*."""
        return self.conv(x)
class Conv3D(nn.Module):
    """3-D convolution block: conv -> (optional batchnorm -> relu).

    When ``flag`` is True the Conv3d is followed by BatchNorm3d + ReLU;
    otherwise it is a bare Conv3d (useful before a residual addition).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, flag=False):
        super(Conv3D, self).__init__()
        conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding)
        if flag:
            self.conv = nn.Sequential(
                conv,
                nn.BatchNorm3d(out_channels),
                nn.ReLU(inplace=True),
            )
        else:
            self.conv = conv

    def forward(self, x):
        """Apply the (optionally normalised/activated) 3-D convolution."""
        return self.conv(x)
class BasicConv(nn.Module):
    """Stem of the ECO 2-D network: two conv stages with max pooling."""

    def __init__(self):
        super(BasicConv, self).__init__()
        self.conv1 = nn.Sequential(
            Conv2D(3, 64, kernel_size=7, stride=2, padding=3),      # size 1/2
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),  # size 1/2
        )
        self.conv2 = Conv2D(64, 64, kernel_size=1, stride=1)
        self.conv3 = nn.Sequential(
            Conv2D(64, 192, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),  # size 1/2
        )

    def forward(self, x):
        # [3, 224, 224] -> [64, 56, 56] -> [64, 56, 56] -> [192, 28, 28]
        return self.conv3(self.conv2(self.conv1(x)))
class Inception_A(nn.Module):
    """First inception stage of the ECO 2-D network (192 -> 256 channels)."""

    def __init__(self):
        super(Inception_A, self).__init__()
        self.inception1 = Conv2D(192, 64, kernel_size=1, stride=1)
        self.inception2 = nn.Sequential(
            Conv2D(192, 64, kernel_size=1, stride=1),
            Conv2D(64, 64, kernel_size=3, stride=1, padding=1),
        )
        self.inception3 = nn.Sequential(
            Conv2D(192, 64, kernel_size=1, stride=1),
            Conv2D(64, 96, kernel_size=3, stride=1, padding=1),
            Conv2D(96, 96, kernel_size=3, stride=1, padding=1),
        )
        self.inception4 = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            Conv2D(192, 32, kernel_size=1, stride=1),
        )

    def forward(self, x):
        # Each branch keeps the 28x28 spatial size; channels: 64+64+96+32 = 256.
        branches = [self.inception1(x), self.inception2(x),
                    self.inception3(x), self.inception4(x)]
        return torch.cat(branches, dim=1)
class Inception_B(nn.Module):
    """Second inception stage of the ECO 2-D network (256 -> 320 channels)."""

    def __init__(self):
        super(Inception_B, self).__init__()
        self.inception1 = Conv2D(256, 64, kernel_size=1, stride=1)
        self.inception2 = nn.Sequential(
            Conv2D(256, 64, kernel_size=1, stride=1),
            Conv2D(64, 96, kernel_size=3, stride=1, padding=1),
        )
        self.inception3 = nn.Sequential(
            Conv2D(256, 64, kernel_size=1, stride=1),
            Conv2D(64, 96, kernel_size=3, stride=1, padding=1),
            Conv2D(96, 96, kernel_size=3, stride=1, padding=1),
        )
        self.inception4 = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            Conv2D(256, 64, kernel_size=1, stride=1),
        )

    def forward(self, x):
        # Each branch keeps the 28x28 spatial size; channels: 64+96+96+64 = 320.
        branches = [self.inception1(x), self.inception2(x),
                    self.inception3(x), self.inception4(x)]
        return torch.cat(branches, dim=1)
class Inception_C(nn.Module):
    """Final inception stage: 1x1 bottleneck then 3x3 conv (320 -> 96 channels)."""

    def __init__(self):
        super(Inception_C, self).__init__()
        self.inception = nn.Sequential(
            Conv2D(320, 64, kernel_size=1, stride=1),
            Conv2D(64, 96, kernel_size=3, stride=1, padding=1),
        )

    def forward(self, x):
        # [320, 28, 28] -> [96, 28, 28]
        return self.inception(x)
class ECO_2D(nn.Module):
    """Full ECO 2-D module: BasicConv followed by Inception stages A, B, C."""

    def __init__(self):
        super(ECO_2D, self).__init__()
        self.basic_conv = BasicConv()
        self.inception_a = Inception_A()
        self.inception_b = Inception_B()
        self.inception_c = Inception_C()

    def forward(self, x):
        """Map [batch, 3, 224, 224] frames to [batch, 96, 28, 28] features."""
        out = self.basic_conv(x)
        for stage in (self.inception_a, self.inception_b, self.inception_c):
            out = stage(out)
        return out
class Resnet3D_1(nn.Module):
    """First 3-D ResNet stage of ECO: 96 -> 128 channels, one residual unit."""

    def __init__(self):
        super(Resnet3D_1, self).__init__()
        self.conv1 = Conv3D(96, 128, kernel_size=3, stride=1, padding=1,
                            flag=False)
        self.res_1 = nn.Sequential(
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            Conv3D(128, 128, kernel_size=3, stride=1, padding=1, flag=True),
            Conv3D(128, 128, kernel_size=3, stride=1, padding=1, flag=False),
        )
        self.bn_relu = nn.Sequential(
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        shortcut = self.conv1(x)
        # Residual (skip) connection around the BN-ReLU-conv-conv branch.
        out = self.res_1(shortcut) + shortcut
        return self.bn_relu(out)
class Resnet3D_2(nn.Module):
    """Second 3-D ResNet stage: 128 -> 256 channels, stride-2 downsampling."""

    def __init__(self):
        super(Resnet3D_2, self).__init__()
        self.res1 = nn.Sequential(
            Conv3D(128, 256, kernel_size=3, stride=2, padding=1, flag=True),
            Conv3D(256, 256, kernel_size=3, stride=1, padding=1, flag=False),
        )
        # Strided conv on the skip path so shapes match for the addition.
        self.skip1 = Conv3D(128, 256, kernel_size=3, stride=2, padding=1)
        self.res2 = nn.Sequential(
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            Conv3D(256, 256, kernel_size=3, stride=1, padding=1, flag=True),
            Conv3D(256, 256, kernel_size=3, stride=1, padding=1, flag=False),
        )
        # NOTE(review): bn_relu is constructed but never applied in forward();
        # kept as-is for state_dict compatibility — confirm whether it was
        # meant to normalise the second residual sum (Resnet3D_1 applies its).
        self.bn_relu = nn.Sequential(
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # [128, 16, 28, 28] -> [256, 8, 14, 14]
        out = self.res1(x) + self.skip1(x)
        # Second residual unit uses an identity skip.
        return self.res2(out) + out
class Resnet3D_3(nn.Module):
    """Third 3-D ResNet stage: 256 -> 512 channels, stride-2 downsampling.

    Same layer layout as Resnet3D_2, only with doubled channel counts.
    """

    def __init__(self):
        super(Resnet3D_3, self).__init__()
        self.res1 = nn.Sequential(
            Conv3D(256, 512, kernel_size=3, stride=2, padding=1, flag=True),
            Conv3D(512, 512, kernel_size=3, stride=1, padding=1, flag=False),
        )
        # Strided conv on the skip path so shapes match for the addition.
        self.skip1 = Conv3D(256, 512, kernel_size=3, stride=2, padding=1)
        self.res2 = nn.Sequential(
            nn.BatchNorm3d(512),
            nn.ReLU(inplace=True),
            Conv3D(512, 512, kernel_size=3, stride=1, padding=1, flag=True),
            Conv3D(512, 512, kernel_size=3, stride=1, padding=1, flag=False),
        )
        # NOTE(review): bn_relu is constructed but never applied in forward();
        # kept as-is for state_dict compatibility.
        self.bn_relu = nn.Sequential(
            nn.BatchNorm3d(512),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # [256, 8, 14, 14] -> [512, 4, 7, 7]
        out = self.res1(x) + self.skip1(x)
        # Second residual unit uses an identity skip.
        return self.res2(out) + out
class ECO_3D(nn.Module):
    """Full ECO 3-D module: three ResNet3D stages + global average pooling."""

    def __init__(self):
        super(ECO_3D, self).__init__()
        self.res3d_1 = Resnet3D_1()
        self.res3d_2 = Resnet3D_2()
        self.res3d_3 = Resnet3D_3()
        # Collapses the remaining [4, 7, 7] volume to a single value per channel.
        self.global_avg_pool = nn.AvgPool3d(
            kernel_size=(4, 7, 7), stride=1, padding=0)

    def forward(self, x):
        """Map [batch, frames, 96, 28, 28] features to a [batch, 512] vector."""
        # Put channels before frames: [N, frames, C, H, W] -> [N, C, frames, H, W]
        out = torch.transpose(x, 1, 2)
        for stage in (self.res3d_1, self.res3d_2, self.res3d_3):
            out = stage(out)
        out = self.global_avg_pool(out)  # -> [N, 512, 1, 1, 1]
        return out.view(out.size(0), out.size(1))
class ECO_Lite(nn.Module):
    """ECO-Lite: per-frame 2-D features -> 3-D network -> linear classifier."""

    def __init__(self):
        super(ECO_Lite, self).__init__()
        self.eco_2d = ECO_2D()  # 2D Net module
        self.eco_3d = ECO_3D()  # 3D Net module
        # Final classification layer over 400 classes.
        self.fc_final = nn.Linear(512, 400, bias=True)

    def forward(self, x):
        """Map clips [batch, num_segments=16, 3, 224, 224] to logits [batch, 400]."""
        bs, ns, c, h, w = x.shape
        # Fold segments into the batch so every frame passes through the 2-D net.
        frames_2d = self.eco_2d(x.view(-1, c, h, w))  # [bs*ns, 96, 28, 28]
        # Restore the segment axis for the 3-D network.
        clip_feats = self.eco_3d(frames_2d.view(-1, ns, 96, 28, 28))  # [bs, 512]
        return self.fc_final(clip_feats)  # [bs, 400]
if __name__ == '__main__':
    # Smoke test: run one random clip through the full ECO-Lite network.
    # (Per-module shape checks that used to live here were commented-out
    # scaffolding; the end-to-end run below exercises every sub-module.)
    batch_size = 1
    # Test tensors for the (2D | 3D) sub-networks and the full model.
    input_tensor_for2d = torch.randn(batch_size, 3, 224, 224)
    input_tensor_for3d = torch.randn(batch_size, 16, 96, 28, 28)
    input_tensor_forLite = torch.randn(batch_size, 16, 3, 224, 224)
    eco_lite = ECO_Lite()
    eco_lite_out = eco_lite(input_tensor_forLite)
    print('ECO 3D output:', eco_lite_out.shape)  # [batch_size, 400]
| [
"gengen0630@gmail.com"
] | gengen0630@gmail.com |
d5dafb6d91a7f3772bf48f1ae18150067f839a20 | 57be2b50c04433f97f5b3880d4fe0814d65b3d36 | /django/apps/users/views.py | 831215956e652389e19be4a39f3091626b49cbc1 | [] | no_license | ojbkaaaa/- | 279005ee6c32403f34a3663092aa9ad1f04c9e88 | 465ccf33bd3cca1a768077c56bdbdcc1dc9a10c0 | refs/heads/master | 2020-03-16T21:09:11.213994 | 2018-04-02T06:41:11 | 2018-04-02T06:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,978 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.generic import View
from django.core.urlresolvers import reverse
import re
from users.models import User, Address
from django import db
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from django.conf import settings
from itsdangerous import SignatureExpired
from celery_tasks.tasks import send_active_email
from django.contrib.auth import authenticate, login, logout
from utils.views import LoginRequiredMixin
from django_redis import get_redis_connection
from goods.models import GoodsSKU
import json
# Create your views here.
#
# def register(request):
# """用户注册"""
# # 用户的请求方式
# if request.method == "GET":
# # 处理get请求方式,提供页面
# return render(request, "register.html")
# else:
# # 处理post请求方式,处理注册数据
# return HttpResponse("这是post请求返回的页面")
class RegisterView(View):
    """User registration: GET renders the form, POST creates an inactive account."""
    def get(self, request):
        """Handle GET: serve the registration page."""
        return render(request, "register.html")
    def post(self, request):
        """Handle POST: validate the form, create the user, email an activation link."""
        # Form fields submitted by the front end
        user_name = request.POST.get("user_name")
        password = request.POST.get("pwd")
        email = request.POST.get("email")
        allow = request.POST.get("allow")
        # Parameter validation
        if not all([user_name, password, email]):
            # Incomplete form: bounce back to the registration page
            return redirect(reverse("users:register"))
        if not re.match(r"^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$", email):
            # Malformed email address
            return render(request, "register.html", {"errmsg": "邮箱格式不正确!"})
        if allow != "on":
            # The terms-of-service checkbox was not ticked
            return render(request, "register.html", {"errmsg": "请同意协议!"})
        # Business logic: persist the user.
        # (Instead of hashing the password and saving a User() manually:
        # user = User()
        # user.save()
        # ...we use Django's auth system, which hashes the password for us.)
        try:
            user = User.objects.create_user(user_name, email, password)
        except db.IntegrityError:
            # Raised when the username is already registered
            return render(request, "register.html", {"errmsg": "用户已注册!"})
        # New accounts start inactive until the emailed link is clicked
        user.is_active = False
        user.save()
        # Generate the activation token...
        token = user.generate_active_token()
        # ...and send the activation email asynchronously via celery
        send_active_email.delay(email, user_name, token)
        # Back to the front page
        return redirect(reverse("goods:index"))
class ActiveView(View):
    """Account activation via the token link sent by email."""

    def get(self, request, token):
        """Decode *token*, activate the matching user, redirect to login."""
        # Serializer must match the one that generated the token:
        # same SECRET_KEY, 1-hour lifetime.
        serializer = Serializer(settings.SECRET_KEY, 3600)
        try:
            payload = serializer.loads(token)
        except SignatureExpired:
            # Token is older than its lifetime
            return HttpResponse("激活链接已过期")
        # Look up the user whose id was embedded in the token
        user_id = payload.get("confirm")
        try:
            user = User.objects.get(id=user_id)
        except User.DoesNotExist:
            return HttpResponse("用户不存在")
        # Flip the activation flag and persist it
        user.is_active = True
        user.save()
        return redirect(reverse("users:login"))
class LoginView(View):
    """Login: GET renders the form; POST authenticates and merges the cookie cart."""
    def get(self, request):
        """Serve the login page."""
        return render(request, "login.html")
    def post(self, request):
        """Authenticate, set session lifetime, merge cookie cart into redis."""
        user_name = request.POST.get("username")
        password = request.POST.get("pwd")
        remembered = request.POST.get("remembered")
        if not all([user_name, password]):
            return redirect(reverse("users:login"))
        #
        # password = sha256(password)
        # User.objects.get(username=user_name, password=password)
        # Use Django's authentication system instead of manual hashing
        user = authenticate(username=user_name, password=password)
        if user is None:
            # Authentication failed
            return render(request, "login.html", {"errmsg": "用户名或密码错误"})
        # Authentication succeeded;
        # now check the activation status
        if user.is_active is False:
            # Account has not been activated yet
            return render(request, "login.html", {"errmsg": "用户名尚未激活"})
        # Record the login state in the session
        login(request, user)
        # "Remember me" handling
        if remembered != "on":
            # Do not remember: session expires when the browser closes
            # (set_expiry(0) == browser-session lifetime)
            request.session.set_expiry(0)
        else:
            # Remember: fall back to the global session lifetime
            request.session.set_expiry(None)
        # Merge the cookie cart (anonymous browsing) into the redis cart.
        # First read the cart from the cookie
        cart_json = request.COOKIES.get("cart")
        if cart_json is not None:
            cart_cookie = json.loads(cart_json)
        else:
            cart_cookie = {}
        # Then read the cart stored in redis
        redis_conn = get_redis_connection("default")
        cart_redis = redis_conn.hgetall("cart_%s" % user.id)
        # Merge the two carts, summing counts for duplicate SKUs
        # (a plain cart_redis.update(cart_cookie) would overwrite counts)
        for sku_id, count in cart_cookie.items():
            # redis keys/values are bytes; cookie sku_ids are str
            sku_id = sku_id.encode()  # str -> bytes so lookups match
            if sku_id in cart_redis:  # {b'1': b'3'}
                # SKU present in both carts: add the redis count
                origin_count = cart_redis[sku_id]
                count += int(origin_count)
            cart_redis[sku_id] = count
        # Persist the merged cart back to redis
        if cart_redis:
            redis_conn.hmset("cart_%s" % user.id, cart_redis)
        # The cookie cart is cleared on the response below.
        # Redirect according to the ?next= parameter
        next = request.GET.get("next")
        if next is None:
            # No next parameter: go to the front page
            response = redirect(reverse("goods:index"))
        else:
            # Honour the requested destination
            response = redirect(next)
        response.delete_cookie("cart")
        return response
class LogoutView(View):
    """Log the current user out and return to the front page."""

    def get(self, request):
        """Clear the session via django's logout() helper, then redirect."""
        logout(request)  # removes the user's session data
        return redirect(reverse("goods:index"))
class AddressView(LoginRequiredMixin, View):
    """User shipping-address page (login required)."""

    def get(self, request):
        """Show the most recently created address for the logged-in user."""
        user = request.user
        # Latest address by creation time; equivalent to
        # Address.objects.filter(user=user).order_by("-create_time")[0]
        try:
            address = user.address_set.latest("create_time")
        except Address.DoesNotExist:
            # No address saved yet
            address = None
        context = {
            # 'user' is available automatically in Django templates
            "address": address
        }
        return render(request, "user_center_site.html", context)

    def post(self, request):
        """Save a new address, then redirect back to the address page.

        Incomplete submissions are ignored and the page is simply
        re-displayed.  The original fell through and returned ``None`` for
        incomplete forms, which makes Django raise — a view must always
        return an HttpResponse.
        """
        user = request.user
        recv_name = request.POST.get("recv_name")
        addr = request.POST.get("addr")
        zip_code = request.POST.get("zip_code")
        recv_mobile = request.POST.get("recv_mobile")
        if all([recv_name, addr, zip_code, recv_mobile]):
            Address.objects.create(
                user=user,
                receiver_name=recv_name,
                detail_addr=addr,
                zip_code=zip_code,
                receiver_mobile=recv_mobile
            )
        # Always return a response, even when validation failed.
        return redirect(reverse("users:address"))
class UserInfoView(LoginRequiredMixin, View):
    """User centre page: latest address + five most recently viewed products."""
    def get(self, request):
        user = request.user
        # Fetch the latest address, i.e.
        # Address.objects.all().order_by("-create_time")[0]
        # Address.objects.filter().order_by("-create_time")
        try:
            address = user.address_set.latest("create_time")
        except Address.DoesNotExist:
            # No address on record yet
            address = None
        # Get a redis connection from django_redis
        redis_conn = get_redis_connection("default")
        # Browsing history: the five most recent sku ids for this user
        sku_ids = redis_conn.lrange("history_%s" % user.id, 0, 4)
        # sku_ids = [5,6,3,9,1]
        # Load the products from the database one by one to preserve the
        # redis (most-recent-first) order; a single
        # select * from goods_sku where id in ()
        # skus = GoodsSKU.objects.filter(id__in=sku_ids)
        # would not keep that order.
        skus = []
        for sku_id in sku_ids:  # [5,6,3,9,1]
            sku = GoodsSKU.objects.get(id=sku_id)
            skus.append(sku)
        # Build the template context and render
        context = {
            "address": address,
            "skus": skus  # [5,6,3,9,1]
        }
        return render(request, "user_center_info.html", context)
| [
"wgf@zenmeasure.com"
] | wgf@zenmeasure.com |
e1aa506829b30e945319226cb301e841dd6bc86d | ebb524cef91d9e1e6979c4e12a66c8aeb22658a1 | /AIND-Planning/tests/test_my_planning_graph.py | 2050bcaaaa9712e8d9cc4c5455c4e1ef502020a7 | [
"MIT"
] | permissive | kalyancv/Udacity_AIND_Projects | 4cc370d0e1cd704d5158fc28fd825257eca25ce8 | 3e1bdd678cafc880bf083a395a04dcca4a46b22c | refs/heads/master | 2020-04-26T10:10:07.403392 | 2019-03-03T07:20:44 | 2019-03-03T07:20:44 | 173,479,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,405 | py | import os
import sys
parent = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(os.path.dirname(parent), "aimacode"))
import unittest
from aimacode.utils import expr
from aimacode.planning import Action
from example_have_cake import have_cake
from my_planning_graph import (
PlanningGraph , PgNode_a, PgNode_s, mutexify
)
class TestPlanningGraphLevels(unittest.TestCase):
    """Checks the sizes of the action and literal levels for the have_cake problem."""
    def setUp(self):
        # Fresh planning graph built from the standard 'have cake' problem
        self.p = have_cake()
        self.pg = PlanningGraph(self.p, self.p.initial)
    def test_add_action_level(self):
        """Action levels A0 and A1 must contain 3 and 6 action nodes."""
        # for level, nodeset in enumerate(self.pg.a_levels):
        #     for node in nodeset:
        #         print("Level {}: {}{})".format(level, node.action.name, node.action.args))
        self.assertEqual(len(self.pg.a_levels[0]), 3, len(self.pg.a_levels[0]))
        self.assertEqual(len(self.pg.a_levels[1]), 6, len(self.pg.a_levels[1]))
    def test_add_literal_level(self):
        """Literal levels S0-S2 must contain 2, 4 and 4 literal nodes."""
        # for level, nodeset in enumerate(self.pg.s_levels):
        #     for node in nodeset:
        #         print("Level {}: {})".format(level, node.literal))
        self.assertEqual(len(self.pg.s_levels[0]), 2, len(self.pg.s_levels[0]))
        self.assertEqual(len(self.pg.s_levels[1]), 4, len(self.pg.s_levels[1]))
        self.assertEqual(len(self.pg.s_levels[2]), 4, len(self.pg.s_levels[2]))
class TestPlanningGraphMutex(unittest.TestCase):
    """Exercises each mutual-exclusion rule on hand-built action/literal nodes."""
    def setUp(self):
        self.p = have_cake()
        self.pg = PlanningGraph(self.p, self.p.initial)
        # some independent nodes for testing mutex
        self.na1 = PgNode_a(Action(expr('Go(here)'),
                                   [[], []], [[expr('At(here)')], []]))
        self.na2 = PgNode_a(Action(expr('Go(there)'),
                                   [[], []], [[expr('At(there)')], []]))
        self.na3 = PgNode_a(Action(expr('Noop(At(there))'),
                                   [[expr('At(there)')], []], [[expr('At(there)')], []]))
        self.na4 = PgNode_a(Action(expr('Noop(At(here))'),
                                   [[expr('At(here)')], []], [[expr('At(here)')], []]))
        self.na5 = PgNode_a(Action(expr('Reverse(At(here))'),
                                   [[expr('At(here)')], []], [[], [expr('At(here)')]]))
        self.ns1 = PgNode_s(expr('At(here)'), True)
        self.ns2 = PgNode_s(expr('At(there)'), True)
        self.ns3 = PgNode_s(expr('At(here)'), False)
        self.ns4 = PgNode_s(expr('At(there)'), False)
        # Wire up parent/child links so the support-based tests can walk them
        self.na1.children.add(self.ns1)
        self.ns1.parents.add(self.na1)
        self.na2.children.add(self.ns2)
        self.ns2.parents.add(self.na2)
        self.na1.parents.add(self.ns3)
        self.na2.parents.add(self.ns4)
    def test_serialize_mutex(self):
        """Serial planning graphs: non-noop actions are pairwise mutex; noops are not."""
        self.assertTrue(PlanningGraph.serialize_actions(self.pg, self.na1, self.na2),
                        "Two persistence action nodes not marked as mutex")
        self.assertFalse(PlanningGraph.serialize_actions(self.pg, self.na3, self.na4), "Two No-Ops were marked mutex")
        self.assertFalse(PlanningGraph.serialize_actions(self.pg, self.na1, self.na3),
                         "No-op and persistence action incorrectly marked as mutex")
    def test_inconsistent_effects_mutex(self):
        """Actions whose effects cancel each other are mutex."""
        self.assertTrue(PlanningGraph.inconsistent_effects_mutex(self.pg, self.na4, self.na5),
                        "Canceling effects not marked as mutex")
        self.assertFalse(PlanningGraph.inconsistent_effects_mutex(self.pg, self.na1, self.na2),
                         "Non-Canceling effects incorrectly marked as mutex")
    def test_interference_mutex(self):
        """An action negating the other's precondition is mutex (both directions)."""
        self.assertTrue(PlanningGraph.interference_mutex(self.pg, self.na4, self.na5),
                        "Precondition from one node opposite of effect of other node should be mutex")
        self.assertTrue(PlanningGraph.interference_mutex(self.pg, self.na5, self.na4),
                        "Precondition from one node opposite of effect of other node should be mutex")
        self.assertFalse(PlanningGraph.interference_mutex(self.pg, self.na1, self.na2),
                         "Non-interfering incorrectly marked mutex")
    def test_competing_needs_mutex(self):
        """Actions become mutex once their precondition literals are mutex."""
        self.assertFalse(PlanningGraph.competing_needs_mutex(self.pg, self.na1, self.na2),
                         "Non-competing action nodes incorrectly marked as mutex")
        mutexify(self.ns3, self.ns4)
        self.assertTrue(PlanningGraph.competing_needs_mutex(self.pg, self.na1, self.na2),
                        "Opposite preconditions from two action nodes not marked as mutex")
    def test_negation_mutex(self):
        """A literal and its negation are mutex; distinct literals are not."""
        self.assertTrue(PlanningGraph.negation_mutex(self.pg, self.ns1, self.ns3),
                        "Opposite literal nodes not found to be Negation mutex")
        self.assertFalse(PlanningGraph.negation_mutex(self.pg, self.ns1, self.ns2),
                         "Same literal nodes found to be Negation mutex")
    def test_inconsistent_support_mutex(self):
        """Literals are mutex iff every pair of supporting actions is mutex."""
        self.assertFalse(PlanningGraph.inconsistent_support_mutex(self.pg, self.ns1, self.ns2),
                         "Independent node paths should NOT be inconsistent-support mutex")
        mutexify(self.na1, self.na2)
        self.assertTrue(PlanningGraph.inconsistent_support_mutex(self.pg, self.ns1, self.ns2),
                        "Mutex parent actions should result in inconsistent-support mutex")
        # Add a single action that achieves both literals at once
        self.na6 = PgNode_a(Action(expr('Go(everywhere)'),
                                   [[], []], [[expr('At(here)'), expr('At(there)')], []]))
        self.na6.children.add(self.ns1)
        self.ns1.parents.add(self.na6)
        self.na6.children.add(self.ns2)
        self.ns2.parents.add(self.na6)
        self.na6.parents.add(self.ns3)
        self.na6.parents.add(self.ns4)
        mutexify(self.na1, self.na6)
        mutexify(self.na2, self.na6)
        self.assertFalse(PlanningGraph.inconsistent_support_mutex(
            self.pg, self.ns1, self.ns2),
            "If one parent action can achieve both states, should NOT be inconsistent-support mutex, even if parent actions are themselves mutex")
class TestPlanningGraphHeuristics(unittest.TestCase):
    """Checks heuristic values computed from the planning graph."""
    def setUp(self):
        # Fresh planning graph for the 'have cake' problem
        self.p = have_cake()
        self.pg = PlanningGraph(self.p, self.p.initial)
    def test_levelsum(self):
        """Level-sum heuristic for have_cake must be 1."""
        self.assertEqual(self.pg.h_levelsum(), 1)
# Allow running this test module directly: python test_my_planning_graph.py
if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
a5c8acc3f261fc484e471a9c6729ba0a2951f7ea | 6cc37dfc44880f57823bb9523ea5f8206d5e3f22 | /python_OOP/labs_and_homeworks/09_decorators_exercise/07_execution_time.py | 672ebddef602603926ee47bec252adbc7b08d114 | [] | no_license | dimitar-daskalov/SoftUni-Courses | 70d265936fd86712a7bfe0586ec6ebd1c7384f77 | 2054bc58ffb5f41ed86f5d7c98729b101c3b1368 | refs/heads/main | 2023-05-31T06:44:35.498399 | 2021-07-11T10:16:08 | 2021-07-11T10:16:08 | 322,896,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | import time
def exec_time(func):
    """Decorator that replaces *func*'s return value with its run time.

    Calling the wrapped function executes *func* and returns the elapsed
    wall-clock time in seconds; the original result is discarded.
    Supports positional and keyword arguments, and preserves the wrapped
    function's metadata via functools.wraps.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        end = time.time()
        return end - start
    return wrapper
@exec_time
def loop(start, end):
    """Sum the integers in [start, end); returns elapsed time via @exec_time."""
    return sum(range(start, end))
print(loop(1, 10000000))
@exec_time
def concatenate(strings):
    """Concatenate *strings* with repeated '+=' (intentionally naive); timed."""
    joined = ""
    for piece in strings:
        joined += piece
    return joined
print(concatenate(["a" for i in range(1000000)]))
| [
"dimitar.daskalov22@gmail.com"
] | dimitar.daskalov22@gmail.com |
cfe30dfb145e5c7610d9b424ad9cb71f37e95724 | 09e63e204cf3f70b0f878fe237f231af0786611e | /LifeQA/LSTM_QA.py | 2e7249b817077a5418b4be8df812dcb9c1c1f866 | [] | no_license | shubham14/Machine_learning_research | 8f00788366abf2d330afe8914e48d4279fcd8aea | b134e4e6b1e6c110fad8cb38b033c92c34d3c8ce | refs/heads/master | 2022-11-08T13:24:58.722027 | 2019-11-10T09:21:28 | 2019-11-10T09:21:28 | 132,386,307 | 3 | 2 | null | 2022-10-17T15:36:25 | 2018-05-07T00:16:38 | Python | UTF-8 | Python | false | false | 3,186 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 16 16:55:06 2018
@author: Shubham
"""
import numpy as np
from keras import backend as K
from keras.layers import Embedding
from keras.layers import LSTM, Input, merge, Lambda
from keras.layers.wrappers import Bidirectional
from keras.layers.convolutional import Convolution1D
from keras.models import Model
from keras.models import Model as KerasModel
class Model:
    """Builder for a bi-LSTM question/answer similarity network.

    Trains with a max-margin ranking loss over (question, good answer,
    bad answer) triples and scores (question, answer) pairs with cosine
    similarity.  Fixes over the original:
      * the duplicated ``margin`` parameter (a SyntaxError) is removed;
      * ``build_model`` called ``self.get_cosine_similarity()`` while the
        method was named ``cosine_similarity`` — the method is renamed to
        match the call site (old name kept as an alias);
      * the class shadows the imported keras ``Model``; the functional-API
        model is now referenced via the ``KerasModel`` alias;
      * the ranking loss uses ``self.margin`` instead of an undefined
        global ``margin``.
    """

    def __init__(self, margin, enc_timesteps, dec_timesteps,
                 hidden_dim, embedding_file, vocab_size):
        self.margin = margin
        self.enc_timesteps = enc_timesteps
        self.dec_timesteps = dec_timesteps
        self.hidden_dim = hidden_dim
        self.embedding_file = embedding_file
        self.vocab_size = vocab_size

    def get_cosine_similarity(self):
        """Return a merge-mode callable computing batched cosine similarity."""
        dot = lambda a, b: K.batch_dot(a, b, axes=1)
        return lambda x: dot(x[0], x[1]) / K.maximum(K.sqrt(dot(x[0], x[0]) * dot(x[1], x[1])), K.epsilon())

    # Backwards-compatible alias for the original method name.
    cosine_similarity = get_cosine_similarity

    def build_model(self):
        """Build and compile the (training_model, prediction_model) pair."""
        # Symbolic inputs for the question and the answer variants
        question = Input(shape=(self.enc_timesteps,), dtype='int32', name='question_base')
        answer = Input(shape=(self.dec_timesteps,), dtype='int32', name='answer')
        answer_good = Input(shape=(self.dec_timesteps,), dtype='int32', name='answer_good_base')
        answer_bad = Input(shape=(self.dec_timesteps,), dtype='int32', name='answer_bad_base')

        # Shared embedding initialised from pre-trained weights on disk
        weights = np.load(self.embedding_file)
        qa_embedding = Embedding(input_dim=self.vocab_size,
                                 output_dim=weights.shape[1], mask_zero=True, weights=[weights])
        bi_lstm = Bidirectional(LSTM(activation='tanh', dropout=0.2, units=self.hidden_dim,
                                     return_sequences=False))

        # Encode question and answer with the shared embedding + biLSTM
        question_enc_1 = bi_lstm(qa_embedding(question))
        answer_enc_1 = bi_lstm(qa_embedding(answer))

        # Cosine similarity between the two encodings
        similarity = self.get_cosine_similarity()
        question_answer_merged = merge(inputs=[question_enc_1, answer_enc_1],
                                       mode=similarity, output_shape=lambda _: (None, 1))
        lstm_model = KerasModel(name="bi_lstm", inputs=[question, answer],
                                outputs=question_answer_merged)
        good_similarity = lstm_model([question, answer_good])
        bad_similarity = lstm_model([question, answer_bad])

        # Hinge / max-margin ranking loss: relu(margin - good + bad)
        loss = merge(
            [good_similarity, bad_similarity],
            mode=lambda x: K.relu(self.margin - x[0] + x[1]),
            output_shape=lambda x: x[0])

        # The "loss" layer already IS the loss, so compile with an identity loss.
        training_model = KerasModel(inputs=[question, answer_good, answer_bad],
                                    outputs=loss, name='training_model')
        training_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer="rmsprop")
        prediction_model = KerasModel(inputs=[question, answer_good],
                                      outputs=good_similarity, name='prediction_model')
        prediction_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer="rmsprop")
        return training_model, prediction_model
| [
"shubham.ddash@gmail.com"
] | shubham.ddash@gmail.com |
ef6d646a37c629648bf8804c3e3265b1c7146eeb | 9d403bf658c47beff876eeae99d10110f28b924a | /check_ec2_spot_price | 3a1f84ab7e7ecc282739a52cdfe471329507d651 | [] | no_license | kozdincer/nagios-ec2-spot-price-plugin | 8de01d871da89ff01f4a58977ec4f18c646b6bcd | 640190ba5d24439ac36b3df26a76d4db00ec85e2 | refs/heads/master | 2021-01-16T20:43:51.887044 | 2012-07-07T21:27:45 | 2012-07-07T21:27:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,955 | #!/usr/bin/env python
#
# Copyright Gareth Bowles 2010
#
# Based on the check_svn plugin written by Hari Sekhon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""Nagios plugin to check the current EC2 instance spot price. Requires
the Amazon EC2 command line tools to be installed somewhere in the path"""
# Standard Nagios return codes
OK = 0
WARNING = 1
CRITICAL = 2
UNKNOWN = 3
import datetime
import os
import re
import sys
import signal
import time
try:
from subprocess import Popen, PIPE, STDOUT
except ImportError:
print "UNKNOWN: Failed to import python subprocess module.",
print "Perhaps you are using a version of python older than 2.4?"
sys.exit(CRITICAL)
from optparse import OptionParser
__author__ = "Gareth Bowles"
__title__ = "Nagios Plugin for Amazon EC2 Spot Price"
__version__ = 0.1
DEFAULT_TIMEOUT = 20
def end(status, message):
    """Prints a message and exits. First arg is the status code
    Second Arg is the string message"""
    # Prefix so every status line is attributable to this plugin in Nagios output
    check_name = "EC2-SPOT-PRICE "
    if status == OK:
        print "%sOK: %s" % (check_name, message)
        sys.exit(OK)
    elif status == WARNING:
        print "%sWARNING: %s" % (check_name, message)
        sys.exit(WARNING)
    elif status == CRITICAL:
        print "%sCRITICAL: %s" % (check_name, message)
        sys.exit(CRITICAL)
    else:
        # This one is intentionally different
        print "UNKNOWN: %s" % message
        sys.exit(UNKNOWN)
class SpotPriceTester:
"""Holds state for the EC2 spot price check"""
def __init__(self):
"""Initializes all variables to their default states"""
self.warning = ""
self.critical = ""
self.instance_type = ""
self.os_platform = "Linux/UNIX"
self.timeout = DEFAULT_TIMEOUT
self.verbosity = 0
self.BIN = ""
try:
self.ec2_home = os.environ["EC2_HOME"]
except KeyError:
self.vprint(3, "Environment variable EC2_HOME not set, using value passed on command line")
try:
self.ec2_cert = os.environ["EC2_CERT"]
except KeyError:
self.vprint(3, "Environment variable EC2_CERT not set, using value passed on command line")
try:
self.ec2_private_key = os.environ["EC2_PRIVATE_KEY"]
except KeyError:
self.vprint(3, "Environment variable EC2_PRIVATE_KEY not set, using value passed on command line")
try:
self.java_home = os.environ["JAVA_HOME"]
except KeyError:
self.vprint(3, "Environment variable JAVA_HOME not set, using value passed on command line")
def validate_variables(self):
"""Runs through the validation of all test variables
Should be called before the main test to perform a sanity check
on the environment and settings"""
self.validate_warning()
self.validate_critical()
self.validate_instance_type()
self.validate_os_platform()
self.validate_timeout()
self.verify_check_command()
def validate_warning(self):
"""Exits with an error if the warning price level
does not conform to expected format"""
if self.warning == None:
end(UNKNOWN, "You must supply a warning value " \
+ "See --help for details")
self.warning = self.warning.strip()
# Input Validation -
re_warning = re.compile("^\d\.\d+$")
if not re_warning.match(self.warning):
end(UNKNOWN, "Warning value given does not appear to be a valid " \
+ "number - use a dollar value e.g. 0.03 for 3 cents")
def validate_critical(self):
"""Exits with an error if the critical price level
does not conform to expected format"""
if self.critical == None:
end(UNKNOWN, "You must supply a critical value " \
+ "See --help for details")
self.critical = self.critical.strip()
# Input Validation -
re_critical = re.compile("^\d\.\d+$")
if not re_critical.match(self.critical):
end(UNKNOWN, "Critical value given does not appear to be a valid " \
+ "number - use a dollar value e.g. 0.03 for 3 cents")
def validate_instance_type(self):
"""Validates the EC2 instance type"""
if self.instance_type == None:
end(UNKNOWN, "You must supply a valid EC2 instance type " \
+ "See --help for details")
self.instance_type = self.instance_type.strip()
re_instance_type = re.compile("^[cmt][12]\.\w+$")
if not re_instance_type.match(self.instance_type):
end(UNKNOWN, "You must supply a valid EC2 instance type " \
+ "See --help for details")
def validate_os_platform(self):
"""Exits with an error if the O/S platform is not valid"""
self.os_platform = self.os_platform.strip()
if self.os_platform == None:
self.os_platform = "Linux/UNIX"
if self.os_platform != "Linux/UNIX":
if self.os_platform != "Windows":
end(UNKNOWN, "O/S platform must be \"Linux/UNIX\" or \"Windows\"")
def validate_timeout(self):
"""Exits with an error if the timeout is not valid"""
if self.timeout == None:
self.timeout = DEFAULT_TIMEOUT
try:
self.timeout = int(self.timeout)
if not 1 <= self.timeout <= 65535:
end(UNKNOWN, "timeout must be between 1 and 3600 seconds")
except ValueError:
end(UNKNOWN, "timeout number must be a whole number between " \
+ "1 and 3600 seconds")
if self.verbosity == None:
self.verbosity = 0
def verify_check_command(self):
""" Ensures the ec2-describe-spot-price-history command exists and is executable """
check_command = self.ec2_home + "/bin/ec2-describe-spot-price-history"
self.vprint(3, "verify_check_command: Check command is " + check_command)
self.BIN = self.check_executable(check_command)
if not self.BIN:
end(UNKNOWN, "The EC2 command 'ec2-describe-spot-price-history' cannot be found in your path. Please check that " \
+ " you have the Amazon EC2 command line tools installed and that the EC2 environment variables are set " \
+ "correctly (see http://docs.amazonwebservices.com/AWSEC2/latest/CommandLineReference/) ")
# Pythonic version of "which"
#
def check_executable(self, file):
"""Takes an executable path as a string and tests if it is executable.
Returns the full path of the executable if it is executable, or None if not"""
self.vprint(3, "check_executable: Check command is " + file)
if os.path.isfile(file):
self.vprint(3, "check_executable: " + file + " is a file")
if os.access(file, os.X_OK):
self.vprint(3, "check_executable: " + file + " is executable")
return file
else:
#print >> sys.stderr, "Warning: '%s' in path is not executable"
self.vprint(3, "check_executable: " + file + "is not executable" )
end(UNKNOWN, "EC2 utility '%s' is not executable" % file)
self.vprint(3, "check_executable: Check command " + file+ " not found ")
return None
def run(self, cmd):
"""runs a system command and returns a tuple containing
the return code and the output as a single text block"""
if cmd == "" or cmd == None:
end(UNKNOWN, "Internal python error - " \
+ "no cmd supplied for run function")
self.vprint(3, "running command: %s" % cmd)
local_env = os.environ.copy()
local_env["EC2_HOME"] = self.ec2_home
local_env["JAVA_HOME"] = self.java_home
try:
process = Popen( cmd.split(),
bufsize=0,
shell=False,
stdin=PIPE,
stdout=PIPE,
stderr=STDOUT,
env=local_env )
except OSError, error:
error = str(error)
if error == "No such file or directory":
end(UNKNOWN, "Cannot find utility '%s'" % cmd.split()[0])
else:
end(UNKNOWN, "Error trying to run utility '%s' - %s" \
% (cmd.split()[0], error))
stdout, stderr = process.communicate()
if stderr == None:
pass
if stdout == None or stdout == "":
end(UNKNOWN, "No output from utility '%s'" % cmd.split()[0])
returncode = process.returncode
self.vprint(3, "Returncode: '%s'\nOutput: '%s'" % (returncode, stdout))
return (returncode, str(stdout))
def set_timeout(self):
"""Sets an alarm to time out the test"""
if self.timeout == 1:
self.vprint(2, "setting plugin timeout to 1 second")
else:
self.vprint(2, "setting plugin timeout to %s seconds"\
% self.timeout)
signal.signal(signal.SIGALRM, self.sighandler)
signal.alarm(self.timeout)
def sighandler(self, discarded, discarded2):
"""Function to be called by signal.alarm to kill the plugin"""
# Nop for these variables
discarded = discarded2
discarded2 = discarded
if self.timeout == 1:
timeout = "(1 second)"
else:
timeout = "(%s seconds)" % self.timeout
end(CRITICAL, "svn plugin has self terminated after exceeding " \
+ "the timeout %s" % timeout)
def test_spot_price(self):
"""Performs the EC2 spot price check"""
self.validate_variables()
self.set_timeout()
dtstring = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
cmd = self.BIN + " -K " + self.ec2_private_key + " -C " + self.ec2_cert + " -t " + self.instance_type \
+ " -d " + self.os_platform + " -s " + dtstring
self.vprint(2, "now running EC2 spot price check:\n" + cmd)
result, output = self.run(cmd)
if result == 0:
if len(output) == 0:
return (WARNING, "Check passed but no output was received " \
+ "from ec2-describe-spot-price-history command, abnormal condition, " \
+ "please check.")
else:
stripped_output = output.replace("\n", " ").rstrip(" ")
price = stripped_output.split()[1]
status = OK
message = "Current " + self.instance_type + " " + self.os_platform + " spot price is " + price
if float(price) > float(self.critical):
status = CRITICAL
else:
if float(price) > float(self.warning):
status = WARNING
if self.verbosity >= 1:
return(status, "Got data from ec2-describe-spot-price-history: " + stripped_output)
else:
return (status, message)
else:
if len(output) == 0:
return (CRITICAL, "Command failed. " \
+ "There was no output from ec2-describe-spot-price-history")
def vprint(self, threshold, message):
"""Prints a message if the first arg is numerically greater than or
equal to the verbosity level"""
if self.verbosity >= threshold:
print "%s" % message
def main():
"""Parses args and calls func to check EC2 spot price"""
tester = SpotPriceTester()
parser = OptionParser()
parser.add_option( "-w",
"--warning",
dest="warning",
help="Set status to WARNING if the current EC2 spot price in U.S. dollars is above this value.")
parser.add_option( "-c",
"--critical",
dest="critical",
help="Set status to CRITICAL if the current EC2 spot price in U.S. dollars is above this value.")
parser.add_option( "-i",
"--instance-type",
dest="instance_type",
help="EC2 instance type API name (see http://aws.amazon.com/ec2/instance-types/)")
parser.add_option( "-o",
"--os-platform",
dest="os_platform",
help="O/S platform (allowable values are \"Windows\" or \"Linux/UNIX\": default \"Linux/UNIX\")")
parser.add_option( "-e",
"--ec2-home",
dest="ec2_home",
help="Path to EC2 command line tools (defaults to environment variable $EC2_HOME)")
parser.add_option( "-C",
"--ec2-cert",
dest="ec2_cert",
help="Path to EC2 certificate file (defaults to environment variable $EC2_CERT)")
parser.add_option( "-K",
"--ec2-private-key",
dest="ec2_private_key",
help="Path to EC2 private key file (defaults to environment variable $EC2_PRIVATE_KEY)")
parser.add_option( "-j",
"--java-home",
dest="java_home",
help="Path to Java installation (defaults to environment variable $JAVA_HOME)")
parser.add_option( "-t",
"--timeout",
dest="timeout",
help="Sets a timeout after which the the plugin will" \
+ " self terminate. Defaults to %s seconds." \
% DEFAULT_TIMEOUT)
parser.add_option( "-T",
"--timing",
action="store_true",
dest="timing",
help="Enable timer output")
parser.add_option( "-v",
"--verbose",
action="count",
dest="verbosity",
help="Verbose mode. Good for testing plugin. By " \
+ "default only one result line is printed as per" \
+ " Nagios standards")
parser.add_option( "-V",
"--version",
action = "store_true",
dest = "version",
help = "Print version number and exit" )
(options, args) = parser.parse_args()
if args:
parser.print_help()
sys.exit(UNKNOWN)
if options.version:
print "%s %s" % (__title__, __version__)
sys.exit(UNKNOWN)
tester.warning = options.warning
tester.critical = options.critical
tester.instance_type = options.instance_type
tester.os_platform = options.os_platform
tester.verbosity = options.verbosity
tester.ec2_home = options.ec2_home
tester.ec2_cert = options.ec2_cert
tester.ec2_private_key = options.ec2_private_key
tester.java_home = options.java_home
tester.timeout = options.timeout
if options.timing:
start_time = time.time()
returncode, output = tester.test_spot_price()
if options.timing:
finish_time = time.time()
total_time = finish_time - start_time
output += ". Test completed in %.3f seconds" % total_time
end(returncode, output)
sys.exit(UNKNOWN)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Control-C during a check: report CRITICAL so Nagios notices.
        print "Caught Control-C..."
        sys.exit(CRITICAL)
| [
"gareth@optimalops.net"
] | gareth@optimalops.net | |
856a7efd13311564c72105c9170b5e1b7289b241 | 4fb8f756e9a8aa8cb082192887f2adac6d02aaf3 | /RGAT-GloVe/test.py | 7ed9ef17426177bd4f809ee75dbf51f474682c26 | [] | no_license | wuharlem/AHRGAT | a2bc8153b2a63a625a5eaa65ed27e926df78ac07 | 8f9e3aa85759754b0e7af6c1b22e488e4338ab4e | refs/heads/master | 2023-06-30T11:07:12.072698 | 2021-08-04T01:30:16 | 2021-08-04T01:30:16 | 368,099,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,891 | py | # encoding=utf-8
import sys
sys.path.append('../')
import torch
import random
import argparse
import numpy as np
from vocab import Vocab
from utils import helper
from sklearn import metrics
from loader import ABSADataLoader
from trainer import ABSATrainer
from load_w2v import load_pretrained_embedding
import wandb
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
import pandas as pd
import pickle
from networkx.drawing.nx_pydot import graphviz_layout
import networkx as nx
import matplotlib.pyplot as plt
import pydot
import tqdm
import matplotlib as mpl
import netgraph
# Command line hyper-parameters for training/evaluating the RGAT model.
parser = argparse.ArgumentParser()
# Dataset / embedding locations.
parser.add_argument("--data_dir", type=str, default="dataset/Restaurants")
parser.add_argument("--vocab_dir", type=str, default="dataset/Restaurants")
parser.add_argument("--glove_dir", type=str, default="dataset/glove")
# Model dimensions and architecture.
parser.add_argument("--emb_dim", type=int, default=300, help="Word embedding dimension.")
parser.add_argument("--post_dim", type=int, default=30, help="Position embedding dimension.")
parser.add_argument("--pos_dim", type=int, default=30, help="Pos embedding dimension.")
parser.add_argument("--dep_dim", type=int, default=30, help="dep embedding dimension.")
parser.add_argument("--hidden_dim", type=int, default=50, help="hidden dim.")
parser.add_argument("--num_layers", type=int, default=2, help="Num of RGAT layers.")
parser.add_argument("--num_class", type=int, default=3, help="Num of sentiment class.")
parser.add_argument("--cross_val_fold", type=int, default=10, help="Num of cross valid class.")
parser.add_argument("--input_dropout", type=float, default=0.7, help="Input dropout rate.")
parser.add_argument("--layer_dropout", type=float, default=0, help="RGAT layer dropout rate.")
parser.add_argument(
    "--att_dropout", type=float, default=0, help="self-attention layer dropout rate."
)
parser.add_argument("--attn_heads", type=int, default=5, help="Num of GAT/RGAT attention heads.")
parser.add_argument("--alpha", type=float, default=1.0, help="Weight of structure attention.")
parser.add_argument("--beta", type=float, default=1.0, help="Weight of structure values.")
parser.add_argument("--lower", default=True, help="Lowercase all words.")
parser.add_argument("--direct", default=False)
parser.add_argument("--loop", default=True)
# RNN encoder settings.
parser.add_argument("--bidirect", default=True, help="Do use bi-RNN layer.")
parser.add_argument("--rnn_hidden", type=int, default=50, help="RNN hidden state size.")
parser.add_argument("--rnn_layers", type=int, default=1, help="Number of RNN layers.")
parser.add_argument("--rnn_dropout", type=float, default=0.1, help="RNN dropout rate.")
# Optimisation and logging.
parser.add_argument("--lr", type=float, default=0.01, help="learning rate.")
parser.add_argument(
    "--optim",
    choices=["sgd", "adagrad", "adam", "adamax"],
    default="adamax",
    help="Optimizer: sgd, adagrad, adam or adamax.",
)
parser.add_argument("--num_epoch", type=int, default=100, help="Number of total training epochs.")
parser.add_argument("--batch_size", type=int, default=32, help="Training batch size.")
parser.add_argument("--log_step", type=int, default=20, help="Print log every k steps.")
parser.add_argument("--log", type=str, default="logs.txt", help="Write training log to file.")
parser.add_argument("--save_dir", type=str, default="./saved_models", help="Root dir for saving models.")
parser.add_argument("--model", type=str, default="std", help="model to use, (std, GAT, RGAT)")
parser.add_argument(
    "--pooling", type=str, default="avg", help="pooling method to use, (avg, max, attn)"
)
parser.add_argument(
    "--output_merge", type=str, default="gate", help="merge method to use, (addnorm, add, attn)"
)
parser.add_argument("--shuffle", default=False, action="store_true")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--tune", default=False, action="store_true")
parser.add_argument("--wandb", default=False)
parser.add_argument("--modify", type=int, default=0)
parser.add_argument("--aspect", type=int, default=0)
parser.add_argument("--tfidf", type=int, default=0)
parser.add_argument("--name", type=str)
args = parser.parse_args()
# load data
def get_dataloaders(args, vocab):
    """Build the train/valid/test ABSADataLoaders from args.data_dir.

    Only the training split honours args.shuffle; valid/test keep file order.
    """
    def _make(split_file, shuffle):
        # One loader per split; all share the same args and vocab.
        return ABSADataLoader(
            args.data_dir + "/" + split_file, args.batch_size, args, vocab, shuffle=shuffle
        )
    return _make("train_v1.json", args.shuffle), _make("valid_v1.json", False), _make("test_v1.json", False)
def accuracy_score_by_class(predict, target):
    """Per-class accuracy: confusion-matrix diagonal divided by row sums."""
    cm = confusion_matrix(target, predict)
    return cm.diagonal() / cm.sum(axis=1)
def evaluate(model, data_loader):
    """Run model.predict over every batch and aggregate the metrics.

    Returns a tuple: (mean loss, mean negative-class loss, mean neutral-class
    loss, mean positive-class loss, mean accuracy, macro-F1, per-class accuracy).
    """
    preds, golds = [], []
    sums = {"loss": 0.0, "acc": 0.0, "neg": 0.0, "neu": 0.0, "pos": 0.0}
    steps = 0
    for batch in data_loader:
        loss, loss_by_class, acc, pred, label, _, _ = model.predict(batch)
        sums["neg"] += loss_by_class[0]
        sums["neu"] += loss_by_class[1]
        sums["pos"] += loss_by_class[2]
        sums["loss"] += loss
        sums["acc"] += acc
        preds += pred
        golds += label
        steps += 1
    # Macro-averaged F1 over the whole evaluation set.
    f1 = metrics.f1_score(golds, preds, average="macro")
    # Per-class accuracy from the confusion matrix.
    per_class_acc = accuracy_score_by_class(preds, golds)
    return (sums["loss"] / steps, sums["neg"] / steps, sums["neu"] / steps,
            sums["pos"] / steps, sums["acc"] / steps, f1, per_class_acc)
def _totally_parameters(model): #
n_params = sum([p.nelement() for p in model.parameters()])
return n_params
# load vocab
# Module-level setup: these statements run on import and populate the
# vocabularies, sizes on `args`, and the pretrained embedding matrix used
# by test() below.
print("Loading vocab...")
token_vocab = Vocab.load_vocab(args.vocab_dir + "/vocab_tok.vocab")  # token
post_vocab = Vocab.load_vocab(args.vocab_dir + "/vocab_post.vocab")  # position
pos_vocab = Vocab.load_vocab(args.vocab_dir + "/vocab_pos.vocab")  # POS
dep_vocab = Vocab.load_vocab(args.vocab_dir + "/vocab_dep.vocab")  # deprel
pol_vocab = Vocab.load_vocab(args.vocab_dir + "/vocab_pol.vocab")  # polarity
vocab = (token_vocab, post_vocab, pos_vocab, dep_vocab, pol_vocab)
print(
    "token_vocab: {}, post_vocab: {}, pos_vocab: {}, dep_vocab: {}, pol_vocab: {}".format(
        len(token_vocab), len(post_vocab), len(pos_vocab), len(dep_vocab), len(pol_vocab)
    )
)
# Record vocabulary sizes on args so the model constructors can read them.
args.tok_size = len(token_vocab)
args.post_size = len(post_vocab)
args.pos_size = len(pos_vocab)
args.dep_size = len(dep_vocab)
# print(dep_vocab.itos)
# load pretrained word emb
print("Loading pretrained word emb...")
word_emb = load_pretrained_embedding(glove_dir=args.glove_dir, word_list=token_vocab.itos)
# Sanity checks: one embedding row per token, with the configured dimension.
assert len(word_emb) == len(token_vocab)
assert len(word_emb[0]) == args.emb_dim
word_emb = torch.FloatTensor(word_emb) # convert to tensor
def test(path):
    """Load a saved trainer checkpoint and print its test-set metrics.

    path -- checkpoint file name, appended to the hard-coded best_path below.
    """
    # Fix every RNG so the data loaders are rebuilt deterministically.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    train_batch, valid_batch, test_batch = get_dataloaders(args, vocab)
    trainer = ABSATrainer(args, emb_matrix=word_emb)
    # NOTE(review): machine-specific absolute path; also, the freshly built
    # trainer above is immediately discarded by the torch.load() below.
    best_path = "/home/wuharlem/projects/paper/RGAT-ABSA/RGAT-GloVe/saved_models/Restaurants/train"
    trainer = torch.load(best_path + path)
    val_loss, val_neg_loss, val_neu_loss, val_pos_loss, val_acc, val_f1, val_acc_by_class = evaluate(trainer, test_batch)
    print("Evaluation Results: test_loss:{}, test_acc:{}, test_f1:{}".format(val_loss, val_acc, val_f1))
if __name__ == "__main__":
    # Earlier checkpoint sweeps, kept commented out for reference.
    # torch.nn.Module.dump_patches = True
    # for i in range(16, 31):
    #     test(f'/modify/v4/best_checkpoint_modify_aspect_{i}.pt')
    #     test(f'/modify/v4/best_checkpoint_modify_{i}.pt')
test(f'/modify/best_checkpoint_modify_0.pt') | [
"harlemwu0930@gmail.com"
] | harlemwu0930@gmail.com |
f0a2ebbd8db8f2b4791838bb672c3f3becad2b37 | ff71e9daa14f0733b1f87b23bcd756e72ca4fb2f | /examples/login_by_css_tag_id.py | 5b974f5b2b3f653f68e10c65682523737b5af5bc | [] | no_license | aikram96/selenium_basic | 541c4ad14ef77815d35da271aa18b4951f808ca7 | 12f185aac195da10a053825885a1c8272270cd4b | refs/heads/master | 2023-04-06T14:33:36.584203 | 2021-02-13T22:41:33 | 2021-02-13T22:41:33 | 332,104,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | from selenium import webdriver
import time
# Launch Chrome and open the OrangeHRM demo login page.
driver = webdriver.Chrome(executable_path="../drivers/chromedriver.exe")
driver.maximize_window()
driver.get('https://opensource-demo.orangehrmlive.com/')
time.sleep(1)
# Locate the credential fields by their CSS ids and type the demo credentials.
user_input = driver.find_element_by_css_selector('input#txtUsername')
pass_input = driver.find_element_by_css_selector('input#txtPassword')
user_input.send_keys('Admin')
pass_input.send_keys('admin123')
time.sleep(1)
# Submit the form and give the dashboard time to load.
driver.find_element_by_css_selector('input#btnLogin').click()
time.sleep(5)
driver.close() | [
"marcia.gutierrez.castillo@gmail.com"
] | marcia.gutierrez.castillo@gmail.com |
7400ad3efefe2756cac43ed0443e0292298504eb | af1938a4185d7c512c1856a070e3c81104876cae | /Check-Box².py | 502f4e8366555f2f83736a37baca5e016158fa49 | [] | no_license | marcofigueiredo224/Selenium-Python | 1cd186ab3e9e7d02470680d9e01866fa2f03d9f0 | dcbf68490640faf571a7cf7cbe85a0c1a424a015 | refs/heads/main | 2023-06-16T23:42:53.277537 | 2021-07-05T18:09:15 | 2021-07-05T18:09:15 | 380,259,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from random import choice
# CSS selectors for the three leaf checkboxes under the tree node.
# BUGFIX: the first entry was missing its leading '#', so whenever
# random.choice picked it the selector matched nothing and the final
# find_element call failed.
lista_de_selector = ['#tree-node > ol > li > ol > li:nth-child(1) > span > label > span.rct-title',
                     '#tree-node > ol > li > ol > li:nth-child(2) > span > label > span.rct-title',
                     '#tree-node > ol > li > ol > li:nth-child(3) > span > label > span.rct-title']
## Navegador
navegador = webdriver.Chrome()
navegador.get('https://demoqa.com/checkbox')
# ELEMENTOS
btnView = '#tree-node > ol > li > span > button' #SELECTOR
# FIND ELEMENTS
# Pick one checkbox at random, expand the tree, then click it.
# NOTE: .click() returns None, so these two names only record that the
# clicks happened; kept for compatibility with the original script.
elementoSelecionado = choice(lista_de_selector)
btnViewElement = navegador.find_element_by_css_selector(btnView).click()
btnSelecionado = navegador.find_element_by_css_selector(elementoSelecionado).click()
| [
"noreply@github.com"
] | noreply@github.com |
488718466f0f0e87ffa34be480e9e92c0c8df57a | 9a701c23ef6e70dc3704f012ffbb1e2689f7a8cb | /Lib/zDogPy/box.py | 5006b0c8963abce56336c69e361803f02212a395 | [
"MIT"
] | permissive | gferreira/zdogpy | a832db713524d1343b85de1c8215511f438a2e41 | 41304e5db7cc2e145d43b6b2f7d77d25ec3c8b08 | refs/heads/master | 2020-05-30T07:50:24.621323 | 2019-09-11T09:30:59 | 2019-09-11T09:30:59 | 189,606,401 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,196 | py | '''Box composite shape'''
# Development convenience: force-reload the sibling modules so edits are
# picked up when this module is re-imported in a live environment
# (presumably a DrawBot/REPL workflow -- TODO confirm).
from importlib import reload
import zDogPy.anchor
reload(zDogPy.anchor)
import zDogPy.shape
reload(zDogPy.shape)
import zDogPy.rect
reload(zDogPy.rect)
from zDogPy.boilerplate import hexToRGB, TAU
from zDogPy.anchor import Anchor
from zDogPy.shape import Shape
from zDogPy.rect import Rect
# -------
# BoxRect
# -------
class BoxRect(Rect):
    # Rect subclass used for the six faces of a Box.
    def copyGraph(self):
        # Intentionally a no-op: overrides the inherited copyGraph so a face
        # does not duplicate its graph when the Box is copied.
        pass
# ---
# Box
# ---
class Box(Anchor):
    """Composite 3-D box: an Anchor with up to six BoxRect face children."""
    # Face flags/colors; each may be True (use self.color), a color tuple,
    # a hex string, or False/None to omit the face.
    frontFace = None
    rearFace = None
    leftFace = None
    rightFace = None
    topFace = None
    bottomFace = None
    def __init__(self, width=1, height=1, depth=1, stroke=1, fill=True, color=True, frontFace=True, rearFace=True, leftFace=True, rightFace=True, topFace=True, bottomFace=True, **kwargs):
        self.width = width
        self.height = height
        self.depth = depth
        self.stroke = stroke
        self.fill = fill
        self.color = color
        self.frontFace = frontFace
        self.rearFace = rearFace
        self.leftFace = leftFace
        self.rightFace = rightFace
        self.topFace = topFace
        self.bottomFace = bottomFace
        Anchor.__init__(self, **kwargs)
        self.updatePath()
    def updatePath(self):
        # Build the six faces: each is a width x height rectangle translated
        # to the box surface and rotated into its plane.
        self.setFace('frontFace', {
            'width'     : self.width,
            'height'    : self.height,
            'translate' : { 'z': self.depth / 2 },
        })
        self.setFace('rearFace', {
            'width'     : self.width,
            'height'    : self.height,
            'translate' : { 'z': -self.depth / 2 },
        })
        self.setFace('leftFace', {
            'width'     : self.depth,
            'height'    : self.height,
            'translate' : { 'x': -self.width / 2 },
            'rotate'    : { 'y': -TAU / 4 },
        })
        self.setFace('rightFace', {
            'width'     : self.depth,
            'height'    : self.height,
            'translate' : { 'x': self.width / 2 },
            'rotate'    : { 'y': TAU / 4 },
        })
        self.setFace('topFace', {
            'width'     : self.width,
            'height'    : self.depth,
            'translate' : { 'y': -self.height / 2 },
            'rotate'    : { 'x': -TAU / 4 },
        })
        # NOTE(review): top and bottom both rotate by x: -TAU/4; the original
        # zdog Box uses +TAU/4 for the bottom face.  The plane is the same
        # either way, but the face normal is flipped -- confirm if backface
        # handling is ever re-enabled (see the commented lines in setFace).
        self.setFace('bottomFace', {
            'width'     : self.width,
            'height'    : self.depth,
            'translate' : { 'y': self.height / 2 },
            'rotate'    : { 'x': -TAU / 4 },
        })
    def setFace(self, faceName, options):
        """Create one BoxRect child for faceName unless the face is disabled."""
        attr = getattr(self, faceName)
        # NOTE(review): rectProperty is currently unused; it pairs with the
        # commented-out removeChild call below.
        rectProperty = faceName + 'Rect'
        # remove if False (??)
        if not attr:
            # self.removeChild(rectProperty)
            return
        # Resolve the face color: explicit RGB tuple, hex string, or the
        # box-wide default color.
        if isinstance(attr, tuple):
            color = attr
        elif type(attr) is str:
            color = hexToRGB(attr)
        else:
            color = self.color
        rect = BoxRect(**options)
        rect.stroke = self.stroke
        rect.fill = self.fill
        rect.color = color
        # rect.backface = self.backface
        # rect.front = self.front
        # rect.visible = self.visible
        rect.updatePath()
        self.addChild(rect)
| [
"gustavo@hipertipo.com"
] | gustavo@hipertipo.com |
1d96ee2f279714a545379c36f976344ed44bc81b | c3509799f2623e80eab30535ed97d40e93aa963b | /PYTHON/Programmers/연습문제/서울에서김서방찾기.py | a19433994678a0768231549f9252fdf898da8e54 | [] | no_license | jjmin321/Algorithm | a0d99099e118b8d76df45836ed791c0eb6e202ec | 619d6ce1f897d35c5c80b222494bb0137cb60171 | refs/heads/master | 2023-05-05T12:58:57.456622 | 2021-06-04T00:08:00 | 2021-06-04T00:08:00 | 277,247,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | '''
https://programmers.co.kr/learn/courses/30/lessons/12919
풀이 : 파이썬 리스트 내장함수를 통해 Kim의 인덱스 위치를 찾고 그에 맞게 반환하였습니다.
'''
def solution(seoul):
    """Find 'Kim' in the list and report his position (Korean sentence)."""
    kim_index = seoul.index('Kim')
    return f'김서방은 {kim_index}에 있다'
# Sample input from the problem statement ('Kim' at index 1).
seoul = ["Jane", "Kim"]
print(solution(seoul)) | [
"jjmin321@naver.com"
] | jjmin321@naver.com |
f61072493bf553dd506507e12612a22d103fa400 | 8326a2b9a0b3ba2c47ad4a48f278d481b2b8b6d0 | /openapi_client/test/test_paginated_moderation_list.py | d4d1627d44d0bd80dfe4f28a0fa3523d3694ff78 | [
"MIT"
] | permissive | osuka/dognews-scraper | 1101f92f00569388e81d7d4b8d2c72b7d5a7d59e | 12373064061157083a48ced8e2cabf9d1ace30a5 | refs/heads/master | 2023-07-09T18:45:05.718450 | 2021-08-15T21:50:28 | 2021-08-15T21:50:28 | 209,788,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | """
Dognews Server API
Dognews Server client API # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.moderation import Moderation
globals()['Moderation'] = Moderation
from openapi_client.model.paginated_moderation_list import PaginatedModerationList
# Auto-generated test scaffold (openapi-generator); the stub bodies are
# intentionally empty until the FIXME below is addressed.
class TestPaginatedModerationList(unittest.TestCase):
    """PaginatedModerationList unit test stubs"""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testPaginatedModerationList(self):
        """Test PaginatedModerationList"""
        # FIXME: construct object with mandatory attributes with example values
        # model = PaginatedModerationList() # noqa: E501
        pass
# Allow running this stub file directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"486736+osuka@users.noreply.github.com"
] | 486736+osuka@users.noreply.github.com |
801368db205f7329c25d94329502d145a0be6f36 | 95fb24ce069b957dde2ab455fe36330dde05c184 | /Online learning ML.py | eec41d7a333b014fd6dd2442bd47a0bd23f4b5be | [] | no_license | RitikDutta/Onlineline-Learning-basic--ML | c0f2d3f18b9d1ec555c5949ae23cfeaf316f2a94 | 015266b20b8abbec33e90763a38c401271821401 | refs/heads/master | 2020-03-21T02:51:15.786447 | 2018-06-20T11:37:08 | 2018-06-20T11:37:08 | 138,024,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline  # NOTE: IPython notebook magic left over from export; not valid plain-Python syntax, so commented out here.
class OnlineLearn():
    # NOTE(review): both the import and the regressor live on the *class*,
    # so all instances share one LinearRegression object.
    from sklearn.linear_model import LinearRegression
    regg = LinearRegression()
    def onlineLearn(self):
        """Read a number series from stdin, fit a linear trend over the
        positions 1..n, predict position n+1 and plot the result."""
        # Read the series interactively from stdin.
        num_array = list()
        num = input("Enter how many elements you want:")
        print ('Enter numbers in array: ')
        for i in range(int(num)):
            n = input("num :")
            num_array.append(int(n))
        # Fit y = f(position) with the 1-based position as the only feature.
        y = np.array(num_array).reshape(-1,1)
        X = np.array(np.arange(1,len(y)+1)).reshape(-1, 1)
        self.regg.fit(X, y)
        # NOTE(review): passing a bare scalar to predict() only works on old
        # scikit-learn releases; newer versions require a 2-D array -- confirm.
        nextOutput = self.regg.predict(len(y)+1)
        print("Your next value will be", round(nextOutput[0,0]))
        #plt.bar(plt.bar([1,2,3,4,5,6,7,8,9],num_array))
        plt.plot(X, y, color = 'red')
        plt.scatter(len(y)+1, round(nextOutput[0,0]))
onn = OnlineLearn() | [
"noreply@github.com"
] | noreply@github.com |
4b77e30eb184d924f372874506bbda39b571225f | ca8be92d186022018ce5157a56f365602eb70158 | /src/201408/click_0003.py | fd98326f44771e3e9994c36ac33c7e3d325a88cf | [] | no_license | songgz/toys | 05a8214cd86b747bb14b0870c1a12be4794299dd | f808e7c4ed1a76db4800c8e1ee6d163242df52cc | refs/heads/master | 2020-06-19T15:36:11.175281 | 2014-09-28T13:54:41 | 2014-09-28T13:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | '''
Created on Jul 29, 2014
@author: fcmeng
'''
import click
@click.group()
def cli():
    """Top-level click command group; subcommands are attached below."""
    pass
@click.command()
def hello1():
    """Subcommand taking no arguments."""
    print 'hello 1'
@click.command()
@click.argument('arg1')
def hello2(arg1):
    """Subcommand taking one positional argument, echoed back."""
    print 'hello 2'
    print arg1
# Register the subcommands on the group and dispatch when run directly.
cli.add_command(hello1)
cli.add_command(hello2)
if __name__ == '__main__':
    cli()
| [
"fanchao.meng@rea-group.com"
] | fanchao.meng@rea-group.com |
7656d7d6387213b075923c6d3acd0f845b561202 | bf2f215b3686eb0e02bb83676d1f56eca1b319dd | /utils/jietu.py | 5b6527ea0ce80a7a07223766ed085a1bd3e559d6 | [] | no_license | liangshu-code/selenium_jicheng | d8b5377eeafdc21c1313294cf37b94a928a5f8ee | 9c7ad5e4497451810325be3b23d3f32e2b953f8a | refs/heads/main | 2023-02-17T12:55:45.035912 | 2021-01-19T15:38:38 | 2021-01-19T15:38:38 | 331,023,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | import time
def jietu(driver, Cname):
    """Save a screenshot of *driver* to .\\report\\<date>\\png\\<Cname>_<HH_MM_SS>.png."""
    day = time.strftime("%y-%m-%d")
    # Slice off the leading "yy-mm-dd " (9 chars) to keep only HH_MM_SS.
    stamp = time.strftime("%y-%m-%d %H_%M_%S")[9:]
    driver.get_screenshot_as_file(".\\report\\" + day + "\\png\\" + Cname + "_" + stamp + ".png")
| [
"hls1948287451"
] | hls1948287451 |
89fb6f38062500309cf03e18cdb6d858bc663305 | 379c28104ea1b9f392463defeda71f60ca24c8c6 | /app.py | e6079714d840f176be394bd20ae91e8a0066d8d6 | [] | no_license | markStone141/flasker | 7a2bb491bb8c2b21cb9d7d8133acba37f68130cd | f224d181f0a15c8b6137bafadff9e09847df89c8 | refs/heads/main | 2023-07-12T02:23:00.942534 | 2021-08-27T22:32:37 | 2021-08-27T22:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | from flask import Flask, render_template
# Create a Flask Instance
# Module-level app object used by all the route decorators below.
app = Flask(__name__)
# Create a route decorator
@app.route('/', methods = ['GET'])
def index():
    """Home page: renders index.html with a few sample template variables."""
    template_args = {
        'first_name': "John",
        'stuff': "This is the text",
        'favorite_pizza': ['Pepperoni', 'Cheese', 'Mushroom', 41],
    }
    return render_template('index.html', **template_args)
@app.route('/user/<name>')
def user(name):
    """Per-user page: the URL path segment is passed to the template as 'username'."""
    return render_template('user.html', username = name)
@app.errorhandler(404)
def page_not_found(err):
    """Custom 404 (page not found) error page."""
    return render_template("404.html"), 404
# Internal Server Error
@app.errorhandler(500)
def internal_server_error(err):
    """Custom 500 (internal server error) page.

    Renamed from page_not_found: the duplicate name shadowed the 404
    handler's function at module level.  Flask registers handlers via the
    decorator, so routing behaviour is unchanged.
    """
    return render_template("500.html"), 500
# App execution
if __name__ == "__main__":
    # debug=True enables the reloader/debugger; development use only.
    app.run(debug=True)
| [
"65819197+markStone141@users.noreply.github.com"
] | 65819197+markStone141@users.noreply.github.com |
1a6ea7d278f3045c4f5e026e1485d457be6d8432 | e2f65f808d637acc2997adc14e575c4f0368ef4c | /applications/blog/urls_subdomain.py | 73d132c4d9ab3362f1a6133a4aba128a56dfe526 | [] | no_license | amoisoo/APPJINYOUNG | 4f9c445b91afecd7494f9172cfd35c62275784a9 | 83db043fb3dc5795c0407d6b2ddcbb348d10ee83 | refs/heads/master | 2023-02-12T12:47:54.323228 | 2021-01-08T23:24:02 | 2021-01-08T23:24:02 | 328,032,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# Subdomain URL routes: blog app at the root, plus accounts and support
# sub-trees, with media files appended for development serving.
urlpatterns = [
    path("", include("applications.blog.urls" ) , name = 'index'),
    path("accounts/", include("accounts.accounts.urls"), ),
    path("support/", include("applications.support.urls"), ),
]+ static(
    # django.conf.urls.static.static returns [] unless DEBUG is on, so
    # media is only served this way in development.
    settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
| [
"dev@amoisoo.com"
] | dev@amoisoo.com |
2d74ed4c1052a28ef94510b2ea4cf998943f87cf | 2bae293897774a4612b5216562f9347fd95b7a28 | /hacktheglobe/bin/s3put | 5939f15ddc784b5721806bf720ebba7b59072319 | [] | no_license | xwinxu/DOC | c73a54a62a22edd87eedcd07533be8d38b173716 | b8de83d65c294c3a86a20b4878bdda656bfafdd7 | refs/heads/master | 2020-04-29T08:27:15.043833 | 2019-03-20T21:43:04 | 2019-03-20T21:43:04 | 175,987,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,883 | #!/Users/winniexu/Hack-the-Globe-Team12/hacktheglobe/bin/python3
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import getopt
import sys
import os
import boto
from boto.compat import six
try:
    # multipart portions copyright Fabian Topfstedt
    # https://gist.github.com/924094
    # Probe for the optional multipart-upload dependencies; if any is
    # missing we fall back to single-part uploads only.
    import math
    import mimetypes
    from multiprocessing import Pool
    from boto.s3.connection import S3Connection
    from filechunkio import FileChunkIO
    multipart_capable = True
    usage_flag_multipart_capable = """ [--multipart]"""
    usage_string_multipart_capable = """
    multipart - Upload files as multiple parts. This needs filechunkio.
                Requires ListBucket, ListMultipartUploadParts,
                ListBucketMultipartUploads and PutObject permissions."""
except ImportError as err:
    multipart_capable = False
    usage_flag_multipart_capable = ""
    # Python 2 exposes the ImportError text as .message, Python 3 as .msg.
    if six.PY2:
        attribute = 'message'
    else:
        attribute = 'msg'
    usage_string_multipart_capable = '\n\n "' + \
        getattr(err, attribute)[len('No module named '):] + \
        '" is missing for multipart support '
# Region used when neither --region nor --host is supplied.
DEFAULT_REGION = 'us-east-1'
usage_string = """
SYNOPSIS
s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
-b/--bucket <bucket_name> [-c/--callback <num_cb>]
[-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
[-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
[-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced]
[--header] [--region <name>] [--host <s3_host>]""" + \
usage_flag_multipart_capable + """ path [path...]
Where
access_key - Your AWS Access Key ID. If not supplied, boto will
use the value of the environment variable
AWS_ACCESS_KEY_ID
secret_key - Your AWS Secret Access Key. If not supplied, boto
will use the value of the environment variable
AWS_SECRET_ACCESS_KEY
bucket_name - The name of the S3 bucket the file(s) should be
copied to.
path - A path to a directory or file that represents the items
to be uploaded. If the path points to an individual file,
that file will be uploaded to the specified bucket. If the
path points to a directory, it will recursively traverse
the directory and upload all files to the specified bucket.
debug_level - 0 means no debug output (default), 1 means normal
debug output from boto, and 2 means boto debug output
plus request/response output from httplib
ignore_dirs - a comma-separated list of directory names that will
be ignored and not uploaded to S3.
num_cb - The number of progress callbacks to display. The default
is zero which means no callbacks. If you supplied a value
of "-c 10" for example, the progress callback would be
called 10 times for each file transferred.
prefix - A file path prefix that will be stripped from the full
path of the file when determining the key name in S3.
For example, if the full path of a file is:
/home/foo/bar/fie.baz
and the prefix is specified as "-p /home/foo/" the
resulting key name in S3 will be:
/bar/fie.baz
The prefix must end in a trailing separator and if it
does not then one will be added.
key_prefix - A prefix to be added to the S3 key name, after any
stripping of the file path is done based on the
"-p/--prefix" option.
reduced - Use Reduced Redundancy storage
grant - A canned ACL policy that will be granted on each file
transferred to S3. The value of provided must be one
of the "canned" ACL policies supported by S3:
private|public-read|public-read-write|authenticated-read
no_overwrite - No files will be overwritten on S3, if the file/key
exists on s3 it will be kept. This is useful for
resuming interrupted transfers. Note this is not a
sync, even if the file has been updated locally if
the key exists on s3 the file on s3 will not be
updated.
header - key=value pairs of extra header(s) to pass along in the
request
region - Manually set a region for buckets that are not in the US
classic region. Normally the region is autodetected, but
setting this yourself is more efficient.
host - Hostname override, for using an endpoint other then AWS S3
""" + usage_string_multipart_capable + """
If the -n option is provided, no files will be transferred to S3 but
informational messages will be printed about what would happen.
"""
def usage(status=1):
    """Show the command-line help text, then terminate.

    Exits the process with *status* (1, i.e. error, by default).
    """
    print(usage_string)
    sys.exit(status)
def submit_cb(bytes_so_far, total_bytes):
    """Progress callback: report transferred vs. total byte counts."""
    print('%d bytes transferred / %d bytes total'
          % (bytes_so_far, total_bytes))
def get_key_name(fullpath, prefix, key_prefix):
    """Derive the S3 key name for *fullpath*.

    Strips a leading *prefix* (when present), converts OS path
    separators to '/', and prepends *key_prefix*.
    """
    stripped = fullpath[len(prefix):] if fullpath.startswith(prefix) else fullpath
    parts = stripped.split(os.sep)
    return key_prefix + '/'.join(parts)
def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
                 source_path, offset, bytes, debug, cb, num_cb,
                 amount_of_retries=10):
    """
    Uploads a part with retries.

    Runs in a worker process spawned by multipart_upload(), so it opens
    its own S3 connection and re-locates the pending multipart upload by
    *multipart_id* before sending bytes [offset, offset+bytes) of
    *source_path* as part *part_num*.
    """
    if debug == 1:
        print("_upload_part(%s, %s, %s)" % (source_path, offset, bytes))
    def _upload(retries_left=amount_of_retries):
        try:
            if debug == 1:
                print('Start uploading part #%d ...' % part_num)
            conn = S3Connection(aws_key, aws_secret)
            conn.debug = debug
            bucket = conn.get_bucket(bucketname)
            # Find our in-flight multipart upload among the bucket's
            # pending uploads and push this chunk into it.
            for mp in bucket.get_all_multipart_uploads():
                if mp.id == multipart_id:
                    with FileChunkIO(source_path, 'r', offset=offset,
                                     bytes=bytes) as fp:
                        mp.upload_part_from_file(fp=fp, part_num=part_num,
                                                 cb=cb, num_cb=num_cb)
                    break
        except Exception as exc:
            # Retry recursively until the budget is exhausted, then
            # surface the last error.
            if retries_left:
                _upload(retries_left=retries_left - 1)
            else:
                print('Failed uploading part #%d' % part_num)
                raise exc
        else:
            if debug == 1:
                print('... Uploaded part #%d' % part_num)
    _upload()
def check_valid_region(conn, region):
    """Abort with exit status 1 when *conn* is None (unknown region)."""
    if conn is not None:
        return
    print('Invalid region (%s)' % region)
    sys.exit(1)
def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
                     reduced, debug, cb, num_cb, acl='private', headers={},
                     guess_mimetype=True, parallel_processes=4,
                     region=DEFAULT_REGION):
    """
    Parallel multipart upload.

    Splits *source_path* into chunks and uploads them with a process
    pool of *parallel_processes* workers (see _upload_part). Completes
    the upload only if every part arrived; cancels it otherwise.

    NOTE(review): ``headers={}`` is a mutable default that is mutated
    below when guess_mimetype is on — the Content-Type entry persists
    across calls that rely on the default.
    """
    conn = boto.s3.connect_to_region(region, aws_access_key_id=aws_key,
                                     aws_secret_access_key=aws_secret)
    check_valid_region(conn, region)
    conn.debug = debug
    bucket = conn.get_bucket(bucketname)
    if guess_mimetype:
        mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
        headers.update({'Content-Type': mtype})
    mp = bucket.initiate_multipart_upload(keyname, headers=headers,
                                          reduced_redundancy=reduced)
    source_size = os.stat(source_path).st_size
    # Chunk size grows with sqrt(file size), but never below S3's 5 MB
    # minimum part size (5242880 bytes).
    bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
                          5242880)
    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
    pool = Pool(processes=parallel_processes)
    for i in range(chunk_amount):
        offset = i * bytes_per_chunk
        remaining_bytes = source_size - offset
        bytes = min([bytes_per_chunk, remaining_bytes])
        part_num = i + 1
        pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
                                        part_num, source_path, offset, bytes,
                                        debug, cb, num_cb])
    pool.close()
    pool.join()
    # All parts present -> finalize and apply the ACL; otherwise abort
    # so no orphaned parts keep accruing storage charges.
    if len(mp.get_all_parts()) == chunk_amount:
        mp.complete_upload()
        key = bucket.get_key(keyname)
        key.set_acl(acl)
    else:
        mp.cancel_upload()
def singlepart_upload(bucket, key_name, fullpath, *kargs, **kwargs):
    """
    Upload *fullpath* to *bucket* under *key_name* in a single request.

    Extra positional/keyword arguments are forwarded to
    ``Key.set_contents_from_filename``.
    """
    bucket.new_key(key_name).set_contents_from_filename(fullpath, *kargs, **kwargs)
def expand_path(path):
    """Expand ``~`` and ``$VARS`` in *path* and return it absolute."""
    expanded = os.path.expandvars(os.path.expanduser(path))
    return os.path.abspath(expanded)
def main():
    """Parse command-line options and upload the given paths to S3.

    Exits via ``usage()`` on bad or missing arguments; see the
    module-level ``usage_string`` for the option reference.
    """
    # default values
    aws_access_key_id = None
    aws_secret_access_key = None
    bucket_name = ''
    ignore_dirs = []
    debug = 0
    cb = None
    num_cb = 0
    quiet = False
    no_op = False
    prefix = '/'
    key_prefix = ''
    grant = None
    no_overwrite = False
    reduced = False
    headers = {}
    host = None
    multipart_requested = False
    region = None
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'a:b:c::d:g:hi:k:np:qs:wr',
            ['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
             'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet',
             'secret_key=', 'no_overwrite', 'reduced', 'header=', 'multipart',
             'host=', 'region='])
    except:
        # NOTE(review): bare except — any error during option parsing
        # (not just GetoptError) falls through to the usage text.
        usage(1)
    # parse opts
    for o, a in opts:
        if o in ('-h', '--help'):
            usage(0)
        if o in ('-a', '--access_key'):
            aws_access_key_id = a
        if o in ('-b', '--bucket'):
            bucket_name = a
        if o in ('-c', '--callback'):
            num_cb = int(a)
            cb = submit_cb
        if o in ('-d', '--debug'):
            debug = int(a)
        if o in ('-g', '--grant'):
            grant = a
        if o in ('-i', '--ignore'):
            ignore_dirs = a.split(',')
        if o in ('-n', '--no_op'):
            no_op = True
        if o in ('-w', '--no_overwrite'):
            no_overwrite = True
        if o in ('-p', '--prefix'):
            prefix = a
            if prefix[-1] != os.sep:
                prefix = prefix + os.sep
            # NOTE(review): expand_path() ends with os.path.abspath,
            # which strips the trailing separator just appended above.
            prefix = expand_path(prefix)
        if o in ('-k', '--key_prefix'):
            key_prefix = a
        if o in ('-q', '--quiet'):
            quiet = True
        if o in ('-s', '--secret_key'):
            aws_secret_access_key = a
        if o in ('-r', '--reduced'):
            reduced = True
        if o == '--header':
            (k, v) = a.split("=", 1)
            headers[k] = v
        if o == '--host':
            host = a
        if o == '--multipart':
            if multipart_capable:
                multipart_requested = True
            else:
                print("multipart upload requested but not capable")
                sys.exit(4)
        if o == '--region':
            # Accept only region names that boto knows about.
            regions = boto.s3.regions()
            for region_info in regions:
                if region_info.name == a:
                    region = a
                    break
            else:
                raise ValueError('Invalid region %s specified' % a)
    if len(args) < 1:
        usage(2)
    if not bucket_name:
        print("bucket name is required!")
        usage(3)
    connect_args = {
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key
    }
    if host:
        connect_args['host'] = host
    c = boto.s3.connect_to_region(region or DEFAULT_REGION, **connect_args)
    check_valid_region(c, region or DEFAULT_REGION)
    c.debug = debug
    b = c.get_bucket(bucket_name, validate=False)
    # Attempt to determine location and warn if no --host or --region
    # arguments were passed. Then try to automagically figure out
    # what should have been passed and fix it.
    if host is None and region is None:
        try:
            location = b.get_location()
            # Classic region will be '', any other will have a name
            if location:
                print('Bucket exists in %s but no host or region given!' % location)
                # Override for EU, which is really Ireland according to the docs
                if location == 'EU':
                    location = 'eu-west-1'
                print('Automatically setting region to %s' % location)
                # Here we create a new connection, and then take the existing
                # bucket and set it to use the new connection
                c = boto.s3.connect_to_region(location, **connect_args)
                c.debug = debug
                b.connection = c
        except Exception as e:
            if debug > 0:
                print(e)
            print('Could not get bucket region info, skipping...')
    existing_keys_to_check_against = []
    files_to_check_for_upload = []
    for path in args:
        path = expand_path(path)
        # upload a directory of files recursively
        if os.path.isdir(path):
            if no_overwrite:
                if not quiet:
                    print('Getting list of existing keys to check against')
                for key in b.list(get_key_name(path, prefix, key_prefix)):
                    existing_keys_to_check_against.append(key.name)
            for root, dirs, files in os.walk(path):
                # Prune ignored directory names in place so os.walk
                # does not descend into them.
                for ignore in ignore_dirs:
                    if ignore in dirs:
                        dirs.remove(ignore)
                for path in files:
                    # Skip hidden files (dotfiles).
                    if path.startswith("."):
                        continue
                    files_to_check_for_upload.append(os.path.join(root, path))
        # upload a single file
        elif os.path.isfile(path):
            fullpath = os.path.abspath(path)
            key_name = get_key_name(fullpath, prefix, key_prefix)
            files_to_check_for_upload.append(fullpath)
            existing_keys_to_check_against.append(key_name)
        # we are trying to upload something unknown
        else:
            print("I don't know what %s is, so i can't upload it" % path)
    for fullpath in files_to_check_for_upload:
        key_name = get_key_name(fullpath, prefix, key_prefix)
        if no_overwrite and key_name in existing_keys_to_check_against:
            if b.get_key(key_name):
                if not quiet:
                    print('Skipping %s as it exists in s3' % fullpath)
                continue
        if not quiet:
            print('Copying %s to %s/%s' % (fullpath, bucket_name, key_name))
        if not no_op:
            # 0-byte files don't work and also don't need multipart upload
            if os.stat(fullpath).st_size != 0 and multipart_capable and \
                    multipart_requested:
                multipart_upload(bucket_name, aws_access_key_id,
                                 aws_secret_access_key, fullpath, key_name,
                                 reduced, debug, cb, num_cb,
                                 grant or 'private', headers,
                                 region=region or DEFAULT_REGION)
            else:
                singlepart_upload(b, key_name, fullpath, cb=cb, num_cb=num_cb,
                                  policy=grant, reduced_redundancy=reduced,
                                  headers=headers)
if __name__ == "__main__":
    main()
| [
"winnie.xu97@gmail.com"
] | winnie.xu97@gmail.com | |
85bbb1146b933e5be79b70692edba72d7199c1ed | 1f08a7e8191cce4d8d4e88b9a29d0b579ad1ffeb | /quickstart/checklist/models.py | 37273dcff771f13c32bbc138dd206a3ba7b99f30 | [] | no_license | Learning-Something/django-rest | 2be341346bf19e5e52023257a79d85ddeb47de9f | dfb766bc946972daf9d2088fb29961b4fc677d57 | refs/heads/master | 2021-07-03T14:12:20.151867 | 2017-09-23T22:05:32 | 2017-09-23T22:05:32 | 103,187,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | from django.db import models
# class Relatorio(models.Model):
'''
Relatório 1
'''
class AcompanhamentoCantina(models.Model):
    """Report 1: one canteen-monitoring visit record (Portuguese field names)."""
    data = models.DateField()  # visit date
    responsavel_preenchimento = models.CharField(max_length=100, blank=False)  # who filled in the form
    responsavel_informacao = models.CharField(max_length=100, blank=False)  # who provided the information
    escola = models.CharField(max_length=100, blank=False)  # school name
    nome_cantineiro = models.CharField(max_length=100, blank=False)  # canteen worker's name
    resposta_alimentos = models.CharField(max_length=200, blank=False)  # free-text answer about the food
    #relatorio = models.ForeignKey(Relatorio, on_delete=models.CASCADE)
class AvaliacaoCantina(models.Model):
    """One question/answer pair belonging to a canteen-monitoring record."""
    pergunta = models.CharField(max_length=50, blank=False)  # question text
    resposta_avaliacao = models.CharField(max_length=3, blank=False)  # 3-char answer code
    # Deleting the parent record cascades to its question/answer rows.
    acompanhamento = models.ForeignKey(AcompanhamentoCantina, on_delete=models.CASCADE)
'''
Relatório 2
class AcompanhamentoAlgumaCoisa(models.Model):
resposta_alimentos = models.CharField(max_length=200, blank=False)
relatorio = models.ForeignKey(Relatorio, on_delete=models.CASCADE)
class AvaliacaoAlgumaCoisa(models.Model):
pergunta = models.CharField(max_length=50, blank=False)
resposta_avaliacao = models.CharField(max_length=3, blank=False)
observacao = models.CharField(max_length=100, blank=True)
acompanhamento = models.ForeignKey(AcompanhamentoCantina, on_delete=models.CASCADE)
'''
#Relatório 3
'''
class AcompanhamentoAlgumaCoisa2(models.Model):
resposta_alimentos = models.CharField(max_length=200, blank=False)
relatorio = models.ForeignKey(Relatorio, on_delete=models.CASCADE)
class AvaliacaoAlgumaCoisa2(models.Model):
acompanhamento = models.ForeignKey(AcompanhamentoCantina, on_delete=models.CASCADE)
''' | [
"vitor.nere@hotmail.com"
] | vitor.nere@hotmail.com |
33b9a0b28178626117cfa52bbee000bdf746fae2 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /QcswPnY2cAbrfwuWE_1.py | 3dc657a83394c0074459ebb833c7727b69c41094 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py |
def filter_factorials(n):
def is_fact(x):
i=1
while(True):
if x%i<1:x//=i
else:break
i+=1
return x==1
return[i for i in n if is_fact(i)]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
778c63b312887248dd0acb4685c2ef035c4ed8e6 | be3143e12bdbf2b19967f2782351356270db34ab | /apps/users/models.py | 52c6fc91cde2e0052f7b76d348422063016865b7 | [] | no_license | proysistem/corporacion | 5dd2d71f7f0c8f33af0ffa1e351f1bbf19531a52 | 67f2c189ab2b8eb0ea7659e4634dc21a447fec31 | refs/heads/master | 2021-01-24T01:27:53.158384 | 2018-03-09T03:43:13 | 2018-03-09T03:43:13 | 122,807,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager, models.Manager):
    """Manager for the custom ``User`` model: username login, email required."""
    def _create_user(self, username, email, password, is_staff, is_superuser, **extra_fields):
        # Shared implementation behind create_user/create_superuser.
        email = self.normalize_email(email)
        if not email:
            # "Email obligatorio" = e-mail is mandatory (Spanish).
            raise ValueError('Email obligatorio')
        usermodel = self.model(username=username, email=email, is_active=True, is_staff=is_staff, is_superuser=is_superuser, **extra_fields)
        # Hash the password (never stored in plain text), then persist.
        usermodel.set_password(password)
        usermodel.save(using=self._db)
        return usermodel
    def create_user(self, username, email, password=None, **extra_fields):
        # Regular account: neither staff nor superuser.
        return self._create_user(username, email, password, False, False, **extra_fields)
    def create_superuser(self, username, email, password=None, **extra_fields):
        # Admin account: both staff and superuser flags set.
        return self._create_user(username, email, password, True, True, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model; logs in with ``username``, Spanish verbose names."""
    username = models.CharField('Usuario', max_length=25, unique=True)
    email = models.EmailField('e-mail')
    first_name = models.CharField('Primer Nombre', max_length=20)
    last_name = models.CharField('Apellidos', max_length=20)
    telefono = models.CharField('Teléfono', max_length=15)
    celular = models.CharField('Celular', max_length=15)
    # Profile pictures are stored under MEDIA_ROOT/usuarios.
    picture_user = models.ImageField(upload_to="usuarios")
    objects = UserManager()
    # NOTE(review): the verbose name below has an unbalanced parenthesis
    # ('Status (Activo=On/Off') — left as-is, it is a user-visible label.
    is_active = models.BooleanField('Status (Activo=On/Off', default=True)
    is_staff = models.BooleanField('Usuario Staff (On/Off)', default=False)
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']
    def get_short_name(self):
        # Short display name hook used by Django's auth machinery.
        return self.username
| [
"proysistem@yahoo.com"
] | proysistem@yahoo.com |
4f8410227745a3f05d1ded00c637145222b001f5 | acf03baf55e36dc2a9a5ecd697ec5cb15446ae32 | /vng_api_common/decorators.py | eeb5d658ccfe42cff1208a7c375b34363017ad85 | [] | no_license | GemeenteUtrecht/vng-api-common | b6eb55121dc44c72179cbcd63896bbc020dc3975 | 97786cac041d867468007f9b9a9703d21644391a | refs/heads/master | 2021-06-29T01:53:11.647065 | 2019-07-18T12:04:39 | 2019-07-18T12:05:27 | 198,412,309 | 0 | 0 | null | 2019-07-23T10:57:53 | 2019-07-23T10:57:52 | null | UTF-8 | Python | false | false | 279 | py | from typing import Any
from django.db.models.base import ModelBase
def field_default(field: str, default: Any):
    """Class-decorator factory: override the ``default`` of one model field.

    Usage::

        @field_default("uuid", uuid.uuid4)
        class MyModel(models.Model): ...
    """
    def decorator(cls: ModelBase):
        cls._meta.get_field(field).default = default
        return cls
    return decorator
| [
"sergei@maykinmedia.nl"
] | sergei@maykinmedia.nl |
2eaad527d6144119a665568a07cc208bc6b68d28 | 28eec2ca8a89bd198fb506067a7d3dcc74d798ba | /LivePlayPlanBackEnd/VkApps/settings.py | 2c04a094324be3ad8f1422c6571cb515e908a44e | [] | no_license | EtzSladkoewka/Test1 | c50e13dbdf25f68a0ed47e04dfa18ba2e38e6e85 | 8f10a6f85588234dccbfca679cd7d765ad52720f | refs/heads/master | 2023-03-20T07:25:10.592380 | 2021-03-18T10:19:51 | 2021-03-18T10:19:51 | 340,701,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed; move it to an
# environment variable before deploying.
SECRET_KEY = '7y==j3j8t+pm3st!h5wek#0eag)j9v9douwbva^&7&^rlaw4lv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps.
    'user',
    'achievements',
    'task',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'VkApps.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates']
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'VkApps.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
"70232430+EtzSladkoewka@users.noreply.github.com"
] | 70232430+EtzSladkoewka@users.noreply.github.com |
cbd9f0587be52b2ebc40436f2728a01e989cfb6b | d2838c88c0a46c58337f5959f46a75de1c7c1b91 | /habraPageParser.py | 6421e6e5b388dbeb31da1a1dfed28b0a38477dae | [] | no_license | SergeyParamonov/HabraData | 434478e86212d9a920c8dc12056649967c1dd754 | 5fc219121aacfc32f91b9f8327eb05bd543201b7 | refs/heads/master | 2021-01-17T19:04:21.635470 | 2020-08-03T15:28:16 | 2020-08-03T15:28:16 | 18,575,291 | 46 | 14 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | import urllib2
from bs4 import BeautifulSoup
import re
from article import HabraArticle
class HabraPageParser:
  @staticmethod
  def parse(url):
    """Fetch a Habrahabr listing page and return a list of HabraArticle
    objects, or None when the page is missing, closed, or malformed.

    NOTE(review): written for Python 2 (urllib2 and the
    ``except ..., err`` syntax); it will not run under Python 3.
    """
    try:
      response = urllib2.urlopen(url)
    except urllib2.HTTPError, err:
      # A missing page is an expected outcome; anything else re-raises.
      if err.code == 404:
        return None
      else:
        raise
    html = response.read().decode("utf-8")
    soup = BeautifulSoup(html)
    #print(soup.decode('utf-8'))
    # If the post is closed, the site serves a page whose title is the
    # UTF-8 byte string below ("Habrahabr — access to the page is
    # restricted"); return None in that case.
    cyrillicPostIsClosed = '\xd0\xa5\xd0\xb0\xd0\xb1\xd1\x80\xd0\xb0\xd1\x85\xd0\xb0\xd0\xb1\xd1\x80 \xe2\x80\x94 \xd0\x94\xd0\xbe\xd1\x81\xd1\x82\xd1\x83\xd0\xbf \xd0\xba \xd1\x81\xd1\x82\xd1\x80\xd0\xb0\xd0\xbd\xd0\xb8\xd1\x86\xd0\xb5 \xd0\xbe\xd0\xb3\xd1\x80\xd0\xb0\xd0\xbd\xd0\xb8\xd1\x87\xd0\xb5\xd0\xbd'
    if soup.title.text == cyrillicPostIsClosed.decode('utf-8'):
      return None
    articles = soup.find_all(class_="post shortcuts_item")
    habraArticles = []
    for article in articles:
      isScoreShown = article.find(class_="mark positive ")
      # if the score is published already, then the article is in,
      # otherwise we go on to the next one
      if not isScoreShown:
        continue
      post_id = article["id"]
      author = article.find(class_="author")
      if author:
        author = author.a.text
      title = article.find(class_="post_title").text
      score = article.find(class_="score" ).text
      views = article.find(class_="pageviews" ).text
      favors = article.find(class_="favs_count").text
      tutorial = article.find(class_="flag flag_tutorial")
      # we need to escape the symbols in the title, it might contain commas
      title = re.sub(r',', " comma ", title)
      # if something went wrong skip this article
      # NOTE(review): this actually aborts the whole page (returns None)
      # rather than skipping one article — confirm that is intended.
      if not post_id or not author or not title:
        return None
      habraArticle = HabraArticle(post_id,title,author,score,views,favors,tutorial)
      habraArticles.append(habraArticle)
    return habraArticles
| [
"sergey.paramonov@phystech.edu"
] | sergey.paramonov@phystech.edu |
e21368c69c0f51eb75de11bf6c6829d4a2eff22e | fb5384a3277f022b1f7ef8ebc5bd9c72edf62a31 | /habraproxy/logging.py | 255a424c99edaea4491d7b5090abfb89a9d7d1d7 | [] | no_license | dndred/habraproxy | 55b8a121e37b67e3bf8dda90ce8f1545c51cdbd2 | f438a80d9c00de5aeb9fda1df421abceeadbe515 | refs/heads/master | 2023-05-17T22:45:19.666929 | 2021-06-02T10:36:24 | 2021-06-02T10:39:23 | 373,069,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import logging
def get_module_logger(mod_name):
    """Return a DEBUG-level logger for *mod_name* with one stream handler.

    To use this, do ``logger = get_module_logger(__name__)``.

    Fix: the original attached a fresh ``StreamHandler`` on every call,
    so requesting the same module's logger twice duplicated every log
    line. ``logging.getLogger`` returns the same object per name, so a
    handler is now added only when the logger has none yet.
    """
    logger = logging.getLogger(mod_name)
    if not logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            '%(asctime)s [%(name)-12s] %(levelname)-8s %(message)s'
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    return logger
"dndred@gmail.com"
] | dndred@gmail.com |
7fddbfc6b50ccae88c71d3b3ae36fd6288bc4b5c | 10d48a36fd6b68cf59293c170400753690064a54 | /turtlebot/camera.py | 391a9d8662a03c5a4277cfee7637a07667130cef | [] | no_license | srbhchandra/hri_project | 5c45cc216842069128b4ff06f7212f2155d1af34 | e65fc0c7c172b80d9658238606d7f5384c90c4c1 | refs/heads/master | 2021-01-19T09:41:32.419486 | 2017-03-16T13:54:15 | 2017-03-16T13:54:15 | 82,137,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | import rospy
#import faces
import cv2
import cv2.cv as cv
import sys
# OpenCV VideoCapture device indices (device 0 is typically the
# built-in camera; device 1 an external USB camera).
Notebook_webcam = 0
USB_webcam = 1
def detect(img, cascade):
    """Run the Haar cascade on *img*; return face rects or None."""
    rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3, minSize=(30, 30), flags = cv.CV_HAAR_SCALE_IMAGE)
    if len(rects) == 0:
        return None
    # detectMultiScale returns (x, y, w, h) rows; convert in place to
    # (x1, y1, x2, y2) corner coordinates.
    rects[:,2:] += rects[:,:2]
    return rects
def draw_rects(img, rects, color):
    """Draw a 2-px rectangle on *img* for each (x1, y1, x2, y2) in *rects*."""
    for left, top, right, bottom in rects:
        cv2.rectangle(img, (left, top), (right, bottom), color, 2)
if __name__ == '__main__':
    # Open the capture device and load the frontal-face Haar cascade.
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture(USB_webcam)
    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    # NOTE(review): ``faces`` and ``face`` are not defined anywhere in
    # this module (the ``import faces`` line above is commented out), so
    # the publisher/message lines below raise NameError at runtime —
    # confirm the intended ROS message module before use.
    pub = rospy.Publisher('face_points', faces)
    rospy.init_node('face_detect', anonymous=True)
    r = rospy.Rate(10)
    msg = face()
    # Try to grab a first frame to confirm the camera works.
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        rval = False
    while rval:
        rval, frame = vc.read()
        gray = cv2.cvtColor(frame, cv.CV_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        rects = detect(gray, cascade)
        if rects is not None:
            # First detected face, unpacked into corner coordinates
            # (note the row/column index order used by the message).
            x1 = rects[0][1]
            y1 = rects[0][0]
            x2 = rects[0][3]
            y2 = rects[0][2]
            msg.x1 = x1
            msg.x2 = x2
            msg.y1 = y1
            msg.y2 = y2
            # Publish the detected face at 10 Hz until ROS shuts down.
            while not rospy.is_shutdown():
                rospy.loginfo(msg)
                pub.publish(msg)
                r.sleep()
    cv2.destroyAllWindows()
| [
"srbhchandra@gmail.com"
] | srbhchandra@gmail.com |
ce048b17eef211ec4e392b0c45d2f208f09499dc | e01c15411711a5bb0485760263d3f9587c60bcdc | /method.py | 3c7d28ce1c6c902b3009cec4eec98a457c42fe68 | [] | no_license | spiritman1990/my-first-python-workbench | 50540b6129d4f7ecb491408c8dc260b174d47cb9 | 29f291a03f6d014d42a609f505305b476f890a80 | refs/heads/master | 2021-01-15T11:49:21.222282 | 2015-11-13T10:30:41 | 2015-11-13T10:30:41 | 41,729,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | #!/usr/bin/python
# Filename: method.py
class Person:
    """Minimal example class demonstrating an instance method."""
    def sayHi(self):
        # Python 2 print statement; this file will not run under Python 3.
        print 'Hello, how are you?'
p = Person()
p.sayHi()
# This short example can also be written as Person().sayHi()
"36917663@qq.com"
] | 36917663@qq.com |
07e7f05c86afb0643a2049ea590bf26c009ad663 | ec6f8a634c607e65300bf9812c79dbf780c351d0 | /raspberrypi_files/field4off.py | 6f2ec758df0e1d77b337b75f96e84b9be40755e0 | [] | no_license | prateek-chouhan05/Home-automatation-system | ccbcd8edaba74ac5564c49187517a2991b4163db | aafc3eebe45bdc075f04403b63e44e67ab0c2bc7 | refs/heads/master | 2023-07-09T13:56:28.028748 | 2020-11-09T06:29:13 | 2020-11-09T06:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | import RPi.GPIO as GPIO
GPIO.setwarnings(False)  # silence "channel already in use" warnings
GPIO.setmode(GPIO.BCM)  # use Broadcom (BCM) pin numbering
GPIO.setup(23, GPIO.OUT)
# NOTE(review): the filename says "off" but the pin is driven HIGH —
# presumably the attached relay is active-low; confirm the wiring.
GPIO.output(23, GPIO.HIGH)
| [
"tanmay.ambadkar@gmail.com"
] | tanmay.ambadkar@gmail.com |
1b3f396208e7862399916f34d807270190ffc84e | 369d32f172f7e6a0fd3a5a95062437aaaf79bba6 | /tests/rsa_enc/ex2/debug_lib.py | 7be0246c66c8e5f8e74678bd05de45c1ac770dad | [] | no_license | amit-choudhari/cryscanner | b9f6dc723817e294a4927a4c9bcca84446b40d13 | 3865e875f14dbb319d68f01010128825db115ae7 | refs/heads/main | 2023-08-04T03:48:37.851387 | 2021-09-26T12:25:32 | 2021-09-26T12:25:32 | 391,231,124 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,406 | py | import gdb
import logging
import gdb.printing
import time
import os
# OpenSSL RSA entry points to break on and trace.
apis = ['RSA_public_encrypt','RSA_private_decrypt','EVP_PKEY_CTX_set_rsa_keygen_bits', 'EVP_PKEY_CTX_set_rsa_padding']
class SimpleCommand(gdb.Command):
    """Custom gdb command ("simple_command") that runs the debuggee, breaks on
    the OpenSSL calls listed in the module-level ``apis`` list, and logs each
    hit's backtrace and argument values via gdb's logging machinery.

    The command round-trips its own console output through scratch.txt: gdb
    logging is pointed at scratch.txt, and the parse_* methods read that file
    back (``self.fi``) to post-process what was printed into log.txt.
    """
    # Class-level file handles opened at import time and shared by all instances.
    fo=open("log.txt","w+")
    fi=open("scratch.txt", "w+")
    def __init__(self):
        # Register this object as a user-defined gdb command named "simple_command".
        super(SimpleCommand, self).__init__("simple_command", gdb.COMMAND_USER)
    def print_args(self):
        """Print the current frame's arguments, bracketed by START/END markers."""
        print('=> START arguments')
        gdb.execute('info args')
        print('=> END arguments')
    def print_stacktrace(self):
        """Print the function name of every frame on the current stack."""
        print('=> START stack frame')
        cur_frame = gdb.selected_frame()
        while cur_frame is not None:
            print(cur_frame.name())
            # get the next frame
            cur_frame = cur_frame.older()
        print('=> END stack frame')
    def parse_bt(self):
        """Copy one captured backtrace line from scratch.txt into log.txt.

        Temporarily retargets gdb logging at log.txt so the print below lands
        there, then points logging back at scratch.txt.
        """
        line = self.fi.readline()
        gdb.execute("set logging off")
        gdb.execute("set logging file log.txt")
        gdb.execute("set logging on")
        print (line)
        gdb.execute("set logging off")
        gdb.execute("set logging file scratch.txt")
        gdb.execute("set logging on")
    def parse_args(self):
        """Parse the "info args" section previously captured in scratch.txt.

        For each "name = value" line, asks gdb for the argument's type via
        ``ptype`` (captured in tmp_scratch.txt); struct arguments are dumped
        with ``print/x`` (dereferenced when the last ptype line ends in "} *").
        Output is routed into log.txt, then logging is restored to scratch.txt.
        """
        line = self.fi.readline()
        try:
            while(line != ''):
                if '=> INFO ARGS-END' in line:
                    break
                arg = line.split(' =')[0]
                s = "ptype "+arg
                # Capture the ptype output in a private temp file.
                gdb.execute("set logging off")
                ft = open("tmp_scratch.txt", "w+")
                gdb.execute("set logging file tmp_scratch.txt")
                gdb.execute("set logging on")
                gdb.execute(s)
                gdb.execute("set logging off")
                gdb.execute("set logging file log.txt")
                gdb.execute("set logging on")
                tmp_line = ft.readline()
                if 'struct' in tmp_line:
                    # Scan to the last line of the ptype dump; it reveals
                    # whether the type is a struct pointer ("} *").
                    while(tmp_line != ''):
                        check_line = tmp_line
                        tmp_line = ft.readline()
                    print ('struct '+arg)
                    if '} *' in check_line:
                        s1 = 'print/x *' +arg
                    else:
                        s1 = 'print/x ' +arg
                    try:
                        gdb.execute(s1)
                    except:
                        pass
                else:
                    # NOTE(review): for non-struct args s1 is built but never
                    # executed — confirm whether a gdb.execute(s1) is missing.
                    s1 = 'print/x '+arg
                print (s+' '+line)
                line = self.fi.readline()
        except:
            # Best effort: swallow parse errors, then restore logging below.
            pass
        gdb.execute("set logging off")
        gdb.execute("set logging file scratch.txt")
        gdb.execute("set logging on")
    def invoke(self, arg, from_tty):
        """Entry point executed when "simple_command" is typed at the gdb prompt."""
        # when we call simple_command from gdb, this method
        # is invoked
        gdb.execute("set logging file scratch.txt")
        gdb.execute("set print pretty on")
        gdb.execute("set print repeats 0")
        print("Hello from simple_command")
        gdb.execute('start')
        # Add breakpoints
        for api in apis:
            bp = gdb.Breakpoint(api)
        print('')
        print('=> BREAKPOINT END')
        logging.basicConfig(filename="scratch.txt", level=logging.INFO)
        while True:
            gdb.execute("set logging on")
            # TODO fix finish command
            #gdb.execute("finish")
            gdb.execute("continue")
            line = self.fi.readline()
            # NOTE(review): `inferiors` is assigned but unused; the loop below
            # calls gdb.inferiors() again.
            inferiors = gdb.inferiors()
            test = 0
            for inf in gdb.inferiors():
                print('INF PROC'+str(inf.pid))
                if inf.pid:
                    print('Continue')
                else:
                    # A falsy pid is treated as "debuggee exited": stop looping.
                    print('EXIT!!')
                    test = 1
            if test == 1:
                break
            print('NEXT')
            gdb.execute('next')
            # TODO: Note the api called and i/p params
            # TODO: Fetch different types of parameters
            # like structs, pointer to structs.
            # TODO: Find a way to get output arguments.
            # TODO: Push to log file
            print('=> BREAKPOINT HIT!!')
            print('=> Backtrace')
            gdb.execute('backtrace')
            print('=> INFO ARGS-START')
            gdb.execute('info args')
            print('=> INFO ARGS-END')
            gdb.execute('set logging off')
            gdb.execute('set logging on')
            print('=> BEFORE PARSE')
            gdb.execute("set logging off")
            gdb.execute("set logging on")
            # consume all commands
            # line = self.fi.readline()
            while(line != ''):
                if 'INFO ARGS-START' in line:
                    self.parse_args()
                if 'Backtrace' in line:
                    self.parse_bt()
                line = self.fi.readline()
            print('=> AFTER CONSUMING COMMANDS')
            gdb.execute("set logging off")
# Instantiate once at import time so gdb registers the "simple_command" command.
SimpleCommand()
| [
"amitsirius92@gmail.com"
] | amitsirius92@gmail.com |
53ce967712dde0cbc2a8c1fcf7bf9a94a2737fbf | e7ea21a55c7733c05ff1a25bb2d47560e1ac4e0c | /src/housing_value_n_walkscore.py | c28c4e3da2f13580367d25d87bd64b09c8e8ce53 | [] | no_license | YueHao96/inf510_project | b6620ffbcf32d1e1af0ba6886cdd6086c591f236 | 194bed74cd896330c4a4738ddd4050e2e6499306 | refs/heads/master | 2020-09-29T05:34:51.287374 | 2019-12-13T11:33:45 | 2019-12-13T11:33:45 | 226,421,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,439 | py | #!/usr/bin/env python
# coding: utf-8
import csv
import requests
import json
import re
from bs4 import BeautifulSoup
#import sqlite3
import sql
zws_id="X1-ZWz1hgvbaoqsy3_3tnpx"
key="3eeaa7fc9f4a7ea97878520335caf746"
def get_cities():
"""The function is used to scrape city names in LA county from wikipedia.
Returns:
cities:A list of cities.
"""
la_county=[]
cities=[]
try:
request=requests.get("https://en.wikipedia.org/wiki/List_of_cities_in_Los_Angeles_County,_California")
request.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
else:
soup = BeautifulSoup(request.content, 'lxml')
main_table = soup.find('table')
main_body = main_table.find('tbody')
info = soup.find_all('tr')
for city in info:
if (len(city.find_all('td')) > 0):
name1 = city.find('a').text.split()
split_city="".join(name1).lower()
la_county.append(split_city)
name2 = city.find('a').text
cities.append(name2.lower())
cities=cities[:-6]
return(cities)
def get_value(cities):
"""Use city name to contrust url in order to get housing value, longitude and latitude.
Store data into csv file.
Args:
cities:A list of city names.
Raises:
HTTPError: An error occurred when doing GET request.
Returns:
value_list: A list.Including zipcode of the place and its longitude and latitude value.
"""
value_list=[]
for city in cities:
url=(f"http://www.zillow.com/webservice/GetRegionChildren.htm?zws-id={zws_id}&state=ca&city={city}&childtype=zipcode")
try:
response=requests.get(url)
response.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
else:
soup = BeautifulSoup(response.content, 'xml')
try:
info = soup.find("list")
contents=info.find_all("region")
except:
print(f"{url} does not work")
else:
if len(contents)!=0:
for content in contents:
if content.find("name")==None:
zip_code="-1"
else:
zip_code=content.find("name").text
if content.find("zindex")==None:
value="-1"
else:
value=content.find("zindex").text
if content.find("latitude")==None:
lat="N/A"
else:
lat=content.find("latitude").text
if content.find("longitude")==None:
lon="N/A"
else:
lon=content.find("longitude").text
value_list.append((value,zip_code,lat,lon))
try:
f= open(r'..\data\housing value.csv','w',encoding='utf-8')
except IOError as err:
print(f"File error: {err}.")
else:
f.write("{},{},{},{}\n".format("housing value","zip code","latitude","longitude"))
for value,zip_code,lat,lon in value_list:
f.write("{},{},{},{}\n".format(value,zip_code,lat,lon))
f.close()
return value_list
def get_walk_score(zip_code,lat,lon):
"""Store walk score data into vsc file.
Args:
zip_code: A 5 digit unique zip code.
lat: Latitude of the recorded place.
lon: Longitude of the recorded place.
Raises:
HTTPError: Error occurred when doing GET request.
Returns:
A tuple.With area zipcode and its walk score.
"""
walk_url=(f"http://api.walkscore.com/score?format=json&address=%20CA&lat={lat}&lon={lon}&wsapikey={key}")
try:
response=requests.get(walk_url)
response.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
else:
content=response.json()
#print(content)
try:
walk_score=content["walkscore"]
except:
walk_score=-1
#walk_score_list.append((city,zip_code,walk_score))
return (zip_code,walk_score)
def store_walk_score():
"""Store walk score data into database.
Args:
walk_score_list: A list contains area zipcode and the walk score.
"""
try:
with open(r"..\data\walk score.csv","r") as csvfile:
file=csvfile.readlines()
except:
print("error")
else:
info_list=[]
for row in file:
row=row.strip()
row=row.split(",")
info_list.append(row)
sql.create_walk_score_table()
for r in range(1,len(info_list)):
walk_score=info_list[r][1]
walk_score=float(walk_score)
zip_code=info_list[r][0]
zip_code=str(zip_code)
zipcode_id=sql.get_zipcode_id(zip_code)
sql.insert_walk_score(walk_score,zipcode_id)
def store_house_value():
"""Store hosing value data into database.
Args:
value_list: Return value from get_value() function.A list with area zipcode, hosing value and coordinates.
"""
try:
with open(r"..\data\housing value.csv","r") as csvfile:
file=csvfile.readlines()
except:
print("error")
else:
info_list=[]
for row in file:
row=row.strip()
row=row.split(",")
info_list.append(row)
sql.create_housing_value_talble()
for r in range(1,len(info_list)):
value=info_list[r][0]
if value!="-1":
value=int(value)
else:
value=-1
latitude=info_list[r][2]
latitude=float(latitude)
longitude=info_list[r][3]
longitude=float(longitude)
zip_code=info_list[r][1]
zip_code=str(zip_code)
zipcode_id=sql.get_zipcode_id(zip_code)
sql.insert_housing_value(value,latitude,longitude,zipcode_id)
def run_housing():
"""Run functions.
Firsltly, scrape city names from Wikipedia.
Secondly,using city names as API parameter to get housing value, longitude and latitude.
Thirdly, using longitude and latitude as API parameter to get walk score value.
Lastly, storing housing value data and walk score data into database.
"""
cities=get_cities()
value_list = get_value(cities)
walk_score_list = []
for i in range (len(set(value_list))):
(zip_code,lat,lon)=value_list[i][1:]
if (zip_code,lat,lon)!=("N/A","N/A","N/A","N/A"):
walk_score_list.append(
get_walk_score(zip_code,lat,lon))
else:
walk_score_list.append((zip_code,-1))
try:
f= open(r'..\data\walk score.csv','w',encoding='utf-8')
except IOError as err:
print(f"File error: {err}.")
else:
f.write("{},{}\n".format("zip code","walk score"))
for zip_code,walk_score in walk_score_list:
f.write("{},{}\n".format(zip_code,walk_score))
f.close()
store_walk_score()
store_house_value()
if __name__=="__main__":
run_housing()
| [
"noreply@github.com"
] | noreply@github.com |
3ad7105c5241d18e90f315ec62cb2cee19973942 | f4e6cda74a1393ec96d0effdeccf02fb02314f8c | /strict_typing/__init__.py | d3840cfea0d3a5e0d39b23d6c9af02a62a71cef5 | [
"MIT"
] | permissive | whdev1/py-strict-typing | 427c67e7b15dc6a661943dd515dca118952500ca | 9266ff037a1cd179034ac77170fead3fc1a3ef46 | refs/heads/main | 2023-08-02T18:15:42.494565 | 2021-09-27T02:05:12 | 2021-09-27T02:05:12 | 409,736,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,682 | py | from copy import copy
from inspect import signature
from typing import get_type_hints, Union
from types import FunctionType as function
class _typed_function:
__origcall__ = None
__params__ = None
__types__ = None
def __init__(self, func: function):
self.__origcall__ = func.__call__
self.__params__ = list(signature(func).parameters.keys())
self.__types__ = get_type_hints(func)
# check for a return type
if 'return' in self.__types__.keys():
self.__returntype__ = self.__types__['return']
def __call__(self, *args, **kwargs):
params_copy = copy(self.__params__)
# type check all kwargs
for key in kwargs:
if key in self.__types__.keys():
if not issubclass(kwargs[key].__class__, self.__types__[key]) and not kwargs[key].__class__ == type(None):
raise TypeError(
'object of type \'' + str(kwargs[key].__class__.__name__)+ '\' cannot be assigned '
'to parameter \'' + key + '\' of type \'' + str(self.__types__[key].__name__) + '\''
)
params_copy.remove(key)
else:
params_copy.remove(key)
# type check all positional args
for n in range(len(args)):
if params_copy[n] in self.__types__.keys():
if not issubclass(args[n].__class__, self.__types__[params_copy[n]]) and not args[n].__class__ == type(None):
raise TypeError(
'object of type \'' + str(args[n].__class__.__name__)+ '\' cannot be assigned '
'to parameter \'' + params_copy[n] + '\' of type \'' + str(self.__types__[params_copy[n]].__name__) + '\''
)
# call the function that we are wrapping
retval = self.__origcall__(*args, **kwargs)
# if a return type was specified, check the return type
if hasattr(self, '__returntype__'):
if not issubclass(retval.__class__, self.__returntype__) and not retval.__class__ == type(None):
raise TypeError(
'object of type \'' + str(retval.__class__.__name__)+ '\' cannot be assigned '
'to return value of type \'' + str(self.__returntype__.__name__) + '\''
)
return retval
def _checked_setattr(inst: object, name: str, attr: object) -> None:
# check if this attribute has a type hint
if name in inst.__types__.keys():
# if it does, ensure that the object being passed is of the correct type
if issubclass(attr.__class__, inst.__types__[name]):
inst.__origsetattr__(name, attr)
else:
# if it is not the correct type, throw an error
raise TypeError(
'object of type \'' + str(attr.__class__.__name__) + '\' cannot be ' +
'assigned to member \'' + name + '\' of type \'' + str(inst.__types__[name].__name__) + '\''
)
# otherwise, if this attribute has no type hint
else:
# check that the attribute has been declared
if hasattr(inst, name):
inst.__origsetattr__(name, attr)
else:
# if not, throw an error
raise NameError('object has no attribute \'' + name + '\'')
def strictly_typed(class_def: Union[type, function]):
    """
    A decorator that makes classes and functions enforce their type hints.

    Classes additionally reject assignment to attributes that were never
    declared.  Functions are wrapped in a ``_typed_function`` checker.
    """
    target_kind = class_def.__class__
    if issubclass(target_kind, type):
        # Decorating a class: record its hints for the checked setter.
        hints = get_type_hints(class_def)
        class_def.__types__ = hints
        # Give every hinted-but-unset attribute an initial value of None.
        for attr_name in hints:
            if attr_name not in class_def.__dict__:
                setattr(class_def, attr_name, None)
        # Preserve the original __setattr__, then install the checking one.
        class_def.__origsetattr__ = class_def.__setattr__
        class_def.__setattr__ = _checked_setattr
        return class_def
    if issubclass(target_kind, function):
        # Decorating a plain function: wrap it in the type-checking callable.
        return _typed_function(class_def)
    raise TypeError(
        'cannot apply strictly_typed decorator to objects of type ' + str(type(class_def).__name__)
    )
"anon-github@hinson.ai"
] | anon-github@hinson.ai |
d52897974dca71896f8010946a8b51fba5aaf253 | 7d78a18fcb8f34cc84e9439bd19cf491e3e0ec49 | /Code/Particle_Identification/msc-hpc/OLD/OLD/round1/feedforward_python/model/model1/feed_forward_1_dropout_window_sum.py | f0f09278e5686de8562c923232116972c2fce202 | [] | no_license | PsycheShaman/MSc-thesis | 62767951b67b922ce5a21cad5bdb258998b7d2ea | 34504499df64c7d6cc7c89af9618cd58d6378e8e | refs/heads/master | 2022-03-12T07:17:57.309357 | 2019-12-10T21:17:39 | 2019-12-10T21:17:39 | 151,471,442 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,372 | py | print("==============================================================================================")
print("starting........................................................................................")
import glob
import numpy as np
print("imported glob, np........................................................................................")
# Collect the pickled feature (x) and label (y) shards from cluster scratch space.
x_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/ff/x_*.pkl")
y_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/ff/y_*.pkl")
#x_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\ff\\x_*.pkl")
#y_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\ff\\y_*.pkl")
import pickle
print("loading first x pickle........................................................................................")
with open(x_files[0], 'rb') as x_file0:
    x = pickle.load(x_file0)
print("loading first y pickle........................................................................................")
with open(y_files[0], 'rb') as y_file0:
    y = pickle.load(y_file0)
print("recursively adding x pickles........................................................................................")
# Append the remaining shards row-wise onto the first one.
for i in x_files[1:]:
#for i in x_files[1:2]:
    with open(i,'rb') as x_file:
        xi = pickle.load(x_file)
        x = np.concatenate((x,xi),axis=0)
print("recursively adding y pickles........................................................................................")
for i in y_files[1:]:
#for i in y_files[1:2]:
    with open(i,'rb') as y_file:
        yi = pickle.load(y_file)
        y = np.concatenate((y,yi),axis=None)
# Drop rows whose feature vector is entirely zero.
nz = np.array([np.count_nonzero(i) for i in x])
zeros = np.where(nz==0)
x = np.delete(x,zeros,axis=0)
y = np.delete(y,zeros)
#oversample electrons
# Class 1 (electrons) is duplicated twice to rebalance the classes.
# NOTE(review): oversampling happens BEFORE the train/test split below, so
# duplicated electron rows can land in both train and test sets (leakage) --
# confirm this is intended.
elec = np.where(y==1)
pion = np.where(y!=1)
electrons_x = x[elec,:]
electrons_y = y[elec]
electrons_x = np.squeeze(electrons_x)
x = np.concatenate((electrons_x,x,electrons_x),axis=0)
y = np.concatenate((electrons_y,y,electrons_y),axis=None)
# Scale every feature by the global mean.
mu = np.mean(x)
x = np.true_divide(x,mu)
# Append 8 window-sum features (filename says "window sum").
# NOTE(review): each slice covers only 2 elements and every third index
# (2, 5, 8, ...) is skipped -- confirm whether i[0:3], i[3:6], ... was intended.
x_add = np.array([np.array((np.sum(i[0:2]),np.sum(i[3:5]),np.sum(i[6:8]),np.sum(i[9:11]),
                            np.sum(i[12:14]),
                            np.sum(i[15:17]),np.sum(i[18:20]),np.sum(i[21:23]))) for i in x])
x = np.hstack((x,x_add))
from tensorflow.keras.utils import to_categorical
#y = to_categorical(y)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,random_state=123456)
import tensorflow
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
num_classes = 2
epochs = 100
# One-hot encode the labels for the softmax / categorical-crossentropy models.
y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
# Feed-forward classifier: four hidden ReLU layers with 0.5 dropout each,
# softmax output over the 2 classes; input is the 32-feature vector built above.
model1_dropout_0_5 = Sequential([
        Dense(256, input_shape=(32,)),
        Activation('relu'),
        Dropout(0.5),
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(64),
        Activation('relu'),
        Dropout(0.5),
        Dense(2),
        Activation('softmax')
])
model1_dropout_0_5.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# 15% of the training data is held out for validation during fitting.
history = model1_dropout_0_5.fit(x_train, y_train,
                    #batch_size=batch_size,
                    epochs=epochs,
                    validation_split=0.15,
                    shuffle=True,
                    verbose=2)
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_5_history.png', bbox_inches='tight')
# Persist the test-set class probabilities and the trained model.
model1_dropout_0_5.probs = model1_dropout_0_5.predict_proba(x_test)
import numpy as np
np.savetxt("/home/vljchr004/msc-hpc/feedforward_python/results/feed_forward_1__dropout_0_5_results.csv", np.array(model1_dropout_0_5.probs), fmt="%s")
model1_dropout_0_5.save('/home/vljchr004/msc-hpc/feedforward_python/feed_forward_1__dropout_0_5.h5') # creates a HDF5 file 'my_model.h5'
del model1_dropout_0_5
# Same architecture as the 0.5 model above, but with heavier (0.8) dropout.
model1_dropout_0_8 = Sequential([
        Dense(256, input_shape=(32,)),
        Activation('relu'),
        Dropout(0.8),
        Dense(128),
        Activation('relu'),
        Dropout(0.8),
        Dense(128),
        Activation('relu'),
        Dropout(0.8),
        Dense(64),
        Activation('relu'),
        Dropout(0.8),
        Dense(2),
        Activation('softmax')
])
model1_dropout_0_8.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
history = model1_dropout_0_8.fit(x_train, y_train,
                    #batch_size=batch_size,
                    epochs=epochs,
                    validation_split=0.15,
                    shuffle=True,
                    verbose=2)
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Bug fix: close the accuracy figure so the loss curves start on a fresh one.
# Without this the saved "loss" plot also contained the accuracy curves
# (the 0.5-dropout section above already calls plt.close() here).
plt.close()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_8_history.png', bbox_inches='tight')
# Persist the test-set class probabilities and the trained model.
model1_dropout_0_8.probs = model1_dropout_0_8.predict_proba(x_test)
import numpy as np
np.savetxt("/home/vljchr004/msc-hpc/feedforward_python/results/feed_forward_1__dropout_0_8_results.csv", np.array(model1_dropout_0_8.probs), fmt="%s")
model1_dropout_0_8.save('/home/vljchr004/msc-hpc/feedforward_python/feed_forward_1__dropout_0_8.h5') # creates a HDF5 file 'my_model.h5'
del model1_dropout_0_8
# Mixed-dropout variant: 0.8 after the first two hidden layers, 0.5 after the rest.
model1_dropout_0_8_0_5 = Sequential([
        Dense(256, input_shape=(32,)),
        Activation('relu'),
        Dropout(0.8),
        Dense(128),
        Activation('relu'),
        Dropout(0.8),
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(64),
        Activation('relu'),
        Dropout(0.5),
        Dense(2),
        Activation('softmax')
])
model1_dropout_0_8_0_5.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
history = model1_dropout_0_8_0_5.fit(x_train, y_train,
                    #batch_size=batch_size,
                    epochs=epochs,
                    validation_split=0.15,
                    shuffle=True,
                    verbose=2)
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# Bug fix: the accuracy plot was saved to the SAME file as the loss plot below
# ("..._history2.png"), so it was silently overwritten. Save it separately.
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_8_0_5_history1.png', bbox_inches='tight')
# Bug fix: close the accuracy figure so the loss curves start on a fresh one.
plt.close()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_8_0_5_history2.png', bbox_inches='tight')
# Persist the test-set class probabilities and the trained model.
model1_dropout_0_8_0_5.probs = model1_dropout_0_8_0_5.predict_proba(x_test)
import numpy as np
np.savetxt("/home/vljchr004/msc-hpc/feedforward_python/results/feed_forward_1__dropout_0_8_0_5_results.csv", np.array(model1_dropout_0_8_0_5.probs), fmt="%s")
model1_dropout_0_8_0_5.save('/home/vljchr004/msc-hpc/feedforward_python/feed_forward_1__dropout_0_8_0_5.h5') # creates a HDF5 file 'my_model.h5'
del model1_dropout_0_8_0_5
| [
"christiaan.viljoen@cern.ch"
] | christiaan.viljoen@cern.ch |
20295597a1a0e27950bcdb817065231c55557d7b | 841e3d8497b6f779e2106ff8009d9de6664ae113 | /root/urls.py | 29dd0cd9573a11299ac7134477e374eaa6d6dafd | [] | no_license | tungnt620/happypiggybank | 6f3ff5b22be418e44aa99fb032e1e33f9a9168b9 | 9ddf4da37a94b7f570a9f22501b520f8eb19e711 | refs/heads/master | 2021-09-14T19:40:37.606921 | 2018-05-18T07:02:47 | 2018-05-18T07:02:47 | 106,623,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | """root URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    url(r'^', include('happypiggybank.urls')),  # main app mounted at the site root
    url(r'^users/', include('users.urls', namespace='users')),  # users app
    url(r'^accounts/', include('allauth.urls')),  # django-allauth auth views
    # NOTE(review): the catch-all r'^' include precedes the more specific
    # prefixes; Django only falls through to later patterns when the included
    # URLconf does not match -- confirm none of happypiggybank's patterns
    # shadow users/ or accounts/.
]
| [
"thanhtung.nguyen@ved.com.vn"
] | thanhtung.nguyen@ved.com.vn |
d4073c28b2eba9cecc9125245c66c2a9f0448b86 | d0de9d9cd78ecff44ff931e4361da29edb2073ef | /ARK Software/Task3/finding templete of level 2.py | c69a62386e48678076cac897c2a6af6a4de91d17 | [] | no_license | ankitranjan5/ARKTask | b77e3d8a8b96ef58416823b3cb6e06a0c9038d27 | 390e932d72ba75ad17f4bcf3bf679174f0be9793 | refs/heads/main | 2023-06-01T00:28:05.558551 | 2021-06-19T12:03:58 | 2021-06-19T12:03:58 | 377,490,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import cv2 as cv
import numpy as np
# Template matching: locate a small template patch inside a larger image and
# draw a rectangle around the best match.
img = cv.imread('Task round\zucky_elon.png')
# NOTE(review): this script is named "level 2" but loads the level-1 template
# image -- confirm the intended template file.
template = cv.imread('level 1 chupi image.png')
h = np.shape(template)[0]  # template height in pixels
w = np.shape(template)[1]  # template width in pixels
img2 = img.copy()  # search on a copy; `img` is used for drawing below
result = cv.matchTemplate(img2, template,cv.TM_CCOEFF_NORMED)
# For TM_CCOEFF_NORMED the best match is the global maximum of the score map.
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
location=max_loc  # top-left corner of the best match
bottom_right = (location[0] + w, location[1] + h)
print(location)
cv.rectangle(img, location, bottom_right, 255, 5)
cv.imshow('COLOR',img)
cv.waitKey(0) | [
"noreply@github.com"
] | noreply@github.com |
f7a06c6b66d1d7b10dec13cdc0b0c20e99db3505 | 34be4c498093701b3e2fe806ab5e4087bba0d83e | /Q684.py | 47c72bdfb045ef584b917a9cb11a29e7fc66e185 | [] | no_license | iMashiro/QuestoesHuxley | a2872695fa518455bf474502d4f0c506f8975be0 | 8cebd8db2bdde3e6c2c9a0dbb70fd4db2cb6a332 | refs/heads/master | 2020-03-27T17:56:41.420496 | 2018-10-30T11:09:43 | 2018-10-30T11:09:43 | 146,886,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | parametros = input()
# `parametros` (read on the previous line) holds "<num_houses> <num_deliveries>".
header = [int(tok) for tok in parametros.split()]
houses = [int(tok) for tok in input().split()]
deliveries = [int(tok) for tok in input().split()]

# Map each of the first header[0] house numbers to its position on the street.
position_of = {houses[idx]: idx for idx in range(header[0])}

current = 0    # index of the house the courier is standing at
travelled = 0  # total distance walked
for step in range(header[1]):
    target = deliveries[step]
    if target != houses[current]:
        destination = position_of[target]
        travelled += abs(current - destination)
        current = destination
print(travelled)
| [
"noreply@github.com"
] | noreply@github.com |
4f474113f69909f678d9a00677b4e9219da14dbf | a2f88bfa884b9003045c7a1f3ee344a4aeccdd40 | /note.py | 6eaf540c2a8e67f1918197dc4c40ba7dca7984ef | [] | no_license | deverser/ABB_drive_param_diff | 7659432c7234a523699a98b8f4636429e3e280fb | 90ab65ee4efd32bf2e4f5f96490fc87f18734d0c | refs/heads/master | 2022-09-06T05:18:30.245774 | 2022-08-10T11:47:25 | 2022-08-10T11:47:25 | 177,919,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | from tkinter import *
from tkinter import filedialog as fd
def insertText():
    """Ask the user for a file and insert its contents at the top of the text widget."""
    file_name = fd.askopenfilename()
    if not file_name:
        # Dialog was cancelled: open('') would raise, so just do nothing.
        return
    # `with` guarantees the file is closed even if reading fails.
    with open(file_name) as f:
        s = f.read()
    text.insert(1.0, s)
def extractText():
    """Ask the user for a destination file and write the text widget's contents to it."""
    file_name = fd.asksaveasfilename(filetypes=(("TXT files", "*.txt"),
                                                ("HTML files", "*.html;*.htm"),
                                                ("All files", "*.*")))
    if not file_name:
        # Dialog was cancelled: do not create a file.
        return
    s = text.get(1.0, END)
    # `with` guarantees the file is flushed and closed even if writing fails.
    with open(file_name, 'w') as f:
        f.write(s)
# Build the UI: a text area spanning two columns with Open/Save buttons below it.
root = Tk()
text = Text(width=50, height=25)
text.grid(columnspan=2)
b1 = Button(text="Открыть", command=insertText)  # "Open"
b1.grid(row=1, sticky=E)
b2 = Button(text="Сохранить", command=extractText)  # "Save"
b2.grid(row=1, column=1, sticky=W)
root.mainloop()
"alexmal74@gmail.com"
] | alexmal74@gmail.com |
bff17f1452138b1f5d43b8ce8a2cfba8add771b5 | 8c17ed3760bc64be1c2ff4cade8047df57751159 | /saildrone/models/route.py | 79520ee8b76cac96436cd5d5ecaf4360c74c12ff | [] | no_license | askiefer/saildrone | b8f00880de3f29c8edb015f5f06bcd2e26043b43 | ed15e221e2a9dff05d5b50ffc15ae14818b7f76b | refs/heads/master | 2020-04-07T13:33:40.471564 | 2018-11-20T15:40:46 | 2018-11-20T15:40:46 | 158,411,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | from django.contrib.gis.db import models
class Route(models.Model):
    '''
    Models a drone route.

    The geometry is stored as a GeoDjango MultiLineString in SRID 4326
    (WGS 84 longitude/latitude).
    '''
    class Meta:
        # Skip this model on databases without GIS support.
        required_db_features = ['gis_enabled']
    # The route geometry itself; srid=4326 selects the WGS 84 lon/lat system.
    route = models.MultiLineStringField(srid=4326)
| [
"anna@kevalaanalytics.com"
] | anna@kevalaanalytics.com |
aa43c453817cb402a595dcda4a55653d1befd1e8 | 9db35644ff1603eef458558fd6a40aaf06356fc1 | /特征预测/主成分降维/PCA_CircuitBreaker.py | ce4c4e1bfec7b333f7d4a3bd5305e454bd394ede | [] | no_license | ldcrchao/PycharmProjects | 0bf83e3d44d96be7bad5dbcc47dfcd8fa729883a | a6e8bb4ec211f241aecd4be344d04c14e14f59b3 | refs/heads/main | 2023-04-19T15:12:50.070297 | 2021-05-09T08:07:42 | 2021-05-09T08:07:42 | 374,944,409 | 1 | 0 | null | 2021-06-08T08:50:56 | 2021-06-08T08:50:56 | null | UTF-8 | Python | false | false | 6,403 | py | #%%
# -*- coding UTF-8 -*-
'''
@Project : python学习工程文件夹
@File : PCA_CircuitBreaker.py
@Author : chenbei
@Date : 2020/12/17 10:33
'''
from matplotlib.pylab import mpl
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Times New Roman'] # 设置字体风格,必须在前然后设置显示中文
mpl.rcParams['font.sans-serif'] = ['SimHei'] # 显示中文的命令
mpl.rcParams['axes.unicode_minus'] = False # 显示负号的命令
from sklearn.decomposition import PCA
#from sklearn.cross_decomposition import CCA # CCA用于执行监督降维
#from sklearn.datasets import make_multilabel_classification # 生成随机的多标签分类问题
#from sklearn.multiclass import OneVsRestClassifier # 一对多分类器 , 每个类分配一个二分类器
#from sklearn.preprocessing import LabelEncoder
#from sklearn.svm import SVC
import seaborn as sns
import pandas as pd
import numpy as np
#%% 一级标签 8 分类问题
X = pd.read_csv("C:/Users\chenbei\Desktop\陈北个人论文\图源数据及其文件/Circuit_Breaker_FirstLevelLabel.csv",encoding='gbk')#解决不能识别希腊字母的问题
model=PCA(n_components=2)
X_Features = X.iloc[:,0:-1] # 没有分类标签的纯数据
X_new=model.fit_transform(X_Features) # 训练并转换
X_new_dataframe = pd.DataFrame(X_new)
Category = X.iloc[:,-1] # 取出标签
X_new_category = pd.concat([X_new_dataframe,Category],axis=1,ignore_index= True) # 按列拼接
X_new_category.columns = ['PCA1','PCA2','Category'] # 重命名
#X_new_category.to_csv("C:/Users\chenbei\Desktop\陈北个人论文\图源数据及其文件/FirstLevelPCA.csv",encoding='gbk',index=False)
noise_variance = model.noise_variance_
score = model.score(X_Features)
singular_value = model.singular_values_
print('噪声协方差为:',noise_variance)
print('似然平均值为:',score)
print('奇异值为:',singular_value)
plt.figure(figsize=(7.8,3.8),dpi=600) # 设置图片大小、精度
fig1 = sns.scatterplot(data=X_new_category,x='PCA1',y='PCA2',hue='Category') # 主成分降维后的特征量的散点图可视化
fig1.set_xlabel('PCA1',fontsize=10.5) # 设置字体大小
fig1.set_ylabel('PCA2',fontsize=10.5)
plt.title('主成分散点图(n_conponents=2)')
plt.text(0,1.0 ,"噪声协方差 : "+str(round(noise_variance,5)) ,fontsize=10.5,verticalalignment='center',horizontalalignment='center',family='SimHei')
plt.text(0,1.5 ,"似然平均值 : "+str(round(score,5)) ,fontsize=10.5,verticalalignment='center',horizontalalignment='center',family='SimHei')
plt.tick_params(axis='both',which='major',labelsize=10.5) # 设置刻度
plt.tight_layout()
plt.show()
'''取出4个也是一样的,为了得到主成分方差贡献率,专门一段代码用于绘制方差贡献率图'''
model1 = PCA(n_components=4)
X_new1 = model1.fit_transform(X_Features)
X_new1_dataframe = pd.DataFrame(X_new1)
np.set_printoptions(precision=5)
ratio1 = np.around(model1.explained_variance_ratio_,5)# 主成分降维的方差贡献率
component_nums = model1.n_components_
plt.figure(figsize=(7.8,3.8),dpi=600)
reats = plt.bar(range(component_nums),ratio1) # 所有柱子
plt.ylabel('百分比')
plt.title('主成分方差贡献率')
plt.xlabel('维度')
plt.xticks(ticks=[0,1,2,3],labels=['PCA1','PCA2','PCA3','PCA4'])
plt.tick_params(axis='both',which='major',labelsize=10.5)
for reat in reats : # 每一个柱子循环标注数值大小
reat_height = reat.get_height() # 获取柱子高度
plt.text(reat.get_x() + reat.get_width()/2,reat_height,str(reat_height),size=10.5,ha='center',va='bottom')
plt.tight_layout()
plt.show()
# 保存数据
#X_new_category.to_csv("C:/Users\chenbei\Desktop\陈北个人论文\图源数据及其文件/PCA.csv",encoding='gbk',index=False)
#%% 二级标签 5 分类问题
X = pd.read_csv("C:/Users\chenbei\Desktop\陈北个人论文\图源数据及其文件/Circuit_Breaker_SecondLevelLabel.csv",encoding='gbk')#解决不能识别希腊字母的问题
model=PCA(n_components=2)
X_Features = X.iloc[:,0:-1] # 没有分类标签的纯数据
X_new=model.fit_transform(X_Features) # 训练并转换
X_new_dataframe = pd.DataFrame(X_new)
Category = X.iloc[:,-1] # 取出标签
X_new_category = pd.concat([X_new_dataframe,Category],axis=1,ignore_index= True) # 按列拼接
X_new_category.columns = ['PCA1','PCA2','Category'] # 重命名
#X_new_category.to_csv("C:/Users\chenbei\Desktop\陈北个人论文\图源数据及其文件/SecondLevelPCA.csv",encoding='gbk',index=False)
noise_variance = model.noise_variance_
score = model.score(X_Features)
singular_value = model.singular_values_
print('噪声协方差为:',noise_variance)
print('似然平均值为:',score)
print('奇异值为:',singular_value)
plt.figure(figsize=(7.8,3.8),dpi=600) # 设置图片大小、精度
fig1 = sns.scatterplot(data=X_new_category,x='PCA1',y='PCA2',hue='Category') # 主成分降维后的特征量的散点图可视化
fig1.set_xlabel('PCA1',fontsize=10.5) # 设置字体大小
fig1.set_ylabel('PCA2',fontsize=10.5)
plt.text(0,1.0 ,"噪声协方差 : "+str(round(noise_variance,5)) ,fontsize=10.5,verticalalignment='center',horizontalalignment='center',family='SimHei')
plt.text(0,1.5 ,"似然平均值 : "+str(round(score,5)) ,fontsize=10.5,verticalalignment='center',horizontalalignment='center',family='SimHei')
plt.title('主成分散点图(n_conponents=2)')
plt.tick_params(axis='both',which='major',labelsize=10.5) # 设置刻度
plt.tight_layout()
plt.show()
'''取出4个也是一样的,为了得到主成分方差贡献率,专门一段代码用于绘制方差贡献率图'''
model1 = PCA(n_components=4)
X_new1 = model1.fit_transform(X_Features)
X_new1_dataframe = pd.DataFrame(X_new1)
np.set_printoptions(precision=5)
ratio1 = np.around(model1.explained_variance_ratio_,5)# 主成分降维的方差贡献率
component_nums = model1.n_components_
plt.figure(figsize=(7.8,3.8),dpi=600)
reats = plt.bar(range(component_nums),ratio1) # 所有柱子
plt.ylabel('百分比')
plt.title('主成分方差贡献率')
plt.xlabel('维度')
plt.xticks(ticks=[0,1,2,3],labels=['PCA1','PCA2','PCA3','PCA4'])
plt.tick_params(axis='both',which='major',labelsize=10.5)
for reat in reats : # 每一个柱子循环标注数值大小
reat_height = reat.get_height() # 获取柱子高度
plt.text(reat.get_x() + reat.get_width()/2,reat_height,str(reat_height),size=10.5,ha='center',va='bottom')
plt.tight_layout()
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
cb9254defb1dc4344f4dc02075844f7a20a3bc07 | 69729ce2a0d2147b7b52e14008d8fc9960e3c099 | /fast_rl/core/metrics.py | 3233e2fc611672b7ffd218e9a04ec813f65eb1a7 | [
"Apache-2.0"
] | permissive | swelchm/fast-reinforcement-learning | 2f5d5aa51830f774ca0e6814833a736029e88f4d | 9649b6d1bb931c4e4b7200a73b172325a1d8346f | refs/heads/master | 2020-07-29T16:04:10.926035 | 2019-09-18T02:43:50 | 2019-09-18T02:43:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | import torch
from fastai.basic_train import LearnerCallback
from fastai.callback import Callback, is_listy, add_metrics
class EpsilonMetric(LearnerCallback):
    """fastai callback that records the exploration strategy's epsilon as a metric
    at the end of every epoch."""
    _order = -20  # Needs to run before the recorder
    def __init__(self, learn):
        super().__init__(learn)
        self.epsilon = 0
        # Fail fast if the learner's model cannot supply an epsilon value.
        if not hasattr(self.learn.model, 'exploration_strategy'):
            raise ValueError('Your model is not using an exploration strategy! Please use epsilon based exploration')
        if not hasattr(self.learn.model.exploration_strategy, 'epsilon'):
            raise ValueError('Please use epsilon based exploration (should have an epsilon field)')
    def on_train_begin(self, **kwargs):
        # Register the extra metric column with the recorder.
        self.learn.recorder.add_metric_names(['epsilon'])
    def on_epoch_end(self, last_metrics, **kwargs):
        # Snapshot the current epsilon and append it to this epoch's metrics.
        self.epsilon = self.learn.model.exploration_strategy.epsilon
        return add_metrics(last_metrics, [self.epsilon])
"jokellum@northstate.net"
] | jokellum@northstate.net |
cf18bbbb07fc91e196affd7b0f2463dcc8c10e58 | 939d249bf5ef283144475a544c7f48fd36bcd86a | /seedbox/torrent/parser.py | 428d32e14c59909b10e59bb25a9677e70094949f | [
"MIT"
] | permissive | shad7/seedbox | 3809606ee7bfb6e7b34541b133f6ae444145f310 | 6ed912a8e1c65a66603c693172ac65eac7f818c0 | refs/heads/develop | 2020-06-02T10:07:58.531071 | 2015-07-29T04:42:34 | 2015-07-29T04:42:34 | 9,738,034 | 5 | 2 | null | 2015-06-17T04:26:58 | 2013-04-28T23:45:21 | Python | UTF-8 | Python | false | false | 6,558 | py | """Parses a torrent file."""
import io
import logging
import os
import six
from seedbox.torrent import bencode
LOG = logging.getLogger(__name__)
class ParsingError(Exception):
"""Holds parsing error messages.
Error class representing errors that occur while parsing
the torrent content.
"""
def __init__(self, error_msg):
Exception.__init__(self)
self.error_msg = error_msg
def __str__(self):
return repr(self.error_msg)
DICT_TOKEN = 'd'
LIST_TOKEN = 'l'
INT_TOKEN = 'i'
END_TOKEN = 'e'
NEGATIVE = '-'
STR_SEP_TOKEN = ':'
class Bdecode(object):
def __init__(self, data):
self.data = six.BytesIO(data)
def _next_char(self):
return self.data.read(1).decode('utf-8', 'replace')
def _prev_char(self):
# offset: -1
# mode/whence: SEEK_CUR => 1
self.data.seek(-1, 1)
def _parse_str(self):
self._prev_char()
str_len = self._parse_number(delimiter=STR_SEP_TOKEN)
if not str_len:
raise ParsingError(
'Empty string length found while parsing at position %d'
% self.data.tell())
return self.data.read(str_len)
def _parse_int(self):
return self._parse_number(delimiter=END_TOKEN)
def _parse_number(self, delimiter):
parsed_int = ''
while True:
parsed_int_char = self._next_char()
if parsed_int_char != NEGATIVE and not parsed_int_char.isdigit():
if parsed_int_char != delimiter:
raise ParsingError(
'Invalid character %s found after parsing an '
'integer (%s expected) at position %d.' %
(parsed_int_char, delimiter, self.data.tell()))
else:
break
parsed_int += parsed_int_char
return int(parsed_int)
def _parse_dict(self):
parsed_dict = {}
while True:
dict_key = self.decode()
if not dict_key:
# End of dict
break
# parse value
dict_value = self.decode()
if isinstance(dict_value, six.binary_type):
dict_value = dict_value.decode('utf-8', 'replace')
parsed_dict.setdefault(dict_key.decode('utf-8'),
dict_value)
return parsed_dict
def _parse_list(self):
parsed_list = []
while True:
list_item = self.decode()
if not list_item:
# End of list
break
if isinstance(list_item, six.binary_type):
list_item = list_item.decode('utf-8', 'replace')
parsed_list.append(list_item)
return parsed_list
def decode(self):
"""Decode torrent content.
:returns: parsed content
"""
parsed_char = self._next_char()
if parsed_char == END_TOKEN:
return None
elif parsed_char == INT_TOKEN:
return self._parse_int()
elif parsed_char.isdigit():
return self._parse_str()
elif parsed_char == DICT_TOKEN:
return self._parse_dict()
elif parsed_char == LIST_TOKEN:
return self._parse_list()
@classmethod
def parse(cls, data):
"""Helper method that creates decoder and decodes content.
:returns: parsed content
"""
return cls(data).decode()
class TorrentParser(object):
def __init__(self, filepath):
"""Reads the torrent file and parses content.
:param str filepath: Path to the torrent file to be parsed
:raises IOError: when a file does not exists
"""
if not os.path.exists(filepath):
raise IOError('No file found at %s' % filepath)
self.file = filepath
self._content = None
@property
def content(self):
if self._content is None:
self._content = self.load_content()
return self._content
def load_content(self):
"""Reads the torrent file and decodes content.
.. note::
bencode is supremely more efficient parser for torrents but
extremely strict in the format of the file. A custom parser based
on another implementation handles parsing that is more flexible
but it is not as efficient. Therefore when the file is well formed
bencode is used but if it fails then the custom parser is used.
If the custom parser fails then a ParsingError is raised.
"""
with io.open(file=self.file, mode='rb') as handle:
content = handle.read()
try:
return bencode.bdecode(content)
except bencode.BTFailure as bterr:
LOG.info('bencode.bdecode failed: (%s); trying alternate approach',
bterr)
return Bdecode.parse(content)
def get_file_details(self):
"""Retrieves details of the file(s) contained in the torrent.
File details tuple:
* name
* length (size)
:returns: file details embedded within torrent
:rtype: list of tuples (name, length)
"""
parsed_files_info = []
files_info = self.content.get(b'info')
if not files_info:
return parsed_files_info
multiple_files_info = files_info.get(b'files')
LOG.debug('files: %s', multiple_files_info)
if multiple_files_info:
# the name attribute was holding the directory name that each
# of the multiple files were contained within.
dir_name = files_info.get(b'name').decode('utf-8')
LOG.debug('dirname: %s', dir_name)
for file_info in multiple_files_info:
LOG.debug('file_info: %s', file_info)
# simply append the directory to the concatenated list
# of items under path, mostly it is a single item.
parsed_files_info.append(
(os.path.join(dir_name,
os.path.sep.join(
[x.decode('utf-8') for x in
file_info.get(b'path')])),
file_info.get(b'length')))
else:
parsed_files_info.append(
(files_info.get(b'name').decode('utf-8'),
files_info.get(b'length')))
return parsed_files_info
| [
"kenny.shad7@gmail.com"
] | kenny.shad7@gmail.com |
7dec4fd5d7eb9e2a8f7a0211e17b89f742b2e587 | 3fff0adf969f47bb3da53e2b56ad72af658cfe9f | /IITJHealthCare/HealthCentre/migrations/0002_prescription.py | 3fc67e9e77b4214740124c7ba25df9092966973a | [] | no_license | Manish9473/HealthCenter-Webapp | d62d4a6f15da711551ff818208c06f5e1b248bc3 | 0256291b3550ac64598bc30fe423788e22bd4c69 | refs/heads/master | 2022-12-25T06:01:27.171057 | 2020-09-26T11:05:30 | 2020-09-26T11:05:30 | 298,789,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | # Generated by Django 2.1.5 on 2019-02-10 20:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('HealthCentre', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Prescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prescriptionText', models.CharField(max_length=2000)),
('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='doctorRecords', to='HealthCentre.Doctor')),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='patientRecords', to='HealthCentre.Patient')),
],
),
]
| [
"kumar.53@iitj.ac.in"
] | kumar.53@iitj.ac.in |
829867d9d8ec99574bd8aaa34dee709898c1c390 | 695b4e671a694bab0d41e6de294d0e97e1dc051b | /snli5.py | af4b8df44162eec89c6a5b3ac9678e141156de4d | [] | no_license | letavino/bird | 8ebdd2938fc0c074a72393f3db5e2c431dac3af6 | 6221df7abaa021fc693dac68debdfa11f5f913c1 | refs/heads/master | 2021-06-18T03:17:21.399079 | 2019-09-09T20:42:47 | 2019-09-09T20:42:47 | 105,765,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,408 | py | import numpy as np
import json_lines
import time, datetime
from unidecode import unidecode
from keras.utils import to_categorical
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
import tensorflow_hub as hub
from keras import backend as K
import numpy as np
from numpy import array
from numpy import asarray
from numpy import zeros
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
from keras.layers import Input, Dense
from keras.models import Model
from keras.models import Sequential, Model
from keras.layers import Concatenate, Dense, CuDNNLSTM, Input, concatenate, Bidirectional, Dropout, Lambda, GlobalMaxPooling1D
from keras.optimizers import Adagrad
from keras.utils import to_categorical
from attention import SelfAttention
# Initialize session
sess = tf.Session()
K.set_session(sess)
# Now instantiate the elmo model
elmo_model = hub.Module("https://tfhub.dev/google/elmo/2", trainable=True)
use_model = hub.Module("https://tfhub.dev/google/universal-sentence-encoder/2")
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
# Build our model
# We create a function to integrate the tensorflow model with a Keras model
# This requires explicitly casting the tensor to a string, because of a Keras quirk
def ElmoEmbedding(x):
return elmo_model(tf.squeeze(tf.cast(x, tf.string)), signature="default", as_dict=True)["elmo"]
def UseEmbedding(x):
return elmo_model(tf.squeeze(tf.cast(x, tf.string)), signature="default", as_dict=True)["default"]
def loadDataset(path, max_length,k=-1, t=None):
data = list()
with open(path, 'rb') as f: # opening file in binary(rb) mode
for item in json_lines.reader(f):
if item['gold_label'] == '-':
item['gold_label'] = 'neutral'
data.append([item['gold_label'], f"{item['sentence1']:<1024}", f"{item['sentence2']:<1024}"])
#print('|', f"{item['sentence1']:<128}", '|')
data = np.array(data[:k])
#print("Text:",data[:,2])
if t == None:
t = [Tokenizer(),Tokenizer()]
t[0].fit_on_texts(data[:,0])
encodedLabels = t[0].texts_to_sequences(data[:,0])
encodedLabels = np.array(encodedLabels).flatten()
padded_sen1 = data[:,1]
padded_sen2 = data[:,2]
x_train1 = data[:,1]
x_train2 = data[:,2]
y_train = to_categorical(encodedLabels)
return x_train1, x_train2, y_train, t
max_length = 1 #one sentence instead of 30 words
batch_size = 32
epochs = 300000
steps_per_epoch = 100
x_train1, x_train2, y_train, t = loadDataset(path = "snli_1.0\\snli_1.0_train.jsonl", max_length=max_length)
x_valid1, x_valid2, y_valid, _ = loadDataset(path = "snli_1.0\\snli_1.0_dev.jsonl", max_length=max_length, t=t)
max_features = t[1].document_count
def dataGenerator(x1,x2,y,batch_size):
import random
while True:
start = 0#random.randint(0,len(x1)-batch_size)
yield ([x1[start:start+batch_size], x2[start:start+batch_size]], y[start:start+batch_size])
#yield ([x1[0:2], x2[0:2]], y[0:2])
split = len(x_train1)*0.8
genTrain = dataGenerator(x_train1,x_train2,y_train,batch_size)
genVal = dataGenerator(x_valid1,x_valid2,y_valid,batch_size)
print('Build model...')
input1 = Input(shape=(max_length,), dtype=tf.string)
x1 = Lambda(ElmoEmbedding, output_shape=(1024,))(input1)
x1 = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x1)#dropout=0.2, recurrent_dropout=0.2,
x1 = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x1)
x1 = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x1)
x1 = SelfAttention()(x1)
x1 = GlobalMaxPooling1D()(x1)
input2 = Input(shape=(max_length,), dtype=tf.string)
x2 = Lambda(ElmoEmbedding, output_shape=(1024,))(input2)
x2 = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x2)
x2 = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x2)
x2 = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x2)
x2 = SelfAttention(name="test2")(x2)
x2 = GlobalMaxPooling1D()(x2)
x3 = Lambda(UseEmbedding, output_shape=(1024,))(input1)
x3 = Dense(256, activation='relu')(x3)
x4 = Lambda(UseEmbedding, output_shape=(1024,))(input2)
x4 = Dense(256, activation='relu')(x4)
x = concatenate([x1,x2,x3,x4])
x = Dropout(0.4)(x)
x = Dense(512, activation='relu')(x)
x = Dense(256, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(4, activation='softmax')(x)
tensorboard = keras.callbacks.TensorBoard(log_dir='./logs/'+datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S"))
checkpoint = keras.callbacks.ModelCheckpoint(filepath='./models/'+datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S"), save_best_only=True, save_weights_only=True)
model = Model(inputs=[input1, input2], outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
print('Train...')
model.fit_generator(
generator=genTrain,
validation_data=genVal,
epochs=epochs,
steps_per_epoch = steps_per_epoch,
validation_steps=10,
callbacks=[tensorboard,checkpoint])# checkpoint
#score, acc = model.evaluate([x_test1, x_test2], y_test, batch_size=batch_size) | [
"noreply@github.com"
] | noreply@github.com |
51f9b3b1b918fc50ed7ea88401859a1e966bb365 | e90c91d02e199d2b39a2f49f7399fcb20a317a35 | /morning/second_session/parse_struc.py | a5da6ab0b61ecee16cca9bc7133e9349237d4b3b | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | andersgs/canberra_workshop_day2 | 92c2e252a608877cb7117787f84f77500af54d1e | 75007409d6458409b71951331a24d27e3032c1e3 | refs/heads/master | 2016-09-15T05:51:52.084209 | 2014-03-27T03:04:54 | 2014-03-27T03:04:54 | 17,579,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | #!/usr/bin/env python
'''
parse_struc.py <chain_prefix>
e.g.,
./parse_struc.py chain_K2
'''
import sys,os,re
prefix = sys.argv[1]
def find_files(prefix):
'''
will return a list of files in the current directory that contain the prefix
'''
return [f for f in os.listdir('.') if re.search(prefix,f)]
def parse_files(filelist,prefix):
'''
output a file prefix_sum.txt with the summary chains
'''
fo = open(prefix+'_sum.txt','w')
file_count = 0
out = []
for f in filelist:
fi = open(f,'r')
rep = f.split('_')[-1]
tmp1 = []
count = 0
for line in fi:
tmp = line.strip().lstrip()
if re.search('[0-9]{3,}:',tmp):
tmp1.append(re.sub(':','',tmp))
else:
if re.search('BURNIN completed',tmp):
count = 1
else:
if count==1 and re.search('Rep#:',tmp) and file_count==0:
header=tmp
header = re.sub('[ ]{2,}',';',header)
header = re.sub('[ ,]{1}','_',header)
header = re.sub(';',' ',header)
header = re.sub('Rep#:','Step',header)
header = 'Rep '+ header
count +=1
file_count+=1
else:
continue
#tmp1 = [re.sub(':','',r.strip().lstrip()) for r in fi if re.search('[0-9]{3,}:',r)]
tmp2 = [re.sub('[ ]{1,}',' ',r) for r in tmp1]
tmp3 = [re.sub('--','- -',r) for r in tmp2]
tmp4 = [rep+' '+r+'\n' for r in tmp3]
out.extend(tmp4)
fo.write(header+'\n')
for r in out:
fo.write(r)
fo.close()
return out
if __name__=="__main__":
flist = find_files(prefix)
parse_files(flist,prefix)
| [
"andersgs@gmail.com"
] | andersgs@gmail.com |
23b68c952f8eef5e972814b8c1323a75da7a4145 | 900635f697d67341ebde7d20ecd2788905ac7767 | /cms/migrations/0004_auto_20210314_1730.py | 6a079f4b64658e6468d8e7a310ce1a05bc09883d | [] | no_license | unaguna/portfolio-django | 7f271a5b06556e81d505fa088124b0a4e0dfdce5 | 4456fe13b0581fb1c6b70a4cadd213de93864aa7 | refs/heads/main | 2023-03-25T22:27:45.347075 | 2021-03-20T08:13:03 | 2021-03-20T08:13:03 | 349,658,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | # Generated by Django 3.1.7 on 2021-03-14 08:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20210313_1946'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='url_chrome_store',
),
migrations.RemoveField(
model_name='article',
name='url_github',
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url_github', models.CharField(blank=True, max_length=255, verbose_name='GitHub URL')),
('url_chrome_store', models.CharField(blank=True, max_length=255, verbose_name='GoogleChrome URL')),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product', to='cms.article', verbose_name='記事')),
],
),
]
| [
"k.izumi.ysk@gmail.com"
] | k.izumi.ysk@gmail.com |
b675e00f7e897ede441ba41eba68cb7f9be08d24 | 3d145abc06306f2a68efa15077b1cb6f14a3c7d0 | /brandoncoin_genesis.py | 4e4a6af6c806946cf2d8e8b174803fb3a6ffc0b6 | [] | no_license | Brandon05/Tiniest_Blockchain | 9be5224cecb31ac4a63e85b2f025e19747a5e278 | 20ef57b32a0e8cde5bf1754d8d1c5f375474116b | refs/heads/master | 2021-07-13T01:26:46.868445 | 2017-10-15T23:25:33 | 2017-10-15T23:25:33 | 107,056,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | import datetime as date
from brandoncoin_block import *
def create_genesis_block():
"""
Manually create a block with
index zero and arbitrary hash
:return: genesis block
"""
return Block(0, date.datetime.now(), {
"proof-of-work": 9,
"transactions": None
}, "0")
| [
"brandonsanchez05@gmail.com"
] | brandonsanchez05@gmail.com |
6243ec060f8be600433c8a5818986edad129bb27 | 5e7d0921887469c651faf90ecfcf3736d4802f6b | /HinataKikuchi/chapter01/knock03.py | 828f8a835ecd17b5ddc1d5cf230aeb25bed896a9 | [] | no_license | tmu-nlp/100knock2021 | 5e4029f2ee330fa95269740d7bd372be533d125d | 0a9f8150588648fbd86d51b4c9168acf3e45ed98 | refs/heads/main | 2023-07-03T14:00:08.604762 | 2021-08-05T09:23:16 | 2021-08-05T09:23:16 | 359,641,037 | 10 | 1 | null | 2021-08-05T09:22:25 | 2021-04-20T00:57:21 | Jupyter Notebook | UTF-8 | Python | false | false | 264 | py | strings = 'Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics.'
ans = strings.split()
print(ans)
###ANS###
#求めたいのは単語の文字数のリストなので回答が違うゾ
print([len(s) for s in ans]) | [
"haiao4041@gmail.com"
] | haiao4041@gmail.com |
18299bf862afb8cacee70275fc0106262c766e72 | fc300e990d4748ecd6da10bcbd6cbb7bc41d27b1 | /project2/test/test_lrv_hat_nw_2_of_single_t.py | dda9f8659a7e1fafa8827ef6db927596319e986b | [] | no_license | borisgarbuzov/project2 | 46ba9310143b85d558874fbeab93c3cafd730467 | 2ee908fe29dabff19869887b01d55629174b9b5c | refs/heads/master | 2023-06-26T21:03:01.364442 | 2020-07-07T20:43:30 | 2020-07-07T20:43:30 | 217,331,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | from src.lrv_hat_nw_2_of_single_t import lrv_hat_nw_2_of_single_t
from timeit import default_timer as timer
import numpy as np
import unittest
class Test_lrv_hat_nw_2_of_signle_t(unittest.TestCase):
def test_lrv_hat_nw_2_of_single_t(self,
sample=np.array([1, 2, 3, 4, 5]),
t_par=0.5):
print('\n\n===============================================================================')
print('Testing "lrv_hat_nw_2_of_single_t"')
start_time = timer()
returned = lrv_hat_nw_2_of_single_t(sample=sample,
t_par=t_par)
print('Test parameters:')
print('sample =', sample)
print('t_par =', t_par)
print('\nreturned = ', type(returned))
if isinstance(returned, list):
print('returned shape = ', len(returned))
elif isinstance(returned, np.ndarray):
print('returned shape = ', returned.shape)
print('returned = ', returned)
print("\nDuration: {:g} secs".format(timer() - start_time))
print('End of test {}'.format('lrv_hat_nw_2_of_single_t'))
print('===============================================================================\n')
if __name__ == '__main__':
unittest.main()
| [
"makovskiyms@gmail.com"
] | makovskiyms@gmail.com |
79c2b3fa152614e3c590109c2ddf024d5dcca380 | 69ecf73040deec4fcb34cac22e15254a4d0a2f6e | /loggers.py | 294529db21e4ec2b704db4c039c96f8b37a774f5 | [] | no_license | erelin6613/ML_Logger | e64a59ad0cb2cb9f83a2d7c4bccd72c3686674fa | 1c29cd8d6a1f65b6077e14228d6ca106f49a76f7 | refs/heads/master | 2022-11-19T12:33:03.765426 | 2020-07-25T07:41:15 | 2020-07-25T07:41:15 | 282,395,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | import os
import csv
import time
import threading
import functools
import logging
import json
from datetime import datetime
def deduce_model(model):
from torch.nn.modules.module import Module
from sklearn.base import (ClassifierMixin,
RegressorMixin, BaseEstimator, ClusterMixin)
if isinstance(model, Module):
return model.state_dict
if issubclass(model.__class__, ClassifierMixin):
return model.get_params
class BaseLogger:
def __init__(self, log_dir):
self.log_dir = log_dir
def log(self, params, logfile='logfile.log'):
with open(logfile, 'a') as f:
f.write(str(time.time())+': '+str(params)+'\n')
def print(self, logfile='logfile.log'):
with open(logfile, 'r') as f:
print(f.read())
class ModelLogger(BaseLogger):
def __init__(self, model, log_dir='model_log'):
super(ModelLogger, self).__init__(log_dir)
self.model = model
self.params = self.deduce_model()
self.logfile = f'{str(self.model)}.log'
threading.Thread(target=self.monitor).start()
def monitor(self, timelaps=180):
ref = hash(self.model)
self.log(self.model) #frozenset({self.model.__getstate__()}.items))
while True:
# time.sleep(timelaps)
checksum = hash(self.model)
if ref != checksum:
self.log({self.model})
print('model object was changes')
def log_model(path=os.path.join('logs', 'model_log.log')):
def log_state(model):
@functools.wraps(model)
def wrapper(*args, **kwargs):
state_func = deduce_model(model())
# print(str(state_func()))
logger = logging.getLogger('LogModel')
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(path)
log_format = '{%(levelname)s %(asctime)s %(message)s}'
logger.addHandler(file_handler)
logger.info(str(datetime.now().strftime(
'%Y-%m-%d %H:%M:%S'))+'\n'+str(state_func())+'\n')
return state_func
return wrapper
return log_state
def log_params(path=os.path.join('logs', 'params_log.log')):
def log_p(params):
@functools.wraps(params)
def wrapper(*args, **kwargs):
p = params.__defaults__
logger = logging.getLogger('LogParams')
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(path)
log_format = '{%(levelname)s %(asctime)s %(message)s}'
logger.addHandler(file_handler)
logger.info(str(datetime.now().strftime(
'%Y-%m-%d %H:%M:%S'))+'\n'+str(p)+'\n')
# return state_func
return wrapper
return log_p | [
"valentineburn@gmail.com"
] | valentineburn@gmail.com |
9ddf85b9b6ff2b50660ca5aacd4c0e949f1ebaf0 | 2756869b89326d07b9ec29d6817310825398b169 | /PiSpec20_LED_oceanoptics/OO_LED_spec.py | d1567b86cf34973a40dc7606b8191790a8dc044d | [
"MIT"
] | permissive | acpo/PiSpec20_LED_oceanoptics | 2629375b7dbbcdaa112e2c1f809c3d9ea7f9e008 | 1cf4f169cb36513afa24c0023fa3d18b71b199d5 | refs/heads/master | 2020-09-05T09:58:11.022047 | 2019-12-20T19:44:00 | 2019-12-20T19:44:00 | 220,066,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,595 | py | # ## pigpio daemon must be running first
# either issue 'sudo pigpiod' from terminal
# or use 'sudo systemctl enable pigpiod' to allow auto-start on boot
# disable with 'sudo systemctl disable pigpiod'
# ##
import seabreeze.spectrometers as sb
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
style.use("ggplot")
from sys import version_info
if version_info.major == 2:
# use Python 2.7 style
import Tkinter as tk
import ttk
elif version_info.major == 3:
# use Python 3.x style
import tkinter as tk
from tkinter import ttk
# added for LED support via the hardware PWM (software RPi.GPIO has flicker from timing issues)
# this part is specific to the Raspberry PI
import pigpio
gpio = pigpio.pi()
gpio.hardware_PWM(18, 2000, 0) #BCM pin 18, 2000 Hz, 0% duty (off)
LARGE_FONT= ("Verdana", 12)
NORM_FONT= ("Verdana", 10)
# Enumerate spectrometer, set a default integration, get x & y extents
spec = sb.Spectrometer.from_serial_number()
IntTime = 20000 #20 ms, set default integration time to a reasonable value
spec.integration_time_micros(IntTime)
x = spec.wavelengths()
data = spec.intensities(correct_dark_counts=True, correct_nonlinearity=False)
xmin = np.around(min(x), decimals=2)
xmax = np.around(max(x), decimals=2)
ymin = np.around(min(data), decimals=2)
ymax = np.around(max(data), decimals=2)
minIntTime =spec.minimum_integration_time_micros
def popupmsg(msg): # in case you want to have warning popup
popup = tk.Tk()
popup.wm_title("!")
popup.geometry('300x200-100+200')
label = ttk.Label(popup, text=msg, font=NORM_FONT, wraplength = 250)
label.pack(side="top", fill="x", pady=10)
B1 = ttk.Button(popup, text="Okay", command = popup.destroy)
B1.pack()
popup.mainloop()
class Spec(tk.Tk):
def __init__(self, ax, *args, **kwargs):
global data, x, dark, incident
global IntTime, Averages
global xmin, xmax, ymin, ymax
global AbMode, LEDdutycycle, LEDfrequency, LEDpin, LEDstate
global monitorwave, monitorindex, monitor
x = spec.wavelengths()
# Integration time set above
Averages=1 #set default averages to a reasonable value
dark = np.zeros(len(x))
incident = np.ones(len(x)) #dummy values to prevent error in Absorbance when no dark recorded
LEDpin = 18 #BCM pin 18 is Physical pin 12 on the RPi
LEDfrequency = 2000 # 2000 Hz is suitable for an integration time of 2 ms or longer
LEDdutycycle = 50000 #50000 is 5%, scale is zero to 1 million, 0 is off
LEDstate = 0 # LED off initially
AbMode = 0 # initial mode is raw intensity
self.ax = ax
self.x = x
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.data = data
self.line = Line2D(self.x, self.data, color='red')
self.ax.add_line(self.line)
self.ax.set_ylim(ymin*0.8, ymax*1.1)
self.ax.set_xlim(self.xmin, self.xmax)
monitorwave = np.median(x) #set monitor wavelength to middle of hardware range
tk.Tk.__init__(self, *args, **kwargs)
# tk.Tk.iconbitmap(self, default="clienticon.ico") set window icon
tk.Tk.wm_title(self, "Ocean Optics Spectrometer Control")
container = tk.Frame(self)
container.pack(side="top", fill="both", expand = True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
label = tk.Label(self, text="Spectrometer on a Pi", font=LARGE_FONT)
label.pack(pady=10,padx=10)
self.frame1 = tk.Frame(self)
self.frame1.pack(side='left', anchor=tk.N)
labelint = tk.Label(self.frame1, text='Integration Time (ms)', relief='ridge')
labelint.pack(side='top', pady=2)
labelavg = tk.Label(self.frame1, text='# of spectra to average', relief='ridge', width='17', wraplength='100')
labelavg.pack(side='top', pady=1)
labelxmin = tk.Label(self.frame1, text='Minimum wavelength', relief='ridge')
labelxmin.pack(side='top', pady=2)
labelxmax = tk.Label(self.frame1, text='Maximum wavelength', relief='ridge')
labelxmax.pack(side='top', pady=2)
self.button_dark = tk.Button(self.frame1, text='Measure Dark', background='light grey')
self.button_dark.pack(side='top', pady=2)
self.button_dark.bind('<ButtonRelease-1>', self.getdark)
self.buttonAbMode = tk.Button(self.frame1, text='Absorbance Mode (off)', background = 'light grey')
self.buttonAbMode.pack(side='top', pady=1)
self.buttonAbMode.bind('<ButtonRelease-1>', self.AbMode)
monitorindex = np.searchsorted(x, monitorwave, side='left')
monitor = np.round(self.data[monitorindex], decimals=3)
self.text = self.ax.text(0.9, 0.9, monitor, transform=ax.transAxes, fontsize=14)
self.ax.axvline(x=monitorwave, lw=2, color='blue', alpha = 0.5)
self.labelmonitor = tk.Label(self.frame1, text='Wavelength to monitor (nm)', font=LARGE_FONT)
self.labelmonitor.pack(side='top')
self.entrymonitor = tk.Entry(self.frame1, width='7')
self.entrymonitor.pack(side='top', pady=1, anchor=tk.N)
self.entrymonitor.insert(0, np.round(x[monitorindex], decimals=2))
self.entrymonitor.bind('<Return>', self.entrymonitor_return)
self.labelmonitor2 = tk.Label(self.frame1, text="press <Enter> to set new wavelength")
self.labelmonitor2.pack(side='top')
self.button_reset_y = tk.Button(self.frame1, text='Reset Y axis scale', background='light blue')
self.button_reset_y.pack(side='top', pady=10)
self.button_reset_y.bind('<ButtonRelease-1>', self.reset_y)
self.buttonLED = tk.Button(self.frame1, text='LED on / off', background = 'light grey')
self.buttonLED.pack(side='top', pady=1)
self.buttonLED.bind('<ButtonRelease-1>', self.LEDstate)
self.labelLED = tk.Label(self.frame1, text="LED power (1 to 100%)")
self.labelLED.pack(side='top', pady=1, anchor=tk.N)
self.entryLED = tk.Entry(self.frame1, width= '5')
self.entryLED.pack(side='top', pady=1, anchor=tk.N)
self.entryLED.insert(0, LEDdutycycle / 10000)
self.entryLED.bind('<Return>', self.entryLED_return)
self.frame2 = tk.Frame(self)
self.frame2.pack(side='left', anchor=tk.N)
self.entryint = tk.Entry(self.frame2, width='6')
self.entryint.pack(side='top', pady=1, anchor=tk.N)
self.entryint.insert(0, IntTime/1000)
self.entryint.bind('<Return>', self.EntryInt_return)
self.entryavg = tk.Entry(self.frame2, width='4')
self.entryavg.pack(side='top', pady=5)
self.entryavg.insert(0, Averages)
self.entryavg.bind('<Return>', self.EntryAvg_return)
self.entryxmin = tk.Entry(self.frame2, width='7')
self.entryxmin.pack(side='top', pady=2)
self.entryxmin.insert(0, xmin)
self.entryxmin.bind('<Return>', self.Entryxmin_return)
self.entryxmax = tk.Entry(self.frame2, width='7')
self.entryxmax.pack(side='top', pady=2)
self.entryxmax.insert(0, xmax)
self.entryxmax.bind('<Return>', self.Entryxmax_return)
self.button_incident = tk.Button(self.frame2, text='Measure 100% T', background='light grey')
self.button_incident.pack(side='top', pady=2)
self.button_incident.bind('<ButtonRelease-1>', self.getincident)
button_quit = ttk.Button(self, text='Quit')
button_quit.pack(side='right', anchor=tk.N)
button_quit.bind('<ButtonRelease-1>', self.ButtonQuit)
ax.set_xlabel('Wavelength (nm)')
ax.set_ylabel('Counts')
canvas = FigureCanvasTkAgg(fig, self)
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
def update(self, data):
global AbMode
self.data = spec.intensities(correct_dark_counts=True, correct_nonlinearity=False)
if AbMode == 1:
self.data = np.array(self.data, dtype=float)
self.data = np.log10((incident-dark)/(self.data-dark))
self.line.set_data(self.x, self.data)
monitor = np.round(self.data[monitorindex], decimals=3)
self.text.set_text(monitor)
return self.line,
else:
#y-axis handled by reset button
self.line.set_data(self.x, self.data)
monitor = np.round(self.data[monitorindex], decimals=3)
self.text.set_text(monitor)
#self.ax.canvas.blit() the blit approach isn't working
return self.line,
def ButtonQuit(root, event):
gpio.hardware_PWM(LEDpin, LEDfrequency, 0) #sets LED off
root.destroy()
exit()
def getdark(self, event):
global dark
darkj = spec.intensities(correct_dark_counts=True, correct_nonlinearity=True)
dark = np.array(darkj, dtype=float)
self.button_dark.configure(background = 'light green')
def getincident(self,event):
global incident
incidentj = spec.intensities(correct_dark_counts=True, correct_nonlinearity=True)
incident = np.array(incidentj, dtype=float)
self.button_incident.configure(background = 'light green')
# SET CONFIGURATION
def setconfig(self):
global IntTime, Averages
spec.integration_time_micros(IntTime)
# write new configuration to dialog
self.entryint.delete(0, "end")
self.entryint.insert(0,IntTime / 1000) #write ms, but IntTime is microseconds
self.entryavg.delete(0, "end")
self.entryavg.insert(0,Averages) #set text in averages box
def EntryInt_return(self, event):
global IntTime
#typically OO spectrometers cant read faster than 4 ms
IntTimeTemp = self.entryint.get()
if IntTimeTemp.isdigit() == True:
if int(IntTimeTemp) > 65000:
msg = "The integration time must be 65000 ms or smaller. You set " +(IntTimeTemp)
self.setconfig()
popupmsg(msg)
elif int(IntTimeTemp) < 4:
msg = "The integration time must be greater than 4 ms. You set " +(IntTimeTemp)
self.setconfig()
popupmsg(msg)
else:
IntTime = int(IntTimeTemp) * 1000 #convert ms to microseconds
self.setconfig()
else:
msg = "Integration time must be an integer between 4 and 65000 ms. You set " +str(IntTimeTemp)
self.setconfig()
popupmsg(msg)
    def EntryAvg_return(self, event):
        """Validate the averages entry; non-integers fall back to 1 with a popup.

        The accepted value is only stored in the global ``Averages`` — it is
        not yet applied to the acquisition loop (see note below).
        """
        ## averaging needs to be implemented here in code
        # cseabreeze has average working, but python-seabreeze doesn't (2019)
        global Averages
        Averages = self.entryavg.get()
        if Averages.isdigit() == True:
            Averages = int(float(Averages))
        else:
            msg = "Averages must be an integer. You tried " + str(Averages) + ". Setting value to 1."
            Averages = 1
            self.entryavg.delete(0, "end")
            self.entryavg.insert(0,Averages) #set text in averages box
            popupmsg(msg)
def Entryxmax_return(self,event):
global xmax
xmaxtemp = self.entryxmax.get()
try:
float(xmaxtemp)
xmaxtemp = float(self.entryxmax.get())
if xmaxtemp > xmin:
xmax = xmaxtemp
self.entryxmax.delete(0, 'end')
self.entryxmax.insert(0, xmax) #set text in box
self.ax.set_xlim(xmin,xmax)
else:
msg = "Maximum wavelength must be larger than minimum wavelength. You entered " + str(xmaxtemp) + " nm."
self.entryxmax.delete(0, 'end')
self.entryxmax.insert(0, xmax) #set text in box
popupmsg(msg)
except:
self.entryxmax.delete(0, 'end')
self.entryxmax.insert(0, xmax) #set text in box to unchanged value
def Entryxmin_return(self, event):
global xmin
xmintemp = self.entryxmin.get()
try:
float(xmintemp)
xmintemp = float(self.entryxmin.get())
if xmintemp < xmax:
xmin = xmintemp
self.entryxmin.delete(0, 'end')
self.entryxmin.insert(0, xmin) #set text in box
self.ax.set_xlim(xmin,xmax)
else:
msg = "Minimum wavelength must be smaller than maximum wavelength. You entered " + str(xmintemp) + " nm."
self.entryxmin.delete(0, 'end')
self.entryxmin.insert(0, xmin) #set text in box
popupmsg(msg)
except:
self.entryxmin.delete(0, 'end')
self.entryxmin.insert(0, xmin) #set text in box to unchanged value
    def AbMode(self, event):
        """Toggle between raw-counts mode and absorbance mode.

        NOTE(review): the module-level flag deliberately(?) shares the name of
        this method; the ``global AbMode`` statement refers to the integer
        flag, not the method. Also note this uses the module-level ``ax``
        while other handlers use ``self.ax`` — confirm they are the same axes.
        """
        global AbMode
        if AbMode == 1:
            # leaving absorbance mode: back to raw counts, autoscale y
            AbMode = 0
            ax.set_ylabel('Counts')
            self.buttonAbMode.configure(text='Absorbance Mode (off)', background = 'light grey')
            self.reset_y(self)
        else:
            # entering absorbance mode: fixed y-range suitable for log10 ratios
            AbMode = 1
            ax.set_ylabel('Absorbance')
            ax.set_ylim(-0.1,1.2)
            self.buttonAbMode.configure(text='Absorbance Mode (on)', background = 'light green')
    def LEDstate(self, event):
        """Toggle the excitation LED on/off via hardware PWM.

        NOTE(review): as with :meth:`AbMode`, the ``global LEDstate`` refers to
        the module-level integer flag that shares this method's name.
        """
        global LEDstate, LEDpin, LEDfrequency, LEDdutycycle
        if LEDstate == 1:
            # turn off: duty cycle 0
            LEDstate = 0
            gpio.hardware_PWM(LEDpin, LEDfrequency, 0)
            self.buttonLED.configure(background = 'light grey')
        else:
            # turn on at the last configured duty cycle
            LEDstate = 1
            gpio.hardware_PWM(LEDpin, LEDfrequency, LEDdutycycle)
            self.buttonLED.configure(background = 'light green')
    def reset_y(self, event):
        """Autoscale the y-axis to the current spectrum (raw-counts mode only).

        In absorbance mode this is a no-op because that mode uses a fixed
        y-range. NOTE(review): :meth:`AbMode` calls this as
        ``self.reset_y(self)``, so ``event`` receives the instance — the
        argument is unused, so it works, but confirm the intent.
        """
        if AbMode == 0:
            data = spec.intensities(correct_dark_counts=True, correct_nonlinearity=False)
            ymin = min(data)
            ymax = max(data)
            # 10% headroom above and below the observed range
            ax.set_ylim(ymin * 0.9, ymax * 1.1)
        else:
            pass
def entryLED_return(self, event):
global LEDpin, LEDfrequency, LEDdutycycle
LEDdutycycletemp = self.entryLED.get()
try:
float(LEDdutycycletemp)
LEDdutycycletemp = float(self.entryLED.get())
if 0 < LEDdutycycletemp <= 100:
LEDdutycycle = int(10000 * LEDdutycycletemp) #factor of 10000 translates % to PWM units
self.entryLED.delete(0, 'end')
self.entryLED.insert(0, LEDdutycycle / 10000)
gpio.hardware_PWM(LEDpin, LEDfrequency, LEDdutycycle)
else:
msg = "LED power entry must be between 1 and 100 %"
self.entryLED.delete(0, 'end')
self.entryLED.insert(0, LEDdutycycle / 10000)
popupmsg(msg)
except:
self.entryLED.delete(0, 'end')
self.entryLED.insert(0, LEDdutycycle / 10000)
    def entrymonitor_return(self, event):
        """Move the monitored-wavelength marker to the entered wavelength.

        The entry is snapped to the nearest detector pixel via
        ``np.searchsorted`` over the wavelength axis ``x``; the vertical marker
        line is replaced by popping the last line from the axes and drawing a
        new ``axvline``. NOTE(review): ``ax.lines.pop(-1)`` assumes the marker
        is always the most recently added line — confirm, and note that
        mutating ``ax.lines`` directly is deprecated in newer matplotlib.
        """
        global monitorwave, monitorindex, x
        monitorwavetemp = self.entrymonitor.get()
        try:
            float(monitorwavetemp)
            monitorwavetemp = float(self.entrymonitor.get())
            if xmin < monitorwavetemp < xmax:
                monitorwave = monitorwavetemp
                # snap to the nearest pixel on the wavelength axis
                monitorindex = np.searchsorted(x, monitorwave, side='left')
                monitorwave = np.around(x[monitorindex], decimals=2)
                self.entrymonitor.delete(0, 'end')
                self.entrymonitor.insert(0,monitorwave)
                # replace the previous marker line with one at the new position
                self.ax.lines.pop(-1)
                self.ax.axvline(x=monitorwave, lw=2, color='blue', alpha = 0.5)
            else:
                msg = "Monitored wavelength must be within the detected range. Range is " + str(xmin) + " to " + str(xmax) + " nm."
                self.entrymonitor.delete(0, 'end')
                self.entrymonitor.insert(0, monitorwave)
                popupmsg(msg)
        except:
            # non-numeric entry: restore the last accepted value
            self.entrymonitor.delete(0, 'end')
            self.entrymonitor.insert(0, monitorwave)
# Build the figure/axes pair shared by the Spec app and start the GUI.
fig, ax = plt.subplots()
spectro = Spec(ax)
# animate: redraw every 10 ms. Keeping the FuncAnimation in a variable is
# required so it is not garbage-collected while running; blit is disabled
# (see the comment inside update()).
ani = animation.FuncAnimation(fig, spectro.update, interval=10, blit=False)
spectro.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
f5a0e695f1a50cbc20de0463e14d5f362bb054ee | 6499d0b71b19fd4416bfd74fa9fd88e3d0b0618a | /king_phisher/client/dialogs/exception.py | 70b21f7817f9aa453b632b5a60c493afdd5eccd9 | [
"BSD-3-Clause"
] | permissive | Meatballs1/king-phisher | dfb0a539a2d0455113b40698f7151521774addb1 | a16b1de055260f6f33d8c1fd0765bd06ffb733c2 | refs/heads/master | 2020-05-20T17:55:30.441239 | 2015-10-15T19:21:22 | 2015-10-15T19:21:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,018 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/exception.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import platform
import sys
import traceback
from king_phisher import its
from king_phisher import utilities
from king_phisher import version
from king_phisher.client import gui_utilities
from king_phisher.third_party import AdvancedHTTPServer
from gi.repository import Gtk
__all__ = ['ExceptionDialog']
# Report template filled in by ExceptionDialog.interact(); each placeholder is
# a diagnostic field shown to the user for bug reporting.
EXCEPTION_DETAILS_TEMPLATE = """
Error Type: {error_type}
Error Details: {error_details}
Error UID: {error_uid}
RPC Error: {rpc_error_details}
King Phisher Version: {king_phisher_version}
Platform Version: {platform_version}
Python Version: {python_version}
Gtk Version: {gtk_version}
{stack_trace}
"""
class ExceptionDialog(gui_utilities.GladeGObject):
	"""
	Display a dialog which shows an error message for a python exception.
	The dialog includes useful details for reporting and debugging the exception
	which occurred.
	"""
	gobject_ids = ('linkbutton_github_issues',)
	top_gobject = 'dialog'
	def __init__(self, application, exc_info=None, error_uid=None):
		"""
		:param application: The parent application for this object.
		:type application: :py:class:`Gtk.Application`
		:param tuple exc_info: The exception information as provided by :py:func:`sys.exc_info`.
		:param str exc_info: The exception information as provided by :py:func:`sys.exc_info`.
		:param str error_uid: An optional unique identifier for the exception that can be provided for tracking purposes.
		"""
		super(ExceptionDialog, self).__init__(application)
		self.error_description = self.gtk_builder_get('label_error_description')
		self.error_details = self.gtk_builder_get('textview_error_details')
		# fall back to the exception currently being handled when none is given
		self.exc_info = exc_info or sys.exc_info()
		self.error_uid = error_uid
		linkbutton = self.gobjects['linkbutton_github_issues']
		linkbutton.set_label('Project Issue Tracker')
		linkbutton.connect('activate-link', lambda _: utilities.open_uri(linkbutton.get_property('uri')))
	def interact(self):
		"""Populate the dialog with the exception details and run it modally."""
		exc_type, exc_value, exc_traceback = self.exc_info
		# Describe the host platform; also records whether this is a frozen build.
		# NOTE(review): platform.linux_distribution() was removed in Python 3.8
		# — this branch presumably predates that; confirm the supported runtime.
		pversion = 'UNKNOWN'
		if its.on_linux:
			pversion = 'Linux: ' + ' '.join(platform.linux_distribution())
		elif its.on_windows:
			pversion = 'Windows: ' + ' '.join(platform.win32_ver())
		if its.frozen:
			pversion += ' (Frozen=True)'
		else:
			pversion += ' (Frozen=False)'
		exc_name = "{0}.{1}".format(exc_type.__module__, exc_type.__name__)
		# Include remote exception details when the error originated server-side.
		rpc_error_details = 'N/A (Not a remote RPC error)'
		if isinstance(exc_value, AdvancedHTTPServer.AdvancedHTTPServerRPCError) and exc_value.is_remote_exception:
			rpc_error_details = "Name: {0}".format(exc_value.remote_exception['name'])
			if exc_value.remote_exception.get('message'):
				rpc_error_details += " Message: '{0}'".format(exc_value.remote_exception['message'])
		details = EXCEPTION_DETAILS_TEMPLATE.format(
			error_details=repr(exc_value),
			error_type=exc_name,
			error_uid=(self.error_uid or 'N/A'),
			rpc_error_details=rpc_error_details,
			king_phisher_version=version.version,
			platform_version=pversion,
			python_version="{0}.{1}.{2}".format(*sys.version_info),
			gtk_version="{0}.{1}.{2}".format(Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version()),
			stack_trace=''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
		)
		details = details.strip() + '\n'
		# strip the 25-character 'king_phisher.third_party.' prefix for display
		if exc_name.startswith('king_phisher.third_party.'):
			exc_name = exc_name[25:]
		self.error_description.set_text("Error type: {0}".format(exc_name))
		self.error_details.get_buffer().set_text(details)
		self.dialog.show_all()
		self.dialog.run()
		self.dialog.destroy()
		return
| [
"zeroSteiner@gmail.com"
] | zeroSteiner@gmail.com |
fb4eb2d3fc2b6b557ef5f486e64a77a51611a0bc | 87a6d7e83a25cb3b1696fb6094fda88858754c19 | /src/review/views.py | a2a5efdfd814e12518e46fac60f1fd21ab2a9492 | [
"BSD-3-Clause"
] | permissive | tegarty/socialrating | 20b45f8eb233fed0b69ae0fd8110cf8a73f1f782 | b80888ee8e637bd0a5517614c78235d563fead2e | refs/heads/master | 2020-04-20T08:42:52.231718 | 2018-12-06T17:57:43 | 2018-12-06T17:57:43 | 168,747,496 | 1 | 0 | BSD-3-Clause | 2019-02-01T19:11:19 | 2019-02-01T19:11:19 | null | UTF-8 | Python | false | false | 4,163 | py | from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django import forms
from django.shortcuts import redirect, reverse
from item.mixins import ItemViewMixin
from rating.models import Vote
from team.mixins import TeamViewMixin
from context.models import Context
from .models import Review
class ReviewListView(ItemViewMixin, ListView):
    """Paginated list of the Reviews attached to the current Item."""
    model = Review
    paginate_by = 100
    template_name = 'review_list.html'
    def get_queryset(self):
        # restrict to reviews of the Item resolved by ItemViewMixin
        return super().get_queryset().filter(item=self.item)
class ReviewCreateView(TeamViewMixin, ItemViewMixin, CreateView):
    """Create a Review for the current Item, with one optional Vote per Rating."""
    model = Review
    template_name = 'review_form.html'
    fields = ['headline', 'body', 'context']
    def get_context_data(self, **kwargs):
        """
        Add Item to the context.

        BUG FIX: accept and forward **kwargs — Django's FormMixin calls
        get_context_data(form=form) when re-rendering an invalid form, which
        previously raised TypeError against this override's bare signature.
        """
        context = super().get_context_data(**kwargs)
        context['item'] = self.item
        return context
    def get_form(self, form_class=None):
        """
        Add a vote and a comment field for each Rating of the Item's Category,
        and limit the Context choices to the current Team.
        """
        form = super().get_form(form_class)
        for rating in self.item.category.ratings.all():
            # votes are integers 1..max_rating, rendered as radio buttons
            choices = []
            for choice in range(1, rating.max_rating+1):
                choices.append((choice, choice))
            form.fields["%s_vote" % rating.slug] = forms.TypedChoiceField(
                choices=choices,
                coerce=int,
                widget=forms.widgets.RadioSelect,
                required=False,
                label='%s: Please vote between 1-%s' % (rating.name, rating.max_rating),
            )
            form.fields["%s_comment" % rating.slug] = forms.CharField(
                label='%s: A short comment for the Vote above' % rating.name,
                required=False,
            )
        form.fields['context'].queryset = Context.objects.filter(team=self.team)
        return form
    def form_valid(self, form):
        """
        First save the new Review,
        then save a Vote for each rating the user actually voted on.
        """
        review = form.save(commit=False)
        review.item = self.item
        review.actor = self.request.user.actor
        review.save()
        # loop over ratings available for this item,
        # saving a new Vote for each as needed
        for rating in self.item.category.ratings.all():
            votefield = "%s_vote" % rating.slug
            commentfield = "%s_comment" % rating.slug
            if votefield in form.fields and form.cleaned_data[votefield]:
                Vote.objects.create(
                    review=review,
                    rating=rating,
                    vote=form.cleaned_data[votefield],
                    comment=form.cleaned_data[commentfield] if commentfield in form.cleaned_data else '',
                )
        return redirect(reverse(
            'team:category:item:review:detail',
            kwargs={
                'team_slug': self.team.slug,
                'category_slug': self.item.category.slug,
                'item_slug': self.item.slug,
                'review_uuid': review.pk
            }
        ))
class ReviewDetailView(ItemViewMixin, DetailView):
    """Show a single Review, looked up by its UUID primary key."""
    model = Review
    template_name = 'review_detail.html'
    pk_url_kwarg = 'review_uuid'
class ReviewUpdateView(ItemViewMixin, UpdateView):
    """Edit an existing Review's headline, body and context."""
    model = Review
    template_name = 'review_form.html'
    pk_url_kwarg = 'review_uuid'
    fields = ['headline', 'body', 'context']
model = Review
template_name = 'review_delete.html'
pk_url_kwarg = 'review_uuid'
def delete(self, request, *args, **kwargs):
messages.success(self.request, "Review %s has been deleted, along with all Votes that related to it." % self.get_object())
return super().delete(request, *args, **kwargs)
def get_success_url(self):
return(reverse('team:category:item:detail', kwargs={
'camp_slug': self.camp.slug,
'category_slug': self.category.slug,
'item_slug': self.item.slug,
}))
| [
"thomas@gibfest.dk"
] | thomas@gibfest.dk |
bf41fe93de037aa8e1359ef03847e1799623c699 | 1709d77fe679b0c2b68ce3be1d11feb539018821 | /Needleman_Wunsch.py | e4dfb6ae088d1301518213a0f2b2f311c4cca5e7 | [] | no_license | RoxanneDewing/Biological-Sequencing | a3cbba351c9febf009b98359e0e8a82e87f21b9f | 108be59bbe430240b9b47cb60ceaf5d798c95e27 | refs/heads/master | 2020-12-27T01:09:51.874205 | 2020-02-02T04:14:06 | 2020-02-02T04:14:06 | 237,714,548 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,785 | py | import numpy as np
#This is a recursive TraceBack for NeedleMen Wunsch, it explores every path back to F(0, 0)
def TraceBack_Wunsch(i, j, n, m, temp_n, temp_m, arr, lst):
    """Enumerate every optimal alignment path from cell (i, j) back to (0, 0).

    :param i, j: current cell in the scoring matrix (row i over n, column j over m)
    :param n, m: the two sequences being aligned
    :param temp_n, temp_m: alignment strings built so far (characters are prepended)
    :param arr: the filled Needleman-Wunsch scoring matrix
    :param lst: accumulator; each completed path appends [aligned_n, aligned_m]
    :return: lst (paths are accumulated in place)
    """
    if i == 0 and j == 0:
        lst.append([temp_n, temp_m])
        return lst
    # BUG FIX: the two border cases below previously fell through into the
    # general case, where arr[i-1] / n[i-1] silently wrapped around via
    # Python's negative indexing and could emit duplicate or invalid
    # alignments (e.g. a spurious second copy of the same path).
    if i == 0:
        # top row: only horizontal moves remain (gap in n, consume m)
        TraceBack_Wunsch(i, j - 1, n, m, "-" + temp_n, m[j - 1] + temp_m, arr, lst)
        return lst
    if j == 0:
        # left column: only vertical moves remain (consume n, gap in m)
        TraceBack_Wunsch(i - 1, j, n, m, n[i - 1] + temp_n, "-" + temp_m, arr, lst)
        return lst
    # A move lies on an optimal path iff it reproduces this cell's score
    # (gap penalty is -2, matching the matrix construction).
    Case_Match = (arr[i][j] == arr[i - 1][j - 1] + Score(n[i - 1], m[j - 1]))
    Case_GapX = (arr[i][j] == arr[i][j - 1] - 2)
    Case_GapY = (arr[i][j] == arr[i - 1][j] - 2)
    if Case_Match:
        TraceBack_Wunsch(i - 1, j - 1, n, m, n[i - 1] + temp_n, m[j - 1] + temp_m, arr, lst)
    if Case_GapX:
        TraceBack_Wunsch(i, j - 1, n, m, "-" + temp_n, m[j - 1] + temp_m, arr, lst)
    if Case_GapY:
        TraceBack_Wunsch(i - 1, j, n, m, n[i - 1] + temp_n, "-" + temp_m, arr, lst)
    return lst
#This is a helper function that returns the score
def Score(i, j):
    """Return the substitution score: +2 for a match, -1 for a mismatch."""
    return 2 if i == j else -1
#This function will create the matrix using the recurrence relations and also call the
#trace back function once the matrix is complete
#This function also opens all the text files and writes to them
def Needleman_Wunsch(n, m):
    """Globally align sequences n and m (match +2, mismatch -1, gap -2).

    Side effects (files written to the working directory):
      2.o1.txt - the optimal alignment score
      2.o2.txt - the full scoring matrix, space-separated
      2.o3.txt - one optimal alignment (two lines)
      2.o4.txt - YES/NO: whether multiple optimal alignments exist
      2.o5.txt - the count and listing of all optimal alignments
    """
    arr = np.zeros((len(n)+1, len(m)+1))
    # initialise first column and first row with cumulative gap penalties
    for i in range(len(n)+1):
        arr[i][0] = -2*i
    for j in range(len(m)+1):
        arr[0][j] = -2*j
    # fill the matrix with the standard three-way recurrence
    for i_Index in range(1, len(n)+1):
        for j_Index in range(1, len(m)+1):
            Val_one = arr[i_Index-1][j_Index-1]+Score(n[i_Index-1], m[j_Index-1])
            Val_two = arr[i_Index-1][j_Index] -2
            Val_three = arr[i_Index][j_Index-1] -2
            Max = max([Val_one, Val_two, Val_three])
            arr[i_Index][j_Index] = Max
    #Print the max score in file 2.o1
    f = open("2.o1.txt", "w+")
    f.write(str(int(arr[len(n)][len(m)])))
    f.close()
    #Print the array in file 2.o2
    f = open("2.o2.txt", "w+")
    newstr = " "
    for h in range(0, len(n)+1):
        for r in range(0, len(m)+1):
            newstr = newstr + " "+str(int(arr[h][r]))
        f.write(newstr.strip()+"\n")
        newstr = " "
    f.close()
    # enumerate all optimal alignments by walking back through the matrix
    lst = TraceBack_Wunsch(len(n), len(m), n, m, "", "", arr, [])
    #Print an alignment
    f = open("2.o3.txt", "w+")
    f.write(str(lst[0][0]) + "\n" )
    f.write(str(lst[0][1]))
    f.close()
    #Print if multiple alignments
    f = open("2.o4.txt", "w+")
    if (len(lst) > 1):
        f.write("YES")
    else:
        f.write("NO")
    f.close()
    #print all alignments - Bonus
    f = open("2.o5.txt", "w+")
    f.write(str(len(lst)))
    for g in range(0, len(lst)):
        f.write("\n")
        f.write(str(lst[g][0]) + "\n" )
        f.write(str(lst[g][1]) + "\n")
    f.close()
    #print(lst)
#print(lst)
#This is a main function , only used to call NeedleMan_Wunsch with input sequences from 2.in file
def main():
file = open("2.in.txt", 'r')
x = file.readline().strip().upper()
y = file.readline().strip().upper()
file.close()
Needleman_Wunsch(x, y)
if __name__== "__main__":
main() | [
"noreply@github.com"
] | noreply@github.com |
6c95a83cddc72797d8ba2454dfc1051fd4a72573 | fc5e9514eebe4e6423083b1b066264f048603799 | /driver.py | 1220bb958f83a5c5d7b2c0905ee01cd0360dacab | [] | no_license | truongnguyenlinh/marketing_analytics | fc9ee0050fb551a5b29d51f9fd7fde7b447ad0f5 | 5eed99b742e633d066fc0630c624f59c50713542 | refs/heads/master | 2023-04-08T17:27:56.862091 | 2021-04-16T08:59:35 | 2021-04-16T08:59:35 | 357,425,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,802 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
import warnings
warnings.filterwarnings('ignore')
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", 1000)
def prepare_df(data):
    """Clean and feature-engineer the raw marketing dataframe.

    Steps: strip column names, normalise/categorise Education, Marital_Status
    and Country, parse the enrollment date, one-hot encode the categorical
    predictors, convert the currency-formatted Income to float, derive
    year/month/day features from the enrollment date, and impute missing
    Income with the median. Returns the transformed dataframe.
    """
    # Preview raw data and its dtypes
    print(data.head(1))
    print(data.dtypes)
    # Convert categorical data
    data.columns = data.columns.str.strip()
    data["Education"] = data["Education"].replace(["2n Cycle"], "2n_Cycle")
    data["Education"] = data["Education"].astype("category")
    data["Marital_Status"] = data["Marital_Status"].astype("category")
    data["Country"] = data["Country"].astype("object")
    data["Dt_Customer"] = pd.to_datetime(data["Dt_Customer"])
    # Encode categorical predictor variables (one-hot, prefixed "col#level")
    categorical_columns = ["Education", "Marital_Status"]
    for cc in categorical_columns:
        dummies = pd.get_dummies(data[cc])
        dummies = dummies.add_prefix("{}#".format(cc))
        data = data.join(dummies)
    # Strip currency formatting ("$", thousands separators) and convert to float
    data["Income"] = data["Income"].replace({"\$": "", ",": ""}, regex=True)
    data["Income"] = data["Income"].astype("float")
    # Enrollment date features.
    # BUG FIX: Dt_Day was previously assigned .dt.month, duplicating the month.
    data["Dt_Year"] = data["Dt_Customer"].dt.year
    data["Dt_Month"] = data["Dt_Customer"].dt.month
    data["Dt_Day"] = data["Dt_Customer"].dt.day
    # View updated dataset
    print(data.head(1))
    # Find null values and impute Income with the median
    print(data.isnull().sum().sort_values(ascending=False))
    data["Income"] = data["Income"].fillna(data["Income"].median())
    return data
def monte_carlo(data):
    """Simulate per-customer wine spending from a fitted normal distribution.

    Draws len(data) samples from N(mean(MntWines), sd(MntWines)), shows a
    histogram of the draws, appends them as a new MntWineSpent column and
    prints the combined frame. NOTE(review): the RNG is unseeded (results
    differ per run), plt.show() blocks until the window is closed, and the
    concatenated frame is only printed, never returned — confirm intent.
    """
    # Number of simulations will equal length of dataset (number of rows)
    num_simulations = len(data)
    def generate_random_numbers(mean, sd):
        # one normal draw per row of the input frame
        random_nums = norm.rvs(loc=mean,
                               scale=sd,
                               size=num_simulations)
        return random_nums
    # Target variable is MntWines, obtaining median and sd
    mnt_wines_expected = data[['MntWines']].mean()
    mnt_wines_sd = data[['MntWines']].std()
    wine_spent = generate_random_numbers(mnt_wines_expected, mnt_wines_sd)
    df = pd.DataFrame(columns=["MntWineSpent"])
    plt.hist(wine_spent, bins='auto')
    plt.show()
    # NOTE(review): row-by-row append is O(n^2); a single DataFrame build
    # from the rounded array would be equivalent and faster.
    for i in range(num_simulations):
        dictionary = {"MntWineSpent": round(wine_spent[i], 2)}
        df = df.append(dictionary, ignore_index=True)
    data = pd.concat([data, df], axis=1, join="inner")
    print("=========Monte Carlo Simulation=========")
    print(data.head(10))
    print("========================================")
def main():
    """Load the marketing CSV, clean it, then run the Monte Carlo simulation."""
    df = pd.read_csv("marketing_data.csv", sep=",")
    df = prepare_df(df)
    monte_carlo(df)
if __name__ == "__main__":
    main()
| [
"ltruong28@my.bcit.ca"
] | ltruong28@my.bcit.ca |
d10e767d72cdab9836c4d3638f52e6136f36d302 | 7a74633361f89d3bf234776a3fd352a2cdeccc9b | /Arrays.py | 3cda5c191d2eb3702b9fb176def6fbcc2a98fd05 | [] | no_license | khushal2911/hackerrank-30days-of-code | 97bebc178f05491ca247e0ecfaaa03d733c3750e | 453ef0a955732c4eb682829aedc41318a701e26a | refs/heads/master | 2022-12-07T15:42:10.656202 | 2020-08-10T05:58:19 | 2020-08-10T05:58:19 | 284,650,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | #!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    # n: declared element count; arr: the space-separated integers
    n = int(input())
    arr = list(map(int, input().rstrip().split()))
    # print the first n elements in reverse order, space-separated
    print(" ".join([str(arr[n-i-1]) for i in range(n)]))
| [
"noreply@github.com"
] | noreply@github.com |
a3fb7180882889a442a4ea4d87be4fbbea6d7f9e | 9074257bae2b933912df961f08b1823eacad3d55 | /app_base/forms.py | 26e95f1b20ebf924392764279e137f50436d6346 | [] | no_license | bedlex/final-project | dc0d90e8647843952d5a945916e8d9334fa16f89 | 5003fb7acc0cb551bd22a0e2f95ccd54b8e0dcbc | refs/heads/master | 2020-09-26T07:23:31.148373 | 2019-12-05T22:40:07 | 2019-12-05T22:40:07 | 226,201,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,393 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, MultipleFileField
from wtforms.validators import DataRequired, Length, Email, EqualTo
from flask_wtf.file import FileAllowed
class RegistrationForm(FlaskForm):
    """Sign-up form: username, first/last name and a double-entry password.

    NOTE(review): no email field, although LoginForm authenticates by email —
    confirm how registered users obtain a login email.
    """
    username = StringField('user name',validators=[DataRequired(), Length(min = 2, max = 100)])
    firstname = StringField("first name", validators=[DataRequired(), Length(min = 2, max = 100)])
    lastname = StringField("last name", validators=[DataRequired(), Length(min = 2, max = 100)])
    password = PasswordField("password", validators=[DataRequired()])
    confirm_password = PasswordField("confirm password", validators=[DataRequired(), EqualTo("password")])
    submit = SubmitField('SignUp')
class EditForm(FlaskForm):
    """Profile-edit form: username and first/last name (no password change)."""
    username = StringField('user name',validators=[DataRequired(), Length(min = 2, max = 100)])
    firstname = StringField("first name", validators=[DataRequired(), Length(min = 2, max = 100)])
    lastname = StringField("last name", validators=[DataRequired(), Length(min = 2, max = 100)])
    submit = SubmitField('update')
class RequestPassReset(FlaskForm):
    """Step 1 of password reset: collect the account email for verification."""
    email = StringField('email', validators=[DataRequired(), Email()])
    submit = SubmitField('send verification email')
class ConfirmPassReset(FlaskForm):
    """Step 2 of password reset: new password entered twice, must match."""
    password = PasswordField("password", validators=[DataRequired()])
    confirm_password = PasswordField("confirm password", validators=[DataRequired(), EqualTo("password")])
    submit = SubmitField('reset password')
class LoginForm(FlaskForm):
    """Login form: email + password."""
    email = StringField('email', validators=[DataRequired(), Email()])
    password = PasswordField('password', validators=[DataRequired()])
    submit = SubmitField('login')
class ArticleForm(FlaskForm):
    """Article submission form; photos restricted to jpg/png uploads."""
    title = StringField('Title', validators=[DataRequired()])
    category = StringField('Category', validators=[DataRequired()])
    location = StringField("Location", validators=[DataRequired()])
    article = TextAreaField("Article", validators=[DataRequired()])
    source = StringField("Source", validators=[DataRequired()])
    photos = MultipleFileField('Image', validators=[FileAllowed(['jpg', 'png'])])
    submit = SubmitField("Submit")
class EmailConfirm(FlaskForm):
    """Single-field form asking the user to (re-)enter their email address."""
    email = StringField("Enter your Email", validators=[DataRequired(),Email()])
    submit = SubmitField("Submit")
| [
"almaztransportinc@gmail.com"
] | almaztransportinc@gmail.com |
ee2c11888cd4367182d112d90dd8ed59a43d181f | e8706c26bb06263f4e6492e141b701c2293e06a0 | /graph_traversal/1260_DFS와BFS/가은/sol1.py | 46c2bb894a7cb9a6db5237be491a8fa9b9537ec3 | [] | no_license | ssabum/algorithm_study | a8d20a70d14ea398b07d43e2550abd25576ab8c2 | 473c1c89502c44f08e82280af144c01af44535d8 | refs/heads/master | 2023-04-21T11:35:07.075751 | 2021-05-03T08:09:18 | 2021-05-03T08:09:18 | 345,941,181 | 2 | 1 | null | 2021-05-03T08:09:18 | 2021-03-09T08:51:16 | Python | UTF-8 | Python | false | false | 1,260 | py | from collections import deque
import sys
sys.stdin = open("input.txt")
# NOTE(review): the test-case count is hard-coded rather than read from input.
T = 3
for tc in range(1, T+1):
    # N nodes, M undirected edges, V start vertex
    N, M, V = map(int, input().split())
    edge_input = [list(map(int, input().split())) for _ in range(M)]
    # print(edge_input)
    # build a 1-indexed adjacency list (both directions for each edge)
    edge_list = [[] for _ in range(N+1)]
    for e in edge_input:
        n1, n2 = e
        edge_list[n1].append(n2)
        edge_list[n2].append(n1)
    # print(edge_list)
    # stack of nodes still to visit (DFS frontier)
    stack = [V]
    # visited flags, 1-indexed
    visited = [0] * (N+1)
    def dfs():
        """Iterative DFS from V; smaller-numbered neighbours visited first."""
        while stack:
            # current node
            now = stack.pop()
            # print(stack)
            # only process unvisited nodes
            if not visited[now]:
                visited[now] = 1
                print(now, end=' ')
                # push neighbours largest-first so the smallest pops first
                for v in reversed(sorted(edge_list[now])):
                    stack.append(v)
        print()
    dfs()
    queue = deque([V])
    visited = [0] * (N+1)
    def bfs():
        """BFS from V; neighbours enqueued in ascending order."""
        while queue:
            now = queue.popleft()
            if not visited[now]:
                visited[now] = 1
                print(now, end=' ')
                for v in sorted(edge_list[now]):
                    queue.append(v)
        print()
    bfs()
| [
"shrkdms3389@naver.com"
] | shrkdms3389@naver.com |
5dc3e5eb54602009e6f8a02450af13bf34566f0c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_workers.py | 925498db6457cf5ab2857092165fbc8709111a52 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.nouns._worker import _WORKER
#calss header
class _WORKERS(_WORKER, ):
    """Plural form of "worker": inherits behaviour from _WORKER, overriding identity fields."""
    def __init__(self,):
        # initialise the singular base, then set this word's metadata
        _WORKER.__init__(self)
        self.name = "WORKERS"
        self.specie = 'nouns'
        self.basic = "worker"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
d56f23cc972c99e69f424e86bff1a9736e7e8d2f | 49710e3fef905bf3de86ed51cd74b22da9ea83a7 | /barChart/bundesligaTopFive.py | 0c9782282b556c07ecad98ea5d30e4820a8ae58d | [] | no_license | tjsdla520/VisualizationProject | 52c06457b36764ce703c9f7d78fc35f8f33e05bb | d7c87af95f903975a4084f09b42af55793b8385b | refs/heads/master | 2023-06-26T22:00:53.738280 | 2021-07-26T05:40:38 | 2021-07-26T05:40:38 | 374,260,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
################################################
# Prevent Korean glyphs from rendering as boxes in matplotlib
plt.rc('font', family='Malgun Gothic')
# Output file naming: sequential "<name>_NN.png"
cnt, PNG, UNDERBAR = 0, '.png', '_'
CHART_NAME = 'bundesligaTopFive'
# Input data file
filename = '../bundesliga.csv'
################################################
data = pd.read_csv(filename, index_col='teamName')
TEAM = ['FC 바이에른 뮌헨','RB 라이프치히','보루시아 도르트문트','VfL 볼프스부르크','아인트라흐트 프랑크푸르트']
WHEN = ['gainGoal', 'loseGoal', 'goalGap']
data = data.loc[TEAM, WHEN]
print(data)
data.index.name = '팀명'
data.columns.name = '점'
plt.figure()
stacked=False
yticks_interval = 5
# BUG FIX: ylim was referenced below without ever being defined (NameError);
# None means "let matplotlib choose the y-range".
ylim = None
data.plot(kind='bar', rot=10, title='bundesliga top 5 골득실차', legend=True, stacked=stacked)
plt.legend(loc='best')
if stacked == False:
    # grouped bars: scale ticks to the largest single value
    maxlim = (int(max(data.max()) / yticks_interval) + 1) * yticks_interval
    print('maxlim : ', maxlim)
    values = np.arange(0, maxlim + 1, yticks_interval)
    plt.yticks(values, ['%s' % format(val, ',') for val in values])
else : # stacked bar chart
    # BUG FIX: this branch referenced an undefined name `chartdata`;
    # the stacked maximum is the largest row-wise sum of `data`.
    maxlim = (int(max(data.sum(axis=1)) / yticks_interval) + 1) * yticks_interval
    print('maxlim : ', maxlim)
    values = np.arange(0, maxlim + 1, yticks_interval)
    plt.yticks(values, ['%s' % format(val, ',') for val in values])
# Apply an explicit y-range only when one is configured
if ylim != None :
    plt.ylim(ylim)
################################################
# Save the chart under a sequentially numbered filename
cnt += 1
savefilename = CHART_NAME + UNDERBAR + str(cnt).zfill(2) + PNG
plt.savefig(savefilename, dpi=400)
print(savefilename + ' 파일 저장 완료')
| [
"noreply@github.com"
] | noreply@github.com |
fafd6acdb83668e2523fefd141073e72445aa0c3 | 8c466de1fb9de881718b6f59a71e02f54963ea96 | /DJCelery/settings.py | 20c38704059e817d454b264b1fca88630e817064 | [] | no_license | IsaacNewLee/DjCeelry | d17837b50cda8f4a2d96b1f36d953956b54fa7ad | 06f5c6cdddeb4bbf130fe52bde5c32922cd66c5e | refs/heads/master | 2022-02-18T20:31:47.997005 | 2019-09-13T14:18:47 | 2019-09-13T14:18:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | """
Django settings for DJCelery project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before deploying.
SECRET_KEY = '(_8tu7mjzx@&g3tk-4+=va*d6fpl7@64l_^2x8%&g3kg!$q!f-'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): with DEBUG=False and ALLOWED_HOSTS empty, Django will refuse
# every non-local request — populate ALLOWED_HOSTS for real deployments.
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'celeryapp.apps.CeleryappConfig',
    'djcelery' # register django-celery so its models/commands are available
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DJCelery.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'DJCelery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# celery configuration: base settings come from celeryconfig, broker/result
# backends point at local Redis DBs 1 and 2.
from .celeryconfig import *
BROKER_BACKEND = 'redis'
BROKER_URL = 'redis://localhost:6379/1'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/2'
| [
"635434705@qq.com"
] | 635434705@qq.com |
44ef0d48cd4f5834426ca51b1aa632623165b1ff | fd4c0363e407b9be4d6b0a897ca15400333ce05d | /graphics/DummyGraphicModule.py | f9dbe9a56ea0af76ef905b1ae0a7cee9a9d7d080 | [] | no_license | junghyun397/TetrisXQ | 13be3477a45a938368c74225e53e8ce9dd68c8e9 | 16106b47210fe1d0b8497ce2171dac02f1896694 | refs/heads/master | 2020-04-10T12:03:13.996028 | 2019-09-22T12:51:59 | 2019-09-22T12:51:59 | 161,010,029 | 13 | 3 | null | 2019-01-29T14:46:43 | 2018-12-09T06:06:30 | Python | UTF-8 | Python | false | false | 269 | py | from graphics.GraphicInterface import GraphicInterface
class DummyGraphicModule(GraphicInterface):
def draw_graphic(self, tetromino_y, tetromino_x):
pass
def set_tetris_model(self, tetris_model):
pass
def pump_event(self):
pass
| [
"junghyun397@gmail.com"
] | junghyun397@gmail.com |
4a0ce61e2d5550b8108f7b67f5fa15ed1337f07c | a18868e1c1c9103b9b60fdbdf29926225fee35c0 | /leetCode/GoatLatin.py | b38330f6cb743da2ec90e6625deaa32dd16eb934 | [] | no_license | walkershashi/myCoders | d8f4db6239b38f519e1c03fce4a6d3f957aec090 | 22cef0a6ba9d7a53f8e0de6bc383d82c1c774852 | refs/heads/master | 2022-12-07T04:07:19.087454 | 2020-09-05T15:11:17 | 2020-09-05T15:11:17 | 270,000,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | class Solution:
def toGoatLatin(self, S: str) -> str:
if not S:
return
vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
S = S.split()
for i in range(1, len(S)+1):
if S[i-1][0] in vowels:
S[i-1] += 'ma' + 'a'*i
else:
S[i-1] = S[i-1][1:] + S[i-1][0] + 'ma' + 'a'*i
return ' '.join(i for i in S)
| [
"skssunny30@gmail.com"
] | skssunny30@gmail.com |
00f5f7f0fb9dd81d402dae15c1697694b631bf76 | 2f675a6df5e509ff0a3a58ed992aaaf5fbfed0d9 | /source/resources/hosters/vidlox.py | 7831e5f7ab4c6f23b42611aee363fde6296530ab | [] | no_license | kar10s/TvWatch | 459c38f9ba3db90b3ecae92460f3d460ebb82923 | 39a4dca8cf48ba255074bc738d09839eecae6ef2 | refs/heads/master | 2020-11-29T16:03:22.168652 | 2019-12-22T20:17:39 | 2019-12-22T20:17:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | #-*- coding: utf-8 -*-
#Vstream https://github.com/Kodi-vStream/venom-xbmc-addons
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.hosters.hoster import iHoster
from resources.lib.util import dialog, isKrypton
class cHoster(iHoster):
    """Vidlox hoster plugin: resolves an embed page URL into direct .mp4 link(s)."""

    def __init__(self):
        # Kodi releases before 17 (Krypton) cannot play this host on
        # Windows/Android, so the display name carries a warning.
        if not (isKrypton() == True):
            self.__sDisplayName = '(Windows\Android Nécessite Kodi17)' + ' Vidlox'
        else:
            self.__sDisplayName = 'Vidlox'
        self.__sFileName = self.__sDisplayName
        self.__sHD = ''

    def getDisplayName(self):
        """Return the name shown in the hoster list."""
        return self.__sDisplayName

    def setDisplayName(self, sDisplayName):
        """Prefix the given title and colour the hoster name sky blue."""
        self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR]'

    def setFileName(self, sFileName):
        self.__sFileName = sFileName

    def getFileName(self):
        return self.__sFileName

    def getPluginIdentifier(self):
        """Stable identifier used to look this hoster up."""
        return 'vidlox'

    def setHD(self, sHD):
        # Quality argument is deliberately ignored; __sHD stays empty.
        self.__sHD = ''

    def getHD(self):
        return self.__sHD

    def isDownloadable(self):
        return True

    def setUrl(self, sUrl):
        # Normalize the alternate 'embed-dlox.me/' URL form before storing.
        sUrl = sUrl.replace('embed-dlox.me/','embed-')
        self.__sUrl = str(sUrl)

    def getMediaLink(self):
        return self.__getMediaLinkForGuest()

    def __getMediaLinkForGuest(self):
        """Fetch the embed page and extract the .mp4 stream URL(s).

        :return: (True, url) on success, (False, False) otherwise.
        """
        oParser = cParser()
        oRequest = cRequestHandler(self.__sUrl)
        sHtmlContent = oRequest.request()
        # Narrow the HTML to the player setup section to speed up parsing.
        sHtmlContent = oParser.abParse(sHtmlContent, 'var player', 'vvplay')
        sPattern = '([^"]+\.mp4)'
        # Fix: reuse the existing parser instead of instantiating a second one.
        aResult = oParser.parse(sHtmlContent, sPattern)
        if (aResult[0] == True):
            url = []
            qua = ["HD", "SD"]  # SD is usually the second entry when present
            api_call = ''
            for i in aResult[1]:
                url.append(str(i))
            # Let the user pick the quality when several links were found.
            api_call = dialog().VSselectqual(qua, url)
            if (api_call):
                return True, api_call
        return False, False
| [
"tvwatch.kodi@outlook.com"
] | tvwatch.kodi@outlook.com |
ebf9b4a30f7ce8099e5020d7dc4df985c9055dc2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2138/60585/257214.py | 6a0f930bf2e12114cdb52468e38c6cf2a97dc12e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | arr=list(map(int,input().strip().split(',')))
# Brute-force check: does any contiguous slice of `arr` (length >= 2) sum to
# a multiple of k?  O(n^2) over all (start, end) pairs.
# `arr` is read from input on the preceding line of this script.
k=eval(input())  # NOTE(review): eval on raw user input is unsafe; int(input()) would be safer if k is integral -- confirm
n=len(arr)
isM=False
for i in range(0,n-1):
    j=i+1
    temp=arr[i]  # running sum of arr[i..j-1]
    while j<n:
        temp+=arr[j]
        j+=1
        if temp%k==0:  # NOTE(review): raises ZeroDivisionError when k == 0
            isM=True
            break
    if isM:
        break
print(isM) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
1d01b17589e954f3dd2578ee3bc07e5bbed380dc | ff99c677aba11e27c252f773b52cd54f5de79279 | /ctt-server/openapi_server/models/project.py | 0220631032783c2d8b7da9e44e5e0a94cbfdbdab | [
"Apache-2.0"
] | permissive | radon-h2020/radon-ctt | b7eeb82f59e36e2a258d0a2ba9cd9483eb3dd247 | 97fcf5e800a0129d24e119b430d94f07ca248ba9 | refs/heads/master | 2023-01-04T23:44:49.611599 | 2021-09-15T15:34:41 | 2021-09-15T15:34:41 | 235,379,642 | 0 | 7 | Apache-2.0 | 2022-12-27T15:56:38 | 2020-01-21T15:48:45 | Python | UTF-8 | Python | false | false | 2,758 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class Project(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    def __init__(self, uuid=None, name=None, repository_url=None):  # noqa: E501
        """Project - a model defined in OpenAPI
        :param uuid: The uuid of this Project.  # noqa: E501
        :type uuid: str
        :param name: The name of this Project.  # noqa: E501
        :type name: str
        :param repository_url: The repository_url of this Project.  # noqa: E501
        :type repository_url: str
        """
        # Maps attribute name -> Python type; presumably consumed by
        # util.deserialize_model -- confirm against the generator runtime.
        self.openapi_types = {
            'uuid': str,
            'name': str,
            'repository_url': str
        }
        # Maps attribute name -> field name on the wire (identical here).
        self.attribute_map = {
            'uuid': 'uuid',
            'name': 'name',
            'repository_url': 'repository_url'
        }
        self._uuid = uuid
        self._name = name
        self._repository_url = repository_url
    @classmethod
    def from_dict(cls, dikt) -> 'Project':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The Project of this Project.  # noqa: E501
        :rtype: Project
        """
        return util.deserialize_model(dikt, cls)
    # The properties below are plain pass-through accessors generated by
    # OpenAPI Generator; they perform no validation.
    @property
    def uuid(self):
        """Gets the uuid of this Project.
        :return: The uuid of this Project.
        :rtype: str
        """
        return self._uuid
    @uuid.setter
    def uuid(self, uuid):
        """Sets the uuid of this Project.
        :param uuid: The uuid of this Project.
        :type uuid: str
        """
        self._uuid = uuid
    @property
    def name(self):
        """Gets the name of this Project.
        :return: The name of this Project.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this Project.
        :param name: The name of this Project.
        :type name: str
        """
        self._name = name
    @property
    def repository_url(self):
        """Gets the repository_url of this Project.
        :return: The repository_url of this Project.
        :rtype: str
        """
        return self._repository_url
    @repository_url.setter
    def repository_url(self, repository_url):
        """Sets the repository_url of this Project.
        :param repository_url: The repository_url of this Project.
        :type repository_url: str
        """
        self._repository_url = repository_url
| [
"duellmann@iste.uni-stuttgart.de"
] | duellmann@iste.uni-stuttgart.de |
457a4af2a0527e35d4fe94dd64c5d4d54bd607c0 | 3a494af9cbf3d55e3b61e77c29080eea4b33307b | /blog/views.py | b7fab63fa47485f4b4b84526fe36e7f0b24696a0 | [] | no_license | ghdwlgus0607/girls | 6e93f152db1d50f8372f125a996bf851c1b959c8 | fbaead0bd37cd374a6e123fafba77e91789f27d7 | refs/heads/master | 2020-08-04T04:01:58.682634 | 2019-11-18T05:24:16 | 2019-11-18T05:24:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | from django.shortcuts import render
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone

from .models import Post
from .forms import PostForm
def post_list(request):
    """Render the blog index with every post published up to now."""
    # Keep only rows whose published_date is now or earlier, newest first.
    published_posts = (
        Post.objects
        .filter(published_date__lte=timezone.now())
        .order_by('-published_date')
    )
    context = {'posts': published_posts}
    return render(request, 'blog/post_list.html', context)
def post_detail(request, pk):
    """Render a single post, raising 404 when *pk* does not exist."""
    context = {'post': get_object_or_404(Post, pk=pk)}
    return render(request, 'blog/post_detail.html', context)
def post_new(request):
    """Create a new blog post.

    GET renders an empty form.  A valid POST saves the post with the
    current user as author and now as publication date, then redirects to
    the new post's detail page.  An invalid POST falls through and
    re-renders the bound form so field errors are shown.

    Note: ``redirect`` must be imported from ``django.shortcuts``.
    (The old commented-out draft of this view has been removed.)
    """
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm()
    return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
    """Edit an existing post identified by *pk* (404 when missing).

    Bug fix: the POST branch previously built an unbound
    ``PostForm(instance=post)``, so ``is_valid()`` was always False and
    edits were silently discarded.  The form must be bound to
    ``request.POST`` for validation and saving to work.
    """
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
    return render(request, 'blog/post_edit.html', {'form': form})
"ghdwlgus0607@naver.com"
] | ghdwlgus0607@naver.com |
4ef38efee3db65f28657b1d721188e859341b52e | e27e113d73cf90da91d9161847460b4dbe18e593 | /venv/bin/odfimgimport | 368c91ac7a55e4e2110b041bf80d546acaba76f9 | [] | no_license | rsdeus/dt-store | 14a75ddca20029a1ee44c10abd8e7c3af46219cd | 94292e4a3fcd51e4fe89b29d360bad82a0a3689a | refs/heads/master | 2023-02-27T00:22:18.291658 | 2021-01-26T18:12:23 | 2021-01-26T18:12:23 | 220,774,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,070 | #!/home/renato/Projetos/src/dtstore/venv/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2009 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from __future__ import print_function
import zipfile, sys, getopt, mimetypes
try:
from urllib2 import urlopen, quote, unquote
except ImportError:
from urllib.request import urlopen, quote, unquote
try:
from urlparse import urlunsplit, urlsplit
except ImportError:
from urllib.parse import urlunsplit, urlsplit
from odf.opendocument import load
from odf.draw import Image
# Py2/Py3 shim: Python 3 has no `unicode` builtin, so alias it to str.
if sys.version_info[0]==3: unicode=str
#sys.tracebacklimit = 0
# Variable to count the number of retrieval failures
failures = 0
# Set to one if quiet behaviour is wanted
quiet = 0
# If set will write every url to import
verbose = 0
# Dictionary with new pictures. Key is original file path
# Item is newfilename
newpictures = {}
# The loaded OpenDocument; assigned by the option-parsing code below.
doc = None
def importpicture(href):
    """ Add the picture to the ZIP file
        Returns the new path name to the file in the zip archive
        If it is unable to import, then it returns the original href
        Sideeffect: add line to manifest
    """
    global doc, newpictures, failures, verbose
    # Check that it is not already in the manifest
    if href in doc.Pictures: return href
    image = None
    if verbose: print ("Importing", href, file=sys.stderr)
    # Remote picture: download it over HTTP(S)/FTP.
    if href[:7] == "http://" or href[:8] == "https://" or href[:6] == "ftp://":
        # There is a bug in urlopen: It can't open urls with non-ascii unicode
        # characters. Convert to UTF-8 and then use percent encoding
        try:
            goodhref = href.encode('ascii')
        except:
            o = list(urlsplit(href))
            o[2] = quote(o[2].encode('utf-8'))
            goodhref = urlunsplit(o)
        if goodhref in newpictures:
            if verbose: print ("already imported", file=sys.stderr)
            return newpictures[goodhref] # Already imported
        try:
            # NOTE(review): when goodhref came from urlunsplit() above it is a
            # str, and str has no .decode() on Python 3 -- the resulting
            # AttributeError is swallowed by the bare except below and counted
            # as a retrieval failure.  Confirm intended Py3 behaviour.
            f = urlopen(goodhref.decode("utf-8"))
            image = f.read()
            headers = f.info()
            f.close()
            # Get the mimetype from the headerlines
            c_t = headers['Content-Type'].split(';')[0].strip()
            if c_t: mediatype = c_t.split(';')[0].strip()
            if verbose: print ("OK", file=sys.stderr)
        except:
            failures += 1
            if verbose: print ("failed", file=sys.stderr)
            return href
        # Remove query string
        try: href= href[:href.rindex('?')]
        except: pass
        # NOTE(review): `ext` is computed in both branches below but never
        # read afterwards in this function.
        try:
            lastslash = href[href.rindex('/'):]
            ext = lastslash[lastslash.rindex('.'):]
        except: ext = mimetypes.guess_extension(mediatype)
    # Everything is a simple path.
    else:
        goodhref = href
        # `directory` is a module-level global set by the CLI code below.
        if href[:3] == '../':
            if directory is None:
                goodhref = unquote(href[3:])
            else:
                goodhref = unquote(directory + href[2:])
        if goodhref in newpictures:
            if verbose: print ("already imported", file=sys.stderr)
            return newpictures[goodhref] # Already imported
        mediatype, encoding = mimetypes.guess_type(goodhref)
        if mediatype is None:
            mediatype = ''
            try: ext = goodhref[goodhref.rindex('.'):]
            except: ext=''
        else:
            ext = mimetypes.guess_extension(mediatype)
        try:
            # NOTE(review): file() exists only on Python 2; on Python 3 this
            # raises NameError, which the bare except turns into a failure for
            # every local import.  Should be open(goodhref, 'rb') -- confirm.
            image = file(goodhref).read()
            if verbose: print ("OK", file=sys.stderr)
        except:
            failures += 1
            if verbose: print ("failed", file=sys.stderr)
            return href
    # If we have a picture to import, the image variable contains it
    # and manifestfn, ext and mediatype has a value
    if image:
        manifestfn = doc.addPictureFromString(image, unicode(mediatype))
        newpictures[goodhref] = manifestfn
        return manifestfn
    if verbose: print ("not imported", file=sys.stderr)
    return href
def exitwithusage(exitcode=2):
    """Write the usage summary to stderr and terminate with *exitcode*."""
    usage = "Usage: %s [-q] [-v] [-o output] [inputfile]" % sys.argv[0]
    detail = "\tInputfile must be OpenDocument format"
    for line in (usage, detail):
        print(line, file=sys.stderr)
    sys.exit(exitcode)
# ---- Command-line entry point: parse options, rewrite image hrefs, save ----
outputfile = None
writefile = True
try:
    opts, args = getopt.getopt(sys.argv[1:], "qvo:")
except getopt.GetoptError:
    exitwithusage()
for o, a in opts:
    if o == "-o":
        outputfile = a
        # NOTE(review): writefile is already True and is never set False
        # anywhere; this assignment is redundant -- confirm intent.
        writefile = True
    if o == "-q":
        quiet = 1
    if o == "-v":
        verbose = 1
# No positional argument: read the document from stdin.
if len(args) == 0:
    try:
        doc = load(sys.stdin)
        directory = None
    except:
        print ("Couldn't open OpenDocument file", file=sys.stderr)
        exitwithusage()
else:
    fn = unicode(args[0])
    if not zipfile.is_zipfile(fn):
        exitwithusage()
    # Remember the document's directory so relative '../' hrefs resolve.
    dirinx = max(fn.rfind('\\'), fn.rfind('/'))
    if dirinx >= 0: directory = fn[:dirinx]
    else: directory = "."
    doc = load(fn)
# Rewrite every image reference to its (possibly imported) manifest path.
for image in doc.getElementsByType(Image):
    href = image.getAttribute('href')
    newhref = importpicture(href)
    image.setAttribute('href',newhref)
if writefile:
    if outputfile is None:
        # NOTE(review): when input came from stdin and no -o was given,
        # fn is unbound here and this raises NameError -- confirm.
        doc.save(fn)
    else:
        doc.save(unicode(outputfile))
if quiet == 0 and failures > 0:
    print ("Couldn't import %d image(s)" % failures, file=sys.stderr)
sys.exit( int(failures > 0) )
# Local Variables: ***
# mode: python ***
# End: ***
| [
"rsdeus@gmail.com"
] | rsdeus@gmail.com | |
f5053d55e7dfd80861b7d268d991e8ad2d4ff27e | 780a18c55af7a8744b408e1efd4aaf08a0d3a3e7 | /passbook/api/__init__.py | d5deace33843d920f8be361ce93e1885635aed8c | [] | no_license | squidnee/passbook | 86507c6675122f1b67333f55048eb55f3dff664a | 551de76b95049185820a3fc8729fbc126c423994 | refs/heads/master | 2020-03-22T05:27:31.718897 | 2018-07-21T12:30:32 | 2018-07-21T12:30:32 | 139,567,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d | [
"smaples@stanford.edu"
] | smaples@stanford.edu |
b5df670ed20498c70ae17c07a2a477581d6430c3 | 5b354510e56b50c7526248136c54f0de86a69c24 | /mysite/settings.py | 87ca334c3fb203a9ca069d9abbe0558714f60016 | [] | no_license | powerfulsheron/GeepsLicenseCenter | 2d420f88ac219fbe3c271f4fff1b309594e996a2 | 6ffeda427f3012b298dc8067404a6d231e443188 | refs/heads/master | 2020-03-20T00:21:47.831301 | 2018-10-16T09:15:02 | 2018-10-16T09:15:02 | 137,040,989 | 0 | 1 | null | 2018-10-16T08:41:35 | 2018-06-12T08:29:38 | Python | UTF-8 | Python | false | false | 3,199 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a real secret key is committed here; load it from an
# environment variable before deploying.
SECRET_KEY = 'i5+qr+ps@tln1qst@cuk-3y@=e^lfo9z55hv3ay$+6mq-^ft_n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'geeps',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Outgoing mail: local SMTP relay on the default port.
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
| [
"lorenzo.canavaggio@laposte.net"
] | lorenzo.canavaggio@laposte.net |
b15189617cbdd65ffa64ed54945ace70f2e754d1 | b517ddb3ddbf0cf016535f75f17c1a186fda19d4 | /inventory/admin.py | bfc64c330a91f4511eeb0dd58ef0b61bc5d32846 | [] | no_license | tsinghgill/FirstDjangoApp | ee06f5fef6af5e07f3c57829fd6fda3db40b864d | 9f456214ced678a4d121227035ac4ff2964385c0 | refs/heads/master | 2021-12-14T09:33:15.779691 | 2016-06-29T20:03:50 | 2016-06-29T20:03:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from django.contrib import admin
from .models import Item
class ItemAdmin(admin.ModelAdmin):
    """Admin configuration for the Item model."""
    # Columns shown on the admin change-list page.
    list_display = ['title', 'amount']
admin.site.register(Item, ItemAdmin) | [
"tanveet.gill@gmail.com"
] | tanveet.gill@gmail.com |
6d0ff22e12319d91f5df4a4590db755faef894d5 | ef19ebc1cd0d673d0b69ea73feace437b1e28154 | /project/transactions/forms.py | ed957ec8a33351c5cdd49048ddc730e1577461f7 | [] | no_license | salmansajid/IMS | 2f6c2189287a14f8137a1ace64a06cccfb23cacc | 2883f841aee2bbb7c8a24e9bec4ce129ce7d17af | refs/heads/main | 2023-06-04T01:22:07.626112 | 2021-06-27T07:42:58 | 2021-06-27T07:42:58 | 380,503,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,984 | py | from django import forms
from django.forms import formset_factory
from .models import (
Supplier,
PurchaseBill,
PurchaseItem,
PurchaseBillDetails,
SaleBill,
SaleItem,
SaleBillDetails
)
from inventory.models import Stock
# form used to select a supplier
class SelectSupplierForm(forms.ModelForm):
    """Form that picks the supplier for a PurchaseBill."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Exclude soft-deleted suppliers and apply the site's input styling.
        self.fields['supplier'].queryset = Supplier.objects.filter(is_deleted=False)
        self.fields['supplier'].widget.attrs.update({'class': 'textinput form-control'})
    class Meta:
        model = PurchaseBill
        fields = ['supplier']
# form used to render a single stock item form
class PurchaseItemForm(forms.ModelForm):
    """One purchase line: a stock item, its quantity and its 'perprice' value."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only offer stock that is not soft-deleted; widget attrs add
        # client-side constraints (min=0, required) and styling hooks.
        self.fields['stock'].queryset = Stock.objects.filter(is_deleted=False)
        self.fields['stock'].widget.attrs.update({'class': 'textinput form-control setprice stock', 'required': 'true'})
        self.fields['quantity'].widget.attrs.update({'class': 'textinput form-control setprice quantity', 'min': '0', 'required': 'true'})
        self.fields['perprice'].widget.attrs.update({'class': 'textinput form-control setprice price', 'min': '0', 'required': 'true'})
    class Meta:
        model = PurchaseItem
        fields = ['stock', 'quantity', 'perprice']
# formset used to render multiple 'PurchaseItemForm'
# extra=1 renders one blank line-item row by default.
PurchaseItemFormset = formset_factory(PurchaseItemForm, extra=1)
# form used to accept the other details for purchase bill
class PurchaseDetailsForm(forms.ModelForm):
    """Extra billing details (eway, vehicle, destination, PO and tax-related
    figures) attached to a purchase bill."""
    class Meta:
        model = PurchaseBillDetails
        fields = ['eway','veh', 'destination', 'po', 'cgst', 'sgst', 'igst', 'cess', 'tcs', 'total']
# form used for supplier
class SupplierForm(forms.ModelForm):
    """Create/update form for Supplier.

    The widget attrs (pattern/title/maxlength) add HTML5 client-side
    validation only; no extra server-side validation is defined here.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['name'].widget.attrs.update({'class': 'textinput form-control', 'pattern' : '[a-zA-Z\s]{1,50}', 'title' : 'Alphabets and Spaces only'})
        self.fields['phone'].widget.attrs.update({'class': 'textinput form-control', 'maxlength': '10', 'pattern' : '[0-9]{10}', 'title' : 'Numbers only'})
        self.fields['email'].widget.attrs.update({'class': 'textinput form-control'})
        # NOTE(review): the pattern allows any 13 uppercase alphanumerics
        # while the title says 'CNIC Format Required' -- confirm the format.
        self.fields['gstin'].widget.attrs.update({'class': 'textinput form-control', 'maxlength': '13', 'pattern' : '[A-Z0-9]{13}', 'title' : 'CNIC Format Required'})
    class Meta:
        model = Supplier
        fields = ['name', 'phone', 'address', 'email', 'gstin']
        widgets = {
            'address' : forms.Textarea(
                attrs = {
                    'class' : 'textinput form-control',
                    'rows' : '4'
                }
            )
        }
# form used to get customer details
class SaleForm(forms.ModelForm):
    """Customer details captured on a SaleBill.

    Same HTML5 client-side validation attrs as SupplierForm, with name and
    phone additionally marked required.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['name'].widget.attrs.update({'class': 'textinput form-control', 'pattern' : '[a-zA-Z\s]{1,50}', 'title' : 'Alphabets and Spaces only', 'required': 'true'})
        self.fields['phone'].widget.attrs.update({'class': 'textinput form-control', 'maxlength': '10', 'pattern' : '[0-9]{10}', 'title' : 'Numbers only', 'required': 'true'})
        self.fields['email'].widget.attrs.update({'class': 'textinput form-control'})
        self.fields['gstin'].widget.attrs.update({'class': 'textinput form-control', 'maxlength': '13', 'pattern' : '[A-Z0-9]{13}', 'title' : 'CNIC Format Required'})
    class Meta:
        model = SaleBill
        fields = ['name', 'phone', 'address', 'email', 'gstin']
        widgets = {
            'address' : forms.Textarea(
                attrs = {
                    'class' : 'textinput form-control',
                    'rows' : '4'
                }
            )
        }
# form used to render a single stock item form
class SaleItemForm(forms.ModelForm):
    """One sale line: a stock item, its quantity and its 'perprice' value."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only offer stock that is not soft-deleted; widget attrs add
        # client-side constraints (min=0, required) and styling hooks.
        self.fields['stock'].queryset = Stock.objects.filter(is_deleted=False)
        self.fields['stock'].widget.attrs.update({'class': 'textinput form-control setprice stock', 'required': 'true'})
        self.fields['quantity'].widget.attrs.update({'class': 'textinput form-control setprice quantity', 'min': '0', 'required': 'true'})
        self.fields['perprice'].widget.attrs.update({'class': 'textinput form-control setprice price', 'min': '0', 'required': 'true'})
    class Meta:
        model = SaleItem
        fields = ['stock', 'quantity', 'perprice']
# formset used to render multiple 'SaleItemForm'
# extra=1 renders one blank line-item row by default.
SaleItemFormset = formset_factory(SaleItemForm, extra=1)
# form used to accept the other details for sales bill
class SaleDetailsForm(forms.ModelForm):
    """Extra billing details (eway, vehicle, destination, PO and tax-related
    figures) attached to a sale bill."""
    class Meta:
        model = SaleBillDetails
        fields = ['eway','veh', 'destination', 'po', 'cgst', 'sgst', 'igst', 'cess', 'tcs', 'total']
| [
"salmansajid92@gmail.com"
] | salmansajid92@gmail.com |
1bb3336fbed092dabfeff60c76c8e38019e5566f | 3735dce3931b6159b8f83c91348ee52e1e80769a | /ensebmles/Agregation/agregation.py | a198586cc00c67b886d18feeafdd892a18a5eacc | [
"MIT"
] | permissive | GavrilovMike/EnsembleLearning | e1d5a47e7a5d6c0c30df6e475b3c1f35e31c3627 | 6badedf2b6e9f2d3b01c11246c32916864ad3848 | refs/heads/master | 2022-12-12T22:05:17.818325 | 2020-02-07T08:13:21 | 2020-02-07T08:13:21 | 238,803,548 | 0 | 0 | MIT | 2022-12-08T04:24:03 | 2020-02-06T23:10:22 | Python | UTF-8 | Python | false | false | 754 | py | import pickle
import numpy as np
# Load two pickled Q-tables and average them elementwise into a new table
# (the result is pickled on the following line of this script).
print("\n 1st Q Table: \n")
with open("/Users/mgavrilov/Study/ENSEMBLEALGS/learn/main_QTable.pkl", 'rb') as f:
    Q_table_1st = pickle.load(f)
print(Q_table_1st)
print("\n 2nd Q Table: \n")
with open("/Users/mgavrilov/Study/ENSEMBLEALGS/learn/rotated_QTable.pkl", 'rb') as f2:
    Q_table_2nd = pickle.load(f2)
print(Q_table_2nd)
print(Q_table_1st.shape)
# NOTE(review): shape 76x7 is hard-coded; it must match both loaded tables.
Q_table_agregated = np.zeros([76, 7])
# NOTE(review): the zero table is printed twice here -- likely a leftover.
print(Q_table_agregated)
print(Q_table_agregated)
# Elementwise mean; could be vectorized as (Q_table_1st + Q_table_2nd) / 2.
for i in range (76):
    for j in range (7):
        Q_table_agregated[i][j] = (Q_table_1st[i][j] + Q_table_2nd[i][j]) / 2
print(Q_table_agregated)
print(Q_table_agregated.shape)
with open("Agregation.pkl", 'wb') as f3:
pickle.dump(Q_table_agregated, f3) | [
"mgavrilov@aquivalabs.com"
] | mgavrilov@aquivalabs.com |
12dcdd8aca64f39aa454f09779d895751d7b85a2 | 3dbc51d8928d90823ca507a550bb3ea3a9ac02f0 | /examples/titanic_keras_exp.py | f98db0280d6fff1700712566bcb19bb410a941a5 | [
"MIT"
] | permissive | SimonCarozza/autolrn | 869a3441267f8d89b7aca09b04c4a26d5d5646ae | d0875844a3e9b4fc22510ef320aa498e339b6192 | refs/heads/master | 2022-02-26T13:10:45.678162 | 2019-10-23T21:13:23 | 2019-10-23T21:13:23 | 189,698,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,211 | py | """Evaluate Keras Classifiers and learn from the Titanic data set."""
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.dummy import DummyClassifier
from pandas import read_csv
# from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
from keras.wrappers.scikit_learn import KerasClassifier
sys.stderr = stderr
from autolrn.classification import eval_utils as eu
from autolrn import auto_utils as au
from autolrn.classification import neuralnets as nn
from autolrn.classification import train_calibrate as tc
import autolrn.getargs as ga
from autolrn.classification.param_grids_distros import Keras_param_grid
from autolrn.classification.evaluate import create_keras_classifiers
from autolrn.classification.evaluate import create_best_keras_clf_architecture
from pkg_resources import resource_string
from io import StringIO
def select_cv_method():
    """Prompt the user to pick a cross-validation strategy.

    Loops until a valid option is typed.

    Returns
    -------
    int
        1 for classical CV, 2 for nested CV.
    """
    while True:
        raw = input("Select cv method: [1] Classical CV, [2] Nested-CV?\n")
        try:
            choice = int(raw)
        except ValueError:
            # Fix: report the offending input directly.  The previous code
            # parsed the ValueError message with e.args[0].split(": ")[1],
            # which is brittle across Python versions and double-quoted the
            # echoed token.
            print("'%s' is not a valid integer." % raw.strip())
            continue
        if choice in (1, 2):
            return choice
        print("Invalid number. Try again...")
# starting program
if __name__ == '__main__':
print("### Probability Calibration Experiment -- CalibratedClassifierCV "
"with cv=cv (no prefit) ###")
print()
d_name = ga.get_name()
if d_name is None:
d_name = "titanic"
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# load data
try:
df = read_csv(
'datasets\\titanic_train.csv', delimiter=",",
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
print("Found data in autolrn\\autolrn\\datasets")
except FileNotFoundError as fe:
titanic_bytes = resource_string(
"autolrn", os.path.join("datasets", 'titanic_train.csv'))
titanic_file = StringIO(str(titanic_bytes,'utf-8'))
names = ['PassengerId','Survived','Pclass','Name','Sex','Age','SibSp',
'Parch','Ticket','Fare','Cabin','Embarked']
df = read_csv(
titanic_file, delimiter=",",
# header=0, names=names,
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
except Exception as e:
raise e
# data exploration
print("shape: ", df.shape)
# statistical summary
description = df.describe()
print("description - no encoding:\n", description)
print()
plt.style.use('ggplot')
# input("Enter key to continue... \n")
# Feature-Feature Relationships
# scatter_matrix(df)
print()
# too many missing values in 'Cabin' columns: about 3/4
print("Dropping 'Cabin' column -- too many missing values")
# df.Cabin.replace(to_replace=np.nan, value='Unknown', inplace=True)
df.drop(['Cabin'], axis=1, inplace=True)
print()
print("Now, shape: ", df.shape)
print("df.head():\n", df.head())
print()
description = df.describe()
print("Once again, description - no encoding:\n", description)
print()
# input("Enter key to continue... \n")
target = 'Survived'
# feature preprocessing
sltt = eu.scoring_and_tt_split(df, target, 0.2, seed)
X_train, X_test, y_train, y_test = sltt['arrays']
scoring = sltt['scoring']
Y_type = sltt['target_type']
labels = sltt['labels']
print("scoring:", scoring)
print()
print("Classes:", labels)
print()
print("X_train shape: ", X_train.shape)
print("X_train -- first row:", X_train.values[0])
print("y_train shape: ", y_train.shape)
print()
print("X_test shape: ", X_test.shape)
print("X_test -- first row:", X_test.values[0])
print("y_test shape: ", y_test.shape)
print()
print("y_train:", y_train[:3])
# input("Enter key to continue... \n")
print()
auto_feat_eng_data = eu.auto_X_encoding(sltt, seed)
print()
encoding = auto_feat_eng_data['encoding']
scaler_tuple = auto_feat_eng_data['scaler']
featselector = auto_feat_eng_data['feat_selector']
steps = auto_feat_eng_data['steps']
X_train_transformed, y_train, X_test_transformed, y_test = auto_feat_eng_data['data_arrays']
X, y = auto_feat_eng_data['Xy']
train_index, test_index = auto_feat_eng_data['tt_index']
n_splits = au.select_nr_of_splits_for_kfold_cv()
# n_iter = au.select_nr_of_iterations()
print()
# This cross-validation object is a variation of KFold that returns stratified folds.
# The folds are made by preserving the percentage of samples for each class.
# uncomment to evaluate models != KerasClfs or GaussianNB w nested cv
# inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
outer_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
### reproducing the whole autolrn workflow
names = []
results = []
print("Metric:", scoring)
print("Calibration of untrained models -- CCCV 2nd")
print()
# Evaluation of best modelwith nested CV -- inner: RSCV
# dict of models and their associated parameters
# if it comes out that the best model is LogReg, no comparison is needed
# scoring == 'roc_auc' ==>
best_score = 0.5 # 0.0
best_score_dev = 0.1
best_cv_results = np.zeros(n_splits)
best_exec_time = 31536000 # one year in seconds
best_model = ('Random', None, None)
Dummy_scores = []
models_data = []
names = []
results = []
scores_of_best_model = (best_score, best_score_dev, best_cv_results,
best_exec_time, best_model)
# Start evaluation process
print()
print("=== [task] Evaluation of DummyClassifier")
print()
wtr = eu.calculate_sample_weight(y_train)
average_scores_and_best_scores = eu.single_classic_cv_evaluation(
X_train_transformed, y_train, 'DummyClf_2nd',
DummyClassifier(strategy='most_frequent'), wtr, scoring, outer_cv,
dict(), scores_of_best_model, results, names, seed)
scores_of_best_model = average_scores_and_best_scores[1]
Dummy_scores.append(scores_of_best_model[0]) # Dummy score -- ROC_AUC
Dummy_scores.append(scores_of_best_model[1]) # Dummy score std
Dummy_scores.append(scores_of_best_model[2]) # Dummy cv results
Dummy_scores.append(scores_of_best_model[3]) # Dummy execution time
# Dummy model's name and estimator
Dummy_scores.append(scores_of_best_model[4])
names = []
results = []
print()
complex_models_and_parameters = dict()
average_scores_across_outer_folds_complex = dict()
all_models_and_parameters = dict()
# --- Compare the baseline against a set of simple neural networks. ---
print("=== [task] Comparing DummyClassifier to best Keras Clf (NN)")
print()
# This is an experiment to check how different Keras architectures perform.
# To avoid hard-coding NNs, at least the nr of layers and nr of nodes
# should be determined via Grid or Randomized Search CV.
input_dim = int(X_train_transformed.shape[1])
output_dim = 1
nb_epoch = au.select_nr_of_iterations('nn')

# Evaluate the Keras classifiers with the user-selected CV scheme.
cv_method = select_cv_method()
if cv_method == 1:
    # Classic (non-nested) CV over a fixed catalogue of architectures.
    batch_size = 32
    complex_models_and_parameters = create_keras_classifiers(
        Y_type, input_dim, labels, nb_epoch, batch_size)
    average_scores_and_best_scores = eu.classic_cv_model_evaluation(
        X_train_transformed, y_train, complex_models_and_parameters, scoring,
        outer_cv, average_scores_across_outer_folds_complex,
        scores_of_best_model, results, names, seed)
else:
    # Nested CV: randomized search in the inner loop, scoring in the outer.
    inner_cv = StratifiedKFold(
        n_splits=n_splits, shuffle=True, random_state=seed)
    n_iter = au.select_nr_of_iterations()
    keras_clf_name = "KerasClf_2nd"
    keras_nn_model, keras_param_grid = create_best_keras_clf_architecture(
        keras_clf_name, Y_type, labels, input_dim, nb_epoch, Keras_param_grid)
    complex_models_and_parameters[keras_clf_name] = (
        keras_nn_model, keras_param_grid)
    average_scores_and_best_scores = eu.nested_rscv_model_evaluation(
        X_train_transformed, y_train, complex_models_and_parameters,
        scoring, n_iter, inner_cv, outer_cv,
        average_scores_across_outer_folds_complex, scores_of_best_model,
        results, names, seed)
print()
au.box_plots_of_models_performance(results, names)
cv_method_name = "Classic" if cv_method == 1 else "Nested"
print()
print("=== After %s CV evaluation of Keras NNs..." % cv_method_name)
print()

# Unpack the winning record:
# (score, score std, cv results, exec time, (name, estimator[, build_fn])).
scores_of_best_model = average_scores_and_best_scores[1]
best_model_name = scores_of_best_model[4][0]
best_model_estim = scores_of_best_model[4][1]
best_score = scores_of_best_model[0]
best_score_dev = scores_of_best_model[1]
best_cv_results = scores_of_best_model[2]
# best_brier_score = scores_of_best_model[2]
best_exec_time = scores_of_best_model[3]

Dummy_score = Dummy_scores[0]
Dummy_score_dev = Dummy_scores[1]
Dummy_cv_results = Dummy_scores[2]
# Dummy_brier_score = Dummy_scores[3]
Dummy_exec_time = Dummy_scores[3]

# BUGFIX: the original used scoring.strip('neg_'), but str.strip removes the
# CHARACTER SET {'n', 'e', 'g', '_'} from BOTH ends, mangling metric names
# such as 'average_precision' -> 'average_precisio' or
# 'neg_brier_score' -> 'brier_scor'. Remove the 'neg_' prefix explicitly.
metric_name = scoring[len('neg_'):] if scoring.startswith('neg_') else scoring

print()
print("Currently, best model is '%s' with score '%s': %1.3f (%1.3f)... :" %
      (best_model_name, metric_name, best_score, best_score_dev))
if best_model_name in (
        'baseline_nn_default_Clf_2nd', 'baseline_nn_smaller_Clf_2nd',
        'larger_nn_Clf_2nd', 'deep_nn_Clf_2nd', 'deeper_nn_Clf_2nd',
        'KerasClf_2nd'):
    # Keras winners carry their architecture-building function as a 3rd item.
    best_nn_build_fn = scores_of_best_model[4][2]
    print("Best build function:", best_nn_build_fn)
print("... execution time: %.2fs" % best_exec_time)
# print("and prediction confidence: %1.3f" % best_brier_score)
print()

if best_model_name != 'DummyClf_2nd':
    # It's assumed best model's performance is
    # statistically better than that of DummyClf on this dataset
    print("DummyClassifier's scores -- '%s': %1.3f (%1.3f)" % (
        metric_name, Dummy_score, Dummy_score_dev))
    print("'%s' does better than DummyClassifier." % best_model_name)
    if best_exec_time < Dummy_exec_time:
        print("'%s' is quicker than DummyClf." % best_model_name)
        print()
    print()
    # input("Press key to continue...")
    preprocessing = (encoding, scaler_tuple, featselector)
    if labels is not None:
        print("You have labels:", labels)
        all_models_and_parameters['labels'] = labels
    print("Defined dictionary with models, parameters and related data.")
    print()
    # Hand off to the calibration stage matching the chosen CV scheme;
    # the nested variant additionally tunes with n_iter search iterations.
    if cv_method == 1:
        tc.calibrate_best_model(
            X, y, X_train_transformed, X_test_transformed,
            y_train, y_test, auto_feat_eng_data['tt_index'],
            preprocessing, scores_of_best_model,
            all_models_and_parameters, n_splits, nb_epoch,
            scoring, models_data, d_name, seed)
    else:
        tc.tune_calibrate_best_model(
            X, y, X_train_transformed, X_test_transformed,
            y_train, y_test, auto_feat_eng_data['tt_index'],
            preprocessing, scores_of_best_model,
            all_models_and_parameters, n_splits, n_iter, nb_epoch,
            scoring, models_data, d_name, seed)
else:
    sys.exit("Your best classifier is not a good classifier.")
input("=== [End Of Program] Enter key to continue... \n") | [
"simoncarozza@gmail.com"
] | simoncarozza@gmail.com |
06acec666b3d111f5a3ce4fd440aa9cb4b82dc57 | 66f47030d53b4338138dc3a0ef1c1f6369000cbd | /todo/test_forms.py | 0a24cd051ad14bdcaadede4721de79c62d25a8d4 | [] | no_license | Baynuts/django | 4c75a54413a8e43b3ba2bad18fe796487fffce18 | 3340ebdff6a94fb805c15b446965bde83956f1ee | refs/heads/master | 2021-06-20T21:10:49.927241 | 2019-12-01T15:52:41 | 2019-12-01T15:52:41 | 221,563,422 | 0 | 0 | null | 2021-06-10T22:20:15 | 2019-11-13T22:26:45 | HTML | UTF-8 | Python | false | false | 502 | py | from django.test import TestCase
from .forms import ItemForm
# Create your tests here.
class TestToDoItemForm(TestCase):
    """Unit tests for the to-do ItemForm validation rules."""

    def test_can_create_an_item_with_just_a_name(self):
        # A name on its own is enough for the form to validate.
        form = ItemForm({'name': 'Create Tests'})
        self.assertTrue(form.is_valid())

    def test_correct_message_for_missing_name(self):
        # An empty name must fail validation with the stock Django message.
        form = ItemForm({'name': ''})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['name'], [u'This field is required.'])
| [
"github@baileygb.com"
] | github@baileygb.com |
a3d39360dcbd2e1a093274c5cd218d6cce15b86a | 0d9b2842b799b2f27886c2a9904e5a9cc05634dc | /ermine/test/units.py | 1cfa27af28b64ddf3e4b4683be8bfc78c3c6febc | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kitfactory/ermine | fbf544402b30bbae72fcc79ee4d66667b0166fa3 | d2095a52ac7e8feab0846d82077214433202805f | refs/heads/master | 2020-04-22T03:15:06.821078 | 2020-01-07T15:25:06 | 2020-01-07T15:25:06 | 170,079,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | from typing import List
from .. base import Bucket, OptionInfo, OptionDirection, ErmineUnit
class FooUnit(ErmineUnit):
    """Minimal test unit: writes 'HelloWorld' under the configured message
    key and copies the (global) task option into the bucket."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def prepare_option_infos() -> List[OptionInfo]:
        """Declare this unit's options: one output key, one global input."""
        # Output option: bucket key under which the message will be stored.
        message_opt = OptionInfo(
            name='message_key',
            direction=OptionDirection.OUTPUT,
            values=['Message'])
        # Input option: presumably resolved from the global 'Task' setting
        # by the framework -- note the '$GLOBAL.Task$' placeholder.
        task_opt = OptionInfo(
            name='task',
            direction=OptionDirection.INPUT,
            values=['$GLOBAL.Task$'])
        return [message_opt, task_opt]

    def run(self, bucket: Bucket):
        """Execute the unit: populate *bucket* and trace the options."""
        print('foo unit options ', self.options)
        bucket[self.options['message_key']] = 'HelloWorld'
        print('option[task]', self.options['task'])
        bucket['task'] = self.options['task']
        print("FooUnit.run()")
        print('FooUnit.task', self.options['task'])
| [
"kitfactory@gmail.com"
] | kitfactory@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.