blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ab5f894430e4173d4f912b2ff27306986e39d566 | 146c71808bdd5fa458ef73df4a9b5837c83e779d | /tests/check_accuracy/check_accuracy_tests.py | 5bad11a87167ac8aab9336ec6f018f0846e9a884 | [
"MIT"
] | permissive | aladdinpersson/aladdin | 62cff7ed8c014db91505545986e17b85e1656f98 | 4fd92ff3b6e74761fff75b01070930c9ec6ce29f | refs/heads/main | 2023-04-15T14:41:27.236738 | 2021-04-15T10:39:11 | 2021-04-15T10:39:11 | 352,296,885 | 13 | 3 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | # Import folder where sorting algorithms
import sys
import unittest
import numpy as np
# For importing from different folders
# OBS: This is supposed to be done with automated testing,
# hence relative to folder we want to import from
sys.path.append("aladdin/")
# If run from local:
# sys.path.append('../../ML/algorithms/linearregression')
from check_accuracy import check_accuracy
class TestCheckAccuracy(unittest.TestCase):
    """Unit-test scaffold for the check_accuracy helper (no real cases yet)."""
    def setUp(self):
        # No fixtures are required for the placeholder test below.
        pass
    def test(self):
        # TODO: add real assertions exercising check_accuracy.
        pass
if __name__ == "__main__":
    print("Running Check Accuracy tests")
    unittest.main()
| [
"aladdin.persson@hotmail.com"
] | aladdin.persson@hotmail.com |
63ccfbaa0b7c07a464cd8183391df273f7cbb8a8 | f30d84401a8ad53bb7ed2688a0ad600002b6312b | /inbuilt_module.py | 2009d561eb8f189c1253e99c006f7129d5a9b984 | [] | no_license | edmonddante125/python_class | 05b81551c6ebf7acc3bff0a9c25b492e6c2d761c | ad99f9a84b1d553dbaa785703a18221c65c20752 | refs/heads/master | 2022-04-23T20:55:22.057306 | 2020-04-19T05:47:57 | 2020-04-19T05:47:57 | 256,922,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 18 10:38:54 2020
@author: manish
"""
#webscraping
import urllib.request

# Fetch the Indeed job-search results page and dump the raw response body.
# BUG FIX: the response object was never assigned, so the name `data` on the
# next line raised a NameError.  Bind the HTTP response before reading it.
data = urllib.request.urlopen("https://www.indeed.co.in/jobs?q=python+developer&1=Hyderabad%2C+Telangana")
print(data.read())
"manishsaikris12@gmail.com"
] | manishsaikris12@gmail.com |
1b79a3126d4e62c9a1b815a8ccfb253d84753b69 | df2aec72123e616139ff5864e50bd9a6bc8b64ac | /proyecto/views.py | ca3c33da2fa297919c2bc757b0bde76b1c629cc0 | [] | no_license | rbalda/induccion2017 | fbbd726b401082c6cdce0675a6ab6a3e0c90d073 | 08cba952da3c44f409c7c426c365ccfb8509aae2 | refs/heads/master | 2021-01-19T04:44:44.452592 | 2017-04-05T17:36:13 | 2017-04-05T17:36:13 | 87,392,098 | 0 | 0 | null | 2017-04-06T06:03:59 | 2017-04-06T06:03:59 | null | UTF-8 | Python | false | false | 2,764 | py | from proyecto.serializers import UserSerializer,ProfileSerializer,AcertijoSerializer,TesoroSerializer,MedallaSerializer,FacultadSerializer,OpcionSerializer
from proyecto.forms import UserForm,ProfileForm,AcertijoForm,TesoroForm,MedallaForm,FacultadForm,OpcionForm
from proyecto.models import User,Profile,Acertijo,Tesoro,Medalla,Facultad,Opcion
from django.views.decorators.csrf import csrf_exempt
from django.template.context_processors import csrf
from django.http import HttpResponse, JsonResponse
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest_framework import viewsets
# Create your views here.
class AcertijoViewSet(viewsets.ModelViewSet):
    """REST endpoints for Acertijo records, serialized via AcertijoSerializer."""
    queryset = Acertijo.objects.all()
    serializer_class = AcertijoSerializer
class UserViewSet(viewsets.ModelViewSet):
    """REST endpoints for User records, serialized via UserSerializer."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class TesoroViewSet(viewsets.ModelViewSet):
    """REST endpoints for Tesoro records, serialized via TesoroSerializer."""
    queryset = Tesoro.objects.all()
    serializer_class = TesoroSerializer
class MedallaViewSet(viewsets.ModelViewSet):
    """REST endpoints for Medalla records, serialized via MedallaSerializer."""
    queryset = Medalla.objects.all()
    serializer_class = MedallaSerializer
class OpcionViewSet(viewsets.ModelViewSet):
    """REST endpoints for Opcion records, serialized via OpcionSerializer."""
    queryset = Opcion.objects.all()
    serializer_class = OpcionSerializer
class ProfileViewSet(viewsets.ModelViewSet):
    """REST endpoints for Profile records, serialized via ProfileSerializer."""
    queryset = Profile.objects.all()
    serializer_class = ProfileSerializer
"""def savePost(request):
user_form = UserForm()
profile_form = ProfileForm()
acertijo_form = AcertijoForm()
facultad_form = FacultadForm()
medalla_form = MedallaForm()
tesoro_form = TesoroForm()
opcion_form = OpcionForm()
if request.method == 'POST':
user_form = UserForm(request.POST,prefix="user")
profile_form = ProfileForm(request.POST, prefix = "profile")
acertijo_form = AcertijoForm(request.POST, prefix = "acertijo")
facultad_form = FacultadForm(request.POST, prefix = "facultad")
medalla_form = MedallaForm(request.POST, prefix = "medalla")
tesoro_form = TesoroForm(request.POST, prefix = "tesoro")
opcion_form = OpcionForm(request.POST, prefix = "opcion")
if profile_form.is_valid() and acertijo_form.is_valid() and facultad_form.is_valid() and medalla_form.is_valid() and tesoro_form.is_valid() and opcion_form.is_valid():
profile_form.save()
acertijo_form.save()
facultad_form.save()
medalla_form.cleaned_data["medalla"]
medalla_form.save()
tesoro_form.cleaned_data["tesoro"]
tesoro_form.save()
opcion_form.save()
else:
return HttpResponse(status=500)
return HttpResponse(status=200)
else:
return HttpResponse(status=400)"""
| [
"ngalvare@hotmail.com"
] | ngalvare@hotmail.com |
fe6df273d0824aeb08610dde5812f46f73da6587 | 17cb31350a9d0996e19dd111fc31980df03f82bf | /strawberryfields/devicespecs/device_specs.py | a32f9ef474ec3c53c88b0f76495cc55a33f98019 | [
"Apache-2.0"
] | permissive | zeta1999/strawberryfields | 3eee705b711bd195cc6f1510461d75f6e7d9821b | 1bf05585be3553a7bb5c2f687dc45b7a064ddb17 | refs/heads/master | 2020-06-09T02:56:19.840324 | 2019-06-21T16:50:59 | 2019-06-21T16:50:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,785 | py | # Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base class for storing device data for validation"""
from typing import List, Set, Dict, Union
import abc
import blackbird
from blackbird.utils import to_DiGraph
class DeviceSpecs(abc.ABC):
    """Abstract container describing a backend device's capabilities."""

    @property
    @abc.abstractmethod
    def modes(self) -> Union[int, None]:
        """Number of modes the device supports (0 means unrestricted).

        Returns:
            int: number of supported modes
        """

    @property
    @abc.abstractmethod
    def local(self) -> bool:
        """Whether circuits can be executed locally.

        Returns:
            bool: ``True`` if local execution is supported
        """

    @property
    @abc.abstractmethod
    def remote(self) -> bool:
        """Whether circuits can be submitted for remote execution.

        Returns:
            bool: ``True`` if remote execution is supported
        """

    @property
    @abc.abstractmethod
    def interactive(self) -> bool:
        """Whether backend state persists between engine executions.

        Returns:
            bool: ``True`` if interactive use is supported
        """

    @property
    @abc.abstractmethod
    def primitives(self) -> Set[str]:
        """Names of the quantum operations the backend supports directly.

        Returns:
            set[str]: supported primitive operations
        """

    @property
    @abc.abstractmethod
    def decompositions(self) -> Dict[str, Dict]:
        """Operations supported indirectly, via decomposition.

        Maps each operation name to the keyword arguments handed to its
        :meth:`~Operation.decompose` method during :class:`Program`
        compilation, i.e. ``{'operation_name': {'option1': val, ...}}``.

        Returns:
            dict[str, dict]: operations supported through decomposition
        """

    @property
    def parameter_ranges(self) -> Dict[str, List[List[float]]]:
        """Optional allowed parameter ranges for supported operations.

        Returns:
            dict[str, list]: maps an operation name to a nested list
            ``[[p0_min, p0_max], [p1_min, p1_max], ...]`` where ``pi`` is
            the ``i`` th gate parameter
        """
        return {}

    @property
    def graph(self):
        """Allowed circuit topology as a directed acyclic graph.

        Optional; ``None`` when the device accepts arbitrary topologies.

        Returns:
            networkx.DiGraph: a directed acyclic graph
        """
        circuit = self.circuit
        if circuit is None:
            return None
        bb = blackbird.loads(circuit)
        if not bb.is_template():
            # Fixed circuit: convert it to a DAG directly.
            return to_DiGraph(bb)
        # Template circuit: instantiate with every template parameter set
        # to zero before building the DAG.
        zeroed = {name: 0 for name in bb.parameters}
        return to_DiGraph(bb(**zeroed))

    @property
    def circuit(self):
        """Serialized Blackbird circuit or template accepted by the device.

        Optional; ``None`` when arbitrary topologies are allowed (do not
        override in that case).

        Returns:
            Union[str, None]: Blackbird program or template string
        """
        return None
| [
"noreply@github.com"
] | noreply@github.com |
af01f88b07e52b445a67db5324834fedfdd90840 | 0ae30f2f0fa9ac6df3ad3d2eafee0a3c5e83445b | /rllab/envs/learning_base.py | 9e46b0d2dcc2bbbcad9c1155f8e104e2c08c899a | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | richa-verma/ChaserInvader | 114c1bc7e40a36358bf434aa9057c0945c176e89 | e6f63b20a13a264c3cb15c9633304f7677264dc7 | refs/heads/master | 2021-05-01T06:25:43.438745 | 2018-03-04T08:05:26 | 2018-03-04T08:05:26 | 121,142,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,392 | py | import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from rllab.spaces import Box
from rllab.envs.base import Env
from rllab.envs.base import Step
from rllab.spaces import Box
from rllab.misc.overrides import overrides
from rllab.misc import logger
#continous mountain car
#author: richa
class ChaserInvaderEnv(gym.Env):
    """Pursuit environment: a guard (chaser) tries to intercept an invader
    heading for a base station on a 30x30 field.

    State layout (12 floats): [x, y, velocity, steering angle phi] for the
    guard (indices 0-3), the invader (4-7) and the base station (8-11).
    Action layout (4 floats): [guard velocity, guard phi, base velocity,
    base phi]; the invader moves autonomously toward the base.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 30
    }
    def __init__(self):
        self.viewer = None
        self.reward = 0.0
        self.L = 2.0  # wheelbase used in the bicycle-style motion update
        self.prev_reward = 0.0
        self.phi_max = math.pi/2  # steering angle limit
        self.rho_min = self.L/math.tan(self.phi_max)  # minimum turning radius
        self.min_position = 0.0
        self.max_position = 30.0  # square field: coordinates clamped to [0, 30]
        self.vel_g = 1.2  # guard speed bound
        self.vel_b = 1.0  # base station speed bound
        self.vel_i = 1.0  # invader (constant) speed
        self.min_action_vel = -1.0
        self.max_action_vel = 1.2
        self.thresh_distance = 1.0 #difference between distances at which invader is caught
        self.thresh_distance_gi = 0.25 #distance between invader and guard when invader is considered caught
        self.thresh_distance_ib = 0.75 #distance between invader and base station when the invader has reached the base station
        #state: guard stats, invader stats, base station stats
        self.low_state = np.array([self.min_position,self.min_position, -self.vel_g, -self.phi_max,self.min_position,self.min_position, -self.vel_i, -self.phi_max,self.min_position,self.min_position, -self.vel_b, -self.phi_max])
        # NOTE(review): the last high bound below is -phi_max; it looks like
        # it should be +phi_max -- confirm the intended observation space.
        self.high_state = np.array([self.max_position,self.max_position,self.vel_g, self.phi_max,self.max_position,self.max_position,self.vel_i, self.phi_max,self.max_position,self.max_position,self.vel_b, -self.phi_max])
        self.observation_space = Box(low=self.low_state, high=self.high_state)
        self.action_space = Box(np.array([self.min_action_vel,-self.phi_max,self.min_action_vel,-self.phi_max]), np.array([self.vel_g,+self.phi_max,self.vel_b,+self.phi_max])) # speed, angle
        self.seed()
        self.reset()
    def seed(self, seed=None):
        """Seed the internal RNG; returns the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def step(self, action):
        """Advance the simulation one tick.

        action = [guard velocity, guard phi, base velocity, base phi].
        Returns Step(observation, reward, done, info): +10 / done when the
        guard catches the invader, -10 / done when the invader reaches the
        base, otherwise a per-step penalty while the chase continues.
        """
        # Unpack the 12-element state vector.
        position_g = [self.state[0],self.state[1]] #guard
        velocity_g = self.state[2]
        phi_g = self.state[3]
        position_i = [self.state[4],self.state[5]] #invader
        velocity_i = self.state[6]
        phi_i = self.state[7]
        position_b = [self.state[8],self.state[9]] #base station
        velocity_b = self.state[10]
        phi_b = self.state[11]
        action_vel_g = action[0] #velocity from action
        action_phi_g = action[1] #phi from action
        action_vel_b = action[2]
        action_phi_b = action[3]
        # Euclidean distances: guard-to-invader (d3), base-to-invader (d1).
        d3 = math.sqrt((position_g[1]-position_i[1])**2+(position_g[0]-position_i[0])**2) #d3 = position_g - position_i
        d1 = math.sqrt((position_b[1]-position_i[1])**2+(position_b[0]-position_i[0])**2) #d1 = position_b - position_i
        done = 0
        reward = 0.0
        info = []
        # Terminal checks: compare the two distances and the catch radii.
        if((d3-d1)>0 and (d3-d1)<self.thresh_distance):
            reward = 10.0
            done = True
            info = ["invader caught"]
        elif((d1-d3)>0 and (d1-d3)<self.thresh_distance):
            reward = -10.0
            done = True
            info = ["invader won"]
        elif(d3<self.thresh_distance_gi):
            reward = 10.0
            done = True
            info = ["invader caught"]
        elif(d1<self.thresh_distance_ib):
            reward = -10.0
            done = True
            info = ["invader won"]
        else:
            # Clamp the commanded speeds to the allowed range.
            if action_vel_g > self.max_action_vel:
                action_vel_g = self.max_action_vel
            if action_vel_g < self.min_action_vel:
                action_vel_g = self.min_action_vel
            if action_vel_b > self.max_action_vel:
                action_vel_b = self.max_action_vel
            if action_vel_b < self.min_action_vel:
                action_vel_b = self.min_action_vel
            #compute position of guard
            theta = (action_vel_g/self.L)*math.tan(action_phi_g)
            x_g = position_g[0] + action_vel_g*math.cos(theta)
            y_g = position_g[1] + action_vel_g*math.sin(theta)
            # Clamp the guard to the field.
            if x_g < self.min_position:
                x_g = 0.0
            if x_g > self.max_position:
                x_g = 30.0
            if y_g < self.min_position:
                y_g = 0.0
            if y_g > self.max_position:
                y_g = 30.0
            #compute position of base if its velocity is not 0
            if(action_vel_b != 0):
                # NOTE(review): atan here vs tan in the guard update above --
                # confirm which heading formula is intended.
                theta = (action_vel_b/self.L)*math.atan(action_phi_b)
                x_b = position_b[0] + action_vel_b*math.cos(theta)
                y_b = position_b[1] + action_vel_b*math.sin(theta)
                if x_b < self.min_position:
                    x_b = 0.0
                if x_b > self.max_position:
                    x_b = 30.0
                if y_b < self.min_position:
                    y_b = 0.0
                if y_b > self.max_position:
                    y_b = 30.0
            # Per-step penalty while the chase is still running.
            # NOTE(review): indentation reconstructed from a flattened dump;
            # this penalty may have been intended only when the base moves.
            reward = reward - 2
            #computing invader's position(invader will move on straight line between base and invader)
            t_theta = ((self.state[9]-self.state[5])/(self.state[8]-self.state[4]))
            self.state[7] = math.atan(t_theta)
            x_i = self.state[4] + self.state[6]*math.cos(self.state[7])
            y_i = self.state[5] + self.state[6]*math.sin(self.state[7])
            # Clamp the invader to the field.
            if x_i < self.min_position:
                x_i = 0.0
            if x_i > self.max_position:
                x_i = 30.0
            if y_i < self.min_position:
                y_i = 0.0
            if y_i > self.max_position:
                y_i = 30.0
            info = ["still working"]
            # NOTE(review): when action_vel_b == 0, x_b/y_b above are never
            # bound and the next line raises NameError -- confirm the base is
            # always commanded to move, or guard this assignment.
            self.state = np.array([x_g,y_g, action_vel_g, action_phi_g, x_i,y_i, self.state[6], self.state[7], x_b,y_b,action_vel_b,action_phi_b]) #keep velocity of invader and base station constant
        return Step(observation=self.state, reward=reward, done=done,info=info)
    def reset(self):
        """Reset to the fixed start configuration and return a state copy."""
        self.state = np.array([20,0, 1.2, 0, 0,0, 1.0, float(0.0), 10,0,0.0,0]) #acc to constraint d3 <= d1
        return np.array(self.state)
    @overrides
    def log_diagnostics(self, paths):
        """Log forward-progress statistics over a batch of rollouts."""
        # Progress = change of observation component -3 over the rollout.
        progs = [
            path["observations"][-1][-3] - path["observations"][0][-3]
            for path in paths
        ]
        logger.record_tabular('AverageForwardProgress', np.mean(progs))
        logger.record_tabular('MaxForwardProgress', np.max(progs))
        logger.record_tabular('MinForwardProgress', np.min(progs))
        logger.record_tabular('StdForwardProgress', np.std(progs))
    def render(self, mode='human'):
        """Render guard, invader and base as rectangles (lazy viewer setup)."""
        screen_width = 600
        screen_height = 400
        world_width = self.max_position - self.min_position
        scale = screen_width/world_width
        cartwidth=40
        cartheight=20
        if self.viewer is None:
            # First call: build the viewer and the three rectangle geoms.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2
            guard = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
            self.guardtrans = rendering.Transform()
            guard.add_attr(self.guardtrans)
            self.viewer.add_geom(guard)
            invader = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
            self.invadertrans = rendering.Transform()
            invader.add_attr(self.invadertrans)
            self.viewer.add_geom(invader)
            base = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
            self.basetrans = rendering.Transform()
            base.add_attr(self.basetrans)
            self.viewer.add_geom(base)
        if self.state is None: return None
        x = self.state
        # NOTE(review): these screen coordinates are computed but never
        # written to the Transform objects, so the geoms never move; also
        # basey uses screen_width where screen_height looks intended.
        guardx = x[0]*scale+screen_width/2.0
        guardy = x[1]*scale+screen_height/2.0
        invaderx = x[4]*scale+screen_width/2.0
        invadery = x[5]*scale+screen_height/2.0
        basex = x[8]*scale+screen_width/2.0
        basey = x[9]*scale+screen_width/2.0
        return self.viewer.render(return_rgb_array = mode=='rgb_array')
def close(self):
if self.viewer: self.viewer.close() | [
"sagar15056@iiitd.ac.in"
] | sagar15056@iiitd.ac.in |
7f7a6e5e0d461fb8690c5fcb3502db66bced6184 | 30fe7671b60825a909428a30e3793bdf16eaaf29 | /.metadata/.plugins/org.eclipse.core.resources/.history/ba/b0bdbe08c6f800161174a93fd5908e78 | d28f1b3c11c1df4b08726984489283823c27df6f | [] | no_license | abigdream84/PythonStudy | 0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1 | 059274d3ba6f34b62ff111cda3fb263bd6ca8bcb | refs/heads/master | 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | #!/usr/bin/env python
#coding:UTF-8
from audit_demo.utility.MySqlHelper import MySqlHelper
class g_table(object):
| [
"abigdream@hotmail.com"
] | abigdream@hotmail.com | |
2b15d4edbdaa9bd7a46091e71cc25e06ad6b5669 | 94ec31691a32b78953bc57144079cd7527240c00 | /scripts/tools/format_jsonnet.py | ddf4760342d0ae4d3c690162d12282d3f11499aa | [
"Apache-2.0"
] | permissive | realmar/Jobbernetes | c543b0a89db456997632a85b7b836fa79b1a9aeb | 1989d8f24beabb6d8a0e482bb1812db8a6015458 | refs/heads/main | 2023-06-19T03:20:05.747892 | 2021-07-14T13:41:24 | 2021-07-14T13:41:24 | 356,053,263 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | #!/usr/bin/env python3
import __init__
from itertools import chain
from pathlib import Path
from lib import run_shell_print, SPECS_DIR, K3D_VOLUMES_PATH
if __name__ == "__main__":
command = "jsonnetfmt --indent 2 --max-blank-lines 2 --sort-imports --string-style s --comment-style s -i {}"
for file in chain(Path(SPECS_DIR).rglob('*.jsonnet'), Path(SPECS_DIR).rglob('*.libsonnet'), [Path(K3D_VOLUMES_PATH)]):
strfile = str(file.absolute())
if "vendor" not in strfile:
run_shell_print(command.format(strfile))
| [
"anastassios.martakos@outlook.com"
] | anastassios.martakos@outlook.com |
1cbeaf068eba123dc4966e2c3b506aa29148b80b | 3ae62276c9aad8b9612d3073679b5cf3cb695e38 | /easyleetcode/leetcodes/Leetcode_105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py | 1485bb44b74ef3a8f62d1d7d1e19faff930fb29d | [
"Apache-2.0"
] | permissive | gongtian1234/easy_leetcode | bc0b33c3c4f61d58a6111d76707903efe0510cb4 | d2b8eb5d2cafc71ee1ca633ce489c1a52bcc39ce | refs/heads/master | 2022-11-16T17:48:33.596752 | 2020-07-13T02:55:03 | 2020-07-13T02:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py |
class TreeNode(object):
    """A binary-tree node: a payload value plus left/right child links."""
    def __init__(self, x):
        self.left = None
        self.right = None
        self.val = x
class Solution(object):
    """LeetCode 105: reconstruct a binary tree from preorder + inorder."""
    def buildTree(self, preorder, inorder):
        """Rebuild the tree; assumes node values are unique (used as keys)."""
        n = len(inorder)
        # Value -> inorder index, for O(1) root lookups in the helper.
        inOrderMap = {inorder[i]: i for i in range(n)}
        return self.buildTreeUtil(preorder, inorder, inOrderMap, 0, n - 1, 0, n - 1)
    def buildTreeUtil(self, preorder, inorder, inOrderMap, pStart, pEnd, iStart, iEnd):
        """Recursive helper over preorder[pStart..pEnd] / inorder[iStart..iEnd]."""
        if pStart > pEnd or iStart > iEnd:
            return None
        # The root is always the first element of the preorder slice.
        root = TreeNode(preorder[pStart])
        # Locate the root inside the inorder slice.
        rootIdx = inOrderMap[root.val]
        # Left subtree: rootIdx - iStart gives the number of left-side nodes;
        # its preorder slice is (pStart + 1 .. pStart + rootIdx - iStart).
        root.left = self.buildTreeUtil(preorder, inorder, inOrderMap, pStart + 1, pStart + rootIdx - iStart, iStart,
                                       rootIdx - 1)
        # Right subtree: preorder starts just after the left slice ends,
        # i.e. at pStart + rootIdx - iStart + 1.
        root.right = self.buildTreeUtil(preorder, inorder, inOrderMap, pStart + rootIdx - iStart + 1, pEnd, rootIdx + 1,
                                        iEnd)
        return root
    def buildTree2(self, preorder, inorder):
        """Alternative: pop the root off preorder and slice (mutates preorder)."""
        if not preorder:
            return None
        # preorder: root, left, right; inorder: left, root, right.
        x = preorder.pop(0)
        node = TreeNode(x)
        i = inorder.index(x)
        # After pop(0), preorder holds only left+right; [:i] is the left part.
        node.left = self.buildTree2(preorder[:i], inorder[:i])
        node.right = self.buildTree2(preorder[i:], inorder[i + 1:])
        return node
| [
"425776024@qq.com"
] | 425776024@qq.com |
90904f213074558cd90e413783c1a851ce07f3da | 55550afe1c18aacba9a481c690755cb7395d35f1 | /Week_01/G20190343020019/LeetCode_26_0019.py | 84af779cd76ff44c31d90633db3c8cc0cfbca318 | [] | no_license | algorithm005-class02/algorithm005-class02 | eb5c0865fbb2c58362fddcd4fc8f8b9d02bb208c | 1a1abf5aabdd23755769efaa6c33579bc5b0917b | refs/heads/master | 2020-09-22T11:48:20.613692 | 2020-03-02T05:31:11 | 2020-03-02T05:31:11 | 225,177,649 | 45 | 153 | null | 2020-03-02T05:31:13 | 2019-12-01T14:47:06 | Java | UTF-8 | Python | false | false | 366 | py | class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
size = len(nums)
if size == 0:
return 0
j, pre = 1, nums[0]
for i in range(1, size):
if nums[i] != pre:
if i != j:
nums[j] = nums[i]
j += 1
pre = nums[i]
return j | [
"your@email.com"
] | your@email.com |
0f03a302c230541b088a7d1a1fe72c11c2e23cb3 | 473035074bd546694d5e3dbe6decb900ba79e034 | /traffic fluid simulator/backend/env_4_6/model/ExportData.py | e92e4e0140bedbeb4290ef2eb08d29b3a966c9a7 | [] | no_license | johny1614/magazyn | 35424203036191fb255c410412c195c8f41f0ba5 | a170fea3aceb20f59716a7b5088ccdcb6eea472f | refs/heads/master | 2022-03-26T01:10:04.472374 | 2019-09-19T16:34:22 | 2019-09-19T16:34:22 | 171,033,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | import json
from typing import List
import attr
from numpy.core.multiarray import ndarray
from model.Net import Net
@attr.s(auto_attribs=True)
class ExportData:
    """Bundle describing one learning run, exported as JSON for the frontend."""
    learningMethod: str
    learningEpochs: int
    nets: List[Net]
    netName: str
    densityName: str
    def __attrs_post_init__(self):
        """Normalise numpy arrays on each net to plain lists so json.dumps works."""
        for net in self.nets:
            if isinstance(net.densities, ndarray):
                net.densities = net.densities.tolist()
            if isinstance(net.lights, ndarray):
                net.lights = net.lights.tolist()
    def saveToJson(self):
        """Write this object as <netName>_<densityName>.json under the
        frontend assets directory, trying both known relative locations.

        BUG FIX: serialization now happens before any file is opened, and
        only the open() call is retried on the fallback path.  The original
        bare ``except:`` also caught json.dumps() failures and then crashed
        with NameError on the undefined ``jsonData``; it also leaked the
        file handle when the write itself failed.
        """
        payload = json.dumps(attr.asdict(self))
        relative = 'front/src/assets/densities/' + self.netName + '_' + self.densityName + '.json'
        try:
            outfile = open('../../' + relative, 'w')
        except OSError:
            # Fallback for the alternative working directory layout.
            outfile = open('../../../' + relative, 'w')
        try:
            outfile.write(payload)
        finally:
            outfile.close()
def shift(lista, n):
    """Rotate *lista* left by *n* positions, returning a new sequence."""
    head, tail = lista[:n], lista[n:]
    return tail + head
| [
"johny1614@gmail.com"
] | johny1614@gmail.com |
2511436d300089fc345156b39b9a4e78d798bb77 | 82243e81094429a3b4d0cbaa1179c2b9ce7eb8a6 | /basic_method.py | 877ce768a02e29e797929acbfd645f8987becece | [] | no_license | drake-young/Space_Removal_and_Replacement | 741fd0c9ca2da9bcd077f040dca937f192e21866 | 3b4e51d31967bcedb5d14985e4f079e4596d4cad | refs/heads/master | 2020-05-18T05:04:30.760047 | 2019-04-30T14:09:28 | 2019-04-30T14:09:28 | 184,193,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,616 | py | # ==================================================
# basic_method.py
# ==================================================
#
# Program: Space Removal and Replacement
# Author: Drake Young
# Date: 27 April 2018
#
# Program Description:
# The intent of this program is a simple
# proof-of-concept. The program begins with a
# given piece of text. Then the program performs
# two general operations
# 1. Remove all the spaces (' ') from the text
# 2. Put all the spaces (' ') back into the text
#
# File Description:
# This file (basic_method.py) is meant to highlight
# a basic method for doing this. It records the
# indices that spaces (' ') appear in the original
# text, then removes/restores spaces at those indices.
#
# This method is considered "basic" compared to the
# other methods implemented, which use various forms
# of genetic/learning algorithms to "learn" what it
# is supposed to be doing (see those files for more
# information)
#
# ==================================================
# ==================================================
# IMPORTS
# ==================================================
#
# * timeit: default_timer is used for testing
# performance
#
# ==================================================
from timeit import default_timer # used to time performance
# ==================================================
# FUNCTION: gather_text_from_file
# ==================================================
#
# Input: Name of file (or defaults to 'text.txt'
#
# Output: String containing the contents of the file
#
# Task: Open the file, read its contents into a
# string, close the file.
#
# ==================================================
def gather_text_from_file( file='text.txt' ):
    """Read *file* and return its entire contents as a string.

    BUG FIX: use a context manager so the handle is closed even when
    read() raises; the original leaked the handle on error.
    """
    with open( file , 'r' ) as f:
        return f.read( )
# ==================================================
# FUNCTION: get_space_indices
# ==================================================
#
# Input: String to operate on
#
# Output: list of indices of the given string that
# contain the space ' ' character
#
# Task: Use a generator to create a list of indices
# on a condition that the index contains a space
# in the original string
#
# ==================================================
def get_space_indices( text ):
    """Return the set of indices in *text* that hold a space character.

    BUG FIX: the original compared characters with ``is ' '`` -- an identity
    test that only works because CPython happens to intern short string
    literals.  Use ``==`` for value equality.
    """
    return {i for i, char in enumerate(text) if char == ' '}
# ==================================================
# FUNCTION: remove_spaces
# ==================================================
#
# Input: String to operate on.
#
# Output: string with all the given indices removed
#
# Task: iterate backwards over the given indices, and
# "slice out" each of these indices
#
# ==================================================
def remove_spaces( text , indices ):
    """Return *text* with the characters at the given indices dropped."""
    kept = (char for position, char in enumerate(text) if position not in indices)
    return ''.join(kept)
# ==================================================
# FUNCTION: restore_spaces
# ==================================================
#
# Input: String to operate on.
#
# Output: string with spaces inserted at each of the
# specified indices (index represents position
# of the string post-insertion)
#
# Task: Iterate through the given indices and
# "slice in" a space ' ' character to the
# given text
#
# ==================================================
def restore_spaces( text , indices ):
    """Return *text* with a space inserted at each index in *indices*.

    Each index refers to the position of the space in the final string,
    so insertions must be applied in ascending order.  BUG FIX: the
    original iterated the set directly, relying on undefined set
    iteration order; an out-of-order insertion shifts later positions
    and corrupts the result.
    """
    result = text
    for i in sorted(indices):
        result = result[ : i ] + ' ' + result[ i : ]
    return result
# ==================================================
# FUNCTION: print_pretty
# ==================================================
#
# Input:
# - label: string to be used as output
# label
# - text: string used as the body of
# output
# - divider_char: character used as divider
# in output
# - divider_len: number of divider characters
# to be printed each time
#
# Output: prints the specified content to the
# console with formatting
#
# Task: format the output to the console window
#
# ==================================================
def print_pretty(label='RESULT', text='', divider_char='-', divider_len=50):
    """Print *text* beneath *label*, framed by divider lines and blank lines."""
    divider = divider_char * divider_len
    # Same seven-line layout as before: blank, rule, label, rule, body,
    # rule, blank.
    for line in ('', divider, label, divider, text, divider, ''):
        print(line)
    return
# ==================================================
# FUNCTION: main
# ==================================================
#
# Input: N/A
#
# Output: N/A
#
# Task: Act as the main driver of the program,
# calling function calls appropriately to
# showcase the problem an solution
#
# ==================================================
def main():
    """Demo driver: time each stage of the remove/restore-spaces round trip."""
    t0 = default_timer()
    original_string = gather_text_from_file('large.txt')
    print('Reading File: %.3f ms' % ((default_timer() - t0) * 1000))

    t0 = default_timer()
    space_indices = get_space_indices(original_string)
    print('Gathering Indices: %.3f ms' % ((default_timer() - t0) * 1000))

    t0 = default_timer()
    spaces_removed = remove_spaces(original_string, space_indices)
    print('Removing Spaces: %.3f ms' % ((default_timer() - t0) * 1000))

    t0 = default_timer()
    spaces_restored = restore_spaces(spaces_removed, space_indices)
    print('Restoring Spaces: %.3f ms' % ((default_timer() - t0) * 1000))

    # Show each stage of the round trip.
    print_pretty('Original Text:', original_string)
    print_pretty('Indices of Spaces:', space_indices)
    print_pretty('Spaces Removed:', spaces_removed)
    print_pretty('Spaces Replaced:', spaces_restored)
    return
# Run the demo only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| [
"36168851+drake-young@users.noreply.github.com"
] | 36168851+drake-young@users.noreply.github.com |
a8ca657e20b911bfc85568a3275b444b0cb82e97 | 8d3855d12eca0d962dae9c4b9d57511b0a23d164 | /fundamentals/forLoopBasicII.py | b86842e4936d662235bb906f9d99a4f31ebe0bce | [] | no_license | mannyinwang/Pyhton-Stack | 75ef5d7a76143edc79c6dada2c8cad6898a1931e | 0e3500d93f5e8db8201a54617803995943e98daa | refs/heads/master | 2020-12-13T13:23:48.669837 | 2020-01-21T09:34:58 | 2020-01-21T09:34:58 | 234,430,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | # Biggie Size
def bigSize (a):
list_length = len(a)
for i in range(list_length):
if a[i] > 0:
a[i] = "big"
return a
print(bigSize([-1,3,5,-5]))
# Count Positives
def countPositive(a):
    """Count the elements >= 0 in *a* and overwrite its last element with
    that count (the original last value itself participates in the count)."""
    total = sum(1 for value in a if value >= 0)
    a[-1] = total
    return a
print(countPositive([-1,1,1,1]))
# Sum Total
def sumTotal(a):
    """Return the sum of the elements of *a* (0 for an empty list)."""
    total = 0
    for value in a:
        total = total + value
    return total
# BUG FIX: the demo below called the builtin sum() instead of sumTotal().
print(sumTotal([1,2,3,4]))
# Average
def averageNumber(a):
    """Return the arithmetic mean of *a*; returns 0 for an empty list."""
    # FIX: the original recomputed the average on every loop iteration;
    # compute the sum once and divide at the end instead.
    if not a:
        return 0
    return sum(a) / len(a)
print(averageNumber([1,2,3,4]))
# Length
def lengthList(a):
    """Return the number of elements in *a* without calling len()."""
    return sum(1 for _ in a)
print(lengthList([]))
# Minimum
def mini(a):
    """Return the smallest element of *a* (assumes a non-empty list)."""
    smallest = a[0]
    for value in a:
        if value < smallest:
            smallest = value
    return smallest
print(mini([5,2,6,3,4]))
# Maximum
def maxi(a):
    """Return the largest element of *a* (assumes a non-empty list)."""
    largest = a[0]
    for value in a:
        if value > largest:
            largest = value
    return largest
print(maxi([5,2,6,3,4]))
# Ultimate Analysis
def ultimateAnalysis(a):
    """Return a dict with the sum, average, max, min and length of *a*
    (raises IndexError on an empty list, like the original)."""
    smallest = largest = a[0]
    total = 0
    for value in a:
        if value < smallest:
            smallest = value
        if value > largest:
            largest = value
        total = total + value
    return {"sumTotal": total, "Average": total / len(a), "Maximum": largest, "Minimum": smallest, "Length": len(a)}
print(ultimateAnalysis([37,2,1,-9]))
# Reverse list
def reverseList(a):
    """Reverse *a* in place and return it.

    Rewritten as a straightforward two-pointer swap.  Bug fix: the
    original read ``a[0]`` unconditionally and so raised IndexError on
    an empty list; it also juggled a carry variable (``temp``) that
    made the swap logic hard to follow.
    """
    left, right = 0, len(a) - 1
    while left < right:
        a[left], a[right] = a[right], a[left]
        left += 1
        right -= 1
    return a
print(reverseList([1,2,3,4,5,6,7]))
| [
"mannyinwang@gmail.com"
] | mannyinwang@gmail.com |
a2b5b6e5312b7c6de352f327289016ff2e14e11b | 71a4fe1659534092e86f19d2cd645ffa7a2dd06c | /week1b/pe1.py | f57f737502924e3e3a6f9bf71dbecd07da88f733 | [] | no_license | foenyth/ca_aitipip | e04d7bf37596f0899dee3fae23d71fcb8367829c | 49b76ca897315ff01c1b350b8604fe7c5fcb5590 | refs/heads/master | 2021-01-15T15:30:59.159198 | 2016-08-27T09:11:29 | 2016-08-27T09:11:29 | 65,615,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | # Compute whether an integer is even.
###################################################
# Is even formula
# Student should enter function on the next lines.
def is_even(number):
    """Return True when *number* is even, False otherwise."""
    # even numbers leave no remainder modulo 2
    return not number % 2
###################################################
# Tests
# Student should not change this code.
def test(number):
    """Print whether *number* is even or odd, according to is_even().

    NOTE(review): this uses Python 2 ``print`` statements -- the whole
    file is Python 2 only and will not compile under Python 3.
    """
    if is_even(number):
        print number, "is even."
    else:
        print number, "is odd."
test(8)
test(3)
test(12)
###################################################
# Expected output
# Student should look at the following comments and compare to printed output.
#8 is even.
#3 is odd.
#12 is even.
| [
"foenyth@gmail.com"
] | foenyth@gmail.com |
889178905a0c94d6f492f3c62559edfd6bc207fe | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_quill.py | 606bcee98c72edaf39b621ab0b0cf03cce527925 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py |
#calss header
class _QUILL():
def __init__(self,):
self.name = "QUILL"
self.definitions = [u'any of the long sharp pointed hairs on the body of a porcupine', u"a pen made from a bird's feather, used in the past"]
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
b3b8f445150b5fb6c1dccd539c37ffd21cfbe811 | 8104792f0f43c89169733846ffc6fe3d374fedd8 | /data_management/delete_junk.py | ee89547d25fda1d452409565afe6a04ed2619bbc | [] | no_license | rgalvanmadrid/general | fd3e0562070b3ac24a553edba834d65c7dc041dc | 3540e38ce8421e6da12b2b9c3cbb35057fcc27b5 | refs/heads/master | 2022-06-14T07:30:09.978123 | 2022-05-13T15:29:59 | 2022-05-13T15:29:59 | 142,596,766 | 0 | 0 | null | 2019-04-22T16:18:43 | 2018-07-27T15:52:08 | Shell | UTF-8 | Python | false | false | 869 | py | '''
Script to delete "junk" files, or large files that will not be used anytime soon even if they are not junk.
'''
import glob
import os
import shutil
root_path = "/lustre/roberto/ALMA_IMF/lines/normalized"
delete_type = "*ms.split.cal"
files_to_delete = glob.glob("/".join([root_path,"science_goal*","group*","member*","calibrated",delete_type]))
print('Files to delete are: \n')
print(files_to_delete)
print('\n')
# Note: raw_input for Python 2.7, i.e., if script is run within CASA 5.X
# use input instead if running in Python 3.X or CASA 6.X
if raw_input("Do You Want To Continue? [y/n]") == "y":
# do something
for file in files_to_delete:
if os.path.exists(file):
print("Deleting {}".format(file))
shutil.rmtree(file)
else:
print("{} does not exist. Will continue ignoring this.".format(file))
| [
"robertogalvanmadrid@gmail.com"
] | robertogalvanmadrid@gmail.com |
1f521210b944fba4b071cab3142d9a054dcff27a | 07c61596c1fba2e2a7034fe5af9707794ea2e2c1 | /Hackerrank/Algorithms/The_Time_in_Words.py3 | 6a108c9d2715cc2b096f03b911b89a2ab181b31e | [] | no_license | H-Shen/Collection_of_my_coding_practice | 2fcb2f8fef9451ad4a3a9c063bbf6a34ea5966b4 | 6415552d38a756c9c89de0c774799654c73073a6 | refs/heads/master | 2023-08-24T21:19:08.886667 | 2023-08-22T03:47:39 | 2023-08-22T03:47:39 | 180,731,825 | 8 | 1 | null | 2021-08-13T18:25:25 | 2019-04-11T06:48:09 | C++ | UTF-8 | Python | false | false | 983 | py3 | #!/bin/python3
import sys
# Number words needed to spell out times: 1-20 plus the round tens.
_WORDS = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
          'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
          'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty']
table = {value: word for value, word in enumerate(_WORDS, start=1)}
table.update({30: 'thirty', 40: 'forty', 50: 'fifty'})


def handle(n):
    """Spell out a minute count in words, e.g. 29 -> "twenty nine"."""
    if n <= 20 or (n <= 100 and n % 10 == 0):
        return table[n]
    tens = n // 10 * 10
    return table[tens] + ' ' + table[n - tens]
# Read the time as two integers: the hour and the minutes.
h = int(input().strip())
m = int(input().strip())
# Special phrasings first, then the generic "minutes past/to" forms.
if m == 0:
    print(table[h] + " o' clock")
elif m == 30:
    print("half past " + table[h])
elif m == 45:
    print("quarter to " + table[h + 1])
elif m == 15:
    print("quarter past " + table[h])
elif m > 30:
    # Past the half hour we count down toward the NEXT hour.
    # NOTE(review): table[h + 1] raises KeyError when h == 12 -- this
    # presumably relies on the puzzle's input constraints; confirm.
    print(handle(60 - m) + " minutes to " + table[h + 1])
elif m == 1:
    print("one minute past " + table[h])
else:
    print(handle(m) + " minutes past " + table[h])
| [
"haohu.shen@ucalgary.ca"
] | haohu.shen@ucalgary.ca |
794b3a77bc3d17bcf3abcff67fd13ef7b934a601 | 6203656b6468b9b0c5b92144b21290ff0fdfed7b | /projectr/models.py | 6f450098c852cee67e3ef44d1111fa22e84f69cf | [] | no_license | ydo30/JDDKYD | fe2e71ec3f029c5a36301193a3949ba5b23648af | c97619b086a9d9c57203f6aadfa41b01bb1bb879 | refs/heads/master | 2021-06-29T04:50:29.297153 | 2017-09-25T19:11:37 | 2017-09-25T19:11:37 | 104,789,846 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,554 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User, Group
from django.db.models.signals import post_save
from django.dispatch import receiver
import random, string
# =======================================================================================================
class Profile(models.Model):
"""
An extension of the built-in Django user model to allow for the different
types of users
"""
USER_TYPES = (
('S', 'Student'),
('I', 'Instructor'),
('C', 'Client')
)
user = models.OneToOneField(User, on_delete=models.CASCADE)
user_type = models.CharField(max_length=1, choices=USER_TYPES)
section_id = models.IntegerField(null=True)
# These methods are for linking the Profile model with Django built-in User model for authentication
# Reference: https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
# ========================================================================================================
class Project(models.Model):
"""
A model object representing a client-submitted project.
"""
name = models.CharField(max_length=255)
requirements = models.CharField(max_length=255)
keywords = models.CharField(max_length=255)
description = models.CharField(max_length=255)
client = models.ForeignKey(User, on_delete=models.CASCADE)
is_approved = models.BooleanField() # defaults to false
class Question(models.Model):
"""
Represents a question that students can submit on project pages
"""
text = models.CharField(max_length=255)
project = models.ForeignKey(Project, on_delete=models.CASCADE) # project the question was asked on
asker = models.ForeignKey(User, on_delete=models.CASCADE) # The student asking the question
reply = models.CharField(max_length=255) # initially blank until client fills out a reply
class Section(models.Model):
"""
Represents a class section (i.e CS 3312 JDA, JDB, JDC, JDD)
Students and instructors are associated with sections, which allows the correct instructors to see
the students bids
"""
name = models.CharField(max_length=255)
students = models.ManyToManyField(User, related_name="students_for_section")
class Message(models.Model):
"""
Represents a message that students / instructors can send to each other
"""
sender = models.ForeignKey(User, on_delete=models.CASCADE, related_name="sender")
recipient = models.ForeignKey(User, on_delete=models.CASCADE, related_name="recipient")
subject = models.CharField(max_length=255)
text = models.TextField()
class Bid(models.Model):
"""
Represents a bid.
The bid is only associated with the submitter of the bid, rather than every person in a group, so
the accept/reject notification will only show up to the submitter (i.e the "leader" of the group)
"""
team_members = models.CharField(max_length=255)
description = models.CharField(max_length=255)
is_approved = models.BooleanField()
project = models.ForeignKey(Project, on_delete=models.CASCADE) # For (Bid <-> Project) ; many to one
student = models.ForeignKey(User, on_delete=models.CASCADE)
instructors = models.ManyToManyField(User, related_name="instructors_for_bid")
class Notification(models.Model):
"""
Represents a notification, which is distinct from a message in that it does not have a sender
and these are generated by the application (instead of from other users) for events such as
* a clients project is rejected by an instructor
* a bid is awarded / denied
"""
recipient = models.ForeignKey(User, on_delete=models.CASCADE, related_name="notification_recipient")
subject = models.CharField(max_length=255)
text = models.TextField()
# TODO: Change where we put this, maybe in another file
def random_key():
    """Return a random alphanumeric key of 1-20 characters, used as the
    default for InstructorKey.key.

    Bug fix: the original used ``random.randrange(20)``, which yields
    0-19, so it could return an *empty* key -- and InstructorKey.key is
    unique, so a second empty key would violate the constraint.
    """
    alphabet = string.ascii_letters + string.digits
    length = random.randint(1, 20)  # inclusive on both ends: never empty
    return ''.join(random.choice(alphabet) for _ in range(length))
class InstructorKey(models.Model):
"""
Represents an Instructor Key used by instructors to create a section
"""
key = models.CharField(max_length=255, default=random_key, unique=True)
| [
"ydo30@gatech.edu"
] | ydo30@gatech.edu |
05488b74e06f143a147e1b5d9892a1eb406e1b21 | a08fc91ecafa7f2b6c8aed7e1ceb33822d4caa49 | /python/algorithms/tree/segmenttree.py | aec0172273542d5f029e5d57384084e6aba33d5d | [] | no_license | bryand1/snippets | 1fcdd4b67809aa27b58e1239d5cca22cfb962f3d | f779bf147c420996613b0778e243154cd750c3dd | refs/heads/master | 2023-01-23T18:47:07.389246 | 2020-12-31T20:10:13 | 2020-12-31T20:10:13 | 138,767,383 | 0 | 0 | null | 2023-01-19T13:02:49 | 2018-06-26T16:56:15 | Python | UTF-8 | Python | false | false | 853 | py | from sys import maxsize
minsize = -99999
def maxquery(segtree, qlo, qhi, lo, hi, pos):
if qlo <= lo and qhi >= hi:
return segtree[pos]
if qlo > hi or qhi < lo:
return minsize
mid = (lo + hi) // 2
return max(
maxquery(segtree, qlo, qhi, lo, mid, 2 * pos + 1),
maxquery(segtree, qlo, qhi, mid + 1, hi, 2 * pos + 2))
def construct(arr, segtree, lo, hi, pos):
if lo == hi:
segtree[pos] = arr[lo]
return
mid = (lo + hi) // 2
construct(arr, segtree, lo, mid, 2 * pos + 1)
construct(arr, segtree, mid + 1, hi, 2 * pos + 2)
segtree[pos] = max(segtree[2 * pos + 1], segtree[2 * pos + 2])
if __name__ == '__main__':
A = [-1, 0, 3, 2, 5]
tree = [minsize] * 2 * (len(A))
construct(A, tree, 0, len(A) - 1, 0)
print(maxquery(tree, 2, 4, 0, 4, 0))
print(tree)
| [
"bryand1@gmail.com"
] | bryand1@gmail.com |
2cec811450ad60935e4c5e8b03e76ddc9f0d1a62 | ffca40812b6cdf6d2578bdad441a7df154947028 | /ca2/Testing_hand.py | 4f2749535a7ebd11c79734d943009d4b75cb1ccf | [] | no_license | satlikh/python21 | 07568d48da6e32297fd10de5b031f04dbd93b7ce | 26375f0d139a91991a13cc4efb37970461ad0466 | refs/heads/main | 2023-03-20T04:15:18.503945 | 2021-03-13T19:17:48 | 2021-03-13T19:17:48 | 335,094,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,905 | py | from cardlib import *
import pytest
def test_deck():
# Check that there's 13 cards of each suit
deck = StandardDeck()
assert len(deck.cards) == 52
cards = {'Clubs': [], 'Diamonds': [], 'Hearts': [], 'Spades': []}
for i, suit_type in enumerate(cards):
for card_type in range(13):
new_card = deck.draw()
assert new_card # check that we pick up a card
cards[suit_type].append(new_card)
assert new_card.suit.name == suit_type
un = np.unique(cards)
for i, suit_type in enumerate(cards):
assert len(un[0][suit_type]) == 13 # checks uniqueness in suits
def test_hand():
deck = StandardDeck()
deck.shuffle()
hand = Hand()
hand2 = Hand()
for i in range(6):
hand.add_card(deck.draw())
hand.sort_cards()
# Check that the order is from low to high
for i in range(1, len(hand.cards)-1):
assert hand.cards[i].value >= hand.cards[i-1].value
def test_straight_flush():
hand = Hand()
hand.add_card(AceCard(Suit.Spades))
hand.add_card(NumberedCard(2, Suit.Hearts))
hand.add_card(NumberedCard(3, Suit.Hearts))
hand.add_card(NumberedCard(4, Suit.Hearts))
hand.add_card(NumberedCard(5, Suit.Hearts))
# hand.add_card(NumberedCard(7, Suit.Spades))
hand.add_card(AceCard(Suit.Hearts))
check_hand = PokerHand(hand.cards)
assert check_hand.hand_type.value == 9
assert check_hand.hand_type.name == 'straight_flush'
# print('Straight-flush gives:', check_hand.points, 'points with hand:')
# for i in check_hand.best_cards:
# print(i, end=',')
# print('\n')
def test_pair():
deck = StandardDeck()
hand = Hand()
hand.add_card(deck.cards[0])
hand.add_card(deck.cards[13])
hand.add_card(deck.cards[14])
hand.add_card(deck.cards[15])
hand.add_card(deck.cards[16])
check_hand = PokerHand(hand.cards)
assert check_hand.hand_type.value == 2
assert check_hand.hand_type.name == 'one_pair'
# print('Pair gives:', check_hand.points,'points with hand:')
# for i in check_hand.best_cards:
# print(i, end=',')
# print('\n')
def test_two_pairs():
deck = StandardDeck()
hand = Hand()
hand.add_card(deck.cards[0])
hand.add_card(deck.cards[13])
hand.add_card(deck.cards[14])
hand.add_card(deck.cards[27])
hand.add_card(deck.cards[16])
hand.show_hand()
check_hand = PokerHand(hand.cards)
assert check_hand.hand_type.value == 3
assert check_hand.hand_type.name == 'two_pair'
# print('Two pair gives:', check_hand.points,'points with hand:')
# for i in check_hand.best_cards:
# print(i, end=',')
# print('\n')
def test_three():
deck = StandardDeck()
hand = Hand()
hand.add_card(deck.cards[0])
hand.add_card(deck.cards[13])
hand.add_card(deck.cards[26])
hand.add_card(deck.cards[15])
hand.add_card(deck.cards[16])
hand.show_hand()
check_hand = PokerHand(hand.cards)
assert check_hand.hand_type.value == 4
assert check_hand.hand_type.name == 'three_of_a_kind'
# print('Three of a kind gives:', check_hand.points, 'points with hand:')
# for i in check_hand.best_cards:
# print(i, end=',')
# print('\n')
def test_full_house():
deck = StandardDeck()
hand = Hand()
hand.add_card(deck.cards[0])
hand.add_card(deck.cards[13])
hand.add_card(deck.cards[2])
hand.add_card(deck.cards[15])
hand.add_card(deck.cards[14])
hand.add_card(deck.cards[27])
hand.add_card(deck.cards[40])
hand.show_hand()
check_hand = PokerHand(hand.cards)
assert check_hand.hand_type.value == 7
assert check_hand.hand_type.name == 'full_house'
# print('Full house gives:', check_hand.points, 'points with hand:')
# for i in check_hand.best_cards:
# print(i, end=',')
# print('\n')
def test_four():
deck = StandardDeck()
hand = Hand()
hand.add_card(deck.cards[0])
hand.add_card(deck.cards[13])
hand.add_card(deck.cards[26])
hand.add_card(deck.cards[39])
hand.add_card(deck.cards[16])
hand.show_hand()
check_hand = PokerHand(hand.cards)
print(check_hand.best_cards)
assert check_hand.hand_type.value == 8
assert check_hand.hand_type.name == 'four_of_a_kind'
# print('Four of a kind gives:', check_hand.points, 'points with hand:')
# for i in check_hand.best_cards:
# print(i, end=',')
# print('\n')
def test_straight():
deck = StandardDeck()
hand = Hand()
hand.add_card(deck.cards[5])
hand.add_card(deck.cards[14])
hand.add_card(deck.cards[15])
hand.add_card(deck.cards[16])
hand.add_card(deck.cards[17])
hand.show_hand()
check_hand = PokerHand(hand.cards)
assert check_hand.hand_type.value == 5
assert check_hand.hand_type.name == 'straight'
# print('Straight gives:', check_hand.points, 'points with hand:')
# for i in check_hand.best_cards:
# print(i, end=',')
# print('\n')
def test_flush():
deck = StandardDeck()
hand = Hand()
hand.add_card(deck.cards[0])
hand.add_card(deck.cards[2])
hand.add_card(deck.cards[3])
hand.add_card(deck.cards[5])
hand.add_card(deck.cards[12])
hand.show_hand()
check_hand = PokerHand(hand.cards)
assert check_hand.hand_type.value == 6
assert check_hand.hand_type.name == 'flush'
# print('Flush:', check_hand.points, 'points with hand:')
# for i in check_hand.best_cards:
# print(i, end=',')
# print('\n')
def test_PokerHand_order():
hand1 = Hand([NumberedCard(3, Suit.Spades),
NumberedCard(3, Suit.Hearts),
NumberedCard(5, Suit.Diamonds),
NumberedCard(5, Suit.Clubs),
NumberedCard(10, Suit.Spades)])
hand2 = Hand([NumberedCard(4, Suit.Diamonds),
NumberedCard(4, Suit.Hearts),
NumberedCard(5, Suit.Spades),
NumberedCard(5, Suit.Hearts),
NumberedCard(9, Suit.Hearts)])
hand3 = Hand([NumberedCard(4, Suit.Diamonds),
NumberedCard(4, Suit.Hearts),
NumberedCard(5, Suit.Spades),
NumberedCard(5, Suit.Hearts),
NumberedCard(10, Suit.Hearts)])
hand4 = Hand([NumberedCard(4, Suit.Clubs),
NumberedCard(4, Suit.Spades),
NumberedCard(5, Suit.Clubs),
NumberedCard(5, Suit.Diamonds),
NumberedCard(10, Suit.Diamonds)])
ph1 = PokerHand(hand1.cards)
ph2 = PokerHand(hand2.cards)
ph3 = PokerHand(hand3.cards)
ph4 = PokerHand(hand4.cards)
assert ph1 < ph2
assert ph2 < ph3
assert ph3 == ph4
test_pair()
test_two_pairs()
test_three()
test_full_house()
test_flush()
test_straight()
test_four()
test_straight_flush()
test_deck()
test_PokerHand_order()
test_hand()
| [
"obergv@student.chalmers.se"
] | obergv@student.chalmers.se |
1d41eb6ae4fc12acb15c60378e1c758be087de68 | 7cf119239091001cbe687f73018dc6a58b5b1333 | /datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_ZCGG/ZX_ZCGG_SJS_SJSGG.py | 88a9dab2b0df420ba808ab19e9438d674c65ae30 | [
"Apache-2.0"
] | permissive | ILKKAI/dataETL | 0f5b80c3482994f735f092a1e01fa1009bac4109 | 32f7ec3aaaf32b5074536a615cb9cd5c28bd499c | refs/heads/master | 2022-04-04T19:27:05.747852 | 2020-02-28T11:17:48 | 2020-02-28T11:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
if data.get('URL_')[-3:] == 'pdf':
data['PDF_'] = data.get('URL_')
if not data['PDF_']:
del data['PDF_']
return data
if __name__ == '__main__':
main_mongo = MongoClient(entity_code="ZX_ZCGG_SJS_SJSGG", mongo_collection="ZX_ZCGG")
data_list = main_mongo.main()
for data in data_list:
re_data = data_shuffle(data)
print(re_data)
| [
"499413642@qq.com"
] | 499413642@qq.com |
a186015bedc024774761a69cf11220b1625da282 | 01ad22f0500fb8d00120cd983166ff559c6f6238 | /Zahid_ass2_pb10.py | 7baba97e81cf6af8c08bd07b74e333ea4572f453 | [] | no_license | ZAHID-8017/Comp_Ass2 | eed4f5496224adeb358da76b174189c9998e162f | 69b5305d5654bfbe6dc26671723d0ab23360a45e | refs/heads/master | 2022-06-18T20:21:50.090993 | 2020-04-23T06:40:28 | 2020-04-23T06:40:28 | 257,973,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
'''
Variable transformation being used here is t=z/(1+z)
thus dx/dz=1/(x^(2)*(1+z)^(2)+z^2) with boundary condition
0<=z<=infinity
'''
h = float(input("enter the step size=", ))
def func(z,x): return 1/(x**(2)*(1-z)**(2)+z**2)
z=np.arange(0,1+h,h)
n=len(z)
x = np.zeros(n)
x[0]=1
for i in range(n-1):
k0 = h*func(z[i],x[i])
k1 = h*func(z[i]+h/2,x[i]+k0/2)
k2 = h*func(z[i]+h/2,x[i]+k1/2)
k3 = h*func(z[i]+h,x[i]+k2)
x[i+1]=x[i]+(k0+2*k1+2*k2+k3)/6
plt.plot(z,x,"-b")
plt.xlabel("$z$",fontsize=20)
plt.ylabel("x(z)",fontsize=20)
plt.show()
t=(3.5e06)/(3.5e06+1)
f = interpolate.interp1d(z, x)
print("solution(3.5e+06)=",f(t))
| [
"noreply@github.com"
] | noreply@github.com |
5ead9fc6fe3fb783c676ead8dba80288741305d2 | e7831cdfb29d7c0867aea6e1ed95927396c9500c | /userInterface.py | 6f5178d98a23c4311339c18ecb17541249970543 | [] | no_license | derekyee97/Checkout_Simulator- | 590f4ad9f77a45ac6ffe0b2d00c09947d06be813 | e67615ce5ccf287635d8a27bce620312ac48e5bb | refs/heads/master | 2021-06-23T18:25:16.779823 | 2019-08-08T23:52:05 | 2019-08-08T23:52:05 | 150,893,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,016 | py | ##Creates connection to sqlite database
# @param None
# @return sqlite cursor
import sqlite3
def setSQLConnection():
    """Open snackOverFlow.db and return a cursor for it.

    Bug fix: only the cursor is returned (the Connection object was
    discarded) and no caller in this file ever calls commit(), so every
    INSERT/UPDATE/DELETE was silently lost when the process exited.
    Opening with isolation_level=None puts sqlite3 in autocommit mode,
    which makes each statement persist immediately while keeping the
    cursor-only return value callers expect.
    """
    connection = sqlite3.connect("snackOverFlow.db", isolation_level=None)
    return connection.cursor()
##Prints out items from proudct table
# reads data base, puts desired elements into sectioned lists.
# @return None
def displayProducts(c):
names=['NAMES']
price=['PRICES']
description=['DESCRIPTION']
stock=['STOCK']
c.execute("SELECT * FROM product")
result= c.fetchall()
for r in result:
names.append(r[2])
price.append(r[1])
description.append(r[3])
stock.append(r[4])
for i in range(len(names)):
line_new = '{:<20} {:<20} {:<20} {:<20}'.format(str(names[i]),str(price[i]), str(description[i]),str(stock[i]))
print(line_new)
'''
c.execute("SELECT * FROM member")
result =c.fetchall()
print(result)
'''
##Prints out items from cart table
# reads data base, puts desired elements into sectioned lists.
# @return None
def displayCart(c):
items=['ITEMS'] #items
amount=['AMOUNT'] #amountItem
c.execute("SELECT * FROM cart")
result=c.fetchall()
for r in result:
items.append(r[1])
amount.append(r[2])
for i in range(len(items)):
line_new = '{:<20} {:<20}\n'.format(str(items[i]),str(amount[i]))
print(line_new)
##ADDS TO CART given user input
#will use sql method to put desired data into table
#@return none
def addToCart(name, amount, pricePer, c):
c.execute("INSERT INTO cart (cartID, itemName, amountItem, pricePerUnit) values (NULL,?,?,?)",(name, amount, pricePer))#add info to database table "cart"
print("added: ",name, "in the quanity of: ",str(amount), " at the price per unit of: ", str(pricePer)) #printing confirmation
##checks if product name user entered is in database, will return the price. Returns -1 if not in database
def checkInProd(item, c):
    """Return the unit price (float) of *item* from the product table,
    or -1 when the product is not stocked.

    Bug fix: the original reached its bare ``except`` only via an
    accidental IndexError (``fetchall()`` returns ``[]`` -- never
    ``None`` -- for a missing product), hiding any real database error.
    """
    c.execute('''SELECT price FROM product WHERE name=?''', (item,))
    row = c.fetchone()  # None when the product does not exist
    if row is None:
        return -1
    return float(row[0])
#If could take out desired amount, will update stock. Else will return False to inisinuate not enough stock.
def updateStock(c, numToTake, itemName):
    """Try to take *numToTake* units of *itemName* out of stock.

    Returns True and decrements the product's stock column when enough
    units are available; returns False when stock is insufficient or
    the product does not exist.

    Bug fix: ``fetchall()`` returns ``[]`` (never ``None``) for an
    unknown item, so the original's ``is not None`` check always passed
    and ``retrieved[0]`` raised an uncaught IndexError.
    """
    c.execute('''SELECT stock FROM product WHERE name=?''', (itemName,))
    row = c.fetchone()
    if row is None:
        return False  # unknown product
    available = int(row[0])
    if numToTake > available:
        return False  # not enough stock
    c.execute('''UPDATE product SET stock=? WHERE name=?''',
              (available - numToTake, itemName))
    return True
#checks if product in cart based on user input
def deleteInCart(item, c):
    """Remove *item* from the cart table and print the outcome.

    Bug fix: a DELETE whose WHERE clause matches nothing still succeeds
    (it just affects zero rows), so the original's ``except`` branch was
    unreachable and it printed a deletion confirmation for items that
    were never in the cart.  We check cursor.rowcount instead.
    """
    c.execute('''DELETE FROM cart WHERE itemName=?''', (item,))
    if c.rowcount > 0:
        print("Deleted ", item, " from your cart.")
    else:
        print("Selected item is not in the cart.")
##calculats sum of all items in cart, and prints receipt
def calcSumOfCart(c):
itemNames=[] #creating lists to store from table to print
itemAmount=[]
itemPrice=[]
subTotal=[]
totalPrice=0
c.execute("SELECT * FROM cart")
result=c.fetchall()
for x in result: #storing info into lists to use to calculate price total
itemNames.append(x[1])
itemAmount.append(x[2])
itemPrice.append(x[3])
print(itemNames)
print(itemAmount)
print(itemPrice)
for i in range(len(itemNames)): #looping through list and calculating price, summing them up all together
totalPrice=totalPrice+(int(itemAmount[i])*float(itemPrice[i]))
subTotal.append(round((int(itemAmount[i])*float(itemPrice[i])),2)) #calculating item price *amount purchased to get subtotal of that item, adds to list
print(subTotal)
print("SNACKOVERFLOW RECEIPT: \n")
line_new = '{:<20} {:<18} {:<18} {:<18}'.format(" ITEM "," AMOUNT "," PRICE/UNIT "," SUBTOTAL ")
print(line_new)
for x in range(len(itemNames)):
line_new = '{:<20} {:<20} {:<20} {:<20}'.format(str(itemNames[x]),str(itemAmount[x]), str(itemPrice[x]),str(subTotal[x]))
print(line_new)
totalPrice=round(totalPrice,2)
print("THE TOTAL TO BE PAID: $",totalPrice)
##Displays Registration Form to console
# @param None
# @return None
def register(c):
print("--- SNACKER REGISTRATION ---")
name = input("Please enter your username: ")
email = input("Please enter your email address: ")
pin = input("Please enter a 4-digit passcode: ")
BoD = input("Please enter your birthday day(MM/DD/YYYY): ")
address = input("Please enter your address: ")
#SQL COMMAND: Add info to database
c.execute("INSERT INTO member (memberID, name, email, password, berf, address) values (NULL,?,?,?,?,?)", (name, email, pin, BoD, address))
print("Thank you for registering today!\nWe're returning you to the main menu")
#Displays Menu Options to console
# @param None
# @return None
def mainMenu():
print("\n--- MENU OPTIONS ---")
print("1. Register as new snacker.")
print("2. Go shopping.")
print("3. Exit.\n")
def shoppingMenu(c):
c.execute(''' DELETE FROM cart''') #clears cart table for new user
while (True):
print("\n--- SNACKS IN STOCK ---")
displayProducts(c)
print("\n--- Shopping Options ---")
print("1. Add item to Cart.")
print("2. Remove item from Cart.")
print("3. View Cart items.")
print("4. Go to checkout.")
print("5. Exit shopping.")
userShop = int(input("Please enter a shopping selection (1|2|3|4|5): "))
if (userShop == 1):
#SQL COMMAND: Insert item from cart
item=input("Please enter the name of the product you would like to add: ")
priceOfItem=checkInProd(item, c) #gets price of item from database, it will be -1 if not in db
if(priceOfItem==-1):
print("We do not carry this item. Returning back to shopping menu.")
else:
amount=int(input("Please enter the amount you would like to get: "))
if(not updateStock(c,amount,item)):
print("We do not have enough in stock.")
else:
addToCart(item,amount,priceOfItem,c)
elif (userShop == 2):
#SQL COMMAND: Delete item from cart
itemName=input("Please enter the item name in your cart that you would like to delete: ")
deleteInCart(itemName,c)
elif (userShop == 3):
print("\ncart contains: ")
displayCart(c)
elif (userShop == 4):
#Calcualte total of all items in shoppingCart
calcSumOfCart(c)
elif (userShop == 5):
print("Thank you for your purchase(s)!\n")
return
else:
print("ERROR: This is an invalid choice, please try again.\n")
##Verify that userName and pin match in database
# @param userName, pin, cursor
# @return Boolean
def verifyPin(userName, pin, c):
    """Return True when *userName* exists in the member table and *pin*
    matches the stored password; otherwise False.

    Improvements: the original ran two queries (name, then password)
    where one suffices, and compared via a loop over the one-element
    password row.  The "not registered" notice is still printed when
    the username is unknown, as before.

    NOTE(review): passwords are stored and compared in plain text --
    they should be hashed (e.g. hashlib) before this ships.
    """
    c.execute('''SELECT password FROM member WHERE name=?''', (userName,))
    row = c.fetchone()
    if row is None:
        print("You are not a registered snacker!\n")
        return False
    return row[0] == pin
##Displays exit prompt to console
# @param None
# @return None
def exitDisplay():
print("\n\nThis program was created by: ")
print(" _ _ _ _ _ _ ")
print(" (c).-.(c) (c).-.(c) (c).-.(c) ")
print(" / ._. \ / ._. \ / ._. \ ")
print(" __\( Y )/__ __\( Y )/__ __\( Y )/__ ")
print("(_.-/'-'\-._)(_.-/'-'\-._)(_.-/'-'\-._)")
print(" || K || || I || || M || ")
print(" _.' `-' '._ _.' `-' '._ _.' `-' '._ ")
print("(.-./`-'\.-.)(.-./`-'\.-.)(.-./`-'\.-.)")
print(" `-' `-' `-' `-' `-' `-' \n")
print(" _ _ _ _ _ _ ")
print(" (c).-.(c) (c).-.(c) (c).-.(c) ")
print(" / ._. \ / ._. \ / ._. \ ")
print(" __\( Y )/__ __\( Y )/__ __\( Y )/__ ")
print("(_.-/'-'\-._)(_.-/'-'\-._)(_.-/'-'\-._)")
print(" || T || || A || || N || ")
print(" _.' `-' '._ _.' `-' '._ _.' `-' '._ ")
print("(.-./`-'\.-.)(.-./`-'\.-.)(.-./`-'\.-.)")
print(" `-' `-' `-' `-' `-' `-' \n")
print(" _ _ _ _ _ _ ")
print(" (c).-.(c) (c).-.(c) (c).-.(c) ")
print(" / ._. \ / ._. \ / ._. \ ")
print(" __\( Y )/__ __\( Y )/__ __\( Y )/__ ")
print("(_.-/'-'\-._)(_.-/'-'\-._)(_.-/'-'\-._)")
print(" || D || || E || || R || ")
print(" _.' `-' '._ _.' `-' '._ _.' `-' '._ ")
print("(.-./`-'\.-.)(.-./`-'\.-.)(.-./`-'\.-.)")
print(" `-' `-' `-' `-' `-' `-' ")
#@method: isInt
#@param: a string
#description: tests to see if string is integer, return False if isnt
def isInt(x):
    """Return True when int(x) succeeds, i.e. *x* parses as an integer.

    Bug fix: the original used a bare ``except``, which would also
    swallow KeyboardInterrupt/SystemExit; we catch only the conversion
    errors int() actually raises.
    """
    try:
        int(x)
    except (ValueError, TypeError):
        return False
    return True
##Main Function
def main():
print(" _ __ _ ")
print(" ___ _ __ __ _ ___| | _______ _____ _ __ / _| | _____ __")
print("/ __| '_ \ / _` |/ __| |/ / _ \ \ / / _ \ '__| |_| |/ _ \ \ /\ / /")
print("\__ \ | | | (_| | (__| < (_) \ V / __/ | | _| | (_) \ V V / ")
print("|___/_| |_|\__,_|\___|_|\_\___/ \_/ \___|_| |_| |_|\___/ \_/\_/ ")
print("\n***** WECLOME TO SNACKOVERFLOW *****")
print("We have an assortment of snacks to satisfy your every craving.\n")
#Create cursor
cursor = setSQLConnection()
while (True):
mainMenu()
userChoice =(input("Please enter a menu selection (1|2|3): "))
if(isInt(userChoice)):
userChoice=int(userChoice)
elif(not isInt(userChoice)):
print("ERROR: Invalid choice, please try again.\n")
continue
if (userChoice == 1):
register(cursor)
elif (userChoice == 2):
ERROR = 0
while ( ERROR< 3 ):
print("\n--- PLEASE LOGIN ---")
userName = input("Please enter your username: ")
pin = input("Please enter your pin: ")
if ( verifyPin(userName, pin, cursor) ):
shoppingMenu(cursor)
break
else:
ERROR += 1
if(ERROR == 3):
print("Sorry, you inputted the wrong pin too many times.\nWe're returning you to the main menu.\n")
elif (userChoice == 3):
print("Thank you for snacking with us today.\nWe look forward to seeing you again!\n")
exitDisplay()
exit(0)
elif (userChoice==4):
displayProducts(cursor)
else:
print("ERROR: This is an invalid choice, please try again.\n")
#Calls main function
main()
| [
"noreply@github.com"
] | noreply@github.com |
2725023487fc23afd507ba7f21e96be58e1d8690 | 98e443f61d43206091e7e6470a46721f7282167a | /setup.py | 78c3da83d888c46bd718c298a7cb8ec949d13d2f | [
"Apache-2.0"
] | permissive | gazalpatel/topic_ai | 6951b8d78621a142177ecdaaf8084136a8fd46a9 | a84308587a914c16dc1dfb9e5de6c61121d4fce6 | refs/heads/main | 2023-04-26T02:00:54.862148 | 2021-06-03T14:12:17 | 2021-06-03T14:12:17 | 370,997,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="top_ai",
version="0.2021.7.1",
author="Gazal Patel",
author_email="gpatel@phigrc.com",
description="Text processing and topic extraction library.",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://gazalpatel.wordpress.com/",
download_url="https://github.com/gazalpatel/topic_ai/archive/refs/heads/main.zip",
packages=find_packages(),
install_requires=[
'numpy',
'scikit-learn',
'scipy',
'nltk',
'scipy',
'unidecode',
'contractions',
'word2number',
'bs4',
'spacy'
],
classifiers=[
"Development Status :: Work in profress",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7.4",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
keywords="Topic modeling keyphrases text processing Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning"
)
| [
"gpatel@phigrc.com"
] | gpatel@phigrc.com |
2a88f9776fb6356dd14d00555c404bb5c8a5ab0f | 009cb5a547d4d91431225d21f490a24842e67c63 | /copy_cheat.py | c77ffb1176e91d83f1e12e8bb385b46592dbf91c | [] | no_license | don-lin/copy_cheat | 47e5e81c480e5e16ca75126bb8bd9aed1dbf9a3b | d64e705f3207623820d37b7cbb9e8f6860b26c7f | refs/heads/master | 2022-11-11T08:24:44.008118 | 2020-06-28T06:20:07 | 2020-06-28T06:20:07 | 275,519,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | #pip install pywin32 pyperclip
import win32api,time,win32con,win32clipboard
last="""hello world, this is a test content"""
def click_key(value):
    """Press and release the virtual key with code *value* (logged to stdout)."""
    print(value)
    key_down, key_up = 0, win32con.KEYEVENTF_KEYUP
    win32api.keybd_event(value, 0, key_down, 0)
    win32api.keybd_event(value, 0, key_up, 0)
def input_str(s):
    """Type string *s* via synthetic keystrokes.

    The string is upper-cased first.  Letters map to their own virtual-key
    codes, digits to ord(ch) + 48 (the numpad key codes), newlines to
    RETURN (13); any other character is sent as a space.
    """
    for ch in s.upper():
        code = ord(ch)
        if ord('A') <= code <= ord('Z'):
            click_key(code)
        elif ord('0') <= code <= ord('9'):
            click_key(code + 48)
        elif ch == '\n':
            click_key(13)
        else:
            click_key(ord(' '))
def clear_clipboard():
    """Empty the Windows clipboard, leaving it holding an empty string."""
    clip = win32clipboard
    clip.OpenClipboard()
    clip.EmptyClipboard()
    clip.SetClipboardText('')
    clip.CloseClipboard()
def get_clipboard():
    """Return the current clipboard contents."""
    clip = win32clipboard
    clip.OpenClipboard()
    contents = clip.GetClipboardData()
    clip.CloseClipboard()
    return contents
def get_key():
    """Poll the hotkeys once and react.

    ESC (VK 27): if the clipboard text changed since the last ESC press,
    type it out via input_str() and remember it; pressing ESC again on
    unchanged content resets the memory (so a further press re-types it).
    F2 (VK 113): clear the clipboard.

    Note: the original also read the state of the 'Q' key into an unused
    local; that dead read has been removed.
    """
    global last
    f2_state = win32api.GetAsyncKeyState(113)   # VK_F2
    esc_state = win32api.GetAsyncKeyState(27)   # VK_ESCAPE
    if esc_state != 0:
        data = get_clipboard()
        if data != last:
            input_str(data)
            last = data
        else:
            # Same content as last time: forget it so the next ESC re-types.
            last = ""
    if f2_state != 0:
        clear_clipboard()
# Main polling loop: check the hotkeys roughly three times per second.
while(True):
    time.sleep(0.3)
    get_key()
"noreply@github.com"
] | noreply@github.com |
7f9eee3b5f2344775ddcd223da083387c56563be | bd6f84956f24604fbb9e3ec9dfcb75343959c135 | /PA3/EE451-PA3/readgraph.py | 2892af87f6686437e611c5c1f7dbc03c3b4d68f9 | [] | no_license | rv-harsha/parallel-and-distributed-computation | 2918ca6e62a559f7fc6fe1236d42ff367305c4f6 | b0bbb53a1623b9a7593076c8ed6fcc3ae4d49280 | refs/heads/main | 2023-08-28T03:30:44.170296 | 2021-10-21T04:44:21 | 2021-10-21T04:44:21 | 419,580,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py |
nodes=[]
edges=[]
def checkedge(edge):
if edge in edges:
return True;
return False
def readgraph(filename):
with open(filename) as f:
rows = f.readlines()
rows = rows[4:]
for row in rows:
columns = row.split()
#print(columns[0])
#print(columns[1])
edges.append((int(columns[0]), int(columns[1])))
if int(columns[0]) not in nodes:
nodes.append(int(columns[0]))
if int(columns[1]) not in nodes:
nodes.append(int(columns[1]))
if __name__ == "__main__":
filename = "p2p-Gnutella06.txt"
readgraph(filename)
print(nodes)
print(edges)
print("0 - 2: " + str(checkedge((0,2))))
print("0 - 3: " + str(checkedge((0,3))))
print("1 - 2: " + str(checkedge((1,2))))
| [
"harsha.rv67@gmail.com"
] | harsha.rv67@gmail.com |
8d55cf556ab9aa1c469f6021ac768c5c963cda4f | 0dab38e55043456b6d40b04c63a35d49999eb4bd | /classwork3sept13(finish).py | fd5c2226a1d5d9a727bc4a43e6c884113d8911b3 | [] | no_license | Dillon1john/Python | 922d47e378c36efabf4ae9dd7752318e75f69e55 | dec5d8123f51564ab3e308ad8c8bab0624c546e6 | refs/heads/master | 2020-06-15T01:51:59.591201 | 2019-07-04T06:15:27 | 2019-07-04T06:15:27 | 195,179,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | x=float(input("Enter kilometers"))
step1=x/2
step2= step1*(1/4)
total=step1+step2
print(total,"m")
| [
"noreply@github.com"
] | noreply@github.com |
f3b575a591741af71ff96affecc01aa7f7b1eeef | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/109/usersdata/188/63757/submittedfiles/av2_p3_civil.py | a916fc1a714178afe44fb0829312c99cd7cc0417 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # -*- coding: utf-8 -*-
import numpy as np
def linhas(a, m):
    """Return the sum of row *a* of matrix *m* (iterating over its columns)."""
    total = 0
    for col in range(m.shape[1]):
        total = total + m[a, col]
    return (total)
def colunas(a, m):
    """Return the sum of column *a* of matrix *m* (iterating over its rows)."""
    total = 0
    for row in range(m.shape[0]):
        total = total + m[row, a]
    return (total)
# Main script: reads an h x h matrix entry by entry, then computes the sum
# of row x plus column y, subtracting 2*q[x,y] because the element at the
# intersection is counted once in each of the two sums.
h=int(input("Digite a dimensão da matriz:"))
x=int(input("Digite x:"))
y=int(input("Digite y:"))
q=np.zeros((h,h))
print(q)
for i in range(0,q.shape[0],1):
    for j in range(0,q.shape[1],1):
        q[i,j]=float(input("Digite o termo:"))
b=(linhas(x,q)+colunas(y,q)-(2*q[x,y]))
# BUG FIX: the original ended with the bare name `printJ`, which raises a
# NameError; the intent was clearly to print the computed result.
print(b)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ffcb0c922b684275e416c82190ce8ed330061603 | e9ec8382062c40bc23e8967c36b9f0860fd2fae4 | /temp.py | e329c4e75639c0bf02ee5b379caa1cd5a78c7dfe | [] | no_license | slinkoandrey/phys | 74537c9a7fdd8ce751d5d84b5db49d34f62dd25c | 2d0d85471b2e82718198f1728c562a90dc5aa9b4 | refs/heads/master | 2021-05-09T20:23:42.275748 | 2019-03-14T21:37:41 | 2019-03-14T21:37:41 | 118,689,528 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
# Fit an autoregressive model to the yearly sunspot-activity series shipped
# with statsmodels and plot: the raw series, its autocorrelation, the fitted
# AR coefficients, and a 50-step forecast with its confidence band.
spots = sm.datasets.sunspots
print(spots.NOTE)
data = spots.load_pandas().data['SUNACTIVITY']
plt.plot(data)
plt.figure()
# Autocorrelation up to lag 50; alpha=0.01 gives a 99% confidence band.
sm.graphics.tsa.plot_acf(data, lags=50, alpha=0.01)
# ARMA(12, 0), i.e. a pure AR model of order 12.
ar = sm.tsa.ARMA(data.values, (12, 0))
model = ar.fit()
print(model.arparams)
plt.figure()
plt.plot(model.arparams)
# 50-step-ahead forecast; f_conf holds the (lower, upper) 95% bounds.
forecast, f_err, f_conf = model.forecast(50, alpha=0.05)
plt.figure()
plt.plot(data.index[-100:], data.values[-100:])
plt.plot(np.arange(50) + data.index[-1], forecast, color='red')
plt.fill_between(np.arange(50) + data.index[-1], *f_conf.T, color='red', alpha=0.3)
"noreply@github.com"
] | noreply@github.com |
9a36090e137b6c733f445cb587a0720eccd62adb | 3bb70650b4b83e4653dcc18c8233c106c7a5611a | /sale_shortcut/shortcut_getter.py | 44c35030e8a22f587ada781c66cd6059851922bb | [] | no_license | khanhlu2013/pos_connect_code | 48e736a6b1c5ca6a5c4ff39d842d8a93f66e67ef | fdf70de858c10b175832af31ecc0cf770d028396 | refs/heads/master | 2023-04-08T02:35:46.181265 | 2016-10-18T21:12:51 | 2016-10-18T21:12:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from sale_shortcut.models import Parent,Child
def get_shortcut(id):
    """Fetch the Parent with primary key *id*, children prefetched."""
    parents = Parent.objects.prefetch_related('child_set')
    return parents.get(pk=id)
def get_shorcut_lst(store_id):
    """Return all Parents belonging to *store_id*, children prefetched."""
    parents = Parent.objects.filter(store_id=store_id)
    return parents.prefetch_related('child_set')
"khanhlu2013@gmail.com"
] | khanhlu2013@gmail.com |
69b032c7099a9e8e353caac87a95e1e1500777ff | 2beb0e86f08de749e7eaddbf2e5ae24abb8ddff9 | /treer.py | 7c8ad9fd81e20945b6cff34fe23cc2088b43acb6 | [] | no_license | NanamiTakayama/treer | 59ea22e8d6837900cb72c6c4ad0b74e185c1cc37 | bb99bbfceda4f701d130e1777a8f09c48a75e306 | refs/heads/master | 2020-04-11T18:01:39.215498 | 2018-12-16T09:39:06 | 2018-12-16T09:39:06 | 161,983,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import datetime
import time
import subprocess
from time import sleep
cmd1 = "pwd"
cmd2 = "tree"
# BUG FIX: the file was opened, written, and never closed; since the loop
# below never terminates, the buffered 'new file' line might never be
# flushed to disk.  A with-block guarantees the write lands (and avoids
# shadowing the name `file`).
with open('new.txt', 'a') as log_file:
    log_file.write('new file\n')
# Every 20 seconds: print a timestamp and run `pwd` and `tree`.
while True:
    print(datetime.datetime.now())
    subprocess.call(cmd1)
    subprocess.call(cmd2)
    sleep(20)
| [
"ns.potable@gmail.com"
] | ns.potable@gmail.com |
7cf482daf8a47cd604c5fa2b83bb75aa350f97dd | aee5f372ba1b5fbb1c8acf6080c4c86ae195c83f | /cern-stubs/lsa/client/rest/cern/api/v1/feign/__init__.pyi | 96054066f4590355d07f8781d938bb4307bcfd26 | [] | no_license | rdemaria/pjlsa | 25221ae4a4b6a4abed737a41a4cafe7376e8829f | e64589ab2203338db4253fbc05ff5131142dfd5f | refs/heads/master | 2022-09-03T13:18:05.290012 | 2022-08-16T13:45:57 | 2022-08-16T13:45:57 | 51,926,309 | 1 | 5 | null | 2019-07-11T11:50:44 | 2016-02-17T13:56:40 | Python | UTF-8 | Python | false | false | 5,523 | pyi | import cern.lsa.client.rest.api.v1.dto
import cern.lsa.client.rest.api.v1.feign
import cern.lsa.domain.cern.settings
import java.util
import typing
# Typed stub mirroring the Java interface of the same name (see the embedded
# Java signature in the docstring); bodies are intentionally `...`.
class IncaFeignService(cern.lsa.client.rest.api.v1.feign.FeignService):
    """
    public interface IncaFeignService extends cern.lsa.client.rest.api.v1.feign.FeignService
    """
    def findIncaPropertyFieldInfos(self, incaPropertyFieldInfosRequestDto: cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfosRequestDto) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfoDto]: ...
    def saveIncaPropertyFieldInfos(self, collection: typing.Union[java.util.Collection[cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfoDto], typing.Sequence[cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfoDto]]) -> None: ...
    # REST endpoint path constants used by the feign client above.
    class Urls:
        FIND_INCA_PROPERTY_FIELDS_INFO: typing.ClassVar[str] = ...
        SAVE_INCA_PROPERTY_FIELDS_INFO: typing.ClassVar[str] = ...
# Typed stub mirroring the Java ParameterFeignService interface; method
# bodies are intentionally `...` (stub file semantics).
class ParameterFeignService(cern.lsa.client.rest.api.v1.feign.FeignService):
    """
    public interface ParameterFeignService extends cern.lsa.client.rest.api.v1.feign.FeignService
    """
    def addParametersToParameterGroup(self, long: int, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
    def deleteCriticalProperty(self, long: int, string: str) -> None: ...
    def deleteParameterGroup(self, long: int) -> None: ...
    def deleteParameterTypes(self, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
    def deleteParameters(self, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
    def findAllHierarchies(self) -> java.util.List[str]: ...
    def findCommonHierarchyNames(self, list: java.util.List[int]) -> java.util.Set[str]: ...
    def findHierarchyNames(self, list: java.util.List[int]) -> java.util.Set[str]: ...
    def findMakeRuleForParameterRelation(self, long: int, long2: int) -> cern.lsa.client.rest.api.v1.dto.MakeRuleConfigInfoDto: ...
    def findParameterGroupsByAccelerator(self, string: str) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterGroupDto]: ...
    def findParameterTrees(self, parameterTreesRequestDto: cern.lsa.client.rest.api.v1.dto.ParameterTreesRequestDto) -> cern.lsa.client.rest.api.v1.dto.ParameterTreeDataDto: ...
    def findParameterTypes(self, parameterTypesRequestDto: cern.lsa.client.rest.api.v1.dto.ParameterTypesRequestDto) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterTypeDto]: ...
    def findParameters(self, parametersRequestDto: cern.lsa.client.rest.api.v1.dto.ParametersRequestDto) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterDto]: ...
    def findParametersWithSettings(self, long: int, string: str) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterDto]: ...
    def findParametersWithoutSettings(self, long: int, string: str) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterDto]: ...
    def getMaxDelta(self, long: int) -> float: ...
    def removeParametersFromParameterGroup(self, long: int, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
    def saveCriticalProperty(self, propertyAndDeviceDto: cern.lsa.client.rest.api.v1.dto.PropertyAndDeviceDto) -> None: ...
    def saveParameterGroup(self, parameterGroupDto: cern.lsa.client.rest.api.v1.dto.ParameterGroupDto) -> None: ...
    def saveParameterTypes(self, collection: typing.Union[java.util.Collection[cern.lsa.client.rest.api.v1.dto.ParameterTypeDto], typing.Sequence[cern.lsa.client.rest.api.v1.dto.ParameterTypeDto]]) -> None: ...
    def saveParameters(self, list: java.util.List[cern.lsa.client.rest.api.v1.dto.ParameterAttributesDto]) -> None: ...
    # REST endpoint path constants used by the feign client above.
    class Urls:
        FIND_PARAMETERS_URL: typing.ClassVar[str] = ...
        PARAMETERS_URL: typing.ClassVar[str] = ...
        PARAMETER_TYPES_URL: typing.ClassVar[str] = ...
        FIND_ALL_HIERARCHIES_URL: typing.ClassVar[str] = ...
        FIND_HIERARCHIES_BY_PARAMETERS_URL: typing.ClassVar[str] = ...
        FIND_COMMON_HIERARCHIES_BY_PARAMETERS_URL: typing.ClassVar[str] = ...
        SAVE_PARAMETER_RELATIONS: typing.ClassVar[str] = ...
        CRITICAL_PROPERTIES_URL: typing.ClassVar[str] = ...
        PARAMETER_GROUPS_URL: typing.ClassVar[str] = ...
        PARAMETER_GROUP_BY_ID_URL: typing.ClassVar[str] = ...
        PARAMETER_GROUP_PARAMETERS_URL: typing.ClassVar[str] = ...
        PARAMETER_RELATION_MAKE_RULE_URL: typing.ClassVar[str] = ...
        FIND_PARAMETER_TREES_URL: typing.ClassVar[str] = ...
        PARAMETERS_WITHOUT_SETTINGS_URL: typing.ClassVar[str] = ...
        PARAMETERS_WITH_SETTINGS_URL: typing.ClassVar[str] = ...
        PARAMETER_MAX_DELTA_URL: typing.ClassVar[str] = ...
# Typed stub mirroring the Java ReDriveSettingsFeignService interface.
class ReDriveSettingsFeignService(cern.lsa.client.rest.api.v1.feign.FeignService):
    """
    public interface ReDriveSettingsFeignService extends cern.lsa.client.rest.api.v1.feign.FeignService
    """
    def reDriveDeviceSettings(self, reDriveRequest: cern.lsa.domain.cern.settings.ReDriveRequest) -> cern.lsa.domain.cern.settings.ReDriveResponse: ...
    # REST endpoint path constant used by the feign client above.
    class Urls:
        REDRIVE_DEVICES: typing.ClassVar[str] = ...
# Structural type describing what `jp.JPackage(...)` exposes for this
# package: one attribute per stubbed interface class.
class __module_protocol__(typing.Protocol):
    # A module protocol which reflects the result of ``jp.JPackage("cern.lsa.client.rest.cern.api.v1.feign")``.
    IncaFeignService: typing.Type[IncaFeignService]
    ParameterFeignService: typing.Type[ParameterFeignService]
    ReDriveSettingsFeignService: typing.Type[ReDriveSettingsFeignService]
| [
"michi.hostettler@cern.ch"
] | michi.hostettler@cern.ch |
044aad4fa71bcd126aeaddc1ade5415f47768d7e | ab9adb7706360e349c0fec19f44bf7b7eb41c7b2 | /comments/urls.py | 31dcde2ae09b2efba9f5e172317c94aa645b8f0f | [] | no_license | dujiyuan/blogproject | b5226f9e521be5a0eed4689e7cc9f95c80fc8fd7 | 34c39b0d130e4da5fcced591543d54b86e050d15 | refs/heads/master | 2020-03-12T13:03:47.059264 | 2018-06-13T03:01:54 | 2018-06-13T03:01:54 | 130,633,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | from django.conf.urls import url
from . import views
app_name = 'comments'
urlpatterns = [
url(r'^conmment/post/(?P<post_pk>[0-9]+)/$',views.post_comment,name='post_comment')
] | [
"463071864@qq.com"
] | 463071864@qq.com |
d368f4a86a6510a5b4544a08552856d776373977 | 202bf6cd78eb19025aa63dea609b40903ce1a735 | /code/library_clusters.py | b159f8a7f9ca23ec704b02c9382692e2679fb567 | [
"MIT"
] | permissive | jbkinney/17_inducibility | 1e4a5943bd680252f19008fb77a0b9bb8cfbdaf6 | cb2ddf984608fba6f85c98711bce0d7e450fba0e | refs/heads/master | 2020-04-06T07:28:02.386966 | 2018-12-05T16:25:35 | 2018-12-05T16:25:35 | 157,273,701 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,247 | py | spacing_constructs={
'51.5':{'ind':'b17B7', 'null':''},
'52.5':{'ind':'b17B9', 'null':''},
'53.5':{'ind':'b17C2', 'null':''},
'54.5':{'ind':'b17C4', 'null':''},
'55.5':{'ind':'b17C6', 'null':''},
'56.5':{'ind':'b17C8', 'null':''},
'57.5':{'ind':'b17D1', 'null':''},
'58.5':{'ind':'b17D3', 'null':''},
'59.5':{'ind':'b17D5', 'null':''},
'60.5':{'ind':'b3C7', 'null':'b3C9'},
'61.5':{'ind':'b1A1', 'null':'b1A7'},
'62.5':{'ind':'b3D3', 'null':'b3D4'},
'63.5':{'ind':'b17D7', 'null':'b17D9'},
'64.5':{'ind':'b17E2', 'null':'b17E4'},
'65.5':{'ind':'b9F9', 'null':'b3D8'},
'66.5':{'ind':'b3D6', 'null':'b3D8'},
'70.5':{'ind':'b3E1', 'null':'b3E3'},
'71.5':{'ind':'b1I1', 'null':'b1H6'},
'72.5':{'ind':'b3E5', 'null':'b3E7'},
'73.5':{'ind':'b3E9', 'null':'b3F2'},
'76.5':{'ind':'b3F4', 'null':'b3F6'},
'80.5':{'ind':'b3F8', 'null':'b3G1'},
'81.5':{'ind':'b3G3', 'null':'b3G5'},
'82.5':{'ind':'b3G7', 'null':'b3G9'},
'83.5':{'ind':'b3H2', 'null':'b3H4'},
'87.5':{'ind':'b17F1', 'null':''},
'92.5':{'ind':'b17F2', 'null':''},
'97.5':{'ind':'b17F4', 'null':''},
'102.5':{'ind':'b17F6', 'null':''},
'103.5':{'ind':'b17F8', 'null':''},
'104.5':{'ind':'b17G1', 'null':''}
}
# Construct-name lists keyed by promoter-element variant (-35/-10 box
# combinations; 'wt' = wild type, 'con' = consensus, 'L' = library,
# 'ext' = extended -10).  NOTE(review): key semantics inferred from the
# naming scheme -- confirm against the cloning records.
rgap_cons={
    '35wt10L':['b13C5','b13C6','b13C7','b13C8','b13C9','b13D1','b13D2','b13D3','b13D4','b13D5','b13D6'],
    '35con10L':['b13D7','b13D8','b13D9','b13E1','b13E2','b13E3','b13E4','b13E5','b13E6','b13E7','b13E8'],
    '35L10wt':['b13E9','b13F1','b13F2','b13F3','b13F4','b13F5','b13F6','b13F7','b13F8','b13F9','b13G1'],
    '35L10con':['b13G2','b13G3','b13G4','b13G5','b13G6','b13G7','b13G8','b13G9','b13H1','b13H2','b13H3'],
    '35L10ext':['b13B2','b13B3','b13B4','b13B5','b13B6','b13B7','b13B8','b13B9','b13C1','b13C2','b13C3','b13C4']
    }
# Clone libraries grouped by construct variant.  For each group:
#   'all'      - every clone in the library,
#   'inliers'  - clones retained for analysis,
#   'outliers' - clones excluded ('all' minus 'inliers' minus 'wt'),
#   'wt'       - wild-type reference clone(s), possibly empty.
# NOTE(review): the inlier/outlier split criteria are not visible here --
# confirm against the analysis that produced these lists.
library_groups = {
    'c61r18':{
        'all':['b1A1', 'b1B6', 'b1B7', 'b1B8', 'b1B9', 'b1C1', 'b1C2', 'b1C3', 'b1C4', 'b1C5', 'b1C6', 'b1C7', 'b1C8', 'b1C9', 'b1D1', 'b1D2', 'b1D3', 'b1D4', 'b1D5', 'b1D6', 'b1D7', 'b1D8', 'b1D9', 'b1E1', 'b1E2', 'b2C6', 'b2C7', 'b2C8', 'b2C9', 'b2D1', 'b2D2', 'b2D3', 'b2D4', 'b2D5', 'b2D6', 'b2D7', 'b2D8', 'b2D9', 'b2E1', 'b2E2', 'b2E3', 'b2E4', 'b2E5', 'b2E6', 'b2E7', 'b2E8', 'b2E9', 'b2F1', 'b2F2'],
        'inliers':['b2C6', 'b2C7', 'b2C8', 'b2C9', 'b2D1', 'b2D2', 'b2D3', 'b2D4', 'b2D5', 'b2D6', 'b2D7', 'b2D8', 'b2D9', 'b2E1', 'b2E3', 'b2E6', 'b2E7', 'b2E8', 'b2E9', 'b2F1', 'b2F2', 'b1B8', 'b1B9', 'b1C1', 'b1C2', 'b1C3', 'b1C4', 'b1C5', 'b1C6', 'b1C7', 'b1C8', 'b1C9', 'b1D1', 'b1D2', 'b1D3', 'b1D4', 'b1D5', 'b1D6', 'b1D7', 'b1D8', 'b1D9', 'b1E1', 'b1E2'],
        'outliers':['b1B7','b2E2','b2E4'],
        'wt':['b1A1']},
    'c61r18.10':{
        'all':['b1A1', 'b2C6', 'b2C7', 'b2C8', 'b2C9', 'b2D1', 'b2D2', 'b2D3', 'b2D4', 'b2D5', 'b2D6', 'b2D7', 'b2D8', 'b2D9', 'b2E1', 'b2E2', 'b2E3', 'b2E4', 'b2E5', 'b2E6', 'b2E7', 'b2E8', 'b2E9', 'b2F1', 'b2F2'],
        'inliers':['b2C6', 'b2C7', 'b2C8', 'b2C9', 'b2D1', 'b2D2', 'b2D3', 'b2D4', 'b2D5', 'b2D6', 'b2D7', 'b2D8', 'b2D9', 'b2E1', 'b2E3', 'b2E6', 'b2E7', 'b2E8', 'b2E9', 'b2F1', 'b2F2'],
        'outliers':['b2E2','b2E4'],
        'wt':['b1A1']},
    'c61r18.35':{
        'all':['b1A1', 'b1B6', 'b1B7', 'b1B8', 'b1B9', 'b1C1', 'b1C2', 'b1C3', 'b1C4', 'b1C5', 'b1C6', 'b1C7', 'b1C8', 'b1C9', 'b1D1', 'b1D2', 'b1D3', 'b1D4', 'b1D5', 'b1D6', 'b1D7', 'b1D8', 'b1D9', 'b1E1', 'b1E2'],
        'inliers':['b1B8', 'b1B9', 'b1C1', 'b1C2', 'b1C3', 'b1C4', 'b1C5', 'b1C6', 'b1C7', 'b1C8', 'b1C9', 'b1D1', 'b1D2', 'b1D3', 'b1D4', 'b1D5', 'b1D6', 'b1D7', 'b1D8', 'b1D9', 'b1E1', 'b1E2'],
        'outliers':['b1B7'],
        'wt':['b1A1']},
    'c71r18':{
        'all':['b1I1', 'b2F3', 'b2F4', 'b2F5', 'b2F6', 'b2F7', 'b2F8', 'b2F9', 'b2G1', 'b2G2', 'b2G3', 'b2G4', 'b2G5', 'b2G6', 'b2G7', 'b2G8', 'b2G9', 'b2H1', 'b2H2', 'b2H3', 'b2H4', 'b2H5', 'b2H6', 'b2H7', 'b2H8', 'b1I5', 'b1I6', 'b1I7', 'b1I8', 'b1I9', 'b2A1', 'b2A2', 'b2A3', 'b2A4', 'b2A5', 'b2A6', 'b2A7', 'b2A8', 'b2A9', 'b2B1', 'b2B2', 'b2B3', 'b2B4', 'b2B5', 'b2B6', 'b2B7', 'b2B8', 'b2B9', 'b2C1'],
        'inliers':['b2F3', 'b2F5', 'b2F6', 'b2F7', 'b2F8', 'b2F9', 'b2G2', 'b2G3', 'b2G4', 'b2G5', 'b2G6', 'b2G7', 'b2G8', 'b2H1', 'b2H3', 'b2H4', 'b2H5', 'b2H7', 'b2H8', 'b1I8', 'b1I9', 'b2A1', 'b2A2', 'b2A4', 'b2A5', 'b2A6', 'b2A7', 'b2A8', 'b2A9', 'b2B1', 'b2B2', 'b2B3', 'b2B4', 'b2B5', 'b2B6', 'b2B7', 'b2B8', 'b2B9', 'b2C1'],
        'outliers':['b2F4'],
        'wt':['b1I1']},
    'c71r18.10':{
        'all':['b1I1', 'b2F3', 'b2F4', 'b2F5', 'b2F6', 'b2F7', 'b2F8', 'b2F9', 'b2G1', 'b2G2', 'b2G3', 'b2G4', 'b2G5', 'b2G6', 'b2G7', 'b2G8', 'b2G9', 'b2H1', 'b2H2', 'b2H3', 'b2H4', 'b2H5', 'b2H6', 'b2H7', 'b2H8'],
        'inliers':['b2F3', 'b2F5', 'b2F6', 'b2F7', 'b2F8', 'b2F9', 'b2G2', 'b2G3', 'b2G4', 'b2G5', 'b2G6', 'b2G7', 'b2G8', 'b2H1', 'b2H3', 'b2H4', 'b2H5', 'b2H7', 'b2H8'],
        'outliers':['b2F4'],
        'wt':['b1I1']},
    'c71r18.35':{
        'all':['b1I1', 'b1I5', 'b1I6', 'b1I7', 'b1I8', 'b1I9', 'b2A1', 'b2A2', 'b2A3', 'b2A4', 'b2A5', 'b2A6', 'b2A7', 'b2A8', 'b2A9', 'b2B1', 'b2B2', 'b2B3', 'b2B4', 'b2B5', 'b2B6', 'b2B7', 'b2B8', 'b2B9', 'b2C1'],
        'inliers':['b1I8', 'b1I9', 'b2A1', 'b2A2', 'b2A4', 'b2A5', 'b2A6', 'b2A7', 'b2A8', 'b2A9', 'b2B1', 'b2B2', 'b2B3', 'b2B4', 'b2B5', 'b2B6', 'b2B7', 'b2B8', 'b2B9', 'b2C1'],
        'outliers':[],
        'wt':['b1I1']},
    'occlusion':{
        'all':['b5E4', 'b5A8', 'b5B2', 'b5B3', 'b5B4', 'b5B5', 'b5B6', 'b5B7', 'b5B8', 'b5B9', 'b5C1', 'b5C2', 'b5C3', 'b5C4', 'b5C5', 'b5C6', 'b5C7', 'b5C8', 'b5C9', 'b5D1', 'b5D2', 'b5D3', 'b5D4', 'b5D5', 'b5D6', 'b5D7', 'b5F2', 'b5F3', 'b5F4', 'b5F5', 'b5F6', 'b5F7', 'b5F8', 'b5F9', 'b5G1', 'b5G2', 'b5G3', 'b5G4', 'b5G5', 'b5G6', 'b5G7', 'b5G8', 'b5G9', 'b5H1', 'b5H2', 'b5H3', 'b5H4', 'b5H5', 'b5H6', 'b5H7'],
        'inliers':['b5B2', 'b5B3', 'b5B5', 'b5B7', 'b5B8', 'b5B9', 'b5C1', 'b5C2', 'b5C3', 'b5C4', 'b5C5', 'b5C7', 'b5C8', 'b5C9', 'b5D1', 'b5D2', 'b5D3', 'b5D4', 'b5D5', 'b5D6', 'b5D7', 'b5F2', 'b5F3', 'b5F4', 'b5F5', 'b5F6', 'b5F7', 'b5F8', 'b5F9', 'b5G1', 'b5G3', 'b5G4', 'b5G6', 'b5G8', 'b5G9', 'b5H1', 'b5H2', 'b5H3', 'b5H4', 'b5H5', 'b5H6', 'b5H7'],
        'outliers':['b5G5', 'b5G7'],
        'wt':[]},
    'oc4':{
        'all':['b5A8', 'b5B2', 'b5B3', 'b5B4', 'b5B5', 'b5B6', 'b5B7', 'b5B8', 'b5B9', 'b5C1', 'b5C2', 'b5C3', 'b5C4', 'b5C5', 'b5C6', 'b5C7', 'b5C8', 'b5C9', 'b5D1', 'b5D2', 'b5D3', 'b5D4', 'b5D5', 'b5D6', 'b5D7'],
        'inliers':['b5B2', 'b5B3', 'b5B5', 'b5B7', 'b5B8', 'b5B9', 'b5C1', 'b5C2', 'b5C3', 'b5C4', 'b5C5', 'b5C7', 'b5C8', 'b5C9', 'b5D1', 'b5D2', 'b5D3', 'b5D4', 'b5D5', 'b5D6', 'b5D7'],
        'outliers':[],
        'wt':['b5A8']},
    'oc0':{
        'all':['b5E4', 'b5F2', 'b5F3', 'b5F4', 'b5F5', 'b5F6', 'b5F7', 'b5F8', 'b5F9', 'b5G1', 'b5G2', 'b5G3', 'b5G4', 'b5G5', 'b5G6', 'b5G7', 'b5G8', 'b5G9', 'b5H1', 'b5H2', 'b5H3', 'b5H4', 'b5H5', 'b5H6', 'b5H7'],
        'inliers':['b5F2', 'b5F3', 'b5F4', 'b5F5', 'b5F6', 'b5F7', 'b5F8', 'b5F9', 'b5G1', 'b5G3', 'b5G4', 'b5G6', 'b5G8', 'b5G9', 'b5H1', 'b5H2', 'b5H3', 'b5H4', 'b5H5', 'b5H6', 'b5H7'],
        'outliers':['b5G5', 'b5G7'],
        'wt':['b5E4']},
    '61c-r18.35':{
        'all':['b1A7', 'b1E3', 'b1E4', 'b1E5', 'b1E6', 'b1E7', 'b1E8', 'b1E9', 'b1F1', 'b1F2', 'b1F3', 'b1F4', 'b1F5', 'b1F6', 'b1F7', 'b1F8', 'b1F9', 'b1G1', 'b1G2', 'b1G3', 'b1G4', 'b1G5', 'b1G6', 'b1G7', 'b1G8'],
        'inliers':['b1E3', 'b1E4', 'b1E5', 'b1E7', 'b1E8', 'b1F1', 'b1F2', 'b1F3', 'b1F4', 'b1F5', 'b1F6', 'b1F7', 'b1F8', 'b1F9', 'b1G1', 'b1G2', 'b1G3', 'b1G4', 'b1G5', 'b1G6', 'b1G8'],
        'outliers':[],
        'wt':['b1A7']},
    'wtc60r18':{
        'all':['b3H6', 'b3H7', 'b3H8', 'b3H9', 'b3I1', 'b3I2', 'b3I3', 'b3I4', 'b3I5', 'b3I6', 'b3I7', 'b3I8', 'b3I9', 'b4A1', 'b4A2', 'b4A3', 'b4A4', 'b4A5', 'b4A6', 'b4A7', 'b4A8', 'b4A9', 'b4B1', 'b4B2', 'b4B3', 'b4B4', 'b4B5', 'b4B6', 'b4B7', 'b4B8', 'b4B9', 'b4C1', 'b4C2', 'b4C3', 'b4C4', 'b4C5', 'b4C6', 'b4C7', 'b4C8', 'b4C9', 'b4D1', 'b4D2', 'b4D3', 'b4D4', 'b4D5', 'b4D6', 'b4D7', 'b4D8'],
        'inliers':['b3H6', 'b3H7', 'b3H8', 'b3H9', 'b3I1', 'b3I2', 'b3I3', 'b3I4', 'b3I5', 'b3I6', 'b3I7', 'b3I8', 'b3I9', 'b4A1', 'b4A2', 'b4A3', 'b4A4', 'b4A5', 'b4A6', 'b4A8', 'b4A9', 'b4B1', 'b4B2', 'b4B3', 'b4B4', 'b4B5', 'b4B6', 'b4B7', 'b4B8', 'b4B9', 'b4C1', 'b4C2', 'b4C4', 'b4C5', 'b4C6', 'b4C7', 'b4C8', 'b4C9', 'b4D1', 'b4D2', 'b4D3', 'b4D4', 'b4D5', 'b4D6', 'b4D7', 'b4D8'],
        'outliers':['b4A7', 'b4C3'],
        'wt':[]},
    'wtc60r18.35':{
        'all':['b3H6', 'b3H7', 'b3H8', 'b3H9', 'b3I1', 'b3I2', 'b3I3', 'b3I4', 'b3I5', 'b3I6', 'b3I7', 'b3I8', 'b3I9', 'b4A1', 'b4A2', 'b4A3', 'b4A4', 'b4A5', 'b4A6', 'b4A7', 'b4A8', 'b4A9', 'b4B1', 'b4B2'],
        'inliers':['b3H6', 'b3H7', 'b3H8', 'b3H9', 'b3I1', 'b3I2', 'b3I3', 'b3I4', 'b3I5', 'b3I6', 'b3I7', 'b3I8', 'b3I9', 'b4A1', 'b4A2', 'b4A3', 'b4A4', 'b4A5', 'b4A6', 'b4A8', 'b4A9', 'b4B1', 'b4B2'],
        'outliers':['b4A7'],
        'wt':[]},
    'wtc60r18.10':{
        'all':['b4B3', 'b4B4', 'b4B5', 'b4B6', 'b4B7', 'b4B8', 'b4B9', 'b4C1', 'b4C2', 'b4C3', 'b4C4', 'b4C5', 'b4C6', 'b4C7', 'b4C8', 'b4C9', 'b4D1', 'b4D2', 'b4D3', 'b4D4', 'b4D5', 'b4D6', 'b4D7', 'b4D8'],
        'inliers':['b4B3', 'b4B4', 'b4B5', 'b4B6', 'b4B7', 'b4B8', 'b4B9', 'b4C1', 'b4C2', 'b4C4', 'b4C5', 'b4C6', 'b4C7', 'b4C8', 'b4C9', 'b4D1', 'b4D2', 'b4D3', 'b4D4', 'b4D5', 'b4D6', 'b4D7', 'b4D8'],
        'outliers':['b4C3'],
        'wt':[]},
    'wtc61r18.10':{
        'all':['b4D9', 'b4E1', 'b4E2', 'b4E3', 'b4E4', 'b4E5', 'b4E6', 'b4E7', 'b4E8', 'b4E9', 'b4F1', 'b4F2', 'b4F3', 'b4F4', 'b4F5', 'b4F6', 'b4F7', 'b4F8', 'b4F9', 'b4G1', 'b4G2', 'b4G3', 'b4G4', 'b4G5'],
        'inliers':['b4E1', 'b4E2', 'b4E3', 'b4E4', 'b4E6', 'b4E7', 'b4E8', 'b4E9', 'b4F1', 'b4F2', 'b4F3', 'b4F4', 'b4F5', 'b4F6', 'b4F8', 'b4F9', 'b4G1', 'b4G2', 'b4G3', 'b4G4', 'b4G5'],
        'outliers':['b4D9', 'b4F7', 'b4E5'],
        'wt':[]},
    'wtc71r18.10':{
        'all':['b4G6', 'b4G7', 'b4G8', 'b4G9', 'b4H1', 'b4H2', 'b4H3', 'b4H4', 'b4H5', 'b4H6', 'b4H7', 'b4H8', 'b4H9', 'b4I1', 'b4I2', 'b4I3', 'b4I4', 'b4I5', 'b4I6', 'b4I7', 'b4I8', 'b4I9', 'b5A1', 'b5A2'],
        'inliers':['b4G6', 'b4G7', 'b4G8', 'b4G9', 'b4H1', 'b4H2', 'b4H3', 'b4H4', 'b4H5', 'b4H6', 'b4H7', 'b4H8', 'b4H9', 'b4I1', 'b4I2', 'b4I3', 'b4I4', 'b4I5', 'b4I6', 'b4I7', 'b4I8', 'b4I9', 'b5A1', 'b5A2'],
        'outliers':[],
        'wt':[]},
    'DJc61r18':{
        'all':['DJb01', 'DJb02', 'DJb03', 'DJb04', 'DJb05', 'DJb06', 'DJb07', 'DJb08', 'DJb09', 'DJb10', 'DJb11', 'DJb12', 'DJb13', 'DJb14', 'DJb15', 'DJb16', 'DJb17', 'DJb18', 'DJb19', 'DJb20', 'DJb21', 'DJb22', 'DJb23', 'DJb24', 'DJa01', 'DJa02', 'DJa03', 'DJa04', 'DJa05', 'DJa06', 'DJa07', 'DJa08', 'DJa09', 'DJa10', 'DJa11', 'DJa12', 'DJa13', 'DJa14', 'DJa15', 'DJa16', 'DJa17', 'DJa18', 'DJa19', 'DJa20', 'DJa21', 'DJa22', 'DJa23', 'DJa24'],
        'inliers':['DJb01', 'DJb02', 'DJb03', 'DJb04', 'DJb05', 'DJb06', 'DJb07', 'DJb08', 'DJb09', 'DJb10', 'DJb11', 'DJb12', 'DJb13', 'DJb14', 'DJb15', 'DJb16', 'DJb17', 'DJb18', 'DJb19', 'DJb20', 'DJb21', 'DJb22', 'DJb23', 'DJb24', 'DJa01', 'DJa02', 'DJa03', 'DJa04', 'DJa05', 'DJa06', 'DJa07', 'DJa08', 'DJa09', 'DJa10', 'DJa11', 'DJa12', 'DJa13', 'DJa14', 'DJa15', 'DJa16', 'DJa17', 'DJa18', 'DJa19', 'DJa20', 'DJa21', 'DJa23', 'DJa24'],
        'outliers':['DJa22'],
        'wt':['DJb03']},
    'DJc61r18.35':{
        'all':['DJb01', 'DJb02', 'DJb03', 'DJb04', 'DJb05', 'DJb06', 'DJb07', 'DJb08', 'DJb09', 'DJb10', 'DJb11', 'DJb12', 'DJb13', 'DJb14', 'DJb15', 'DJb16', 'DJb17', 'DJb18', 'DJb19', 'DJb20', 'DJb21', 'DJb22', 'DJb23', 'DJb24'],
        'inliers':['DJb01', 'DJb02', 'DJb03', 'DJb04', 'DJb05', 'DJb06', 'DJb07', 'DJb08', 'DJb09', 'DJb10', 'DJb11', 'DJb12', 'DJb13', 'DJb14', 'DJb15', 'DJb16', 'DJb17', 'DJb18', 'DJb19', 'DJb20', 'DJb21', 'DJb22', 'DJb23', 'DJb24'],
        'outliers':[],
        'wt':['DJb03']},
    'DJc61r18.10':{
        'all':['DJb03', 'DJa01', 'DJa02', 'DJa03', 'DJa04', 'DJa05', 'DJa06', 'DJa07', 'DJa08', 'DJa09', 'DJa10', 'DJa11', 'DJa12', 'DJa13', 'DJa14', 'DJa15', 'DJa16', 'DJa17', 'DJa18', 'DJa19', 'DJa20', 'DJa21', 'DJa22', 'DJa23', 'DJa24'],
        'inliers':['DJa01', 'DJa02', 'DJa03', 'DJa04', 'DJa05', 'DJa06', 'DJa07', 'DJa08', 'DJa09', 'DJa10', 'DJa11', 'DJa12', 'DJa13', 'DJa14', 'DJa15', 'DJa16', 'DJa17', 'DJa18', 'DJa19', 'DJa20', 'DJa21', 'DJa23', 'DJa24'],
        'outliers':['DJa22'],
        'wt':['DJb03']},
    'c60r18.10':{
        'all':['b3C7', 'b7D8', 'b7D9', 'b7E1', 'b7E2', 'b7E3', 'b7E4', 'b7E5', 'b7E6', 'b7E7', 'b7E8', 'b7E9', 'b7F1', 'b7F2', 'b7F3', 'b7F4', 'b7F5', 'b7F6', 'b7F7', 'b7F8', 'b7F9', 'b7G1', 'b7G2', 'b7G3', 'b7G4'],
        'inliers':['b7D8', 'b7D9', 'b7E1', 'b7E2', 'b7E3', 'b7E4', 'b7E5', 'b7E6', 'b7E7', 'b7E8', 'b7E9', 'b7F1', 'b7F2', 'b7F3', 'b7F4', 'b7F5', 'b7F6', 'b7F7', 'b7F8', 'b7G1', 'b7G2', 'b7G3', 'b7G4'],
        'outliers':['b7F9'],
        'wt':['b3C7']},
    'c62r18.10':{
        'all':['b3D3', 'b7G5', 'b7G6', 'b7G7', 'b7G8', 'b7G9', 'b7H1', 'b7H2', 'b7H3', 'b7H4', 'b7H5', 'b7H6', 'b7H7', 'b7H8', 'b7H9', 'b7I1', 'b7I2', 'b7I3', 'b7I4', 'b7I5', 'b7I6', 'b7I7', 'b7I8', 'b7I9', 'b8A1'],
        'inliers':['b7G5', 'b7G6', 'b7G7', 'b7G8', 'b7G9', 'b7H1', 'b7H2', 'b7H3', 'b7H4', 'b7H5', 'b7H6', 'b7H7', 'b7H8', 'b7H9', 'b7I1', 'b7I2', 'b7I3', 'b7I4', 'b7I5', 'b7I6', 'b7I7', 'b7I8', 'b7I9', 'b8A1'],
        'outliers':[],
        'wt':['b3D3']},
    'c81r18.10':{
        'all':['b3G3', 'b8A2', 'b8A3', 'b8A4', 'b8A5', 'b8A6', 'b8A7', 'b8A8', 'b8A9', 'b8B1', 'b8B2', 'b8B3', 'b8B4', 'b8B5', 'b8B6', 'b8B7', 'b8B8', 'b8B9', 'b8C1', 'b8C2', 'b8C3', 'b8C4', 'b8C5', 'b8C6', 'b8C7'],
        'inliers':['b8A2', 'b8A3', 'b8A4', 'b8A5', 'b8A6', 'b8A7', 'b8A8', 'b8B1', 'b8B2', 'b8B3', 'b8B4', 'b8B5', 'b8B6', 'b8B7', 'b8B8', 'b8B9', 'b8C1', 'b8C2', 'b8C3', 'b8C4', 'b8C5', 'b8C6', 'b8C7','b8A9'],
        'outliers':[],
        'wt':['b3G3']},
    'c63r18.10':{
        'all':['b17D7','b17D8', 'b9D1', 'b9D2', 'b9D3', 'b9D4', 'b9D5', 'b9D6', 'b9D7', 'b9D8', 'b9D9', 'b9E1', 'b9E2', 'b9E3'],
        'inliers':['b9D1', 'b9D2', 'b9D3', 'b9D4', 'b9D6', 'b9D7', 'b9D8', 'b9D9', 'b9E1', 'b9E2', 'b9E3'],
        'outliers':['b9D5'],
        'wt':['b17D7','b17D8']},
    'c64r18.10':{
        'all':['b17E2','b17E3', 'b9E4', 'b9E5', 'b9E6', 'b9E7', 'b9E8', 'b9E9', 'b9F1', 'b9F2', 'b9F3', 'b9F4', 'b9F5', 'b9F6'],
        'inliers':['b9E4', 'b9E5', 'b9E6', 'b9E7', 'b9E8', 'b9E9', 'b9F1', 'b9F2', 'b9F3', 'b9F4', 'b9F5', 'b9F6'],
        'outliers':[],
        'wt':['b17E2','b17E3']},
    'c65r18.10':{
        'all':['b9F7', 'b9F8', 'b9F9', 'b9G1', 'b9G2', 'b9G3', 'b9G4', 'b9G5', 'b9G6', 'b9G7', 'b9G8', 'b9G9', 'b9H1', 'b9H2', 'b9H3', 'b9H4', 'b9H5', 'b9H6', 'b9H7', 'b9H8', 'b9H9', 'b9I1', 'b9I2', 'b9I3'],
        'inliers':['b9F7', 'b9F8', 'b9F9', 'b9G1', 'b9G2', 'b9G3', 'b9G4', 'b9G5', 'b9G6', 'b9G7', 'b9G8', 'b9G9', 'b9H1', 'b9H2', 'b9H3', 'b9H4', 'b9H5', 'b9H6', 'b9H7', 'b9H8', 'b9H9', 'b9I1', 'b9I2', 'b9I3'],
        'outliers':[],
        'wt':[]},
    'c66r18.10':{
        'all':['b3D6', 'b10F8', 'b10F9', 'b10G1', 'b10G2', 'b10G3', 'b10G4', 'b10G5', 'b10G6', 'b10G7', 'b10G8', 'b10G9', 'b10H1', 'b10H2', 'b10H3', 'b10H4', 'b10H5', 'b10H6', 'b10H7', 'b10H8', 'b10H9', 'b10I1', 'b10I2', 'b10I3', 'b10I4'],
        'inliers':['b10F8', 'b10F9', 'b10G1', 'b10G2', 'b10G4', 'b10G5', 'b10G6', 'b10G7', 'b10G8', 'b10G9', 'b10H1', 'b10H2', 'b10H3', 'b10H4', 'b10H5', 'b10H6', 'b10H7', 'b10H8', 'b10H9', 'b10I1', 'b10I2', 'b10I3', 'b10I4'],
        'outliers':['b10G3'],
        'wt':['b3D6']},
    'c76r18.10':{
        'all':['b3F4', 'b10I5', 'b10I6', 'b10I7', 'b10I8', 'b10I9', 'b11A1', 'b11A2', 'b11A3', 'b11A4', 'b11A5', 'b11A6', 'b11A7', 'b11A8', 'b11A9', 'b11B1', 'b11B2', 'b11B3', 'b11B4', 'b11B5', 'b11B6', 'b11B7', 'b11B8', 'b11B9', 'b11C1'],
        'inliers':['b10I5', 'b10I6', 'b10I7', 'b10I8', 'b10I9', 'b11A1', 'b11A2', 'b11A3', 'b11A4', 'b11A5', 'b11A6', 'b11A7', 'b11A8', 'b11A9', 'b11B1', 'b11B2', 'b11B3', 'b11B4', 'b11B5', 'b11B6', 'b11B7', 'b11B8', 'b11B9', 'b11C1'],
        'outliers':[],
        'wt':['b3F4']},
    'c82r18.10':{
        'all':['b13H4', 'b13H5', 'b13H6', 'b13H7', 'b13H8', 'b13H9', 'b13I1', 'b13I2', 'b13I3', 'b13I4', 'b13I5', 'b13I6', 'b13I7', 'b13I8', 'b13I9', 'b14A1', 'b14A2', 'b14A3', 'b14A4', 'b14A5', 'b14A6', 'b14A7'],
        'inliers':['b13H4', 'b13H5', 'b13H6', 'b13H7', 'b13H8', 'b13H9', 'b13I1', 'b13I2', 'b13I3', 'b13I4', 'b13I5', 'b13I6', 'b13I7', 'b13I8', 'b13I9', 'b14A1', 'b14A2', 'b14A3', 'b14A4', 'b14A5', 'b14A6', 'b14A7'],
        'outliers':[],
        'wt':[]},
    'c72r18.10':{
        'all':['b14A8', 'b14A9', 'b14B1', 'b14B2', 'b14B3', 'b14B4', 'b14B5', 'b14B6', 'b14B7', 'b14B8', 'b14B9', 'b14C1', 'b14C2', 'b14C3', 'b14C4', 'b14C5', 'b14C6', 'b14C7', 'b14C8', 'b14C9', 'b14D1', 'b14D2'],
        'inliers':['b14A8', 'b14A9', 'b14B1', 'b14B2', 'b14B3', 'b14B4', 'b14B5', 'b14B6', 'b14B7', 'b14B8', 'b14B9', 'b14C1', 'b14C2', 'b14C3', 'b14C4', 'b14C5', 'b14C6', 'b14C7', 'b14C8', 'b14C9', 'b14D1', 'b14D2'],
        'outliers':[],
        'wt':[]},
    'c40':{
        'all':['b12I1', 'b12I2', 'b14E3', 'b14E4', 'b14E5', 'b14E6', 'b14E7', 'b14E8', 'b14E9', 'b14F1', 'b14F2', 'b14F3', 'b14F4', 'b14G7', 'b14G8', 'b14G9', 'b14H1', 'b14H2', 'b14H3', 'b14H4', 'b14H5', 'b14H6', 'b14H7', 'b14H8', 'b15C6', 'b15C7', 'b15C8', 'b15C9', 'b15D1', 'b15D2', 'b15D3', 'b15D4', 'b15D5', 'b15D6', 'b15D7'],
        'inliers':['b14E3', 'b14E4', 'b14E5', 'b14E6', 'b14E7', 'b14E8', 'b14E9', 'b14F1', 'b14F2', 'b14F3', 'b14F4', 'b14G7', 'b14G8', 'b14G9', 'b14H1', 'b14H2', 'b14H3', 'b14H4', 'b14H5', 'b14H6', 'b14H7', 'b14H8', 'b15C6', 'b15C7', 'b15C8', 'b15C9', 'b15D1', 'b15D2', 'b15D3', 'b15D4', 'b15D5', 'b15D6', 'b15D7'],
        'outliers':[],
        'wt':['b12I1', 'b12I2', 'b12I3']},
    'c41':{
        'all':['b14D3', 'b14D4', 'b14D5', 'b14F5', 'b14F6', 'b14F7', 'b14F8', 'b14F9', 'b14G1', 'b14G2', 'b14G3', 'b14G4', 'b14G5', 'b14G6', 'b14H9', 'b14I1', 'b14I2', 'b14I3', 'b14I4', 'b14I5', 'b14I6', 'b14I7', 'b14I8', 'b14I9', 'b15A1', 'b15D8', 'b15D9', 'b15E1', 'b15E2', 'b15E3', 'b15E4', 'b15E5', 'b15E6', 'b15E7', 'b15E8', 'b15E9'],
        'inliers':['b14F5', 'b14F6', 'b14F7', 'b14F8', 'b14F9', 'b14G1', 'b14G2', 'b14G3', 'b14G4', 'b14G5', 'b14G6', 'b14H9', 'b14I1', 'b14I2', 'b14I5', 'b14I6', 'b14I7', 'b14I8', 'b15A1', 'b15D8', 'b15D9', 'b15E1', 'b15E2', 'b15E3', 'b15E4', 'b15E5', 'b15E6', 'b15E8', 'b15E9'],
        'outliers':['b14I3','b14I4','b14I9','b15E7'],
        'wt':['b14D3','b14D4','b14D5']},
    'gal':{
        'all':['b14D6','b14D7','b14D8','b15A2','b15A3','b15A4','b15A5','b15A6','b15A7','b15A8','b15A9','b15B1','b15B2','b15B3','b15B4','b15B5','b15B6','b15B7','b15B8','b15B9','b15C1','b15C2','b15C3','b15C4','b15C5'],
        'inliers':['b15A2','b15A3','b15A5','b15A6','b15A7','b15A9','b15B1','b15B2','b15B3','b15B4','b15B5','b15B6','b15B7','b15B8','b15B9','b15C1','b15C2','b15C5'],
        'outliers':['b15A4','b15A8','b15C3','b15C4'],
        'wt':['b14D6','b14D7','b14D8']}
    }
| [
"jkinney@cshl.edu"
] | jkinney@cshl.edu |
e95e4723eef9aab484f4fb53e4ac61f4ba72cbb9 | a283507c237802d3983ca1e5e1752299f9b5d0b7 | /python/vizu.py | 9a30e9bd3e9b404f9c61ab89ccf05ff111abbfd3 | [] | no_license | eureko/ThesisRepo | 59016ef3d58d5b6ec15dee6c8ca5012f70fa8e18 | eb922c15debd83446f7a86fd830a35c03b1bd459 | refs/heads/master | 2020-06-10T03:11:20.362518 | 2017-10-19T08:56:26 | 2017-10-19T08:56:26 | 76,110,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | import numpy as np
import matplotlib.pyplot as plt
conf_arr = [[33,2,0,0,0,0,0,0,0,1,3],
[3,31,0,0,0,0,0,0,0,0,0],
[0,4,41,0,0,0,0,0,0,0,1],
[0,1,0,30,0,6,0,0,0,0,1],
[0,0,0,0,38,10,0,0,0,0,0],
[0,0,0,3,1,39,0,0,0,0,4],
[0,2,2,0,4,1,31,0,0,0,2],
[0,1,0,0,0,0,0,36,0,2,0],
[0,0,0,0,0,0,1,5,37,5,1],
[3,0,0,0,0,0,0,0,0,39,0],
[0,0,0,0,0,0,0,0,0,0,38]]
norm_conf = []
for i in conf_arr:
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j)/float(a))
norm_conf.append(tmp_arr)
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet,
interpolation='nearest')
width, height = conf_arr.shape
for x in xrange(width):
for y in xrange(height):
ax.annotate(str(conf_arr[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center')
cb = fig.colorbar(res)
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
plt.xticks(range(width), alphabet[:width])
plt.yticks(range(height), alphabet[:height])
plt.savefig('confusion_matrix.png', format='png') | [
"e.g.caldarola@gmail.com"
] | e.g.caldarola@gmail.com |
5b7cd1b73da7534c2ba8d6e327363de129009de5 | fe21ea819fe85dcbc2e2adb4e246693611c71380 | /manage.py | 9972677d59224764eb959f5a5b7a0398c5ff9d19 | [] | no_license | jsinghw/Bug_Tracker | bdc0c3b6c690d2e0123d23a20d932bc43fe13d12 | bd0758c0ae35d0436677d4a6f417443e5d7042bb | refs/heads/master | 2022-12-24T21:08:50.916713 | 2020-10-06T20:00:54 | 2020-10-06T20:00:54 | 265,694,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Bug_Tracker.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"jsinghw94@gmail.com"
] | jsinghw94@gmail.com |
fd34687517ec4febd82cb9e4f68edd35ab70b5e0 | cc423690d7e5c36cbca0dc4a7527b536b63bd5a2 | /numbers.py | 8481b9f39119440df818ca5c175a3b998dfbadd0 | [] | no_license | Odisssssey/py | 49e2c55395b9ad2b4b0530e894e2219bd4914faf | 94add8e694c31317914f99fff49b85941b6b6a4c | refs/heads/master | 2021-01-12T00:52:50.097817 | 2017-03-04T13:36:06 | 2017-03-04T13:36:06 | 78,310,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | i = 1
myNumber = 0
while myNumber < 1 or myNumber > 10:
myNumber = int(input('Введите число от 1 до 10: '))
number = randint(1, 10)
while number:
if myNumber == number:
print("Вы угадали! ПК загадал число", number)
break
else:
if myNumber < number:
print('ПК загадал число, больше введенного вами. Количество ошибок сделанное вами:', i)
myNumber = 0
while myNumber < 1 or myNumber > 10:
myNumber = int(input('Постарайтесь угадать вновь: '))
if myNumber < 1 or myNumber > 10:
print("Вы можете вводить число от 1 до 10.")
else:
print('ПК загадал число, меньше введенного вами. Количество ошибок сделанное вами:', i)
myNumber = 0
while myNumber < 1 or myNumber > 10:
myNumber = int(input('Постарайтесь угадать вновь: '))
if myNumber < 1 or myNumber > 10:
print("Вы можете вводить число от 1 до 10.")
i += 1
| [
"noreply@github.com"
] | noreply@github.com |
16199a07ec5548bf9b2b006a3514bd5f372cc9fd | 8eb7e2224cd81cd21fd5b0c4dd54abe85ba07e49 | /examples/autodetect_snmp.py | f5263fe58da8a57174eda76459460e8151f56f9a | [
"MIT"
] | permissive | ktbyers/netmiko | f8b980569fd863f0a7bfe28580366339c4bd31ec | 2e56b40ec639da130471c59dd1f3c93983471e41 | refs/heads/develop | 2023-08-30T20:33:05.554926 | 2023-08-29T21:50:45 | 2023-08-29T21:50:45 | 27,283,062 | 3,397 | 1,594 | MIT | 2023-09-04T03:04:31 | 2014-11-28T21:42:52 | Python | UTF-8 | Python | false | false | 636 | py | import sys
from getpass import getpass
from netmiko.snmp_autodetect import SNMPDetect
from netmiko import ConnectHandler
host = "cisco1.lasthop.io"
device = {"host": host, "username": "pyclass", "password": getpass()}
snmp_community = getpass("Enter SNMP community: ")
my_snmp = SNMPDetect(host, snmp_version="v2c", community=snmp_community)
device_type = my_snmp.autodetect()
print(device_type)
if device_type is None:
sys.exit("SNMP failed!")
# Update the device dictionary with the device_type and connect
device["device_type"] = device_type
with ConnectHandler(**device) as net_connect:
print(net_connect.find_prompt())
| [
"noreply@github.com"
] | noreply@github.com |
1c70c159d67344bfbfeb2cf097d7e7ee72ef07d9 | 63a5baed621ad6b8a95436dcbb2a3771cfea303f | /LABS/LAB4/stack.py | 9b3901561562449b1c9645131608d8f4385f6af0 | [] | no_license | ew0s/ITMO_Algorithms1 | b9ed21e51fc5971bdd081d1dfac13aa1dc7e0680 | cd5ecb4d3527db72ee44fdec4fd9db345c5cb55d | refs/heads/master | 2021-02-15T09:36:47.724218 | 2020-03-31T21:52:52 | 2020-03-31T21:52:52 | 244,886,294 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | stack = [0] * 1000000
fin = open("stack.in")
fout = open("stack.out", "w")
n = int(fin.readline())
top = -1
for i in range(n):
data = list(fin.readline().split())
if data[0] == "+":
top += 1
data[1] = int(data[1])
stack[top] = data[1]
else:
print(stack[top], file=fout)
top -= 1
fout.close() | [
"Fomesare1"
] | Fomesare1 |
71962d7e86cb76c775309a6190d8d73cdbbb4cf6 | 184bcb482ea5f0f13aa35275847b0e7dd56d8219 | /tests/test_cli.py | 75f2834a1bf6022a049f31136d917db671e85112 | [
"LPPL-1.3c",
"MIT"
] | permissive | svenkreiss/unicodeit | 643a3ead7fc69160eff82099b33c25ba5d01de28 | d7f3f0cb9b7f8c3abf8e47ea6158b2ee1f6cbf05 | refs/heads/main | 2023-08-23T07:44:45.029170 | 2023-03-12T09:21:04 | 2023-03-12T09:21:04 | 10,319,674 | 234 | 34 | NOASSERTION | 2023-07-18T22:48:57 | 2013-05-27T17:52:57 | Python | UTF-8 | Python | false | false | 1,743 | py | import subprocess
import sys
import pytest
PYTHON = 'python3' if sys.platform != 'win32' else 'python'
def test_cli_symbols1():
r = subprocess.check_output([
PYTHON, '-m', 'unicodeit.cli',
'\\Sigma'
])
print(r.decode())
assert r.decode().strip() == 'Σ'
def test_cli_symbols2():
r = subprocess.check_output([
PYTHON, '-m', 'unicodeit.cli',
'def\\Sigma_{01234}abc\\alpha_{567}ggg\\beta_{1234}lll "\\Sigma e_0 e^3"'
])
print(r.decode())
assert r.decode().strip() == 'defΣ₀₁₂₃₄abcα₅₆₇gggβ₁₂₃₄lll "Σ e₀ e³"'
def test_cli_symbols3():
r = subprocess.check_output([
PYTHON, '-m', 'unicodeit.cli',
'def^{01234}abc\\alpha^{567abc} "\\:) \\:G"'
])
print(r.decode())
assert r.decode().strip() == 'def⁰¹²³⁴abcα⁵⁶⁷ᵃᵇᶜ "☺ ㋡"'
@pytest.mark.skip('this was already broken')
def test_cli_symbols4():
r = subprocess.check_output([
PYTHON, '-m', 'unicodeit.cli',
'ggg\\beta^{1234=\\(5\\)}lll'
])
print(r.decode())
assert r.decode().strip() == 'Σ'
def test_subscripts():
r = subprocess.check_output([
PYTHON, '-m', 'unicodeit.cli',
'a_{\\beta\\gamma\\varphi\\rho\\chi}'
])
print(r.decode())
assert r.decode().strip() == 'aᵦᵧᵩᵨᵪ'
def test_superscripts():
r = subprocess.check_output([
PYTHON, '-m', 'unicodeit.cli',
'm^{ABDEGHIJKLMNOPRTUWabcdefghiklmnoprstuvwxyz\\beta\\gamma\\delta\\varphi\\chi<>}'
])
print(r.decode())
assert r.decode().strip() == 'mᴬᴮᴰᴱᴳᴴᴵᴶᴷᴸᴹᴺᴼᴾᴿᵀᵁᵂᵃᵇᶜᵈᵉᶠᵍʰⁱᵏˡᵐⁿᵒᵖʳˢᵗᵘᵛʷˣʸᶻᵝᵞᵟᵠᵡ˂˃'
| [
"me@svenkreiss.com"
] | me@svenkreiss.com |
4771144c3821e2f59a6bee905ea7ac1ed4d37184 | f93e68e139b41f9544a04c5eda1384ff9ed74d07 | /46.py | 4109947069c0b4a53c228b159b47f0ce29cf6b8b | [] | no_license | c109156147/py20 | c1b24d42cd32d4cedcd4faed617fb659b5f8733a | ecb31d0fa0fe1d36aa5923e401e46fe6215b5953 | refs/heads/main | 2023-05-03T03:07:44.497761 | 2021-04-29T16:23:43 | 2021-04-29T16:23:43 | 362,872,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | list1=[]
list2=[]
n = int(input("輸入筆數n:"))
for i in range (0,n):
obj,num=input("").split(" ")
list1.append(obj)
list2.append(num)
for j in range(len(list1)):
print(str(list1[j])+"牌得到"+str(list2[j])+"面")
| [
"noreply@github.com"
] | noreply@github.com |
2bfb42342e5f1d0ab116b6a4081f0e2342f9d83f | 65ba09c7becc80f30e087671b2360cc195b1ab9c | /psql/dump1090-stream-parser-psql.py | bd9e9b10d582157449d64ecc06dae51df990582d | [] | no_license | mtigas/dump1090-stream-parser | fc367597e3a13596cac4c63c4ad7c9241e80b1b1 | 8ff038cfaf7487b6f209dfde0afb50b99ea73c42 | refs/heads/master | 2020-05-29T12:31:49.475438 | 2019-04-16T20:02:45 | 2019-04-16T20:02:45 | 56,412,903 | 13 | 1 | null | 2016-04-17T01:07:44 | 2016-04-17T01:07:43 | Python | UTF-8 | Python | false | false | 13,624 | py | #!/usr/bin/env python
# encoding: utf-8
"""
see README in this directory
"""
import socket
from decimal import Decimal
import datetime
import psycopg2
import argparse
import time
import traceback
import sys
#defaults
HOST = "localhost"
PORT = 30003
BUFFER_SIZE = 50
BATCH_SIZE = 20
CONNECT_ATTEMPT_DELAY = 1.0
#
TRANSMISSION_TYPE_TTL = (
None, # 0 doesnt exist
datetime.timedelta(seconds=1), #1: callsign, infrequently broadcast anyway
datetime.timedelta(seconds=1), #2: position
None, #3: position/alt -> #2
datetime.timedelta(seconds=5), #4: speed/heading
datetime.timedelta(seconds=10), #5: altitude-only
datetime.timedelta(seconds=1), #6: squawk
None, #7: altitude-only -> #5
None, #8: -> 2
)
TRANSMISSION_TYPE_ALIAS = (
0,
1,
2,
2, # 3 -> 2
4,
5,
6,
5, # 7 -> 5
2, # 8 -> 2
)
# we'll discard any rows with `transmission_type` not in this set.
# 8 (all call reply) is very frequent but does not normally carry data
# for us. 7 (air to air) is also common but only contains altitude.
# your mileage may vary. see the following:
# http://woodair.net/SBS/Article/Barebones42_Socket_Data.htm
# https://github.com/wiseman/node-sbs1
#ONLY_LOG_TYPES = frozenset({1,2,3,4,5,6,7,8})
ONLY_LOG_TYPES = frozenset({1,2,3,4,5,6,7})
def main():
#set up command line options
parser = argparse.ArgumentParser(description="A program to process dump1090 messages then insert them into a database")
parser.add_argument("-l", "--location", type=str, default=HOST, help="This is the network location of your dump1090 broadcast. Defaults to %s" % (HOST,))
parser.add_argument("-p", "--port", type=int, default=PORT, help="The port broadcasting in SBS-1 BaseStation format. Defaults to %s" % (PORT,))
parser.add_argument("-c", "--client-id", type=int, default=0, help="A custom identifier to tag rows from different input sources.")
parser.add_argument("--timezone", type=str, default="UTC")
parser.add_argument("--psql-host", type=str, default="localhost")
parser.add_argument("--psql-port", type=int, default=5432)
parser.add_argument("--psql-user", type=str, default="dump1090")
parser.add_argument("--psql-pass", type=str, default="dump1090")
parser.add_argument("--psql-database", type=str, default="dump1090")
parser.add_argument("--psql-sslmode", type=str, default="prefer")
parser.add_argument("--psql-sslcert", type=str, default=None)
parser.add_argument("--psql-sslkey", type=str, default=None)
parser.add_argument("--buffer-size", type=int, default=BUFFER_SIZE, help="An integer of the number of bytes to read at a time from the stream. Defaults to %s" % (BUFFER_SIZE,))
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE, help="An integer of the number of rows to write to the database at a time. If you turn off WAL mode, a lower number makes it more likely that your database will be locked when you try to query it. Defaults to %s" % (BATCH_SIZE,))
parser.add_argument("--connect-attempt-delay", type=float, default=CONNECT_ATTEMPT_DELAY, help="The number of seconds to wait after a failed connection attempt before trying again. Defaults to %s" % (CONNECT_ATTEMPT_DELAY,))
# parse command line options
args = parser.parse_args()
# Are we receiving data from the flightaware mlat client?
is_mlat = (args.port == 31003)
# print args.accumulate(args.in)
count_since_commit = 0
count_total = 0
print "%s: Connecting to psql..." % args.client_id
conn = psycopg2.connect(
host=args.psql_host,
port=args.psql_port,
user=args.psql_user,
password=args.psql_pass,
database=args.psql_database,
sslmode=args.psql_sslmode,
sslcert=args.psql_sslcert,
sslkey=args.psql_sslkey,
)
cur = conn.cursor()
print "%s: Connected." % args.client_id
# log {(icao, msgtype): timestamp} pairs to eliminate some duplicate
# entries. based on a timestamp here, we throttle based on
# the value of TRANSMISSION_TYPE_TTL[msgtype]
aircraft_msg_ttls = {}
start_time = datetime.datetime.utcnow()
# open a socket connection
print "%s: Connecting to dump1090..." % args.client_id
s = connect_to_socket(args.location, args.port)
# listen to socket for data
data_str = ""
try:
#loop until an exception
while True:
#get current time
cur_time = datetime.datetime.utcnow()
ts = cur_time.strftime("%H:%M:%S")
# receive a stream message
try:
message = ""
message = s.recv(args.buffer_size)
data_str += message.strip("\n")
except socket.error:
# this happens if there is no connection and is delt with below
pass
if len(message) == 0:
print ts, "No broadcast received. Attempting to reconnect"
time.sleep(args.connect_attempt_delay)
s.close()
s = connect_to_socket(args.location, args.port)
continue
# it is possible that more than one line has been received
# so split it then loop through the parts and validate
data = data_str.split("\n")
for d in data:
line = d.split(",")
#if the line has 22 items, it's valid
if len(line) == 22:
# clean up some values first
for (idx, val) in enumerate(line):
v = val.strip()
# 0 message type is 2-3 char
if idx == 0:
line[idx] = v.strip("0123456789").strip()
# 1 transmission type is int or null
if idx == 1:
if v == '':
line[idx] = None
else:
line[idx] = int(v)
# 2-10 string
elif idx in range(2,11):
if v == '':
line[idx] = None
elif idx in set([6,8]): # generated_date, logged_date
line[idx] = datetime.datetime.strptime(v, '%Y/%m/%d').date()
elif idx in set([7,9]): # generated_time, logged_time
line[idx] = datetime.datetime.strptime(v, '%H:%M:%S.%f').time()
else:
line[idx] = v
# 11-13 is int or null
elif idx in range(11,14):
if v == '':
line[idx] = None
else:
line[idx] = int(v)
# 14,15 is float-ish or null
elif idx in range(14,16):
if v == '':
line[idx] = None
else:
line[idx] = Decimal(v)
# 16 is int or null
elif idx == 16:
if v == '':
line[idx] = None
else:
line[idx] = int(v)
# 17 string
elif idx == 17:
if v == '':
line[idx] = None
else:
line[idx] = v
# 18-21 bool or null
elif idx in range(18,22):
if v == '0':
line[idx] = False
elif v == '':
line[idx] = None
else:
line[idx] = True
# transmission types; skip if it's a type that we
# don't care to log in the database.
if line[1] not in ONLY_LOG_TYPES:
continue
# Decide whether or not to skip recording datapoint based on
# a TTL (based on transmission_type).
msgtype_timeout_alias = TRANSMISSION_TYPE_ALIAS[line[1]]
msgtype_key = (line[4], msgtype_timeout_alias)
msgtype_ttl = TRANSMISSION_TYPE_TTL[msgtype_timeout_alias]
existing_timestamp = aircraft_msg_ttls.get(msgtype_key, datetime.datetime(1970,1,1))
#print msgtype_key, existing_timestamp
if (not is_mlat) and (cur_time - existing_timestamp) <= msgtype_ttl:
# too soon.
#print "\ttoo soon"
continue
#print "\tok"
# Reset TTL timer now that we're storing data for this packet
aircraft_msg_ttls[msgtype_key] = cur_time
# session_id, aircraft_id, flight_id are sometimes censored with '11111'?
if (line[2] == '111' and line[3] == '11111' and line[5] == '111111') \
or (line[2] == '1' and line[3] == '1' and line[5] == '1'):
line[2] = None
line[3] = None
line[5] = None
if line[2] == None:
line[2] = ''
if line[3] == None:
line[3] = ''
if line[5] == None:
line[5] = ''
if line[10] == None:
line[10] = ''
# "parsed_time"
line.append("{} {}".format(cur_time, args.timezone))
# "generated_datetime"
if (line[6] and line[7]):
generated_datetime = datetime.datetime.combine(line[6], line[7])
else:
generated_datetime = None
line.append("{} {}".format(generated_datetime, args.timezone))
# "logged_datetime"
if (line[8] and line[9]):
logged_datetime = datetime.datetime.combine(line[8], line[9])
else:
logged_datetime = None
line.append("{} {}".format(logged_datetime, args.timezone))
# store whether we got this from the piaware mlat output basestation
# (otherwise, we got it directly from dump1090)
line.append(is_mlat)
line.append(args.client_id)
# remove the generated & logged date/time fields. we'll just
# store the combined value that we just calculated
line.pop(6)
line.pop(6)
line.pop(6)
line.pop(6)
# move squawk from line[13] to line[0]
sq = line.pop(13)
if sq != None:
sq = int(sq, 8)
line = [sq] + line
# move icao address from line[5] (originally 4) to line[0]
ic = line.pop(5)
if ic != None:
ic = int(ic, 16)
line = [ic] + line
try:
lat = line.pop(11)
lon = line.pop(11)
if lat != None and lon != None and lat != '' and lon != '':
qry = """INSERT INTO squitters (
icao_addr,
decimal_squawk,
message_type,
transmission_type,
session_id,
aircraft_id,
flight_id,
callsign,
altitude,
ground_speed,
track,
vertical_rate,
alert,
emergency,
spi,
is_on_ground,
parsed_time,
generated_datetime,
logged_datetime,
is_mlat,
client_id,
latlon
)
VALUES (
%s,
%s,
""" + ", ".join(["%s"] * (len(line)-2)) + """,
ST_PointFromText('POINT(%s %s)', 4326)
)"""
line.append(lon)
line.append(lat)
else:
qry = """INSERT INTO squitters (
icao_addr,
decimal_squawk,
message_type,
transmission_type,
session_id,
aircraft_id,
flight_id,
callsign,
altitude,
ground_speed,
track,
vertical_rate,
alert,
emergency,
spi,
is_on_ground,
parsed_time,
generated_datetime,
logged_datetime,
is_mlat,
client_id
)
VALUES (
%s,
%s,
""" + ", ".join(["%s"] * (len(line)-2)) + """)"""
cur.executemany(qry, [line])
# increment counts
count_total += 1
count_since_commit += 1
# commit the new rows to the database in batches
if count_since_commit % args.batch_size == 0:
conn.commit()
print "%s: %s:%s - avg %.1f rows/sec" % (args.client_id, args.location, args.port, float(count_total) / (cur_time - start_time).total_seconds(),)
if count_since_commit > args.batch_size:
print ts, "All caught up, %s rows, successfully written to database" % (count_since_commit)
count_since_commit = 0
except psycopg2.OperationalError:
print
print ts, "Could not write to database"
print line
traceback.print_exc()
raise
sys.exit(1)
# since everything was valid we reset the stream message
data_str = ""
else:
# the stream message is too short, prepend to the next stream message
data_str = d
continue
except KeyboardInterrupt:
print "\n%s Closing connection" % (ts,)
s.close()
conn.commit()
conn.close()
print ts, "%s squitters added to your database" % (count_total,)
sys.exit(0)
except psycopg2.OperationalError as err:
print "Error with ", line
traceback.print_exc()
raise
sys.exit(1)
def connect_to_socket(loc,port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((loc, port))
return s
if __name__ == '__main__':
main()
| [
"mike@tig.as"
] | mike@tig.as |
7d165408282b6f951470ba596061fcfd1b4c0a97 | 7eb5db9fa00df1a912bc2edd4f85199c7398f027 | /Codes/Colebrook_nomo.py | 3b82c94e17d138055b88eb66ffed38cef6fe5438 | [] | no_license | theclimb369/Culvert-Nomograph | 344b270e012819a8273cdb8a19e75df17f943c1e | fe74beebb30bb023ec27a7c0ee29cc596fd6832b | refs/heads/main | 2023-09-03T19:10:19.522469 | 2021-10-26T09:13:34 | 2021-10-26T09:13:34 | 396,614,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,378 | py | ### Usage ###
# Nomogprahs based on Colebrook White Equations
# BlackMAX PP/SewerMAX for k = 0.06 & 0.01
### Created by YW on July 21 2021 ###
### Updated on Aug 18 2021 ###
import numpy as np
from scipy import interpolate
import math
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
from scipy.optimize import fsolve
from matplotlib import rc
# rc('text', usetex=True)
k = 0.06 * 10**(-3) ## equivalent hydraulic roughness m
mu = 1.01 * 10 ** (-6) ## kinematic viscosity of water m^2/s
g = 9.8 ## gravity (m/s^2)
pipe1 = 'BlackMAX PP'
pipe2 = 'SewerMAX+ PP'
# Set up output file paths & names
fig1nm = 'Nomograph ColeBrook %s k-%.2f.pdf' % (pipe2, k*10**3)
S = np.array([1.0/4, 1.0/5, 1.0/6, 1.0/7, 1.0/8, 1.0/9, 1.0/10, 1.0/20, 1.0/30, 1.0/40, 1.0/50, 1.0/60, 1.0/70, 1.0/80, 1.0/90, 1.0/100, 1.0/200, 1.0/300, 1.0/400, 1.0/500, 1.0/600, 1.0/700, 1.0/800, 1.0/900, 1.0/1000, 1.0/2000, 1.0/3000, 1.0/4000, 1.0/5000, 1.0/6000, 1.0/7000, 1.0/8000, 1.0/9000, 1.0/10000])
q = [0.005,0.006,0.007,0.008,0.009,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]
V = [0.5, 0.6, 0.7, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 7.0, 8.0, 10.0, 12.0, 14.0]
# D1 = [0.225, 0.3, 0.373, 0.447, 0.522, 0.596] # BlackMAX D actual
D1 = [0.224, 0.297, 0.370, 0.433, 0.516] # SewerMAX+ D actual
# D1_label = [0.225, 0.3, 0.375, 0.450, 0.525, 0.6] #BlackMAX D label (DN)
D1_label = [0.225, 0.3, 0.375, 0.450, 0.525] # SewerMAX+ label
const1 = -math.pi * g**(1.0/2) / 2**(1.0/2)
const2 = k / 3.7
const3 = 2.51 * mu / (2*g) ** (1.0/2)
###### Plot ##########
fig, ax = plt.subplots(figsize=(8, 12))
### BlackMAX PP Diameter lines - ColeBrook_white equation ###
for d in D1:
q1 =const1 * d**(5.0/2) * S**(1.0/2) * np.log10(const2 * d**(-1.0/2) + const3 * d**(-3.0/2) * S**(-1.0/2))
# print (q)
ax.loglog(S, q1, '#0054A6')
### BlackMAX PP velocity lines - ColeBrook-white approximation ###
for v in V:
# Approach I - approximation #
y = np.log10(1.558 / (v**2/(2*g*S*k)) ** 0.8 + 15.045 / (v**3/(2*g*S*mu)) ** 0.73)
Dv = v **2 / (8 * g * S * y**2)
Qv = v * math.pi * Dv**2 / 4.0
# ax.loglog(S, Qv, '#0054A6')
# d_ref = 0.9
# diff_aray = np.abs(Dv-d)
# d_index = diff_aray.argmin()
# ax.text(S[d_index], Qv[d_index], str(v), rotation=48, size=6,
# horizontalalignment='left',
# verticalalignment='baseline',
# )
# print (v, Dv[d_index])
# Approach II - fsolve #
func = lambda D : v + 2*(2*g*D*S)**(0.5) * np.log10(const2 * D**(-1.0/2) + const3 * D**(-3.0/2) * S**(-1.0/2))
D_initial_guess = Dv
D_solution = fsolve(func, D_initial_guess)
Q_cal = v* math.pi * D_solution**2 / 4.0
# print ("The solution for V=%f S=%f is D = %f Q=%f Dv = %f Qv=%f " % (v, S, D_solution, Q_cal, Dv, Qv))
ax.loglog(S, Q_cal, '#0054A6')
### S~(D,V) approxi ColeBrook-white ###
# Sf = v**2 / (8*g*d*(np.log10(k/(3.7*d) + (6.28*mu/(v*d))**0.89))**2)
### Labeling ###
ylabel1 = []
for i in q:
ylabel1.append(str(i*1000))
xlabel = []
xlabel1 = []
den = [4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100,200,300,400,500,600,700,800,900,1000,2000,3000,4000,5000,6000,7000,8000,9000,10000]
for i in den:
deno = str(i)
xlabel1.append("%.2f %%" % (1/i*100))
xlabel.append(str(1) + "/" + str(deno))
ax1 = ax.twiny()
ax.set_xlim(max(S), min(S))
ax1.set_xlim(ax.get_xlim())
ax.set_ylim(0.005, 100)
ax.set_yticks(q)
ax.set_xticks(S)
ax1.set_xscale('log')
ax1.set_xticks(S)
ax.set_yticklabels(ylabel1, Fontsize=5)
ax.set_xticklabels(xlabel, rotation=90, Fontsize=5)
ax1.set_xticklabels(xlabel1, rotation=90, Fontsize=5)
ax.set_xlabel('Hydraulic Grade')
ax.set_ylabel('Flow (L/s)')
# ax.set_title('Slope vs. Flow %s' % (pipe1), y=1.02, pad=20)
# ax.set_title('Nomograph (Colebrook white) - %s (k= %.2f mm)' % (pipe1, k*10**3), y=1.02, pad=20)
ax.set_title('Nomograph (Colebrook white) - %s (k= %.2f mm)' % (pipe2, k*10**3), y=1.02, pad=20)
for d in D1_label:
v = 0.5
s_l = v**2 / (8*g*d*(np.log10(k/(3.7*d) + (6.28*mu/(v*d))**0.89))**2)
# q_l =const1 * d**(5.0/2) * s_l**(1.0/2) * np.log10(const2 * d**(-1.0/2) + const3 * d**(-3.0/2) * s_l**(-1.0/2))
q_l = v * math.pi * (d-0.03) **2 /4
ax.text(s_l, q_l, str(int(d*1000)), rotation=330, size=6,
horizontalalignment='left',
verticalalignment='baseline',
multialignment='center')
for v in V:
d = 1.5
s_l_ini = v**2 / (8*g*d*(np.log10(k/(3.7*d) + (6.28*mu/(v*d))**0.89))**2)
func1 = lambda s_l : v + 2*(2*g*d*s_l)**(0.5) * np.log10(const2 * d**(-1.0/2) + const3 * d**(-3.0/2) * s_l**(-1.0/2))
sl_initial_guess = s_l_ini
s_l = fsolve(func1, sl_initial_guess)
q_l = v * math.pi * d**2 /4.0
if v == 0.5:
ax.text(min(S), q_l, str(v), rotation=48, size=6,
horizontalalignment='left',
verticalalignment='baseline',
)
else:
ax.text(s_l, q_l, str(v), rotation=48, size=6,
horizontalalignment='left',
verticalalignment='baseline',
)
ax.text(0.1, 20, "velocity (m/s)", rotation=60, size=10,
horizontalalignment='left',
verticalalignment='baseline',
)
ax.grid(True, which="both", ls='-' )
fig.savefig(fig1nm)
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
9e76287d8b5010b56cf06f625a309aab0d1e9854 | 9164c3e1b76a12737baf96f55501e24ebf19f735 | /number13/number13/src/Mask_RCNN/mrcnn/model.py | c2a2f9da333d6daf45d81531761e215bfb6c0469 | [
"Apache-2.0",
"MIT",
"CC-BY-NC-SA-4.0",
"BSD-3-Clause"
] | permissive | chritter/SpaceNet_Off_Nadir_Solutions | b3c2ced7b521ee3cee30ca6a9e17244a5faeb876 | 2f875c0a873692912cceb5daac14eee32fc037b2 | refs/heads/master | 2020-08-07T16:19:27.093474 | 2019-10-27T20:33:49 | 2019-10-27T20:33:49 | 213,522,639 | 0 | 0 | Apache-2.0 | 2019-10-08T01:36:11 | 2019-10-08T01:36:10 | null | UTF-8 | Python | false | false | 123,898 | py | """
Mask R-CNN
The main Mask R-CNN model implemenetation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
import skimage.transform
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from mrcnn import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
    """Prints a text message. And, optionally, if a Numpy array is provided it
    prints its shape, min, and max values.

    text: Message to print.
    array: Optional Numpy array. Its shape and dtype are appended to the
        message; min/max values are included only when the array is non-empty.
    """
    if array is not None:
        text = text.ljust(25)
        if array.size:
            text += ("shape: {:20} min: {:10.5f} max: {:10.5f} {}".format(
                str(array.shape),
                array.min(),
                array.max(),
                array.dtype))
        else:
            # Empty arrays have no min/max. The previous code passed "" to a
            # float format spec ({:10.5f}), which raised ValueError.
            text += ("shape: {:20} min: {:10} max: {:10} {}".format(
                str(array.shape),
                "",
                "",
                array.dtype))
    print(text)
class BatchNorm(KL.BatchNormalization):
    """Extends the Keras BatchNormalization class to allow a central place
    to make changes if needed.

    Batch normalization has a negative effect on training if batches are small
    so this layer is often frozen (via setting in Config class) and functions
    as linear layer.
    """

    def call(self, inputs, training=None):
        """
        Note about training values:
            None: Train BN layers. This is the normal mode
            False: Freeze BN layers. Good when batch size is small
            True: (don't use). Set layer in training mode even when inferencing
        """
        # Name the class explicitly instead of `super(self.__class__, self)`:
        # the latter recurses infinitely if this class is ever subclassed,
        # because self.__class__ would then resolve to the subclass.
        return super(BatchNorm, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.

    config: Config object providing BACKBONE and BACKBONE_STRIDES.
    image_shape: [height, width, ...] of the input image.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]

    height, width = image_shape[0], image_shape[1]
    stage_shapes = []
    for stride in config.BACKBONE_STRIDES:
        # Each stride downsamples the image; round up to cover partial cells.
        stage_shapes.append([int(math.ceil(height / stride)),
                             int(math.ceil(width / stride))])
    return np.array(stage_shapes)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    f1, f2, f3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # Main path: 1x1 -> kxk -> 1x1 convolutions, each followed by BN + ReLU
    # (except the last, whose ReLU comes after the residual addition).
    out = KL.Conv2D(f1, (1, 1), name=conv_prefix + '2a',
                    use_bias=use_bias)(input_tensor)
    out = BatchNorm(name=bn_prefix + '2a')(out, training=train_bn)
    out = KL.Activation('relu')(out)

    out = KL.Conv2D(f2, (kernel_size, kernel_size), padding='same',
                    name=conv_prefix + '2b', use_bias=use_bias)(out)
    out = BatchNorm(name=bn_prefix + '2b')(out, training=train_bn)
    out = KL.Activation('relu')(out)

    out = KL.Conv2D(f3, (1, 1), name=conv_prefix + '2c',
                    use_bias=use_bias)(out)
    out = BatchNorm(name=bn_prefix + '2c')(out, training=train_bn)

    # Identity shortcut: add the block input directly, then activate.
    out = KL.Add()([out, input_tensor])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(out)
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: strides of the first conv and of the shortcut conv
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers

    Note that from stage 3, the first conv layer at main path is with
    subsample=(2,2), and the shortcut should have subsample=(2,2) as well.
    """
    f1, f2, f3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # Main path: strided 1x1 -> kxk -> 1x1 convolutions with BN + ReLU
    # (last ReLU applied after the residual addition).
    out = KL.Conv2D(f1, (1, 1), strides=strides,
                    name=conv_prefix + '2a', use_bias=use_bias)(input_tensor)
    out = BatchNorm(name=bn_prefix + '2a')(out, training=train_bn)
    out = KL.Activation('relu')(out)

    out = KL.Conv2D(f2, (kernel_size, kernel_size), padding='same',
                    name=conv_prefix + '2b', use_bias=use_bias)(out)
    out = BatchNorm(name=bn_prefix + '2b')(out, training=train_bn)
    out = KL.Activation('relu')(out)

    out = KL.Conv2D(f3, (1, 1), name=conv_prefix +
                    '2c', use_bias=use_bias)(out)
    out = BatchNorm(name=bn_prefix + '2c')(out, training=train_bn)

    # Projection shortcut: a strided 1x1 conv matches the main path's
    # output shape so the two branches can be summed.
    shortcut = KL.Conv2D(f3, (1, 1), strides=strides,
                         name=conv_prefix + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_prefix + '1')(shortcut, training=train_bn)

    out = KL.Add()([out, shortcut])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(out)
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.

    input_image: input image tensor
    architecture: Can be resnet50 or resnet101
    stage5: Boolean. If False, stage5 of the network is not created
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns the list of stage outputs [C1, C2, C3, C4, C5]
    (C5 is None when stage5 is False).
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    out = KL.ZeroPadding2D((3, 3))(input_image)
    out = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(out)
    out = BatchNorm(name='bn_conv1')(out, training=train_bn)
    out = KL.Activation('relu')(out)
    C1 = out = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(out)
    # Stage 2
    out = conv_block(out, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    out = identity_block(out, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = out = identity_block(out, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    out = conv_block(out, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    out = identity_block(out, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    out = identity_block(out, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = out = identity_block(out, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4: depth is the only difference between resnet50 and resnet101.
    out = conv_block(out, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    n_identity = 5 if architecture == "resnet50" else 22
    for i in range(n_identity):
        # Blocks are named 'b', 'c', ... (chr(98) == 'b').
        out = identity_block(out, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = out
    # Stage 5
    if stage5:
        out = conv_block(out, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        out = identity_block(out, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = out = identity_block(out, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
    """Applies the given deltas to the given boxes.

    boxes: [N, (y1, x1, y2, x2)] boxes to update
    deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply

    Returns the refined boxes as [N, (y1, x1, y2, x2)].
    """
    # Corner form -> center/size form.
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    # Shift the center (relative to the original size), then rescale.
    cy = cy + deltas[:, 0] * h
    cx = cx + deltas[:, 1] * w
    h = h * tf.exp(deltas[:, 2])
    w = w * tf.exp(deltas[:, 3])
    # Back to corner form.
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    y2 = y1 + h
    x2 = x1 + w
    return tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
def clip_boxes_graph(boxes, window):
    """Clip boxes so they lie inside a window.

    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)

    # Clamp each coordinate into the window's valid range.
    def clamp(v, lo, hi):
        return tf.maximum(tf.minimum(v, hi), lo)

    clipped = tf.concat([clamp(y1, wy1, wy2),
                         clamp(x1, wx1, wx2),
                         clamp(y2, wy1, wy2),
                         clamp(x2, wx1, wx2)],
                        axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
class ProposalLayer(KE.Layer):
    """Receives anchor scores and selects a subset to pass as proposals
    to the second stage. Filtering is done based on anchor scores and
    non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.

    Inputs:
        rpn_probs: [batch, anchors, (bg prob, fg prob)]
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
        anchors: [batch, (y1, x1, y2, x2)] anchors in normalized coordinates

    Returns:
        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
    """

    def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count  # max proposals returned
        self.nms_threshold = nms_threshold    # IoU threshold for NMS

    def call(self, inputs):
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        # Box deltas [batch, num_rois, 4]
        deltas = inputs[1]
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        # Anchors
        anchors = inputs[2]

        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        # Generalized: the cap is taken from config.PRE_NMS_LIMIT when the
        # config defines it; the previously hard-coded 6000 remains the
        # default, so older configs behave identically.
        pre_nms_limit = tf.minimum(
            getattr(self.config, "PRE_NMS_LIMIT", 6000), tf.shape(anchors)[1])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
                                            self.config.IMAGES_PER_GPU,
                                            names=["pre_nms_anchors"])

        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice([pre_nms_anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])

        # Clip to image boundaries. Since we're in normalized coordinates,
        # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
        window = np.array([0, 0, 1, 1], dtype=np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])

        # Filter out small boxes
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.

        # Non-max suppression
        def nms(boxes, scores):
            indices = tf.image.non_max_suppression(
                boxes, scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            proposals = tf.gather(boxes, indices)
            # Pad to proposal_count so every image yields the same shape.
            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
        proposals = utils.batch_slice([boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals

    def compute_output_shape(self, input_shape):
        return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
    """Implementation of log base 2. TF doesn't have a native one,
    so use the change-of-base rule: log2(x) = ln(x) / ln(2)."""
    ln_x = tf.log(x)
    ln_2 = tf.log(2.0)
    return ln_x / ln_2
class PyramidROIAlign(KE.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.

    Params:
    - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]

    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
             coordinates. Possibly padded with zeros if not enough
             boxes to fill the array.
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - Feature maps: List of feature maps from different levels of the pyramid.
                    Each is [batch, height, width, channels]

    Output:
    Pooled regions in the shape: [batch, num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
    constructor.
    """

    def __init__(self, pool_shape, **kwargs):
        super(PyramidROIAlign, self).__init__(**kwargs)
        self.pool_shape = tuple(pool_shape)

    def call(self, inputs):
        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
        boxes = inputs[0]
        # Image meta
        # Holds details about the image. See compose_image_meta()
        image_meta = inputs[1]
        # Feature Maps. List of feature maps from different levels of the
        # feature pyramid. Each is [batch, height, width, channels]
        feature_maps = inputs[2:]
        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
        h = y2 - y1
        w = x2 - x1
        # Use shape of first image. Images in a batch must have the same size.
        image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
        # Clamp levels into [2, 5] so every ROI maps onto P2..P5.
        roi_level = tf.minimum(5, tf.maximum(
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        roi_level = tf.squeeze(roi_level, 2)
        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled = []
        box_to_level = []
        for i, level in enumerate(range(2, 6)):
            ix = tf.where(tf.equal(roi_level, level))
            level_boxes = tf.gather_nd(boxes, ix)
            # Box indices for crop_and_resize (which image in the batch
            # each box belongs to).
            box_indices = tf.cast(ix[:, 0], tf.int32)
            # Keep track of which box is mapped to which level
            box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
            level_boxes = tf.stop_gradient(level_boxes)
            box_indices = tf.stop_gradient(box_indices)
            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_boxes, pool_height, pool_width, channels]
            pooled.append(tf.image.crop_and_resize(
                feature_maps[i], level_boxes, box_indices, self.pool_shape,
                method="bilinear"))
        # Pack pooled features into one tensor
        pooled = tf.concat(pooled, axis=0)
        # Pack box_to_level mapping into one array and add another
        # column representing the order of pooled boxes
        box_to_level = tf.concat(box_to_level, axis=0)
        box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
        box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
                                 axis=1)
        # Rearrange pooled features to match the order of the original boxes
        # Sort box_to_level by batch then box index
        # TF doesn't have a way to sort by two columns, so merge them and sort.
        # (the 100000 multiplier assumes fewer than 100000 boxes per image)
        sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
        ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
            box_to_level)[0]).indices[::-1]
        ix = tf.gather(box_to_level[:, 2], ix)
        pooled = tf.gather(pooled, ix)
        # Re-add the batch dimension
        pooled = tf.expand_dims(pooled, 0)
        return pooled

    def compute_output_shape(self, input_shape):
        return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
    """Computes IoU overlaps between two sets of boxes.

    boxes1, boxes2: [N, (y1, x1, y2, x2)].

    Returns an overlaps matrix of shape [len(boxes1), len(boxes2)].
    """
    # 1. Pair every box in boxes1 with every box in boxes2 without loops.
    # TF has no equivalent of np.repeat, so emulate it with tile + reshape:
    # b1 repeats each boxes1 row len(boxes2) times; b2 tiles boxes2 whole.
    n1 = tf.shape(boxes1)[0]
    n2 = tf.shape(boxes2)[0]
    b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
                            [1, 1, n2]), [-1, 4])
    b2 = tf.tile(boxes2, [n1, 1])
    # 2. Intersection rectangle of each pair (clamped to zero size).
    b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
    b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
    inter_w = tf.maximum(tf.minimum(b1_x2, b2_x2) - tf.maximum(b1_x1, b2_x1), 0)
    inter_h = tf.maximum(tf.minimum(b1_y2, b2_y2) - tf.maximum(b1_y1, b2_y1), 0)
    intersection = inter_w * inter_h
    # 3. Union = area1 + area2 - intersection.
    area1 = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
    area2 = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
    union = area1 + area2 - intersection
    # 4. IoU, reshaped back into a [boxes1, boxes2] matrix.
    iou = intersection / union
    return tf.reshape(iou, [n1, n2])
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
    """Generates detection targets for one image. Subsamples proposals and
    generates target class IDs, bounding box deltas, and masks for each.

    Inputs:
    proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [MAX_GT_INSTANCES] int class IDs
    gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
    gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
    deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
            Class-specific bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox
           boundaries and resized to neural network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """
    # Assertions: fail fast if the proposal tensor is empty.
    asserts = [
        tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
                  name="roi_assertion"),
    ]
    with tf.control_dependencies(asserts):
        proposals = tf.identity(proposals)
    # Remove zero padding
    proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
    gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
    gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
                                   name="trim_gt_class_ids")
    gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
                         name="trim_gt_masks")
    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
    non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
    crowd_boxes = tf.gather(gt_boxes, crowd_ix)
    crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
    gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
    gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
    gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
    # Compute overlaps matrix [proposals, gt_boxes]
    overlaps = overlaps_graph(proposals, gt_boxes)
    # Compute overlaps with crowd boxes [proposals, crowds]
    crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
    crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
    no_crowd_bool = (crowd_iou_max < 0.001)
    # Determine positive and negative ROIs
    roi_iou_max = tf.reduce_max(overlaps, axis=1)
    # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
    positive_roi_bool = (roi_iou_max >= 0.5)
    positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
    negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
    # Subsample ROIs. Aim for 33% positive
    # Positive ROIs
    positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
                         config.ROI_POSITIVE_RATIO)
    positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
    positive_count = tf.shape(positive_indices)[0]
    # Negative ROIs. Add enough to maintain positive:negative ratio.
    r = 1.0 / config.ROI_POSITIVE_RATIO
    negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
    negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
    # Gather selected ROIs
    positive_rois = tf.gather(proposals, positive_indices)
    negative_rois = tf.gather(proposals, negative_indices)
    # Assign positive ROIs to GT boxes (each ROI to the GT with max IoU).
    positive_overlaps = tf.gather(overlaps, positive_indices)
    roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
    roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
    roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
    # Compute bbox refinement for positive ROIs
    deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
    deltas /= config.BBOX_STD_DEV
    # Assign positive ROIs to GT masks
    # Permute masks to [N, height, width, 1]
    transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    # Pick the right mask for each ROI
    roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
    # Compute mask targets
    boxes = positive_rois
    if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
        # to normalized mini-mask space.
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
        gt_h = gt_y2 - gt_y1
        gt_w = gt_x2 - gt_x1
        y1 = (y1 - gt_y1) / gt_h
        x1 = (x1 - gt_x1) / gt_w
        y2 = (y2 - gt_y1) / gt_h
        x2 = (x2 - gt_x1) / gt_w
        boxes = tf.concat([y1, x1, y2, x2], 1)
    box_ids = tf.range(0, tf.shape(roi_masks)[0])
    masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
                                     box_ids,
                                     config.MASK_SHAPE)
    # Remove the extra dimension from masks.
    masks = tf.squeeze(masks, axis=3)
    # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
    # binary cross entropy loss.
    masks = tf.round(masks)
    # Append negative ROIs and pad bbox deltas and masks that
    # are not used for negative ROIs with zeros.
    rois = tf.concat([positive_rois, negative_rois], axis=0)
    N = tf.shape(negative_rois)[0]
    P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
    rois = tf.pad(rois, [(0, P), (0, 0)])
    roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
    roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
    deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
    return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
    """Subsamples proposals and generates target box refinement, class_ids,
    and masks for each.

    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
              coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
          coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
                    (dy, dx, log(dh), log(dw), class_id)]
                   Class-specific bbox refinements.
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
                 Masks cropped to bbox boundaries and resized to neural
                 network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """

    def __init__(self, config, **kwargs):
        super(DetectionTargetLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        proposals, gt_class_ids, gt_boxes, gt_masks = \
            inputs[0], inputs[1], inputs[2], inputs[3]

        # Run the per-image target generation graph on each batch slice.
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        return utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda p, c, b, m: detection_targets_graph(p, c, b, m, self.config),
            self.config.IMAGES_PER_GPU, names=names)

    def compute_output_shape(self, input_shape):
        cfg = self.config
        return [
            (None, cfg.TRAIN_ROIS_PER_IMAGE, 4),   # rois
            (None, 1),                             # class_ids
            (None, cfg.TRAIN_ROIS_PER_IMAGE, 4),   # deltas
            (None, cfg.TRAIN_ROIS_PER_IMAGE,
             cfg.MASK_SHAPE[0], cfg.MASK_SHAPE[1])  # masks
        ]

    def compute_mask(self, inputs, mask=None):
        return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
    """Refine classified proposals and filter overlaps and return final
    detections.

    Inputs:
        rois: [N, (y1, x1, y2, x2)] in normalized coordinates
        probs: [N, num_classes]. Class probabilities.
        deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
                bounding box deltas.
        window: (y1, x1, y2, x2). The part of the image that contains the
            image excluding the padding (passed in normalized coordinates
            by DetectionLayer, which calls norm_boxes_graph on it).

    Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where
        coordinates are normalized.
    """
    # Class IDs per ROI (top-scoring class for each)
    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # Class probability of the top class of each ROI
    indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
    class_scores = tf.gather_nd(probs, indices)
    # Class-specific bounding box deltas
    deltas_specific = tf.gather_nd(deltas, indices)
    # Apply bounding box deltas
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)
    # TODO: Filter out boxes with zero area
    # Filter out background boxes (class 0 is background)
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                        tf.expand_dims(conf_keep, 0))
        keep = tf.sparse_tensor_to_dense(keep)[0]
    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids, keep)
    pre_nms_scores = tf.gather(class_scores, keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
                tf.gather(pre_nms_rois, ixs),
                tf.gather(pre_nms_scores, ixs),
                max_output_size=config.DETECTION_MAX_INSTANCES,
                iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices back into the index space of `keep`
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep
    # 2. Map over class IDs
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
                         dtype=tf.int64)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(nms_keep, 0))
    keep = tf.sparse_tensor_to_dense(keep)[0]
    # Keep top detections (by class score, capped at DETECTION_MAX_INSTANCES)
    roi_count = config.DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores, keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)
    # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
    # Coordinates are normalized.
    detections = tf.concat([
        tf.gather(refined_rois, keep),
        tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
        tf.gather(class_scores, keep)[..., tf.newaxis]
        ], axis=1)
    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    return detections
class DetectionLayer(KE.Layer):
    """Takes classified proposal boxes and their bounding box deltas and
    returns the final detection boxes.

    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
    coordinates are normalized.
    """

    def __init__(self, config=None, **kwargs):
        super(DetectionLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        rois, mrcnn_class, mrcnn_bbox, image_meta = \
            inputs[0], inputs[1], inputs[2], inputs[3]

        # The window is the region of the (padded) image holding real image
        # content. Normalize it using the first image's shape — all images
        # in a batch are resized to the same size.
        meta = parse_image_meta_graph(image_meta)
        image_shape = meta['image_shape'][0]
        window = norm_boxes_graph(meta['window'], image_shape[:2])

        # Refine detections independently for every image in the batch.
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
            self.config.IMAGES_PER_GPU)

        # [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
        # normalized coordinates
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])

    def compute_output_shape(self, input_shape):
        return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).

    Returns:
        rpn_class_logits: [batch, anchors, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, anchors, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the featuremap
    # is not even.
    # Shared convolutional base of the RPN.
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)

    # Anchor scores: [batch, height, width, anchors per location * 2],
    # then flattened to [batch, anchors, 2].
    raw_scores = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                           activation='linear', name='rpn_class_raw')(shared)
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(raw_scores)

    # Softmax on the last dimension: BG/FG probabilities.
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Bounding box refinements, flattened to [batch, anchors, 4] where the
    # last axis holds (dy, dx, log(dh), log(dw)).
    raw_bbox = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                         activation='linear', name='rpn_bbox_pred')(shared)
    rpn_bbox = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(raw_bbox)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Builds a Keras model of the Region Proposal Network.

    Wraps the RPN graph so it can be applied to multiple feature maps with
    shared weights.

    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    depth: Depth of the backbone feature map.

    Returns a Keras Model object. The model outputs, when called, are:
        rpn_class_logits: [batch, anchors, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, anchors, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    feature_map_input = KL.Input(shape=[None, None, depth],
                                 name="input_rpn_feature_map")
    rpn_outputs = rpn_graph(feature_map_input, anchors_per_location,
                            anchor_stride)
    return KM.Model([feature_map_input], rpn_outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns:
        logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
        probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
        bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
                     Deltas to apply to proposal boxes
    """
    # ROI Pooling
    # Shape: [batch, num_rois, pool_size, pool_size, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency)
    x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    # Collapse the 1x1 spatial dimensions: [batch, num_rois, 1024]
    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head
    # [batch, num_rois, num_classes * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    # BUG FIX: K.int_shape can return None for the num_rois dimension when
    # the ROI count is dynamic; Reshape rejects None, so fall back to -1 and
    # let the dimension be inferred at runtime.
    s = K.int_shape(x)
    if s[1] is None:
        mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name="mrcnn_bbox")(x)
    else:
        mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True):
    """Builds the computation graph of the mask head of Feature Pyramid Network.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns: Masks [batch, roi_count, height, width, num_classes]
    """
    # ROI Pooling
    # Shape: [batch, num_rois, pool_size, pool_size, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_mask")([rois, image_meta] + feature_maps)

    # Four identical 3x3 conv + BN + ReLU stages (names match the original
    # per-stage layers: mrcnn_mask_conv1..4 / mrcnn_mask_bn1..4).
    for stage in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask_conv{}".format(stage))(x)
        x = KL.TimeDistributed(BatchNorm(),
                               name='mrcnn_mask_bn{}'.format(stage))(x, training=train_bn)
        x = KL.Activation('relu')(x)

    # Upsample 2x, then predict a per-class sigmoid mask.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask")(x)
    return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss.

    y_true and y_pred are typically: [N, 4], but could be any shape.
    """
    # Quadratic inside |diff| < 1, linear (minus a constant) outside.
    diff = K.abs(y_true - y_pred)
    is_small = K.cast(K.less(diff, 1.0), "float32")
    quadratic = 0.5 * diff**2
    linear = diff - 0.5
    return is_small * quadratic + (1 - is_small) * linear
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Drop the trailing singleton dimension.
    rpn_match = tf.squeeze(rpn_match, -1)
    # Map the -1/+1 match values to 0/1 class labels.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Only positive and negative anchors contribute to the loss;
    # neutral anchors (match value = 0) are excluded.
    contributing = tf.where(K.not_equal(rpn_match, 0))
    selected_logits = tf.gather_nd(rpn_class_logits, contributing)
    selected_labels = tf.gather_nd(anchor_class, contributing)
    # Sparse cross-entropy over the selected anchors.
    loss = K.sparse_categorical_crossentropy(target=selected_labels,
                                             output=selected_logits,
                                             from_logits=True)
    # Guard against an empty selection (no pos/neg anchors at all).
    return K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))
    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)
    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)
    # Reuse the shared Smooth-L1 implementation (resolves the old TODO about
    # duplicated loss code; the math is identical to the inlined version).
    loss = smooth_l1_loss(target_bbox, rpn_bbox)
    # Guard against an empty selection (no positive anchors).
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.

    Returns a scalar cross-entropy loss averaged over active predictions.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')
    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    # images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)
    # Loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)
    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active
    # Computer loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    # NOTE(review): if every prediction lands on an inactive class,
    # reduce_sum(pred_active) is 0 and this divides 0/0 → NaN. Worth
    # confirming whether that can happen with the datasets in use.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]

    Returns a scalar smooth-L1 loss tensor.
    """
    # Collapse the batch and ROI axes so each ROI is a flat row.
    num_classes = K.int_shape(pred_bbox)[2]
    flat_class_ids = K.reshape(target_class_ids, (-1,))
    flat_target_bbox = K.reshape(target_bbox, (-1, 4))
    flat_pred_bbox = K.reshape(pred_bbox, (-1, num_classes, 4))

    # Only positive ROIs contribute, and for each of those only the
    # deltas predicted for its ground-truth class.
    positive_ix = tf.where(flat_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(flat_class_ids, positive_ix), tf.int64)
    gather_ix = tf.stack([positive_ix, positive_class_ids], axis=1)

    # Select the matching target and predicted deltas.
    y_true = tf.gather(flat_target_bbox, positive_ix)
    y_pred = tf.gather_nd(flat_pred_bbox, gather_ix)

    # Smooth-L1 loss; zero if there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    smooth_l1_loss(y_true=y_true, y_pred=y_pred),
                    tf.constant(0.0))
    return K.mean(loss)
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
        with values from 0 to 1.

    Returns a scalar binary cross-entropy loss tensor.
    """
    # Collapse the batch and ROI axes into one.
    class_ids = K.reshape(target_class_ids, (-1,))
    true_shape = tf.shape(target_masks)
    true_masks = K.reshape(target_masks,
                           (-1, true_shape[2], true_shape[3]))
    pred_shape = tf.shape(pred_masks)
    flat_pred = K.reshape(pred_masks,
                          (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Reorder predictions to [N, num_classes, height, width] so a
    # (roi, class) pair can be picked with a single gather_nd.
    flat_pred = tf.transpose(flat_pred, [0, 3, 1, 2])

    # Only positive ROIs contribute, and only through the mask
    # predicted for their ground-truth class.
    positive_ix = tf.where(class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(class_ids, positive_ix), tf.int64)
    gather_ix = tf.stack([positive_ix, positive_class_ids], axis=1)

    # Gather the contributing target and predicted masks.
    y_true = tf.gather(true_masks, positive_ix)
    y_pred = tf.gather_nd(flat_pred, gather_ix)

    # Binary cross-entropy; zero when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    return K.mean(loss)
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    augment: (Deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
        224x224 and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    image_meta: image metadata vector built by compose_image_meta()
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    # Random horizontal flips.
    # TODO: will be removed in a future update in favor of augmentation
    if augment:
        logging.warning("'augment' is depricated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmentors that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return (augmenter.__class__.__name__ in MASK_AUGMENTERS)

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support bool
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool. Use the builtin `bool` — the `np.bool`
        # alias was deprecated in NumPy 1.20 and removed in 1.24, so
        # `mask.astype(np.bool)` raises AttributeError on modern NumPy.
        mask = mask.astype(bool)

    # Note that some boxes might be all zeros if the corresponding mask got cropped out.
    # and here is to filter them out
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, image_meta, class_ids, bbox, mask
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped
           to bbox boundaries and resized to neural network output size.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)

    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.

    # Trim empty padding in gt_boxes and gt_masks parts
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]

    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])

    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)

    # Assign ROIs to GT boxes: for each ROI, the GT box with max IoU.
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]

    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]

    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]

    # Subsample ROIs. Aim for 33% foreground.
    # FG
    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indicies of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.

        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            # NOTE(review): the comment claims an easier threshold, but this
            # uses the same < 0.5 cut as the bg_ids selection above — confirm
            # whether a looser threshold was intended here.
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            # NOTE(review): if keep_bg_ids is empty here (fg present but no
            # bg ROIs at all), np.random.choice raises ValueError — verify
            # this case cannot occur with real proposals.
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)

    # Reset the gt boxes assigned to BG ROIs.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0

    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]

    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV

    # Generate class-specific target masks
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]

        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(skimage.transform.resize(
                    class_mask, (gt_h, gt_w), order=1, mode="constant")).astype(bool)
            # Place the mini batch in the placeholder
            class_mask = placeholder

        # Pick part of the mask and resize it
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = skimage.transform.resize(m, config.MASK_SHAPE, order=1, mode="constant")
        masks[i, :, :, class_id] = mask

    return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]

    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        # Anchors that barely touch a crowd box stay eligible as negatives.
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)

    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)

    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens). Instead,
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # TODO: If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argmax(overlaps, axis=0)
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1

    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0

    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]

        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w

        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1

    return rpn_match, rpn_bbox
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    # placeholder
    rois = np.zeros((count, 4), dtype=np.int32)

    # Generate random ROIs around GT boxes (90% of count)
    rois_per_box = int(0.9 * count / gt_boxes.shape[0])
    for i in range(gt_boxes.shape[0]):
        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
        h = gt_y2 - gt_y1
        w = gt_x2 - gt_x1
        # random boundaries: a window up to one box-size around the GT box,
        # clipped to the image.
        r_y1 = max(gt_y1 - h, 0)
        r_y2 = min(gt_y2 + h, image_shape[0])
        r_x1 = max(gt_x1 - w, 0)
        r_x2 = min(gt_x2 + w, image_shape[1])

        # To avoid generating boxes with zero area, we generate double what
        # we need and filter out the extra. If we get fewer valid boxes
        # than we need, we loop and try again.
        # NOTE(review): if the window is degenerate (r_y2 <= r_y1 or
        # r_x2 <= r_x1, e.g. a zero-area GT box) np.random.randint raises,
        # and an extremely thin window could loop for a long time — confirm
        # GT boxes are always at least a couple of pixels in each dimension.
        while True:
            y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
            x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
            # Filter out zero area boxes
            threshold = 1
            y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                        threshold][:rois_per_box]
            x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                        threshold][:rois_per_box]
            if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
                break

        # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
        # into x1, y1, x2, y2 order
        x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
        y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
        box_rois = np.hstack([y1, x1, y2, x2])
        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois

    # Generate random ROIs anywhere in the image (10% of count)
    remaining_count = count - (rois_per_box * gt_boxes.shape[0])
    # To avoid generating boxes with zero area, we generate double what
    # we need and filter out the extra. If we get fewer valid boxes
    # than we need, we loop and try again.
    while True:
        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                    threshold][:remaining_count]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                    threshold][:remaining_count]
        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
            break

    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
    # into x1, y1, x2, y2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    global_rois = np.hstack([y1, x1, y2, x2])
    rois[-remaining_count:] = global_rois
    return rois
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (Deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
    - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
    - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
                are those of the image unless use_mini_mask is True, in which
                case they are defined in MINI_MASK_SHAPE.

    outputs list: Usually empty in regular training. But if detection_targets
        is True then the outputs list contains target class_ids, bbox deltas,
        and masks.
    """
    b = 0  # batch item index
    image_index = -1
    image_ids = np.copy(dataset.image_ids)
    error_count = 0

    # Anchors
    # [anchor_count, (y1, x1, y2, x2)]
    backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)

    # Keras requires a generator to run indefinitely.
    while True:
        try:
            # Increment index to pick next image. Shuffle if at the start of an epoch.
            image_index = (image_index + 1) % len(image_ids)
            if shuffle and image_index == 0:
                np.random.shuffle(image_ids)

            # Get GT bounding boxes and masks for image.
            image_id = image_ids[image_index]
            image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                load_image_gt(dataset, config, image_id, augment=augment,
                              augmentation=augmentation,
                              use_mini_mask=config.USE_MINI_MASK)

            # Skip images that have no instances. This can happen in cases
            # where we train on a subset of classes and the image doesn't
            # have any of the classes we care about.
            if not np.any(gt_class_ids > 0):
                continue

            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
                                                    gt_class_ids, gt_boxes, config)

            # Mask R-CNN Targets
            if random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, random_rois, gt_class_ids, gt_boxes)
                if detection_targets:
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)

            # Init batch arrays (lazily, on the first item of each batch, so
            # shapes can be taken from the freshly loaded sample).
            if b == 0:
                batch_image_meta = np.zeros(
                    (batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (batch_size,) + image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                batch_gt_masks = np.zeros(
                    (batch_size, gt_masks.shape[0], gt_masks.shape[1],
                     config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
                if random_rois:
                    batch_rpn_rois = np.zeros(
                        (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if detection_targets:
                        batch_rois = np.zeros(
                            (batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)

            # If more instances than fits in the array, sub-sample from them.
            if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]

            # Add to batch
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if random_rois:
                batch_rpn_rois[b] = rpn_rois
                if detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1

            # Batch full?
            if b >= batch_size:
                inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
                          batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
                outputs = []

                if random_rois:
                    inputs.extend([batch_rpn_rois])
                    if detection_targets:
                        inputs.extend([batch_rois])
                        # Keras requires that output and targets have the same number of dimensions
                        batch_mrcnn_class_ids = np.expand_dims(
                            batch_mrcnn_class_ids, -1)
                        outputs.extend(
                            [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])

                yield inputs, outputs

                # start a new batch
                b = 0
        except (GeneratorExit, KeyboardInterrupt):
            raise
        except:
            # Log it and skip the image. Give up after several consecutive
            # (well, cumulative) failures rather than looping forever.
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
            error_count += 1
            if error_count > 5:
                raise
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
    def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A Sub-class of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        # Set up the log directory before building so the model is
        # constructed with logging state already in place.
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
# Image size must be dividable by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(
shape=[None, None, 3], name="input_image")
input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
elif mode == "inference":
# Anchors in normalized coordinates
input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
# Don't create the thead (stage 5), so we pick the 4th item in the list.
_, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
stage5=True, train_bn=config.TRAIN_BN)
# Top-down Layers
# TODO: add assert to varify feature map sizes match what's in config
P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Anchors
if mode == "training":
anchors = self.get_anchors(config.IMAGE_SHAPE)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
# A hack to get around Keras's bad support for constants
anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
else:
anchors = input_anchors
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), 256)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(
proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
config=config)([rpn_class, rpn_bbox, anchors])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
active_class_ids = KL.Lambda(
lambda x: parse_image_meta_graph(x)["active_class_ids"]
)(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates
target_rois = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
# TODO: clean up (use tf.identify if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
# normalized coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Create masks for detections
detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
model = KM.Model([input_image, input_image_meta, input_anchors],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from mrcnn.parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
    """Finds the last checkpoint file of the last trained model in the
    model directory.

    Returns:
        log_dir: The directory where events and weights are saved
        checkpoint_path: the path to the last checkpoint file
    """
    prefix = self.config.NAME.lower()
    # Each sub-directory of the model dir corresponds to one training run.
    run_dirs = sorted(d for d in next(os.walk(self.model_dir))[1]
                      if d.startswith(prefix))
    if not run_dirs:
        return None, None
    # Directory names embed a timestamp, so the lexicographically last
    # one is the most recent run.
    last_dir = os.path.join(self.model_dir, run_dirs[-1])
    weight_files = sorted(f for f in next(os.walk(last_dir))[2]
                          if f.startswith("mask_rcnn"))
    if not weight_files:
        return last_dir, None
    return last_dir, os.path.join(last_dir, weight_files[-1])
def load_weights(self, filepath, by_name=False, exclude=None):
    """Modified version of the corresponding Keras function with
    the addition of multi-GPU support and the ability to exclude
    some layers from loading.

    filepath: path to an HDF5 weights file.
    by_name: if True, load weights by layer name instead of by order.
    exclude: list of layer names to exclude from loading. Forces
        by_name=True because order-based loading cannot skip layers.
    """
    import h5py
    # NOTE(review): keras.engine.topology is a private Keras module that
    # was moved/renamed in later Keras versions — confirm against the
    # Keras version this project pins.
    from keras.engine import topology
    if exclude:
        by_name = True
    if h5py is None:
        raise ImportError('`load_weights` requires h5py.')
    f = h5py.File(filepath, mode='r')
    # Some HDF5 files nest the weights under a 'model_weights' group.
    if 'layer_names' not in f.attrs and 'model_weights' in f:
        f = f['model_weights']
    # In multi-GPU training, we wrap the model. Get layers
    # of the inner model because they have the weights.
    keras_model = self.keras_model
    layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
        else keras_model.layers
    # Exclude some layers
    if exclude:
        layers = filter(lambda l: l.name not in exclude, layers)
    if by_name:
        topology.load_weights_from_hdf5_group_by_name(f, layers)
    else:
        topology.load_weights_from_hdf5_group(f, layers)
    if hasattr(f, 'close'):
        f.close()
    # Update the log directory (and epoch counter) from the weights path.
    self.set_log_dir(filepath)
def get_imagenet_weights(self):
    """Downloads ImageNet trained weights from Keras.
    Returns path to weights file.
    """
    from keras.utils.data_utils import get_file
    weights_url = ('https://github.com/fchollet/deep-learning-models/'
                   'releases/download/v0.2/'
                   'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    # get_file caches the download under ~/.keras/models and verifies
    # the archive against the given MD5 hash.
    return get_file(
        'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
        weights_url,
        cache_subdir='models',
        md5_hash='a268eb855778b3df3c7506639542a6af')
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.

    learning_rate: SGD learning rate.
    momentum: SGD momentum.
    """
    # Optimizer object with gradient clipping from the config.
    optimizer = keras.optimizers.SGD(
        lr=learning_rate, momentum=momentum,
        clipnorm=self.config.GRADIENT_CLIP_NORM)
    # Add Losses
    # First, clear previously set losses to avoid duplication.
    # NOTE(review): this pokes Keras private attributes (_losses,
    # _per_input_losses) and is tied to a specific Keras version —
    # confirm before upgrading Keras.
    self.keras_model._losses = []
    self.keras_model._per_input_losses = {}
    loss_names = [
        "rpn_class_loss", "rpn_bbox_loss",
        "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        # Skip losses that were already registered (e.g. on re-compile).
        if layer.output in self.keras_model.losses:
            continue
        # Mean over the batch, scaled by the per-loss weight from config.
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.add_loss(loss)
    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    reg_losses = [
        keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
        for w in self.keras_model.trainable_weights
        if 'gamma' not in w.name and 'beta' not in w.name]
    self.keras_model.add_loss(tf.add_n(reg_losses))
    # Compile. The losses were attached above via add_loss, so every
    # model output gets a None loss here.
    self.keras_model.compile(
        optimizer=optimizer,
        loss=[None] * len(self.keras_model.outputs))
    # Add metrics for losses so they show up in training progress/logs.
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.metrics_tensors.append(loss)
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
    """Sets model layers as trainable if their names match
    the given regular expression.

    layer_regex: regular expression matched (full match) against layer names.
    keras_model: internal — the (sub)model being processed on recursive
        calls. Callers normally leave this as None.
    indent: internal — log indentation level for nested models.
    verbose: if > 0, log the names of layers selected for training.
    """
    # Print message on the first call (but not on recursive calls)
    if verbose > 0 and keras_model is None:
        log("Selecting layers to train")
    keras_model = keras_model or self.keras_model
    # In multi-GPU training, we wrap the model. Get layers
    # of the inner model because they have the weights.
    layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
        else keras_model.layers
    for layer in layers:
        # Is the layer a model? If so, recurse into its own layers.
        if layer.__class__.__name__ == 'Model':
            print("In model: ", layer.name)
            self.set_trainable(
                layer_regex, keras_model=layer, indent=indent + 4)
            continue
        # Layers without weights have nothing to train.
        if not layer.weights:
            continue
        # Is it trainable?
        trainable = bool(re.fullmatch(layer_regex, layer.name))
        # Update layer. If layer is a container, update inner layer.
        if layer.__class__.__name__ == 'TimeDistributed':
            layer.layer.trainable = trainable
        else:
            layer.trainable = trainable
        # Print trainable layer names
        if trainable and verbose > 0:
            log("{}{:20}   ({})".format(" " * indent, layer.name,
                                        layer.__class__.__name__))
def set_log_dir(self, model_path=None):
    """Sets the model log directory and epoch counter.

    model_path: If None, or a format different from what this code uses
        then set a new log directory and start epochs from 0. Otherwise,
        extract the log directory and the epoch counter from the file
        name.
    """
    # Defaults: a fresh run starting at epoch 0, timestamped "now".
    self.epoch = 0
    now = datetime.datetime.now()
    if model_path:
        # Continue from where we left off. A sample model path:
        # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
        # Groups: year, month, day, hour, minute, epoch.
        pattern = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
        match = re.match(pattern, model_path)
        if match:
            year, month, day, hour, minute, epoch = map(int, match.groups())
            now = datetime.datetime(year, month, day, hour, minute)
            # Epoch number in the file name is 1-based while Keras counts
            # from 0; the -1/+1 cancel out so training resumes at the
            # epoch right after the checkpoint.
            self.epoch = epoch - 1 + 1
    # Directory for training logs, e.g. <model_dir>/coco20171029T2315
    self.log_dir = os.path.join(
        self.model_dir,
        "{}{:%Y%m%dT%H%M}".format(self.config.NAME.lower(), now))
    # Checkpoint template saved after each epoch; Keras fills in the
    # zero-padded epoch placeholder.
    template = "mask_rcnn_{}_*epoch*.h5".format(self.config.NAME.lower())
    self.checkpoint_path = os.path.join(self.log_dir, template).replace(
        "*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
          augmentation=None):
    """Train the model.

    train_dataset, val_dataset: Training and validation Dataset objects.
    learning_rate: The learning rate to train with
    epochs: Number of training epochs. Note that previous training epochs
        are considered to be done already, so this actually determines
        the epochs to train in total rather than in this particular
        call.
    layers: Allows selecting which layers to train. It can be:
        - A regular expression to match layer names to train
        - One of these predefined values:
          heads: The RPN, classifier and mask heads of the network
          all: All the layers
          3+: Train Resnet stage 3 and up
          4+: Train Resnet stage 4 and up
          5+: Train Resnet stage 5 and up
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
        augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
        flips images right/left 50% of the time. You can pass complex
        augmentations as well. This augmentation applies 50% of the
        time, and when it does it flips images right/left half the time
        and adds a Gaussian blur with a random sigma in range 0 to 5.

            augmentation = imgaug.augmenters.Sometimes(0.5, [
                imgaug.augmenters.Fliplr(0.5),
                imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
            ])
    """
    assert self.mode == "training", "Create model in training mode."

    # Pre-defined layer regular expressions
    layer_regex = {
        # all layers but the backbone
        "heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
        # From a specific Resnet stage and up
        "3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
        "4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
        "5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
        # All layers
        "all": ".*",
    }
    if layers in layer_regex.keys():
        layers = layer_regex[layers]

    # Data generators
    train_generator = data_generator(train_dataset, self.config, shuffle=True,
                                     augmentation=augmentation,
                                     batch_size=self.config.BATCH_SIZE)
    val_generator = data_generator(val_dataset, self.config, shuffle=True,
                                   batch_size=self.config.BATCH_SIZE)

    # Callbacks: TensorBoard logging plus a weights checkpoint per epoch.
    callbacks = [
        keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                    histogram_freq=0, write_graph=True, write_images=False),
        keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                        verbose=0, save_weights_only=True),
    ]

    # Train
    log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
    log("Checkpoint Path: {}".format(self.checkpoint_path))
    self.set_trainable(layers)
    self.compile(learning_rate, self.config.LEARNING_MOMENTUM)

    # Work-around for Windows: Keras fails on Windows when using
    # multiprocessing workers. See discussion here:
    # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
    # Fixed: compare strings with ==, not the identity operator `is`,
    # whose result on string literals is implementation-dependent.
    if os.name == 'nt':
        workers = 0
    else:
        workers = multiprocessing.cpu_count()

    self.keras_model.fit_generator(
        train_generator,
        initial_epoch=self.epoch,
        epochs=epochs,
        steps_per_epoch=self.config.STEPS_PER_EPOCH,
        callbacks=callbacks,
        validation_data=val_generator,
        validation_steps=self.config.VALIDATION_STEPS,
        max_queue_size=100,
        workers=workers,
        use_multiprocessing=True,
    )
    self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
    """Takes a list of images and modifies them to the format expected
    as an input to the neural network.

    images: List of image matrices [height,width,depth]. Images can have
        different sizes.

    Returns 3 Numpy matrices:
    molded_images: [N, h, w, 3]. Images resized and normalized.
    image_metas: [N, length of meta data]. Details about each image.
    windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
        original image (padding excluded).
    """
    molded_images, image_metas, windows = [], [], []
    for image in images:
        # Resize to the configured size.
        # TODO: move resizing to mold_image()
        molded, window, scale, padding, crop = utils.resize_image(
            image,
            min_dim=self.config.IMAGE_MIN_DIM,
            min_scale=self.config.IMAGE_MIN_SCALE,
            max_dim=self.config.IMAGE_MAX_DIM,
            mode=self.config.IMAGE_RESIZE_MODE)
        # Zero-center the pixel values.
        molded = mold_image(molded, self.config)
        # Build the per-image metadata vector (image id 0, no active
        # class ids at inference time).
        meta = compose_image_meta(
            0, image.shape, molded.shape, window, scale,
            np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
        molded_images.append(molded)
        image_metas.append(meta)
        windows.append(window)
    # Pack into batch arrays.
    return np.stack(molded_images), np.stack(image_metas), np.stack(windows)
def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
                      image_shape, window):
    """Reformats the detections of one image from the format of the neural
    network output to a format suitable for use in the rest of the
    application.

    detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
    mrcnn_mask: [N, height, width, num_classes]
    original_image_shape: [H, W, C] Original image shape before resizing
    image_shape: [H, W, C] Shape of the image after resizing and padding
    window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
            image is excluding the padding.

    Returns:
    boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
    class_ids: [N] Integer class IDs for each bounding box
    scores: [N] Float probability scores of the class_id
    masks: [height, width, num_instances] Instance masks
    """
    # How many detections do we have?
    # Detections array is padded with zeros. Find the first class_id == 0.
    zero_ix = np.where(detections[:, 4] == 0)[0]
    N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
    # Extract boxes, class_ids, scores, and class-specific masks
    boxes = detections[:N, :4]
    class_ids = detections[:N, 4].astype(np.int32)
    scores = detections[:N, 5]
    # For each detection, keep only the mask channel of its predicted class.
    masks = mrcnn_mask[np.arange(N), :, :, class_ids]
    # Translate normalized coordinates in the resized image to pixel
    # coordinates in the original image before resizing
    window = utils.norm_boxes(window, image_shape[:2])
    wy1, wx1, wy2, wx2 = window
    shift = np.array([wy1, wx1, wy1, wx1])
    wh = wy2 - wy1  # window height
    ww = wx2 - wx1  # window width
    scale = np.array([wh, ww, wh, ww])
    # Convert boxes to normalized coordinates on the window
    boxes = np.divide(boxes - shift, scale)
    # Convert boxes to pixel coordinates on the original image
    boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
    # Filter out detections with zero area. Happens in early training when
    # network weights are still random
    exclude_ix = np.where(
        (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
    if exclude_ix.shape[0] > 0:
        boxes = np.delete(boxes, exclude_ix, axis=0)
        class_ids = np.delete(class_ids, exclude_ix, axis=0)
        scores = np.delete(scores, exclude_ix, axis=0)
        masks = np.delete(masks, exclude_ix, axis=0)
        N = class_ids.shape[0]
    # Resize masks to original image size and set boundary threshold.
    full_masks = []
    for i in range(N):
        # Convert neural network mask to full size mask
        full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
        full_masks.append(full_mask)
    # Stack along a new last axis; with no detections, keep the H, W
    # dims and an empty instance axis.
    full_masks = np.stack(full_masks, axis=-1)\
        if full_masks else np.empty(masks.shape[1:3] + (0,))
    return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
    """Runs the detection pipeline.

    images: List of images, potentially of different sizes.
    verbose: if truthy, log the inputs at each stage.

    Returns a list of dicts, one dict per image. The dict contains:
    rois: [N, (y1, x1, y2, x2)] detection bounding boxes
    class_ids: [N] int class IDs
    scores: [N] float probability scores for the class IDs
    masks: [H, W, N] instance binary masks
    """
    assert self.mode == "inference", "Create model in inference mode."
    assert len(
        images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
    if verbose:
        log("Processing {} images".format(len(images)))
        for image in images:
            log("image", image)
    # Mold inputs to format expected by the neural network
    molded_images, image_metas, windows = self.mold_inputs(images)
    # Validate image sizes
    # All images in a batch MUST be of the same size
    image_shape = molded_images[0].shape
    for g in molded_images[1:]:
        assert g.shape == image_shape,\
            "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
    # Anchors
    anchors = self.get_anchors(image_shape)
    # Duplicate across the batch dimension because Keras requires it
    # TODO: can this be optimized to avoid duplicating the anchors?
    anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
    if verbose:
        log("molded_images", molded_images)
        log("image_metas", image_metas)
        log("anchors", anchors)
    # Run object detection
    detections, _, _, mrcnn_mask, _, _, _ =\
        self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
    # Process detections: map each image's network output back to
    # original-image pixel coordinates.
    results = []
    for i, image in enumerate(images):
        final_rois, final_class_ids, final_scores, final_masks =\
            self.unmold_detections(detections[i], mrcnn_mask[i],
                                   image.shape, molded_images[i].shape,
                                   windows[i])
        results.append({
            "rois": final_rois,
            "class_ids": final_class_ids,
            "scores": final_scores,
            "masks": final_masks,
        })
    return results
def detect_molded(self, molded_images, image_metas, verbose=0):
    """Runs the detection pipeline, but expect inputs that are
    molded already. Used mostly for debugging and inspecting
    the model.

    molded_images: List of images loaded using load_image_gt()
    image_metas: image meta data, also returned by load_image_gt()
    verbose: if truthy, log the inputs at each stage.

    Returns a list of dicts, one dict per image. The dict contains:
    rois: [N, (y1, x1, y2, x2)] detection bounding boxes
    class_ids: [N] int class IDs
    scores: [N] float probability scores for the class IDs
    masks: [H, W, N] instance binary masks
    """
    assert self.mode == "inference", "Create model in inference mode."
    assert len(molded_images) == self.config.BATCH_SIZE,\
        "Number of images must be equal to BATCH_SIZE"
    if verbose:
        log("Processing {} images".format(len(molded_images)))
        for image in molded_images:
            log("image", image)
    # Validate image sizes
    # All images in a batch MUST be of the same size
    image_shape = molded_images[0].shape
    for g in molded_images[1:]:
        assert g.shape == image_shape, "Images must have the same size"
    # Anchors
    anchors = self.get_anchors(image_shape)
    # Duplicate across the batch dimension because Keras requires it
    # TODO: can this be optimized to avoid duplicating the anchors?
    anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
    if verbose:
        log("molded_images", molded_images)
        log("image_metas", image_metas)
        log("anchors", anchors)
    # Run object detection
    detections, _, _, mrcnn_mask, _, _, _ =\
        self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
    # Process detections. Inputs are already molded, so the "window" is
    # the full molded image.
    results = []
    for i, image in enumerate(molded_images):
        window = [0, 0, image.shape[0], image.shape[1]]
        final_rois, final_class_ids, final_scores, final_masks =\
            self.unmold_detections(detections[i], mrcnn_mask[i],
                                   image.shape, molded_images[i].shape,
                                   window)
        results.append({
            "rois": final_rois,
            "class_ids": final_class_ids,
            "scores": final_scores,
            "masks": final_masks,
        })
    return results
def get_anchors(self, image_shape):
    """Returns anchor pyramid for the given image size, in
    normalized coordinates."""
    backbone_shapes = compute_backbone_shapes(self.config, image_shape)
    # Anchors are cached per image shape; generating them is expensive
    # and inference typically reuses one shape.
    if not hasattr(self, "_anchor_cache"):
        self._anchor_cache = {}
    key = tuple(image_shape)
    if key not in self._anchor_cache:
        # Generate Anchors
        anchors = utils.generate_pyramid_anchors(
            self.config.RPN_ANCHOR_SCALES,
            self.config.RPN_ANCHOR_RATIOS,
            backbone_shapes,
            self.config.BACKBONE_STRIDES,
            self.config.RPN_ANCHOR_STRIDE)
        # Keep a copy of the latest anchors in pixel coordinates because
        # it's used in inspect_model notebooks.
        # TODO: Remove this after the notebooks are refactored to not use it
        self.anchors = anchors
        # Normalize coordinates
        self._anchor_cache[key] = utils.norm_boxes(anchors, image_shape[:2])
    return self._anchor_cache[key]
def ancestor(self, tensor, name, checked=None):
    """Finds the ancestor of a TF tensor in the computation graph.

    tensor: TensorFlow symbolic tensor.
    name: Name of ancestor tensor to find
    checked: For internal use. A list of tensors that were already
        searched to avoid loops in traversing the graph.
    """
    if checked is None:
        checked = []
    # Put a limit on how deep we go to avoid very long loops
    if len(checked) > 500:
        return None
    # Accept an optional "_<n>" suffix before each "/" because Keras
    # appends numeric suffixes to de-duplicate layer names.
    if isinstance(name, str):
        name = re.compile(name.replace("/", r"(\_\d+)*/"))
    for parent in tensor.op.inputs:
        if parent in checked:
            continue
        if bool(re.fullmatch(name, parent.name)):
            return parent
        checked.append(parent)
        # Depth-first search up the graph.
        found = self.ancestor(parent, name, checked)
        if found is not None:
            return found
    return None
def find_trainable_layer(self, layer):
    """If a layer is encapsulated by another layer, this function
    digs through the encapsulation and returns the layer that holds
    the weights.
    """
    # TimeDistributed wraps an inner layer; unwrap until we reach it.
    while layer.__class__.__name__ == 'TimeDistributed':
        layer = layer.layer
    return layer
def get_trainable_layers(self):
    """Returns a list of layers that have weights."""
    # Unwrap wrapper layers first, then keep only layers that actually
    # carry weights.
    unwrapped = (self.find_trainable_layer(l)
                 for l in self.keras_model.layers)
    return [layer for layer in unwrapped if layer.get_weights()]
def run_graph(self, images, outputs, image_metas=None):
    """Runs a sub-set of the computation graph that computes the given
    outputs.

    images: list of images (already molded if image_metas is given).
    image_metas: If provided, the images are assumed to be already
        molded (i.e. resized, padded, and normalized)
    outputs: List of tuples (name, tensor) to compute. The tensors are
        symbolic TensorFlow tensors and the names are for easy tracking.

    Returns an ordered dict of results. Keys are the names received in the
    input and values are Numpy arrays.
    """
    model = self.keras_model
    # Organize desired outputs into an ordered dict
    outputs = OrderedDict(outputs)
    for o in outputs.values():
        assert o is not None
    # Build a Keras function to run parts of the computation graph
    inputs = model.inputs
    # Append the learning-phase placeholder if the model uses one and it
    # isn't already fixed to a constant.
    if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs += [K.learning_phase()]
    # NOTE(review): `inputs` (with the learning phase appended) is
    # computed above, but `model.inputs` is passed here — confirm
    # whether kf should be built from `inputs` instead.
    kf = K.function(model.inputs, list(outputs.values()))
    # Prepare inputs
    if image_metas is None:
        molded_images, image_metas, _ = self.mold_inputs(images)
    else:
        molded_images = images
    image_shape = molded_images[0].shape
    # Anchors
    anchors = self.get_anchors(image_shape)
    # Duplicate across the batch dimension because Keras requires it
    # TODO: can this be optimized to avoid duplicating the anchors?
    anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
    model_in = [molded_images, image_metas, anchors]
    # Run inference; feed learning phase = 0 (inference) when expected.
    if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        model_in.append(0.)
    outputs_np = kf(model_in)
    # Pack the generated Numpy arrays into a dict and log the results.
    outputs_np = OrderedDict([(k, v)
                              for k, v in zip(outputs.keys(), outputs_np)])
    for k, v in outputs_np.items():
        log(k, v)
    return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
                       window, scale, active_class_ids):
    """Takes attributes of an image and puts them in one 1D array.

    image_id: An int ID of the image. Useful for debugging.
    original_image_shape: [H, W, C] before resizing or padding.
    image_shape: [H, W, C] after resizing and padding
    window: (y1, x1, y2, x2) in pixels. The area of the image where the real
            image is (excluding the padding)
    scale: The scaling factor applied to the original image (float32)
    active_class_ids: List of class_ids available in the dataset from which
        the image came. Useful if training on images from multiple datasets
        where not all classes are present in all datasets.
    """
    # Layout: [id(1), original shape(3), molded shape(3),
    #          window(4) as (y1, x1, y2, x2), scale(1),
    #          active class ids(num_classes)]
    flat = [image_id]
    flat.extend(original_image_shape)
    flat.extend(image_shape)
    flat.extend(window)
    flat.append(scale)
    flat.extend(active_class_ids)
    return np.array(flat)
def parse_image_meta(meta):
    """Parses an array that contains image attributes to its components.
    See compose_image_meta() for more details.

    meta: [batch, meta length] where meta length depends on NUM_CLASSES

    Returns a dict of the parsed values.
    """
    # Slice out the fixed-size fields; whatever remains after column 12
    # is the per-dataset active_class_ids vector.
    return {
        "image_id": meta[:, 0].astype(np.int32),
        "original_image_shape": meta[:, 1:4].astype(np.int32),
        "image_shape": meta[:, 4:7].astype(np.int32),
        # (y1, x1, y2, x2) window of image in pixels
        "window": meta[:, 7:11].astype(np.int32),
        "scale": meta[:, 11].astype(np.float32),
        "active_class_ids": meta[:, 12:].astype(np.int32),
    }
def parse_image_meta_graph(meta):
    """Parses a tensor that contains image attributes to its components.
    See compose_image_meta() for more details.

    meta: [batch, meta length] where meta length depends on NUM_CLASSES

    Returns a dict of the parsed tensors.
    """
    # Same layout as parse_image_meta(), but without dtype casts so the
    # slices stay symbolic inside the TF graph.
    fields = {}
    fields["image_id"] = meta[:, 0]
    fields["original_image_shape"] = meta[:, 1:4]
    fields["image_shape"] = meta[:, 4:7]
    fields["window"] = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
    fields["scale"] = meta[:, 11]
    fields["active_class_ids"] = meta[:, 12:]
    return fields
def mold_image(images, config):
    """Expects an RGB image (or array of images) and subtracts
    the mean pixel and converts it to float. Expects image
    colors in RGB order.
    """
    # Zero-center the colors around the dataset's mean pixel, in float.
    as_float = images.astype(np.float32)
    return as_float - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
    """Takes a image normalized with mold() and returns the original."""
    # Add the mean pixel back and restore 8-bit color values.
    restored = normalized_images + config.MEAN_PIXEL
    return restored.astype(np.uint8)
############################################################
# Miscellenous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
    """Often boxes are represented with matrices of shape [N, 4] and
    are padded with zeros. This removes zero boxes.

    boxes: [N, 4] matrix of boxes.
    non_zeros: [N] a 1D boolean mask identifying the rows to keep
    """
    # A row is kept when the sum of absolute coordinates is non-zero.
    non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
    trimmed = tf.boolean_mask(boxes, non_zeros, name=name)
    return trimmed, non_zeros
def batch_pack_graph(x, counts, num_rows):
    """Picks different number of values from each row
    in x depending on the values in counts.
    """
    # Take the first counts[i] values of row i, then flatten the pieces
    # into one tensor.
    picked = [x[row, :counts[row]] for row in range(num_rows)]
    return tf.concat(picked, axis=0)
def norm_boxes_graph(boxes, shape):
    """Converts boxes from pixel coordinates to normalized coordinates.

    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [..., (y1, x1, y2, x2)] in normalized coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Divide by (size - 1) so the last pixel index maps to 1.0.
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    # Shift (y2, x2) down by one pixel to convert the exclusive pixel
    # boundary into an inclusive normalized one.
    shift = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
    """Converts boxes from normalized coordinates to pixel coordinates.
    Inverse of norm_boxes_graph().

    boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [..., (y1, x1, y2, x2)] in pixel coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Scale back by (size - 1), then shift (y2, x2) up by one pixel to
    # restore the exclusive pixel boundary; round to integer pixels.
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
| [
"nicholas.r.weir@gmail.com"
] | nicholas.r.weir@gmail.com |
a47a6018b7a2d2eccb45ce25cd36f57c5e4a641f | 6ddb489d40cecaa671bf5367bf8d4c3ccfc5e53d | /news_app/migrations/0003_auto_20210408_1316.py | 909d80825dd40bae8753591603f69a38a2cb3f6e | [] | no_license | elenghazaryan/News-App | 5790323c9dc34d1e220560e3399309b31ec1bf9b | 52df021d7d26a03eec928904926141f8cc09433d | refs/heads/master | 2023-04-07T01:34:39.904032 | 2021-04-15T13:07:03 | 2021-04-15T13:07:03 | 351,228,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | # Generated by Django 3.1.7 on 2021-04-08 09:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rename Comment.comment to Comment.com and add a nullable
    self-referential parent FK so comments can form reply threads.
    """

    # Must run after the migration that removed User.username.
    dependencies = [
        ('news_app', '0002_remove_user_username'),
    ]

    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='comment',
            new_name='com',
        ),
        # parent is NULL for top-level comments; deleting a parent
        # cascades to its replies.
        migrations.AddField(
            model_name='comment',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='news_app.comment'),
        ),
    ]
| [
"kelen_2000@mail.ru"
] | kelen_2000@mail.ru |
cf0d6f85fb03cb3c291887f4ed66771d139d48d3 | 36dff14d4874911a5c3bcc2f9f4c9abbea428d63 | /CheckChineseName.py | 9d7d46ebf1dc015981c0aef89e7a894cbbf72674 | [] | no_license | yuhao-zhou/Chinese-student-in-class | f8c768cdc05eef08ffcc73d57008d85b4c94fdc8 | f19f99cd260be6b653275c888c1f7f495c9dc49d | refs/heads/master | 2022-11-22T07:19:35.437871 | 2020-07-28T14:57:04 | 2020-07-28T14:57:04 | 283,243,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | from wikiScrap import get_surname
from bs4 import BeautifulSoup
chinese_surname = get_surname()
with open("Subject Roster_ Database Systems & Information Modelling (INFO90002_2020_SM2).html", "r", encoding='utf-8') as file:
soup = BeautifulSoup(file, 'lxml')
student_name = []
for tag in soup():
if "data-student_id" in tag.attrs:
name = tag.string[:-1] # name is just the text component
student_name.append(name.split())
# if we want to add some extra (check from student surname)
chinese_surname.extend(["qi","you","xuan","xi","yong","weng","si","qu","quan","pu","miao","hui","jing","ji","geng","bian", "dou"])
chinese_number = 0
for name in student_name:
if str.lower(name[-1]) in chinese_surname:
chinese_number+=1
# 77% from the class have a Chinese surname!
print(chinese_number/len(student_name))
| [
"noreply@github.com"
] | noreply@github.com |
646e2f533fcba1d8f9349e4a7bee479b0d7569cc | 560e7d7676f5f14b738bae660b7485653ddedb46 | /tests/TestBQS.py | c46b91700c1001bf615a6448c1de879a89fabd82 | [] | no_license | dxiao/SmootLight | 1efe3a369287f16fdf1383c9923adaffca9bbb4e | 58ec94a477f5edef0bf75a60252af96adec34d8d | refs/heads/master | 2020-12-24T22:20:06.099820 | 2011-02-19T05:38:03 | 2011-02-19T05:38:03 | 1,361,133 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | import unittest
import util.BehaviorQuerySystem as bqs
from behaviors.ColorChangerBehavior import *
import util.Geo as geo
class TestBQS(unittest.TestCase):
    """Tests for the Behavior Query System (BQS)."""
    def setUp(self):
        # Fresh BQS registry with two color behaviors, each having
        # processed one input location: (3,4) -> red, (5,12) -> blue.
        bqs.initBQS()
        b = ColorChangerBehavior({'Id': 'color','ColorList':[(255,0,0)]})
        c = ColorChangerBehavior({'Id': 'color2', 'ColorList':[(0,0,255)]})
        bqs.addBehavior(b)
        bqs.addBehavior(c)
        b.addInput({'Location':(3,4)})
        c.addInput({'Location':(5,12)})
        b.timeStep()
        c.timeStep()
    def tearDown(self):
        # Reset the registry so state never leaks between tests.
        bqs.initBQS()
    def test_simple_query(self):
        # Exact-color predicates select only the matching output.
        validQuery = lambda args:args['Color']==(255,0,0)
        invalidQuery = lambda args:args['Color']==(254,0,0)
        assert bqs.query(validQuery) == [{'Color':(255,0,0), 'Location':(3,4)}]
        assert bqs.query(invalidQuery) == []
    def test_dist_query(self):
        # Distance from origin: (3,4) is 5, (5,12) is 13.
        validDist = lambda args:geo.dist(args['Location'], (0,0)) <= 5
        invalidDist = lambda args:geo.dist(args['Location'], (0,0)) <= 2
        doubleDist = lambda args:geo.dist(args['Location'], (0,0)) <= 20
        assert bqs.query(validDist) == [{'Color':(255,0,0), 'Location':(3,4)}]
        assert bqs.query(invalidDist) == []
        assert bqs.query(doubleDist) == [{'Color':(255,0,0), 'Location':(3,4)}, {'Color':(0,0,255),\
            'Location':(5,12)}]
    def test_complex_queries(self):
        # A conjunction lambda and a list of predicates must behave alike.
        validQuery = lambda args:args['Color']==(255,0,0)
        doubleDist = lambda args:geo.dist(args['Location'], (0,0)) <= 20
        twoPartPredicate = lambda args:doubleDist(args) and validQuery(args)
        assert bqs.query(twoPartPredicate) == [{'Color':(255,0,0), 'Location':(3,4)}]
        assert bqs.query([validQuery, doubleDist]) == [{'Color':(255,0,0), 'Location':(3,4)}]
| [
"rcoh@mit.edu"
] | rcoh@mit.edu |
9eef71a00657c2a1af55bf2727b31f1ed01a6070 | 4d254d236a6006763d71d482a6aae2609d83203d | /blog/models.py | 19ce481cc5c4adeb25ba70da7c925055423894d3 | [] | no_license | Juneju/djangosite | 95fb6bfd90d0e43aacb12cd1f360e30a0e05c39a | b6c56b0a61ef2170ad517af297e4df864a6144ba | refs/heads/master | 2020-03-18T17:51:00.742856 | 2019-01-28T05:31:41 | 2019-01-28T05:31:41 | 135,054,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from django.db import models
from django.urls import reverse
# Create your models here.
class Article(models.Model):
    """A blog article; newest articles sort first (see Meta.ordering)."""
    title=models.CharField(max_length=50,blank=True) #title
    category=models.CharField(max_length=50,blank=True) #label/category name
    date_time=models.DateTimeField(auto_now_add=True) #creation time, set automatically
    content=models.TextField(blank=True,null=True) #article body
    # Compute the canonical URL for this article.
    def get_absolute_url(self):
        return reverse('detail',args=[str(self.id)])
        #return "/blog/%i/" %self.id
    def __str__(self): #show the object by using its title
        return self.title
    class Meta:
        ordering=['-date_time']
| [
"noreply@github.com"
] | noreply@github.com |
7bfdbb5e6063f995eee90c58ef5f6a1f063ec21b | 447a715b552ad9351e6f0c099cb620792b5973af | /Exceptions/exceptions3.py | 85999f31f9aafeeb005ceae0798f7c8b5416740e | [] | no_license | durandv-forks/code_snippets | 57162da316f5b9568f886cf86cc9aff0914aedf4 | 2a537cb1ffab476c266211e05d79d26f4e746e21 | refs/heads/master | 2020-03-20T02:29:13.078366 | 2018-12-07T22:13:06 | 2018-12-07T22:13:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # Define shout_echo
def shout_echo(word1, echo=1):
    """Concatenate echo copies of word1 and three
    exclamation marks at the end of the string.

    Parameters
    ----------
    word1 : str
        The word to repeat.
    echo : int, optional
        How many copies of word1 to concatenate (default 1).

    Raises
    ------
    ValueError
        If echo is negative.
    """
    # BUGFIX: `raise("msg")` tries to raise a plain string, which itself
    # fails with "exceptions must derive from BaseException"; raise a
    # proper exception type instead.
    if echo < 0:
        raise ValueError("echo must be greater than 0")
    # Concatenate echo copies of word1 using *: echo_word
    echo_word = word1 * echo
    # Concatenate '!!!' to echo_word: shout_word
    shout_word = echo_word + '!!!'
    # Return shout_word
    return shout_word
# Call shout_echo with a negative echo to demonstrate the error branch
# (this call raises).
shout_echo("particle", echo=-1)
| [
"victor.durand@farmacity.com.ar"
] | victor.durand@farmacity.com.ar |
875780690d038c0421c17a804f53b4c74107fe53 | 0050f01cdff48fc5ec162e0498cfc2ba2c3a1aa5 | /job_tracker_env/bin/pip-sync | 30caff343b983452c74b6fbe64c720e417186859 | [] | no_license | andrewrosario/job_tracker_backend | f327fdde9c43b23b35a4e5ce38f6c1352c009d6d | 0093199aa28924e52f188e66aae03720d11ae57e | refs/heads/master | 2021-10-08T18:20:25.053242 | 2019-12-05T20:09:31 | 2019-12-05T20:09:31 | 224,273,955 | 0 | 0 | null | 2021-09-22T18:04:12 | 2019-11-26T19:52:56 | Python | UTF-8 | Python | false | false | 308 | #!/Users/andrewrosario/Development/code/Python/job_tracker/job_tracker_backend/job_tracker_env/bin/python3.8
# -*- coding: utf-8 -*-
import re
import sys
from piptools.scripts.sync import cli
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli())
| [
"rosario.andrew@gmail.com"
] | rosario.andrew@gmail.com | |
3ed2beb303c28748c85454eea580163c3338d096 | 3ccd3465c241071827ad98bac61f85d8405bffc9 | /test/talkytest/clients/voice/tts/test_osxsay.py | eb28c42d580273e2a2717801aba2380cd9cfccaf | [
"MIT"
] | permissive | keiffster/talk-y | a12e2590f3170af1debb4add9c27fd12adb279fa | dd2bb2a816c868770d9bec8f02ee9f2bbfcbae2a | refs/heads/master | 2021-06-25T08:32:39.020921 | 2020-02-16T17:18:45 | 2020-02-16T17:18:45 | 102,565,196 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import unittest
from talky.clients.voice.tts.osxsay import OSXSayTextToSpeach
from talky.config.sections.client.voice.voice import VoiceConfiguration
class OSXSayTextToSpeachTests(unittest.TestCase):
    """Unit tests for the OS X `say`-based text-to-speech client."""
    def test_init(self):
        # The TTS engine should construct from a default voice configuration.
        config = VoiceConfiguration()
        tts = OSXSayTextToSpeach(config)
        self.assertIsNotNone(tts)
| [
"keith@keithsterling.com"
] | keith@keithsterling.com |
bfa4bc2ce6ce4fc12f63223c6419c52e683ba101 | 5682e1b9ff4d7387e69c8fcb75fda7c641e68e50 | /LeeJehwan/scraper/1.THEORY/1.6.returns.py | d080dc94f96697dc93d383af081feaeba169bac3 | [] | no_license | daehyun1023/Python | 18b68cd626f8e0f15102eec7280decd773fb84c5 | 99d3c1badd31c3aef2bc9f4fe52296768c5c117e | refs/heads/main | 2023-02-28T08:12:33.314536 | 2021-02-07T23:54:31 | 2021-02-07T23:54:31 | 332,975,801 | 1 | 0 | null | 2021-01-26T04:54:13 | 2021-01-26T04:54:13 | null | UTF-8 | Python | false | false | 69 | py | def plus(a, b):
return a + b
result = plus(2, 4)
print(result)
| [
"wpghks7@naver.com"
] | wpghks7@naver.com |
d7584bd394365c25cd5c1f4eea0f3b694108071d | 3ae1cd026faf87eef31d471f499da3ffe1cb8c2e | /PyCharm/decision_if.py | 5cc624d6a673ccdfa10e52c912598aea32d2d6f2 | [] | no_license | rohan5077/Meditab_office | 830b276268bd63aff3f77cb851014fcd297d29ab | 8714338c439b73750952e5416ab669186f7ff7dd | refs/heads/master | 2020-03-19T06:02:29.216746 | 2018-06-14T13:46:29 | 2018-06-14T13:46:29 | 135,985,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | language = 'python'
if language == 'python':
print('python')
elif language=='java':
print('Java')
else:
print('No Match')
logged_in = False
if language == 'python' or logged_in:
print('admin')
else:
print('wrong creds') | [
"rohansharma5077@gmail.com"
] | rohansharma5077@gmail.com |
28cd030174bc51a2bfd216d6a1049f4f5468cb6f | e87000043d443ef2dbe4ae2af6cb07f319ad82ac | /extra_apps/xadmin/filters.py | e51634dda6be90e83414d3382a47d5a360d7f8fb | [] | no_license | williamcullen/MXOline-github | 5891ac6493bed9e0445e1b7660850712d937f1bf | e00fd30c8239aadeb0a58bd39948a2ea5b9047f5 | refs/heads/master | 2021-01-19T12:08:02.031643 | 2017-05-05T01:51:02 | 2017-05-05T01:51:12 | 88,019,381 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 21,746 | py | from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.template.loader import get_template
from django.template.context import Context
from django.utils.safestring import mark_safe
from django.utils.html import escape, format_html
from django.utils.text import Truncator
from django.core.cache import cache, caches
from xadmin.views.list import EMPTY_CHANGELIST_VALUE
from xadmin.util import is_related_field, is_related_field2
import datetime
FILTER_PREFIX = '_p_'
SEARCH_VAR = '_q_'
from util import (get_model_from_relation,
reverse_field_path, get_limit_choices_to_from_path, prepare_lookup_value)
class BaseFilter(object):
    """Base class for all xadmin change-list filters.

    Subclasses set ``title`` and implement ``test``/``has_output``/
    ``do_filte``; ``__str__`` renders ``template`` for the sidebar.
    """
    # Display title of the filter panel; subclasses must provide one.
    title = None
    template = 'xadmin/filters/list.html'
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        """Return True if this filter class applies to ``field``."""
        pass
    def __init__(self, request, params, model, admin_view):
        # Lookup params (minus the _p_ prefix) consumed by this filter.
        self.used_params = {}
        self.request = request
        self.params = params
        self.model = model
        self.admin_view = admin_view
        if self.title is None:
            raise ImproperlyConfigured(
                "The filter '%s' does not specify "
                "a 'title'." % self.__class__.__name__)
    def query_string(self, new_params=None, remove=None):
        # Change-list query string with params added/removed.
        return self.admin_view.get_query_string(new_params, remove)
    def form_params(self):
        # Hidden form params for the change list, minus this filter's own.
        return self.admin_view.get_form_params(
            remove=map(lambda k: FILTER_PREFIX + k, self.used_params.keys()))
    def has_output(self):
        """
        Returns True if some choices would be output for this filter.
        """
        raise NotImplementedError
    @property
    def is_used(self):
        # True once the request supplied at least one lookup for this filter.
        return len(self.used_params) > 0
    def do_filte(self, queryset):
        """
        Returns the filtered queryset.
        """
        raise NotImplementedError
    def get_context(self):
        return {'title': self.title, 'spec': self, 'form_params': self.form_params()}
    def __str__(self):
        # Render this filter's template to HTML for the sidebar.
        tpl = get_template(self.template)
        return mark_safe(tpl.render(context=self.get_context()))
class FieldFilterManager(object):
    """Registry that selects the FieldFilter subclass for a model field."""
    _field_list_filters = []
    _take_priority_index = 0
    def register(self, list_filter_class, take_priority=False):
        """Register a filter class; priority classes are tried first."""
        if take_priority:
            # This is to allow overriding the default filters for certain types
            # of fields with some custom filters. The first found in the list
            # is used in priority.
            self._field_list_filters.insert(
                self._take_priority_index, list_filter_class)
            self._take_priority_index += 1
        else:
            self._field_list_filters.append(list_filter_class)
        return list_filter_class
    def create(self, field, request, params, model, admin_view, field_path):
        """Instantiate the first registered filter whose test() accepts field."""
        for list_filter_class in self._field_list_filters:
            if not list_filter_class.test(field, request, params, model, admin_view, field_path):
                continue
            return list_filter_class(field, request, params,
                                     model, admin_view, field_path=field_path)
# Module-level singleton used by the @manager.register decorators below.
manager = FieldFilterManager()
class FieldFilter(BaseFilter):
    """Filter bound to a concrete model field.

    ``lookup_formats`` maps a short name (e.g. ``exact``) to an ORM lookup
    pattern built from the field path (e.g. ``%s__exact``).
    """
    lookup_formats = {}
    def __init__(self, field, request, params, model, admin_view, field_path):
        self.field = field
        self.field_path = field_path
        self.title = getattr(field, 'verbose_name', field_path)
        self.context_params = {}
        super(FieldFilter, self).__init__(request, params, model, admin_view)
        # Pull each supported lookup out of the request params.
        for name, format in self.lookup_formats.items():
            p = format % field_path
            self.context_params["%s_name" % name] = FILTER_PREFIX + p
            if p in params:
                value = prepare_lookup_value(p, params.pop(p))
                self.used_params[p] = value
                self.context_params["%s_val" % name] = value
            else:
                self.context_params["%s_val" % name] = ''
        # Expose every context param as a lookup_<name> attribute
        # (Python 2: map() runs eagerly here).
        map(lambda kv: setattr(
            self, 'lookup_' + kv[0], kv[1]), self.context_params.items())
    def get_context(self):
        context = super(FieldFilter, self).get_context()
        context.update(self.context_params)
        # URL that clears every lookup this filter currently applies.
        context['remove_url'] = self.query_string(
            {}, map(lambda k: FILTER_PREFIX + k, self.used_params.keys()))
        return context
    def has_output(self):
        return True
    def do_filte(self, queryset):
        return queryset.filter(**self.used_params)
class ListFieldFilter(FieldFilter):
    """Field filter rendered as a list of choice links."""
    template = 'xadmin/filters/list.html'
    def get_context(self):
        context = super(ListFieldFilter, self).get_context()
        # Materialise the choices generator for the template.
        context['choices'] = list(self.choices())
        return context
@manager.register
class BooleanFieldListFilter(ListFieldFilter):
    """Yes/No (and Unknown for nullable) filter for boolean fields."""
    lookup_formats = {'exact': '%s__exact', 'isnull': '%s__isnull'}
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return isinstance(field, (models.BooleanField, models.NullBooleanField))
    def choices(self):
        for lookup, title in (
                ('', _('All')),
                ('1', _('Yes')),
                ('0', _('No'))):
            yield {
                'selected': self.lookup_exact_val == lookup and not self.lookup_isnull_val,
                'query_string': self.query_string({
                    self.lookup_exact_name: lookup,
                }, [self.lookup_isnull_name]),
                'display': title,
            }
        if isinstance(self.field, models.NullBooleanField):
            # NullBooleanField gets an extra "Unknown" (isnull) choice.
            yield {
                'selected': self.lookup_isnull_val == 'True',
                'query_string': self.query_string({
                    self.lookup_isnull_name: 'True',
                }, [self.lookup_exact_name]),
                'display': _('Unknown'),
            }
@manager.register
class ChoicesFieldListFilter(ListFieldFilter):
    """Choice-link filter for model fields that declare ``choices``."""
    lookup_formats = {'exact': '%s__exact'}
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        # Applies only when the field defines an explicit choices list.
        return bool(field.choices)
    def choices(self):
        # "All" entry: selected when no exact lookup value is set.
        # BUGFIX: use equality instead of identity (`is ''`) — the old
        # form relied on CPython string interning and is a SyntaxWarning
        # on Python 3.8+.
        yield {
            'selected': self.lookup_exact_val == '',
            'query_string': self.query_string({}, [self.lookup_exact_name]),
            'display': _('All')
        }
        for lookup, title in self.field.flatchoices:
            yield {
                'selected': smart_unicode(lookup) == self.lookup_exact_val,
                'query_string': self.query_string({self.lookup_exact_name: lookup}),
                'display': title,
            }
@manager.register
class TextFieldListFilter(FieldFilter):
    """Free-text search filter for long CharFields and TextFields."""
    template = 'xadmin/filters/char.html'
    lookup_formats = {'in': '%s__in', 'search': '%s__contains'}
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        # Short CharFields are better served by choice-style filters.
        return (isinstance(field, models.CharField) and field.max_length > 20) or isinstance(field, models.TextField)
@manager.register
class NumberFieldListFilter(FieldFilter):
    """Comparison filter (=, <, >, <=, >=, !=) for numeric fields."""
    template = 'xadmin/filters/number.html'
    lookup_formats = {'equal': '%s__exact', 'lt': '%s__lt', 'gt': '%s__gt',
                      'ne': '%s__ne', 'lte': '%s__lte', 'gte': '%s__gte',
                      }
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return isinstance(field, (models.DecimalField, models.FloatField, models.IntegerField))
    def do_filte(self, queryset):
        params = self.used_params.copy()
        # "__ne" is not a real ORM lookup; translate it into an exclude().
        ne_key = '%s__ne' % self.field_path
        if ne_key in params:
            queryset = queryset.exclude(
                **{self.field_path: params.pop(ne_key)})
        return queryset.filter(**params)
@manager.register
class DateFieldListFilter(ListFieldFilter):
    """Date-range filter with preset links (today, past 7 days, ...)."""
    template = 'xadmin/filters/date.html'
    lookup_formats = {'since': '%s__gte', 'until': '%s__lt',
                      'year': '%s__year', 'month': '%s__month', 'day': '%s__day',
                      'isnull': '%s__isnull'}
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return isinstance(field, models.DateField)
    def __init__(self, field, request, params, model, admin_view, field_path):
        self.field_generic = '%s__' % field_path
        # All request params targeting this date field (prefixed form).
        self.date_params = dict([(FILTER_PREFIX + k, v) for k, v in params.items()
                                 if k.startswith(self.field_generic)])
        super(DateFieldListFilter, self).__init__(
            field, request, params, model, admin_view, field_path)
        now = timezone.now()
        # When time zone support is enabled, convert "now" to the user's time
        # zone so Django's definition of "Today" matches what the user expects.
        if now.tzinfo is not None:
            current_tz = timezone.get_current_timezone()
            now = now.astimezone(current_tz)
            if hasattr(current_tz, 'normalize'):
                # available for pytz time zones
                now = current_tz.normalize(now)
        if isinstance(field, models.DateTimeField):
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else:  # field is a models.DateField
            today = now.date()
        tomorrow = today + datetime.timedelta(days=1)
        # Preset (label, lookup-params) links shown in the sidebar.
        self.links = (
            (_('Any date'), {}),
            (_('Has date'), {
                self.lookup_isnull_name: False
            }),
            (_('Has no date'), {
                self.lookup_isnull_name: 'True'
            }),
            (_('Today'), {
                self.lookup_since_name: str(today),
                self.lookup_until_name: str(tomorrow),
            }),
            (_('Past 7 days'), {
                self.lookup_since_name: str(today - datetime.timedelta(days=7)),
                self.lookup_until_name: str(tomorrow),
            }),
            (_('This month'), {
                self.lookup_since_name: str(today.replace(day=1)),
                self.lookup_until_name: str(tomorrow),
            }),
            (_('This year'), {
                self.lookup_since_name: str(today.replace(month=1, day=1)),
                self.lookup_until_name: str(tomorrow),
            }),
        )
    def get_context(self):
        context = super(DateFieldListFilter, self).get_context()
        # True when a specific year/month/day lookup is active.
        context['choice_selected'] = bool(self.lookup_year_val) or bool(self.lookup_month_val) \
            or bool(self.lookup_day_val)
        return context
    def choices(self):
        for title, param_dict in self.links:
            yield {
                'selected': self.date_params == param_dict,
                'query_string': self.query_string(
                    param_dict, [FILTER_PREFIX + self.field_generic]),
                'display': title,
            }
@manager.register
class RelatedFieldSearchFilter(FieldFilter):
    """Ajax/select search filter for FK-style relations whose related
    admin opts into the 'fk-ajax' or 'fk-select' widget style."""
    template = 'xadmin/filters/fk_search.html'
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        if not is_related_field2(field):
            return False
        related_modeladmin = admin_view.admin_site._registry.get(
            get_model_from_relation(field))
        return related_modeladmin and getattr(related_modeladmin, 'relfield_style', None) in ('fk-ajax', 'fk-select')
    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        # Lookups target the related field's name (or the related PK).
        if hasattr(field, 'rel'):
            rel_name = field.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_formats = {'in': '%%s__%s__in' % rel_name, 'exact': '%%s__%s__exact' % rel_name}
        super(RelatedFieldSearchFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        related_modeladmin = self.admin_view.admin_site._registry.get(other_model)
        self.relfield_style = related_modeladmin.relfield_style
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
        # Change-list URL of the related model, used by the ajax search box.
        self.search_url = model_admin.get_admin_url('%s_%s_changelist' % (
            other_model._meta.app_label, other_model._meta.model_name))
        self.label = self.label_for_value(other_model, rel_name, self.lookup_exact_val) if self.lookup_exact_val else ""
        # Encode any limit_choices_to constraints as extra query params.
        self.choices = '?'
        if field.rel.limit_choices_to:
            for i in list(field.rel.limit_choices_to):
                self.choices += "&_p_%s=%s" % (i, field.rel.limit_choices_to[i])
        self.choices = format_html(self.choices)
    def label_for_value(self, other_model, rel_name, value):
        # Human-readable label of the currently selected related object.
        try:
            obj = other_model._default_manager.get(**{rel_name: value})
            return '%s' % escape(Truncator(obj).words(14, truncate='...'))
        except (ValueError, other_model.DoesNotExist):
            return ""
    def get_context(self):
        context = super(RelatedFieldSearchFilter, self).get_context()
        context['search_url'] = self.search_url
        context['label'] = self.label
        context['choices'] = self.choices
        context['relfield_style'] = self.relfield_style
        return context
@manager.register
class RelatedFieldListFilter(ListFieldFilter):
    """Choice-list filter over the values of a related (FK/M2M) field."""
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return is_related_field2(field)
    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        # Lookups target the related field's name (or the related PK).
        if hasattr(field, 'rel'):
            rel_name = field.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_formats = {'in': '%%s__%s__in' % rel_name, 'exact': '%%s__%s__exact' %
                               rel_name, 'isnull': '%s__isnull'}
        self.lookup_choices = field.get_choices(include_blank=False)
        super(RelatedFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
    def has_output(self):
        # Show the filter only when there is more than one possible choice,
        # counting the extra "empty" choice for nullable relations.
        if (is_related_field(self.field)
                and self.field.field.null or hasattr(self.field, 'rel')
                and self.field.null):
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]
    def choices(self):
        yield {
            'selected': self.lookup_exact_val == '' and not self.lookup_isnull_val,
            'query_string': self.query_string({},
                                              [self.lookup_exact_name, self.lookup_isnull_name]),
            'display': _('All'),
        }
        for pk_val, val in self.lookup_choices:
            yield {
                'selected': self.lookup_exact_val == smart_unicode(pk_val),
                'query_string': self.query_string({
                    self.lookup_exact_name: pk_val,
                }, [self.lookup_isnull_name]),
                'display': val,
            }
        # Extra choice selecting rows whose relation is NULL.
        if (is_related_field(self.field)
                and self.field.field.null or hasattr(self.field, 'rel')
                and self.field.null):
            yield {
                'selected': bool(self.lookup_isnull_val),
                'query_string': self.query_string({
                    self.lookup_isnull_name: 'True',
                }, [self.lookup_exact_name]),
                'display': EMPTY_CHANGELIST_VALUE,
            }
@manager.register
class MultiSelectFieldListFilter(ListFieldFilter):
    """ Delegates the filter to the default filter and ors the results of each

    Lists the distinct values of each field as a checkbox
    Uses the default spec for each
    """
    template = 'xadmin/filters/checklist.html'
    lookup_formats = {'in': '%s__in'}
    cache_config = {'enabled': False, 'key': 'quickfilter_%s', 'timeout': 3600, 'cache': 'default'}
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return True
    def get_cached_choices(self):
        """Return the cached distinct choices, or None when caching is off."""
        if not self.cache_config['enabled']:
            return None
        # BUGFIX: ``caches`` is a mapping of configured cache backends and
        # must be indexed, not called (calling it raised TypeError).
        c = caches[self.cache_config['cache']]
        return c.get(self.cache_config['key'] % self.field_path)
    def set_cached_choices(self, choices):
        """Store the distinct choices in the configured cache backend."""
        if not self.cache_config['enabled']:
            return
        c = caches[self.cache_config['cache']]
        return c.set(self.cache_config['key'] % self.field_path, choices)
    def __init__(self, field, request, params, model, model_admin, field_path, field_order_by=None, field_limit=None,
                 sort_key=None, cache_config=None):
        super(MultiSelectFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
        # Check for it in the cache first.
        if cache_config is not None and type(cache_config) == dict:
            self.cache_config.update(cache_config)
        if self.cache_config['enabled']:
            self.field_path = field_path
            choices = self.get_cached_choices()
            if choices:
                self.lookup_choices = choices
                return
        # Else rebuild the distinct-values list from the queryset.
        queryset = self.admin_view.queryset().exclude(**{"%s__isnull" % field_path: True}).values_list(field_path,
                                                                                                       flat=True).distinct()
        if field_order_by is not None:
            # Do a subquery to order the distinct set
            queryset = self.admin_view.queryset().filter(id__in=queryset).order_by(field_order_by)
        if field_limit is not None and type(field_limit) == int and queryset.count() > field_limit:
            queryset = queryset[:field_limit]
        self.lookup_choices = [str(it) for it in queryset.values_list(field_path, flat=True) if str(it).strip() != ""]
        if sort_key is not None:
            self.lookup_choices = sorted(self.lookup_choices, key=sort_key)
        if self.cache_config['enabled']:
            self.set_cached_choices(self.lookup_choices)
    def choices(self):
        # Normalise the current "__in" value to a list.
        self.lookup_in_val = (type(self.lookup_in_val) in (tuple, list)) and self.lookup_in_val or list(
            self.lookup_in_val)
        yield {
            'selected': len(self.lookup_in_val) == 0,
            'query_string': self.query_string({}, [self.lookup_in_name]),
            'display': _('All'),
        }
        for val in self.lookup_choices:
            yield {
                'selected': smart_unicode(val) in self.lookup_in_val,
                'query_string': self.query_string({self.lookup_in_name: ",".join([val] + self.lookup_in_val), }),
                'remove_query_string': self.query_string(
                    {self.lookup_in_name: ",".join([v for v in self.lookup_in_val if v != val]), }),
                'display': val,
            }
@manager.register
class AllValuesFieldListFilter(ListFieldFilter):
    """Fallback filter listing every distinct value of the field.

    Registered last, so its test() accepts any field the other filters
    rejected.
    """
    lookup_formats = {'exact': '%s__exact', 'isnull': '%s__isnull'}
    @classmethod
    def test(cls, field, request, params, model, admin_view, field_path):
        return True
    def __init__(self, field, request, params, model, admin_view, field_path):
        parent_model, reverse_path = reverse_field_path(model, field_path)
        queryset = parent_model._default_manager.all()
        # optional feature: limit choices base on existing relationships
        # queryset = queryset.complex_filter(
        #    {'%s__isnull' % reverse_path: False})
        limit_choices_to = get_limit_choices_to_from_path(model, field_path)
        queryset = queryset.filter(limit_choices_to)
        self.lookup_choices = (queryset
                               .distinct()
                               .order_by(field.name)
                               .values_list(field.name, flat=True))
        super(AllValuesFieldListFilter, self).__init__(
            field, request, params, model, admin_view, field_path)
    def choices(self):
        # BUGFIX: compare with ==, not ``is`` — identity with '' relied on
        # CPython string interning and is a SyntaxWarning on Python 3.8+.
        yield {
            'selected': (self.lookup_exact_val == '' and self.lookup_isnull_val == ''),
            'query_string': self.query_string({}, [self.lookup_exact_name, self.lookup_isnull_name]),
            'display': _('All'),
        }
        include_none = False
        for val in self.lookup_choices:
            if val is None:
                include_none = True
                continue
            val = smart_unicode(val)
            yield {
                'selected': self.lookup_exact_val == val,
                'query_string': self.query_string({self.lookup_exact_name: val},
                                                  [self.lookup_isnull_name]),
                'display': val,
            }
        if include_none:
            # Represent NULL values with the standard empty-value marker.
            yield {
                'selected': bool(self.lookup_isnull_val),
                'query_string': self.query_string({self.lookup_isnull_name: 'True'},
                                                  [self.lookup_exact_name]),
                'display': EMPTY_CHANGELIST_VALUE,
            }
| [
"williamcullen@foxmail.com"
] | williamcullen@foxmail.com |
422f776bae7562d99ebe7ce2b3159a227a5665c3 | 32c640ce93ece6c87d1ad842c1dcd4bc12cb4c26 | /src/unison/src/Unison/Target/X86/SpecsGen/input/skylake/llvm-resource-model/X86SchedSkylakeClient-parser.py | d430b5890f9840392863eb3972e00e734dc55af5 | [
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | matsc-at-sics-se/unison | 54b225539724040b7c8089178fb1ecab0d87dbba | 8d23b73aaf8f3af5c4d86f26e0e4432d70bab564 | refs/heads/master | 2021-04-27T14:27:50.918235 | 2018-12-27T09:21:47 | 2018-12-27T09:21:47 | 122,455,374 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,487 | py | #!/usr/bin/env python
#
# Main authors:
# Jacob Kimblad <jacob.kimblad@ri.se>
#
# This file is part of Unison, see http://unison-code.github.io
#
# Copyright (c) 2018, RISE SICS AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import json
import yaml
import pyparsing as pp
import re
def main():
    """Map Unison x86 instructions to Skylake scheduling resource groups.

    Parses the LLVM tablegen .td file (sys.argv[2]) and a JSON dump of
    tablegen's SchedRW output (sys.argv[4]), matches every Unison
    instruction to a resource group, and prints the result as JSON on
    stdout. (argv[1]/argv[3] are presumably option flags consumed by the
    caller -- TODO confirm against the invoking script.)
    """
    #Get all instructions defined in unison
    unisonInstructions = getUnisonInstructions()
    #Parser to find each basic WriteRes def with no additional attributes
    writeResDefParser = getWriteResDefParser()
    #Parser to find each WriteRes def with additional attributes (latency, resourcecycles etc.)
    writeResVerboseDefParser = getWriteResVerboseDef()
    #Parser to find each SKLWriteResPair def
    sklWriteResPairDefParser = getSklWriteResPairDefParser()
    #Parser to find each regex-instructions that belongs to a SKLWriteResGroup
    llvmInstructionParser = getLlvmInstructionParser()
    #Parser to find each SKLWriteResGroup definition
    sklWriteResGroupDefParser = getSklWriteResGroupDefParser()
    #Open LLVM-tablegen file defining skylake resources
    schedSkylakeClientTD = open(sys.argv[2]).read()
    #Find all WriteRes defs
    writeResDefs = getWriteResDefs(writeResDefParser, schedSkylakeClientTD)
    #Find all verbose WriteRes defs
    writeResVerboseDefs = getWriteResVerboseDefs(writeResVerboseDefParser, schedSkylakeClientTD)
    #Find all SKLWriteResPair defs
    sklWriteResPairDefs = getSklWriteResPairDefs(sklWriteResPairDefParser, schedSkylakeClientTD)
    #Find all SKLWriteResGroup defs
    sklWriteResGroupDefs = getSklWriteResGroupDefs(sklWriteResGroupDefParser, schedSkylakeClientTD)
    #Find all instructions defined for skylake by llvm
    llvmInstructions = getLlvmInstructions(llvmInstructionParser, schedSkylakeClientTD)
    # Find out which unison instructions has a matching regular-expression defined in llvm .td
    matchings = regexMatching(unisonInstructions, llvmInstructions)
    #Open file that contains output from tablegen
    tablegenOutput = json.load(open(sys.argv[4]))
    # Try and match all remaining instructions, that are not matched with any resource group, with what their schedRWGroups are defined as from the output of tablegen
    schedRWMatchings = getSchedRWMatchings(matchings['Unmatched'], tablegenOutput)
    #Save all defined resource groups
    resourceGroups = []
    for group in sklWriteResPairDefs + sklWriteResGroupDefs + writeResVerboseDefs + writeResDefs:
        resourceGroups.append(group['Name'])
    #Remove undefined resourcegroups from each defined instruction
    undefinedSchedRWGroup = []
    resourceGroupTuples = []
    for instruction in list(schedRWMatchings['Matched']):
        tempInstruction = removeUndefinedResourceGroups(instruction, resourceGroups)
        #Instruction had no defined resource group for skylake, so its resource usage is undefined
        if not tempInstruction['ResourceGroup']:
            undefinedSchedRWGroup.append({'Instruction': tempInstruction['Instruction']})
            schedRWMatchings['Matched'].remove(instruction)
        #Instruction has more than a singled defined resource group
        elif len(tempInstruction['ResourceGroup']) > 1:
            resourceGroupTuples.append(tempInstruction['ResourceGroup'])
            tempInstruction['ResourceGroup'] = "".join(tempInstruction['ResourceGroup'])
        #Instruction has a single defined resource group
        else:
            # Transform from list to single element
            tempInstruction['ResourceGroup'] = tempInstruction['ResourceGroup'][0]
        instruction = tempInstruction
    #Some instructions uses several resource-groups, so we create custom combined resourcegroups here.
    # Currently, the only collection of resource groups used by instructions are "WriteALULd" and "WriteRMW"
    # which create the combined resource "WriteALULdWriteRMW"
    definedResourceGroups = sklWriteResPairDefs + sklWriteResGroupDefs + writeResVerboseDefs + writeResDefs
    combinedResourceGroups = []
    for resourceGroups in set(tuple(row) for row in resourceGroupTuples):
        tempResource = combineResourceGroups(resourceGroups, definedResourceGroups)
        combinedResourceGroups.append(tempResource)
    definedResourceGroups.extend(combinedResourceGroups)
    #Load instructions that have been manually mapped to resource groups in an external file
    customInstructions = getCustomInstructions()
    undefinedInstructions = schedRWMatchings['Unmatched'] + undefinedSchedRWGroup
    #Remove manually defined instructions from the list of undefined instructions
    for instruction in customInstructions:
        undefinedInstructions[:] = [d for d in undefinedInstructions if d.get('Instruction') != instruction['Instruction']]
    # Check which instructions have defined ReadAfterLd, which means they have 5 cycles until the operand is fetched from memory and the instruction can actually be issued
    definedInstructions = matchings['Matched'] + schedRWMatchings['Matched'] + customInstructions
    for instruction in list(definedInstructions):
        if checkReadAdvance(tablegenOutput, instruction['Instruction']):
            instruction['ReadAdvance'] = True
        else:
            instruction['ReadAdvance'] = False
    #Format the output and print json (indent=4 enables pretty print)
    output = {
        'ResourceGroups': definedResourceGroups,
        'DefinedInstructions': definedInstructions,
        'UndefinedInstructions': undefinedInstructions,
    }
    print(json.dumps(output, indent=4))
    # Uncomment to print number of instructions NOT mapped to a resource group
    # print("unmatched: " + str(len(output['UndefinedInstructions'])))
    # Uncomment to print number of instructions mapped to a resource group
    # print("matched: " + str(len(output['DefinedInstructions'])))
# Check which instructions have defined ReadAfterLd, which means they have 5 cycles until the operand is fetched from memory and the instruction can actually be issued
def checkReadAdvance(tablegenOutput, instruction):
    """Return True if *instruction*'s SchedRW list contains "ReadAfterLd".

    Instructions with ReadAfterLd have 5 cycles until the operand is fetched
    from memory and the instruction can actually be issued.

    :param tablegenOutput: list of dicts with 'Instruction' and 'SchedRW' keys
    :param instruction: instruction name to look up
    :return: True when a matching entry's SchedRW list names ReadAfterLd
    """
    # First entry matching the instruction name, if any.
    match = next((schedRW for schedRW in tablegenOutput
                  if schedRW['Instruction'] == instruction), None)
    # '?' marks an undefined SchedRW list in the tablegen output.
    if match is None or match['SchedRW'] == '?':
        return False
    resourceGroups = match['SchedRW'].strip("[").strip("]").replace(" ", "").split(",")
    return "ReadAfterLd" in resourceGroups
#Get the SchedRW groups that belongs to each instruction passed as argument
def getSchedRWMatchings(instructions, tablegenOutput):
    """Look up the SchedRW resource groups for each given instruction.

    :param instructions: list of dicts, each carrying an 'Instruction' key
    :param tablegenOutput: list of dicts with 'Instruction' and 'SchedRW'
    :return: {'Matched': [{'Instruction', 'ResourceGroup'}...],
              'Unmatched': [original dicts without a usable SchedRW]}
    """
    matched = []
    unmatched = []
    for data in instructions:
        name = data['Instruction']
        hits = [row for row in tablegenOutput if row['Instruction'] == name]
        # '?' marks an undefined SchedRW list in the tablegen output.
        if hits and hits[0]['SchedRW'] != '?':
            groups = hits[0]['SchedRW'].strip("[").strip("]").replace(" ", "").split(",")
            matched.append({'Instruction': name, 'ResourceGroup': groups})
        else:
            unmatched.append(data)
    return {'Matched': matched, 'Unmatched': unmatched}
#Some instructions does not have a given resourcegroup and have instead been manually mapped to a resource group, so we fetch them from that input file to include in the output
def getCustomInstructions():
    """Load the manually mapped instruction/resource-group entries from the
    JSON file passed as the third command-line argument (sys.argv[3])."""
    data = json.load(open(sys.argv[3]))
    return data['ManualMapping']
#Fetch all instructions defined for unison in x86.yaml
def getUnisonInstructions():
    """Return the ids of all instructions defined for unison in the x86.yaml
    file passed as the first command-line argument (sys.argv[1])."""
    data = yaml.safe_load(open(sys.argv[1], 'r'))
    instructions = []
    for instruction in data['instruction-set'][0]['instructions']:
        instructions.append(instruction['id'])
    return instructions
#Combines several defined resource groups into a single one
def combineResourceGroups(resourceGroupNames, definedResourceGroups):
    """Combine several defined resource groups into a single one.

    The combined group's Name is the concatenation of the member names, its
    Latency is the largest member latency, its Resources are the union of the
    member resources, and its ResourceCycles are summed per resource.

    :param resourceGroupNames: iterable of group names to merge
    :param definedResourceGroups: list of dicts with Name/Latency/Resources/
        ResourceCycles, containing every name in *resourceGroupNames*
    :return: the combined resource-group dict
    """
    combined = {
        "Name": "",
        "Latency": 0,
        "Resources": [],
        "ResourceCycles": [],
    }
    for resourceGroupName in resourceGroupNames:
        source = next(item for item in definedResourceGroups
                      if item['Name'] == resourceGroupName)
        combined['Name'] += resourceGroupName
        # The combined latency is the largest member latency.
        if combined['Latency'] < source['Latency']:
            combined['Latency'] = source['Latency']
        for resource, cycles in zip(source['Resources'], source['ResourceCycles']):
            if resource not in combined['Resources']:
                # Resource is not yet defined in the combined group.
                combined['Resources'].append(resource)
                combined['ResourceCycles'].append(cycles)
            else:
                # Resource already present: add this group's cycles to it.
                # (Bug fix: the previous code added the accumulator's own
                # cycles to themselves instead of the source group's cycles.)
                resourceIndex = combined['Resources'].index(resource)
                combined['ResourceCycles'][resourceIndex] += cycles
    return combined
#Removes undefined resource groups from an instruction
def removeUndefinedResourceGroups(instruction, resourceGroups):
    """Drop, in place, every resource group of *instruction* that is not in
    *resourceGroups* (the groups defined for skylake); the surviving groups
    are stored sorted.  Returns the (mutated) instruction dict."""
    kept = [group for group in instruction['ResourceGroup']
            if group in resourceGroups]
    instruction['ResourceGroup'] = sorted(kept)
    return instruction
#Parser to find each basic WriteRes def with no additional attributes
def getWriteResDefParser():
    """Build a pyparsing parser for plain `def : WriteRes<Name, [Resources]>`
    definitions; the trailing `~pp.Suppress("{")` rejects defs that carry an
    attribute block (those are handled by getWriteResVerboseDef)."""
    return pp.Suppress("def : WriteRes<") + pp.SkipTo(",")("Name") + pp.SkipTo(">")("Resources") + pp.Suppress(">") + ~pp.Suppress("{")
#Parser to find each WriteRes def with additional attributes (latency, resourcecycles etc.)
def getWriteResVerboseDef():
    """Build a pyparsing parser for WriteRes defs with an attribute block:
    `def : WriteRes<Name, [Resources]> { ...Data... }` -- the raw block body
    is captured as "Data" for later line-by-line processing."""
    return pp.Suppress("def : WriteRes<") + pp.SkipTo(",")("Name") + pp.SkipTo(">")("Resources") + pp.Suppress(">") + pp.Suppress("{") + pp.SkipTo("}")("Data") + pp.Suppress("}")
#Parser to find each SKLWriteResPair def
def getSklWriteResPairDefParser():
    """Build a pyparsing parser for `defm : SKLWriteResPair<Name, Resources,
    Latency>` definitions."""
    return pp.Suppress("defm : SKLWriteResPair<") + pp.SkipTo(",")("Name") + pp.Suppress(",") + pp.SkipTo(",")("Resources") + pp.Suppress(",") + pp.SkipTo(">")("Latency")
#Parser to find each regex-instructions that belongs to a SKLWriteResGroup
def getLlvmInstructionParser():
    """Build a pyparsing parser for `def: InstRW<[Group], (instregex "...")>;`
    lines, capturing the resource group name and the instruction regex."""
    writeResGroup = pp.Word(pp.alphanums)
    instRegex = pp.SkipTo("\"")
    return pp.Suppress("def: InstRW<[") + writeResGroup("ResourceGroup") + pp.Suppress("], (instregex \"") + instRegex("Regex") + pp.Suppress("\")>;")
#Parser to find each SKLWriteResGroup definition
def getSklWriteResGroupDefParser():
    """Build a pyparsing parser for a full SKLWriteResGroup definition:
    the `SchedWriteRes<[Resources]>` header followed by its
    `let Latency/NumMicroOps/ResourceCycles` attribute lines."""
    writeResGroup = pp.Word(pp.alphanums)
    resources = pp.SkipTo("]")
    latency = pp.Word(pp.nums)
    microOps = pp.Word(pp.nums)
    resourceCycles = pp.SkipTo("]")
    return pp.Suppress("def ") + writeResGroup("SKLWriteResGroup") + pp.Suppress(": SchedWriteRes<[") + resources("Resources") + pp.Suppress(pp.restOfLine) + (
        pp.Suppress("let Latency = ") + latency("Latency") + pp.Suppress(pp.restOfLine) +
        pp.Suppress("let NumMicroOps = ") + microOps("NumMicroOps") + pp.Suppress(pp.restOfLine) +
        pp.Suppress("let ResourceCycles = [") + resourceCycles("ResourceCycles") + pp.Suppress(pp.restOfLine)
    )
#Find all WriteRes defs
def getWriteResDefs(writeResDef, schedSkylakeClientTD):
    """Run *writeResDef* (see getWriteResDefParser) over the .td source text
    and return a list of normalized resource-group dicts.

    Latency defaults to 1 and each resource implicitly costs one cycle,
    mirroring the defaults of the .td file.
    """
    writeResDefs = []
    for writeRes in writeResDef.searchString(schedSkylakeClientTD):
        #Pretty up the parsed data
        tempDict = {
            "Name": writeRes['Name'],
            "Latency": 1,
            "Resources": writeRes['Resources'].strip(",").strip().strip("[").strip("]").replace(" ", "").split(","),
            "ResourceCycles": [],
        }
        #Check if Resources contains only an empty element and should be completely empty instead
        if len(tempDict['Resources'][0]) == 0:
            tempDict['Resources'] = []
        #Set one resource cycle for each resource (implicit in .td-file)
        else:
            for resource in tempDict['Resources']:
                tempDict['ResourceCycles'].append(1)
        writeResDefs.append(tempDict)
    return writeResDefs
#Find all verbose WriteRes defs
def getWriteResVerboseDefs(writeResVerboseDef, schedSkylakeClientTD):
    """Parse WriteRes definitions that carry an attribute block
    (`{ let Latency = ...; let ResourceCycles = [...]; }`) and normalize them
    to dicts with Name/Latency/Resources/ResourceCycles."""
    writeResVerboseDefs = []
    for writeRes in writeResVerboseDef.searchString(schedSkylakeClientTD):
        #Pretty up the parsed data
        writeResDict = writeRes.asDict()
        tempDict = {
            'Name' : writeRes['Name'],
            'Latency' : 1,
            'Resources' : writeRes['Resources'].strip(",").strip().strip("[").strip("]").replace(" ", "").split(","),
            'ResourceCycles' : []
        }
        #Go through each line of data that belongs to the WriteRes
        tempData = writeResDict['Data'].strip().split("\n")
        for data in tempData:
            #Remove comments that may have been parsed
            data = data.split("//")[0]
            if data:
                data = data.strip(";").strip()
                if data.find("Latency") >= 0 :
                    #Latency is an int
                    if isNumber(data.split("=")[1].strip()):
                        tempDict['Latency'] = int(data.split("=")[1].strip())
                    #Latency is NaN (This happens due to wrongfully parsing once inside a specially defined function)
                    else:
                        tempDict['Latency'] = 1
                elif data.find("ResourceCycles") >= 0:
                    tempData = data.split("=")[1].strip().replace(" ", "").strip("[").strip("]").split(",")
                    #Keep only the list items that are numerical
                    tempIntData = [s for s in tempData if s.isdigit()]
                    tempDict['ResourceCycles'] = list(map(int, tempIntData))
        #Check if Resources contains only an empty element and should be completely empty instead
        if len(tempDict['Resources'][0]) == 0:
            tempDict['Resources'] = []
        #Check if resourceCycles are not defined although resources are (resource-group "WriteLoad" suffers from this)
        if len(tempDict['ResourceCycles']) == 0:
            for resource in tempDict['Resources']:
                tempDict['ResourceCycles'].append(1)
        writeResVerboseDefs.append(tempDict)
    return writeResVerboseDefs
#Find all SKLWriteResPair defs
def getSklWriteResPairDefs(sklWriteResPairDef, schedSkylakeClientTD):
    """Parse all SKLWriteResPair defs.

    For every pair, a folded-load variant ("<Name>Ld") is also synthesized:
    +5 cycles of latency and the load port SKLPort23 appended when it is not
    already among the resources.
    """
    sklWriteResPairs = []
    for sklWriteResPair in sklWriteResPairDef.searchString(schedSkylakeClientTD):
        tempDict = {
            'Name' : sklWriteResPair['Name'],
            'Latency' : int(sklWriteResPair['Latency']),
            'Resources' : sklWriteResPair["Resources"].strip(",").strip().split(","),
            #ResourceCycles is implicit for the current version of the .td file, but may have to be updated if the file is changed to a later version of llvm than 6.0.0
            'ResourceCycles' : []
        }
        #Set one resource cycle for each resource (implicit in .td-file)
        #Check if Resources is empty and should be empty instead of containing just double quotations("")
        if len(tempDict['Resources'][0]) == 0:
            tempDict['Resources'] = []
        #Set one resource cycle for each resource (implicit in .td-file)
        else:
            for resource in tempDict['Resources']:
                tempDict['ResourceCycles'].append(1)
        sklWriteResPairs.append(tempDict)
        # Define the corresponding resource with a folded load
        resourcesFolded = list(tempDict['Resources'])
        added = False
        #Check if resource-group already uses port23
        if 'SKLPort23' not in tempDict['Resources']:
            resourcesFolded.append('SKLPort23')
            added = True
        tempDictFolded = {
            'Name' : sklWriteResPair['Name'] + 'Ld',
            'Latency' : int(sklWriteResPair['Latency']) + 5,
            'Resources' : resourcesFolded,
            #ResourceCycles is implicit for the current version of the .td file, but may have to be updated if the file is changed to a later version of llvm than 6.0.0
            'ResourceCycles' : []
        }
        #Add resource cycles for port23
        # if added:
        #     tempDictFolded['ResourceCycles'] = tempDict['ResourceCycles'].append(1)
        #Add implicitly defined resource cycles (one per resource)
        for resource in tempDictFolded['Resources']:
            tempDictFolded['ResourceCycles'].append(1)
        #Add to return-list
        sklWriteResPairs.append(tempDictFolded)
    return sklWriteResPairs
#Find all SKLWriteResGroup defs
def getSklWriteResGroupDefs(sklWriteResGroupDef, schedSkylakeClientTD):
    """Parse all SKLWriteResGroup definitions into dicts with Name, Latency,
    Resources and ResourceCycles (the latter parsed to ints)."""
    sklWriteResGroupDefs = []
    for sklWriteResGroup in sklWriteResGroupDef.searchString(schedSkylakeClientTD):
        tempDict = {
            'Name' : sklWriteResGroup['SKLWriteResGroup'],
            'Latency' : int(sklWriteResGroup['Latency']),
            'Resources' : sklWriteResGroup['Resources'].strip(",").strip().split(","),
            'ResourceCycles' : list(map(int, sklWriteResGroup['ResourceCycles'].split(",")))
        }
        sklWriteResGroupDefs.append(tempDict)
    return sklWriteResGroupDefs
#Find all instructions defined for skylake by llvm
def getLlvmInstructions(instrGroupDef, schedSkylakeClientTD):
    """Return every InstRW regex/resource-group pair found in the .td source,
    each as a dict with keys "ResourceGroup" and "Regex"."""
    instructions = []
    for instrGroup in instrGroupDef.searchString(schedSkylakeClientTD):
        instructions.append(instrGroup.asDict())
    return instructions
#InstrGroup.asDict() returns the following data structure
# dict = {
# SKLWriteResGroup
# instRegex
# }
#Check if parameter is a number
def isNumber(a):
    """Return True when *a* can be parsed as a base-10 integer."""
    try:
        int(a)
    except ValueError:
        return False
    return True
#Find out if there are any instructions not matched to the instructions defined in unison, this function is slow due to testing all combinations of both input lists, so alot of things are aimed at improving its speed
def regexMatching(unisonInstructions, instructions):
    """Match unison instruction names against the llvm regex entries.

    This is the slow all-pairs step, so it is staged for speed: patterns that
    are purely alphanumeric contain no regex metacharacters and are matched
    by plain string equality first; only the instructions left over go
    through the expensive re.search pass.

    :param unisonInstructions: list of instruction name strings
    :param instructions: list of dicts with 'Regex' and 'ResourceGroup'
    :return: {'Matched': [{'Instruction', 'ResourceGroup'}...],
              'Unmatched': [{'Instruction': name}...]}
    """
    matchings = {
        'Matched' : [],
        'Unmatched' : []
    }
    # Split the patterns: purely alphanumeric "regexes" can be compared as
    # plain strings.
    # TODO: Allow instruction to contain "_" as well as alphanumericals for
    # speed improvements
    alNumRegex = []
    notAlNumRegex = []
    for instruction in instructions:
        if instruction['Regex'].isalnum():
            alNumRegex.append(instruction)
        else:
            notAlNumRegex.append(instruction)
    # Cheap pass: exact string equality against the alphanumeric patterns.
    tempUnmatched = []
    for unisonInstruction in unisonInstructions:
        match = next((entry for entry in alNumRegex
                      if entry['Regex'] == unisonInstruction), None)
        if match is not None:
            matchings['Matched'].append({
                'Instruction' : unisonInstruction,
                'ResourceGroup' : match['ResourceGroup'],
            })
        else:
            tempUnmatched.append(unisonInstruction)
    # Expensive pass: full regex search for whatever string matching did not
    # resolve; stop at the first regex that matches (same result as the old
    # `matched`-flag scan, without iterating the remaining patterns).
    for instruction in tempUnmatched:
        for regex in notAlNumRegex:
            searchResult = re.search(regex['Regex'], instruction)
            # Only accept a hit that covers the whole instruction name.
            if searchResult is not None and searchResult.end() - searchResult.start() == len(instruction):
                matchings['Matched'].append({
                    'Instruction' : instruction,
                    'ResourceGroup' : regex['ResourceGroup'],
                })
                break
        else:
            # Instruction was not matched by any regex.
            matchings['Unmatched'].append({'Instruction' : instruction})
    return matchings
# Script entry point: main() is defined earlier in this file.
if __name__ == '__main__':
    main()
| [
"rcas@sics.se"
] | rcas@sics.se |
af133b71314acc64bdbd6be37d282d55ba8fde6d | 5b3caf64b77161748d0929d244798a8fb914d9c5 | /Python Excel Examples/WorksheetsApiDemo/background/deleteBackground.py | e5297d90f3031e8342735c46a68eb63f86226e60 | [] | no_license | EiceblueCloud/Spire.Cloud.Excel | 0d56864991eaf8d44c38f21af70db614b1d804b7 | d9845d5cefd15a3ab408b2c9f80828a4767e2b82 | refs/heads/master | 2021-07-20T23:44:39.068568 | 2021-07-15T03:04:49 | 2021-07-15T03:04:49 | 230,225,396 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | import spirecloudexcel
from spirecloudexcel.configuration import Configuration as ExcelConfiguration
# Spire.Cloud credentials -- replace with your own application id/key.
appId = "your id"
appKey = "your key"
baseUrl="https://api.e-iceblue.cn"
configuration = ExcelConfiguration(appId, appKey,baseUrl)
api = spirecloudexcel.api.worksheets_api.WorksheetsApi(configuration)
# Remove the background image of Sheet1 in ExcelDocument/DeleteBackground.xlsx
# stored in the default cloud storage.
name = "DeleteBackground.xlsx"
storage = ""
folder = "ExcelDocument"
sheet_name = "Sheet1"
api.delete_background(name, sheet_name, folder=folder, storage=storage)
"noreply@github.com"
] | noreply@github.com |
d779f777c6f0445645db6a3c53c16ef06714f220 | fc4db75c1fc5a410c99b6e2db7fb18e78b841100 | /Lab5/myApi/logic/limiter.py | 3a6bee8a30f230256a48af4f81e29400088894be | [] | no_license | KraftUnderscore/skryptowe20 | a2e16906619210bc621e53195c09103ce404cac1 | 63b447825961f837a0bb1c184ee04c97ba854500 | refs/heads/master | 2023-03-19T23:41:37.905253 | 2021-03-08T22:38:57 | 2021-03-08T22:38:57 | 302,128,523 | 0 | 0 | null | 2021-03-08T22:38:58 | 2020-10-07T18:39:15 | null | UTF-8 | Python | false | false | 489 | py | from time import time as current_time
from logic.DataAPI.constants import MAX_REQ_PER_MINUTE, MINUTE_IN_SEC
class StaticVariables():
    """Shared mutable state for the rate limiter (class-level only)."""
    # Number of requests seen in the current window.
    requests = 0
    # Timestamp (seconds) when the current window started.
    last_reset = current_time()
def can_request():
    """Fixed-window rate limiter backed by StaticVariables.

    Allows at most MAX_REQ_PER_MINUTE calls per MINUTE_IN_SEC window.
    Returns True (and counts the request) when the call is allowed,
    False when the quota for the current window is exhausted.
    """
    # A full window has elapsed -- start a fresh one.
    if current_time() - StaticVariables.last_reset > MINUTE_IN_SEC:
        StaticVariables.requests = 0
        StaticVariables.last_reset = current_time()
    if StaticVariables.requests < MAX_REQ_PER_MINUTE:
        StaticVariables.requests += 1
        return True
    return False
| [
"spieta2@gmail.com"
] | spieta2@gmail.com |
b9844d14cd1c8d49e55e4db8b42c0855b6fd370a | 171f2f1cb97bd3e016e0d77ea448cdfdcd778ade | /softlearning/environments/gym/mujoco/half_cheetah.py | 669c7850071783d7ab69bb83924b9c3a56753193 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | JiazhengChai/synergyDRL | 735cff04ef26732aad5eba8019a16e43a09baac3 | c08e78e5fe39d9d46213e1bf07b8dafc2195b05a | refs/heads/master | 2022-12-13T00:26:25.990732 | 2021-07-25T12:01:09 | 2021-07-25T12:01:09 | 224,543,407 | 2 | 1 | NOASSERTION | 2022-12-08T01:50:19 | 2019-11-28T01:02:57 | Python | UTF-8 | Python | false | false | 13,056 | py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import os
from . import path
DEFAULT_CAMERA_CONFIG = {
'distance': 4.0,
}
class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Half-cheetah mujoco environment with an additional energy penalty.

    reward = forward_reward - ctrl_cost - energy_weights * energy, where
    energy approximates mechanical work as sum(|action_i| * |joint angle
    delta_i|) over the six actuated joints.
    """
    def __init__(self,
                 xml_file='half_cheetah.xml',
                 forward_reward_weight=1.0,
                 ctrl_cost_weight=0.1,
                 reset_noise_scale=0.1,
                 exclude_current_positions_from_observation=True,
                 energy_weights=0.):
        utils.EzPickle.__init__(**locals())
        self._forward_reward_weight = forward_reward_weight
        # Actuated joints, used for the energy (work) estimate in step().
        self.joint_list=['bthigh','bshin','bfoot','fthigh','fshin','ffoot']
        self._ctrl_cost_weight = ctrl_cost_weight
        self.energy_weights=energy_weights
        self._reset_noise_scale = reset_noise_scale
        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation)
        global path
        mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
        #mujoco_env.MujocoEnv.__init__(self, xml_file, 5)

    def control_cost(self, action):
        # Quadratic penalty on the applied torques.
        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
        return control_cost

    def step(self, action):
        """Advance one simulation step; returns (obs, reward, done, info)."""
        # Joint angles before the step, for the work estimate below.
        states_angle = []
        for j in self.joint_list:
            states_angle.append(self.sim.data.get_joint_qpos(j))
        #states=self._get_obs()
        x_position_before = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        x_position_after = self.sim.data.qpos[0]
        x_velocity = ((x_position_after - x_position_before)
                      / self.dt)
        ctrl_cost = self.control_cost(action)
        forward_reward = self._forward_reward_weight * x_velocity
        observation = self._get_obs()
        #next_states=observation
        # Joint angles after the step.
        next_states_angle = []
        for j in self.joint_list:
            next_states_angle.append(self.sim.data.get_joint_qpos(j))
        reward = forward_reward - ctrl_cost
        done = False
        # energy ~ sum(|torque| * |angle change|) over the six joints.
        energy = 0
        for i in range(6):
            delta_theta = np.abs(next_states_angle[i] - states_angle[i])
            energy = energy + np.abs(action[i]) * delta_theta
        '''delta_theta_bt = np.abs(next_states[2] - states[2])
        delta_theta_bs = np.abs(next_states[3] - states[3])
        delta_theta_bf = np.abs(next_states[4] - states[4])
        delta_theta_ft = np.abs(next_states[5] - states[5])
        delta_theta_fs = np.abs(next_states[6] - states[6])
        delta_theta_ff = np.abs(next_states[7] - states[7])
        energy_bt = np.abs(action[0]) * delta_theta_bt
        energy_bs = np.abs(action[1]) * delta_theta_bs
        energy_bf = np.abs(action[2]) * delta_theta_bf
        energy_ft = np.abs(action[3]) * delta_theta_ft
        energy_fs = np.abs(action[4]) * delta_theta_fs
        energy_ff = np.abs(action[5]) * delta_theta_ff
        energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
        reward -= self.energy_weights*energy
        info = {
            'x_position': x_position_after,
            'x_velocity': x_velocity,
            'energy' : energy,
            'reward_run': forward_reward,
            'reward_ctrl': -ctrl_cost,
            'ori_reward':forward_reward-ctrl_cost
        }
        return observation, reward, done, info

    def _get_obs(self):
        # Positions + velocities; the absolute x position is optionally
        # dropped so the policy cannot key on world location.
        position = self.sim.data.qpos.flat.copy()
        velocity = self.sim.data.qvel.flat.copy()
        if self._exclude_current_positions_from_observation:
            position = position[1:]
        observation = np.concatenate((position, velocity)).ravel()
        return observation

    def reset_model(self):
        # Uniform noise on positions, Gaussian noise on velocities.
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale
        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq)
        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
            self.model.nv)
        self.set_state(qpos, qvel)
        observation = self._get_obs()
        return observation

    def viewer_setup(self):
        # Apply the module-level default camera configuration.
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)
class HalfCheetahHeavyEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Heavy-body variant of HalfCheetahEnv; identical logic, different
    model file (half_cheetah_heavy.xml)."""
    def __init__(self,
                 xml_file='half_cheetah_heavy.xml',
                 forward_reward_weight=1.0,
                 ctrl_cost_weight=0.1,
                 reset_noise_scale=0.1,
                 exclude_current_positions_from_observation=True,
                 energy_weights=0.):
        utils.EzPickle.__init__(**locals())
        self._forward_reward_weight = forward_reward_weight
        # Actuated joints, used for the energy (work) estimate in step().
        self.joint_list=['bthigh','bshin','bfoot','fthigh','fshin','ffoot']
        self._ctrl_cost_weight = ctrl_cost_weight
        self.energy_weights=energy_weights
        self._reset_noise_scale = reset_noise_scale
        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation)
        global path
        mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
        #mujoco_env.MujocoEnv.__init__(self, xml_file, 5)

    def control_cost(self, action):
        # Quadratic penalty on the applied torques.
        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
        return control_cost

    def step(self, action):
        """Advance one simulation step; returns (obs, reward, done, info)."""
        # Joint angles before the step, for the work estimate below.
        states_angle = []
        for j in self.joint_list:
            states_angle.append(self.sim.data.get_joint_qpos(j))
        #states=self._get_obs()
        x_position_before = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        x_position_after = self.sim.data.qpos[0]
        x_velocity = ((x_position_after - x_position_before)
                      / self.dt)
        ctrl_cost = self.control_cost(action)
        forward_reward = self._forward_reward_weight * x_velocity
        observation = self._get_obs()
        #next_states=observation
        # Joint angles after the step.
        next_states_angle = []
        for j in self.joint_list:
            next_states_angle.append(self.sim.data.get_joint_qpos(j))
        reward = forward_reward - ctrl_cost
        done = False
        # energy ~ sum(|torque| * |angle change|) over the six joints.
        energy = 0
        for i in range(6):
            delta_theta = np.abs(next_states_angle[i] - states_angle[i])
            energy = energy + np.abs(action[i]) * delta_theta
        '''delta_theta_bt = np.abs(next_states[2] - states[2])
        delta_theta_bs = np.abs(next_states[3] - states[3])
        delta_theta_bf = np.abs(next_states[4] - states[4])
        delta_theta_ft = np.abs(next_states[5] - states[5])
        delta_theta_fs = np.abs(next_states[6] - states[6])
        delta_theta_ff = np.abs(next_states[7] - states[7])
        energy_bt = np.abs(action[0]) * delta_theta_bt
        energy_bs = np.abs(action[1]) * delta_theta_bs
        energy_bf = np.abs(action[2]) * delta_theta_bf
        energy_ft = np.abs(action[3]) * delta_theta_ft
        energy_fs = np.abs(action[4]) * delta_theta_fs
        energy_ff = np.abs(action[5]) * delta_theta_ff
        energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
        reward -= self.energy_weights*energy
        info = {
            'x_position': x_position_after,
            'x_velocity': x_velocity,
            'energy' : energy,
            'reward_run': forward_reward,
            'reward_ctrl': -ctrl_cost,
            'ori_reward':forward_reward-ctrl_cost
        }
        return observation, reward, done, info

    def _get_obs(self):
        # Positions + velocities; the absolute x position is optionally
        # dropped so the policy cannot key on world location.
        position = self.sim.data.qpos.flat.copy()
        velocity = self.sim.data.qvel.flat.copy()
        if self._exclude_current_positions_from_observation:
            position = position[1:]
        observation = np.concatenate((position, velocity)).ravel()
        return observation

    def reset_model(self):
        # Uniform noise on positions, Gaussian noise on velocities.
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale
        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq)
        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
            self.model.nv)
        self.set_state(qpos, qvel)
        observation = self._get_obs()
        return observation

    def viewer_setup(self):
        # Apply the module-level default camera configuration.
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)
class FullCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Full (four-legged, left/right) cheetah variant; same reward shaping
    as HalfCheetahEnv but with twelve actuated joints."""
    def __init__(self,
                 xml_file='full_cheetah.xml',
                 forward_reward_weight=1.0,
                 ctrl_cost_weight=0.1,
                 reset_noise_scale=0.1,
                 exclude_current_positions_from_observation=True,
                 energy_weights=0.):
        utils.EzPickle.__init__(**locals())
        self._forward_reward_weight = forward_reward_weight
        # Actuated joints (left then right side), used for the energy
        # (work) estimate in step().
        self.joint_list=['bthighL','bshinL','bfootL','fthighL','fshinL','ffootL',
                         'bthighR', 'bshinR', 'bfootR', 'fthighR', 'fshinR', 'ffootR']
        self._ctrl_cost_weight = ctrl_cost_weight
        self.energy_weights=energy_weights
        self._reset_noise_scale = reset_noise_scale
        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation)
        global path
        mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)

    def control_cost(self, action):
        # Quadratic penalty on the applied torques.
        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
        return control_cost

    def step(self, action):
        """Advance one simulation step; returns (obs, reward, done, info)."""
        # Joint angles before the step, for the work estimate below.
        states_angle = []
        for j in self.joint_list:
            states_angle.append(self.sim.data.get_joint_qpos(j))
        #states=self._get_obs()
        x_position_before = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        x_position_after = self.sim.data.qpos[0]
        x_velocity = ((x_position_after - x_position_before)
                      / self.dt)
        ctrl_cost = self.control_cost(action)
        forward_reward = self._forward_reward_weight * x_velocity
        observation = self._get_obs()
        #next_states=observation
        # Joint angles after the step.
        next_states_angle = []
        for j in self.joint_list:
            next_states_angle.append(self.sim.data.get_joint_qpos(j))
        reward = forward_reward - ctrl_cost
        done = False
        # energy ~ sum(|torque| * |angle change|) over all twelve joints.
        energy = 0
        for i in range(len(self.joint_list)):
            delta_theta = np.abs(next_states_angle[i] - states_angle[i])
            energy = energy + np.abs(action[i]) * delta_theta
        '''delta_theta_bt = np.abs(next_states[2] - states[2])
        delta_theta_bs = np.abs(next_states[3] - states[3])
        delta_theta_bf = np.abs(next_states[4] - states[4])
        delta_theta_ft = np.abs(next_states[5] - states[5])
        delta_theta_fs = np.abs(next_states[6] - states[6])
        delta_theta_ff = np.abs(next_states[7] - states[7])
        energy_bt = np.abs(action[0]) * delta_theta_bt
        energy_bs = np.abs(action[1]) * delta_theta_bs
        energy_bf = np.abs(action[2]) * delta_theta_bf
        energy_ft = np.abs(action[3]) * delta_theta_ft
        energy_fs = np.abs(action[4]) * delta_theta_fs
        energy_ff = np.abs(action[5]) * delta_theta_ff
        energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
        reward -= self.energy_weights*energy
        info = {
            'x_position': x_position_after,
            'x_velocity': x_velocity,
            'energy' : energy,
            'reward_run': forward_reward,
            'reward_ctrl': -ctrl_cost,
            'ori_reward':forward_reward-ctrl_cost
        }
        return observation, reward, done, info

    def _get_obs(self):
        # Positions + velocities; the absolute x position is optionally
        # dropped so the policy cannot key on world location.
        position = self.sim.data.qpos.flat.copy()
        velocity = self.sim.data.qvel.flat.copy()
        if self._exclude_current_positions_from_observation:
            position = position[1:]
        observation = np.concatenate((position, velocity)).ravel()
        return observation

    def reset_model(self):
        # Uniform noise on positions, Gaussian noise on velocities.
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale
        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq)
        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
            self.model.nv)
        self.set_state(qpos, qvel)
        observation = self._get_obs()
        return observation

    def viewer_setup(self):
        # Apply the module-level default camera configuration.
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)
| [
"jaysoninsa@gmail.com"
] | jaysoninsa@gmail.com |
03d4dabb08c2218cfbc7c5a4f6dc7ad9b2a87fdf | 36d62e33b63ca629c91856ab4941245a61706bab | /omtk/managing/shotInfo.py | 93f76f6fd1b0e65c918f7967fbe7f4024c4bbcf0 | [] | no_license | Leopardob/omtk | 668386cc2f09ca7948dca2ed45faa0cb94fe88cf | 36def5207a2a70d88d0d405a5518d4fa6b8ed5ee | refs/heads/master | 2020-12-11T05:46:53.023632 | 2015-06-25T13:56:29 | 2015-06-25T13:56:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | from omtk.rigging.autorig import RigNode
"""
Since Pixar doesn't seem to want to release the OSD (Open Scene Descriptor) project, here's my take on the matter.
data = {
"shots":[
{
"location":{
"type":"Shot",
"id":001,
"name":"shotName"
},
"frame_range":{
"start":0,
"end":100,
"padding-l":24,
"padding-r":24,
"preroll":10,
"postroll":10
},
"assets":[
{
"name": "asset01", # used for reference only
"location":{ # shotgun reference
"type":"Asset",
"id":001,
"name":"assetName"
},
"transforms":{ # animation file
"type":"PublishedFile",
"id":001
}
},
{
"name": "asset02", # used for reference only
"location": "X:/assets/ex.ma",
"transforms": [1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1] # matrix
}
]
}
]
}
"""
class Attribute(object):
    """Plain value holder for a single shot/asset attribute."""
    def __init__(self, value):
        self.value = value
class Asset(object):
    """Placeholder asset description; only class-level defaults so far."""
    field_type = 'camera'
    locked = False
    pass
class Shot(object):
    """Placeholder for the shot data container; not yet implemented."""
    pass
class FrameRange(object):
def __init__(self, start=None, end=None, handle=None, tail=None, preroll=None, postroll=None, **kwargs):
self.__dict__.update(kwargs) | [
"sigmao@gmail.com"
] | sigmao@gmail.com |
192e0a22a39afd8de4675f9032f1eaadfbe026fb | 0cb064f4e2f5b27a189b3e7631bb19f7842e150b | /zvt/recorders/eastmoney/dividend_financing/spo_detail_recorder.py | 5afee75a91dc41f48230b292ccc4813ddf9fab99 | [
"MIT"
] | permissive | stellar2016/zvt | 35e514927302cffb3577f3535344e2ca55ec9abd | f6c2c05c136b14c0c0f239960f08f85bcdee7c28 | refs/heads/master | 2021-04-19T13:18:01.020365 | 2020-03-22T14:44:26 | 2020-03-22T14:44:26 | 249,607,341 | 0 | 0 | MIT | 2020-03-24T03:51:06 | 2020-03-24T03:51:05 | null | UTF-8 | Python | false | false | 2,077 | py | # -*- coding: utf-8 -*-
from zvdata.utils.pd_utils import pd_is_not_null
from zvdata.utils.time_utils import now_pd_timestamp
from zvdata.utils.utils import to_float
from zvt.api.api import get_dividend_financing, get_spo_detail
from zvt.domain import SpoDetail, DividendFinancing
from zvt.recorders.eastmoney.common import EastmoneyPageabeDataRecorder
class SPODetailRecorder(EastmoneyPageabeDataRecorder):
    """Records SPO (secondary public offering) details from eastmoney and
    back-fills the aggregated spo_raising_fund on DividendFinancing rows."""
    data_schema = SpoDetail

    url = 'https://emh5.eastmoney.com/api/FenHongRongZi/GetZengFaMingXiList'
    page_url = url
    path_fields = ['ZengFaMingXiList']

    def get_original_time_field(self):
        # Field in the eastmoney payload that holds the SPO timestamp.
        return 'ZengFaShiJian'

    def get_data_map(self):
        # schema column -> (payload field, converter)
        return {
            "spo_issues": ("ShiJiZengFa", to_float),
            "spo_price": ("ZengFaJiaGe", to_float),
            "spo_raising_fund": ("ShiJiMuJi", to_float)
        }

    def on_finish(self):
        """After recording, sum each entity's SPO raising funds within the
        year and fill DividendFinancing rows missing spo_raising_fund."""
        last_year = str(now_pd_timestamp().year)
        codes = [item.code for item in self.entities]
        need_filleds = get_dividend_financing(provider=self.provider, codes=codes,
                                              return_type='domain',
                                              session=self.session,
                                              filters=[DividendFinancing.spo_raising_fund.is_(None)],
                                              end_timestamp=last_year)

        for item in need_filleds:
            df = get_spo_detail(provider=self.provider, entity_id=item.entity_id,
                                columns=[SpoDetail.timestamp, SpoDetail.spo_raising_fund],
                                start_timestamp=item.timestamp,
                                end_timestamp="{}-12-31".format(item.timestamp.year))
            if pd_is_not_null(df):
                item.spo_raising_fund = df['spo_raising_fund'].sum()
        self.session.commit()

        super().on_finish()
__all__ = ['SPODetailRecorder']

# Manual test entry point: record SPO details for a single code.
if __name__ == '__main__':
    # init_log('spo_detail.log')
    recorder = SPODetailRecorder(codes=['000999'])
    recorder.run()
| [
"5533061@qq.com"
] | 5533061@qq.com |
ebe77479a6f993340f1cd08739bde04ffc75258f | 893a1151fcdeb5ad9a2219fd03cf7277b817a55e | /translate.py | 9808b2274596a13025466b1bad959f7d37c62b56 | [] | no_license | busbyjrj/translate | a818207e62e4224fd13d520b8c6a0d4581648703 | 326cdad60cb8ec9dd03c9abdb90600c8619aa98a | refs/heads/master | 2020-03-26T03:18:28.189556 | 2018-08-12T08:12:18 | 2018-08-12T08:12:18 | 144,449,079 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | import requests
import hashlib
import random
import json
import re
import sys
import time
def get_content():
    '''
    Fix the extra line breaks introduced when copying text out of a PDF.

    Reads lines from stdin until a line containing only "#", rejoins the
    wrapped lines into paragraphs (a sentence-ending ".\\n" marks a real
    paragraph break), then feeds each paragraph to get_translation().
    '''
    print('请输入要翻译的内容:')
    transText = ""
    line = sys.stdin.readline()
    while line != "#\n":
        transText += line
        line = sys.stdin.readline()
    # ".\n" ends a real paragraph: protect it with a placeholder ("段落"),
    # flatten every other newline to a space, then restore the breaks.
    transText = transText.replace(".\n","段落")
    transText = transText.replace("\n"," ")
    transText = transText.replace("段落","\n")
    # NOTE(review): this replacement rewrites '\nX' as '\n' + 'X', which is
    # a no-op as written -- verify the intended transformation.
    pattern = re.compile(r'\n[A-Z]')
    res = pattern.findall(transText)
    for i in res:
        transText = transText.replace(str(i),'\n'+str(i)[1])
    transText = transText.split('\n')
    for each in transText:
        get_translation(each)
def get_translation(q):
    """Translate *q* with the Baidu Translate API and print the result.

    Direction is auto-detected: text consisting only of Chinese characters
    is translated zh->en, everything else en->zh.  Failures (network errors,
    unexpected API responses) are silently skipped so batch translation of
    many paragraphs keeps going.

    :param q: text to translate
    """
    appid = '百度翻译appid'
    secretKey = '百度翻译secreKey'
    url = 'https://fanyi-api.baidu.com/api/trans/vip/translate'
    try:
        # Pure-Chinese input is translated to English, otherwise to Chinese.
        compile_trans = re.compile(r'(^[\u4e00-\u9fa5]{0,}$)')
        if compile_trans.match(q):
            from_ = 'zh'
            to_ = 'en'
        else:
            from_ = 'en'
            to_ = 'zh'
        # Sign the request as the Baidu API requires:
        # md5(appid + query + salt + secretKey)
        salt = random.randint(32768, 65536)
        sign = (appid + q + str(salt) + secretKey).encode("utf-8")
        m1 = hashlib.md5(sign)
        sign = m1.hexdigest()
        data = {
            'q' : q,
            'from' : from_,
            'to' : to_,
            'appid' : appid,
            'salt' : salt,
            'sign' : sign,}
        res = requests.post(url, data).text
        target = json.loads(res)
        print(target['trans_result'][0]['dst']+'\n\n\n')
    except Exception:
        # Best-effort: skip paragraphs that fail instead of aborting the
        # whole batch.  (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made Ctrl-C unreliable.)
        pass
# Read paragraphs from stdin and translate them, forever (Ctrl-C to quit).
while True:
    get_content()
| [
"admin@busby.com.cn"
] | admin@busby.com.cn |
33e123b6737da78e5502366ca8247dc2a6cd8e35 | d1b3f9a802068d310fb1c1461753c6a00094f861 | /basics/class_vs_static.py | 09da26a169e5e2fa099ddb5018fa8a3fbe4a4c9f | [] | no_license | Mathanrajseenuvasan/pythonCrud_reactUi | d783897e1b2bc6671fca84968cef854939249bf3 | 45856125bfd60e0e6793a4893932e1789e833729 | refs/heads/master | 2022-10-03T10:00:56.184641 | 2020-01-20T05:15:55 | 2020-01-20T05:15:55 | 235,020,383 | 0 | 1 | null | 2022-09-16T18:16:44 | 2020-01-20T04:43:10 | Python | UTF-8 | Python | false | false | 881 | py | class Number():
def __init__(self, value):
self.value = value
@staticmethod
def sum(value1, value2):
return Number(value1+value2)
@classmethod
def zum(cls, value1, value2):
return cls(value1+value2)
def print(self):
print(str(self.value))
class Float(Number):
# Skip defining an __init__ method, and it uses the same as Number
# Skip defining the sum() method, and it uses the same as Number
# Skip defining the print method, and it uses the same as Number
# Or we could define our own print method for this class.
def print(self):
# Prints the number with 2 decimal places
print("{:.2f}".format(self.value))
f = Float.sum(0.11, 0.1593)
f.print()
f1 = Float.zum(0.11, 0.1593)
f1.print()
m = Number.sum(15, 15)
m.print()
n = Number(0.15647)
n.print()
f = Float(0.15647)
f.print()
| [
"noreply@github.com"
] | noreply@github.com |
987eb84fbf26d29415ffd129fa743655d8efb924 | b9610a2d8fbe887d8348143758a9ae16b8760f2a | /KnOWLearn-Tool_V2/ConnectionManager.py | 116e839a50774db1a13cc43f8d8920bc94f993b2 | [] | no_license | davan690/knOWLearn-ToolV2 | 8b167b40fcd87b026313e9710c0e2bf496038f04 | 5739dc7c32446064880e7b808bad879c399cf920 | refs/heads/master | 2021-05-29T00:51:29.898742 | 2014-12-04T17:50:11 | 2014-12-04T17:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | # -*- coding: utf-8 -*-
from KnOWLearn.TermExtractor.CandidateTerms import CandidateTerms as extractTerms
from KnOWLearn.WSDisambiguator.Disambiguator import Disambiguator as disambiguateTerms
import socket
import sys
def getMDTerms(disambiguateTerms):
metadataTerms = ''
for i in range(0,disambiguateTerms.__len__()):
term = Senses.termsdis[i]
metadataTerms += str(term.term)+'/TermName/'
if term.senseprobability > 0.5:
metadataTerms += str(term.sense.name)+'/TermSense/'
else:
metadataTerms += 'None/TermSense/'
for j in range(0,term.synsets.__len__()):
synset = term.synsets[j][0]
metadataTerms += str(synset.name)+'/SynsetName/'
metadataTerms += str(synset.definition)+'/SynsetDefinition/'
if(term.synsets.__len__() > j):
metadataTerms += '/Synset/'
if(Senses.termsdis.__len__() > i):
metadataTerms += '/Term/'
return metadataTerms
if __name__ == "__main__":
Terms = []
Senses = None
HOST = 'localhost' # Symbolic name meaning the localhost
PORT = 50003 # Arbitrary non-privileged port
if len(sys.argv) > 1:
PORT = int(sys.argv[1])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
print (HOST, PORT)
conn, addr = s.accept()
print 'Connected by', addr
platform = sys.platform
charToDelete = 2
if platform == 'linux2':
charToDelete = 1
while 1:
data = conn.recv(1024)
print 'Received: '+data
if not data: break
elif data.startswith('terms for'):
directory = str(data.split('Path:')[1])[:-(charToDelete)]
directory = directory.strip()
Terms = extractTerms(str(directory),0.05,0.005)
print 'Terms founded: ',Terms
conn.send(str(Terms)+'\n')
elif data.startswith('recompute: '):
values = data[11:-(charToDelete)].split(',')
print 'recomputing terms with ',values
Terms.recompute(float(values[0]),float(values[1]))
print 'Terms recomputed: ',Terms
conn.send(str(Terms)+'\n')
elif data.startswith('disambiguate terms'):
Senses = disambiguateTerms(Terms)
conn.send(getMDTerms(Senses.termsdis)+'\n')
elif data.startswith('delete term: '):
trashterm = str(data[13:-(charToDelete)])
print trashterm, ' to delete'
Terms.trash(trashterm)
print Terms
conn.send(str(Terms)+'\n')
else:
conn.send(data)
conn.close()
addr.close()
| [
"svieyra@svieyra-Latitude-E7440"
] | svieyra@svieyra-Latitude-E7440 |
9134086afe0892942ae14d735341baddd0d855b8 | 43d403057456c3c11baca210b341a5c944e0c396 | /sandbox/dkuhlman/docutils/test/DocutilsTestSupport.py | e40300984d66b98ccf531f1c454fdf5270d151e3 | [] | no_license | docutils/docutils | f6bf7265c7d84b9985acc4e5f2700f9643d0e3f0 | 3b53ded52bc439d8068b6ecb20ea0a761247e479 | refs/heads/master | 2023-08-07T17:03:26.038557 | 2022-01-16T17:57:56 | 2023-04-24T16:41:48 | 44,840,244 | 36 | 17 | null | 2023-07-23T21:42:44 | 2015-10-23T21:46:17 | Python | UTF-8 | Python | false | false | 24,423 | py | # Authors: David Goodger; Garth Kidd
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Exports the following:
:Modules:
- `statemachine` is 'docutils.statemachine'
- `nodes` is 'docutils.nodes'
- `urischemes` is 'docutils.urischemes'
- `utils` is 'docutils.utils'
- `transforms` is 'docutils.transforms'
- `states` is 'docutils.parsers.rst.states'
- `tableparser` is 'docutils.parsers.rst.tableparser'
:Classes:
- `CustomTestSuite`
- `CustomTestCase`
- `TransformTestSuite`
- `TransformTestCase`
- `ParserTestSuite`
- `ParserTestCase`
- `PEPParserTestSuite`
- `PEPParserTestCase`
- `GridTableParserTestSuite`
- `GridTableParserTestCase`
- `SimpleTableParserTestSuite`
- `SimpleTableParserTestCase`
- 'LatexPublishTestSuite'
- 'LatexPublishTestCase'
- 'PythonLatexPublishTestSuite'
- 'PythonLatexPublishTestCase'
- `DevNull` (output sink)
"""
__docformat__ = 'reStructuredText'
import sys
import os
import unittest
import difflib
import inspect
from pprint import pformat
from types import UnicodeType
import package_unittest
import docutils
import docutils.core
from docutils import frontend, nodes, statemachine, urischemes, utils
from docutils.transforms import universal
from docutils.parsers import rst
from docutils.parsers.rst import states, tableparser, directives, languages
from docutils.readers import standalone, pep, python
from docutils.statemachine import StringList, string2lines
try:
from docutils.readers.python import moduleparser
except:
moduleparser = None
try:
import mypdb as pdb
except:
import pdb
# Hack to make repr(StringList) look like repr(list):
StringList.__repr__ = StringList.__str__
class DevNull:
"""Output sink."""
def write(self, string):
pass
class CustomTestSuite(unittest.TestSuite):
"""
A collection of custom TestCases.
"""
id = ''
"""Identifier for the TestSuite. Prepended to the
TestCase identifiers to make identification easier."""
next_test_case_id = 0
"""The next identifier to use for non-identified test cases."""
def __init__(self, tests=(), id=None):
"""
Initialize the CustomTestSuite.
Arguments:
id -- identifier for the suite, prepended to test cases.
"""
unittest.TestSuite.__init__(self, tests)
if id is None:
mypath = os.path.abspath(
sys.modules[CustomTestSuite.__module__].__file__)
outerframes = inspect.getouterframes(inspect.currentframe())
for outerframe in outerframes[1:]:
if outerframe[3] != '__init__':
callerpath = outerframe[1]
if callerpath is None:
# It happens sometimes. Why is a mystery.
callerpath = os.getcwd()
callerpath = os.path.abspath(callerpath)
break
mydir, myname = os.path.split(mypath)
if not mydir:
mydir = os.curdir
if callerpath.startswith(mydir):
self.id = callerpath[len(mydir) + 1:] # caller's module
else:
self.id = callerpath
else:
self.id = id
def addTestCase(self, test_case_class, method_name, input, expected,
id=None, run_in_debugger=0, short_description=None,
**kwargs):
"""
Create a custom TestCase in the CustomTestSuite.
Also return it, just in case.
Arguments:
test_case_class --
method_name --
input -- input to the parser.
expected -- expected output from the parser.
id -- unique test identifier, used by the test framework.
run_in_debugger -- if true, run this test under the pdb debugger.
short_description -- override to default test description.
"""
if id is None: # generate id if required
id = self.next_test_case_id
self.next_test_case_id += 1
# test identifier will become suiteid.testid
tcid = '%s: %s' % (self.id, id)
# generate and add test case
tc = test_case_class(method_name, input, expected, tcid,
run_in_debugger=run_in_debugger,
short_description=short_description,
**kwargs)
self.addTest(tc)
return tc
class CustomTestCase(unittest.TestCase):
compare = difflib.Differ().compare
"""Comparison method shared by all subclasses."""
def __init__(self, method_name, input, expected, id,
run_in_debugger=0, short_description=None):
"""
Initialise the CustomTestCase.
Arguments:
method_name -- name of test method to run.
input -- input to the parser.
expected -- expected output from the parser.
id -- unique test identifier, used by the test framework.
run_in_debugger -- if true, run this test under the pdb debugger.
short_description -- override to default test description.
"""
self.id = id
self.input = input
self.expected = expected
self.run_in_debugger = run_in_debugger
# Ring your mother.
unittest.TestCase.__init__(self, method_name)
def __str__(self):
"""
Return string conversion. Overridden to give test id, in addition to
method name.
"""
return '%s; %s' % (self.id, unittest.TestCase.__str__(self))
def __repr__(self):
return "<%s %s>" % (self.id, unittest.TestCase.__repr__(self))
def compare_output(self, input, output, expected):
"""`input`, `output`, and `expected` should all be strings."""
if type(input) == UnicodeType:
input = input.encode('raw_unicode_escape')
if type(output) == UnicodeType:
output = output.encode('raw_unicode_escape')
if type(expected) == UnicodeType:
expected = expected.encode('raw_unicode_escape')
try:
self.assertEquals('\n' + output, '\n' + expected)
except AssertionError:
print >>sys.stderr, '\n%s\ninput:' % (self,)
print >>sys.stderr, input
print >>sys.stderr, '-: expected\n+: output'
print >>sys.stderr, ''.join(self.compare(expected.splitlines(1),
output.splitlines(1)))
raise
def skip_test(self):
print >>sys.stderr, '%s: Test skipped' % self
class TransformTestSuite(CustomTestSuite):
"""
A collection of TransformTestCases.
A TransformTestSuite instance manufactures TransformTestCases,
keeps track of them, and provides a shared test fixture (a-la
setUp and tearDown).
"""
def __init__(self, parser):
self.parser = parser
"""Parser shared by all test cases."""
CustomTestSuite.__init__(self)
def generateTests(self, dict, dictname='totest',
testmethod='test_transforms'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type's name) maps to a list of transform
classes and list of tests. Each test is a list: input, expected
output, optional modifier. The optional third entry, a behavior
modifier, can be 0 (temporarily disable this test) or 1 (run this test
under the pdb debugger). Tests should be self-documenting and not
require external comments.
"""
for name, (transforms, cases) in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = 0
if len(case)==3:
if case[2]:
run_in_debugger = 1
else:
continue
self.addTestCase(
TransformTestCase, testmethod,
transforms=transforms, parser=self.parser,
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class TransformTestCase(CustomTestCase):
"""
Output checker for the transform.
Should probably be called TransformOutputChecker, but I can deal with
that later when/if someone comes up with a category of transform test
cases that have nothing to do with the input and output of the transform.
"""
option_parser = frontend.OptionParser(components=(rst.Parser,))
settings = option_parser.get_default_values()
settings.report_level = 1
settings.halt_level = 5
settings.debug = package_unittest.debug
settings.warning_stream = DevNull()
def __init__(self, *args, **kwargs):
self.transforms = kwargs['transforms']
"""List of transforms to perform for this test case."""
self.parser = kwargs['parser']
"""Input parser for this test case."""
del kwargs['transforms'], kwargs['parser'] # only wanted here
CustomTestCase.__init__(self, *args, **kwargs)
def supports(self, format):
return 1
def test_transforms(self):
if self.run_in_debugger:
pdb.set_trace()
document = utils.new_document('test data', self.settings)
self.parser.parse(self.input, document)
# Don't do a ``populate_from_components()`` because that would
# enable the Transformer's default transforms.
document.transformer.add_transforms(self.transforms)
document.transformer.add_transform(universal.TestMessages)
document.transformer.components['writer'] = self
document.transformer.apply_transforms()
output = document.pformat()
self.compare_output(self.input, output, self.expected)
def test_transforms_verbosely(self):
if self.run_in_debugger:
pdb.set_trace()
print '\n', self.id
print '-' * 70
print self.input
document = utils.new_document('test data', self.settings)
self.parser.parse(self.input, document)
print '-' * 70
print document.pformat()
for transformClass in self.transforms:
transformClass(document).apply()
output = document.pformat()
print '-' * 70
print output
self.compare_output(self.input, output, self.expected)
class ParserTestCase(CustomTestCase):
"""
Output checker for the parser.
Should probably be called ParserOutputChecker, but I can deal with
that later when/if someone comes up with a category of parser test
cases that have nothing to do with the input and output of the parser.
"""
parser = rst.Parser()
"""Parser shared by all ParserTestCases."""
option_parser = frontend.OptionParser(components=(parser,))
settings = option_parser.get_default_values()
settings.report_level = 5
settings.halt_level = 5
settings.debug = package_unittest.debug
def test_parser(self):
if self.run_in_debugger:
pdb.set_trace()
document = utils.new_document('test data', self.settings)
self.parser.parse(self.input, document)
output = document.pformat()
self.compare_output(self.input, output, self.expected)
class ParserTestSuite(CustomTestSuite):
"""
A collection of ParserTestCases.
A ParserTestSuite instance manufactures ParserTestCases,
keeps track of them, and provides a shared test fixture (a-la
setUp and tearDown).
"""
test_case_class = ParserTestCase
def generateTests(self, dict, dictname='totest'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type name) maps to a list of tests. Each
test is a list: input, expected output, optional modifier. The
optional third entry, a behavior modifier, can be 0 (temporarily
disable this test) or 1 (run this test under the pdb debugger). Tests
should be self-documenting and not require external comments.
"""
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = 0
if len(case)==3:
if case[2]:
run_in_debugger = 1
else:
continue
self.addTestCase(
self.test_case_class, 'test_parser',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class PEPParserTestCase(ParserTestCase):
"""PEP-specific parser test case."""
parser = rst.Parser(rfc2822=1, inliner=pep.Inliner())
"""Parser shared by all PEPParserTestCases."""
option_parser = frontend.OptionParser(components=(parser, pep.Reader))
settings = option_parser.get_default_values()
settings.report_level = 5
settings.halt_level = 5
settings.debug = package_unittest.debug
class PEPParserTestSuite(ParserTestSuite):
"""A collection of PEPParserTestCases."""
test_case_class = PEPParserTestCase
class GridTableParserTestCase(CustomTestCase):
parser = tableparser.GridTableParser()
def test_parse_table(self):
self.parser.setup(StringList(string2lines(self.input), 'test data'))
try:
self.parser.find_head_body_sep()
self.parser.parse_table()
output = self.parser.cells
except Exception, details:
output = '%s: %s' % (details.__class__.__name__, details)
self.compare_output(self.input, pformat(output) + '\n',
pformat(self.expected) + '\n')
def test_parse(self):
try:
output = self.parser.parse(StringList(string2lines(self.input),
'test data'))
except Exception, details:
output = '%s: %s' % (details.__class__.__name__, details)
self.compare_output(self.input, pformat(output) + '\n',
pformat(self.expected) + '\n')
class GridTableParserTestSuite(CustomTestSuite):
"""
A collection of GridTableParserTestCases.
A GridTableParserTestSuite instance manufactures GridTableParserTestCases,
keeps track of them, and provides a shared test fixture (a-la setUp and
tearDown).
"""
test_case_class = GridTableParserTestCase
def generateTests(self, dict, dictname='totest'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type name) maps to a list of tests. Each
test is a list: an input table, expected output from parse_table(),
expected output from parse(), optional modifier. The optional fourth
entry, a behavior modifier, can be 0 (temporarily disable this test)
or 1 (run this test under the pdb debugger). Tests should be
self-documenting and not require external comments.
"""
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = 0
if len(case) == 4:
if case[-1]:
run_in_debugger = 1
else:
continue
self.addTestCase(self.test_case_class, 'test_parse_table',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
self.addTestCase(self.test_case_class, 'test_parse',
input=case[0], expected=case[2],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class SimpleTableParserTestCase(GridTableParserTestCase):
parser = tableparser.SimpleTableParser()
class SimpleTableParserTestSuite(CustomTestSuite):
"""
A collection of SimpleTableParserTestCases.
"""
test_case_class = SimpleTableParserTestCase
def generateTests(self, dict, dictname='totest'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type name) maps to a list of tests. Each
test is a list: an input table, expected output from parse(), optional
modifier. The optional third entry, a behavior modifier, can be 0
(temporarily disable this test) or 1 (run this test under the pdb
debugger). Tests should be self-documenting and not require external
comments.
"""
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = 0
if len(case) == 3:
if case[-1]:
run_in_debugger = 1
else:
continue
self.addTestCase(self.test_case_class, 'test_parse',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class PythonModuleParserTestCase(CustomTestCase):
def test_parser(self):
if self.run_in_debugger:
pdb.set_trace()
module = moduleparser.parse_module(self.input, 'test data')
output = str(module)
self.compare_output(self.input, output, self.expected)
def test_token_parser_rhs(self):
if self.run_in_debugger:
pdb.set_trace()
tr = moduleparser.TokenParser(self.input)
output = tr.rhs(1)
self.compare_output(self.input, output, self.expected)
class PythonModuleParserTestSuite(CustomTestSuite):
"""
A collection of PythonModuleParserTestCase.
"""
if moduleparser is None:
PythonModuleParserTestCase.test_parser = CustomTestCase.skip_test
PythonModuleParserTestCase.test_token_parser_rhs = \
CustomTestCase.skip_test
def generateTests(self, dict, dictname='totest',
testmethod='test_parser'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type's name) maps to a list of tests. Each
test is a list: input, expected output, optional modifier. The
optional third entry, a behavior modifier, can be 0 (temporarily
disable this test) or 1 (run this test under the pdb debugger). Tests
should be self-documenting and not require external comments.
"""
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = 0
if len(case)==3:
if case[2]:
run_in_debugger = 1
else:
continue
self.addTestCase(
PythonModuleParserTestCase, testmethod,
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
# @@@ These should be generalized to WriterPublishTestCase/Suite or
# just PublishTestCase/Suite, as per TransformTestCase/Suite.
class LatexPublishTestCase(CustomTestCase, docutils.SettingsSpec):
"""
Test case for publish.
"""
settings_default_overrides = {'_disable_config': 1}
def test_publish(self):
if self.run_in_debugger:
pdb.set_trace()
output = docutils.core.publish_string(
source=self.input,
reader_name='standalone',
parser_name='restructuredtext',
writer_name='latex',
settings_spec=self)
self.compare_output(self.input, output, self.expected)
class LatexPublishTestSuite(CustomTestSuite):
def __init__(self):
CustomTestSuite.__init__(self)
def generateTests(self, dict, dictname='totest'):
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = 0
if len(case)==3:
if case[2]:
run_in_debugger = 1
else:
continue
self.addTestCase(
LatexPublishTestCase, 'test_publish',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class PythonLatexPublishTestCase(CustomTestCase, docutils.SettingsSpec):
"""
Test case for publish.
"""
settings_default_overrides = {'_disable_config': 1}
def test_publish(self):
if self.run_in_debugger:
pdb.set_trace()
output = docutils.core.publish_string(
source=self.input,
reader_name='standalone',
parser_name='restructuredtext',
writer_name='python_latex',
settings_spec=self)
self.compare_output(self.input, output, self.expected)
def compare_output(self, input, output, expected):
"""`input`, `output`, and `expected` should all be strings."""
if type(input) == UnicodeType:
input = input.encode('raw_unicode_escape')
if type(output) == UnicodeType:
output = output.encode('raw_unicode_escape')
if type(expected) == UnicodeType:
expected = expected.encode('raw_unicode_escape')
# Remove "generated on" lines.
output = self.remove_lines(output, ('generated on --',))
expected = self.remove_lines(expected, ('generated on --',))
try:
self.assertEquals('\n' + output, '\n' + expected)
except AssertionError:
print >>sys.stderr, '\n%s\ninput:' % (self,)
print >>sys.stderr, input
print >>sys.stderr, '-: expected\n+: output'
print >>sys.stderr, ''.join(self.compare(expected.splitlines(1),
output.splitlines(1)))
raise
def remove_lines(self, inStr, targetList):
inLines = inStr.splitlines()
outLines = []
for line in inLines:
remove = False
for target in targetList:
if line.find(target) > -1:
remove = True
break
if not remove:
outLines.append(line)
outStr = '\n'.join(outLines)
return outStr
class PythonLatexPublishTestSuite(CustomTestSuite):
def __init__(self):
CustomTestSuite.__init__(self)
def generateTests(self, dict, dictname='totest'):
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = 0
if len(case)==3:
if case[2]:
run_in_debugger = 1
else:
continue
self.addTestCase(
PythonLatexPublishTestCase, 'test_publish',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
def exception_data(code):
"""
Execute `code` and return the resulting exception, the exception arguments,
and the formatted exception string.
"""
try:
exec(code)
except Exception, detail:
return (detail, detail.args,
'%s: %s' % (detail.__class__.__name__, detail))
| [
"dkuhlman@929543f6-e4f2-0310-98a6-ba3bd3dd1d04"
] | dkuhlman@929543f6-e4f2-0310-98a6-ba3bd3dd1d04 |
2b226d2afb54feee4e3cdae2b5a60229a30fa896 | d503f3155dd9b5c5008008c8735f23c5199af277 | /inheritance/multiple_inheritance.py | 4bbf644127b9a3a79e4dcaa747943d386f425b35 | [] | no_license | Kev4HackPro/project_python_learn | 2fcfdc3d6f2513c99c7a90c64a522dbaca57ed4c | 521f206dbcbcc3e4c59e09bfd6ba436f72ca9f6b | refs/heads/master | 2022-11-29T05:37:31.575024 | 2020-08-14T20:15:37 | 2020-08-14T20:15:37 | 277,941,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | class Car:
def move(self):
print('Car - move()')
class Toy:
def move(self):
print('Toy - move()')
class ToyCar(Toy, Car):
"""A Toy car"""
tc = ToyCar()
tc.move()
| [
"kavitakelvin07@gamil.com"
] | kavitakelvin07@gamil.com |
dafa29da6a8e0d53fe8d347ef38b527f74b0187b | 52d833912e56f808c8e572615cbc28c3c137b226 | /Linked list/ Remove-Duplicates-from-Sorted-List.py | c7dcec1a5a05062f6a5f4135f3731051b3e8d2cf | [] | no_license | vidhi-mody/DSA | bf33cfb407152e9f134596b73adfbe4979168984 | ccc3a53bb5764f808aefe3a569564d7b4a0cc338 | refs/heads/master | 2023-05-10T05:37:39.394306 | 2021-06-10T08:01:17 | 2021-06-10T08:01:17 | 284,305,102 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | # Given the head of a sorted linked list, delete all duplicates such that each element appears only once.
# Return the linked list sorted as well.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return None
if(head.next == None):
return head
node = head.next
current = head
value = head.val
while(node.next):
if(node.val == value):
current.next = node.next
node = node.next
else:
current = node
value = node.val
node = node.next
if(node.val == value):
current.next = None
return head
| [
"vidhimody6@gmail.com"
] | vidhimody6@gmail.com |
f79fb1ac50680340ecb85ed7520074385d9f59bc | abe2560a9df7d7a882d2ca484a0136f6d235edff | /atividades/roteiro6/meu_grafo.py | 35decf5bc268e99a429ce8a964b607c93a79373e | [] | no_license | joaovictornsv/teoria-dos-grafos | 33890ba14d01b7a6cbbb76cb6a06d081b0c89ae7 | 2c00f957bcae6ee56606914e47e3eede105611ac | refs/heads/main | 2023-07-30T23:03:49.751368 | 2021-09-21T21:35:05 | 2021-09-21T21:35:05 | 380,078,740 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | from bibgrafo.grafo_matriz_adj_dir import GrafoMatrizAdjacenciaDirecionado
from bibgrafo.grafo_exceptions import *
from copy import deepcopy
from time import sleep
class MeuGrafo(GrafoMatrizAdjacenciaDirecionado):
def clone_matriz(self):
matriz_clone = deepcopy(self.M)
return matriz_clone
def clone_grafo(self):
grafo_clone = deepcopy(self)
return grafo_clone
def warshall(self):
matriz_copia = self.clone_matriz()
tamanho_matriz = len(matriz_copia)
matriz_copia_uns_e_zeros = deepcopy(matriz_copia)
for i in range(tamanho_matriz):
for j in range(tamanho_matriz):
if len(matriz_copia_uns_e_zeros[j][i]) >= 1:
matriz_copia_uns_e_zeros[j][i] = 1
else:
matriz_copia_uns_e_zeros[j][i] = 0
matriz_alcancabilidade = deepcopy(matriz_copia_uns_e_zeros)
for i in range(tamanho_matriz):
for j in range(tamanho_matriz):
if matriz_alcancabilidade[j][i] == 1:
for k in range(tamanho_matriz):
item_jk = matriz_alcancabilidade[j][k]
item_ik = matriz_alcancabilidade[i][k]
if item_jk >= item_ik:
matriz_alcancabilidade[j][k] = item_jk
elif item_jk < item_ik:
matriz_alcancabilidade[j][k] = item_ik
return matriz_alcancabilidade
| [
"joaovictornsv@gmail.com"
] | joaovictornsv@gmail.com |
5a4d9f72555ea7f88dad7b042b86c2ffe5a6fb8e | 33920bc6c82d8e58bb217b47020313080471d6e4 | /RowTemplate2.py | 295dff6767b54e8b641f3eb3299f1737e02bd8a9 | [] | no_license | chenghongm/OOB-ticketingSystem | 5b16ea810d55e71b82149e3203e0a96a28e68362 | 43383d5927661f206c01c83a79834664ebe2a21e | refs/heads/master | 2020-04-26T05:39:03.256153 | 2019-05-07T01:26:08 | 2019-05-07T01:26:08 | 173,341,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,531 | py | from anvil import *
import stripe.checkout
import anvil.google.auth, anvil.google.drive
from anvil.google.drive import app_files
import anvil.users
import anvil.server
import anvil.tables as tables
import anvil.tables.query as q
from anvil.tables import app_tables
class RowTemplate2(RowTemplate2Template):
def __init__(self, **properties):
# Set Form properties and Data Bindings.
self.init_components(**properties)
# Any code you write here will run when the form opens.
def check_box_change(self, **event_args):
"""This method is called when this checkbox is checked or unchecked"""
pass
def check_box_1_change(self, **event_args):
"""This method is called when this checkbox is checked or unchecked"""
trip = self.label_date.text +' | '+self.label_train.text+' | '+self.label_fstation.text+' | '+self.label_tstation.text+' | '+self.label_stime.text+' | '+self.label_etime.text + ' | '+self.label_duration.text
row = app_tables.order_tmp.get(Trip=trip)
if event_args['sender'].checked:
if row is None :
app_tables.order_tmp.add_row(Trip=trip)
else:
if row is not None:
row.delete()
def form_refreshing_data_bindings(self, **event_args):
"""This method is called when refreshing_data_bindings is called"""
pass
def button_add_click(self, **event_args):
"""This method is called when the button is clicked"""
pass
| [
"noreply@github.com"
] | noreply@github.com |
aa8788f329966a2f453efcc85dcb17dc64883c53 | 443b59b399e935fd66b9de93c57dd9fa78defec5 | /public/common.py | ae599acf4e7baa88945ccdc6524b12d361d85549 | [] | no_license | dongyueqian/ApiAutoTest | 3b61a37c56c071da57a7cd0bfa9def3f6dbdb067 | 6a19d4ea4af2b0879ae75079e8b3fc520bd32213 | refs/heads/master | 2023-08-21T14:38:49.699228 | 2021-10-13T13:17:56 | 2021-10-13T13:17:56 | 415,588,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | import requests
# 公用方法
def get_accesstoken():
return "1e0d9ca5-4ff3-4ce6-918d-8ba853ed6694"
def create_topics(topics_data):
url = "http://47.100.175.62:3000/api/v1/topics"
r = requests.post(url=url, json=topics_data)
return r
def topic_detail(id):
'''get /topic/:id 主题详情'''
url = "http://47.100.175.62:3000/api/v1/topic/"+id
return requests.get(url)
| [
"874974405@qq.com"
] | 874974405@qq.com |
2a99c1e567547ad471064a62fa5571a65b29f715 | 71324aca11e16d6da17b0440e72d0107f5af6e04 | /todo_vue_restful/todo_with_vue_and_restful/todo/migrations/0001_initial.py | 18a12c83dc377d8c7d800fdd1d51722e1aa0f4de | [
"MIT"
] | permissive | n3k0fi5t/Django_Tutorial | 6bad82a919d1de0162b34f4c7f753cd126b05cc3 | e3953335ca88fe22c68268fd76afb7c4f9bbb55f | refs/heads/master | 2023-02-16T07:56:56.416031 | 2021-01-11T23:17:33 | 2021-01-11T23:17:33 | 291,436,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | # Generated by Django 3.1 on 2020-12-14 00:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='TodoItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('content', models.TextField()),
('create_time', models.DateTimeField(auto_now_add=True)),
('finish_time', models.DateTimeField(auto_now=True)),
('is_finished', models.BooleanField(default=False)),
('user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'todo_item',
'ordering': ['-create_time'],
},
),
]
| [
"r05922078@ntu.edu.tw"
] | r05922078@ntu.edu.tw |
6209c7321ca355fc2465338319c0e4eb013f15dd | d15d8e89080fa4a3b91c10a3b6f6e3852b6644a9 | /cheese-py-venv/bin/pip2.7 | a5009c13e10ddda945e1ce56bf2acd5b16c6fe94 | [] | no_license | brantstuns/cheese-py | 36e19e0116fea506a9e66c91ef1a01d352fb10a9 | 7ed4bc5ad3827c00284faab2ea079678bef5cbef | refs/heads/master | 2021-04-30T22:37:35.387783 | 2016-02-22T06:57:56 | 2016-02-22T06:57:56 | 52,253,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | 7 | #!/Users/Brant/dev/cheese-py/cheese-py-venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated inside the virtualenv for the `pip2.7`
# entry point; it simply delegates to pip's own main().
import re
import sys
from pip import main
if __name__ == '__main__':
    # Strip a setuptools-added '-script.pyw' / '.exe' suffix from argv[0]
    # so pip reports a clean program name, then exit with pip's status code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"brantstuns@gmail.com"
] | brantstuns@gmail.com |
55011e7d72f18177422f55514b292382081f4dcd | 2caf6885511af24443e22aaa43cd679d694f6f80 | /note/my_note/first_month/day06/demo01.py | 18fab09a906c452f8cebed4af58ddeba84253c43 | [] | no_license | nandadao/Python_note | 7f9ba54a73af05c935b4f7e24cacb728859a6c69 | abddfc2e9a1704c88867cff1898c9251f59d4fb5 | refs/heads/master | 2020-11-25T18:29:50.607670 | 2019-12-19T01:28:02 | 2019-12-19T01:28:02 | 228,793,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | """
Rock-paper-scissors.
Rock beats scissors.
Scissors beat paper.
Paper beats rock.
Moves are generated at random.
"""
# import random
# win = ("石头剪刀", "剪刀布", "布石头")
# same = ("石头石头", "剪刀剪刀", "布布")
# choice = ("石头", "剪刀", "布")
# pc1 = choice[random.randint(0, 2)]
# # pc2 = choice[random.randint(0, 2)]
# pc2 = input("请出拳:")
# print(str(pc1)+str(pc2))
# # if str(pc1)+str(pc2) in win or str(pc2)+str(pc1) in win:
# if str(pc2)+str(pc1) in win:
# print("获胜")
# elif str(pc2)+str(pc1) in same:
# print("相同重新开始")
# else:
# print("你输了")
# 统一管理多个数据 :思想很重要
# import random
# tuple_win = (
# ("石头", "剪刀"),
# ("剪刀", "布"),
# ("布", "石头"),
# )
# tuple_item = ("石头", "剪刀", "布")
#
# item_input = input("请出拳:")
# # random.randint(0, 2) # 生成0 1 2
# index_system = random.randint(0, 2)
# item_system = tuple_item[index_system]
#
# if item_input == item_system:
# print("平局")
# elif (item_input, item_system) in tuple_win:
# print("你获胜")
# else:
# print("你失败")
| [
"1361335953@qq.com"
] | 1361335953@qq.com |
a8510ed4c832d93162788220a8e618a6ac40439c | 4a09376ef4ddd8cd5752e79bb0f3c18de6906455 | /iHubCrowdSourcing/tmpScripts/GetUniques.py | 9d7f8666fb8302f725b05fcd7aee852179ddeb03 | [] | no_license | GBelzoni/iHub | 33f36500f090fbfd962977ae266117be499b7cb5 | e816954bfca2127fdaaf750aef8b0442b287003c | refs/heads/master | 2021-01-18T14:45:09.589406 | 2013-09-06T22:52:15 | 2013-09-06T22:52:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | fin =open('modeltree.txt')
lines = fin.readlines()
fin.close()
fout =open('unique.txt','w')
lines2 = [ line.lower() for line in lines]
uniques = set(lines2)
print uniques
fout.writelines(uniques) | [
"patrickhcostello@gmail.com"
] | patrickhcostello@gmail.com |
405f03f5515603aa23722b6e2fe9f54458420056 | 5d3e3b9d2fb4aafb4e7c944a50fc8dae5d7c2368 | /tuner.py | 71fa466eb465eacb38678ed16c69e5b43c7218c1 | [] | no_license | johnsonc/tuner | 4bf98017de3849e8e1163a6c9156fc7c1a90b0ea | 53adf0ec527b00b78cfd5ae2bf0cbb9107400efe | refs/heads/master | 2021-07-21T02:34:06.298709 | 2017-10-29T04:41:42 | 2017-10-29T04:41:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,601 | py | #!/usr/bin/env python3
from __future__ import print_function, division
import sys
from select import select
from array import array
import pyaudio
import numpy as np
import matplotlib.pyplot as plt
FORMAT = pyaudio.paInt16
CHUNK_SIZE = 128 # Depends on human persistence of hearing
RATE = 2048 # Depends on desired frequencies to capture
RESOLUTION = 0.5 # Desired resolution in Hz
THRESHOLD = 20000 # Minimum amplitude of the largest frequency spike
KAISER_BETA = 7.5 # The `beta' parameter of the Kaiser window
def tune(plotfreq=False, plottime=False, input_device_index=None):
    """Continuously read microphone audio and print the dominant frequency
    bin of each chunk, until the user presses return on stdin.

    Args:
        plotfreq: live-plot the magnitude spectrum of each chunk.
        plottime: live-plot the raw time-domain signal of each chunk.
        input_device_index: PyAudio input device index (None = default).
    """
    # Set up the Kaiser window
    n = np.arange(CHUNK_SIZE) + 0.5  # Assuming CHUNK_SIZE is even
    x = (n - CHUNK_SIZE / 2) / (CHUNK_SIZE / 2)
    window = np.i0(KAISER_BETA * np.sqrt(1 - x ** 2)) / np.i0(KAISER_BETA)
    # Get audio data
    p = pyaudio.PyAudio()
    #device_info = p.get_device_info_by_index(input_device_index)
    #print(device_info)
    stream = p.open(format=FORMAT, channels=1, rate=RATE, input=True,
                    input_device_index=input_device_index,
                    frames_per_buffer=CHUNK_SIZE)
    if plotfreq or plottime:
        # Set up plotting paraphernalia
        plt.interactive(True)
        if plottime:
            figtime = plt.figure()
            axtime = figtime.gca()
        if plotfreq:
            figfreq = plt.figure()
            axfreq = figfreq.gca()
    print('Press return to stop...')
    i = 0
    while 1:
        i += 1
        # Check if something has been input. If so, exit.
        if sys.stdin in select([sys.stdin, ], [], [], 0)[0]:
            # Absorb the input and break
            sys.stdin.readline()
            break
        # Acquire sound data (16-bit signed samples, matching FORMAT)
        snd_data = array('h', stream.read(CHUNK_SIZE))
        signal = np.array(snd_data, dtype=float)
        #if sys.byteorder == 'big':
        #snd_data.byteswap()
        if plottime:
            if i > 1:
                axtime.lines.remove(timeline)
            [timeline, ] = axtime.plot(signal, 'b-')
            figtime.canvas.draw()
        # Apply a Kaiser window on the signal before taking the FFT. This
        # makes the signal look better if it is periodic. Derivatives at the
        # edges of the signal match better, which means that the frequency
        # domain will have fewer side-lobes. However, it does cause each spike
        # to grow a bit broader.
        # One can change the value of beta to tradeoff between side-lobe height
        # and main lobe width.
        signal *= window
        # Zero-pad the FFT to RATE / RESOLUTION points so each bin spans
        # RESOLUTION Hz.
        spectrum = np.fft.rfft(signal, int(RATE / RESOLUTION))
        peak = np.argmax(abs(spectrum))  # peak directly gives the
                                         # desired frequency
        # Threshold on the maximum peak present in the signal (meaning we
        # expect the signal to be approximately unimodal).
        # BUGFIX: np.fft.rfft returns complex values; ordering a complex
        # number against an int raises TypeError on Python 3 (this file's
        # shebang is python3), and THRESHOLD is documented as a minimum
        # *amplitude*, so compare the magnitude.
        if abs(spectrum[peak]) > THRESHOLD:
            # Put a band-pass filter in place to look at only those frequencies
            # we want. The desired peak is the harmonic located in the
            # frequency region of interest.
            desired_peak = np.argmax(abs(spectrum[90:550]))
            # NOTE(review): this prints an index relative to bin 90; the
            # absolute bin is desired_peak + 90 (frequency = bin * RESOLUTION).
            # Confirm whether omitting the offset is intentional.
            print(desired_peak)
            if plotfreq:
                try:
                    axfreq.lines.remove(freqline)
                except UnboundLocalError:
                    pass
                [freqline, ] = axfreq.plot(abs(spectrum), 'b-')
                figfreq.canvas.draw()
    stream.stop_stream()
    stream.close()
    p.terminate()
| [
"praveenv253@gmail.com"
] | praveenv253@gmail.com |
9693cb56e4ed6ceaf6d46cd9253ef674ede6c153 | 786778fefd65dd171c6ce2eddebf51302925f722 | /models/test.py | 5bb3af250cdf0f5c238321caf29c6c5de6418f5d | [
"MIT"
] | permissive | Oya00Oya/PaintsPytorch | d3a90e8554361c6d3de17c05c1cc4927ac78a8e0 | 41cf321722a035101758c0717f082d71c12c6cf4 | refs/heads/master | 2020-03-25T07:51:58.191820 | 2018-03-24T07:04:13 | 2018-03-24T07:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,521 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
I2V_PATH = 'i2v.pth'
class ResNeXtBottleneck(nn.Module):
    """Grouped-convolution residual bottleneck (ResNeXt style).

    1x1 reduce -> grouped (2+stride)x(2+stride) conv -> 1x1 expand,
    summed with a shortcut branch (average-pooled when stride != 1).
    All convolutions are bias-free.
    """

    def __init__(self, in_channels=256, out_channels=256, stride=1, cardinality=32, dilate=1):
        super(ResNeXtBottleneck, self).__init__()
        mid = out_channels // 2  # bottleneck width
        self.out_channels = out_channels
        # 1x1 projection down to the bottleneck width.
        self.conv_reduce = nn.Conv2d(in_channels, mid, kernel_size=1, stride=1, padding=0, bias=False)
        # Grouped spatial convolution; kernel size grows with stride
        # (3 when stride=1, 4 when stride=2) so padding=dilate keeps the
        # expected output size.
        self.conv_conv = nn.Conv2d(mid, mid, kernel_size=2 + stride, stride=stride,
                                   padding=dilate, dilation=dilate,
                                   groups=cardinality, bias=False)
        # 1x1 projection back up to out_channels.
        self.conv_expand = nn.Conv2d(mid, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.shortcut = nn.Sequential()
        if stride != 1:
            # Downsample the identity branch to match the strided main branch.
            self.shortcut.add_module('shortcut', nn.AvgPool2d(2, stride=2))

    def forward(self, x):
        out = F.leaky_relu(self.conv_reduce(x), 0.2, True)
        out = F.leaky_relu(self.conv_conv(out), 0.2, True)
        out = self.conv_expand(out)
        return self.shortcut(x) + out
class NetI(nn.Module):
    """Frozen feature extractor built from a pretrained VGG-like network.

    Loads weights from I2V_PATH ('i2v.pth') — presumably Illustration2Vec
    weights (TODO confirm) — then keeps only the first 15 layers, so
    forward() returns 512-channel feature maps.
    """
    def __init__(self):
        super(NetI, self).__init__()
        # Full pretrained topology; it must match the checkpoint exactly so
        # that load_state_dict below succeeds.
        i2v_model = nn.Sequential(  # Sequential,
            nn.Conv2d(3, 64, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
            nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
            nn.Conv2d(128, 256, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
            nn.Conv2d(256, 512, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
            nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
            nn.Conv2d(512, 1024, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Conv2d(1024, 1539, (3, 3), (1, 1), (1, 1)),
            nn.AvgPool2d((7, 7), (1, 1), (0, 0), ceil_mode=True),  # AvgPool2d,
        )
        i2v_model.load_state_dict(torch.load(I2V_PATH))
        # Truncate to the first 15 children, i.e. up to and including the
        # ReLU after the second 512-channel conv (output: 512 channels).
        i2v_model = nn.Sequential(
            *list(i2v_model.children())[:15]
        )
        self.model = i2v_model
        # Per-channel mean (BGR-like magnitudes) subtracted before the net;
        # stored as a buffer so it moves with .cuda()/.to().
        self.register_buffer('mean', torch.FloatTensor([164.76139251, 167.47864617, 181.13838569]).view(1, 3, 1, 1))
    def forward(self, images):
        # Halve the resolution, then map values to [0, 255]; the input is
        # assumed to lie in [-1, 1] — TODO confirm against callers.
        images = F.avg_pool2d(images, 2, 2)
        images = images.mul(0.5).add(0.5).mul(255)
        # NOTE(review): the expand hard-codes batch size 1 (grayscale input
        # broadcast to 3 channels) — confirm batching is never needed here.
        return self.model(images.expand(1, 3, images.shape[2], images.shape[3]) - Variable(self.mean))
# Module-level singleton feature extractor, used by NetG.forward below.
# NOTE(review): constructing it here runs torch.load('i2v.pth') and
# requires a CUDA device at import time.
netI = NetI().cuda()
class NetG(nn.Module):
    """U-Net-like colorization generator.

    Encodes a 1-channel sketch (to0..to4), injects a 4-channel color hint
    (toH) at the 128px scale and frozen NetI features at the bottleneck,
    then decodes through four dilated-ResNeXt "tunnels" with PixelShuffle
    upsampling. Output is a 3-channel image in [-1, 1] (tanh).
    """
    def __init__(self, ngf=64):
        super(NetG, self).__init__()
        # Hint branch: 4-channel color hints -> ngf feature maps.
        self.toH = nn.Sequential(nn.Conv2d(4, ngf, kernel_size=7, stride=1, padding=3), nn.LeakyReLU(0.2, True))
        # Sketch encoder; trailing comments note the spatial size at each stage
        # (presumably for a 512px input — TODO confirm).
        self.to0 = nn.Sequential(nn.Conv2d(1, ngf // 2, kernel_size=3, stride=1, padding=1),  # 512
                                 nn.LeakyReLU(0.2, True))
        self.to1 = nn.Sequential(nn.Conv2d(ngf // 2, ngf, kernel_size=4, stride=2, padding=1),  # 256
                                 nn.LeakyReLU(0.2, True))
        self.to2 = nn.Sequential(nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1),  # 128
                                 nn.LeakyReLU(0.2, True))
        # Takes cat([x2, hint]) = ngf*2 + ngf channels.
        self.to3 = nn.Sequential(nn.Conv2d(ngf * 3, ngf * 4, kernel_size=4, stride=2, padding=1),  # 64
                                 nn.LeakyReLU(0.2, True))
        self.to4 = nn.Sequential(nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1),  # 32
                                 nn.LeakyReLU(0.2, True))
        # Bottleneck: 20 ResNeXt blocks; input is cat([x4, NetI features])
        # hence the "+ 512" on the entry conv.
        tunnel4 = nn.Sequential(*[ResNeXtBottleneck(ngf * 8, ngf * 8, cardinality=32, dilate=1) for _ in range(20)])
        self.tunnel4 = nn.Sequential(nn.Conv2d(ngf * 8 + 512, ngf * 8, kernel_size=3, stride=1, padding=1),
                                     nn.LeakyReLU(0.2, True),
                                     tunnel4,
                                     nn.Conv2d(ngf * 8, ngf * 4 * 4, kernel_size=3, stride=1, padding=1),
                                     nn.PixelShuffle(2),
                                     nn.LeakyReLU(0.2, True)
                                     )  # 64
        depth = 2
        # Decoder tunnels use a 1-2-4-2-1 dilation pyramid of ResNeXt blocks.
        tunnel = [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=1) for _ in range(depth)]
        tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=2) for _ in range(depth)]
        tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=4) for _ in range(depth)]
        tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=2),
                   ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=1)]
        tunnel3 = nn.Sequential(*tunnel)
        # Input is cat([tunnel4 output, x3]) = ngf*4 + ngf*4 channels.
        self.tunnel3 = nn.Sequential(nn.Conv2d(ngf * 8, ngf * 4, kernel_size=3, stride=1, padding=1),
                                     nn.LeakyReLU(0.2, True),
                                     tunnel3,
                                     nn.Conv2d(ngf * 4, ngf * 2 * 4, kernel_size=3, stride=1, padding=1),
                                     nn.PixelShuffle(2),
                                     nn.LeakyReLU(0.2, True)
                                     )  # 128
        tunnel = [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=1) for _ in range(depth)]
        tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=2) for _ in range(depth)]
        tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=4) for _ in range(depth)]
        tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=2),
                   ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=1)]
        tunnel2 = nn.Sequential(*tunnel)
        self.tunnel2 = nn.Sequential(nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=1),
                                     nn.LeakyReLU(0.2, True),
                                     tunnel2,
                                     nn.Conv2d(ngf * 2, ngf * 4, kernel_size=3, stride=1, padding=1),
                                     nn.PixelShuffle(2),
                                     nn.LeakyReLU(0.2, True)
                                     )
        tunnel = [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=1)]
        tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=2)]
        tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=4)]
        tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=2),
                   ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=1)]
        tunnel1 = nn.Sequential(*tunnel)
        self.tunnel1 = nn.Sequential(nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=1),
                                     nn.LeakyReLU(0.2, True),
                                     tunnel1,
                                     nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=1, padding=1),
                                     nn.PixelShuffle(2),
                                     nn.LeakyReLU(0.2, True)
                                     )
        # Final head takes cat([tunnel1 output (ngf//2), x0 (ngf//2)]) = ngf.
        self.exit = nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1)
    def forward(self, sketch, hint):
        hint = self.toH(hint)
        # Frozen-feature branch via the module-level NetI singleton.
        sketch_feat = netI(sketch)
        x0 = self.to0(sketch)
        x1 = self.to1(x0)
        x2 = self.to2(x1)
        # Hint injected at the 128px stage.
        x3 = self.to3(torch.cat([x2, hint], 1))  # !
        x4 = self.to4(x3)
        x = self.tunnel4(torch.cat([x4, sketch_feat], 1))
        x = self.tunnel3(torch.cat([x, x3], 1))
        x = self.tunnel2(torch.cat([x, x2], 1))
        x = self.tunnel1(torch.cat([x, x1], 1))
        x = F.tanh(self.exit(torch.cat([x, x0], 1)))
        return x
| [
"yuanzheng.ci@gmail.com"
] | yuanzheng.ci@gmail.com |
2989c5521615440740f4bdd1b4eb7ceb64e82717 | 5d60c7c040e13d9f30d18d2aedc4b3b437008c15 | /integrated_data_processing/shared_functions/models.py | 18239a3f1455c69c775d6fbb34cc575474f05b41 | [
"MIT"
] | permissive | carola173/kafka-implementation | a59b066f51ead88b40bb44d517df38c2affca370 | a29c05f7c9ec5f4903610b077a776cf3473c2b37 | refs/heads/main | 2023-08-19T12:17:43.865054 | 2021-10-20T12:09:43 | 2021-10-20T12:09:43 | 419,306,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | import datetime
import faust
from faust.models.fields import DatetimeField
class TripRecord(faust.Record):
    """Faust record describing one taxi-trip event consumed from a stream."""
    id: int
    vendor_id: int
    passenger_count: int
    pickup_longitude: float
    pickup_latitude: float
    dropoff_latitude: float
    # NOTE(review): inconsistent naming — 'drop_longitude' vs the
    # 'dropoff_latitude' above; confirm against the producer's schema.
    drop_longitude: float
    store_and_fwd_flag: str
    # coerce=False keeps the raw wire value instead of parsing it.
    # NOTE(review): the annotation is the `datetime` module (per the file's
    # `import datetime`), not datetime.datetime — confirm faust accepts this.
    pickup_datetime: datetime = DatetimeField(coerce=False)
| [
"carola.amu@gmail.com"
] | carola.amu@gmail.com |
4c0c02e2de532e29579ebe035474430c01faea5d | fe85b3a7cad5dd3d9d08fb430f4887c5259962ed | /review_form/settings.py | e317d0c9902fdf08e8bc038a69f793affb7857e5 | [] | no_license | Aniket-Sharma/Review-System | aad72dc646cf4496706c44477b82e88f0e5b9891 | 72b32b7a0cbde50f0f3a8aa6d32b9049250d7263 | refs/heads/master | 2022-12-05T11:02:57.743129 | 2019-10-02T18:04:00 | 2019-10-02T18:04:00 | 212,013,873 | 0 | 0 | null | 2022-11-22T03:36:18 | 2019-10-01T04:41:23 | Python | UTF-8 | Python | false | false | 3,131 | py | """
Django settings for roomswap project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'f^y*0dvq0oet*_s31olh!pto#2-fh^iuz_#9k8@=_)sv5c0-d9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG=True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bootstrap4',
    'review',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'review_form.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'review_form.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Development default: SQLite file alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
"noreply@github.com"
] | noreply@github.com |
cf10c491b936df2b8ed8fb2269a9a5d76bfe085f | 99d9acdce21b3f00cfc35e344597e50986118cf6 | /train.py | f03e3fffcec53678b855b5abd66c96e9a0310e27 | [] | no_license | bigsoftcms/deepcluster-3 | d2d5639e5557089d0135ceb9ae1590600081a991 | e753d2893d60ebabc0619832fe2e90c71301bda7 | refs/heads/master | 2022-12-22T00:09:17.149801 | 2020-09-19T18:51:55 | 2020-09-19T18:51:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,270 | py | import argparse
import os
import time
import numpy as np
from sklearn.metrics.cluster import normalized_mutual_info_score
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import clustering
from clustering.utils import cluster_assign, arrange_clustering
import models
from models.utils import compute_features, restore
from lib.utils import AverageMeter, Logger, UnifLabelSampler
def parse_args():
    """Build and parse the command-line options for DeepCluster training."""
    p = argparse.ArgumentParser(description='PyTorch Implementation of DeepCluster in Python3')
    add = p.add_argument
    # Data and model.
    add('--data', type=str, help='Path to dataset.')
    add('--arch', type=str, default='vgg16', choices=['alexnet', 'vgg16'],
        help='CNN architecture')
    add('--sobel', action='store_true', help='Sobel filtering')
    # Clustering.
    add('--nmb_cluster', '--k', type=int, default=10000,
        help='number of cluster for k-means (default: 10000)')
    add('--cluster_alg', default='KMeans', type=str, choices=['KMeans', 'PIC'],
        help='clustering algorithm (default: Kmeans)')
    add('--reassign', type=float, default=1.,
        help='how many epochs of training between two consecutive reassignments of clusters (default: 1)')
    # Optimization.
    add('--batch', type=int, default=256, help='mini-batch size (default: 256)')
    add('--learning_rate', '--lr', type=float, default=0.05, help='learning rate')
    add('--weight_decay', '--wd', type=float, default=-5, help='weight decay')
    add('--momentum', type=float, default=0.9, help='momentum (default: 0.9)')
    add('--dropout', type=float, default=0.5,
        help='dropout percentage in Dropout layers (default: 0.5')
    # Schedule and bookkeeping.
    add('--start_epoch', type=int, default=0,
        help='manual epoch number (useful on restarts) (default: 0)')
    add('--epochs', type=int, default=200,
        help='number of total epochs to run (default: 200)')
    add('--checkpoints', type=int, default=25000,
        help='how many iterations between two checkpoints (default: 25000)')
    add('--resume', type=str, default='', metavar='PATH',
        help='path to checkpoint (default: None)')
    add('--experiment', '--exp', type=str, metavar='PATH',
        help='path to dir where train will be saved')
    add('--workers', type=int, default=6,
        help='number of data loading workers (default: 4)')
    add('--verbose', action='store_true', help='chatty')
    add('--seed', type=int, default=None, help='random seed (default: None)')
    return p.parse_args()
def main(args):
    """Run DeepCluster training: alternately cluster CNN features and
    train the CNN on the resulting pseudo-labels.

    Requires a CUDA device. Checkpoints and cluster-assignment logs are
    written under ``args.experiment``.
    """
    # fix random seeds
    if args.seed:
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        np.random.seed(args.seed)
    # CNN
    if args.verbose:
        print('Architecture: {}'.format(args.arch))
    model = models.__dict__[args.arch](sobel=args.sobel, dropout=args.dropout)
    # fd = input width of the (removed) top layer; reused below when the
    # classification head is rebuilt each epoch.
    fd = int(model.top_layer.weight.size()[1])
    model.top_layer = None
    model.features = torch.nn.DataParallel(model.features)
    model.cuda()
    cudnn.benchmark = True
    # create optimizer
    optimizer = torch.optim.SGD(
        filter(lambda x: x.requires_grad, model.parameters()),
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=10**args.weight_decay,
    )
    # define loss function
    criterion = nn.CrossEntropyLoss().cuda()
    restore(model, args.resume)
    # creating checkpoint repo
    exp_check = os.path.join(args.experiment, 'checkpoints')
    if not os.path.isdir(exp_check):
        os.makedirs(exp_check)
    # creating cluster assignments log
    cluster_log = Logger(os.path.join(args.experiment, 'clusters'))
    # preprocessing of data (ImageNet normalization constants)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    tra = [transforms.Resize(256),
           transforms.CenterCrop(224),
           transforms.ToTensor(),
           normalize]
    # load the data
    end = time.time()
    dataset = datasets.ImageFolder(args.data, transform=transforms.Compose(tra))
    if args.verbose:
        print('Load dataset: {0:.2f} s'.format(time.time() - end))
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=args.batch,
                                             num_workers=args.workers,
                                             pin_memory=True)
    algs = {
        'KMeans': clustering.KMeans,
        'PIC': clustering.PIC,
    }
    cluster_alg = algs[args.cluster_alg](args.nmb_cluster)
    # training convnet with cluster_alg
    for epoch in range(args.start_epoch, args.epochs):
        end = time.time()
        # remove head (last FC layer of the classifier) so features come
        # straight out of the penultimate layer
        model.top_layer = None
        model.classifier = nn.Sequential(*list(model.classifier.children())[:-1])
        # get the features for the whole dataset
        features = compute_features(dataloader, model, len(dataset), args.batch)
        # cluster the features
        if args.verbose:
            print('Cluster the features')
        clustering_loss = cluster_alg.cluster(features, verbose=args.verbose)
        # assign pseudo-labels
        if args.verbose:
            print('Assign pseudo labels')
        train_dataset = cluster_assign(cluster_alg.images_lists,
                                       dataset.imgs)
        # uniformly sample per target (avoids degenerate huge clusters
        # dominating the epoch)
        sampler = UnifLabelSampler(int(args.reassign * len(train_dataset)),
                                   cluster_alg.images_lists)
        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batch,
            num_workers=args.workers,
            sampler=sampler,
            pin_memory=True,
        )
        # set last fully connected layer (fresh head sized to the number of
        # clusters found this epoch)
        mlp = list(model.classifier.children())
        mlp.append(nn.ReLU(inplace=True).cuda())
        model.classifier = nn.Sequential(*mlp)
        model.top_layer = nn.Linear(fd, len(cluster_alg.images_lists))
        model.top_layer.weight.data.normal_(0, 0.01)
        model.top_layer.bias.data.zero_()
        model.top_layer.cuda()
        # train network with clusters as pseudo-labels
        end = time.time()
        loss = train(train_dataloader, model, criterion, optimizer, epoch)
        # print log
        if args.verbose:
            print('###### Epoch [{0}] ###### \n'
                  'Time: {1:.3f} s\n'
                  'Clustering loss: {2:.3f} \n'
                  'ConvNet loss: {3:.3f}'
                  .format(epoch, time.time() - end, clustering_loss, loss))
            try:
                # NMI between consecutive assignments measures clustering
                # stability; IndexError occurs on the very first epoch when
                # the log is still empty.
                nmi = normalized_mutual_info_score(
                    arrange_clustering(cluster_alg.images_lists),
                    arrange_clustering(cluster_log.data[-1])
                )
                print('NMI against previous assignment: {0:.3f}'.format(nmi))
            except IndexError:
                pass
            print('####################### \n')
        # save running checkpoint
        torch.save({'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'optimizer' : optimizer.state_dict()},
                   os.path.join(args.experiment, 'checkpoint.pth.tar'))
        # save cluster assignments
        cluster_log.log(cluster_alg.images_lists)
def train(loader, model, crit, opt, epoch):
    """Training of the CNN.

    Args:
        loader (torch.utils.data.DataLoader): Data loader
        model (nn.Module): CNN
        crit (torch.nn): loss
        opt (torch.optim.SGD): optimizer for every parameters with True
            requires_grad in model except top layer
        epoch (int)

    Returns:
        float: average loss over the epoch.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    data_time = AverageMeter()
    # switch to train mode
    model.train()
    # create an optimizer for the last fc layer (the head is rebuilt each
    # epoch, so it cannot live in `opt`)
    optimizer_tl = torch.optim.SGD(
        model.top_layer.parameters(),
        lr=args.learning_rate,
        weight_decay=10**args.weight_decay,
    )
    end = time.time()
    for i, (input_tensor, target) in enumerate(loader):
        data_time.update(time.time() - end)
        # save checkpoint
        n = len(loader) * epoch + i
        if n % args.checkpoints == 0:
            path = os.path.join(
                args.experiment,
                'checkpoints',
                # BUGFIX: integer division; `n / args.checkpoints` is float
                # division on Python 3 and produced names like
                # 'checkpoint_1.0.pth.tar'.
                'checkpoint_' + str(n // args.checkpoints) + '.pth.tar',
            )
            if args.verbose:
                print('Save checkpoint at: {0}'.format(path))
            torch.save({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'optimizer' : opt.state_dict()
            }, path)
        # BUGFIX: `async` is a reserved keyword since Python 3.7, so
        # `target.cuda(async=True)` is a SyntaxError; `non_blocking` is the
        # PyTorch replacement with the same semantics.
        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input_tensor.cuda())
        target_var = torch.autograd.Variable(target)
        output = model(input_var)
        loss = crit(output, target_var)
        # record loss
        losses.update(loss.data.item(), input_tensor.shape[0])
        # compute gradient and do SGD step on both optimizers
        opt.zero_grad()
        optimizer_tl.zero_grad()
        loss.backward()
        opt.step()
        optimizer_tl.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if args.verbose and (i % 200) == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data: {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss: {loss.val:.4f} ({loss.avg:.4f})'
                  .format(epoch, i, len(loader), batch_time=batch_time,
                          data_time=data_time, loss=losses))
    return losses.avg
if __name__ == '__main__':
args = parse_args()
main(args) | [
"pero356@gmail.com"
] | pero356@gmail.com |
2fe84b99d7e12b8b6f1ee597865729eae570e431 | 7e858aa71ae1ad8463b8a9a06448b29faa9eb453 | /account/urls.py | b9f8311ca34d73fe55b1eb048db67592c0c519f9 | [] | no_license | JDPatel1729/Society-Management-System-BackEnd | 74f42d88ae3478159d97314c9c33665a47892781 | b772c67fd9805f108510e63b60d733c57fbb23f5 | refs/heads/master | 2023-04-04T00:06:11.617344 | 2021-04-07T04:48:33 | 2021-04-07T04:48:33 | 352,247,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from django.urls import path
from .views import ledger
urlpatterns = [
path('',ledger,name='ledger_api'),
]
| [
"jdpeachblack@protonmail.com"
] | jdpeachblack@protonmail.com |
ff2d90b1f5ac7311985425547060f8d42ed0a4e2 | 86100df5db058ea25b1859b6d2d4eafef720bede | /dirWatch.py | f7e991a8815c3efb30907af01c6dd56ae91c92e1 | [] | no_license | SniPE7/MonitorPy | 1afb156d1d8185158012e54bb3c387cfde29c7cd | e3f7aa672a2909abfa080bf3db9b4ff56bd6b97e | refs/heads/master | 2020-12-24T16:42:52.899784 | 2014-12-15T11:32:58 | 2014-12-15T11:32:58 | 28,030,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | #!/usr/bin/env python
import time
from time import gmtime, strftime
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Handler(FileSystemEventHandler):
    # Logs create/delete/move filesystem events to stdout with a UTC
    # timestamp. (Python 2 print statements — this script targets py2.)
    def on_created(self, event):
        # Called by watchdog when a file or directory is created.
        print strftime("%Y-%m-%d %H:%M:%S", gmtime()), event
    def on_deleted(self, event):
        # Called by watchdog when a file or directory is deleted.
        print strftime("%Y-%m-%d %H:%M:%S", gmtime()), event
    def on_moved(self, event):
        # Called by watchdog when a file or directory is moved/renamed.
        print strftime("%Y-%m-%d %H:%M:%S", gmtime()), event
# Watch /var recursively and keep the main thread alive until Ctrl-C.
observer = Observer()
observer.schedule(Handler(), path='/var', recursive=True)
observer.start()
try:
    while True:
        time.sleep(0.1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
"root@localhost.localdomain"
] | root@localhost.localdomain |
d54388890a5e78b747a36105e26e799f1a83ed8d | f1bccb61fe08abde483ec01aafb785a4a25ff61f | /Utils/files.py | e08cfc85272eadd94165c62e74ba511166374212 | [] | no_license | ashnaider/DifferentialEquations | 3817fc87b63bbb7d7bd7067a9a28a0e231025c15 | 53d36c2fe193285f830f47d37d8f038073d6995a | refs/heads/main | 2023-08-18T21:33:01.651376 | 2021-10-09T08:10:12 | 2021-10-09T08:10:12 | 414,854,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog
from PyQt5.QtGui import QIcon
def getFileNameToSave(parent, filename):
    """Show a non-native "save as" dialog pre-filled with *filename*.

    Returns the chosen path, or an empty string if the user cancels.
    """
    dlg_options = QFileDialog.Options() | QFileDialog.DontUseNativeDialog
    chosen, _selected_filter = QFileDialog.getSaveFileName(
        parent,
        "Сохранить как",
        filename,
        "All Files (*);;Text Files (*.txt);; Coma separated value files (*.csv)",
        options=dlg_options,
    )
    return chosen
def getFileNameToOpen(parent):
    """Show a non-native "open file" dialog filtered to *.txt files.

    Returns the chosen path, or an empty string if the user cancels.
    """
    dlg_options = QFileDialog.Options() | QFileDialog.DontUseNativeDialog
    chosen, _selected_filter = QFileDialog.getOpenFileName(
        parent,
        "QFileDialog.getOpenFileName()",
        "",
        "Text Files (*.txt)",
        options=dlg_options,
    )
    return chosen
def read_data(file_name, headers):
    """Read the data series with the given headers from *file_name*.

    Each non-empty line is expected to look like ``header: v1,v2,...``.

    Args:
        file_name: path of the text file to read.
        headers: collection of header names to extract.

    Returns:
        dict mapping each requested header found in the file to the raw
        list of comma-separated value strings. Values are NOT stripped
        (leading spaces / trailing newlines are kept), matching the
        original behaviour.
    """
    res = {}
    with open(file=file_name, mode='r') as f:
        for line in f:
            # Skip blank lines.
            if line.strip() == "":
                continue
            # ROBUSTNESS: lines without a ':' separator used to raise
            # ValueError from str.index(); treat them as malformed and skip.
            semi_index = line.find(':')
            if semi_index == -1:
                continue
            # Header is everything before the ':'.
            curr_header = line[:semi_index]
            if curr_header in headers:
                res[curr_header] = line[semi_index + 1:].split(',')
    return res
| [
"ash2003@ukr.net"
] | ash2003@ukr.net |
5b0c427c59c3f900fc2f681738a7253d68c9bc70 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /110_concurrency_parallelism/_exercises/templates/Mastering Concurrency in Python/Chapter03/example2.py | 86fd9c8132767986c2d2392ff9d84973ea193f0b | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 642 | py | # # ch3/example2.py
#
# ______ _th.. __ thread
# ____ ma__ ______ sqrt
#
# ___ is_prime x
# __ ? < 2
# print('@ is not a prime number.' ?
#
# ____ ? __ 2
# print('@ is a prime number.' ?
#
# ____ ? % 2 __ 0
# print('@ is not a prime number.' ?
#
# ____
# limit _ __. sq ? + 1
# ___ i __ ra.. 3 ? 2
# __ x % i __ 0
# print('@ is not a prime number.' ?
# r_
#
# print('@ is a prime number.' ?
#
# my_input _ 2, 193, 323, 1327, 433785907
#
# ___ x __ ?
# ?.s_n_t.. ? ?
#
# a _ __..('Type something to quit: \n')
# print('Finished.')
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
fb873db4bf3aa718c82d101dda25aca24cd84ce9 | 4edadc6b0c733b208df760e8491b1f1808ed4395 | /image process/plot_loss.py | 9bdbe07e304b8e5f2c385c95b6d5b208c45e8f10 | [] | no_license | RichardcLee/Expression_Transformation | d3f8a1bd0b11071f3f085efe821fabc716e617e6 | ae849789a6c77e4cec0909c0c490305ad13ba06d | refs/heads/master | 2021-04-11T04:51:02.496921 | 2020-09-16T08:21:41 | 2020-09-16T08:21:41 | 248,993,500 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,005 | py | from matplotlib import pyplot as plt
import re

# Use SimHei so the CJK text renders; keep minus signs displayable with it.
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False

# NOTE(review): machine-specific log path — point this at your own logs.txt.
lines = []
with open(r"C:\Users\81955\Desktop\ckpts\face\ganimation\200513_232207\logs.txt", "r+") as f:
    lines = f.readlines()

# dis_fake      second term of the WGAN-GP adversarial loss (larger/positive is better)
# dis_real      first term of the WGAN-GP adversarial loss (smaller/negative is better)
# dis_real_aus  second term of the conditional expression (AU) loss
# gen_rec       cycle-consistency loss
# dis           discriminator loss
# gen           generator loss
loss = {
    "dis_fake": [],
    "dis_real": [],
    "dis_real_aus": [],
    "gen_rec": [],
    'dis': [],
    'gen': [],
    "total": []
}
for line in lines:
    a, b, c, d = float(re.findall("dis_fake:(.*?)\|", line)[0].strip()), float(re.findall("dis_real:(.*?)\|", line)[0].strip()), float(re.findall("dis_real_aus:(.*?)\|", line)[0].strip()), float(re.findall("gen_rec:(.*?)\|", line)[0].strip())
    e, f = float(re.findall("dis:(.*?)\|", line)[0].strip()), float(re.findall("gen:(.*?)\|", line)[0].strip())
    loss["dis_fake"].append(a)
    loss["dis_real"].append(b)
    loss["dis_real_aus"].append(c)
    # BUG FIX: 'dis' previously received d (the gen_rec value) and the parsed
    # discriminator loss e was silently dropped, so the 'dis loss' subplot
    # duplicated the cycle-consistency curve.
    loss["dis"].append(e)
    loss["gen"].append(f)
    loss["gen_rec"].append(d)
    loss["total"].append(10*d + 1*(a+b) + 160*c)

# 3x2 grid of the individual loss terms.
plt.figure(dpi=120)
plt.tight_layout()
plt.subplots_adjust(wspace=0.45, hspace=0.5)  # spacing between subplots
xy = ["321","322", "323", "324", "325", "326"]
widths = [0.07, 0.07, 0.07, 0.09, 0.09, 0.07]
labels = ['adversarial loss 2', 'adversarial loss 1', 'condition loss', 'cycle consistency loss', 'dis loss', 'gen loss', 'total loss']
ticks_y = [[0, 1, 2, 3, 4, 5], [-5, -4, -3, -2, -1, 0], [0, 0.004, 0.008, 0.012, 0.016], [0, 0.1, 0.2, 0.3, 0.4], [0, 0.1, 0.2, 0.3, 0.4], [-3, -2, -1, 0, 1, 2, 3, 4 ,5, 6]]
ticks_x = ['0', '1w', '2w', '3w', '4w']
scale_x = [0, 10000, 20000, 30000, 40000]
idx = 0
space = 10  # plot every 10th point so the curves stay legible
step = [i for i in range(len(loss["dis_fake"]))]  # step indices
fontsize = 10
for name, val in loss.items():
    if idx == 6:
        continue  # 'total' gets its own full-size figure below
    plt.subplot(xy[idx])
    plt.title(labels[idx], fontsize=fontsize+2)
    plt.plot(step[::space], val[::space], linewidth=widths[idx], color='k')  # label=labels[idx]
    # plt.legend(loc='best')
    if idx == 4 or idx == 5:
        plt.xlabel("step", fontsize=fontsize-1)
    plt.ylabel("loss value", fontsize=fontsize-1)
    # tick label font sizes
    plt.xticks(scale_x, ticks_x, fontsize=fontsize-1)
    plt.yticks(ticks_y[idx], fontsize=fontsize-1)
    idx += 1
plt.savefig("1.jpg")
plt.show()

# Separate, larger figure for the weighted total loss.
fontsize = 20
plt.figure(dpi=80)
plt.plot(step[::space], loss['total'][::space], linewidth=0.2, color='k')
plt.xlabel("step", fontsize=fontsize-6)
plt.ylabel("loss value", fontsize=fontsize-6)
# tick label font sizes
plt.xticks(scale_x, ticks_x, fontsize=fontsize-6)
plt.yticks(fontsize=fontsize-1)
plt.savefig("2.jpg")
plt.show()
| [
"0yunhow@gmail.com"
] | 0yunhow@gmail.com |
c34d8dc77d8a8ad410f2ef2a3c61cecfb5650440 | ee4e8acfe29e70598c1bcd6a3ba9ce57aaa89498 | /project1/agent.py | 294f391c6e403949ff6b0ff35fcb92ca2380b52f | [] | no_license | kushal433/motion-planning | 7684137f36624cafcc93429d72ea7a3bef2d2a67 | 976e5c4cf41e203ef932b3d1dfc5a8ed99e5b284 | refs/heads/master | 2022-12-18T16:07:37.134835 | 2020-09-15T21:59:23 | 2020-09-15T21:59:23 | 295,829,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,709 | py | # agent.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to Clemson University and the author.
#
# Author: Ioannis Karamouzas (ioannis@g.clemson.edu)
#
import numpy as np
from math import sqrt
class Agent(object):
    def __init__(self, csvParameters, dhor = 10, goalRadiusSq=1):
        """
        Takes an input line from the csv file,
        and initializes the agent
        """
        self.id = int(csvParameters[0])  # the id of the agent
        self.gid = int(csvParameters[1])  # the group id of the agent
        self.pos = np.array([float(csvParameters[2]), float(csvParameters[3])])  # the position of the agent
        self.vel = np.zeros(2)  # the velocity of the agent
        self.goal = np.array([float(csvParameters[4]), float(csvParameters[5])])  # the goal of the agent
        self.prefspeed = float(csvParameters[6])  # the preferred speed of the agent
        self.gvel = self.goal - self.pos  # the goal velocity of the agent
        # NOTE: normalizes by the distance to the goal, so an agent spawned
        # exactly on its goal would raise ZeroDivisionError.
        self.gvel = self.gvel / (sqrt(self.gvel.dot(self.gvel))) * self.prefspeed
        self.maxspeed = float(csvParameters[7])  # the maximum speed of the agent
        self.radius = float(csvParameters[8])  # the radius of the agent
        self.goalRadiusSq = goalRadiusSq  # parameter to determine if agent is close to the goal
        self.atGoal = False  # has the agent reached its goal?
        self.dhor = dhor  # the sensing radius
        self.vnew = np.zeros(2)  # the new velocity of the agent

    def computeNewVelocity(self, neighbors=None):
        """Choose self.vnew by sampling 200 candidate velocities and keeping the
        one with the lowest cost (goal deviation + velocity change + collision
        penalty).

        neighbors: iterable of other Agent instances (default: none).
        BUG FIX: default changed from a shared mutable list to None.
        """
        if neighbors is None:
            neighbors = []

        # Sensing: keep only the other agents inside the sensing radius dhor.
        nearby = []
        for other in neighbors:
            if self.id != other.id:
                if sqrt((self.pos[0] - other.pos[0]) ** 2 + (self.pos[1] - other.pos[1]) ** 2) < self.dhor:
                    nearby.append(other)

        def ttc(agent, other, vcand):
            """Time to collision between `agent` moving at candidate velocity
            `vcand` and `other` (0 = already colliding, inf = never).

            BUG FIX: the candidate-velocity parameter used to be ignored in
            favor of the enclosing loop variable (which happened to hold the
            same value); the parameter is now used explicitly.
            """
            rad = agent.radius + other.radius
            w = [agent.pos[0] - other.pos[0], agent.pos[1] - other.pos[1]]
            c = np.dot(w, w) - rad * rad
            if c < 0:
                return 0  # the two discs already overlap
            v = [vcand[0] - other.vel[0], vcand[1] - other.vel[1]]
            a = np.dot(v, v)
            b = np.dot(w, v)
            if b > 0:
                # Relative velocity points away from the neighbor: diverging,
                # no collision.  (This branch used to print a misleading
                # "Collision" debug message; debug prints removed.)
                return float('inf')
            discr = b * b - a * c
            if discr <= 0:
                return float('inf')
            tau = c / (-b + sqrt(discr))  # smallest root of the quadratic
            if tau < 0:
                return float('inf')
            return tau

        cost_old = float('inf')
        curr_vel = self.gvel[:]
        # Uniformly sample 200 candidate velocities inside the max-speed disc;
        # sqrt on the sampled radius makes the sampling uniform over the area.
        for _ in range(0, 200):
            length = self.maxspeed * np.sqrt(np.random.uniform(0, 1))
            angle = np.pi * np.random.uniform(0, 2)
            vc = [length * np.cos(angle), length * np.sin(angle)]
            # Smallest time to collision against any nearby agent.
            mt = float('inf')
            for other in nearby:
                tc = ttc(self, other, vc)
                if 0 < tc < mt:  # tc == 0 (already colliding) ignored, as before
                    mt = tc
            # Cost terms: deviation from the goal velocity, deviation from the
            # current velocity, and a collision penalty (3/mt -> 0 when mt=inf).
            dev_goal = vc - self.gvel
            dev_cur = vc - self.vel
            cost = 1 * np.sqrt(dev_goal.dot(dev_goal)) + 1 * np.sqrt(dev_cur.dot(dev_cur)) + (3 / mt)
            if cost < cost_old:
                curr_vel = vc
                cost_old = cost

        if not self.atGoal:
            self.vnew[:] = curr_vel[:]

    def update(self, dt):
        """
        Code to update the velocity and position of the agent
        as well as determine the new goal velocity
        """
        if not self.atGoal:
            self.vel[:] = self.vnew[:]
            self.pos += self.vel * dt  # update the position
        # compute the goal velocity for the next time step. Do not modify ;
        self.gvel = self.goal - self.pos
        distGoalSq = self.gvel.dot(self.gvel)
        if distGoalSq < self.goalRadiusSq:
            self.atGoal = True  # goal has been reached
        else:
            self.gvel = self.gvel / sqrt(distGoalSq) * self.prefspeed
| [
"noreply@github.com"
] | noreply@github.com |
fd8d503808c9cd1b25d24b4d703e2d6833011276 | 5f358a90c01a19e3bb8b7e7f46b99dd3748e317a | /config.py | 5b62a029bd60f5461e691abec32607e788235598 | [] | no_license | iliaantonov/JestiBot | ff04d02c5186632816335a573a2a467c8fcb8241 | 1ab634dfce415672b281b5570de2dec25086c53f | refs/heads/main | 2023-01-19T20:36:58.559953 | 2020-11-27T23:58:21 | 2020-11-27T23:58:21 | 316,623,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | # Токен Бота
# Bot token.
# SECURITY NOTE(review): a real-looking Telegram bot token is committed here in
# plain text; it should be revoked and loaded from an environment variable or a
# secrets file kept out of version control.
token = '1415264147:AAGjsbQhaVoRJOG1VpC7ja4GZjADAJu_NZc'
# File loading: the lists below are populated at import time with no error
# handling, so a missing data file makes importing this module raise.
# Sticker list (one sticker id per line of stickers.txt).
stickers_file = open('stickers.txt', 'r').read().split('\n')
stickers_list = []
for line in stickers_file:
    stickers_list.append(line)
# "Executions": URLs and their matching descriptions, one per line; the two
# files are presumably index-aligned — confirm against the bot handlers.
execution_urls_txt = open('execution_url.txt', 'r', encoding='utf-8').read().split('\n')
execution_urls = []
for line in execution_urls_txt:
    execution_urls.append(line)
execution_descriptions_txt = open('execution_description.txt', 'r', encoding='utf-8').read().split('\n')
execution_descriptions = []
for line in execution_descriptions_txt:
    execution_descriptions.append(line)
# Bot messages (user-facing text, intentionally kept in Russian).
info = '''
Я Жесть-Бот
У меня есть кровь, мясо, матюки и голые сиськи
Всё просто: вводишь /pain и наслаждаешься
\nПосле ввода комманды ты соглашаешься с тем, что:
1. Я не несу ответственности за твою психику!
2. Я не призываю тебя ни к какому действию!
3. Тебе больше 18 лет
\nАдмин: @iliaantonov
'''
| [
"noreply@github.com"
] | noreply@github.com |
24d8ef27b15e6d40d065dc60f829d83782dc76e9 | 5013ea0b4548f3576ad781040d0903825005e44a | /tests/terraform/checks/resource/aws/test_WorkspaceUserVolumeEncrypted.py | 0e7153473f378e21aeed1d2ab0f7b3dc22e86ce1 | [
"Apache-2.0"
] | permissive | tronxd/checkov | a00aa29694a3788880db212c3200648c7d3ab9d1 | e7c6a829995acd5bb6f6a54e08f780d2c8163094 | refs/heads/master | 2022-11-05T14:48:36.384598 | 2021-11-09T17:51:04 | 2021-11-09T18:09:10 | 239,376,520 | 0 | 0 | Apache-2.0 | 2023-09-11T07:06:21 | 2020-02-09T21:23:48 | Python | UTF-8 | Python | false | false | 1,337 | py | import os
import unittest
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.aws.WorkspaceUserVolumeEncrypted import check
from checkov.terraform.runner import Runner
class TestWorkspaceUserVolumeEncrypted(unittest.TestCase):
    def test(self):
        """Run the check against the example resources and verify the report."""
        current_dir = os.path.dirname(os.path.realpath(__file__))
        examples_dir = current_dir + "/example_WorkspaceUserVolumeEncrypted"

        report = Runner().run(
            root_folder=examples_dir,
            runner_filter=RunnerFilter(checks=[check.id]),
        )
        summary = report.get_summary()

        expected_passing = {"aws_workspaces_workspace.pass"}
        expected_failing = {"aws_workspaces_workspace.fail"}

        self.assertEqual(summary["passed"], 1)
        self.assertEqual(summary["failed"], 1)
        self.assertEqual(summary["skipped"], 0)
        self.assertEqual(summary["parsing_errors"], 0)
        self.assertEqual(expected_passing, {c.resource for c in report.passed_checks})
        self.assertEqual(expected_failing, {c.resource for c in report.failed_checks})


if __name__ == "__main__":
    unittest.main()
| [
"james.woolfenden@gmail.com"
] | james.woolfenden@gmail.com |
d04cecf33f57f5b0070e9c70fd72c137b1a92202 | f28d9d2e0ca7a14bf0744abb7f57413d74edb26b | /api/cwod/views.py | aaf638b263969984a7f1f4e38e333ab69c7f4320 | [] | no_license | sbma44/Capitol-Words | ea63da87e2f122e48bb1ff0ac990978d51832308 | d41e2dcd42d70ecf7e1d7b06463b46f4d7337e13 | refs/heads/master | 2021-01-17T23:14:38.140374 | 2010-12-09T17:02:53 | 2010-12-09T17:02:53 | 1,154,134 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,854 | py | from operator import itemgetter
import sys
import os
sys.path.append(os.pardir)
from django.http import HttpResponse
from django.shortcuts import get_list_or_404
from solr.api import *
from piston.handler import BaseHandler
from piston.resource import Resource
from bioguide.models import *
class GenericHandler(BaseHandler):
    """Read-only piston handler that adapts a query function (from solr.api).

    The wrapped function's own argument names, introspected in
    _allowed_keys(), decide which GET/URL parameters are forwarded to it.
    """

    def __init__(self, func):
        self.func = func
        self.allowed_methods = ('GET', )
        self.allowed_keys = self._allowed_keys()

    def _allowed_keys(self):
        # Positional-argument names of the wrapped function.
        # NOTE(review): `func_code` is the Python 2 spelling; Python 3 would
        # need `self.func.__code__` instead.
        return self.func.func_code.co_varnames[:self.func.func_code.co_argcount]

    def create_results_list(self, data, *args, **kwargs):
        # The Solr-style facet field comes back as a flat
        # [value, count, value, count, ...] list; pair values with counts and
        # label them with `results_keys` (subclasses override via kwargs).
        key = data[0]['facet_counts']['facet_fields'].keys()[0]
        data = data[0]['facet_counts']['facet_fields'][key]
        phrases = data[::2]
        counts = data[1::2]
        results_keys = kwargs.get('results_keys', ['phrase', 'count', ])
        results = [dict(zip(results_keys, x)) for x in zip(phrases, counts)]
        return results

    def read(self, request, *args, **kwargs):
        """Handle GET: forward whitelisted GET/URL params to the query func."""
        params = {}
        for key, val in request.GET.items():
            if key in self.allowed_keys:
                params[str(key)] = val
        for key, val in kwargs.items():
            if key in self.allowed_keys:
                params[str(key)] = val
        data = self.func(**params)
        # NOTE: *args/**kwargs are forwarded to the hook, so overrides must
        # accept them in their signature.
        results = self.create_results_list(data, *args, **kwargs)
        data = {'results': results, 'parameters': params, }
        return data
class PopularPhraseHandler(GenericHandler):
    """Thin wrapper exposing the most frequently used phrases."""

    def __init__(self):
        GenericHandler.__init__(self, most_frequent_phrases)
class PhraseByCategoryHandler(GenericHandler):
    """Phrase counts faceted by legislator, state or party."""

    # Maps the entity_type URL argument to the key name used in result rows.
    ENTITY_KEY_NAMES = {
        'legislator': 'legislator_id',
        'state': 'state',
        'party': 'party',
    }

    def __init__(self):
        super(PhraseByCategoryHandler, self).__init__(phrase_by_category)

    def read(self, request, *args, **kwargs):
        key_name = self.ENTITY_KEY_NAMES.get(kwargs.get('entity_type'))
        if key_name is not None:
            kwargs['results_keys'] = [key_name, 'count', ]
        return super(PhraseByCategoryHandler, self).read(request, *args, **kwargs)
class PhraseOverTimeHandler(GenericHandler):
    """Phrase counts bucketed by date, from a Solr date-facet response."""

    def __init__(self):
        super(PhraseOverTimeHandler, self).__init__(phrase_over_time)

    def create_results_list(self, data, *args, **kwargs):
        """Return [{'date': 'YYYY-MM-DD', 'count': n}, ...] sorted by date.

        BUG FIX 1: the override now accepts *args/**kwargs like the base-class
        hook; GenericHandler.read() forwards its own arguments to this method,
        so the old (self, data)-only signature raised TypeError whenever any
        URL kwargs were present.
        BUG FIX 2: the date part used to be taken with k.rstrip('T00:00:00Z'),
        but rstrip strips a *character set*, mangling e.g.
        '2010-01-10T00:00:00Z' into '2010-01-1'; split on 'T' instead.
        (.items()/list(...keys()) are used so the code also runs on Python 3.)
        """
        key = list(data[0]['facet_counts']['facet_dates'].keys())[0]
        data = data[0]['facet_counts']['facet_dates'][key]
        dates = []
        for k, v in data.items():
            # Only keep entries that carry the midnight-timestamp suffix
            # (other keys are facet metadata, not dates).
            if k.find('T00:00:00Z') > -1:
                date = k.split('T')[0]
                dates.append({'date': date, 'count': v})
        dates.sort(key=itemgetter('date'))
        return dates
class LegislatorLookupHandler(BaseHandler):
    """Look up a legislator by bioguide ID.

    Returns the bio fields of the first matching row plus one 'sessions'
    entry per congressional session served; empty dict when no ID is given.
    """

    def read(self, request, *args, **kwargs):
        bioguide_id = request.GET.get('bioguide_id', None)
        if not bioguide_id:
            return {}

        legislators = get_list_or_404(Legislator, bioguide_id=bioguide_id)
        primary = legislators[0]

        sessions = [{'position': entry.position,
                     'party': entry.party,
                     'state': entry.state,
                     'congress': entry.congress, }
                    for entry in legislators]

        return {
            'bioguide_id': bioguide_id,
            'prefix': primary.prefix,
            'first': primary.first,
            'last': primary.last,
            'suffix': primary.suffix,
            'sessions': sessions,
        }
def index(request):
    """Plain-text placeholder view for the API root."""
    response = HttpResponse('index')
    return response
| [
"bycoffe@gmail.com"
] | bycoffe@gmail.com |
7284d0ac359fe59eb9586c8162a9c0f37c534875 | 8a35e59797c8bad4d69db38ab202f8873bb9ca49 | /RUM Library/rum/Interceptor.py | cd6c81aef29cb541333515f28c605ed93befbf1e | [] | no_license | S0166251-FinalProject/RUM | 485cae7c082dae453e9c3299d4cfb13e440a42a1 | caf39e924ef75efcd9504dd2b7e824b142d0dd60 | refs/heads/master | 2021-01-10T19:43:41.210429 | 2014-12-04T12:20:32 | 2014-12-04T12:20:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | import types
from collections import namedtuple
class Interceptor(object):
    """Intercepts method invocations on arbitrary component instances.

    A class-level registry maps (component instance, method name) pairs to
    the interceptors interested in that invocation.  The first interceptor to
    register for a given service replaces the method on the component with a
    wrapper that notifies every registered interceptor after each call.
    """

    # Shared registry: Service -> [registered Interceptor instances].
    wrappers = {}
    Service = namedtuple("Service", ["Instance", "methodName"])

    def __init__(self, lightWeight = True):
        # Choose the wrapper factory: the lightweight one skips snapshotting
        # the component's attributes around each call.
        if lightWeight:
            self.__createWrapper__ = Interceptor.__createLightWeightWrapper__
        else:
            self.__createWrapper__ = Interceptor.__createWrapper__

    '''registers the current instance to the interception of the given service invocation.
    If no other instance is registered to this service invocation then a new wrapper is deployed.'''
    def registerToServiceInvocation(self, component, method):
        name = method.__name__
        service = Interceptor.Service(Instance = component, methodName = name)
        if service in Interceptor.wrappers:
            Interceptor.wrappers[service].append(self)
        else:
            wrapper = self.__createWrapper__(component, method)
            wrapper.__name__ = name
            # NOTE(review): the wrapper is bound to *this interceptor*
            # (self), not to the component; inside the wrapper `self` is the
            # interceptor instance while `method` stays the originally bound
            # component method, so the call still reaches the component.
            setattr(component, name, types.MethodType(wrapper, self))
            Interceptor.wrappers[service] = [self]

    @staticmethod
    def __createWrapper__(component, method):
        # Heavyweight wrapper: snapshot the component's attributes before and
        # after the call so interceptors can diff the state transition.
        def wrapper(self, *args, **kwargs):
            old = {}
            for var in component.__dict__:
                old[var] = getattr(component, var)
            result = method(*args, **kwargs)
            new = {}
            for var in component.__dict__:
                new[var] = getattr(component, var)
            Interceptor.interceptInvocation(old, new, component, method)
            return result
        return wrapper

    @staticmethod
    def __createLightWeightWrapper__(component, method):
        # Lightweight wrapper: no state snapshots; empty dicts are reported.
        def wrapper(self, *args, **kwargs):
            result = method(*args, **kwargs)
            Interceptor.interceptInvocation({}, {}, component, method)
            return result
        return wrapper

    @staticmethod
    def interceptInvocation(old, new, component, method):
        # Notify every interceptor registered for this (component, method).
        # Registered instances are expected to provide checkForTransition
        # (implemented by subclasses elsewhere in the package — confirm).
        name = method.__name__
        service = Interceptor.Service(Instance = component, methodName = name)
        if service in Interceptor.wrappers:
            listeningRUMs = Interceptor.wrappers[service]
            for rum in listeningRUMs:
                rum.checkForTransition(old, new, component, method)
| [
"d.windhouwer@student.utwente.nl"
] | d.windhouwer@student.utwente.nl |
eeecbd2cb9ae14c3959919f4b325827fd2ffe54d | ff0fedb488ac109e0d51c6d26dc9f3b2cb680e5f | /Python_14_Sep-main/test.py | ecd9d72433057ede05e9d4ede0cddd5dd50981a9 | [] | no_license | Namita123456789/Python_14_Sep-main | b6787ee625a80a7696ebad2b77e2afc320bd5101 | 045ae26eb66d6485f100b04c384268461875abc4 | refs/heads/master | 2023-08-23T07:04:59.788863 | 2021-10-26T06:16:12 | 2021-10-26T06:16:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py |
# NOTE(review): scratch/demo statements — `count`, `thislist`, `tropical` and
# `fruits` are not defined anywhere in this file, so these four lines raise
# NameError when the module is executed.
count(1)
thislist.append("asdadasds")
thislist.extend(tropical)
newlist = ['hello' for x in fruits]
# Reverse-print a list by walking the indices from the last one down to 0.
list1 = [10, 20, 30, 40, 50]
# start = list's size - 1
# stop = -1 (exclusive, so index 0 is still included)
# step = -1
# reverse a list
# BUG FIX: the step was -2, which printed only every other element
# (50 30 10) instead of the full reversed list documented below.
for i in range(len(list1) - 1, -1, -1):
    print(list1[i], end=" ")
# Output 50 40 30 20 10
# class ComplexNumber:
# def __init__(self, r=0, i=0):
# self.real = r
# self.imag = i
# def get_data(self):
# print(f'{self.real}+{self.imag}j')
# # Create a new ComplexNumber object
# num1 = ComplexNumber(2, 3)
# # Call get_data() method
# # Output: 2+3j
# num1.get_data()
# # Create another ComplexNumber object
# # and create a new attribute 'attr'
# num2 = ComplexNumber(5)
# num2.attr = 10
# # Output: (5, 0, 10)
# print((num2.real, num2.imag, num2.attr))
# # but c1 object doesn't have attribute 'attr'
# # AttributeError: 'ComplexNumber' object has no attribute 'attr'
# #print(num1.attr) | [
"namitashah42@gmail.com"
] | namitashah42@gmail.com |
a4fa5e2176aea500e21b13cbfb3acbda776ea850 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/mm/opt/src/tools/utils.py | 39eafdedf84ab82c45e868f2cc33485fc23436ca | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 15,285 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, gradient clip function and dynamic learning rate function
"""
from multiprocessing import Process
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.ops import functional as F
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
from mindspore.nn.learning_rate_schedule import LearningRateSchedule, PolynomialDecayLR, WarmUpLR, CosineDecayLR
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.communication.management import get_rank, get_group_size, create_group
from mindspore.train.callback import Callback
from mindspore.train.summary import SummaryRecord
import moxing as mox
class GPTConfig:
    """
    GPT config class which defines the model size
    """
    def __init__(self,
                 data_parallel_num,
                 model_parallel_num,
                 batch_size=32,
                 seq_length=1024,
                 vocab_size=50257,
                 embedding_size=768,
                 num_layers=12,
                 num_heads=12,
                 expand_ratio=4,
                 post_layernorm_residual=False,
                 dropout_rate=0.1,
                 compute_dtype=mstype.float16,
                 use_past=False,
                 self_layernorm=True,
                 forward_reduce_scatter=True,
                 word_emb_dp=True,
                 stage_num=16,
                 micro_size=32,
                 eod_reset=True,
                 use_top_query_attention=True):
        # Attribute insertion order is kept identical to the historical layout
        # because __str__ reports attributes in __dict__ order.
        for attr, value in (
                ('batch_size', batch_size),
                ('seq_length', seq_length),
                ('vocab_size', vocab_size),
                ('embedding_size', embedding_size),
                ('num_layers', num_layers),
                ('num_heads', num_heads),
                ('expand_ratio', expand_ratio),
                ('post_layernorm_residual', post_layernorm_residual),
                ('dropout_rate', dropout_rate),
                ('compute_dtype', compute_dtype),
                ('use_past', use_past),
                ('dp', data_parallel_num),
                ('mp', model_parallel_num),
                ('self_layernorm', self_layernorm),
                ('forward_reduce_scatter', forward_reduce_scatter),
                ('stage_num', stage_num),
                ('micro_size', micro_size),
                ('word_emb_dp', word_emb_dp),
                ('eod_reset', eod_reset),
                ('use_top_query_attention', use_top_query_attention)):
            setattr(self, attr, value)

    def __str__(self):
        # "[GPTConfig]====...\n" + one "key:value" line per attribute + footer.
        parts = ["[GPTConfig]" + '===' * 10]
        parts.extend("{}:{}".format(k, v) for k, v in self.__dict__.items())
        return "\n".join(parts) + "\n" + '=' * 10
# HyperMap leaf op: per-gradient squared L2 norm, divided by `value` (the size
# of the communication group supplied per parameter) — presumably so that
# gradients replicated across a group are not double-counted after the
# all-reduce; confirm against GlobalNorm's group-size tuples.
get_square_sum = C.MultitypeFuncGraph("get_square_sum")
@get_square_sum.register("Tensor", "Number")
def _get_square_sum(grad, value):
    # sum(grad^2) / value, cast to fp32 and expanded to shape (1,) so the
    # per-parameter results can be combined with F.addn.
    norm = P.ReduceSum(False)(F.square(grad), ()) / value
    norm = F.expand_dims(F.cast(norm, mstype.float32), 0)
    return norm
# HyperMap leaf op: rescale a single gradient tensor by clip_norm/global_norm.
apply_global_norm = C.MultitypeFuncGraph("apply_global_norm")
@apply_global_norm.register("Tensor", "Tensor", "Tensor")
def _apply_global_norm(clip_norm, global_norm, grad):
    grad = grad * clip_norm / global_norm
    return grad
class GlobalNorm(nn.Cell):
    """
    Calculate the global norm value of given tensors
    (reduced first inside the model-parallel group, then across pipeline
    stages).
    """
    def __init__(self, params, config):
        super(GlobalNorm, self).__init__()
        self.hyper_map = C.HyperMap()
        # Per-parameter flag: True unless the name matches one of the
        # special-cased parameters below.
        self.allreduce_filter = tuple(
            "projection.bias" not in x.name and
            "layernorm" not in x.name and "position_embedding.embedding_table" not in x.name
            for x in params)
        # Divisor passed to get_square_sum for each parameter: 1.0 normally,
        # config.mp for projection.bias / layernorm / position-embedding
        # parameters — presumably those are replicated over the
        # model-parallel group; confirm against the sharding strategy.
        self.allreduce_group_size = ()
        for item in self.allreduce_filter:
            if item:
                self.allreduce_group_size = self.allreduce_group_size + (1.0,)
            else:
                self.allreduce_group_size = self.allreduce_group_size + (config.mp * 1.0,)
        self.length = len(params)
        # AllReduce over this rank's model-parallel group...
        group_list, group_name = _get_model_parallel_group(config.mp)
        print("rank_list", group_name)
        print("group_size_list", self.allreduce_group_size)
        create_group(group_name, group_list)
        self.allreduce = P.AllReduce(group=group_name)
        # ...then over the pipeline group (one rank per stage).
        pipeline_group_list, pipeline_group_name = _get_pipeline_group()
        print("pipeline_group_name", pipeline_group_name)
        create_group(pipeline_group_name, pipeline_group_list)
        self.allreduce2 = P.AllReduce(group=pipeline_group_name)
        self.group_name1 = group_name
        self.group_name2 = pipeline_group_name
    def construct(self, grads):
        # Per-parameter squared norms (pre-divided by group size), summed
        # locally, all-reduced within the stage and across stages, then sqrt.
        square_sum = self.hyper_map(
            get_square_sum, grads, self.allreduce_group_size)
        square_reduce_sum = F.addn(square_sum)
        stage_square_reduce_sum = self.allreduce(square_reduce_sum)
        global_square_reduce_sum = self.allreduce2(stage_square_reduce_sum)
        global_norms = F.sqrt(global_square_reduce_sum)
        return global_norms
class GlobalNormOptShard(nn.Cell):
    """
    Calculate the global norm value of given tensors
    (optimizer-shard variant; near-duplicate of GlobalNorm with different
    per-parameter group sizes — consider unifying the two classes).
    """
    def __init__(self, params, config):
        super(GlobalNormOptShard, self).__init__()
        self.hyper_map = C.HyperMap()
        device_nums = get_group_size()
        per_stage_device_num = device_nums // config.stage_num
        # Divisor per parameter: 1.0 for ordinary parameters, config.dp for
        # the (non-position) word-embedding table, and the full per-stage
        # device count for projection.bias / position embedding.
        self.allreduce_group_size = ()
        for x in params:
            if "projection.bias" not in x.name and "embedding_table" not in x.name:
                self.allreduce_group_size = self.allreduce_group_size + (1.0,)
            elif "position_embedding.embedding_table" not in x.name and "projection.bias" not in x.name:
                self.allreduce_group_size = self.allreduce_group_size + (config.dp * 1.0,)
            else:
                self.allreduce_group_size = self.allreduce_group_size + (per_stage_device_num * 1.0,)
        self.length = len(params)
        # Here the "model parallel" group spans the whole stage.
        group_list, group_name = _get_model_parallel_group(per_stage_device_num)
        print("rank_list", group_name)
        print("group_size_list", self.allreduce_group_size)
        create_group(group_name, group_list)
        self.allreduce = P.AllReduce(group=group_name)
        pipeline_group_list, pipeline_group_name = _get_pipeline_group()
        print("pipeline_group_name", pipeline_group_name)
        create_group(pipeline_group_name, pipeline_group_list)
        self.allreduce2 = P.AllReduce(group=pipeline_group_name)
        self.group_name1 = group_name
        self.group_name2 = pipeline_group_name
    def construct(self, grads):
        # Same reduction pipeline as GlobalNorm.construct.
        square_sum = self.hyper_map(
            get_square_sum, grads, self.allreduce_group_size)
        square_reduce_sum = F.addn(square_sum)
        stage_square_reduce_sum = self.allreduce(square_reduce_sum)
        global_square_reduce_sum = self.allreduce2(stage_square_reduce_sum)
        global_norms = F.sqrt(global_square_reduce_sum)
        return global_norms
class ClipByGlobalNorm(nn.Cell):
    """
    Clip grads by global norm
    (uses GlobalNorm, i.e. the cross-stage / model-parallel norm).
    """
    def __init__(self, params, config, clip_norm=1.0):
        super(ClipByGlobalNorm, self).__init__()
        self.global_norm = GlobalNorm(params, config)
        self.clip_norm = Tensor([clip_norm], mstype.float32)
        self.hyper_map = C.HyperMap()
    def construct(self, grads):
        # Scale every gradient by clip_norm/global_norm; when the norm is
        # below the threshold the denominator is clamped to clip_norm, so
        # the scale factor is 1 (no-op).  The unclamped norm is also
        # returned for logging.
        global_norm_origin = self.global_norm(grads)
        cond = P.GreaterEqual()(global_norm_origin, self.clip_norm)
        global_norm = F.select(cond, global_norm_origin, self.clip_norm)
        grads = self.hyper_map(
            F.partial(apply_global_norm, self.clip_norm, global_norm), grads)
        return grads, global_norm_origin
class ClipByGlobalNormOptShard(nn.Cell):
    """
    Clip grads by global norm
    (identical to ClipByGlobalNorm except that the norm is computed by
    GlobalNormOptShard — consider unifying the two classes).
    """
    def __init__(self, params, config, clip_norm=1.0):
        super(ClipByGlobalNormOptShard, self).__init__()
        self.global_norm = GlobalNormOptShard(params, config)
        self.clip_norm = Tensor([clip_norm], mstype.float32)
        self.hyper_map = C.HyperMap()
    def construct(self, grads):
        # Same clamp-and-rescale scheme as ClipByGlobalNorm.construct.
        global_norm_origin = self.global_norm(grads)
        cond = P.GreaterEqual()(global_norm_origin, self.clip_norm)
        global_norm = F.select(cond, global_norm_origin, self.clip_norm)
        grads = self.hyper_map(
            F.partial(apply_global_norm, self.clip_norm, global_norm), grads)
        return grads, global_norm_origin
def _get_model_parallel_group(mp):
    """Return (rank_list, group_name) for this rank's model-parallel group.

    The group is the run of `mp` consecutive ranks inside the current
    pipeline stage that contains this rank; the name is either the dash-joined
    rank list or, for long groups, "<first>-to-<last>-<count>".
    """
    global_rank = get_rank()
    stage_count = auto_parallel_context().get_pipeline_stages()
    per_stage = get_group_size() // stage_count
    stage_id = global_rank // per_stage
    local_rank = global_rank % per_stage
    # First rank of the mp-sized slice this rank belongs to.
    base = (local_rank // mp) * mp + stage_id * per_stage

    rank_list = [base + offset for offset in range(mp)]
    names = [str(r) for r in rank_list]
    if len(names) < 30:
        group_name = "-".join(names)
    else:
        group_name = names[0] + "-to-" + names[-1] + "-" + str(len(names))
    return rank_list, group_name
def _get_pipeline_group():
    """Return (rank_list, group_name) for this rank's pipeline group.

    The group collects the rank holding the same local position in every
    pipeline stage; the name is the dash-joined rank list or, for long
    groups, "<first>-to-<last>-<count>".
    """
    global_rank = get_rank()
    stage_count = auto_parallel_context().get_pipeline_stages()
    per_stage = get_group_size() // stage_count
    local_rank = global_rank % per_stage

    rank_list = [local_rank + stage * per_stage for stage in range(stage_count)]
    names = [str(r) for r in rank_list]
    if len(names) < 30:
        group_name = "-".join(names)
    else:
        group_name = names[0] + "-to-" + names[-1] + "-" + str(len(names))
    return rank_list, group_name
class LearningRate(LearningRateSchedule):
    """
    Warmup-decay learning rate for GPT network.

    During the first `warmup_steps` steps a linear warmup is used; afterwards
    either a cosine or a polynomial decay schedule applies, and the result is
    multiplied by `lr_scale`.
    """
    def __init__(self,
                 learning_rate,
                 end_learning_rate,
                 warmup_steps,
                 decay_steps,
                 power=1.0,
                 use_cosine=True,
                 lr_scale=0.125):
        super(LearningRate, self).__init__()
        self.warmup_flag = False
        if warmup_steps > 0:
            self.warmup_flag = True
            self.warmup_lr = WarmUpLR(learning_rate, warmup_steps)
        # Both decay schedules are built; use_cosine selects at run time.
        self.decay_lr = PolynomialDecayLR(learning_rate, end_learning_rate,
                                          decay_steps, power)
        self.cosine_decay_lr = CosineDecayLR(end_learning_rate, learning_rate,
                                             decay_steps)
        self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))
        self.greater = P.Greater()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.cast = P.Cast()
        self.use_cosine = use_cosine
        self.lr_scale = lr_scale
    def construct(self, global_step):
        """dynamic learning rate"""
        if not self.use_cosine:
            decay_lr = self.decay_lr(global_step)
        else:
            decay_lr = self.cosine_decay_lr(global_step)
        if self.warmup_flag:
            # is_warmup is 1.0 while global_step < warmup_steps, else 0.0;
            # blends warmup and decay without graph-breaking control flow.
            is_warmup = self.cast(self.greater(self.warmup_steps, global_step),
                                  mstype.float32)
            warmup_lr = self.warmup_lr(global_step)
            lr = (self.one - is_warmup) * decay_lr + is_warmup * warmup_lr
        else:
            lr = decay_lr
        return lr * self.lr_scale
class LossSummaryCallback(Callback):
    """Loss Summary Callback.

    Records loss / loss-scale (and, when present, global-norm) scalars to a
    local summary dir every step, and every `syn_times` steps copies the
    summary dir to an OBS bucket via moxing in a background process.
    NOTE(review): the sync Process is never join()ed and the SummaryRecord is
    never closed; consider an `end()` hook that flushes and closes it.
    """
    def __init__(self, summary_dir, local_rank=0, has_trained_epoch=0, has_trained_step=0,
                 bucket='obs://mindspore-file/loss_file/summary/', syn_times=100):
        self._summary_dir = summary_dir
        self.local_rank = local_rank
        self.has_trained_epoch = has_trained_epoch
        self.has_trained_step = has_trained_step
        self.bucket = bucket
        self.syn_times = syn_times
        if not mox.file.exists(self.bucket):
            print("Creating summary bueckt dir {}".format(self.bucket))
            mox.file.make_dirs(self.bucket)
        print("entering")
        self.summary_record = SummaryRecord(self._summary_dir)
    def step_end(self, run_context):
        """step end"""
        cb_params = run_context.original_args()
        # Offset by steps already trained before a resume.
        cur_step = cb_params.cur_step_num + self.has_trained_step
        # create a confusion matric image, and record it to summary file
        print("writing")
        # net_outputs layout: [0]=loss, [2]=loss scale, [3]=global norm
        # (when available) — confirm against the training wrapper's outputs.
        self.summary_record.add_value('scalar', 'loss', cb_params.net_outputs[0])
        self.summary_record.add_value('scalar', 'scale', cb_params.net_outputs[2])
        if len(cb_params.net_outputs) > 3:
            self.summary_record.add_value('scalar', 'global_norm', cb_params.net_outputs[3])
        self.summary_record.record(cur_step)
        print("writing finished...", cur_step, self.syn_times)
        if cur_step % self.syn_times == 0:
            print("Copying summary to the bueckets start", flush=True)
            self.summary_record.flush()
            self.syn_files()
            print("Copying summary to the bueckets ends", flush=True)
    def syn_files(self):
        # Fire-and-forget background copy of the summary dir to the bucket.
        process = Process(target=mox.file.copy_parallel, args=(
            self._summary_dir, self.bucket), name="file_sync")
        process.start()
class StrategyCkptCallback(Callback):
    """Strategy Ckpt Callback.

    Uploads the parallel-strategy checkpoint file to a per-rank OBS location
    once, after the first training step (the file is expected to exist by
    then).  NOTE(review): the background Process is never join()ed.
    """
    def __init__(self, strategy_file, local_rank=0, bucket='s3://muti-modal/strategy_ckpt/opt/'):
        self._strategy_file = strategy_file
        self.local_rank = local_rank
        # Per-rank destination: <bucket>/<rank>/strategy<rank>.ckpt
        self.bucket = bucket + str(local_rank) + "/"
        self.obs_file = self.bucket + "strategy" + str(local_rank) + ".ckpt"
        self.has_synced = False
        if not mox.file.exists(self.bucket):
            print("Creating strategy ckpt bueckt dir {}".format(self.bucket))
            mox.file.make_dirs(self.bucket)
    def step_end(self, run_context):
        # Upload only once, on the first step_end after training starts.
        if not self.has_synced:
            print("Copying strategy_ckpt to the bueckets start", flush=True)
            self.syn_files()
            print("Copying strategy_ckpt to the bueckets ends", flush=True)
            self.has_synced = True
    def syn_files(self):
        # Fire-and-forget background copy of the strategy file to OBS.
        process = Process(target=mox.file.copy_parallel, args=(
            self._strategy_file, self.obs_file), name="file_sync")
        process.start()
| [
"450076077@qq.com"
] | 450076077@qq.com |
619adbc9e0846c415c923477aaeb13ecbea85612 | 3b5f8c4838b8eb7ae7c9566cd64ee8dc466b64c3 | /code/notejam/wsgi.py | a75e88f53705a4601ec4ed59399d1e5c0a91e3c4 | [
"MIT"
] | permissive | ganesh35/notejam | 10b018c847a83775c3470b086f0fd10c67b2c3b0 | 6e93c9b127ee1627e6125bcccccdad0640e4be5c | refs/heads/main | 2023-03-09T05:26:13.071265 | 2021-02-25T15:51:39 | 2021-02-25T15:51:39 | 338,671,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | """
WSGI config for notejam project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# The settings module is declared before Django's WSGI machinery is imported
# below, so Django can locate the project settings when the application
# object is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "notejam.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"ganesh35@gmail.com"
] | ganesh35@gmail.com |
37df40f64062cc7835c3b5a154205a1ed7cd9747 | b227a0f894e23755913945976c3571c05f00b1c7 | /fbnet/train_FBnet_Se.py | 75fd208a228722971775527461028b565c187692 | [] | no_license | CuriousCat-7/NAS | 918d7379572bad31de370d12879f882dbc99570e | c47d22a52889b7f5bc2e25449590557ab53b9d66 | refs/heads/master | 2021-01-25T22:44:00.079900 | 2020-03-03T10:29:50 | 2020-03-03T10:29:50 | 243,212,262 | 0 | 0 | null | 2020-02-26T08:39:57 | 2020-02-26T08:39:56 | null | UTF-8 | Python | false | false | 3,714 | py | import os
import argparse
import sys
import logging
from time import gmtime, strftime
import time
sys.path.insert(0, '/home/zhouchangqing/mxnet/incubator-mxnet_12_26/python')
import mxnet as mx
from FBNet import FBNet
from FBNet_SE import FBNet_SE
from util import _logger, get_train_ds, _set_file, get_mnist_iter
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Train a model with data parallel for base net \
and model parallel for classify net.")
parser.add_argument('--batch-size', type=int, default=256,
help='training batch size of all devices.')
parser.add_argument('--epochs', type=int, default=20,
help='number of training epochs.')
parser.add_argument('--queue-size', type=int, default=20,
help='train data queue size, used for shuffle.')
parser.add_argument('--model-type', type=str, default='amsoftmax',
help='top model type, default is amsoftmax')
parser.add_argument('--log-frequence', type=int, default=400,
help='log frequence, default is 400')
parser.add_argument('--patch-idx', type=int, default=0,
help='patch index, default is 0')
parser.add_argument('--patch-size', type=int, default=1,
help='patch size, default is 1')
parser.add_argument('--gpu', type=int, default=0,
help='gpu, default is 0')
parser.add_argument('--load-model-path', type=str, default=None,
help='re_train, default is None')
parser.set_defaults(
num_classes=2000,
# num_classes=10,
num_examples=107588,
image_shape='3,108,108',
# image_shape='1,28,28',
feature_dim=192,
conv_workspace=1024, # this is the default value
save_checkpoint_frequence=5000,
restore=False,
optimizer='sgd',
data_nthreads=16,
force2gray='false',
force2color='false',
illum_trans_prob=0.3,
hsv_adjust_prob=0.1,
train_rec_path='/home1/data/zhuzhou/MsCeleb_SrvA2_clean/MsCeleb_clean1_2w_train_2k.rec',
isgray=False,
save_model_path = './model',
lr_decay_step=[35, 75,125,150,175,200,225,250,275,300,325,350],
cosine_decay_step=3000,
)
args = parser.parse_args()
train_w_ds = get_train_ds(args)
args.model_save_path = './log/%s/' % \
(time.strftime('%Y-%m-%d', time.localtime(time.time())))
if not os.path.exists(args.model_save_path):
_logger.warn("{} not exists, create it".format(args.model_save_path))
os.makedirs(args.model_save_path)
_set_file(args.model_save_path + 'log.log')
args.num_examples = 26246
args.train_rec_path = '/home1/data/zhuzhou/MsCeleb_SrvA2_clean/MsCeleb_clean1_2w_val_2k.rec'
train_theta_ds = get_train_ds(args)
# train, val = get_mnist_iter(args)
train, val = train_w_ds, train_theta_ds
fbnet = FBNet_SE(batch_size=args.batch_size,
output_dim=args.num_classes,
label_shape=(args.num_classes, ),
logger=_logger,
alpha=0.2, beta=0.6,
input_shape=[int(i) for i in args.image_shape.split(',')],
ctxs=mx.gpu(args.gpu),
# eval_metric=['acc', 'ce'] # TODO
num_examples=args.num_examples,
log_frequence=args.log_frequence,
save_frequence=args.save_checkpoint_frequence,
feature_dim=args.feature_dim,
load_model_path = args.load_model_path,
save_model_path = args.save_model_path,
model_type=args.model_type)
fbnet.search(train, val, start_w_epochs=6,
#lr_decay_step=args.lr_decay_step,
result_prefix=args.model_type + 'senet_cosine_1080Ti',
cosine_decay_step=args.cosine_decay_step)
| [
"wujunqiang@bupt.edu.cn"
] | wujunqiang@bupt.edu.cn |
4798d96033333ba5a50cb444ae683e4d1a6ee0d6 | b3839099049a5d34e14b2452c4969fd4f2a3333c | /conanfile.py | cfcf6015db568caa261688eb7d7ec15c668440cb | [
"MIT"
] | permissive | jeffw387/vkaEngine | bb71827118929ec5aaa883e7bb41bbfbf26d0e22 | 69bc21d4c10229ab823a887147cb45888d9afbaf | refs/heads/master | 2021-07-08T19:07:34.049415 | 2019-02-24T06:53:45 | 2019-02-24T06:53:45 | 144,421,157 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,913 | py | from conans import ConanFile, CMake
class vkaEngineConan(ConanFile):
name = "vkaEngine"
versionfile = open(".version")
version = versionfile.read()
versionfile.close()
license = "MIT"
author = "Jeff Wright jeffw387@gmail.com"
url = "https://github.com/jeffw387/vkaEngine.git"
description = "A vulkan rendering framework"
settings = "os", "compiler", "build_type", "arch", "cppstd"
options = {"shared": [True, False]}
default_options = "shared=False"
generators = "cmake"
exports = "CMakeLists.txt", ".version", "!build"
exports_sources = "!build", "*.hpp", "*.cpp"
build_policy = "missing"
requires = (
"vulkan-sdk/1.X.X@jeffw387/testing",
"libcpp/latest@jeffw387/testing",
"libstdcpp/latest@jeffw387/testing",
"pthread/latest@jeffw387/testing",
"cppfs/experimental@jeffw387/testing",
"glfw/3.2.1@jeffw387/testing",
"glm/0.9.9.1@g-truc/stable",
"VulkanMemoryAllocator/2.2.0@jeffw387/testing",
"spdlog/1.3.0@bincrafters/stable",
"stb/20180214@conan/stable",
"jsonformoderncpp/3.5.0@vthiery/stable",
"Catch2/2.5.0@catchorg/stable",
"tl_expected/0.2@jeffw387/testing",
"tl_optional/0.5@jeffw387/testing",
"json-shader/latest@jeffw387/testing")
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
self.copy("*.hpp", dst="src", src="src")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.dylib*", dst="lib", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["vkaEngine"]
self.cpp_info.includedirs = ["src"]
| [
"jeffw387@gmail.com"
] | jeffw387@gmail.com |
5c53f6b52bfd5c8e79b2bb0168bfe2d2739fa67f | 8cda31ba684002db781d6b5af66227450de43ba8 | /biosys/settings/dev.py | a826f09c4e80365bd0f564089b43bba9b1393a34 | [] | no_license | sergedpaw/biosys | e1c1c3a3ff585ec41f4bc65f550ad117617c2ded | e1210b602cd9bcfa2a12244024a02db9a9223343 | refs/heads/master | 2021-01-02T22:16:45.463777 | 2015-04-17T08:19:25 | 2015-04-17T08:19:25 | 34,037,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from __future__ import absolute_import
import sys
from .base import *
INSTALLED_APPS += (
'django_extensions',
)
# You might want to use sqlite3 for testing in local as it's much faster.
if len(sys.argv) > 1 and 'test' in sys.argv[1]:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/biosys.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
} | [
"sergel@dpaw.wa.gov.au"
] | sergel@dpaw.wa.gov.au |
1fb3703b0fbecdc2e464bc3c3a63822b36661a44 | 97e7984431f79e370614d7efb9b2fc20de3e066d | /scripts/detect/db/train_db.py | afc0a1abcf563eb3e014142b4b2018862ee8ee5f | [
"Apache-2.0"
] | permissive | xuncv/gluon-ocr | 5f96dcc5d2b497707fd029b0a7937ea06fb7954a | 5ac697cc030058cb9ae9f4b1f4e85f901d4d1c40 | refs/heads/master | 2023-01-22T16:09:01.114328 | 2020-12-03T07:27:28 | 2020-12-03T07:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,092 | py | #coding=utf-8
import cv2
import os
import sys
import logging
import warnings
import time
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.data import DataLoader
from gluoncv import utils as gutils
sys.path.append(os.path.expanduser('~/gluon-ocr'))
from gluonocr.model_zoo import get_db
from gluonocr.data import DBDataset
from gluonocr.data import PointAugmenter
from gluonocr.loss import DBLoss
from gluonocr.post_process import DBPostProcess
from gluonocr.utils.detect_metric import DetectionIoUEvaluator
from config import args
gutils.random.seed(args.seed)
class Trainer(object):
def __init__(self):
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]
self.ctx = ctx if ctx != 0 else [mx.cpu()]
if args.syncbn and len(self.ctx) > 1:
self.net = get_db(args.network, args.num_layers, pretrained_base=True,
norm_layer=gluon.contrib.nn.SyncBatchNorm,
norm_kwargs={'num_devices': len(self.ctx)})
self.async_net = get_db(args.network, args.num_layers, pretrained_base=False) # used by cpu worker
else:
self.net = get_db(args.network, args.num_layers, pretrained_base=True)
self.async_net = self.net
model_name = '%s-%s%d-db'%(args.dataset_name, args.network, args.num_layers)
if not os.path.exists(args.save_prefix):
os.mkdir(args.save_prefix)
args.save_prefix += model_name
self.init_model()
self.net.hybridize()
self.net.collect_params().reset_ctx(self.ctx)
if args.export_model:
self.export_model()
self.train_dataloader, self.val_dataloader = self.get_dataloader()
self.loss = DBLoss()
self.post_proc = DBPostProcess()
self.metric = DetectionIoUEvaluator()
self.sum_loss = mx.metric.Loss('SumLoss')
self.bce_loss = mx.metric.Loss('BalanceCELoss')
self.l1_loss = mx.metric.Loss('L1Loss')
self.dice_loss = mx.metric.Loss('DiceLoss')
def init_model(self):
if args.resume.strip():
self.net.load_parameters(args.resume.strip())
self.async_net.load_parameters(args.resume.strip())
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.net.initialize(init=mx.init.Xavier())
self.async_net.initialize(init=mx.init.Xavier())
def get_lr_scheduler(self):
if args.lr_decay_period > 0:
lr_decay_epoch = list(range(args.lr_decay_period, args.epochs, args.lr_decay_period))
else:
lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
lr_decay_epoch = [e - args.warmup_epochs for e in lr_decay_epoch]
num_batches = args.num_samples // args.batch_size
lr_scheduler = gutils.LRSequential([
gutils.LRScheduler('linear', base_lr=0, target_lr=args.lr,
nepochs=args.warmup_epochs, iters_per_epoch=num_batches),
gutils.LRScheduler(args.lr_mode, base_lr=args.lr,
nepochs=args.epochs - args.warmup_epochs,
iters_per_epoch=num_batches,
step_epoch=lr_decay_epoch,
step_factor=args.lr_decay, power=2),
])
return lr_scheduler
def get_dataloader(self):
augment = PointAugmenter()
train_dataset = DBDataset(args.train_img_dir,
args.train_lab_dir,
augment, mode='train',
img_size=(args.data_shape, args.data_shape))
args.num_samples = len(train_dataset)
val_dataset = DBDataset(args.val_img_dir,
args.val_lab_dir, mode='val',
img_size=(args.data_shape, args.data_shape))
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size,
last_batch='discard', shuffle=True,
num_workers=args.num_workers, pin_memory=True)
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size,
num_workers=args.num_workers, last_batch='keep')
return train_dataloader, val_dataloader
def train(self):
lr_scheduler = self.get_lr_scheduler()
trainer = gluon.Trainer(self.net.collect_params(), 'sgd',
{'wd': args.wd, 'momentum': args.momentum, 'lr_scheduler': lr_scheduler}) #
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = args.save_prefix + '_train.log'
log_dir = os.path.dirname(log_file_path)
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
logger.info(args)
logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
best_loss = 10000
for epoch in range(args.start_epoch, args.epochs):
tic = time.time()
btic = time.time()
self.net.hybridize()
for i, batch in enumerate(self.train_dataloader):
data = gluon.utils.split_and_load(batch[0], ctx_list=self.ctx, batch_axis=0)
labs = [gluon.utils.split_and_load(batch[it], ctx_list=self.ctx, batch_axis=0) for it in range(1, 5)]
sum_losses, bce_losses, l1_losses, dice_losses = [], [], [], []
with mx.autograd.record():
for it, x in enumerate(data):
bina, thresh, thresh_bina = self.net(x)
pred = {'binary':bina, 'thresh':thresh, 'thresh_binary':thresh_bina}
lab = {'gt':labs[0][it], 'mask':labs[1][it], 'thresh_map':labs[2][it], 'thresh_mask':labs[3][it]}
loss, metric = self.loss(pred, lab)
sum_losses.append(loss)
bce_losses.append(metric['bce_loss'])
l1_losses.append(metric['l1_loss'])
dice_losses.append(metric['thresh_loss'])
mx.autograd.backward(sum_losses)
trainer.step(1)
self.sum_loss.update(0, sum_losses)
self.bce_loss.update(0, bce_losses)
self.l1_loss.update(0, l1_losses)
self.dice_loss.update(0, dice_losses)
if args.log_interval and not (i + 1) % args.log_interval:
name0, loss0 = self.sum_loss.get()
name1, loss1 = self.bce_loss.get()
name2, loss2 = self.l1_loss.get()
name3, loss3 = self.dice_loss.get()
logger.info('[Epoch {}][Batch {}], LR: {:.2E}, Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}, {}={:.3f}, {}={:.3f}'.format(
epoch, i+1, trainer.learning_rate, args.batch_size/(time.time()-btic), name0, loss0, name1, loss1, name2, loss2, name3, loss3))
btic = time.time()
name0, loss0 = self.sum_loss.get()
name1, loss1 =self.bce_loss.get()
name2, loss2 = self.l1_loss.get()
name3, loss3 = self.dice_loss.get()
logger.info('[Epoch {}] Training cost: {:.3f}, {}={:.3f}, {}={:.3f}, {}={:.3f}, {}={:.3f}'.format(
epoch, time.time()-tic, name0, loss0, name1, loss1, name2, loss2, name3, loss3))
if not (epoch + 1) % args.val_interval:
# consider reduce the frequency of validation to save time
mean_loss = self.validate(logger)
if mean_loss < best_loss:
best_loss = mean_loss
self.net.save_parameters('{:s}_best.params'.format(args.save_prefix))
if args.save_interval and (epoch+1) % args.save_interval == 0:
self.net.save_parameters('{:s}_{:04d}_{:.3f}.params'.format(args.save_prefix, epoch+1, mean_loss))
def validate(self, logger):
if self.val_dataloader is None:
return 0
logger.info('Start validate.')
self.sum_loss.reset()
self.bce_loss.reset()
self.l1_loss.reset()
self.dice_loss.reset()
tic = time.time()
for batch in self.val_dataloader:
data = gluon.utils.split_and_load(batch[0], ctx_list=self.ctx, batch_axis=0)
labs = [gluon.utils.split_and_load(batch[it], ctx_list=self.ctx, batch_axis=0) for it in range(1, 5)]
for it, x in enumerate(data):
bina, thresh, thresh_bina = self.net(x)
pred = {'binary':bina, 'thresh':thresh, 'thresh_binary':thresh_bina}
lab = {'gt':labs[0][it], 'mask':labs[1][it], 'thresh_map':labs[2][it], 'thresh_mask':labs[3][it]}
loss, metric = self.loss(pred, lab)
self.sum_loss.update(0, loss)
self.bce_loss.update(0, metric['bce_loss'])
self.l1_loss.update(0, metric['l1_loss'])
self.dice_loss.update(0, metric['thresh_loss'])
name0, loss0 = self.sum_loss.get()
name1, loss1 = self.bce_loss.get()
name2, loss2 = self.l1_loss.get()
name3, loss3 = self.dice_loss.get()
logger.info('Evaling cost: {:.3f}, {}={:.3f}, {}={:.3f}, {}={:.3f}, {}={:.3f}'.format(
time.time()-tic, name0, loss0, name1, loss1, name2, loss2, name3, loss3))
self.sum_loss.reset()
self.bce_loss.reset()
self.l1_loss.reset()
self.dice_loss.reset()
return loss0
def export_model(self):
self.net.export_block(args.save_prefix, args.resume.strip(), self.ctx)
sys.exit()
if __name__ == '__main__':
trainer = Trainer()
trainer.train() | [
"448627058@qq.com"
] | 448627058@qq.com |
395326db8f49bcc62b832e19268b8b29aecfb822 | 1b8ae90527e93aab7f80ac7b908a5eefe1b1384e | /Data_Structures/queue_with_max.py | 5423249c1576b9d2c695a000e3a2f44bc7861135 | [] | no_license | Frikster/CodingNotebook | 4cbdff626e2b86fde45223775d27078291d41621 | c371cd43bcbac02cb915a9620b291d013d8fb485 | refs/heads/master | 2020-04-14T22:53:58.725429 | 2019-05-11T21:44:54 | 2019-05-11T21:44:54 | 164,182,563 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,857 | py | # Implement a queue with #enqueue and #dequeue, as well as a #max API,
# a method which returns the maximum element still in the queue. This
# is trivial to do by spending O(n) time upon dequeuing.
# Can you do it in O(1) amortized? Maybe use an auxiliary storage structure?
import pdb
import unittest
from Data_Structures.ring_buffer import RingBuffer
from Data_Structures.min_max_stack_queue import MinMaxStackQueue
class QueueWithMax:
def __init__(self):
self.store = MinMaxStackQueue()
def enqueue(self, val):
self.store.enqueue(val)
def dequeue(self):
return self.store.dequeue()
def max(self):
if len(self) == 0: return float("-inf")
return self.store.max()
def __len__(self):
return len(self.store)
class Queue:
def __init__(self):
self.store = RingBuffer()
def enqueue(self, val):
self.store.append(val)
def dequeue(self):
return self.store.shift()
def __len__(self):
return len(self.store)
def __str__(self):
return self.store.__str__()
class Test(unittest.TestCase):
def test_queue_with_max(self):
q = QueueWithMax()
print(q.max())
q.enqueue(5)
print(q.max())
q.enqueue(1)
print(q.max())
q.enqueue(50)
print(q.max())
q.enqueue(5)
print(q.max())
q.dequeue()
q.dequeue()
print(q.max())
q.dequeue()
print(q.max())
def test_queue(self):
q = Queue()
print(q)
q.enqueue(5)
print(q)
q.enqueue(1)
print(q)
q.enqueue(50)
print(q)
q.enqueue(5)
print(q)
q.dequeue()
q.dequeue()
print(q)
q.dequeue()
print(q)
if __name__ == "__main__":
unittest.main()
| [
"dirk.haupt@gmail.com"
] | dirk.haupt@gmail.com |
cb6578c9c54c4b19d30773f027845d26b967021a | cf7dedf604be53b2bd5749a14a83630ab0ee287d | /server.py | 06a6f4fa2009b8ca26d83db4c6e34433d8747322 | [] | no_license | jqjZhu/flask-intro | 9aa4159aabb58e4f24c3f1d0dc2e33742cbc33c5 | 62f88114171ea2d97552efb42a3c261d03daa910 | refs/heads/master | 2023-03-20T23:35:05.165849 | 2020-05-13T21:01:10 | 2020-05-13T21:01:10 | 263,742,100 | 0 | 0 | null | 2021-03-20T03:57:06 | 2020-05-13T21:00:27 | Python | UTF-8 | Python | false | false | 2,845 | py | """Greeting Flask app."""
from random import choice
from flask import Flask, request
# "__name__" is a special Python variable for the name of the current module
# Flask wants to know this to know what any imported things are relative to.
app = Flask(__name__)
AWESOMENESS = [
'awesome', 'terrific', 'fantastic', 'neato', 'fantabulous', 'wowza',
'oh-so-not-meh', 'brilliant', 'ducky', 'coolio', 'incredible',
'wonderful', 'smashing', 'lovely']
@app.route('/')
def start_here():
"""Home page."""
return """
<!doctype html>
<html>
<head>
<title>Start Here</title>
</head>
<body>
<a href="/hello">Take me to the start</a>
</body>
</html>
"""
@app.route('/hello')
def say_hello():
"""Say hello and prompt for user's name."""
return """
<!doctype html>
<html>
<head>
<title>Hi There!</title>
</head>
<body>
<h1>Hi There!</h1>
<form action="/greet" method='GET'>
What's your name? <input type="text" name="person">
What compliment would you like?
<input type="radio" name="compliment" value="awesome">Awesome<br>
<input type="radio" name="compliment" value="terrific">Terrific<br>
<input type="radio" name="compliment" value="fantastic">Fantastic<br>
<input type="radio" name="compliment" value="neato">Neato<br>
<input type="radio" name="compliment" value="fantabulous">Fantabulous<br>
<input type="radio" name="compliment" value="wowza">Wowza<br>
<input type="radio" name="compliment" value="oh-so-not-meh">Oh-so-not-meh<br>
<input type="radio" name="compliment" value="brilliant">Brilliant<br>
<input type="radio" name="compliment" value="ducky">Ducky<br>
<input type="radio" name="compliment" value="coolio">Coolio<br>
<input type="radio" name="compliment" value="incredible">Incredible<br>
<input type="radio" name="compliment" value="wonderful">Wonderful<br>
<input type="radio" name="compliment" value="smashing">Smashing<br>
<input type="radio" name="compliment" value="lovely">Lovely<br>
<input type="submit" value="Submit">
</form>
</body>
</html>
"""
@app.route('/greet')
def greet_person():
"""Get user by name."""
player = request.args.get("person")
compliment = request.args.get("compliment")
return f"""
<!doctype html>
<html>
<head>
<title>A Compliment</title>
</head>
<body>
Hi, {player}! I think you're {compliment}!
</body>
</html>
"""
if __name__ == '__main__':
# debug=True gives us error messages in the browser and also "reloads"
# our web app if we change the code.
app.run(debug=True, host="0.0.0.0")
| [
"Zqijing0815@gmail.com"
] | Zqijing0815@gmail.com |
0c6d75b18504836247f6f318140c34b34537ce19 | 300193e541e7e03dbaf2002b84bd89cef3ef99d5 | /personal.py | f82aaa4092a53069eb473abe951fa3e394883e26 | [] | no_license | geeogi/geeogi_talon | ca99e88338f2d9338bf0f9b04d911fc4b2ef6fad | 43c9aa34608aa2d5f4e12096c45332f22a32beea | refs/heads/main | 2023-01-15T06:19:19.217543 | 2020-11-23T08:20:44 | 2020-11-23T08:20:44 | 311,019,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | from talon import Module, Context, actions
mod = Module()
ctx = Context()
mod.list("link", desc="common quick links")
ctx.lists['self.link'] = {
'whats app':'https://web.whatsapp.com',
'coin market cap':'https://coinmarketcap.com',
'gmail':'https://mail.google.com/mail/u/0/#inbox',
'news':'https://www.bbc.co.uk/news'
}
@mod.action_class
class Actions:
def open_link(link: str) -> str:
"""open a quick link"""
actions.insert(link)
actions.key("enter")
| [
"george6bennett@gmail.com"
] | george6bennett@gmail.com |
6bbcd41880eb25da9be5cc151ebab7f50751e881 | 9045fd1d794bcb0e0cce44b0eb1c1bb8166fcf24 | /courses/urls.py | 6b213b775ac2830a606a39f1d36e0e8e090971f6 | [] | no_license | bharrison21/Fyuzed | 31428fb0e8efc0bde149da7e149966eafbb9a478 | deda76c4c6bae2178ec50b3ffebed47b928eb76d | refs/heads/master | 2023-07-19T04:21:20.403535 | 2021-09-05T18:13:18 | 2021-09-05T18:13:18 | 265,630,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | from django.urls import path
#from .views import
from . import views
from .views import Listings, ViewListing
from django.views.generic.base import TemplateView
#define paths.
# When you are at '<url>/signup/', it will show SignUpView (as_view needed bc its class based)
# it's name allows it to be refered to as just 'signup' from other parts of the code, like in home.html
urlpatterns = [
path('courses_home/', TemplateView.as_view(template_name='courses/courses_home.html'), name="courses_home"),
path('create_course/', views.create_course, name="create_course"),
path('create_listing/', views.create_listing, name="create_listing"),
path('listings/', Listings.as_view(), name="listings"),
path('<slug:the_slug>/view_listing/', ViewListing.as_view(), name="view_listing"),
path('fill_listing/', views.fill_listing, name="fill_listing")
] | [
"timstauder26@gmail.com"
] | timstauder26@gmail.com |
91a0fd3c2c049c06e58486ec8a171240d7f057f8 | e748e6d96aace1c9149327f384e0de07d743715a | /challange/largestPlateau.py | 14569e16c073b32674b62202fc1a064fd2c5fbe3 | [] | no_license | jorzel/codefights | cdfc4cb32261b064ffc605bfd927bf237885b5d2 | 28b62a2ae3809f0eb487198044c0fe74be09d4e8 | refs/heads/master | 2022-04-28T06:54:26.170503 | 2022-03-23T22:22:20 | 2022-03-23T22:22:20 | 110,818,719 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | """
Your terraforming crew is busy at work on a mountaintop, but it's time to break for lunch. In order to allow everyone to have lunch together, we'd like to find a plateau on the mountain where everyone can sit.
Given a topographic map in the form of a matrix of integers map, find the area of the largest plateau.
Example
For
map = [[1,0,0,2,2,0],
[0,0,2,1,0,2],
[0,1,1,2,2,2],
[1,2,1,0,2,1]]
the output should be largestPlateau(map) = 5. The crew could either choose the plateau with elevation 0 or the one with elevation 2; both of which have an area of 5:
"""
from collections import defaultdict
def dfs(graph, start):
visited, stack = set(), [start]
while stack:
vertex = stack.pop()
if vertex not in visited:
visited.add(vertex)
stack.extend(graph[vertex] - visited)
return visited
def build_graph(maps):
graph = defaultdict(set)
rows = len(maps)
cols = len(maps[0])
for y in range(rows):
for x in range(cols):
neighbours = [(x - 1, y),
(x, y - 1),
(x + 1, y),
(x, y + 1)]
while True:
p = neighbours.pop(0)
if p[1] >= 0 and p[1] < rows and p[0] >= 0 and p[0] < cols:
if maps[p[1]][p[0]] == maps[y][x]:
graph[(y, x)] |= {(p[1], p[0])}
if not neighbours:
break
return graph
def largestPlateau(maps):
if not maps:
return 0
graph = build_graph(maps)
rows = len(maps)
cols = len(maps[0])
visited = set()
max_plateu = 0
for y in range(rows):
for x in range(cols):
if (y, x) not in visited:
plateu = dfs(graph, (y,x))
visited |= plateu
if len(plateu) > max_plateu:
max_plateu = len(plateu)
return max_plateu | [
"jaroslaw.orzel@emplocity.pl"
] | jaroslaw.orzel@emplocity.pl |
ee60cd02829d54fe8b02d44339527c5b45fa47b4 | 2e47f91e6401eb7f36746e3078b0baac7fd4b9d1 | /silot/sspair_model.py | 3fb03a074b22eeed01b72335f81bba9b4b96c79b | [
"MIT"
] | permissive | lqiang2003cn/silot | fdf7fb9e6ed0d814fb6a9f0630cd7913478a870a | d49a41dde74db62d62bdd9ba5d35ff54c07fd9bc | refs/heads/master | 2023-01-15T19:37:37.276344 | 2020-06-18T15:28:19 | 2020-06-18T15:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,805 | py | import numpy as np
import tensorflow as tf
from collections import defaultdict
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import to_rgb
from dps import cfg
from dps.utils import Param
from dps.utils.tf import RenderHook, tf_mean_sum, tf_shape
from auto_yolo.models.core import AP, xent_loss
from auto_yolo.models.object_layer import ObjectLayer
from silot.core import VideoNetwork
class SequentialSpair(VideoNetwork):
    """SPAIR-style grid object detector applied to video, one frame at a time.

    Each frame is encoded by a convolutional backbone. From the second frame
    on, features extracted from the previous frame's object representations
    can be added in (via ``obj_feature_extractor``), after which a feature
    fuser produces per-grid-cell features that an ``ObjectLayer`` decodes
    into object latents (position, size, depth, appearance, presence).
    Training optimizes a pixel-wise reconstruction loss plus KL terms on the
    latents.
    """

    build_backbone = Param()                # callable building the convolutional backbone
    build_feature_fuser = Param()           # callable building the network that fuses frame features
    build_obj_feature_extractor = Param()   # callable (or None) building the frame-to-frame object-feature extractor
    n_backbone_features = Param()           # number of backbone features per anchor box, per grid cell
    anchor_boxes = Param()                  # list of anchor boxes; its length fixes B, the boxes per cell

    train_reconstruction = Param()          # whether to include the reconstruction loss
    reconstruction_weight = Param()         # weight on the reconstruction loss

    train_kl = Param()                      # whether to include the KL losses
    kl_weight = Param()                     # weight on the KL losses

    # Sub-modules; built lazily in `build_representation` unless set beforehand.
    backbone = None
    object_layer = None
    feature_fuser = None
    obj_feature_extractor = None

    @property
    def eval_funcs(self):
        """Lazily-built dict of AP evaluation functions.

        Returns an empty dict when ground-truth annotations are not among the
        network tensors (AP cannot be computed without them). The result is
        cached in ``self._eval_funcs`` after the first access.
        """
        if getattr(self, '_eval_funcs', None) is None:
            if "annotations" in self._tensors:
                ap_iou_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
                # round() guards against float truncation when forming the key
                # (e.g. ensures the key for 0.7 is "AP_at_point_7").
                eval_funcs = {"AP_at_point_{}".format(int(round(10 * v))): AP(v) for v in ap_iou_values}
                eval_funcs["AP"] = AP(ap_iou_values)
                self._eval_funcs = eval_funcs
            else:
                self._eval_funcs = {}

        return self._eval_funcs

    def build_representation(self):
        """Build the per-frame inference graph, then losses and recorded metrics."""
        # --- init modules ---

        self.B = len(self.anchor_boxes)

        if self.backbone is None:
            self.backbone = self.build_backbone(scope="backbone")
            if "backbone" in self.fixed_weights:
                self.backbone.fix_variables()

        if self.feature_fuser is None:
            self.feature_fuser = self.build_feature_fuser(scope="feature_fuser")
            if "feature_fuser" in self.fixed_weights:
                self.feature_fuser.fix_variables()

        if self.obj_feature_extractor is None and self.build_obj_feature_extractor is not None:
            self.obj_feature_extractor = self.build_obj_feature_extractor(scope="obj_feature_extractor")
            if "obj_feature_extractor" in self.fixed_weights:
                self.obj_feature_extractor.fix_variables()

        # Run the backbone over the whole video; it also reports the grid
        # geometry (number of cells and pixels per cell).
        backbone_output, n_grid_cells, grid_cell_size = self.backbone(
            self.inp, self.B*self.n_backbone_features, self.is_training)

        self.H, self.W = [int(i) for i in n_grid_cells]
        self.HWB = self.H * self.W * self.B
        self.pixels_per_cell = tuple(int(i) for i in grid_cell_size)
        H, W, B = self.H, self.W, self.B

        if self.object_layer is None:
            self.object_layer = ObjectLayer(self.pixels_per_cell, scope="objects")

        self.object_rep_tensors = []
        object_rep_tensors = None
        _tensors = defaultdict(list)

        for f in range(self.n_frames):
            print("Building network for frame {}".format(f))

            early_frame_features = backbone_output[:, f]

            # From the second frame on, condition the current frame's features
            # on the previous frame's object representations.
            if f > 0 and self.obj_feature_extractor is not None:
                object_features = object_rep_tensors["all"]
                object_features = tf.reshape(
                    object_features, (self.batch_size, H, W, B*tf_shape(object_features)[-1]))

                early_frame_features += self.obj_feature_extractor(
                    object_features, B*self.n_backbone_features, self.is_training)

            frame_features = self.feature_fuser(
                early_frame_features, B*self.n_backbone_features, self.is_training)

            frame_features = tf.reshape(
                frame_features, (self.batch_size, H, W, B, self.n_backbone_features))

            object_rep_tensors = self.object_layer(
                self.inp[:, f], frame_features, self._tensors["background"][:, f], self.is_training)

            self.object_rep_tensors.append(object_rep_tensors)

            for k, v in object_rep_tensors.items():
                _tensors[k].append(v)

        # Stack the per-frame tensors along a new time axis (axis 1).
        self._tensors.update(**{k: tf.stack(v, axis=1) for k, v in _tensors.items()})

        # --- specify values to record ---

        obj = self._tensors["obj"]
        pred_n_objects = self._tensors["pred_n_objects"]

        self.record_tensors(
            batch_size=self.batch_size,
            float_is_training=self.float_is_training,

            cell_y=self._tensors["cell_y"],
            cell_x=self._tensors["cell_x"],
            h=self._tensors["h"],
            w=self._tensors["w"],
            z=self._tensors["z"],
            area=self._tensors["area"],

            cell_y_std=self._tensors["cell_y_std"],
            cell_x_std=self._tensors["cell_x_std"],
            h_std=self._tensors["h_std"],
            w_std=self._tensors["w_std"],
            z_std=self._tensors["z_std"],

            n_objects=pred_n_objects,
            obj=obj,

            latent_area=self._tensors["latent_area"],
            latent_hw=self._tensors["latent_hw"],

            attr=self._tensors["attr"],
        )

        # --- losses ---

        if self.train_reconstruction:
            output = self._tensors['output']
            inp = self._tensors['inp']
            self._tensors['per_pixel_reconstruction_loss'] = xent_loss(pred=output, label=inp)
            self.losses['reconstruction'] = (
                self.reconstruction_weight * tf_mean_sum(self._tensors['per_pixel_reconstruction_loss'])
            )

        if self.train_kl:
            # Latent KL terms are masked by object presence (`obj`) so that
            # cells with no object contribute only through `obj_kl`.
            self.losses.update(
                obj_kl=self.kl_weight * tf_mean_sum(self._tensors["obj_kl"]),
                cell_y_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["cell_y_kl"]),
                cell_x_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["cell_x_kl"]),
                h_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["h_kl"]),
                w_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["w_kl"]),
                z_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["z_kl"]),
                attr_kl=self.kl_weight * tf_mean_sum(obj * self._tensors["attr_kl"]),
            )

            if cfg.background_cfg.mode == "learn_and_transform":
                self.losses.update(
                    bg_attr_kl=self.kl_weight * tf_mean_sum(self._tensors["bg_attr_kl"]),
                    bg_transform_kl=self.kl_weight * tf_mean_sum(self._tensors["bg_transform_kl"]),
                )

        # --- other evaluation metrics ---

        if "n_annotations" in self._tensors:
            count_1norm = tf.to_float(
                tf.abs(tf.to_int32(self._tensors["pred_n_objects_hard"]) - self._tensors["n_valid_annotations"]))

            self.record_tensors(
                count_1norm=count_1norm,
                count_error=count_1norm > 0.5,
            )
class SequentialSpair_RenderHook(RenderHook):
    """Renders diagnostics (reconstructions, bounding boxes, patches) for SequentialSpair."""
    # NOTE(review): some of these attributes are consumed by plotting code
    # outside this chunk -- meanings below are partly inferred; confirm against usage.
    N = 4  # presumably the number of examples to render -- TODO confirm
    linewidth = 2  # line width for bounding-box rectangles
    on_color = np.array(to_rgb("xkcd:azure"))  # RGB for "on" object boxes (presumably obj above cutoff)
    off_color = np.array(to_rgb("xkcd:red"))  # RGB for "off" object boxes
    gt_color = "xkcd:yellow"  # color for ground-truth annotation boxes
    cutoff = 0.5  # object-presence threshold used when coloring boxes -- TODO confirm
    fetches = "obj z inp output appearance normalized_box background glimpse"  # base tensor names to fetch; extended in __call__
def __call__(self, updater):
network = updater.network
if "n_annotations" in network._tensors:
self.fetches += " annotations n_annotations"
if 'prediction' in network._tensors:
self.fetches += " prediction targets"
if "actions" in network._tensors:
self.fetches += " actions"
if "bg_y" in network._tensors:
self.fetches += " bg_y bg_x bg_h bg_w bg_raw"
fetched = self._fetch(updater)
self._prepare_fetched(fetched)
# self._plot_reconstruction(updater, fetched)
self._plot_patches(updater, fetched)
# try:
# self._plot_reconstruction(updater, fetched)
# except Exception:
# pass
@staticmethod
def normalize_images(images):
mx = images.reshape(*images.shape[:-3], -1).max(axis=-1)
return images / mx[..., None, None, None]
def _prepare_fetched(self, fetched):
inp = fetched['inp']
output = fetched['output']
prediction = fetched.get("prediction", None)
targets = fetched.get("targets", None)
N, T, image_height, image_width, _ = inp.shape
flat_obj = fetched['obj'].reshape(N, T, -1)
background = fetched['background']
box = (
fetched['normalized_box']
* [image_height, image_width, image_height, image_width]
)
flat_box = box.reshape(N, T, -1, 4)
n_annotations = fetched.get("n_annotations", np.zeros(N, dtype='i'))
annotations = fetched.get("annotations", None)
# actions = fetched.get("actions", None)
diff = self.normalize_images(np.abs(inp - output).mean(axis=-1, keepdims=True))
xent = self.normalize_images(
xent_loss(pred=output, label=inp, tf=False).mean(axis=-1, keepdims=True))
learned_bg = "bg_y" in fetched
bg_y = fetched.get("bg_y", None)
bg_x = fetched.get("bg_x", None)
bg_h = fetched.get("bg_h", None)
bg_w = fetched.get("bg_w", None)
bg_raw = fetched.get("bg_raw", None)
fetched.update(
prediction=prediction,
targets=targets,
flat_obj=flat_obj,
background=background,
box=box,
flat_box=flat_box,
n_annotations=n_annotations,
annotations=annotations,
diff=diff,
xent=xent,
learned_bg=learned_bg,
bg_y=bg_y,
bg_x=bg_x,
bg_h=bg_h,
bg_w=bg_w,
bg_raw=bg_raw,
)
def _plot_reconstruction(self, updater, fetched):
    """Save one reconstruction figure per data point.

    Each figure has one row per timestep, with the panels drawn by
    ``_plot_helper`` (input, reconstruction, errors, bounding boxes,
    background, and optionally the raw learned background).
    """
    N, T, image_height, image_width, _ = fetched['inp'].shape
    print("Plotting for {} data points...".format(N))

    # The 8th column only exists when the background is learned.
    n_images = 8 if fetched['learned_bg'] else 7

    fig_unit_size = 4
    fig_height = T * fig_unit_size
    fig_width = n_images * fig_unit_size

    for n in range(N):
        # Fix: squeeze=False keeps `axes` 2-D even when T == 1.  With the
        # default squeeze=True a single-timestep rollout made `axes[t]` a
        # bare Axes object, and `_plot_helper` crashed indexing into it.
        fig, axes = plt.subplots(
            T, n_images, figsize=(fig_width, fig_height), squeeze=False)

        if fetched['prediction'] is not None:
            fig_title = "target={}, prediction={}".format(
                np.argmax(fetched['targets'][n]),
                np.argmax(fetched['prediction'][n]))
            fig.suptitle(fig_title, fontsize=16)

        for ax in axes.flatten():
            ax.set_axis_off()

        for t in range(T):
            self._plot_helper(n, t, axes[t], **fetched)

        plt.subplots_adjust(left=0.02, right=.98, top=.98, bottom=0.02, wspace=0.1, hspace=0.1)
        self.savefig("reconstruction/" + str(n), fig, updater)
def _plot_helper(
        self, n, t, axes, *, inp, output, diff, xent, background, flat_obj, flat_box,
        n_annotations, annotations, learned_bg, bg_y, bg_x, bg_h, bg_w, bg_raw, **kwargs):
    """Draw one timestep's row of reconstruction panels onto `axes`.

    Panels, in order: input, reconstruction, abs error, xent error, all
    predicted bounding boxes, boxes above the obj cutoff, background, and
    (when `learned_bg`) the raw learned background with its placement box.
    Designed to be called repeatedly on the same axes (e.g. from an
    animation), so stale rectangles are removed before redrawing.
    """
    N, T, image_height, image_width, _ = inp.shape
    lw = self.linewidth

    def safe_remove(obj):
        # Some matplotlib artists raise NotImplementedError from remove();
        # skip those rather than aborting the whole plot.
        try:
            obj.remove()
        except NotImplementedError:
            pass

    ax_inp = axes[0]
    self.imshow(ax_inp, inp[n, t])
    # Clear rectangles left over from a previous frame before redrawing.
    for obj in ax_inp.findobj(match=plt.Rectangle):
        safe_remove(obj)
    if t == 0:
        ax_inp.set_title('input')

    ax = axes[1]
    self.imshow(ax, output[n, t])
    if t == 0:
        ax.set_title('reconstruction')

    ax = axes[2]
    self.imshow(ax, diff[n, t])
    if t == 0:
        ax.set_title('abs error')

    ax = axes[3]
    self.imshow(ax, xent[n, t])
    if t == 0:
        ax.set_title('xent')

    ax_all_bb = axes[4]
    self.imshow(ax_all_bb, output[n, t])
    for obj in ax_all_bb.findobj(match=plt.Rectangle):
        safe_remove(obj)
    if t == 0:
        ax_all_bb.set_title('all bb')

    ax_proposed_bb = axes[5]
    self.imshow(ax_proposed_bb, output[n, t])
    for obj in ax_proposed_bb.findobj(match=plt.Rectangle):
        safe_remove(obj)
    if t == 0:
        ax_proposed_bb.set_title('proposed bb')

    ax = axes[6]
    self.imshow(ax, background[n, t])
    if t == 0:
        ax.set_title('background')

    # Plot proposed bounding boxes: edge color interpolates between
    # on/off colors according to the obj (presence) value; boxes whose
    # obj exceeds self.cutoff are additionally drawn on the "proposed" panel.
    for o, (top, left, height, width) in zip(flat_obj[n, t], flat_box[n, t]):
        color = o * self.on_color + (1-o) * self.off_color
        rect = patches.Rectangle(
            (left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none')
        ax_all_bb.add_patch(rect)

        if o > self.cutoff:
            rect = patches.Rectangle(
                (left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none')
            ax_proposed_bb.add_patch(rect)

    # Plot true bounding boxes (ground-truth annotations) on the input and
    # both bounding-box panels.
    for k in range(n_annotations[n]):
        # Annotation layout: (valid, _, _, top, bottom, left, right).
        valid, _, _, top, bottom, left, right = annotations[n, t, k]

        if not valid:
            continue

        height = bottom - top
        width = right - left

        rect = patches.Rectangle(
            (left, top), width, height, linewidth=lw, edgecolor=self.gt_color, facecolor='none')
        ax_inp.add_patch(rect)

        rect = patches.Rectangle(
            (left, top), width, height, linewidth=lw, edgecolor=self.gt_color, facecolor='none')
        ax_all_bb.add_patch(rect)

        rect = patches.Rectangle(
            (left, top), width, height, linewidth=lw, edgecolor=self.gt_color, facecolor='none')
        ax_proposed_bb.add_patch(rect)

    if learned_bg:
        ax = axes[7]
        # NOTE(review): bg_raw is indexed by n only (not [n, t]) —
        # presumably the raw background is static across timesteps; confirm.
        self.imshow(ax, bg_raw[n])
        for obj in ax.findobj(match=plt.Rectangle):
            safe_remove(obj)
        if t == 0:
            ax.set_title('raw_bg, y={:.2f}, x={:.2f}, h={:.2f}, w={:.2f}'.format(
                bg_y[n, t, 0], bg_x[n, t, 0], bg_h[n, t, 0], bg_w[n, t, 0]))

        # bg_y/bg_x are treated as centers in [-1, 1]; convert to a
        # top-left pixel corner, and scale bg_h/bg_w to pixel extents.
        height = bg_h[n, t, 0] * image_height
        top = (bg_y[n, t, 0] + 1) / 2 * image_height - height / 2

        width = bg_w[n, t, 0] * image_width
        left = (bg_x[n, t, 0] + 1) / 2 * image_width - width / 2

        rect = patches.Rectangle(
            (left, top), width, height, linewidth=lw, edgecolor="xkcd:green", facecolor='none')
        ax.add_patch(rect)
def _plot_patches(self, updater, fetched):
    """For each data point, save an animated mp4 showing, per object slot
    (h, w, b), the input glimpse, the rendered appearance (RGB), and its
    alpha channel, with the `_plot_helper` reconstruction panels below.
    """
    # Create a plot showing what each object is generating
    import matplotlib.pyplot as plt
    plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
    from matplotlib import animation
    import matplotlib.gridspec as gridspec
    from itertools import product

    N, T, image_height, image_width, _ = fetched['inp'].shape
    # H x W spatial grid of cells, B object slots ("boxes") per cell.
    H, W, B = updater.network.H, updater.network.W, updater.network.B

    glimpse = fetched['glimpse']
    appearance = fetched['appearance']
    obj = fetched['obj']
    z = fetched['z']

    fig_unit_size = 3
    # Top half (B*H rows) holds the per-slot patch grid; the bottom half
    # holds the eight larger reconstruction panels.
    fig_height = 2 * B * H * fig_unit_size
    fig_width = 3 * W * fig_unit_size

    for idx in range(N):
        fig = plt.figure(figsize=(fig_width, fig_height))
        time_text = fig.suptitle('', fontsize=20, fontweight='bold')

        gs = gridspec.GridSpec(2*B*H, 3*W, figure=fig)

        # Three axes (glimpse, appearance RGB, appearance alpha) per slot.
        axes = np.array([[fig.add_subplot(gs[i, j]) for j in range(3*W)] for i in range(B*H)])
        for ax in axes.flatten():
            ax.set_axis_off()

        # Eight 2x2-cell panels in the bottom half, consumed by _plot_helper.
        other_axes = []
        for i in range(2):
            for j in range(4):
                start_y = B*H + 2*i
                end_y = start_y + 2
                start_x = 2*j
                end_x = start_x + 2
                ax = fig.add_subplot(gs[start_y:end_y, start_x:end_x])
                other_axes.append(ax)
        other_axes = np.array(other_axes)

        for ax in other_axes.flatten():
            ax.set_axis_off()

        print("Plotting patches for {}...".format(idx))

        # Animation frame callback.  `axes`/`other_axes` are bound as
        # default arguments so each figure's closure keeps its own axes.
        def func(t, axes=axes, other_axes=other_axes):
            print("timestep {}".format(t))

            time_text.set_text('t = {}'.format(t))

            for h, w, b in product(range(H), range(W), range(B)):
                _obj = obj[idx, t, h, w, b, 0]
                _z = z[idx, t, h, w, b, 0]

                ax = axes[h * B + b, 3 * w]

                # Side bar outside the axes whose color interpolates between
                # the on/off colors according to the presence value.
                color = _obj * self.on_color + (1-_obj) * self.off_color
                obj_rect = patches.Rectangle(
                    (-0.2, 0), 0.2, 1, clip_on=False, transform=ax.transAxes, facecolor=color)
                ax.add_patch(obj_rect)

                if t == 0 and h == 0 and b == 0:
                    ax.set_title("w={}".format(w))
                if t == 0 and w == 0 and b == 0:
                    ax.set_ylabel("h={}".format(h))

                self.imshow(ax, glimpse[idx, t, h, w, b, :, :, :])

                ax = axes[h * B + b, 3 * w + 1]
                self.imshow(ax, appearance[idx, t, h, w, b, :, :, :3])
                # NOTE(review): the trailing `b` argument is ignored by this
                # two-placeholder format string.
                ax.set_title("obj={:.2f}, z={:.2f}".format(_obj, _z, b))

                ax = axes[h * B + b, 3 * w + 2]
                self.imshow(ax, appearance[idx, t, h, w, b, :, :, 3], cmap="gray")

            self._plot_helper(idx, t, other_axes, **fetched)

        plt.subplots_adjust(left=0.02, right=.98, top=.9, bottom=0.02, wspace=0.1, hspace=0.1)

        anim = animation.FuncAnimation(fig, func, frames=T, interval=500)

        path = self.path_for('patches/{}'.format(idx), updater, ext="mp4")
        anim.save(path, writer='ffmpeg', codec='hevc')

        plt.close(fig)
| [
"eric.crawford@mail.mcgill.ca"
] | eric.crawford@mail.mcgill.ca |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.