blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32cbc365f1bcb79e52d198486af3c6907efeebcc | 5a7c1e936c7233365ccc123fea40380c2e311004 | /utils/datasets/dataset_megadepth.py | 2cebcd0ee2d680c4b6599e7aa01cf3eacb447b71 | [
"MIT"
] | permissive | bgr1993/patch2pix | 588d664662bddab20e57df5e85b536cc6f21a956 | ad26ef065568eabf9a0bb6dc09f53462e9aeef36 | refs/heads/main | 2023-08-14T15:10:14.149924 | 2021-09-21T18:54:45 | 2021-09-21T18:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,668 | py | import os
from PIL import Image
import numpy as np
import torch.utils.data as data
from utils.datasets.preprocess import get_tuple_transform_ops
from utils.eval.measure import sampson_distance
from utils.eval.geometry import pose2fund
class ImMatchDatasetMega(data.Dataset):
    '''Data wrapper for train image-matching with triplets

    Serves MegaDepth (undistorted) positive image pairs with known relative
    pose and, in 'triplet' mode, a random negative image from a different
    scene.  Each item also carries the fundamental matrix F (plus the
    intrinsics rescaled to the resized images) relating src and pos images.
    '''
    def __init__(self, data_root, match_file, scene_list=None, wt=480, ht=320, item_type='triplet'):
        # data_root:  directory that contains the 'MegaDepth_undistort' folder
        # match_file: .npy file holding {scene: {'pairs': [...], 'ims': [...]}}
        # scene_list: optional subset of scenes to load (default: all scenes)
        # wt, ht:     target width / height every image is resized to
        # item_type:  'triplet' adds a negative sample; any other value skips it
        print('\nInitialize ImMatchDatasetMega...')
        self.dataset = 'MegaDepth_undistort'
        self.data_root = os.path.join(data_root, self.dataset)
        self.match_file = match_file
        self.transform_ops = get_tuple_transform_ops(resize=(ht, wt), normalize=True)
        self.wt, self.ht = wt, ht
        self.item_type = item_type
        # Initialize data
        self.ims = {}  # {scene: {im: imsize}}
        self.pos_pair_pool = []  # [pair]
        self.load_pairs(scene_list)
        self.Fs = {}  # cache: (im1, im2) -> (F, K1, K2), filled lazily by get_fundmat
        self.Ks = {}  # not used in this class

    def load_im(self, im_ref, crop=None):
        """Open the image at *im_ref*; if *crop* = (dw, dh) is given, cut that
        many pixels off the right and bottom edges before returning a PIL image."""
        im = Image.open(im_ref)
        if crop:
            dw, dh = crop
            im = np.array(im)
            # Crop from right and bottom to keep the target aspect ratio
            h, w, _ = im.shape
            im = im[0: h - int(dh), 0: w - int(dw)]
            #print(h, w, im.shape)
            im = Image.fromarray(im)
        return im

    def load_pairs(self, scene_list=None):
        """Populate self.scenes, self.ims and self.pos_pair_pool from the match file."""
        match_dict = np.load(self.match_file, allow_pickle=True).item()
        self.scenes = scene_list if scene_list else match_dict.keys()
        print('Loading data from {}'.format(self.match_file))
        num_ims = 0
        for sc in self.scenes:
            self.pos_pair_pool += match_dict[sc]['pairs']
            self.ims[sc] = match_dict[sc]['ims']
            num_ims += len(match_dict[sc]['ims'])
        print('Loaded scenes {} ims: {} pos pairs:{}'.format(len(self.scenes), num_ims, len(self.pos_pair_pool)))

    def get_fundmat(self, pair, im1, im2):
        """Return (F, K1, K2) for *pair*, with intrinsics rescaled from the
        original image sizes (im1/im2, pre-resize) to (self.wt, self.ht).
        Results are memoized in self.Fs keyed by the pair's image paths."""
        def scale_intrinsic(K, wi, hi):
            # Rescale focal lengths / principal point for the wt x ht resize.
            sx, sy = self.wt / wi, self.ht / hi
            sK = np.array([[sx, 0, 0],
                           [0, sy, 0],
                           [0, 0, 1]])
            return sK.dot(K)
        pair_key = (pair.im1, pair.im2)
        if pair_key not in self.Fs:
            # Recompute camera intrinsic matrix due to the resize
            K1 = scale_intrinsic(pair.K1, im1.width, im1.height)
            K2 = scale_intrinsic(pair.K2, im2.width, im2.height)
            # Calculate F
            F = pose2fund(K1, K2, pair.R, pair.t)
            self.Fs[pair_key] = (F, K1, K2)
            # Sanity check
            # scale = np.array([[im1.width/self.wt, im1.height/self.ht, im2.width/self.wt, im2.height/self.ht]])
            # matches = pair.sanity_matches * scale
            # dists = sampson_distance(matches[:, :2], matches[:,2:], F)
            # print(np.mean(dists))
        return self.Fs[pair_key]

    def __getitem__(self, index):
        """
        Batch dict:
            - 'src_im': anchor image
            - 'pos_im': positive image sample to the anchor
            - 'neg_im': negative image sample to the anchor
            - 'im_pair_refs': path of images (src, pos, neg)
            - 'F', 'K1', 'K2': fundamental matrix and scaled intrinsics
              relating src and pos (computed for the resized images).
        """
        data_dict = {}
        # Load positive pair data
        pair = self.pos_pair_pool[index]
        im_src_ref = os.path.join(self.data_root, pair.im1)
        im_pos_ref = os.path.join(self.data_root, pair.im2)
        im_src = self.load_im(im_src_ref, crop=pair.crop1)
        im_pos = self.load_im(im_pos_ref, crop=pair.crop2)
        # Select a negative image from other scenes
        if self.item_type == 'triplet':
            other_scenes = list(self.scenes)
            # Scene id is the first path component of the anchor image.
            other_scenes.remove(pair.im1.split('/')[0])
            neg_scene = np.random.choice(other_scenes)
            im_neg_data = np.random.choice(self.ims[neg_scene])
            im_neg_ref = os.path.join(self.data_root, im_neg_data.name)
            im_neg = self.load_im(im_neg_ref, crop=im_neg_data.crop)
            im_neg = self.transform_ops([im_neg])[0]
            #print(im_neg.shape)
        else:
            im_neg = None
            im_neg_ref = None
        # Compute fundamental matrix before RESIZE (needs original sizes)
        F, K1, K2 = self.get_fundmat(pair, im_src, im_pos)
        # Process images
        im_src, im_pos = self.transform_ops((im_src, im_pos))
        #print(im_src.shape, im_pos.shape)
        # Wrap data item
        data_dict = {'src_im': im_src,
                     'pos_im': im_pos,
                     'neg_im': im_neg,
                     'im_pair_refs': (im_src_ref, im_pos_ref, im_neg_ref),
                     'F': F,
                     'K1': K1,
                     'K2': K2
                     }
        return data_dict

    def __len__(self):
        # One item per positive pair.
        return len(self.pos_pair_pool)

    def __repr__(self):
        fmt_str = 'ImMatchDatasetMega scenes:{} data type:{}\n'.format(len(self.scenes), self.item_type)
        fmt_str += 'Number of data pairs: {}\n'.format(self.__len__())
        fmt_str += 'Image root location: {}\n'.format(self.data_root)
        fmt_str += 'Match file: {}\n'.format(self.match_file)
        fmt_str += 'Transforms: {}\n'.format(self.transform_ops.__repr__().replace('\n', '\n '))
        return fmt_str
| [
"jennyzqj00@gmail.com"
] | jennyzqj00@gmail.com |
39bc48f8e547d77650dab9624fbf2778bf929f02 | f440dcbd85d8f33540a1efea5a922e759bb1b8ba | /tests/test_mappings.py | 5f5b4d6ecb5b86b2fa4f54fc8f45e8f99bf6b128 | [
"MIT"
] | permissive | isabella232/mangrove | 652e74826c4598830e607bc5d25688f26ecff334 | 234eff15aaeae0064594fd99afe614f436c5d305 | refs/heads/master | 2023-03-16T14:35:49.821537 | 2014-03-20T14:18:21 | 2014-03-20T14:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py | import pytest
import boto
from concurrent.futures import ThreadPoolExecutor
from moto import mock_s3
from mangrove.mappings import ConnectionsMapping
class TestConnectionsMapping:
    """Behavioral tests for ConnectionsMapping's lazy-future semantics
    and its ``default`` connection property."""

    def test_getitem_evaluates_future_connexion(self):
        # A stored Future must be resolved transparently on lookup.
        executor = ThreadPoolExecutor(max_workers=1)
        future = executor.submit(lambda: 1 + 1)
        mapping = ConnectionsMapping()
        mapping['eu-west-1'] = future
        result = mapping['eu-west-1']  # __getitem__ should unwrap the Future
        assert isinstance(result, int)
        assert result == 2

    def test_getitem_protects_its_classic_behavior_with_common_types(self):
        # Plain (non-Future) values pass through __getitem__ untouched.
        mapping = ConnectionsMapping()
        mapping['eu-west-1'] = 2
        result = mapping['eu-west-1']
        assert isinstance(result, int)
        assert result == 2

    def test_set_default_with_not_existing_connection_raises(self):
        # Naming an unknown region as default is rejected.
        mapping = ConnectionsMapping()
        with pytest.raises(ValueError):
            mapping.default = 'abc 123'

    def test_set_default_with_invalid_type_raises(self):
        # The default must be referenced by name (a string), not any other type.
        mapping = ConnectionsMapping()
        with pytest.raises(TypeError):
            mapping.default = 123

    @mock_s3
    def test_set_default_with_valid_connection(self):
        # A registered connection can be promoted to default by name.
        mapping = ConnectionsMapping()
        mapping['eu-west-1'] = boto.connect_s3()
        mapping.default = 'eu-west-1'
        assert mapping._default_name is not None
        assert mapping._default_name == 'eu-west-1'
        assert mapping.default is not None
        assert isinstance(mapping.default, boto.connection.AWSAuthConnection)
| [
"tcrevon@gmail.com"
] | tcrevon@gmail.com |
776c061485addbfd028ca7f580793265843308cf | 39d2fa06fa28617116295f71b2081a392859ccab | /captcha/tests/views.py | 230d4785892e765ddb95b9959f11197a3b075676 | [] | no_license | rezaduty/Django-Bookstore | f535d636532e6a514fc55434bb86214f837525a9 | 94b8ca1fc263fd6b952b5efa9696b2c31843248f | refs/heads/master | 2021-01-19T00:15:01.086788 | 2018-04-07T14:17:16 | 2018-04-07T14:17:16 | 63,248,185 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,642 | py | from django import forms
from captcha.fields import CaptchaField
from django.http import HttpResponse
from django.contrib.auth.models import User
from six import u
import django
# Feature-detect the template API: Django >= 1.8 exposes ``engines``; on
# older versions we fall back to ``loader.get_template_from_string``.
try:
    from django.template import engines, RequestContext
    __is_18 = True
except ImportError:
    from django.template import RequestContext, loader
    __is_18 = False
TEST_TEMPLATE = r'''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8">
<title>captcha test</title>
</head>
<body>
{% if passed %}
<p style="color:green">Form validated</p>
{% endif %}
{% if form.errors %}
{{form.errors}}
{% endif %}
<form action="{% url 'captcha-test' %}" method="post">
{{form.as_p}}
<p><input type="submit" value="Continue →"></p>
</form>
</body>
</html>
'''
def _get_template(template_string):
    """Compile *template_string* with whichever template API this Django has."""
    if not __is_18:
        # Pre-1.8 path: the old loader-based string compiler.
        return loader.get_template_from_string(template_string)
    return engines['django'].from_string(template_string)
def _test(request, form_class):
    """Render *form_class* via TEST_TEMPLATE; on POST, validate it first.

    ``passed`` is True only when POST data was present and valid.
    """
    passed = False
    if request.POST:
        form = form_class(request.POST)
        if form.is_valid():
            passed = True
    else:
        form = form_class()
    t = _get_template(TEST_TEMPLATE)
    # Django >= 1.9 templates accept the context dict and request directly;
    # older versions require wrapping both in a RequestContext.
    if django.VERSION >= (1, 9):
        return HttpResponse(
            t.render(context=dict(passed=passed, form=form), request=request))
    else:
        return HttpResponse(
            t.render(RequestContext(request, dict(passed=passed, form=form))))
# Each view below builds a throwaway form exercising one CaptchaField
# feature and hands it to _test() for rendering/validation.  Field
# declaration order is significant (it controls rendered output).

def test(request):
    """Baseline: a plain Form with a required CaptchaField."""
    class CaptchaTestForm(forms.Form):
        subject = forms.CharField(max_length=100)
        sender = forms.EmailField()
        captcha = CaptchaField(help_text='asdasd')
    return _test(request, CaptchaTestForm)

def test_model_form(request):
    """CaptchaField on a ModelForm (bound to the auth User model)."""
    class CaptchaTestModelForm(forms.ModelForm):
        subject = forms.CharField(max_length=100)
        sender = forms.EmailField()
        captcha = CaptchaField(help_text='asdasd')
        class Meta:
            model = User
            fields = ('subject', 'sender', 'captcha', )
    return _test(request, CaptchaTestModelForm)

def test_custom_error_message(request):
    """Overriding the 'invalid' error message of the field."""
    class CaptchaTestErrorMessageForm(forms.Form):
        captcha = CaptchaField(
            help_text='asdasd',
            error_messages=dict(invalid='TEST CUSTOM ERROR MESSAGE')
        )
    return _test(request, CaptchaTestErrorMessageForm)

def test_per_form_format(request):
    """Per-field output_format overriding the widget's default layout."""
    class CaptchaTestFormatForm(forms.Form):
        captcha = CaptchaField(
            help_text='asdasd',
            error_messages=dict(invalid='TEST CUSTOM ERROR MESSAGE'),
            output_format=(
                u(
                    '%(image)s testPerFieldCustomFormatString '
                    '%(hidden_field)s %(text_field)s'
                )
            )
        )
    return _test(request, CaptchaTestFormatForm)

def test_non_required(request):
    """A CaptchaField that may be left empty (required=False)."""
    class CaptchaTestForm(forms.Form):
        sender = forms.EmailField()
        subject = forms.CharField(max_length=100)
        captcha = CaptchaField(help_text='asdasd', required=False)
    return _test(request, CaptchaTestForm)

def test_id_prefix(request):
    """Two captcha fields on one form, disambiguated by id_prefix."""
    class CaptchaTestForm(forms.Form):
        sender = forms.EmailField()
        subject = forms.CharField(max_length=100)
        captcha1 = CaptchaField(id_prefix="form1")
        captcha2 = CaptchaField(id_prefix="form2")
    return _test(request, CaptchaTestForm)
| [
"mohamad.bax98@yahoo.com"
] | mohamad.bax98@yahoo.com |
74b0c9b69b1067d525471ffd7dbfebf2967a12de | 1f6c73435d6f5f3f388735e34e57d89296c515b8 | /users/models.py | 5a73567ce12fb0c331e013880fe496ed9a91ffe0 | [] | no_license | ugurhmz/Fantom-Blog-Volkan-Atis | aece4f1385a301cee93cfa3b62d88969c06a5f0a | 08e17d118dae06a53fe0f5bdad947f8bd37c0a65 | refs/heads/master | 2023-04-02T01:17:08.954711 | 2021-03-30T03:49:14 | 2021-03-30T03:49:14 | 350,883,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | from PIL import Image
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
from django.template.defaultfilters import slugify
class UserProfile(models.Model):
    """Per-user profile: birthday, bio and an avatar that is shrunk on save."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    birth_day = models.DateField(null=True, blank=True)
    bio = models.TextField(max_length=1000, blank=True)
    image = models.ImageField(blank=True, null=True, default='users/django.png', upload_to='users', max_length=250)
    slug = models.SlugField(unique=True, editable=False)

    def save(self, *args, **kwargs):
        """Slugify the username, persist, then bound the avatar to 200x200.

        thumbnail() resizes in place preserving aspect ratio, so the image
        is only rewritten when it exceeds the limit.
        """
        self.slug = slugify(self.user.username)
        super(UserProfile, self).save(*args, **kwargs)
        # BUGFIX: the field allows blank/null, so guard before touching
        # ``self.image.path`` -- previously a profile without an image file
        # crashed here on save.
        if not self.image:
            return
        img = Image.open(self.image.path)
        if img.height > 200 or img.width > 200:
            new_size = (200, 200)
            img.thumbnail(new_size)
            img.save(self.image.path)

    def __str__(self):
        return self.user.username
# Automatically create a profile for every newly registered user.
def create_user_profile(sender, instance, created, **kwargs):
    """post_save receiver: attach a fresh UserProfile to new auth users only."""
    if not created:
        return
    UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=settings.AUTH_USER_MODEL)
| [
"craxx67@gmail.com"
] | craxx67@gmail.com |
240219ff4877b199e6a92ef75a653f18e431bf3a | 4c8eaccc3b55565c2b0eddac75d713b6d60d98a8 | /onnx_tf/handlers/frontend/log.py | b53635f6c0386a2b6a74d6e7fc63f11d777a98b3 | [
"Apache-2.0"
] | permissive | parth1595/onnx-tensorflow | a3b96b7513754d9f734874d5cdb00c60312a60eb | 060e5cdb8fd436152327527811d29a0b957c3138 | refs/heads/master | 2020-04-03T19:34:57.268392 | 2018-10-29T20:16:27 | 2018-10-29T20:16:27 | 155,527,882 | 1 | 0 | Apache-2.0 | 2018-10-31T09:03:09 | 2018-10-31T09:03:09 | null | UTF-8 | Python | false | false | 463 | py | from onnx_tf.handlers.frontend_handler import FrontendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_op
from .math_mixin import BasicMathMixin
@onnx_op("Log")
@tf_op("Log")
class Log(BasicMathMixin, FrontendHandler):
@classmethod
def version_1(cls, node, **kwargs):
return cls.basic_math_op(node, **kwargs)
@classmethod
def version_6(cls, node, **kwargs):
return cls.basic_math_op(node, **kwargs)
| [
"noreply@github.com"
] | noreply@github.com |
91357c211e5073d5b50569facfbbda0b406a9886 | 167c6226bc77c5daaedab007dfdad4377f588ef4 | /python/ql/test/library-tests/variables/scopes/test.py | 940576d44dfe9eff4e4399fd52b40809619cecb7 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | github/codeql | 1eebb449a34f774db9e881b52cb8f7a1b1a53612 | d109637e2d7ab3b819812eb960c05cb31d9d2168 | refs/heads/main | 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 | MIT | 2023-09-14T19:36:50 | 2018-07-31T16:35:51 | CodeQL | UTF-8 | Python | false | false | 987 | py |
# Fixture for variable-scope analysis: each definition exercises one binding
# pattern (globals, locals, closures, class scope, comprehensions).  Several
# names (``base``, ``global_local``, ``seq`` in func6) are deliberately
# unbound -- this file is analyzer test input, not runnable code.
global0 = 0
global1 = 1
def func0(param0, param1):
    # Parameters only.
    return param0 + param1
def func1():
    # ``global`` statement covering a name that is never defined at module level.
    global global0, global_local
    local0 = 0
    local1 = 1
    global_local
    global0 = local0 + local1 + global1
def func2():
    # Simple closure: inner1 captures ``local2`` from the enclosing frame.
    local2 = 2
    def inner1(param2):
        local3 = local2
        return local3
    return inner1
def func3(param4, param5):
    # Two-level nesting; inner2 reads from both enclosing scopes, including
    # ``local5`` which is assigned only after inner2 is defined (legal: the
    # cell is bound before inner2 is called).
    local4 = 4
    def inner_outer():
        def inner2(param3):
            return local5 + local4 + param3 + param4
        local5 = 3
        return inner2(local4 + param4 + param5)
class C(base):
    # Class-body scope (``base`` intentionally undefined here).
    class_local = 7
    def meth(self):
        mlocal = self
        return mlocal
def func4(param6):
    # Class defined inside a function; its method closes over ``param6``.
    class Local:
        def meth_inner(self):
            return param6
    return Local()
def func5(seq):
    # Comprehension target ``x`` is local to the comprehension in Python 3.
    return [x for x in seq]
def func6(y, z):
    # ``seq`` is intentionally unbound; ``y`` is shadowed by the comprehension.
    return [y+z for y in seq]
#FP observed in sembuild
def use_in_loop(seq):
    [v for v in range(3)]
    for v in seq:
        v #x redefined -- fine in 2 and 3.
| [
"mark@hotpy.org"
] | mark@hotpy.org |
bc66ab3d60470cc43b19347d3be1f0136d69ad59 | 744a9a1f3fe7bb2b067c694ca7e598393954b96d | /game_server.py | e201df50d69823ded5e59cacdb5251575c85629a | [] | no_license | josephdiniso/PyPong | 7970f704b14bf90e4c02dc6591e0c6408e0bf779 | a751e305a714361b491ecd0624abc62c22f02dcf | refs/heads/master | 2022-10-25T03:55:57.815029 | 2020-06-14T20:41:46 | 2020-06-14T20:41:46 | 268,941,669 | 0 | 0 | null | 2020-06-06T14:05:21 | 2020-06-03T00:56:34 | Python | UTF-8 | Python | false | false | 2,346 | py | #!/usr/bin/env python3
import socket
import pickle
import threading
import sys
import os
class Socket():
    """Two-player pong relay server.

    Accepts exactly two TCP clients; the first is told it controls the left
    paddle ('L'), the second the right ('R').  One receiver thread per client
    updates the shared paddle/ball state, and a single sender thread
    broadcasts that state to both clients forever.
    NOTE(review): shared fields are written without locks; pickle over the
    network is unsafe if peers are untrusted.
    """
    def __init__(self):
        self.clients = []
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.settimeout(20)
        # Allow quick restart on the same port.
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        host_name = socket.gethostname()
        self.server_IP_address = socket.gethostbyname(host_name)
        print("Server Ipv4: " + self.server_IP_address)
        self.port = 5555
        self.s.bind((self.server_IP_address, self.port))
        print("socket binded to %s" %(self.port))
        self.s.listen(100)
        # Shared game state: paddle positions and ball coordinates.
        self.pos_left = 250
        self.pos_right = 250
        self.ball_x = 250
        self.ball_y = 250
        print("socket is listening")
        first = True
        # Block until two distinct clients have connected.
        while len(self.clients)<2:
            c, addr = self.s.accept()
            c.setblocking(1)
            if c not in self.clients:
                self.clients.append(c)
                if first:
                    c.send(pickle.dumps('L'))
                    first = False
                else:
                    c.send(pickle.dumps('R'))
                # Start a dedicated receiver for this client.
                threading.Thread(target=self.recv_msg, args=(c,addr)).start()
        # Both players present: start the single broadcast loop.
        threading.Thread(target=self.send_msg).start()
    def recv_msg(self, c, addr):
        """Per-client loop: receive pickled [left, right, ball_x, ball_y] updates.

        NOTE(review): when the peer disconnects recv() returns b'' forever and
        this loop busy-spins in the ``else: pass`` branch -- it should break.
        The bare ``except`` also silently drops malformed packets.
        """
        while(1):
            data = c.recv(4096)
            if data:
                try:
                    data = pickle.loads(data)
                    # The second-connected client owns the right paddle.
                    if self.clients.index(c) == 1:
                        self.pos_right = int(data[1])
                    else:
                        self.pos_left = int(data[0])
                    self.ball_x = int(data[2])
                    self.ball_y = int(data[3])
                except:
                    pass
            else:
                pass
    def send_msg(self):
        """Broadcast the current [left, right, ball_x, ball_y] state forever."""
        while 1:
            msg = [self.pos_left, self.pos_right, self.ball_x, self.ball_y]
            msg = pickle.dumps(msg)
            for client in self.clients:
                client.send(msg)
    # def remove(self, connection):
    #     if connection in self.clients: remove(self.clients)
def main():
    """Entry point: construct the relay server (blocks until both clients join)."""
    Socket()


if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
eeff7aa25e3673469b0ac401e1cdec5018a9db45 | 58f2aa32360a7d09f095794e606b44cbdde79cfa | /pelican/bin/pelican-quickstart | 4500bea86bb9f942a526c210573d87f192eaeb5e | [] | no_license | langhai/pelican | 14ed4ceb8d73afe8351b4aa46baebdabc3cde80c | 7ac9024099e8b0a9894264c6c204abe2567bf343 | refs/heads/master | 2020-12-24T14:18:49.106864 | 2013-09-16T20:11:42 | 2013-09-16T20:11:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | #!/home/bestwc/Developer/pelican/pelican/bin/python2
# EASY-INSTALL-ENTRY-SCRIPT: 'pelican==3.1.1','console_scripts','pelican-quickstart'
__requires__ = 'pelican==3.1.1'
import sys
from pkg_resources import load_entry_point
sys.exit(
load_entry_point('pelican==3.1.1', 'console_scripts', 'pelican-quickstart')()
)
| [
"hailang@apu.edu.my"
] | hailang@apu.edu.my | |
a786bf475cbf48015a192eb7d585e6b6014972f7 | f287086f47e004c8e7e018c686f8937133fe43b5 | /scripts/drone_motion.py | 9e5db80a6b91c52a1b1699ef6fa2fe178844332c | [] | no_license | afaroo01/drone_gazebo | 838f5b7679787811e67deb82cb2df26f4954fb76 | ad138e698384b17277cff402d69b7a34001696e6 | refs/heads/master | 2023-06-29T20:48:51.942674 | 2021-07-24T14:12:31 | 2021-07-24T14:12:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,332 | py | #! /usr/bin/env python
import rospy
from gazebo_msgs.msg import ModelState, ModelStates
from geometry_msgs.msg import Pose, Twist
import message_filters
import math
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import sys
import ast
# Motion parameters (m/s and rad/s respectively).
linear_velocity = 0.3
angular_velocity = 0.8
# Index of the waypoint currently being approached.
waypoints_idx = 0;
# Reusable Gazebo command messages, mutated in callback() on every update.
target_state = ModelState()
target_twist = Twist()
target_pose = Pose()
# One-shot latches so each rotate/translate command is published only once.
once_rotation = False
once_motion = False
first_time = True
first_time_meet = True
def callback(all_states):
    """Gazebo model-states callback: drive the target model along ``waypoints``.

    State machine per waypoint: (1) rotate in place until facing the goal,
    (2) translate toward it, (3) on arrival snap the pose, advance the
    waypoint index (wrapping around) and reset the one-shot latches.
    Relies on module globals: waypoints, target_model_name, pub, velocities.
    """
    # NOTE(review): ``correct_angle_once`` is declared global but never
    # assigned anywhere visible.
    global waypoints_idx, once_rotation, once_motion, first_time, correct_angle_once, first_time_meet
    idx = 0
    drone_idx = 0
    # Locate our model in the model_states arrays.
    for name in all_states.name:
        if (name == target_model_name):
            break
        idx += 1
    current_pose = all_states.pose[idx]
    cx, cy, cz, crx, cry, crz, crw = current_pose.position.x, current_pose.position.y, current_pose.position.z,\
        current_pose.orientation.x, current_pose.orientation.y, current_pose.orientation.z, current_pose.orientation.w
    (croll, cpitch, cyaw) = euler_from_quaternion([crx, cry, crz, crw])
    # if (target_model_name[0]=="p"):
    #     if (cyaw > -math.pi/2):
    #         cyaw -= math.pi/2
    #     else:
    #         cyaw = (2*math.pi + (cyaw-math.pi/2))
    # One-time initialization: teleport the model to the first waypoint
    # ("p..." models, presumably person actors, get z fixed at 2 -- confirm).
    if (waypoints_idx == 0 and first_time):
        if (target_model_name[0]=="p"):
            current_pose.position.x, current_pose.position.y, current_pose.position.z = waypoints[0][0], waypoints[0][1], 2
        else:
            current_pose.position.x, current_pose.position.y = waypoints[0][0], waypoints[0][1]
        current_pose.orientation.x, current_pose.orientation.y, current_pose.orientation.z, current_pose.orientation.w = 0, 0, 0, 1
        target_pose = current_pose
        target_twist.linear.x = 0
        target_twist.linear.y = 0
        target_twist.linear.z = 0
        target_twist.angular.z = 0
        target_state.pose = target_pose
        target_state.twist = target_twist
        target_state.model_name = target_model_name
        first_time = False
    # First move the orientation:
    goal_waypoint = waypoints[waypoints_idx]
    gx, gy = goal_waypoint[0], goal_waypoint[1]
    dx, dy = gx-cx, gy-cy
    gyaw = math.atan2(dy, dx)
    # gyaw += math.pi/2
    # cyaw -= math.pi/2
    dyaw = gyaw - cyaw
    # Sign of the required rotation (epsilon avoids division by zero).
    angular_velocity_direction = (dyaw/(abs(dyaw)+1e-9))
    # if (gyaw > -math.pi/2):
    #     gyaw -= math.pi/2
    # else:
    #     gyaw = (2*math.pi + (gyaw-math.pi/2))
    # Take the short way around when the raw delta exceeds pi.
    if (abs(dyaw) > math.pi):
        dyaw = (2*math.pi - abs(dyaw)) * -angular_velocity_direction
        angular_velocity_direction = -angular_velocity_direction
    complete_rotation = abs(dyaw) < 0.05
    delta = math.sqrt((dx**2 + dy**2))
    # complete_motion = delta < 0.1
    complete_motion = delta < 0.05
    if (not complete_rotation):
        # rotate: publish the angular command once, then wait for alignment.
        if (once_rotation == False):
            target_twist.linear.x = 0
            target_twist.linear.y = 0
            target_twist.angular.z = angular_velocity_direction * angular_velocity
            q = quaternion_from_euler(0, 0, cyaw)
            target_pose = current_pose
            target_pose.orientation.x, target_pose.orientation.y, target_pose.orientation.z, target_pose.orientation.w = q[0], q[1], q[2], q[3]
            target_state.pose = target_pose
            target_state.twist = target_twist
            target_state.model_name = target_model_name
            once_rotation = True
            # Publish once a subscriber is listening.
            while not rospy.is_shutdown():
                connections = pub.get_num_connections()
                if (connections > 0):
                    pub.publish(target_state)
                    break
                rospy.Rate(10).sleep()
        return
    if (not complete_motion):
        # translate: publish the linear command once, then wait for arrival.
        if (once_motion == False):
            target_twist.linear.x = dx/delta * linear_velocity
            target_twist.linear.y = dy/delta *linear_velocity
            target_twist.angular.z = 0
            target_pose = current_pose
            target_state.pose = target_pose
            target_state.twist = target_twist
            target_state.model_name = target_model_name
            once_motion = True
            while not rospy.is_shutdown():
                connections = pub.get_num_connections()
                if (connections > 0):
                    pub.publish(target_state)
                    break
                rospy.Rate(10).sleep()
        return
    # Arrived: snap the pose onto the waypoint, stop, advance (with wrap).
    first_time_meet = True
    target_pose = Pose()
    target_pose.position.x, target_pose.position.y, target_pose.position.z = gx, gy, 2
    q = quaternion_from_euler(0, 0, gyaw)
    target_pose.orientation.x, target_pose.orientation.y, target_pose.orientation.z, target_pose.orientation.w = q[0], q[1], q[2], q[3]
    target_state.pose = target_pose
    target_twist.linear.x = 0
    target_twist.linear.y = 0
    target_twist.angular.z = 0
    target_state.twist = target_twist
    target_state.model_name = target_model_name
    pub.publish(target_state)
    waypoints_idx += 1
    if (waypoints_idx == len(waypoints)):
        waypoints_idx = 0
    once_motion = False
    once_rotation = False
rospy.init_node("field_motion", anonymous=True)

# Usage: drone_motion.py <model_name> "<waypoint_list>"
#   e.g. drone_motion.py person_walking "[(5, 5), (5, -6), (-2, -8)]"
args = rospy.myargv(argv=sys.argv)
if len(args) != 3:
    print("Please enter correct model name and waypoints")
    # BUGFIX: previously execution fell through and crashed on args[1];
    # abort explicitly with a non-zero status instead.
    sys.exit(1)
target_model_name = args[1]
# The waypoint list arrives as a Python literal string; literal_eval parses
# it safely (no arbitrary code execution, unlike eval).
waypoints = ast.literal_eval(args[2])
print(waypoints)

# BUGFIX: create the publisher *before* registering the subscriber --
# callback() references the global ``pub`` and may fire as soon as the
# subscription is active.
pub = rospy.Publisher("/gazebo/set_model_state", ModelState, queue_size=10)
rospy.Subscriber("/gazebo/model_states", ModelStates, callback)
rospy.spin()
"zhefanx@andrew.cmu.edu"
] | zhefanx@andrew.cmu.edu |
cb16a65fddaf90da83c8635395f50dcbd00f2645 | d9b82d91c5feb77238c6852742af6455a5f77d90 | /rip/hashtable.py | 36dd2ab22e3249928ac3d234b66515a228baa03f | [
"MIT"
] | permissive | antonioan/repeat-free-py | bf114e39c531cb5554f7bcf346695991958c87ff | 4641f9bdaef2adb56e3aa2ec8f45aa3a487b1617 | refs/heads/master | 2022-04-12T23:47:30.002807 | 2020-04-09T02:02:06 | 2020-04-09T02:02:06 | 241,734,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,635 | py | # Source: https://github.com/Darthfett/Hashtable/blob/master/Hashtable.py
import abc
import math
from typing import List, Callable
from rip.linked_list import *
class Hash:
    """Stateless hash/probing helpers shared by the tables below."""

    @staticmethod
    def division_hash(key, size) -> int:
        """Division method: key modulo table size."""
        return key % size

    @staticmethod
    def auxiliary_hash_quad(key, size) -> int:
        """Multiplication method using Knuth's constant ~= (sqrt(5)-1)/2."""
        golden = 0.618
        fractional = (key * golden) % 1
        return int(math.floor(size * fractional))

    @staticmethod
    def linear_hash(key, i, size) -> int:
        """Linear probing: base slot shifted by the probe number *i*."""
        base = Hash.auxiliary_hash_quad(key, size)
        return (base + i) % size

    @staticmethod
    def auxiliary_hash_double(key, size) -> int:
        """Secondary step size for double hashing; never returns zero."""
        return 1 + (key % (size - 1))

    @staticmethod
    def quadratic_hash(key, i, size) -> int:
        """Quadratic probing with coefficients c1 = c2 = 0.5."""
        base = Hash.auxiliary_hash_quad(key, size)
        return int((base + 0.5 * i + 0.5 * i * i) % size)

    @staticmethod
    def double_hash(key, i, size) -> int:
        """Double hashing: primary slot advanced by *i* secondary-hash steps."""
        step = Hash.auxiliary_hash_double(key, size)
        return int((Hash.division_hash(key, size) + i * step) % size)
class ChainedHashtable:
    """ Chained Hashtable
    A linked list of Keys and Values are stored in the links array, which holds a linked list of all mapped values

    Collisions are resolved by chaining: each bucket is a LinkedList of
    (key, value) Links.  ``pre_hash`` converts arbitrary keys to ints
    before the division hash (defaults to the object's __hash__).
    """
    def __init__(self, size=32, pre_hash: Optional[Callable] = None):
        self.size: int = size
        self.links: List[Optional[LinkedList]] = [None] * self.size
        self.pre_hash = pre_hash if pre_hash else lambda x: x.__hash__()

    def get(self, key):
        """Return the value mapped to *key*, or None when absent."""
        llist = self.links[self.hash(key)]
        if llist is None:
            return None
        cur_node = llist.head
        while cur_node is not None:
            if cur_node.key == key:
                return cur_node.value
            else:
                cur_node = cur_node.next
        return None

    def __getitem__(self, item):
        return self.get(item)

    def search(self, key):
        """Return a diagnostic trace: the bucket index followed by each chain
        value visited until *key* is found (or the chain is exhausted)."""
        llist = self.links[self.hash(key)]
        if llist is None:
            return str(self.hash(key))
        search_result = ""
        cur_node = llist.head
        search_result += str(self.hash(key)) + " "
        while cur_node is not None:
            search_result += str(cur_node.value) + " "
            if cur_node.key == key:
                return search_result
            else:
                cur_node = cur_node.next
        return search_result

    def put(self, key, value):
        """Insert or overwrite the mapping key -> value.

        NOTE(review): the return value is inconsistent -- None for a fresh
        bucket, the existing node on overwrite, the new Link on chain append.
        """
        llist = self.links[self.hash(key)]
        if llist is None:
            # Empty bucket: start a new chain.
            node = Link(key=key, value=value)
            llist = LinkedList(head=node)
            self.links[self.hash(key)] = llist
            return
        cur_node = llist.head
        while cur_node is not None:
            if cur_node.key == key:
                # Key already present: overwrite in place.
                cur_node.value = value
                return cur_node
            else:
                cur_node = cur_node.next
        # Key not in chain: append a new link.
        link = Link(key=key, value=value)
        llist.push(link)
        return link

    def __setitem__(self, key, value):
        self.put(key, value)

    def insert(self, value):
        # Convenience: use the value as its own key.
        self.put(value, value)

    def hash(self, key):
        """Map *key* to a bucket index via pre_hash + division hashing."""
        return Hash.division_hash(self.pre_hash(key) if self.pre_hash else key, self.size)

    def __str__(self):
        # One line per bucket: "<index>\t<chain or empty>".
        lines = []
        for i in range(len(self.links)):
            if self.links[i] is None:
                lines.append("" + str(i) + "\t")
            else:
                lines.append("" + str(i) + "\t" + str(self.links[i]))
        return "\n".join(lines)
# region Associative Hashtables
class Entry:
    """Key/value pair occupying one slot of an open-addressing table.

    Used by every hashtable except ChainedHashtable (which stores Links).
    """

    def __init__(self, key=0, value=0):
        self.key, self.value = key, value

    def __str__(self):
        # Slots render as their stored value only.
        return str(self.value)
class AssociativeHashtable:
    """ Associative Hashtable
    Keys and Values are stored in an associative array, probed for values by some associative hash function

    Base class for the open-addressing tables below; subclasses supply the
    probe sequence via ``hash(key, i)``.
    NOTE(review): ``@abc.abstractmethod`` is not enforced here because the
    class does not inherit abc.ABC / use ABCMeta, so direct instantiation
    is not blocked.
    """
    def __init__(self, size=32):
        self.size: int = size
        self.entries: List[Optional[Entry]] = [None] * self.size

    def get(self, key):
        """Return the value for *key*, or None after ``size`` failed probes.

        NOTE(review): an empty (None) slot does not terminate the scan, so
        misses always cost a full probe sequence.
        """
        i = 0
        entry = self.entries[self.hash(key, i)]
        while entry is None or entry.key != key:
            i += 1
            if i == self.size:
                return None
            entry = self.entries[self.hash(key, i)]
        return entry.value

    def __getitem__(self, item):
        return self.get(item)

    def search(self, key):
        """Like get(), but returns the visited slot indices as a space-separated
        string trace; a trailing "-1" marks a failed lookup."""
        i = 0
        entry = self.entries[self.hash(key, i)]
        search_result = str(self.hash(key, i)) + " "
        while entry is None or entry.key != key:
            i += 1
            if i == self.size:
                return search_result + "-1"
            entry = self.entries[self.hash(key, i)]
            search_result += str(self.hash(key, i)) + " "
        return search_result

    def put(self, key, value):
        """Insert or overwrite key -> value; raises when no slot is free."""
        i = 0
        entry = self.entries[self.hash(key, i)]
        # Probe until we find the key itself or the first empty slot.
        while entry is not None and entry.key != key:
            i += 1
            if i == self.size:
                raise Exception("Table is full!")
            entry = self.entries[self.hash(key, i)]
        if entry is None:
            entry = Entry(key=key, value=value)
            self.entries[self.hash(key, i)] = entry
        else:
            entry.value = value

    def __setitem__(self, key, value):
        self.put(key, value)

    def insert(self, value):
        # Convenience: use the value as its own key.
        self.put(value, value)

    @abc.abstractmethod
    def hash(self, key, i):
        """Return the slot index for *key* on probe attempt *i* (subclass hook)."""
        pass

    def __str__(self):
        # One line per slot: "<index>\t<value>" with "-1" for empty slots.
        lines = []
        for i in range(len(self.entries)):
            if self.entries[i] is None:
                lines.append("" + str(i) + "\t" + "-1")
            else:
                lines.append("" + str(i) + "\t" + str(self.entries[i].value))
        return "\n".join(lines)
class LinearHashtable(AssociativeHashtable):
    """Open-addressing table resolving collisions by linear probing."""

    def hash(self, key, i):
        # Probe sequence: h(key), h(key)+1, h(key)+2, ... (mod table size).
        slot = Hash.linear_hash(key, i, self.size)
        return slot


class QuadraticHashtable(AssociativeHashtable):
    """Open-addressing table resolving collisions by quadratic probing."""

    def hash(self, key, i):
        # Probe offset grows quadratically with the attempt number i.
        slot = Hash.quadratic_hash(key, i, self.size)
        return slot


class DoubleHashtable(AssociativeHashtable):
    """Open-addressing table resolving collisions by double hashing."""

    def hash(self, key, i):
        # Step size comes from an independent secondary hash of the key.
        slot = Hash.double_hash(key, i, self.size)
        return slot
# endregion
| [
"antonioabunassar@gmail.com"
] | antonioabunassar@gmail.com |
646f560791c6440e8eaeb5fba21c78512ccb2020 | 0ff92f9d580d373908eecbb7fb773993e12e6657 | /B_fin.py | 94342fc6a52af1d7489cdc9586defe69e8296363 | [] | no_license | OllieEm/13-sprint_fin | 549d4ad69970c3388b9a4895abc7850f607e2c1d | 4892a2678a58b778064d872dc174be38ab0f2c87 | refs/heads/main | 2023-05-07T03:29:19.166337 | 2021-06-01T18:10:12 | 2021-06-01T18:10:12 | 372,921,796 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | # ID 51774953
from math import floor
# Binary operations for the RPN calculator; '/' is floor division
# (floor(x / y) matches Python's // semantics for ints, negatives included).
ACTION = {
    '+': lambda x, y: x + y,
    '-': lambda x, y: x - y,
    '*': lambda x, y: x * y,
    '/': lambda x, y: floor(x / y),
}


class Stack:
    """Minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.items = []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item.

        Returns an error string instead of raising when empty; kept for
        backward compatibility (valid input never pops an empty stack).
        """
        try:
            return self.items.pop()
        except IndexError:
            return 'Error: no items to delete'

    def size(self):
        """Return the number of stored items."""
        return len(self.items)


def calculator(input_string):
    """Evaluate a reverse-Polish expression.

    input_string -- iterable of tokens, e.g. ['2', '3', '+'].
    Operands are converted with int() when consumed by an operator;
    the value of the expression is returned.
    """
    stack = Stack()
    for operand in input_string:
        if operand in ACTION:  # operator token
            operand1 = int(stack.pop())
            operand2 = int(stack.pop())
            result = ACTION[operand](operand2, operand1)
            stack.push(result)
        else:  # operand token
            stack.push(operand)
    # BUGFIX: the original compared the *bound method* ``stack.size`` to 1
    # (always False) and its ``stack[0]`` branch would have raised TypeError
    # had it ever run.  A well-formed expression leaves exactly one value,
    # so simply return the top of the stack.
    return stack.pop()
input_data = input().split()
res = calculator(input_data)
print(res)
| [
"noreply@github.com"
] | noreply@github.com |
1fdbaaded79fc2233276a57d7482c3fa6516b403 | 8b95e494ed9b0871953e9a83127610a31ebab31c | /CVE-2021-41794/PoC.py | 1ca2aaaf56e106d82d9920d10018d69ceacb27d2 | [] | no_license | MYTFX/CVE-mitre | fe8389f7a0bbe93aedfb78356d0982910d2045ec | 16c22fc5e33fa07fdbb6aa7731227a944b25c3ba | refs/heads/main | 2023-08-18T09:32:32.333487 | 2021-10-19T14:55:27 | 2021-10-19T14:55:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,588 | py | #!/usr/bin/env python3
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(1.0)
# Two raw, hand-crafted PFCP messages sent over UDP/8805 (the PFCP port).
# Variable names indicate an Association Setup Request followed by a
# Session Establishment Request.  NOTE(review): payload bytes are presumably
# crafted to trigger the target CVE in the peer's PFCP parser — do not
# modify them; verify intent against the accompanying advisory.
pfcp_association_setup_req = b'\x20\x05\x00\x1a\x00\x00\x01\x00\x00\x3c\x00\x05\x00\xc0\xa8\x3f\x88\x00\x60\x00\x04\x5f\xf4\x38\x25\x00\x59\x00\x01\x00'
pfcp_session_establishment_req = b'\x21\x32\x00\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x3c\x00\x05\x00\xc0\xa8\x3f\x88\x00\x39\x00\x0d\x5a\x00\x00\x00\x00\x00\x00\x00\x01\xc0\xa8\x3f\x88\x00\x01\x00\x46\x00\x38\x00\x02\x00\x01\x00\x1d\x00\x04\x1f\x00\x00\x00\x00\x02\x00\x27\x00\x14\x00\x01\x00\x00\x15\x00\x09\x01\x00\x00\x00\x0a\xc0\xa8\x3f\x88\x00\x16\x00\x08\x69\x6e\x74\x65\x72\x6e\x65\x74\x00\x5d\x00\x05\x02\x2d\x2d\x00\x02\x00\x5f\x00\x01\x00\x00\x6c\x00\x04\x00\x00\x00\x01\x00\x01\x00\x34\x00\x38\x00\x02\x00\x02\x00\x1d\x00\x04\x1f\x00\x00\x00\x00\x02\x00\x1a\x00\x14\x00\x01\x01\x00\x16\x00\x08\x69\x6e\x74\x65\x72\x6e\x65\x74\x00\x5d\x00\x05\x06\x2d\x2d\x00\x02\x00\x6c\x00\x04\x00\x00\x00\x02\x00\x03\x00\x16\x00\x6c\x00\x04\x00\x00\x00\x01\x00\x2c\x00\x01\x02\x00\x04\x00\x05\x00\x2a\x00\x01\x01\x00\x03\x00\x24\x00\x6c\x00\x04\x00\x00\x00\x02\x00\x2c\x00\x01\x02\x00\x04\x00\x13\x00\x2a\x00\x01\x00\x00\x54\x00\x0a\x01\x00\x00\x00\x00\x0a\xc0\xa8\x3f\x88\x00\x71\x00\x01\x01'
# Step 1: associate with the PFCP peer so it will accept session messages.
sock.sendto(pfcp_association_setup_req, ('127.0.0.7', 8805))
try:
    sock.recv(65535)
except Exception as ex:
    print(f"Receive failed: {ex}")
# Step 2: send the session establishment request (the trigger message).
sock.sendto(pfcp_session_establishment_req, ('127.0.0.7', 8805))
try:
    sock.recv(65535)
except Exception as ex:
    print(f"Receive failed: {ex}")
sock.close()
| [
"noreply@github.com"
] | noreply@github.com |
e5afe11339814efc010060f141562f2c0f6a8e6c | 7d667b70c8ae1c8f214b85d613d3a98462af9d0c | /froide/account/forms.py | db1cd0962ee519c64b00d35ca627351437a19328 | [
"MIT"
] | permissive | handlingar/froide | c57653a87a05fb402c1fe61f0df1ff480391f911 | 5ed80cf6550fb4cbc757029b2c860b53e784eb93 | refs/heads/master | 2021-05-28T18:13:17.573095 | 2015-06-18T13:00:16 | 2015-06-18T13:00:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,675 | py | import floppyforms as forms
from django.utils.six import text_type as str
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django.core.urlresolvers import reverse
from django.contrib import auth
from django.contrib.auth import get_user_model
from django.conf import settings

from froide.helper.widgets import AgreeCheckboxInput

from .widgets import ConfirmationWidget
from .models import AccountManager
# Feature flags read once at import time from the project settings.
USER_CAN_HIDE_WEB = settings.FROIDE_CONFIG.get("user_can_hide_web", True)  # allow hiding the name on the web
HAVE_ORGANIZATION = settings.FROIDE_CONFIG.get("user_has_organization", True)  # show an organisation field
ALLOW_PSEUDONYM = settings.FROIDE_CONFIG.get("allow_pseudonym", False)  # permit pseudonymous signup
HAVE_NEWSLETTER = settings.FROIDE_CONFIG.get("have_newsletter", False)  # offer a newsletter opt-in
class NewUserBaseForm(forms.Form):
    """Collects the personal details needed to create a new account.

    Some fields only exist when the corresponding feature flag
    (``HAVE_ORGANIZATION``, ``USER_CAN_HIDE_WEB``) is enabled in settings.
    """
    first_name = forms.CharField(max_length=30,
            label=_('First name'),
            widget=forms.TextInput(attrs={'placeholder': _('First Name'),
                'class': 'form-control'}))
    last_name = forms.CharField(max_length=30,
            label=_('Last name'),
            widget=forms.TextInput(attrs={'placeholder': _('Last Name'),
                'class': 'form-control'}))
    address = forms.CharField(max_length=300,
            required=False,
            label=_('Mailing Address'),
            help_text=_('Optional. Your address will not be displayed publicly and is only needed in case a public body needs to send you paper.'),
            widget=forms.Textarea(attrs={
                'rows': '3',
                'class': 'form-control',
                'placeholder': _('Street, Post Code, City'),
            }))
    user_email = forms.EmailField(label=_('Email address'),
            max_length=75,
            help_text=_('Not public. The given address will '
                        'need to be confirmed.'),
            widget=forms.EmailInput(attrs={
                    'placeholder': _('mail@ddress.net'),
                    'class': 'form-control'
            }))

    if HAVE_ORGANIZATION:
        organization = forms.CharField(required=False,
                label=_("Organization"),
                help_text=_('Optional. Affiliation will be shown next to your name'),
                widget=forms.TextInput(attrs={
                    'placeholder': _('Organization'),
                    'class': 'form-control'})
        )

    if USER_CAN_HIDE_WEB:
        private = forms.BooleanField(required=False,
                label=_("Hide my name on the web"),
                help_text=mark_safe(_("If you check this, your name will still appear in requests to public bodies, but we will do our best to not display it publicly. However, we cannot guarantee your anonymity")))

    def __init__(self, *args, **kwargs):
        super(NewUserBaseForm, self).__init__(*args, **kwargs)
        if ALLOW_PSEUDONYM:
            # Point users at the pseudonym policy when the feature is on.
            self.fields["last_name"].help_text = mark_safe(
                    _('<a target="_blank" href="{url}">You may use a pseudonym if you don\'t need to receive postal messages</a>.')
                    .format(url=reverse("help-privacy") + '#pseudonym'))

    def clean_first_name(self):
        return self.cleaned_data['first_name'].strip()

    def clean_last_name(self):
        return self.cleaned_data['last_name'].strip()

    def clean_user_email(self):
        """Reject addresses that already belong to an account.

        Distinguishes confirmed accounts (offer a login link) from
        unconfirmed ones (ask the user to follow the confirmation mail).
        """
        email = self.cleaned_data['user_email']
        user_model = get_user_model()
        try:
            user = user_model.objects.get(email=email)
        except user_model.DoesNotExist:
            pass
        else:
            if user.is_active:
                # Security fix: the user-supplied address is interpolated
                # into an HTML message wrapped in mark_safe(); escape it so
                # it cannot inject markup into the rendered error.
                raise forms.ValidationError(mark_safe(
                    _('This email address already has an account. <a href="%(url)s?simple&email=%(email)s" class="btn btn-warning target-small">Click here to login using that email address.</a>') % {
                        'url': reverse("account-login"),
                        'email': escape(email)
                    }))
            else:
                raise forms.ValidationError(
                    _('This email address is already registered, but not yet confirmed! Please click on the confirmation link in the mail we send you.'))
        return email
class TermsForm(forms.Form):
    """Acceptance of terms/privacy statement plus optional newsletter opt-in.

    NOTE(review): the 'Priavcy' typo below sits inside a translatable
    runtime string and is deliberately left untouched here.
    """
    terms = forms.BooleanField(
            label=mark_safe(_("Terms and Conditions and Privacy Statement")),
            error_messages={'required':
                _('You need to accept our Terms and Conditions and Priavcy Statement.')},
            widget=AgreeCheckboxInput(
                agree_to=_(u'You agree to our <a href="%(url_terms)s" class="target-new">Terms and Conditions</a> and <a href="%(url_privacy)s" class="target-new">Privacy Statement</a>'),
                url_names={"url_terms": "help-terms", "url_privacy": "help-privacy"}))

    if HAVE_NEWSLETTER:
        newsletter = forms.BooleanField(required=False,
                label=_("Check if you want to receive our newsletter."))

    def save(self, user):
        # Persist the acceptance (and, if offered, the newsletter choice)
        # on the given user record.
        user.terms = True
        if HAVE_NEWSLETTER:
            user.newsletter = self.cleaned_data['newsletter']
        user.save()
class NewUserForm(NewUserBaseForm, TermsForm):
    """Combined signup form: user details plus terms acceptance."""
    pass
class NewUserWithPasswordForm(NewUserForm):
    """Signup form variant that sets a password immediately."""
    password = forms.CharField(widget=forms.PasswordInput,
            label=_('Password'))
    password2 = forms.CharField(widget=forms.PasswordInput,
            label=_('Password (repeat)'))

    def clean(self):
        cleaned = super(NewUserWithPasswordForm, self).clean()
        # Bug fix: use .get() — either key is absent from cleaned_data when
        # its own field validation failed, and the original direct indexing
        # raised an unhandled KeyError in that case.
        if cleaned.get('password') != cleaned.get('password2'):
            raise forms.ValidationError(_("Passwords do not match!"))
        return cleaned
class UserLoginForm(forms.Form):
    """Plain email/password login form."""
    email = forms.EmailField(widget=forms.EmailInput(
        attrs={
            'placeholder': _('mail@ddress.net'),
            'class': 'form-control'
        }),
        label=_('Email address'))
    password = forms.CharField(widget=forms.PasswordInput(
        attrs={
            'class': 'form-control'
        }),
        label=_('Password'))


class PasswordResetForm(auth.forms.PasswordResetForm):
    """Django's password-reset form with the project's styled email widget."""
    email = forms.EmailField(widget=forms.EmailInput(
        attrs={
            'placeholder': _('mail@ddress.net'),
            'class': 'form-control'
        }),
        label=_('Email address'))
class UserChangeAddressForm(forms.Form):
    """Edits the mailing address stored on a user profile.

    Takes the profile as the first constructor argument and writes the new
    address back to it in save().
    """
    address = forms.CharField(max_length=300,
            label=_('Mailing Address'),
            help_text=_('Your address will never be displayed publicly.'),
            widget=forms.Textarea(attrs={'placeholder': _('Street, Post Code, City'),
                'class': 'inline smalltext'}))

    def __init__(self, profile, *args, **kwargs):
        super(UserChangeAddressForm, self).__init__(*args, **kwargs)
        self.profile = profile
        # Pre-fill the field with the current address.
        self.fields['address'].initial = self.profile.address

    def save(self):
        self.profile.address = self.cleaned_data['address']
        self.profile.save()
class UserChangeEmailForm(forms.Form):
    """Requests a change of the account email address."""
    email = forms.EmailField(widget=forms.EmailInput(
            attrs={'placeholder': _('mail@ddress.net')}),
            label=_('New email address'))

    def clean_email(self):
        # Normalise to lower case before checking uniqueness.
        email = self.cleaned_data['email'].lower()
        if get_user_model().objects.filter(email=email).exists():
            raise forms.ValidationError(
                _('A user with that email address already exists!')
            )
        return email
class UserEmailConfirmationForm(forms.Form):
    """Validates an email-change confirmation link for the logged-in user.

    The link carries the new address, a 32-char secret and the user id;
    save() applies the new address once everything checks out.
    """
    email = forms.EmailField()
    secret = forms.CharField(min_length=32, max_length=32)
    user_id = forms.IntegerField()

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(UserEmailConfirmationForm, self).__init__(*args, **kwargs)

    def clean_user_id(self):
        user_id = self.cleaned_data['user_id']
        # The link must belong to the currently logged-in user.
        if user_id != self.user.pk:
            raise forms.ValidationError(
                _('Logged in user does not match this link!')
            )
        return user_id

    def clean(self):
        # Verify the confirmation secret against the submitted address.
        # NOTE(review): expiry handling is presumably inside
        # check_confirmation_secret, as the error message suggests — confirm.
        check = AccountManager(self.user).check_confirmation_secret(
            self.cleaned_data['secret'],
            self.cleaned_data['email'],
        )
        if not check:
            raise forms.ValidationError(
                _('Link is invalid or has expired!')
            )
        return self.cleaned_data

    def save(self):
        self.user.email = self.cleaned_data['email']
        self.user.save()
class UserDeleteForm(forms.Form):
    """Asks for the password and a typed confirmation phrase before
    account deletion."""
    CONFIRMATION_PHRASE = str(_('Freedom of Information Act'))

    password = forms.CharField(
        widget=forms.PasswordInput,
        label=_('Password'),
        help_text=_('Please type your password to confirm.')
    )
    confirmation = forms.CharField(
        widget=ConfirmationWidget(
            {'placeholder': CONFIRMATION_PHRASE}
        ),
        label=_('Confirmation Phrase'),
        help_text=_('Type the phrase above exactly as displayed.'))

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(UserDeleteForm, self).__init__(*args, **kwargs)

    def clean_password(self):
        password = self.cleaned_data['password']
        # authenticate() uses the account email as the username.
        user = auth.authenticate(
            username=self.user.email,
            password=password
        )
        if not user:
            raise forms.ValidationError(
                _('You provided the wrong password!')
            )
        # Deliberately return '' so the password is not kept in cleaned_data.
        return ''

    def clean_confirmation(self):
        confirmation = self.cleaned_data['confirmation']
        if confirmation != self.CONFIRMATION_PHRASE:
            raise forms.ValidationError(
                _('You did not type the confirmation phrase exactly right!')
            )
        return ''
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
0b9f0412e790c2a383c2c3f9d7db1590eaf6de98 | f26680a3694fa1cc7662ce0a7774719b5624169a | /src/dynamic_graph/sot/pattern_generator/meta_pg.py | 4d5d3629764a73f981e68c762f43cb84b1ff51e8 | [
"ISC"
] | permissive | imaroger/sot-pattern-generator | 3251986abf9ba9e3596b3eb9b458784da7d879e2 | f4c07418ae26b7085b5fe2b0c8cbb3a61cbb611b | refs/heads/master | 2021-08-25T09:28:38.024262 | 2018-08-17T13:18:08 | 2018-08-17T13:19:12 | 198,965,902 | 0 | 1 | null | 2019-07-26T07:09:30 | 2019-07-26T07:09:30 | null | UTF-8 | Python | false | false | 8,778 | py | from dynamic_graph.sot.core import *
from dynamic_graph import plug
from dynamic_graph.sot.pattern_generator import PatternGenerator,Selector
from dynamic_graph.sot.dynamics import *
class MetaPG:
    """Wrapper that instantiates and wires the walking PatternGenerator.

    Builds the 'pg' entity from the robot model files carried by ``dyn``,
    applies the standard walking parameters, and creates the signal
    plumbing (support-foot selection, ZMP reference expressed in the waist
    frame, waist/CoM reference converters) consumed by the walking tasks.
    """
    def __init__(self,dyn):
        self.pg = PatternGenerator('pg')

        # Reuse the robot model files already loaded by the dynamics entity.
        modelDir = dyn.getProperty('vrmlDirectory')
        modelName = dyn.getProperty('vrmlMainFile')
        specificitiesPath = dyn.getProperty('xmlSpecificityFile')
        jointRankPath = dyn.getProperty('xmlRankFile')
        robotDim = len(dyn.position.value)
        #print(modelDir,modelName,specificitiesPath,jointRankPath,robotDim)
        self.pg.setVrmlDir(modelDir+'/')
        self.pg.setVrml(modelName)
        self.pg.setXmlSpec(specificitiesPath)
        self.pg.setXmlRank(jointRankPath)
        self.pg.buildModel()

        # Standard initialization
        self.pg.parseCmd(":samplingperiod 0.005")
        self.pg.parseCmd(":previewcontroltime 1.6")
        self.pg.parseCmd(":comheight 0.814")
        self.pg.parseCmd(":omega 0.0")
        self.pg.parseCmd(":stepheight 0.05")
        self.pg.parseCmd(":singlesupporttime 0.780")
        self.pg.parseCmd(":doublesupporttime 0.020")
        self.pg.parseCmd(":armparameters 0.5")
        self.pg.parseCmd(":LimitsFeasibility 0.0")
        self.pg.parseCmd(":ZMPShiftParameters 0.015 0.015 0.015 0.015")
        self.pg.parseCmd(":TimeDistributeParameters 2.0 3.5 1.0 3.0")
        self.pg.parseCmd(":UpperBodyMotionParameters 0.0 -0.5 0.0")
        self.pg.parseCmd(":comheight 0.814")
        self.pg.parseCmd(":SetAlgoForZmpTrajectory Morisawa")

        # Feed the current robot state into the PG and initialise it.
        plug(dyn.position,self.pg.position)
        plug(dyn.com,self.pg.com)
        self.pg.motorcontrol.value = robotDim*(0,)
        self.pg.zmppreviouscontroller.value = (0,0,0)

        self.pg.initState()

        # --- PG INIT FRAMES ---
        # A second (geometry-only) model used to evaluate the ankle frames.
        self.geom = Dynamic("geom")
        self.geom.setFiles(modelDir, modelName,specificitiesPath,jointRankPath)
        self.geom.parse()
        self.geom.createOpPoint('rf','right-ankle')
        self.geom.createOpPoint('lf','left-ankle')
        plug(dyn.position,self.geom.position)
        self.geom.ffposition.value = 6*(0,)
        self.geom.velocity.value = robotDim*(0,)
        self.geom.acceleration.value = robotDim*(0,)

        # --- Selector of Com Ref: when pg is stopped, pg.inprocess becomes 0
        self.comRef = Selector('comRef'
                                ,['vector','ref',dyn.com,self.pg.comref])
        plug(self.pg.inprocess,self.comRef.selec)

        # Choose which foot is the support foot (right/left) in both the
        # pattern-generator frame and the world/ankle frame.
        self.selecSupportFoot = Selector('selecSupportFoot'
                                          ,['matrixHomo','pg_H_sf'
                                            ,self.pg.rightfootref
                                            ,self.pg.leftfootref]
                                          ,['matrixHomo','wa_H_sf'
                                            ,self.geom.rf,self.geom.lf])
        plug(self.pg.SupportFoot,self.selecSupportFoot.selec)
        # Chain the homogeneous transforms: pg_H_wa = pg_H_sf * (wa_H_sf)^-1.
        self.sf_H_wa = Inverse_of_matrixHomo('sf_H_wa')
        plug(self.selecSupportFoot.wa_H_sf,self.sf_H_wa.sin)
        self.pg_H_wa = Multiply_of_matrixHomo('pg_H_wa')
        plug(self.selecSupportFoot.pg_H_sf,self.pg_H_wa.sin1)
        plug(self.sf_H_wa.sout,self.pg_H_wa.sin2)

        # --- Compute the ZMP ref in the Waist reference frame.
        self.wa_H_pg = Inverse_of_matrixHomo('wa_H_pg')
        plug(self.pg_H_wa.sout,self.wa_H_pg.sin)
        self.wa_zmp = Multiply_matrixHomo_vector('wa_zmp')
        plug(self.wa_H_pg.sout,self.wa_zmp.sin1)
        plug(self.pg.zmpref,self.wa_zmp.sin2)

        # --- Build the converter object for the waist task
        # Stack (x, y, z) position with the CoM attitude angles, then turn
        # the 6-vector into a homogeneous matrix reference.
        self.waistReferenceVector = Stack_of_vector('waistReferenceVector')
        plug(self.pg.initwaistposref,self.waistReferenceVector.sin1)
        #plug(self.pg.initwaistattref,self.waistReferenceVector.sin2)
        plug(self.pg.comattitude,self.waistReferenceVector.sin2)
        self.waistReferenceVector.selec1(0,3)
        self.waistReferenceVector.selec2(0,3)
        self.waistReference=PoseRollPitchYawToMatrixHomo('waistReference')
        plug(self.waistReferenceVector.sout,self.waistReference.sin)

    def plugZmp(self,robot):
        """Route the ZMP reference (waist frame) into the robot entity."""
        # Connect the ZMPref to OpenHRP in the waist reference frame.
        self.pg.parseCmd(':SetZMPFrame world')
        plug(self.wa_zmp.sout,robot.zmp)

    def plugWaistTask(self,taskWaist):
        """Drive a waist task from the PG's waist reference (x, y, z + yaw)."""
        plug(self.waistReference.sout,taskWaist.featureDes.position)
        taskWaist.feature.selec.value = '111100'

    def plugComTask(self,taskCom):
        """Turn a CoM task into a PD-tracked task fed by the PG references."""
        plug(self.comRef.ref,taskCom.featureDes.errorIN)
        plug(self.pg.dcomref,taskCom.featureDes.errordotIN)
        taskCom.task = TaskPD('taskComPD')
        taskCom.task.add(taskCom.feature.name)
        # This next line is not very nice. The principle should be reported in Task.
        plug(taskCom.feature.errordot,taskCom.task.errorDot)
        plug(taskCom.task.error,taskCom.gain.error)
        plug(taskCom.gain.gain,taskCom.task.controlGain)
        taskCom.gain.setConstant(40)
        taskCom.task.setBeta(-1)

    def startHerdt(self,xyconstraint=True):
        """Switch to the Herdt velocity-driven ZMP algorithm and start walking."""
        self.pg.parseCmd(':SetAlgoForZmpTrajectory Herdt')
        self.pg.parseCmd(':doublesupporttime 0.1')
        self.pg.parseCmd(':singlesupporttime 0.7')
        # When velocity reference is at zero, the robot stops all motion after n steps
        self.pg.parseCmd(':numberstepsbeforestop 2')
        # Set constraints on XY
        if xyconstraint:
            self.pg.parseCmd(':setfeetconstraint XY 0.09 0.06')
        # The next command must be runned after a OpenHRP.inc ... ???
        # Start the robot with a speed of 0.1 m/0.8 s.
        self.pg.parseCmd(':HerdtOnline 0.1 0.0 0.0')
from math import atan2,pi
from numpy import array,dot
# --- WALK TRACKER -------------------------------------------------------------
# --- WALK TRACKER -------------------------------------------------------------
# --- WALK TRACKER -------------------------------------------------------------
class WalkTracker:
    '''
    PD controller on the Herdt PG input: given a time-dependant function of the
    target position, compute the velocity input to be apply on the PG to track
    the target.
    '''
    def __init__(self,traj,dyn,pg,Kp=1.0,dt=0.005):
        '''
        traj is the functor (input T, output arrayX2 for the 2D position, or
        pair of arrayX2 for position and derivative). dyn and pg are the
        entities of the dynamic model (to get the current position) and of the
        PG computer (to send the reference). Kp is the gain of the PD. dt is
        the period of the control, to be used to compute the target derivative
        if traj returns only the position.
        '''
        self.Kp = Kp
        self.trajfunction = traj
        self.pg = pg
        # Only recompute the control every `rate` calls to update().
        self.rate = 200
        self.dyn = dyn
        self.dt = dt
    def setDisplay(self,viewer,target='zmp',vector='axis1'):
        '''
        If needed, the PD can be displayed on a viewer, using the two object
        names given in input.
        '''
        self.viewer = viewer
        self.viewTarget = target
        self.viewVelocity = vector
    def getPositionAndDerivative(self,t):
        '''
        Call the traj function, and if needed compute the numerical derivative.
        '''
        res = self.trajfunction(t)
        if isinstance(res[0],float):
            # traj returned only a position: differentiate numerically
            # against the previously stored sample (zero on the first call).
            if 'trajprec' in self.__dict__ and 'dt' in self.__dict__:
                deriv = (res-self.trajprec) / ( (t-self.tprec)*self.dt)
            else: deriv = array([0,0])
            self.trajprec = res
            self.tprec = t
            res = (res,deriv)
        return res
    def update(self,t):
        '''
        Compute the PD law and send it to the PG. This function can be called
        at every time iteration, but only works along the defined rate.
        '''
        if t%self.rate != 1: return
        p = self.dyn.com.value[0:2]
        # Rotation from world to waist frame (transpose of the waist's 2x2
        # rotation block).
        waRwo = (array(self.dyn.waist.value)[0:2,0:2]).T
        th = atan2(self.dyn.waist.value[1][0],self.dyn.waist.value[0][0])
        (pref,v) = self.getPositionAndDerivative(t)
        # Desired heading follows the target velocity direction.
        thref = atan2(v[1],v[0])
        # PD in world frame, then expressed in the waist frame for the PG.
        wo_vref = self.Kp*(pref-p)+v
        wa_vref = dot(waRwo , wo_vref)
        # Wrap the heading error into (-pi, pi].
        dth = ( (thref-th+pi)%(2*pi)-pi)
        vref = (wa_vref[0], wa_vref[1], self.Kp*dth )
        self.pg.velocitydes.value = vref
        if 'viewer' in self.__dict__:
            self.viewer.updateElementConfig(self.viewTarget
                                            ,(float(pref[0]),float(pref[1]),0,0,0,0))
            self.viewer.updateElementConfig(self.viewVelocity
                                            ,(float((p+10*v)[0]),float((p+10*v)[1])
                                              ,0,0,0,thref))
| [
"olivier.stasse@gmail.com"
] | olivier.stasse@gmail.com |
8fb7ab55705948bdd8ed17f4432312c23d69569b | c42d878b9d27b3e08fb53b87af24ee6d49d67942 | /phase_3/src/Form/ViewGraphGen.py | 9d2a92c19310449d18b57d2d2121df98124be0a2 | [] | no_license | e-prakash/fall2020-cse111-project | 9a970389e0559ed13abcf014df087e901396cd4e | b8c5c61d197beea1722de8b444d898e1653415a7 | refs/heads/main | 2023-03-19T02:50:38.155902 | 2021-03-03T19:51:28 | 2021-03-03T19:51:28 | 306,124,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,819 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib.colors as mcolors
from ..Common import DBTypes
from .import ValueCheck
from time import gmtime, strftime
import folium
# Convenience aliases into the project's request/response enum definitions.
Action = DBTypes.EnumReq.Action
Fields = DBTypes.EnumReq.Fields
ResFields = DBTypes.EnumRes

# HTML popup template for an ordinary earthquake marker; filled by showMap()
# with key, city, state, nation, magnitude, UTC date/time parts, longitude,
# latitude, depth, type and verifying sources — in that order.
eq_template = """
<h5>{}</h5>
<h3>{}, {}, {} - {}</h3>
<h4>{} {}, {} {}:{}:{} UTC</h4>
<h5>(Lon: {}°, Lat: {}°), Depth: {} km</h5>
<h5>{}</h5>
<h5>Verified by: {}</h5>
"""

# Variant for nuclear events: same fields plus yield (kt) and nation.
ne_template = """
<h5>{}</h5>
<h3>{}, {}, {} - {}</h3>
<h4>{} {}, {} {}:{}:{} UTC</h4>
<h5>(Lon: {}°, Lat: {}°), Depth: {} km</h5>
<h5>{}</h5>
<h5>Verified by: {}</h5>
<h5>Yield: {} kt, {}</h5>
"""
def getColor(mag):
    """Map an earthquake magnitude to a marker colour.

    < 2 green, [2, 5) yellow, [5, 7) orange, >= 7 red.
    Bug fix: the original's first branch required ``mag >= 0``, so negative
    magnitudes (real for micro-earthquakes) fell through to red; they are
    now coloured green with the other small events.
    """
    if mag < 2:
        return "#00ff00"
    if mag < 5:
        return "#ffff00"
    if mag < 7:
        return "#ffa500"
    return "#ff0000"
def getRadius(mag):
    """Marker radius in pixels, proportional to the magnitude."""
    scale = 2.5
    return scale * mag
def showGraph(data, graphSelection):
    """Plot query results: a bar chart for 'Aggregate' selections, otherwise
    one time-series line per group.

    ``data`` is a dict keyed by the ResFields.Graph.* constants; the exact
    shape is produced by the query layer (not visible here) — assumes the
    three lists are index-aligned.
    """
    if "Aggregate" in graphSelection:
        x = data[ResFields.Graph.XValue]
        y = data[ResFields.Graph.YValue]
        x_pos = [i for i, _ in enumerate(x)]
        plt.bar(x_pos, y, color=['g', 'r', 'c', 'm', 'y', 'b'])
        plt.title(graphSelection)
        plt.xticks(x_pos, x)
        plt.xticks(rotation=90)
        plt.show()
    else:
        names = list(set(data[ResFields.Graph.XValue]))
        values = {}
        for name in names:
            values[name] = [[],[]]
        # Bucket (year, value) pairs per series name.
        for name, xval, yval in zip(data[ResFields.Graph.XValue], data[ResFields.Graph.Year], data[ResFields.Graph.YValue]):
            values[name][0].append(xval)
            values[name][1].append(yval)
        fig, ax = plt.subplots()
        for name in values:
            # Sort each series by year before plotting.
            values[name][0], values[name][1] = zip(*sorted(zip(values[name][0], values[name][1])))
            ax.plot(values[name][0], values[name][1], label=name)
        if len(values) == 0:
            # Placeholder line so the legend explains the empty plot.
            ax.plot([0,1],[1,1],label = 'NO VALUES FOR CURRENT SELECTION')
        ax.legend()
        plt.title(graphSelection)
        plt.show()
def showMap(data, isNuclear):
    """Render the events in ``data`` as circle markers on a world map and
    write the result to an HTML file; returns the file name.

    ``data`` is a dict of index-aligned columns keyed by ResFields.*
    constants; ``isNuclear`` selects the popup template (and expects the
    extra yield/nation columns).
    """
    # months[0] is a placeholder so month numbers 1-12 index directly.
    months = ['None', 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
    m = folium.Map(location=[20,0], tiles="OpenStreetMap", zoom_start=2)
    for i in range(len(data[ResFields.Earthquake.Key])):
        # Re-assemble row i from the column-oriented input.
        entry = {}
        for key in data:
            entry[key] = data[key][i]
        dt = ValueCheck.julianToDatetime(entry[ResFields.Earthquake.Time])
        # De-duplicate the comma-separated source keys.
        eqsrcs = ','.join(list(set(entry[ResFields.Report.EarthquakeSourceMappingSourceKeys].split(','))))
        if not isNuclear:
            popup = eq_template.format(
                entry[ResFields.Earthquake.Key],
                entry[ResFields.City.Name],
                entry[ResFields.State.Name],
                entry[ResFields.Nation.Name],
                entry[ResFields.Earthquake.Mag],
                dt[2], months[dt[1]], dt[0], dt[3], dt[4], dt[5],
                entry[ResFields.Earthquake.Longitude],
                entry[ResFields.Earthquake.Latitude],
                entry[ResFields.Earthquake.Depth],
                entry[ResFields.Earthquake.Type],
                eqsrcs
            )
        else:
            popup = ne_template.format(
                entry[ResFields.Earthquake.Key],
                entry[ResFields.City.Name],
                entry[ResFields.State.Name],
                entry[ResFields.Nation.Name],
                entry[ResFields.Earthquake.Mag],
                dt[2], months[dt[1]], dt[0], dt[3], dt[4], dt[5],
                entry[ResFields.Earthquake.Longitude],
                entry[ResFields.Earthquake.Latitude],
                entry[ResFields.Earthquake.Depth],
                entry[ResFields.Earthquake.Type],
                eqsrcs,
                entry[ResFields.Nuclear.Yield],
                entry['ne_nationname']
            )
        # Colour and size encode the magnitude (see getColor/getRadius).
        folium.CircleMarker(
            location=[entry[ResFields.Earthquake.Latitude], entry[ResFields.Earthquake.Longitude]],
            popup=popup,
            radius=getRadius(entry[ResFields.Earthquake.Mag]),
            color=getColor(entry[ResFields.Earthquake.Mag]),
            fill=True,
            fill_color=getColor(entry[ResFields.Earthquake.Mag]),
            fill_opacity=0.4,
            weight=1,
            opacity=0
        ).add_to(m)
    # Timestamped output file; then inject a CSS rule into <head> to widen
    # the Leaflet popups.
    fname = "output/view-" + strftime("%Y-%m-%d--%H-%M-%S", gmtime()) + ".html"
    m.save(fname)
    with open(fname, "r") as in_file:
        buf = in_file.readlines()
    with open(fname, "w") as out_file:
        for line in buf:
            if "</head>" in line:
                line = "<style>.leaflet-popup.leaflet-zoom-animated{width:350px}</style>" + line
            out_file.write(line)
    return fname
"noreply@github.com"
] | noreply@github.com |
4bc52290347637c5b2fd2e1bab0b443fed88b2ab | 40b76594127c409204a578c32a9b88e9e6c1a44f | /层次聚类_AGNES.py | 77c718069dfc1d1c1232401ef71c9386f8378c7b | [] | no_license | prosperhitfz/Machine-Learning_Cluster-Analysis_kmeans-FCM-DBSCAN-AGNES_iris | 7e99249982ff96e3cff9e8343879950e613e5e28 | bb0f220a4d2ddd12a198a509026b847e88526174 | refs/heads/main | 2023-06-24T19:32:31.750373 | 2021-06-25T08:16:36 | 2021-06-25T08:16:36 | 380,169,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | # 凝聚层次聚类:AGNES算法(自底向上)
# 首先将每个对象作为一个簇,然后合并这些原子簇为越来越大的簇,直到某个终结条件被满足
from sklearn import datasets
from sklearn.cluster import AgglomerativeClustering
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import pandas as pd
# 解决中文显示问题
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
iris = datasets.load_iris()
iris_data = iris.data
clustering = AgglomerativeClustering(linkage='ward', n_clusters=3)
result = clustering.fit(iris_data)
print("各类别的样本数目:")
print(pd.Series(clustering.labels_).value_counts())
print("聚类结果:")
print(confusion_matrix(clustering.labels_, iris.target))
plt.figure()
d0 = iris_data[clustering.labels_ == 0]
plt.scatter(d0[:, 0], d0[:, 1])
d1 = iris_data[clustering.labels_ == 1]
plt.scatter(d1[:, 0], d1[:, 1])
d2 = iris_data[clustering.labels_ == 2]
plt.scatter(d2[:, 0], d2[:, 1])
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.title("层次聚类自底向上算法聚类AGNES Clustering")
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
ceeccd13ee0b16ef28e303cbc40be7a296dffd3c | 958231073be5a0978d3e3fbb648da8f26b2cbb03 | /python testing/testing_web/homework2/tests_buy.am/test_section.py | bd26f82dc1efce63d0863c7a8fdbe9e62cc9a70b | [] | no_license | Suren76/Python | afc9589c677496db6bbfb38166164b6e28da41ab | c843d43f7384397aeabcc16155b7ef6617ce256c | refs/heads/main | 2023-03-02T22:36:17.904471 | 2021-01-31T23:48:29 | 2021-01-31T23:48:29 | 329,370,489 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | import pytest
import allure
from selenium.webdriver.common.by import By
@pytest.mark.usefixtures('add_page_attribute')
@pytest.mark.usefixtures('chrome_driver')
class TestSection:
    """Selenium UI checks for the buy.am landing page.

    Bug fix: the original applied ``@allure.story`` decorators directly to
    ``if`` statements inside ``test_wishlist_button`` — a SyntaxError that
    prevented the module from importing.  The annotations are preserved as
    ``allure.step`` context managers instead.  The long XPath strings also
    carried embedded spaces from backslash line-continuations; XPath ignores
    that whitespace, so the strings are joined cleanly here.
    """

    def test_banner_slider(self):
        """The promo banner carousel should contain exactly 5 slides."""
        slider_elm = self.page.driver.find_elements(By.XPATH, "//section/div[3]/div/div/div/div")
        assert len(slider_elm) == 5

    def test_wishlist_button(self):
        """Wishlist button redirects to registration when logged out, and
        to the notes page when logged in."""
        # NOTE(review): the login-state probes use self.page.find_element
        # (no .driver) — presumably the page object proxies it; confirm.
        login_label = self.page.find_element(
            By.XPATH, "//nav/ul/li[5]/div/div[2]/div/ul/li[7]/a").text
        if login_label == 'Մուտք':  # "Login" => user is logged out
            with allure.step("Login negative"):
                self.page.driver.find_element(
                    By.XPATH,
                    "//section/div/div/div/div"
                    "/div[1]/section/div[2]/div/div/div/div/div/div/form/button",
                ).click()
                assert self.page.driver.current_url == "https://buy.am/hy/register/index/sTarget/note/sTargetAction/index"
        logout_label = self.page.find_element(
            By.XPATH, "//nav/ul/li[5]/div/div[2]/div/ul/li[7]/a/span").text
        if logout_label == 'Դուրս գալ':  # "Log out" => user is logged in
            with allure.step("Login positive"):
                self.page.driver.find_element(
                    By.XPATH,
                    "//section/div/div/div/div"
                    "/div[1]/section/div[2]/div/div/div/div/div/div/form/button",
                ).click()
                assert self.page.driver.current_url == "https://buy.am/hy/note"

    def test_add_busket_button(self):
        """Adding an item to the basket increments the header counter by one."""
        busket_len_before = int(self.page.driver.find_element(By.XPATH, "//header/div/nav/ul/li[4]/a/span").text)
        self.page.driver.find_element(
            By.XPATH,
            "//section/div/div/div/div"
            "/div[1]/section/div[2]/div/div/div/div/div/div/div[2]/div/div[3]/div/form/button",
        ).click()
        basket_len_after = int(self.page.driver.find_element(By.XPATH, "//header/div/nav/ul/li[4]/a/span").text)
        assert busket_len_before + 1 == basket_len_after
"parsyan_suren@mail.ru"
] | parsyan_suren@mail.ru |
3f6f23ae079267fb63b546d8461f4b288ddf16b4 | 9d4728585c97b1bf2e087e632eb2e69655ca6320 | /lagou/spiders/lagou.py | 16a67958a63e98e4dacf5f049419e4dcf85c8149 | [] | no_license | 602437897/lagou | fe243068226e24038272c1eec267218cd8a87f53 | 9da242629811d50efc067266c9e6fd3430f07333 | refs/heads/master | 2022-12-06T00:13:09.333623 | 2018-01-21T11:57:08 | 2018-01-21T11:57:08 | 118,330,034 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py | # -*- coding: utf-8 -*-
import scrapy
from lagou.items import LagouItem
from lagou.settings import DEFAULT_REQUEST_HEADERS, COOKIES
import time, random
class LagouSpider(scrapy.Spider):
    """Crawl C# job listings on lagou.com, following the pagination links."""
    name = 'lagou'
    allowed_domains = ['lagou.com']
    start_urls = ['https://www.lagou.com/zhaopin/C%23/?labelWords=label']
    pn = 1

    cookie = []

    def parse(self, response):
        """Yield one LagouItem per listing, then request the next page."""
        work_lists = response.xpath('//*[@id="s_position_list"]/ul/li')
        self.pn += 1
        raw_next = response.xpath('//div[@class="pager_container"]/a/@href').extract()[-1]
        # Bug fix: the original appended '?filterOption=...' *before* the
        # 'javascript:;' end-of-pagination check, so the check could never
        # match and the spider never stopped on the last page.
        if raw_next == 'javascript:;':
            return
        url_next = raw_next + '?filterOption={}'.format(self.pn)
        print(url_next)
        for work_list in work_lists:
            # Bug fix: create a fresh item per listing (the original reused
            # one instance across yields) and use relative './/' XPaths —
            # the original absolute '//' paths matched from the document
            # root, so every listing yielded the first job's data.
            item = LagouItem()
            item['position_name'] = work_list.xpath('.//a[@class="position_link"]/h3').extract()[0]
            item['position_link'] = work_list.xpath('.//a[@class="position_link"]/@href').extract()[0]
            item['work_location'] = work_list.xpath('.//a[@class="position_link"]/span/em/text()').extract()[0]
            item['pulish_time'] = work_list.xpath('.//span[@class="format-time"]/text()').extract()[0]
            item['salary'] = work_list.xpath('.//span[@class="money"]/text()').extract()[0]
            item['work_exprience'] = work_list.xpath('.//div[@class="li_b_l"]/text()').extract()[2]
            item['company'] = work_list.xpath('.//div[@class="company_name"]/a/text()').extract()[0]
            item['industry'] = work_list.xpath('.//div[@class="industry"]/text()').extract()[0]
            yield item
        # NOTE(review): a blocking sleep stalls Scrapy's event loop; the
        # DOWNLOAD_DELAY setting is preferred.  Kept for behavioural parity.
        time.sleep(random.randint(2, 5))
        yield scrapy.Request(url_next, headers=DEFAULT_REQUEST_HEADERS, cookies=COOKIES, callback=self.parse)
| [
"602437897@qq.com"
] | 602437897@qq.com |
dbcba9ad27062bad24db1012950d38e19bd1f169 | ffde0966c32c07043f2ec9bb23da3e6cdbe9d0fa | /CirclePlotter.py | fd4544a31c9f98c67ad5e44288e8a066e13d213d | [] | no_license | matthj1/OS-API-Learning | dac921e33a3d9d40f11885c0c6a91833fd712fb4 | f6111c9cf25101e8591591ed6d9a726ae90473ad | refs/heads/master | 2022-12-25T11:29:36.110765 | 2020-09-29T14:31:33 | 2020-09-29T14:31:33 | 298,582,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | import math
pi = math.pi


def circle_coords(radius, points, centre):
    """Return ``points`` (x, y) pairs evenly spaced around a circle.

    The circle has the given ``radius`` and is centred at ``centre`` (a
    2-tuple); point k sits at angle k * 2*pi / points, starting on the
    positive x axis and proceeding counter-clockwise.
    """
    cx, cy = centre
    coords = []
    for k in range(points):
        angle = 2 * pi / points * k
        coords.append((math.cos(angle) * radius + cx,
                       math.sin(angle) * radius + cy))
    return coords
if __name__ == "__main__":
print(circle_coords(10, 36, (10.05476, 6.09875)))
| [
"joematthewsphotography@gmail.com"
] | joematthewsphotography@gmail.com |
e264997a567934de5c318c0c7c6f24c252c280b3 | eed3c272b0fcb379967df62e161e46869b4dd1b7 | /src/SF/Exp1_2.py | 5d9ef509732ea7d43a6ad1507a00a63226b02ec1 | [] | no_license | juanfung/retrofit-policy | 64d76fd98c6797a2e893cacaf690bcf747ec37df | f8f390eb446fae4c21f1983933643d3737d772a1 | refs/heads/main | 2023-01-24T11:41:43.380121 | 2020-11-23T18:24:22 | 2020-11-23T18:24:22 | 152,454,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,156 | py | # Post-1980 wood-frame buildings
# 1. Add building characteristics
# 2. Add sales data, convert sales data to 2019 USD
# 3. Add assessor data, convert assessor data to 2019 USD
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# path = "F:/RetrofitPolicy/SF Exp1/"
# os.chdir(path)
################################ Functions ####################################
url_inflation = 'https://raw.githubusercontent.com/juanfung/retrofit-policy/master/data/raw/SF_Exp1/CPI.csv'
# Bug fix: the original read from an undefined name ``url`` (NameError at
# import time); the CPI table lives at ``url_inflation``.
df_inflation = pd.read_csv(url_inflation, index_col=0)
# df_inflation = pd.read_csv('CPI.csv')
CPI = df_inflation['Annual']
# Build a 21x21 conversion table from the annual CPI series:
# inflation[i, j] = CPI[j] / CPI[i], i.e. the factor that converts a value
# expressed in year (2000+i) dollars into year (2000+j) dollars.
CPI_matrix = np.tile(CPI, (21, 1))
CPI_matrix2 = np.transpose(CPI_matrix)
inflation = CPI_matrix / CPI_matrix2
def convertor(dataframe,column_price,column_year):
    """Convert a price column to 2019 USD via the CPI ``inflation`` table.

    For each row, reads ``row[column_price]`` and the year in
    ``row[column_year]``; returns a list with one converted value per row.
    Rows whose year cannot be parsed as an int keep the raw price
    (assessor data not available).
    """
    value = []
    for index, row in dataframe.iterrows():
        # 21 copies of the price: one per CPI year column.
        Estimate = np.repeat(row[str(column_price)],21)
        try:
            year = int(row[str(column_year)])
            # Row `year-2000` of `inflation` rescales from `year` dollars
            # into every target year's dollars at once.
            Estimate = Estimate * inflation[year-2000]
            # Index -2 picks the second-to-last year, i.e. 2019 (callers
            # store this as *_2019) — confirm against CPI.csv's year span.
            value.append(Estimate[-2])
        except ValueError:
            value.append(Estimate[-2]) # Assessor data is not available
    return value
def convertor2(dataframe,year_begin,year_end):
    """Convert per-year assessed-value columns to 2019 USD.

    Expects ``dataframe`` to carry one column per year named ``str(year)``
    for year_begin..year_end; returns a list of Series, one per row, each
    holding the row's yearly values rescaled to 2019 dollars (column 19 of
    ``inflation`` is the 2019 target).
    """
    value = []
    # Assemble the year columns side by side.
    price = dataframe[str(year_begin)]
    for year in range(year_begin+1, year_end+1, 1):
        price = pd.concat([price,dataframe[str(year)]], axis=1)
    for index, row in price.iterrows():
        try:
            Estimate = row * inflation[(year_begin-2000):(year_end-2000+1),19]
            value.append(Estimate)
        except ValueError:
            # NOTE(review): on a ValueError this appends the *previous*
            # iteration's Estimate (or raises NameError on the very first
            # row) — looks unintended; confirm before relying on it.
            value.append(Estimate)
    return value
################################################################################
## Add building characteristics
# Join the post-1980 wood-frame building list with the 2017-2018 assessor
# roll on (Block, Lot), normalising both keys to strings first.
url = 'https://raw.githubusercontent.com/juanfung/retrofit-policy/master/data/processed/SF_Exp1/Wood%20frame/post1980_nonsoft_wood_single.csv'
df = pd.read_csv(url,index_col=False)
url2 = 'https://raw.githubusercontent.com/juanfung/retrofit-policy/master/data/raw/SF_Exp1/Assessor%20data/SF_property_tax2017-2018.csv'
df2 = pd.read_csv(url2,index_col=False)
# df = pd.read_csv('post1980_nonsoft_wood_single.csv')
# df2 = pd.read_csv('Assessor data/SF_property_tax2017-2018.csv')
df['Block'] = df['Block'].astype(str)
df2['Block'] = df2['Block'].astype(str)
df['Lot'] = df['Lot'].astype(str)
df2['Lot'] = df2['Lot'].astype(str)
df3 = pd.merge(df, df2, how='left', on=['Block', 'Lot'])

## Add building use categories
# Map each building's class code (RP1CLACDE) to its human-readable use.
url4 = 'https://raw.githubusercontent.com/juanfung/retrofit-policy/master/data/raw/SF_Exp1/Assessor%20data/Class_code.csv'
df4 = pd.read_csv(url4,index_col=0)
# df4 = pd.read_csv('Assessor data/Class_code.csv')
use = []
use_missing = 0
for index, row in df3.iterrows():
    try:
        description = df4.loc[df4.CODE==row['RP1CLACDE'], 'DESCRIPTION'].values[0]
        use.append(description)
    except IndexError:
        # Unknown class code: record NaN and count the miss.
        use.append(np.nan)
        use_missing += 1
df3['Use'] = use

## Statistics of buildings
# Bar chart of building counts per use category.
use_list = list(set(use))
use_count = []
for item in use_list:
    use_count.append(use.count(item))
y = np.arange(len(use_list))
plt.bar(y, use_count, align='center', alpha=0.5)
plt.xticks(y,use_list,rotation=90)
plt.xlabel('Usage', fontsize=12)
plt.ylabel('Number of buildings', fontsize=12)
plt.show()

## Add sales data
# Join sales history on parcel id (APN == RP1PRCLID).
url5 = 'https://raw.githubusercontent.com/juanfung/retrofit-policy/master/data/processed/SF_Exp1/sales_history_clean.csv'
df5 = pd.read_csv(url5,index_col=0,parse_dates=[0])
# df5 = pd.read_csv('sales_history_clean.csv')
df5 = df5[['APN','Sale_price','Sale_year']]
df3 = pd.merge(df3, df5, how='left',left_on='RP1PRCLID', right_on='APN')

## Convert sales data to 2019 USD
value1 = convertor(df3,'Sale_price','Sale_year')
df3['Sale_price_2019'] = value1
df3.to_csv('Post1980_woodframe.csv') # Export data to a csv file
## Add assessor data
for y in range(2008,2019,1):
url6 = 'https://raw.githubusercontent.com/juanfung/retrofit-policy/master/data/\
raw/SF_Exp1/Assessor%20data/SF_property_tax'+str(y-1)+'-'+str(y)+'.csv'
df6 = pd.read_csv(url6,index_col=False)
# df6 = pd.read_csv('Assessor data/SF_property_tax'+str(y-1)+'-'+str(y)+'.csv')
assessed_value = df6['RP1IMPVAL'] + df6['RP1LNDVAL']
df6[str(y)] = assessed_value
df6 = df6[['RP1PRCLID',str(y)]]
df3 = pd.merge(df3, df6, how='left', on='RP1PRCLID')
## Convert assessor data to 2019 USD
value2 = convertor2(df3, 2008, 2018)
df7 = pd.concat(value2, axis=1).T # Convert list of pd series to pd frame
df3 = pd.concat([df3, df7], axis=1)
df3.to_csv('Wood_assessment_post1980.csv') # Export data to a csv file
df7.dropna(how='any', inplace=True) # Drop the rows where at least one element is missing
df7[(df7 != 0).all(1)] # Drop the rows containing 0
summary = df7.describe()
print(summary)
plt.errorbar(range(2008,2019,1), summary.iloc[1,:11],\
summary.iloc[2,:11], linestyle='None', marker='_')
plt.xlabel('Year', fontsize=12)
plt.ylabel('Assessed property value (2019 USD)', fontsize=12)
plt.show()
| [
"juan.fung@nist.gov"
] | juan.fung@nist.gov |
5ada850496b766d56da6dc90b7d634e1aa9f19c4 | 1cf3a339c0f94bce94cf142fde9a9f6ab38369a8 | /yt_arch/core/api_client.py | 226b998e4c8765b26d726b26d53496c6d0694b0e | [
"MIT"
] | permissive | hilbertqqc/youtube-playlist-archiver | 959f9afc541c293ff05b37b99833f640d39f4c2a | 69727075e0151d03259c373647278312b11f0299 | refs/heads/master | 2023-03-24T03:34:36.507215 | 2021-03-17T20:57:48 | 2021-03-17T20:57:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | import httpapiclient
from httpapiclient.mixins import JsonResponseMixin, HelperMethodsMixin
class ApiClient(JsonResponseMixin, HelperMethodsMixin, httpapiclient.BaseApiClient):
    """HTTP client for the YouTube Data API v3.

    All behavior (request helpers, JSON decoding) comes from the mixins and
    the httpapiclient base class; this subclass only pins the base URL so
    relative paths resolve against the YouTube v3 endpoint.
    """
    base_url = 'https://www.googleapis.com/youtube/v3/'
| [
"dbashkatov@gmail.com"
] | dbashkatov@gmail.com |
35b5605675d38e47f6e9113f00cec7ad47b2cd14 | 39d26bedd4049d58265fcd6c480cc7a5b73c7ece | /Tutorial_SimpleTeacherAPI/python-sample-code/tests/conftest.py | 5698513372a065217fe1f856dfe1735a2f254317 | [] | no_license | sramirezh/Developing | 7adc6dbb5c8436db6a3ab125018186ea7bdd1b40 | a07ed07899911b9860830f9498c08144c4eca3d4 | refs/heads/master | 2022-11-08T01:43:05.755215 | 2021-08-23T03:57:39 | 2021-08-23T03:57:39 | 249,786,342 | 0 | 1 | null | 2022-10-24T21:02:22 | 2020-03-24T18:26:46 | Python | UTF-8 | Python | false | false | 281 | py | import pytest
@pytest.fixture
def basic_tree():
    """Insertion-ordered (key, "key") pairs for building a small test tree."""
    keys = [23, 4, 30, 11, 7, 34, 20, 24, 22, 15, 1]
    return [(key, str(key)) for key in keys]
| [
"sramirez.hinestrosa@gmail.com"
] | sramirez.hinestrosa@gmail.com |
c4471486670e06f8fc2d38a46f1455aac165f69f | a4b3c10ae7e82995ffb012d2787ca5e622cc83b5 | /LibOM/Tools.py | ff729d814c3c4b0d1a7668527f08d0b4f0ba952b | [] | no_license | hamzaz/OpenMaker | 1b8edd321d9ae37923249ded05b71bd002255d60 | a8b0ed13b6a5059451b92ffd5453ea6b2d6d094c | refs/heads/master | 2021-01-20T17:03:41.564352 | 2017-05-09T14:43:11 | 2017-05-09T14:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,946 | py | """This module contains a set of functions and classes to be used while analyzing themes/memes within maker community.
Attributes:
*
Classes:
* WatchTower: Connects to BOUNs Media Watch Tower API to retrieve influencers and themes.
* Twitter: Connects to Twitter API to harvest tweets of the predetermined user(s)
* MakerDictionary: The class has a set of tools (methods) and attributes that is used to to load and compile
regular expressions/patterns for maker related keywords and phrases.
* ScoreBoard: The class has a set of tools to compute, normalize and rank content of tweets/texts of an actor or a group of actors/
Functions:
* extract_features: It extracts maker related features using other objects in the library.
Todo:
* Adding Python Docstrings
*
"""
import requests, oauth2, json
import pickle, csv
import re
class WatchTower:
    """Client for the BOUN Media Watch Tower API.

    Fetches the list of themes and, for each theme, the usernames of its
    influencers, de-duplicated across themes.
    """
    url_api = 'http://138.68.92.181:8484/api'
    path_themes = '/get_themes'
    path_influencers = '/get_influencers'
    def __init__(self, influencers=None):
        # Optionally seed the instance with a pre-fetched influencer list.
        self.influencers = influencers
    def retrieve_influencers(self):
        """Fetch the influencers of every theme.

        Returns the de-duplicated username list, or None when the themes
        request fails.  Side effect: replaces self.influencers.
        """
        resp = requests.get(WatchTower.url_api + WatchTower.path_themes)
        if resp.status_code != 200: return None
        self.influencers = list()
        for theme in resp.json()['themes']:
            tname = theme['name']
            rurl = WatchTower.url_api + WatchTower.path_influencers + '/' + tname
            resp = requests.get(rurl)
            influencers = resp.json()['influencers']
            for influencer in influencers:
                name = influencer['username']
                # BUGFIX: the duplicate check used to test membership in the
                # URL path string self.path_influencers ('/get_influencers'),
                # which skipped usernames that happened to be substrings of it
                # and never de-duplicated real repeats across themes.
                if name in self.influencers: continue
                self.influencers.append(name)
        return self.influencers
class Twitter:
Consumer_Key = "" # API key
Consumer_Secret = "" # API secret
Access_Token = ""
Access_Token_Secret = ""
url_api = "https://api.twitter.com/1.1/statuses/user_timeline.json?tweet_mode=extended"
def __init__(self, new=None):
if new:
self.consumer = oauth2.Consumer(key=new['Consumer_Key'], secret=new['Consumer_Secret'])
self.access_token = oauth2.Token(key=new['Access_Token'], secret=new['Access_Token_Secret'])
self.Client = oauth2.Client(self.consumer, self.access_token)
else:
self.consumer = oauth2.Consumer(key=Twitter.Consumer_Key, secret=Twitter.Consumer_Secret)
self.access_token = oauth2.Token(key=Twitter.Access_Token, secret=Twitter.Access_Token_Secret)
self.Client = oauth2.Client(self.consumer, self.access_token)
def accumulate_auser_tweets(self, auser, nlatest = 200):
# type: (str, int) -> dict
request = Twitter.url_api + "&screen_name="+ auser + "&count=" + str(nlatest)
response, data = self.Client.request(request)
tweets = json.loads(data)
if isinstance(tweets, dict):
message = "Error at Connection to Twitter API: {1}".format(tweets['errors'])
print "Connection errot to the account ", auser
raise Exception(message)
content = ""
ntweet = 0
for tweet in tweets:
if tweet["lang"] != "en": continue
ntweet += 1
content += tweet["full_text"] + '\n'
user_content = {"ntweets": ntweet, "content": content}
return user_content
def retrieve_tweets(self, userlist, nlatest=200):
tweets = dict()
userlist = [str(x) for x in userlist if isinstance(x, basestring)]
for user in userlist:
try:
tweets[user] = self.accumulate_auser_tweets(user,nlatest)
except Exception:
tweets[user] = {"ntweets": 0, "content": ""}
return tweets
class MakerDictionary:
    """Loads and compiles regex patterns for maker-related keywords/phrases.

    Compiled patterns and lookup tables are cached as pickles under ./data/;
    they are rebuilt from the csv sources when the pickles are missing.
    (Python 2 module: `reader.next()`, `print`, list-returning dict views.)
    """
    csv_terms = "./data/MakerDict.csv"
    csv_categories = "./data/MakerCat.csv"
    @staticmethod
    def load_mapped_terms(csv_terms, csv_categories):
        """Rebuild all tables/patterns from the csv files and pickle them.

        Returns (word patterns, phrase patterns, features table, mappings).
        """
        # term -> list of category codes (empty cells dropped).
        table_Mmappings = dict()
        with open(csv_terms, mode='r') as infile:
            reader = csv.reader(infile)
            for row in reader:
                entry = row[0]
                categories = row[1:]
                categories = [cat for cat in categories if cat]
                table_Mmappings[entry] = categories
        pickle.dump(table_Mmappings, open("./data/table_Mmappings.p", "wb"))
        # Loading features to words table:
        table_Mfeatures = dict()
        try:
            # NOTE(review): this pickle load looks redundant — every entry is
            # re-initialized from csv_categories just below. Confirm intent.
            table_Mfeatures = pickle.load(open("./data/table_Mfeatures.p", "rb"))
        except IOError:
            table_Mfeatures = {}
        with open(csv_categories, mode='r') as infile:
            reader = csv.reader(infile)
            # Row 1: category names; row 2: category codes (Python 2 .next()).
            category_names = reader.next()
            category_names = [cat.lower() for cat in category_names]
            category_codes = reader.next()
            for i, cat in enumerate(category_codes):
                table_Mfeatures[cat] = {'name':category_names[i], 'content':list()}
        # Invert the mappings: every category accumulates its member terms.
        for entry in table_Mmappings.keys():
            for cat in table_Mmappings[entry]:
                table_Mfeatures[cat]['content'].append(entry)
        pickle.dump(table_Mfeatures, open("./data/table_Mfeatures.p", "wb"))
        # compiling patterns::
        def form_word_pattern(key, isolate=True):
            # A trailing "*" in the csv term means "any suffix" ([a-z']*);
            # parentheses are escaped; `isolate` appends optional punctuation
            # so standalone words still match "word," / "word." etc.
            if key[-1] == "*":
                thepattern = key[0:-1]
                thepattern += "[a-z']" + "*"
            else:
                thepattern = key
            thepattern = thepattern.replace("(", "\(")
            thepattern = thepattern.replace(")", "\)")
            if isolate: thepattern += "[.,;:]*"
            return thepattern
        def form_phrase_pattern(words):
            # Multi-word terms: the per-word patterns joined by whitespace,
            # with trailing punctuation allowed only at the phrase end.
            thepattern = ''
            for word in words:
                if thepattern:
                    thepattern += '\s+' + form_word_pattern(word, isolate=False)
                else:
                    thepattern += form_word_pattern(word, isolate=False)
            thepattern += "[.,;:]*"
            return thepattern
        # Compiled pattern object -> {"key": original term, "categories": [...]}.
        table_patterns_keywords = dict()
        table_patterns_phrases = dict()
        for key, value in table_Mmappings.items():
            words = key.split()
            isphrase = False
            print words
            if len(words) > 1:
                thepattern = form_phrase_pattern(words)
                isphrase = True
            else:
                thepattern = form_word_pattern(key)
            p = re.compile(thepattern, re.IGNORECASE)
            if isphrase:
                table_patterns_phrases[p] = {"key": key, "categories": value}
            else:
                table_patterns_keywords[p] = {"key": key, "categories": value}
        pickle.dump(table_patterns_keywords, open("./data/patterns_words.p", "wb"))
        pickle.dump(table_patterns_phrases, open("./data/patterns_phrases.p", "wb"))
        return table_patterns_keywords, table_patterns_phrases, table_Mfeatures, table_Mmappings
    def __init__(self):
        """Load the name->code category map from csv and the cached patterns."""
        self.categories = dict()
        with open(MakerDictionary.csv_categories, mode='r') as infile:
            reader = csv.reader(infile)
            category_names = reader.next()
            category_names = [cat.lower() for cat in category_names]
            category_codes = reader.next()
            self.categories = {category_names[i]:cat for i, cat in enumerate(category_codes)}
        self._load_patterns()
    def update_patterns(self,csv_terms=None,csv_categories=None):
        """Force a rebuild of patterns/tables, optionally from other csv files."""
        fterms = csv_terms if csv_terms else MakerDictionary.csv_terms
        fcats = csv_categories if csv_categories else MakerDictionary.csv_categories
        compiled = MakerDictionary.load_mapped_terms(fterms, fcats)
        self.pattern_words = compiled[0]
        self.pattern_phrases = compiled[1]
        self.table_Mfeatures = compiled[2]
        self.table_Mmappings = compiled[3]
    def match_words(self, text):
        # type: str -> dict
        """Count single-word pattern hits per category over every word of `text`.

        Returns nmappings/counts/nwords plus the matched / not-matched words.
        O(words x patterns): every word is tried against every pattern.
        """
        table_counts = dict()
        for key in self.table_Mfeatures: table_counts[key] = 0
        mapping_count = 0
        word_count = 0
        matched_list = list()
        not_matched_list = list()
        for line in text.split('\n'):
            words = line.strip().split()
            words = [word.strip() for word in words if word]
            for word in words:
                word_count += 1
                matched = 0
                for p in self.pattern_words.keys():
                    m = p.match(word)
                    if not m: continue
                    # print word
                    # A word can map to several categories; each counts once.
                    for cat in self.pattern_words[p]["categories"]:
                        matched += 1
                        table_counts[cat] += 1
                mapping_count += matched
                matched_list.append(word) if matched else not_matched_list.append(word)
        results = dict()
        results["nmappings"] = mapping_count
        results["counts"] = table_counts
        results["nwords"] = word_count
        results["matched"] = matched_list
        results["not_matched"] = not_matched_list
        return results
    def match_phrases(self, text):
        # type: str -> dict
        """Count multi-word phrase hits per category, line by line."""
        table_counts = dict()
        for key in self.table_Mfeatures: table_counts[key] = 0
        mapping_count = 0
        for line in text.split('\n'):
            for p in self.pattern_phrases.keys():
                m = p.findall(line)
                nfind = len(m)
                if not nfind: continue
                matched = 0
                # print m
                for cat in self.pattern_phrases[p]["categories"]:
                    matched += nfind
                    table_counts[cat] += nfind
                mapping_count += matched
        results = dict()
        results["nmappings"] = mapping_count
        results["counts"] = table_counts
        return results
    def get_category_name(self, code):
        """Reverse lookup: category code -> category name (Python 2 only:
        relies on keys()/values() being parallel lists)."""
        cat = self.categories.keys()[self.categories.values().index(code)]
        return cat
    def _load_patterns(self):
        """Load the pickled caches; rebuild everything from csv if missing."""
        try:
            self.table_Mmappings = pickle.load(open("./data/table_Mmappings.p", "rb"))
            self.table_Mfeatures = pickle.load(open("./data/table_Mfeatures.p", "rb"))
            self.pattern_words = pickle.load(open("./data/patterns_words.p", "rb"))
            self.pattern_phrases = pickle.load(open("./data/patterns_phrases.p", "rb"))
        except IOError:
            fterms = MakerDictionary.csv_terms
            fcats = MakerDictionary.csv_categories
            compiled = MakerDictionary.load_mapped_terms(fterms, fcats)
            self.pattern_words = compiled[0]
            self.pattern_phrases = compiled[1]
            self.table_Mfeatures = compiled[2]
            self.table_Mmappings = compiled[3]
class ScoreBoard:
    """Keeps per-actor maker scores and lazily-cached rankings.

    `table` maps actor -> raw feature numbers plus normalized scores;
    `rankings` caches sorted (actor, score) lists keyed by
    (category, score-type) and is refreshed whenever actors change.
    """
    @staticmethod
    def compute_scores(features):
        """Normalize raw match counts into raw / per-tweet / per-word scores.

        Categories with a zero count are dropped; the total mapping count is
        stored under the special key "all".
        """
        n_tweets = features['ntweets']
        n_words = features['nwords']
        def normalize(count):
            count = float(count)
            return {
                'raw': count,
                'per_tweet': count / n_tweets if n_tweets else 0,
                'per_word': count / n_words if n_words else 0,
            }
        scores = {cat: normalize(cnt)
                  for cat, cnt in features['counts'].items() if cnt}
        scores["all"] = normalize(features['nmappings'])
        return scores
    def __init__(self):
        self.table = {}
        self.rankings = {}
    def add_actor(self, actor, features):
        """Register (or replace) an actor and refresh every cached ranking."""
        self.post_scores(actor, features)
        self.update_rankings()
    def remove_all(self):
        """Drop every actor and every cached ranking."""
        self.table = {}
        self.rankings = {}
    def remove_one(self, actor):
        """Drop one actor and refresh the cached rankings."""
        del self.table[actor]
        self.update_rankings()
    def compute_rankings(self, category='all', stype = "per_tweet"):
        """(Re)build the sorted ranking for one (category, score-type) pair."""
        scored = [(actor, entry['scores'][category][stype])
                  for actor, entry in self.table.items()
                  if category in entry['scores']]
        scored.sort(key=lambda pair: pair[1], reverse=True)
        self.rankings[(category, stype)] = scored
    def get_rankings_one(self, category='all', stype='per_tweet'):
        """Return the positive-score ranking, computing it on first request."""
        cache_key = (category, stype)
        if cache_key not in self.rankings:
            self.compute_rankings(category, stype)
        return [entry for entry in self.rankings[cache_key] if entry[1] > 0]
    def get_score_one(self, actor, category='all', stype='per_tweet'):
        """One score value, or None when the actor/category is unknown."""
        entry = self.table.get(actor)
        if entry is None:
            return None
        category_scores = entry['scores'].get(category)
        if category_scores is None:
            return None
        return category_scores[stype]
    def get_scores(self, actor, categories, stype='per_tweet'):
        """Scores of one actor over several categories, truthy values only."""
        looked_up = {cat: self.get_score_one(actor, cat, stype)
                     for cat in categories}
        return {cat: score for cat, score in looked_up.items() if score}
    def is_on_board(self, actor):
        """True when the actor has been posted to the board."""
        return actor in self.table
    def update_rankings(self):
        """Recompute every ranking that has been cached so far."""
        for category, stype in self.rankings.keys():
            self.compute_rankings(category, stype)
    def post_scores(self, actor, features):
        """Store the raw feature numbers and their normalized scores."""
        self.table[actor] = {
            'ntweets': features['ntweets'],
            'nwords': features['nwords'],
            'nmappings': features['nmappings'],
            'scores': ScoreBoard.compute_scores(features)}
def extract_features(text, MDict):
    """Aggregate phrase-level and word-level maker matches for `text`.

    Returns (total mappings, total words, per-category combined counts),
    where the counts merge the word and phrase tallies of `MDict`.
    """
    phrase_hits = MDict.match_phrases(text)
    word_hits = MDict.match_words(text)
    total_mappings = phrase_hits['nmappings'] + word_hits['nmappings']
    combined_counts = {
        category: word_hits['counts'][category] + phrase_hits['counts'][category]
        for category in word_hits['counts']
    }
    return total_mappings, word_hits['nwords'], combined_counts
if __name__ == '__main__':
    # Smoke-test / demo: fetch a few influencers, score their latest tweets,
    # and dump the score table and some rankings. Requires network access
    # and the ./data csv files (Python 2: bare `print` statements).
    Client_WT = WatchTower()
    Client_Twitter = Twitter()
    influencer_names = Client_WT.retrieve_influencers()
    debates = Client_Twitter.retrieve_tweets(influencer_names[0:5], 5)
    SB = ScoreBoard()
    MD = MakerDictionary()
    for inf in debates.keys():
        ntweets = debates[inf]['ntweets']
        text = debates[inf]['content']
        nmappings, nwords, counts = extract_features(text, MD)
        features = {"ntweets":ntweets, "nwords":nwords, "nmappings":nmappings, "counts":counts}
        SB.add_actor(inf, features)
    # Dump the raw table. NOTE: the loop variable `type` shadows the builtin.
    for k, v in SB.table.items():
        print "_" * 20
        print k, v["ntweets"], v["nwords"], v["nmappings"]
        for type in v['scores'].keys():
            print type, v['scores'][type]
        print "_" * 100
    print "_" * 100
    SB.compute_rankings('0', 'per_word')
    SB.compute_rankings('5', 'per_word')
    SB.compute_rankings('all', 'per_word')
    print "_" * 60
    for k, v in SB.rankings.items(): print k, v
    print SB.get_rankings_one('all', 'per_word')
    print SB.get_rankings_one('1', 'raw')
    print SB.get_rankings_one('7', 'per_tweet')
    print "_" * 60
    for k, v in SB.rankings.items(): print k, v
    # NOTE(review): get_rankings_one takes (category, stype) — the calls
    # below pass actor names instead, and the 3-argument calls would raise
    # TypeError (too many positional arguments). Looks like leftover debug
    # code that was meant for get_scores/get_score_one.
    print SB.get_rankings_one("3DPrintGirl")
    print SB.get_rankings_one("shapeways", "all", "per_tweet")
    print SB.get_rankings_one("ozel", "5", "per_tweet")
    """ Example score table:
    score_table = {
    "maker1": { "overall": {'raw':100, 'per_tweet':2, 'per_word':0.1},
                "openness": {'raw':50, 'per_tweet':1.0, 'per_word':0.2}
                },
    "maker2": { "overall": {'raw':200, 'per_tweet':2, 'per_word':0.1},
                "openness": {'raw':50, 'per_tweet':1.0, 'per_word':0.3},
                "sustainability": {'raw':40, 'per_tweet':2.0, 'per_word':0.5}
                }
    }
    """
| [
"bulentozel@gmail.com"
] | bulentozel@gmail.com |
243ef68fe11d18e22369979cd2bf46125b0e0df8 | c97fc7658c39feb51c0ed42c04783797c8675b8a | /2018/pcy1/day12_mysql/orm8_fk3_update.py | 8e4124a903201d0e359e84c71a75f1bf66cd9c77 | [] | no_license | githubvit/study | 8bff13b18bea4954e8ed1b4619a091b134b8ff97 | 845e19d1225f1aa51c828b15effac30be42fdc1b | refs/heads/master | 2023-02-20T15:59:19.635611 | 2021-12-15T08:30:54 | 2021-12-15T08:30:54 | 241,928,274 | 1 | 1 | null | 2023-02-02T06:18:48 | 2020-02-20T16:08:06 | Python | UTF-8 | Python | false | false | 2,731 | py | #_*_coding:utf-8_*_
'''
8,外键foreign key
8.3修改数据
study_record考勤表在插入时stu_id字段为null,修改
'''
from sqlalchemy import create_engine,ForeignKey,bindparam
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String,DATE,Enum
from sqlalchemy.orm import sessionmaker
# 1. Connect to the database.
# NOTE(review): credentials are hard-coded in the URL.
engine = create_engine("mysql+pymysql://root:another333@localhost/test_db",
                       encoding='utf-8',echo=True)
# mysql+pymysql: run the generated SQL through the pymysql driver.
# echo=True: log the emitted statements so the raw SQL is visible.
# 2. Map tables to classes (declarative ORM).
Base = declarative_base() # declarative base class for all ORM models
class Student(Base):# the student table
    __tablename__='student'
    # `id` shadows the builtin, but that is the conventional ORM column name.
    id=Column(Integer,primary_key=True)
    name=Column(String(32),nullable=False)
    register_date=Column(DATE,nullable=False)
class StudyRecody(Base):# the attendance-record table (sic: "Recody")
    __tablename__='study_record'
    id=Column(Integer,primary_key=True)
    day=Column(Integer,nullable=False)
    status=Column(String(32),nullable=False)
    # 3. Foreign key referencing student.id.
    stu_id=Column(Integer,ForeignKey('student.id'))
# 3. Create a database session — the handle used for all operations
# (comparable to a file handle).
Session_class = sessionmaker(bind=engine)# session factory: note this is a class, not an instance
session = Session_class() # instantiate the working session
# 4. Bulk-update the attendance table (executemany with bound parameters).
# The WHERE clause binds `b_id` via bindparam (imported above); each dict in
# the list then supplies b_id/day/status/stu_id for one UPDATE.
session.execute(
    StudyRecody.__table__.update().where(StudyRecody.id==bindparam('b_id')),
    [ {'b_id':1,'day':1,'status':'yes','stu_id':1},
      {'b_id':2,'day':1,'status':'yes','stu_id':2},
      {'b_id':3,'day':1,'status':'no','stu_id':3},
      {'b_id':4,'day':2,'status':'no','stu_id':1},
    ]
    )
# The two bare strings below are the author's notes, kept verbatim: the
# generated SQL (every dict must share the same keys for executemany to
# work) and the resulting table contents.
'''UPDATE study_record SET day=%(day)s, status=%(status)s WHERE study_record.id = %(b_id)s
这是批量修改的方法,execute可以执行多条数据插入或更新,但是数据的格式必须相同,不可以像:
[{'b_id':1,'day':1,'status':'yes'},
{'b_id':3,'status':'no'}]
这样无法执行,因为第2条需要的原生sql和第一条是不同的'''
'''
上面的执行结果如下:
mysql> select * from study_record;
+----+-----+--------+--------+
| id | day | status | stu_id |
+----+-----+--------+--------+
| 1 | 1 | yes | 1 |
| 2 | 1 | yes | 2 |
| 3 | 1 | no | 3 |
| 4 | 2 | no | 1 |
+----+-----+--------+--------+
4 rows in set (0.00 sec)
mysql>
'''
# Persist the updates.
session.commit()
"sgq523@163.com"
] | sgq523@163.com |
71a5825c6ebf0e58ab701d593092a5a502881890 | bec959395820d3e273f292becede91afa8053aba | /2.231.py | 6b3ee0f17cd6e8814cd495d5ec07f0c785d45c5e | [] | no_license | smartu3/kernel | 0779f776fac53b14f6b7c172414a9fd5def8d801 | 34f1a1a35c5e35bcfcf8bcdd87735fcd42f04abc | refs/heads/master | 2021-01-22T04:41:50.028909 | 2017-04-26T08:00:22 | 2017-04-26T08:00:22 | 81,574,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | # -*-coding:utf-8 -*-
class Itr(object):
    """Demo object that is both callable and iterable (Python 2).

    iter() treats the two capabilities differently, as exercised below.
    """
    def __init__(self):
        self.result = ['a','b','c','d']
        # shared iterator consumed by successive __call__ invocations
        self.i = iter(self.result)
    def __call__(self):
        # Each call yields the next element of self.i (StopIteration at the end).
        res = next(self.i)
        print "__call__ called,which would return %s" %res
        return res
    def __iter__(self):
        # A fresh list iterator every time the object is iterated.
        print "__iter__ called"
        return iter(self.result)
# iter() returns an iterator over an iterable / callable object
itr = Itr()
i1 = iter(itr,'c')# itr is callable: iter(callable, sentinel) builds a callable-iterator that invokes __call__ each step and stops as soon as 'c' is produced
print "i1=%s" %i1
i2 = iter(itr)# itr is iterable: iter() calls the __iter__ defined above (which returns a listiterator)
print "i2=%s" %i2# i.e. iter() looks for an __iter__ method that can return an iterator!
for i in i1:
    print i
for i in i2:
    print i
# To define a true iterator class you must implement the "next" protocol
# method, returning one value per call.
class Next(object):
    """Self-iterating counter from `data`+1 up to 6 (Python 2 iterator)."""
    def __init__(self,data =1):
        self.data = data
    def __iter__(self):# returning self makes iter(Next(data)) yield the instance itself, which implements the next-protocol below — so it IS an iterator
        return self
    def next(self):# note: Python 3 calls this __next__; Python 2 calls it next
        print "__next__ called"
        if self.data >5:
            raise StopIteration
        else:
            self.data +=1
            return self.data
for i in Next(3):
    print i # 4..5..6..StopIteration
for i in iter(Next(3)):
    print i # same as above
"zhze93@qq.com"
] | zhze93@qq.com |
87feffb75938933a0676f94999c40d3b1256618a | d36074240972041b1f8ac19acac154cd707c0628 | /manage.py | b7a4fd392e450c05aaa42010b1f3c0b0c0445269 | [] | no_license | jaya-joseph/erp | d45b99c41cc92b2e9cd2f6b011419eeb99d7a110 | 1c8bbd3eca4a7b14971bd2238f78176834f2cbda | refs/heads/master | 2023-05-17T08:36:55.988890 | 2021-06-10T10:17:43 | 2021-06-10T10:17:43 | 375,663,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RITSOFT.settings')
    try:
        # Imported lazily so a missing Django produces the helpful message
        # below rather than a bare ImportError at module import time.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"62609770+jaya-joseph@users.noreply.github.com"
] | 62609770+jaya-joseph@users.noreply.github.com |
e0a8e099387d137239284405b8a10b388eca81c7 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/uv_align_distribute/pack_islands.py | a9fd3e306ce026b741f500faf275f51c1a81050d | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,153 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
PackIslands Module(attention still wp).
contain the operator used by blender to perform Island Packing
"""
from collections import defaultdict
import bpy.ops
from bpy.props import FloatProperty, BoolProperty, IntProperty
import mathutils
from . import make_islands, templates, utils, operator_manager, global_def
class _Rect:
"""Class rappresenting a rectangle."""
def __init__(self, x, y, width, height):
"""Initialize the class with origin(x, y), width and height."""
self.x = x
self.y = y
self.width = width
self.height = height
def __repr__(self):
"""String representation of Rect."""
return "Rect: x: {0}, y: {1}, width: {2}, height: {3}"\
.format(self.x, self.y, self.width, self.height)
def fit(self, other):
"""Test if other can be contained."""
if other.width <= self.width and other.height <= self.height:
return True
else:
return False
class _Node:
def __init__(self, rect):
self.used = False
self.left = None
self.right = None
self.rect = rect
def __repr__(self):
return "Node {0}: \n\tUsed: {1}, rect: {2}"\
.format(hex(id(self)), self.used, self.rect)
class _BinTree:
    """Binary-tree bin packer: each unused node is a free rectangle; placing
    a rect consumes a node and splits the leftover area into two children.
    Used by the (still experimental) PackIslands_not_working operator."""
    def __init__(self, rect):
        # Root covers the whole packing area; it grows on demand.
        self._root = _Node(rect)
    def insert(self, rect):
        """Place `rect` in the first fitting free node, growing the root
        area when nothing fits. Returns the node used, or None."""
        width = rect.width
        height = rect.height
        node = self.__findNode(self._root, width, height)
        if node:
            node = self.__splitNode(node, width, height)
            return node
        else:
            return self.__growNode(width, height)
    def __findNode(self, node, width, height):
        # Depth-first search for an unused node big enough for (width, height).
        if node.used:
            return self.__findNode(node.left, width, height) or \
                self.__findNode(node.right, width, height)
        elif round(width, 5) <= round(node.rect.width, 5) and \
                round(height, 5) <= round(node.rect.height, 5):
            # rounding to 5 decimals guards against float noise in UV coords
            return node
        else:
            return None
    def __splitNode(self, node, width, height):
        # Occupy the node's bottom-left corner; the leftover space becomes
        # two children: `left` above the placement, `right` to the right.
        node.used = True
        lRect = _Rect(node.rect.x, node.rect.y + height,
                      width, node.rect.height - height)
        print("Left: ", lRect)
        node.left = _Node(lRect)
        rRect = _Rect(node.rect.x + width, node.rect.y,
                      node.rect.width - width, node.rect.height)
        print("Right: ", rRect)
        node.right = _Node(rRect)
        return node
    def __growNode(self, width, height):
        # Decide in which direction to enlarge the root area.
        # NOTE(review): canGrowRight compares `width` against the root
        # *height* — looks like a transposed condition; verify before reuse.
        canGrowLeft = (width <= self._root.rect.width)
        canGrowRight = (width <= self._root.rect.height)
        # Prefer the direction that keeps the area closer to square.
        shouldGrowRight = canGrowRight and \
            (self._root.rect.height >= (self._root.rect.width + width))
        shouldGrowLeft = canGrowLeft and \
            (self._root.rect.width >= (self._root.rect.height + height))
        if shouldGrowRight:
            return self.__growRight(width, height)
        elif shouldGrowLeft:
            return self.__growLeft(width, height)
        elif canGrowRight:
            return self.__growRight(width, height)
        elif canGrowLeft:
            return self.__growLeft(width, height)
        else:
            return None
    def __growRight(self, width, height):
        # Widen the root in place and attach the new strip as `right`.
        # NOTE(review): the commented-out re-rooting below suggests this
        # was adapted from the classic grow-right that allocates a new root;
        # mutating the root while marking it used may leave `left` as None,
        # which __findNode would then dereference — treat as suspect.
        print("growing right")
        self._root.used = True
        self._root.rect.width += width
        # self._root.left = self._root
        self._root.right = _Node(_Rect(self._root.rect.width - width, 0,
                                       width, self._root.rect.height))
        node = self.__findNode(self._root, width, height)
        if node:
            return self.__splitNode(node, width, height)
        else:
            return None
    def __growLeft(self, width, height):
        # Heighten the root in place and attach the new strip as `left`.
        # NOTE(review): mirror of __growRight — `right` may stay None here;
        # same caveat as above.
        print("growing Left")
        self._root.used = True
        self._root.rect.height += height
        # self._root.right = None
        self._root.left = _Node(_Rect(0, self._root.rect.height - height,
                                      self._root.rect.width, height))
        node = self.__findNode(self._root, width, height)
        if node:
            return self.__splitNode(node, width, height)
        else:
            return None
class PackIslands_not_working(templates.UvOperatorTemplate):
    """Pack UV Islands in the uv space.

    Experimental home-grown bin-packing variant (kept for reference, not
    registered below). NOTE(review): it shares bl_idname with the working
    PackIslands class, so registering both would clash.
    """
    bl_idname = "uv.pack_pile_islands"
    bl_label = "Pack Pile Islands"
    bl_options = {'REGISTER', 'UNDO'}
    selectedOnly = BoolProperty(
        name="Selection Only",
        description="Pack only selected islands",
        default=False
    )
    # NOTE(review): islandMargin, pile and numOfPiles are declared and drawn
    # in the UI but never read by execute() below.
    islandMargin = FloatProperty(
        name="Margin",
        description="Margin between islands",
        default=0,
        min=0,
        max=1,
        soft_min=0,
        soft_max=1,
        step=1,
        precision=4)
    pile = BoolProperty(
        name="Pile",
        description="Pile similar island to save uv space",
        default=False
    )
    numOfPiles = IntProperty(
        name="Number of piles",
        description="number of piles to create",
        default=1,
        min=1,
        max=2**31-1,
        soft_min=1,
        soft_max=10,
        step=1
    )
    def execute(self, context):
        """Execute the script."""
        def getMax(island):
            # Longest side of the island's bounding box (sort key).
            bbox = island.BBox()
            width = bbox.right() - bbox.left()
            height = bbox.top() - bbox.bottom()
            val = max(width, height)
            return val
        makeIslands = make_islands.MakeIslands()
        islands = makeIslands.getIslands()
        selectedIslands = makeIslands.selectedIslands()
        activeIsland = makeIslands.activeIsland()
        hiddenIslands = makeIslands.hiddenIslands()
        # choose which island should be used
        usableIslands = islands
        if self.selectedOnly:
            usableIslands = selectedIslands
        # sort island with maxside:
        # biggest first, so the bin-tree seeds with the largest rectangle.
        usableIslands.sort(key=lambda island: getMax(island), reverse=True)
        # bin pack the island
        # NOTE(review): usableIslands[0] raises IndexError when the list is
        # empty (e.g. selectedOnly with nothing selected).
        islandBBox = usableIslands[0].BBox()
        width = islandBBox.right() - islandBBox.left()
        height = islandBBox.top() - islandBBox.bottom()
        rect = _Rect(0, 0, width, height)
        btree = _BinTree(rect)
        for island in usableIslands:
            islandBBox = island.BBox()
            width = islandBBox.right() - islandBBox.left()
            height = islandBBox.top() - islandBBox.bottom()
            rect = _Rect(0, 0, width, height)
            node = btree.insert(rect)
            if node:
                # Translate the island so its bottom-left corner lands on the
                # node's packed position.
                vector = mathutils.Vector((node.rect.x, node.rect.y)) - island.BBox().bottomLeft()
                island.move(vector)
        # scale the islands to fit uv space
        # get the whole BBox:
        bbox = utils.GBBox(usableIslands)
        width = bbox.right() - bbox.left()
        height = bbox.top() - bbox.bottom()
        # Uniform scale so the packed layout fits in the unit UV square.
        # NOTE(review): divides by zero when the global bbox is degenerate.
        scale = 1 / max(width, height)
        for island in usableIslands:
            for face_id in island:
                face = global_def.bm.faces[face_id]
                for loop in face.loops:
                    x = loop[global_def.bm.loops.layers.uv.active].uv.x
                    y = loop[global_def.bm.loops.layers.uv.active].uv.y
                    loop[global_def.bm.loops.layers.uv.active].uv.x = x * scale
                    loop[global_def.bm.loops.layers.uv.active].uv.y = y * scale
        utils.update()
        return{'FINISHED'}
    def draw(self, context):
        """Draw the operator props."""
        layout = self.layout
        layout.prop(self, "selectedOnly")
        layout.prop(self, "islandMargin")
        layout.prop(self, "pile")
        if self.pile:
            layout.prop(self, "numOfPiles")
class PackIslands(templates.UvOperatorTemplate):
    """Pack UV Islands in the uv space.

    Delegates the actual packing to Blender's built-in uv.pack_islands and
    adds optional "piling": isomorphic (same-topology) islands are stacked
    onto shared UV positions to save space.
    """
    bl_idname = "uv.pack_pile_islands"
    bl_label = "Pack Pile Islands"
    bl_options = {'REGISTER', 'UNDO'}
    # NOTE(review): selectedOnly is declared and drawn but never read in
    # execute() below.
    selectedOnly = BoolProperty(
        name="Selection Only",
        description="Pack only selected islands",
        default=False
    )
    rotate = BoolProperty(
        name="Rotate",
        description="Rotate island",
        default=False
    )
    islandMargin = FloatProperty(
        name="Margin",
        description="Margin between islands",
        default=0,
        min=0,
        max=1,
        soft_min=0,
        soft_max=1,
        step=1,
        precision=4)
    pile = BoolProperty(
        name="Pile",
        description="Pile similar island to save uv space",
        default=False
    )
    numOfPiles = IntProperty(
        name="Number of piles",
        description="Number of piles to create for each similar islands",
        default=1,
        min=1,
        max=2**31-1,
        soft_min=1,
        soft_max=10,
        step=1
    )
    def execute(self, context):
        """Execute the script."""
        def getMax(island):
            # Longest side of the island's bounding box.
            # NOTE(review): defined but not referenced in this execute().
            bbox = island.BBox()
            width = bbox.right() - bbox.left()
            height = bbox.top() - bbox.bottom()
            val = max(width, height)
            return val
        def makePiles(self, data):
            # Redistribute each group of isomorphic islands into
            # self.numOfPiles piles: data maps a leader island index to its
            # followers; the result maps one leader per pile to that pile's
            # followers (remainder spread one-per-pile from the front).
            # Note `self` is passed explicitly although this is a nested def.
            newDict = defaultdict(list)
            for islandIndex in data:
                mList = data[islandIndex].copy()
                mList.insert(0, islandIndex)
                numOfIsoIsland = len(mList)
                chunk = numOfIsoIsland // self.numOfPiles
                remainder = numOfIsoIsland % self.numOfPiles
                pad = 0
                for i in range(0, numOfIsoIsland):
                    bit = 0
                    if remainder:
                        bit = 1
                    for j in range(1, chunk + bit):
                        if len(mList) > pad + j:
                            newDict[mList[pad]].append(mList[pad+j])
                    pad += chunk+bit
                    if remainder:
                        remainder -= 1
            return newDict
        makeIslands = make_islands.MakeIslands()
        islands = makeIslands.getIslands()
        selectedIslands = makeIslands.selectedIslands()
        activeIsland = makeIslands.activeIsland()
        hiddenIslands = makeIslands.hiddenIslands()
        # search for isomorphic island
        # Pair up islands with identical topology; each group keeps one
        # leader and records the others as followers.
        isoIslandVisited = []
        isoIsland = defaultdict(list)
        if self.pile:
            for island in selectedIslands:
                for other in selectedIslands:
                    if island in isoIslandVisited or island == other:
                        continue
                    isoVerts = island.isIsomorphic(other)
                    if isoVerts:
                        isoIsland[selectedIslands.index(island)].append(selectedIslands.index(other))
                        isoIslandVisited.append(other)
            isoIsland = makePiles(self, isoIsland)
            # remove isomorphic island from selection
            # so the built-in packer only lays out one island per pile.
            for island in isoIsland.values():
                for other in island:
                    for face_id in selectedIslands[other]:
                        face = global_def.bm.faces[face_id]
                        face.select = False
            print(isoIsland)
            utils.update()
        # Let Blender's native packer arrange the remaining islands.
        bpy.ops.uv.pack_islands(rotate=self.rotate, margin=self.islandMargin)
        if self.pile and len(islands) != 0:
            # map each uv vert to corresponding vert for selectedIslands
            uv_to_vert = dict((i, list()) for i in range(len(global_def.bm.verts)))
            perIslandVerts = dict((i, set()) for i in range(len(selectedIslands)))
            # activeIslandUVData = dict((i, list()) for i in range(numOfVertex))
            for island in selectedIslands:
                for face_id in island:
                    face = global_def.bm.faces[face_id]
                    for loop in face.loops:
                        index = loop.vert.index
                        uv_to_vert[index].append(loop[global_def.uvlayer])
                        perIslandVerts[selectedIslands.index(island)].add(index)
            # Copy each pile leader's packed UVs onto its followers through
            # the isomorphism vertex mapping.
            for islandIndex in isoIsland:
                for isoIndex in isoIsland[islandIndex]:
                    islandVerts = perIslandVerts[islandIndex]
                    isoVerts = perIslandVerts[isoIndex]
                    vertmap = selectedIslands[islandIndex].isIsomorphic(selectedIslands[isoIndex])
                    for v in islandVerts:
                        mappedVert = vertmap[v]
                        for uv_loop in uv_to_vert[v]:
                            for iso_uv_loop in uv_to_vert[mappedVert]:
                                iso_uv_loop.uv = uv_loop.uv
            # reselct faces
            # (restore the selection we cleared before packing)
            for island in isoIsland.values():
                for other in island:
                    for face_id in selectedIslands[other]:
                        face = global_def.bm.faces[face_id]
                        face.select = True
            utils.update()
        return{'FINISHED'}
    def draw(self, context):
        """Draw the operator props."""
        layout = self.layout
        layout.prop(self, "selectedOnly")
        layout.prop(self, "rotate")
        layout.prop(self, "islandMargin")
        layout.prop(self, "pile")
        if self.pile:
            layout.prop(self, "numOfPiles")
#################################
# REGISTRATION
#################################
_om = operator_manager.om
_om.addOperator(PackIslands)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
2c12ad60738b486115434e2d07faa622a1586cef | c056f0f8b1cd896996225d5c2bcfc20bfbfc185e | /pymdb/models/company.py | 0bd5329cd9d01a2a2dd8fe508eeb4aebdc2bd800 | [
"MIT"
] | permissive | zembrodt/pymdb | 30baeb52972ac2d92d587d9d8024a3a6d0c1f6f8 | 1708438630bff18ab961241657e99ad7511652e3 | refs/heads/master | 2022-12-12T14:16:47.266359 | 2021-05-21T20:07:17 | 2021-05-21T20:07:17 | 211,527,115 | 4 | 0 | MIT | 2022-12-08T07:43:52 | 2019-09-28T16:11:23 | Python | UTF-8 | Python | false | false | 3,617 | py | """The classes used to represent various information about companies on IMDb.
All information for the classes here will be scraped from IMDb web pages.
"""
from ..utils import is_int
class CompanyScrape:
"""Stores a title a company is credited for on IMDb.
This information is taken from IMDb's search by company ID for titles
that include it in their credits.
Args:
company_id (:obj:`str`): The companys's ID used by IMDb prefixed with `co`.
title_id (:obj:`str`): The titles's ID used by IMDb prefixed with `tt`.
start_year (:obj:`int`): The year the title released, or the year the company
started being credited for a TV series.
end_year (:obj:`int`): The year the company stopped being credited for a TV series,
or `None` otherwise.
notes (:obj:`list` of :obj:`str`): A list of further notes IMDb gives about the credit.
"""
__slots__ = '_company_id','_title_id', '_start_year', '_end_year', '_notes'
def __init__(self, company_id, title_id, start_year, end_year, notes):
self._company_id = company_id
self._title_id = title_id
self._start_year = None
self._end_year = None
self._notes = notes
self.start_year = start_year
self.end_year = end_year
@property
def company_id(self):
return self._company_id
@property
def title_id(self):
return self._title_id
@property
def start_year(self):
return self._start_year
@start_year.setter
def start_year(self, value):
if is_int(value):
self._start_year = int(value)
@property
def end_year(self):
return self._end_year
@end_year.setter
def end_year(self, value):
if is_int(value):
self._end_year = int(value)
@property
def notes(self):
return self._notes
def __str__(self):
return f'{self.company_id} produced {self.title_id} ({self.start_year}' + \
f'{f"-{self.end_year}" if self.end_year is not None else ""}). Notes: {self.notes}'
class CompanyCreditScrape:
"""Stores a company that is credited on a title's IMDb page.
This information is taken from a title's IMDb company credits page, and contains
more information on what a company is credited on a title for.
Args:
company_id (:obj:`str`): The company's ID used by IMDb prefixed with `co`.
title_id (:obj:`str`): The titles's ID used by IMDb prefixed with `tt`.
company_name (:obj:`str`): The company's name it was credited under.
category (:obj:`str`): The category the company was credited for.
notes (:obj:`list` of :obj:`str`): A list of further notes IMDb gives about the credit.
"""
__slots__ = '_company_id', '_title_id', '_company_name', '_category', '_notes'
def __init__(self, company_id, title_id, company_name, category, notes):
self._company_id = company_id
self._title_id = title_id
self._company_name = company_name
self._category = category
self._notes = notes
@property
def company_id(self):
return self._company_id
@property
def title_id(self):
return self._title_id
@property
def company_name(self):
return self._company_name
@property
def category(self):
return self._category
@property
def notes(self):
return self._notes
def __str__(self):
return f'{self.company_name} ({self.company_id}) is a {self.category} for {self.title_id}. Notes: {self.notes}'
| [
"ryan.zembrodt@uky.edu"
] | ryan.zembrodt@uky.edu |
ff971e59f7f7d80e1e4ae02f404ef6913b40ce43 | c7265f5bfba2a5b28372af870946b3012198e656 | /models/__init__.py | c250cba660d853f15b16d70829ac0690a88c1705 | [] | no_license | Sanada-lab/StereoNet | 0657dac9150d1a00f2e1af2446dabd8f6afe95f5 | 0b919457d1c76cdb9b0d1ec55b8f3f5cb7c57a88 | refs/heads/master | 2023-09-01T08:04:58.194425 | 2021-10-26T02:17:35 | 2021-10-26T02:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | from .stereonet import StereoNet as stereonet
| [
"zhixuanli520@gmail.com"
] | zhixuanli520@gmail.com |
510cbc0e160a524f152c2d68971ddc0e7654aad9 | 4f09a69d70626696bd90a86ee45b006373b9e846 | /flaskr/location_code_fetcher.py | 151a666cec5b6e73561705eb68d92403b74063f0 | [] | no_license | seyoung755/place_project | 9d7903ea638de0446fde04a5bf0de8bc9542dc82 | 7e0de3e98389b7cca1ff1f4ea640d6735f7a173c | refs/heads/master | 2023-04-23T20:07:19.191715 | 2021-05-05T11:17:44 | 2021-05-05T11:17:44 | 346,026,713 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,461 | py | import math
NX = 149 ## X축 격자점 수
NY = 253 ## Y축 격자점 수
Re = 6371.00877 ## 지도반경
grid = 5.0 ## 격자간격 (km)
slat1 = 30.0 ## 표준위도 1
slat2 = 60.0 ## 표준위도 2
olon = 126.0 ## 기준점 경도
olat = 38.0 ## 기준점 위도
xo = 210 / grid ## 기준점 X좌표
yo = 675 / grid ## 기준점 Y좌표
first = 0
if first == 0:
PI = math.asin(1.0) * 2.0
DEGRAD = PI/ 180.0
RADDEG = 180.0 / PI
re = Re / grid
slat1 = slat1 * DEGRAD
slat2 = slat2 * DEGRAD
olon = olon * DEGRAD
olat = olat * DEGRAD
sn = math.tan(PI * 0.25 + slat2 * 0.5) / math.tan(PI * 0.25 + slat1 * 0.5)
sn = math.log(math.cos(slat1) / math.cos(slat2)) / math.log(sn)
sf = math.tan(PI * 0.25 + slat1 * 0.5)
sf = math.pow(sf, sn) * math.cos(slat1) / sn
ro = math.tan(PI * 0.25 + olat * 0.5)
ro = re * sf / math.pow(ro, sn)
first = 1
def mapToGrid(lat, lon, code = 0 ):
ra = math.tan(PI * 0.25 + lat * DEGRAD * 0.5)
ra = re * sf / pow(ra, sn)
theta = lon * DEGRAD - olon
if theta > PI :
theta -= 2.0 * PI
if theta < -PI :
theta += 2.0 * PI
theta *= sn
x = (ra * math.sin(theta)) + xo
y = (ro - ra * math.cos(theta)) + yo
try:
x = int(x + 1.5)
y = int(y + 1.5)
return x, y
except Exception as e:
return e
# print(mapToGrid(37.579871128849334, 126.98935225645432))
# print(mapToGrid(35.101148844565955, 129.02478725562108))
# print(mapToGrid(33.500946412305076, 126.54663058817043))
# import json
# import requests
# import math
#
# class location_code_fetcher:
# def __init__(self):
# # kakao = kakao_local.kakao_local_api()
# # kakao.
# # Url = "http://www.kma.go.kr/DFSROOT/POINT/DATA/"
# # self.top = Url + "top.json.txt"
# self.RE = 6371.00877 # 지구 반경(km)
# self.GRID = 5.0 # 격자 간격(km)
# self.SLAT1 = 30.0 # 투영 위도1(degree)
# self.SLAT2 = 60.0 # 투영 위도2(degree)
# self.OLON = 126.0 # 기준점 경도(degree)
# self.OLAT = 38.0 # 기준점 위도(degree)
# self.XO = 210 / self.GRID # 기준점 X좌표(GRID 좌표)
# self.YO = 675 / self.GRID # 기준점 Y좌표(GRID 좌표)
#
# def location_code_fetcher(self, lat: float, lon: float, mode='toXY',):
# # rad <-> deg 변환을 위한 변수 설정
# PI = math.asin(1.0) * 2.0
# DEGRAD = PI / 180.0
# RADDEG = 180.0 / PI
#
# # DEG 값들을 RAD으로 변환
# re = self.RE / self.GRID
# slat1 = self.SLAT1 * DEGRAD
# slat2 = self.SLAT2 * DEGRAD
# olon = self.OLON * DEGRAD
# olat = self.OLAT * DEGRAD
#
# sn = math.tan(PI * 0.25 + slat2 * 0.5) / \
# math.tan(PI * 0.25 + slat1 * 0.5)
# sn = math.log(math.cos(slat1) / math.cos(slat2)) / math.log(sn)
# sf = math.tan(math.pi * 0.25 + slat1 * 0.5)
# sf = math.pow(sf, sn) * math.cos(slat1) / sn
# ro = math.tan(math.pi * 0.25 + olat * 0.5)
# ro = re * sf / math.pow(ro, sn)
# rs = {}
# if mode == "toXY":
# rs['lat'] = lat
# rs['lng'] = lon
# ra = math.tan(math.pi * 0.25 + lat * DEGRAD * 0.5)
# re = re * sf / math.pow(ra, sn)
# theta = lon * DEGRAD - olon
# if theta > math.pi:
# theta -= 2.0 * math.pi
# if theta < -math.pi:
# theta += 2.0 * math.pi
# theta *= sn
# rs['x'] = math.floor(ra * math.sin(theta) + self.XO + 0.5)
# rs['y'] = math.floor(ro - ra * math.cos(theta) + self.YO + 0.5)
#
# return rs
#
#
#
#
# # res = requests.get(self.top)
# # res.encoding=None
# # documents = json.loads(res.text)
# # json.dumps(documents, ensure_ascii=False).encode('utf8')
# #
# # for docu in documents:
# # if '서울' in docu['value']:
# # print(docu['code'])
# # elif '경기' in docu['value']:
# # print(docu['code'])
# #
# # elif '충%남%' in docu['value']:
# # print(docu['code'])
# # return documents[0]
# return sn
#
#
# location = location_code_fetcher()
# print(location.location_code_fetcher(36.4, 127.1)) | [
"seyoung7555@naver.com"
] | seyoung7555@naver.com |
09414566219ff95bbe9dd52a53d58304dcb9b59f | b9283cc20c0c26d7be7129452d3203e54fffed02 | /src/newsletter/admin.py | 66f5b95ecbde96b8ddd119a885b643485daddcae | [] | no_license | gagannaidu/trydjango18 | 6c99aa89c33a6823832756559465ddc21bc5ae05 | 175c4ee80cefe71f6c2d67f0a91ca04332c7727e | refs/heads/master | 2021-01-20T00:55:52.552224 | 2017-05-04T06:30:22 | 2017-05-04T06:30:22 | 89,210,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from django.contrib import admin
# Register your models here.
from .models import SignUp
from .forms import SignUpForm
class SignUpAdmin(admin.ModelAdmin):
list_display = ["__str__","timestamp","updated"]
form = SignUpForm
#class Meta:
# model = SignUp
admin.site.register(SignUp, SignUpAdmin)
| [
"gagan.globin76@gmail.com"
] | gagan.globin76@gmail.com |
737133a9d5ff1fea555972ced06e52451d461166 | a7c15e8a4de1838aee0d8a8409932ec0ed4da29f | /adapt/views.py | 69c2d9282bb5e16ba791cd06358f0d531f8471bd | [
"Apache-2.0"
] | permissive | bhagvank/adapto_lernado | 42da4b06400bf687f40ce3a805eb3222d0a5471d | 416f261d7943d7e8982dcc6b04e57df5dff74d6e | refs/heads/master | 2020-04-01T13:43:21.471692 | 2020-01-19T20:22:52 | 2020-01-19T20:22:52 | 153,264,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,870 | py | from django.shortcuts import get_object_or_404, render
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from django.template import loader
from .models import SlackUser
from .models import Recommendation
from .models import Course
from .slackutils import SlackUtil
from .NLPUtils import NLPUtil
import os
import logging
import base64
import sys, traceback
logger = logging.getLogger("nlp_logger")
def login(request):
"""
login page call
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
#slack = SlackUtil()
#channels = slack.listChannels()
#printf("channels", channels)
# messages = listMessages("CBR05AS5N")
template_name = 'adapt/login.html'
#context = {'channels': channels}
# context_object_name = 'channels'
return render(request, template_name)
def logout(request):
"""
logout page call
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
#slack = SlackUtil()
#channels = slack.listChannels()
#printf("channels", channels)
# messages = listMessages("CBR05AS5N")
try:
del request.session["slack_token"]
except KeyError:
pass
template_name = 'adapt/login.html'
#context = {'channels': channels}
# context_object_name = 'channels'
return render(request, template_name)
def authenticate(request):
"""
page authentication
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
#slack = SlackUtil()
#channels = slack.listChannels()
print("authenticating")
# messages = listMessages("CBR05AS5N")
username = request.POST['useremail']
password = request.POST['password']
logger.info("authenticate username "+username )
#+ "password "+ password.encode('base64')
error_password = None
try:
user = get_object_or_404(SlackUser, username=username)
except:
#traceback.print_stack()
traceback.print_exc(file=sys.stdout)
template_name = 'adapt/login.html'
error_username = "Invalid username"
context = {'error_useremail': error_username,
'error_password': error_password}
return render(request, template_name,context)
#print(user)
if user:
check, error_username, error_password = user.authenticate(username, password)
print(check,error_username,error_password)
if check:
# request.session["slack_token"] = user.getSlackToken()
template_name = 'adapt/main.html'
#print("authenticated")
logger.info("authenticated username "+username)
# + "password "+ password.encode('base64'))
else :
print("setting template as login")
template_name = 'adapt/login.html'
logger.info("authenticate failure username "+username )
#+ "password "+ password.encode('base64'))
else :
print("setting template as login")
template_name = 'adapt/login.html'
error_username = "Invalid username"
logger.info("validation failure username "+username )
#+ "password "+ password.encode('base64'))
context = {'error_useremail': error_username,
'error_password': error_password}
# context_object_name = 'channels'
return render(request, template_name,context)
def main(request):
"""
main page call
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
#slack = SlackUtil()
#channels = slack.listChannels()
#printf("channels", channels)
# messages = listMessages("CBR05AS5N")
#recommendations = Recommendation.objects
template_name = 'adapt/main.html'
#context = {'channels': channels}
# context_object_name = 'channels'
context = {}
return render(request, template_name,context)
def signup(request):
"""
sign up page call
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
template_name = 'adapt/signup.html'
#context = {'channels': channels}
# context_object_name = 'channels'
return render(request, template_name)
def signin(request):
"""
sign in - sign up processing
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
#slack = SlackUtil()
#channels = slack.listChannels()
#printf("signup")
# messages = listMessages("CBR05AS5N")
username = request.POST['useremail']
password = request.POST['password']
confirmPassword = request.POST['confirmPassword']
#slackToken = request.POST['slacktoken']
print("password, confirmPassword",password,confirmPassword)
#if confirmPassword == password:
error_confirm_password = None
error_username = None
error_password = None
#error_slack_token = None
#template_name = 'nlp/signup.html'
error_username = _validate_username(username)
error_password, error_confirm_password = _validate_password(password,confirmPassword)
#error_slack_token = _validate_slack_token(slackToken)
if error_username == None and error_password == None and error_confirm_password == None:
if password == confirmPassword:
#print("password is equal to confirmPassword")
user = SlackUser(username=username,password=password)
user.save()
template_name = 'adapt/login.html'
else :
#error_confirm_password = "password and confirm password do not match"
template_name = 'adapt/signup.html'
else :
#error_password = "password is not valid"
#error_confirm_password = "confirm_password is not valid"
template_name = 'adapt/signup.html'
context = {'error_confirm_password': error_confirm_password,
'error_useremail': error_username,
'error_password': error_password
}
# context_object_name = 'channels'
return render(request, template_name,context)
def search(request):
"""
sign in - sign up processing
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
#slack = SlackUtil()
#channels = slack.listChannels()
#printf("signup")
# messages = listMessages("CBR05AS5N")
count =10
page,count,query = _parsePageQuery(request)
search_text = None
error_search = None
if page == None:
page =1
search_text = request.POST['Search']
print("search text",search_text)
error_search = _validate_search(search_text)
else :
search_text = query
messages = []
page_count = 0
if error_search == None:
slack_token = request.session["slack_token"]
slack = SlackUtil(slack_token)
messages,page_count = slack.searchAll(search_text,page,count)
#error_confirm_password = "password and confirm password do not match"
template_name = 'adapt/tabs.html'
nlp = NLPUtil()
messagesDict = {}
for message in messages :
messagesDict[message['ts']] = message
#message
sentiments = nlp.analyseContentSentiment(messagesDict)
messageEntities = nlp.analyseEntities(messagesDict)
print("entities",messageEntities)
messageSentiments = nlp.analyseEntitySentiments(messagesDict)
print("entities",messageEntities)
else :
#error_password = "password is not valid"
#error_confirm_password = "confirm_password is not valid"
template_name = 'adapt/search.html'
context = { 'error_search': error_search,
'query': search_text,
'messages' : messages,
'page_num' : int(page),
'page_sen' : int(page),
'page_en' : int(page),
'page_ensen': int(page),
'page_count': page_count,
'loop_count': range(1,page_count+1),
'sentiments': sentiments,
'entities': messageEntities,
'entitysentiments': messageSentiments
}
# context_object_name = 'channels'
return render(request, template_name,context)
def index(request):
"""
index page
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
#print("index")
#page,count = _parsePage(request)
#print("page", page)
#slack_token = request.session["slack_token"]
#slack = SlackUtil(slack_token)
#channels = slack.listChannels()
#channels,nextCursor = slack.listChannelsPage(page,count)
#printf("channels", channels)
# messages = listMessages("CBR05AS5N")
recommendations = Recommendation.objects.all()
template_name = 'adapt/index.html'
#context = {'channels': channels,
# 'nextCursor': nextCursor
# }
# context_object_name = 'channels'
context = {'recommendations': recommendations}
return render(request, template_name, context)
def courses(request):
courses = Course.objects.all()
template_name = 'adapt/courses.html'
context = {'courses': courses}
return render(request, template_name, context)
def detail(request, channel_id):
"""
detail page
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
#return HttpResponse("You're looking at question %s." % channel_id)
page,count = _parsePage(request)
slack_token = request.session["slack_token"]
slack = SlackUtil(slack_token)
#messages = slack.listMessages(channel_id)
messages,nextCursor = slack.listMessagesPage(channel_id,page,count)
#print("messages in view", messages)
channelMessages = []
for key,value in messages.items():
channelMessage = value
channelMessages.append(channelMessage)
channel_name = slack.getChannelById(channel_id)
template_name = 'adapt/detail.html'
context = {'messages': channelMessages,
'channel': channel_name,
'channel_id': channel_id,
'nextCursor': nextCursor}
return render(request, template_name, context)
def results(request, user_id):
"""
results page
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
full_path = request.get_full_path()
split_path = full_path.split("&")
page = None
channel_id = None
if "&" in full_path:
pagePath = split_path[-1].split("=")
page = pagePath[-1]
print("page ", page)
previous_path = split_path[0].split("=")
channel_id = previous_path[-1]
else:
split_path = full_path.split("=")
channel_id = split_path[-1]
count = 10
template_name = 'adapt/results.html'
slack_token = request.session["slack_token"]
slack = SlackUtil(slack_token)
#messages= {}
messages, nextCursor = slack.getMessagesByUserPage(channel_id,user_id,page,count)
channel_name = slack.getChannelById(channel_id)
nlp = NLPUtil()
sentiments = nlp.analyseContentSentiment(messages)
#print("in results",sentiments)
user_name = slack.getUserById(user_id)
context = {'sentiments': sentiments,
'user': user_name,
'user_id':user_id,
'channel_id':channel_id,
'channel': channel_name,
'nextCursor': nextCursor
}
return render(request, template_name, context)
def threads(request, thread_id):
"""
threads page
Parameters
----------
request : HttpRequest
request object
Returns
-----------
HttpResponse
content is the result of render method
"""
full_path = request.get_full_path()
split_path = full_path.split("&")
page = None
channel_id = None
if "&" in full_path:
pagePath = split_path[-1].split("=")
page = pagePath[-1]
print("page ", page)
previous_path = split_path[0].split("=")
channel_id = previous_path[-1]
else:
split_path = full_path.split("=")
channel_id = split_path[-1]
count = 10
slack_token = request.session["slack_token"]
slack = SlackUtil(slack_token)
messages,nextCursor = slack.getRepliesByThreadIdPage(channel_id,thread_id,page,count)
#threadMessages = {}
#for message in messages:
# threadMessages[message["ts"]]= message
nlp = NLPUtil()
#print("in threads userspecific",threadMessages)
sentiments = nlp.analyseContentSentiment(messages)
channel = slack.getChannelById(channel_id)
#print("in results",sentiments)
context = {'sentiments': sentiments,
'thread': thread_id,
'channel': channel,
'channel_id': channel_id,
'nextCursor': nextCursor
}
template_name = 'adapt/threads.html'
return render(request, template_name, context)
def _parsePage(request):
full_path = request.get_full_path()
split_path = full_path.split("?")
#print("split_path-1",split_path[-1])
page = None
if "?" in full_path:
pagePath = split_path[1].split("page=")
page = pagePath[-1]
count = 10
return page,count
def _parsePageQuery(request):
full_path = request.get_full_path()
split_path = full_path.split("?")
#print("split_path-1",split_path[-1])
page = None
query = None
if "?" in full_path:
if "&" in full_path:
parameterPath = split_path[1].split("&")
pagePath = parameterPath[0].split("page=")
page = pagePath[-1]
queryPath = parameterPath[1].split("search=")
query = queryPath[-1]
else :
pagePath = split_path[1].split("page=")
page = pagePath[-1]
count = 10
return page,count,query
def _validate_username(username):
error_username = None
if username == None:
#print("error in username")
error_username = "user email is blank"
#template_name = 'nlp/signup.html'
if "@" not in username or "." not in username :
error_username = "user email is not valid"
#template_name = 'nlp/signup.html'
return error_username
def _validate_search(search):
error_search = None
if search == None or search == "":
error_search = "search query is blank"
return error_search
def _validate_slack_token(slack_token):
error_slack_token = None
print("slack_token", slack_token)
if slack_token == None or slack_token == "":
slack_os_token = os.environ['SLACK_TOKEN']
error_slack_token = "slack token is blank, if you do not have one, you can use "+slack_os_token
return error_slack_token
def _validate_password(password,confirm_password):
error_password = None
error_confirm_password = None
if password == None:
error_password = "password is blank"
if confirm_password == None:
error_confirm_password = "confirm password is blank"
if password != None and confirm_password != None:
if password == confirm_password:
error_password = None
error_confirm_password = None
else :
error_password = "password and confirm_password do not match"
error_confirm_password = "password and confirm_password do not match"
return error_password, error_confirm_password
| [
"bhagvan.kommadi@apples-MacBook-Air.local"
] | bhagvan.kommadi@apples-MacBook-Air.local |
a90ce32550af5fc89fe8d8fc64a7c1c99513cd00 | 2d4a32dd7cde180b09286fc8e8fc2e242fbfad42 | /xdmfview/output.py | 1052de2ce9ea09b3ad8c2e9fa50c426610b457b1 | [] | no_license | HIT-CTC/DNS-postprocessing | b26f382cd99da8eb1f7f21e4cb6b898bc7a37780 | 63ef1609fe8e6cbbd58453e357e30a7a4fbe198b | refs/heads/master | 2023-04-14T20:08:04.572910 | 2021-04-24T03:06:44 | 2021-04-24T03:06:44 | 318,992,123 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,967 | py | #!/usr/bin/python
import h5py
import numpy as np
import re
from pathlib import Path
from prof import avg, div
#----------------------------------------#
#---------- Get Var Dictionary ----------#
#----------------------------------------#
def varDict(fn):
vardict = {}
with h5py.File(fn, 'r') as f:
varlist = f.keys()
for var in varlist:
if var[0] in ['u', 'v', 'w', 'p']:
vardict[var] = False
return vardict
def confDict(varlist, vardict):
for var in varlist:
if var in vardict.keys():
vardict[var] = True
return vardict
def readHDF(fn, var):
with h5py.File(fn, 'r') as f:
sh_in = f[var].shape
dims = len(sh_in)
if dims == 1:
if sh_in[0] == 1:
var_out = f[var][0]
else:
var_out = f[var][:]
elif dims == 2:
var_out = f[var][:,:]
elif dims == 3:
var_out = f[var][:,:,:]
f.close()
return var_out
def norAll(var):
return avg.avg2d(avg.avg3d(var, 3), 2)
#----------------------------------------#
#------------ Postprocessing ------------#
#----------------------------------------#
class outputData(object):
def __init__(self, path, fn, varlist, flag_nor=True):
self.path = Path(path).expanduser().resolve()
self.fn = self.path/fn
self.varlist = varlist
self.var_dict = varDict(self.fn)
self.var_dict = confDict(self.varlist, self.var_dict)
#---- Get Basic Information From File ----#
self.zc = readHDF(self.fn, 'zc' )
self.nu = readHDF(self.fn, 'nu' )
self.tau = readHDF(self.fn, 'tau' )
self.utau = readHDF(self.fn, 'utau')
self.zplus= self.zc*self.utau/self.nu
for var, var_flag in self.var_dict.items():
if var_flag:
if flag_nor:
nor = 1
for str in var:
if str in ['u', 'v', 'w']:
nor *= 1/self.utau
elif str in ['p']:
nor *= 1/self.tau
elif str in ['x', 'y', 'z']:
nor *= self.nu/self.utau
setattr(self, var, norAll(readHDF(self.fn, var))*nor)
def outputData(self, outputname):
i = 0
varlist = []
for var_str, var_flag in self.var_dict.items():
if var_flag:
varlist.append(var_str)
varlist.insert(0, 'zplus')
varlist.insert(0, 'zc')
filename = '{}.dat'.format(outputname)
filename = self.path/filename
head_str1 = ''
head_str2 = ''
for var_str in varlist:
i += 1
head_str1 += '{:^14s}'.format('C'+str(i))
head_str2 += '{:^14s}'.format(var_str)
title_head = '{:40s}'.format('Statistics of Retau={} Rem={}'.format(self.utau/self.nu, 1/self.nu))+'\n'
head_str1 += '\n'
head_str2 += '\n'
spl_head = 80*'-'+'\n'
# Write text in file
outfile = open(filename, 'w')
outfile.write(title_head+head_str1+head_str2+spl_head*2)
for i in range(len(self.zc)):
for var_str in varlist:
var = getattr(self, var_str)[i]
if var_str in ['rethe', 'redel', 'rex', 'retau', 'redeldi', 'redelen']:
outfile.write('{:14.2e}'.format(var))
elif var_str in ['zc', 'zplus']:
outfile.write('{:14.6f}'.format(var))
else:
outfile.write('{:14.6e}'.format(var))
outfile.write('\n')
print('File generated complete')
outfile.close()
if __name__ == '__main__':
path = '/home/xlc/DATA/it_Data'
fn = 'avg_recy.h5'
varlist = ['u','v','w']
a = outputData(path,fn,varlist).outputData('doubt_vec')
| [
"xilingchu@163.com"
] | xilingchu@163.com |
f59da9d8547eeaf30bbdec1aea66028e443f9349 | 166a791898fc2ce80f15bf3c9dd535411eb65370 | /tournaments/migrations/0004_auto_20180312_2122.py | 0594046942a51268b5ddf21dcd65f50b1020a221 | [] | no_license | FrostFlame/bowling_p | 17dcba9fb4643006d54f9db212fcbffc18616d87 | ca18a91ea9ccbd245c2d263c0e9ff3ae58ff9204 | refs/heads/master | 2020-04-25T05:49:14.889010 | 2018-09-04T19:21:02 | 2018-09-04T19:21:02 | 172,556,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-12 18:22
from __future__ import unicode_literals
from django.db import migrations, models
import tournaments.models
class Migration(migrations.Migration):
dependencies = [
('tournaments', '0003_auto_20180312_2116'),
]
operations = [
migrations.AlterField(
model_name='tournament',
name='photo',
field=models.ImageField(blank=True, default='default/tournament_avatar.png', upload_to=tournaments.models.filename),
),
]
| [
"kair493@mail.ru"
] | kair493@mail.ru |
07071eecb441b9fcf357cd53c2e787e0521f8b7b | aa90434900a007c572b5147efdb8d5f1e8d4b9a1 | /Yaman's Experiment/scoring_test.py | 274cc01a7883044b395ccaf61182ff4bea7d6aae | [] | no_license | harshgupta9723/ML_Experiments | d563c45116a8a06ed2a3d63eb93c8712223809fb | 3997531a78d5f173af1c4f1eeba6cd4d4b8688b4 | refs/heads/main | 2023-09-04T13:26:10.592725 | 2021-11-19T06:28:10 | 2021-11-19T06:28:10 | 406,354,066 | 0 | 0 | null | 2021-11-19T06:28:11 | 2021-09-14T12:16:45 | Jupyter Notebook | UTF-8 | Python | false | false | 857 | py | import pandas as pd
import numpy as np
import requests
import json
df = pd.read_csv('user.csv', index_col='job_id')
neg = np.random.randint(11, size=len(df))
df['negative'] = neg
pos = np.random.randint(11, size=len(df))
df['positive'] = pos
df.drop(columns=['userfeedback'], inplace=True)
df['total_reviews'] = df['negative']+df['positive']
df['percentage'] = df['positive']/df['total_reviews']*100
link = "http://143.198.118.108:5080/jobs"
result = requests.post(link, data={'job_id': 5})
json_data = json.loads(result.text)
df_percentage = df[['key', 'positive', 'negative', 'percentage']]
for key, value in json_data['recommended_jobs'].items():
try:
if df_percentage.iloc[value['job_id']]['percentage'] >= 36:
print(df_percentage.iloc[value['job_id']])
except Exception:
pass
| [
"noreply@github.com"
] | noreply@github.com |
e77f2772ce0f33c9076d9680c91c0165196bf144 | b71707810f2eb713257d9c39f16992af9322674b | /python3/venv/bin/easy_install | 988b0d659c99084bd3faca7d7b47ee0d15c3e769 | [] | no_license | kennykim11/advancedPython | 9840d74b7857affab5a30a5d6a974c510ccc5d44 | 32ddd7f1f316104ced32cdcd0910d5dc9cb49de7 | refs/heads/master | 2020-12-22T17:54:04.454592 | 2020-06-07T12:54:01 | 2020-06-07T12:54:01 | 236,880,962 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 466 | #!/Users/kenny/Desktop/Programming/Python/advancedPython/python3/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"kennykim11@gmail.com"
] | kennykim11@gmail.com | |
6ace55c0f84dd4805eb1026e871e5d7ed6b7ed97 | a5b9a8f7ca2e831c3c84467b23d9c3e1ae382385 | /fri3d/button.py | 6ee994c31391c30834c15e9e7113bd6cfa529244 | [] | no_license | Fri3dCamp/api | d60e9048cb56e118487d9344897b5e6220adb03c | 9373b0b9d8f5ccc52941c766a539a3f9bb41b843 | refs/heads/master | 2022-12-15T14:30:07.861022 | 2017-08-09T13:23:12 | 2017-08-09T13:23:12 | 99,805,938 | 0 | 0 | null | 2022-12-07T23:56:30 | 2017-08-09T12:29:38 | HTML | UTF-8 | Python | false | false | 1,455 | py | import os
from flask import render_template
from flask import request
from flask import request, abort
from flask_restful import Resource
from fri3d import api, rest, mongo
@api.route("/button")
def render_button():
return render_template("button.html")
class Button(Resource):
def get(self):
try:
args = request.args
target = args["target"]
return [ x for x in mongo.db.button.aggregate([
{"$match" : { "_id" : target } },
{"$project": { "count": {"$size": "$users"}}}
]) ][0]["count"]
except Exception as e:
return 0
def post(self):
try:
data = request.get_json()
target = data["target"]
token = request.cookies.get("fri3d")
assert not token is None
# TODO map token to user
user = token
# get current set of users for target and toggle user
record = mongo.db.button.find_one({"_id" : target})
users = []
if record:
users = record["users"]
if user in users:
users.remove(user)
else:
users.append(user)
else:
users.append(user)
# update/create record
result = mongo.db.button.update_one(
{ "_id" : target },
{ "$set" : { "users": users } },
upsert=True
)
return result.matched_count == 1 or (not result.upserted_id is None)
except Exception as e:
return False
rest.add_resource(Button,
"/api/button"
)
| [
"contact@christophe.vg"
] | contact@christophe.vg |
5630a1f755bcb97d80903c68d63d85f599297cb6 | 401d664697e0df149281ca94ea88f6eee3e609f0 | /python/04_OOP/descriptor/08property_and_descriptors.py | 820da9fddcbe2a34c95bc12cade297a6478c6fe2 | [] | no_license | git-mih/Learning | 6806fd14f174b908781714e4c3d15da1c6f63307 | 36ea5cb3ed8da0fb2de43f718d4446bbf000a670 | refs/heads/main | 2023-08-30T02:09:18.299638 | 2021-10-26T01:36:23 | 2021-10-26T01:36:23 | 360,233,001 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,747 | py | # Properties and decorators
from numbers import Integral
# __________________________________________________________________________________________________
# property object: using decorator syntax:
class Person:
@property
def age(self):
return getattr(self, '_age')
@age.setter
def age(self, value):
if not isinstance(value, Integral):
raise ValueError('age: must be an integer.')
if value < 0:
raise ValueError('age: must be a non-negative integer.')
self._age = value
Person.age # <property object at 0x000001> descriptor instance
# Properties are actually data descriptors and these methods will always be there:
hasattr(Person.age, '__get__') # True
hasattr(Person.age, '__set__') # True
hasattr(Person.age, '__delete__') # True
p = Person() # {}
p.age = 26
p.__dict__ # {'_age': 26}
# property object: without decorator syntax:
class Person:
# fget will points to this method:
def get_fn(self):
return getattr(self, '_age')
# fset points to this method:
def set_fn(self, value):
if not isinstance(value, Integral):
raise ValueError('age: must be an integer.')
if value < 0:
raise ValueError('age: must be a non-negative integer.')
# creating the '_age' entry in the object instance namespace:
self._age = value
age = property(fget=get_fn)
age = age.setter(set_fn)
# we could also create the property in a single line:
# age = property(fget=get_fn, fset=set_fn)
# no matter if we specify the fget/fset or not, these methods will always be there:
hasattr(Person.age, '__get__') # True
hasattr(Person.age, '__set__') # True
hasattr(Person.age, '__delete__') # True
p = Person()
p.age = 10
p.age # 10
class TimeUTC:
@property
def current_time(self):
return 'current time...'
hasattr(TimeUTC.current_time, '__get__') # True
# we are not specifiing the fset:
hasattr(TimeUTC.current_time, '__set__') # True
t = TimeUTC()
# __get__ will works properly:
t.current_time # current time...
# if we try to set the 'current_time' to other value like:
# t.current_time = 100 AttributeError: can't set attribute
# that is not because the __set__ does not exists. it does. we got this error message cause
# the property doesnot have the fset defined.
# what is happening is that, the __set__ is trying to call the fset method in our class
# and we did not defined the fset=.
#________________________________________________________________________________________________
# creating our own class that create Properties
class MakeProperty:
# receive the getter and setter methods we want to use:
def __init__(self, fget=None, fset=None):
self.fget = fget # Person.name.fget = get_name_fn
self.fset = fset # Person.name.fset = set_name_fn
def __set_name__(self, owner_class, property_name):
self.property_name = property_name
def __set__(self, instance, value):
print(f'__set__ called... ')
if self.fset is None:
raise AttributeError('cant set attribute')
# after some validation, we call the setter function:
self.fset(instance, value)
# Essentyally:
# Person.name.set_name_fn(p, 'Fabio')
# Person.name._name = 'Fabio' or setattr(Person.name, '_name', 'Fabio')
def __get__(self, instance, owner_class):
print(f'__get__ called...')
if instance is None:
return self
if self.fget is None:
raise AttributeError('not readable')
return self.fget(instance)
# Essentyally:
# Person.name.get_name_fn(p)
# getattr(Person.name, '_name') # Fabio
class Person:
# defining our getter function that will be called from inside the descriptor whenever
# we try to access the 'name' property:
def get_name_fn(self):
return getattr(self, '_name')
def set_name_fn(self, value):
# after some validation the descriptor will call it from there and add the '_name' entry
# inside the object instance namespace:
self._name = value
# now lets make our Property by using our data descriptor (MakeProperty):
name = MakeProperty(fget=get_name_fn, fset=set_name_fn)
# whenever we try to get or set the 'name' property, it will automatically call the
# descriptor get/set method which will then call these functions.
Person.name.__dict__
# __get__ called... calling get_name_fn(<descriptor_instance_name>, None)
# {'fget': <function Person.get_name_fn at 0x0000011>,
# 'fset': <function Person.set_name_fn at 0x0000022>, 'property_name': 'name'}
p = Person()
p.name = 'Fabio' # __set__ called...
# Essentyally:
# MakeProperty.__set__(Person.name, p, 'Fabio')
# then the descriptor calls the set_name_fn:
# Person.set_name_fn(p, 'Fabio') # self=p, value='Fabio'
# which will creates the '_name' entry inside the object instance namespace:
# setattr(p, '_name', 'Fabio')
p.__dict__ # {'_name': 'Fabio'}
p.name # Fabio
# MakeProperty.__get__(Person.name, p, Person)
# the descriptor will calls the get_name_fn from there:
# getattr(p, '_name') # Fabio
#________________________________________________________________________________________________
# we can also use the decorator approach as well, we dont require to change anything tho:
class MakeProperty:
def __init__(self, fget=None, fset=None):
self.fget = fget
self.fset = fset
def __set_name__(self, owner_class, property_name):
self.property_name = property_name
def __set__(self, instance, value):
if self.fset is None:
raise AttributeError('cant set attribute')
self.fset(instance, value)
def __get__(self, instance, owner_class):
if instance is None:
return self
if self.fget is None:
raise AttributeError('not readable')
return self.fget(instance)
class Person:
@MakeProperty
def age(self):
return 26
# essentially, Python will do it:
# age = MakeProperty(age) where the 1st argument is the fget=
Person.age # <__main__.MakeProperty object at 0x000001> descriptor instance.
p = Person()
p.age # 26
# to be able to use the `@age.setter` syntax, we have to define the 'setter' method inside
# the descriptor:
class MakeProperty:
def __init__(self, fget=None, fset=None):
self.fget = fget
self.fset = fset
def __set_name__(self, owner_class, property_name):
self.property_name = property_name
def __set__(self, instance, value):
if self.fset is None:
raise AttributeError('cant set attribute')
self.fset(instance, value)
def __get__(self, instance, owner_class):
if instance is None:
return self
if self.fget is None:
raise AttributeError('not readable')
return self.fget(instance)
# appending the setter method to be able to use the @property.setter syntax:
def setter(self, fset):
self.fset = fset
# returning a new property object with the setter on it:
return self
class Person:
@MakeProperty
def age(self):
return getattr(self, '_age')
# age = <__main__.MakeProperty object at 0x000001>
@age.setter
def age(self, value):
self._age = value
# essentially:
# age = age.setter(p, 10) -> <__main__.MakeProperty object at 0x000002> (self)
# now we have a bare new property object that have the old getter and the setter:
# age = <__main__.MakeProperty object at 0x000002>
p = Person()
p.age = 10
p.age # 10
| [
"git.mih@gmail.com"
] | git.mih@gmail.com |
a6b08df42fd248573eb7c8fa360cc0f27060166f | ca8f4b3e28ca79367f245a5912bb43e1c8b64797 | /xcom/xcom_dag.py | d5bc5e2ff5778d2be036797ed58ab13c385285b9 | [] | no_license | aTechGuide/airflow-dags | 797a631e0034513a797500a9a84770b0513ba033 | 8aa3280ada687cf323e43c61c2da9d72f18bda6c | refs/heads/master | 2020-08-21T17:23:46.962268 | 2020-06-15T10:48:30 | 2020-06-15T10:48:30 | 216,207,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | from datetime import datetime, timedelta
from airflow import DAG
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
default_args = {
'owner': 'airflow',
'depend_on_past': False,
'start_date': datetime(2018, 11, 5, 10, 00, 00),
'retries': 1,
'retry_delay': timedelta(minutes=1)
}
# Read the table course.source to fetch the data and return the name of first source having its column "activated" sets to true
# No need to call xcom_push here since we use the keyword "return" which has the same effect.
def get_activated_sources():
request = "SELECT * FROM course.source"
pg_hook = PostgresHook(postgres_conn_id="postgre_sql", schema="airflow_mdb")
connection = pg_hook.get_conn()
cursor = connection.cursor()
cursor.execute(request)
sources = cursor.fetchall()
for source in sources:
if source[1]:
return source[0]
return None
def source_to_use(**kwargs):
ti = kwargs['ti']
source = ti.xcom_pull(task_ids='hook_task')
print("source fetch from XCOM: {}".format(source))
with DAG('xcom_dag',
default_args=default_args,
schedule_interval='@once',
catchup=False) as dag:
start_task = DummyOperator(task_id='start_task')
hook_task = PythonOperator(task_id='hook_task', python_callable=get_activated_sources)
xcom_task = PythonOperator(task_id='xcom_task', python_callable=source_to_use, provide_context=True)
start_task >> hook_task >> xcom_task
| [
"kamali@expedia.com"
] | kamali@expedia.com |
c39c32eab42926735834b4162c43fea51ebe0067 | 0e478561dfff017c543395e68f1e70967a3383d2 | /02_L3.py | cea84c2b608866019cf6b89515dd8dd568ddb957 | [] | no_license | wernerweinhartner/aulas_python | f1ad3bfa475075f02059d85a9f3d90efb5b0a62f | a59a98de934a6292e56b06ad036e328284c57d2f | refs/heads/master | 2020-05-25T09:11:27.281668 | 2019-06-24T22:42:09 | 2019-06-24T22:42:09 | 187,729,151 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | #Faça um programa que leia um nome de usuário e a sua senha e
#não aceite a senha igual ao nome do usuário,
# mostrando uma mensagem de erro e voltando a pedir as informações.
usuario = ''
senha = ''
while (usuario == senha):
usuario = input('Informe seu nome de usuário: ')
senha = input('Informe sua senha: ')
if usuario == senha:
print('O nome de usuario e senha devem ser difirentes')
print ('Nome de usuario e senha validos')
| [
"wernerweinhartner@gmail.com"
] | wernerweinhartner@gmail.com |
e815bc00ac8a9f39a473d1ae169a929143560be6 | c93f51492cfee3f98040f07d7f4323ec27ac81a5 | /refinery/units/obfuscation/ps1/concat.py | 40bc8a8c7d142f8dcdff27d0265cce17adba6673 | [
"BSD-3-Clause"
] | permissive | prats84/refinery | cbe9ebfeb570c9c0531e13bbf13ec18801f12aca | 5f961051e9cc1857a06108ce4d36a6799ac9d720 | refs/heads/master | 2023-07-13T02:32:04.998285 | 2021-08-20T09:08:01 | 2021-08-20T09:08:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from .. import IterativeDeobfuscator
from . import string_unquote, string_quote, Ps1StringLiterals
class deob_ps1_concat(IterativeDeobfuscator):
_SENTINEL = re.compile(R'''['"]\s*[+&]\s*['"]''')
def deobfuscate(self, data):
def concat(data):
strlit = Ps1StringLiterals(data)
repeat = True
while repeat:
for match in self._SENTINEL.finditer(data):
a, b = match.span()
a = strlit.get_container(a)
if a is None:
continue
b = strlit.get_container(b)
if b is None or b != a + 1:
continue
a = strlit.ranges[a]
b = strlit.ranges[b]
stra = data[slice(*a)]
strb = data[slice(*b)]
parts = list(string_unquote(stra))
it = iter(string_unquote(strb))
parts[~0] += next(it)
parts.extend(it)
yield data[:a[0]] + string_quote(parts)
data = data[b[1]:]
strlit.update(data)
break
else:
repeat = False
yield data
return ''.join(concat(data))
| [
"rattle@nullteilerfrei.de"
] | rattle@nullteilerfrei.de |
ea1b544ed273f1a243034912b650460c0c769fc9 | c37e93862d118209a8976b1e6b7ab502b2b62e55 | /minha_casinha.py | 74fda26d1bdebd04629ff60e427f2b2dba6af7a5 | [] | no_license | Larissapy/aula-remota-5 | c8162df9ac80ad9e30c99f209632731f57fda000 | 919cbd665a3e8c97c000f86710c14a14daf387ac | refs/heads/master | 2022-12-13T11:06:56.586659 | 2020-09-14T13:39:53 | 2020-09-14T13:39:53 | 295,423,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from turtle import *
shape("turtle")
speed(5)
color("Gray")
pensize(5)
forward(100)
right(90)
forward(100)
right(90)
forward(100)
right(90)
forward(100)
color("Red")
right(90)
forward(103)
left(140)
forward(70)
left(80)
forward(70)
penup()
forward(20)
right(45)
left(90)
forward(83)
left(90)
forward(57)
left(95)
pendown()
color("Brown")
forward(50)
right(90)
forward(40)
right(90)
forward(50)
penup()
forward(20)
color("White")
done()
| [
"larissa20carvalho17@gmail.com"
] | larissa20carvalho17@gmail.com |
7b9ce6335d49f7b2c29528005a59eb78c4361630 | b2b56763d26ce4edd30b904a0148f3736aaca126 | /learn_python/learn_python_04/01_if_else.py | ffb58d5ef513ae22615e9f0ebc02c704916814fd | [] | no_license | 0xTiefer-Atem/Python_exercise_demo | d042dfb90b0912948c5f3eb06cf7f0cf907b28dd | 241322dc0913731940757a1aef3a540de3af39d9 | refs/heads/main | 2023-04-24T13:10:54.463789 | 2021-05-10T08:57:43 | 2021-05-10T08:57:43 | 365,975,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # if判断语法
# 语法1:
# if 条件:
# 代码一
# 代码三
# 。。。。
# age = int(input("请输入年龄: "))
# is_beautiful = True
# star = "水瓶座"
#
# if 16 < age < 20 and is_beautiful and star == "水瓶座":
# print("我喜欢你")
# else:
# print("咱俩不合适")
#
# print("其他代码.....")
# if语法二:
# if 条件:
# 代码1
# 代码2
# ....
# else :
# 代码1
# 代码2
# ....
# if语法三:
# if 条件:
# 代码1
# 代码2
# ....
# elif 条件:
# 代码1
# 代码2
# ....
# score = int(input("请输入你的成绩: "))
# if score < 60:
# print("不及格")
# elif 60 <= score <= 70:
# print("合格")
# elif 70 < score <= 80:
# print("良")
# else:
# print("优秀")
| [
"1144502582@qq.com"
] | 1144502582@qq.com |
be878f61f2658494ecef8ee6b0d6eba050723a8f | e986bc8871f927e4f9739023d37d112a72b140c4 | /blog/models.py | 93db3b56aca242d87af35170683c3d1fb4cec8af | [] | no_license | Arnold-git/Blog-Web-App-with-Django | 0584c3297865444ee1c65e781a2bd67ae2de49a8 | 8ef73990a3dff8029f20631fb6dd56a52fb8af72 | refs/heads/main | 2022-12-31T11:25:00.181709 | 2020-10-27T20:15:06 | 2020-10-27T20:15:06 | 307,816,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Post(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
| [
"arnold.ighiwiyisi@eng.uniben.edu"
] | arnold.ighiwiyisi@eng.uniben.edu |
e8b54d9e903c75d119d9734ed0e8ed68ba5aebea | f98502c5331a44712bce28af90faa4c3184cb168 | /client.py | 63d26e9125c06848f9a6d8c2944fc89dcc44eae6 | [] | no_license | dpmR/python-tcp | 4aa4365077b04782433ceafc2a8dc8995ca70608 | 2e6a917254c671bcaa5444ecc8c430561a356391 | refs/heads/master | 2021-01-11T17:07:19.811305 | 2017-01-22T15:30:29 | 2017-01-22T15:30:29 | 79,724,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py | #Imports
import socket
import time
import random
import subprocess
#Variables
lHost = "127.0.0.1" #Server IP
port = 4711 #Connection Port
filename_inc = ""
filename_out = ""
#Functions
def send(msg):
s.send(msg.encode("UTF-8"))
print("Sent: " + msg)
def sendFile(filename):
f = open(filename,'rb')
print('Sending...')
send("$")
l = f.read(1024)
while (l):
print('Sending...')
s.send(l)
l = f.read(1024)
f.close()
send("!")
print("Done Sending")
#s.shutdown(socket.SHUT_WR)
#print s.recv(1024)
def getFile(filename):
f = open(filename,'wb')
start_ctrl = s.recv(1)
if start_ctrl == "$":
print("Receiving...")
l = s.recv(1024)
run = True
while (run):
print("Receiving...")
f.write(l)
l = s.recv(1024)
if l[-1:] == "!" or l == "":
print("Done Receiving")
run = False
f.close()
print("File saved!")
#clientsocket.send('Thank you for connecting')
def getInstructions():
connected = True
while (connected):
msg = s.recv(4096)
inst = msg.decode("UTF-8")
#Instructions
if inst == "test":
try:
print("REC: test")
send("[OK]Test works!")
except:
pass
elif inst == "ping":
try:
print("REC: ping")
send("pong")
except:
pass
elif inst[0:8] == "sendFile":
print("REC: sendFile")
try:
print(inst[9:])
filename_out = inst[9:]
print("sending file..")
sleepTime = 1
time.sleep(sleepTime)
sendFile(filename_out)
except:
pass
elif inst[0:7] == "getFile":
print("REC: getFile")
try:
print(inst[8:])
filename_inc = inst[8:]
print("receiving file..")
getFile(filename_inc)
except:
pass
elif inst == "executeTop":
print("REC: executeTop")
try:
print("executing top...")
subprocess.Popen("top", shell=True)
except:
pass
elif inst == "executeFile":
print("REC: executeFile")
try:
print("executing file...")
subprocess.Popen("./client.py", shell=True)
except:
pass
elif inst == "":
connected = False
else:
print("wrong command:")
print(msg)
send("WC")
#Connection
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
#host = lHost
conn | [
"dr@debian"
] | dr@debian |
032759bf15afbf28050b5da8834741e808f9c101 | ed1caa7dab36afb3b4083653c9f87a1575817140 | /maakay/models/tournaments.py | aaaf9642c57bf0aab8472d3c0c1e33e659e07d52 | [] | no_license | hussu010/maakay-discord-bot | 9e79cc1cdfd487b21d2921994fc9cdf2688e7569 | c839e27732d88d53c90076f61a408167f3dae6fe | refs/heads/main | 2023-08-13T15:00:50.785941 | 2021-10-01T10:37:03 | 2021-10-01T10:37:03 | 412,343,143 | 0 | 0 | null | 2021-10-01T05:42:15 | 2021-10-01T05:42:14 | null | UTF-8 | Python | false | false | 1,779 | py | import uuid
from django.db import models
from core.models.users import User
class Tournament(models.Model):
ONGOING = 'ONGOING'
CANCELLED = 'CANCELLED'
COMPLETED = 'COMPLETED'
status_choices = [
(ONGOING, 'Ongoing'),
(CANCELLED, 'Cancelled'),
(COMPLETED, 'Completed')
]
uuid = models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True)
uuid_hex = models.CharField(max_length=255, unique=True)
title = models.CharField(max_length=255)
description = models.CharField(max_length=255)
url = models.URLField(null=True, blank=True)
amount = models.BigIntegerField()
hosted_by = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='hosted_by')
winner = models.ForeignKey(User, on_delete=models.DO_NOTHING, blank=True, null=True, related_name='winner')
status = models.CharField(max_length=255, choices=status_choices, default='ONGOING')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f'Title: {self.title}; Amount: {self.amount}; Status: {self.status}'
# generate a random memo and check if its already taken.
# If taken, generate another memo again until we find a valid memo
def generate_hex_uuid(instance):
while True:
uuid_hex = f'{uuid.uuid4().hex}'
if not Tournament.objects.filter(uuid_hex=uuid_hex).exists():
return uuid_hex
def pre_save_post_receiver(sender, instance, *args, **kwargs):
if not instance.uuid_hex:
instance.uuid_hex = generate_hex_uuid(instance)
# save the memo before the User model is saved with the unique memo
models.signals.pre_save.connect(pre_save_post_receiver, sender=Tournament)
| [
"55182298+dhakalkushal@users.noreply.github.com"
] | 55182298+dhakalkushal@users.noreply.github.com |
4557746799910cdf94ffee6c12d27a8671793a33 | 0787cae7417d0702ac74b63408feb19e82216ae8 | /src/rastervision/semseg/models/conv_logistic.py | 969283adf6c7b7217c1133ed94bcaaa09ab0d1b7 | [
"Apache-2.0"
] | permissive | rahulsingh24/ML | f57b8745cfc307095d3bac110f7de6509f3bdef3 | d5dd7215ab7d3041dc86b03c718bf8d1afa0518c | refs/heads/master | 2021-08-11T04:40:47.179010 | 2021-04-20T19:42:59 | 2021-04-20T19:42:59 | 126,322,203 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | """Convolutional logistic regression model.
An extremely simple model for semantic labeling which is expected to have
poor results. It does logistic regression across sliding windows in the
image. This is just to test that our data is processed correctly and that we
know how to use Keras.
"""
from keras.models import Sequential
from keras.layers import Activation, Conv2D, Reshape
CONV_LOGISTIC = 'conv_logistic'
def make_conv_logistic(input_shape, nb_labels, kernel_size):
"""Make a convolutional logistic regression model.
# Arguments
input_shape: tuple of form (nb_rows, nb_cols, nb_channels)
nb_labels: number of labels in dataset
kernel_size: 2d tuple with shape of convolutional kernel
# Return
The Keras model
"""
nb_rows, nb_cols, _ = input_shape
nb_labels = nb_labels
model = Sequential()
model.add(Conv2D(
nb_labels, (kernel_size[0], kernel_size[1]), padding='same',
input_shape=input_shape, name='conv_labels'))
model.add(Reshape([nb_rows * nb_cols, nb_labels]))
model.add(Activation('softmax'))
model.add(Reshape([nb_rows, nb_cols, nb_labels]))
return model
| [
"sahudhanraj123@gmail.com"
] | sahudhanraj123@gmail.com |
f940bcf1ea682999bed19fc60ca0f4af0c8a6610 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5695413893988352_1/Python/sempav/b.py | 8ad8a3ac5ec44bbc0fd72c27fcc7cc57a5f07a7f | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,416 | py | POS = 1
NEG = 2
BOTH = 3
NONE = 0
ans_c = ''
ans_j = ''
ans_diff = 10**20
def extract_num(score_str):
pow10 = 1
res = 0
for ch in reversed(score_str):
if ch != '?':
res += (ord(ch) - ord('0')) * pow10
pow10 *= 10
return res
def check(diff, ans, positions, score_c, score_j):
global ans_c
global ans_j
global ans_diff
if abs(diff) > abs(ans_diff):
return
c_str = ''
j_str = ''
for d, p, c, j in zip(ans, positions, score_c, score_j):
if p == NONE:
c_str += c
j_str += j
elif p == BOTH:
if d >= 0:
c_str += str(d)
j_str += '0'
else:
c_str += '0'
j_str += str(-d)
elif p == POS:
c_str += str(d)
j_str += j
else: # p == NEG:
c_str += c
j_str += str(-d)
if abs(diff) < abs(ans_diff):
ans_diff = diff
ans_c = c_str
ans_j = j_str
elif abs(diff) == abs(ans_diff):
c_int = int(c_str)
j_int = int(j_str)
ans_c_int = int(ans_c)
ans_j_int = int(ans_j)
if c_int < ans_c_int:
ans_c = c_str
ans_j = j_str
elif c_int == ans_c_int and j_int < ans_j_int:
ans_c = c_str
ans_j = j_str
def solve(i, ans, diff, positions, score_c, score_j):
if i == len(positions):
check(diff, ans, positions, score_c, score_j)
return
pow10 = 10 ** (len(positions) - i - 1)
if positions[i] == NONE:
ans[i] = 0
solve(i + 1, ans, diff, positions, score_c, score_j)
return
if positions[i] == POS:
cur_range = range(0, 10)
elif positions[i] == NEG:
cur_range = range(-9, 1)
elif positions[i] == BOTH:
cur_range = range(-9, 10)
#print(positions[i], diff, list(cur_range))
just_above = cur_range[-1]
for digit in cur_range:
if diff - pow10 * digit == 0:
just_above = digit
break
if diff - pow10 * digit < 0:
just_above = digit - 1
break
if just_above not in cur_range:
just_above = cur_range[0]
just_below = cur_range[0]
for digit in reversed(cur_range):
if diff - pow10 * digit == 0:
just_below = digit
break
if diff - pow10 * digit > 0:
just_below = digit + 1
break
if just_below not in cur_range:
just_below = cur_range[-1]
ans[i] = just_below
solve(i + 1, ans, diff - pow10 * just_below, positions, score_c, score_j)
ans[i] = just_above
solve(i + 1, ans, diff - pow10 * just_above, positions, score_c, score_j)
t = int(input())
for testCase in range(1, t + 1):
score_c, score_j = input().split()
ans_c = ''
ans_j = ''
ans_diff = 10**20
a = extract_num(score_c)
b = extract_num(score_j)
positions = []
for ch_a, ch_b in zip(score_c, score_j):
if ch_a == '?' and ch_b == '?':
positions.append(BOTH)
elif ch_a == '?':
positions.append(POS)
elif ch_b == '?':
positions.append(NEG)
else:
positions.append(NONE)
ans = [0 for tmp in score_c]
solve(0, ans, b - a, tuple(positions), score_c, score_j)
print('Case #{}: {} {}'.format(testCase, ans_c, ans_j))
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
b9d4b40eb8ee711ce836773b1102fa904d5d7281 | fd3460952febec5d4d4cbe16a91fd3f06577d9a6 | /app/views.py | da055749ba8cca16713ebc16eced687cece05cf4 | [] | no_license | sakkhar/accounts | 9ea52783bc720b0422fef00e80b83947f4d9c566 | 7da8357d548cca20426b39c4f39ff0f5608a906f | refs/heads/master | 2020-03-27T20:18:05.622381 | 2018-09-02T05:21:39 | 2018-09-02T05:21:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,472 | py |
from django.contrib import messages
from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import (
LogoutView as BaseLogoutView, PasswordChangeView as BasePasswordChangeView,
PasswordResetDoneView as BasePasswordResetDoneView, PasswordResetConfirmView as BasePasswordResetConfirmView,
)
from django.shortcuts import get_object_or_404, redirect
from django.utils.crypto import get_random_string
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import View, FormView
from django.conf import settings
from .utils import (
send_activation_email, send_reset_password_email, send_forgotten_username_email, send_activation_change_email,
)
from .forms import (
SignInViaUsernameForm, SignInViaEmailForm, SignInViaEmailOrUsernameForm, SignUpForm,
RestorePasswordForm, RestorePasswordViaEmailOrUsernameForm, RemindUsernameForm,
ResendActivationCodeForm, ResendActivationCodeViaEmailForm, ChangeProfileForm, ChangeEmailForm,
)
from .models import Activation
class GuestOnlyView(View):
def dispatch(self, request, *args, **kwargs):
# Redirect to the index page if the user already authenticated
if request.user.is_authenticated:
return redirect(settings.LOGIN_REDIRECT_URL)
return super().dispatch(request, *args, **kwargs)
class LogInView(GuestOnlyView, FormView):
template_name = 'accounts/log_in.html'
@staticmethod
def get_form_class(**kwargs):
if settings.DISABLE_USERNAME or settings.LOGIN_VIA_EMAIL:
return SignInViaEmailForm
if settings.LOGIN_VIA_EMAIL_OR_USERNAME:
return SignInViaEmailOrUsernameForm
return SignInViaUsernameForm
@method_decorator(sensitive_post_parameters('password'))
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
# Sets a test cookie to make sure the user has cookies enabled
request.session.set_test_cookie()
return super().dispatch(request, *args, **kwargs)
def form_valid(self, form):
request = self.request
# If the test cookie worked, go ahead and delete it since its no longer needed
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
# The default Django's "remember me" lifetime is 2 weeks and can be changed by modifying
# the SESSION_COOKIE_AGE settings' option.
if settings.USE_REMEMBER_ME:
if not form.cleaned_data['remember_me']:
request.session.set_expiry(0)
login(request, form.user_cache)
redirect_to = request.POST.get(REDIRECT_FIELD_NAME, request.GET.get(REDIRECT_FIELD_NAME))
url_is_safe = is_safe_url(redirect_to, allowed_hosts=request.get_host(), require_https=request.is_secure())
if url_is_safe:
return redirect(redirect_to)
return redirect(settings.LOGIN_REDIRECT_URL)
class SignUpView(GuestOnlyView, FormView):
template_name = 'accounts/sign_up.html'
form_class = SignUpForm
def form_valid(self, form):
request = self.request
user = form.save(commit=False)
if settings.DISABLE_USERNAME:
# Set a temporary username
user.username = get_random_string()
else:
user.username = form.cleaned_data['username']
if settings.ENABLE_USER_ACTIVATION:
user.is_active = False
# Create a user record
user.save()
# Change the username to the "user_ID" form
if settings.DISABLE_USERNAME:
user.username = f'user_{user.id}'
user.save()
if settings.ENABLE_USER_ACTIVATION:
code = get_random_string(20)
act = Activation()
act.code = code
act.user = user
act.save()
send_activation_email(request, user.email, code)
messages.success(
request, _('You are signed up. To activate the account, follow the link sent to the mail.'))
else:
raw_password = form.cleaned_data['password1']
user = authenticate(username=user.username, password=raw_password)
login(request, user)
messages.success(request, _('You are successfully signed up!'))
return redirect('index')
class ActivateView(View):
@staticmethod
def get(request, code):
act = get_object_or_404(Activation, code=code)
# Activate profile
user = act.user
user.is_active = True
user.save()
# Remove the activation record
act.delete()
messages.success(request, _('You have successfully activated your account!'))
return redirect('accounts:log_in')
class ResendActivationCodeView(GuestOnlyView, FormView):
template_name = 'accounts/resend_activation_code.html'
@staticmethod
def get_form_class(**kwargs):
if settings.DISABLE_USERNAME:
return ResendActivationCodeViaEmailForm
return ResendActivationCodeForm
def form_valid(self, form):
user = form.user_cache
activation = user.activation_set.first()
activation.delete()
code = get_random_string(20)
act = Activation()
act.code = code
act.user = user
act.save()
send_activation_email(self.request, user.email, code)
messages.success(self.request, _('A new activation code has been sent to your email address.'))
return redirect('accounts:resend_activation_code')
class RestorePasswordView(GuestOnlyView, FormView):
template_name = 'accounts/restore_password.html'
@staticmethod
def get_form_class(**kwargs):
if settings.RESTORE_PASSWORD_VIA_EMAIL_OR_USERNAME:
return RestorePasswordViaEmailOrUsernameForm
return RestorePasswordForm
def form_valid(self, form):
user = form.user_cache
token = default_token_generator.make_token(user)
uid = urlsafe_base64_encode(force_bytes(user.pk)).decode()
send_reset_password_email(self.request, user.email, token, uid)
return redirect('accounts:restore_password_done')
class ChangeProfileView(LoginRequiredMixin, FormView):
template_name = 'accounts/profile/change_profile.html'
form_class = ChangeProfileForm
def get_initial(self):
user = self.request.user
initial = super().get_initial()
initial['first_name'] = user.first_name
initial['last_name'] = user.last_name
return initial
def form_valid(self, form):
user = self.request.user
user.first_name = form.cleaned_data['first_name']
user.last_name = form.cleaned_data['last_name']
user.save()
messages.success(self.request, _('Profile data has been successfully updated.'))
return redirect('accounts:change_profile')
class ChangeEmailView(LoginRequiredMixin, FormView):
template_name = 'accounts/profile/change_email.html'
form_class = ChangeEmailForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def get_initial(self):
initial = super().get_initial()
initial['email'] = self.request.user.email
return initial
def form_valid(self, form):
user = self.request.user
email = form.cleaned_data['email']
if settings.ENABLE_ACTIVATION_AFTER_EMAIL_CHANGE:
code = get_random_string(20)
act = Activation()
act.code = code
act.user = user
act.email = email
act.save()
send_activation_change_email(self.request, email, code)
messages.success(self.request, _('To complete the change of email address, click on the link sent to it.'))
else:
user.email = email
user.save()
messages.success(self.request, _('Email successfully changed.'))
return redirect('accounts:change_email')
class ChangeEmailActivateView(View):
@staticmethod
def get(request, code):
act = get_object_or_404(Activation, code=code)
# Change the email
user = act.user
user.email = act.email
user.save()
# Remove the activation record
act.delete()
messages.success(request, _('You have successfully changed your email!'))
return redirect('accounts:change_email')
class RemindUsernameView(GuestOnlyView, FormView):
template_name = 'accounts/remind_username.html'
form_class = RemindUsernameForm
def form_valid(self, form):
user = form.user_cache
send_forgotten_username_email(user.email, user.username)
messages.success(self.request, _('Your username has been successfully sent to your email.'))
return redirect('accounts:remind_username')
class ChangePasswordView(BasePasswordChangeView):
    """Change the logged-in user's password and keep them signed in."""

    template_name = 'accounts/profile/change_password.html'

    def form_valid(self, form):
        # Persist the new password.
        updated_user = form.save()
        # Re-authenticate so the session survives the password change.
        login(self.request, updated_user)
        messages.success(self.request, _('Your password was changed.'))
        return redirect('accounts:change_password')
class RestorePasswordConfirmView(BasePasswordResetConfirmView):
    """Set a new password reached through a password-reset link."""

    template_name = 'accounts/restore_password_confirm.html'

    def form_valid(self, form):
        form.save()  # persist the new password
        messages.success(self.request, _('Your password has been set. You may go ahead and log in now.'))
        return redirect('accounts:log_in')
class RestorePasswordDoneView(BasePasswordResetDoneView):
    # Confirmation page shown after a password-reset email has been sent.
    template_name = 'accounts/restore_password_done.html'
class LogOutView(LoginRequiredMixin, BaseLogoutView):
    # Logout page; restricted to authenticated users.
    template_name = 'accounts/log_out.html'
"sakkhar@mail.com"
] | sakkhar@mail.com |
6fc3e353a8326a114fc60b18e3229535220c28c9 | 0a118477c8b6d1ef79b26310a1d3fb06716743e9 | /contributer_demo/demo2/coordination/formation_demo/my_leader.py | 6a3f3c47ca5aa8e260d12d37928babc195285821 | [
"MIT"
] | permissive | nsgcjdsz/XTDrone | 773ea65421044a895e427cfc68d9e3669210c12a | ebefd6cf943b95998e1b47de6be9052a146d667d | refs/heads/master | 2023-08-25T18:12:48.338686 | 2021-10-23T12:03:47 | 2021-10-23T12:03:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,406 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import rospy
from geometry_msgs.msg import Twist, Vector3, PoseStamped
from std_msgs.msg import String
from pyquaternion import Quaternion
import time
import math
import numpy
import sys
#if sys.argv[2] == '6':  # formation_dict is a file in this folder
#    from formation_dict import formation_dict_6 as formation_dict
#elif sys.argv[2] == '9':
#    from formation_dict import formation_dict_9 as formation_dict
#elif sys.argv[2] == '18':
#    from formation_dict import formation_dict_18 as formation_dict
# Select the formation lookup table by swarm size (argv[2] = number of UAVs).
# Both supported sizes currently map to the same custom dictionary.
if sys.argv[2] == '21':
    from my_formation_dict import formation_dict_my as formation_dict
elif sys.argv[2] == '34':
    from my_formation_dict import formation_dict_my as formation_dict
class Leader:
    """ROS node for the formation-leader UAV.

    Forwards externally commanded velocities (plus collision-avoidance
    acceleration) to the leader vehicle and broadcasts formation-switch
    commands to the followers once all of them report arrival.
    """

    def __init__(self, uav_type, leader_id, uav_num):
        self.hover = True
        self.id = leader_id
        self.local_pose = PoseStamped()
        self.cmd_vel_enu = Twist()
        # Everyone except the leader itself is a follower.
        self.follower_num = uav_num - 1
        self.followers_info = ["Moving"]*self.follower_num
        self.follower_arrived_num = 0
        self.follower_all_arrived = True
        self.avoid_accel = Vector3(0,0,0)
        self.formation_config = 'waiting'
        self.target_height_recorded = False
        self.cmd = String()
        self.f = 200   # control-loop frequency (Hz)
        self.Kz = 0.5  # proportional gain for the pyramid height controller
        # Inputs: own pose, commanded velocity, avoidance acceleration, leader command.
        self.local_pose_sub = rospy.Subscriber(uav_type+'_'+str(self.id)+"/mavros/local_position/pose", PoseStamped , self.local_pose_callback)
        self.cmd_vel_sub = rospy.Subscriber("/xtdrone/leader/cmd_vel_flu", Twist, self.cmd_vel_callback)
        self.avoid_vel_sub = rospy.Subscriber("/xtdrone/"+uav_type+'_'+str(self.id)+"/avoid_accel", Vector3, self.avoid_accel_callback)
        self.leader_cmd_sub = rospy.Subscriber("/xtdrone/leader/cmd",String, self.cmd_callback)
        for i in range(self.follower_num):  # iterate over every follower
            rospy.Subscriber('/xtdrone/'+uav_type+'_'+str(i+1)+'/info',String,self.followers_info_callback,i)
        # Outputs: leader pose broadcast, formation switch, velocity and command pass-through.
        self.local_pose_pub = rospy.Publisher("/xtdrone/leader/pose", PoseStamped , queue_size=10)
        self.formation_switch_pub = rospy.Publisher("/xtdrone/formation_switch",String, queue_size=10)
        self.vel_enu_pub = rospy.Publisher('/xtdrone/'+uav_type+'_'+str(self.id)+'/cmd_vel_enu', Twist, queue_size=10)
        self.cmd_pub = rospy.Publisher('/xtdrone/'+uav_type+'_'+str(self.id)+'/cmd', String, queue_size=10)

    def local_pose_callback(self, msg):
        # Cache the latest local pose from MAVROS.
        self.local_pose = msg

    def cmd_vel_callback(self, msg):
        # Cache the latest commanded velocity; zero vertical speed means hover.
        self.cmd_vel_enu = msg
        if msg.linear.z == 0:
            self.hover = True  # hover
        else:
            self.hover = False

    def cmd_callback(self, msg):
        # A known formation name switches the formation; anything else is
        # treated as a raw command to forward to the vehicle.
        if msg.data in formation_dict.keys():
            self.formation_config = msg.data
        else:
            self.cmd = msg.data

    def avoid_accel_callback(self, msg):
        # Cache the latest collision-avoidance acceleration.
        self.avoid_accel = msg

    def followers_info_callback(self, msg, id):
        # ``id`` is the follower index passed as callback_args at subscription.
        self.followers_info[id] = msg.data
        #print("follower"+str(id)+":"+ msg.data)

    def loop(self):
        """Main control loop: publish velocity, pose and formation state at ``self.f`` Hz."""
        rospy.init_node('leader')
        rate = rospy.Rate(self.f)
        while True:
            #self.cmd_vel_enu = Twist()
            # NOTE(review): follower_arrived_num is never reset between
            # iterations, so the count accumulates across loop cycles -- confirm
            # this is intended.
            for follower_info in self.followers_info:
                if follower_info == "Arrived":  # one follower has arrived
                    self.follower_arrived_num += 1
            if self.follower_arrived_num > self.follower_num - 1:
                self.follower_all_arrived = True  # all followers arrived
            if self.follower_all_arrived:
                self.formation_switch_pub.publish(self.formation_config)
            if self.formation_config == 'pyramid':
                # Record the climb target once, then P-control the height.
                # NOTE(review): ``target_height`` is a loop-local set only on the
                # first pyramid iteration -- verify it is always assigned before use.
                if not self.target_height_recorded:
                    target_height = self.local_pose.pose.position.z + 2
                    self.target_height_recorded = True
                self.cmd_vel_enu.linear.z = self.Kz * (target_height - self.local_pose.pose.position.z)
            # Superimpose the avoidance acceleration on the commanded velocity.
            self.cmd_vel_enu.linear.x += self.avoid_accel.x
            self.cmd_vel_enu.linear.y += self.avoid_accel.y
            self.cmd_vel_enu.linear.z += self.avoid_accel.z
            self.vel_enu_pub.publish(self.cmd_vel_enu)
            self.local_pose_pub.publish(self.local_pose)
            self.cmd_pub.publish(self.cmd)
            rate.sleep()
if __name__ == '__main__':
    # argv[1]: UAV type (e.g. 'iris'); argv[2]: total number of UAVs.
    # The leader always has vehicle id 0.
    leader = Leader(sys.argv[1], 0, int(sys.argv[2]))
    leader.loop()
| [
"robin_shaun@foxmail.com"
] | robin_shaun@foxmail.com |
41dbc5a92725aee961175682a2fa30a8d0cafdd6 | 7ad3b98ba2dd4736768da218afbb3e61abe67b5d | /contin/feed/urls.py | 47882ce737323edd06f0301757f54527c3f4a8ea | [] | no_license | asimunit/django-SocialNetwork | d208409910154df9f793371f65a28152af08d828 | f385a2d31573a9e326860414e8c4c399270ab7ce | refs/heads/master | 2023-05-15T03:27:26.070850 | 2021-06-14T02:56:55 | 2021-06-14T02:56:55 | 369,500,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.Home.as_view(), name='home'),
path('post/new/', views.create_post, name='create_post'),
path('post/<int:pk>/', views.post_detail, name='post-detail'),
path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post'
'-update'),
path('post/<int:pk>/delete/', views.post_delete, name='post-delete'),
path('post/save/', views.save_post, name='post-save'),
path('post/<int:pk>/space_ner/', views.name_entity_recognition_spacy,
name='name_entity_recognition_spacy'),
path('post/<int:pk>/stanford_ner/', views.name_entity_recognition_stanford,
name='name_entity_recognition_stanford'),
path('post/<int:pk>/stanford_ner_cmd/', views.name_entity_recognition_cmd,
name='name_entity_recognition_cmd'),
]
| [
"asimunit.2015@gmail.com"
] | asimunit.2015@gmail.com |
49c123e66a9ca564f05e711b6a61d32c9dcd56b6 | b4ea8a4c451aedf955f121c4331c46edd3879846 | /aifedayoscrumy/views.py | a48b66310694220c3222e21d33c59d95c78e42e7 | [] | no_license | Aifedayo/scrumyapp | 04fa6a61d7bbd79d7b367774296500c4383fd64a | a8f4fac4419be4159e01127a46a5423a525f7777 | refs/heads/master | 2022-07-04T18:05:43.120727 | 2020-05-25T10:57:38 | 2020-05-25T10:57:38 | 266,754,398 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,751 | py | from django.shortcuts import render, get_object_or_404, Http404, redirect
from django.http import HttpResponse, HttpResponseRedirect
from .models import *
import random
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse
from .forms import *
from django.contrib.auth.models import User, Group
from django.db.models import Q
###############################################################################################
# C R E A T I N G S U P E R U S E R S #
###############################################################################################
def index(request):
    """Sign-up view: render the registration form and create the account.

    NOTE(review): every new account is created via ``create_superuser`` and
    added to the 'Developer' group -- confirm that granting superuser rights
    to all sign-ups is intentional.
    """
    if request.method == 'POST':
        # Form was submitted
        form = SignUpForm(request.POST)
        if form.is_valid():
            post = form.save(commit = False)
            my_group = Group.objects.get(name = 'Developer')
            # ``create_superuser`` hashes the password and saves the user.
            post.user = User.objects.create_superuser(
                first_name = request.POST.get('first_name'),
                last_name = request.POST.get('last_name'),
                email = request.POST.get('email'),
                username = request.POST.get('username'),
                password = request.POST.get('password'))
            my_group.user_set.add(post.user)
            # Form field passed validation
            post.user.save()
            return render(request, 'success.html', {'form' : form})
    else:
        form = SignUpForm()
    return render(request, 'aifedayoscrumy/index.html', {'form' : form})
    #return HttpResponse("This is a Scrum Application")
###############################################################################################
# D I S P L A Y I N G I N D I V I D U A L G O A L A T T R I B U T E S #
###############################################################################################
def goal_detail(request, goal_id):
    """Render the attributes of a single goal, or 404 if it does not exist."""
    goal = get_object_or_404(ScrumyGoals, goal_id=goal_id)
    context = {'post': goal}
    return render(request, 'aifedayoscrumy/goal_detail.html', context)
###############################################################################################
# H O M E P A G E F O R D I S P L A Y I N G U S E R S' G O A L S #
###############################################################################################
def home(request):
    """Home page: list every goal of every user."""
    context = {'scrumygoals': ScrumyGoals.objects.all()}
    return render(request, 'aifedayoscrumy/home.html', context)
###############################################################################################
# C R E A T I N G G O A L S A S P E R U S E R S #
###############################################################################################
def _unused_goal_id():
    """Return a random 4-digit goal id not already used by an existing goal.

    Fixes two defects in the original implementation: the per-call
    ``sample_dict`` dedup was dead code (the dict was recreated on every
    request, so the membership test never failed), and the raw random number
    could collide with an id already stored in the database.
    """
    number = random.randint(1000, 9999)
    while ScrumyGoals.objects.filter(goal_id=number).exists():
        number = random.randint(1000, 9999)
    return number


def add_goal(request):
    """Create a new weekly goal for the current user.

    Only members of the Owner, Developer or Quality Assurance groups may
    submit; a goal must belong to the requesting user and start in the
    'Weekly Goal' status. Everyone (including non-members and GET requests)
    is shown the creation form.

    The original's three role branches with identical else-bodies are
    collapsed into one membership query, and an invalid POST now re-renders
    the form instead of falling off the function and returning ``None``
    (which produced an HTTP 500).
    """
    user = request.user
    may_post = User.objects.filter(
        username=user,
        groups__name__in=['Owner', 'Developer', 'Quality Assurance'],
    ).exists()
    if may_post and request.method == 'POST':
        form = CreateGoalForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            if post.user != request.user:
                # Goals may only be created for oneself.
                return render(request, 'user_error.html', {})
            if post.goal_status != GoalStatus.objects.get(status_name='Weekly Goal'):
                # New goals must start as weekly goals.
                return render(request, 'move_error.html', {})
            post.goal_id = _unused_goal_id()
            post.save()
            return redirect('goal_detail', goal_id=post.goal_id)
    form = CreateGoalForm()
    return render(request, 'aifedayoscrumy/addgoal.html', {'form': form})
###############################################################################################
# M O V I N G G O A L S A S P E R U S E R S #
###############################################################################################
def move_goal(request, goal_id):
    """Move a goal to another status, subject to the caller's group rights.

    Permission model (preserved from the original implementation, including
    its Developer-before-QA/Admin-before-Owner precedence):
      * Developer                  -- only their own goals, never into 'Done Goal'.
      * Quality Assurance / Admin  -- any goal, any status.
      * Owner                      -- only their own goals, any status.
      * anyone else                -- sees the form; submissions are not processed.

    Improvements over the original: the byte-identical Quality Assurance and
    Admin branches are merged, the four near-duplicate role branches are
    collapsed into one flow, and an invalid POST now re-renders the form
    instead of returning ``None`` (HTTP 500).
    """
    user = request.user
    post = get_object_or_404(ScrumyGoals, goal_id=goal_id)

    def in_group(name):
        # Single place for the repeated group-membership query.
        return User.objects.filter(username=user, groups__name=name).exists()

    # Resolve the effective role with the original precedence order.
    if in_group('Developer'):
        role = 'Developer'
    elif in_group('Quality Assurance') or in_group('Admin'):
        role = 'Unrestricted'
    elif in_group('Owner'):
        role = 'Owner'
    else:
        role = None

    if role is not None and request.method == 'POST':
        form = MoveGoalForm(request.POST, instance=post)
        if role in ('Developer', 'Owner') and post.user != request.user:
            # Developers and Owners may only move their own goals.
            return render(request, 'user_error.html', {})
        if form.is_valid():
            # After validation ``post.goal_status`` holds the *requested* status.
            if role == 'Developer' and post.goal_status == GoalStatus.objects.get(status_name='Done Goal'):
                # Developers may not move goals into 'Done Goal'.
                return render(request, 'move_error.html', {})
            post = form.save(commit=False)
            post.save()
            return redirect('goal_detail', goal_id=post.goal_id)

    form = MoveGoalForm(instance=post)
    return render(request, 'aifedayoscrumy/move_goal.html', {'form': form, 'post': post})
| [
"akeemifedayolag@gmail.com"
] | akeemifedayolag@gmail.com |
84072dbb16d09e04624c613e573e99e06f88a20d | 0442edb5a817569b46151c5f131c90c0f0fba313 | /__init__.py | 15a258e137fed24740151aeb6bfe11e3b463e2d8 | [
"BSD-3-Clause"
] | permissive | RiverArchitect/riverarchitect | 6847d7a8705530b226ef3ed01e9aa38b7abf316d | be21ff3c69c9cc0e50a7360c360cc435ad9eafb3 | refs/heads/main | 2023-08-31T14:45:42.064572 | 2021-09-20T12:56:46 | 2021-09-20T12:56:46 | 408,382,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | print("Hi from RiverArchitect RTD")
| [
"sebastian.schwindt@alumni.epfl.ch"
] | sebastian.schwindt@alumni.epfl.ch |
7053bfbfc4d5df12a1ea652236039b7e779c28cb | 08e411f248252de190a60b5643ead021c1bde19e | /hire/admin.py | 41f77f05f8e08104930f93acb1c6e361f3576f59 | [] | no_license | Priteshkamde/SP_Tbi | ea9756d1e95ebcf4fad3526101d5cc956aaa7788 | 64dcbc9be092e8469539672107cfe0b3c5318391 | refs/heads/master | 2020-04-11T20:57:50.693099 | 2018-12-26T13:14:12 | 2018-12-26T13:14:12 | 162,089,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.contrib import admin
from .models import *

# Expose the hiring models in the Django admin site.
admin.site.register(Company)
admin.site.register(JobPost)
admin.site.register(Applications)
| [
"40750377+Priteshkamde@users.noreply.github.com"
] | 40750377+Priteshkamde@users.noreply.github.com |
30d65da0cff0e93c6725787294a07ddd9d4abcb3 | d161a5ea1e4fff3540f488e953619d06da715b7e | /dtf_tasks/__init__.py | ca8f2e17c6c9df1eec1c73148ef5371ba61d6aad | [] | no_license | mjtorn/prkl | 017e734911d5c557c046bfa32f32db016bfc2145 | 1c95d454c9efc180058156694b5c9900ef526ed0 | refs/heads/master | 2021-01-13T01:37:55.214514 | 2009-12-22T17:07:39 | 2009-12-22T17:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | # vim: tabstop=4 expandtab autoindent shiftwidth=4 fileencoding=utf-8
from tweeter import Tweeter
# EOF
| [
"mjt@fad3.fadgames.com"
] | mjt@fad3.fadgames.com |
a5d7909a17f6af66a01de4d84f29e1e7ee96e4b5 | 380a47268c5975473a2e7c38c747bc3bdbd981b1 | /benchmark/third_party/transformers/examples/pytorch/summarization/run_summarization_no_trainer.py | 3bd925569bb6a8347b8f7e8562568fc34cfb023a | [
"Apache-2.0"
] | permissive | FMInference/FlexGen | 07aa9b1918c19b02077e13ad07e76840843810dd | d34f7b4b43ed87a374f394b0535ed685af66197b | refs/heads/main | 2023-07-24T02:29:51.179817 | 2023-07-21T22:38:31 | 2023-07-21T22:38:31 | 602,270,517 | 6,821 | 411 | Apache-2.0 | 2023-07-07T22:59:24 | 2023-02-15T21:18:53 | Python | UTF-8 | Python | false | false | 31,761 | py | #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning a 🤗 Transformers model on summarization.
"""
# You can also adapt this script on your own summarization task. Pointers for this are left as comments.
import argparse
import json
import logging
import math
import os
import random
from pathlib import Path
import datasets
import nltk
import numpy as np
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import evaluate
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from filelock import FileLock
from huggingface_hub import Repository
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
SchedulerType,
get_scheduler,
)
from transformers.utils import check_min_version, get_full_repo_name, is_offline_mode, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.24.0")

logger = get_logger(__name__)

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")

# You should update this to your particular problem to have better documentation of `model_type`
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)

# Ensure the NLTK 'punkt' sentence tokenizer data is available (used when
# post-processing predictions for ROUGE). In offline mode it cannot be
# downloaded, so fail with an actionable message instead.
try:
    nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
    if is_offline_mode():
        raise LookupError(
            "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
        )

    # The file lock guards against concurrent downloads by multiple processes.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)

# Maps dataset name -> (input text column, target summary column) for the
# public summarization datasets this script knows about.
summarization_name_mapping = {
    "amazon_reviews_multi": ("review_body", "review_title"),
    "big_patent": ("description", "abstract"),
    "cnn_dailymail": ("article", "highlights"),
    "orange_sum": ("text", "summary"),
    "pn_summary": ("article", "summary"),
    "psc": ("extract_text", "summary_text"),
    "samsum": ("dialogue", "summary"),
    "thaisum": ("body", "summary"),
    "xglue": ("news_body", "news_title"),
    "xsum": ("document", "summary"),
    "wiki_summary": ("article", "highlights"),
}
def parse_args():
    """Parse and validate command-line arguments for summarization fine-tuning.

    Returns:
        argparse.Namespace: the parsed arguments.

    Raises:
        ValueError: if neither ``--dataset_name`` nor a train/validation file
            is provided.
        AssertionError: if a provided data file is not ``.csv``/``.json``, or
            ``--push_to_hub`` is used without ``--output_dir``.
    """
    parser = argparse.ArgumentParser(description="Finetune a transformers model on a summarization task")
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help="The name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The configuration name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--train_file", type=str, default=None, help="A csv or a json file containing the training data."
    )
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--ignore_pad_token_for_loss",
        type=bool,
        default=True,
        help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.",
    )
    parser.add_argument(
        "--max_source_length",
        type=int,
        default=1024,
        help=(
            "The maximum total input sequence length after "
            "tokenization.Sequences longer than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--source_prefix",
        type=str,
        default=None,
        help="A prefix to add before every source text (useful for T5 models).",
    )
    parser.add_argument(
        "--preprocessing_num_workers",
        type=int,
        default=None,
        help="The number of processes to use for the preprocessing.",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--max_target_length",
        type=int,
        default=128,
        help=(
            "The maximum total sequence length for target text after "
            "tokenization. Sequences longer than this will be truncated, sequences shorter will be padded."
            "during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--val_max_target_length",
        type=int,
        default=None,
        help=(
            "The maximum total sequence length for validation "
            "target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be "
            "padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` "
            "param of ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=128,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            " sequences shorter will be padded if `--pad_to_max_lengh` is passed."
        ),
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--text_column",
        type=str,
        default=None,
        help="The name of the column in the datasets containing the full texts (for summarization).",
    )
    parser.add_argument(
        "--summary_column",
        type=str,
        default=None,
        help="The name of the column in the datasets containing the summaries (for summarization).",
    )
    parser.add_argument(
        "--use_slow_tokenizer",
        action="store_true",
        help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--model_type",
        type=str,
        default=None,
        help="Model type to use if training from scratch.",
        choices=MODEL_TYPES,
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument(
        "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
    )
    parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to enable experiment trackers for logging.",
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="all",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
            ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
            "Only applicable when `--with_tracking` is passed."
        ),
    )
    args = parser.parse_args()

    # Sanity checks: data sources must be present and of a supported format.
    if args.dataset_name is None and args.train_file is None and args.validation_file is None:
        raise ValueError("Need either a dataset name or a training/validation file.")
    else:
        if args.train_file is not None:
            extension = args.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if args.validation_file is not None:
            extension = args.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."

    if args.push_to_hub:
        assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."

    return args
def main():
args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_summarization_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment
accelerator_log_kwargs = {}
if args.with_tracking:
accelerator_log_kwargs["log_with"] = args.report_to
accelerator_log_kwargs["logging_dir"] = args.output_dir
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)
if args.source_prefix is None and args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with "
"`--source_prefix 'summarize: ' `"
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForSeq2SeqLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForSeq2SeqLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = args.source_prefix if args.source_prefix is not None else ""
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
# Get the column names for input/target.
dataset_columns = summarization_name_mapping.get(args.dataset_name, None)
if args.text_column is None:
text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
text_column = args.text_column
if text_column not in column_names:
raise ValueError(
f"--text_column' value '{args.text_column}' needs to be one of: {', '.join(column_names)}"
)
if args.summary_column is None:
summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
summary_column = args.summary_column
if summary_column not in column_names:
raise ValueError(
f"--summary_column' value '{args.summary_column}' needs to be one of: {', '.join(column_names)}"
)
# Temporarily set max_target_length for training.
max_target_length = args.max_target_length
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[summary_column]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
# Tokenize targets with the `text_target` keyword argument
labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
with accelerator.main_process_first():
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 1):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if accelerator.use_fp16 else None,
)
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
# rougeLSum expects newline after each sentence
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Figure out how many steps we should save the Accelerator states
checkpointing_steps = args.checkpointing_steps
if checkpointing_steps is not None and checkpointing_steps.isdigit():
checkpointing_steps = int(checkpointing_steps)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if args.with_tracking:
experiment_config = vars(args)
# TensorBoard cannot log Enums, need the raw value
experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
accelerator.init_trackers("summarization_no_trainer", experiment_config)
# Metric
metric = evaluate.load("rouge")
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
else:
resume_step = int(training_difference.replace("step_", ""))
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
for epoch in range(starting_epoch, args.num_train_epochs):
model.train()
if args.with_tracking:
total_loss = 0
for step, batch in enumerate(train_dataloader):
# We need to skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == starting_epoch:
if resume_step is not None and step < resume_step:
completed_steps += 1
continue
with accelerator.accumulate(model):
outputs = model(**batch)
loss = outputs.loss
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
output_dir = f"step_{completed_steps }"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= args.max_train_steps:
break
model.eval()
if args.val_max_target_length is None:
args.val_max_target_length = args.max_target_length
gen_kwargs = {
"max_length": args.val_max_target_length if args is not None else config.max_length,
"num_beams": args.num_beams,
}
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
generated_tokens = accelerator.unwrap_model(model).generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
)
generated_tokens = accelerator.pad_across_processes(
generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
)
labels = batch["labels"]
if not args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id)
generated_tokens, labels = accelerator.gather_for_metrics((generated_tokens, labels))
generated_tokens = generated_tokens.cpu().numpy()
labels = labels.cpu().numpy()
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
if isinstance(generated_tokens, tuple):
generated_tokens = generated_tokens[0]
decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
decoded_preds, decoded_labels = accelerator.gather_for_metrics(decoded_preds, decoded_labels)
metric.add_batch(
predictions=decoded_preds,
references=decoded_labels,
)
result = metric.compute(use_stemmer=True)
result = {k: round(v * 100, 4) for k, v in result.items()}
logger.info(result)
if args.with_tracking:
result["train_loss"] = total_loss.item() / len(train_dataloader)
result["epoch"] = epoch
result["step"] = completed_steps
accelerator.log(result, step=completed_steps)
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.checkpointing_steps == "epoch":
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
json.dump(
{
"eval_rouge1": result["rouge1"],
"eval_rouge2": result["rouge2"],
"eval_rougeL": result["rougeL"],
"eval_rougeLsum": result["rougeLsum"],
},
f,
)
if __name__ == "__main__":
main()
| [
"sqy1415@gmail.com"
] | sqy1415@gmail.com |
3ce82361cee6e451334ba20a496ae2c3cdf43949 | c597fbaa224302fafba7f1ee893dc340eeefd4af | /services/views.py | ca9ee1e2b8c696c5ef10a68e109a3a7e64a726a9 | [] | no_license | MohamedMawed/government-service-provider-backend | cc49b31d18f647cd7d13c47c78cd5dd734052f83 | 0fdb0bdd09226f22651d9cfd1bcc0595c34c7eb3 | refs/heads/master | 2020-12-14T10:57:26.341057 | 2019-12-18T09:09:04 | 2019-12-18T09:09:04 | 234,719,183 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,806 | py | from .models import *
from rest_framework.views import APIView
from .serializers import *
from accounts.serializers import UserSerializer
from accounts.models import User
from rest_framework import generics
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from django.core.exceptions import PermissionDenied
from rest_framework import status
# Create your views here.
class ServiceRUD(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Service. Requires authentication."""
    queryset = Service.objects.all()
    serializer_class = ServiceSerializer
    # lookup_url_kwarg = 'off_id'
    permission_classes = (IsAuthenticated,)
class ServiceAddonRUD(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single ServiceAddon. Requires authentication."""
    queryset = ServiceAddon.objects.all()
    serializer_class = ServiceAddonSerializer
    # lookup_url_kwarg = 'off_id'
    permission_classes = (IsAuthenticated,)
class OfficeRUD(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Office. Requires authentication."""
    queryset = Office.objects.all()
    serializer_class = OfficeSerializer
    # lookup_url_kwarg = 'off_id'
    permission_classes = (IsAuthenticated,)
class GehaAllList(generics.ListAPIView):
    """List every Office in the system. Requires authentication."""
    queryset = Office.objects.all()
    serializer_class = OfficeSerializer
    permission_classes = (IsAuthenticated,)
class CreateService(generics.CreateAPIView):
    """Create a new Service. Requires authentication."""
    queryset = Service.objects.all()
    serializer_class = ServiceSerializer
    permission_classes = (IsAuthenticated,)
class CreateServiceAddon(generics.CreateAPIView):
    """Create a new ServiceAddon. Requires authentication."""
    queryset = ServiceAddon.objects.all()
    serializer_class = ServiceAddonSerializer
    permission_classes = (IsAuthenticated,)
class CreateOffice(generics.CreateAPIView):
    """Create a new Office. Requires authentication."""
    queryset = Office.objects.all()
    serializer_class = OfficeSerializer
    permission_classes = (IsAuthenticated,)
class OfficeList(APIView):
    """List one level of the office tree, annotated with per-office service counts."""
    permission_classes = (IsAuthenticated,)

    def get(self, request, pk, format=None):
        """Return root offices when pk == '0', otherwise the children of office `pk`.

        Each serialized office gets a `count` key holding the number of
        services attached to it.
        """
        # BUG FIX: the original used `pk is '0'` -- an identity comparison with
        # a str literal whose result is interpreter-dependent; use `==`.
        if pk == '0':
            offices = Office.objects.filter(parent_id__isnull=True)
        else:
            offices = Office.objects.filter(parent_id=pk)
        serializer = OfficeSerializer(offices, many=True)
        for office in serializer.data:
            # COUNT(*) in the database instead of materializing every row.
            office['count'] = Service.objects.filter(
                off_id_id=office['off_id']).count()
        return Response(serializer.data)
class ServiceList(generics.ListAPIView):
    """List services, optionally filtered by the `office` query parameter."""
    serializer_class = ServiceSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Narrow the full service set to a single office when requested."""
        office_id = self.request.query_params.get('office')
        services = Service.objects.all()
        if office_id:
            services = services.filter(off_id_id=office_id)
        return services
class ServiceAddonList(generics.ListAPIView):
    """List service add-ons, optionally filtered by the `service` query parameter."""
    serializer_class = ServiceAddonSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Narrow the add-on set to a single parent service when requested."""
        service_id = self.request.query_params.get('service')
        addons = ServiceAddon.objects.all()
        if service_id:
            addons = addons.filter(srv_id_id=service_id)
        return addons
class ParametersList(APIView):
    """Return the parameters of a service plus the delivery places of a geha."""
    permission_classes = (IsAuthenticated,)

    def get(self, request, format=None):
        """Read `service` and `geha` query params and bundle both result sets."""
        query = self.request.query_params
        parameters = ServiceParameter.objects.filter(srv_id=query.get('service'))
        places = DelivaryPlaces.objects.filter(off_id=query.get('geha'))
        payload = {
            "parameters": ServiceParameterSerializer(parameters, many=True).data,
            "deliveryPlaces": DelivaryPlacesSerializer(places, many=True).data,
        }
        return Response(payload)
class CreateOrder(APIView):
    """Create an order for the authenticated user; inactive users are rejected."""
    permission_classes = (IsAuthenticated,)

    def post(self, request, format=None):
        """Validate the posted order payload and persist it for `request.user`."""
        user = request.user
        if not user.active:
            return Response(
                {"detail": "You are not active please contact MP system admin"},
                status=status.HTTP_400_BAD_REQUEST)
        # Attach the requesting user before validation.
        request.data['user_id'] = user.user_id
        serializer = OrderSerializer(data=request.data, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response({'details': "Request submitted successfully!"},
                        status=status.HTTP_201_CREATED)
"mohmmedmoued55@gmail.com"
] | mohmmedmoued55@gmail.com |
5fe337f75f189524749d169396ed19b821be42af | 474525154a4e1d48ef5242d1f44164d05399b145 | /tensorflow_probability/python/distributions/hidden_markov_model_test.py | dec2ea82c27a1efb1f509b75bdb840c5ac4184d4 | [
"Apache-2.0"
] | permissive | svshivapuja/probability | 9855737790f74a39169688fbfec9671deef804d9 | af7ccb22d972329633530c3b754ed1f49472f6a7 | refs/heads/main | 2023-07-17T04:14:53.703622 | 2021-08-30T17:47:06 | 2021-08-30T17:47:06 | 400,983,015 | 1 | 0 | Apache-2.0 | 2021-08-29T07:51:29 | 2021-08-29T07:51:29 | null | UTF-8 | Python | false | false | 90,147 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The HiddenMarkovModel distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
JAX_MODE = False
@test_util.test_all_tf_execution_regimes
class _HiddenMarkovModelTest(
test_util.VectorDistributionTestHelpers,
test_util.DiscreteScalarDistributionTestHelpers,
test_util.TestCase):
def make_placeholders(self, constants):
variables = [tf.Variable(c, shape=tf.TensorShape(None)) for c in constants]
self.evaluate([v.initializer for v in variables])
return variables
  def test_reproducibility(self):
    """Sampling twice with the same seed must produce identical draws."""
    initial_prob_data = tf.constant([0.01, 0.99], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.6, 0.4],
                                          [0.3, 0.7]], dtype=self.dtype)
    observation_locs_data = tf.constant([0.0, 1.0], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([30])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    seed = test_util.test_seed()
    s1 = self.evaluate(model.sample(5, seed=seed))
    if tf.executing_eagerly():
      # In eager mode sampling also consumes the global seed, so reset it
      # to reproduce the first draw exactly.
      tf.random.set_seed(seed)
    s2 = self.evaluate(model.sample(5, seed=seed))
    self.assertAllEqual(s1, s2)
  def test_supports_dynamic_observation_size(self):
    """sample/log_prob/posterior ops run with dynamically-shaped observations."""
    initial_prob_data = tf.constant([0.01, 0.99], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.6, 0.4],
                                          [0.3, 0.7]], dtype=self.dtype)
    observation_locs_data = tf.constant([[0.0, 1.0],
                                         [1.0, 0.0]], dtype=self.dtype)
    observation_scale_data = tf.constant([0.5, 0.5], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([30])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.MultivariateNormalDiag(loc=observation_locs,
                                   scale_diag=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    # Only checks that each op executes without error, not its values.
    self.evaluate(model.sample(5, seed=test_util.test_seed()))
    observation_data = tf.constant(30 * [[0.5, 0.5]], dtype=self.dtype)
    self.evaluate(model.log_prob(observation_data))
    self.evaluate(model.posterior_marginals(observation_data).probs_parameter())
    self.evaluate(model.posterior_mode(observation_data))
  def test_consistency(self):
    """Empirical sample statistics should be consistent with log_prob."""
    initial_prob_data = tf.constant([0.01, 0.99], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.6, 0.4],
                                          [0.3, 0.7]], dtype=self.dtype)
    observation_locs_data = tf.constant([0.0, 1.0], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([3])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs,
                   scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    self.run_test_sample_consistent_log_prob(
        self.evaluate, model,
        num_samples=100000,
        center=0.5, radius=0.5,
        rtol=0.05, seed=test_util.test_seed())
  def test_broadcast_initial_probs(self):
    """Initial probabilities broadcast correctly in sample/log_prob."""
    initial_prob_data = tf.constant([0.01, 0.99], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.6, 0.4],
                                          [0.3, 0.7]], dtype=self.dtype)
    observation_locs_data = tf.constant([0.0, 1.0], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([3])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs,
                   scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    self.run_test_sample_consistent_log_prob(
        self.evaluate, model,
        num_samples=100000,
        center=0.5, radius=1.,
        rtol=0.02, seed=test_util.test_seed())
  def test_broadcast_transitions(self):
    """Batched transition matrices broadcast against unbatched parameters."""
    initial_prob_data = tf.constant([0.01, 0.99], dtype=self.dtype)
    transition_matrix_data = tf.constant([[[0.8, 0.2],
                                           [0.3, 0.7]],
                                          [[0.9, 0.1],
                                           [0.2, 0.8]]],
                                         dtype=self.dtype)
    observation_locs_data = tf.constant([0.0, 1.0], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([3])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs,
                   scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    self.run_test_sample_consistent_log_prob(
        self.evaluate, model,
        num_samples=100000,
        center=0.5, radius=1.,
        rtol=2e-2, seed=test_util.test_seed())
  def test_broadcast_observations(self):
    """Batched observation params broadcast against batched transitions."""
    initial_prob_data = tf.constant([0.01, 0.99], dtype=self.dtype)
    transition_matrix_data = tf.constant([[[0.8, 0.2],
                                           [0.3, 0.7]],
                                          [[0.9, 0.1],
                                           [0.2, 0.8]]], dtype=self.dtype)
    observation_locs_data = tf.constant([[0.9, 0.1],
                                         [0.2, 0.8]], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([3])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs,
                   scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    self.run_test_sample_consistent_log_prob(
        self.evaluate, model,
        num_samples=100000,
        center=0.5, radius=1.,
        rtol=2e-2, seed=test_util.test_seed())
  def test_edge_case_sample_n_no_transitions(self):
    """num_steps=1: sample(2) has shape [2, 1] (no transitions taken)."""
    initial_prob_data = tf.constant([0.9, 0.1], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.5, 0.5],
                                          [0.5, 0.5]], dtype=self.dtype)
    observation_probs_data = tf.constant([[1.0, 0.0],
                                          [0.0, 1.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    [num_steps] = self.make_placeholders([1])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    x = model.sample(2, seed=test_util.test_seed())
    x_shape = self.evaluate(tf.shape(x))
    self.assertAllEqual(x_shape, [2, 1])
  def test_edge_case_log_prob_no_transitions(self):
    """num_steps=1: log_prob reduces to the initial distribution alone."""
    initial_prob_data = tf.constant([0.9, 0.1], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.5, 0.5],
                                          [0.5, 0.5]], dtype=self.dtype)
    observation_probs_data = tf.constant([[1.0, 0.0],
                                          [0.0, 1.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    # NOTE(review): the assignment below overwrites the placeholder variables
    # created above with the raw constants, leaving the placeholders unused.
    # Presumably one of the two assignments was meant to be removed -- confirm
    # which form (variables vs. constants) this test intends to exercise.
    (initial_prob, transition_matrix,
     observation_probs) = ([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    [num_steps] = self.make_placeholders([1])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    x = model.log_prob([0])
    self.assertAllClose(x, np.log(0.9), rtol=1e-5, atol=0.0)
  def test_edge_case_mean_no_transitions(self):
    """num_steps=1: mean() has shape [1] (single observation, no transitions)."""
    initial_prob_data = tf.constant([0.9, 0.1], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.5, 0.5],
                                          [0.5, 0.5]], dtype=self.dtype)
    observation_locs_data = tf.constant([0.0, 1.0], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([1])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs,
                   scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    x = model.mean()
    x_shape = self.evaluate(tf.shape(x))
    self.assertAllEqual(x_shape, [1])
def test_num_states(self):
initial_prob_data = tf.constant([0.9, 0.1], dtype=self.dtype)
transition_matrix_data = tf.constant([[0.5, 0.5],
[0.5, 0.5]], dtype=self.dtype)
observation_probs_data = tf.constant([[0.5, 0.0, 0.5],
[0.0, 1.0, 0.0]], dtype=self.dtype)
(initial_prob, transition_matrix,
observation_probs) = self.make_placeholders([
initial_prob_data, transition_matrix_data,
observation_probs_data])
[num_steps] = self.make_placeholders([5])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Categorical(probs=observation_probs),
num_steps=num_steps,
validate_args=True)
x = model.num_states_tensor()
self.assertAllEqual(x, 2)
def test_coin_tosses(self):
initial_prob_data = tf.constant([0.5, 0.5], dtype=self.dtype)
transition_matrix_data = tf.constant([[0.5, 0.5],
[0.5, 0.5]], dtype=self.dtype)
observation_probs_data = tf.constant([[1.0, 0.0],
[0.0, 1.0]], dtype=self.dtype)
(initial_prob, transition_matrix,
observation_probs) = self.make_placeholders([
initial_prob_data, transition_matrix_data,
observation_probs_data])
[num_steps] = self.make_placeholders([5])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Categorical(probs=observation_probs),
num_steps=num_steps,
validate_args=True)
x = model.log_prob([0, 0, 0, 0, 0])
self.assertAllClose(x, np.log(.5**5), rtol=1e-5, atol=0.0)
  def test_coin_toss_batch(self):
    """log_prob broadcasts over batch dims of parameters and observations."""
    initial_prob_data = tf.constant([0.5, 0.5], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.5, 0.5],
                                          [0.5, 0.5]], dtype=self.dtype)
    observation_probs_data = tf.constant([[1.0, 0.0],
                                          [0.0, 1.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    # Give the model a [3, 2] batch shape via its parameters.
    initial_prob = tf.broadcast_to(initial_prob, [3, 2, 2])
    transition_matrix = tf.broadcast_to(transition_matrix, [3, 2, 2, 2])
    [num_steps] = self.make_placeholders([5])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    examples = [tf.zeros(5, dtype=tf.int32), tf.ones(5, dtype=tf.int32)]
    examples = tf.broadcast_to(examples, [7, 3, 2, 5])
    computed_log_prob = model.log_prob(examples)
    expected_log_prob = tf.broadcast_to([np.log(.5**5)], [7, 3, 2])
    self.assertAllClose(computed_log_prob, expected_log_prob,
                        rtol=1e-4, atol=0.0)
  def test_mean_shape(self):
    """mean() of a 7-step model with 2-D observations has shape [7, 2]."""
    initial_prob_data = tf.constant([0.8, 0.2], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.7, 0.3],
                                          [0.2, 0.8]], dtype=self.dtype)
    observation_locs_data = tf.constant([[0.0, 0.0],
                                         [10.0, 10.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data])
    [num_steps] = self.make_placeholders([7])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.MultivariateNormalDiag(
            loc=observation_locs),
        num_steps=num_steps,
        validate_args=True)
    x = model.mean()
    x_shape = self.evaluate(tf.shape(x))
    self.assertAllEqual(x_shape, [7, 2])
  def test_batch_mean_shape(self):
    """Batched initial probs give mean() shape [batch, num_steps, obs_dim]."""
    initial_prob_data = tf.constant([[0.8, 0.2],
                                     [0.5, 0.5],
                                     [0.2, 0.8]], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.7, 0.3],
                                          [0.2, 0.8]], dtype=self.dtype)
    observation_locs_data = tf.constant([[0.0, 0.0],
                                         [10.0, 10.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data])
    [num_steps] = self.make_placeholders([7])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.MultivariateNormalDiag(
            loc=observation_locs),
        num_steps=num_steps,
        validate_args=True)
    x = model.mean()
    x_shape = self.evaluate(tf.shape(x))
    self.assertAllEqual(x_shape, [3, 7, 2])
  def test_mean_and_variance(self):
    """Sample mean/variance should agree with analytic mean()/variance()."""
    initial_prob_data = tf.constant([0.6, 0.1, 0.3], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.2, 0.6, 0.2],
                                          [0.5, 0.3, 0.2],
                                          [0.0, 1.0, 0.0]], dtype=self.dtype)
    observation_locs_data = tf.constant([0.0, 1.0, 2.0], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([5])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    self.run_test_sample_consistent_mean_variance(self.evaluate, model,
                                                  num_samples=100000,
                                                  rtol=0.03)
  def test_single_sequence_posterior_marginals(self):
    """Posterior marginals on a graph walk with many -inf log probabilities."""
    # In this test we have a 9-vertex graph with precisely one
    # 7-vertex path from vertex 0 to vertex 8.
    # The hidden Markov model is a random walk on this
    # graph where the only observations are
    # "observed at 0", "observed in {1, 2, ..., 7}",
    # "observed at 8".
    # The purpose of this test is to ensure that transition
    # and observation matrices with many log probabilities
    # equal to -infinity, and where the result contains many
    # -infinities, are handled correctly.
    initial_prob = tf.constant(np.ones(9) / 9.0, dtype=self.dtype)
    edges = [(0, 1), (1, 2), (2, 3), (3, 4),
             (4, 6), (2, 5), (5, 6), (6, 7),
             (6, 8)]
    transition_matrix = np.zeros((9, 9))
    for (i, j) in edges:
      transition_matrix[i, j] = 1.
      transition_matrix[j, i] = 1.
    # Row-normalize so each row is a proper transition distribution.
    transition_matrix = tf.constant(
        transition_matrix / np.sum(transition_matrix, axis=1, keepdims=True),
        dtype=self.dtype)
    observation_probs = tf.constant(
        np.block([[1, 0, 0],
                  [np.zeros((7, 1)), np.ones((7, 1)), np.zeros((7, 1))],
                  [0, 0, 1]]),
        dtype=self.dtype)
    [num_steps] = self.make_placeholders([7])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    observations = [0, 1, 1, 1, 1, 1, 2]
    probs = self.evaluate(
        model.posterior_marginals(observations).probs_parameter())
    # The unique 0->8 path forces the hidden states deterministically.
    expected_probs = np.eye(9)[[0, 1, 2, 3, 4, 6, 8]]
    self.assertAllClose(probs, expected_probs, rtol=1e-4, atol=0.0)
  @parameterized.parameters(
      (3, 2, 1, 0),
      (1, 2, 3, 0),
      (1, 0, 2, 3))
  def test_posterior_marginals_high_rank(self, rank_o, rank_t, rank_i, rank_s):
    """Posterior marginals broadcast correctly over high-rank batch shapes.

    Each model component is independently lifted to a higher batch rank
    (rank_o: observation locs, rank_t: transitions, rank_i: initial probs,
    rank_s: observed samples); the result must broadcast to the maximal rank.
    """
    def increase_rank(n, x):
      # By choosing prime number dimensions we make it less
      # likely that a test will pass for accidental reasons.
      primes = [3, 5, 7]
      for i in range(n):
        x = primes[i] * [x]
      return x
    observation_locs_data = tf.identity(
        increase_rank(rank_o, tf.eye(4, dtype=self.dtype)))
    observation_scales_data = tf.constant(
        [0.25, 0.25, 0.25, 0.25],
        dtype=self.dtype)
    transition_matrix_data = tf.constant(
        increase_rank(rank_t, [[0.8, 0.1, 0.1, 0.0],
                               [0.1, 0.8, 0.0, 0.1],
                               [0.1, 0.0, 0.8, 0.1],
                               [0.0, 0.1, 0.1, 0.8]]),
        dtype=self.dtype)
    initial_prob_data = tf.constant(
        increase_rank(rank_i, [0.25, 0.25, 0.25, 0.25]),
        dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scales) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scales_data])
    observations = tf.constant(
        increase_rank(rank_s,
                      [[[0.91, 0.11], [0.21, 0.09]],
                       [[0.11, 0.97], [0.12, 0.08]],
                       [[0.01, 0.12], [0.92, 0.11]],
                       [[0.02, 0.11], [0.77, 0.11]],
                       [[0.81, 0.15], [0.21, 0.03]],
                       [[0.01, 0.13], [0.23, 0.91]],
                       [[0.11, 0.12], [0.23, 0.79]],
                       [[0.13, 0.11], [0.91, 0.29]]]),
        dtype=self.dtype)
    # Observations are 2x2 matrices; model them as a reshaped 4-dimensional
    # diagonal Gaussian so the event shape matches the samples.
    observation_distribution = tfp.distributions.TransformedDistribution(
        tfd.MultivariateNormalDiag(observation_locs,
                                   scale_diag=observation_scales),
        tfp.bijectors.Reshape((2, 2)))
    [num_steps] = self.make_placeholders([8])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        observation_distribution,
        num_steps=num_steps,
        validate_args=True)
    inferred_probs = self.evaluate(
        model.posterior_marginals(observations).probs_parameter())
    rank_e = max(rank_o, rank_t, rank_i, rank_s)
    expected_probs = increase_rank(rank_e,
                                   [[0.99994, 0.00000, 0.00006, 0.00000],
                                    [0.45137, 0.01888, 0.52975, 0.00000],
                                    [0.00317, 0.00002, 0.98112, 0.01570],
                                    [0.00000, 0.00001, 0.99998, 0.00001],
                                    [0.00495, 0.00001, 0.94214, 0.05289],
                                    [0.00000, 0.00083, 0.00414, 0.99503],
                                    [0.00000, 0.00000, 0.00016, 0.99984],
                                    [0.00000, 0.00000, 0.99960, 0.00039]])
    self.assertAllClose(inferred_probs, expected_probs, rtol=0., atol=1e-4)
  def test_posterior_mode_basic_example(self):
    """Viterbi mode tracks observations drifting from state 0 up to state 3."""
    observation_locs_data = tf.constant([0.0, 1.0, 2.0, 3.0],
                                        dtype=self.dtype)
    observation_scale_data = tf.constant(0.25, dtype=self.dtype)
    # Band-diagonal transitions: the walk can only stay or move one state.
    transition_matrix_data = tf.constant([[0.9, 0.1, 0.0, 0.0],
                                          [0.1, 0.8, 0.1, 0.0],
                                          [0.0, 0.1, 0.8, 0.1],
                                          [0.0, 0.0, 0.1, 0.9]],
                                         dtype=self.dtype)
    initial_prob_data = tf.constant([0.25, 0.25, 0.25, 0.25],
                                    dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    observations = tf.constant([0.1, 0.2, 0.3, 0.4, 0.5,
                                3.0, 2.9, 2.8, 2.7, 2.6],
                               dtype=self.dtype)
    [num_steps] = self.make_placeholders([10])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    inferred_states = model.posterior_mode(observations)
    expected_states = [0, 0, 0, 0, 1, 2, 3, 3, 3, 3]
    self.assertAllEqual(inferred_states, expected_states)
  @parameterized.parameters(
      (3, 2, 1, 0),
      (1, 2, 3, 0),
      (1, 0, 2, 3))
  def test_posterior_mode_high_rank(self, rank_o, rank_t, rank_i, rank_s):
    """Viterbi decoding broadcasts correctly over high-rank batch shapes.

    Mirrors test_posterior_marginals_high_rank: each component is lifted to
    a higher batch rank and the decoded states must broadcast to the
    maximal rank.
    """
    def increase_rank(n, x):
      # By choosing prime number dimensions we make it less
      # likely that a test will pass for accidental reasons.
      primes = [3, 5, 7]
      for i in range(n):
        x = primes[i] * [x]
      return x
    observation_locs_data = tf.constant(increase_rank(rank_o,
                                                      [[1.0, 0.0, 0.0, 0.0],
                                                       [0.0, 1.0, 0.0, 0.0],
                                                       [0.0, 0.0, 1.0, 0.0],
                                                       [0.0, 0.0, 0.0, 1.0]]),
                                        dtype=self.dtype)
    observation_scales_data = tf.constant(
        [0.25, 0.25, 0.25, 0.25],
        dtype=self.dtype)
    transition_matrix_data = tf.constant(
        increase_rank(rank_t, [[0.8, 0.1, 0.1, 0.0],
                               [0.1, 0.8, 0.0, 0.1],
                               [0.1, 0.0, 0.8, 0.1],
                               [0.0, 0.1, 0.1, 0.8]]),
        dtype=self.dtype)
    initial_prob_data = tf.constant(
        increase_rank(rank_i, [0.25, 0.25, 0.25, 0.25]),
        dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scales) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scales_data])
    observations = tf.constant(
        increase_rank(rank_s,
                      [[[0.91, 0.11], [0.21, 0.09]],
                       [[0.11, 0.97], [0.12, 0.08]],
                       [[0.01, 0.12], [0.92, 0.11]],
                       [[0.02, 0.11], [0.77, 0.11]],
                       [[0.81, 0.15], [0.21, 0.03]],
                       [[0.01, 0.13], [0.23, 0.91]],
                       [[0.11, 0.12], [0.23, 0.79]],
                       [[0.13, 0.11], [0.91, 0.29]]]),
        dtype=self.dtype)
    # Observations are 2x2 matrices modeled by a reshaped diagonal Gaussian.
    observation_distribution = tfp.distributions.TransformedDistribution(
        tfd.MultivariateNormalDiag(observation_locs,
                                   scale_diag=observation_scales),
        tfp.bijectors.Reshape((2, 2)))
    [num_steps] = self.make_placeholders([8])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        observation_distribution,
        num_steps=num_steps,
        validate_args=True)
    inferred_states = model.posterior_mode(observations)
    rank_e = max(rank_o, rank_t, rank_i, rank_s)
    expected_states = increase_rank(rank_e, [0, 2, 2, 2, 2, 3, 3, 2])
    self.assertAllEqual(inferred_states, expected_states)
  def test_posterior_mode_high_rank_batch(self):
    """Viterbi on a [2, 3] batch of fully deterministic two-state chains."""
    # Identity observation and transition matrices: each chain stays in its
    # initial state forever and emits that state exactly.
    observation_probs_data = tf.constant([[1.0, 0.0],
                                          [0.0, 1.0]],
                                         dtype=self.dtype)
    transition_matrix_data = tf.constant([[1.0, 0.0],
                                          [0.0, 1.0]],
                                         dtype=self.dtype)
    initial_prob_data = tf.constant([0.5, 0.5],
                                    dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    observations = tf.constant(2*[3*[[5*[0], 5*[1]]]])
    [num_steps] = self.make_placeholders([5])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    inferred_states = model.posterior_mode(observations)
    # The decoded states must reproduce the observations exactly.
    expected_states = 2*[3*[[5*[0], 5*[1]]]]
    self.assertAllEqual(inferred_states, expected_states)
  # Check that the Viterbi algorithm is invariant under permutations of the
  # names of the observations of the HMM (when there is a unique most
  # likely sequence of hidden states).
  def test_posterior_mode_invariance_observations(self):
    """Relabeling the observation symbols must not change the Viterbi path."""
    observation_probs_data = tf.constant([[0.09, 0.48, 0.52, 0.11],
                                          [0.31, 0.21, 0.21, 0.27]],
                                         dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.90, 0.10],
                                          [0.30, 0.70]],
                                         dtype=self.dtype)
    initial_prob_data = tf.constant([0.65, 0.35],
                                    dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    # A batch of 8 random relabelings of the 4 observation symbols.
    permutations = tf.identity(np.array([np.random.permutation(4)
                                         for _ in range(8)]))
    inverse_permutations = tf.argsort(permutations)
    # Permute the columns (observation symbols) of the observation matrix.
    observation_probs_permuted = tf.transpose(
        a=tf.gather(tf.transpose(a=observation_probs),
                    inverse_permutations),
        perm=[0, 2, 1])
    observations = tf.constant([1, 0, 3, 1, 3, 0, 2, 1, 2, 1, 3, 0, 0, 1, 1, 2])
    # Apply the same relabeling to the observation sequence itself.
    observations_permuted = tf.transpose(
        a=tf.gather(tf.transpose(a=permutations)[..., tf.newaxis],
                    observations,
                    batch_dims=(
                        tensorshape_util.rank(observations.shape) - 1))[..., 0])
    [num_steps] = self.make_placeholders([16])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs_permuted),
        num_steps=num_steps,
        validate_args=True)
    inferred_states = model.posterior_mode(observations_permuted)
    expected_states = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0]
    self.assertAllEqual(inferred_states, 8*[expected_states])
  # Check that the Viterbi algorithm is invariant under permutations of the
  # names of the hidden states of the HMM (when there is a unique most
  # likely sequence of hidden states).
  def test_posterior_mode_invariance_states(self):
    """Relabeling hidden states permutes the Viterbi path accordingly."""
    observation_probs_data = tf.constant([[0.12, 0.48, 0.5, 0.1],
                                          [0.4, 0.1, 0.5, 0.0],
                                          [0.1, 0.2, 0.3, 0.4]],
                                         dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.21, 0.49, 0.3],
                                          [0.18, 0.12, 0.7],
                                          [0.75, 0.15, 0.1]],
                                         dtype=self.dtype)
    initial_prob_data = tf.constant([0.8, 0.13, 0.07],
                                    dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    # A batch of 8 random relabelings of the 3 hidden states.
    permutations = tf.identity(np.array([np.random.permutation(3)
                                         for _ in range(8)]))
    inverse_permutations = tf.argsort(permutations)
    initial_prob_permuted = tf.gather(initial_prob, inverse_permutations)
    # Permute rows of observation matrix
    observation_probs_permuted = tf.gather(observation_probs,
                                           inverse_permutations)
    # Permute both rows and columns of transition matrix
    transition_matrix_permuted = tf.transpose(
        tf.gather(tf.transpose(transition_matrix), inverse_permutations),
        perm=[0, 2, 1])
    transition_matrix_permuted = tf.gather(
        transition_matrix_permuted,
        inverse_permutations,
        batch_dims=1)
    observations = tf.constant([1, 0, 3, 1, 3, 0, 2, 1, 2, 1, 3, 0, 0, 1, 1, 2])
    [num_steps] = self.make_placeholders([16])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob_permuted),
        tfd.Categorical(probs=transition_matrix_permuted),
        tfd.Categorical(probs=observation_probs_permuted),
        num_steps=num_steps,
        validate_args=True)
    inferred_states = model.posterior_mode(observations)
    expected_states = [0, 1, 2, 0, 2, 1, 2, 0, 2, 0, 2, 0, 1, 2, 0, 1]
    # The expected path under relabeled states is the relabeled path.
    expected_states_permuted = tf.transpose(
        tf.gather(
            tf.transpose(permutations)[..., tf.newaxis],
            expected_states)[..., 0])
    self.assertAllEqual(inferred_states, expected_states_permuted)
  def test_posterior_mode_missing_continuous_observations(self):
    """Masked (missing) continuous observations are ignored by the decoder."""
    initial_prob_data = tf.constant([0.9, 0.1], dtype=self.dtype)
    # A batch of two transition matrices: one favoring state 0, one state 1.
    transition_matrix_data = tf.constant([[[0.6, 0.4],
                                           [0.6, 0.4]],
                                          [[0.4, 0.6],
                                           [0.4, 0.6]]], dtype=self.dtype)
    observation_locs_data = tf.constant([[0.0, 0.0],
                                         [10.0, 10.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data])
    [num_steps] = self.make_placeholders([3])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.MultivariateNormalDiag(loc=observation_locs),
        num_steps=num_steps,
        validate_args=True)
    observations = tf.constant([[0.0, 0.0],
                                [0.0, 0.0],
                                [10.0, 10.0]], dtype=self.dtype)
    # We test two different transition matrices as well as two
    # different masks.
    # As a result we have a 2x2 tensor of sequences of states
    # returned by `posterior_mode`.
    x = model.posterior_mode(observations, mask=[[[False, True, False]],
                                                 [[False, False, False]]])
    self.assertAllEqual(x, [[[0, 0, 1], [0, 1, 1]],
                            [[0, 0, 1], [0, 0, 1]]])
  def test_posterior_mode_missing_discrete_observations(self):
    """Masked discrete observations: mode flips between two candidate paths."""
    initial_prob = tf.constant([1.0, 0.0, 0.0, 0.0], dtype=self.dtype)
    # This test uses a model with a random walk that can make a change of
    # of -1, 0 or +1 at each step.
    transition_data = (0.5 * np.diag(np.ones(4)) +
                       0.25*np.diag(np.ones(3), -1) +
                       0.25*np.diag(np.ones(3), 1))
    # Reflecting boundaries: the "missing" moves at the ends become stays.
    transition_data[0, 0] += 0.25
    transition_data[3, 3] += 0.25
    transition_matrix = tf.constant(transition_data, dtype=self.dtype)
    # Observations of the random walk are unreliable and give the
    # correct position with probability `0.25 + 0.75 * reliability`
    def observation_fn(reliability):
      return np.array(reliability * np.diag(np.ones(4)) +
                      (1 - reliability) * 0.25 * np.ones((4, 4)))
    # A batch of four models with slightly different observation reliability.
    observation_data = np.array(
        [observation_fn(reliability)
         for reliability in [0.993, 0.994, 0.995, 0.996]])
    observation_probs = tf.constant(observation_data, dtype=self.dtype)
    [num_steps] = self.make_placeholders([7])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    observations = tf.constant([0, 1, 2, 3, 2, 1, 0])
    mask = tf.constant([False, True, True, False, True, True, False])
    inferred_states = model.posterior_mode(observations, mask)
    # This example has been tuned so that there are two local maxima in the
    # space of paths.
    # As the `reliability` parameter increases, the mode switches from one of
    # the two paths to the other.
    expected_states = [[0, 0, 0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0, 0, 0],
                       [0, 1, 2, 3, 2, 1, 0],
                       [0, 1, 2, 3, 2, 1, 0]]
    self.assertAllEqual(inferred_states, expected_states)
  def test_posterior_marginals_missing_observations(self):
    """Marginals with masked steps match hand-computed exact fractions."""
    initial_prob = tf.constant([1., 0., 0., 0.], dtype=self.dtype)
    # This test uses a model with a random walk that can make a change of
    # of -1, 0 or +1 at each step.
    transition_data = [[0.75, 0.25, 0., 0.],
                       [0.25, 0.5, 0.25, 0.],
                       [0., 0.25, 0.5, 0.25],
                       [0.0, 0.0, 0.25, 0.75]]
    transition_matrix = tf.constant(transition_data, dtype=self.dtype)
    # Observations are perfectly reliable (identity emission matrix); only
    # the mask makes inference non-trivial.
    observation_data = np.array(np.eye(4))
    observation_probs = tf.constant(observation_data, dtype=self.dtype)
    [num_steps] = self.make_placeholders([7])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    observations = tf.constant([0, 1, 2, 3, 2, 1, 0])
    # Only the first and last observations are visible.
    mask = tf.constant([False, True, True, True, True, True, False])
    marginals = self.evaluate(
        model.posterior_marginals(observations, mask).probs_parameter())
    expected_marginals = [[1., 0., 0., 0.],
                          [21./26, 5./26, 0., 0.],
                          [105./143, 35./143, 3./143, 0.],
                          [1225./1716, 147./572, 49./1716, 1./1716],
                          [105./143, 35./143, 3./143, 0.],
                          [21./26, 5./26, 0., 0.],
                          [1., 0., 0., 0.]]
    self.assertAllClose(marginals, expected_marginals)
  def test_posterior_mode_edge_case_no_transitions(self):
    """Viterbi for a length-1 chain (no transitions ever taken)."""
    # Test all eight combinations of a single state that is
    # 1. unmasked/masked
    # 2. observed at state 0/state 1
    # 3. more likely started at state 0/state 1
    initial_prob_data = tf.constant([[0.9, 0.1], [0.1, 0.9]], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.5, 0.5],
                                          [0.5, 0.5]], dtype=self.dtype)
    observation_probs_data = tf.constant([[1.0, 0.0],
                                          [0.0, 1.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    [num_steps] = self.make_placeholders([1])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    inferred_state = model.posterior_mode(
        observations=[[[0]], [[1]]],
        mask=[[[[True]]], [[[False]]]])
    # Masked: mode follows the initial distribution.
    # Unmasked: the deterministic emission pins the state to the observation.
    expected_state = [[[[0], [1]], [[0], [1]]],
                      [[[0], [0]], [[1], [1]]]]
    self.assertAllEqual(inferred_state, expected_state)
  def test_posterior_marginals_edge_case_no_transitions(self):
    """Posterior marginals for a length-1 chain (no transitions ever taken)."""
    # Test all eight combinations of a single state that is
    # 1. unmasked/masked
    # 2. observed at state 0/state 1
    # 3. more likely started at state 0/state 1
    initial_prob_data = tf.constant([[0.9, 0.1], [0.1, 0.9]], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.5, 0.5],
                                          [0.5, 0.5]], dtype=self.dtype)
    observation_probs_data = tf.constant([[1.0, 0.0],
                                          [0.0, 1.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    [num_steps] = self.make_placeholders([1])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    inferred_marginals = self.evaluate(
        model.posterior_marginals(
            observations=[[[0]], [[1]]],
            mask=[[[[True]]], [[[False]]]]).probs_parameter())
    # Result is a [2,2,2] batch of sequences of length 1 of
    # [2]-vectors of probabilities.
    # Masked: marginals equal the prior; unmasked: one-hot at the observation.
    expected_marginals = [[[[[0.9, 0.1]],
                            [[0.1, 0.9]]],
                           [[[0.9, 0.1]],
                            [[0.1, 0.9]]]],
                          [[[[1., 0.]],
                            [[1., 0.]]],
                           [[[0., 1.]],
                            [[0., 1.]]]]]
    self.assertAllClose(inferred_marginals, expected_marginals)
def test_prior_many_steps(self):
initial_prob_data = tf.constant([[0.9, 0.1], [0.1, 0.9]], dtype=self.dtype)
transition_matrix_data = tf.constant([[0.5, 0.5],
[0.5, 0.5]], dtype=self.dtype)
observation_probs_data = tf.constant([[1.0, 0.0],
[0.0, 1.0]], dtype=self.dtype)
(initial_prob, transition_matrix,
observation_probs) = self.make_placeholders([
initial_prob_data, transition_matrix_data,
observation_probs_data])
[num_steps] = self.make_placeholders([3])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Categorical(probs=observation_probs),
num_steps=num_steps,
validate_args=True)
inferred_priors = self.evaluate(model.prior_marginals().probs_parameter())
expected_priors = [
[[0.9, 0.1], [0.5, 0.5], [0.5, 0.5]],
[[0.1, 0.9], [0.5, 0.5], [0.5, 0.5]]
]
self.assertAllClose(inferred_priors, expected_priors)
def test_prior_one_step(self):
initial_prob_data = tf.constant([[0.9, 0.1], [0.1, 0.9]], dtype=self.dtype)
transition_matrix_data = tf.constant([[0.5, 0.5],
[0.5, 0.5]], dtype=self.dtype)
observation_probs_data = tf.constant([[1.0, 0.0],
[0.0, 1.0]], dtype=self.dtype)
(initial_prob, transition_matrix,
observation_probs) = self.make_placeholders([
initial_prob_data, transition_matrix_data,
observation_probs_data])
[num_steps] = self.make_placeholders([1])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Categorical(probs=observation_probs),
num_steps=num_steps,
validate_args=True)
inferred_priors = self.evaluate(model.prior_marginals().probs_parameter())
expected_priors = [
[[0.9, 0.1]],
[[0.1, 0.9]]
]
self.assertAllClose(inferred_priors, expected_priors)
def test_prior_multiple_steps(self):
initial_prob_data = tf.constant([[0.9, 0.1], [0.1, 0.9]], dtype=self.dtype)
transition_matrix_data = tf.constant([[0.9, 0.1],
[0.7, 0.3]], dtype=self.dtype)
observation_probs_data = tf.constant([[1.0, 0.0],
[0.0, 1.0]], dtype=self.dtype)
(initial_prob, transition_matrix,
observation_probs) = self.make_placeholders([
initial_prob_data, transition_matrix_data,
observation_probs_data])
[num_steps] = self.make_placeholders([33])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Categorical(probs=observation_probs),
num_steps=num_steps,
validate_args=True)
inferred_priors = self.evaluate(
model.prior_marginals().probs_parameter())[:, -1]
expected_priors = [[0.875, 0.125], [0.875, 0.125]]
self.assertAllClose(inferred_priors, expected_priors)
  def test_prior_dynamic_transition(self):
    """Prior marginals with a time-varying transition distribution."""
    initial_prob_data = tf.constant([[0.5, 0.5], [0.1, 0.9]], dtype=self.dtype)
    # Shape [batch=2, num_steps-1=3, K=2, K=2]: one transition matrix per
    # step for each batch member.
    transition_matrix_data = tf.constant([
        [[[0.6, 0.4], [0.5, 0.5]],
         [[0.7, 0.3], [0.4, 0.6]],
         [[0.9, 0.1], [0.3, 0.7]]],
        [[[0.5, 0.5], [0.5, 0.5]],
         [[0.1, 0.9], [0.1, 0.9]],
         [[0.5, 0.5], [0.5, 0.5]]]
    ], dtype=self.dtype)
    observation_probs_data = tf.constant([[1.0, 0.0],
                                          [0.0, 1.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    [num_steps] = self.make_placeholders([4])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        time_varying_transition_distribution=True,
        num_steps=num_steps,
        validate_args=True)
    inferred_priors = self.evaluate(model.prior_marginals().probs_parameter())
    expected_priors = [
        [[0.5, 0.5], [0.55, 0.45], [0.565, 0.435], [0.639, 0.361]],
        [[0.1, 0.9], [0.5, 0.5], [0.1, 0.9], [0.5, 0.5]]
    ]
    self.assertAllClose(inferred_priors, expected_priors)
  # NOTE(review): "broadcat" in the name looks like a typo for "broadcast";
  # kept as-is to avoid renaming a test method in this change.
  def test_prior_dynamic_transition_broadcat_init(self):
    """Time-varying transitions with a size-1 initial batch that broadcasts."""
    initial_prob_data = tf.constant([[0.5, 0.5]], dtype=self.dtype)
    transition_matrix_data = tf.constant([
        [[[0.6, 0.4], [0.5, 0.5]],
         [[0.7, 0.3], [0.4, 0.6]],
         [[0.9, 0.1], [0.3, 0.7]]],
        [[[0.5, 0.5], [0.5, 0.5]],
         [[0.1, 0.9], [0.1, 0.9]],
         [[0.5, 0.5], [0.5, 0.5]]]
    ], dtype=self.dtype)  # [BS, 3, K, K]
    observation_probs_data = tf.constant([[1.0, 0.0],
                                          [0.0, 1.0]], dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_probs) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_probs_data])
    [num_steps] = self.make_placeholders([4])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        time_varying_transition_distribution=True,
        num_steps=num_steps,
        validate_args=True)
    inferred_priors = self.evaluate(model.prior_marginals().probs_parameter())
    expected_priors = [
        [[0.5, 0.5], [0.55, 0.45], [0.565, 0.435], [0.639, 0.361]],
        [[0.5, 0.5], [0.5, 0.5], [0.1, 0.9], [0.5, 0.5]]
    ]
    self.assertAllClose(inferred_priors, expected_priors)
  def test_time_dimension_observation_sample_consistent_mean_variance(self):
    """Sample mean/variance agree with analytic values when the observation
    distribution carries an explicit (constant) time dimension."""
    initial_prob_data = tf.constant([0.6, 0.1, 0.3], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.2, 0.6, 0.2],
                                          [0.5, 0.3, 0.2],
                                          [0.0, 1.0, 0.0]], dtype=self.dtype)
    # One row of locs per step (all identical here): shape [num_steps, K].
    observation_locs_data = tf.constant([[0.0, 1.0, 2.0],
                                         [0.0, 1.0, 2.0],
                                         [0.0, 1.0, 2.0],
                                         [0.0, 1.0, 2.0],
                                         [0.0, 1.0, 2.0]], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([5])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True,
        time_varying_observation_distribution=True)
    self.run_test_sample_consistent_mean_variance(self.evaluate, model,
                                                  num_samples=100000,
                                                  rtol=0.03)
  def test_dynamic_observation_sample_consistent_mean_variance(self):
    """Sample mean/variance agree with analytic values when the observation
    locs genuinely vary from step to step."""
    initial_prob_data = tf.constant([0.6, 0.1, 0.3], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.2, 0.6, 0.2],
                                          [0.5, 0.3, 0.2],
                                          [0.0, 1.0, 0.0]], dtype=self.dtype)
    # Per-step locs differ, unlike the time_dimension variant above.
    observation_locs_data = tf.constant([[0.0, 1.0, 2.0],
                                         [0.0, 2.0, 1.0],
                                         [2.0, 1.0, 0.0],
                                         [0.0, 1.0, 2.0],
                                         [2.0, 1.0, 0.0]], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([5])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True,
        time_varying_observation_distribution=True)
    self.run_test_sample_consistent_mean_variance(self.evaluate, model,
                                                  num_samples=100000,
                                                  rtol=0.03)
  def test_high_rank_dynamic_observation_mean_shape(self):
    """mean() broadcasts a batch-7 initial dist with per-step observations."""
    # Batch of 7 identical initial distributions.
    initial_prob_data = tf.constant([[0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3]], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.2, 0.6, 0.2],
                                          [0.5, 0.3, 0.2],
                                          [0.0, 1.0, 0.0]],
                                         dtype=self.dtype)
    # Per-step observation locs: shape [num_steps=5, K=3].
    observation_locs_data = tf.constant([[0.0, 1.0, 2.0],
                                         [0.0, 2.0, 1.0],
                                         [2.0, 1.0, 0.0],
                                         [0.0, 1.0, 2.0],
                                         [2.0, 1.0, 0.0]],
                                        dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([5])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True,
        time_varying_observation_distribution=True)
    x = model.mean()
    x_shape = self.evaluate(tf.shape(x))
    # Expect [batch=7, num_steps=5].
    self.assertAllEqual(x_shape, [7, 5])
  def test_dynamic_observation_mean_and_variance_match(self):
    """A time-varying observation model whose per-step locs are all equal
    must have the same mean/variance as the equivalent static model."""
    initial_prob_data = tf.constant([0.6, 0.1, 0.3], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.2, 0.6, 0.2],
                                          [0.5, 0.3, 0.2],
                                          [0.0, 1.0, 0.0]], dtype=self.dtype)
    static_observation_locs_data = tf.constant([0.0, 1.0, 2.0],
                                               dtype=self.dtype)
    # Same locs replicated once per step.
    dynamic_observation_locs_data = tf.constant(
        [[0.0, 1.0, 2.0],
         [0.0, 1.0, 2.0],
         [0.0, 1.0, 2.0],
         [0.0, 1.0, 2.0],
         [0.0, 1.0, 2.0]], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix, static_observation_locs,
     dynamic_observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         static_observation_locs_data, dynamic_observation_locs_data,
         observation_scale_data])
    [num_steps] = self.make_placeholders([5])
    static_model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=static_observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    dynamic_model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=dynamic_observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True,
        time_varying_observation_distribution=True)
    self.assertAllClose(self.evaluate(static_model.mean()),
                        self.evaluate(dynamic_model.mean()),
                        0.03)
    self.assertAllClose(self.evaluate(static_model.variance()),
                        self.evaluate(dynamic_model.variance()),
                        0.03)
  def test_dynamic_observation_posterior_is_appropriate(self):
    """Posterior marginals/mode behave sensibly under per-step observations."""
    # This test forces evaluation of the _observation_log_probs method.
    initial_prob_data = tf.constant([0.9, 0.1], dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.9, 0.1],
                                          [0.1, 0.9]], dtype=self.dtype)
    observation_scale_data = tf.constant(0.1, dtype=self.dtype)
    # Per step: state 0 emits the observed line, state 1 its negation.
    observation_loc_data = tf.constant([[-2., 2.],
                                        [-1., 1.],
                                        [0., 0.],
                                        [1., -1.],
                                        [2., -2.]], dtype=self.dtype)
    observations_data = tf.range(5, dtype=self.dtype) - 2
    (initial_prob, transition_matrix, observation_scale, observations,
     observation_locs) = self.make_placeholders(
         [initial_prob_data, transition_matrix_data, observation_scale_data,
          observations_data, observation_loc_data])
    [num_steps] = self.make_placeholders([5])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True,
        time_varying_observation_distribution=True)
    # Observations are a line from -2 to 2. Model has two states, one that
    # matches the observations, and one that is the negated version. The
    # maximum confusion should occur on the third step, but state 0 should
    # always be much more probable.
    marginals = self.evaluate(model.posterior_marginals(observations).logits)
    self.assertAllClose(marginals[:, 0], np.full((5,), 0.), atol=0.03)
    self.assertAllLess(marginals[:, 1], -4.)
    # marginals[0:2, 0] both round to 0, so only compare them to marginals[2, 0]
    self.assertGreater(marginals[0, 0], marginals[2, 0])
    self.assertGreater(marginals[1, 0], marginals[2, 0])
    self.assertLess(marginals[2, 0], marginals[3, 0])
    self.assertLess(marginals[2, 0], marginals[4, 0])
    self.assertLess(marginals[0, 1], marginals[1, 1])
    self.assertLess(marginals[1, 1], marginals[2, 1])
    self.assertGreater(marginals[2, 1], marginals[3, 1])
    self.assertGreater(marginals[3, 1], marginals[4, 1])
    mode = self.evaluate(model.posterior_mode(observations))
    self.assertAllEqual(mode, np.full((5,), 0))
  def test_batched_observations_with_dynamic_observation(self):
    """Smoke test: batched random observations run through log_prob,
    posterior_marginals and posterior_mode without error (no value checks)."""
    initial_prob_data = tf.constant([0.6, 0.1, 0.3],
                                    dtype=self.dtype)
    transition_matrix_data = tf.constant([[0.2, 0.6, 0.2],
                                          [0.5, 0.3, 0.2],
                                          [0.0, 1.0, 0.0]],
                                         dtype=self.dtype)
    # Per-step observation locs: shape [num_steps=5, K=3].
    observation_locs_data = tf.constant([[0.0, 1.0, 2.0],
                                         [0.0, 2.0, 1.0],
                                         [2.0, 1.0, 0.0],
                                         [0.0, 1.0, 2.0],
                                         [2.0, 1.0, 0.0]],
                                        dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([5])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True,
        time_varying_observation_distribution=True)
    # [12, 17] batch of length-5 observation sequences.
    observations = tf.constant(np.random.normal(
        size=[12, 17, 5]), dtype=self.dtype)
    self.evaluate(model.log_prob(observations))
    self.evaluate(model.posterior_marginals(observations).logits)
    self.evaluate(model.posterior_mode(observations))
def test_dynamic_transition_log_prob_and_posterior_is_appropriate(self):
initial_prob_data = tf.constant([0.999, 0.001], dtype=self.dtype)
transition_matrix_data = tf.constant([[[0.999, 0.001],
[0.001, 0.999]],
[[0.999, 0.001],
[0.001, 0.999]],
[[0.001, 0.999],
[0.999, 0.001]]],
dtype=self.dtype)
observation_scale_data = tf.constant(0.1, dtype=self.dtype)
observation_loc_data = tf.constant([0.0, 1.0], dtype=self.dtype)
observations_data_unlikely = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=self.dtype)
observations_data_likely = tf.constant([0.0, 0.0, 0.0, 1.0],
dtype=self.dtype)
(initial_prob, transition_matrix, observation_scale, observations_unlikely,
observations_likely, observation_locs) = self.make_placeholders(
[initial_prob_data, transition_matrix_data, observation_scale_data,
observations_data_unlikely, observations_data_likely,
observation_loc_data])
[num_steps] = self.make_placeholders([4])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Normal(loc=observation_locs, scale=observation_scale),
num_steps=num_steps,
validate_args=True,
time_varying_transition_distribution=True)
likely_log_prob = self.evaluate(model.log_prob(observations_likely))
unlikely_log_prob = self.evaluate(model.log_prob(observations_unlikely))
self.assertLess(likely_log_prob, 1e1)
self.assertLess(unlikely_log_prob, 1e-6)
marginals = self.evaluate(model.posterior_marginals(
observations_data_likely).probs_parameter())
self.assertAllClose(marginals, [[1., 0.],
[1., 0.],
[1., 0.],
[0., 1.]], 1e-2)
mode = self.evaluate(model.posterior_mode(observations_data_likely))
self.assertAllClose(mode, [0., 0., 0., 1.], 1e-2)
  def test_dynamic_transition_sample_consistent_mean_variance(self):
    """Sample mean/variance agree with analytic values for time-varying
    transition matrices."""
    initial_prob_data = tf.constant([0.6, 0.1, 0.3], dtype=self.dtype)
    # Shape [num_steps - 1 = 2, K, K]: a mixing step then a near-identity one.
    transition_matrix_data = tf.constant([[[0.2, 0.6, 0.2],
                                           [0.5, 0.3, 0.2],
                                           [0.0, 1.0, 0.0]],
                                          [[0.998, 0.001, 0.001],
                                           [0.001, 0.998, 0.001],
                                           [0.001, 0.001, 0.998]]],
                                         dtype=self.dtype)
    observation_locs_data = tf.constant([0.0, 1.0, 2.0], dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([3])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True,
        time_varying_transition_distribution=True)
    self.run_test_sample_consistent_mean_variance(self.evaluate, model,
                                                  num_samples=100000,
                                                  rtol=0.03)
  # NOTE(review): despite the "sample_consistent_mean_variance" name, this
  # test only checks the shape of mean() — it mirrors
  # test_high_rank_dynamic_observation_mean_shape; consider renaming.
  def test_high_rank_dynamic_transition_sample_consistent_mean_variance(self):
    """mean() broadcasts a batch-7 initial dist with per-step transitions."""
    initial_prob_data = tf.constant([[0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3],
                                     [0.6, 0.1, 0.3]], dtype=self.dtype)
    # Shape [num_steps - 1 = 2, K, K].
    transition_matrix_data = tf.constant([[[0.2, 0.6, 0.2],
                                           [0.5, 0.3, 0.2],
                                           [0.0, 1.0, 0.0]],
                                          [[0.998, 0.001, 0.001],
                                           [0.001, 0.998, 0.001],
                                           [0.001, 0.001, 0.998]]],
                                         dtype=self.dtype)
    observation_locs_data = tf.constant([0.0, 1.0, 2.0],
                                        dtype=self.dtype)
    observation_scale_data = tf.constant(0.5, dtype=self.dtype)
    (initial_prob, transition_matrix,
     observation_locs, observation_scale) = self.make_placeholders([
         initial_prob_data, transition_matrix_data,
         observation_locs_data, observation_scale_data])
    [num_steps] = self.make_placeholders([3])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True,
        time_varying_transition_distribution=True)
    x = model.mean()
    x_shape = self.evaluate(tf.shape(x))
    # Expect [batch=7, num_steps=3].
    self.assertAllEqual(x_shape, [7, 3])
def test_dynamic_transition_mean_and_variance_match(self):
initial_prob_data = tf.constant([[0.6, 0.1, 0.3],
[0.6, 0.1, 0.3],
[0.6, 0.1, 0.3],
[0.6, 0.1, 0.3],
[0.6, 0.1, 0.3],
[0.6, 0.1, 0.3],
[0.6, 0.1, 0.3],
[0.6, 0.1, 0.3]], dtype=self.dtype)
transition_matrix_data = tf.constant([[[0.2, 0.6, 0.2],
[0.5, 0.3, 0.2],
[0.0, 1.0, 0.0]],
[[0.2, 0.6, 0.2],
[0.5, 0.3, 0.2],
[0.0, 1.0, 0.0]]],
dtype=self.dtype)
observation_locs_data = tf.constant([[0.0, 1.0, 2.0]],
dtype=self.dtype)
observation_scale_data = tf.constant(0.5, dtype=self.dtype)
(initial_prob, transition_matrix,
observation_locs, observation_scale) = self.make_placeholders([
initial_prob_data, transition_matrix_data,
observation_locs_data, observation_scale_data])
[num_steps] = self.make_placeholders([3])
static_model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix[0, :, :]),
tfd.Normal(loc=observation_locs, scale=observation_scale),
num_steps=num_steps,
validate_args=True,
time_varying_transition_distribution=False)
dynamic_model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Normal(loc=observation_locs, scale=observation_scale),
num_steps=num_steps,
validate_args=True,
time_varying_transition_distribution=True)
self.assertAllClose(self.evaluate(static_model.mean()),
self.evaluate(dynamic_model.mean()),
0.03)
self.assertAllClose(self.evaluate(static_model.variance()),
self.evaluate(dynamic_model.variance()),
0.03)
def test_batched_observations_with_dynamic_transition(self):
initial_prob_data = tf.constant([0.6, 0.1, 0.3],
dtype=self.dtype)
transition_matrix_data = tf.constant(4 * [[[0.2, 0.6, 0.2],
[0.5, 0.3, 0.2],
[0.0, 1.0, 0.0]]],
dtype=self.dtype)
observation_locs_data = tf.constant([0.0, 1.0, 2.0],
dtype=self.dtype)
observation_scale_data = tf.constant(0.5, dtype=self.dtype)
(initial_prob, transition_matrix,
observation_locs, observation_scale) = self.make_placeholders([
initial_prob_data, transition_matrix_data,
observation_locs_data, observation_scale_data])
[num_steps] = self.make_placeholders([5])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Normal(loc=observation_locs, scale=observation_scale),
num_steps=num_steps,
validate_args=True,
time_varying_transition_distribution=True)
observations = tf.constant(np.random.normal(
size=[12, 17, 5]), dtype=self.dtype)
self.evaluate(model.log_prob(observations))
self.evaluate(model.posterior_marginals(observations).logits)
self.evaluate(model.posterior_mode(observations))
def test_batched_observations_with_dynamic_transition_and_observation(self):
initial_prob_data = tf.constant([0.6, 0.1, 0.3],
dtype=self.dtype)
transition_matrix_data = tf.constant(4 * [[[0.2, 0.6, 0.2],
[0.5, 0.3, 0.2],
[0.0, 1.0, 0.0]]],
dtype=self.dtype)
observation_locs_data = tf.constant(5 * [[0.0, 1.0, 2.0]],
dtype=self.dtype)
observation_scale_data = tf.constant(0.5, dtype=self.dtype)
(initial_prob, transition_matrix,
observation_locs, observation_scale) = self.make_placeholders([
initial_prob_data, transition_matrix_data,
observation_locs_data, observation_scale_data])
[num_steps] = self.make_placeholders([5])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Normal(loc=observation_locs, scale=observation_scale),
num_steps=num_steps,
validate_args=True,
time_varying_transition_distribution=True,
time_varying_observation_distribution=True)
observations = tf.constant(np.random.normal(
size=[12, 17, 5]), dtype=self.dtype)
self.evaluate(model.log_prob(observations))
self.evaluate(model.posterior_marginals(observations).logits)
self.evaluate(model.posterior_mode(observations))
def test_dynamic_distributions_log_prob_and_posterior_is_appropriate(self):
initial_prob_data = tf.constant([0.999, 0.001], dtype=self.dtype)
transition_matrix_data = tf.constant([[[0.999, 0.001],
[0.001, 0.999]],
[[0.999, 0.001],
[0.001, 0.999]],
[[0.001, 0.999],
[0.999, 0.001]]],
dtype=self.dtype)
observation_scale_data = tf.constant(0.1, dtype=self.dtype)
observation_loc_data = tf.constant([[0.0, 1.0],
[0.0, 1.0],
[1.0, 0.0],
[1.0, 0.0]], dtype=self.dtype)
observations_data_unlikely = tf.constant([0.0, 0.0, 0.0, 1.0],
dtype=self.dtype)
observations_data_likely = tf.constant([0.0, 0.0, 1.0, 0.0],
dtype=self.dtype)
(initial_prob, transition_matrix, observation_scale, observations_unlikely,
observations_likely, observation_locs) = self.make_placeholders(
[initial_prob_data, transition_matrix_data, observation_scale_data,
observations_data_unlikely, observations_data_likely,
observation_loc_data])
[num_steps] = self.make_placeholders([4])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Normal(loc=observation_locs, scale=observation_scale),
num_steps=num_steps,
validate_args=True,
time_varying_transition_distribution=True,
time_varying_observation_distribution=True)
likely_log_prob = self.evaluate(model.log_prob(observations_likely))
unlikely_log_prob = self.evaluate(model.log_prob(observations_unlikely))
self.assertLess(likely_log_prob, 1e1)
self.assertLess(unlikely_log_prob, 1e-6)
marginals = self.evaluate(model.posterior_marginals(
observations_data_likely).probs_parameter())
self.assertAllClose(marginals, [[1., 0.],
[1., 0.],
[1., 0.],
[0., 1.]], 1e-2)
mode = self.evaluate(model.posterior_mode(observations_data_likely))
self.assertAllClose(mode, [0., 0., 0., 1.], 1e-2)
def test_batch_dynamic_distributions_log_prob_and_posterior(self):
initial_prob_data = tf.constant([0.999, 0.001], dtype=self.dtype)
transition_matrix_data = tf.constant(6 * [[[[0.999, 0.001],
[0.001, 0.999]],
[[0.999, 0.001],
[0.001, 0.999]],
[[0.001, 0.999],
[0.999, 0.001]]]],
dtype=self.dtype)
observation_scale_data = tf.constant(0.1, dtype=self.dtype)
observation_loc_data = tf.constant(1 * [[[0.0, 1.0],
[0.0, 1.0],
[1.0, 0.0],
[1.0, 0.0]]], dtype=self.dtype)
observations_data_unlikely = tf.constant([0.0, 0.0, 0.0, 1.0],
dtype=self.dtype)
observations_data_likely = tf.constant(5 * [6 * [[0.0, 0.0, 1.0, 0.0]]],
dtype=self.dtype)
(initial_prob, transition_matrix, observation_scale, observations_unlikely,
observations_likely, observation_locs) = self.make_placeholders(
[initial_prob_data, transition_matrix_data, observation_scale_data,
observations_data_unlikely, observations_data_likely,
observation_loc_data])
[num_steps] = self.make_placeholders([4])
model = tfd.HiddenMarkovModel(
tfd.Categorical(probs=initial_prob),
tfd.Categorical(probs=transition_matrix),
tfd.Normal(loc=observation_locs, scale=observation_scale),
num_steps=num_steps,
validate_args=True,
time_varying_transition_distribution=True,
time_varying_observation_distribution=True)
likely_log_prob = self.evaluate(model.log_prob(observations_likely))
unlikely_log_prob = self.evaluate(model.log_prob(observations_unlikely))
self.assertLess(likely_log_prob[0, 0], 1e1)
self.assertLess(unlikely_log_prob[0], 1e-6)
marginals = self.evaluate(model.posterior_marginals(
observations_data_likely).probs_parameter())
self.assertAllClose(marginals[0, 0], [[1., 0.],
[1., 0.],
[1., 0.],
[0., 1.]], 1e-2)
mode = self.evaluate(model.posterior_mode(observations_data_likely))
self.assertAllClose(mode[0, 0], [0., 0., 0., 1.], 1e-2)
  @parameterized.named_parameters(('', False),
                                  ('_dynamic', True))
  def test_log_prob_mask(self, dynamic):
    """Checks `mask` handling in `log_prob`, both the constructor-supplied
    mask and the per-call `mask=` argument; the `_dynamic` variant feeds
    every input through placeholders.
    """
    num_steps = 4
    initial_prob = tf.constant([0.999, 0.001], dtype=self.dtype)
    transition_matrix = tf.constant([[0.999, 0.001],
                                     [0.001, 0.999]],
                                    dtype=self.dtype)
    observation_scale = tf.constant(0.1, dtype=self.dtype)
    observation_locs = tf.constant([0.0, 1.0], dtype=self.dtype)
    observations_unlikely = tf.constant([0.0, 1.0, 0.0, 1.0],
                                        dtype=self.dtype)
    # Three masks: nothing hidden, everything hidden, only the "unlikely"
    # steps hidden; plus a [3, 4] batch stacking all of them.
    mask_none = tf.constant([False, False, False, False])
    mask_all = tf.constant([True, True, True, True])
    mask_unlikely = tf.constant([False, True, False, True])
    mask_batch = tf.stack([mask_none, mask_all, mask_unlikely], axis=0)
    if dynamic:
      (initial_prob, transition_matrix, observation_scale,
       observations_unlikely, observation_locs, mask_none, mask_all,
       mask_unlikely, mask_batch, num_steps) = self.make_placeholders(
           [initial_prob, transition_matrix, observation_scale,
            observations_unlikely, observation_locs,
            mask_none, mask_all, mask_unlikely, mask_batch, num_steps])
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        mask=mask_unlikely,
        validate_args=True)
    # A per-call mask equal to the constructor mask must give the same value.
    log_prob_mask_unlikely = self.evaluate(
        model.log_prob(observations_unlikely))
    log_prob_mask_unlikely_explicit = self.evaluate(
        model.log_prob(observations_unlikely, mask=mask_unlikely))
    self.assertAllEqual(log_prob_mask_unlikely, log_prob_mask_unlikely_explicit)
    # Masking every observation leaves no evidence, so log_prob is 0.
    log_prob_mask_all = self.evaluate(
        model.log_prob(observations_unlikely, mask=mask_all))
    self.assertAllClose(log_prob_mask_all, 0.)
    # Unmasking the unlikely steps can only lower the log-probability.
    log_prob_mask_none = self.evaluate(
        model.log_prob(observations_unlikely, mask=mask_none))
    self.assertLess(log_prob_mask_none, log_prob_mask_unlikely)
    # A batch of masks yields the corresponding batch of log-probabilities.
    log_prob_mask_batch = self.evaluate(
        model.log_prob(observations_unlikely, mask=mask_batch))
    self.assertAllClose(log_prob_mask_batch,
                        [log_prob_mask_none,
                         log_prob_mask_all,
                         log_prob_mask_unlikely])
    # A batched constructor mask induces a [3] batch shape and must agree
    # with the per-call results above.
    batch_model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(loc=observation_locs, scale=observation_scale),
        num_steps=num_steps,
        mask=mask_batch,
        validate_args=True)
    self.assertAllEqual(batch_model.batch_shape_tensor(), [3])
    self.assertAllClose(
        log_prob_mask_batch,
        batch_model.log_prob(observations_unlikely))
# Concrete parameterizations of the `_HiddenMarkovModelTest` template, one
# per floating-point precision.
class HiddenMarkovModelTestFloat32(_HiddenMarkovModelTest):
  dtype = tf.float32  # run every inherited test case in single precision
class HiddenMarkovModelTestFloat64(_HiddenMarkovModelTest):
  dtype = tf.float64  # run every inherited test case in double precision
# Remove the template so the test runner does not collect the incomplete
# base class (it has no `dtype`).
del _HiddenMarkovModelTest
class _HiddenMarkovModelAssertionTest(
    test_util.VectorDistributionTestHelpers,
    test_util.DiscreteScalarDistributionTestHelpers,
    test_util.TestCase):
  """Validation tests: each case builds an ill-formed `HiddenMarkovModel`
  (or mutates one after construction) and checks that `validate_args=True`
  raises an error whose message matches the expected pattern.

  Uses `assertRaisesRegex` rather than the deprecated `assertRaisesRegexp`
  alias, which was removed in Python 3.12.
  """

  def test_integer_initial_state_assertion(self):
    """The initial distribution must be over integer states."""
    transition_matrix = np.array([[0.9, 0.1],
                                  [0.1, 0.9]])
    observation_probs = np.array([[1.0, 0.0],
                                  [0.0, 1.0]])
    num_steps = 2
    message = 'is not over integers'
    with self.assertRaisesRegex(Exception, message):
      model = tfd.HiddenMarkovModel(
          tfd.Normal(loc=0.0, scale=1.0),
          tfd.Categorical(probs=transition_matrix),
          tfd.Categorical(probs=observation_probs),
          num_steps=num_steps,
          validate_args=True)
      _ = self.evaluate(model.sample())

  def test_integer_transition_state_assertion(self):
    """The transition distribution must be over integer states."""
    initial_prob = np.array([0.9, 0.1])
    observation_probs = np.array([[1.0, 0.0],
                                  [0.0, 1.0]])
    num_steps = 2
    message = 'is not over integers'
    with self.assertRaisesRegex(Exception, message):
      model = tfd.HiddenMarkovModel(tfd.Categorical(probs=initial_prob),
                                    tfd.Normal(loc=0.0, scale=1.0),
                                    tfd.Categorical(probs=observation_probs),
                                    num_steps=num_steps,
                                    validate_args=True)
      _ = self.evaluate(model.sample())

  def test_scalar_num_steps_assertion(self):
    """`num_steps` given as an array must be rejected."""
    initial_prob = np.array([0.9, 0.1])
    transition_matrix = np.array([[0.9, 0.1],
                                  [0.1, 0.9]])
    observation_probs = np.array([[1.0, 0.0],
                                  [0.0, 1.0]])
    num_steps = np.array([2, 3])
    message = '`num_steps` must be a scalar'
    with self.assertRaisesRegex(Exception, message):
      model = tfd.HiddenMarkovModel(tfd.Categorical(probs=initial_prob),
                                    tfd.Categorical(probs=transition_matrix),
                                    tfd.Categorical(probs=observation_probs),
                                    num_steps=num_steps,
                                    validate_args=True)
      _ = self.evaluate(model.sample())

  def test_variable_num_steps_assertion(self):
    """A non-scalar `tf.Variable` `num_steps` must be rejected too."""
    initial_prob = np.array([0.9, 0.1])
    transition_matrix = np.array([[0.9, 0.1],
                                  [0.1, 0.9]])
    observation_probs = np.array([[1.0, 0.0],
                                  [0.0, 1.0]])
    num_steps = tf.Variable(np.array([2, 3]))
    message = '`num_steps` must be a scalar'
    with self.assertRaisesRegex(Exception, message):
      model = tfd.HiddenMarkovModel(
          tfd.Categorical(probs=initial_prob),
          tfd.Categorical(probs=transition_matrix),
          tfd.Categorical(probs=observation_probs),
          num_steps=num_steps,
          validate_args=True)
      _ = self.evaluate(model.sample())

  def test_num_steps_greater1_assertion(self):
    """`num_steps == 0` must be rejected."""
    initial_prob = np.array([0.9, 0.1])
    transition_matrix = np.array([[0.9, 0.1],
                                  [0.1, 0.9]])
    observation_probs = np.array([[1.0, 0.0],
                                  [0.0, 1.0]])
    num_steps = 0
    message = '`num_steps` must be at least 1'
    with self.assertRaisesRegex(Exception, message):
      model = tfd.HiddenMarkovModel(
          tfd.Categorical(probs=initial_prob),
          tfd.Categorical(probs=transition_matrix),
          tfd.Categorical(probs=observation_probs),
          num_steps=num_steps,
          validate_args=True)
      _ = self.evaluate(model.sample())

  def test_initial_scalar_assertion(self):
    """The initial distribution must have scalar event shape."""
    initial_prob = np.array([0.9, 0.1])
    transition_matrix = np.array([[0.9, 0.1],
                                  [0.1, 0.9]])
    observation_probs = np.array([[1.0, 0.0],
                                  [0.0, 1.0]])
    num_steps = 2
    message = 'must have scalar'
    with self.assertRaisesRegex(Exception, message):
      model = tfd.HiddenMarkovModel(
          tfd.Sample(tfd.Categorical(probs=initial_prob), sample_shape=2),
          tfd.Categorical(probs=transition_matrix),
          tfd.Categorical(probs=observation_probs),
          num_steps=num_steps,
          validate_args=True)
      _ = self.evaluate(model.sample())

  def test_batch_agreement_assertion(self):
    """Component batch shapes that disagree must be rejected."""
    initial_prob = np.array([[0.9, 0.1],
                             [0.1, 0.9]])
    transition_matrix = np.array([[1.0]])
    observation_probs = np.array([[1.0, 0.0],
                                  [0.0, 1.0]])
    num_steps = 1
    message = 'must agree on'
    with self.assertRaisesRegex(Exception, message):
      model = tfd.HiddenMarkovModel(
          tfd.Categorical(probs=initial_prob),
          tfd.Categorical(probs=transition_matrix),
          tfd.Categorical(probs=observation_probs),
          num_steps=num_steps,
          validate_args=True)
      _ = self.evaluate(model.sample())

  def test_variable_batch_agreement_assertion(self):
    """Batch disagreement is also caught when parameters are Variables."""
    initial_prob = np.array([[0.9, 0.1],
                             [0.1, 0.9]])
    transition_matrix_data = np.array([[1.0]])
    observation_probs_data = np.array([[1.0, 0.0],
                                       [0.0, 1.0]])
    transition_matrix = tf.Variable(transition_matrix_data)
    observation_probs = tf.Variable(observation_probs_data)
    self.evaluate(transition_matrix.initializer)
    self.evaluate(observation_probs.initializer)
    num_steps = 1
    message = 'must agree on'
    with self.assertRaisesRegex(Exception, message):
      model = tfd.HiddenMarkovModel(
          tfd.Categorical(probs=initial_prob),
          tfd.Categorical(probs=transition_matrix),
          tfd.Categorical(probs=observation_probs),
          num_steps=num_steps,
          validate_args=True)
      _ = self.evaluate(model.sample())

  def test_modified_variable_batch_agreement_assertion(self):
    """Assigning an incompatible shape after construction must fail at use."""
    initial_prob = np.array([[0.9, 0.1],
                             [0.1, 0.9]])
    transition_matrix_data = np.array([[1.0, 0.0],
                                       [0.0, 1.0]])
    transition_matrix_data2 = np.array([[1.0]])
    observation_probs_data = np.array([[1.0, 0.0],
                                       [0.0, 1.0]])
    # Unknown static shapes so the re-assignment below is legal.
    transition_matrix = tf.Variable(transition_matrix_data,
                                    shape=tf.TensorShape(None))
    observation_probs = tf.Variable(observation_probs_data,
                                    shape=tf.TensorShape(None))
    self.evaluate(transition_matrix.initializer)
    self.evaluate(observation_probs.initializer)
    num_steps = 1
    message = 'transition_distribution` and `observation_distribution` must'
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Categorical(probs=observation_probs),
        num_steps=num_steps,
        validate_args=True)
    with self.assertRaisesRegex(Exception, message):
      with tf.control_dependencies([
          transition_matrix.assign(transition_matrix_data2)]):
        _ = self.evaluate(model.sample())

  def test_non_scalar_transition_batch(self):
    """A scalar-batch transition distribution must be rejected."""
    initial_prob = tf.constant([0.6, 0.4])
    # The HMM class expect a `Categorical` distribution for each state.
    # This test provides only a single scalar distribution.
    # For this test to pass it must raise an appropriate exception.
    transition_matrix = tf.constant([0.6, 0.4])
    observation_locs = tf.constant(0.0)
    observation_scale = tf.constant(0.5)
    num_steps = 4
    with self.assertRaisesRegex(Exception, 'can\'t have scalar batches'):
      model = tfd.HiddenMarkovModel(
          tfd.Categorical(probs=initial_prob),
          tfd.Categorical(probs=transition_matrix),
          tfd.Normal(observation_locs, scale=observation_scale),
          num_steps=num_steps,
          validate_args=True)
      self.evaluate(model.mean())

  def test_variable_non_scalar_transition_batch(self):
    """Same as above with the transition probabilities held in a Variable."""
    initial_prob = tf.constant([0.6, 0.4])
    # The HMM class expect a `Categorical` distribution for each state.
    # This test provides only a single scalar distribution.
    # For this test to pass it must raise an appropriate exception.
    transition_matrix_data = tf.constant([0.6, 0.4])
    transition_matrix = tf.Variable(transition_matrix_data)
    self.evaluate(transition_matrix.initializer)
    observation_locs = tf.constant([0.0, 1.0])
    observation_scale = tf.constant([0.5, 0.5])
    num_steps = 4
    with self.assertRaisesRegex(Exception, 'can\'t have scalar batches'):
      model = tfd.HiddenMarkovModel(
          tfd.Categorical(probs=initial_prob),
          tfd.Categorical(probs=transition_matrix),
          tfd.Normal(loc=observation_locs, scale=observation_scale),
          num_steps=num_steps,
          validate_args=True)
      self.evaluate(model.mean())

  def test_modified_variable_non_scalar_transition_batch(self):
    """Assigning a scalar-batch transition after construction must fail."""
    initial_prob = tf.constant([0.6, 0.4])
    transition_matrix_data = tf.constant([[0.6, 0.4], [0.5, 0.5]])
    transition_matrix = tf.Variable(
        transition_matrix_data,
        shape=tf.TensorShape(None))
    transition_matrix_data2 = tf.constant([0.6, 0.4])
    self.evaluate(transition_matrix.initializer)
    observation_locs = tf.constant([0.0, 1.0])
    observation_scale = tf.constant([0.5, 0.5])
    num_steps = 4
    model = tfd.HiddenMarkovModel(
        tfd.Categorical(probs=initial_prob),
        tfd.Categorical(probs=transition_matrix),
        tfd.Normal(observation_locs, scale=observation_scale),
        num_steps=num_steps,
        validate_args=True)
    with self.assertRaisesRegex(
        Exception,
        'have scalar batches'):
      with tf.control_dependencies([
          transition_matrix.assign(transition_matrix_data2)]):
        self.evaluate(model.mean())

  def test_github_issue_854(self):
    """Regression test: vector-valued observations with an `Independent`
    observation distribution (github.com/tensorflow/probability issue #854).
    """
    nstates = 3
    data = np.random.randint(low=0, high=10, size=(5, 7, 11))
    p_init = tfd.Categorical(probs=np.float32(np.ones(nstates) / nstates))
    pswitch = 0.05
    pt = pswitch / (nstates - 1) * np.ones([nstates, nstates])
    np.fill_diagonal(pt, 1 - pswitch)
    p_trans = tfd.Categorical(probs=np.float32(pt))
    # prior on NB probability
    p_nb = self.evaluate(tfd.Beta(2, 5).sample([nstates, data.shape[-1]],
                                               seed=test_util.test_seed()))
    p_emission = tfd.Independent(tfd.NegativeBinomial(1, probs=p_nb),
                                 reinterpreted_batch_ndims=1)
    hmm = tfd.HiddenMarkovModel(
        initial_distribution=p_init,
        transition_distribution=p_trans,
        observation_distribution=p_emission,
        num_steps=data.shape[-2])
    self.assertAllEqual(data.shape[-2:],
                        tf.shape(hmm.sample(seed=test_util.test_seed())))
    self.assertAllEqual(data.shape[:1],
                        tf.shape(hmm.log_prob(data)))

  def test_time_varying_transition_batch_size(self):
    """A time-varying transition batch must have num_steps - 1 entries."""
    initial_prob = tf.constant([0.6, 0.4])
    # 7 transition matrices for num_steps = 5 (needs 4) -- must be rejected.
    transition_matrix = tf.constant(7 * [[[0.6, 0.4], [0.6, 0.4]]])
    observation_locs = tf.constant([0.0, 1.0])
    observation_scale = tf.constant(0.5)
    num_steps = 5
    with self.assertRaisesRegex(Exception, 'matches num_steps - 1.'):
      model = tfd.HiddenMarkovModel(
          tfd.Categorical(probs=initial_prob),
          tfd.Categorical(probs=transition_matrix),
          tfd.Normal(observation_locs, scale=observation_scale),
          num_steps=num_steps,
          time_varying_transition_distribution=True,
          validate_args=True)
      self.evaluate(model.mean())

  def test_time_varying_observation_batch_size(self):
    """A time-varying observation batch must have num_steps entries."""
    initial_prob = tf.constant([0.6, 0.4])
    transition_matrix = tf.constant([[0.6, 0.4], [0.6, 0.4]])
    # 7 observation rows for num_steps = 6 -- must be rejected.
    observation_locs = tf.constant(7 * [[0.0, 1.0]])
    observation_scale = tf.constant(0.5)
    num_steps = 6
    with self.assertRaisesRegex(Exception, 'matches num_steps.'):
      model = tfd.HiddenMarkovModel(
          tfd.Categorical(probs=initial_prob),
          tfd.Categorical(probs=transition_matrix),
          tfd.Normal(observation_locs, scale=observation_scale),
          num_steps=num_steps,
          time_varying_observation_distribution=True,
          validate_args=True)
      self.evaluate(model.mean())
class HiddenMarkovModelJaxTest(test_util.TestCase):
  """JAX-backend-only checks for `HiddenMarkovModel`."""
  def test_jit(self):
    """All public HMM methods must be traceable under `jax.jit`."""
    if not JAX_MODE:
      self.skipTest('JAX-only test')
    import jax  # pylint: disable=g-import-not-at-top
    @jax.jit
    def test(data):
      # Two-state Bernoulli HMM; `num_steps` comes from the static shape of
      # `data`, so it is a trace-time constant.
      p_c = tf.constant([0.1, 0.2])
      p_e = tf.constant([0.2, 0.3])
      one = tf.ones_like(p_c)
      zero = tf.zeros_like(p_c)
      dist = tfd.HiddenMarkovModel(
          initial_distribution=tfd.Bernoulli(probs=0.),
          transition_distribution=tfd.Bernoulli(probs=tf.stack([p_c, one], -1)),
          observation_distribution=tfd.Bernoulli(
              probs=tf.stack([p_e, zero], -1)),
          num_steps=data.shape[-1])
      # Exercise every method inside the jitted function; the test passes
      # if tracing and execution complete without error.
      lp = dist.log_prob(data)
      pom = dist.posterior_mode(data)
      s = dist.sample(seed=jax.random.PRNGKey(0))
      prm = dist.prior_marginals()
      pom2 = dist.posterior_marginals(data)
      return lp, pom, s, prm, pom2
    data = tf.ones(5)
    test(data)
if __name__ == '__main__':
  test_util.main()  # delegate to the TFP test runner when executed directly
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
8766db5f17f73ece19a8e050eb0f6c2da93a0634 | 02d8a026d63127f045042e03e23acbe6c9675db8 | /vb2py/test/testcollection.py | 68f0506871cd8ec816e607dfee324b6b6168fe80 | [
"BSD-3-Clause"
] | permissive | VB6Hobbyst7/xl_vb2py | 40e77976b452732575e2726fb1f0675b1ab9f86f | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | refs/heads/main | 2023-07-28T20:12:11.933183 | 2021-09-23T18:12:02 | 2021-09-23T18:12:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,618 | py | from vb2py.vbclasses import Collection
import unittest
class TestCollection(unittest.TestCase):
    """Unit tests for the VB6-style `Collection` wrapper (1-based indexing,
    optional string keys, `Before`/`After` insertion)."""

    def setUp(self):
        """Create a fresh, empty Collection for every test."""
        self.c = Collection()
    # << Collection tests >> (1 of 9)
    def testAddNumeric(self):
        """testAddNumeric: should be able to add with numeric indexes"""
        for value in range(10):
            self.c.Add(value)
        for expected, actual in zip(range(10), self.c):
            self.assertEqual(expected, actual)
        self.assertEqual(self.c.Count(), 10)
    # << Collection tests >> (2 of 9)
    def testAddBeforeNumeric(self):
        """testAddBeforeNumeric: should be able to add something before something else"""
        # Put 1 ... 9 in with 5 missing, then insert 5 before index 5.
        for value in range(1, 10):
            if value == 5:
                continue
            self.c.Add(value)
        self.c.Add(5, Before=5)  # ie before the index 5
        for expected, actual in zip(range(1, 10), self.c):
            self.assertEqual(expected, actual)
        self.assertEqual(self.c.Count(), 9)
    # << Collection tests >> (3 of 9)
    def testAddAfterNumeric(self):
        """testAddAfterNumeric: should be able to add something after something else"""
        # Put 1 ... 9 in with 5 missing, then insert 5 after index 4.
        for value in range(1, 10):
            if value == 5:
                continue
            self.c.Add(value)
        self.c.Add(5, After=4)
        for expected, actual in zip(range(1, 10), self.c):
            self.assertEqual(expected, actual)
        self.assertEqual(self.c.Count(), 9)
    # << Collection tests >> (4 of 9)
    def testAddText(self):
        """testAddText: should be able to add with text indexes"""
        for value in range(10):
            self.c.Add(value, "txt%d" % value)
        for expected, actual in zip(range(10), self.c):
            self.assertEqual(expected, actual)
        self.assertEqual(self.c.Count(), 10)
    # << Collection tests >> (5 of 9)
    def testAddTextandNumeric(self):
        """testAddTextandNumeric: should be able to add with text and numeric indexes"""
        # Interleave keyed and unkeyed entries; the unkeyed copy of i lands
        # at 1-based position i*2 + 2.
        for value in range(10):
            self.c.Add(value, "txt%d" % value)
            self.c.Add(value)
        for value in range(10):
            self.assertEqual(self.c.Item("txt%d" % value), value)
            self.assertEqual(self.c.Item(value * 2 + 2), value)
        self.assertEqual(self.c.Count(), 20)
    # << Collection tests >> (6 of 9)
    def testItemNumeric(self):
        """testItemNumeric: should be able to get with numeric indexes"""
        for value in range(10):
            self.c.Add(value)
        # Item() uses VB's 1-based positions.
        for value in range(10):
            self.assertEqual(value, self.c.Item(value + 1))
    # << Collection tests >> (7 of 9)
    def testItemText(self):
        """testItemText: should be able to get with text indexes"""
        for value in range(10):
            self.c.Add(value, "txt%d" % value)
        for value in range(10):
            self.assertEqual(value, self.c.Item("txt%d" % value))
    # << Collection tests >> (8 of 9)
    def testRemoveNumeric(self):
        """testRemoveNumeric: should be able to remove with numeric indexes"""
        for value in range(10):
            self.c.Add(value + 1)
        self.c.Remove(5)
        self.assertEqual(self.c.Count(), 9)
        for remaining in self.c:
            self.assertNotEqual(remaining, 5)
    # << Collection tests >> (9 of 9)
    def testRemoveText(self):
        """testRemoveText: should be able to remove with text indexes"""
        for value in range(10):
            self.c.Add(value, "txt%d" % value)
        self.c.Remove("txt%d" % 5)
        self.assertEqual(self.c.Count(), 9)
        for remaining in self.c:
            self.assertNotEqual(remaining, 5)
    # -- end -- << Collection tests >>
if __name__ == "__main__":
    unittest.main()  # run the Collection test-suite when executed as a script
| [
"c.git@pronovost.net"
] | c.git@pronovost.net |
4c9d7417da9957ec7e079ccb9a71bfe48c768581 | 956bfd26912c1df979907f3849be576cafc31819 | /0_data_gen/analyze_campaign.py | 2196e004ecf07488583b55d95106e3d0a488ae89 | [] | no_license | muskaankularia/Fair-recommendation-system- | cb1868441a5840c90ec3fc831675ed4350498537 | 7bbc04e43201300a01b9d549677250ceb4a946ab | refs/heads/master | 2020-05-16T19:12:40.758644 | 2019-05-28T07:16:51 | 2019-05-28T07:16:51 | 183,251,681 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,029 | py | # import numpy as np
# import pandas
# from collections import Counter
# if __name__ == '__main__':
# data = pandas.read_csv('merge_11col.csv', low_memory=False).as_matrix()
# theme_list = ['others', 'Health', 'SHG', 'Education', 'Curated Local Updates', 'Social Welfare', 'Social Entertainment', 'Livelihoods', 'Agriculture', 'PFI', 'CF', 'Social Issues', 'MDD', 'Sanitation', 'KDKK', 'VHSND', 'ORS and diarrhea management', 'KITCHEN GARDENING', 'Social Entitlements', 'Family Planning']
# ## translator to remove ""
# tr = str.maketrans("", "", "\"")
# themed = {}
# camp_item = {}
# camp_user = {}
# for i in range(data.shape[0]):
# curr_theme = data[i, 6]
# curr_camp = data[i, 10]
# curr_item = data[i, 1]
# curr_user = data[i, 0]
# if type(curr_camp) != str:
# continue
# if curr_theme not in themed:
# themed[curr_theme] = Counter()
# # print(curr_camp)
# # print(data[i, :])
# clist = curr_camp.split(",")
# for camp in clist:
# camp = camp.translate(tr)
# camp = str(curr_theme) + camp
# if camp != "NA":
# themed[curr_theme][camp] += 1
# if camp not in camp_item:
# camp_item[camp] = Counter()
# camp_item[camp][curr_item] += 1
# if camp not in camp_user:
# camp_user[camp] = Counter()
# camp_user[camp][curr_user] += 1
# for theme in themed:
# print(theme_list[theme]+" "+ str(len(themed[theme])))
# print("\n\n\n\n")
# for theme in themed:
# print(theme_list[theme]+" "+str(len(themed[theme])))
# item_sum = 0
# user_sum = 0
# for camp in themed[theme]:
# if theme>9:
# print(camp[2:]+" "+str(len(camp_item[camp]))+" "+str(len(camp_user[camp])))
# else:
# print(camp[1:]+" "+str(len(camp_item[camp]))+" "+str(len(camp_user[camp])))
# # item_sum += len(camp_item[camp])
# # user_sum += len(camp_user[camp])
# # print(theme+" "+str(item_sum)+" "+str(user_sum))
# print("----------------------------------------")
# # for camp in camp_item:
# # print(camp+": "+str(len(camp_item[camp]))+" "+camp_user[camp])
## new_11col
import numpy as np
import pandas
from collections import Counter
if __name__ == '__main__':
    # `DataFrame.as_matrix()` was deprecated in pandas 0.23 and removed in
    # 1.0; `to_numpy()` is the supported replacement.
    data = pandas.read_csv('../data/new_11col.csv', low_memory=False).to_numpy()
    # Theme names for the integer codes used by the older merge_11col export;
    # kept for reference only (new_11col stores theme strings directly, so
    # the loop below never indexes into this list).
    theme_list = ['others', 'Health', 'SHG', 'Education', 'Curated Local Updates', 'Social Welfare', 'Social Entertainment', 'Livelihoods', 'Agriculture', 'PFI', 'CF', 'Social Issues', 'MDD', 'Sanitation', 'KDKK', 'VHSND', 'ORS and diarrhea management', 'KITCHEN GARDENING', 'Social Entitlements', 'Family Planning']
    ## translator to remove ""
    tr = str.maketrans("", "", "\"")
    themed = {}     # theme -> Counter of "<theme>_<campaign>" interaction counts
    camp_item = {}  # "<theme>_<campaign>" -> Counter of item ids
    camp_user = {}  # "<theme>_<campaign>" -> Counter of user ids
    for i in range(data.shape[0]):
        # Column layout: 0 = user id, 1 = item id, 6 = theme, 10 = campaigns.
        curr_theme = data[i, 6]
        curr_camp = data[i, 10]
        curr_item = data[i, 1]
        curr_user = data[i, 0]
        if type(curr_camp) != str:
            # Missing campaign cell (NaN) -- skip the row.
            continue
        if curr_theme not in themed:
            themed[curr_theme] = Counter()
        # A cell may list several comma-separated campaigns.
        for camp in curr_camp.split(","):
            camp = camp.translate(tr)
            if camp == "NA":
                # Filter placeholder entries *before* prefixing with the
                # theme: the original compared the already-prefixed name to
                # "NA", so the filter could never fire.
                continue
            camp = str(curr_theme) + "_" + camp
            themed[curr_theme][camp] += 1
            if camp not in camp_item:
                camp_item[camp] = Counter()
            camp_item[camp][curr_item] += 1
            if camp not in camp_user:
                camp_user[camp] = Counter()
            camp_user[camp][curr_user] += 1
    # Summary: number of distinct campaigns per theme.
    for theme in themed:
        print(theme + " " + str(len(themed[theme])))
    print("\n\n\n\n")
    # Detail: per campaign, the number of distinct items and users.
    for theme in themed:
        print(theme + " " + str(len(themed[theme])))
        for camp in themed[theme]:
            print(camp + " " + str(len(camp_item[camp])) + " " + str(len(camp_user[camp])))
        print("----------------------------------------")
| [
"noreply@github.com"
] | noreply@github.com |
ce74e3ac93652f4b96e347842bc4da8db56e9bb4 | dcce41221d1661a5b5406d6d94863838bf792100 | /pointnet2/utils/pointnet2_modules.py | 459436cd5f49b868aea24e42f3413a53b64eebf9 | [] | no_license | whuhxb/PointMixup | 69514a1c385667a2162ec7a75986f76eb9d2530c | 4752b16407a9781795b1ad4600746e7eeb5db05f | refs/heads/master | 2023-01-24T12:54:42.694531 | 2020-12-05T21:36:16 | 2020-12-05T21:36:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,383 | py | from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.nn as nn
import torch.nn.functional as F
import pointnet2.train.etw_pytorch_utils as pt_utils
from pointnet2.utils import pointnet2_utils
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class _PointnetSAModuleBase(nn.Module):
    """Shared machinery for PointNet++ set-abstraction (SA) modules.

    Subclasses populate ``self.npoint``, ``self.groupers`` and ``self.mlps``;
    ``forward`` then samples centroids, groups neighbours at each scale,
    runs the per-scale MLP and max-pools over each neighbourhood.
    """
    def __init__(self):
        super(_PointnetSAModuleBase, self).__init__()
        # Filled in by subclasses (see PointnetSAModuleMSG.__init__).
        self.npoint = None
        self.groupers = None
        self.mlps = None
    def forward(self, xyz, features=None):
        # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor of the xyz coordinates of the features
        features : torch.Tensor
            (B, N, C) tensor of the descriptors of the the features
        Returns
        -------
        new_xyz : torch.Tensor
            (B, npoint, 3) tensor of the new features' xyz
        new_features : torch.Tensor
            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
        """
        new_features_list = []
        # (B, 3, N) layout required by gather_operation.
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        # Farthest-point-sample npoint centroids; npoint=None means
        # "group everything" (GroupAll), so no centroids are produced.
        new_xyz = (
            pointnet2_utils.gather_operation(
                xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            )
            .transpose(1, 2)
            .contiguous()
            if self.npoint is not None
            else None
        )
        # One grouper/MLP pair per scale; results are concatenated channel-wise.
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features
            )  # (B, C, npoint, nsample)
            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            # Max-pool over the nsample neighbours of each centroid.
            new_features = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)]
            )  # (B, mlp[-1], npoint, 1)
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_features_list.append(new_features)
        return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    r"""Pointnet set abstrction layer with multiscale grouping
    Parameters
    ----------
    npoint : int
        Number of features
    radii : list of float32
        list of radii to group with
    nsamples : list of int32
        Number of samples in each ball query
    mlps : list of list of int32
        Spec of the pointnet before the global max_pool for each scale
    bn : bool
        Use batchnorm
    """
    def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
        # type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None
        super(PointnetSAModuleMSG, self).__init__()
        assert len(radii) == len(nsamples) == len(mlps)
        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            # npoint=None selects "group all points" (no FPS sampling).
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None
                else pointnet2_utils.GroupAll(use_xyz)
            )
            # Copy before modifying: the original did `mlp_spec[0] += 3` on
            # the caller's list in place, silently corrupting any spec that
            # was reused (PointnetSAModule passes `[mlp]`, so the caller's
            # `mlp` list would gain +3 on every construction).
            mlp_spec = list(mlps[i])
            if use_xyz:
                # Grouped features are concatenated with relative xyz,
                # so the first MLP layer sees 3 extra input channels.
                mlp_spec[0] += 3
            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
    r"""Single-scale Pointnet set-abstraction layer.

    Convenience wrapper around :class:`PointnetSAModuleMSG` for the common
    case of exactly one grouping radius.

    Parameters
    ----------
    mlp : list
        Spec of the pointnet before the global max_pool
    npoint : int
        Number of features
    radius : float
        Radius of ball
    nsample : int
        Number of samples in the ball query
    bn : bool
        Use batchnorm
    """
    def __init__(
        self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True
    ):
        # type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None
        # Delegate to the multi-scale base class with singleton lists.
        super(PointnetSAModule, self).__init__(
            mlps=[mlp], npoint=npoint, radii=[radius],
            nsamples=[nsample], bn=bn, use_xyz=use_xyz)
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, mlp, bn=True):
# type: (PointnetFPModule, List[int], bool) -> None
super(PointnetFPModule, self).__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(self, unknown, known, unknow_feats, known_feats):
# type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
unknow_feats : torch.Tensor
(B, C1, n) tensor of the features to be propigated to
known_feats : torch.Tensor
(B, C2, m) tensor of features to be propigated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*(known_feats.size()[0:2] + [unknown.size(1)])
)
if unknow_feats is not None:
new_features = torch.cat(
[interpolated_feats, unknow_feats], dim=1
) # (B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
if __name__ == "__main__":
from torch.autograd import Variable
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
test_module = PointnetSAModuleMSG(
npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]
)
test_module.cuda()
print(test_module(xyz, xyz_feats))
# test_module = PointnetFPModule(mlp=[6, 6])
# test_module.cuda()
# from torch.autograd import gradcheck
# inputs = (xyz, xyz, None, xyz_feats)
# test = gradcheck(test_module, inputs, eps=1e-6, atol=1e-4)
# print(test)
for _ in range(1):
_, new_features = test_module(xyz, xyz_feats)
new_features.backward(torch.cuda.FloatTensor(*new_features.size()).fill_(1))
print(new_features)
print(xyz.grad)
| [
"rex_cyl@hotmail.com"
] | rex_cyl@hotmail.com |
a270f0bfe251afd4d77425392a011b26bcdbb29d | 39a41840311643f5a576bb3f51a37fb744dc4bed | /jaiku/admin.py | c7b31205df7ad7869bbb5effc68880843fc9f19a | [] | no_license | Tattoo/lifestream | 79a1180911c262a5b56960b611128b7a57e1811b | 3be88bc994ed41312085f2b8d7060fc9e3a1a1f4 | refs/heads/master | 2021-01-23T15:53:31.538380 | 2011-07-11T14:08:59 | 2011-07-11T14:08:59 | 2,030,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from django.contrib import admin
from lifestream.jaiku.models import JaikuEntry
class AuthorAdmin(admin.ModelAdmin):
pass
admin.site.register(JaikuEntry, AuthorAdmin) | [
"tatu.kairi@gmail.com"
] | tatu.kairi@gmail.com |
6218f48765dc1861e53e853bb3e08eae1296574c | 87330e22ad670a227fa0aa140d96d099817e9177 | /screen.py | 8441e14279520b57c46bb924d8fe95ac67fa38ac | [] | no_license | aakashthapa060/Table-Tennis- | 4e7c8f87f7e6f801279fbf4cf7dfff281f914548 | 1684e6eddd202ea78c56fa6205642c962d7e925e | refs/heads/main | 2023-04-13T21:52:46.169030 | 2021-04-22T12:07:26 | 2021-04-22T12:07:26 | 360,499,485 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | import pygame
class Screen:
def __init__(self, width, height):
self.width = width
self.height = height
def screen_display(self):
return pygame.display.set_mode((self.width,self.height)) | [
"noreply@github.com"
] | noreply@github.com |
312b52cb1b4319add74ab61694c18b56da2451a1 | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-Cocoa/PyObjCTest/test_nscolorsampler.py | 1b666e6faca75d95e31360218b6b1b293f053d99 | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 290 | py | import AppKit
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSColorSampler(TestCase):
@min_os_level("10.15")
def test_methods_10_15(self):
self.assertArgIsBlock(
AppKit.NSColorSampler.showSamplerWithSelectionHandler_, 0, b"v@"
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
9cb8c99b0fc2cbe805cbf28784627800a3e630bf | 322cc47eb3b7e596a7b136ea44a82f19b140fff9 | /src/notifications/migrations/0002_auto_20151011_2113.py | 30bf3a17a56fbda56aa0f5326b9e0e464cbcdc41 | [] | no_license | summerbt/srvup_dance | e3b753e809858d132a2ed5279098692f08660375 | 9fff98ea1eb09d0141256197d3a7ad207e98aea4 | refs/heads/master | 2021-01-10T03:30:26.232837 | 2015-11-08T22:09:54 | 2015-11-08T22:09:54 | 45,801,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
('notifications', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='notification',
name='action_content_type',
field=models.ForeignKey(related_name='notify_acting_object', blank=True, to='contenttypes.ContentType', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='notification',
name='action_object_id',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='notification',
name='sender_content_type',
field=models.ForeignKey(related_name='notify_sending_object', default=0, to='contenttypes.ContentType'),
preserve_default=False,
),
migrations.AddField(
model_name='notification',
name='sender_object_id',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='notification',
name='target_content_type',
field=models.ForeignKey(related_name='notify_targeted_object', blank=True, to='contenttypes.ContentType', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='notification',
name='target_object_id',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
]
| [
"summerbartontaylor@gmail.com"
] | summerbartontaylor@gmail.com |
655a961672c361b1ced6b82454352d0a7220e958 | 27cb0aab1d6525b9689853f99af9f4561d47d38e | /OCR/main.py | 860fef99daefd70aa7244212bdaa89582f96085e | [] | no_license | parthjain99/Pedagogy-System-for-Assessing-Students-Performance | 85605e452f571623e8b097206e2ea58253439e5a | 4a2e78474806f01ced8cb664190bbd5ffd725d8b | refs/heads/main | 2023-08-02T02:01:49.038508 | 2021-10-05T21:46:13 | 2021-10-05T21:46:13 | 413,986,789 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,194 | py | import cv2
from time import sleep
import requests
import io
import json
import os
import random
key = cv2.waitKey(1)
webcam = cv2.VideoCapture(0)
sleep(2)
print("For Recognize Image PRESS 'S'\n"
"For QUIT PRESS 'Q\n"
"After run time if 'images.jpg' is still visible,Please re-run the program.\n")
while True:
try:
check, frame = webcam.read()
print(check) # prints true as long as the webcam is running
print(frame) # prints matrix values of each framecd
cv2.imshow("Capturing", frame)
key = cv2.waitKey(1)
if key == ord('s'):
cv2.imwrite(filename='images.jpg', img=frame)
r = random.randint(1, 20000000)
img_file = 'images' + str(r) + '.jpg'
cv2.imwrite(filename='data/' + img_file, img=frame)
webcam.release()
print("Processing image...")
img_ = cv2.imread('images.jpg', cv2.IMREAD_ANYCOLOR)
print("Image saved!")
cv2.destroyAllWindows()
break
elif key == ord('q'):
webcam.release()
cv2.destroyAllWindows()
break
except KeyboardInterrupt:
print("Turning off camera.")
webcam.release()
print("Camera off.")
print("Program ended.")
cv2.destroyAllWindows()
break
sleep(2)
resim = "images.jpg"
img = cv2.imread(resim)
print("Picture is Detected")
api = img
# Ocr
url_api = "https://api.ocr.space/parse/image"
_, compressedimage = cv2.imencode(".jpg", api, [1, 90])
file_bytes = io.BytesIO(compressedimage)
result = requests.post(url_api,
files={resim: file_bytes},
data={"apikey": "helloworld",
"language": "eng"})
result = result.content.decode()
print(result)
result = json.loads(result)
parsed_results = result.get("ParsedResults")[0]
text_detected = parsed_results.get("ParsedText")
print(text_detected)
print("Text is writing to file...")
f = open("text_detected.txt", "a+")
f.write(text_detected)
f.close()
print("Operation is successful")
cv2.imshow("roi", api)
cv2.imshow("Img", img)
cv2.waitKey(0)
os.remove(resim)
| [
"pchamp22.pj@gmail.com"
] | pchamp22.pj@gmail.com |
35d9d78251c7c409384d361152329bb9a09e2a50 | 52cef818e83ea70957411940bc9f562500e11bf0 | /assignments/shell/cmd_pkg/tail.py | 18712921c668dfdf9f930c7ec03366e81d9bd664 | [] | no_license | shailamogalapu/5143-201-OpSys-Mengaraboina | 27b524cb8b03ba6dcda066f543b9e8e000bc3309 | fd5df8805536ac9c199cc9674b7bc34c124f3b0e | refs/heads/master | 2021-01-09T05:27:01.835203 | 2017-02-21T20:30:37 | 2017-02-21T20:30:37 | 82,729,220 | 0 | 0 | null | 2017-02-21T21:31:18 | 2017-02-21T21:31:18 | null | UTF-8 | Python | false | false | 232 | py | def tail(filename):
if os.path.exists(filename):
nlines=-10
lines=open(filename,'r').readlines()
tot_lines = len(lines)
for i in range(nlines,0):
print lines[i]
else:
print("file doesnt exists") | [
"lavanyamengaraboina@gmail.com"
] | lavanyamengaraboina@gmail.com |
08cf0f0f502f7707e27dbb1c434bf4865d542d55 | 81a0995bd9278e646cfe8e1765744a3497c2c025 | /backend/database/migrations/0002_auto_20210512_1720.py | 071b336f199dca93c9167506e09728657e25dbb4 | [] | no_license | wawel37/crypto-app | 475c1bda37660a7aaa1f16a1661fa95ace66be9c | 094fb1990731ffaf15421ff98702b7ca20c4e04b | refs/heads/main | 2023-06-02T06:36:36.455985 | 2021-06-14T10:21:33 | 2021-06-14T10:21:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # Generated by Django 3.2 on 2021-05-12 17:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('database', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='dollars',
field=models.FloatField(default=0),
),
migrations.CreateModel(
name='Wallet',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cryptoID', models.CharField(max_length=40)),
('amount', models.FloatField(default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='database.user')),
],
),
]
| [
"pstecyk@student.agh.edu.pl"
] | pstecyk@student.agh.edu.pl |
9493df6cb5e308abd83f10492fb5f33eeba389ea | 8ee6b60ec93f858e90d1db2728a5b827c91be93f | /config/settings/local.py | 913d3ec7bae80650038f4175c736e6840768e8f8 | [
"MIT"
] | permissive | TrellixVulnTeam/auto-review_28F3 | df9708c15156140543d65542450a47c127bdb801 | 64d4c1e40a4c415e14680ed333be818ce7d320a8 | refs/heads/master | 2023-03-19T11:18:25.261191 | 2017-10-27T01:09:47 | 2017-10-27T01:09:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from .base import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_$l*dwz%)f7nv)6behbdfd*wh$x^g&5(=&@2o=%%fiykcqd-ne'
DEBUG = env.bool('DJANGO_DEBUG', default=True) | [
"breakfastalways@gmail.com"
] | breakfastalways@gmail.com |
856e1b9f8b32d71adc877918ddfca7855f87d8e9 | 47f7925d2a2e74fa2fa54308bfd465f39cf5164f | /home/migrations/0002_danisman.py | 0ec2ad19e23124560021aa4bbdb3ec90111032fb | [] | no_license | asitaslan/Homlook | c13b937e85d3300f8732603b92c0c93939e39140 | c1a0ee937dee27c90e09dc3c5f1329b6ae53bccf | refs/heads/master | 2022-11-06T22:17:00.222347 | 2020-07-12T09:20:50 | 2020-07-12T09:20:50 | 279,030,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | # Generated by Django 3.0.4 on 2020-07-04 08:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='danisman',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isim', models.CharField(max_length=30)),
('soyisim', models.CharField(max_length=20)),
('telefon', models.CharField(max_length=15)),
('email', models.CharField(max_length=200)),
('image', models.ImageField(blank=True, upload_to='images/')),
],
),
]
| [
"asitaslan95@gmail.com"
] | asitaslan95@gmail.com |
bfc74cf81837266fab6836d5440c75ebfa920a50 | 28eb3c2c5f63edc9da9ff6e0b2c6decda076278f | /2515/project2/q1.py | ac9011f507578de9b61e1811eb25dd1b742f2e32 | [] | no_license | ljh6993/CSC2515_Intro-to-Machine-learning | 3ff2a2de9b2bc9b1f2558c87cd06a4046b1aac94 | 3845481d4e26490047e53a05bb051144a09e4468 | refs/heads/master | 2021-03-31T14:22:23.721095 | 2020-03-18T01:47:25 | 2020-03-18T01:47:25 | 248,112,098 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | import numpy as np
from sklearn.datasets import load_boston
def Huber_loss(a, delta):
# a=y-t
H = np.where(abs(a) <= delta, 1 / 2 * a ** 2, delta * (abs(a) - delta / 2))
dH = np.where(abs(a) <= delta, a, np.where(a > delta, delta, -delta))
return H, dH
def gradient_descent(rate, X, y, delta, iteration_time):
# y target
# X design matrix
# w b initial to be zero
w = np.zeros((1, X.shape[1])) # w is a row =1 column =size_x
b = np.zeros((X.shape[0], 1))
# parameter for huber loss, to get a robust regression
# loop for gradient descent
# y is target, set the fx as the current prediction value
# fx=np.dot(X,w.T)+b
# while abs(fx-y).all>0.01:
for i in range(iteration_time):
fx = np.dot(X, w.T) + b
a = fx - y # column =1
# print(a)
H, dldw = Huber_loss(a, delta)
#print(w)
print("the interation time", i, " the value is ", np.sum(H))
# from loss function:
# dl/dw dl/b
# we need the cost funciton derivative
dlw = np.dot(X.T, dldw) / X.shape[0] # row =1
w -= (dlw * rate).T
b -= dldw * rate / X.shape[0]
def main():
learning_rate = 0.1
delta = 1
iteration_time = 20000
# test 1
m = 13 # each sample has m size features
N = 506 # 506 samples
x = np.random.rand(N, m)
y = np.zeros((N, 1))
y.fill(1)
gradient_descent(learning_rate, x, y, delta, iteration_time)
main()
| [
"ljh69993@gmail.com"
] | ljh69993@gmail.com |
36cc0b54b41fcc7f8fe680953ecdd8685005c0bc | 6a746abb4dd3f2e0538936f272ed5d051a120c5b | /message_ix_models/model/build.py | f92ab09a1bd72b04109c697d15c7faf224e8c6b0 | [
"Apache-2.0"
] | permissive | OFR-IIASA/message-ix-models | d902d26c10db8215a856032d09f4252e16500c99 | 7459065505f8f3a418086aa620b789b5c5f39cde | refs/heads/main | 2023-06-15T00:16:56.654237 | 2021-07-02T09:33:49 | 2021-07-02T09:33:49 | 380,197,167 | 0 | 0 | Apache-2.0 | 2021-06-25T10:01:47 | 2021-06-25T10:01:47 | null | UTF-8 | Python | false | false | 4,795 | py | import logging
from typing import Callable, Dict, Mapping
import pandas as pd
from ixmp.utils import maybe_check_out, maybe_commit
from message_ix import Scenario
from sdmx.model import Code
from message_ix_models.util import add_par_data, strip_par_data
from message_ix_models.util.scenarioinfo import ScenarioInfo
log = logging.getLogger(__name__)
def apply_spec(
scenario: Scenario,
spec: Mapping[str, ScenarioInfo],
data: Callable = None,
**options,
):
"""Apply `spec` to `scenario`.
Parameters
----------
spec
A 'specification': :class:`dict` with 'require', 'remove', and 'add' keys and
:class:`.ScenarioInfo` objects as values.
data : callable, optional
Function to add data to `scenario`. `data` can either manipulate the scenario
directly, or return a :class:`dict` compatible with :func:`.add_par_data`.
Other parameters
----------------
dry_run : bool
Don't modify `scenario`; only show what would be done. Default :obj:`False`.
Exceptions will still be raised if the elements from ``spec['required']`` are
missing; this serves as a check that the scenario has the required features for
applying the spec.
fast : bool
Do not remove existing parameter data; increases speed on large scenarios.
quiet : bool
Only show log messages at level ``ERROR`` and higher. If :obj:`False` (default),
show log messages at level ``DEBUG`` and higher.
message : str
Commit message.
See also
--------
.add_par_data
.strip_par_data
.Code
.ScenarioInfo
"""
dry_run = options.get("dry_run", False)
log.setLevel(logging.ERROR if options.get("quiet", False) else logging.DEBUG)
if not dry_run:
try:
scenario.remove_solution()
except ValueError:
pass
maybe_check_out(scenario)
dump: Dict[str, pd.DataFrame] = {} # Removed data
for set_name in scenario.set_list():
# Check whether this set is mentioned at all in the spec
if 0 == sum(map(lambda info: len(info.set[set_name]), spec.values())):
# Not mentioned; don't do anything
continue
log.info(f"Set {repr(set_name)}")
# Base contents of the set
base_set = scenario.set(set_name)
# Unpack a multi-dimensional/indexed set to a list of tuples
base = (
list(base_set.itertuples(index=False))
if isinstance(base_set, pd.DataFrame)
else base_set.tolist()
)
log.info(f" {len(base)} elements")
# log.debug(', '.join(map(repr, base))) # All elements; verbose
# Check for required elements
require = spec["require"].set[set_name]
log.info(f" Check {len(require)} required elements")
# Raise an exception about the first missing element
missing = list(filter(lambda e: e not in base, require))
if len(missing):
log.error(f" {len(missing)} elements not found: {repr(missing)}")
raise ValueError
# Remove elements and associated parameter values
remove = spec["remove"].set[set_name]
for element in remove:
msg = f"{repr(element)} and associated parameter elements"
if options.get("fast", False):
log.info(f" Skip removing {msg} (fast=True)")
continue
log.info(f" Remove {msg}")
strip_par_data(scenario, set_name, element, dry_run=dry_run, dump=dump)
# Add elements
add = [] if dry_run else spec["add"].set[set_name]
for element in add:
scenario.add_set(
set_name,
element.id if isinstance(element, Code) else element,
)
if len(add):
log.info(f" Add {len(add)} element(s)")
log.debug(" " + ", ".join(map(repr, add)))
log.info(" ---")
N_removed = sum(len(d) for d in dump.values())
log.info(f"{N_removed} parameter elements removed")
# Add units to the Platform before adding data
for unit in spec["add"].set["unit"]:
unit = unit if isinstance(unit, Code) else Code(id=unit, name=unit)
log.info(f"Add unit {repr(unit)}")
scenario.platform.add_unit(unit.id, comment=str(unit.name))
# Add data
if callable(data):
result = data(scenario, dry_run=dry_run)
if result:
# `data` function returned some data; use add_par_data()
add_par_data(scenario, result, dry_run=dry_run)
# Finalize
log.info("Commit results.")
maybe_commit(
scenario,
condition=not dry_run,
message=options.get("message", f"{__name__}.apply_spec()"),
)
| [
"mail@paul.kishimoto.name"
] | mail@paul.kishimoto.name |
5d4d2ed476aea05494ec90081e7dd8d67f9f8cb0 | 602ea2edb853c5561a45b6aa2783ac894ef408e4 | /res_mlp_pytorch/res_mlp_pytorch.py | 1ac60d23a7068840b29608111b116cc789825440 | [
"MIT"
] | permissive | BadGuy-wang/res-mlp-pytorch | 427d6f1f2279dcfe59d7cee02befb26a0a4dad79 | 562814a406cc418bdb4710aa3bdc569206ac171b | refs/heads/main | 2023-05-05T13:22:46.575901 | 2021-06-03T22:30:40 | 2021-06-03T22:30:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,989 | py | import torch
from torch import nn, einsum
from einops.layers.torch import Rearrange, Reduce
# helpers
def pair(val):
return (val, val) if not isinstance(val, tuple) else val
# classes
class Affine(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, 1, dim))
self.b = nn.Parameter(torch.zeros(1, 1, dim))
def forward(self, x):
return x * self.g + self.b
class PreAffinePostLayerScale(nn.Module): # https://arxiv.org/abs/2103.17239
def __init__(self, dim, depth, fn):
super().__init__()
if depth <= 18:
init_eps = 0.1
elif depth > 18 and depth <= 24:
init_eps = 1e-5
else:
init_eps = 1e-6
scale = torch.zeros(1, 1, dim).fill_(init_eps)
self.scale = nn.Parameter(scale)
self.affine = Affine(dim)
self.fn = fn
def forward(self, x):
return self.fn(self.affine(x)) * self.scale + x
def ResMLP(*, image_size, patch_size, dim, depth, num_classes, expansion_factor = 4):
image_height, image_width = pair(image_size)
assert (image_height % patch_size) == 0 and (image_width % patch_size) == 0, 'image height and width must be divisible by patch size'
num_patches = (image_height // patch_size) * (image_width // patch_size)
wrapper = lambda i, fn: PreAffinePostLayerScale(dim, i + 1, fn)
return nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear((patch_size ** 2) * 3, dim),
*[nn.Sequential(
wrapper(i, nn.Conv1d(num_patches, num_patches, 1)),
wrapper(i, nn.Sequential(
nn.Linear(dim, dim * expansion_factor),
nn.GELU(),
nn.Linear(dim * expansion_factor, dim)
))
) for i in range(depth)],
Affine(dim),
Reduce('b n c -> b c', 'mean'),
nn.Linear(dim, num_classes)
)
| [
"lucidrains@gmail.com"
] | lucidrains@gmail.com |
f083e527a4b3f1412943ad2d21140a45ff657c27 | bc441bb06b8948288f110af63feda4e798f30225 | /monitor_sdk/model/monitor/alert_rule_pb2.py | 890d5bd10e21cba46074562e7086cd679fd504d1 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 23,813 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: alert_rule.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from monitor_sdk.model.monitor import alert_dims_pb2 as monitor__sdk_dot_model_dot_monitor_dot_alert__dims__pb2
from monitor_sdk.model.monitor import alert_conditions_pb2 as monitor__sdk_dot_model_dot_monitor_dot_alert__conditions__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='alert_rule.proto',
package='monitor',
syntax='proto3',
serialized_options=_b('ZAgo.easyops.local/contracts/protorepo-models/easyops/model/monitor'),
serialized_pb=_b('\n\x10\x61lert_rule.proto\x12\x07monitor\x1a*monitor_sdk/model/monitor/alert_dims.proto\x1a\x30monitor_sdk/model/monitor/alert_conditions.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xc2\x08\n\tAlertRule\x12\x0b\n\x03org\x18\x01 \x01(\x05\x12\n\n\x02id\x18\x02 \x01(\t\x12\x11\n\trule_name\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\x05\x12\x12\n\nversion_id\x18\x05 \x01(\t\x12&\n\nalert_dims\x18\x06 \x03(\x0b\x32\x12.monitor.AlertDims\x12\x15\n\rrule_priority\x18\x07 \x01(\x05\x12\x32\n\x10\x61lert_conditions\x18\x08 \x01(\x0b\x32\x18.monitor.AlertConditions\x12\x15\n\rdetect_window\x18\t \x01(\x05\x12\x13\n\x0b\x61lert_count\x18\n \x01(\x05\x12\x16\n\x0e\x61lert_interval\x18\x0b \x01(\x05\x12\x15\n\rrecover_count\x18\x0c \x01(\x05\x12+\n\x07\x61\x63tions\x18\r \x03(\x0b\x32\x1a.monitor.AlertRule.Actions\x12/\n\ttemplates\x18\x0e \x01(\x0b\x32\x1c.monitor.AlertRule.Templates\x12\x0f\n\x07\x63reator\x18\x0f \x01(\t\x12\r\n\x05\x63time\x18\x10 \x01(\x05\x12\r\n\x05mtime\x18\x11 \x01(\x05\x12/\n\tinstances\x18\x12 \x01(\x0b\x32\x1c.monitor.AlertRule.Instances\x12\x10\n\x08objectId\x18\x13 \x01(\t\x12\x10\n\x08\x64isabled\x18\x14 \x01(\x08\x12\x0e\n\x06source\x18\x15 \x01(\t\x1a\xe8\x02\n\x07\x41\x63tions\x12\x37\n\tcondition\x18\x01 \x01(\x0b\x32$.monitor.AlertRule.Actions.Condition\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0f\n\x07upgrade\x18\x03 \x01(\x08\x12\x0f\n\x07methods\x18\x04 \x03(\t\x12\x11\n\treceivers\x18\x05 \x03(\t\x12\x1c\n\x14receiver_user_groups\x18\x06 \x03(\t\x12\x42\n\x0freceiver_owners\x18\x07 \x03(\x0b\x32).monitor.AlertRule.Actions.ReceiverOwners\x1a/\n\tCondition\x12\x13\n\x0blasting_for\x18\x01 \x01(\x05\x12\r\n\x05level\x18\x02 \x01(\x05\x1aN\n\x0eReceiverOwners\x12\x11\n\tobject_id\x18\x01 \x01(\t\x12\x16\n\x0eobject_attr_id\x18\x02 \x01(\t\x12\x11\n\ttranslate\x18\x03 \x01(\t\x1a\x61\n\tTemplates\x12\x18\n\x10\x63ontent_template\x18\x01 \x01(\t\x12\x17\n\x0ftarget_template\x18\x02 
\x01(\t\x12!\n\x19recovery_content_template\x18\x03 \x01(\t\x1aV\n\tInstances\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x13\n\x0binstanceIds\x18\x02 \x03(\t\x12&\n\x05query\x18\x03 \x01(\x0b\x32\x17.google.protobuf.StructBCZAgo.easyops.local/contracts/protorepo-models/easyops/model/monitorb\x06proto3')
,
dependencies=[monitor__sdk_dot_model_dot_monitor_dot_alert__dims__pb2.DESCRIPTOR,monitor__sdk_dot_model_dot_monitor_dot_alert__conditions__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_ALERTRULE_ACTIONS_CONDITION = _descriptor.Descriptor(
name='Condition',
full_name='monitor.AlertRule.Actions.Condition',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lasting_for', full_name='monitor.AlertRule.Actions.Condition.lasting_for', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='monitor.AlertRule.Actions.Condition.level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=930,
serialized_end=977,
)
_ALERTRULE_ACTIONS_RECEIVEROWNERS = _descriptor.Descriptor(
name='ReceiverOwners',
full_name='monitor.AlertRule.Actions.ReceiverOwners',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='object_id', full_name='monitor.AlertRule.Actions.ReceiverOwners.object_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_attr_id', full_name='monitor.AlertRule.Actions.ReceiverOwners.object_attr_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='translate', full_name='monitor.AlertRule.Actions.ReceiverOwners.translate', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=979,
serialized_end=1057,
)
_ALERTRULE_ACTIONS = _descriptor.Descriptor(
name='Actions',
full_name='monitor.AlertRule.Actions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='condition', full_name='monitor.AlertRule.Actions.condition', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='monitor.AlertRule.Actions.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='upgrade', full_name='monitor.AlertRule.Actions.upgrade', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='methods', full_name='monitor.AlertRule.Actions.methods', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receivers', full_name='monitor.AlertRule.Actions.receivers', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receiver_user_groups', full_name='monitor.AlertRule.Actions.receiver_user_groups', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receiver_owners', full_name='monitor.AlertRule.Actions.receiver_owners', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ALERTRULE_ACTIONS_CONDITION, _ALERTRULE_ACTIONS_RECEIVEROWNERS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=697,
serialized_end=1057,
)
# Descriptor for the nested message monitor.AlertRule.Templates: the three
# notification text templates of an alert rule (alert content, alert target,
# recovery content), all optional proto3 strings.
# NOTE: protoc-generated code -- do not edit by hand; regenerate from the
# .proto definition instead.
_ALERTRULE_TEMPLATES = _descriptor.Descriptor(
  name='Templates',
  full_name='monitor.AlertRule.Templates',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='content_template', full_name='monitor.AlertRule.Templates.content_template', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='target_template', full_name='monitor.AlertRule.Templates.target_template', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='recovery_content_template', full_name='monitor.AlertRule.Templates.recovery_content_template', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1059,
  serialized_end=1156,
)
# Descriptor for the nested message monitor.AlertRule.Instances: which
# monitored instances a rule applies to -- an instance type, an explicit list
# of instance ids, and a free-form query (google.protobuf.Struct).
# NOTE: protoc-generated code -- do not edit by hand; regenerate from the
# .proto definition instead.
_ALERTRULE_INSTANCES = _descriptor.Descriptor(
  name='Instances',
  full_name='monitor.AlertRule.Instances',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='monitor.AlertRule.Instances.type', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='instanceIds', full_name='monitor.AlertRule.Instances.instanceIds', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='query', full_name='monitor.AlertRule.Instances.query', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1158,
  serialized_end=1244,
)
# Top-level descriptor for monitor.AlertRule: identity/versioning fields,
# the alerting condition and thresholds (detect_window / alert_count /
# alert_interval / recover_count), notification actions and templates, the
# instance scope, and bookkeeping fields (creator, ctime, mtime, disabled,
# source).
# NOTE: protoc-generated code -- do not edit by hand; regenerate from the
# .proto definition instead.
_ALERTRULE = _descriptor.Descriptor(
  name='AlertRule',
  full_name='monitor.AlertRule',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='org', full_name='monitor.AlertRule.org', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='id', full_name='monitor.AlertRule.id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='rule_name', full_name='monitor.AlertRule.rule_name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='version', full_name='monitor.AlertRule.version', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='version_id', full_name='monitor.AlertRule.version_id', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='alert_dims', full_name='monitor.AlertRule.alert_dims', index=5,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='rule_priority', full_name='monitor.AlertRule.rule_priority', index=6,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='alert_conditions', full_name='monitor.AlertRule.alert_conditions', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='detect_window', full_name='monitor.AlertRule.detect_window', index=8,
      number=9, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='alert_count', full_name='monitor.AlertRule.alert_count', index=9,
      number=10, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='alert_interval', full_name='monitor.AlertRule.alert_interval', index=10,
      number=11, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='recover_count', full_name='monitor.AlertRule.recover_count', index=11,
      number=12, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='actions', full_name='monitor.AlertRule.actions', index=12,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='templates', full_name='monitor.AlertRule.templates', index=13,
      number=14, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='creator', full_name='monitor.AlertRule.creator', index=14,
      number=15, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ctime', full_name='monitor.AlertRule.ctime', index=15,
      number=16, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='mtime', full_name='monitor.AlertRule.mtime', index=16,
      number=17, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='instances', full_name='monitor.AlertRule.instances', index=17,
      number=18, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='objectId', full_name='monitor.AlertRule.objectId', index=18,
      number=19, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='disabled', full_name='monitor.AlertRule.disabled', index=19,
      number=20, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='source', full_name='monitor.AlertRule.source', index=20,
      number=21, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_ALERTRULE_ACTIONS, _ALERTRULE_TEMPLATES, _ALERTRULE_INSTANCES, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=154,
  serialized_end=1244,
)
# Wire up the cross-references between descriptors (containing types and
# message-typed fields), register the file descriptor, then build the
# concrete message classes via the reflection machinery.  Nested Python
# classes mirror the nested proto messages.
# NOTE: protoc-generated code -- do not edit by hand; regenerate from the
# .proto definition instead.
_ALERTRULE_ACTIONS_CONDITION.containing_type = _ALERTRULE_ACTIONS
_ALERTRULE_ACTIONS_RECEIVEROWNERS.containing_type = _ALERTRULE_ACTIONS
_ALERTRULE_ACTIONS.fields_by_name['condition'].message_type = _ALERTRULE_ACTIONS_CONDITION
_ALERTRULE_ACTIONS.fields_by_name['receiver_owners'].message_type = _ALERTRULE_ACTIONS_RECEIVEROWNERS
_ALERTRULE_ACTIONS.containing_type = _ALERTRULE
_ALERTRULE_TEMPLATES.containing_type = _ALERTRULE
_ALERTRULE_INSTANCES.fields_by_name['query'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_ALERTRULE_INSTANCES.containing_type = _ALERTRULE
_ALERTRULE.fields_by_name['alert_dims'].message_type = monitor__sdk_dot_model_dot_monitor_dot_alert__dims__pb2._ALERTDIMS
_ALERTRULE.fields_by_name['alert_conditions'].message_type = monitor__sdk_dot_model_dot_monitor_dot_alert__conditions__pb2._ALERTCONDITIONS
_ALERTRULE.fields_by_name['actions'].message_type = _ALERTRULE_ACTIONS
_ALERTRULE.fields_by_name['templates'].message_type = _ALERTRULE_TEMPLATES
_ALERTRULE.fields_by_name['instances'].message_type = _ALERTRULE_INSTANCES
DESCRIPTOR.message_types_by_name['AlertRule'] = _ALERTRULE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

AlertRule = _reflection.GeneratedProtocolMessageType('AlertRule', (_message.Message,), {

  'Actions' : _reflection.GeneratedProtocolMessageType('Actions', (_message.Message,), {

    'Condition' : _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), {
      'DESCRIPTOR' : _ALERTRULE_ACTIONS_CONDITION,
      '__module__' : 'alert_rule_pb2'
      # @@protoc_insertion_point(class_scope:monitor.AlertRule.Actions.Condition)
      })
    ,

    'ReceiverOwners' : _reflection.GeneratedProtocolMessageType('ReceiverOwners', (_message.Message,), {
      'DESCRIPTOR' : _ALERTRULE_ACTIONS_RECEIVEROWNERS,
      '__module__' : 'alert_rule_pb2'
      # @@protoc_insertion_point(class_scope:monitor.AlertRule.Actions.ReceiverOwners)
      })
    ,
    'DESCRIPTOR' : _ALERTRULE_ACTIONS,
    '__module__' : 'alert_rule_pb2'
    # @@protoc_insertion_point(class_scope:monitor.AlertRule.Actions)
    })
  ,

  'Templates' : _reflection.GeneratedProtocolMessageType('Templates', (_message.Message,), {
    'DESCRIPTOR' : _ALERTRULE_TEMPLATES,
    '__module__' : 'alert_rule_pb2'
    # @@protoc_insertion_point(class_scope:monitor.AlertRule.Templates)
    })
  ,

  'Instances' : _reflection.GeneratedProtocolMessageType('Instances', (_message.Message,), {
    'DESCRIPTOR' : _ALERTRULE_INSTANCES,
    '__module__' : 'alert_rule_pb2'
    # @@protoc_insertion_point(class_scope:monitor.AlertRule.Instances)
    })
  ,
  'DESCRIPTOR' : _ALERTRULE,
  '__module__' : 'alert_rule_pb2'
  # @@protoc_insertion_point(class_scope:monitor.AlertRule)
  })
_sym_db.RegisterMessage(AlertRule)
_sym_db.RegisterMessage(AlertRule.Actions)
_sym_db.RegisterMessage(AlertRule.Actions.Condition)
_sym_db.RegisterMessage(AlertRule.Actions.ReceiverOwners)
_sym_db.RegisterMessage(AlertRule.Templates)
_sym_db.RegisterMessage(AlertRule.Instances)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
de7fe8a3116f89860feb58cc06238a1c9f045460 | 924763dfaa833a898a120c411a5ed3b2d9b2f8c7 | /compiled/construct/enum_int_range_s.py | bc6c06e80d04163057a993b93a7ea82933e7a6d2 | [
"MIT"
] | permissive | kaitai-io/ci_targets | 31257dfdf77044d32a659ab7b8ec7da083f12d25 | 2f06d144c5789ae909225583df32e2ceb41483a3 | refs/heads/master | 2023-08-25T02:27:30.233334 | 2023-08-04T18:54:45 | 2023-08-04T18:54:45 | 87,530,818 | 4 | 6 | MIT | 2023-07-28T22:12:01 | 2017-04-07T09:44:44 | C++ | UTF-8 | Python | false | false | 383 | py | from construct import *
from construct.lib import *
import enum
# Signed 32-bit boundary constants used by the enum_int_range_s test format:
# the minimum, zero, and maximum values representable in an int32.
enum_int_range_s__constants = enum.IntEnum(
    'enum_int_range_s__constants',
    {'int_min': -2147483648, 'zero': 0, 'int_max': 2147483647},
)
# Construct schema for the Kaitai test type 'enum_int_range_s': three
# consecutive big-endian signed 32-bit integers, each decoded through the
# int32-boundary enum above.  `_schema` is the conventional entry point the
# CI harness looks up in every compiled module.
enum_int_range_s = Struct(
    'f1' / Enum(Int32sb, enum_int_range_s__constants),
    'f2' / Enum(Int32sb, enum_int_range_s__constants),
    'f3' / Enum(Int32sb, enum_int_range_s__constants),
)
_schema = enum_int_range_s
| [
"kaitai-bot@kaitai.io"
] | kaitai-bot@kaitai.io |
247ff4fa309b29c41e2df933d58baaa072ccde67 | 5f430fe2e8acc0056ff0da03f704c08077acb6e9 | /Graduation-thesis_2017-master/Linear Regression/Linear_Regression_DT1.py | 651867073bc7a40af1aceed7471bda5433e27ac7 | [] | no_license | YoonDongMin/Graduation-thesis_2017 | 1dee3f69f8ff6d6c0c5cb0bc793373d33a798e22 | 73e7b5205d76e30092fc88f45afda62318f2be3b | refs/heads/master | 2020-03-06T20:53:13.482857 | 2018-03-28T01:21:27 | 2018-03-28T01:21:27 | 109,697,911 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pickle

tf.set_random_seed(777)  # for keep the random number constant

# DT1.csv holds integer order data, Sale_Data.csv the actual sales series.
xy = np.loadtxt('DT1.csv', delimiter=',', dtype=np.int32)  # Order Data load
sale = np.loadtxt('Sale_Data.csv', delimiter=',')  # Sale Data load

# Rows 0-499 are the training set; rows 500-699 are held out for testing.
x_data = xy[0:500, [0]]
y_data = xy[0:500, [-1]]
x_test = xy[500:700, [0]]
y_real = sale[500:700]

# Single-feature linear regression: hypothesis = X @ W + b.
X = tf.placeholder(tf.float32, shape=[None, 1])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([1, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

hypothesis = tf.matmul(X, W) + b

# Simplified cost/loss function (mean squared error)
cost = tf.reduce_mean(tf.square(hypothesis - Y))

# NOTE(review): arg_max over a single-column tensor is always 0, so this
# "accuracy" is constantly 1.0 and carries no information for a regression
# model -- consider reporting RMSE instead.  tf.arg_max is also deprecated
# in favour of tf.argmax.
prediction = tf.arg_max(hypothesis, 1)
is_correct = tf.equal(prediction, tf.arg_max(Y, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

# Minimize the MSE with plain gradient descent.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train = optimizer.minimize(cost)

# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())

# Initial training: 1000 gradient-descent steps on the full training set.
for step in range(1000):
    cost_val, hy_val, acc, _ = sess.run(
        [cost, hypothesis, accuracy, train], feed_dict={X: x_data, Y: y_data})
    print(step, "Cost: ", cost_val, "\nAccuracy:\n", acc)
def test_hy(data):
    """Evaluate the trained hypothesis (X @ W + b) on *data*.

    *data* must be an (n, 1) array matching the X placeholder; returns the
    (n, 1) array of predictions under the session's current weights.
    """
    return sess.run(hypothesis, feed_dict={X:data})
# --- Evaluation -----------------------------------------------------------
# Baseline spread between training targets and the sales figures over the
# SAME period.  BUGFIX: the original subtracted the (500, 1) training
# targets from the 200-point test-period sales, which numpy broadcast into a
# meaningless (500, 200) matrix before taking the std.
# TODO(review): confirm that comparing orders against sale[0:500] is the
# intended baseline.
BT = np.std(y_data.ravel() - sale[0:500])


def _train_to(total_steps, trained):
    """Continue gradient-descent training until `total_steps` have run in
    total; returns the new total so milestones can be chained."""
    for _ in range(total_steps - trained):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
    return total_steps


def _test_std(pred):
    """Std. dev. of the test-period residuals.  The prediction is flattened
    to 1-D first -- (200,) minus (200, 1) would broadcast to (200, 200)."""
    return np.std(y_real - pred.ravel())


# Snapshot the test-set prediction after increasing amounts of training.
# BUGFIX: the original called test_hy() six times with no training in
# between, so all six "step" predictions were identical.
_predictions = {}
_stds = {}
_trained = 1000  # steps already run by the loop above
for _milestone in (1000, 10000, 50000, 100000, 200000, 500000):
    _trained = _train_to(_milestone, _trained)
    _predictions[_milestone] = test_hy(x_test)
    _stds[_milestone] = _test_std(_predictions[_milestone])

# Keep the original variable names for any downstream code.
prediction_1000, std_1000 = _predictions[1000], _stds[1000]
prediction_10000, std_10000 = _predictions[10000], _stds[10000]
prediction_50000, std_50000 = _predictions[50000], _stds[50000]
prediction_100000, std_100000 = _predictions[100000], _stds[100000]
prediction_200000, std_200000 = _predictions[200000], _stds[200000]
prediction_500000, std_500000 = _predictions[500000], _stds[500000]

plt.plot(y_real, label='Sale Data')
plt.plot(prediction_100000, label='prediction_100,000_step')
plt.plot(prediction_200000, label='prediction_200,000_step')
plt.plot(prediction_500000, label='prediction_500,000_step')
plt.legend()
plt.show()
| [
"remainer00@hanmail.net"
] | remainer00@hanmail.net |
02004b7cb6d2ce6d006cbd1271aea766b8b73311 | 98a9db9a042192f16ea56ffee8a8be6d0f9d1045 | /Start.py | dda845283589661580522f3b411ce6cfe53952ad | [] | no_license | RobotLovingGuy/binaryclockgerberfiles | 3e12797e7aa61712a9012ba7c992c6c3c9dafb22 | a249131f4bb83a58db134e6b393a37ceda3b1504 | refs/heads/main | 2023-03-27T18:24:59.290556 | 2021-03-24T03:57:29 | 2021-03-24T03:57:29 | 345,001,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | import machine
import utime
from machine import Timer
# Pin map for the 13 binary-clock LEDs.  The individual names (aa..dd) are
# kept for compatibility; GPIO numbers are fixed by the board layout.
_LED_PINS = [machine.Pin(gpio, machine.Pin.OUT)
             for gpio in (17, 16, 26, 22, 20, 18, 12, 11, 10, 9, 8, 7, 6)]
aa, ba, ab, bb, cb, db, ac, bc, cc, ad, bd, cd, dd = _LED_PINS

button1 = machine.Pin(0, machine.Pin.IN)
button2 = machine.Pin(1, machine.Pin.IN)
onboard_led = machine.Pin(25, machine.Pin.OUT)


def _set_all(value):
    """Drive every clock LED to `value` (0 = off, 1 = on)."""
    for pin in _LED_PINS:
        pin.value(value)


# Power-on self test: all off, all on, all off, one second per state
# (mirrors the original pin-by-pin sequence).
onboard_led.value(0)
_set_all(0)
utime.sleep(1)

onboard_led.value(1)
_set_all(1)
utime.sleep(1)

onboard_led.value(0)
_set_all(0)
utime.sleep(1)

# Idle pattern shown while waiting for a mode button (same per-pin values
# as the original: aa, bb, bc and bd lit, everything else dark).
for _pin, _value in zip(_LED_PINS, (1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0)):
    _pin.value(_value)

# Poll the two mode buttons at 10 Hz: button1 launches the 12-hour clock,
# button2 the 24-hour clock (each module runs its clock loop on import).
run = 1
b1 = 0
b2 = 0
while run == 1:
    b1 = button1.value()
    b2 = button2.value()
    if b1 == 1:
        import BinaryRTCClock12hr
    if b2 == 1:
        import BinaryRTCClock24hr
    utime.sleep(.1)
"noreply@github.com"
] | noreply@github.com |
2e0ce8e8f05880e97b477dffefdf4d4f3920593b | d91f987c064f921c5112ed638b39f2728c11ea5e | /PostProcessing/OutlierDetection.py | 59a5ea5c1a21470ce8abbc249a2b1bc2bc16c759 | [] | no_license | p-lothaller/Air-Quality-Forecast-Challenge | 1f3b44f8aa1ab6c31575f3cbc5bf70ed73a39e12 | a21ac0eafb3f352d030fedcd5d3aafed9360f16b | refs/heads/main | 2023-04-26T02:15:34.453975 | 2021-05-10T15:56:24 | 2021-05-10T15:56:24 | 344,584,338 | 1 | 2 | null | 2021-05-05T18:37:30 | 2021-03-04T19:19:56 | Jupyter Notebook | UTF-8 | Python | false | false | 7,995 | py | import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
import numpy as np
from kneed import KneeLocator
import matplotlib as mpl
from sklearn.metrics import mean_absolute_error
import seaborn as sns
from statsmodels.tsa.arima.model import ARIMA
#import plottools
from matplotlib.dates import DateFormatter
from pandas.plotting import autocorrelation_plot
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import adfuller
# Global plot styling for every figure produced below.
sns.set()
sns.set_style("whitegrid")
# NOTE(review): selecting a backend after matplotlib.pyplot has already been
# imported (above) has no effect on recent Matplotlib versions -- move
# mpl.use('TkAgg') above the pyplot import if TkAgg is actually required.
mpl.use('TkAgg')
def difference(dataset, interval=1):
    """Return the lag-`interval` differenced series of *dataset* as a list.

    Element i of the result is dataset[i + interval] - dataset[i]; the
    output is `interval` elements shorter than the input.
    """
    return [dataset[i] - dataset[i - interval]
            for i in range(interval, len(dataset))]
# Read in csv file with the raw air-quality measurements and drop rows with
# missing values.  NOTE(review): dropna() leaves gaps in the index (it is
# not reset), so positional indices and index labels differ below.
raw_data = pd.read_csv('airdata_all.csv', index_col=False)
raw_data = raw_data.dropna()

# Print Statistical Data about selected data to analyse
# for data in analyse_data:
#     print(raw_data[data].describe())

# add data that needs to be analysed (column names of raw_data)
analyse_data = ['PM2.5']
# analyse_data = ['PM1', 'PM2.5', 'PM10', 'hum']
# Detect outliers per measurement column with two independent methods
# (EMA confidence band + DBSCAN), take their consensus, repair the flagged
# points (interpolation and ARIMA forecasts), and save the result.
for data in analyse_data:
    # ax = raw_data[[data]].plot(linewidth=2, figsize=(12, 6), alpha=0.8)
    # plt.xlabel('Data points')
    # plt.ylabel(data + ' concentration (µg/m³)')
    # plt.title('Raw ' + data + ' Data')
    # plt.show()

    # --- Statistical outliers: EMA smoothing plus a ~95% confidence band
    # (MAE + 1.96 * std of the residuals) around it.
    raw_data['EMA_0.1-' + data] = raw_data[data].ewm(alpha=0.1, adjust=False).mean()
    mae = mean_absolute_error(raw_data[data], raw_data['EMA_0.1-' + data])
    deviation = np.std(raw_data[data] - raw_data['EMA_0.1-' + data])
    raw_data['upper_bound'] = raw_data['EMA_0.1-' + data] + (mae + 1.96 * deviation)
    raw_data['lower_bound'] = raw_data['EMA_0.1-' + data] - (mae + 1.96 * deviation)

    # identifying outliers:
    upper_outliers = np.array(raw_data[data] > raw_data['upper_bound'])
    lower_outliers = np.array(raw_data[data] < raw_data['lower_bound'])
    # BUGFIX: the original per-row loop assigned the *whole* 'outlier_ema'
    # column on every iteration, so the column ended up holding only the
    # last row's verdict.  Vectorize: one boolean per row.
    ema_outlier_mask = upper_outliers | lower_outliers
    raw_data['outlier_ema'] = ema_outlier_mask
    outlier_pos_ema = list(np.where(ema_outlier_mask)[0])

    # Values and index labels of the statistical outliers (for plotting).
    x_ema = []
    y_ema = []
    for pos in outlier_pos_ema:
        x_ema.append(np.array(raw_data[data])[pos])
        y_ema.append(raw_data[data].index[pos])
    ax = plt.gca()
    # raw_data[[data, 'EMA_0.1-' + data]].plot(linewidth=2, figsize=(12, 6), alpha=0.8, ax=ax, label='EMA')
    # raw_data[['upper_bound']].plot(linewidth=1, color='red', alpha=0.8, ax=ax, label='Upper bound')
    # raw_data[['lower_bound']].plot(linewidth=1, color='green', alpha=0.8, ax=ax, label='Lower bound')
    # plt.plot(y_ema, x_ema, 'ro', markersize=4, label='Outliers')
    # plt.title('Statistical Outliers')
    # plt.xlabel('Data points')
    # plt.ylabel(data + ' concentration (µg/m³)')
    # plt.legend()
    # plt.show()

    # --- DBSCAN outliers.  eps is estimated from the knee of the sorted
    # k-distance graph; minPts=4 is the default for 2D data
    # (Ester et al., 1996).
    minPts = 4
    neighbors = NearestNeighbors(n_neighbors=minPts)
    neighbors_fit = neighbors.fit(np.array(raw_data[data]).reshape(-1, 1))
    distances, indices = neighbors_fit.kneighbors(np.array(raw_data[data]).reshape(-1, 1))
    distances = np.sort(distances, axis=0)
    distances = distances[:, 1]
    x = np.linspace(0, len(distances), len(distances))
    # plt.plot(x, distances)
    # plt.xlabel('Data points')
    # plt.ylabel('k-distance')
    # plt.show()
    kn = KneeLocator(x, distances, curve='convex', direction='increasing')
    # kn.plot_knee()
    # plt.show()
    epsilon = kn.elbow_y

    clustering1 = DBSCAN(eps=epsilon, min_samples=minPts).fit(np.array(raw_data[data]).reshape(-1, 1))
    labels = clustering1.labels_
    outlier_pos_DBSCAN = np.where(labels == -1)[0]  # noise points

    # BUGFIX: same whole-column assignment bug as above -- flag the rows
    # whose *position* DBSCAN labelled as noise.
    raw_data['outlier_DBSCAN'] = np.isin(np.arange(len(raw_data)), outlier_pos_DBSCAN)

    x_DBSCAN = []
    y_DBSCAN = []
    for pos in outlier_pos_DBSCAN:
        x_DBSCAN.append(np.array(raw_data[data])[pos])
        y_DBSCAN.append(raw_data[data].index[pos])
    # raw_data[[data]].plot(linewidth=2, figsize=(12, 6), alpha=0.8)
    # plt.plot(y_DBSCAN, x_DBSCAN, 'ro', markersize=4, label='Outliers')
    # plt.title('DBSCAN Outliers')
    # plt.xlabel('Data points')
    # plt.ylabel(data + ' concentration (µg/m³)')
    # plt.legend()
    # plt.show()

    # --- Consensus: only points flagged by BOTH detectors count.
    outlier_pos = sorted(set(outlier_pos_ema).intersection(outlier_pos_DBSCAN))
    x = []
    y = []
    for pos in outlier_pos:
        x.append(np.array(raw_data[data])[pos])
        y.append(raw_data[data].index[pos])
    # raw_data[[data]].plot(linewidth=2, figsize=(12, 6), alpha=0.8)
    # plt.plot(y, x, 'r*', markersize=8, label='Outliers')
    # plt.title('Detected Outliers')
    # plt.xlabel('Data points')
    # plt.ylabel(data + ' concentration (µg/m³)')
    # plt.legend()
    # plt.show()

    # --- Repair 1: replace each outlier by interpolating its neighbours.
    # NOTE(review): the '+ 1' label offset and the index gaps left by
    # dropna() mean .at[index + 1] may not address the row at positional
    # index `index` -- verify against the original CSV before trusting the
    # repaired columns.
    raw_data[data + '_updated'] = raw_data[data]
    raw_data[data + '_outlier'] = False
    for index in outlier_pos:
        raw_data.at[index + 1, data + '_updated'] = (raw_data[data][index - 1] + raw_data[data][index + 3]) / 2
        raw_data.at[index + 1, data + '_outlier'] = True

    # --- Repair 2: forecast each outlier with an ARIMA model fitted on the
    # (already partially repaired) series up to that point.
    raw_data[data + '_updatedARIMA'] = raw_data[data]
    X = raw_data[data].values
    diff = difference(X, 1)  # differentiate to make data stationary
    # plt.title('Differentiated data')
    # plt.plot(diff)  # Is data stationary?
    # plt.show()
    # Determine ARIMA values based of these plots
    # autocorrelation_plot(diff)
    # plt.show()
    # plot_pacf(diff, lags = 50)
    # plt.show()
    for outlier_position in outlier_pos:
        time_series = raw_data[data + '_updatedARIMA']
        series = time_series[0:outlier_position]
        X = series.values
        model = ARIMA(X, order=(0, 1, 2))
        model_fit = model.fit()
        forecast = model_fit.forecast()[0]
        raw_data.at[outlier_position + 1, data + '_updatedARIMA'] = forecast
        # print('Forecast: %f' % forecast)

    # ax = raw_data[[data]].plot(linewidth=2, figsize=(12, 6), alpha=0.8)
    raw_data[data + '_updatedARIMA'].plot(linewidth=2, figsize=(12, 6), alpha=0.8, label='Updated')
    plt.title('Updated PM2.5 Time series')
    plt.xlabel('Data points')
    plt.ylabel(data + ' concentration (µg/m³)')
    plt.legend()
    plt.show()

    # --- Zoomed before/after view (+/- `frame` samples) around each repair.
    frame = 25
    for i in outlier_pos:
        x_zoom = []
        y_zoomUpdate = []
        y_zoomOld = []
        x_outlier = []
        y_new = []
        y_old = []
        for j in range(i - frame, i + frame):
            y_zoomUpdate.append(raw_data[data + '_updatedARIMA'][j])
            y_zoomOld.append(raw_data[data][j])
            x_zoom.append(j)
            if j in outlier_pos:
                x_outlier.append(j + 1)
                y_new.append(raw_data[data + '_updatedARIMA'][j + 1])
                y_old.append(raw_data[data][j + 1])
        plt.plot(x_zoom, y_zoomOld)
        plt.plot(x_zoom, y_zoomUpdate)
        plt.plot(x_outlier, y_new, 'go', markersize=6, label='Update using ARIMA')
        plt.plot(x_outlier, y_old, 'ro', markersize=6, label='Outlier')
        plt.xlabel('Data points')
        plt.ylabel(data + ' concentration (µg/m³)')
        plt.legend()
        plt.show()

raw_data.to_csv('processedData.csv')
| [
"77859089+p-lothaller@users.noreply.github.com"
] | 77859089+p-lothaller@users.noreply.github.com |
c76a9ab060509bbcd85398bc82c1cc512d5b668b | bfa1e38b05fb7b657d5126c656a2f52ea44d2e9a | /venv/Lib/site-packages/darr/raggedarray.py | 220fab4dea4e9538773608b46e016fd71394797d | [
"BSD-3-Clause"
] | permissive | Maksims0230/VolumeControl | 880921a900bc81a4c9e6cc3198f5829a4e5199dd | e7f5e984fb653f9176f38846a9744cff427f556a | refs/heads/master | 2023-04-10T17:33:20.534155 | 2021-04-16T09:22:30 | 2021-04-16T09:22:30 | 358,544,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,249 | py | from pathlib import Path
from contextlib import contextmanager
import numpy as np
from ._version import get_versions
from .array import Array, MetaData, asarray, \
check_accessmode, delete_array, create_array, \
truncate_array
from .datadir import DataDir, create_datadir
from .metadata import MetaData
from .readcoderaggedarray import readcode
from .utils import wrap
__all__ = ['RaggedArray', 'asraggedarray', 'create_raggedarray',
'delete_raggedarray', 'truncate_raggedarray']
# FIXME needs doc
class RaggedArray:
    """
    Disk-based sequence of arrays that may have a variable length in maximally
    one dimension.

    Implemented as two regular darr Arrays inside one directory: 'values'
    holds all subarrays concatenated along their first axis, and 'indices'
    holds the (start, end) positions of each subarray within 'values'.
    """
    # Fixed names of the files/subdirectories that make up a ragged array on
    # disk; registered as protected paths so DataDir will not delete them.
    _valuesdirname = 'values'
    _indicesdirname = 'indices'
    _arraydescrfilename = 'arraydescription.json'
    _metadatafilename = 'metadata.json'
    _readmefilename = 'README.txt'
    _protectedfiles = {_valuesdirname, _indicesdirname,
                       _readmefilename, _metadatafilename,
                       _arraydescrfilename}
    _formatversion = get_versions()['version']

    def __init__(self, path, accessmode='r'):
        """Open the ragged array stored at *path*.

        Parameters
        ----------
        path: str or pathlib.Path
            Directory containing the 'values' and 'indices' sub-arrays.
        accessmode: {'r', 'r+'}
            'r' for read-only, 'r+' for read/write access.
        """
        self._datadir = DataDir(path=path,
                                protectedpaths=self._protectedfiles)
        self._path = self._datadir._path
        self._accessmode = check_accessmode(accessmode)
        self._valuespath = self._path / self._valuesdirname
        self._indicespath = self._path / self._indicesdirname
        self._arraydescrpath = self._path / self._arraydescrfilename
        self._values = Array(self._valuespath, accessmode=self._accessmode)
        self._indices = Array(self._indicespath, accessmode=self._accessmode)
        self._metadata = MetaData(self._path / self._metadatafilename,
                                  accessmode=accessmode)
        # Cached layout summary, persisted via _update_arraydescr whenever
        # the ragged array changes.
        arrayinfo = {}
        arrayinfo['len'] = len(self._indices)
        arrayinfo['size'] = self._values.size
        arrayinfo['atom'] = self._values.shape[1:]
        arrayinfo['numtype'] = self._values._arrayinfo['numtype']
        arrayinfo['darrversion'] = RaggedArray._formatversion
        arrayinfo['darrobject'] = 'RaggedArray'
        self._arrayinfo = arrayinfo

    @property
    def accessmode(self):
        """Data access mode of metadata, {'r', 'r+'}."""
        return self._accessmode

    @accessmode.setter
    def accessmode(self, value):
        # Propagate the new mode to metadata and both underlying arrays.
        self._accessmode = check_accessmode(value)
        self._metadata.accessmode = value
        self._values.accessmode = value
        self._indices.accessmode = value

    @property
    def dtype(self):
        """Numpy data type of the array values.
        """
        return self._values._dtype

    @property
    def atom(self):
        """Dimensions of the non-variable axes of the arrays.
        """
        return tuple(self._values._shape[1:])

    @property
    def datadir(self):
        """Data directory object with many useful methods, such as
        writing information to text or json files, archiving all data,
        calculating checksums etc."""
        return self._datadir

    @property
    def narrays(self):
        """Number of subarrays in the ragged array.
        """
        return self._indices.shape[0]

    @property
    def metadata(self):
        """
        Dictionary of meta data.
        """
        return self._metadata

    @property
    def mb(self):
        """Storage size in megabytes of the ragged array.
        """
        return self._values.mb + self._indices.mb

    @property
    def path(self):
        """File system path to array data"""
        return self._path

    @property
    def size(self):
        """Total number of values in the data array.
        """
        return int(self._values._size)

    def __getitem__(self, item):
        # Only integer indexing is supported: each item is one subarray,
        # read from 'values' via its (start, end) pair in 'indices'.
        if not np.issubdtype(type(item), np.integer):
            raise TypeError("Only integers can be used for indexing " \
                            "darraylists, which '{}' is not".format(item))
        index = slice(*self._indices[item])
        return self._values[index]

    def __len__(self):
        return self._indices.shape[0]

    def __repr__(self):
        return (f'darr ragged array ({self.narrays} arrays with atom '
                f'shape {self.atom}, {self.accessmode})')

    __str__ = __repr__

    def _update_readmetxt(self):
        # Regenerate the human-readable README with read-code examples.
        txt = readcodetxt(self)
        self._datadir._write_txt(self._readmefilename, txt, overwrite=True)

    def _update_arraydescr(self, **kwargs):
        # Merge kwargs into the cached array info and persist it as json.
        self._arrayinfo.update(kwargs)
        self._datadir._write_jsondict(filename=self._arraydescrfilename,
                                      d=self._arrayinfo, overwrite=True)

    def _append(self, array):
        # Append values and record the (start, end) interval of the new
        # subarray; does NOT update the on-disk description files.
        size = len(array)
        endindex = self._values.shape[0]
        self._values.append(np.asarray(array, dtype=self.dtype))
        self._indices.append([[endindex, endindex + size]])

    def append(self, array):
        """Append an array-like object as a new subarray and update the
        on-disk description files accordingly."""
        self._append(array)
        self._update_readmetxt()
        self._update_arraydescr(len=len(self._indices),
                                size=self._values.size)

    def copy(self, path, accessmode='r', overwrite=False):
        """Copy the ragged array (data and metadata) to a new directory
        *path* and return the new RaggedArray."""
        arrayiterable = (self[i] for i in range(len(self)))
        metadata = dict(self.metadata)
        return asraggedarray(path=path, arrayiterable=arrayiterable,
                             dtype=self.dtype, metadata=metadata,
                             accessmode=accessmode, overwrite=overwrite)

    @contextmanager
    def _view(self, accessmode=None):
        # Keep both underlying arrays open for the duration of the context,
        # so repeated reads/writes do not reopen the files each time.
        with self._indices._open_array(accessmode=accessmode) as (iv, _), \
             self._values._open_array(accessmode=accessmode) as (vv, _):
            yield iv, vv

    def iter_arrays(self, startindex=0, endindex=None, stepsize=1,
                    accessmode=None):
        """Yield (copies of) consecutive subarrays from *startindex* up to
        *endindex* (default: all), taking every *stepsize*-th subarray."""
        if endindex is None:
            endindex = self.narrays
        with self._view(accessmode=accessmode):
            for i in range(startindex, endindex, stepsize):
                yield np.array(self[i], copy=True)

    def iterappend(self, arrayiterable):
        """Iteratively append data from a data iterable.

        The iterable has to yield array-like objects compliant with darr.
        The length of first dimension of these objects may be different,
        but the length of other dimensions, if any, has to be the same.

        Parameters
        ----------
        arrayiterable: an iterable that yield array-like objects

        Returns
        -------
            None

        """
        # TODO refactor such that info files are not updated at each append?
        with self._view():
            for a in arrayiterable:
                self._append(a)
        self._update_readmetxt()
        self._update_arraydescr(len=len(self._indices),
                                size=self._values.size)
def asraggedarray(path, arrayiterable, dtype=None, metadata=None,
                  accessmode='r+', overwrite=False):
    """Create a disk-based RaggedArray at `path` from an iterable of
    array-like subarrays and return it opened with `accessmode`.

    The first subarray fixes the dtype and the atom (trailing dims);
    the remaining subarrays are streamed straight into the open files.
    """
    path = Path(path)
    # Normalize plain sequences into a generator so next() works below.
    # NOTE(review): 'next' is the Python 2 iterator attribute; on
    # Python 3 this wraps every input, which is harmless but redundant.
    if not hasattr(arrayiterable, 'next'):
        arrayiterable = (a for a in arrayiterable)
    bd = create_datadir(path=path, overwrite=overwrite)
    firstarray = np.asarray(next(arrayiterable), dtype=dtype)
    dtype = firstarray.dtype
    # Two sub-arrays on disk: the concatenated values, and the
    # [start, end) index pairs describing each subarray's slice.
    valuespath = bd.path.joinpath(RaggedArray._valuesdirname)
    indicespath = bd.path.joinpath(RaggedArray._indicesdirname)
    valuesda = asarray(path=valuespath, array=firstarray, dtype=dtype,
                       accessmode='r+', overwrite=overwrite)
    firstindices = [[0, len(firstarray)]]
    indicesda = asarray(path=indicespath, array=firstindices,
                        dtype=np.int64, accessmode='r+',
                        overwrite=overwrite)
    valueslen = firstindices[0][1]
    indiceslen = 1
    # Stream remaining subarrays with both files held open, tracking
    # lengths locally; stored lengths are fixed up once afterwards,
    # which is cheaper than per-append metadata writes.
    with valuesda._open_array(accessmode='r+') as (_, vfd), \
         indicesda._open_array(accessmode='r+') as (_, ifd):
        for array in arrayiterable:
            lenincreasevalues = valuesda._append(array, fd=vfd)
            starti, endi = valueslen, valueslen + lenincreasevalues
            lenincreaseindices = indicesda._append([[starti, endi]], fd=ifd)
            valueslen += lenincreasevalues
            indiceslen += lenincreaseindices
    valuesda._update_len(lenincrease=valueslen-firstindices[0][1])
    valuesda._update_readmetxt()
    indicesda._update_len(lenincrease=indiceslen-1)
    indicesda._update_readmetxt()
    # Write the top-level ragged-array descriptor.
    datainfo = {}
    datainfo['len'] = len(indicesda)
    datainfo['size'] = valuesda.size
    datainfo['atom'] = valuesda.shape[1:]
    datainfo['numtype'] = valuesda._arrayinfo['numtype']
    datainfo['darrversion'] = Array._formatversion
    datainfo['darrobject'] = 'RaggedArray'
    bd._write_jsondict(filename=RaggedArray._arraydescrfilename,
                       d=datainfo, overwrite=overwrite)
    metadatapath = path.joinpath(Array._metadatafilename)
    if metadata is not None:
        bd._write_jsondict(filename=Array._metadatafilename,
                           d=metadata, overwrite=overwrite)
    elif metadatapath.exists():  # no metadata but file exists, remove it
        metadatapath.unlink()
    # NOTE(review): `ra` is only created to refresh the README; a second
    # instance is then returned. Returning `ra` directly would avoid the
    # double construction.
    ra = RaggedArray(path=path, accessmode=accessmode)
    ra._update_readmetxt()
    return RaggedArray(path=path, accessmode=accessmode)
def create_raggedarray(path, atom=(), dtype='float64', metadata=None,
                       accessmode='r+', overwrite=False):
    """Create an empty RaggedArray on disk whose subarrays have trailing
    dimensions `atom` (use `()` for a list of 1-D arrays)."""
    if not hasattr(atom, '__len__'):
        raise TypeError(f'shape "{atom}" is not a sequence of dimensions.\n'
                        f'If you want just a list of 1-dimensional arrays, '
                        f'use "()"')
    # Seed the store with one zero-length subarray of the right shape.
    shape = [0] + list(atom)
    ar = np.zeros(shape, dtype=dtype)
    ra = asraggedarray(path=path, arrayiterable=[ar], metadata=metadata,
                       accessmode=accessmode, overwrite=overwrite)
    # the current ragged array has one element, which is an empty array
    # but we want an empty ragged array => we should get rid of the indices
    create_array(path=ra._indicespath, shape=(0,2), dtype=np.int64,
                 overwrite=True)
    ra._update_arraydescr(len=0, size=0)
    return RaggedArray(ra.path, accessmode=accessmode)
# Boilerplate README text describing the on-disk ragged-array layout;
# readcodetxt() prepends this to the per-array example code.
readmetxt = wrap('Disk-based storage of a ragged array') + '\n' + \
            wrap('====================================') + '\n\n' + \
            wrap('This directory is a data store for a numeric ragged array, '
                 'which is a sequence of arrays in which one dimension varies '
                 'in length. On disk, these arrays are concatenated along '
                 'their variable dimension. The easiest way to access the '
                 'data is to use the Darr library '
                 '(https://pypi.org/project/darr/) in Python, as follows:') \
            + '\n\n' \
            + '>>> import darr\n' \
            + ">>> a = darr.RaggedArray('path_to_array_dir')\n\n" + \
            wrap("where 'path_to_array_dir' is the name of the array "
                 "directory, which is the one that also contains this README.")\
            + "\n\n" + \
            wrap('If Darr is not available, the data can also be read in '\
                 'other environments, with more effort, using the '\
                 'description and example code below.') + '\n\n\n' \
            + 'Description of data storage\n' \
            + '---------------------------\n' + \
            wrap('There are two subdirectories, each containing an array '
                 'stored in a self-explanatory format. See the READMEs in '
                 'the corresponding directories to find out in detail out '
                 'how to read them. Example code is provided below '
                 'for a number of analysis environments, which in many cases '
                 'is sufficient.') + '\n\n' + \
            wrap('The subdirectory "values" holds the numerical data itself, '
                 'where subarrays are simply appended along their variable '
                 'length dimension (first axis). So the number of dimensions '
                 'of the values array is one less than that of the ragged '
                 'array. A particular subarray can be be retrieved using the '
                 'appropriate start and end index along the first axis of the '
                 'values array. These indices (counting from 0) are stored in '
                 'a different 2-dimensional array in the subdirectory '
                 '"indices". The first axis of the index array represents the '
                 'sequence number of the subarray and the second axis '
                 '(length 2) represents start and (non-inclusive) end '
                 'indices to be used on the values array. To read the n-th '
                 'subarray, read the nt-h start and end indices from the '
                 'indices array and use these to read the array data from '
                 'the values array.') + '\n\n\n'
def readcodetxt(ra):
    """Returns text on how to read a Darr ragged array numeric binary data in
    various programming languages.

    Parameters
    ----------
    ra: Darr raggedarray

    """
    s = readmetxt
    # Idiom fix: two of these headings were f-strings without any
    # placeholders; plain literals say the same thing.
    s += wrap(f'This ragged array has {len(ra)} subarrays. ') + '\n\n' + \
         wrap('Example code for reading the data') + '\n' + \
         wrap('=================================') + '\n\n'
    # Per-language snippets; readcode() returns None when a language is
    # not supported for this array, in which case it is skipped.
    languages = (
        ("Python with Numpy (memmap):", "numpymemmap"),
        ("R:", "R"),
        ("Matlab:", "matlab")
    )
    for heading, language in languages:
        codetext = readcode(ra, language)
        if codetext is not None:
            s += f"{heading}\n{'-' * len(heading)}\n{codetext}\n"
    return s
def delete_raggedarray(ra):
    """
    Delete Darr ragged array data from disk.

    Parameters
    ----------
    ra: RaggedArray instance, or file system path to a ragged array
        data directory. Must be writable (accessmode 'r+').
    """
    try:
        if not isinstance(ra, RaggedArray):
            ra = RaggedArray(ra, accessmode='r+')
    except Exception:
        # Bug fix: this was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; narrowed to Exception, matching
        # the handler already used in truncate_raggedarray.
        raise TypeError(f"'{ra}' not recognized as a Darr ragged array")
    if not ra.accessmode == 'r+':
        raise OSError('Darr ragged array is read-only; set accessmode to '
                      '"r+" to change')
    # Remove the top-level bookkeeping files first, then the two
    # sub-arrays, and finally the (now empty) directory itself.
    for fn in ra._protectedfiles:
        path = ra.path.joinpath(fn)
        if path.exists() and not path.is_dir():
            path.unlink()
    delete_array(ra._values)
    delete_array(ra._indices)
    try:
        ra._path.rmdir()
    except OSError as error:
        # Directory not empty: there are files we did not create, so we
        # refuse to delete them and report instead.
        message = f"Error: could not fully delete Darr ragged array " \
                  f"directory " \
                  f"'{ra.path}'. It may contain additional files that are " \
                  f"not part of the darr. If so, these should be removed " \
                  f"manually."
        raise OSError(message) from error
def truncate_raggedarray(ra, index):
    """Truncate darr ragged array.

    Parameters
    ----------
    ra: array or str or pathlib.Path
        The darr object to be truncated or file system path to it.
    index: int
        The index along the first axis at which the darr ragged array should
        be truncated. Negative indices can be used but the resulting length of
        the truncated darr should be 0 or larger and smaller than the
        current length. Any integer-like object (e.g. a numpy integer
        scalar) is accepted.

    """
    import operator

    try:
        if not isinstance(ra, RaggedArray):
            ra = RaggedArray(ra, accessmode='r+')
    except Exception:
        raise TypeError(f"'{ra}' not recognized as a darr Ragged Array")
    # Fixes the FIXME: accept numpy integer scalars too. operator.index
    # converts any integer-like object and raises TypeError for floats
    # and other non-integral types.
    try:
        index = operator.index(index)
    except TypeError:
        raise TypeError(f"'index' should be an int (is {type(index)})")
    with ra._indices._open_array() as (mmap, _):
        # Length the indices array would have after slicing to `index`;
        # slicing semantics handle negative indices for us.
        newlen = len(mmap[:index])
        del mmap
    ra._values.check_arraywriteable()
    ra._indices.check_arraywriteable()
    if 0 <= newlen < len(ra):
        truncate_array(ra._indices, index=newlen)
        # The end offset of the last remaining subarray determines where
        # the concatenated values array must now end.
        if newlen == 0:
            vi = 0
        else:
            vi = int(ra._indices[-1][-1])
        truncate_array(ra._values, index=vi)
        ra._update_readmetxt()
        ra._update_arraydescr(len=len(ra._indices), size=ra._values.size)
    else:
        raise IndexError(f"'index' {index} would yield a ragged array of "
                         f"length {newlen}, which is invalid (current length "
                         f"is {len(ra)})")
| [
"maksims023@gmail.com"
] | maksims023@gmail.com |
0851909e363479ba235d4695efcaea65d70d7f9d | 7acc8ec7bc81a2acecb91ab513710c38aeaa1299 | /HashTable/LongestChain.py | fed737b14b7d0d49219d5b0f3f1d5f856da8617f | [] | no_license | zhengli0817/ProgrammingInterviewQuestions | 453d29a2d29020d5f12b129b1c71839cd88fbfda | 595c709b1d0516c16a616a0138c6c84dcdec5b46 | refs/heads/master | 2021-01-10T01:49:03.294103 | 2016-12-01T00:12:12 | 2016-12-01T00:12:12 | 50,531,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | # https://instant.1point3acres.com/thread/189098
# http://massivealgorithms.blogspot.com/2016/05/like-coding-mj-56-longest-string-chain.html
# http://buttercola.blogspot.com/2015/10/zenefits-oa-longest-chain.html?m=1
def LongestChain(words):
    """Return the length of the longest chain of words where each next
    word is formed by deleting exactly one character of the previous
    one, and every word in the chain belongs to `words`."""
    if not words or len(words) == 0:
        return 0
    by_length = sorted(words, key=len)
    vocabulary = set(by_length)
    chain_len = {}  # word -> length of the longest chain starting at it
    best = 0
    for word in by_length:
        # A chain starting at `word` can never exceed len(word) words,
        # so shorter words cannot improve on the current best.
        if len(word) > best:
            best = max(best, dfs(word, vocabulary, chain_len) + 1)
    return best


def dfs(word, wordSet, wordDict):
    """Memoized search: returns (chain length of `word`) - 1 and caches
    the full chain length in wordDict[word]."""
    longest = 0
    for position in range(len(word)):
        shorter = word[:position] + word[position + 1:]
        if shorter not in wordSet:
            continue
        if shorter in wordDict:
            longest = max(longest, wordDict[shorter])
        else:
            longest = max(longest, dfs(shorter, wordSet, wordDict) + 1)
    wordDict[word] = longest + 1
    return longest
def Test():
    # Reads n, then n words, from stdin and prints the longest chain
    # length. Python 2 only: uses raw_input and the print statement.
    n = int(raw_input())
    words = []
    for i in range(n):
        words.append(raw_input())
    print LongestChain(words)
| [
"zhengli0817@gmail.com"
] | zhengli0817@gmail.com |
27af4d42e1a0cdc16826948e7d69e7e6b8a9ef94 | 5b683c7f0cc23b1a2b8927755f5831148f4f7e1c | /Python_Study/DataStructureAndAlgorithm/classical_algorithm/binary_search.py | 556f7aa8a3e48cec1ab4feb7b9ccb23c04cbbe3c | [] | no_license | Shmilyqjj/Shmily-py | 970def5a53a77aa33b93404e18c57130f134772a | 770fc26607ad3e05a4d7774a769bc742582c7b64 | refs/heads/master | 2023-09-02T04:43:39.192052 | 2023-08-31T03:28:39 | 2023-08-31T03:28:39 | 199,372,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,408 | py | #!/usr/bin/env python
# encoding: utf-8
"""
:Description: 二分查找算法
:Author: 佳境Shmily
:Create Time: 2020/3/15 21:34
:File: binary_search
:Site: shmily-qjj.top
:Desc:
二分查找场景:寻找一个数、寻找左侧边界、寻找右侧边界。
"""
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def binary_search(sorted_list, item, asc=True):
    """Iterative binary search.

    :param sorted_list: a sorted list (ascending by default)
    :param item: the value to look for
    :param asc: True if sorted_list is ascending; if False the list is
        reversed first, and the returned index refers to the reversed
        (ascending) order.
    :return: index of the item if found, otherwise -1
    """
    seq = sorted_list if asc else list(reversed(sorted_list))
    low = 0                 # index of the smallest candidate
    high = len(seq) - 1     # index of the largest candidate
    steps = 0               # number of halving steps, for logging
    while low <= high:
        # Bug fix: the original computed mid as ((low+high)>>1)+1 when
        # low+high was even, which indexes one past `high` (IndexError
        # for e.g. a single-element list). The plain floor midpoint is
        # always within [low, high].
        mid = (low + high) // 2
        steps += 1
        if seq[mid] == item:
            logger.info('二分法分了%s次,找到元素' % steps)
            return mid
        if seq[mid] < item:
            # Target is larger than the middle: search the upper half.
            low = mid + 1
        else:
            # Target is smaller than the middle: search the lower half.
            high = mid - 1
    logger.info('二分法分了%s次,未找到元素。' % steps)
    return -1
def recursion_binary_search(sorted_list, start, end, item):
    """Recursive binary search over sorted_list[start..end] (inclusive,
    ascending).

    :param sorted_list: ascending sorted list
    :param start: first index of the search range
    :param end: last index of the search range
    :param item: the value to look for
    :return: index of the item if found, otherwise -1
    """
    # Strictly greater, not >=: when mid+1 == end, index `end` may still
    # hold the item.
    if start > end:
        return -1
    # Bug fix: the original added 1 to the midpoint when start+end was
    # even, which reads past `end` (IndexError for e.g. a single
    # element). Floor midpoint is always within [start, end].
    mid = (start + end) // 2
    if sorted_list[mid] == item:
        return mid
    if item > sorted_list[mid]:
        return recursion_binary_search(sorted_list, mid + 1, end, item)
    return recursion_binary_search(sorted_list, start, mid - 1, item)
if __name__ == '__main__':
    # Ad-hoc smoke tests: ascending search, descending search, then the
    # recursive variant.
    m=[1,2,3,4,8,9,11,12,14,18,19,20,28,29]
    print(binary_search(m,20))
    m1 = [28, 20, 19, 18, 14, 12, 11, 9, 8, 4, 3, 2, 1]
    print(binary_search(m1,14,False))
    # #########################################################
    m=[1,2,3,4,8,9,11,12,14,18,19,20,28]
    print(recursion_binary_search(m, 0, len(m) - 1, 14))
| [
"710552907@qq.com"
] | 710552907@qq.com |
88cd9ffbf994ee8b07ef7aaf89f1231ef2c7d509 | 21ce617f03d7b38591866ae8d7064bcbd0099e8f | /SpellingCorrector/test.py | dbedf9e510a486eee3a225ac5395ab5f3ab03f2d | [] | no_license | zhanghaiting001/PythonCoding | c19fde15495457f2118835f83cd99d1ab8c82a3a | 73465f7ac7437305081235dc5999752ed9ef5a2c | refs/heads/master | 2020-07-10T13:03:15.680887 | 2019-10-22T15:32:12 | 2019-10-22T15:32:12 | 204,268,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py |
def multipy(y):
print('multipy')
return y*y
"函数参数的multipy只计算一次"
def testFunc(x,y=multipy(10)):
return x*y
#print(testFunc(20))
#print(testFunc(10))
def testMax(x):
return countDic[x]
countDic = {'a':10,'b':10,'c':3,'d':3}
"dict countDic迭代是key .values()迭代是values .items()是key,value对"
for x in countDic: #key 如果想两个,需要countDic.items()
print(x) # a b c d
"max sorted 的时间复杂度??查找dict中value的时间复杂度??查找key是o(1)"
print(max(countDic,key=testMax)) #'a'
print(max(countDic,key=lambda x:countDic[x])) #a
maxvalues = max(countDic.values())
print([k for k,v in countDic.items() if v==maxvalues]) #这个时间复杂度也不低
print(sorted(countDic,key=lambda d:countDic[d],reverse=True)) #['a', 'b', 'c', 'd']
print(sorted(countDic.items(),key=lambda d:d[1],reverse=True)) #[('a', 10), ('b', 10), ('c', 3), ('d', 3)]
| [
"zhanghaiting001@126.com"
] | zhanghaiting001@126.com |
25428ab231ed9df3e44a578437a041fdaa2fea36 | a61111856d5b5cec07e262816a3abe7b3221cf8f | /code/optical_flow.py | 7ba27cc4cbdd04a9bf8b06f65d00cdfd85d02527 | [] | no_license | ColinJLZhang/thesis | 055d8842054aadf451c752e3fd00bfac01534c9d | c4ddd445855332b5fc5ed08d0b0c4595a2159e29 | refs/heads/master | 2020-10-01T18:43:32.571784 | 2020-06-20T03:21:19 | 2020-06-20T03:21:19 | 227,601,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,059 | py | #!/usr/bin/python
# coding:utf8
import numpy as np
import cv2
# Input video and (unused here) output directory.
root = r"E:\postgraduate\渔机所\烟台东方海洋出差\2018_10出差\GOPR6437.MP4"
savepath = r"E:\postgraduate\论文\thesis\img"
cap = cv2.VideoCapture(root)
# Properties 3/4/5 are frame width, frame height, and fps.
print("info:\n width:{}, height:{}, fps:{}".format(cap.get(3), cap.get(4), cap.get(5)))
start = 20030   # start position passed to cam.set() in __main__
count = 0       # frames processed so far
num = 60        # NOTE(review): defined but not used below — confirm
col = 6         # mosaic grid: columns of 192x108 tiles
row = 5         # mosaic grid: rows of 192x108 tiles
step=5          # pixel stride between drawn flow vectors
# Mosaic canvases (row*108 x col*192, 3 channels); img2 is unused below.
img1 = np.zeros((row*108, col*192, 3))
img2 = np.zeros((row*108, col*192, 3))
def mean_flow(flow):
    """Return the summed magnitude of a dense optical-flow field.

    :param flow: (H, W, 2) array-like of per-pixel (dx, dy) displacement
        vectors, e.g. the output of cv2.calcOpticalFlowFarneback.
    :return: sum over all pixels of sqrt(dx**2 + dy**2), as a numpy
        scalar.

    NOTE(review): despite the name this is the *total* flow magnitude,
    not the mean; the name is kept for interface compatibility.
    """
    f = np.asarray(flow, dtype=np.float64)
    # Performance: np.hypot computes sqrt(dx**2 + dy**2) element-wise in
    # one vectorized pass, replacing the original per-pixel Python loop.
    return np.hypot(f[..., 0], f[..., 1]).sum()
if __name__ == '__main__':
    # Seek to the start position and grab the first (previous) frame,
    # downscaled to 192x108 grayscale for flow computation.
    cam = cv2.VideoCapture(root)
    cam.set(0,start)
    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    prevgray = cv2.resize(prevgray, (192,108), interpolation=cv2.INTER_CUBIC)
    flows = []  # per-frame total flow magnitudes
    while True:
        ret, img = cam.read()
        img = cv2.resize(img, (192,108), interpolation=cv2.INTER_CUBIC)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Dense optical flow between consecutive frames using the
        # Gunnar Farneback algorithm.
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        flows.append(mean_flow(flow))
        prevgray = gray
        # Draw flow vectors: sample the field every `step` pixels and
        # build (start, end) line segments from each (fx, fy).
        h, w = gray.shape[:2]
        y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int)
        fx, fy = flow[y, x].T
        lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
        lines = np.int32(lines)
        # Keep only vectors longer than 3 px in x or y to reduce clutter.
        line = []
        for l in lines:
            if l[0][0]-l[1][0]>3 or l[0][1]-l[1][1]>3:
                line.append(l)
        cv2.polylines(img, line, 0, (0,255,255))
        cv2.imshow('flow', img)
        # Paste this frame into the next free 192x108 tile of the mosaic
        # (row-major: `col` tiles per row).
        img1[count//col*108:count//col*108+108,(count%col)*192:(count%col+1)*192,:] = img
        count = count +1
        if count >= 30:
            break
        print(count)
        # ESC (27) quits early.
        ch = cv2.waitKey(5)
        if ch == 27:
            break
    cv2.destroyAllWindows()
    cv2.imwrite("flow31.png",img1)
    print("mean flow:",np.mean(flows))
"mrobotor@gmail.com"
] | mrobotor@gmail.com |
45e4801e5547dad69252ada8dcd2810f206cc64b | 29daec5d741a3c6c4c05bbf8080eba883e7351b1 | /ecchronos-binary/src/pylib/ecchronoslib/rest.py | be1d71f6c3226a1f60822b420dabab148442de5a | [
"Apache-2.0"
] | permissive | itskarlsson/ecchronos | 3cb44a1b2d6ea962cc2a75819bba83daa0703421 | bcbb29af283d4319ef9f63341d036fd3ce36f6f6 | refs/heads/master | 2023-08-08T03:51:02.019545 | 2023-06-29T12:03:09 | 2023-06-29T12:28:41 | 168,316,880 | 0 | 0 | Apache-2.0 | 2021-06-24T08:31:49 | 2019-01-30T09:37:00 | Java | UTF-8 | Python | false | false | 8,654 | py | #
# Copyright 2019 Telefonaktiebolaget LM Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
try:
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
from urllib.parse import quote
except ImportError:
from urllib2 import urlopen, Request, HTTPError, URLError
from urllib import quote # pylint: disable=ungrouped-imports
import json
import os
import ssl
from ecchronoslib.types import FullSchedule, Repair, Schedule, RepairInfo
class RequestResult(object):
    """Outcome of a REST call: HTTP status code, parsed payload, and any
    captured error information."""

    def __init__(self, status_code=None, data=None, exception=None, message=None):
        self.status_code = status_code
        self.data = data
        self.exception = exception
        self.message = message

    def format_exception(self):
        """Build a one-line, human-readable description of the failure."""
        parts = ["Encountered issue"]
        if self.status_code is not None:
            parts.append("({0})".format(self.status_code))
        if self.message is not None:
            parts.append("'{0}'".format(self.message))
        text = " ".join(parts)
        if self.exception is not None:
            text = "{0}: {1}".format(text, self.exception)
        return text

    def is_successful(self):
        """True when the call returned HTTP 200."""
        return self.status_code == 200

    def transform_with_data(self, new_data):
        """Clone this result, replacing only the payload."""
        return RequestResult(status_code=self.status_code,
                             data=new_data,
                             exception=self.exception,
                             message=self.message)
class RestRequest(object):
    """Thin urllib-based HTTP(S) client for the ecChronos REST API."""

    # Plain-HTTP default endpoint.
    default_base_url = 'http://localhost:8080'
    # HTTPS endpoint, selected automatically when TLS material is set.
    default_https_base_url = 'https://localhost:8080'

    def __init__(self, base_url=None):
        if base_url:
            self.base_url = base_url
        elif os.getenv("ECCTOOL_CERT_FILE") and os.getenv("ECCTOOL_KEY_FILE") and os.getenv("ECCTOOL_CA_FILE"):
            # All three TLS env vars present -> assume mutual-TLS setup.
            self.base_url = RestRequest.default_https_base_url
        else:
            self.base_url = RestRequest.default_base_url

    @staticmethod
    def get_param(httpmessage, param):
        # Python 3 message objects expose get_param(); fall back to the
        # Python 2 spelling getparam() on AttributeError.
        try:
            return httpmessage.get_param(param)
        except AttributeError:
            return httpmessage.getparam(param)

    @staticmethod
    def get_charset(response):
        # Charset from the Content-Type header, defaulting to UTF-8.
        return RestRequest.get_param(response.info(), 'charset') or 'utf-8'

    def request(self, url, method='GET'):
        """Perform an HTTP request against base_url/url.

        Returns a RequestResult holding parsed JSON on success; HTTP
        errors and connection failures are captured in the result
        rather than raised.
        """
        request_url = "{0}/{1}".format(self.base_url, url)
        try:
            request = Request(request_url)
            # Force the HTTP verb; old urllib Request has no method arg.
            request.get_method = lambda: method
            cert_file = os.getenv("ECCTOOL_CERT_FILE")
            key_file = os.getenv("ECCTOOL_KEY_FILE")
            ca_file = os.getenv("ECCTOOL_CA_FILE")
            if cert_file and key_file and ca_file:
                # Mutual TLS: trust the given CA, present client cert.
                context = ssl.create_default_context(cafile=ca_file)
                context.load_cert_chain(cert_file, key_file)
                response = urlopen(request, context=context)
            else:
                response = urlopen(request)
            json_data = json.loads(response.read().decode(RestRequest.get_charset(response)))
            response.close()
            return RequestResult(status_code=response.getcode(), data=json_data)
        except HTTPError as e:
            # Server replied with an error status.
            return RequestResult(status_code=e.code,
                                 message="Unable to retrieve resource {0}".format(request_url),
                                 exception=e)
        except URLError as e:
            # Could not connect at all; reported as a synthetic 404.
            return RequestResult(status_code=404,
                                 message="Unable to connect to {0}".format(request_url),
                                 exception=e)
        except Exception as e:  # pylint: disable=broad-except
            # Catch-all (e.g. JSON decode errors): no status code.
            return RequestResult(exception=e,
                                 message="Unable to retrieve resource {0}".format(request_url))
class V2RepairSchedulerRequest(RestRequest):
    """Client for the v2 repair-management REST API: schedules,
    on-demand repairs, and repair info."""

    ROOT = 'repair-management/'
    PROTOCOL = ROOT + 'v2/'
    REPAIRS = PROTOCOL + 'repairs'
    SCHEDULES = PROTOCOL + 'schedules'

    # URL templates; {0} is the job id where present.
    v2_schedule_status_url = SCHEDULES
    v2_schedule_id_status_url = SCHEDULES + '/{0}'
    v2_schedule_id_full_status_url = SCHEDULES + '/{0}?full=true'
    v2_repair_status_url = REPAIRS
    v2_repair_id_status_url = REPAIRS + '/{0}'
    v2_repair_trigger_url = REPAIRS
    repair_info_url = PROTOCOL + 'repairInfo'

    def __init__(self, base_url=None):
        RestRequest.__init__(self, base_url)

    def get_schedule(self, job_id, full=False):
        """Fetch one schedule; full=True includes full details."""
        if full:
            request_url = V2RepairSchedulerRequest.v2_schedule_id_full_status_url.format(job_id)
        else:
            request_url = V2RepairSchedulerRequest.v2_schedule_id_status_url.format(job_id)
        result = self.request(request_url)
        if result.is_successful():
            result = result.transform_with_data(new_data=FullSchedule(result.data))
        return result

    def get_repair(self, job_id, host_id=None):
        """Fetch one on-demand repair, optionally filtered by host."""
        request_url = V2RepairSchedulerRequest.v2_repair_id_status_url.format(job_id)
        if host_id:
            request_url += "?hostId={0}".format(host_id)
        result = self.request(request_url)
        if result.is_successful():
            result = result.transform_with_data(new_data=[Repair(x) for x in result.data])
        return result

    def list_schedules(self, keyspace=None, table=None):
        """List schedules, optionally filtered by keyspace and table.
        NOTE(review): unlike get_repair_info, keyspace/table are not
        URL-quoted here — confirm values are always URL-safe."""
        request_url = V2RepairSchedulerRequest.v2_schedule_status_url
        if keyspace and table:
            request_url = "{0}?keyspace={1}&table={2}".format(request_url, keyspace, table)
        elif keyspace:
            request_url = "{0}?keyspace={1}".format(request_url, keyspace)
        result = self.request(request_url)
        if result.is_successful():
            result = result.transform_with_data(new_data=[Schedule(x) for x in result.data])
        return result

    def list_repairs(self, keyspace=None, table=None, host_id=None):
        """List on-demand repairs with optional keyspace/table/host
        filters; '?' starts the query string, '&' chains parameters."""
        request_url = V2RepairSchedulerRequest.v2_repair_status_url
        if keyspace:
            request_url = "{0}?keyspace={1}".format(request_url, keyspace)
            if table:
                request_url += "&table={0}".format(table)
            if host_id:
                request_url += "&hostId={0}".format(host_id)
        elif host_id:
            request_url += "?hostId={0}".format(host_id)
        result = self.request(request_url)
        if result.is_successful():
            result = result.transform_with_data(new_data=[Repair(x) for x in result.data])
        return result

    def post(self, keyspace=None, table=None, local=False):
        """Trigger a repair (POST), optionally restricted to a
        keyspace/table and/or the local node."""
        request_url = V2RepairSchedulerRequest.v2_repair_trigger_url
        if keyspace:
            request_url += "?keyspace=" + keyspace
            if table:
                request_url += "&table=" + table
        if local:
            # '&' when a query string was already started, else '?'.
            if keyspace:
                request_url += "&isLocal=true"
            else:
                request_url += "?isLocal=true"
        result = self.request(request_url, 'POST')
        if result.is_successful():
            result = result.transform_with_data(new_data=[Repair(x) for x in result.data])
        return result

    def get_repair_info(self, keyspace=None, table=None, since=None,  # pylint: disable=too-many-arguments
                        duration=None, local=False):
        """Fetch repair info; all parameter values are URL-quoted, and
        each parameter picks '?' or '&' based on what was added before."""
        request_url = V2RepairSchedulerRequest.repair_info_url
        if keyspace:
            request_url += "?keyspace=" + quote(keyspace)
            if table:
                request_url += "&table=" + quote(table)
        if local:
            if keyspace:
                request_url += "&isLocal=true"
            else:
                request_url += "?isLocal=true"
        if since:
            if keyspace or local:
                request_url += "&since=" + quote(since)
            else:
                request_url += "?since=" + quote(since)
        if duration:
            if keyspace or since or local:
                request_url += "&duration=" + quote(duration)
            else:
                request_url += "?duration=" + quote(duration)
        result = self.request(request_url)
        if result.is_successful():
            result = result.transform_with_data(new_data=RepairInfo(result.data))
        return result
| [
"noreply@github.com"
] | noreply@github.com |
308e6b9e3059ec9e125d0eaddd98e486959c8ed9 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1444+236/sdB_pg_1444+236_coadd.py | a65a6188dc4e228dc8635b076832771c6f17f941 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from gPhoton.gMap import gMap
def main():
    # Build a 30s-binned NUV count movie and coadd for one sdB target
    # via gPhoton; all inputs are hard-coded for this object.
    gMap(band="NUV", skypos=[221.784042,23.360553], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_1444+236/sdB_pg_1444+236_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_1444+236/sdB_pg_1444+236_count_coadd.fits", overwrite=True, verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
11d28f3aa560d3a574d306c77fa96b0c8c08e349 | ba861a90a0d3bdc7f69ad4b00ba5e89fb16104f7 | /BOJ/1766.py | df10cfe7a906603ee056f31316802ed1719480f9 | [] | no_license | parkjaehyeun/practice-for-coding-test | 2e25f5620738f0f73dfcd385e45f7f2907a20de0 | 6f55f1cce503e918aff6b687946576624277aa3a | refs/heads/master | 2023-05-03T00:01:12.989089 | 2021-05-25T08:59:54 | 2021-05-25T08:59:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | import heapq
# BOJ 1766: topological sort that prefers the smallest available
# problem number first, implemented with a min-heap.
n, m = map(int, input().split())
# array[x] lists problems that require x to be solved first.
array = [[] for i in range(n + 1)]
indegree = [0] * (n + 1)
heap = []
result = []  # NOTE(review): re-initialized below; this first binding is redundant
for _ in range(m):
    x, y = map(int, input().split())
    array[x].append(y)
    indegree[y] += 1
# Seed the heap with every problem that has no prerequisites.
for i in range(1, n + 1):
    if indegree[i] == 0:
        heapq.heappush(heap, i)
result = []
while heap:
    # Always take the smallest available problem (heap order).
    data = heapq.heappop(heap)
    result.append(data)
    for y in array[data]:
        indegree[y] -= 1
        if indegree[y] == 0:
            heapq.heappush(heap, y)
for i in result:
    print(i, end=' ')
| [
"elxm6123@gmail.com"
] | elxm6123@gmail.com |
fbfe830c4c1db56944173198cf8a81fd11c5ab41 | 0d61f90e3a7877e91d72fed71b0895c7070dc046 | /final_project/.history/project/menu_app/views_20201231155853.py | 69a782ad142ac3afd83b74830f621b95b6557bc3 | [] | no_license | lienusrob/final_project | 44d7d90dc0b7efc0cf55501549a5af0110d09b3b | 4164769626813f044ec2af3e7842514b5699ef77 | refs/heads/master | 2023-02-10T16:36:33.439215 | 2021-01-05T09:34:01 | 2021-01-05T09:34:01 | 325,002,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py |
from .models import MenuItem, ItemsCategory, Order, generate_order_id
from account_app.models import Profile
from django.views.generic import ListView
from django.shortcuts import render, get_object_or_404
class MenuListView(ListView):
model = MenuItem
template_name = 'items/menu_list.html'
def menu_list_view(request):
item_list = MenuItem.objects.all()
context = {'item_list': item_list,
'item_categories':reversed(ItemsCategory.objects.all()),
'item_categories_side_nav':reversed(ItemsCategory.objects.all())}
return render(request, 'menu_app/menu_list.html', context)
def home(request):
category_menu = ItemsCategory.objects.all()
context = {'category_menu': category_menu}
return render (request, 'homepage.html', context)
def menu_item_detail(request, **kwargs):
item = MenuItem.objects.filter(id=kwargs.get('pk')).first()
context = {'item':item}
return render(request, 'menu_app/item_details.html', context)
def new_order_info(request):
user_profile = get_object_or_404(Profile, user=request.user)
order, created = Order.objects.get_or_create(customer=user_profile.user, is_ordered=False)
if created:
order.ref_code = generate_order_id()
order.save()
context = {'order':order}
return render(request, 'items/order_info.html', context)
def menu_details (request, name):
category = ItemsCategory.objects.get(name = name)
menu_details = MenuItem.objects.filter(category = category)
context = {'menu_details': menu_details, 'category': name}
| [
"lienus.rob@hotmail.de"
] | lienus.rob@hotmail.de |
4deb419b3ab9e429e64970439cb1ee54b22162d9 | 14c7fa212c1d1801ed542115d2982a81d859ae04 | /models/WaveNet/estimator_def.py | afb5e82164e07a5282c7023419c3891193fef0cd | [] | no_license | EllenRushe/AudioAnomalyDetectionWaveNet | deede10b41ee67ed1322c7616cff38f1a01633db | 329b897362fc4f6c17685c8e82a5c4075e3c2838 | refs/heads/master | 2023-03-06T16:02:49.504817 | 2023-02-27T10:49:23 | 2023-02-27T10:49:23 | 171,162,261 | 20 | 5 | null | 2022-11-21T20:55:25 | 2019-02-17T19:23:52 | Jupyter Notebook | UTF-8 | Python | false | false | 4,385 | py | import tensorflow as tf
from .WaveNet import WaveNet
def _parse_function(file):
    # Training-time TFRecord parser: decodes one serialized example and
    # returns ((scaled_inputs, quantized_targets), empty_label). The
    # stored 'label' field is ignored for training.
    features = {
        'label': tf.FixedLenFeature([], tf.int64),
        'audio_inputs': tf.FixedLenFeature([], tf.string),
        'scene': tf.FixedLenFeature([], tf.string),
        'source_file': tf.FixedLenFeature([], tf.string)
    }
    parsed_features = tf.parse_single_example(file, features=features)
    label = []
    # Quanitised values in the range 0 to 255
    audio_quant = tf.decode_raw(parsed_features['audio_inputs'], tf.int64)
    # The data is first scaled (between 0 and 255) by dividing by
    # 128, -1 then adjusts the range to values between -1 and 1.
    # tensorflow division: '[...] if one of x or y is a float,
    # then the result will be a float.'
    # See: https://www.tensorflow.org/api_docs/python/tf/div
    audio_inputs = tf.cast(audio_quant, dtype=tf.float32)/128 - 1
    # Decoded but unused metadata fields.
    scene = tf.decode_raw(parsed_features['scene'], tf.uint8)
    source_file = tf.decode_raw(parsed_features['source_file'], tf.uint8)
    return (audio_inputs, audio_quant), label
def _test_parse_function(file):
    # Evaluation/test-time TFRecord parser: identical to the training
    # parser except that the stored integer 'label' is kept and
    # returned instead of an empty placeholder.
    features = {
        'label': tf.FixedLenFeature([], tf.int64),
        'audio_inputs': tf.FixedLenFeature([], tf.string),
        'scene': tf.FixedLenFeature([], tf.string),
        'source_file': tf.FixedLenFeature([], tf.string)
    }
    parsed_features = tf.parse_single_example(file, features=features)
    label = tf.cast(parsed_features['label'], tf.int64)
    # Quanitised values in the range 0 to 255
    audio_quant = tf.decode_raw(parsed_features['audio_inputs'], tf.int64)
    # The data is first scaled (between 0 and 255) by dividing by
    # 128, -1 then adjusts the range to values between -1 and 1.
    # tensorflow division: '[...] if one of x or y is a float,
    # then the result will be a float.'
    # See: https://www.tensorflow.org/api_docs/python/tf/div
    audio_inputs = tf.cast(audio_quant, dtype=tf.float32)/128 - 1
    # Decoded but unused metadata fields.
    scene = tf.decode_raw(parsed_features['scene'], tf.uint8)
    source_file = tf.decode_raw(parsed_features['source_file'], tf.uint8)
    return (audio_inputs, audio_quant), label
def get_train_input_fn(filenames, batch_size, shuffle_size, num_epochs):
    # Factory returning an Estimator input_fn for training: shuffled,
    # repeated, batched TFRecord pipeline with prefetching.
    def train_input_fn():
        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.shuffle(shuffle_size)
        dataset = dataset.repeat(num_epochs)
        dataset = dataset.map(_parse_function)
        dataset = dataset.batch(batch_size)
        # Keep a few batches ready ahead of the training step.
        dataset = dataset.prefetch(4)
        return dataset
    return train_input_fn
def get_eval_input_fn(filenames, batch_size):
    # Factory returning an Estimator input_fn for evaluation: a single,
    # unshuffled pass over the records (keeps the true labels).
    def eval_input_fn():
        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(_test_parse_function)
        dataset = dataset.batch(batch_size)
        iterator = dataset.make_one_shot_iterator()
        next_element = iterator.get_next()
        return next_element
    return eval_input_fn
def model_fn(features, labels, mode, params):
    """Estimator model_fn for WaveNet next-sample prediction.

    `features` is the tuple produced by the parse functions:
    (float inputs scaled to [-1, 1), integer quantized samples 0..255).
    The quantized samples (features[1]) serve as the prediction
    targets, so `labels` is unused during training.
    """
    wavenet_model = WaveNet(features[0], params)
    # There is no need for an if statement here because there are no
    # different parameters if the model is in training or testing.
    logits = wavenet_model.model()
    preds_prob = tf.nn.softmax(logits, name='softmax_op')
    preds = tf.argmax(preds_prob, 2, name='preds_argmax')
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'probabilities': preds_prob,
            'predictions': preds,
            'targets': features[1]
        }
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions)
    # Cross-entropy of the predicted distribution against the quantized
    # audio targets.
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits,
            labels=features[1],
            name='loss')
    )
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            # eval_metrics_ops=metrics
        )
    assert mode == tf.estimator.ModeKeys.TRAIN
    # logging_hook = tf.train.LoggingTensorHook(
    #     {"loss" : loss, "step": tf.train.get_global_step()},
    #     every_n_iter=1
    # )
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Used for switching batch normalisation.
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(params['learning_rate'])
        update_op = optimizer.minimize(
            loss,
            tf.train.get_global_step(),
            name='update_op'
        )
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=update_op,
        # training_hooks = [logging_hook]
    )
| [
"ellen@theengine.ucd.ie"
] | ellen@theengine.ucd.ie |
2f159bc5f771cc42abdc4a2bf06ab55734c32d40 | 03b7e68fa845d2f67743358a98ad28891132fb56 | /apps/travel_buddy/urls.py | 722cc35dc7c171755456b55135aa39d5d9918726 | [] | no_license | Jason-R-Scott/django-exam | e9f7800ad652965ac97fb05bf2c6f4a0e4bb6505 | 814181cdaf0c72e5afcf03928f96985620199bee | refs/heads/master | 2020-04-02T19:16:46.562537 | 2018-10-25T21:14:13 | 2018-10-25T21:14:13 | 154,729,019 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | from django.conf.urls import url
from . import views

# URL routes for this app: auth flows (register/login/logout) plus
# CRUD-style actions; the numeric ``id`` capture presumably identifies a
# travel record -- verify against the views.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^register$', views.register),
    url(r'^login$', views.login),
    url(r'^logout$', views.logout),
    url(r'^travels/$', views.travels),
    url(r'^join/(?P<id>\d+)/$', views.join),
    url(r'^view/(?P<id>\d+)/$', views.view),
    url(r'^new/$', views.new),
    url(r'^create/$', views.create),
    url(r'^destroy/(?P<id>\d+)/$', views.destroy),
    url(r'^cancel/(?P<id>\d+)/$', views.cancel)
] | [
"jasonscott613@gmail.com"
] | jasonscott613@gmail.com |
e92928fc105a84d170b6d2cf43d303bf7e417a2f | 07b41984d04acc5f328fc43ec18b76282474a28d | /main.py | 4ef9963d35d4a67e653cc94b5383b014000613bd | [] | no_license | iamAkolab/ML_Flask_App | d4cd21458f9d8ff6ebd7e5730042c213a01e742a | 162a89ff040a0c6961bd564a74e81d8fa0a9e253 | refs/heads/main | 2023-08-20T17:47:39.866415 | 2021-10-10T16:26:41 | 2021-10-10T16:26:41 | 415,636,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import pickle
from flask import Flask, request , jsonify
from model_files.ml_model import predict_mpg
app = Flask("mpg_prediction")
# create another route
@app.route('/', methods = ['POST'])
def predict():
    """Handle a POST request with a JSON vehicle configuration and return
    the MPG predictions produced by the pickled model.

    Returns:
        JSON response of the form {'mpg_predictions': [...]}.
    """
    vehicle_config = request.get_json()
    # The ``with`` statement closes the file on exit; the previous explicit
    # f_in.close() inside the block was redundant and has been removed.
    # NOTE(review): the model is deserialized from disk on every request --
    # consider loading it once at module import time if latency matters.
    with open('./model_files/model.bin', 'rb') as f_in:
        model = pickle.load(f_in)
    predictions = predict_mpg(vehicle_config, model)
    response = {
        'mpg_predictions': list(predictions)
    }
    return jsonify(response)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger and, combined
    # with host='0.0.0.0', exposes it on all interfaces -- acceptable only
    # for local development; confirm this is never deployed as-is.
    app.run(debug = True, host = '0.0.0.0', port = 9696)
"noreply@github.com"
] | noreply@github.com |
26a9384368ce014b5b18474fee6ca847f4944df1 | b94a9e4388c54d27dd40826885f7a65b083848f3 | /kinetics-i3d/i3d.py | 9cdd946d30c1a085ec9772824091beefaa7e7d68 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | chi3236/TVQA_with_I3D | 8480badf61cade15c6cc5c5bd7866c7b99caa0d7 | 655f39fc0e641296ff37b56135faa9066fed9dad | refs/heads/master | 2020-06-04T05:38:02.376050 | 2019-06-14T06:55:02 | 2019-06-14T06:55:02 | 191,891,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,526 | py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Inception-v1 Inflated 3D ConvNet used for Kinetics CVPR paper.
The model is introduced in:
Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
Joao Carreira, Andrew Zisserman
https://arxiv.org/pdf/1705.07750v1.pdf.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow as tf
regularizer = tf.contrib.layers.l2_regularizer(scale=1e-7)
class Unit3D(snt.AbstractModule):
  """Basic unit containing Conv3D + BatchNorm + non-linearity."""

  def __init__(self, output_channels,
               kernel_shape=(1, 1, 1),
               stride=(1, 1, 1),
               activation_fn=tf.nn.relu,
               use_batch_norm=True,
               use_bias=False,
               name='unit_3d'):
    """Initializes Unit3D module.

    Args:
      output_channels: number of output channels of the 3D convolution.
      kernel_shape: (time, height, width) kernel shape.
      stride: (time, height, width) stride.
      activation_fn: non-linearity applied last, or None to skip it.
      use_batch_norm: whether to apply snt.BatchNorm after the convolution.
      use_bias: whether the convolution adds a bias term.
      name: Sonnet module (variable scope) name.
    """
    super(Unit3D, self).__init__(name=name)
    self._output_channels = output_channels
    self._kernel_shape = kernel_shape
    self._stride = stride
    self._use_batch_norm = use_batch_norm
    self._activation_fn = activation_fn
    self._use_bias = use_bias

  def _build(self, inputs, is_training):
    """Connects the module to inputs.

    Args:
      inputs: Inputs to the Unit3D component.
      is_training: whether to use training mode for snt.BatchNorm (boolean).

    Returns:
      Outputs from the module.
    """
    # SAME padding; the module-level L2 regularizer is attached to the
    # convolution weights ('w').
    net = snt.Conv3D(output_channels=self._output_channels,
                     kernel_shape=self._kernel_shape,
                     stride=self._stride,
                     padding=snt.SAME, regularizers={'w': regularizer},
                     use_bias=self._use_bias)(inputs)
    if self._use_batch_norm:
      bn = snt.BatchNorm()
      # test_local_stats=False: Sonnet BatchNorm uses the accumulated moving
      # averages (not the current batch statistics) when is_training is False.
      net = bn(net, is_training=is_training, test_local_stats=False)
    if self._activation_fn is not None:
      net = self._activation_fn(net)
    return net
class InceptionI3d(snt.AbstractModule):
  """Inception-v1 I3D architecture.

  The model is introduced in:

    Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
    Joao Carreira, Andrew Zisserman
    https://arxiv.org/pdf/1705.07750v1.pdf.

  See also the Inception architecture, introduced in:

    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.
  """

  # Endpoints of the model in order. During construction, all the endpoints up
  # to a designated `final_endpoint` are returned in a dictionary as the
  # second return value.
  VALID_ENDPOINTS = (
      'Conv3d_1a_7x7',
      'MaxPool3d_2a_3x3',
      'Conv3d_2b_1x1',
      'Conv3d_2c_3x3',
      'MaxPool3d_3a_3x3',
      'Mixed_3b',
      'Mixed_3c',
      'MaxPool3d_4a_3x3',
      'Mixed_4b',
      'Mixed_4c',
      'Mixed_4d',
      'Mixed_4e',
      'Mixed_4f',
      'MaxPool3d_5a_2x2',
      'Mixed_5b',
      'Mixed_5c',
      'Logits',
      'Predictions',
  )

  def __init__(self, num_classes=400, spatial_squeeze=True,
               final_endpoint='Logits', name='inception_i3d'):
    """Initializes I3D model instance.

    Args:
      num_classes: The number of outputs in the logit layer (default 400, which
          matches the Kinetics dataset).
      spatial_squeeze: Whether to squeeze the spatial dimensions for the logits
          before returning (default True).
      final_endpoint: The model contains many possible endpoints.
          `final_endpoint` specifies the last endpoint for the model to be
          built up to. In addition to the output at `final_endpoint`, all the
          outputs at endpoints up to `final_endpoint` will also be returned,
          in a dictionary. `final_endpoint` must be one of
          InceptionI3d.VALID_ENDPOINTS (default 'Logits').
      name: A string (optional). The name of this module.

    Raises:
      ValueError: if `final_endpoint` is not recognized.
    """
    if final_endpoint not in self.VALID_ENDPOINTS:
      raise ValueError('Unknown final endpoint %s' % final_endpoint)
    super(InceptionI3d, self).__init__(name=name)
    self._num_classes = num_classes
    self._spatial_squeeze = spatial_squeeze
    self._final_endpoint = final_endpoint

  def _inception_module(self, net, is_training, name, branch_0_channels,
                        branch_1_channels, branch_2_channels,
                        branch_3_channels, branch_2b_name='Conv3d_0b_3x3'):
    """Builds one Inception sub-module (a 'Mixed_*' block).

    All nine Mixed blocks share this exact four-branch structure; only the
    channel counts differ, so the former copy-pasted bodies are deduplicated
    here. Variable-scope and op names are kept identical to the original
    release so existing checkpoints still load.

    Args:
      net: input tensor.
      is_training: whether to use training mode for snt.BatchNorm (boolean).
      name: variable scope of the module (e.g. 'Mixed_3b').
      branch_0_channels: output channels of the Branch_0 1x1x1 conv.
      branch_1_channels: (bottleneck, output) channels for Branch_1.
      branch_2_channels: (bottleneck, output) channels for Branch_2.
      branch_3_channels: output channels of the Branch_3 1x1x1 conv.
      branch_2b_name: scope name of the second Branch_2 conv. The released
          model names it 'Conv3d_0a_3x3' (instead of 'Conv3d_0b_3x3') in
          'Mixed_5b' only; that historical inconsistency must be preserved.

    Returns:
      The channel-wise (axis 4) concatenation of the four branches.
    """
    with tf.variable_scope(name):
      with tf.variable_scope('Branch_0'):
        branch_0 = Unit3D(output_channels=branch_0_channels,
                          kernel_shape=[1, 1, 1],
                          name='Conv3d_0a_1x1')(net, is_training=is_training)
      with tf.variable_scope('Branch_1'):
        branch_1 = Unit3D(output_channels=branch_1_channels[0],
                          kernel_shape=[1, 1, 1],
                          name='Conv3d_0a_1x1')(net, is_training=is_training)
        branch_1 = Unit3D(output_channels=branch_1_channels[1],
                          kernel_shape=[3, 3, 3],
                          name='Conv3d_0b_3x3')(branch_1,
                                                is_training=is_training)
      with tf.variable_scope('Branch_2'):
        branch_2 = Unit3D(output_channels=branch_2_channels[0],
                          kernel_shape=[1, 1, 1],
                          name='Conv3d_0a_1x1')(net, is_training=is_training)
        branch_2 = Unit3D(output_channels=branch_2_channels[1],
                          kernel_shape=[3, 3, 3],
                          name=branch_2b_name)(branch_2,
                                               is_training=is_training)
      with tf.variable_scope('Branch_3'):
        branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1],
                                    strides=[1, 1, 1, 1, 1], padding=snt.SAME,
                                    name='MaxPool3d_0a_3x3')
        branch_3 = Unit3D(output_channels=branch_3_channels,
                          kernel_shape=[1, 1, 1],
                          name='Conv3d_0b_1x1')(branch_3,
                                                is_training=is_training)
      return tf.concat([branch_0, branch_1, branch_2, branch_3], 4)

  def _build(self, inputs, is_training, dropout_keep_prob=1.0):
    """Connects the model to inputs.

    Args:
      inputs: Inputs to the model, which should have dimensions
          `batch_size` x `num_frames` x 224 x 224 x `num_channels`.
      is_training: whether to use training mode for snt.BatchNorm (boolean).
      dropout_keep_prob: Probability for the tf.nn.dropout layer (float in
          [0, 1)).

    Returns:
      A tuple consisting of:
        1. Network output at location `self._final_endpoint`.
        2. Dictionary containing all endpoints up to `self._final_endpoint`,
           indexed by endpoint name.

    Raises:
      ValueError: if `self._final_endpoint` is not recognized.
    """
    if self._final_endpoint not in self.VALID_ENDPOINTS:
      raise ValueError('Unknown final endpoint %s' % self._final_endpoint)

    net = inputs
    end_points = {}

    # Stem convolutions: endpoint -> (output_channels, kernel, stride).
    stem_convs = {
        'Conv3d_1a_7x7': (64, [7, 7, 7], [2, 2, 2]),
        'Conv3d_2b_1x1': (64, [1, 1, 1], [1, 1, 1]),
        'Conv3d_2c_3x3': (192, [3, 3, 3], [1, 1, 1]),
    }
    # Max-pool endpoints: endpoint -> (ksize, strides); padding is SAME.
    max_pools = {
        'MaxPool3d_2a_3x3': ([1, 1, 3, 3, 1], [1, 1, 2, 2, 1]),
        'MaxPool3d_3a_3x3': ([1, 1, 3, 3, 1], [1, 1, 2, 2, 1]),
        'MaxPool3d_4a_3x3': ([1, 3, 3, 3, 1], [1, 2, 2, 2, 1]),
        'MaxPool3d_5a_2x2': ([1, 2, 2, 2, 1], [1, 2, 2, 2, 1]),
    }
    # Inception modules: endpoint ->
    # (branch_0, (branch_1a, branch_1b), (branch_2a, branch_2b), branch_3).
    inception_channels = {
        'Mixed_3b': (64, (96, 128), (16, 32), 32),
        'Mixed_3c': (128, (128, 192), (32, 96), 64),
        'Mixed_4b': (192, (96, 208), (16, 48), 64),
        'Mixed_4c': (160, (112, 224), (24, 64), 64),
        'Mixed_4d': (128, (128, 256), (24, 64), 64),
        'Mixed_4e': (112, (144, 288), (32, 64), 64),
        'Mixed_4f': (256, (160, 320), (32, 128), 128),
        'Mixed_5b': (256, (160, 320), (32, 128), 128),
        'Mixed_5c': (384, (192, 384), (48, 128), 128),
    }

    # Build the backbone in VALID_ENDPOINTS order, recording every endpoint
    # and returning early once `final_endpoint` is reached.
    for end_point in self.VALID_ENDPOINTS:
      if end_point == 'Logits':
        break
      if end_point in stem_convs:
        channels, kernel, stride = stem_convs[end_point]
        net = Unit3D(output_channels=channels, kernel_shape=kernel,
                     stride=stride, name=end_point)(net,
                                                    is_training=is_training)
      elif end_point in max_pools:
        ksize, strides = max_pools[end_point]
        net = tf.nn.max_pool3d(net, ksize=ksize, strides=strides,
                               padding=snt.SAME, name=end_point)
      else:
        net = self._inception_module(
            net, is_training, end_point, *inception_channels[end_point],
            branch_2b_name=('Conv3d_0a_3x3' if end_point == 'Mixed_5b'
                            else 'Conv3d_0b_3x3'))
      end_points[end_point] = net
      if self._final_endpoint == end_point:
        return net, end_points

    end_point = 'Logits'
    with tf.variable_scope(end_point):
      net = tf.nn.avg_pool3d(net, ksize=[1, 2, 7, 7, 1],
                             strides=[1, 1, 1, 1, 1], padding=snt.VALID)
      net = tf.nn.dropout(net, dropout_keep_prob)
      logits = Unit3D(output_channels=self._num_classes,
                      kernel_shape=[1, 1, 1],
                      activation_fn=None,
                      use_batch_norm=False,
                      use_bias=True,
                      name='Conv3d_0c_1x1')(net, is_training=is_training)
      if self._spatial_squeeze:
        logits = tf.squeeze(logits, [2, 3], name='SpatialSqueeze')
      # Average the per-frame logits over the time axis.
      averaged_logits = tf.reduce_mean(logits, axis=1)
    end_points[end_point] = averaged_logits
    if self._final_endpoint == end_point:
      return averaged_logits, end_points

    end_point = 'Predictions'
    predictions = tf.nn.softmax(averaged_logits)
    end_points[end_point] = predictions
    return predictions, end_points
"chi3236@gmail.com"
] | chi3236@gmail.com |
9adb2f941bf5d47c462c44dae8d72de1b9269d95 | d4a569dcf616b7f05e53a44803e38196b436b8b9 | /Thesis@3.9.1/Lib/site-packages/django/test/utils.py | 235136ed67432f1217462c16026362efa3fe60c3 | [
"MIT"
] | permissive | nverbois/TFE21-232 | ac3178d24939c872c02a671c0f1d8cc471af516b | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | refs/heads/main | 2023-06-05T18:50:59.207392 | 2021-06-25T19:54:40 | 2021-06-25T19:54:40 | 337,691,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,893 | py | import asyncio
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from io import StringIO
from itertools import chain
from types import SimpleNamespace
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import request_started
from django.db import DEFAULT_DB_ALIAS, connections, reset_queries
from django.db.models.options import Options
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
"Approximate",
"ContextList",
"isolate_lru_cache",
"get_runner",
"modify_settings",
"override_settings",
"requires_tz_support",
"setup_test_environment",
"teardown_test_environment",
)
TZ_SUPPORT = hasattr(time, "tzset")
class Approximate:
    """Wrap a value so that equality holds within ``places`` decimal places."""

    def __init__(self, val, places=7):
        self.val = val
        self.places = places

    def __repr__(self):
        return repr(self.val)

    def __eq__(self, other):
        # Exact equality short-circuits (and also covers non-numeric values);
        # otherwise compare the rounded absolute difference against zero.
        if self.val == other:
            return True
        return round(abs(self.val - other), self.places) == 0
class ContextList(list):
    """
    A wrapper that provides direct key access to context items contained
    in a list of context objects.
    """

    def __getitem__(self, key):
        # Integer/slice indices behave like a plain list; string keys are
        # resolved against the first subcontext that contains them.
        if not isinstance(key, str):
            return super().__getitem__(key)
        for subcontext in self:
            if key in subcontext:
                return subcontext[key]
        raise KeyError(key)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        return True

    def keys(self):
        """
        Flattened keys of subcontexts.
        """
        # Each subcontext iterates as a sequence of dicts; flatten those
        # dicts' keys into a single set.
        return {key for subcontext in self for d in subcontext for key in d}
def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal that can be
    intercepted by the test Client.
    """
    # Installed as Template._render by setup_test_environment(), so every
    # template rendered during a test fires the template_rendered signal.
    template_rendered.send(sender=self, template=self, context=context)
    return self.nodelist.render(context)
class _TestState:
    # Namespace for the module-level state stashed by setup_test_environment()
    # (as the ``saved_data`` attribute) and restored/removed by
    # teardown_test_environment().
    pass
def setup_test_environment(debug=None):
    """
    Perform global pre-test setup, such as installing the instrumented template
    renderer and setting the email backend to the locmem email backend.
    """
    if hasattr(_TestState, "saved_data"):
        # Executing this function twice would overwrite the saved values.
        raise RuntimeError(
            "setup_test_environment() was already called and can't be called "
            "again without first calling teardown_test_environment()."
        )
    if debug is None:
        debug = settings.DEBUG
    # Stash the original values on _TestState so teardown can restore them.
    saved_data = SimpleNamespace()
    _TestState.saved_data = saved_data
    saved_data.allowed_hosts = settings.ALLOWED_HOSTS
    # Add the default host of the test client.
    settings.ALLOWED_HOSTS = [*settings.ALLOWED_HOSTS, "testserver"]
    saved_data.debug = settings.DEBUG
    settings.DEBUG = debug
    # Collect outgoing mail in memory (mail.outbox) instead of sending it.
    saved_data.email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
    # Patch rendering so each render fires the template_rendered signal.
    saved_data.template_render = Template._render
    Template._render = instrumented_test_render
    mail.outbox = []
    deactivate()
def teardown_test_environment():
    """
    Perform any global post-test teardown, such as restoring the original
    template renderer and restoring the email sending functions.
    """
    saved_data = _TestState.saved_data
    settings.ALLOWED_HOSTS = saved_data.allowed_hosts
    settings.DEBUG = saved_data.debug
    settings.EMAIL_BACKEND = saved_data.email_backend
    Template._render = saved_data.template_render
    # Drop the saved state so setup_test_environment() may be called again.
    del _TestState.saved_data
    del mail.outbox
def setup_databases(
    verbosity,
    interactive,
    keepdb=False,
    debug_sql=False,
    parallel=0,
    aliases=None,
    **kwargs
):
    """Create the test databases.

    Return a list of (connection, old_name, created) triples, where
    ``created`` marks the connection that actually created the database
    (mirrors of it get False); teardown_databases() consumes this list.
    """
    test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)
    old_names = []
    for db_name, aliases in test_databases.values():
        first_alias = None
        for alias in aliases:
            connection = connections[alias]
            old_names.append((connection, db_name, first_alias is None))
            # Actually create the database for the first connection
            if first_alias is None:
                first_alias = alias
                connection.creation.create_test_db(
                    verbosity=verbosity,
                    autoclobber=not interactive,
                    keepdb=keepdb,
                    serialize=connection.settings_dict["TEST"].get("SERIALIZE", True),
                )
                # Clone one extra database per parallel test process.
                if parallel > 1:
                    for index in range(parallel):
                        connection.creation.clone_test_db(
                            suffix=str(index + 1), verbosity=verbosity, keepdb=keepdb,
                        )
            # Configure all other connections as mirrors of the first one
            else:
                connections[alias].creation.set_as_test_mirror(
                    connections[first_alias].settings_dict
                )
    # Configure the test mirrors.
    for alias, mirror_alias in mirrored_aliases.items():
        connections[alias].creation.set_as_test_mirror(
            connections[mirror_alias].settings_dict
        )
    if debug_sql:
        for alias in connections:
            connections[alias].force_debug_cursor = True
    return old_names
def dependency_ordered(test_databases, dependencies):
    """
    Reorder test_databases into an order that honors the dependencies
    described in TEST[DEPENDENCIES].
    """
    ordered_test_databases = []
    resolved_databases = set()

    # Map each database signature to the union of the dependencies of all of
    # its aliases, rejecting a database whose aliases depend on one another.
    dependencies_map = {}
    for sig, (_, aliases) in test_databases:
        combined_deps = set()
        for alias in aliases:
            combined_deps.update(dependencies.get(alias, []))
        if not combined_deps.isdisjoint(aliases):
            raise ImproperlyConfigured(
                "Circular dependency: databases %r depend on each other, "
                "but are aliases." % aliases
            )
        dependencies_map[sig] = combined_deps

    # Repeatedly emit every database whose dependencies are already resolved,
    # deferring the rest; no progress in a full pass means a cycle.
    while test_databases:
        deferred = []
        made_progress = False
        for signature, entry in test_databases:
            db_name, aliases = entry
            if dependencies_map[signature] <= resolved_databases:
                resolved_databases.update(aliases)
                ordered_test_databases.append((signature, (db_name, aliases)))
                made_progress = True
            else:
                deferred.append((signature, (db_name, aliases)))
        if not made_progress:
            raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]")
        test_databases = deferred
    return ordered_test_databases
def get_unique_databases_and_mirrors(aliases=None):
    """
    Figure out which databases actually need to be created.

    Deduplicate entries in DATABASES that correspond the same database or are
    configured as test mirrors.

    Return two values:
    - test_databases: ordered mapping of signatures to (name, list of aliases)
                      where all aliases share the same underlying database.
    - mirrored_aliases: mapping of mirror aliases to original aliases.
    """
    if aliases is None:
        aliases = connections
    mirrored_aliases = {}
    test_databases = {}
    dependencies = {}
    default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
    for alias in connections:
        connection = connections[alias]
        test_settings = connection.settings_dict["TEST"]
        if test_settings["MIRROR"]:
            # If the database is marked as a test mirror, save the alias.
            mirrored_aliases[alias] = test_settings["MIRROR"]
        elif alias in aliases:
            # Store a tuple with DB parameters that uniquely identify it.
            # If we have two aliases with the same values for that tuple,
            # we only need to create the test database once.
            item = test_databases.setdefault(
                connection.creation.test_db_signature(),
                (connection.settings_dict["NAME"], set()),
            )
            item[1].add(alias)
            if "DEPENDENCIES" in test_settings:
                dependencies[alias] = test_settings["DEPENDENCIES"]
            else:
                # Non-default databases implicitly depend on the default one
                # unless they share its signature.
                if (
                    alias != DEFAULT_DB_ALIAS
                    and connection.creation.test_db_signature() != default_sig
                ):
                    dependencies[alias] = test_settings.get(
                        "DEPENDENCIES", [DEFAULT_DB_ALIAS]
                    )
    test_databases = dict(dependency_ordered(test_databases.items(), dependencies))
    return test_databases, mirrored_aliases
def teardown_databases(old_config, verbosity, parallel=0, keepdb=False):
    """Destroy all the non-mirror databases."""
    for connection, old_name, destroy in old_config:
        if not destroy:
            # Mirror entries share another alias's database; nothing to do.
            continue
        # First drop the per-process clones created for parallel runs.
        clone_count = parallel if parallel > 1 else 0
        for index in range(clone_count):
            connection.creation.destroy_test_db(
                suffix=str(index + 1), verbosity=verbosity, keepdb=keepdb,
            )
        connection.creation.destroy_test_db(old_name, verbosity, keepdb)
def get_runner(settings, test_runner_class=None):
    """Import and return the test-runner class named by ``test_runner_class``
    or, by default, by ``settings.TEST_RUNNER`` (a dotted path)."""
    dotted_path = test_runner_class or settings.TEST_RUNNER
    *module_parts, class_name = dotted_path.split(".")
    # Allow for relative paths: a bare class name imports from the current
    # package ('.').
    module_name = ".".join(module_parts) if module_parts else "."
    module = __import__(module_name, {}, {}, class_name)
    return getattr(module, class_name)
class TestContextDecorator:
    """
    A base class that can either be used as a context manager during tests
    or as a test function or unittest.TestCase subclass decorator to perform
    temporary alterations.

    `attr_name`: attribute assigned the return value of enable() if used as
    a class decorator.

    `kwarg_name`: keyword argument passing the return value of enable() if
    used as a function decorator.
    """

    def __init__(self, attr_name=None, kwarg_name=None):
        self.attr_name = attr_name
        self.kwarg_name = kwarg_name

    def enable(self):
        # Subclasses apply the temporary alteration and return its context.
        raise NotImplementedError

    def disable(self):
        # Subclasses revert whatever enable() changed.
        raise NotImplementedError

    def __enter__(self):
        return self.enable()

    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()

    def decorate_class(self, cls):
        if issubclass(cls, TestCase):
            decorated_setUp = cls.setUp
            decorated_tearDown = cls.tearDown

            def setUp(inner_self):
                context = self.enable()
                if self.attr_name:
                    setattr(inner_self, self.attr_name, context)
                try:
                    decorated_setUp(inner_self)
                except Exception:
                    # If setUp fails, tearDown never runs, so revert here.
                    self.disable()
                    raise

            def tearDown(inner_self):
                decorated_tearDown(inner_self)
                self.disable()

            cls.setUp = setUp
            cls.tearDown = tearDown
            return cls
        raise TypeError("Can only decorate subclasses of unittest.TestCase")

    def decorate_callable(self, func):
        if asyncio.iscoroutinefunction(func):
            # If the inner function is an async function, we must execute async
            # as well so that the `with` statement executes at the right time.
            @wraps(func)
            async def inner(*args, **kwargs):
                with self as context:
                    if self.kwarg_name:
                        kwargs[self.kwarg_name] = context
                    return await func(*args, **kwargs)

        else:

            @wraps(func)
            def inner(*args, **kwargs):
                with self as context:
                    if self.kwarg_name:
                        kwargs[self.kwarg_name] = context
                    return func(*args, **kwargs)

        return inner

    def __call__(self, decorated):
        # Dispatch on what is being decorated: a class or a callable.
        if isinstance(decorated, type):
            return self.decorate_class(decorated)
        elif callable(decorated):
            return self.decorate_callable(decorated)
        raise TypeError("Cannot decorate object of type %s" % type(decorated))
class override_settings(TestContextDecorator):
    """
    Act as either a decorator or a context manager. If it's a decorator, take a
    function and return a wrapped function. If it's a contextmanager, use it
    with the ``with`` statement. In either event, entering/exiting are called
    before and after, respectively, the function/block is executed.
    """

    # Exception raised by a setting_changed receiver during enable();
    # stored so disable() can restore settings first, then re-raise it.
    enable_exception = None

    def __init__(self, **kwargs):
        self.options = kwargs
        super().__init__()

    def enable(self):
        # Keep this code at the beginning to leave the settings unchanged
        # in case it raises an exception because INSTALLED_APPS is invalid.
        if "INSTALLED_APPS" in self.options:
            try:
                apps.set_installed_apps(self.options["INSTALLED_APPS"])
            except Exception:
                apps.unset_installed_apps()
                raise
        # Layer the overrides on top of the current settings object.
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        self.wrapped = settings._wrapped
        settings._wrapped = override
        for key, new_value in self.options.items():
            try:
                setting_changed.send(
                    sender=settings._wrapped.__class__,
                    setting=key,
                    value=new_value,
                    enter=True,
                )
            except Exception as exc:
                # Roll back immediately; disable() re-raises this exception.
                self.enable_exception = exc
                self.disable()

    def disable(self):
        if "INSTALLED_APPS" in self.options:
            apps.unset_installed_apps()
        settings._wrapped = self.wrapped
        del self.wrapped
        responses = []
        for key in self.options:
            new_value = getattr(settings, key, None)
            # send_robust() collects receiver exceptions instead of raising,
            # so all receivers are notified before any exception propagates.
            responses_for_setting = setting_changed.send_robust(
                sender=settings._wrapped.__class__,
                setting=key,
                value=new_value,
                enter=False,
            )
            responses.extend(responses_for_setting)
        if self.enable_exception is not None:
            exc = self.enable_exception
            self.enable_exception = None
            raise exc
        for _, response in responses:
            if isinstance(response, Exception):
                raise response

    def save_options(self, test_func):
        if test_func._overridden_settings is None:
            test_func._overridden_settings = self.options
        else:
            # Duplicate dict to prevent subclasses from altering their parent.
            test_func._overridden_settings = {
                **test_func._overridden_settings,
                **self.options,
            }

    def decorate_class(self, cls):
        from django.test import SimpleTestCase

        if not issubclass(cls, SimpleTestCase):
            raise ValueError(
                "Only subclasses of Django SimpleTestCase can be decorated "
                "with override_settings"
            )
        self.save_options(cls)
        return cls
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend, or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
super(override_settings, self).__init__()
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = (
list(test_func._modified_settings) + self.operations
)
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
# items my be a single value or an iterable.
if isinstance(items, str):
items = [items]
if action == "append":
value = value + [item for item in items if item not in value]
elif action == "prepend":
value = [item for item in items if item not in value] + value
elif action == "remove":
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super().enable()
class override_system_checks(TestContextDecorator):
"""
Act as a decorator. Override list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks.
"""
def __init__(self, new_checks, deployment_checks=None):
from django.core.checks.registry import registry
self.registry = registry
self.new_checks = new_checks
self.deployment_checks = deployment_checks
super().__init__()
def enable(self):
self.old_checks = self.registry.registered_checks
self.registry.registered_checks = set()
for check in self.new_checks:
self.registry.register(check, *getattr(check, "tags", ()))
self.old_deployment_checks = self.registry.deployment_checks
if self.deployment_checks is not None:
self.registry.deployment_checks = set()
for check in self.deployment_checks:
self.registry.register(check, *getattr(check, "tags", ()), deploy=True)
def disable(self):
self.registry.registered_checks = self.old_checks
self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
"""
Try to do a 'xml-comparison' of want and got. Plain string comparison
doesn't always work because, for example, attribute ordering should not be
important. Ignore comment nodes, processing instructions, document type
node, and leading and trailing whitespaces.
Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r"[ \t\n][ \t\n]+")
def norm_whitespace(v):
return _norm_whitespace_re.sub(" ", v)
def child_text(element):
return "".join(
c.data for c in element.childNodes if c.nodeType == Node.TEXT_NODE
)
def children(element):
return [c for c in element.childNodes if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
return all(
check_element(want, got) for want, got in zip(want_children, got_children)
)
def first_node(document):
for node in document.childNodes:
if node.nodeType not in (
Node.COMMENT_NODE,
Node.DOCUMENT_TYPE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
):
return node
want = want.strip().replace("\\n", "\n")
got = got.strip().replace("\\n", "\n")
# If the string is not a complete xml document, we may need to add a
# root element. This allow us to compare fragments, like "<foo/><bar/>"
if not want.startswith("<?xml"):
wrapper = "<root>%s</root>"
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
class CaptureQueriesContext:
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries : self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
# Run any initialization queries if needed so that they won't be
# included as part of the count.
self.connection.ensure_connection()
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
class ignore_warnings(TestContextDecorator):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if "message" in self.ignore_kwargs or "module" in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
super().__init__()
def enable(self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func("ignore", **self.ignore_kwargs)
def disable(self):
self.catch_warnings.__exit__(*sys.exc_info())
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(
TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.",
)
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
time function directly (e.g. `from time import time`) won't be affected
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
return override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
},
{
"BACKEND": "django.template.backends.jinja2.Jinja2",
"APP_DIRS": True,
"OPTIONS": {"keep_trailing_newline": True},
},
]
)(test_func)
class override_script_prefix(TestContextDecorator):
"""Decorator or context manager to temporary override the script prefix."""
def __init__(self, prefix):
self.prefix = prefix
super().__init__()
def enable(self):
self.old_prefix = get_script_prefix()
set_script_prefix(self.prefix)
def disable(self):
set_script_prefix(self.old_prefix)
class LoggingCaptureMixin:
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger("django")
self.old_stream = self.logger.handlers[0].stream
self.logger_output = StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
"""
Act as either a decorator or a context manager to register models defined
in its wrapped context to an isolated registry.
The list of installed apps the isolated registry should contain must be
passed as arguments.
Two optional keyword arguments can be specified:
`attr_name`: attribute assigned the isolated registry if used as a class
decorator.
`kwarg_name`: keyword argument passing the isolated registry if used as a
function decorator.
"""
def __init__(self, *installed_apps, **kwargs):
self.installed_apps = installed_apps
super().__init__(**kwargs)
def enable(self):
self.old_apps = Options.default_apps
apps = Apps(self.installed_apps)
setattr(Options, "default_apps", apps)
return apps
def disable(self):
setattr(Options, "default_apps", self.old_apps)
def tag(*tags):
"""Decorator to add tags to a test class or method."""
def decorator(obj):
if hasattr(obj, "tags"):
obj.tags = obj.tags.union(tags)
else:
setattr(obj, "tags", set(tags))
return obj
return decorator
@contextmanager
def register_lookup(field, *lookups, lookup_name=None):
"""
Context manager to temporarily register lookups on a model field using
lookup_name (or the lookup's lookup_name if not provided).
"""
try:
for lookup in lookups:
field.register_lookup(lookup, lookup_name)
yield
finally:
for lookup in lookups:
field._unregister_lookup(lookup, lookup_name)
| [
"38432529+nverbois@users.noreply.github.com"
] | 38432529+nverbois@users.noreply.github.com |
63821e410ac7923d403cc949bd2ccb80dd6be7ab | bd0b80b921e1a8e03c9bd5ec18db93f03cc528a8 | /administration/helpers/tokenHelper.py | a536e0e9f02b90d983d4cc20c1830cf18dbdf2f4 | [] | no_license | jamesngangabest/james-best-banking-system | 4d38e74987f836a250e489533c4197a2e70b2995 | a03636b1032293df2c4e80ce41ef6eb1b7ca172d | refs/heads/master | 2022-10-26T19:31:45.265904 | 2020-06-18T13:36:04 | 2020-06-18T13:36:04 | 273,245,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py |
from rest_framework_simplejwt.tokens import AccessToken
class TokenHelper:
@staticmethod
def get_token(requestObj=None):
if requestObj == None:
return None
try:
token_str = (requestObj.META.get('HTTP_AUTHORIZATION') or requestObj.GET.get('token'))
#print("token string: ",token_str)
token = AccessToken(token_str.split(' ')[1])
#print("the token: ", token)
return token
except Exception as e:
return None
@staticmethod
def get_token_key(requestObj, key):
token = TokenHelper.get_token(requestObj)
if token == None:
return None
try:
keyValue = token.payload[key]
return keyValue
except Exception as e:
return None
@staticmethod
def get_company_id(requestObj):
#c_id = TokenHelper.get_token_key(requestObj, "company_id")
c_id = requestObj.user.systemCompany.id
return c_id
@staticmethod
def get_user_id(requestObj):
#c_id = TokenHelper.get_token_key(requestObj, "company_id")
id = requestObj.user.id
return id
| [
"noreply@github.com"
] | noreply@github.com |
b5958304575c340146e7b8af97c16d0dafbe093b | a9aeb5a190e1da997ee855c072c58b608d491dca | /app/main.py | 25028c42c56aecb8011b2eb39993cc3ca9aafd88 | [] | no_license | moorthikanthasamy/my-dev-python | 4ff145240f12fee2040ff25b9f351ed336fd7a16 | 1d0d22d0998e69ca3cc481978c817d6525ae2737 | refs/heads/master | 2020-06-25T10:08:10.426823 | 2017-08-03T10:33:00 | 2017-08-03T10:33:00 | 96,971,993 | 0 | 1 | null | 2017-07-12T08:29:43 | 2017-07-12T06:16:24 | null | UTF-8 | Python | false | false | 118 | py | from package1 import module1
def main():
print("This is main function")
if __name__ == "__main__":
main()
| [
"Moorthi.Kanthasamy@csscorp.com"
] | Moorthi.Kanthasamy@csscorp.com |
62a0402314ba944050a24978a67b564db631e65c | ad88d24efaee55ed43c1e1f80fed59d5094a0cdc | /assignment1/asgn1.py | 7b98ed9aeb805b46b8a66be13bfb961a0123ea27 | [] | no_license | fantaichen/Quant_hw | 3c7d1afd8d9aa138d55d716f7e0d8c5c90f71438 | 258d55b770832a0fd3ad4403c2dc17b94b512f86 | refs/heads/main | 2023-03-20T05:29:39.128304 | 2021-03-22T15:04:53 | 2021-03-22T15:04:53 | 336,995,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,817 | py | # _*_ coding: utf-8 _*_
"""
Time: 8/2/2021 15:57
Author: FAN Taichen
Version: V 0.1
File: asgn1.py
Describe: CUHK course homework
"""
import pandas as pd
def min5(a):
minute = int(a[-2:])
minutenew = int(minute / 5) * 5
if len(str(minutenew)) == 1:
minutenew = '0' + str(minutenew)
b = a[:-2] + str(minutenew)
return b
quote = pd.read_csv('trade_quote_data/quote.csv')
trade = pd.read_csv('trade_quote_data/trade.csv')
trade['notional'] = trade['price'] * trade['size']
quote['minute'] = quote['time'].apply(lambda x: x[:5])
trade['minute'] = trade['time'].apply(lambda x: x[:5])
newtrade = trade.groupby(['date', 'sym']).agg({'price': 'last', 'size': 'sum', 'notional': 'sum'})
tradedict = {}
for i, tradedf in newtrade.groupby('sym'):
tradedf['return'] = tradedf['price'].pct_change()
tradedict[str(i)] = tradedf.copy()
sz = tradedict['000001.SZSE'].groupby('sym').agg({'size': 'mean', 'notional': 'mean', 'return': 'std'})
sh = tradedict['600030.SHSE'].groupby('sym').agg({'size': 'mean', 'notional': 'mean', 'return': 'std'})
leftdf = pd.concat([sz, sh])
mintrade = trade.groupby(['date', 'sym', 'minute']).agg({'price': 'last'})
mintrade_sym = mintrade.groupby(['date', 'sym'])
mindict = []
for syms, symtradedf in mintrade_sym:
symtradedf['minret'] = symtradedf['price'].pct_change()
mindict.append(symtradedf.copy())
middf = pd.concat(mindict).groupby('sym').agg({'minret': 'std'})
t = 240 ** 0.5
middf['Volatility5'] = middf['minret'] * t
quote["spread"] = 10000 * (quote["ask"] - quote["bid"]) / 0.5 / (quote["ask"] + quote["bid"])
quote["qsize"] = 0.5 * (quote["asize"] + quote["bsize"])
rightdf = quote.groupby('sym').agg({'spread': 'mean', 'qsize': 'mean'})
q1 = pd.concat([leftdf, middf['Volatility5'], rightdf], axis=1)
q1 = q1.reset_index()
q1.columns = ['Stock', 'ADV', 'ADTV', 'Volatility', 'Volatility5', 'Spread(bps)', 'Quote Size']
quotenew = quote[quote['sym'] == '600030.SHSE']
tradenew = trade[trade['sym'] == '600030.SHSE']
tradenew['mins5'] = tradenew['minute'].apply(min5)
quotenew['mins5'] = quotenew['minute'].apply(min5)
tgdf = tradenew.groupby(['mins5']).agg({'size': 'sum'})
total_size = tradenew['size'].sum()
volpct = tgdf / total_size
newret = pd.concat(mindict)
newret = newret.reset_index()
newret = newret[newret['sym'] == '600030.SHSE']
newret['mins5'] = newret['minute'].apply(min5)
minretstd = newret.groupby('mins5').agg({'minret': 'std'})
qsmean = quotenew.groupby('mins5').agg({'spread': 'mean', 'qsize': 'mean'})
q2 = pd.concat([minretstd, qsmean, volpct], axis=1)
q2 = q2.reset_index()
q2.columns = ['time', 'vol5', 'spread', 'qsize', 'volpct']
q1.to_excel('asgn1_1.xlsx', index=False)
q2.to_excel('asgn1_2.xlsx', index=False)
| [
"noreply@github.com"
] | noreply@github.com |
8839acff26034ba1c625d12d98a1ce8c632e5eea | 13a4b193135579beadcdb94e5bdf842ac4d93f5a | /workserver/service/getGameResultSRV.py | 23062a2872b2e2ed4304d820787c60b625dbcd3d | [] | no_license | LiqunHu/lottery | 69ba05fa7e263b7c454d1ffb04071e2b6f4f0ea5 | 20899b50be4094b1da0aa9f7c5e1a163bf441423 | refs/heads/master | 2021-01-15T17:28:48.047187 | 2017-08-14T01:32:48 | 2017-08-14T01:32:48 | 99,753,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 13 17:05:35 2016
@author: huliqun
"""
import falcon
from workserver.util import GLBConfig
from workserver.service.ServiceBase import ServiceBase
from workserver.module.models import User, MatchData, MatchInfo500Time, MatchInfoD
from workserver.util import SysUtil
class getGameResultResource(ServiceBase):
def on_get(self, req, resp):
self.initialize()
req_para = falcon.util.uri.parse_query_string(req.query_string)
matches = []
if 'userid' in req_para.keys():
u = self.session.query(User).filter(
User.userid == req_para['userid']).first()
if u is None:
self.errorReturn(GLBConfig.API_ERROR, '用户不存在.')
for md, mi, m in self.session.query(MatchData, MatchInfoD, MatchInfo500Time).\
filter(MatchData.userid == u.userid).\
filter(MatchData.date >= SysUtil.getYesterday()).\
filter(MatchData.matchAID == MatchInfoD.matchid).\
filter(MatchInfoD.match == MatchInfo500Time.match).all():
matches.append(m)
for md, mi, m in self.session.query(MatchData, MatchInfoD, MatchInfo500Time).\
filter(MatchData.singleFlag == GLBConfig.M_DUAL).\
filter(MatchData.userid == u.userid).\
filter(MatchData.date >= SysUtil.getYesterday()).\
filter(MatchData.matchBID == MatchInfoD.matchid).\
filter(MatchInfoD.match == MatchInfo500Time.match).all():
matches.append(m)
else:
matches = self.session.query(MatchInfo500Time).\
filter(MatchInfo500Time.mststus != '完').all()
matchesB = self.session.query(MatchInfo500Time).\
filter(MatchInfo500Time.mststus == '完').all()
for m in matchesB:
matches.append(m)
maData = []
if matches:
for m in matches:
maData.append({'match': m.match,
'mtime': m.mtime.strftime('%Y-%m-%d %H:%M'),
'matchtype': m.matchtype,
'matchzhu': m.matchzhu,
'matchke': m.matchke,
'zhuScore': m.zhuScore,
'keScore': m.keScore,
'zhuHScore': m.zhuHScore,
'keHScore': m.keHScore,
'mststus': m.mststus})
self.result['data'] = maData
req.context['result'] = self.result
resp.set_header('Powered-By', 'huliquns@126.com')
resp.status = falcon.HTTP_200
self.release()
| [
"huliquns@126.com"
] | huliquns@126.com |
202fe2674dddb0798aa94d61fc5dd00d5899b777 | a76401f82ed1c9ac47ddaff27681b90f37627426 | /.history/student_olx/main/views_20210916141012.py | edb998f41256d3683fcb91d308dfb9053702e68b | [] | no_license | RiteshK555/itw-project | e90e1dd13517ee8b07d72cc3bd5a42af367ab587 | a2e4c8682c2030ff77da9ade5ae4677bd475f87a | refs/heads/master | 2023-08-30T03:48:58.904979 | 2021-11-10T09:50:59 | 2021-11-10T09:50:59 | 410,032,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | from django.shortcuts import render
from .models import ToDoList,Item
# Create your views here.
from django.http import HttpResponse
from .forms import CreateNewList
def index(response,id):
lis=ToDoList.objects.get(id=id)
return render(response,"main/base.html",{"name":lis})
def home(response):
return render(response,"main/home.html",{"name":"test"})
def create(response):
if response.method == "POST":
form=CreateNewList()
else:
form=CreateN
return render(response,"main/create.html",{"form":form}) | [
""
] | |
f5329d285bec6749f74d5c53bd5eb3451e518110 | f06393bbc532e1feb2402ff5b30000d3472adc33 | /description/app.py | 52d44f48e151751c6fb2e254dd5c5aaab5ba0b9e | [
"MIT"
] | permissive | Build-Week-AirBnB-Optimal-Price/Data-science | d6013325edbddf9c78eedc17e1be5a5bf25f737d | f5f1934fe9ad4045aa0b491968f3bf7565df9385 | refs/heads/master | 2022-12-19T06:49:44.854532 | 2020-01-11T02:20:17 | 2020-01-11T02:20:17 | 228,754,469 | 0 | 5 | MIT | 2022-12-08T03:28:32 | 2019-12-18T04:00:20 | Jupyter Notebook | UTF-8 | Python | false | false | 766 | py | from flask import Flask, render_template, request, jsonify
import pickle
import requests
import json
import numpy as np
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Model
sid = SentimentIntensityAnalyzer()
# Pickle it
pickle.dump(sid, open('model.pkl', 'wb'))
model = pickle.load(open('model.pkl', 'rb'))
# Create App
app = Flask(__name__)
@app.route('/', methods = ['GET', 'POST'])
def analysis():
# Take JSON input
text = request.get_json(force = True)
# Run JSON as text through model
prediction = model.polarity_scores(str(text))
# Re-convert results to JSON
return jsonify(results = prediction)
if __name__ == '__main__':
app.run()
| [
"noreply@github.com"
] | noreply@github.com |
0d20c22822d20ba246ec32ccc5d4da9a661a23db | b927ea68bf3743c83ef39dbade04fdd8c9b7f1af | /transforms/images/polus-rolling-ball-plugin/src/main.py | 58553d18d879100a14e534a8457123819110141c | [
"MIT"
] | permissive | Nicholas-Schaub/polus-plugins | 9561fb93b7349888a58ad786d4b72734a272a975 | 1a1e70456b8e7acc4a4c92b5fdb0defb12edcb1e | refs/heads/master | 2023-07-23T23:09:33.364942 | 2023-02-16T14:58:10 | 2023-02-16T14:58:10 | 208,253,925 | 0 | 3 | MIT | 2022-09-13T15:32:33 | 2019-09-13T11:47:01 | Python | UTF-8 | Python | false | false | 3,582 | py | import argparse
import logging
from multiprocessing import cpu_count
from pathlib import Path
from bfio.bfio import BioReader
from bfio.bfio import BioWriter
from rolling_ball import rolling_ball
# Initialize the logger
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
def main(
input_dir: Path,
ball_radius: int,
light_background: bool,
output_dir: Path,
) -> None:
""" Main execution function
Args:
input_dir: path to directory containing the input images.
ball_radius: radius of ball to use for the rolling-ball algorithm.
light_background: whether the image has a light or dark background.
output_dir: path to directory where to store the output images.
"""
for in_path in input_dir.iterdir():
in_path = Path(in_path)
out_path = Path(output_dir).joinpath(in_path.name)
# Load the input image
with BioReader(in_path) as reader:
logger.info(f'Working on {in_path.name} with shape {reader.shape}')
# Initialize the output image
with BioWriter(out_path, metadata=reader.metadata, max_workers=cpu_count()) as writer:
rolling_ball(
reader=reader,
writer=writer,
ball_radius=ball_radius,
light_background=light_background,
)
return
if __name__ == "__main__":
""" Argument parsing """
logger.info("Parsing arguments...")
parser = argparse.ArgumentParser(prog='main', description='A WIPP plugin to perform background subtraction using the rolling-ball algorithm.')
# Input arguments
parser.add_argument(
'--inputDir',
dest='input_dir',
type=str,
help='Input image collection to be processed by this plugin.',
required=True,
)
parser.add_argument(
'--ballRadius',
dest='ball_radius',
type=str,
default='25',
help='Radius of the ball used to perform background subtraction.',
required=False,
)
parser.add_argument(
'--lightBackground',
dest='light_background',
type=str,
default='false',
help='Whether the image has a light or dark background.',
required=False,
)
# Output arguments
parser.add_argument(
'--outputDir',
dest='output_dir',
type=str,
help='Output collection',
required=True,
)
# Parse the arguments
args = parser.parse_args()
_input_dir = Path(args.input_dir).resolve()
if _input_dir.joinpath('images').is_dir():
# switch to images folder if present
_input_dir = _input_dir.joinpath('images').resolve()
logger.info(f'inputDir = {_input_dir}')
_ball_radius = int(args.ball_radius)
logger.info(f'ballRadius = {_ball_radius}')
_light_background = args.light_background
if _light_background in {'true', 'false'}:
_light_background = (_light_background == 'true')
else:
raise ValueError(f'lightBackground must be either \'true\' or \'false\'')
logger.info(f'lightBackground = {_light_background}')
_output_dir = args.output_dir
logger.info(f'outputDir = {_output_dir}')
main(
input_dir=_input_dir,
ball_radius=_ball_radius,
light_background=_light_background,
output_dir=_output_dir,
)
| [
"noreply@github.com"
] | noreply@github.com |
f9cf9f7abfa6ffb2a598675b2ec9c0951eb20071 | f5baec3afc326288bb96078243863a042b0d54f2 | /main.py | 0b455018f4e060a42b643ca05b553c34e92f33c4 | [] | no_license | DudeAbides0/textadventure | 4255f7472d0466ebb5b593b53dff2146a8a2109a | 2e90d794a99fd739c320fe177922dc7bfe986d1a | refs/heads/master | 2021-04-30T00:01:26.362649 | 2018-04-25T21:48:03 | 2018-04-25T21:48:03 | 121,567,489 | 0 | 0 | null | 2018-02-28T22:45:40 | 2018-02-14T22:14:08 | Python | UTF-8 | Python | false | false | 7,239 | py | import self as self
class Inventory():
    """Holds the items the player is currently carrying."""

    def __init__(self):
        # Carried items, kept in pickup order.
        self.items = []

    def add(self, item):
        """Put *item* into the inventory."""
        self.items.append(item)

    def drop(self, item):
        """Remove *item* from the inventory (item must be present)."""
        self.items.remove(item)

    def list(self):
        """Print the name of every carried item."""
        print ("You are carrying:")
        for carried in self.items:
            print (carried.get_name())

    def get(self, type):
        """Return every carried item that is an instance of *type*."""
        return [carried for carried in self.items if isinstance(carried, type)]

    def process_command(self, command):
        """Forward *command* to each item whose name appears in it and
        collect the handlers' return values."""
        return [carried.process_command(command)
                for carried in self.items
                if carried.get_name() in command]
class Item():
    """Base class for anything the player can pick up or interact with."""

    def __init__(self, name):
        self.name = name
        # Maps a command keyword (e.g. "read") to the handler to invoke.
        self.known_commands = {}

    def get_name(self):
        """Return this item's display name."""
        return self.name

    def process_command(self, command):
        """Invoke every registered handler whose keyword occurs in *command*."""
        for keyword, handler in self.known_commands.items():
            if keyword in command:
                handler(command)
class Literature(Item):
    """An item with readable (and rewritable) text contents."""

    def __init__(self, name, contents="This item is blank."):
        """Create a piece of literature called *name* holding *contents*."""
        Item.__init__(self, name)
        self.contents = contents
        self.known_commands["read"] = self.read
        # BUG FIX: the original stored self.write(command), which raised a
        # NameError (`command` is not in scope here) and would have stored
        # the call's result instead of the handler.  Register the bound
        # method itself, mirroring the "read" registration above.
        self.known_commands["write"] = self.write

    def read(self, command):
        """Print the current contents (handler for the 'read' command)."""
        print (self.contents)

    def write(self, contents):
        """Replace the contents (handler for the 'write' command; the full
        command string becomes the new contents)."""
        self.contents = contents
class Room():
    """A location in the game world, linked to neighbouring rooms by
    direction words."""

    def __init__(self, name, description, id):
        """Create a room with a display *name*, a *description* shown on
        entry, and a short *id* tag."""
        self.name = name
        self.description = description
        self.id = id
        self.items = []        # items lying in this room
        self.connectors = []   # (connector label, primary direction) pairs
        self.rooms = {}        # direction word -> neighbouring Room

    def add_item(self, item):
        """Place *item* in this room."""
        self.items.append(item)

    def add_room(self, direction, room):
        """Link *room* as the neighbour reached by *direction*."""
        self.rooms[direction] = room

    def add_connection(self, room, connector, actions):
        """Link *room* through *connector* (e.g. "passage") for every
        direction word in *actions*; the first entry is the one displayed."""
        for direction in actions:
            self.rooms[direction] = room
        self.connectors.append((connector, actions[0]))

    def enter_room(self, Inventory):
        """Describe the room: name, description, exits and visible items.

        The parameter is unused here (subclasses such as DarkRoom inspect
        the player's inventory); its name is kept for compatibility.
        """
        print (self.name)
        # BUG FIX: a bare `print` is a no-op expression in Python 3; call
        # print() to emit the intended blank line (here and below).
        print ()
        print (self.description)
        print ()
        for connector in self.connectors:
            print ("There is a " + connector[0] +
                   " that goes " + connector[1] + ".")
        print ()
        for item in self.items:
            print ("You see a " + item.name + " here.")
        print ()

    def get_name(self):
        """Return the room's display name."""
        return self.name

    def is_valid_direction(self, direction):
        """Return True if *direction* leads somewhere from this room."""
        return direction in self.rooms.keys()

    def next_room(self, direction):
        """Return the neighbouring room reached by *direction*."""
        return self.rooms[direction]

    def process_command(self, command, inventory):
        """Handle *command*: a direction word returns the next Room, a
        "get <item>" command moves the item into *inventory* and returns a
        message string, anything else returns None."""
        if command in self.rooms.keys():
            new_room = self.next_room(command)
            return new_room
        elif "get" in command:
            # BUG FIX: the original returned "I don't know..." as soon as
            # the first item failed to match, so items beyond the first
            # could never be picked up (and an empty room returned None).
            # Scan every item before giving up.
            for item in self.items:
                if item.name in command:
                    inventory.add(item)
                    self.items.remove(item)
                    return "You picked up the " + item.name + "."
            return "I don't know what you want to pick up."
        else:
            return None
class LightSource(Item):
    """An item that can be switched on to illuminate dark rooms."""

    def __init__(self, name, on=False):
        self.on = on
        Item.__init__(self, name)
        self.known_commands["turn on"] = self.turn_on
        self.known_commands["turn off"] = self.turn_off

    @staticmethod
    def is_one_on(sources):
        """Return True if at least one light source in *sources* is on."""
        return any(source.is_on() for source in sources)

    def is_on(self):
        """Return whether this source is currently switched on."""
        return self.on

    def turn_on(self, command):
        """Switch the source on (handler for the 'turn on' command)."""
        self.on = True
        print ("The " + self.name + " is on.")

    def turn_off(self, command):
        """Switch the source off (handler for the 'turn off' command)."""
        self.on = False
        print ("The " + self.name + " is off.")
class Flashlight(LightSource):
    """A battery-powered light source."""

    def __init__(self, name="flashlight", battery_level=100, on=False):
        LightSource.__init__(self, name, on)
        # Remaining charge, on a 0-100 scale.
        self.battery_level = battery_level

    def change_batteries(self):
        """Restore the battery to full charge."""
        self.battery_level = 100

    def compute_usage(self):
        """Drain the battery according to how long the light has been on.

        Not yet implemented: would track the time switched on and reduce
        battery_level by a matching amount.
        """
        pass
class DarkRoom(Room):
    """A room that is fatal to enter without a lit light source."""

    def enter_room(self, inventory):
        """Enter the room; the player dies (game exits) unless *inventory*
        holds a light source that is switched on."""
        light_sources = inventory.get(LightSource)
        if LightSource.is_one_on(light_sources):
            # BUG FIX: the original passed the Inventory *class* instead of
            # the player's inventory instance; it only worked because the
            # base implementation ignores its argument.
            Room.enter_room(self, inventory)
        else:
            print ("A ghost came up from behind and possessed you.")
            print ("Game over.")
            exit()
lobby = Room ('You have reached Coronado Hotel, Welcome', 'You are in the lobby', 'l')
frontdesk = Room('Front Desk', 'You reached the front desk, there is no one here ', 'f')
patio = Room('Patio', 'You are in the patio, smoking is allowed.', 'p')
elevator = Room('Elevator', 'You attempt to take the elevator but you need an elevator pass', 'e')
openbar = Room('Open Bar', 'You reached the open bar, there is a man lying on the floor', 'o')
cafe = Room('Cafe', 'You are in the Cafe, there is a cash register to your right', 'c')
parkinglot = Room('Parking Lot', 'You are in the parking lot, there is a car in the distance', 'pl')
entrance = Room('Entrance', 'You reached the entrance of the hotel, you try to open it but its locked','ent')
cellar = DarkRoom('Cellar', 'You are in the hidden cellar, the only lightsource is your flashlight, exercise caution.', 'cell')
frontdesk.add_connection(lobby, "passage", ["south", "s"])
lobby.add_connection(frontdesk, "passage", ["north", "n"])
lobby.add_connection(patio, "hallway", ["west", "w"])
patio.add_connection(lobby, "hallway", ["east", "e"])
lobby.add_connection(elevator, "elevator", ["east", "e"])
elevator.add_connection(lobby, "passage", ["west", "w"])
patio.add_connection(openbar, "passage", ["north", "n"])
patio.add_connection(cafe, "path", ["south", "s"])
openbar.add_connection(patio, "passage", ["south", "w"])
cafe.add_connection(patio, "path", ["north", "n"])
cafe.add_connection(parkinglot, "passage", ["east", "e"])
parkinglot.add_connection(cafe, "passage", ["west", "w"])
parkinglot.add_connection(entrance, "passage", ["south", "s"])
entrance.add_connection(parkinglot, "passage", ["north", "n"])
cellar.add_connection(frontdesk, "passage", ["south", "s"])
frontdesk.add_connection(cellar, "passage", ["north", "n"])
# kitchen.add_room('North', dining)
# dining.add_room('South', kitchen)
# dining.add_room('North', hallway)
# hallway.add_room('South', dining)
# hallway.add_room('East', living)
inventory = Inventory()
current_room = lobby
current_room.enter_room(inventory)
frontdesk.add_item(Flashlight())
while True:
command = raw_input("What do you want to do?")
if command in ["exit","x", "quit", "q"]:
break
result = current_room.process_command(command, inventory)
if isinstance(result, Room):
current_room = result
result.enter_room(inventory)
continue
elif isinstance(result, str):
print result
continue
else:
result = inventory.process_command(command)
if len(result) == 0:
print "I don't know what you mean"
| [
"isaarobl23@e3civichigh.com"
] | isaarobl23@e3civichigh.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.