blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3f35f2a8b17f35df510599c29d815a6b083efd36 | ff5892487c262ce845a9996a282d3a2fdb1a3b15 | /URI_1254.py | 17a978a92191caec16353d8fd8ca9417daec8b41 | [] | no_license | dankoga/URIOnlineJudge--Python-3.9 | d424a47671f106d665a4e255382fc0ec3059096a | f1c99521caeff59be0843af5f63a74013b63f7f0 | refs/heads/master | 2023-07-15T08:32:11.040426 | 2021-09-03T13:27:17 | 2021-09-03T13:27:17 | 393,991,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import re
# Process test cases until EOF.  Each case is three input lines: a tag
# name, its replacement text, and the HTML-ish line to rewrite.  The
# replacement is applied case-insensitively, but only inside tag regions
# (between a '<' and the following '>'); plain text is copied verbatim.
while True:
    try:
        tag = input().lower()
    except EOFError:
        break
    tag_replacement = input()
    text = input()
    pattern = re.compile(tag, re.IGNORECASE)
    pieces = []
    pos = 0
    length = len(text)
    while pos < length:
        # Copy the run of plain text up to the next '<' unchanged.
        start = pos
        while pos < length and text[pos] != '<':
            pos += 1
        pieces.append(text[start:pos])
        # Substitute the tag name within the '<' ... '>' region only;
        # the closing '>' itself is picked up by the next plain-text run.
        start = pos
        while pos < length and text[pos] != '>':
            pos += 1
        pieces.append(pattern.sub(tag_replacement, text[start:pos]))
    print(''.join(pieces))
| [
"dankoga2@gmail.com"
] | dankoga2@gmail.com |
7870f65dc0b7e24d9079a084ded746c988bdb9bb | 1bd3076902117867ec048210905195ba2aaaaa6b | /exercise/leetcode/python_src/by2017_Sep/Leet279.py | d70c3fe111c535970d12a2902656ed8da5306c9a | [] | no_license | SS4G/AlgorithmTraining | d75987929f1f86cd5735bc146e86b76c7747a1ab | 7a1c3aba65f338f6e11afd2864dabd2b26142b6c | refs/heads/master | 2021-01-17T20:54:31.120884 | 2020-06-03T15:04:10 | 2020-06-03T15:04:10 | 84,150,587 | 2 | 0 | null | 2017-10-19T11:50:38 | 2017-03-07T03:33:04 | Python | UTF-8 | Python | false | false | 809 | py | class Solution(object):
"""
my first dp code
"""
    def __init__(self):
        # Memo table for the top-down DP: dpstate[i] is the minimum number
        # of perfect squares summing to i, or -1 for "not computed yet".
        # Base cases 0..3 are pre-filled; the table supports n <= 10003.
        self.dpstate = [0, 1, 2, 3, ] + ([-1, ] * 10000)
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
res = self.dpRecursive(n, self.dpstate)
return res
def dpRecursive(self, n, stateRecord):
if stateRecord[n] != -1:
return stateRecord[n]
else:
maxSqrt = int(n**0.5)
min = 0xffffffff
while maxSqrt >= 1:
tmp = self.dpRecursive(n - maxSqrt**2, stateRecord)
min = tmp if tmp < min else min
maxSqrt -= 1
stateRecord[n] = min + 1
return min + 1
if __name__ == "__main__":
    # Quick manual check against a largish input.
    print(Solution().numSquares(6405))
"ziheng_song@126.com"
] | ziheng_song@126.com |
ce52a7bfc0bc1f7c3bc2700efd500ff2e1c4b9d7 | 36f6a64e74b4f3f8c936ede0ca6ac781cc0b0a84 | /tests/serializers/test_room.py | ab8e6300cd498141a1043603788b292112bb0545 | [
"MIT"
] | permissive | bayatim/cleanarc | 3b7613702d38387010c5bf7e9cc83442bda8db69 | d393ac1c856180efa38872dab02ae045af92e4fe | refs/heads/master | 2023-07-26T14:50:28.851653 | 2021-09-10T19:36:41 | 2021-09-10T19:36:41 | 402,742,678 | 0 | 0 | MIT | 2021-09-10T19:36:42 | 2021-09-03T11:12:09 | Python | UTF-8 | Python | false | false | 679 | py | import json
import uuid
from rentomatic.serializers.room import RoomJsonEncoder
from rentomatic.domain.room import Room
def test_serialize_domain_room():
    """Room serialised via RoomJsonEncoder matches the expected fields."""
    room_code = uuid.uuid4()
    room = Room(
        room_code,
        size=200,
        price=10,
        longitude=-0.09998975,
        latitude=51.75436293,
    )
    expected = {
        "code": str(room_code),
        "size": 200,
        "price": 10,
        "longitude": -0.09998975,
        "latitude": 51.75436293,
    }
    serialized = json.dumps(room, cls=RoomJsonEncoder)
    # Compare parsed structures so formatting differences don't matter.
    assert json.loads(serialized) == expected
if __name__ == '__main__':
    # Intentional no-op: this module is meant to be collected by pytest,
    # not executed directly.
    pass
"mehdi.bayati@siegwerk.com"
] | mehdi.bayati@siegwerk.com |
2eae42fa8e4b1dc07aa735f7b8fc312778f409cd | 4b4df51041551c9a855468ddf1d5004a988f59a2 | /leetcode_python/Array/rotate-function.py | 3d952365fd1c669f093f899be1b8236df3d9be1b | [] | no_license | yennanliu/CS_basics | 99b7ad3ef6817f04881d6a1993ec634f81525596 | 035ef08434fa1ca781a6fb2f9eed3538b7d20c02 | refs/heads/master | 2023-09-03T13:42:26.611712 | 2023-09-03T12:46:08 | 2023-09-03T12:46:08 | 66,194,791 | 64 | 40 | null | 2022-08-20T09:44:48 | 2016-08-21T11:11:35 | Python | UTF-8 | Python | false | false | 4,546 | py | """
396. Rotate Function
Medium
You are given an integer array nums of length n.
Assume arrk to be an array obtained by rotating nums by k positions clock-wise. We define the rotation function F on nums as follow:
F(k) = 0 * arrk[0] + 1 * arrk[1] + ... + (n - 1) * arrk[n - 1].
Return the maximum value of F(0), F(1), ..., F(n-1).
The test cases are generated so that the answer fits in a 32-bit integer.
Example 1:
Input: nums = [4,3,2,6]
Output: 26
Explanation:
F(0) = (0 * 4) + (1 * 3) + (2 * 2) + (3 * 6) = 0 + 3 + 4 + 18 = 25
F(1) = (0 * 6) + (1 * 4) + (2 * 3) + (3 * 2) = 0 + 4 + 6 + 6 = 16
F(2) = (0 * 2) + (1 * 6) + (2 * 4) + (3 * 3) = 0 + 6 + 8 + 9 = 23
F(3) = (0 * 3) + (1 * 2) + (2 * 6) + (3 * 4) = 0 + 2 + 12 + 12 = 26
So the maximum value of F(0), F(1), F(2), F(3) is F(3) = 26.
Example 2:
Input: nums = [100]
Output: 0
Constraints:
n == nums.length
1 <= n <= 105
-100 <= nums[i] <= 100
"""
# V0
# IDEA : MATH
# first, we represent the F(1) op as below:
#
# F(0) = 0A + 1B + 2C +3D
#
# F(1) = 0D + 1A + 2B +3C
#
# F(2) = 0C + 1D + 2A +3B
#
# F(3) = 0B + 1C + 2D +3A
#
# then, by some math manipulation, we have below relation:
#
# set sum = 1A + 1B + 1C + 1D
#
# -> F(1) = F(0) + sum - 4D
#
# -> F(2) = F(1) + sum - 4C
#
# -> F(3) = F(2) + sum - 4B
#
# so we find the rules!
#
# => F(i) = F(i-1) + sum - n*A[n-i]
#
# https://www.cnblogs.com/grandyang/p/5869791.html
# http://bookshadow.com/weblog/2016/09/11/leetcode-rotate-function/
class Solution(object):
def maxRotateFunction(self, A):
size = len(A)
sums = sum(A)
sumn = sum(x * n for x, n in enumerate(A))
ans = sumn
for x in range(size - 1, 0, -1):
sumn += sums - size * A[x]
ans = max(ans, sumn)
return ans
# V0'
# IDEA : BRUTE FORCE (TLE)
class Solution(object):
def maxRotateFunction(self, nums):
# help func
def help(arr):
ans = 0
for i in range(len(arr)):
tmp = i * arr[i]
ans += tmp
return ans
# edge case
if not nums:
return 0
# rotate
ans = -float('inf')
for i in range(len(nums)):
tmp = nums.pop(-1)
nums.insert(0, tmp)
cur = help(nums)
ans = max(ans, cur)
#print("nums = " + str(nums) + " cur = " + str(cur))
return ans
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/83002609
# IDEA : MATH PATTERN
# -> SINCE
# F(0) = 0A + 1B + 2C +3D
# F(1) = 0D + 1A + 2B +3C
# F(2) = 0C + 1D + 2A +3B
# F(3) = 0B + 1C + 2D +3A
# -> SO
# F(1) = F(0) + sum - 4D
# F(2) = F(1) + sum - 4C
# F(3) = F(2) + sum - 4B
# -> THEN WE KNOW THE PATTERN OF ROTATE OPERATION IS ACTUAL :
# ---> F(i) = F(i-1) + sum - n * A[n-i]
class Solution:
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
_sum = 0
N = len(A)
f = 0
for i, a in enumerate(A):
_sum += a
f += i * a
res = f
for i in range(N - 1, 0, -1):
f = f + _sum - N * A[i]
res = max(res, f) # since we want to calculate the MAX value of F(0), F(1), ..., F(n-1).
return res
### Test case
s=Solution()
assert s.maxRotateFunction([]) == 0
assert s.maxRotateFunction([7]) == 0
assert s.maxRotateFunction([7,2,1]) == 15
assert s.maxRotateFunction([4, 3, 2, 6]) == 26
assert s.maxRotateFunction([0,0,0,0]) == 0
assert s.maxRotateFunction([3,7,0,1]) == 28
assert s.maxRotateFunction([1,1,1,1]) == 6
assert s.maxRotateFunction([-1,-1,-1,-1]) == -6
assert s.maxRotateFunction([-1,10,-5,1]) == 29
# V1'
# http://bookshadow.com/weblog/2016/09/11/leetcode-rotate-function/
class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
size = len(A)
sums = sum(A)
sumn = sum(x * n for x, n in enumerate(A))
ans = sumn
for x in range(size - 1, 0, -1):
sumn += sums - size * A[x]
ans = max(ans, sumn)
return ans
# V2
# Time: O(n)
# Space: O(1)
class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
s = sum(A)
fi = 0
for i in range(len(A)):
fi += i * A[i]
result = fi
for i in range(1, len(A)+1):
fi += s - len(A) * A[-i]
result = max(result, fi)
return result | [
"f339339@gmail.com"
] | f339339@gmail.com |
2d9cd6536ba710b20ea94ed190d29056e9234af9 | bf760c913aca7bc12e318fadd72b9c9d50ec5c97 | /feedback/migrations/0006_auto_20151105_0821.py | 21d6e8b2a0304816e2ba9e516914b2bf870a1ee3 | [] | no_license | bharat-gera/Nautlus | 29393180da9465a349e7ff4b884a1e495c95d610 | 72f3d06b4d91b987f1cd5ed7044f442e5eafc882 | refs/heads/master | 2023-01-07T13:36:31.495638 | 2020-05-07T07:30:03 | 2020-05-07T07:30:03 | 74,740,143 | 1 | 0 | null | 2022-12-26T19:47:15 | 2016-11-25T08:36:22 | JavaScript | UTF-8 | Python | false | false | 422 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: removes the Wallet model.  The
    # 'owner' foreign key is dropped first so the model delete is valid.

    dependencies = [
        ('feedback', '0005_auto_20151104_1317'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='wallet',
            name='owner',
        ),
        migrations.DeleteModel(
            name='Wallet',
        ),
    ]
| [
"bharatgeraan@gmail.com"
] | bharatgeraan@gmail.com |
9b7776469b5fc184cd1f5ca602b2368f3d9a3ec8 | 978fa45efa8b57d10baa630df901e8bcd8527e96 | /gochiusa/views.py | bceb8a85324b267bb33ff8a79a0e0f09f9d18fa9 | [] | no_license | SeokJunYeom/first-django-project | 2183a7aae27ff547c25b85da3284500cf2834404 | 788d47e22cdb47086d980605a7fb0825d3707a11 | refs/heads/master | 2021-01-16T18:23:21.114165 | 2016-10-11T02:05:43 | 2016-10-11T02:05:43 | 65,343,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,090 | py | from django.shortcuts import render
from django.utils import timezone
from django.http import HttpResponseRedirect, QueryDict
from django.conf import settings
from django.contrib.auth.decorators import login_required
from .models import Character
from .forms import CharacterForm
import os
# Create your views here.
# Maps English month names to the month number as a string,
# e.g. "March" -> '3'.  Used to normalise submitted birth dates.
month_dic = {
    "January" : '1',
    "February" : '2',
    "March" : '3',
    "April" : '4',
    "May" : '5',
    "June" : '6',
    "July" : '7',
    "August" : '8',
    "September" : '9',
    "October" : '10',
    "November" : '11',
    "December" : '12'
}
def gochiusa(request):
    """List all characters; on POST, delete the requested character.

    POST (authenticated users only) expects a ``pk`` field identifying
    the Character to delete; the character's uploaded image is removed
    from the media directory along with the database row.
    """
    if request.method == "POST":
        if not request.user.is_authenticated():
            return HttpResponseRedirect(settings.LOGIN_URL)
        else:
            character_pk = request.POST.get("pk")
            character = Character.objects.get(pk=character_pk)
            # Build the media path portably instead of string concatenation,
            # and tolerate an already-missing file so the DB row can still
            # be deleted.
            image_path = os.path.join("media", character.image.name)
            try:
                os.remove(image_path)
            except OSError:
                pass
            character.delete()
            return HttpResponseRedirect("/")
    # Anonymous visitors get an empty "user" value for the template.
    if not request.user.is_authenticated():
        user = ""
    else:
        user = request.user
    characters = Character.objects.all().order_by("name")
    context = {"characters": characters, "user": user}
    return render(request, "gochius.html", context)
def character(request, name):
    """Render the detail page for the character with the given name."""
    ctx = {"character": Character.objects.get(name=name)}
    return render(request, "character.html", ctx)
@login_required
def character_post(request):
    """Show the character submission form; create a Character on POST."""
    if request.method == "POST":
        # The form submits e.g. "25 December, 2015"; keep only the
        # "day month" part before the comma.  (Assumes "birth" is always
        # present in the POST data -- TODO confirm against the template.)
        birth = request.POST.get("birth").split(',')[0]
        # QueryDict is immutable; copy it so the birth value can be
        # rewritten into the normalised "M/D" format.
        request.POST = request.POST.copy()
        request.POST["birth"] = stdBirth(birth)
        form = CharacterForm(request.POST, request.FILES)
        if form.is_valid():
            new_form = form.save(commit = False)
            new_form.user = request.user
            new_form.save()
            return HttpResponseRedirect("../../")
    # Invalid form or plain GET: re-render the submission page.
    return render(request, 'post.html', {})
def stdBirth(birth):
    """Convert a birth string like "25 December" into "12/25".

    ``birth`` must be "<day> <EnglishMonthName>"; any further
    space-separated parts are ignored.  Returns "<month-number>/<day>".

    Raises:
        KeyError: if the month name is not a valid English month.
        IndexError: if the string has fewer than two parts.
    """
    # Self-contained month mapping so the function does not depend on a
    # module-level global.
    months = {
        "January": '1', "February": '2', "March": '3', "April": '4',
        "May": '5', "June": '6', "July": '7', "August": '8',
        "September": '9', "October": '10', "November": '11',
        "December": '12',
    }
    parts = birth.split(' ')
    return months[parts[1]] + '/' + parts[0]
| [
"tjrwnsdl0720@naver.com"
] | tjrwnsdl0720@naver.com |
e9459fc693155f61282ad9a592262302bf7e81c3 | afecdd0aa85b4e910bbb9d99ce5aabe32599f2d7 | /VehiclePhysicsExample2/MyVehicle.py | 0840f9a34a695f2b2283b6089ea7afe05c23fb03 | [] | no_license | Blenderozodbek/CustomVehiclePhysics-UPBGE | 076c14136b36049bd47c4a262de8b52ab4f85002 | eccbaea4ea54494d35d439803e060ddeac44592f | refs/heads/master | 2022-04-09T14:36:28.966733 | 2020-03-02T07:52:39 | 2020-03-02T07:52:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | import bge
from collections import OrderedDict
import VehiclePhysics
import MathLib
import math
# When this module runs inside the game engine (rather than being loaded
# for component registration), grab the active scene.
# NOTE(review): presumably bge exposes __component__ only during
# component registration -- confirm against the UPBGE documentation.
if not hasattr(bge, "__component__"):
    scene = bge.logic.getCurrentScene()
args = OrderedDict([
])
def start(self, args):
self.Vehicle = VehiclePhysics.VehiclePhysics(self.object)
self.Camera = [obj for obj in self.Vehicle.childrenRecursive if "Camera" in obj][0]
self.CameraFollow = False
def update(self):
#self.Vehicle.PreUpdate()
if self.CameraFollow:
self.UpdateCamera()
def UpdateCamera(self):
#VehicleRotWorld = self.Vehicle.worldOrientation.to_euler()
#VehicleRotWorld.rotate_axis('Z', math.radians(90))
VehicleRotLocal = self.Vehicle.localOrientation.to_euler()
VehicleRotLocal.rotate_axis('X', math.radians(-5.0))
VehicleRotLocal[1] = 0.0
#TargetWorld = VehicleRotWorld.to_matrix()
TargetLocal = VehicleRotLocal.to_matrix()
Speed = self.Vehicle.DeltaTime*5
self.Camera.localOrientation = self.Camera.localOrientation.lerp(TargetLocal, Speed)
#self.Camera.worldOrientation = TargetWorld#self.Camera.worldOrientation.lerp(TargetWorld, Speed) | [
"bianca.oliveira.51@outlook.com"
] | bianca.oliveira.51@outlook.com |
e6ac0a4377f1efeaa6ced9a1f60ff1064ee4f9d5 | 48894ae68f0234e263d325470178d67ab313c73e | /sa/apps/mrt/views.py | 2436dec1eafdb325d310c8be9f817091229bae4b | [
"BSD-3-Clause"
] | permissive | DreamerDDL/noc | 7f949f55bb2c02c15ac2cc46bc62d957aee43a86 | 2ab0ab7718bb7116da2c3953efd466757e11d9ce | refs/heads/master | 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null | UTF-8 | Python | false | false | 4,323 | py | # -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## sa.mrt application
##----------------------------------------------------------------------
## Copyright (C) 2007-2011 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
## Python modules
import datetime
## NOC modules
from noc.lib.app import ExtApplication, view
from noc.sa.models import (ManagedObjectSelector, ManagedObject,
ReduceTask, MRTConfig)
from noc.main.models import Permission
from noc.lib.serialize import json_decode
class MRTAppplication(ExtApplication):
    """
    sa.mrt application

    REST endpoints for launching Map/Reduce Tasks (MRT) and polling
    their results.  (Python 2 source: uses the old
    ``except Exception, e`` syntax.)
    """
    def extra_permissions(self):
        """
        Get list of additional permissions

        One entry per active MRTConfig; each becomes a
        "sa:mrt:<permission_name>" permission checked by the views below.
        :return:
        """
        x = set([p.permission_name for p in
                 MRTConfig.objects.filter(is_active=True)])
        return list(x)
    @view(url="^(?P<task>[0-9a-zA-Z_\-]+)/$", method=["POST"],
          access="launch", api=True)
    def api_run(self, request, task):
        """
        Run new MRT
        :param request:
        :param task:
        :return:
        """
        # Get task
        config = MRTConfig.objects.filter(
            name=task, is_active=True).first()
        if not config:
            return self.response_not_found("Task not found")
        # Check permissions
        pn = "sa:mrt:%s" % config.permission_name
        if not Permission.has_perm(request.user, pn):
            return self.response_forbidden(
                "Permission denied: '%s' permission required" % pn)
        # Parse request (body must be a JSON object with a "selector" key)
        try:
            r = json_decode(request.raw_post_data)
        except Exception, why:
            return self.response_bad_request(str(why))
        if type(r) != dict:
            return self.response_bad_request("dict required")
        if "selector" not in r:
            return self.response_bad_request("'selector' is missed")
        # Resolve objects from selector
        try:
            objects = ManagedObjectSelector.resolve_expression(r["selector"])
        except ManagedObjectSelector.DoesNotExist, why:
            return self.response_not_found(str(why))
        except ManagedObject.DoesNotExist, why:
            return self.response_not_found(str(why))
        # Check all objects fall within MRTConfig selector
        unauthorized = set(objects).difference(set(
            config.selector.managed_objects))
        if unauthorized:
            return self.response_forbidden("Unauthorized objects: %s" % (
                ", ".join([o.name for o in unauthorized])
            ))
        # Run MRT
        # NOTE(review): a request timeout of 0 falls through to
        # config.timeout because of the "or" -- confirm that is intended.
        timeout = r.get("timeout", None) or config.timeout
        t = ReduceTask.create_task(
            objects,
            "pyrule:%s" % config.reduce_pyrule.name, {},
            config.map_script, r.get("map_args", {}),
            timeout)
        # 202 Accepted with a Location header pointing at the poll URL.
        return self.response_accepted(
            location="/sa/mrt/%s/%d/" % (task, t.id))
    @view(url="^(?P<task>[0-9a-zA-Z_\-]+)/(?P<task_id>\d+)/$", method=["GET"],
          access="launch", api=True)
    def api_result(self, request, task, task_id):
        """Poll a running MRT; returns progress or the final result."""
        # Get task
        config = MRTConfig.objects.filter(name=task, is_active=True).first()
        if not config:
            return self.response_not_found("Task not found")
        # Check permissions
        pn = "sa:mrt:%s" % config.permission_name
        if not Permission.has_perm(request.user, pn):
            return self.response_forbidden(
                "Permission denied: '%s' permission required" % pn)
        #
        t = self.get_object_or_404(ReduceTask, id=int(task_id))
        try:
            r = t.get_result(block=False)
        except ReduceTask.NotReady:
            # Not ready: report completed/failed map task progress
            completed = t.maptask_set.filter(status__in=("C", "F")).count()
            total = t.maptask_set.count()
            return {
                "ready": False,
                "progress": int(completed * 100 / total),
                "max_timeout": (t.stop_time - datetime.datetime.now()).seconds,
                "result": None
            }
        # Return result
        return {
            "ready": True,
            "progress": 100,
            "max_timeout": 0,
            "result": r
        }
| [
"dv@nocproject.org"
] | dv@nocproject.org |
c63304d6ce62dc737cfebc3acc110f5104f5f745 | 8def1a3a520797a8e958b0a03cfb9957ff0845b4 | /tests/test_map.py | 7994bbc29498182797c834de705b28bfc89fb352 | [] | no_license | Raragyay/Snake | 6a2a5c24fb012cb2057015cca07f4ca758d3c670 | 36c486a1fb5fd52225db7ccfce9b0f1438509075 | refs/heads/master | 2021-09-10T02:55:16.265421 | 2018-03-20T20:08:08 | 2018-03-20T20:08:08 | 126,075,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # coding=utf-8
"""
Tests for the Map class.
We test if the creation of a map class is as expected.
"""
from unittest import TestCase
import pytest
from snake.map import Map, PointType, Pos
class TestMap(TestCase):
    def test_init(self):
        """Map construction validates its arguments and builds walls."""
        with pytest.raises(TypeError):
            _ = Map(5, 1.5)
        with pytest.raises(ValueError):
            _ = Map(4, 5)
        m = Map(12, 12)
        for row in range(m.num_rows):
            for col in range(m.num_cols):
                on_border = (row in (0, m.num_rows - 1)
                             or col in (0, m.num_cols - 1))
                expected = PointType.WALL if on_border else PointType.EMPTY
                assert m.point(Pos(row, col)).type == expected
| [
"let987let987@gmail.com"
] | let987let987@gmail.com |
4b924366ee45f1578a34ab33ae2c95bc333001c1 | 05b2bfe4c5bb716aa2e283567095d613b26f3667 | /listings/migrations/0001_initial.py | 3076ffe7dffbc57378385bc0d5b371b7ddb2d2c2 | [] | no_license | anshupal11/Ashiyana-Estate | b0b8d09e42846b1cf4fc556e41557fe8523bd114 | ffdb8e0ae477c189b3002b6765d66eef638b0a4a | refs/heads/main | 2023-06-06T21:01:31.698523 | 2021-07-14T11:44:43 | 2021-07-14T11:44:43 | 360,515,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py | # Generated by Django 3.1.7 on 2021-03-30 11:09
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration creating the Listing model.
    # NOTE(review): field 'garbage' and FK 'relators' look like typos for
    # 'garage' / 'realtors', but renaming them requires a new migration.

    initial = True

    dependencies = [
        ('realtors', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('address', models.CharField(max_length=200)),
                ('city', models.CharField(max_length=100)),
                ('state', models.CharField(max_length=80)),
                ('zipcode', models.CharField(max_length=20)),
                ('description', models.TextField(blank=True)),
                ('price', models.IntegerField()),
                ('bedrooms', models.IntegerField()),
                ('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
                ('garbage', models.IntegerField(default=0)),
                ('sqft', models.IntegerField()),
                ('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
                ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
                ('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('is_published', models.BooleanField(default=True)),
                ('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('relators', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.realtor')),
            ],
        ),
    ]
| [
"anshu.pal108@gmail.com"
] | anshu.pal108@gmail.com |
7d59991c8b0a76d9c9417505d7349091318f74c7 | 290b66010fc270cee3910b010c917c016c4e048a | /lesson13_UnitTesting/venv/bin/pbr | ade459b67bcd8f37361fb671e162ee1d904558a9 | [] | no_license | zagravsky/cursor_hw | 3e3996352037799bfe56072c99db79173b062ce3 | f12b83ce2589778046432326125ef327374263ad | refs/heads/master | 2020-04-19T16:11:19.067325 | 2019-04-14T17:38:21 | 2019-04-14T17:38:21 | 168,296,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | #!/Users/admin/cursor_hw/python-cursor/lesson13_UnitTesting/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pbr.cmd.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"zagravsky@gmail.com"
] | zagravsky@gmail.com | |
8bb4aab4b937bc373eeccd0b26288f72c494a8ad | f2781e079656656768d181e4d73b888f985488d8 | /utils/img.py | 812083e4d9fb0b75e9bf11da09e5fb282c83cf6e | [
"MIT"
] | permissive | singhay/DeepPredictiveMotionTracking | 688281badb05767ecab7e3ed63f03200b8be0947 | cacef66088d356351a17118b7f831760519a0e2c | refs/heads/master | 2023-07-23T22:32:10.537703 | 2019-10-27T22:18:22 | 2019-10-27T22:18:22 | 189,454,175 | 2 | 0 | MIT | 2023-07-06T21:42:25 | 2019-05-30T17:20:36 | Python | UTF-8 | Python | false | false | 12,603 | py | import SimpleITK as sitk
import cv2 as cv
import nibabel as nib
import nilearn.image as nil_image
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
def create_rotation_matrix(param):
    """
    Build a 3x3 rotation matrix from three per-axis angles:
    =================
    Arguments:
        param: sequence of three angles [x, y, z], in degrees.
    Output:
        The 3x3 matrix composed as Rz @ Ry @ Rx, matching the
        composition order used by the ITK transform functions.
    """
    radians = np.asarray(param[:3], dtype=np.float64) * np.pi / 180
    cx, cy, cz = np.cos(radians)
    sx, sy, sz = np.sin(radians)
    rot_x = np.array([[1, 0, 0],
                      [0, cx, -sx],
                      [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0],
                      [sz, cz, 0],
                      [0, 0, 1]])
    return rot_z @ rot_y @ rot_x
def similarity_transform_volumes(
        image,
        affine_trans,
        target_size,
        interpolation='continuous'):
    """Resample ``image`` into ``target_size`` under a similarity transform.

    Combines the caller's 4x4 ``affine_trans`` with a uniform scale (so the
    volume fits the target) and a centering shift, then resamples via
    nilearn.  Returns ``(resampled_image, combined_4x4_transform)``.
    """
    image_size = np.shape(image)
    # Uniform scale: the largest per-axis ratio, clamped to >= 1 so the
    # volume is never upsampled by this step.
    possible_scales = np.true_divide(image_size, target_size)
    crop_scale = np.max(possible_scales)
    if crop_scale <= 1:
        crop_scale = 1
    scale_transform = np.diag((crop_scale,
                               crop_scale,
                               crop_scale,
                               1))
    # Shift so the scaled volume is centred in the target grid.
    shift = -(
        np.asarray(target_size) - np.asarray(
            image_size // np.asarray(crop_scale),
        )
    ) // 2
    affine_trans_to_center = np.eye(4)
    affine_trans_to_center[:, 3] = [shift[0],
                                    shift[1],
                                    shift[2],
                                    1]
    # Compose: caller transform, then scale, then centering shift.
    transform = np.matmul(affine_trans, scale_transform)
    transform = np.matmul(transform, affine_trans_to_center)
    nifti_img = nib.Nifti1Image(image, affine=np.eye(4))
    nifti_image_t = nil_image.resample_img(
        nifti_img,
        target_affine=transform,
        target_shape=target_size,
        interpolation=interpolation,
    )
    image_t = nifti_image_t.get_data()
    return image_t, transform
def vrrotvec2mat(ax_ang):
    """
    Create a rotation matrix corresponding to the rotation around a general
    axis by a specified angle.

    ``ax_ang`` rows are [axis_x, axis_y, axis_z, angle, proper_flag]; a
    4-element input gets the proper flag (1) appended.  Accepts a single
    vector or a 5xN batch; returns 3x3 (single) or Nx3x3 (batch).
    """
    # Normalise the input shape to 5xN.
    if ax_ang.ndim == 1:
        if np.size(ax_ang) == 5:
            ax_ang = np.reshape(ax_ang, (5, 1))
            msz = 1
        elif np.size(ax_ang) == 4:
            ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))
            msz = 1
        else:
            raise Exception('Wrong Input Type')
    elif ax_ang.ndim == 2:
        if np.shape(ax_ang)[0] == 5:
            msz = np.shape(ax_ang)[1]
        elif np.shape(ax_ang)[1] == 5:
            ax_ang = ax_ang.transpose()
            msz = np.shape(ax_ang)[1]
        else:
            raise Exception('Wrong Input Type')
    else:
        raise Exception('Wrong Input Type')
    direction = ax_ang[0:3, :]
    angle = ax_ang[3, :]
    # Re-normalise the axis to unit length per column.
    d = np.array(direction, dtype=np.float64)
    d /= np.linalg.norm(d, axis=0)
    x = d[0, :]
    y = d[1, :]
    z = d[2, :]
    c = np.cos(angle)
    s = np.sin(angle)
    tc = 1 - c
    # Rodrigues rotation formula, written out element-wise.
    mt11 = tc * x * x + c
    mt12 = tc * x * y - s * z
    mt13 = tc * x * z + s * y
    mt21 = tc * x * y + s * z
    mt22 = tc * y * y + c
    mt23 = tc * y * z - s * x
    mt31 = tc * x * z - s * y
    mt32 = tc * y * z + s * x
    mt33 = tc * z * z + c
    mtx = np.column_stack((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))
    # Improper rotations (flag -1) negate the whole matrix.
    inds1 = np.where(ax_ang[4, :] == -1)
    mtx[inds1, :] = -mtx[inds1, :]
    if msz == 1:
        mtx = mtx.reshape(3, 3)
    else:
        mtx = mtx.reshape(msz, 3, 3)
    return mtx
def vec3_to_vec5(vec3):
    """Convert a rotation vector (axis * angle) to 5x1 axis-angle form.

    Output rows: unit axis x, y, z; rotation angle (radians); and a
    proper-rotation flag fixed at 1.  ``vec3`` must be a non-zero
    numpy vector.
    """
    angle = np.linalg.norm(vec3)
    unit_axis = vec3 / angle
    vec5 = np.ones((5, 1))  # row 4 (the proper-rotation flag) stays 1
    for i in range(3):
        vec5[i] = unit_axis[i]
    vec5[3] = angle
    return vec5
def vec5_to_vec3(vec5):
    """Collapse a 5x1 axis-angle vector back to axis * angle (length 3)."""
    axis = vec5[:3, 0]
    angle = vec5[3, 0]
    return angle * axis
def vrrotmat2vec(mat_src, rot_type='proper'):
    """
    Create an axis-angle np.array from Rotation Matrix:
    ====================
    @param mat_src: The nx3x3 rotation matrices to convert
    @type mat_src: nx3x3 numpy array
    @param rot_type: 'improper' if there is a possibility of
                     having improper matrices in the input,
                     'proper' otherwise. 'proper' by default
    @type rot_type: string ('proper' or 'improper')
    @return: The 3D rotation axis and angle (ax_ang)
             5 entries:
                First 3: axis
                4: angle
                5: 1 for proper and -1 for improper
    @rtype: numpy 5xn array
    """
    mat = np.copy(mat_src)
    # Normalise input to an Nx3x3 batch.
    if mat.ndim == 2:
        if np.shape(mat) == (3, 3):
            mat = np.copy(np.reshape(mat, (1, 3, 3)))
        else:
            raise Exception('Wrong Input Type')
    elif mat.ndim == 3:
        if np.shape(mat)[1:] != (3, 3):
            raise Exception('Wrong Input Type')
    else:
        raise Exception('Wrong Input Type')
    msz = np.shape(mat)[0]
    ax_ang = np.zeros((5, msz))
    epsilon = 1e-12
    # Row 4 holds the proper/improper flag; improper matrices (det -1)
    # are negated so the axis-angle math below sees a proper rotation.
    if rot_type == 'proper':
        ax_ang[4, :] = np.ones(np.shape(ax_ang[4, :]))
    elif rot_type == 'improper':
        for i in range(msz):
            det1 = np.linalg.det(mat[i, :, :])
            if abs(det1 - 1) < epsilon:
                ax_ang[4, i] = 1
            elif abs(det1 + 1) < epsilon:
                ax_ang[4, i] = -1
                mat[i, :, :] = -mat[i, :, :]
            else:
                raise Exception('Matrix is not a rotation: |det| != 1')
    else:
        raise Exception('Wrong Input parameter for rot_type')
    mtrc = mat[:, 0, 0] + mat[:, 1, 1] + mat[:, 2, 2]
    # Case 1: trace == 3 -> identity rotation (angle 0, arbitrary axis).
    ind1 = np.where(abs(mtrc - 3) <= epsilon)[0]
    ind1_sz = np.size(ind1)
    if np.size(ind1) > 0:
        ax_ang[:4, ind1] = np.tile(np.array([0, 1, 0, 0]), (ind1_sz, 1)).transpose()
    # Case 2: trace == -1 -> angle pi; axis sign must be disambiguated.
    ind2 = np.where(abs(mtrc + 1) <= epsilon)[0]
    ind2_sz = np.size(ind2)
    if ind2_sz > 0:
        # phi = pi
        # This singularity requires elaborate sign ambiguity resolution
        # Compute axis of rotation, make sure all elements >= 0
        # real signs are obtained by flipping algorithm below
        diag_elems = np.concatenate((mat[ind2, 0, 0].reshape(ind2_sz, 1),
                                     mat[ind2, 1, 1].reshape(ind2_sz, 1),
                                     mat[ind2, 2, 2].reshape(ind2_sz, 1)), axis=1)
        axis = np.sqrt(np.maximum((diag_elems + 1) / 2, np.zeros((ind2_sz, 3))))
        # axis elements that are <= epsilon are set to zero
        axis = axis * ((axis > epsilon).astype(int))
        # Flipping
        #
        # The algorithm uses the elements above diagonal to determine the signs
        # of rotation axis coordinate in the singular case Phi = pi.
        # All valid combinations of 0, positive and negative values lead to
        # 3 different cases:
        # If (Sum(signs)) >= 0 ... leave all coordinates positive
        # If (Sum(signs)) == -1 and all values are non-zero
        #   ... flip the coordinate that is missing in the term that has + sign,
        #       e.g. if 2AyAz is positive, flip x
        # If (Sum(signs)) == -1 and 2 values are zero
        #   ... flip the coord next to the one with non-zero value
        #   ... ambiguous, we have chosen shift right
        # construct vector [M23 M13 M12] ~ [2AyAz 2AxAz 2AxAy]
        # (in the order to facilitate flipping):    ^
        #                                  [no_x  no_y  no_z ]
        m_upper = np.concatenate((mat[ind2, 1, 2].reshape(ind2_sz, 1),
                                  mat[ind2, 0, 2].reshape(ind2_sz, 1),
                                  mat[ind2, 0, 1].reshape(ind2_sz, 1)), axis=1)
        # elements with || smaller than epsilon are considered to be zero
        signs = np.sign(m_upper) * ((abs(m_upper) > epsilon).astype(int))
        sum_signs = np.sum(signs, axis=1)
        t1 = np.zeros(ind2_sz, )
        tind1 = np.where(sum_signs >= 0)[0]
        t1[tind1] = np.ones(np.shape(tind1))
        tind2 = \
            np.where(np.all(np.vstack(((np.any(signs == 0, axis=1) == False), t1 == 0)), axis=0))[0]
        t1[tind2] = 2 * np.ones(np.shape(tind2))
        tind3 = np.where(t1 == 0)[0]
        flip = np.zeros((ind2_sz, 3))
        flip[tind1, :] = np.ones((np.shape(tind1)[0], 3))
        flip[tind2, :] = np.copy(-signs[tind2, :])
        t2 = np.copy(signs[tind3, :])
        shifted = np.column_stack((t2[:, 2], t2[:, 0], t2[:, 1]))
        flip[tind3, :] = np.copy(shifted + (shifted == 0).astype(int))
        axis = axis * flip
        ax_ang[:4, ind2] = np.vstack((axis.transpose(), np.pi * (np.ones((1, ind2_sz)))))
    # Case 3: generic rotation -> recover axis from the skew-symmetric part.
    ind3 = np.where(np.all(np.vstack((abs(mtrc + 1) > epsilon, abs(mtrc - 3) > epsilon)), axis=0))[
        0]
    ind3_sz = np.size(ind3)
    if ind3_sz > 0:
        phi = np.arccos((mtrc[ind3] - 1) / 2)
        den = 2 * np.sin(phi)
        a1 = (mat[ind3, 2, 1] - mat[ind3, 1, 2]) / den
        a2 = (mat[ind3, 0, 2] - mat[ind3, 2, 0]) / den
        a3 = (mat[ind3, 1, 0] - mat[ind3, 0, 1]) / den
        axis = np.column_stack((a1, a2, a3))
        ax_ang[:4, ind3] = np.vstack((axis.transpose(), phi.transpose()))
    return ax_ang
def generate_img_angle(timestep, image, ius_x, ius_y, ius_z, rotation_matrix_init):
    """Rotate ``image`` for one timestep and return a 2-D slice + offsets.

    The per-axis splines give incremental angles at ``timestep``; the
    incremental rotation is composed with the initial rotation, applied
    around the volume centre via SimpleITK, and the result sampled as a
    2-D slice.  Returns ``(slice_2d, rotation_vector)``.
    """
    xrot = ius_x(timestep)
    yrot = ius_y(timestep)
    zrot = ius_z(timestep)
    rotation_matrix_mov = create_rotation_matrix([xrot, yrot, zrot])
    rotation_matrix = np.matmul(rotation_matrix_mov, rotation_matrix_init)
    # Rotate about the physical centre of the volume.
    center_idx = np.asanyarray(image.GetSize()) / 2.
    rotation_center = image.TransformContinuousIndexToPhysicalPoint(center_idx)
    transformation = sitk.VersorRigid3DTransform()
    transformation.SetMatrix(rotation_matrix.ravel())
    transformation.SetCenter(rotation_center)
    transformedImg = sitk.Resample(image, transformation)
    img = sitk.GetArrayFromImage(transformedImg)
    # Express the total rotation as an axis*angle vector (the "offsets").
    vector = vrrotmat2vec(rotation_matrix)
    rotational_offsets = vec5_to_vec3(vector)
    # NOTE(review): the sampled plane advances 5 voxels per timestep from
    # index 60 -- presumably tied to the acquisition protocol; confirm.
    return img[:, 60 + timestep * 5, :], rotational_offsets
def get_rotational_spline_generators(nb_timesteps, angle=60, speed=5):
    """Draw a random initial rotation plus smooth per-axis angle splines.

    Returns ``(spline_x, spline_y, spline_z, initial_rotation_matrix)``.
    Each spline interpolates ``speed`` random knot angles, bounded by
    ``angle // 2``, over timesteps [0, nb_timesteps - 1].
    """
    initial_angles = np.random.uniform(-angle, angle, 3)
    rotation_matrix_init = create_rotation_matrix(initial_angles)
    intra_angle = angle // 2
    knots = np.linspace(0, nb_timesteps - 1, speed)
    # Draw the per-axis knot values in x, y, z order so the RNG stream
    # matches the original implementation exactly.
    splines = []
    for _ in range(3):
        values = np.random.uniform(-intra_angle, intra_angle, speed)
        splines.append(InterpolatedUnivariateSpline(knots, values))
    ius_x, ius_y, ius_z = splines
    return ius_x, ius_y, ius_z, rotation_matrix_init
def resample_img(itk_image, out_spacing=[1.0, 1.0, 1.0], is_label=False):
    """Resample a SimpleITK image to the given voxel spacing.

    The output grid size is scaled so the physical extent is preserved.
    Label maps use nearest-neighbour interpolation to avoid inventing
    label values; intensity images use B-spline.
    """
    # Resample images to 1mm spacing with SimpleITK
    original_spacing = itk_image.GetSpacing()
    original_size = itk_image.GetSize()
    # New size keeps physical extent: size * (old_spacing / new_spacing).
    out_size = [
        int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
        int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
        int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]
    resample = sitk.ResampleImageFilter()
    resample.SetOutputSpacing(out_spacing)
    resample.SetSize(out_size)
    resample.SetOutputDirection(itk_image.GetDirection())
    resample.SetOutputOrigin(itk_image.GetOrigin())
    resample.SetTransform(sitk.Transform())
    resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
    if is_label:
        resample.SetInterpolator(sitk.sitkNearestNeighbor)
    else:
        resample.SetInterpolator(sitk.sitkBSpline)
    return resample.Execute(itk_image)
def whitening(image):
    """Z-score *image*: zero mean, unit variance, float32 output.

    A constant image (zero standard deviation) is mapped to all zeros
    instead of dividing by zero.
    """
    data = image.astype(np.float32)
    spread = np.std(data)
    if spread <= 0:
        return np.zeros_like(data)
    return (data - np.mean(data)) / spread
def rescale_resize(img, h=155, w=135):
    """Resize *img* with nearest-neighbour interpolation, then z-score it.

    NOTE(review): OpenCV's ``dsize`` is (width, height); here ``h`` is
    passed as the width and ``w`` as the height, so the output array has
    ``w`` rows and ``h`` columns. Kept as-is for compatibility -- confirm
    the parameter names match the callers' intent.
    """
    resized = cv.resize(img,
                        dsize=(h, w),
                        interpolation=cv.INTER_NEAREST)
    return whitening(resized)
| [
"singh.ay@husky.neu.edu"
] | singh.ay@husky.neu.edu |
41d0b7f4129362904011ff884c66a4b05b9a9a13 | c4c6b03e6b9cab4d10e49726647e107e69972f8f | /awx/ui/fields.py | 4d96165d4d8b8fb818bfbf74e76aad0b8c83ed60 | [
"Apache-2.0"
] | permissive | Saurabh-Thakre/awx | f9f881d7620f681c393c16bbb501eaa401750320 | 8eb377a3ea8303c394ad4c958cc828c7239c1e11 | refs/heads/devel | 2021-06-22T17:29:35.212057 | 2021-02-06T14:30:51 | 2021-02-06T14:30:51 | 336,553,122 | 1 | 0 | NOASSERTION | 2021-02-06T14:13:25 | 2021-02-06T14:13:24 | null | UTF-8 | Python | false | false | 1,354 | py | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Python
import base64
import binascii
import re
# Django
from django.utils.translation import ugettext_lazy as _
# Tower
from awx.conf import fields, register
class PendoTrackingStateField(fields.ChoiceField):
    """Choice field that coerces any false-ish or null-ish input to 'off'."""

    def to_internal_value(self, data):
        # Anything DRF would treat as boolean False or null disables tracking.
        null_like = (fields.NullBooleanField.FALSE_VALUES,
                     fields.NullBooleanField.NULL_VALUES)
        if any(data in values for values in null_like):
            return 'off'
        return super(PendoTrackingStateField, self).to_internal_value(data)
class CustomLogoField(fields.CharField):
    """Char field holding a data URL with a base64 GIF/PNG/JPEG payload."""

    # Group 1 captures the base64 payload of the data URL.
    CUSTOM_LOGO_RE = re.compile(r'^data:image/(?:png|jpeg|gif);base64,([A-Za-z0-9+/=]+?)$')

    default_error_messages = {
        'invalid_format': _('Invalid format for custom logo. Must be a data URL with a base64-encoded GIF, PNG or JPEG image.'),
        'invalid_data': _('Invalid base64-encoded data in data URL.'),
    }

    def to_internal_value(self, data):
        value = super(CustomLogoField, self).to_internal_value(data)
        matched = self.CUSTOM_LOGO_RE.match(value)
        if matched is None:
            # self.fail raises a ValidationError, so execution stops here.
            self.fail('invalid_format')
        try:
            base64.b64decode(matched.group(1))
        except (TypeError, binascii.Error):
            self.fail('invalid_data')
        return value
| [
"yo@jakemcdermott.me"
] | yo@jakemcdermott.me |
0abc1c2ac2cd095cf5c4d4fb48f3311e312bad2e | 3006b8b05a5363a099f4d6a896e942eaec26fb6b | /confirmationreserve.py | 36ddeaa5c4e2faea9232507eed703afaa24832d9 | [] | no_license | lee-t/CS-4400-Trainwizards | 1e15ae3df4429a5a9b42c188845b63aec4cf1a84 | 1539c18615916cd29cfe5e3d9542af6fedc7d0b3 | refs/heads/master | 2021-01-10T08:38:32.964122 | 2016-04-26T00:28:22 | 2016-04-26T00:28:22 | 51,272,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,003 | py | #! /usr/bin/env python
#
# GUI module generated by PAGE version 4.7
# In conjunction with Tcl version 8.6
# Apr 24, 2016 01:35:43 AM
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = 0
except ImportError:
import tkinter.ttk as ttk
py3 = 1
import confirmationreserve_support
def vp_start_gui():
    """Script entry point: build the main Tk window and run the event loop."""
    global val, w, root
    root = Tk()
    screen = Confirm_screen(root)
    confirmationreserve_support.init(root, screen)
    root.mainloop()
w = None  # handle to the secondary Toplevel window (set by create_Confirm_screen)
def create_Confirm_screen(root, *args, **kwargs):
    """Open the confirmation window as a Toplevel child of *root*.

    Entry point used when this module is imported by another program.
    Returns a (window, screen) pair and stores them in module globals.
    """
    global w, w_win, rt
    rt = root
    w = Toplevel(root)
    screen = Confirm_screen(w)
    confirmationreserve_support.init(w, screen, *args, **kwargs)
    return w, screen
def destroy_Confirm_screen():
    """Destroy the confirmation Toplevel and clear the module-level handle."""
    global w
    w.destroy()
    w = None
class Confirm_screen:
    """Reservation-confirmation window (layout originally generated by PAGE).

    Widgets are exposed as attributes (Label1..Label4, Text1, Button1) so the
    support module can read the reservation id and attach callbacks.
    """

    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
        top is the toplevel containing window.'''
        # NOTE: the PAGE-generated color locals (_bgcolor, _fgcolor,
        # _compcolor, _ana1color, _ana2color) were unused and have been
        # removed; colors are configured inline on each widget below.
        font10 = "-family {DejaVu Sans} -size 20 -weight normal -slant" \
            " roman -underline 0 -overstrike 0"

        top.geometry("600x450+315+169")
        top.title("Confirm screen")

        # Heading.
        self.Label1 = Label(top)
        self.Label1.place(relx=0.33, rely=0.09, height=33, width=165)
        self.Label1.configure(font=font10)
        self.Label1.configure(foreground="#ffb800")
        self.Label1.configure(text='''Confirmation''')

        # Caption for the reservation id box.
        self.Label2 = Label(top)
        self.Label2.place(relx=0.18, rely=0.29, height=18, width=87)
        self.Label2.configure(text='''ReservationID''')

        # Text box displaying the reservation id.
        self.Text1 = Text(top)
        self.Text1.place(relx=0.33, rely=0.27, relheight=0.09, relwidth=0.23)
        self.Text1.configure(background="white")
        self.Text1.configure(font="TkTextFont")
        self.Text1.configure(selectbackground="#c4c4c4")
        self.Text1.configure(width=136)
        self.Text1.configure(wrap=WORD)

        self.Label3 = Label(top)
        self.Label3.place(relx=0.18, rely=0.44, height=18, width=182)
        self.Label3.configure(text='''Thank you for your Purchase!''')

        self.Label4 = Label(top)
        self.Label4.place(relx=0.18, rely=0.51, height=18, width=220)
        self.Label4.configure(text='''Save ReservationID for your records''')

        # Navigation button (callback wired up by the support module).
        self.Button1 = Button(top)
        self.Button1.place(relx=0.3, rely=0.67, height=26, width=209)
        self.Button1.configure(activebackground="#d9d9d9")
        self.Button1.configure(text='''Return to Functionality screen''')
if __name__ == '__main__':
    # Run standalone: build the window and enter the Tk main loop.
    vp_start_gui()
| [
"tlee361@gatech.edu"
] | tlee361@gatech.edu |
216c042ef5a6cf580a5ab27cfef073b3193a46d2 | 20774d703ed77a359dd29692d2a6817ddabe10bd | /15598_카카오머니.py | 9342dc637cd97c399452a0ed7f98b36cc9ea7544 | [] | no_license | kimheejoo/BaekJoon | a9af5116e51355f10769b99054b4cc52f48869be | 67f5c9a420b99120f53c3c98f0f7ed9b0de757fc | refs/heads/main | 2023-03-27T02:54:42.227003 | 2021-03-19T16:26:53 | 2021-03-19T16:26:53 | 338,039,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | import sys
from math import gcd
def solve():
    """BOJ 15598 (Kakao money): recover the fixed auto-charge unit.

    Reads from stdin: N, then N lines of "delta balance_after".
    Prints the inferred charge unit, 1 if charging never happened, or -1
    if the log is inconsistent with any single charge unit.
    """
    UNSET = pow(10, 18)  # sentinel larger than any possible balance
    n = int(sys.stdin.readline())
    wallet = 0
    charge = None          # gcd of every amount observed to be auto-charged
    min_remaining = UNSET  # smallest non-zero balance left right after a charge
    for _ in range(n):
        money, remain = map(int, sys.stdin.readline().split())
        if wallet + money < 0:
            # Balance would have gone negative, so an auto-charge occurred.
            if remain != 0:
                min_remaining = min(min_remaining, remain)
            charged = remain - money - wallet
            # The true unit must divide every charged amount.
            charge = charged if charge is None else gcd(charge, charged)
            # Contradiction: the unit is no larger than the smallest
            # non-zero post-charge balance (a smaller top-up would have
            # sufficed), or unit 1 left a non-zero balance.
            if (min_remaining != UNSET and charge <= min_remaining) or \
                    (charge == 1 and remain != 0):
                print(-1)
                break
        else:
            # No charge needed: the ledger must balance exactly.
            if wallet + money != remain:
                print(-1)
                break
        wallet = remain
    else:
        # Every transaction was consistent.
        print(1 if charge is None else charge)


if __name__ == '__main__':
    solve()
"h32j00@gmail.com"
] | h32j00@gmail.com |
01dc1ba3f66d315ac5155c92bde6fcf52a9edd31 | 673331b00130061e7bb9faad84e6448260ba9040 | /restful_f/urls.py | 19aeb7ce683c87f032e82e57ec2f1b2e27fc1e60 | [] | no_license | shangjunshangmin/restful | 5abc9a452171c17ce9e1293b1d3386842d0e0750 | 661abb0e8945ebc5df67a5447df6440fbad9e7cc | refs/heads/master | 2022-11-16T00:27:47.745672 | 2019-01-29T07:45:33 | 2019-01-29T07:45:33 | 167,967,310 | 0 | 1 | null | 2022-10-24T12:18:18 | 2019-01-28T13:31:28 | Python | UTF-8 | Python | false | false | 1,855 | py | """restful_f URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
    Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.contrib import admin
import xadmin
# from django.urls import path,include
from django.conf.urls import url,include
from restful_f.settings import MEDIA_ROOT
from django.views.static import serve
from goods.views import GoodsList,GoodsTypeList
from rest_framework.routers import DefaultRouter
from rest_framework.authtoken import views
from rest_framework_jwt.views import obtain_jwt_token
from user.views import SmsCodeViewset,UserViewset
# Create the DRF router and register our viewsets on it.
router = DefaultRouter()
router.register(r'goods', GoodsList,base_name='goods')
router.register(r'categorys', GoodsTypeList,base_name='goodstype')
router.register(r'code', SmsCodeViewset,base_name='code')
router.register(r'users', UserViewset, base_name="users")
urlpatterns = [
    url('admin/', xadmin.site.urls),
    url(r'^ueditor/',include('DjangoUeditor.urls')),
    # Serve uploaded media files from MEDIA_ROOT.
    url(r'^media/(?P<path>.*)$', serve, {"document_root": MEDIA_ROOT}),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^', include(router.urls)),
    # DRF's built-in token authentication endpoint.
    url(r'^api-token-auth/', views.obtain_auth_token),
    # JWT authentication endpoint.
    url(r'^login/', obtain_jwt_token),
]
| [
"1378828610@qq.com"
] | 1378828610@qq.com |
685d71299402ee819bd321399d5b371b8a29309f | ea4b8ad32345a94ec1c566c30efb4dfc9fd46b8e | /GeoGossip/webapps/geogossip/management/commands/crawl_yelp.py | 7d46dd157297ff320e6108180b527cc4f876c872 | [] | no_license | yyi1/GeoGossip | 76e09b585c0a765485547560def4b2f9aa407777 | 99fa1d06c4f26ad1f0ab8b1c007ab2d54d3cc56f | refs/heads/master | 2020-04-06T04:21:46.476790 | 2017-02-25T04:45:26 | 2017-02-25T04:45:26 | 82,977,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | import os
import time
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from geogossip.models import Business
from geogossip.forms import BusinessForm
from geogossip.utils import merge_categories
from yelp.client import Client
from yelp.oauth1_authenticator import Oauth1Authenticator
class Command(BaseCommand):
    """Management command: crawl Yelp businesses within 2.5 km of CMU."""

    help = 'crawl businesses from yelp around CMU'

    @transaction.atomic
    def handle(self, *args, **options):
        # Legacy Python 2 shim so unicode business names encode cleanly.
        reload(sys)
        sys.setdefaultencoding('utf-8')

        auth = Oauth1Authenticator(
            consumer_key=os.environ['CONSUMER_KEY'],
            consumer_secret=os.environ['CONSUMER_SECRET'],
            token=os.environ['TOKEN'],
            token_secret=os.environ['TOKEN_SECRET']
        )
        client = Client(auth)

        params = {
            'location': 'Carnegie+Mellon+University',
            'radius_filter': 2500
        }
        response = client.search(**params)
        total = response.total
        self.stdout.write(self.style.SUCCESS('Found {} businesses'.format(total)))

        # Rebuild the table from scratch on every crawl.
        Business.objects.all().delete()

        offset = 0
        malformed = 0
        while offset < total:
            for business in response.businesses:
                offset += 1
                # Skip entries without usable coordinates.
                if not business.location or not business.location.coordinate:
                    malformed += 1
                    continue
                form = BusinessForm({
                    'name': business.name,
                    'categories': merge_categories(business.categories),
                    'lat': business.location.coordinate.latitude,
                    'lon': business.location.coordinate.longitude,
                    'is_closed': business.is_closed,
                    'image_url': business.image_url,
                    'url': business.url,
                    'display_phone': business.display_phone,
                    'review_count': business.review_count,
                    'rating': business.rating
                })
                if form.is_valid():
                    form.save()
                else:
                    malformed += 1
            self.stdout.write(self.style.SUCCESS('{} businesses are saved.'.format(offset - malformed)))
            # Page through any remaining results.
            if offset < total:
                time.sleep(0.01)
                params['offset'] = offset
                response = client.search(**params)
        self.stdout.write(self.style.SUCCESS('There are {} businesses saved.'.format(len(Business.objects.all()))))
        self.stderr.write(self.style.NOTICE('There are totally {} malformed businesses.'.format(malformed)))
| [
"yyi1@andrew.cmu.edu"
] | yyi1@andrew.cmu.edu |
398e431632ab1e171a30c473667a6229cbf94728 | 76b983258793d294b81791ebe72591bfebf78625 | /lib/ia32/optable.py | 314689b3d1c369c472d481e50573e58dabea9a73 | [
"BSD-2-Clause"
] | permissive | lotusexpeditor/syringe | 18ac9cb800a7fefb7d67e31936db6a84e47df9eb | 34a8386b90f534f9a856d0a436bba04dbf5100bd | refs/heads/master | 2023-02-08T10:08:20.295797 | 2020-12-27T00:06:09 | 2020-12-27T00:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | from ._optable import OperandLookupTable
from . import typesize
def Lookup(opcode):
'''Lookup specified opcode in the lookup table'''
res = ord(opcode[0])
if res == 0x0f:
res = ord(opcode[1])
return OperandLookupTable[res+0x100]
return OperandLookupTable[res]
def HasModrm(lookup):
'''Returns True if specified opcode requires a modrm byte'''
return bool(ord(lookup) & 0x80)
def HasImmediate(lookup):
'''Returns True if specified opcode contains an immediate value'''
return bool(ord(lookup) & 0x40)
def GetImmediateLength(lookup, prefixes):
res = ord(lookup) & 0x3f
opsizeindex = not int(b'\x66' in prefixes)
if res == 0x3f: # it sucks because i know python has such a horrible optimizer, and i need to redo this as a dict for that reason
size = [ 2*typesize.halfword, 2*typesize.word ][opsizeindex]
elif res == 0x3e:
size = [ typesize.byte, typesize.halfword ][opsizeindex]
elif res == 0x3d:
size = [ typesize.halfword, typesize.word ][opsizeindex]
elif res == 0x3c:
size = [ typesize.word, typesize.word*2][opsizeindex]
elif res == 0x3b:
size = [ typesize.word*2, typesize.halfword ][opsizeindex]
elif res == 0x3a:
size = [ typesize.halfword + typesize.word, typesize.word ][opsizeindex]
else:
size = res
return size
| [
"arizvisa@gmail.com"
] | arizvisa@gmail.com |
af48d996dcd94734f6fb6d4ec8d30e0aeff600b9 | a0a5a6198a524e2dbc237d313a362d6b902487f8 | /trainer.py | 9170f2aab55f09bfd95a53a9c5ee7869f60c878b | [] | no_license | xinleguo/cgo-pytorch | 2a4f34918551a9c741ebf1a96a3e81bf3ebb4542 | 0959396595e8b700690ebb14ff9929a6551bd76a | refs/heads/master | 2023-08-16T17:25:33.919579 | 2021-09-23T18:27:15 | 2021-09-23T18:27:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,363 | py | import torch
from loguru import logger
from nlgeval import NLGEval
from torch.nn.utils import clip_grad_norm_
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
def train_val_loss(model, train_dataset, val_dataset, num_workers, loss_fn,
                   params, model_save_path, save_every, device):
    """Train *model* and validate it by average loss after every epoch.

    *params* supplies 'lr', 'decay_every', 'decay_rate', 'batch_size' and
    'epoch'. A checkpoint is pickled every *save_every* epochs and the final
    model is written to '<model_save_path>.pkl'.
    """
    model = model.to(device)
    optimizer = torch.optim.Adam(params=model.parameters(), lr=params['lr'])
    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer=optimizer,
        step_size=params['decay_every'],
        gamma=params['decay_rate'])

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=params['batch_size'],
                              num_workers=num_workers,
                              pin_memory=True,
                              shuffle=True)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=params['batch_size'],
                            num_workers=0,
                            pin_memory=True,
                            shuffle=True)

    last_loss = 0.0
    for epoch in range(params['epoch']):
        logger.info('training epoch %d ... ' % (epoch + 1))
        model.train()
        running_loss = 0.0
        for batch_idx, batch in enumerate(train_loader):
            optimizer.zero_grad()
            inputs = [x.to(device) for x in batch['inputs']]
            label = [x.to(device) for x in batch['label']]
            loss = loss_fn(model(inputs), label)
            running_loss += loss
            loss.backward()
            # Guard against exploding gradients.
            clip_grad_norm_(model.parameters(), 0.25)
            optimizer.step()
            if batch_idx % 1 == 0:  # log every batch (kept as in the original)
                logger.info(
                    '[epoch: {0}/{1}, batch: {2}/{3}] loss: {4}'.format(
                        epoch + 1, params['epoch'], batch_idx + 1,
                        len(train_loader), running_loss))
                running_loss = 0.0

        with torch.no_grad():
            logger.info('validating epoch %d ... ' % (epoch + 1))
            model.eval()
            running_loss = 0.0
            for batch in val_loader:
                inputs = [x.to(device) for x in batch['inputs']]
                label = [x.to(device) for x in batch['label']]
                running_loss += loss_fn(model(inputs), label)
            running_loss /= len(val_loader)
            delta_loss = running_loss - last_loss
            last_loss = running_loss

        logger.info('loss after epoch %d: %.10f' % (epoch + 1, running_loss))
        logger.info('loss change after last epoch: %.10f' % delta_loss)
        scheduler.step()

        if not (epoch + 1) % save_every:
            with open('{0}_{1}.pkl'.format(model_save_path, epoch + 1),
                      'wb') as fp:
                logger.info("writing checkpoint " + str(epoch + 1))
                torch.save(model, fp)

    logger.info("training complete. writing final model.")
    with open('{0}.pkl'.format(model_save_path), 'wb') as fp:
        torch.save(model, fp)
def train_val_meteor(model, train_dataset, val_dataset, val_cap_dataset,
                     word_map, reversed_word_map, num_workers, loss_fn, params,
                     model_save_path, save_every, device):
    """Train a captioning model and validate with METEOR after each epoch.

    The five reference captions per image are collected once from
    *val_dataset*; validation decodes *val_cap_dataset* greedily (beam=1)
    and scores the hypotheses with nlg-eval. Checkpoints are pickled every
    *save_every* epochs; the final model goes to '<model_save_path>.pkl'.
    """
    model = model.to(device)
    optimizer = torch.optim.Adam(params=model.parameters(), lr=params['lr'])
    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer=optimizer,
        step_size=params['decay_every'],
        gamma=params['decay_rate'])

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=params['batch_size'],
                              num_workers=num_workers,
                              pin_memory=True,
                              shuffle=True)
    ref_loader = DataLoader(dataset=val_dataset, shuffle=False)
    cap_loader = DataLoader(dataset=val_cap_dataset, shuffle=False)

    # Collect the ground-truth captions (as word lists) per image, once.
    logger.info('preparing references...')
    ref_img = {}
    for cap_label in ref_loader:
        image_id = str(cap_label['image_id'][0])
        seq, seq_length = cap_label['label']
        # Drop the <start> token; keep tokens up to the stated length.
        words = [reversed_word_map[x] for x in seq[0][1:seq_length[0] + 1]]
        ref_img.setdefault(image_id, []).append(words)

    nlg = NLGEval(False, True, True, ['Bleu_1', 'ROUGE_L', 'CIDEr'])

    last_loss = 0.0
    for epoch in range(params['epoch']):
        logger.info('training epoch %d ... ' % (epoch + 1))
        model.train()
        running_loss = 0.0
        for batch_idx, batch in enumerate(train_loader):
            optimizer.zero_grad()
            inputs = [x.to(device) for x in batch['inputs']]
            label = [x.to(device) for x in batch['label']]
            loss = loss_fn(model(inputs), label)
            running_loss += loss
            loss.backward()
            clip_grad_norm_(model.parameters(), 0.25)
            optimizer.step()
            if batch_idx % 1 == 0:  # log every batch (kept as in the original)
                logger.info(
                    '[epoch: {0}/{1}, batch: {2}/{3}] loss: {4}'.format(
                        epoch + 1, params['epoch'], batch_idx + 1,
                        len(train_loader), running_loss))
                running_loss = 0.0

        with torch.no_grad():
            logger.info('validating epoch %d ... ' % (epoch + 1))
            model.eval()
            hyp = []
            ref = [[] for _ in range(5)]
            for val in tqdm(cap_loader):
                image_id = str(val['image_id'][0])
                image_features = [x.to(device) for x in val['inputs']][0]
                seq = torch.tensor([word_map['<start>']]).view(1, -1).to(device)
                seq_length = torch.tensor([0]).view(1, -1).to(device)
                top_results = model.decode((image_features, seq, seq_length),
                                           word_map['<end>'],
                                           beam=1)
                decoded = [reversed_word_map[x] for x in top_results[0][1]]
                if decoded[-1] != '<end>':
                    logger.warning('decoded sentence not ending with <end>.')
                    logger.warning('image_id: {0}'.format(image_id))
                    logger.warning('decoded: {0}'.format(decoded))
                    hyp.append(' '.join(decoded))
                else:
                    # Strip the trailing <end> token.
                    hyp.append(' '.join(decoded[:-1]))
                for k in range(5):
                    ref[k].append(' '.join(ref_img[image_id][k]))

            # Spot-check two fixed samples for eyeballing quality.
            for sample_no, idx in ((0, 1234), (1, 2345)):
                logger.debug('sample {0} pred: {1}'.format(sample_no, hyp[idx]))
                for k in range(5):
                    logger.debug('sample {0} ref {1}: {2}'.format(
                        sample_no, k, ref[k][idx]))

            metrics = nlg.compute_metrics(ref, hyp)
            meteor = metrics['METEOR']
            delta_loss = meteor - last_loss
            last_loss = meteor

        logger.info('val METEOR after epoch %d: %.10f' % (epoch + 1, meteor))
        logger.info('METEOR change after last epoch: %.10f' % delta_loss)
        scheduler.step()

        if not (epoch + 1) % save_every:
            with open('{0}_{1}.pkl'.format(model_save_path, epoch + 1),
                      'wb') as fp:
                logger.info("writing checkpoint " + str(epoch + 1))
                torch.save(model, fp)

    logger.info("training complete. writing final model.")
    with open('{0}.pkl'.format(model_save_path), 'wb') as fp:
        torch.save(model, fp)
| [
"jiangkairong@email.arizona.edu"
] | jiangkairong@email.arizona.edu |
c7c380b18eda56812451f7dd40aed6a0cf14d8ae | 26f9ce896ab8a47f208788a96481eac0ebc5c417 | /elite-btc/elite-btc.py | 80dcf752f96a02155a25bc74293b7ec38e4fe46a | [] | no_license | 0x539/tempa-t-bot | c0dc8105a8717dd045d0fa5a1e2f2078ecad8c3e | 655add12bdcdef6c8ca234408b2cd9d58c114c3b | refs/heads/master | 2021-01-13T01:04:31.756483 | 2017-01-04T23:07:42 | 2017-01-04T23:07:42 | 52,831,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # coding=utf-8
from slackbot.bot import listen_to
import re
import json
import os
@listen_to('!price', re.IGNORECASE)
def btc_price(message):
    """Slack '!price' handler: reply with the cached BTC price in GBP."""
    cache_path = os.path.abspath("btc_price_cache.txt")
    with open(cache_path) as data_file:
        payload = json.load(data_file)
    # payload['data']['amount'] holds the price as a string.
    amount = payload['data']['amount'].encode("utf-8").strip()
    message.reply('Current Price: %s%s' % ("£", amount))
| [
"omgbloodypass@gmail.com"
] | omgbloodypass@gmail.com |
ed2cb6dfb5dc32978c14493d70c83acd99fdbf88 | bb458f0256751133e260ff6be17c34f3af4fab39 | /Q_learning_RND/log_utils.py | cb9aa11b9212b7497b0dfd07c79214c8829baa80 | [] | no_license | pvhau169/MDP-RND | b9df1bc922511df2be9dbca1ad559310ab4c349a | eb6e38c5469b60ccba256c12a7a69b3303175e03 | refs/heads/master | 2023-01-18T17:03:51.194989 | 2020-11-25T05:20:46 | 2020-11-25T05:20:46 | 315,836,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | class mean_val:
def __init__(self):
self.k = 0
self.val = 0
self.mean = 0
def append(self, x):
self.k += 1
self.val += x
self.mean = self.val/self.k
def get(self):
return self.mean
class logger:
    """Minimal in-memory metric logger: named series of appended values."""

    def __init__(self):
        self.log = dict()  # series name -> list of recorded values

    def add_log(self, name):
        """Start an empty series called *name* (resets an existing one)."""
        self.log[name] = []

    def add_item(self, name, x):
        """Append value *x* to the series *name*."""
        self.log[name].append(x)

    def get_log(self, name):
        """Return the full list recorded under *name*."""
        return self.log[name]

    def get_keys(self):
        """Return a view of all series names."""
        return self.log.keys()

    def get_current(self, name):
        """Return the most recent value of series *name*."""
        return self.log[name][-1]
"pvhau169@gmail.com"
] | pvhau169@gmail.com |
4f5c16106059a785fef9f647a57e7a8ac705d310 | d349f6a13a8b1b7cbb7b36fd467d5ed2af77fd19 | /Python_02_Modify_Date.py | 40e13c89540c27dce9a67a1d346ea3d6710f1d0e | [
"MIT"
] | permissive | rogerolowski/SimpleStockAnalysisPython | 639190c8ac0b4544d6fc7b298d836a32124e5d17 | 758e04b28eda4b8eb4124f4b1e0ed493b0f93106 | refs/heads/master | 2023-08-21T09:05:58.607706 | 2021-10-31T03:40:16 | 2021-10-31T03:40:16 | 423,045,068 | 0 | 0 | MIT | 2021-10-31T03:40:00 | 2021-10-31T03:39:59 | null | UTF-8 | Python | false | false | 3,431 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 08:09:11 2020
@author: Tin
"""
# Modify Yahoo Dataframe Date
import pandas as pd # Dataframe Library
pd.set_option('max_columns', None) # To show all columns
import yfinance as yf
yf.pdr_override()  # route pandas-datareader calls through yfinance
# Input: ticker symbol and date range to download.
symbol = 'AAPL'
start = '2014-01-01'
end = '2018-01-01'
# Download the daily OHLCV dataframe (DatetimeIndex).
data = yf.download(symbol,start,end)
# View the first 5 rows.
print('First 5 Rows')
print(data.head())
print('-'*80)
# Turn the Date index into a regular column.
df = data.copy() # Copy the original data
dfn = df.reset_index()
print(dfn.head())
print('-'*80)
# Add Year, Month, Day columns derived from the index.
df['Year'] = df.index.year
df['Month'] = df.index.month
df['Day'] = df.index.day
print('Year, Month, & Day')
print(df.head())
print('-'*80)
# Convert daily data to weekly (last observation of each week).
weekly = data.copy()
weekly = weekly.resample('W').last()
print('Weekly Data')
print(weekly.head())
print('-'*80)
# Convert daily data to monthly (mean of each month).
monthly = data.copy()
monthly = monthly.resample('1M').mean()
print('Monthly Data')
print(monthly.head())
print('-'*80)
# Choose a particular year to analyze.
monthly = data.copy()
monthly = monthly.reset_index()
y2017 = monthly[monthly['Date'].dt.year==2017]
print("Analyze Particular Year in Historical Data")
print(y2017)
print('-'*80)
month_name = data.copy()
# Convert daily to monthly frequency:
# 'BMS' stands for "business month start frequency",
# 'BM' stands for "business month end frequency".
month_name = month_name.asfreq('BM')
print('Number of the Month')
print(month_name.head())
print('-'*80)
import calendar
month_name['Month_Number'] = month_name.index.month
month_name['Month_ABBR'] = month_name['Month_Number'].apply(lambda x: calendar.month_abbr[x])
print('Abbreviation for Months')
print(month_name.head())
print('-'*80)
print('Month Name')
month_name['Month_Name'] = month_name['Month_Number'].apply(lambda x: calendar.month_name[x])
print(month_name.head())
print('-'*80)
# Pivot tables keyed on parts of the date.
df_months = pd.pivot_table(df, index=df.index.month, columns=df.index.year, values = 'Adj Close') # months as rows, years as columns
print('Year by Year')
print(df_months)
print('-'*80)
df_days = pd.pivot_table(df, index=df.index.day, columns=df.index.year, values = 'Adj Close') # days of the month as rows
print('Year by Year in daily rows')
print(df_days)
print('-'*80)
df_all_columns = pd.pivot_table(df, index=df.index.month, columns=df.index.year)
print('All columns in yearly')
print(df_all_columns)
print('-'*80)
# Weekday breakdown: number (0=Monday) and localized day name.
stock_data = df.copy()
stock_data['Year'] = df.index.year
stock_data['Month'] = df.index.month
stock_data['Day'] = df.index.day
stock_data['Week_Day'] = df.index.dayofweek
stock_data['Week_Day_Name'] = df.index.strftime('%A')
print('Number of day with M-F')
print(stock_data.tail(10))
print('-'*80)
# First Adj Close of each (Year, Month) and of each (Year, Day) group.
approach1 = stock_data.groupby(['Year', 'Month']).first()['Adj Close']
print('# of Month')
print(approach1.tail(12))
print('-'*80)
approach2 = stock_data.groupby(['Year', 'Day']).first()['Adj Close']
print('# of Day')
print(approach2.tail(12))
print('-'*80)
print('Convert Date to String')
string_date = data.copy()
string_date['Date'] = string_date.index
print(string_date.head())
print('-'*80)
# Encode dates as YYYYMMDD integers.
string_date['Date'] = string_date['Date'].dt.strftime("%Y%m%d").astype(int)
print('Convert Date to Numbers')
print(string_date.head())
| [
"noreply@github.com"
] | rogerolowski.noreply@github.com |
e1256fc5375bcdb7a473231f9e7e1965896ac501 | 1f6d4f3f43ff465b2d5ac44d5630afff75fa4bf9 | /utils/excel.py | a01d475b78ad1dbccd50346334363e4177e56fd1 | [] | no_license | Drag0nfather/huntflow_test_task | 37eba994d98ba59ac43507020663a012587234ec | 2c9ab05e4884ab204ea6298f2068b04ec67d5e94 | refs/heads/main | 2023-08-02T10:07:05.104006 | 2021-09-09T15:24:28 | 2021-09-09T15:24:28 | 401,333,626 | 0 | 0 | null | 2021-09-09T16:59:04 | 2021-08-30T12:21:30 | Python | UTF-8 | Python | false | false | 1,193 | py | from pandas import read_excel
from utils.exceptions import ExcelFileNotFoundException
def convert_excel_to_list(filename: str) -> list:
    """
    Export an Excel file to a list of row dicts (one dict per row).
    """
    # NOTE(review): the f-string path template reads '(unknown)' in this copy
    # and never interpolates *filename* -- it looks redacted/corrupted.
    # Presumably it was something like f'files/{filename}'; confirm before use.
    try:
        df = read_excel(f'(unknown)').to_dict(orient='records')
        return df
    except Exception:
        # Any read failure is surfaced as the project-specific exception.
        raise ExcelFileNotFoundException
def check_upload_field(filename: str) -> bool:
    """
    Ensure the 'upload' column exists in the Excel file; if missing, insert
    it (filled with 0) and rewrite the file. Returns True on success.
    """
    # NOTE(review): the f-string path template reads '(unknown)' in this copy
    # and never interpolates *filename* -- it looks redacted/corrupted;
    # confirm the real path template before use.
    try:
        df = read_excel(f'(unknown)')
        if 'upload' in df.columns:
            return True
        # Insert the tracking column at position 5, defaulting to 0 (unprocessed).
        df.insert(5, 'upload', 0)
        df.to_excel(f'(unknown)', index=False)
        return True
    except Exception:
        raise ExcelFileNotFoundException
def add_success_point_to_applicant(filename: str, applicant: str) -> bool:
    """
    Mark *applicant* as processed by writing 1 into the 'upload' column of
    their row, then rewrite the file. Returns True.
    """
    # NOTE(review): the f-string path template reads '(unknown)' in this copy
    # and never interpolates *filename* -- it looks redacted/corrupted;
    # confirm the real path template before use.
    df = read_excel(f'(unknown)')
    # 'ФИО' is the full-name column; take the first row matching the applicant.
    applicant_index = df.index[df['ФИО'] == applicant][0]
    df.at[applicant_index, 'upload'] = 1
    df.to_excel(f'(unknown)', index=False)
    return True
| [
"Andrey18651@gmail.com"
] | Andrey18651@gmail.com |
4934b1bcd2efd7f12435fe7bc83e6f0357341b65 | 5e27da01d3c9aceb326feff71b4d59b20eaed595 | /chapters/databases/app/__init__.py | b5c6f225de7f88e3080a7bfa01944b13eea94885 | [
"MIT"
] | permissive | SpencerMycek/WTF-is-Flask | 6f3c1439cc0675309b1d685fb0903c90b90f755b | 4f697b7d2606633caf38ed925e8db4827bf0ee2e | refs/heads/master | 2023-05-12T16:56:10.568176 | 2022-10-19T17:03:12 | 2022-10-19T17:03:12 | 233,667,997 | 0 | 0 | MIT | 2023-05-01T21:19:07 | 2020-01-13T18:46:20 | Python | UTF-8 | Python | false | false | 260 | py | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config.from_object(Config)  # load settings from the Config class
db = SQLAlchemy(app)  # database handle used by the models
migrate = Migrate(app, db)  # Flask-Migrate / Alembic integration
# Imported at the bottom deliberately, to avoid a circular import with this module.
from app import routes, models
| [
"Spence.Mycek@gmail.com"
] | Spence.Mycek@gmail.com |
0b1f54799727b073ebc04476ea71a309f1ef606b | 13aca2d3be5ed6214c45ad310951e2c27ea5a209 | /Lesson 22/docsEdit.py | 293bca61a1a6a64792189de9fbf26e6e68d81290 | [] | no_license | DarkLabel1/API-VKontakte | 756aa1a810a7bbeabf3efc631d1840f7d623818d | 4a613e60360cbd6b42b9190f4d1192e2ae3a60a0 | refs/heads/master | 2023-01-19T13:17:34.029694 | 2020-11-16T17:13:48 | 2020-11-16T17:13:48 | 291,518,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from KEY.key import API_KEY_DOCS
import requests
# Target document: owner and document ids plus the new title/tags to apply.
OWNER_ID = 533763262
DOC_ID = 568100331
DOC_TITLE = "Какой-то договор в Ижевске"
DOC_TAGS = "Договор,ижевск"
# Call the VK API docs.edit method to rename/retag the document.
r = requests.get("https://api.vk.com/method/docs.edit", params={
    "owner_id": OWNER_ID,
    "doc_id": DOC_ID,
    "title": DOC_TITLE,
    "tags": DOC_TAGS,
    "access_token": API_KEY_DOCS,
    "v": 5.122
}).json()
# VK returns {"response": 1} on success.
if r["response"] == 1:
    print(f"Договор изменен. Название договора: {DOC_TITLE}.")
"dark.laybel@bk.ru"
] | dark.laybel@bk.ru |
43cd83767fb5b114eb726ddf99e8ae561d91adf5 | aa0270b351402e421631ebc8b51e528448302fab | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_06_02_preview/aio/operations/_maintenance_configurations_operations.py | e03f2a6cb9fe01b22e856323445e0d162f7c457d | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 19,860 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._maintenance_configurations_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_managed_cluster_request,
)
T = TypeVar("T")
# Optional per-call callback ("cls") applied to each response:
# (pipeline_response, deserialized_result, response_headers) -> custom result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MaintenanceConfigurationsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2022_06_02_preview.aio.ContainerServiceClient`'s
:attr:`maintenance_configurations` attribute.
"""
    models = _models  # expose the generated models namespace on the operation group

    def __init__(self, *args, **kwargs) -> None:
        # Generated plumbing: the service client hands over its shared
        # pipeline client, configuration, serializer and deserializer,
        # either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_managed_cluster(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> AsyncIterable["_models.MaintenanceConfiguration"]:
"""Gets a list of maintenance configurations in the specified managed cluster.
Gets a list of maintenance configurations in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MaintenanceConfiguration or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2022_06_02_preview.models.MaintenanceConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-02-preview"))
cls: ClsType[_models.MaintenanceConfigurationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_managed_cluster_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_managed_cluster.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("MaintenanceConfigurationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_managed_cluster.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Gets the specified maintenance configuration of a managed cluster.
Gets the specified maintenance configuration of a managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_06_02_preview.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-02-preview"))
cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"
}
@overload
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: _models.MaintenanceConfiguration,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:param parameters: The maintenance configuration to create or update. Required.
:type parameters:
~azure.mgmt.containerservice.v2022_06_02_preview.models.MaintenanceConfiguration
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_06_02_preview.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:param parameters: The maintenance configuration to create or update. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_06_02_preview.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: Union[_models.MaintenanceConfiguration, IO],
**kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:param parameters: The maintenance configuration to create or update. Is either a
MaintenanceConfiguration type or a IO type. Required.
:type parameters:
~azure.mgmt.containerservice.v2022_06_02_preview.models.MaintenanceConfiguration or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_06_02_preview.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-02-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "MaintenanceConfiguration")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"
}
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
) -> None:
"""Deletes a maintenance configuration.
Deletes a maintenance configuration.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-02-preview"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"
}
| [
"noreply@github.com"
] | fangchen0601.noreply@github.com |
418128e933eadf203bb45c157fb1159c2f0fd3fc | 04c21e01c7dd002d0d66f26f17294bbe25ab30c1 | /src/core/serializers/authentication/reset_password.py | 0e0b5a6f90c5a3f1927d314da4b45df747402d19 | [] | no_license | unbrokenguy/Q-n-A-rest-api | 29d1a7614d761bf68f38bbbbbd731c3692afccf7 | dd483993e304d6660c8c8f7518bf7414efd8ec28 | refs/heads/master | 2023-06-03T20:19:52.606677 | 2021-06-18T09:35:27 | 2021-06-18T09:35:27 | 376,749,787 | 0 | 0 | null | 2021-06-18T09:35:27 | 2021-06-14T08:08:44 | Python | UTF-8 | Python | false | false | 394 | py | from rest_framework import serializers
from core.models import User
class ResetPasswordSerializer(serializers.ModelSerializer):
    """
    Serializer for the reset-password endpoint: accepts only the new password.

    NOTE(review): the original docstring claimed this serializer checks that
    the new password is strong enough and raises ValidationError, but no
    ``validate``/``validate_password`` method is visible here — presumably the
    check lives in a model/field validator or in the view; confirm before
    relying on it.
    """
    class Meta:
        # Expose only the password field of the User model; it must be supplied.
        model = User
        fields = ["password"]
        extra_kwargs = {
            "password": {"required": True},
        }
| [
"khazievbulatphanzilovich@gmail.com"
] | khazievbulatphanzilovich@gmail.com |
a6a98af8cf643fc76cd0b3374946cf376079c216 | d160301e6eec4e9f5ce921aac6f8b8c634689822 | /python/1.py | a9980805283733c3c7ab18447f13a63f6b89c616 | [] | no_license | akashhnag/coding-ninja | 46bd9f1f0383d9bab4ed819842d300527b22d911 | 92b450f96da65ca0c360280ca401a4d8af538b55 | refs/heads/master | 2020-12-27T14:04:46.856614 | 2020-02-14T13:11:09 | 2020-02-14T13:11:09 | 237,929,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | n = int(input())
arr = list(int(i) for i in input().strip().split(' '))
ind=0
evensum=0
oddsum=0
for a in arr:
if a%2==0 and ind%2==0:
evensum+=a
elif a%2==1 and ind%2==1:
oddsum+=a
ind+=1
print(evensum,' ',oddsum)
| [
"akash.nag@cognitif.ai"
] | akash.nag@cognitif.ai |
7718f80d703242913200b6318bd12354622ff8e1 | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-loganalytics/azure/mgmt/loganalytics/operations/__init__.py | b9ace78ff9b74043c8e8e5253b7611a5e4bd9da3 | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 952 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .linked_services_operations import LinkedServicesOperations
from .data_sources_operations import DataSourcesOperations
from .workspaces_operations import WorkspacesOperations
from .storage_insights_operations import StorageInsightsOperations
from .saved_searches_operations import SavedSearchesOperations
# Public surface of this operations package: one operations class per
# Log Analytics resource type re-exported above.
__all__ = [
    'LinkedServicesOperations',
    'DataSourcesOperations',
    'WorkspacesOperations',
    'StorageInsightsOperations',
    'SavedSearchesOperations',
]
| [
"laurent.mazuel@gmail.com"
] | laurent.mazuel@gmail.com |
c4b6c3920b3eb30c4ae1d4e8265c34ede66d74a4 | 1b8a4f0b63d13708240e3679fc21437c1f466654 | /paracetemol.py | d0c0781f1a40d41acf3d6c5ae00c134a7de68225 | [] | no_license | standrewscollege2018/2021-year-11-python-classwork-JustineLeeNZ | 2d11a26b1ec0a7094e395ef958ed7e6923beb086 | 6709ad3329b31c8c1bbb9ccdf916804390ff00b9 | refs/heads/master | 2023-03-13T19:11:55.732225 | 2021-03-08T23:03:28 | 2021-03-08T23:03:28 | 334,806,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | ''' uses weight and age to calculate correct dose of paracetemol '''
# constants
AGE_LIMIT = 12
DOSE_PER_KG = 10
# ask for age
age = float(input("Enter your age: "))
# check age to see whether also need weight
# child dose
if age < 12 :
# ask for weight
weight = float(input("Enter your weight: "))
# calculate dose - convert dose to string so can add units
dose = str(DOSE_PER_KG * weight) + " milligrams"
# adult dose
else:
dose = "2 x 500mg tablets"
# display dose info
print("Recommended dose every 4-6 hours is {}".format(dose))
| [
"48225095+JustineLeeNZ@users.noreply.github.com"
] | 48225095+JustineLeeNZ@users.noreply.github.com |
c3a88a91df0ca6dd325fb81f7f3f25b014e2a78d | 44b455e1d02445954382ef1d40be41b7798700a1 | /async_request/tornado_request.py | de8b957897f97dcc138d423b81f977d6163bbeb8 | [] | no_license | anstones/Mylib | 58a9f49a784f9dce7ab2053020f5ac754f3203ee | c21a28d9a34cf8c71ad290d61034365fb86bdc86 | refs/heads/master | 2020-08-19T12:51:40.647523 | 2019-11-18T14:19:29 | 2019-11-18T14:19:29 | 215,921,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,589 | py | # coding: utf-8
import json as _json
import urllib.parse
import mimetypes
from functools import partial
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPRequest, HTTPResponse
from tornado import gen
from lib.exceptions import CallServiceException
from lib.utilities import get_unique_id
class AsyncResponse(object):
    """Wrapper around a Tornado HTTPResponse.

    Records the request method/url up front (before the response exists) and
    exposes convenient accessors for the request, headers and decoded payload.
    The parsed payload (or an Exception) is stored in ``result``.
    """

    def __init__(self, method, url, response=None):
        self._method = method
        self._url = url
        self._result = None
        self._response = response

    @property
    def request_method(self):
        # Prefer the method recorded on the actual response, if present.
        if self._response:
            return self._response.request.method
        return self._method

    @property
    def request_url(self):
        # Prefer the URL recorded on the actual response, if present.
        if self._response:
            return self._response.request.url
        return self._url

    @property
    def request_data(self):
        return self._response.request.body

    @property
    def response(self):
        return self._response

    @response.setter
    def response(self, response):
        self._response = response

    @property
    def headers(self):
        return self._response.headers

    @property
    def result(self):
        return self._result

    @result.setter
    def result(self, result):
        self._result = result

    @property
    def json(self):
        return _json.loads(self.text)

    @property
    def text(self):
        return self._decode_content()

    @property
    def content(self):
        return self._response.body

    def _decode_content(self):
        # Only a genuine HTTPResponse carrying a bytes body can be decoded;
        # anything else yields None.
        if not isinstance(self._response, HTTPResponse):
            return None
        body = self._response.body
        if not isinstance(body, bytes):
            return None
        return body.decode("utf-8")

    def has_exception(self):
        return isinstance(self._result, Exception)

    def __str__(self):
        return '{} {} request:{} response:{}'.format(
            self.request_method, self.request_url, self.request_data, self.text)

    __repr__ = __str__
class AsyncResult(object):
    """Thin convenience wrapper over a Tornado HTTPResponse.

    Unlike :class:`AsyncResponse` it keeps no standalone method/url fields —
    every accessor dereferences the wrapped response object.
    """

    def __init__(self, response=None):
        self._response = response

    @property
    def request_method(self):
        return self._response.request.method

    @property
    def request_url(self):
        return self._response.request.url

    @property
    def request_data(self):
        return self._response.request.body

    @property
    def request_headers(self):
        return self._response.request.headers

    @property
    def response_headers(self):
        return self._response.headers

    @property
    def status_code(self):
        return self._response.code

    @property
    def response(self):
        return self._response

    @response.setter
    def response(self, response):
        self._response = response

    @property
    def json(self):
        return _json.loads(self.text)

    @property
    def text(self):
        return self._decode_content()

    @property
    def content(self):
        return self._response.body

    def _decode_content(self):
        # Decode only a real HTTPResponse whose body is bytes; otherwise None.
        if isinstance(self._response, HTTPResponse):
            body = self._response.body
            if isinstance(body, bytes):
                return body.decode("utf-8")
        return None

    def abstract(self):
        # The request body is truncated to keep log lines short.
        return "{} {} request:{} response:{}".format(
            self.request_method, self.request_url,
            self.request_data[:512], self.text)

    def __str__(self):
        return '{} {} request:{} response:{}'.format(
            self.request_method, self.request_url,
            self.request_data, self.text)

    __repr__ = __str__
class TornadoHttpRequest(object):
    """Coroutine-style HTTP helper built on Tornado's ``AsyncHTTPClient``.

    Provides GET/POST plus several multipart file-upload flavours; transport
    or service failures are surfaced as :class:`CallServiceException`.
    """
    def __init__(self, logger):
        # AsyncHTTPClient() returns the shared per-IOLoop client instance.
        self._client = AsyncHTTPClient()
        self._logger = logger
    @gen.coroutine
    def get(self, url, is_json_result=True, params=None, headers=None, **kwargs):
        """
        Perform an async GET; query parameters come from *params* and **kwargs.
        :param url: base URL to request
        :param is_json_result: when True, parse the body as JSON into ``result``
        :param params: optional dict merged into the query string
        :param headers: optional request headers
        :param kwargs: additional query-string key/value pairs
        :return: :class:`AsyncResponse` with ``result`` populated
        """
        if params is not None:
            kwargs.update(params)
        if kwargs:
            real_url = "{}?{}".format(url, urllib.parse.urlencode(kwargs))
        else:
            real_url = url
        result = AsyncResponse(method="GET", url=real_url)
        try:
            response = yield self._client.fetch(real_url, headers=headers)
            result.response = response
            self._logger.debug(result)
            if response.error:
                raise CallServiceException(method=result.request_method, url=result.request_url, errmsg=response.error)
        except Exception as e:
            # NOTE(review): a CallServiceException raised just above is caught
            # here again and wrapped a second time — presumably unintended;
            # also fetch() itself raises on non-2xx by default, so the
            # response.error branch may be unreachable. Confirm before changing.
            raise CallServiceException(method=result.request_method, url=result.request_url, errmsg=e)
        else:
            try:
                result.result = result.text if not is_json_result else result.json
            except _json.JSONDecodeError:
                raise CallServiceException(method=result.request_method,
                                           url=result.request_url,
                                           errmsg="Invalid json format")
        return result
    @gen.coroutine
    def post(self, url, is_json_result=True, data=None, json=None, headers=None, use_url_encode=False, **kwargs):
        """
        Perform an async POST.
        :param url: URL to post to
        :param is_json_result: when True, parse the body as JSON into ``result``
        :param data: pre-encoded request body (used when *json* is None)
        :param json: payload dict; JSON- or url-encoded depending on *use_url_encode*
        :param headers: optional request headers (a Content-Type is supplied if absent)
        :param use_url_encode: send *json* form-urlencoded instead of as JSON
        :param kwargs: extra keyword args forwarded to ``HTTPRequest``
        :return: :class:`AsyncResponse` with ``result`` populated
        """
        if use_url_encode:
            if headers is None:
                headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
            # NOTE(review): the url-encoded branch reads *json*, not *data* —
            # presumably callers always pass the payload via json=; confirm.
            data = urllib.parse.urlencode(json)
        else:
            if json is not None:
                if headers is None:
                    headers = {"Content-Type": "application/json; charset=UTF-8"}
                data = _json.dumps(json)
        result = AsyncResponse(method="POST", url=url)
        request = HTTPRequest(url=url, method=result.request_method, body=data, headers=headers, **kwargs)
        try:
            response = yield self._client.fetch(request)
            result.response = response
            self._logger.debug(result)
            if response.error:
                raise CallServiceException(method=result.request_method, url=result.request_url, errmsg=response.error)
        except Exception as e:
            # NOTE(review): same double-wrapping pattern as in get(); confirm.
            raise CallServiceException(method=result.request_method, url=result.request_url, errmsg=e)
        else:
            try:
                result.result = result.text if not is_json_result else result.json
            except _json.JSONDecodeError:
                raise CallServiceException(method=result.request_method,
                                           url=result.request_url,
                                           errmsg="Invalid json format")
        return result
    @gen.coroutine
    def send_file(self, url, file_names):
        """
        Stream several local files to *url* as one multipart/form-data POST.
        :param url: upload endpoint
        :param file_names: iterable of local file paths to send
        :return: :class:`AsyncResult` wrapping the server response
        """
        method = "POST"
        boundary = get_unique_id()
        headers = {'Content-Type': 'multipart/form-data; boundary=%s' % boundary}
        # body_producer streams the parts instead of buffering them in memory.
        producer = partial(self._multipart_producer, boundary, file_names)
        result = AsyncResult()
        request = HTTPRequest(url=url, method=method, headers=headers, body_producer=producer)
        try:
            response = yield self._client.fetch(request)
            if response.error:
                raise CallServiceException(method=method, url=url, errmsg=response.error)
            result.response = response
        except Exception as e:
            raise CallServiceException(method=method, url=url, errmsg=e)
        return result
    @gen.coroutine
    def send_data_as_file(self, url, raw_data, filename=None, ext="jpg"):
        """
        Stream an in-memory blob to *url* as a multipart/form-data file part.
        :param url: upload endpoint
        :param raw_data: bytes to send as the file content
        :param filename: file name to report; generated from the boundary when None
        :param ext: extension used for the generated file name
        :return: :class:`AsyncResult` wrapping the server response
        """
        method = "POST"
        boundary = get_unique_id()
        headers = {'Content-Type': 'multipart/form-data; boundary=%s' % boundary}
        producer = partial(self._stream_producer, boundary, filename, ext, raw_data)
        result = AsyncResult()
        request = HTTPRequest(url=url, method=method, headers=headers, body_producer=producer)
        try:
            response = yield self._client.fetch(request)
            if response.error:
                raise CallServiceException(method=method, url=url, errmsg=response.error)
            result.response = response
        except Exception as e:
            raise CallServiceException(method=method, url=url, errmsg=e)
        return result
    @gen.coroutine
    def upload_file(self, url, raw_data, filename=None, ext="jpg"):
        """
        Upload an in-memory blob with a fully pre-built (buffered) multipart body.
        :param url: upload endpoint
        :param raw_data: bytes to send as the file content
        :param filename: file name to report; generated from the boundary when None
        :param ext: extension used for the generated file name
        :return: :class:`AsyncResult` wrapping the server response
        """
        method = "POST"
        boundary = get_unique_id()
        body = TornadoHttpRequest._encode_formdata(boundary=boundary, filename=filename, ext=ext, raw_data=raw_data)
        headers = {'Content-Type': 'multipart/form-data; boundary=%s' % boundary, 'Content-Length': len(body)}
        result = AsyncResult()
        request = HTTPRequest(url=url, method=method, headers=headers, body=body)
        try:
            response = yield self._client.fetch(request)
            if response.error:
                raise CallServiceException(method=method, url=url, errmsg=response.error)
            result.response = response
        except Exception as e:
            raise CallServiceException(method=method, url=url, errmsg=e)
        return result
    @classmethod
    @gen.coroutine
    def _multipart_producer(cls, boundary, file_names, write):
        """
        Body producer: stream each file in *file_names* as a multipart part.
        :param boundary: multipart boundary string
        :param file_names: iterable of local file paths
        :param write: Tornado-provided coroutine used to emit body chunks
        """
        boundary_bytes = boundary.encode()
        for filename in file_names:
            filename_bytes = filename.encode()
            mime_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            # Part header: boundary, disposition and content type.
            buf = (
                (b'--%s\r\n' % boundary_bytes) +
                (b'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' %
                 (filename_bytes, filename_bytes)) +
                (b'Content-Type: %s\r\n' % mime_type.encode()) +
                b'\r\n'
            )
            yield write(buf)
            with open(filename, 'rb') as f:
                while True:
                    # 16 KiB chunks keep memory use bounded for large files.
                    chunk = f.read(16 * 1024)
                    if not chunk:
                        break
                    yield write(chunk)
                yield write(b'\r\n')
        # Closing boundary terminates the multipart body.
        yield write(b'--%s--\r\n' % (boundary_bytes,))
    @classmethod
    @gen.coroutine
    def _stream_producer(cls, boundary, filename, ext, raw_data, write):
        """
        Body producer: emit a single multipart part built from *raw_data*.
        :param boundary: multipart boundary string
        :param filename: reported file name; generated from the boundary when falsy
        :param ext: extension for the generated file name
        :param raw_data: bytes forming the part content
        :param write: Tornado-provided coroutine used to emit body chunks
        """
        boundary_bytes = boundary.encode()
        if not filename:
            filename = "{}.{}".format(boundary, ext)
        filename_bytes = filename.encode()
        mime_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
        buf = (
            (b'--%s\r\n' % boundary_bytes) +
            (b'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' %
             (filename_bytes, filename_bytes)) +
            (b'Content-Type: %s\r\n' % mime_type.encode()) +
            b'\r\n'
        )
        yield write(buf)
        yield write(raw_data)
        yield write(b'\r\n')
        yield write(b'--%s--\r\n' % (boundary_bytes,))
    @classmethod
    def _encode_formdata(cls, boundary, filename, ext, raw_data):
        """
        Build a complete multipart/form-data body in memory and return it.
        :param boundary: multipart boundary string
        :param filename: reported file name; generated from the boundary when falsy
        :param ext: extension for the generated file name
        :param raw_data: bytes forming the part content
        :return: the encoded body as bytes
        """
        crlf = b'\r\n'
        buffer = list()
        boundary_bytes = boundary.encode()
        if not filename:
            filename = "{}.{}".format(boundary, ext)
        filename_bytes = filename.encode()
        buffer.append(b'--%s' % boundary_bytes)
        buffer.append(b'Content-Disposition: form-data; name="%s"; filename="%s"' % (filename_bytes, filename_bytes))
        mime_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
        buffer.append(b'Content-Type: %s' % mime_type.encode())
        buffer.append(b'')
        buffer.append(raw_data)
        buffer.append(b'--%s--' % boundary_bytes)
        buffer.append(b'')
        # CRLF-join produces the blank line separating headers from content.
        body = crlf.join(buffer)
        return body
"stone3790@163.com"
] | stone3790@163.com |
5e72f4a12c7503279a10f1b738bc4bf2b607c98b | d0d5bd6cf9678c445f7b0be32599460131a38ffa | /toolkit/EasyTree/geneconcate.py | 17a0fe7f561394eb29d49b41b7566d0a2fbd1487 | [] | no_license | cucmeliu/workspace | 0b26be248df76159ea6f5af2b7ea96a1edf5b862 | 381676dae18d9bba153d7aacc86d408b12a9e375 | refs/heads/master | 2021-01-20T15:58:32.783143 | 2018-06-29T10:03:33 | 2018-06-29T10:03:33 | 90,805,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,583 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Filename: geneconcate
# Desc: gene modify
# author: leo.liu
# date: 2018.2.28
from openpyxl import workbook
from openpyxl import load_workbook
import sys
import os
import codecs
#from openpyxl.write.excel import ExcelWriter
# 第一个需求
# 1. Datasheet中保存了Unicode,及其对应的碱基对LSU/ITS/.../..
# 2. 每个碱基对类型(LSU/ITS/.../..)都有一个同名文件对应,该文件保存 碱基对与编码的对应关系,同一个碱基对的编码长度是相同的
#
# 3. 按Datasheet中Unicode的顺序,及其对应的碱基对LSU/ITS/.../..进行重新生成,生成规则为:
# 1)新生成的文件名以碱基对类型拼接(LSU+ITS+....txt)
# 2)新生成的文件内容为:
# Unicode
# 碱基对对应的编码连续拼接
# Datasheet中Unicode对应的碱基对不存在的,以其定长的n符号代替
#
# 实现方案
# 1. 加载Datasheet到数组中
# 2. 加载所有碱基对,{type,{key:value, k:v}} eg. {LSU{KY212762:nnnnnnnn}}
# 3. 每个碱基对类型,添加一个{None:nnnnnnn},其中nnnn的长度与其他基因相同
# 4. 顺序按行扫描Datasheet,拼接Unicode, gen1 gen2.....
# 5. 写入文本文件,名字为gen1+gen2+....
# Column index where the base-pair (gene) columns start in the datasheet
GEN_START = 4
# Number of trailing columns to ignore (counted from the end)
GEN_END = 2
# Separator appended after every sequence line
SPLITTER = '\n'
# Placeholder character used to pad missing sequences
NONE_STR = 'n'
DATA_PATH = U'./data/'
RST_PATH = U'./data/result/'
DATASHEET = DATA_PATH + u'Datasheet.xlsx'
def LoadDatasheet(filename):
    """Load the first worksheet of *filename* (xlsx) into a 2-D list.

    Returns (data, row, col) where data[r][c] is the value of the 0-based
    cell (r, c) and row/col are the sheet dimensions.
    """
    wb = load_workbook(filename)
    ws = wb.worksheets[0]
    row = len(list(ws.rows))
    col = len(list(ws.columns)) # the trailing GEN_END columns are dropped later
    # Pre-build an empty row x col matrix
    data = [[0 for i in range(col)] for i in range(row)]
    # Fill it; openpyxl cell coordinates are 1-based
    for r in range(1, row+1):
        for c in range(1, col+1):
            data[r-1][c-1] = ws.cell(row=r, column=c).value
    return data, row, col
# def LoadGen(gentypes):
# # 第一维存基因类型
# # 第二维存碱基对(KTxxxxx: nnnnnnnnnnnn)
# allGen = {}
# for t in gentypes:
# filename = DATA_PATH + t + '.txt'
# # print filename
# with open(filename, 'r') as f:
# n = 0
# k = ""
# v = ""
# genLen = 0
# genpair = {}
#
# while True:
# n=n+1
# line = f.readline().strip()
# # print 'line ------', n, line
# if not line:
# break
#
# if n % 2 == 1:
# # 去掉行头的 > 符号
# k = line[1:]
# genpair[k] = ''
# else:
# v = line
# genpair[k] = v
# genLen = len(v)
# #print genpair
# # nn = ""
# # print genLen
# # for i in range(0, genLen):
# # nn = nn + 'n'
# genpair["None"] = NONE_STR*genLen #str("".join('n') for i in range(0, genLen))
# # print 'all gen:', t, '=', genpair
# allGen[t] = genpair
# # print '------------a--------a----a-'
# # print allGen
# return allGen
# dna/rna文件的组织方式不是按行,而是以>为开头标记,因此重写此方法
# DNA/RNA files are organised by '>' record markers rather than one pair per
# line, hence this rewrite of the (commented-out) LoadGen above.
def LoadGen2(gentypes):
    """Load every FASTA-like gene file named after the types in *gentypes*.

    Returns {gene_type: {'>accession': sequence, ...}}; each inner dict also
    gets a '>None' placeholder of 'n' characters sized like the last parsed
    sequence (including its SPLITTER newlines), used to pad missing records.

    NOTE(review): parsing stops at the first blank line, and genLen reflects
    only the most recently read record — presumably all sequences in one file
    share the same length; confirm against the data files.
    """
    # First level keys: gene type
    # Second level: base-pair records (e.g. KTxxxxx: nnnnnnnnnnnn)
    #print 'Loading gen pairs...'
    allGen = {}
    for t in gentypes:
        filename = DATA_PATH + t + '.txt'
        # print filename
        with open(filename, 'r') as f:
            k = ''
            v = ''
            genLen = 0
            genpair = {}
            while True:
                line = f.readline().strip()
                # print line
                if not line:
                    break
                if line.startswith(">"):
                    # Header line starts a new record, keyed with the '>' kept
                    k = line #[1:]
                    v = ''
                    genpair[k] = ''
                else:
                    # Sequence lines accumulate, one SPLITTER per input line
                    v += line + SPLITTER
                    genpair[k] = v
                    genLen = len(v)
            genpair[">None"] = 'n'*genLen
        allGen[t] = genpair
    #print allGen
    return allGen
def genconcate(datasheet, gens):
    """Concatenate gene sequences for every sample row of the datasheet.

    datasheet -- 2-D list; row 0 is the header, column 0 the sample id.
    gens -- {gene_type: {">KEY": sequence}} as produced by LoadGen2.
    Returns {">sample_id": concatenated_sequence}.
    Note: rewrites None cells of the datasheet to the string 'None' in place.
    """
    header = datasheet[0]
    records = datasheet[1:]
    result = {}
    for record in records:
        sample_id = ">" + record[0]
        concatenated = ""
        # Only the gene columns between the fixed leading columns and the
        # ignored trailing columns are used.
        for col in range(GEN_START, len(record) - GEN_END):
            if record[col] is None:
                record[col] = 'None'
            lookup_key = ">" + record[col]
            concatenated += gens[header[col]][lookup_key]
        result[sample_id] = concatenated
    return result
def writetofile(gencon, filename):
    """Write id/sequence pairs to `filename`, sorted by id.

    gencon -- {">id": sequence} mapping from genconcate().
    NOTE: opens the file in binary mode and writes str objects, which is
    a Python-2 idiom (consistent with the print statements elsewhere in
    this module).
    """
    with open(filename, 'wb') as out:
        for key in sorted(gencon.keys()):
            out.write(key + '\n')
            out.write(gencon[key] + '\n')
def do_main():
    # Pipeline entry: load the datasheet, read the gene files, concatenate
    # sequences per sample and write the result file. Returns True.
    # Load from Datasheet.xlsx
    print 'Loading dataset...'
    (datasheet, row, col) = LoadDatasheet(DATASHEET)
    # Gene-type names come from the header row, between the fixed leading
    # columns (first 4) and the ignored trailing columns (GEN_END).
    gentypes = datasheet[0][4:col-GEN_END]
    # print gentypes
    # The output file name is the '+'-joined list of gene types.
    outfile = RST_PATH
    for t in gentypes:
        outfile = outfile + t + '+'
    # Drop the trailing '+' and append the extension.
    outfile = outfile[:len(outfile)-1] + '.txt'
    # Gene concatenation
    print 'Concating...'
    writetofile(genconcate(datasheet, LoadGen2(gentypes)), outfile)
    print 'File saved in: ', outfile
    print 'Done.'
    return True
def main():
    # Ensure the result directory exists, then run the pipeline.
    if not os.path.exists(RST_PATH):
        os.mkdir(RST_PATH)
    do_main()
if __name__ =="__main__":
main()
| [
"cucmeliu@gmail.com"
] | cucmeliu@gmail.com |
fabe9e3f5637c14c6567fd438a3427cc5b0ec05d | 881e6817a69a7e66d82be664762c917c6920c8e4 | /parser.py | f026143bdc6671512d30450b5f3754833c7b9e27 | [
"Apache-2.0"
] | permissive | z0lope0z/cheathub | 2cd8cbb5390b634d472c4d3a53e48d25239d1431 | ebd94591d4e7bba71906419f771259dd764d1ce3 | refs/heads/master | 2021-01-10T20:49:43.048876 | 2014-02-12T06:53:22 | 2014-02-12T06:53:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from bs4 import BeautifulSoup as BS
from BeautifulSoup import BeautifulStoneSoup as BSS
import pdb
# NOTE(review): quick smoke test of entity conversion — the result of this
# expression is discarded.
w = '<'
BSS(w,convertEntities=BSS.HTML_ENTITIES).contents[0]
# Read the sample, decode HTML entities with BeautifulStoneSoup, then parse
# the decoded text with BeautifulSoup 4.
# NOTE(review): `file` shadows the Python-2 builtin and is never closed —
# a `with` block would be safer.
file = open("sample.txt")
parse = BSS(file.read(),convertEntities=BSS.HTML_ENTITIES).contents[0]
soup = BS(parse)
# Extract the text of the <lang> element and write it to output.txt.
final = soup.find('lang').getText()
output = open("output.txt","w")
output.write(final)
output.close()
| [
"lopegwapo@gmail.com"
] | lopegwapo@gmail.com |
234f603a62fbcfc25412c15d4df79e54e6129073 | 60f95eff7c43f788af2420813c371152c1e2e5eb | /hulk/broker/oanda/common/constants.py | b37ffd670df513efa144a0da60298fba8d27b29e | [
"BSD-3-Clause"
] | permissive | webclinic017/hulk | 1667c508acb061a8120dc415978a72e83dc38f54 | de326ca1554dc743e225cef4e4b1e2fd4f5090c6 | refs/heads/master | 2022-03-22T20:07:23.276317 | 2019-12-02T01:10:43 | 2019-12-02T01:11:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | from ....base.models import AccountType
# Oanda endpoint hostnames, keyed first by API kind ("streaming" for the
# rates/events stream, "api" for the REST API) and then by account type
# (REAL = live trading, DEMO = fxpractice).
OANDA_ENVIRONMENTS = {
    "streaming": {
        AccountType.REAL: "stream-fxtrade.oanda.com",
        AccountType.DEMO: "stream-fxpractice.oanda.com",
    },
    "api": {
        AccountType.REAL: "api-fxtrade.oanda.com",
        AccountType.DEMO: "api-fxpractice.oanda.com",
    }
}
| [
"dev@luotao.net"
] | dev@luotao.net |
9d9bfc44598c9191f992d75b82629079642b00d6 | d808bf5c61a881c6004bb2b8ba5eddfcb0166b5f | /poll/migrations/0001_initial.py | 166156e755aac29aab508bf79858a04af09c250c | [] | no_license | Athul-dev-7/PollApp-Django | b890e907aa7b426357d82a9cfd489bf3c2f1ed2e | 1bffa4aae83aa1dbd441074ea8c5288ab9e61324 | refs/heads/main | 2023-06-22T18:22:58.763556 | 2021-07-25T11:35:29 | 2021-07-25T11:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | # Generated by Django 3.2.5 on 2021-07-25 09:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Poll table with a
    # question, three answer options and a vote counter per option.
    # NOTE: migration files should normally not be edited by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Poll',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.TextField()),
                ('option_one', models.CharField(max_length=30)),
                ('option_two', models.CharField(max_length=30)),
                ('option_three', models.CharField(max_length=30)),
                ('option_one_count', models.IntegerField(default=0)),
                ('option_two_count', models.IntegerField(default=0)),
                ('option_three_count', models.IntegerField(default=0)),
            ],
        ),
    ]
| [
"athuldas7777@gmail.com"
] | athuldas7777@gmail.com |
00235e95145c6baa62930aa5d5ae1be13d532ad4 | 4a2b2c261449e735a08fff005c899981baa13b5d | /manage.py | b1b5bfe98b1ea850717ab1d3c32d12e616af9314 | [] | no_license | dotnet-tech/python-django-contact | c55b048c3d517036e6f343a778b5f88aec3e28ec | 226ced12ade600624feed7247743430ce157e0a3 | refs/heads/master | 2020-05-27T16:51:36.943409 | 2019-05-26T17:11:32 | 2019-05-26T17:11:32 | 188,710,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    # Point Django at this project's settings module unless already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sampleWS.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named in argv (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"ayyanan@gmail.com"
] | ayyanan@gmail.com |
52826ca862da396fd7d48af0578fb98ec128b540 | 34e3d607f0a9b26ab1220f6164713c7304713996 | /ML_Actual_Combat/柱状图.py | 9d0ac74085806fd29fc64bb83c6dc2d7e019151a | [] | no_license | yuniaohappy/LearningPython | 756bc2926ec13101a8b4061561029cd4ee992b52 | 6470cc76c32acef74a3156953edfe3cc27b33682 | refs/heads/master | 2021-06-07T04:26:21.674320 | 2020-01-06T09:43:59 | 2020-01-06T09:43:59 | 120,276,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import numpy as np
import matplotlib.pyplot as plt
# Grouped bar chart: mean scores (with std-dev error bars) for two groups
# across five categories, with value labels above each bar.
men_means, men_std = (20, 35, 30, 35, 27), (2, 3, 4, 1, 2)
women_means, women_std = (25, 32, 34, 20, 25), (3, 5, 2, 3, 3)

ind = np.arange(len(men_means))  # the x locations for the groups
width = 0.35  # the width of the bars

fig, ax = plt.subplots()
# Place the two series side by side around each x location.
rects1 = ax.bar(ind - width/2, men_means, width, yerr=men_std,
                color='SkyBlue', label='Men')
rects2 = ax.bar(ind + width/2, women_means, width, yerr=women_std,
                color='IndianRed', label='Women')

# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind)
ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend()


def autolabel(rects, xpos='center'):
    """
    Attach a text label above each bar in *rects*, displaying its height.

    *xpos* indicates which side to place the text w.r.t. the center of
    the bar. It can be one of the following {'center', 'right', 'left'}.
    """

    xpos = xpos.lower()  # normalize the case of the parameter
    ha = {'center': 'center', 'right': 'left', 'left': 'right'}
    offset = {'center': 0.5, 'right': 0.57, 'left': 0.43}  # x_txt = x + w*off

    for rect in rects:
        height = rect.get_height()
        # Place the label slightly above the bar, shifted left/right
        # according to the requested xpos.
        ax.text(rect.get_x() + rect.get_width()*offset[xpos], 1.01*height,
                '{}'.format(height), ha=ha[xpos], va='bottom')


# Shift the two series' labels apart so they do not overlap.
autolabel(rects1, "left")
autolabel(rects2, "right")

plt.show()
"lpsddz@126.com"
] | lpsddz@126.com |
9fed24fb97a36aed86132f61070c2b1321d9db65 | ac40a6fa952dc5ca4ba88bf16971a3b2ced52009 | /blog/migrations/0002_comment.py | 8cf6f676ab02a80620596fa1de0f7d13d50509e1 | [] | no_license | hryniewiczm/hello | 38c88b8d92da30274ad8fff3e2aeba1f3cb22822 | f7ec56f4374b2daafdbc3582652ccde740b5322b | refs/heads/master | 2020-04-16T09:02:33.403547 | 2019-01-13T17:06:05 | 2019-01-13T17:06:05 | 165,448,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | # Generated by Django 2.1.5 on 2019-01-13 16:46
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration: adds the Comment model with a foreign key
    # to blog.Post (reverse accessor `post.comments`).
    # NOTE: migration files should normally not be edited by hand.

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('approved_comment', models.BooleanField(default=False)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
            ],
        ),
    ]
| [
"hryniewicz.m@o2.pl"
] | hryniewicz.m@o2.pl |
1c40f40b9b417d03a7a6e2c1098eafcda8298552 | 56201a7500b648b20b1be8dcca45859ff2ac516c | /time2freq.py | c67026928790de79957d53eb1e161709ebb8800d | [] | no_license | chuyuanver/nsor_measurement | e7a130766e0fe1605861ce38f7c681a11bbf4753 | 04be77456a336f4bebd21d3062eb7a8763957507 | refs/heads/master | 2021-09-11T00:08:51.538424 | 2018-04-04T18:14:08 | 2018-04-04T18:14:08 | 126,086,939 | 0 | 1 | null | 2018-03-20T22:22:36 | 2018-03-20T21:49:46 | Python | UTF-8 | Python | false | false | 247 | py | import numpy as np
def time2freq(time_data, pad_power):
    """Build the one-sided frequency axis for an FFT of `time_data`.

    The data length is rounded up to the next power of two and multiplied
    by 2**(pad_power - 1) zero-padding, so the axis has padded_len/2 + 1
    points from 0 up to the Nyquist frequency.

    time_data -- uniformly sampled time axis (at least two samples).
    pad_power -- zero-padding exponent; 1 means no extra padding.
    """
    next_pow2 = np.ceil(np.log2(len(time_data)))
    pad_factor = 2 ** (pad_power - 1)
    padded_len = 2 ** next_pow2 * pad_factor
    sample_spacing = time_data[1] - time_data[0]
    nyquist = 1 / (2 * sample_spacing)
    return np.linspace(0, nyquist, int(padded_len / 2) + 1)
| [
"noreply@github.com"
] | chuyuanver.noreply@github.com |
b6c760de8da31dc6fcd7ad788a52518a554c902f | 3927a61ee1503e2a8e1eec67a2f4f68c9da2add9 | /Prelab08/.svn/text-base/exceptionTasks.py.svn-base | 851b8584401b513d082ec2d91a8ce1c6bddabd68 | [] | no_license | Sdattagu/Python | 1349a049c50370e99aba74e7691ee583bdce98a0 | a030e59a15554b2c1cd836fbf098a199a4d8c648 | refs/heads/master | 2020-03-21T09:59:19.908811 | 2018-06-23T19:55:49 | 2018-06-23T19:55:49 | 138,428,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,726 | #Shounak Dattagupta
#10/22/2016
import math
import sys
from points import PointND
from points import Point3D
from points import PointGroup
def createPoint(dataString):
    """Build a PointND from a comma-separated string of float values.

    "3.14, 2.701, 19.77" -> PointND(3.14, 2.701, 19.77)
    If any token cannot be parsed as a float (e.g. "4.98, 3FA2, None"),
    an error-message string is returned instead of a point.
    """
    coords = []
    for token in dataString.split(","):
        try:
            coords.append(float(token))
        except ValueError:
            return "The string contains non-float values."
    return PointND(*coords)
def distanceBetween(point1, point2):
    """Return the distance between two PointND instances.

    No cardinality check is performed; if the distance computation raises
    one of the listed exceptions, an error-message string is returned
    instead.
    """
    try:
        return point1.distanceFrom(point2)
    except (ValueError, IOError, OSError, IndexError, KeyError, TypeError):
        return "distanceBetween(): Invalid calculation"
def checkVicinity(point, pointList, radius):
    """Classify each point in pointList by its distance from `point`.

    point -- a PointND instance.
    pointList -- list of PointND instances (no cardinality check).
    radius -- float threshold.
    Returns a 3-tuple:
      [0] number of points whose distance from `point` is <= radius,
      [1] number of points whose distance is > radius,
      [2] number of points that could not be checked (the distance
          computation raised an error).
    The three counts always sum to len(pointList).
    """
    within = 0
    beyond = 0
    invalid = 0
    for other in pointList:
        try:
            # Compute the distance once per point; the original evaluated
            # point.distanceFrom(other) in both the `if` and the `elif`,
            # doubling the work for every far-away point.
            dist = point.distanceFrom(other)
        except (ValueError, IOError, OSError, IndexError, KeyError, TypeError):
            invalid += 1
        else:
            if dist <= radius:
                within += 1
            else:
                beyond += 1
    return (within, beyond, invalid)
def checkOperation(*args):
    """Invoke performProcessing(*args) and classify the outcome.

    Returns:
      True  -- performProcessing raised no exception.
      "The following Error occurred: [ErrorName]" -- it raised an OSError
        subclass (BlockingIOError, InterruptedError, ConnectionResetError,
        etc.).
      False -- it raised any other exception.
    Re-raises ConnectionRefusedError unchanged, per the spec.
    """
    from prelab08addon import performProcessing
    try:
        performProcessing(*args)
    except ConnectionRefusedError:
        # Spec: "re-throw that error" — a bare raise preserves the original
        # exception object, message and traceback (the old code replaced it
        # with a brand-new ConnectionRefusedError).
        raise
    except OSError as ose:
        # type(...).__name__ yields the class name for any exception;
        # repr(ose)[:-2] only worked for exceptions constructed with no
        # arguments (repr with args is e.g. "OSError(2, 'No such file')").
        return "The following Error occurred: " + type(ose).__name__
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        return False
    return True
if __name__ == "__main__":
    # Manual smoke tests; run this module directly to exercise each helper.

    #TESTS FOR createPoint(dataString)
    argsString = "3.14,2.701,19.77"
    argsString2 = "4.32,2.801,18.43"
    argsStringInvalid = "4.98,3FAfdls2,None"
    print("-------------------")
    #Valid case
    print("Testing createPoint(dataString)")
    print("Testing valid case")
    point1 = createPoint(argsString)
    point2 = createPoint(argsString2)
    print(str(point1))
    print(type(point1))
    print(str(point2))
    print(type(point2))
    #Invalid case
    print("-------------------")
    print("Testing invalid case")
    print(str(createPoint(argsStringInvalid)))

    #TESTS FOR distanceBetween(point1, point2)
    print("-------------------")
    print("Testing distanceBetween")
    print(distanceBetween(point1, point2))

    #TESTS for checkVicinity(point, pointList, radius)
    # The third point has a different cardinality, so its distance check
    # should land in the "invalid" bucket.
    print("-------------------")
    print("Testing checkVicinity")
    print("Creating pointList...")
    pointOneString = "4.31,9.20,3.132"
    pointTwoString = "3.32, 1.32, 13.23"
    pointThreeString = "3.42"
    pointList = [createPoint(pointOneString), createPoint(pointTwoString), createPoint(pointThreeString)]
    print("pointList created.")
    print(checkVicinity(point1, pointList, 0))

    #TESTS for performProcessing(*args)
    print("-------------------")
    print("Testing checkOperation")
    args = (1, 2, 3)
    print(checkOperation(*args))
    pass
"shoshodg00@gmail.com"
] | shoshodg00@gmail.com | |
81b19ecd0c91e6ea8815d0e3b85c9432aea17620 | f1ea1b62071f76a56a20489a2689356cea465e0a | /shallow/shallow_main_task2.py | 57a58b6d46203f46af04905e216fcadec97a1a41 | [] | no_license | erika-tyagi/cfpb-complaints | 7080aa6d72af1b5794c51d6f812d2c90db69b6a3 | d65976eb489ab55c0bd6de535261c5bd30058459 | refs/heads/master | 2022-09-28T18:25:37.144540 | 2020-06-08T21:04:57 | 2020-06-08T21:04:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py | import shallow_util as util
from shallow_constants import *
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
pd.set_option('expand_frame_repr', False)
def shallow_run():
'''
Runs through the full machine learning pipeline. This includes loading the raw data,
processing the text features, running a grid search over model specifications, and
evaluating theese models.
The configuration is specified in the constants file.
Outputs:
- Writes to a log file.
'''
# Load data
print('\n ---------- LOADING DATE ---------- \n')
X_train, X_test, y_train, y_test = util.load_data_task2()
# NAIVE BAYES
print('\n ---------- NAIVE BAYES---------- \n')
# Create Naive Bayes pipeline
text_clf_nb = Pipeline(
[('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB())])
# Create grid search object
nb_clf = GridSearchCV(estimator=text_clf_nb,
param_grid=PARAMS_NB,
cv=5)
# Fit and predict pipeline
nb_clf = nb_clf.fit(X_train, y_train)
print(nb_clf.best_score_)
print(nb_clf.best_params_)
nb_predictions = nb_clf.predict(X_test)
print(classification_report(y_test, nb_predictions))
# SVM
print('\n ---------- SVM---------- \n')
# Create Naive Bayes pipeline
text_clf_svm = Pipeline(
[('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='hinge', penalty='l2'))])
# Create grid search object
svm_clf = GridSearchCV(estimator=text_clf_svm,
param_grid=PARAMS_SVM,
cv=5)
# Fit and predict pipeline
svm_clf = svm_clf.fit(X_train, y_train)
print(svm_clf.best_score_)
print(svm_clf.best_params_)
svm_predictions = svm_clf.predict(X_test)
print(classification_report(y_test, svm_predictions))
print('\n -------------------- \n')
if __name__ == "__main__":
shallow_run()
| [
"erika.tyagi@gmail.com"
] | erika.tyagi@gmail.com |
e49684693eb06b85153902a5ee7448e22605100d | 2257936357793d984265bf2073e1500162e6ca19 | /Python/Saper z użyciem GTK/Saper.py | 8334c73e1a2bc9f80c5272fd6a1252efcb7d9efc | [] | no_license | lkaminski96/Projects | 62fe511763d5147ce69467af781f2a32e9194615 | 8c8b0b5ce8aef399bcb4b2edc7c1ad9b5211d10b | refs/heads/master | 2022-12-09T22:29:54.278687 | 2019-08-21T14:38:57 | 2019-08-21T14:38:57 | 193,705,750 | 1 | 0 | null | 2022-04-22T22:12:40 | 2019-06-25T12:43:01 | JavaScript | UTF-8 | Python | false | false | 12,166 | py | #-*- coding: utf-8 -*-
import gi
# biblioteka do losowania liczb
import random
# wymagamy biblioteki w wersji min 3.0
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class Plansza(Gtk.Grid):
    """Class representing the game board."""

    def __init__(self):
        """Object initializer; inherits from Gtk.Grid."""
        Gtk.Grid.__init__(self)
        # n - program constant holding the board size (n x n)
        # ilosc_min - number of mines on the board
        # ilosc_przyciskow - number of buttons on the board, used to detect a win
        # plansza - list holding mine positions and, for the other fields,
        #           the number of mines adjacent to them
        self.n = 5
        self.ilosc_min = 0
        self.ilosc_przyciskow = self.n * self.n
        self.plansza = list()
        self.buttons = []
        # grid generation
        for i in range(self.n):
            # append an empty list to both the buttons list and the board
            self.plansza.append([])
            self.buttons.append([])
            for j in range(self.n):
                # set the initial value for the board field
                self.plansza[i].append(0)
                # button - create a new button with an (empty) label
                button = Gtk.Button.new_with_label("")
                # add the created button to the buttons list
                self.buttons[i].append(button)
                # add the button to the grid
                self.attach(button, i, j, 1, 1)
        # give all grid columns and rows identical sizes
        self.set_column_homogeneous(True)
        self.set_row_homogeneous(True)
class App(object):
    """Minesweeper game: window construction and game mechanics."""

    def __init__(self):
        """Object initializer: builds the window, wires the board, places mines."""
        # window title, default size and centred placement
        self.window = Gtk.Window(title="Saper")
        self.window.set_default_size(250, 250)
        self.window.set_position(1)
        # grid - object holding the board generated by the Plansza class
        self.grid = Plansza()
        # connect the buttons to the "clicked" event
        for i in range(self.grid.n):
            for j in range(self.grid.n):
                self.grid.buttons[i][j].connect("clicked", self.kliknieto, i, j)
        # draw n mines at random positions and record them on the board
        while self.grid.ilosc_min < self.grid.n:
            # wylosujx - holds the column number
            # wylosujy - holds the row number
            wylosujx = random.randint(0, self.grid.n - 1)
            wylosujy = random.randint(0, self.grid.n - 1)
            # guarantee that the mine positions are unique
            if self.grid.plansza[wylosujx][wylosujy] == 0:
                self.grid.ilosc_min += 1
                self.grid.plansza[wylosujx][wylosujy] = "M"
        # fill the board with the number of mines adjacent to each button
        for i in range(self.grid.n):
            for j in range(self.grid.n):
                # sasiedzi - list of neighbour positions for each button
                sasiedzi = list()
                # only consider buttons that hold a mine; every field around
                # a given mine gets its mine counter increased by one
                if self.grid.plansza[i][j] == "M":
                    # call the function returning the neighbour positions for
                    # each mine, passing the board, the coordinates of the
                    # currently inspected field and the search radius
                    sasiedzi = self.znajdz_sasiadow(self.grid.plansza, j, i, 1)
                    # for every position that is not a mine itself, note that
                    # the field borders a mine by incrementing its value
                    for para in sasiedzi:
                        if self.grid.plansza[para[1]][para[0]] == "M":
                            continue
                        self.grid.plansza[para[1]][para[0]] += 1
        # button that lets the player start the game over
        od_nowa = Gtk.Button(label="Nowa gra")
        # connect the button to the clicked event
        od_nowa.connect("clicked", self.nowa_gra)
        # glowny - box holding the board and the restart button
        self.glowny = Gtk.VBox()
        # add the board and the button to the box
        self.glowny.pack_start(self.grid, True, True, 0)
        self.glowny.pack_end(od_nowa, False, False, 0)
        # hook up the event of closing the window with the X button
        self.window.connect("delete-event", lambda x, y: Gtk.main_quit())
        # add the main box to the window
        self.window.add(self.glowny)
        # show everything in the window
        self.window.show_all()

    def odslon(self):
        """Method revealing all fields after a win or a loss."""
        for i in range(self.grid.n):
            for j in range(self.grid.n):
                # call the function that reveals a button
                self.podmien(i, j)

    def podmien(self, i, j):
        """Method revealing a button and giving it the appropriate label.

        i - column number the button is in
        j - row number the button is in
        """
        # Set the appropriate label depending on what information the
        # `plansza` list holds, and make the button insensitive.
        if self.grid.plansza[i][j] == "M":
            self.grid.buttons[i][j].get_child().set_markup('<span foreground="red"><b>M</b></span>')
            self.grid.buttons[i][j].set_sensitive(False)
        if self.grid.plansza[i][j] == 0:
            self.grid.buttons[i][j].get_child().set_markup('<span foreground="black"><b>0</b></span>')
            self.grid.buttons[i][j].set_sensitive(False)
        if self.grid.plansza[i][j] == 1:
            self.grid.buttons[i][j].get_child().set_markup('<span foreground="orange"><b>1</b></span>')
            self.grid.buttons[i][j].set_sensitive(False)
        if self.grid.plansza[i][j] == 2:
            self.grid.buttons[i][j].get_child().set_markup('<span foreground="orangered"><b>2</b></span>')
            self.grid.buttons[i][j].set_sensitive(False)
        if self.grid.plansza[i][j] == 3:
            self.grid.buttons[i][j].get_child().set_markup('<span foreground="tomato"><b>3</b></span>')
            self.grid.buttons[i][j].set_sensitive(False)
        if self.grid.plansza[i][j] > 3:
            self.grid.buttons[i][j].get_child().set_markup('<span foreground="brown"><b>{}</b></span>'
                                                           .format(self.grid.plansza[i][j]))
            self.grid.buttons[i][j].set_sensitive(False)

    def kliknieto(self, button, i, j):
        """Method reacting to button-press events.

        button - the pressed button
        i - column number the button is in
        j - row number the button is in
        """
        # update the number of buttons that can still be pressed
        self.grid.ilosc_przyciskow -= 1
        # if the player hit a mine, reveal the board
        if self.grid.plansza[i][j] == "M":
            self.odslon()
            return
        # if the player pressed a button other than a mine and it was the
        # "last" possible mine-free button, reveal the rest of the board
        # and show the victory message
        if self.grid.ilosc_przyciskow == self.grid.ilosc_min:
            self.odslon()
            # create the message dialog
            dialog = Gtk.MessageDialog(self.window, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "WYGRANA")
            # run the dialog
            dialog.run()
            # destroy the dialog
            dialog.destroy()
            return
        # if none of the above happened, the game goes on and only the
        # pressed button is revealed
        self.podmien(i, j)

    def znajdz_sasiadow(self, plansza, i, j, dystans):
        """Method finding neighbours within radius 1.

        plansza - the game board
        i - "x" coordinate of the inspected field (row number)
        j - "y" coordinate of the inspected field (column number)
        dystans - distance within which neighbours are searched
        Returns the list of neighbour positions. Based on a
        neighbour-finding algorithm from Stack Overflow.
        """
        # sasiedzi - list holding the positions of all neighbours of the
        # currently inspected field
        sasiedzi = []
        # zasieg_wiersza - row numbers taken into account when searching
        # for neighbours of the given field
        # zasieg_kolumny - column numbers taken into account when searching
        # for neighbours of the given field
        zasieg_wiersza = range(max(0, i - dystans), min(len(plansza), i + dystans + 1))
        zasieg_kolumny = range(max(0, j - dystans), min(len(plansza[0]), j + dystans + 1))
        # for every candidate row together with the columns
        for wiersz in zasieg_wiersza:
            for kolumna in zasieg_kolumny:
                # only consider positions different from the currently
                # inspected field (i, j), to avoid listing the field as
                # its own neighbour
                if (wiersz != i) or (kolumna != j):
                    sasiedzi.append((wiersz, kolumna))
        return sasiedzi

    def nowa_gra(self, button):
        """Method allowing the game to be restarted.

        button - the pressed button
        """
        # restore the initial values, as in the Plansza class
        self.grid.ilosc_min = 0
        self.grid.ilosc_przyciskow = self.grid.n * self.grid.n
        self.grid.plansza = list()
        # reset the board, the buttons and their labels
        for i in range(self.grid.n):
            self.grid.plansza.append([])
            for j in range(self.grid.n):
                self.grid.plansza[i].append(0)
                self.grid.buttons[i][j].get_child().set_markup("")
                self.grid.buttons[i][j].set_sensitive(True)
        # draw the mine positions anew
        while self.grid.ilosc_min < self.grid.n:
            # wylosujx - holds the column number
            # wylosujy - holds the row number
            wylosujx = random.randint(0, self.grid.n - 1)
            wylosujy = random.randint(0, self.grid.n - 1)
            # guarantee the uniqueness of the mine positions
            if self.grid.plansza[wylosujx][wylosujy] == 0:
                self.grid.ilosc_min += 1
                self.grid.plansza[wylosujx][wylosujy] = "M"
        # refill the board with the number of mines adjacent to each field
        for i in range(self.grid.n):
            for j in range(self.grid.n):
                # sasiedzi - list of neighbour positions for each button
                sasiedzi = list()
                # only consider buttons that hold a mine; every field around
                # a given mine gets its mine counter increased by one
                if self.grid.plansza[i][j] == "M":
                    # call the function returning the neighbour positions for
                    # each mine, passing the board, the coordinates of the
                    # currently inspected field and the search radius
                    sasiedzi = self.znajdz_sasiadow(self.grid.plansza, j, i, 1)
                    # for every position that is not a mine itself, note that
                    # the field borders a mine by incrementing its value
                    for para in sasiedzi:
                        if self.grid.plansza[para[1]][para[0]] == "M":
                            continue
                        self.grid.plansza[para[1]][para[0]] += 1
if __name__ == "__main__":
app = App()
Gtk.main()
| [
"52205017+lkaminski96@users.noreply.github.com"
] | 52205017+lkaminski96@users.noreply.github.com |
cc53f060d460eb0ef9a0249b2bb6c1c52008ea64 | bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062 | /ppdet/optimizer/optimizer.py | 2d0714078eec14dadd57f5689ae6a41039562202 | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleDetection | e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961 | bd83b98342b0a6bc8d8dcd5936233aeda1e32167 | refs/heads/release/2.6 | 2023-08-31T07:04:15.357051 | 2023-08-18T02:24:45 | 2023-08-18T02:24:45 | 217,475,193 | 12,523 | 3,096 | Apache-2.0 | 2023-09-10T10:05:56 | 2019-10-25T07:21:14 | Python | UTF-8 | Python | false | false | 12,296 | py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import math
import paddle
import paddle.nn as nn
import paddle.optimizer as optimizer
import paddle.regularizer as regularizer
from ppdet.core.workspace import register, serializable
import copy
from .adamw import AdamWDL, build_adamwdl
__all__ = ['LearningRate', 'OptimizerBuilder']
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
@serializable
class CosineDecay(object):
    """
    Cosine learning rate decay

    Args:
        max_epochs (int): max epochs for the training process.
            If you combine cosine decay with warmup, it is recommended
            that max_iters is much larger than the warmup iters.
        use_warmup (bool): whether to use warmup. Default: True.
        min_lr_ratio (float): minimum learning rate ratio. Default: 0.
        last_plateau_epochs (int): use minimum learning rate in
            the last few epochs. Default: 0.
    """

    def __init__(self,
                 max_epochs=1000,
                 use_warmup=True,
                 min_lr_ratio=0.,
                 last_plateau_epochs=0):
        self.max_epochs = max_epochs
        self.use_warmup = use_warmup
        self.min_lr_ratio = min_lr_ratio
        self.last_plateau_epochs = last_plateau_epochs

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        # boundary/value, when given, are the step boundaries and LR values
        # already produced by a preceding warmup scheduler; the cosine
        # schedule is appended after them.
        assert base_lr is not None, "either base LR or values should be provided"

        max_iters = self.max_epochs * int(step_per_epoch)
        last_plateau_iters = self.last_plateau_epochs * int(step_per_epoch)
        min_lr = base_lr * self.min_lr_ratio
        if boundary is not None and value is not None and self.use_warmup:
            # use warmup: continue the piecewise table from the last warmup
            # iteration, annealing from base_lr down to min_lr, then holding
            # min_lr during the final plateau iterations.
            warmup_iters = len(boundary)
            for i in range(int(boundary[-1]), max_iters):
                boundary.append(i)
                if i < max_iters - last_plateau_iters:
                    decayed_lr = min_lr + (base_lr - min_lr) * 0.5 * (math.cos(
                        (i - warmup_iters) * math.pi /
                        (max_iters - warmup_iters - last_plateau_iters)) + 1)
                    value.append(decayed_lr)
                else:
                    value.append(min_lr)
            return optimizer.lr.PiecewiseDecay(boundary, value)
        elif last_plateau_iters > 0:
            # no warmup, but `last_plateau_epochs` > 0: anneal over the first
            # part of training and hold min_lr for the plateau.
            boundary = []
            value = []
            for i in range(max_iters):
                if i < max_iters - last_plateau_iters:
                    decayed_lr = min_lr + (base_lr - min_lr) * 0.5 * (math.cos(
                        i * math.pi / (max_iters - last_plateau_iters)) + 1)
                    value.append(decayed_lr)
                else:
                    value.append(min_lr)
                if i > 0:
                    boundary.append(i)
            return optimizer.lr.PiecewiseDecay(boundary, value)

        # plain cosine annealing over the whole schedule
        return optimizer.lr.CosineAnnealingDecay(
            base_lr, T_max=max_iters, eta_min=min_lr)
@serializable
class PiecewiseDecay(object):
    """
    Multi step learning rate decay

    Args:
        gamma (float | list): decay factor(s). A scalar g is expanded to
            [g, g/10, g/100, ...], one entry per milestone.
        milestones (list): epochs at which to decay learning rate
        values (list|None): explicit LR value for every piece; overrides
            `gamma` when given (must have len(milestones) + 1 entries).
        use_warmup (bool): whether a warmup scheduler precedes this one.
    """

    def __init__(self,
                 gamma=[0.1, 0.01],
                 milestones=[8, 11],
                 values=None,
                 use_warmup=True):
        super(PiecewiseDecay, self).__init__()
        if type(gamma) is not list:
            # expand a scalar gamma into one decay factor per milestone
            self.gamma = []
            for i in range(len(milestones)):
                self.gamma.append(gamma / 10**i)
        else:
            self.gamma = gamma
        self.milestones = milestones
        self.values = values
        self.use_warmup = use_warmup

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        if boundary is not None and self.use_warmup:
            # append the milestone iterations after the warmup boundaries
            boundary.extend([int(step_per_epoch) * i for i in self.milestones])
        else:
            # do not use LinearWarmup
            boundary = [int(step_per_epoch) * i for i in self.milestones]
            value = [base_lr]  # during step[0, boundary[0]] is base_lr

        # self.values is set directly in the config
        if self.values is not None:
            assert len(self.milestones) + 1 == len(self.values)
            return optimizer.lr.PiecewiseDecay(boundary, self.values)

        # value is computed by self.gamma
        value = value if value is not None else [base_lr]
        for i in self.gamma:
            value.append(base_lr * i)
        return optimizer.lr.PiecewiseDecay(boundary, value)
@serializable
class LinearWarmup(object):
    """
    Linear learning-rate warm up.

    The LR ramps linearly from ``base_lr * start_factor`` up to
    ``base_lr`` over the warm-up period.

    Args:
        steps (int): warm up steps
        start_factor (float): initial learning rate factor
        epochs (int|None): use epochs as warm up steps, the priority
            of `epochs` is higher than `steps`. Default: None.
    """

    def __init__(self, steps=500, start_factor=1. / 3, epochs=None):
        super(LinearWarmup, self).__init__()
        self.steps = steps
        self.start_factor = start_factor
        self.epochs = epochs

    def __call__(self, base_lr, step_per_epoch):
        # Warm-up length: `epochs` (if given) takes precedence over `steps`.
        if self.epochs is not None:
            total_steps = self.epochs * step_per_epoch
        else:
            total_steps = self.steps
        total_steps = max(total_steps, 1)
        boundary = []
        value = []
        for step in range(total_steps + 1):
            if total_steps > 0:
                progress = step / total_steps
                factor = self.start_factor * (1 - progress) + progress
                value.append(base_lr * factor)
            if step > 0:
                boundary.append(step)
        return boundary, value
@serializable
class ExpWarmup(object):
    """
    Warm up learning rate in exponential mode
    The rate ramps from 0 to ``base_lr`` following
    ``base_lr * (step / warmup_steps) ** power``.
    Args:
        steps (int): warm up steps.
        epochs (int|None): use epochs as warm up steps, the priority
            of `epochs` is higher than `steps`. Default: None.
        power (int): Exponential coefficient. Default: 2.
    """
    def __init__(self, steps=1000, epochs=None, power=2):
        super(ExpWarmup, self).__init__()
        self.steps = steps
        self.epochs = epochs
        self.power = power
    def __call__(self, base_lr, step_per_epoch):
        """Return (boundary, value) lists describing the warmup ramp."""
        # Length in iterations; `epochs` wins over `steps` when provided.
        if self.epochs is not None:
            total = self.epochs * step_per_epoch
        else:
            total = self.steps
        total = max(total, 1)
        # One LR value per step 0..total; boundaries start at step 1.
        value = [base_lr * (step / float(total))**self.power
                 for step in range(total + 1)]
        boundary = list(range(1, total + 1))
        return boundary, value
@register
class LearningRate(object):
    """
    Learning Rate configuration
    Args:
        base_lr (float): base learning rate
        schedulers (list): learning rate schedulers; by convention the
            decay scheduler is first and the warmup scheduler second
    """
    __category__ = 'optim'
    def __init__(self,
                 base_lr=0.01,
                 schedulers=[PiecewiseDecay(), LinearWarmup()]):
        super(LearningRate, self).__init__()
        self.base_lr = base_lr
        self.schedulers = []
        # work on a copy so the shared default list (and caller-owned
        # dicts, which are mutated by pop below) are left untouched
        schedulers = copy.deepcopy(schedulers)
        for sched in schedulers:
            if isinstance(sched, dict):
                # support dict sched instantiate: resolve the class named by
                # the "name" key in this module and build it from the dict
                module = sys.modules[__name__]
                type = sched.pop("name")  # NOTE(review): shadows builtin `type`
                scheduler = getattr(module, type)(**sched)
                self.schedulers.append(scheduler)
            else:
                self.schedulers.append(sched)
    def __call__(self, step_per_epoch):
        """Build the final LR scheduler for `step_per_epoch` iters/epoch."""
        assert len(self.schedulers) >= 1
        if not self.schedulers[0].use_warmup:
            return self.schedulers[0](base_lr=self.base_lr,
                                      step_per_epoch=step_per_epoch)
        # TODO: split warmup & decay
        # warmup: schedulers[1] produces the (boundary, value) ramp ...
        boundary, value = self.schedulers[1](self.base_lr, step_per_epoch)
        # decay: ... which schedulers[0] extends with the decay milestones
        decay_lr = self.schedulers[0](self.base_lr, boundary, value,
                                      step_per_epoch)
        return decay_lr
@register
class OptimizerBuilder():
    """
    Build optimizer handles
    Args:
        clip_grad_by_norm (float|None): clip gradients by global norm
        clip_grad_by_value (float|None): clip gradients to
            [-abs(value), abs(value)]; ignored if clip_grad_by_norm is set
        regularizer (object): an `Regularizer` instance
        optimizer (object): an `Optimizer` instance
    """
    __category__ = 'optim'
    def __init__(self,
                 clip_grad_by_norm=None,
                 clip_grad_by_value=None,
                 regularizer={'type': 'L2',
                              'factor': .0001},
                 optimizer={'type': 'Momentum',
                            'momentum': .9}):
        self.clip_grad_by_norm = clip_grad_by_norm
        self.clip_grad_by_value = clip_grad_by_value
        self.regularizer = regularizer
        self.optimizer = optimizer
    def __call__(self, learning_rate, model=None):
        """Create the configured paddle optimizer for `model`."""
        # Gradient clipping: norm-based clipping takes precedence.
        if self.clip_grad_by_norm is not None:
            grad_clip = nn.ClipGradByGlobalNorm(
                clip_norm=self.clip_grad_by_norm)
        elif self.clip_grad_by_value is not None:
            var = abs(self.clip_grad_by_value)
            grad_clip = nn.ClipGradByValue(min=-var, max=var)
        else:
            grad_clip = None
        # Resolve e.g. {'type': 'L2', 'factor': f} -> regularizer.L2Decay(f).
        if self.regularizer and self.regularizer != 'None':
            reg_type = self.regularizer['type'] + 'Decay'
            reg_factor = self.regularizer['factor']
            regularization = getattr(regularizer, reg_type)(reg_factor)
        else:
            regularization = None
        optim_args = self.optimizer.copy()
        optim_type = optim_args['type']
        del optim_args['type']
        if optim_type == 'AdamWDL':
            # layer-wise-decay AdamW has a dedicated builder
            return build_adamwdl(model, lr=learning_rate, **optim_args)
        if optim_type != 'AdamW':
            # AdamW takes decay via its own kwarg; others get the regularizer
            optim_args['weight_decay'] = regularization
        op = getattr(optimizer, optim_type)
        if 'param_groups' in optim_args:
            # Build per-group parameter lists by substring-matching parameter
            # names; any trainable leftovers go into a final default group.
            assert isinstance(optim_args['param_groups'], list), ''
            param_groups = optim_args.pop('param_groups')
            params, visited = [], []
            for group in param_groups:
                assert isinstance(group,
                                  dict) and 'params' in group and isinstance(
                                      group['params'], list), ''
                _params = {
                    n: p
                    for n, p in model.named_parameters()
                    if any([k in n
                            for k in group['params']]) and p.trainable is True
                }
                _group = group.copy()
                _group.update({'params': list(_params.values())})
                params.append(_group)
                visited.extend(list(_params.keys()))
            ext_params = [
                p for n, p in model.named_parameters()
                if n not in visited and p.trainable is True
            ]
            if len(ext_params) < len(model.parameters()):
                params.append({'params': ext_params})
            elif len(ext_params) > len(model.parameters()):
                # more leftovers than the model owns: groups must overlap
                raise RuntimeError
        else:
            _params = model.parameters()
            params = [param for param in _params if param.trainable is True]
        return op(learning_rate=learning_rate,
                  parameters=params,
                  grad_clip=grad_clip,
                  **optim_args)
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
4e86e0e6ff825aaff5a9add1e218622ecce984ed | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part2/batch/jm/parser_errors_2/185179947.py | 546d93745ac3129f50e6b4ee8ebd53a7475e4971 | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 916 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 185179947
"""
"""
random actions, total chaos
"""
board = gamma_new(2, 2, 2, 2)
assert board is not None
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_move(board, 2, 0, 1) == 1
board737265096 = gamma_board(board)
assert board737265096 is not None
assert board737265096 == ("2.\n"
".1\n")
del board737265096
board737265096 = None
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 0, 0) == 1
assert gamma_free_fields(board, 2) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_golden_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 1, 1) == 0
gamma_delete(board)
| [
"jakub@molinski.dev"
] | jakub@molinski.dev |
ad63db827895e58dd1d6453651cac9b390cf59b5 | c4f7a338513bdf11b1b7466649315b4ccb2788ae | /PycharmProjects/Latte-master/Git и случайные окружности.py | c419bb94538cca5185927bc1e0204bee1596220c | [] | no_license | Nastialo/Ufa-Vaskina | 30ba506ab1ed3059acb97f52d4f5385efc4853e4 | c42b7d764c3d1e9dd830fe92884f7dcec60c9afa | refs/heads/master | 2020-12-04T07:01:53.427081 | 2020-05-09T22:03:36 | 2020-05-09T22:03:36 | 231,668,885 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,109 | py | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtGui import QPainter, QColor
from random import randint, choice
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Qt-Designer-style UI definition: a 501x442 window with a single
    push button plus menu and status bars (pyuic-generated layout)."""
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(501, 442)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(40, 330, 401, 51))
        self.pushButton.setObjectName("pushButton")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 501, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # User-visible strings; the button caption is Russian for
        # "Create circle" and must stay byte-identical.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton.setText(_translate("MainWindow", "Создать круг"))
class MyWidget(QMainWindow, Ui_MainWindow):
    """Main window: pressing the button triggers a repaint that paints a
    randomly placed, sized and coloured ellipse."""
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # palette from which a random fill colour is chosen per draw
        self.colors = ["black", "yellow", "red", "green", "blue", "purple", "orange"]
        self.pushButton.clicked.connect(self.run)
    def run(self):
        # schedule a repaint; the actual drawing happens in paintEvent
        self.update()
    def paintEvent(self, event):
        qp = QPainter()
        qp.begin(self)
        self.drawing(qp)
        qp.end()
    def drawing(self, qp):
        # NOTE(review): a fresh random circle is drawn on EVERY repaint
        # (window expose/resize included), not only on button clicks.
        qp.setBrush(QColor(choice(self.colors)))
        x = randint(4, 495)
        y = randint(4, 435)
        a = randint(4, 435)
        qp.drawEllipse(x, y, a, a)
# Application entry point: create the window and show it; the Qt event
# loop is started by the exec_() call that follows.
app = QApplication(sys.argv)
ex = MyWidget()
ex.show()
sys.exit(app.exec_()) | [
"59333815+Nastialo@users.noreply.github.com"
] | 59333815+Nastialo@users.noreply.github.com |
ba97b518db15458fb817d0b872d2356510abc92f | df8438656cc2b15001a03d02949abec9a374cb6f | /test/normalizer_issue_files/E72.py | c39cacc09c68bb48fdc7e3972843eaa5190fa3fb | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | gandhis1/parso | 65fcc7540eb2664691b1ed12203faa617995c4ce | 7b166db0b5b0b46a3b8b2f1ea5c9dcf57bc36197 | refs/heads/master | 2021-01-25T04:36:15.558393 | 2017-06-05T23:20:12 | 2017-06-05T23:20:12 | 93,455,487 | 0 | 0 | null | 2017-06-05T23:18:20 | 2017-06-05T23:18:20 | null | UTF-8 | Python | false | false | 1,089 | py | #: E721
if type(res) == type(42):
pass
#: E721
if type(res) != type(""):
pass
import types
if res == types.IntType:
pass
import types
#: E721:3
if type(res) is not types.ListType:
pass
#: E721:7 E721:35
assert type(res) == type(False) or type(res) == type(None)
#: E721:7
assert type(res) == type([])
#: E721:7
assert type(res) == type(())
#: E721:7
assert type(res) == type((0,))
#: E721:7
assert type(res) == type((0))
#: E721:7
assert type(res) != type((1, ))
#: E721:7
assert type(res) is type((1, ))
#: E721:7
assert type(res) is not type((1, ))
# Okay
#: E402
import types
if isinstance(res, int):
pass
if isinstance(res, str):
pass
if isinstance(res, types.MethodType):
pass
#: E721:3 E721:25
if type(a) != type(b) or type(a) == type(ccc):
pass
#: E721
type(a) != type(b)
#: E721
1 != type(b)
#: E721
type(b) != 1
1 != 1
try:
pass
#: E722
except:
pass
try:
pass
except Exception:
pass
#: E722
except:
pass
# Okay
fake_code = """"
try:
do_something()
except:
pass
"""
try:
pass
except Exception:
pass
| [
"davidhalter88@gmail.com"
] | davidhalter88@gmail.com |
f960787efb67b91348af709a474548bd3c83a751 | 99f145ac3a1b9192e54c114379f16bf992781251 | /venv/lib/python2.7/site-packages/pandas/tests/plotting/test_series.py | 6878ca0e1bc0618a0b53b0b7d150206acc962c7e | [
"MIT"
] | permissive | dushyantRathore/Cricket-API | 0a7df84f9760090e8a24dc61689e63e123c33d1f | d28bc5e6c613052793117e3dbd9035e4540901bb | refs/heads/master | 2021-03-24T10:18:58.362716 | 2020-07-08T17:52:38 | 2020-07-08T17:52:38 | 79,565,447 | 3 | 1 | MIT | 2018-10-01T19:16:47 | 2017-01-20T14:24:08 | Python | UTF-8 | Python | false | false | 30,920 | py | #!/usr/bin/env python
# coding: utf-8
import nose
import itertools
from datetime import datetime
import pandas as pd
from pandas import Series, DataFrame, date_range
from pandas.compat import range, lrange
import pandas.util.testing as tm
from pandas.util.testing import slow
import numpy as np
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
""" Test cases for Series.plot """
@tm.mplskip
class TestSeriesPlots(TestPlotBase):
    def setUp(self):
        # Shared fixtures: a datetime-indexed, a string-indexed and a
        # period-indexed Series; matplotlib rcParams reset for isolation.
        TestPlotBase.setUp(self)
        import matplotlib as mpl
        mpl.rcdefaults()
        self.ts = tm.makeTimeSeries()
        self.ts.name = 'ts'
        self.series = tm.makeStringSeries()
        self.series.name = 'series'
        self.iseries = tm.makePeriodSeries()
        self.iseries.name = 'iseries'
    @slow
    def test_plot(self):
        # Smoke-test the main Series.plot entry points: labels, log scales,
        # bar/area/barh variants and subplots layout handling.
        _check_plot_works(self.ts.plot, label='foo')
        _check_plot_works(self.ts.plot, use_index=False)
        axes = _check_plot_works(self.ts.plot, rot=0)
        self._check_ticks_props(axes, xrot=0)
        ax = _check_plot_works(self.ts.plot, style='.', logy=True)
        self._check_ax_scales(ax, yaxis='log')
        ax = _check_plot_works(self.ts.plot, style='.', logx=True)
        self._check_ax_scales(ax, xaxis='log')
        ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
        self._check_ax_scales(ax, xaxis='log', yaxis='log')
        _check_plot_works(self.ts[:10].plot.bar)
        _check_plot_works(self.ts.plot.area, stacked=False)
        _check_plot_works(self.iseries.plot)
        for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
            if not _ok_for_gaussian_kde(kind):
                continue
            _check_plot_works(self.series[:5].plot, kind=kind)
        _check_plot_works(self.series[:10].plot.barh)
        ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
        self._check_colors([ax.patches[0]], facecolors=['black'])
        # GH 6951
        # a -1 layout entry is inferred from the number of axes
        ax = _check_plot_works(self.ts.plot, subplots=True)
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
        ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
        ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
    @slow
    def test_plot_figsize_and_title(self):
        # figsize and title
        ax = self.series.plot(title='Test', figsize=(16, 8))
        self._check_text_labels(ax.title, 'Test')
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
    def test_dont_modify_rcParams(self):
        # GH 8242: plotting must not mutate the global color cycle
        if self.mpl_ge_1_5_0:
            key = 'axes.prop_cycle'
        else:
            key = 'axes.color_cycle'
        colors = self.plt.rcParams[key]
        Series([1, 2, 3]).plot()
        self.assertEqual(colors, self.plt.rcParams[key])
    def test_ts_line_lim(self):
        # x-limits should hug the first/last plotted point
        ax = self.ts.plot()
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
        self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
        tm.close()
        ax = self.ts.plot(secondary_y=True)
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
        self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
    def test_ts_area_lim(self):
        # same x-limit contract for area plots, incl. tz-aware index
        ax = self.ts.plot.area(stacked=False)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        self.assertEqual(xmin, line[0])
        self.assertEqual(xmax, line[-1])
        tm.close()
        # GH 7471
        ax = self.ts.plot.area(stacked=False, x_compat=True)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        self.assertEqual(xmin, line[0])
        self.assertEqual(xmax, line[-1])
        tm.close()
        tz_ts = self.ts.copy()
        tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
        ax = tz_ts.plot.area(stacked=False, x_compat=True)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        self.assertEqual(xmin, line[0])
        self.assertEqual(xmax, line[-1])
        tm.close()
        ax = tz_ts.plot.area(stacked=False, secondary_y=True)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        self.assertEqual(xmin, line[0])
        self.assertEqual(xmax, line[-1])
    def test_label(self):
        # legend label precedence: explicit label > series name > 'None'
        s = Series([1, 2])
        ax = s.plot(label='LABEL', legend=True)
        self._check_legend_labels(ax, labels=['LABEL'])
        self.plt.close()
        ax = s.plot(legend=True)
        self._check_legend_labels(ax, labels=['None'])
        self.plt.close()
        # get name from index
        s.name = 'NAME'
        ax = s.plot(legend=True)
        self._check_legend_labels(ax, labels=['NAME'])
        self.plt.close()
        # override the default
        ax = s.plot(legend=True, label='LABEL')
        self._check_legend_labels(ax, labels=['LABEL'])
        self.plt.close()
        # Add label info, but don't draw
        ax = s.plot(legend=False, label='LABEL')
        self.assertEqual(ax.get_legend(), None)  # Hasn't been drawn
        ax.legend()  # draw it
        self._check_legend_labels(ax, labels=['LABEL'])
    def test_line_area_nan_series(self):
        # NaNs stay masked in line plots but become 0 in stacked/area plots
        values = [1, 2, np.nan, 3]
        s = Series(values)
        ts = Series(values, index=tm.makeDateIndex(k=4))
        for d in [s, ts]:
            ax = _check_plot_works(d.plot)
            masked = ax.lines[0].get_ydata()
            # remove nan for comparison purpose
            exp = np.array([1, 2, 3], dtype=np.float64)
            self.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
            self.assert_numpy_array_equal(
                masked.mask, np.array([False, False, True, False]))
            expected = np.array([1, 2, 0, 3], dtype=np.float64)
            ax = _check_plot_works(d.plot, stacked=True)
            self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area)
            self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area, stacked=False)
            self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
    def test_line_use_index_false(self):
        # use_index=False suppresses the index-name x-label
        s = Series([1, 2, 3], index=['a', 'b', 'c'])
        s.index.name = 'The Index'
        ax = s.plot(use_index=False)
        label = ax.get_xlabel()
        self.assertEqual(label, '')
        ax2 = s.plot.bar(use_index=False)
        label2 = ax2.get_xlabel()
        self.assertEqual(label2, '')
    @slow
    def test_bar_log(self):
        # expected tick locations vary by matplotlib version
        expected = np.array([1., 10., 100., 1000.])
        if not self.mpl_le_1_2_1:
            expected = np.hstack((.1, expected, 1e4))
        ax = Series([200, 500]).plot.bar(log=True)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
        tm.close()
        ax = Series([200, 500]).plot.barh(log=True)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
        tm.close()
        # GH 9905
        expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
        if not self.mpl_le_1_2_1:
            expected = np.hstack((1.0e-04, expected, 1.0e+01))
        if self.mpl_ge_2_0_0:
            expected = np.hstack((1.0e-05, expected))
        ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar')
        ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001
        ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001
        res = ax.get_ylim()
        self.assertAlmostEqual(res[0], ymin)
        self.assertAlmostEqual(res[1], ymax)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
        tm.close()
        ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh')
        res = ax.get_xlim()
        self.assertAlmostEqual(res[0], ymin)
        self.assertAlmostEqual(res[1], ymax)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
    @slow
    def test_bar_ignore_index(self):
        # use_index=False falls back to positional tick labels
        df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
        ax = df.plot.bar(use_index=False)
        self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
    def test_rotation(self):
        df = DataFrame(randn(5, 5))
        # Default rot 0
        axes = df.plot()
        self._check_ticks_props(axes, xrot=0)
        axes = df.plot(rot=30)
        self._check_ticks_props(axes, xrot=30)
    def test_irregular_datetime(self):
        # set_xlim must accept date strings on an irregular datetime index
        rng = date_range('1/1/2000', '3/1/2000')
        rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
        ser = Series(randn(len(rng)), rng)
        ax = ser.plot()
        xp = datetime(1999, 1, 1).toordinal()
        ax.set_xlim('1/1/1999', '1/1/2001')
        self.assertEqual(xp, ax.get_xlim()[0])
    @slow
    def test_pie_series(self):
        # if sum of values is less than 1.0, pie handle them as rate and draw
        # semicircle.
        series = Series(np.random.randint(1, 5),
                        index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
        ax = _check_plot_works(series.plot.pie)
        self._check_text_labels(ax.texts, series.index)
        self.assertEqual(ax.get_ylabel(), 'YLABEL')
        # without wedge labels
        ax = _check_plot_works(series.plot.pie, labels=None)
        self._check_text_labels(ax.texts, [''] * 5)
        # with less colors than elements: colors are cycled
        color_args = ['r', 'g', 'b']
        ax = _check_plot_works(series.plot.pie, colors=color_args)
        color_expected = ['r', 'g', 'b', 'r', 'g']
        self._check_colors(ax.patches, facecolors=color_expected)
        # with labels and colors
        labels = ['A', 'B', 'C', 'D', 'E']
        color_args = ['r', 'g', 'b', 'c', 'm']
        ax = _check_plot_works(series.plot.pie, labels=labels,
                               colors=color_args)
        self._check_text_labels(ax.texts, labels)
        self._check_colors(ax.patches, facecolors=color_args)
        # with autopct and fontsize: texts alternate label/percentage
        ax = _check_plot_works(series.plot.pie, colors=color_args,
                               autopct='%.2f', fontsize=7)
        pcts = ['{0:.2f}'.format(s * 100)
                for s in series.values / float(series.sum())]
        iters = [iter(series.index), iter(pcts)]
        expected_texts = list(next(it) for it in itertools.cycle(iters))
        self._check_text_labels(ax.texts, expected_texts)
        for t in ax.texts:
            self.assertEqual(t.get_fontsize(), 7)
        # includes negative value
        with tm.assertRaises(ValueError):
            series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
            series.plot.pie()
        # includes nan
        series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'],
                        name='YLABEL')
        ax = _check_plot_works(series.plot.pie)
        self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
    def test_pie_nan(self):
        # NaN wedges get an empty label
        s = Series([1, np.nan, 1, 1])
        ax = s.plot.pie(legend=True)
        expected = ['0', '', '2', '3']
        result = [x.get_text() for x in ax.texts]
        self.assertEqual(result, expected)
    @slow
    def test_hist_df_kwargs(self):
        # 2 columns x 5 bins -> 10 patches
        df = DataFrame(np.random.randn(10, 2))
        ax = df.plot.hist(bins=5)
        self.assertEqual(len(ax.patches), 10)
    @slow
    def test_hist_df_with_nonnumerics(self):
        # GH 9853: non-numeric columns are silently excluded
        with tm.RNGContext(1):
            df = DataFrame(
                np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
        df['E'] = ['x', 'y'] * 5
        ax = df.plot.hist(bins=5)
        self.assertEqual(len(ax.patches), 20)
        ax = df.plot.hist()  # bins=10
        self.assertEqual(len(ax.patches), 40)
    @slow
    def test_hist_legacy(self):
        _check_plot_works(self.ts.hist)
        _check_plot_works(self.ts.hist, grid=False)
        _check_plot_works(self.ts.hist, figsize=(8, 10))
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(self.ts.hist,
                              by=self.ts.index.month)
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(self.ts.hist,
                              by=self.ts.index.month, bins=5)
        fig, ax = self.plt.subplots(1, 1)
        _check_plot_works(self.ts.hist, ax=ax)
        _check_plot_works(self.ts.hist, ax=ax, figure=fig)
        _check_plot_works(self.ts.hist, figure=fig)
        tm.close()
        fig, (ax1, ax2) = self.plt.subplots(1, 2)
        _check_plot_works(self.ts.hist, figure=fig, ax=ax1)
        _check_plot_works(self.ts.hist, figure=fig, ax=ax2)
        # `by` together with an explicit figure is rejected
        with tm.assertRaises(ValueError):
            self.ts.hist(by=self.ts.index, figure=fig)
    @slow
    def test_hist_bins_legacy(self):
        df = DataFrame(np.random.randn(10, 2))
        ax = df.hist(bins=2)[0][0]
        self.assertEqual(len(ax.patches), 2)
    @slow
    def test_hist_layout(self):
        # an explicit layout is invalid for a single un-grouped histogram
        df = self.hist_df
        with tm.assertRaises(ValueError):
            df.height.hist(layout=(1, 1))
        with tm.assertRaises(ValueError):
            df.height.hist(layout=[1, 1])
    @slow
    def test_hist_layout_with_by(self):
        # grouped histograms: -1 layout entries are inferred from the
        # number of groups
        df = self.hist_df
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist,
                                     by=df.gender, layout=(2, 1))
        self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist,
                                     by=df.gender, layout=(3, -1))
        self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist,
                                     by=df.category, layout=(4, 1))
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist,
                                     by=df.category, layout=(2, -1))
        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist,
                                     by=df.category, layout=(3, -1))
        self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist,
                                     by=df.category, layout=(-1, 4))
        self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.height.hist,
                                     by=df.classroom, layout=(2, 2))
        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
        axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
        self._check_axes_shape(axes, axes_num=4, layout=(4, 2),
                               figsize=(12, 7))
    @slow
    def test_hist_no_overlap(self):
        # two histograms on explicit subplots must stay on separate axes
        from matplotlib.pyplot import subplot, gcf
        x = Series(randn(2))
        y = Series(randn(2))
        subplot(121)
        x.hist()
        subplot(122)
        y.hist()
        fig = gcf()
        axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
        self.assertEqual(len(axes), 2)
    @slow
    def test_hist_secondary_legend(self):
        # GH 9610: legend handling when mixing primary/secondary y axes
        df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))
        # primary -> secondary
        ax = df['a'].plot.hist(legend=True)
        df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax, labels=['a', 'b (right)'])
        self.assertTrue(ax.get_yaxis().get_visible())
        self.assertTrue(ax.right_ax.get_yaxis().get_visible())
        tm.close()
        # secondary -> secondary
        ax = df['a'].plot.hist(legend=True, secondary_y=True)
        df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible, right axis must be visible
        self._check_legend_labels(ax.left_ax,
                                  labels=['a (right)', 'b (right)'])
        self.assertFalse(ax.left_ax.get_yaxis().get_visible())
        self.assertTrue(ax.get_yaxis().get_visible())
        tm.close()
        # secondary -> primary
        ax = df['a'].plot.hist(legend=True, secondary_y=True)
        # right axes is returned
        df['b'].plot.hist(ax=ax, legend=True)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
        self.assertTrue(ax.left_ax.get_yaxis().get_visible())
        self.assertTrue(ax.get_yaxis().get_visible())
        tm.close()
    @slow
    def test_df_series_secondary_legend(self):
        # GH 9779: Series plotted on top of a DataFrame plot
        df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
        s = Series(np.random.randn(30), name='x')
        # primary -> secondary (without passing ax)
        ax = df.plot()
        s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
        self.assertTrue(ax.get_yaxis().get_visible())
        self.assertTrue(ax.right_ax.get_yaxis().get_visible())
        tm.close()
        # primary -> secondary (with passing ax)
        ax = df.plot()
        s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
        self.assertTrue(ax.get_yaxis().get_visible())
        self.assertTrue(ax.right_ax.get_yaxis().get_visible())
        tm.close()
        # secondary -> secondary (without passing ax)
        ax = df.plot(secondary_y=True)
        s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible and right axis must be visible
        expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
        self._check_legend_labels(ax.left_ax, labels=expected)
        self.assertFalse(ax.left_ax.get_yaxis().get_visible())
        self.assertTrue(ax.get_yaxis().get_visible())
        tm.close()
        # secondary -> secondary (with passing ax)
        ax = df.plot(secondary_y=True)
        s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible and right axis must be visible
        expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
        self._check_legend_labels(ax.left_ax, expected)
        self.assertFalse(ax.left_ax.get_yaxis().get_visible())
        self.assertTrue(ax.get_yaxis().get_visible())
        tm.close()
        # secondary -> secondary (with passing ax), mark_right=False drops
        # the "(right)" suffix on the DataFrame labels
        ax = df.plot(secondary_y=True, mark_right=False)
        s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible and right axis must be visible
        expected = ['a', 'b', 'c', 'x (right)']
        self._check_legend_labels(ax.left_ax, expected)
        self.assertFalse(ax.left_ax.get_yaxis().get_visible())
        self.assertTrue(ax.get_yaxis().get_visible())
        tm.close()
    @slow
    def test_plot_fails_with_dupe_color_and_style(self):
        # passing a color both via `style` and `color` is ambiguous
        x = Series(randn(2))
        with tm.assertRaises(ValueError):
            x.plot(style='k--', color='k')
    @slow
    def test_hist_kde(self):
        ax = self.ts.plot.hist(logy=True)
        self._check_ax_scales(ax, yaxis='log')
        xlabels = ax.get_xticklabels()
        # ticks are values, thus ticklabels are blank
        self._check_text_labels(xlabels, [''] * len(xlabels))
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [''] * len(ylabels))
        # the KDE half of the test requires scipy
        tm._skip_if_no_scipy()
        _skip_if_no_scipy_gaussian_kde()
        _check_plot_works(self.ts.plot.kde)
        _check_plot_works(self.ts.plot.density)
        ax = self.ts.plot.kde(logy=True)
        self._check_ax_scales(ax, yaxis='log')
        xlabels = ax.get_xticklabels()
        self._check_text_labels(xlabels, [''] * len(xlabels))
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [''] * len(ylabels))
    @slow
    def test_kde_kwargs(self):
        # bw_method / ind pass-through to scipy's gaussian_kde
        tm._skip_if_no_scipy()
        _skip_if_no_scipy_gaussian_kde()
        from numpy import linspace
        _check_plot_works(self.ts.plot.kde, bw_method=.5,
                          ind=linspace(-100, 100, 20))
        _check_plot_works(self.ts.plot.density, bw_method=.5,
                          ind=linspace(-100, 100, 20))
        ax = self.ts.plot.kde(logy=True, bw_method=.5,
                              ind=linspace(-100, 100, 20))
        self._check_ax_scales(ax, yaxis='log')
        self._check_text_labels(ax.yaxis.get_label(), 'Density')
    @slow
    def test_kde_missing_vals(self):
        tm._skip_if_no_scipy()
        _skip_if_no_scipy_gaussian_kde()
        s = Series(np.random.uniform(size=50))
        s[0] = np.nan
        axes = _check_plot_works(s.plot.kde)
        # check if the values have any missing values
        # GH14821
        self.assertTrue(any(~np.isnan(axes.lines[0].get_xdata())),
                        msg='Missing Values not dropped')
    @slow
    def test_hist_kwargs(self):
        ax = self.ts.plot.hist(bins=5)
        self.assertEqual(len(ax.patches), 5)
        self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
        tm.close()
        # horizontal orientation needs matplotlib >= 1.3.1
        if self.mpl_ge_1_3_1:
            ax = self.ts.plot.hist(orientation='horizontal')
            self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
            tm.close()
            ax = self.ts.plot.hist(align='left', stacked=True)
            tm.close()
    @slow
    def test_hist_kde_color(self):
        ax = self.ts.plot.hist(logy=True, bins=10, color='b')
        self._check_ax_scales(ax, yaxis='log')
        self.assertEqual(len(ax.patches), 10)
        self._check_colors(ax.patches, facecolors=['b'] * 10)
        tm._skip_if_no_scipy()
        _skip_if_no_scipy_gaussian_kde()
        ax = self.ts.plot.kde(logy=True, color='r')
        self._check_ax_scales(ax, yaxis='log')
        lines = ax.get_lines()
        self.assertEqual(len(lines), 1)
        self._check_colors(lines, ['r'])
    @slow
    def test_boxplot_series(self):
        # single-series boxplot: one x tick named after the series
        ax = self.ts.plot.box(logy=True)
        self._check_ax_scales(ax, yaxis='log')
        xlabels = ax.get_xticklabels()
        self._check_text_labels(xlabels, [self.ts.name])
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [''] * len(ylabels))
    @slow
    def test_kind_both_ways(self):
        # every kind must work via kind= and via the .plot.<kind> accessor
        s = Series(range(3))
        for kind in plotting._common_kinds + plotting._series_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            s.plot(kind=kind)
            getattr(s.plot, kind)()
    @slow
    def test_invalid_plot_data(self):
        # all-string data cannot be plotted
        s = Series(list('abcd'))
        for kind in plotting._common_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            with tm.assertRaises(TypeError):
                s.plot(kind=kind)
    @slow
    def test_valid_object_plot(self):
        # object dtype with numeric contents is still plottable
        s = Series(lrange(10), dtype=object)
        for kind in plotting._common_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            _check_plot_works(s.plot, kind=kind)
    def test_partially_invalid_plot_data(self):
        s = Series(['a', 'b', 1.0, 2])
        for kind in plotting._common_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            with tm.assertRaises(TypeError):
                s.plot(kind=kind)
    def test_invalid_kind(self):
        s = Series([1, 2])
        with tm.assertRaises(ValueError):
            s.plot(kind='aasdf')
    @slow
    def test_dup_datetime_index_plot(self):
        # duplicate timestamps in the index must not break plotting
        dr1 = date_range('1/1/2009', periods=4)
        dr2 = date_range('1/2/2009', periods=4)
        index = dr1.append(dr2)
        values = randn(index.size)
        s = Series(values, index=index)
        _check_plot_works(s.plot)
@slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name='x')
s_err = np.random.randn(10)
d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
# test line and bar plots
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=1, yerr=1)
ax = _check_plot_works(s.plot, xerr=s_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
# test time series plotting
ix = date_range('1/1/2000', '1/1/2001', freq='M')
ts = Series(np.arange(12), index=ix, name='x')
ts_err = Series(np.random.randn(12), index=ix)
td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(ts.plot, yerr=td_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
# check incorrect lengths and types
with tm.assertRaises(ValueError):
s.plot(yerr=np.arange(11))
s_err = ['zzz'] * 10
# in mpl 1.5+ this is a TypeError
with tm.assertRaises((ValueError, TypeError)):
s.plot(yerr=s_err)
    def test_table(self):
        # table=True draws the data as a table under the plot; passing an
        # object as ``table`` uses that object's data instead.
        _check_plot_works(self.series.plot, table=True)
        _check_plot_works(self.series.plot, table=self.series)
    @slow
    def test_series_grid_settings(self):
        # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
        self._check_grid_settings(Series([1, 2, 3]),
                                  plotting._series_kinds +
                                  plotting._common_kinds)
    @slow
    def test_standard_colors(self):
        # _get_standard_colors must echo a single color (given as a string
        # or a one-element list) and repeat it to fill larger requests.
        for c in ['r', 'red', 'green', '#FF0000']:
            result = plotting._get_standard_colors(1, color=c)
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(1, color=[c])
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(3, color=c)
            self.assertEqual(result, [c] * 3)
            result = plotting._get_standard_colors(3, color=[c])
            self.assertEqual(result, [c] * 3)
    @slow
    def test_standard_colors_all(self):
        import matplotlib.colors as colors
        # Repeat the single-color echo/repeat checks for every named
        # matplotlib color and every single-letter color code.
        # multiple colors like mediumaquamarine
        for c in colors.cnames:
            result = plotting._get_standard_colors(num_colors=1, color=c)
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(num_colors=1, color=[c])
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(num_colors=3, color=c)
            self.assertEqual(result, [c] * 3)
            result = plotting._get_standard_colors(num_colors=3, color=[c])
            self.assertEqual(result, [c] * 3)
        # single letter colors like k
        for c in colors.ColorConverter.colors:
            result = plotting._get_standard_colors(num_colors=1, color=c)
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(num_colors=1, color=[c])
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(num_colors=3, color=c)
            self.assertEqual(result, [c] * 3)
            result = plotting._get_standard_colors(num_colors=3, color=[c])
            self.assertEqual(result, [c] * 3)
    def test_series_plot_color_kwargs(self):
        # GH1890
        # An explicit ``color`` kwarg must be applied to the plotted line.
        ax = Series(np.arange(12) + 1).plot(color='green')
        self._check_colors(ax.get_lines(), linecolors=['green'])
    def test_time_series_plot_color_kwargs(self):
        # #1890
        # Same as above, via the datetime-indexed (time series) plot path.
        ax = Series(np.arange(12) + 1, index=date_range(
            '1/1/2000', periods=12)).plot(color='green')
        self._check_colors(ax.get_lines(), linecolors=['green'])
    def test_time_series_plot_color_with_empty_kwargs(self):
        # Without a color argument, successive plots on the same axes must
        # cycle through matplotlib's default color cycle.
        import matplotlib as mpl
        if self.mpl_ge_1_5_0:
            def_colors = self._maybe_unpack_cycler(mpl.rcParams)
        else:
            def_colors = mpl.rcParams['axes.color_cycle']
        index = date_range('1/1/2000', periods=12)
        s = Series(np.arange(1, 13), index=index)
        ncolors = 3
        # Each s.plot() call adds a line to the current axes, so after the
        # loop the axes holds ``ncolors`` lines, one per default color.
        for i in range(ncolors):
            ax = s.plot()
        self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
    def test_xticklabels(self):
        # GH11529
        # Explicit ``xticks`` positions must be labelled with the index
        # values at those positions.
        s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
        ax = s.plot(xticks=[0, 3, 5, 9])
        exp = ['P%02d' % i for i in [0, 3, 5, 9]]
        self._check_text_labels(ax.get_xticklabels(), exp)
    def test_custom_business_day_freq(self):
        # GH7222
        # Plotting must work with a CustomBusinessDay frequency (holidays).
        from pandas.tseries.offsets import CustomBusinessDay
        s = Series(range(100, 121), index=pd.bdate_range(
            start='2014-05-01', end='2014-06-01',
            freq=CustomBusinessDay(holidays=['2014-05-26'])))
        _check_plot_works(s.plot)
if __name__ == '__main__':
    # Run this module's tests directly through nose when executed as a script.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| [
"dushyant.bgs@gmail.com"
] | dushyant.bgs@gmail.com |
577de1e8c4b0f5fcc505a90aa84b8b3cecadb643 | 383eb3630aeb21b2fe6d43db25f7a6e450a51488 | /tools/rename_release.py | d719fd043a64da70df7e3348cb350f2c8c64677b | [] | no_license | p5-vbnekit/docker-common-windows-compilers | 10075eaf99c469d2b089bc59f8d8e91f37c38025 | 931370b4413f823f1ad60b7f3254df92efbd35e4 | refs/heads/master | 2020-07-30T19:55:55.127942 | 2019-10-01T20:08:00 | 2019-10-01T20:08:00 | 210,340,564 | 0 | 0 | null | 2019-10-01T20:08:01 | 2019-09-23T11:42:00 | Python | UTF-8 | Python | false | false | 1,426 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, sys, github, requests, json
# NOTE(review): imports this script's own __file__ as ``main_path`` when run
# directly; the name is never used afterwards — confirm it can be dropped.
if "__main__" == __name__: from __main__ import __file__ as main_path
def routine():
    """Rename a GitHub release (name and tag) of the
    p5-vbnekit/docker-common-windows-compilers repository.

    Expects ``sys.argv`` = [script, old_name, new_name, token_env_var]; the
    third argument names an environment variable holding a GitHub token
    ('' selects anonymous access). Raises ValueError on bad arguments and
    RuntimeError if the release is missing or the PATCH request fails.
    """
    if 4 != len(sys.argv): raise ValueError("invalid command line")
    m_old_name = sys.argv[1]
    if not m_old_name: raise ValueError("invalid old name")
    m_new_name = sys.argv[2]
    if not m_new_name: raise ValueError("invalid new name")
    if m_old_name == m_new_name: raise ValueError("invalid new name")
    m_token = sys.argv[3]
    if not m_token: m_token = None
    else: m_token = os.environ[m_token]
    def make_request_url():
        # Find the oldest release tagged with the old name; return its API URL.
        m_release = None
        m_repository = github.Github(m_token).get_repo("p5-vbnekit/docker-common-windows-compilers")
        for m_item in m_repository.get_releases():
            if m_old_name != m_item.tag_name: continue
            if m_release is None: m_release = m_item
            elif m_item.created_at < m_release.created_at: m_release = m_item
        if m_release is None: raise RuntimeError("release not found")
        return m_release.url
    def make_request_options():
        # Kwargs for requests.patch. NOTE(review): basic auth as (token, "")
        # — confirm GitHub still accepts token-as-username basic auth.
        m_result = {
            "data": json.dumps({"name": m_new_name, "tag_name": m_new_name})
        }
        if not (m_token is None): m_result["auth"] = (m_token, "")
        return m_result
    m_result = requests.patch(make_request_url(), **make_request_options())
    if 200 != m_result.status_code: raise RuntimeError(str(m_result.text))
if "__main__" == __name__: routine()  # script entry point
| [
"vbnekit@p5y.su"
] | vbnekit@p5y.su |
552f6f3b1510157c43339643a0395e0d8862395d | 6a4a79f1e2d5c6a6edc08429db5cdd30793e1f64 | /src/ApproveScreen.py | ec64e69c7bdc056a871a9a7d7196ab479a3e926c | [] | no_license | MehmetAran/OCR-Doc-Data-Mining | 6827563083306ddbb67e48ed433eb1c5eb9c4e2d | 5b836e77bc45bfc17f31e1f4cc79e043739dc7f0 | refs/heads/master | 2020-12-11T19:30:07.956375 | 2020-06-29T17:09:57 | 2020-06-29T17:09:57 | 233,938,069 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,766 | py | from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import sqlite3
import os
from sqliteOperations import SqliteOperations
# After the document-cropping step, this class presents the data extracted
# from the document (to be saved to the database) for user approval and
# inserts the approved records. The user may also make updates if desired.
class ApproveScreen(QDialog):
    """Dialog that lists rows extracted from a cropped document so the user
    can review them, delete unwanted rows, or insert approved rows into the
    SQLite table named after the document.

    User-facing strings are intentionally in Turkish.
    """
    def __init__(self, data,docName,*args, **kwargs):
        # ``data``: iterable of 5-field rows to review.
        # ``docName``: target SQLite table name.
        self.docName = docName
        super(ApproveScreen, self).__init__(*args, **kwargs)
        self.setWindowTitle("Onaylama İşlemi")
        self.setFixedWidth(600)
        self.setFixedHeight(600)
        layout = QVBoxLayout()
        self.approve_screen_table_widget = QTableWidget()
        # self.setCentralWidget(self.tableWidget)
        self.approve_screen_table_widget.setAlternatingRowColors(True)
        self.approve_screen_table_widget.setColumnCount(5)
        self.approve_screen_table_widget.horizontalHeader().setCascadingSectionResizes(False)
        self.approve_screen_table_widget.horizontalHeader().setSortIndicatorShown(False)
        self.approve_screen_table_widget.horizontalHeader().setStretchLastSection(True)
        self.approve_screen_table_widget.verticalHeader().setVisible(False)
        self.approve_screen_table_widget.verticalHeader().setCascadingSectionResizes(False)
        self.approve_screen_table_widget.setHorizontalHeaderLabels(("Verdiğiniz isim", "Soldaki Metin", "Sağdaki Metin", "Yukarıdaki Metin", "Aşağıdaki Metin"))
        self.add_btn = QPushButton()
        self.add_btn.setText("Ekle")
        self.add_btn.clicked.connect(self.selectedIndexAddToSqlite)
        layout.addWidget(self.add_btn);
        self.delete_btn = QPushButton()
        self.delete_btn.clicked.connect(self.selectedIndexDelete)
        self.delete_btn.setText("Sil")
        layout.addWidget(self.delete_btn);
        layout.addWidget(self.approve_screen_table_widget)
        self.setLayout(layout)
        self.searchstudent(data)
    def searchstudent(self,array):
        # Re-populate the table widget from ``array`` (clears existing rows).
        # NOTE(review): misleading method name — it simply fills the table.
        result = array
        self.approve_screen_table_widget.setRowCount(0)
        for row_number, row_data in enumerate(result):
            self.approve_screen_table_widget.insertRow(row_number)
            for column_number, data in enumerate(row_data):
                self.approve_screen_table_widget.setItem(row_number, column_number, QTableWidgetItem(str(data)))
    def selectedIndexDelete(self):
        # Remove the currently selected row from the table (not from the DB).
        try :
            row = self.approve_screen_table_widget.currentRow()
        except:
            QMessageBox.information(self, "Hata",'Lütfen silmek istediğiniz satırı seçiniz.', QMessageBox.Ok)
            return
        self.approve_screen_table_widget.removeRow(row)
    def selectedIndexAddToSqlite(self):
        # Insert the selected row into SQLite via SqliteOperations, then
        # drop it from the table on success.
        try:
            row = self.approve_screen_table_widget.currentRow()
            index0 = self.approve_screen_table_widget.item(row,0).text()
            index1 = self.approve_screen_table_widget.item(row,1).text()
            index2 = self.approve_screen_table_widget.item(row,2).text()
            index3 = self.approve_screen_table_widget.item(row,3).text()
            index4 = self.approve_screen_table_widget.item(row,4).text()
        except:
            QMessageBox.information(self, "Hata",'Lütfen eklemek istediğiniz satırı seçiniz.', QMessageBox.Ok)
            return
        try:
            SqliteOperations().insert(self.docName,0,index0,index1,index2,index3,index4)
        except:
            QMessageBox.information(self, "Hata",'Veritabanına eklenirken bir hata oluştu.', QMessageBox.Ok)
            return
        # NOTE(review): success dialog reuses the "Hata" (error) title.
        QMessageBox.information(self, "Hata",'Eklendi', QMessageBox.Ok)
        self.approve_screen_table_widget.removeRow(row)
| [
"160202030@kocaeli.edu.tr"
] | 160202030@kocaeli.edu.tr |
cb20e58b40cb0a1ea1a758ac1a465721e89679cb | ed3bb0905b91fdfbc4a85cfd88c5635823b2ca4f | /icoder/blog/urls.py | 749416254d551afcc043f57eaf0745b6987e271f | [] | no_license | Adityapatel667/adi.github.io | b595b96eb97bbb3d9cf20c6dc24ede2cafefc8b8 | 318ce02039fe6dd34d1f89ce4f40c6fd1efac651 | refs/heads/master | 2022-12-08T14:20:29.138625 | 2020-08-30T13:58:56 | 2020-08-30T13:58:56 | 291,473,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from django.urls import path
from . import views
urlpatterns=[
path('',views.bloghome, name='bloghome'),
path('blogpost/',views.blogpost, name='blogpost'),
path('test/',views.test, name='blogpost'),
#path ('<str:slug>' , views.blogpost, name='blogpost '),
] | [
"69669679+Adityapatel667@users.noreply.github.com"
] | 69669679+Adityapatel667@users.noreply.github.com |
6a18f0ae5be54533a66e3eca6087ba0b206673dc | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/saddle-points/3a5b8c06b75443c1ba4f3e45cd0ac791.py | bc8cf755d9ace6afd58ad1d6751c8c089df1218d | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 496 | py | def saddle_points(mat):
spoints = set()
if mat:
rowLen = len(mat[0])
for row in mat:
if len(row) != rowLen:
raise ValueError("Irregular matrix. All rows must be the same length.")
for i, row in enumerate(mat):
for j in range(rowLen):
if row[j] == max(row):
if row[j] == min([mat[n][j] for n in range(len(mat))]):
spoints.add((i, j))
return spoints
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
7f6c7d9110e2f13bc4bb18e6472f66eea177cd33 | 11a71b9b0f0066d6023f6ce1fbd3022412fc86c4 | /Atreya/appointments/serializers.py | debfc356460d0cf46c524859ed6b907a23b9ba08 | [] | no_license | Seva-Solutions/MyClinic_Backend | 085615dad37cc5a2b372a7b5fe2897fc46356f0c | 4fb40bc701eefad3e80965ccbb87f44079811d07 | refs/heads/master | 2023-07-16T06:10:02.680357 | 2021-08-30T01:35:44 | 2021-08-30T01:35:44 | 327,069,849 | 0 | 0 | null | 2021-07-24T00:49:47 | 2021-01-05T17:24:34 | Python | UTF-8 | Python | false | false | 2,328 | py | # from typing_extensions import Required
from rest_framework import serializers
from .models import *
from django.contrib.auth.models import User
import datetime
class AppointmentTypeSerializer(serializers.ModelSerializer):
    """Serializes every field of AppointmentType."""
    class Meta:
        model = AppointmentType
        fields = '__all__'
    def create(self, validated_data):
        # Explicit create; behaves like the default ModelSerializer.create.
        instance = AppointmentType.objects.create(**validated_data)
        return instance
class PreAppointmentQuestionSerializer(serializers.ModelSerializer):
    """Serializes every field of PreAppointmentQuestion."""
    class Meta:
        model = PreAppointmentQuestion
        fields = '__all__'
class PreAppointmentResponseSerializer(serializers.ModelSerializer):
    """Serializes every field of PreAppointmentResponse."""
    class Meta:
        model = PreAppointmentResponse
        fields = '__all__'
    def validate(self, data):
        # Pass-through: no cross-field validation is performed.
        return data
class AppointmentSerializer(serializers.ModelSerializer):
    """Appointment serializer.

    Accepts optional nested ``pre_appointment_responses`` on create and adds
    a computed ``endTime`` field to the serialized representation.
    """
    pre_appointment_responses = PreAppointmentResponseSerializer(required=False, many=True)
    class Meta:
        model = Appointment
        fields = '__all__'
    def validate(self, data):
        # Pass-through: no cross-field validation is performed.
        return data
    def create(self, validated_data):
        # Pop nested responses first so they are not handed to
        # Appointment.objects.create(), then persist them one by one.
        responses = []
        if 'pre_appointment_responses' in validated_data:
            responses = validated_data.pop('pre_appointment_responses')
        instance = Appointment.objects.create(**validated_data)
        for response in responses:
            resp = PreAppointmentResponse(response=response['response'], question=response['question'], appointment=instance)
            try:
                resp.save()
            except Exception as e:
                # NOTE(review): a failed response save is only printed and
                # otherwise ignored — confirm this best-effort behavior.
                print(e)
        return instance
    # def update(self, instance, validated_data):
    #     if 'languageList' in validated_data:
    #         instance.languageList.clear()
    #         languages = validated_data.pop('languageList')
    #         for new_language in languages:
    #             instance.languageList.add(new_language)
    #     return super().update(instance, validated_data)
    def to_representation(self, instance):
        # endTime = startTime + appointment type length (minutes);
        # added to the output only, never stored.
        endTime = instance.startTime + datetime.timedelta(minutes=instance.appointment_type.length)
        representation = super(AppointmentSerializer, self).to_representation(instance)
        representation['endTime'] = endTime
        return representation
| [
"s3aryal@uwaterloo.ca"
] | s3aryal@uwaterloo.ca |
aa938fa99e8b11fc478817c6c7ae6d6a70ca2182 | 3e321aa10954d1a206557fe10a06b2bb4cf7e1d3 | /search/hardOnes/add_operators.py | 45252a84b5028d63c06fad75a536ec47b984b437 | [] | no_license | LQXshane/leetcode | 7f5ab93372f95f83996a8d11936a737240f54464 | cc6245c9519d2a249aa469eefc003e340bdbfa7c | refs/heads/master | 2021-01-12T03:03:20.252679 | 2017-10-13T17:31:38 | 2017-10-13T17:31:38 | 78,151,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | class Solution(object):
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
ans, self.target = [], target
for i in range(1, len(num) + 1):
if i == 1 or (i > 1 and num[0] != '0'):
self.helper(num[i:], num[:i], int(num[:i]), int(num[:i]), ans)
return ans
def helper(self, num, tmp, cur, last, res):
"""
:type tmp: str, current string
"""
if not num and self.target == cur:
res.append(tmp)
return
for i in range(1, len(num) + 1):
# print len(num), i
if i == 1 or ( i > 1 and num[0] != '0'):
self.helper(num[i:], tmp + "+" + num[:i], cur + int(num[:i]), int(num[:i]), res)
self.helper(num[i:], tmp + "-" + num[:i], cur - int(num[:i]), - int(num[:i]), res)
self.helper(num[i:], tmp + "*" + num[:i], cur - last + last * int(num[:i]) , last * int(num[:i]), res)
| [
"qxlin@bu.edu"
] | qxlin@bu.edu |
0437d618da2dcb5a07093f73ce71c56b7f6eb7cc | 3893a4cc36eaf30b694d77777ff8ad005af223d9 | /crop_engine/nlde/engine/eddynetwork.py | d88f9093bf3209f5268cf4320e631d3f32557acc | [
"MIT"
] | permissive | Lars-H/slurp | cd6cbfbf16a2aade92c8a094e3aeba97f47e5f3a | 0c7c8a5ca62145bedaff5791d6f54337674da2ea | refs/heads/main | 2023-06-07T21:56:42.157218 | 2021-06-30T14:19:37 | 2021-06-30T14:19:37 | 343,422,320 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,559 | py | """
Created on Mar 23, 2015
@author: Maribel Acosta
Updated on Mar 10, 2020
@author: Lars Heling
"""
from eddyoperator import EddyOperator
from nlde.query.sparql_parser import parse
from crop.query_plan_optimizer.idp_optimizer import IDP_Optimizer
from crop.query_plan_optimizer.federated_optimizer import Federated_Optimizer
from crop.query_plan_optimizer.nlde_optimizer import nLDE_Optimizer
from crop.query_plan_optimizer.ldff_optimizer import LDFF_Optimizer
from nlde.operators.operatorstructures import Tuple
from nlde.policy.nopolicy import NoPolicy
from multiprocessing import Process, Queue, active_children
from time import time
import os, signal
import logging
logger = logging.getLogger("nlde_debug")
class EddyNetwork(object):
    """Network of eddy operators that routes tuples between query-plan
    operators (in separate processes) to evaluate a SPARQL query.

    NOTE(review): attribute semantics below are inferred from this file
    only — confirm against the operator/optimizer implementations.
    """
    def __init__(self, **kwargs):
        # Query string and adaptive routing policy (NoPolicy = no adaptivity).
        self.query = kwargs.get("query", None)
        self.policy = kwargs.get("policy", NoPolicy())
        self.sources = kwargs.get("sources", "")
        # Number of eddy processes to spawn.
        self.n_eddy = kwargs.get("n_eddy", 2)
        self.explain = kwargs.get("explain", False)
        self.eofs = 0
        self.independent_operators = []
        self.join_operators = []
        self.eddy_operators = []
        self.eddies_queues = []
        self.operators_input_queues = {}
        self.operators_left_queues = []
        self.operators_right_queues = []
        self.tree = None
        self.operators_desc = None
        self.sources_desc = None
        self.eofs_operators_desc = None
        self.output_queue = None
        self.optimizer = kwargs.get("optimizer")
        # Stats
        self.triple_pattern_cnt = -1 # TODO: Implement tp count in query
        # PIDs of spawned worker processes (consumed by stop_execution).
        self.p_list = Queue()
        self.plan = None
        if self.query is None:
            logger.debug("No query provided")
        else:
            self.plan = self.__get_query_plan()
    def __get_query_plan(self):
        """Parse the SPARQL query and build a physical plan via the optimizer.

        Records the optimization wall-clock time in ``self.optimization_time``.
        """
        # Parse SPARQL query.
        queryparsed = parse(self.query)
        self.triple_pattern_cnt = 1 #1 queryparsed.triple_pattern_count
        # Start Timer
        start = time()
        # Create Plan
        plan = self.optimizer.create_plan(queryparsed)
        # Time the execution
        self.optimization_time = time() - start
        logger.debug(plan)
        #return None
        return plan
    def execute_plan(self, outputqueue, plan):
        """Spawn the eddy processes and start the plan's operators.

        Result tuples are written to ``outputqueue``; one EOF tuple per
        eddy is expected by the consumer.
        """
        self.tree = plan.tree
        self.eofs_operators_desc = plan.operators_desc
        self.sources_desc = plan.sources_desc
        self.operators_desc = plan.operators_desc
        self.eofs = plan.independent_sources
        self.policy.initialize_priorities(plan.plan_order)
        # Create eddies queues.
        for i in range(0, self.n_eddy+1):
            self.eddies_queues.append(Queue())
        # Create operators queues (left_plan and right_plan).
        for op in plan.operators:
            self.operators_input_queues.update({op.id_operator: []})
            for i in range(0, op.independent_inputs):
                self.operators_input_queues[op.id_operator].append(Queue())
        for i in range(1, self.n_eddy+1):
            eddy = EddyOperator(i, self.policy, self.eddies_queues, self.operators_desc, self.operators_input_queues,
                                plan.operators_vars, outputqueue, plan.independent_sources, self.eofs_operators_desc,
                                plan.operators_sym, plan.operators)
            p = Process(target=eddy.execute)
            p.start()
            self.p_list.put(p.pid)
        self.tree.execute(self.operators_input_queues, self.eddies_queues, self.p_list, self.operators_desc)
    def execute(self, outputqueue):
        """Execute the pre-built plan, writing result tuples to ``outputqueue``.

        If the plan contacts no sources, only EOF tuples are emitted.
        """
        if not self.plan is None:
            # If there are actually any sources to be contacted in the plan
            if len(self.plan) > 0:
                self.execute_plan(outputqueue, self.plan)
            else:
                # Otherwise, just send EOF tuple
                eof_tuple = Tuple("EOF", None, None, None)
                # One "EOF" per eddy expected
                for _ in range(self.n_eddy):
                    outputqueue.put(eof_tuple)
        else:
            raise Exception("No Query Physical Plan to execute")
    def execute_standalone(self, plan):
        """Execute ``plan`` and yield result tuples as a generator.

        Stops after one EOF tuple has arrived from every eddy, then
        aggregates per-operator statistics and kills the workers.
        """
        self.output_queue = Queue()
        self.plan = plan
        self.execute_plan(self.output_queue, plan)
        count = 0
        tuples_per_operator = {}
        requests_per_subexpression = {}
        # Handle the rest of the query answer.
        while count < self.n_eddy:
            try:
                ri = self.output_queue.get(True)
                if ri.data == "EOF":
                    tuples_per_operator.update(ri.tuples_produced)
                    requests_per_subexpression.update(ri.requests)
                    count = count + 1
                else:
                    yield ri
            except Exception as e:
                break
        self.plan.logical_plan_stats(tuples_per_operator)
        self.plan.execution_requests = sum(requests_per_subexpression.values())
        self.stop_execution()
    def stop_execution(self, sig=None, err=None):
        """Terminate all spawned worker processes and close the output queue.

        Signature is compatible with ``signal.signal`` handlers.
        """
        # Finalize Execution and kill all processes
        self.output_queue.close()
        while not self.p_list.empty():
            pid = self.p_list.get()
            try:
                os.kill(pid, signal.SIGKILL)
            except OSError as e:
                pass
        for p in active_children():
            try:
                p.terminate()
            except OSError as e:
                pass
        #import sys
        #sys.exit(1)
"ilya.filippov@outlook.com"
] | ilya.filippov@outlook.com |
cf9f7cacf1dc9653d2c6e8dd2451a3223b93d4e7 | d7f4518e8a8a62a071e7b9aeb99a2f06026c8768 | /photography/migrations/0001_initial.py | d55033fbe76bb804895305ae9cda2ade7e7ced70 | [] | no_license | knoopr/imagesite | 1e7beafc098963e1114e9d3e341f1de624abcd6a | 12419ad3ed719839295e63689cccbf2de1b0da7e | refs/heads/master | 2021-07-14T10:57:37.209424 | 2017-09-15T13:13:22 | 2017-09-15T13:13:22 | 103,157,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,134 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-13 18:19
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Initial schema for the photography app (auto-generated by Django).

    Creates Photograph, Photographer and Tag, then links Photograph to
    Photographer through a PROTECT foreign key. Applied migrations should
    not be edited by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Photograph',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('alt_text', models.CharField(max_length=126)),
                ('date_taken', models.DateField(default=datetime.datetime(2017, 9, 13, 18, 19, 40, 106598, tzinfo=utc))),
                ('date_uploaded', models.DateField(default=datetime.datetime(2017, 9, 13, 18, 19, 40, 106645, tzinfo=utc))),
                ('image_data', models.ImageField(upload_to='./')),
            ],
        ),
        migrations.CreateModel(
            name='Photographer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=64)),
                ('last_name', models.CharField(max_length=64)),
                ('contact_email', models.EmailField(max_length=254)),
                ('photo_watermark', models.ImageField(upload_to='./')),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag_text', models.CharField(max_length=126, unique=True)),
                ('associated_images', models.ManyToManyField(related_name='tags', to='photography.Photograph')),
            ],
        ),
        migrations.AddField(
            model_name='photograph',
            name='image_photographer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='associated_photographs', to='photography.Photographer'),
        ),
    ]
| [
"knoop.rick@gmail.com"
] | knoop.rick@gmail.com |
bf9fbd11bd31704162ea27bdca1a7b382ab15680 | 611cd5d8b570ef1e0c1fa6a9b1978ce8104bfe05 | /MainWindow.py | f6da59851a6f4663467cf9f9a18d1218d534a846 | [
"MIT"
] | permissive | mrthundergod/pyResistanceCalculator | eef7301b96b3f1df6846e01a2354eeec193cc88f | f3c990f20c0df8acac2b5147894ce218679b9322 | refs/heads/master | 2021-01-24T13:14:47.147029 | 2019-08-06T06:28:56 | 2019-08-06T06:28:56 | 123,165,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,931 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'res_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Qt Designer UI class for the resistance-calculator window.

    NOTE: generated by pyuic5 from ``res_gui.ui``; manual edits are lost on
    regeneration — change the .ui file instead.
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets of the main window."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.setWindowModality(QtCore.Qt.ApplicationModal)
        MainWindow.resize(650, 277)
        MainWindow.setMinimumSize(QtCore.QSize(650, 277))
        MainWindow.setMaximumSize(QtCore.QSize(650, 277))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/newPrefix/lightning.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.comboBox1 = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox1.setGeometry(QtCore.QRect(160, 30, 211, 31))
        self.comboBox1.setObjectName("comboBox1")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(10, 30, 121, 31))
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(14)
        font.setItalic(False)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(10, 80, 141, 31))
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(14)
        font.setItalic(False)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(10, 130, 131, 31))
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(14)
        font.setItalic(False)
        self.label_3.setFont(font)
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(10, 180, 141, 31))
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(14)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        self.label_6 = QtWidgets.QLabel(self.centralwidget)
        self.label_6.setGeometry(QtCore.QRect(400, 10, 231, 41))
        font = QtGui.QFont()
        font.setFamily("DejaVu Serif")
        font.setPointSize(16)
        font.setItalic(True)
        self.label_6.setFont(font)
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(self.centralwidget)
        self.label_7.setGeometry(QtCore.QRect(610, 50, 81, 51))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label_7.setFont(font)
        self.label_7.setObjectName("label_7")
        self.label_8 = QtWidgets.QLabel(self.centralwidget)
        self.label_8.setGeometry(QtCore.QRect(450, 100, 111, 31))
        font = QtGui.QFont()
        font.setFamily("URW Bookman L")
        font.setPointSize(10)
        font.setItalic(True)
        self.label_8.setFont(font)
        self.label_8.setObjectName("label_8")
        self.label_9 = QtWidgets.QLabel(self.centralwidget)
        self.label_9.setGeometry(QtCore.QRect(610, 140, 31, 31))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.label_9.setFont(font)
        self.label_9.setObjectName("label_9")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(400, 180, 101, 41))
        self.pushButton.setObjectName("pushButton")
        self.lineEdit1 = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit1.setGeometry(QtCore.QRect(390, 50, 211, 41))
        self.lineEdit1.setObjectName("lineEdit1")
        self.lineEdit2 = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit2.setGeometry(QtCore.QRect(390, 130, 211, 41))
        self.lineEdit2.setObjectName("lineEdit2")
        self.comboBox2 = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox2.setGeometry(QtCore.QRect(160, 80, 211, 31))
        self.comboBox2.setObjectName("comboBox2")
        self.comboBox3 = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox3.setGeometry(QtCore.QRect(160, 130, 211, 31))
        self.comboBox3.setObjectName("comboBox3")
        self.comboBox4 = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox4.setGeometry(QtCore.QRect(160, 180, 211, 31))
        self.comboBox4.setObjectName("comboBox4")
        self.pushButton2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton2.setGeometry(QtCore.QRect(510, 180, 91, 41))
        self.pushButton2.setObjectName("pushButton2")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusBar = QtWidgets.QStatusBar(MainWindow)
        self.statusBar.setObjectName("statusBar")
        MainWindow.setStatusBar(self.statusBar)
        self.menuBar = QtWidgets.QMenuBar(MainWindow)
        self.menuBar.setGeometry(QtCore.QRect(0, 0, 650, 26))
        self.menuBar.setObjectName("menuBar")
        self.menuFIle = QtWidgets.QMenu(self.menuBar)
        self.menuFIle.setObjectName("menuFIle")
        MainWindow.setMenuBar(self.menuBar)
        self.actionClear = QtWidgets.QAction(MainWindow)
        self.actionClear.setObjectName("actionClear")
        self.actionExit = QtWidgets.QAction(MainWindow)
        self.actionExit.setObjectName("actionExit")
        self.actionExit_2 = QtWidgets.QAction(MainWindow)
        self.actionExit_2.setObjectName("actionExit_2")
        self.menuFIle.addAction(self.actionExit_2)
        self.menuBar.addAction(self.menuFIle.menuAction())

        self.retranslateUi(MainWindow)
        self.actionExit_2.triggered.connect(MainWindow.close)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (titles, tooltips, labels)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Resistance Calculator"))
        self.comboBox1.setToolTip(_translate("MainWindow", "Enter color of first band"))
        self.comboBox1.setStatusTip(_translate("MainWindow", "Enter color of first band"))
        self.label.setText(_translate("MainWindow", "First Band"))
        self.label_2.setText(_translate("MainWindow", "Second Band"))
        self.label_3.setText(_translate("MainWindow", "Third Band"))
        self.label_4.setText(_translate("MainWindow", "Fourth Band"))
        self.label_6.setText(_translate("MainWindow", "Resistance Value is :"))
        self.label_7.setText(_translate("MainWindow", " kΩ"))
        self.label_8.setText(_translate("MainWindow", "Tolerance(+/-) :"))
        self.label_9.setText(_translate("MainWindow", "%"))
        self.pushButton.setToolTip(_translate("MainWindow", "Calculate"))
        self.pushButton.setStatusTip(_translate("MainWindow", "Find Value"))
        self.pushButton.setText(_translate("MainWindow", "Calculate !"))
        self.lineEdit1.setToolTip(_translate("MainWindow", "Resistance Value"))
        self.lineEdit1.setStatusTip(_translate("MainWindow", "Resistance Value"))
        self.lineEdit2.setToolTip(_translate("MainWindow", "Tolerance"))
        self.lineEdit2.setStatusTip(_translate("MainWindow", "Tolerance"))
        self.comboBox2.setToolTip(_translate("MainWindow", "Enter color of second band"))
        self.comboBox2.setStatusTip(_translate("MainWindow", "Enter color of second band"))
        self.comboBox3.setToolTip(_translate("MainWindow", "Enter color of second band"))
        self.comboBox3.setStatusTip(_translate("MainWindow", "Enter color of second band"))
        self.comboBox4.setToolTip(_translate("MainWindow", "Enter color of first band"))
        self.comboBox4.setStatusTip(_translate("MainWindow", "Enter color of first band"))
        self.pushButton2.setToolTip(_translate("MainWindow", "Clear"))
        self.pushButton2.setStatusTip(_translate("MainWindow", "Clear Boxes"))
        self.pushButton2.setText(_translate("MainWindow", "Clear"))
        self.menuFIle.setTitle(_translate("MainWindow", "FIle"))
        self.actionClear.setText(_translate("MainWindow", "Clear"))
        self.actionExit.setText(_translate("MainWindow", "Exit"))
        self.actionExit_2.setText(_translate("MainWindow", "Exit"))
        self.actionExit_2.setStatusTip(_translate("MainWindow", "Exit the application"))
        self.actionExit_2.setShortcut(_translate("MainWindow", "Ctrl+X"))
self.actionExit_2.setShortcut(_translate("MainWindow", "Ctrl+X"))
import resources_rc  # Qt resource module generated by pyrcc5 (window icon)
if __name__ == "__main__":
    # Show the generated UI when this module is run as a script.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| [
"aj95india@gmail.com"
] | aj95india@gmail.com |
8a2c478a7c55bd6e17bdb6130aaa087cc8b4487b | 46035631e6d76ddea73603fcf139290f5cb4a991 | /aws-python/__main__.py | dfb3e5a12cc77cf4fba6391e7fa0f6e30b3084db | [
"Apache-2.0"
] | permissive | pulumi/templates | c6150fd66d5ba85a312d9ee3102ed456abebda8b | 7c18d24ed7a4e0f5e00801bc133bb19dae630ee3 | refs/heads/master | 2023-08-21T12:46:56.389767 | 2023-08-04T20:36:26 | 2023-08-04T20:36:26 | 124,577,647 | 66 | 52 | Apache-2.0 | 2023-09-13T00:07:57 | 2018-03-09T18:21:12 | Go | UTF-8 | Python | false | false | 219 | py | """An AWS Python Pulumi program"""
import pulumi
from pulumi_aws import s3
# Create an AWS resource (S3 Bucket)
bucket = s3.Bucket('my-bucket')
# Export the name of the bucket
pulumi.export('bucket_name', bucket.id)
| [
"noreply@github.com"
] | pulumi.noreply@github.com |
ea255eb6c23181fc34b84ca6ef8d00b766ef488d | c97a6c60993f56ac364616334e18aa1cb21cba06 | /server/algorithm/Version2/ConsumptionGraph.py | 53a2b4de9aa7f6dc8e2b2a5c9e74869750cc3313 | [] | no_license | elyashivderi17/Distribution-Algorithm | cbad844a729b1df679494b2644032589351fdcce | cf6f17d00b2a918acb0ba3b81c80126165033657 | refs/heads/master | 2022-04-22T01:27:46.054574 | 2020-04-08T07:34:57 | 2020-04-08T07:34:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,470 | py | import itertools
import doctest as doctest
class ConsumptionGraph():
    """
    A bipartite consumption graph between agents and objects, stored as a
    binary matrix: graph[i][j] == 1 means that agent i consumes object j.
    """

    def __init__(self, graph):
        self.__graph = graph

    def get_graph(self):
        return self.__graph

    def num_of_sharing(self) -> int:
        """
        Count the sharings in this ConsumptionGraph: the number of
        consumption edges beyond one edge per object (never negative).
        >>> g = ConsumptionGraph([[1, 1, 0.0], [0.0, 1, 1], [0.0, 0.0, 0.0]])
        >>> g.num_of_sharing()
        1.0
        >>> g = ConsumptionGraph([[1, 1, 1], [0.0, 1, 1], [1, 0.0, 0.0]])
        >>> g.num_of_sharing()
        3.0
        >>> g = ConsumptionGraph([[0.0, 0.0, 1], [0.0, 1, 0.0], [1, 0.0, 0.0]])
        >>> g.num_of_sharing()
        0.0
        >>> g = ConsumptionGraph([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
        >>> g.num_of_sharing()
        0.0
        >>> g = ConsumptionGraph([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
        >>> g.num_of_sharing()
        4.0
        """
        edge_total = sum(sum(row) for row in self.__graph)
        surplus = edge_total - len(self.__graph[0])
        return 0.0 if surplus < 0 else surplus

    def generate_all_code(self):
        """
        Generate every code for this graph; a code represents a new graph
        that can be built from this graph when one more agent is added.
        :return: generator over all the codes
        >>> a =[[1,0,1]]
        >>> g = ConsumptionGraph(a)
        >>> for x in g.generate_all_code():
        ...     print(x)
        (0,)
        (1,)
        (2,)
        (3,)
        (4,)
        """
        per_agent_ranges = map(range, self.sum_of_agent_properties())
        for code in itertools.product(*per_agent_ranges):
            yield code

    def sum_of_agent_properties(self):
        """
        Return a list where entry i is twice the number of properties that
        agent i consumes in the graph, plus one.
        :return: the per-agent counts as a list
        >>> a =[[1,0,0],[1,1,1],[1,1,0]]
        >>> g = ConsumptionGraph(a)
        >>> g.sum_of_agent_properties()
        [3, 7, 5]
        >>> a =[[1,1,0],[1,1,1]]
        >>> g = ConsumptionGraph(a)
        >>> g.sum_of_agent_properties()
        [5, 7]
        >>> a =[[1,0,0],[1,1,1],[1,1,0]]
        >>> g = ConsumptionGraph(a)
        >>> g.sum_of_agent_properties()
        [3, 7, 5]
        >>> a =[[1,0,0],[0,0,1],[0,0,0]]
        >>> g = ConsumptionGraph(a)
        >>> g.sum_of_agent_properties()
        [3, 3, 1]
        >>> a =[[1,1]]
        >>> g = ConsumptionGraph(a)
        >>> g.sum_of_agent_properties()
        [5]
        """
        # list.count(1) matches entries equal to 1, exactly like the
        # original per-cell ``== 1`` comparison.
        return [2 * row.count(1) + 1 for row in self.__graph]
if __name__ == '__main__':
(failures, tests) = doctest.testmod(report=True)
print("{} failures, {} tests".format(failures, tests))
| [
"danielabergel1@gmail.com"
] | danielabergel1@gmail.com |
e81e091bbc08afbe0cc7d3d2af82c37748c96d4f | e8e74ea27a4b2957e49d76f514fec7e30e905318 | /translator/_test.py | 1557553a620727386ff5c6f6e9b8dc1a3167f43e | [
"MIT"
] | permissive | hyeokhyen/gtos | eee545002262f938588c1eb3f53a0029f61e19a5 | 72662bc95004d1fa405f5e7ae91554e7c1f5af75 | refs/heads/master | 2022-11-20T04:58:06.858301 | 2020-07-23T01:46:47 | 2020-07-23T01:46:47 | 274,275,767 | 0 | 0 | MIT | 2020-06-23T01:01:52 | 2020-06-23T01:01:51 | null | UTF-8 | Python | false | false | 2,095 | py | import torch
from torch.autograd import Variable
import numpy as np
from transformer import SelfAttentionMask
if 0:
device = torch.device('cuda', 0)
selfattnmask = SelfAttentionMask(device)
#print (selfattnmask.weights)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
# return subsequent_mask == 0
return torch.from_numpy(subsequent_mask) == 0
if 0:
print (subsequent_mask(10)[0])
def data_gen(V, batch, nbatches):
"Generate random data for a src-tgt copy task."
for i in range(nbatches):
data = torch.from_numpy(np.random.randint(1, V, size=(batch, 10)))
data[:, 0] = 1
src = Variable(data, requires_grad=False)
tgt = Variable(data, requires_grad=False)
yield Batch(src, tgt, 0)
class Batch:
"Object for holding a batch of data with mask during training."
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"Create a mask to hide padding and future words."
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
V = 11
for batch in data_gen(V, 30, 20):
print (batch.src)
print(batch.src_mask)
print (batch.trg)
print(batch.trg_mask)
print (batch.src)
print (batch.trg)
print (batch.src.size())
print (batch.src_mask.size())
print (batch.trg.size())
print (batch.trg_mask.size())
mask = batch.trg_mask
scores = torch.from_numpy(np.random.randint(1, V, size=(30, 9, 9)))
print (scores)
print (scores.size())
scores = scores.masked_fill(mask == 0, -1e3)
print (scores)
assert False | [
"hyeokalankwon@res380d-128-61-83-199.res.gatech.edu"
] | hyeokalankwon@res380d-128-61-83-199.res.gatech.edu |
91b02645dd8696d38127f29a32e8b4e522a27db1 | ad5a8290fe880d4275ed517887925b955f15d5f9 | /python_crawling/ch2_css_utilization.py | 0f37dd1762e86ef51ac7b52ff668ffcd6cd664ed | [] | no_license | JeongA-Shin/python_crawling | 5284bba5ab9afed72d579d0d74b215ca79f98e0d | 71f625a9018cffc1e0fa1c90c9bfe3ab08616559 | refs/heads/master | 2023-05-30T23:42:35.976380 | 2021-07-01T16:55:52 | 2021-07-01T16:55:52 | 380,309,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | import requests
from bs4 import BeautifulSoup
res=requests.get("https://www.naver.com/")
soup=BeautifulSoup(res.content,'html.parser')
data1=soup.find_all('li','category_item') #크롤링할 때, id로 찾는 게 아닌 이상은 태그도 반드시 표시해줘야 함!
data2=soup.find_all('strong','title elss') #태그를 먼저 표시해주고, 더 구체화시키도록 각종 속성들을 더 표시해주는 거임
for j in data1:
print(j.get_text())
print('\n')
print('\n')
for i in data2:
print(i.get_text())
| [
"jeonga@khu.ac.kr"
] | jeonga@khu.ac.kr |
bcbb4575f70fbf95a79bc2a2908b6208b364e0c0 | 489733ce5fe750958fcf1aa6d281d4406cd37804 | /Blocks.py | f02a8800c32784a239eaf123db4af5aca072f939 | [] | no_license | Yahnit/Bomberman | 76d7423ad62bcda6dd819c16f5bde96b0ea70a61 | 8892d22d63443a0ef3918493c54f88cee7df41fb | refs/heads/master | 2021-01-22T21:18:20.157613 | 2017-10-18T16:56:24 | 2017-10-18T16:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | import random
'''
This class contains the descrition of Blocks which
can be destroyed in an explosion
'''
class Blocks:
'''
This class contains only one method to place the
destroyable blocks on the Board
'''
def insertBlocks(self,screen,player):
blocksInserted = 0
max_blocks = 10 + 5*player.getLevel()
while(blocksInserted<max_blocks):
x = random.randint(2,34)
y = random.randint(4,72)
if ((x%4==2 and y%4==0) or (x%4==0 and y%8==4)) and screen[x][y]==' ':
for i in range (x,x+2):
for j in range(y,y+4):
screen[i][j] = '/'
blocksInserted+=1
| [
"yahnit.gogeta@gmail.com"
] | yahnit.gogeta@gmail.com |
47eab23a75584440df0d50dbeef63ac17424ae53 | 543df1b87488b8e0768ea4e1a259436f3ef51b88 | /psutil/initial.py | e75028cf8abc993867de562e03d18b88301df2ff | [] | no_license | williamff11/bachelor-software-engineering | 3c6badb9f0a165f5cc652ad25c68895528749756 | fcfa2e9c6456e419e8e7172578e9123111804481 | refs/heads/master | 2023-01-27T13:03:27.528409 | 2020-12-07T23:47:56 | 2020-12-07T23:47:56 | 301,927,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | import psutil
import psutil
import psutil
dic_interfaces = psutil.net_if_addrs()
print(psutil.cpu_percent())
disco = psutil.disk_usage('.')
print("Total:", disco.total, "B")
print("Em uso:", disco.used, "B")
print("Livre:", disco.free, "B")
print("Total:", round(disco.total/(1024*1024*1024), 2), "GB")
print("Em uso:", round(disco.used/(1024*1024*1024), 2), "GB")
print("Livre:", round(disco.free/(1024*1024*1024), 2), "GB")
print("Percentual de Disco Usado:", disco.percent)
| [
"williamff11@gmail.com"
] | williamff11@gmail.com |
f280ce47f173747f8295fc5ab3ea3ab53120704d | f27e7146dd79842ceecd50e3a8d809b470e725fe | /scripts/scanpy/loom2anndata.py | ea4028e352c9c833facb87e061ba86ea2f1a896d | [] | no_license | Aomisheng/scMouseBcellFlu | badb2526f940182e3c2e6d232a46da36b38c431e | c48b80749b493e870bac2e4267ebf8a61bd87ccb | refs/heads/master | 2023-04-11T16:03:43.798702 | 2021-04-28T11:15:35 | 2021-04-28T11:15:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | import os, sys, argparse
import scanpy as sc
parser = argparse.ArgumentParser(description='Convert loom to scanpy anndata')
parser.add_argument('-i', '--input', type=str, help='Input loom file')
parser.add_argument('-o', '--output', type=str, help='Output h5ad file')
args = parser.parse_args()
#in_loom = "/Users/asbj/projects/sc_projects/single-cell-hackathon-2020/datasets/bone_marrow/scanpy/10x/filt_seurat_object.loom"
#out_adata = "/Users/asbj/projects/sc_projects/single-cell-hackathon-2020/datasets/bone_marrow/scanpy/10x/filt_seurat_object.h5ad"
adata = sc.read_loom(args.input)
adata.write(args.output)
| [
"jonrob@chalmers.se"
] | jonrob@chalmers.se |
c4e918875ec7a2958629ca6a8d541407018065d7 | 01b1a86160eca8c948c80ef506da00ecebe1b933 | /gerapy_auto_extractor/helpers.py | be5a131196cf20284049814772a471f03e95f487 | [
"Apache-2.0"
] | permissive | downdawn/GerapyAutoExtractor | 0b23d10761576a2ebe6b81332dc1ba914fe3e78d | e7085264244aede0207de2641302f79bba42edf5 | refs/heads/master | 2023-06-24T08:54:05.772440 | 2021-07-24T18:29:07 | 2021-07-24T18:29:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | from gerapy_auto_extractor.utils.helper import jsonify, content
| [
"cqc@cuiqingcai.com"
] | cqc@cuiqingcai.com |
f971ef72b2fcde8340c8761891ea8888615faf1d | 5ebdbc630bfdfc582a41d7e353e517604ab336ab | /Exec/SCIENCE/urca/analysis/scripts/lineout-field | 2bfa8e22f7ed785b5a0f5b78169752304b198a7e | [
"BSD-3-Clause"
] | permissive | pgrete/MAESTROeX | 661fd437caa1508dbc910772ba4d6ed8b551176a | 1d7e89365379eea57680f738f271c93d7f28e513 | refs/heads/master | 2020-08-23T17:24:46.488221 | 2019-11-01T21:31:27 | 2019-11-01T21:31:27 | 216,671,997 | 0 | 0 | BSD-3-Clause | 2019-10-21T21:52:22 | 2019-10-21T21:52:22 | null | UTF-8 | Python | false | false | 2,517 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import yt
from yt import derived_field
import numpy as np
from yt_urca_fields import UrcaShellFields, DatasetHelpers
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=str, help='Name of input plotfile.')
parser.add_argument('-f', '--field', type=str, default='density',
help='Name of the field to plot. Eg. "tfromp". Default is density.')
parser.add_argument('-axis', '--axis', type=str, default='x',
help='Axis along which to take the lineout. Default is "x". Can be "x", "y", or "z".')
parser.add_argument('-w', '--width', type=float,
help='Width of lineout (cm). Default is the full width of the domain.')
parser.add_argument('-flo', '--flo', type=float,
help='Lower bound of field in lineout plot.')
parser.add_argument('-fhi', '--fhi', type=float,
help='Upper bound of field in lineout plot.')
parser.add_argument('-log', '--logscale', action='store_true', help='If supplied, use a log scale for the field.')
args = parser.parse_args()
if __name__ == "__main__":
ds = yt.load(args.infile)
ushell_fields = UrcaShellFields()
ushell_fields.setup(ds)
field, field_short_name = DatasetHelpers.get_field(ds, args.field)
assert(field)
c = ds.domain_center
axmap = {'x': 0, 'y': 1, 'z': 2}
axis_str = args.axis.lower()
ax = axmap[axis_str]
transverse_indices = [0,1,2]
transverse_indices.pop(ax)
# cut through the transverse axis such that the ray intersects the center of the domain
ray = ds.ortho_ray(ax, (c[transverse_indices[0]], c[transverse_indices[1]]))
# Sort the ray values by axis coordinate so there are no discontinuities
srt = np.argsort(ray[axis_str])
plt.subplot(111)
if args.logscale:
plt.semilogy(np.array(ray[axis_str][srt]), np.array(ray[field][srt]))
else:
plt.plot(np.array(ray[axis_str][srt]), np.array(ray[field][srt]))
if args.width:
center_axis = c[ax].in_units('cm').d
lower = center_axis - 0.5*args.width
upper = center_axis + 0.5*args.width
plt.gca().set_xlim(left=lower, right=upper)
plt.gca().set_ylim(bottom=args.flo, top=args.fhi)
plt.xlabel(axis_str)
plt.ylabel(field_short_name)
plotname = "{}.lineout.{}.{}.png".format(args.infile, axis_str, field_short_name)
print('Saving lineout plot: {}'.format(plotname))
plt.savefig(plotname)
| [
"dewillcox@lbl.gov"
] | dewillcox@lbl.gov | |
1d3e5cb0446df6b0f7f7980c307f3fedb04dd346 | 7e5485028653bf63b92f4ffda453739e423c9e60 | /emp_cli/swagger_client/models/environment_variables.py | e835ef57f34f7f63db153ad6cad5c67014e5369b | [] | no_license | fcribeiro/emp | 41fa687953f65f514797b3e51bad95ef0af14ca5 | ef219ee7093a19f20a6af8ed368e6ec3eb1af190 | refs/heads/master | 2020-03-11T15:04:05.468576 | 2018-09-13T18:55:52 | 2018-09-13T18:55:52 | 130,073,406 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,980 | py | # coding: utf-8
"""
EMP_Server_Controller
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EnvironmentVariables(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'value': 'str'
}
attribute_map = {
'name': 'name',
'value': 'value'
}
def __init__(self, name=None, value=None): # noqa: E501
"""EnvironmentVariables - a model defined in Swagger""" # noqa: E501
self._name = None
self._value = None
self.discriminator = None
self.name = name
self.value = value
@property
def name(self):
"""Gets the name of this EnvironmentVariables. # noqa: E501
Name of the environment variable # noqa: E501
:return: The name of this EnvironmentVariables. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EnvironmentVariables.
Name of the environment variable # noqa: E501
:param name: The name of this EnvironmentVariables. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def value(self):
"""Gets the value of this EnvironmentVariables. # noqa: E501
Value of the environment variable # noqa: E501
:return: The value of this EnvironmentVariables. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this EnvironmentVariables.
Value of the environment variable # noqa: E501
:param value: The value of this EnvironmentVariables. # noqa: E501
:type: str
"""
if value is None:
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EnvironmentVariables):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"fdcr@student.dei.uc.pt"
] | fdcr@student.dei.uc.pt |
b60a0e9cb9758a4a658afd61ec4ec3723abf0a28 | 2dbdbfe0fc8f149e31142d277c5e111583d93f02 | /docker/additional_settings.py | a537dc66cc092674c4d5d8555a8eb0519599b2a0 | [
"MIT"
] | permissive | jb68/proprio | 68a5e7d5db6a5c1949a1a182c4317326f1190834 | fb0acc60ab50c49570f070344940f21d727b8b09 | refs/heads/master | 2021-09-21T02:18:21.158308 | 2017-10-24T17:36:27 | 2017-10-24T17:36:27 | 101,918,995 | 2 | 0 | null | 2017-08-30T19:07:44 | 2017-08-30T19:07:44 | null | UTF-8 | Python | false | false | 991 | py | import os
import random
if 'SECRET_KEY' not in os.environ:
letters = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
r = random.SystemRandom()
generated = ''.join([r.choice(letters) for i in range(50)])
raise ValueError('''missing SECRET_KEY in environment.
You could use this random value :\n\n{}\n\n'''.format(generated))
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = 'DEBUG' in os.environ
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/data/db.sqlite3'
}
}
MEDIA_ROOT = '/data/uploaded_files'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| [
"olivieradam666+spam@gmail.com"
] | olivieradam666+spam@gmail.com |
d8157091355ba6e2dfcbf66f7ef4aa5d4f5350f0 | be918598badb564aa134990276a06c3524317e59 | /chaco/tests/test_data_label.py | 93773fec3793db7fd06cbb9ffa68589d6a9dc48a | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | janvonrickenbach/Chaco_wxPhoenix_py3 | 3ac11aaa5a452afa526edaf2c68c166709b94b90 | 21a10cfd81100f28e3fbc273357ac45642519f33 | refs/heads/master | 2020-08-03T20:03:55.983524 | 2019-12-18T10:47:24 | 2019-12-18T10:47:24 | 211,870,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import unittest
from chaco.api import create_scatter_plot, PlotGraphicsContext, DataLabel
class DataLabelTestCase(unittest.TestCase):
def test_data_label_arrow_not_visible(self):
# Regression test for https://github.com/enthought/chaco/issues/281
# Before the problem was fixed, this test (specifically, using
# arrow_visible=False in the DataLabel constructor) would raise an
# exception because of an undefined reference.
size = (50, 50)
plot = create_scatter_plot(data=[list(range(10)), list(range(10))])
label = DataLabel(
component=plot,
data_point=(4, 4),
marker_color="red",
marker_size=3,
label_position=(20, 50),
label_style='bubble',
label_text="Something interesting",
label_format="at x=%(x).2f, y=%(y).2f",
arrow_visible=False)
plot.overlays.append(label)
plot.outer_bounds = list(size)
gc = PlotGraphicsContext(size)
gc.render_component(plot)
if __name__ == "__main__":
unittest.main()
| [
"braidedlogix@users.noreply.github.com"
] | braidedlogix@users.noreply.github.com |
85eb597cb8a0c74728a1f651baff585fbb4356d5 | 9908637f7d56ed5b8d1de4672297d345eb325330 | /pyramid/ga.py | c9718f966a10863163121c5c9e6b4f3bd2629f6c | [
"MIT"
] | permissive | philip928lin/Py-RAMID | 4c4a56d4946d6be75adaa6f535840885eeb9c165 | 18ae25538d78507170a86d2b9c192cbadc138f71 | refs/heads/master | 2023-06-19T03:50:48.112062 | 2021-07-11T21:13:05 | 2021-07-11T21:13:05 | 298,086,505 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 46,140 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 08:31:32 2020
@author: CYLin
"""
import pandas as pd
import numpy as np
from datetime import datetime
from func_timeout import func_timeout, FunctionTimedOut
from joblib import Parallel, delayed, parallel_backend
import matplotlib.pyplot as plt
import logging
from tqdm import tqdm
import pickle
import os
from inspect import signature
from .setting import (ConsoleLogParm, MsglevelDict,
addLocalLogFile, removeLocalLogFile)
class GeneticAlgorithm(object):
""" GeneticAlgorithm with parallel in computing."""
    def __init__(self, function=lambda x: 0,
                 dimension=None,
                 variable_type='bool',
                 variable_boundaries=None,
                 variable_type_mixed=None,
                 wd=None,
                 saveGADataPerIter=False,
                 function_timeout=1000,
                 parallel=0,
                 threads=None,
                 # NOTE(review): mutable dict default argument. self.par
                 # aliases it below and is mutated in place (parent_size,
                 # num_elit, ...), so derived values leak across GA
                 # instances that rely on the default — confirm intent.
                 algorithm_parameters={'max_num_iteration': None,
                                       'population_size':100,
                                       'mutation_probability':0.1,
                                       'elit_ratio': 0.01,
                                       'crossover_probability': 0.5,
                                       'parents_portion': 0.3,
                                       'crossover_type':'uniform',
                                       'max_iter_without_improv': None},
                 continue_file=None,
                 seed=None,
                 msg_level=None):
        """
        Parameters
        ----------
        function : Callable function with input argument <var> or
            <var, GA_WD> if parallel = 2. var is a 1-D array.
            GA_WD is subfolder path.
        dimension : int, The dimension of calibrated parameters.
        variable_type : 'bool', 'int', 'real', 'cate'.
            The default is 'bool'.
        variable_boundaries : A list of boundary for each parameter in
            the format of [upper bound, lower bound].
        variable_type_mixed : None, True. If True, corresponding
            variable_type and variable_boundaries needs to be given.
        wd : Needs to be given if saveGADataPerIter is True or
            parallel = 2.
        saveGADataPerIter : True, False. If True, auto-save per
            iteration will be opened. The saved GAobject.pickle could be
            used later to continue the previous interrupted run. We
            highly recommend to provide wd and turn on this option. The
            default is False.
        function_timeout : Maximum seconds for the simulation for each
            member. The default is 1000.
        parallel : 0, 1, 2. 0: no parallel. 1: parallel without creating
            sub-working folders. 2: parallel with creating
            sub-working folders. The default is 0.
        threads : Number of threads to be used in parallel. -1: Max,
            -2: Max-1. The default is None.
        algorithm_parameters : dict. The default is
            {'max_num_iteration': None,
             'population_size':100,
             'mutation_probability':0.1,
             'elit_ratio': 0.01,
             'crossover_probability': 0.5,
             'parents_portion': 0.3,
             'crossover_type':'uniform',
             'max_iter_without_improv': None}.
        continue_file : Assign the path of GAobject.pickle to continue
            the simulation. The default is None.
        seed : Random seed for random number generator.
        msg_level : 'debug', 'info', 'warning', 'error'.
            Level of print out message. The default is info
            (ConsoleLogParm['MsgLevel']).
        """
        self.__name__ = "GA"
        ################################################################
        # Setup the log msg (console) (log file is added below.)
        self.logger = logging.getLogger(__name__)
        if msg_level is None: msg_level = ConsoleLogParm['MsgLevel']
        else:
            # NOTE(review): the second operand of these asserts
            # (print(...)/self.logger.error(...)) is only evaluated on
            # failure, but it returns None, so the AssertionError carries
            # no message; asserts are also stripped under "python -O".
            # The same pattern is used throughout this method.
            assert msg_level in ['debug', 'info', 'warning', 'error'],\
                print("ValueError msg_level must be one of these "+\
                      "[None, 'debug', 'info', 'warning', 'error'].")
            msg_level = MsglevelDict[msg_level]
        self.logger.setLevel(msg_level)
        self.CreateFileHandler = False
        ################################################################
        # Setup input parameter
        if continue_file is not None:
            # Load the GAobject.pickle to continue previous run.
            assert os.path.exists(continue_file),\
                self.logger.error("PathError given continue_file is not "+\
                                  "exist {}.".format(continue_file))
            self.continue_file = continue_file
            self.load_continue_file()
            # Load file will overwrite self.continue_file, so we need to
            # assign it again.
            self.continue_file = continue_file
            # Re-assign fh with mode = "a", appending to the previous
            # GA.log.
            self.logger, self.fh = addLocalLogFile('GA.log', self.logger,\
                                                   self.wd, mode = "a")
            self.logger.info("\n========== Continue ==========\n")
            # Set random seed
            if self.seed is not None:
                np.random.seed(self.seed)
        else:
            # Check all input settings are valid.
            ############################################################
            # Check wd and add GA.log if wd is given.
            self.saveGADataPerIter = saveGADataPerIter
            if wd is None:
                self.wd = None
                if saveGADataPerIter:
                    # Cannot auto-save without a working directory; warn
                    # and disable instead of failing.
                    self.logger.error("ValueError To enable "+\
                                      "saveGADataPerIter and log file, "+\
                                      "valid wd must be given.")
                    self.saveGADataPerIter = False
            else:
                assert os.path.isdir(wd),\
                    self.logger.error("PathError given wd is not exist {}."\
                                      .format(wd))
                self.wd = wd
                # Add local log file
                self.logger, self.fh = addLocalLogFile('GA.log', self.logger,\
                                                       self.wd)
                self.CreateFileHandler = True
            ############################################################
            # Create output related attributions
            self.pop = None
            self.best_var = None
            self.best_minobj = None
            self.report = []
            self.iter = 0
            self.pop_record = {}
            ############################################################
            # Check inputs
            # Dimension
            assert isinstance(dimension, (float, int)),\
                self.logger.error("TypeError dimension must be integer.")
            # Variable types
            assert(variable_type=='bool' or variable_type=='int' or \
                   variable_type=='real' or variable_type=='cate'), \
                self.logger.error("TypeError variable_type must be "+\
                                  "'bool', 'int', 'real', or 'cate'.")
            # parallel options
            assert parallel in [0,1,2],\
                self.logger.error("TypeError parallel must be "+\
                                  "0: no parallel, 1: parallel, "+\
                                  "2: parallel with new sub-working folders.")
            # function
            assert (callable(function)),\
                self.logger.error("TypeError function must be callable.")
            # function arguments
            if parallel == 2:
                # Coupled mode requires the sim function signature to be
                # exactly (var, GA_WD) so each member can run in its own
                # subfolder.
                assert [i for i in signature(function).parameters] == \
                    ['var', 'GA_WD'], self.logger.error("ValueError To "+\
                    "run GA for parallel = 2 (coupling), given sim function "+\
                    "has to contain two input arguments: 'var' (1d array) "+\
                    "and GA_WD, which user should use "+\
                    "RiverwareWrap.createFiles(GA_WD) to create subfolder "+\
                    "in their sim function and conduct the simulation under "+\
                    "this new directory.")
                self.SubfolderPath = os.path.join(self.wd, "AutoCalibration")
                if os.path.isdir(self.SubfolderPath) is not True:
                    os.mkdir(self.SubfolderPath)
                    self.logger.info("Create subfolder AutoCalibration at {}"\
                                     .format(self.SubfolderPath))
            # Check and assign threads input
            if parallel != 0:
                # Max threads number/2
                MaxThreads = int(os.cpu_count()/2)
                if threads is None or threads > MaxThreads:
                    self.NumThreads = MaxThreads
                elif threads < 0: # -1: = MaxThreads
                    self.NumThreads = MaxThreads + 1 + threads
                else:
                    self.NumThreads = threads
                self.NumThreads = int(self.NumThreads)
            else:
                self.NumThreads = 1
            # Check random seed
            assert isinstance(seed, (type(None), int)),\
                self.logger.error("TypeError seed must be integer or None.")
            ############################################################
            # Assign input
            self.dim = int(dimension)
            self.func = function
            self.parallel = parallel
            self.var_index = {}
            if function_timeout is None:
                function_timeout = 86400 # If None, we set timeout=1day
            self.funtimeout = int(function_timeout)
            self.continue_file = continue_file
            self.seed = seed
            ############################################################
            # Set random seed
            if self.seed is not None:
                np.random.seed(self.seed)
            # Assign var_type and var_bound and var_index
            if variable_type_mixed is None:
                # We assign identical type according to variable_type to
                # each variable.
                if variable_type == 'real':
                    self.var_type = np.array([['real']]*self.dim)
                    self.var_index["cate"] = np.array([])
                    self.var_index["int"] = np.array([])
                    self.var_index["real"] = \
                        np.where(self.var_type == 'real')[0]
                else: # 'int', 'bool', 'cate'
                    self.var_type = np.array([['int']]*self.dim)
                    if variable_type == 'cate':
                        self.var_index["cate"] = \
                            np.where(self.var_type == 'int')[0]
                        self.var_index["int"] = np.array([])
                        self.var_index["real"] = np.array([])
                    else:
                        self.var_index["cate"] = np.array([])
                        self.var_index["int"] = \
                            np.where(self.var_type == 'int')[0]
                        self.var_index["real"] = np.array([])
                # Assign var_bound if it is not given
                if variable_boundaries is None:
                    self.var_bound = np.array([[0,1]]*self.dim)
                else:
                    assert isinstance(variable_boundaries, (list,np.ndarray)),\
                        self.logger.error("TypeError variable_boundaries "+\
                                          "must be numpy array or list.")
                    variable_boundaries = np.array(variable_boundaries)
                    assert (variable_boundaries.shape == (self.dim,2)),\
                        self.logger.error("ValueError variable_type_mixed "+\
                                          "must have a shape (dimension, 2).")
                    self.var_bound = variable_boundaries
            else:
                # var types should be defined in variable_type_mixed
                assert isinstance(variable_type_mixed, (list, np.ndarray)),\
                    self.logger.error("TypeError variable_type_mixed must "+\
                                      "be numpy array or list.")
                assert isinstance(variable_boundaries, (list, np.ndarray)),\
                    self.logger.error("TypeError variable_boundaries must "+\
                                      "be numpy array or list.")
                variable_type_mixed = np.array(variable_type_mixed)
                variable_boundaries = np.array(variable_boundaries)
                assert (len(variable_type_mixed) == self.dim),\
                    self.logger.error("ValueError variable_type_mixed must "+\
                                      "have a length equal dimension.")
                assert (variable_boundaries.shape == (self.dim,2)),\
                    self.logger.error("ValueError variable_type_mixed must "+\
                                      "have a shape (dimension, 2).")
                self.var_type = variable_type_mixed
                self.var_bound = variable_boundaries
                self.var_index["cate"] = np.where(self.var_type == 'cate')[0]
                self.var_index["int"] = np.where(self.var_type == 'int')[0]
                self.var_index["real"] = np.where(self.var_type == 'real')[0]
                # Replace cate as int for rest of the calculation
                self.var_type = \
                    np.where(self.var_type=='cate', 'int', self.var_type)
            ############################################################
            # Check algorithm_parameters
            assert set(['max_num_iteration', 'population_size',\
                        'mutation_probability', 'elit_ratio', \
                        'crossover_probability', 'parents_portion', \
                        'crossover_type', 'max_iter_without_improv'])\
                   .issubset(set(algorithm_parameters.keys())),\
                self.logger.error("KeyError Missing keys in the "+\
                                  "algorithm_parameters.")
            self.par = algorithm_parameters
            self.par['population_size'] = int(self.par['population_size'])
            assert (self.par['parents_portion'] <= 1 and \
                    self.par['parents_portion'] >= 0), \
                self.logger.error("ValueError parents_portion must be in "+\
                                  "range [0,1].")
            assert (self.par['mutation_probability'] <= 1 and \
                    self.par['mutation_probability'] >= 0), \
                self.logger.error("ValueError mutation_probability must be "+\
                                  "in range [0,1].")
            assert (self.par['crossover_probability'] <= 1 and \
                    self.par['crossover_probability'] >= 0), \
                self.logger.error("ValueError crossover_probability must be "+\
                                  "in range [0,1].")
            assert (self.par['elit_ratio'] <= 1 and \
                    self.par['elit_ratio'] >= 0), \
                self.logger.error("ValueError elit_ratio must be in "+\
                                  "range [0,1].")
            # NOTE(review): duplicate of the mutation_probability check above.
            assert (self.par['mutation_probability'] <= 1 and \
                    self.par['mutation_probability'] >= 0), \
                self.logger.error("ValueError mutation_probability must be "+\
                                  "in range [0,1].")
            assert (self.par['crossover_type'] == 'uniform' or \
                    self.par['crossover_type'] == 'one_point' or \
                    self.par['crossover_type'] == 'two_point'), \
                self.logger.error("ValueError crossover_type must be "+\
                                  "'uniform', 'one_point', or 'two_point'")
            # Make sure that population_size is properly assigned
            self.par['parent_size'] = int(self.par['parents_portion']\
                                          *self.par['population_size'] )
            trl = self.par['population_size'] - self.par['parent_size']
            if trl % 2 != 0:
                self.par['parent_size'] += 1 # To guarentee even number
            # Make sure that num_elit is properly assigned
            trl = self.par['population_size']*self.par['elit_ratio']
            # At least 1 elite
            if trl < 1 and self.par['elit_ratio'] > 0:
                self.par['num_elit'] = 1
            else:
                self.par['num_elit'] = int(trl) # Round down
            # Make sure that max_num_iteration is properly assigned
            if self.par['max_num_iteration'] is None:
                # Heuristic iteration budget scaled by variable ranges,
                # dimension, and population size.
                self.par['max_num_iteration'] = 0
                for i in range (0, self.dim):
                    if self.var_type[i] == 'int':
                        self.par['max_num_iteration'] += \
                            (self.var_bound[i][1] - self.var_bound[i][0]) \
                            *self.dim*(100/self.par['population_size'])
                    else:
                        self.par['max_num_iteration'] += \
                            (self.var_bound[i][1]-self.var_bound[i][0]) \
                            *50*(100/self.par['population_size'])
                self.par['max_num_iteration'] = \
                    int(self.par['max_num_iteration'])
                # Cap total evaluations at 10 million.
                if (self.par['max_num_iteration'] \
                    *self.par['population_size']) > 10000000:
                    self.par['max_num_iteration'] = \
                        10000000/self.par['population_size']
            else:
                self.par['max_num_iteration'] = \
                    int(self.par['max_num_iteration'])
            # Make sure that max_iter_without_improv is properly assigned
            if self.par['max_iter_without_improv'] == None:
                # max_num_iteration + 1 means the early-stop rule never
                # triggers before the normal iteration limit.
                self.par['max_iter_without_improv'] = \
                    self.par['max_num_iteration'] + 1
            else:
                self.par['max_iter_without_improv'] = \
                    int(self.par['max_iter_without_improv'])
        # Print out the summary of GA object settings.
        self.logger.info("The GA object have been initiated: \n"+"\n" \
                         .join(['{:^23} : {}'.format(keys, values) for \
                               keys,values in self.par.items()]))
        return None
def load_continue_file(self):
"""Load GAobject.pickle """
filepath = self.continue_file
with open(filepath, "rb") as f:
dictionary = pickle.load(f)
#print(dictionary)
# Load back all the previous class attributions.
for key in dictionary:
setattr(self, key, dictionary[key])
self.logger.info("The previous GA object have been loaded back "+\
"and ready to run.")
def save_attribution(self, path):
"""Save GAobject.pickle
Args:
path (str): Save folder directory.
"""
dictionary = self.__dict__.copy()
dictionary.pop('fh', None) # handler cannot be pickled.
dictionary.pop('logger', None) # handler cannot be pickled.
with open(os.path.join(path, "GAobject.pickle"), 'wb') as outfile:
pickle.dump(dictionary, outfile)
def Print(self):
"""Turn the attributions of GA object into dictionary."""
print(self.__dict__)
return self.__dict__
#"""Randomly generate the initial population."""
def initializePop(self, InitialPop=None):
"""Randomly generate the initial population.
Args:
InitialPop (array, optional): Assigned initial population.
InitialPop has to be a 2d array (NumPop, NumPar). NumPop has
to be smaller than the population_size. Defaults to None.
Returns:
None
"""
index_real = self.var_index["real"].astype(int)
index_int = np.concatenate((self.var_index["int"], \
self.var_index["cate"])).astype(int)
pop_size = self.par['population_size']
dim = self.dim
var_bound = self.var_bound
## Create empty arrays
self.pop = np.array([np.zeros(dim + 1)]*pop_size) # +1 for storing obj
self.var = np.zeros(dim)
## Randomly generate the initial variables set for members in the pop.
for p in range(0, pop_size):
for i in index_int:
self.var[i] = np.random.randint(var_bound[i][0], \
var_bound[i][1]+1)
for i in index_real:
self.var[i] = var_bound[i][0] + np.random.random()* \
(var_bound[i][1] - var_bound[i][0])
self.pop[p,:dim] = self.var
self.pop[p, dim] = np.nan # no obj yet
## Replace with given initial value.
if InitialPop is not None:
self.pop[:InitialPop.shape[0],:dim] = InitialPop
return None
    def simPop(self, initialRun=False):
        """Evaluate the objective value of every unevaluated member of self.pop.

        Only rows from parent_size onward are simulated (parents carried over
        from the previous generation already hold objective values);
        initialRun=True evaluates the entire population. Depending on
        self.parallel, evaluation runs as a plain loop (0), with joblib
        threads (1), or with joblib threads plus a per-member subfolder path
        passed to the objective function (2). Afterwards the population is
        sorted ascending by objective (best first) and, if
        self.saveGADataPerIter is set, the object state is pickled so a
        crashed run can be resumed.

        Args:
            initialRun (bool, optional): Evaluate all members instead of only
                the non-parent ones. Defaults to False.

        Returns:
            None
        """
        pop = self.pop.copy()
        if initialRun:
            parent_size = 0
        else:
            parent_size = self.par['parent_size']
        pop_size = self.par['population_size']
        dim = self.dim
        maxIter = self.par['max_num_iteration']
        currentIter = self.iter
        saveGADataPerIter = self.saveGADataPerIter
        funtimeout = self.funtimeout
        function = self.func
        def sim0(X):
            """For loop
            Args:
                X (array): 1d array of parameters.
            Returns:
                float: Objective value.
            """
            def evaluation(): # In order to use func_timeout
                return function(X)
            obj = None
            try:
                obj = func_timeout(funtimeout, evaluation)
            except FunctionTimedOut:
                print("given function is not applicable")
            # NOTE: the assert message expression (which also emits the log
            # entry via logger.error) is only evaluated when obj is still None.
            assert (obj!=None), \
                self.logger.error("FunctionTimedOut After {} seconds delay, "\
                                  .format(str(funtimeout)) + \
                                  "the given function does not provide any "+\
                                  "output.")
            return obj
        def sim1(X):
            """Parallel without creating subfolder
            Args:
                X (array): 1d array of parameters.
            Returns:
                float: Objective value.
            """
            obj = None
            try:
                obj = function(X)
            except:
                # Will not be printed out. (Run in backend)
                print("FunctionError given function is not applicable.")
            return obj
        # For riverware coupling model.
        def sim2(X, wd, iteration, member):
            """Parallel with assigned copied subfolder path
            Args:
                X (array): 1d array of parameters.
                wd (str): Concurent simulation working folder.
                iteration (int): Iteration (generation).
                member (int): Member
            Returns:
                float: Objective value.
            """
            SubFolderName = os.path.join(wd,"Iter{}_{}"\
                                         .format(iteration, member))
            obj = None
            try:
                obj = function(X, SubFolderName)
            except FunctionTimedOut:
                # Will not be printed out. (Run in backend)
                print("FunctionError given function is not applicable.")
            return obj
        ################################################################
        # Parallel 0: Simple for loop. No parallelization
        if self.parallel == 0:
            for k in tqdm(range(parent_size, pop_size, 1),\
                          desc = "Iter {}/{}".format(currentIter, maxIter)):
                obj = sim0(pop[k, :dim])
                pop[k, dim] = obj
        # Parallel 1: User defined function is run in parallel.
        # Only use this when no working folder is needed.
        elif self.parallel == 1:
            self.logger.info("Iter {}/{} Start parallel simulation with {} "\
                             .format(currentIter, maxIter, self.NumThreads)+\
                             "threads.")
            ParallelResults = Parallel(n_jobs = self.NumThreads, \
                                       prefer="threads", \
                                       timeout=funtimeout)\
                                       (delayed(sim1)(X=pop[k, :dim]) \
                                       for k in range(parent_size, pop_size, 1))
            # Collect results
            for k in range(parent_size, pop_size, 1):
                pop[k, dim] = ParallelResults[k - parent_size]
        # Parallel 2: User defined function is run in parallel with
        # assigned sub-working folder name. User can copy the necessary
        # files into this folder and run the simulation in the isolated
        # environment.
        elif self.parallel == 2:
            SubfolderPath = self.SubfolderPath
            self.logger.info("Iter {}/{} Start parallel simulation with {} "\
                             .format(currentIter, maxIter, self.NumThreads)+\
                             "threads.")
            ParallelResults = Parallel(
                n_jobs = self.NumThreads, prefer="threads",
                timeout=funtimeout)\
                (delayed(sim2)(X=pop[k, :dim], wd=SubfolderPath,
                               iteration=currentIter, member=k) \
                for k in range(parent_size, pop_size, 1))
            # Collect results
            for k in range(parent_size, pop_size, 1):
                pop[k, dim] = ParallelResults[k - parent_size]
        # Sorted by obj (last index) to an order of low obj (good) to
        # high obj (bad).
        pop = pop[pop[:, dim].argsort()]
        self.pop = pop
        # Save current iteration in case program crush.
        # If crush down reload the saved pickle file and continue the
        # run.
        if saveGADataPerIter:
            self.save_attribution(self.wd)
        self.logger.info("Iter {}/{} done.".format(currentIter, maxIter))
        return None
    def runGA(self, plot = True, InitialPop = None, start_from_iter = None):
        """Run the genetic algorithm.

        Evolves self.pop generation by generation (elitism + roulette-wheel
        parent selection + crossover + mutation) until max_num_iteration is
        reached or the best objective fails to improve for
        max_iter_without_improv consecutive generations.

        Args:
            plot (bool, optional): Plot the progressive plot and
                save at GA folder. Defaults to True.
            InitialPop (array, optional): Assigned initial population.
                InitialPop has to be a 2d array (NumPop, NumPar). NumPop has
                to be smaller than the population_size. Defaults to None.
            start_from_iter (int, optional): Assign starting iteration
                (only for continuous run). Defaults to None.
        Returns:
            None
        """
        # Start timing
        self.start_time = datetime.now()
        # Initial Population (if it is to continue from last run with
        # given pickle file, this step will be skipped.)
        if self.continue_file is None:
            self.mniwi_counter = 0 # max_iter_without_improv
            # Randomly generate self.pop
            self.initializePop(InitialPop = InitialPop)
            self.pop_record["Iter0"] = self.pop
            # Calculate obj for members in self.pop
            self.simPop(initialRun=True)
            self.pop_record["Iter0"] = self.pop
        ################################################################
        # Start from recorded specific iteration
        # So the GA will use this iteration as intial "result" to form
        # the next generation. Simulation happens at iteration + 1.
        if start_from_iter is not None and start_from_iter <= self.iter and \
            start_from_iter != 0:
            # Clean report and assign pop and iter
            dim = self.dim
            self.report = self.report[:start_from_iter]
            self.pop = self.pop_record["Iter{}".format(start_from_iter)]
            self.iter = start_from_iter
        # Store the best var and obj
        dim = self.dim
        self.best_minobj = self.pop[0, dim].copy()
        self.best_var = self.pop[0, :dim].copy()
        self.report.append(self.best_minobj) # record the history obj
        # Start the while loop for evolution
        pop_size = self.par['population_size']
        parent_size = self.par['parent_size']
        num_elit = self.par['num_elit']
        maxIter = self.par['max_num_iteration']
        mniwi = self.par['max_iter_without_improv']
        prob_cross = self.par['crossover_probability']
        cross_type = self.par['crossover_type']
        self.iter += 1 # Iteration (generation of the population)
        ################################################################
        while self.iter <= maxIter and self.mniwi_counter <= mniwi:
            pop = self.pop.copy()
            # Normalizing objective function for calculating prob
            normobj = np.zeros(pop_size)
            minobj = pop[0, dim]
            if minobj < 0: # to nonnegative values
                normobj = pop[:, dim] + abs(minobj)
            else:
                normobj = pop[:, dim]
            maxnorm = np.amax(normobj)
            # The lowest obj has highest fitness. +1 to avoid 0.
            normobj = maxnorm-normobj + 1
            # Calculate probability
            sum_normobj = np.sum(normobj)
            prob = np.zeros(pop_size)
            prob = normobj/sum_normobj
            cumprob = np.cumsum(prob)
            # Select parents
            ## Create empty parents
            parents = np.array([np.zeros(dim + 1)]*parent_size)
            ## First fill with elites
            for k in range(0, num_elit):
                parents[k] = pop[k].copy()
            ## Then fill the rest by wheel withdrawing.
            ## (fitness-proportionate roulette: searchsorted on the CDF)
            for k in range(num_elit, parent_size):
                index = np.searchsorted(cumprob,np.random.random())
                parents[k] = pop[index].copy()
            ## From the selected parents, we further randomly choose
            ## those who actually reproduce offsprings
            ef_par_list = np.array([False]*parent_size)
            par_count = 0
            # At least 1 parents generate be selected
            while par_count == 0:
                for k in range(0, parent_size):
                    if np.random.random() <= prob_cross:
                        ef_par_list[k] = True
                        par_count += 1
            ## Effective parents
            ef_parents = parents[ef_par_list].copy()
            # New generation
            ## Create empty new gen pop
            pop = np.array([np.zeros(dim + 1)]*pop_size)
            ## First, fill with those selected parents without any
            ## modification
            for k in range(0, parent_size):
                pop[k] = parents[k].copy()
            ## Then, fill the rest with crossover and mutation process
            ## NOTE(review): writes rows k and k+1 per step, so
            ## pop_size - parent_size is assumed even — confirm the setup
            ## enforces this (parent_size parity adjustment).
            for k in range(parent_size, pop_size, 2):
                r1 = np.random.randint(0, par_count)
                r2 = np.random.randint(0, par_count)
                parent_var1 = ef_parents[r1, :dim].copy()
                parent_var2 = ef_parents[r2, :dim].copy()
                # Crossover
                children = self.cross(parent_var1, parent_var2, cross_type)
                child1 = children[0].copy()
                child2 = children[1].copy()
                # Mutation
                child1 = self.mut(child1) # re-generate vars
                ## re-generate within parents range except cate type var
                child2 = self.mutmiddle(child2, parent_var1, parent_var2)
                ## Only copy the variables. We haven't calculate obj
                pop[k, :dim] = child1.copy() # Assign var
                pop[k, dim] = np.nan # No obj yet
                pop[k+1, :dim] = child2.copy()
                pop[k+1, dim] = np.nan
            self.pop = pop # Assign new population ready for simulation.
            self.pop_record["Iter{}".format(self.iter)] = self.pop
            # Calculate objs for pop
            # Here is the safe point if wd is assigned and
            # saveGADataPerIter = True
            self.simPop() # Will update self.pop
            self.pop_record["Iter{}".format(self.iter)] = self.pop
            if self.pop[0, dim] >= self.best_minobj:
                self.mniwi_counter += 1
                self.report.append(self.best_minobj)
                if self.mniwi_counter > mniwi:
                    self.logger.warning("Reach the max_iter_without_improv. "+\
                                        "GA stop.")
            else:
                self.best_minobj = self.pop[0, dim].copy()
                self.best_var = self.pop[0, :dim].copy()
                # record the history obj
                self.report.append(self.best_minobj)
            self.end_time = datetime.now()
            self.duration = self.end_time - self.start_time
            # Log current result
            # NOTE(review): 'Improve rate' divides by report[-2]; a zero
            # previous-best objective yields inf/nan here — confirm
            # objectives are expected to be nonzero.
            current_result = {'Variable': self.best_var,
                              'Objective': self.best_minobj,
                              'Improve rate': (self.report[-1] - \
                                              self.report[-2])/self.report[-2],
                              'Duration': self.duration}
            self.logger.info("\n===========> Results (Iter {}) <===========\n"\
                             .format(self.iter) + \
                             "\n".join(['{:^15} : {}'.format(keys, values) \
                                        for keys,values in current_result.items()]) )
            self.logger.info("Obj records: {}\n".format(self.report))
            if plot and self.parallel == 2:
                self.plotReport()
            # Next iteration
            self.iter += 1 # Iteration (generation of the population)
        # End while
        ################################################################
        # Final report
        self.end_time = datetime.now()
        self.duration = self.end_time - self.start_time
        self.output_dict = {'Variable': self.best_var,
                            'Objective': self.best_minobj,
                            'Duration': self.duration,
                            'Iteration': self.iter}
        self.logger.info("\n=============> Results <=============\n" + \
                         "\n".join(['{:^15} : {}'.format(keys, values)\
                                    for keys,values in self.output_dict.items()]))
        self.output_dict["ObjRecords"] = self.report
        # Remove the created file handler.
        if self.CreateFileHandler:
            self.logger = removeLocalLogFile(self.logger, self.fh)
        if plot:
            self.plotReport()
        return None
def plotReport(self):
"""Plot progressive report."""
re = np.array(self.report)
fig, ax = plt.subplots()
ax.plot(re)
ax.set_xlabel('Iteration')
ax.set_ylabel('Objective function (minization)')
ax.set_title('Genetic Algorithm')
props = dict(boxstyle='round', facecolor='wheat', alpha=0.85)
string = "Min objective: {}\nDuration: {}\nIteration: {}" \
.format(round(self.best_minobj, 3), self.duration,
self.iter)
ax.annotate(string, xy= (0.6, 0.95), xycoords='axes fraction',
verticalalignment='top', horizontalalignment='left',
transform=ax.transAxes, fontsize=9, bbox = props)
plt.savefig(os.path.join(self.wd, "GA_report.png"), dpi = 500)
def cross(self, x, y, cross_type):
"""Uniform crossover."""
ofs1 = x.copy()
ofs2 = y.copy()
dim = self.dim
if cross_type == 'one_point':
rnd = np.random.randint(0, dim)
for i in range(0,rnd):
ofs1[i] = y[i].copy()
ofs2[i] = x[i].copy()
if cross_type == 'two_point':
ran1 = np.random.randint(0, dim)
ran2 = np.random.randint(ran1, dim)
for i in range(ran1,ran2):
ofs1[i]=y[i].copy()
ofs2[i]=x[i].copy()
if cross_type == 'uniform':
for i in range(0, dim):
rnd = np.random.random()
if rnd <0.5:
ofs1[i]=y[i].copy()
ofs2[i]=x[i].copy()
return np.array([ofs1,ofs2])
def mut(self, x):
"""Mutation."""
prob_mut = self.par['mutation_probability']
index_real = self.var_index["real"].astype(int)
index_int = np.concatenate((self.var_index["int"], \
self.var_index["cate"])).astype(int)
for i in index_int:
rnd = np.random.random()
if rnd < prob_mut:
x[i]=np.random.randint(self.var_bound[i][0],\
self.var_bound[i][1]+1)
for i in index_real:
rnd = np.random.random()
if rnd < prob_mut:
x[i]=self.var_bound[i][0]+np.random.random()* \
(self.var_bound[i][1]-self.var_bound[i][0])
return x
def mutmiddle(self, x, p1, p2):
"""Mutation bounded by parants."""
prob_mut = self.par['mutation_probability']
index_real = self.var_index["real"].astype(int)
index_int = self.var_index["int"].astype(int)
index_cate = self.var_index["cate"].astype(int)
for i in index_int:
rnd = np.random.random()
if rnd < prob_mut:
if p1[i] < p2[i]:
x[i] = np.random.randint(p1[i],p2[i])
elif p1[i] > p2[i]:
x[i] = np.random.randint(p2[i],p1[i])
else:
x[i] = np.random.randint(self.var_bound[i][0],\
self.var_bound[i][1]+1)
for i in index_cate: # mutmiddle() is not appliable.
rnd = np.random.random()
if rnd < prob_mut:
x[i] = np.random.randint(self.var_bound[i][0],\
self.var_bound[i][1]+1)
for i in index_real:
rnd = np.random.random()
if rnd < prob_mut:
if p1[i] < p2[i]:
x[i] = p1[i]+np.random.random()*(p2[i] - p1[i])
elif p1[i] > p2[i]:
x[i] = p2[i] + np.random.random()*(p1[i] - p2[i])
else:
x[i] = self.var_bound[i][0]+np.random.random()*\
(self.var_bound[i][1] - self.var_bound[i][0])
return x
########################################################################
########################################################################
class GADataConverter(object):
    """Flatten a list of dataframes / numpy arrays into the single 1d
    variable vector that GA optimizes, and convert such a vector back
    into the original structures."""
    def __init__(self, msg_level=None):
        """The data converter for GA.
        Args:
            msg_level (str, optional): Message level. Defaults to None.
        Returns:
            object
        """
        # Set logger
        self.logger = logging.getLogger(__name__)
        if msg_level is None: msg_level = logging.INFO
        self.logger.setLevel(msg_level)
        # To check the original data has been converted to var before convert
        # var back.
        self.orgpar_convert = False
        return None
    # NOTE: the string below is an orphaned doc string (a bare expression in
    # the class body, evaluated and discarded at class creation); the real
    # documentation lives in Covert2GAArray's docstring.
    """Convert a list of 1d or 2d array or df to 1d array.
        order: "C", "F", "A". The Default is "C".
        #"C" means to flatten in row-major (C-style) order.
        #"F" means to flatten in column-major (Fortran- style) order.
        #"A" means to flatten in column-major order if a is Fortran
        contiguous in memory, row-major order otherwise.
    """
    def Covert2GAArray(self, dataList, order="C"):
        """Convert a list of 1d or 2d arrays or dataframes to 1d array.
        Args:
            dataList (list): List of 1d or 2d arrays or dataframes.
            order (str, optional): "C" means to flatten in row-major
                (C-style) order.
                "F" means to flatten in column-major (Fortran- style) order.
                "A" means to flatten in column-major order if a is Fortran .
                Defaults to "C".
        Returns:
            array: 1d array.
        """
        assert isinstance(dataList, list),\
            self.logger.error("dataList needs to be a list.")
        for item in dataList:
            assert isinstance(item, (np.ndarray, pd.DataFrame)), \
                self.logger.error("Elements in the dataList have to be "+\
                                  "either array or dataframe.")
        # Bookkeeping consumed by GAArray2OrgPar to reverse the flattening:
        #   orgpar_shape: shape of each input item,
        #   orgpar_type : per-position descriptor (dict of col/ind for a
        #                 DataFrame, np.ndarray for a 2d array, "1d"),
        #   orgpar_index: cumulative start/end offsets in the flat vector.
        self.orgpar_shape = []
        self.orgpar_type = {}
        self.orgpar_order = order
        self.orgpar_convert = True
        self.orgpar_index = [0]
        var = []
        for i, data in enumerate(dataList):
            if len(data.shape) == 2:
                if isinstance(data, pd.DataFrame):
                    self.orgpar_shape.append(data.shape)
                    self.orgpar_type[i] = {}
                    self.orgpar_type[i]["col"] = list(data.columns)
                    self.orgpar_type[i]["ind"] = list(data.index)
                    var = var + list( data.to_numpy().flatten(order) )
                    self.orgpar_index.append(self.orgpar_index[-1] + \
                                             self.orgpar_shape[-1][0]* \
                                             self.orgpar_shape[-1][1])
                elif isinstance(data, np.ndarray):
                    self.orgpar_shape.append(data.shape)
                    self.orgpar_type[i] = np.ndarray
                    var = var + list( data.flatten(order) )
                    self.orgpar_index.append(self.orgpar_index[-1] + \
                                             self.orgpar_shape[-1][0]* \
                                             self.orgpar_shape[-1][1])
                else:
                    print("error")
            elif len(data.shape) == 1:
                self.orgpar_shape.append(data.shape)
                self.orgpar_type[i] = "1d"
                var = var + list( data.flatten(order) )
                self.orgpar_index.append(self.orgpar_index[-1] + len(data))
        return var
    def GAArray2OrgPar(self, var, setting=None):
        """Convert 1d array back to original dfs and arrays.
        Args:
            var (array): 1d array.
            setting (dict, optional): Setting of GADataConverter.
                Defaults to None.
        Returns:
            [list]: List of original data formats.
        """
        if setting is None:
            # Without an explicit setting we rely on the bookkeeping
            # recorded by a previous Covert2GAArray call on this object.
            assert self.orgpar_convert, \
                self.logger.error("ValueError The function Covert2GAArray() "+\
                                  "has to be exercuted first or provide "+\
                                  "setting dictionary.")
            orgpar_type = self.orgpar_type
            order = self.orgpar_order
            orgpar_index = self.orgpar_index
            orgpar_shape = self.orgpar_shape
        else:
            assert set(["orgpar_type","orgpar_order","orgpar_index", \
                        "orgpar_shape"]).issubset(setting.keys()), \
                self.logger.error("KeyError Setting dictionary has to "+\
                                  "contain keys: {}".format(["orgpar_type",\
                                                             "orgpar_order",\
                                                             "orgpar_index",\
                                                             "orgpar_shape"]))
            orgpar_type = setting["orgpar_type"]
            order = setting["orgpar_order"]
            orgpar_index = setting["orgpar_index"]
            orgpar_shape = setting["orgpar_shape"]
        self.orgParList = []
        # Rebuild each item from its slice of the flat vector; the recorded
        # per-position descriptor decides the output type.
        for i, v in orgpar_type.items():
            if isinstance(v, dict):
                df = np.reshape(var[orgpar_index[i]:orgpar_index[i+1]], \
                                orgpar_shape[i], order)
                df = pd.DataFrame(df)
                df.columns = v['col']
                df.index = v['ind']
                self.orgParList.append(df)
            elif v == np.ndarray:
                self.orgParList.append(np.reshape(\
                                    var[orgpar_index[i]:orgpar_index[i+1]],\
                                    orgpar_shape[i], order))
            elif v == "1d":
                self.orgParList.append(\
                                list(var[orgpar_index[i]:orgpar_index[i+1]]))
        return self.orgParList
    def outputSetting(self):
        """Output setting dictionary of GADataConverter"""
        Setting = {"orgpar_type": self.orgpar_type,
                   "orgpar_order": self.orgpar_order,
                   "orgpar_index": self.orgpar_index,
                   "orgpar_shape": self.orgpar_shape}
        return Setting
| [
"philip928lin@gmail.com"
] | philip928lin@gmail.com |
aa1ea3887e798c65c229709486b03915534388dc | 074d01815f99f7e9634296e92479482bea58c26c | /backend/login/migrations/0016_auto_20191127_0137.py | 4e65ea562ea5dd0c576ff6d359ae6052eed8b798 | [] | no_license | 3296f19temple/Facial-Recognition-Attendance | d7e6f4dcc17698fe0fd718ef0eef59067de10851 | e80e7be35d4f292acb7076115e16a18f34aec75f | refs/heads/master | 2020-08-05T06:34:22.022238 | 2019-12-13T00:30:05 | 2019-12-13T00:30:05 | 212,431,723 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # Generated by Django 2.2.7 on 2019-11-27 01:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.7; do not hand-edit applied migrations.
    # Applies after 0015 and alters Students.studentPicture into a FileField
    # uploading into "student_pic" (default value: 1).
    dependencies = [
        ('login', '0015_auto_20191127_0131'),
    ]
    operations = [
        migrations.AlterField(
            model_name='students',
            name='studentPicture',
            field=models.FileField(default=1, upload_to='student_pic'),
        ),
    ]
| [
"tyyen5@gmail.com"
] | tyyen5@gmail.com |
110281da96470f0caf9566c89d1a43d65f197812 | 637c4892287929583bdadd8630d5353dd78dc82c | /vendor/otf2/test/python/test_otf2_program.py | c76400699bfb4e7268d7f0f631306badf3231fbe | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | readex-eu/readex-scorep | 6819f074bba6b4fe9d6de07cf7037d2829cbbccf | 38d7b84145f229b44670656d59f84faa51007ced | refs/heads/master | 2020-03-27T01:04:12.730561 | 2018-08-22T14:42:37 | 2018-08-22T14:42:37 | 145,679,275 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | py | import os
import os.path
import unittest
from shutil import rmtree
from tempfile import mkdtemp
import time
import otf2
# Timer ticks per second used when writing the trace.
TIMER_GRANULARITY = 1000000


def t():
    """Return the current wall-clock time in TIMER_GRANULARITY ticks."""
    now = time.time()
    return int(round(now * TIMER_GRANULARITY))
class TestOTF2Program(unittest.TestCase):
    """Round-trip smoke test: write a minimal OTF2 trace containing
    ProgramBegin/ProgramEnd, rewrite it event-by-event, and read the
    rewritten archive back."""

    def setUp(self):
        # Run each test inside a fresh scratch directory created next to
        # the original working directory.
        self.old_cwd = os.getcwd()
        self.tmp_dirname = mkdtemp(
            prefix=os.path.basename(os.path.abspath(__file__))[:-3] + '_tmp',
            dir=self.old_cwd)
        self.orig_trace = os.path.join(self.tmp_dirname, "orig")
        self.retrace = os.path.join(self.tmp_dirname, "retrc")
        os.mkdir(self.orig_trace)
        os.mkdir(self.retrace)
        os.chdir(self.tmp_dirname)

    def tearDown(self):
        os.chdir(self.old_cwd)
        # When V is set to a non-empty value, keep the scratch directory
        # for inspection and print its location instead of deleting it.
        if 'V' in os.environ and os.environ['V'] != '':
            print(self.tmp_dirname)
        else:
            rmtree(self.tmp_dirname)

    def generate_trace(self, archive_name):
        """Create a tiny trace holding one ProgramBegin/ProgramEnd pair."""
        with otf2.writer.open(archive_name,
                              timer_resolution=TIMER_GRANULARITY) as trace:
            root = trace.definitions.system_tree_node("Root", parent=None)
            process = trace.definitions.location_group(
                "Process", system_tree_parent=root)
            parameter = trace.definitions.parameter("Foo")
            writer = trace.event_writer("thread", group=process)
            writer.program_begin(t(), "foo", ["arg0", "arg1"])
            writer.program_end(t(), 0)

    def read_trace(self, archive_name):
        """Open the archive to prove it is readable."""
        with otf2.reader.open(archive_name) as trace:
            # TODO check something
            pass

    def rewrite_trace(self, old_anchor_path, new_archive_path):
        """Copy every event of the old trace into a new archive."""
        with otf2.reader.open(old_anchor_path) as trace_reader:
            with otf2.writer.open(new_archive_path,
                                  definitions=trace_reader.definitions) as write_trace:
                for location, event in trace_reader.events:
                    writer = write_trace.event_writer_from_location(location)
                    writer(event)

    def test_rewrite(self):
        self.generate_trace(self.orig_trace)
        self.rewrite_trace(os.path.join(self.orig_trace, "traces.otf2"), self.retrace)
        self.read_trace(os.path.join(self.retrace, "traces.otf2"))
# Allow running this test file directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
| [
"robert.schoene@tu-dresden.de"
] | robert.schoene@tu-dresden.de |
425aeba47f408fb5ecdf65c81cdbd50bb6d27ccf | b067058811b9c5ca7e2ae2f76e66e4dfa5875db3 | /utils.py | a38370858ca935f82bacbe7654b6029cf3151bac | [] | no_license | davidfit-cisco/RegistrationAnalysisTool | bbabad3e9f0e79b41ca1cb1c99f9e60fe7d561b8 | 052954c1d0c4fa2f181397be74839c767d42e7f7 | refs/heads/main | 2023-08-06T15:55:16.056168 | 2021-09-28T10:11:55 | 2021-09-28T10:11:55 | 377,181,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | import pprint
import re
from calendar import month_name
from copy import deepcopy
# Three-letter abbreviations for the twelve months ("Jan" ... "Dec").
short_month_names = [month[:3] for month in month_name[1:]]


def is_date(cell):
    """Return True when *cell* starts with a three-letter month
    abbreviation (case-insensitive), e.g. "Jan 5".

    Fix: the original pattern was built with '|^'.join, producing
    "Jan|^Feb|..." — only the first month lacked the anchor, so "Jan"
    matched anywhere in the string while every other month had to be at
    the start. All alternatives are now anchored consistently.
    """
    month_pattern = '|'.join('^' + month for month in short_month_names)
    return bool(re.search(month_pattern, cell, re.IGNORECASE))
def is_registered(cell):
    """True when the cell text contains 'REG' (case-insensitive)."""
    return bool(re.search(r'REG', cell, re.IGNORECASE))
def is_unregistered(cell):
    """True when the cell mentions neither 'REG' nor 'AGE' (case-insensitive)."""
    return not re.search(r'REG|AGE', cell, re.IGNORECASE)
def add_date_to_data(index, cell, main_data):
    """Register column *index* under the "<Month> <day>" key parsed from *cell*.

    Creates a fresh per-day bucket (deep-copied so the nested dicts/lists
    are not shared between dates) on first sight of a date, then appends
    the column index to that date's "columns" list.
    """
    per_day_template = {
        "Always": {},
        "Sometimes": {},
        "Never": {},
        "columns": []
    }
    month = re.search(r'[a-zA-Z]+', cell).group()
    day = re.search(r'\d+', cell).group()
    key = f'{month} {day}'
    if key not in main_data:
        main_data[key] = deepcopy(per_day_template)
    main_data[key]["columns"].append(index)
class LoggingMiddleware(object):
    """WSGI middleware that pretty-prints each request environ and the
    response status/headers to the request's wsgi.errors stream."""

    def __init__(self, app):
        self._app = app

    def __call__(self, env, resp):
        errorlog = env['wsgi.errors']
        pprint.pprint(('REQUEST', env), stream=errorlog)

        def logged_resp(status, headers, *args):
            # Log the response line before delegating to the real callable.
            pprint.pprint(('RESPONSE', status, headers), stream=errorlog)
            return resp(status, headers, *args)

        return self._app(env, logged_resp)
def is_open_dg_store(closed_stores, column_indexes, row):
    """True for an open (not in *closed_stores*), non-lab Dollar General row."""
    store_name = row[column_indexes["name1"]]
    banner = row[column_indexes["name2"]]
    if "lab" in store_name.lower():
        return False
    if "Dollar General" not in banner:
        return False
    return store_name not in closed_stores
def is_tcp_store(column_indexes, row, tcp_stores=None):
    """True when the row's store is a TCP store.

    An explicit *tcp_stores* membership collection takes precedence;
    otherwise a 'tcp?' column is consulted when present; otherwise False.
    """
    if tcp_stores is not None:
        return row[column_indexes["name1"]] in tcp_stores
    if "tcp?" in column_indexes:
        return "tcp" in row[column_indexes["tcp?"]].lower()
    return False
| [
"davidfit@cisco.com"
] | davidfit@cisco.com |
dd7ec3a636e971e94dd1445bceff79bb3a823ea9 | 294f66cdc65ee669c139365effb69450b41ec607 | /django_blog/users/models.py | 29021da5a98fb7dc036316c03a8087b3923ede3c | [] | no_license | GOUTHAM183/TechFanatic | 956d3474d63d8dc085df80dc0b3137f1ba229b9e | ad2c2b93f630b6ec55673c2502a118687238444e | refs/heads/main | 2023-05-04T19:26:19.731211 | 2021-05-27T07:18:40 | 2021-05-27T07:18:40 | 371,281,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | from django.db import models
from django.contrib.auth.models import User
from PIL import Image
# Create your models here.
class Profile(models.Model):
    """One-to-one user profile holding an avatar image, shrunk to at most
    300x300 pixels on save."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    image = models.ImageField(default="default.jpg", upload_to="profile_pics")

    def __str__(self):
        return f"{self.user.username} Profile"

    def save(
        self, force_insert=False, force_update=False, using=None, update_fields=None
    ):
        # Persist first, then shrink an oversized avatar in place on disk.
        super().save(force_insert, force_update, using, update_fields)
        picture = Image.open(self.image.path)
        if picture.height > 300 or picture.width > 300:
            picture.thumbnail((300, 300))
            picture.save(self.image.path)
| [
"56498436+GOUTHAM183@users.noreply.github.com"
] | 56498436+GOUTHAM183@users.noreply.github.com |
e4910e73c04302fba1ca073f461f43827a177146 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03067/s724811425.py | b3ebbb168ee4d506128efbef3cb4cafbd8f7d694 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | a,b,c=map(int,input().split())
# "Yes" iff c lies strictly between a and b (either ordering).
print(["No","Yes"][a>c>b or a<c<b])
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b2f32390a956f08986bc842676dfdaa75981f893 | 27cf4886dda866bc4a47f5d2d23b35fb85e82a39 | /code/resources/user.py | c9d228ca1f9ea639ffb9db8f1e6f4dedcc2448b8 | [] | no_license | texnder/flask-rest-api-tutorials | b192ae10403de972ac81edcfdd3e1d2b7d47f2d1 | bf20c5ecb7423d4e335608f980a2ed6a9bc1b0d4 | refs/heads/master | 2023-03-08T07:58:41.005025 | 2021-02-07T05:31:38 | 2021-02-07T05:31:38 | 334,980,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,031 | py | from flask_restful import Resource, reqparse
from werkzeug.security import safe_str_cmp
from models.user import UserModel
from blacklist import BLACKLIST
# use brackets while import is long
from flask_jwt_extended import (
jwt_required,
jwt_refresh_token_required,
get_jwt_identity,
get_raw_jwt,
create_access_token,
create_refresh_token
)
# Module-private (leading underscore) request parser shared by the
# register/login endpoints; both fields are required strings.
_parse_user_data = reqparse.RequestParser()
_parse_user_data.add_argument("username",
    type=str,
    required=True,
    help="field cannot be empty"
)
_parse_user_data.add_argument("password",
    type=str,
    required=True,
    help="field cannot be empty"
)
class UserRegister(Resource):
    """Registration endpoint: create a new user account."""

    def post(self):
        args = _parse_user_data.parse_args()
        # Reject duplicate usernames up front.
        if UserModel.find_by_username(args["username"]):
            return {"message": "'{}' already exists".format(args['username'])}, 400
        new_user = UserModel(**args)
        new_user.save_to_db()
        return {"message": "User created successfully"}, 201
class User(Resource):
    """Fetch or delete a single user by id (classmethods: no per-request
    state is needed)."""

    @classmethod
    def get(cls, user_id):
        found = UserModel.find_by_id(user_id)
        if not found:
            return {"message": "user not found!!"}, 404
        return found.json(), 200

    @classmethod
    def delete(cls, user_id):
        found = UserModel.find_by_id(user_id)
        if not found:
            return {"message": "user not found!!"}, 404
        found.delete_from_db()
        return {"message": "user deleted!!"}, 200
class UserLogin(Resource):
    """Login endpoint: verify credentials and issue JWT tokens."""

    def post(self):
        creds = _parse_user_data.parse_args()
        user = UserModel.find_by_username(creds["username"])
        # Same check the old `authenticate` function used to perform.
        if user and safe_str_cmp(user.password, creds["password"]):
            # A fresh access token plus a refresh token for later renewals.
            return {
                "access_token": create_access_token(identity=user.id, fresh=True),
                "refresh_token": create_refresh_token(user.id),
            }, 200
        return {"message": "invalid credentials!!"}, 401
class UserLogout(Resource):
    """Logout endpoint: blacklist the current access token."""

    @jwt_required
    def post(self):
        token_id = get_raw_jwt()["jti"]  # jti: unique identifier of this JWT
        user_id = get_jwt_identity()
        # Blacklisting the jti revokes this token on all future requests,
        # without having to ship the raw JWT around.
        BLACKLIST.add(token_id)
        return {"message": "User id ='{}' successfully logged out.".format(user_id)}, 200
class TokenRefresh(Resource):
    """Refresh endpoint: issue a non-fresh access token from a refresh
    token, e.g. when a user returns after the access token expired."""

    @jwt_refresh_token_required
    def post(self):
        current_user = get_jwt_identity()
        refreshed = create_access_token(identity=current_user, fresh=False)
        return {"access_token": refreshed}, 200
| [
"inderjeetchohana1431996@gmail.com"
] | inderjeetchohana1431996@gmail.com |
eedede5b1c8b1b996365fbd192e649236670552c | e14f405112856c9c2205581bd2fcfbff9b43729c | /pyapi/dictrev04.py | 924909526a5df232c5fcd26fb7723ed2cb7036d1 | [] | no_license | GinVill/mycode | 6b2b74306a5636cee0f300f6df1b2328c56dca51 | c424f771ac24c2a6bf82c08a20f908c467a5b96a | refs/heads/main | 2023-02-17T19:22:40.190947 | 2021-01-15T20:31:05 | 2021-01-15T20:31:05 | 326,742,670 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | #!/usr/bin/python3
def main():
    """Tutorial demo contrasting dict methods with list methods."""
    vendors = {'cisco': True, 'juniper': False, 'arista': True, 'netgear': True}
    customers = ['acme', 'globex corporation', 'soylent green', 'initech', 'umbrella corporation']

    ## dict is the built-in dictionary type (and would be a terrible
    ## variable name); show every method it offers.
    print(dir(dict))
    # ['clear', 'copy', 'fromkeys', 'get', 'items', 'keys', 'pop', 'popitem',
    #  'setdefault', 'update', 'values']

    ## list is the built-in list type; show its methods for comparison.
    print(dir(list))
    # ['append', 'clear', 'copy', 'count', 'extend', 'index', 'insert', 'pop',
    #  'remove', 'reverse', 'sort']

    # Some of the code below this line works, and some does not.
    # Uncomment the code that works
    # ----------------------------------------------------------
    # customers.keys()                       # lists have no .keys()
    print(vendors.get('juniper'))
    # customers.get('umbrella corporation')  # lists have no .get()
    # customers.update('nsx')                # lists have no .update()
    vendors.update({'cisco': False})
    print(vendors)
    # vendors.sort()                         # dicts have no .sort()
# Run the demo only when executed as a script.
if __name__ == "__main__":
    main()
| [
"ginayvillegas@gmail.com"
] | ginayvillegas@gmail.com |
7571f1f889ad6660273c9a9f91afc3087397af3c | 794db286b74e1f74649cbd286a23986ebbc17153 | /celeb_dob_project/settings.py | 2d8466feb0beaabd6bc2a952ee462ded3d58a606 | [] | no_license | CoranC/Celeb-Dob-Django | 0bc0faf645f659ce02c8b0eef194303dfd8db7ff | 0212e907638751e841f83f79525454b3cc1e6b23 | refs/heads/master | 2021-01-12T04:46:40.048044 | 2017-01-01T19:18:18 | 2017-01-01T19:18:18 | 77,792,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | """
Django settings for celeb_dob_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'lo*u^3yxx$yvt5!c+-^q)5y4=t$sbbvr9iw@q0*evbwce4o%%n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Empty host list is only acceptable while DEBUG is True.
ALLOWED_HOSTS = []
# Recipients of error-notification emails.
ADMINS = (
    ('Coran Corbett', 'coranc@gmail.com'),
)
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'celeb_dob',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'celeb_dob_project.urls'
WSGI_APPLICATION = 'celeb_dob_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Default: file-backed SQLite database in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.join(BASE_DIR,'static')
STATICFILES_DIRS = (
    STATIC_PATH,
)
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
TEMPLATE_DIRS = [
    TEMPLATE_PATH,
]
| [
"coranc@gmail.com"
] | coranc@gmail.com |
43b9f08dec15ea8bafdc95324884d16fe44702c0 | 062e59a68ecc83fec0bfe15e081bcf001909bf67 | /TestModel/migrations/0004_kddcuptest_user.py | 76315717fd1a40d33a56aa53c351d5c575dd98a8 | [] | no_license | falouds/Django | c6b666e20ed24263c8bf8f67979fcd044abe0f43 | f9aac0f76cb0e8eb08033c05fe3c84895d6f0b81 | refs/heads/master | 2021-05-18T13:34:13.612206 | 2020-03-30T10:55:19 | 2020-03-30T10:55:19 | 251,264,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,521 | py | # Generated by Django 2.0 on 2020-03-22 08:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the ``kddcuptest`` and ``user`` tables."""
    dependencies = [
        ('TestModel', '0003_auto_20200322_1548'),
    ]
    operations = [
        migrations.CreateModel(
            name='kddcuptest',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('count', models.IntegerField()),
                ('srv_count', models.IntegerField()),
                ('dst_host_count', models.IntegerField()),
                # NOTE(review): 'sount' looks like a typo for 'count'; renaming
                # now would require a follow-up migration.
                ('dst_host_srv_sount', models.IntegerField()),
                ('same_srv_rate', models.IntegerField()),
                ('dst_host_same_src_port_rate', models.DecimalField(decimal_places=3, max_digits=8)),
                ('dst_host_serror_rate', models.IntegerField()),
                ('label', models.BooleanField()),
            ],
            options={
                # verbose_name translates to "kddcup test dataset".
                'verbose_name': 'kddcup测试数据集',
                'db_table': 'kddcuptest',
            },
        ),
        migrations.CreateModel(
            name='user',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                # verbose_name values translate to "user" and "password".
                ('user', models.CharField(max_length=20, verbose_name='用户')),
                ('password', models.CharField(max_length=20, verbose_name='密码')),
            ],
            options={
                'verbose_name': '用户',
                'db_table': 'user',
            },
        ),
    ]
| [
"1055088623@qq.com"
] | 1055088623@qq.com |
2aca85423c446bc02c5316174601aea98f11d8bb | 1aa61c09db8e37bb6e9e3f8144c6f82a61af8f31 | /mergify_engine/github_types.py | c1f33fc79a421fdb4dde3945c53e93c6c2a12b80 | [
"Apache-2.0"
] | permissive | HarshCasper/mergify-engine | 15460e813eadaaebeeb5942dd07c9dbc8bd18c5b | 02d0a682c14db1c3fefeef4895645161cbb40f4f | refs/heads/master | 2023-02-25T10:51:59.853549 | 2021-01-25T07:17:25 | 2021-01-25T07:17:25 | 332,657,373 | 0 | 0 | Apache-2.0 | 2021-01-25T07:17:26 | 2021-01-25T06:55:32 | Python | UTF-8 | Python | false | false | 10,038 | py | # -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import typing
# ---------------------------------------------------------------------------
# Typed descriptions (TypedDict / NewType / Literal aliases) of the GitHub
# REST-API and webhook payloads consumed elsewhere in the engine.  These carry
# no runtime behavior; they exist for static type checking.
# ---------------------------------------------------------------------------
GitHubLogin = typing.NewType("GitHubLogin", str)
class GitHubInstallationAccessToken(typing.TypedDict):
    # https://developer.github.com/v3/apps/#response-7
    token: str
    expires_at: str
GitHubAccountType = typing.Literal["User", "Organization", "Bot"]
GitHubAccountIdType = typing.NewType("GitHubAccountIdType", int)
class GitHubAccount(typing.TypedDict):
    login: GitHubLogin
    id: GitHubAccountIdType
    type: GitHubAccountType
GitHubInstallationIdType = typing.NewType("GitHubInstallationIdType", int)
class GitHubInstallation(typing.TypedDict):
    # https://developer.github.com/v3/apps/#get-an-organization-installation-for-the-authenticated-app
    id: GitHubInstallationIdType
    account: GitHubAccount
GitHubRefType = typing.NewType("GitHubRefType", str)
SHAType = typing.NewType("SHAType", str)
GitHubRepositoryIdType = typing.NewType("GitHubRepositoryIdType", int)
GitHubRepositoryName = typing.NewType("GitHubRepositoryName", str)
class GitHubRepository(typing.TypedDict):
    id: GitHubRepositoryIdType
    owner: GitHubAccount
    private: bool
    name: GitHubRepositoryName
    full_name: str
    archived: bool
    url: str
    default_branch: GitHubRefType
class GitHubBranchCommitParent(typing.TypedDict):
    sha: SHAType
class GitHubBranchCommitCommit(typing.TypedDict):
    message: str
class GitHubBranchCommit(typing.TypedDict):
    sha: SHAType
    parents: typing.List[GitHubBranchCommitParent]
    commit: GitHubBranchCommitCommit
class GitHubBranch(typing.TypedDict):
    name: GitHubRefType
    commit: GitHubBranchCommit
# "ref" object as embedded in a pull request's base/head fields.
class GitHubBranchRef(typing.TypedDict):
    label: str
    ref: GitHubRefType
    sha: SHAType
    repo: GitHubRepository
    user: GitHubAccount
class GitHubLabel(typing.TypedDict):
    id: int
    name: str
    color: str
    default: bool
class GitHubComment(typing.TypedDict):
    id: int
    body: str
    user: GitHubAccount
# Empty marker base shared by GitHubIssue and GitHubPullRequest.
class GitHubIssueOrPullRequest(typing.TypedDict):
    pass
GitHubIssueId = typing.NewType("GitHubIssueId", int)
GitHubIssueNumber = typing.NewType("GitHubIssueNumber", int)
class GitHubIssue(GitHubIssueOrPullRequest):
    id: GitHubIssueId
    number: GitHubIssueNumber
GitHubPullRequestState = typing.Literal["open", "closed"]
# NOTE(sileht): GitHub mergeable_state is undocumented; these are findings
# from testing plus some info from other projects:
#
# unknown: not yet computed by GitHub
# dirty: pull request conflicts with the base branch
# behind: head branch is behind the base branch (only if strict: True)
# unstable: branch up to date (if strict: True) and non-required status
#           checks are failure or pending
# clean: branch up to date (if strict: True) and all status checks OK
# has_hooks: mergeable with passing commit status and pre-receive hooks.
#
# https://platform.github.community/t/documentation-about-mergeable-state/4259
# https://github.com/octokit/octokit.net/issues/1763
# https://developer.github.com/v4/enum/mergestatestatus/
GitHubPullRequestMergeableState = typing.Literal[
    "unknown",
    "dirty",
    "behind",
    "unstable",
    "clean",
    "has_hooks",
]
GitHubPullRequestId = typing.NewType("GitHubPullRequestId", int)
GitHubPullRequestNumber = typing.NewType("GitHubPullRequestNumber", int)
ISODateTimeType = typing.NewType("ISODateTimeType", str)
class GitHubPullRequest(GitHubIssueOrPullRequest):
    # https://developer.github.com/v3/pulls/#get-a-pull-request
    id: GitHubPullRequestId
    number: GitHubPullRequestNumber
    maintainer_can_modify: bool
    base: GitHubBranchRef
    head: GitHubBranchRef
    state: GitHubPullRequestState
    user: GitHubAccount
    labels: typing.List[GitHubLabel]
    merged: bool
    merged_by: typing.Optional[GitHubAccount]
    merged_at: typing.Optional[ISODateTimeType]
    rebaseable: bool
    draft: bool
    merge_commit_sha: typing.Optional[SHAType]
    mergeable_state: GitHubPullRequestMergeableState
    html_url: str
    title: str
# https://docs.github.com/en/free-pro-team@latest/developers/webhooks-and-events/webhook-events-and-payloads
GitHubEventType = typing.Literal[
    "check_run",
    "check_suite",
    "pull_request",
    "status",
    "push",
    "issue_comment",
    "pull_request_review",
    "pull_request_review_comment",
    # This does not exist in GitHub, it's a Mergify made one
    "refresh",
]
# Fields common to every webhook event payload.
class GitHubEvent(typing.TypedDict):
    organization: GitHubAccount
    installation: GitHubInstallation
    sender: GitHubAccount
GitHubEventRefreshActionType = typing.Literal[
    "user",
    "forced",
]
# This does not exist in GitHub, it's a Mergify made one
class GitHubEventRefresh(GitHubEvent):
    repository: GitHubRepository
    action: GitHubEventRefreshActionType
    ref: typing.Optional[GitHubRefType]
    pull_request: typing.Optional[GitHubPullRequest]
GitHubEventPullRequestActionType = typing.Literal[
    "opened",
    "edited",
    "closed",
    "assigned",
    "unassigned",
    "review_requested",
    "review_request_removed",
    "ready_for_review",
    "labeled",
    "unlabeled",
    "synchronize",
    "locked",
    "unlocked",
    "reopened",
]
class GitHubEventPullRequest(GitHubEvent):
    repository: GitHubRepository
    action: GitHubEventPullRequestActionType
    pull_request: GitHubPullRequest
GitHubEventPullRequestReviewCommentActionType = typing.Literal[
    "created",
    "edited",
    "deleted",
]
class GitHubEventPullRequestReviewComment(GitHubEvent):
    repository: GitHubRepository
    action: GitHubEventPullRequestReviewCommentActionType
    pull_request: GitHubPullRequest
GitHubEventPullRequestReviewActionType = typing.Literal[
    "submitted",
    "edited",
    "dismissed",
]
GitHubReviewIdType = typing.NewType("GitHubReviewIdType", int)
GitHubReviewStateType = typing.Literal[
    "APPROVED", "COMMENTED", "DISMISSED", "CHANGES_REQUESTED"
]
class GitHubReview(typing.TypedDict):
    id: GitHubReviewIdType
    user: GitHubAccount
    body: typing.Optional[str]
    pull_request: GitHubPullRequest
    repository: GitHubRepository
    state: GitHubReviewStateType
class GitHubEventPullRequestReview(GitHubEvent):
    repository: GitHubRepository
    action: GitHubEventPullRequestReviewActionType
    pull_request: GitHubPullRequest
GitHubEventIssueCommentActionType = typing.Literal[
    "created",
    "edited",
    "deleted",
]
class GitHubEventIssueComment(GitHubEvent):
    repository: GitHubRepository
    action: GitHubEventIssueCommentActionType
    issue: GitHubIssue
    comment: GitHubComment
class GitHubEventPush(GitHubEvent):
    repository: GitHubRepository
    ref: GitHubRefType
    before: SHAType
    after: SHAType
class GitHubEventStatus(GitHubEvent):
    repository: GitHubRepository
    sha: SHAType
class GitHubApp(typing.TypedDict):
    id: int
GitHubCheckRunConclusion = typing.Literal[
    "success",
    "failure",
    "neutral",
    "cancelled",
    "timed_out",
    "action_required",
    "stale",
]
class GitHubCheckRunOutput(typing.TypedDict):
    title: typing.Optional[str]
    summary: typing.Optional[str]
    text: typing.Optional[str]
GitHubStatusState = typing.Literal[
    "pending",
    "success",
    "failure",
    "error",
]
class GitHubStatus(typing.TypedDict):
    context: str
    state: GitHubStatusState
class GitHubCheckRun(typing.TypedDict):
    id: int
    app: GitHubApp
    external_id: str
    pull_requests: typing.List[GitHubPullRequest]
    head_sha: SHAType
    before: SHAType
    after: SHAType
    name: str
    output: GitHubCheckRunOutput
    conclusion: typing.Optional[GitHubCheckRunConclusion]
    completed_at: ISODateTimeType
class GitHubCheckSuite(typing.TypedDict):
    id: int
    app: GitHubApp
    external_id: str
    pull_requests: typing.List[GitHubPullRequest]
    head_sha: SHAType
    before: SHAType
    after: SHAType
GitHubCheckRunActionType = typing.Literal[
    "created",
    "completed",
    "rerequested",
    "requested_action",
]
class GitHubEventCheckRun(GitHubEvent):
    repository: GitHubRepository
    action: GitHubCheckRunActionType
    check_run: GitHubCheckRun
GitHubCheckSuiteActionType = typing.Literal[
    "created",
    "completed",
    "rerequested",
    "requested_action",
]
class GitHubEventCheckSuite(GitHubEvent):
    repository: GitHubRepository
    action: GitHubCheckSuiteActionType
    check_suite: GitHubCheckSuite
GitHubEventOrganizationActionType = typing.Literal[
    "deleted",
    "renamed",
    "member_added",
    "member_removed",
    "member_invited",
]
class GitHubEventOrganization(GitHubEvent):
    action: GitHubEventOrganizationActionType
GitHubEventMemberActionType = typing.Literal["added", "removed", "edited"]
class GitHubEventMember(GitHubEvent):
    action: GitHubEventMemberActionType
    repository: GitHubRepository
GitHubEventMembershipActionType = typing.Literal["added", "removed"]
class GitHubEventMembership(GitHubEvent):
    action: GitHubEventMembershipActionType
GitHubEventTeamActionType = typing.Literal[
    "created",
    "deleted",
    "edited",
    "added_to_repository",
    "removed_from_repository",
]
class GitHubEventTeam(GitHubEvent):
    action: GitHubEventTeamActionType
    repository: typing.Optional[GitHubRepository]
class GitHubEventTeamAdd(GitHubEvent, total=False):
    # Repository key can be missing on Enterprise installations
    repository: GitHubRepository
| [
"37929162+mergify[bot]@users.noreply.github.com"
] | 37929162+mergify[bot]@users.noreply.github.com |
684fc7ef464c7a993ed509a48263880dc368f563 | 8d79fc03f6e5a6df41e824c8573d3ea4646146bf | /IB_Tree_PathSum_if_Exists.py | e0c2d820da30e788bdd1a62a83ce8e103a92d034 | [] | no_license | Cbkhare/Codes | 3bea294dd0f2ec99e7e0ef0b7ff976cbe1765b7f | 5b535795cdd742b7810ea163e0868b022736647d | refs/heads/master | 2021-10-24T03:26:54.983073 | 2019-03-21T14:33:41 | 2019-03-21T14:33:41 | 111,226,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | class Solution:
# @param A : root node of tree
# @param B : integer
# @return an integer
def hasPathSum(self, A, B):
if not A: return 0
def validate(node, some):
#print (node.val, some)
if node.left == node.right == None:
if node.val==some:
return True
else:
return False
else:
if node.left:
r = validate(node.left,some=some-node.val)
if r:
# This is to avoid going to node.right if found true
return True
if node.right:
r = validate(node.right, some=some-node.val)
if r:
return True
return False
if validate(A,B):
return 1
else:
return 0
'''
https://www.interviewbit.com/problems/path-sum/
''' | [
"Chitrabasukhare89@gmail.com"
] | Chitrabasukhare89@gmail.com |
17b9931220d69bf54390e849e06fde75be0d8b22 | 28ade930ac8139b36f7f602d6c7f79c371900403 | /ros/src/waypoint_updater/waypoint_updater.py | 40e273d8db3b35a3f059a62ce67a5d48c99f91bb | [
"MIT"
] | permissive | jasleon/System-Integration | faab29ab24bc200fdada88b70047f35a5551f238 | 46a047f6e31d2950240c9cc1a2e5397ef67224d0 | refs/heads/main | 2023-04-05T19:53:07.337740 | 2021-05-01T20:01:36 | 2021-05-01T20:01:36 | 362,473,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,136 | py | #!/usr/bin/env python
import numpy as np
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 0.5  # deceleration limit used when ramping down for a stop line (units: presumably m/s^2 -- confirm)
class WaypointUpdater(object):
    """Publishes the next LOOKAHEAD_WPS track waypoints ahead of the car,
    lowering target velocities so the car stops at a red-light stop line."""
    def __init__(self):
        rospy.init_node('waypoint_updater')
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
        # TODO: Add other member variables you need below
        self.base_lane = None  # full track (Lane msg), set once by waypoints_cb
        self.pose = None  # latest vehicle pose from /current_pose
        self.stopline_wp_idx = -1  # waypoint index of the next stop line; -1 means none
        self.waypoints_2d = None  # [[x, y], ...] used to build the KD-tree
        self.waypoint_tree = None  # KDTree for nearest-waypoint queries
        self.loop()
    def loop(self):
        """Publish at 50 Hz once both pose and base waypoints are known."""
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            if self.pose and self.base_lane:
                self.publish_waypoints()
            rate.sleep()
    def get_closest_waypoint_idx(self):
        """Return the index of the closest waypoint that is ahead of the car."""
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        # Check if closest is ahead or behind vehicle
        closest_coord = self.waypoints_2d[closest_idx]
        prev_coord = self.waypoints_2d[closest_idx - 1]
        # Equation for hyperplane through closest_coords
        cl_vect = np.array(closest_coord)
        prev_vect = np.array(prev_coord)
        pos_vect = np.array([x, y])
        val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
        if val > 0:
            # Positive dot product => the closest waypoint is behind the car;
            # advance to the next one, wrapping around the track.
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx
    def publish_waypoints(self):
        """Publish the lane built by generate_lane()."""
        final_lane = self.generate_lane()
        self.final_waypoints_pub.publish(final_lane)
    def generate_lane(self):
        """Slice the lookahead window; decelerate it if a stop line is inside."""
        lane = Lane()
        closest_idx = self.get_closest_waypoint_idx()
        farthest_idx = closest_idx + LOOKAHEAD_WPS
        base_waypoints = self.base_lane.waypoints[closest_idx:farthest_idx]
        if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
            # No red light within the window: pass the base waypoints through.
            lane.waypoints = base_waypoints
        else:
            lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
        return lane
    def decelerate_waypoints(self, waypoints, closest_idx):
        """Return copies of waypoints with velocities ramped down to zero at the stop line."""
        temp = []
        for i, wp in enumerate(waypoints):
            p = Waypoint()
            p.pose = wp.pose
            stop_idx = max(self.stopline_wp_idx - closest_idx - 2, 0) # Two waypoints back from line so front car stops at line
            dist = self.distance(waypoints, i, stop_idx)
            # v = sqrt(2*a*d): speed profile that reaches zero at the stop point.
            vel = math.sqrt(2 * MAX_DECEL * dist)
            if vel < 1.:
                vel = 0.
            # Never exceed the original target velocity for this waypoint.
            p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
            temp.append(p)
        return temp
    def pose_cb(self, msg):
        # Store the most recent pose; consumed by loop()/get_closest_waypoint_idx().
        self.pose = msg
    def waypoints_cb(self, waypoints):
        # Store the static base waypoints and build the KD-tree exactly once.
        self.base_lane = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)
    def traffic_cb(self, msg):
        # TODO: Callback for /traffic_waypoint message. Implement
        self.stopline_wp_idx = msg.data
    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass
    def get_waypoint_velocity(self, waypoint):
        """Return the target linear velocity stored in a waypoint."""
        return waypoint.twist.twist.linear.x
    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity
    def distance(self, waypoints, wp1, wp2):
        """Cumulative Euclidean distance along waypoints from index wp1 to wp2."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
# Standard ROS node entry point: construct the updater (which blocks in loop()).
if __name__ == '__main__':
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
| [
"antonio.buap@gmail.com"
] | antonio.buap@gmail.com |
aa496f2599c8bd423bf7ec51c2d3ed9d04ba8578 | 053c413f6c0bc68d619216a7b5f7c88dae3bc2b0 | /bot.py | 37b03dbc62b2bfb118d9502a928464b70ca9d6f8 | [
"Apache-2.0"
] | permissive | menlen/jumaa | 096ff5d022f6e9f58170dd46383ab8083613531a | 4c79fe285973f7eb8214d96263f85c19b9dd04b3 | refs/heads/main | 2023-02-09T09:18:29.147337 | 2020-12-22T06:05:27 | 2020-12-22T06:05:27 | 322,281,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,396 | py | import os, sys
from PIL import Image, ImageDraw, ImageFont
import random, time
import telebot
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
from telebot import types
# NOTE(review): bot token and channel id are committed to source control;
# move them to environment variables and revoke/rotate this token.
TELEGRAM_TOKEN = '1466961621:AAEuwZEqVnIA3PrnOe4hDzcDuZm2FXQcJF0'
bot = telebot.TeleBot(TELEGRAM_TOKEN)
# Numeric id of the Telegram channel whose membership is checked by handlers.
channelId = -1001390673326
# Unused in this file -- presumably intended for per-user state; verify before removing.
user_dict = {}
msgDict = [
'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ.\nАллоҳумма солли ъалаа муҳаммадив-ва ъалаа аали муҳаммад.',
'صَلَّى اللهُ عَلَى مُحَمَّدٍ.\nСоллаллоҳу ъалаа муҳаммад.',
'صَلَّى اللهُ عَلَيْهِ وَسَلَّمَ.\nСоллаллоҳу ъалайҳи ва саллам.',
'أَللَّهُمَّ صَلِّ وَسَلِّمْ وَبَارِكْ عَلَيْهِ.\nАллоҳумма солли ва саллим ва баарик ъалайҳ.',
'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِهِ وَسَلِّمْ.\nАллоҳумма солли ъалаа муҳаммадив-ва ъалаа аалиҳий ва саллим.',
'صَلَّى اللهُ وَسَلَّمَ عَلَى نَبِيِّنَا مُحَمَّدٍ وَعَلَى آلِهِ وَأَصْحَابِهِ أَجْمَعِينَ.\nСоллаллоҳу ва саллама ъалаа набиййинаа муҳаммад, ва ъалаа аалиҳий ва асҳаабиҳий ажмаъийн.'
]
msgOne = random.choice(msgDict)
def UImgTextWriter(ext):
    """Render ``ext`` (upper-cased) onto a random 'juma0N.jpg' template.

    The composited image is written to '<filename>.png' in the working
    directory, where <filename> is a random integer in [30, 45], and that
    integer is returned so the caller can open the file afterwards.
    """
    IMAGES = [
        'juma01.jpg',
        'juma02.jpg',
        'juma03.jpg',
        'juma04.jpg',
        'juma05.jpg',
        'juma06.jpg',
        'juma07.jpg',
        'juma08.jpg',
        'juma09.jpg',
        'juma010.jpg',
        'juma011.jpg',
    ]
    # random.choice on a non-empty literal list cannot fail; the old
    # try/except-with-retry around it was dead code.
    img = random.choice(IMAGES)
    base = Image.open(img).convert("RGBA")
    text = ext.upper()
    # Transparent overlay the same size as the template image.
    txt = Image.new("RGBA", base.size, (255, 255, 255, 0))
    fnt = ImageFont.truetype("OpenSans-Italic.ttf", 40)
    d = ImageDraw.Draw(txt)
    # Gold text, bottom-middle anchored at a fixed position on the template.
    d.text(((800)/4, 330), text, font=fnt, fill=(231, 195, 113, 255), anchor='mb')
    out = Image.alpha_composite(base, txt)
    filename = random.randint(30, 45)
    # BUG FIX: the file was previously saved under a fixed name that did not
    # match the returned identifier, so callers opening f'{filename}.png'
    # could never find it.
    out.save(f'{filename}.png')
    return filename
def ImgTextWriter(ext):
    """Render ``ext`` (upper-cased) onto a random 'jumaN.jpg' template.

    The composited image is written to '<filename>.png' in the working
    directory, where <filename> is a random integer in [1, 15], and that
    integer is returned so the caller can open the file afterwards.
    """
    IMAGES = [
        'juma1.jpg',
        'juma2.jpg',
        'juma3.jpg',
        'juma4.jpg',
        'juma5.jpg',
        'juma6.jpg',
        'juma7.jpg',
        'juma8.jpg',
        'juma9.jpg',
        'juma10.jpg',
        'juma11.jpg',
    ]
    # random.choice on a non-empty literal list cannot fail; the old
    # try/except-with-retry around it was dead code.
    img = random.choice(IMAGES)
    base = Image.open(img).convert("RGBA")
    text = ext.upper()
    # Transparent overlay the same size as the template image.
    txt = Image.new("RGBA", base.size, (255, 255, 255, 0))
    fnt = ImageFont.truetype("OpenSans-Italic.ttf", 40)
    d = ImageDraw.Draw(txt)
    # Gold text, bottom-middle anchored at a fixed position on the template.
    d.text(((800)/4, 330), text, font=fnt, fill=(231, 195, 113, 255), anchor='mb')
    out = Image.alpha_composite(base, txt)
    filename = random.randint(1, 15)
    # BUG FIX: the file was previously saved under a fixed name that did not
    # match the returned identifier, so callers opening f'{filename}.png'
    # could never find it.
    out.save(f'{filename}.png')
    return filename
def gen_markup():
    """Build the inline keyboard shown to non-members: a channel-join link
    button plus a membership-confirmation button."""
    join_btn = InlineKeyboardButton("Azo bo'ling", callback_data="cb_yes", url='t.me/onideal')
    confirm_btn = InlineKeyboardButton("Tasdiqlash", callback_data="cb_no")
    keyboard = InlineKeyboardMarkup()
    keyboard.row_width = 1
    keyboard.add(join_btn, confirm_btn)
    return keyboard
def getUserFromChannel(userId):
    """Return the Telegram membership status string of userId in channelId."""
    return bot.get_chat_member(channelId, userId).status
@bot.callback_query_handler(func=lambda call: True)
def callback_query(call):
    # Dispatch for the two inline-keyboard buttons created by gen_markup().
    if call.data == "cb_yes":
        # "Join" button: just acknowledge the tap.
        bot.answer_callback_query(call.id, "Answer is Yes")
    elif call.data == "cb_no":
        # "Confirm" button: re-check channel membership before continuing.
        u = getUserFromChannel(call.from_user.id)
        if u == 'member':
            msg = bot.send_message(call.from_user.id, """\
Juda soz!!!, Do'stingizni ismini yozing
""")
            # The user's next message is treated as the friend's name.
            bot.register_next_step_handler(msg, process_name_step)
        else:
            bot.send_message(call.from_user.id, f"Salom {call.from_user.first_name}, Kanalimizga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())
def process_name_step(message):
    """Send a greeting image personalised with a friend's name.

    Registered as the next-step handler after the user chooses "Do'stimga";
    ``message.text`` holds the friend's name.
    """
    try:
        name = message.text.upper()
        myfile = ImgTextWriter(name)
        caption = f"{name} : Juma Ayyom muborak aziz dindoshim🕌🌙\
\nSizni Sayyid-ul Ayyom bilan qutlayman🌙\n{msgOne}\
\nO'zingiz yaxshi ko'rgan, jannatda xam birga bo'lishni istagan insonlaringizni O'z ismimlari bilan tabriklang. \n@JumaTabriklarbot"
        # BUG FIX: the file handle was previously never closed (leaked one
        # descriptor per request); the context manager closes it after sending.
        with open(f'{myfile}.png', 'rb') as photo:
            bot.send_photo(message.chat.id, photo, caption=caption)
    except Exception:
        # Best-effort fallback so a bad update never kills the polling loop.
        bot.reply_to(message, 'oooops')
def process_uname_step(message):
    """Send a generic greeting image (no personal name in the caption).

    Registered as the next-step handler after the user chooses "O'zimga";
    ``message.text`` is still rendered onto the image itself.
    """
    try:
        name = message.text.upper()
        myfile = UImgTextWriter(name)
        caption = f"Juma Ayyom muborak aziz dindoshlarim🕌🌙\
\nSizni Sayyid-ul Ayyom bilan qutlayman🌙,\n{msgOne}\
\nO'zingiz yaxshi ko'rgan, jannatda xam birga bo'lishni istagan insonlaringizga yuboring \n@JumaTabriklarbot"
        # BUG FIX: the file handle was previously never closed (leaked one
        # descriptor per request); the context manager closes it after sending.
        with open(f'{myfile}.png', 'rb') as photo:
            bot.send_photo(message.chat.id, photo, caption=caption)
    except Exception:
        # Best-effort fallback so a bad update never kills the polling loop.
        bot.reply_to(message, 'oooops')
@bot.message_handler(commands=['start','help'])
def start(message):
    # Channel members get the main reply keyboard; everyone else is asked to
    # join the channel first via the inline keyboard.
    us = getUserFromChannel(message.chat.id)
    if us == 'member':
        # Two-button reply keyboard: greet a friend vs. greet yourself.
        markup = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)
        btn1 = types.KeyboardButton("Do'stimga")
        btn2 = types.KeyboardButton("O'zimga")
        markup.add(btn1, btn2)
        bot.send_message(message.chat.id, "Assalomu Aleykum Do'stim", reply_markup=markup)
    else:
        bot.send_message(message.chat.id, f"Salom {message.from_user.first_name}, Kanalimizga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tasdiqlang", reply_markup=gen_markup())
@bot.message_handler(func=lambda message: True)
def message_handler(message):
    # Catch-all handler for the reply-keyboard buttons sent by start().
    us = getUserFromChannel(message.chat.id)
    if us == 'member':
        # NOTE(review): this prompt is sent before checking which button was
        # pressed, so unrecognised text still receives the prompt but no
        # next-step handler gets registered.
        msg = bot.send_message(message.chat.id, """\
Juda soz!!!, Do'stingizni ismini yozing. \nYoki /start /help ni bosing
""")
        if message.text == "Do'stimga":
            bot.register_next_step_handler(msg, process_name_step)
        elif message.text == "O'zimga":
            bot.register_next_step_handler(msg, process_uname_step)
    else:
        bot.send_message(message.chat.id, f"Salom {message.from_user.first_name}, kanallarga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())
# Start long-polling; none_stop keeps the loop running across handler errors.
bot.polling(none_stop=True)
| [
"noreply@github.com"
] | menlen.noreply@github.com |
67d42f2401139b3fa1e87f0d1102cc5d8b651139 | 02c5f88979c48ae5a52e50373bae611278ad29ba | /paws4thought/migrations/0009_teammember.py | 18ee9c7418447cbeb4f9330599e61226eef35693 | [] | no_license | Comm4nd0/p4t | 2f9c0960162de75f4e0aa9e27efc864d5861d5cd | 745630ad7140cff9fd467617a82895422a8cc78c | refs/heads/master | 2020-03-28T14:52:08.073298 | 2018-09-17T22:06:15 | 2018-09-17T22:06:15 | 148,530,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | # Generated by Django 2.1.1 on 2018-09-16 07:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paws4thought', '0008_company_home_page_image_800x1000'),
]
operations = [
migrations.CreateModel(
name='TeamMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100)),
('position', models.CharField(blank=True, max_length=100)),
('image_350x350', models.ImageField(blank=True, upload_to='')),
],
),
]
| [
"marcobaldanza@hotmail.co.uk"
] | marcobaldanza@hotmail.co.uk |
6fae06163498067858f995086c69e2c86473bfc5 | 9876a02fb4f6c38271e41995296c6da4d2ec84af | /wagtail_review/templatetags/wagtailreview_tags.py | 0f6c7e5ea88157336aa4fe725a39f1f39153a035 | [
"BSD-3-Clause"
] | permissive | jacobtoppm/wagtail-review | 423c19cecfa17ddeb22de6bb2a34baad0cd10fdb | 23b81d7e5699ecb843a99da1aa207775a8b85bd6 | refs/heads/master | 2020-12-27T18:26:42.182847 | 2019-05-20T11:29:16 | 2019-05-20T11:29:16 | 238,005,148 | 0 | 0 | BSD-3-Clause | 2020-03-13T10:44:00 | 2020-02-03T16:05:08 | null | UTF-8 | Python | false | false | 1,172 | py | from django import template
from wagtail_review.forms import ResponseForm
register = template.Library()

@register.inclusion_tag('wagtail_review/annotate.html', takes_context=True)
def wagtailreview(context):
    """Provide the review-mode context for the annotate.html inclusion tag.

    Reads ``wagtailreview_mode`` / ``wagtailreview_reviewer`` attributes off
    the request (presumably attached elsewhere in wagtail_review -- confirm).
    """
    request = context['request']
    review_mode = getattr(request, 'wagtailreview_mode', None)
    reviewer = getattr(request, 'wagtailreview_reviewer', None)

    if review_mode in ('respond', 'comment'):
        closed = reviewer.review.status == 'closed'
        return {
            'mode': review_mode,
            'allow_annotations': not closed,
            'show_closed': closed,
            'allow_responses': review_mode == 'respond' and not closed,
            'reviewer': reviewer,
            'token': reviewer.response_token,
            'response_form': ResponseForm(),
        }

    if review_mode == 'view':
        # Read-only view: annotations and responses are always disabled.
        return {
            'mode': review_mode,
            'show_closed': False,
            'allow_annotations': False,
            'allow_responses': False,
            'reviewer': reviewer,
            'token': reviewer.view_token,
        }

    return {'mode': None}
| [
"matt@west.co.tt"
] | matt@west.co.tt |
c3021217fbc5a5354bdb7406505f1e16a55ed7f2 | 322b6f0a71260662dbe7211a3fc06f34c487284b | /hw/hw06/hw06.py | 78688a9bd27c72a01d76dfde6d55859c994ebb33 | [] | no_license | yjkweon24/CS61a-Berkeley | 83b0c9fd86eb5e6db5ce3155b337dd5e25f0eb83 | 3bc6799a50b5b168cd388b990ca8ca4a4af19490 | refs/heads/master | 2021-01-23T04:53:14.892306 | 2017-06-05T05:54:36 | 2017-06-05T05:54:36 | 80,402,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,327 | py | # Exceptions
# Avoiding key errors
def avoid_keyerror(dictionary, key):
    """ Returns the value associated with key in dictionary. If key
    does not exist in the dictionary, print out 'Avoid Exception',
    insert KEY in the dictionary with value 'no value' and also return
    'no value'.
    >>> d = {1: 'one', 3: 'three', 5: 'five'}
    >>> avoid_keyerror(d, 3)
    'three'
    >>> avoid_keyerror(d, 4)
    Avoid Exception
    'no value'
    >>> d[4]
    'no value'
    >>> avoid_keyerror(d, 4)
    'no value'
    >>> avoid_keyerror(d, 3)
    'three'
    """
    # Present key: plain lookup.
    if key in dictionary:
        return dictionary[key]
    # Missing key: announce, record the placeholder, and return it.
    print("Avoid Exception")
    dictionary[key] = "no value"
    return "no value"
# List replacement
class Link:
    """A linked list.
    >>> s = Link(1, Link(2, Link(3)))
    >>> s.first
    1
    >>> s.rest
    Link(2, Link(3))
    """
    empty = ()

    def __init__(self, first, rest=empty):
        assert rest is Link.empty or isinstance(rest, Link)
        self.first = first
        self.rest = rest

    def __repr__(self):
        # Render recursively; the final node omits the ", rest" part.
        tail = '' if self.rest is Link.empty else ', {!r}'.format(self.rest)
        return 'Link({}{})'.format(self.first, tail)

    def __eq__(self, other):
        # Walk both lists in lockstep: equal iff every value matches and
        # both lists terminate at the same step.
        a, b = self, other
        while a is not Link.empty and b is not Link.empty:
            if a.first != b.first:
                return False
            a, b = a.rest, b.rest
        return a is Link.empty and b is Link.empty
def lst_replace_first_obvious(L, target, replacement):
    """Return the result of replacing the first occurrence of TARGET
    in linked-list L with REPLACEMENT. Returns a list equal to L if
    TARGET does not occur. Non-destructive."""
    if L is Link.empty:
        return Link.empty
    elif L.first == target:
        return Link(replacement, L.rest)
    else:
        # BUG FIX: previously recursed via an undefined name (`lst_replace`)
        # and dropped the target/replacement arguments, so this branch always
        # raised. Recurse on this function and pass both arguments through.
        return Link(L.first, lst_replace_first_obvious(L.rest, target, replacement))
def lst_replace_first(L, target, replacement):
    """Return the result of replacing the first occurrence of TARGET
    in linked-list L with REPLACEMENT. Returns the original L unchanged
    if TARGET does not occur. Non-destructive.
    >>> L1 = Link(0, Link(2, Link(3, Link(2))))
    >>> lst_replace_first(L1, 2, 5)
    Link(0, Link(5, Link(3, Link(2))))
    >>> L1
    Link(0, Link(2, Link(3, Link(2))))
    >>> L2 = lst_replace_first(L1, 10, 5)
    >>> L2
    Link(0, Link(2, Link(3, Link(2))))
    >>> L2 is L1
    True
    """
    # Inner helper (deliberately shadows the outer name) returns a pair
    # (not_found, list): not_found is True when TARGET never occurred; the
    # empty-list case is signalled by raising LookupError instead.
    def lst_replace_first(L, target, replacement):
        if L is Link.empty:
            # Reached the end without finding TARGET.
            raise LookupError()
        elif L.first == target:
            # Found it: rebuild just this node, sharing the rest.
            return (False, Link(replacement, L.rest))
        else:
            try:
                k = lst_replace_first(L.rest, target, replacement)
            except:
                # TARGET absent below this node: flag not_found.
                return (True, L)
            else:
                # Propagate the flag while rebuilding the prefix.
                return (k[0] , Link(L.first, k[1]))
    try:
        k = lst_replace_first(L, target, replacement)
    except:
        # L itself was empty.
        return L
    else:
        if k[0] == True:
            # Not found anywhere: return the ORIGINAL list object (identity
            # preserved, as the doctest `L2 is L1` requires).
            return L
        else:
            return k[1]
# Replace with apppropriate try block.
# Counting paths.
def num_paths(A, r, c, target):
    """Return the number of paths through list of same-length strings A
    that match TARGET, starting at A[R][C] and proceeding at each step
    one position north, south, east, west, northeast, northwest,
    southeast, or southwest.
    >>> num_paths([ "AB", "BC" ], 0, 0, "ABBC")
    2
    >>> num_paths([ "CBB", "BBA" ], 0, 1, "BBCBBA")
    12
    >>> long = "A" * 10 + "B" * 10
    >>> num_paths([long] * 2, 0, 5, long)
    82373282112
    """
    if target == "":
        return 1
    M = len(A) # Number of rows
    N = M and len(A[0]) # Number of columns, 0 if A is empty
    S = len(target)
    # (row, col, k) -> path count; cells may be revisited, so memoizing on
    # position + progress through TARGET is sound and keeps this polynomial.
    memo = {}
    def count(r, c, k):
        """The number of paths through A starting at R, C that match
        TARGET[k:]."""
        if 0 <= r < M and 0 <= c < N:
            if A[r][c] == target[k]:
                if k == S - 1:
                    # Matched the last character: exactly one complete path.
                    return 1
                # Otherwise sum the paths continuing in all 8 directions.
                return sum(memoized_count(r + dr, c + dc, k + 1)
                           for dr in (-1, 0, 1)
                           for dc in (-1, 0, 1)
                           if dr or dc)
            else:
                return 0
        else:
            return 0
    def memoized_count(r1, c1, k1):
        """Memoizing wrapper around count."""
        key = (r1, c1, k1)
        if key not in memo:
            memo[key] = count(r1, c1, k1)
        return memo[key]
    return memoized_count(r, c, 0)
"noreply@github.com"
] | yjkweon24.noreply@github.com |
4f30e2250904eda972c5b10259dff6e42edb6cf0 | dfc1afd47c70824541e5a268a0a5f35af527d5ba | /populate.py | 8f605614809c58760cd458420497807238359138 | [] | no_license | howard-edson/carmarketvalues | 2805de1604fd2a9ac3a54c0900c995b04a0262a9 | 9c644f3d1316fcfdfbede8283c22bd3484ae58d8 | refs/heads/master | 2016-09-06T00:41:17.425143 | 2014-05-10T00:04:24 | 2014-05-10T00:04:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cmv_project.settings")
from cmv_app.models import Region, Search
from django.contrib.auth.models import User
import random
def populate():
    """Seed the database with demo users, regions and random searches.

    Creates three users and three regions, then attaches six randomly
    generated Search rows to each pairwise (user, region) combination.
    """
    users=[add_user("user1", 123),add_user("user2",123),add_user("user3",123)]
    regions=[add_region("seattle"),add_region("atlanta"),add_region("newyork")]
    for (user,region) in zip(users,regions):
        for i in range(6):
            s=Search(user=user,**random_generators())
            s.save()
            print "search is ",s
            print "region is ", region
            # regions is a many-to-many relation, so the Search must be
            # saved before a region can be attached.
            s.regions.add(region)
            s.save()
    # Print out what we have added to the user.
    for c in Search.objects.all():
        print c
def add_user(name, password):
    """Return the User called *name*, creating it with *password* if needed."""
    user, newly_created = User.objects.get_or_create(username=name)
    if not newly_created:
        return user
    # Passwords must go through set_password() so they are hashed.
    user.set_password(password)
    user.save()
    return user
def add_region(name):
    """Return the Region called *name*, creating it if it does not exist."""
    region, _created = Region.objects.get_or_create(name=name)
    return region
def random_generators():
    """Build a random keyword-argument dict for constructing a Search."""
    make, model = random.choice([("honda", "crv"), ("toyota", "corolla"),
                                 ("nissan", "leaf"), ("ford", "mustang")])
    return {
        'min_year': random.randrange(1985, 1990),
        'max_year': random.randrange(1991, 2013),
        'min_price': random.randint(5, 30) * 100,
        'max_price': random.randint(40, 100) * 100,
        'vehicle_make': make,
        'vehicle_model': model,
        'pic_only': random.choice([True, False]),
        'search_title_only': random.choice([True, False]),
        'seller_type': random.choice(Search.SELLER_TYPE_CHOICES)[0],
    }
return kwargs
# Start execution here!
# Entry point: run only when invoked directly, not on import.
if __name__ == '__main__':
    print "Starting population script..."
    #os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cmv_project.settings')
    populate()
| [
"sr443@uw.edu"
] | sr443@uw.edu |
1317bf054d5d86a02ab8c70735eac40355d52dc1 | c2b4eb71187dc18d5606bd0eda7775e7fafc728e | /LEAM_TCN/LEAM_TCN.py | d78b56be8fd7ce326ee9da9a2271ea28a4e0aa92 | [] | no_license | keithzzzzzz/NLP | 90c4c5daa6fc06eacc6b2c6db2dbc0a8726d117c | 8af83949756aa161b494603d93c2dff832db2340 | refs/heads/master | 2022-11-02T06:29:03.466096 | 2019-12-03T23:46:13 | 2019-12-03T23:46:13 | 187,530,466 | 0 | 1 | null | 2022-10-16T10:36:08 | 2019-05-19T21:35:17 | Jupyter Notebook | UTF-8 | Python | false | false | 7,551 | py | import pandas as pd
import numpy as np
import tensorflow as tf
import numpy as np
import pickle
import sklearn.metrics as metrics
import os
from sklearn.model_selection import train_test_split
from sklearn.manifold import TSNE
from scipy import spatial
import sklearn.metrics as Metrics
import itertools
from collections import Counter
import time
import seaborn as sns
import matplotlib.pyplot as plt
class Options(object):
    """Configuration container for the LEAM-TCN model.

    Fields initialised to None are placeholders that are filled in from
    the data (e.g. maxlen, W_emb, num_class) before training starts.
    """
    def __init__(self):
        # GPU id
        self.gpu_id = 0
        ###################### Data ##########################
        # Random state of spliting data
        self.rs = None
        # If include dcodes
        self.dcode = False
        # Number of tests
        self.num_test = None
        # Current dataset number
        self.num_cur = 0
        # Maximum number of words in a review
        self.maxlen = None
        # Maximum number of notes for one patient
        self.maxnotes = None
        # Number of unique words in all reviews
        self.uniq_words = None
        # Number of training samples at validation step
        self.samples = None
        #################### Embeddings #########################
        # Vector size for each word embeddings from GloVe
        self.emb_size = 300
        # word vectors
        self.W_emb = None
        # class vectors
        self.W_class_emb = None
        # Number of classes
        self.num_class = None
        # class names
        self.class_name = None
        # ngram (width of the attention convolution window)
        self.ngram = 20
        ###################### Model #########################
        # Training Batch Size
        self.batch_size = 20
        # Epoch
        self.epoch = 251
        # Learning rate
        self.lr_rate = 0.001
        # keep_prob, dropout_rate = 1 - keep_prob, here is the keep_prob rate
        self.keep_prob = 0.8
        # hidden units for notes
        self.H_dis = 4
        # Optimizer
        self.optimizer = 'Adam'
        # Validation Frequency
        self.valid_freq = 100
        # Early Stopping
        self.early_stop = False
        # Patience
        self.patience = None
        # Encoder
        self.encoder = "None"
        # Dilation rate (mutated by emb_classifier while stacking blocks)
        self.l = 1
        # kernel size for tcn
        self.k = 3
        # number of filters
        self.num_filters = 8
        # save model path
        self.save_path = './save/leam_att/att_'
def leam(x_emb, x_mask, x_mask_notes, W_class_1, opt, is_training, W_class_2=None):
    """ Attention embedding encoder for hierarchical LEAM structure
    Args:
        x_emb: embedding vectors for one batch, shape b * m * s * e
        x_mask: word-level mask for x_emb, shape b * m * s
        x_mask_notes: note-level mask, shape b * m
        W_class_1: transpose of label embeddings, shape e * c
        opt: option class
        is_training: unused in this function (W_class_2 is also unused)
    Return:
        H_enc: per-note label-attentive embeddings, shape b * m * e
        (words are attention-pooled via reduce_sum over the word axis;
        the note axis m is preserved for the downstream TCN).
    """
    print("--------------------- Encoding LEAM-hier ----------------------")
    x_emb_ = tf.cast(x_emb, tf.float32) # b * m * s * e
    x_mask_ = tf.expand_dims(x_mask, -1) # b * m * s * 1
    x_mask_ = tf.cast(x_mask_, tf.float32)
    x_mask_notes_ = tf.expand_dims(x_mask_notes, -1) # b * m * 1 * 1
    x_mask_notes_ = tf.cast(x_mask_notes_, tf.float32)
    # Zero out embeddings of padding words before computing similarity.
    x_emb_1 = tf.multiply(x_emb_, x_mask_) # b * m * s * e
    x_emb_norm = tf.nn.l2_normalize(x_emb_1, axis=-1)
    W_class_1 = tf.cast(W_class_1, tf.float32)
    W_class_norm_1 = tf.nn.l2_normalize(W_class_1, axis=0)
    W_class_norm_1 = tf.cast(W_class_norm_1, tf.float32)
    # Cosine similarity between each word and each class embedding.
    G = tf.contrib.keras.backend.dot(x_emb_norm, W_class_norm_1) # b * m * s * c
    # print("shape of cos similarity for emb and class: ", G.shape)
    u_conv = tf.layers.conv2d(G, filters=2, kernel_size=[1, opt.ngram], padding="same", activation=tf.nn.relu)
    att_v = tf.reduce_max(u_conv, axis=-1, keepdims=True)
    # print("shape of maxpooling layer: ", att_v.shape)
    # Softmax over the word axis restricted to unmasked positions.
    att_v_max = partial_softmax(att_v, x_mask_, 2, "Att_v_max", weight_notes=x_mask_notes_)
    # print("shape of partial softmax: ", att_v_max.shape)
    x_att = tf.multiply(x_emb_, att_v_max)
    # print("multiply attention to embeddings: ", x_att.shape)
    z = tf.reduce_sum(x_att, axis=2)
    print("shape of aggregated attentive embeddings: ", z.shape)
    H_enc = z
    # print("shape of weighted note embeddings: ", z_weighted.shape)
    # H_enc = tf.reduce_max(z_weighted, axis=1)
    print("shape of LEAM encoder: ", H_enc.shape)
    print("----------------------- End of Encoding --------------------------")
    return H_enc
def temporal_block(x, x_mask_notes, dropout, opt, is_training):
    """One TCN block: two causal dilated conv1d layers (dilation opt.l,
    kernel opt.k), each followed by layer norm and channel-wise dropout.
    Left padding of (k-1)*l keeps the convolution causal along the note
    axis; x_mask_notes zeroes padded notes before each convolution.
    NOTE(review): no residual/skip connection is added here, unlike a
    canonical TCN residual block.
    """
    print("---- dialation {0} ----".format(opt.l))
    padding = (opt.k - 1) * opt.l
    # masked note embeddings
    x_masked_notes = tf.multiply(x, x_mask_notes)
    x_padded = tf.pad(x_masked_notes, tf.constant([(0, 0), (padding, 0), (0, 0)]))
    # 1st tcn layer with dialation rate l and kernel size k
    tcn_1 = tf.layers.conv1d(x_padded, filters=opt.num_filters, kernel_size=opt.k, padding='valid',
                             dilation_rate=opt.l, activation=tf.nn.relu)
    tcn_1_norm = tf.contrib.layers.layer_norm(tcn_1)
    # noise_shape broadcasts the dropout mask across batch and time,
    # dropping whole channels.
    tcn_1_output = tf.layers.dropout(tcn_1_norm, rate=dropout, training=is_training, noise_shape = [1,1,opt.num_filters])
    # print(tcn_1_output.shape)
    # 2nd tcn layer with same specs
    tcn_1_output_masked = tf.multiply(tcn_1_output, x_mask_notes)
    x_padded_2 = tf.pad(tcn_1_output_masked, tf.constant([(0, 0), (padding, 0), (0, 0)]))
    tcn_2 = tf.layers.conv1d(x_padded_2, filters=opt.num_filters, kernel_size=opt.k, padding='valid',
                             dilation_rate=opt.l, activation=tf.nn.relu)
    tcn_2_norm = tf.contrib.layers.layer_norm(tcn_2)
    tcn_2_output = tf.layers.dropout(tcn_2_norm, rate=dropout, training=is_training,
                                     noise_shape = [1,1,opt.num_filters])
    print(tcn_2_output.shape)
    return tcn_2_output
def emb_classifier(x, x_mask, x_mask_notes, y, dropout, opt, is_training):
    """Full model graph: LEAM word-level attention encoder followed by a
    stack of four dilated TCN blocks (dilations 1, 2, 4, 8) over the
    note axis, a discriminator head, and sigmoid cross-entropy loss.

    Returns:
        (prob, loss, train_step, H_enc_fin, W_norm, W_class_1, saver, layer_3)

    NOTE(review): opt.l is mutated in place (1 -> 2 -> 4 -> 8), so any
    later use of opt will see opt.l == 8 — confirm this is intended.
    """
    x_emb, W_norm = embedding(x, opt) # b * m * s * e
    # print("Embedding shape: ", x_emb.shape)
    y_pos = tf.argmax(y, -1)
    y_emb_1, W_class_1 = embedding_class(y_pos, opt, 'class_emb') # b * e, c * e
    # print("-shape of class embedding: ", y_emb.shape)
    W_class_tran_1 = tf.transpose(W_class_1, [1, 0]) # e * c
    H_enc = leam(x_emb, x_mask, x_mask_notes, W_class_tran_1, opt, is_training)
    # first block
    layer_1 = temporal_block(H_enc, x_mask_notes, dropout, opt, is_training)
    # second block
    opt.l = 2
    layer_2 = temporal_block(layer_1, x_mask_notes, dropout, opt, is_training)
    # third block
    opt.l = 4
    layer_3 = temporal_block(layer_2, x_mask_notes, dropout, opt, is_training)
    # fourth block
    opt.l = 8
    layer_4 = temporal_block(layer_3, x_mask_notes, dropout, opt, is_training)
    # print(layer_3.shape)
    # print(layer_3[:, -1, :].shape)
    # Use the representation of the last time step (last note) only.
    H_enc_fin = layer_4[:, -1, :]
    # logits = discriminator_2layer(z_fin, opt, dropout, is_training)
    logits = discriminator_2layer(H_enc_fin, opt, dropout, is_training)
    # logits = tf.layers.dense(H_enc_fin, 1, activation=None, kernel_initializer=tf.orthogonal_initializer())
    prob = tf.nn.sigmoid(logits)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))
    saver = tf.train.Saver()
    # UPDATE_OPS dependency keeps any normalization statistics updated
    # before each optimizer step.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_step = tf.train.AdamOptimizer(opt.lr_rate).minimize(loss)
    return prob, loss, train_step, H_enc_fin, W_norm, W_class_1, saver, layer_3
"35375325+keithzzzzzz@users.noreply.github.com"
] | 35375325+keithzzzzzz@users.noreply.github.com |
70e0fbda2930cf74706f34ee0a788c206fd587e9 | 07e1aaaf45cdf75d110ebeff998c8c3df24b0968 | /users/views.py | cdb98a7fa69ba77475ebb5c5c8bdd72af63c939e | [] | no_license | luizmpimenta/djangram | 83124ced844d6dc121f4555beb51a049dc23846a | 60041624959cbb170235e80b9ddeab55c816bd0e | refs/heads/master | 2022-12-09T19:19:45.380245 | 2019-08-02T11:53:42 | 2019-08-02T11:53:42 | 199,441,087 | 0 | 0 | null | 2022-12-08T05:57:41 | 2019-07-29T11:37:13 | Python | UTF-8 | Python | false | false | 2,724 | py | from django.shortcuts import render
from .mixins import UserHasAccessToDetailMixin, LoginRequiredMixin
from django.contrib.auth.views import LoginView, LogoutView
from django.views import generic
from .models import User
from django.urls import reverse_lazy
from .forms import UserSignupForm
from .helpers import send_confirm_user_signup_email
#from django.core.mail import send_mail
# Create your views here.
class UserLoginView(LoginView):
    """Standard Django login view rendered with the app's own template."""
    template_name = 'users/login.html'
class UserLogoutView(LoginRequiredMixin, LogoutView):
    """Logout view; LoginRequiredMixin bars anonymous access."""
    pass
class UserDetailView(generic.DetailView):
    """Public profile page for a single user."""
    model = User
    context_object_name = 'user'
    template_name = 'users/detail_user.html'
    def get_context_data(self, **kwargs):
        """Add whether the logged-in user already follows this profile.

        'request_user_has_followed' is a queryset — truthy when the
        viewed user is in the requester's following set.
        """
        context = super().get_context_data(**kwargs)
        request_user = User.objects.get(pk=self.request.user.pk)
        follow_user = kwargs['object']
        context['request_user_has_followed'] = request_user.following.filter(pk=follow_user.pk)
        return context
class UserSignupView(generic.CreateView):
    """Account creation; sends a confirmation e-mail after saving."""
    model = User
    form_class = UserSignupForm
    template_name = 'users/signup_user.html'
    success_url = reverse_lazy('users:login_user')
    def form_valid(self, form):
        # Save first so the e-mail helper receives a persisted user.
        # NOTE(review): super().form_valid() saves the form again —
        # presumably harmless but redundant; confirm.
        self.object = form.save()
        send_confirm_user_signup_email(self.object)
        return super().form_valid(form)
class UserUpdateView(UserHasAccessToDetailMixin, generic.UpdateView):
    """Profile edit form; the mixin restricts access to the owner."""
    model = User
    fields = ['username', 'picture']
    template_name = 'users/update_user.html'
    def get_success_url(self):
        # Redirect back to the freshly updated profile page.
        return reverse_lazy('users:detail_user', args=[self.object.pk])
class UserFollowView(generic.RedirectView):
    """Toggle follow/unfollow of a profile, then redirect back to it."""
    def get_redirect_url(self,*args, **kwargs):
        # Logged-in user
        request_user = User.objects.get(pk=self.request.user.pk)
        # User whose profile is being followed / unfollowed
        following_user = User.objects.get(pk=kwargs['pk'])
        # Check whether the profile is already followed (queryset is
        # truthy when non-empty)
        request_user_has_followed = request_user.following.filter(pk= following_user.pk)
        if not request_user_has_followed:
            # Follow the profile
            request_user.following.add(following_user)
            # Add the logged-in user to the profile's followers list
            following_user.followers.add(request_user)
        else:
            # Unfollow the profile if it is already being followed
            request_user.following.remove(following_user)
            # Remove the logged-in user from the profile's followers list
            following_user.followers.remove(request_user)
        return reverse_lazy('users:detail_user', args=[following_user.pk])
"luizpimenta.dev@outlook.com"
] | luizpimenta.dev@outlook.com |
40052ce02f4a93b505d8c38b835da3cb1a939199 | b1856d37a1f1f993d19460ffcaf5be980aa4d95f | /graphspace_python/graphs/classes/gsgroup.py | 28464b9613d8ab9367c3313e5bb195313f5d9e61 | [] | no_license | adbharadwaj/graphspace-python | 6b75dd3967102b41eefd2d125f42b6839ee36a64 | 6124a2cd14232ead3cc339cd5c9391167afcd497 | refs/heads/master | 2021-08-06T01:41:15.349483 | 2021-07-25T00:35:40 | 2021-07-25T00:35:40 | 86,537,380 | 3 | 11 | null | 2021-07-25T00:32:34 | 2017-03-29T04:10:13 | Python | UTF-8 | Python | false | false | 2,667 | py | import datetime
class GSGroup(object):
    """A GraphSpace group: a named collection with an optional description.

    Mirrors the JSON structure that GraphSpace expects and provides
    simple accessors for the two fields.

    Attributes:
        name (str): Name of group.
        description (str): Description of group.
    """

    def __init__(self, name=None, description=None):
        """Create a group.

        Args:
            name (str, optional): Group name. When omitted, a timestamped
                default such as 'Group 10:30AM on January 01, 2020' is used.
            description (str, optional): Group description.
        """
        if name is not None:
            self.name = name
        else:
            stamp = datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")
            self.set_name('Group ' + stamp)
        self.description = description

    def json(self):
        """Return the group's details as a GraphSpace-compatible dict."""
        return {
            'name': self.get_name(),
            'description': self.get_description(),
        }

    def get_name(self):
        """Return the group's name."""
        return self.name

    def set_name(self, name):
        """Set the group's name to *name*."""
        self.name = name

    def get_description(self):
        """Return the group's description."""
        return self.description

    def set_description(self, description):
        """Set the group's description to *description*."""
        self.description = description
| [
"sandeep.mahapatra.5@gmail.com"
] | sandeep.mahapatra.5@gmail.com |
34bf0710871bc69550753c7f12214343abe99030 | 1d3c67c2892f5e78ca1b4e57a1849f02e27c9747 | /TED/TED/urls.py | 9f6cdad9d0e8966218e7f9acb28449d2a213b2d5 | [] | no_license | ThisEnginersDevelop2017/TED | 58abfa0f1fe0dee7078e2aba4bad12bf607783e2 | 92f121dee86b85dd986dee390c3830fd0c8e1b50 | refs/heads/master | 2021-01-02T09:40:26.795740 | 2017-08-03T20:48:32 | 2017-08-03T20:48:32 | 99,273,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | """TED URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# Route admin, django-registration auth flows, and the webinfo app;
# static() appends MEDIA_URL file serving (development use).
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # url(r'^$',include('webinfo.urls', namespace='home')),
    url(r'^accounts/', include('registration.backends.default.urls')),
    url(r'^', include('webinfo.urls', namespace='developer')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"thisenginersdevelop@gmail.com"
] | thisenginersdevelop@gmail.com |
e23957f96f10bfe92661bb9900a26b41d96b34c3 | 53f048b80718a63881ed7493459773c2a27aad87 | /offchip/schedule/row_table.py | ab5c5b8228bc1599dfe533068406b15eceff5204 | [] | no_license | flowerbeach/ramulator-python-hbm | 317421995b70277ba3004389b96191d48834dc1e | 4eb99b0305887d188299590d8822ae0ac6c68b80 | refs/heads/main | 2023-01-23T18:32:23.515328 | 2020-12-10T14:49:41 | 2020-12-10T14:49:41 | 314,815,896 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | from typing import Dict
class RowTable(object):
    """Tracks the currently open row (and its access statistics) per
    row-group, mirroring the DRAM row-buffer state for the scheduler."""
    from offchip.controller import Controller
    from offchip.standard import BaseSpec as t_spec

    class Entry(object):
        # One open row: which row it is, how many accesses it has served
        # (hits), and the cycle of the most recent access (timestamp).
        def __init__(self, row, hits, timestamp):
            self.row = row
            self.hits = hits
            self.timestamp = timestamp

    def __init__(self, controller):
        """Create an empty row table bound to *controller*."""
        self.ctrl = controller # type: RowTable.Controller
        self.table = {} # type: Dict[tuple, RowTable.Entry]

    def update(self, cmd, addr_list, cycle_current):
        """Apply *cmd* (issued at *cycle_current* for *addr_list*) to the
        open-row bookkeeping: opening registers a new entry, accessing
        bumps hits/timestamp, closing removes the matching entries."""
        row_group = tuple(addr_list[:self.t_spec.level.row.value])
        row = addr_list[self.t_spec.level.row.value]
        spec = self.ctrl.channel.t_spec
        if spec.is_opening(cmd) is True:
            self.table[row_group] = RowTable.Entry(row, 0, cycle_current)
        if spec.is_accessing(cmd) is True:
            # we are accessing a row -- update its entry
            assert row_group in self.table.keys()
            entry = self.table[row_group]
            assert entry.row == row
            entry.timestamp = cycle_current
            entry.hits += 1
        if spec.is_closing(cmd) is True:
            # we are closing one or more rows -- remove their entries
            n_rm = 0
            if spec.is_accessing(cmd) is True:
                # NOTE(review): this branch binds an int, but `scope` is
                # later dereferenced as `scope.value` — presumably an enum
                # member was intended here; confirm this path is exercised.
                scope = self.t_spec.level.row.value - 1
            else:
                scope = spec.scope[cmd.value]
            table_keys = list(self.table.keys())
            for key in table_keys:
                if key[:(scope.value + 1)] == tuple(addr_list[:(scope.value + 1)]):
                    n_rm += 1
                    del self.table[key]
            # Both branches raise: effectively asserts exactly one row
            # was closed.
            if n_rm <= 0:
                raise Exception(n_rm)
            elif n_rm != 1:
                raise Exception(n_rm)

    def get_hits(self, addr_list, to_opened_row=False):
        """Return the hit count of the open row for addr_list's row-group
        (0 if none is open, or if a different row is open and
        to_opened_row is False)."""
        row_group = tuple(addr_list[:self.t_spec.level.row.value])
        row = addr_list[self.t_spec.level.row.value]
        if row_group not in self.table.keys():
            return 0
        elif to_opened_row is False and self.table[row_group].row != row:
            return 0
        return self.table[row_group].hits

    def get_open_row(self, addr_list: list):
        """Return the open row index for addr_list's row-group, or -1."""
        row_group = tuple(addr_list[:self.t_spec.level.row.value])
        if row_group in self.table.keys():
            return self.table[row_group].row
        else:
            return -1
| [
"flowerbeach@163.com"
] | flowerbeach@163.com |
d79d3e9d09ff5b87d5087bd52ae14e8441771e10 | 8907713ef218305a459f7693c2ca7632d995146e | /resources/user.py | dc39e683a7789e624c41ff7165562a35ad1e70e3 | [] | no_license | Nishil07/Stores_and_Items_REST_API | 61b8867421209ccb58490a9ca8bef5ead54b7022 | b8cdd080c73ccb040a2eb29c7037ba923740fee4 | refs/heads/master | 2023-05-26T12:47:47.883674 | 2021-06-10T04:48:15 | 2021-06-10T04:48:15 | 375,310,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | import sqlite3
from sqlite3.dbapi2 import Cursor
from flask_restful import Resource, reqparse
from models.user import UserModel
class UserRegister(Resource):
    """REST resource that registers a new user via POST."""
    # Request payload schema: both fields are mandatory strings.
    parser = reqparse.RequestParser()
    parser.add_argument('username',
        type = str,
        required = True,
        help = "This field cannot be blank."
    )
    parser.add_argument('password',
        type = str,
        required = True,
        help = "This field cannot be blank."
    )
    def post(self):
        """Create the user if the username is free.

        Returns:
            (dict, int): message payload and HTTP status — 400 when the
            username already exists, 201 on success.
        """
        data = UserRegister.parser.parse_args()
        if UserModel.find_by_username(data['username']):
            return {"message": "Username already exists"}, 400
        user = UserModel(**data)
        user.save_to_db()
        return {"message": "User created successfully."}, 201
| [
"nishilmadhani@gmail.com"
] | nishilmadhani@gmail.com |
94cf9c0ad4d04b80cd676de6917f14c6efbf4630 | d5eb4d20129b6a1fa87ad5d04ad8617ec0c4c7e1 | /Exercises/exercise-29.py | ff73849535b5a4ec6c9b27fd6e95a946784584ea | [
"MIT"
] | permissive | shoriwe-upb/TallerEjercicios | 3b3ad7c358e9fe3a4546cb1ab1c5ca5df1e63e35 | ba37eb6cd673a8d38a1dfac87c5efac3f728da21 | refs/heads/master | 2020-08-05T07:51:59.328963 | 2019-10-19T21:43:21 | 2019-10-19T21:43:21 | 212,453,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | def main():
number = int(input("Number: "))
if number > 0:
print("Positive")
elif number < 0:
print("Negative")
else:
print("Is Zero")
if number % 2:
print("Es impar")
else:
print("Es par")
# Run the exercise only when executed as a script.
if __name__ == '__main__':
    main()
| [
"antoniojosedonishung@gmail.com"
] | antoniojosedonishung@gmail.com |
18a16704f66dd1d340db3c65e8ea06fa3b6b70cd | 59f64b5cf799e31c97b11828dba4787afb8f3f17 | /hail/python/hail/ggplot/aes.py | 5497f28d4d22e7863d89af491b89520fe20e5f4b | [
"MIT"
] | permissive | hail-is/hail | 2089e6f3b38548f13fa5c2a8ab67f5cfdd67b4f1 | 07a483ae0f46c66f3ed6fd265b48f48c06298f98 | refs/heads/main | 2023-09-01T15:03:01.450365 | 2023-09-01T02:46:35 | 2023-09-01T02:46:35 | 45,069,467 | 913 | 262 | MIT | 2023-09-14T21:53:32 | 2015-10-27T20:55:42 | Python | UTF-8 | Python | false | false | 1,112 | py | from collections.abc import Mapping
from hail.expr import Expression
from hail import literal
class Aesthetic(Mapping):
    """Read-only mapping of aesthetic names to expressions."""

    def __init__(self, properties):
        self.properties = properties

    def __getitem__(self, name):
        return self.properties[name]

    def __len__(self):
        return len(self.properties)

    def __contains__(self, name):
        return name in self.properties

    def __iter__(self):
        return iter(self.properties)

    def __repr__(self):
        return repr(self.properties)

    def merge(self, other):
        """Return a new Aesthetic with *other*'s entries overriding ours."""
        combined = dict(self.properties)
        combined.update(other.properties)
        return Aesthetic(combined)
def aes(**kwargs):
    """Create an aesthetic mapping

    Parameters
    ----------
    kwargs:
        Map aesthetic names to hail expressions based on table's plot.

    Returns
    -------
    :class:`.Aesthetic`
        The aesthetic mapping to be applied.
    """
    # Non-expression values are wrapped with hl.literal so every
    # aesthetic is backed by a hail expression.
    return Aesthetic({
        name: value if isinstance(value, Expression) else literal(value)
        for name, value in kwargs.items()
    })
| [
"noreply@github.com"
] | hail-is.noreply@github.com |
cffd05aad6e7ec0a8b97f7e2970e5b764364375f | 2ac0e1ca51c473bba04bb08ea3be2015063a6a13 | /galmeko/hospital/models.py | 99b33f90648b820ca4d8c879fc1956e7d0906004 | [] | no_license | guarav00009/Gaurav-Pandey-Latest | 2012aafe643e1fcc915626422e352d1e4411905a | aa38a47a46bc434f5ec608fde5eec0f0f58259b9 | refs/heads/master | 2020-12-22T10:03:17.325527 | 2020-01-28T13:53:52 | 2020-01-28T13:53:52 | 236,746,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | from django.db import models
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from django.utils.html import format_html
from django.template.response import TemplateResponse
User = settings.AUTH_USER_MODEL
# Create your models here.
class Hospital(models.Model):
    """Hospital profile tied one-to-one to an auth user, with an
    optional attachment and a moderation status."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    hospital_name = models.CharField(max_length=100, blank=False, null=False)
    phone = models.CharField(max_length=15, blank=True, null=True)
    registration_no = models.CharField(max_length=30, unique=True)
    address = models.CharField(max_length=150, blank=False, null=False)
    file = models.ImageField(null=True, blank=True, upload_to="hospital/")
    STATUS_CHOICES = (
        (0, 'Pending'),
        (1, 'Active'),
        (2, 'Rejected'),
        (3, 'Deleted'),
    )
    status = models.IntegerField(
        _('status'), choices=STATUS_CHOICES, default=0)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = 'Hospital'
        verbose_name_plural = 'Hospital'

    def __str__(self):
        return self.hospital_name

    def file_link(self):
        """Admin list helper: render a download link for the attachment."""
        if self.file:
            # Pass the URL as a format_html() argument so it is escaped;
            # the original pre-formatted with '%', which bypassed escaping.
            return format_html("<a href='{}' download>Download</a>", self.file.url)
        else:
            return "No attachment"
    # allow_tags is a no-op since Django 2.0 (format_html already returns
    # a safe string); kept for backward compatibility.
    file_link.allow_tags = True
    file_link.short_description = 'Attachment'
"gauravp@clavax.com"
] | gauravp@clavax.com |
8af698aab033b51b31bb46c525c1c833af760efa | 40e6910cebd11fa00d885271e6382611506a0fae | /mapa-callback.py | 522e06294ee16407a1447224cd1985b87d7547dc | [
"MIT"
] | permissive | paupradel/ecobici_viz | f275c5dc9376bce3613e1e8eb5a3eee9190f453d | 3e98077a37ad75dc76534e21b87604d7e0df7a5f | refs/heads/master | 2023-05-27T21:09:33.826602 | 2022-01-19T16:41:32 | 2022-01-19T16:41:32 | 216,463,367 | 1 | 1 | MIT | 2023-05-01T21:17:19 | 2019-10-21T02:42:51 | Jupyter Notebook | UTF-8 | Python | false | false | 1,550 | py | import dash
import dash_html_components as html
import dash_core_components as dcc
import pandas as pd
import json
import plotly.graph_objects as go
from dash.dependencies import Input, Output
from urllib.request import urlopen
app = dash.Dash(__name__)
server = app.server
# US county boundaries (GeoJSON keyed by FIPS code), fetched at startup.
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
    counties = json.load(response)
# Unemployment rate per county; fips is read as str to keep leading zeros.
df = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/fips-unemp-16.csv",
                 dtype={"fips": str})
# Choropleth of unemployment over the county polygons.
data = go.Choroplethmapbox(geojson=counties,
                           locations=df.fips,
                           z=df.unemp,
                           colorscale="Viridis",
                           zmin=0,
                           zmax=12,
                           marker_opacity=0.5,
                           marker_line_width=0)
layout = go.Layout(mapbox_style="carto-positron",
                   mapbox_zoom=3,
                   mapbox_center={"lat": 37.0902, "lon": -95.7129},
                   margin={"r": 0, "t": 0, "l": 0, "b": 0})
figure = go.Figure(data=data, layout=layout)
# Accumulates the n_clicks values seen by the callback below.
num_clics = []
app.layout= html.Div(dcc.Graph(id='map', figure=figure), id='map-container')
@app.callback(Output('map', 'figure'),
              [Input('map', 'clickData'),
               Input('map-container', 'n_clicks')])
def select_ageb(clickData, n_clicks):
    """Log every click, then hand the (unchanged) figure back to Dash.

    The callback's Output is the map figure, so it must return a figure
    object — the original returned None, which blanks the graph on the
    first interaction.
    """
    num_clics.append(n_clicks)
    print(num_clics)
    return figure
# Start the Dash development server when run directly.
if __name__ == '__main__':
    app.run_server(debug=True)
| [
"pradel.paulina@ciencias.unam.mx"
] | pradel.paulina@ciencias.unam.mx |
9f39e8ce5b2d95f8cb115b32905f66fd93c721a2 | 8660d858ee701efa66e7aac2749e82527614a884 | /metu/data_utils.py | fb6a0a618120be5581d78118741b7a81b1910a62 | [] | no_license | smlblbn/cs231n_metu_hw2 | e7db67d36276595871d5f2743e0a3e7213a446cd | a0fb776f756332778a9dfbb0766b12747e58db36 | refs/heads/master | 2020-03-20T16:28:53.384514 | 2018-08-01T11:24:46 | 2018-08-01T11:24:46 | 137,538,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | import numpy as np
import h5py
from scipy import signal as sg
# min-max scaling
def min_max_scaling(data):
    """Rescale every column of *data* linearly onto the [0, 1] range."""
    col_min = np.amin(data, axis=0)
    col_max = np.amax(data, axis=0)
    span = col_max - col_min
    return (data - col_min) / span
# normalizing
def normalize(data):
    """Standardize every column of *data* to zero mean and unit variance."""
    centered = data - np.mean(data, axis=0)
    return centered / np.std(data, axis=0)
def load_dataset(filename):
    """
    Load your 'PPG to blood pressure' dataset.

    Reads up to 10000 non-overlapping 1000-sample windows from the
    'Part_1' group of the HDF5 file. X holds the PPG windows; Y holds,
    per window, the mean of the ABP maxima and the mean of the ABP
    minima (presumably systolic/diastolic pressure — confirm).
    """
    window = 1000
    instances = 10000
    index = 0
    X = np.ndarray(shape=(instances, window), dtype=float)
    Y = np.ndarray(shape=(instances, 2), dtype=float)
    with h5py.File(filename, 'r') as file:
        data = file['Part_1']
        data_length = data.shape[0]
        i = 0
        while i < data_length and index < instances:
            sample_length = int(file[data[i, 0]][()].shape[0] / window)
            # Column 0 is the PPG signal, column 1 the ABP waveform.
            ppg = file[data[i, 0]][()][:, 0]
            abp = file[data[i, 0]][()][:, 1]
            j = 0
            while j < sample_length and index < instances:
                X[index] = ppg[j * window: (j + 1) * window]
                # Mean of the ABP maxima found in the window.
                abp_max_peak_index = sg.find_peaks_cwt(abp[j * window: (j + 1) * window], np.arange(40, 50))
                Y[index][0] = np.mean(abp[j * window: (j + 1) * window][abp_max_peak_index])
                # Minima are located by peak-picking the reciprocal signal.
                abp_min_peak_index = sg.find_peaks_cwt(1.0 / abp[j * window: (j + 1) * window], np.arange(40, 50))
                Y[index][1] = np.mean(abp[j * window: (j + 1) * window][abp_min_peak_index])
                index += 1
                j += 1
            i += 1
    #X = normalize(X)
    X = min_max_scaling(X)
    return X, Y
| [
"smlblbn@gmail.com"
] | smlblbn@gmail.com |
971d1c9ced43a45970a9011c6c571edb62e6921b | d3cce0c925bff6da1e797575983097102038b7e3 | /nfwfitter/figureoutmissing.py | f6c18e3c939ea9f1b375dcc237aa2de440fcb008 | [] | no_license | LSSTDESC/clmassmod | 1555a16250fbac172b660d0ec2f4bd80ba6d0095 | b608bfb9d557b59cd18e89b46db898e82a6fd010 | refs/heads/master | 2021-09-08T05:33:29.294384 | 2018-03-07T16:58:44 | 2018-03-07T16:58:44 | 113,226,565 | 3 | 6 | null | 2018-03-07T16:58:45 | 2017-12-05T19:54:02 | Python | UTF-8 | Python | false | false | 3,592 | py | #!/usr/bin/env python
import glob
##HST
#sims = 'bk11snap124 mxxlsnap41'.split()
#rss = 'r5 r16'.split()
#mcs = 'c4 duffy diemer15'.split()
#centers = 'xrayNONE core szxvptcenter szlensingpeak xrayXVP xraylensingpeak'.split()
#deltas = (200, 500, 2500)
#
##Megacam
# Megacam configuration: simulations, radial bins, M-c relations and
# centering choices whose cross-product defines the expected configs.
sims = 'bk11snap124 bk11snap141 mxxlsnap41 mxxlsnap54'.split()
rss = ['r9']
mcs = 'c4 duffy diemer15'.split()
centers = 'corenone sztcenter szxvptcenter core'.split()
deltas = (200, 500)
# Map sim -> config -> location of halos already processed.
halosprocessed = {}
for line in open('haloprocessed').readlines():
    tokens = line.split()
    sim, config,loc = tokens
    if sim not in halosprocessed:
        halosprocessed[sim] = {}
    halosprocessed[sim][config] = loc
# Map sim -> config -> list of deltas with finished dln runs.
finished = {}
for line in open('/vol/euclid1/euclid1_2/dapple/rundlns/finished').readlines():
    tokens = line.split()
    sim = tokens[0]
    config = tokens[1]
    # NOTE(review): this rebinds the module-level `deltas` tuple set
    # above; every later `for delta in deltas` loop iterates whatever
    # the last line of this file contained (and under Python 3, `map`
    # would be a one-shot iterator). Confirm this is intended.
    deltas = map(int, tokens[2:])
    if sim not in finished:
        finished[sim] = {}
    cursimlist = finished[sim]
    cursimlist[config] = deltas
needdln = {}
missinghalos = {}
for sim in finished.keys():
    missinghalos[sim] = []
    needdln[sim] = {}
    for delta in deltas:
        needdln[sim][delta] = []
# Walk the expected config cross-product and classify each config as
# finished, needing a dln run, or missing its halo processing.
for sim in sims:
    cursimlist = finished[sim]
    for rs in rss:
        for mc in mcs:
            for center in centers:
#                for line in open('shearprofiles/coresizeindex.list').readlines():
#                    cluster, coreindex, coresize = line.split()
                for line in open('configfiles/megacam_siminput.reduced.list').readlines():
                    cluster, zcluster, ndensity, beta, core, coreindex = line.split()
                    curcenter = center
                    if center == 'core':
                        curcenter = 'core{}'.format(coreindex)
#                    config = 'hstnoisebins-{mc}-{rs}-{curcenter}-{cluster}'.format(mc = mc,
#                                                                                   rs = rs,
#                                                                                   curcenter = curcenter,
#                                                                                   cluster = cluster)
                    config = 'mega-{mc}-{rs}-sigma0.25-{curcenter}-{cluster}'.format(mc = mc,
                                                                                     rs = rs,
                                                                                     curcenter = curcenter,
                                                                                     cluster = cluster)
                    if config not in cursimlist:
                        if config in halosprocessed[sim]:
                            for delta in deltas:
                                needdln[sim][delta].append(config)
                        else:
                            missinghalos[sim].append(config)
                    else:
                        for delta in deltas:
                            if delta not in cursimlist[config]:
                                needdln[sim][delta].append(config)
# Emit one work list per (sim, delta) of configs still needing dln runs.
for sim in needdln.keys():
    for delta in deltas:
        with open('dlntorun.{}.{}'.format(sim, delta), 'w') as output:
            for config in needdln[sim][delta]:
                output.write('{}\n'.format(config))
# Emit one list per sim of configs with no processed halos at all.
for sim in missinghalos.keys():
    with open('missinghalos.{}'.format(sim),'w') as output:
        for config in missinghalos[sim]:
            output.write('{}\n'.format(config))
| [
"dapple@astro.uni-bonn.de"
] | dapple@astro.uni-bonn.de |
7a57e14f8c57e923c2047f8a0314348a9ffee6c2 | 8f61479fb88ed79e5860aba7b797e7e3a6c33f25 | /spider/parseHtml.py | 920d2c91c7035822a43a777dade6abd09276a5e0 | [
"MIT"
] | permissive | Lemon-cc-hang/proxiespool | c29ca1e598a263632aa13ed7f09f72afe9868f0a | 6af74b5aa507fbded9199979e7f0f9b1610ee274 | refs/heads/master | 2022-11-25T10:24:59.724242 | 2020-07-31T08:01:37 | 2020-07-31T08:01:37 | 279,520,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: parseHtml.py
Description :
Author : lemoncc
date: 2020/7/14
-------------------------------------------------
"""
__author__ = 'lemoncc'
from bs4 import BeautifulSoup
from helper.verify import Verify
class ParseHtml(object):
    """Scrape proxy addresses from free-proxy listing pages.

    Each ``parse_html_*`` method takes a requests-style response object
    (only its ``.text`` attribute is read), extracts candidate proxies from
    the page's HTML table, and hands them to the shared verifier.
    """

    def __init__(self):
        # Liveness checking is delegated to the shared Verify helper.
        self.verify_ip = Verify().verify_ip

    def parse_html_kuaidaili(self, html):
        """Parse a kuaidaili page: ip and port live in separate <td> cells."""
        if html is None:
            return
        soup = BeautifulSoup(html.text, 'html.parser')
        proxies = []
        for row in soup.select('table tbody tr'):
            cells = row.select('td')
            addr = f'http://{cells[0].text}:{cells[1].text}'
            proxies.append({'http': addr, 'https': addr})
        self.verify_ip(proxies)

    def parse_html_xiladaili(self, html):
        """Parse a xiladaili page: ip:port combined in the first <td>."""
        if html is None:
            return
        soup = BeautifulSoup(html.text, 'html.parser')
        proxies = []
        for row in soup.select('table tbody tr'):
            addr = f'http://{row.select_one("td").text}'
            proxies.append({'http': addr, 'https': addr})
        self.verify_ip(proxies)

    def parse_html_ip89(self, html):
        """Parse an 89ip page: the ip cell carries surrounding whitespace."""
        if html is None:
            return
        soup = BeautifulSoup(html.text, 'html.parser')
        proxies = []
        for row in soup.select('.layui-table tbody tr'):
            cells = row.select('td')
            addr = f'http://{cells[0].text.strip()}:{cells[1].text}'
            proxies.append({'http': addr, 'https': addr})
        self.verify_ip(proxies)
| [
"a1029516811@gmail.com"
] | a1029516811@gmail.com |
f3a28dd133d6ac8424668e897cf9def1b308e65b | f8098142b0369e8ef52200ee101492091cb035a7 | /fairseq/fairseq_cli/hydra_train.py | c1037fb78a3f8a415eb665bc7493b7282e6c577c | [
"MIT"
] | permissive | dumpmemory/Compositional-Attention | 2a4f6c8035333ee33247e7d4d9e23a753d470e60 | c2c9d8391144db4155922bb859183e844eefb568 | refs/heads/main | 2023-08-27T01:21:52.310888 | 2021-11-12T16:54:02 | 2021-11-12T16:54:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,406 | py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from fairseq.dataclass.initialize import hydra_init
from fairseq_cli.train import main as pre_main
from fairseq import distributed_utils, metrics
from fairseq.dataclass.configs import FairseqConfig
import hydra
import torch
from omegaconf import OmegaConf
logger = logging.getLogger("fairseq_cli.hydra_train")
@hydra.main(config_path=os.path.join("..", "fairseq", "config"), config_name="config2")
def hydra_main(cfg: FairseqConfig) -> float:
    """Hydra entry point: run training and return the best validation metric.

    Returning the best value makes this function directly usable as an
    objective for hydra sweepers (lower is better; missing -> +inf).
    """
    # Resolve all interpolations eagerly, then freeze the config structure so
    # typos in later attribute access raise instead of creating new keys.
    cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True, enum_to_str=True))
    OmegaConf.set_struct(cfg, True)

    if cfg.common.reset_logging:
        reset_logging()  # Hydra hijacks logging, fix that

    try:
        if cfg.common.profile:
            # Wrap training in CUDA profiler + nvtx ranges when profiling.
            with torch.cuda.profiler.profile():
                with torch.autograd.profiler.emit_nvtx():
                    distributed_utils.call_main(cfg, pre_main)
        else:
            distributed_utils.call_main(cfg, pre_main)
    except BaseException as e:
        # BaseException (not Exception) is deliberate here: with
        # suppress_crashes set, even SystemExit/KeyboardInterrupt during a
        # sweep are logged rather than killing the sweeper.
        if not cfg.common.suppress_crashes:
            raise
        else:
            logger.error("Crashed! " + str(e))

    # get best val and return - useful for sweepers
    try:
        best_val = metrics.get_smoothed_value(
            "valid", cfg.checkpoint.best_checkpoint_metric
        )
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not silently swallowed while probing the metric.
        best_val = None

    if best_val is None:
        best_val = float("inf")

    return best_val
def reset_logging():
    """Reinstall a single stdout handler on the root logger.

    Hydra replaces the root logging configuration; this restores the
    expected format and honours the LOGLEVEL environment variable
    (defaulting to INFO).
    """
    root = logging.getLogger()
    # Iterate over a copy: removing handlers while iterating the live
    # `root.handlers` list skips every other handler (bug in the original),
    # leaving stale handlers attached and duplicating log output.
    for handler in list(root.handlers):
        root.removeHandler(handler)
    root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        logging.Formatter(
            fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
    )
    root.addHandler(handler)
def cli_main():
    """Console entry point: resolve the config name, register it, launch hydra."""
    try:
        from hydra._internal.utils import get_args

        cfg_name = get_args().config_name or "config"
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit are not
        # swallowed while probing hydra's private CLI helpers.
        logger.warning("Failed to get config name from hydra args")
        cfg_name = "config"

    hydra_init(cfg_name)
    hydra_main()
if __name__ == "__main__":
cli_main()
| [
"sarthmit@gmail.com"
] | sarthmit@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.